Merge tag 'for-linus-4.3' of git://git.code.sf.net/p/openipmi/linux-ipmi

Pull IPMI updates from Corey Minyard:
 "Most of these have been sitting in linux-next for more than a release,
  particularly commit 0fbcf4af7c83 ("ipmi: Convert the IPMI SI ACPI
  handling to a platform device") which is probably the most complex
  patch.

  That is also the one that changes drivers/acpi/acpi_pnp.c.  The change
  in that file is only removing IPMI from a "special platform devices"
  list, since I convert it to the standard PNP interface.  I posted this
  one to the ACPI list twice and got no response, and it seems to work
  well in my testing, so I'm hoping it's good.

  Hidehiro Kawai posted a set of changes that improves the panic time
  handling in the IPMI driver.

  The rest of the changes are minor bug fixes or cleanups and some
  documentation"

* tag 'for-linus-4.3' of git://git.code.sf.net/p/openipmi/linux-ipmi:
  ipmi:ssif: Add a module parm to specify that SMBus alerts don't work
  ipmi: add of_device_id in MODULE_DEVICE_TABLE
  ipmi: Compensate for BMCs that won't set the irq enable bit
  ipmi: Don't call receive handler in the panic context
  ipmi: Avoid touching possible corrupted lists in the panic context
  ipmi: Don't flush messages in sender() in run-to-completion mode
  ipmi: Factor out message flushing procedure
  ipmi: Remove unneeded set_run_to_completion call
  ipmi: Make some data const that was only read
  ipmi: constify SSIF ACPI device ids
  ipmi: Delete an unnecessary check before the function call "cleanup_one_si"
  char:ipmi - Change 1 to true for bool type variables during initialization.
  ipmi: Remove unneeded setting of module owner to THIS_MODULE in the platform structure, powernv_ipmi_driver
  ipmi: Add a comment in how messages are delivered from the lower layer
  ipmi/powernv: Fix potential invalid pointer dereference
  ipmi: Convert the IPMI SI ACPI handling to a platform device
  ipmi: Add device tree bindings information
diff --git a/.gitignore b/.gitignore
index 4ad4a98..fd3a355 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,6 +36,7 @@
 modules.builtin
 Module.symvers
 *.dwo
+*.su
 
 #
 # Top-level generic files
@@ -44,6 +45,7 @@
 /TAGS
 /linux
 /vmlinux
+/vmlinux.32
 /vmlinux-gdb.py
 /vmlinuz
 /System.map
@@ -89,6 +91,9 @@
 GSYMS
 GTAGS
 
+# id-utils files
+ID
+
 *.orig
 *~
 \#*#
@@ -97,6 +102,7 @@
 # Leavings from module signing
 #
 extra_certificates
+signing_key.pem
 signing_key.priv
 signing_key.x509
 x509.genkey
diff --git a/CREDITS b/CREDITS
index 4fcf9cd..bcb8efa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -20,6 +20,10 @@
 S: (ask for current address)
 S: Finland
 
+N: Thomas Abraham
+E: thomas.ab@samsung.com
+D: Samsung pin controller driver
+
 N: Dragos Acostachioaie
 E: dragos@iname.com
 W: http://www.arbornet.org/~dragos
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index acfe9df..b07e86d 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -223,3 +223,13 @@
                 Writing 1 will issue a PERST to card which may cause the card
                 to reload the FPGA depending on load_image_on_perst.
 Users:		https://github.com/ibm-capi/libcxl
+
+What:		/sys/class/cxl/<card>/perst_reloads_same_image
+Date:		July 2015
+Contact:	linuxppc-dev@lists.ozlabs.org
+Description:	read/write
+		Trust that when an image is reloaded via PERST, it will not
+		have changed.
+		0 = don't trust, the image may be different (default)
+		1 = trust that the image will not change.
+Users:		https://github.com/ibm-capi/libcxl
diff --git a/Documentation/ABI/testing/sysfs-gpio b/Documentation/ABI/testing/sysfs-gpio
index 80f4c94..55ffa2d 100644
--- a/Documentation/ABI/testing/sysfs-gpio
+++ b/Documentation/ABI/testing/sysfs-gpio
@@ -16,7 +16,8 @@
     /sys/class/gpio
 	/export ... asks the kernel to export a GPIO to userspace
 	/unexport ... to return a GPIO to the kernel
-	/gpioN ... for each exported GPIO #N
+	/gpioN ... for each exported GPIO #N OR
+	/<LINE-NAME> ... for a properly named GPIO line
 	    /value ... always readable, writes fail for input GPIOs
 	    /direction ... r/w as: in, out (default low); write: high, low
 	    /edge ... r/w as: none, falling, rising, both
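
A minimal user-space sketch of the sysfs interface above; the GPIO number 23
is a made-up example (not from the patch) and error handling is kept to a
bare minimum:

/* Sketch: export GPIO 23 via sysfs and read its value. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/class/gpio/export", O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, "23", 2);             /* creates /sys/class/gpio/gpio23 */
        close(fd);

        fd = open("/sys/class/gpio/gpio23/value", O_RDONLY);
        if (fd >= 0) {
                char buf[4] = "";

                read(fd, buf, sizeof(buf) - 1);
                printf("value: %s\n", buf);
                close(fd);
        }
        return 0;
}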
diff --git a/Documentation/ABI/testing/sysfs-hypervisor-pmu b/Documentation/ABI/testing/sysfs-hypervisor-pmu
new file mode 100644
index 0000000..224faa1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-hypervisor-pmu
@@ -0,0 +1,23 @@
+What:		/sys/hypervisor/pmu/pmu_mode
+Date:		August 2015
+KernelVersion:	4.3
+Contact:	Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Description:
+		Describes the mode that Xen's performance-monitoring unit (PMU)
+		uses. Accepted values are
+			"off"  -- PMU is disabled
+			"self" -- The guest can profile itself
+			"hv"   -- The guest can profile itself and, if it is
+				  privileged (e.g. dom0), the hypervisor
+			"all" --  The guest can profile itself, the hypervisor
+				  and all other guests. Only available to
+				  privileged guests.
+
+What:           /sys/hypervisor/pmu/pmu_features
+Date:           August 2015
+KernelVersion:  4.3
+Contact:        Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Description:
+		Describes Xen PMU features (as an integer). A set bit indicates
+		that the corresponding feature is enabled. See
+		include/xen/interface/xenpmu.h for available features.
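
A rough user-space sketch of reading both attributes; the "%i" parse of
pmu_features is an assumption, since the ABI text only says "as an integer":

/* Sketch: query the Xen PMU sysfs attributes documented above. */
#include <stdio.h>

int main(void)
{
        char mode[16] = "";
        int features = 0;
        FILE *f = fopen("/sys/hypervisor/pmu/pmu_mode", "r");

        if (f) {
                fscanf(f, "%15s", mode);
                fclose(f);
        }
        f = fopen("/sys/hypervisor/pmu/pmu_features", "r");
        if (f) {
                fscanf(f, "%i", &features); /* "%i" accepts decimal or 0x-hex */
                fclose(f);
        }
        printf("pmu_mode=%s pmu_features=%#x\n", mode, features);
        return 0;
}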
diff --git a/Documentation/Changes b/Documentation/Changes
index 646cdaa..6d88630 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,6 +43,7 @@
 o  grub                   0.93                    # grub --version || grub-install --version
 o  mcelog                 0.6                     # mcelog --version
 o  iptables               1.4.2                   # iptables -V
+o  openssl & libcrypto    1.0.1k                  # openssl version
 
 
 Kernel compilation
@@ -79,6 +80,17 @@
 You will need bc to build kernels 3.10 and higher
 
 
+OpenSSL
+-------
+
+Module signing and external certificate handling use the OpenSSL program and
+crypto library to do key creation and signature generation.
+
+You will need openssl to build kernels 3.7 and higher if module signing is
+enabled.  You will also need openssl development packages to build kernels 4.3
+and higher.
+
+
 System utilities
 ================
 
@@ -295,6 +307,10 @@
 --------
 o  <ftp://ftp.kernel.org/pub/linux/devel/binutils/>
 
+OpenSSL
+-------
+o  <https://www.openssl.org/>
+
 System utilities
 ****************
 
@@ -392,4 +408,3 @@
 NFS-Utils
 ---------
 o  <http://nfs.sourceforge.net/>
-
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 7eba542..edccacd 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -104,6 +104,13 @@
 from this pool must not cross 4KByte boundaries.
 
 
+	void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+			      dma_addr_t *handle)
+
+Wraps dma_pool_alloc() and also zeroes the returned memory if the
+allocation attempt succeeded.
+
+
 	void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
 			dma_addr_t *dma_handle);
 
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
index 71f9246..e94a10b 100644
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ b/Documentation/DocBook/alsa-driver-api.tmpl
@@ -108,7 +108,7 @@
      <sect1><title>ASoC Core API</title>
 !Iinclude/sound/soc.h
 !Esound/soc/soc-core.c
-!Esound/soc/soc-cache.c
+<!-- !Esound/soc/soc-cache.c no docbook comments here -->
 !Esound/soc/soc-devres.c
 !Esound/soc/soc-io.c
 !Esound/soc/soc-pcm.c
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index bbc1d7e..abba93f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -217,6 +217,40 @@
 -->
   </chapter>
 
+  <chapter id="mediadev">
+     <title>Media Devices</title>
+
+     <sect1><title>Video4Linux devices</title>
+!Iinclude/media/v4l2-async.h
+!Iinclude/media/v4l2-ctrls.h
+!Iinclude/media/v4l2-dv-timings.h
+!Iinclude/media/v4l2-event.h
+!Iinclude/media/v4l2-flash-led-class.h
+!Iinclude/media/v4l2-mediabus.h
+!Iinclude/media/v4l2-mem2mem.h
+!Iinclude/media/v4l2-of.h
+!Iinclude/media/v4l2-subdev.h
+!Iinclude/media/videobuf2-core.h
+!Iinclude/media/videobuf2-memops.h
+     </sect1>
+     <sect1><title>Digital TV (DVB) devices</title>
+!Idrivers/media/dvb-core/dvb_ca_en50221.h
+!Idrivers/media/dvb-core/dvb_frontend.h
+!Idrivers/media/dvb-core/dvb_math.h
+!Idrivers/media/dvb-core/dvb_ringbuffer.h
+!Idrivers/media/dvb-core/dvbdev.h
+     </sect1>
+     <sect1><title>Remote Controller devices</title>
+!Iinclude/media/rc-core.h
+     </sect1>
+     <sect1><title>Media Controller devices</title>
+!Iinclude/media/media-device.h
+!Iinclude/media/media-devnode.h
+!Iinclude/media/media-entity.h
+     </sect1>
+
+  </chapter>
+
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2fb9a54..9ddf8c6 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3982,7 +3982,6 @@
         <title>Interrupt Handling</title>
 !Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
       </sect2>
@@ -4012,7 +4011,6 @@
         <title>Frontbuffer Tracking</title>
 !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
 !Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
       </sect2>
       <sect2>
@@ -4045,6 +4043,11 @@
         </para>
       </sect2>
       <sect2>
+        <title>Hotplug</title>
+!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
+!Idrivers/gpu/drm/i915/intel_hotplug.c
+      </sect2>
+      <sect2>
 	<title>High Definition Audio</title>
 !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
 !Idrivers/gpu/drm/i915/intel_audio.c
@@ -4195,6 +4198,23 @@
 !Idrivers/gpu/drm/i915/i915_gem_gtt.c
       </sect2>
       <sect2>
+        <title>GTT Fences and Swizzling</title>
+!Idrivers/gpu/drm/i915/i915_gem_fence.c
+        <sect3>
+          <title>Global GTT Fence Handling</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
+        </sect3>
+        <sect3>
+          <title>Hardware Tiling and Swizzling Details</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
+        </sect3>
+      </sect2>
+      <sect2>
+        <title>Object Tiling IOCTLs</title>
+!Idrivers/gpu/drm/i915/i915_gem_tiling.c
+!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
+      </sect2>
+      <sect2>
         <title>Buffer Object Eviction</title>
 	<para>
 	  This section documents the interface functions for evicting buffer
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index bcdfdb9..6006b63 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -146,36 +146,30 @@
 The journalling layer is  easy to use. You need to
 first of all create a journal_t data structure. There are
 two calls to do this dependent on how you decide to allocate the physical
-media on which the journal resides. The journal_init_inode() call
-is for journals stored in filesystem inodes, or the journal_init_dev()
-call can be use for journal stored on a raw device (in a continuous range
+media on which the journal resides. The jbd2_journal_init_inode() call
+is for journals stored in filesystem inodes, or the jbd2_journal_init_dev()
+call can be used for a journal stored on a raw device (in a contiguous range
 of blocks). A journal_t is a typedef for a struct pointer, so when
-you are finally finished make sure you call journal_destroy() on it
+you are finally finished make sure you call jbd2_journal_destroy() on it
 to free up any used kernel memory.
 </para>
 
 <para>
 Once you have got your journal_t object you need to 'mount' or load the journal
-file, unless of course you haven't initialised it yet - in which case you
-need to call journal_create().
+file. The journalling layer expects that the space for the journal has already
+been allocated and initialized properly by the userspace tools.  When loading
+the journal you must call jbd2_journal_load() to process the journal contents.
+If the client file system detects that the journal contents do not need to be
+processed (or may not even contain valid contents), it may call
+jbd2_journal_wipe() to clear the journal contents before calling
+jbd2_journal_load().
 </para>
 
 <para>
-Most of the time however your journal file will already have been created, but
-before you load it you must call journal_wipe() to empty the journal file.
-Hang on, you say , what if the filesystem wasn't cleanly umount()'d . Well, it is the
-job of the client file system to detect this and skip the call to journal_wipe().
-</para>
-
-<para>
-In either case the next call should be to journal_load() which prepares the
-journal file for use. Note that journal_wipe(..,0) calls journal_skip_recovery()
-for you if it detects any outstanding transactions in the journal and similarly
-journal_load() will call journal_recover() if necessary.
-I would advise reading fs/ext3/super.c for examples on this stage.
-[RGG: Why is the journal_wipe() call necessary - doesn't this needlessly
-complicate the API. Or isn't a good idea for the journal layer to hide
-dirty mounts from the client fs]
+Note that jbd2_journal_wipe(..,0) calls jbd2_journal_skip_recovery() for you if
+it detects any outstanding transactions in the journal and similarly
+jbd2_journal_load() will call jbd2_journal_recover() if necessary.  I would
+advise reading ext4_load_journal() in fs/ext4/super.c for examples on this
+stage.
 </para>
 
 <para>
@@ -189,41 +183,41 @@
 is done by wrapping them into transactions. Additionally you
 also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
-you are actually making are. To do this use  journal_start() which
+you are actually making are. To do this use jbd2_journal_start() which
 returns a transaction handle.
 </para>
 
 <para>
-journal_start()
-and its counterpart journal_stop(), which indicates the end of a transaction
-are nestable calls, so you can reenter a transaction if necessary,
-but remember you must call journal_stop() the same number of times as
-journal_start() before the transaction is completed (or more accurately
-leaves the update phase). Ext3/VFS makes use of this feature to simplify
-quota support.
+jbd2_journal_start()
+and its counterpart jbd2_journal_stop(), which indicates the end of a
+transaction are nestable calls, so you can reenter a transaction if necessary,
+but remember you must call jbd2_journal_stop() the same number of times as
+jbd2_journal_start() before the transaction is completed (or more accurately
+leaves the update phase). Ext4/VFS makes use of this feature to simplify
+handling of inode dirtying, quota support, etc.
 </para>
 
 <para>
 Inside each transaction you need to wrap the modifications to the
 individual buffers (blocks). Before you start to modify a buffer you
-need to call journal_get_{create,write,undo}_access() as appropriate,
+need to call jbd2_journal_get_{create,write,undo}_access() as appropriate,
 this allows the journalling layer to copy the unmodified data if it
 needs to. After all the buffer may be part of a previously uncommitted
 transaction.
 At this point you are at last ready to modify a buffer, and once
-you are have done so you need to call journal_dirty_{meta,}data().
+you have done so you need to call jbd2_journal_dirty_{meta,}data().
 Or if you've asked for access to a buffer you now know is now longer
-required to be pushed back on the device you can call journal_forget()
+required to be pushed back on the device you can call jbd2_journal_forget()
 in much the same way as you might have used bforget() in the past.
 </para>
 
 <para>
-A journal_flush() may be called at any time to commit and checkpoint
+A jbd2_journal_flush() may be called at any time to commit and checkpoint
 all your transactions.
 </para>
 
 <para>
-Then at umount time , in your put_super() you can then call journal_destroy()
+Then at umount time, in your put_super(), you can call jbd2_journal_destroy()
 to clean up your in-core journal object.
 </para>
 
@@ -231,53 +225,68 @@
 Unfortunately there a couple of ways the journal layer can cause a deadlock.
 The first thing to note is that each task can only have
 a single outstanding transaction at any one time, remember nothing
-commits until the outermost journal_stop(). This means
+commits until the outermost jbd2_journal_stop(). This means
 you must complete the transaction at the end of each file/inode/address
 etc. operation you perform, so that the journalling system isn't re-entered
 on another journal. Since transactions can't be nested/batched
 across differing journals, and another filesystem other than
-yours (say ext3) may be modified in a later syscall.
+yours (say ext4) may be modified in a later syscall.
 </para>
 
 <para>
-The second case to bear in mind is that journal_start() can
+The second case to bear in mind is that jbd2_journal_start() can
 block if there isn't enough space in the journal for your transaction
 (based on the passed nblocks param) - when it blocks it merely(!) needs to
 wait for transactions to complete and be committed from other tasks,
-so essentially we are waiting for journal_stop(). So to avoid
-deadlocks you must treat journal_start/stop() as if they
+so essentially we are waiting for jbd2_journal_stop(). So to avoid
+deadlocks you must treat jbd2_journal_start/stop() as if they
 were semaphores and include them in your semaphore ordering rules to prevent
-deadlocks. Note that journal_extend() has similar blocking behaviour to
-journal_start() so you can deadlock here just as easily as on journal_start().
+deadlocks. Note that jbd2_journal_extend() has similar blocking behaviour to
+jbd2_journal_start() so you can deadlock here just as easily as on
+jbd2_journal_start().
 </para>
 
 <para>
 Try to reserve the right number of blocks the first time. ;-). This will
 be the maximum number of blocks you are going to touch in this transaction.
-I advise having a look at at least ext3_jbd.h to see the basis on which
-ext3 uses to make these decisions.
+I advise having a look at least at ext4_jbd2.h to see the basis on which
+ext4 makes these decisions.
 </para>
 
 <para>
 Another wriggle to watch out for is your on-disk block allocation strategy.
-why? Because, if you undo a delete, you need to ensure you haven't reused any
-of the freed blocks in a later transaction. One simple way of doing this
-is make sure any blocks you allocate only have checkpointed transactions
-listed against them. Ext3 does this in ext3_test_allocatable().
+Why? Because, if you do a delete, you need to ensure you haven't reused any
+of the freed blocks until the transaction freeing these blocks commits. If you
+reused these blocks and a crash happens, there is no way to restore the contents
+of the reallocated blocks at the end of the last fully committed transaction.
+
+One simple way of doing this is to mark blocks as free in internal in-memory
+block allocation structures only after the transaction freeing them commits.
+Ext4 uses the journal commit callback for this purpose.
 </para>
 
 <para>
-Lock is also providing through journal_{un,}lock_updates(),
-ext3 uses this when it wants a window with a clean and stable fs for a moment.
-eg.
+With journal commit callbacks you can ask the journalling layer to call a
+callback function when the transaction is finally committed to disk, so that
+you can do some of your own management. You ask the journalling layer to call
+the callback by simply setting the journal->j_commit_callback function
+pointer; that function is called after each transaction commit. You can also
+use transaction->t_private_list for attaching entries to a transaction that
+need processing when the transaction commits.
+</para>
+
+<para>
+JBD2 also provides a way to block all transaction updates via
+jbd2_journal_{un,}lock_updates(). Ext4 uses this when it wants a window with a
+clean and stable fs for a moment.  E.g.
 </para>
 
 <programlisting>
 
-	journal_lock_updates() //stop new stuff happening..
-	journal_flush()        // checkpoint everything.
+	jbd2_journal_lock_updates() //stop new stuff happening..
+	jbd2_journal_flush()        // checkpoint everything.
 	..do stuff on stable fs
-	journal_unlock_updates() // carry on with filesystem use.
+	jbd2_journal_unlock_updates() // carry on with filesystem use.
 </programlisting>
 
 <para>
@@ -286,29 +295,6 @@
 calls.
 </para>
 
-<para>
-A new feature of jbd since 2.5.25 is commit callbacks with the new
-journal_callback_set() function you can now ask the journalling layer
-to call you back when the transaction is finally committed to disk, so that
-you can do some of your own management. The key to this is the journal_callback
-struct, this maintains the internal callback information but you can
-extend it like this:-
-</para>
-<programlisting>
-	struct  myfs_callback_s {
-		//Data structure element required by jbd..
-		struct journal_callback for_jbd;
-		// Stuff for myfs allocated together.
-		myfs_inode*    i_commited;
-
-	}
-</programlisting>
-
-<para>
-this would be useful if you needed to know when data was committed to a
-particular inode.
-</para>
-
     </sect2>
 
     <sect2 id="jbd_summary">
@@ -319,36 +305,6 @@
 to tell the journalling layer about them.
 </para>
 
-<para>
-Here is a some pseudo code to give you an idea of how it works, as
-an example.
-</para>
-
-<programlisting>
-  journal_t* my_jnrl = journal_create();
-  journal_init_{dev,inode}(jnrl,...)
-  if (clean) journal_wipe();
-  journal_load();
-
-   foreach(transaction) { /*transactions must be
-                            completed before
-                            a syscall returns to
-                            userspace*/
-
-          handle_t * xct=journal_start(my_jnrl);
-          foreach(bh) {
-                journal_get_{create,write,undo}_access(xact,bh);
-                if ( myfs_modify(bh) ) { /* returns true
-                                        if makes changes */
-                           journal_dirty_{meta,}data(xact,bh);
-                } else {
-                           journal_forget(bh);
-                }
-          }
-          journal_stop(xct);
-   }
-   journal_destroy(my_jrnl);
-</programlisting>
     </sect2>
 
     </sect1>
@@ -357,13 +313,13 @@
      <title>Data Types</title>
      <para>
 	The journalling layer uses typedefs to 'hide' the concrete definitions
-	of the structures used. As a client of the JBD layer you can
+	of the structures used. As a client of the JBD2 layer you can
 	just rely on the using the pointer as a magic cookie  of some sort.
 
 	Obviously the hiding is not enforced as this is 'C'.
      </para>
 	<sect2 id="structures"><title>Structures</title>
-!Iinclude/linux/jbd.h
+!Iinclude/linux/jbd2.h
 	</sect2>
     </sect1>
 
@@ -375,11 +331,11 @@
 	manage transactions
      </para>
 	<sect2 id="journal_level"><title>Journal Level</title>
-!Efs/jbd/journal.c
-!Ifs/jbd/recovery.c
+!Efs/jbd2/journal.c
+!Ifs/jbd2/recovery.c
 	</sect2>
 	<sect2 id="transaction_level"><title>Transasction Level</title>
-!Efs/jbd/transaction.c
+!Efs/jbd2/transaction.c
 	</sect2>
     </sect1>
     <sect1 id="see_also">
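
With the old pseudo-code example removed above, a rough sketch of the same
flow against the jbd2_ names may still help; 'journal' and 'bh' are assumed
to come from the client filesystem, and error handling is minimal:

/* Sketch: one metadata update wrapped in a jbd2 transaction. */
#include <linux/buffer_head.h>
#include <linux/err.h>
#include <linux/jbd2.h>

static int example_update(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle;
        int err;

        handle = jbd2_journal_start(journal, 1);  /* reserve one block */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify the buffer contents here ... */
                err = jbd2_journal_dirty_metadata(handle, bh);
        }

        /* The outermost stop completes (leaves) the transaction. */
        jbd2_journal_stop(handle);
        return err;
}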
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index 23996f8..08527e7 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -199,7 +199,8 @@
 #
 
 install_media_images = \
-	$(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/*.svg $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+	$(Q)-mkdir $(MEDIA_OBJ_DIR)/media_api; \
+	cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/*.svg $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 	$(Q)base64 -d $< >$@
diff --git a/Documentation/DocBook/media/dvb/intro.xml b/Documentation/DocBook/media/dvb/intro.xml
index bcc72c2..51db156 100644
--- a/Documentation/DocBook/media/dvb/intro.xml
+++ b/Documentation/DocBook/media/dvb/intro.xml
@@ -163,9 +163,8 @@
 <para>where N enumerates the DVB PCI cards in a system starting
 from&#x00A0;0, and M enumerates the devices of each type within each
 adapter, starting from&#x00A0;0, too. We will omit the &#8220;
-<constant>/dev/dvb/adapterN/</constant>&#8221; in the further dicussion
-of these devices. The naming scheme for the devices is the same wheter
-devfs is used or not.</para>
+<constant>/dev/dvb/adapterN/</constant>&#8221; in the further discussion
+of these devices.</para>
 
 <para>More details about the data structures and function calls of all
 the devices are described in the following chapters.</para>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 6e1667b..33aece5 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -3414,7 +3414,7 @@
 		<row>
 		  <entry><constant>V4L2_EXPOSURE_METERING_MATRIX</constant>&nbsp;</entry>
 		  <entry>A multi-zone metering. The light intensity is measured
-in several points of the frame and the the results are combined. The
+in several points of the frame and the results are combined. The
 algorithm of the zones selection and their significance in calculating the
 final value is device dependent.</entry>
 		</row>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-device-info.xml b/Documentation/DocBook/media/v4l/media-ioc-device-info.xml
index 2ce5214..b0a21ac 100644
--- a/Documentation/DocBook/media/v4l/media-ioc-device-info.xml
+++ b/Documentation/DocBook/media/v4l/media-ioc-device-info.xml
@@ -102,7 +102,7 @@
 	  </row>
 	  <row>
 	    <entry>__u32</entry>
-	    <entry><structfield>media_version</structfield></entry>
+	    <entry><structfield>driver_version</structfield></entry>
 	    <entry>Media device driver version, formatted with the
 	    <constant>KERNEL_VERSION()</constant> macro. Together with the
 	    <structfield>driver</structfield> field this identifies a particular
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index a78c920..0ae0b6a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -62,28 +62,28 @@
 &VIDIOC-REQBUFS; ioctl.</para>
 
 <para> To export a buffer, applications fill &v4l2-exportbuffer;.  The
-<structfield> type </structfield> field is set to the same buffer type as was
-previously used with  &v4l2-requestbuffers;<structfield> type </structfield>.
-Applications must also set the <structfield> index </structfield> field. Valid
+<structfield>type</structfield> field is set to the same buffer type as was
+previously used with &v4l2-requestbuffers; <structfield>type</structfield>.
+Applications must also set the <structfield>index</structfield> field. Valid
 index numbers range from zero to the number of buffers allocated with
-&VIDIOC-REQBUFS; (&v4l2-requestbuffers;<structfield> count </structfield>)
-minus one.  For the multi-planar API, applications set the <structfield> plane
-</structfield> field to the index of the plane to be exported. Valid planes
+&VIDIOC-REQBUFS; (&v4l2-requestbuffers; <structfield>count</structfield>)
+minus one.  For the multi-planar API, applications set the <structfield>plane</structfield>
+field to the index of the plane to be exported. Valid planes
 range from zero to the maximal number of valid planes for the currently active
-format. For the single-planar API, applications must set <structfield> plane
-</structfield> to zero.  Additional flags may be posted in the <structfield>
-flags </structfield> field.  Refer to a manual for open() for details.
+format. For the single-planar API, applications must set <structfield>plane</structfield>
+to zero.  Additional flags may be posted in the <structfield>flags</structfield>
+field.  Refer to a manual for open() for details.
 Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported.  All
 other fields must be set to zero.
 In the case of multi-planar API, every plane is exported separately using
-multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
+multiple <constant>VIDIOC_EXPBUF</constant> calls.</para>
 
-<para> After calling <constant>VIDIOC_EXPBUF</constant> the <structfield> fd
-</structfield> field will be set by a driver.  This is a DMABUF file
+<para>After calling <constant>VIDIOC_EXPBUF</constant> the <structfield>fd</structfield>
+field will be set by a driver.  This is a DMABUF file
 descriptor. The application may pass it to other DMABUF-aware devices. Refer to
 <link linkend="dmabuf">DMABUF importing</link> for details about importing
 DMABUF files into V4L2 nodes. It is recommended to close a DMABUF file when it
-is no longer used to allow the associated memory to be reclaimed. </para>
+is no longer used to allow the associated memory to be reclaimed.</para>
   </refsect1>
 
   <refsect1>
@@ -170,9 +170,9 @@
 	  <row>
 	    <entry>__u32</entry>
 	    <entry><structfield>flags</structfield></entry>
-	    <entry>Flags for the newly created file, currently only <constant>
-O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
-</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
+	    <entry>Flags for the newly created file, currently only
+<constant>O_CLOEXEC</constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY</constant>,
+and <constant>O_RDWR</constant> are supported, refer to the manual
 of open() for more details.</entry>
 	  </row>
 	  <row>
@@ -200,9 +200,9 @@
 	<term><errorcode>EINVAL</errorcode></term>
 	<listitem>
 	  <para>A queue is not in MMAP mode or DMABUF exporting is not
-supported or <structfield> flags </structfield> or <structfield> type
-</structfield> or <structfield> index </structfield> or <structfield> plane
-</structfield> fields are invalid.</para>
+supported or <structfield>flags</structfield> or <structfield>type</structfield>
+or <structfield>index</structfield> or <structfield>plane</structfield> fields
+are invalid.</para>
 	</listitem>
       </varlistentry>
     </variablelist>
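
A minimal user-space sketch of the ioctl as documented above, assuming a
single-planar V4L2_MEMORY_MMAP capture queue on an already-open device:

/* Sketch: export buffer 0 of a capture queue as a DMABUF file descriptor. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int export_buffer(int vfd)
{
        struct v4l2_exportbuffer expbuf;

        memset(&expbuf, 0, sizeof(expbuf));
        expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        expbuf.index = 0;       /* zero to REQBUFS count minus one */
        expbuf.plane = 0;       /* single-planar API: must be zero */
        expbuf.flags = O_CLOEXEC;

        if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0)
                return -1;
        return expbuf.fd;       /* pass to DMABUF-aware devices */
}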
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml b/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
index f4e28e7..7217287 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
@@ -267,7 +267,7 @@
 best possible image quality that the hardware can deliver. It is not
 defined how the driver writer may achieve that; it will depend on the
 hardware and the ingenuity of the driver writer. High quality mode is
-a different mode from the the regular motion video capture modes. In
+a different mode from the regular motion video capture modes. In
 high quality mode:<itemizedlist>
 		  <listitem>
 		    <para>The driver may be able to capture higher
diff --git a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
index dc83ad7..6ec39c6 100644
--- a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
@@ -616,7 +616,7 @@
 	    <entry><constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant></entry>
 	    <entry>0x0200</entry>
 	    <entry>The value provided to the control will be propagated to the driver
-even if remains constant. This is required when the control represents an action
+even if it remains constant. This is required when the control represents an action
 on the hardware. For example: clearing an error flag or triggering the flash. All the
 controls of the type <constant>V4L2_CTRL_TYPE_BUTTON</constant> have this flag set.</entry>
 	  </row>
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 1690350..7d9d3c2 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -81,7 +81,7 @@
   u64 res3	= 0;		/* reserved */
   u64 res4	= 0;		/* reserved */
   u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
-  u32 res5;      		/* reserved (used for PE COFF offset) */
+  u32 res5;			/* reserved (used for PE COFF offset) */
 
 
 Header notes:
@@ -103,7 +103,7 @@
 
 - The flags field (introduced in v3.17) is a little-endian 64-bit field
   composed as follows:
-  Bit 0: 	Kernel endianness.  1 if BE, 0 if LE.
+  Bit 0:	Kernel endianness.  1 if BE, 0 if LE.
   Bits 1-63:	Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
@@ -115,11 +115,14 @@
 address near the start of usable system RAM and called there. Memory
 below that base address is currently unusable by Linux, and therefore it
 is strongly recommended that this location is the start of system RAM.
+The region between the 2 MB aligned base address and the start of the
+image has no special significance to the kernel, and may be used for
+other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
 
-Any memory described to the kernel (even that below the 2MB aligned base
-address) which is not marked as reserved from the kernel e.g. with a
+Any memory described to the kernel (even that below the start of the
+image) which is not marked as reserved from the kernel (e.g., with a
 memreserve region in the device tree) will be considered as available to
 the kernel.
 
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index dab6da3..b19fc34 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -266,7 +266,9 @@
 atomic_cmpxchg will only satisfy its atomicity semantics as long as all
 other accesses of *v are performed through atomic_xxx operations.
 
-atomic_cmpxchg must provide explicit memory barriers around the operation.
+atomic_cmpxchg must provide explicit memory barriers around the operation,
+although if the comparison fails then no memory ordering guarantees are
+required.
 
 The semantics for atomic_cmpxchg are the same as those defined for 'cas'
 below.
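
For illustration, the usual cmpxchg retry loop; this is a generic sketch in
the spirit of the kernel's atomic_add_unless(), not code from this patch:

/* Sketch: add 'a' to 'v' unless 'v' currently holds 'u'. */
#include <linux/atomic.h>

static int example_add_unless(atomic_t *v, int a, int u)
{
        int c = atomic_read(v);

        for (;;) {
                int old;

                if (c == u)
                        return 0;       /* no update performed */
                old = atomic_cmpxchg(v, c, c + a);
                if (old == c)
                        return 1;       /* our update won */
                c = old;                /* lost a race; retry */
        }
}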
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index c4de576..62435bb 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -144,7 +144,8 @@
                         store compressed data
 mem_limit         RW    the maximum amount of memory ZRAM can use to store
                         the compressed data
-num_migrated      RO    the number of objects migrated migrated by compaction
+pages_compacted   RO    the number of pages freed during compaction
+                        (available only via zram<id>/mm_stat node)
 compact           WO    trigger memory compaction
 
 WARNING
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dcc..06c88a4 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,12 @@
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 3b5f5d1..435251f 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -26,13 +26,19 @@
 
 Optional properties:
 
-- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
-                       to CPU nodes corresponding directly to the affinity of
+- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
+                       nodes corresponding directly to the affinity of
 		       the SPIs listed in the interrupts property.
 
-		       This property should be present when there is more than
+                       When using a PPI, specifies a list of phandles to CPU
+		       nodes corresponding to the set of CPUs which have
+		       a PMU of this type signalling the PPI listed in the
+		       interrupts property.
+
+                       This property should be present when there is more than
 		       a single SPI.
 
+
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
 
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
new file mode 100644
index 0000000..47cb1d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
@@ -0,0 +1,61 @@
+Analog Device AXI-DMAC DMA controller
+
+Required properties:
+ - compatible: Must be "adi,axi-dmac-1.00.a".
+ - reg: Specification for the controller's memory mapped register map.
+ - interrupts: Specification for the controller's interrupt.
+ - clocks: Phandle and specifier to the controller's AXI interface clock
+ - #dma-cells: Must be 1.
+
+Required sub-nodes:
+ - adi,channels: This sub-node must contain a sub-node for each DMA channel. For
+   the channel sub-nodes the following bindings apply. They must match the
+   configuration options of the peripheral as it was instantiated.
+
+Required properties for adi,channels sub-node:
+ - #size-cells: Must be 0
+ - #address-cells: Must be 1
+
+Required channel sub-node properties:
+ - reg: Which channel this node refers to.
+ - adi,length-width: Width of the DMA transfer length register.
+ - adi,source-bus-width,
+   adi,destination-bus-width: Width of the source or destination bus in bits.
+ - adi,source-bus-type,
+   adi,destination-bus-type: Type of the source or destination bus. Must be one
+   of the following:
+	0 (AXI_DMAC_TYPE_AXI_MM): Memory mapped AXI interface
+	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
+	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
+
+Optional channel properties:
+ - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
+   transfers.
+ - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
+
+DMA clients connected to the AXI-DMAC DMA controller must use the format
+described in the dma.txt file using a one-cell specifier. The value of the
+specifier refers to the DMA channel index.
+
+Example:
+
+dma: dma@7c420000 {
+	compatible = "adi,axi-dmac-1.00.a";
+	reg = <0x7c420000 0x10000>;
+	interrupts = <0 57 0>;
+	clocks = <&clkc 16>;
+	#dma-cells = <1>;
+
+	adi,channels {
+		#size-cells = <0>;
+		#address-cells = <1>;
+
+		dma-channel@0 {
+			reg = <0>;
+			adi,source-bus-width = <32>;
+			adi,source-bus-type = <AXI_DMAC_TYPE_AXI_MM>;
+			adi,destination-bus-width = <64>;
+			adi,destination-bus-type = <AXI_DMAC_TYPE_AXI_FIFO>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
new file mode 100644
index 0000000..8a0097a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
@@ -0,0 +1,54 @@
+* ARM PrimeCell PL080 and PL081 and derivatives DMA controller
+
+Required properties:
+- compatible: "arm,pl080", "arm,primecell";
+	      "arm,pl081", "arm,primecell";
+- reg: Address range of the PL08x registers
+- interrupts: The PL08x interrupt number
+- clocks: The clock running the IP core
+- clock-names: Must contain "apb_pclk"
+- lli-bus-interface-ahb1: if AHB master 1 is eligible for fetching LLIs
+- lli-bus-interface-ahb2: if AHB master 2 is eligible for fetching LLIs
+- mem-bus-interface-ahb1: if AHB master 1 is eligible for fetching memory contents
+- mem-bus-interface-ahb2: if AHB master 2 is eligible for fetching memory contents
+- #dma-cells: must be <2>. First cell should contain the DMA request,
+              second cell should contain either 1 or 2 depending on
+              which AHB master is used.
+
+Optional properties:
+- dma-channels: contains the total number of DMA channels supported by the DMAC
+- dma-requests: contains the total number of DMA requests supported by the DMAC
+- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32
+  64, 128 or 256 bytes are legal values
+- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
+  values
+
+Clients
+Required properties:
+- dmas: List of DMA controller phandle, request channel and AHB master id
+- dma-names: Names of the aforementioned requested channels
+
+Example:
+
+dmac0: dma-controller@10130000 {
+	compatible = "arm,pl080", "arm,primecell";
+	reg = <0x10130000 0x1000>;
+	interrupt-parent = <&vica>;
+	interrupts = <15>;
+	clocks = <&hclkdma0>;
+	clock-names = "apb_pclk";
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+	#dma-cells = <2>;
+};
+
+device@40008000 {
+	...
+	dmas = <&dmac0 0 2
+		&dmac0 1 2>;
+	dma-names = "tx", "rx";
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
new file mode 100644
index 0000000..87740ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
@@ -0,0 +1,54 @@
+NXP LPC18xx/43xx DMA MUX (DMA request router)
+
+Required properties:
+- compatible:	"nxp,lpc1850-dmamux"
+- reg:		Memory map for accessing module
+- #dma-cells:	Should be set to <3>.
+		* 1st cell contains the master DMA request signal
+		* 2nd cell contains the mux value (0-3) for the peripheral
+		* 3rd cell contains either 1 or 2 depending on the AHB
+		  master used.
+- dma-requests:	Number of DMA requests for the mux
+- dma-masters:	phandle pointing to the DMA controller
+
+The DMA controller node needs to have the following properties:
+- dma-requests:	Number of DMA requests the controller can handle
+
+Example:
+
+dmac: dma@40002000 {
+	compatible = "nxp,lpc1850-gpdma", "arm,pl080", "arm,primecell";
+	arm,primecell-periphid = <0x00041080>;
+	reg = <0x40002000 0x1000>;
+	interrupts = <2>;
+	clocks = <&ccu1 CLK_CPU_DMA>;
+	clock-names = "apb_pclk";
+	#dma-cells = <2>;
+	dma-channels = <8>;
+	dma-requests = <16>;
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb1;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+};
+
+dmamux: dma-mux {
+	compatible = "nxp,lpc1850-dmamux";
+	#dma-cells = <3>;
+	dma-requests = <64>;
+	dma-masters = <&dmac>;
+};
+
+uart0: serial@40081000 {
+	compatible = "nxp,lpc1850-uart", "ns16550a";
+	reg = <0x40081000 0x1000>;
+	reg-shift = <2>;
+	interrupts = <24>;
+	clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
+	clock-names = "uartclk", "reg";
+	dmas = <&dmamux 1 1 2
+		&dmamux 2 1 2>;
+	dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index cc29c35..276ef81 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -12,10 +12,13 @@
 properties:
 - interrupts: interrupt of the XOR channel
 
-And the following optional properties:
+The sub-nodes used to contain one or several of the following
+properties, but they are now deprecated:
 - dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
 - dmacap,memset to indicate that the XOR channel is capable of memset operations
 - dmacap,xor to indicate that the XOR channel is capable of xor operations
+- dmacap,interrupt to indicate that the XOR channel is capable of
+  generating interrupts
 
 Example:
 
@@ -28,13 +31,8 @@
 
 	xor00 {
 	      interrupts = <51>;
-	      dmacap,memcpy;
-	      dmacap,xor;
 	};
 	xor01 {
 	      interrupts = <52>;
-	      dmacap,memcpy;
-	      dmacap,xor;
-	      dmacap,memset;
 	};
 };
diff --git a/Documentation/devicetree/bindings/dma/sun4i-dma.txt b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
new file mode 100644
index 0000000..f1634a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
@@ -0,0 +1,46 @@
+Allwinner A10 DMA Controller
+
+This driver follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+
+- compatible:	Must be "allwinner,sun4i-a10-dma"
+- reg:		Should contain the registers base address and length
+- interrupts:	Should contain a reference to the interrupt used by this device
+- clocks:	Should contain a reference to the parent AHB clock
+- #dma-cells :	Should be 2, first cell denoting normal or dedicated dma,
+		second cell holding the request line number.
+
+Example:
+	dma: dma-controller@01c02000 {
+		compatible = "allwinner,sun4i-a10-dma";
+		reg = <0x01c02000 0x1000>;
+		interrupts = <27>;
+		clocks = <&ahb_gates 6>;
+		#dma-cells = <2>;
+	};
+
+Clients:
+
+DMA clients connected to the Allwinner A10 DMA controller must use the
+format described in the dma.txt file, using a three-cell specifier for
+each channel: a phandle plus two integer cells.
+The three cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Whether it is using normal (0) or dedicated (1) channels
+3. The port ID as specified in the datasheet
+
+Example:
+	spi2: spi@01c17000 {
+		compatible = "allwinner,sun4i-a10-spi";
+		reg = <0x01c17000 0x1000>;
+		interrupts = <0 12 4>;
+		clocks = <&ahb_gates 22>, <&spi2_clk>;
+		clock-names = "ahb", "mod";
+		dmas = <&dma 1 29>, <&dma 1 28>;
+		dma-names = "rx", "tx";
+		status = "disabled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
new file mode 100644
index 0000000..3207ceb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/zxdma.txt
@@ -0,0 +1,38 @@
+* ZTE ZX296702 DMA controller
+
+Required properties:
+- compatible: Should be "zte,zx296702-dma"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the one interrupt shared by all channels
+- #dma-cells: see dma.txt; should be 1, the request line (parameter) number
+- dma-channels: physical channels supported
+- dma-requests: virtual channels supported, each virtual channel
+		has a specific request line
+- clocks: clock required
+
+Example:
+
+Controller:
+	dma: dma-controller@09c00000 {
+		compatible = "zte,zx296702-dma";
+		reg = <0x09c00000 0x1000>;
+		clocks = <&topclk ZX296702_DMA_ACLK>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+		#dma-cells = <1>;
+		dma-channels = <24>;
+		dma-requests = <24>;
+	};
+
+Client:
+Use the specific request line passed from the DMA controller.
+For example, the spdif0 tx channel request line is 4:
+	spdif0: spdif0@0b004000 {
+		#sound-dai-cells = <0>;
+		compatible = "zte,zx296702-spdif";
+		reg = <0x0b004000 0x1000>;
+		clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
+		clock-names = "tx";
+		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dma 4>;
+		dma-names = "tx";
+	};
diff --git a/Documentation/devicetree/bindings/drm/msm/dsi.txt b/Documentation/devicetree/bindings/drm/msm/dsi.txt
index cd8fe6c..d56923c 100644
--- a/Documentation/devicetree/bindings/drm/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/dsi.txt
@@ -30,20 +30,27 @@
 - panel@0: Node of panel connected to this DSI controller.
   See files in Documentation/devicetree/bindings/panel/ for each supported
   panel.
-- qcom,dual-panel-mode: Boolean value indicating if the DSI controller is
+- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
   driving a panel which needs 2 DSI links.
-- qcom,master-panel: Boolean value indicating if the DSI controller is driving
+- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
   the master link of the 2-DSI panel.
-- qcom,sync-dual-panel: Boolean value indicating if the DSI controller is
+- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
   driving a 2-DSI panel whose 2 links need receive command simultaneously.
 - interrupt-parent: phandle to the MDP block if the interrupt signal is routed
   through MDP block
+- pinctrl-names: the pin control state names; should contain "default"
+- pinctrl-0: the default pinctrl state (active)
+- pinctrl-n: the "sleep" pinctrl state
+- port: DSI controller output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected panel's endpoint.
+  See Documentation/devicetree/bindings/graph.txt for device graph info.
 
 DSI PHY:
 Required properties:
 - compatible: Could be the following
   * "qcom,dsi-phy-28nm-hpm"
   * "qcom,dsi-phy-28nm-lp"
+  * "qcom,dsi-phy-20nm"
 - reg: Physical base address and length of the registers of PLL, PHY and PHY
   regulator
 - reg-names: The names of register regions. The following regions are required:
@@ -59,6 +66,10 @@
   * "iface_clk"
 - vddio-supply: phandle to vdd-io regulator device node
 
+Optional properties:
+- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
+  regulator is wanted.
+
 Example:
 	mdss_dsi0: qcom,mdss_dsi@fd922800 {
 		compatible = "qcom,mdss-dsi-ctrl";
@@ -90,9 +101,13 @@
 
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 
-		qcom,dual-panel-mode;
-		qcom,master-panel;
-		qcom,sync-dual-panel;
+		qcom,dual-dsi-mode;
+		qcom,master-dsi;
+		qcom,sync-dual-dsi;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_dsi_active>;
+		pinctrl-1 = <&mdss_dsi_suspend>;
 
 		panel: panel@0 {
 			compatible = "sharp,lq101r1sx01";
@@ -101,6 +116,18 @@
 
 			power-supply = <...>;
 			backlight = <...>;
+
+			port {
+				panel_in: endpoint {
+					remote-endpoint = <&dsi0_out>;
+				};
+			};
+		};
+
+		port {
+			dsi0_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
 		};
 	};
 
@@ -117,4 +144,6 @@
 		clock-names = "iface_clk";
 		clocks = <&mmcc MDSS_AHB_CLK>;
 		vddio-supply = <&pma8084_l12>;
+
+		qcom,dsi-phy-regulator-ldo-mode;
 	};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
index c43aa53..e926239 100644
--- a/Documentation/devicetree/bindings/drm/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 - compatible: one of the following
+   * "qcom,hdmi-tx-8994"
    * "qcom,hdmi-tx-8084"
-   * "qcom,hdmi-tx-8074"
+   * "qcom,hdmi-tx-8974"
    * "qcom,hdmi-tx-8660"
    * "qcom,hdmi-tx-8960"
 - reg: Physical base address and length of the controller's registers
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
index 435f1bc..b405b44 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -33,6 +33,13 @@
 - interrupt-parent:
     phandle of the parent interrupt controller
 
+- interrupts-extended:
+    Alternate form of specifying interrupts and parents that allows for
+    multiple parents.  This takes precedence over 'interrupts' and
+    'interrupt-parent'.  Wakeup-capable GPIO controllers often route their
+    wakeup interrupt lines through a different interrupt controller than the
+    primary interrupt line, making this property necessary.
+
 - #interrupt-cells:
     Should be <2>.  The first cell is the GPIO number, the second should specify
     flags.  The following subset of flags is supported:
@@ -47,19 +54,33 @@
 - interrupt-controller:
     Marks the device node as an interrupt controller
 
-- interrupt-names:
-    The name of the IRQ resource used by this controller
+- wakeup-source:
+    GPIOs for this controller can be used as a wakeup source
 
 Example:
 	upg_gio: gpio@f040a700 {
-		#gpio-cells = <0x2>;
-		#interrupt-cells = <0x2>;
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
 		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
 		gpio-controller;
 		interrupt-controller;
 		reg = <0xf040a700 0x80>;
-		interrupt-parent = <0xf>;
+		interrupt-parent = <&irq0_intc>;
 		interrupts = <0x6>;
-		interrupt-names = "upg_gio";
-		brcm,gpio-bank-widths = <0x20 0x20 0x20 0x18>;
+		brcm,gpio-bank-widths = <32 32 32 24>;
+	};
+
+	upg_gio_aon: gpio@f04172c0 {
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
+		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+		gpio-controller;
+		interrupt-controller;
+		reg = <0xf04172c0 0x40>;
+		interrupt-parent = <&irq0_aon_intc>;
+		interrupts = <0x6>;
+		interrupts-extended = <&irq0_aon_intc 0x6>,
+			<&aon_pm_l2_intc 0x5>;
+		wakeup-source;
+		brcm,gpio-bank-widths = <18 4>;
 	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
index abf4db7..170194a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 
-- compatible:
+- compatible: one of:
   - "axis,etraxfs-gio"
+  - "axis,artpec3-gio"
 - reg: Physical base address and length of the controller's registers.
 - #gpio-cells: Should be 3
   - The first cell is the gpio offset number.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
new file mode 100644
index 0000000..805ddcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -0,0 +1,22 @@
+* Freescale MPC512x/MPC8xxx GPIO controller
+
+Required properties:
+- compatible : Should be "fsl,<soc>-gpio"
+  The following <soc>s are known to be supported:
+    mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq
+- reg : Address and length of the register set for the device
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify the gpio polarity:
+      0 = active high
+      1 = active low
+
+Example:
+
+gpio0: gpio@1100 {
+	compatible = "fsl,mpc5125-gpio";
+	#gpio-cells = <2>;
+	reg = <0x1100 0x080>;
+	interrupts = <78 0x8>;
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 38fb86f..f60e2f4 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -9,6 +9,7 @@
     - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
     - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
     - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
+    - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
     - "renesas,gpio-rcar": for generic R-Car GPIO controller.
 
   - reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
new file mode 100644
index 0000000..0dab156
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
@@ -0,0 +1,24 @@
+ZTE ZX296702 GPIO controller
+
+Required properties:
+- compatible : "zte,zx296702-gpio"
+- #gpio-cells : Should be two. The first cell is the pin number and the
+  second cell is used to specify optional parameters:
+  - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- gpio-controller : Marks the device node as a GPIO controller.
+- interrupts : Interrupt mapping for GPIO IRQ.
+- gpio-ranges : Interaction with the PINCTRL subsystem.
+
+gpio1: gpio@b008040 {
+	compatible = "zte,zx296702-gpio";
+	reg = <0xb008040 0x40>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	gpio-ranges = <&pmx0 0 54 2 &pmx0 2 59 14>;
+	interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+	clock-names = "gpio_pclk";
+	clocks = <&lsp0clk ZX296702_GPIO_CLK>;
+};
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index 009f4bf..e685610 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -197,9 +197,11 @@
 - sor: serial output resource
 
   Required properties:
-  - compatible: For Tegra124, must contain "nvidia,tegra124-sor".  Otherwise,
-    must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
-    is tegra132.
+  - compatible: Should be:
+    - "nvidia,tegra124-sor": for Tegra124 and Tegra132
+    - "nvidia,tegra132-sor": for Tegra132
+    - "nvidia,tegra210-sor": for Tegra210
+    - "nvidia,tegra210-sor1": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 6b1d75f..a36dfce 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -52,10 +52,9 @@
     See ../reset/reset.txt for details.
   - reset-names: names of the resets listed in resets property in the same
     order.
-  - ranges: to allow probing of subdevices
 
 - sti-hdmi: hdmi output block
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   Required properties:
   - compatible: "st,stih<chip>-hdmi";
   - reg: Physical base address of the IP registers and length of memory mapped region.
@@ -72,7 +71,7 @@
 
 sti-hda:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-hda"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -85,7 +84,7 @@
 
 sti-dvo:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-dvo"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -195,38 +194,37 @@
 			reg-names	= "tvout-reg", "hda-reg", "syscfg";
 			reset-names     = "tvout";
 			resets          = <&softreset STIH416_HDTVOUT_SOFTRESET>;
-			ranges;
+		};
 
-			sti-hdmi@fe85c000 {
-				compatible	= "st,stih416-hdmi";
-				reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
-				reg-names	= "hdmi-reg", "syscfg";
-				interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
-				interrupt-names	= "irq";
-				clock-names	= "pix", "tmds", "phy", "audio";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
-			};
+		sti-hdmi@fe85c000 {
+			compatible	= "st,stih416-hdmi";
+			reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
+			reg-names	= "hdmi-reg", "syscfg";
+			interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
+			interrupt-names	= "irq";
+			clock-names	= "pix", "tmds", "phy", "audio";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
+		};
 
-			sti-hda@fe85a000 {
-				compatible	= "st,stih416-hda";
-				reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
-				reg-names	= "hda-reg", "video-dacs-ctrl";
-				clock-names	= "pix", "hddac";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
-			};
+		sti-hda@fe85a000 {
+			compatible	= "st,stih416-hda";
+			reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
+			reg-names	= "hda-reg", "video-dacs-ctrl";
+			clock-names	= "pix", "hddac";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
+		};
 
-			sti-dvo@8d00400 {
-				compatible	= "st,stih407-dvo";
-				reg		= <0x8d00400 0x200>;
-				reg-names	= "dvo-reg";
-				clock-names	= "dvo_pix", "dvo",
-						  "main_parent", "aux_parent";
-				clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
-						  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
-				pinctrl-names	= "default";
-				pinctrl-0	= <&pinctrl_dvo>;
-				sti,panel	= <&panel_dvo>;
-			};
+		sti-dvo@8d00400 {
+			compatible	= "st,stih407-dvo";
+			reg		= <0x8d00400 0x200>;
+			reg-names	= "dvo-reg";
+			clock-names	= "dvo_pix", "dvo",
+					  "main_parent", "aux_parent";
+			clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
+					  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
+			pinctrl-names	= "default";
+			pinctrl-0	= <&pinctrl_dvo>;
+			sti,panel	= <&panel_dvo>;
 		};
 
 		sti-hqvdp@9c000000 {
@@ -237,7 +235,7 @@
 				reset-names     = "hqvdp";
 				resets          = <&softreset STIH407_HDQVDP_SOFTRESET>;
 				st,vtg		= <&vtg_main>;
-			};
+		};
 	};
 	...
 };
diff --git a/Documentation/devicetree/bindings/i2c/ina209.txt b/Documentation/devicetree/bindings/hwmon/ina209.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/ina209.txt
rename to Documentation/devicetree/bindings/hwmon/ina209.txt
diff --git a/Documentation/devicetree/bindings/i2c/ina2xx.txt b/Documentation/devicetree/bindings/hwmon/ina2xx.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/ina2xx.txt
rename to Documentation/devicetree/bindings/hwmon/ina2xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/max6697.txt b/Documentation/devicetree/bindings/hwmon/max6697.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/max6697.txt
rename to Documentation/devicetree/bindings/hwmon/max6697.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-cadence.txt b/Documentation/devicetree/bindings/i2c/i2c-cadence.txt
index 7cb0b56..ebaa90c 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-cadence.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-cadence.txt
@@ -2,7 +2,11 @@
 
 Required properties:
   - reg: Physical base address and size of the controller's register area.
-  - compatible: Compatibility string. Must be 'cdns,i2c-r1p10'.
+  - compatible: Should contain one of:
+		* "cdns,i2c-r1p10", for version 1.0 of the Cadence I2C controller
+		* "cdns,i2c-r1p14", for version 1.4 of the Cadence I2C controller
   - clocks: Input clock specifier. Refer to common clock bindings.
   - interrupts: Interrupt specifier. Refer to interrupt bindings.
   - #address-cells: Should be 1.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-emev2.txt b/Documentation/devicetree/bindings/i2c/i2c-emev2.txt
new file mode 100644
index 0000000..5ed1ea1
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-emev2.txt
@@ -0,0 +1,22 @@
+Device tree configuration for Renesas EMEV2 IIC controller
+
+Required properties:
+- compatible      : "renesas,iic-emev2"
+- reg             : address start and address range size of device
+- interrupts      : specifier for the IIC controller interrupt
+- clocks          : phandle to the IP core SCLK
+- clock-names     : must be "sclk"
+- #address-cells  : should be <1>
+- #size-cells     : should be <0>
+
+Example:
+
+	iic0: i2c@e0070000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "renesas,iic-emev2";
+		reg = <0xe0070000 0x28>;
+		interrupts = <0 32 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&iic0_sclk>;
+		clock-names = "sclk";
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt b/Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
new file mode 100644
index 0000000..4101aa6
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
@@ -0,0 +1,33 @@
+NXP I2C controller for LPC2xxx/178x/18xx/43xx
+
+Required properties:
+ - compatible: must be "nxp,lpc1788-i2c"
+ - reg: physical address and length of the device registers
+ - interrupts: a single interrupt specifier
+ - clocks: clock for the device
+ - #address-cells: should be <1>
+ - #size-cells: should be <0>
+
+Optional properties:
+- clock-frequency: the desired I2C bus clock frequency in Hz; in the
+  absence of this property, the default value (100 kHz) is used.
+
+Example:
+i2c0: i2c@400a1000 {
+	compatible = "nxp,lpc1788-i2c";
+	reg = <0x400a1000 0x1000>;
+	interrupts = <18>;
+	clocks = <&ccu1 CLK_APB1_I2C0>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+};
+
+&i2c0 {
+	clock-frequency = <400000>;
+
+	lm75@48 {
+		compatible = "nxp,lm75";
+		reg = <0x48>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
new file mode 100644
index 0000000..688783f
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
@@ -0,0 +1,74 @@
+Register-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses a single register
+to route the I2C signals.
+
+Required properties:
+- compatible: i2c-mux-reg
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+* Standard I2C mux properties. See mux.txt in this directory.
+* I2C child bus nodes. See mux.txt in this directory.
+
+Optional properties:
+- reg: this pair of <offset size> specifies the register that controls the mux.
+  The meaning of <offset size> depends on the parent node; the register can be
+  at any memory-mapped address. The size must be either 1, 2, or 4 bytes. If
+  reg is omitted, the resource of this device will be used.
+- little-endian: if present, the register is little-endian.
+- big-endian: if present, the register is big-endian.
+  If both little-endian and big-endian are omitted, the endianness of the
+  CPU is used.
+- write-only: if present, the register is write-only.
+- idle-state: value to set the mux to when idle. When no value is
+  given, it defaults to the last value used.
+
+Whenever an access is made to a device on a child bus, the value set
+in the relevant node's reg property will be output to the register.
+
+If an idle state is defined, using the idle-state (optional) property,
+whenever an access is not being made to a device on a child bus, the
+register will be set according to the idle value.
+
+If an idle state is not defined, the most recently used value will be
+left programmed into the register.
+
+Example of a mux on a PCIe card where the host is a powerpc SoC (big endian):
+
+	i2c-mux {
+		/* the <offset size> depends on the address translation
+		 * of the parent device. If omitted, device resource
+		 * will be used instead. The size is to determine
+		 * whether iowrite32, iowrite16, or iowrite8 will be used.
+		 */
+		reg = <0x6028 0x4>;
+		little-endian;		/* little endian register on PCIe */
+		compatible = "i2c-mux-reg";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		i2c-parent = <&i2c1>;
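+		/* Optional: park the mux on child bus 0 when idle (illustrative) */
+		idle-state = <0>;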
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			si5338: clock-generator@70 {
+				compatible = "silabs,si5338";
+				reg = <0x70>;
+				/* other stuff */
+			};
+		};
+
+		i2c@1 {
+			/* data is written using iowrite32 */
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			si5338_1: clock-generator@70 {
+				compatible = "silabs,si5338";
+				reg = <0x70>;
+				/* other stuff */
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
new file mode 100644
index 0000000..8a99150
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -0,0 +1,45 @@
+Generic device tree bindings for I2C busses
+===========================================
+
+This document describes generic bindings which can be used to describe I2C
+busses in a device tree.
+
+Required properties
+-------------------
+
+- #address-cells  - should be <1>. Read more about addresses below.
+- #size-cells     - should be <0>.
+- compatible      - name of I2C bus controller following generic names
+		    recommended practice.
+
+For other required properties e.g. to describe register sets,
+clocks, etc. check the binding documentation of the specific driver.
+
+The cells properties above define that the address of a child of an I2C bus
+is described by a single value. This is usually a 7 bit address. However,
+flags can be attached to the address. I2C_TEN_BIT_ADDRESS is used to mark a 10
+bit address. It is needed to avoid the ambiguity between e.g. a 7 bit address
+of 0x50 and a 10 bit address of 0x050 which, in theory, can be on the same bus.
+Another flag is I2C_OWN_SLAVE_ADDRESS, which marks an address on which we
+listen in order to act as a slave device ourselves.
+
+Optional properties
+-------------------
+
+These properties may not be supported by all drivers. However, if a driver
+wants to support one of the below features, it should adapt the bindings below.
+
+- clock-frequency	- frequency of bus clock in Hz.
+- wakeup-source		- device can be used as a wakeup source.
+
+- interrupts		- interrupts used by the device.
+- interrupt-names	- "irq" and "wakeup" names are recognized by I2C core,
+			  other names are left to individual drivers.
+
+A binding may contain an optional "interrupts" property, describing the
+interrupts used by the device. The I2C core will assign the "irq" interrupt
+(or the very first interrupt if interrupt names are not used) as the primary
+interrupt for the slave.
+
+Also, if the device is marked as a wakeup source, the I2C core will set up the
+"wakeup" interrupt for the device. If the "wakeup" interrupt name is not
+present in the binding, the primary interrupt will be used as the wakeup
+interrupt.
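+
+Example (illustrative only; the controller compatible and the register,
+interrupt and clock values below are made up):
+
+	i2c@12340000 {
+		compatible = "acme,i2c-ctrl";
+		reg = <0x12340000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-frequency = <400000>;
+
+		sensor@48 {
+			compatible = "acme,some-sensor";
+			reg = <0x48>;
+			interrupt-parent = <&gpio1>;
+			interrupts = <5 IRQ_TYPE_LEVEL_LOW>, <6 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-names = "irq", "wakeup";
+			wakeup-source;
+		};
+	};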
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 00f8652..d77d412 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -95,6 +95,8 @@
 stm,m41t62		Serial real-time clock (RTC) with alarm
 stm,m41t80		M41T80 - SERIAL ACCESS RTC WITH ALARMS
 taos,tsl2550		Ambient Light Sensor with SMBUS/Two Wire Serial Interface
+ti,ads7828		8-channel, 12-bit ADC
+ti,ads7830		8-channel, 8-bit ADC
 ti,tsc2003		I2C Touch-Screen Controller
 ti,tmp102		Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
 ti,tmp103		Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
diff --git a/Documentation/devicetree/bindings/input/ads7846.txt b/Documentation/devicetree/bindings/input/ads7846.txt
index 5f7619c..df8b127 100644
--- a/Documentation/devicetree/bindings/input/ads7846.txt
+++ b/Documentation/devicetree/bindings/input/ads7846.txt
@@ -64,7 +64,7 @@
 					pendown-gpio (u32).
 	pendown-gpio			GPIO handle describing the pin the !PENIRQ
 					line is connected to.
-	linux,wakeup			use any event on touchscreen as wakeup event.
+	wakeup-source			use any event on touchscreen as wakeup event.
 
 
 Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC:
diff --git a/Documentation/devicetree/bindings/input/cap11xx.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
index 7d0a300..8c67a0b 100644
--- a/Documentation/devicetree/bindings/input/cap11xx.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -55,5 +55,24 @@
 				 <105>,		/* KEY_LEFT */
 				 <109>,		/* KEY_PAGEDOWN */
 				 <104>;		/* KEY_PAGEUP */
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		usr@0 {
+			label = "cap11xx:green:usr0";
+			reg = <0>;
+		};
+
+		usr@1 {
+			label = "cap11xx:green:usr1";
+			reg = <1>;
+		};
+
+		alive@2 {
+			label = "cap11xx:green:alive";
+			reg = <2>;
+			linux,default_trigger = "heartbeat";
+		};
 	};
 }
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
new file mode 100644
index 0000000..635a3b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -0,0 +1,44 @@
+Cypress I2C Touchpad
+
+Required properties:
+- compatible: must be "cypress,cyapa".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+	binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+	binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+	pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+	&i2c0 {
+		/* ... */
+
+		/* Cypress Gen3 touchpad */
+		touchpad@67 {
+			compatible = "cypress,cyapa";
+			reg = <0x67>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* Cypress Gen5 and later touchpad */
+		touchpad@24 {
+			compatible = "cypress,cyapa";
+			reg = <0x24>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
index a765232..8a71038 100644
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -13,6 +13,9 @@
 - pinctrl-names: should be "default" (see pinctrl binding [1]).
 - pinctrl-0: a phandle pointing to the pin settings for the device (see
   pinctrl binding [1]).
+- reset-gpios: reset gpio the chip is connected to.
+- vcc33-supply: a phandle for the regulator supplying 3.3V power.
+- vccio-supply: a phandle for the regulator supplying IO power.
 
 [0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 [1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
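+
+Example (an illustrative sketch; the GPIO numbers and regulator phandles are
+placeholders for board-specific values):
+
+	touchscreen@10 {
+		compatible = "elan,ekth3500";
+		reg = <0x10>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+		reset-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
+		vcc33-supply = <&vcc33_touch>;
+		vccio-supply = <&vcc18_touch>;
+	};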
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
index 313abef..5b91f5a 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
@@ -20,7 +20,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 
 Example nodes:
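+
+A minimal polled-keys node using wakeup-source (the GPIO and keycode values
+are illustrative):
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		poll-interval = <100>;
+
+		power-button {
+			label = "Power";
+			linux,code = <116>;	/* KEY_POWER */
+			gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+			wakeup-source;
+		};
+	};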
 
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 44b7057..072bf75 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -23,7 +23,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 	- linux,can-disable: Boolean, indicates that button is connected
 	  to dedicated (not shared) interrupt which can be disabled to
 	  suppress events from the button.
diff --git a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
index ead641c..4d86059 100644
--- a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
+++ b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
@@ -19,7 +19,7 @@
 
 Optional Properties:
 - linux,no-autorepeat:	do not enable autorepeat feature.
-- linux,wakeup:		use any event on keypad as wakeup event.
+- wakeup-source:	use any event on keypad as wakeup event.
 - debounce-delay-ms:	debounce interval in milliseconds
 - col-scan-delay-us:	delay, measured in microseconds, that is needed
 			before we can scan keypad after activating column gpio
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
index 7d8cb92..ee62156 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
@@ -33,7 +33,7 @@
 	Value type: <bool>
 	Definition: don't enable autorepeat feature.
 
-- linux,keypad-wakeup:
+- wakeup-source:
 	Usage: optional
 	Value type: <bool>
 	Definition: use any event on keypad as wakeup event.
diff --git a/Documentation/devicetree/bindings/input/samsung-keypad.txt b/Documentation/devicetree/bindings/input/samsung-keypad.txt
index 942d071..863e77f 100644
--- a/Documentation/devicetree/bindings/input/samsung-keypad.txt
+++ b/Documentation/devicetree/bindings/input/samsung-keypad.txt
@@ -36,9 +36,11 @@
 - pinctrl-0: Should specify pin control groups used for this controller.
 - pinctrl-names: Should contain only one value - "default".
 
+Optional Properties:
+- wakeup-source: use any event on keypad as wakeup event.
+
 Optional Properties specific to linux:
 - linux,keypad-no-autorepeat: do not enable autorepeat feature.
-- linux,keypad-wakeup: use any event on keypad as wakeup event.
 
 
 Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
index 6e55109..8eb240a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
@@ -8,6 +8,9 @@
 - touchscreen-size-x: horizontal resolution of touchscreen (in pixels)
 - touchscreen-size-y: vertical resolution of touchscreen (in pixels)
 
+Optional properties:
+- reset-gpio: GPIO connected to the RESET line of the chip
+
 Example:
 
 	i2c@00000000 {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
index 80c37df..e3c27c4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
@@ -4,12 +4,12 @@
 - compatible: must be "neonode,zforce"
 - reg: I2C address of the chip
 - interrupts: interrupt to which the chip is connected
-- gpios: gpios the chip is connected to
-  first one is the interrupt gpio and second one the reset gpio
+- reset-gpios: reset gpio the chip is connected to
 - x-size: horizontal resolution of touchscreen
 - y-size: vertical resolution of touchscreen
 
 Optional properties:
+- irq-gpios : interrupt gpio the chip is connected to
 - vdd-supply: Regulator controlling the controller supply
 
 Example:
@@ -23,8 +23,8 @@
 			interrupts = <2 0>;
 			vdd-supply = <&reg_zforce_vdd>;
 
-			gpios = <&gpio5 6 0>, /* INT */
-				<&gpio5 9 0>; /* RST */
+			reset-gpios = <&gpio5 9 0>; /* RST */
+			irq-gpios = <&gpio5 6 0>; /* IRQ, optional */
 
 			x-size = <800>;
 			y-size = <600>;
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 0676050..7180745 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -43,6 +43,12 @@
 
 ** System MMU optional properties:
 
+- dma-coherent  : Present if page table walks made by the SMMU are
+                  cache coherent with the CPU.
+
+                  NOTE: this only applies to the SMMU itself, not
+                  masters connected upstream of the SMMU.
+
 - calxeda,smmu-secure-config-access : Enable proper handling of buggy
                   implementations that always use secure access to
                   SMMU configuration registers. In this case non-secure
diff --git a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
index 42531dc..8696999 100644
--- a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
@@ -8,6 +8,11 @@
 - ti,hwmods  : Name of the hwmod associated with the IOMMU instance
 - reg        : Address space for the configuration registers
 - interrupts : Interrupt specifier for the IOMMU instance
+- #iommu-cells : Should be 0. OMAP IOMMUs are all "single-master" devices,
+                 and need no additional data in the pargs specifier. Please
+                 also refer to the generic bindings document for more info
+                 on this property:
+                     Documentation/devicetree/bindings/iommu/iommu.txt
 
 Optional properties:
 - ti,#tlb-entries : Number of entries in the translation look-aside buffer.
@@ -18,6 +23,7 @@
 Example:
 	/* OMAP3 ISP MMU */
 	mmu_isp: mmu@480bd400 {
+		#iommu-cells = <0>;
 		compatible = "ti,omap2-iommu";
 		reg = <0x480bd400 0x80>;
 		interrupts = <24>;
diff --git a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt b/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
deleted file mode 100644
index a85a964..0000000
--- a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Binding for Qualcomm PM8941 WLED driver
-
-Required properties:
-- compatible: should be "qcom,pm8941-wled"
-- reg: slave address
-
-Optional properties:
-- label: The label for this led
-  See Documentation/devicetree/bindings/leds/common.txt
-- linux,default-trigger: Default trigger assigned to the LED
-  See Documentation/devicetree/bindings/leds/common.txt
-- qcom,cs-out: bool; enable current sink output
-- qcom,cabc: bool; enable content adaptive backlight control
-- qcom,ext-gen: bool; use externally generated modulator signal to dim
-- qcom,current-limit: mA; per-string current limit; value from 0 to 25
-	default: 20mA
-- qcom,current-boost-limit: mA; boost current limit; one of:
-	105, 385, 525, 805, 980, 1260, 1400, 1680
-	default: 805mA
-- qcom,switching-freq: kHz; switching frequency; one of:
-	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
-	1600, 1920, 2400, 3200, 4800, 9600,
-	default: 1600kHz
-- qcom,ovp: V; Over-voltage protection limit; one of:
-	27, 29, 32, 35
-	default: 29V
-- qcom,num-strings: #; number of led strings attached; value from 1 to 3
-	default: 2
-
-Example:
-
-pm8941-wled@d800 {
-	compatible = "qcom,pm8941-wled";
-	reg = <0xd800>;
-	label = "backlight";
-
-	qcom,cs-out;
-	qcom,current-limit = <20>;
-	qcom,current-boost-limit = <805>;
-	qcom,switching-freq = <1600>;
-	qcom,ovp = <29>;
-	qcom,num-strings = <2>;
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-powernv.txt b/Documentation/devicetree/bindings/leds/leds-powernv.txt
new file mode 100644
index 0000000..6665569
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-powernv.txt
@@ -0,0 +1,26 @@
+Device Tree binding for LEDs on IBM Power Systems
+-------------------------------------------------
+
+Required properties:
+- compatible : Should be "ibm,opal-v3-led".
+- led-mode   : Should be "lightpath" or "guidinglight".
+
+Each location code of FRU/Enclosure must be expressed in the
+form of a sub-node.
+
+Required properties for the sub nodes:
+- led-types : Supported LED types (attention/identify/fault) provided
+              in the form of a string array.
+
+Example:
+
+leds {
+	compatible = "ibm,opal-v3-led";
+	led-mode = "lightpath";
+
+	U78C9.001.RST0027-P1-C1 {
+		led-types = "identify", "fault";
+	};
+	...
+	...
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7604.txt b/Documentation/devicetree/bindings/media/i2c/adv7604.txt
index c27cede..8337f75 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7604.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adv7604.txt
@@ -1,15 +1,17 @@
-* Analog Devices ADV7604/11 video decoder with HDMI receiver
+* Analog Devices ADV7604/11/12 video decoder with HDMI receiver
 
-The ADV7604 and ADV7611 are multiformat video decoders with an integrated HDMI
-receiver. The ADV7604 has four multiplexed HDMI inputs and one analog input,
-and the ADV7611 has one HDMI input and no analog input.
+The ADV7604 and ADV7611/12 are multiformat video decoders with an integrated
+HDMI receiver. The ADV7604 has four multiplexed HDMI inputs and one analog
+input, and the ADV7611 has one HDMI input and no analog input. The 7612 is
+similar to the 7611 but has 2 HDMI inputs.
 
-These device tree bindings support the ADV7611 only at the moment.
+These device tree bindings support the ADV7611/12 only at the moment.
 
 Required Properties:
 
   - compatible: Must contain one of the following
     - "adi,adv7611" for the ADV7611
+    - "adi,adv7612" for the ADV7612
 
   - reg: I2C slave address
 
@@ -22,10 +24,10 @@
 Documentation/devicetree/bindings/media/video-interfaces.txt. The port nodes
 are numbered as follows.
 
-  Port			ADV7611
+  Port			ADV7611    ADV7612
 ------------------------------------------------------------
-  HDMI			0
-  Digital output	1
+  HDMI			0          0, 1
+  Digital output	1          2
 
 The digital output port node must contain at least one endpoint.
 
@@ -45,6 +47,7 @@
   If none of hsync-active, vsync-active and pclk-sample is specified the
   endpoint will use embedded BT.656 synchronization.
 
+  - default-input: Select which input is active after reset.
 
 Example:
 
@@ -58,6 +61,8 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
+		default-input = <0>;
+
 		port@0 {
 			reg = <0>;
 		};
diff --git a/Documentation/devicetree/bindings/media/i2c/tc358743.txt b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
new file mode 100644
index 0000000..5218921
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
@@ -0,0 +1,48 @@
+* Toshiba TC358743 HDMI-RX to MIPI CSI2-TX Bridge
+
+The Toshiba TC358743 HDMI-RX to MIPI CSI2-TX (H2C) is a bridge that converts
+an HDMI stream to MIPI CSI-2. It is programmable through I2C.
+
+Required Properties:
+
+- compatible: value should be "toshiba,tc358743"
+- clocks, clock-names: should contain a phandle link to the reference clock
+		       source, the clock input is named "refclk".
+
+Optional Properties:
+
+- reset-gpios: GPIO connected to the reset pin
+- interrupts, interrupt-parent: GPIO connected to the interrupt pin
+- data-lanes: should be <1 2 3 4> for four-lane operation,
+	      or <1 2> for two-lane operation
+- clock-lanes: should be <0>
+- clock-noncontinuous: Presence of this boolean property decides whether the
+		       MIPI CSI-2 clock is continuous or non-continuous.
+- link-frequencies: List of allowed link frequencies in Hz. Each frequency is
+		    expressed as a 64-bit big-endian integer. The frequency
+		    is half of the bps per lane due to DDR transmission.
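+		    For example, the 297000000 (297 MHz) entry in the example
+		    below corresponds to 594 Mbps per lane due to DDR, or about
+		    2.4 Gbps in total over four lanes.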
+
+For further information on the MIPI CSI-2 endpoint node properties, see
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+	tc358743@0f {
+		compatible = "toshiba,tc358743";
+		reg = <0x0f>;
+		clocks = <&hdmi_osc>;
+		clock-names = "refclk";
+		reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+
+		port {
+			tc358743_out: endpoint {
+				remote-endpoint = <&mipi_csi2_in>;
+				data-lanes = <1 2 3 4>;
+				clock-lanes = <0>;
+				clock-noncontinuous;
+				link-frequencies = /bits/ 64 <297000000>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/renesas,jpu.txt b/Documentation/devicetree/bindings/media/renesas,jpu.txt
new file mode 100644
index 0000000..0cb9420
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,jpu.txt
@@ -0,0 +1,24 @@
+* Renesas JPEG Processing Unit
+
+The JPEG processing unit (JPU) incorporates the JPEG codec with an encoding
+and decoding function conforming to the JPEG baseline process, so that the JPU
+can encode image data and decode JPEG data quickly.
+
+Required properties:
+  - compatible: should contain one of the following:
+			- "renesas,jpu-r8a7790" for R-Car H2
+			- "renesas,jpu-r8a7791" for R-Car M2-W
+			- "renesas,jpu-r8a7792" for R-Car V2H
+			- "renesas,jpu-r8a7793" for R-Car M2-N
+
+  - reg: Base address and length of the registers block for the JPU.
+  - interrupts: JPU interrupt specifier.
+  - clocks: A phandle + clock-specifier pair for the JPU functional clock.
+
+Example: R8A7790 (R-Car H2) JPU node
+	jpeg-codec@fe980000 {
+		compatible = "renesas,jpu-r8a7790";
+		reg = <0 0xfe980000 0 0x10300>;
+		interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp1_clks R8A7790_CLK_JPU>;
+	};
diff --git a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
new file mode 100644
index 0000000..d4def76
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
@@ -0,0 +1,89 @@
+STMicroelectronics STi c8sectpfe binding
+========================================
+
+This document describes the c8sectpfe device bindings that are used to get
+transport stream data into the SoC on the TS pins, and into DDR for further
+processing.
+
+It is typically used in conjunction with one or more demodulator and tuner
+devices which convert from the RF to the digital domain. Demodulators and
+tuners are usually located on an external DVB frontend card connected to the
+SoC TS input pins.
+
+Currently 7 TS input (tsin) channels are supported on the stih407 family SoC.
+
+Required properties (controller (parent) node):
+- compatible	: Should be "st,stih407-c8sectpfe"
+
+- reg		: Address and length of register sets for each device in
+		  "reg-names"
+
+- reg-names	: The names of the register addresses corresponding to the
+		  registers filled in "reg":
+			- c8sectpfe: c8sectpfe registers
+			- c8sectpfe-ram: c8sectpfe internal sram
+
+- clocks	: phandle list of c8sectpfe clocks
+- clock-names	: should be "c8sectpfe"
+See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+- pinctrl-names	: a pinctrl state named tsin%d-serial or tsin%d-parallel (where %d is tsin-num)
+		   must be defined for each tsin child node.
+- pinctrl-0	: phandle referencing pin configuration for this tsin configuration
+See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+
+Required properties (tsin (child) node):
+
+- tsin-num	: tsin id of the InputBlock (must be between 0 and 6)
+- i2c-bus	: phandle of the I2C bus DT node to which the demodulators & tuners on this tsin channel are connected.
+- rst-gpio	: reset gpio for this tsin channel.
+
+Optional properties (tsin (child) node):
+
+- invert-ts-clk		: Bool property to control sense of ts input clock (data stored on falling edge of clk).
+- serial-not-parallel	: Bool property to configure input bus width (serial on ts_data<7>).
+- async-not-sync	: Bool property to control if data is received in asynchronous mode
+			   (all bits/bytes with ts_valid or ts_packet asserted are valid).
+
+- dvb-card		: Describes the NIM card connected to this tsin channel.
+
+Example:
+
+/* stih410 SoC b2120 + b2004a + stv0367-pll (NIMB) + stv0367-tda18212 (NIMA) DT example */
+
+	c8sectpfe@08a20000 {
+		compatible = "st,stih407-c8sectpfe";
+		status = "okay";
+		reg = <0x08a20000 0x10000>, <0x08a00000 0x4000>;
+		reg-names = "c8sectpfe", "c8sectpfe-ram";
+		interrupts = <0 34 0>, <0 35 0>;
+		interrupt-names = "stfe-error-irq", "stfe-idle-irq";
+
+		pinctrl-names	= "tsin0-serial", "tsin0-parallel", "tsin3-serial",
+				"tsin4-serial", "tsin5-serial";
+
+		pinctrl-0	= <&pinctrl_tsin0_serial>;
+		pinctrl-1	= <&pinctrl_tsin0_parallel>;
+		pinctrl-2	= <&pinctrl_tsin3_serial>;
+		pinctrl-3	= <&pinctrl_tsin4_serial_alt3>;
+		pinctrl-4	= <&pinctrl_tsin5_serial_alt1>;
+
+		clocks = <&clk_s_c0_flexgen CLK_PROC_STFE>;
+		clock-names = "c8sectpfe";
+
+		/* tsin0 is TSA on NIMA */
+		tsin0: port@0 {
+			tsin-num		= <0>;
+			serial-not-parallel;
+			i2c-bus			= <&ssc2>;
+			rst-gpio		= <&pio15 4 0>;
+			dvb-card		= <STV0367_TDA18212_NIMA_1>;
+		};
+
+		tsin3: port@3 {
+			tsin-num		= <3>;
+			serial-not-parallel;
+			i2c-bus			= <&ssc3>;
+			rst-gpio		= <&pio15 7 0>;
+			dvb-card		= <STV0367_TDA18212_NIMB_1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
index d5e3704..89427b0 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
@@ -18,6 +18,8 @@
               interrupt (NAND_EVTER_STAT).  If there is only one,
               that interrupt reports both types of event.
 
+- little-endian : If this property is absent, big-endian mode is used by
+                  default for the registers.
 
 - ranges : Each range corresponds to a single chipselect, and covers
            the entire access window as configured.
@@ -34,6 +36,7 @@
 		#size-cells = <1>;
 		reg = <0x0 0xffe1e000 0 0x2000>;
 		interrupts = <16 2 19 2>;
+		little-endian;
 
 		/* NOR, NAND Flashes and CPLD on board */
 		ranges = <0x0 0x0 0x0 0xee000000 0x02000000
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index f64de95a..ad5d904 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -2,7 +2,11 @@
 
 Required properties:
  - compatible: value should be one of the following:
+   "atmel,at91sam9n12-hlcdc"
+   "atmel,at91sam9x5-hlcdc"
+   "atmel,sama5d2-hlcdc"
    "atmel,sama5d3-hlcdc"
+   "atmel,sama5d4-hlcdc"
  - reg: base address and size of the HLCDC device registers.
 - clock-names: the names of the 3 clocks requested by the HLCDC device.
    Should contain "periph_clk", "sys_clk" and "slow_clk".
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 753f14f..4181122 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -1,12 +1,14 @@
 AXP family PMIC device tree bindings
 
 The axp20x family current members :
+axp152 (X-Powers)
 axp202 (X-Powers)
 axp209 (X-Powers)
 axp221 (X-Powers)
 
 Required properties:
-- compatible: "x-powers,axp202", "x-powers,axp209", "x-powers,axp221"
+- compatible: "x-powers,axp152", "x-powers,axp202", "x-powers,axp209",
+	      "x-powers,axp221"
 - reg: The I2C slave address for the AXP chip
 - interrupt-parent: The parent interrupt controller
 - interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
new file mode 100644
index 0000000..38802b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -0,0 +1,88 @@
+* Dialog DA9062 Power Management Integrated Circuit (PMIC)
+
+DA9062 consists of the following group of sub-devices:
+
+Device                   Supply Names    Description
+------                   ------------    -----------
+da9062-regulator        :               : LDOs & BUCKs
+da9062-rtc              :               : Real-Time Clock
+da9062-watchdog         :               : Watchdog Timer
+
+======
+
+Required properties:
+
+- compatible : Should be "dlg,da9062".
+- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
+  modified to match the chip's OTP settings).
+- interrupt-parent : Specifies the reference to the interrupt controller for
+  the DA9062.
+- interrupts : IRQ line information.
+- interrupt-controller
+
+See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
+further information on IRQ bindings.
+
+Sub-nodes:
+
+- regulators : This node defines the settings for the LDOs and BUCKs. The
+  DA9062 regulators are bound using their names listed below:
+
+    buck1    : BUCK_1
+    buck2    : BUCK_2
+    buck3    : BUCK_3
+    buck4    : BUCK_4
+    ldo1     : LDO_1
+    ldo2     : LDO_2
+    ldo3     : LDO_3
+    ldo4     : LDO_4
+
+  The component follows the standard regulator framework and the bindings
+  details of individual regulator device can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+
+- rtc : This node defines settings required for the Real-Time Clock associated
+  with the DA9062. There are currently no entries in this binding, however
+  compatible = "dlg,da9062-rtc" should be added if a node is created.
+
+- watchdog: This node defines the settings for the watchdog driver associated
+  with the DA9062 PMIC. The compatible = "dlg,da9062-watchdog" should be added
+  if a node is created.
+
+
+Example:
+
+	pmic0: da9062@58 {
+		compatible = "dlg,da9062";
+		reg = <0x58>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9062-rtc";
+		};
+
+		watchdog {
+			compatible = "dlg,da9062-watchdog";
+		};
+
+		regulators {
+			DA9062_BUCK1: buck1 {
+				regulator-name = "BUCK1";
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp = <500000>;
+				regulator-max-microamp = <2000000>;
+				regulator-boot-on;
+			};
+			DA9062_LDO1: ldo1 {
+				regulator-name = "LDO_1";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-boot-on;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 163bd81..741e766 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -7,8 +7,9 @@
 client while probing. This document describes the binding for the mfd device and
 PMIC submodule.
 
-Binding for the built-in 32k clock generator block is defined separately
-in bindings/clk/maxim,max77686.txt file.
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77686.txt and
+../regulator/max77686.txt respectively.
 
 Required properties:
 - compatible : Must be "maxim,max77686";
@@ -16,67 +17,11 @@
 - interrupts : This i2c device has an IRQ line connected to the main SoC.
 - interrupt-parent : The parent interrupt controller.
 
-Optional node:
-- voltage-regulators : The regulators of max77686 have to be instantiated
-  under subnode named "voltage-regulators" using the following format.
-
-	regulator_name {
-		regulator-compatible = LDOn/BUCKn
-		standard regulator constraints....
-	};
-	refer Documentation/devicetree/bindings/regulator/regulator.txt
-
-  The regulator-compatible property of regulator should initialized with string
-to get matched with their hardware counterparts as follow:
-
-	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
-		 	example: LDO1, LDO2, LDO26.
-	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
-			example: BUCK1, BUCK5, BUCK9.
-
-  Regulators which can be turned off during system suspend:
-	-LDOn	:	2, 6-8, 10-12, 14-16,
-	-BUCKn	:	1-4.
-  Use standard regulator bindings for it ('regulator-off-in-suspend').
-
-  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
-  control. To turn this feature on this property must be added to the regulator
-  sub-node:
-	- maxim,ena-gpios :	one GPIO specifier enable control (the gpio
-				flags are actually ignored and always
-				ACTIVE_HIGH is used)
-
 Example:
 
-	max77686@09 {
+	max77686: pmic@09 {
 		compatible = "maxim,max77686";
 		interrupt-parent = <&wakeup_eint>;
 		interrupts = <26 0>;
 		reg = <0x09>;
-
-		voltage-regulators {
-			ldo11_reg {
-				regulator-compatible = "LDO11";
-				regulator-name = "vdd_ldo11";
-				regulator-min-microvolt = <1900000>;
-				regulator-max-microvolt = <1900000>;
-				regulator-always-on;
-			};
-
-			buck1_reg {
-				regulator-compatible = "BUCK1";
-				regulator-name = "vdd_mif";
-				regulator-min-microvolt = <950000>;
-				regulator-max-microvolt = <1300000>;
-				regulator-always-on;
-				regulator-boot-on;
-			};
-
-			buck9_reg {
-				regulator-compatible = "BUCK9";
-				regulator-name = "CAM_ISP_CORE_1.2V";
-				regulator-min-microvolt = <1000000>;
-				regulator-max-microvolt = <1200000>;
-				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
-			};
-	}
+	};
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
new file mode 100644
index 0000000..51fc1a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -0,0 +1,26 @@
+Maxim MAX77802 multi-function device
+
+The Maxim MAX77802 is a Power Management IC (PMIC) that contains 10 high
+efficiency Buck regulators, 32 Low-Dropout (LDO) regulators used to power
+up application processors and peripherals, 2-channel 32kHz clock outputs,
+a Real-Time Clock (RTC) and an I2C interface to program the individual
+regulators, clock outputs and the RTC.
+
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77802.txt and
+../regulator/max77802.txt respectively.
+
+Required properties:
+- compatible		: Must be "maxim,max77802"
+- reg			: Specifies the I2C slave address of PMIC block.
+- interrupts		: I2C device IRQ line connected to the main SoC.
+- interrupt-parent	: The parent interrupt controller.
+
+Example:
+
+	max77802: pmic@09 {
+		compatible = "maxim,max77802";
+		interrupt-parent = <&intc>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+	};
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
index 6fcedba..37bf7f1 100644
--- a/Documentation/devicetree/bindings/mfd/tc3589x.txt
+++ b/Documentation/devicetree/bindings/mfd/tc3589x.txt
@@ -55,7 +55,7 @@
  - linux,keymap: the definition can be found in
    bindings/input/matrix-keymap.txt
 - linux,no-autorepeat: do not enable autorepeat feature.
- - linux,wakeup: use any event on keypad as wakeup event.
+ - wakeup-source: use any event on keypad as wakeup event.
 
 Example:
 
@@ -84,7 +84,6 @@
 		keypad,num-columns = <8>;
 		keypad,num-rows = <8>;
 		linux,no-autorepeat;
-		linux,wakeup;
 		linux,keymap = <0x0301006b
 				0x04010066
 				0x06040072
@@ -103,5 +102,6 @@
 				0x01030039
 				0x07060069
 				0x050500d9>;
+		wakeup-source;
 	};
 };
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 7e94903..da541c3 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -9,7 +9,7 @@
 
 Required Properties:
   - compatible: Compatibility string. Must be 'arasan,sdhci-8.9a' or
-                'arasan,sdhci-4.9a'
+                'arasan,sdhci-4.9a' or 'arasan,sdhci-5.1'
   - reg: From mmc bindings: Register location and length.
   - clocks: From clock bindings: Handles to clock inputs.
   - clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 211e778..dca56d6 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -15,6 +15,7 @@
 	       "fsl,imx6q-usdhc"
 	       "fsl,imx6sl-usdhc"
 	       "fsl,imx6sx-usdhc"
+	       "fsl,imx7d-usdhc"
 
 Optional properties:
 - fsl,wp-controller : Indicate to use controller internal write protection
@@ -27,6 +28,11 @@
   transparent level shifters on the outputs of the controller. Two cells are
   required, first cell specifies minimum slot voltage (mV), second cell
   specifies maximum slot voltage (mV). Several ranges could be specified.
+- fsl,tuning-step: Specify the step size, in delay cells, used in the tuning
+  procedure. The uSDHC uses one delay cell per step by default. This property
+  allows the user to increase the tuning step to more than one delay cell,
+  which is useful for some special boards or cards when the default tuning
+  step can't find the proper delay window within the limited number of tuning
+  retries.
 
 Examples:
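+
+A sketch of the fsl,tuning-step property (all node values are illustrative,
+not taken from a real board):
+
+usdhc1: usdhc@30b40000 {
+	compatible = "fsl,imx7d-usdhc";
+	reg = <0x30b40000 0x10000>;
+	interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+	fsl,tuning-step = <2>;	/* advance two delay cells per tuning step */
+	bus-width = <4>;
+};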
 
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
new file mode 100644
index 0000000..1b662d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
@@ -0,0 +1,21 @@
+* Atmel SDHCI controller
+
+This file documents the differences between the core properties in
+Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the
+sdhci-of-at91 driver.
+
+Required properties:
+- compatible:		Must be "atmel,sama5d2-sdhci".
+- clocks:		Phandles to the clocks.
+- clock-names:		Must be "hclock", "multclk" and "baseclk".
+
+
+Example:
+
+sdmmc0: sdio-host@a0000000 {
+	compatible = "atmel,sama5d2-sdhci";
+	reg = <0xa0000000 0x300>;
+	interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
+	clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
+	clock-names = "hclock", "multclk", "baseclk";
+};
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 76bf087..74166a0 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -102,7 +102,7 @@
 		pinctrl-1 = <&mmc1_idle>;
 		pinctrl-2 = <&mmc1_sleep>;
 		...
-		interrupts-extended = <&intc 64 &gpio2 28 0>;
+		interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>;
 	};
 
 	mmc1_idle : pinmux_cirq_pin {
diff --git a/Documentation/devicetree/bindings/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
new file mode 100644
index 0000000..bae0e2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b080uan01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
new file mode 100644
index 0000000..824441f
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
@@ -0,0 +1,19 @@
+LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+Required properties:
+  - compatible: "lg,lg4573"
+  - reg: address of the panel on the SPI bus
+
+The panel must obey the rules for SPI slave devices specified in document [1].
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	lcd_panel: display@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lg,lg4573";
+		spi-max-frequency = <10000000>;
+		reg = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
new file mode 100644
index 0000000..8e1914d
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
@@ -0,0 +1,7 @@
+NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "nec,nl4827hc19-05b"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
new file mode 100644
index 0000000..ddf8e21
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
@@ -0,0 +1,7 @@
+OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+
+Required properties:
+- compatible: should be "okaya,rs800480t-7x0gp"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 9462ab7..3c821cd 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -48,7 +48,7 @@
 
 Examples:
 
-pinctrl@01c20800 {
+pio: pinctrl@01c20800 {
 	compatible = "allwinner,sun5i-a13-pinctrl";
 	reg = <0x01c20800 0x400>;
 	#address-cells = <1>;
@@ -68,3 +68,38 @@
 		allwinner,pull = <0>;
 	};
 };
+
+
+GPIO and interrupt controller
+-----------------------------
+
+This hardware also acts as a GPIO controller and an interrupt
+controller.
+
+Consumers that want to refer to one or the other (or both) should provide,
+through the usual *-gpios and interrupts properties, a cell with 3 arguments:
+first the number of the bank, then the pin inside that bank, and finally the
+flags for the GPIO/interrupt.
+
+Example:
+
+xio: gpio@38 {
+	compatible = "nxp,pcf8574a";
+	reg = <0x38>;
+
+	gpio-controller;
+	#gpio-cells = <2>;
+
+	interrupt-parent = <&pio>;
+	interrupts = <6 0 IRQ_TYPE_EDGE_FALLING>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+reg_usb1_vbus: usb1-vbus {
+	compatible = "regulator-fixed";
+	regulator-name = "usb1-vbus";
+	regulator-min-microvolt = <5000000>;
+	regulator-max-microvolt = <5000000>;
+	gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
new file mode 100644
index 0000000..23ce8dc26
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
@@ -0,0 +1,86 @@
+Conexant Digicolor CX92755 General Purpose Pin Mapping
+
+This document describes the device tree binding of the pin mapping hardware
+modules in the Conexant Digicolor CX92755 SoC. The CX92755 is one of the
+Digicolor series of SoCs.
+
+=== Pin Controller Node ===
+
+Required Properties:
+
+- compatible: Must be "cnxt,cx92755-pinctrl"
+- reg: Base address of the General Purpose Pin Mapping register block and the
+  size of the block.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Must be <2>. The first cell is the pin number and the
+  second cell is used to specify flags. See include/dt-bindings/gpio/gpio.h
+  for possible values.
+
+For example, the following is the bare minimum node:
+
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+As a pin controller device, in addition to the required properties, this node
+should also contain the pin configuration nodes that client devices reference,
+if any.
+
+For a general description of GPIO bindings, please refer to ../gpio/gpio.txt.
+
+=== Pin Configuration Node ===
+
+Each pin configuration node is a sub-node of the pin controller node and is a
+container of an arbitrary number of subnodes, called pin group nodes in this
+document.
+
+Please refer to the pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the definition of a
+"pin configuration node".
+
+=== Pin Group Node ===
+
+A pin group node specifies the desired pin mux for an arbitrary number of
+pins. The name of the pin group node is optional and not used.
+
+A pin group node only affects the properties specified in the node, and has no
+effect on any properties that are omitted.
+
+The pin group node accepts a subset of the generic pin config properties. For
+details of the generic pin config properties, please refer to
+pinctrl-bindings.txt and <include/linux/pinctrl/pinconf-generic.h>.
+
+Required Pin Group Node Properties:
+
+- pins: Multiple strings. Specifies the name(s) of one or more pins to be
+  configured by this node. The format of a pin name string is "GP_xy", where x
+  is an uppercase character from 'A' to 'R', and y is a digit from 0 to 7.
+- function: String. Specifies the pin mux selection. Values must be one of:
+  "gpio", "client_a", "client_b", "client_c"
+
+Example:
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+
+		uart0_default: uart0_active {
+			data_signals {
+				pins = "GP_O0", "GP_O1";
+				function = "client_b";
+			};
+		};
+	};
+
+	uart0: uart@f0000740 {
+		compatible = "cnxt,cx92755-usart";
+		...
+		pinctrl-0 = <&uart0_default>;
+		pinctrl-names = "default";
+	};
+
+In the example above, a single pin group configuration node defines the
+"client select" for the Rx and Tx signals of uart0. The uart0 node references
+that pin configuration node using the &uart0_default phandle.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index ed19991..d7803a2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -7,8 +7,13 @@
 	Usage: required
 	Value type: <string>
 	Definition: Should contain one of:
+		    "qcom,pm8018-mpp",
+		    "qcom,pm8038-mpp",
+		    "qcom,pm8821-mpp",
 		    "qcom,pm8841-mpp",
 		    "qcom,pm8916-mpp",
+		    "qcom,pm8917-mpp",
+		    "qcom,pm8921-mpp",
 		    "qcom,pm8941-mpp",
 		    "qcom,pma8084-mpp",
 
@@ -77,12 +82,9 @@
 	Value type: <string>
 	Definition: Specify the alternative function to be configured for the
 		    specified pins.  Valid values are:
-		    "normal",
-		    "paired",
-		    "dtest1",
-		    "dtest2",
-		    "dtest3",
-		    "dtest4"
+		    "digital",
+		    "analog",
+		    "sink"
 
 - bias-disable:
 	Usage: optional
@@ -127,12 +129,18 @@
 	Definition: Selects the power source for the specified pins. Valid power
 		    sources are defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 
-- qcom,analog-mode:
+- qcom,analog-level:
 	Usage: optional
-	Value type: <none>
-	Definition: Selects Analog mode of operation: combined with input-enable
-		    and/or output-high, output-low MPP could operate as
-		    Bidirectional Logic, Analog Input, Analog Output.
+	Value type: <u32>
+	Definition: Selects the source for analog output. Valid values are
+		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+		    PMIC_MPP_AOUT_LVL_*
+
+- qcom,dtest:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects which dtest rail is routed in the various functions.
+		    Valid values are 1-4
 
 - qcom,amux-route:
 	Usage: optional
@@ -140,6 +148,10 @@
 	Definition: Selects the source for analog input. Valid values are
 		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 		    PMIC_MPP_AMUX_ROUTE_CH5, PMIC_MPP_AMUX_ROUTE_CH6...
+- qcom,paired:
+	Usage: optional
+	Value type: <none>
+	Definition: Indicates that the pin should be operating in paired mode.
 
 Example:
 
@@ -156,7 +168,7 @@
 		pm8841_default: default {
 			gpio {
 				pins = "mpp1", "mpp2", "mpp3", "mpp4";
-				function = "normal";
+				function = "digital";
 				input-enable;
 				power-source = <PM8841_MPP_S3>;
 			};
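
For illustration, hypothetical pin group sub-nodes exercising the new
properties might look as follows (the pin numbers and the
PMIC_MPP_AOUT_LVL_1V25 constant name are assumptions, not taken from this
binding):

	mpp5 {
		pins = "mpp5";
		function = "analog";
		output-high;
		/* assumed constant from <dt-bindings/pinctrl/qcom,pmic-mpp.h> */
		qcom,analog-level = <PMIC_MPP_AOUT_LVL_1V25>;
	};

	mpp6 {
		pins = "mpp6";
		function = "digital";
		qcom,dtest = <1>;
		qcom,paired;
	};
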
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 51cee44..9496934 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -58,12 +58,12 @@
 
 Pin Configuration Node Properties:
 
-- renesas,pins : An array of strings, each string containing the name of a pin.
-- renesas,groups : An array of strings, each string containing the name of a pin
+- pins : An array of strings, each string containing the name of a pin.
+- groups : An array of strings, each string containing the name of a pin
   group.
 
-- renesas,function: A string containing the name of the function to mux to the
-  pin group(s) specified by the renesas,groups property
+- function: A string containing the name of the function to mux to the pin
+  group(s) specified by the groups property.
 
   Valid values for pin, group and function names can be found in the group and
   function arrays of the PFC data file corresponding to the SoC
@@ -71,7 +71,9 @@
 
 The pin configuration parameters use the generic pinconf bindings defined in
 pinctrl-bindings.txt in this directory. The supported parameters are
-bias-disable, bias-pull-up and bias-pull-down.
+bias-disable, bias-pull-up, bias-pull-down and power-source. For pins that
+have a configurable I/O voltage, the power-source value should be the
+nominal I/O voltage in millivolts.
 
 
 GPIO
@@ -141,19 +143,19 @@
 
 		mmcif_pins: mmcif {
 			mux {
-				renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
-				renesas,function = "mmc0";
+				groups = "mmc0_data8_0", "mmc0_ctrl_0";
+				function = "mmc0";
 			};
 			cfg {
-				renesas,groups = "mmc0_data8_0";
-				renesas,pins = "PORT279";
+				groups = "mmc0_data8_0";
+				pins = "PORT279";
 				bias-pull-up;
 			};
 		};
 
 		scifa4_pins: scifa4 {
-			renesas,groups = "scifa4_data", "scifa4_ctrl";
-			renesas,function = "scifa4";
+			groups = "scifa4_data", "scifa4_ctrl";
+			function = "scifa4";
 		};
 	};
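
As a sketch of the new power-source parameter (the group and function names
are illustrative, not taken from a PFC data file), a pin group with a nominal
3.3V I/O voltage could be configured as:

	sdhi0_pins: sd0 {
		groups = "sdhi0_data4", "sdhi0_ctrl";
		function = "sdhi0";
		power-source = <3300>;
	};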
 
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
index f63fcb3..2213802 100644
--- a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
@@ -3,7 +3,9 @@
 Required properties:
 - compatible: "stericsson,db8500-pinctrl", "stericsson,db8540-pinctrl",
               "stericsson,stn8815-pinctrl"
-- reg: Should contain the register physical address and length of the PRCMU.
+- nomadik-gpio-chips: array of phandles to the corresponding GPIO chips
+              (these have the register ranges used by the pin controller).
+- prcm: phandle to the PRCMU managing the back end of this pin controller
 
 Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
@@ -74,7 +76,8 @@
 
 	pinctrl@80157000 {
 		compatible = "stericsson,db8500-pinctrl";
-		reg = <0x80157000 0x2000>;
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
+		prcm = <&prcmu>;
 
 		pinctrl-names = "default";
 
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
new file mode 100644
index 0000000..0532c46
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
@@ -0,0 +1,18 @@
+Freescale Supplement configuration unit (SCFG)
+
+SCFG is the supplemental configuration unit that provides SoC-specific
+configuration and status registers for the chip, such as the PEX port
+status.
+
+Required properties:
+
+- compatible: should be "fsl,<chip>-scfg"
+- reg: should contain base address and length of SCFG memory-mapped
+registers
+
+Example:
+
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/max77686.txt b/Documentation/devicetree/bindings/regulator/max77686.txt
new file mode 100644
index 0000000..0dded64
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max77686.txt
@@ -0,0 +1,72 @@
+Binding for Maxim MAX77686 regulators
+
+This is a part of the device tree bindings of MAX77686 multi-function device.
+More information can be found in ../mfd/max77686.txt file.
+
+The MAX77686 PMIC has 9 high-efficiency Buck and 26 Low-DropOut (LDO)
+regulators that can be controlled over I2C.
+
+The following properties should be present in the main device node of the MFD chip.
+
+Optional node:
+- voltage-regulators : The regulators of max77686 have to be instantiated
+  under a subnode named "voltage-regulators" using the following format.
+
+	regulator_name {
+		regulator-compatible = LDOn/BUCKn
+		standard regulator constraints....
+	};
+	Refer to Documentation/devicetree/bindings/regulator/regulator.txt.
+
+  The regulator node's name should be initialized with a string
+matching its hardware counterpart, as follows:
+
+	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
+			example: LDO1, LDO2, LDO26.
+	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
+			example: BUCK1, BUCK5, BUCK9.
+
+  Regulators which can be turned off during system suspend:
+	-LDOn	:	2, 6-8, 10-12, 14-16,
+	-BUCKn	:	1-4.
+  Use the standard regulator bindings for them ('regulator-off-in-suspend').
+
+  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured for GPIO enable
+  control. To turn this feature on, this property must be added to the
+  regulator sub-node:
+	- maxim,ena-gpios :	one GPIO specifier for enable control (the gpio
+				flags are actually ignored and ACTIVE_HIGH is
+				always used)
+
+Example:
+
+	max77686: pmic@09 {
+		compatible = "maxim,max77686";
+		interrupt-parent = <&wakeup_eint>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+
+		voltage-regulators {
+			ldo11_reg: LDO11 {
+				regulator-name = "vdd_ldo11";
+				regulator-min-microvolt = <1900000>;
+				regulator-max-microvolt = <1900000>;
+				regulator-always-on;
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "vdd_mif";
+				regulator-min-microvolt = <950000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			buck9_reg: BUCK9 {
+				regulator-name = "CAM_ISP_CORE_1.2V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1200000>;
+				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.txt b/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.txt
new file mode 100644
index 0000000..3c97bd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.txt
@@ -0,0 +1,21 @@
+NXP LPC1788 real-time clock
+
+The LPC1788 RTC provides calendar and clock functionality
+together with periodic tick and alarm interrupt support.
+
+Required properties:
+- compatible	: must contain "nxp,lpc1788-rtc"
+- reg		: Specifies base physical address and size of the registers.
+- interrupts	: A single interrupt specifier.
+- clocks	: Must contain clock specifiers for rtc and register clock
+- clock-names	: Must contain "rtc" and "reg"
+  See ../clocks/clock-bindings.txt for details.
+
+Example:
+rtc: rtc@40046000 {
+	compatible = "nxp,lpc1788-rtc";
+	reg = <0x40046000 0x1000>;
+	interrupts = <47>;
+	clocks = <&creg_clk 0>, <&ccu1 CLK_CPU_BUS>;
+	clock-names = "rtc", "reg";
+};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index 43a8366..bf7d11a 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -16,6 +16,8 @@
 Optional properties:
 - system-power-controller: whether the rtc is controlling the system power
   through pmic_power_en
+- clocks: Any internal or external clocks feeding into the rtc
+- clock-names: Corresponding names of the clocks
 
 Example:
 
@@ -26,4 +28,6 @@
 		      19>;
 	interrupt-parent = <&intc>;
 	system-power-controller;
+	clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
+	clock-names = "ext-clk", "int-clk";
 };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
index 73407f50..daf8826 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
@@ -1,20 +1,23 @@
 STMicroelectronics Low Power Controller (LPC) - RTC
 ===================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
-- compatible 	: Must be one of: "st,stih407-lpc" "st,stih416-lpc"
-				  "st,stih415-lpc" "st,stid127-lpc"
+- compatible 	: Must be: "st,stih407-lpc"
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run either one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
 
 Example:
 	lpc@fde05000 {
diff --git a/Documentation/devicetree/bindings/i2c/ti,bq32k.txt b/Documentation/devicetree/bindings/rtc/ti,bq32k.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/ti,bq32k.txt
rename to Documentation/devicetree/bindings/rtc/ti,bq32k.txt
diff --git a/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt b/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt
new file mode 100644
index 0000000..0df6f01
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt
@@ -0,0 +1,25 @@
+* Xilinx Zynq Ultrascale+ MPSoC Real Time Clock
+
+RTC controller for the Xilinx Zynq MPSoC Real Time Clock.
+Separate IRQ lines are provided for seconds and alarm.
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-rtc"
+- reg: Physical base address of the controller and length
+       of memory mapped region.
+- interrupts: IRQ lines for the RTC.
+- interrupt-names: interrupt line names, e.g. "sec", "alarm"
+
+Optional:
+- calibration: calibration value for the 1 sec period, which will
+		be programmed directly into the calibration register
+
+Example:
+rtc: rtc@ffa60000 {
+	compatible = "xlnx,zynqmp-rtc";
+	reg = <0x0 0xffa60000 0x100>;
+	interrupt-parent = <&gic>;
+	interrupts = <0 26 4>, <0 27 4>;
+	interrupt-names = "alarm", "sec";
+	calibration = <0x198233>;
+};
diff --git a/Documentation/devicetree/bindings/sound/cs4349.txt b/Documentation/devicetree/bindings/sound/cs4349.txt
new file mode 100644
index 0000000..54c117b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs4349.txt
@@ -0,0 +1,19 @@
+CS4349 audio CODEC
+
+Required properties:
+
+  - compatible : "cirrus,cs4349"
+
+  - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+  - reset-gpios : a GPIO spec for the reset pin.
+
+Example:
+
+codec: cs4349@48 {
+        compatible = "cirrus,cs4349";
+        reg = <0x48>;
+        reset-gpios = <&gpio 54 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/ics43432.txt b/Documentation/devicetree/bindings/sound/ics43432.txt
new file mode 100644
index 0000000..b02e3a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ics43432.txt
@@ -0,0 +1,17 @@
+Invensense ICS-43432 MEMS microphone with I2S output.
+
+There are no software configuration options for this device; the only host
+connection is the I2S interface. Apart from requirements on clock frequency
+(460 kHz to 3.379 MHz according to the data sheet), there must be
+64 clock cycles in each stereo output frame; 24 of the 32 available bits
+contain audio data. A hardware pin determines if the device outputs data
+on the left or right channel of the I2S frame.
+
+Required properties:
+  - compatible : Must be "invensense,ics43432"
+
+Example:
+
+	ics43432: ics43432 {
+		compatible = "invensense,ics43432";
+	};
diff --git a/Documentation/devicetree/bindings/sound/max98357a.txt b/Documentation/devicetree/bindings/sound/max98357a.txt
index a7a149a..28645a2 100644
--- a/Documentation/devicetree/bindings/sound/max98357a.txt
+++ b/Documentation/devicetree/bindings/sound/max98357a.txt
@@ -4,7 +4,11 @@
 
 Required properties:
 - compatible   : "maxim,max98357a"
-- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
+
+Optional properties:
+- sdmode-gpios : GPIO specifier for the chip's SD_MODE pin.
+        If this option is not specified, the driver does not manage
+        the pin state (i.e. the chip is always on).
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b6b3a78..1173395 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -18,6 +18,12 @@
 - rcar_sound,src		: Should contain SRC feature.
 				  The number of SRC subnode should be same as HW.
 				  see below for detail.
+- rcar_sound,ctu		: Should contain CTU feature.
+				  The number of CTU subnode should be same as HW.
+				  see below for detail.
+- rcar_sound,mix		: Should contain MIX feature.
+				  The number of MIX subnode should be same as HW.
+				  see below for detail.
 - rcar_sound,dvc		: Should contain DVC feature.
 				  The number of DVC subnode should be same as HW.
 				  see below for detail.
@@ -90,6 +96,22 @@
 		};
 	};
 
+	rcar_sound,mix {
+		mix0: mix@0 { };
+		mix1: mix@1 { };
+	};
+
+	rcar_sound,ctu {
+		ctu00: ctu@0 { };
+		ctu01: ctu@1 { };
+		ctu02: ctu@2 { };
+		ctu03: ctu@3 { };
+		ctu10: ctu@4 { };
+		ctu11: ctu@5 { };
+		ctu12: ctu@6 { };
+		ctu13: ctu@7 { };
+	};
+
 	rcar_sound,src {
 		src0: src@0 {
 			interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
index c641550..962748a 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
@@ -6,6 +6,7 @@
 
 - compatible				: "renesas,rsrc-card,<board>"
 					  Examples with soctypes are:
+					    - "renesas,rsrc-card"
 					    - "renesas,rsrc-card,lager"
 					    - "renesas,rsrc-card,koelsch"
 Optional properties:
@@ -29,6 +30,12 @@
 - frame-inversion			: bool property. Add this if the
 					  dai-link uses frame clock inversion.
 - convert-rate				: platform specified sampling rate convert
+- audio-prefix				: see audio-routing
+- audio-routing				: A list of the connections between audio components.
+					  Each entry is a pair of strings, the first being the connection's sink,
+					  the second being the connection's source. Use audio-prefix if some
+					  components use the same sink/source names. These properties can be
+					  used when the compatible string is "renesas,rsrc-card"; see the
+					  sketch below.
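
A minimal sketch of these two properties (the "ak4642" prefix and the widget
names are assumptions for illustration only):

	sound {
		compatible = "renesas,rsrc-card";
		audio-prefix = "ak4642";
		/* the sink "ak4642 Playback" is fed from the source "DAI0 Playback" */
		audio-routing = "ak4642 Playback", "DAI0 Playback";
	};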
 
 Required CPU/CODEC subnodes properties:
 
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
new file mode 100644
index 0000000..a805aa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -0,0 +1,19 @@
+ROCKCHIP with MAX98090 CODEC
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-max98090"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec
+- rockchip,headset-codec: The phandle of the external chip used for jack detection
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-max98090";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&max98090>;
+	rockchip,headset-codec = <&headsetcodec>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
new file mode 100644
index 0000000..411a62b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
@@ -0,0 +1,17 @@
+ROCKCHIP with RT5645/RT5650 CODECS
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-rt5645"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the RT5645/RT5650 audio codec
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-rt5645";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&rt5645>;
+};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
new file mode 100644
index 0000000..028fa1c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -0,0 +1,155 @@
+STMicroelectronics sti ASoC cards
+
+The sti ASoC sound card can be used for all sti SoCs using the internal
+sti-sas codec or external codecs.
+
+The sti sound drivers expose the sti SoC audio interface through the generic
+ASoC simple card. For details about sound card declaration, please refer to
+Documentation/devicetree/bindings/sound/simple-card.txt.
+
+1) sti-uniperiph-dai: audio dai device.
+---------------------------------------
+
+Required properties:
+  - compatible: "st,sti-uni-player" or "st,sti-uni-reader"
+
+  - st,syscfg: phandle to boot-device system configuration registers
+
+  - clock-names: names of the clocks listed in the clocks property, in the same order
+
+  - reg: CPU DAI IP base address and size entries, listed in the same
+	 order as the CPU_DAI properties.
+
+  - reg-names: names of the mapped memory regions listed in the reg property in
+	       the same order.
+
+  - interrupts: CPU_DAI interrupt line, listed in the same order as the
+		CPU_DAI properties.
+
+  - dmas: CPU_DAI DMA controller phandle and DMA request line, listed in the
+	 same order as the CPU_DAI properties.
+
+  - dma-names: identifier string for each DMA request line in the dmas property.
+	"tx" for "st,sti-uni-player" compatibility
+	"rx" for "st,sti-uni-reader" compatibility
+
+  - version: IP version integrated in SOC.
+
+  - dai-name: DAI name that describes the IP.
+
+Required properties ("st,sti-uni-player" compatibility only):
+  - clocks: CPU_DAI IP clock source, listed in the same order as the
+	    CPU_DAI properties.
+
+  - uniperiph-id: internal SOC IP instance ID.
+
+  - mode: IP working mode, depending on the associated codec.
+	"HDMI" connected to HDMI codec IP and IEC HDMI formats.
+	"SPDIF" connected to SPDIF codec and supports SPDIF formats.
+	"PCM"  PCM standard mode for I2S or TDM bus.
+
+Optional properties:
+  - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
+	       external codec connections.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+
+	sti_uni_player2: sti-uni-player@2 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+		reg = <0x8D82000 0x158>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 4 0 1>;
+		dai-name = "Uni Player #1 (DAC)";
+		dma-names = "tx";
+		uniperiph-id = <2>;
+		version = <5>;
+		mode = "PCM";
+	};
+
+	sti_uni_player3: sti-uni-player@3 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
+		reg = <0x8D85000 0x158>;
+		interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 7 0 1>;
+		dma-names = "tx";
+		dai-name = "Uni Player #1 (PIO)";
+		uniperiph-id = <3>;
+		version = <5>;
+		mode = "SPDIF";
+	};
+
+	sti_uni_reader1: sti-uni-reader@1 {
+		compatible = "st,sti-uni-reader";
+		status = "disabled";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		reg = <0x8D84000 0x158>;
+		interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 6 0 1>;
+		dma-names = "rx";
+		dai-name = "Uni Reader #1 (HDMI RX)";
+		version = <3>;
+	};
+
+2) sti-sas-codec: internal audio codec IPs driver
+-------------------------------------------------
+
+Required properties:
+  - compatible: "st,sti<chip>-sas-codec".
+	Should be "st,stih416-sas-codec" or "st,stih407-sas-codec".
+
+  - st,syscfg: phandle to boot-device system configuration registers.
+
+  - pinctrl-0: SPDIF PIO description.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+	sti_sas_codec: sti-sas-codec {
+		compatible = "st,stih407-sas-codec";
+		#sound-dai-cells = <1>;
+		st,syscfg = <&syscfg_core>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_spdif_out>;
+	};
+
+Example of audio card declaration:
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "sti audio card";
+		status = "okay";
+
+		simple-audio-card,dai-link@0 {
+			/* DAC */
+			format = "i2s";
+			dai-tdm-slot-width = <32>;
+			cpu {
+				sound-dai = <&sti_uni_player2>;
+			};
+
+			codec {
+				sound-dai = <&sti_sas_codec 1>;
+			};
+		};
+		simple-audio-card,dai-link@1 {
+			/* SPDIF */
+			format = "left_j";
+			cpu {
+				sound-dai = <&sti_uni_player3>;
+			};
+
+			codec {
+				sound-dai = <&sti_sas_codec 0>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
new file mode 100644
index 0000000..7afce80
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
@@ -0,0 +1,28 @@
+* Pistachio general-purpose timer based clocksource
+
+Required properties:
+ - compatible: "img,pistachio-gptimer".
+ - reg: Address range of the timer registers.
+ - interrupts: An interrupt for each of the four timers
+ - clocks: Should contain a clock specifier for each entry in clock-names
+ - clock-names: Should contain the following entries:
+                "sys", interface clock
+                "slow", slow counter clock
+                "fast", fast counter clock
+ - img,cr-periph: Must contain a phandle to the peripheral control
+		  syscon node.
+
+Example:
+	timer: timer@18102000 {
+		compatible = "img,pistachio-gptimer";
+		reg = <0x18102000 0x100>;
+		interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 61 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 62 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 63 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
+		         <&clk_periph PERIPH_CLK_COUNTER_SLOW>,
+			 <&cr_periph SYS_CLK_TIMER>;
+		clock-names = "fast", "slow", "sys";
+		img,cr-periph = <&cr_periph>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/st,stih407-lpc b/Documentation/devicetree/bindings/timer/st,stih407-lpc
new file mode 100644
index 0000000..72acb48
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/st,stih407-lpc
@@ -0,0 +1,28 @@
+STMicroelectronics Low Power Controller (LPC) - Clocksource
+===========================================================
+
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
+
+[See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../rtc/rtc-st-lpc.txt for RTC options]
+
+Required properties
+
+- compatible   : Must be: "st,stih407-lpc"
+- reg          : LPC registers base address + size
+- interrupts   : LPC interrupt line number and associated flags
+- clocks       : Clock used by LPC device (See: ../clock/clock-bindings.txt)
+- st,lpc-mode  : The LPC can run either one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
+
+Example:
+       lpc@fde05000 {
+               compatible      = "st,stih407-lpc";
+               reg             = <0xfde05000 0x1000>;
+               clocks          = <&clk_s_d3_flexgen CLK_LPC_0>;
+               st,lpc-mode     = <ST_LPC_MODE_CLKSRC>;
+       };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 56a6d4e..ac5f0c3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -55,6 +55,7 @@
 cosmic	Cosmic Circuits
 crystalfontz	Crystalfontz America, Inc.
 cubietech	Cubietech, Ltd.
+cypress	Cypress Semiconductor Corporation
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 davicom	DAVICOM Semiconductor, Inc.
 delta	Delta Electronics, Inc.
@@ -111,6 +112,7 @@
 innolux	Innolux Corporation
 intel	Intel Corporation
 intercontrol	Inter Control Group
+invensense	InvenSense Inc.
 isee	ISEE 2007 S.L.
 isil	Intersil
 jedec	JEDEC Solid State Technology Association
@@ -143,6 +145,7 @@
 murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
 national	National Semiconductor
+nec	NEC LCD Technologies, Ltd.
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
@@ -150,10 +153,13 @@
 newhaven	Newhaven Display International
 nintendo	Nintendo
 nokia	Nokia
+nuvoton	Nuvoton Technology Corporation
 nvidia	NVIDIA
 nxp	NXP Semiconductors
+okaya	Okaya Electric America, Inc.
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
+option	Option NV
 ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 panasonic	Panasonic Corporation
diff --git a/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
new file mode 100644
index 0000000..424f844
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
@@ -0,0 +1,40 @@
+Binding for Qualcomm PM8941 WLED driver
+
+Required properties:
+- compatible: should be "qcom,pm8941-wled"
+- reg: slave address
+
+Optional properties:
+- label: The name of the backlight device
+- qcom,cs-out: bool; enable current sink output
+- qcom,cabc: bool; enable content adaptive backlight control
+- qcom,ext-gen: bool; use externally generated modulator signal to dim
+- qcom,current-limit: mA; per-string current limit; value from 0 to 25
+	default: 20mA
+- qcom,current-boost-limit: mA; boost current limit; one of:
+	105, 385, 525, 805, 980, 1260, 1400, 1680
+	default: 805mA
+- qcom,switching-freq: kHz; switching frequency; one of:
+	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
+	1600, 1920, 2400, 3200, 4800, 9600,
+	default: 1600kHz
+- qcom,ovp: V; Over-voltage protection limit; one of:
+	27, 29, 32, 35
+	default: 29V
+- qcom,num-strings: #; number of led strings attached; value from 1 to 3
+	default: 2
+
+Example:
+
+pm8941-wled@d800 {
+	compatible = "qcom,pm8941-wled";
+	reg = <0xd800>;
+	label = "backlight";
+
+	qcom,cs-out;
+	qcom,current-limit = <20>;
+	qcom,current-boost-limit = <805>;
+	qcom,switching-freq = <1600>;
+	qcom,ovp = <29>;
+	qcom,num-strings = <2>;
+};
diff --git a/Documentation/devicetree/bindings/video/fsl,dcu.txt b/Documentation/devicetree/bindings/video/fsl,dcu.txt
new file mode 100644
index 0000000..ebf1be9
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/fsl,dcu.txt
@@ -0,0 +1,22 @@
+Device Tree bindings for Freescale DCU DRM Driver
+
+Required properties:
+- compatible:		Should be one of
+	* "fsl,ls1021a-dcu".
+	* "fsl,vf610-dcu".
+
+- reg:			Address and length of the register set for dcu.
+- clocks:		From common clock binding: handle to dcu clock.
+- clock-names:		From common clock binding: Shall be "dcu".
+- big-endian:		Boolean property; LS1021A DCU registers are big-endian.
+- fsl,panel:		The phandle to panel node.
+
+Examples:
+dcu: dcu@2ce0000 {
+	compatible = "fsl,ls1021a-dcu";
+	reg = <0x0 0x2ce0000 0x0 0x10000>;
+	clocks = <&platform_clk 0>;
+	clock-names = "dcu";
+	big-endian;
+	fsl,panel = <&panel>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
index 388c88a..039c5ca 100644
--- a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
@@ -1,9 +1,11 @@
 STMicroelectronics Low Power Controller (LPC) - Watchdog
 ========================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../rtc/rtc-st-lpc.txt for RTC options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
@@ -12,9 +14,11 @@
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run either one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
 
 Required properties [watchdog mode]
 
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index ca67b0f..67d4ce4 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -345,12 +345,29 @@
       that abstracts it away.
 
   * DMA_CTRL_ACK
-    - If set, the transfer can be reused after being completed.
-    - There is a guarantee the transfer won't be freed until it is acked
-      by async_tx_ack().
+    - If clear, the descriptor cannot be reused by the provider until the
+      client acknowledges receipt, i.e. has had a chance to establish any
+      dependency chains
+    - This can be acked by invoking async_tx_ack()
+    - If set, it does not mean the descriptor can be reused
+
+  * DMA_CTRL_REUSE
+    - If set, the descriptor can be reused after being completed. It should
+      not be freed by the provider if this flag is set.
+    - The descriptor should be prepared for reuse by invoking
+      dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
+    - dmaengine_desc_set_reuse() will succeed only when the channel supports
+      reusable descriptors, as exhibited by its capabilities
     - As a consequence, if a device driver wants to skip the dma_map_sg() and
       dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
       it can resubmit the transfer right after its completion.
+    - A descriptor can be freed in a few ways:
+	- Clearing DMA_CTRL_REUSE by invoking dmaengine_desc_clear_reuse()
+	  and submitting it for the last transaction
+	- Explicitly invoking dmaengine_desc_free(); this can succeed only
+	  when DMA_CTRL_REUSE is already set
+	- Terminating the channel
+
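As a sketch of the client-side flow described above (channel setup and error
handling are elided; this assumes a slave channel whose capabilities advertise
descriptor reuse):

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (dmaengine_desc_set_reuse(desc))
		return -EINVAL;		/* channel cannot reuse descriptors */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	/* ... wait for completion; desc is not freed because reuse is set ... */

	dmaengine_submit(desc);		/* the same descriptor, resubmitted */
	dma_async_issue_pending(chan);
	/* ... wait for completion ... */

	dmaengine_desc_free(desc);	/* valid only while DMA_CTRL_REUSE is set */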
 
 General Design Notes
 --------------------
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4cf1a2a..415484f 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -15,6 +15,10 @@
 
   injects page allocation failures. (alloc_pages(), get_free_pages(), ...)
 
+o fail_futex
+
+  injects futex deadlock and uaddr fault errors.
+
 o fail_make_request
 
   injects disk IO errors on devices permitted by setting
@@ -113,6 +117,12 @@
 	specifies the minimum page allocation order to be injected
 	failures.
 
+- /sys/kernel/debug/fail_futex/ignore-private:
+
+	Format: { 'Y' | 'N' }
+	default is 'N'; setting it to 'Y' will disable failure injection
+	when dealing with private (address space) futexes.
+
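For example, to inject failures only into shared futexes, with a 10%
probability, for at most 5 operations (the probability and times knobs are
the common fault-attribute controls; paths assume debugfs is mounted at
/sys/kernel/debug):

	echo Y > /sys/kernel/debug/fail_futex/ignore-private
	echo 10 > /sys/kernel/debug/fail_futex/probability
	echo 5 > /sys/kernel/debug/fail_futex/times
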
 o Boot option
 
 In order to inject faults while debugfs is not available (early boot time),
@@ -121,6 +131,7 @@
 	failslab=
 	fail_page_alloc=
 	fail_make_request=
+	fail_futex=
 	mmc_core.fail_request=<interval>,<probability>,<space>,<times>
 
 How to add new fault injection capability
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 4efe36c..d605c3f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -22,7 +22,7 @@
     |        m68k: | TODO |
     |       metag: | TODO |
     |  microblaze: | TODO |
-    |        mips: | TODO |
+    |        mips: |  ok  |
     |     mn10300: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index bea8009..76d39d6 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -32,7 +32,7 @@
     |       score: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
-    |        tile: | TODO |
+    |        tile: |  ok  |
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
new file mode 100644
index 0000000..261b92e
--- /dev/null
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -0,0 +1,40 @@
+#
+# Feature name:          batch-unmap-tlb-flush
+#         Kconfig:       ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#         description:   arch supports deferral of TLB flush until multiple pages are unmapped
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: | TODO |
+    |       avr32: |  ..  |
+    |    blackfin: | TODO |
+    |         c6x: |  ..  |
+    |        cris: |  ..  |
+    |         frv: |  ..  |
+    |       h8300: |  ..  |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m32r: | TODO |
+    |        m68k: |  ..  |
+    |       metag: | TODO |
+    |  microblaze: |  ..  |
+    |        mips: | TODO |
+    |     mn10300: | TODO |
+    |       nios2: |  ..  |
+    |    openrisc: |  ..  |
+    |      parisc: | TODO |
+    |     powerpc: | TODO |
+    |        s390: | TODO |
+    |       score: |  ..  |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |        tile: | TODO |
+    |          um: |  ..  |
+    |   unicore32: |  ..  |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 6a34a0f..06d4434 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -397,7 +397,8 @@
 	int (*release) (struct gendisk *, fmode_t);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
+	int (*direct_access) (struct block_device *, sector_t, void __pmem **,
+				unsigned long *);
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 7af2851..7bde640 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -60,9 +60,10 @@
 - implementing the direct_IO address space operation, and calling
   dax_do_io() instead of blockdev_direct_IO() if S_DAX is set
 - implementing an mmap file operation for DAX files which sets the
-  VM_MIXEDMAP flag on the VMA, and setting the vm_ops to include handlers
-  for fault and page_mkwrite (which should probably call dax_fault() and
-  dax_mkwrite(), passing the appropriate get_block() callback)
+  VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
+  include handlers for fault, pmd_fault and page_mkwrite (which should
+  probably call dax_fault(), dax_pmd_fault() and dax_mkwrite(), passing the
+  appropriate get_block() callback)
 - calling dax_truncate_page() instead of block_truncate_page() for DAX files
 - calling dax_zero_page_range() instead of zero_user() for DAX files
 - ensuring that there is sufficient locking between reads, writes,
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index b971456..5575539 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -360,8 +360,8 @@
 the time of the crash, then there is no guarantee of consistency for
 the blocks in that transaction so they are discarded (which means any
 filesystem changes they represent are also lost).
-Check Documentation/filesystems/ext3.txt if you want to read more about
-ext3 and journaling.
+Check Documentation/filesystems/ext4.txt if you want to read more about
+ext4 and journaling.
 
 References
 ==========
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 7ed0d17..58758fb 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -6,210 +6,7 @@
 for the 2.2 branch, and ported to 2.4 kernels by Peter Braam, Andreas Dilger,
 Andrew Morton, Alexander Viro, Ted Ts'o and Stephen Tweedie.
 
-Ext3 is the ext2 filesystem enhanced with journalling capabilities.
+Ext3 is the ext2 filesystem enhanced with journalling capabilities. The
+filesystem is a subset of the ext4 filesystem; use the ext4 driver to access
+ext3 filesystems.
 
-Options
-=======
-
-When mounting an ext3 filesystem, the following option are accepted:
-(*) == default
-
-ro			Mount filesystem read only. Note that ext3 will replay
-			the journal (and thus write to the partition) even when
-			mounted "read only". Mount options "ro,noload" can be
-			used to prevent writes to the filesystem.
-
-journal=update		Update the ext3 file system's journal to the current
-			format.
-
-journal=inum		When a journal already exists, this option is ignored.
-			Otherwise, it specifies the number of the inode which
-			will represent the ext3 file system's journal file.
-
-journal_path=path
-journal_dev=devnum	When the external journal device's major/minor numbers
-			have changed, these options allow the user to specify
-			the new journal location.  The journal device is
-			identified through either its new major/minor numbers
-			encoded in devnum, or via a path to the device.
-
-norecovery		Don't load the journal on mounting. Note that this forces
-noload			mount of inconsistent filesystem, which can lead to
-			various problems.
-
-data=journal		All data are committed into the journal prior to being
-			written into the main file system.
-
-data=ordered	(*)	All data are forced directly out to the main file
-			system prior to its metadata being committed to the
-			journal.
-
-data=writeback		Data ordering is not preserved, data may be written
-			into the main file system after its metadata has been
-			committed to the journal.
-
-commit=nrsec	(*)	Ext3 can be told to sync all its data and metadata
-			every 'nrsec' seconds. The default value is 5 seconds.
-			This means that if you lose your power, you will lose
-			as much as the latest 5 seconds of work (your
-			filesystem will not be damaged though, thanks to the
-			journaling).  This default value (or any low value)
-			will hurt performance, but it's good for data-safety.
-			Setting it to 0 will have the same effect as leaving
-			it at the default (5 seconds).
-			Setting it to very large values will improve
-			performance.
-
-barrier=<0|1(*)>	This enables/disables the use of write barriers in
-barrier	(*)		the jbd code.  barrier=0 disables, barrier=1 enables.
-nobarrier		This also requires an IO stack which can support
-			barriers, and if jbd gets an error on a barrier
-			write, it will disable again with a warning.
-			Write barriers enforce proper on-disk ordering
-			of journal commits, making volatile disk write caches
-			safe to use, at some performance penalty.  If
-			your disks are battery-backed in one way or another,
-			disabling barriers may safely improve performance.
-			The mount options "barrier" and "nobarrier" can
-			also be used to enable or disable barriers, for
-			consistency with other ext3 mount options.
-
-user_xattr		Enables Extended User Attributes.  Additionally, you
-			need to have extended attribute support enabled in the
-			kernel configuration (CONFIG_EXT3_FS_XATTR).  See the
-			attr(5) manual page and http://acl.bestbits.at/ to
-			learn more about extended attributes.
-
-nouser_xattr		Disables Extended User Attributes.
-
-acl			Enables POSIX Access Control Lists support.
-			Additionally, you need to have ACL support enabled in
-			the kernel configuration (CONFIG_EXT3_FS_POSIX_ACL).
-			See the acl(5) manual page and http://acl.bestbits.at/
-			for more information.
-
-noacl			This option disables POSIX Access Control List
-			support.
-
-reservation
-
-noreservation
-
-bsddf 		(*)	Make 'df' act like BSD.
-minixdf			Make 'df' act like Minix.
-
-check=none		Don't do extra checking of bitmaps on mount.
-nocheck
-
-debug			Extra debugging information is sent to syslog.
-
-errors=remount-ro	Remount the filesystem read-only on an error.
-errors=continue		Keep going on a filesystem error.
-errors=panic		Panic and halt the machine if an error occurs.
-			(These mount options override the errors behavior
-			specified in the superblock, which can be
-			configured using tune2fs.)
-
-data_err=ignore(*)	Just print an error message if an error occurs
-			in a file data buffer in ordered mode.
-data_err=abort		Abort the journal if an error occurs in a file
-			data buffer in ordered mode.
-
-grpid			Give objects the same group ID as their creator.
-bsdgroups
-
-nogrpid		(*)	New objects have the group ID of their creator.
-sysvgroups
-
-resgid=n		The group ID which may use the reserved blocks.
-
-resuid=n		The user ID which may use the reserved blocks.
-
-sb=n			Use alternate superblock at this location.
-
-quota			These options are ignored by the filesystem. They
-noquota			are used only by quota tools to recognize volumes
-grpquota		where quota should be turned on. See documentation
-usrquota		in the quota-tools package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-jqfmt=<quota type>	These options tell filesystem details about quota
-usrjquota=<file>	so that quota information can be properly updated
-grpjquota=<file>	during journal replay. They replace the above
-			quota options. See documentation in the quota-tools
-			package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-Specification
-=============
-Ext3 shares all disk implementation with the ext2 filesystem, and adds
-transactions capabilities to ext2.  Journaling is done by the Journaling Block
-Device layer.
-
-Journaling Block Device layer
------------------------------
-The Journaling Block Device layer (JBD) isn't ext3 specific.  It was designed
-to add journaling capabilities to a block device.  The ext3 filesystem code
-will inform the JBD of modifications it is performing (called a transaction).
-The journal supports the transactions start and stop, and in case of a crash,
-the journal can replay the transactions to quickly put the partition back into
-a consistent state.
-
-Handles represent a single atomic update to a filesystem.  JBD can handle an
-external journal on a block device.
-
-Data Mode
----------
-There are 3 different data modes:
-
-* writeback mode
-In data=writeback mode, ext3 does not journal data at all.  This mode provides
-a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
-mode - metadata journaling.  A crash+recovery can cause incorrect data to
-appear in files which were written shortly before the crash.  This mode will
-typically provide the best ext3 performance.
-
-* ordered mode
-In data=ordered mode, ext3 only officially journals metadata, but it logically
-groups metadata and data blocks into a single unit called a transaction.  When
-it's time to write the new metadata out to disk, the associated data blocks
-are written first.  In general, this mode performs slightly slower than
-writeback but significantly faster than journal mode.
-
-* journal mode
-data=journal mode provides full data and metadata journaling.  All new data is
-written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both data and
-metadata into a consistent state.  This mode is the slowest except when data
-needs to be read from and written to disk at the same time where it
-outperforms all other modes.
-
-Compatibility
--------------
-
-Ext2 partitions can be easily convert to ext3, with `tune2fs -j <dev>`.
-Ext3 is fully compatible with Ext2.  Ext3 partitions can easily be mounted as
-Ext2.
-
-
-External Tools
-==============
-See manual pages to learn more.
-
-tune2fs: 	create a ext3 journal on a ext2 partition with the -j flag.
-mke2fs: 	create a ext3 partition with the -j flag.
-debugfs: 	ext2 and ext3 file system debugger.
-ext2online:	online (mounted) ext2 and ext3 filesystem resizer
-
-
-References
-==========
-
-kernel source:	<file:fs/ext3/>
-		<file:fs/jbd/>
-
-programs: 	http://e2fsprogs.sourceforge.net/
-		http://ext2resize.sourceforge.net
-
-useful links:	http://www.ibm.com/developerworks/library/l-fs7/index.html
-        http://www.ibm.com/developerworks/library/l-fs8/index.html
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e9e750e..e2d5105 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -143,7 +143,9 @@
 extent_cache           Enable an extent cache based on rb-tree, it can cache
                        as many as extent which map between contiguous logical
                        address and physical address per inode, resulting in
-                       increasing the cache hit ratio.
+                       increasing the cache hit ratio. Enabled by default.
+noextent_cache         Disable the rb-tree based extent cache explicitly; see
+                       the extent_cache mount option above.
 noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
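
For example, to mount with the extent cache explicitly disabled (the device
and mount point are placeholders):

	# mount -t f2fs -o noextent_cache /dev/sdb1 /mnt/f2fs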
 
diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index 95c13aa..906b6c2 100644
--- a/Documentation/filesystems/nfs/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
@@ -138,9 +138,9 @@
   - Build, install, reboot
 
     The NFS/RDMA code will be enabled automatically if NFS and RDMA
-    are turned on. The NFS/RDMA client and server are configured via the
-    SUNRPC_XPRT_RDMA_CLIENT and SUNRPC_XPRT_RDMA_SERVER config options that both
-    depend on SUNRPC and INFINIBAND. The default value of both options will be:
+    are turned on. The NFS/RDMA client and server are configured via the hidden
+    SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
+    value of SUNRPC_XPRT_RDMA will be:
 
      - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client
        and server will not be built
@@ -238,9 +238,8 @@
 
   - Start the NFS server
 
-    If the NFS/RDMA server was built as a module
-    (CONFIG_SUNRPC_XPRT_RDMA_SERVER=m in kernel config), load the RDMA
-    transport module:
+    If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+    kernel config), load the RDMA transport module:
 
     $ modprobe svcrdma
 
@@ -259,9 +258,8 @@
 
   - On the client system
 
-    If the NFS/RDMA client was built as a module
-    (CONFIG_SUNRPC_XPRT_RDMA_CLIENT=m in kernel config), load the RDMA client
-    module:
+    If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+    kernel config), load the RDMA client module:
 
     $ modprobe xprtrdma.ko
 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 6f7fafd..d411ca6 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -424,6 +424,7 @@
 Referenced:          892 kB
 Anonymous:             0 kB
 Swap:                  0 kB
+SwapPss:               0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 Locked:              374 kB
@@ -433,16 +434,23 @@
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
 (size), the amount of the mapping that is currently resident in RAM (RSS), the
 process' proportional share of this mapping (PSS), the number of clean and
-dirty private pages in the mapping.  Note that even a page which is part of a
-MAP_SHARED mapping, but has only a single pte mapped, i.e.  is currently used
-by only one process, is accounted as private and not as shared.  "Referenced"
-indicates the amount of memory currently marked as referenced or accessed.
+dirty private pages in the mapping.
+
+The "proportional set size" (PSS) of a process is the count of pages it has
+in memory, where each page is divided by the number of processes sharing it.
+So if a process has 1000 pages all to itself, and 1000 shared with one other
+process, its PSS will be 1500.
+Note that even a page which is part of a MAP_SHARED mapping, but has only
+a single pte mapped, i.e.  is currently used by only one process, is accounted
+as private and not as shared.
+"Referenced" indicates the amount of memory currently marked as referenced or
+accessed.
 "Anonymous" shows the amount of memory that does not belong to any file.  Even
 a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
 and a page is modified, the file page is replaced by a private anonymous copy.
 "Swap" shows how much would-be-anonymous memory is also used, but out on
 swap.
-
+"SwapPss" shows the proportional swap share of this mapping.
 "VmFlags" field deserves a separate description. This member represents the kernel
 flags associated with the particular virtual memory area in two letter encoded
 manner. The codes are the following:
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 5eb8456..8c6f07a 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -769,7 +769,7 @@
 	to stall to allow flushers a chance to complete some IO. Ordinarily
 	it can use PageDirty and PageWriteback but some filesystems have
 	more complex state (unstable pages in NFS prevent reclaim) or
-	do not set those flags due to locking problems (jbd). This callback
+	do not set those flags due to locking problems. This callback
 	allows a filesystem to indicate to the VM if a page should be
 	treated as dirty or writeback for the purposes of stalling.
 
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
index 1de43ae..179beb2 100644
--- a/Documentation/gpio/00-INDEX
+++ b/Documentation/gpio/00-INDEX
@@ -6,6 +6,9 @@
 	- How to obtain and use GPIOs in a driver
 driver.txt
 	- How to write a GPIO driver
+drivers-on-gpio.txt
+	- Drivers in other subsystems that can use GPIO to provide more
+	  complex functionality.
 board.txt
 	- How to assign GPIOs to a consumer device and a function
 sysfs.txt
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 75542b9..a206639 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -237,6 +237,39 @@
 should not have to care about the physical line level.
 
 
+The active-low property
+-----------------------
+
+As a driver should not have to care about the physical line level, all of the
+gpiod_set_value_xxx() or gpiod_set_array_value_xxx() functions operate with
+the *logical* value. With this they take the active-low property into account.
+This means that they check whether the GPIO is configured to be active-low,
+and if so, they manipulate the passed value before the physical line level is
+driven.
+
+With this, all the gpiod_set_(array)_value_xxx() functions interpret the
+parameter "value" as "active" ("1") or "inactive" ("0"). The physical line
+level will be driven accordingly.
+
+As an example, if the active-low property for a dedicated GPIO is set, and
+gpiod_set_(array)_value_xxx() is passed "active" ("1"), the physical line level
+will be driven low.
+
+To summarize:
+
+Function (example)               active-low property  physical line
+gpiod_set_raw_value(desc, 0);        don't care           low
+gpiod_set_raw_value(desc, 1);        don't care           high
+gpiod_set_value(desc, 0);       default (active-high)     low
+gpiod_set_value(desc, 1);       default (active-high)     high
+gpiod_set_value(desc, 0);             active-low          high
+gpiod_set_value(desc, 1);             active-low          low
+
+Please note again that the set_raw/get_raw functions should be avoided as much
+as possible, especially by drivers which should not care about the actual
+physical line level but should worry about the logical value instead.
+
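+For instance, a driver asserting a reset signal does not need to know the
+board's polarity; a minimal sketch (the device handle and the consumer name
+"reset" are illustrative):
+
+	struct gpio_desc *reset;
+
+	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(reset))
+		return PTR_ERR(reset);
+
+	gpiod_set_value(reset, 1);	/* assert: line driven low if active-low */
+	usleep_range(1000, 2000);
+	gpiod_set_value(reset, 0);	/* deassert */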
+
 Set multiple GPIO outputs with a single function call
 -----------------------------------------------------
 The following functions set the output values of an array of GPIOs:
diff --git a/Documentation/gpio/drivers-on-gpio.txt b/Documentation/gpio/drivers-on-gpio.txt
new file mode 100644
index 0000000..f612132
--- /dev/null
+++ b/Documentation/gpio/drivers-on-gpio.txt
@@ -0,0 +1,95 @@
+Subsystem drivers using GPIO
+============================
+
+Note that standard kernel drivers exist for common GPIO tasks and will provide
+the right in-kernel and userspace APIs/ABIs for the job, and that these
+drivers can quite easily interconnect with other kernel subsystems using
+hardware descriptions such as device tree or ACPI:
+
+- leds-gpio: drivers/leds/leds-gpio.c will handle LEDs connected to GPIO
+  lines, giving you the LED sysfs interface
+
+- ledtrig-gpio: drivers/leds/trigger/ledtrig-gpio.c will provide a LED trigger,
+  i.e. a LED will turn on/off in response to a GPIO line going high or low
+  (and that LED may in turn use the leds-gpio as per above).
+
+- gpio-keys: drivers/input/keyboard/gpio_keys.c is used when your GPIO line
+  can generate interrupts in response to a key press. Also supports debounce.
+
+- gpio-keys-polled: drivers/input/keyboard/gpio_keys_polled.c is used when your
+  GPIO line cannot generate interrupts, so it needs to be periodically polled
+  by a timer.
+
+- gpio_mouse: drivers/input/mouse/gpio_mouse.c is used to provide a mouse with
+  up to three buttons by simply using GPIOs and no mouse port. You can cut the
+  mouse cable and connect the wires to GPIO lines or solder a mouse connector
+  to the lines for a more permanent solution of this type.
+
+- gpio-beeper: drivers/input/misc/gpio-beeper.c is used to provide a beep from
+  an external speaker connected to a GPIO line.
+
+- gpio-tilt-polled: drivers/input/misc/gpio_tilt_polled.c provides tilt
+  detection switches using GPIO, which is useful for your homebrew pinball
+  machine if for nothing else. It can detect different tilt angles of the
+  monitored object.
+
+- extcon-gpio: drivers/extcon/extcon-gpio.c is used when you need to read an
+  external connector status, such as a headset line for an audio driver or an
+  HDMI connector. It will provide a better userspace sysfs interface than GPIO.
+
+- restart-gpio: drivers/power/gpio-restart.c is used to restart/reboot the
+  system by pulling a GPIO line and will register a restart handler so
+  userspace can issue the right system call to restart the system.
+
+- poweroff-gpio: drivers/power/gpio-poweroff.c is used to power the system down
+  by pulling a GPIO line and will register a pm_power_off() callback so that
+  userspace can issue the right system call to power down the system.
+
+- gpio-gate-clock: drivers/clk/clk-gpio-gate.c is used to control a gated clock
+  (off/on) that uses a GPIO, and integrates with the clock subsystem.
+
+- i2c-gpio: drivers/i2c/busses/i2c-gpio.c is used to drive an I2C bus
+  (two wires, SDA and SCL lines) by hammering (bitbang) two GPIO lines. It will
+  appear as any other I2C bus to the system and makes it possible to connect
+  drivers for the I2C devices on the bus like any other I2C bus driver.
+
+- spi_gpio: drivers/spi/spi-gpio.c is used to drive an SPI bus (variable number
+  of wires, at least SCK and optionally MISO, MOSI and chip select lines) using
+  GPIO hammering (bitbang). It will appear as any other SPI bus on the system
+  and makes it possible to connect drivers for SPI devices on the bus like
+  any other SPI bus driver. For example any MMC/SD card can then be connected
+  to this SPI bus by using the mmc_spi host from the MMC/SD card subsystem.
+
+- w1-gpio: drivers/w1/masters/w1-gpio.c is used to drive a one-wire bus using
+  a GPIO line, integrating with the W1 subsystem and handling devices on
+  the bus like any other W1 device.
+
+- gpio-fan: drivers/hwmon/gpio-fan.c is used to control a fan for cooling the
+  system, connected to a GPIO line (and optionally a GPIO alarm line),
+  presenting all the right in-kernel and sysfs interfaces to make your system
+  not overheat.
+
+- gpio-regulator: drivers/regulator/gpio-regulator.c is used to control a
+  regulator providing a certain voltage by pulling a GPIO line, integrating
+  with the regulator subsystem and giving you all the right interfaces.
+
+- gpio-wdt: drivers/watchdog/gpio_wdt.c is used to provide a watchdog timer
+  that will periodically "ping" hardware connected to a GPIO line by toggling
+  it from 1-to-0-to-1. If that hardware does not receive its "ping"
+  periodically, it will reset the system.
+
+- gpio-nand: drivers/mtd/nand/gpio.c is used to connect a NAND flash chip to
+  a set of simple GPIO lines: RDY, NCE, ALE, CLE, NWP. It interacts with the
+  NAND flash MTD subsystem and provides chip access and partition parsing like
+  any other NAND driving hardware.
+
+Apart from this there are special GPIO drivers in subsystems like MMC/SD to
+read card detect and write protect GPIO lines, and in the TTY serial subsystem
+to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
+MTD NOR flash has add-ons for extra GPIO lines too, though the address bus is
+usually connected directly to the flash.
+
+Use those instead of talking directly to the GPIOs using sysfs; they integrate
+with kernel frameworks better than your userspace code could. Needless to say,
+just using the appropriate kernel drivers will simplify and speed up your
+embedded hacking in particular by providing ready-made components.
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index 535b6a8..0700b55 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -20,11 +20,10 @@
 standard kernels won't know about. And for some tasks, simple userspace
 GPIO drivers could be all that the system really needs.
 
-Note that standard kernel drivers exist for common "LEDs and Buttons"
-GPIO tasks:  "leds-gpio" and "gpio_keys", respectively. Use those
-instead of talking directly to the GPIOs; they integrate with kernel
-frameworks better than your userspace code could.
-
+DO NOT ABUSE SYSFS TO CONTROL HARDWARE THAT HAS PROPER KERNEL DRIVERS.
+PLEASE READ THE DOCUMENT NAMED "drivers-on-gpio.txt" IN THIS DOCUMENTATION
+DIRECTORY TO AVOID REINVENTING KERNEL WHEELS IN USERSPACE. I MEAN IT.
+REALLY.
 
 Paths in Sysfs
 --------------
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index 0e2d17b..c3dbb3b 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -20,6 +20,7 @@
  * (type=5) Analog Devices evaluation boards: ADM1025, ADM1030, ADM1031
  * (type=6) Barco LPT->DVI (K5800236) adapter
  * (type=7) One For All JP1 parallel port adapter
+ * (type=8) VCT-jig
 
 These devices use different pinout configurations, so you have to tell
 the driver what you have, using the type module parameter. There is no
diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface
index 2dee4e2..61ed05c 100644
--- a/Documentation/i2c/slave-interface
+++ b/Documentation/i2c/slave-interface
@@ -31,10 +31,13 @@
 ===========
 
 I2C slave backends behave like standard I2C clients. So, you can instantiate
-them as described in the document 'instantiating-devices'. A quick example for
-instantiating the slave-eeprom driver from userspace at address 0x64 on bus 1:
+them as described in the document 'instantiating-devices'. The only difference
+is that i2c slave backends have their own address space. So, you have to add
+0x1000 to the address you would originally request. An example for
+instantiating the slave-eeprom driver from userspace at the 7 bit address 0x64
+on bus 1:
 
-  # echo slave-24c02 0x64 > /sys/bus/i2c/devices/i2c-1/new_device
+  # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
 
 Each backend should come with separate documentation to describe its specific
 behaviour and setup.
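A hedged illustration of the same offset convention from C (a sketch
only; it assumes bus 1 and the slave-24c02 backend used in the example
above):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/i2c/devices/i2c-1/new_device", "w");

		if (!f)
			return 1;
		/* 0x1000 + 0x64: address in the i2c slave address space */
		fprintf(f, "slave-24c02 0x1064\n");
		return fclose(f) ? 1 : 0;
	}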
diff --git a/Documentation/i2c/ten-bit-addresses b/Documentation/i2c/ten-bit-addresses
index cdfe139..7b2d11e 100644
--- a/Documentation/i2c/ten-bit-addresses
+++ b/Documentation/i2c/ten-bit-addresses
@@ -2,6 +2,10 @@
 addresses, and an extended set of 10 bit addresses. The sets of addresses
 do not intersect: the 7 bit address 0x10 is not the same as the 10 bit
 address 0x10 (though a single device could respond to both of them).
+To avoid ambiguity, the user sees 10 bit addresses mapped to a different
+address space, namely 0xa000-0xa3ff. The leading 0xa (= 10) represents the
+10 bit mode. This is used for creating device names in sysfs. It is also
+needed when instantiating 10 bit devices via the new_device file in sysfs.
 
 I2C messages to and from 10-bit address devices have a different format.
 See the I2C specification for the details.
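A small sketch of the mapping as plain C arithmetic (the bus number and
the address are made up for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ten_bit = 0x10;	/* example 10 bit address */
		unsigned int mapped = 0xa000 | ten_bit;

		/* sysfs device name as described above, e.g. "1-a010" */
		printf("1-%04x\n", mapped);
		return 0;
	}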
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 64df08d..df1b25e 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -265,7 +265,7 @@
 's'	all	linux/cdk.h
 't'	00-7F	linux/ppp-ioctl.h
 't'	80-8F	linux/isdn_ppp.h
-'t'	90	linux/toshiba.h
+'t'	90-91	linux/toshiba.h		toshiba and toshiba_acpi SMM
 'u'	00-1F	linux/smb_fs.h		gone
 'u'	20-3F	linux/uvcvideo.h	USB video class host driver
 'v'	00-1F	linux/ext2_fs.h		conflict!
@@ -303,6 +303,7 @@
 0xA3	80-8F	Port ACL		in development:
 					<mailto:tlewis@mindspring.com>
 0xA3	90-9F	linux/dtlk.h
+0xAA	00-3F	linux/uapi/linux/userfaultfd.h
 0xAB	00-1F	linux/nbd.h
 0xAC	00-1F	linux/raw.h
 0xAD	00	Netfilter device	in development:
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 6466704..0ff6a46 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -174,6 +174,11 @@
 
 The value can be overridden in which case the default value is ignored.
 
+KBUILD_SIGN_PIN
+--------------------------------------------------
+This variable allows a passphrase or PIN to be passed to the sign-file
+utility when signing kernel modules, if the private key requires one.
+
 KBUILD_MODPOST_WARN
 --------------------------------------------------
 KBUILD_MODPOST_WARN can be set to avoid errors in case of undefined
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b09dc2c..22a4b68 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1317,6 +1317,10 @@
 			     <bus_id>,<clkrate>
 
 	i8042.debug	[HW] Toggle i8042 debug mode
+	i8042.unmask_kbd_data
+			[HW] Enable printing of interrupt data from the KBD port
+			     (disabled by default, and as a pre-condition
+			     requires that i8042.debug=1 be enabled)
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
 			     keyboard and cannot control its state
@@ -2281,6 +2285,15 @@
 			The default parameter value of '0' causes the kernel
 			not to attempt recovery of lost locks.
 
+	nfs4.layoutstats_timer=
+			[NFSv4.2] Change the rate at which the kernel sends
+			layoutstats to the pNFS metadata server.
+
+			Setting this value to 0 causes the kernel to use
+			whatever value is the default set by the layout
+			driver. A non-zero value sets the minimum interval
+			in seconds between layoutstats transmissions.
+
 	nfsd.nfs4_disable_idmapping=
 			[NFSv4] When set to the default of '1', the NFSv4
 			server will return only numeric uids and gids to
@@ -4093,6 +4106,13 @@
 			plus one apbt timer for broadcast timer.
 			x86_intel_mid_timer=apbt_only | lapic_and_apbt
 
+	xen_512gb_limit		[KNL,X86-64,XEN]
+			Restricts the kernel running paravirtualized under Xen
+			to use only up to 512 GB of RAM. The reason to do so is
+			that crash analysis tools and Xen tools for doing domain
+			save/restore/migration must be enabled to handle larger
+			domains.
+
 	xen_emul_unplug=		[HW,X86,XEN]
 			Unplug Xen emulated devices
 			Format: [unplug0,][unplug1]
diff --git a/Documentation/md-cluster.txt b/Documentation/md-cluster.txt
index de1af7d..1b79436 100644
--- a/Documentation/md-cluster.txt
+++ b/Documentation/md-cluster.txt
@@ -91,7 +91,7 @@
     this message inappropriate or redundant.
 
  3. sender write LVB.
-    sender down-convert MESSAGE from EX to CR
+    sender down-convert MESSAGE from EX to CW
     sender try to get EX of ACK
     [ wait until all receiver has *processed* the MESSAGE ]
 
@@ -112,7 +112,7 @@
     sender down-convert ACK from EX to CR
     sender release MESSAGE
     sender release TOKEN
-                               receiver upconvert to EX of MESSAGE
+                               receiver upconvert to PR of MESSAGE
                                receiver get CR of ACK
                                receiver release MESSAGE
 
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index eafa6a5..2ba8461 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -2327,9 +2327,7 @@
 explicit lock operations, described later).  These include:
 
 	xchg();
-	cmpxchg();
 	atomic_xchg();			atomic_long_xchg();
-	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_inc_return();		atomic_long_inc_return();
 	atomic_dec_return();		atomic_long_dec_return();
 	atomic_add_return();		atomic_long_add_return();
@@ -2342,7 +2340,9 @@
 	test_and_clear_bit();
 	test_and_change_bit();
 
-	/* when succeeds (returns 1) */
+	/* when succeeds */
+	cmpxchg();
+	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_add_unless();		atomic_long_add_unless();
 
 These are used for such things as implementing ACQUIRE-class and RELEASE-class
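A minimal sketch of the "when succeeds" distinction noted above; the
lock itself is made up for illustration:

	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic_t sample_lock = ATOMIC_INIT(0);

	static bool sample_trylock(void)
	{
		/*
		 * On success the implied full barriers order the critical
		 * section after the acquisition; a failed cmpxchg gives
		 * no ordering guarantee at all.
		 */
		return atomic_cmpxchg(&sample_lock, 0, 1) == 0;
	}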
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index c72702e..a78bf1f 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -89,6 +89,32 @@
      their signatures checked without causing a dependency loop.
 
 
+ (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY)
+
+     Setting this option to something other than its default of
+     "certs/signing_key.pem" will disable the autogeneration of signing keys
+     and allow the kernel modules to be signed with a key of your choosing.
+     The string provided should identify a file containing both a private key
+     and its corresponding X.509 certificate in PEM form, or — on systems where
+     the OpenSSL ENGINE_pkcs11 is functional — a PKCS#11 URI as defined by
+     RFC7512. In the latter case, the PKCS#11 URI should reference both a
+     certificate and a private key.
+
+     If the PEM file containing the private key is encrypted, or if the
+     PKCS#11 token requires a PIN, this can be provided at build time by
+     means of the KBUILD_SIGN_PIN variable.
+
+
+ (5) "Additional X.509 keys for default system keyring" (CONFIG_SYSTEM_TRUSTED_KEYS)
+
+     This option can be set to the filename of a PEM-encoded file containing
+     additional certificates which will be included in the system keyring by
+     default.
+
+Note that enabling module signing adds a dependency on the OpenSSL devel
+packages to the kernel build process for the tool that does the signing.
+
+
 =======================
 GENERATING SIGNING KEYS
 =======================
@@ -100,16 +126,16 @@
 kernel so that it can be used to check the signatures as the modules are
 loaded.
 
-Under normal conditions, the kernel build will automatically generate a new
-keypair using openssl if one does not exist in the files:
+Under normal conditions, when CONFIG_MODULE_SIG_KEY is unchanged from its
+default, the kernel build will automatically generate a new keypair using
+openssl if one does not exist in the file:
 
-	signing_key.priv
-	signing_key.x509
+	certs/signing_key.pem
 
 during the building of vmlinux (the public part of the key needs to be built
 into vmlinux) using parameters in the:
 
-	x509.genkey
+	certs/x509.genkey
 
 file (which is also generated if it does not already exist).
 
@@ -135,8 +161,12 @@
 generate the public/private key files:
 
 	openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
-	   -config x509.genkey -outform DER -out signing_key.x509 \
-	   -keyout signing_key.priv
+	   -config x509.genkey -outform PEM -out kernel_key.pem \
+	   -keyout kernel_key.pem
+
+The full pathname for the resulting kernel_key.pem file can then be specified
+in the CONFIG_MODULE_SIG_KEY option, and the certificate and key therein will
+be used instead of an autogenerated keypair.
 
 
 =========================
@@ -152,10 +182,9 @@
 	302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
 	...
 
-Beyond the public key generated specifically for module signing, any file
-placed in the kernel source root directory or the kernel build root directory
-whose name is suffixed with ".x509" will be assumed to be an X.509 public key
-and will be added to the keyring.
+Beyond the public key generated specifically for module signing, additional
+trusted certificates can be provided in a PEM-encoded file referenced by the
+CONFIG_SYSTEM_TRUSTED_KEYS configuration option.
 
 Further, the architecture code may take public keys from a hardware store and
 add those in also (e.g. from the UEFI key database).
@@ -181,7 +210,7 @@
 the Linux kernel source tree.  The script requires 4 arguments:
 
 	1.  The hash algorithm (e.g., sha256)
-	2.  The private key filename
+	2.  The private key filename or PKCS#11 URI
 	3.  The public key filename
 	4.  The kernel module to be signed
 
@@ -194,6 +223,9 @@
 doesn't, you should make sure that hash algorithm is either built into the
 kernel or can be loaded without requiring itself.
 
+If the private key requires a passphrase or PIN, it can be provided in the
+$KBUILD_SIGN_PIN environment variable.
+
 
 ============================
 SIGNED MODULES AND STRIPPING
diff --git a/Documentation/security/Smack.txt b/Documentation/security/Smack.txt
index de5e1ae..5e6d07f 100644
--- a/Documentation/security/Smack.txt
+++ b/Documentation/security/Smack.txt
@@ -28,6 +28,10 @@
 configurations are intolerant of IP options and can impede
 access to systems that use them as Smack does.
 
+Smack is used in the Tizen operating system. Please
+go to http://wiki.tizen.org for information about how
+Smack is used in Tizen.
+
 The current git repository for Smack user space is:
 
 	git://github.com/smack-team/smack.git
@@ -108,6 +112,8 @@
 on /sys/fs/smackfs.
 
 access
+	Provided for backward compatibility. The access2 interface
+	is preferred and should be used instead.
 	This interface reports whether a subject with the specified
 	Smack label has a particular access to an object with a
 	specified Smack label. Write a fixed format access rule to
@@ -136,6 +142,8 @@
 	those in the fourth string. If there is no such rule it will be
 	created using the access specified in the third and the fourth strings.
 cipso
+	Provided for backward compatibility. The cipso2 interface
+	is preferred and should be used instead.
 	This interface allows a specific CIPSO header to be assigned
 	to a Smack label. The format accepted on write is:
 		"%24s%4d%4d"["%4d"]...
@@ -157,7 +165,19 @@
 doi
 	This contains the CIPSO domain of interpretation used in
 	network packets.
+ipv6host
+	This interface allows specific IPv6 internet addresses to be
+	treated as single label hosts. Packets are sent to single
+	label hosts only from processes that have Smack write access
+	to the host label. All packets received from single label hosts
+	are given the specified label. The format accepted on write is:
+		"%h:%h:%h:%h:%h:%h:%h:%h label" or
+		"%h:%h:%h:%h:%h:%h:%h:%h/%d label".
+	The "::" address shortcut is not supported.
+	If label is "-DELETE" a matched entry will be deleted.
 load
+	Provided for backward compatibility. The load2 interface
+	is preferred and should be used instead.
 	This interface allows access control rules in addition to
 	the system defined rules to be specified. The format accepted
 	on write is:
@@ -181,6 +201,8 @@
 	permissions that are not allowed. The string "r-x--" would
 	specify read and execute access.
 load-self
+	Provided for backward compatibility. The load-self2 interface
+	is preferred and should be used instead.
 	This interface allows process specific access rules to be
 	defined. These rules are only consulted if access would
 	otherwise be permitted, and are intended to provide additional
@@ -205,6 +227,8 @@
 	received from single label hosts are given the specified
 	label. The format accepted on write is:
 		"%d.%d.%d.%d label" or "%d.%d.%d.%d/%d label".
+	If the label specified is "-CIPSO" the address is treated
+	as a host that supports CIPSO headers.
 onlycap
 	This contains labels processes must have for CAP_MAC_ADMIN
 	and CAP_MAC_OVERRIDE to be effective. If this file is empty
@@ -232,7 +256,8 @@
 	is dangerous and can ruin the proper labeling of your system.
 	It should never be used in production.
 
-You can add access rules in /etc/smack/accesses. They take the form:
+If you are using the smackload utility
+you can add access rules in /etc/smack/accesses. They take the form:
 
     subjectlabel objectlabel access
 
diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt
index 227a63f..d9ee7d7 100644
--- a/Documentation/security/Yama.txt
+++ b/Documentation/security/Yama.txt
@@ -1,9 +1,7 @@
-Yama is a Linux Security Module that collects a number of system-wide DAC
-security protections that are not handled by the core kernel itself. To
-select it at boot time, specify "security=yama" (though this will disable
-any other LSM).
-
-Yama is controlled through sysctl in /proc/sys/kernel/yama:
+Yama is a Linux Security Module that collects system-wide DAC security
+protections that are not handled by the core kernel itself. This is
+selectable at build-time with CONFIG_SECURITY_YAMA, and can be controlled
+at run-time through sysctls in /proc/sys/kernel/yama:
 
 - ptrace_scope
 
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index c4407a4..f4cb0b2 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -1,7 +1,22 @@
 			Static Keys
 			-----------
 
-By: Jason Baron <jbaron@redhat.com>
+DEPRECATED API:
+
+The use of 'struct static_key' directly is now DEPRECATED. In addition,
+static_key_{true,false}() is also DEPRECATED. I.e., DO NOT use the following:
+
+struct static_key false = STATIC_KEY_INIT_FALSE;
+struct static_key true = STATIC_KEY_INIT_TRUE;
+static_key_true()
+static_key_false()
+
+The updated API replacements are:
+
+DEFINE_STATIC_KEY_TRUE(key);
+DEFINE_STATIC_KEY_FALSE(key);
+static_branch_likely()
+static_branch_unlikely()
 
 0) Abstract
 
@@ -9,22 +24,22 @@
 performance-sensitive fast-path kernel code, via a GCC feature and a code
 patching technique. A quick example:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
 	...
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 	...
-	static_key_slow_inc();
+	static_branch_enable(&key);
 	...
-	static_key_slow_inc();
+	static_branch_disable(&key);
 	...
 
-The static_key_false() branch will be generated into the code with as little
+The static_branch_unlikely() branch will be generated into the code with as little
 impact to the likely code path as possible.
 
 
@@ -56,7 +71,7 @@
 
 For example, if we have a simple branch that is disabled by default:
 
-	if (static_key_false(&key))
+	if (static_branch_unlikely(&key))
 		printk("I am the true branch\n");
 
 Thus, by default the 'printk' will not be emitted. And the code generated will
@@ -75,68 +90,55 @@
 
 In order to make use of this optimization you must first define a key:
 
-	struct static_key key;
-
-Which is initialized as:
-
-	struct static_key key = STATIC_KEY_INIT_TRUE;
+	DEFINE_STATIC_KEY_TRUE(key);
 
 or:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
-If the key is not initialized, it is default false. The 'struct static_key',
-must be a 'global'. That is, it can't be allocated on the stack or dynamically
+
+The key must be global, that is, it can't be allocated on the stack or dynamically
 allocated at run-time.
 
 The key is then used in code as:
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 Or:
 
-        if (static_key_true(&key))
+        if (static_branch_likely(&key))
                 do likely code
         else
                 do unlikely code
 
-A key that is initialized via 'STATIC_KEY_INIT_FALSE', must be used in a
-'static_key_false()' construct. Likewise, a key initialized via
-'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A
-single key can be used in many branches, but all the branches must match the
-way that the key has been initialized.
+Keys defined via DEFINE_STATIC_KEY_TRUE() or DEFINE_STATIC_KEY_FALSE() may
+be used in either static_branch_likely() or static_branch_unlikely()
+statements.
 
-The branch(es) can then be switched via:
+Branch(es) can be set true via:
 
-	static_key_slow_inc(&key);
+static_branch_enable(&key);
+
+or false via:
+
+static_branch_disable(&key);
+
+The branch(es) can then be switched via reference counts:
+
+	static_branch_inc(&key);
 	...
-	static_key_slow_dec(&key);
+	static_branch_dec(&key);
 
-Thus, 'static_key_slow_inc()' means 'make the branch true', and
-'static_key_slow_dec()' means 'make the branch false' with appropriate
+Thus, 'static_branch_inc()' means 'make the branch true', and
+'static_branch_dec()' means 'make the branch false' with appropriate
 reference counting. For example, if the key is initialized true, a
-static_key_slow_dec(), will switch the branch to false. And a subsequent
-static_key_slow_inc(), will change the branch back to true. Likewise, if the
-key is initialized false, a 'static_key_slow_inc()', will change the branch to
-true. And then a 'static_key_slow_dec()', will again make the branch false.
-
-An example usage in the kernel is the implementation of tracepoints:
-
-        static inline void trace_##name(proto)                          \
-        {                                                               \
-                if (static_key_false(&__tracepoint_##name.key))		\
-                        __DO_TRACE(&__tracepoint_##name,                \
-                                TP_PROTO(data_proto),                   \
-                                TP_ARGS(data_args),                     \
-                                TP_CONDITION(cond));                    \
-        }
-
-Tracepoints are disabled by default, and can be placed in performance critical
-pieces of the kernel. Thus, by using a static key, the tracepoints can have
-absolutely minimal impact when not in use.
+static_branch_dec() will switch the branch to false. And a subsequent
+static_branch_inc() will change the branch back to true. Likewise, if the
+key is initialized false, a 'static_branch_inc()' will change the branch to
+true. And then a 'static_branch_dec()' will again make the branch false.
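Putting the pieces above together, a sketch of a hypothetical fast path
(the key and function names are illustrative, not from the kernel tree):

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static DEFINE_STATIC_KEY_FALSE(sample_tracing_key);

	void sample_fast_path(void)
	{
		/* compiles to a straight-line no-op until enabled */
		if (static_branch_unlikely(&sample_tracing_key))
			pr_info("rarely-taken instrumentation path\n");
	}

	void sample_set_tracing(bool on)
	{
		if (on)
			static_branch_enable(&sample_tracing_key);
		else
			static_branch_disable(&sample_tracing_key);
	}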
 
 
 4) Architecture level code patching interface, 'jump labels'
@@ -150,9 +152,12 @@
 
 * #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
 
-* __always_inline bool arch_static_branch(struct static_key *key), see:
+* __always_inline bool arch_static_branch(struct static_key *key, bool branch), see:
 					arch/x86/include/asm/jump_label.h
 
+* __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch),
+					see: arch/x86/include/asm/jump_label.h
+
 * void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
 					see: arch/x86/kernel/jump_label.c
 
@@ -173,7 +178,7 @@
 {
         int pid;
 
-+       if (static_key_false(&key))
++       if (static_branch_unlikely(&key))
 +               printk("I am the true branch\n");
 
         rcu_read_lock();
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9c3f2f8..a4482fc 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -349,7 +349,7 @@
 
 (i < j):
   zone[i]->protection[j]
-  = (total sums of present_pages from zone[i+1] to zone[j] on the node)
+  = (total sums of managed_pages from zone[i+1] to zone[j] on the node)
     / lowmem_reserve_ratio[i];
 (i = j):
    (should not be protected. = 0;
@@ -360,7 +360,7 @@
     256 (if zone[i] means DMA or DMA32 zone)
     32  (others).
 As above expression, they are reciprocal number of ratio.
-256 means 1/256. # of protection pages becomes about "0.39%" of total present
+256 means 1/256. # of protection pages becomes about "0.39%" of total managed
 pages of higher zones on the node.
 
 If you would like to protect more pages, smaller values are effective.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 0e307c9..13f5619 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -75,7 +75,8 @@
 
 'e'     - Send a SIGTERM to all processes, except for init.
 
-'f'	- Will call oom_kill to kill a memory hog process.
+'f'	- Will call the oom killer to kill a memory hog process, but will not
+	  panic if nothing can be killed.
 
 'g'	- Used by kgdb (kernel debugger)
 
@@ -119,6 +120,7 @@
 
 'x'	- Used by xmon interface on ppc/powerpc platforms.
           Show global PMU Registers on sparc64.
+          Dump all TLB entries on MIPS.
 
 'y'	- Show global CPU Registers [SPARC-64 specific]
 
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 7ddb1e3..ef621d3 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -346,6 +346,11 @@
 	  x86-tsc: Architectures may define their own clocks. For
 	  	   example, x86 uses its own TSC cycle clock here.
 
+	  ppc-tb: This uses the powerpc timebase register value.
+		  This is in sync across CPUs and can also be used
+		  to correlate events across hypervisor/guest if
+		  tb_offset is known.
+
 	To set a clock, simply echo the clock name into this file.
 
 	  echo global > trace_clock
@@ -686,6 +691,8 @@
 	 The marks are determined by the difference between this
 	 current trace and the next trace.
 	  '$' - greater than 1 second
+	  '@' - greater than 100 millisecond
+	  '*' - greater than 10 millisecond
 	  '#' - greater than 1000 microsecond
 	  '!' - greater than 100 microsecond
 	  '+' - greater than 10 microsecond
@@ -1939,26 +1946,49 @@
 
   ie:
 
-  0)               |    up_write() {
-  0)   0.646 us    |      _spin_lock_irqsave();
-  0)   0.684 us    |      _spin_unlock_irqrestore();
-  0)   3.123 us    |    }
-  0)   0.548 us    |    fput();
-  0) + 58.628 us   |  }
+  3) # 1837.709 us |          } /* __switch_to */
+  3)               |          finish_task_switch() {
+  3)   0.313 us    |            _raw_spin_unlock_irq();
+  3)   3.177 us    |          }
+  3) # 1889.063 us |        } /* __schedule */
+  3) ! 140.417 us  |      } /* __schedule */
+  3) # 2034.948 us |    } /* schedule */
+  3) * 33998.59 us |  } /* schedule_preempt_disabled */
 
   [...]
 
-  0)               |      putname() {
-  0)               |        kmem_cache_free() {
-  0)   0.518 us    |          __phys_addr();
-  0)   1.757 us    |        }
-  0)   2.861 us    |      }
-  0) ! 115.305 us  |    }
-  0) ! 116.402 us  |  }
+  1)   0.260 us    |              msecs_to_jiffies();
+  1)   0.313 us    |              __rcu_read_unlock();
+  1) + 61.770 us   |            }
+  1) + 64.479 us   |          }
+  1)   0.313 us    |          rcu_bh_qs();
+  1)   0.313 us    |          __local_bh_enable();
+  1) ! 217.240 us  |        }
+  1)   0.365 us    |        idle_cpu();
+  1)               |        rcu_irq_exit() {
+  1)   0.417 us    |          rcu_eqs_enter_common.isra.47();
+  1)   3.125 us    |        }
+  1) ! 227.812 us  |      }
+  1) ! 457.395 us  |    }
+  1) @ 119760.2 us |  }
+
+  [...]
+
+  2)               |    handle_IPI() {
+  1)   6.979 us    |                  }
+  2)   0.417 us    |      scheduler_ipi();
+  1)   9.791 us    |                }
+  1) + 12.917 us   |              }
+  2)   3.490 us    |    }
+  1) + 15.729 us   |            }
+  1) + 18.542 us   |          }
+  2) $ 3594274 us  |  }
 
   + means that the function exceeded 10 usecs.
   ! means that the function exceeded 100 usecs.
   # means that the function exceeded 1000 usecs.
+  * means that the function exceeded 10 msecs.
+  @ means that the function exceeded 100 msecs.
   $ means that the function exceeded 1 sec.
 
 
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 030977f..54dd9b9 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -329,7 +329,14 @@
 
 3) hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
 
-4) The libhugetlbfs (http://libhugetlbfs.sourceforge.net) library provides a
-   wide range of userspace tools to help with huge page usability, environment
-   setup, and control. Furthermore it provides useful test cases that should be
-   used when modifying code to ensure no regressions are introduced.
+4) The libhugetlbfs (https://github.com/libhugetlbfs/libhugetlbfs) library
+   provides a wide range of userspace tools to help with huge page usability,
+   environment setup, and control.
+
+Kernel development regression testing
+=====================================
+
+The most complete set of hugetlb tests are in the libhugetlbfs repository.
+If you modify any hugetlb related code, use the libhugetlbfs test suite
+to check for regressions.  In addition, if you add any new hugetlb
+functionality, please add appropriate tests to libhugetlbfs.
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 6bfbc17..3cd3843 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -16,11 +16,17 @@
     * Bits 0-4   swap type if swapped
     * Bits 5-54  swap offset if swapped
     * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
-    * Bits 56-60 zero
-    * Bit  61    page is file-page or shared-anon
+    * Bit  56    page exclusively mapped (since 4.2)
+    * Bits 57-60 zero
+    * Bit  61    page is file-page or shared-anon (since 3.5)
     * Bit  62    page swapped
     * Bit  63    page present
 
+   Since Linux 4.0 only users with the CAP_SYS_ADMIN capability can get PFNs.
+   In 4.0 and 4.1 opens by unprivileged users fail with -EPERM.  Starting from
+   4.2 the PFN field is zeroed if the user does not have CAP_SYS_ADMIN.
+   Reason: information about PFNs helps in exploiting the Rowhammer vulnerability.
+
    If the page is not present but in swap, then the PFN contains an
    encoding of the swap file number and the page's offset into the
    swap. Unmapped pages return a null PFN. This allows determining
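A hedged userspace sketch of decoding one pagemap entry per the 4.2+
bit layout above (the PFN field reads as zero without CAP_SYS_ADMIN):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		uintptr_t addr = (uintptr_t)&psz;   /* any mapped address */
		int fd = open("/proc/self/pagemap", O_RDONLY);
		uint64_t ent;

		if (fd < 0 || pread(fd, &ent, sizeof(ent),
				    (addr / psz) * sizeof(ent)) != sizeof(ent))
			return 1;
		printf("present=%d swapped=%d exclusive=%d pfn=0x%llx\n",
		       (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
		       (int)(ent >> 56 & 1),
		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
		return 0;
	}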
@@ -159,3 +165,8 @@
 Reading from any of the files will return -EINVAL if you are not starting
 the read on an 8-byte boundary (e.g., if you sought an odd number of bytes
 into the file), or if the size of the read is not a multiple of 8 bytes.
+
+Before Linux 3.11 pagemap bits 55-60 were used for "page-shift" (which is
+always 12 on most architectures). Since Linux 3.11 their meaning changes
+after the first clear of soft-dirty bits. Since Linux 4.2 they are used for
+flags unconditionally.
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
new file mode 100644
index 0000000..70a3c94
--- /dev/null
+++ b/Documentation/vm/userfaultfd.txt
@@ -0,0 +1,144 @@
+= Userfaultfd =
+
+== Objective ==
+
+Userfaults allow the implementation of on-demand paging from userland
+and more generally they allow userland to take control of various
+memory page faults, something otherwise only the kernel code could do.
+
+For example, userfaults allow a proper and more efficient implementation
+of the PROT_NONE+SIGSEGV trick.
+
+== Design ==
+
+Userfaults are delivered and resolved through the userfaultfd syscall.
+
+The userfaultfd (aside from registering and unregistering virtual
+memory ranges) provides two primary functionalities:
+
+1) read/POLLIN protocol to notify a userland thread of the faults
+   happening
+
+2) various UFFDIO_* ioctls that can manage the virtual memory regions
+   registered in the userfaultfd, allowing userland to efficiently
+   resolve the userfaults it receives via 1) or to manage the virtual
+   memory in the background
+
+The real advantage of userfaults compared to regular virtual memory
+management of mremap/mprotect is that the userfaults in all their
+operations never involve heavyweight structures like vmas (in fact the
+userfaultfd runtime load never takes the mmap_sem for writing).
+
+Vmas are not suitable for page- (or hugepage) granular fault tracking
+when dealing with virtual address spaces that could span
+Terabytes. Too many vmas would be needed for that.
+
+The userfaultfd, once opened by invoking the syscall, can also be
+passed using unix domain sockets to a manager process, so the same
+manager process could handle the userfaults of a multitude of
+different processes without them being aware of what is going on
+(well of course unless they later try to use the userfaultfd
+themselves on the same region the manager is already tracking, which
+is a corner case that would currently return -EBUSY).
+
+== API ==
+
+When first opened the userfaultfd must be enabled invoking the
+UFFDIO_API ioctl specifying a uffdio_api.api value set to UFFD_API (or
+a later API version) which will specify the read/POLLIN protocol
+userland intends to speak on the UFFD and the uffdio_api.features
+userland requires. The UFFDIO_API ioctl, if successful (i.e. if the
+requested uffdio_api.api is also spoken by the running kernel and the
+requested features are going to be enabled), will return into
+uffdio_api.features and uffdio_api.ioctls two 64-bit bitmasks of,
+respectively, all the available features of the read(2) protocol and
+the generic ioctls available.
+
+Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should
+be invoked (if present in the returned uffdio_api.ioctls bitmask) to
+register a memory range in the userfaultfd by setting the
+uffdio_register structure accordingly. The uffdio_register.mode
+bitmask will specify to the kernel which kind of faults to track for
+the range (UFFDIO_REGISTER_MODE_MISSING would track missing
+pages). The UFFDIO_REGISTER ioctl will return the
+uffdio_register.ioctls bitmask of ioctls that are suitable to resolve
+userfaults on the range registered. Not all ioctls will necessarily be
+supported for all memory types depending on the underlying virtual
+memory backend (anonymous memory vs tmpfs vs real filebacked
+mappings).
+
+Userland can use the uffdio_register.ioctls to manage the virtual
+address space in the background (to add or potentially also remove
+memory from the userfaultfd registered range). This means a userfault
+could trigger just before userland maps the user-faulted page in the
+background.
+
+The primary ioctl to resolve userfaults is UFFDIO_COPY. That
+atomically copies a page into the userfault registered range and wakes
+up the blocked userfaults (unless uffdio_copy.mode &
+UFFDIO_COPY_MODE_DONTWAKE is set). Other ioctls work similarly to
+UFFDIO_COPY. They're atomic in the sense that nothing can see a
+half-copied page, since it'll keep userfaulting until the copy has
+finished.
+
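Tying the API steps above together, a minimal sketch with error
handling elided; it assumes UAPI headers from a kernel that ships the
syscall:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/userfaultfd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
		struct uffdio_api api = { .api = UFFD_API };
		void *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		struct uffdio_register reg = {
			.range = { .start = (unsigned long)area, .len = page },
			.mode = UFFDIO_REGISTER_MODE_MISSING,
		};

		ioctl(uffd, UFFDIO_API, &api);		/* enable the fd */
		ioctl(uffd, UFFDIO_REGISTER, &reg);	/* track missing pages */

		/* A second thread would now read() struct uffd_msg events
		 * and resolve them with UFFDIO_COPY, as described above. */
		return 0;
	}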
+== QEMU/KVM ==
+
+QEMU/KVM is using the userfaultfd syscall to implement postcopy live
+migration. Postcopy live migration is one form of memory
+externalization consisting of a virtual machine running with part or
+all of its memory residing on a different node in the cloud. The
+userfaultfd abstraction is generic enough that not a single line of
+KVM kernel code had to be modified in order to add postcopy live
+migration to QEMU.
+
+Guest async page faults, FOLL_NOWAIT and all other GUP features work
+just fine in combination with userfaults. Userfaults trigger async
+page faults in the guest scheduler so those guest processes that
+aren't waiting for userfaults (i.e. network bound) can keep running in
+the guest vcpus.
+
+It is generally beneficial to run one pass of precopy live migration
+just before starting postcopy live migration, in order to avoid
+generating userfaults for readonly guest regions.
+
+The implementation of postcopy live migration currently uses one
+single bidirectional socket but in the future two different sockets
+will be used (to reduce the latency of the userfaults to the minimum
+possible without having to decrease /proc/sys/net/ipv4/tcp_wmem).
+
+The QEMU in the source node writes all pages that it knows are missing
+in the destination node, into the socket, and the migration thread of
+the QEMU running in the destination node runs UFFDIO_COPY|ZEROPAGE
+ioctls on the userfaultfd in order to map the received pages into the
+guest (UFFDIO_ZEROPAGE is used if the source page was a zero page).
+
+A different postcopy thread in the destination node listens with
+poll() to the userfaultfd in parallel. When a POLLIN event is
+generated after a userfault triggers, the postcopy thread calls read()
+on the userfaultfd and receives the fault address (or -EAGAIN in case
+the userfault was already resolved and woken by a UFFDIO_COPY|ZEROPAGE
+run by the parallel QEMU migration thread).
+
+After the QEMU postcopy thread (running in the destination node) gets
+the userfault address it writes the information about the missing page
+into the socket. The QEMU source node receives the information and
+roughly "seeks" to that page address and continues sending all
+remaining missing pages from that new page offset. Soon after that
+(just the time to flush the tcp_wmem queue through the network) the
+migration thread in the QEMU running in the destination node will
+receive the page that triggered the userfault and it'll map it as
+usual with the UFFDIO_COPY|ZEROPAGE (without actually knowing if it
+was spontaneously sent by the source or if it was an urgent page
+requested through a userfault).
+
+By the time the userfaults start, the QEMU in the destination node
+doesn't need to keep any per-page state bitmap relative to the live
+migration around and a single per-page bitmap has to be maintained in
+the QEMU running in the source node to know which pages are still
+missing in the destination node. The bitmap in the source node is
+checked to find which missing pages to send in round robin and we seek
+over it when receiving incoming userfaults. After sending each page of
+course the bitmap is updated accordingly. It's also useful to avoid
+sending the same page twice (in case the userfault is read by the
+postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration
+thread).
diff --git a/MAINTAINERS b/MAINTAINERS
index cb5e818..d8a0aad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -643,9 +643,14 @@
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~gabbayo/linux.git
 S:	Supported
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
 F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
 F:	drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+F:	drivers/gpu/drm/amd/include/vi_structs.h
 F:	drivers/gpu/drm/radeon/radeon_kfd.c
 F:	drivers/gpu/drm/radeon/radeon_kfd.h
 F:	include/uapi/linux/kfd_ioctl.h
@@ -735,6 +740,12 @@
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANALOG DEVICES INC DMA DRIVERS
+M:	Lars-Peter Clausen <lars@metafoo.de>
+W:	http://ez.analog.com/community/linux-device-drivers
+S:	Supported
+F:	drivers/dma/dma-axi-dmac.c
+
 ANDROID DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
@@ -806,11 +817,13 @@
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
-F:	arch/arm/kernel/perf_event*
+F:	arch/arm/kernel/perf_*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
+F:	arch/arm/include/asm/perf_event.h
+F:	drivers/perf/arm_pmu.c
+F:	include/linux/perf/arm_pmu.h
 
 ARM PORT
 M:	Russell King <linux@arm.linux.org.uk>
@@ -1509,8 +1522,10 @@
 F:	arch/arm/mach-sti/
 F:	arch/arm/boot/dts/sti*
 F:	drivers/clocksource/arm_global_timer.c
+F:	drivers/clocksource/clksrc_st_lpc.c
 F:	drivers/i2c/busses/i2c-st.c
 F:	drivers/media/rc/st_rc.c
+F:	drivers/media/platform/sti/c8sectpfe/
 F:	drivers/mmc/host/sdhci-st.c
 F:	drivers/phy/phy-miphy28lp.c
 F:	drivers/phy/phy-miphy365x.c
@@ -1890,6 +1905,12 @@
 S:	Supported
 F:	drivers/mtd/nand/atmel_nand*
 
+ATMEL SDMMC DRIVER
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+L:	linux-mmc@vger.kernel.org
+S:	Supported
+F:	drivers/mmc/host/sdhci-of-at91.c
+
 ATMEL SPI DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
@@ -1923,6 +1944,14 @@
 S:	Maintained
 F:	drivers/net/wireless/atmel*
 
+ATMEL MAXTOUCH DRIVER
+M:	Nick Dyer <nick.dyer@itdev.co.uk>
+T:	git git://github.com/atmel-maxtouch/linux.git
+S:	Supported
+F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+F:	drivers/input/touchscreen/atmel_mxt_ts.c
+F:	include/linux/platform_data/atmel_mxt_ts.h
+
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
@@ -2598,6 +2627,15 @@
 F:	Documentation/filesystems/ceph.txt
 F:	fs/ceph/
 
+CERTIFICATE HANDLING:
+M:	David Howells <dhowells@redhat.com>
+M:	David Woodhouse <dwmw2@infradead.org>
+L:	keyrings@linux-nfs.org
+S:	Maintained
+F:	Documentation/module-signing.txt
+F:	certs/
+F:	scripts/extract-cert.c
+
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:	linux-usb@vger.kernel.org
 S:	Orphan
@@ -3560,6 +3598,15 @@
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
+DRM DRIVERS FOR FREESCALE DCU
+M:	Jianwei Wang <jianwei.wang.chn@gmail.com>
+M:	Alison Wang <alison.wang@freescale.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/fsl-dcu/
+F:	Documentation/devicetree/bindings/video/fsl,dcu.txt
+F:	Documentation/devicetree/bindings/panel/nec,nl4827hc19_05b.txt
+
 DRM DRIVERS FOR FREESCALE IMX
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
@@ -4078,15 +4125,6 @@
 F:	fs/ext2/
 F:	include/linux/ext2*
 
-EXT3 FILE SYSTEM
-M:	Jan Kara <jack@suse.com>
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Andreas Dilger <adilger.kernel@dilger.ca>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	Documentation/filesystems/ext3.txt
-F:	fs/ext3/
-
 EXT4 FILE SYSTEM
 M:	"Theodore Ts'o" <tytso@mit.edu>
 M:	Andreas Dilger <adilger.kernel@dilger.ca>
@@ -4425,6 +4463,7 @@
 F2FS FILE SYSTEM
 M:	Jaegeuk Kim <jaegeuk@kernel.org>
 M:	Changman Lee <cm224.lee@samsung.com>
+R:	Chao Yu <chao2.yu@samsung.com>
 L:	linux-f2fs-devel@lists.sourceforge.net
 W:	http://en.wikipedia.org/wiki/F2FS
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
@@ -4433,6 +4472,7 @@
 F:	Documentation/ABI/testing/sysfs-fs-f2fs
 F:	fs/f2fs/
 F:	include/linux/f2fs_fs.h
+F:	include/trace/events/f2fs.h
 
 FUJITSU FR-V (FRV) PORT
 M:	David Howells <dhowells@redhat.com>
@@ -5787,21 +5827,20 @@
 F:	fs/jffs2/
 F:	include/uapi/linux/jffs2.h
 
-JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Jan Kara <jack@suse.com>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	fs/jbd/
-F:	include/linux/jbd.h
-
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
 M:	"Theodore Ts'o" <tytso@mit.edu>
+M:	Jan Kara <jack@suse.com>
 L:	linux-ext4@vger.kernel.org
 S:	Maintained
 F:	fs/jbd2/
 F:	include/linux/jbd2.h
 
+JPU V4L2 MEM2MEM DRIVER FOR RENESAS
+M:	Mikhail Ulyanov <mikhail.ulyanov@cogentembedded.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	drivers/media/platform/rcar_jpu.c
+
 JSM Neo PCI based serial card
 M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:	linux-serial@vger.kernel.org
@@ -5970,7 +6009,7 @@
 
 KEYS/KEYRINGS:
 M:	David Howells <dhowells@redhat.com>
-L:	keyrings@linux-nfs.org
+L:	keyrings@vger.kernel.org
 S:	Maintained
 F:	Documentation/security/keys.txt
 F:	include/linux/key.h
@@ -5982,7 +6021,7 @@
 M:	David Safford <safford@us.ibm.com>
 M:	Mimi Zohar <zohar@linux.vnet.ibm.com>
 L:	linux-security-module@vger.kernel.org
-L:	keyrings@linux-nfs.org
+L:	keyrings@vger.kernel.org
 S:	Supported
 F:	Documentation/security/keys-trusted-encrypted.txt
 F:	include/keys/trusted-type.h
@@ -5993,7 +6032,7 @@
 M:	Mimi Zohar <zohar@linux.vnet.ibm.com>
 M:	David Safford <safford@us.ibm.com>
 L:	linux-security-module@vger.kernel.org
-L:	keyrings@linux-nfs.org
+L:	keyrings@vger.kernel.org
 S:	Supported
 F:	Documentation/security/keys-trusted-encrypted.txt
 F:	include/keys/encrypted-type.h
@@ -6196,6 +6235,7 @@
 S:	Supported
 F:	drivers/nvdimm/pmem.c
 F:	include/linux/pmem.h
+F:	arch/*/include/asm/pmem.h
 
 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
@@ -6600,6 +6640,14 @@
 F:	drivers/power/max14577_charger.c
 F:	drivers/power/max77693_charger.c
 
+MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
+M:	Javier Martinez Canillas <javier@osg.samsung.com>
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	drivers/*/*max77802.c
+F:	Documentation/devicetree/bindings/*/*max77802.txt
+F:	include/dt-bindings/*/*max77802.h
+
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
@@ -6613,7 +6661,7 @@
 F:	drivers/rtc/rtc-max77686.c
 F:	drivers/clk/clk-max77686.c
 F:	Documentation/devicetree/bindings/mfd/max14577.txt
-F:	Documentation/devicetree/bindings/mfd/max77686.txt
+F:	Documentation/devicetree/bindings/*/max77686.txt
 F:	Documentation/devicetree/bindings/mfd/max77693.txt
 F:	Documentation/devicetree/bindings/clock/maxim,max77686.txt
 F:	include/linux/mfd/max14577*.h
@@ -6637,6 +6685,51 @@
 F:	Documentation/devicetree/bindings/media/renesas,vsp1.txt
 F:	drivers/media/platform/vsp1/
 
+MEDIA DRIVERS FOR ASCOT2E
+M:	Sergey Kozlov <serjk@netup.ru>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org
+W:	http://netup.tv/
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	drivers/media/dvb-frontends/ascot2e*
+
+MEDIA DRIVERS FOR CXD2841ER
+M:	Sergey Kozlov <serjk@netup.ru>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://netup.tv/
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	drivers/media/dvb-frontends/cxd2841er*
+
+MEDIA DRIVERS FOR HORUS3A
+M:	Sergey Kozlov <serjk@netup.ru>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://netup.tv/
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	drivers/media/dvb-frontends/horus3a*
+
+MEDIA DRIVERS FOR LNBH25
+M:	Sergey Kozlov <serjk@netup.ru>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://netup.tv/
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	drivers/media/dvb-frontends/lnbh25*
+
+MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
+M:	Sergey Kozlov <serjk@netup.ru>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://netup.tv/
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	drivers/media/pci/netup_unidvb/*
+
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
 M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 P:	LinuxTV.org Project
@@ -6760,6 +6853,12 @@
 S:	Supported
 F:	arch/microblaze/
 
+MICROSOFT SURFACE PRO 3 BUTTON DRIVER
+M:	Chen Yu <yu.c.chen@intel.com>
+L:	platform-driver-x86@vger.kernel.org
+S:	Supported
+F:	drivers/platform/x86/surfacepro3_button.c
+
 MICROTEK X6 SCANNER
 M:	Oliver Neukum <oliver@neukum.org>
 S:	Maintained
@@ -8057,7 +8156,6 @@
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
-M:	Thomas Abraham <thomas.abraham@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -8134,6 +8232,15 @@
 F:	drivers/power/
 X:	drivers/power/avs/
 
+POWER STATE COORDINATION INTERFACE (PSCI)
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	drivers/firmware/psci.c
+F:	include/linux/psci.h
+F:	include/uapi/linux/psci.h
+
 PNP SUPPORT
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 S:	Maintained
@@ -9179,6 +9286,12 @@
 S:	Supported
 F:	security/apparmor/
 
+YAMA SECURITY MODULE
+M:	Kees Cook <keescook@chromium.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip
+S:	Supported
+F:	security/yama/
+
 SENSABLE PHANTOM
 M:	Jiri Slaby <jirislaby@gmail.com>
 S:	Maintained
@@ -10388,13 +10501,19 @@
 
 TOSHIBA SMM DRIVER
 M:	Jonathan Buzzard <jonathan@buzzard.org.uk>
-L:	tlinux-users@tce.toshiba-dme.co.jp
 W:	http://www.buzzard.org.uk/toshiba/
 S:	Maintained
 F:	drivers/char/toshiba.c
 F:	include/linux/toshiba.h
 F:	include/uapi/linux/toshiba.h
 
+TOSHIBA TC358743 DRIVER
+M:	Mats Randgaard <matrandg@cisco.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	drivers/media/i2c/tc358743*
+F:	include/media/tc358743.h
+
 TMIO MMC DRIVER
 M:	Ian Molton <ian@mnementh.co.uk>
 L:	linux-mmc@vger.kernel.org
diff --git a/Makefile b/Makefile
index c361593..f2d2706 100644
--- a/Makefile
+++ b/Makefile
@@ -666,14 +666,7 @@
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
-ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
-COMPILER := clang
-else
-COMPILER := gcc
-endif
-export COMPILER
-
-ifeq ($(COMPILER),clang)
+ifeq ($(cc-name),clang)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
@@ -875,10 +868,9 @@
 # export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
-MODSECKEY = ./signing_key.priv
-MODPUBKEY = ./signing_key.x509
-export MODPUBKEY
-mod_sign_cmd = perl $(srctree)/scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODSECKEY) $(MODPUBKEY)
+$(eval $(call config_filename,MODULE_SIG_KEY))
+
+mod_sign_cmd = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509
 else
 mod_sign_cmd = true
 endif
@@ -886,7 +878,7 @@
 
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
 
 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -1178,8 +1170,8 @@
 		  arch/*/include/generated .tmp_objdiff
 MRPROPER_FILES += .config .config.old .version .old_version \
 		  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
-		  signing_key.priv signing_key.x509 x509.genkey		\
-		  extra_certificates signing_key.x509.keyid		\
+		  signing_key.pem signing_key.priv signing_key.x509	\
+		  x509.genkey extra_certificates signing_key.x509.keyid	\
 		  signing_key.x509.signer vmlinux-gdb.py
 
 # clean - Delete most, but leave enough to build external modules
@@ -1433,6 +1425,7 @@
 		\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
 		-o -name '*.ko.*' \
 		-o -name '*.dwo'  \
+		-o -name '*.su'  \
 		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
 		-o -name '*.symtypes' -o -name 'modules.order' \
 		-o -name modules.builtin -o -name '.tmp_*.o.*' \
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a8ea71..8f35649 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -71,6 +71,12 @@
 	 ( On 32-bit x86, the necessary options added to the compiler
 	   flags may increase the size of the kernel slightly. )
 
+config STATIC_KEYS_SELFTEST
+	bool "Static key selftest"
+	depends on JUMP_LABEL
+	help
+	  Boot time self-test of the branch patching code.
+
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
@@ -87,7 +93,6 @@
 
 config UPROBES
 	def_bool n
-	select PERCPU_RWSEM
 	help
 	  Uprobes is the user-space counterpart to kprobes: they
 	  enable instrumentation applications (such as 'perf probe')
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..e8c9560 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op)							\
+#define ATOMIC_OP(op, asm_op)						\
 static __inline__ void atomic_##op(int i, atomic_t * v)			\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -45,15 +45,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC_OP_RETURN(op)						\
+#define ATOMIC_OP_RETURN(op, asm_op)					\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%3,%2\n"					\
-	"	" #op "l %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -65,13 +65,13 @@
 	return result;							\
 }
 
-#define ATOMIC64_OP(op)							\
+#define ATOMIC64_OP(op, asm_op)						\
 static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -81,15 +81,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC64_OP_RETURN(op)						\
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%3,%2\n"					\
-	"	" #op "q %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -101,15 +101,27 @@
 	return result;							\
 }
 
-#define ATOMIC_OPS(opg)							\
-	ATOMIC_OP(opg)							\
-	ATOMIC_OP_RETURN(opg)						\
-	ATOMIC64_OP(opg)						\
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op)							\
+	ATOMIC_OP(op, op##l)						\
+	ATOMIC_OP_RETURN(op, op##l)					\
+	ATOMIC64_OP(op, op##q)						\
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
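[ The refactor above decouples the C-level operation name from the assembler
  mnemonic, so that atomic_andnot() can map to Alpha's bic instruction below.
  A minimal user-space sketch of the same op-table macro pattern, using C11
  <stdatomic.h> builtins in place of the ll/sc assembly; the my_atomic_*
  names are illustrative only:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Generate one void atomic op per table entry, kernel-style. */
	#define MY_ATOMIC_OP(op)                                       \
	static inline void my_atomic_##op(int i, atomic_int *v)        \
	{                                                              \
	        atomic_fetch_##op(v, i);                               \
	}

	/* C11 has no fetch_andnot; the kernel open-codes it via bic. */
	MY_ATOMIC_OP(and)
	MY_ATOMIC_OP(or)
	MY_ATOMIC_OP(xor)

	int main(void)
	{
	        atomic_int v = 0xff;

	        my_atomic_and(0x0f, &v);
	        my_atomic_or(0x40, &v);
	        printf("0x%x\n", atomic_load(&v));      /* prints 0x4f */
	        return 0;
	} ]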
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 87d18ae..c3ecda0 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -172,9 +172,13 @@
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
 
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 41cbb4a..0d1b717 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@
 config ARCH_HAS_BANDGAP
 	bool
 
+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -1496,6 +1499,7 @@
 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
 	depends on CPU_V7
+	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
 	  implementing the PSCI specification for CPU-centric power
@@ -1700,13 +1704,24 @@
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
-config HW_PERF_EVENTS
-	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
 	default y
 	help
-	  Enable hardware performance counter support for perf events. If
-	  disabled, perf events will use software events only.
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs from becoming an exploitable privilege
+	  escalation by ensuring that magic values (such as LIST_POISON)
+	  will always fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-effort implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU
 
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 564900b..0447c04a 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -358,6 +358,8 @@
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH
 				      GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "rtc";
+			clocks = <&clk_32768_ck>;
+			clock-names = "int-clk";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 215775d..22038f2 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -112,6 +112,13 @@
 		clock-frequency = <12000000>;
 	};
 
+	/* fixed 32k external oscillator clock */
+	clk_32k_rtc: clk_32k_rtc {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
 	sound0: sound@0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "AM437x-GP-EVM";
@@ -941,3 +948,9 @@
 	tx-num-evt = <32>;
 	rx-num-evt = <32>;
 };
+
+&rtc {
+	clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
+	clock-names = "ext-clk", "int-clk";
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index 37834427..af25801 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -110,6 +110,13 @@
 			gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
 		};
 	};
+
+	/* fixed 32k external oscillator clock */
+	clk_32k_rtc: clk_32k_rtc {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
 };
 
 &am43xx_pinmux {
@@ -394,6 +401,8 @@
 };
 
 &rtc {
+	clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
+	clock-names = "ext-clk", "int-clk";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 22af448..7da7c2d 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -24,6 +24,13 @@
 		display0 = &lcd0;
 	};
 
+	/* fixed 32k external oscillator clock */
+	clk_32k_rtc: clk_32k_rtc {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
 	backlight {
 		compatible = "pwm-backlight";
 		pwms = <&ecap0 0 50000 PWM_POLARITY_INVERTED>;
@@ -697,6 +704,8 @@
 };
 
 &rtc {
+	clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
+	clock-names = "ext-clk", "int-clk";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index c5fbde3..50f5e9d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -294,7 +294,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <0>;
-
+			gpio-ranges = <&pinctrl 0 0 32>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -309,7 +309,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <1>;
-
+			gpio-ranges = <&pinctrl 0 32 5>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -324,7 +324,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <2>;
-
+			gpio-ranges = <&pinctrl 0 64 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -339,7 +339,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <3>;
-
+			gpio-ranges = <&pinctrl 0 96 2>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -354,7 +354,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <4>;
-
+			gpio-ranges = <&pinctrl 0 128 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -369,7 +369,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <5>;
-
+			gpio-ranges = <&pinctrl 0 160 12>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -384,7 +384,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <6>;
-
+			gpio-ranges = <&pinctrl 0 192 32>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -399,7 +399,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <7>;
-
+			gpio-ranges = <&pinctrl 0 224 7>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -414,12 +414,15 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <8>;
-
+			gpio-ranges = <&pinctrl 0 256 12>;
 			clocks = <&prcc_pclk 5 1>;
 		};
 
-		pinctrl {
+		pinctrl: pinctrl {
 			compatible = "stericsson,db8500-pinctrl";
+			nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>,
+						<&gpio4>, <&gpio5>, <&gpio6>, <&gpio7>,
+						<&gpio8>;
 			prcm = <&prcmu>;
 		};
 
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 176e332..314f59c 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -59,6 +59,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <0>;
+		gpio-ranges = <&pinctrl 0 0 32>;
 		clocks = <&pclk>;
 	};
 
@@ -72,6 +73,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <1>;
+		gpio-ranges = <&pinctrl 0 32 32>;
 		clocks = <&pclk>;
 	};
 
@@ -85,12 +87,14 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <2>;
+		gpio-ranges = <&pinctrl 0 64 32>;
 		clocks = <&pclk>;
 	};
 
 	gpio3: gpio@101e7000 {
 		compatible = "st,nomadik-gpio";
 		reg =  <0x101e7000 0x80>;
+		ngpio = <28>;
 		interrupt-parent = <&vica>;
 		interrupts = <9>;
 		interrupt-controller;
@@ -98,11 +102,13 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <3>;
+		gpio-ranges = <&pinctrl 0 96 28>;
 		clocks = <&pclk>;
 	};
 
-	pinctrl {
+	pinctrl: pinctrl {
 		compatible = "stericsson,stn8815-pinctrl";
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
 		/* Pin configurations */
 		uart1 {
 			uart1_default_mux: uart1_mux {
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7..2b25b60 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@
 	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-	/*
-	 * We assume all CPUs may be shut down.
-	 * This would be the hook to use for eventual Secure
-	 * OS migration requests as described in the PSCI spec.
-	 */
-	return 0;
+	/* We assume all CPUs may be shut down. */
+	return true;
 }
 
 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_kill		= mcpm_cpu_kill,
-	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_can_disable	= mcpm_cpu_can_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
 };
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index dc01c04..3b32d5f 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -157,7 +157,7 @@
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_V3020=y
-CONFIG_RTC_DRV_SA1100=y
+CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_INOTIFY=y
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index 4560c9c..8e10df7 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -157,7 +157,7 @@
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_V3020=y
-CONFIG_RTC_DRV_SA1100=y
+CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_INOTIFY=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 9504e77..3eaf8fb 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -124,14 +124,14 @@
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_DRM=y
-CONFIG_DRM_PTN3460=y
-CONFIG_DRM_PS8622=y
+CONFIG_DRM_NXP_PTN3460=y
+CONFIG_DRM_PARADE_PS8622=y
 CONFIG_DRM_EXYNOS=y
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_PANEL_S6E8AA0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index 557dd29..a5b4920 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -150,7 +150,7 @@
 CONFIG_LEDS_TRIGGER_BACKLIGHT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DEBUG=y
-CONFIG_RTC_DRV_SA1100=y
+CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 824a0cf..f84471d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -440,8 +440,8 @@
 CONFIG_DRM=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_PTN3460=m
-CONFIG_DRM_PS8622=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_DSI=y
@@ -449,7 +449,7 @@
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
-CONFIG_DRM_PANEL_S6E8AA0=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
diff --git a/arch/arm/configs/palmz72_defconfig b/arch/arm/configs/palmz72_defconfig
index 4baa83c..83c135e1 100644
--- a/arch/arm/configs/palmz72_defconfig
+++ b/arch/arm/configs/palmz72_defconfig
@@ -67,7 +67,7 @@
 CONFIG_MMC_DEBUG=y
 CONFIG_MMC_PXA=y
 CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_SA1100=y
+CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index 0a847d0..b5624e3 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -82,7 +82,7 @@
 CONFIG_MMC_PXA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=m
-CONFIG_RTC_DRV_SA1100=m
+CONFIG_RTC_DRV_PXA=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 # CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 932ee4e..4bc8700 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -177,7 +177,7 @@
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_RTC_DRV_PCF8583=m
-CONFIG_RTC_DRV_SA1100=y
+CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1..be648eb 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe572..7bbf325 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
 	.endm
 #endif
 
-	.macro asm_trace_hardirqs_off
+	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl	trace_hardirqs_off
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on_cond, cond
+	.macro asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	/*
 	 * actually the registers should be pushed and pop'd conditionally, but
 	 * after bl the flags are certainly clobbered
 	 */
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl\cond	trace_hardirqs_on
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on
-	asm_trace_hardirqs_on_cond al
-	.endm
-
-	.macro disable_irq
+	.macro disable_irq, save=1
 	disable_irq_notrace
-	asm_trace_hardirqs_off
+	asm_trace_hardirqs_off \save
 	.endm
 
 	.macro enable_irq
@@ -173,7 +177,7 @@
 
 	.macro restore_irqs, oldcpsr
 	tst	\oldcpsr, #PSR_I_BIT
-	asm_trace_hardirqs_on_cond eq
+	asm_trace_hardirqs_on cond=eq
 	restore_irqs_notrace \oldcpsr
 	.endm
 
@@ -445,6 +449,53 @@
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..fe3ef39 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
@@ -75,17 +74,17 @@
 	: "r" (&v->counter), "Ir" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 	int oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -99,10 +98,9 @@
 		    : "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -194,6 +192,13 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or,  |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -290,12 +295,12 @@
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long							\
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
 {									\
 	long long result;						\
 	unsigned long tmp;						\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
@@ -309,8 +314,6 @@
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
@@ -321,17 +324,26 @@
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or,  orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-					long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
 	long long oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -346,17 +358,15 @@
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
 	long long result;
 	unsigned long tmp;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
+#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e..3ff5642 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -37,12 +36,20 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
+#define mb()		__arm_heavy_mb()
 #define rmb()		dsb()
-#define wmb()		do { dsb(st); outer_sync(); } while (0)
+#define wmb()		__arm_heavy_mb(st)
 #define dma_rmb()	dmb(osh)
 #define dma_wmb()	dmb(oshst)
 #else
@@ -67,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
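[ With CONFIG_ARM_HEAVY_MB, mb() and wmb() gain a SoC-specific hook in place
  of the old unconditional outer_sync().  A hypothetical platform would
  install its callback along these lines; all mysoc_* names and the register
  address are invented for illustration:

	static void __iomem *mysoc_sync_reg;

	static void mysoc_mb(void)
	{
	        /* e.g. drain an interconnect write buffer via a device reg */
	        writel_relaxed(1, mysoc_sync_reg);
	        while (readl_relaxed(mysoc_sync_reg) & 1)
	                cpu_relax();
	}

	static int __init mysoc_barrier_init(void)
	{
	        mysoc_sync_reg = ioremap(MYSOC_SYNC_PHYS, 4); /* hypothetical */
	        soc_mb = mysoc_mb;      /* arm_heavy_mb() will now call it */
	        return 0;
	} ]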
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 5638099..e943e6c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p |= mask;
@@ -47,9 +47,9 @@
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p &= ~mask;
@@ -59,9 +59,9 @@
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p ^= mask;
@@ -73,9 +73,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -90,9 +90,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -107,9 +107,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
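[ The open-coded 1UL << (bit & 31) and bit >> 5 pair assumed 32-bit longs;
  BIT_MASK() and BIT_WORD() express the same arithmetic in terms of
  BITS_PER_LONG.  Their definitions from <linux/bitops.h>, plus a worked
  value:

	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

	/* bit 37 with 32-bit longs: BIT_WORD(37) = 1, BIT_MASK(37) = 1UL << 5,
	 * exactly what p += bit >> 5; mask = 1UL << (bit & 31) computed. */ ]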
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda..d5525bf 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -161,8 +159,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -506,4 +502,21 @@
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
 
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+	phys_addr_t phys = __pa(addr);
+
+	__cpuc_flush_dcache_area((void *)addr, size);
+	outer_flush_range(phys, phys + size);
+}
+
 #endif
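[ A sketch of the intended call pattern: flush an argument buffer to the
  point of coherency before handing its physical address to secure firmware.
  The fw_args structure and invoke_secure_fw() wrapper below are hypothetical:

	struct fw_args args = { .op = 1 };      /* hypothetical layout */

	secure_flush_area(&args, sizeof(args)); /* D-cache plus outer cache */
	ret = invoke_secure_fw(__pa(&args));    /* hypothetical SMC wrapper */ ]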
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05..916a274 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@
 	unsigned int tmp;
 #endif
 
-	smp_mb();
 	prefetchw((const void *)ptr);
 
 	switch (size) {
@@ -98,12 +97,11 @@
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
-	smp_mb();
 
 	return ret;
 }
 
-#define xchg(ptr, x) ({							\
+#define xchg_relaxed(ptr, x) ({						\
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
 				   sizeof(*(ptr)));			\
 })
@@ -117,6 +115,8 @@
 #error "SMP is not supported on this platform"
 #endif
 
+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@
 	return oldval;
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
-					 (unsigned long)(o),		\
-					 (unsigned long)(n),		\
-					 sizeof(*(ptr)));		\
+#define cmpxchg_relaxed(ptr,o,n) ({					\
+	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
+				      (unsigned long)(o),		\
+				      (unsigned long)(n),		\
+				      sizeof(*(ptr)));			\
 })
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@
 
 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
 
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					   (unsigned long long)(o),	\
-					   (unsigned long long)(n));	\
-})
-
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
 #endif /* __ASM_ARM_CMPXCHG_H */
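[ The fully ordered cmpxchg() and cmpxchg64() wrappers deleted here are
  likewise reconstituted by the generic layer from the _relaxed forms, so
  callers are unaffected; the canonical retry loop works unchanged:

	static void saturating_inc(atomic_t *v, int max)
	{
	        int old, new;

	        do {
	                old = atomic_read(v);
	                new = min(old + 1, max);
	        } while (atomic_cmpxchg(v, old, new) != old);
	} ]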
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d..a68b9d8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define DMA_ERROR_CODE	(~0)
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..e878129 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -55,11 +54,46 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,17 +102,16 @@
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+		unsigned int domain = get_domain();		\
+		domain &= ~domain_mask(dom);			\
+		domain = domain | domain_val(dom, type);	\
+		set_domain(domain);				\
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
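[ Each domain owns a 2-bit field in the DACR, so the constants above reduce
  to small integers.  A quick user-space check of the arithmetic, assuming
  the non-CONFIG_IO_36 domain numbering shown above:

	#include <stdio.h>

	#define DOMAIN_KERNEL   0
	#define DOMAIN_USER     1
	#define DOMAIN_IO       2
	#define DOMAIN_VECTORS  3

	#define DOMAIN_NOACCESS 0
	#define DOMAIN_CLIENT   1

	#define domain_val(dom, type) ((unsigned)(type) << (2 * (dom)))

	int main(void)
	{
	        unsigned base = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
	                        domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
	                        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);

	        printf("DACR_UACCESS_DISABLE = 0x%x\n",
	               base | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)); /* 0x51 */
	        printf("DACR_UACCESS_ENABLE  = 0x%x\n",
	               base | domain_val(DOMAIN_USER, DOMAIN_CLIENT));   /* 0x55 */
	        return 0;
	} ]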
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae..58cfe9f 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@
 	__end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
 
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed828..6795368 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd..cab07f6 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 53c15de..be1d07d 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -35,6 +35,11 @@
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 #endif
 
+#ifdef CONFIG_SMP
+extern void arch_trigger_all_cpu_backtrace(bool);
+#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+#endif
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc..34f7b69 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/unified.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP	"nop.w"
-#else
-#define JUMP_LABEL_NOP	"nop"
-#endif
-
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 JUMP_LABEL_NOP "\n\t"
+		 WASM(nop) "\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 : :  "i" (key) :  : l_yes);
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 WASM(b) " %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
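[ These stubs are the low-level half of the reworked static-key interface
  from the same cycle; the consumer side looks like this sketch, where
  do_fast() and do_slow() are placeholders:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(use_fast_path);

	void hot_path(void)
	{
	        if (static_branch_unlikely(&use_fast_path))
	                do_fast();      /* nop patched to a branch when on */
	        else
	                do_slow();
	}

	void make_fast(void)
	{
	        static_branch_enable(&use_fast_path); /* rewrites the nop sites */
	} ]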
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b7f6fb4..98d58bb 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -119,12 +119,6 @@
 #endif
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
-#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92f..c2bf24f 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-	if (outer_cache.sync)
-		outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif	/* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278..d0131ee 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  *   - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87df..0000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *  linux/arch/arm/include/asm/pmu.h
- *
- *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-#include <asm/cputype.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- *	interrupt and passed the address of the low level handler,
- *	and can be used to implement any platform specific handling
- *	before or after calling it.
- */
-struct arm_pmu_platdata {
-	irqreturn_t (*handle_irq)(int irq, void *dev,
-				  irq_handler_t pmu_handler);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS		32
-
-#define HW_OP_UNSUPPORTED		0xFFFF
-#define C(_x)				PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED		0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED					\
-	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
-[0 ... C(MAX) - 1] = {							\
-	[0 ... C(OP_MAX) - 1] = {					\
-		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
-	},								\
-}
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
-	/*
-	 * The events that are active on the PMU for the given index.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
-
-	/*
-	 * Hardware lock to serialize accesses to PMU registers. Needed for the
-	 * read/modify/write sequences.
-	 */
-	raw_spinlock_t		pmu_lock;
-
-	/*
-	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
-	 * already have to allocate this struct per cpu.
-	 */
-	struct arm_pmu		*percpu_pmu;
-};
-
-struct arm_pmu {
-	struct pmu	pmu;
-	cpumask_t	active_irqs;
-	cpumask_t	supported_cpus;
-	int		*irq_affinity;
-	char		*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct perf_event *event);
-	void		(*disable)(struct perf_event *event);
-	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	int		(*set_event_filter)(struct hw_perf_event *evt,
-					    struct perf_event_attr *attr);
-	u32		(*read_counter)(struct perf_event *event);
-	void		(*write_counter)(struct perf_event *event, u32 val);
-	void		(*start)(struct arm_pmu *);
-	void		(*stop)(struct arm_pmu *);
-	void		(*reset)(void *);
-	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
-	void		(*free_irq)(struct arm_pmu *);
-	int		(*map_event)(struct perf_event *event);
-	int		num_events;
-	atomic_t	active_events;
-	struct mutex	reserve_mutex;
-	u64		max_period;
-	struct platform_device	*plat_device;
-	struct pmu_hw_events	__percpu *hw_events;
-	struct notifier_block	hotplug_nb;
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
-		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-						[PERF_COUNT_HW_CACHE_OP_MAX]
-						[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		     u32 raw_event_mask);
-
-struct pmu_probe_info {
-	unsigned int cpuid;
-	unsigned int mask;
-	int (*init)(struct arm_pmu *);
-};
-
-#define PMU_PROBE(_cpuid, _mask, _fn)	\
-{					\
-	.cpuid = (_cpuid),		\
-	.mask = (_mask),		\
-	.init = (_fn),			\
-}
-
-#define ARM_PMU_PROBE(_cpuid, _fn) \
-	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
-
-#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
-
-#define XSCALE_PMU_PROBE(_version, _fn) \
-	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3e..68ee3ce 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
 #ifndef __ASM_ARM_PSCI_H
 #define __ASM_ARM_PSCI_H
 
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
-
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
 
 #ifdef CONFIG_ARM_PSCI
-int psci_init(void);
 bool psci_smp_available(void);
 #else
-static inline int psci_init(void) { return 0; }
 static inline bool psci_smp_available(void) { return false; }
 #endif
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1b..ef35665 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	int  (*cpu_kill)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
+	bool  (*cpu_can_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e522..f908071 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32ede..d0a1119 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
@@ -136,22 +133,18 @@
 
 /*
  * thread information flags:
- *  TIF_SYSCALL_TRACE	- syscall trace active
- *  TIF_SYSCAL_AUDIT	- syscall auditing active
- *  TIF_SIGPENDING	- signal pending
- *  TIF_NEED_RESCHED	- rescheduling necessary
- *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING		0
-#define TIF_NEED_RESCHED	1
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_UPROBE		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_UPROBE		3	/* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d0..8cc85a4 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g., via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +194,7 @@
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +222,7 @@
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +255,7 @@
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +271,7 @@
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
 
@@ -300,20 +333,23 @@
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
-#define __get_user_asm_byte(x, addr, err)			\
+#define __get_user_asm(x, addr, err, instr)			\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -329,6 +365,9 @@
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
 
+#define __get_user_asm_byte(x, addr, err)			\
+	__get_user_asm(x, addr, err, ldrb)
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)			\
 ({								\
@@ -348,22 +387,7 @@
 #endif
 
 #define __get_user_asm_word(x, addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	mov	%1, #0\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err), "=&r" (x)					\
-	: "r" (addr), "i" (-EFAULT)				\
-	: "cc")
+	__get_user_asm(x, addr, err, ldr)
 
 #define __put_user(x, ptr)						\
 ({									\
@@ -381,9 +405,11 @@
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,11 +417,12 @@
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
-#define __put_user_asm_byte(x, __pu_addr, err)			\
+#define __put_user_asm(x, __pu_addr, err, instr)		\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -410,6 +437,9 @@
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
 
+#define __put_user_asm_byte(x, __pu_addr, err)			\
+	__put_user_asm(x, __pu_addr, err, strb)
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)			\
 ({								\
@@ -427,21 +457,7 @@
 #endif
 
 #define __put_user_asm_word(x, __pu_addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err)						\
-	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
-	: "cc")
+	__put_user_asm(x, __pu_addr, err, str)
 
 #ifndef __ARMEB__
 #define	__reg_oper0	"%R2"
@@ -474,11 +490,46 @@
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
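[ The net effect for kernel code: with CONFIG_CPU_SW_DOMAIN_PAN enabled, a
  bare dereference of a user pointer takes a domain fault, and only accessors
  that bracket the access with uaccess_save_and_enable() and
  uaccess_restore(), as above, keep working.  A minimal illustration:

	static int read_user_int(const int __user *uptr, int *out)
	{
	        int val;

	        if (get_user(val, uptr))  /* opens, then closes, the window */
	                return -EFAULT;

	        /* *out = *uptr;  <- would now take a domain fault */
	        *out = val;
	        return 0;
	} ]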
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 8b1f37b..71e473d 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -20,4 +20,10 @@
 							    atomic64_t,	\
 							    counter), (val))
 
+/* Rebind event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+	return true;
+}
+
 #endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 1bee8ca..98b1084 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -54,26 +54,14 @@
 
 #define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
 
-static inline xmaddr_t phys_to_machine(xpaddr_t phys)
-{
-	unsigned offset = phys.paddr & ~PAGE_MASK;
-	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
-}
-
-static inline xpaddr_t machine_to_phys(xmaddr_t machine)
-{
-	unsigned offset = machine.maddr & ~PAGE_MASK;
-	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
-}
 /* VIRT <-> MACHINE conversion */
-#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
 #define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
+/* Only used in PV code. But ARM guests are always HVM. */
 static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
-	/* TODO: assuming it is mapped in the kernel 1:1 */
-	return virt_to_machine(vaddr);
+	BUG();
 }
 
 /* TODO: this shouldn't be here but it is because the frontend drivers
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e69f7a1..af9e59b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,8 +71,7 @@
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
-				   perf_event_xscale.o perf_event_v6.o \
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
 				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
@@ -89,7 +88,7 @@
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y				+= psci.o psci-call.o
+obj-y				+= psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a..f89811f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e..3e1c26e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@
 	@
 	stmia	r7, {r2 - r6}
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@
 
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@
 
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@
 
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
@@ -770,6 +784,8 @@
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
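
The new uaccess parameter lets each exception entry decide whether userspace access is shut off immediately; the abort and undef paths enter with uaccess=0 so they can still read from userspace (e.g. the ldrt fetch of the faulting instruction above) and disable it themselves afterwards. Under CONFIG_CPU_SW_DOMAIN_PAN the assembler helpers boil down to rewriting the DOMAIN_USER field of the DACR; a rough C rendering (function name invented for the sketch), using the get_domain()/set_domain() accessors from <asm/domain.h>:

	static inline void sw_pan_uaccess_disable(void)
	{
		unsigned int dacr = get_domain();  /* mrc p15, 0, rN, c3, c0, 0 */

		dacr &= ~domain_mask(DOMAIN_USER);
		dacr |= domain_val(DOMAIN_USER, DOMAIN_NOACCESS);
		set_domain(dacr);		   /* mcr p15, 0, rN, c3, c0, 0 */
	}
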
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f..30a7228 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
 
 
 	.align	5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
- * This is the fast syscall return path.  We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path.  We do as little as possible here,
+ * such as avoiding writing r0 to the stack.  We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
  */
 ret_fast_syscall:
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK
-	bne	__sys_trace_return
-	tst	r1, #_TIF_WORK_MASK
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
-	asm_trace_hardirqs_on
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
-	ct_user_enter
 
 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+	/* Ok, we need to do extra processing, enter the slow path. */
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
-work_pending:
+	/* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled.  As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+	disable_irq_notrace			@ disable interrupts
+	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	beq	no_work_pending
+ UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
+
+	/* Slower path - fall through to work_pending */
+#endif
+
+	tst	r1, #_TIF_SYSCALL_WORK
+	bne	__sys_trace_return_nosave
+slow_work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_work_pending
@@ -65,16 +85,19 @@
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them.  Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	bne	work_pending
+	bne	slow_work_pending
 no_work_pending:
-	asm_trace_hardirqs_on
+	asm_trace_hardirqs_on save = 0
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
@@ -174,6 +197,8 @@
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -252,6 +277,12 @@
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
+__sys_trace_return_nosave:
+	enable_irq_notrace
+	mov	r0, sp
+	bl	syscall_trace_exit
+	b	ret_slow_syscall
+
 	.align	5
 #ifdef CONFIG_ALIGNMENT_TRAP
 	.type	__cr_alignment, #object
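
The restructuring buys the fast path a single combined thread-flag test: with IRQ tracing and context tracking both disabled, the common case is one load and one conditional branch before user registers are restored, and r0 is never spilled to the stack. In C terms, purely for illustration:

	unsigned long flags = current_thread_info()->flags;

	if (unlikely(flags & (_TIF_SYSCALL_WORK | _TIF_WORK_MASK)))
		goto work_pending;	/* sorted into trace vs. signal work there */

	/* ... restore user registers, r0 still live in its register ... */
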
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045a..0d22ad2 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm
 
 	@
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@
 	.macro  svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+			        @ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm
 
+
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro  svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-			        @ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	add	sp, #\offset
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
 #endif	/* !CONFIG_THUMB2_KERNEL */
+	.endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991..04286fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
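
DACR_INIT moves the boot-time domain value into <asm/domain.h> so it can vary with the kernel configuration. Presumably it carries the same domain_val() terms deleted here, except that DOMAIN_USER starts as DOMAIN_CLIENT directly (or DOMAIN_NOACCESS under CONFIG_CPU_SW_DOMAIN_PAN) -- which is also why the later modify_domain(DOMAIN_USER, DOMAIN_CLIENT) call disappears from traps.c further down. A sketch of the non-PAN case only; check <asm/domain.h> for the authoritative definition:

	#define DACR_INIT \
		(domain_val(DOMAIN_USER,   DOMAIN_CLIENT) | \
		 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		 domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) | \
		 domain_val(DOMAIN_IO,     DOMAIN_CLIENT))
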
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index baf8ede..5ff4826 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
 #include <linux/export.h>
 
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index e39cbf4..845a5dd 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -12,7 +12,7 @@
 	void *addr = (void *)entry->code;
 	unsigned int insn;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		insn = arm_gen_branch(entry->code, entry->target);
 	else
 		insn = arm_gen_nop();
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
deleted file mode 100644
index 54272e0..0000000
--- a/arch/arm/kernel/perf_event.c
+++ /dev/null
@@ -1,896 +0,0 @@
-#undef DEBUG
-
-/*
- * ARM performance counter support.
- *
- * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
- * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
- *
- * This code is based on the sparc64 perf event code, which is in turn based
- * on the x86 code.
- */
-#define pr_fmt(fmt) "hw perfevents: " fmt
-
-#include <linux/bitmap.h>
-#include <linux/cpumask.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/irqdesc.h>
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-#include <asm/pmu.h>
-
-static int
-armpmu_map_cache_event(const unsigned (*cache_map)
-				      [PERF_COUNT_HW_CACHE_MAX]
-				      [PERF_COUNT_HW_CACHE_OP_MAX]
-				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
-		       u64 config)
-{
-	unsigned int cache_type, cache_op, cache_result, ret;
-
-	cache_type = (config >>  0) & 0xff;
-	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
-		return -EINVAL;
-
-	cache_op = (config >>  8) & 0xff;
-	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
-		return -EINVAL;
-
-	cache_result = (config >> 16) & 0xff;
-	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
-		return -EINVAL;
-
-	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
-
-	if (ret == CACHE_OP_UNSUPPORTED)
-		return -ENOENT;
-
-	return ret;
-}
-
-static int
-armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
-{
-	int mapping;
-
-	if (config >= PERF_COUNT_HW_MAX)
-		return -EINVAL;
-
-	mapping = (*event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
-}
-
-static int
-armpmu_map_raw_event(u32 raw_event_mask, u64 config)
-{
-	return (int)(config & raw_event_mask);
-}
-
-int
-armpmu_map_event(struct perf_event *event,
-		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		 const unsigned (*cache_map)
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		 u32 raw_event_mask)
-{
-	u64 config = event->attr.config;
-	int type = event->attr.type;
-
-	if (type == event->pmu->type)
-		return armpmu_map_raw_event(raw_event_mask, config);
-
-	switch (type) {
-	case PERF_TYPE_HARDWARE:
-		return armpmu_map_hw_event(event_map, config);
-	case PERF_TYPE_HW_CACHE:
-		return armpmu_map_cache_event(cache_map, config);
-	case PERF_TYPE_RAW:
-		return armpmu_map_raw_event(raw_event_mask, config);
-	}
-
-	return -ENOENT;
-}
-
-int armpmu_event_set_period(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	s64 left = local64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
-	int ret = 0;
-
-	if (unlikely(left <= -period)) {
-		left = period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	if (unlikely(left <= 0)) {
-		left += period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	/*
-	 * Limit the maximum period to prevent the counter value
-	 * from overtaking the one we are about to program. In
-	 * effect we are reducing max_period to account for
-	 * interrupt latency (and we are being very conservative).
-	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
-
-	local64_set(&hwc->prev_count, (u64)-left);
-
-	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
-
-	perf_event_update_userpage(event);
-
-	return ret;
-}
-
-u64 armpmu_event_update(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	u64 delta, prev_raw_count, new_raw_count;
-
-again:
-	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = armpmu->read_counter(event);
-
-	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			     new_raw_count) != prev_raw_count)
-		goto again;
-
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
-
-	local64_add(delta, &event->count);
-	local64_sub(delta, &hwc->period_left);
-
-	return new_raw_count;
-}
-
-static void
-armpmu_read(struct perf_event *event)
-{
-	armpmu_event_update(event);
-}
-
-static void
-armpmu_stop(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to update the counter, so ignore
-	 * PERF_EF_UPDATE, see comments in armpmu_start().
-	 */
-	if (!(hwc->state & PERF_HES_STOPPED)) {
-		armpmu->disable(event);
-		armpmu_event_update(event);
-		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	}
-}
-
-static void armpmu_start(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to reprogram the period, so ignore
-	 * PERF_EF_RELOAD, see the comment below.
-	 */
-	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
-	hwc->state = 0;
-	/*
-	 * Set the period again. Some counters can't be stopped, so when we
-	 * were stopped we simply disabled the IRQ source and the counter
-	 * may have been left counting. If we don't do this step then we may
-	 * get an interrupt too soon or *way* too late if the overflow has
-	 * happened since disabling.
-	 */
-	armpmu_event_set_period(event);
-	armpmu->enable(event);
-}
-
-static void
-armpmu_del(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	armpmu_stop(event, PERF_EF_UPDATE);
-	hw_events->events[idx] = NULL;
-	clear_bit(idx, hw_events->used_mask);
-	if (armpmu->clear_event_idx)
-		armpmu->clear_event_idx(hw_events, event);
-
-	perf_event_update_userpage(event);
-}
-
-static int
-armpmu_add(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* An event following a process won't be stopped earlier */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return -ENOENT;
-
-	perf_pmu_disable(event->pmu);
-
-	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(hw_events, event);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then make
-	 * sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	armpmu->disable(event);
-	hw_events->events[idx] = event;
-
-	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	if (flags & PERF_EF_START)
-		armpmu_start(event, PERF_EF_RELOAD);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	perf_pmu_enable(event->pmu);
-	return err;
-}
-
-static int
-validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
-			       struct perf_event *event)
-{
-	struct arm_pmu *armpmu;
-
-	if (is_software_event(event))
-		return 1;
-
-	/*
-	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
-	 * core perf code won't check that the pmu->ctx == leader->ctx
-	 * until after pmu->event_init(event).
-	 */
-	if (event->pmu != pmu)
-		return 0;
-
-	if (event->state < PERF_EVENT_STATE_OFF)
-		return 1;
-
-	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
-		return 1;
-
-	armpmu = to_arm_pmu(event->pmu);
-	return armpmu->get_event_idx(hw_events, event) >= 0;
-}
-
-static int
-validate_group(struct perf_event *event)
-{
-	struct perf_event *sibling, *leader = event->group_leader;
-	struct pmu_hw_events fake_pmu;
-
-	/*
-	 * Initialise the fake PMU. We only need to populate the
-	 * used_mask for the purposes of validation.
-	 */
-	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
-
-	if (!validate_event(event->pmu, &fake_pmu, leader))
-		return -EINVAL;
-
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(event->pmu, &fake_pmu, sibling))
-			return -EINVAL;
-	}
-
-	if (!validate_event(event->pmu, &fake_pmu, event))
-		return -EINVAL;
-
-	return 0;
-}
-
-static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
-{
-	struct arm_pmu *armpmu;
-	struct platform_device *plat_device;
-	struct arm_pmu_platdata *plat;
-	int ret;
-	u64 start_clock, finish_clock;
-
-	/*
-	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
-	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
-	 * do any necessary shifting, we just need to perform the first
-	 * dereference.
-	 */
-	armpmu = *(void **)dev;
-	plat_device = armpmu->plat_device;
-	plat = dev_get_platdata(&plat_device->dev);
-
-	start_clock = sched_clock();
-	if (plat && plat->handle_irq)
-		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-	else
-		ret = armpmu->handle_irq(irq, armpmu);
-	finish_clock = sched_clock();
-
-	perf_sample_event_took(finish_clock - start_clock);
-	return ret;
-}
-
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
-	armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
-	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
-	if (err) {
-		armpmu_release_hardware(armpmu);
-		return err;
-	}
-
-	return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	atomic_t *active_events	 = &armpmu->active_events;
-	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
-	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-		armpmu_release_hardware(armpmu);
-		mutex_unlock(pmu_reserve_mutex);
-	}
-}
-
-static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
-	return attr->exclude_idle || attr->exclude_user ||
-	       attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
-__hw_perf_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	int mapping;
-
-	mapping = armpmu->map_event(event);
-
-	if (mapping < 0) {
-		pr_debug("event %x:%llx not supported\n", event->attr.type,
-			 event->attr.config);
-		return mapping;
-	}
-
-	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has it's own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
-	 */
-	hwc->idx		= -1;
-	hwc->config_base	= 0;
-	hwc->config		= 0;
-	hwc->event_base		= 0;
-
-	/*
-	 * Check whether we need to exclude the counter from certain modes.
-	 */
-	if ((!armpmu->set_event_filter ||
-	     armpmu->set_event_filter(hwc, &event->attr)) &&
-	     event_requires_mode_exclusion(&event->attr)) {
-		pr_debug("ARM performance counters do not support "
-			 "mode exclusion\n");
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * Store the event encoding into the config_base field.
-	 */
-	hwc->config_base	    |= (unsigned long)mapping;
-
-	if (!is_sampling_event(event)) {
-		/*
-		 * For non-sampling runs, limit the sample_period to half
-		 * of the counter width. That way, the new counter value
-		 * is far less likely to overtake the previous one unless
-		 * you have some serious IRQ latency issues.
-		 */
-		hwc->sample_period  = armpmu->max_period >> 1;
-		hwc->last_period    = hwc->sample_period;
-		local64_set(&hwc->period_left, hwc->sample_period);
-	}
-
-	if (event->group_leader != event) {
-		if (validate_group(event) != 0)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int armpmu_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	int err = 0;
-	atomic_t *active_events = &armpmu->active_events;
-
-	/*
-	 * Reject CPU-affine events for CPUs that are of a different class to
-	 * that which this PMU handles. Process-following events (where
-	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
-	 * reject them later (in armpmu_add) if they're scheduled on a
-	 * different class of CPU.
-	 */
-	if (event->cpu != -1 &&
-		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
-		return -ENOENT;
-
-	/* does not support taken branch sampling */
-	if (has_branch_stack(event))
-		return -EOPNOTSUPP;
-
-	if (armpmu->map_event(event) == -ENOENT)
-		return -ENOENT;
-
-	event->destroy = hw_perf_event_destroy;
-
-	if (!atomic_inc_not_zero(active_events)) {
-		mutex_lock(&armpmu->reserve_mutex);
-		if (atomic_read(active_events) == 0)
-			err = armpmu_reserve_hardware(armpmu);
-
-		if (!err)
-			atomic_inc(active_events);
-		mutex_unlock(&armpmu->reserve_mutex);
-	}
-
-	if (err)
-		return err;
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err;
-}
-
-static void armpmu_enable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	if (enabled)
-		armpmu->start(armpmu);
-}
-
-static void armpmu_disable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	armpmu->stop(armpmu);
-}
-
-/*
- * In heterogeneous systems, events are specific to a particular
- * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
- * the same microarchitecture.
- */
-static int armpmu_filter_match(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	unsigned int cpu = smp_processor_id();
-	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-}
-
-static void armpmu_init(struct arm_pmu *armpmu)
-{
-	atomic_set(&armpmu->active_events, 0);
-	mutex_init(&armpmu->reserve_mutex);
-
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
-	};
-}
-
-int armpmu_register(struct arm_pmu *armpmu, int type)
-{
-	armpmu_init(armpmu);
-	pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
-}
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!__oprofile_cpu_pmu)
-		return NULL;
-
-	return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (__oprofile_cpu_pmu != NULL)
-		max_events = __oprofile_cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-static void cpu_pmu_enable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
-
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	disable_percpu_irq(irq);
-}
-
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
-{
-	int i, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
-				continue;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-		}
-	}
-}
-
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
-{
-	int i, err, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
-		return 0;
-	}
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &hw_events->percpu_pmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
-			return err;
-		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			err = 0;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq < 0)
-				continue;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			/*
-			 * If we have a single PMU interrupt that we can't shift,
-			 * assume that we're running on a uniprocessor machine and
-			 * continue. Otherwise, continue without this interrupt.
-			 */
-			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
-				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, cpu);
-				continue;
-			}
-
-			err = request_irq(irq, handler,
-					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-			if (err) {
-				pr_err("unable to request IRQ%d for ARM PMU counters\n",
-					irq);
-				return err;
-			}
-
-			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
-{
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
-
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
-}
-
-static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	int err;
-	int cpu;
-	struct pmu_hw_events __percpu *cpu_hw_events;
-
-	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
-	if (!cpu_hw_events)
-		return -ENOMEM;
-
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
-
-	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
-		events->percpu_pmu = cpu_pmu;
-	}
-
-	cpu_pmu->hw_events	= cpu_hw_events;
-	cpu_pmu->request_irq	= cpu_pmu_request_irq;
-	cpu_pmu->free_irq	= cpu_pmu_free_irq;
-
-	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu->reset)
-		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
-			 cpu_pmu, 1);
-
-	/* If no interrupts available, set the corresponding capability flag */
-	if (!platform_get_irq(cpu_pmu->plat_device, 0))
-		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
-	return 0;
-
-out_hw_events:
-	free_percpu(cpu_hw_events);
-	return err;
-}
-
-static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
-{
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-	free_percpu(cpu_pmu->hw_events);
-}
-
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
-			     const struct pmu_probe_info *info)
-{
-	int cpu = get_cpu();
-	unsigned int cpuid = read_cpuid_id();
-	int ret = -ENODEV;
-
-	pr_info("probing PMU on CPU %d\n", cpu);
-
-	for (; info->init != NULL; info++) {
-		if ((cpuid & info->mask) != info->cpuid)
-			continue;
-		ret = info->init(pmu);
-		break;
-	}
-
-	put_cpu();
-	return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
-	int i, irq, *irqs;
-	struct platform_device *pdev = pmu->plat_device;
-
-	/* Don't bother with PPIs; they're already affine */
-	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0 && irq_is_percpu(irq))
-		return 0;
-
-	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-	if (!irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < pdev->num_resources; ++i) {
-		struct device_node *dn;
-		int cpu;
-
-		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
-				      i);
-		if (!dn) {
-			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-				of_node_full_name(pdev->dev.of_node), i);
-			break;
-		}
-
-		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
-				break;
-
-		if (cpu >= nr_cpu_ids) {
-			pr_warn("Failed to find logical CPU for %s\n",
-				dn->name);
-			of_node_put(dn);
-			break;
-		}
-		of_node_put(dn);
-
-		irqs[i] = cpu;
-		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-	}
-
-	if (i == pdev->num_resources) {
-		pmu->irq_affinity = irqs;
-	} else {
-		kfree(irqs);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	return 0;
-}
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table)
-{
-	const struct of_device_id *of_id;
-	const int (*init_fn)(struct arm_pmu *);
-	struct device_node *node = pdev->dev.of_node;
-	struct arm_pmu *pmu;
-	int ret = -ENODEV;
-
-	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
-	if (!pmu) {
-		pr_info("failed to allocate PMU device!\n");
-		return -ENOMEM;
-	}
-
-	if (!__oprofile_cpu_pmu)
-		__oprofile_cpu_pmu = pmu;
-
-	pmu->plat_device = pdev;
-
-	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		ret = of_pmu_irq_cfg(pmu);
-		if (!ret)
-			ret = init_fn(pmu);
-	} else {
-		ret = probe_current_pmu(pmu, probe_table);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	if (ret) {
-		pr_info("failed to probe PMU!\n");
-		goto out_free;
-	}
-
-	ret = cpu_pmu_init(pmu);
-	if (ret)
-		goto out_free;
-
-	ret = armpmu_register(pmu, -1);
-	if (ret)
-		goto out_destroy;
-
-	return 0;
-
-out_destroy:
-	cpu_pmu_destroy(pmu);
-out_free:
-	pr_info("failed to register PMU devices!\n");
-	kfree(pmu);
-	return ret;
-}
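
This is a move rather than a removal: the arm_pmu core is factored out to drivers/perf/ so it can be shared beyond 32-bit ARM, which is why the v6/v7/XScale backends below swap <asm/pmu.h> for <linux/perf/arm_pmu.h>. The probe interface keeps the signature visible in the deleted code above; a hypothetical backend (names invented for illustration):

	#include <linux/perf/arm_pmu.h>

	static int my_cpu_pmu_probe(struct platform_device *pdev)
	{
		/* of_table maps DT compatibles to init functions;
		 * probe_table matches CPUID values when there is no node. */
		return arm_pmu_device_probe(pdev, my_of_match_table,
					    my_probe_table);
	}
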
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 09f83e4..09413e7 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -34,9 +34,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum armv6_perf_types {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f9b37f8..126dc67 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -21,11 +21,11 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 /*
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 304d056..aa0499e 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -16,9 +16,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum xscale_perf_types {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a..a3089ba 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@
 	ledtrig_cpu(CPU_LED_IDLE_END);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
-	cpu_die();
-}
-#endif
-
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -129,12 +122,36 @@
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so let's use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +163,9 @@
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 		{
-			unsigned int transbase, dac;
+			unsigned int transbase, dac = get_domain();
 			asm("mrc p15, 0, %0, c2, c0\n\t"
-			    "mrc p15, 0, %1, c3, c0\n"
-			    : "=r" (transbase), "=r" (dac));
+			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			  	transbase, dac);
 		}
@@ -210,6 +226,14 @@
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
deleted file mode 100644
index 2e60243..0000000
--- a/arch/arm/kernel/psci.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#define pr_fmt(fmt) "psci: " fmt
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
-#include <uapi/linux/psci.h>
-
-#include <asm/compiler.h>
-#include <asm/errno.h>
-#include <asm/psci.h>
-#include <asm/system_misc.h>
-
-struct psci_operations psci_ops;
-
-static int (*invoke_psci_fn)(u32, u32, u32, u32);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
-asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_AFFINITY_INFO,
-	PSCI_FN_MIGRATE_INFO_TYPE,
-	PSCI_FN_MAX,
-};
-
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_power_state_pack(struct psci_power_state state)
-{
-	return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
-			& PSCI_0_2_POWER_STATE_ID_MASK) |
-		((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
-		 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
-		((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
-		 & PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static int psci_get_version(void)
-{
-	int err;
-
-	err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-	return err;
-}
-
-static int psci_cpu_suspend(struct psci_power_state state,
-			    unsigned long entry_point)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(struct psci_power_state state)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
-	err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
-	return err;
-}
-
-static int psci_migrate_info_type(void)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
-	err = invoke_psci_fn(fn, 0, 0, 0);
-	return err;
-}
-
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * PSCI Function IDs for v0.2+ are well defined so use
- * standard values.
- */
-static int psci_0_2_init(struct device_node *np)
-{
-	int err, ver;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	ver = psci_get_version();
-
-	if (ver == PSCI_RET_NOT_SUPPORTED) {
-		/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
-		pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
-		err = -EOPNOTSUPP;
-		goto out_put_node;
-	} else {
-		pr_info("PSCIv%d.%d detected in firmware.\n",
-				PSCI_VERSION_MAJOR(ver),
-				PSCI_VERSION_MINOR(ver));
-
-		if (PSCI_VERSION_MAJOR(ver) == 0 &&
-				PSCI_VERSION_MINOR(ver) < 2) {
-			err = -EINVAL;
-			pr_err("Conflicting PSCI version detected.\n");
-			goto out_put_node;
-		}
-	}
-
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
-		PSCI_0_2_FN_MIGRATE_INFO_TYPE;
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id const psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci", .data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
-	{},
-};
-
-int __init psci_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
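
As with the PMU code, this file is relocated rather than dropped: the PSCI client code moves to drivers/firmware/ where both ARM ports can share it, and callers switch to psci_dt_init() (see the setup.c hunk below). Assuming the relocated entry point keeps the match-and-dispatch shape of the psci_init() deleted above, it looks roughly like:

	int __init psci_dt_init(void)
	{
		struct device_node *np;
		const struct of_device_id *matched_np;
		psci_initcall_t init_fn;

		np = of_find_matching_node_and_match(NULL, psci_of_match,
						     &matched_np);
		if (!np)
			return -ENODEV;

		init_fn = (psci_initcall_t)matched_np->data;
		return init_fn(np);
	}
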
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 28a1db4..61c04b0 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -17,6 +17,8 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/psci.h>
@@ -51,22 +53,34 @@
 {
 	if (psci_ops.cpu_on)
 		return psci_ops.cpu_on(cpu_logical_map(cpu),
-				       __pa(secondary_startup));
+					virt_to_idmap(&secondary_startup));
 	return -ENODEV;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+int psci_cpu_disable(unsigned int cpu)
+{
+	/* Fail early if we don't have CPU_OFF support */
+	if (!psci_ops.cpu_off)
+		return -EOPNOTSUPP;
+
+	/* Trusted OS will deny CPU_OFF */
+	if (psci_tos_resident_on(cpu))
+		return -EPERM;
+
+	return 0;
+}
+
 void __ref psci_cpu_die(unsigned int cpu)
 {
-       const struct psci_power_state ps = {
-               .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-       };
+	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;
 
-       if (psci_ops.cpu_off)
-               psci_ops.cpu_off(ps);
+	if (psci_ops.cpu_off)
+		psci_ops.cpu_off(state);
 
-       /* We should never return */
-       panic("psci: cpu %d failed to shutdown\n", cpu);
+	/* We should never return */
+	panic("psci: cpu %d failed to shutdown\n", cpu);
 }
 
 int __ref psci_cpu_kill(unsigned int cpu)
@@ -109,6 +123,7 @@
 struct smp_operations __initdata psci_smp_ops = {
 	.smp_boot_secondary	= psci_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= psci_cpu_disable,
 	.cpu_die		= psci_cpu_die,
 	.cpu_kill		= psci_cpu_kill,
 #endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b7..20edd34 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,12 +31,14 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/sort.h>
+#include <linux/psci.h>
 
 #include <asm/unified.h>
 #include <asm/cp15.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -954,6 +956,9 @@
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 
+	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+		early_fixmap_init();
+
 	parse_early_param();
 
 #ifdef CONFIG_MMU
@@ -972,7 +977,7 @@
 	unflatten_device_tree();
 
 	arm_dt_init_cpu_maps();
-	psci_init();
+	psci_dt_init();
 	xen_early_init();
 #ifdef CONFIG_SMP
 	if (is_smp()) {
@@ -1015,7 +1020,7 @@
 
 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
-		cpuinfo->cpu.hotpluggable = 1;
+		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
 		register_cpu(&cpuinfo->cpu, cpu);
 	}
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e..b6cda06 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	/*
+	 * The assembly code enters us with IRQs off, but it hasn't
+	 * informed the tracing code of that for efficiency reasons.
+	 * Update the trace code with the current status.
+	 */
+	trace_hardirqs_off();
 	do {
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3d6b782..48185a7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -21,6 +21,7 @@
 #include <linux/cpu.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
@@ -72,6 +73,7 @@
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
+	IPI_CPU_BACKTRACE = 15,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -175,13 +177,26 @@
 	if (smp_ops.cpu_disable)
 		return smp_ops.cpu_disable(cpu);
 
+	return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	/* cpu_die must be specified to support hotplug */
+	if (!smp_ops.cpu_die)
+		return 0;
+
+	if (smp_ops.cpu_can_disable)
+		return smp_ops.cpu_can_disable(cpu);
+
 	/*
 	 * By default, allow disabling all CPUs except the first one,
 	 * since this is special on a lot of platforms, e.g. because
 	 * of clock tick interrupts.
 	 */
-	return cpu == 0 ? -EPERM : 0;
+	return cpu != 0;
 }
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -253,7 +268,7 @@
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -630,6 +645,12 @@
 		irq_exit();
 		break;
 
+	case IPI_CPU_BACKTRACE:
+		irq_enter();
+		nmi_cpu_backtrace(regs);
+		irq_exit();
+		break;
+
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
 		        cpu, ipinr);
@@ -724,3 +745,13 @@
 core_initcall(register_cpufreq_notifier);
 
 #endif
+
+static void raise_nmi(cpumask_t *mask)
+{
+	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_all_cpu_backtrace(bool include_self)
+{
+	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+}
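
IPI_CPU_BACKTRACE is pinned at 15, the highest SGI number the GIC can deliver, keeping it clear of the ordinary IPI enum. Wiring it to the generic nmi_backtrace helper means "dump all CPUs" facilities now work on ARM; a usage sketch, assuming the API shown above:

	#include <linux/nmi.h>

	static void dump_everyone(void)
	{
		/* Raises IPI_CPU_BACKTRACE on the other CPUs and prints a
		 * backtrace for each; true also dumps the calling CPU. */
		arch_trigger_all_cpu_backtrace(true);
	}
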
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756..5b26e7e 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226..969f9d9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7..970d6c0 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9..1512beb 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b06..caf5019 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e..1712f13 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2..d72b909 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
 	if (n < 64)
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 265ffeb..80e277c 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -369,7 +369,7 @@
 		return;
 	}
 
-	sram_pool = gen_pool_get(&pdev->dev);
+	sram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!sram_pool) {
 		pr_warn("%s: sram pool unavailable!\n", __func__);
 		return;
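
gen_pool_get() grew a second parameter so one device can export several pools, looked up by name; NULL keeps the old behaviour of returning the device's default pool. A hypothetical named lookup for contrast (pool name invented):

	/* assumes the pool was registered with a matching name, e.g. via
	 * devm_gen_pool_create(dev, order, nid, "sram") */
	sram_pool = gen_pool_get(&pdev->dev, "sram");
	if (!sram_pool)
		return -ENODEV;
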
diff --git a/arch/arm/mach-clps711x/board-cdb89712.c b/arch/arm/mach-clps711x/board-cdb89712.c
index 1ec378c..972abdb 100644
--- a/arch/arm/mach-clps711x/board-cdb89712.c
+++ b/arch/arm/mach-clps711x/board-cdb89712.c
@@ -95,7 +95,7 @@
 
 static struct resource cdb89712_bootrom_resources[] __initdata = {
 	DEFINE_RES_NAMED(CS7_PHYS_BASE, SZ_128, "BOOTROM", IORESOURCE_MEM |
-			 IORESOURCE_CACHEABLE | IORESOURCE_READONLY),
+			 IORESOURCE_READONLY),
 };
 
 static struct platform_device cdb89712_bootrom_pdev __initdata = {
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 231fba0..6050a14 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,8 +28,8 @@
 #include <linux/reboot.h>
 #include <linux/amba/bus.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
 
-#include <asm/psci.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 7f2bd85..4003116 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,19 +16,21 @@
 
 #include <linux/cpu_pm.h>
 #include <linux/init.h>
+#include <linux/psci.h>
 #include <linux/suspend.h>
 
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define HIGHBANK_SUSPEND_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int highbank_suspend_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-		.affinity_level = 1,
-	};
-
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
 }
 
 static int highbank_pm_enter(suspend_state_t state)
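
With the move to the generic PSCI firmware driver, psci_ops.cpu_suspend()
takes the raw 32-bit power_state parameter rather than a struct
psci_power_state, so the platform composes the value itself from the uapi
bit-field macros. A hedged sketch of how the three fields pack into the
parameter (make_power_state is a hypothetical helper; the shift macros
are the PSCI 0.2 ones used in HIGHBANK_SUSPEND_PARAM above):

    #include <linux/types.h>
    #include <uapi/linux/psci.h>

    /* Pack state ID, affinity level and state type into a PSCI 0.2
     * power_state parameter -- the same layout as the #define above. */
    static u32 make_power_state(u32 id, u32 affl, u32 type)
    {
            return (id << PSCI_0_2_POWER_STATE_ID_SHIFT) |
                   (affl << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
                   (type << PSCI_0_2_POWER_STATE_TYPE_SHIFT);
    }
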
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index 1885676..532d4b0 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -297,7 +297,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 93ecf55..8ff8fc0 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -451,7 +451,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-mmp/include/mach/regs-rtc.h b/arch/arm/mach-mmp/include/mach/regs-rtc.h
deleted file mode 100644
index 5bff886..0000000
--- a/arch/arm/mach-mmp/include/mach/regs-rtc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __ASM_MACH_REGS_RTC_H
-#define __ASM_MACH_REGS_RTC_H
-
-#include <mach/addr-map.h>
-
-#define RTC_VIRT_BASE	(APB_VIRT_BASE + 0x10000)
-#define RTC_REG(x)	(*((volatile u32 __iomem *)(RTC_VIRT_BASE + (x))))
-
-/*
- * Real Time Clock
- */
-
-#define RCNR		RTC_REG(0x00)	/* RTC Count Register */
-#define RTAR		RTC_REG(0x04)	/* RTC Alarm Register */
-#define RTSR		RTC_REG(0x08)	/* RTC Status Register */
-#define RTTR		RTC_REG(0x0C)	/* RTC Timer Trim Register */
-
-#define RTSR_HZE	(1 << 3)	/* HZ interrupt enable */
-#define RTSR_ALE	(1 << 2)	/* RTC alarm interrupt enable */
-#define RTSR_HZ		(1 << 1)	/* HZ rising-edge detected */
-#define RTSR_AL		(1 << 0)	/* RTC alarm detected */
-
-#endif /* __ASM_MACH_REGS_RTC_H */
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf..7db5870 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <asm/mach-types.h>
+#include <asm/outercache.h>
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/addr-map.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9e2a684..07d2e10 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select OMAP_INTERCONNECT
+	select OMAP_INTERCONNECT_BARRIER
 	select PL310_ERRATA_588369 if CACHE_L2X0
 	select PL310_ERRATA_727915 if CACHE_L2X0
 	select PM_OPP if PM
@@ -46,6 +47,7 @@
 	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -71,6 +73,7 @@
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config ARCH_OMAP2PLUS
 	bool
@@ -92,6 +95,10 @@
 	help
 	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
+config OMAP_INTERCONNECT_BARRIER
+	bool
+	select ARM_HEAVY_MB
+
 
 if ARCH_OMAP2PLUS
 
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e..484cdad 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@
 void __init omap_reserve(void)
 {
 	omap_secure_ram_reserve_memblock();
+	omap_barrier_reserve_memblock();
 }
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 749d50b..92e92cf 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@
 }
 #endif
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
 /* This gets called from mach-omap2/io.c, do not call this */
 void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
 
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8..0000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *  Santosh Shilimkar <santosh.shilimkar@ti.com>
- *  Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb()		dsb()
-#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb()		wmb()
-
-#endif	/* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 6a4822d..980c937 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -352,6 +352,7 @@
 void __init omap4_map_io(void)
 {
 	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
@@ -359,6 +360,7 @@
 void __init omap5_map_io(void)
 {
 	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350ee..949696b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@
 
 #define IRQ_LOCALTIMER		29
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement the memory barrier on the DRAM path */
+#define OMAP4_DRAM_BARRIER_VA			0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These asynchronous bridges can be found on
+ * paths between the MPU to EMIF, and the MPU to L3 interconnects.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters before writes to those system masters are seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+	if (dram_sync)
+		writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating pointer
+ * misalignment that will corrupt the next transfers on that data path
+ * until the next reset of the system. There is no recovery procedure
+ * once the issue is hit; the path remains consistently broken.
+ *
+ * Async bridges can be found on paths between MPU to EMIF and MPU to L3
+ * interconnects.
+ *
+ * This situation can happen only when idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing
+ * WFI on the CPU).
+ *
+ * The work-around for this erratum requires all the initiators connected
+ * through an async bridge to ensure that the data path is properly
+ * drained before issuing WFI. This condition is met if one strongly
+ * ordered access is performed to the target right before executing WFI.
+ *
+ * In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to
+ * be drained. The IO barrier ensures that there is no synchronisation
+ * loss when initiators operate on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
+ * path, and is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+	if (dram_sync && sram_sync) {
+		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+		isb();
+	}
+}
+
+static int __init omap4_sram_init(void)
+{
+	struct device_node *np;
+	struct gen_pool *sram_pool;
+
+	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+	if (!np)
+		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+			__func__);
+	sram_pool = of_gen_pool_get(np, "sram", 0);
+	if (!sram_pool)
+		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+			__func__);
+	else
+		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+	return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page of physical memory for the barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+	struct map_desc dram_io_desc[1];
+
+	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+	dram_io_desc[0].length = dram_sync_size;
+	dram_io_desc[0].type = MT_MEMORY_RW_SO;
+	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+		&dram_sync_paddr, dram_sync);
+
+	soc_mb = omap4_mb;
+}
+
+#endif
+
 void gic_dist_disable(void)
 {
 	if (gic_dist_base_addr)
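
For the i688 work-around, the strongly ordered read-backs on both the
DRAM and SRAM aliases have to happen immediately before the WFI, which is
what the sleep44xx.S change further down wires up. A hedged C rendering
of that entry sequence (omap_enter_wfi is a hypothetical name; the real
code is the assembly in omap_do_wfi, and the wfi()/isb() helpers are
assumed from asm/barrier.h):

    /* C rendering of the low-power entry path: drain both async bridge
     * paths with strongly ordered accesses, then idle the CPU. */
    static void omap_enter_wfi(void)
    {
    #ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
            omap_interconnect_sync();  /* SO read+write on DRAM and SRAM */
    #endif
            isb();                     /* complete all prior instructions */
            wfi();                     /* now safe to disconnect the MPU */
    }
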
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb94..9b09d85 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@
 
 #endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
 
-ENTRY(omap_bus_sync)
-	ret	lr
-ENDPROC(omap_bus_sync)
-
 ENTRY(omap_do_wfi)
 	stmfd	sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
 	/* Drain interconnect write buffers. */
-	bl omap_bus_sync
+	bl	omap_interconnect_sync
+#endif
 
 	/*
 	 * Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08e..83e94c9 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index e6ce669..c624732 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -440,25 +440,11 @@
 	.resource       = pxa_rtc_resources,
 };
 
-static struct resource sa1100_rtc_resources[] = {
-	{
-		.start  = IRQ_RTC1Hz,
-		.end    = IRQ_RTC1Hz,
-		.name	= "rtc 1Hz",
-		.flags  = IORESOURCE_IRQ,
-	}, {
-		.start  = IRQ_RTCAlrm,
-		.end    = IRQ_RTCAlrm,
-		.name	= "rtc alarm",
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
 struct platform_device sa1100_device_rtc = {
 	.name		= "sa1100-rtc",
 	.id		= -1,
-	.num_resources	= ARRAY_SIZE(sa1100_rtc_resources),
-	.resource	= sa1100_rtc_resources,
+	.num_resources  = ARRAY_SIZE(pxa_rtc_resources),
+	.resource       = pxa_rtc_resources,
 };
 
 static struct resource pxa_ac97_resources[] = {
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index e6aae9e..221260d 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -282,7 +282,6 @@
 	&pxa_device_asoc_ssp2,
 	&pxa_device_asoc_ssp3,
 	&pxa_device_asoc_platform,
-	&sa1100_device_rtc,
 	&pxa_device_rtc,
 	&pxa27x_device_ssp1,
 	&pxa27x_device_ssp2,
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 1656384..ce0f8d6 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -394,7 +394,6 @@
 	&pxa_device_asoc_ssp3,
 	&pxa_device_asoc_ssp4,
 	&pxa_device_asoc_platform,
-	&sa1100_device_rtc,
 	&pxa_device_rtc,
 	&pxa3xx_device_ssp1,
 	&pxa3xx_device_ssp2,
diff --git a/arch/arm/mach-sa1100/include/mach/SA-1100.h b/arch/arm/mach-sa1100/include/mach/SA-1100.h
index 0ac6cc0..7972617 100644
--- a/arch/arm/mach-sa1100/include/mach/SA-1100.h
+++ b/arch/arm/mach-sa1100/include/mach/SA-1100.h
@@ -858,40 +858,6 @@
 
 
 /*
- * Real-Time Clock (RTC) control registers
- *
- * Registers
- *    RTAR      	Real-Time Clock (RTC) Alarm Register (read/write).
- *    RCNR      	Real-Time Clock (RTC) CouNt Register (read/write).
- *    RTTR      	Real-Time Clock (RTC) Trim Register (read/write).
- *    RTSR      	Real-Time Clock (RTC) Status Register (read/write).
- *
- * Clocks
- *    frtx, Trtx	Frequency, period of the real-time clock crystal
- *              	(32.768 kHz nominal).
- *    frtc, Trtc	Frequency, period of the real-time clock counter
- *              	(1 Hz nominal).
- */
-
-#define RTAR		__REG(0x90010000)  /* RTC Alarm Reg. */
-#define RCNR		__REG(0x90010004)  /* RTC CouNt Reg. */
-#define RTTR		__REG(0x90010008)  /* RTC Trim Reg. */
-#define RTSR		__REG(0x90010010)  /* RTC Status Reg. */
-
-#define RTTR_C  	Fld (16, 0)	/* clock divider Count - 1         */
-#define RTTR_D  	Fld (10, 16)	/* trim Delete count               */
-                	        	/* frtc = (1023*(C + 1) - D)*frtx/ */
-                	        	/*        (1023*(C + 1)^2)         */
-                	        	/* Trtc = (1023*(C + 1)^2)*Trtx/   */
-                	        	/*        (1023*(C + 1) - D)       */
-
-#define RTSR_AL 	0x00000001	/* ALarm detected                  */
-#define RTSR_HZ 	0x00000002	/* 1 Hz clock detected             */
-#define RTSR_ALE	0x00000004	/* ALarm interrupt Enable          */
-#define RTSR_HZE	0x00000008	/* 1 Hz clock interrupt Enable     */
-
-
-/*
  * Power Manager (PM) control registers
  *
  * Registers
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b..8d27ec5 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@
 extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
 			      unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09..b23378f 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 {
-	return 0; /* Hotplug of any CPU is supported */
+	return true; /* Hotplug of any CPU is supported */
 }
 #endif
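
The rename tracks an smp_operations interface change: the old
.cpu_disable hook returned an errno-style int, while the new
.cpu_can_disable predicate returns bool, which is all this implementation
ever expressed. A sketch of how a machine wires up the new hook (the ops
instance and names are illustrative; the field comes from struct
smp_operations in asm/smp.h):

    #include <asm/smp.h>

    static bool example_cpu_can_disable(unsigned int cpu)
    {
            return true;  /* any CPU may be hot-unplugged on this SoC */
    }

    /* Illustrative ops table, mirroring the smp-r8a779x changes below. */
    static const struct smp_operations example_smp_ops __initconst = {
    #ifdef CONFIG_HOTPLUG_CPU
            .cpu_can_disable = example_cpu_can_disable,
    #endif
    };
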
diff --git a/arch/arm/mach-shmobile/pm-rcar.c b/arch/arm/mach-shmobile/pm-rcar.c
index 4092ad1..0af05d2 100644
--- a/arch/arm/mach-shmobile/pm-rcar.c
+++ b/arch/arm/mach-shmobile/pm-rcar.c
@@ -12,7 +12,7 @@
 #include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "pm-rcar.h"
 
 /* SYSC Common */
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 2ef0054..4b33d43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@
 	.smp_prepare_cpus	= r8a7790_smp_prepare_cpus,
 	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db7..b2508c0 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@
 	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d03aa11..bc2824a 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -60,7 +60,7 @@
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_scu_cpu_die,
 	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
index 6a4199f..c378ab0 100644
--- a/arch/arm/mach-socfpga/pm.c
+++ b/arch/arm/mach-socfpga/pm.c
@@ -56,7 +56,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bed..780bd13 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index ba708ce..f805603 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -20,10 +20,10 @@
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 #include <linux/random.h>
 
-#include <asm/pmu.h>
 #include <asm/mach/map.h>
 
 #include "setup.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976..df7537f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@
 
 config OUTER_CACHE_SYNC
 	bool
+	select ARM_HEAVY_MB
 	help
 	  The outer cache has an outer_cache_fns.sync function pointer
 	  that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+	bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
 	bool
 	help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4..b3b31e3 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4..a6a381a 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a..00ab011 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c..8801a15 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad0..e8d0e08 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f398258..6d8e8e3 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68e..4509bee 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
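
The old do_ldrd_abort macro branched straight to do_DataAbort on a match,
which no longer fits now that each caller must run uaccess_disable first;
teq_ldrd merely sets the condition flags and leaves the branch to the
call site. The encoding test itself is unchanged and reads in C as a
single mask-and-compare (insn_is_ldrd is a hypothetical helper for
illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* LDRD on ARM: bits [27:25] == 000, [20] == 0 and [7:4] == 1101 --
     * exactly the pattern teq_ldrd checks above (0x0e100000 | 0xf0). */
    static bool insn_is_ldrd(uint32_t insn)
    {
            return (insn & 0x0e1000f0) == 0x000000d0;
    }
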
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e..5c1b7a7 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d33..493692d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
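
The new "arm,shared-override" handling follows the usual aux_val/aux_mask
convention: setting the bit in aux_val and clearing it from aux_mask pins
the bit to 1 no matter what the bootloader left in the auxiliary control
register. A sketch of the combining step (apply_aux paraphrases what the
l2x0 core does with these two words; the helper name is illustrative):

    #include <linux/types.h>

    /* Keep only the hardware bits allowed by the mask, then force in
     * the requested bits. */
    static u32 apply_aux(u32 hw_val, u32 aux_val, u32 aux_mask)
    {
            return (hw_val & aux_mask) | aux_val;
    }
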
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d3d6aa..bf35abc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 0000000..70ea6852
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area 		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af..1ec8e75 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
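
arm_heavy_mb() becomes the single funnel for "heavy" barriers: it issues
the outer-cache sync when that is built in, then the SoC-specific hook.
With CONFIG_ARM_HEAVY_MB set, the generic mb()/wmb() macros are routed
through it roughly as follows (a paraphrase of the asm/barrier.h wiring,
not a copy of it):

    #ifdef CONFIG_ARM_HEAVY_MB
    extern void arm_heavy_mb(void);
    #define __arm_heavy_mb(x...)    do { dsb(x); arm_heavy_mb(); } while (0)
    #else
    #define __arm_heavy_mb(x...)    dsb(x)
    #endif

    #define mb()    __arm_heavy_mb()
    #define wmb()   __arm_heavy_mb(st)
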
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa7..9df5f09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
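
The FIX_KMAP_BEGIN offset matters because the fixmap region is now shared
with permanent fixmap entries: the kmap_atomic slots no longer start at
index 0, so each CPU's slots must be placed after FIX_KMAP_BEGIN before
converting to a virtual address. A sketch of the slot selection being
fixed here (kmap_slot_vaddr is a hypothetical helper):

    #include <asm/fixmap.h>
    #include <asm/kmap_types.h>

    /* Each CPU owns KM_TYPE_NR consecutive fixmap slots, located after
     * the permanent entries at the start of the region. */
    static unsigned long kmap_slot_vaddr(int type, int cpu)
    {
            unsigned int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;

            return __fix_to_virt(idx);  /* grows down from FIXADDR_TOP */
    }
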
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a..7cd1514 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except for the early fixmap, we might
+ * remove debug device mappings.  This means earlycon can be used to
+ * debug this function.  Any other function or debugging method which
+ * may touch any device _will_ crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear the page tables, except for the top pmd used by the
+	 * early fixmaps.
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1489,6 +1533,35 @@
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only I/O device mappings are supported at the moment */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@
 	map_lowmem();
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f1..e683db1 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@
 		if (!new_pte)
 			goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain.  This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 6c09cc4..c50c8d3 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -45,13 +45,6 @@
 unsigned long xen_released_pages;
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
-/* TODO: to be removed */
-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-
-int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
-EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
-
 static __read_mostly unsigned int xen_events_irq;
 
 static __initdata struct device_node *xen_node;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b7b9cea..7d95663 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@
 	select ARM_GIC_V2M if PCI_MSI
 	select ARM_GIC_V3
 	select ARM_GIC_V3_ITS if PCI_MSI
+	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -28,7 +29,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
-	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IRQ_PROBE
@@ -53,6 +54,7 @@
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -104,6 +106,10 @@
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -113,6 +119,14 @@
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	def_bool y
+	depends on GENERIC_BUG
+
 config GENERIC_HWEIGHT
 	def_bool y
 
@@ -137,6 +151,9 @@
 config NEED_SG_DMA_LENGTH
 	def_bool y
 
+config SMP
+	def_bool y
+
 config SWIOTLB
 	def_bool y
 
@@ -371,22 +388,8 @@
        help
          Say Y if you plan on running a kernel in big-endian mode.
 
-config SMP
-	bool "Symmetric Multi-Processing"
-	help
-	  This enables support for systems with more than one CPU.  If
-	  you say N here, the kernel will run on single and
-	  multiprocessor machines, but will use only one CPU of a
-	  multiprocessor machine. If you say Y here, the kernel will run
-	  on many, but not all, single processor machines. On a single
-	  processor machine, the kernel will run faster if you say N
-	  here.
-
-	  If you don't know what to do here, say N.
-
 config SCHED_MC
 	bool "Multi-core scheduler support"
-	depends on SMP
 	help
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly
@@ -394,7 +397,6 @@
 
 config SCHED_SMT
 	bool "SMT scheduler support"
-	depends on SMP
 	help
 	  Improves the CPU scheduler's decision making when dealing with
 	  MultiThreading at a cost of slightly increased overhead in some
@@ -403,23 +405,17 @@
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
-	depends on SMP
 	# These have to remain sorted largest to smallest
 	default "64"
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
-	depends on SMP
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
 source kernel/Kconfig.preempt
 
-config UP_LATE_INIT
-       def_bool y
-       depends on !SMP
-
 config HZ
 	int
 	default 100
@@ -561,6 +557,53 @@
 	  If unsure, say Y
 endif
 
+menu "ARMv8.1 architectural features"
+
+config ARM64_HW_AFDBM
+	bool "Support for hardware updates of the Access and Dirty page flags"
+	default y
+	help
+	  The ARMv8.1 architecture extensions introduce support for
+	  hardware updates of the access and dirty information in page
+	  table entries. When enabled in TCR_EL1 (HA and HD bits) on
+	  capable processors, accesses to pages with PTE_AF cleared will
+	  set this bit instead of raising an access flag fault.
+	  Similarly, writes to read-only pages with the DBM bit set will
+	  clear the read-only bit (AP[2]) instead of raising a
+	  permission fault.
+
+	  Kernels built with this configuration option enabled continue
+	  to work on pre-ARMv8.1 hardware and the performance impact is
+	  minimal. If unsure, say Y.
+
+config ARM64_PAN
+	bool "Enable support for Privileged Access Never (PAN)"
+	default y
+	help
+	  Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+	  prevents the kernel or hypervisor from accessing user-space (EL0)
+	  memory directly.
+
+	  Choosing this option will cause any unprotected (not using
+	  copy_to_user et al) memory access to fail with a permission fault.
+
+	  The feature is detected at runtime, and will remain as a 'nop'
+	  instruction if the CPU does not implement the feature.
+
+config ARM64_LSE_ATOMICS
+	bool "Atomic instructions"
+	help
+	  As part of the Large System Extensions, ARMv8.1 introduces new
+	  atomic instructions that are designed specifically to scale in
+	  very large systems.
+
+	  Say Y here to make use of these instructions for the in-kernel
+	  atomic routines. This incurs a small overhead on CPUs that do
+	  not support these instructions and requires the kernel to be
+	  built with binutils >= 2.25.
+
+endmenu
+
 endmenu
 
 menu "Boot options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d2a925..15ff5b4 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -17,7 +17,18 @@
 
 KBUILD_DEFCONFIG := defconfig
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only
+# Check for binutils support for specific extensions
+lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
+
+ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
+  ifeq ($(lseinstr),)
+$(warning LSE atomics not supported by binutils)
+  endif
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 AS		+= -EB
@@ -58,7 +69,10 @@
 
 boot := arch/arm64/boot
 
-Image Image.gz: vmlinux
+Image: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install: vmlinux
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab..abcbba2 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -19,9 +19,21 @@
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
+$(obj)/Image.bz2: $(obj)/Image FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
+$(obj)/Image.lz4: $(obj)/Image FORCE
+	$(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+	$(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+	$(call if_changed,lzo)
+
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 406485e..208cec0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
 
-#include <linux/mm.h>
 #include <linux/irqchip/arm-gic-acpi.h>
+#include <linux/mm.h>
+#include <linux/psci.h>
 
 #include <asm/cputype.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c..d56ec07 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -3,6 +3,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/init.h>
+#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -15,7 +17,7 @@
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
-void apply_alternatives_all(void);
+void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
 void free_alternatives_memory(void);
 
@@ -40,7 +42,8 @@
  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
  */
-#define ALTERNATIVE(oldinstr, newinstr, feature)			\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
@@ -53,7 +56,11 @@
 	"664:\n\t"							\
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
-	".org	. - (662b-661b) + (664b-663b)\n"
+	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #else
 
@@ -65,7 +72,8 @@
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
+.macro alternative_insn insn1, insn2, cap, enable = 1
+	.if \enable
 661:	\insn1
 662:	.pushsection .altinstructions, "a"
 	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
@@ -75,8 +83,70 @@
 664:	.popsection
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
+	.endif
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap, enable = 1
+	.if \enable
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+	.endif
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+.macro alternative_else, enable = 1
+	.if \enable
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+	.endif
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif, enable = 1
+	.if \enable
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+	.endif
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
+	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+
 #endif  /*  __ASSEMBLY__  */
 
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified but not selected, the whole block
+ *      will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...)   \
+	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
 #endif /* __ASM_ALTERNATIVE_H */
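
The extra varargs parameter lets a caller gate an entire alternative on a
Kconfig symbol, so that (as the PAN series relies on) the patched
sequence disappears completely when the option is off. A hedged usage
sketch (EXAMPLE_CAP and CONFIG_EXAMPLE_FEATURE are placeholders, not
symbols from this patch; both instructions are 4 bytes, as the macro
requires):

    #include <asm/alternative.h>

    static inline void maybe_patched(void)
    {
            /* With CONFIG_EXAMPLE_FEATURE=n the whole block -- the nop
             * included -- assembles to nothing; with it =y, the nop is
             * patched to sevl at boot on CPUs reporting EXAMPLE_CAP. */
            asm(ALTERNATIVE("nop", "sevl",
                            EXAMPLE_CAP, CONFIG_EXAMPLE_FEATURE));
    }
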
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 144b64a..b51f2cc 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -50,18 +50,6 @@
 	.endm
 
 /*
- * Save/disable and restore interrupts.
- */
-	.macro	save_and_disable_irqs, olddaif
-	mrs	\olddaif, daif
-	disable_irq
-	.endm
-
-	.macro	restore_irqs, olddaif
-	msr	daif, \olddaif
-	.endm
-
-/*
  * Enable and disable debug exceptions.
  */
 	.macro	disable_dbg
@@ -103,9 +91,7 @@
  * SMP data memory barrier
  */
 	.macro	smp_dmb, opt
-#ifdef CONFIG_SMP
 	dmb	\opt
-#endif
 	.endm
 
 #define USER(l, x...)				\
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051..35a6778 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -24,233 +24,72 @@
 #include <linux/types.h>
 
 #include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
+#include <asm/lse.h>
 
 #ifdef __KERNEL__
 
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
+#define __ARM64_IN_ATOMIC_IMPL
 
-/*
- * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
 
-#define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	unsigned long tmp;						\
-	int result;							\
+#undef __ARM64_IN_ATOMIC_IMPL
+
+#include <asm/cmpxchg.h>
+
+#define ___atomic_add_unless(v, a, u, sfx)				\
+({									\
+	typeof((v)->counter) c, old;					\
 									\
-	asm volatile("// atomic_" #op "\n"				\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
+	c = atomic##sfx##_read(v);					\
+	while (c != (u) &&						\
+	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)	\
+		c = old;						\
+	c;								\
+ })
 
-#define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned long tmp;						\
-	int result;							\
-									\
-	asm volatile("// atomic_" #op "_return\n"			\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stlxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
+#define ATOMIC_INIT(i)	{ (i) }
 
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN(op, asm_op)
+#define atomic_read(v)			READ_ONCE((v)->counter)
+#define atomic_set(v, i)		(((v)->counter) = (i))
+#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	unsigned long tmp;
-	int oldval;
-
-	smp_mb();
-
-	asm volatile("// atomic_cmpxchg\n"
-"1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
-
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_andnot			atomic_andnot
 
 /*
  * 64-bit atomic operations.
  */
-#define ATOMIC64_INIT(i) { (i) }
+#define ATOMIC64_INIT			ATOMIC_INIT
+#define atomic64_read			atomic_read
+#define atomic64_set			atomic_set
+#define atomic64_xchg			atomic_xchg
+#define atomic64_cmpxchg		atomic_cmpxchg
 
-#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i)	(((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)			\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "\n"				\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
-
-#define ATOMIC64_OP_RETURN(op, asm_op)					\
-static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "_return\n"			\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stlxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
-
-#define ATOMIC64_OPS(op, asm_op)					\
-	ATOMIC64_OP(op, asm_op)						\
-	ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	long oldval;
-	unsigned long res;
-
-	smp_mb();
-
-	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.mi	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	:
-	: "cc", "memory");
-
-	return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-
-	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
-		c = old;
-
-	return c != u;
-}
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u)	(___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot			atomic64_andnot
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #endif
 #endif
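
With the LL/SC and LSE implementations split into their own headers, this
file keeps only the derived operations, and ___atomic_add_unless is one
macro shared by the 32- and 64-bit variants via the sfx token-pasting
argument. Expanded by hand for the 32-bit case it is the classic
cmpxchg loop (example_add_unless is an illustrative name for the
expansion, not new code):

    /* Retry while other CPUs race the update; stop early if the value
     * hits 'u'. Returns the value observed before any successful add. */
    static inline int example_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v), old;

            while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                    c = old;

            return c;
    }
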
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
new file mode 100644
index 0000000..b3b5c4a
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -0,0 +1,247 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LL_SC_H
+#define __ASM_ATOMIC_LL_SC_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+/*
+ * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ *
+ * NOTE: these functions do *not* follow the PCS and must explicitly
+ * save any clobbered registers other than x0 (regardless of return
+ * value).  This is achieved through -fcall-saved-* compiler flags for
+ * this file, which unfortunately don't work on a per-function basis
+ * (the optimize attribute silently ignores these options).
+ */
+
+#define ATOMIC_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic_##op);
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stlxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic_##op##_return);
+
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE long							\
+__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))		\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stlxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+__LL_SC_INLINE long
+__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_dec_if_positive\n"
+"	prfm	pstl1strm, %2\n"
+"1:	ldxr	%0, %2\n"
+"	subs	%0, %0, #1\n"
+"	b.lt	2f\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
+
+	return result;
+}
+__LL_SC_EXPORT(atomic64_dec_if_positive);
+
+#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)			\
+__LL_SC_INLINE unsigned long						\
+__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
+				     unsigned long old,			\
+				     unsigned long new))		\
+{									\
+	unsigned long tmp, oldval;					\
+									\
+	asm volatile(							\
+	"	prfm	pstl1strm, %[v]\n"				\
+	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
+	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
+	"	cbnz	%" #w "[tmp], 2f\n"				\
+	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
+	"	cbnz	%w[tmp], 1b\n"					\
+	"	" #mb "\n"						\
+	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
+	"2:"								\
+	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [old] "Lr" (old), [new] "r" (new)				\
+	: cl);								\
+									\
+	return oldval;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_case_##name);
+
+__CMPXCHG_CASE(w, b,    1,        ,  ,         )
+__CMPXCHG_CASE(w, h,    2,        ,  ,         )
+__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
+__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
+__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
+__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name, mb, rel, cl)				\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
+				      unsigned long old2,		\
+				      unsigned long new1,		\
+				      unsigned long new2,		\
+				      volatile void *ptr))		\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	asm volatile("// __cmpxchg_double" #name "\n"			\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxp	%0, %1, %2\n"					\
+	"	eor	%0, %0, %3\n"					\
+	"	eor	%1, %1, %4\n"					\
+	"	orr	%1, %0, %1\n"					\
+	"	cbnz	%1, 2f\n"					\
+	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
+	"	cbnz	%w0, 1b\n"					\
+	"	" #mb "\n"						\
+	"2:"								\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
+	: cl);								\
+									\
+	return ret;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_double##name);
+
+__CMPXCHG_DBL(   ,        ,  ,         )
+__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
+
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LL_SC_H */
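
To make the generated code concrete: with the LSE atomics compiled out, __LL_SC_INLINE is "static inline", __LL_SC_PREFIX(x) is plain x and __LL_SC_EXPORT() expands to nothing (see asm/lse.h below), so ATOMIC_OP(add, add) produces roughly:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%w0, %2\n"
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

With LSE enabled, the same macros instead emit out-of-line __ll_sc_atomic_* routines that the wrappers in atomic_lse.h fall back to as the non-LSE alternative.
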
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
new file mode 100644
index 0000000..55d740e
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -0,0 +1,391 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LSE_H
+#define __ASM_ATOMIC_LSE_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
+
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
+	"	stclr	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
+	"	stset	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
+	"	steor	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
+	"	stadd	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(add_return),
+	/* LSE atomics */
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+static inline void atomic_and(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(and),
+	/* LSE atomics */
+	"	mvn	%w[i], %w[i]\n"
+	"	stclr	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub),
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	stadd	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
+
+static inline void atomic64_andnot(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
+	"	stclr	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
+	"	stset	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
+	"	steor	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
+	"	stadd	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(add_return),
+	/* LSE atomics */
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(and),
+	/* LSE atomics */
+	"	mvn	%[i], %[i]\n"
+	"	stclr	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub),
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	stadd	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	register long x0 asm ("x0") = (long)v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(dec_if_positive)
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	ldr	x30, %[v]\n"
+	"	subs	%[ret], x30, #1\n"
+	"	b.lt	2f\n"
+	"	casal	x30, %[ret], %[v]\n"
+	"	sub	x30, x30, #1\n"
+	"	sub	x30, x30, %[ret]\n"
+	"	cbnz	x30, 1b\n"
+	"2:")
+	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	:
+	: "x30", "cc", "memory");
+
+	return x0;
+}
+
+#undef __LL_SC_ATOMIC64
+
+#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)
+
+#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
+						  unsigned long old,	\
+						  unsigned long new)	\
+{									\
+	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
+	register unsigned long x1 asm ("x1") = old;			\
+	register unsigned long x2 asm ("x2") = new;			\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+		__LL_SC_CMPXCHG(name)					\
+	"	nop",							\
+	/* LSE atomics */						\
+	"	mov	" #w "30, %" #w "[old]\n"			\
+	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
+	"	mov	%" #w "[ret], " #w "30")			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	: [old] "r" (x1), [new] "r" (x2)				\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_CASE(w, b,    1,   )
+__CMPXCHG_CASE(w, h,    2,   )
+__CMPXCHG_CASE(w,  ,    4,   )
+__CMPXCHG_CASE(x,  ,    8,   )
+__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+__CMPXCHG_CASE(w,  , mb_4, al, "memory")
+__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+
+#undef __LL_SC_CMPXCHG
+#undef __CMPXCHG_CASE
+
+#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
+
+#define __CMPXCHG_DBL(name, mb, cl...)					\
+static inline int __cmpxchg_double##name(unsigned long old1,		\
+					 unsigned long old2,		\
+					 unsigned long new1,		\
+					 unsigned long new2,		\
+					 volatile void *ptr)		\
+{									\
+	unsigned long oldval1 = old1;					\
+	unsigned long oldval2 = old2;					\
+	register unsigned long x0 asm ("x0") = old1;			\
+	register unsigned long x1 asm ("x1") = old2;			\
+	register unsigned long x2 asm ("x2") = new1;			\
+	register unsigned long x3 asm ("x3") = new2;			\
+	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+	"	nop\n"							\
+	"	nop\n"							\
+	__LL_SC_CMPXCHG_DBL(name),					\
+	/* LSE atomics */						\
+	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+	"	eor	%[old1], %[old1], %[oldval1]\n"			\
+	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+	"	orr	%[old1], %[old1], %[old2]")			\
+	: [old1] "+r" (x0), [old2] "+r" (x1),				\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_DBL(   ,   )
+__CMPXCHG_DBL(_mb, al, "memory")
+
+#undef __LL_SC_CMPXCHG_DBL
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LSE_H */
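
As a worked example of the cmpxchg wrappers above, the LSE path of __cmpxchg_case_mb_4 (w == "w", empty sz, mb == "al") reduces to:

	mov	w30, %w[old]
	casal	w30, %w[new], %[v]
	mov	%w[ret], w30

CASAL compares *v against w30 and stores new only on a match, with acquire+release semantics; w30 is always left holding the value observed in memory, which the caller compares against old to detect success.
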
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4..624f967 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,28 +35,6 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	___p1;								\
-})
-
-#else
-
 #define smp_mb()	dmb(ish)
 #define smp_rmb()	dmb(ishld)
 #define smp_wmb()	dmb(ishst)
@@ -109,8 +87,6 @@
 	___p1;								\
 })
 
-#endif
-
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
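
With the !CONFIG_SMP variants gone, smp_store_release()/smp_load_acquire() are always the real barrier-backed versions. Their message-passing guarantee, modelled in user-space C11 (a sketch of the semantics, not the kernel code):

#include <stdatomic.h>

static int payload;
static _Atomic int ready;

void producer(void)
{
	payload = 42;				/* plain store */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

int consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;				/* spin */
	return payload;				/* guaranteed to see 42 */
}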
 
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
new file mode 100644
index 0000000..4a748ce
--- /dev/null
+++ b/arch/arm64/include/asm/bug.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015  ARM Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ARCH_ARM64_ASM_BUG_H
+#define _ARCH_ARM64_ASM_BUG_H
+
+#include <asm/debug-monitors.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define HAVE_ARCH_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line)				\
+		".pushsection .rodata.str,\"aMS\",@progbits,1\n"	\
+	"2:	.string \"" file "\"\n\t"				\
+		".popsection\n\t"					\
+									\
+		".long 2b - 0b\n\t"					\
+		".short " #line "\n\t"
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+
+#define __BUG_FLAGS(flags) asm volatile (		\
+		".pushsection __bug_table,\"a\"\n\t"	\
+		".align 2\n\t"				\
+	"0:	.long 1f - 0b\n\t"			\
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+		".short " #flags "\n\t"			\
+		".popsection\n"				\
+							\
+	"1:	brk %[imm]"				\
+		:: [imm] "i" (BUG_BRK_IMM)		\
+)
+
+#define BUG() do {				\
+	_BUG_FLAGS(0);				\
+	unreachable();				\
+} while (0)
+
+#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+
+#endif /* CONFIG_GENERIC_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* ! _ARCH_ARM64_ASM_BUG_H */
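
Spelled out, BUG() with CONFIG_DEBUG_BUGVERBOSE emits one BRK in the text section plus a record in __bug_table ("file.c" and 42 stand in for the real __FILE__/__LINE__):

	.pushsection __bug_table,"a"
	.align 2
0:	.long 1f - 0b			// offset back to the BRK
	.pushsection .rodata.str,"aMS",@progbits,1
2:	.string "file.c"
	.popsection
	.long 2b - 0b			// offset to the file name
	.short 42			// line number
	.short 0			// flags
	.popsection
1:	brk #0x800			// BUG_BRK_IMM

The trap handler keys on the BRK immediate (0x800) in the ESR and recovers file/line from the table entry whose offset matches the faulting PC, so the hot path carries no data at all.
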
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d8c25b7..899e9f1 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -21,7 +21,9 @@
 #include <linux/bug.h>
 #include <linux/mmdebug.h>
 
+#include <asm/atomic.h>
 #include <asm/barrier.h>
+#include <asm/lse.h>
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
@@ -29,37 +31,73 @@
 
 	switch (size) {
 	case 1:
-		asm volatile("//	__xchg1\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalb	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 2:
-		asm volatile("//	__xchg2\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalh	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 4:
-		asm volatile("//	__xchg4\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 8:
-		asm volatile("//	__xchg8\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%3, %0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
 			: "memory");
@@ -68,7 +106,6 @@
 		BUILD_BUG();
 	}
 
-	smp_mb();
 	return ret;
 }
 
@@ -83,131 +120,39 @@
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
-	unsigned long oldval = 0, res;
-
 	switch (size) {
 	case 1:
-		do {
-			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_1(ptr, (u8)old, new);
 	case 2:
-		do {
-			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_2(ptr, (u16)old, new);
 	case 4:
-		do {
-			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
-		do {
-			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%1, %3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_8(ptr, old, new);
 	default:
 		BUILD_BUG();
 	}
 
-	return oldval;
-}
-
-#define system_has_cmpxchg_double()     1
-
-static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
-		unsigned long old1, unsigned long old2,
-		unsigned long new1, unsigned long new2, int size)
-{
-	unsigned long loop, lost;
-
-	switch (size) {
-	case 8:
-		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
-		do {
-			asm volatile("// __cmpxchg_double8\n"
-			"	ldxp	%0, %1, %2\n"
-			"	eor	%0, %0, %3\n"
-			"	eor	%1, %1, %4\n"
-			"	orr	%1, %0, %1\n"
-			"	mov	%w0, #0\n"
-			"	cbnz	%1, 1f\n"
-			"	stxp	%w0, %5, %6, %2\n"
-			"1:\n"
-				: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
-				: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
-		} while (loop);
-		break;
-	default:
-		BUILD_BUG();
-	}
-
-	return !lost;
-}
-
-static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
-			unsigned long old1, unsigned long old2,
-			unsigned long new1, unsigned long new2, int size)
-{
-	int ret;
-
-	smp_mb();
-	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 					 unsigned long new, int size)
 {
-	unsigned long ret;
+	switch (size) {
+	case 1:
+		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
+	case 2:
+		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
+	case 4:
+		return __cmpxchg_case_mb_4(ptr, old, new);
+	case 8:
+		return __cmpxchg_case_mb_8(ptr, old, new);
+	default:
+		BUILD_BUG();
+	}
 
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 #define cmpxchg(ptr, o, n) \
@@ -228,21 +173,32 @@
 	__ret; \
 })
 
+#define system_has_cmpxchg_double()     1
+
+#define __cmpxchg_double_check(ptr1, ptr2)					\
+({										\
+	if (sizeof(*(ptr1)) != 8)						\
+		BUILD_BUG();							\
+	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
+})
+
 #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+				     (unsigned long)(n1), (unsigned long)(n2), \
+				     ptr1); \
 	__ret; \
 })
 
 #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+				  (unsigned long)(n1), (unsigned long)(n2), \
+				  ptr1); \
 	__ret; \
 })
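
A usage sketch of cmpxchg_double() on two adjacent 64-bit words (adjacency and size are what __cmpxchg_double_check() enforces) -- the same shape mm/slub.c uses for its freelist/counters pair; the names here are hypothetical:

struct tagged_ptr {
	unsigned long ptr;	/* ptr1 */
	unsigned long gen;	/* ptr2 == ptr1 + 1 */
};

static bool update(struct tagged_ptr *t, unsigned long old_ptr,
		   unsigned long old_gen, unsigned long new_ptr)
{
	/* Returns 1 only if both words still held the old values;
	 * bumping the generation defeats ABA on the pointer. */
	return cmpxchg_double(&t->ptr, &t->gen,
			      old_ptr, old_gen,
			      new_ptr, old_gen + 1);
}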
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c104421..1715707 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -25,15 +25,20 @@
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
 #define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
 
-#define ARM64_NCAPS				4
+#define ARM64_NCAPS				6
 
 #ifndef __ASSEMBLY__
 
+#include <linux/kernel.h>
+
 struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
 	bool (*matches)(const struct arm64_cpu_capabilities *);
+	void (*enable)(void);
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
@@ -41,8 +46,8 @@
 		};
 
 		struct {	/* Feature register checking */
-			u64 register_mask;
-			u64 register_value;
+			int field_pos;
+			int min_field_value;
 		};
 	};
 };
@@ -70,6 +75,13 @@
 		__set_bit(num, cpu_hwcaps);
 }
 
+static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
+								  int field)
+{
+	return (s64)(features << (64 - 4 - field)) >> (64 - 4);
+}
+
 void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
 void check_local_cpu_errata(void);
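
The new cpuid_feature_extract_field() deliberately sign-extends the 4-bit field; a standalone model (hypothetical register values) showing why:

#include <stdint.h>
#include <stdio.h>

/* Same shift trick as cpuid_feature_extract_field(). */
static int extract_field(uint64_t features, int field)
{
	return (int64_t)(features << (64 - 4 - field)) >> (64 - 4);
}

int main(void)
{
	printf("%d\n", extract_field(0x200000, 20));	/* prints 2  */
	printf("%d\n", extract_field(0xf00000, 20));	/* prints -1 */
	return 0;
}

An all-ones field reads back as -1 rather than 15, matching the ID register convention where 0xf in a signed field means "not implemented", so a plain comparison against min_field_value does the right thing.
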
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a84ec60..ee6403d 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -81,9 +81,6 @@
 #define ID_AA64MMFR0_BIGEND(mmfr0)	\
 	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
 
-#define SCTLR_EL1_CP15BEN	(0x1 << 5)
-#define SCTLR_EL1_SED		(0x1 << 8)
-
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 40ec68a..279c85b5 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,12 @@
 
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/esr.h>
+#include <asm/insn.h>
+#include <asm/ptrace.h>
+
 /* Low-level stepping controls. */
 #define DBG_MDSCR_SS		(1 << 0)
 #define DBG_SPSR_SS		(1 << 21)
@@ -38,12 +44,7 @@
 /*
  * Break point instruction encoding
  */
-#define BREAK_INSTR_SIZE		4
-
-/*
- * ESR values expected for dynamic and compile time BRK instruction
- */
-#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xfffff))
+#define BREAK_INSTR_SIZE		AARCH64_INSN_SIZE
 
 /*
  * #imm16 values used for BRK instruction generation
@@ -51,10 +52,12 @@
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
  */
 #define FAULT_BRK_IMM			0x100
 #define KGDB_DYN_DBG_BRK_IMM		0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
+#define BUG_BRK_IMM			0x800
 
 /*
  * BRK instruction encoding
@@ -68,25 +71,10 @@
  */
 #define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
 
-/*
- * Extract byte from BRK instruction
- */
-#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
-	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
-
-/*
- * Extract byte from BRK #imm16
- */
-#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
-	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-
-#define KGDB_DYN_DBG_BRK_BYTE(x) \
-	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-
-#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DBG_BRK_BYTE(0)
-#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DBG_BRK_BYTE(1)
-#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DBG_BRK_BYTE(2)
-#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DBG_BRK_BYTE(3)
+#define AARCH64_BREAK_KGDB_DYN_DBG	\
+	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
+#define KGDB_DYN_BRK_INS_BYTE(x)	\
+	((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE		1
 
@@ -127,13 +115,13 @@
 
 u8 debug_monitors_arch(void);
 
-enum debug_el {
+enum dbg_active_el {
 	DBG_ACTIVE_EL0 = 0,
 	DBG_ACTIVE_EL1,
 };
 
-void enable_debug_monitors(enum debug_el el);
-void disable_debug_monitors(enum debug_el el);
+void enable_debug_monitors(enum dbg_active_el el);
+void disable_debug_monitors(enum dbg_active_el el);
 
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
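
Worked expansion of the consolidated breakpoint macros, assuming AARCH64_BREAK_MON keeps its usual definition of 0xd4200000 (the BRK opcode with a zero immediate):

	AARCH64_BREAK_KGDB_DYN_DBG = 0xd4200000 | (0x400 << 5)
	                           = 0xd4208000

	KGDB_DYN_BRK_INS_BYTE(0..3) = 0x00, 0x80, 0x20, 0xd4

i.e. the little-endian byte sequence kgdb plants for a dynamic breakpoint, replacing the four hand-unrolled per-byte macros deleted above.
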
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7052245..77eeb2c 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,6 +18,8 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
+#include <asm/memory.h>
+
 #define ESR_ELx_EC_UNKNOWN	(0x00)
 #define ESR_ELx_EC_WFx		(0x01)
 /* Unallocated EC: 0x02 */
@@ -99,6 +101,13 @@
 #define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
 #define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)
 
+/* ESR value templates for specific events */
+
+/* BRK instruction trap from AArch64 state */
+#define ESR_ELx_VAL_BRK64(imm)					\
+	((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL |	\
+	 ((imm) & 0xffff))
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
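
Worked value, using the EC/IL constants defined earlier in this header (ESR_ELx_EC_BRK64 == 0x3c, ESR_ELx_EC_SHIFT == 26, ESR_ELx_IL == 1 << 25):

	ESR_ELx_VAL_BRK64(BUG_BRK_IMM)
	  = (0x3c << 26) | (1 << 25) | 0x800
	  = 0xf2000800

consistent with the 0xf2000000 template that the removed DBG_ESR_VAL_BRK() in debug-monitors.h used to hard-code.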
 
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0303705..6cb7e1a 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,7 +18,13 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
+#include <linux/ftrace.h>
+
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
 #define __exception_irq_entry	__exception
+#endif
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index c0739187..8b9884c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -8,7 +8,7 @@
  * Copyright (C) 1998 Ingo Molnar
  * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
  *
- * Adapted from arch/x86_64 version.
+ * Adapted from arch/x86 version.
  *
  */
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 74069b3..007a69f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -20,10 +20,17 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
+#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 	asm volatile(							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
+"	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
@@ -39,6 +46,8 @@
 "	.align	3\n"							\
 "	.quad	1b, 4b, 2b, 4b\n"					\
 "	.popsection\n"							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
 	: "memory")
@@ -112,6 +121,7 @@
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 6aae421..2bb7009 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -24,9 +24,7 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
 	unsigned int ipi_irqs[NR_IPI];
-#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
@@ -34,10 +32,8 @@
 #define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
 #define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
 
-#ifdef CONFIG_SMP
 u64 smp_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu	smp_irq_stat_cpu
-#endif
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2fd9b14..bb4052e 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __ASM_HUGETLB_H
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index b4f6b19..8e24ef3 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
-#ifdef CONFIG_SMP
-
 #include <asm/smp.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
@@ -10,13 +8,4 @@
 	return !!__smp_cross_call;
 }
 
-#else
-
-static inline bool arch_irq_work_has_interrupt(void)
-{
-	return false;
-}
-
-#endif
-
 #endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165..1b5e0e8 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
 
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 :  :  "i"(key) :  : l_yes);
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm goto("1: b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 3\n\t"
+		 ".quad 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
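
Both variants record &key[branch] rather than the key itself, so the branch polarity travels in bit 0 of the pointer written to __jump_table and the patching code knows whether "key enabled" means nop or branch at each site. A usage sketch with the two-argument static_branch API this supports (do_fast_thing()/do_slow_thing() are hypothetical):

static DEFINE_STATIC_KEY_FALSE(use_fast_path);

void hot_path(void)
{
	/* Compiles to the "1: nop" above; static_branch_enable()
	 * patches it into a branch at runtime. */
	if (static_branch_unlikely(&use_fast_path))
		do_fast_thing();	/* hypothetical */
	else
		do_slow_thing();	/* hypothetical */
}
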
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
new file mode 100644
index 0000000..3de42d6
--- /dev/null
+++ b/arch/arm64/include/asm/lse.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_LSE_H
+#define __ASM_LSE_H
+
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
+#ifdef __ASSEMBLER__
+
+.arch_extension	lse
+
+.macro alt_lse, llsc, lse
+	alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+__asm__(".arch_extension	lse");
+
+/* Move the ll/sc atomics out-of-line */
+#define __LL_SC_INLINE
+#define __LL_SC_PREFIX(x)	__ll_sc_##x
+#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+
+/* Macro for constructing calls to out-of-line ll/sc atomics */
+#define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+
+/* In-line patching at runtime */
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
+	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+
+#endif	/* __ASSEMBLER__ */
+#else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+
+#ifdef __ASSEMBLER__
+
+.macro alt_lse, llsc, lse
+	\llsc
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+#define __LL_SC_INLINE		static inline
+#define __LL_SC_PREFIX(x)	x
+#define __LL_SC_EXPORT(x)
+
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc
+
+#endif	/* __ASSEMBLER__ */
+#endif	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif	/* __ASM_LSE_H */
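
Putting the pieces together for one operation, with CONFIG_AS_LSE and CONFIG_ARM64_LSE_ATOMICS both set:

	__LL_SC_ATOMIC(add)
	  -> __LL_SC_CALL(atomic_add)
	  -> "bl	__ll_sc_atomic_add\n"

	ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add), "stadd %w[i], %[v]\n")
	  -> ALTERNATIVE("bl	__ll_sc_atomic_add\n",
			 "stadd	%w[i], %[v]\n",
			 ARM64_HAS_LSE_ATOMICS)

so the default instruction stream calls the out-of-line LL/SC routine and the alternatives framework patches in the single STADD at boot on CPUs with the LSE extension. The bare "bl" is safe only because atomic_ll_sc.o is built with -fcall-saved-* flags, which is why the callers in atomic_lse.h clobber just x30.
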
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f800d45..6b4c3ad 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -81,12 +81,6 @@
 #define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
-#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
@@ -114,6 +108,14 @@
 #define PHYS_OFFSET		({ memstart_addr; })
 
 /*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
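
Worked example of MAX_MEMBLOCK_ADDR, assuming a 39-bit VA configuration where PAGE_OFFSET == 0xffffffc000000000 and a hypothetical memstart_addr of 0x80000000:

	-PAGE_OFFSET (2's complement) = 0x0000004000000000 (256 GiB,
					the span of the linear map)

	MAX_MEMBLOCK_ADDR = 0x80000000 + 0x4000000000 - 1
			  = 0x407fffffff

RAM above that physical address cannot be reached through the linear mapping and has to be trimmed from memblock.
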
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 79fcfb0..0302087 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,7 +28,6 @@
 #define ASID(mm)	((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
-extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 4fde8c1..0a456be 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,8 +16,6 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
-#ifdef CONFIG_SMP
-
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -38,12 +36,6 @@
 }
 #define __my_cpu_offset __my_cpu_offset()
 
-#else	/* !CONFIG_SMP */
-
-#define set_my_cpu_offset(x)	do { } while (0)
-
-#endif /* CONFIG_SMP */
-
 #define PERCPU_OP(op, asm_op)						\
 static inline unsigned long __percpu_##op(void *ptr,			\
 			unsigned long val, int size)			\
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 6471773..7bd3cdb 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,7 +17,7 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 59bfae7..24154b0 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -104,6 +104,7 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 
@@ -168,5 +169,7 @@
 #define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
+#define TCR_HA			(UL(1) << 39)
+#define TCR_HD			(UL(1) << 40)
 
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 56283f8..6900b2d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_PGTABLE_H
 #define __ASM_PGTABLE_H
 
+#include <asm/bug.h>
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
@@ -27,7 +28,11 @@
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
+#else
 #define PTE_WRITE		(_AT(pteval_t, 1) << 57)
+#endif
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -48,18 +53,16 @@
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#ifdef CONFIG_SMP
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#else
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#endif
 
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
@@ -137,12 +140,20 @@
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
-#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
+#else
+#define pte_hw_dirty(pte)	(0)
+#endif
+#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
@@ -209,20 +220,49 @@
 	}
 }
 
+struct mm_struct;
+struct vm_area_struct;
+
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
+/*
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
+ * (PTE_WRITE == PTE_DBM):
+ *
+ * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
+ *   0      0      |   1           0          0
+ *   0      1      |   1           1          0
+ *   1      0      |   1           0          1
+ *   1      1      |   0           1          x
+ *
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
+ *
+ *   PTE_DIRTY || !PTE_RDONLY
+ */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_valid_user(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (pte_dirty(pte) && pte_write(pte))
+		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
 	}
 
+	/*
+	 * If the existing pte is valid, check for potential race with
+	 * hardware updates of the pte (ptep_set_access_flags safely changes
+	 * valid ptes without going through an invalid entry).
+	 */
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep)) {
+		BUG_ON(!pte_young(pte));
+		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	}
+
 	set_pte(ptep, pte);
 }
 
@@ -461,6 +501,9 @@
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
 			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+	/* preserve the hardware dirty information */
+	if (pte_hw_dirty(pte))
+		newprot |= PTE_DIRTY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -470,6 +513,101 @@
 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * Atomic pte/pmd modifications.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned int tmp, res;
+
+	asm volatile("//	ptep_test_and_clear_young\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
+	"	and	%0, %0, %4		// clear PTE_AF\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
+	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+
+	return res;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	asm volatile("//	ptep_get_and_clear\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	stxr	%w1, xzr, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
+
+	return __pte(old_pteval);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned long tmp;
+
+	asm volatile("//	ptep_set_wrprotect\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
+	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
+	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
+	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
+	: "cc");
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#endif
+#endif	/* CONFIG_ARM64_HW_AFDBM */
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
@@ -505,6 +643,21 @@
 
 #define pgtable_cache_init() do { } while (0)
 
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB for user mappings, so make sure that
+	 * the page table write is visible.
+	 */
+	dsb(ishst);
+}
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
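
A walk-through of ptep_set_wrprotect() against the "PTE bits configuration" table above, for a hardware-dirty, writable pte:

	before:	Dirty=1 Writable=1  ->  PTE_RDONLY=0 PTE_WRITE=1

	tst finds PTE_RDONLY clear (hardware-dirty), so csel selects
	PTE_DIRTY|PTE_RDONLY; orr sets both, and the final and clears
	PTE_WRITE/PTE_DBM:

	after:	Dirty=1 Writable=0  ->  PTE_RDONLY=1 PTE_WRITE=0 PTE_DIRTY=1

The hardware dirty state (encoded as !PTE_RDONLY) is thus carried over into the software PTE_DIRTY bit, and because the whole transition sits in one LL/SC loop, a concurrent hardware DBM update cannot be lost in between.
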
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e..98f3235 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1a..0000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index d6dd9fd..536274e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -183,11 +183,7 @@
 
 #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
 
-#ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index db02be8..d9c3d6a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -20,10 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#ifndef CONFIG_SMP
-# error "<asm/smp.h> included in non-SMP build"
-#endif
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 struct seq_file;
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 7abf757..af58dcd 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,6 +56,4 @@
 	return -EINVAL;
 }
 
-void __init do_post_cpus_up_work(void);
-
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee1287..c85e96d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/lse.h>
 #include <asm/spinlock_types.h>
 #include <asm/processor.h>
 
@@ -38,11 +39,21 @@
 
 	asm volatile(
 	/* Atomically increment the next ticket. */
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 "	prfm	pstl1strm, %3\n"
 "1:	ldaxr	%w0, %3\n"
 "	add	%w1, %w0, %w5\n"
 "	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n"
+"	cbnz	%w2, 1b\n",
+	/* LSE atomics */
+"	mov	%w2, %w5\n"
+"	ldadda	%w2, %w0, %3\n"
+"	nop\n"
+"	nop\n"
+"	nop\n"
+	)
+
 	/* Did we get the lock? */
 "	eor	%w1, %w0, %w0, ror #16\n"
 "	cbz	%w1, 3f\n"
@@ -67,15 +78,25 @@
 	unsigned int tmp;
 	arch_spinlock_t lockval;
 
-	asm volatile(
-"	prfm	pstl1strm, %2\n"
-"1:	ldaxr	%w0, %2\n"
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbnz	%w1, 2f\n"
-"	add	%w0, %w0, %3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b\n"
-"2:"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldaxr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 2f\n"
+	"	add	%w0, %w0, %3\n"
+	"	stxr	%w1, %w0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 1f\n"
+	"	add	%w1, %w0, %3\n"
+	"	casa	%w0, %w1, %2\n"
+	"	and	%w1, %w1, #0xffff\n"
+	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
 	: "memory");
@@ -85,10 +106,19 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(
-"	stlrh	%w1, %0\n"
-	: "=Q" (lock->owner)
-	: "r" (lock->owner + 1)
+	unsigned long tmp;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	ldrh	%w1, %0\n"
+	"	add	%w1, %w1, #1\n"
+	"	stlrh	%w1, %0",
+	/* LSE atomics */
+	"	mov	%w1, #1\n"
+	"	nop\n"
+	"	staddlh	%w1, %0")
+	: "=Q" (lock->owner), "=&r" (tmp)
+	:
 	: "memory");
 }
 
@@ -123,13 +153,24 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"	sevl\n"
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
 	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	mov	%w0, wzr\n"
+	"2:	casa	%w0, %w2, %1\n"
+	"	cbz	%w0, 3f\n"
+	"	ldxr	%w0, %1\n"
+	"	cbz	%w0, 2b\n"
+	"	wfe\n"
+	"	b	1b\n"
+	"3:")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -139,11 +180,18 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
-	"	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"1:	ldaxr	%w0, %1\n"
+	"	cbnz	%w0, 2f\n"
 	"	stxr	%w0, %w2, %1\n"
-	"1:\n"
+	"	cbnz	%w0, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	mov	%w0, wzr\n"
+	"	casa	%w0, %w2, %1\n"
+	"	nop\n"
+	"	nop")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -153,9 +201,10 @@
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (rw->lock) : "r" (0) : "memory");
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	"	stlr	wzr, %0",
+	"	swpl	wzr, wzr, %0")
+	: "=Q" (rw->lock) :: "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -172,6 +221,10 @@
  *
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
+ *
+ * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
+ * and LSE implementations may exhibit different behaviour (although this
+ * will have no effect on lockdep).
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
@@ -179,26 +232,43 @@
 
 	asm volatile(
 	"	sevl\n"
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
 	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
+	"	nop\n"
+	"	cbnz	%w1, 2b",
+	/* LSE atomics */
+	"1:	wfe\n"
+	"2:	ldxr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1b\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w0, %w1, %w0\n"
+	"	cbnz	%w0, 2b")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp, tmp2;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
 	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
+	"	cbnz	%w1, 1b",
+	/* LSE atomics */
+	"	movn	%w0, #0\n"
+	"	nop\n"
+	"	nop\n"
+	"	staddl	%w0, %2")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");
@@ -206,17 +276,28 @@
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int tmp, tmp2 = 1;
+	unsigned int tmp, tmp2;
 
-	asm volatile(
-	"	ldaxr	%w0, %2\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	mov	%w1, #1\n"
+	"1:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1f\n"
+	"	tbnz	%w0, #31, 2f\n"
 	"	stxr	%w1, %w0, %2\n"
-	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1f\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w1, %w1, %w0\n"
+	"	nop\n"
+	"1:")
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 
 	return !tmp2;
 }
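
The locking routines above implement a 16-bit ticket lock: the "eor ..., ror #16" tests compare the owner and next halves of the lock word. A user-space C11 model of the algorithm (TICKET_SHIFT == 16; a sketch, not the kernel code):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t owner;		/* low half of the lock word  */
	_Atomic uint16_t next;		/* high half of the lock word */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Take a ticket (the ldaxr/add/stxr or ldadda above). */
	uint16_t t = atomic_fetch_add(&l->next, 1);

	/* Lock is ours when the owner half reaches our ticket. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		;			/* wfe in the real thing */
}

static void ticket_unlock(ticket_lock_t *l)
{
	uint16_t o = atomic_load_explicit(&l->owner, memory_order_relaxed);

	/* Halfword store, as with stlrh/staddlh, so the increment
	 * cannot carry into the "next" half. */
	atomic_store_explicit(&l->owner, o + 1, memory_order_release);
}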
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d3836..55be59a 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,6 +20,8 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
 #define TICKET_SHIFT	16
 
 typedef struct {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5c89df0..a7f3d4b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,8 +20,29 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
+#define SCTLR_EL1_CP15BEN	(0x1 << 5)
+#define SCTLR_EL1_SED		(0x1 << 8)
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ *  C5.2, version: ARM DDI 0487A.f)
+ *	[20-19] : Op0
+ *	[18-16] : Op1
+ *	[15-12] : CRn
+ *	[11-8]  : CRm
+ *	[7-5]   : Op2
+ */
 #define sys_reg(op0, op1, crn, crm, op2) \
-	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN                         (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+				     (!!x)<<8 | 0x1f)
 
 #ifdef __ASSEMBLY__
 
@@ -31,11 +52,11 @@
 	.equ	__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5200000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5000000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 #else
@@ -47,14 +68,23 @@
 "	.equ	__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 );
 
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+	u32 val;
+
+	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+	val &= ~clear;
+	val |= set;
+	asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
 #endif
 
 #endif	/* __ASM_SYSREG_H */
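
config_sctlr_el1() moves here so it can be shared (the original copy in armv8_deprecated.c is deleted later in this diff). As a hedged illustration of the intended calling convention, modelled on cp15_barrier_set_hw_mode(), a caller toggles one SCTLR_EL1 bit per call:

/* Sketch only: enable/disable the AArch32 CP15 barrier instructions by
 * flipping SCTLR_EL1.CP15BEN through the shared read-modify-write helper. */
static int cp15_barrier_set_hw_mode_sketch(bool enable)
{
	if (enable)
		config_sctlr_el1(0, SCTLR_EL1_CP15BEN);		/* set */
	else
		config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);		/* clear */
	return 0;
}
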
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c..d6e6b66 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@
 		flush_tlb_mm(tlb->mm);
 	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		/*
+		 * The intermediate page table levels are already handled by
+		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+		 * TLBI is sufficient here.
+		 */
+		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 	}
 }
 
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 934815d..7bd2da0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,58 +87,64 @@
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi	vae1is, %0" : : "r" (addr));
+	asm("tlbi	vale1is, %0" : : "r" (addr));
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
-	unsigned long addr;
-	start = asid | (start >> 12);
-	end = asid | (end >> 12);
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vae1is, %0" : : "r"(addr));
-	dsb(ish);
-}
-
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long addr;
-	start >>= 12;
-	end >>= 12;
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vaae1is, %0" : : "r"(addr));
-	dsb(ish);
-	isb();
-}
-
 /*
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
 #define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
 
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end,
+				     bool last_level)
+{
+	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
+	start = asid | (start >> 12);
+	end = asid | (end >> 12);
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		if (last_level)
+			asm("tlbi vale1is, %0" : : "r"(addr));
+		else
+			asm("tlbi vae1is, %0" : : "r"(addr));
+	}
+	dsb(ish);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
+	__flush_tlb_range(vma, start, end, false);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
 		flush_tlb_all();
+		return;
+	}
+
+	start >>= 12;
+	end >>= 12;
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vaae1is, %0" : : "r"(addr));
+	dsb(ish);
+	isb();
 }
 
 /*
@@ -154,20 +160,6 @@
 	asm("tlbi	vae1is, %0" : : "r" (addr));
 	dsb(ish);
 }
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
-{
-	/*
-	 * set_pte() does not have a DSB for user mappings, so make sure that
-	 * the page table write is visible.
-	 */
-	dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif
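
The tlbi operands above pack the ASID into bits [63:48] and the page-aligned VA, shifted right by 12, into the low bits. A small illustrative helper (an assumption for exposition; the kernel open-codes these expressions in the functions above):

static inline unsigned long tlbi_va_asid(unsigned long asid, unsigned long va)
{
	/* [63:48] = ASID, low bits = VA >> 12, as in __flush_tlb_range() */
	return (asid << 48) | (va >> 12);
}
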
 
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 225ec35..a3e9d6f 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_TOPOLOGY_H
 #define __ASM_TOPOLOGY_H
 
-#ifdef CONFIG_SMP
-
 #include <linux/cpumask.h>
 
 struct cpu_topology {
@@ -24,13 +22,6 @@
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#else
-
-static inline void init_cpu_topology(void) { }
-static inline void store_cpu_topology(unsigned int cpuid) { }
-
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 232e4ba..0cc2f29 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,13 +34,32 @@
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
+
+	return in ? : __in_irqentry_text(ptr);
 }
 
 #endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba44..b2ede967 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
@@ -148,6 +153,8 @@
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -194,6 +201,8 @@
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
@@ -210,6 +219,8 @@
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __put_user(x, ptr)						\
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 8655321..4318866 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -18,4 +18,10 @@
 
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
+/* Rebind event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+	return true;
+}
+
 #endif /* _ASM_ARM64_XEN_EVENTS_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 73cf0f5..361c8a8 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -27,5 +27,6 @@
 #define HWCAP_SHA1		(1 << 5)
 #define HWCAP_SHA2		(1 << 6)
 #define HWCAP_CRC32		(1 << 7)
+#define HWCAP_ATOMICS		(1 << 8)
 
 #endif /* _UAPI__ASM_HWCAP_H */
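
Userspace can test the new capability bit via the auxiliary vector. A hedged example (the fallback #define is only for building against pre-4.3 headers):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS	(1 << 8)	/* matches the definition above */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("LSE atomics: %s\n",
	       (hwcap & HWCAP_ATOMICS) ? "present" : "absent");
	return 0;
}
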
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643..208db3d 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_PAN_BIT	0x00400000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 426d076..22dc9bc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,15 +17,15 @@
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
-			   cpufeature.o alternative.o cacheinfo.o
+			   cpufeature.o alternative.o cacheinfo.o		\
+			   smp.o smp_spin_table.o topology.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o entry32.o		\
 					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
-arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
+arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 221b983..ab9db0e 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -85,7 +85,7 @@
 	return insn;
 }
 
-static int __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -114,19 +114,39 @@
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + nr_inst));
 	}
-
-	return 0;
 }
 
-void apply_alternatives_all(void)
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
 {
+	static int patched = 0;
 	struct alt_region region = {
 		.begin	= __alt_instructions,
 		.end	= __alt_instructions_end,
 	};
 
+	/* We always have a CPU 0 at this point (__init) */
+	if (smp_processor_id()) {
+		while (!READ_ONCE(patched))
+			cpu_relax();
+		isb();
+	} else {
+		BUG_ON(patched);
+		__apply_alternatives(&region);
+		/* Barriers provided by the cache flushing */
+		WRITE_ONCE(patched, 1);
+	}
+
+	return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
 	/* better not try code patching on a live SMP system */
-	stop_machine(__apply_alternatives, &region, NULL);
+	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
 void apply_alternatives(void *start, size_t length)
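
The polling protocol is deliberately primitive because stop_machine()'s own completion path may itself be among the code being patched: CPU 0 applies the alternatives while every other CPU spins on a flag and then resynchronises its instruction stream. Stripped of kernel primitives, the shape is roughly (a sketch; READ_ONCE/WRITE_ONCE and isb() are reduced to comments):

static int patch_rendezvous(int cpu_id, void (*do_patch)(void))
{
	static volatile int patched;	/* READ_ONCE/WRITE_ONCE in the kernel */

	if (cpu_id != 0) {
		while (!patched)
			;		/* cpu_relax() in the kernel */
		/* isb() here: resynchronise the instruction stream */
	} else {
		do_patch();
		patched = 1;		/* ordered by the cache maintenance */
	}
	return 0;
}
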
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7922c2e..bcee7ab 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,8 +14,11 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -279,6 +282,8 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
 	__asm__ __volatile__(					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+		    CONFIG_ARM64_PAN)				\
 	"	mov		%w2, %w1\n"			\
 	"0:	ldxr"B"		%w1, [%3]\n"			\
 	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
@@ -294,7 +299,9 @@
 	"	.align		3\n"				\
 	"	.quad		0b, 3b\n"			\
 	"	.quad		1b, 3b\n"			\
-	"	.popsection"					\
+	"	.popsection\n"					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
 	: "memory")
@@ -504,16 +511,6 @@
 	return 0;
 }
 
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
-	u32 val;
-
-	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
-	val &= ~clear;
-	val |= set;
-	asm volatile("msr sctlr_el1, %0" : : "r" (val));
-}
-
 static int cp15_barrier_set_hw_mode(bool enable)
 {
 	if (enable)
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index 5ea337d..b6bd7d4 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,9 +30,7 @@
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
 	&smp_spin_table_ops,
-#endif
 	&cpu_psci_ops,
 	NULL,
 };
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5ad86ce..3c9aed3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -21,24 +21,57 @@
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
 
 static bool
-has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
-	u64 val;
+	int val = cpuid_feature_extract_field(reg, entry->field_pos);
 
-	val = read_cpuid(id_aa64pfr0_el1);
-	return (val & entry->register_mask) == entry->register_value;
+	return val >= entry->min_field_value;
 }
 
+#define __ID_FEAT_CHK(reg)						\
+static bool __maybe_unused						\
+has_##reg##_feature(const struct arm64_cpu_capabilities *entry)		\
+{									\
+	u64 val;							\
+									\
+	val = read_cpuid(reg##_el1);					\
+	return feature_matches(val, entry);				\
+}
+
+__ID_FEAT_CHK(id_aa64pfr0);
+__ID_FEAT_CHK(id_aa64mmfr1);
+__ID_FEAT_CHK(id_aa64isar0);
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
 		.matches = has_id_aa64pfr0_feature,
-		.register_mask = (0xf << 24),
-		.register_value = (1 << 24),
+		.field_pos = 24,
+		.min_field_value = 1,
 	},
+#ifdef CONFIG_ARM64_PAN
+	{
+		.desc = "Privileged Access Never",
+		.capability = ARM64_HAS_PAN,
+		.matches = has_id_aa64mmfr1_feature,
+		.field_pos = 20,
+		.min_field_value = 1,
+		.enable = cpu_enable_pan,
+	},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+	{
+		.desc = "LSE atomic instructions",
+		.capability = ARM64_HAS_LSE_ATOMICS,
+		.matches = has_id_aa64isar0_feature,
+		.field_pos = 20,
+		.min_field_value = 2,
+	},
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 	{},
 };
 
@@ -55,9 +88,15 @@
 			pr_info("%s %s\n", info, caps[i].desc);
 		cpus_set_cap(caps[i].capability);
 	}
+
+	/* second pass allows enable() to consider interacting capabilities */
+	for (i = 0; caps[i].desc; i++) {
+		if (cpus_have_cap(caps[i].capability) && caps[i].enable)
+			caps[i].enable();
+	}
 }
 
 void check_local_cpu_features(void)
 {
-	check_cpu_capabilities(arm64_features, "detected feature");
+	check_cpu_capabilities(arm64_features, "detected feature:");
 }
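
feature_matches() relies on cpuid_feature_extract_field(), which is not visible in this hunk. The key property is that ID register fields are treated as 4-bit signed values, so reserved negative encodings (e.g. 0xf) compare below any valid minimum. A hedged reconstruction of the helper (assumed to live in cpufeature.h):

/* Sign-extend the 4-bit field at bit position 'field' of an ID register,
 * so 0xf decodes as -1 (reserved) rather than 15. */
static inline int cpuid_feature_extract_field(u64 features, int field)
{
	return (s64)(features << (64 - 4 - field)) >> 60;
}
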
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369..9b3b62a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -82,7 +82,7 @@
 static DEFINE_PER_CPU(int, mde_ref_count);
 static DEFINE_PER_CPU(int, kde_ref_count);
 
-void enable_debug_monitors(enum debug_el el)
+void enable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, enable = 0;
 
@@ -102,7 +102,7 @@
 	}
 }
 
-void disable_debug_monitors(enum debug_el el)
+void disable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, disable = 0;
 
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index f537406..816120e 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,7 +13,7 @@
 #include <asm/efi.h>
 #include <asm/sections.h>
 
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 					unsigned long *image_addr,
 					unsigned long *image_size,
 					unsigned long *reserve_addr,
@@ -23,21 +23,44 @@
 {
 	efi_status_t status;
 	unsigned long kernel_size, kernel_memsize = 0;
+	unsigned long nr_pages;
+	void *old_image_addr = (void *)*image_addr;
 
 	/* Relocate the image, if required. */
 	kernel_size = _edata - _text;
 	if (*image_addr != (dram_base + TEXT_OFFSET)) {
 		kernel_memsize = kernel_size + (_end - _edata);
-		status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
-				       SZ_2M, reserve_addr);
+
+		/*
+		 * First, try a straight allocation at the preferred offset.
+		 * This will work around the issue where, if dram_base == 0x0,
+		 * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+		 * address of the allocation from being mistaken for a FAIL
+		 * return value or a NULL pointer). It will also ensure that, on
+		 * platforms where the [dram_base, dram_base + TEXT_OFFSET)
+		 * interval is partially occupied by the firmware (like on APM
+		 * Mustang), we can still place the kernel at the address
+		 * 'dram_base + TEXT_OFFSET'.
+		 */
+		*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+		nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+			   EFI_PAGE_SIZE;
+		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+					EFI_LOADER_DATA, nr_pages,
+					(efi_physical_addr_t *)reserve_addr);
 		if (status != EFI_SUCCESS) {
-			pr_efi_err(sys_table, "Failed to relocate kernel\n");
-			return status;
+			kernel_memsize += TEXT_OFFSET;
+			status = efi_low_alloc(sys_table_arg, kernel_memsize,
+					       SZ_2M, reserve_addr);
+
+			if (status != EFI_SUCCESS) {
+				pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+				return status;
+			}
+			*image_addr = *reserve_addr + TEXT_OFFSET;
 		}
-		memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
-		       kernel_size);
-		*image_addr = *reserve_addr + TEXT_OFFSET;
-		*reserve_size = kernel_memsize + TEXT_OFFSET;
+		memcpy((void *)*image_addr, old_image_addr, kernel_size);
+		*reserve_size = kernel_memsize;
 	}
 
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e163518..4306c93 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,41 +116,34 @@
 	*/
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG	"nop ; nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
-#else
-
-#define SEQUENCE_ORG	"nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	nop
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
+#else
+	msr contextidr_el1, xzr
+#endif
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -613,22 +606,21 @@
  */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
 ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -652,7 +644,7 @@
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
 no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 44d6f75..c56956a1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -158,6 +158,7 @@
 void fpsimd_flush_thread(void)
 {
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c0ff3ce..a055be6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -62,13 +62,8 @@
 /*
  * Initial memory map attributes.
  */
-#ifndef CONFIG_SMP
-#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
-#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
-#else
 #define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
 #define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-#endif
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
@@ -574,7 +569,6 @@
 	.long	BOOT_CPU_MODE_EL1
 	.popsection
 
-#ifdef CONFIG_SMP
 	/*
 	 * This provides a "holding pen" in which all secondary cores are
 	 * held until we're ready for them to initialise.
@@ -622,7 +616,6 @@
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
-#endif	/* CONFIG_SMP */
 
 /*
  * Enable the MMU.
@@ -641,5 +634,13 @@
 	isb
 	msr	sctlr_el1, x0
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x27
 ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 7a1a5da..003bc3d 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -156,7 +156,7 @@
  * Convert a breakpoint privilege level to the corresponding exception
  * level.
  */
-static enum debug_el debug_exception_level(int privilege)
+static enum dbg_active_el debug_exception_level(int privilege)
 {
 	switch (privilege) {
 	case AARCH64_BREAKPOINT_EL0:
@@ -230,7 +230,7 @@
 	struct perf_event **slots;
 	struct debug_info *debug_info = &current->thread.debug;
 	int i, max_slots, ctrl_reg, val_reg, reg_enable;
-	enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
+	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
 	u32 ctrl;
 
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -537,7 +537,7 @@
  * exception level at the register level.
  * This is used when single-stepping after a breakpoint exception.
  */
-static void toggle_bp_registers(int reg, enum debug_el el, int enable)
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
 {
 	int i, max_slots, privilege;
 	u32 ctrl;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index dd9671c..f341866 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -101,9 +101,8 @@
 		return addr;
 
 	BUG_ON(!page);
-	set_fixmap(fixmap, page_to_phys(page));
-
-	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+			(uintaddr & ~PAGE_MASK));
 }
 
 static void __kprobes patch_unmap(int fixmap)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 463fa2e..11dc3fd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,9 +33,7 @@
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-#ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
-#endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7..c2dd1ad 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@
 	void *addr = (void *)entry->code;
 	u32 insn;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn = aarch64_insn_gen_branch_imm(entry->code,
 						   entry->target,
 						   AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index a0d10c5..bcac81e 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@
 
 static struct break_hook kgdb_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
 	.fn		= kgdb_brk_fn
 };
 
 static struct break_hook kgdb_compiled_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
 	.fn		= kgdb_compiled_brk_fn
 };
 
@@ -328,9 +328,9 @@
  */
 struct kgdb_arch arch_kgdb_ops = {
 	.gdb_bpt_instr = {
-		KGDB_DYN_BRK_INS_BYTE0,
-		KGDB_DYN_BRK_INS_BYTE1,
-		KGDB_DYN_BRK_INS_BYTE2,
-		KGDB_DYN_BRK_INS_BYTE3,
+		KGDB_DYN_BRK_INS_BYTE(0),
+		KGDB_DYN_BRK_INS_BYTE(1),
+		KGDB_DYN_BRK_INS_BYTE(2),
+		KGDB_DYN_BRK_INS_BYTE(3),
 	}
 };
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4095379..b3d098b 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -38,6 +38,19 @@
 	return res->start;
 }
 
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ * @mask: bitmask of BARs to enable
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	if (pci_has_flag(PCI_PROBE_ONLY))
+		return 0;
+
+	return pci_enable_resources(dev, mask);
+}
+
 /*
  * Try to assign the IRQ number from DT when adding a new device
  */
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644
index 0000000..3aa7483
--- /dev/null
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -0,0 +1,196 @@
+/*
+ * arm64 callchain support
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+struct frame_tail {
+	struct frame_tail	__user *fp;
+	unsigned long		lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
+	u32		sp;
+	u32		lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+		      struct perf_callchain_entry *entry)
+{
+	struct compat_frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail + 1 >= (struct compat_frame_tail __user *)
+			compat_ptr(buftail.fp))
+		return NULL;
+
+	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+			 struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->pc);
+
+	if (!compat_user_mode(regs)) {
+		/* AARCH64 mode */
+		struct frame_tail __user *tail;
+
+		tail = (struct frame_tail __user *)regs->regs[29];
+
+		while (entry->nr < PERF_MAX_STACK_DEPTH &&
+		       tail && !((unsigned long)tail & 0xf))
+			tail = user_backtrace(tail, entry);
+	} else {
+#ifdef CONFIG_COMPAT
+		/* AARCH32 compat mode */
+		struct compat_frame_tail __user *tail;
+
+		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+			tail && !((unsigned long)tail & 0x3))
+			tail = compat_user_backtrace(tail, entry);
+#endif
+	}
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	frame.fp = regs->regs[29];
+	frame.sp = regs->sp;
+	frame.pc = regs->pc;
+
+	walk_stackframe(&frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		return perf_guest_cbs->get_guest_ip();
+
+	return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		if (perf_guest_cbs->is_user_mode())
+			misc |= PERF_RECORD_MISC_GUEST_USER;
+		else
+			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+	} else {
+		if (user_mode(regs))
+			misc |= PERF_RECORD_MISC_USER;
+		else
+			misc |= PERF_RECORD_MISC_KERNEL;
+	}
+
+	return misc;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b31e9a4..f9a74d4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,7 +25,7 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -36,7 +36,6 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
-#include <asm/stacktrace.h>
 
 /*
  * ARMv8 supports a maximum of 32 events.
@@ -78,6 +77,16 @@
 
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -435,10 +444,8 @@
 	unsigned int i, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
-	if (!pmu_device) {
-		pr_err("no PMU device registered\n");
+	if (!pmu_device)
 		return -ENODEV;
-	}
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (!irqs) {
@@ -703,118 +710,28 @@
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						[PERF_COUNT_HW_CACHE_OP_MAX]
 						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
@@ -1337,7 +1254,7 @@
 		}
 
 		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+			if (dn == of_cpu_device_node_get(cpu))
 				break;
 
 		if (cpu >= nr_cpu_ids) {
@@ -1415,180 +1332,3 @@
 }
 early_initcall(init_hw_perf_events);
 
-/*
- * Callchain handling code.
- */
-struct frame_tail {
-	struct frame_tail	__user *fp;
-	unsigned long		lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
-{
-	struct frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail >= buftail.fp)
-		return NULL;
-
-	return buftail.fp;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
-	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
-	u32		sp;
-	u32		lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
-{
-	struct compat_frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail + 1 >= (struct compat_frame_tail __user *)
-			compat_ptr(buftail.fp))
-		return NULL;
-
-	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
-}
-#endif /* CONFIG_COMPAT */
-
-void perf_callchain_user(struct perf_callchain_entry *entry,
-			 struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	perf_callchain_store(entry, regs->pc);
-
-	if (!compat_user_mode(regs)) {
-		/* AARCH64 mode */
-		struct frame_tail __user *tail;
-
-		tail = (struct frame_tail __user *)regs->regs[29];
-
-		while (entry->nr < PERF_MAX_STACK_DEPTH &&
-		       tail && !((unsigned long)tail & 0xf))
-			tail = user_backtrace(tail, entry);
-	} else {
-#ifdef CONFIG_COMPAT
-		/* AARCH32 compat mode */
-		struct compat_frame_tail __user *tail;
-
-		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
-		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
-			tail && !((unsigned long)tail & 0x3))
-			tail = compat_user_backtrace(tail, entry);
-#endif
-	}
-}
-
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
-static int callchain_trace(struct stackframe *frame, void *data)
-{
-	struct perf_callchain_entry *entry = data;
-	perf_callchain_store(entry, frame->pc);
-	return 0;
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-			   struct pt_regs *regs)
-{
-	struct stackframe frame;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
-	frame.pc = regs->pc;
-
-	walk_stackframe(&frame, callchain_trace, entry);
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
-
-	return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
-	int misc = 0;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
-			misc |= PERF_RECORD_MISC_GUEST_USER;
-		else
-			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
-	} else {
-		if (user_mode(regs))
-			misc |= PERF_RECORD_MISC_USER;
-		else
-			misc |= PERF_RECORD_MISC_KERNEL;
-	}
-
-	return misc;
-}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202..aa94a88 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
 #include <linux/slab.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/compiler.h>
-#include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
-#include <asm/system_misc.h>
-
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
 
 static bool psci_power_state_loses_context(u32 state)
 {
@@ -50,122 +44,8 @@
 	return !(state & ~valid_mask);
 }
 
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
-	int (*cpu_suspend)(u32 state, unsigned long entry_point);
-	int (*cpu_off)(u32 state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
-				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_MAX,
-};
-
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	err = invoke_psci_fn(fn, state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	err = invoke_psci_fn(fn, state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
-			      lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
 	int i, ret, count = 0;
@@ -230,240 +110,6 @@
 	return ret;
 }
 
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
-	unsigned long cpuid;
-	int type, cpu;
-
-	type = psci_ops.migrate_info_type();
-
-	if (type == PSCI_0_2_TOS_MP) {
-		pr_info("Trusted OS migration not required\n");
-		return;
-	}
-
-	if (type == PSCI_RET_NOT_SUPPORTED) {
-		pr_info("MIGRATE_INFO_TYPE not supported.\n");
-		return;
-	}
-
-	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
-	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
-		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
-		return;
-	}
-
-	cpuid = psci_migrate_info_up_cpu();
-	if (cpuid & ~MPIDR_HWID_BITMASK) {
-		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
-			cpuid);
-		return;
-	}
-
-	cpu = get_logical_index(cpuid);
-	resident_cpu = cpu >= 0 ? cpu : -1;
-
-	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
-	u32 ver = psci_get_version();
-
-	pr_info("PSCIv%d.%d detected in firmware.\n",
-			PSCI_VERSION_MAJOR(ver),
-			PSCI_VERSION_MINOR(ver));
-
-	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
-		pr_err("Conflicting PSCI version detected.\n");
-		return -EINVAL;
-	}
-
-	psci_0_2_set_functions();
-
-	psci_init_migrate();
-
-	return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-	/*
-	 * Starting with v0.2, the PSCI specification introduced a call
-	 * (PSCI_VERSION) that allows probing the firmware version, so
-	 * that PSCI function IDs and version specific initialization
-	 * can be carried out according to the specific version reported
-	 * by firmware
-	 */
-	err = psci_probe();
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci",	.data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
-	{},
-};
-
-int __init psci_dt_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
-	if (!acpi_psci_present()) {
-		pr_info("is not implemented in ACPI.\n");
-		return -EOPNOTSUPP;
-	}
-
-	pr_info("probing for conduit method from ACPI.\n");
-
-	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
-
-	return psci_probe();
-}
-#endif
-
-#ifdef CONFIG_SMP
-
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
 	return 0;
@@ -489,11 +135,6 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
-	return cpu == resident_cpu;
-}
-
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
 	/* Fail early if we don't have CPU_OFF support */
@@ -550,7 +191,6 @@
 	return -ETIMEDOUT;
 }
 #endif
-#endif
 
 static int psci_suspend_finisher(unsigned long index)
 {
@@ -585,7 +225,6 @@
 	.cpu_init_idle	= cpu_psci_cpu_init_idle,
 	.cpu_suspend	= cpu_psci_cpu_suspend,
 #endif
-#ifdef CONFIG_SMP
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
 	.cpu_boot	= cpu_psci_cpu_boot,
@@ -594,6 +233,5 @@
 	.cpu_die	= cpu_psci_cpu_die,
 	.cpu_kill	= cpu_psci_cpu_kill,
 #endif
-#endif
 };
 
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d882b83..1971f49 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -826,6 +826,30 @@
 	return ret;
 }
 
+static int compat_tls_get(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, void *kbuf, void __user *ubuf)
+{
+	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int compat_tls_set(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, const void *kbuf,
+			  const void __user *ubuf)
+{
+	int ret;
+	compat_ulong_t tls;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+	if (ret)
+		return ret;
+
+	target->thread.tp_value = tls;
+	return ret;
+}
+
 static const struct user_regset aarch32_regsets[] = {
 	[REGSET_COMPAT_GPR] = {
 		.core_note_type = NT_PRSTATUS,
@@ -850,6 +874,64 @@
 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
 };
 
+static const struct user_regset aarch32_ptrace_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = COMPAT_ELF_NGREG,
+		.size = sizeof(compat_elf_greg_t),
+		.align = sizeof(compat_elf_greg_t),
+		.get = compat_gpr_get,
+		.set = compat_gpr_set
+	},
+	[REGSET_FPR] = {
+		.core_note_type = NT_ARM_VFP,
+		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_vfp_get,
+		.set = compat_vfp_set
+	},
+	[REGSET_TLS] = {
+		.core_note_type = NT_ARM_TLS,
+		.n = 1,
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_tls_get,
+		.set = compat_tls_set,
+	},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	[REGSET_HW_BREAK] = {
+		.core_note_type = NT_ARM_HW_BREAK,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+	[REGSET_HW_WATCH] = {
+		.core_note_type = NT_ARM_HW_WATCH,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+#endif
+	[REGSET_SYSTEM_CALL] = {
+		.core_note_type = NT_ARM_SYSTEM_CALL,
+		.n = 1,
+		.size = sizeof(int),
+		.align = sizeof(int),
+		.get = system_call_get,
+		.set = system_call_set,
+	},
+};
+
+static const struct user_regset_view user_aarch32_ptrace_view = {
+	.name = "aarch32", .e_machine = EM_ARM,
+	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
+
 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
 				   compat_ulong_t __user *ret)
 {
@@ -1109,8 +1191,16 @@
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 {
 #ifdef CONFIG_COMPAT
-	if (is_compat_thread(task_thread_info(task)))
+	/*
+	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
+	 * user_aarch32_view compatible with arm32. Native ptrace requests on
+	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
+	 * access to the TLS register.
+	 */
+	if (is_compat_task())
 		return &user_aarch32_view;
+	else if (is_compat_thread(task_thread_info(task)))
+		return &user_aarch32_ptrace_view;
 #endif
 	return &user_aarch64_view;
 }
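
With the extended view in place, a native 32-bit tracer can read a 32-bit tracee's TLS register through the regset interface. A hedged userspace sketch (minimal error handling; NT_ARM_TLS comes from <elf.h>):

#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static unsigned int read_tracee_tls(pid_t pid)
{
	unsigned int tls = 0;
	struct iovec iov = { .iov_base = &tls, .iov_len = sizeof(tls) };

	/* For PTRACE_GETREGSET the regset type goes in the 'addr' slot. */
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_TLS, &iov) != 0)
		perror("PTRACE_GETREGSET(NT_ARM_TLS)");
	return tls;
}
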
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 926ae8d..6bab21f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/personality.h>
+#include <linux/psci.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -60,9 +61,7 @@
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
-#include <asm/psci.h>
 #include <asm/efi.h>
-#include <asm/virt.h>
 #include <asm/xen/hypervisor.h>
 
 unsigned long elf_hwcap __read_mostly;
@@ -130,7 +129,6 @@
 }
 
 struct mpidr_hash mpidr_hash;
-#ifdef CONFIG_SMP
 /**
  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
  *			  level in order to build a linear index from an
@@ -196,35 +194,11 @@
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
-#endif
-
-static void __init hyp_mode_check(void)
-{
-	if (is_hyp_mode_available())
-		pr_info("CPU: All CPU(s) started at EL2\n");
-	else if (is_hyp_mode_mismatched())
-		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
-			   "CPU: CPUs started in inconsistent modes");
-	else
-		pr_info("CPU: All CPU(s) started at EL1\n");
-}
-
-void __init do_post_cpus_up_work(void)
-{
-	hyp_mode_check();
-	apply_alternatives_all();
-}
-
-#ifdef CONFIG_UP_LATE_INIT
-void __init up_late_init(void)
-{
-	do_post_cpus_up_work();
-}
-#endif /* CONFIG_UP_LATE_INIT */
 
 static void __init setup_processor(void)
 {
-	u64 features, block;
+	u64 features;
+	s64 block;
 	u32 cwg;
 	int cls;
 
@@ -254,8 +228,8 @@
 	 * for non-negative values. Negative values are reserved.
 	 */
 	features = read_cpuid(ID_AA64ISAR0_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -267,26 +241,36 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		elf_hwcap |= HWCAP_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		elf_hwcap |= HWCAP_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		elf_hwcap |= HWCAP_CRC32;
 
+	block = cpuid_feature_extract_field(features, 20);
+	if (block > 0) {
+		switch (block) {
+		default:
+		case 2:
+			elf_hwcap |= HWCAP_ATOMICS;
+		case 1:
+			/* RESERVED */
+		case 0:
+			break;
+		}
+	}
+
 #ifdef CONFIG_COMPAT
 	/*
 	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
-	 * the Aarch32 32-bit execution state.
+	 * the AArch32 32-bit execution state.
 	 */
 	features = read_cpuid(ID_ISAR5_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -298,16 +282,13 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
 #endif
 }
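
The conversions above lean on signed extraction of the 4-bit ID register fields: encodings with the top bit set are reserved and must come out negative so the "> 0" tests reject them. A minimal sketch of the helper (in this series the real one appears to live in asm/cpufeature.h):

	static inline int cpuid_feature_extract_field(unsigned long long features,
						      int field)
	{
		/* shift the 4-bit field to the top bits, then arithmetic-shift
		 * back so encodings with bit 3 set come out negative */
		return (long long)(features << (64 - 4 - field)) >> (64 - 4);
	}
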
@@ -358,6 +339,67 @@
 	}
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Relocate initrd if it is not completely within the linear mapping.
+ * This would be the case if mem= cuts out all or part of it.
+ */
+static void __init relocate_initrd(void)
+{
+	phys_addr_t orig_start = __virt_to_phys(initrd_start);
+	phys_addr_t orig_end = __virt_to_phys(initrd_end);
+	phys_addr_t ram_end = memblock_end_of_DRAM();
+	phys_addr_t new_start;
+	unsigned long size, to_free = 0;
+	void *dest;
+
+	if (orig_end <= ram_end)
+		return;
+
+	/*
+	 * Any of the original initrd which overlaps the linear map should
+	 * be freed after relocating.
+	 */
+	if (orig_start < ram_end)
+		to_free = ram_end - orig_start;
+
+	size = orig_end - orig_start;
+
+	/* initrd needs to be relocated completely inside linear mapping */
+	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
+					   size, PAGE_SIZE);
+	if (!new_start)
+		panic("Cannot relocate initrd of size %ld\n", size);
+	memblock_reserve(new_start, size);
+
+	initrd_start = __phys_to_virt(new_start);
+	initrd_end   = initrd_start + size;
+
+	pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
+		orig_start, orig_start + size - 1,
+		new_start, new_start + size - 1);
+
+	dest = (void *)initrd_start;
+
+	if (to_free) {
+		memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
+		dest += to_free;
+	}
+
+	copy_from_early_mem(dest, orig_start + to_free, size - to_free);
+
+	if (to_free) {
+		pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
+			orig_start, orig_start + to_free - 1);
+		memblock_free(orig_start, to_free);
+	}
+}
+#else
+static inline void __init relocate_initrd(void)
+{
+}
+#endif
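
A worked example of the overlap arithmetic above, with made-up addresses (a sketch, not kernel code):

	#include <assert.h>

	static void relocate_initrd_example(void)
	{
		unsigned long long orig_start = 0x78000000ULL; /* initrd start */
		unsigned long long orig_end   = 0x79000000ULL; /* initrd end   */
		unsigned long long ram_end    = 0x78800000ULL; /* mem= clamp   */

		unsigned long long size    = orig_end - orig_start;
		unsigned long long to_free = ram_end - orig_start;

		assert(size == 16 << 20);   /* 16 MiB to relocate            */
		assert(to_free == 8 << 20); /* 8 MiB still in the linear map */
		/* The mapped 8 MiB are memcpy()'d directly; the unmapped tail
		 * goes through copy_from_early_mem(), and the old mapped part
		 * is then handed back to memblock. */
	}
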
+
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
@@ -391,6 +433,7 @@
 	acpi_boot_table_init();
 
 	paging_init();
+	relocate_initrd();
 	request_standard_resources();
 
 	early_ioremap_reset();
@@ -404,10 +447,8 @@
 	xen_early_init();
 
 	cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
 	smp_init_cpus();
 	smp_build_mpidr_hash();
-#endif
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
@@ -426,8 +467,13 @@
 
 static int __init arm64_device_init(void)
 {
-	of_iommu_init();
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	if (of_have_populated_dt()) {
+		of_iommu_init();
+		of_platform_populate(NULL, of_default_bus_match_table,
+				     NULL, NULL);
+	} else if (acpi_disabled) {
+		pr_crit("Device tree not populated\n");
+	}
 	return 0;
 }
 arch_initcall_sync(arm64_device_init);
@@ -455,6 +501,7 @@
 	"sha1",
 	"sha2",
 	"crc32",
+	"atomics",
 	NULL
 };
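
The new "atomics" string is the /proc/cpuinfo counterpart of HWCAP_ATOMICS; userspace can test the aux vector instead of parsing cpuinfo. A hedged sketch (the fallback bit value is an assumption for older headers):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_ATOMICS
	#define HWCAP_ATOMICS	(1 << 8)	/* assumed bit position */
	#endif

	int main(void)
	{
		if (getauxval(AT_HWCAP) & HWCAP_ATOMICS)
			puts("LSE atomics available");
		else
			puts("LL/SC only");
		return 0;
	}
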
 
@@ -507,9 +554,7 @@
 		 * online processors, looking for lines beginning with
 		 * "processor".  Give glibc what it expects.
 		 */
-#ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
-#endif
 
 		/*
 		 * Dump out the common processor features in a single line.
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 803cfea..f586f7c 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -82,7 +82,6 @@
 	str	x2, [x0, #CPU_CTX_SP]
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
-#ifdef CONFIG_SMP
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -94,7 +93,6 @@
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
-#endif
 	bl	__cpu_suspend_save
 	/*
 	 * Grab suspend finisher in x20 and its argument in x19
@@ -135,6 +133,14 @@
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x3			// global jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
@@ -151,7 +157,6 @@
 
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
-#ifdef CONFIG_SMP
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -161,9 +166,6 @@
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-#else
-	mov	x7, xzr
-#endif
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
 	/* load sp from context */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 50fb469..dbdaacd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -52,6 +52,7 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/virt.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -310,10 +311,22 @@
 }
 #endif
 
+static void __init hyp_mode_check(void)
+{
+	if (is_hyp_mode_available())
+		pr_info("CPU: All CPU(s) started at EL2\n");
+	else if (is_hyp_mode_mismatched())
+		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+			   "CPU: CPUs started in inconsistent modes");
+	else
+		pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	do_post_cpus_up_work();
+	hyp_mode_check();
+	apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 42f9195..149151fb 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -42,7 +42,6 @@
 #include <asm/thread_info.h>
 #include <asm/stacktrace.h>
 
-#ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	struct stackframe frame;
@@ -62,7 +61,6 @@
 	return frame.pc;
 }
 EXPORT_SYMBOL(profile_pc);
-#endif
 
 void __init time_init(void)
 {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fcb8f7b..694f6de 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -300,6 +300,6 @@
 	 * Discard anything that was parsed if we hit an error so we
 	 * don't use partial information.
 	 */
-	if (parse_dt_topology())
+	if (of_have_populated_dt() && parse_dt_topology())
 		reset_cpu_topology();
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 566bc4c..f93aae5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
@@ -32,8 +33,10 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -52,11 +55,12 @@
  * Dump out the contents of some memory nicely...
  */
 static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
-		     unsigned long top)
+		     unsigned long top, bool compat)
 {
 	unsigned long first;
 	mm_segment_t fs;
 	int i;
+	unsigned int width = compat ? 4 : 8;
 
 	/*
 	 * We need to switch to kernel mode so that we can use __get_user
@@ -75,13 +79,22 @@
 		memset(str, ' ', sizeof(str));
 		str[sizeof(str) - 1] = '\0';
 
-		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+		for (p = first, i = 0; i < (32 / width)
+					&& p < top; i++, p += width) {
 			if (p >= bottom && p < top) {
-				unsigned int val;
-				if (__get_user(val, (unsigned int *)p) == 0)
-					sprintf(str + i * 9, " %08x", val);
-				else
-					sprintf(str + i * 9, " ????????");
+				unsigned long val;
+
+				if (width == 8) {
+					if (__get_user(val, (unsigned long *)p) == 0)
+						sprintf(str + i * 17, " %016lx", val);
+					else
+						sprintf(str + i * 17, " ????????????????");
+				} else {
+					if (__get_user(val, (unsigned int *)p) == 0)
+						sprintf(str + i * 9, " %08lx", val);
+					else
+						sprintf(str + i * 9, " ????????");
+				}
 			}
 		}
 		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -95,7 +108,7 @@
 	print_ip_sym(where);
 	if (in_exception_text(where))
 		dump_mem("", "Exception stack", stack,
-			 stack + sizeof(struct pt_regs));
+			 stack + sizeof(struct pt_regs), false);
 }
 
 static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -179,11 +192,7 @@
 #else
 #define S_PREEMPT ""
 #endif
-#ifdef CONFIG_SMP
 #define S_SMP " SMP"
-#else
-#define S_SMP ""
-#endif
 
 static int __die(const char *str, int err, struct thread_info *thread,
 		 struct pt_regs *regs)
@@ -207,7 +216,8 @@
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
+			 compat_user_mode(regs));
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
@@ -459,7 +469,63 @@
 	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
+/* GENERIC_BUG traps */
+
+int is_valid_bugaddr(unsigned long addr)
+{
+	/*
+	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
+	 * So the answer is trivial -- any spurious instances with no
+	 * bug table entry will be rejected by report_bug() and passed
+	 * back to the debug-monitors code and handled as a fatal
+	 * unexpected debug exception.
+	 */
+	return 1;
+}
+
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
+	switch (report_bug(regs->pc, regs)) {
+	case BUG_TRAP_TYPE_BUG:
+		die("Oops - BUG", regs, 0);
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		/* Ideally, report_bug() should backtrace for us... but no. */
+		dump_backtrace(regs, NULL);
+		break;
+
+	default:
+		/* unknown/unrecognised bug trap type */
+		return DBG_HOOK_ERROR;
+	}
+
+	/* If thread survives, skip over the BUG instruction and continue: */
+	regs->pc += AARCH64_INSN_SIZE;	/* skip BRK and resume */
+	return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook bug_break_hook = {
+	.esr_val = 0xf2000000 | BUG_BRK_IMM,
+	.esr_mask = 0xffffffff,
+	.fn = bug_handler,
+};
+
+/*
+ * Initial handler for AArch64 BRK exceptions.
+ * This handler is only used until debug_traps_init().
+ */
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		struct pt_regs *regs)
+{
+	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+
+/* This registration must happen early, before debug_traps_init(). */
 void __init trap_init(void)
 {
-	return;
+	register_break_hook(&bug_break_hook);
 }
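
bug_handler() leans on the GENERIC_BUG machinery: BUG()/WARN() emit a BRK #BUG_BRK_IMM plus a bug-table entry, and report_bug() in lib/bug.c maps the faulting PC back to that entry. A simplified, hypothetical model of that dispatch (not the real lib/bug.c):

	enum bug_trap_type_model {
		MODEL_TRAP_NONE,	/* spurious BRK: no table entry */
		MODEL_TRAP_WARN,
		MODEL_TRAP_BUG,
	};

	struct bug_entry_model {		/* hypothetical, simplified */
		unsigned long	addr;		/* PC of the BRK instruction */
		unsigned short	flags;		/* e.g. "this is a warning"  */
	};

	static enum bug_trap_type_model
	report_bug_model(unsigned long pc, const struct bug_entry_model *tbl, int n)
	{
		for (int i = 0; i < n; i++)
			if (tbl[i].addr == pc)
				return (tbl[i].flags & 1) ? MODEL_TRAP_WARN
							  : MODEL_TRAP_BUG;
		return MODEL_TRAP_NONE;	/* rejected, handled as fatal */
	}
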
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb1..10915aa 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d98d3e3..1a811ec 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,3 +3,16 @@
 		   clear_page.o memchr.o memcpy.o memmove.o memset.o	\
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
+
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
+lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
+		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
+		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
+		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
+		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
+		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
new file mode 100644
index 0000000..b0c538b
--- /dev/null
+++ b/arch/arm64/lib/atomic_ll_sc.c
@@ -0,0 +1,3 @@
+#include <asm/atomic.h>
+#define __ARM64_IN_ATOMIC_IMPL
+#include <asm/atomic_ll_sc.h>
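
Together with the register flags in the Makefile above, this lets a call site treat a "bl" to the out-of-line LL/SC body as if it were a single instruction, so boot-time patching can swap in one LSE atomic. A hedged kernel-context sketch (the symbol and capability names follow what this series appears to use elsewhere; treat them as approximate):

	#include <asm/alternative.h>
	#include <asm/cpufeature.h>

	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		register int w0 asm ("w0") = i;
		register atomic_t *x1 asm ("x1") = v;

		asm volatile(ALTERNATIVE(
		/* default: call the out-of-line LL/SC body */
		"	bl	__ll_sc_atomic_add\n",
		/* patched in at boot when the CPU has LSE atomics */
		"	stadd	%w[i], %[v]\n",
			ARM64_HAS_LSE_ATOMICS)
		: [i] "+r" (w0), [v] "+Q" (v->counter)
		: "r" (x1)
		: "x30");	/* bl clobbers the link register */
	}
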
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371..43ac736 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,59 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/lse.h>
 
 /*
  * x0: bits 5:0  bit offset
  *     bits 31:6 word offset
  * x1: address
  */
-	.macro	bitop, name, instr
+	.macro	bitop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x3, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	\instr	x2, x2, x3
-	stxr	w0, x2, [x1]
-	cbnz	w0, 1b
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x3, [x1]"
+alt_lse	"	\llsc	x2, x2, x3",		"nop"
+alt_lse	"	stxr	w0, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w0, 1b",		"nop"
+
 	ret
 ENDPROC(\name	)
 	.endm
 
-	.macro	testop, name, instr
+	.macro	testop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x4, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	lsr	x0, x2, x3		// Save old value of bit
-	\instr	x2, x2, x4		// toggle bit
-	stlxr	w5, x2, [x1]
-	cbnz	w5, 1b
-	dmb	ish
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x4, x2, [x1]"
+	lsr	x0, x2, x3
+alt_lse	"	\llsc	x2, x2, x4",		"nop"
+alt_lse	"	stlxr	w5, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w5, 1b",		"nop"
+alt_lse	"	dmb	ish",			"nop"
+
 	and	x0, x0, #1
-3:	ret
+	ret
 ENDPROC(\name	)
 	.endm
 
 /*
  * Atomic bit operations.
  */
-	bitop	change_bit, eor
-	bitop	clear_bit, bic
-	bitop	set_bit, orr
+	bitop	change_bit, eor, steor
+	bitop	clear_bit, bic, stclr
+	bitop	set_bit, orr, stset
 
-	testop	test_and_change_bit, eor
-	testop	test_and_clear_bit, bic
-	testop	test_and_set_bit, orr
+	testop	test_and_change_bit, eor, ldeoral
+	testop	test_and_clear_bit, bic, ldclral
+	testop	test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967f..a9723c7 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5e27add..1be9ef2 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
-	add	x4, x1, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x1, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+	stp	x3, x4, [x0], #16
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 	str	x3, [x0], #8
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -51,12 +62,14 @@
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x2, x4, x1
+9:	sub	x2, x5, x1
 	mov	x3, x2
 10:	strb	wzr, [x0], #1			// zero remaining buffer space
 	subs	x3, x3, #1
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 84b6c9b..1b94661e 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,14 +34,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -53,11 +64,13 @@
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a0aeeb9..a257b47 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+	ldp	x3, x4, [x1], #16
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 	ldr	x3, [x1], #8
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 	ldr	w3, [x1], #4
@@ -51,11 +62,13 @@
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
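
The copy routines now move 16 bytes per iteration with ldp/stp before falling back to 8/4/2/1-byte tails. A hedged C model of that loop structure:

	#include <stddef.h>
	#include <string.h>

	static void copy_model(char *dst, const char *src, size_t n)
	{
		while (n >= 16) { memcpy(dst, src, 16); dst += 16; src += 16; n -= 16; }
		if (n >= 8)  { memcpy(dst, src, 8); dst += 8; src += 8; n -= 8; }
		if (n >= 4)  { memcpy(dst, src, 4); dst += 4; src += 4; n -= 4; }
		if (n >= 2)  { memcpy(dst, src, 2); dst += 2; src += 2; n -= 2; }
		if (n)       { *dst = *src; }
	}
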
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d3..eb48d5d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6c..d70ff14 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@
 		__flush_icache_all();
 }
 
-#ifdef CONFIG_SMP
-
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 {
 	unsigned long flags;
@@ -110,23 +108,12 @@
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 	unsigned int bits = asid_bits();
 
 	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from another
 	 * CPU before we acquired the lock.
@@ -136,7 +123,6 @@
 		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
-#endif
 	/*
 	 * At this point, it is guaranteed that the current mm (with an old
 	 * ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@
 			cpu_last_asid = ASID_FIRST_VERSION;
 		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
-#ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
-#endif
 		cpu_last_asid += NR_CPUS - 1;
 	}
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1ce..0bcc4bc 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -144,6 +144,7 @@
 	struct page *page;
 	void *ptr, *coherent_ptr;
 	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	size = PAGE_ALIGN(size);
 
@@ -171,9 +172,7 @@
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
 	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-				__get_dma_pgprot(attrs,
-					__pgprot(PROT_NORMAL_NC), false),
-					NULL);
+						   prot, NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -303,9 +302,10 @@
 				       sg->length, dir);
 }
 
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@
 	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -327,20 +330,24 @@
 	return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 struct dma_attrs *attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+	if (!ret)
+		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+			    PAGE_ALIGN(size), 0);
+
+	return ret;
 }
 
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
+	.get_sgtable = __swiotlb_get_sgtable,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
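
The new .get_sgtable hook backs the generic dma_get_sgtable() helper, which drivers use to export a coherent buffer as a scatterlist (e.g. for dma-buf). A hedged kernel-context usage sketch:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int export_coherent_buf(struct device *dev, void *cpu_addr,
				       dma_addr_t dma_handle, size_t size)
	{
		struct sg_table sgt;
		int ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);

		if (ret)
			return ret;
		/* sgt.sgl now describes the pages backing the buffer; hand it
		 * to an importer, then release the table. */
		sg_free_table(&sgt);
		return 0;
	}
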
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd..aba9ead 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -224,6 +226,13 @@
 	}
 
 	/*
+	 * PAN bit set implies the fault happened in kernel space, but not
+	 * in the arch's user access functions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+		goto no_context;
+
+	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
@@ -492,14 +501,22 @@
 	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
 }
 
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
 	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
 	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
 	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
-	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
+	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
 	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
 };
 
@@ -536,3 +553,10 @@
 
 	return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
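
The PSR_PAN_BIT check pairs with the SET_PSTATE_PAN toggles added to the uaccess routines above: with PAN set, a kernel access to a user address can only be a bug, so the fault path bails out immediately. A hedged C rendition of the bracketing pattern (helper names are illustrative, and the exact C-side plumbing for SET_PSTATE_PAN may differ from this sketch):

	#include <linux/stringify.h>
	#include <asm/alternative.h>
	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	static inline void uaccess_pan_allow_sketch(void)
	{
		/* clear PSTATE.PAN so the following user accesses succeed */
		asm(ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)),
				ARM64_HAS_PAN, CONFIG_ARM64_PAN));
	}

	static inline void uaccess_pan_forbid_sketch(void)
	{
		/* set PSTATE.PAN again: stray kernel accesses to user VAs fault */
		asm(ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)),
				ARM64_HAS_PAN, CONFIG_ARM64_PAN));
	}
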
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa397..c26b804 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-#ifdef CONFIG_SMP
 	preempt_disable();
-#endif
 	memcpy(dst, src, len);
 	flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
 	preempt_enable();
-#endif
 }
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec53..383b03f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce8..f5c0680 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-static int keep_initrd;
+static int keep_initrd __initdata;
 
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e..9211b85 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@
 	return ptr;
 }
 
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -461,17 +461,6 @@
 }
 
 /*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	cpu_set_idmap_tcr_t0sz();
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
 int kern_addr_valid(unsigned long addr)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 39139a3..e4ee7bd 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
 #define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
 #endif
 
-#ifdef CONFIG_SMP
 #define TCR_SMP_FLAGS	TCR_SHARED
-#else
-#define TCR_SMP_FLAGS	0
-#endif
 
 /* PTWs cacheable, inner/outer WBWA */
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@
  *	value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
 	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	msr	mdscr_el1, xzr			// Reset mdscr_el1
+	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x0			// access to the DCC from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -196,6 +192,19 @@
 	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+	/*
+	 * Hardware update of the Access and Dirty bits.
+	 */
+	mrs	x9, ID_AA64MMFR1_EL1
+	and	x9, x9, #0xf
+	cbz	x9, 2f
+	cmp	x9, #2
+	b.lt	1f
+	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
+1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+2:
+#endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
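
A hedged C rendition of the AFDBM probe above: the low field of ID_AA64MMFR1_EL1 selects how much hardware PTE management is present (0 none, 1 Access flag, 2+ Access and Dirty). The TCR bit values below match pgtable-hwdef.h as far as I can tell, but treat them as placeholders:

	#define TCR_HA_SKETCH	(1ULL << 39)	/* hardware Access flag  */
	#define TCR_HD_SKETCH	(1ULL << 40)	/* hardware Dirty update */

	static unsigned long long tcr_afdbm_bits(unsigned long long mmfr1)
	{
		switch (mmfr1 & 0xf) {		/* hardware AF/DBM field */
		case 0:				/* no hardware assistance */
			return 0;
		case 1:				/* Access flag only */
			return TCR_HA_SKETCH;
		default:			/* >= 2: Access + Dirty */
			return TCR_HA_SKETCH | TCR_HD_SKETCH;
		}
	}
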
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 2d07ce1..97c9bdf 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -44,6 +44,18 @@
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
 
+#define ATOMIC_OP(op, asm_op)						\
+ATOMIC_OP_RETURN(op, asm_op, r)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic_##op##_return(i, v);				\
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
 #undef ATOMIC_OP_RETURN
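
For reference, ATOMIC_OP(xor, eor) above hand-expands to roughly the following (the inline asm body generated by ATOMIC_OP_RETURN is elided):

	int __atomic_xor_return(int i, atomic_t *v);	/* body from ATOMIC_OP_RETURN(xor, eor, r) */

	static inline void atomic_xor(int i, atomic_t *v)
	{
		(void)__atomic_xor_return(i, v);
	}
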
 
 /*
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98..1c1c423 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
 
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591..a401c27 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8..26fccb5 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c72595..0030e21 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a..0da689d 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/types.h>
-#include <asm/spr-regs.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
@@ -23,6 +22,8 @@
 #error not SMP safe
 #endif
 
+#include <asm/atomic_defs.h>
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -34,56 +35,26 @@
 #define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline int atomic_inc_return(atomic_t *v)
+{
+	return __atomic_add_return(1, &v->counter);
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+	return __atomic_sub_return(1, &v->counter);
+}
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	add%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_add_return(i, &v->counter);
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	sub%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_sub_return(i, &v->counter);
 }
 
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@
 
 static inline void atomic_inc(atomic_t *v)
 {
-	atomic_add_return(1, v);
+	atomic_inc_return(v);
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
-	atomic_sub_return(1, v);
+	atomic_dec_return(v);
 }
 
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@
  * 64-bit atomic ops
  */
 typedef struct {
-	volatile long long counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long counter;
 
 	asm("ldd%I1 %M1,%0"
 	    : "=e"(counter)
 	    : "m"(v->counter));
+
 	return counter;
 }
 
@@ -142,10 +111,25 @@
 		     : "e"(i));
 }
 
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	return __atomic64_sub_return(i, &v->counter);
+}
 
 static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 {
@@ -176,6 +160,7 @@
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
 
+
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@
 	return c;
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic32_fetch_##op(i, &v->counter);			\
+}									\
+									\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	(void)__atomic64_fetch_##op(i, &v->counter);			\
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
 
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 0000000..36e126d
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op)						\
+extern int __atomic_##op##_return(int i, int *v);			\
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op)						\
+extern int __atomic32_fetch_##op(int i, int *v);			\
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS	static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64 bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions,
+ * and this is used for the 32 bit version; however, 'U' does not appear to
+ * work for 64 bit values (gcc-4.9).
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so we read the address
+ * through a volatile cast into a local variable to ensure the compiler
+ * _has_ to compute the correct address without a displacement. This allows
+ * us to use the regular 'm' constraint for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers; this means we cannot allow immediate
+ * values for the 64 bit versions -- like we do for the 32 bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op)						\
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
+{									\
+	int val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ld.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"%I2	%1,%2,%1		\n"		\
+	    "	cst.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+U"(*v), "=&r"(val)					\
+	    : "NPr"(i)							\
+	    : "memory", "cc7", "cc3", "icc3"				\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic_##op##_return);					\
+									\
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ldd.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"cc		%L1,%L2,%L1,icc0	\n"		\
+	    "   "#op"x		%1,%2,%1,icc0		\n"		\
+	    "	cstd.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+m"(*__v), "=&e"(val)					\
+	    : "e"(i)							\
+	    : "memory", "cc7", "cc3", "icc0", "icc3"			\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op)						\
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
+{									\
+	int old, tmp;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ld.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"%I3	%1,%3,%2		\n"	\
+		"	cst.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
+		: "NPr"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic32_fetch_##op);					\
+									\
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long old, tmp;						\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ldd.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"		%L1,%L3,%L2		\n"	\
+		"	"#op"		%1,%3,%2		\n"	\
+		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
+		: "e"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220..0df8e95 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	and%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(~mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	or%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	xor%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
 
 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
 }
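
The indexing is unchanged: bit nr lives in word nr >> 5 under mask 1 << (nr & 31), so e.g. nr = 70 gives word 2 and mask 1 << 6. A hedged, non-atomic C model of the test-and-set shape (the real code fetches and updates atomically via __atomic32_fetch_or()):

	static int test_and_set_bit_model(unsigned long nr, unsigned int *words)
	{
		unsigned int mask = 1U << (nr & 31);	/* bit within the word */
		unsigned int *p = words + (nr >> 5);	/* 32-bit word index   */
		unsigned int old = *p;			/* non-atomic stand-in */

		*p = old | mask;
		return (old & mask) != 0;
	}
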
 
 static inline void clear_bit(unsigned long nr, volatile void *addr)
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e..370dc9f 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@
 
 static DEFINE_RWLOCK(frv_dma_channels_lock);
 
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
 
 #define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
 
 #define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
 
 /*****************************************************************************/
 /*
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d..cdb4ce9 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@
 EXPORT_SYMBOL(__insl_ns);
 
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1..970e8b4 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 0000000..4d1b887
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>
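
Defining __ATOMIC_LIB__ before including atomic_defs.h makes the same macro bodies expand as real, exported functions instead of static inlines. The trick, reduced to a hypothetical skeleton:

	/* ops_defs.h (hypothetical skeleton of the pattern) */
	#ifdef OPS_LIB
	#define OPS_QUALS			/* plain external definitions */
	#define OPS_EXPORT(x)	EXPORT_SYMBOL(x)
	#else
	#define OPS_QUALS	static inline	/* header users inline them */
	#define OPS_EXPORT(x)
	#endif

	OPS_QUALS int op_add_return(int i, int *v)
	{
		return *v += i;		/* placeholder, not atomic */
	}
	OPS_EXPORT(op_add_return);

	/* ops-lib.c: emit and export the out-of-line copies exactly once */
	#define OPS_LIB
	#include "ops_defs.h"
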
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab..b7439a9 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
 
 ###############################################################################
 #
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_ANDNOT_mask
-        .type		atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
-	not.p		gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	and		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_OR_mask
-        .type		atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	or		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_XOR_mask
-        .type		atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	xor		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_add_return
-        .type		atomic_add_return,@function
-atomic_add_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	add		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_sub_return
-        .type		atomic_sub_return,@function
-atomic_sub_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	sub		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_sub_return, .-atomic_sub_return
-
-###############################################################################
-#
 # uint32_t __xchg_32(uint32_t i, uint32_t *v)
 #
 ###############################################################################
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194ee..c4c4723 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
 
 ###############################################################################
 #
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_inc_return
-        .type		atomic64_inc_return,@function
-atomic64_inc_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addicc		gr9,#1,gr9,icc0
-	addxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_dec_return
-        .type		atomic64_dec_return,@function
-atomic64_dec_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subicc		gr9,#1,gr9,icc0
-	subxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_add_return
-        .type		atomic64_add_return,@function
-atomic64_add_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addcc		gr9,gr5,gr9,icc0
-	addx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_sub_return
-        .type		atomic64_sub_return,@function
-atomic64_sub_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subcc		gr9,gr5,gr9,icc0
-	subx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_sub_return, .-atomic64_sub_return
-
-###############################################################################
-#
 # uint64_t __xchg_64(uint64_t i, uint64_t *v)
 #
 ###############################################################################
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8..702ee53 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,52 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or,  |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
 
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +89,4 @@
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
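The h8300 conversion above is this series' recurring idiom: generate the whole family of atomic operations from one macro, then instantiate and/or/xor alongside add/sub (the hexagon, m32r, m68k and metag hunks below do the same), which in turn lets the generic atomic_or() replace the old atomic_set_mask() callers seen later in the m32r smp.c hunk. A runnable userspace sketch of the token-pasting idiom, with irq save/restore replaced by stubs and all demo names hypothetical:

    #include <stdio.h>

    /* Stubs standing in for arch_local_irq_save()/restore(). */
    static inline int fake_irq_save(void)      { return 0; }
    static inline void fake_irq_restore(int f) { (void)f; }

    #define DEMO_OP_RETURN(op, c_op)                        \
    static inline int demo_##op##_return(int i, int *v)     \
    {                                                       \
            int flags = fake_irq_save();                    \
            int ret = (*v c_op i);                          \
            fake_irq_restore(flags);                        \
            return ret;                                     \
    }

    DEMO_OP_RETURN(add, +=)     /* expands to demo_add_return() */
    DEMO_OP_RETURN(sub, -=)     /* expands to demo_sub_return() */

    int main(void)
    {
            int v = 10;
            printf("%d\n", demo_add_return(5, &v));   /* prints 15 */
            printf("%d\n", demo_sub_return(3, &v));   /* prints 12 */
            return 0;
    }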
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 93d0702..811d61f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -132,6 +132,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 344387a..a6d6190 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1140,13 +1140,9 @@
 
 #ifdef CONFIG_NUMA
 	{
-		int node = ioc->node;
 		struct page *page;
 
-		if (node == NUMA_NO_NODE)
-			node = numa_node_id();
-
-		page = alloc_pages_exact_node(node, flags, get_order(size));
+		page = alloc_pages_node(ioc->node, flags, get_order(size));
 		if (unlikely(!page))
 			return NULL;
 
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf0350..be4beeb 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)
 
-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v)						\
 ({									\
 	int __ia64_aar_i = (i);						\
@@ -71,6 +69,16 @@
 		: ia64_atomic_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op)						\
 static __inline__ long							\
 ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
@@ -89,8 +97,6 @@
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)
 
-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v)					\
 ({									\
 	long __ia64_aar_i = (i);					\
@@ -115,6 +121,16 @@
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba43..df896a1 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
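The barrier.h change (repeated for metag further down) is mechanical: ACCESS_ONCE() gives way to READ_ONCE()/WRITE_ONCE(), which also cope with non-scalar types, and the acquire/release semantics are unchanged. A minimal producer/consumer sketch of how the two primitives pair up (variable names hypothetical, fragment only):

    /* Producer: write the payload, then publish the flag with
     * release semantics so the payload becomes visible first. */
    shared_data = 42;
    smp_store_release(&data_ready, 1);

    /* Consumer: read the flag with acquire semantics; if it is
     * set, the payload read below is guaranteed to see 42. */
    if (smp_load_acquire(&data_ready))
            use(shared_data);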
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 80a7e34..9041bbe 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -435,6 +435,7 @@
 {
 	return ioremap(phys_addr, size);
 }
+#define ioremap_cache ioremap_cache
 
 
 /*
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 4826ff9..5fa3848 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -4,7 +4,7 @@
 #include <linux/errno.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* IBM Summit (EXA) Cyclone counter code*/
 #define CYCLONE_CBAR_ADDR 0xFEB00CD0
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 20e8a9b..f3976da 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -97,7 +97,7 @@
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 97e48b0..1841ef6 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -645,7 +645,7 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
@@ -656,7 +656,7 @@
 	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL);
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 
 	if (ret)
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d0853e8..8f59907 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -92,7 +92,7 @@
 	 */
 	node = pcibus_to_node(pdev->bus);
 	if (likely(node >=0)) {
-		struct page *p = alloc_pages_exact_node(node,
+		struct page *p = __alloc_pages_node(node,
 						flags, get_order(size));
 
 		if (likely(p))
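Both ia64 hunks above ride the same mm API cleanup: alloc_pages_node() now tolerates NUMA_NO_NODE by falling back to the local node, so sba_iommu's manual check could go, while __alloc_pages_node() is the cheaper variant for callers that already hold a known-valid nid, as pci_dma.c does after its pcibus_to_node() test. A sketch of the resulting calling convention (kernel-style fragment, not from the patch):

    /* nid may be NUMA_NO_NODE: use the checking helper. */
    page = alloc_pages_node(nid, GFP_KERNEL, order);

    /* nid already validated (e.g. pcibus_to_node() >= 0):
     * the double-underscore variant skips the fallback. */
    page = __alloc_pages_node(nid, GFP_KERNEL, order);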
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74a..025e2a1 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -239,45 +243,4 @@
 	return c;
 }
 
-
-static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_clear_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"and	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (~mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_set_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"or	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
 #endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc7..62d6961 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index 075aaab..f7836c6 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -25,6 +25,7 @@
 #include <asm/m54xxgpt.h>
 #ifdef CONFIG_MMU
 #include <asm/mmu_context.h>
+#include <linux/pfn.h>
 #endif
 
 /***************************************************************************/
@@ -91,13 +92,13 @@
 	m68k_memory[0].size = _ramend - _rambase;
 
 	/* compute total pages in system */
-	num_pages = (_ramend - _rambase) >> PAGE_SHIFT;
+	num_pages = PFN_DOWN(_ramend - _rambase);
 
 	/* page numbers */
 	memstart = PAGE_ALIGN(_ramstart);
-	min_low_pfn = _rambase >> PAGE_SHIFT;
-	start_pfn = memstart >> PAGE_SHIFT;
-	max_low_pfn = _ramend >> PAGE_SHIFT;
+	min_low_pfn = PFN_DOWN(_rambase);
+	start_pfn = PFN_DOWN(memstart);
+	max_low_pfn = PFN_DOWN(_ramend);
 	high_memory = (void *)_ramend;
 
 	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
diff --git a/arch/m68k/coldfire/pit.c b/arch/m68k/coldfire/pit.c
index 493b311..d86a9ff 100644
--- a/arch/m68k/coldfire/pit.c
+++ b/arch/m68k/coldfire/pit.c
@@ -42,37 +42,28 @@
  * This is also called after resume to bring the PIT into operation again.
  */
 
-static void init_cf_pit_timer(enum clock_event_mode mode,
-                             struct clock_event_device *evt)
+static int cf_pit_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
+	__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
+	__raw_writew(PIT_CYCLES_PER_JIFFY, TA(MCFPIT_PMR));
+	__raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE |
+		     MCFPIT_PCSR_OVW | MCFPIT_PCSR_RLD |
+		     MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
+	return 0;
+}
 
-		__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
-		__raw_writew(PIT_CYCLES_PER_JIFFY, TA(MCFPIT_PMR));
-		__raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | \
-				MCFPIT_PCSR_OVW | MCFPIT_PCSR_RLD | \
-				MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
-		break;
+static int cf_pit_set_oneshot(struct clock_event_device *evt)
+{
+	__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
+	__raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE |
+		     MCFPIT_PCSR_OVW | MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-
-		__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-
-		__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
-		__raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | \
-				MCFPIT_PCSR_OVW | MCFPIT_PCSR_CLK64, \
-				TA(MCFPIT_PCSR));
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
+static int cf_pit_shutdown(struct clock_event_device *evt)
+{
+	__raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
+	return 0;
 }
 
 /*
@@ -88,12 +79,15 @@
 }
 
 struct clock_event_device cf_pit_clockevent = {
-	.name		= "pit",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= init_cf_pit_timer,
-	.set_next_event	= cf_pit_next_event,
-	.shift		= 32,
-	.irq		= MCF_IRQ_PIT1,
+	.name			= "pit",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= cf_pit_shutdown,
+	.set_state_periodic	= cf_pit_set_periodic,
+	.set_state_oneshot	= cf_pit_set_oneshot,
+	.set_next_event		= cf_pit_next_event,
+	.shift			= 32,
+	.irq			= MCF_IRQ_PIT1,
 };
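This is the new clockevents model: the multiplexed set_mode(enum clock_event_mode, ...) callback is split into one set_state_*() hook per state, each returning 0 on success (the au1x time.c change below simply deletes an empty set_mode instead). A minimal sketch of a device using the per-state hooks (all demo_* names hypothetical, fragment only):

    static int demo_set_periodic(struct clock_event_device *evt)
    {
            /* program the hardware for periodic interrupts */
            return 0;
    }

    static int demo_shutdown(struct clock_event_device *evt)
    {
            /* stop the timer */
            return 0;
    }

    static struct clock_event_device demo_clockevent = {
            .name                   = "demo",
            .features               = CLOCK_EVT_FEAT_PERIODIC,
            .set_state_periodic     = demo_set_periodic,
            .set_state_shutdown     = demo_shutdown,
    };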
 
 
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e85f047..039fac1 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -77,6 +77,10 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -170,16 +174,6 @@
 	return c != 0;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
-}
-
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 948d868..21c4c26 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -74,44 +74,14 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	AND	%0, %0, %2\n"
-		"	LNKSETD	[%1] %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (~mask)
-		: "cc");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	OR	%0, %0, %2\n"
-		"	LNKSETD	[%1], %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (mask)
-		: "cc");
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int result, temp;
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f5d5898..f8efe38 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -68,31 +68,14 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter &= ~mask;
-	__global_unlock1(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter |= mask;
-	__global_unlock1(flags);
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5a696e5..172b7e5 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,12 +90,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h
index 2901f0f..a2269d6 100644
--- a/arch/metag/include/asm/ftrace.h
+++ b/arch/metag/include/asm/ftrace.h
@@ -6,7 +6,7 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount_wrapper(void);
-#define MCOUNT_ADDR		((long)(mcount_wrapper))
+#define MCOUNT_ADDR		((unsigned long)(mcount_wrapper))
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index fd2fa2ec..da0144f 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -3,7 +3,7 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-#define MCOUNT_ADDR		((long)(_mcount))
+#define MCOUNT_ADDR		((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE	8 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4ab9a79..752acca 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,8 +1,10 @@
 config MIPS
 	bool
 	default y
+	select ARCH_SUPPORTS_UPROBES
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
@@ -13,7 +15,6 @@
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
-	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -409,6 +410,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select DMA_MAYBE_COHERENT
 	select GENERIC_ISA_DMA
 	select HAVE_PCSPKR_PLATFORM
@@ -459,6 +461,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select DMA_NONCOHERENT
@@ -899,6 +902,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select ARCH_PHYS_ADDR_T_64BIT
+	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
@@ -948,6 +952,7 @@
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
+source "arch/mips/pistachio/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
 source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
@@ -1041,6 +1046,9 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT
 
+config ARCH_SUPPORTS_UPROBES
+	bool
+
 config DMA_MAYBE_COHERENT
 	select DMA_NONCOHERENT
 	bool
@@ -1364,7 +1372,7 @@
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
 config CPU_MIPS32_R6
-	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	bool "MIPS32 Release 6"
 	depends on SYS_HAS_CPU_MIPS32_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1415,7 +1423,7 @@
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
 config CPU_MIPS64_R6
-	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	bool "MIPS64 Release 6"
 	depends on SYS_HAS_CPU_MIPS64_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1965,6 +1973,7 @@
 	select TRAD_SIGNALS
 	help
 	  Select this option if you want to build a 32-bit kernel.
+
 config 64BIT
 	bool "64-bit kernel"
 	depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
@@ -2110,7 +2119,7 @@
 
 config MIPS_MT_SMP
 	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
@@ -2211,7 +2220,7 @@
 
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
-	depends on SYS_SUPPORTS_MIPS_CMP
+	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K
@@ -2228,7 +2237,7 @@
 
 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS
+	depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2303,7 +2312,7 @@
 endchoice
 
 config CPU_HAS_MSA
-	bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
+	bool "Support for the MIPS SIMD Architecture"
 	depends on CPU_SUPPORTS_MSA
 	depends on 64BIT || MIPS_O32_FP64_SUPPORT
 	help
@@ -2643,7 +2652,7 @@
 	  If unsure, say Y. Only embedded should say N here.
 
 config MIPS_O32_FP64_SUPPORT
-	bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
+	bool "Support for O32 binaries using 64-bit FP"
 	depends on 32BIT || MIPS32_O32
 	help
 	  When this is enabled, the kernel will support use of 64-bit floating
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3a2b775..e250524 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -87,15 +87,6 @@
 	  Select compile flags that produce code that can be processed by the
 	  Corelis mksym utility and UDB Emulator.
 
-config RUNTIME_DEBUG
-	bool "Enable run-time debugging"
-	depends on DEBUG_KERNEL
-	help
-	  If you say Y here, some debugging macros will do run-time checking.
-	  If you say N here, those macros will mostly turn to no-ops.  See
-	  arch/mips/include/asm/debug.h for debugging macros.
-	  If unsure, say N.
-
 config DEBUG_ZBOOT
 	bool "Enable compressed kernel support debugging"
 	depends on DEBUG_KERNEL && SYS_SUPPORTS_ZBOOT
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index b962898..7fa2488 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -6,13 +6,6 @@
 config ALCHEMY_GPIOINT_AU1300
 	bool
 
-# select this in your board config if you don't want to use the gpio
-# namespace as documented in the manuals.  In this case however you need
-# to create the necessary gpio_* functions in your board code/headers!
-# see arch/mips/include/asm/mach-au1x00/gpio.h   for more information.
-config ALCHEMY_GPIO_INDIRECT
-	def_bool n
-
 choice
 	prompt "Machine type"
 	depends on MIPS_ALCHEMY
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index acf9a2a..79efe4c 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
 #include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 1e3b102..85bb756 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -32,6 +32,7 @@
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <prom.h>
 
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index f64744f..23800b8 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -5,10 +5,5 @@
 # Makefile for the Alchemy Au1xx0 CPUs, generic files.
 #
 
-obj-y += prom.o time.o clock.o platform.o power.o \
+obj-y += prom.o time.o clock.o platform.o power.o gpiolib.o \
 	 setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o
-
-# optional gpiolib support
-ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),)
- obj-$(CONFIG_GPIOLIB) += gpiolib.o
-endif
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 6cb60ab..4c496c5 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -491,7 +491,7 @@
 	default:
 		ret = -EINVAL;
 	}
-	__irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
+	irq_set_chip_handler_name_locked(d, chip, handler, name);
 
 	wmb();
 
@@ -703,7 +703,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_chip_handler_name_locked(d->irq, &au1300_gpic, hdl, name);
+	irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
 
 	au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
 
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 50e17e1..f99d3ec 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -69,11 +69,6 @@
 	return 0;
 }
 
-static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *cd)
-{
-}
-
 static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
 {
 	struct clock_event_device *cd = dev_id;
@@ -86,7 +81,6 @@
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.rating		= 1500,
 	.set_next_event = au1x_rtcmatch2_set_next_event,
-	.set_mode	= au1x_rtcmatch2_set_mode,
 	.cpumask	= cpu_all_mask,
 };
 
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index c98c9ea..324ad72 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/irq.h>
@@ -88,10 +89,11 @@
 static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+	struct irq_chip *chip = irq_desc_get_chip(d);
 
-	disable_irq_nosync(irq);
+	chained_irq_enter(chip, d);
 	generic_handle_irq(bcsr_csc_base + __ffs(bisr));
-	enable_irq(irq);
+	chained_irq_exit(chip, d);
 }
 
 static void bcsr_irq_mask(struct irq_data *d)
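The bcsr demux above (and the ath79 irq.c hunks below, which drop their disable_irq_nosync()/enable_irq() pairs outright) moves to the chained_irq helpers, which mask/ack and later unmask/eoi the parent interrupt through its own irq_chip instead of taking the slow full-disable path. A minimal flow-handler sketch in the 4.3-era signature (demo_* names hypothetical):

    #include <linux/irqchip/chained_irq.h>

    static unsigned int demo_child_irq;    /* mapped child interrupt */

    static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);

            chained_irq_enter(chip, desc);        /* mask/ack the parent */
            generic_handle_irq(demo_child_irq);   /* run the child handler */
            chained_irq_exit(chip, desc);         /* unmask/eoi the parent */
    }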
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 001102e..bdeed9d 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -33,6 +33,7 @@
 #include <linux/spi/spi_gpio.h>
 #include <linux/spi/ads7846.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1000_dma.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-db1x00/bcsr.h>
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 1c64fdb..b580770 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -24,6 +24,7 @@
 #include <linux/wm97xx.h>
 
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 0fd5177..5740bcf 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -20,6 +20,7 @@
 #include <linux/spi/flash.h>
 #include <asm/bootinfo.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index bfeb8f3..93024dc 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -9,7 +9,7 @@
 #include <linux/suspend.h>
 #include <linux/sysfs.h>
 #include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-db1x00/bcsr.h>
 
 /*
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index d8dbd8f..f493045 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -21,7 +21,10 @@
 #include <linux/module.h>
 #include <linux/gpio.h>
 
-#include <asm/mach-ar7/gpio.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define AR7_GPIO_MAX 32
+#define TITAN_GPIO_MAX 51
 
 struct ar7_gpio_chip {
 	void __iomem		*regs;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 298b977..58fca9a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -39,7 +39,6 @@
 
 #include <asm/addrspace.h>
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/gpio.h>
 #include <asm/mach-ar7/prom.h>
 
 /*****************************************************************************
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 820b7a3..7bb9a67 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -23,7 +23,6 @@
 #include <asm/reboot.h>
 #include <asm/mach-ar7/ar7.h>
 #include <asm/mach-ar7/prom.h>
-#include <asm/mach-ar7/gpio.h>
 
 static void ar7_machine_restart(char *command)
 {
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 5c9ff69..fcc382c 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -8,7 +8,7 @@
 # under the terms of the GNU General Public License version 2 as published
 # by the Free Software Foundation.
 
-obj-y	:= prom.o setup.o irq.o common.o clock.o gpio.o
+obj-y	:= prom.o setup.o irq.o common.o clock.o
 
 obj-$(CONFIG_EARLY_PRINTK)		+= early_printk.o
 obj-$(CONFIG_PCI)			+= pci.o
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index e5ea712..ca7cc19 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -25,9 +25,6 @@
 void ath79_ddr_ctrl_init(void);
 void ath79_ddr_wb_flush(unsigned int reg);
 
-void ath79_gpio_function_enable(u32 mask);
-void ath79_gpio_function_disable(u32 mask);
-void ath79_gpio_function_setup(u32 set, u32 clear);
 void ath79_gpio_init(void);
 
 #endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
deleted file mode 100644
index f59ccb2..0000000
--- a/arch/mips/ath79/gpio.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API support
- *
- *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
- *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-ath79.h>
-#include <linux/of_device.h>
-
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79.h>
-#include "common.h"
-
-static void __iomem *ath79_gpio_base;
-static u32 ath79_gpio_count;
-static DEFINE_SPINLOCK(ath79_gpio_lock);
-
-static void __ath79_gpio_set_value(unsigned gpio, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-
-	if (value)
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
-}
-
-static int __ath79_gpio_get_value(unsigned gpio)
-{
-	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
-}
-
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
-	return __ath79_gpio_get_value(offset);
-}
-
-static void ath79_gpio_set_value(struct gpio_chip *chip,
-				  unsigned offset, int value)
-{
-	__ath79_gpio_set_value(offset, value);
-}
-
-static int ath79_gpio_direction_input(struct gpio_chip *chip,
-				       unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ath79_gpio_direction_output(struct gpio_chip *chip,
-					unsigned offset, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-					int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static struct gpio_chip ath79_gpio_chip = {
-	.label			= "ath79",
-	.get			= ath79_gpio_get_value,
-	.set			= ath79_gpio_set_value,
-	.direction_input	= ath79_gpio_direction_input,
-	.direction_output	= ath79_gpio_direction_output,
-	.base			= 0,
-};
-
-static void __iomem *ath79_gpio_get_function_reg(void)
-{
-	u32 reg = 0;
-
-	if (soc_is_ar71xx() ||
-	    soc_is_ar724x() ||
-	    soc_is_ar913x() ||
-	    soc_is_ar933x())
-		reg = AR71XX_GPIO_REG_FUNC;
-	else if (soc_is_ar934x())
-		reg = AR934X_GPIO_REG_FUNC;
-	else
-		BUG();
-
-	return ath79_gpio_base + reg;
-}
-
-void ath79_gpio_function_setup(u32 set, u32 clear)
-{
-	void __iomem *reg = ath79_gpio_get_function_reg();
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel((__raw_readl(reg) & ~clear) | set, reg);
-	/* flush write */
-	__raw_readl(reg);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-}
-
-void ath79_gpio_function_enable(u32 mask)
-{
-	ath79_gpio_function_setup(mask, 0);
-}
-
-void ath79_gpio_function_disable(u32 mask)
-{
-	ath79_gpio_function_setup(0, mask);
-}
-
-static const struct of_device_id ath79_gpio_of_match[] = {
-	{ .compatible = "qca,ar7100-gpio" },
-	{ .compatible = "qca,ar9340-gpio" },
-	{},
-};
-
-static int ath79_gpio_probe(struct platform_device *pdev)
-{
-	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
-	struct device_node *np = pdev->dev.of_node;
-	struct resource *res;
-	bool oe_inverted;
-	int err;
-
-	if (np) {
-		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
-		if (err) {
-			dev_err(&pdev->dev, "ngpios property is not valid\n");
-			return err;
-		}
-		if (ath79_gpio_count >= 32) {
-			dev_err(&pdev->dev, "ngpios must be less than 32\n");
-			return -EINVAL;
-		}
-		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
-	} else if (pdata) {
-		ath79_gpio_count = pdata->ngpios;
-		oe_inverted = pdata->oe_inverted;
-	} else {
-		dev_err(&pdev->dev, "No DT node or platform data found\n");
-		return -EINVAL;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ath79_gpio_base = devm_ioremap_nocache(
-		&pdev->dev, res->start, resource_size(res));
-	if (!ath79_gpio_base)
-		return -ENOMEM;
-
-	ath79_gpio_chip.dev = &pdev->dev;
-	ath79_gpio_chip.ngpio = ath79_gpio_count;
-	if (oe_inverted) {
-		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
-		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
-	}
-
-	err = gpiochip_add(&ath79_gpio_chip);
-	if (err) {
-		dev_err(&pdev->dev,
-			"cannot add AR71xx GPIO chip, error=%d", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static struct platform_driver ath79_gpio_driver = {
-	.driver = {
-		.name = "ath79-gpio",
-		.of_match_table	= ath79_gpio_of_match,
-	},
-	.probe = ath79_gpio_probe,
-};
-
-module_platform_driver(ath79_gpio_driver);
-
-int gpio_get_value(unsigned gpio)
-{
-	if (gpio < ath79_gpio_count)
-		return __ath79_gpio_get_value(gpio);
-
-	return __gpio_get_value(gpio);
-}
-EXPORT_SYMBOL(gpio_get_value);
-
-void gpio_set_value(unsigned gpio, int value)
-{
-	if (gpio < ath79_gpio_count)
-		__ath79_gpio_set_value(gpio, value);
-	else
-		__gpio_set_value(gpio, value);
-}
-EXPORT_SYMBOL(gpio_set_value);
-
-int gpio_to_irq(unsigned gpio)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 2021be2..807132b 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -123,8 +123,6 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
 
 	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
@@ -136,8 +134,6 @@
 	} else {
 		spurious_interrupt();
 	}
-
-	enable_irq(irq);
 }
 
 static void ar934x_ip2_irq_init(void)
@@ -156,14 +152,12 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
@@ -175,17 +169,12 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP2_IRQ(1));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
 		  QCA955X_EXT_INT_USB1 |
@@ -193,7 +182,7 @@
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_USB1) {
@@ -210,9 +199,6 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP3_IRQ(2));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_irq_init(void)
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 08a4abf..52caa75 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -396,10 +396,9 @@
 {
 	size_t size = nbuttons * sizeof(*buttons);
 
-	bcm47xx_button_pdata.buttons = kmalloc(size, GFP_KERNEL);
+	bcm47xx_button_pdata.buttons = kmemdup(buttons, size, GFP_KERNEL);
 	if (!bcm47xx_button_pdata.buttons)
 		return -ENOMEM;
-	memcpy(bcm47xx_button_pdata.buttons, buttons, size);
 	bcm47xx_button_pdata.nbuttons = nbuttons;
 
 	return 0;
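kmemdup() collapses the kmalloc()+memcpy() pair into one call with identical semantics and error handling; the pattern side by side (sketch, not from the patch):

    /* before: open-coded duplication */
    dst = kmalloc(len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;
    memcpy(dst, src, len);

    /* after: one allocating copy */
    dst = kmemdup(src, len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;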
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index e3e808a..1a47ec2 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -60,7 +60,7 @@
 	if (m)
 		enable &= cpumask_test_cpu(cpu, m);
 	else if (irqd_affinity_was_set(d))
-		enable &= cpumask_test_cpu(cpu, d->affinity);
+		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
 #endif
 	return enable;
 }
@@ -365,9 +365,9 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index dc91bde..d5bdee1 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -78,7 +78,7 @@
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
-quiet_cmd_zld = LD	$@
+quiet_cmd_zld = LD      $@
       cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
 quiet_cmd_strip = STRIP	  $@
       cmd_strip = $(STRIP) -s $@
diff --git a/arch/mips/boot/dts/netlogic/xlp_evp.dts b/arch/mips/boot/dts/netlogic/xlp_evp.dts
index 89ad048..ec16ec2d 100644
--- a/arch/mips/boot/dts/netlogic/xlp_evp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_evp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp832-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_fvp.dts b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
index 63e62b7..4bcebe6 100644
--- a/arch/mips/boot/dts/netlogic/xlp_fvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp208-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_gvp.dts b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
index bb4ecd1..b3ccb82 100644
--- a/arch/mips/boot/dts/netlogic/xlp_gvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp980-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_rvp.dts b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
index 7188aed..3783639a 100644
--- a/arch/mips/boot/dts/netlogic/xlp_rvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp532-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_svp.dts b/arch/mips/boot/dts/netlogic/xlp_svp.dts
index 1ebd00ed..44d6640 100644
--- a/arch/mips/boot/dts/netlogic/xlp_svp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_svp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp316-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 9eb0fee..36e30d6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -195,6 +195,12 @@
 			return 8;
 		else
 			return -1;
+	case CVMX_BOARD_TYPE_KONTRON_S1901:
+		if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+			return 1;
+		else
+			return -1;
+
 	}
 
 	/* Some unknown board. Somebody forgot to update this function... */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index 453d7f6..b45b297 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -95,9 +95,9 @@
 	uint8_t *data_address;
 	uint8_t *end_of_data;
 
-	cvmx_dprintf("Packet Length:   %u\n", work->len);
-	cvmx_dprintf("	  Input Port:  %u\n", work->ipprt);
-	cvmx_dprintf("	  QoS:	       %u\n", work->qos);
+	cvmx_dprintf("Packet Length:   %u\n", work->word1.len);
+	cvmx_dprintf("	  Input Port:  %u\n", cvmx_wqe_get_port(work));
+	cvmx_dprintf("	  QoS:	       %u\n", cvmx_wqe_get_qos(work));
 	cvmx_dprintf("	  Buffers:     %u\n", work->word2.s.bufs);
 
 	if (work->word2.s.bufs == 0) {
@@ -127,7 +127,7 @@
 		}
 	} else
 		buffer_ptr = work->packet_ptr;
-	remaining_bytes = work->len;
+	remaining_bytes = work->word1.len;
 
 	while (remaining_bytes) {
 		start_of_buffer =
@@ -382,6 +382,10 @@
 		return port + 32;
 	case 3:
 		return port + 36;
+	case 4:
+		return port + 40;
+	case 5:
+		return port + 44;
 	}
 	return -1;
 }
@@ -404,6 +408,10 @@
 		return 2;
 	else if (ipd_port < 40)
 		return 3;
+	else if (ipd_port < 44)
+		return 4;
+	else if (ipd_port < 48)
+		return 5;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
 			     "port number\n");
@@ -428,6 +436,10 @@
 		return ipd_port & 3;
 	else if (ipd_port < 40)
 		return ipd_port & 3;
+	else if (ipd_port < 44)
+		return ipd_port & 3;
+	else if (ipd_port < 48)
+		return ipd_port & 3;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_index_num: "
 			     "Illegal IPD port number\n");
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 7653b7e..a56ee59 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -124,6 +124,13 @@
 	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
 	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
 
+	/* Setup PKND */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+		gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+	}
+
 	/* (1) Interface has already been enabled. */
 
 	/* (2) Disable GMX. */
@@ -151,7 +158,12 @@
 	/* (4)c Apply reset sequence */
 	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
 	xauiCtl.s.lo_pwr = 0;
-	xauiCtl.s.reset = 1;
+
+	/* Issuing a reset here seems to hang some CN68XX chips. */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+		xauiCtl.s.reset = 1;
+
 	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
 
 	/* Wait for PCS to come out of reset */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 7e5cf7a..376701f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -83,6 +83,8 @@
  */
 int cvmx_helper_get_number_of_interfaces(void)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 9;
 	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
 		return 4;
 	else
@@ -656,6 +658,21 @@
 	fau_to.s.tout_val = 0xfff;
 	fau_to.s.tout_enb = 0;
 	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
 	return 0;
 }
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 008b881..87be167 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -39,6 +39,143 @@
  * Internal state of packet output
  */
 
+static int __cvmx_pko_int(int interface, int index)
+{
+	switch (interface) {
+	case 0:
+		return index;
+	case 1:
+		return 4;
+	case 2:
+		return index + 0x08;
+	case 3:
+		return index + 0x0c;
+	case 4:
+		return index + 0x10;
+	case 5:
+		return 0x1c;
+	case 6:
+		return 0x1d;
+	case 7:
+		return 0x1e;
+	case 8:
+		return 0x1f;
+	default:
+		return -1;
+	}
+}
+
+static void __cvmx_pko_iport_config(int pko_port)
+{
+	int queue;
+	const int num_queues = 1;
+	const int base_queue = pko_port;
+	const int static_priority_end = 1;
+	const int static_priority_base = 1;
+
+	for (queue = 0; queue < num_queues; queue++) {
+		union cvmx_pko_mem_iqueue_ptrs config;
+		cvmx_cmd_queue_result_t cmd_res;
+		uint64_t *buf_ptr;
+
+		config.u64		= 0;
+		config.s.index		= queue;
+		config.s.qid		= base_queue + queue;
+		config.s.ipid		= pko_port;
+		config.s.tail		= (queue == (num_queues - 1));
+		config.s.s_tail		= (queue == static_priority_end);
+		config.s.static_p	= (static_priority_base >= 0);
+		config.s.static_q	= (queue <= static_priority_end);
+		config.s.qos_mask	= 0xff;
+
+		cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH,
+				CVMX_FPA_OUTPUT_BUFFER_POOL,
+				(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+		WARN(cmd_res,
+		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
+			__func__, (int)cmd_res, pko_port, base_queue,
+			num_queues, queue);
+
+		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		CVMX_SYNCWS;
+		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+	int port;
+
+	for (port = 0; port < 48; port++)
+		__cvmx_pko_iport_config(port);
+}
+
+static void __cvmx_pko_port_map_o68(void)
+{
+	int port;
+	int interface, index;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+	config.u64 = 0;
+	config.s.eid = 31; /* Invalid */
+	for (port = 0; port < 128; port++) {
+		config.s.ipid = port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	for (port = 0; port < 48; port++) {
+		interface = cvmx_helper_get_interface_num(port);
+		index = cvmx_helper_get_interface_index_num(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;
+		config.s.crc = 1;
+		config.s.min_pkt = 1;
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = config.s.intr;
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+			index : port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_chip_init(void)
+{
+	int i;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_pko_port_map_o68();
+		__cvmx_pko_queue_alloc_o68();
+		return;
+	}
+
+	/*
+	 * Initialize queues
+	 */
+	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
+		const uint64_t priority = 8;
+
+		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+				     &priority);
+	}
+}
+
 /**
  * Call before any other calls to initialize the packet
  * output system.  This does chip global config, and should only be
@@ -47,8 +184,6 @@
 
 void cvmx_pko_initialize_global(void)
 {
-	int i;
-	uint64_t priority = 8;
 	union cvmx_pko_reg_cmd_buf config;
 
 	/*
@@ -62,9 +197,10 @@
 
 	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
 
-	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
-		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
-				     &priority);
+	/*
+	 * Chip-specific setup.
+	 */
+	__cvmx_pko_chip_init();
 
 	/*
 	 * If we aren't using all of the queues optimize PKO's
@@ -212,6 +348,9 @@
 	int static_priority_base = -1;
 	int static_priority_end = -1;
 
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return CVMX_PKO_SUCCESS;
+
 	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
 	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
 		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
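
__cvmx_pko_int() above is a pure lookup from (interface, index) to a PKO engine ID (eid). As a stand-alone sanity check (not part of this patch), the same mapping can be mirrored in user-space C and exercised directly:

	#include <assert.h>
	#include <stdio.h>

	/* Mirror of __cvmx_pko_int() above: map (interface, index) to a
	 * PKO engine ID (eid), or -1 for an unknown interface. */
	static int pko_int(int interface, int index)
	{
		switch (interface) {
		case 0: return index;		/* eids 0-3 */
		case 1: return 4;		/* single fixed eid */
		case 2: return index + 0x08;
		case 3: return index + 0x0c;
		case 4: return index + 0x10;
		case 5: return 0x1c;
		case 6: return 0x1d;
		case 7: return 0x1e;
		case 8: return 0x1f;
		default: return -1;
		}
	}

	int main(void)
	{
		/* Interfaces 5-8 collapse to the fixed engines 0x1c-0x1f. */
		assert(pko_int(5, 0) == 0x1c);
		assert(pko_int(8, 3) == 0x1f);
		/* Interface 2, index 2 lands on engine 0x0a. */
		printf("eid=%#x\n", pko_int(2, 2));
		return 0;
	}
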
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index d8124a3..f26c3c6 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -225,13 +225,14 @@
 
 #ifdef CONFIG_SMP
 	int cpu;
-	int weight = cpumask_weight(data->affinity);
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	int weight = cpumask_weight(mask);
 	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
 		cpu = cd->current_cpu;
 		for (;;) {
-			cpu = cpumask_next(cpu, data->affinity);
+			cpu = cpumask_next(cpu, mask);
 			if (cpu >= nr_cpu_ids) {
 				cpu = -1;
 				continue;
@@ -240,7 +241,7 @@
 			}
 		}
 	} else if (weight == 1) {
-		cpu = cpumask_first(data->affinity);
+		cpu = cpumask_first(mask);
 	} else {
 		cpu = smp_processor_id();
 	}
@@ -662,6 +663,11 @@
 	irqd_set_trigger_type(data, t);
 	octeon_irq_gpio_setup(data);
 
+	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
 	return IRQ_SET_MASK_OK;
 }
 
@@ -696,32 +702,23 @@
 	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_data *data = irq_desc_get_irq_data(desc);
-
-	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
-		handle_edge_irq(irq, desc);
-	else
-		handle_level_irq(irq, desc);
-}
-
 #ifdef CONFIG_SMP
 
 static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 {
 	int cpu = smp_processor_id();
 	cpumask_t new_affinity;
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
 
-	if (!cpumask_test_cpu(cpu, data->affinity))
+	if (!cpumask_test_cpu(cpu, mask))
 		return;
 
-	if (cpumask_weight(data->affinity) > 1) {
+	if (cpumask_weight(mask) > 1) {
 		/*
 		 * It has multi CPU affinity, just remove this CPU
 		 * from the affinity set.
 		 */
-		cpumask_copy(&new_affinity, data->affinity);
+		cpumask_copy(&new_affinity, mask);
 		cpumask_clear_cpu(cpu, &new_affinity);
 	} else {
 		/* Otherwise, put it on lowest numbered online CPU. */
@@ -1227,8 +1224,13 @@
 		octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
+	/*
+	 * Default to handle_level_irq. If the DT specifies a different
+	 * trigger type, the irq_set_type callback is invoked and the
+	 * handler updated accordingly.
+	 */
 	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+				       octeon_irq_gpio_chip, handle_level_irq);
 	return r;
 }
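
The hunks above also replace direct data->affinity dereferences with irq_data_get_affinity_mask(). A minimal sketch of that accessor pattern, assuming kernel context (pick_target_cpu() is a hypothetical helper, not part of this patch):

	static int pick_target_cpu(struct irq_data *data, int current_cpu)
	{
		struct cpumask *mask = irq_data_get_affinity_mask(data);
		int cpu;

		if (cpumask_weight(mask) == 1)
			return cpumask_first(mask);

		/* Round-robin within the mask, wrapping at nr_cpu_ids. */
		cpu = cpumask_next(current_cpu, mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
		return cpu;
	}
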
 
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index 1646cce..642b509 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -320,7 +320,6 @@
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_YAMA=y
-CONFIG_SECURITY_YAMA_STACKED=y
 CONFIG_DEFAULT_SECURITY_DAC=y
 CONFIG_CRYPTO_AUTHENC=y
 CONFIG_CRYPTO_HMAC=y
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 1f85460..40ec4ca 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -16,6 +16,5 @@
 generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
-generic-y += ucontext.h
 generic-y += user.h
 generic-y += xor.h
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 7186bb5..37f8407 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -20,6 +20,10 @@
 				     struct pt_regs *regs, sigset_t *set);
 	const unsigned long	rt_signal_return_offset;
 	const unsigned long	restart;
+
+	unsigned	off_sc_fpregs;
+	unsigned	off_sc_fpc_csr;
+	unsigned	off_sc_used_math;
 };
 
 #endif /* _ASM_ABI_H */
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 76317a7..867f924 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -232,6 +232,30 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -241,6 +265,30 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -290,7 +338,13 @@
 #ifdef CONFIG_CPU_MICROMIPS
 #define CFC_MSA_INSN		0x587e0056
 #define CTC_MSA_INSN		0x583e0816
+#define LDB_MSA_INSN		0x58000807
+#define LDH_MSA_INSN		0x58000817
+#define LDW_MSA_INSN		0x58000827
 #define LDD_MSA_INSN		0x58000837
+#define STB_MSA_INSN		0x5800080f
+#define STH_MSA_INSN		0x5800081f
+#define STW_MSA_INSN		0x5800082f
 #define STD_MSA_INSN		0x5800083f
 #define COPY_UW_MSA_INSN	0x58f00056
 #define COPY_UD_MSA_INSN	0x58f80056
@@ -299,7 +353,13 @@
 #else
 #define CFC_MSA_INSN		0x787e0059
 #define CTC_MSA_INSN		0x783e0819
+#define LDB_MSA_INSN		0x78000820
+#define LDH_MSA_INSN		0x78000821
+#define LDW_MSA_INSN		0x78000822
 #define LDD_MSA_INSN		0x78000823
+#define STB_MSA_INSN		0x78000824
+#define STH_MSA_INSN		0x78000825
+#define STW_MSA_INSN		0x78000826
 #define STD_MSA_INSN		0x78000827
 #define COPY_UW_MSA_INSN	0x78f00059
 #define COPY_UD_MSA_INSN	0x78f80059
@@ -329,6 +389,33 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	noat
@@ -338,6 +425,33 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	noat
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 26d4363..4c42fd9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,6 +137,10 @@
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -416,6 +420,9 @@
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
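
With the and/or/xor ops generated above, callers get bitwise updates on an atomic_t without open-coding a cmpxchg loop. A usage sketch, kernel context assumed (state and the F_* flags are illustrative names):

	#include <linux/atomic.h>

	#define F_READY	0x1
	#define F_BUSY	0x2

	static atomic_t state = ATOMIC_INIT(0);

	static void exercise_flags(void)
	{
		atomic_or(F_BUSY, &state);	/* set a bit */
		atomic_xor(F_READY, &state);	/* toggle a bit */
		atomic_and(~F_BUSY, &state);	/* clear a bit */
	}
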
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 7ecba84..752e0b8 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,12 +133,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
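
The switch to WRITE_ONCE()/READ_ONCE() does not change the semantics of these macros; they still form the usual release/acquire message-passing pair. An illustrative sketch, kernel context assumed:

	static int payload;
	static int ready;

	/* Producer: publish payload, then set the flag with release order. */
	static void producer(void)
	{
		payload = 42;
		smp_store_release(&ready, 1);
	}

	/* Consumer: an acquire load of the flag orders the later payload
	 * read, so a nonzero flag guarantees payload == 42 is visible. */
	static int consumer(void)
	{
		if (smp_load_acquire(&ready))
			return payload;
		return -1;
	}
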
diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
index 16e22ce..bece206 100644
--- a/arch/mips/include/asm/cdmm.h
+++ b/arch/mips/include/asm/cdmm.h
@@ -53,7 +53,7 @@
  * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
  *
  * Picking a suitable physical address at which to map the CDMM region is
- * platform specific, so this weak function can be defined by platform code to
+ * platform specific, so this function can be defined by platform code to
  * pick a suitable value if none is configured by the bootloader.
  *
  * This address must be 32kB aligned, and the region occupies a maximum of 32kB
@@ -61,7 +61,7 @@
  *
  * Returns:	Physical base address for CDMM region, or 0 on failure.
  */
-phys_addr_t __weak mips_cdmm_phys_base(void);
+phys_addr_t mips_cdmm_phys_base(void);
 
 extern struct bus_type mips_cdmm_bustype;
 void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index f0edf6f..2e13a03 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -21,7 +21,6 @@
 
 void mips_event_handler(struct clock_event_device *dev);
 int c0_compare_int_usable(void);
-void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
 irqreturn_t c0_compare_interrupt(int, void *);
 
 extern struct irqaction c0_compare_irqaction;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f25de77..9801ac9 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -411,4 +411,8 @@
 # define cpu_has_cdmm		(cpu_data[0].options & MIPS_CPU_CDMM)
 #endif
 
+#ifndef cpu_has_small_pages
+# define cpu_has_small_pages	(cpu_data[0].options & MIPS_CPU_SP)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index d41e8e2..abee2bf 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -77,6 +77,10 @@
 	 */
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
+	case CPU_I6400:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_R3000
 	case CPU_R2000:
 	case CPU_R3000:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index e46e406..cd89e98 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -120,6 +120,7 @@
 #define PRID_IMP_PROAPTIV_MP	0xa300
 #define PRID_IMP_M5150		0xa700
 #define PRID_IMP_P5600		0xa800
+#define PRID_IMP_I6400		0xa900
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -307,6 +308,7 @@
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
 	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
+	CPU_I6400,
 
 	/*
 	 * MIPS64 class processors
@@ -382,6 +384,7 @@
 #define MIPS_CPU_XPA		0x2000000000ull /* CPU supports Extended Physical Addressing */
 #define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
+#define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/debug.h b/arch/mips/include/asm/debug.h
deleted file mode 100644
index 1fd5a2b..0000000
--- a/arch/mips/include/asm/debug.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Debug macros for run-time debugging.
- * Turned on/off with CONFIG_RUNTIME_DEBUG option.
- *
- * Copyright (C) 2001 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef _ASM_DEBUG_H
-#define _ASM_DEBUG_H
-
-
-/*
- * run-time macros for catching spurious errors.  Eable CONFIG_RUNTIME_DEBUG in
- * kernel hacking config menu to use them.
- *
- * Use them as run-time debugging aid.  NEVER USE THEM AS ERROR HANDLING CODE!!!
- */
-
-#ifdef CONFIG_RUNTIME_DEBUG
-
-#include <linux/kernel.h>
-
-#define db_assert(x)  if (!(x)) { \
-	panic("assertion failed at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_warn(x)  if (!(x)) { \
-	printk(KERN_WARNING "warning at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_verify(x, y) db_assert(x y)
-#define db_verify_warn(x, y) db_warn(x y)
-#define db_run(x)  do { x; } while (0)
-
-#else
-
-#define db_assert(x)
-#define db_warn(x)
-#define db_verify(x, y) x
-#define db_verify_warn(x, y) x
-#define db_run(x)
-
-#endif
-
-#endif /* _ASM_DEBUG_H */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f19e890..53b2693 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -382,7 +382,9 @@
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP	(0)
+#define ELF_HWCAP	(elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
 
 /*
  * This yields a string that ld.so will use to load implementation
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 1b06251..9cbf383 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -164,25 +164,30 @@
 	return ret;
 }
 
-static inline void lose_fpu(int save)
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
 {
-	preempt_disable();
 	if (is_msa_enabled()) {
 		if (save) {
-			save_msa(current);
-			current->thread.fpu.fcr31 =
+			save_msa(tsk);
+			tsk->thread.fpu.fcr31 =
 					read_32bit_cp1_register(CP1_STATUS);
 		}
 		disable_msa();
-		clear_thread_flag(TIF_USEDMSA);
+		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
 		__disable_fpu();
 	} else if (is_fpu_owner()) {
 		if (save)
-			_save_fp(current);
+			_save_fp(tsk);
 		__disable_fpu();
 	}
-	KSTK_STATUS(current) &= ~ST0_CU1;
-	clear_thread_flag(TIF_USEDFPU);
+	KSTK_STATUS(tsk) &= ~ST0_CU1;
+	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
+
+static inline void lose_fpu(int save)
+{
+	preempt_disable();
+	lose_fpu_inatomic(save, current);
 	preempt_enable();
 }
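
The split above lets callers that already manage preemption, or that act on a task known not to be running, drop FPU/MSA state for a task other than current, while lose_fpu() keeps the old behaviour. A hedged sketch (drop_task_fpu() is a hypothetical helper; the caller must ensure tsk's context is live on this CPU or tsk is stopped):

	static void drop_task_fpu(struct task_struct *tsk)
	{
		preempt_disable();
		lose_fpu_inatomic(1, tsk);	/* save state, then disable FPU/MSA */
		preempt_enable();
	}
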
 
diff --git a/arch/mips/include/asm/gpio.h b/arch/mips/include/asm/gpio.h
deleted file mode 100644
index 06e46fa..0000000
--- a/arch/mips/include/asm/gpio.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_MIPS_GPIO_H
-#define __ASM_MIPS_GPIO_H
-
-#include <gpio.h>
-
-#endif /* __ASM_MIPS_GPIO_H */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f0db99f..15e0fec 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,7 @@
 extern int cp0_perfcount_irq;
 extern int cp0_fdc_irq;
 
-extern int __weak get_c0_fdc_int(void);
+extern int get_c0_fdc_int(void);
 
 void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 608aa57..e776725 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -26,14 +26,29 @@
 #define NOP_INSN "nop"
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\t" NOP_INSN "\n\t"
 		"nop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\tj %l[l_yes]\n\t"
+		"nop\n\t"
+		".pushsection __jump_table,  \"aw\"\n\t"
+		WORD_INSN " 1b, %l[l_yes], %0\n\t"
+		".popsection\n\t"
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
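
The new second argument and the &((char *)key)[branch] expression encode the branch polarity in bit 0 of the jump-table key pointer, which is how the core code tells a likely site from an unlikely one. Callers use the higher-level static-key API; a sketch assuming the 4.3-era interface (use_fast_path is an illustrative name):

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(use_fast_path);

	static int do_io(void)
	{
		if (static_branch_unlikely(&use_fast_path))
			return 1;	/* out-of-line until the key is enabled */
		return 0;		/* default path: the nop emitted above */
	}

	/* Elsewhere, e.g. from a sysfs handler:
	 *	static_branch_enable(&use_fast_path);
	 */
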
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index cba22ab..8e3d08e 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -11,7 +11,9 @@
 	DIE_PAGE_FAULT,
 	DIE_BREAK,
 	DIE_SSTEPBP,
-	DIE_MSAFP
+	DIE_MSAFP,
+	DIE_UPROBE,
+	DIE_UPROBE_XOL,
 };
 
 #endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h
index 2767dda..99651b0 100644
--- a/arch/mips/include/asm/linkage.h
+++ b/arch/mips/include/asm/linkage.h
@@ -5,7 +5,6 @@
 #include <asm/asm.h>
 #endif
 
-#define __weak __attribute__((weak))
 #define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
 #define SYSCALL_ALIAS(alias, name)					\
 	asm ( #alias " = " #name "\n\t.globl " #alias)
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index 6c62b0f..b02891f 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -26,7 +26,7 @@
  *
  * Return:	The number of MAAR pairs configured.
  */
-unsigned __weak platform_maar_init(unsigned num_pairs);
+unsigned platform_maar_init(unsigned num_pairs);
 
 /**
  * write_maar_pair() - write to a pair of MAARs
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index a47ea0c..468cbd6 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -203,4 +203,8 @@
 int __init ar7_gpio_init(void);
 void __init ar7_init_clocks(void);
 
+/* Board specific GPIO functions */
+int ar7_gpio_enable(unsigned gpio);
+int ar7_gpio_disable(unsigned gpio);
+
 #endif /* __AR7_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
deleted file mode 100644
index c177cd1..0000000
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef __AR7_GPIO_H__
-#define __AR7_GPIO_H__
-
-#include <asm/mach-ar7/ar7.h>
-
-#define AR7_GPIO_MAX 32
-#define TITAN_GPIO_MAX	51
-#define NR_BUILTIN_GPIO TITAN_GPIO_MAX
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-/* Board specific GPIO functions */
-int ar7_gpio_enable(unsigned gpio);
-int ar7_gpio_disable(unsigned gpio);
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h
deleted file mode 100644
index 713564b..0000000
--- a/arch/mips/include/asm/mach-ath25/gpio.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __ASM_MACH_ATH25_GPIO_H
-#define __ASM_MACH_ATH25_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-
-#endif	/* __ASM_MACH_ATH25_GPIO_H */
diff --git a/arch/mips/include/asm/mach-ath79/gpio.h b/arch/mips/include/asm/mach-ath79/gpio.h
deleted file mode 100644
index 60dcb62..0000000
--- a/arch/mips/include/asm/mach-ath79/gpio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API definitions
- *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_MACH_ATH79_GPIO_H
-#define __ASM_MACH_ATH79_GPIO_H
-
-#define ARCH_NR_GPIOS	64
-#include <asm-generic/gpio.h>
-
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-
-#define gpio_cansleep	__gpio_cansleep
-
-#endif /* __ASM_MACH_ATH79_GPIO_H */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 9785e4e..adde1fa 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -266,6 +266,17 @@
 	return -ENXIO;
 }
 
+/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
+ * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
+ * register enables use of GPIOs as a wake source.
+ */
+static inline void alchemy_gpio1_input_enable(void)
+{
+	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+	__raw_writel(0, base + 0x110);		/* the write op is key */
+	wmb();
+}
+
 /*
  * GPIO2 block macros for common linux GPIO functions. The 'gpio'
  * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX.
@@ -518,141 +529,4 @@
 	return -ENXIO;
 }
 
-/**********************************************************************/
-
-/* Linux gpio framework integration.
- *
- * 4 use cases of Au1000-Au1200 GPIOS:
- *(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
- *	Board must register gpiochips.
- *(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
- *	2 (1 for Au1000) gpio_chips are registered.
- *
- *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- *	the boards' gpio.h must provide the linux gpio wrapper functions,
- *
- *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
- *	inlinable gpio functions are provided which enable access to the
- *	Au1000 gpios only by using the numbers straight out of the data-
- *	sheets.
-
- * Cases 1 and 3 are intended for boards which want to provide their own
- * GPIO namespace and -operations (i.e. for example you have 8 GPIOs
- * which are in part provided by spare Au1000 GPIO pins and in part by
- * an external FPGA but you still want them to be accssible in linux
- * as gpio0-7. The board can of course use the alchemy_gpioX_* functions
- * as required).
- */
-
-#ifndef CONFIG_GPIOLIB
-
-#ifdef CONFIG_ALCHEMY_GPIOINT_AU1000
-
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (4) */
-
-static inline int gpio_direction_input(int gpio)
-{
-	return alchemy_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(int gpio, int v)
-{
-	return alchemy_gpio_direction_output(gpio, v);
-}
-
-static inline int gpio_get_value(int gpio)
-{
-	return alchemy_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(int gpio, int v)
-{
-	alchemy_gpio_set_value(gpio, v);
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
-	return gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
-	gpio_set_value(gpio, value);
-}
-
-static inline int gpio_is_valid(int gpio)
-{
-	return alchemy_gpio_is_valid(gpio);
-}
-
-static inline int gpio_cansleep(int gpio)
-{
-	return alchemy_gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(int gpio)
-{
-	return alchemy_gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(int irq)
-{
-	return alchemy_irq_to_gpio(irq);
-}
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_one(unsigned gpio,
-					unsigned long flags, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_array(struct gpio *array, size_t num)
-{
-	return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-}
-
-static inline void gpio_free_array(struct gpio *array, size_t num)
-{
-}
-
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
-				   unsigned gpio)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
-	return -ENOSYS;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
-}
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-#endif	/* CONFIG_ALCHEMY_GPIOINT_AU1000 */
-
-#endif	/* !CONFIG_GPIOLIB */
-
 #endif /* _ALCHEMY_GPIO_AU1000_H_ */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h
deleted file mode 100644
index 22e7ff1..0000000
--- a/arch/mips/include/asm/mach-au1x00/gpio.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Alchemy GPIO support.
- *
- * With CONFIG_GPIOLIB=y different types of on-chip GPIO can be supported within
- *  the same kernel image.
- * With CONFIG_GPIOLIB=n, your board must select ALCHEMY_GPIOINT_AU1XXX for the
- *  appropriate CPU type (AU1000 currently).
- */
-
-#ifndef _ALCHEMY_GPIO_H_
-#define _ALCHEMY_GPIO_H_
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio-au1000.h>
-#include <asm/mach-au1x00/gpio-au1300.h>
-
-/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
- * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
- * register enables use of GPIOs as wake source.
- */
-static inline void alchemy_gpio1_input_enable(void)
-{
-	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
-	__raw_writel(0, base + 0x110);		/* the write op is key */
-	wmb();
-}
-
-
-/* Linux gpio framework integration.
-*
-* 4 use cases of Alchemy GPIOS:
-*(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
-*	Board must register gpiochips.
-*(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
-*	A gpiochip for the 75 GPIOs is registered.
-*
-*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-*	the boards' gpio.h must provide	the linux gpio wrapper functions,
-*
-*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
-*	inlinable gpio functions are provided which enable access to the
-*	Au1300 gpios only by using the numbers straight out of the data-
-*	sheets.
-
-* Cases 1 and 3 are intended for boards which want to provide their own
-* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
-* which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
-* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
-* as required).
-*/
-
-#ifdef CONFIG_GPIOLIB
-
-/* wraps the cpu-dependent irq_to_gpio functions */
-/* FIXME: gpiolib needs an irq_to_gpio hook */
-static inline int __au_irq_to_gpio(unsigned int irq)
-{
-	switch (alchemy_get_cputype()) {
-	case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
-		return alchemy_irq_to_gpio(irq);
-	case ALCHEMY_CPU_AU1300:
-		return au1300_irq_to_gpio(irq);
-	}
-	return -EINVAL;
-}
-
-
-/* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (2) */
-
-/* get everything through gpiolib */
-#define gpio_to_irq	__gpio_to_irq
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define irq_to_gpio	__au_irq_to_gpio
-
-#include <asm-generic/gpio.h>
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-
-#endif	/* CONFIG_GPIOLIB */
-
-#endif	/* _ALCHEMY_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
deleted file mode 100644
index 90daefa..0000000
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned int irq)
-{
-	return -EINVAL;
-}
-
-#endif
diff --git a/arch/mips/include/asm/mach-bcm63xx/gpio.h b/arch/mips/include/asm/mach-bcm63xx/gpio.h
deleted file mode 100644
index 1eb534d..0000000
--- a/arch/mips/include/asm/mach-bcm63xx/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM63XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM63XX_GPIO_H
-
-#include <bcm63xx_gpio.h>
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_MIPS_MACH_BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/gpio.h b/arch/mips/include/asm/mach-cavium-octeon/gpio.h
deleted file mode 100644
index 34e9f7a..0000000
--- a/arch/mips/include/asm/mach-cavium-octeon/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-#define __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-
-#include <asm-generic/gpio.h>
-
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-generic/gpio.h b/arch/mips/include/asm/mach-generic/gpio.h
deleted file mode 100644
index b4e7020..0000000
--- a/arch/mips/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_GENERIC_GPIO_H
-#define __ASM_MACH_GENERIC_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h>		/* cansleep wrappers */
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index eaacba7..bf8c3e1 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -73,8 +73,6 @@
 void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask);
 uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
 
-#include <asm/mach-generic/gpio.h>
-
 #define JZ_GPIO_PORTA(x) ((x) + 32 * 0)
 #define JZ_GPIO_PORTB(x) ((x) + 32 * 1)
 #define JZ_GPIO_PORTC(x) ((x) + 32 * 2)
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
deleted file mode 100644
index 9ba1cae..0000000
--- a/arch/mips/include/asm/mach-lantiq/gpio.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
-#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
-
-#define gpio_to_irq __gpio_to_irq
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-loongson64/gpio.h b/arch/mips/include/asm/mach-loongson64/gpio.h
deleted file mode 100644
index b3b2169..0000000
--- a/arch/mips/include/asm/mach-loongson64/gpio.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Loongson GPIO Support
- *
- * Copyright (c) 2008  Richard Liu, STMicroelectronics <richard.liu@st.com>
- * Copyright (c) 2008-2010  Arnaud Patard <apatard@mandriva.com>
- * Copyright (c) 2014  Huacai Chen <chenhc@lemote.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LOONGSON_GPIO_H
-#define __LOONGSON_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-
-/* The chip can do interrupt
- * but it has not been tested and doc not clear
- */
-static inline int gpio_to_irq(int gpio)
-{
-	return -EINVAL;
-}
-
-static inline int irq_to_gpio(int gpio)
-{
-	return -EINVAL;
-}
-
-#endif	/* __LOONGSON_GPIO_H */
diff --git a/arch/mips/include/asm/mach-pistachio/gpio.h b/arch/mips/include/asm/mach-pistachio/gpio.h
deleted file mode 100644
index 6c1649c..0000000
--- a/arch/mips/include/asm/mach-pistachio/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Pistachio IRQ setup
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_PISTACHIO_GPIO_H
-#define __ASM_MACH_PISTACHIO_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_PISTACHIO_GPIO_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 4dee0a3..db21121 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -13,18 +13,6 @@
 #ifndef _RC32434_GPIO_H_
 #define _RC32434_GPIO_H_
 
-#include <linux/types.h>
-#include <asm-generic/gpio.h>
-
-#define NR_BUILTIN_GPIO		32
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-
-#define gpio_to_irq(gpio)	(8 + 4 * 32 + gpio)
-#define irq_to_gpio(irq)	(irq - (8 + 4 * 32))
-
 struct rb532_gpio_reg {
 	u32   gpiofunc;	  /* GPIO Function Register
 			   * gpiofunc[x]==0 bit = gpio
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index edc7ee9..d75b75e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -33,6 +33,29 @@
  */
 extern phys_addr_t __mips_cm_phys_base(void);
 
+/*
+ * mips_cm_is64 - determine CM register width
+ *
+ * The CM register width is processor and CM specific. A 64-bit processor
+ * usually has a 64-bit CM and a 32-bit one has a 32-bit CM, but a 64-bit
+ * processor could come with a 32-bit CM. Moreover, accesses on 64-bit CMs
+ * can be done either using regular 64-bit load/store instructions, or 32-bit
+ * load/store instructions on 32-bit register pairs. We opt for 64-bit
+ * accesses on 64-bit CMs and kernels, and 32-bit accesses in any other case.
+ *
+ * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
+ */
+extern int mips_cm_is64;
+
+/**
+ * mips_cm_error_report - Report CM cache errors
+ */
+#ifdef CONFIG_MIPS_CM
+extern void mips_cm_error_report(void);
+#else
+static inline void mips_cm_error_report(void) {}
+#endif
+
 /**
  * mips_cm_probe - probe for a Coherence Manager
  *
@@ -90,20 +113,46 @@
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CM_R_(name, off)					\
-static inline u32 __iomem *addr_gcr_##name(void)		\
+static inline unsigned long __iomem *addr_gcr_##name(void)	\
 {								\
-	return (u32 __iomem *)(mips_cm_base + (off));		\
+	return (unsigned long __iomem *)(mips_cm_base + (off));	\
 }								\
 								\
-static inline u32 read_gcr_##name(void)				\
+static inline u32 read32_gcr_##name(void)			\
 {								\
 	return __raw_readl(addr_gcr_##name());			\
+}								\
+								\
+static inline u64 read64_gcr_##name(void)			\
+{								\
+	return __raw_readq(addr_gcr_##name());			\
+}								\
+								\
+static inline unsigned long read_gcr_##name(void)		\
+{								\
+	if (mips_cm_is64)					\
+		return read64_gcr_##name();			\
+	else							\
+		return read32_gcr_##name();			\
 }
 
 #define BUILD_CM__W(name, off)					\
-static inline void write_gcr_##name(u32 value)			\
+static inline void write32_gcr_##name(u32 value)		\
 {								\
 	__raw_writel(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write64_gcr_##name(u64 value)		\
+{								\
+	__raw_writeq(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write_gcr_##name(unsigned long value)	\
+{								\
+	if (mips_cm_is64)					\
+		write64_gcr_##name(value);			\
+	else							\
+		write32_gcr_##name(value);			\
 }
 
 #define BUILD_CM_RW(name, off)					\
@@ -144,6 +193,7 @@
 BUILD_CM_RW(reg3_mask,		MIPS_CM_GCB_OFS + 0xc8)
 BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0)
+BUILD_CM_RW(l2_config,		MIPS_CM_GCB_OFS + 0x130)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -189,6 +239,13 @@
 #define CM_GCR_REV_MINOR_SHF			0
 #define CM_GCR_REV_MINOR_MSK			(_ULCAST_(0xff) << 0)
 
+#define CM_ENCODE_REV(major, minor) \
+		(((major) << CM_GCR_REV_MAJOR_SHF) | \
+		 ((minor) << CM_GCR_REV_MINOR_SHF))
+
+#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
+#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
+
 /* GCR_ERROR_CAUSE register fields */
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF		27
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK		(_ULCAST_(0x1f) << 27)
@@ -249,6 +306,16 @@
 #define CM_GCR_CPC_STATUS_EX_SHF		0
 #define CM_GCR_CPC_STATUS_EX_MSK		(_ULCAST_(0x1) << 0)
 
+/* GCR_L2_CONFIG register fields */
+#define CM_GCR_L2_CONFIG_BYPASS_SHF		20
+#define CM_GCR_L2_CONFIG_BYPASS_MSK		(_ULCAST_(0x1) << 20)
+#define CM_GCR_L2_CONFIG_SET_SIZE_SHF		12
+#define CM_GCR_L2_CONFIG_SET_SIZE_MSK		(_ULCAST_(0xf) << 12)
+#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF		8
+#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK		(_ULCAST_(0xf) << 8)
+#define CM_GCR_L2_CONFIG_ASSOC_SHF		0
+#define CM_GCR_L2_CONFIG_ASSOC_MSK		(_ULCAST_(0xff) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
@@ -324,4 +391,18 @@
 	return 0;
 }
 
+/**
+ * mips_cm_revision() - return CM revision
+ *
+ * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
+ * return value should be checked against the CM_REV_* macros.
+ */
+static inline int mips_cm_revision(void)
+{
+	if (!mips_cm_present())
+		return 0;
+
+	return read_gcr_rev();
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
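
Each BUILD_CM_RW() invocation above generates width-dispatching accessors; for example BUILD_CM_RW(l2_config, ...) yields read_gcr_l2_config()/write_gcr_l2_config(), which pick 32- or 64-bit MMIO according to mips_cm_is64 at run time. A usage sketch built only from definitions in this patch (kernel context assumed):

	static unsigned int cm_l2_assoc(void)
	{
		unsigned long cfg = read_gcr_l2_config();

		/* Extract the L2 associativity field from GCR_L2_CONFIG. */
		return (cfg & CM_GCR_L2_CONFIG_ASSOC_MSK) >>
			CM_GCR_L2_CONFIG_ASSOC_SHF;
	}
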
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 1cebe8c..f386f32 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -28,16 +28,6 @@
 extern phys_addr_t mips_cpc_default_phys_base(void);
 
 /**
- * mips_cpc_phys_base - retrieve the physical base address of the CPC
- *
- * This function returns the physical base address of the Cluster Power
- * Controller memory mapped registers, or 0 if no Cluster Power Controller
- * is present. It may be overriden by individual platforms which determine
- * this address in a different way.
- */
-extern phys_addr_t __weak mips_cpc_phys_base(void);
-
-/**
  * mips_cpc_probe - probe for a Cluster Power Controller
  *
  * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index c5b0956..d3cd8ea 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -112,6 +112,30 @@
 #define CP0_TX39_CACHE	$7
 
 
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_G		(_ULCAST_(1) << 0)
+#define ENTRYLO_V		(_ULCAST_(1) << 1)
+#define ENTRYLO_D		(_ULCAST_(1) << 2)
+#define ENTRYLO_C_SHIFT		3
+#define ENTRYLO_C		(_ULCAST_(7) << ENTRYLO_C_SHIFT)
+
+/* R3000 EntryLo bit definitions */
+#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
+#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
+#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
+#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
+
+/* MIPS32/64 EntryLo bit definitions */
+#ifdef CONFIG_64BIT
+/* as read by dmfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
+#else
+/* as read by mfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
+#endif
+
 /*
  * Values for PageMask register
  */
@@ -203,6 +227,9 @@
 #define PG_ESP		(_ULCAST_(1) <<	 28)
 #define PG_IEC		(_ULCAST_(1) <<  27)
 
+/* MIPS32/64 EntryHI bit definitions */
+#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
+
 /*
  * R4x00 interrupt enable / cause bits
  */
@@ -579,6 +606,8 @@
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
+/* FTLB probability bits for R6 */
+#define MIPS_CONF7_FTLBP_SHIFT	(18)
 
 /* MAAR bit definitions */
 #define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
@@ -586,31 +615,6 @@
 #define MIPS_MAAR_S		(_ULCAST_(1) << 1)
 #define MIPS_MAAR_V		(_ULCAST_(1) << 0)
 
-/*  EntryHI bit definition */
-#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
-
-/* R3000 EntryLo bit definitions */
-#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
-#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
-#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
-#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
-
-/* R4000 compatible EntryLo bit definitions */
-#define MIPS_ENTRYLO_G		(_ULCAST_(1) << 0)
-#define MIPS_ENTRYLO_V		(_ULCAST_(1) << 1)
-#define MIPS_ENTRYLO_D		(_ULCAST_(1) << 2)
-#define MIPS_ENTRYLO_C_SHIFT	3
-#define MIPS_ENTRYLO_C		(_ULCAST_(7) << MIPS_ENTRYLO_C_SHIFT)
-#ifdef CONFIG_64BIT
-/* as read by dmfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
-#else
-/* as read by mfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
-#endif
-
 /* CMGCRBase bit definitions */
 #define MIPS_CMGCRB_BASE	11
 #define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
@@ -932,7 +936,7 @@
  */
 
 #define __read_32bit_c0_register(source, sel)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	if (sel == 0)							\
 		__asm__ __volatile__(					\
 			"mfc0\t%0, " #source "\n\t"			\
@@ -1014,7 +1018,7 @@
  * On RM7000/RM9000 these are used to access cop0 set 1 registers
  */
 #define __read_32bit_c0_ctrl_register(source)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	__asm__ __volatile__(						\
 		"cfc0\t%0, " #source "\n\t"				\
 		: "=r" (__res));					\
@@ -1471,7 +1475,7 @@
  */
 #define _read_32bit_cp1_register(source, gas_hardfloat)			\
 ({									\
-	int __res;							\
+	unsigned int __res;						\
 									\
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index af5638b..bbb85fe 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -14,10 +14,90 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/inst.h>
+
 extern void _save_msa(struct task_struct *);
 extern void _restore_msa(struct task_struct *);
 extern void _init_msa_upper(void);
 
+extern void read_msa_wr_b(unsigned idx, union fpureg *to);
+extern void read_msa_wr_h(unsigned idx, union fpureg *to);
+extern void read_msa_wr_w(unsigned idx, union fpureg *to);
+extern void read_msa_wr_d(unsigned idx, union fpureg *to);
+
+/**
+ * read_msa_wr() - Read a single MSA vector register
+ * @idx:	The index of the vector register to read
+ * @to:		The FPU register union to store the register's value in
+ * @fmt:	The format of the data in the vector register
+ *
+ * Read the value of MSA vector register idx into the FPU register
+ * union to, using the format fmt.
+ */
+static inline void read_msa_wr(unsigned idx, union fpureg *to,
+			       enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		read_msa_wr_b(idx, to);
+		break;
+
+	case msa_fmt_h:
+		read_msa_wr_h(idx, to);
+		break;
+
+	case msa_fmt_w:
+		read_msa_wr_w(idx, to);
+		break;
+
+	case msa_fmt_d:
+		read_msa_wr_d(idx, to);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+extern void write_msa_wr_b(unsigned idx, union fpureg *from);
+extern void write_msa_wr_h(unsigned idx, union fpureg *from);
+extern void write_msa_wr_w(unsigned idx, union fpureg *from);
+extern void write_msa_wr_d(unsigned idx, union fpureg *from);
+
+/**
+ * write_msa_wr() - Write a single MSA vector register
+ * @idx:	The index of the vector register to write
+ * @from:	The FPU register union to take the register's value from
+ * @fmt:	The format of the data in the vector register
+ *
+ * Write the value from the FPU register union from into MSA vector
+ * register idx, using the format fmt.
+ */
+static inline void write_msa_wr(unsigned idx, union fpureg *from,
+				enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		write_msa_wr_b(idx, from);
+		break;
+
+	case msa_fmt_h:
+		write_msa_wr_h(idx, from);
+		break;
+
+	case msa_fmt_w:
+		write_msa_wr_w(idx, from);
+		break;
+
+	case msa_fmt_d:
+		write_msa_wr_d(idx, from);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
 static inline void enable_msa(void)
 {
 	if (cpu_has_msa) {
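
The read_msa_wr()/write_msa_wr() dispatchers above select the per-format helper from the 2-bit format field. A usage sketch, kernel context assumed (this also assumes union fpureg exposes a val64[] view, as in asm/processor.h):

	static void dump_wr4(void)
	{
		union fpureg val;

		/* Read vector register $w4 in doubleword format. */
		read_msa_wr(4, &val, msa_fmt_d);
		pr_info("$w4: %016llx %016llx\n",
			(unsigned long long)val.val64[0],
			(unsigned long long)val.val64[1]);
	}
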
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index c373d95..d92cf59 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -284,6 +284,7 @@
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
 	CVMX_BOARD_TYPE_UBNT_E100 = 20002,
 	CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+	CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
 
 	/* The remaining range is reserved for future use. */
@@ -384,6 +385,7 @@
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
 	}
 	return "Unsupported Board";
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index df69bfd..c210154 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,7 +37,7 @@
 #include <asm/octeon/cvmx-fpa.h>
 #include <asm/octeon/cvmx-pip-defs.h>
 
-#define CVMX_PIP_NUM_INPUT_PORTS		40
+#define CVMX_PIP_NUM_INPUT_PORTS		48
 #define CVMX_PIP_NUM_WATCHERS			4
 
 /*
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index 3da59bb..5f47f76 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -542,6 +542,9 @@
  */
 static inline int cvmx_pko_get_base_queue(int port)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return port;
+
 	return cvmx_pko_get_base_queue_per_core(port, 0);
 }
 
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 9020ef4..6a3db4b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -52,6 +52,12 @@
 #define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
 #define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
 
+#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
+#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
+#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
+#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
+#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
+
 union cvmx_pow_bist_stat {
 	uint64_t u64;
 	struct cvmx_pow_bist_stat_s {
@@ -1286,4 +1292,27 @@
 	struct cvmx_pow_ws_pcx_s cnf71xx;
 };
 
+union cvmx_sso_wq_int_thrx {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_33_63:31;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_26_27:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t iq_thr:12;
+#else
+		uint64_t iq_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_26_27:2;
+		uint64_t tc_thr:4;
+		uint64_t tc_en:1;
+		uint64_t reserved_33_63:31;
+#endif
+	} s;
+};
+
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index d5565d7..5153156 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -1810,10 +1810,11 @@
 	cvmx_addr_t ptr;
 	cvmx_pow_tag_req_t tag_req;
 
-	wqp->qos = qos;
-	wqp->tag = tag;
-	wqp->tag_type = tag_type;
-	wqp->grp = grp;
+	wqp->word1.tag = tag;
+	wqp->word1.tag_type = tag_type;
+
+	cvmx_wqe_set_qos(wqp, qos);
+	cvmx_wqe_set_grp(wqp, grp);
 
 	tag_req.u64 = 0;
 	tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index 2d6d0c7..0d697aa 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -193,6 +193,53 @@
 	        uint64_t bufs:8;
 #endif
 	} s;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t bufs:8;
+		uint64_t ip_offset:8;
+		uint64_t vlan_valid:1;
+		uint64_t vlan_stacked:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_cfi:1;
+		uint64_t vlan_id:12;
+		uint64_t port:12;		/* MAC/PIP port number. */
+		uint64_t dec_ipcomp:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipsec:1;
+		uint64_t is_v6:1;
+		uint64_t software:1;
+		uint64_t L4_error:1;
+		uint64_t is_frag:1;
+		uint64_t IP_exc:1;
+		uint64_t is_bcast:1;
+		uint64_t is_mcast:1;
+		uint64_t not_IP:1;
+		uint64_t rcv_error:1;
+		uint64_t err_code:8;
+#else
+		uint64_t err_code:8;
+		uint64_t rcv_error:1;
+		uint64_t not_IP:1;
+		uint64_t is_mcast:1;
+		uint64_t is_bcast:1;
+		uint64_t IP_exc:1;
+		uint64_t is_frag:1;
+		uint64_t L4_error:1;
+		uint64_t software:1;
+		uint64_t is_v6:1;
+		uint64_t dec_ipsec:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipcomp:1;
+		uint64_t port:12;
+		uint64_t vlan_id:12;
+		uint64_t vlan_cfi:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_stacked:1;
+		uint64_t vlan_valid:1;
+		uint64_t ip_offset:8;
+		uint64_t bufs:8;
+#endif
+	} s_cn68xx;
 
 	/* use this to get at the 16 vlan bits */
 	struct {
@@ -355,6 +402,146 @@
 
 } cvmx_pip_wqe_word2;
 
+union cvmx_pip_wqe_word0 {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * raw chksum result generated by the HW
+		 */
+		uint16_t hw_chksum;
+		/**
+		 * Field unused by hardware - available for software
+		 */
+		uint8_t unused;
+		/**
+		 * Next pointer used by hardware for list maintenance.
+		 * May be written/read by HW before the work queue
+		 * entry is scheduled to a PP (Only 36 bits used in
+		 * Octeon 1)
+		 */
+		uint64_t next_ptr:40;
+#else
+		uint64_t next_ptr:40;
+		uint8_t unused;
+		uint16_t hw_chksum;
+#endif
+	} cn38xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t l4ptr:8;       /* 56..63 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t pknd:6;        /* 0..5 */
+#else
+		uint64_t pknd:6;        /* 0..5 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l4ptr:8;       /* 56..63 */
+#endif
+	} cn68xx;
+};
+
+union cvmx_wqe_word0 {
+	uint64_t u64;
+	union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t varies:14;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t varies:14;
+		uint64_t len:16;
+#endif
+	};
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t zero_0:1;
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		uint64_t zero_1:1;
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:6;
+		uint64_t zero_2:3;
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:3;
+		uint64_t grp:6;
+		uint64_t zero_1:1;
+		uint64_t qos:3;
+		uint64_t zero_0:1;
+		uint64_t len:16;
+#endif
+	} cn68xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * HW sets to the total number of bytes in the packet
+		 */
+		uint64_t len:16;
+		/**
+		 * HW sets this to input physical port
+		 */
+		uint64_t ipprt:6;
+
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:4;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:3;
+		/**
+		 * the synchronization/ordering tag
+		 */
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:1;
+		uint64_t grp:4;
+		uint64_t qos:3;
+		uint64_t ipprt:6;
+		uint64_t len:16;
+#endif
+	} cn38xx;
+};
+
 /**
  * Work queue entry format
  *
@@ -366,70 +553,13 @@
      * WORD 0
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * raw chksum result generated by the HW
-     */
-	uint16_t hw_chksum;
-    /**
-     * Field unused by hardware - available for software
-     */
-	uint8_t unused;
-    /**
-     * Next pointer used by hardware for list maintenance.
-     * May be written/read by HW before the work queue
-     *		 entry is scheduled to a PP
-     * (Only 36 bits used in Octeon 1)
-     */
-	uint64_t next_ptr:40;
-#else
-	uint64_t next_ptr:40;
-	uint8_t unused;
-	uint16_t hw_chksum;
-#endif
+	union cvmx_wqe_word0 word0;
 
     /*****************************************************************
      * WORD 1
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * HW sets to the total number of bytes in the packet
-     */
-	uint64_t len:16;
-    /**
-     * HW sets this to input physical port
-     */
-	uint64_t ipprt:6;
-
-    /**
-     * HW sets this to what it thought the priority of the input packet was
-     */
-	uint64_t qos:3;
-
-    /**
-     * the group that the work queue entry will be scheduled to
-     */
-	uint64_t grp:4;
-    /**
-     * the type of the tag (ORDERED, ATOMIC, NULL)
-     */
-	uint64_t tag_type:3;
-    /**
-     * the synchronization/ordering tag
-     */
-	uint64_t tag:32;
-#else
-	uint64_t tag:32;
-	uint64_t tag_type:2;
-	uint64_t zero_2:1;
-	uint64_t grp:4;
-	uint64_t qos:3;
-	uint64_t ipprt:6;
-	uint64_t len:16;
-#endif
+	union cvmx_wqe_word1 word1;
 
     /**
      * WORD 2 HW WRITE: the following 64-bits are filled in by
@@ -465,4 +595,64 @@
 
 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
 
+static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
+{
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		port = work->word2.s_cn68xx.port;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	return port;
+}
+
+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word2.s_cn68xx.port = port;
+	else
+		work->word1.cn38xx.ipprt = port;
+}
+
+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+{
+	int grp;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		grp = work->word1.cn68xx.grp;
+	else
+		grp = work->word1.cn38xx.grp;
+
+	return grp;
+}
+
+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.grp = grp;
+	else
+		work->word1.cn38xx.grp = grp;
+}
+
+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+{
+	int qos;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		qos = work->word1.cn68xx.qos;
+	else
+		qos = work->word1.cn38xx.qos;
+
+	return qos;
+}
+
+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.qos = qos;
+	else
+		work->word1.cn38xx.qos = qos;
+}
+
 #endif /* __CVMX_WQE_H__ */
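
The accessors above give drivers a single call site for fields whose position
differs between the cn68xx and cn38xx WQE layouts. A minimal sketch of a
consumer, assuming only this header (handle_packet and the group rotation are
hypothetical, not part of this patch):

/* Hypothetical consumer of the cvmx_wqe_* accessors. */
static void handle_packet(cvmx_wqe_t *work)
{
	int port = cvmx_wqe_get_port(work);	/* layout resolved internally */
	int grp = cvmx_wqe_get_grp(work);

	/* Move the entry to the next group without knowing whether the
	 * grp field is 4 bits (cn38xx) or 6 bits (cn68xx) wide. */
	cvmx_wqe_set_grp(work, (grp + 1) & 0xf);
	(void)port;
}
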
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index c28a849..ff7ad91 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -133,20 +133,13 @@
 #define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
-
-/* Only R2 or newer cores have the XI bit */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
-
 #endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 /* XI - page cannot be executed */
-#ifndef _PAGE_NO_EXEC_SHIFT
+#ifdef _PAGE_SPLITTING_SHIFT
+#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
 #endif
 #define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
@@ -156,14 +149,16 @@
 #define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
 #define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
 #define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
+#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
 
+#if defined(_PAGE_NO_READ_SHIFT)
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-
-#else	/* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#elif defined(_PAGE_SPLITTING_SHIFT)
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+#endif
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
@@ -249,7 +244,7 @@
 #define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
 #define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */
 
-#elif defined(CONFIG_MACH_JZ4740)
+#elif defined(CONFIG_MACH_INGENIC)
 
 /* Ingenic uses the WA bit to achieve write-combine memory writes */
 #define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)
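
The restructured conditionals above reduce to a three-way fallback chain for
_PAGE_GLOBAL_SHIFT: after NO_READ when RIXI is available, after SPLITTING when
huge-TLB support reserved those bits, otherwise directly after MODIFIED. A
standalone sketch of the same chain (shift values invented for illustration;
toggle the two commented defines to exercise each branch):

/* Illustration only: mirrors the fallback chain with made-up shifts. */
#include <stdio.h>

#define _PAGE_MODIFIED_SHIFT	6
/* #define _PAGE_SPLITTING_SHIFT	(_PAGE_MODIFIED_SHIFT + 2) */
/* #define _PAGE_NO_READ_SHIFT	(_PAGE_MODIFIED_SHIFT + 4) */

#if defined(_PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#elif defined(_PAGE_SPLITTING_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif

int main(void)
{
	printf("_PAGE_GLOBAL_SHIFT = %d\n", _PAGE_GLOBAL_SHIFT);
	return 0;
}
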
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index ae85694..8957f15 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -393,6 +393,8 @@
 	return __pgprot(prot);
 }
 
+#define pgprot_writecombine pgprot_writecombine
+
 static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 {
 	unsigned long prot = pgprot_val(_prot);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 9b3b48e..59ee6dc 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -275,6 +275,7 @@
 	unsigned long cp0_badvaddr;	/* Last user fault */
 	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
 	unsigned long error_code;
+	unsigned long trap_nr;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
 	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
@@ -341,6 +342,7 @@
 	.cp0_badvaddr		= 0,				\
 	.cp0_baduaddr		= 0,				\
 	.error_code		= 0,				\
+	.trap_nr		= 0,				\
 	/*							\
 	 * Platform specific cop2 registers(null if no COP2)	\
 	 */							\
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index ffc3203..f6fc6aa 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -14,11 +14,16 @@
 #include <linux/linkage.h>
 #include <linux/types.h>
 #include <asm/isadep.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
 #include <uapi/asm/ptrace.h>
 
 /*
  * This struct defines the way the registers are stored on the stack during a
  * system call/exception. As usual the registers k0/k1 aren't being saved.
+ *
+ * If you add a register here, also add it to regoffset_table[] in
+ * arch/mips/kernel/ptrace.c.
  */
 struct pt_regs {
 #ifdef CONFIG_32BIT
@@ -43,8 +48,83 @@
 	unsigned long long mpl[6];        /* MTM{0-5} */
 	unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
+	unsigned long __last[0];
 } __aligned(8);
 
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->regs[31];
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on MIPS.  We would rather see an error if they get invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+	regs->cp0_epc = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:       pt_regs from which the register value is retrieved.
+ * @offset:     offset number of the register.
+ *
+ * regs_get_register() returns the value of a register. The @offset is the
+ * offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is greater than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+                                              unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @addr:       address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel
+ * stack page(s). If @addr is within the kernel stack, it returns true;
+ * otherwise it returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+                                           unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @n:          stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT within the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+                                                      unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
 struct task_struct;
 
 extern int ptrace_getregs(struct task_struct *child,
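
Together with the regoffset_table added to arch/mips/kernel/ptrace.c below,
these helpers let instrumentation code fetch a register by name. A short
sketch of kernel-side usage, assuming only <asm/ptrace.h> (fetch_named_reg
is hypothetical):

/* Hypothetical helper: look up a register by name, then read it. */
static unsigned long fetch_named_reg(struct pt_regs *regs, const char *name)
{
	int off = regs_query_register_offset(name);	/* e.g. "r4", "c0_epc" */

	if (off < 0)
		return 0;	/* unknown register name */
	return regs_get_register(regs, off);
}
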
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 8efe5a9..003e273 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -23,4 +23,7 @@
 
 #define __ARCH_HAS_IRIX_SIGACTION
 
+extern int protected_save_fp_context(void __user *sc);
+extern int protected_restore_fp_context(void __user *sc);
+
 #endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 9de4ba4..40196be 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -42,6 +42,11 @@
 	return ((counters >> 16) ^ counters) & 0xffff;
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.h.serving_now == lock.h.ticket;
+}
+
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_unlock_wait(x) \
 	while (arch_spin_is_locked(x)) { cpu_relax(); }
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 9733cd0..28b5d84a 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,29 +16,21 @@
 #include <asm/watch.h>
 #include <asm/dsp.h>
 #include <asm/cop2.h>
-#include <asm/msa.h>
+#include <asm/fpu.h>
 
 struct task_struct;
 
-enum {
-	FP_SAVE_NONE	= 0,
-	FP_SAVE_VECTOR	= -1,
-	FP_SAVE_SCALAR	= 1,
-};
-
 /**
  * resume - resume execution of a task
  * @prev:	The task previously executed.
  * @next:	The task to begin executing.
  * @next_ti:	task_thread_info(next).
- * @fp_save:	Which, if any, FP context to save for prev.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *resume(struct task_struct *prev,
-		struct task_struct *next, struct thread_info *next_ti,
-		s32 fp_save);
+		struct task_struct *next, struct thread_info *next_ti);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -91,8 +83,8 @@
  */
 #define switch_to(prev, next, last)					\
 do {									\
-	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
+	lose_fpu_inatomic(1, prev);					\
 	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
 		__restore_dsp(next);					\
@@ -111,15 +103,10 @@
 		clear_c0_status(ST0_CU2);				\
 	}								\
 	__clear_software_ll_bit();					\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
-		__fpsave = FP_SAVE_SCALAR;				\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
-		__fpsave = FP_SAVE_VECTOR;				\
 	if (cpu_has_userlocal)						\
 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
 	__restore_watch();						\
-	disable_msa();							\
-	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
+	(last) = resume(prev, next, task_thread_info(next));		\
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 9c0014e..e309d8f 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -99,6 +99,7 @@
 #define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
 #define TIF_SECCOMP		4	/* secure computing */
 #define TIF_NOTIFY_RESUME	5	/* callback before returning to user */
+#define TIF_UPROBE		6	/* breakpointed or singlestepping */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -122,6 +123,7 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
@@ -146,7 +148,8 @@
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		\
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME |	\
+	 _TIF_UPROBE)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK |		\
 				 _TIF_WORK_SYSCALL_EXIT |		\
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 8ab2874..17d4cd2 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -51,7 +51,7 @@
 /*
  * Initialize the calling CPU's compare interrupt as clockevent device
  */
-extern unsigned int __weak get_c0_compare_int(void);
+extern unsigned int get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
diff --git a/arch/mips/include/asm/tlbdebug.h b/arch/mips/include/asm/tlbdebug.h
index bb8f5c2..3a25a87 100644
--- a/arch/mips/include/asm/tlbdebug.h
+++ b/arch/mips/include/asm/tlbdebug.h
@@ -11,6 +11,7 @@
 /*
  * TLB debugging functions:
  */
+extern void dump_tlb_regs(void);
 extern void dump_tlb_all(void);
 
 #endif /* __ASM_TLBDEBUG_H */
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
new file mode 100644
index 0000000..34c325c
--- /dev/null
+++ b/arch/mips/include/asm/uprobes.h
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_UPROBES_H
+#define __ASM_UPROBES_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/*
+ * We want this to be defined as union mips_instruction but that makes the
+ * generic code blow up.
+ */
+typedef u32 uprobe_opcode_t;
+
+/*
+ * Classic MIPS instructions are always 4 bytes (note this implementation
+ * doesn't consider microMIPS yet), but in order to deal with branches and
+ * their delay slots we treat instructions as having a maximum of 8 bytes.
+ */
+#define MAX_UINSN_BYTES			8
+#define UPROBE_XOL_SLOT_BYTES		128	/* Max. cache line size */
+
+#define UPROBE_BRK_UPROBE		0x000d000d	/* break 13 */
+#define UPROBE_BRK_UPROBE_XOL		0x000e000d	/* break 14 */
+
+#define UPROBE_SWBP_INSN		UPROBE_BRK_UPROBE
+#define UPROBE_SWBP_INSN_SIZE		4
+
+struct arch_uprobe {
+	unsigned long	resume_epc;
+	u32	insn[2];
+	u32	ixol[2];
+	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
+};
+
+struct arch_uprobe_task {
+	unsigned long saved_trap_nr;
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs);
+
+#endif /* __ASM_UPROBES_H */
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 7849f39..80e70db 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -122,7 +122,7 @@
 void *alloc_progmem(unsigned long len);
 void release_progmem(void *ptr);
 
-int __weak vpe_run(struct vpe *v);
+int vpe_run(struct vpe *v);
 void cleanup_tc(struct tc *tc);
 
 int __init vpe_module_init(void);
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 002c39e..9c4265c 100644
--- a/arch/mips/include/uapi/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
@@ -21,6 +21,8 @@
 #define BRK_DIVZERO	7	/* Divide by zero check */
 #define BRK_RANGE	8	/* Range error check */
 #define BRK_BUG		12	/* Used by BUG() */
+#define BRK_UPROBE	13	/* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOL	14	/* See <asm/uprobes.h> */
 #define BRK_MEMU	514	/* Used by FPU emulator */
 #define BRK_KPROBE_BP	515	/* Kprobe break */
 #define BRK_KPROBE_SSTEPBP 516	/* Kprobe single step software implementation */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
new file mode 100644
index 0000000..c7484a7
--- /dev/null
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -0,0 +1,8 @@
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_MIPS_R6		(1 << 0)
+#define HWCAP_MIPS_MSA		(1 << 1)
+
+#endif /* _UAPI_ASM_HWCAP_H */
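
Userland can query these bits through the ELF auxiliary vector. A minimal
userspace sketch using glibc's getauxval() (flag values copied from the
header above):

/* Probe the MIPS hwcaps the kernel now exports. */
#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_MIPS_R6	(1 << 0)
#define HWCAP_MIPS_MSA	(1 << 1)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("R6 : %s\n", (hwcap & HWCAP_MIPS_R6) ? "yes" : "no");
	printf("MSA: %s\n", (hwcap & HWCAP_MIPS_MSA) ? "yes" : "no");
	return 0;
}
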
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index fc0cf5a..9b44d5a 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -26,7 +26,7 @@
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
 	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
-	spec2_op, jalx_op, mdmx_op, spec3_op,
+	spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
@@ -167,8 +167,13 @@
 	fround_op    =	0x0c, ftrunc_op	   =  0x0d,
 	fceil_op     =	0x0e, ffloor_op	   =  0x0f,
 	fmovc_op     =	0x11, fmovz_op	   =  0x12,
-	fmovn_op     =	0x13, frecip_op	   =  0x15,
-	frsqrt_op    =	0x16, fcvts_op	   =  0x20,
+	fmovn_op     =	0x13, fseleqz_op   =  0x14,
+	frecip_op    =  0x15, frsqrt_op    =  0x16,
+	fselnez_op   =  0x17, fmaddf_op    =  0x18,
+	fmsubf_op    =  0x19, frint_op     =  0x1a,
+	fclass_op    =  0x1b, fmin_op      =  0x1c,
+	fmina_op     =  0x1d, fmax_op      =  0x1e,
+	fmaxa_op     =  0x1f, fcvts_op     =  0x20,
 	fcvtd_op     =	0x21, fcvte_op	   =  0x22,
 	fcvtw_op     =	0x24, fcvtl_op	   =  0x25,
 	fcmp_op	     =	0x30
@@ -221,6 +226,24 @@
 };
 
 /*
+ * func field for MSA MI10 format.
+ */
+enum msa_mi10_func {
+	msa_ld_op = 8,
+	msa_st_op = 9,
+};
+
+/*
+ * MSA 2 bit format fields.
+ */
+enum msa_2b_fmt {
+	msa_fmt_b = 0,
+	msa_fmt_h = 1,
+	msa_fmt_w = 2,
+	msa_fmt_d = 3,
+};
+
+/*
  * (microMIPS) Major opcodes.
  */
 enum mm_major_op {
@@ -611,6 +634,16 @@
 	;)))))))
 };
 
+struct msa_mi10_format {		/* MSA MI10 */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(signed int s10 : 10,
+	__BITFIELD_FIELD(unsigned int rs : 5,
+	__BITFIELD_FIELD(unsigned int wd : 5,
+	__BITFIELD_FIELD(unsigned int func : 4,
+	__BITFIELD_FIELD(unsigned int df : 2,
+	;))))))
+};
+
 struct spec3_format {   /* SPEC3 */
 	__BITFIELD_FIELD(unsigned int opcode:6,
 	__BITFIELD_FIELD(unsigned int rs:5,
@@ -888,6 +921,7 @@
 	struct p_format p_format;
 	struct f_format f_format;
 	struct ma_format ma_format;
+	struct msa_mi10_format msa_mi10_format;
 	struct b_format b_format;
 	struct ps_format ps_format;
 	struct v_format v_format;
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 9081d88..5cbd9ae 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,6 +12,18 @@
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
+/* scalar FP context was used */
+#define USED_FP			(1 << 0)
+
+/* the value of Status.FR when context was saved */
+#define USED_FR1		(1 << 1)
+
+/* FR=1, but with odd singles in bits 63:32 of preceding even double */
+#define USED_HYBRID_FPRS	(1 << 2)
+
+/* extended context was used, see struct extcontext for details */
+#define USED_EXTCONTEXT		(1 << 3)
+
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index 8f2d184..c4ddc4f 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -16,11 +16,13 @@
 #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||		\
     defined(_MIPS_ARCH_LOONGSON3A)
 
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	.set	pop			\n"
 	: "=r" (x)
@@ -30,11 +32,13 @@
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	rotr	%0, %0, 16		\n"
 	"	.set	pop			\n"
@@ -50,11 +54,13 @@
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u64 __arch_swab64(__u64 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips64r2		\n"
+	"	.set	nomips16		\n"
 	"	dsbh	%0, %1			\n"
 	"	dshd	%0, %0			\n"
 	"	.set	pop			\n"
diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..2320144
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ucontext.h
@@ -0,0 +1,65 @@
+#ifndef __MIPS_UAPI_ASM_UCONTEXT_H
+#define __MIPS_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct extcontext - extended context header structure
+ * @magic:	magic value identifying the type of extended context
+ * @size:	the size in bytes of the enclosing structure
+ *
+ * Extended context structures provide context which does not fit within struct
+ * sigcontext. They are placed sequentially in memory at the end of struct
+ * ucontext and struct sigframe, with each extended context structure beginning
+ * with a header defined by this struct. The type of context represented is
+ * indicated by the magic field. Userland may check each extended context
+ * structure against magic values that it recognises. The size field allows any
+ * unrecognised context to be skipped, allowing for future expansion. The end
+ * of the extended context data is indicated by the magic value
+ * END_EXTCONTEXT_MAGIC.
+ */
+struct extcontext {
+	unsigned int		magic;
+	unsigned int		size;
+};
+
+/**
+ * struct msa_extcontext - MSA extended context structure
+ * @ext:	the extended context header, with magic == MSA_EXTCONTEXT_MAGIC
+ * @wr:		the most significant 64 bits of each MSA vector register
+ * @csr:	the value of the MSA control & status register
+ *
+ * If MSA context is live for a task at the time a signal is delivered to it,
+ * this structure will hold the MSA context of the task as it was prior to the
+ * signal delivery.
+ */
+struct msa_extcontext {
+	struct extcontext	ext;
+#define MSA_EXTCONTEXT_MAGIC	0x784d5341	/* xMSA */
+
+	unsigned long long	wr[32];
+	unsigned int		csr;
+};
+
+#define END_EXTCONTEXT_MAGIC	0x78454e44	/* xEND */
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:
+ * @uc_link:
+ * @uc_stack:
+ * @uc_mcontext:	holds basic processor state
+ * @uc_sigmask:
+ * @uc_extcontext:	holds extended processor state
+ */
+struct ucontext {
+	/* Historic fields matching asm-generic */
+	unsigned long		uc_flags;
+	struct ucontext		*uc_link;
+	stack_t			uc_stack;
+	struct sigcontext	uc_mcontext;
+	sigset_t		uc_sigmask;
+
+	/* Extended context structures may follow ucontext */
+	unsigned long long	uc_extcontext[0];
+};
+
+#endif /* __MIPS_UAPI_ASM_UCONTEXT_H */
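
Given the magic/size framing described in the struct extcontext comment, a
signal handler can walk whatever extended context the kernel appended to its
ucontext. A hedged userspace sketch, assuming the definitions above
(walk_extcontext is hypothetical; error handling omitted):

/* Walk extended context records appended to a struct ucontext. */
static void walk_extcontext(struct ucontext *uc)
{
	struct extcontext *ext = (struct extcontext *)uc->uc_extcontext;

	while (ext->magic != END_EXTCONTEXT_MAGIC) {
		if (ext->magic == MSA_EXTCONTEXT_MAGIC) {
			struct msa_extcontext *msa =
				(struct msa_extcontext *)ext;
			/* msa->wr[] and msa->csr hold the MSA state */
			(void)msa;
		}
		/* size spans the whole record, so unrecognised types
		 * are skipped as the comment above promises. */
		ext = (struct extcontext *)((char *)ext + ext->size);
	}
}
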
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index e1ea4f6..5d6828b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -110,18 +110,11 @@
 	}
 }
 
-static void r4030_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 struct clock_event_device r4030_clockevent = {
 	.name		= "r4030",
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
 	.rating		= 300,
 	.irq		= JAZZ_TIMER_IRQ,
-	.set_mode	= r4030_set_mode,
 };
 
 static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 54c80d4..6cd69fd 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -231,6 +231,13 @@
 	return 0;
 }
 
+static int jz_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	struct jz_gpio_chip *jz_gpio = gpio_chip_to_jz_gpio_chip(chip);
+
+	return jz_gpio->irq_base + gpio;
+}
+
 int jz_gpio_port_direction_input(int port, uint32_t mask)
 {
 	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
@@ -262,18 +269,6 @@
 }
 EXPORT_SYMBOL(jz_gpio_port_get_value);
 
-int gpio_to_irq(unsigned gpio)
-{
-	return JZ4740_IRQ_GPIO(0) + gpio;
-}
-EXPORT_SYMBOL_GPL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return irq - JZ4740_IRQ_GPIO(0);
-}
-EXPORT_SYMBOL_GPL(irq_to_gpio);
-
 #define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
 
 static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
@@ -403,6 +398,7 @@
 		.get = jz_gpio_get_value, \
 		.direction_output = jz_gpio_direction_output, \
 		.direction_input = jz_gpio_direction_input, \
+		.to_irq = jz_gpio_to_irq, \
 		.base = JZ4740_GPIO_BASE_ ## _bank, \
 		.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
 	}, \
@@ -423,8 +419,8 @@
 	chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
 
 	chip->irq = JZ4740_IRQ_INTC_GPIO(id);
-	irq_set_handler_data(chip->irq, chip);
-	irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
+	irq_set_chained_handler_and_data(chip->irq,
+					 jz_gpio_irq_demux_handler, chip);
 
 	gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
 		chip->base, handle_level_irq);
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 7ab47fe..1f7ca2c 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -58,7 +58,7 @@
 
 	jz4740_timer_ack_full(TIMER_CLOCKEVENT);
 
-	if (cd->mode != CLOCK_EVT_MODE_PERIODIC)
+	if (!clockevent_state_periodic(cd))
 		jz4740_timer_disable(TIMER_CLOCKEVENT);
 
 	cd->event_handler(cd);
@@ -66,24 +66,29 @@
 	return IRQ_HANDLED;
 }
 
-static void jz4740_clockevent_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *cd)
+static int jz4740_clockevent_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
-		jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
-	case CLOCK_EVT_MODE_RESUME:
-		jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
-		jz4740_timer_enable(TIMER_CLOCKEVENT);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		jz4740_timer_disable(TIMER_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
+	jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
+	jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_resume(struct clock_event_device *evt)
+{
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_shutdown(struct clock_event_device *evt)
+{
+	jz4740_timer_disable(TIMER_CLOCKEVENT);
+
+	return 0;
 }
 
 static int jz4740_clockevent_set_next(unsigned long evt,
@@ -100,7 +105,10 @@
 	.name = "jz4740-timer",
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = jz4740_clockevent_set_next,
-	.set_mode = jz4740_clockevent_set_mode,
+	.set_state_shutdown = jz4740_clockevent_shutdown,
+	.set_state_periodic = jz4740_clockevent_set_periodic,
+	.set_state_oneshot = jz4740_clockevent_shutdown,
+	.tick_resume = jz4740_clockevent_resume,
 	.rating = 200,
 #ifdef CONFIG_MACH_JZ4740
 	.irq = JZ4740_IRQ_TCU0,
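
This jz4740 conversion is the template the rest of the series follows for the
MIPS timer drivers: the multiplexed set_mode() callback is split into
per-state callbacks that each return an int. A generic sketch of the new
shape, assuming <linux/clockchips.h> (the foo_* names and timer accessors are
hypothetical stand-ins):

/* Sketch of per-state clockevent callbacks replacing set_mode(). */
extern void foo_timer_start(unsigned long period_ns);	/* hypothetical */
extern void foo_timer_stop(void);			/* hypothetical */

static int foo_clockevent_shutdown(struct clock_event_device *evt)
{
	foo_timer_stop();	/* oneshot reuses this; armed later */
	return 0;
}

static int foo_clockevent_set_periodic(struct clock_event_device *evt)
{
	foo_timer_start(NSEC_PER_SEC / HZ);
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= foo_clockevent_shutdown,
	.set_state_periodic	= foo_clockevent_set_periodic,
	.set_state_oneshot	= foo_clockevent_shutdown,
	.tick_resume		= foo_clockevent_shutdown,
};
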
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 3156c8d..d982be1 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -99,6 +99,7 @@
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+obj-$(CONFIG_UPROBES)		+= uprobes.o
 
 obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
 obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 072fab1..154e203 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -128,6 +128,7 @@
 	       thread.cp0_baduaddr);
 	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
+	OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
 	BLANK();
 }
 
@@ -245,17 +246,6 @@
 }
 #endif
 
-#ifdef CONFIG_MIPS32_COMPAT
-void output_sc32_defines(void)
-{
-	COMMENT("Linux 32-bit sigcontext offsets.");
-	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
-	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
-	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	BLANK();
-}
-#endif
-
 void output_signal_defined(void)
 {
 	COMMENT("Linux signal numbers.");
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 7976457..940ac00 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -40,8 +40,8 @@
  * The general purpose timer ticks at 1MHz independently of
  * the rest of the system
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -49,24 +49,22 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	unsigned int cpu = smp_processor_id();
+	void __iomem *cfg;
 
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -91,7 +89,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -130,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	bcm1480_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ff1f01b..77a5ddf 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -59,27 +59,32 @@
 	return -EINVAL;
 }
 
-static void ds1287_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int ds1287_shutdown(struct clock_event_device *evt)
 {
 	u8 val;
 
 	spin_lock(&rtc_lock);
 
 	val = CMOS_READ(RTC_REG_B);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		val |= RTC_PIE;
-		break;
-	default:
-		val &= ~RTC_PIE;
-		break;
-	}
-
+	val &= ~RTC_PIE;
 	CMOS_WRITE(val, RTC_REG_B);
 
 	spin_unlock(&rtc_lock);
+	return 0;
+}
+
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+	u8 val;
+
+	spin_lock(&rtc_lock);
+
+	val = CMOS_READ(RTC_REG_B);
+	val |= RTC_PIE;
+	CMOS_WRITE(val, RTC_REG_B);
+
+	spin_unlock(&rtc_lock);
+	return 0;
 }
 
 static void ds1287_event_handler(struct clock_event_device *dev)
@@ -87,11 +92,13 @@
 }
 
 static struct clock_event_device ds1287_clockevent = {
-	.name		= "ds1287",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.set_next_event = ds1287_set_next_event,
-	.set_mode	= ds1287_set_mode,
-	.event_handler	= ds1287_event_handler,
+	.name			= "ds1287",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event		= ds1287_set_next_event,
+	.set_state_shutdown	= ds1287_shutdown,
+	.set_state_periodic	= ds1287_set_periodic,
+	.tick_resume		= ds1287_shutdown,
+	.event_handler		= ds1287_event_handler,
 };
 
 static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index f069460..6604005 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -64,8 +64,7 @@
 	return 0;
 }
 
-static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
 {
 	u32 ctrl;
 
@@ -73,21 +72,39 @@
 
 	ctrl = GT_READ(GT_TC_CONTROL_OFS);
 	ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK;
-		break;
-	default:
-		break;
-	}
-
 	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
 
 	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
 }
 
 static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -95,12 +112,16 @@
 }
 
 static struct clock_event_device gt641xx_timer0_clockevent = {
-	.name		= "gt641xx-timer0",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.irq		= GT641XX_TIMER0_IRQ,
-	.set_next_event = gt641xx_timer0_set_next_event,
-	.set_mode	= gt641xx_timer0_set_mode,
-	.event_handler	= gt641xx_timer0_event_handler,
+	.name			= "gt641xx-timer0",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.irq			= GT641XX_TIMER0_IRQ,
+	.set_next_event		= gt641xx_timer0_set_next_event,
+	.set_state_shutdown	= gt641xx_timer0_shutdown,
+	.set_state_periodic	= gt641xx_timer0_set_periodic,
+	.set_state_oneshot	= gt641xx_timer0_set_oneshot,
+	.tick_resume		= gt641xx_timer0_shutdown,
+	.event_handler		= gt641xx_timer0_event_handler,
 };
 
 static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index d70c4d8..8dfe6a6 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,12 +28,6 @@
 	return res;
 }
 
-void mips_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
@@ -174,6 +168,11 @@
 	return 1;
 }
 
+unsigned int __weak get_c0_compare_int(void)
+{
+	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
 int r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -189,11 +188,9 @@
 	/*
 	 * With vectored interrupts things are getting platform specific.
 	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of it's liking.
+	 * interrupt number of its liking.
 	 */
-	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-	if (get_c0_compare_int)
-		irq = get_c0_compare_int();
+	irq = get_c0_compare_int();
 
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
@@ -212,7 +209,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 5ea6d6b..3d860ef 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -38,8 +38,20 @@
  * The general purpose timer ticks at 1MHz independently of
  * the rest of the system
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	void __iomem *cfg;
+
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+
+	return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -47,24 +59,11 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -89,7 +88,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -129,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	sb1250_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 7239324..537eefd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -85,36 +85,54 @@
 	__raw_writel(0, &tmrptr->tisr);
 }
 
-static void txx9tmr_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
 {
 	struct txx9_clock_event_device *txx9_cd =
 		container_of(evt, struct txx9_clock_event_device, cd);
 	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
 
 	txx9tmr_stop_and_clear(tmrptr);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
-			     &tmrptr->itmr);
-		/* start timer */
-		__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
-			     evt->shift,
-			     &tmrptr->cpra);
-		__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(TIMER_CCD, &tmrptr->ccdr);
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	}
+
+	__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+	/* start timer */
+	__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+		     &tmrptr->cpra);
+	__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+	return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TIMER_CCD, &tmrptr->ccdr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
 }
 
 static int txx9tmr_set_next_event(unsigned long delta,
@@ -133,12 +151,15 @@
 
 static struct txx9_clock_event_device txx9_clock_event_device = {
 	.cd = {
-		.name		= "TXx9",
-		.features	= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
-		.rating		= 200,
-		.set_mode	= txx9tmr_set_mode,
-		.set_next_event = txx9tmr_set_next_event,
+		.name			= "TXx9",
+		.features		= CLOCK_EVT_FEAT_PERIODIC |
+					  CLOCK_EVT_FEAT_ONESHOT,
+		.rating			= 200,
+		.set_state_shutdown	= txx9tmr_set_state_shutdown,
+		.set_state_periodic	= txx9tmr_set_state_periodic,
+		.set_state_oneshot	= txx9tmr_set_state_oneshot,
+		.tick_resume		= txx9tmr_tick_resume,
+		.set_next_event		= txx9tmr_set_next_event,
 	},
 };
 
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 1b6ca63..9f71c06 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -152,7 +152,7 @@
 
 	/* Enter the coherent domain */
 	li	t0, 0xff
-	PTR_S	t0, GCR_CL_COHERENCE_OFS(v1)
+	sw	t0, GCR_CL_COHERENCE_OFS(v1)
 	ehb
 
 	/* Jump to kseg0 */
@@ -302,7 +302,7 @@
 	PTR_L	t0, 0(t0)
 
 	/* Calculate a pointer to this core's struct core_boot_config */
-	PTR_L	t0, GCR_CL_ID_OFS(t0)
+	lw	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
 	PTR_LA	t1, mips_cps_core_bootcfg
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dbe0792..571a8e6 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -32,6 +32,9 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+
 /*
  * Get the FPU Implementation/Revision.
  */
@@ -188,7 +191,7 @@
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
 
 static int __init ftlb_disable(char *s)
 {
@@ -202,7 +205,10 @@
 		return 1;
 
 	/* Disable it in the boot cpu */
-	set_ftlb_enable(&cpu_data[0], 0);
+	if (set_ftlb_enable(&cpu_data[0], 0)) {
+		pr_warn("Can't turn FTLB off\n");
+		return 1;
+	}
 
 	back_to_back_c0_hazard();
 
@@ -364,30 +370,41 @@
 		return 3;
 }
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
-	unsigned int config6;
+	unsigned int config;
 
 	/* It's implementation dependent how the FTLB can be enabled */
 	switch (c->cputype) {
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
-		config6 = read_c0_config6();
+		config = read_c0_config6();
 		/* Clear the old probability value */
-		config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
 		if (enable)
 			/* Enable FTLB */
-			write_c0_config6(config6 |
+			write_c0_config6(config |
 					 (calculate_ftlb_probability(c)
 					  << MIPS_CONF6_FTLBP_SHIFT)
 					 | MIPS_CONF6_FTLBEN);
 		else
 			/* Disable FTLB */
-			write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
-		back_to_back_c0_hazard();
+			write_c0_config6(config &  ~MIPS_CONF6_FTLBEN);
 		break;
+	case CPU_I6400:
+		/* I6400 & related cores use Config7 to configure FTLB */
+		config = read_c0_config7();
+		/* Clear the old probability value */
+		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
+		write_c0_config7(config | (calculate_ftlb_probability(c)
+					   << MIPS_CONF7_FTLBP_SHIFT));
+		break;
+	default:
+		return 1;
 	}
+
+	return 0;
 }
 
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -524,6 +541,8 @@
 	}
 	if (config3 & MIPS_CONF3_CDMM)
 		c->options |= MIPS_CPU_CDMM;
+	if (config3 & MIPS_CONF3_SP)
+		c->options |= MIPS_CPU_SP;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -540,7 +559,16 @@
 	if (cpu_has_tlb) {
 		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
 			c->options |= MIPS_CPU_TLBINV;
-		mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		/*
+		 * This is a bit ugly. R6 has dropped that field from
+		 * config4 and the only valid configuration is VTLB+FTLB so
+		 * set a good value for mmuextdef for that case.
+		 */
+		if (cpu_has_mips_r6)
+			mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+		else
+			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+
 		switch (mmuextdef) {
 		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
 			c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
@@ -1121,6 +1149,10 @@
 		c->cputype = CPU_P5600;
 		__cpu_name[cpu] = "MIPS P5600";
 		break;
+	case PRID_IMP_I6400:
+		c->cputype = CPU_I6400;
+		__cpu_name[cpu] = "MIPS I6400";
+		break;
 	case PRID_IMP_M5150:
 		c->cputype = CPU_M5150;
 		__cpu_name[cpu] = "MIPS M5150";
@@ -1492,10 +1524,14 @@
 	else
 		c->srsets = 1;
 
+	if (cpu_has_mips_r6)
+		elf_hwcap |= HWCAP_MIPS_R6;
+
 	if (cpu_has_msa) {
 		c->msa_id = cpu_get_msa_id();
 		WARN(c->msa_id & MSA_IR_WRPF,
 		     "Vector register partitioning unimplemented!");
+		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
 	cpu_probe_vmbits(c);
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index e4f62b7..ab1478d 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -196,6 +196,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index dda800e..3e586da 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@
 	/* Target must have the right alignment and ISA must be preserved. */
 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
 		insn.j_format.target = e->target >> J_RANGE_SHIFT;
 	} else {
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 85bbe9b..b8ceee5 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -15,11 +15,131 @@
 
 void __iomem *mips_cm_base;
 void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+	"mem",	"gcr",	"gic",	"mmio",
+	"0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+	[0x0] = "ReqNoData",
+	[0x1] = "0x1",
+	[0x2] = "ReqWData",
+	[0x3] = "0x3",
+	[0x4] = "IReqNoResp",
+	[0x5] = "IReqWResp",
+	[0x6] = "IReqNoRespDat",
+	[0x7] = "IReqWRespDat",
+	[0x8] = "RespNoData",
+	[0x9] = "RespDataFol",
+	[0xa] = "RespWData",
+	[0xb] = "RespDataOnly",
+	[0xc] = "IRespNoData",
+	[0xd] = "IRespDataFol",
+	[0xe] = "IRespWData",
+	[0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+	[0x00] = "0x00",
+	[0x01] = "Legacy Write",
+	[0x02] = "Legacy Read",
+	[0x03] = "0x03",
+	[0x04] = "0x04",
+	[0x05] = "0x05",
+	[0x06] = "0x06",
+	[0x07] = "0x07",
+	[0x08] = "Coherent Read Own",
+	[0x09] = "Coherent Read Share",
+	[0x0a] = "Coherent Read Discard",
+	[0x0b] = "Coherent Read Share Always",
+	[0x0c] = "Coherent Upgrade",
+	[0x0d] = "Coherent Writeback",
+	[0x0e] = "0x0e",
+	[0x0f] = "0x0f",
+	[0x10] = "Coherent Copyback",
+	[0x11] = "Coherent Copyback Invalidate",
+	[0x12] = "Coherent Invalidate",
+	[0x13] = "Coherent Write Invalidate",
+	[0x14] = "Coherent Completion Sync",
+	[0x15] = "0x15",
+	[0x16] = "0x16",
+	[0x17] = "0x17",
+	[0x18] = "0x18",
+	[0x19] = "0x19",
+	[0x1a] = "0x1a",
+	[0x1b] = "0x1b",
+	[0x1c] = "0x1c",
+	[0x1d] = "0x1d",
+	[0x1e] = "0x1e",
+	[0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+	[0x0] = "Legacy Read",
+	[0x1] = "Legacy Write",
+	[0x2] = "Coherent Read Own",
+	[0x3] = "Coherent Read Share",
+	[0x4] = "Coherent Read Discard",
+	[0x5] = "Coherent Evicted",
+	[0x6] = "Coherent Upgrade",
+	[0x7] = "Coherent Upgrade for Store Conditional",
+	[0x8] = "Coherent Writeback",
+	[0x9] = "Coherent Write Invalidate",
+	[0xa] = "0xa",
+	[0xb] = "0xb",
+	[0xc] = "0xc",
+	[0xd] = "0xd",
+	[0xe] = "0xe",
+	[0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+	[0x0] = "Normal",
+	[0x1] = "Registers",
+	[0x2] = "TLB",
+	[0x3] = "0x3",
+	[0x4] = "L1I",
+	[0x5] = "L1D",
+	[0x6] = "L3",
+	[0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+	"Invalid/OK",	"Invalid/Data",
+	"Shared/OK",	"Shared/Data",
+	"Modified/OK",	"Modified/Data",
+	"Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_causes[32] = {
+	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+	"0x08", "0x09", "0x0a", "0x0b",
+	"0x0c", "0x0d", "0x0e", "0x0f",
+	"0x10", "0x11", "0x12", "0x13",
+	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
+	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+	"0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+	"0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+	"MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+	"CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+	"0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+	"0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+	"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
 
 phys_addr_t __mips_cm_phys_base(void)
 {
 	u32 config3 = read_c0_config3();
-	u32 cmgcr;
+	unsigned long cmgcr;
 
 	/* Check the CMGCRBase register is implemented */
 	if (!(config3 & MIPS_CONF3_CMGCR))
@@ -81,6 +201,13 @@
 	phys_addr_t addr;
 	u32 base_reg;
 
+	/*
+	 * No need to probe again if we have already been
+	 * here before.
+	 */
+	if (mips_cm_base)
+		return 0;
+
 	addr = mips_cm_phys_base();
 	BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
 	if (!addr)
@@ -117,5 +244,133 @@
 	/* probe for an L2-only sync region */
 	mips_cm_probe_l2sync();
 
+	/* determine register width for this CM */
+	mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
 	return 0;
 }
+
+void mips_cm_error_report(void)
+{
+	unsigned long revision = mips_cm_revision();
+	/*
+	 * CM3 has a 64-bit Error cause register with 0:57 containing the error
+	 * info and 63:58 the error type. For old CMs, everything is contained
+	 * in a single 32-bit register (0:26 and 31:27 respectively). Even
+	 * though the cm_error is u64, we will simply ignore the upper word
+	 * for CM2.
+	 */
+	u64 cm_error = read_gcr_error_cause();
+	int cm_error_cause_sft = CM_GCR_ERROR_CAUSE_ERRTYPE_SHF +
+				 ((revision >= CM_REV_CM3) ? 31 : 0);
+	unsigned long cm_addr = read_gcr_error_addr();
+	unsigned long cm_other = read_gcr_error_mult();
+	int ocause, cause;
+	char buf[256];
+
+	if (!mips_cm_present())
+		return;
+
+	cause = cm_error >> cm_error_cause_sft;
+
+	if (!cause)
+		/* All good */
+		return;
+
+	ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+	if (revision < CM_REV_CM3) { /* CM2 */
+		if (cause < 16) {
+			unsigned long cca_bits = (cm_error >> 15) & 7;
+			unsigned long tr_bits = (cm_error >> 12) & 7;
+			unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+			unsigned long stag_bits = (cm_error >> 3) & 15;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "CCA=%lu TR=%s MCmd=%s STag=%lu "
+				 "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+				 cm2_cmd[cmd_bits], stag_bits, sport_bits);
+		} else {
+			/* glob state & sresp together */
+			unsigned long c3_bits = (cm_error >> 18) & 7;
+			unsigned long c2_bits = (cm_error >> 15) & 7;
+			unsigned long c1_bits = (cm_error >> 12) & 7;
+			unsigned long c0_bits = (cm_error >> 9) & 7;
+			unsigned long sc_bit = (cm_error >> 8) & 1;
+			unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "C3=%s C2=%s C1=%s C0=%s SC=%s "
+				 "MCmd=%s SPort=%lu\n",
+				 cm2_core[c3_bits], cm2_core[c2_bits],
+				 cm2_core[c1_bits], cm2_core[c0_bits],
+				 sc_bit ? "True" : "False",
+				 cm2_cmd[cmd_bits], sport_bits);
+		}
+		pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+		       cm2_causes[cause], buf);
+		pr_err("CM_ADDR =%08lx\n", cm_addr);
+		pr_err("CM_OTHER=%08lx %s\n", cm_other, cm2_causes[ocause]);
+	} else { /* CM3 */
+		/* Used by cause == {1,2,3} */
+		unsigned long core_id_bits = (cm_error >> 22) & 0xf;
+		unsigned long vp_id_bits = (cm_error >> 18) & 0xf;
+		unsigned long cmd_bits = (cm_error >> 14) & 0xf;
+		unsigned long cmd_group_bits = (cm_error >> 11) & 0xf;
+		unsigned long cm3_cca_bits = (cm_error >> 8) & 7;
+		unsigned long mcp_bits = (cm_error >> 5) & 0xf;
+		unsigned long cm3_tr_bits = (cm_error >> 1) & 0xf;
+		unsigned long sched_bit = cm_error & 0x1;
+
+		if (cause == 1 || cause == 3) { /* Tag ECC */
+			unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+			unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+			unsigned long dword_bits = (cm_error >> 49) & 0xff;
+			unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+			unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+			unsigned long bank_bit = (cm_error >> 28) & 0x1;
+			snprintf(buf, sizeof(buf),
+				 "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
+				 "Bank=%lu CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+				 tag_ecc ? "TAG" : "DATA",
+				 tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+				 data_way_bits, dword_bits, data_sets_bits,
+				 bank_bit,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		} else if (cause == 2) {
+			unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+			unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+			unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+			unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+			snprintf(buf, sizeof(buf),
+				 "Decode Request Error: Type=%lu, Command=%lu "
+				 "Command Group=%lu Destination ID=%lu "
+				 "CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+				 data_error_type, data_decode_cmd,
+				 data_decode_group, data_decode_destination_id,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		}
+
+		pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+		       cm3_causes[cause], buf);
+		pr_err("CM_ADDR =%lx\n", cm_addr);
+		pr_err("CM_OTHER=%lx %s\n", cm_other, cm3_causes[ocause]);
+	}
+
+	/* reprime cause register */
+	write_gcr_error_cause(0);
+}
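
Per the comment at the top of mips_cm_error_report(), a CM2 error register
keeps the error type in bits 31:27 and the error info in bits 26:0. A
standalone sketch of that split (the sample register value is invented):

/* Split a CM2 GCR_ERROR_CAUSE value into type and info fields. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cm_error = 0x1800001a;		/* made-up sample */
	unsigned int cause = cm_error >> 27;	/* bits 31:27 */
	uint32_t info = cm_error & ((1u << 27) - 1); /* bits 26:0 */

	printf("cause=%u info=0x%07x\n", cause, info);
	return 0;
}
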
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 1196450..8af4d62 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,9 +21,16 @@
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
-phys_addr_t __weak mips_cpc_phys_base(void)
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
 {
-	u32 cpc_base;
+	unsigned long cpc_base;
 
 	if (!mips_cm_present())
 		return 0;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index cc1b6fa..d7b8dd4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1556,6 +1556,7 @@
 #endif
 		break;
 	case CPU_P5600:
+	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1717,6 +1718,11 @@
 		mipspmu.general_event_map = &mipsxxcore_event_map2;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
+	case CPU_I6400:
+		mipspmu.name = "mips/I6400";
+		mipspmu.general_event_map = &mipsxxcore_event_map2;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+		break;
 	case CPU_1004K:
 		mipspmu.name = "mips/1004K";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 0614717..f63a289 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -267,6 +267,7 @@
 
 	/* CPUs which do not require the workaround */
 	case CPU_P5600:
+	case CPU_I6400:
 		return 0;
 
 	default:
@@ -671,6 +672,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_P5600:
+	case CPU_I6400:
 		stype_intervention = 0x2;
 		stype_memory = 0x3;
 		stype_ordering = 0x10;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index e933a30..4f0ac78 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
 #include <linux/regset.h>
 #include <linux/smp.h>
 #include <linux/security.h>
+#include <linux/stddef.h>
 #include <linux/tracehook.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
@@ -490,6 +491,93 @@
 	REGSET_FPR,
 };
 
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) {					\
+	.name = #reg,							\
+	.offset = offsetof(struct pt_regs, r)				\
+}
+
+#define REG_OFFSET_END {						\
+	.name = NULL,							\
+	.offset = 0							\
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME(r0, regs[0]),
+	REG_OFFSET_NAME(r1, regs[1]),
+	REG_OFFSET_NAME(r2, regs[2]),
+	REG_OFFSET_NAME(r3, regs[3]),
+	REG_OFFSET_NAME(r4, regs[4]),
+	REG_OFFSET_NAME(r5, regs[5]),
+	REG_OFFSET_NAME(r6, regs[6]),
+	REG_OFFSET_NAME(r7, regs[7]),
+	REG_OFFSET_NAME(r8, regs[8]),
+	REG_OFFSET_NAME(r9, regs[9]),
+	REG_OFFSET_NAME(r10, regs[10]),
+	REG_OFFSET_NAME(r11, regs[11]),
+	REG_OFFSET_NAME(r12, regs[12]),
+	REG_OFFSET_NAME(r13, regs[13]),
+	REG_OFFSET_NAME(r14, regs[14]),
+	REG_OFFSET_NAME(r15, regs[15]),
+	REG_OFFSET_NAME(r16, regs[16]),
+	REG_OFFSET_NAME(r17, regs[17]),
+	REG_OFFSET_NAME(r18, regs[18]),
+	REG_OFFSET_NAME(r19, regs[19]),
+	REG_OFFSET_NAME(r20, regs[20]),
+	REG_OFFSET_NAME(r21, regs[21]),
+	REG_OFFSET_NAME(r22, regs[22]),
+	REG_OFFSET_NAME(r23, regs[23]),
+	REG_OFFSET_NAME(r24, regs[24]),
+	REG_OFFSET_NAME(r25, regs[25]),
+	REG_OFFSET_NAME(r26, regs[26]),
+	REG_OFFSET_NAME(r27, regs[27]),
+	REG_OFFSET_NAME(r28, regs[28]),
+	REG_OFFSET_NAME(r29, regs[29]),
+	REG_OFFSET_NAME(r30, regs[30]),
+	REG_OFFSET_NAME(r31, regs[31]),
+	REG_OFFSET_NAME(c0_status, cp0_status),
+	REG_OFFSET_NAME(hi, hi),
+	REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+	REG_OFFSET_NAME(acx, acx),
+#endif
+	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+	REG_OFFSET_NAME(c0_cause, cp0_cause),
+	REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_MIPS_MT_SMTC
+	REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	REG_OFFSET_NAME(mpl0, mpl[0]),
+	REG_OFFSET_NAME(mpl1, mpl[1]),
+	REG_OFFSET_NAME(mpl2, mpl[2]),
+	REG_OFFSET_NAME(mtp0, mtp[0]),
+	REG_OFFSET_NAME(mtp1, mtp[1]),
+	REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:       the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 
 static const struct user_regset mips_regsets[] = {
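The register offset table above is what lets instrumentation such as
kprobe/uprobe event arguments resolve a register by name at run time and
read it straight out of struct pt_regs. A minimal sketch of a consumer
(the helper name is invented for illustration; only
regs_query_register_offset() comes from this patch):

	/* Illustrative: fetch the first argument register, $a0 ("r4") */
	static unsigned long example_fetch_a0(struct pt_regs *regs)
	{
		int off = regs_query_register_offset("r4");

		if (off < 0)
			return 0;	/* unknown register name */
		return *(unsigned long *)((char *)regs + off);
	}
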
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 1d88af2..f09546e 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -35,6 +36,14 @@
 
 	.set	noreorder
 
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
 	.set	push
 	SET_HARDFLOAT
@@ -54,117 +63,60 @@
 	 nop
 #endif
 	/* Store the 16 odd double precision registers */
-	EX	sdc1 $f1, SC_FPREGS+8(a0)
-	EX	sdc1 $f3, SC_FPREGS+24(a0)
-	EX	sdc1 $f5, SC_FPREGS+40(a0)
-	EX	sdc1 $f7, SC_FPREGS+56(a0)
-	EX	sdc1 $f9, SC_FPREGS+72(a0)
-	EX	sdc1 $f11, SC_FPREGS+88(a0)
-	EX	sdc1 $f13, SC_FPREGS+104(a0)
-	EX	sdc1 $f15, SC_FPREGS+120(a0)
-	EX	sdc1 $f17, SC_FPREGS+136(a0)
-	EX	sdc1 $f19, SC_FPREGS+152(a0)
-	EX	sdc1 $f21, SC_FPREGS+168(a0)
-	EX	sdc1 $f23, SC_FPREGS+184(a0)
-	EX	sdc1 $f25, SC_FPREGS+200(a0)
-	EX	sdc1 $f27, SC_FPREGS+216(a0)
-	EX	sdc1 $f29, SC_FPREGS+232(a0)
-	EX	sdc1 $f31, SC_FPREGS+248(a0)
+	EX	sdc1 $f1, 8(a0)
+	EX	sdc1 $f3, 24(a0)
+	EX	sdc1 $f5, 40(a0)
+	EX	sdc1 $f7, 56(a0)
+	EX	sdc1 $f9, 72(a0)
+	EX	sdc1 $f11, 88(a0)
+	EX	sdc1 $f13, 104(a0)
+	EX	sdc1 $f15, 120(a0)
+	EX	sdc1 $f17, 136(a0)
+	EX	sdc1 $f19, 152(a0)
+	EX	sdc1 $f21, 168(a0)
+	EX	sdc1 $f23, 184(a0)
+	EX	sdc1 $f25, 200(a0)
+	EX	sdc1 $f27, 216(a0)
+	EX	sdc1 $f29, 232(a0)
+	EX	sdc1 $f31, 248(a0)
 1:	.set	pop
 #endif
 
 	.set push
 	SET_HARDFLOAT
 	/* Store the 16 even double precision registers */
-	EX	sdc1 $f0, SC_FPREGS+0(a0)
-	EX	sdc1 $f2, SC_FPREGS+16(a0)
-	EX	sdc1 $f4, SC_FPREGS+32(a0)
-	EX	sdc1 $f6, SC_FPREGS+48(a0)
-	EX	sdc1 $f8, SC_FPREGS+64(a0)
-	EX	sdc1 $f10, SC_FPREGS+80(a0)
-	EX	sdc1 $f12, SC_FPREGS+96(a0)
-	EX	sdc1 $f14, SC_FPREGS+112(a0)
-	EX	sdc1 $f16, SC_FPREGS+128(a0)
-	EX	sdc1 $f18, SC_FPREGS+144(a0)
-	EX	sdc1 $f20, SC_FPREGS+160(a0)
-	EX	sdc1 $f22, SC_FPREGS+176(a0)
-	EX	sdc1 $f24, SC_FPREGS+192(a0)
-	EX	sdc1 $f26, SC_FPREGS+208(a0)
-	EX	sdc1 $f28, SC_FPREGS+224(a0)
-	EX	sdc1 $f30, SC_FPREGS+240(a0)
-	EX	sw t1, SC_FPC_CSR(a0)
+	EX	sdc1 $f0, 0(a0)
+	EX	sdc1 $f2, 16(a0)
+	EX	sdc1 $f4, 32(a0)
+	EX	sdc1 $f6, 48(a0)
+	EX	sdc1 $f8, 64(a0)
+	EX	sdc1 $f10, 80(a0)
+	EX	sdc1 $f12, 96(a0)
+	EX	sdc1 $f14, 112(a0)
+	EX	sdc1 $f16, 128(a0)
+	EX	sdc1 $f18, 144(a0)
+	EX	sdc1 $f20, 160(a0)
+	EX	sdc1 $f22, 176(a0)
+	EX	sdc1 $f24, 192(a0)
+	EX	sdc1 $f26, 208(a0)
+	EX	sdc1 $f28, 224(a0)
+	EX	sdc1 $f30, 240(a0)
+	EX	sw t1, 0(a1)
 	jr	ra
 	 li	v0, 0					# success
 	.set pop
 	END(_save_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-	/* Save 32-bit process floating point context */
-LEAF(_save_fp_context32)
-	.set push
-	.set MIPS_ISA_ARCH_LEVEL_RAW
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip storing odd if FR=0
-	 nop
-#endif
-
-	/* Store the 16 odd double precision registers */
-	EX      sdc1 $f1, SC32_FPREGS+8(a0)
-	EX      sdc1 $f3, SC32_FPREGS+24(a0)
-	EX      sdc1 $f5, SC32_FPREGS+40(a0)
-	EX      sdc1 $f7, SC32_FPREGS+56(a0)
-	EX      sdc1 $f9, SC32_FPREGS+72(a0)
-	EX      sdc1 $f11, SC32_FPREGS+88(a0)
-	EX      sdc1 $f13, SC32_FPREGS+104(a0)
-	EX      sdc1 $f15, SC32_FPREGS+120(a0)
-	EX      sdc1 $f17, SC32_FPREGS+136(a0)
-	EX      sdc1 $f19, SC32_FPREGS+152(a0)
-	EX      sdc1 $f21, SC32_FPREGS+168(a0)
-	EX      sdc1 $f23, SC32_FPREGS+184(a0)
-	EX      sdc1 $f25, SC32_FPREGS+200(a0)
-	EX      sdc1 $f27, SC32_FPREGS+216(a0)
-	EX      sdc1 $f29, SC32_FPREGS+232(a0)
-	EX      sdc1 $f31, SC32_FPREGS+248(a0)
-
-	/* Store the 16 even double precision registers */
-1:	EX	sdc1 $f0, SC32_FPREGS+0(a0)
-	EX	sdc1 $f2, SC32_FPREGS+16(a0)
-	EX	sdc1 $f4, SC32_FPREGS+32(a0)
-	EX	sdc1 $f6, SC32_FPREGS+48(a0)
-	EX	sdc1 $f8, SC32_FPREGS+64(a0)
-	EX	sdc1 $f10, SC32_FPREGS+80(a0)
-	EX	sdc1 $f12, SC32_FPREGS+96(a0)
-	EX	sdc1 $f14, SC32_FPREGS+112(a0)
-	EX	sdc1 $f16, SC32_FPREGS+128(a0)
-	EX	sdc1 $f18, SC32_FPREGS+144(a0)
-	EX	sdc1 $f20, SC32_FPREGS+160(a0)
-	EX	sdc1 $f22, SC32_FPREGS+176(a0)
-	EX	sdc1 $f24, SC32_FPREGS+192(a0)
-	EX	sdc1 $f26, SC32_FPREGS+208(a0)
-	EX	sdc1 $f28, SC32_FPREGS+224(a0)
-	EX	sdc1 $f30, SC32_FPREGS+240(a0)
-	EX	sw t1, SC32_FPC_CSR(a0)
-	cfc1	t0, $0				# implementation/version
-	EX	sw t0, SC32_FPC_EIR(a0)
-	.set pop
-
-	jr	ra
-	 li	v0, 0					# success
-	END(_save_fp_context32)
-#endif
-
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
-	EX	lw t1, SC_FPC_CSR(a0)
+	EX	lw t1, 0(a1)
 
 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
 		defined(CONFIG_CPU_MIPS32_R6)
@@ -178,101 +130,231 @@
 	bgez	t0, 1f			# skip loading odd if FR=0
 	 nop
 #endif
-	EX	ldc1 $f1, SC_FPREGS+8(a0)
-	EX	ldc1 $f3, SC_FPREGS+24(a0)
-	EX	ldc1 $f5, SC_FPREGS+40(a0)
-	EX	ldc1 $f7, SC_FPREGS+56(a0)
-	EX	ldc1 $f9, SC_FPREGS+72(a0)
-	EX	ldc1 $f11, SC_FPREGS+88(a0)
-	EX	ldc1 $f13, SC_FPREGS+104(a0)
-	EX	ldc1 $f15, SC_FPREGS+120(a0)
-	EX	ldc1 $f17, SC_FPREGS+136(a0)
-	EX	ldc1 $f19, SC_FPREGS+152(a0)
-	EX	ldc1 $f21, SC_FPREGS+168(a0)
-	EX	ldc1 $f23, SC_FPREGS+184(a0)
-	EX	ldc1 $f25, SC_FPREGS+200(a0)
-	EX	ldc1 $f27, SC_FPREGS+216(a0)
-	EX	ldc1 $f29, SC_FPREGS+232(a0)
-	EX	ldc1 $f31, SC_FPREGS+248(a0)
+	EX	ldc1 $f1, 8(a0)
+	EX	ldc1 $f3, 24(a0)
+	EX	ldc1 $f5, 40(a0)
+	EX	ldc1 $f7, 56(a0)
+	EX	ldc1 $f9, 72(a0)
+	EX	ldc1 $f11, 88(a0)
+	EX	ldc1 $f13, 104(a0)
+	EX	ldc1 $f15, 120(a0)
+	EX	ldc1 $f17, 136(a0)
+	EX	ldc1 $f19, 152(a0)
+	EX	ldc1 $f21, 168(a0)
+	EX	ldc1 $f23, 184(a0)
+	EX	ldc1 $f25, 200(a0)
+	EX	ldc1 $f27, 216(a0)
+	EX	ldc1 $f29, 232(a0)
+	EX	ldc1 $f31, 248(a0)
 1:	.set pop
 #endif
 	.set push
 	SET_HARDFLOAT
-	EX	ldc1 $f0, SC_FPREGS+0(a0)
-	EX	ldc1 $f2, SC_FPREGS+16(a0)
-	EX	ldc1 $f4, SC_FPREGS+32(a0)
-	EX	ldc1 $f6, SC_FPREGS+48(a0)
-	EX	ldc1 $f8, SC_FPREGS+64(a0)
-	EX	ldc1 $f10, SC_FPREGS+80(a0)
-	EX	ldc1 $f12, SC_FPREGS+96(a0)
-	EX	ldc1 $f14, SC_FPREGS+112(a0)
-	EX	ldc1 $f16, SC_FPREGS+128(a0)
-	EX	ldc1 $f18, SC_FPREGS+144(a0)
-	EX	ldc1 $f20, SC_FPREGS+160(a0)
-	EX	ldc1 $f22, SC_FPREGS+176(a0)
-	EX	ldc1 $f24, SC_FPREGS+192(a0)
-	EX	ldc1 $f26, SC_FPREGS+208(a0)
-	EX	ldc1 $f28, SC_FPREGS+224(a0)
-	EX	ldc1 $f30, SC_FPREGS+240(a0)
+	EX	ldc1 $f0, 0(a0)
+	EX	ldc1 $f2, 16(a0)
+	EX	ldc1 $f4, 32(a0)
+	EX	ldc1 $f6, 48(a0)
+	EX	ldc1 $f8, 64(a0)
+	EX	ldc1 $f10, 80(a0)
+	EX	ldc1 $f12, 96(a0)
+	EX	ldc1 $f14, 112(a0)
+	EX	ldc1 $f16, 128(a0)
+	EX	ldc1 $f18, 144(a0)
+	EX	ldc1 $f20, 160(a0)
+	EX	ldc1 $f22, 176(a0)
+	EX	ldc1 $f24, 192(a0)
+	EX	ldc1 $f26, 208(a0)
+	EX	ldc1 $f28, 224(a0)
+	EX	ldc1 $f30, 240(a0)
 	ctc1	t1, fcr31
 	.set pop
 	jr	ra
 	 li	v0, 0					# success
 	END(_restore_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-LEAF(_restore_fp_context32)
-	/* Restore an o32 sigcontext.  */
-	.set push
-	SET_HARDFLOAT
-	EX	lw t1, SC32_FPC_CSR(a0)
+#ifdef CONFIG_CPU_HAS_MSA
 
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip loading odd if FR=0
-	 nop
-#endif
-
-	EX      ldc1 $f1, SC32_FPREGS+8(a0)
-	EX      ldc1 $f3, SC32_FPREGS+24(a0)
-	EX      ldc1 $f5, SC32_FPREGS+40(a0)
-	EX      ldc1 $f7, SC32_FPREGS+56(a0)
-	EX      ldc1 $f9, SC32_FPREGS+72(a0)
-	EX      ldc1 $f11, SC32_FPREGS+88(a0)
-	EX      ldc1 $f13, SC32_FPREGS+104(a0)
-	EX      ldc1 $f15, SC32_FPREGS+120(a0)
-	EX      ldc1 $f17, SC32_FPREGS+136(a0)
-	EX      ldc1 $f19, SC32_FPREGS+152(a0)
-	EX      ldc1 $f21, SC32_FPREGS+168(a0)
-	EX      ldc1 $f23, SC32_FPREGS+184(a0)
-	EX      ldc1 $f25, SC32_FPREGS+200(a0)
-	EX      ldc1 $f27, SC32_FPREGS+216(a0)
-	EX      ldc1 $f29, SC32_FPREGS+232(a0)
-	EX      ldc1 $f31, SC32_FPREGS+248(a0)
-
-1:	EX	ldc1 $f0, SC32_FPREGS+0(a0)
-	EX	ldc1 $f2, SC32_FPREGS+16(a0)
-	EX	ldc1 $f4, SC32_FPREGS+32(a0)
-	EX	ldc1 $f6, SC32_FPREGS+48(a0)
-	EX	ldc1 $f8, SC32_FPREGS+64(a0)
-	EX	ldc1 $f10, SC32_FPREGS+80(a0)
-	EX	ldc1 $f12, SC32_FPREGS+96(a0)
-	EX	ldc1 $f14, SC32_FPREGS+112(a0)
-	EX	ldc1 $f16, SC32_FPREGS+128(a0)
-	EX	ldc1 $f18, SC32_FPREGS+144(a0)
-	EX	ldc1 $f20, SC32_FPREGS+160(a0)
-	EX	ldc1 $f22, SC32_FPREGS+176(a0)
-	EX	ldc1 $f24, SC32_FPREGS+192(a0)
-	EX	ldc1 $f26, SC32_FPREGS+208(a0)
-	EX	ldc1 $f28, SC32_FPREGS+224(a0)
-	EX	ldc1 $f30, SC32_FPREGS+240(a0)
-	ctc1	t1, fcr31
+	.macro	op_one_wr	op, idx, base
+	.align	4
+\idx:	\op	\idx, 0, \base
 	jr	ra
-	 li	v0, 0					# success
-	.set pop
-	END(_restore_fp_context32)
+	 nop
+	.endm
+
+	.macro	op_msa_wr	name, op
+LEAF(\name)
+	.set		push
+	.set		noreorder
+	sll		t0, a0, 4
+	PTR_LA		t1, 0f
+	PTR_ADDU	t0, t0, t1
+	jr		t0
+	  nop
+	op_one_wr	\op, 0, a1
+	op_one_wr	\op, 1, a1
+	op_one_wr	\op, 2, a1
+	op_one_wr	\op, 3, a1
+	op_one_wr	\op, 4, a1
+	op_one_wr	\op, 5, a1
+	op_one_wr	\op, 6, a1
+	op_one_wr	\op, 7, a1
+	op_one_wr	\op, 8, a1
+	op_one_wr	\op, 9, a1
+	op_one_wr	\op, 10, a1
+	op_one_wr	\op, 11, a1
+	op_one_wr	\op, 12, a1
+	op_one_wr	\op, 13, a1
+	op_one_wr	\op, 14, a1
+	op_one_wr	\op, 15, a1
+	op_one_wr	\op, 16, a1
+	op_one_wr	\op, 17, a1
+	op_one_wr	\op, 18, a1
+	op_one_wr	\op, 19, a1
+	op_one_wr	\op, 20, a1
+	op_one_wr	\op, 21, a1
+	op_one_wr	\op, 22, a1
+	op_one_wr	\op, 23, a1
+	op_one_wr	\op, 24, a1
+	op_one_wr	\op, 25, a1
+	op_one_wr	\op, 26, a1
+	op_one_wr	\op, 27, a1
+	op_one_wr	\op, 28, a1
+	op_one_wr	\op, 29, a1
+	op_one_wr	\op, 30, a1
+	op_one_wr	\op, 31, a1
+	.set		pop
+	END(\name)
+	.endm
+
+	op_msa_wr	read_msa_wr_b, st_b
+	op_msa_wr	read_msa_wr_h, st_h
+	op_msa_wr	read_msa_wr_w, st_w
+	op_msa_wr	read_msa_wr_d, st_d
+
+	op_msa_wr	write_msa_wr_b, ld_b
+	op_msa_wr	write_msa_wr_h, ld_h
+	op_msa_wr	write_msa_wr_w, ld_w
+	op_msa_wr	write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+	.macro	save_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	copy_u_d \wr, 1
+	EX sd	$1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	copy_u_w \wr, 2
+	EX sw	$1, \off(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	copy_u_w \wr, 2
+	EX sw	$1, (\off+4)(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, \off(\base)
 #endif
+	.set	pop
+	.endm
+
+LEAF(_save_msa_all_upper)
+	save_msa_upper	0, 0x00, a0
+	save_msa_upper	1, 0x08, a0
+	save_msa_upper	2, 0x10, a0
+	save_msa_upper	3, 0x18, a0
+	save_msa_upper	4, 0x20, a0
+	save_msa_upper	5, 0x28, a0
+	save_msa_upper	6, 0x30, a0
+	save_msa_upper	7, 0x38, a0
+	save_msa_upper	8, 0x40, a0
+	save_msa_upper	9, 0x48, a0
+	save_msa_upper	10, 0x50, a0
+	save_msa_upper	11, 0x58, a0
+	save_msa_upper	12, 0x60, a0
+	save_msa_upper	13, 0x68, a0
+	save_msa_upper	14, 0x70, a0
+	save_msa_upper	15, 0x78, a0
+	save_msa_upper	16, 0x80, a0
+	save_msa_upper	17, 0x88, a0
+	save_msa_upper	18, 0x90, a0
+	save_msa_upper	19, 0x98, a0
+	save_msa_upper	20, 0xa0, a0
+	save_msa_upper	21, 0xa8, a0
+	save_msa_upper	22, 0xb0, a0
+	save_msa_upper	23, 0xb8, a0
+	save_msa_upper	24, 0xc0, a0
+	save_msa_upper	25, 0xc8, a0
+	save_msa_upper	26, 0xd0, a0
+	save_msa_upper	27, 0xd8, a0
+	save_msa_upper	28, 0xe0, a0
+	save_msa_upper	29, 0xe8, a0
+	save_msa_upper	30, 0xf0, a0
+	save_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_save_msa_all_upper)
+
+	.macro	restore_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	EX ld	$1, \off(\base)
+	insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	EX lw	$1, \off(\base)
+	insert_w \wr, 2
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 2
+	EX lw	$1, \off(\base)
+	insert_w \wr, 3
+#endif
+	.set	pop
+	.endm
+
+LEAF(_restore_msa_all_upper)
+	restore_msa_upper	0, 0x00, a0
+	restore_msa_upper	1, 0x08, a0
+	restore_msa_upper	2, 0x10, a0
+	restore_msa_upper	3, 0x18, a0
+	restore_msa_upper	4, 0x20, a0
+	restore_msa_upper	5, 0x28, a0
+	restore_msa_upper	6, 0x30, a0
+	restore_msa_upper	7, 0x38, a0
+	restore_msa_upper	8, 0x40, a0
+	restore_msa_upper	9, 0x48, a0
+	restore_msa_upper	10, 0x50, a0
+	restore_msa_upper	11, 0x58, a0
+	restore_msa_upper	12, 0x60, a0
+	restore_msa_upper	13, 0x68, a0
+	restore_msa_upper	14, 0x70, a0
+	restore_msa_upper	15, 0x78, a0
+	restore_msa_upper	16, 0x80, a0
+	restore_msa_upper	17, 0x88, a0
+	restore_msa_upper	18, 0x90, a0
+	restore_msa_upper	19, 0x98, a0
+	restore_msa_upper	20, 0xa0, a0
+	restore_msa_upper	21, 0xa8, a0
+	restore_msa_upper	22, 0xb0, a0
+	restore_msa_upper	23, 0xb8, a0
+	restore_msa_upper	24, 0xc0, a0
+	restore_msa_upper	25, 0xc8, a0
+	restore_msa_upper	26, 0xd0, a0
+	restore_msa_upper	27, 0xd8, a0
+	restore_msa_upper	28, 0xe0, a0
+	restore_msa_upper	29, 0xe8, a0
+	restore_msa_upper	30, 0xf0, a0
+	restore_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
 
 	.set	reorder
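A note on the op_msa_wr dispatch above: each op_one_wr stanza is padded to
16 bytes by ".align 4" (three instructions plus padding), so the stub for
vector register N lives at N * 16 bytes from the local label 0f, which is
exactly what "sll t0, a0, 4" computes before the jr. The same dispatch,
sketched in C purely as an illustration of the control flow:

	/* Sketch: computed jump into a table of 16-byte register stubs */
	typedef void (*msa_stub_fn)(void);

	static void example_msa_dispatch(unsigned int wr, const char *table)
	{
		/* each stub performs st.<fmt>/ld.<fmt> $w<wr>, 0(a1) */
		msa_stub_fn stub = (msa_stub_fn)(table + (wr << 4));

		stub();
	}
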
 
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 04cbbde3..92cd051 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -34,7 +34,7 @@
 #ifndef USE_ALTERNATE_RESUME_IMPL
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *		       struct thread_info *next_ti, s32 fp_save)
+ *		       struct thread_info *next_ti)
  */
 	.align	5
 	LEAF(resume)
@@ -43,45 +43,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-	/*
-	 * Check whether we need to save any FP context. FP context is saved
-	 * iff the process has used the context with the scalar FPU or the MSA
-	 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
-	 * _TIF_USEDMSA respectively. switch_to will have set fp_save
-	 * accordingly to an FP_SAVE_ enum value.
-	 */
-	beqz	a3, 2f
-
-	/*
-	 * We do. Clear the saved CU1 bit for prev, such that next time it is
-	 * scheduled it will start in userland with the FPU disabled. If the
-	 * task uses the FPU then it will be enabled again via the do_cpu trap.
-	 * This allows us to lazily restore the FP context.
-	 */
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-	LONG_L	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	LONG_S	t0, ST_OFF(t3)
-
-	/* Check whether we're saving scalar or vector context. */
-	bgtz	a3, 1f
-
-	/* Save 128b MSA vector context + scalar FP control & status. */
-	.set push
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-	msa_save_all	a0
-	.set pop	/* SET_HARDFLOAT */
-
-	sw	t1, THREAD_FCR31(a0)
-	b	2f
-
-1:	/* Save 32b/64b scalar FP context. */
-	fpu_save_double a0 t0 t1		# c0_status passed in t0
-						# clobbers t1
-2:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 0b85f82..f50d484 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,4 +31,13 @@
 #define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
 #define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
 
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
 #endif	/* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6a28c79..2fec67b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
+#include <linux/uprobes.h>
 #include <linux/compiler.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
@@ -38,20 +39,21 @@
 #include <asm/vdso.h>
 #include <asm/dsp.h>
 #include <asm/inst.h>
+#include <asm/msa.h>
 
 #include "signal-common.h"
 
-static int (*save_fp_context)(struct sigcontext __user *sc);
-static int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
 
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
+
+	/* Matches struct ucontext from its uc_mcontext field onwards */
 	struct sigcontext sf_sc;
 	sigset_t sf_mask;
+	unsigned long long sf_extcontext[0];
 };
 
 struct rt_sigframe {
@@ -65,43 +67,255 @@
  * Thread saved context copy to/from a signal context presumed to be on the
  * user stack, and therefore accessed with appropriate macros from uaccess.h.
  */
-static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_to_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
 		err |=
 		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
+			       &fpregs[i]);
 	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __put_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
-static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_from_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 	u64 fpr_val;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
+		err |= __get_user(fpr_val, &fpregs[i]);
 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
 	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __get_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
 /*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+	struct ucontext __user *uc;
+
+	/*
+	 * We can just pretend the sigcontext is always embedded in a struct
+	 * ucontext here, because the offset from sigcontext to extended
+	 * context is the same in the struct sigframe case.
+	 */
+	uc = container_of(sc, struct ucontext, uc_mcontext);
+	return &uc->uc_extcontext;
+}
+
+static int save_msa_extcontext(void __user *buf)
+{
+	struct msa_extcontext __user *msa = buf;
+	uint64_t val;
+	int i, err;
+
+	if (!thread_msa_context_live())
+		return 0;
+
+	/*
+	 * Ensure that we can't lose the live MSA context between checking
+	 * for it & writing it to memory.
+	 */
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be saved to kernel memory
+		 * and then copied to user memory. The save to kernel memory
+		 * should already have been done when handling scalar FP
+		 * context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		err = __put_user(read_msa_csr(), &msa->csr);
+		err |= _save_msa_all_upper(&msa->wr);
+
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+			err |= __put_user(val, &msa->wr[i]);
+		}
+	}
+
+	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+	err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+	return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+	struct msa_extcontext __user *msa = buf;
+	unsigned long long val;
+	unsigned int csr;
+	int i, err;
+
+	if (size != sizeof(*msa))
+		return -EINVAL;
+
+	err = get_user(csr, &msa->csr);
+	if (err)
+		return err;
+
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be copied to kernel
+		 * memory and later loaded to registers. The same is true of
+		 * scalar FP context, so FPU & MSA should have already been
+		 * disabled whilst handling scalar FP context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		write_msa_csr(csr);
+		err |= _restore_msa_all_upper(&msa->wr);
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		current->thread.fpu.msacsr = csr;
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			err |= __get_user(val, &msa->wr[i]);
+			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+		}
+	}
+
+	return err;
+}
+
+static int save_extcontext(void __user *buf)
+{
+	int sz;
+
+	sz = save_msa_extcontext(buf);
+	if (sz < 0)
+		return sz;
+	buf += sz;
+
+	/* If no context was saved then trivially return */
+	if (!sz)
+		return 0;
+
+	/* Write the end marker */
+	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+		return -EFAULT;
+
+	sz += sizeof(((struct extcontext *)NULL)->magic);
+	return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+	struct extcontext ext;
+	int err;
+
+	while (1) {
+		err = __get_user(ext.magic, (unsigned int *)buf);
+		if (err)
+			return err;
+
+		if (ext.magic == END_EXTCONTEXT_MAGIC)
+			return 0;
+
+		err = __get_user(ext.size, (unsigned int *)(buf
+			+ offsetof(struct extcontext, size)));
+		if (err)
+			return err;
+
+		switch (ext.magic) {
+		case MSA_EXTCONTEXT_MAGIC:
+			err = restore_msa_extcontext(buf, ext.size);
+			break;
+
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err)
+			return err;
+
+		buf += ext.size;
+	}
+}
+
+/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc)
+int protected_save_fp_context(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used, ext_sz;
 	int err;
-#ifndef CONFIG_EVA
+
+	used = used_math() ? USED_FP : 0;
+	if (!used)
+		goto fp_done;
+
+	if (!test_thread_flag(TIF_32BIT_FPREGS))
+		used |= USED_FR1;
+	if (test_thread_flag(TIF_HYBRID_FPREGS))
+		used |= USED_HYBRID_FPRS;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so
+	 * save to the kernel FP context & copy that to userland below.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(1);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -114,27 +328,57 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
+		err = __put_user(0, &fpregs[0]) |
+			__put_user(0, &fpregs[31]) |
+			__put_user(0, csr);
 		if (err)
-			break;	/* really bad sigcontext */
+			return err;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so saving fpu context directly
-	 * does not work.
-	 */
-	lose_fpu(1);
-	err = save_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+	if (err < 0)
+		return err;
+	used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+	return __put_user(used, used_math);
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+int protected_restore_fp_context(void __user *sc)
 {
-	int err, tmp __maybe_unused;
-#ifndef CONFIG_EVA
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used;
+	int err, sig = 0, tmp __maybe_unused;
+
+	err = __get_user(used, used_math);
+	conditional_used_math(used & USED_FP);
+
+	/*
+	 * The signal handler may have used FPU; give it up if the program
+	 * doesn't want it following sigreturn.
+	 */
+	if (err || !(used & USED_FP))
+		lose_fpu(0);
+	if (err)
+		return err;
+	if (!(used & USED_FP))
+		goto fp_done;
+
+	err = sig = fpcsr_pending(csr);
+	if (err < 0)
+		return err;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
+	 * disable the FPU here such that the code below simply copies to
+	 * the kernel FP context.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(0);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -147,28 +391,24 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
+		err = __get_user(tmp, &fpregs[0]) |
+			__get_user(tmp, &fpregs[31]) |
+			__get_user(tmp, csr);
 		if (err)
 			break;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so restoring fpu context
-	 * directly does not work.
-	 */
-	lose_fpu(0);
-	err = restore_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	if (used & USED_EXTCONTEXT)
+		err |= restore_extcontext(sc_to_extcontext(sc));
+
+	return err ?: sig;
 }
 
 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
-	unsigned int used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -191,19 +431,38 @@
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context. Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context(sc);
-	}
+	/*
+	 * Save FPU state to signal context. Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
+
 	return err;
 }
 
+static size_t extcontext_max_size(void)
+{
+	size_t sz = 0;
+
+	/*
+	 * The assumption here is that between this point & the point at which
+	 * the extended context is saved the size of the context should only
+	 * ever be able to shrink (if the task is preempted), but never grow.
+	 * That is, what this function returns is an upper bound on the size of
+	 * the extended context for the current task at the current time.
+	 */
+
+	if (thread_msa_context_live())
+		sz += sizeof(struct msa_extcontext);
+
+	/* If any context is saved then we'll append the end marker */
+	if (sz)
+		sz += sizeof(((struct extcontext *)NULL)->magic);
+
+	return sz;
+}
+
 int fpcsr_pending(unsigned int __user *fpcsr)
 {
 	int err, sig = 0;
@@ -223,21 +482,8 @@
 	return err ?: sig;
 }
 
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context(sc);
-	return err ?: sig;
-}
-
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
-	unsigned int used_math;
 	unsigned long treg;
 	int err = 0;
 	int i;
@@ -265,19 +511,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
@@ -285,6 +519,9 @@
 {
 	unsigned long sp;
 
+	/* Leave space for potential extended context */
+	frame_size += extcontext_max_size();
+
 	/* Default to using normal stack */
 	sp = regs->regs[29];
 
@@ -520,7 +757,11 @@
 	.setup_rt_frame = setup_rt_frame,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, rt_signal_trampoline),
-	.restart	= __NR_restart_syscall
+	.restart	= __NR_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
 
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
@@ -616,6 +857,9 @@
 
 	user_exit();
 
+	if (thread_info_flags & _TIF_UPROBE)
+		uprobe_notify_resume(regs);
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -629,43 +873,46 @@
 }
 
 #ifdef CONFIG_SMP
-#ifndef CONFIG_EVA
-static int smp_save_fp_context(struct sigcontext __user *sc)
+static int smp_save_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _save_fp_context(sc)
+	       ? save_hw_fp_context(sc)
 	       : copy_fp_to_sigcontext(sc);
 }
 
-static int smp_restore_fp_context(struct sigcontext __user *sc)
+static int smp_restore_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _restore_fp_context(sc)
+	       ? restore_hw_fp_context(sc)
 	       : copy_fp_from_sigcontext(sc);
 }
-#endif /* CONFIG_EVA */
 #endif
 
 static int signal_setup(void)
 {
-#ifndef CONFIG_EVA
+	/*
+	 * The offset from sigcontext to extended context should be the same
+	 * regardless of the type of signal, such that userland can always know
+	 * where to look if it wishes to find the extended context structures.
+	 */
+	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+		      offsetof(struct sigframe, sf_sc)) !=
+		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
 #ifdef CONFIG_SMP
 	/* For now just do the cpu_has_fpu check when the functions are invoked */
 	save_fp_context = smp_save_fp_context;
 	restore_fp_context = smp_restore_fp_context;
 #else
 	if (cpu_has_fpu) {
-		save_fp_context = _save_fp_context;
-		restore_fp_context = _restore_fp_context;
+		save_fp_context = save_hw_fp_context;
+		restore_fp_context = restore_hw_fp_context;
 	} else {
 		save_fp_context = copy_fp_to_sigcontext;
 		restore_fp_context = copy_fp_from_sigcontext;
 	}
 #endif /* CONFIG_SMP */
-#else
-	save_fp_context = copy_fp_to_sigcontext;
-	restore_fp_context = copy_fp_from_sigcontext;
-#endif
 
 	return 0;
 }
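The extended context area saved above is a simple stream of records, each
introduced by a 32-bit magic and a 32-bit size, with a bare
END_EXTCONTEXT_MAGIC word terminating the stream, which is what allows a
reader to skip record types it does not understand. A minimal walker over
such a buffer (sketched with plain pointers for brevity; the kernel itself
must use __get_user on the user stack, as restore_extcontext() does):

	struct example_ext_hdr {
		unsigned int magic;
		unsigned int size;
	};

	/* Sketch: locate the record with the given magic, if present */
	static void *example_find_extcontext(void *buf, unsigned int magic)
	{
		struct example_ext_hdr *hdr = buf;

		while (hdr->magic != END_EXTCONTEXT_MAGIC) {
			if (hdr->magic == magic)
				return hdr;
			hdr = (void *)hdr + hdr->size;
		}
		return NULL;
	}
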
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 5d7f263..f7e89524 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -36,12 +36,6 @@
 
 #include "signal-common.h"
 
-static int (*save_fp_context32)(struct sigcontext32 __user *sc);
-static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -74,99 +68,11 @@
 	struct ucontext32 rs_uc;
 };
 
-/*
- * Thread saved context copy to/from a signal context presumed to be on the
- * user stack, and therefore accessed with appropriate macros from uaccess.h.
- */
-static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-	u64 fpr_val;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
-	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = save_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_to_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, tmp __maybe_unused;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = restore_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_from_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
 static int setup_sigcontext32(struct pt_regs *regs,
 			      struct sigcontext32 __user *sc)
 {
 	int err = 0;
 	int i;
-	u32 used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -186,35 +92,18 @@
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
+	/*
+	 * Save FPU state to signal context.  Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context.  Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context32(sc);
-	}
 	return err;
 }
 
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context32(sc);
-	return err ?: sig;
-}
-
 static int restore_sigcontext32(struct pt_regs *regs,
 				struct sigcontext32 __user *sc)
 {
-	u32 used_math;
 	int err = 0;
 	s32 treg;
 	int i;
@@ -238,70 +127,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context32(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-
-	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__put_sigset_unknown_nsig();
-	case 2:
-		err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
-		err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
-	case 1:
-		err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
-		err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
-	}
-
-	return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-	unsigned long sig[4];
-
-	if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__get_sigset_unknown_nsig();
-	case 2:
-		err |= __get_user(sig[3], &ubuf->sig[3]);
-		err |= __get_user(sig[2], &ubuf->sig[2]);
-		kbuf->sig[1] = sig[2] | (sig[3] << 32);
-	case 1:
-		err |= __get_user(sig[1], &ubuf->sig[1]);
-		err |= __get_user(sig[0], &ubuf->sig[0]);
-		kbuf->sig[0] = sig[0] | (sig[1] << 32);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 /*
@@ -585,20 +411,9 @@
 	.setup_rt_frame = setup_rt_frame_32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, o32_rt_signal_trampoline),
-	.restart	= __NR_O32_restart_syscall
+	.restart	= __NR_O32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
 };
-
-static int signal32_init(void)
-{
-	if (cpu_has_fpu) {
-		save_fp_context32 = _save_fp_context32;
-		restore_fp_context32 = _restore_fp_context32;
-	} else {
-		save_fp_context32 = copy_fp_to_sigcontext32;
-		restore_fp_context32 = copy_fp_from_sigcontext32;
-	}
-
-	return 0;
-}
-
-arch_initcall(signal32_init);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index f1d4751..0d017fd 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -153,5 +153,9 @@
 	.setup_rt_frame = setup_rt_frame_n32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, n32_rt_signal_trampoline),
-	.restart	= __NR_N32_restart_syscall
+	.restart	= __NR_N32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index d1168d7..8489c88 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -209,6 +209,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index 5b539f5..5f05539 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -21,24 +21,12 @@
 
 static void sysrq_tlbdump_single(void *dummy)
 {
-	const int field = 2 * sizeof(unsigned long);
 	unsigned long flags;
 
 	spin_lock_irqsave(&show_lock, flags);
 
 	pr_info("CPU%d:\n", smp_processor_id());
-	pr_info("Index	: %0x\n", read_c0_index());
-	pr_info("Pagemask: %0x\n", read_c0_pagemask());
-	pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
-	pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-	pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-	pr_info("Wired   : %0x\n", read_c0_wired());
-	pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
-	if (cpu_has_htw) {
-		pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
-		pr_info("PWSize  : %0*lx\n", field, read_c0_pwsize());
-		pr_info("PWCtl   : %0x\n", read_c0_pwctl());
-	}
+	dump_tlb_regs();
 	pr_info("\n");
 	dump_tlb_all();
 	pr_info("\n");
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8ea28e6..fdb392b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,11 +370,6 @@
 	set_fs(old_fs);
 }
 
-static int regs_to_trapnr(struct pt_regs *regs)
-{
-	return (regs->cp0_cause >> 2) & 0x1f;
-}
-
 static DEFINE_RAW_SPINLOCK(die_lock);
 
 void __noreturn die(const char *str, struct pt_regs *regs)
@@ -384,7 +379,7 @@
 
 	oops_enter();
 
-	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 		       SIGSEGV) == NOTIFY_STOP)
 		sig = 0;
 
@@ -470,7 +465,7 @@
 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 	       data ? "Data" : "Instruction",
 	       field, regs->cp0_epc, field, regs->regs[31]);
-	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 		       SIGBUS) == NOTIFY_STOP)
 		goto out;
 
@@ -826,7 +821,7 @@
 	int sig;
 
 	prev_state = exception_enter();
-	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 		       SIGFPE) == NOTIFY_STOP)
 		goto out;
 
@@ -882,11 +877,12 @@
 	char b[40];
 
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+			 SIGTRAP) == NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 		       SIGTRAP) == NOTIFY_STOP)
 		return;
 
@@ -948,6 +944,7 @@
 		set_fs(KERNEL_DS);
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		u16 instr[2];
 
@@ -987,15 +984,27 @@
 	 * pertain to them.
 	 */
 	switch (bcode) {
+	case BRK_UPROBE:
+		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
+	case BRK_UPROBE_XOL:
+		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
 	case BRK_KPROBE_BP:
 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
 	case BRK_KPROBE_SSTEPBP:
 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
@@ -1028,6 +1037,7 @@
 		set_fs(get_ds());
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -1094,8 +1104,9 @@
 no_r2_instr:
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 
-	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
 		       SIGILL) == NOTIFY_STOP)
 		goto out;
 
@@ -1444,8 +1455,9 @@
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
-		       regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
 		goto out;
 
 	/* Clear MSACSR.Cause before enabling interrupts */
@@ -1523,7 +1535,6 @@
 
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
-	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
 	enum ctx_state prev_state;
 	mm_segment_t old_fs = get_fs();
@@ -1532,19 +1543,8 @@
 	show_regs(regs);
 
 	if (multi_match) {
-		pr_err("Index	: %0x\n", read_c0_index());
-		pr_err("Pagemask: %0x\n", read_c0_pagemask());
-		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
-		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-		pr_err("Wired   : %0x\n", read_c0_wired());
-		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
-		if (cpu_has_htw) {
-			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
-			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
-			pr_err("PWCtl   : %0x\n", read_c0_pwctl());
-		}
-		pr_err("\n");
+		dump_tlb_regs();
+		pr_info("\n");
 		dump_tlb_all();
 	}
 
@@ -1651,6 +1651,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index eb3efd1..990354d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -891,6 +891,9 @@
 #ifdef	CONFIG_EVA
 	mm_segment_t seg;
 #endif
+	union fpureg *fpr;
+	enum msa_2b_fmt df;
+	unsigned int wd;
 	origpc = (unsigned long)pc;
 	orig31 = regs->regs[31];
 
@@ -1202,6 +1205,75 @@
 			break;
 		return;
 
+	case msa_op:
+		if (!cpu_has_msa)
+			goto sigill;
+
+		/*
+		 * If we've reached this point then userland should have taken
+		 * the MSA disabled exception & initialised vector context at
+		 * some point in the past.
+		 */
+		BUG_ON(!thread_msa_context_live());
+
+		df = insn.msa_mi10_format.df;
+		wd = insn.msa_mi10_format.wd;
+		fpr = &current->thread.fpu.fpr[wd];
+
+		switch (insn.msa_mi10_format.func) {
+		case msa_ld_op:
+			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Disable preemption to avoid a race between copying
+			 * state from userland, migrating to another CPU and
+			 * updating the hardware vector register below.
+			 */
+			preempt_disable();
+
+			res = __copy_from_user_inatomic(fpr, addr,
+							sizeof(*fpr));
+			if (res)
+				goto fault;
+
+			/*
+			 * Update the hardware register if it is in use by the
+			 * task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			if (test_thread_flag(TIF_USEDMSA))
+				write_msa_wr(wd, fpr, df);
+
+			preempt_enable();
+			break;
+
+		case msa_st_op:
+			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Update from the hardware register if it is in use by
+			 * the task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			preempt_disable();
+			if (test_thread_flag(TIF_USEDMSA))
+				read_msa_wr(wd, fpr, df);
+			preempt_enable();
+
+			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+			if (res)
+				goto fault;
+			break;
+
+		default:
+			goto sigbus;
+		}
+
+		compute_return_epc(regs);
+		break;
+
 #ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.
@@ -2240,5 +2312,5 @@
 		return -ENOMEM;
 	return 0;
 }
-__initcall(debugfs_unaligned);
+arch_initcall(debugfs_unaligned);
 #endif
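The msa_op emulation above relies on a simple ordering rule: the thread's
saved copy of a vector register may only be trusted after it has been
synchronised with the hardware register, and that synchronisation must
happen with preemption disabled so the TIF_USEDMSA check cannot race with
migration to another CPU. The store path reduced to that rule (a sketch
only; fault handling and the access_ok() check are omitted):

	/* Sketch: the ordering used by the msa_st_op path above */
	static int example_emulate_msa_store(union fpureg *fpr, unsigned int wd,
					     enum msa_2b_fmt df,
					     void __user *addr)
	{
		preempt_disable();
		if (test_thread_flag(TIF_USEDMSA))
			read_msa_wr(wd, fpr, df);	/* hardware -> saved copy */
		preempt_enable();

		/* the saved copy is now current; copy it out byte-wise */
		return __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
	}
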
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 0000000..8452d93
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,341 @@
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+	/*
+	 * jr and jalr are in r_format format.
+	 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+		case jr_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * This group contains:
+	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+	 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+		case bgez_op:
+		case bgezl_op:
+		case bltzal_op:
+		case bltzall_op:
+		case bgezal_op:
+		case bgezall_op:
+		case bposge32_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * These are unconditional and in j_format.
+	 */
+	case jal_op:
+	case j_op:
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op: /* not really i_format */
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+		return 1;
+
+	/*
+	 * And now the FPA/cp1 branch instructions.
+	 */
+	case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ *
+ * Return 0 on success or a negative number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr)
+{
+	union mips_instruction inst;
+
+	/*
+	 * For the time being this also blocks attempts to use uprobes with
+	 * MIPS16 and microMIPS.
+	 */
+	if (addr & 0x03)
+		return -EINVAL;
+
+	inst.word = aup->insn[0];
+	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* break */
+
+	return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+	union mips_instruction inst;
+
+	inst.word = *insn;
+
+	switch (inst.i_format.opcode) {
+	case spec_op:
+		switch (inst.r_format.func) {
+		case break_op:
+		case teq_op:
+		case tge_op:
+		case tgeu_op:
+		case tlt_op:
+		case tltu_op:
+		case tne_op:
+			return 1;
+		}
+		break;
+
+	case bcond_op:	/* Yes, really ...  */
+		switch (inst.u_format.rt) {
+		case teqi_op:
+		case tgei_op:
+		case tgeiu_op:
+		case tlti_op:
+		case tltiu_op:
+		case tnei_op:
+			return 1;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#define UPROBE_TRAP_NR	ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	union mips_instruction insn;
+
+	/*
+	 * Now find the EPC where to resume after the breakpoint has been
+	 * dealt with.  This may require emulation of a branch.
+	 */
+	aup->resume_epc = regs->cp0_epc + 4;
+	if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+		/* Emulate the branch to find the post-delay-slot EPC */
+		insn.word = aup->insn[0];
+		__compute_return_epc_for_insn(regs, insn);
+		aup->resume_epc = regs->cp0_epc;
+	}
+
+	utask->autask.saved_trap_nr = current->thread.trap_nr;
+	current->thread.trap_nr = UPROBE_TRAP_NR;
+	regs->cp0_epc = current->utask->xol_vaddr;
+
+	return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	current->thread.trap_nr = utask->autask.saved_trap_nr;
+	regs->cp0_epc = aup->resume_epc;
+
+	return 0;
+}
+
+/*
+ * If xol insn itself traps and generates a signal(Say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+		return true;
+
+	return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data)
+{
+	struct die_args *args = data;
+	struct pt_regs *regs = args->regs;
+
+	/* regs == NULL is a kernel bug */
+	if (WARN_ON(!regs))
+		return NOTIFY_DONE;
+
+	/* We are only interested in userspace traps */
+	if (!user_mode(regs))
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_UPROBE:
+		if (uprobe_pre_sstep_notifier(regs))
+			return NOTIFY_STOP;
+		break;
+	case DIE_UPROBE_XOL:
+		if (uprobe_post_sstep_notifier(regs))
+			return NOTIFY_STOP;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+	unsigned long ra;
+
+	ra = regs->regs[31];
+
+	/* Replace the return address with the trampoline address */
+	regs->regs[31] = trampoline_vaddr;
+
+	return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+	unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, restore the original instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+		 unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr,
+			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+				  void *src, unsigned long len)
+{
+	void *kaddr;
+
+	/* Initialize the slot */
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+	kunmap_atomic(kaddr);
+
+	/*
+	 * The MIPS version of flush_icache_range will operate safely on
+	 * user space addresses and more importantly, it doesn't require a
+	 * VMA argument.
+	 */
+	flush_icache_range(vaddr, vaddr + len);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate in place, so this always returns false and
+ * the instruction is single-stepped out of line instead.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	return false;
+}
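
For reference, the trap_nr sentinel scheme used above can be summarized in one
place. The following is an illustrative sketch, not code from the patch: it
compresses the arch_uprobe_pre_xol()/arch_uprobe_xol_was_trapped()/
arch_uprobe_post_xol() sequence (normally driven by the generic uprobes core)
into a single function.

	/* Illustrative only: how the UPROBE_TRAP_NR sentinel detects a
	 * trapping XOL instruction. */
	static bool xol_step_trapped_sketch(struct task_struct *tsk)
	{
		unsigned long saved = tsk->thread.trap_nr;	/* pre_xol saves  */

		tsk->thread.trap_nr = UPROBE_TRAP_NR;		/* plant sentinel */
		/*
		 * ... single-step the out-of-line copy here; a fault goes
		 * through do_page_fault()/do_trap(), which overwrite
		 * thread.trap_nr and thereby destroy the sentinel ...
		 */
		if (tsk->thread.trap_nr != UPROBE_TRAP_NR)	/* was_trapped()  */
			return true;
		tsk->thread.trap_nr = saved;			/* post_xol restores */
		return false;
	}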
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 11da314..9067b65 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -817,6 +817,7 @@
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
 	struct vpe *v;
 	Elf_Ehdr *hdr;
 	int ret = 0;
@@ -827,7 +828,7 @@
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
-		if ((vpe_elfload(v) >= 0) && vpe_run) {
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
 		} else {
 			pr_warn("VPE loader: ELF load failed.\n");
@@ -850,6 +851,10 @@
 	v->plen = 0;
 
 	return ret;
+#else
+	pr_warn("VPE loader: ELF load failed.\n");
+	return -ENOEXEC;
+#endif
 }
 
 static ssize_t vpe_write(struct file *file, const char __user *buffer,
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index a57959e..c710d96 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -270,4 +270,4 @@
 	return 0;
 }
 
-__initcall(lasat_register_sysctl);
+arch_initcall(lasat_register_sysctl);
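
For reference, this moves the registration earlier in boot: __initcall() is an
alias for device_initcall() (level 6), while arch_initcall() runs at level 3.
Abbreviated from include/linux/init.h:

	#define arch_initcall(fn)	__define_initcall(fn, 3)
	#define subsys_initcall(fn)	__define_initcall(fn, 4)
	#define fs_initcall(fn)		__define_initcall(fn, 5)
	#define device_initcall(fn)	__define_initcall(fn, 6)
	#define __initcall(fn)		device_initcall(fn)

The debugfs_fpuemu registration in arch/mips/math-emu/me-debugfs.c below gets
the same treatment.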
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 167f3563..92a3731 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -13,6 +13,33 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+void dump_tlb_regs(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
+	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+	pr_info("Wired    : %0x\n", read_c0_wired());
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+	case CPU_R16000:
+		pr_info("FrameMask: %0x\n", read_c0_framemask());
+		break;
+	}
+	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+	if (cpu_has_htw) {
+		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
+		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
+		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
+	}
+}
+
 static inline const char *msk2str(unsigned int mask)
 {
 	switch (mask) {
@@ -87,7 +114,7 @@
 		 * leave only a single G bit set after a machine check exception
 		 * due to duplicate TLB entry.
 		 */
-		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
 		    (entryhi & 0xff) != asid)
 			continue;
 
@@ -96,8 +123,8 @@
 		 */
 		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
 
-		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
-		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
+		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
 		printk("va=%0*lx asid=%02lx\n",
 		       vwidth, (entryhi & ~0x1fffUL),
@@ -114,9 +141,9 @@
 			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
 		       pwidth, pa, c0,
-		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
@@ -128,9 +155,9 @@
 			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
 		       pwidth, pa, c1,
-		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");
 
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8e0d3cf..cfcbb52 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -14,6 +14,17 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+extern int r3k_have_wired_reg;
+
+void dump_tlb_regs(void)
+{
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("EntryHi  : %0lx\n", read_c0_entryhi());
+	pr_info("EntryLo  : %0lx\n", read_c0_entrylo0());
+	if (r3k_have_wired_reg)
+		pr_info("Wired    : %0x\n", read_c0_wired());
+}
+
 static void dump_tlb(int first, int last)
 {
 	int	i;
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index df0f850..0996b02 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -126,26 +126,34 @@
 	return IRQ_HANDLED;
 }
 
-static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
-				     struct clock_event_device *cd)
+static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
 {
 	raw_spin_lock(&ls1x_timer_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
-		ls1x_pwmtimer_restart();
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
-			     timer_base + PWM_CTRL);
-		break;
-	default:
-		break;
-	}
+	ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+	ls1x_pwmtimer_restart();
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
+		     timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
 }
 
 static int ls1x_clockevent_set_next(unsigned long evt,
@@ -160,12 +168,15 @@
 }
 
 static struct clock_event_device ls1x_clockevent = {
-	.name		= "ls1x-pwmtimer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.rating		= 300,
-	.irq		= LS1X_TIMER_IRQ,
-	.set_next_event	= ls1x_clockevent_set_next,
-	.set_mode	= ls1x_clockevent_set_mode,
+	.name			= "ls1x-pwmtimer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.irq			= LS1X_TIMER_IRQ,
+	.set_next_event		= ls1x_clockevent_set_next,
+	.set_state_shutdown	= ls1x_clockevent_set_state_shutdown,
+	.set_state_periodic	= ls1x_clockevent_set_state_periodic,
+	.set_state_oneshot	= ls1x_clockevent_set_state_shutdown,
+	.tick_resume		= ls1x_clockevent_tick_resume,
 };
 
 static struct irqaction ls1x_pwmtimer_irqaction = {
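
This hunk, and the cs5536 MFGPT and Loongson-3 HPET hunks below, are the same
mechanical clockevents conversion: the deprecated set_mode() multiplexer over
enum clock_event_mode becomes one int-returning callback per target state. A
minimal sketch of the pattern, for a hypothetical "foo" timer (names invented
for illustration):

	#include <linux/clockchips.h>

	static int foo_set_state_periodic(struct clock_event_device *evt)
	{
		/* program the hardware for periodic ticks */
		return 0;
	}

	static int foo_set_state_shutdown(struct clock_event_device *evt)
	{
		/* stop the counter */
		return 0;
	}

	static struct clock_event_device foo_clockevent = {
		.name			= "foo",
		.features		= CLOCK_EVT_FEAT_PERIODIC,
		.set_state_periodic	= foo_set_state_periodic,
		.set_state_shutdown	= foo_set_state_shutdown,
	};

Returning 0 signals success to the clockevents core; callbacks that cannot
fail simply always return 0.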
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 8750370..da77d41 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -51,40 +51,36 @@
 }
 EXPORT_SYMBOL(enable_mfgpt0_counter);
 
-static void init_mfgpt_timer(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int mfgpt_timer_set_periodic(struct clock_event_device *evt)
 {
 	raw_spin_lock(&mfgpt_lock);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
-		outw(0, MFGPT0_CNT);	/* set counter to 0 */
-		enable_mfgpt0_counter();
-		break;
+	outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
+	outw(0, MFGPT0_CNT);		/* set counter to 0 */
+	enable_mfgpt0_counter();
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
-		    evt->mode == CLOCK_EVT_MODE_ONESHOT)
-			disable_mfgpt0_counter();
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* The oneshot mode have very high deviation, Not use it! */
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
 	raw_spin_unlock(&mfgpt_lock);
+	return 0;
+}
+
+static int mfgpt_timer_shutdown(struct clock_event_device *evt)
+{
+	if (clockevent_state_periodic(evt) || clockevent_state_oneshot(evt)) {
+		raw_spin_lock(&mfgpt_lock);
+		disable_mfgpt0_counter();
+		raw_spin_unlock(&mfgpt_lock);
+	}
+
+	return 0;
 }
 
 static struct clock_event_device mfgpt_clockevent = {
 	.name = "mfgpt",
 	.features = CLOCK_EVT_FEAT_PERIODIC,
-	.set_mode = init_mfgpt_timer,
+
+	/* The oneshot mode has very high deviation, don't use it! */
+	.set_state_shutdown = mfgpt_timer_shutdown,
+	.set_state_periodic = mfgpt_timer_set_periodic,
 	.irq = CS5536_MFGPT_INTR,
 };
 
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 5c21cd3..bf9f1a7 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -78,55 +78,77 @@
 	/* Do nothing on Loongson-3 */
 }
 
-static void hpet_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int hpet_set_state_periodic(struct clock_event_device *evt)
 {
-	int cfg = 0;
+	int cfg;
 
 	spin_lock(&hpet_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("set clock event to periodic mode!\n");
-		/* stop counter */
-		hpet_stop_counter();
 
-		/* enables the timer0 to generate a periodic interrupt */
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_LEVEL;
-		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
-				HPET_TN_SETVAL | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
+	pr_info("set clock event to periodic mode!\n");
+	/* stop counter */
+	hpet_stop_counter();
 
-		/* set the comparator */
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
-		udelay(1);
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	/* enable timer0 to generate a periodic interrupt */
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_LEVEL;
+	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+		HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
 
-		/* start counter */
-		hpet_start_counter();
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_ENABLE;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("set clock event to one shot mode!\n");
-		cfg = hpet_read(HPET_T0_CFG);
-		/* set timer0 type
-		 * 1 : periodic interrupt
-		 * 0 : non-periodic(oneshot) interrupt
-		 */
-		cfg &= ~HPET_TN_PERIODIC;
-		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		hpet_enable_legacy_int();
-		break;
-	}
+	/* set the comparator */
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	udelay(1);
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+
+	/* start counter */
+	hpet_start_counter();
+
 	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_shutdown(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_ENABLE;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_oneshot(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	pr_info("set clock event to one shot mode!\n");
+	cfg = hpet_read(HPET_T0_CFG);
+	/*
+	 * set timer0 type
+	 * 1 : periodic interrupt
+	 * 0 : non-periodic(oneshot) interrupt
+	 */
+	cfg &= ~HPET_TN_PERIODIC;
+	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_tick_resume(struct clock_event_device *evt)
+{
+	spin_lock(&hpet_lock);
+	hpet_enable_legacy_int();
+	spin_unlock(&hpet_lock);
+
+	return 0;
 }
 
 static int hpet_next_event(unsigned long delta,
@@ -206,7 +228,10 @@
 	cd->name = "hpet";
 	cd->rating = 320;
 	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	cd->set_mode = hpet_set_mode;
+	cd->set_state_shutdown = hpet_set_state_shutdown;
+	cd->set_state_periodic = hpet_set_state_periodic;
+	cd->set_state_oneshot = hpet_set_state_oneshot;
+	cd->tick_resume = hpet_tick_resume;
 	cd->set_next_event = hpet_next_event;
 	cd->irq = HPET_T0_IRQ;
 	cd->cpumask = cpumask_of(cpu);
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 2e5f962..a19641d 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -4,9 +4,9 @@
 
 obj-y	+= cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
 	   dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
-	   dp_tint.o dp_fint.o \
+	   dp_tint.o dp_fint.o dp_maddf.o dp_msubf.o dp_2008class.o dp_fmin.o dp_fmax.o \
 	   sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
-	   sp_tint.o sp_fint.o \
+	   sp_tint.o sp_fint.o sp_maddf.o sp_msubf.o sp_2008class.o sp_fmin.o sp_fmax.o \
 	   dsemul.o
 
 lib-y	+= ieee754d.o \
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 712f17a..32f0e19 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1137,7 +1137,7 @@
 			break;
 
 		case mfhc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd -> gpr[rt] */
@@ -1148,7 +1148,7 @@
 			break;
 
 		case mthc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd <- gpr[rt] */
@@ -1181,6 +1181,24 @@
 			}
 			break;
 
+		case bc1eqz_op:
+		case bc1nez_op:
+			if (!cpu_has_mips_r6 || delay_slot(xcp))
+				return SIGILL;
+
+			cond = likely = 0;
+			switch (MIPSInst_RS(ir)) {
+			case bc1eqz_op:
+				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
+				    cond = 1;
+				break;
+			case bc1nez_op:
+				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
+				    cond = 1;
+				break;
+			}
+			goto branch_common;
+
 		case bc_op:
 			if (delay_slot(xcp))
 				return SIGILL;
@@ -1207,7 +1225,7 @@
 			case bct_op:
 				break;
 			}
-
+branch_common:
 			set_delay_slot(xcp);
 			if (cond) {
 				/*
@@ -1376,6 +1394,14 @@
 	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
 };
 
+static const unsigned char negative_cmptab[8] = {
+	0, /* Reserved */
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
+	IEEE754_CLT | IEEE754_CGT,
+	/* Reserved */
+};
+
 
 /*
  * Additional MIPS4 instructions
@@ -1717,6 +1743,126 @@
 			SPFROMREG(rv.s, MIPSInst_FS(ir));
 			break;
 
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				rv.w = 0;
+			else
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			else
+				rv.w = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754sp_tlong(fs);
+			rv.s = ieee754sp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754sp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754sp_abs;
 			goto scopuop;
@@ -1820,7 +1966,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754sp fs, ft;
 
@@ -1914,6 +2060,127 @@
 				return 0;
 			DPFROMREG(rv.d, MIPSInst_FS(ir));
 			break;
+
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				rv.l = 0;
+			else
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			else
+				rv.l = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754dp_tlong(fs);
+			rv.d = ieee754dp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754dp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754dp_abs;
 			goto dcopuop;
@@ -1997,7 +2264,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754dp fs, ft;
 
@@ -2021,8 +2288,11 @@
 			break;
 		}
 		break;
+	}
 
-	case w_fmt:
+	case w_fmt: {
+		union ieee754dp fs;
+
 		switch (MIPSInst_FUNC(ir)) {
 		case fcvts_op:
 			/* convert word to single precision real */
@@ -2036,10 +2306,65 @@
 			rv.d = ieee754dp_fint(fs.bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+#define CMPOP_MASK	0x7
+#define SIGN_BIT	(0x1 << 3)
+#define PREDICATE_BIT	(0x1 << 4)
+
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754sp fs, ft;
+
+			/* This is an R6 only instruction */
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is w_fmt for single precision so fix it */
+			rfmt = s_fmt;
+			/* default to false */
+			rv.w = 0;
+
+			/* CMP.condn.S */
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
+						  sig))
+				    rv.w = -1; /* true, all 1s */
+				if ((sig) &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754sp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.w = -1; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
 		}
-		break;
 	}
 
 	case l_fmt:
@@ -2060,11 +2385,60 @@
 			rv.d = ieee754dp_flong(bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
-		}
-		break;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754dp fs, ft;
 
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is l_fmt for double precision so fix it */
+			rfmt = d_fmt;
+			/* default to false */
+			rv.l = 0;
+
+			/* CMP.condn.D */
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754dp_cmp(fs, ft,
+						  cmptab[cmpop], sig))
+				    rv.l = -1LL; /* true, all 1s */
+				if (sig &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754dp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.l = -1LL; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
+		}
 	default:
 		return SIGILL;
 	}
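
The CMPOP_MASK/SIGN_BIT/PREDICATE_BIT decoding used in both the CMP.condn.S
and CMP.condn.D paths above can be exercised on its own. A user-space sketch,
assuming the same FUNC field layout as in the hunk (bits 0-2 select the
comparison, bit 3 the signalling variant, bit 4 negates the predicate, bit 5
is reserved and must be zero):

	#include <stdio.h>

	#define CMPOP_MASK	0x7
	#define SIGN_BIT	(0x1 << 3)
	#define PREDICATE_BIT	(0x1 << 4)

	/* Decode the 6-bit FUNC field of an R6 CMP.condn.fmt instruction. */
	static void decode_cmp_condn(unsigned int func)
	{
		printf("func=0x%02x cmpop=%u signalling=%d negated=%d reserved=%d\n",
		       func, func & CMPOP_MASK, !!(func & SIGN_BIT),
		       !!(func & PREDICATE_BIT), !!(func & 0x20));
	}

	int main(void)
	{
		decode_cmp_condn(0x02);	/* quiet predicate, cmpop 2 */
		decode_cmp_condn(0x0a);	/* signalling variant of cmpop 2 */
		decode_cmp_condn(0x11);	/* negated predicate, cmpop 1 */
		return 0;
	}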
diff --git a/arch/mips/math-emu/dp_2008class.c b/arch/mips/math-emu/dp_2008class.c
new file mode 100644
index 0000000..9dc39fc
--- /dev/null
+++ b/arch/mips/math-emu/dp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_2008class(union ieee754dp x)
+{
+	COMPXDP;
+
+	EXPLODEXDP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
+	switch (xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
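
The return values above form a one-hot class mask: negative classes occupy
bits 2-5 and positive classes bits 6-9, with sNaN/qNaN in bits 0-1. A small
user-space sketch reproducing the mapping (illustrative, not kernel code):

	#include <stdio.h>

	/* Mirror of the CLASS.fmt result encoding used above. */
	enum fp_class { C_SNAN, C_QNAN, C_INF, C_NORM, C_DNORM, C_ZERO };

	static int class2008(enum fp_class c, int negative)
	{
		switch (c) {
		case C_SNAN:	return 0x01;
		case C_QNAN:	return 0x02;
		case C_INF:	return 0x04 << (negative ? 0 : 4);
		case C_NORM:	return 0x08 << (negative ? 0 : 4);
		case C_DNORM:	return 0x10 << (negative ? 0 : 4);
		case C_ZERO:	return 0x20 << (negative ? 0 : 4);
		}
		return 0;
	}

	int main(void)
	{
		printf("-NORM -> 0x%03x\n", class2008(C_NORM, 1));	/* 0x008 */
		printf("+ZERO -> 0x%03x\n", class2008(C_ZERO, 0));	/* 0x200 */
		return 0;
	}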
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
new file mode 100644
index 0000000..fd71b8d
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.D : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.D: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(xs & ys);	/* max(+0, -0) is +0 */
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Both negative: the larger exponent/mantissa is the smaller value */
+	if (xs) {
+		/* Compare exponent */
+		if (xe > ye)
+			return y;
+		else if (xe < ye)
+			return x;
+
+		/* Compare mantissa */
+		if (xm < ym)
+			return x;
+		return y;
+	}
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(xs & ys);	/* maxNumMag(+0, -0) is +0 */
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
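
The NaN handling above implements the IEEE 754-2008 maxNum/maxNumMag
semantics: a quiet NaN in one operand is ignored in favour of the number,
while a signalling NaN raises Invalid Operation. C99's fmax()/fmin() follow
the same quiet-NaN rule, which allows a quick user-space cross-check of the
expected behaviour:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* maxNum/minNum: the number wins over a quiet NaN */
		printf("fmax(NAN, 3.0)  = %g\n", fmax(NAN, 3.0));	/* 3   */
		printf("fmin(-2.0, NAN) = %g\n", fmin(-2.0, NAN));	/* -2  */
		/* both NaN: NaN propagates */
		printf("fmax(NAN, NAN)  = %g\n", fmax(NAN, NAN));	/* nan */
		return 0;
	}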
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
new file mode 100644
index 0000000..c1072b0
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.D : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.D: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Both negative: the larger exponent/mantissa is the smaller value */
+	if (xs) {
+		/* Compare exponent */
+		if (xe > ye)
+			return x;
+		else if (xe < ye)
+			return y;
+
+		/* Compare mantissa */
+		if (xm < ym)
+			return y;
+		return x;
+	}
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
new file mode 100644
index 0000000..119eda9
--- /dev/null
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -0,0 +1,265 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm);
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bits xm, ym to give the high 64 bits rm with stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)y)
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
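
The four-partial-product block above computes the 128-bit product of the two
64-bit mantissas from 32-bit halves, later folding the low half into a sticky
bit. A standalone sketch of the same arithmetic, cross-checked against the
compiler's 128-bit type (unsigned __int128 is a GCC/Clang extension, assumed
here for the check only):

	#include <assert.h>
	#include <stdint.h>

	/* 64x64 -> 128 via 32-bit partial products, as in ieee754dp_maddf(). */
	static void mul64to128(uint64_t x, uint64_t y, uint64_t *hi, uint64_t *lo)
	{
		uint64_t lxm = (uint32_t)x, hxm = x >> 32;
		uint64_t lym = (uint32_t)y, hym = y >> 32;
		uint64_t lrm = lxm * lym, hrm = hxm * hym, t, at;

		t = lxm * hym;			/* first cross term  */
		at = lrm + (t << 32);
		hrm += at < lrm;		/* carry from low half */
		lrm = at;
		hrm += t >> 32;

		t = hxm * lym;			/* second cross term */
		at = lrm + (t << 32);
		hrm += at < lrm;
		lrm = at;
		hrm += t >> 32;

		*hi = hrm;
		*lo = lrm;
	}

	int main(void)
	{
		uint64_t hi, lo;
		unsigned __int128 p;

		mul64to128(0xfedcba9876543210ull, 0x0123456789abcdefull, &hi, &lo);
		p = (unsigned __int128)0xfedcba9876543210ull * 0x0123456789abcdefull;
		assert(hi == (uint64_t)(p >> 64) && lo == (uint64_t)p);
		return 0;
	}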
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
new file mode 100644
index 0000000..1224126
--- /dev/null
+++ b/arch/mips/math-emu/dp_msubf.c
@@ -0,0 +1,269 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm);
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bits xm, ym to give the high 64 bits rm with stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)y)
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index e0b5cc2..cbb36c1 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -33,7 +33,6 @@
 
 int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
 {
-	extern asmlinkage void handle_dsemulret(void);
 	struct emuframe __user *fr;
 	int err;
 
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index a5ca108..df94720 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -75,6 +75,16 @@
 
 union ieee754sp ieee754sp_sqrt(union ieee754sp x);
 
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+int ieee754sp_2008class(union ieee754sp x);
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y);
+
 /*
  * double precision (often aka double)
 */
@@ -99,6 +109,15 @@
 
 union ieee754dp ieee754dp_sqrt(union ieee754dp x);
 
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+int ieee754dp_2008class(union ieee754dp x);
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y);
 
 
 /* 5 types of floating point number
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 05389d5..6383e2c 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -65,8 +65,8 @@
 			vc = IEEE754_CLASS_INF;				\
 		else if (vm & SP_MBIT(SP_FBITS-1))			\
 			vc = IEEE754_CLASS_SNAN;			\
-	else								\
-		vc = IEEE754_CLASS_QNAN;				\
+		else							\
+			vc = IEEE754_CLASS_QNAN;			\
 	} else if (ve == SP_EMIN-1+SP_EBIAS) {				\
 		if (vm) {						\
 			ve = SP_EMIN;					\
@@ -105,8 +105,8 @@
 		if (vm) {						\
 			ve = DP_EMIN;					\
 			vc = IEEE754_CLASS_DNORM;			\
-	} else								\
-		vc = IEEE754_CLASS_ZERO;				\
+		} else							\
+			vc = IEEE754_CLASS_ZERO;			\
 	} else {							\
 		ve -= DP_EBIAS;						\
 		vm |= DP_HIDDEN_BIT;					\
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index f308e0f..506a67a 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -65,4 +65,4 @@
 
 	return 0;
 }
-__initcall(debugfs_fpuemu);
+arch_initcall(debugfs_fpuemu);
diff --git a/arch/mips/math-emu/sp_2008class.c b/arch/mips/math-emu/sp_2008class.c
new file mode 100644
index 0000000..ff62606
--- /dev/null
+++ b/arch/mips/math-emu/sp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_2008class(union ieee754sp x)
+{
+	COMPXSP;
+
+	EXPLODEXSP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
+	switch (xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
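
The CLASS.fmt result above is deliberately one-hot: a caller can test for several classes at once with a single AND against a combined mask. A minimal userspace sketch of the same encoding (the enum and helper below are illustrative stand-ins, not the kernel's ieee754 types):

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's class enumeration. */
    enum fp_class { C_SNAN, C_QNAN, C_INF, C_NORM, C_DNORM, C_ZERO };

    /* Mirror of the mask encoding: NaNs in bits 0-1, negative
     * classes in bits 2-5, positive classes in bits 6-9. */
    static int class2008(enum fp_class c, int sign)
    {
            switch (c) {
            case C_SNAN:  return 0x01;
            case C_QNAN:  return 0x02;
            case C_INF:   return 0x04 << (sign ? 0 : 4);
            case C_NORM:  return 0x08 << (sign ? 0 : 4);
            case C_DNORM: return 0x10 << (sign ? 0 : 4);
            case C_ZERO:  return 0x20 << (sign ? 0 : 4);
            }
            return 0;
    }

    int main(void)
    {
            printf("-inf  -> 0x%03x\n", class2008(C_INF, 1));  /* 0x004 */
            printf("+zero -> 0x%03x\n", class2008(C_ZERO, 0)); /* 0x200 */
            return 0;
    }
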
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
new file mode 100644
index 0000000..4d00084
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.S : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.S: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(0);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(0);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
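
The NaN cases implement IEEE 754-2008 maxNum semantics: a quiet NaN loses to any number, while a signaling NaN is routed through ieee754sp_nanxcpt() to raise the invalid-operation exception. C99's fmaxf() follows the same quiet-NaN rule, which gives a quick host-side sanity check (userspace sketch, not kernel code):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            /* maxNum: the number wins over a quiet NaN */
            printf("%g\n", fmaxf(NAN, 1.0f));  /* prints 1 */
            printf("%g\n", fmaxf(2.0f, NAN));  /* prints 2 */
            /* both NaN: the result is NaN */
            printf("%g\n", fmaxf(NAN, NAN));   /* prints nan */
            return 0;
    }
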
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
new file mode 100644
index 0000000..4eb1bb9
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.S : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.S: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
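
For zeros of opposite sign, the ZERO/ZERO case returns ieee754sp_zero(1), i.e. -0, matching the minNum convention that the negative zero is the smaller. C99 itself leaves fmin(+0, -0) unspecified, but IEEE-conformant libm implementations typically agree, so the following userspace sketch is a check under that assumption:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            float m = fminf(0.0f, -0.0f);

            /* signbit() distinguishes -0 from +0; on an IEEE-style
             * libm the min of mixed-sign zeros is -0. */
            printf("%s0\n", signbit(m) ? "-" : "+");
            return 0;
    }
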
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
new file mode 100644
index 0000000..dd1dd83
--- /dev/null
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -0,0 +1,255 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	/* rm = xm * ym, re = xe+ye basically */
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		SPXSRSYn(s);
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		SPXSRSYn(s);
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			SPXSRSX1();
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
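
The four 16x16 partial products above compute the high 32 bits of xm*ym without needing a 64-bit temporary, and fold any nonzero low bits into a sticky LSB for the later rounding step. A standalone sketch that cross-checks the same scheme against a plain 64-bit multiply:

    #include <assert.h>
    #include <stdint.h>

    /* High 32 bits of a*b, with bit 0 also set if any low bit was
     * lost - the "sticky" encoding used by the emulator's rounding. */
    static uint32_t mul_hi_sticky(uint32_t a, uint32_t b)
    {
            uint32_t la = a & 0xffff, ha = a >> 16;
            uint32_t lb = b & 0xffff, hb = b >> 16;
            uint32_t lo = la * lb;
            uint32_t hi = ha * hb;
            uint32_t t, at;

            t = la * hb;
            at = lo + (t << 16);
            hi += at < lo;          /* carry out of the low word */
            lo = at;
            hi += t >> 16;

            t = ha * lb;
            at = lo + (t << 16);
            hi += at < lo;
            lo = at;
            hi += t >> 16;

            return hi | (lo != 0);
    }

    int main(void)
    {
            uint64_t p = (uint64_t)0x89abcdefu * 0x12345678u;
            uint32_t want = (uint32_t)(p >> 32) | ((uint32_t)p != 0);

            assert(mul_hi_sticky(0x89abcdefu, 0x12345678u) == want);
            return 0;
    }
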
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
new file mode 100644
index 0000000..81c38b980
--- /dev/null
+++ b/arch/mips/math-emu/sp_msubf.c
@@ -0,0 +1,258 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	/* rm = xm * ym, re = xe+ye basically */
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* Flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		SPXSRSYn(s);
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		SPXSRSYn(s);
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			SPXSRSX1(); /* shift preserving sticky */
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
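
MSUBF differs from MADDF only in the sign of the product, which is why the code flips rs and then reuses the addition path unchanged. In terms of the C99 fma() family the operation is z - x*y, i.e. fmaf(-x, y, z) (userspace sketch):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            float z = 10.0f, x = 3.0f, y = 2.0f;

            /* MSUBF semantics: fd = fd - fs*ft, fused (one rounding) */
            printf("%g\n", fmaf(-x, y, z));  /* prints 4 */
            return 0;
    }
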
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fbea443..5d3a25e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1276,6 +1276,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf024..8f23cf0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -194,6 +194,40 @@
 		__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -380,6 +414,7 @@
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
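
The off/count comparison in mips_dma_mmap() is the usual guard that the requested window - vm_pgoff pages into the buffer, user_count pages long - stays inside the allocation. A standalone sketch of just that arithmetic (hypothetical helper name and values):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helper: may the caller map user_count pages at
     * page offset off into a buffer that is count pages long? */
    static bool mmap_window_ok(unsigned long off, unsigned long user_count,
                               unsigned long count)
    {
            return off < count && user_count <= (count - off);
    }

    int main(void)
    {
            printf("%d\n", mmap_window_ok(0, 4, 4));  /* 1: whole buffer */
            printf("%d\n", mmap_window_ok(2, 2, 4));  /* 1: tail half */
            printf("%d\n", mmap_window_ok(2, 3, 4));  /* 0: runs past end */
            return 0;
    }
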
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 852a41c..4b88fa03 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -57,12 +57,10 @@
 
 #ifdef CONFIG_KPROBES
 	/*
-	 * This is to notify the fault handler of the kprobes.	The
-	 * exception code is redundant as it is also carried in REGS,
-	 * but we pass it anyhow.
+	 * This is to notify the fault handler of the kprobes.
 	 */
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
-		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return;
 #endif
 
@@ -224,6 +222,7 @@
 			print_vma_addr(" ", regs->regs[31]);
 			pr_info("\n");
 		}
+		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
@@ -282,6 +281,7 @@
 		       field, (unsigned long) regs->cp0_epc,
 		       field, (unsigned long) regs->regs[31]);
 #endif
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 198a314..66d0f49 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/dma.h>
 #include <asm/kmap_types.h>
+#include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -333,9 +334,40 @@
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_maars)
+unsigned __weak platform_maar_init(unsigned num_pairs)
 {
-	return 0;
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
 }
 
 static void maar_init(void)
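
MAAR bounds are 64KiB-granular, so the generic implementation above bumps each region's lower bound up to the next 64KiB boundary and trims the span by the same amount. A worked sketch of the skip arithmetic with a hypothetical unaligned region:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0x00081234, size = 0x00100000;
            unsigned long skip = 0x10000 - (addr & 0xffff);

            unsigned long lower = addr + skip;            /* 0x00090000 */
            unsigned long upper = lower + size - 1 - skip;

            printf("lower=%#lx upper=%#lx\n", lower, upper);
            return 0;
    }
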
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 4ceafd1..53ea839 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
+#include <asm/mips-cm.h>
 
 /*
  * MIPS32/MIPS64 L2 cache handling
@@ -94,6 +95,38 @@
 	return 1;
 }
 
+static int __init mips_sc_probe_cm3(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned long cfg = read_gcr_l2_config();
+	unsigned long sets, line_sz, assoc;
+
+	if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
+		return 0;
+
+	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
+	sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
+	c->scache.sets = 64 << sets;
+
+	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
+	line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
+	c->scache.linesz = 2 << line_sz;
+
+	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
+	assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
+	c->scache.ways = assoc + 1;
+	c->scache.waysize = c->scache.sets * c->scache.linesz;
+	c->scache.waybit = __ffs(c->scache.waysize);
+
+	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+	return 1;
+}
+
+void __weak platform_early_l2_init(void)
+{
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -103,6 +136,15 @@
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
+	/*
+	 * Do we need some platform specific probing before
+	 * we configure L2?
+	 */
+	platform_early_l2_init();
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return mips_sc_probe_cm3();
+
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
 			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
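
mips_sc_probe_cm3() reads the L2 geometry straight out of GCR_L2_CONFIG: 64 << field sets, 2 << field byte lines, and associativity equal to the field plus one. A sketch that decodes hypothetical field values (the real masks and shifts live in asm/mips-cm.h):

    #include <stdio.h>

    /* Illustrative decode of already-extracted fields; extracting
     * them uses the CM_GCR_L2_CONFIG_* masks/shifts in the kernel. */
    static void decode_l2(unsigned sets_field, unsigned line_field,
                          unsigned assoc_field)
    {
            unsigned sets   = 64u << sets_field;
            unsigned linesz = 2u  << line_field;
            unsigned ways   = assoc_field + 1;

            printf("%u sets x %u ways x %u B lines = %u KiB\n",
                   sets, ways, linesz, sets * ways * linesz / 1024);
    }

    int main(void)
    {
            decode_l2(5, 5, 7);  /* 2048 sets, 8 ways, 64 B -> 1024 KiB */
            return 0;
    }
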
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 2b75b8f..b4f366f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -36,7 +36,7 @@
 		"nop\n\t"		\
 		".set	pop\n\t")
 
-static int r3k_have_wired_reg;			/* Should be in cpu_data? */
+int r3k_have_wired_reg;			/* Should be in cpu_data? */
 
 /* TLB operations. */
 static void local_flush_tlb_from(int entry)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index cec3e18..53c2478 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -303,3 +303,10 @@
 	if (!register_vsmp_smp_ops())
 		return;
 }
+
+void platform_early_l2_init(void)
+{
+	/* L2 configuration lives in the CM3 */
+	if (mips_cm_revision() >= CM_REV_CM3)
+		mips_cm_probe();
+}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index fa8f591..c6a6c7a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -382,122 +382,12 @@
 	/* Could change CM error mask register. */
 }
 
-
-static char *tr[8] = {
-	"mem",	"gcr",	"gic",	"mmio",
-	"0x04", "0x05", "0x06", "0x07"
-};
-
-static char *mcmd[32] = {
-	[0x00] = "0x00",
-	[0x01] = "Legacy Write",
-	[0x02] = "Legacy Read",
-	[0x03] = "0x03",
-	[0x04] = "0x04",
-	[0x05] = "0x05",
-	[0x06] = "0x06",
-	[0x07] = "0x07",
-	[0x08] = "Coherent Read Own",
-	[0x09] = "Coherent Read Share",
-	[0x0a] = "Coherent Read Discard",
-	[0x0b] = "Coherent Ready Share Always",
-	[0x0c] = "Coherent Upgrade",
-	[0x0d] = "Coherent Writeback",
-	[0x0e] = "0x0e",
-	[0x0f] = "0x0f",
-	[0x10] = "Coherent Copyback",
-	[0x11] = "Coherent Copyback Invalidate",
-	[0x12] = "Coherent Invalidate",
-	[0x13] = "Coherent Write Invalidate",
-	[0x14] = "Coherent Completion Sync",
-	[0x15] = "0x15",
-	[0x16] = "0x16",
-	[0x17] = "0x17",
-	[0x18] = "0x18",
-	[0x19] = "0x19",
-	[0x1a] = "0x1a",
-	[0x1b] = "0x1b",
-	[0x1c] = "0x1c",
-	[0x1d] = "0x1d",
-	[0x1e] = "0x1e",
-	[0x1f] = "0x1f"
-};
-
-static char *core[8] = {
-	"Invalid/OK",	"Invalid/Data",
-	"Shared/OK",	"Shared/Data",
-	"Modified/OK",	"Modified/Data",
-	"Exclusive/OK", "Exclusive/Data"
-};
-
-static char *causes[32] = {
-	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
-	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
-	"0x08", "0x09", "0x0a", "0x0b",
-	"0x0c", "0x0d", "0x0e", "0x0f",
-	"0x10", "0x11", "0x12", "0x13",
-	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
-	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
-	"0x1c", "0x1d", "0x1e", "0x1f"
-};
-
 int malta_be_handler(struct pt_regs *regs, int is_fixup)
 {
 	/* This duplicates the handling in do_be which seems wrong */
 	int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 
-	if (mips_cm_present()) {
-		unsigned long cm_error = read_gcr_error_cause();
-		unsigned long cm_addr = read_gcr_error_addr();
-		unsigned long cm_other = read_gcr_error_mult();
-		unsigned long cause, ocause;
-		char buf[256];
-
-		cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE_MSK;
-		if (cause != 0) {
-			cause >>= CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
-			if (cause < 16) {
-				unsigned long cca_bits = (cm_error >> 15) & 7;
-				unsigned long tr_bits = (cm_error >> 12) & 7;
-				unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
-				unsigned long stag_bits = (cm_error >> 3) & 15;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-
-				snprintf(buf, sizeof(buf),
-					 "CCA=%lu TR=%s MCmd=%s STag=%lu "
-					 "SPort=%lu\n",
-					 cca_bits, tr[tr_bits], mcmd[cmd_bits],
-					 stag_bits, sport_bits);
-			} else {
-				/* glob state & sresp together */
-				unsigned long c3_bits = (cm_error >> 18) & 7;
-				unsigned long c2_bits = (cm_error >> 15) & 7;
-				unsigned long c1_bits = (cm_error >> 12) & 7;
-				unsigned long c0_bits = (cm_error >> 9) & 7;
-				unsigned long sc_bit = (cm_error >> 8) & 1;
-				unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-				snprintf(buf, sizeof(buf),
-					 "C3=%s C2=%s C1=%s C0=%s SC=%s "
-					 "MCmd=%s SPort=%lu\n",
-					 core[c3_bits], core[c2_bits],
-					 core[c1_bits], core[c0_bits],
-					 sc_bit ? "True" : "False",
-					 mcmd[cmd_bits], sport_bits);
-			}
-
-			ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND_MSK) >>
-				 CM_GCR_ERROR_MULT_ERR2ND_SHF;
-
-			pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
-			       causes[cause], buf);
-			pr_err("CM_ADDR =%08lx\n", cm_addr);
-			pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
-
-			/* reprime cause register */
-			write_gcr_error_cause(0);
-		}
-	}
+	mips_cm_error_report();
 
 	return retval;
 }
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b769657..dadeb83 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -179,31 +179,6 @@
 	}
 }
 
-unsigned platform_maar_init(unsigned num_pairs)
-{
-	phys_addr_t mem_end = (physical_memsize & ~0xffffull) - 1;
-	struct maar_config cfg[] = {
-		/* DRAM preceding I/O */
-		{ 0x00000000, 0x0fffffff, MIPS_MAAR_S },
-
-		/* DRAM following I/O */
-		{ 0x20000000, mem_end, MIPS_MAAR_S },
-
-		/* DRAM alias in upper half of physical */
-		{ 0x80000000, 0x80000000 + mem_end, MIPS_MAAR_S },
-	};
-	unsigned i, num_cfg = ARRAY_SIZE(cfg);
-
-	/* If DRAM fits before I/O, drop the region following it */
-	if (physical_memsize <= 0x10000000) {
-		num_cfg--;
-		for (i = 1; i < num_cfg; i++)
-			cfg[i] = cfg[i + 1];
-	}
-
-	return maar_config(cfg, num_cfg, num_pairs);
-}
-
 phys_addr_t mips_cdmm_phys_base(void)
 {
 	/* This address is "typically unused" */
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 5f5d18b..3660dc6 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -87,7 +87,7 @@
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 	spin_lock_irqsave(&pd->node->piclock, flags);
@@ -97,7 +97,7 @@
 
 static void xlp_pic_disable(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 
 	BUG_ON(!pd);
@@ -108,7 +108,7 @@
 
 static void xlp_pic_mask_ack(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	clear_c0_eimr(pd->picirq);
 	ack_c0_eirr(pd->picirq);
@@ -116,7 +116,7 @@
 
 static void xlp_pic_unmask(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 
@@ -193,7 +193,7 @@
 	pic_data->picirq = picirq;
 	pic_data->node = nlm_get_node(node);
 	irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
-	irq_set_handler_data(xirq, pic_data);
+	irq_set_chip_data(xirq, pic_data);
 }
 
 void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
@@ -202,7 +202,7 @@
 	int xirq;
 
 	xirq = nlm_irq_to_xirq(node, irq);
-	pic_data = irq_get_handler_data(xirq);
+	pic_data = irq_get_chip_data(xirq);
 	if (WARN_ON(!pic_data))
 		return;
 	pic_data->extra_ack = xack;
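
These hunks are part of the 4.3 irqdesc cleanup: chained-handler setup now owns the handler-data slot, so per-IRQ driver state moves to chip data, stored with irq_set_chip_data() at mapping time and fetched with irq_data_get_irq_chip_data() in the irq_chip callbacks. A minimal kernel-style sketch of that pairing (hypothetical driver types, not the XLP code):

    #include <linux/irq.h>

    /* Hypothetical per-interrupt state for a cascaded PIC. */
    struct my_pic_irq {
            int hwirq;
    };

    static void my_pic_mask(struct irq_data *d)
    {
            struct my_pic_irq *pd = irq_data_get_irq_chip_data(d);

            /* ... mask pd->hwirq in hardware ... */
    }

    static struct irq_chip my_pic_chip = {
            .name     = "my-pic",
            .irq_mask = my_pic_mask,
    };

    /* At mapping time: attach the state as chip data, leaving the
     * handler-data slot free for irq_set_chained_handler_and_data(). */
    static void my_pic_map(unsigned int virq, struct my_pic_irq *pd)
    {
            irq_set_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
            irq_set_chip_data(virq, pd);
    }
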
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index f5fff22..0136b4f 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -82,8 +82,9 @@
 }
 
 /* IRQ_IPI_SMP_FUNCTION Handler */
-void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	generic_smp_call_function_interrupt();
@@ -91,8 +92,9 @@
 }
 
 /* IRQ_IPI_SMP_RESCHEDULE  handler */
-void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	scheduler_ipi();
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
index 7b066a4..c11b9c7 100644
--- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -152,7 +152,7 @@
 	0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
 };
 
-/* SATA PHY config for register block 2 0x0x8065 .. 0x0x80A4 */
+/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
 static const u8 sata_phy_config2[]  = {
 	0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
 	0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index a8f4144..80ec929 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -91,6 +91,8 @@
 		return 134;
 	case PIC_SATA_IRQ:
 		return 143;
+	case PIC_NAND_IRQ:
+		return 151;
 	case PIC_SPI_IRQ:
 		return 152;
 	case PIC_MMC_IRQ:
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 81f5895..3c9ec3d 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -91,6 +91,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_I6400:
 	case CPU_M5150:
 	case CPU_LOONGSON1:
 	case CPU_SB1:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 6a6e2cc..8f988a6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -392,6 +392,10 @@
 		op_model_mipsxx_ops.cpu_type = "mips/P5600";
 		break;
 
+	case CPU_I6400:
+		op_model_mipsxx_ops.cpu_type = "mips/I6400";
+		break;
+
 	case CPU_M5150:
 		op_model_mipsxx_ops.cpu_type = "mips/M5150";
 		break;
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index 3407495..bb14335f 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -131,7 +131,7 @@
  */
 static void xlp_msi_enable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -148,7 +148,7 @@
 
 static void xlp_msi_disable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -165,7 +165,7 @@
 
 static void xlp_msi_mask_ack(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	int link, vec;
 
 	link = nlm_irq_msilink(d->irq);
@@ -211,7 +211,7 @@
 	msixvec = nlm_irq_msixvec(d->irq);
 	link = nlm_irq_msixlink(msixvec);
 	pci_msi_mask_irq(d);
-	md = irq_data_get_irq_handler_data(d);
+	md = irq_data_get_irq_chip_data(d);
 
 	/* Ack MSI on bridge */
 	if (cpu_is_xlp9xx()) {
@@ -302,7 +302,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msiaddr = MSI_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -409,7 +409,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_MSIX_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msixaddr = MSIX_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -485,7 +485,7 @@
 	irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
 	for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
 		irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
-		irq_set_handler_data(i, md);
+		irq_set_chip_data(i, md);
 	}
 
 	for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
@@ -508,7 +508,7 @@
 		/* Initialize MSI-X extended irq space for the link  */
 		irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
 		irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
-		irq_set_handler_data(irq, md);
+		irq_set_chip_data(irq, md);
 	}
 }
 
@@ -520,7 +520,7 @@
 
 	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
 						md->msi_enabled_mask;
@@ -550,7 +550,7 @@
 
 	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
 	else
diff --git a/arch/mips/pci/ops-emma2rh.c b/arch/mips/pci/ops-emma2rh.c
index 710aef5..2dc97c4 100644
--- a/arch/mips/pci/ops-emma2rh.c
+++ b/arch/mips/pci/ops-emma2rh.c
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
-#include <asm/debug.h>
 
 #include <asm/emma/emma2rh.h>
 
@@ -40,10 +39,9 @@
 static int check_args(struct pci_bus *bus, u32 devfn, u32 * bus_num)
 {
 	/* check if the bus is top-level */
-	if (bus->parent != NULL) {
+	if (bus->parent != NULL)
 		*bus_num = bus->number;
-		db_assert(bus_num != NULL);
-	} else
+	else
 		*bus_num = 0;
 
 	if (*bus_num == 0) {
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 283157f..ad35a5e 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -312,8 +312,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler,
+					 apc);
 }
 
 static void ar71xx_pci_reset(void)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 0af362b..907d11d 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -321,8 +321,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler,
+					 apc);
 }
 
 static int ar724x_pci_probe(struct platform_device *pdev)
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index c5347d9..6a15dbd 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -20,7 +20,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 
-#include <asm/gpio.h>
 #include <asm/addrspace.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 80fafe6..53c8efa 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -129,7 +129,7 @@
 	rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
 }
 
-static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
 	struct rt3883_pci_controller *rpc;
 	u32 pending;
@@ -145,7 +145,7 @@
 	}
 
 	while (pending) {
-		unsigned bit = __ffs(pending);
+		unsigned irq, bit = __ffs(pending);
 
 		irq = irq_find_mapping(rpc->irq_domain, bit);
 		generic_handle_irq(irq);
@@ -225,8 +225,7 @@
 		return -ENODEV;
 	}
 
-	irq_set_handler_data(irq, rpc);
-	irq_set_chained_handler(irq, rt3883_pci_irq_handler);
+	irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc);
 
 	return 0;
 }
diff --git a/arch/mips/pistachio/Kconfig b/arch/mips/pistachio/Kconfig
new file mode 100644
index 0000000..97731ea
--- /dev/null
+++ b/arch/mips/pistachio/Kconfig
@@ -0,0 +1,13 @@
+config PISTACHIO_GPTIMER_CLKSRC
+	bool "Enable General Purpose Timer based clocksource"
+	depends on MACH_PISTACHIO
+	select CLKSRC_PISTACHIO
+	select MIPS_EXTERNAL_TIMER
+	help
+	  This option enables a clocksource driver based on a Pistachio
+	  SoC General Purpose external timer.
+
+	  If you want to enable CPUFreq, you need to enable
+	  this option.
+
+	  If you don't want to enable CPUFreq, you can leave this disabled.
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index 1207ec4..8b9cf64 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -88,7 +88,8 @@
 	* Make sure we have IRQ affinity.  It may have changed while
 	* we were processing the IRQ.
 	*/
-	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
+	if (!cpumask_test_cpu(smp_processor_id(),
+			      irq_data_get_affinity_mask(d)))
 		return;
 #endif
 
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 24bf057..a8e70a9 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -36,8 +36,8 @@
 	int freq_scale;
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt);
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
 
 static int systick_next_event(unsigned long delta,
 				struct clock_event_device *evt)
@@ -73,11 +73,12 @@
 		 * cevt-r4k uses 300, make sure systick
 		 * gets used if available
 		 */
-		.rating		= 310,
-		.features	= CLOCK_EVT_FEAT_ONESHOT,
-		.set_next_event	= systick_next_event,
-		.set_mode	= systick_set_clock_mode,
-		.event_handler	= systick_event_handler,
+		.rating			= 310,
+		.features		= CLOCK_EVT_FEAT_ONESHOT,
+		.set_next_event		= systick_next_event,
+		.set_state_shutdown	= systick_shutdown,
+		.set_state_oneshot	= systick_set_oneshot,
+		.event_handler		= systick_event_handler,
 	},
 };
 
@@ -87,33 +88,33 @@
 	.dev_id = &systick.dev,
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int systick_shutdown(struct clock_event_device *evt)
 {
 	struct systick_device *sdev;
 
 	sdev = container_of(evt, struct systick_device, dev);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (!sdev->irq_requested)
-			setup_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 1;
-		iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
-				systick.membase + SYSTICK_CONFIG);
-		break;
+	if (sdev->irq_requested)
+		free_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 0;
+	iowrite32(0, systick.membase + SYSTICK_CONFIG);
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		if (sdev->irq_requested)
-			free_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 0;
-		iowrite32(0, systick.membase + SYSTICK_CONFIG);
-		break;
+	return 0;
+}
 
-	default:
-		pr_err("%s: Unhandeled mips clock_mode\n", systick.dev.name);
-		break;
-	}
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+	struct systick_device *sdev;
+
+	sdev = container_of(evt, struct systick_device, dev);
+
+	if (!sdev->irq_requested)
+		setup_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 1;
+	iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+		  systick.membase + SYSTICK_CONFIG);
+
+	return 0;
 }
 
 static void __init ralink_systick_init(struct device_node *np)
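
This converts the driver to the new clockevents state machine: the single set_mode() callback with its mode enum is replaced by per-state callbacks that return int. A skeletal sketch of new-style registration (hypothetical device, kernel-style code):

    #include <linux/clockchips.h>

    static int my_timer_shutdown(struct clock_event_device *evt)
    {
            /* ... stop the hardware counter ... */
            return 0;
    }

    static int my_timer_set_oneshot(struct clock_event_device *evt)
    {
            /* ... arm the hardware for one-shot operation ... */
            return 0;
    }

    static struct clock_event_device my_clockevent = {
            .name               = "my-timer",
            .features           = CLOCK_EVT_FEAT_ONESHOT,
            .rating             = 300,
            .set_state_shutdown = my_timer_shutdown,
            .set_state_oneshot  = my_timer_set_oneshot,
    };
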
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index e31e8cd..9bd7a2d 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -23,6 +23,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/serial_8250.h>
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 5aa3df8..650d5d3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -140,6 +140,11 @@
 	return 0;
 }
 
+static int rb532_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	return 8 + 4 * 32 + gpio;
+}
+
 static struct rb532_gpio_chip rb532_gpio_chip[] = {
 	[0] = {
 		.chip = {
@@ -148,6 +153,7 @@
 			.direction_output	= rb532_gpio_direction_output,
 			.get			= rb532_gpio_get,
 			.set			= rb532_gpio_set,
+			.to_irq			= rb532_gpio_to_irq,
 			.base			= 0,
 			.ngpio			= 32,
 		},
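
The new .to_irq hook encodes the board's fixed GPIO-to-IRQ mapping: reading the constants, the GPIO interrupt range appears to start after 8 CPU IRQs plus four 32-line interrupt groups, so GPIO n maps to IRQ 136 + n (that interpretation is an inference from the formula, not from board documentation). A trivial check:

    #include <stdio.h>

    int main(void)
    {
            unsigned gpio;

            for (gpio = 0; gpio < 3; gpio++)
                    printf("gpio%u -> irq %u\n", gpio, 8 + 4 * 32 + gpio);
            return 0;
    }
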
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index a6d10f6..42d6cb9 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -64,12 +64,6 @@
 	return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
 }
 
-static void rt_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
@@ -124,7 +118,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= rt_next_event;
-	cd->set_mode		= rt_set_mode;
 	clockevents_register_device(cd);
 }
 
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 41a1d22..a4e55999 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -250,4 +250,4 @@
 	return 0;
 }
 
-__initcall(sibyte_bus_watcher);
+device_initcall(sibyte_bus_watcher);
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index cf8ec56..fb4b352 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -14,44 +14,33 @@
 #define SNI_COUNTER2_DIV	64
 #define SNI_COUNTER0_DIV	((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
 
-static void a20r_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int a20r_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV >> 8;
-		wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8;
+	wmb();
 
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV >> 8;
-		wmb();
-
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
+	wmb();
+	return 0;
 }
 
 static struct clock_event_device a20r_clockevent_device = {
-	.name		= "a20r-timer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
+	.name			= "a20r-timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
 
 	/* .mult, .shift, .max_delta_ns and .min_delta_ns left uninitialized */
 
-	.rating		= 300,
-	.irq		= SNI_A20R_IRQ_TIMER,
-	.set_mode	= a20r_set_mode,
+	.rating			= 300,
+	.irq			= SNI_A20R_IRQ_TIMER,
+	.set_state_periodic	= a20r_set_periodic,
 };
 
 static irqreturn_t a20r_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 2791b86..9d9962a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -117,22 +117,6 @@
 }
 EXPORT_SYMBOL(clk_put);
 
-/* GPIO support */
-
-#ifdef CONFIG_GPIOLIB
-int gpio_to_irq(unsigned gpio)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-#endif
-
 #define BOARD_VEC(board)	extern struct txx9_board_vec board;
 #include <asm/txx9/boards.h>
 #undef BOARD_VEC
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e..375e591 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -127,73 +131,6 @@
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef7..9a39ea9 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
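
With ATOMIC_OP(and/or/xor) generated in the atomic.h hunk above, the old
hand-rolled atomic_{set,clear}_mask() helpers become redundant: setting
bits is atomic_or(), and clearing bits is atomic_and() with the
complement. A hedged sketch of the replacement pattern (flags is an
illustrative variable, not from this patch):

	#include <linux/atomic.h>

	static atomic_t flags = ATOMIC_INIT(0);

	static void set_bits(int mask)
	{
		atomic_or(mask, &flags);	/* was atomic_set_mask() */
	}

	static void clear_bits(int mask)
	{
		atomic_and(~mask, &flags);	/* was atomic_clear_mask() */
	}

The tlb-smp.c caller above gets the same effect by casting the first
cpumask word to atomic_t.
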
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 269c23d..1a8f6f95 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -242,7 +242,6 @@
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 33b148f..0ffb08f 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -295,7 +295,6 @@
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_RCU_CPU_STALL_INFO=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 226f8ca9..2536965 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -126,6 +126,10 @@
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -185,6 +189,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 8cd0abf..1a16f1d 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -137,6 +137,8 @@
 	return __ioremap(offset, size, _PAGE_NO_CACHE);
 }
 #define ioremap_nocache(off, sz)	ioremap((off), (sz))
+#define ioremap_wc			ioremap_nocache
+#define ioremap_uc			ioremap_nocache
 
 extern void iounmap(const volatile void __iomem *addr);
 
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 413ec3c..ba5e1c7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -507,8 +507,8 @@
 	struct pt_regs *old_regs;
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
-#ifdef CONFIG_SMP
 	struct irq_data *irq_data;
+#ifdef CONFIG_SMP
 	cpumask_t dest;
 #endif
 
@@ -521,8 +521,13 @@
 		goto set_out;
 	irq = eirr_to_irq(eirr_val);
 
-#ifdef CONFIG_SMP
 	irq_data = irq_get_irq_data(irq);
+
+	/* Filter out spurious interrupts, mostly from serial port at bootup */
+	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
+		goto set_out;
+
+#ifdef CONFIG_SMP
 	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
 	if (irqd_is_per_cpu(irq_data) &&
 	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
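
The check added above rejects interrupts that fire before any handler is
registered, which the comment attributes mostly to the serial port during
boot. Condensed into a helper, the test looks like this (the NULL guard
is extra defensiveness, not present in the patch itself):

	static bool irq_is_spurious(int irq)
	{
		struct irq_data *d = irq_get_irq_data(irq);

		return !d || !irq_desc_has_action(irq_data_to_desc(d));
	}

irq_desc_has_action() and irq_data_to_desc() are existing genirq
helpers; moving the irq_data declaration out of the CONFIG_SMP block is
what lets UP builds share the new test.
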
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 7ef22e3..0b8d26d 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -821,7 +821,7 @@
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
 19:	ldd,ma	0(%sr3,%r26), %r29
-	sub,=	%r29, %r25, %r0
+	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
 20:	std,ma	%r24, 0(%sr3,%r26)
 	copy	%r0, %r28
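
In the LWS cas2 helper above, the plain "sub,=" completer tests only a
word-sized result, so two 64-bit values differing only in their upper 32
bits could compare equal and the store would run when it should not;
"sub,*=" makes the nullification test the full doubleword. The C-level
semantics being implemented, as a non-atomic sketch (atomicity comes
from the surrounding LWS locking, elided here):

	static int cas2_sketch(u64 *ptr, u64 old, u64 new)
	{
		if (*ptr != old)	/* must compare all 64 bits */
			return 1;	/* branch to cas2_end, no store */
		*ptr = new;		/* std,ma in the real sequence */
		return 0;
	}
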
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 70e105d..400acac0 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -202,7 +202,6 @@
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#ifdef CONFIG_SMP
 int update_cr16_clocksource(void)
 {
 	/* since the cr16 cycle counters are not synchronized across CPUs,
@@ -214,12 +213,6 @@
 
 	return 0;
 }
-#else
-int update_cr16_clocksource(void)
-{
-	return 0; /* no change */
-}
-#endif /*CONFIG_SMP*/
 
 void __init start_cpu_itimer(void)
 {
@@ -231,20 +224,14 @@
 	per_cpu(cpu_data, cpu).it_value = next_tick;
 }
 
-static struct platform_device rtc_generic_dev = {
-	.name = "rtc-generic",
-	.id = -1,
-};
-
 static int __init rtc_init(void)
 {
-	if (platform_device_register(&rtc_generic_dev) < 0)
-		printk(KERN_ERR "unable to register rtc device...\n");
+	struct platform_device *pdev;
 
-	/* not necessarily an error */
-	return 0;
+	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+	return PTR_ERR_OR_ZERO(pdev);
 }
-module_init(rtc_init);
+device_initcall(rtc_init);
 
 void read_persistent_clock(struct timespec *ts)
 {
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 15503ad..a762864 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -207,7 +207,7 @@
 	int fault;
 	unsigned int flags;
 
-	if (pagefault_disabled())
+	if (faulthandler_disabled())
 		goto no_context;
 
 	tsk = current;
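
pagefault_disabled() only reports an explicit pagefault_disable()
section, while the fault handler must bail out in any context that
cannot sleep. faulthandler_disabled() covers both; in kernels of this
vintage it is essentially the following (paraphrased from
<linux/uaccess.h>):

	static inline bool faulthandler_disabled(void)
	{
		return in_atomic() || pagefault_disabled();
	}
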
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5ef2711..b447918 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,6 +82,9 @@
 	bool
 	default y
 
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+        bool
+
 config PPC
 	bool
 	default y
@@ -155,6 +158,8 @@
 	select HAVE_PERF_EVENTS_NMI if PPC64
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
+	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select HAVE_ARCH_SECCOMP_FILTER
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -514,11 +519,6 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config PPC_HAS_HASH_64K
-	bool
-	depends on PPC64
-	default n
-
 config STDBINUTILS
 	bool "Using standard binutils settings"
 	depends on 44x
@@ -560,16 +560,16 @@
 	bool "4k page size"
 
 config PPC_16K_PAGES
-	bool "16k page size" if 44x || PPC_8xx
+	bool "16k page size"
+	depends on 44x || PPC_8xx
 
 config PPC_64K_PAGES
-	bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
-	depends on !PPC_FSL_BOOK3E
-	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+	bool "64k page size"
+	depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
 
 config PPC_256K_PAGES
-	bool "256k page size" if 44x
-	depends on !STDBINUTILS
+	bool "256k page size"
+	depends on 44x && !STDBINUTILS
 	help
 	  Make the page size 256k.
 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 05f464e..b9b4af2 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -67,7 +67,7 @@
 
 ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
 override CC	+= -mlittle-endian
-ifneq ($(COMPILER),clang)
+ifneq ($(cc-name),clang)
 override CC	+= -mno-strict-align
 endif
 override AS	+= -mlittle-endian
@@ -288,6 +288,26 @@
 pseries_le_defconfig:
 	$(call merge_into_defconfig,pseries_defconfig,le)
 
+PHONY += mpc85xx_defconfig
+mpc85xx_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-hw fsl-emb-nonhw)
+
+PHONY += mpc85xx_smp_defconfig
+mpc85xx_smp_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet32_smp_defconfig
+corenet32_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet64_smp_defconfig
+corenet64_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+
 define archhelp
   @echo '* zImage          - Build default images selected by kernel config'
   @echo '  zImage.*        - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
@@ -333,7 +353,7 @@
 # - Require gcc 4.0 or above on 64-bit
 # - gcc-4.2.0 has issues compiling modules on 64-bit
 checkbin:
-	@if test "${COMPILER}" != "clang" \
+	@if test "$(cc-name)" != "clang" \
 	    && test "$(cc-version)" = "0304" ; then \
 		if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
 			echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
@@ -342,14 +362,14 @@
 			false; \
 		fi ; \
 	fi
-	@if test "${COMPILER}" != "clang" \
+	@if test "$(cc-name)" != "clang" \
 	    && test "$(cc-version)" -lt "0400" \
 	    && test "x${CONFIG_PPC64}" = "xy" ; then \
                 echo -n "Sorry, GCC v4.0 or above is required to build " ; \
                 echo "the 64-bit powerpc kernel." ; \
                 false ; \
         fi
-	@if test "${COMPILER}" != "clang" \
+	@if test "$(cc-name)" != "clang" \
 	    && test "$(cc-fullversion)" = "040200" \
 	    && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
 		echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index ebf2022..426bf41 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -175,7 +175,7 @@
 
 /include/ "pq3-gpio-0.dtsi"
 
-	display@10000 {
+	display: display@10000 {
 		compatible = "fsl,diu", "fsl,p1022-diu";
 		reg = <0x10000 1000>;
 		interrupts = <64 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
index 1956dea..de76ae8 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -50,6 +50,8 @@
 		pci0 = &pci0;
 		pci1 = &pci1;
 		pci2 = &pci2;
+		vga = &display;
+		display = &display;
 	};
 
 	cpus {
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 9e9f7e2..9770d02 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -484,6 +484,11 @@
 		reg	   = <0xea000 0x4000>;
 	};
 
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
+
 /include/ "elo3-dma-0.dtsi"
 /include/ "elo3-dma-1.dtsi"
 /include/ "qoriq-espi-0.dtsi"
diff --git a/arch/powerpc/boot/dts/t1023rdb.dts b/arch/powerpc/boot/dts/t1023rdb.dts
index 06b090a..d3fa829 100644
--- a/arch/powerpc/boot/dts/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/t1023rdb.dts
@@ -60,7 +60,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "fsl,ifc-nand";
-			reg = <0x2 0x0 0x10000>;
+			reg = <0x1 0x0 0x10000>;
 		};
 	};
 
@@ -99,6 +99,17 @@
 		};
 
 		i2c@118100 {
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
+			current-sensor@41 {
+				compatible = "ti,ina220";
+				reg = <0x41>;
+				shunt-resistor = <1000>;
+			};
 		};
 	};
 
diff --git a/arch/powerpc/boot/dts/t1024rdb.dts b/arch/powerpc/boot/dts/t1024rdb.dts
index 733e723..bf05e32 100644
--- a/arch/powerpc/boot/dts/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/t1024rdb.dts
@@ -114,6 +114,12 @@
 				reg = <0x4c>;
 			};
 
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
 			eeprom@50 {
 				compatible = "atmel,24c256";
 				reg = <0x50>;
diff --git a/arch/powerpc/boot/dts/t1040d4rdb.dts b/arch/powerpc/boot/dts/t1040d4rdb.dts
new file mode 100644
index 0000000..2d1315a
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1040d4rdb.dts
@@ -0,0 +1,46 @@
+/*
+ * T1040D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1040D4RDB";
+	compatible = "fsl,T1040D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t1042d4rdb.dts b/arch/powerpc/boot/dts/t1042d4rdb.dts
new file mode 100644
index 0000000..846f8c8
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1042d4rdb.dts
@@ -0,0 +1,53 @@
+/*
+ * T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1042D4RDB";
+	compatible = "fsl,T1042D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	ifc: localbus@ffe124000 {
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld",
+					"fsl,deepsleep-cpld";
+		};
+	};
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t104xd4rdb.dtsi b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
new file mode 100644
index 0000000..491367b
--- /dev/null
+++ b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
@@ -0,0 +1,205 @@
+/*
+ * T1040D4RDB/T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
+	ifc: localbus@ffe124000 {
+		reg = <0xf 0xfe124000 0 0x2000>;
+		ranges = <0 0 0xf 0xe8000000 0x08000000
+			  2 0 0xf 0xff800000 0x00010000
+			  3 0 0xf 0xffdf0000 0x00008000>;
+
+		nor@0,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "cfi-flash";
+			reg = <0x0 0x0 0x8000000>;
+			bank-width = <2>;
+			device-width = <1>;
+		};
+
+		nand@2,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "fsl,ifc-nand";
+			reg = <0x2 0x0 0x10000>;
+		};
+
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld";
+			reg = <3 0 0x300>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+	};
+
+	dcsr: dcsr@f00000000 {
+		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+	};
+
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
+	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
+
+		spi@110000 {
+			flash@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "micron,n25q512ax3";
+				reg = <0>;
+				/* input clock */
+				spi-max-frequency = <10000000>;
+			};
+		};
+		i2c@118000 {
+			hwmon@4c {
+				compatible = "adi,adt7461";
+				reg = <0x4c>;
+			};
+
+			rtc@68 {
+				compatible = "dallas,ds1337";
+				reg = <0x68>;
+				interrupts = <0x2 0x1 0 0>;
+			};
+		};
+
+		i2c@118100 {
+			mux@77 {
+				/*
+				 * Child nodes of mux depend on which i2c
+				 * devices are connected via the mini PCI
+				 * connector slot1, the mini PCI connector
+				 * slot2, the HDMI connector, and the PEX
+				 * slot. Systems with such devices attached
+				 * should provide a wrapper .dts file that
+				 * includes this one, and adds those nodes
+				 * includes this one, and adds those nodes.

+				compatible = "nxp,pca9546";
+				reg = <0x77>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+		};
+
+	};
+
+	pci0: pcie@ffe240000 {
+		reg = <0xf 0xfe240000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x0 0x0 0x10000000
+			  0x01000000 0 0x0 0xf 0xf8000000 0x0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci1: pcie@ffe250000 {
+		reg = <0xf 0xfe250000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x10000000 0 0x10000000
+			  0x01000000 0 0 0xf 0xf8010000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci2: pcie@ffe260000 {
+		reg = <0xf 0xfe260000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci3: pcie@ffe270000 {
+		reg = <0xf 0xfe270000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+};
diff --git a/arch/powerpc/configs/85xx-32bit.config b/arch/powerpc/configs/85xx-32bit.config
new file mode 100644
index 0000000..6b8894d
--- /dev/null
+++ b/arch/powerpc/configs/85xx-32bit.config
@@ -0,0 +1,5 @@
+CONFIG_HIGHMEM=y
+CONFIG_KEXEC=y
+CONFIG_PPC_85xx=y
+CONFIG_PROC_KCORE=y
+CONFIG_PHYS_64BIT=y
diff --git a/arch/powerpc/configs/85xx-64bit.config b/arch/powerpc/configs/85xx-64bit.config
new file mode 100644
index 0000000..4aba812
--- /dev/null
+++ b/arch/powerpc/configs/85xx-64bit.config
@@ -0,0 +1,4 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
+CONFIG_PPC64=y
+CONFIG_PPC_BOOK3E_64=y
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
new file mode 100644
index 0000000..528ff0e
--- /dev/null
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -0,0 +1,142 @@
+CONFIG_AQUANTIA_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_ATA=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_C293_PCIE=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_CICADA_PHY=y
+CONFIG_CLK_QORIQ=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DMADEVICES=y
+CONFIG_E1000E=y
+CONFIG_E1000=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
+CONFIG_EDAC=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_LEGACY=y
+CONFIG_FB_FSL_DIU=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_CORENET_CF=y
+CONFIG_FSL_DMA=y
+CONFIG_FSL_HV_MANAGER=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_FSL_RIO=y
+CONFIG_FSL_XGMAC_MDIO=y
+CONFIG_GIANFAR=y
+CONFIG_GPIO_MPC8XXX=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C=y
+CONFIG_IGB=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_NETDEVICES=y
+CONFIG_NVRAM=y
+CONFIG_PATA_ALI=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_VIA=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI=y
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_QE_GPIO=y
+CONFIG_QUICC_ENGINE=y
+CONFIG_RAPIDIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_SIL=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_QE=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_POWERPC_SOC=y
+# CONFIG_SND_PPC is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SPI_FSL_ESPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI=y
+CONFIG_TERANETICS_PHY=y
+CONFIG_UCC_GETH=y
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HID=m
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VITESSE_PHY=y
diff --git a/arch/powerpc/configs/85xx-smp.config b/arch/powerpc/configs/85xx-smp.config
new file mode 100644
index 0000000..3b4d1e5
--- /dev/null
+++ b/arch/powerpc/configs/85xx-smp.config
@@ -0,0 +1,2 @@
+CONFIG_NR_CPUS=24
+CONFIG_SMP=y
diff --git a/arch/powerpc/configs/altivec.config b/arch/powerpc/configs/altivec.config
new file mode 100644
index 0000000..58a697c
--- /dev/null
+++ b/arch/powerpc/configs/altivec.config
@@ -0,0 +1 @@
+CONFIG_ALTIVEC=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
deleted file mode 100644
index 3765993..0000000
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ /dev/null
@@ -1,185 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-CONFIG_HIGHMEM=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_KEXEC=y
-CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_SATA_SIL=y
-CONFIG_PATA_SIL680=y
-CONFIG_NETDEVICES=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_UIO=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_STAGING=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_CLK_QORIQ=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_RCU_TRACE=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
deleted file mode 100644
index 33cd1df..0000000
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ /dev/null
@@ -1,176 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_PPC_BOOK3E_64=y
-CONFIG_ALTIVEC=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=24
-CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CGROUPS=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_ATA=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000E=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_CLK_QORIQ=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=1024
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet_basic_defconfig b/arch/powerpc/configs/corenet_basic_defconfig
new file mode 100644
index 0000000..b568d46
--- /dev/null
+++ b/arch/powerpc/configs/corenet_basic_defconfig
@@ -0,0 +1 @@
+CONFIG_CORENET_GENERIC=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
new file mode 100644
index 0000000..41e4d35
--- /dev/null
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -0,0 +1,126 @@
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_AUDIT=y
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_BINFMT_MISC=m
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CRC_T10DIF=y
+CONFIG_CPUSETS=y
+CONFIG_CRAMFS=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DUMMY=y
+CONFIG_EFS_FS=m
+CONFIG_EXPERT=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS=y
+CONFIG_FB=y
+CONFIG_FHANDLE=y
+CONFIG_FIXED_PHY=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_8x8=y
+CONFIG_FONTS=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAME_WARN=1024
+CONFIG_FTL=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPFS_FS=m
+CONFIG_HUGETLBFS=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKCONFIG=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_PNP=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_SCTP=m
+CONFIG_IPV6=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_ISO9660_FS=m
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS=y
+CONFIG_JOLIET=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MSDOS_FS=m
+CONFIG_MTD_UBI=y
+CONFIG_MTD=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_NET_KEY=y
+CONFIG_NET=y
+CONFIG_NFSD=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
+CONFIG_NO_HZ=y
+CONFIG_NTFS_FS=y
+CONFIG_PACKET=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_QNX4FS_FS=m
+CONFIG_RCU_TRACE=y
+CONFIG_ROOT_NFS=y
+CONFIG_SYSV_FS=m
+CONFIG_SYSVIPC=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_UFS_FS=m
+CONFIG_UIO=y
+CONFIG_UNIX=y
+CONFIG_VFAT_FS=y
+CONFIG_VXFS_FS=m
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/mpc85xx_basic_defconfig b/arch/powerpc/configs/mpc85xx_basic_defconfig
new file mode 100644
index 0000000..850bd19
--- /dev/null
+++ b/arch/powerpc/configs/mpc85xx_basic_defconfig
@@ -0,0 +1,23 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MPC8536_DS=y
+CONFIG_MPC8540_ADS=y
+CONFIG_MPC8560_ADS=y
+CONFIG_MPC85xx_CDS=y
+CONFIG_MPC85xx_DS=y
+CONFIG_MPC85xx_MDS=y
+CONFIG_MPC85xx_RDB=y
+CONFIG_KSI8560=y
+CONFIG_MVME2500=y
+CONFIG_P1010_RDB=y
+CONFIG_P1022_DS=y
+CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
+CONFIG_SBC8548=y
+CONFIG_SOCRATES=y
+CONFIG_STX_GP3=y
+CONFIG_TQM8540=y
+CONFIG_TQM8541=y
+CONFIG_TQM8548=y
+CONFIG_TQM8555=y
+CONFIG_TQM8560=y
+CONFIG_XES_MPC85xx=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
deleted file mode 100644
index 6ecf7bd..0000000
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ /dev/null
@@ -1,252 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_MVME2500=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_PATA_VIA=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_IGB=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=6
-CONFIG_SERIAL_8250_RUNTIME_UARTS=6
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
deleted file mode 100644
index b6c7111..0000000
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ /dev/null
@@ -1,244 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index a97efc2..6bc0ee4 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -355,3 +355,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0d9efce..7991f37 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -190,7 +190,8 @@
 CONFIG_HVCS=m
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_IBM_BSR=m
-CONFIG_GEN_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
 CONFIG_RAW_DRIVER=y
 CONFIG_MAX_RAW_DEVS=1024
 CONFIG_FB=y
@@ -319,3 +320,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 050712e..ab9f4e0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -6,5 +6,4 @@
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
-generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 0cc6eed..85e88f7 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -7,14 +7,23 @@
 
 static inline int arch_get_random_long(unsigned long *v)
 {
-	if (ppc_md.get_random_long)
-		return ppc_md.get_random_long(v);
-
 	return 0;
 }
 
 static inline int arch_get_random_int(unsigned int *v)
 {
+	return 0;
+}
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+	if (ppc_md.get_random_seed)
+		return ppc_md.get_random_seed(v);
+
+	return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
 	unsigned long val;
 	int rc;
 
@@ -27,22 +36,13 @@
 
 static inline int arch_has_random(void)
 {
-	return !!ppc_md.get_random_long;
+	return 0;
 }
 
-static inline int arch_get_random_seed_long(unsigned long *v)
-{
-	return 0;
-}
-static inline int arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
 static inline int arch_has_random_seed(void)
 {
-	return 0;
+	return !!ppc_md.get_random_seed;
 }
-
 #endif /* CONFIG_ARCH_RANDOM */
 
 #ifdef CONFIG_PPC_POWERNV
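
The reshuffle above matches the semantics of the hook it wraps:
ppc_md.get_random_seed returns raw hardware entropy suited to seeding,
so it now backs arch_get_random_seed_*() rather than the fast
arch_get_random_*() path, which now truthfully reports that it has
nothing to offer. A caller-side sketch (mix_into_pool() is a
hypothetical consumer, not a real kernel function):

	static void seed_example(void)
	{
		unsigned long seed;

		/* Nonzero only when the platform hook produced entropy;
		 * callers fall back to other sources otherwise. */
		if (arch_get_random_seed_long(&seed))
			mix_into_pool(seed);	/* hypothetical consumer */
	}
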
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 512d278..55f106e 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -304,6 +308,9 @@
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 51ccc72..0eca6ef 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,12 +76,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
 	___p1;								\
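
The ACCESS_ONCE() -> READ_ONCE()/WRITE_ONCE() substitution above is part
of the tree-wide phase-out: ACCESS_ONCE() can silently misbehave on
non-scalar types with some compilers, while the newer macros handle any
size. The barriers themselves pair up as usual; a sketch with
illustrative names:

	static int data;
	static int ready;

	static void publish(int v)
	{
		data = v;
		smp_store_release(&ready, 1);	/* orders the data write first */
	}

	static int consume(void)
	{
		if (smp_load_acquire(&ready))	/* orders the data read after */
			return data;
		return -1;	/* nothing published yet */
	}
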
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 30b35ff..6229e6b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -40,7 +40,12 @@
 extern void flush_dcache_icache_page(struct page *page);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
 extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+	BUG();
+}
+#endif
 
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
 #ifdef CONFIG_PPC32
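
The #else stub added above follows a standard kernel pattern: when the
real implementation exists only on some configurations and callers are
supposed to be unreachable elsewhere, a static inline that calls BUG()
keeps those callers compiling while trapping loudly if the assumption is
ever violated. The generic shape of the pattern (config symbol and
function name are illustrative):

	#ifdef CONFIG_HAVE_REAL_FOO
	extern void foo(unsigned long addr);
	#else
	static inline void foo(unsigned long addr)
	{
		BUG();	/* this path must never be reached */
	}
	#endif
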
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 8251a3b..e8d9ef4 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -20,15 +20,6 @@
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum);
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
@@ -127,6 +118,34 @@
 #endif
 }
 
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+	u64 res = (__force u64)csum;
+
+	res += (__force u64)addend;
+	return (__force __wsum)((u32)res + (res >> 32));
+#else
+	asm("addc %0,%0,%1;"
+	    "addze %0,%0;"
+	    : "+r" (csum) : "r" (addend));
+	return csum;
+#endif
+}
+
 #endif
 #endif /* __KERNEL__ */
 #endif
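
The new inline csum_add() performs an end-around-carry add: on 64-bit
the operands are summed in a u64 and the carry word folded back in; on
32-bit the addc/addze pair folds the carry flag in hardware. For
comparison, the generic fallback used when no architecture override
exists looks essentially like this (paraphrased from
include/net/checksum.h):

	static inline __wsum csum_add(__wsum csum, __wsum addend)
	{
		u32 res = (__force u32)csum;

		res += (__force u32)addend;
		/* res < addend iff the 32-bit add wrapped: fold the
		 * carry back into bit 0, as one's-complement sums need. */
		return (__force __wsum)(res + (res < (__force u32)addend));
	}
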
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index b142b8e..4f2df58 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -174,6 +174,13 @@
 			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
 			int _fd;
 		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			unsigned int _call_addr; /* calling insn */
+			int _syscall;		 /* triggering system call number */
+			unsigned int _arch;	 /* AUDIT_ARCH_* of syscall */
+		} _sigsys;
 	} _sifields;
 } compat_siginfo_t;
 
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index e9bdda8..406c2b1 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -10,6 +10,7 @@
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
+struct iommu_table;
 #endif
 
 /*
@@ -23,13 +24,15 @@
 	struct dma_map_ops	*dma_ops;
 
 	/*
-	 * When an iommu is in use, dma_data is used as a ptr to the base of the
-	 * iommu_table.  Otherwise, it is a simple numerical offset.
+	 * These two used to be a union. However, with the hybrid ops we need
+	 * both, so here we store both a DMA offset for direct mappings and
+	 * an iommu_table for remapped DMA.
 	 */
-	union {
-		dma_addr_t	dma_offset;
-		void		*iommu_table_base;
-	} dma_data;
+	dma_addr_t		dma_offset;
+
+#ifdef CONFIG_PPC64
+	struct iommu_table	*iommu_table_base;
+#endif
 
 #ifdef CONFIG_IOMMU_API
 	void			*iommu_domain;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 9103687..710f60e 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -21,12 +21,12 @@
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 /* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag,
+extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t flag,
+					 struct dma_attrs *attrs);
+extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle,
 				       struct dma_attrs *attrs);
-extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle,
-				     struct dma_attrs *attrs);
 extern int dma_direct_mmap_coherent(struct device *dev,
 				    struct vm_area_struct *vma,
 				    void *cpu_addr, dma_addr_t handle,
@@ -106,7 +106,7 @@
 static inline dma_addr_t get_dma_offset(struct device *dev)
 {
 	if (dev)
-		return dev->archdata.dma_data.dma_offset;
+		return dev->archdata.dma_offset;
 
 	return PCI_DRAM_OFFSET;
 }
@@ -114,7 +114,7 @@
 static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 {
 	if (dev)
-		dev->archdata.dma_data.dma_offset = off;
+		dev->archdata.dma_offset = off;
 }
 
 /* this will be removed soon */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index e366187..ef89b14 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
 #define _ASM_POWERPC_FTRACE
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(_mcount))
+#define MCOUNT_ADDR		((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifdef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ca18cff..7b87bab 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -2,17 +2,17 @@
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  * Rewrite, cleanup:
  * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -131,16 +131,21 @@
 
 struct scatterlist;
 
-static inline void set_iommu_table_base(struct device *dev, void *base)
+#ifdef CONFIG_PPC64
+
+static inline void set_iommu_table_base(struct device *dev,
+					struct iommu_table *base)
 {
-	dev->archdata.dma_data.iommu_table_base = base;
+	dev->archdata.iommu_table_base = base;
 }
 
 static inline void *get_iommu_table_base(struct device *dev)
 {
-	return dev->archdata.dma_data.iommu_table_base;
+	return dev->archdata.iommu_table_base;
 }
 
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 
@@ -225,6 +230,20 @@
 }
 #endif /* !CONFIG_IOMMU_API */
 
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return NULL;
+}
+
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
 extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    struct scatterlist *sglist, int nelems,
 			    unsigned long mask,
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index efbf9a3..47e155f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
 #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE	4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
-		 : :  "i" (key) : : l_yes);
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
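
In both asm goto blocks above, the "i" operand &((char *)key)[branch]
stores the key address with the branch flag folded into its low bit;
struct static_key is word aligned, so the bit is unambiguous. A hedged
sketch of how generic code can recover both halves (illustrative
helpers, not the exact kernel ones):

    #include <stdbool.h>
    #include <stdint.h>

    struct static_key;

    static inline struct static_key *entry_key(uintptr_t stored)
    {
            return (struct static_key *)(stored & ~1UL); /* strip flag bit */
    }

    static inline bool entry_branch(uintptr_t stored)
    {
            return stored & 1UL;        /* the 'branch' argument from above */
    }
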
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 952579f..cab6753 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -249,7 +249,7 @@
 #endif
 
 #ifdef CONFIG_ARCH_RANDOM
-	int (*get_random_long)(unsigned long *v);
+	int (*get_random_seed)(unsigned long *v);
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 64dc9f5..8374afe 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -154,7 +154,10 @@
 #define OPAL_FLASH_WRITE			111
 #define OPAL_FLASH_ERASE			112
 #define OPAL_PRD_MSG				113
-#define OPAL_LAST				113
+#define OPAL_LEDS_GET_INDICATOR			114
+#define OPAL_LEDS_SET_INDICATOR			115
+#define OPAL_CEC_REBOOT2			116
+#define OPAL_LAST				116
 
 /* Device tree flags */
 
@@ -340,6 +343,18 @@
 	OPAL_ASSERT_RESET   = 1
 };
 
+enum OpalSlotLedType {
+	OPAL_SLOT_LED_TYPE_ID = 0,	/* IDENTIFY LED */
+	OPAL_SLOT_LED_TYPE_FAULT = 1,	/* FAULT LED */
+	OPAL_SLOT_LED_TYPE_ATTN = 2,	/* System Attention LED */
+	OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+	OPAL_SLOT_LED_STATE_OFF = 0,	/* LED is OFF */
+	OPAL_SLOT_LED_STATE_ON = 1	/* LED is ON */
+};
+
 /*
  * Address cycle types for LPC accesses. These also correspond
  * to the content of the first cell of the "reg" property for
@@ -438,6 +453,7 @@
 /* HMI interrupt event */
 enum OpalHMI_Version {
 	OpalHMIEvt_V1 = 1,
+	OpalHMIEvt_V2 = 2,
 };
 
 enum OpalHMI_Severity {
@@ -468,6 +484,49 @@
 	OpalHMI_ERROR_CAPP_RECOVERY,
 };
 
+enum OpalHMI_XstopType {
+	CHECKSTOP_TYPE_UNKNOWN	=	0,
+	CHECKSTOP_TYPE_CORE	=	1,
+	CHECKSTOP_TYPE_NX	=	2,
+};
+
+enum OpalHMI_CoreXstopReason {
+	CORE_CHECKSTOP_IFU_REGFILE		= 0x00000001,
+	CORE_CHECKSTOP_IFU_LOGIC		= 0x00000002,
+	CORE_CHECKSTOP_PC_DURING_RECOV		= 0x00000004,
+	CORE_CHECKSTOP_ISU_REGFILE		= 0x00000008,
+	CORE_CHECKSTOP_ISU_LOGIC		= 0x00000010,
+	CORE_CHECKSTOP_FXU_LOGIC		= 0x00000020,
+	CORE_CHECKSTOP_VSU_LOGIC		= 0x00000040,
+	CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE	= 0x00000080,
+	CORE_CHECKSTOP_LSU_REGFILE		= 0x00000100,
+	CORE_CHECKSTOP_PC_FWD_PROGRESS		= 0x00000200,
+	CORE_CHECKSTOP_LSU_LOGIC		= 0x00000400,
+	CORE_CHECKSTOP_PC_LOGIC			= 0x00000800,
+	CORE_CHECKSTOP_PC_HYP_RESOURCE		= 0x00001000,
+	CORE_CHECKSTOP_PC_HANG_RECOV_FAILED	= 0x00002000,
+	CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED	= 0x00004000,
+	CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ	= 0x00008000,
+	CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ	= 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+	NX_CHECKSTOP_SHM_INVAL_STATE_ERR	= 0x00000001,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1	= 0x00000002,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2	= 0x00000004,
+	NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR	= 0x00000008,
+	NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR	= 0x00000010,
+	NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR	= 0x00000020,
+	NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR	= 0x00000040,
+	NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR	= 0x00000080,
+	NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR	= 0x00000100,
+	NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR	= 0x00000200,
+	NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR	= 0x00000400,
+	NX_CHECKSTOP_DMA_CRB_UE			= 0x00000800,
+	NX_CHECKSTOP_DMA_CRB_SUE		= 0x00001000,
+	NX_CHECKSTOP_PBI_ISN_UE			= 0x00002000,
+};
+
 struct OpalHMIEvent {
 	uint8_t		version;	/* 0x00 */
 	uint8_t		severity;	/* 0x01 */
@@ -478,6 +537,23 @@
 	__be64		hmer;
 	/* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
 	__be64		tfmr;
+
+	/* version 2 and later */
+	union {
+		/*
+		 * checkstop info (Core/NX).
+		 * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+		 */
+		struct {
+			uint8_t	xstop_type;	/* enum OpalHMI_XstopType */
+			uint8_t reserved_1[3];
+			__be32  xstop_reason;
+			union {
+				__be32 pir;	/* for CHECKSTOP_TYPE_CORE */
+				__be32 chip_id;	/* for CHECKSTOP_TYPE_NX */
+			} u;
+		} xstop_error;
+	} u;
 };
 
 enum {
@@ -768,6 +844,52 @@
 	__be64 buffer_ra;		/* Buffer real address */
 };
 
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host will pass OPAL a buffer of length OPAL_SYSEPOW_MAX, with
+ * individual elements being 16 bits wide, to fetch the system-wide
+ * EPOW status. Each element in the buffer contains the EPOW status
+ * bits for a particular EPOW sub class as defined here. Multiple
+ * detailed EPOW status bits specific to any one sub class can
+ * therefore be represented in a single buffer element as its bit
+ * representation.
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+	OPAL_SYSEPOW_POWER	= 0,	/* Power EPOW */
+	OPAL_SYSEPOW_TEMP	= 1,	/* Temperature EPOW */
+	OPAL_SYSEPOW_COOLING	= 2,	/* Cooling EPOW */
+	OPAL_SYSEPOW_MAX	= 3,	/* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+	OPAL_SYSPOWER_UPS	= 0x0001, /* System on UPS power */
+	OPAL_SYSPOWER_CHNG	= 0x0002, /* System power config change */
+	OPAL_SYSPOWER_FAIL	= 0x0004, /* System impending power failure */
+	OPAL_SYSPOWER_INCL	= 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+	OPAL_SYSTEMP_AMB	= 0x0001, /* System over ambient temperature */
+	OPAL_SYSTEMP_INT	= 0x0002, /* System over internal temperature */
+	OPAL_SYSTEMP_HMD	= 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+	OPAL_SYSCOOL_INSF	= 0x0001, /* System insufficient cooling */
+};
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+	OPAL_REBOOT_NORMAL		= 0,
+	OPAL_REBOOT_PLATFORM_ERROR	= 1,
+};
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_API_H */
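
A hedged sketch of consuming the EPOW buffer described above, using the
opal_get_epow_status() prototype from the opal.h hunk that follows; the
function name and the message are illustrative:

    /* sketch: would live in platform code with <asm/opal.h> available */
    static void epow_poll_sketch(void)
    {
            __be16 epow[OPAL_SYSEPOW_MAX];
            __be16 nclasses = cpu_to_be16(OPAL_SYSEPOW_MAX);

            if (opal_get_epow_status(epow, &nclasses) != OPAL_SUCCESS)
                    return;

            if (be16_to_cpu(epow[OPAL_SYSEPOW_POWER]) & OPAL_SYSPOWER_UPS)
                    pr_info("system is running on UPS power\n");
    }
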
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 958e941..8001159 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -44,6 +44,7 @@
 		       uint32_t hour_min);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
@@ -141,7 +142,8 @@
 int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(__be64 *status);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
 			    __be16 *pci_error_type, __be16 *severity);
@@ -195,6 +197,10 @@
 int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
 			 struct opal_i2c_request *oreq);
 int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+			  __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+			  const u64 led_value, __be64 *max_led_type);
 
 int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
 		uint64_t size, uint64_t token);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 712add5..37fc535 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -42,6 +42,7 @@
 #endif
 
 	int             (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct pci_dev *dev);
 
 	void		(*shutdown)(struct pci_controller *);
 };
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 3bb7488..fa1dfb7 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -134,11 +134,11 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
-#else
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
 #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
-#endif
 
 #endif /* __real_pte */
 
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 11a3863..0717693 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -169,6 +169,17 @@
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	if (is_kernel_addr(addr))
+		mb();
+#endif
 #endif
 }
 
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 4122a86..ca0c5bf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -61,6 +61,7 @@
 int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
 void eeh_pe_state_mark(struct eeh_pe *pe, int state);
 void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
 
 void eeh_sysfs_add_device(struct pci_dev *pdev);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 28ded5d..5afea36 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -264,7 +264,6 @@
 	u64		tm_tfhar;	/* Transaction fail handler addr */
 	u64		tm_texasr;	/* Transaction exception & summary */
 	u64		tm_tfiar;	/* Transaction fail instr address reg */
-	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
 	unsigned long	tm_tar;
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index b7c8d07..71537a3 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -109,7 +109,8 @@
  * the processor might need it for DMA coherency.
  */
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
+	defined(CONFIG_PPC_E500MC)
 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 #else
 #define _PAGE_BASE	(_PAGE_BASE_NC)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index af56b5c..aa1cc5f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1193,8 +1193,7 @@
 #ifdef CONFIG_PPC_BOOK3S_64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
 				     : : "r" (v) : "memory")
-#define mtmsrd(v)	__mtmsrd((v), 0)
-#define mtmsr(v)	mtmsrd(v)
+#define mtmsr(v)	__mtmsrd((v), 0)
 #else
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 				     : "r" ((unsigned long)(v)) \
@@ -1281,6 +1280,15 @@
 
 extern void ppc_save_regs(struct pt_regs *regs);
 
+static inline void update_power8_hid0(unsigned long hid0)
+{
+	/*
+	 *  The HID0 update on Power8 should at the very least be
+	 *  preceded by a SYNC instruction followed by an ISYNC
+	 *  instruction
+	 */
+	asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 7a4ede1..b77ef36 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -343,6 +343,7 @@
 extern void rtas_halt(void);
 extern void rtas_os_term(char *str);
 extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
 extern int rtas_get_power_level(int powerdomain, int *level);
 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
 extern bool rtas_indicator_present(int token, int *maxindex);
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
index a40fd49..51f80b4 100644
--- a/arch/powerpc/include/asm/spu_csa.h
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -241,12 +241,6 @@
  */
 struct spu_state {
 	struct spu_lscsa *lscsa;
-#ifdef CONFIG_SPU_FS_64K_LS
-	int		use_big_pages;
-	/* One struct page per 64k page */
-#define SPU_LSCSA_NUM_BIG_PAGES	(sizeof(struct spu_lscsa) / 0x10000)
-	struct page	*lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
-#endif
 	struct spu_problem_collapsed prob;
 	struct spu_priv1_collapsed priv1;
 	struct spu_priv2_collapsed priv2;
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index ff21b7a..ab9f3f0 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -22,10 +22,15 @@
 extern const unsigned long sys_call_table[];
 #endif /* CONFIG_FTRACE_SYSCALLS */
 
-static inline long syscall_get_nr(struct task_struct *task,
-				  struct pt_regs *regs)
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
-	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
+	/*
+	 * Note that we are returning an int here. That means 0xffffffff, ie.
+	 * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+	 * This is important for seccomp so that compat tasks can set r0 = -1
+	 * to reject the syscall.
+	 */
+	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -34,12 +39,6 @@
 	regs->gpr[3] = regs->orig_gpr3;
 }
 
-static inline long syscall_get_error(struct task_struct *task,
-				     struct pt_regs *regs)
-{
-	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
-}
-
 static inline long syscall_get_return_value(struct task_struct *task,
 					    struct pt_regs *regs)
 {
@@ -50,9 +49,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
+	/*
+	 * In the general case it's not obvious that we must deal with CCR
+	 * here, as the syscall exit path will also do that for us. However
+	 * there are some places, eg. the signal code, which check ccr to
+	 * decide if the value in r3 is actually an error.
+	 */
 	if (error) {
 		regs->ccr |= 0x10000000L;
-		regs->gpr[3] = -error;
+		regs->gpr[3] = error;
 	} else {
 		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
@@ -64,19 +69,22 @@
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
+	unsigned long val, mask = -1UL;
+
 	BUG_ON(i + n > 6);
-#ifdef CONFIG_PPC64
-	if (test_tsk_thread_flag(task, TIF_32BIT)) {
-		/*
-		 * Zero-extend 32-bit argument values.  The high bits are
-		 * garbage ignored by the actual syscall dispatch.
-		 */
-		while (n-- > 0)
-			args[n] = (u32) regs->gpr[3 + i + n];
-		return;
-	}
+
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(task, TIF_32BIT))
+		mask = 0xffffffff;
 #endif
-	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+	while (n--) {
+		if (n == 0 && i == 0)
+			val = regs->orig_gpr3;
+		else
+			val = regs->gpr[3 + i + n];
+
+		args[n] = val & mask;
+	}
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -86,6 +94,10 @@
 {
 	BUG_ON(i + n > 6);
 	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+
+	/* Also copy the first argument into orig_gpr3 */
+	if (i == 0 && n > 0)
+		regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
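
The int return type of syscall_get_nr() above is load-bearing, as the
new comment explains. A hedged user-space demonstration of the
truncation (the values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long gpr0 = 0xffffffffUL; /* "-1" from a compat task */
            int nr = (int)gpr0;            /* what syscall_get_nr() returns */

            printf("%d\n", nr == -1);      /* prints 1 */
            return 0;
    }
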
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 0000000..cf1ee75
--- /dev/null
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif  /* _ASM_PPC_TRACE_CLOCK_H */
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f44a0278..dab3717 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -6,6 +6,7 @@
 header-y += bootx.h
 header-y += byteorder.h
 header-y += cputable.h
+header-y += eeh.h
 header-y += elf.h
 header-y += epapr_hcalls.h
 header-y += errno.h
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index 8c145fd..e8b6b5f 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -6,6 +6,4 @@
 #undef	EDEADLOCK
 #define	EDEADLOCK	58	/* File locking deadlock error */
 
-#define _LAST_ERRNO	516
-
 #endif	/* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h
index 9c1f24f..3ad0c7f 100644
--- a/arch/powerpc/include/uapi/asm/sigcontext.h
+++ b/arch/powerpc/include/uapi/asm/sigcontext.h
@@ -28,7 +28,7 @@
 /*
  * To maintain compatibility with current implementations the sigcontext is
  * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
- * followed by an unstructured (vmx_reserve) field of 69 doublewords.  This
+ * followed by an unstructured (vmx_reserve) field of 101 doublewords. This
  * allows the array of vector registers to be quadword aligned independent of
  * the alignment of the containing sigcontext or ucontext. It is the
  * responsibility of the code setting the sigcontext to set this pointer to
@@ -80,7 +80,7 @@
  * registers and vscr/vrsave.
  */
 	elf_vrreg_t	__user *v_regs;
-	long		vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
+	long		vmx_reserve[ELF_NVRREG + ELF_NVRREG + 1 + 32];
 #endif
 };
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 12868b1..ba33693 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -118,6 +118,7 @@
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_TRACING)		+= trace_clock.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
 obj-y				+= iomap.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9823057..810f433 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -213,7 +213,6 @@
 		offsetof(struct tlb_core_data, esel_max));
 	DEFINE(TCD_ESEL_FIRST,
 		offsetof(struct tlb_core_data, esel_first));
-	DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 4c68bfe..41a7d9d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -73,7 +73,7 @@
 }
 
 /* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
+int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 6e8d764..c6689f6 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@
  * for everything else.
  */
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
+	.alloc = __dma_direct_alloc_coherent,
+	.free = __dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 35e4dcc..59503ed 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,6 +16,7 @@
 #include <asm/bug.h>
 #include <asm/machdep.h>
 #include <asm/swiotlb.h>
+#include <asm/iommu.h>
 
 /*
  * Generic direct DMA implementation
@@ -39,9 +40,31 @@
 	return pfn;
 }
 
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag,
-				struct dma_attrs *attrs)
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PPC64
+	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
+
+	/* Limit fits in the mask, we are good */
+	if (mask >= limit)
+		return 1;
+
+#ifdef CONFIG_FSL_SOC
+	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32; however,
+	 * this will have to be refined if/when they support iommus
+	 */
+	return 1;
+#endif
+	/* Sorry ... */
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flag,
+				  struct dma_attrs *attrs)
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -96,9 +119,9 @@
 #endif
 }
 
-void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle,
-			      struct dma_attrs *attrs)
+void __dma_direct_free_coherent(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle,
+				struct dma_attrs *attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	__dma_free_coherent(size, vaddr);
@@ -107,6 +130,51 @@
 #endif
 }
 
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* The coherent mask may be smaller than the real mask; check
+	 * whether we can really use the direct ops
+	 */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_alloc_coherent(dev, size, dma_handle,
+						   flag, attrs);
+
+	/* Ok we can't ... do we have an iommu ? If not, fail */
+	iommu = get_iommu_table_base(dev);
+	if (!iommu)
+		return NULL;
+
+	/* Try to use the iommu */
+	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
+				    dev->coherent_dma_mask, flag,
+				    dev_to_node(dev));
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* See comments in dma_direct_alloc_coherent() */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+						  attrs);
+	/* Maybe we used an iommu ... */
+	iommu = get_iommu_table_base(dev);
+
+	/* If we hit this, we should never have allocated in the first
+	 * place, so why are we freeing?
+	 */
+	if (WARN_ON(!iommu))
+		return;
+	iommu_free_coherent(iommu, size, vaddr, dma_handle);
+}
+
 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			     void *cpu_addr, dma_addr_t handle, size_t size,
 			     struct dma_attrs *attrs)
@@ -147,18 +215,6 @@
 {
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PPC64
-	/* Could be improved so platforms can set the limit in case
-	 * they have limited DMA windows
-	 */
-	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
-#else
-	return 1;
-#endif
-}
-
 static u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 end, mask;
@@ -230,6 +286,25 @@
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (!dma_supported(dev, mask)) {
+		/*
+		 * We need to special case the direct DMA ops which can
+		 * support a fallback for coherent allocations. There
+		 * is no dma_op->set_coherent_mask() so we have to do
+		 * things the hard way:
+		 */
+		if (get_dma_ops(dev) != &dma_direct_ops ||
+		    get_iommu_table_base(dev) == NULL ||
+		    !dma_iommu_dma_supported(dev, mask))
+			return -EIO;
+	}
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
 int __dma_set_mask(struct device *dev, u64 dma_mask)
@@ -278,6 +353,13 @@
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
 
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
+		if (phb->controller_ops.dma_get_required_mask)
+			return phb->controller_ops.dma_get_required_mask(pdev);
+	}
+
 	return __dma_get_required_mask(dev);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
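
With the hybrid ops above, a device whose coherent mask cannot cover
all of RAM no longer fails outright: dma_set_coherent_mask() accepts
the mask when an iommu can back coherent allocations. A hedged
driver-side sketch (the probe function and device are hypothetical):

    static int example_probe(struct pci_dev *pdev)      /* hypothetical */
    {
            int rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

            if (rc)
                    return rc; /* neither direct nor iommu ops can cover it */

            /* coherent allocations may now transparently be iommu-backed */
            return 0;
    }
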
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index af9b597..e968533 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -308,11 +308,26 @@
 	if (!(pe->type & EEH_PE_PHB)) {
 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
-		eeh_ops->configure_bridge(pe);
-		eeh_pe_restore_bars(pe);
 
-		pci_regs_buf[0] = 0;
-		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		/*
+		 * The config space of some PCI devices can't be accessed
+		 * while their PEs are in the frozen state; the access
+		 * could fence the PHB. Those PEs are identified with the
+		 * flag EEH_PE_CFG_RESTRICTED, meaning EEH_PE_CFG_BLOCKED
+		 * is set automatically when the PE is put into EEH_PE_ISOLATED.
+		 *
+		 * Restoring BARs may trigger PCI config accesses in
+		 * (OPAL) firmware and thereby fence the PHB. If PCI
+		 * config access is blocked via EEH_PE_CFG_BLOCKED, it's
+		 * pointless to restore BARs and dump the config space.
+		 */
+		eeh_ops->configure_bridge(pe);
+		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
+			eeh_pe_restore_bars(pe);
+
+			pci_regs_buf[0] = 0;
+			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		}
 	}
 
 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -750,14 +765,14 @@
 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 		break;
 	case pcie_hot_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		eeh_ops->reset(pe, EEH_RESET_HOT);
 		break;
 	case pcie_warm_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
@@ -1116,9 +1131,6 @@
 		return;
 	}
 
-	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
-		eeh_ops->probe(pdn, NULL);
-
 	/*
 	 * The EEH cache might not be removed correctly because of
 	 * unbalanced kref to the device during unplug time, which
@@ -1142,6 +1154,9 @@
 		dev->dev.archdata.edev = NULL;
 	}
 
+	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+		eeh_ops->probe(pdn, NULL);
+
 	edev->pdev = dev;
 	dev->dev.archdata.edev = edev;
 
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 35f0b62..8654cb1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -657,6 +657,28 @@
 	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
 }
 
+/**
+ * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space
+ * @pe: PE
+ * @state: PE state to be set
+ *
+ * Set the specified flag on the PE and its child PEs. The PCI config
+ * space of some PEs is blocked automatically when EEH_PE_ISOLATED is
+ * set, which isn't wanted in some situations. This function sets the
+ * specified flag on the indicated PEs without blocking their PCI
+ * config space.
+ */
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state)
+{
+	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
+	if (!(state & EEH_PE_ISOLATED))
+		return;
+
+	/* Clear EEH_PE_CFG_BLOCKED, which might be set just now */
+	state = EEH_PE_CFG_BLOCKED;
+	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
+}
+
 /*
  * Some PCI bridges (e.g. PLX bridges) have primary/secondary
  * buses assigned explicitly by firmware, and we probably have
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 46fc0f4..2405631 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -20,6 +20,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
@@ -354,7 +355,7 @@
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
-	li	r8,-_LAST_ERRNO
+	li	r8,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
@@ -457,6 +458,10 @@
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
 	REST_NVGPRS(r1)
+
+	cmplwi	r0,NR_syscalls
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	bge-	ret_from_syscall
 	b	syscall_dotrace_cont
 
 syscall_exit_work:
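
The li r8,-MAX_ERRNO / cmplw sequence above implements the usual kernel
convention that only return values in [-MAX_ERRNO, -1] are errors
(MAX_ERRNO is 4095 in linux/err.h). A hedged user-space sketch of the
same unsigned comparison:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static int is_error(unsigned long ret)
    {
            /* unsigned compare, exactly like cmplw/cmpld in the entry code */
            return ret >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            printf("%d %d\n", is_error(-12UL), is_error(0x1000)); /* 1 0 */
            return 0;
    }
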
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 579e0f9..a94f155 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -19,6 +19,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -150,8 +151,7 @@
 	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_FLAGS(r11)
 	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
-	bne	syscall_dotrace
-.Lsyscall_dotrace_cont:
+	bne	syscall_dotrace		/* does not return */
 	cmpldi	0,r0,NR_syscalls
 	bge-	syscall_enosys
 
@@ -207,7 +207,7 @@
 #endif /* CONFIG_PPC_BOOK3E */
 
 	ld	r9,TI_FLAGS(r12)
-	li	r11,-_LAST_ERRNO
+	li	r11,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmpld	r3,r11
@@ -245,22 +245,34 @@
 	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_enter
+
 	/*
-	 * Restore argument registers possibly just changed.
-	 * We use the return value of do_syscall_trace_enter
-	 * for the call number to look up in the table (r0).
+	 * We use the return value of do_syscall_trace_enter() as the syscall
+	 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
+	 * returns an invalid syscall number and the test below against
+	 * NR_syscalls will fail.
 	 */
 	mr	r0,r3
+
+	/* Restore argument registers just clobbered and/or possibly changed. */
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
 	ld	r6,GPR6(r1)
 	ld	r7,GPR7(r1)
 	ld	r8,GPR8(r1)
+
+	/* Repopulate r9 and r10 for the system_call path */
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	CURRENT_THREAD_INFO(r10, r1)
 	ld	r10,TI_FLAGS(r10)
-	b	.Lsyscall_dotrace_cont
+
+	cmpldi	r0,NR_syscalls
+	blt+	system_call
+
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	b	.Lsyscall_exit
+
 
 syscall_enosys:
 	li	r3,-ENOSYS
@@ -277,7 +289,7 @@
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
-0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
+0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 3e68d1c..f3bd5e7 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1313,11 +1313,14 @@
 	sync
 	isync
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 /* 6. Setup KERNELBASE mapping in TLB[0]
@@ -1332,7 +1335,7 @@
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 	mtspr	SPRN_MAS1,r6
 
-	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
 	mtspr	SPRN_MAS2,r6
 
 	rlwinm	r5,r5,0,0,25
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index f22e7e4..83dd0f6 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -152,11 +152,14 @@
 	tlbivax 0,r9
 	TLBSYNC
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 #if defined(ENTRY_MAPPING_BOOT_SETUP)
@@ -167,8 +170,8 @@
 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
 	mtspr	SPRN_MAS1,r6
-	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
-	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
+	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@h
+	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@l
 	mtspr	SPRN_MAS2,r6
 	mtspr	SPRN_MAS3,r8
 	tlbwe
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index a1ed8a8..6472472 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -17,7 +17,7 @@
 {
 	u32 *addr = (u32 *)(unsigned long)entry->code;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		patch_branch(addr, entry->target, 0);
 	else
 		patch_instruction(addr, PPC_INST_NOP);
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 33aa4dd..9ad37f8 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -649,7 +649,6 @@
 			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
 		}
 		break;
-		break;
 #endif
 	}
 
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7c6bb4b..ed3ab50 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -596,25 +596,6 @@
 	b	2b
 
 /*
- * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
- * void atomic_set_mask(atomic_t mask, atomic_t *addr);
- */
-_GLOBAL(atomic_clear_mask)
-10:	lwarx	r5,0,r4
-	andc	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-_GLOBAL(atomic_set_mask)
-10:	lwarx	r5,0,r4
-	or	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-
-/*
  * Extended precision shifts.
  *
  * Updated to be valid for shift counts from 0 to 63 inclusive.
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4e314b9..6e4168c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -475,9 +475,18 @@
 #ifdef CONFIG_KEXEC		/* use no memory without kexec */
 	lwz	r4,0(r5)
 	cmpwi	0,r4,0
-	bnea	0x60
+	beq	99b
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r10,0x60
+	mfmsr	r11
+	clrrdi	r11,r11,1	/* Clear MSR_LE */
+	mtsrr0	r10
+	mtsrr1	r11
+	rfid
+#else
+	ba	0x60
 #endif
-	b	99b
+#endif
 
 /* this can be in text because we won't change it until we are
  * running in real anyways
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 1e703f8..98ba106 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -541,10 +541,9 @@
 			time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 			time->tv_nsec = 0;
 		}
-		*buf = kmalloc(length, GFP_KERNEL);
+		*buf = kmemdup(buff + hdr_size, length, GFP_KERNEL);
 		if (*buf == NULL)
 			return -ENOMEM;
-		memcpy(*buf, buff + hdr_size, length);
 		kfree(buff);
 
 		if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
@@ -582,9 +581,10 @@
 	spin_lock_init(&nvram_pstore_info.buf_lock);
 
 	rc = pstore_register(&nvram_pstore_info);
-	if (rc != 0)
-		pr_err("nvram: pstore_register() failed, defaults to "
-				"kmsg_dump; returned %d\n", rc);
+	if (rc && (rc != -EPERM))
+		/* Print error only when pstore.backend == nvram */
+		pr_err("nvram: pstore_register() failed, returned %d. "
+				"Defaults to kmsg_dump\n", rc);
 
 	return rc;
 }
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 02c1d5d..a1d0632 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -823,23 +823,15 @@
 		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
-				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
-					 "is unassigned\n",
-					 pci_name(dev), i,
-					 (unsigned long long)res->start,
-					 (unsigned long long)res->end,
-					 (unsigned int)res->flags);
+				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
+					 pci_name(dev), i, res);
 			res->end -= res->start;
 			res->start = 0;
 			res->flags |= IORESOURCE_UNSET;
 			continue;
 		}
 
-		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
 	}
 
 	/* Call machine specific resource fixup */
@@ -943,11 +935,7 @@
 			continue;
 		}
 
-		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
 
 		/* Try to detect uninitialized P2P bridge resources,
 		 * and clear them out so they get re-assigned later
@@ -1126,10 +1114,8 @@
 	*pp = NULL;
 	for (p = res->child; p != NULL; p = p->sibling) {
 		p->parent = res;
-		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
-			 p->name,
-			 (unsigned long long)p->start,
-			 (unsigned long long)p->end, res->name);
+		pr_debug("PCI: Reparented %s %pR under %s\n",
+			 p->name, p, res->name);
 	}
 	return 0;
 }
@@ -1198,14 +1184,9 @@
 			}
 		}
 
-		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
-			 "[0x%x], parent %p (%s)\n",
-			 bus->self ? pci_name(bus->self) : "PHB",
-			 bus->number, i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags,
-			 pr, (pr && pr->name) ? pr->name : "nil");
+		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
+			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
+			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
 
 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
 			struct pci_dev *dev = bus->self;
@@ -1247,11 +1228,8 @@
 {
 	struct resource *pr, *r = &dev->resource[idx];
 
-	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
-		 pci_name(dev), idx,
-		 (unsigned long long)r->start,
-		 (unsigned long long)r->end,
-		 (unsigned int)r->flags);
+	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
+		 pci_name(dev), idx, r);
 
 	pr = pci_find_parent_resource(dev, r);
 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1259,11 +1237,7 @@
 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
 		       " of device %s, will remap\n", idx, pci_name(dev));
 		if (pr)
-			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
-				 pr,
-				 (unsigned long long)pr->start,
-				 (unsigned long long)pr->end,
-				 (unsigned int)pr->flags);
+			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
 		/* We'll assign a new address later */
 		r->flags |= IORESOURCE_UNSET;
 		r->end -= r->start;
@@ -1425,12 +1399,8 @@
 			if (r->parent || !r->start || !r->flags)
 				continue;
 
-			pr_debug("PCI: Claiming %s: "
-				 "Resource %d: %016llx..%016llx [%x]\n",
-				 pci_name(dev), i,
-				 (unsigned long long)r->start,
-				 (unsigned long long)r->end,
-				 (unsigned int)r->flags);
+			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
+				 pci_name(dev), i, r);
 
 			if (pci_claim_resource(dev, i) == 0)
 				continue;
@@ -1514,11 +1484,8 @@
 	} else {
 		offset = pcibios_io_space_offset(hose);
 
-		pr_debug("PCI: PHB IO resource    = %08llx-%08llx [%lx] off 0x%08llx\n",
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
+			 res, (unsigned long long)offset);
 		pci_add_resource_offset(resources, res, offset);
 	}
 
@@ -1535,11 +1502,8 @@
 		offset = hose->mem_offset[i];
 
 
-		pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
+			 res, (unsigned long long)offset);
 
 		pci_add_resource_offset(resources, res, offset);
 	}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c8c62c7..2e710c1 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -102,7 +102,7 @@
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+			flags |= IORESOURCE_READONLY;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 64e6e9d..75b6676 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -86,7 +86,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -104,7 +104,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -540,7 +540,7 @@
 	 * the thread will no longer be transactional.
 	 */
 	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
-		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
 		if (msr_diff & MSR_FP)
 			memcpy(&thr->transact_fp, &thr->fp_state,
 			       sizeof(struct thread_fp_state));
@@ -591,10 +591,10 @@
 	/* Stash the original thread MSR, as giveup_fpu et al will
 	 * modify it.  We hold onto it to see whether the task used
 	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
-	 * tm_orig_msr is already set.
+	 * ckpt_regs.msr is already set.
 	 */
 	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
-		thr->tm_orig_msr = thr->regs->msr;
+		thr->ckpt_regs.msr = thr->regs->msr;
 
 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -663,7 +663,7 @@
 		tm_restore_sprs(&new->thread);
 		return;
 	}
-	msr = new->thread.tm_orig_msr;
+	msr = new->thread.ckpt_regs.msr;
 	/* Recheckpoint to restore original checkpointed register state. */
 	TM_DEBUG("*** tm_recheckpoint of pid %d "
 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
@@ -723,7 +723,7 @@
 	if (!MSR_TM_ACTIVE(regs->msr))
 		return;
 
-	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
 		fp_enable();
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b888b1..bef76c5 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -218,22 +218,18 @@
 }
 
 #ifdef CONFIG_PPC_STD_MMU_64
-static void __init check_cpu_slb_size(unsigned long node)
+static void __init init_mmu_slb_size(unsigned long node)
 {
 	const __be32 *slb_size_ptr;
 
-	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
-	if (slb_size_ptr != NULL) {
+	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
+			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+
+	if (slb_size_ptr)
 		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-		return;
-	}
-	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
-	if (slb_size_ptr != NULL) {
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-	}
 }
 #else
-#define check_cpu_slb_size(node) do { } while(0)
+#define init_mmu_slb_size(node) do { } while(0)
 #endif
 
 static struct feature_property {
@@ -380,7 +376,7 @@
 
 	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
-	check_cpu_slb_size(node);
+	init_mmu_slb_size(node);
 
 #ifdef CONFIG_PPC64
 	if (nthreads > 1)
@@ -476,9 +472,10 @@
 		flags = of_read_number(&dm[3], 1);
 		/* skip DRC index, pad, assoc. list index, flags */
 		dm += 4;
-		/* skip this block if the reserved bit is set in flags (0x80)
-		   or if the block is not assigned to this partition (0x8) */
-		if ((flags & 0x80) || !(flags & 0x8))
+		/* skip this block if the reserved bit is set in flags
+		   or if the block is not assigned to this partition */
+		if ((flags & DRCONF_MEM_RESERVED) ||
+				!(flags & DRCONF_MEM_ASSIGNED))
 			continue;
 		size = memblock_size;
 		rngs = 1;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index fcca807..15099c4 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -641,6 +641,15 @@
 #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
 		((x) >> 8) & 0xff, (x) & 0xff
 
+/* Firmware expects the value to be n - 1, where n is the # of vectors */
+#define NUM_VECTORS(n)		((n) - 1)
+
+/*
+ * Firmware expects 1 + n - 2, where n is the length of the option vector in
+ * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
+ */
+#define VECTOR_LENGTH(n)	(1 + (n) - 2)
+
 unsigned char ibm_architecture_vec[] = {
 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
@@ -651,16 +660,16 @@
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
-	6 - 1,				/* 6 option vectors */
+	NUM_VECTORS(6),			/* 6 option vectors */
 
 	/* option vector 1: processor architectures supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 2,				/* length */
+	VECTOR_LENGTH(33),		/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -674,17 +683,17 @@
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX | OV3_DFP,
 
 	/* option vector 4: IBM PAPR implementation */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't halt */
 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
 
 	/* option vector 5: PAPR/OF options */
-	19 - 2,				/* length */
+	VECTOR_LENGTH(18),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -717,12 +726,12 @@
 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
 	OV5_FEAT(OV5_PFO_HW_842),
 	OV5_FEAT(OV5_SUB_PROCESSORS),
+
 	/* option vector 6: IBM PAPR hints */
-	4 - 2,				/* length */
+	VECTOR_LENGTH(3),		/* length */
 	0,
 	0,
 	OV6_LINUX,
-
 };
 
 /* Old method - ELF header with PT_NOTE sections only works on BE */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index f21897b..737c0d0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1762,26 +1762,81 @@
 	return ret;
 }
 
-/*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
+#ifdef CONFIG_SECCOMP
+static int do_seccomp(struct pt_regs *regs)
+{
+	if (!test_thread_flag(TIF_SECCOMP))
+		return 0;
+
+	/*
+	 * The ABI we present to seccomp tracers is that r3 contains
+	 * the syscall return value and orig_gpr3 contains the first
+	 * syscall parameter. This is different to the ptrace ABI where
+	 * both r3 and orig_gpr3 contain the first syscall parameter.
+	 */
+	regs->gpr[3] = -ENOSYS;
+
+	/*
+	 * We use the __ version here because we have already checked
+	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
+	 * have already loaded -ENOSYS into r3, or seccomp has put
+	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+	 */
+	if (__secure_computing())
+		return -1;
+
+	/*
+	 * The syscall was allowed by seccomp, restore the register
+	 * state to what ptrace and audit expect.
+	 * Note that we use orig_gpr3, which means a seccomp tracer can
+	 * modify the first syscall parameter (in orig_gpr3) and also
+	 * allow the syscall to proceed.
+	 */
+	regs->gpr[3] = regs->orig_gpr3;
+
+	return 0;
+}
+#else
+static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_SECCOMP */
+
+/**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+ *
+ * Performs various types of tracing on syscall entry. This includes seccomp,
+ * ptrace, syscall tracepoints and audit.
+ *
+ * The pt_regs are potentially visible to userspace via ptrace, so their
+ * contents are ABI.
+ *
+ * One or more of the tracers may modify the contents of pt_regs, in particular
+ * to change arguments or even the syscall number itself.
+ *
+ * It's also possible that a tracer can choose to reject the system call. In
+ * that case this function will return an illegal syscall number, and will put
+ * an appropriate return value in regs->r3.
+ *
+ * Return: the (possibly changed) syscall number.
  */
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	long ret = 0;
+	bool abort = false;
 
 	user_exit();
 
-	secure_computing_strict(regs->gpr[0]);
+	if (do_seccomp(regs))
+		return -1;
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
 		/*
-		 * Tracing decided this syscall should not happen.
-		 * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->gpr[0].
+		 * The tracer may decide to abort the syscall; if so, tracehook
+		 * will return !0. Note that the tracer may also just change
+		 * regs->gpr[0] to an invalid syscall number; that is handled
+		 * below on the exit path.
 		 */
-		ret = -1L;
+		abort = tracehook_report_syscall_entry(regs) != 0;
+	}
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->gpr[0]);
@@ -1798,7 +1853,17 @@
 				    regs->gpr[5] & 0xffffffff,
 				    regs->gpr[6] & 0xffffffff);
 
-	return ret ?: regs->gpr[0];
+	if (abort || regs->gpr[0] >= NR_syscalls) {
+		/*
+		 * If we are aborting explicitly, or if the syscall number is
+		 * now invalid, set the return value to -ENOSYS.
+		 */
+		regs->gpr[3] = -ENOSYS;
+		return -1;
+	}
+
+	/* Return the possibly modified but valid syscall number */
+	return regs->gpr[0];
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)
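
As context for the new seccomp path: a minimal userspace filter that exercises the SECCOMP_RET_ERRNO case handled by do_seccomp() could look like the sketch below. It uses the standard seccomp/BPF prctl API; the choice of getpid and ENOSYS is purely illustrative.

	#include <errno.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct sock_filter filter[] = {
			/* Load the syscall number from seccomp_data */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			/* Reject getpid with ENOSYS, allow everything else */
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(filter) / sizeof(filter[0]),
			.filter = filter,
		};

		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
			perror("PR_SET_SECCOMP");
		/* On the rejected call, the kernel (via do_seccomp) leaves
		 * the errno in r3 and skips the syscall entirely */
		printf("getpid() = %ld\n", (long)syscall(__NR_getpid));
		return 0;
	}
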
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7a488c1..84bf934 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -478,8 +478,9 @@
 
 	if (status == RTAS_BUSY) {
 		ms = 1;
-	} else if (status >= 9900 && status <= 9905) {
-		order = status - 9900;
+	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
+		   status <= RTAS_EXTENDED_DELAY_MAX) {
+		order = status - RTAS_EXTENDED_DELAY_MIN;
 		for (ms = 1; order > 0; order--)
 			ms *= 10;
 	}
@@ -584,6 +585,23 @@
 }
 EXPORT_SYMBOL(rtas_get_sensor);
 
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+	int token = rtas_token("get-sensor-state");
+	int rc;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	rc = rtas_call(token, 2, 2, state, sensor, index);
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
+
+	if (rc < 0)
+		return rtas_error_rc(rc);
+	return rc;
+}
+
 bool rtas_indicator_present(int token, int *maxindex)
 {
 	int proplen, count, i;
@@ -641,7 +659,8 @@
 
 	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
 
-	WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
 
 	if (rc < 0)
 		return rtas_error_rc(rc);
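
With the named constants, the extended-delay decoding reads directly: a status of RTAS_EXTENDED_DELAY_MIN + order requests a delay of 10^order milliseconds. A standalone sketch of that arithmetic (the raw values 9900..9905 are shown only for illustration):

	/* 9900 -> 1ms, 9901 -> 10ms, ..., 9905 -> 100000ms, mirroring
	 * the loop in the hunk above. */
	static unsigned int extended_delay_ms(int status)
	{
		unsigned int ms = 1;
		int order = status - 9900;	/* RTAS_EXTENDED_DELAY_MIN */

		while (order-- > 0)
			ms *= 10;
		return ms;
	}
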
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index da50e0c..0dbee46 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -949,6 +949,11 @@
 		err |= __put_user(s->si_overrun, &d->si_overrun);
 		err |= __put_user(s->si_int, &d->si_int);
 		break;
+	case __SI_SYS >> 16:
+		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
+		err |= __put_user(s->si_syscall, &d->si_syscall);
+		err |= __put_user(s->si_arch, &d->si_arch);
+		break;
 	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
 	case __SI_MESGQ >> 16:
 		err |= __put_user(s->si_int, &d->si_int);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c7c24d2..20756df 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -74,6 +74,19 @@
 	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";
 
 /*
+ * This computes a quad word aligned pointer inside the vmx_reserve array
+ * element. For historical reasons sigcontext might not be quad word aligned,
+ * but the location we write the VMX regs to must be. See the comment in
+ * sigcontext for more detail.
+ */
+#ifdef CONFIG_ALTIVEC
+static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
+{
+	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+}
+#endif
+
+/*
  * Set up the sigcontext for the signal frame.
  */
 
@@ -90,7 +103,7 @@
 	 * v_regs pointer or not
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
@@ -181,10 +194,8 @@
 	 * v_regs pointer or not.
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
-	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
+	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
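
The new helper factors out the quadword align-up that both callers previously open-coded. A minimal standalone demonstration of the (p + 15) & ~0xf rounding:

	#include <stdint.h>
	#include <stdio.h>

	/* Round an address up to the next 16-byte boundary, as
	 * sigcontext_vmx_regs() does for the vmx_reserve area. */
	static uintptr_t align16(uintptr_t p)
	{
		return (p + 15) & ~(uintptr_t)0xf;
	}

	int main(void)
	{
		/* 0x1004 rounds up to 0x1010; 0x1010 is already aligned */
		printf("%#lx %#lx\n", (unsigned long)align16(0x1004),
		       (unsigned long)align16(0x1010));
		return 0;
	}
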
diff --git a/arch/powerpc/kernel/trace_clock.c b/arch/powerpc/kernel/trace_clock.c
new file mode 100644
index 0000000..4917069
--- /dev/null
+++ b/arch/powerpc/kernel/trace_clock.c
@@ -0,0 +1,15 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include <asm/trace_clock.h>
+#include <asm/time.h>
+
+u64 notrace trace_clock_ppc_tb(void)
+{
+	return get_tb();
+}
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 7874e8a..6d67e05 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -41,22 +41,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-	addze	r0,r0		/* add in final carry */
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 57a0720..f3ef354 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -45,27 +45,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
- * No real gain trying to do this specially for 64 bit, but
- * the 32 bit addition may spill into the upper bits of
- * the doubleword so we still must fold it down from 64.
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-        rldicl  r4,r0,32,0      /* fold 64 bit value */
-        add     r0,r4,r0
-        srdi    r0,r0,32
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * Computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit).
  *
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 6813f80..2ef50c6 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,9 +69,15 @@
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero.  This requires that the destination
+ * area is cacheable.  -- paulus
+ */
 _GLOBAL(memset)
 	rlwimi	r4,r4,8,16,23
 	rlwimi	r4,r4,16,0,15
+
 	addi	r6,r3,-4
 	cmplwi	0,r5,4
 	blt	7f
@@ -80,7 +86,29 @@
 	andi.	r0,r6,3
 	add	r5,r0,r5
 	subf	r6,r0,r6
-	srwi	r0,r5,2
+	cmplwi	0,r4,0
+	bne	2f	/* Use normal procedure if r4 is not zero */
+
+	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
+	add	r8,r7,r5
+	srwi	r9,r8,LG_CACHELINE_BYTES
+	addic.	r9,r9,-1	/* total number of complete cachelines */
+	ble	2f
+	xori	r0,r7,CACHELINE_MASK & ~3
+	srwi.	r0,r0,2
+	beq	3f
+	mtctr	r0
+4:	stwu	r4,4(r6)
+	bdnz	4b
+3:	mtctr	r9
+	li	r7,4
+10:	dcbz	r7,r6
+	addi	r6,r6,CACHELINE_BYTES
+	bdnz	10b
+	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
+	addi	r5,r5,4
+
+2:	srwi	r0,r5,2
 	mtctr	r0
 	bdz	6f
 1:	stwu	r4,4(r6)
@@ -94,12 +122,91 @@
 	bdnz	8b
 	blr
 
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic.  This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	/* fall through */
 
 _GLOBAL(memcpy)
+	add	r7,r3,r5		/* test if the src & dst overlap */
+	add	r8,r4,r5
+	cmplw	0,r4,r7
+	cmplw	1,r3,r8
+	crand	0,0,4			/* cr0.lt &= cr1.lt */
+	blt	generic_memcpy		/* if regions overlap */
+
+	addi	r4,r4,-4
+	addi	r6,r3,-4
+	neg	r0,r3
+	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
+	beq	58f
+
+	cmplw	0,r5,r0			/* is this more than total to do? */
+	blt	63f			/* if not much to do */
+	andi.	r8,r0,3			/* get it word-aligned first */
+	subf	r5,r0,r5
+	mtctr	r8
+	beq+	61f
+70:	lbz	r9,4(r4)		/* do some bytes */
+	addi	r4,r4,1
+	addi	r6,r6,1
+	stb	r9,3(r6)
+	bdnz	70b
+61:	srwi.	r0,r0,2
+	mtctr	r0
+	beq	58f
+72:	lwzu	r9,4(r4)		/* do some words */
+	stwu	r9,4(r6)
+	bdnz	72b
+
+58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
+	li	r11,4
+	mtctr	r0
+	beq	63f
+53:
+	dcbz	r11,r6
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 32
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 64
+	COPY_16_BYTES
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 128
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+#endif
+#endif
+#endif
+	bdnz	53b
+
+63:	srwi.	r0,r5,2
+	mtctr	r0
+	beq	64f
+30:	lwzu	r0,4(r4)
+	stwu	r0,4(r6)
+	bdnz	30b
+
+64:	andi.	r0,r5,3
+	mtctr	r0
+	beq+	65f
+	addi	r4,r4,3
+	addi	r6,r6,3
+40:	lbzu	r0,1(r4)
+	stbu	r0,1(r6)
+	bdnz	40b
+65:	blr
+
+_GLOBAL(generic_memcpy)
 	srwi.	r7,r5,3
 	addi	r6,r3,-4
 	addi	r4,r4,-4
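
Both new fast paths rely on dcbz to establish whole cache lines in the destination without fetching them from memory. A C-level sketch of the memset strategy for a zero fill byte (CACHELINE stands in for L1_CACHE_BYTES; the real implementation is the assembly above):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define CACHELINE 32	/* stand-in for L1_CACHE_BYTES */

	static void *memset_zero_sketch(void *dst, size_t len)
	{
		unsigned char *p = dst;

		/* head: byte-fill up to the first cache-line boundary */
		while (len && ((uintptr_t)p & (CACHELINE - 1))) {
			*p++ = 0;
			len--;
		}
		/* body: whole cache lines; the assembly uses dcbz here,
		 * which zeroes a line without reading it in */
		while (len >= CACHELINE) {
			memset(p, 0, CACHELINE);
			p += CACHELINE;
			len -= CACHELINE;
		}
		/* tail: remaining bytes */
		while (len--)
			*p++ = 0;
		return dst;
	}
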
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 9c90e66..354ba3c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -112,7 +112,7 @@
 
 	tsize = __ilog2(size) - 10;
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
 	if ((flags & _PAGE_NO_CACHE) == 0)
 		flags |= _PAGE_COHERENT;
 #endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 463174a..3b49e32 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -701,7 +701,7 @@
 
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 
 /*****************************************************************************
  *                                                                           *
@@ -993,7 +993,7 @@
 	b	ht64_bail
 
 
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 
 /*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ec987f..aee7017 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -640,7 +640,7 @@
 
 static void __init htab_finish_init(void)
 {
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	patch_branch(ht64_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
@@ -653,7 +653,7 @@
 	patch_branch(ht64_call_hpte_updatepp,
 		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	patch_branch(htab_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
@@ -1151,12 +1151,12 @@
 		check_paca_psize(ea, mm, psize, user_region);
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 	{
 		int spp = subpage_protection(mm, ea);
 		if (access & spp)
@@ -1264,12 +1264,12 @@
 		update_flags |= HPTE_LOCAL_UPDATE;
 
 	/* Hash it in */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     update_flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
 				    ssize, subpage_protection(mm, ea));
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bb0bd70..06c1452 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -808,14 +808,6 @@
 	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
 		return -EINVAL;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	/* Disable support for 64K huge pages when 64K SPU local store
-	 * support is enabled as the current implementation conflicts.
-	 */
-	if (shift == PAGE_SHIFT_64K)
-		return -EINVAL;
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
 
 	/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f11819..22d94c3 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -113,7 +113,7 @@
 }
 #endif
 
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata;
 	struct zone *zone;
@@ -128,7 +128,7 @@
 
 	/* this should work for most non-highmem platforms */
 	zone = pgdata->node_zones +
-		zone_for_memory(nid, start, size, 0);
+		zone_for_memory(nid, start, size, 0, for_device);
 
 	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
@@ -414,17 +414,17 @@
 		return;
 	}
 #endif
-#ifdef CONFIG_BOOKE
-	{
+#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+	/* On 8xx there is no need to kmap since highmem is not supported */
+	__flush_dcache_icache(page_address(page));
+#else
+	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
 		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
 		kunmap_atomic(start);
+	} else {
+		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 	}
-#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
-	/* On 8xx there is no need to kmap since highmem is not supported */
-	__flush_dcache_icache(page_address(page)); 
-#else
-	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
 EXPORT_SYMBOL(flush_dcache_icache_page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5e80621..8b9502a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -225,7 +225,7 @@
 	for (i = 0; i < distance_ref_points_depth; i++) {
 		const __be32 *entry;
 
-		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 	}
 }
@@ -248,8 +248,12 @@
 		nid = -1;
 
 	if (nid > 0 &&
-	    of_read_number(associativity, 1) >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, associativity);
+		of_read_number(associativity, 1) >= distance_ref_points_depth) {
+		/*
+		 * Skip the length field and pass the start of the
+		 * associativity array.
+		 */
+		initialize_distance_lookup_table(nid, associativity + 1);
+	}
 
 out:
 	return nid;
@@ -507,6 +511,12 @@
 
 		if (nid == 0xffff || nid >= MAX_NUMNODES)
 			nid = default_nid;
+
+		if (nid > 0) {
+			index = drmem->aa_index * aa->array_sz;
+			initialize_distance_lookup_table(nid,
+							&aa->arrays[index]);
+		}
 	}
 
 	return nid;
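
Both fixes account for the leading length cell of the ibm,associativity property: the reference points from ibm,associativity-reference-points are 1-based indexes into the domain cells that follow it. Schematically, with invented cell values:

	/* Illustrative layout only: the first cell is the count, the
	 * domain cells follow. */
	__be32 prop[] = {
		cpu_to_be32(4),		/* length: 4 domains follow */
		cpu_to_be32(10),	/* domain 1 */
		cpu_to_be32(20),	/* domain 2 */
		cpu_to_be32(30),	/* domain 3 */
		cpu_to_be32(40),	/* domain 4 */
	};
	const __be32 *associativity = prop + 1;	/* skip the length cell */
	/* reference point 4 now selects associativity[4 - 1], domain 4 */
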
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 876232d..e92cb21 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -149,17 +149,7 @@
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
 
-#ifdef CONFIG_PPC_BOOK3E_64
-	/*
-	 * With hardware tablewalk, a sync is needed to ensure that
-	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
-	 * mappings, we can't tolerate spurious faults, so make sure
-	 * the new PTE will be seen the first time.
-	 */
-	mb();
-#else
 	smp_wmb();
-#endif
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca..8a32a2b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long slot)
+					 unsigned long entry)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -249,11 +249,24 @@
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	int insn = (*insn_addr & 0xffff0000) | immed;
+
+	/*
+	 * This function patches either an li or a cmpldi instruction with
+	 * a new immediate value. This relies on the fact that both li
+	 * (which is actually addi) and cmpldi both take a 16-bit immediate
+	 * value, and it is situated in the same location in the instruction,
+	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+	 * The signedness of the immediate operand differs between the two
+	 * instructions however this code is only ever patching a small value,
+	 * much less than 1 << 15, so we can get away with it.
+	 * To patch the value we read the existing instruction, clear the
+	 * immediate value, and or in our new value, then write the instruction
+	 * back.
+	 */
+	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
 	patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -309,12 +322,11 @@
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
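
A worked example of the immediate patch on a concrete instruction (the opcode value is for illustration only): 0x38600000 encodes li r3,0, so keeping the top half and OR-ing in a new 16-bit immediate yields li r3,0x20.

	unsigned int insn  = 0x38600000;	/* li r3,0 */
	unsigned int immed = 0x0020;
	unsigned int patched = (insn & 0xffff0000) | immed;
	/* patched == 0x38600020, i.e. li r3,0x20 */
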
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 765b419..e418558 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -308,11 +308,11 @@
 	 *
 	 * MAS6:IND should be already set based on MAS4
 	 */
-1:	lbarx	r15,0,r11
 	lhz	r10,PACAPACAINDEX(r13)
-	cmpdi	r15,0
-	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
 	addi	r10,r10,1
+	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
+1:	lbarx	r15,0,r11
+	cmpdi	r15,0
 	bne	2f
 	stbcx.	r10,0,r11
 	bne	1b
@@ -320,9 +320,9 @@
 	.subsection 1
 2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
 	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
-	lbz	r15,0(r11)
+10:	lbz	r15,0(r11)
 	cmpdi	r15,0
-	bne	2b
+	bne	10b
 	b	1b
 	.previous
 
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 962fe7b..4b32e94 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -207,7 +207,7 @@
 	unsigned int mmcr0;
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		if (ctr[i].enabled) {
@@ -377,7 +377,7 @@
 	is_kernel = get_kernel(pc, mmcra);
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	/* Check that the SIAR  valid bit in MMCRA is set to 1. */
 	if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index d90893b..b0382f3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -53,7 +53,7 @@
 
 	/* BHRB bits */
 	u64				bhrb_filter;	/* BHRB HW branch filter */
-	int				bhrb_users;
+	unsigned int			bhrb_users;
 	void				*bhrb_context;
 	struct	perf_branch_stack	bhrb_stack;
 	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
@@ -369,8 +369,8 @@
 	if (!ppmu->bhrb_nr)
 		return;
 
+	WARN_ON_ONCE(!cpuhw->bhrb_users);
 	cpuhw->bhrb_users--;
-	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
 	perf_sched_cb_dec(event->ctx->pmu);
 
 	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index df95629..527c8b9 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -416,7 +416,7 @@
 }
 
 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
-				int nonce)
+					    int nonce)
 {
 	int nl, dl;
 	char *name = event_name(event, &nl);
@@ -444,7 +444,7 @@
 }
 
 static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
-		struct hv_24x7_event_data *event, int nonce)
+				   struct hv_24x7_event_data *event, int nonce)
 {
 	unsigned i;
 
@@ -512,7 +512,7 @@
 }
 
 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
-					size_t s2, unsigned d2)
+		       size_t s2, unsigned d2)
 {
 	int r = memord(v1, s1, v2, s2);
 
@@ -526,7 +526,7 @@
 }
 
 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
-				unsigned domain)
+			  unsigned domain)
 {
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 	struct event_uniq *data;
@@ -650,8 +650,8 @@
 #define MAX_4K (SIZE_MAX / 4096)
 
 static int create_events_from_catalog(struct attribute ***events_,
-		struct attribute ***event_descs_,
-		struct attribute ***event_long_descs_)
+				      struct attribute ***event_descs_,
+				      struct attribute ***event_long_descs_)
 {
 	unsigned long hret;
 	size_t catalog_len, catalog_page_len, event_entry_count,
@@ -1008,8 +1008,8 @@
 };
 
 static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer,
-			unsigned long ret)
+			   struct hv_24x7_data_result_buffer *result_buffer,
+			   unsigned long ret)
 {
 	struct hv_24x7_request *req;
 
@@ -1026,7 +1026,7 @@
  * Start the process for a new H_GET_24x7_DATA hcall.
  */
 static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			      struct hv_24x7_data_result_buffer *result_buffer)
 {
 
 	memset(request_buffer, 0, 4096);
@@ -1041,7 +1041,7 @@
  * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
  */
 static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			     struct hv_24x7_data_result_buffer *result_buffer)
 {
 	unsigned long ret;
 
@@ -1104,7 +1104,6 @@
 	unsigned long ret;
 	struct hv_24x7_request_buffer *request_buffer;
 	struct hv_24x7_data_result_buffer *result_buffer;
-	struct hv_24x7_result *resb;
 
 	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
 	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1125,8 +1124,7 @@
 	}
 
 	/* process result from hcall */
-	resb = &result_buffer->results[0];
-	*count = be64_to_cpu(resb->elements[0].element_data[0]);
+	*count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);
 
 out:
 	put_cpu_var(hv_24x7_reqb);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 5aa3f4b..48bf38d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,8 +7,8 @@
 	select PPC_PCI_CHOICE
 	select FSL_PCI if PCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select USB_EHCI_BIG_ENDIAN_MMIO
-	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO if USB_EHCI_HCD
+	select USB_EHCI_BIG_ENDIAN_DESC if USB_EHCI_HCD
 
 config MPC5121_ADS
 	bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 84476b6..61bc851 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -66,10 +66,6 @@
 	.probe			= c293_pcie_probe,
 	.setup_arch		= c293_pcie_setup_arch,
 	.init_IRQ		= c293_pcie_pic_init,
-#ifdef CONFIG_PCI
-	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
-	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
-#endif
 	.get_irq		= mpic_get_irq,
 	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index bd839dc..b395571 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -153,6 +153,8 @@
 	"fsl,T1023RDB",
 	"fsl,T1024QDS",
 	"fsl,T1024RDB",
+	"fsl,T1040D4RDB",
+	"fsl,T1042D4RDB",
 	"fsl,T1040QDS",
 	"fsl,T1042QDS",
 	"fsl,T1040RDB",
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2f23133..b0ac177 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -57,21 +57,6 @@
 	  Units on machines implementing the Broadband Processor
 	  Architecture.
 
-config SPU_FS_64K_LS
-	bool "Use 64K pages to map SPE local  store"
-	# we depend on PPC_MM_SLICES for now rather than selecting
-	# it because we depend on hugetlbfs hooks being present. We
-	# will fix that when the generic code has been improved to
-	# not require hijacking hugetlbfs hooks.
-	depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
-	default y
-	select PPC_HAS_HASH_64K
-	help
-	  This option causes SPE local stores to be mapped in process
-	  address spaces using 64K pages while the rest of the kernel
-	  uses 4K pages. This can improve performances of applications
-	  using multiple SPEs by lowering the TLB pressure on them.
-
 config SPU_BASE
 	bool
 	default n
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index e865d74..2d4f60c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,7 @@
 
 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid,
+	area->pages = __alloc_pages_node(area->nid,
 						GFP_KERNEL|__GFP_THISNODE,
 						area->order);
 
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d966bbe..5038fd5 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -239,23 +239,6 @@
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_state *csa = &ctx->csa;
-	int psize;
-
-	/* Check what page size we are using */
-	psize = get_slice_psize(vma->vm_mm, address);
-
-	/* Some sanity checking */
-	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
-
-	/* Wow, 64K, cool, we need to align the address though */
-	if (csa->use_big_pages) {
-		BUG_ON(vma->vm_start & 0xffff);
-		address &= ~0xfffful;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	offset = vmf->pgoff << PAGE_SHIFT;
 	if (offset >= LS_SIZE)
 		return VM_FAULT_SIGBUS;
@@ -310,22 +293,6 @@
 
 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 {
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* Sanity check VMA alignment */
-	if (csa->use_big_pages) {
-		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
-			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
-			 vma->vm_pgoff);
-		if (vma->vm_start & 0xffff)
-			return -EINVAL;
-		if (vma->vm_pgoff & 0xf)
-			return -EINVAL;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
@@ -336,25 +303,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_SPU_FS_64K_LS
-static unsigned long spufs_get_unmapped_area(struct file *file,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags)
-{
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* If not using big pages, fallback to normal MM g_u_a */
-	if (!csa->use_big_pages)
-		return current->mm->get_unmapped_area(file, addr, len,
-						      pgoff, flags);
-
-	/* Else, try to obtain a 64K pages slice */
-	return slice_get_unmapped_area(addr, len, flags,
-				       MMU_PAGE_64K, 1);
-}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 static const struct file_operations spufs_mem_fops = {
 	.open			= spufs_mem_open,
 	.release		= spufs_mem_release,
@@ -362,9 +310,6 @@
 	.write			= spufs_mem_write,
 	.llseek			= generic_file_llseek,
 	.mmap			= spufs_mem_mmap,
-#ifdef CONFIG_SPU_FS_64K_LS
-	.get_unmapped_area	= spufs_get_unmapped_area,
-#endif
 };
 
 static int spufs_ps_fault(struct vm_area_struct *vma,
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 1470699..b847e94 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -31,7 +31,7 @@
 
 #include "spufs.h"
 
-static int spu_alloc_lscsa_std(struct spu_state *csa)
+int spu_alloc_lscsa(struct spu_state *csa)
 {
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
@@ -48,7 +48,7 @@
 	return 0;
 }
 
-static void spu_free_lscsa_std(struct spu_state *csa)
+void spu_free_lscsa(struct spu_state *csa)
 {
 	/* Clear reserved bit before vfree. */
 	unsigned char *p;
@@ -61,123 +61,3 @@
 
 	vfree(csa->lscsa);
 }
-
-#ifdef CONFIG_SPU_FS_64K_LS
-
-#define SPU_64K_PAGE_SHIFT	16
-#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
-#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	struct page	**pgarray;
-	unsigned char	*p;
-	int		i, j, n_4k;
-
-	/* Check availability of 64K pages */
-	if (!spu_64k_pages_available())
-		goto fail;
-
-	csa->use_big_pages = 1;
-
-	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
-		 csa);
-
-	/* First try to allocate our 64K pages. We need 5 of them
-	 * with the current implementation. In the future, we should try
-	 * to separate the lscsa with the actual local store image, thus
-	 * allowing us to require only 4 64K pages per context
-	 */
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
-		/* XXX This is likely to fail, we should use a special pool
-		 *     similar to what hugetlbfs does.
-		 */
-		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
-						  SPU_64K_PAGE_ORDER);
-		if (csa->lscsa_pages[i] == NULL)
-			goto fail;
-	}
-
-	pr_debug(" success ! creating vmap...\n");
-
-	/* Now we need to create a vmalloc mapping of these for the kernel
-	 * and SPU context switch code to use. Currently, we stick to a
-	 * normal kernel vmalloc mapping, which in our case will be 4K
-	 */
-	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
-	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
-	if (pgarray == NULL)
-		goto fail;
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
-			/* We assume all the struct page's are contiguous
-			 * which should be hopefully the case for an order 4
-			 * allocation..
-			 */
-			pgarray[i * SPU_64K_PAGE_COUNT + j] =
-				csa->lscsa_pages[i] + j;
-	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
-	kfree(pgarray);
-	if (csa->lscsa == NULL)
-		goto fail;
-
-	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
-
-	/* Set LS pages reserved to allow for user-space mapping.
-	 *
-	 * XXX isn't that a bit obsolete ? I think we should just
-	 * make sure the page count is high enough. Anyway, won't harm
-	 * for now
-	 */
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		SetPageReserved(vmalloc_to_page(p));
-
-	pr_debug(" all good !\n");
-
-	return 0;
-fail:
-	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
-	spu_free_lscsa(csa);
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	unsigned char *p;
-	int i;
-
-	if (!csa->use_big_pages) {
-		spu_free_lscsa_std(csa);
-		return;
-	}
-	csa->use_big_pages = 0;
-
-	if (csa->lscsa == NULL)
-		goto free_pages;
-
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		ClearPageReserved(vmalloc_to_page(p));
-
-	vunmap(csa->lscsa);
-	csa->lscsa = NULL;
-
- free_pages:
-
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		if (csa->lscsa_pages[i])
-			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
-}
-
-#else /* CONFIG_SPU_FS_64K_LS */
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	spu_free_lscsa_std(csa);
-}
-
-#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 7cf0df8..3bb6acb 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1394,11 +1394,19 @@
 			 */
 			if (pnv_eeh_get_pe(hose,
 				be64_to_cpu(frozen_pe_no), pe)) {
-				/* Try best to clear it */
 				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
-					hose->global_number, frozen_pe_no);
+					hose->global_number, be64_to_cpu(frozen_pe_no));
 				pr_info("EEH: PHB location: %s\n",
 					eeh_pe_loc_get(phb_pe));
+
+				/* Dump PHB diag-data */
+				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
+					phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+				if (rc == OPAL_SUCCESS)
+					pnv_pci_dump_phb_diag_data(hose,
+							phb->diag.blob);
+
+				/* Try best to clear it */
 				opal_pci_eeh_freeze_clear(phb->opal_id,
 					frozen_pe_no,
 					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index a8f49d3..d000f4e 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -35,9 +35,134 @@
 	struct list_head list;
 	struct OpalHMIEvent hmi_evt;
 };
+
+struct xstop_reason {
+	uint32_t xstop_reason;
+	const char *unit_failed;
+	const char *description;
+};
+
 static LIST_HEAD(opal_hmi_evt_list);
 static DEFINE_SPINLOCK(opal_hmi_evt_lock);
 
+static void print_core_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ CORE_CHECKSTOP_IFU_REGFILE, "IFU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_IFU_LOGIC, "IFU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_DURING_RECOV, "PC",
+				"Core checkstop during recovery" },
+		{ CORE_CHECKSTOP_ISU_REGFILE, "ISU",
+				"RegFile core check stop (mapper error)" },
+		{ CORE_CHECKSTOP_ISU_LOGIC, "ISU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_FXU_LOGIC, "FXU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_VSU_LOGIC, "VSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE, "PC",
+				"Recovery in maintenance mode" },
+		{ CORE_CHECKSTOP_LSU_REGFILE, "LSU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_PC_FWD_PROGRESS, "PC",
+				"Forward Progress Error" },
+		{ CORE_CHECKSTOP_LSU_LOGIC, "LSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_LOGIC, "PC", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_HYP_RESOURCE, "PC",
+				"Hypervisor Resource error - core check stop" },
+		{ CORE_CHECKSTOP_PC_HANG_RECOV_FAILED, "PC",
+				"Hang Recovery Failed (core check stop)" },
+		{ CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED, "PC",
+				"Ambiguous Hang Detected (unknown source)" },
+		{ CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ, "PC",
+				"Debug Trigger Error inject" },
+		{ CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ, "PC",
+				"Hypervisor check stop via SPRC/SPRD" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown Core check stop.\n", level);
+		return;
+	}
+
+	printk("%s	CPU PIR: %08x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.pir));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_nx_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine",
+					"SHM invalid state error" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine",
+					"DMA invalid state error bit 15" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine",
+					"DMA invalid state error bit 16" },
+		{ NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 0 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 1 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 2 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 3 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 4 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 5 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 6 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 7 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CRB_UE, "DMA & Engine",
+					"UE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_DMA_CRB_SUE, "DMA & Engine",
+					"SUE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_PBI_ISN_UE, "PowerBus Interface",
+		"CRB Kill ISN received while holding ISN with UE error" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown NX check stop.\n", level);
+		return;
+	}
+
+	printk("%s	NX checkstop on CHIP ID: %x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	switch (hmi_evt->u.xstop_error.xstop_type) {
+	case CHECKSTOP_TYPE_CORE:
+		print_core_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_NX:
+		print_nx_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_UNKNOWN:
+		printk("%s	Unknown Malfunction Alert.\n", level);
+		break;
+	}
+}
+
 static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
 {
 	const char *level, *sevstr, *error_info;
@@ -95,6 +220,13 @@
 		(hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
 		printk("%s	TFMR: %016llx\n", level,
 						be64_to_cpu(hmi_evt->tfmr));
+
+	if (hmi_evt->version < OpalHMIEvt_V2)
+		return;
+
+	/* OpalHMIEvt_V2 and above provides reason for malfunction alert. */
+	if (hmi_evt->type == OpalHMI_ERROR_MALFUNC_ALERT)
+		print_checkstop_reason(level, hmi_evt);
 }
 
 static void hmi_event_handler(struct work_struct *work)
@@ -103,6 +235,8 @@
 	struct OpalHMIEvent *hmi_evt;
 	struct OpalHmiEvtNode *msg_node;
 	uint8_t disposition;
+	struct opal_msg msg;
+	int unrecoverable = 0;
 
 	spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	while (!list_empty(&opal_hmi_evt_list)) {
@@ -118,14 +252,53 @@
 
 		/*
 		 * Check if HMI event has been recovered or not. If not
-		 * then we can't continue, invoke panic.
+		 * then the kernel can't continue; we need to panic.
+		 * But before we do that, display all the HMI events
+		 * available on the list and set the unrecoverable flag to 1.
 		 */
 		if (disposition != OpalHMI_DISPOSITION_RECOVERED)
-			panic("Unrecoverable HMI exception");
+			unrecoverable = 1;
 
 		spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	}
 	spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
+
+	if (unrecoverable) {
+		int ret;
+
+		/* Pull all HMI events from OPAL before we panic. */
+		while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) {
+			u32 type;
+
+			type = be32_to_cpu(msg.msg_type);
+
+			/* skip if not HMI event */
+			if (type != OPAL_MSG_HMI_EVT)
+				continue;
+
+			/* HMI event info starts from param[0] */
+			hmi_evt = (struct OpalHMIEvent *)&msg.params[0];
+			print_hmi_event_info(hmi_evt);
+		}
+
+		/*
+		 * Unrecoverable HMI exception. We need to inform BMC/OCC
+		 * about this error so that it can collect relevant data
+		 * for error analysis before rebooting.
+		 */
+		ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable HMI exception");
+		if (ret == OPAL_UNSUPPORTED) {
+			pr_emerg("Reboot type %d not supported\n",
+						OPAL_REBOOT_PLATFORM_ERROR);
+		}
+
+		/*
+		 * Fall through and panic if opal_cec_reboot2() returns
+		 * OPAL_UNSUPPORTED.
+		 */
+		panic("Unrecoverable HMI exception");
+	}
 }
 
 static DECLARE_WORK(hmi_event_work, hmi_event_handler);
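
Both decoders share the same table-driven shape: every set bit in the xstop_reason mask is looked up in a reason table and each hit is printed, so one event can report several failed units at once. Stripped to its essentials (bit values and strings invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	struct reason {
		uint32_t bit;
		const char *desc;
	};

	static const struct reason table[] = {
		{ 0x00000001, "RegFile core check stop" },
		{ 0x00000002, "Logic core check stop" },
	};

	static void decode(uint32_t mask)
	{
		unsigned int i;

		/* print the description for every matching bit */
		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (mask & table[i].bit)
				printf("%s\n", table[i].desc);
	}
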
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index ac46c2c..58dc330 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -9,9 +9,12 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)	"opal-power: "	fmt
+
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
 
 #include <asm/opal.h>
 #include <asm/machdep.h>
@@ -19,30 +22,116 @@
 #define SOFT_OFF 0x00
 #define SOFT_REBOOT 0x01
 
-static int opal_power_control_event(struct notifier_block *nb,
-				    unsigned long msg_type, void *msg)
+/* Detect EPOW event */
+static bool detect_epow(void)
 {
-	struct opal_msg *power_msg = msg;
+	u16 epow;
+	int i, rc;
+	__be16 epow_classes;
+	__be16 opal_epow_status[OPAL_SYSEPOW_MAX] = {0};
+
+	/*
+	 * Check for EPOW event. Kernel sends supported EPOW classes info
+	 * to OPAL. OPAL returns EPOW info along with classes present.
+	 */
+	epow_classes = cpu_to_be16(OPAL_SYSEPOW_MAX);
+	rc = opal_get_epow_status(opal_epow_status, &epow_classes);
+	if (rc != OPAL_SUCCESS) {
+		pr_err("Failed to get EPOW event information\n");
+		return false;
+	}
+
+	/* Look for EPOW events present */
+	for (i = 0; i < be16_to_cpu(epow_classes); i++) {
+		epow = be16_to_cpu(opal_epow_status[i]);
+
+		/* Filter events which do not need shutdown. */
+		if (i == OPAL_SYSEPOW_POWER)
+			epow &= ~(OPAL_SYSPOWER_CHNG | OPAL_SYSPOWER_FAIL |
+					OPAL_SYSPOWER_INCL);
+		if (epow)
+			return true;
+	}
+
+	return false;
+}
+
+/* Check for existing EPOW, DPO events */
+static bool poweroff_pending(void)
+{
+	int rc;
+	__be64 opal_dpo_timeout;
+
+	/* Check for DPO event */
+	rc = opal_get_dpo_status(&opal_dpo_timeout);
+	if (rc == OPAL_SUCCESS) {
+		pr_info("Existing DPO event detected.\n");
+		return true;
+	}
+
+	/* Check for EPOW event */
+	if (detect_epow()) {
+		pr_info("Existing EPOW event detected.\n");
+		return true;
+	}
+
+	return false;
+}
+
+/* OPAL power-control events notifier */
+static int opal_power_control_event(struct notifier_block *nb,
+					unsigned long msg_type, void *msg)
+{
 	uint64_t type;
 
-	type = be64_to_cpu(power_msg->params[0]);
-
-	switch (type) {
-	case SOFT_REBOOT:
-		pr_info("OPAL: reboot requested\n");
-		orderly_reboot();
+	switch (msg_type) {
+	case OPAL_MSG_EPOW:
+		if (detect_epow()) {
+			pr_info("EPOW msg received. Powering off system\n");
+			orderly_poweroff(true);
+		}
 		break;
-	case SOFT_OFF:
-		pr_info("OPAL: poweroff requested\n");
+	case OPAL_MSG_DPO:
+		pr_info("DPO msg received. Powering off system\n");
 		orderly_poweroff(true);
 		break;
+	case OPAL_MSG_SHUTDOWN:
+		type = be64_to_cpu(((struct opal_msg *)msg)->params[0]);
+		switch (type) {
+		case SOFT_REBOOT:
+			pr_info("Reboot requested\n");
+			orderly_reboot();
+			break;
+		case SOFT_OFF:
+			pr_info("Poweroff requested\n");
+			orderly_poweroff(true);
+			break;
+		default:
+			pr_err("Unknown power-control type %llu\n", type);
+		}
+		break;
 	default:
-		pr_err("OPAL: power control type unexpected %016llx\n", type);
+		pr_err("Unknown OPAL message type %lu\n", msg_type);
 	}
 
 	return 0;
 }
 
+/* OPAL EPOW event notifier block */
+static struct notifier_block opal_epow_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL DPO event notifier block */
+static struct notifier_block opal_dpo_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL power-control event notifier block */
 static struct notifier_block opal_power_control_nb = {
 	.notifier_call	= opal_power_control_event,
 	.next		= NULL,
@@ -51,16 +140,40 @@
 
 static int __init opal_power_control_init(void)
 {
-	int ret;
+	int ret, supported = 0;
+	struct device_node *np;
 
+	/* Register OPAL power-control events notifier */
 	ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
-					     &opal_power_control_nb);
-	if (ret) {
-		pr_err("%s: Can't register OPAL event notifier (%d)\n",
-				__func__, ret);
-		return ret;
+						&opal_power_control_nb);
+	if (ret)
+		pr_err("Failed to register SHUTDOWN notifier, ret = %d\n", ret);
+
+	/* Determine OPAL EPOW, DPO support */
+	np = of_find_node_by_path("/ibm,opal/epow");
+	if (np) {
+		supported = of_device_is_compatible(np, "ibm,opal-v3-epow");
+		of_node_put(np);
 	}
 
+	if (!supported)
+		return 0;
+	pr_info("OPAL EPOW, DPO support detected.\n");
+
+	/* Register EPOW event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_EPOW, &opal_epow_nb);
+	if (ret)
+		pr_err("Failed to register EPOW notifier, ret = %d\n", ret);
+
+	/* Register DPO event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_DPO, &opal_dpo_nb);
+	if (ret)
+		pr_err("Failed to register DPO notifier, ret = %d\n", ret);
+
+	/* Check for any pending EPOW or DPO events. */
+	if (poweroff_pending())
+		orderly_poweroff(true);
+
 	return 0;
 }
 machine_subsys_initcall(powernv, opal_power_control_init);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index d6a7b82..b7a464f 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -202,6 +202,7 @@
 OPAL_CALL(opal_rtc_write,			OPAL_RTC_WRITE);
 OPAL_CALL(opal_cec_power_down,			OPAL_CEC_POWER_DOWN);
 OPAL_CALL(opal_cec_reboot,			OPAL_CEC_REBOOT);
+OPAL_CALL(opal_cec_reboot2,			OPAL_CEC_REBOOT2);
 OPAL_CALL(opal_read_nvram,			OPAL_READ_NVRAM);
 OPAL_CALL(opal_write_nvram,			OPAL_WRITE_NVRAM);
 OPAL_CALL(opal_handle_interrupt,		OPAL_HANDLE_INTERRUPT);
@@ -249,6 +250,7 @@
 OPAL_CALL(opal_pci_mask_pe_error,		OPAL_PCI_MASK_PE_ERROR);
 OPAL_CALL(opal_set_slot_led_status,		OPAL_SET_SLOT_LED_STATUS);
 OPAL_CALL(opal_get_epow_status,			OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_get_dpo_status,			OPAL_GET_DPO_STATUS);
 OPAL_CALL(opal_set_system_attention_led,	OPAL_SET_SYSTEM_ATTENTION_LED);
 OPAL_CALL(opal_pci_next_error,			OPAL_PCI_NEXT_ERROR);
 OPAL_CALL(opal_pci_poll,			OPAL_PCI_POLL);
@@ -297,3 +299,5 @@
 OPAL_CALL(opal_flash_write,			OPAL_FLASH_WRITE);
 OPAL_CALL(opal_flash_erase,			OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,				OPAL_PRD_MSG);
+OPAL_CALL(opal_leds_get_ind,			OPAL_LEDS_GET_INDICATOR);
+OPAL_CALL(opal_leds_set_ind,			OPAL_LEDS_SET_INDICATOR);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index f084afa..230f3a7 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -441,6 +441,7 @@
 int opal_machine_check(struct pt_regs *regs)
 {
 	struct machine_check_event evt;
+	int ret;
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return 0;
@@ -455,6 +456,40 @@
 
 	if (opal_recover_mce(regs, &evt))
 		return 1;
+
+	/*
+	 * Unrecovered machine check; we are heading down the panic path.
+	 *
+	 * We may have hit this MCE in a very early stage of kernel
+	 * initialization, even before opal-prd has started running. If
+	 * this is the case then this MCE error may go unnoticed or
+	 * unanalyzed if we go down the panic path. We need to inform
+	 * BMC/OCC about this error so that they can collect relevant
+	 * data for error analysis before rebooting.
+	 * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
+	 * This function may not return on BMC based system.
+	 */
+	ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable Machine Check exception");
+	if (ret == OPAL_UNSUPPORTED) {
+		pr_emerg("Reboot type %d not supported\n",
+					OPAL_REBOOT_PLATFORM_ERROR);
+	}
+
+	/*
+	 * If we reached here, there are three possibilities:
+	 * 1. We are running on a firmware level that does not support
+	 *    opal_cec_reboot2().
+	 * 2. We are running on a firmware level that does not support
+	 *    the OPAL_REBOOT_PLATFORM_ERROR reboot type.
+	 * 3. We are running on an FSP-based system that does not need OPAL
+	 *    to trigger the checkstop explicitly for error analysis. The FSP
+	 *    PRD component will already have been notified about this
+	 *    error through other channels.
+	 *
+	 * In any case, just fall through; we are heading down the
+	 * panic path anyway.
+	 */
 	return 0;
 }
 
@@ -648,7 +683,7 @@
 
 static int __init opal_init(void)
 {
-	struct device_node *np, *consoles;
+	struct device_node *np, *consoles, *leds;
 	int rc;
 
 	opal_node = of_find_node_by_path("/ibm,opal");
@@ -689,6 +724,13 @@
 	/* Set up a heartbeat thread if requested by OPAL */
 	opal_init_heartbeat();
 
+	/* Create leds platform devices */
+	leds = of_find_node_by_path("/ibm,opal/leds");
+	if (leds) {
+		of_platform_device_create(leds, "opal_leds", NULL);
+		of_node_put(leds);
+	}
+
 	/* Create "opal" kobject under /sys/firmware */
 	rc = opal_sysfs_init();
 	if (rc == 0) {
@@ -841,3 +883,6 @@
 EXPORT_SYMBOL_GPL(opal_tpo_read);
 EXPORT_SYMBOL_GPL(opal_tpo_write);
 EXPORT_SYMBOL_GPL(opal_i2c_request);
+/* Export these symbols for PowerNV LED class driver */
+EXPORT_SYMBOL_GPL(opal_leds_get_ind);
+EXPORT_SYMBOL_GPL(opal_leds_set_ind);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 85cbc96..2927cd5 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -140,11 +140,9 @@
 		return;
 	}
 
-	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
-		pr_warn("%s: PE %d was assigned on PHB#%x\n",
-			__func__, pe_no, phb->hose->global_number);
-		return;
-	}
+	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
+		pr_debug("%s: PE %d was reserved on PHB#%x\n",
+			 __func__, pe_no, phb->hose->global_number);
 
 	phb->ioda.pe_array[pe_no].phb = phb;
 	phb->ioda.pe_array[pe_no].pe_number = pe_no;
@@ -231,61 +229,60 @@
 	return -EIO;
 }
 
-static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_dev_m64_pe(struct pci_dev *pdev,
+					 unsigned long *pe_bitmap)
 {
-	resource_size_t sgsz = phb->ioda.m64_segsize;
-	struct pci_dev *pdev;
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct resource *r;
-	int base, step, i;
+	resource_size_t base, sgsz, start, end;
+	int segno, i;
 
-	/*
-	 * Root bus always has full M64 range and root port has
-	 * M64 range used in reality. So we're checking root port
-	 * instead of root bus.
-	 */
-	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
-		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
-			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
-			if (!r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
+	base = phb->ioda.m64_base;
+	sgsz = phb->ioda.m64_segsize;
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		r = &pdev->resource[i];
+		if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+			continue;
 
-			base = (r->start - phb->ioda.m64_base) / sgsz;
-			for (step = 0; step < resource_size(r) / sgsz; step++)
-				pnv_ioda_reserve_pe(phb, base + step);
+		start = _ALIGN_DOWN(r->start - base, sgsz);
+		end = _ALIGN_UP(r->end - base, sgsz);
+		for (segno = start / sgsz; segno < end / sgsz; segno++) {
+			if (pe_bitmap)
+				set_bit(segno, pe_bitmap);
+			else
+				pnv_ioda_reserve_pe(phb, segno);
 		}
 	}
 }
 
-static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
-				 struct pci_bus *bus, int all)
+static void pnv_ioda2_reserve_m64_pe(struct pci_bus *bus,
+				     unsigned long *pe_bitmap,
+				     bool all)
 {
-	resource_size_t segsz = phb->ioda.m64_segsize;
 	struct pci_dev *pdev;
-	struct resource *r;
+
+	list_for_each_entry(pdev, &bus->devices, bus_list) {
+		pnv_ioda2_reserve_dev_m64_pe(pdev, pe_bitmap);
+
+		if (all && pdev->subordinate)
+			pnv_ioda2_reserve_m64_pe(pdev->subordinate,
+						 pe_bitmap, all);
+	}
+}
+
+static int pnv_ioda2_pick_m64_pe(struct pci_bus *bus, bool all)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pnv_ioda_pe *master_pe, *pe;
 	unsigned long size, *pe_alloc;
-	bool found;
-	int start, i, j;
+	int i;
 
 	/* Root bus shouldn't use M64 */
 	if (pci_is_root_bus(bus))
 		return IODA_INVALID_PE;
 
-	/* We support only one M64 window on each bus */
-	found = false;
-	pci_bus_for_each_resource(bus, r, i) {
-		if (r && r->parent &&
-		    pnv_pci_is_mem_pref_64(r->flags)) {
-			found = true;
-			break;
-		}
-	}
-
-	/* No M64 window found ? */
-	if (!found)
-		return IODA_INVALID_PE;
-
 	/* Allocate bitmap */
 	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
 	pe_alloc = kzalloc(size, GFP_KERNEL);
@@ -295,35 +292,8 @@
 		return IODA_INVALID_PE;
 	}
 
-	/*
-	 * Figure out reserved PE numbers by the PE
-	 * the its child PEs.
-	 */
-	start = (r->start - phb->ioda.m64_base) / segsz;
-	for (i = 0; i < resource_size(r) / segsz; i++)
-		set_bit(start + i, pe_alloc);
-
-	if (all)
-		goto done;
-
-	/*
-	 * If the PE doesn't cover all subordinate buses,
-	 * we need subtract from reserved PEs for children.
-	 */
-	list_for_each_entry(pdev, &bus->devices, bus_list) {
-		if (!pdev->subordinate)
-			continue;
-
-		pci_bus_for_each_resource(pdev->subordinate, r, i) {
-			if (!r || !r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
-
-			start = (r->start - phb->ioda.m64_base) / segsz;
-			for (j = 0; j < resource_size(r) / segsz ; j++)
-				clear_bit(start + j, pe_alloc);
-                }
-        }
+	/* Figure out reserved PE numbers by the PE */
+	pnv_ioda2_reserve_m64_pe(bus, pe_alloc, all);
 
 	/*
 	 * the current bus might not own M64 window and that's all
@@ -339,7 +309,6 @@
 	 * Figure out the master PE and put all slave PEs to master
 	 * PE's list to form compound PE.
 	 */
-done:
 	master_pe = NULL;
 	i = -1;
 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
@@ -653,7 +622,7 @@
 		pdev = pe->pdev->bus->self;
 #ifdef CONFIG_PCI_IOV
 	else if (pe->flags & PNV_IODA_PE_VF)
-		pdev = pe->parent_dev->bus->self;
+		pdev = pe->parent_dev;
 #endif /* CONFIG_PCI_IOV */
 	while (pdev) {
 		struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -732,7 +701,7 @@
 		parent = parent->bus->self;
 	}
 
-	opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
 				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 
 	/* Disassociate PE in PELT */
@@ -946,8 +915,9 @@
 		res2 = *res;
 		res->start += size * offset;
 
-		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
-			 i, &res2, res, num_vfs, offset);
+		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
+			 i, &res2, res, (offset > 0) ? "En" : "Dis",
+			 num_vfs, offset);
 		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
 	}
 	return 0;
@@ -1050,7 +1020,7 @@
  * subordinate PCI devices and buses. The second type of PE is normally
  * originated by PCIe-to-PCI bridge or PLX switch downstream ports.
  */
-static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
+static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 {
 	struct pci_controller *hose = pci_bus_to_host(bus);
 	struct pnv_phb *phb = hose->private_data;
@@ -1059,7 +1029,7 @@
 
 	/* Check if PE is determined by M64 */
 	if (phb->pick_m64_pe)
-		pe_num = phb->pick_m64_pe(phb, bus, all);
+		pe_num = phb->pick_m64_pe(bus, all);
 
 	/* The PE number isn't pinned by M64 */
 	if (pe_num == IODA_INVALID_PE)
@@ -1117,12 +1087,12 @@
 {
 	struct pci_dev *dev;
 
-	pnv_ioda_setup_bus_PE(bus, 0);
+	pnv_ioda_setup_bus_PE(bus, false);
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->subordinate) {
 			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
-				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
+				pnv_ioda_setup_bus_PE(dev->subordinate, true);
 			else
 				pnv_ioda_setup_PEs(dev->subordinate);
 		}
@@ -1147,7 +1117,7 @@
 
 		/* M64 layout might affect PE allocation */
 		if (phb->reserve_m64_pe)
-			phb->reserve_m64_pe(phb);
+			phb->reserve_m64_pe(hose->bus, NULL, true);
 
 		pnv_ioda_setup_PEs(hose->bus);
 	}
@@ -1590,6 +1560,7 @@
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	/*
 	 * Note: iommu_add_device() will fail here as
@@ -1620,19 +1591,18 @@
 	if (bypass) {
 		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
 		set_dma_ops(&pdev->dev, &dma_direct_ops);
-		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	} else {
 		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(&pdev->dev, &dma_iommu_ops);
-		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	}
 	*pdev->dev.dma_mask = dma_mask;
 	return 0;
 }
 
-static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
-					      struct pci_dev *pdev)
+static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 {
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
 	u64 end, mask;
@@ -1659,6 +1629,7 @@
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+		set_dma_offset(&dev->dev, pe->tce_bypass_base);
 		iommu_add_device(&dev->dev);
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -3057,6 +3028,7 @@
        .window_alignment = pnv_pci_window_alignment,
        .reset_secondary_bus = pnv_pci_reset_secondary_bus,
        .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+       .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
        .shutdown = pnv_pci_ioda_shutdown,
 };
 
@@ -3203,7 +3175,6 @@
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
-	phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
 
 	/* Setup MSI support */
 	pnv_pci_init_ioda_msis(phb);
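
pnv_pci_ioda_dma_get_required_mask() now looks up its PHB from the device itself and is wired up through the controller ops table rather than a pnv_phb callback, which lets the pnv_pci_dma_get_required_mask() trampoline disappear in the pci.c hunk below. The mask computation itself is not visible in this hunk; a hedged sketch of the usual fls64 idiom it follows — cover everything up to the end of the reachable window — with an illustrative window end:

#include <stdio.h>
#include <stdint.h>

static int fls64(uint64_t x)		/* highest set bit, 1-based */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static uint64_t required_mask(uint64_t window_end)
{
	uint64_t mask = 1ULL << (fls64(window_end) - 1);

	return mask + (mask - 1);	/* all ones up past window_end */
}

int main(void)
{
	/* A window ending at 2GiB yields a 32-bit mask (0xffffffff). */
	printf("mask = %#llx\n",
	       (unsigned long long)required_mask(0x80000000ULL));
	return 0;
}
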
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bc6d4e0..9b2480b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -761,17 +761,6 @@
 		phb->dma_dev_setup(phb, pdev);
 }
 
-u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-	struct pnv_phb *phb = hose->private_data;
-
-	if (phb && phb->dma_get_required_mask)
-		return phb->dma_get_required_mask(phb, pdev);
-
-	return __dma_get_required_mask(&pdev->dev);
-}
-
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8ef2d28..c8ff50e 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -105,13 +105,12 @@
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
-	u64 (*dma_get_required_mask)(struct pnv_phb *phb,
-				     struct pci_dev *pdev);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	int (*init_m64)(struct pnv_phb *phb);
-	void (*reserve_m64_pe)(struct pnv_phb *phb);
-	int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
+	void (*reserve_m64_pe)(struct pci_bus *bus,
+			       unsigned long *pe_bitmap, bool all);
+	int (*pick_m64_pe)(struct pci_bus *bus, bool all);
 	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
 	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
 	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 9269e30..6dbc0a1 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -12,15 +12,9 @@
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
-extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
-
-static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	return 0;
-}
 #endif
 
 extern u32 pnv_get_supported_cpuidle_states(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 6eb808f..5dcbdea 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -128,7 +128,7 @@
 
 	pr_info_once("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = powernv_get_random_long;
+	ppc_md.get_random_seed = powernv_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 53737e0..685b3cb 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -165,14 +165,6 @@
 {
 }
 
-static u64 pnv_dma_get_required_mask(struct device *dev)
-{
-	if (dev_is_pci(dev))
-		return pnv_pci_dma_get_required_mask(to_pci_dev(dev));
-
-	return __dma_get_required_mask(dev);
-}
-
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -243,6 +235,13 @@
 	} else {
 		/* Primary waits for the secondaries to have reached OPAL */
 		pnv_kexec_wait_secondaries_down();
+
+		/*
+		 * We might be running as little-endian - now that interrupts
+		 * are disabled, reset the HILE bit to big-endian so we don't
+		 * take interrupts in the wrong endian later
+		 */
+		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
 	}
 }
 #endif /* CONFIG_KEXEC */
@@ -314,7 +313,6 @@
 	.machine_shutdown	= pnv_shutdown,
 	.power_save             = power7_idle,
 	.calibrate_decr		= generic_calibrate_decr,
-	.dma_get_required_mask	= pnv_dma_get_required_mask,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index f60f80a..503a73f 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -190,7 +190,7 @@
 
 	hid0 = mfspr(SPRN_HID0);
 	hid0 &= ~HID0_POWER8_DYNLPARDIS;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	while (mfspr(SPRN_HID0) & mask)
@@ -227,7 +227,7 @@
 	/* Write new mode */
 	hid0  = mfspr(SPRN_HID0);
 	hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	/* Wait for it to happen */
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 0ced387..e9ff44c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -92,13 +92,12 @@
 		return NULL;
 
 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
-	new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+	new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
 	if (!new_prop->name || !new_prop->value) {
 		dlpar_free_drconf_property(new_prop);
 		return NULL;
 	}
 
-	memcpy(new_prop->value, prop->value, prop->length);
 	new_prop->length = prop->length;
 
 	/* Convert the property to cpu endian-ness */
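
The hotplug hunk collapses the kmalloc() plus memcpy() pair into kmemdup(), which allocates and copies in one call and fails the same way (NULL). A userspace analogue of the pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): one call allocates and copies,
 * failing with NULL exactly as the kmalloc()+memcpy() pair did. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char prop[] = "ibm,dynamic-memory";
	char *copy = memdup(prop, sizeof(prop));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}

Besides being shorter, the single call removes the window in which new_prop->value exists but is still uninitialized.
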
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 10510de..0946b98 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1253,11 +1253,10 @@
 		}
 	}
 
-	/* fall back on iommu ops, restore table pointer with ops */
+	/* fall back on iommu ops */
 	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
 		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
-		pci_dma_dev_setup_pSeriesLP(pdev);
 	}
 
 check_mask:
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 02e4a17..3b6647e 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -189,7 +189,8 @@
 	int state;
 	int critical;
 
-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+				      &state);
 
 	if (state > 3)
 		critical = 1;		/* Time Critical */
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index e096087..31ca557 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -38,7 +38,7 @@
 
 	pr_info("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = pseries_get_random_long;
+	ppc_md.get_random_seed = pseries_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index df6a704..39a74fa 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -254,19 +254,26 @@
 static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct of_reconfig_data *rd = data;
-	struct device_node *np = rd->dn;
-	struct pci_dn *pci = NULL;
+	struct device_node *parent, *np = rd->dn;
+	struct pci_dn *pdn;
 	int err = NOTIFY_OK;
 
 	switch (action) {
 	case OF_RECONFIG_ATTACH_NODE:
-		pci = np->parent->data;
-		if (pci) {
-			update_dn_pci_info(np, pci->phb);
-
-			/* Create EEH device for the OF node */
-			eeh_dev_init(PCI_DN(np), pci->phb);
+		parent = of_get_parent(np);
+		pdn = parent ? PCI_DN(parent) : NULL;
+		if (pdn) {
+			/* Create pdn and EEH device */
+			update_dn_pci_info(np, pdn->phb);
+			eeh_dev_init(PCI_DN(np), pdn->phb);
 		}
+
+		of_node_put(parent);
+		break;
+	case OF_RECONFIG_DETACH_NODE:
+		pdn = PCI_DN(np);
+		if (pdn)
+			list_del(&pdn->list);
 		break;
 	default:
 		err = NOTIFY_DONE;
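
The notifier now goes through of_get_parent(), which takes a reference on the parent node, and balances it with of_node_put() on every exit path — of_node_put() tolerates NULL, so the failure case needs no special handling. A toy refcounted sketch of that discipline; the node layout and field names are illustrative, not the kernel's:

#include <stdio.h>

struct node {
	int refcount;
	struct node *parent;
	void *data;
};

static struct node *get_parent(struct node *np)
{
	if (!np || !np->parent)
		return NULL;
	np->parent->refcount++;		/* of_get_parent() takes a ref */
	return np->parent;
}

static void put_node(struct node *np)
{
	if (np)				/* of_node_put() accepts NULL */
		np->refcount--;
}

static void handle_attach(struct node *np)
{
	struct node *parent = get_parent(np);

	if (parent && parent->data)
		printf("create pdn and EEH device under parent\n");

	put_node(parent);		/* balanced on every path */
}

int main(void)
{
	struct node root = { 1, NULL, (void *)"phb" };
	struct node child = { 1, &root, NULL };

	handle_attach(&child);
	printf("root refcount back to %d\n", root.refcount);
	return 0;
}
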
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index f86250c..d2b79bc 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -141,13 +141,14 @@
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void **kaddr, unsigned long *pfn, long size)
+		       void __pmem **kaddr, unsigned long *pfn)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
+	void *addr = (void *)(bank->ph_addr + offset);
 
-	*kaddr = (void *)(bank->ph_addr + offset);
-	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+	*kaddr = (void __pmem *)addr;
+	*pfn = virt_to_phys(addr) >> PAGE_SHIFT;
 
 	return bank->size - offset;
 }
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4f78695..e2ea519 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -147,7 +147,7 @@
 	spin_lock_irqsave(&cpm_muram_lock, flags);
 	cpm_muram_info.alignment = align;
 	start = rh_alloc(&cpm_muram_info, size, "commproc");
-	memset(cpm_muram_addr(start), 0, size);
+	memset_io(cpm_muram_addr(start), 0, size);
 	spin_unlock_irqrestore(&cpm_muram_lock, flags);
 
 	return start;
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 90bcdfe..b734863 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -313,20 +313,11 @@
 	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void dma_dev_setup_dart(struct device *dev)
-{
-	/* We only have one iommu table on the mac for now, which makes
-	 * things simple. Setup all PCI devices to point to this table
-	 */
-	if (get_dma_ops(dev) == &dma_direct_ops)
-		set_dma_offset(dev, DART_U4_BYPASS_BASE);
-	else
-		set_iommu_table_base(dev, &iommu_table_dart);
-}
-
 static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-	dma_dev_setup_dart(&dev->dev);
+	if (dart_is_u4)
+		set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE);
+	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
@@ -370,7 +361,6 @@
 		dev_info(dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
 	}
-	dma_dev_setup_dart(dev);
 
 	*dev->dma_mask = dma_mask;
 	return 0;
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index af3c144..52a93dc 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -132,7 +132,7 @@
 	struct pci_controller *phb;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (IS_ERR(mem)) {
+	if (!mem) {
 		dev_err(dev, "Unable to get mmio space\n");
 		return -EINVAL;
 	}
@@ -157,7 +157,7 @@
 		goto out;
 
 	ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
-	if (IS_ERR(ppc4xx_hsta_msi.irq_map)) {
+	if (!ppc4xx_hsta_msi.irq_map) {
 		ret = -ENOMEM;
 		goto out1;
 	}
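
Both fixes in this file correct the same class of bug: platform_get_resource() and kmalloc() report failure by returning NULL, not an encoded ERR_PTR(), so the old IS_ERR() checks could never trigger and the error paths were dead code. A compact illustration of the two failure conventions, with minimal stand-ins for the kernel's err.h helpers:

#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR machinery. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
	void *null_style = NULL;		/* kmalloc, platform_get_resource */
	void *errptr_style = ERR_PTR(-12);	/* APIs that encode -ENOMEM etc. */

	printf("IS_ERR(NULL)    = %d  (why '!mem' is the right check)\n",
	       IS_ERR(null_style));
	printf("IS_ERR(ERR_PTR) = %d\n", IS_ERR(errptr_style));
	return 0;
}
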
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e599259..6ef1231 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1987,7 +1987,6 @@
 			case '^':
 				adrs -= size;
 				break;
-				break;
 			case '/':
 				if (nslash > 0)
 					adrs -= 1 << nslash;
@@ -2731,7 +2730,7 @@
 void dump_segments(void)
 {
 	int i;
-	unsigned long esid,vsid,valid;
+	unsigned long esid,vsid;
 	unsigned long llp;
 
 	printf("SLB contents of cpu 0x%x\n", smp_processor_id());
@@ -2739,10 +2738,9 @@
 	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
 		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
-		valid = (esid & SLB_ESID_V);
-		if (valid | esid | vsid) {
+		if (esid || vsid) {
 			printf("%02d %016lx %016lx", i, esid, vsid);
-			if (valid) {
+			if (esid & SLB_ESID_V) {
 				llp = vsid & SLB_VSID_LLP;
 				if (vsid & SLB_VSID_B_1T) {
 					printf("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx \n",
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe380..117fa5c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_XOR	"lax"
 #define __ATOMIC_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_XOR	"xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -118,15 +120,17 @@
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -167,6 +171,7 @@
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_XOR	"laxg"
 #define __ATOMIC64_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -189,6 +194,7 @@
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_XOR	"xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -247,16 +253,6 @@
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP)						\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
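
Rather than keeping bespoke atomic_{set,clear}_mask() helpers, the s390 rework generates atomic_and/atomic_or/atomic_xor (and their 64-bit twins) from a single ATOMIC_OP(op, OP) template that selects the right instruction mnemonic. A sketch of the same generator trick, using GCC's __atomic builtins in place of the __ATOMIC_LOOP assembly:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Stamp out atomic_and/or/xor from one template, as the patch does;
 * __atomic_fetch_* stands in for the s390 __ATOMIC_LOOP body. */
#define ATOMIC_OP(op, builtin)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	builtin(&v->counter, i, __ATOMIC_RELAXED);			\
}

ATOMIC_OP(and, __atomic_fetch_and)
ATOMIC_OP(or,  __atomic_fetch_or)
ATOMIC_OP(xor, __atomic_fetch_xor)

#undef ATOMIC_OP

int main(void)
{
	atomic_t v = { 0xf0 };

	atomic_or(0x0f, &v);
	atomic_and(0x3f, &v);
	atomic_xor(0x01, &v);
	printf("%#x\n", v.counter);	/* 0xff, then 0x3f, then 0x3e */
	return 0;
}
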
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615..d48fe01 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
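
ACCESS_ONCE() gives way to the direction-specific WRITE_ONCE()/READ_ONCE() pair, which also work for types wider than a machine word. A simplified, scalar-only sketch of the volatile-cast core of those macros:

#include <stdio.h>

/* Scalar-only forms; the kernel versions additionally handle
 * aggregate types with a memcpy-like copy. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;

	WRITE_ONCE(flag, 1);	/* the compiler may not tear or elide it */
	printf("%d\n", READ_ONCE(flag));
	return 0;
}
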
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7..7f9fd5e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 		".pushsection __jump_table, \"aw\"\n"
 		".balign 8\n"
 		".quad 0b, %l[label], %0\n"
 		".popsection\n"
-		: : "X" (key) : : label);
+		: : "X" (&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("0:	brcl 15, %l[label]\n"
+		".pushsection __jump_table, \"aw\"\n"
+		".balign 8\n"
+		".quad 0b, %l[label], %0\n"
+		".popsection\n"
+		: : "X" (&((char *)key)[branch]) : : label);
+
 	return false;
 label:
 	return true;
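
Both variants now record &((char *)key)[branch] in the __jump_table entry: since a static_key is at least 2-byte aligned, the low bit of the stored pointer is free to carry the branch polarity, and the generic jump-label code masks it back out at patch time. A userspace sketch of packing and unpacking that bit:

#include <stdio.h>
#include <stdint.h>

struct static_key { int enabled; } __attribute__((aligned(2)));

int main(void)
{
	struct static_key key = { 0 };
	int branch = 1;

	/* Pack: alignment guarantees bit 0 of &key is zero. */
	uintptr_t entry = (uintptr_t)&((char *)&key)[branch];

	/* Unpack, as the generic jump_label code does when patching. */
	struct static_key *k = (struct static_key *)(entry & ~1UL);
	int b = entry & 1;

	printf("key recovered: %d, branch bit: %d\n", k == &key, b);
	return 0;
}
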
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index c9dac21..083b05f 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -61,7 +61,7 @@
 {
 	struct insn old, new;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		jump_label_make_nop(entry, &old);
 		jump_label_make_branch(entry, &new);
 	} else {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 52524b9..017c3a9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -378,7 +378,7 @@
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -389,7 +389,7 @@
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50..5c2c169 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		    &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -199,7 +199,7 @@
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1026,7 +1026,7 @@
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1041,7 +1041,7 @@
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1067,7 +1067,7 @@
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1139,7 +1139,7 @@
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1183,7 +1183,7 @@
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1196,7 +1196,7 @@
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1375,13 +1375,13 @@
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98df53c..c91eb94 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1333,12 +1333,12 @@
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
 	save_fpu_regs();
@@ -1443,9 +1443,9 @@
 						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1557,24 +1557,24 @@
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1583,7 +1583,7 @@
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1807,19 +1807,19 @@
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1894,7 +1894,7 @@
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1903,7 +1903,7 @@
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
 					  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2419,7 +2419,7 @@
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2445,7 +2445,7 @@
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
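
Every conversion in the KVM and timekeeping hunks follows one mapping: atomic_set_mask(m, v) becomes atomic_or(m, v), and atomic_clear_mask(m, v) becomes atomic_andnot(m, v), since clearing bits is an AND with the complement of the mask. A sketch of the equivalence, with GCC builtins standing in for the arch implementations:

#include <stdio.h>

typedef struct { unsigned int counter; } atomic_t;

static void atomic_or(unsigned int m, atomic_t *v)
{
	__atomic_fetch_or(&v->counter, m, __ATOMIC_RELAXED);
}

static void atomic_andnot(unsigned int m, atomic_t *v)
{
	/* clear_mask(m): counter &= ~m */
	__atomic_fetch_and(&v->counter, ~m, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_t flags = { 0 };

	atomic_or(0x80000000u, &flags);		/* old atomic_set_mask   */
	atomic_andnot(0x80000000u, &flags);	/* old atomic_clear_mask */
	printf("%#x\n", flags.counter);		/* back to 0 */
	return 0;
}
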
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0d002a7..ae4de55 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
 						 unsigned long size)
@@ -104,7 +104,7 @@
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
@@ -177,7 +177,7 @@
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
@@ -240,7 +240,7 @@
 
 unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
@@ -312,7 +312,7 @@
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 			return clear_user_mvcos(to, size);
 	return clear_user_xc(to, size);
 }
@@ -373,7 +373,7 @@
 static int __init uaccess_init(void)
 {
 	if (test_facility(27))
-		static_key_slow_inc(&have_mvcos);
+		static_branch_enable(&have_mvcos);
 	return 0;
 }
 early_initcall(uaccess_init);
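
uaccess.c moves to the newer static-branch API: DEFINE_STATIC_KEY_FALSE() declares the key, static_branch_enable() flips it once from an early initcall, and static_branch_likely() turns each use into a patched jump on the fast path. A mock of the control flow, with a plain bool standing in for the kernel's runtime code patching:

#include <stdio.h>
#include <stdbool.h>

/* Mock of the static-branch API; the real kernel rewrites the
 * instruction stream instead of testing a variable. */
#define DEFINE_STATIC_KEY_FALSE(name)	static bool name = false
#define static_branch_likely(key)	(__builtin_expect(*(key), 1))
#define static_branch_enable(key)	(*(key) = true)

DEFINE_STATIC_KEY_FALSE(have_mvcos);

static unsigned long copy_mvcos(void) { return 0; }	/* fast path */
static unsigned long copy_mvcp(void)  { return 1; }	/* fallback  */

static unsigned long copy_from_user_mock(void)
{
	if (static_branch_likely(&have_mvcos))
		return copy_mvcos();
	return copy_mvcp();
}

int main(void)
{
	printf("before enable: %lu\n", copy_from_user_mock());
	static_branch_enable(&have_mvcos);	/* done once at init */
	printf("after enable:  %lu\n", copy_from_user_mock());
	return 0;
}
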
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 2963b56..c3c07d3 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -169,7 +169,7 @@
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 97a5fda..b94df40 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -48,47 +48,12 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-	unsigned int _mask = ~mask;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   and     %2,   %0      \n\t" /* add */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (_mask)
-		: "memory" , "r0", "r1");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   or      %2,   %0      \n\t" /* or */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (mask)
-		: "memory" , "r0", "r1");
-}
-
 #endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 61d1075..23fcdad 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -37,27 +37,12 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 8575dcc..33d34b1 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -52,37 +52,12 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
-"	and	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (~mask), "r" (&v->counter)
-	: "t");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_set_mask	\n"
-"	or	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (mask), "r" (&v->counter)
-	: "t");
-}
-
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index e79fb6e..1f157b8 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -9,7 +9,7 @@
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 
-#define MCOUNT_ADDR		((long)(mcount))
+#define MCOUNT_ADDR		((unsigned long)(mcount))
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define CALL_ADDR		((long)(ftrace_call))
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 93ec906..3280a6b 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -342,6 +342,7 @@
 {
 	return __ioremap_mode(offset, size, PAGE_KERNEL);
 }
+#define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 static inline void __iomem *
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index c187b95..f27c618 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -343,7 +343,7 @@
 	CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
 	CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]),
 	CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
-	CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+	CLKDEV_DEV_ID("sh-vou", &mstp_clks[HWBLK_VOU]),
 	CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
 	CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU0]),
 	CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 2790b6a..7549186 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -485,10 +485,10 @@
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	pg_data_t *pgdat;
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
@@ -496,7 +496,8 @@
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
 	ret = __add_pages(nid, pgdat->node_zones +
-			zone_for_memory(nid, start, size, ZONE_NORMAL),
+			zone_for_memory(nid, start, size, ZONE_NORMAL,
+			for_device),
 			start_pfn, nr_pages);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
@@ -517,7 +518,7 @@
 #ifdef CONFIG_MEMORY_HOTREMOVE
 int arch_remove_memory(u64 start, u64 size)
 {
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct zone *zone;
 	int ret;
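
The sh hunks swap open-coded '>> PAGE_SHIFT' shifts for PFN_DOWN(), which names the rounding direction. The macro pair with a worked value, assuming a 4KiB page (PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round down */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round up   */

int main(void)
{
	unsigned long start = 0x10001234UL;	/* not page aligned */

	printf("PFN_DOWN = %#lx, PFN_UP = %#lx\n",
	       PFN_DOWN(start), PFN_UP(start));	/* 0x10001, 0x10002 */
	return 0;
}
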
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index bce52ba..05713d1 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -33,8 +33,8 @@
 	/* Don't allow bogus node assignment */
 	BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
 
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = end >> PAGE_SHIFT;
+	start_pfn = PFN_DOWN(start);
+	end_pfn = PFN_DOWN(end);
 
 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
 			 PAGE_KERNEL);
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e..7dcbebb 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-
 #define ATOMIC_INIT(i)  { (i) }
 
 int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 4082749..917084a 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e..14a9286 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index 9ec94ad..3192a8e 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
 #define _ASM_SPARC64_FTRACE
 
 #ifdef CONFIG_MCOUNT
-#define MCOUNT_ADDR		((long)(_mcount))
+#define MCOUNT_ADDR		((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a..62d0354 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-		asm_volatile_goto("1:\n\t"
-			 "nop\n\t"
-			 "nop\n\t"
-			 ".pushsection __jump_table,  \"aw\"\n\t"
-			 ".align 4\n\t"
-			 ".word 1b, %l[l_yes], %c0\n\t"
-			 ".popsection \n\t"
-			 : :  "i" (key) : : l_yes);
+	asm_volatile_goto("1:\n\t"
+		 "nop\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index f06b36a..91b963a 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -14,7 +14,7 @@
 #include <asm-generic/4level-fixup.h>
 
 #include <linux/spinlock.h>
-#include <linux/swap.h>
+#include <linux/mm_types.h>
 #include <asm/types.h>
 #include <asm/pgtsrmmu.h>
 #include <asm/vaddrs.h>
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c1..59bbeff 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@
 	u32 val;
 	u32 *insn = (u32 *) (unsigned long) entry->code;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		s32 off = (s32)entry->target - (s32)entry->code;
 
 #ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 3a14a35..b91d7f1 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -231,8 +231,7 @@
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
-			      | IORESOURCE_SIZEALIGN;
+			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65a..b9d63c0 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@
 
 #endif /* SMP */
 
-#define ATOMIC_OP(op, cop)						\
+#define ATOMIC_OP_RETURN(op, c_op)					\
 int atomic_##op##_return(int i, atomic_t *v)				\
 {									\
 	int ret;							\
 	unsigned long flags;						\
 	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 									\
-	ret = (v->counter cop i);					\
+	ret = (v->counter c_op i);					\
 									\
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 	return ret;							\
 }									\
 EXPORT_SYMBOL(atomic_##op##_return);
 
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op)						\
+void atomic_##op(int i, atomic_t *v)					\
+{									\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	v->counter c_op i;						\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+}									\
+EXPORT_SYMBOL(atomic_##op);
 
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
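
sparc32 has no suitable atomic instructions for these operations, so the library serializes each one with a spinlock chosen by hashing the atomic's address; the new ATOMIC_OP template simply stamps out and/or/xor bodies under that scheme. A pthread sketch of the hash-a-lock idea — the hash shift and table size are illustrative, not sparc's actual ATOMIC_HASH():

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define ATOMIC_HASH_SIZE 4
static pthread_mutex_t atomic_locks[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

typedef struct { int counter; } atomic_t;

/* Pick a lock by hashing the address, as ATOMIC_HASH() does. */
static pthread_mutex_t *atomic_hash(const atomic_t *v)
{
	return &atomic_locks[((uintptr_t)v >> 4) % ATOMIC_HASH_SIZE];
}

#define ATOMIC_OP(op, c_op)						\
static void atomic_##op(int i, atomic_t *v)				\
{									\
	pthread_mutex_t *lock = atomic_hash(v);				\
	pthread_mutex_lock(lock);					\
	v->counter c_op i;						\
	pthread_mutex_unlock(lock);					\
}

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

int main(void)
{
	atomic_t v = { 0x0f };

	atomic_or(0xf0, &v);
	atomic_and(0x7f, &v);
	atomic_xor(0x01, &v);
	printf("%#x\n", v.counter);	/* 0xff, then 0x7f, then 0x7e */
	return 0;
}
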
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac43..d6b0363 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce1..8eb454c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 9def1f5..2ba12d7 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -32,6 +32,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_SECCOMP_FILTER
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -221,6 +222,22 @@
 	  If enabled, the kernel will support running TILE-Gx binaries
 	  that were built with the -m32 option.
 
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl, it cannot be disabled and the task is only
+	  allowed to execute a few safe syscalls defined by each seccomp
+	  mode.
+
+	  If unsure, say N.
+
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d8a8431..ba35c41 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -28,6 +28,7 @@
 generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += seccomp.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fa..d320ce2 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970..096a56d 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
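
TILE-Gx has fetch-and and fetch-or instructions but no fetch-xor, so atomic_xor()/atomic64_xor() above are built from a compare-exchange retry loop: guess the current value, try to install guess ^ i, and retry if another writer got in first. The same pattern expressed with the GCC builtin:

#include <stdio.h>

typedef struct { long counter; } atomic64_t;

/* Compare-exchange retry loop mirroring the tile atomic64_xor():
 * recompute guess ^ i until no other writer slipped in between. */
static void atomic64_xor(long i, atomic64_t *v)
{
	long guess = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(&v->counter, &guess, guess ^ i,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;	/* on failure, guess holds the current value */
}

int main(void)
{
	atomic64_t v = { 0x5aL };

	atomic64_xor(0xffL, &v);
	printf("%#lx\n", v.counter);	/* 0xa5 */
	return 0;
}
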
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 41d9878..c505d77 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
 #include <arch/chip.h>
 
 #include <linux/ptrace.h>
+#include <linux/elf-em.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
@@ -30,9 +31,6 @@
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILEPRO 188
-#define EM_TILEGX  191
-
 /* Provide a nominal data structure. */
 #define ELF_NFPREG	0
 typedef double elf_fpreg_t;
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index 9644b88..373d730 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <arch/abi.h>
 
 /* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ?: val;
+	if (error) {
+		/* R0 is the passed-in negative error, R1 is positive. */
+		regs->regs[0] = error;
+		regs->regs[1] = -error;
+	} else {
+		/* R1 set to zero to indicate no error. */
+		regs->regs[0] = val;
+		regs->regs[1] = 0;
+	}
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@
 	memcpy(&regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls on both little- and big-endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+	return AUDIT_ARCH_TILEGX;
+#else
+	return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
 #endif	/* _ASM_TILE_SYSCALL_H */
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index d76ff2d..9e46eaa 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -830,11 +830,11 @@
   ADDX_RRR_0_OPCODE_X0 = 2,
   ADDX_RRR_0_OPCODE_X1 = 2,
   ADDX_RRR_0_OPCODE_Y0 = 0,
-  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADDX_RRR_0_OPCODE_Y1 = 0,
   ADD_RRR_0_OPCODE_X0 = 3,
   ADD_RRR_0_OPCODE_X1 = 3,
   ADD_RRR_0_OPCODE_Y0 = 1,
-  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ADD_RRR_0_OPCODE_Y1 = 1,
   ANDI_IMM8_OPCODE_X0 = 3,
   ANDI_IMM8_OPCODE_X1 = 3,
   ANDI_OPCODE_Y0 = 2,
@@ -995,6 +995,7 @@
   LD4U_ADD_IMM8_OPCODE_X1 = 12,
   LD4U_OPCODE_Y2 = 2,
   LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_ADD_IMM8_OPCODE_X1 = 21,
   LDNA_UNARY_OPCODE_X1 = 21,
   LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
   LDNT1S_UNARY_OPCODE_X1 = 22,
@@ -1015,7 +1016,6 @@
   LD_UNARY_OPCODE_X1 = 29,
   LNK_UNARY_OPCODE_X1 = 30,
   LNK_UNARY_OPCODE_Y1 = 14,
-  LWNA_ADD_IMM8_OPCODE_X1 = 21,
   MFSPR_IMM8_OPCODE_X1 = 22,
   MF_UNARY_OPCODE_X1 = 31,
   MM_BF_OPCODE_X0 = 7,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cdbda45..fbbe2ea 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1224,6 +1224,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	blz     r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 800b91d..58964d2 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1247,6 +1247,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	bltz    r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index f84eed8..bdc126f 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -262,6 +262,9 @@
 	if (work & _TIF_NOHZ)
 		user_exit();
 
+	if (secure_computing() == -1)
+		return -1;
+
 	if (work & _TIF_SYSCALL_TRACE) {
 		if (tracehook_report_syscall_entry(regs))
 			regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 00178ec..178989e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -140,10 +140,10 @@
  * Whenever anyone tries to change modes, we just mask interrupts
  * and wait for the next event to get set.
  */
-static void tile_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int tile_timer_shutdown(struct clock_event_device *evt)
 {
 	arch_local_irq_mask_now(INT_TILE_TIMER);
+	return 0;
 }
 
 /*
@@ -157,7 +157,9 @@
 	.rating = 100,
 	.irq = -1,
 	.set_next_event = tile_timer_set_next_event,
-	.set_mode = tile_timer_set_mode,
+	.set_state_shutdown = tile_timer_shutdown,
+	.set_state_oneshot = tile_timer_shutdown,
+	.tick_resume = tile_timer_shutdown,
 };
 
 void setup_tile_timer(void)
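
The tile timer is converted from the catch-all set_mode() callback to the per-state clockevents hooks; shutdown, oneshot and resume all just mask the timer interrupt here, and the new hooks return an int status instead of void. A stand-in sketch of the converted shape — only the fields used are modeled:

#include <stdio.h>

/* Stand-in for the relevant slice of struct clock_event_device. */
struct clock_event_device {
	int (*set_state_shutdown)(struct clock_event_device *evt);
	int (*set_state_oneshot)(struct clock_event_device *evt);
	int (*tick_resume)(struct clock_event_device *evt);
};

static void mask_timer_irq(void)
{
	printf("timer interrupt masked\n");
}

/* One handler serves all three states, as in the tile patch. */
static int timer_shutdown(struct clock_event_device *evt)
{
	(void)evt;
	mask_timer_irq();
	return 0;	/* the new hooks report success or failure */
}

int main(void)
{
	struct clock_event_device evt = {
		.set_state_shutdown = timer_shutdown,
		.set_state_oneshot  = timer_shutdown,
		.tick_resume        = timer_shutdown,
	};

	return evt.set_state_shutdown(&evt);
}
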
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
index a025f63..c54fff3 100644
--- a/arch/tile/kernel/vdso/Makefile
+++ b/arch/tile/kernel/vdso/Makefile
@@ -54,7 +54,7 @@
 $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-                            $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+                            $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 SYSCFLAGS_vdso_syms.o = -r
 $(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
 	$(call if_changed,vdsold)
@@ -113,6 +113,6 @@
 $(obj)/vdso32.o: $(obj)/vdso32.so
 
 SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
-			    $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			    $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 $(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
 	$(call if_changed,vdsold)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211..298df1e 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda313..f611265 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp     lr              /* happy backtracer */
 
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 5bd252e..d4e1fc4 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -863,7 +863,7 @@
  * memory to the highmem for now.
  */
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int arch_add_memory(u64 start, u64 size)
+int arch_add_memory(u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata = &contig_page_data;
 	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
index debafc4..3bb0a29 100644
--- a/arch/unicore32/include/asm/memory.h
+++ b/arch/unicore32/include/asm/memory.h
@@ -61,12 +61,6 @@
 #endif
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 48f7433..cc0d73e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,7 +27,8 @@
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_PMEM_API		if X86_64
+	select ARCH_HAS_MMIO_FLUSH
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_MIGHT_HAVE_ACPI_PDC		if ACPI
@@ -41,6 +42,7 @@
 	select ARCH_USE_CMPXCHG_LOCKREF		if X86_64
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION	if X86_32
@@ -1449,10 +1451,14 @@
 
 source "mm/Kconfig"
 
+config X86_PMEM_LEGACY_DEVICE
+	bool
+
 config X86_PMEM_LEGACY
-	bool "Support non-standard NVDIMMs and ADR protected memory"
+	tristate "Support non-standard NVDIMMs and ADR protected memory"
 	depends on PHYS_ADDR_T_64BIT
 	depends on BLK_DEV
+	select X86_PMEM_LEGACY_DEVICE
 	select LIBNVDIMM
 	help
 	  Treat memory marked using the non-standard e820 type of 12 as used
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 962297d..cb5b3ab 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -208,7 +208,6 @@
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_DRM_I915_KMS=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 64d7cf1..440df0c 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -294,6 +294,7 @@
 			.cra_name		= "ghash",
 			.cra_driver_name	= "ghash-clmulni",
 			.cra_priority		= 400,
+			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
 			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= GHASH_BLOCK_SIZE,
 			.cra_type		= &crypto_ahash_type,
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 25e3cf1..477bfa6 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -380,3 +380,4 @@
 371	i386	recvfrom		sys_recvfrom			compat_sys_recvfrom
 372	i386	recvmsg			sys_recvmsg			compat_sys_recvmsg
 373	i386	shutdown		sys_shutdown
+374	i386	userfaultfd		sys_userfaultfd
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 9ef32d5..81c4906 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -329,6 +329,7 @@
 320	common	kexec_file_load		sys_kexec_file_load
 321	common	bpf			sys_bpf
 322	64	execveat		stub_execveat
+323	common	userfaultfd		sys_userfaultfd
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
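
The two table entries above only assign syscall numbers; the implementation lands in fs/userfaultfd.c in this same cycle. As a rough user-space sketch of exercising the new syscall (assuming the __NR_userfaultfd constant from these tables and the UFFDIO_API handshake from the uapi header):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

int main(void)
{
	/* No glibc wrapper yet, so go through syscall(2) directly. */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("userfaultfd");
		return 1;
	}
	printf("userfaultfd features: 0x%llx\n",
	       (unsigned long long)api.features);
	return 0;
}
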
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index e916895..fb52aa6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,21 @@
 	return xchg(&v->counter, new);
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
+			: "+m" (v->counter)				\
+			: "ir" (i)					\
+			: "memory");					\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -219,16 +234,6 @@
 	return *v;
 }
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
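
The ATOMIC_OP() template above stamps out atomic_and(), atomic_or() and atomic_xor() as single locked instructions, replacing the removed x86-only atomic_{clear,set}_mask() macros (atomic_clear_mask(mask, v) becomes atomic_and(~mask, v)). Expanded by hand for 'or', purely as an illustration:

/* What ATOMIC_OP(or) generates. */
static inline void atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}
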
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de7..a11c30b 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
+#define ATOMIC64_OP(op, c_op)						\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	long long old, c = 0;						\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
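
32-bit x86 has no locked 64-bit and/or/xor instruction, so ATOMIC64_OP() above falls back to a compare-and-exchange retry loop. Written out long-hand for 'or' (illustration only):

/* What ATOMIC64_OP(or, |) generates. */
static inline void atomic64_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	/* Retry until the value the OR was computed from is still current. */
	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}
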
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index b965f9e..50e33ef 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@
 	return dec;
 }
 
+#define ATOMIC64_OP(op)							\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
+			: "+m" (v->counter)				\
+			: "er" (i)					\
+			: "memory");					\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 818cb87..0681d25 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,12 +57,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
@@ -74,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
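
The change here is part of the tree-wide move from ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE(), which also behave correctly for non-scalar types. The acquire/release pair itself is used as in this generic producer/consumer sketch (not taken from the patch):

static int payload;
static int ready;

void producer(void)
{
	payload = 42;
	smp_store_release(&ready, 1);	/* all prior stores visible first */
}

int consumer(void)
{
	if (smp_load_acquire(&ready))	/* pairs with the release above */
		return payload;		/* guaranteed to observe 42 */
	return -1;
}
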
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9bf3ea1..e63aa38 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -89,6 +89,8 @@
 
 void clflush_cache_range(void *addr, unsigned int size);
 
+#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
+
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
@@ -109,75 +111,4 @@
 }
 #endif
 
-#ifdef ARCH_HAS_NOCACHE_UACCESS
-
-/**
- * arch_memcpy_to_pmem - copy data to persistent memory
- * @dst: destination buffer for the copy
- * @src: source buffer for the copy
- * @n: length of the copy in bytes
- *
- * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
- */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
-{
-	int unwritten;
-
-	/*
-	 * We are copying between two kernel buffers, if
-	 * __copy_from_user_inatomic_nocache() returns an error (page
-	 * fault) we would have already reported a general protection fault
-	 * before the WARN+BUG.
-	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
-		BUG();
-}
-
-/**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-#ifdef CONFIG_X86_64
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
-#else
-	return false;
-#endif
-}
-#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
-extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
-extern void arch_wmb_pmem(void);
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	return false;
-}
-#endif
-
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index f45acad..2493885 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,9 +3,9 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CC_USING_FENTRY
-# define MCOUNT_ADDR		((long)(__fentry__))
+# define MCOUNT_ADDR		((unsigned long)(__fentry__))
 #else
-# define MCOUNT_ADDR		((long)(mcount))
+# define MCOUNT_ADDR		((unsigned long)(mcount))
 #endif
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7cfc085..de25aad 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -250,12 +250,6 @@
 #endif
 }
 
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-	unsigned long size)
-{
-	return (void __force __pmem *) ioremap_cache(offset, size);
-}
-
 #endif /* __KERNEL__ */
 
 extern void native_io_delay(void);
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a4c1cf7..5daeca3 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -16,15 +16,32 @@
 # define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
 		".popsection \n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (key), "i" (branch) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".popsection \n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
new file mode 100644
index 0000000..d8ce3ec
--- /dev/null
+++ b/arch/x86/include/asm/pmem.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __ASM_X86_PMEM_H__
+#define __ASM_X86_PMEM_H__
+
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+/**
+ * arch_memcpy_to_pmem - copy data to persistent memory
+ * @dst: destination buffer for the copy
+ * @src: source buffer for the copy
+ * @n: length of the copy in bytes
+ *
+ * Copy data to persistent memory media via non-temporal stores so that
+ * a subsequent arch_wmb_pmem() can flush cpu and memory controller
+ * write buffers to guarantee durability.
+ */
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+		size_t n)
+{
+	int unwritten;
+
+	/*
+	 * We are copying between two kernel buffers, if
+	 * __copy_from_user_inatomic_nocache() returns an error (page
+	 * fault) we would have already reported a general protection fault
+	 * before the WARN+BUG.
+	 */
+	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
+			(void __user *) src, n);
+	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
+				__func__, dst, src, unwritten))
+		BUG();
+}
+
+/**
+ * arch_wmb_pmem - synchronize writes to persistent memory
+ *
+ * After a series of arch_memcpy_to_pmem() operations this drains data
+ * from cpu write buffers and any platform (memory controller) buffers
+ * to ensure that written data is durable on persistent memory media.
+ */
+static inline void arch_wmb_pmem(void)
+{
+	/*
+	 * wmb() to 'sfence' all previous writes such that they are
+	 * architecturally visible to 'pcommit'.  Note, that we've
+	 * already arranged for pmem writes to avoid the cache via
+	 * arch_memcpy_to_pmem().
+	 */
+	wmb();
+	pcommit_sfence();
+}
+
+/**
+ * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction.  This function requires explicit ordering with an
+ * arch_wmb_pmem() call.  This API is internal to the x86 PMEM implementation.
+ */
+static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+{
+	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vend = vaddr + size;
+	void *p;
+
+	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	     p < vend; p += x86_clflush_size)
+		clwb(p);
+}
+
+/*
+ * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+ * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+ */
+static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+{
+	return iter_is_iovec(i) == false;
+}
+
+/**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	void *vaddr = (void __force *)addr;
+	size_t len;
+
+	/* TODO: skip the write-back by always using non-temporal stores */
+	len = copy_from_iter_nocache(vaddr, bytes, i);
+
+	if (__iter_needs_pmem_wb(i))
+		__arch_wb_cache_pmem(vaddr, bytes);
+
+	return len;
+}
+
+/**
+ * arch_clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+	void *vaddr = (void __force *)addr;
+
+	/* TODO: implement the zeroing via non-temporal writes */
+	if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+		clear_page(vaddr);
+	else
+		memset(vaddr, 0, size);
+
+	__arch_wb_cache_pmem(vaddr, size);
+}
+
+static inline bool __arch_has_wmb_pmem(void)
+{
+	/*
+	 * We require that wmb() be an 'sfence', that is only guaranteed on
+	 * 64-bit builds
+	 */
+	return static_cpu_has(X86_FEATURE_PCOMMIT);
+}
+#endif /* CONFIG_ARCH_HAS_PMEM_API */
+#endif /* __ASM_X86_PMEM_H__ */
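
Taken together, the helpers in this new header implement a write-then-fence discipline: data reaches the media via non-temporal stores or an explicit CLWB write-back, and one arch_wmb_pmem() afterwards makes the whole batch durable. A hedged sketch of a caller (the persist_record() wrapper is illustrative, not part of the patch):

/* Persist one buffer, assuming 'dst' maps persistent memory. */
static void persist_record(void __pmem *dst, const void *buf, size_t len)
{
	arch_memcpy_to_pmem(dst, buf, len);	/* non-temporal copy */
	arch_wmb_pmem();			/* sfence + pcommit: durable */
}
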
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241..c537cbb 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -2,16 +2,6 @@
 #define _ASM_X86_QRWLOCK_H
 
 #include <asm-generic/qrwlock_types.h>
-
-#ifndef CONFIG_X86_PPRO_FENCE
-#define queue_write_unlock queue_write_unlock
-static inline void queue_write_unlock(struct qrwlock *lock)
-{
-        barrier();
-        ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
-}
-#endif
-
 #include <asm-generic/qrwlock.h>
 
 #endif /* _ASM_X86_QRWLOCK_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cd79194..6df2029 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,6 +261,12 @@
 
 #endif	/* SMP */
 
+/* Not inlined due to inc_irq_stat not being defined yet */
+#define flush_tlb_local() {		\
+	inc_irq_stat(irq_tlb_count);	\
+	local_flush_tlb();		\
+}
+
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 608a79d..e6911ca 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -20,4 +20,15 @@
 /* No need for a barrier -- XCHG is a barrier on x86. */
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
+extern int xen_have_vector_callback;
+
+/*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+static inline bool xen_support_evtchn_rebind(void)
+{
+	return (!xen_hvm_domain() || xen_have_vector_callback);
+}
+
 #endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index ca08a27..83aea80 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -465,6 +465,12 @@
 	return _hypercall1(int, tmem_op, op);
 }
 
+static inline int
+HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
+{
+	return _hypercall2(int, xenpmu_op, op, arg);
+}
+
 static inline void
 MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
 {
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 3400dba..62ca03e 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -3,12 +3,38 @@
  *
  * Guest OS interface to x86 Xen.
  *
- * Copyright (c) 2004, K A Fraser
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
  */
 
 #ifndef _ASM_X86_XEN_INTERFACE_H
 #define _ASM_X86_XEN_INTERFACE_H
 
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
 #ifdef __XEN__
 #define __DEFINE_GUEST_HANDLE(name, type) \
     typedef struct { type *p; } __guest_handle_ ## name
@@ -88,13 +114,16 @@
  * start of the GDT because some stupid OSes export hard-coded selector values
  * in their ABI. These hard-coded values are always near the start of the GDT,
  * so Xen places itself out of the way, at the far end of the GDT.
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
  */
 #define FIRST_RESERVED_GDT_PAGE  14
 #define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
 #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
 
 /*
- * Send an array of these to HYPERVISOR_set_trap_table()
+ * Send an array of these to HYPERVISOR_set_trap_table().
+ * Terminate the array with a sentinel entry, with traps[].address==0.
  * The privilege level specifies which modes may enter a trap via a software
  * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
  * privilege levels as follows:
@@ -118,10 +147,41 @@
 DEFINE_GUEST_HANDLE_STRUCT(trap_info);
 
 struct arch_shared_info {
-    unsigned long max_pfn;                  /* max pfn that appears in table */
-    /* Frame containing list of mfns containing list of mfns containing p2m. */
-    unsigned long pfn_to_mfn_frame_list_list;
-    unsigned long nmi_reason;
+	/*
+	 * Number of valid entries in the p2m table(s) anchored at
+	 * pfn_to_mfn_frame_list_list and/or p2m_vaddr.
+	 */
+	unsigned long max_pfn;
+	/*
+	 * Frame containing list of mfns containing list of mfns containing p2m.
+	 * A value of 0 indicates it has not yet been set up, ~0 indicates it
+	 * has been set to invalid e.g. due to the p2m being too large for the
+	 * 3-level p2m tree. In this case the linear mapped p2m list anchored
+	 * at p2m_vaddr is to be used.
+	 */
+	xen_pfn_t pfn_to_mfn_frame_list_list;
+	unsigned long nmi_reason;
+	/*
+	 * Following three fields are valid if p2m_cr3 contains a value
+	 * different from 0.
+	 * p2m_cr3 is the root of the address space where p2m_vaddr is valid.
+	 * p2m_cr3 is in the same format as a cr3 value in the vcpu register
+	 * state and holds the folded machine frame number (via xen_pfn_to_cr3)
+	 * of a L3 or L4 page table.
+	 * p2m_vaddr holds the virtual address of the linear p2m list. All
+	 * entries in the range [0...max_pfn[ are accessible via this pointer.
+	 * p2m_generation will be incremented by the guest before and after each
+	 * change of the mappings of the p2m list. p2m_generation starts at 0
+	 * and a value with the least significant bit set indicates that a
+	 * mapping update is in progress. This allows guest external software
+	 * (e.g. in Dom0) to verify that read mappings are consistent and
+	 * whether they have changed since the last check.
+	 * Modifying a p2m element in the linear p2m list is allowed via an
+	 * atomic write only.
+	 */
+	unsigned long p2m_cr3;		/* cr3 value of the p2m address space */
+	unsigned long p2m_vaddr;	/* virtual address of the p2m list */
+	unsigned long p2m_generation;	/* generation count of p2m mapping */
 };
 #endif	/* !__ASSEMBLY__ */
 
@@ -137,13 +197,31 @@
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled
  * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ *
+ * Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise
+ * for HVM and PVH guests, not all information in this structure is updated:
+ *
+ * - For HVM guests, the structures read include: fpu_ctxt (if
+ * VGCT_I387_VALID is set), flags, user_regs, debugreg[*]
+ *
+ * - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to
+ * set cr3. All other fields not used should be set to 0.
  */
 struct vcpu_guest_context {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST  (1<<1)
-#define VGCF_IN_KERNEL  (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events  4
+#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
+#define _VGCF_online                   5
+#define VGCF_online                    (1<<_VGCF_online)
     unsigned long flags;                    /* VGCF_* flags                 */
     struct cpu_user_regs user_regs;         /* User-level CPU registers     */
     struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
@@ -172,6 +250,129 @@
 #endif
 };
 DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+
+/* AMD PMU registers and structures */
+struct xen_pmu_amd_ctxt {
+	/*
+	 * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
+	 * For PV(H) guests these fields are RO.
+	 */
+	uint32_t counters;
+	uint32_t ctrls;
+
+	/* Counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+	uint64_t regs[];
+#elif defined(__GNUC__)
+	uint64_t regs[0];
+#endif
+};
+
+/* Intel PMU registers and structures */
+struct xen_pmu_cntr_pair {
+	uint64_t counter;
+	uint64_t control;
+};
+
+struct xen_pmu_intel_ctxt {
+	/*
+	 * Offsets to fixed and architectural counter MSRs (relative to
+	 * xen_pmu_arch.c.intel).
+	 * For PV(H) guests these fields are RO.
+	 */
+	uint32_t fixed_counters;
+	uint32_t arch_counters;
+
+	/* PMU registers */
+	uint64_t global_ctrl;
+	uint64_t global_ovf_ctrl;
+	uint64_t global_status;
+	uint64_t fixed_ctrl;
+	uint64_t ds_area;
+	uint64_t pebs_enable;
+	uint64_t debugctl;
+
+	/* Fixed and architectural counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+	uint64_t regs[];
+#elif defined(__GNUC__)
+	uint64_t regs[0];
+#endif
+};
+
+/* Sampled domain's registers */
+struct xen_pmu_regs {
+	uint64_t ip;
+	uint64_t sp;
+	uint64_t flags;
+	uint16_t cs;
+	uint16_t ss;
+	uint8_t cpl;
+	uint8_t pad[3];
+};
+
+/* PMU flags */
+#define PMU_CACHED	   (1<<0) /* PMU MSRs are cached in the context */
+#define PMU_SAMPLE_USER	   (1<<1) /* Sample is from user or kernel mode */
+#define PMU_SAMPLE_REAL	   (1<<2) /* Sample is from realmode */
+#define PMU_SAMPLE_PV	   (1<<3) /* Sample from a PV guest */
+
+/*
+ * Architecture-specific information describing state of the processor at
+ * the time of PMU interrupt.
+ * Fields of this structure marked as RW for guest should only be written by
+ * the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
+ * hypervisor during PMU interrupt). Hypervisor will read updated data in
+ * XENPMU_flush hypercall and clear PMU_CACHED bit.
+ */
+struct xen_pmu_arch {
+	union {
+		/*
+		 * Processor's registers at the time of interrupt.
+		 * WO for hypervisor, RO for guests.
+		 */
+		struct xen_pmu_regs regs;
+		/*
+		 * Padding for adding new registers to xen_pmu_regs in
+		 * the future
+		 */
+#define XENPMU_REGS_PAD_SZ  64
+		uint8_t pad[XENPMU_REGS_PAD_SZ];
+	} r;
+
+	/* WO for hypervisor, RO for guest */
+	uint64_t pmu_flags;
+
+	/*
+	 * APIC LVTPC register.
+	 * RW for both hypervisor and guest.
+	 * Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
+	 * during XENPMU_flush or XENPMU_lvtpc_set.
+	 */
+	union {
+		uint32_t lapic_lvtpc;
+		uint64_t pad;
+	} l;
+
+	/*
+	 * Vendor-specific PMU registers.
+	 * RW for both hypervisor and guest (see exceptions above).
+	 * Guest's updates to this field are verified and then loaded by the
+	 * hypervisor into hardware during XENPMU_flush
+	 */
+	union {
+		struct xen_pmu_amd_ctxt amd;
+		struct xen_pmu_intel_ctxt intel;
+
+		/*
+		 * Padding for contexts (fixed parts only, does not include
+		 * MSR banks that are specified by offsets)
+		 */
+#define XENPMU_CTXT_PAD_SZ  128
+		uint8_t pad[XENPMU_CTXT_PAD_SZ];
+	} c;
+};
+
 #endif	/* !__ASSEMBLY__ */
 
 /*
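
Note that the MSR banks in xen_pmu_amd_ctxt and xen_pmu_intel_ctxt are described by byte offsets relative to xen_pmu_arch.c rather than embedded arrays, so consumers must compute the bank addresses themselves. A sketch of resolving the AMD counter bank (field names from the structures above; the helper itself is illustrative):

/* Resolve the AMD counter MSR bank from its offset field. */
static uint64_t *amd_counters(struct xen_pmu_arch *pmu)
{
	struct xen_pmu_amd_ctxt *ctxt = &pmu->c.amd;

	/* 'counters' is a byte offset from the start of the AMD context. */
	return (uint64_t *)((char *)ctxt + ctxt->counters);
}
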
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c44a5d5..a3804fb 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -35,9 +35,7 @@
 #define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
 #define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)
 
-/* Maximum amount of memory we can handle in a domain in pages */
-#define MAX_DOMAIN_PAGES						\
-    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
 
 extern unsigned long *machine_to_phys_mapping;
 extern unsigned long  machine_to_phys_nr;
@@ -48,8 +46,8 @@
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern unsigned long set_phys_range_identity(unsigned long pfn_s,
-					     unsigned long pfn_e);
+extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+						    unsigned long pfn_e);
 
 extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 				   struct gnttab_map_grant_ref *kmap_ops,
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index 0f457e6..9dafe59 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -37,7 +37,7 @@
 /*
  * This is a non-standardized way to represent ADR or NVDIMM regions that
  * persist over a reboot.  The kernel will ignore their special capabilities
- * unless the CONFIG_X86_PMEM_LEGACY=y option is set.
+ * unless the CONFIG_X86_PMEM_LEGACY option is set.
  *
  * ( Note that older platforms also used 6 for the same type of memory,
  *   but newer versions switched to 12 as 6 was assigned differently.  Some
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3c36221..9ffdf25 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -94,7 +94,7 @@
 obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
-obj-$(CONFIG_X86_PMEM_LEGACY)	+= pmem.o
+obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 6873ab9..045e424 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -28,146 +28,21 @@
 #endif
 
 #ifdef arch_trigger_all_cpu_backtrace
-/* For reliability, we're prepared to waste bits here. */
-static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-static cpumask_t printtrace_mask;
-
-#define NMI_BUF_SIZE		4096
-
-struct nmi_seq_buf {
-	unsigned char		buffer[NMI_BUF_SIZE];
-	struct seq_buf		seq;
-};
-
-/* Safe printing in NMI context */
-static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
-
-/* "in progress" flag of arch_trigger_all_cpu_backtrace */
-static unsigned long backtrace_flag;
-
-static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
-	const char *buf = s->buffer + start;
-
-	printk("%.*s", (end - start) + 1, buf);
+	apic->send_IPI_mask(mask, NMI_VECTOR);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
 {
-	struct nmi_seq_buf *s;
-	int len;
-	int cpu;
-	int i;
-	int this_cpu = get_cpu();
-
-	if (test_and_set_bit(0, &backtrace_flag)) {
-		/*
-		 * If there is already a trigger_all_cpu_backtrace() in progress
-		 * (backtrace_flag == 1), don't output double cpu dump infos.
-		 */
-		put_cpu();
-		return;
-	}
-
-	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
-	if (!include_self)
-		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
-
-	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
-	/*
-	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
-	 * CPUs will write to.
-	 */
-	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
-		s = &per_cpu(nmi_print_seq, cpu);
-		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
-	}
-
-	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
-		pr_info("sending NMI to %s CPUs:\n",
-			(include_self ? "all" : "other"));
-		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
-	}
-
-	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
-	for (i = 0; i < 10 * 1000; i++) {
-		if (cpumask_empty(to_cpumask(backtrace_mask)))
-			break;
-		mdelay(1);
-		touch_softlockup_watchdog();
-	}
-
-	/*
-	 * Now that all the NMIs have triggered, we can dump out their
-	 * back traces safely to the console.
-	 */
-	for_each_cpu(cpu, &printtrace_mask) {
-		int last_i = 0;
-
-		s = &per_cpu(nmi_print_seq, cpu);
-		len = seq_buf_used(&s->seq);
-		if (!len)
-			continue;
-
-		/* Print line by line. */
-		for (i = 0; i < len; i++) {
-			if (s->buffer[i] == '\n') {
-				print_seq_line(s, last_i, i);
-				last_i = i + 1;
-			}
-		}
-		/* Check if there was a partial line. */
-		if (last_i < len) {
-			print_seq_line(s, last_i, len - 1);
-			pr_cont("\n");
-		}
-	}
-
-	clear_bit(0, &backtrace_flag);
-	smp_mb__after_atomic();
-	put_cpu();
-}
-
-/*
- * It is not safe to call printk() directly from NMI handlers.
- * It may be fine if the NMI detected a lock up and we have no choice
- * but to do so, but doing a NMI on all other CPUs to get a back trace
- * can be done with a sysrq-l. We don't want that to lock up, which
- * can happen if the NMI interrupts a printk in progress.
- *
- * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
- * the content into a per cpu seq_buf buffer. Then when the NMIs are
- * all done, we can safely dump the contents of the seq_buf to a printk()
- * from a non NMI context.
- */
-static int nmi_vprintk(const char *fmt, va_list args)
-{
-	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
-	unsigned int len = seq_buf_used(&s->seq);
-
-	seq_buf_vprintf(&s->seq, fmt, args);
-	return seq_buf_used(&s->seq) - len;
+	nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
 }
 
 static int
 arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	int cpu;
-
-	cpu = smp_processor_id();
-
-	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		printk_func_t printk_func_save = this_cpu_read(printk_func);
-
-		/* Replace printk to write into the NMI seq */
-		this_cpu_write(printk_func, nmi_vprintk);
-		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
-		show_regs(regs);
-		this_cpu_write(printk_func, printk_func_save);
-
-		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+	if (nmi_cpu_backtrace(regs))
 		return NMI_HANDLED;
-	}
 
 	return NMI_DONE;
 }
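
All of the seq_buf buffering, CPU masking and line-by-line printing deleted here moved into the generic nmi_trigger_all_cpu_backtrace()/nmi_cpu_backtrace() helpers, so an architecture now only supplies the IPI "raise" step. A sketch of the same wiring for another architecture (send_nmi_ipi() is a hypothetical arch hook):

/* Per-arch glue for the generic NMI backtrace helpers. */
static void raise_backtrace_ipi(cpumask_t *mask)
{
	send_nmi_ipi(mask);	/* hypothetical: NMI-like IPI to *mask */
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_backtrace_ipi);
}
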
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3f124d5..cd9b6d0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <linux/watchdog.h>
+#include <linux/nmi.h>
 
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
@@ -3627,7 +3627,10 @@
 		return 0;
 	}
 
-	watchdog_nmi_disable_all();
+	if (lockup_detector_suspend() != 0) {
+		pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
+		return 0;
+	}
 
 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
@@ -3635,7 +3638,7 @@
 	x86_pmu.commit_scheduling = NULL;
 	x86_pmu.stop_scheduling = NULL;
 
-	watchdog_nmi_enable_all();
+	lockup_detector_resume();
 
 	get_online_cpus();
 
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 26d5a55..e565e0e 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -45,7 +45,7 @@
 	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		if (init) {
 			/*
 			 * Jump label is enabled for the first time.
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 961e51e..0f8a6bb 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -533,7 +533,9 @@
 	int ret;
 
 	ret = verify_pefile_signature(kernel, kernel_len,
-				      system_trusted_keyring, &trusted);
+				      system_trusted_keyring,
+				      VERIFYING_KEXEC_PE_SIGNATURE,
+				      &trusted);
 	if (ret < 0)
 		return ret;
 	if (!trusted)
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 64f90f5..4f00b63 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -3,80 +3,17 @@
  * Copyright (c) 2015, Intel Corporation.
  */
 #include <linux/platform_device.h>
-#include <linux/libnvdimm.h>
 #include <linux/module.h>
-#include <asm/e820.h>
-
-static void e820_pmem_release(struct device *dev)
-{
-	struct nvdimm_bus *nvdimm_bus = dev->platform_data;
-
-	if (nvdimm_bus)
-		nvdimm_bus_unregister(nvdimm_bus);
-}
-
-static struct platform_device e820_pmem = {
-	.name = "e820_pmem",
-	.id = -1,
-	.dev = {
-		.release = e820_pmem_release,
-	},
-};
-
-static const struct attribute_group *e820_pmem_attribute_groups[] = {
-	&nvdimm_bus_attribute_group,
-	NULL,
-};
-
-static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
-	&nd_region_attribute_group,
-	&nd_device_attribute_group,
-	NULL,
-};
 
 static __init int register_e820_pmem(void)
 {
-	static struct nvdimm_bus_descriptor nd_desc;
-	struct device *dev = &e820_pmem.dev;
-	struct nvdimm_bus *nvdimm_bus;
-	int rc, i;
+	struct platform_device *pdev;
 
-	rc = platform_device_register(&e820_pmem);
-	if (rc)
-		return rc;
-
-	nd_desc.attr_groups = e820_pmem_attribute_groups;
-	nd_desc.provider_name = "e820";
-	nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
-	if (!nvdimm_bus)
-		goto err;
-	dev->platform_data = nvdimm_bus;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		struct resource res = {
-			.flags	= IORESOURCE_MEM,
-			.start	= ei->addr,
-			.end	= ei->addr + ei->size - 1,
-		};
-		struct nd_region_desc ndr_desc;
-
-		if (ei->type != E820_PRAM)
-			continue;
-
-		memset(&ndr_desc, 0, sizeof(ndr_desc));
-		ndr_desc.res = &res;
-		ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
-		ndr_desc.numa_node = NUMA_NO_NODE;
-		if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
-			goto err;
-	}
-
-	return 0;
-
- err:
-	dev_err(dev, "failed to register legacy persistent memory ranges\n");
-	platform_device_unregister(&e820_pmem);
-	return -ENXIO;
+	/*
+	 * See drivers/nvdimm/e820.c for the implementation, this is
+	 * simply here to trigger the module to load on demand.
+	 */
+	pdev = platform_device_alloc("e820_pmem", -1);
+	return platform_device_add(pdev);
 }
 device_initcall(register_e820_pmem);
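
The nvdimm bus and region registration deleted above now lives behind a normal platform driver in drivers/nvdimm/e820.c, matched purely by the "e820_pmem" name. A hedged sketch of the driver side (details per that file, abbreviated here):

/* Driver side that binds to the "e820_pmem" device registered above. */
static int e820_pmem_probe(struct platform_device *pdev)
{
	/* walk the e820 map and register an nvdimm bus + pmem regions */
	return 0;
}

static struct platform_driver e820_pmem_driver = {
	.probe = e820_pmem_probe,
	.driver = {
		.name = "e820_pmem",
	},
};
module_platform_driver(e820_pmem_driver);
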
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b143c2d..baadbf9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -317,15 +317,12 @@
 	return ramdisk_size;
 }
 
-#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
 static void __init relocate_initrd(void)
 {
 	/* Assume only end is not page aligned */
 	u64 ramdisk_image = get_ramdisk_image();
 	u64 ramdisk_size  = get_ramdisk_size();
 	u64 area_size     = PAGE_ALIGN(ramdisk_size);
-	unsigned long slop, clen, mapaddr;
-	char *p, *q;
 
 	/* We need to move the initrd down into directly mapped mem */
 	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
@@ -343,25 +340,8 @@
 	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
 	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
 
-	q = (char *)initrd_start;
+	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
 
-	/* Copy the initrd */
-	while (ramdisk_size) {
-		slop = ramdisk_image & ~PAGE_MASK;
-		clen = ramdisk_size;
-		if (clen > MAX_MAP_CHUNK-slop)
-			clen = MAX_MAP_CHUNK-slop;
-		mapaddr = ramdisk_image & PAGE_MASK;
-		p = early_memremap(mapaddr, clen+slop);
-		memcpy(q, p+slop, clen);
-		early_memunmap(p, clen+slop);
-		q += clen;
-		ramdisk_image += clen;
-		ramdisk_size  -= clen;
-	}
-
-	ramdisk_image = get_ramdisk_image();
-	ramdisk_size  = get_ramdisk_size();
 	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
 		" [mem %#010llx-%#010llx]\n",
 		ramdisk_image, ramdisk_image + ramdisk_size - 1,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 79055cf..c8d52cb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,7 +38,7 @@
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
@@ -274,7 +274,12 @@
  */
 u64 native_sched_clock(void)
 {
-	u64 tsc_now;
+	if (static_branch_likely(&__use_tsc)) {
+		u64 tsc_now = rdtsc();
+
+		/* return the value in ns */
+		return cycles_2_ns(tsc_now);
+	}
 
 	/*
 	 * Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,9 @@
 	 *   very important for it to be as fast as the platform
 	 *   can achieve it. )
 	 */
-	if (!static_key_false(&__use_tsc)) {
-		/* No locking but a rare wrong value is not a big deal: */
-		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-	}
 
-	/* read the Time Stamp Counter: */
-	tsc_now = rdtsc();
-
-	/* return the value in ns */
-	return cycles_2_ns(tsc_now);
+	/* No locking but a rare wrong value is not a big deal: */
+	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 }
 
 /*
@@ -1212,7 +1210,7 @@
 	/* now allow native_sched_clock() to use rdtsc */
 
 	tsc_disabled = 0;
-	static_key_slow_inc(&__use_tsc);
+	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
 		enable_sched_clock_irqtime();
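
This conversion shows the new static-branch API end to end: DEFINE_STATIC_KEY_FALSE() declares the key, static_branch_likely() compiles the read side down to a patchable NOP/jump, and static_branch_enable() flips every site once at boot. The same pattern in isolation (do_fast_path()/do_slow_path() are placeholders):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(fast_path_enabled);

void hot_function(void)
{
	if (static_branch_likely(&fast_path_enabled))
		do_fast_path();		/* a NOP until the key is enabled */
	else
		do_slow_path();
}

void late_init(void)
{
	static_branch_enable(&fast_path_enabled);	/* patch all sites */
}
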
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4a4eec30..148ea20 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3150,7 +3150,7 @@
 	struct page *pages;
 	struct vmcs *vmcs;
 
-	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
+	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
 	if (!pages)
 		return NULL;
 	vmcs = page_address(pages);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 68aec42..7562f42 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -823,11 +823,11 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata = NODE_DATA(nid);
 	struct zone *zone = pgdata->node_zones +
-		zone_for_memory(nid, start, size, ZONE_HIGHMEM);
+		zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3fba623..30564e2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -687,11 +687,11 @@
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL);
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 4053bb5..c3b3f65 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -246,8 +246,10 @@
 		bi->start = max(bi->start, low);
 		bi->end = min(bi->end, high);
 
-		/* and there's no empty block */
-		if (bi->start >= bi->end)
+		/* and there's no empty or non-existent block */
+		if (bi->start >= bi->end ||
+		    !memblock_overlaps_region(&memblock.memory,
+			bi->start, bi->end - bi->start))
 			numa_remove_memblk_from(i--, mi);
 	}
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 90b924a..8ddb5d0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -140,6 +140,7 @@
 	info.flush_end = end;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 4841453..c7b15f3 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -7,6 +7,7 @@
 	depends on PARAVIRT
 	select PARAVIRT_CLOCK
 	select XEN_HAVE_PVMMU
+	select XEN_HAVE_VPMU
 	depends on X86_64 || (X86_32 && X86_PAE)
 	depends on X86_LOCAL_APIC && X86_TSC
 	help
@@ -23,14 +24,18 @@
 	def_bool y
 	depends on XEN && PCI && X86_LOCAL_APIC
 
-config XEN_MAX_DOMAIN_MEMORY
-       int
-       default 500 if X86_64
-       default 64 if X86_32
-       depends on XEN
-       help
-         This only affects the sizing of some bss arrays, the unused
-         portions of which are freed.
+config XEN_512GB
+	bool "Limit Xen pv-domain memory to 512GB"
+	depends on XEN && X86_64
+	default y
+	help
+	  Limit paravirtualized user domains to 512GB of RAM.
+
+	  The Xen tools and crash dump analysis tools might not support
+	  pv-domains with more than 512 GB of RAM. This option controls
+	  whether the kernel limits itself to 512 GB of RAM by default.
+	  It is always possible to change the default by specifying the
+	  boot parameter "xen_512gb_limit".
 
 config XEN_SAVE_RESTORE
        bool
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 4b6e29a..e47e527 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,7 +13,7 @@
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o platform-pci-unplug.o \
-			p2m.o apic.o
+			p2m.o apic.o pmu.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 70e060a..acda713 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -7,6 +7,7 @@
 #include <xen/xen.h>
 #include <xen/interface/physdev.h>
 #include "xen-ops.h"
+#include "pmu.h"
 #include "smp.h"
 
 static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
@@ -72,6 +73,11 @@
 
 static void xen_apic_write(u32 reg, u32 val)
 {
+	if (reg == APIC_LVTPC) {
+		(void)pmu_apic_update(reg);
+		return;
+	}
+
 	/* Warn to see if there's any stray references */
 	WARN(1,"register: %x, value: %x\n", reg, val);
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d9cfa45..30d12af 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -84,6 +84,7 @@
 #include "mmu.h"
 #include "smp.h"
 #include "multicalls.h"
+#include "pmu.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
@@ -1010,8 +1011,7 @@
 
 static void xen_write_cr4(unsigned long cr4)
 {
-	cr4 &= ~X86_CR4_PGE;
-	cr4 &= ~X86_CR4_PSE;
+	cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);
 
 	native_write_cr4(cr4);
 }
@@ -1030,6 +1030,9 @@
 {
 	u64 val;
 
+	if (pmu_msr_read(msr, &val, err))
+		return val;
+
 	val = native_read_msr_safe(msr, err);
 	switch (msr) {
 	case MSR_IA32_APICBASE:
@@ -1076,7 +1079,8 @@
 		   Xen console noise. */
 
 	default:
-		ret = native_write_msr_safe(msr, low, high);
+		if (!pmu_msr_write(msr, low, high, &ret))
+			ret = native_write_msr_safe(msr, low, high);
 	}
 
 	return ret;
@@ -1215,7 +1219,7 @@
 	.read_msr = xen_read_msr_safe,
 	.write_msr = xen_write_msr_safe,
 
-	.read_pmc = native_read_pmc,
+	.read_pmc = xen_read_pmc,
 
 	.iret = xen_iret,
 #ifdef CONFIG_X86_64
@@ -1264,6 +1268,10 @@
 static void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
 		BUG();
@@ -1607,7 +1615,9 @@
 	early_boot_irqs_disabled = true;
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
-	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
+	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
+				   xen_start_info->nr_pages);
+	xen_reserve_special_pages();
 
 	/*
 	 * Modify the cache mode translation tables to match Xen's PAT
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dd151b2..2c50b44 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -116,6 +116,7 @@
 DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
 DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
 
+static phys_addr_t xen_pt_base, xen_pt_size __initdata;
 
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
@@ -1093,6 +1094,16 @@
 
 static void xen_post_allocator_init(void);
 
+static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct mmuext_op op;
+
+	op.cmd = cmd;
+	op.arg1.mfn = pfn_to_mfn(pfn);
+	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+		BUG();
+}
+
 #ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
 				    unsigned long vaddr_end)
@@ -1114,6 +1125,83 @@
 	xen_mc_flush();
 }
 
+/*
+ * Make a page range writeable and free it.
+ */
+static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
+{
+	void *vaddr = __va(paddr);
+	void *vaddr_end = vaddr + size;
+
+	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
+		make_lowmem_page_readwrite(vaddr);
+
+	memblock_free(paddr, size);
+}
+
+static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
+{
+	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
+
+	if (unpin)
+		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
+	ClearPagePinned(virt_to_page(__va(pa)));
+	xen_free_ro_pages(pa, PAGE_SIZE);
+}
+
+/*
+ * Since it is well isolated we can (and since it is perhaps large we should)
+ * also free the page tables mapping the initial P->M table.
+ */
+static void __init xen_cleanmfnmap(unsigned long vaddr)
+{
+	unsigned long va = vaddr & PMD_MASK;
+	unsigned long pa;
+	pgd_t *pgd = pgd_offset_k(va);
+	pud_t *pud_page = pud_offset(pgd, 0);
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned int i;
+	bool unpin;
+
+	unpin = (vaddr == 2 * PGDIR_SIZE);
+	set_pgd(pgd, __pgd(0));
+	do {
+		pud = pud_page + pud_index(va);
+		if (pud_none(*pud)) {
+			va += PUD_SIZE;
+		} else if (pud_large(*pud)) {
+			pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
+			xen_free_ro_pages(pa, PUD_SIZE);
+			va += PUD_SIZE;
+		} else {
+			pmd = pmd_offset(pud, va);
+			if (pmd_large(*pmd)) {
+				pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
+				xen_free_ro_pages(pa, PMD_SIZE);
+			} else if (!pmd_none(*pmd)) {
+				pte = pte_offset_kernel(pmd, va);
+				set_pmd(pmd, __pmd(0));
+				for (i = 0; i < PTRS_PER_PTE; ++i) {
+					if (pte_none(pte[i]))
+						break;
+					pa = pte_pfn(pte[i]) << PAGE_SHIFT;
+					xen_free_ro_pages(pa, PAGE_SIZE);
+				}
+				xen_cleanmfnmap_free_pgtbl(pte, unpin);
+			}
+			va += PMD_SIZE;
+			if (pmd_index(va))
+				continue;
+			set_pud(pud, __pud(0));
+			xen_cleanmfnmap_free_pgtbl(pmd, unpin);
+		}
+
+	} while (pud_index(va) || pmd_index(va));
+	xen_cleanmfnmap_free_pgtbl(pud_page, unpin);
+}
+
 static void __init xen_pagetable_p2m_free(void)
 {
 	unsigned long size;
@@ -1128,18 +1216,31 @@
 	/* using __ka address and sticking INVALID_P2M_ENTRY! */
 	memset((void *)xen_start_info->mfn_list, 0xff, size);
 
-	/* We should be in __ka space. */
-	BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
 	addr = xen_start_info->mfn_list;
-	/* We roundup to the PMD, which means that if anybody at this stage is
-	 * using the __ka address of xen_start_info or xen_start_info->shared_info
-	 * they are in going to crash. Fortunatly we have already revectored
-	 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+	/*
+	 * We could be in __ka space.
+	 * We round up to the PMD, which means that if anybody at this stage is
+	 * using the __ka address of xen_start_info or
+	 * xen_start_info->shared_info they are going to crash. Fortunately
+	 * we have already revectored in xen_setup_kernel_pagetable and in
+	 * xen_setup_shared_info.
+	 */
 	size = roundup(size, PMD_SIZE);
-	xen_cleanhighmap(addr, addr + size);
 
-	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
-	memblock_free(__pa(xen_start_info->mfn_list), size);
+	if (addr >= __START_KERNEL_map) {
+		xen_cleanhighmap(addr, addr + size);
+		size = PAGE_ALIGN(xen_start_info->nr_pages *
+				  sizeof(unsigned long));
+		memblock_free(__pa(addr), size);
+	} else {
+		xen_cleanmfnmap(addr);
+	}
+}
+
+static void __init xen_pagetable_cleanhighmap(void)
+{
+	unsigned long size;
+	unsigned long addr;
 
 	/* At this stage, cleanup_highmap has already cleaned __ka space
 	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
@@ -1172,6 +1273,8 @@
 
 #ifdef CONFIG_X86_64
 	xen_pagetable_p2m_free();
+
+	xen_pagetable_cleanhighmap();
 #endif
 	/* And revector! Bye bye old array */
 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
@@ -1461,6 +1564,24 @@
 #else /* CONFIG_X86_64 */
 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_writable_page_tables) ||
+	    xen_feature(XENFEAT_auto_translated_physmap) ||
+	    xen_start_info->mfn_list >= __START_KERNEL_map)
+		return pte;
+
+	/*
+	 * Pages belonging to the initial p2m list mapped outside the default
+	 * address range must be mapped read-only. This region contains the
+	 * page tables for mapping the p2m list, too, and page tables MUST be
+	 * mapped read-only.
+	 */
+	pfn = pte_pfn(pte);
+	if (pfn >= xen_start_info->first_p2m_pfn &&
+	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
+
 	return pte;
 }
 #endif /* CONFIG_X86_64 */
@@ -1489,15 +1610,6 @@
 	native_set_pte(ptep, pte);
 }
 
-static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
-	struct mmuext_op op;
-	op.cmd = cmd;
-	op.arg1.mfn = pfn_to_mfn(pfn);
-	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-		BUG();
-}
-
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1815,7 +1927,10 @@
 	 * mappings. Considering that on Xen after the kernel mappings we
 	 * have the mappings of some pages that don't exist in pfn space, we
 	 * set max_pfn_mapped to the last real pfn mapped. */
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+	if (xen_start_info->mfn_list < __START_KERNEL_map)
+		max_pfn_mapped = xen_start_info->first_p2m_pfn;
+	else
+		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
 	pt_end = pt_base + xen_start_info->nr_pt_frames;
@@ -1855,6 +1970,11 @@
 	/* Graft it onto L4[511][510] */
 	copy_page(level2_kernel_pgt, l2);
 
+	/* Copy the initial P->M table mappings if necessary. */
+	i = pgd_index(xen_start_info->mfn_list);
+	if (i && i < pgd_index(__START_KERNEL_map))
+		init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 		/* Make pagetable pieces RO */
 		set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1894,10 +2014,192 @@
 		check_pt_base(&pt_base, &pt_end, addr[i]);
 
 	/* Our (by three pages) smaller Xen pagetable that we are using */
-	memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
+	xen_pt_base = PFN_PHYS(pt_base);
+	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
+	memblock_reserve(xen_pt_base, xen_pt_size);
+
 	/* Revector the xen_start_info */
 	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
 }
+
+/*
+ * Read a value from a physical address.
+ */
+static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
+{
+	unsigned long *vaddr;
+	unsigned long val;
+
+	vaddr = early_memremap_ro(addr, sizeof(val));
+	val = *vaddr;
+	early_memunmap(vaddr, sizeof(val));
+	return val;
+}
+
+/*
+ * Translate a virtual address to a physical one without relying on mapped
+ * page tables.
+ */
+static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+{
+	phys_addr_t pa;
+	pgd_t pgd;
+	pud_t pud;
+	pmd_t pmd;
+	pte_t pte;
+
+	pa = read_cr3();
+	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
+						       sizeof(pgd)));
+	if (!pgd_present(pgd))
+		return 0;
+
+	pa = pgd_val(pgd) & PTE_PFN_MASK;
+	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
+						       sizeof(pud)));
+	if (!pud_present(pud))
+		return 0;
+	pa = pud_pfn(pud) << PAGE_SHIFT;
+	if (pud_large(pud))
+		return pa + (vaddr & ~PUD_MASK);
+
+	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
+						       sizeof(pmd)));
+	if (!pmd_present(pmd))
+		return 0;
+	pa = pmd_pfn(pmd) << PAGE_SHIFT;
+	if (pmd_large(pmd))
+		return pa + (vaddr & ~PMD_MASK);
+
+	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
+						       sizeof(pte)));
+	if (!pte_present(pte))
+		return 0;
+	pa = pte_pfn(pte) << PAGE_SHIFT;
+
+	return pa | (vaddr & ~PAGE_MASK);
+}
+
+/*
+ * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
+ * this area.
+ */
+void __init xen_relocate_p2m(void)
+{
+	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
+	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
+	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
+	pte_t *pt;
+	pmd_t *pmd;
+	pud_t *pud;
+	pgd_t *pgd;
+	unsigned long *new_p2m;
+
+	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
+	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
+	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
+	n_pud = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
+	n_frames = n_pte + n_pt + n_pmd + n_pud;
+
+	new_area = xen_find_free_area(PFN_PHYS(n_frames));
+	if (!new_area) {
+		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
+		BUG();
+	}
+
+	/*
+	 * Setup the page tables for addressing the new p2m list.
+	 * We have asked the hypervisor to map the p2m list at the user address
+	 * PGDIR_SIZE. It may have done so, or it may have used a kernel space
+	 * address depending on the Xen version.
+	 * To avoid any possible virtual address collision, just use
+	 * 2 * PGDIR_SIZE for the new area.
+	 */
+	pud_phys = new_area;
+	pmd_phys = pud_phys + PFN_PHYS(n_pud);
+	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
+	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
+
+	pgd = __va(read_cr3());
+	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
+	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+		pud = early_memremap(pud_phys, PAGE_SIZE);
+		clear_page(pud);
+		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+		     idx_pmd++) {
+			pmd = early_memremap(pmd_phys, PAGE_SIZE);
+			clear_page(pmd);
+			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+			     idx_pt++) {
+				pt = early_memremap(pt_phys, PAGE_SIZE);
+				clear_page(pt);
+				for (idx_pte = 0;
+				     idx_pte < min(n_pte, PTRS_PER_PTE);
+				     idx_pte++) {
+					set_pte(pt + idx_pte,
+						pfn_pte(p2m_pfn, PAGE_KERNEL));
+					p2m_pfn++;
+				}
+				n_pte -= PTRS_PER_PTE;
+				early_memunmap(pt, PAGE_SIZE);
+				make_lowmem_page_readonly(__va(pt_phys));
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+						  PFN_DOWN(pt_phys));
+				set_pmd(pmd + idx_pt,
+					__pmd(_PAGE_TABLE | pt_phys));
+				pt_phys += PAGE_SIZE;
+			}
+			n_pt -= PTRS_PER_PMD;
+			early_memunmap(pmd, PAGE_SIZE);
+			make_lowmem_page_readonly(__va(pmd_phys));
+			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+					  PFN_DOWN(pmd_phys));
+			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+			pmd_phys += PAGE_SIZE;
+		}
+		n_pmd -= PTRS_PER_PUD;
+		early_memunmap(pud, PAGE_SIZE);
+		make_lowmem_page_readonly(__va(pud_phys));
+		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+		pud_phys += PAGE_SIZE;
+	}
+
+	/* Now copy the old p2m info to the new area. */
+	memcpy(new_p2m, xen_p2m_addr, size);
+	xen_p2m_addr = new_p2m;
+
+	/* Release the old p2m list and set new list info. */
+	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
+	BUG_ON(!p2m_pfn);
+	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
+
+	if (xen_start_info->mfn_list < __START_KERNEL_map) {
+		pfn = xen_start_info->first_p2m_pfn;
+		pfn_end = xen_start_info->first_p2m_pfn +
+			  xen_start_info->nr_p2m_frames;
+		set_pgd(pgd + 1, __pgd(0));
+	} else {
+		pfn = p2m_pfn;
+		pfn_end = p2m_pfn_end;
+	}
+
+	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
+	while (pfn < pfn_end) {
+		if (pfn == p2m_pfn) {
+			pfn = p2m_pfn_end;
+			continue;
+		}
+		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+		pfn++;
+	}
+
+	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
+	xen_start_info->nr_p2m_frames = n_frames;
+}
+
 #else	/* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
@@ -1938,18 +2240,41 @@
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
+/*
+ * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
+ * not be the first page table in the page table pool.
+ * Iterate through the initial page tables to find the real page table base.
+ */
+static phys_addr_t xen_find_pt_base(pmd_t *pmd)
+{
+	phys_addr_t pt_base, paddr;
+	unsigned pmdidx;
+
+	pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
+
+	for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
+		if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
+			paddr = m2p(pmd[pmdidx].pmd);
+			pt_base = min(pt_base, paddr);
+		}
+
+	return pt_base;
+}
+
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
 
+	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+
+	xen_pt_base = xen_find_pt_base(kernel_pmd);
+	xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
+
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
 
-	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	copy_page(initial_kernel_pmd, kernel_pmd);
 
 	xen_map_identity_early(initial_kernel_pmd, max_pfn);
@@ -1968,11 +2293,33 @@
 			  PFN_DOWN(__pa(initial_page_table)));
 	xen_write_cr3(__pa(initial_page_table));
 
-	memblock_reserve(__pa(xen_start_info->pt_base),
-			 xen_start_info->nr_pt_frames * PAGE_SIZE);
+	memblock_reserve(xen_pt_base, xen_pt_size);
 }
 #endif	/* CONFIG_X86_64 */
 
+void __init xen_reserve_special_pages(void)
+{
+	phys_addr_t paddr;
+
+	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
+	if (xen_start_info->store_mfn) {
+		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
+		memblock_reserve(paddr, PAGE_SIZE);
+	}
+	if (!xen_initial_domain()) {
+		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
+		memblock_reserve(paddr, PAGE_SIZE);
+	}
+}
+
+void __init xen_pt_check_e820(void)
+{
+	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
+		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
+		BUG();
+	}
+}
+
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
 
 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
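
The frame accounting at the top of xen_relocate_p2m() is worth spelling out: the new area has to hold the relocated p2m pages themselves plus every paging-structure level needed to map them. A standalone sketch of that arithmetic, assuming the usual x86-64 4-level paging constants (illustration only, not part of the patch):

	#include <stdio.h>

	/* x86-64 4-level paging constants, assumed for this sketch. */
	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PUD_SHIFT	30
	#define PGDIR_SHIFT	39

	/* Frames for the new p2m area: the list itself plus its page tables. */
	static unsigned long frames_for_p2m(unsigned long nr_pages)
	{
		unsigned long size = nr_pages * sizeof(unsigned long);
		unsigned long n_pte = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
		unsigned long n_pt = (size + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT;
		unsigned long n_pmd = (size + (1UL << PUD_SHIFT) - 1) >> PUD_SHIFT;
		unsigned long n_pud = (size + (1UL << PGDIR_SHIFT) - 1) >> PGDIR_SHIFT;

		return n_pte + n_pt + n_pmd + n_pud;
	}

	int main(void)
	{
		/* A 64GB domain: 16M pfns -> 128MB of p2m list. */
		printf("%lu\n", frames_for_p2m(16UL * 1024 * 1024));	/* 32834 */
		return 0;
	}

For a 64GB domain the paging frames (64 + 1 + 1) are noise next to the 32768 pages of the list itself, which is why xen_find_free_area() is asked for PFN_PHYS(n_frames) as one contiguous chunk.
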
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b7f18e..bfc08b1 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -79,10 +79,14 @@
 #include <xen/balloon.h>
 #include <xen/grant_table.h>
 
-#include "p2m.h"
 #include "multicalls.h"
 #include "xen-ops.h"
 
+#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN	(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
 #define PMDS_PER_MID_PAGE	(P2M_MID_PER_PAGE / PTRS_PER_PTE)
 
 unsigned long *xen_p2m_addr __read_mostly;
@@ -199,7 +203,8 @@
 	unsigned int level, topidx, mididx;
 	unsigned long *mid_mfn_p;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_feature(XENFEAT_auto_translated_physmap) ||
+	    xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
 		return;
 
 	/* Pre-initialize p2m_top_mfn to be completely missing */
@@ -260,9 +265,16 @@
 
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
-	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-		virt_to_mfn(p2m_top_mfn);
+	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
+		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
+	else
+		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+			virt_to_mfn(p2m_top_mfn);
 	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+	HYPERVISOR_shared_info->arch.p2m_generation = 0;
+	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
+	HYPERVISOR_shared_info->arch.p2m_cr3 =
+		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -478,8 +490,12 @@
 
 		ptechk = lookup_address(vaddr, &level);
 		if (ptechk == pte_pg) {
+			HYPERVISOR_shared_info->arch.p2m_generation++;
+			wmb(); /* Tools are synchronizing via p2m_generation. */
 			set_pmd(pmdp,
 				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
+			wmb(); /* Tools are synchronizing via p2m_generation. */
+			HYPERVISOR_shared_info->arch.p2m_generation++;
 			pte_newpg[i] = NULL;
 		}
 
@@ -505,7 +521,7 @@
  */
 static bool alloc_p2m(unsigned long pfn)
 {
-	unsigned topidx, mididx;
+	unsigned topidx;
 	unsigned long *top_mfn_p, *mid_mfn;
 	pte_t *ptep, *pte_pg;
 	unsigned int level;
@@ -513,9 +529,6 @@
 	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
 	unsigned long p2m_pfn;
 
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-
 	ptep = lookup_address(addr, &level);
 	BUG_ON(!ptep || level != PG_LEVEL_4K);
 	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -527,7 +540,8 @@
 			return false;
 	}
 
-	if (p2m_top_mfn) {
+	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
+		topidx = p2m_top_index(pfn);
 		top_mfn_p = &p2m_top_mfn[topidx];
 		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
 
@@ -577,10 +591,14 @@
 		spin_lock_irqsave(&p2m_update_lock, flags);
 
 		if (pte_pfn(*ptep) == p2m_pfn) {
+			HYPERVISOR_shared_info->arch.p2m_generation++;
+			wmb(); /* Tools are synchronizing via p2m_generation. */
 			set_pte(ptep,
 				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
+			wmb(); /* Tools are synchronizing via p2m_generation. */
+			HYPERVISOR_shared_info->arch.p2m_generation++;
 			if (mid_mfn)
-				mid_mfn[mididx] = virt_to_mfn(p2m);
+				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
 			p2m = NULL;
 		}
 
@@ -630,6 +648,11 @@
 		return true;
 	}
 
+	/*
+	 * The interface requires atomic updates on p2m elements.
+	 * xen_safe_write_ulong() is using __put_user which does an atomic
+	 * store via asm().
+	 */
 	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
 		return true;
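
The paired p2m_generation increments bracketing each set_pmd()/set_pte() above turn p2m updates into a seqlock-style protocol: the counter is odd while a remapping is in flight, so a consistent reader retries on an odd or changed value. The consumer sits in the Xen toolstack and is not part of this patch; a reader under those assumptions would look roughly like:

	/*
	 * Hypothetical toolstack-side reader: the real consumer lives in
	 * the Xen tools, not in this patch; names are illustrative only.
	 */
	uint64_t read_p2m_entry(volatile struct arch_shared_info *arch,
				const volatile uint64_t *p2m, unsigned long pfn)
	{
		uint64_t gen, val;

		do {
			gen = arch->p2m_generation;
			rmb();		/* read the generation before the entry */
			val = p2m[pfn];
			rmb();		/* read the entry before the re-check */
		} while ((gen & 1) || gen != arch->p2m_generation);

		return val;
	}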
 
diff --git a/arch/x86/xen/p2m.h b/arch/x86/xen/p2m.h
deleted file mode 100644
index ad8aee2..0000000
--- a/arch/x86/xen/p2m.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _XEN_P2M_H
-#define _XEN_P2M_H
-
-#define P2M_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE    (PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE    (PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN         (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
-#define MAX_REMAP_RANGES    10
-
-extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
-                                      unsigned long pfn_e);
-
-#endif  /* _XEN_P2M_H */
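
With the header gone, the sizing constants live next to their only remaining users in p2m.c. The arithmetic they encode is worth noting, since it is where the 512GB limit used elsewhere in this series comes from (4K pages, 8-byte pointers):

	P2M_PER_PAGE     = PAGE_SIZE / sizeof(unsigned long)    = 4096 / 8 = 512
	P2M_MID_PER_PAGE = PAGE_SIZE / sizeof(unsigned long *)  = 4096 / 8 = 512
	P2M_TOP_PER_PAGE = PAGE_SIZE / sizeof(unsigned long **) = 4096 / 8 = 512
	MAX_P2M_PFN      = 512 * 512 * 512 = 2^27 pfns = 512GB of guest memory
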
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index a826171..9586ff3 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -68,7 +68,7 @@
 	return 0;
 }
 
-bool xen_has_pv_devices()
+bool xen_has_pv_devices(void)
 {
 	if (!xen_domain())
 		return false;
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
new file mode 100644
index 0000000..724a087
--- /dev/null
+++ b/arch/x86/xen/pmu.c
@@ -0,0 +1,570 @@
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include <asm/xen/hypercall.h>
+#include <xen/page.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+#include <xen/interface/xenpmu.h>
+
+#include "xen-ops.h"
+#include "pmu.h"
+
+/* x86_pmu.handle_irq definition */
+#include "../kernel/cpu/perf_event.h"
+
+#define XENPMU_IRQ_PROCESSING    1
+struct xenpmu {
+	/* Shared page between hypervisor and domain */
+	struct xen_pmu_data *xenpmu_data;
+
+	uint8_t flags;
+};
+static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared);
+#define get_xenpmu_data()    (this_cpu_ptr(&xenpmu_shared)->xenpmu_data)
+#define get_xenpmu_flags()   (this_cpu_ptr(&xenpmu_shared)->flags)
+
+/* Macro for computing address of a PMU MSR bank */
+#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
+					    (uintptr_t)ctxt->field))
+
+/* AMD PMU */
+#define F15H_NUM_COUNTERS   6
+#define F10H_NUM_COUNTERS   4
+
+static __read_mostly uint32_t amd_counters_base;
+static __read_mostly uint32_t amd_ctrls_base;
+static __read_mostly int amd_msr_step;
+static __read_mostly int k7_counters_mirrored;
+static __read_mostly int amd_num_counters;
+
+/* Intel PMU */
+#define MSR_TYPE_COUNTER            0
+#define MSR_TYPE_CTRL               1
+#define MSR_TYPE_GLOBAL             2
+#define MSR_TYPE_ARCH_COUNTER       3
+#define MSR_TYPE_ARCH_CTRL          4
+
+/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */
+#define PMU_GENERAL_NR_SHIFT        8
+#define PMU_GENERAL_NR_BITS         8
+#define PMU_GENERAL_NR_MASK         (((1 << PMU_GENERAL_NR_BITS) - 1) \
+				     << PMU_GENERAL_NR_SHIFT)
+
+/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */
+#define PMU_FIXED_NR_SHIFT          0
+#define PMU_FIXED_NR_BITS           5
+#define PMU_FIXED_NR_MASK           (((1 << PMU_FIXED_NR_BITS) - 1) \
+				     << PMU_FIXED_NR_SHIFT)
+
+/* Alias registers (0x4c1) for full-width writes to PMCs */
+#define MSR_PMC_ALIAS_MASK          (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))
+
+#define INTEL_PMC_TYPE_SHIFT        30
+
+static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
+
+
+static void xen_pmu_arch_init(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+
+		switch (boot_cpu_data.x86) {
+		case 0x15:
+			amd_num_counters = F15H_NUM_COUNTERS;
+			amd_counters_base = MSR_F15H_PERF_CTR;
+			amd_ctrls_base = MSR_F15H_PERF_CTL;
+			amd_msr_step = 2;
+			k7_counters_mirrored = 1;
+			break;
+		case 0x10:
+		case 0x12:
+		case 0x14:
+		case 0x16:
+		default:
+			amd_num_counters = F10H_NUM_COUNTERS;
+			amd_counters_base = MSR_K7_PERFCTR0;
+			amd_ctrls_base = MSR_K7_EVNTSEL0;
+			amd_msr_step = 1;
+			k7_counters_mirrored = 0;
+			break;
+		}
+	} else {
+		uint32_t eax, ebx, ecx, edx;
+
+		cpuid(0xa, &eax, &ebx, &ecx, &edx);
+
+		intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
+			PMU_GENERAL_NR_SHIFT;
+		intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
+			PMU_FIXED_NR_SHIFT;
+	}
+}
+
+static inline uint32_t get_fam15h_addr(u32 addr)
+{
+	switch (addr) {
+	case MSR_K7_PERFCTR0:
+	case MSR_K7_PERFCTR1:
+	case MSR_K7_PERFCTR2:
+	case MSR_K7_PERFCTR3:
+		return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);
+	case MSR_K7_EVNTSEL0:
+	case MSR_K7_EVNTSEL1:
+	case MSR_K7_EVNTSEL2:
+	case MSR_K7_EVNTSEL3:
+		return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);
+	default:
+		break;
+	}
+
+	return addr;
+}
+
+static inline bool is_amd_pmu_msr(unsigned int msr)
+{
+	if ((msr >= MSR_F15H_PERF_CTL &&
+	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
+	    (msr >= MSR_K7_EVNTSEL0 &&
+	     msr < MSR_K7_PERFCTR0 + amd_num_counters))
+		return true;
+
+	return false;
+}
+
+static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
+{
+	u32 msr_index_pmc;
+
+	switch (msr_index) {
+	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+	case MSR_IA32_DS_AREA:
+	case MSR_IA32_PEBS_ENABLE:
+		*type = MSR_TYPE_CTRL;
+		return true;
+
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		*type = MSR_TYPE_GLOBAL;
+		return true;
+
+	default:
+
+		if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
+		    (msr_index < MSR_CORE_PERF_FIXED_CTR0 +
+				 intel_num_fixed_counters)) {
+			*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
+			*type = MSR_TYPE_COUNTER;
+			return true;
+		}
+
+		if ((msr_index >= MSR_P6_EVNTSEL0) &&
+		    (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
+			*index = msr_index - MSR_P6_EVNTSEL0;
+			*type = MSR_TYPE_ARCH_CTRL;
+			return true;
+		}
+
+		msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
+		if ((msr_index_pmc >= MSR_IA32_PERFCTR0) &&
+		    (msr_index_pmc < MSR_IA32_PERFCTR0 +
+				     intel_num_arch_counters)) {
+			*type = MSR_TYPE_ARCH_COUNTER;
+			*index = msr_index_pmc - MSR_IA32_PERFCTR0;
+			return true;
+		}
+		return false;
+	}
+}
+
+static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
+				  int index, bool is_read)
+{
+	uint64_t *reg = NULL;
+	struct xen_pmu_intel_ctxt *ctxt;
+	uint64_t *fix_counters;
+	struct xen_pmu_cntr_pair *arch_cntr_pair;
+	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+	uint8_t xenpmu_flags = get_xenpmu_flags();
+
+
+	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
+		return false;
+
+	ctxt = &xenpmu_data->pmu.c.intel;
+
+	switch (msr) {
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		reg = &ctxt->global_ovf_ctrl;
+		break;
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+		reg = &ctxt->global_status;
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		reg = &ctxt->global_ctrl;
+		break;
+	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+		reg = &ctxt->fixed_ctrl;
+		break;
+	default:
+		switch (type) {
+		case MSR_TYPE_COUNTER:
+			fix_counters = field_offset(ctxt, fixed_counters);
+			reg = &fix_counters[index];
+			break;
+		case MSR_TYPE_ARCH_COUNTER:
+			arch_cntr_pair = field_offset(ctxt, arch_counters);
+			reg = &arch_cntr_pair[index].counter;
+			break;
+		case MSR_TYPE_ARCH_CTRL:
+			arch_cntr_pair = field_offset(ctxt, arch_counters);
+			reg = &arch_cntr_pair[index].control;
+			break;
+		default:
+			return false;
+		}
+	}
+
+	if (reg) {
+		if (is_read)
+			*val = *reg;
+		else {
+			*reg = *val;
+
+			if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
+				ctxt->global_status &= (~(*val));
+		}
+		return true;
+	}
+
+	return false;
+}
+
+static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
+{
+	uint64_t *reg = NULL;
+	int i, off = 0;
+	struct xen_pmu_amd_ctxt *ctxt;
+	uint64_t *counter_regs, *ctrl_regs;
+	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+	uint8_t xenpmu_flags = get_xenpmu_flags();
+
+	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
+		return false;
+
+	if (k7_counters_mirrored &&
+	    ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))
+		msr = get_fam15h_addr(msr);
+
+	ctxt = &xenpmu_data->pmu.c.amd;
+	for (i = 0; i < amd_num_counters; i++) {
+		if (msr == amd_ctrls_base + off) {
+			ctrl_regs = field_offset(ctxt, ctrls);
+			reg = &ctrl_regs[i];
+			break;
+		} else if (msr == amd_counters_base + off) {
+			counter_regs = field_offset(ctxt, counters);
+			reg = &counter_regs[i];
+			break;
+		}
+		off += amd_msr_step;
+	}
+
+	if (reg) {
+		if (is_read)
+			*val = *reg;
+		else
+			*reg = *val;
+
+		return true;
+	}
+	return false;
+}
+
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+		if (is_amd_pmu_msr(msr)) {
+			if (!xen_amd_pmu_emulate(msr, val, 1))
+				*val = native_read_msr_safe(msr, err);
+			return true;
+		}
+	} else {
+		int type, index;
+
+		if (is_intel_pmu_msr(msr, &type, &index)) {
+			if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
+				*val = native_read_msr_safe(msr, err);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
+{
+	uint64_t val = ((uint64_t)high << 32) | low;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+		if (is_amd_pmu_msr(msr)) {
+			if (!xen_amd_pmu_emulate(msr, &val, 0))
+				*err = native_write_msr_safe(msr, low, high);
+			return true;
+		}
+	} else {
+		int type, index;
+
+		if (is_intel_pmu_msr(msr, &type, &index)) {
+			if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
+				*err = native_write_msr_safe(msr, low, high);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static unsigned long long xen_amd_read_pmc(int counter)
+{
+	struct xen_pmu_amd_ctxt *ctxt;
+	uint64_t *counter_regs;
+	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+	uint8_t xenpmu_flags = get_xenpmu_flags();
+
+	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
+		uint32_t msr;
+		int err;
+
+		msr = amd_counters_base + (counter * amd_msr_step);
+		return native_read_msr_safe(msr, &err);
+	}
+
+	ctxt = &xenpmu_data->pmu.c.amd;
+	counter_regs = field_offset(ctxt, counters);
+	return counter_regs[counter];
+}
+
+static unsigned long long xen_intel_read_pmc(int counter)
+{
+	struct xen_pmu_intel_ctxt *ctxt;
+	uint64_t *fixed_counters;
+	struct xen_pmu_cntr_pair *arch_cntr_pair;
+	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+	uint8_t xenpmu_flags = get_xenpmu_flags();
+
+	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
+		uint32_t msr;
+		int err;
+
+		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
+			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
+		else
+			msr = MSR_IA32_PERFCTR0 + counter;
+
+		return native_read_msr_safe(msr, &err);
+	}
+
+	ctxt = &xenpmu_data->pmu.c.intel;
+	if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) {
+		fixed_counters = field_offset(ctxt, fixed_counters);
+		return fixed_counters[counter & 0xffff];
+	}
+
+	arch_cntr_pair = field_offset(ctxt, arch_counters);
+	return arch_cntr_pair[counter].counter;
+}
+
+unsigned long long xen_read_pmc(int counter)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return xen_amd_read_pmc(counter);
+	else
+		return xen_intel_read_pmc(counter);
+}
+
+int pmu_apic_update(uint32_t val)
+{
+	int ret;
+	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+	if (!xenpmu_data) {
+		pr_warn_once("%s: pmudata not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	xenpmu_data->pmu.l.lapic_lvtpc = val;
+
+	if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING)
+		return 0;
+
+	ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
+
+	return ret;
+}
+
+/* perf callbacks */
+static int xen_is_in_guest(void)
+{
+	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+	if (!xenpmu_data) {
+		pr_warn_once("%s: pmudata not initialized\n", __func__);
+		return 0;
+	}
+
+	if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
+		return 0;
+
+	return 1;
+}
+
+static int xen_is_user_mode(void)
+{
+	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+	if (!xenpmu_data) {
+		pr_warn_once("%s: pmudata not initialized\n", __func__);
+		return 0;
+	}
+
+	if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
+		return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
+	else
+		return !!(xenpmu_data->pmu.r.regs.cpl & 3);
+}
+
+static unsigned long xen_get_guest_ip(void)
+{
+	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+	if (!xenpmu_data) {
+		pr_warn_once("%s: pmudata not initialized\n", __func__);
+		return 0;
+	}
+
+	return xenpmu_data->pmu.r.regs.ip;
+}
+
+static struct perf_guest_info_callbacks xen_guest_cbs = {
+	.is_in_guest            = xen_is_in_guest,
+	.is_user_mode           = xen_is_user_mode,
+	.get_guest_ip           = xen_get_guest_ip,
+};
+
+/* Convert registers from Xen's format to Linux' */
+static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
+			     struct pt_regs *regs, uint64_t pmu_flags)
+{
+	regs->ip = xen_regs->ip;
+	regs->cs = xen_regs->cs;
+	regs->sp = xen_regs->sp;
+
+	if (pmu_flags & PMU_SAMPLE_PV) {
+		if (pmu_flags & PMU_SAMPLE_USER)
+			regs->cs |= 3;
+		else
+			regs->cs &= ~3;
+	} else {
+		if (xen_regs->cpl)
+			regs->cs |= 3;
+		else
+			regs->cs &= ~3;
+	}
+}
+
+irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
+{
+	int err, ret = IRQ_NONE;
+	struct pt_regs regs;
+	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+	uint8_t xenpmu_flags = get_xenpmu_flags();
+
+	if (!xenpmu_data) {
+		pr_warn_once("%s: pmudata not initialized\n", __func__);
+		return ret;
+	}
+
+	this_cpu_ptr(&xenpmu_shared)->flags =
+		xenpmu_flags | XENPMU_IRQ_PROCESSING;
+	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
+			 xenpmu_data->pmu.pmu_flags);
+	if (x86_pmu.handle_irq(&regs))
+		ret = IRQ_HANDLED;
+
+	/* Write out cached context to HW */
+	err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
+	this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags;
+	if (err) {
+		pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
+		return IRQ_NONE;
+	}
+
+	return ret;
+}
+
+bool is_xen_pmu(int cpu)
+{
+	return (get_xenpmu_data() != NULL);
+}
+
+void xen_pmu_init(int cpu)
+{
+	int err;
+	struct xen_pmu_params xp;
+	unsigned long pfn;
+	struct xen_pmu_data *xenpmu_data;
+
+	BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
+
+	if (xen_hvm_domain())
+		return;
+
+	xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
+	if (!xenpmu_data) {
+		pr_err("VPMU init: No memory\n");
+		return;
+	}
+	pfn = virt_to_pfn(xenpmu_data);
+
+	xp.val = pfn_to_mfn(pfn);
+	xp.vcpu = cpu;
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+	err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
+	if (err)
+		goto fail;
+
+	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
+	per_cpu(xenpmu_shared, cpu).flags = 0;
+
+	if (cpu == 0) {
+		perf_register_guest_info_callbacks(&xen_guest_cbs);
+		xen_pmu_arch_init();
+	}
+
+	return;
+
+fail:
+	pr_warn_once("Could not initialize VPMU for cpu %d, error %d\n",
+		cpu, err);
+	free_pages((unsigned long)xenpmu_data, 0);
+}
+
+void xen_pmu_finish(int cpu)
+{
+	struct xen_pmu_params xp;
+
+	if (xen_hvm_domain())
+		return;
+
+	xp.vcpu = cpu;
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+
+	(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);
+
+	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
+	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
+}
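
pmu_msr_read() and pmu_msr_write() return true as soon as the MSR is recognized as a vPMU register, with the result already produced: from the cached context while a PMU interrupt is being processed, from the hardware otherwise. The wiring into the pv MSR accessors is outside this file; a caller honouring that contract would look roughly like this sketch (function name illustrative):

	/* Sketch only: the real hook-up lives in the Xen pv MSR accessors. */
	static u64 xen_read_msr_sketch(unsigned int msr, int *err)
	{
		u64 val;

		*err = 0;		/* emulated reads always succeed */
		if (pmu_msr_read(msr, &val, err))
			return val;	/* vPMU register, val/err filled in */

		return native_read_msr_safe(msr, err);
	}
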
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
new file mode 100644
index 0000000..af5f0ad
--- /dev/null
+++ b/arch/x86/xen/pmu.h
@@ -0,0 +1,15 @@
+#ifndef __XEN_PMU_H
+#define __XEN_PMU_H
+
+#include <xen/interface/xenpmu.h>
+
+irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
+void xen_pmu_init(int cpu);
+void xen_pmu_finish(int cpu);
+bool is_xen_pmu(int cpu);
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+int pmu_apic_update(uint32_t reg);
+unsigned long long xen_read_pmc(int counter);
+
+#endif /* __XEN_PMU_H */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 55f388e..f5ef674 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -27,17 +27,23 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "vdso.h"
-#include "p2m.h"
 #include "mmu.h"
 
+#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
+
 /* Amount of extra memory space we add to the e820 ranges */
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;
 
+/* E820 map used while setting up memory. */
+static struct e820entry xen_e820_map[E820MAX] __initdata;
+static u32 xen_e820_map_entries __initdata;
+
 /*
  * Buffer used to remap identity mapped pages. We only need the virtual space.
  * The physical page behind this address is remapped as needed to different
@@ -64,62 +70,89 @@
  */
 #define EXTRA_MEM_RATIO		(10)
 
-static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
+static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
+
+static void __init xen_parse_512gb(void)
+{
+	bool val = false;
+	char *arg;
+
+	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
+	if (!arg)
+		return;
+
+	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
+	if (!arg)
+		val = true;
+	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
+		return;
+
+	xen_512gb_limit = val;
+}
+
+static void __init xen_add_extra_mem(unsigned long start_pfn,
+				     unsigned long n_pfns)
 {
 	int i;
 
+	/*
+	 * No need to check for zero size; that should happen rarely, and it
+	 * will only write a new entry that is regarded as unused due to its
+	 * zero size.
+	 */
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
 		/* Add new region. */
-		if (xen_extra_mem[i].size == 0) {
-			xen_extra_mem[i].start = start;
-			xen_extra_mem[i].size  = size;
+		if (xen_extra_mem[i].n_pfns == 0) {
+			xen_extra_mem[i].start_pfn = start_pfn;
+			xen_extra_mem[i].n_pfns = n_pfns;
 			break;
 		}
 		/* Append to existing region. */
-		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
-			xen_extra_mem[i].size += size;
+		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
+		    start_pfn) {
+			xen_extra_mem[i].n_pfns += n_pfns;
 			break;
 		}
 	}
 	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
 		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
-	memblock_reserve(start, size);
+	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
 }
 
-static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
+static void __init xen_del_extra_mem(unsigned long start_pfn,
+				     unsigned long n_pfns)
 {
 	int i;
-	phys_addr_t start_r, size_r;
+	unsigned long start_r, size_r;
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-		start_r = xen_extra_mem[i].start;
-		size_r = xen_extra_mem[i].size;
+		start_r = xen_extra_mem[i].start_pfn;
+		size_r = xen_extra_mem[i].n_pfns;
 
 		/* Start of region. */
-		if (start_r == start) {
-			BUG_ON(size > size_r);
-			xen_extra_mem[i].start += size;
-			xen_extra_mem[i].size -= size;
+		if (start_r == start_pfn) {
+			BUG_ON(n_pfns > size_r);
+			xen_extra_mem[i].start_pfn += n_pfns;
+			xen_extra_mem[i].n_pfns -= n_pfns;
 			break;
 		}
 		/* End of region. */
-		if (start_r + size_r == start + size) {
-			BUG_ON(size > size_r);
-			xen_extra_mem[i].size -= size;
+		if (start_r + size_r == start_pfn + n_pfns) {
+			BUG_ON(n_pfns > size_r);
+			xen_extra_mem[i].n_pfns -= n_pfns;
 			break;
 		}
 		/* Mid of region. */
-		if (start > start_r && start < start_r + size_r) {
-			BUG_ON(start + size > start_r + size_r);
-			xen_extra_mem[i].size = start - start_r;
+		if (start_pfn > start_r && start_pfn < start_r + size_r) {
+			BUG_ON(start_pfn + n_pfns > start_r + size_r);
+			xen_extra_mem[i].n_pfns = start_pfn - start_r;
 			/* Calling memblock_reserve() again is okay. */
-			xen_add_extra_mem(start + size, start_r + size_r -
-					  (start + size));
+			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
+					  (start_pfn + n_pfns));
 			break;
 		}
 	}
-	memblock_free(start, size);
+	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
 }
 
 /*
@@ -130,11 +163,10 @@
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
 	int i;
-	phys_addr_t addr = PFN_PHYS(pfn);
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-		if (addr >= xen_extra_mem[i].start &&
-		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
+		if (pfn >= xen_extra_mem[i].start_pfn &&
+		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
 			return INVALID_P2M_ENTRY;
 	}
 
@@ -150,10 +182,10 @@
 	int i;
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-		if (!xen_extra_mem[i].size)
+		if (!xen_extra_mem[i].n_pfns)
 			continue;
-		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
-		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
+		pfn_s = xen_extra_mem[i].start_pfn;
+		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
 		for (pfn = pfn_s; pfn < pfn_e; pfn++)
 			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 	}
@@ -164,15 +196,13 @@
  * This function updates min_pfn with the pfn found and returns
  * the size of that range or zero if not found.
  */
-static unsigned long __init xen_find_pfn_range(
-	const struct e820entry *list, size_t map_size,
-	unsigned long *min_pfn)
+static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
 {
-	const struct e820entry *entry;
+	const struct e820entry *entry = xen_e820_map;
 	unsigned int i;
 	unsigned long done = 0;
 
-	for (i = 0, entry = list; i < map_size; i++, entry++) {
+	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
 		unsigned long s_pfn;
 		unsigned long e_pfn;
 
@@ -221,7 +251,7 @@
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
+			unsigned long end_pfn, unsigned long nr_pages)
 {
 	unsigned long pfn, end;
 	int ret;
@@ -241,7 +271,7 @@
 		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
 		if (ret == 1) {
-			(*released)++;
+			xen_released_pages++;
 			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
 				break;
 		} else
@@ -356,9 +386,8 @@
  * to Xen and not remapped.
  */
 static unsigned long __init xen_set_identity_and_remap_chunk(
-        const struct e820entry *list, size_t map_size, unsigned long start_pfn,
-	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-	unsigned long *released, unsigned long *remapped)
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long remap_pfn)
 {
 	unsigned long pfn;
 	unsigned long i = 0;
@@ -379,12 +408,11 @@
 		if (cur_pfn + size > nr_pages)
 			size = nr_pages - cur_pfn;
 
-		remap_range_size = xen_find_pfn_range(list, map_size,
-						      &remap_pfn);
+		remap_range_size = xen_find_pfn_range(&remap_pfn);
 		if (!remap_range_size) {
 			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
 			xen_set_identity_and_release_chunk(cur_pfn,
-				cur_pfn + left, nr_pages, released);
+						cur_pfn + left, nr_pages);
 			break;
 		}
 		/* Adjust size to fit in current e820 RAM region */
@@ -396,7 +424,6 @@
 		/* Update variables to reflect new mappings. */
 		i += size;
 		remap_pfn += size;
-		*remapped += size;
 	}
 
 	/*
@@ -411,15 +438,11 @@
 	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(
-	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-	unsigned long *released, unsigned long *remapped)
+static void __init xen_set_identity_and_remap(unsigned long nr_pages)
 {
 	phys_addr_t start = 0;
 	unsigned long last_pfn = nr_pages;
-	const struct e820entry *entry;
-	unsigned long num_released = 0;
-	unsigned long num_remapped = 0;
+	const struct e820entry *entry = xen_e820_map;
 	int i;
 
 	/*
@@ -433,9 +456,9 @@
 	 * example) the DMI tables in a reserved region that begins on
 	 * a non-page boundary.
 	 */
-	for (i = 0, entry = list; i < map_size; i++, entry++) {
+	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
 		phys_addr_t end = entry->addr + entry->size;
-		if (entry->type == E820_RAM || i == map_size - 1) {
+		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
 			unsigned long start_pfn = PFN_DOWN(start);
 			unsigned long end_pfn = PFN_UP(end);
 
@@ -444,17 +467,13 @@
 
 			if (start_pfn < end_pfn)
 				last_pfn = xen_set_identity_and_remap_chunk(
-						list, map_size, start_pfn,
-						end_pfn, nr_pages, last_pfn,
-						&num_released, &num_remapped);
+						start_pfn, end_pfn, nr_pages,
+						last_pfn);
 			start = end;
 		}
 	}
 
-	*released = num_released;
-	*remapped = num_remapped;
-
-	pr_info("Released %ld page(s)\n", num_released);
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 }
 
 /*
@@ -494,7 +513,7 @@
 		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
 			len += xen_remap_buf.size;
 		} else {
-			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+			xen_del_extra_mem(pfn_s, len);
 			pfn_s = xen_remap_buf.target_pfn;
 			len = xen_remap_buf.size;
 		}
@@ -504,19 +523,36 @@
 	}
 
 	if (pfn_s != ~0UL && len)
-		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+		xen_del_extra_mem(pfn_s, len);
 
 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 
 	pr_info("Remapped %ld page(s)\n", remapped);
 }
 
+static unsigned long __init xen_get_pages_limit(void)
+{
+	unsigned long limit;
+
+#ifdef CONFIG_X86_32
+	limit = GB(64) / PAGE_SIZE;
+#else
+	limit = MAXMEM / PAGE_SIZE;
+	if (!xen_initial_domain() && xen_512gb_limit)
+		limit = GB(512) / PAGE_SIZE;
+#endif
+	return limit;
+}
+
 static unsigned long __init xen_get_max_pages(void)
 {
-	unsigned long max_pages = MAX_DOMAIN_PAGES;
+	unsigned long max_pages, limit;
 	domid_t domid = DOMID_SELF;
 	int ret;
 
+	limit = xen_get_pages_limit();
+	max_pages = limit;
+
 	/*
 	 * For the initial domain we use the maximum reservation as
 	 * the maximum page.
@@ -532,7 +568,7 @@
 			max_pages = ret;
 	}
 
-	return min(max_pages, MAX_DOMAIN_PAGES);
+	return min(max_pages, limit);
 }
 
 static void __init xen_align_and_add_e820_region(phys_addr_t start,
@@ -549,39 +585,188 @@
 	e820_add_region(start, end - start, type);
 }
 
-static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
+static void __init xen_ignore_unusable(void)
 {
-	struct e820entry *entry;
+	struct e820entry *entry = xen_e820_map;
 	unsigned int i;
 
-	for (i = 0, entry = list; i < map_size; i++, entry++) {
+	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
 		if (entry->type == E820_UNUSABLE)
 			entry->type = E820_RAM;
 	}
 }
 
+static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
+{
+	unsigned long extra = 0;
+	unsigned long start_pfn, end_pfn;
+	const struct e820entry *entry = xen_e820_map;
+	int i;
+
+	end_pfn = 0;
+	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
+		start_pfn = PFN_DOWN(entry->addr);
+		/* Handle adjacent regions on non-page boundaries. */
+		end_pfn = min(end_pfn, start_pfn);
+
+		if (start_pfn >= max_pfn)
+			return extra + max_pfn - end_pfn;
+
+		/* Add any holes in map to result. */
+		extra += start_pfn - end_pfn;
+
+		end_pfn = PFN_UP(entry->addr + entry->size);
+		end_pfn = min(end_pfn, max_pfn);
+
+		if (entry->type != E820_RAM)
+			extra += end_pfn - start_pfn;
+	}
+
+	return extra;
+}
+
+bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+{
+	struct e820entry *entry;
+	unsigned mapcnt;
+	phys_addr_t end;
+
+	if (!size)
+		return false;
+
+	end = start + size;
+	entry = xen_e820_map;
+
+	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
+		if (entry->type == E820_RAM && entry->addr <= start &&
+		    (entry->addr + entry->size) >= end)
+			return false;
+
+		entry++;
+	}
+
+	return true;
+}
+
+/*
+ * Find a free area in physical memory which is not yet reserved and is
+ * compliant with the E820 map.
+ * Used to relocate pre-allocated areas like the initrd or the p2m list which
+ * conflict with the E820 map to be used.
+ * Return 0 if no area is found; otherwise return the physical address of the
+ * area, which has already been reserved for the caller's convenience.
+ */
+phys_addr_t __init xen_find_free_area(phys_addr_t size)
+{
+	unsigned mapcnt;
+	phys_addr_t addr, start;
+	struct e820entry *entry = xen_e820_map;
+
+	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
+		if (entry->type != E820_RAM || entry->size < size)
+			continue;
+		start = entry->addr;
+		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
+			if (!memblock_is_reserved(addr))
+				continue;
+			start = addr + PAGE_SIZE;
+			if (start + size > entry->addr + entry->size)
+				break;
+		}
+		if (addr >= start + size) {
+			memblock_reserve(start, size);
+			return start;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Like memcpy, but with physical addresses for dest and src.
+ */
+static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
+				   phys_addr_t n)
+{
+	phys_addr_t dest_off, src_off, dest_len, src_len, len;
+	void *from, *to;
+
+	while (n) {
+		dest_off = dest & ~PAGE_MASK;
+		src_off = src & ~PAGE_MASK;
+		dest_len = n;
+		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
+			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
+		src_len = n;
+		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
+			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
+		len = min(dest_len, src_len);
+		to = early_memremap(dest - dest_off, dest_len + dest_off);
+		from = early_memremap(src - src_off, src_len + src_off);
+		memcpy(to, from, len);
+		early_memunmap(to, dest_len + dest_off);
+		early_memunmap(from, src_len + src_off);
+		n -= len;
+		dest += len;
+		src += len;
+	}
+}
+
+/*
+ * Reserve Xen mfn_list.
+ */
+static void __init xen_reserve_xen_mfnlist(void)
+{
+	phys_addr_t start, size;
+
+	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
+		start = __pa(xen_start_info->mfn_list);
+		size = PFN_ALIGN(xen_start_info->nr_pages *
+				 sizeof(unsigned long));
+	} else {
+		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
+		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
+	}
+
+	if (!xen_is_e820_reserved(start, size)) {
+		memblock_reserve(start, size);
+		return;
+	}
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
+	 * is not supported, so just give up.
+	 */
+	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
+	BUG();
+#else
+	xen_relocate_p2m();
+#endif
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
-	static struct e820entry map[E820MAX] __initdata;
-
-	unsigned long max_pfn = xen_start_info->nr_pages;
-	phys_addr_t mem_end;
+	unsigned long max_pfn, pfn_s, n_pfns;
+	phys_addr_t mem_end, addr, size, chunk_size;
+	u32 type;
 	int rc;
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
 	unsigned long extra_pages = 0;
-	unsigned long remapped_pages;
 	int i;
 	int op;
 
-	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+	xen_parse_512gb();
+	max_pfn = xen_get_pages_limit();
+	max_pfn = min(max_pfn, xen_start_info->nr_pages);
 	mem_end = PFN_PHYS(max_pfn);
 
 	memmap.nr_entries = E820MAX;
-	set_xen_guest_handle(memmap.buffer, map);
+	set_xen_guest_handle(memmap.buffer, xen_e820_map);
 
 	op = xen_initial_domain() ?
 		XENMEM_machine_memory_map :
@@ -590,15 +775,16 @@
 	if (rc == -ENOSYS) {
 		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
-		map[0].addr = 0ULL;
-		map[0].size = mem_end;
+		xen_e820_map[0].addr = 0ULL;
+		xen_e820_map[0].size = mem_end;
 		/* 8MB slack (to balance backend allocations). */
-		map[0].size += 8ULL << 20;
-		map[0].type = E820_RAM;
+		xen_e820_map[0].size += 8ULL << 20;
+		xen_e820_map[0].type = E820_RAM;
 		rc = 0;
 	}
 	BUG_ON(rc);
 	BUG_ON(memmap.nr_entries == 0);
+	xen_e820_map_entries = memmap.nr_entries;
 
 	/*
 	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
@@ -609,72 +795,75 @@
 	 * a patch in the future.
 	 */
 	if (xen_initial_domain())
-		xen_ignore_unusable(map, memmap.nr_entries);
+		xen_ignore_unusable();
 
 	/* Make sure the Xen-supplied memory map is well-ordered. */
-	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+			  &xen_e820_map_entries);
 
 	max_pages = xen_get_max_pages();
+
+	/* How many extra pages do we need due to remapping? */
+	max_pages += xen_count_remap_pages(max_pfn);
+
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
 
 	/*
-	 * Set identity map on non-RAM pages and prepare remapping the
-	 * underlying RAM.
-	 */
-	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-				   &xen_released_pages, &remapped_pages);
-
-	extra_pages += xen_released_pages;
-	extra_pages += remapped_pages;
-
-	/*
 	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
 	 * factor of the base size.  On non-highmem systems, the base
 	 * size is the full initial memory allocation; on highmem it
 	 * is limited to the max size of lowmem, so that it doesn't
 	 * get completely filled.
 	 *
+	 * Make sure we have no memory above max_pages, as this area
+	 * isn't handled by the p2m management.
+	 *
 	 * In principle there could be a problem in lowmem systems if
 	 * the initial memory is also very large with respect to
 	 * lowmem, but we won't try to deal with that here.
 	 */
-	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
-			  extra_pages);
+	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+			   extra_pages, max_pages - max_pfn);
 	i = 0;
-	while (i < memmap.nr_entries) {
-		phys_addr_t addr = map[i].addr;
-		phys_addr_t size = map[i].size;
-		u32 type = map[i].type;
+	addr = xen_e820_map[0].addr;
+	size = xen_e820_map[0].size;
+	while (i < xen_e820_map_entries) {
+		chunk_size = size;
+		type = xen_e820_map[i].type;
 
 		if (type == E820_RAM) {
 			if (addr < mem_end) {
-				size = min(size, mem_end - addr);
+				chunk_size = min(size, mem_end - addr);
 			} else if (extra_pages) {
-				size = min(size, PFN_PHYS(extra_pages));
-				extra_pages -= PFN_DOWN(size);
-				xen_add_extra_mem(addr, size);
-				xen_max_p2m_pfn = PFN_DOWN(addr + size);
+				chunk_size = min(size, PFN_PHYS(extra_pages));
+				pfn_s = PFN_UP(addr);
+				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
+				extra_pages -= n_pfns;
+				xen_add_extra_mem(pfn_s, n_pfns);
+				xen_max_p2m_pfn = pfn_s + n_pfns;
 			} else
 				type = E820_UNUSABLE;
 		}
 
-		xen_align_and_add_e820_region(addr, size, type);
+		xen_align_and_add_e820_region(addr, chunk_size, type);
 
-		map[i].addr += size;
-		map[i].size -= size;
-		if (map[i].size == 0)
+		addr += chunk_size;
+		size -= chunk_size;
+		if (size == 0) {
 			i++;
+			if (i < xen_e820_map_entries) {
+				addr = xen_e820_map[i].addr;
+				size = xen_e820_map[i].size;
+			}
+		}
 	}
 
 	/*
 	 * Set the rest as identity mapped, in case PCI BARs are
 	 * located here.
-	 *
-	 * PFNs above MAX_P2M_PFN are considered identity mapped as
-	 * well.
 	 */
-	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);
+	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
 
 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
@@ -684,35 +873,54 @@
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
 
-	/*
-	 * Reserve Xen bits:
-	 *  - mfn_list
-	 *  - xen_start_info
-	 * See comment above "struct start_info" in <xen/interface/xen.h>
-	 * We tried to make the the memblock_reserve more selective so
-	 * that it would be clear what region is reserved. Sadly we ran
-	 * in the problem wherein on a 64-bit hypervisor with a 32-bit
-	 * initial domain, the pt_base has the cr3 value which is not
-	 * neccessarily where the pagetable starts! As Jan put it: "
-	 * Actually, the adjustment turns out to be correct: The page
-	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
-	 * "first L2", "first L3", so the offset to the page table base is
-	 * indeed 2. When reading xen/include/public/xen.h's comment
-	 * very strictly, this is not a violation (since there nothing is said
-	 * that the first thing in the page table space is pointed to by
-	 * pt_base; I admit that this seems to be implied though, namely
-	 * do I think that it is implied that the page table space is the
-	 * range [pt_base, pt_base + nt_pt_frames), whereas that
-	 * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
-	 * which - without a priori knowledge - the kernel would have
-	 * difficulty to figure out)." - so lets just fall back to the
-	 * easy way and reserve the whole region.
-	 */
-	memblock_reserve(__pa(xen_start_info->mfn_list),
-			 xen_start_info->pt_base - xen_start_info->mfn_list);
-
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+	/*
+	 * Check whether the kernel itself conflicts with the target E820 map.
+	 * Failing now is better than running into weird problems later due
+	 * to relocating (and even reusing) pages with kernel text or data.
+	 */
+	if (xen_is_e820_reserved(__pa_symbol(_text),
+			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
+		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
+		BUG();
+	}
+
+	/*
+	 * Check for a conflict of the hypervisor supplied page tables with
+	 * the target E820 map.
+	 */
+	xen_pt_check_e820();
+
+	xen_reserve_xen_mfnlist();
+
+	/* Check for a conflict of the initrd with the target E820 map. */
+	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
+				 boot_params.hdr.ramdisk_size)) {
+		phys_addr_t new_area, start, size;
+
+		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
+		if (!new_area) {
+			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
+			BUG();
+		}
+
+		start = boot_params.hdr.ramdisk_image;
+		size = boot_params.hdr.ramdisk_size;
+		xen_phys_memcpy(new_area, start, size);
+		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
+			start, start + size, new_area, new_area + size);
+		memblock_free(start, size);
+		boot_params.hdr.ramdisk_image = new_area;
+		boot_params.ext_ramdisk_image = new_area >> 32;
+	}
+
+	/*
+	 * Set identity map on non-RAM pages and prepare remapping the
+	 * underlying RAM.
+	 */
+	xen_set_identity_and_remap(max_pfn);
+
 	return "Xen";
 }
 
@@ -721,26 +929,30 @@
  */
 char * __init xen_auto_xlated_memory_setup(void)
 {
-	static struct e820entry map[E820MAX] __initdata;
-
 	struct xen_memory_map memmap;
 	int i;
 	int rc;
 
 	memmap.nr_entries = E820MAX;
-	set_xen_guest_handle(memmap.buffer, map);
+	set_xen_guest_handle(memmap.buffer, xen_e820_map);
 
 	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
 	if (rc < 0)
 		panic("No memory map (%d)\n", rc);
 
-	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
+	xen_e820_map_entries = memmap.nr_entries;
 
-	for (i = 0; i < memmap.nr_entries; i++)
-		e820_add_region(map[i].addr, map[i].size, map[i].type);
+	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
+			  &xen_e820_map_entries);
 
-	memblock_reserve(__pa(xen_start_info->mfn_list),
-			 xen_start_info->pt_base - xen_start_info->mfn_list);
+	for (i = 0; i < xen_e820_map_entries; i++)
+		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
+				xen_e820_map[i].type);
+
+	/* Remove p2m info, it is not needed. */
+	xen_start_info->mfn_list = 0;
+	xen_start_info->first_p2m_pfn = 0;
+	xen_start_info->nr_p2m_frames = 0;
 
 	return "Xen";
 }
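
xen_is_e820_reserved() treats a range as conflicting unless a single E820_RAM entry covers it completely, so holes, non-RAM types and ranges straddling two entries all count as conflicts. A toy userspace restatement of the check (illustration only):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct entry { uint64_t addr, size; int ram; };

	static bool is_reserved(const struct entry *map, int n,
				uint64_t start, uint64_t size)
	{
		uint64_t end = start + size;
		int i;

		if (!size)
			return false;
		for (i = 0; i < n; i++)
			if (map[i].ram && map[i].addr <= start &&
			    map[i].addr + map[i].size >= end)
				return false;	/* fully inside one RAM entry */
		return true;
	}

	int main(void)
	{
		struct entry map[] = {
			{ 0x000000, 0x09fc00, 1 },	/* low RAM */
			{ 0x100000, 0x700000, 1 },	/* high RAM */
		};

		printf("%d\n", is_reserved(map, 2, 0x200000, 0x1000)); /* 0: inside */
		printf("%d\n", is_reserved(map, 2, 0x09f000, 0x2000)); /* 1: past end */
		printf("%d\n", is_reserved(map, 2, 0x0a0000, 0x1000)); /* 1: hole */
		return 0;
	}
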
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8648438..2a9ff73 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -26,6 +26,7 @@
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
+#include <xen/interface/xenpmu.h>
 
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
@@ -38,6 +39,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 #include "smp.h"
+#include "pmu.h"
 
 cpumask_var_t xen_cpu_initialized_map;
 
@@ -50,6 +52,7 @@
 static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -148,11 +151,18 @@
 		kfree(per_cpu(xen_irq_work, cpu).name);
 		per_cpu(xen_irq_work, cpu).name = NULL;
 	}
+
+	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
+		per_cpu(xen_pmu_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_pmu_irq, cpu).name);
+		per_cpu(xen_pmu_irq, cpu).name = NULL;
+	}
 };
 static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
-	char *resched_name, *callfunc_name, *debug_name;
+	char *resched_name, *callfunc_name, *debug_name, *pmu_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -218,6 +228,18 @@
 	per_cpu(xen_irq_work, cpu).irq = rc;
 	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
+	if (is_xen_pmu(cpu)) {
+		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
+					     xen_pmu_irq_handler,
+					     IRQF_PERCPU|IRQF_NOBALANCING,
+					     pmu_name, NULL);
+		if (rc < 0)
+			goto fail;
+		per_cpu(xen_pmu_irq, cpu).irq = rc;
+		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+	}
+
 	return 0;
 
  fail:
@@ -335,6 +357,8 @@
 	}
 	set_cpu_sibling_map(0);
 
+	xen_pmu_init(0);
+
 	if (xen_smp_intr_init(0))
 		BUG();
 
@@ -462,6 +486,8 @@
 	if (rc)
 		return rc;
 
+	xen_pmu_init(cpu);
+
 	rc = xen_smp_intr_init(cpu);
 	if (rc)
 		return rc;
@@ -503,6 +529,7 @@
 		xen_smp_intr_free(cpu);
 		xen_uninit_lock_cpu(cpu);
 		xen_teardown_timer(cpu);
+		xen_pmu_finish(cpu);
 	}
 }
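
Note the ordering in both the boot-CPU and secondary-CPU paths above: the per-cpu PMU page is registered before the IRQs are set up, because xen_smp_intr_init() binds VIRQ_XENPMU only when is_xen_pmu() already sees a registered xenpmu_data page. In sketch form:

	/* Bring-up: register the PMU page first ... */
	xen_pmu_init(cpu);
	/* ... then bind VIRQ_XENPMU to it (skipped when is_xen_pmu() fails). */
	rc = xen_smp_intr_init(cpu);

	/* Teardown mirrors it: unbind the IRQ before the page goes away. */
	xen_smp_intr_free(cpu);
	xen_pmu_finish(cpu);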
 
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 53b4c08..feddabd 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -11,6 +11,7 @@
 
 #include "xen-ops.h"
 #include "mmu.h"
+#include "pmu.h"
 
 static void xen_pv_pre_suspend(void)
 {
@@ -67,16 +68,26 @@
 
 void xen_arch_pre_suspend(void)
 {
-    if (xen_pv_domain())
-        xen_pv_pre_suspend();
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
+	if (xen_pv_domain())
+		xen_pv_pre_suspend();
 }
 
 void xen_arch_post_suspend(int cancelled)
 {
-    if (xen_pv_domain())
-        xen_pv_post_suspend(cancelled);
-    else
-        xen_hvm_post_suspend(cancelled);
+	int cpu;
+
+	if (xen_pv_domain())
+		xen_pv_post_suspend(cancelled);
+	else
+		xen_hvm_post_suspend(cancelled);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }
 
 static void xen_vcpu_notify_restore(void *data)
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 8afdfcc..b65f59a 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -104,6 +104,8 @@
 	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __PAGE_OFFSET)
 #else
 	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __START_KERNEL_map)
+	/* Map the p2m table to a 512GB-aligned user address. */
+	ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M,       .quad PGDIR_SIZE)
 #endif
 	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          _ASM_PTR startup_xen)
 	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
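
On x86-64 a single top-level page-table slot spans

	PGDIR_SIZE = 1 << PGDIR_SHIFT = 1 << (12 + 3 * 9) = 1 << 39 = 512GB

so the note asks the domain builder to place the initial p2m list at virtual address PGDIR_SIZE, i.e. top-level slot 1: slot 0 stays free for low user mappings, and xen_relocate_p2m() can rebuild the list from 2 * PGDIR_SIZE upwards (slot 2 and beyond) and then clear slot 1, without any virtual address collision.
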
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2292721..1399423 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -35,13 +35,20 @@
 void xen_setup_machphys_mapping(void);
 void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_reserve_top(void);
+void __init xen_reserve_special_pages(void);
+void __init xen_pt_check_e820(void);
 
 void xen_mm_pin_all(void);
 void xen_mm_unpin_all(void);
+#ifdef CONFIG_X86_64
+void __init xen_relocate_p2m(void);
+#endif
 
+bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
 void __init xen_inv_extra_mem(void);
 void __init xen_remap_memory(void);
+phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index e4d193e..f3dfe0d 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -616,7 +616,6 @@
 # CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_SLUB_STATS is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index ebcd1f6..93795d0 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,6 +145,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -250,75 +254,6 @@
 	return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       xor     %1, %4, %3\n"
-			"       and     %0, %0, %4\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       or      %0, %0, %1\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
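
The new ATOMIC_OP(and)/(or)/(xor) instantiations above generate atomic_and(), atomic_or() and atomic_xor() from the ATOMIC_OP macro earlier in this file, which (judging by the existing ATOMIC_OPS(add)/(sub) users; the macro body is outside this hunk) emits the same XCHAL_HAVE_S32C1I compare-and-swap loop or rsil-protected fallback as the helpers deleted here, so no functionality is lost. Callers convert mechanically:

	/* Old open-coded helpers   ->  generated bitwise atomics */
	atomic_clear_mask(mask, v);	/* becomes atomic_and(~mask, v); */
	atomic_set_mask(mask, v);	/* becomes atomic_or(mask, v);   */
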
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index c39bb6e..867840f 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -57,6 +57,7 @@
 	else
 		BUG();
 }
+#define ioremap_cache ioremap_cache
 
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
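
The self-referential "#define ioremap_cache ioremap_cache" is the usual preprocessor feature marker: generic code can now probe for an arch-provided cached mapping with #ifdef instead of keeping per-arch lists. A sketch of the consuming side (pattern assumed, not shown in this patch):

	/*
	 * In generic code (sketch): supply a fallback only when the
	 * architecture did not advertise its own cached variant.
	 */
	#ifndef ioremap_cache
	#define ioremap_cache(offset, size)	ioremap((offset), (size))
	#endif
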
diff --git a/block/bounce.c b/block/bounce.c
index 2c310ea..0611aea 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -177,26 +177,8 @@
 	__bounce_end_io_read(bio, isa_page_pool);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return bio_flagged(bio, BIO_SNAP_STABLE);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -204,8 +186,6 @@
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -217,7 +197,7 @@
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -255,7 +235,6 @@
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -264,15 +243,13 @@
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -283,7 +260,7 @@
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
diff --git a/certs/Kconfig b/certs/Kconfig
new file mode 100644
index 0000000..b030b9c
--- /dev/null
+++ b/certs/Kconfig
@@ -0,0 +1,42 @@
+menu "Certificates for signature checking"
+
+config MODULE_SIG_KEY
+	string "File name or PKCS#11 URI of module signing key"
+	default "certs/signing_key.pem"
+	depends on MODULE_SIG
+	help
+	  Provide the file name of a private key/certificate in PEM format,
+	  or a PKCS#11 URI according to RFC7512. The file should contain, or
+	  the URI should identify, both the certificate and its corresponding
+	  private key.
+
+	  If this option is unchanged from its default "certs/signing_key.pem",
+	  then the kernel will automatically generate the private key and
+	  certificate as described in Documentation/module-signing.txt.
+
+config SYSTEM_TRUSTED_KEYRING
+	bool "Provide system-wide ring of trusted keys"
+	depends on KEYS
+	help
+	  Provide a system keyring to which trusted keys can be added.  Keys in
+	  the keyring are considered to be trusted.  Keys may be added at will
+	  by the kernel from compiled-in data and from hardware key stores, but
+	  userspace may only add extra keys if those keys can be verified by
+	  keys already in the keyring.
+
+	  Keys in this keyring are used by module signature checking.
+
+config SYSTEM_TRUSTED_KEYS
+	string "Additional X.509 keys for default system keyring"
+	depends on SYSTEM_TRUSTED_KEYRING
+	help
+	  If set, this option should be the filename of a PEM-formatted file
+	  containing trusted X.509 certificates to be included in the default
+	  system keyring. Any certificate used for module signing is implicitly
+	  also trusted.
+
+	  NOTE: If you previously provided keys for the system keyring in the
+	  form of DER-encoded *.x509 files in the top-level build directory,
+	  those are no longer used. You will need to set this option instead.
+
+endmenu
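For illustration, a hypothetical .config fragment selecting an external
signing key via the RFC7512 PKCS#11 URI form mentioned above (the token and
object names are made up):

	CONFIG_MODULE_SIG=y
	CONFIG_MODULE_SIG_KEY="pkcs11:token=kernel-build;object=module-signing-key"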
diff --git a/certs/Makefile b/certs/Makefile
new file mode 100644
index 0000000..28ac694
--- /dev/null
+++ b/certs/Makefile
@@ -0,0 +1,94 @@
+#
+# Makefile for the linux kernel signature checking certificates.
+#
+
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
+
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
+
+$(eval $(call config_filename,SYSTEM_TRUSTED_KEYS))
+
+# GCC doesn't include .incbin files in -MD generated dependencies (PR#66871)
+$(obj)/system_certificates.o: $(obj)/x509_certificate_list
+
+# Cope with signing_key.x509 existing in $(srctree) not $(objtree)
+AFLAGS_system_certificates.o := -I$(srctree)
+
+quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
+      cmd_extract_certs  = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
+
+targets += x509_certificate_list
+$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
+	$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
+endif
+
+clean-files := x509_certificate_list .x509.list
+
+ifeq ($(CONFIG_MODULE_SIG),y)
+###############################################################################
+#
+# If module signing is requested, say by allyesconfig, but a key has not been
+# supplied, then one will need to be generated to make sure the build does not
+# fail and that the kernel may be used afterwards.
+#
+###############################################################################
+ifndef CONFIG_MODULE_SIG_HASH
+$(error Could not determine digest type to use from kernel config)
+endif
+
+# We do it this way rather than having a boolean option for enabling an
+# external private key, because 'make randconfig' might enable such a
+# boolean option and we unfortunately can't make it depend on !RANDCONFIG.
+ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+$(obj)/signing_key.pem: $(obj)/x509.genkey
+	@echo "###"
+	@echo "### Now generating an X.509 key pair to be used for signing modules."
+	@echo "###"
+	@echo "### If this takes a long time, you might wish to run rngd in the"
+	@echo "### background to keep the supply of entropy topped up.  It"
+	@echo "### needs to be run as root, and uses a hardware random"
+	@echo "### number generator if one is available."
+	@echo "###"
+	openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
+		-batch -x509 -config $(obj)/x509.genkey \
+		-outform PEM -out $(obj)/signing_key.pem \
+		-keyout $(obj)/signing_key.pem 2>&1
+	@echo "###"
+	@echo "### Key pair generated."
+	@echo "###"
+
+$(obj)/x509.genkey:
+	@echo Generating X.509 key generation config
+	@echo  >$@ "[ req ]"
+	@echo >>$@ "default_bits = 4096"
+	@echo >>$@ "distinguished_name = req_distinguished_name"
+	@echo >>$@ "prompt = no"
+	@echo >>$@ "string_mask = utf8only"
+	@echo >>$@ "x509_extensions = myexts"
+	@echo >>$@
+	@echo >>$@ "[ req_distinguished_name ]"
+	@echo >>$@ "#O = Unspecified company"
+	@echo >>$@ "CN = Build time autogenerated kernel key"
+	@echo >>$@ "#emailAddress = unspecified.user@unspecified.company"
+	@echo >>$@
+	@echo >>$@ "[ myexts ]"
+	@echo >>$@ "basicConstraints=critical,CA:FALSE"
+	@echo >>$@ "keyUsage=digitalSignature"
+	@echo >>$@ "subjectKeyIdentifier=hash"
+	@echo >>$@ "authorityKeyIdentifier=keyid"
+endif
+
+$(eval $(call config_filename,MODULE_SIG_KEY))
+
+# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it
+ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword $(MODULE_SIG_KEY_FILENAME)))
+X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME)
+endif
+
+# GCC PR#66871 again.
+$(obj)/system_certificates.o: $(obj)/signing_key.x509
+
+targets += signing_key.x509
+$(obj)/signing_key.x509: scripts/extract-cert $(X509_DEP) FORCE
+	$(call if_changed,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY))
+endif
diff --git a/certs/system_certificates.S b/certs/system_certificates.S
new file mode 100644
index 0000000..9216e8c
--- /dev/null
+++ b/certs/system_certificates.S
@@ -0,0 +1,23 @@
+#include <linux/export.h>
+#include <linux/init.h>
+
+	__INITRODATA
+
+	.align 8
+	.globl VMLINUX_SYMBOL(system_certificate_list)
+VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
+#ifdef CONFIG_MODULE_SIG
+	.incbin "certs/signing_key.x509"
+#endif
+	.incbin "certs/x509_certificate_list"
+__cert_list_end:
+
+	.align 8
+	.globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+	.quad __cert_list_end - __cert_list_start
+#else
+	.long __cert_list_end - __cert_list_start
+#endif
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
new file mode 100644
index 0000000..2570598
--- /dev/null
+++ b/certs/system_keyring.c
@@ -0,0 +1,157 @@
+/* System trusted keyring for trusted public keys
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include <crypto/pkcs7.h>
+
+struct key *system_trusted_keyring;
+EXPORT_SYMBOL_GPL(system_trusted_keyring);
+
+extern __initconst const u8 system_certificate_list[];
+extern __initconst const unsigned long system_certificate_list_size;
+
+/*
+ * Create the system trusted keyring (the compiled-in keys are loaded later)
+ */
+static __init int system_trusted_keyring_init(void)
+{
+	pr_notice("Initialise system trusted keyring\n");
+
+	system_trusted_keyring =
+		keyring_alloc(".system_keyring",
+			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
+			      KEY_ALLOC_NOT_IN_QUOTA, NULL);
+	if (IS_ERR(system_trusted_keyring))
+		panic("Can't allocate system trusted keyring\n");
+
+	set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
+	return 0;
+}
+
+/*
+ * Must be initialised before we try to load the keys into the keyring.
+ */
+device_initcall(system_trusted_keyring_init);
+
+/*
+ * Load the compiled-in list of X.509 certificates.
+ */
+static __init int load_system_certificate_list(void)
+{
+	key_ref_t key;
+	const u8 *p, *end;
+	size_t plen;
+
+	pr_notice("Loading compiled-in X.509 certificates\n");
+
+	p = system_certificate_list;
+	end = p + system_certificate_list_size;
+	while (p < end) {
+		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+		 * than 256 bytes in size.
+		 */
+		if (end - p < 4)
+			goto dodgy_cert;
+		if (p[0] != 0x30 ||
+		    p[1] != 0x82)
+			goto dodgy_cert;
+		plen = (p[2] << 8) | p[3];
+		plen += 4;
+		if (plen > end - p)
+			goto dodgy_cert;
+
+		key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
+					   "asymmetric",
+					   NULL,
+					   p,
+					   plen,
+					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+					   KEY_USR_VIEW | KEY_USR_READ),
+					   KEY_ALLOC_NOT_IN_QUOTA |
+					   KEY_ALLOC_TRUSTED);
+		if (IS_ERR(key)) {
+			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+			       PTR_ERR(key));
+		} else {
+			set_bit(KEY_FLAG_BUILTIN, &key_ref_to_ptr(key)->flags);
+			pr_notice("Loaded X.509 cert '%s'\n",
+				  key_ref_to_ptr(key)->description);
+			key_ref_put(key);
+		}
+		p += plen;
+	}
+
+	return 0;
+
+dodgy_cert:
+	pr_err("Problem parsing in-kernel X.509 certificate list\n");
+	return 0;
+}
+late_initcall(load_system_certificate_list);
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+
+/**
+ * system_verify_data - Verify a PKCS#7-based signature on system data
+ * @data: The data to be verified.
+ * @len: Size of @data.
+ * @raw_pkcs7: The PKCS#7 message that is the signature.
+ * @pkcs7_len: The size of @raw_pkcs7.
+ * @usage: The use to which the key is being put.
+ */
+int system_verify_data(const void *data, unsigned long len,
+		       const void *raw_pkcs7, size_t pkcs7_len,
+		       enum key_being_used_for usage)
+{
+	struct pkcs7_message *pkcs7;
+	bool trusted;
+	int ret;
+
+	pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len);
+	if (IS_ERR(pkcs7))
+		return PTR_ERR(pkcs7);
+
+	/* The data should be detached - so we need to supply it. */
+	if (pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
+		pr_err("PKCS#7 signature with non-detached data\n");
+		ret = -EBADMSG;
+		goto error;
+	}
+
+	ret = pkcs7_verify(pkcs7, usage);
+	if (ret < 0)
+		goto error;
+
+	ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted);
+	if (ret < 0)
+		goto error;
+
+	if (!trusted) {
+		pr_err("PKCS#7 signature not signed with a trusted key\n");
+		ret = -ENOKEY;
+	}
+
+error:
+	pkcs7_free_message(pkcs7);
+	pr_devel("<==%s() = %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(system_verify_data);
+
+#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
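The certificate walk in load_system_certificate_list() leans on each blob in
the compiled-in list starting with a two-byte-length DER SEQUENCE header
(30 82 hh ll). A minimal userspace sketch of that header arithmetic, for
illustration only:

	#include <stddef.h>
	#include <stdint.h>

	/* Bytes consumed by one certificate (payload plus the 4 header
	 * bytes), or 0 if the blob doesn't look like a >256-byte SEQUENCE.
	 */
	static size_t der_cert_len(const uint8_t *p, size_t remain)
	{
		if (remain < 4 || p[0] != 0x30 || p[1] != 0x82)
			return 0;
		return (((size_t)p[2] << 8) | p[3]) + 4;
	}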
diff --git a/crypto/Kconfig b/crypto/Kconfig
index b582ea7..48ee3e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1635,5 +1635,6 @@
 
 source "drivers/crypto/Kconfig"
 source crypto/asymmetric_keys/Kconfig
+source certs/Kconfig
 
 endif	# if CRYPTO
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index e47fcd9..cd1406f 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -15,15 +15,21 @@
 obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
 x509_key_parser-y := \
 	x509-asn1.o \
+	x509_akid-asn1.o \
 	x509_rsakey-asn1.o \
 	x509_cert_parser.o \
 	x509_public_key.o
 
-$(obj)/x509_cert_parser.o: $(obj)/x509-asn1.h $(obj)/x509_rsakey-asn1.h
+$(obj)/x509_cert_parser.o: \
+	$(obj)/x509-asn1.h \
+	$(obj)/x509_akid-asn1.h \
+	$(obj)/x509_rsakey-asn1.h
 $(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
+$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
 $(obj)/x509_rsakey-asn1.o: $(obj)/x509_rsakey-asn1.c $(obj)/x509_rsakey-asn1.h
 
 clean-files	+= x509-asn1.c x509-asn1.h
+clean-files	+= x509_akid-asn1.c x509_akid-asn1.h
 clean-files	+= x509_rsakey-asn1.c x509_rsakey-asn1.h
 
 #
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index b0e4ed2..1916680 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -12,6 +12,7 @@
  */
 #include <keys/asymmetric-subtype.h>
 #include <keys/asymmetric-parser.h>
+#include <crypto/public_key.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -20,6 +21,16 @@
 
 MODULE_LICENSE("GPL");
 
+const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
+	[VERIFYING_MODULE_SIGNATURE]		= "mod sig",
+	[VERIFYING_FIRMWARE_SIGNATURE]		= "firmware sig",
+	[VERIFYING_KEXEC_PE_SIGNATURE]		= "kexec PE sig",
+	[VERIFYING_KEY_SIGNATURE]		= "key sig",
+	[VERIFYING_KEY_SELF_SIGNATURE]		= "key self sig",
+	[VERIFYING_UNSPECIFIED_SIGNATURE]	= "unspec sig",
+};
+EXPORT_SYMBOL_GPL(key_being_used_for);
+
 static LIST_HEAD(asymmetric_key_parsers);
 static DECLARE_RWSEM(asymmetric_key_parsers_sem);
 
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 214a992..adcef59 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -97,6 +97,15 @@
 	case OID_sha256:
 		ctx->digest_algo = HASH_ALGO_SHA256;
 		break;
+	case OID_sha384:
+		ctx->digest_algo = HASH_ALGO_SHA384;
+		break;
+	case OID_sha512:
+		ctx->digest_algo = HASH_ALGO_SHA512;
+		break;
+	case OID_sha224:
+		ctx->digest_algo = HASH_ALGO_SHA224;
+		break;
 
 	case OID__NR:
 		sprint_oid(value, vlen, buffer, sizeof(buffer));
diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1
index a5a14ef..1eca740 100644
--- a/crypto/asymmetric_keys/pkcs7.asn1
+++ b/crypto/asymmetric_keys/pkcs7.asn1
@@ -1,14 +1,14 @@
 PKCS7ContentInfo ::= SEQUENCE {
-	contentType	ContentType,
+	contentType	ContentType ({ pkcs7_check_content_type }),
 	content		[0] EXPLICIT SignedData OPTIONAL
 }
 
 ContentType ::= OBJECT IDENTIFIER ({ pkcs7_note_OID })
 
 SignedData ::= SEQUENCE {
-	version			INTEGER,
+	version			INTEGER ({ pkcs7_note_signeddata_version }),
 	digestAlgorithms	DigestAlgorithmIdentifiers,
-	contentInfo		ContentInfo,
+	contentInfo		ContentInfo ({ pkcs7_note_content }),
 	certificates		CHOICE {
 		certSet		[0] IMPLICIT ExtendedCertificatesAndCertificates,
 		certSequence	[2] IMPLICIT Certificates
@@ -21,7 +21,7 @@
 }
 
 ContentInfo ::= SEQUENCE {
-	contentType	ContentType,
+	contentType	ContentType ({ pkcs7_note_OID }),
 	content		[0] EXPLICIT Data OPTIONAL
 }
 
@@ -68,8 +68,8 @@
 }
 
 SignerInfo ::= SEQUENCE {
-	version			INTEGER,
-	issuerAndSerialNumber	IssuerAndSerialNumber,
+	version			INTEGER ({ pkcs7_note_signerinfo_version }),
+	sid			SignerIdentifier, -- CMS variant, not PKCS#7
 	digestAlgorithm		DigestAlgorithmIdentifier ({ pkcs7_sig_note_digest_algo }),
 	authenticatedAttributes	CHOICE {
 		aaSet		[0] IMPLICIT SetOfAuthenticatedAttribute
@@ -88,6 +88,12 @@
 	} OPTIONAL
 } ({ pkcs7_note_signed_info })
 
+SignerIdentifier ::= CHOICE {
+	-- RFC5652 sec 5.3
+	issuerAndSerialNumber IssuerAndSerialNumber,
+	subjectKeyIdentifier	[0] IMPLICIT SubjectKeyIdentifier
+}
+
 IssuerAndSerialNumber ::= SEQUENCE {
 	issuer			Name ({ pkcs7_sig_note_issuer }),
 	serialNumber		CertificateSerialNumber ({ pkcs7_sig_note_serial })
@@ -95,6 +101,8 @@
 
 CertificateSerialNumber ::= INTEGER
 
+SubjectKeyIdentifier ::= OCTET STRING ({ pkcs7_sig_note_skid })
+
 SetOfAuthenticatedAttribute ::= SET OF AuthenticatedAttribute
 
 AuthenticatedAttribute ::= SEQUENCE {
@@ -103,7 +111,7 @@
 }
 
 UnauthenticatedAttribute ::= SEQUENCE {
-	type			OBJECT IDENTIFIER ({ pkcs7_note_OID }),
+	type			OBJECT IDENTIFIER,
 	values			SET OF ANY
 }
 
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 3d13b04..e2d0edb 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -14,16 +14,26 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/key-type.h>
+#include <keys/asymmetric-type.h>
 #include <crypto/pkcs7.h>
 #include <keys/user-type.h>
 #include <keys/system_keyring.h>
 #include "pkcs7_parser.h"
 
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PKCS#7 testing key type");
+
+static unsigned pkcs7_usage;
+module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(usage,
+		 "Usage to specify when verifying the PKCS#7 message");
+
 /*
  * Preparse a PKCS#7 wrapped and validated data blob.
  */
 static int pkcs7_preparse(struct key_preparsed_payload *prep)
 {
+	enum key_being_used_for usage = pkcs7_usage;
 	struct pkcs7_message *pkcs7;
 	const void *data, *saved_prep_data;
 	size_t datalen, saved_prep_datalen;
@@ -32,6 +42,11 @@
 
 	kenter("");
 
+	if (usage >= NR__KEY_BEING_USED_FOR) {
+		pr_err("Invalid usage type %d\n", usage);
+		return -EINVAL;
+	}
+
 	saved_prep_data = prep->data;
 	saved_prep_datalen = prep->datalen;
 	pkcs7 = pkcs7_parse_message(saved_prep_data, saved_prep_datalen);
@@ -40,7 +55,7 @@
 		goto error;
 	}
 
-	ret = pkcs7_verify(pkcs7);
+	ret = pkcs7_verify(pkcs7, usage);
 	if (ret < 0)
 		goto error_free;
 
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 3bd5a1e..758acab 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -33,6 +33,9 @@
 	unsigned	raw_serial_size;
 	unsigned	raw_issuer_size;
 	const void	*raw_issuer;
+	const void	*raw_skid;
+	unsigned	raw_skid_size;
+	bool		expect_skid;
 };
 
 /*
@@ -78,6 +81,30 @@
 }
 EXPORT_SYMBOL_GPL(pkcs7_free_message);
 
+/*
+ * Check that authenticatedAttributes are supplied consistently: either every
+ * SignerInfo has them or none does.
+ */
+static int pkcs7_check_authattrs(struct pkcs7_message *msg)
+{
+	struct pkcs7_signed_info *sinfo;
+	bool want = false;
+
+	sinfo = msg->signed_infos;
+	if (sinfo->authattrs) {
+		want = true;
+		msg->have_authattrs = true;
+	}
+
+	for (sinfo = sinfo->next; sinfo; sinfo = sinfo->next)
+		if (!!sinfo->authattrs != want)
+			goto inconsistent;
+	return 0;
+
+inconsistent:
+	pr_warn("Inconsistently supplied authAttrs\n");
+	return -EINVAL;
+}
+
 /**
  * pkcs7_parse_message - Parse a PKCS#7 message
  * @data: The raw binary ASN.1 encoded message to be parsed
@@ -110,6 +137,10 @@
 		goto out;
 	}
 
+	ret = pkcs7_check_authattrs(ctx->msg);
+	if (ret < 0)
+		goto out;
+
 	msg = ctx->msg;
 	ctx->msg = NULL;
 
@@ -198,6 +229,14 @@
 	case OID_sha256:
 		ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA256;
 		break;
+	case OID_sha384:
+		ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA384;
+		break;
+	case OID_sha512:
+		ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA512;
+		break;
+	case OID_sha224:
+		ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA224;
+		break;
 	default:
 		printk("Unsupported digest algo: %u\n", ctx->last_oid);
 		return -ENOPKG;
@@ -226,6 +265,100 @@
 }
 
 /*
+ * We only support signed data [RFC2315 sec 9].
+ */
+int pkcs7_check_content_type(void *context, size_t hdrlen,
+			     unsigned char tag,
+			     const void *value, size_t vlen)
+{
+	struct pkcs7_parse_context *ctx = context;
+
+	if (ctx->last_oid != OID_signed_data) {
+		pr_warn("Only support pkcs7_signedData type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Note the SignedData version
+ */
+int pkcs7_note_signeddata_version(void *context, size_t hdrlen,
+				  unsigned char tag,
+				  const void *value, size_t vlen)
+{
+	struct pkcs7_parse_context *ctx = context;
+	unsigned version;
+
+	if (vlen != 1)
+		goto unsupported;
+
+	ctx->msg->version = version = *(const u8 *)value;
+	switch (version) {
+	case 1:
+		/* PKCS#7 SignedData [RFC2315 sec 9.1]
+		 * CMS ver 1 SignedData [RFC5652 sec 5.1]
+		 */
+		break;
+	case 3:
+		/* CMS ver 3 SignedData [RFC5652 sec 5.1] */
+		break;
+	default:
+		goto unsupported;
+	}
+
+	return 0;
+
+unsupported:
+	pr_warn("Unsupported SignedData version\n");
+	return -EINVAL;
+}
+
+/*
+ * Note the SignerInfo version
+ */
+int pkcs7_note_signerinfo_version(void *context, size_t hdrlen,
+				  unsigned char tag,
+				  const void *value, size_t vlen)
+{
+	struct pkcs7_parse_context *ctx = context;
+	unsigned version;
+
+	if (vlen != 1)
+		goto unsupported;
+
+	version = *(const u8 *)value;
+	switch (version) {
+	case 1:
+		/* PKCS#7 SignerInfo [RFC2315 sec 9.2]
+		 * CMS ver 1 SignerInfo [RFC5652 sec 5.3]
+		 */
+		if (ctx->msg->version != 1)
+			goto version_mismatch;
+		ctx->expect_skid = false;
+		break;
+	case 3:
+		/* CMS ver 3 SignerInfo [RFC5652 sec 5.3] */
+		if (ctx->msg->version == 1)
+			goto version_mismatch;
+		ctx->expect_skid = true;
+		break;
+	default:
+		goto unsupported;
+	}
+
+	return 0;
+
+unsupported:
+	pr_warn("Unsupported SignerInfo version\n");
+	return -EINVAL;
+version_mismatch:
+	pr_warn("SignedData-SignerInfo version mismatch\n");
+	return -EBADMSG;
+}
+
+/*
  * Extract a certificate and store it in the context.
  */
 int pkcs7_extract_cert(void *context, size_t hdrlen,
@@ -284,6 +417,25 @@
 }
 
 /*
+ * Note the content type.
+ */
+int pkcs7_note_content(void *context, size_t hdrlen,
+		       unsigned char tag,
+		       const void *value, size_t vlen)
+{
+	struct pkcs7_parse_context *ctx = context;
+
+	if (ctx->last_oid != OID_data &&
+	    ctx->last_oid != OID_msIndirectData) {
+		pr_warn("Unsupported data type %d\n", ctx->last_oid);
+		return -EINVAL;
+	}
+
+	ctx->msg->data_type = ctx->last_oid;
+	return 0;
+}
+
+/*
  * Extract the data from the message and store that and its content type OID in
  * the context.
  */
@@ -298,45 +450,119 @@
 	ctx->msg->data = value;
 	ctx->msg->data_len = vlen;
 	ctx->msg->data_hdrlen = hdrlen;
-	ctx->msg->data_type = ctx->last_oid;
 	return 0;
 }
 
 /*
- * Parse authenticated attributes
+ * Parse authenticated attributes.
  */
 int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen,
 				      unsigned char tag,
 				      const void *value, size_t vlen)
 {
 	struct pkcs7_parse_context *ctx = context;
+	struct pkcs7_signed_info *sinfo = ctx->sinfo;
+	enum OID content_type;
 
 	pr_devel("AuthAttr: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);
 
 	switch (ctx->last_oid) {
+	case OID_contentType:
+		if (__test_and_set_bit(sinfo_has_content_type, &sinfo->aa_set))
+			goto repeated;
+		content_type = look_up_OID(value, vlen);
+		if (content_type != ctx->msg->data_type) {
+			pr_warn("Mismatch between global data type (%d) and sinfo %u (%d)\n",
+				ctx->msg->data_type, sinfo->index,
+				content_type);
+			return -EBADMSG;
+		}
+		return 0;
+
+	case OID_signingTime:
+		if (__test_and_set_bit(sinfo_has_signing_time, &sinfo->aa_set))
+			goto repeated;
+		/* Should we check that the signing time is consistent
+		 * with the signer's X.509 cert?
+		 */
+		return x509_decode_time(&sinfo->signing_time,
+					hdrlen, tag, value, vlen);
+
 	case OID_messageDigest:
+		if (__test_and_set_bit(sinfo_has_message_digest, &sinfo->aa_set))
+			goto repeated;
 		if (tag != ASN1_OTS)
 			return -EBADMSG;
-		ctx->sinfo->msgdigest = value;
-		ctx->sinfo->msgdigest_len = vlen;
+		sinfo->msgdigest = value;
+		sinfo->msgdigest_len = vlen;
+		return 0;
+
+	case OID_smimeCapabilites:
+		if (__test_and_set_bit(sinfo_has_smime_caps, &sinfo->aa_set))
+			goto repeated;
+		if (ctx->msg->data_type != OID_msIndirectData) {
+			pr_warn("S/MIME Caps only allowed with Authenticode\n");
+			return -EKEYREJECTED;
+		}
+		return 0;
+
+		/* Microsoft SpOpusInfo seems to contain cont[0] 16-bit BE
+		 * char URLs and cont[1] 8-bit char URLs.
+		 *
+		 * Microsoft StatementType seems to contain a list of OIDs that
+		 * are also used as extendedKeyUsage types in X.509 certs.
+		 */
+	case OID_msSpOpusInfo:
+		if (__test_and_set_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))
+			goto repeated;
+		goto authenticode_check;
+	case OID_msStatementType:
+		if (__test_and_set_bit(sinfo_has_ms_statement_type, &sinfo->aa_set))
+			goto repeated;
+	authenticode_check:
+		if (ctx->msg->data_type != OID_msIndirectData) {
+			pr_warn("Authenticode AuthAttrs only allowed with Authenticode\n");
+			return -EKEYREJECTED;
+		}
+		/* I'm not sure how to validate these */
 		return 0;
 	default:
 		return 0;
 	}
+
+repeated:
+	/* We permit at most one of each AuthenticatedAttribute and no repeats */
+	pr_warn("Repeated/multivalue AuthAttrs not permitted\n");
+	return -EKEYREJECTED;
 }
 
 /*
- * Note the set of auth attributes for digestion purposes [RFC2315 9.3]
+ * Note the set of auth attributes for digestion purposes [RFC2315 sec 9.3]
  */
 int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
 				    unsigned char tag,
 				    const void *value, size_t vlen)
 {
 	struct pkcs7_parse_context *ctx = context;
+	struct pkcs7_signed_info *sinfo = ctx->sinfo;
+
+	if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
+	    !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
+	    (ctx->msg->data_type == OID_msIndirectData &&
+	     !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
+		pr_warn("Missing required AuthAttr\n");
+		return -EBADMSG;
+	}
+
+	if (ctx->msg->data_type != OID_msIndirectData &&
+	    test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set)) {
+		pr_warn("Unexpected Authenticode AuthAttr\n");
+		return -EBADMSG;
+	}
 
 	/* We need to switch the 'CONT 0' to a 'SET OF' when we digest */
-	ctx->sinfo->authattrs = value - (hdrlen - 1);
-	ctx->sinfo->authattrs_len = vlen + (hdrlen - 1);
+	sinfo->authattrs = value - (hdrlen - 1);
+	sinfo->authattrs_len = vlen + (hdrlen - 1);
 	return 0;
 }
 
@@ -367,6 +593,22 @@
 }
 
 /*
+ * Note the issuing cert's subjectKeyIdentifier
+ */
+int pkcs7_sig_note_skid(void *context, size_t hdrlen,
+			unsigned char tag,
+			const void *value, size_t vlen)
+{
+	struct pkcs7_parse_context *ctx = context;
+
+	pr_devel("SKID: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);
+
+	ctx->raw_skid = value;
+	ctx->raw_skid_size = vlen;
+	return 0;
+}
+
+/*
  * Note the signature data
  */
 int pkcs7_sig_note_signature(void *context, size_t hdrlen,
@@ -398,14 +640,27 @@
 	struct pkcs7_signed_info *sinfo = ctx->sinfo;
 	struct asymmetric_key_id *kid;
 
+	if (ctx->msg->data_type == OID_msIndirectData && !sinfo->authattrs) {
+		pr_warn("Authenticode requires AuthAttrs\n");
+		return -EBADMSG;
+	}
+
 	/* Generate cert issuer + serial number key ID */
-	kid = asymmetric_key_generate_id(ctx->raw_serial,
-					 ctx->raw_serial_size,
-					 ctx->raw_issuer,
-					 ctx->raw_issuer_size);
+	if (!ctx->expect_skid) {
+		kid = asymmetric_key_generate_id(ctx->raw_serial,
+						 ctx->raw_serial_size,
+						 ctx->raw_issuer,
+						 ctx->raw_issuer_size);
+	} else {
+		kid = asymmetric_key_generate_id(ctx->raw_skid,
+						 ctx->raw_skid_size,
+						 "", 0);
+	}
 	if (IS_ERR(kid))
 		return PTR_ERR(kid);
 
+	pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data);
+
 	sinfo->signing_cert_id = kid;
 	sinfo->index = ++ctx->sinfo_index;
 	*ctx->ppsinfo = sinfo;
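The version pairing enforced by pkcs7_note_signeddata_version() and
pkcs7_note_signerinfo_version() above reduces to this mapping; any other
combination is rejected as a SignedData-SignerInfo mismatch:

	SignedData v1 + SignerInfo v1  ->  sid is issuerAndSerialNumber (PKCS#7 / CMS v1)
	SignedData v3 + SignerInfo v3  ->  sid is subjectKeyIdentifier  (CMS v3)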
diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h
index efc7dc9..a66b19e 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.h
+++ b/crypto/asymmetric_keys/pkcs7_parser.h
@@ -21,9 +21,9 @@
 struct pkcs7_signed_info {
 	struct pkcs7_signed_info *next;
 	struct x509_certificate *signer; /* Signing certificate (in msg->certs) */
-	unsigned index;
-	bool trusted;
-	bool unsupported_crypto;	/* T if not usable due to missing crypto */
+	unsigned	index;
+	bool		trusted;
+	bool		unsupported_crypto;	/* T if not usable due to missing crypto */
 
 	/* Message digest - the digest of the Content Data (or NULL) */
 	const void	*msgdigest;
@@ -32,8 +32,18 @@
 	/* Authenticated Attribute data (or NULL) */
 	unsigned	authattrs_len;
 	const void	*authattrs;
+	unsigned long	aa_set;
+#define	sinfo_has_content_type		0
+#define	sinfo_has_signing_time		1
+#define	sinfo_has_message_digest	2
+#define sinfo_has_smime_caps		3
+#define	sinfo_has_ms_opus_info		4
+#define	sinfo_has_ms_statement_type	5
+	time64_t	signing_time;
 
-	/* Issuing cert serial number and issuer's name */
+	/* Issuing cert serial number and issuer's name [PKCS#7 or CMS ver 1]
+	 * or issuing cert's SKID [CMS ver 3].
+	 */
 	struct asymmetric_key_id *signing_cert_id;
 
 	/* Message signature.
@@ -50,6 +60,8 @@
 	struct x509_certificate *certs;	/* Certificate list */
 	struct x509_certificate *crl;	/* Revocation list */
 	struct pkcs7_signed_info *signed_infos;
+	u8		version;	/* Version of SignedData (1 -> PKCS#7 or CMS; 3 -> CMS) */
+	bool		have_authattrs;	/* T if have authattrs */
 
 	/* Content Data (or NULL) */
 	enum OID	data_type;	/* Type of Data */
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 1d29376..90d6d47 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -54,7 +54,8 @@
 		/* Look to see if this certificate is present in the trusted
 		 * keys.
 		 */
-		key = x509_request_asymmetric_key(trust_keyring, x509->id,
+		key = x509_request_asymmetric_key(trust_keyring,
+						  x509->id, x509->skid,
 						  false);
 		if (!IS_ERR(key)) {
 			/* One of the X.509 certificates in the PKCS#7 message
@@ -85,8 +86,10 @@
 	/* No match - see if the root certificate has a signer amongst the
 	 * trusted keys.
 	 */
-	if (last && last->authority) {
-		key = x509_request_asymmetric_key(trust_keyring, last->authority,
+	if (last && (last->akid_id || last->akid_skid)) {
+		key = x509_request_asymmetric_key(trust_keyring,
+						  last->akid_id,
+						  last->akid_skid,
 						  false);
 		if (!IS_ERR(key)) {
 			x509 = last;
@@ -103,6 +106,7 @@
 	 */
 	key = x509_request_asymmetric_key(trust_keyring,
 					  sinfo->signing_cert_id,
+					  NULL,
 					  false);
 	if (!IS_ERR(key)) {
 		pr_devel("sinfo %u: Direct signer is key %x\n",
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index cd45545..d20c0b4 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -70,9 +70,15 @@
 	 * message digest attribute amongst them which corresponds to the
 	 * digest we just calculated.
 	 */
-	if (sinfo->msgdigest) {
+	if (sinfo->authattrs) {
 		u8 tag;
 
+		if (!sinfo->msgdigest) {
+			pr_warn("Sig %u: No messageDigest\n", sinfo->index);
+			ret = -EKEYREJECTED;
+			goto error;
+		}
+
 		if (sinfo->msgdigest_len != sinfo->sig.digest_size) {
 			pr_debug("Sig %u: Invalid digest size (%u)\n",
 				 sinfo->index, sinfo->msgdigest_len);
@@ -170,6 +176,7 @@
 				  struct pkcs7_signed_info *sinfo)
 {
 	struct x509_certificate *x509 = sinfo->signer, *p;
+	struct asymmetric_key_id *auth;
 	int ret;
 
 	kenter("");
@@ -187,11 +194,14 @@
 			goto maybe_missing_crypto_in_x509;
 
 		pr_debug("- issuer %s\n", x509->issuer);
-		if (x509->authority)
-			pr_debug("- authkeyid %*phN\n",
-				 x509->authority->len, x509->authority->data);
+		if (x509->akid_id)
+			pr_debug("- authkeyid.id %*phN\n",
+				 x509->akid_id->len, x509->akid_id->data);
+		if (x509->akid_skid)
+			pr_debug("- authkeyid.skid %*phN\n",
+				 x509->akid_skid->len, x509->akid_skid->data);
 
-		if (!x509->authority ||
+		if ((!x509->akid_id && !x509->akid_skid) ||
 		    strcmp(x509->subject, x509->issuer) == 0) {
 			/* If there's no authority certificate specified, then
 			 * the certificate must be self-signed and is the root
@@ -215,21 +225,42 @@
 		/* Look through the X.509 certificates in the PKCS#7 message's
 		 * list to see if the next one is there.
 		 */
-		pr_debug("- want %*phN\n",
-			 x509->authority->len, x509->authority->data);
-		for (p = pkcs7->certs; p; p = p->next) {
-			if (!p->skid)
-				continue;
-			pr_debug("- cmp [%u] %*phN\n",
-				 p->index, p->skid->len, p->skid->data);
-			if (asymmetric_key_id_same(p->skid, x509->authority))
-				goto found_issuer;
+		auth = x509->akid_id;
+		if (auth) {
+			pr_debug("- want %*phN\n", auth->len, auth->data);
+			for (p = pkcs7->certs; p; p = p->next) {
+				pr_debug("- cmp [%u] %*phN\n",
+					 p->index, p->id->len, p->id->data);
+				if (asymmetric_key_id_same(p->id, auth))
+					goto found_issuer_check_skid;
+			}
+		} else {
+			auth = x509->akid_skid;
+			pr_debug("- want %*phN\n", auth->len, auth->data);
+			for (p = pkcs7->certs; p; p = p->next) {
+				if (!p->skid)
+					continue;
+				pr_debug("- cmp [%u] %*phN\n",
+					 p->index, p->skid->len, p->skid->data);
+				if (asymmetric_key_id_same(p->skid, auth))
+					goto found_issuer;
+			}
 		}
 
 		/* We didn't find the root of this chain */
 		pr_debug("- top\n");
 		return 0;
 
+	found_issuer_check_skid:
+		/* We matched issuer + serialNumber, but if there's an
+		 * authKeyId.keyId, that must match the CA subjKeyId also.
+		 */
+		if (x509->akid_skid &&
+		    !asymmetric_key_id_same(p->skid, x509->akid_skid)) {
+			pr_warn("Sig %u: X.509 chain contains auth-skid nonmatch (%u->%u)\n",
+				sinfo->index, x509->index, p->index);
+			return -EKEYREJECTED;
+		}
 	found_issuer:
 		pr_debug("- subject %s\n", p->subject);
 		if (p->seen) {
@@ -289,6 +320,18 @@
 	pr_devel("Using X.509[%u] for sig %u\n",
 		 sinfo->signer->index, sinfo->index);
 
+	/* Check that the PKCS#7 signing time is valid according to the X.509
+	 * certificate.  We can't, however, check against the system clock
+	 * since that may not have been set yet and may be wrong.
+	 */
+	if (test_bit(sinfo_has_signing_time, &sinfo->aa_set)) {
+		if (sinfo->signing_time < sinfo->signer->valid_from ||
+		    sinfo->signing_time > sinfo->signer->valid_to) {
+			pr_warn("Message signed outside of X.509 validity window\n");
+			return -EKEYREJECTED;
+		}
+	}
+
 	/* Verify the PKCS#7 binary against the key */
 	ret = public_key_verify_signature(sinfo->signer->pub, &sinfo->sig);
 	if (ret < 0)
@@ -303,6 +346,7 @@
 /**
  * pkcs7_verify - Verify a PKCS#7 message
  * @pkcs7: The PKCS#7 message to be verified
+ * @usage: The use to which the key is being put
  *
  * Verify a PKCS#7 message is internally consistent - that is, the data digest
  * matches the digest in the AuthAttrs and any signature in the message or one
@@ -314,6 +358,9 @@
  *
  * Returns, in order of descending priority:
  *
+ *  (*) -EKEYREJECTED if a key was selected that had a usage restriction at
+ *      odds with the specified usage, or:
+ *
  *  (*) -EKEYREJECTED if a signature failed to match for which we found an
  *	appropriate X.509 certificate, or:
  *
@@ -325,7 +372,8 @@
  *  (*) 0 if all the signature chains that don't incur -ENOPKG can be verified
  *	(note that a signature chain may be of zero length), or:
  */
-int pkcs7_verify(struct pkcs7_message *pkcs7)
+int pkcs7_verify(struct pkcs7_message *pkcs7,
+		 enum key_being_used_for usage)
 {
 	struct pkcs7_signed_info *sinfo;
 	struct x509_certificate *x509;
@@ -334,12 +382,48 @@
 
 	kenter("");
 
+	switch (usage) {
+	case VERIFYING_MODULE_SIGNATURE:
+		if (pkcs7->data_type != OID_data) {
+			pr_warn("Invalid module sig (not pkcs7-data)\n");
+			return -EKEYREJECTED;
+		}
+		if (pkcs7->have_authattrs) {
+			pr_warn("Invalid module sig (has authattrs)\n");
+			return -EKEYREJECTED;
+		}
+		break;
+	case VERIFYING_FIRMWARE_SIGNATURE:
+		if (pkcs7->data_type != OID_data) {
+			pr_warn("Invalid firmware sig (not pkcs7-data)\n");
+			return -EKEYREJECTED;
+		}
+		if (!pkcs7->have_authattrs) {
+			pr_warn("Invalid firmware sig (missing authattrs)\n");
+			return -EKEYREJECTED;
+		}
+		break;
+	case VERIFYING_KEXEC_PE_SIGNATURE:
+		if (pkcs7->data_type != OID_msIndirectData) {
+			pr_warn("Invalid kexec sig (not Authenticode)\n");
+			return -EKEYREJECTED;
+		}
+		/* Authattr presence checked in parser */
+		break;
+	case VERIFYING_UNSPECIFIED_SIGNATURE:
+		if (pkcs7->data_type != OID_data) {
+			pr_warn("Invalid unspecified sig (not pkcs7-data)\n");
+			return -EKEYREJECTED;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	for (n = 0, x509 = pkcs7->certs; x509; x509 = x509->next, n++) {
 		ret = x509_get_sig_params(x509);
 		if (ret < 0)
 			return ret;
-		pr_debug("X.509[%u] %*phN\n",
-			 n, x509->authority->len, x509->authority->data);
 	}
 
 	for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
@@ -359,3 +443,28 @@
 	return enopkg;
 }
 EXPORT_SYMBOL_GPL(pkcs7_verify);
+
+/**
+ * pkcs7_supply_detached_data - Supply the data needed to verify a PKCS#7 message
+ * @pkcs7: The PKCS#7 message
+ * @data: The data to be verified
+ * @datalen: The amount of data
+ *
+ * Supply the detached data needed to verify a PKCS#7 message.  Note that no
+ * attempt to retain/pin the data is made.  That is left to the caller.  The
+ * data will not be modified by pkcs7_verify() and will not be freed when the
+ * PKCS#7 message is freed.
+ *
+ * Returns -EINVAL if data is already supplied in the message, 0 otherwise.
+ */
+int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+			       const void *data, size_t datalen)
+{
+	if (pkcs7->data) {
+		pr_debug("Data already supplied\n");
+		return -EINVAL;
+	}
+	pkcs7->data = data;
+	pkcs7->data_len = datalen;
+	return 0;
+}
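Taken together, pkcs7_parse_message(), pkcs7_supply_detached_data() and
pkcs7_verify() give callers a complete detached-signature flow. A hedged
sketch of such a caller (hypothetical function; error handling trimmed and
the usage value chosen only as an example):

	static int example_verify_blob(const void *data, size_t len,
				       const void *sig, size_t siglen)
	{
		struct pkcs7_message *pkcs7;
		int ret;

		pkcs7 = pkcs7_parse_message(sig, siglen);
		if (IS_ERR(pkcs7))
			return PTR_ERR(pkcs7);

		ret = pkcs7_supply_detached_data(pkcs7, data, len);
		if (ret == 0)
			ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);

		pkcs7_free_message(pkcs7);
		return ret;
	}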
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 2f6e4fb..81efccb 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -39,6 +39,7 @@
 const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
 	[PKEY_ID_PGP]		= "PGP",
 	[PKEY_ID_X509]		= "X509",
+	[PKEY_ID_PKCS7]		= "PKCS#7",
 };
 EXPORT_SYMBOL_GPL(pkey_id_type_name);
 
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 2421f46..897b734 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -393,6 +393,7 @@
  * @pebuf: Buffer containing the PE binary image
  * @pelen: Length of the binary image
  * @trust_keyring: Signing certificates to use as starting points
+ * @usage: The use to which the key is being put.
 * @_trusted: Set to true if trustworthy, false otherwise
  *
  * Validate that the certificate chain inside the PKCS#7 message inside the PE
@@ -417,7 +418,9 @@
  * May also return -ENOMEM.
  */
 int verify_pefile_signature(const void *pebuf, unsigned pelen,
-			    struct key *trusted_keyring, bool *_trusted)
+			    struct key *trusted_keyring,
+			    enum key_being_used_for usage,
+			    bool *_trusted)
 {
 	struct pkcs7_message *pkcs7;
 	struct pefile_context ctx;
@@ -462,7 +465,7 @@
 	if (ret < 0)
 		goto error;
 
-	ret = pkcs7_verify(pkcs7);
+	ret = pkcs7_verify(pkcs7, usage);
 	if (ret < 0)
 		goto error;
 
diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1
new file mode 100644
index 0000000..1a33231
--- /dev/null
+++ b/crypto/asymmetric_keys/x509_akid.asn1
@@ -0,0 +1,35 @@
+-- X.509 AuthorityKeyIdentifier
+-- rfc5280 section 4.2.1.1
+
+AuthorityKeyIdentifier ::= SEQUENCE {
+	keyIdentifier			[0] IMPLICIT KeyIdentifier		OPTIONAL,
+	authorityCertIssuer		[1] IMPLICIT GeneralNames		OPTIONAL,
+	authorityCertSerialNumber	[2] IMPLICIT CertificateSerialNumber	OPTIONAL
+	}
+
+KeyIdentifier ::= OCTET STRING ({ x509_akid_note_kid })
+
+CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial })
+
+GeneralNames ::= SEQUENCE OF GeneralName
+
+GeneralName ::= CHOICE {
+	otherName			[0] ANY,
+	rfc822Name			[1] IA5String,
+	dNSName				[2] IA5String,
+	x400Address			[3] ANY,
+	directoryName			[4] Name ({ x509_akid_note_name }),
+	ediPartyName			[5] ANY,
+	uniformResourceIdentifier	[6] IA5String,
+	iPAddress			[7] OCTET STRING,
+	registeredID			[8] OBJECT IDENTIFIER
+	}
+
+Name ::= SEQUENCE OF RelativeDistinguishedName
+
+RelativeDistinguishedName ::= SET OF AttributeValueAssertion
+
+AttributeValueAssertion ::= SEQUENCE {
+	attributeType		OBJECT IDENTIFIER ({ x509_note_OID }),
+	attributeValue		ANY ({ x509_extract_name_segment })
+	}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index a668d90..af71878 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -18,6 +18,7 @@
 #include "public_key.h"
 #include "x509_parser.h"
 #include "x509-asn1.h"
+#include "x509_akid-asn1.h"
 #include "x509_rsakey-asn1.h"
 
 struct x509_parse_context {
@@ -35,6 +36,10 @@
 	u16		o_offset;		/* Offset of organizationName (O) */
 	u16		cn_offset;		/* Offset of commonName (CN) */
 	u16		email_offset;		/* Offset of emailAddress */
+	unsigned	raw_akid_size;
+	const void	*raw_akid;		/* Raw authorityKeyId in ASN.1 */
+	const void	*akid_raw_issuer;	/* Raw directoryName in authorityKeyId */
+	unsigned	akid_raw_issuer_size;
 };
 
 /*
@@ -48,7 +53,8 @@
 		kfree(cert->subject);
 		kfree(cert->id);
 		kfree(cert->skid);
-		kfree(cert->authority);
+		kfree(cert->akid_id);
+		kfree(cert->akid_skid);
 		kfree(cert->sig.digest);
 		mpi_free(cert->sig.rsa.s);
 		kfree(cert);
@@ -85,6 +91,18 @@
 	if (ret < 0)
 		goto error_decode;
 
+	/* Decode the AuthorityKeyIdentifier */
+	if (ctx->raw_akid) {
+		pr_devel("AKID: %u %*phN\n",
+			 ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
+		ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
+				       ctx->raw_akid, ctx->raw_akid_size);
+		if (ret < 0) {
+			pr_warn("Couldn't decode AuthKeyIdentifier\n");
+			goto error_decode;
+		}
+	}
+
 	/* Decode the public key */
 	ret = asn1_ber_decoder(&x509_rsakey_decoder, ctx,
 			       ctx->key, ctx->key_size);
@@ -422,7 +440,6 @@
 	struct x509_parse_context *ctx = context;
 	struct asymmetric_key_id *kid;
 	const unsigned char *v = value;
-	int i;
 
 	pr_debug("Extension: %u\n", ctx->last_oid);
 
@@ -437,9 +454,7 @@
 
 		ctx->cert->raw_skid_size = vlen;
 		ctx->cert->raw_skid = v;
-		kid = asymmetric_key_generate_id(ctx->cert->raw_subject,
-						 ctx->cert->raw_subject_size,
-						 v, vlen);
+		kid = asymmetric_key_generate_id(v, vlen, "", 0);
 		if (IS_ERR(kid))
 			return PTR_ERR(kid);
 		ctx->cert->skid = kid;
@@ -449,117 +464,113 @@
 
 	if (ctx->last_oid == OID_authorityKeyIdentifier) {
 		/* Get hold of the CA key fingerprint */
-		if (ctx->cert->authority || vlen < 5)
-			return -EBADMSG;
-
-		/* Authority Key Identifier must be a Constructed SEQUENCE */
-		if (v[0] != (ASN1_SEQ | (ASN1_CONS << 5)))
-			return -EBADMSG;
-
-		/* Authority Key Identifier is not indefinite length */
-		if (unlikely(vlen == ASN1_INDEFINITE_LENGTH))
-			return -EBADMSG;
-
-		if (vlen < ASN1_INDEFINITE_LENGTH) {
-			/* Short Form length */
-			if (v[1] != vlen - 2 ||
-			    v[2] != SEQ_TAG_KEYID ||
-			    v[3] > vlen - 4)
-				return -EBADMSG;
-
-			vlen = v[3];
-			v += 4;
-		} else {
-			/* Long Form length */
-			size_t seq_len = 0;
-			size_t sub = v[1] - ASN1_INDEFINITE_LENGTH;
-
-			if (sub > 2)
-				return -EBADMSG;
-
-			/* calculate the length from subsequent octets */
-			v += 2;
-			for (i = 0; i < sub; i++) {
-				seq_len <<= 8;
-				seq_len |= v[i];
-			}
-
-			if (seq_len != vlen - 2 - sub ||
-			    v[sub] != SEQ_TAG_KEYID ||
-			    v[sub + 1] > vlen - 4 - sub)
-				return -EBADMSG;
-
-			vlen = v[sub + 1];
-			v += (sub + 2);
-		}
-
-		kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
-						 ctx->cert->raw_issuer_size,
-						 v, vlen);
-		if (IS_ERR(kid))
-			return PTR_ERR(kid);
-		pr_debug("authkeyid %*phN\n", kid->len, kid->data);
-		ctx->cert->authority = kid;
+		ctx->raw_akid = v;
+		ctx->raw_akid_size = vlen;
 		return 0;
 	}
 
 	return 0;
 }
 
-/*
- * Record a certificate time.
+/**
+ * x509_decode_time - Decode an X.509 time ASN.1 object
+ * @_t: The time to fill in
+ * @hdrlen: The length of the object header
+ * @tag: The object tag
+ * @value: The object value
+ * @vlen: The size of the object value
+ *
+ * Decode an ASN.1 universal time or generalised time field into a time64_t
+ * the kernel can handle and check it for validity.  The time is decoded thus:
+ *
+ *	[RFC5280 §4.1.2.5]
+ *	CAs conforming to this profile MUST always encode certificate validity
+ *	dates through the year 2049 as UTCTime; certificate validity dates in
+ *	2050 or later MUST be encoded as GeneralizedTime.  Conforming
+ *	applications MUST be able to process validity dates that are encoded in
+ *	either UTCTime or GeneralizedTime.
  */
-static int x509_note_time(struct tm *tm,  size_t hdrlen,
-			  unsigned char tag,
-			  const unsigned char *value, size_t vlen)
+int x509_decode_time(time64_t *_t,  size_t hdrlen,
+		     unsigned char tag,
+		     const unsigned char *value, size_t vlen)
 {
+	static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
+						       31, 31, 30, 31, 30, 31 };
 	const unsigned char *p = value;
+	unsigned year, mon, day, hour, min, sec, mon_len;
 
-#define dec2bin(X) ((X) - '0')
+#define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; })
 #define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; })
 
 	if (tag == ASN1_UNITIM) {
 		/* UTCTime: YYMMDDHHMMSSZ */
 		if (vlen != 13)
 			goto unsupported_time;
-		tm->tm_year = DD2bin(p);
-		if (tm->tm_year >= 50)
-			tm->tm_year += 1900;
+		year = DD2bin(p);
+		if (year >= 50)
+			year += 1900;
 		else
-			tm->tm_year += 2000;
+			year += 2000;
 	} else if (tag == ASN1_GENTIM) {
 		/* GenTime: YYYYMMDDHHMMSSZ */
 		if (vlen != 15)
 			goto unsupported_time;
-		tm->tm_year = DD2bin(p) * 100 + DD2bin(p);
+		year = DD2bin(p) * 100;
+		year += DD2bin(p);
+		if (year >= 1950 && year <= 2049)
+			goto invalid_time;
 	} else {
 		goto unsupported_time;
 	}
 
-	tm->tm_year -= 1900;
-	tm->tm_mon  = DD2bin(p) - 1;
-	tm->tm_mday = DD2bin(p);
-	tm->tm_hour = DD2bin(p);
-	tm->tm_min  = DD2bin(p);
-	tm->tm_sec  = DD2bin(p);
+	mon  = DD2bin(p);
+	day = DD2bin(p);
+	hour = DD2bin(p);
+	min  = DD2bin(p);
+	sec  = DD2bin(p);
 
 	if (*p != 'Z')
 		goto unsupported_time;
 
+	if (year < 1970 ||
+	    mon < 1 || mon > 12 ||
+	    hour > 23 || min > 59 || sec > 59)
+		goto invalid_time;
+
+	mon_len = month_lengths[mon - 1];
+	if (mon == 2) {
+		if (year % 4 == 0) {
+			mon_len = 29;
+			if (year % 100 == 0) {
+				mon_len = 28;
+				if (year % 400 == 0)
+					mon_len = 29;
+			}
+		}
+	}
+
+	if (day < 1 || day > mon_len)
+		goto invalid_time;
+
+	*_t = mktime64(year, mon, day, hour, min, sec);
 	return 0;
 
 unsupported_time:
-	pr_debug("Got unsupported time [tag %02x]: '%*.*s'\n",
-		 tag, (int)vlen, (int)vlen, value);
+	pr_debug("Got unsupported time [tag %02x]: '%*phN'\n",
+		 tag, (int)vlen, value);
+	return -EBADMSG;
+invalid_time:
+	pr_debug("Got invalid time [tag %02x]: '%*phN'\n",
+		 tag, (int)vlen, value);
 	return -EBADMSG;
 }
+EXPORT_SYMBOL_GPL(x509_decode_time);
 
 int x509_note_not_before(void *context, size_t hdrlen,
 			 unsigned char tag,
 			 const void *value, size_t vlen)
 {
 	struct x509_parse_context *ctx = context;
-	return x509_note_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
+	return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
 }
 
 int x509_note_not_after(void *context, size_t hdrlen,
@@ -567,5 +578,71 @@
 			const void *value, size_t vlen)
 {
 	struct x509_parse_context *ctx = context;
-	return x509_note_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
+	return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
+}
+
+/*
+ * Note a key identifier-based AuthorityKeyIdentifier
+ */
+int x509_akid_note_kid(void *context, size_t hdrlen,
+		       unsigned char tag,
+		       const void *value, size_t vlen)
+{
+	struct x509_parse_context *ctx = context;
+	struct asymmetric_key_id *kid;
+
+	pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);
+
+	if (ctx->cert->akid_skid)
+		return 0;
+
+	kid = asymmetric_key_generate_id(value, vlen, "", 0);
+	if (IS_ERR(kid))
+		return PTR_ERR(kid);
+	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
+	ctx->cert->akid_skid = kid;
+	return 0;
+}
+
+/*
+ * Note a directoryName in an AuthorityKeyIdentifier
+ */
+int x509_akid_note_name(void *context, size_t hdrlen,
+			unsigned char tag,
+			const void *value, size_t vlen)
+{
+	struct x509_parse_context *ctx = context;
+
+	pr_debug("AKID: name: %*phN\n", (int)vlen, value);
+
+	ctx->akid_raw_issuer = value;
+	ctx->akid_raw_issuer_size = vlen;
+	return 0;
+}
+
+/*
+ * Note a serial number in an AuthorityKeyIdentifier
+ */
+int x509_akid_note_serial(void *context, size_t hdrlen,
+			  unsigned char tag,
+			  const void *value, size_t vlen)
+{
+	struct x509_parse_context *ctx = context;
+	struct asymmetric_key_id *kid;
+
+	pr_debug("AKID: serial: %*phN\n", (int)vlen, value);
+
+	if (!ctx->akid_raw_issuer || ctx->cert->akid_id)
+		return 0;
+
+	kid = asymmetric_key_generate_id(value,
+					 vlen,
+					 ctx->akid_raw_issuer,
+					 ctx->akid_raw_issuer_size);
+	if (IS_ERR(kid))
+		return PTR_ERR(kid);
+
+	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
+	ctx->cert->akid_id = kid;
+	return 0;
 }
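To make the x509_decode_time() rules concrete, two hypothetical encodings and
how they decode:

	UTCTime         "150821123456Z"    -> 2015-08-21 12:34:56 UTC
	                                      (YY >= 50 maps to 19YY, else 20YY)
	GeneralizedTime "20500101000000Z"  -> 2050-01-01 00:00:00 UTC
	                                      (years 1950-2049 must use UTCTime)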
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index 3dfe6b5..1de01ea 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -19,11 +19,12 @@
 	struct public_key_signature sig;	/* Signature parameters */
 	char		*issuer;		/* Name of certificate issuer */
 	char		*subject;		/* Name of certificate subject */
-	struct asymmetric_key_id *id;		/* Serial number + issuer */
+	struct asymmetric_key_id *id;		/* Issuer + Serial number */
 	struct asymmetric_key_id *skid;		/* Subject + subjectKeyId (optional) */
-	struct asymmetric_key_id *authority;	/* Authority key identifier (optional) */
-	struct tm	valid_from;
-	struct tm	valid_to;
+	struct asymmetric_key_id *akid_id;	/* CA AuthKeyId matching ->id (optional) */
+	struct asymmetric_key_id *akid_skid;	/* CA AuthKeyId matching ->skid (optional) */
+	time64_t	valid_from;
+	time64_t	valid_to;
 	const void	*tbs;			/* Signed data */
 	unsigned	tbs_size;		/* Size of signed data */
 	unsigned	raw_sig_size;		/* Size of signature */
@@ -48,6 +49,9 @@
  */
 extern void x509_free_certificate(struct x509_certificate *cert);
 extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
+extern int x509_decode_time(time64_t *_t,  size_t hdrlen,
+			    unsigned char tag,
+			    const unsigned char *value, size_t vlen);
 
 /*
  * x509_public_key.c
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 24f17e6..6d88dd1 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -65,23 +65,37 @@
 /**
  * x509_request_asymmetric_key - Request a key by X.509 certificate params.
  * @keyring: The keys to search.
- * @kid: The key ID.
+ * @id: The issuer & serialNumber to look for or NULL.
+ * @skid: The subjectKeyIdentifier to look for or NULL.
  * @partial: Use partial match if true, exact if false.
  *
- * Find a key in the given keyring by subject name and key ID.  These might,
- * for instance, be the issuer name and the authority key ID of an X.509
- * certificate that needs to be verified.
+ * Find a key in the given keyring by identifier.  The preferred identifier is
+ * the issuer + serialNumber and the fallback identifier is the
+ * subjectKeyIdentifier.  If both are given, the lookup is by the former, but
+ * the latter must also match.
  */
 struct key *x509_request_asymmetric_key(struct key *keyring,
-					const struct asymmetric_key_id *kid,
+					const struct asymmetric_key_id *id,
+					const struct asymmetric_key_id *skid,
 					bool partial)
 {
-	key_ref_t key;
-	char *id, *p;
+	struct key *key;
+	key_ref_t ref;
+	const char *lookup;
+	char *req, *p;
+	int len;
 
+	if (id) {
+		lookup = id->data;
+		len = id->len;
+	} else {
+		lookup = skid->data;
+		len = skid->len;
+	}
+
 	/* Construct an identifier "id:<keyid>". */
-	p = id = kmalloc(2 + 1 + kid->len * 2 + 1, GFP_KERNEL);
-	if (!id)
+	p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
+	if (!req)
 		return ERR_PTR(-ENOMEM);
 
 	if (partial) {
@@ -92,32 +106,48 @@
 		*p++ = 'x';
 	}
 	*p++ = ':';
-	p = bin2hex(p, kid->data, kid->len);
+	p = bin2hex(p, lookup, len);
 	*p = 0;
 
-	pr_debug("Look up: \"%s\"\n", id);
+	pr_debug("Look up: \"%s\"\n", req);
 
-	key = keyring_search(make_key_ref(keyring, 1),
-			     &key_type_asymmetric, id);
-	if (IS_ERR(key))
-		pr_debug("Request for key '%s' err %ld\n", id, PTR_ERR(key));
-	kfree(id);
+	ref = keyring_search(make_key_ref(keyring, 1),
+			     &key_type_asymmetric, req);
+	if (IS_ERR(ref))
+		pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref));
+	kfree(req);
 
-	if (IS_ERR(key)) {
-		switch (PTR_ERR(key)) {
+	if (IS_ERR(ref)) {
+		switch (PTR_ERR(ref)) {
 			/* Hide some search errors */
 		case -EACCES:
 		case -ENOTDIR:
 		case -EAGAIN:
 			return ERR_PTR(-ENOKEY);
 		default:
-			return ERR_CAST(key);
+			return ERR_CAST(ref);
 		}
 	}
 
-	pr_devel("<==%s() = 0 [%x]\n", __func__,
-		 key_serial(key_ref_to_ptr(key)));
-	return key_ref_to_ptr(key);
+	key = key_ref_to_ptr(ref);
+	if (id && skid) {
+		const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
+		if (!kids->id[1]) {
+			pr_debug("issuer+serial match, but expected SKID missing\n");
+			goto reject;
+		}
+		if (!asymmetric_key_id_same(skid, kids->id[1])) {
+			pr_debug("issuer+serial match, but SKID does not\n");
+			goto reject;
+		}
+	}
+
+	pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key));
+	return key;
+
+reject:
+	key_put(key);
+	return ERR_PTR(-EKEYREJECTED);
 }
 EXPORT_SYMBOL_GPL(x509_request_asymmetric_key);
 
@@ -227,10 +257,11 @@
 	if (!trust_keyring)
 		return -EOPNOTSUPP;
 
-	if (ca_keyid && !asymmetric_key_id_partial(cert->authority, ca_keyid))
+	if (ca_keyid && !asymmetric_key_id_partial(cert->akid_skid, ca_keyid))
 		return -EPERM;
 
-	key = x509_request_asymmetric_key(trust_keyring, cert->authority,
+	key = x509_request_asymmetric_key(trust_keyring,
+					  cert->akid_id, cert->akid_skid,
 					  false);
 	if (!IS_ERR(key))  {
 		if (!use_builtin_keys
@@ -271,14 +302,7 @@
 	}
 
 	pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
-	pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
-		 cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
-		 cert->valid_from.tm_mday, cert->valid_from.tm_hour,
-		 cert->valid_from.tm_min,  cert->valid_from.tm_sec);
-	pr_devel("Cert Valid To: %04ld-%02d-%02d %02d:%02d:%02d\n",
-		 cert->valid_to.tm_year + 1900, cert->valid_to.tm_mon + 1,
-		 cert->valid_to.tm_mday, cert->valid_to.tm_hour,
-		 cert->valid_to.tm_min,  cert->valid_to.tm_sec);
+	pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
 	pr_devel("Cert Signature: %s + %s\n",
 		 pkey_algo_name[cert->sig.pkey_algo],
 		 hash_algo_name[cert->sig.pkey_hash_algo]);
@@ -287,8 +311,9 @@
 	cert->pub->id_type = PKEY_ID_X509;
 
 	/* Check the signature on the key if it appears to be self-signed */
-	if (!cert->authority ||
-	    asymmetric_key_id_same(cert->skid, cert->authority)) {
+	if ((!cert->akid_skid && !cert->akid_id) ||
+	    asymmetric_key_id_same(cert->skid, cert->akid_skid) ||
+	    asymmetric_key_id_same(cert->id, cert->akid_id)) {
 		ret = x509_check_signature(cert->pub, cert); /* self-signed */
 		if (ret < 0)
 			goto error_free_cert;
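
The lookup now takes the issuer+serial ID and the subject key ID together, and
a hit on the first is cross-checked against the second. A minimal sketch of a
caller, assuming asymmetric_key_generate_id() is used to build the IDs (the
raw_* buffers and their lengths are hypothetical, pulled from the certificate
under verification):

	struct asymmetric_key_id *id, *skid;
	struct key *key;

	/* Sketch only: raw_serial/raw_issuer/raw_skid are placeholders. */
	id = asymmetric_key_generate_id(raw_serial, serial_len,
					raw_issuer, issuer_len);
	skid = asymmetric_key_generate_id(raw_skid, skid_len, "", 0);

	/* Match on issuer+serial; an SKID mismatch on the found key now
	 * fails with -EKEYREJECTED instead of silently succeeding. */
	key = x509_request_asymmetric_key(trust_keyring, id, skid, false);
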
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4e2e6aa..46b4a8e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,6 +176,8 @@
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/perf/Kconfig"
+
 source "drivers/ras/Kconfig"
 
 source "drivers/thunderbolt/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 4c270f5..b250b36 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,7 +11,7 @@
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
 # GPIO must come after pinctrl as gpios may need to mux pins etc
-obj-y				+= pinctrl/
+obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-y				+= gpio/
 obj-y				+= pwm/
 obj-$(CONFIG_PCI)		+= pci/
@@ -161,6 +161,7 @@
 obj-$(CONFIG_FMC)		+= fmc/
 obj-$(CONFIG_POWERCAP)		+= powercap/
 obj-$(CONFIG_MCB)		+= mcb/
+obj-$(CONFIG_PERF_EVENTS)	+= perf/
 obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 54e9729..5d1015c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -417,6 +417,7 @@
 	tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
 	depends on PHYS_ADDR_T_64BIT
 	depends on BLK_DEV
+	depends on ARCH_HAS_MMIO_FLUSH
 	select LIBNVDIMM
 	help
 	  Infrastructure to probe ACPI 6 compliant platforms for
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index cf0fd96..c1b8d03 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -20,6 +20,7 @@
 #include <linux/sort.h>
 #include <linux/pmem.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 #include "nfit.h"
 
 /*
@@ -764,9 +765,7 @@
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
 	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
-	unsigned long long sta;
-	int i, rc = -ENODEV;
-	acpi_status status;
+	int i;
 
 	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
 	adev = to_acpi_dev(acpi_desc);
@@ -781,25 +780,11 @@
 		return force_enable_dimms ? 0 : -ENODEV;
 	}
 
-	status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta);
-	if (status == AE_NOT_FOUND) {
-		dev_dbg(dev, "%s missing _STA, assuming enabled...\n",
-				dev_name(&adev_dimm->dev));
-		rc = 0;
-	} else if (ACPI_FAILURE(status))
-		dev_err(dev, "%s failed to retrieve_STA, disabling...\n",
-				dev_name(&adev_dimm->dev));
-	else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0)
-		dev_info(dev, "%s disabled by firmware\n",
-				dev_name(&adev_dimm->dev));
-	else
-		rc = 0;
-
 	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
 		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
 			set_bit(i, &nfit_mem->dsm_mask);
 
-	return force_enable_dimms ? 0 : rc;
+	return 0;
 }
 
 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
@@ -868,6 +853,7 @@
 	struct acpi_device *adev;
 	int i;
 
+	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
 	adev = to_acpi_dev(acpi_desc);
 	if (!adev)
 		return;
@@ -1032,7 +1018,7 @@
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readl(mmio->base + offset);
+	return readl(mmio->addr.base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
@@ -1057,11 +1043,11 @@
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	writeq(cmd, mmio->base + offset);
+	writeq(cmd, mmio->addr.base + offset);
 	wmb_blk(nfit_blk);
 
 	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
-		readq(mmio->base + offset);
+		readq(mmio->addr.base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1093,11 +1079,16 @@
 		}
 
 		if (rw)
-			memcpy_to_pmem(mmio->aperture + offset,
+			memcpy_to_pmem(mmio->addr.aperture + offset,
 					iobuf + copied, c);
-		else
+		else {
+			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+				mmio_flush_range((void __force *)
+					mmio->addr.aperture + offset, c);
+
 			memcpy_from_pmem(iobuf + copied,
-					mmio->aperture + offset, c);
+					mmio->addr.aperture + offset, c);
+		}
 
 		copied += c;
 		len -= c;
@@ -1144,7 +1135,10 @@
 
 	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
 	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
-	iounmap(spa_map->iomem);
+	if (spa_map->type == SPA_MAP_APERTURE)
+		memunmap((void __force *)spa_map->addr.aperture);
+	else
+		iounmap(spa_map->addr.base);
 	release_mem_region(spa->address, spa->length);
 	list_del(&spa_map->list);
 	kfree(spa_map);
@@ -1190,7 +1184,7 @@
 	spa_map = find_spa_mapping(acpi_desc, spa);
 	if (spa_map) {
 		kref_get(&spa_map->kref);
-		return spa_map->iomem;
+		return spa_map->addr.base;
 	}
 
 	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
@@ -1206,20 +1200,19 @@
 	if (!res)
 		goto err_mem;
 
-	if (type == SPA_MAP_APERTURE) {
-		/*
-		 * TODO: memremap_pmem() support, but that requires cache
-		 * flushing when the aperture is moved.
-		 */
-		spa_map->iomem = ioremap_wc(start, n);
-	} else
-		spa_map->iomem = ioremap_nocache(start, n);
+	spa_map->type = type;
+	if (type == SPA_MAP_APERTURE)
+		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
+							ARCH_MEMREMAP_PMEM);
+	else
+		spa_map->addr.base = ioremap_nocache(start, n);
 
-	if (!spa_map->iomem)
+	if (!spa_map->addr.base)
 		goto err_map;
 
 	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
-	return spa_map->iomem;
+	return spa_map->addr.base;
 
  err_map:
 	release_mem_region(start, n);
@@ -1282,7 +1275,7 @@
 		nfit_blk->dimm_flags = flags.flags;
 	else if (rc == -ENOTTY) {
 		/* fall back to a conservative default */
-		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
 		rc = 0;
 	} else
 		rc = -ENXIO;
@@ -1322,9 +1315,9 @@
 	/* map block aperture memory */
 	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
 	mmio = &nfit_blk->mmio[BDW];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
 			SPA_MAP_APERTURE);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1345,9 +1338,9 @@
 	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
 	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
 	mmio = &nfit_blk->mmio[DCR];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
 			SPA_MAP_CONTROL);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1379,7 +1372,7 @@
 			return -ENOMEM;
 	}
 
-	if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
 	if (mmio->line_size == 0)
@@ -1414,7 +1407,7 @@
 	for (i = 0; i < 2; i++) {
 		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
 
-		if (mmio->base)
+		if (mmio->addr.base)
 			nfit_spa_unmap(acpi_desc, mmio->spa);
 	}
 	nd_blk_region_set_provider_data(ndbr, NULL);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 79b6d83..7e74015 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -41,6 +41,7 @@
 };
 
 enum {
+	ND_BLK_READ_FLUSH = 1,
 	ND_BLK_DCR_LATCH = 2,
 };
 
@@ -107,6 +108,7 @@
 	struct nvdimm_bus *nvdimm_bus;
 	struct device *dev;
 	unsigned long dimm_dsm_force_en;
+	unsigned long bus_dsm_force_en;
 	int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
 			void *iobuf, u64 len, int rw);
 };
@@ -116,12 +118,16 @@
 	DCR,
 };
 
+struct nd_blk_addr {
+	union {
+		void __iomem *base;
+		void __pmem  *aperture;
+	};
+};
+
 struct nfit_blk {
 	struct nfit_blk_mmio {
-		union {
-			void __iomem *base;
-			void __pmem  *aperture;
-		};
+		struct nd_blk_addr addr;
 		u64 size;
 		u64 base_offset;
 		u32 line_size;
@@ -148,7 +154,8 @@
 	struct acpi_nfit_system_address *spa;
 	struct list_head list;
 	struct kref kref;
-	void __iomem *iomem;
+	enum spa_map_type type;
+	struct nd_blk_addr addr;
 };
 
 static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref)
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 6d08446..12fe0f3 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -27,12 +27,11 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 
 #include <linux/libata.h>
 #include <scsi/scsi_host.h>
 
-#include <asm/gpio.h>
-
 #define DRV_NAME	"pata-rb532-cf"
 #define DRV_VERSION	"0.1.0"
 #define DRV_DESC	"PATA driver for RouterBOARD 532 Compact Flash"
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 31df474d..560751b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -392,6 +392,16 @@
 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
 		int page_nid;
 
+		/*
+		 * memory block could have several absent sections from start.
+		 * skip pfn range from absent section
+		 */
+		if (!pfn_present(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
 		page_nid = get_nid_for_pfn(pfn);
 		if (page_nid < 0)
 			continue;
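
The skip works because round_down(pfn + PAGES_PER_SECTION, PAGES_PER_SECTION) - 1
lands on the last pfn of the current (absent) section, so the loop's pfn++
resumes at the first pfn of the next section. A worked sketch, assuming
PAGES_PER_SECTION is 32768:

	/* Illustration only: pfn 40000 sits in section [32768, 65535]. */
	unsigned long pfn = 40000;
	pfn = round_down(pfn + 32768, 32768) - 1;	/* -> 65535 */
	/* the for-loop's pfn++ then moves to 65536, the next section */
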
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849..cc55788 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,14 +136,20 @@
 	/* if set, the HW registers are known to match map->reg_defaults */
 	bool no_sync_defaults;
 
-	struct reg_default *patch;
+	struct reg_sequence *patch;
 	int patch_regs;
 
-	/* if set, converts bulk rw to single rw */
-	bool use_single_rw;
+	/* if set, converts bulk read to single read */
+	bool use_single_read;
+	/* if set, converts bulk write to single write */
+	bool use_single_write;
 	/* if set, the device supports multi write mode */
 	bool can_multi_write;
 
+	/* if set, raw reads/writes are limited to this size */
+	size_t max_raw_read;
+	size_t max_raw_write;
+
 	struct rb_root range_tree;
 	void *selector_work_buf;	/* Scratch buffer used for selector */
 };
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b9862d7..6f8a13e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -729,7 +729,7 @@
 			unsigned int block_base, unsigned int start,
 			unsigned int end)
 {
-	if (regmap_can_raw_write(map) && !map->use_single_rw)
+	if (regmap_can_raw_write(map) && !map->use_single_write)
 		return regcache_sync_block_raw(map, block, cache_present,
 					       block_base, start, end);
 	else
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index 8d304e2..c03ebfd 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -78,37 +78,24 @@
 	.reg_read = regmap_ac97_reg_read,
 };
 
-/**
- * regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
-				const struct regmap_config *config)
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
 {
-	return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+	return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
 
-/**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
-				     const struct regmap_config *config)
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name)
 {
-	return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+	return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5799a0b..f42f2ba 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -469,6 +469,87 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t regmap_cache_only_write_file(struct file *file,
+					    const char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_only);
+	ssize_t result;
+	bool was_enabled, require_sync = false;
+	int err;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_only;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0) {
+		map->unlock(map->lock_arg);
+		return result;
+	}
+
+	if (map->cache_only && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_only && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+		require_sync = true;
+	}
+
+	map->unlock(map->lock_arg);
+
+	if (require_sync) {
+		err = regcache_sync(map);
+		if (err)
+			dev_err(map->dev, "Failed to sync cache %d\n", err);
+	}
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_bypass);
+	ssize_t result;
+	bool was_enabled;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_bypass;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0)
+		goto out;
+
+	if (map->cache_bypass && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_bypass && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+	}
+
+out:
+	map->unlock(map->lock_arg);
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_bypass_write_file,
+};
+
 void regmap_debugfs_init(struct regmap *map, const char *name)
 {
 	struct rb_node *next;
@@ -518,10 +599,11 @@
 	if (map->max_register || regmap_readable(map, 0)) {
 		umode_t registers_mode;
 
-		if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
-			registers_mode = 0600;
-		else
-			registers_mode = 0400;
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+		registers_mode = 0600;
+#else
+		registers_mode = 0400;
+#endif
 
 		debugfs_create_file("registers", registers_mode, map->debugfs,
 				    map, &regmap_map_fops);
@@ -530,12 +612,13 @@
 	}
 
 	if (map->cache_type) {
-		debugfs_create_bool("cache_only", 0400, map->debugfs,
-				    &map->cache_only);
+		debugfs_create_file("cache_only", 0600, map->debugfs,
+				    &map->cache_only, &regmap_cache_only_fops);
 		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
 				    &map->cache_dirty);
-		debugfs_create_bool("cache_bypass", 0400, map->debugfs,
-				    &map->cache_bypass);
+		debugfs_create_file("cache_bypass", 0600, map->debugfs,
+				    &map->cache_bypass,
+				    &regmap_cache_bypass_fops);
 	}
 
 	next = rb_first(&map->range_tree);
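
The new debugfs writers mirror what drivers already do through the regcache
API, but forcing the flags from userspace now taints the kernel, and clearing
cache_only triggers a regcache_sync(). A sketch of the equivalent in-kernel
sequence these files emulate:

	/* Park the map in cache-only mode across a power-down, then
	 * replay the cached writes once the device is powered again. */
	regcache_cache_only(map, true);
	/* ... device off: writes land in the cache only ... */
	regcache_cache_only(map, false);
	regcache_sync(map);
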
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 4b76e33..1a8ec3b 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -209,11 +209,60 @@
 	.val_format_endian_default = REGMAP_ENDIAN_BIG,
 };
 
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+				      size_t count)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (count < 1)
+		return -EINVAL;
+	if (count >= I2C_SMBUS_BLOCK_MAX)
+		return -E2BIG;
+
+	--count;
+	return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+					      ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+				     size_t reg_size, void *val,
+				     size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg_size != 1 || val_size < 1)
+		return -EINVAL;
+	if (val_size >= I2C_SMBUS_BLOCK_MAX)
+		return -E2BIG;
+
+	ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+	if (ret == val_size)
+		return 0;
+	else if (ret < 0)
+		return ret;
+	else
+		return -EIO;
+}
+
+static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+	.write = regmap_i2c_smbus_i2c_write,
+	.read = regmap_i2c_smbus_i2c_read,
+	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
+	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
 static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
 					const struct regmap_config *config)
 {
 	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
 		return &regmap_i2c;
+	else if (config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_I2C_BLOCK))
+		return &regmap_i2c_smbus_i2c_block;
 	else if (config->val_bits == 16 && config->reg_bits == 8 &&
 		 i2c_check_functionality(i2c->adapter,
 					 I2C_FUNC_SMBUS_WORD_DATA))
@@ -233,47 +282,34 @@
 	return ERR_PTR(-ENOTSUPP);
 }
 
-/**
- * regmap_init_i2c(): Initialise register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
-			       const struct regmap_config *config)
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
 {
 	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
 
 	if (IS_ERR(bus))
 		return ERR_CAST(bus);
 
-	return regmap_init(&i2c->dev, bus, &i2c->dev, config);
+	return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
 
-/**
- * devm_regmap_init_i2c(): Initialise managed register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
-				    const struct regmap_config *config)
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
 {
 	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
 
 	if (IS_ERR(bus))
 		return ERR_CAST(bus);
 
-	return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
+	return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
 
 MODULE_LICENSE("GPL");
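
With the new bus, a regmap on an SMBus-only adapter falls back to I2C block
transfers transparently, provided registers are 8 bits wide. A sketch of a
client driver picking this up (the config values here are illustrative):

	static const struct regmap_config example_config = {
		.reg_bits = 8,		/* required for the I2C-block fallback */
		.val_bits = 8,
		.max_register = 0x7f,	/* illustrative */
	};

	struct regmap *map;

	/* regmap_get_i2c_bus() selects regmap_i2c_smbus_i2c_block when the
	 * adapter lacks I2C_FUNC_I2C but has I2C_FUNC_SMBUS_I2C_BLOCK. */
	map = devm_regmap_init_i2c(client, &example_config);
	if (IS_ERR(map))
		return PTR_ERR(map);
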
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 2597600..38d1f72 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -209,7 +209,7 @@
 	 * Read in the statuses, using a single bulk read if possible
 	 * in order to reduce the I/O overheads.
 	 */
-	if (!map->use_single_rw && map->reg_stride == 1 &&
+	if (!map->use_single_read && map->reg_stride == 1 &&
 	    data->irq_reg_stride == 1) {
 		u8 *buf8 = data->status_reg_buf;
 		u16 *buf16 = data->status_reg_buf;
@@ -398,7 +398,7 @@
 	else
 		d->irq_reg_stride = 1;
 
-	if (!map->use_single_rw && map->reg_stride == 1 &&
+	if (!map->use_single_read && map->reg_stride == 1 &&
 	    d->irq_reg_stride == 1) {
 		d->status_reg_buf = kmalloc(map->format.val_bytes *
 					    chip->num_regs, GFP_KERNEL);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 04a329a..426a57e 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -296,20 +296,11 @@
 	return ERR_PTR(ret);
 }
 
-/**
- * regmap_init_mmio_clk(): Initialise register map with register clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
-				    void __iomem *regs,
-				    const struct regmap_config *config)
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+				      void __iomem *regs,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
 {
 	struct regmap_mmio_context *ctx;
 
@@ -317,25 +308,17 @@
 	if (IS_ERR(ctx))
 		return ERR_CAST(ctx);
 
-	return regmap_init(dev, &regmap_mmio, ctx, config);
+	return __regmap_init(dev, &regmap_mmio, ctx, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
 
-/**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
-					 void __iomem *regs,
-					 const struct regmap_config *config)
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+					   const char *clk_id,
+					   void __iomem *regs,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name)
 {
 	struct regmap_mmio_context *ctx;
 
@@ -343,8 +326,9 @@
 	if (IS_ERR(ctx))
 		return ERR_CAST(ctx);
 
-	return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+	return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 53d1148..edd9a83 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,37 +113,24 @@
 	.val_format_endian_default = REGMAP_ENDIAN_BIG,
 };
 
-/**
- * regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spi(struct spi_device *spi,
-			       const struct regmap_config *config)
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
 {
-	return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+	return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_spi);
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
 
-/**
- * devm_regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The map will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spi(struct spi_device *spi,
-				    const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
 {
-	return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+	return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index d7026dc..7e58f65 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -91,36 +91,25 @@
 	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
 };
 
-/**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev:	SPMI device that will be interacted with
- * @config:	Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
-				     const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name)
 {
-	return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+	return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
 
-/**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev:	SPMI device that will be interacted with
- * @config:	Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
-					  const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+					    const struct regmap_config *config,
+					    struct lock_class_key *lock_key,
+					    const char *lock_name)
 {
-	return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+	return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
 
 static int regmap_spmi_ext_read(void *context,
 				const void *reg, size_t reg_size,
@@ -222,35 +211,24 @@
 	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
 };
 
-/**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev:	Device that will be interacted with
- * @config:	Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
-				    const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
 {
-	return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+	return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+			     lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
 
-/**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev:	SPMI device that will be interacted with
- * @config:	Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
-				     const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name)
 {
-	return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+	return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+				  lock_key, lock_name);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04..afaf562 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/rbtree.h>
 #include <linux/sched.h>
+#include <linux/delay.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -34,7 +35,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change);
+			       bool *change, bool force_write);
 
 static int _regmap_bus_reg_read(void *context, unsigned int reg,
 				unsigned int *val);
@@ -93,6 +94,9 @@
 
 bool regmap_readable(struct regmap *map, unsigned int reg)
 {
+	if (!map->reg_read)
+		return false;
+
 	if (map->max_register && reg > map->max_register)
 		return false;
 
@@ -515,22 +519,12 @@
 }
 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
 
-/**
- * regmap_init(): Initialise register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.  This function should generally not be called
- * directly, it should be called by bus-specific init functions.
- */
-struct regmap *regmap_init(struct device *dev,
-			   const struct regmap_bus *bus,
-			   void *bus_context,
-			   const struct regmap_config *config)
+struct regmap *__regmap_init(struct device *dev,
+			     const struct regmap_bus *bus,
+			     void *bus_context,
+			     const struct regmap_config *config,
+			     struct lock_class_key *lock_key,
+			     const char *lock_name)
 {
 	struct regmap *map;
 	int ret = -EINVAL;
@@ -556,10 +550,14 @@
 			spin_lock_init(&map->spinlock);
 			map->lock = regmap_lock_spinlock;
 			map->unlock = regmap_unlock_spinlock;
+			lockdep_set_class_and_name(&map->spinlock,
+						   lock_key, lock_name);
 		} else {
 			mutex_init(&map->mutex);
 			map->lock = regmap_lock_mutex;
 			map->unlock = regmap_unlock_mutex;
+			lockdep_set_class_and_name(&map->mutex,
+						   lock_key, lock_name);
 		}
 		map->lock_arg = map;
 	}
@@ -573,8 +571,13 @@
 		map->reg_stride = config->reg_stride;
 	else
 		map->reg_stride = 1;
-	map->use_single_rw = config->use_single_rw;
-	map->can_multi_write = config->can_multi_write;
+	map->use_single_read = config->use_single_rw || !bus || !bus->read;
+	map->use_single_write = config->use_single_rw || !bus || !bus->write;
+	map->can_multi_write = config->can_multi_write && bus && bus->write;
+	if (bus) {
+		map->max_raw_read = bus->max_raw_read;
+		map->max_raw_write = bus->max_raw_write;
+	}
 	map->dev = dev;
 	map->bus = bus;
 	map->bus_context = bus_context;
@@ -763,7 +766,7 @@
 		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
 		    (val_endian != REGMAP_ENDIAN_BIG))
 			goto err_map;
-		map->use_single_rw = true;
+		map->use_single_write = true;
 	}
 
 	if (!map->format.format_write &&
@@ -899,30 +902,19 @@
 err:
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(regmap_init);
+EXPORT_SYMBOL_GPL(__regmap_init);
 
 static void devm_regmap_release(struct device *dev, void *res)
 {
 	regmap_exit(*(struct regmap **)res);
 }
 
-/**
- * devm_regmap_init(): Initialise managed register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.  This function should generally not be called
- * directly, it should be called by bus-specific init functions.  The
- * map will be automatically freed by the device management code.
- */
-struct regmap *devm_regmap_init(struct device *dev,
-				const struct regmap_bus *bus,
-				void *bus_context,
-				const struct regmap_config *config)
+struct regmap *__devm_regmap_init(struct device *dev,
+				  const struct regmap_bus *bus,
+				  void *bus_context,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
 {
 	struct regmap **ptr, *regmap;
 
@@ -930,7 +922,8 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	regmap = regmap_init(dev, bus, bus_context, config);
+	regmap = __regmap_init(dev, bus, bus_context, config,
+			       lock_key, lock_name);
 	if (!IS_ERR(regmap)) {
 		*ptr = regmap;
 		devres_add(dev, ptr);
@@ -940,7 +933,7 @@
 
 	return regmap;
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init);
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
 
 static void regmap_field_init(struct regmap_field *rm_field,
 	struct regmap *regmap, struct reg_field reg_field)
@@ -1178,7 +1171,7 @@
 		ret = _regmap_update_bits(map, range->selector_reg,
 					  range->selector_mask,
 					  win_page << range->selector_shift,
-					  &page_chg);
+					  &page_chg, false);
 
 		map->work_buf = orig_work_buf;
 
@@ -1382,10 +1375,33 @@
  */
 bool regmap_can_raw_write(struct regmap *map)
 {
-	return map->bus && map->format.format_val && map->format.format_reg;
+	return map->bus && map->bus->write && map->format.format_val &&
+		map->format.format_reg;
 }
 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
 
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+	return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+	return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 				       unsigned int val)
 {
@@ -1555,6 +1571,8 @@
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
+	if (map->max_raw_write && map->max_raw_write < val_len)
+		return -E2BIG;
 
 	map->lock(map->lock_arg);
 
@@ -1624,6 +1642,18 @@
 }
 EXPORT_SYMBOL_GPL(regmap_fields_write);
 
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_write_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
 /**
  * regmap_fields_update_bits():	Perform a read/modify/write cycle
  *                              on the register field
@@ -1669,6 +1699,7 @@
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
+	size_t total_size = val_bytes * val_count;
 
 	if (map->bus && !map->format.parse_inplace)
 		return -EINVAL;
@@ -1677,9 +1708,15 @@
 
 	/*
 	 * Some devices don't support bulk write, for
-	 * them we have a series of single write operations.
+	 * them we have a series of single write operations in the first two if
+	 * blocks.
+	 *
+	 * The first if block is used for memory mapped io. It does not allow
+	 * val_bytes of 3 for example.
+	 * The second one is used for busses which do not have this limitation
+	 * and can write arbitrary value lengths.
 	 */
-	if (!map->bus || map->use_single_rw) {
+	if (!map->bus) {
 		map->lock(map->lock_arg);
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
@@ -1711,6 +1748,38 @@
 		}
 out:
 		map->unlock(map->lock_arg);
+	} else if (map->use_single_write ||
+		   (map->max_raw_write && map->max_raw_write < total_size)) {
+		int chunk_stride = map->reg_stride;
+		size_t chunk_size = val_bytes;
+		size_t chunk_count = val_count;
+
+		if (!map->use_single_write) {
+			chunk_size = map->max_raw_write;
+			if (chunk_size % val_bytes)
+				chunk_size -= chunk_size % val_bytes;
+			chunk_count = total_size / chunk_size;
+			chunk_stride *= chunk_size / val_bytes;
+		}
+
+		map->lock(map->lock_arg);
+		/* Write as many bytes as possible with chunk_size */
+		for (i = 0; i < chunk_count; i++) {
+			ret = _regmap_raw_write(map,
+						reg + (i * chunk_stride),
+						val + (i * chunk_size),
+						chunk_size);
+			if (ret)
+				break;
+		}
+
+		/* Write remaining bytes */
+		if (!ret && chunk_size * i < total_size) {
+			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
+						val + (i * chunk_size),
+						total_size - i * chunk_size);
+		}
+		map->unlock(map->lock_arg);
 	} else {
 		void *wval;
 
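
When a bus caps raw transfers, the path above carves the buffer into the
largest whole-value chunks that fit. A worked example of the arithmetic,
assuming max_raw_write = 16, reg_stride = 1 and 3-byte values:

	/* Illustration of the chunking above, not driver code. */
	size_t val_bytes   = 3;
	size_t val_count   = 7;
	size_t total_size  = val_bytes * val_count;	/* 21 bytes */
	size_t chunk_size  = 16;			/* max_raw_write */
	chunk_size -= chunk_size % val_bytes;		/* 15: whole values only */
	size_t chunk_count = total_size / chunk_size;	/* one full chunk */
	/* one 15-byte raw write, then a 6-byte tail write finishes it;
	 * chunk_stride advances reg by 5 registers per chunk */
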
@@ -1740,10 +1809,10 @@
  *
  * the (register,newvalue) pairs in regs have not been formatted, but
  * they are all in the same page and have been changed to being page
- * relative. The page register has been written if that was neccessary.
+ * relative. The page register has been written if that was necessary.
  */
 static int _regmap_raw_multi_reg_write(struct regmap *map,
-				       const struct reg_default *regs,
+				       const struct reg_sequence *regs,
 				       size_t num_regs)
 {
 	int ret;
@@ -1768,8 +1837,8 @@
 	u8 = buf;
 
 	for (i = 0; i < num_regs; i++) {
-		int reg = regs[i].reg;
-		int val = regs[i].def;
+		unsigned int reg = regs[i].reg;
+		unsigned int val = regs[i].def;
 		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
@@ -1800,17 +1869,19 @@
 }
 
 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
-					       struct reg_default *regs,
+					       struct reg_sequence *regs,
 					       size_t num_regs)
 {
 	int ret;
 	int i, n;
-	struct reg_default *base;
+	struct reg_sequence *base;
 	unsigned int this_page = 0;
+	unsigned int page_change = 0;
 	/*
 	 * the set of registers are not necessarily in order, but
 	 * since the order of write must be preserved this algorithm
-	 * chops the set each time the page changes
+	 * chops the set each time the page changes. This also applies
+	 * if there is a delay required at any point in the sequence.
 	 */
 	base = regs;
 	for (i = 0, n = 0; i < num_regs; i++, n++) {
@@ -1826,16 +1897,48 @@
 				this_page = win_page;
 			if (win_page != this_page) {
 				this_page = win_page;
+				page_change = 1;
+			}
+		}
+
+		/* If we have both a page change and a delay make sure to
+		 * write the regs and apply the delay before we change the
+		 * page.
+		 */
+
+		if (page_change || regs[i].delay_us) {
+
+				/* For situations where the first write requires
+				 * a delay we need to make sure we don't call
+				 * raw_multi_reg_write with n=0.
+				 * This can't occur with page breaks as we
+				 * never write on the first iteration.
+				 */
+				if (regs[i].delay_us && i == 0)
+					n = 1;
+
 				ret = _regmap_raw_multi_reg_write(map, base, n);
 				if (ret != 0)
 					return ret;
+
+				if (regs[i].delay_us)
+					udelay(regs[i].delay_us);
+
 				base += n;
 				n = 0;
-			}
-			ret = _regmap_select_page(map, &base[n].reg, range, 1);
-			if (ret != 0)
-				return ret;
+
+				if (page_change) {
+					ret = _regmap_select_page(map,
+								  &base[n].reg,
+								  range, 1);
+					if (ret != 0)
+						return ret;
+
+					page_change = 0;
+				}
+
 		}
+
 	}
 	if (n > 0)
 		return _regmap_raw_multi_reg_write(map, base, n);
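
With reg_sequence gaining delay_us, a settle time can be attached to an entry
in a multi-write or patch, and the splitter above flushes pending writes and
applies the delay before moving on. A sketch (register values are made up):

	static const struct reg_sequence example_init[] = {
		{ .reg = 0x00, .def = 0x8000 },			 /* soft reset */
		{ .reg = 0x00, .def = 0x0000, .delay_us = 500 }, /* settle */
		{ .reg = 0x01, .def = 0x1234 },
	};

	ret = regmap_multi_reg_write(map, example_init,
				     ARRAY_SIZE(example_init));
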
@@ -1843,7 +1946,7 @@
 }
 
 static int _regmap_multi_reg_write(struct regmap *map,
-				   const struct reg_default *regs,
+				   const struct reg_sequence *regs,
 				   size_t num_regs)
 {
 	int i;
@@ -1854,6 +1957,9 @@
 			ret = _regmap_write(map, regs[i].reg, regs[i].def);
 			if (ret != 0)
 				return ret;
+
+			if (regs[i].delay_us)
+				udelay(regs[i].delay_us);
 		}
 		return 0;
 	}
@@ -1893,10 +1999,14 @@
 	for (i = 0; i < num_regs; i++) {
 		unsigned int reg = regs[i].reg;
 		struct regmap_range_node *range;
+
+		/* Coalesce all the writes up to the next page break or
+		 * delay in the sequence.
+		 */
 		range = _regmap_range_lookup(map, reg);
-		if (range) {
-			size_t len = sizeof(struct reg_default)*num_regs;
-			struct reg_default *base = kmemdup(regs, len,
+		if (range || regs[i].delay_us) {
+			size_t len = sizeof(struct reg_sequence)*num_regs;
+			struct reg_sequence *base = kmemdup(regs, len,
 							   GFP_KERNEL);
 			if (!base)
 				return -ENOMEM;
@@ -1929,7 +2039,7 @@
  * A value of zero will be returned on success, a negative errno will be
  * returned in error cases.
  */
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			   int num_regs)
 {
 	int ret;
@@ -1962,7 +2072,7 @@
  * be returned in error cases.
  */
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs)
 {
 	int ret;
@@ -2050,7 +2160,7 @@
 
 	/*
 	 * Some buses or devices flag reads by setting the high bits in the
-	 * register addresss; since it's always the high bits for all
+	 * register address; since it's always the high bits for all
 	 * current formats we can do this here rather than in
 	 * formatting.  This may break if we get interesting formats.
 	 */
@@ -2097,8 +2207,6 @@
 	int ret;
 	void *context = _regmap_map_get_context(map);
 
-	WARN_ON(!map->reg_read);
-
 	if (!map->cache_bypass) {
 		ret = regcache_read(map, reg, val);
 		if (ret == 0)
@@ -2179,11 +2287,22 @@
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
+	if (val_count == 0)
+		return -EINVAL;
 
 	map->lock(map->lock_arg);
 
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
+		if (!map->bus->read) {
+			ret = -ENOTSUPP;
+			goto out;
+		}
+		if (map->max_raw_read && map->max_raw_read < val_len) {
+			ret = -E2BIG;
+			goto out;
+		}
+
 		/* Physical block read if there's no cache involved */
 		ret = _regmap_raw_read(map, reg, val, val_len);
 
@@ -2292,20 +2411,51 @@
 		 * Some devices do not support bulk read, for
 		 * them we have a series of single read operations.
 		 */
-		if (map->use_single_rw) {
-			for (i = 0; i < val_count; i++) {
-				ret = regmap_raw_read(map,
-						reg + (i * map->reg_stride),
-						val + (i * val_bytes),
-						val_bytes);
-				if (ret != 0)
-					return ret;
-			}
-		} else {
+		size_t total_size = val_bytes * val_count;
+
+		if (!map->use_single_read &&
+		    (!map->max_raw_read || map->max_raw_read > total_size)) {
 			ret = regmap_raw_read(map, reg, val,
 					      val_bytes * val_count);
 			if (ret != 0)
 				return ret;
+		} else {
+			/*
+			 * Some devices do not support bulk read or do not
+			 * support large bulk reads, for them we have a series
+			 * of read operations.
+			 */
+			int chunk_stride = map->reg_stride;
+			size_t chunk_size = val_bytes;
+			size_t chunk_count = val_count;
+
+			if (!map->use_single_read) {
+				chunk_size = map->max_raw_read;
+				if (chunk_size % val_bytes)
+					chunk_size -= chunk_size % val_bytes;
+				chunk_count = total_size / chunk_size;
+				chunk_stride *= chunk_size / val_bytes;
+			}
+
+			/* Read bytes that fit into a multiple of chunk_size */
+			for (i = 0; i < chunk_count; i++) {
+				ret = regmap_raw_read(map,
+						      reg + (i * chunk_stride),
+						      val + (i * chunk_size),
+						      chunk_size);
+				if (ret != 0)
+					return ret;
+			}
+
+			/* Read remaining bytes */
+			if (chunk_size * i < total_size) {
+				ret = regmap_raw_read(map,
+						      reg + (i * chunk_stride),
+						      val + (i * chunk_size),
+						      total_size - i * chunk_size);
+				if (ret != 0)
+					return ret;
+			}
 		}
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
@@ -2317,7 +2467,34 @@
 					  &ival);
 			if (ret != 0)
 				return ret;
-			map->format.format_val(val + (i * val_bytes), ival, 0);
+
+			if (map->format.format_val) {
+				map->format.format_val(val + (i * val_bytes), ival, 0);
+			} else {
+				/* Devices providing read and write
+				 * operations can use the bulk I/O
+				 * functions if they define a val_bytes,
+				 * we assume that the values are native
+				 * endian.
+				 */
+				u32 *u32 = val;
+				u16 *u16 = val;
+				u8 *u8 = val;
+
+				switch (map->format.val_bytes) {
+				case 4:
+					u32[i] = ival;
+					break;
+				case 2:
+					u16[i] = ival;
+					break;
+				case 1:
+					u8[i] = ival;
+					break;
+				default:
+					return -EINVAL;
+				}
+			}
 		}
 	}
 
@@ -2327,7 +2504,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change)
+			       bool *change, bool force_write)
 {
 	int ret;
 	unsigned int tmp, orig;
@@ -2339,7 +2516,7 @@
 	tmp = orig & ~mask;
 	tmp |= val & mask;
 
-	if (tmp != orig) {
+	if (force_write || (tmp != orig)) {
 		ret = _regmap_write(map, reg, tmp);
 		if (change)
 			*change = true;
@@ -2367,7 +2544,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 	map->unlock(map->lock_arg);
 
 	return ret;
@@ -2375,6 +2552,29 @@
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		      unsigned int mask, unsigned int val)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
+
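+
+/* regmap_write_bits() differs from regmap_update_bits() only in force_write:
+ * the value is written out even when it matches what was read back, which is
+ * what write-one-to-clear or self-clearing bits need. Sketch of a caller
+ * (the register and mask names here are illustrative):
+ *
+ *	ret = regmap_write_bits(map, EXAMPLE_REG_STAT,
+ *				EXAMPLE_IRQ_PENDING, EXAMPLE_IRQ_PENDING);
+ */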
+/**
  * regmap_update_bits_async: Perform a read/modify/write cycle on the register
  *                           map asynchronously
  *
@@ -2398,7 +2598,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 
 	map->async = false;
 
@@ -2427,7 +2627,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 	map->unlock(map->lock_arg);
 	return ret;
 }
@@ -2460,7 +2660,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 
 	map->async = false;
 
@@ -2552,10 +2752,10 @@
  * The caller must ensure that this function cannot be called
  * concurrently with either itself or regcache_sync().
  */
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs)
 {
-	struct reg_default *p;
+	struct reg_sequence *p;
 	int ret;
 	bool bypass;
 
@@ -2564,7 +2764,7 @@
 		return 0;
 
 	p = krealloc(map->patch,
-		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
+		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
 		     GFP_KERNEL);
 	if (p) {
 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
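
The lock_class_key/lock_name parameters threaded through every init function
exist for lockdep: one static key per regmap_init() call site keeps nested
regmaps from sharing a lock class. The public wrappers in
include/linux/regmap.h are expected to look roughly like this sketch (not the
verbatim header):

	#ifdef CONFIG_LOCKDEP
	#define __regmap_lockdep_wrapper(fn, name, ...)			\
	({								\
		static struct lock_class_key _key;			\
		fn(__VA_ARGS__, &_key,					\
		   KBUILD_BASENAME ":" __stringify(__LINE__) ":"	\
		   "(" name ")->lock");					\
	})
	#else
	#define __regmap_lockdep_wrapper(fn, name, ...)			\
		fn(__VA_ARGS__, NULL, NULL)
	#endif

	#define regmap_init(dev, bus, bus_context, config)		\
		__regmap_lockdep_wrapper(__regmap_init, #config,	\
					 dev, bus, bus_context, config)
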
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f9ab745..b9794ae 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@
 
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void **kaddr, unsigned long *pfn, long size)
+			void __pmem **kaddr, unsigned long *pfn)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
@@ -384,13 +384,9 @@
 	page = brd_insert_page(brd, sector);
 	if (!page)
 		return -ENOSPC;
-	*kaddr = page_address(page);
+	*kaddr = (void __pmem *)page_address(page);
 	*pfn = page_to_pfn(page);
 
-	/*
-	 * TODO: If size > PAGE_SIZE, we could look to see if the next page in
-	 * the file happens to be mapped to the next page of physical RAM.
-	 */
 	return PAGE_SIZE;
 }
 #else
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5f6b3be..1508353 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -37,6 +37,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
@@ -147,6 +148,7 @@
 	unsigned int feature_persistent:1;
 	unsigned int max_indirect_segments;
 	int is_ready;
+	struct blk_mq_tag_set tag_set;
 };
 
 static unsigned int nr_minors;
@@ -616,54 +618,41 @@
 		 !(info->feature_flush & REQ_FUA)));
 }
 
-/*
- * do_blkif_request
- *  read a block; request is in a request queue
- */
-static void do_blkif_request(struct request_queue *rq)
+static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *qd)
 {
-	struct blkfront_info *info = NULL;
-	struct request *req;
-	int queued;
+	struct blkfront_info *info = qd->rq->rq_disk->private_data;
 
-	pr_debug("Entered do_blkif_request\n");
+	blk_mq_start_request(qd->rq);
+	spin_lock_irq(&info->io_lock);
+	if (RING_FULL(&info->ring))
+		goto out_busy;
 
-	queued = 0;
+	if (blkif_request_flush_invalid(qd->rq, info))
+		goto out_err;
 
-	while ((req = blk_peek_request(rq)) != NULL) {
-		info = req->rq_disk->private_data;
+	if (blkif_queue_request(qd->rq))
+		goto out_busy;
 
-		if (RING_FULL(&info->ring))
-			goto wait;
+	flush_requests(info);
+	spin_unlock_irq(&info->io_lock);
+	return BLK_MQ_RQ_QUEUE_OK;
 
-		blk_start_request(req);
+out_err:
+	spin_unlock_irq(&info->io_lock);
+	return BLK_MQ_RQ_QUEUE_ERROR;
 
-		if (blkif_request_flush_invalid(req, info)) {
-			__blk_end_request_all(req, -EOPNOTSUPP);
-			continue;
-		}
-
-		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
-			 "(%u/%u) [%s]\n",
-			 req, req->cmd, (unsigned long)blk_rq_pos(req),
-			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
-			 rq_data_dir(req) ? "write" : "read");
-
-		if (blkif_queue_request(req)) {
-			blk_requeue_request(rq, req);
-wait:
-			/* Avoid pointless unplugs. */
-			blk_stop_queue(rq);
-			break;
-		}
-
-		queued++;
-	}
-
-	if (queued != 0)
-		flush_requests(info);
+out_busy:
+	spin_unlock_irq(&info->io_lock);
+	blk_mq_stop_hw_queue(hctx);
+	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
+static struct blk_mq_ops blkfront_mq_ops = {
+	.queue_rq = blkif_queue_rq,
+	.map_queue = blk_mq_map_queue,
+};
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 				unsigned int physical_sector_size,
 				unsigned int segments)
@@ -671,9 +660,22 @@
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
 
-	rq = blk_init_queue(do_blkif_request, &info->io_lock);
-	if (rq == NULL)
+	memset(&info->tag_set, 0, sizeof(info->tag_set));
+	info->tag_set.ops = &blkfront_mq_ops;
+	info->tag_set.nr_hw_queues = 1;
+	info->tag_set.queue_depth = BLK_RING_SIZE(info);
+	info->tag_set.numa_node = NUMA_NO_NODE;
+	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	info->tag_set.cmd_size = 0;
+	info->tag_set.driver_data = info;
+
+	if (blk_mq_alloc_tag_set(&info->tag_set))
 		return -1;
+	rq = blk_mq_init_queue(&info->tag_set);
+	if (IS_ERR(rq)) {
+		blk_mq_free_tag_set(&info->tag_set);
+		return -1;
+	}
 
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
@@ -901,19 +903,15 @@
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
 	unsigned int minor, nr_minors;
-	unsigned long flags;
 
 	if (info->rq == NULL)
 		return;
 
-	spin_lock_irqsave(&info->io_lock, flags);
-
 	/* No more blkif_request(). */
-	blk_stop_queue(info->rq);
+	blk_mq_stop_hw_queues(info->rq);
 
 	/* No more gnttab callback work. */
 	gnttab_cancel_free_callback(&info->callback);
-	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work(&info->work);
@@ -925,20 +923,18 @@
 	xlbd_release_minors(minor, nr_minors);
 
 	blk_cleanup_queue(info->rq);
+	blk_mq_free_tag_set(&info->tag_set);
 	info->rq = NULL;
 
 	put_disk(info->gd);
 	info->gd = NULL;
 }
 
+/* Must be called with io_lock held */
 static void kick_pending_request_queues(struct blkfront_info *info)
 {
-	if (!RING_FULL(&info->ring)) {
-		/* Re-enable calldowns. */
-		blk_start_queue(info->rq);
-		/* Kick things off immediately. */
-		do_blkif_request(info->rq);
-	}
+	if (!RING_FULL(&info->ring))
+		blk_mq_start_stopped_hw_queues(info->rq, true);
 }
 
 static void blkif_restart_queue(struct work_struct *work)
@@ -963,7 +959,7 @@
 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
 	/* No more blkif_request(). */
 	if (info->rq)
-		blk_stop_queue(info->rq);
+		blk_mq_stop_hw_queues(info->rq);
 
 	/* Remove all persistent grants */
 	if (!list_empty(&info->grants)) {
@@ -1146,7 +1142,6 @@
 	RING_IDX i, rp;
 	unsigned long flags;
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
-	int error;
 
 	spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1187,37 +1182,37 @@
 			continue;
 		}
 
-		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 					   info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				req->errors = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
 				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
 			}
-			__blk_end_request_all(req, error);
+			blk_mq_complete_request(req);
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				req->errors = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     info->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				req->errors = -EOPNOTSUPP;
 			}
-			if (unlikely(error)) {
-				if (error == -EOPNOTSUPP)
-					error = 0;
+			if (unlikely(req->errors)) {
+				if (req->errors == -EOPNOTSUPP)
+					req->errors = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -1228,7 +1223,7 @@
 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 					"request: %x\n", bret->status);
 
-			__blk_end_request_all(req, error);
+			blk_mq_complete_request(req);
 			break;
 		default:
 			BUG();
@@ -1555,28 +1550,6 @@
 
 	kfree(copy);
 
-	/*
-	 * Empty the queue, this is important because we might have
-	 * requests in the queue with more segments than what we
-	 * can handle now.
-	 */
-	spin_lock_irq(&info->io_lock);
-	while ((req = blk_fetch_request(info->rq)) != NULL) {
-		if (req->cmd_flags &
-		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-			list_add(&req->queuelist, &requests);
-			continue;
-		}
-		merge_bio.head = req->bio;
-		merge_bio.tail = req->biotail;
-		bio_list_merge(&bio_list, &merge_bio);
-		req->bio = NULL;
-		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
-			pr_alert("diskcache flush request found!\n");
-		__blk_end_request_all(req, 0);
-	}
-	spin_unlock_irq(&info->io_lock);
-
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
 	spin_lock_irq(&info->io_lock);
@@ -1591,9 +1564,10 @@
 		/* Requeue pending requests (flush or discard) */
 		list_del_init(&req->queuelist);
 		BUG_ON(req->nr_phys_segments > segs);
-		blk_requeue_request(info->rq, req);
+		blk_mq_requeue_request(req);
 	}
 	spin_unlock_irq(&info->io_lock);
+	blk_mq_kick_requeue_list(info->rq);
 
 	while ((bio = bio_list_pop(&bio_list)) != NULL) {
 		/* Traverse the list of pending bios and re-queue them */
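
The blkfront hunks above are the mechanical half of a blk-mq conversion:
completion moves from __blk_end_request_all() to blk_mq_complete_request()
with the status carried in req->errors, stop/restart moves to
blk_mq_stop_hw_queues()/blk_mq_start_stopped_hw_queues(), and requeued
requests go through blk_mq_requeue_request() followed by a single
blk_mq_kick_requeue_list(). A minimal sketch of the restart half, with
made-up driver state (struct example_info and its fields are illustrative,
not taken from the patch):

    #include <linux/blk-mq.h>
    #include <linux/spinlock.h>

    /* Illustrative driver state; only rq and the ring-full flag matter. */
    struct example_info {
            spinlock_t              io_lock;
            struct request_queue    *rq;
            bool                    ring_full;
    };

    /* Must be called with io_lock held, mirroring the rule noted above. */
    static void example_kick_queues(struct example_info *info)
    {
            /* Restart hw queues that were stopped while the ring was full. */
            if (!info->ring_full)
                    blk_mq_start_stopped_hw_queues(info->rq, true);
    }
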
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9c01f5b..9fa15bb 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -388,7 +388,6 @@
 static ssize_t compact_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	unsigned long nr_migrated;
 	struct zram *zram = dev_to_zram(dev);
 	struct zram_meta *meta;
 
@@ -399,8 +398,7 @@
 	}
 
 	meta = zram->meta;
-	nr_migrated = zs_compact(meta->mem_pool);
-	atomic64_add(nr_migrated, &zram->stats.num_migrated);
+	zs_compact(meta->mem_pool);
 	up_read(&zram->init_lock);
 
 	return len;
@@ -428,26 +426,31 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct zram *zram = dev_to_zram(dev);
+	struct zs_pool_stats pool_stats;
 	u64 orig_size, mem_used = 0;
 	long max_used;
 	ssize_t ret;
 
+	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
+
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
 		mem_used = zs_get_total_pages(zram->meta->mem_pool);
+		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+	}
 
 	orig_size = atomic64_read(&zram->stats.pages_stored);
 	max_used = atomic_long_read(&zram->stats.max_used_pages);
 
 	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
+			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
 			orig_size << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.compr_data_size),
 			mem_used << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.zero_pages),
-			(u64)atomic64_read(&zram->stats.num_migrated));
+			pool_stats.pages_compacted);
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -619,7 +622,7 @@
 		uncmem = user_mem;
 
 	if (!uncmem) {
-		pr_info("Unable to allocate temp memory\n");
+		pr_err("Unable to allocate temp memory\n");
 		ret = -ENOMEM;
 		goto out_cleanup;
 	}
@@ -716,7 +719,7 @@
 
 	handle = zs_malloc(meta->mem_pool, clen);
 	if (!handle) {
-		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+		pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
 			index, clen);
 		ret = -ENOMEM;
 		goto out;
@@ -1036,7 +1039,7 @@
 
 	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
 	if (IS_ERR(comp)) {
-		pr_info("Cannot initialise %s compressing backend\n",
+		pr_err("Cannot initialise %s compressing backend\n",
 				zram->compressor);
 		err = PTR_ERR(comp);
 		goto out_free_meta;
@@ -1214,7 +1217,7 @@
 	/* gendisk structure */
 	zram->disk = alloc_disk(1);
 	if (!zram->disk) {
-		pr_warn("Error allocating disk structure for device %d\n",
+		pr_err("Error allocating disk structure for device %d\n",
 			device_id);
 		ret = -ENOMEM;
 		goto out_free_queue;
@@ -1263,7 +1266,8 @@
 	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
 				&zram_disk_attr_group);
 	if (ret < 0) {
-		pr_warn("Error creating sysfs group");
+		pr_err("Error creating sysfs group for device %d\n",
+				device_id);
 		goto out_free_disk;
 	}
 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
@@ -1403,13 +1407,13 @@
 
 	ret = class_register(&zram_control_class);
 	if (ret) {
-		pr_warn("Unable to register zram-control class\n");
+		pr_err("Unable to register zram-control class\n");
 		return ret;
 	}
 
 	zram_major = register_blkdev(0, "zram");
 	if (zram_major <= 0) {
-		pr_warn("Unable to get major number\n");
+		pr_err("Unable to get major number\n");
 		class_unregister(&zram_control_class);
 		return -EBUSY;
 	}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6dbe2df..8e92339 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -78,7 +78,6 @@
 	atomic64_t compr_data_size;	/* compressed size of pages stored */
 	atomic64_t num_reads;	/* failed + successful */
 	atomic64_t num_writes;	/* --do-- */
-	atomic64_t num_migrated;	/* no. of migrated object */
 	atomic64_t failed_reads;	/* can happen when memory is too low */
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
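
With num_migrated gone, compaction accounting for zram now lives in
zsmalloc: compact_store() simply calls zs_compact(), and mm_stat's last
column is read back through zs_pool_stats() (hence the format change from
%8llu to %8lu, since pages_compacted is an unsigned long). A small sketch of
the consumer side, assuming the zs_pool_stats() interface used in the hunks
above:

    #include <linux/zsmalloc.h>

    /* Hedged sketch: fetch the cumulative compaction count for a pool. */
    static unsigned long example_pages_compacted(struct zs_pool *pool)
    {
            struct zs_pool_stats stats = { };

            zs_pool_stats(pool, &stats);
            return stats.pages_compacted;
    }
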
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index ab3bde1..1c543ef 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -332,6 +332,18 @@
 }
 
 /**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+	return 0;
+}
+
+/**
  * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
  * @bus:	Pointer to bus information for current CPU.
  *		IS_ERR(bus) is checked, so no need for caller to check.
@@ -368,7 +380,7 @@
 	if (!bus->phys)
 		bus->phys = mips_cdmm_cur_base();
 	/* Otherwise, ask platform code for suggestions */
-	if (!bus->phys && mips_cdmm_phys_base)
+	if (!bus->phys)
 		bus->phys = mips_cdmm_phys_base();
 	/* Otherwise, copy what other CPUs have done */
 	if (!bus->phys)
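
The mips_cdmm change replaces an optional-symbol test with a __weak default
returning 0, so the call site no longer needs the extra check. A platform
overrides it simply by providing a strong definition of the same symbol; the
linker then prefers the non-weak version. A hypothetical override (the
address below is purely illustrative):

    #include <linux/types.h>

    /* Strong definition: replaces the __weak default at link time. */
    phys_addr_t mips_cdmm_phys_base(void)
    {
            /* Assumed-free physical window for the CDMM region. */
            return 0x1fc10000;
    }
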
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index a64763b..6575c0f 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -107,7 +107,7 @@
 	if (!res)
 		return ERR_PTR(-ENOMEM);
 
-	regmap = bridge->ops->regmap_init(dev, bridge->context);
+	regmap = (bridge->ops->regmap_init)(dev, bridge->context);
 	if (IS_ERR(regmap)) {
 		devres_free(res);
 		return regmap;
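
The extra parentheses in the vexpress-config hunk are not cosmetic: a
function-like macro only expands when its name is immediately followed by an
opening parenthesis, so wrapping the callee suppresses the expansion. This
is presumably needed because regmap_init() gained a function-like macro
definition in <linux/regmap.h>, which would otherwise mangle the call
through the ops structure. A self-contained illustration of the mechanism:

    #define compute(x) ((x) + 1)    /* function-like macro shadowing a member */

    struct ops {
            int (*compute)(int x);
    };

    static int call(const struct ops *ops, int x)
    {
            /*
             * "ops->compute(x)" would expand the macro and fail to compile;
             * "(ops->compute)(x)" leaves the member call alone.
             */
            return (ops->compute)(x);
    }
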
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6dea3f..1341a94 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1408,8 +1408,8 @@
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end)
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end)
 {
 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
 	*stolen_size = intel_private.stolen_size;
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0df5bae..9040878 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -780,6 +780,7 @@
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_pd_pmu",
 };
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c
index 5b60beb..a918254 100644
--- a/drivers/clk/shmobile/clk-emev2.c
+++ b/drivers/clk/shmobile/clk-emev2.c
@@ -28,6 +28,8 @@
 #define USIBU1_RSTCTRL 0x0ac
 #define USIBU2_RSTCTRL 0x0b0
 #define USIBU3_RSTCTRL 0x0b4
+#define IIC0_RSTCTRL 0x0dc
+#define IIC1_RSTCTRL 0x0e0
 #define STI_RSTCTRL 0x124
 #define STI_CLKSEL 0x688
 
@@ -66,6 +68,10 @@
 	emev2_smu_write(2, USIBU1_RSTCTRL);
 	emev2_smu_write(2, USIBU2_RSTCTRL);
 	emev2_smu_write(2, USIBU3_RSTCTRL);
+
+	/* deassert reset for IIC0->IIC1 */
+	emev2_smu_write(1, IIC0_RSTCTRL);
+	emev2_smu_write(1, IIC1_RSTCTRL);
 }
 
 static void __init emev2_smu_clkdiv_init(struct device_node *np)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c03f04d8..a7726db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -111,6 +111,10 @@
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 
+config CLKSRC_PISTACHIO
+	bool
+	select CLKSRC_OF
+
 config CLKSRC_STM32
 	bool "Clocksource for STM32 SoCs" if !ARCH_STM32
 	depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
@@ -293,4 +297,12 @@
 	depends on ARM && CLKDEV_LOOKUP
 	select CLKSRC_MMIO
 
+config CLKSRC_ST_LPC
+	bool
+	depends on ARCH_STI
+	select CLKSRC_OF if OF
+	help
+	  Enable this option to use the Low Power Controller timer
+	  as a clocksource.
+
 endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f228354..5c00863 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o
 obj-$(CONFIG_MTK_TIMER)		+= mtk_timer.o
+obj-$(CONFIG_CLKSRC_PISTACHIO)	+= time-pistachio.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
@@ -60,3 +61,4 @@
 obj-$(CONFIG_H8300)			+= h8300_timer8.o
 obj-$(CONFIG_H8300_TMR16)		+= h8300_timer16.o
 obj-$(CONFIG_H8300_TPU)			+= h8300_tpu.o
+obj-$(CONFIG_CLKSRC_ST_LPC)		+= clksrc_st_lpc.o
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
new file mode 100644
index 0000000..65ec467
--- /dev/null
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -0,0 +1,131 @@
+/*
+ * Clocksource using the Low Power Timer found in the Low Power Controller (LPC)
+ *
+ * Copyright (C) 2015 STMicroelectronics – All Rights Reserved
+ *
+ * Author(s): Francesco Virlinzi <francesco.virlinzi@st.com>
+ *	      Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mfd/st-lpc.h>
+
+/* Low Power Timer */
+#define LPC_LPT_LSB_OFF		0x400
+#define LPC_LPT_MSB_OFF		0x404
+#define LPC_LPT_START_OFF	0x408
+
+static struct st_clksrc_ddata {
+	struct clk		*clk;
+	void __iomem		*base;
+} ddata;
+
+static void __init st_clksrc_reset(void)
+{
+	writel_relaxed(0, ddata.base + LPC_LPT_START_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF);
+	writel_relaxed(1, ddata.base + LPC_LPT_START_OFF);
+}
+
+static u64 notrace st_clksrc_sched_clock_read(void)
+{
+	return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF);
+}
+
+static int __init st_clksrc_init(void)
+{
+	unsigned long rate;
+	int ret;
+
+	st_clksrc_reset();
+
+	rate = clk_get_rate(ddata.clk);
+
+	sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
+
+	ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+				    "clksrc-st-lpc", rate, 300, 32,
+				    clocksource_mmio_readl_up);
+	if (ret) {
+		pr_err("clksrc-st-lpc: Failed to register clocksource\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init st_clksrc_setup_clk(struct device_node *np)
+{
+	struct clk *clk;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock\n");
+		return PTR_ERR(clk);
+	}
+
+	if (clk_prepare_enable(clk)) {
+		pr_err("clksrc-st-lpc: Failed to enable LPC clock\n");
+		return -EINVAL;
+	}
+
+	if (!clk_get_rate(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n");
+		clk_disable_unprepare(clk);
+		return -EINVAL;
+	}
+
+	ddata.clk = clk;
+
+	return 0;
+}
+
+static void __init st_clksrc_of_register(struct device_node *np)
+{
+	int ret;
+	uint32_t mode;
+
+	ret = of_property_read_u32(np, "st,lpc-mode", &mode);
+	if (ret) {
+		pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
+		return;
+	}
+
+	/* The LPC can run either as a clocksource or in RTC or WDT mode */
+	if (mode != ST_LPC_MODE_CLKSRC)
+		return;
+
+	ddata.base = of_iomap(np, 0);
+	if (!ddata.base) {
+		pr_err("clksrc-st-lpc: Unable to map iomem\n");
+		return;
+	}
+
+	if (st_clksrc_setup_clk(np)) {
+		iounmap(ddata.base);
+		return;
+	}
+
+	if (st_clksrc_init()) {
+		clk_disable_unprepare(ddata.clk);
+		clk_put(ddata.clk);
+		iounmap(ddata.base);
+		return;
+	}
+
+	pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
+		clk_get_rate(ddata.clk));
+}
+CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
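
Both the sched_clock and the clocksource registered by this driver read only
the 32-bit LSB register, so each is declared 32 bits wide and wraps after
2^32 / rate seconds; the timekeeping core copes with the wrap as long as the
counter is sampled more often than that. A quick back-of-the-envelope check
(the rate below is an assumed example, not the real LPC rate):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t rate = 46875000;         /* assumed rate in Hz */

            /* 2^32 counter ticks elapse before the 32-bit LSB value wraps */
            printf("wrap every %.1f s\n", (double)(1ULL << 32) / rate);
            return 0;   /* prints ~91.6 s for this example rate */
    }
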
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index c3810b6..02a1945 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -72,6 +72,13 @@
 	disable_percpu_irq(gic_timer_irq);
 }
 
+static void gic_update_frequency(void *data)
+{
+	unsigned long rate = (unsigned long)data;
+
+	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
+}
+
 static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
 				void *data)
 {
@@ -87,18 +94,40 @@
 	return NOTIFY_OK;
 }
 
+static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
+			    void *data)
+{
+	struct clk_notifier_data *cnd = data;
+
+	if (action == POST_RATE_CHANGE)
+		on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+
+	return NOTIFY_OK;
+}
+
 static struct notifier_block gic_cpu_nb = {
 	.notifier_call = gic_cpu_notifier,
 };
 
+static struct notifier_block gic_clk_nb = {
+	.notifier_call = gic_clk_notifier,
+};
+
 static int gic_clockevent_init(void)
 {
+	int ret;
+
 	if (!cpu_has_counter || !gic_frequency)
 		return -ENXIO;
 
-	setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	if (ret < 0)
+		return ret;
 
-	register_cpu_notifier(&gic_cpu_nb);
+	ret = register_cpu_notifier(&gic_cpu_nb);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register CPU notifier\n");
 
 	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
 
@@ -118,18 +147,17 @@
 
 static void __init __gic_clocksource_init(void)
 {
+	int ret;
+
 	/* Set clocksource mask. */
 	gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
 
 	/* Calculate a somewhat reasonable rating value. */
 	gic_clocksource.rating = 200 + gic_frequency / 10000000;
 
-	clocksource_register_hz(&gic_clocksource, gic_frequency);
-
-	gic_clockevent_init();
-
-	/* And finally start the counter */
-	gic_start_count();
+	ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register clocksource\n");
 }
 
 void __init gic_clocksource_init(unsigned int frequency)
@@ -139,11 +167,16 @@
 		GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
 
 	__gic_clocksource_init();
+	gic_clockevent_init();
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 
 static void __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
+	int ret;
 
 	if (WARN_ON(!gic_present || !node->parent ||
 		    !of_device_is_compatible(node->parent, "mti,gic")))
@@ -151,8 +184,13 @@
 
 	clk = of_clk_get(node, 0);
 	if (!IS_ERR(clk)) {
+		if (clk_prepare_enable(clk) < 0) {
+			pr_err("GIC failed to enable clock\n");
+			clk_put(clk);
+			return;
+		}
+
 		gic_frequency = clk_get_rate(clk);
-		clk_put(clk);
 	} else if (of_property_read_u32(node, "clock-frequency",
 					&gic_frequency)) {
 		pr_err("GIC frequency not specified.\n");
@@ -165,6 +203,15 @@
 	}
 
 	__gic_clocksource_init();
+
+	ret = gic_clockevent_init();
+	if (!ret && !IS_ERR(clk)) {
+		if (clk_notifier_register(clk, &gic_clk_nb) < 0)
+			pr_warn("GIC: Unable to register clock notifier\n");
+	}
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
 		       gic_clocksource_of_init);
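
The second half of the mips-gic-timer change keeps the per-CPU clockevent
frequency coherent when the GIC input clock is rescaled: the clk handle is
now kept alive (the clk_put() is gone) so a rate-change notifier can stay
registered, and on POST_RATE_CHANGE every CPU updates its own clockevent via
on_each_cpu(). A minimal sketch of that notifier pattern, with an assumed
example_update_freq() helper standing in for the per-CPU update:

    #include <linux/clk.h>
    #include <linux/notifier.h>

    static void example_update_freq(unsigned long rate)
    {
            /* per-CPU clockevent bookkeeping would go here */
    }

    static int example_clk_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
    {
            struct clk_notifier_data *cnd = data;

            /* PRE_RATE_CHANGE and ABORT_RATE_CHANGE are deliberately ignored */
            if (action == POST_RATE_CHANGE)
                    example_update_freq(cnd->new_rate);

            return NOTIFY_OK;
    }

    static struct notifier_block example_clk_nb = {
            .notifier_call = example_clk_notify,
    };

    /* at init time, with the clk held for the driver's lifetime: */
    /* clk_notifier_register(clk, &example_clk_nb); */
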
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
new file mode 100644
index 0000000..18d4266
--- /dev/null
+++ b/drivers/clocksource/time-pistachio.c
@@ -0,0 +1,217 @@
+/*
+ * Pistachio clocksource based on general-purpose timers
+ *
+ * Copyright (C) 2015 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+/* Top level reg */
+#define CR_TIMER_CTRL_CFG		0x00
+#define TIMER_ME_GLOBAL			BIT(0)
+#define CR_TIMER_REV			0x10
+
+/* Timer specific registers */
+#define TIMER_CFG			0x20
+#define TIMER_ME_LOCAL			BIT(0)
+#define TIMER_RELOAD_VALUE		0x24
+#define TIMER_CURRENT_VALUE		0x28
+#define TIMER_CURRENT_OVERFLOW_VALUE	0x2C
+#define TIMER_IRQ_STATUS		0x30
+#define TIMER_IRQ_CLEAR			0x34
+#define TIMER_IRQ_MASK			0x38
+
+#define PERIP_TIMER_CONTROL		0x90
+
+/* Timer specific configuration Values */
+#define RELOAD_VALUE			0xffffffff
+
+struct pistachio_clocksource {
+	void __iomem *base;
+	raw_spinlock_t lock;
+	struct clocksource cs;
+};
+
+static struct pistachio_clocksource pcs_gpt;
+
+#define to_pistachio_clocksource(cs)	\
+	container_of(cs, struct pistachio_clocksource, cs)
+
+static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
+{
+	return readl(base + 0x20 * gpt_id + offset);
+}
+
+static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
+		u32 gpt_id)
+{
+	writel(value, base + 0x20 * gpt_id + offset);
+}
+
+static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 counter, overflw;
+	unsigned long flags;
+
+	/*
+	 * The counter value is only refreshed after the overflow value is read,
+	 * and the two must be read in strict order; hence the raw spinlock.
+	 */
+
+	raw_spin_lock_irqsave(&pcs->lock, flags);
+	overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
+	raw_spin_unlock_irqrestore(&pcs->lock, flags);
+
+	return ~(cycle_t)counter;
+}
+
+static u64 notrace pistachio_read_sched_clock(void)
+{
+	return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
+}
+
+static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
+			int enable)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 val;
+
+	val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
+	if (enable)
+		val |= TIMER_ME_LOCAL;
+	else
+		val &= ~TIMER_ME_LOCAL;
+
+	gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
+}
+
+static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+
+	/* Disable GPT local before loading reload value */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+	gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
+	pistachio_clksrc_set_mode(cs, timeridx, true);
+}
+
+static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
+{
+	/* Disable GPT local */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+}
+
+static int pistachio_clocksource_enable(struct clocksource *cs)
+{
+	pistachio_clksrc_enable(cs, 0);
+	return 0;
+}
+
+static void pistachio_clocksource_disable(struct clocksource *cs)
+{
+	pistachio_clksrc_disable(cs, 0);
+}
+
+/* Preferred clocksource for the Pistachio platform */
+static struct pistachio_clocksource pcs_gpt = {
+	.cs =	{
+		.name		= "gptimer",
+		.rating		= 300,
+		.enable		= pistachio_clocksource_enable,
+		.disable	= pistachio_clocksource_disable,
+		.read		= pistachio_clocksource_read_cycles,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_SUSPEND_NONSTOP,
+		},
+};
+
+static void __init pistachio_clksrc_of_init(struct device_node *node)
+{
+	struct clk *sys_clk, *fast_clk;
+	struct regmap *periph_regs;
+	unsigned long rate;
+	int ret;
+
+	pcs_gpt.base = of_iomap(node, 0);
+	if (!pcs_gpt.base) {
+		pr_err("cannot iomap\n");
+		return;
+	}
+
+	periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
+	if (IS_ERR(periph_regs)) {
+		pr_err("cannot get peripheral regmap (%lu)\n",
+		       PTR_ERR(periph_regs));
+		return;
+	}
+
+	/* Switch to using the fast counter clock */
+	ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
+				 0xf, 0x0);
+	if (ret)
+		return;
+
+	sys_clk = of_clk_get_by_name(node, "sys");
+	if (IS_ERR(sys_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
+		return;
+	}
+
+	fast_clk = of_clk_get_by_name(node, "fast");
+	if (IS_ERR(fast_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
+		return;
+	}
+
+	ret = clk_prepare_enable(sys_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		return;
+	}
+
+	ret = clk_prepare_enable(fast_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		clk_disable_unprepare(sys_clk);
+		return;
+	}
+
+	rate = clk_get_rate(fast_clk);
+
+	/* Disable irq's for clocksource usage */
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+
+	/* Enable timer block */
+	writel(TIMER_ME_GLOBAL, pcs_gpt.base);
+
+	raw_spin_lock_init(&pcs_gpt.lock);
+	sched_clock_register(pistachio_read_sched_clock, 32, rate);
+	clocksource_register_hz(&pcs_gpt.cs, rate);
+}
+CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
+		       pistachio_clksrc_of_init);
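
The locking comment in pistachio_clocksource_read_cycles() is the heart of
this driver: reading TIMER_CURRENT_OVERFLOW_VALUE latches a coherent
TIMER_CURRENT_VALUE, so the two MMIO reads must stay ordered, hence the raw
spinlock around the pair. A condensed sketch of the same latched-read
pattern (example_timer and the EX_* offsets are stand-ins for the patch's
types and registers):

    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define EX_OVERFLOW     0x2C    /* stand-in for TIMER_CURRENT_OVERFLOW_VALUE */
    #define EX_CURRENT      0x28    /* stand-in for TIMER_CURRENT_VALUE */

    struct example_timer {
            raw_spinlock_t lock;
            void __iomem *base;
    };

    static u32 example_read_counter(struct example_timer *t)
    {
            unsigned long flags;
            u32 counter;

            raw_spin_lock_irqsave(&t->lock, flags);
            (void)readl(t->base + EX_OVERFLOW);     /* latch the counter */
            counter = readl(t->base + EX_CURRENT);
            raw_spin_unlock_irqrestore(&t->lock, flags);

            return ~counter;        /* down-counter: invert for an up count */
    }
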
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index c13feec..ea9728f 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -25,16 +25,21 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
+
 #include <asm/cpuidle.h>
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define CALXEDA_IDLE_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int calxeda_idle_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-	};
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
 }
 
 static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
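
With the PSCI firmware rework, psci_ops.cpu_suspend() takes a raw PSCI 0.2
power_state parameter instead of a struct psci_power_state, so the Calxeda
driver encodes its state once at compile time. Using the uapi shift values
(state ID at bit 0, type at bit 16, affinity level at bit 24) and
POWER_DOWN = 1, CALXEDA_IDLE_PARAM works out to 0x00010000; a worked check:

    #include <stdint.h>
    #include <stdio.h>

    /* shift/type values as defined in uapi/linux/psci.h */
    #define PSCI_0_2_POWER_STATE_ID_SHIFT    0
    #define PSCI_0_2_POWER_STATE_TYPE_SHIFT  16
    #define PSCI_0_2_POWER_STATE_AFFL_SHIFT  24
    #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1

    int main(void)
    {
            uint32_t param =
                    (0u << PSCI_0_2_POWER_STATE_ID_SHIFT) |
                    (0u << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
                    (PSCI_POWER_STATE_TYPE_POWER_DOWN
                            << PSCI_0_2_POWER_STATE_TYPE_SHIFT);

            printf("CALXEDA_IDLE_PARAM = 0x%08x\n", (unsigned)param);
            return 0;   /* prints 0x00010000 */
    }
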
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index bdbbe5b..b458475 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,27 +33,29 @@
 
 comment "DMA Devices"
 
-config INTEL_MIC_X100_DMA
-	tristate "Intel MIC X100 DMA Driver"
-	depends on 64BIT && X86 && INTEL_MIC_BUS
-	select DMA_ENGINE
-	help
-	  This enables DMA support for the Intel Many Integrated Core
-	  (MIC) family of PCIe form factor coprocessor X100 devices that
-	  run a 64 bit Linux OS. This driver will be used by both MIC
-	  host and card drivers.
-
-	  If you are building host kernel with a MIC device or a card
-	  kernel for a MIC device, then say M (recommended) or Y, else
-	  say N. If unsure say N.
-
-	  More information about the Intel MIC family as well as the Linux
-	  OS and tools for MIC to use with this driver are available from
-	  <http://software.intel.com/en-us/mic-developer>.
-
+#core
 config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	bool
+
+config DMA_ENGINE
+	bool
+
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
+config DMA_ACPI
+	def_bool y
+	depends on ACPI
+
+config DMA_OF
+	def_bool y
+	depends on OF
+	select DMA_ENGINE
+
+#devices
 config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA
@@ -63,9 +65,181 @@
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
 
+config AMCC_PPC440SPE_ADMA
+	tristate "AMCC PPC440SPe ADMA support"
+	depends on 440SPe || 440SP
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Enable support for the AMCC PPC440SPe RAID engines.
+
+config AT_HDMAC
+	tristate "Atmel AHB DMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel AHB DMA controller.
+
+config AT_XDMAC
+	tristate "Atmel XDMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel XDMA controller.
+
+config AXI_DMAC
+	tristate "Analog Devices AXI-DMAC DMA support"
+	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
+	  controller is often used in Analog Devices' reference designs for FPGA
+	  platforms.
+
+config COH901318
+	bool "ST-Ericsson COH901318 DMA support"
+	select DMA_ENGINE
+	depends on ARCH_U300
+	help
+	  Enable support for ST-Ericsson COH 901 318 DMA.
+
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on ARCH_BCM2835
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4740
+	tristate "JZ4740 DMA support"
+	depends on MACH_JZ4740
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4780
+	tristate "JZ4780 DMA support"
+	depends on MACH_JZ4780
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+	  If you have a board based on such a SoC and wish to use DMA for
+	  devices which can use the DMA controller, say Y or M here.
+
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select TI_DMA_CROSSBAR if SOC_DRA7XX
+
+config DMA_SA11X0
+	tristate "SA-11x0 DMA support"
+	depends on ARCH_SA1100
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine found on Intel StrongARM SA-1100 and
+	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
+	  devices.
+
+config DMA_SUN4I
+	tristate "Allwinner A10 DMA SoCs support"
+	depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
+	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the DMA controller present in the sun4i,
+	  sun5i and sun7i Allwinner ARM SoCs.
+
+config DMA_SUN6I
+	tristate "Allwinner A31 SoCs DMA support"
+	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+	depends on RESET_CONTROLLER
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the DMA engine first found in Allwinner A31 SoCs.
+
+config EP93XX_DMA
+	bool "Cirrus Logic EP93xx DMA support"
+	depends on ARCH_EP93XX
+	select DMA_ENGINE
+	help
+	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
+config FSL_DMA
+	tristate "Freescale Elo series DMA support"
+	depends on FSL_SOC
+	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	---help---
+	  Enable support for the Freescale Elo series DMA controllers.
+	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+	  some Txxx and Bxxx parts.
+
+config FSL_EDMA
+	tristate "Freescale eDMA engine support"
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources (slots).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+
+config FSL_RAID
+        tristate "Freescale RAID engine Support"
+        depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+        select DMA_ENGINE
+        select DMA_ENGINE_RAID
+        ---help---
+          Enable support for Freescale RAID Engine. RAID Engine is
+          available on some QorIQ SoCs (like P5020/P5040). It has
+          the capability to offload memcpy, xor and pq computation
+	  for raid5/6.
+
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51/53/6 chips.
+
+config IDMA64
+	tristate "Intel integrated DMA 64-bit support"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable DMA support for the Intel Low Power Subsystem, such as that
+	  found on the Intel Skylake PCH.
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
-	depends on PCI && X86
+	depends on PCI && X86_64
 	select DMA_ENGINE
 	select DMA_ENGINE_RAID
 	select DCA
@@ -85,53 +259,69 @@
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
-config IDMA64
-	tristate "Intel integrated DMA 64-bit support"
+config INTEL_MIC_X100_DMA
+	tristate "Intel MIC X100 DMA Driver"
+	depends on 64BIT && X86 && INTEL_MIC_BUS
+	select DMA_ENGINE
+	help
+	  This enables DMA support for the Intel Many Integrated Core
+	  (MIC) family of PCIe form factor coprocessor X100 devices that
+	  run a 64 bit Linux OS. This driver will be used by both MIC
+	  host and card drivers.
+
+	  If you are building host kernel with a MIC device or a card
+	  kernel for a MIC device, then say M (recommended) or Y, else
+	  say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
+
+config K3_DMA
+	tristate "Hisilicon K3 DMA support"
+	depends on ARCH_HI3xxx
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Enable DMA support for Intel Low Power Subsystem such as found on
-	  Intel Skylake PCH.
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
 
-source "drivers/dma/dw/Kconfig"
+config LPC18XX_DMAMUX
+	bool "NXP LPC18xx/43xx DMA MUX for PL080"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on OF && AMBA_PL08X
+	select MFD_SYSCON
+	help
+	  Enable support for DMA on NXP LPC18xx/43xx platforms
+	  with PL080 and multiplexed DMA request lines.
 
-config AT_HDMAC
-	tristate "Atmel AHB DMA support"
-	depends on ARCH_AT91
+config MMP_PDMA
+	bool "MMP PDMA support"
+	depends on (ARCH_MMP || ARCH_PXA)
 	select DMA_ENGINE
 	help
-	  Support the Atmel AHB DMA controller.
+	  Support the MMP PDMA engine for PXA and MMP platform.
 
-config AT_XDMAC
-	tristate "Atmel XDMA support"
-	depends on ARCH_AT91
+config MMP_TDMA
+	bool "MMP Two-Channel DMA support"
+	depends on ARCH_MMP
 	select DMA_ENGINE
+	select MMP_SRAM
 	help
-	  Support the Atmel XDMA controller.
+	  Support the MMP Two-Channel DMA engine.
+	  This engine is used for MMP Audio DMA and pxa910 SQU.
+	  It needs the sram driver under mach-mmp.
+
+	  Say Y here if you enabled MMP ADMA, otherwise say N.
 
-config FSL_DMA
-	tristate "Freescale Elo series DMA support"
-	depends on FSL_SOC
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
 	select DMA_ENGINE
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	---help---
-	  Enable support for the Freescale Elo series DMA controllers.
-	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
-	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
-	  some Txxx and Bxxx parts.
-
-config FSL_RAID
-        tristate "Freescale RAID engine Support"
-        depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
-        select DMA_ENGINE
-        select DMA_ENGINE_RAID
-        ---help---
-          Enable support for Freescale RAID Engine. RAID Engine is
-          available on some QorIQ SoCs (like P5020/P5040). It has
-          the capability to offload memcpy, xor and pq computation
-	  for raid5/6.
-
-source "drivers/dma/hsu/Kconfig"
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
@@ -140,8 +330,6 @@
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
 
-source "drivers/dma/bestcomm/Kconfig"
-
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
@@ -151,6 +339,15 @@
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MXS_DMA
+	bool "MXS DMA support"
+	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+	select STMP_DEVICE
+	select DMA_ENGINE
+	help
+	  Support the MXS DMA engine. This engine, including APBH-DMA
+	  and APBX-DMA, is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
 	depends on ARCH_MXC
@@ -170,6 +367,36 @@
 	  To avoid bloating the irq_desc[] array we allocate a sufficient
 	  number of IRQ slots and map them dynamically to specific sources.
 
+config NBPFAXI_DMA
+	tristate "Renesas Type-AXI NBPF DMA support"
+	select DMA_ENGINE
+	depends on ARM || COMPILE_TEST
+	help
+	  Support for "Type-AXI" NBPF DMA IPs from Renesas
+
+config PCH_DMA
+	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	select DMA_ENGINE
+	help
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver can also be used for the LAPIS Semiconductor IOH (Input/
+	  Output Hub) chips ML7213, ML7223 and ML7831.
+	  The ML7213 IOH is for IVI (In-Vehicle Infotainment) use, the ML7223
+	  IOH for MP (Media Phone) use and the ML7831 IOH for general purpose
+	  use. ML7213/ML7223/ML7831 are companion chips for the Intel Atom
+	  E6xx series and are fully compatible with the Intel EG20T PCH.
+
+config PL330_DMA
+	tristate "DMA API Driver for PL330"
+	select DMA_ENGINE
+	depends on ARM_AMBA
+	help
+	  Select if your platform has one or more PL330 DMACs.
+	  You need to provide platform specific settings via
+	  platform_data for a dma-pl330 device.
+
 config PXA_DMA
 	bool "PXA DMA support"
 	depends on (ARCH_MMP || ARCH_PXA)
@@ -181,6 +408,41 @@
 	  16 to 32 channels for peripheral to memory or memory to memory
 	  transfers.
 
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
+config SIRF_DMA
+	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+	depends on ARCH_SIRF
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
+config STE_DMA40
+	bool "ST-Ericsson DMA40 support"
+	depends on ARCH_U8500
+	select DMA_ENGINE
+	help
+	  Support for ST-Ericsson DMA40 controller
+
+config S3C24XX_DMAC
+	tristate "Samsung S3C24XX DMA support"
+	depends on ARCH_S3C24XX
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the Samsung S3C24XX DMA controller driver. The
+	  DMA controller has multiple DMA channels which can be
+	  configured for different peripherals like audio, UART and SPI.
+	  The DMA controller can transfer data from memory to peripheral,
+	  peripheral to memory, peripheral to peripheral and memory to memory.
+
 config TXX9_DMAC
 	tristate "Toshiba TXx9 SoC DMA support"
 	depends on MACH_TX49XX || MACH_TX39XX
@@ -201,44 +463,6 @@
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
-config S3C24XX_DMAC
-	tristate "Samsung S3C24XX DMA support"
-	depends on ARCH_S3C24XX
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support for the Samsung S3C24XX DMA controller driver. The
-	  DMA controller is having multiple DMA channels which can be
-	  configured for different peripherals like audio, UART, SPI.
-	  The DMA controller can transfer data from memory to peripheral,
-	  periphal to memory, periphal to periphal and memory to memory.
-
-source "drivers/dma/sh/Kconfig"
-
-config COH901318
-	bool "ST-Ericsson COH901318 DMA support"
-	select DMA_ENGINE
-	depends on ARCH_U300
-	help
-	  Enable support for ST-Ericsson COH 901 318 DMA.
-
-config STE_DMA40
-	bool "ST-Ericsson DMA40 support"
-	depends on ARCH_U8500
-	select DMA_ENGINE
-	help
-	  Support for ST-Ericsson DMA40 controller
-
-config AMCC_PPC440SPE_ADMA
-	tristate "AMCC PPC440SPe ADMA support"
-	depends on 440SPe || 440SP
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the AMCC PPC440SPe RAID engines.
-
 config TIMB_DMA
 	tristate "Timberdale FPGA DMA support"
 	depends on MFD_TIMBERDALE
@@ -246,12 +470,16 @@
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
-config SIRF_DMA
-	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
-	depends on ARCH_SIRF
+config TI_CPPI41
+	tristate "AM33xx CPPI41 DMA support"
+	depends on ARCH_OMAP
 	select DMA_ENGINE
 	help
-	  Enable support for the CSR SiRFprimaII DMA engine.
+	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+	  is currently used by the USB driver on AM335x platforms.
+
+config TI_DMA_CROSSBAR
+	bool
 
 config TI_EDMA
 	bool "TI EDMA support"
@@ -264,160 +492,14 @@
 	  Enable support for the TI EDMA controller. This DMA
 	  engine is found on TI DaVinci and AM33xx parts.
 
-config TI_DMA_CROSSBAR
-	bool
-
-config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	bool
-
-config PL330_DMA
-	tristate "DMA API Driver for PL330"
+config XGENE_DMA
+	tristate "APM X-Gene DMA support"
+	depends on ARCH_XGENE || COMPILE_TEST
 	select DMA_ENGINE
-	depends on ARM_AMBA
+	select DMA_ENGINE_RAID
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
-	  Select if your platform has one or more PL330 DMACs.
-	  You need to provide platform specific settings via
-	  platform_data for a dma-pl330 device.
-
-config PCH_DMA
-	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
-	depends on PCI && (X86_32 || COMPILE_TEST)
-	select DMA_ENGINE
-	help
-	  Enable support for Intel EG20T PCH DMA engine.
-
-	  This driver also can be used for LAPIS Semiconductor IOH(Input/
-	  Output Hub), ML7213, ML7223 and ML7831.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
-	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
-	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-config IMX_SDMA
-	tristate "i.MX SDMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51/53/6 chips.
-
-config IMX_DMA
-	tristate "i.MX DMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX DMA engine. This engine is integrated into
-	  Freescale i.MX1/21/27 chips.
-
-config MXS_DMA
-	bool "MXS DMA support"
-	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
-	select STMP_DEVICE
-	select DMA_ENGINE
-	help
-	  Support the MXS DMA engine. This engine including APBH-DMA
-	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
-
-config EP93XX_DMA
-	bool "Cirrus Logic EP93xx DMA support"
-	depends on ARCH_EP93XX
-	select DMA_ENGINE
-	help
-	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
-
-config DMA_SA11X0
-	tristate "SA-11x0 DMA support"
-	depends on ARCH_SA1100
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine found on Intel StrongARM SA-1100 and
-	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
-	  devices.
-
-config MMP_TDMA
-	bool "MMP Two-Channel DMA support"
-	depends on ARCH_MMP
-	select DMA_ENGINE
-	select MMP_SRAM
-	help
-	  Support the MMP Two-Channel DMA engine.
-	  This engine used for MMP Audio DMA and pxa910 SQU.
-	  It needs sram driver under mach-mmp.
-
-	  Say Y here if you enabled MMP ADMA, otherwise say N.
-
-config DMA_OMAP
-	tristate "OMAP DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	select TI_DMA_CROSSBAR if SOC_DRA7XX
-
-config DMA_BCM2835
-	tristate "BCM2835 DMA engine support"
-	depends on ARCH_BCM2835
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config TI_CPPI41
-	tristate "AM33xx CPPI41 DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	help
-	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-	  is currently used by the USB driver on AM335x platforms.
-
-config MMP_PDMA
-	bool "MMP PDMA support"
-	depends on (ARCH_MMP || ARCH_PXA)
-	select DMA_ENGINE
-	help
-	  Support the MMP PDMA engine for PXA and MMP platform.
-
-config DMA_JZ4740
-	tristate "JZ4740 DMA support"
-	depends on MACH_JZ4740
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config DMA_JZ4780
-	tristate "JZ4780 DMA support"
-	depends on MACH_JZ4780
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
-	  If you have a board based on such a SoC and wish to use DMA for
-	  devices which can use the DMA controller, say Y or M here.
-
-config K3_DMA
-	tristate "Hisilicon K3 DMA support"
-	depends on ARCH_HI3xxx
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine for Hisilicon K3 platform
-	  devices.
-
-config MOXART_DMA
-	tristate "MOXART DMA support"
-	depends on ARCH_MOXART
-	select DMA_ENGINE
-	select DMA_OF
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the MOXA ART SoC DMA controller.
- 
-config FSL_EDMA
-	tristate "Freescale eDMA engine support"
-	depends on OF
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the Freescale eDMA engine with programmable channel
-	  multiplexing capability for DMA request sources(slot).
-	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+	  Enable support for the APM X-Gene SoC DMA engine.
 
 config XILINX_VDMA
 	tristate "Xilinx AXI VDMA Engine"
@@ -433,55 +515,25 @@
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
-config DMA_SUN6I
-	tristate "Allwinner A31 SoCs DMA support"
-	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
-	depends on RESET_CONTROLLER
+config ZX_DMA
+	tristate "ZTE ZX296702 DMA support"
+	depends on ARCH_ZX
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Support for the DMA engine first found in Allwinner A31 SoCs.
+	  Support the DMA engine for ZTE ZX296702 platform devices.
 
-config NBPFAXI_DMA
-	tristate "Renesas Type-AXI NBPF DMA support"
-	select DMA_ENGINE
-	depends on ARM || COMPILE_TEST
-	help
-	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
-config IMG_MDC_DMA
-	tristate "IMG MDC support"
-	depends on MIPS || COMPILE_TEST
-	depends on MFD_SYSCON
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the IMG multi-threaded DMA controller (MDC).
+# driver files
+source "drivers/dma/bestcomm/Kconfig"
 
-config XGENE_DMA
-	tristate "APM X-Gene DMA support"
-	depends on ARCH_XGENE || COMPILE_TEST
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the APM X-Gene SoC DMA engine.
+source "drivers/dma/dw/Kconfig"
 
-config DMA_ENGINE
-	bool
+source "drivers/dma/hsu/Kconfig"
 
-config DMA_VIRTUAL_CHANNELS
-	tristate
+source "drivers/dma/sh/Kconfig"
 
-config DMA_ACPI
-	def_bool y
-	depends on ACPI
-
-config DMA_OF
-	def_bool y
-	depends on OF
-	select DMA_ENGINE
-
+# clients
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
@@ -506,13 +558,4 @@
 config DMA_ENGINE_RAID
 	bool
 
-config QCOM_BAM_DMA
-	tristate "QCOM BAM DMA support"
-	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	---help---
-	  Enable support for the QCOM BAM DMA controller.  This controller
-	  provides DMA capabilities for a variety of on-chip devices.
-
 endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 56ff8c7..7711a71 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,59 +1,69 @@
+#dmaengine debug flags
 subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
 subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
+#core
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
+#dmatest
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_HSU_DMA) += hsu/
-obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
-obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_IDMA64) += idma64.o
-obj-$(CONFIG_DW_DMAC_CORE) += dw/
+
+#devices
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
-obj-$(CONFIG_MX3_IPU) += ipu/
-obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
-obj-$(CONFIG_IMX_DMA) += imx-dma.o
-obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
-obj-$(CONFIG_TI_EDMA) += edma.o
-obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
-obj-$(CONFIG_PL330_DMA) += pl330.o
-obj-$(CONFIG_PCH_DMA) += pch_dma.o
-obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
-obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
-obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
-obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
-obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
-obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_K3_DMA) += k3dma.o
-obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
-obj-$(CONFIG_FSL_RAID) += fsl_raid.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
-obj-y += xilinx/
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
-obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IDMA64) += idma64.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
+obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
+obj-$(CONFIG_PXA_DMA) += pxa_dma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
+obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+
+obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5de3cf4..9b42c05 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -2030,10 +2032,188 @@
 }
 #endif
 
+#ifdef CONFIG_OF
+static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
+					 u32 id)
+{
+	struct pl08x_dma_chan *chan;
+
+	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
+		if (chan->signal == id)
+			return &chan->vc.chan;
+	}
+
+	return NULL;
+}
+
+static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
+				       struct of_dma *ofdma)
+{
+	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
+	struct pl08x_channel_data *data;
+	struct pl08x_dma_chan *chan;
+	struct dma_chan *dma_chan;
+
+	if (!pl08x)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
+	if (dma_chan)
+		return dma_get_slave_channel(dma_chan);
+
+	chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
+			    GFP_KERNEL);
+	if (!chan)
+		return NULL;
+
+	data = (void *)&chan[1];
+	data->bus_id = "(none)";
+	data->periph_buses = dma_spec->args[1];
+
+	chan->cd = data;
+	chan->host = pl08x;
+	chan->slave = true;
+	chan->name = data->bus_id;
+	chan->state = PL08X_CHAN_IDLE;
+	chan->signal = dma_spec->args[0];
+	chan->vc.desc_free = pl08x_desc_free;
+
+	vchan_init(&chan->vc, &pl08x->slave);
+
+	return dma_get_slave_channel(&chan->vc.chan);
+}
+
+static int pl08x_of_probe(struct amba_device *adev,
+			  struct pl08x_driver_data *pl08x,
+			  struct device_node *np)
+{
+	struct pl08x_platform_data *pd;
+	u32 cctl_memcpy = 0;
+	u32 val;
+	int ret;
+
+	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	/* Eligible bus masters for fetching LLIs */
+	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
+		pd->lli_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
+		pd->lli_buses |= PL08X_AHB2;
+	if (!pd->lli_buses) {
+		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
+		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Eligible bus masters for memory access */
+	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
+		pd->mem_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
+		pd->mem_buses |= PL08X_AHB2;
+	if (!pd->mem_buses) {
+		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
+		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Parse the memcpy channel properties */
+	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
+		val = 1;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
+		/* Fall through */
+	case 1:
+		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 4:
+		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 8:
+		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 64:
+		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 128:
+		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 256:
+		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	}
+
+	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
+		val = 8;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
+		/* Fall through */
+	case 8:
+		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	}
+
+	/* This is currently the only setting that makes sense */
+	cctl_memcpy |= PL080_CONTROL_PROT_SYS;
+
+	/* Set up memcpy channel */
+	pd->memcpy_channel.bus_id = "memcpy";
+	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
+	/* Use the buses that can access memory, obviously */
+	pd->memcpy_channel.periph_buses = pd->mem_buses;
+
+	pl08x->pd = pd;
+
+	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
+					  pl08x);
+}
+#else
+static inline int pl08x_of_probe(struct amba_device *adev,
+				 struct pl08x_driver_data *pl08x,
+				 struct device_node *np)
+{
+	return -EINVAL;
+}
+#endif
+
 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
+	struct device_node *np = adev->dev.of_node;
 	u32 tsfr_size;
 	int ret = 0;
 	int i;
@@ -2093,9 +2273,15 @@
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
 	if (!pl08x->pd) {
-		dev_err(&adev->dev, "no platform data supplied\n");
-		ret = -EINVAL;
-		goto out_no_platdata;
+		if (np) {
+			ret = pl08x_of_probe(adev, pl08x, np);
+			if (ret)
+				goto out_no_platdata;
+		} else {
+			dev_err(&adev->dev, "no platform data supplied\n");
+			ret = -EINVAL;
+			goto out_no_platdata;
+		}
 	}
 
 	/* Assign useful pointers to the driver state */
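
pl08x_of_xlate() above fabricates a slave channel on demand for a two-cell
DMA specifier (cell 0: the request signal, cell 1: the peripheral bus mask),
and it carves the channel and its pl08x_channel_data out of one allocation,
with "data = (void *)&chan[1]" pointing just past the channel struct. A
stripped-down sketch of that single-allocation idiom (the types below are
stand-ins, not the driver's):

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ex_chan { int state; };
    struct ex_data { const char *bus_id; u8 periph_buses; };

    static struct ex_chan *ex_alloc(struct device *dev, u8 buses)
    {
            struct ex_chan *chan;
            struct ex_data *data;

            /* one devm allocation carries both structures */
            chan = devm_kzalloc(dev, sizeof(*chan) + sizeof(*data),
                                GFP_KERNEL);
            if (!chan)
                    return NULL;

            data = (void *)&chan[1];        /* storage right after chan */
            data->bus_id = "(none)";
            data->periph_buses = buses;

            return chan;
    }
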
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d3629b7..58d4062 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -448,6 +448,7 @@
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
@@ -456,6 +457,13 @@
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset = false;
+	}
+
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
 	/* move myself to free_list */
@@ -717,14 +725,14 @@
 	size_t			len = 0;
 	int			i;
 
+	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+		return NULL;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, xt->src_start, xt->dst_start, xt->numf,
 		xt->frame_size, flags);
 
-	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
-		return NULL;
-
 	/*
 	 * The controller can only "skip" X bytes every Y bytes, so we
 	 * need to make sure we are given a template that fit that
@@ -873,6 +881,93 @@
 	return NULL;
 }
 
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+		    size_t len, unsigned long flags)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	struct at_desc		*desc = NULL;
+	size_t			xfer_count;
+	u32			ctrla;
+	u32			ctrlb;
+
+	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+		dest, value, len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+			__func__);
+		return NULL;
+	}
+
+	xfer_count = len >> 2;
+	if (xfer_count > ATC_BTSIZE_MAX) {
+		dev_err(chan2dev(chan), "%s: buffer is too big\n",
+			__func__);
+		return NULL;
+	}
+
+	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_FIXED
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	ctrla = ATC_SRC_WIDTH(2) |
+		ATC_DST_WIDTH(2);
+
+	desc = atc_desc_get(atchan);
+	if (!desc) {
+		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+			__func__);
+		return NULL;
+	}
+
+	desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+					    &desc->memset_paddr);
+	if (!desc->memset_vaddr) {
+		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+			__func__);
+		goto err_put_desc;
+	}
+
+	*desc->memset_vaddr = value;
+	desc->memset = true;
+
+	desc->lli.saddr = desc->memset_paddr;
+	desc->lli.daddr = dest;
+	desc->lli.ctrla = ctrla | xfer_count;
+	desc->lli.ctrlb = ctrlb;
+
+	desc->txd.cookie = -EBUSY;
+	desc->len = len;
+	desc->total_len = len;
+
+	/* set end-of-link on the descriptor */
+	set_desc_eol(desc);
+
+	desc->txd.flags = flags;
+
+	return &desc->txd;
+
+err_put_desc:
+	atc_desc_put(atchan, desc);
+	return NULL;
+}
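
For orientation, this is roughly how a dmaengine client would consume the
new DMA_MEMSET capability: a minimal sketch, not part of this patch. The
function name and error handling are illustrative assumptions; only the
standard <linux/dmaengine.h> API is used.

static int example_dma_fill(dma_addr_t dst, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMSET, mask);
	/* grab any channel advertising DMA_MEMSET */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* dst and len must honor fill_align (4 bytes on this controller) */
	tx = chan->device->device_prep_dma_memset(chan, dst, 0xa5, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* a real client would wait for completion via a tx callback */
	return 0;
}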
+
 
 /**
  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
@@ -1755,6 +1850,8 @@
 	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
@@ -1818,7 +1915,16 @@
 	if (!atdma->dma_desc_pool) {
 		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
 		err = -ENOMEM;
-		goto err_pool_create;
+		goto err_desc_pool_create;
+	}
+
+	/* create a pool of consistent memory blocks for memset blocks */
+	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
+					     &pdev->dev, sizeof(int), 4, 0);
+	if (!atdma->memset_pool) {
+		dev_err(&pdev->dev, "No memory for memset dma pool\n");
+		err = -ENOMEM;
+		goto err_memset_pool_create;
 	}
 
 	/* clear any pending interrupt */
@@ -1864,6 +1970,11 @@
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
+	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
+		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+	}
+
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
 		/* controller can do slave DMA: can trigger cyclic transfers */
@@ -1884,8 +1995,9 @@
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
+	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
 	  plat_dat->nr_channels);
@@ -1910,8 +2022,10 @@
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+	dma_pool_destroy(atdma->memset_pool);
+err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
-err_pool_create:
+err_desc_pool_create:
 	free_irq(platform_get_irq(pdev, 0), atdma);
 err_irq:
 	clk_disable_unprepare(atdma->clk);
@@ -1936,6 +2050,7 @@
 	at_dma_off(atdma);
 	dma_async_device_unregister(&atdma->dma_common);
 
+	dma_pool_destroy(atdma->memset_pool);
 	dma_pool_destroy(atdma->dma_desc_pool);
 	free_irq(platform_get_irq(pdev, 0), atdma);
 
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f5a082..c3bebbe 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -200,6 +200,11 @@
 	size_t				boundary;
 	size_t				dst_hole;
 	size_t				src_hole;
+
+	/* Memset temporary buffer */
+	bool				memset;
+	dma_addr_t			memset_paddr;
+	int				*memset_vaddr;
 };
 
 static inline struct at_desc *
@@ -330,6 +335,7 @@
 	u8			all_chan_mask;
 
 	struct dma_pool		*dma_desc_pool;
+	struct dma_pool		*memset_pool;
 	/* AT THE END channels table */
 	struct at_dma_chan	chan[0];
 };
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 40afa2a..a165b4b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -625,12 +625,12 @@
 		       unsigned int sg_len, enum dma_transfer_direction direction,
 		       unsigned long flags, void *context)
 {
-	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
-	struct at_xdmac_desc	*first = NULL, *prev = NULL;
-	struct scatterlist	*sg;
-	int			i;
-	unsigned int		xfer_size = 0;
-	unsigned long		irqflags;
+	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc		*first = NULL, *prev = NULL;
+	struct scatterlist		*sg;
+	int				i;
+	unsigned int			xfer_size = 0;
+	unsigned long			irqflags;
 	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
@@ -797,10 +797,7 @@
 		list_add_tail(&desc->desc_node, &first->descs_list);
 	}
 
-	prev->lld.mbr_nda = first->tx_dma_desc.phys;
-	dev_dbg(chan2dev(chan),
-		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
-		__func__, prev, &prev->lld.mbr_nda);
+	at_xdmac_queue_desc(chan, prev, first);
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = buf_len;
 	first->direction = direction;
@@ -1135,7 +1132,7 @@
 	 * SAMA5D4x), so we can use the same interface for source and dest,
 	 * that solves the fact we don't know the direction.
 	 */
-	u32			chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+	u32			chan_cc = AT_XDMAC_CC_DAM_UBS_AM
 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
 					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
@@ -1203,6 +1200,168 @@
 	return &desc->tx_dma_desc;
 }
 
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			    unsigned int sg_len, int value,
+			    unsigned long flags)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc	*desc, *pdesc = NULL,
+				*ppdesc = NULL, *first = NULL;
+	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
+	size_t			stride = 0, pstride = 0, len = 0;
+	int			i;
+
+	if (!sgl)
+		return NULL;
+
+	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
+		__func__, sg_len, value, flags);
+
+	/* Prepare descriptors. */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
+			__func__, sg_dma_address(sg), sg_dma_len(sg),
+			value, flags);
+		desc = at_xdmac_memset_create_desc(chan, atchan,
+						   sg_dma_address(sg),
+						   sg_dma_len(sg),
+						   value);
+		if (!desc && first)
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+
+		if (!first)
+			first = desc;
+
+		/* Update our strides */
+		pstride = stride;
+		if (psg)
+			stride = sg_dma_address(sg) -
+				(sg_dma_address(psg) + sg_dma_len(psg));
+
+		/*
+		 * The scatterlist API gives us only the address and
+		 * length of each element.
+		 *
+		 * Unfortunately, we don't have the stride, which we
+		 * will need to compute.
+		 *
+		 * That leaves us in a situation like this one:
+		 *    len    stride    len    stride    len
+		 * +-------+        +-------+        +-------+
+		 * |  N-2  |        |  N-1  |        |   N   |
+		 * +-------+        +-------+        +-------+
+		 *
+		 * We need all three of these elements (N-2, N-1 and N)
+		 * to decide whether we need to queue N-1 or reuse N-2.
+		 *
+		 * We will only consider N if it is the last element.
+		 */
+		if (ppdesc && pdesc) {
+			if ((stride == pstride) &&
+			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
+				dev_dbg(chan2dev(chan),
+					"%s: desc 0x%p can be merged with desc 0x%p\n",
+					__func__, pdesc, ppdesc);
+
+				/*
+				 * Increment the block count of the
+				 * N-2 descriptor
+				 */
+				at_xdmac_increment_block_count(chan, ppdesc);
+				ppdesc->lld.mbr_dus = stride;
+
+				/*
+				 * Put back the N-1 descriptor in the
+				 * free descriptor list
+				 */
+				list_add_tail(&pdesc->desc_node,
+					      &atchan->free_descs_list);
+
+				/*
+				 * Make our N-1 descriptor pointer
+				 * point to the N-2 since they were
+				 * actually merged.
+				 */
+				pdesc = ppdesc;
+
+			/*
+			 * Rule out the case where we don't have
+			 * pstride computed yet (our second sg
+			 * element)
+			 *
+			 * We also want to catch the case where there
+			 * would be a negative stride.
+			 */
+			} else if (pstride ||
+				   sg_dma_address(sg) < sg_dma_address(psg)) {
+				/*
+				 * Queue the N-1 descriptor after the
+				 * N-2
+				 */
+				at_xdmac_queue_desc(chan, ppdesc, pdesc);
+
+				/*
+				 * Add the N-1 descriptor to the list
+				 * of the descriptors used for this
+				 * transfer
+				 */
+				list_add_tail(&desc->desc_node,
+					      &first->descs_list);
+				dev_dbg(chan2dev(chan),
+					"%s: add desc 0x%p to descs_list 0x%p\n",
+					__func__, desc, first);
+			}
+		}
+
+		/*
+		 * If we are the last element, just see if we have the
+		 * same size as the previous element.
+		 *
+		 * If so, we can merge it with the previous descriptor
+		 * since we don't care about the stride anymore.
+		 */
+		if ((i == (sg_len - 1)) &&
+		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: desc 0x%p can be merged with desc 0x%p\n",
+				__func__, desc, pdesc);
+
+			/*
+			 * Increment the block count of the N-1
+			 * descriptor
+			 */
+			at_xdmac_increment_block_count(chan, pdesc);
+			pdesc->lld.mbr_dus = stride;
+
+			/*
+			 * Put back the N descriptor in the free
+			 * descriptor list
+			 */
+			list_add_tail(&desc->desc_node,
+				      &atchan->free_descs_list);
+		}
+
+		/* Update our descriptors */
+		ppdesc = pdesc;
+		pdesc = desc;
+
+		/* Update our scatter pointers */
+		ppsg = psg;
+		psg = sg;
+
+		len += sg_dma_len(sg);
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
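
To make the three-element window described above concrete, the merge test
reduces to the following standalone sketch (illustrative only; the helper
name is hypothetical):

/*
 * Chunks N-2 and N-1 can be folded into one hardware descriptor when
 * both the inter-chunk gap (stride) and the chunk length repeat.
 */
static bool example_can_merge(dma_addr_t ppsg_addr, size_t ppsg_len,
			      dma_addr_t psg_addr, size_t psg_len,
			      dma_addr_t sg_addr)
{
	size_t pstride = psg_addr - (ppsg_addr + ppsg_len);
	size_t stride = sg_addr - (psg_addr + psg_len);

	return stride == pstride && ppsg_len == psg_len;
}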
+
 static enum dma_status
 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
@@ -1736,6 +1895,7 @@
 	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
 	/*
 	 * Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1751,6 +1911,7 @@
 	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
 	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
+	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
 	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
 	atxdmac->dma.device_config			= at_xdmac_device_config;
 	atxdmac->dma.device_pause			= at_xdmac_device_pause;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index fd22dd3..c340ca9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2730,7 +2730,7 @@
 	 * This controller can only access address at even 32bit boundaries,
 	 * i.e. 2^2
 	 */
-	base->dma_memcpy.copy_align = 2;
+	base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	err = dma_async_device_register(&base->dma_memcpy);
 
 	if (err)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
new file mode 100644
index 0000000..5b2395e
--- /dev/null
+++ b/drivers/dma/dma-axi-dmac.c
@@ -0,0 +1,691 @@
+/*
+ * Driver for the Analog Devices AXI-DMAC core
+ *
+ * Copyright 2013-2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/axi-dmac.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
+ * various instantiation parameters which decide the exact feature set
+ * supported by the core.
+ *
+ * Each channel of the core has a source interface and a destination interface.
+ * The number of channels and the type of the channel interfaces are selected
+ * at configuration time. An interface can either be connected to a central
+ * memory interconnect, which allows access to system memory, or it can be
+ * connected to a dedicated bus which is directly connected to a data port on a
+ * peripheral. Given that those are configuration options of the core that are
+ * selected when it is instantiated, this means that they cannot be changed by
+ * software at runtime. By extension this means that each channel is
+ * uni-directional. It can either be device to memory or memory to device, but
+ * not both. Also, since the device side is a dedicated data bus only connected
+ * to a single peripheral, there is no address that can or needs to be
+ * configured for the device side.
+ */
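
Since each channel's direction is fixed when the core is instantiated, a
client driver simply requests the channel and prepares transfers in that one
direction. A hedged sketch, assuming a "rx" channel name in the client's
device tree node and a hypothetical completion callback:

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	chan = dma_request_slave_channel(dev, "rx");	/* DEV_TO_MEM channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	tx->callback = example_period_done;	/* hypothetical callback */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);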
+
+#define AXI_DMAC_REG_IRQ_MASK		0x80
+#define AXI_DMAC_REG_IRQ_PENDING	0x84
+#define AXI_DMAC_REG_IRQ_SOURCE		0x88
+
+#define AXI_DMAC_REG_CTRL		0x400
+#define AXI_DMAC_REG_TRANSFER_ID	0x404
+#define AXI_DMAC_REG_START_TRANSFER	0x408
+#define AXI_DMAC_REG_FLAGS		0x40c
+#define AXI_DMAC_REG_DEST_ADDRESS	0x410
+#define AXI_DMAC_REG_SRC_ADDRESS	0x414
+#define AXI_DMAC_REG_X_LENGTH		0x418
+#define AXI_DMAC_REG_Y_LENGTH		0x41c
+#define AXI_DMAC_REG_DEST_STRIDE	0x420
+#define AXI_DMAC_REG_SRC_STRIDE		0x424
+#define AXI_DMAC_REG_TRANSFER_DONE	0x428
+#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
+#define AXI_DMAC_REG_STATUS		0x430
+#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
+#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
+
+#define AXI_DMAC_CTRL_ENABLE		BIT(0)
+#define AXI_DMAC_CTRL_PAUSE		BIT(1)
+
+#define AXI_DMAC_IRQ_SOT		BIT(0)
+#define AXI_DMAC_IRQ_EOT		BIT(1)
+
+#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
+
+struct axi_dmac_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dest_addr;
+	unsigned int x_len;
+	unsigned int y_len;
+	unsigned int dest_stride;
+	unsigned int src_stride;
+	unsigned int id;
+};
+
+struct axi_dmac_desc {
+	struct virt_dma_desc vdesc;
+	bool cyclic;
+
+	unsigned int num_submitted;
+	unsigned int num_completed;
+	unsigned int num_sgs;
+	struct axi_dmac_sg sg[];
+};
+
+struct axi_dmac_chan {
+	struct virt_dma_chan vchan;
+
+	struct axi_dmac_desc *next_desc;
+	struct list_head active_descs;
+	enum dma_transfer_direction direction;
+
+	unsigned int src_width;
+	unsigned int dest_width;
+	unsigned int src_type;
+	unsigned int dest_type;
+
+	unsigned int max_length;
+	unsigned int align_mask;
+
+	bool hw_cyclic;
+	bool hw_2d;
+};
+
+struct axi_dmac {
+	void __iomem *base;
+	int irq;
+
+	struct clk *clk;
+
+	struct dma_device dma_dev;
+	struct axi_dmac_chan chan;
+
+	struct device_dma_parameters dma_parms;
+};
+
+static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct axi_dmac,
+		dma_dev);
+}
+
+static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
+{
+	return container_of(c, struct axi_dmac_chan, vchan.chan);
+}
+
+static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct axi_dmac_desc, vdesc);
+}
+
+static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
+	unsigned int val)
+{
+	writel(val, axi_dmac->base + reg);
+}
+
+static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
+{
+	return readl(axi_dmac->base + reg);
+}
+
+static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
+{
+	if (len == 0 || len > chan->max_length)
+		return false;
+	if ((len & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
+{
+	if ((addr & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	struct virt_dma_desc *vdesc;
+	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *sg;
+	unsigned int flags = 0;
+	unsigned int val;
+
+	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+	if (val) /* Queue is full, wait for the next SOT IRQ */
+		return;
+
+	desc = chan->next_desc;
+
+	if (!desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return;
+		list_move_tail(&vdesc->node, &chan->active_descs);
+		desc = to_axi_dmac_desc(vdesc);
+	}
+	sg = &desc->sg[desc->num_submitted];
+
+	desc->num_submitted++;
+	if (desc->num_submitted == desc->num_sgs)
+		chan->next_desc = NULL;
+	else
+		chan->next_desc = desc;
+
+	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
+	}
+
+	if (axi_dmac_src_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+	}
+
+	/*
+	 * If the hardware supports cyclic transfers and there is no callback to
+	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
+	 */
+	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+		flags |= AXI_DMAC_FLAG_CYCLIC;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
+}
+
+static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
+{
+	return list_first_entry_or_null(&chan->active_descs,
+		struct axi_dmac_desc, vdesc.node);
+}
+
+static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+	unsigned int completed_transfers)
+{
+	struct axi_dmac_desc *active;
+	struct axi_dmac_sg *sg;
+
+	active = axi_dmac_active_desc(chan);
+	if (!active)
+		return;
+
+	if (active->cyclic) {
+		vchan_cyclic_callback(&active->vdesc);
+	} else {
+		do {
+			sg = &active->sg[active->num_completed];
+			if (!(BIT(sg->id) & completed_transfers))
+				break;
+			active->num_completed++;
+			if (active->num_completed == active->num_sgs) {
+				list_del(&active->vdesc.node);
+				vchan_cookie_complete(&active->vdesc);
+				active = axi_dmac_active_desc(chan);
+			}
+		} while (active);
+	}
+}
+
+static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
+{
+	struct axi_dmac *dmac = devid;
+	unsigned int pending;
+
+	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
+
+	spin_lock(&dmac->chan.vchan.lock);
+	/* One or more transfers have finished */
+	if (pending & AXI_DMAC_IRQ_EOT) {
+		unsigned int completed;
+
+		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+		axi_dmac_transfer_done(&dmac->chan, completed);
+	}
+	/* Space has become available in the descriptor queue */
+	if (pending & AXI_DMAC_IRQ_SOT)
+		axi_dmac_start_transfer(&dmac->chan);
+	spin_unlock(&dmac->chan.vchan.lock);
+
+	return IRQ_HANDLED;
+}
+
+static int axi_dmac_terminate_all(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
+	chan->next_desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	list_splice_tail_init(&chan->active_descs, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static void axi_dmac_issue_pending(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan))
+		axi_dmac_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+{
+	struct axi_dmac_desc *desc;
+
+	desc = kzalloc(sizeof(struct axi_dmac_desc) +
+		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->num_sgs = num_sgs;
+
+	return desc;
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	desc = axi_dmac_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
+		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
+			kfree(desc);
+			return NULL;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = sg_dma_address(sg);
+		else
+			desc->sg[i].src_addr = sg_dma_address(sg);
+		desc->sg[i].x_len = sg_dma_len(sg);
+		desc->sg[i].y_len = 1;
+	}
+
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	unsigned int num_periods, i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	if (!axi_dmac_check_len(chan, buf_len) ||
+	    !axi_dmac_check_addr(chan, buf_addr))
+		return NULL;
+
+	if (period_len == 0 || buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = axi_dmac_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = buf_addr;
+		else
+			desc->sg[i].src_addr = buf_addr;
+		desc->sg[i].x_len = period_len;
+		desc->sg[i].y_len = 1;
+		buf_addr += period_len;
+	}
+
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
+	struct dma_chan *c, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	size_t dst_icg, src_icg;
+
+	if (xt->frame_size != 1)
+		return NULL;
+
+	if (xt->dir != chan->direction)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
+			return NULL;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
+			return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+
+	if (chan->hw_2d) {
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
+		    !axi_dmac_check_len(chan, xt->numf))
+			return NULL;
+		if (xt->sgl[0].size + dst_icg > chan->max_length ||
+		    xt->sgl[0].size + src_icg > chan->max_length)
+			return NULL;
+	} else {
+		if (dst_icg != 0 || src_icg != 0)
+			return NULL;
+		if (chan->max_length / xt->sgl[0].size < xt->numf)
+			return NULL;
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
+			return NULL;
+	}
+
+	desc = axi_dmac_alloc_desc(1);
+	if (!desc)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		desc->sg[0].src_addr = xt->src_start;
+		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		desc->sg[0].dest_addr = xt->dst_start;
+		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+	}
+
+	if (chan->hw_2d) {
+		desc->sg[0].x_len = xt->sgl[0].size;
+		desc->sg[0].y_len = xt->numf;
+	} else {
+		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
+		desc->sg[0].y_len = 1;
+	}
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void axi_dmac_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+}
+
+/*
+ * The configuration stored in the devicetree matches the configuration
+ * parameters of the peripheral instance and allows the driver to know which
+ * features are implemented and how it should behave.
+ */
+static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
+	struct axi_dmac_chan *chan)
+{
+	u32 val;
+	int ret;
+
+	ret = of_property_read_u32(of_chan, "reg", &val);
+	if (ret)
+		return ret;
+
+	/* We only support 1 channel for now */
+	if (val != 0)
+		return -EINVAL;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->src_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->dest_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->src_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->dest_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
+	if (ret)
+		return ret;
+
+	if (val >= 32)
+		chan->max_length = UINT_MAX;
+	else
+		chan->max_length = (1ULL << val) - 1;
+
+	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_MEM;
+	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_DEV;
+	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_DEV_TO_MEM;
+	else
+		chan->direction = DMA_DEV_TO_DEV;
+
+	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
+	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
+
+	return 0;
+}
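
For example (illustrative values, not taken from this patch), a channel with
adi,length-width = <24>, a 64-bit destination bus and a 32-bit source bus
would be parsed as:

	chan->max_length = (1ULL << 24) - 1;	/* 0x00ffffff, 16 MiB - 1 */
	chan->align_mask = max(8, 4) - 1;	/* 0x7, 8-byte alignment */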
+
+static int axi_dmac_probe(struct platform_device *pdev)
+{
+	struct device_node *of_channels, *of_chan;
+	struct dma_device *dma_dev;
+	struct axi_dmac *dmac;
+	struct resource *res;
+	int ret;
+
+	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	if (!dmac)
+		return -ENOMEM;
+
+	dmac->irq = platform_get_irq(pdev, 0);
+	if (dmac->irq <= 0)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmac->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmac->base))
+		return PTR_ERR(dmac->base);
+
+	dmac->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dmac->clk))
+		return PTR_ERR(dmac->clk);
+
+	INIT_LIST_HEAD(&dmac->chan.active_descs);
+
+	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
+	if (of_channels == NULL)
+		return -ENODEV;
+
+	for_each_child_of_node(of_channels, of_chan) {
+		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+		if (ret) {
+			of_node_put(of_chan);
+			of_node_put(of_channels);
+			return -EINVAL;
+		}
+	}
+	of_node_put(of_channels);
+
+	pdev->dev.dma_parms = &dmac->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+
+	dma_dev = &dmac->dma_dev;
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
+	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_issue_pending = axi_dmac_issue_pending;
+	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
+	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
+	dma_dev->device_terminate_all = axi_dmac_terminate_all;
+	dma_dev->dev = &pdev->dev;
+	dma_dev->chancnt = 1;
+	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
+	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
+	dma_dev->directions = BIT(dmac->chan.direction);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
+	vchan_init(&dmac->chan.vchan, dma_dev);
+
+	ret = clk_prepare_enable(dmac->clk);
+	if (ret < 0)
+		return ret;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto err_clk_disable;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister_device;
+
+	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+		dev_name(&pdev->dev), dmac);
+	if (ret)
+		goto err_unregister_of;
+
+	platform_set_drvdata(pdev, dmac);
+
+	return 0;
+
+err_unregister_of:
+	of_dma_controller_free(pdev->dev.of_node);
+err_unregister_device:
+	dma_async_device_unregister(&dmac->dma_dev);
+err_clk_disable:
+	clk_disable_unprepare(dmac->clk);
+
+	return ret;
+}
+
+static int axi_dmac_remove(struct platform_device *pdev)
+{
+	struct axi_dmac *dmac = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	free_irq(dmac->irq, dmac);
+	tasklet_kill(&dmac->chan.vchan.task);
+	dma_async_device_unregister(&dmac->dma_dev);
+	clk_disable_unprepare(dmac->clk);
+
+	return 0;
+}
+
+static const struct of_device_id axi_dmac_of_match_table[] = {
+	{ .compatible = "adi,axi-dmac-1.00.a" },
+	{ },
+};
+
+static struct platform_driver axi_dmac_driver = {
+	.driver = {
+		.name = "dma-axi-dmac",
+		.of_match_table = axi_dmac_of_match_table,
+	},
+	.probe = axi_dmac_probe,
+	.remove = axi_dmac_remove,
+};
+module_platform_driver(axi_dmac_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 26d2f0e..dade7c4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -145,7 +145,8 @@
 	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
 };
 
-struct jz4780_dma_data {
+struct jz4780_dma_filter_data {
+	struct device_node *of_node;
 	uint32_t transfer_type;
 	int channel;
 };
@@ -214,11 +215,25 @@
 	kfree(desc);
 }
 
-static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
 {
-	*ord = ffs(val) - 1;
+	int ord = ffs(val) - 1;
 
-	switch (*ord) {
+	/*
+	 * 8 byte transfer sizes are unsupported, so fall back on 4. If the
+	 * size is larger than the maximum, just limit it. It is perfectly
+	 * safe to fall back in this way since we won't exceed the maximum
+	 * burst size supported by the device; the only effect is reduced
+	 * efficiency. This is better than refusing to perform the request
+	 * at all.
+	 */
+	if (ord == 3)
+		ord = 2;
+	else if (ord > 7)
+		ord = 7;
+
+	*shift = ord;
+
+	switch (ord) {
 	case 0:
 		return JZ_DMA_SIZE_1_BYTE;
 	case 1:
@@ -231,20 +246,17 @@
 		return JZ_DMA_SIZE_32_BYTE;
 	case 6:
 		return JZ_DMA_SIZE_64_BYTE;
-	case 7:
-		return JZ_DMA_SIZE_128_BYTE;
 	default:
-		return -EINVAL;
+		return JZ_DMA_SIZE_128_BYTE;
 	}
 }
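
A quick worked example of the fallback (a sketch, not driver code): a
24-byte transfer whose alignment would allow 8-byte units gets demoted to
4-byte units:

	uint32_t shift;
	uint32_t tsz = jz4780_dma_transfer_size(24, &shift);
	/*
	 * ffs(24) - 1 == 3, demoted to ord 2, so tsz == JZ_DMA_SIZE_4_BYTE
	 * and shift == 2; the transfer count becomes 24 >> shift == 6 units.
	 */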
 
-static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
 	enum dma_transfer_direction direction)
 {
 	struct dma_slave_config *config = &jzchan->config;
 	uint32_t width, maxburst, tsz;
-	int ord;
 
 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
@@ -271,8 +283,8 @@
 	 * divisible by the transfer size, and we must not use more than the
 	 * maximum burst specified by the user.
 	 */
-	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
-	jzchan->transfer_shift = ord;
+	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+				       &jzchan->transfer_shift);
 
 	switch (width) {
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -289,12 +301,14 @@
 	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
 	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
 
-	desc->dtc = len >> ord;
+	desc->dtc = len >> jzchan->transfer_shift;
+	return 0;
 }
 
 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
@@ -307,12 +321,11 @@
 
 	for (i = 0; i < sg_len; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
-					sg_dma_address(&sgl[i]),
-					sg_dma_len(&sgl[i]),
-					direction);
+					      sg_dma_address(&sgl[i]),
+					      sg_dma_len(&sgl[i]),
+					      direction);
 		if (err < 0)
-			return ERR_PTR(err);
-
+			return NULL;
 
 		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
 
@@ -354,9 +367,9 @@
 
 	for (i = 0; i < periods; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
-					period_len, direction);
+					      period_len, direction);
 		if (err < 0)
-			return ERR_PTR(err);
+			return NULL;
 
 		buf_addr += period_len;
 
@@ -390,15 +403,13 @@
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
 	uint32_t tsz;
-	int ord;
 
 	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
 	if (!desc)
 		return NULL;
 
-	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
-	if (tsz < 0)
-		return ERR_PTR(tsz);
+	tsz = jz4780_dma_transfer_size(dest | src | len,
+				       &jzchan->transfer_shift);
 
 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;
@@ -407,7 +418,7 @@
 			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
-	desc->desc[0].dtc = len >> ord;
+	desc->desc[0].dtc = len >> jzchan->transfer_shift;
 
 	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
 }
@@ -484,8 +495,9 @@
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 }
 
-static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+static int jz4780_dma_terminate_all(struct dma_chan *chan)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	unsigned long flags;
 	LIST_HEAD(head);
@@ -507,9 +519,11 @@
 	return 0;
 }
 
-static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
-	const struct dma_slave_config *config)
+static int jz4780_dma_config(struct dma_chan *chan,
+	struct dma_slave_config *config)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
 	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
 		return -EINVAL;
@@ -567,8 +581,8 @@
 		txstate->residue = 0;
 
 	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
-		&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
-			status = DMA_ERROR;
+	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+		status = DMA_ERROR;
 
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 	return status;
@@ -671,7 +685,10 @@
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
-	struct jz4780_dma_data *data = param;
+	struct jz4780_dma_filter_data *data = param;
+
+	if (jzdma->dma_device.dev->of_node != data->of_node)
+		return false;
 
 	if (data->channel > -1) {
 		if (data->channel != jzchan->id)
@@ -690,11 +707,12 @@
 {
 	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
 	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
-	struct jz4780_dma_data data;
+	struct jz4780_dma_filter_data data;
 
 	if (dma_spec->args_count != 2)
 		return NULL;
 
+	data.of_node = ofdma->of_node;
 	data.transfer_type = dma_spec->args[0];
 	data.channel = dma_spec->args[1];
 
@@ -713,9 +731,14 @@
 				data.channel);
 			return NULL;
 		}
-	}
 
-	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		jzdma->chan[data.channel].transfer_type = data.transfer_type;
+
+		return dma_get_slave_channel(
+			&jzdma->chan[data.channel].vchan.chan);
+	} else {
+		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+	}
 }
 
 static int jz4780_dma_probe(struct platform_device *pdev)
@@ -743,23 +766,26 @@
 	if (IS_ERR(jzdma->base))
 		return PTR_ERR(jzdma->base);
 
-	jzdma->irq = platform_get_irq(pdev, 0);
-	if (jzdma->irq < 0) {
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
 		dev_err(dev, "failed to get IRQ: %d\n", ret);
-		return jzdma->irq;
+		return ret;
 	}
 
-	ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
-			       dev_name(dev), jzdma);
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return -EINVAL;
+		return ret;
 	}
 
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
-		return PTR_ERR(jzdma->clk);
+		ret = PTR_ERR(jzdma->clk);
+		goto err_free_irq;
 	}
 
 	clk_prepare_enable(jzdma->clk);
@@ -775,13 +801,13 @@
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
 
 	dd->dev = dev;
-	dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
 	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
 	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
-	dd->device_config = jz4780_dma_slave_config;
+	dd->device_config = jz4780_dma_config;
 	dd->device_terminate_all = jz4780_dma_terminate_all;
 	dd->device_tx_status = jz4780_dma_tx_status;
 	dd->device_issue_pending = jz4780_dma_issue_pending;
@@ -790,7 +816,6 @@
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
-
 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
 	 * Also set the FMSC bit - it increases MSC performance, so it makes
@@ -832,15 +857,24 @@
 
 err_disable_clk:
 	clk_disable_unprepare(jzdma->clk);
+
+err_free_irq:
+	free_irq(jzdma->irq, jzdma);
 	return ret;
 }
 
 static int jz4780_dma_remove(struct platform_device *pdev)
 {
 	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+	int i;
 
 	of_dma_controller_free(pdev->dev.of_node);
-	devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+
+	free_irq(jzdma->irq, jzdma);
+
+	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+		tasklet_kill(&jzdma->chan[i].vchan.task);
+
 	dma_async_device_unregister(&jzdma->dma_device);
 	return 0;
 }
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 36e02f0..e00c9b0 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,6 +6,9 @@
 	tristate
 	select DMA_ENGINE
 
+config DW_DMAC_BIG_ENDIAN_IO
+	bool
+
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
 	select DW_DMAC_CORE
@@ -23,6 +26,3 @@
 	  Support the Synopsys DesignWare AHB DMA controller on the
 	  platforms that enumerate it as a PCI device. For example,
 	  Intel Medfield has integrated this GPDMA controller.
-
-config DW_DMAC_BIG_ENDIAN_IO
-	bool
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 88853af..3e5d4f1 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1000,7 +1000,7 @@
 	 * code using dma memcpy must make sure alignment of
 	 * length is at dma->copy_align boundary.
 	 */
-	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
 
 	INIT_LIST_HEAD(&dma->channels);
 }
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f42f71e..7669c7d 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@
 
 static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_chan_disable(hsuc);
 	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_dma_chan_start(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@
 	unsigned long flags;
 	u32 sr;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
+	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
+	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
 	return sr;
 }
@@ -273,14 +265,11 @@
 	struct hsu_dma_desc *desc = hsuc->desc;
 	size_t bytes = hsu_dma_desc_size(desc);
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 
 	return bytes;
 }
@@ -327,24 +316,6 @@
 	return 0;
 }
 
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_disable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_enable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
 static int hsu_dma_pause(struct dma_chan *chan)
 {
 	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
-		hsu_dma_chan_deactivate(hsuc);
+		hsu_chan_disable(hsuc);
 		hsuc->desc->status = DMA_PAUSED;
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
 		hsuc->desc->status = DMA_IN_PROGRESS;
-		hsu_dma_chan_activate(hsuc);
+		hsu_chan_enable(hsuc);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
@@ -441,8 +412,6 @@
 
 		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
-		spin_lock_init(&hsuc->lock);
 	}
 
 	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 0275233..eeb9fff 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@
 	struct virt_dma_chan vchan;
 
 	void __iomem *reg;
-	spinlock_t lock;
 
 	/* hardware configuration */
 	enum dma_transfer_direction direction;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 865501fc..48d85f8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1083,8 +1083,12 @@
 	if (IS_ERR(imxdma->dma_ahb))
 		return PTR_ERR(imxdma->dma_ahb);
 
-	clk_prepare_enable(imxdma->dma_ipg);
-	clk_prepare_enable(imxdma->dma_ahb);
+	ret = clk_prepare_enable(imxdma->dma_ipg);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(imxdma->dma_ahb);
+	if (ret)
+		goto disable_dma_ipg_clk;
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -1094,20 +1098,20 @@
 				       dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		irq_err = platform_get_irq(pdev, 1);
 		if (irq_err < 0) {
 			ret = irq_err;
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		ret = devm_request_irq(&pdev->dev, irq_err,
 				       imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 	}
 
@@ -1144,7 +1148,7 @@
 				dev_warn(imxdma->dev, "Can't register IRQ %d "
 					 "for DMA channel %d\n",
 					 irq + i, i);
-				goto err;
+				goto disable_dma_ahb_clk;
 			}
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1183,14 +1187,14 @@
 
 	platform_set_drvdata(pdev, imxdma);
 
-	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
+	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
 	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
 
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
-		goto err;
+		goto disable_dma_ahb_clk;
 	}
 
 	if (pdev->dev.of_node) {
@@ -1206,9 +1210,10 @@
 
 err_of_dma_controller:
 	dma_async_device_unregister(&imxdma->dma_device);
-err:
-	clk_disable_unprepare(imxdma->dma_ipg);
+disable_dma_ahb_clk:
 	clk_disable_unprepare(imxdma->dma_ahb);
+disable_dma_ipg_clk:
+	clk_disable_unprepare(imxdma->dma_ipg);
 	return ret;
 }
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 77b6aab..9d375bc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,12 +35,16 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
 #include <linux/platform_data/dma-imx.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
 
@@ -124,6 +128,56 @@
 #define CHANGE_ENDIANNESS   0x80
 
 /*
+ *  p_2_p watermark_level description
+ *	Bits		Name			Description
+ *	0-7		Lower WML		Lower watermark level
+ *	8		PS			1: Pad Swallowing
+ *						0: No Pad Swallowing
+ *	9		PA			1: Pad Adding
+ *						0: No Pad Adding
+ *	10		SPDIF			If this bit is set both source
+ *						and destination are on SPBA
+ *	11		Source Bit(SP)		1: Source on SPBA
+ *						0: Source on AIPS
+ *	12		Destination Bit(DP)	1: Destination on SPBA
+ *						0: Destination on AIPS
+ *	13-15		---------		MUST BE 0
+ *	16-23		Higher WML		HWML
+ *	24-27		N			Total number of samples after
+ *						which Pad adding/Swallowing
+ *						must be done. It must be odd.
+ *	28		Lower WML Event(LWE)	SDMA events reg to check for
+ *						LWML event mask
+ *						0: LWE in EVENTS register
+ *						1: LWE in EVENTS2 register
+ *	29		Higher WML Event(HWE)	SDMA events reg to check for
+ *						HWML event mask
+ *						0: HWE in EVENTS register
+ *						1: HWE in EVENTS2 register
+ *	30		---------		MUST BE 0
+ *	31		CONT			1: Amount of samples to be
+ *						transferred is unknown and
+ *						script will keep on
+ *						transferring samples as long as
+ *						both events are detected and
+ *						script must be manually stopped
+ *						by the application
+ *						0: The amount of samples to be
+ *						transferred is equal to the
+ *						count field of mode word
+ */
+#define SDMA_WATERMARK_LEVEL_LWML	0xFF
+#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
+#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
+#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
+#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
+#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
+#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
+#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
+#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
+#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
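
Putting the layout above together: for a device-to-device transfer the
driver packs the two burst sizes into this word, as the DMA_DEV_TO_DEV
branch added later in this patch does. A sketch, with cfg standing in for a
hypothetical struct dma_slave_config:

	u32 wml = (cfg->src_maxburst & SDMA_WATERMARK_LEVEL_LWML) |
		  ((cfg->dst_maxburst << 16) & SDMA_WATERMARK_LEVEL_HWML);
	/* bits 0-7 carry the lower watermark, bits 16-23 the higher one */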
+
+/*
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
@@ -259,8 +313,9 @@
 	struct sdma_buffer_descriptor	*bd;
 	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
+	unsigned int			device_to_device;
 	unsigned long			flags;
-	dma_addr_t			per_address;
+	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -328,6 +383,8 @@
 	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
+	u32				spba_start_addr;
+	u32				spba_end_addr;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -705,6 +762,7 @@
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->device_to_device = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -780,6 +838,7 @@
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->device_to_device = per_2_per;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -792,11 +851,12 @@
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_DEV_TO_DEV)
+		load_address = sdmac->device_to_device;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -851,6 +911,46 @@
 	return 0;
 }
 
+static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+
+	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
+	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
+
+	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
+	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
+
+	if (sdmac->event_id0 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
+
+	if (sdmac->event_id1 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
+
+	/*
+	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to
+	 * swap LWML and HWML in INFO(A.3.2.5.1), and also swap
+	 * r0(event_mask[1]) and r1(event_mask[0]).
+	 */
+	if (lwml > hwml) {
+		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
+						SDMA_WATERMARK_LEVEL_HWML);
+		sdmac->watermark_level |= hwml;
+		sdmac->watermark_level |= lwml << 16;
+		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
+	}
+
+	if (sdmac->per_address2 >= sdma->spba_start_addr &&
+			sdmac->per_address2 <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
+
+	if (sdmac->per_address >= sdma->spba_start_addr &&
+			sdmac->per_address <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
+
+	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+}
+
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -869,6 +969,12 @@
 		sdma_event_enable(sdmac, sdmac->event_id0);
 	}
 
+	if (sdmac->event_id1) {
+		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id1);
+	}
+
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -887,19 +993,17 @@
 			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 		/* Handle multiple event channels differently */
 		if (sdmac->event_id1) {
-			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
-			if (sdmac->event_id1 > 31)
-				__set_bit(31, &sdmac->watermark_level);
-			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
-			if (sdmac->event_id0 > 31)
-				__set_bit(30, &sdmac->watermark_level);
-		} else {
+			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
+			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
+				sdma_set_watermarklevel_for_p2p(sdmac);
+		} else
 			__set_bit(sdmac->event_id0, sdmac->event_mask);
-		}
+
 		/* Watermark Level */
 		sdmac->watermark_level |= sdmac->watermark_level;
 		/* Address */
 		sdmac->shp_addr = sdmac->per_address;
+		sdmac->per_addr = sdmac->per_address2;
 	} else {
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
@@ -987,17 +1091,22 @@
 
 	sdmac->peripheral_type = data->peripheral_type;
 	sdmac->event_id0 = data->dma_request;
+	sdmac->event_id1 = data->dma_request2;
 
-	clk_enable(sdmac->sdma->clk_ipg);
-	clk_enable(sdmac->sdma->clk_ahb);
+	ret = clk_enable(sdmac->sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdmac->sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	ret = sdma_request_channel(sdmac);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	dma_async_tx_descriptor_init(&sdmac->desc, chan);
 	sdmac->desc.tx_submit = sdma_tx_submit;
@@ -1005,6 +1114,12 @@
 	sdmac->desc.flags = DMA_CTRL_ACK;
 
 	return 0;
+
+disable_clk_ahb:
+	clk_disable(sdmac->sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdmac->sdma->clk_ipg);
+	return ret;
 }
 
 static void sdma_free_chan_resources(struct dma_chan *chan)
@@ -1221,6 +1336,14 @@
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 			dmaengine_cfg->src_addr_width;
 		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
+		sdmac->per_address2 = dmaengine_cfg->src_addr;
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
+			SDMA_WATERMARK_LEVEL_LWML;
+		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
+			SDMA_WATERMARK_LEVEL_HWML;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
 	} else {
 		sdmac->per_address = dmaengine_cfg->dst_addr;
 		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1337,6 +1460,72 @@
 	release_firmware(fw);
 }
 
+#define EVENT_REMAP_CELLS 3
+
+static int __init sdma_event_remap(struct sdma_engine *sdma)
+{
+	struct device_node *np = sdma->dev->of_node;
+	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
+	struct property *event_remap;
+	struct regmap *gpr;
+	char propname[] = "fsl,sdma-event-remap";
+	u32 reg, val, shift, num_map, i;
+	int ret = 0;
+
+	if (IS_ERR(np) || IS_ERR(gpr_np))
+		goto out;
+
+	event_remap = of_find_property(np, propname, NULL);
+	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
+	if (!num_map) {
+		dev_warn(sdma->dev, "no event needs to be remapped\n");
+		goto out;
+	} else if (num_map % EVENT_REMAP_CELLS) {
+		dev_err(sdma->dev, "the property %s must modulo %d\n",
+				propname, EVENT_REMAP_CELLS);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	gpr = syscon_node_to_regmap(gpr_np);
+	if (IS_ERR(gpr)) {
+		dev_err(sdma->dev, "failed to get gpr regmap\n");
+		ret = PTR_ERR(gpr);
+		goto out;
+	}
+
+	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
+		ret = of_property_read_u32_index(np, propname, i, &reg);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 1);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 2, &val);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 2);
+			goto out;
+		}
+
+		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
+	}
+
+out:
+	if (!IS_ERR(gpr_np))
+		of_node_put(gpr_np);
+
+	return ret;
+}
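
To illustrate the triplet format with hypothetical property values (not
taken from this patch), fsl,sdma-event-remap = <0 15 1>, <0 14 1>; would be
applied as:

	/* each <reg shift val> triplet becomes one GPR bit update */
	regmap_update_bits(gpr, 0, BIT(15), 1 << 15);
	regmap_update_bits(gpr, 0, BIT(14), 1 << 14);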
+
 static int sdma_get_firmware(struct sdma_engine *sdma,
 		const char *fw_name)
 {
@@ -1354,8 +1543,12 @@
 	int i, ret;
 	dma_addr_t ccb_phys;
 
-	clk_enable(sdma->clk_ipg);
-	clk_enable(sdma->clk_ahb);
+	ret = clk_enable(sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1411,8 +1604,9 @@
 	return 0;
 
 err_dma_alloc:
-	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdma->clk_ipg);
 	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
 	return ret;
 }
@@ -1444,6 +1638,14 @@
 	data.dma_request = dma_spec->args[0];
 	data.peripheral_type = dma_spec->args[1];
 	data.priority = dma_spec->args[2];
+	/*
+	 * Initialize dma_request2 to zero; it is not set from the dts.
+	 * For P2P, dma_request2 is instead initialized via
+	 * dma_request_channel(): chan->private will point to the
+	 * imx_dma_data, and in device_alloc_chan_resources()
+	 * imx_dma_data.dma_request2 will be copied into sdmac->event_id1.
+	 */
+	data.dma_request2 = 0;
 
 	return dma_request_channel(mask, sdma_filter_fn, &data);
 }
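The xlate above feeds sdma_filter_fn through dma_request_channel(), which runs the filter synchronously and then calls device_alloc_chan_resources() on the matched channel. A hedged client-side sketch of the same request path (values hypothetical):

	static struct dma_chan *request_sdma_chan(void)
	{
		static struct imx_dma_data data = {
			.dma_request	 = 1,			/* hypothetical event id */
			.dma_request2	 = 0,			/* filled in later for P2P */
			.peripheral_type = IMX_DMATYPE_SSI,	/* hypothetical */
			.priority	 = 2,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, sdma_filter_fn, &data);
	}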
@@ -1453,10 +1655,12 @@
 	const struct of_device_id *of_id =
 			of_match_device(sdma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
+	struct device_node *spba_bus;
 	const char *fw_name;
 	int ret;
 	int irq;
 	struct resource *iores;
+	struct resource spba_res;
 	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	int i;
 	struct sdma_engine *sdma;
@@ -1551,6 +1755,10 @@
 	if (ret)
 		goto err_init;
 
+	ret = sdma_event_remap(sdma);
+	if (ret)
+		goto err_init;
+
 	if (sdma->drvdata->script_addrs)
 		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
 	if (pdata && pdata->script_addrs)
@@ -1608,6 +1816,14 @@
 			dev_err(&pdev->dev, "failed to register controller\n");
 			goto err_register;
 		}
+
+		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
+		ret = of_address_to_resource(spba_bus, 0, &spba_res);
+		if (!ret) {
+			sdma->spba_start_addr = spba_res.start;
+			sdma->spba_end_addr = spba_res.end;
+		}
+		of_node_put(spba_bus);
 	}
 
 	dev_info(sdma->dev, "initialized\n");
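The probe addition above records the SPBA bus MMIO window so the driver can later tell whether a peripheral address sits on that bus. A hedged sketch of such a check (hypothetical helper; spba_start_addr/spba_end_addr as stored above):

	static bool sdma_addr_on_spba(struct sdma_engine *sdma, dma_addr_t addr)
	{
		return sdma->spba_start_addr &&
		       addr >= sdma->spba_start_addr &&
		       addr <= sdma->spba_end_addr;
	}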
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 0ff7270..cf5fedb 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := init.o dma.o prep.o dca.o sysfs.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index ea1e107..2cb7c30 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -31,7 +31,6 @@
 
 #include "dma.h"
 #include "registers.h"
-#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -71,14 +70,6 @@
 #define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
 #define IOAT_TAG_MAP_LEN	8
 
-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
 /* pack PCI B/D/F into a u16 */
 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
 {
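dcaid_from_pcidev()'s body falls outside this hunk; going by the requester-ID format comment deleted just below (bus in [15:8], device in [7:3], function in [2:0]), the packing is presumably just bus number and devfn:

	/* presumed body, matching the requester-ID format described below */
	static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
	{
		return (pci->bus->number << 8) | pci->devfn; /* devfn = dev[7:3]|fn[2:0] */
	}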
@@ -126,96 +117,6 @@
 	struct ioat_dca_slot 	 req_slots[0];
 };
 
-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8]	PCI-Express Bus Number
- * [7:3]	PCI-Express Device Number
- * [2:0]	PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1]	Reserved (0)
- * [0]		Ignore Function Number
- */
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			writew(id, ioatdca->dca_base + (i * 4));
-			/* make sure the ignore function bit is off */
-			writeb(0, ioatdca->dca_base + (i * 4) + 2);
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
-				     struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			writew(0, ioatdca->dca_base + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
-			   struct device *dev,
-			   int cpu)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	int i, apic_id, bit, value;
-	u8 entry, tag;
-
-	tag = 0;
-	apic_id = cpu_physical_id(cpu);
-
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-		entry = ioatdca->tag_map[i];
-		if (entry & DCA_TAG_MAP_VALID) {
-			bit = entry & ~DCA_TAG_MAP_VALID;
-			value = (apic_id & (1 << bit)) ? 1 : 0;
-		} else {
-			value = entry ? 1 : 0;
-		}
-		tag |= (value << i);
-	}
-	return tag;
-}
-
 static int ioat_dca_dev_managed(struct dca_provider *dca,
 				struct device *dev)
 {
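The v1/v2 DCA providers deleted here duplicated the v3 code paths that the rest of this patch renames to plain ioat_*. For reference, the tag computation they shared maps each tag bit through the per-device tag map; condensed from the removed ioat_dca_get_tag():

	static u8 dca_tag_for_cpu(const u8 *tag_map, int apic_id)
	{
		u8 tag = 0;
		int i;

		for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
			u8 entry = tag_map[i];
			int value;

			if (entry & DCA_TAG_MAP_VALID)	/* bit 7 set: sample an APIC ID bit */
				value = (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
			else				/* otherwise a constant 0 or 1 */
				value = entry ? 1 : 0;
			tag |= value << i;
		}
		return tag;
	}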
@@ -231,260 +132,7 @@
 	return 0;
 }
 
-static struct dca_ops ioat_dca_ops = {
-	.add_requester		= ioat_dca_add_requester,
-	.remove_requester	= ioat_dca_remove_requester,
-	.get_tag		= ioat_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	u8 *tag_map = NULL;
-	int i;
-	int err;
-	u8 version;
-	u8 max_requesters;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	/* I/OAT v1 systems must have a known tag_map to support DCA */
-	switch (pdev->vendor) {
-	case PCI_VENDOR_ID_INTEL:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_INTEL_IOAT:
-			tag_map = ioat_tag_map_BNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
-			tag_map = ioat_tag_map_CNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
-			tag_map = ioat_tag_map_SCNB;
-			break;
-		}
-		break;
-	case PCI_VENDOR_ID_UNISYS:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
-			tag_map = ioat_tag_map_UNISYS;
-			break;
-		}
-		break;
-	}
-	if (tag_map == NULL)
-		return NULL;
-
-	version = readb(iobase + IOAT_VER_OFFSET);
-	if (version == IOAT_VER_3_0)
-		max_requesters = IOAT3_DCA_MAX_REQ;
-	else
-		max_requesters = IOAT_DCA_MAX_REQ;
-
-	dca = alloc_dca_provider(&ioat_dca_ops,
-			sizeof(*ioatdca) +
-			(sizeof(struct ioat_dca_slot) * max_requesters));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->max_requesters = max_requesters;
-	ioatdca->dca_base = iobase + 0x54;
-
-	/* copy over the APIC ID to DCA tag mapping */
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
-		ioatdca->tag_map[i] = tag_map[i];
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(id | IOAT_DCA_GREQID_VALID,
-			       ioatdca->iobase + global_req_table + (i * 4));
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
-				      struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(0, ioatdca->iobase + global_req_table + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
-			    struct device *dev,
-			    int cpu)
-{
-	u8 tag;
-
-	tag = ioat_dca_get_tag(dca, dev, cpu);
-	tag = (~tag) & 0x1F;
-	return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
-	.add_requester		= ioat2_dca_add_requester,
-	.remove_requester	= ioat2_dca_remove_requester,
-	.get_tag		= ioat2_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-	int slots = 0;
-	u32 req;
-	u16 global_req_table;
-
-	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
-	if (global_req_table == 0)
-		return 0;
-	do {
-		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-		slots++;
-	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-	return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	int slots;
-	int i;
-	int err;
-	u32 tag_map;
-	u16 dca_offset;
-	u16 csi_fsb_control;
-	u16 pcie_control;
-	u8 bit;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-	if (dca_offset == 0)
-		return NULL;
-
-	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
-	if (slots == 0)
-		return NULL;
-
-	dca = alloc_dca_provider(&ioat2_dca_ops,
-				 sizeof(*ioatdca)
-				      + (sizeof(struct ioat_dca_slot) * slots));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->iobase = iobase;
-	ioatdca->dca_base = iobase + dca_offset;
-	ioatdca->max_requesters = slots;
-
-	/* some bios might not know to turn these on */
-	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
-		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
-		writew(csi_fsb_control,
-		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	}
-	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
-		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
-		writew(pcie_control,
-		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	}
-
-
-	/* TODO version, compatibility and configuration checks */
-
-	/* copy out the APIC to DCA tag map */
-	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
-	for (i = 0; i < 5; i++) {
-		bit = (tag_map >> (4 * i)) & 0x0f;
-		if (bit < 8)
-			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
-		else
-			ioatdca->tag_map[i] = 0;
-	}
-
-	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
-				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
-				dev_driver_string(&pdev->dev),
-				dev_name(&pdev->dev));
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
 	struct pci_dev *pdev;
@@ -518,7 +166,7 @@
 	return -EFAULT;
 }
 
-static int ioat3_dca_remove_requester(struct dca_provider *dca,
+static int ioat_dca_remove_requester(struct dca_provider *dca,
 				      struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
@@ -545,7 +193,7 @@
 	return -ENODEV;
 }
 
-static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
 			    struct device *dev,
 			    int cpu)
 {
@@ -576,14 +224,14 @@
 	return tag;
 }
 
-static struct dca_ops ioat3_dca_ops = {
-	.add_requester		= ioat3_dca_add_requester,
-	.remove_requester	= ioat3_dca_remove_requester,
-	.get_tag		= ioat3_dca_get_tag,
+static struct dca_ops ioat_dca_ops = {
+	.add_requester		= ioat_dca_add_requester,
+	.remove_requester	= ioat_dca_remove_requester,
+	.get_tag		= ioat_dca_get_tag,
 	.dev_managed		= ioat_dca_dev_managed,
 };
 
-static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
 {
 	int slots = 0;
 	u32 req;
@@ -618,7 +266,7 @@
 		(tag_map[4] == DCA_TAG_MAP_VALID));
 }
 
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;
@@ -645,11 +293,11 @@
 	if (dca_offset == 0)
 		return NULL;
 
-	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
 	if (slots == 0)
 		return NULL;
 
-	dca = alloc_dca_provider(&ioat3_dca_ops,
+	dca = alloc_dca_provider(&ioat_dca_ops,
 				 sizeof(*ioatdca)
 				      + (sizeof(struct ioat_dca_slot) * slots));
 	if (!dca)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index ee0aa9f..f66b7e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -31,31 +31,23 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
 
 #include "../dmaengine.h"
 
-int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
-		 "high-water mark for pushing ioat descriptors (default: 4)");
-
-/* internal functions */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat);
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+static void ioat_eh(struct ioatdma_chan *ioat_chan);
 
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 {
 	struct ioatdma_device *instance = data;
-	struct ioat_chan_common *chan;
+	struct ioatdma_chan *ioat_chan;
 	unsigned long attnstatus;
 	int bit;
 	u8 intrctrl;
@@ -72,9 +64,9 @@
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
-		chan = ioat_chan_by_index(instance, bit);
-		if (test_bit(IOAT_RUN, &chan->state))
-			tasklet_schedule(&chan->cleanup_task);
+		ioat_chan = ioat_chan_by_index(instance, bit);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			tasklet_schedule(&ioat_chan->cleanup_task);
 	}
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
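The single-vector handler above fans one attention-status register out to per-channel tasklets; for_each_set_bit() visits only the bits that are set. A tiny usage sketch:

	unsigned long status = 0x0a;	/* channels 1 and 3 pending */
	int bit;

	for_each_set_bit(bit, &status, BITS_PER_LONG)
		pr_info("service channel %d\n", bit);	/* prints 1, then 3 */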
@@ -86,294 +78,32 @@
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
-	struct ioat_chan_common *chan = data;
+	struct ioatdma_chan *ioat_chan = data;
 
-	if (test_bit(IOAT_RUN, &chan->state))
-		tasklet_schedule(&chan->cleanup_task);
+	if (test_bit(IOAT_RUN, &ioat_chan->state))
+		tasklet_schedule(&ioat_chan->cleanup_task);
 
 	return IRQ_HANDLED;
 }
 
-/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
+void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c = &chan->common;
-	unsigned long data = (unsigned long) c;
-
-	chan->device = device;
-	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
-	spin_lock_init(&chan->cleanup_lock);
-	chan->common.device = dma;
-	dma_cookie_init(&chan->common);
-	list_add_tail(&chan->common.device_node, &dma->channels);
-	device->idx[idx] = chan;
-	init_timer(&chan->timer);
-	chan->timer.function = device->timer_fn;
-	chan->timer.data = data;
-	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
-}
-
-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
-{
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_scale &= 0x1f; /* bits [4:0] valid */
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap = xfercap;
-		spin_lock_init(&ioat->desc_lock);
-		INIT_LIST_HEAD(&ioat->free_desc);
-		INIT_LIST_HEAD(&ioat->used_desc);
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
-{
-	void __iomem *reg_base = ioat->base.reg_base;
-
-	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
-		__func__, ioat->pending);
-	ioat->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
-
-	if (ioat->pending > 0) {
-		spin_lock_bh(&ioat->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat);
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-}
-
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	void __iomem *reg_base = chan->reg_base;
-	u32 chansts, chanerr;
-
-	dev_warn(to_dev(chan), "reset\n");
-	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
-	if (chanerr) {
-		dev_err(to_dev(chan),
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(chan), chansts, chanerr);
-		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/*
-	 * whack it upside the head with a reset
-	 * and wait for things to settle out.
-	 * force the pending count to a really big negative
-	 * to make sure no one forces an issue_pending
-	 * while we're waiting.
-	 */
-
-	ioat->pending = INT_MIN;
-	writeb(IOAT_CHANCMD_RESET,
-	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	set_bit(IOAT_RESET_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + RESET_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *first;
-	struct ioat_desc_sw *chain_tail;
-	dma_cookie_t cookie;
-
-	spin_lock_bh(&ioat->desc_lock);
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	/* write address into NextDescriptor field of last desc in chain */
-	first = to_ioat_desc(desc->tx_list.next);
-	chain_tail = to_ioat_desc(ioat->used_desc.prev);
-	/* make descriptor updates globally visible before chaining */
-	wmb();
-	chain_tail->hw->next = first->txd.phys;
-	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
-	dump_desc_dbg(ioat, chain_tail);
-	dump_desc_dbg(ioat, first);
-
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	ioat->active += desc->hw->tx_cnt;
-	ioat->pending += desc->hw->tx_cnt;
-	if (ioat->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
-{
-	struct ioat_dma_descriptor *desc;
-	struct ioat_desc_sw *desc_sw;
-	struct ioatdma_device *ioatdma_device;
-	dma_addr_t phys;
-
-	ioatdma_device = ioat->base.device;
-	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-	if (unlikely(!desc))
-		return NULL;
-
-	desc_sw = kzalloc(sizeof(*desc_sw), flags);
-	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-		return NULL;
-	}
-
-	memset(desc, 0, sizeof(*desc));
-
-	INIT_LIST_HEAD(&desc_sw->tx_list);
-	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
-	desc_sw->txd.tx_submit = ioat1_tx_submit;
-	desc_sw->hw = desc;
-	desc_sw->txd.phys = phys;
-	set_desc_id(desc_sw, -1);
-
-	return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-		 "ioat1: initial descriptors per channel (default: 256)");
-/**
- * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
-	u32 chanerr;
-	int i;
-	LIST_HEAD(tmp_list);
-
-	/* have we already been set up? */
-	if (!list_empty(&ioat->free_desc))
-		return ioat->desccount;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/* Allocate descriptors */
-	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
-		if (!desc) {
-			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
-			break;
-		}
-		set_desc_id(desc, i);
-		list_add_tail(&desc->node, &tmp_list);
-	}
-	spin_lock_bh(&ioat->desc_lock);
-	ioat->desccount = i;
-	list_splice(&tmp_list, &ioat->free_desc);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	set_bit(IOAT_RUN, &chan->state);
-	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, ioat->desccount);
-	return ioat->desccount;
-}
-
-void ioat_stop(struct ioat_chan_common *chan)
-{
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	int chan_id = chan_num(chan);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
 
 	/* 1/ stop irq from firing tasklets
 	 * 2/ stop the tasklet from re-arming irqs
 	 */
-	clear_bit(IOAT_RUN, &chan->state);
+	clear_bit(IOAT_RUN, &ioat_chan->state);
 
 	/* flush inflight interrupts */
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		msix = &device->msix_entries[chan_id];
+		msix = &ioat_dma->msix_entries[chan_id];
 		synchronize_irq(msix->vector);
 		break;
 	case IOAT_MSI:
@@ -385,398 +115,67 @@
 	}
 
 	/* flush inflight timers */
-	del_timer_sync(&chan->timer);
+	del_timer_sync(&ioat_chan->timer);
 
 	/* flush inflight tasklet runs */
-	tasklet_kill(&chan->cleanup_task);
+	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	device->cleanup_fn((unsigned long) &chan->common);
+	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
 }
 
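ioat_stop() quiesces in a strict order: clear IOAT_RUN so the IRQ path stops scheduling the tasklet and the tasklet stops re-arming, then drain each source of deferred work before the final reap. A condensed, slightly hypothetical restatement:

	static void quiesce_channel(struct ioatdma_chan *ioat_chan, unsigned int vector)
	{
		clear_bit(IOAT_RUN, &ioat_chan->state);	/* 1. no new tasklet scheduling */
		synchronize_irq(vector);		/* 2. drain in-flight hard irqs */
		del_timer_sync(&ioat_chan->timer);	/* 3. drain in-flight timers */
		tasklet_kill(&ioat_chan->cleanup_task);	/* 4. drain in-flight tasklets */
		ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan); /* 5. final reap */
	}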
-/**
- * ioat1_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
-	struct ioat_desc_sw *desc, *_desc;
-	int in_use_descs = 0;
+	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+	ioat_chan->issued = ioat_chan->head;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+}
 
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (ioat->desccount == 0)
-		return;
+void ioat_issue_pending(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 
-	ioat_stop(chan);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
-
-	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
-		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
-			__func__, desc_id(desc));
-		dump_desc_dbg(ioat, desc);
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
+	if (ioat_ring_pending(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		__ioat_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
 	}
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->free_desc, node) {
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
-		      chan->completion_dma);
-
-	/* one is ok since we left it on there on purpose */
-	if (in_use_descs > 1)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			in_use_descs - 1);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->pending = 0;
-	ioat->desccount = 0;
 }
 
 /**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat: IOAT DMA channel handle
+ * ioat_update_pending - log pending descriptors
+ * @ioat_chan: ioat channel
  *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with prep_lock held
  */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new;
-
-	if (!list_empty(&ioat->free_desc)) {
-		new = to_ioat_desc(ioat->free_desc.next);
-		list_del(&new->node);
-	} else {
-		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
-		if (!new) {
-			dev_err(to_dev(&ioat->base), "alloc failed\n");
-			return NULL;
-		}
-	}
-	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
-		__func__, desc_id(new));
-	prefetch(new->hw);
-	return new;
+	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+		__ioat_issue_pending(ioat_chan);
 }
 
-static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
-		      dma_addr_t dma_src, size_t len, unsigned long flags)
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc;
-	size_t copy;
-	LIST_HEAD(chain);
-	dma_addr_t src = dma_src;
-	dma_addr_t dest = dma_dest;
-	size_t total_len = len;
-	struct ioat_dma_descriptor *hw = NULL;
-	int tx_cnt = 0;
-
-	spin_lock_bh(&ioat->desc_lock);
-	desc = ioat1_dma_get_next_descriptor(ioat);
-	do {
-		if (!desc)
-			break;
-
-		tx_cnt++;
-		copy = min_t(size_t, len, ioat->xfercap);
-
-		hw = desc->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dest;
-
-		list_add_tail(&desc->node, &chain);
-
-		len -= copy;
-		dest += copy;
-		src += copy;
-		if (len) {
-			struct ioat_desc_sw *next;
-
-			async_tx_ack(&desc->txd);
-			next = ioat1_dma_get_next_descriptor(ioat);
-			hw->next = next ? next->txd.phys : 0;
-			dump_desc_dbg(ioat, desc);
-			desc = next;
-		} else
-			hw->next = 0;
-	} while (len);
-
-	if (!desc) {
-		struct ioat_chan_common *chan = &ioat->base;
-
-		dev_err(to_dev(chan),
-			"chan%d - get_next_desc failed\n", chan_num(chan));
-		list_splice(&chain, &ioat->free_desc);
-		spin_unlock_bh(&ioat->desc_lock);
-		return NULL;
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	list_splice(&chain, &desc->tx_list);
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->tx_cnt = tx_cnt;
-	dump_desc_dbg(ioat, desc);
-
-	return &desc->txd;
-}
-
-static void ioat1_cleanup_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat1_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
-{
-	dma_addr_t phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	if (is_ioat_halted(completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
-			chanerr);
-
-		/* TODO do something to salvage the situation */
-	}
-
-	return phys_complete;
-}
-
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete)
-{
-	*phys_complete = ioat_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct list_head *_desc, *n;
-	struct dma_async_tx_descriptor *tx;
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
-		 __func__, (unsigned long long) phys_complete);
-	list_for_each_safe(_desc, n, &ioat->used_desc) {
-		struct ioat_desc_sw *desc;
-
-		prefetch(n);
-		desc = list_entry(_desc, typeof(*desc), node);
-		tx = &desc->txd;
-		/*
-		 * Incoming DMA requests may use multiple descriptors,
-		 * due to exceeding xfercap, perhaps. If so, only the
-		 * last one will have a cookie, and require unmapping.
-		 */
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			ioat->active -= desc->hw->tx_cnt;
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so clean
-			 * up if the client is done with the descriptor
-			 */
-			if (async_tx_test_ack(tx))
-				list_move_tail(&desc->node, &ioat->free_desc);
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can
-			 * append from it.
-			 */
-
-			/* if nothing else is pending, cancel the
-			 * completion timeout
-			 */
-			if (n == &ioat->used_desc) {
-				dev_dbg(to_dev(chan),
-					"%s cancel completion timeout\n",
-					__func__);
-				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-			}
-
-			/* TODO check status bits? */
-			break;
-		}
-	}
-
-	chan->last_completion = phys_complete;
-}
-
-/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- *
- * To prevent lock contention we defer cleanup when the locks are
- * contended with a terminal timeout that forces cleanup and catches
- * completion notification errors.
- */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	prefetch(chan->completion);
-
-	if (!spin_trylock_bh(&chan->cleanup_lock))
-		return;
-
-	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	if (!spin_trylock_bh(&ioat->desc_lock)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	__cleanup(ioat, phys_complete);
-
-	spin_unlock_bh(&ioat->desc_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat1_timer_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
-		struct ioat_desc_sw *desc;
-
-		spin_lock_bh(&ioat->desc_lock);
-
-		/* restart active descriptors */
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		ioat_set_chainaddr(ioat, desc->txd.phys);
-		ioat_start(chan);
-
-		ioat->pending = 0;
-		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		spin_unlock_bh(&ioat->desc_lock);
-	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-
-		spin_lock_bh(&ioat->desc_lock);
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
-			ioat1_reset_channel(ioat);
-		else {
-			u64 status = ioat_chansts(chan);
-
-			/* manually update the last completion address */
-			if (ioat_chansts_to_addr(status) != 0)
-				*chan->completion = status;
-
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-enum dma_status
-ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		   struct dma_tx_state *txstate)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-	struct ioatdma_device *device = chan->device;
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	device->cleanup_fn((unsigned long) c);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
+	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
-	spin_lock_bh(&ioat->desc_lock);
-
-	desc = ioat1_dma_get_next_descriptor(ioat);
-
-	if (!desc) {
-		dev_err(to_dev(chan),
-			"Unable to start null desc - get next desc failed\n");
-		spin_unlock_bh(&ioat->desc_lock);
+	if (ioat_ring_space(ioat_chan) < 1) {
+		dev_err(to_dev(ioat_chan),
+			"Unable to start null desc - ring full\n");
 		return;
 	}
 
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
 	hw = desc->hw;
 	hw->ctl = 0;
 	hw->ctl_f.null = 1;
@@ -787,460 +186,804 @@
 	hw->src_addr = 0;
 	hw->dst_addr = 0;
 	async_tx_ack(&desc->txd);
-	hw->next = 0;
-	list_add_tail(&desc->node, &ioat->used_desc);
-	dump_desc_dbg(ioat, desc);
-
-	ioat_set_chainaddr(ioat, desc->txd.phys);
-	ioat_start(chan);
-	spin_unlock_bh(&ioat->desc_lock);
+	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+	dump_desc_dbg(ioat_chan, desc);
+	/* make sure descriptors are written before we submit */
+	wmb();
+	ioat_chan->head += 1;
+	__ioat_issue_pending(ioat_chan);
 }
 
-/*
- * Perform a IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	__ioat_start_null_desc(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 
-/**
- * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-int ioat_dma_self_test(struct ioatdma_device *device)
+static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
 {
-	int i;
-	u8 *src;
-	u8 *dest;
-	struct dma_device *dma = &device->common;
-	struct device *dev = &device->pdev->dev;
-	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
+	/* set the tail to be re-issued */
+	ioat_chan->issued = ioat_chan->tail;
+	ioat_chan->dmacount = 0;
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+		__ioat_issue_pending(ioat_chan);
+	} else
+		__ioat_start_null_desc(ioat_chan);
+}
+
+static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
 	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	unsigned long flags;
+	u32 status;
 
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!src)
-		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!dest) {
-		kfree(src);
-		return -ENOMEM;
+	status = ioat_chansts(ioat_chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat_chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (tmo && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		status = ioat_chansts(ioat_chan);
+		cpu_relax();
 	}
 
-	/* Fill in src buffer */
-	for (i = 0; i < IOAT_TEST_SIZE; i++)
-		src[i] = (u8)i;
-
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		dev_err(dev, "selftest cannot allocate chan resource\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_src)) {
-		dev_err(dev, "mapping src buffer failed\n");
-		goto free_resources;
-	}
-	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dma_dest)) {
-		dev_err(dev, "mapping dest buffer failed\n");
-		goto unmap_src;
-	}
-	flags = DMA_PREP_INTERRUPT;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
-	if (!tx) {
-		dev_err(dev, "Self-test prep failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test setup failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL)
-					!= DMA_COMPLETE) {
-		dev_err(dev, "Self-test copy timed out, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		dev_err(dev, "Self-test copy failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-unmap_dma:
-	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-unmap_src:
-	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	kfree(src);
-	kfree(dest);
 	return err;
 }
 
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-		    sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), msi, intx");
+static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(ioat_chan);
+	while (ioat_reset_pending(ioat_chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+	__releases(&ioat_chan->prep_lock)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	dma_cookie_t cookie;
+
+	cookie = dma_cookie_assign(tx);
+	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
+
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	/* make descriptor updates visible before advancing ioat->head,
+	 * this is purposefully not smp_wmb() since we are also
+	 * publishing the descriptor updates to a dma device
+	 */
+	wmb();
+
+	ioat_chan->head += ioat_chan->produce;
+
+	ioat_update_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	return cookie;
+}
+
+static struct ioat_ring_ent *
+ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	struct ioatdma_device *ioat_dma;
+	dma_addr_t phys;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+	if (!hw)
+		return NULL;
+	memset(hw, 0, sizeof(*hw));
+
+	desc = kmem_cache_zalloc(ioat_cache, flags);
+	if (!desc) {
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+		return NULL;
+	}
+
+	dma_async_tx_descriptor_init(&desc->txd, chan);
+	desc->txd.tx_submit = ioat_tx_submit_unlock;
+	desc->hw = hw;
+	desc->txd.phys = phys;
+	return desc;
+}
+
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+	struct ioatdma_device *ioat_dma;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+	kmem_cache_free(ioat_cache, desc);
+}
+
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+	struct ioat_ring_ent **ring;
+	int descs = 1 << order;
+	int i;
+
+	if (order > ioat_get_max_alloc_order())
+		return NULL;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+	for (i = 0; i < descs; i++) {
+		ring[i] = ioat_alloc_ring_ent(c, flags);
+		if (!ring[i]) {
+			while (i--)
+				ioat_free_ring_ent(ring[i], c);
+			kfree(ring);
+			return NULL;
+		}
+		set_desc_id(ring[i], i);
+	}
+
+	/* link descs */
+	for (i = 0; i < descs-1; i++) {
+		struct ioat_ring_ent *next = ring[i+1];
+		struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+		hw->next = next->txd.phys;
+	}
+	ring[i]->hw->next = ring[0]->txd.phys;
+
+	return ring;
+}
+
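ioat_alloc_ring() sizes the ring as a power of two so the free-running head/tail/issued counters can be masked into slots without a modulo. A sketch of the lookup (the real helper lives in the driver's header):

	static struct ioat_ring_ent *
	ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
	{
		/* alloc_order is log2 of the ring size, so size - 1 is an all-ones mask */
		return ioat_chan->ring[idx & ((1 << ioat_chan->alloc_order) - 1)];
	}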
+static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
+{
+	/* reshape differs from normal ring allocation in that we want
+	 * to allocate a new software ring while only
+	 * extending/truncating the hardware ring
+	 */
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	const u32 curr_size = ioat_ring_size(ioat_chan);
+	const u16 active = ioat_ring_active(ioat_chan);
+	const u32 new_size = 1 << order;
+	struct ioat_ring_ent **ring;
+	u32 i;
+
+	if (order > ioat_get_max_alloc_order())
+		return false;
+
+	/* double check that we have at least 1 free descriptor */
+	if (active == curr_size)
+		return false;
+
+	/* when shrinking, verify that we can hold the current active
+	 * set in the new ring
+	 */
+	if (active >= new_size)
+		return false;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+	if (!ring)
+		return false;
+
+	/* allocate/trim descriptors as needed */
+	if (new_size > curr_size) {
+		/* copy current descriptors to the new ring */
+		for (i = 0; i < curr_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* add new descriptors to the ring */
+		for (i = curr_size; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
+			if (!ring[new_idx]) {
+				while (i--) {
+					u16 new_idx = (ioat_chan->tail+i) &
+						       (new_size-1);
+
+					ioat_free_ring_ent(ring[new_idx], c);
+				}
+				kfree(ring);
+				return false;
+			}
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* hw link new descriptors */
+		for (i = curr_size-1; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+			struct ioat_ring_ent *next =
+				ring[(new_idx+1) & (new_size-1)];
+			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+			hw->next = next->txd.phys;
+		}
+	} else {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_ring_ent *next;
+
+		/* copy current descriptors to the new ring, dropping the
+		 * removed descriptors
+		 */
+		for (i = 0; i < new_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* free deleted descriptors */
+		for (i = new_size; i < curr_size; i++) {
+			struct ioat_ring_ent *ent;
+
+			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
+			ioat_free_ring_ent(ent, c);
+		}
+
+		/* fix up hardware ring */
+		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
+		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
+		hw->next = next->txd.phys;
+	}
+
+	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
+		__func__, new_size);
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = ring;
+	ioat_chan->alloc_order = order;
+
+	return true;
+}
 
 /**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
+ * ioat_check_space_lock - verify space and grab ring producer lock
+ * @ioat_chan: ioat channel (ring) to operate on
+ * @num_descs: allocation length
  */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
+	__acquires(&ioat_chan->prep_lock)
 {
-	struct ioat_chan_common *chan;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-	struct msix_entry *msix;
-	int i, j, msixcnt;
-	int err = -EINVAL;
-	u8 intrctrl = 0;
+	bool retry;
 
-	if (!strcmp(ioat_interrupt_style, "msix"))
-		goto msix;
-	if (!strcmp(ioat_interrupt_style, "msi"))
-		goto msi;
-	if (!strcmp(ioat_interrupt_style, "intx"))
-		goto intx;
-	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
-	goto err_no_irq;
+ retry:
+	spin_lock_bh(&ioat_chan->prep_lock);
+	/* never allow the last descriptor to be consumed; we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
+		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
+			__func__, num_descs, ioat_chan->head,
+			ioat_chan->tail, ioat_chan->issued);
+		ioat_chan->produce = num_descs;
+		return 0;  /* with ioat->prep_lock held */
+	}
+	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 
-msix:
-	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
-	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
+	/* is another cpu already trying to expand the ring? */
+	if (retry)
+		goto retry;
 
-	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
-	if (err)
-		goto msi;
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
+	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
 
-	for (i = 0; i < msixcnt; i++) {
-		msix = &device->msix_entries[i];
-		chan = ioat_chan_by_index(device, i);
-		err = devm_request_irq(dev, msix->vector,
-				       ioat_dma_do_interrupt_msix, 0,
-				       "ioat-msix", chan);
-		if (err) {
-			for (j = 0; j < i; j++) {
-				msix = &device->msix_entries[j];
-				chan = ioat_chan_by_index(device, j);
-				devm_free_irq(dev, msix->vector, chan);
+	/* if we were able to expand the ring retry the allocation */
+	if (retry)
+		goto retry;
+
+	dev_dbg_ratelimited(to_dev(ioat_chan),
+			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+			    __func__, num_descs, ioat_chan->head,
+			    ioat_chan->tail, ioat_chan->issued);
+
+	/* Progress reclaim in the allocation failure case. We may be
+	 * called under bh_disabled, so we need to trigger the timer
+	 * event directly.
+	 */
+	if (time_is_before_jiffies(ioat_chan->timer.expires)
+	    && timer_pending(&ioat_chan->timer)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		ioat_timer_event((unsigned long)ioat_chan);
+	}
+
+	return -ENOMEM;
+}
+
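ioat_check_space_lock() returns 0 with prep_lock held and ioat_chan->produce set; the matching unlock happens in ioat_tx_submit_unlock() above, which is why descriptor preparation never races with ring reshaping. A hedged sketch of a prep routine using this protocol (hypothetical function):

	static struct dma_async_tx_descriptor *
	prep_one(struct ioatdma_chan *ioat_chan)
	{
		struct ioat_ring_ent *desc;

		if (ioat_check_space_lock(ioat_chan, 1) != 0)
			return NULL;			/* ring full: -ENOMEM path */
		/* prep_lock is now held and produce == 1 */
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
		/* ... fill desc->hw here ... */
		return &desc->txd;	/* caller's tx_submit() drops prep_lock */
	}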
+static bool desc_has_ext(struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	if (hw->ctl_f.op == IOAT_OP_XOR ||
+	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
+		struct ioat_xor_descriptor *xor = desc->xor;
+
+		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
+			return true;
+	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
+		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
+			return true;
+	}
+
+	return false;
+}
+
+static void
+ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
+{
+	if (!sed)
+		return;
+
+	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+	kmem_cache_free(ioat_sed_cache, sed);
+}
+
+static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+	u64 completion;
+
+	completion = *ioat_chan->completion;
+	phys_complete = ioat_chansts_to_addr(completion);
+
+	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
+	return phys_complete;
+}
+
+static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
+				   u64 *phys_complete)
+{
+	*phys_complete = ioat_get_current_completion(ioat_chan);
+	if (*phys_complete == ioat_chan->last_completion)
+		return false;
+
+	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	return true;
+}
+
+static void
+desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+	{
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		/* check whether an error status was written back */
+		if (!pq->dwbes_f.wbes)
+			return;
+
+		/* may need to cache a chanerr value here for checking and clearing later */
+
+		if (pq->dwbes_f.p_val_err)
+			*desc->result |= SUM_CHECK_P_RESULT;
+
+		if (pq->dwbes_f.q_val_err)
+			*desc->result |= SUM_CHECK_Q_RESULT;
+
+		return;
+	}
+	default:
+		return;
+	}
+}
+
+/**
+ * __cleanup - reclaim used descriptors
+ * @ioat_chan: channel (ring) to clean
+ * @phys_complete: hardware-reported completion address
+ */
+static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+{
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	bool seen_current = false;
+	int idx = ioat_chan->tail, i;
+	u16 active;
+
+	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+
+	/*
+	 * At restart of the channel, the completion address and the
+	 * channel status will be 0 due to starting a new chain. Since
+	 * it's new chain and the first descriptor "fails", there is
+	 * nothing to clean up. We do not want to reap the entire submitted
+	 * chain due to this 0 address value and then BUG.
+	 */
+	if (!phys_complete)
+		return;
+
+	active = ioat_ring_active(ioat_chan);
+	for (i = 0; i < active && !seen_current; i++) {
+		struct dma_async_tx_descriptor *tx;
+
+		smp_read_barrier_depends();
+		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		dump_desc_dbg(ioat_chan, desc);
+
+		/* set err stat if we are using dwbes */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			desc_get_errstat(ioat_chan, desc);
+
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
 			}
-			goto msi;
+		}
+
+		if (tx->phys == phys_complete)
+			seen_current = true;
+
+		/* skip extended descriptors */
+		if (desc_has_ext(desc)) {
+			BUG_ON(i + 1 >= active);
+			i++;
+		}
+
+		/* cleanup super extended descriptors */
+		if (desc->sed) {
+			ioat_free_sed(ioat_dma, desc->sed);
+			desc->sed = NULL;
 		}
 	}
-	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = IOAT_MSIX;
-	goto done;
 
-msi:
-	err = pci_enable_msi(pdev);
-	if (err)
-		goto intx;
+	/* finish all descriptor reads before incrementing tail */
+	smp_mb();
+	ioat_chan->tail = idx + i;
+	/* no active descs have written a completion? */
+	BUG_ON(active && !seen_current);
+	ioat_chan->last_completion = phys_complete;
 
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
-			       "ioat-msi", device);
-	if (err) {
+	if (active - i == 0) {
+		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
+			__func__);
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
+	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
+}
+
+static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	if (is_ioat_halted(*ioat_chan->completion)) {
+		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+			ioat_eh(ioat_chan);
+		}
+	}
+
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+void ioat_cleanup_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+
+	ioat_cleanup(ioat_chan);
+	if (!test_bit(IOAT_RUN, &ioat_chan->state))
+		return;
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	ioat_quiesce(ioat_chan, 0);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	__ioat_restart_chan(ioat_chan);
+}
+
+static void ioat_eh(struct ioatdma_chan *ioat_chan)
+{
+	struct pci_dev *pdev = to_pdev(ioat_chan);
+	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
+	u64 phys_complete;
+	struct ioat_ring_ent *desc;
+	u32 err_handled = 0;
+	u32 chanerr_int;
+	u32 chanerr;
+
+	/* cleanup so tail points to descriptor that caused the error */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
+		__func__, chanerr, chanerr_int);
+
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+	hw = desc->hw;
+	dump_desc_dbg(ioat_chan, desc);
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_XOR_VAL:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		break;
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+			*desc->result |= SUM_CHECK_Q_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+		}
+		break;
+	}
+
+	/* fault on unhandled error or spurious halt */
+	if (chanerr ^ err_handled || chanerr == 0) {
+		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
+			__func__, chanerr, err_handled);
+		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+	}
+
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+	/* mark faulting descriptor as complete */
+	*ioat_chan->completion = desc->txd.phys;
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_restart_channel(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
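
The fatal-error test above leans on XOR: chanerr ^ err_handled is nonzero exactly when some raised error bit was not recognized by the switch, and the chanerr == 0 arm catches a halt with no error recorded at all. With illustrative (not real CHANERR) bit values:

    u32 chanerr     = 0x14;	/* two error bits raised by hardware */
    u32 err_handled = 0x04;	/* only one consumed by the switch */

    /* chanerr ^ err_handled == 0x10: an unhandled bit remains, so BUG() */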
+
+static void check_active(struct ioatdma_chan *ioat_chan)
+{
+	if (ioat_ring_active(ioat_chan)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
+
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
+		/* if the ring is idle, empty, and oversized try to step
+		 * down the size
+		 */
+		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
+
+		/* keep shrinking until we get back to our minimum
+		 * default size
+		 */
+		if (ioat_chan->alloc_order > ioat_get_alloc_order())
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+}
+
+void ioat_timer_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(ioat_chan);
+
+	/* when halted due to errors, check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		ioat_restart_channel(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+	if (ioat_ring_active(ioat_chan))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		check_active(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+	}
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	ioat_cleanup(ioat_chan);
+
+	return dma_cookie_status(c, cookie, txstate);
+}
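
ioat_tx_status() follows the stock dmaengine pattern: consult the cookie, run cleanup, then re-check. From a client's point of view the same interface is typically driven like this sketch (generic dmaengine usage, not ioat-specific; no timeout handling):

    dma_cookie_t cookie = dmaengine_submit(tx);

    dma_async_issue_pending(chan);
    while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
            cpu_relax();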
+
+static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int irq = pdev->irq, i;
+
+	if (!is_bwd_ioat(pdev))
+		return 0;
+
+	switch (ioat_dma->irq_mode) {
+	case IOAT_MSIX:
+		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
+			struct msix_entry *msix = &ioat_dma->msix_entries[i];
+			struct ioatdma_chan *ioat_chan;
+
+			ioat_chan = ioat_chan_by_index(ioat_dma, i);
+			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
+		}
+
+		pci_disable_msix(pdev);
+		break;
+	case IOAT_MSI:
 		pci_disable_msi(pdev);
-		goto intx;
+		/* fall through */
+	case IOAT_INTX:
+		devm_free_irq(&pdev->dev, irq, ioat_dma);
+		break;
+	default:
+		return 0;
 	}
-	device->irq_mode = IOAT_MSI;
-	goto done;
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 
-intx:
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
-			       IRQF_SHARED, "ioat-intx", device);
-	if (err)
-		goto err_no_irq;
-
-	device->irq_mode = IOAT_INTX;
-done:
-	if (device->intr_quirk)
-		device->intr_quirk(device);
-	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	return 0;
-
-err_no_irq:
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	device->irq_mode = IOAT_NOIRQ;
-	dev_err(dev, "no usable interrupts\n");
-	return err;
-}
-EXPORT_SYMBOL(ioat_dma_setup_interrupts);
-
-static void ioat_disable_interrupts(struct ioatdma_device *device)
-{
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	return ioat_dma_setup_interrupts(ioat_dma);
 }
 
-int ioat_probe(struct ioatdma_device *device)
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
 {
-	int err = -ENODEV;
-	struct dma_device *dma = &device->common;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor),
-					   64, 0);
-	if (!device->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
-
-	if (!device->completion_pool) {
-		err = -ENOMEM;
-		goto err_completion_pool;
-	}
-
-	device->enumerate_channels(device);
-
-	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
-	dma->dev = &pdev->dev;
-
-	if (!dma->chancnt) {
-		dev_err(dev, "channel enumeration error\n");
-		goto err_setup_interrupts;
-	}
-
-	err = ioat_dma_setup_interrupts(device);
-	if (err)
-		goto err_setup_interrupts;
-
-	err = device->self_test(device);
-	if (err)
-		goto err_self_test;
-
-	return 0;
-
-err_self_test:
-	ioat_disable_interrupts(device);
-err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
-err_dma_pool:
-	return err;
-}
-
-int ioat_register(struct ioatdma_device *device)
-{
-	int err = dma_async_device_register(&device->common);
-
-	if (err) {
-		ioat_disable_interrupts(device);
-		pci_pool_destroy(device->completion_pool);
-		pci_pool_destroy(device->dma_pool);
-	}
-
-	return err;
-}
-
-/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
-static void ioat1_intr_quirk(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	u32 dmactrl;
-
-	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-	if (pdev->msi_enabled)
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-	else
-		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
-	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->desccount);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->active);
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static ssize_t cap_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-
-	return sprintf(page, "copy%s%s%s%s%s\n",
-		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
-		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
-		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
-		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
-
-}
-struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
-
-static ssize_t version_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-	struct ioatdma_device *device = to_ioatdma_device(dma);
-
-	return sprintf(page, "%d.%d\n",
-		       device->version >> 4, device->version & 0xf);
-}
-struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
-
-static struct attribute *ioat1_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-static ssize_t
-ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	struct ioat_sysfs_entry *entry;
-	struct ioat_chan_common *chan;
-
-	entry = container_of(attr, struct ioat_sysfs_entry, attr);
-	chan = container_of(kobj, struct ioat_chan_common, kobj);
-
-	if (!entry->show)
-		return -EIO;
-	return entry->show(&chan->common, page);
-}
-
-const struct sysfs_ops ioat_sysfs_ops = {
-	.show	= ioat_attr_show,
-};
-
-static struct kobj_type ioat1_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat1_attrs,
-};
-
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-		struct kobject *parent = &c->dev->device.kobj;
-		int err;
-
-		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
-		if (err) {
-			dev_warn(to_dev(chan),
-				 "sysfs init error (%d), continuing...\n", err);
-			kobject_put(&chan->kobj);
-			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
-		}
-	}
-}
-
-void ioat_kobject_del(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-
-		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
-			kobject_del(&chan->kobj);
-			kobject_put(&chan->kobj);
-		}
-	}
-}
-
-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
+	/* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3-specific workarounds
+	 */
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	u32 chanerr;
+	u16 dev_id;
 	int err;
 
-	device->intr_quirk = ioat1_intr_quirk;
-	device->enumerate_channels = ioat1_enumerate_channels;
-	device->self_test = ioat_dma_self_test;
-	device->timer_fn = ioat1_timer_event;
-	device->cleanup_fn = ioat1_cleanup_event;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
+	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
 
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	err = ioat_register(device);
-	if (err)
-		return err;
-	ioat_kobject_add(device, &ioat1_ktype);
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	if (dca)
-		device->dca = ioat_dca_init(pdev, device->reg_base);
+	if (ioat_dma->version < IOAT_VER_3_3) {
+		/* clear any pending errors */
+		err = pci_read_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+		if (err) {
+			dev_err(&pdev->dev,
+				"channel error register unreachable\n");
+			return err;
+		}
+		pci_write_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+		 * (workaround for spurious config parity error after restart)
+		 */
+		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+			pci_write_config_dword(pdev,
+					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
+					       0x10);
+		}
+	}
+
+	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
+	if (!err)
+		err = ioat_irq_reinit(ioat_dma);
+
+	if (err)
+		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
 
 	return err;
 }
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-
-	ioat_disable_interrupts(device);
-
-	ioat_kobject_del(device);
-
-	dma_async_device_unregister(dma);
-
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
-
-	INIT_LIST_HEAD(&dma->channels);
-}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 30f5c7e..1bc08498 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -18,26 +18,32 @@
 #define IOATDMA_H
 
 #include <linux/dmaengine.h>
-#include "hw.h"
-#include "registers.h"
 #include <linux/init.h>
 #include <linux/dmapool.h>
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
-#include <net/tcp.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include "registers.h"
+#include "hw.h"
 
 #define IOAT_DMA_VERSION  "4.00"
 
-#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
 
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
 
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
+
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
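
These helpers encode the hardware bias: a descriptor's source-count field is stored relative to the two-source minimum (nine for the 16-source extended format). For example, using the macros above (values illustrative):

    int hw_cnt = src_cnt_to_hw(8);	/* 8 sources -> field value 6 */
    int sw_cnt = src_cnt_to_sw(hw_cnt);	/* 6 + 2 = 8, round-trips */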
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
@@ -57,19 +63,15 @@
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @completion_pool: DMA buffers for completion ops
+ * @sed_hw_pool: DMA super descriptor pools
+ * @dma_dev: embedded struct dma_device
  * @version: version of ioatdma device
  * @msix_entries: irq handlers
  * @idx: per channel data
  * @dca: direct cache access context
- * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
- * @enumerate_channels: hw version specific channel enumeration
- * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_fn: select between the v2 and v3 cleanup routines
- * @timer_fn: select between the v2 and v3 timer watchdog routines
- * @self_test: hardware version specific self test for each supported op type
- *
- * Note: the v3 cleanup routine supports raid operations
+ * @irq_mode: interrupt mode (INTX, MSI, MSIX)
+ * @cap: read DMA capabilities register
  */
 struct ioatdma_device {
 	struct pci_dev *pdev;
@@ -78,28 +80,21 @@
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct dma_device common;
+	struct dma_device dma_dev;
 	u8 version;
 	struct msix_entry msix_entries[4];
-	struct ioat_chan_common *idx[4];
+	struct ioatdma_chan *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
-	void (*intr_quirk)(struct ioatdma_device *device);
-	int (*enumerate_channels)(struct ioatdma_device *device);
-	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_fn)(unsigned long data);
-	void (*timer_fn)(unsigned long data);
-	int (*self_test)(struct ioatdma_device *device);
 };
 
-struct ioat_chan_common {
-	struct dma_chan common;
+struct ioatdma_chan {
+	struct dma_chan dma_chan;
 	void __iomem *reg_base;
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
-	#define IOAT_COMPLETION_PENDING 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
@@ -110,11 +105,32 @@
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
 	#define RESET_DELAY msecs_to_jiffies(100)
-	struct ioatdma_device *device;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
 	u64 *completion;
 	struct tasklet_struct cleanup_task;
 	struct kobject kobj;
+
+/* ioat v2 / v3 channel attributes
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
+ * @ring: software ring buffer implementation of hardware ring
+ * @prep_lock: serializes descriptor preparation (producers)
+ */
+	size_t xfercap_log;
+	u16 head;
+	u16 issued;
+	u16 tail;
+	u16 dmacount;
+	u16 alloc_order;
+	u16 produce;
+	struct ioat_ring_ent **ring;
+	spinlock_t prep_lock;
 };
 
 struct ioat_sysfs_entry {
@@ -123,28 +139,11 @@
 };
 
 /**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-	struct ioat_chan_common base;
-
-	size_t xfercap;	/* XFERCAP register value expanded out */
-
-	spinlock_t desc_lock;
-	struct list_head free_desc;
-	struct list_head used_desc;
-
-	int pending;
-	u16 desccount;
-	u16 active;
-};
-
-/**
  * struct ioat_sed_ent - wrapper around super extended hardware descriptor
  * @hw: hardware SED
- * @sed_dma: dma address for the SED
- * @list: list member
+ * @dma: dma address for the SED
+ * @parent: pointer to the parent dma descriptor
+ * @hw_pool: descriptor pool index
  */
 struct ioat_sed_ent {
 	struct ioat_sed_raw_descriptor *hw;
@@ -153,39 +152,57 @@
 	unsigned int hw_pool;
 };
 
-static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
-{
-	return container_of(c, struct ioat_chan_common, common);
-}
-
-static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat_dma_chan, base);
-}
-
-/* wrapper around hardware descriptor format + additional software fields */
-
 /**
- * struct ioat_desc_sw - wrapper around hardware descriptor
+ * struct ioat_ring_ent - wrapper around hardware descriptor
  * @hw: hardware DMA descriptor (for memcpy)
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (tx_list)
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
  * @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
  * @id: identifier for debug
+ * @sed: pointer to super extended descriptor sw desc
  */
-struct ioat_desc_sw {
-	struct ioat_dma_descriptor *hw;
-	struct list_head node;
+struct ioat_ring_ent {
+	union {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_xor_descriptor *xor;
+		struct ioat_xor_ext_descriptor *xor_ex;
+		struct ioat_pq_descriptor *pq;
+		struct ioat_pq_ext_descriptor *pq_ex;
+		struct ioat_pq_update_descriptor *pqu;
+		struct ioat_raw_descriptor *raw;
+	};
 	size_t len;
-	struct list_head tx_list;
 	struct dma_async_tx_descriptor txd;
+	enum sum_check_flags *result;
 	#ifdef DEBUG
 	int id;
 	#endif
+	struct ioat_sed_ent *sed;
 };
 
+extern const struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+extern int ioat_ring_max_alloc_order;
+extern struct kmem_cache *ioat_sed_cache;
+
+static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
+{
+	return container_of(c, struct ioatdma_chan, dma_chan);
+}
+
+/* debug-build helpers for tagging ring descriptors */
 #ifdef DEBUG
 #define set_desc_id(desc, i) ((desc)->id = (i))
 #define desc_id(desc) ((desc)->id)
@@ -195,10 +212,10 @@
 #endif
 
 static inline void
-__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 		struct dma_async_tx_descriptor *tx, int id)
 {
-	struct device *dev = to_dev(chan);
+	struct device *dev = to_dev(ioat_chan);
 
 	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
 		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
@@ -208,25 +225,25 @@
 }
 
 #define dump_desc_dbg(c, d) \
-	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline struct ioat_chan_common *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+static inline struct ioatdma_chan *
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 {
-	return device->idx[index];
+	return ioat_dma->idx[index];
 }
 
-static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 	u32 status_lo;
 
 	/* We need to read the low address first as this causes the
 	 * chipset to latch the upper bits for the subsequent read
 	 */
-	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
-	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
 	status <<= 32;
 	status |= status_lo;
 
@@ -235,16 +252,16 @@
 
 #if BITS_PER_LONG == 64
 
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 
 	 /* With IOAT v3.3 the status register is 64bit.  */
 	if (ver >= IOAT_VER_3_3)
-		status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
 	else
-		status = ioat_chansts_32(chan);
+		status = ioat_chansts_32(ioat_chan);
 
 	return status;
 }
@@ -253,56 +270,41 @@
 #define ioat_chansts ioat_chansts_32
 #endif
 
-static inline void ioat_start(struct ioat_chan_common *chan)
-{
-	u8 ver = chan->device->version;
-
-	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
 static inline u64 ioat_chansts_to_addr(u64 status)
 {
 	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 }
 
-static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 {
-	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 }
 
-static inline void ioat_suspend(struct ioat_chan_common *chan)
+static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_SUSPEND,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline void ioat_reset(struct ioat_chan_common *chan)
+static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u8 cmd;
 
-	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 }
 
-static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-}
-
 static inline bool is_ioat_active(unsigned long status)
 {
 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
@@ -329,24 +331,111 @@
 	return !!err;
 }
 
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+{
+	return 1 << ioat_chan->alloc_order;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
+			ioat_ring_size(ioat_chan));
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
+			ioat_ring_size(ioat_chan));
+}
+
+static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+{
+	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
+}
+
+static inline u16
+ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
+{
+	u16 num_descs = len >> ioat_chan->xfercap_log;
+
+	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
+	return num_descs;
+}
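
This is a shift-based DIV_ROUND_UP: the !! term adds one descriptor for any partial final chunk. With an illustrative xfercap_log of 20 (1 MB per descriptor):

    size_t len = (3 << 20) + 512;				/* 3 MB + 512 B */
    u16 descs = (len >> 20) + !!(len & ((1 << 20) - 1));	/* 3 + 1 = 4 */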
+
+static inline struct ioat_ring_ent *
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+{
+	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
+}
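
Because the ring size is always a power of two, occupancy falls out of CIRC_CNT() and indexing reduces to a mask instead of a modulo. With alloc_order = 8 (illustrative numbers):

    u16 head = 300, tail = 260;			/* head has wrapped past the 256-entry ring */
    u16 active = CIRC_CNT(head, tail, 256);	/* (300 - 260) & 255 = 40 in flight */
    /* ioat_get_ring_ent(chan, 300) reads ring[300 & 255], i.e. slot 44 */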
+
+static inline void
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+{
+	writel(addr & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
+/* IOAT Prep functions */
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags);
+
+/* IOAT Operation functions */
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate);
+void ioat_cleanup_event(unsigned long data);
+void ioat_timer_event(unsigned long data);
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
+void ioat_issue_pending(struct dma_chan *chan);
+
+/* IOAT Init functions */
+bool is_bwd_ioat(struct pci_dev *pdev);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-				   struct dma_tx_state *txstate);
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
-void ioat_stop(struct ioat_chan_common *chan);
-extern const struct sysfs_ops ioat_sysfs_ops;
-extern struct ioat_sysfs_entry ioat_version_attr;
-extern struct ioat_sysfs_entry ioat_cap_attr;
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
+void ioat_stop(struct ioatdma_chan *ioat_chan);
 #endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
deleted file mode 100644
index 69c7dfc..0000000
--- a/drivers/dma/ioat/dma_v2.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
- * does asynchronous data movement and checksumming operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-#include "../dmaengine.h"
-
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat2+: allocate 2^n descriptors per channel"
-		 " (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat2+: upper limit for ring size (default: 16)");
-
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat->dmacount += ioat2_ring_pending(ioat);
-	ioat->issued = ioat->head;
-	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-}
-
-void ioat2_issue_pending(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	if (ioat2_ring_pending(ioat)) {
-		spin_lock_bh(&ioat->prep_lock);
-		__ioat2_issue_pending(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-}
-
-/**
- * ioat2_update_pending - log pending descriptors
- * @ioat: ioat2+ channel
- *
- * Check if the number of unsubmitted descriptors has exceeded the
- * watermark.  Called with prep_lock held
- */
-static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
-{
-	if (ioat2_ring_pending(ioat) > ioat_pending_level)
-		__ioat2_issue_pending(ioat);
-}
-
-static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_ring_space(ioat) < 1) {
-		dev_err(to_dev(&ioat->base),
-			"Unable to start null desc - ring full\n");
-		return;
-	}
-
-	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-	desc = ioat2_get_ring_ent(ioat, ioat->head);
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.compl_write = 1;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-	async_tx_ack(&desc->txd);
-	ioat2_set_chainaddr(ioat, desc->txd.phys);
-	dump_desc_dbg(ioat, desc);
-	wmb();
-	ioat->head += 1;
-	__ioat2_issue_pending(ioat);
-}
-
-static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	spin_lock_bh(&ioat->prep_lock);
-	__ioat2_start_null_desc(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_async_tx_descriptor *tx;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	u16 active;
-	int idx = ioat->tail, i;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		tx = &desc->txd;
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_descriptor_unmap(tx);
-			dma_cookie_complete(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-
-	chan->last_completion = phys_complete;
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-/**
- * ioat2_cleanup - clean finished descriptors (advance tail pointer)
- * @chan: ioat channel to be cleaned up
- */
-static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-void ioat2_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat2_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	/* set the tail to be re-issued */
-	ioat->issued = ioat->tail;
-	ioat->dmacount = 0;
-	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-
-	if (ioat2_ring_pending(ioat)) {
-		struct ioat_ring_ent *desc;
-
-		desc = ioat2_get_ring_ent(ioat, ioat->tail);
-		ioat2_set_chainaddr(ioat, desc->txd.phys);
-		__ioat2_issue_pending(ioat);
-	} else
-		__ioat2_start_null_desc(ioat);
-}
-
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (tmo && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}
-
-	return err;
-}
-
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-
-	ioat_reset(chan);
-	while (ioat_reset_pending(chan)) {
-		if (end && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		cpu_relax();
-	}
-
-	return err;
-}
-
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
-}
-
-void ioat2_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat2_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it initialized */
-	u32 chanerr;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
-/**
- * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
-{
-	struct ioat2_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 xfercap_log;
-	int i;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_log &= 0x1f; /* bits [4:0] valid */
-	if (xfercap_log == 0)
-		return 0;
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
-	/* FIXME which i/oat version is i7300? */
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap_log = xfercap_log;
-		spin_lock_init(&ioat->prep_lock);
-		if (device->reset_hw(&ioat->base)) {
-			i = 0;
-			break;
-		}
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_cookie_t cookie;
-
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	/* make descriptor updates visible before advancing ioat->head,
-	 * this is purposefully not smp_wmb() since we are also
-	 * publishing the descriptor updates to a dma device
-	 */
-	wmb();
-
-	ioat->head += ioat->produce;
-
-	ioat2_update_pending(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	return cookie;
-}
-
-static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
-{
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	struct ioatdma_device *dma;
-	dma_addr_t phys;
-
-	dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
-	if (!hw)
-		return NULL;
-	memset(hw, 0, sizeof(*hw));
-
-	desc = kmem_cache_zalloc(ioat2_cache, flags);
-	if (!desc) {
-		pci_pool_free(dma->dma_pool, hw, phys);
-		return NULL;
-	}
-
-	dma_async_tx_descriptor_init(&desc->txd, chan);
-	desc->txd.tx_submit = ioat2_tx_submit_unlock;
-	desc->hw = hw;
-	desc->txd.phys = phys;
-	return desc;
-}
-
-static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
-{
-	struct ioatdma_device *dma;
-
-	dma = to_ioatdma_device(chan->device);
-	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
-	kmem_cache_free(ioat2_cache, desc);
-}
-
-static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
-{
-	struct ioat_ring_ent **ring;
-	int descs = 1 << order;
-	int i;
-
-	if (order > ioat_get_max_alloc_order())
-		return NULL;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(descs, sizeof(*ring), flags);
-	if (!ring)
-		return NULL;
-	for (i = 0; i < descs; i++) {
-		ring[i] = ioat2_alloc_ring_ent(c, flags);
-		if (!ring[i]) {
-			while (i--)
-				ioat2_free_ring_ent(ring[i], c);
-			kfree(ring);
-			return NULL;
-		}
-		set_desc_id(ring[i], i);
-	}
-
-	/* link descs */
-	for (i = 0; i < descs-1; i++) {
-		struct ioat_ring_ent *next = ring[i+1];
-		struct ioat_dma_descriptor *hw = ring[i]->hw;
-
-		hw->next = next->txd.phys;
-	}
-	ring[i]->hw->next = ring[0]->txd.phys;
-
-	return ring;
-}
-
-void ioat2_free_chan_resources(struct dma_chan *c);
-
-/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
- * @chan: channel to be initialized
- */
-int ioat2_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_ring_ent **ring;
-	u64 status;
-	int order;
-	int i = 0;
-
-	/* have we already been set up? */
-	if (ioat->ring)
-		return 1 << ioat->alloc_order;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	if (!chan->completion)
-		return -ENOMEM;
-
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	order = ioat_get_alloc_order();
-	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
-	if (!ring)
-		return -ENOMEM;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	ioat->ring = ring;
-	ioat->head = 0;
-	ioat->issued = 0;
-	ioat->tail = 0;
-	ioat->alloc_order = order;
-	set_bit(IOAT_RUN, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	ioat2_start_null_desc(ioat);
-
-	/* check that we got off the ground */
-	do {
-		udelay(1);
-		status = ioat_chansts(chan);
-	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
-	if (is_ioat_active(status) || is_ioat_idle(status)) {
-		return 1 << ioat->alloc_order;
-	} else {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		dev_WARN(to_dev(chan),
-			"failed to start channel chanerr: %#x\n", chanerr);
-		ioat2_free_chan_resources(c);
-		return -EFAULT;
-	}
-}
-
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
-{
-	/* reshape differs from normal ring allocation in that we want
-	 * to allocate a new software ring while only
-	 * extending/truncating the hardware ring
-	 */
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_chan *c = &chan->common;
-	const u32 curr_size = ioat2_ring_size(ioat);
-	const u16 active = ioat2_ring_active(ioat);
-	const u32 new_size = 1 << order;
-	struct ioat_ring_ent **ring;
-	u16 i;
-
-	if (order > ioat_get_max_alloc_order())
-		return false;
-
-	/* double check that we have at least 1 free descriptor */
-	if (active == curr_size)
-		return false;
-
-	/* when shrinking, verify that we can hold the current active
-	 * set in the new ring
-	 */
-	if (active >= new_size)
-		return false;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
-	if (!ring)
-		return false;
-
-	/* allocate/trim descriptors as needed */
-	if (new_size > curr_size) {
-		/* copy current descriptors to the new ring */
-		for (i = 0; i < curr_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* add new descriptors to the ring */
-		for (i = curr_size; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
-			if (!ring[new_idx]) {
-				while (i--) {
-					u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-					ioat2_free_ring_ent(ring[new_idx], c);
-				}
-				kfree(ring);
-				return false;
-			}
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* hw link new descriptors */
-		for (i = curr_size-1; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
-			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
-			hw->next = next->txd.phys;
-		}
-	} else {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_ring_ent *next;
-
-		/* copy current descriptors to the new ring, dropping the
-		 * removed descriptors
-		 */
-		for (i = 0; i < new_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* free deleted descriptors */
-		for (i = new_size; i < curr_size; i++) {
-			struct ioat_ring_ent *ent;
-
-			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
-			ioat2_free_ring_ent(ent, c);
-		}
-
-		/* fix up hardware ring */
-		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
-		next = ring[(ioat->tail+new_size) & (new_size-1)];
-		hw->next = next->txd.phys;
-	}
-
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, new_size);
-
-	kfree(ioat->ring);
-	ioat->ring = ring;
-	ioat->alloc_order = order;
-
-	return true;
-}
-
-/**
- * ioat2_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat2,3 channel (ring) to operate on
- * @num_descs: allocation length
- */
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	bool retry;
-
- retry:
-	spin_lock_bh(&ioat->prep_lock);
-	/* never allow the last descriptor to be consumed, we need at
-	 * least one free at all times to allow for on-the-fly ring
-	 * resizing.
-	 */
-	if (likely(ioat2_ring_space(ioat) > num_descs)) {
-		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-		ioat->produce = num_descs;
-		return 0;  /* with ioat->prep_lock held */
-	}
-	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	/* is another cpu already trying to expand the ring? */
-	if (retry)
-		goto retry;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	retry = reshape_ring(ioat, ioat->alloc_order + 1);
-	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	/* if we were able to expand the ring retry the allocation */
-	if (retry)
-		goto retry;
-
-	if (printk_ratelimit())
-		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-
-	/* progress reclaim in the allocation failure case we may be
-	 * called under bh_disabled so we need to trigger the timer
-	 * event directly
-	 */
-	if (time_is_before_jiffies(chan->timer.expires)
-	    && timer_pending(&chan->timer)) {
-		struct ioatdma_device *device = chan->device;
-
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		device->timer_fn((unsigned long) &chan->common);
-	}
-
-	return -ENOMEM;
-}
-
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	dma_addr_t dst = dma_dest;
-	dma_addr_t src = dma_src;
-	size_t total_len = len;
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		hw = desc->hw;
-
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-	/* we leave the channel locked to ensure in order submission */
-
-	return &desc->txd;
-}
-
-/**
- * ioat2_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-void ioat2_free_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	const u16 total_descs = 1 << ioat->alloc_order;
-	int descs;
-	int i;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (!ioat->ring)
-		return;
-
-	ioat_stop(chan);
-	device->reset_hw(chan);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	descs = ioat2_ring_space(ioat);
-	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
-	for (i = 0; i < descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	if (descs < total_descs)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			total_descs - descs);
-
-	for (i = 0; i < total_descs - descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
-		dump_desc_dbg(ioat, desc);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	kfree(ioat->ring);
-	ioat->ring = NULL;
-	ioat->alloc_order = 0;
-	pci_pool_free(device->completion_pool, chan->completion,
-		      chan->completion_dma);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->dmacount = 0;
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	/* ...taken outside the lock, no need to be precise */
-	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static struct attribute *ioat2_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-struct kobj_type ioat2_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat2_attrs,
-};
-
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_fn = ioat2_cleanup_event;
-	device->timer_fn = ioat2_timer_event;
-	device->self_test = ioat_dma_self_test;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat2_dca_init(pdev, device->reg_base);
-
-	return err;
-}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
deleted file mode 100644
index bf24ebe..0000000
--- a/drivers/dma/ioat/dma_v2.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_V2_H
-#define IOATDMA_V2_H
-
-#include <linux/dmaengine.h>
-#include <linux/circ_buf.h>
-#include "dma.h"
-#include "hw.h"
-
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
-
-/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
- * @base: common ioat channel parameters
- * @xfercap_log: log2 of channel max transfer length (for fast division)
- * @head: allocated index
- * @issued: hardware notification point
- * @tail: cleanup index
- * @dmacount: identical to 'head' except for occasionally resetting to zero
- * @alloc_order: log2 of the number of allocated descriptors
- * @produce: number of descriptors to produce at submit time
- * @ring: software ring buffer implementation of hardware ring
- * @prep_lock: serializes descriptor preparation (producers)
- */
-struct ioat2_dma_chan {
-	struct ioat_chan_common base;
-	size_t xfercap_log;
-	u16 head;
-	u16 issued;
-	u16 tail;
-	u16 dmacount;
-	u16 alloc_order;
-	u16 produce;
-	struct ioat_ring_ent **ring;
-	spinlock_t prep_lock;
-};
-
-static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat2_dma_chan, base);
-}
-
-static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
-{
-	return 1 << ioat->alloc_order;
-}
-
-/* count of descriptors in flight with the engine */
-static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
-}
-
-/* count of descriptors pending submission to hardware */
-static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
-}
-
-static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
-{
-	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
-}
-
-static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
-{
-	u16 num_descs = len >> ioat->xfercap_log;
-
-	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
-	return num_descs;
-}
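
For reference (not part of the patch): the helpers above rely on two idioms, CIRC_CNT() over a power-of-two ring, which makes the u16 head/tail wrap-around free, and a shift-based ceiling division in ioat2_xferlen_to_descs(). A minimal standalone sketch with toy values:

    #include <stdio.h>

    /* same form as the kernel's CIRC_CNT() for power-of-two sizes */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    int main(void)
    {
        unsigned short head = 5, tail = 65533; /* head wrapped past tail */
        unsigned int ring = 1 << 4;            /* alloc_order = 4 -> 16 entries */
        unsigned int xfercap_log = 20;         /* 1 MB max per descriptor */
        unsigned long len = (5UL << 20) + 1;   /* 5 MB plus one byte */
        unsigned int descs;

        /* entries 65533..65535 and 0..4 in flight: 8 active, 8 free */
        printf("active %u space %u\n", (unsigned)CIRC_CNT(head, tail, ring),
               ring - (unsigned)CIRC_CNT(head, tail, ring));

        /* ceiling division by shift plus remainder test: 6 descriptors, not 5 */
        descs = len >> xfercap_log;
        descs += !!(len & ((1UL << xfercap_log) - 1));
        printf("descs %u\n", descs);
        return 0;
    }
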
-
-/**
- * struct ioat_ring_ent - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @xor: hardware xor descriptor
- * @xor_ex: hardware xor extension descriptor
- * @pq: hardware pq descriptor
- * @pq_ex: hardware pq extension descriptor
- * @pqu: hardware pq update descriptor
- * @raw: hardware raw (un-typed) descriptor
- * @txd: the generic software descriptor for all engines
- * @len: total transaction length for unmap
- * @result: asynchronous result of validate operations
- * @id: identifier for debug
- * @sed: super extended descriptor entry
- */
-
-struct ioat_ring_ent {
-	union {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_xor_descriptor *xor;
-		struct ioat_xor_ext_descriptor *xor_ex;
-		struct ioat_pq_descriptor *pq;
-		struct ioat_pq_ext_descriptor *pq_ex;
-		struct ioat_pq_update_descriptor *pqu;
-		struct ioat_raw_descriptor *raw;
-	};
-	size_t len;
-	struct dma_async_tx_descriptor txd;
-	enum sum_check_flags *result;
-	#ifdef DEBUG
-	int id;
-	#endif
-	struct ioat_sed_ent *sed;
-};
-
-static inline struct ioat_ring_ent *
-ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
-{
-	return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
-}
-
-static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-}
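
For reference (not part of the patch): ioat2_set_chainaddr() splits the 64-bit descriptor chain address into two 32-bit register writes, low half then high half. The same split, standalone:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0x0000001234abcd00ULL; /* arbitrary example address */

        printf("CHAINADDR_LOW  = %#010x\n",
               (unsigned)(addr & 0x00000000FFFFFFFFULL));
        printf("CHAINADDR_HIGH = %#010x\n", (unsigned)(addr >> 32));
        return 0;
    }
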
-
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags);
-void ioat2_issue_pending(struct dma_chan *chan);
-int ioat2_alloc_chan_resources(struct dma_chan *c);
-void ioat2_free_chan_resources(struct dma_chan *c);
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_event(unsigned long data);
-void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
-extern struct kobj_type ioat2_ktype;
-extern struct kmem_cache *ioat2_cache;
-#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
deleted file mode 100644
index 64790a4..0000000
--- a/drivers/dma/ioat/dma_v3.c
+++ /dev/null
@@ -1,1717 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * BSD LICENSE
- *
- * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Support routines for v3+ hardware
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include "../dmaengine.h"
-#include "registers.h"
-#include "hw.h"
-#include "dma.h"
-#include "dma_v2.h"
-
-extern struct kmem_cache *ioat3_sed_cache;
-
-/* ioat hardware assumes at least two sources for raid operations */
-#define src_cnt_to_sw(x) ((x) + 2)
-#define src_cnt_to_hw(x) ((x) - 2)
-#define ndest_to_sw(x) ((x) + 1)
-#define ndest_to_hw(x) ((x) - 1)
-#define src16_cnt_to_sw(x) ((x) + 9)
-#define src16_cnt_to_hw(x) ((x) - 9)
-
-/* provide a lookup table for setting the source address in the base or
- * extended descriptor of an xor or pq descriptor
- */
-static const u8 xor_idx_to_desc = 0xe0;
-static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc = 0xf8;
-static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
-				       2, 2, 2, 2, 2, 2, 2 };
-static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
-static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
-					0, 1, 2, 3, 4, 5, 6 };
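
For reference (not part of the patch): these masks and tables encode, per source index, which descriptor (base or extended) and which field a source address lands in; xor_idx_to_desc = 0xe0, for example, routes sources 5-7 to the extended descriptor, which is why more than 5 xor sources need a second descriptor. A toy illustration of the lookup used by xor_set_src() below:

    #include <stdio.h>

    int main(void)
    {
        const unsigned char xor_idx_to_desc = 0xe0; /* 1110 0000 */
        const unsigned char xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
        int idx;

        /* bit idx set -> extended descriptor (descs[1]), clear -> base */
        for (idx = 0; idx < 8; idx++)
            printf("src %d -> desc %d, field %d\n", idx,
                   (xor_idx_to_desc >> idx) & 1, xor_idx_to_field[idx]);
        return 0;
    }
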
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat);
-
-static void xor_set_src(struct ioat_raw_descriptor *descs[2],
-			dma_addr_t addr, u32 offset, int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
-	raw->field[xor_idx_to_field[idx]] = addr + offset;
-}
-
-static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	return raw->field[pq_idx_to_field[idx]];
-}
-
-static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
-{
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	return raw->field[pq16_idx_to_field[idx]];
-}
-
-static void pq_set_src(struct ioat_raw_descriptor *descs[2],
-		       dma_addr_t addr, u32 offset, u8 coef, int idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	raw->field[pq_idx_to_field[idx]] = addr + offset;
-	pq->coef[idx] = coef;
-}
-
-static bool is_jf_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
-		return true;
-	default:
-		return false;
-	}
-
-
-static bool is_hsw_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
-		return true;
-	default:
-		return false;
-	}
-
-
-static bool is_xeon_cb32(struct pci_dev *pdev)
-{
-	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev);
-}
-
-static bool is_bwd_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	/* even though not Atom, BDX-DE has the same DMA silicon */
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_bwd_noraid(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
-	struct ioat_pq16a_descriptor *pq16 =
-		(struct ioat_pq16a_descriptor *)desc[1];
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	raw->field[pq16_idx_to_field[idx]] = addr + offset;
-
-	if (idx < 8)
-		pq->coef[idx] = coef;
-	else
-		pq16->coef[idx - 8] = coef;
-}
-
-static struct ioat_sed_ent *
-ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
-{
-	struct ioat_sed_ent *sed;
-	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
-
-	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
-	if (!sed)
-		return NULL;
-
-	sed->hw_pool = hw_pool;
-	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
-				 flags, &sed->dma);
-	if (!sed->hw) {
-		kmem_cache_free(ioat3_sed_cache, sed);
-		return NULL;
-	}
-
-	return sed;
-}
-
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
-{
-	if (!sed)
-		return;
-
-	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
-	kmem_cache_free(ioat3_sed_cache, sed);
-}
-
-static bool desc_has_ext(struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
-	if (hw->ctl_f.op == IOAT_OP_XOR ||
-	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
-		struct ioat_xor_descriptor *xor = desc->xor;
-
-		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
-			return true;
-	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
-		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
-			return true;
-	}
-
-	return false;
-}
-
-static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
-{
-	u64 phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	return phys_complete;
-}
-
-static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
-				   u64 *phys_complete)
-{
-	*phys_complete = ioat3_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void
-desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-	{
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		/* check if there's error written */
-		if (!pq->dwbes_f.wbes)
-			return;
-
-		/* need to save a chanerr value here so it can be checked and cleared later */
-
-		if (pq->dwbes_f.p_val_err)
-			*desc->result |= SUM_CHECK_P_RESULT;
-
-		if (pq->dwbes_f.q_val_err)
-			*desc->result |= SUM_CHECK_Q_RESULT;
-
-		return;
-	}
-	default:
-		return;
-	}
-}
-
-/**
- * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
- * @phys_complete: completion address reported by the channel status
- *
- * The difference from the dma_v2.c __cleanup() is that this routine
- * handles extended descriptors and dma-unmapping raid operations.
- */
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	int idx = ioat->tail, i;
-	u16 active;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	/*
-	 * At restart of the channel, the completion address and the
-	 * channel status will be 0 due to starting a new chain. Since
-	 * it's a new chain and the first descriptor "fails", there is
-	 * nothing to clean up. We do not want to reap the entire submitted
-	 * chain due to this 0 address value and then BUG.
-	 */
-	if (!phys_complete)
-		return;
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		struct dma_async_tx_descriptor *tx;
-
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		dump_desc_dbg(ioat, desc);
-
-		/* set err stat if we are using dwbes */
-		if (device->cap & IOAT_CAP_DWBES)
-			desc_get_errstat(ioat, desc);
-
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-
-		/* skip extended descriptors */
-		if (desc_has_ext(desc)) {
-			BUG_ON(i + 1 >= active);
-			i++;
-		}
-
-		/* cleanup super extended descriptors */
-		if (desc->sed) {
-			ioat3_free_sed(device, desc->sed);
-			desc->sed = NULL;
-		}
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-	chan->last_completion = phys_complete;
-
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-	/* 5 microsecond delay per pending descriptor */
-	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
-	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
-}
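
For reference (not part of the patch): the final writew() above coalesces interrupts by programming a delay of 5 microseconds per descriptor still pending, clamped to the register field's maximum. A standalone sketch; the 0x3fff field width here is an assumption for illustration:

    #include <stdio.h>

    #define INTRDELAY_MASK 0x3fff /* assumed width of the delay field */

    static unsigned int intr_delay_us(unsigned int pending)
    {
        unsigned int us = 5 * pending; /* 5 us per pending descriptor */

        return us < INTRDELAY_MASK ? us : INTRDELAY_MASK;
    }

    int main(void)
    {
        printf("100 pending  -> %u us\n", intr_delay_us(100));  /* 500 */
        printf("9000 pending -> %u us\n", intr_delay_us(9000)); /* clamped */
        return 0;
    }
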
-
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	if (is_ioat_halted(*chan->completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-			ioat3_eh(ioat);
-		}
-	}
-
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat3_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat3_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct pci_dev *pdev = to_pdev(chan);
-	struct ioat_dma_descriptor *hw;
-	struct dma_async_tx_descriptor *tx;
-	u64 phys_complete;
-	struct ioat_ring_ent *desc;
-	u32 err_handled = 0;
-	u32 chanerr_int;
-	u32 chanerr;
-
-	/* cleanup so that tail points to the descriptor that caused the error */
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
-
-	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
-		__func__, chanerr, chanerr_int);
-
-	desc = ioat2_get_ring_ent(ioat, ioat->tail);
-	hw = desc->hw;
-	dump_desc_dbg(ioat, desc);
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_XOR_VAL:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		break;
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
-			*desc->result |= SUM_CHECK_Q_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
-		}
-		break;
-	}
-
-	/* fault on unhandled error or spurious halt */
-	if (chanerr ^ err_handled || chanerr == 0) {
-		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
-			__func__, chanerr, err_handled);
-		BUG();
-	} else { /* cleanup the faulty descriptor */
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-	}
-
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
-
-	/* mark faulting descriptor as complete */
-	*chan->completion = desc->txd.phys;
-
-	spin_lock_bh(&ioat->prep_lock);
-	ioat3_restart_channel(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
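
For reference (not part of the patch): the fatal-error test above, chanerr ^ err_handled, is non-zero exactly when some raised error bit was not handled by the switch; a halt with chanerr == 0 is likewise treated as fatal. A toy illustration with arbitrary bit values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chanerr = 0x14;     /* two error bits raised (toy values) */
        unsigned int err_handled = 0x04; /* only one of them handled */

        if ((chanerr ^ err_handled) || chanerr == 0)
            printf("fatal: unhandled bits %#x\n", chanerr ^ err_handled);
        else
            printf("recovered\n");
        return 0;
    }
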
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized, try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-static void ioat3_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors, check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat3_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static enum dma_status
-ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		struct dma_tx_state *txstate)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	ioat3_cleanup(ioat);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
-		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
-		      size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_xor_descriptor *xor;
-	struct ioat_xor_ext_descriptor *xor_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	int num_descs, with_ext, idx, i;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
-
-	BUG_ON(src_cnt < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover more than 5
-	 * sources
-	 */
-	if (src_cnt > 5) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-		int s;
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		xor = desc->xor;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor; xor_set_src() knows not to
-		 * write to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
-		xor_ex = ext->xor_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) xor;
-		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
-		for (s = 0; s < src_cnt; s++)
-			xor_set_src(descs, src[s], offset, s);
-		xor->size = xfer_size;
-		xor->dst_addr = dest + offset;
-		xor->ctl = 0;
-		xor->ctl_f.op = op;
-		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
-
-		len -= xfer_size;
-		offset += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last xor descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* completion descriptor carries interrupt bit */
-	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-	hw = compl_desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	dump_desc_dbg(ioat, compl_desc);
-
-	/* we leave the channel locked to ensure in-order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
-	       unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
-		    unsigned int src_cnt, size_t len,
-		    enum sum_check_flags *result, unsigned long flags)
-{
-	/* the cleanup routine only sets bits on validate failure; it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
-				     src_cnt - 1, len, flags);
-}
-
-static void
-dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
-	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
-	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
-		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++)
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
-}
-
-static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
-			       struct ioat_ring_ent *desc)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_raw_descriptor *descs[] = { (void *)pq,
-						(void *)pq,
-						(void *)pq };
-	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	if (desc->sed) {
-		descs[1] = (void *)desc->sed->hw;
-		descs[2] = (void *)desc->sed->hw + 64;
-	}
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) pq->next,
-		desc->txd.flags, pq->size, pq->ctl,
-		pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++) {
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq16_get_src(descs, i),
-			pq->coef[i]);
-	}
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
-		     const dma_addr_t *dst, const dma_addr_t *src,
-		     unsigned int src_cnt, const unsigned char *scf,
-		     size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	struct ioat_pq_ext_descriptor *pq_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
-	int i, s, idx, with_ext, num_descs;
-	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-	/* the engine requires at least two sources (we provide
-	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
-	 */
-	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover more than 3
-	 * sources (we need 1 extra source in the q-only continuation
-	 * case and 3 extra sources in the p+q continuation case).
-	 */
-	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
-	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor; pq_set_src() knows not to
-		 * write to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
-		pq_ex = ext->pq_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-
-		for (s = 0; s < src_cnt; s++)
-			pq_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq_set_src(descs, dst[0], offset, 0, s++);
-			pq_set_src(descs, dst[1], offset, 1, s++);
-			pq_set_src(descs, dst[1], offset, 0, s++);
-		}
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		/* we turn on descriptor write-back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	dump_pq_desc_dbg(ioat, desc, ext);
-
-	if (!cb32) {
-		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		pq->ctl_f.compl_write = 1;
-		compl_desc = desc;
-	} else {
-		/* completion descriptor carries interrupt bit */
-		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-		hw = compl_desc->hw;
-		hw->ctl = 0;
-		hw->ctl_f.null = 1;
-		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		hw->ctl_f.compl_write = 1;
-		hw->size = NULL_DESC_BUFFER_SIZE;
-		dump_desc_dbg(ioat, compl_desc);
-	}
-
-	/* we leave the channel locked to ensure in-order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
-		       const dma_addr_t *dst, const dma_addr_t *src,
-		       unsigned int src_cnt, const unsigned char *scf,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	u32 offset = 0;
-	u8 op;
-	int i, s, idx, num_descs;
-
-	/* this function is only called with 9-16 sources */
-	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-
-	/*
-	 * 16 source pq is only available on cb3.3, which does not have
-	 * the completion write hw bug.
-	 */
-	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-
-	i = 0;
-
-	do {
-		struct ioat_raw_descriptor *descs[4];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-
-		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
-		if (!desc->sed) {
-			dev_err(to_dev(chan),
-				"%s: no free sed entries\n", __func__);
-			return NULL;
-		}
-
-		pq->sed_addr = desc->sed->dma;
-		desc->sed->parent = desc;
-
-		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
-		descs[2] = (void *)descs[1] + 64;
-
-		for (s = 0; s < src_cnt; s++)
-			pq16_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq16_set_src(descs, dst[0], offset, 0, s++);
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-			pq16_set_src(descs, dst[1], offset, 0, s++);
-		}
-
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
-		/* we turn on descriptor write-back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while (++i < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* with cb3.3 we should be able to do completion w/o a null desc */
-	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	pq->ctl_f.compl_write = 1;
-
-	dump_pq16_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in-order submission */
-	return &desc->txd;
-}
-
-static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
-{
-	if (dmaf_p_disabled_continue(flags))
-		return src_cnt + 1;
-	else if (dmaf_continue(flags))
-		return src_cnt + 3;
-	else
-		return src_cnt;
-}
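
For reference (not part of the patch): src_cnt_flags() folds the implied continuation sources into the count that picks the 8- versus 16-source prep path, so seven explicit sources plus a full p+q continuation count as ten and take __ioat3_prep_pq16_lock(). A standalone sketch with the two flag tests modeled as plain ints:

    #include <stdio.h>

    static int src_cnt_flags(unsigned int src_cnt, int p_disabled_continue,
                             int continue_flag)
    {
        if (p_disabled_continue)
            return src_cnt + 1; /* one implied q source */
        else if (continue_flag)
            return src_cnt + 3; /* implied p and two implied q sources */
        else
            return src_cnt;
    }

    int main(void)
    {
        /* 7 explicit sources + full p+q continuation = 10 -> 16-source path */
        printf("%d\n", src_cnt_flags(7, 0, 1));
        return 0;
    }
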
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
-	      unsigned int src_cnt, const unsigned char *scf, size_t len,
-	      unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		dst[0] = dst[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		dst[1] = dst[0];
-
-	/* handle the single source multiply case from the raid6
-	 * recovery path
-	 */
-	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
-		dma_addr_t single_source[2];
-		unsigned char single_source_coef[2];
-
-		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
-		single_source[0] = src[0];
-		single_source[1] = src[0];
-		single_source_coef[0] = scf[0];
-		single_source_coef[1] = 0;
-
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
-					       2, single_source_coef, len,
-					       flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
-					     single_source_coef, len, flags);
-
-	} else {
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
-					       scf, len, flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
-					     scf, len, flags);
-	}
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
-		  unsigned int src_cnt, const unsigned char *scf, size_t len,
-		  enum sum_check_flags *pqres, unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		pq[0] = pq[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		pq[1] = pq[0];
-
-	/* the cleanup routine only sets bits on validate failure; it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*pqres = 0;
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
-		 unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	memset(scf, 0, src_cnt);
-	pq[0] = dst;
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = dst; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
-		     unsigned int src_cnt, size_t len,
-		     enum sum_check_flags *result, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	/* the cleanup routine only sets bits on validate failure; it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	memset(scf, 0, src_cnt);
-	pq[0] = src[0];
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = pq[0]; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
-				       scf, len, flags) :
-		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
-				     scf, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_check_space_lock(ioat, 1) == 0)
-		desc = ioat2_get_ring_ent(ioat, ioat->head);
-	else
-		return NULL;
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-
-	desc->txd.flags = flags;
-	desc->len = 1;
-
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in-order submission */
-	return &desc->txd;
-}
-
-static void ioat3_dma_test_callback(void *dma_async_param)
-{
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
-}
-
-#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int ioat_xor_val_self_test(struct ioatdma_device *device)
-{
-	int i, src_idx;
-	struct page *dest;
-	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
-	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dest_dma;
-	struct dma_async_tx_descriptor *tx;
-	struct dma_chan *dma_chan;
-	dma_cookie_t cookie;
-	u8 cmp_byte = 0;
-	u32 cmp_word;
-	u32 xor_val_result;
-	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 op = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
-		return 0;
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-		if (!xor_srcs[src_idx]) {
-			while (src_idx--)
-				__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
-	}
-
-	dest = alloc_page(GFP_KERNEL);
-	if (!dest) {
-		while (src_idx--)
-			__free_page(xor_srcs[src_idx]);
-		return -ENOMEM;
-	}
-
-	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		u8 *ptr = page_address(xor_srcs[src_idx]);
-		for (i = 0; i < PAGE_SIZE; i++)
-			ptr[i] = (1 << src_idx);
-	}
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
-		cmp_byte ^= (u8) (1 << src_idx);
-
-	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
-			(cmp_byte << 8) | cmp_byte;
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* test xor */
-	op = IOAT_OP_XOR;
-
-	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
-		goto dma_unmap;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT);
-
-	if (!tx) {
-		dev_err(dev, "Self-test xor prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test xor setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test xor timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i] != cmp_word) {
-			dev_err(dev, "Self-test xor failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	/* skip validate if the capability is not present */
-	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	op = IOAT_OP_XOR_VAL;
-
-	/* validate the sources with the destination page */
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		xor_val_srcs[i] = xor_srcs[i];
-	xor_val_srcs[i] = dest;
-
-	xor_val_result = 1;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	if (xor_val_result != 0) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	/* test for non-zero parity sum */
-	op = IOAT_OP_XOR_VAL;
-
-	xor_val_result = 0;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test 2nd zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test 2nd zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test 2nd validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	if (xor_val_result != SUM_CHECK_P_RESULT) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	goto free_resources;
-dma_unmap:
-	if (op == IOAT_OP_XOR) {
-		if (dest_dma != DMA_ERROR_CODE)
-			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
-				       DMA_FROM_DEVICE);
-		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_XOR_VAL) {
-		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	}
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	src_idx = IOAT_NUM_SRC_TEST;
-	while (src_idx--)
-		__free_page(xor_srcs[src_idx]);
-	__free_page(dest);
-	return err;
-}
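
For reference (not part of the patch): in the self-test above each source page is filled with the single byte (1 << src_idx), so the XOR of the six pages is the constant 0x3f in every byte, and cmp_word is that byte replicated to 32 bits for the word-at-a-time compare. A standalone check:

    #include <stdio.h>

    int main(void)
    {
        unsigned char cmp_byte = 0;
        unsigned int cmp_word;
        int src_idx;

        for (src_idx = 0; src_idx < 6; src_idx++)
            cmp_byte ^= (unsigned char)(1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                   (cmp_byte << 8) | cmp_byte;
        /* prints cmp_byte=0x3f cmp_word=0x3f3f3f3f */
        printf("cmp_byte=%#x cmp_word=%#x\n", cmp_byte, cmp_word);
        return 0;
    }
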
-
-static int ioat3_dma_self_test(struct ioatdma_device *device)
-{
-	int rc = ioat_dma_self_test(device);
-
-	if (rc)
-		return rc;
-
-	rc = ioat_xor_val_self_test(device);
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
-static int ioat3_irq_reinit(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	int irq = pdev->irq, i;
-
-	if (!is_bwd_ioat(pdev))
-		return 0;
-
-	switch (device->irq_mode) {
-	case IOAT_MSIX:
-		for (i = 0; i < device->common.chancnt; i++) {
-			struct msix_entry *msix = &device->msix_entries[i];
-			struct ioat_chan_common *chan;
-
-			chan = ioat_chan_by_index(device, i);
-			devm_free_irq(&pdev->dev, msix->vector, chan);
-		}
-
-		pci_disable_msix(pdev);
-		break;
-	case IOAT_MSI:
-		pci_disable_msi(pdev);
-		/* fall through */
-	case IOAT_INTX:
-		devm_free_irq(&pdev->dev, irq, device);
-		break;
-	default:
-		return 0;
-	}
-	device->irq_mode = IOAT_NOIRQ;
-
-	return ioat_dma_setup_interrupts(device);
-}
-
-static int ioat3_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it
-	 * initialized, with ioat3-specific workarounds
-	 */
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	u32 chanerr;
-	u16 dev_id;
-	int err;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	if (device->version < IOAT_VER_3_3) {
-		/* clear any pending errors */
-		err = pci_read_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
-		if (err) {
-			dev_err(&pdev->dev,
-				"channel error register unreachable\n");
-			return err;
-		}
-		pci_write_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-
-		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			pci_write_config_dword(pdev,
-					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
-					       0x10);
-		}
-	}
-
-	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
-	if (!err)
-		err = ioat3_irq_reinit(device);
-
-	if (err)
-		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
-
-	return err;
-}
-
-static void ioat3_intr_quirk(struct ioatdma_device *device)
-{
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	u32 errmask;
-
-	dma = &device->common;
-
-	/*
-	 * if we have descriptor write-back error status, we mask the
-	 * error interrupts
-	 */
-	if (device->cap & IOAT_CAP_DWBES) {
-		list_for_each_entry(c, &dma->channels, device_node) {
-			chan = to_chan_common(c);
-			errmask = readl(chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
-				   IOAT_CHANERR_XOR_Q_ERR;
-			writel(errmask, chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-		}
-	}
-}
-
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	int dca_en = system_has_dca_enabled(pdev);
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	bool is_raid_device = false;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat3_reset_hw;
-	device->self_test = ioat3_dma_self_test;
-	device->intr_quirk = ioat3_intr_quirk;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-
-	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
-	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
-
-	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
-
-	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
-		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
-
-	/* dca is incompatible with raid operations */
-	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
-
-	if (device->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
-		dma->max_xor = 8;
-
-		dma_cap_set(DMA_XOR, dma->cap_mask);
-		dma->device_prep_dma_xor = ioat3_prep_xor;
-
-		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
-	}
-
-	if (device->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
-
-		dma->device_prep_dma_pq = ioat3_prep_pq;
-		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
-		dma_cap_set(DMA_PQ, dma->cap_mask);
-		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
-
-		if (device->cap & IOAT_CAP_RAID16SS)
-			dma_set_maxpq(dma, 16, 0);
-		else
-			dma_set_maxpq(dma, 8, 0);
-
-		if (!(device->cap & IOAT_CAP_XOR)) {
-			dma->device_prep_dma_xor = ioat3_prep_pqxor;
-			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
-			dma_cap_set(DMA_XOR, dma->cap_mask);
-			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-
-			if (device->cap & IOAT_CAP_RAID16SS)
-				dma->max_xor = 16;
-			else
-				dma->max_xor = 8;
-		}
-	}
-
-	dma->device_tx_status = ioat3_tx_status;
-	device->cleanup_fn = ioat3_cleanup_event;
-	device->timer_fn = ioat3_timer_event;
-
-	/* starting with CB3.3 super extended descriptors are supported */
-	if (device->cap & IOAT_CAP_RAID16SS) {
-		char pool_name[14];
-		int i;
-
-		for (i = 0; i < MAX_SED_POOLS; i++) {
-			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
-
-			/* allocate SED DMA pool */
-			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
-					&pdev->dev,
-					SED_SIZE * (i + 1), 64, 0);
-			if (!device->sed_hw_pool[i])
-				return -ENOMEM;
-
-		}
-	}
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
-
-	return 0;
-}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index a3e731e..690e3b4 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -21,11 +21,6 @@
 #define IOAT_MMIO_BAR		0
 
 /* CB device ID's */
-#define IOAT_PCI_DID_5000       0x1A38
-#define IOAT_PCI_DID_CNB        0x360B
-#define IOAT_PCI_DID_SCNB       0x65FF
-#define IOAT_PCI_DID_SNB        0x402F
-
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
@@ -58,6 +53,17 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2	0x6f52
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3	0x6f53
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX0	0x6f20
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX1	0x6f21
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX2	0x6f22
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX3	0x6f23
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX4	0x6f24
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX5	0x6f25
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX6	0x6f26
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX7	0x6f27
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
new file mode 100644
index 0000000..1c3c9b0
--- /dev/null
+++ b/drivers/dma/ioat/init.c
@@ -0,0 +1,1314 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+	/* I/OAT v3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+
+	/* I/OAT v3.2 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+
+	/* I/OAT v3.3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
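+
+/*
+ * Matching sketch: MODULE_DEVICE_TABLE() exports the table above so
+ * udev/modprobe can autoload the driver; a BDX0 engine (8086:6f20),
+ * for instance, announces a modalias of the form
+ * pci:v00008086d00006F20... which resolves to this module.
+ */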
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx);
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
+int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+		 "ioat+: upper limit for ring size (default: 16)");
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), msi, intx");
+
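+/*
+ * Usage sketch (values illustrative, not tuned recommendations): the
+ * knobs above are 0644 module parameters, so they can be given at load
+ * time or adjusted later through sysfs, e.g.
+ *
+ *   modprobe ioatdma ioat_interrupt_style=msi
+ *   echo 8 > /sys/module/ioatdma/parameters/ioat_pending_level
+ */
+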
+struct kmem_cache *ioat_cache;
+struct kmem_cache *ioat_sed_cache;
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_bdx_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+}
+
+bool is_bwd_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	/* even though not Atom, BDX-DE has same DMA silicon */
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+	struct completion *cmp = dma_async_param;
+
+	complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @ioat_dma: dma device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i;
+	u8 *src;
+	u8 *dest;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_chan *dma_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dest, dma_src;
+	dma_cookie_t cookie;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	unsigned long flags;
+
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!src)
+		return -ENOMEM;
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!dest) {
+		kfree(src);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffer */
+	for (i = 0; i < IOAT_TEST_SIZE; i++)
+		src[i] = (u8)i;
+
+	/* Start copy, using first DMA channel */
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(dev, "selftest cannot allocate chan resource\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_src)) {
+		dev_err(dev, "mapping src buffer failed\n");
+		goto free_resources;
+	}
+	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma_dest)) {
+		dev_err(dev, "mapping dest buffer failed\n");
+		goto unmap_src;
+	}
+	flags = DMA_PREP_INTERRUPT;
+	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+						      dma_src, IOAT_TEST_SIZE,
+						      flags);
+	if (!tx) {
+		dev_err(dev, "Self-test prep failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test setup failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL)
+					!= DMA_COMPLETE) {
+		dev_err(dev, "Self-test copy timed out, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+		dev_err(dev, "Self-test copy failed compare, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+unmap_dma:
+	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	kfree(src);
+	kfree(dest);
+	return err;
+}
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @ioat_dma: ioat dma device
+ */
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
+	u8 intrctrl = 0;
+
+	if (!strcmp(ioat_interrupt_style, "msix"))
+		goto msix;
+	if (!strcmp(ioat_interrupt_style, "msi"))
+		goto msi;
+	if (!strcmp(ioat_interrupt_style, "intx"))
+		goto intx;
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+	goto err_no_irq;
+
+msix:
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = ioat_dma->dma_dev.chancnt;
+	for (i = 0; i < msixcnt; i++)
+		ioat_dma->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
+	if (err)
+		goto msi;
+
+	for (i = 0; i < msixcnt; i++) {
+		msix = &ioat_dma->msix_entries[i];
+		ioat_chan = ioat_chan_by_index(ioat_dma, i);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", ioat_chan);
+		if (err) {
+			for (j = 0; j < i; j++) {
+				msix = &ioat_dma->msix_entries[j];
+				ioat_chan = ioat_chan_by_index(ioat_dma, j);
+				devm_free_irq(dev, msix->vector, ioat_chan);
+			}
+			goto msi;
+		}
+	}
+	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	ioat_dma->irq_mode = IOAT_MSIX;
+	goto done;
+
+msi:
+	err = pci_enable_msi(pdev);
+	if (err)
+		goto intx;
+
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", ioat_dma);
+	if (err) {
+		pci_disable_msi(pdev);
+		goto intx;
+	}
+	ioat_dma->irq_mode = IOAT_MSI;
+	goto done;
+
+intx:
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", ioat_dma);
+	if (err)
+		goto err_no_irq;
+
+	ioat_dma->irq_mode = IOAT_INTX;
+done:
+	if (is_bwd_ioat(pdev))
+		ioat_intr_quirk(ioat_dma);
+	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	ioat_dma->irq_mode = IOAT_NOIRQ;
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
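+
+/*
+ * Fallback sketch: ioat_interrupt_style only picks the starting point;
+ * a failed pci_enable_msix_exact() or devm_request_irq() drops through
+ * msix -> msi -> intx, and only when the shared INTx request also fails
+ * is the device left in IOAT_NOIRQ with interrupt generation disabled.
+ */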
+
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
+{
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+static int ioat_probe(struct ioatdma_device *ioat_dma)
+{
+	int err = -ENODEV;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+
+	/* DMA coherent memory pool for DMA descriptor allocations */
+	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+					     sizeof(struct ioat_dma_descriptor),
+					     64, 0);
+	if (!ioat_dma->dma_pool) {
+		err = -ENOMEM;
+		goto err_dma_pool;
+	}
+
+	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+						    sizeof(u64),
+						    SMP_CACHE_BYTES,
+						    SMP_CACHE_BYTES);
+
+	if (!ioat_dma->completion_pool) {
+		err = -ENOMEM;
+		goto err_completion_pool;
+	}
+
+	ioat_enumerate_channels(ioat_dma);
+
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma->dev = &pdev->dev;
+
+	if (!dma->chancnt) {
+		dev_err(dev, "channel enumeration error\n");
+		goto err_setup_interrupts;
+	}
+
+	err = ioat_dma_setup_interrupts(ioat_dma);
+	if (err)
+		goto err_setup_interrupts;
+
+	err = ioat3_dma_self_test(ioat_dma);
+	if (err)
+		goto err_self_test;
+
+	return 0;
+
+err_self_test:
+	ioat_disable_interrupts(ioat_dma);
+err_setup_interrupts:
+	pci_pool_destroy(ioat_dma->completion_pool);
+err_completion_pool:
+	pci_pool_destroy(ioat_dma->dma_pool);
+err_dma_pool:
+	return err;
+}
+
+static int ioat_register(struct ioatdma_device *ioat_dma)
+{
+	int err = dma_async_device_register(&ioat_dma->dma_dev);
+
+	if (err) {
+		ioat_disable_interrupts(ioat_dma);
+		pci_pool_destroy(ioat_dma->completion_pool);
+		pci_pool_destroy(ioat_dma->dma_pool);
+	}
+
+	return err;
+}
+
+static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+
+	ioat_disable_interrupts(ioat_dma);
+
+	ioat_kobject_del(ioat_dma);
+
+	dma_async_device_unregister(dma);
+
+	pci_pool_destroy(ioat_dma->dma_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/**
+ * ioat_enumerate_channels - find and initialize the device's channels
+ * @ioat_dma: the ioat dma device to be enumerated
+ */
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 xfercap_log;
+	int i;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+	}
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log &= 0x1f; /* bits [4:0] valid */
+	if (xfercap_log == 0)
+		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		if (!ioat_chan)
+			break;
+
+		ioat_init_channel(ioat_dma, ioat_chan, i);
+		ioat_chan->xfercap_log = xfercap_log;
+		spin_lock_init(&ioat_chan->prep_lock);
+		if (ioat_reset_hw(ioat_chan)) {
+			i = 0;
+			break;
+		}
+	}
+	dma->chancnt = i;
+	return i;
+}
+
+/**
+ * ioat_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+static void ioat_free_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	const int total_descs = 1 << ioat_chan->alloc_order;
+	int descs;
+	int i;
+
+	/* Before freeing channel resources, first check
+	 * whether they have been previously allocated for this channel.
+	 */
+	if (!ioat_chan->ring)
+		return;
+
+	ioat_stop(ioat_chan);
+	ioat_reset_hw(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	descs = ioat_ring_space(ioat_chan);
+	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
+	for (i = 0; i < descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	if (descs < total_descs)
+		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+			total_descs - descs);
+
+	for (i = 0; i < total_descs - descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
+		dump_desc_dbg(ioat_chan, desc);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = NULL;
+	ioat_chan->alloc_order = 0;
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+		      ioat_chan->completion_dma);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_chan->last_completion = 0;
+	ioat_chan->completion_dma = 0;
+	ioat_chan->dmacount = 0;
+}
+
+/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
+ * @chan: channel to be initialized
+ */
+static int ioat_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent **ring;
+	u64 status;
+	int order;
+	int i = 0;
+	u32 chanerr;
+
+	/* have we already been set up? */
+	if (ioat_chan->ring)
+		return 1 << ioat_chan->alloc_order;
+
+	/* Set up the register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	/* allocate a completion writeback area */
+	/* doing two 32-bit writes to MMIO since one 64-bit write doesn't work */
+	ioat_chan->completion =
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+			       GFP_KERNEL, &ioat_chan->completion_dma);
+	if (!ioat_chan->completion)
+		return -ENOMEM;
+
+	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
+	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64)ioat_chan->completion_dma) >> 32,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	order = ioat_get_alloc_order();
+	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	if (!ring)
+		return -ENOMEM;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_chan->ring = ring;
+	ioat_chan->head = 0;
+	ioat_chan->issued = 0;
+	ioat_chan->tail = 0;
+	ioat_chan->alloc_order = order;
+	set_bit(IOAT_RUN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_start_null_desc(ioat_chan);
+
+	/* check that we got off the ground */
+	do {
+		udelay(1);
+		status = ioat_chansts(ioat_chan);
+	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		return 1 << ioat_chan->alloc_order;
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	dev_WARN(to_dev(ioat_chan),
+		 "failed to start channel chanerr: %#x\n", chanerr);
+	ioat_free_chan_resources(c);
+	return -EFAULT;
+}
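+
+/*
+ * Sizing sketch: with the default ioat_ring_alloc_order of 8, the ring
+ * above holds 1 << 8 = 256 descriptors, and that count is what a
+ * successful allocation reports back to the dmaengine core; the
+ * 20-pass udelay(1) poll gives the null descriptor roughly 20us to
+ * drive the channel active or idle before setup is declared failed.
+ */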
+
+/* common channel initialization */
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	unsigned long data = (unsigned long) c;
+
+	ioat_chan->ioat_dma = ioat_dma;
+	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
+	spin_lock_init(&ioat_chan->cleanup_lock);
+	ioat_chan->dma_chan.device = dma;
+	dma_cookie_init(&ioat_chan->dma_chan);
+	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
+	ioat_dma->idx[idx] = ioat_chan;
+	init_timer(&ioat_chan->timer);
+	ioat_chan->timer.function = ioat_timer_event;
+	ioat_chan->timer.data = data;
+	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+}
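+
+/*
+ * Layout sketch: each channel owns a 0x80-byte register bank placed
+ * after the 0x80-byte device-global bank, so the (0x80 * (idx + 1))
+ * arithmetic above puts channel 0 at reg_base + 0x80, channel 1 at
+ * reg_base + 0x100, and so on.
+ */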
+
+#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
+static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i, src_idx;
+	struct page *dest;
+	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
+	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dest_dma;
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *dma_chan;
+	dma_cookie_t cookie;
+	u8 cmp_byte = 0;
+	u32 cmp_word;
+	u32 xor_val_result;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 op = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
+		return 0;
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+		if (!xor_srcs[src_idx]) {
+			while (src_idx--)
+				__free_page(xor_srcs[src_idx]);
+			return -ENOMEM;
+		}
+	}
+
+	dest = alloc_page(GFP_KERNEL);
+	if (!dest) {
+		while (src_idx--)
+			__free_page(xor_srcs[src_idx]);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffers */
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		u8 *ptr = page_address(xor_srcs[src_idx]);
+
+		for (i = 0; i < PAGE_SIZE; i++)
+			ptr[i] = (1 << src_idx);
+	}
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
+		cmp_byte ^= (u8) (1 << src_idx);
+
+	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+			(cmp_byte << 8) | cmp_byte;
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* test xor */
+	op = IOAT_OP_XOR;
+
+	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT);
+
+	if (!tx) {
+		dev_err(dev, "Self-test xor prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test xor setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test xor timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+		u32 *ptr = page_address(dest);
+
+		if (ptr[i] != cmp_word) {
+			dev_err(dev, "Self-test xor failed compare\n");
+			err = -ENODEV;
+			goto free_resources;
+		}
+	}
+	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/* skip validate if the capability is not present */
+	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+		goto free_resources;
+
+	op = IOAT_OP_XOR_VAL;
+
+	/* validate the sources against the destination page */
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		xor_val_srcs[i] = xor_srcs[i];
+	xor_val_srcs[i] = dest;
+
+	xor_val_result = 1;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	if (xor_val_result != 0) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	/* test for non-zero parity sum */
+	op = IOAT_OP_XOR_VAL;
+
+	xor_val_result = 0;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test 2nd zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test  2nd zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test 2nd validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	if (xor_val_result != SUM_CHECK_P_RESULT) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	goto free_resources;
+dma_unmap:
+	if (op == IOAT_OP_XOR) {
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_XOR_VAL) {
+		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	}
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	src_idx = IOAT_NUM_SRC_TEST;
+	while (src_idx--)
+		__free_page(xor_srcs[src_idx]);
+	__free_page(dest);
+	return err;
+}
+
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int rc;
+
+	rc = ioat_dma_self_test(ioat_dma);
+	if (rc)
+		return rc;
+
+	rc = ioat_xor_val_self_test(ioat_dma);
+
+	return rc;
+}
+
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	u32 errmask;
+
+	dma = &ioat_dma->dma_dev;
+
+	/*
+	 * if we have descriptor write back error status, we mask the
+	 * error interrupts
+	 */
+	if (ioat_dma->cap & IOAT_CAP_DWBES) {
+		list_for_each_entry(c, &dma->channels, device_node) {
+			ioat_chan = to_ioat_chan(c);
+			errmask = readl(ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+				   IOAT_CHANERR_XOR_Q_ERR;
+			writel(errmask, ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+		}
+	}
+}
+
+static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	bool is_raid_device = false;
+	int err;
+
+	dma = &ioat_dma->dma_dev;
+	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat_issue_pending;
+	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat_free_chan_resources;
+
+	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
+
+	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
+		ioat_dma->cap &=
+			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
+	if (ioat_dma->cap & IOAT_CAP_XOR) {
+		is_raid_device = true;
+		dma->max_xor = 8;
+
+		dma_cap_set(DMA_XOR, dma->cap_mask);
+		dma->device_prep_dma_xor = ioat_prep_xor;
+
+		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
+	}
+
+	if (ioat_dma->cap & IOAT_CAP_PQ) {
+		is_raid_device = true;
+
+		dma->device_prep_dma_pq = ioat_prep_pq;
+		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+
+		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+			dma_set_maxpq(dma, 16, 0);
+		else
+			dma_set_maxpq(dma, 8, 0);
+
+		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
+			dma->device_prep_dma_xor = ioat_prep_pqxor;
+			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
+			dma_cap_set(DMA_XOR, dma->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+				dma->max_xor = 16;
+			else
+				dma->max_xor = 8;
+		}
+	}
+
+	dma->device_tx_status = ioat_tx_status;
+
+	/* starting with CB3.3, super extended descriptors are supported */
+	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
+		char pool_name[14];
+		int i;
+
+		for (i = 0; i < MAX_SED_POOLS; i++) {
+			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
+
+			/* allocate SED DMA pool */
+			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
+					&pdev->dev,
+					SED_SIZE * (i + 1), 64, 0);
+			if (!ioat_dma->sed_hw_pool[i])
+				return -ENOMEM;
+
+		}
+	}
+
+	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
+		dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+	err = ioat_probe(ioat_dma);
+	if (err)
+		return err;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(c);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(ioat_dma);
+	if (err)
+		return err;
+
+	ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+	if (dca)
+		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+
+	return 0;
+}
+
+#define DRV_NAME "ioatdma"
+
+static struct pci_driver ioat_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= ioat_pci_tbl,
+	.probe		= ioat_pci_probe,
+	.remove		= ioat_remove,
+};
+
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return NULL;
+	d->pdev = pdev;
+	d->reg_base = iobase;
+	return d;
+}
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *device;
+	int err;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
+	if (err)
+		return err;
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+	if (!device)
+		return -ENOMEM;
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	if (device->version >= IOAT_VER_3_0)
+		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	else
+		return -ENODEV;
+
+	if (err) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void ioat_remove(struct pci_dev *pdev)
+{
+	struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
+
+	dev_err(&pdev->dev, "Removing dma and dca services\n");
+	if (device->dca) {
+		unregister_dca_provider(device->dca, &pdev->dev);
+		free_dca_provider(device->dca);
+		device->dca = NULL;
+	}
+	ioat_dma_remove(device);
+}
+
+static int __init ioat_init_module(void)
+{
+	int err = -ENOMEM;
+
+	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
+		DRV_NAME, IOAT_DMA_VERSION);
+
+	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
+					0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!ioat_cache)
+		return -ENOMEM;
+
+	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+	if (!ioat_sed_cache)
+		goto err_ioat_cache;
+
+	err = pci_register_driver(&ioat_pci_driver);
+	if (err)
+		goto err_ioat3_cache;
+
+	return 0;
+
+ err_ioat3_cache:
+	kmem_cache_destroy(ioat_sed_cache);
+
+ err_ioat_cache:
+	kmem_cache_destroy(ioat_cache);
+
+	return err;
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+	pci_unregister_driver(&ioat_pci_driver);
+	kmem_cache_destroy(ioat_cache);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
deleted file mode 100644
index 76f0dc6..0000000
--- a/drivers/dma/ioat/pci.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include <linux/slab.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
-	/* I/OAT v1 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-	{ PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
-	/* I/OAT v2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
-	/* I/OAT v3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-
-	/* I/OAT v3.2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
-
-	/* I/OAT v3.3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
-
-	{ 0, }
-};
-MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
-static void ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-struct kmem_cache *ioat2_cache;
-struct kmem_cache *ioat3_sed_cache;
-
-#define DRV_NAME "ioatdma"
-
-static struct pci_driver ioat_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= ioat_pci_tbl,
-	.probe		= ioat_pci_probe,
-	.remove		= ioat_remove,
-};
-
-static struct ioatdma_device *
-alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
-
-	if (!d)
-		return NULL;
-	d->pdev = pdev;
-	d->reg_base = iobase;
-	return d;
-}
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	void __iomem * const *iomap;
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *device;
-	int err;
-
-	err = pcim_enable_device(pdev);
-	if (err)
-		return err;
-
-	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
-	if (err)
-		return err;
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
-		return -ENOMEM;
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
-	if (!device)
-		return -ENOMEM;
-	pci_set_master(pdev);
-	pci_set_drvdata(pdev, device);
-
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version == IOAT_VER_1_2)
-		err = ioat1_dma_probe(device, ioat_dca_enabled);
-	else if (device->version == IOAT_VER_2_0)
-		err = ioat2_dma_probe(device, ioat_dca_enabled);
-	else if (device->version >= IOAT_VER_3_0)
-		err = ioat3_dma_probe(device, ioat_dca_enabled);
-	else
-		return -ENODEV;
-
-	if (err) {
-		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void ioat_remove(struct pci_dev *pdev)
-{
-	struct ioatdma_device *device = pci_get_drvdata(pdev);
-
-	if (!device)
-		return;
-
-	dev_err(&pdev->dev, "Removing dma and dca services\n");
-	if (device->dca) {
-		unregister_dca_provider(device->dca, &pdev->dev);
-		free_dca_provider(device->dca);
-		device->dca = NULL;
-	}
-	ioat_dma_remove(device);
-}
-
-static int __init ioat_init_module(void)
-{
-	int err = -ENOMEM;
-
-	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
-		DRV_NAME, IOAT_DMA_VERSION);
-
-	ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
-					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!ioat2_cache)
-		return -ENOMEM;
-
-	ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
-	if (!ioat3_sed_cache)
-		goto err_ioat2_cache;
-
-	err = pci_register_driver(&ioat_pci_driver);
-	if (err)
-		goto err_ioat3_cache;
-
-	return 0;
-
- err_ioat3_cache:
-	kmem_cache_destroy(ioat3_sed_cache);
-
- err_ioat2_cache:
-	kmem_cache_destroy(ioat2_cache);
-
-	return err;
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-	pci_unregister_driver(&ioat_pci_driver);
-	kmem_cache_destroy(ioat2_cache);
-}
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 0000000..ad4fb41
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,715 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+
+#define MAX_SCF	1024
+
+/* provide a lookup table for setting the source address in the base or
+ * extended descriptor of an xor or pq descriptor
+ */
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+				       2, 2, 2, 2, 2, 2, 2 };
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+					0, 1, 2, 3, 4, 5, 6 };
+
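+/*
+ * Decoding sketch: xor_idx_to_desc is a bitmap, 0xe0 = 0b11100000, so
+ * (xor_idx_to_desc >> idx) & 1 picks the base descriptor for sources
+ * 0-4 and the extended descriptor for sources 5-7, after which
+ * xor_idx_to_field[] names the 64-bit field slot inside the chosen
+ * descriptor; pq_idx_to_desc = 0xf8 plays the same role for the pq
+ * layout, whose base descriptor carries only three sources.
+ */
+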
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+			dma_addr_t addr, u32 offset, int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+	raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	return raw->field[pq_idx_to_field[idx]];
+}
+
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	return raw->field[pq16_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+		       dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	raw->field[pq_idx_to_field[idx]] = addr + offset;
+	pq->coef[idx] = coef;
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+	struct ioat_pq16a_descriptor *pq16 =
+		(struct ioat_pq16a_descriptor *)desc[1];
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+	if (idx < 8)
+		pq->coef[idx] = coef;
+	else
+		pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
+{
+	struct ioat_sed_ent *sed;
+	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+	sed = kmem_cache_alloc(ioat_sed_cache, flags);
+	if (!sed)
+		return NULL;
+
+	sed->hw_pool = hw_pool;
+	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
+				 flags, &sed->dma);
+	if (!sed->hw) {
+		kmem_cache_free(ioat_sed_cache, sed);
+		return NULL;
+	}
+
+	return sed;
+}
+
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	dma_addr_t dst = dma_dest;
+	dma_addr_t src = dma_src;
+	size_t total_len = len;
+	int num_descs, idx, i;
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		hw = desc->hw;
+
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		dump_desc_dbg(ioat_chan, desc);
+	} while (++i < num_descs);
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat_chan, desc);
+	/* we leave the channel locked to ensure in-order submission */
+
+	return &desc->txd;
+}
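+
+/*
+ * Splitting sketch, assuming xfercap_log = 20 (a 1 MB per-descriptor
+ * cap): a 2.5 MB memcpy becomes three ring entries covering 1 MB,
+ * 1 MB and 0.5 MB with advancing src/dst windows; only the final
+ * descriptor carries the interrupt/fence flags and the completion
+ * write, so one completion covers the whole chain.
+ */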
+
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
+		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
+		      size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_xor_descriptor *xor;
+	struct ioat_xor_ext_descriptor *xor_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	int num_descs, with_ext, idx, i;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+
+	BUG_ON(src_cnt < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover more than 5
+	 * sources
+	 */
+	if (src_cnt > 5) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t,
+					 len, 1 << ioat_chan->xfercap_log);
+		int s;
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		xor = desc->xor;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; xor_set_src() knows not to write
+		 * to it in the single-descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
+		xor_ex = ext->xor_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) xor;
+		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+		for (s = 0; s < src_cnt; s++)
+			xor_set_src(descs, src[s], offset, s);
+		xor->size = xfer_size;
+		xor->dst_addr = dest + offset;
+		xor->ctl = 0;
+		xor->ctl_f.op = op;
+		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
+
+		len -= xfer_size;
+		offset += xfer_size;
+		dump_desc_dbg(ioat_chan, desc);
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last xor descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* completion descriptor carries interrupt bit */
+	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+	hw = compl_desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	dump_desc_dbg(ioat_chan, compl_desc);
+
+	/* we leave the channel locked to ensure in-order submission */
+	return &compl_desc->txd;
+}
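+
+/*
+ * Accounting sketch: an 8-source xor needs the extended descriptor, so
+ * with_ext = 1 doubles num_descs, and one more null descriptor is
+ * reserved for the ordering completion write; a single-segment
+ * 8-source xor therefore locks 2 + 1 = 3 ring slots and hands back
+ * the completion descriptor's txd.
+ */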
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags)
+{
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*result = 0;
+
+	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
+				     src_cnt - 1, len, flags);
+}
+
+static void
+dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
+		 struct ioat_ring_ent *ext)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
+	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
+	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
+		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
+		pq->ctl_f.int_en, pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++)
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
+			       struct ioat_ring_ent *desc)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_raw_descriptor *descs[] = { (void *)pq,
+						(void *)pq,
+						(void *)pq };
+	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	if (desc->sed) {
+		descs[1] = (void *)desc->sed->hw;
+		descs[2] = (void *)desc->sed->hw + 64;
+	}
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) pq->next,
+		desc->txd.flags, pq->size, pq->ctl,
+		pq->ctl_f.op, pq->ctl_f.int_en,
+		pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++) {
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq16_get_src(descs, i),
+			pq->coef[i]);
+	}
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+		     const dma_addr_t *dst, const dma_addr_t *src,
+		     unsigned int src_cnt, const unsigned char *scf,
+		     size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	struct ioat_pq_ext_descriptor *pq_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+	int i, s, idx, with_ext, num_descs;
+	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+	/* the engine requires at least two sources (we provide
+	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
+	 */
+	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover greater than 3
+	 * sources (we need 1 extra source in the q-only continuation
+	 * case and 3 extra sources in the p+q continuation case).
+	 */
+	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
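+
+	/* for example: src_cnt = 2 under DMA_PREP_CONTINUE with both p
+	 * and q enabled picks up 3 implied sources below, for 5 total,
+	 * more than the 3 sources a base pq descriptor holds, so the
+	 * extended-descriptor path is taken
+	 */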
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; pq_set_src() knows not to write
+		 * to it in the single-descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
+		pq_ex = ext->pq_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+
+		for (s = 0; s < src_cnt; s++)
+			pq_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq_set_src(descs, dst[0], offset, 0, s++);
+			pq_set_src(descs, dst[1], offset, 1, s++);
+			pq_set_src(descs, dst[1], offset, 0, s++);
+		}
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	dump_pq_desc_dbg(ioat_chan, desc, ext);
+
+	if (!cb32) {
+		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		pq->ctl_f.compl_write = 1;
+		compl_desc = desc;
+	} else {
+		/* completion descriptor carries interrupt bit */
+		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+		hw = compl_desc->hw;
+		hw->ctl = 0;
+		hw->ctl_f.null = 1;
+		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		hw->ctl_f.compl_write = 1;
+		hw->size = NULL_DESC_BUFFER_SIZE;
+		dump_desc_dbg(ioat_chan, compl_desc);
+	}
+
+	/* we leave the channel locked to ensure in order submission */
+	return &compl_desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+		       const dma_addr_t *dst, const dma_addr_t *src,
+		       unsigned int src_cnt, const unsigned char *scf,
+		       size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	u32 offset = 0;
+	u8 op;
+	int i, s, idx, num_descs;
+
+	/* this function is only called with 9-16 sources */
+	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+
+	/*
+	 * 16 source pq is only available on cb3.3, which is not affected
+	 * by the completion write ordering bug, so no null descriptor is
+	 * needed here.
+	 */
+	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+
+	i = 0;
+
+	do {
+		struct ioat_raw_descriptor *descs[4];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+
+		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
+		if (!desc->sed) {
+			dev_err(to_dev(ioat_chan),
+				"%s: no free sed entries\n", __func__);
+			return NULL;
+		}
+
+		pq->sed_addr = desc->sed->dma;
+		desc->sed->parent = desc;
+
+		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+		descs[2] = (void *)descs[1] + 64;
+
+		for (s = 0; s < src_cnt; s++)
+			pq16_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq16_set_src(descs, dst[0], offset, 0, s++);
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+			pq16_set_src(descs, dst[1], offset, 0, s++);
+		}
+
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while (++i < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* with cb3.3 we should be able to do completion w/o a null desc */
+	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	pq->ctl_f.compl_write = 1;
+
+	dump_pq16_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+	if (dmaf_p_disabled_continue(flags))
+		return src_cnt + 1;
+	else if (dmaf_continue(flags))
+		return src_cnt + 3;
+	else
+		return src_cnt;
+}
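+
+/*
+ * The adjusted count mirrors the implied sources programmed in the prep
+ * routines above: a q-only continuation re-reads q (dst[1]) as one
+ * extra source, while a p+q continuation re-reads p once and q twice,
+ * i.e. src_cnt + 3. The result decides between the 8-source descriptor
+ * format and the 16-source (pq16) format.
+ */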
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
+	/* handle the single source multiply case from the raid6
+	 * recovery path
+	 */
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
+		dma_addr_t single_source[2];
+		unsigned char single_source_coef[2];
+
+		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+		single_source[0] = src[0];
+		single_source[1] = src[0];
+		single_source_coef[0] = scf[0];
+		single_source_coef[1] = 0;
+
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
+					       2, single_source_coef, len,
+					       flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
+					     single_source_coef, len, flags);
+
+	} else {
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+					       scf, len, flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+					     scf, len, flags);
+	}
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*pqres = 0;
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				     flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = dst;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				     flags);
+}
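+
+/*
+ * This emulates xor on top of the pq engine: q is disabled, the
+ * coefficients (which only affect q generation) are zeroed, and the
+ * p result, the plain xor of all sources, lands at the requested
+ * destination.
+ */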
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*result = 0;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = src[0];
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+				       scf, len, flags) :
+		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+				     scf, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+
+	if (ioat_check_space_lock(ioat_chan, 1) == 0)
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+	else
+		return NULL;
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+
+	desc->txd.flags = flags;
+	desc->len = 1;
+
+	dump_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
new file mode 100644
index 0000000..cb4a857
--- /dev/null
+++ b/drivers/dma/ioat/sysfs.c
@@ -0,0 +1,135 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+
+	return sprintf(page, "copy%s%s%s%s%s\n",
+		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
+
+	return sprintf(page, "%d.%d\n",
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct ioat_sysfs_entry *entry;
+	struct ioatdma_chan *ioat_chan;
+
+	entry = container_of(attr, struct ioat_sysfs_entry, attr);
+	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+	if (!entry->show)
+		return -EIO;
+	return entry->show(&ioat_chan->dma_chan, page);
+}
+
+const struct sysfs_ops ioat_sysfs_ops = {
+	.show	= ioat_attr_show,
+};
+
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+		struct kobject *parent = &c->dev->device.kobj;
+		int err;
+
+		err = kobject_init_and_add(&ioat_chan->kobj, type,
+					   parent, "quickdata");
+		if (err) {
+			dev_warn(to_dev(ioat_chan),
+				 "sysfs init error (%d), continuing...\n", err);
+			kobject_put(&ioat_chan->kobj);
+			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
+		}
+	}
+}
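+
+/*
+ * The attributes then appear under the channel device in sysfs, for
+ * instance (the dmaXchanY name depends on enumeration order):
+ *
+ *	/sys/class/dma/dma0chan0/quickdata/ring_size
+ *	/sys/class/dma/dma0chan0/quickdata/cap
+ */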
+
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
+			kobject_del(&ioat_chan->kobj);
+			kobject_put(&ioat_chan->kobj);
+		}
+	}
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	/* ...taken outside the lock, no need to be precise */
+	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+	&ring_size_attr.attr,
+	&ring_active_attr.attr,
+	&ioat_cap_attr.attr,
+	&ioat_version_attr.attr,
+	NULL,
+};
+
+struct kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+	.default_attrs = ioat_attrs,
+};
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2e284a4..4768a82 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -265,10 +265,10 @@
 	return ret;
 }
 
-/* Chained IRQ handler for IPU error interrupt */
-static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
+/* Chained IRQ handler for IPU function and error interrupt */
+static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct ipu *ipu = irq_get_handler_data(irq);
+	struct ipu *ipu = irq_desc_get_handler_data(desc);
 	u32 status;
 	int i, line;
 
@@ -286,43 +286,7 @@
 		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
-
-			line--;
-			status &= ~(1UL << line);
-
-			raw_spin_lock(&bank_lock);
-			map = src2map(32 * i + line);
-			if (map)
-				irq = map->irq;
-			raw_spin_unlock(&bank_lock);
-
-			if (!map) {
-				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-				       line, i);
-				continue;
-			}
-			generic_handle_irq(irq);
-		}
-	}
-}
-
-/* Chained IRQ handler for IPU function interrupt */
-static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
-{
-	struct ipu *ipu = irq_desc_get_handler_data(desc);
-	u32 status;
-	int i, line;
-
-	for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
-		struct ipu_irq_bank *bank = irq_bank + i;
-
-		raw_spin_lock(&bank_lock);
-		status = ipu_read_reg(ipu, bank->status);
-		/* Not clearing all interrupts, see above */
-		status &= ipu_read_reg(ipu, bank->control);
-		raw_spin_unlock(&bank_lock);
-		while ((line = ffs(status))) {
-			struct ipu_irq_map *map;
+			unsigned int irq = NO_IRQ;
 
 			line--;
 			status &= ~(1UL << line);
@@ -377,16 +341,12 @@
 		irq_map[i].irq = irq;
 		irq_map[i].source = -EINVAL;
 		irq_set_handler(irq, handle_level_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(ipu->irq_fn, ipu);
-	irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn);
+	irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
 
-	irq_set_handler_data(ipu->irq_err, ipu);
-	irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+	irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
 
 	ipu->irq_base = irq_base;
 
@@ -399,16 +359,12 @@
 
 	irq_base = ipu->irq_base;
 
-	irq_set_chained_handler(ipu->irq_fn, NULL);
-	irq_set_handler_data(ipu->irq_fn, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
 
-	irq_set_chained_handler(ipu->irq_err, NULL);
-	irq_set_handler_data(ipu->irq_err, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
+		irq_set_status_flags(irq, IRQ_NOREQUEST);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 647e362..1ba2fd7 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -24,7 +24,6 @@
 #include "virt-dma.h"
 
 #define DRIVER_NAME		"k3-dma"
-#define DMA_ALIGN		3
 #define DMA_MAX_SIZE		0x1ffc
 
 #define INT_STAT		0x00
@@ -732,7 +731,7 @@
 	d->slave.device_pause = k3_dma_transfer_pause;
 	d->slave.device_resume = k3_dma_transfer_resume;
 	d->slave.device_terminate_all = k3_dma_terminate_all;
-	d->slave.copy_align = DMA_ALIGN;
+	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	/* init virtual channel */
 	d->chans = devm_kzalloc(&op->dev,
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
new file mode 100644
index 0000000..761f326
--- /dev/null
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -0,0 +1,183 @@
+/*
+ * DMA Router driver for LPC18xx/43xx DMA MUX
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on TI DMA Crossbar driver by:
+ *   Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *   Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+/* CREG register offset and macros for mux manipulation */
+#define LPC18XX_CREG_DMAMUX		0x11c
+#define LPC18XX_DMAMUX_VAL(v, n)	((v) << (n * 2))
+#define LPC18XX_DMAMUX_MASK(n)		(0x3 << (n * 2))
+#define LPC18XX_DMAMUX_MAX_VAL		0x3
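+
+/*
+ * Each DMA request line owns a 2-bit field in CREG_DMAMUX. For example,
+ * for request line 5, LPC18XX_DMAMUX_MASK(5) is 0x3 << 10 (bits 11:10)
+ * and LPC18XX_DMAMUX_VAL(2, 5) writes the value 2 there, routing the
+ * third of the four muxed peripherals to line 5.
+ */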
+
+struct lpc18xx_dmamux {
+	u32 value;
+	bool busy;
+};
+
+struct lpc18xx_dmamux_data {
+	struct dma_router dmarouter;
+	struct lpc18xx_dmamux *muxes;
+	u32 dma_master_requests;
+	u32 dma_mux_requests;
+	struct regmap *reg;
+	spinlock_t lock;
+};
+
+static void lpc18xx_dmamux_free(struct device *dev, void *route_data)
+{
+	struct lpc18xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+	struct lpc18xx_dmamux *mux = route_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	mux->busy = false;
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+}
+
+static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+				    struct of_dma *ofdma)
+{
+	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+	struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+	unsigned long flags;
+	unsigned mux;
+
+	if (dma_spec->args_count != 3) {
+		dev_err(&pdev->dev, "invalid number of dma mux args\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux = dma_spec->args[0];
+	if (mux >= dmamux->dma_master_requests) {
+		dev_err(&pdev->dev, "invalid mux number: %d\n",
+			dma_spec->args[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
+		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+			dma_spec->args[1]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The of_node_put() will be done in the core for the node */
+	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+	if (!dma_spec->np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	if (dmamux->muxes[mux].busy) {
+		spin_unlock_irqrestore(&dmamux->lock, flags);
+		dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
+			mux, mux, dmamux->muxes[mux].value);
+		of_node_put(dma_spec->np);
+		return ERR_PTR(-EBUSY);
+	}
+
+	dmamux->muxes[mux].busy = true;
+	dmamux->muxes[mux].value = dma_spec->args[1];
+
+	regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX,
+			   LPC18XX_DMAMUX_MASK(mux),
+			   LPC18XX_DMAMUX_VAL(dmamux->muxes[mux].value, mux));
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+
+	dma_spec->args[1] = dma_spec->args[2];
+	dma_spec->args_count = 2;
+
+	dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
+		dmamux->muxes[mux].value, mux);
+
+	return &dmamux->muxes[mux];
+}
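+
+/*
+ * For example, a dma-spec of <5 2 7> (hypothetical numbers) checks that
+ * request line 5 is free, programs mux value 2 into its CREG field, and
+ * then hands <5 7> on to the master DMA controller as a two-cell spec.
+ */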
+
+static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+{
+	struct device_node *dma_np, *np = pdev->dev.of_node;
+	struct lpc18xx_dmamux_data *dmamux;
+	int ret;
+
+	dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+	if (!dmamux)
+		return -ENOMEM;
+
+	dmamux->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(dmamux->reg)) {
+		dev_err(&pdev->dev, "syscon lookup failed\n");
+		return PTR_ERR(dmamux->reg);
+	}
+
+	ret = of_property_read_u32(np, "dma-requests",
+				   &dmamux->dma_mux_requests);
+	if (ret) {
+		dev_err(&pdev->dev, "missing dma-requests property\n");
+		return ret;
+	}
+
+	dma_np = of_parse_phandle(np, "dma-masters", 0);
+	if (!dma_np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(dma_np, "dma-requests",
+				   &dmamux->dma_master_requests);
+	of_node_put(dma_np);
+	if (ret) {
+		dev_err(&pdev->dev, "missing master dma-requests property\n");
+		return ret;
+	}
+
+	dmamux->muxes = devm_kcalloc(&pdev->dev, dmamux->dma_master_requests,
+				     sizeof(struct lpc18xx_dmamux),
+				     GFP_KERNEL);
+	if (!dmamux->muxes)
+		return -ENOMEM;
+
+	spin_lock_init(&dmamux->lock);
+	platform_set_drvdata(pdev, dmamux);
+	dmamux->dmarouter.dev = &pdev->dev;
+	dmamux->dmarouter.route_free = lpc18xx_dmamux_free;
+
+	return of_dma_router_register(np, lpc18xx_dmamux_reserve,
+				      &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc18xx_dmamux_match[] = {
+	{ .compatible = "nxp,lpc1850-dmamux" },
+	{},
+};
+
+static struct platform_driver lpc18xx_dmamux_driver = {
+	.probe	= lpc18xx_dmamux_probe,
+	.driver = {
+		.name = "lpc18xx-dmamux",
+		.of_match_table = lpc18xx_dmamux_match,
+	},
+};
+
+static int __init lpc18xx_dmamux_init(void)
+{
+	return platform_driver_register(&lpc18xx_dmamux_driver);
+}
+arch_initcall(lpc18xx_dmamux_init);
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
index f663b0b..d899820 100644
--- a/drivers/dma/mic_x100_dma.h
+++ b/drivers/dma/mic_x100_dma.h
@@ -39,7 +39,7 @@
  */
 #define MIC_DMA_MAX_NUM_CHAN	8
 #define MIC_DMA_NUM_CHAN	4
-#define MIC_DMA_ALIGN_SHIFT	6
+#define MIC_DMA_ALIGN_SHIFT	DMAENGINE_ALIGN_64_BYTES
 #define MIC_DMA_ALIGN_BYTES	(1 << MIC_DMA_ALIGN_SHIFT)
 #define MIC_DMA_DESC_RX_SIZE	(128 * 1024 - 4)
 
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 462a022..e39457f 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -72,7 +72,6 @@
 #define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
 #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
 
-#define PDMA_ALIGNMENT		3
 #define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
@@ -1071,7 +1070,7 @@
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_config = mmp_pdma_config;
 	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
-	pdev->device.copy_align = PDMA_ALIGNMENT;
+	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 	pdev->device.src_addr_widths = widths;
 	pdev->device.dst_addr_widths = widths;
 	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e683761..3df0422 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -100,7 +100,6 @@
 	PXA910_SQU,
 };
 
-#define TDMA_ALIGNMENT		3
 #define TDMA_MAX_XFER_BYTES    SZ_64K
 
 struct mmp_tdma_chan {
@@ -695,7 +694,7 @@
 	tdev->device.device_pause = mmp_tdma_pause_chan;
 	tdev->device.device_resume = mmp_tdma_resume_chan;
 	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
-	tdev->device.copy_align = TDMA_ALIGNMENT;
+	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f1325f6..1c2de9a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -26,6 +25,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/cpumask.h>
 #include <linux/platform_data/dma-mv_xor.h>
 
 #include "dmaengine.h"
@@ -1126,7 +1126,8 @@
 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
 	{},
 };
-MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+
+static unsigned int mv_xor_engine_count;
 
 static int mv_xor_probe(struct platform_device *pdev)
 {
@@ -1134,6 +1135,7 @@
 	struct mv_xor_device *xordev;
 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *res;
+	unsigned int max_engines, max_channels;
 	int i, ret;
 	int op_in_desc;
 
@@ -1177,6 +1179,21 @@
 	if (!IS_ERR(xordev->clk))
 		clk_prepare_enable(xordev->clk);
 
+	/*
+	 * We don't want to have more than one channel per CPU in
+	 * order for async_tx to perform well. So we limit the number
+	 * of engines and channels so that we take into account this
+	 * constraint. Note that we also want to use channels from
+	 * separate engines when possible.
+	 */
+	max_engines = num_present_cpus();
+	max_channels = min_t(unsigned int,
+			     MV_XOR_MAX_CHANNELS,
+			     DIV_ROUND_UP(num_present_cpus(), 2));
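+
+	/*
+	 * For example, a system with 4 present CPUs is limited to 4
+	 * engines and min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(4, 2)) = 2
+	 * channels per engine.
+	 */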
+
+	if (mv_xor_engine_count >= max_engines)
+		return 0;
+
 	if (pdev->dev.of_node) {
 		struct device_node *np;
 		int i = 0;
@@ -1190,13 +1207,13 @@
 			int irq;
 			op_in_desc = (int)of_id->data;
 
+			if (i >= max_channels)
+				continue;
+
 			dma_cap_zero(cap_mask);
-			if (of_property_read_bool(np, "dmacap,memcpy"))
-				dma_cap_set(DMA_MEMCPY, cap_mask);
-			if (of_property_read_bool(np, "dmacap,xor"))
-				dma_cap_set(DMA_XOR, cap_mask);
-			if (of_property_read_bool(np, "dmacap,interrupt"))
-				dma_cap_set(DMA_INTERRUPT, cap_mask);
+			dma_cap_set(DMA_MEMCPY, cap_mask);
+			dma_cap_set(DMA_XOR, cap_mask);
+			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
 			irq = irq_of_parse_and_map(np, 0);
 			if (!irq) {
@@ -1216,7 +1233,7 @@
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
-		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		for (i = 0; i < max_channels; i++) {
 			struct mv_xor_channel_data *cd;
 			struct mv_xor_chan *chan;
 			int irq;
@@ -1263,27 +1280,8 @@
 	return ret;
 }
 
-static int mv_xor_remove(struct platform_device *pdev)
-{
-	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
-		if (xordev->channels[i])
-			mv_xor_channel_remove(xordev->channels[i]);
-	}
-
-	if (!IS_ERR(xordev->clk)) {
-		clk_disable_unprepare(xordev->clk);
-		clk_put(xordev->clk);
-	}
-
-	return 0;
-}
-
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
 	.driver		= {
 		.name	        = MV_XOR_NAME,
 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
@@ -1295,19 +1293,10 @@
 {
 	return platform_driver_register(&mv_xor_driver);
 }
-module_init(mv_xor_init);
+device_initcall(mv_xor_init);
 
-/* it's currently unsafe to unload this module */
-#if 0
-static void __exit mv_xor_exit(void)
-{
-	platform_driver_unregister(&mv_xor_driver);
-	return;
-}
-
-module_exit(mv_xor_exit);
-#endif
-
+/*
 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
 MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index b859792d..113605f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -11,10 +11,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/dmaengine.h>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index ecab4ea0..17ee758 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1198,6 +1198,9 @@
 	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
 	struct _arg_LPEND lpend;
 
+	if (*bursts == 1)
+		return _bursts(dry_run, buf, pxs, 1);
+
 	/* Max iterations possible in DMALP is 256 */
 	if (*bursts >= 256*256) {
 		lcnt1 = 256;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index ddcbbf5..5cb61ce 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -184,19 +184,18 @@
 
 static int dbg_show_requester_chan(struct seq_file *s, void *p)
 {
-	int pos = 0;
 	struct pxad_phy *phy = s->private;
 	int i;
 	u32 drcmr;
 
-	pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx);
+	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
 	for (i = 0; i < 70; i++) {
 		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
 		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
-			pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
-					  !!(drcmr & DRCMR_MAPVLD));
+			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
+				   !!(drcmr & DRCMR_MAPVLD));
 	}
-	return pos;
+	return 0;
 }
 
 static inline int dbg_burst_from_dcmd(u32 dcmd)
@@ -906,21 +905,21 @@
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
 	*dcmd = 0;
-	if (chan->cfg.direction == DMA_DEV_TO_MEM) {
+	if (dir == DMA_DEV_TO_MEM) {
 		maxburst = chan->cfg.src_maxburst;
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
 		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_DEV) {
+	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
 		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_MEM)
+	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
 			PXA_DCMD_INCSRCADDR;
 
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0f37152..9fda65a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -39,18 +39,6 @@
 
 endif
 
-config SUDMAC
-	tristate "Renesas SUDMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas SUDMAC controllers.
-
-config RCAR_HPB_DMAE
-	tristate "Renesas R-Car HPB DMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas R-Car series DMA controllers.
-
 config RCAR_DMAC
 	tristate "Renesas R-Car Gen2 DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -59,6 +47,12 @@
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.
 
+config RCAR_HPB_DMAE
+	tristate "Renesas R-Car HPB DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car series DMA controllers.
+
 config RENESAS_USB_DMAC
 	tristate "Renesas USB-DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -67,3 +61,9 @@
 	help
 	  This driver supports the USB-DMA controller found in the Renesas
 	  SoCs.
+
+config SUDMAC
+	tristate "Renesas SUDMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index b8a59806..0133e46 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -13,7 +13,7 @@
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SH_DMAE) += shdma.o
 
-obj-$(CONFIG_SUDMAC) += sudmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8c5186c..7d5598d 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -455,6 +455,7 @@
 	switch (sdma->type) {
 	case SIRFSOC_DMA_VER_A7V1:
 		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
 		writel_relaxed((1 << cid) | 1 << (cid + 16),
 			       sdma->base +
 			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
@@ -462,6 +463,8 @@
 		break;
 	case SIRFSOC_DMA_VER_A7V2:
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
+		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
+			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
 		break;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 3c10f03..750d1b3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2853,7 +2853,7 @@
 		 * This controller can only access address at even
 		 * 32bit boundaries, i.e. 2^2
 		 */
-		dev->copy_align = 2;
+		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	}
 
 	if (dma_has_cap(DMA_SG, dev->cap_mask))
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
new file mode 100644
index 0000000..a1a500d
--- /dev/null
+++ b/drivers/dma/sun4i-dma.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2014 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+/** Common macros to normal and dedicated DMA registers **/
+
+#define SUN4I_DMA_CFG_LOADING			BIT(31)
+#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
+#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
+#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
+#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
+#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
+#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
+#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
+#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)
+
+/** Normal DMA register values **/
+
+/* Normal DMA source/destination data request type values */
+#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
+#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Normal DMA register layout **/
+
+/* Normal DMA source/destination address mode values */
+#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_NDMA_ADDR_MODE_IO			1
+
+/* Normal DMA configuration register layout */
+#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
+#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
+#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
+#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)
+
+/** Dedicated DMA register values **/
+
+/* Dedicated DMA source/destination address mode values */
+#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_DDMA_ADDR_MODE_IO			1
+#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
+#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3
+
+/* Dedicated DMA source/destination data request type values */
+#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
+#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Dedicated DMA register layout **/
+
+/* Dedicated DMA configuration register layout */
+#define SUN4I_DDMA_CFG_BUSY			BIT(30)
+#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
+#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
+#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)
+
+/* Dedicated DMA parameter register layout */
+#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
+#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
+#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
+#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)
+
+/** DMA register offsets **/
+
+/* General register offsets */
+#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
+#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4
+
+/* Normal DMA register offsets */
+#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
+#define SUN4I_NDMA_CFG_REG			0x0
+#define SUN4I_NDMA_SRC_ADDR_REG			0x4
+#define SUN4I_NDMA_DST_ADDR_REG		0x8
+#define SUN4I_NDMA_BYTE_COUNT_REG		0xC
+
+/* Dedicated DMA register offsets */
+#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
+#define SUN4I_DDMA_CFG_REG			0x0
+#define SUN4I_DDMA_SRC_ADDR_REG			0x4
+#define SUN4I_DDMA_DST_ADDR_REG		0x8
+#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
+#define SUN4I_DDMA_PARA_REG			0x18
+
+/** DMA Driver **/
+
+/*
+ * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
+ * that's 16 channels. As for endpoints, there are 29 and 21
+ * respectively. Given that the Normal DMA endpoints (other than
+ * SDRAM) can be used as tx/rx, we need 78 vchans in total
+ */
+#define SUN4I_NDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DMA_NR_MAX_CHANNELS					\
+	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
+#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
+#define SUN4I_DDMA_NR_MAX_VCHANS	21
+#define SUN4I_DMA_NR_MAX_VCHANS						\
+	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
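+
+/*
+ * That is: 28 non-SDRAM NDMA endpoints usable in both directions plus
+ * SDRAM itself gives 28 * 2 + 1 = 57 NDMA vchans, and adding the 21
+ * DDMA vchans yields the 78 total mentioned above.
+ */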
+
+/* This set of SUN4I_DDMA timing parameters was found experimentally while
+ * working with the SPI driver and seems to make it behave correctly */
+#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
+	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |			\
+	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
+	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
+	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+
+struct sun4i_dma_pchan {
+	/* Register base of channel */
+	void __iomem			*base;
+	/* vchan currently being serviced */
+	struct sun4i_dma_vchan		*vchan;
+	/* Is this a dedicated pchan? */
+	int				is_dedicated;
+};
+
+struct sun4i_dma_vchan {
+	struct virt_dma_chan		vc;
+	struct dma_slave_config		cfg;
+	struct sun4i_dma_pchan		*pchan;
+	struct sun4i_dma_promise	*processing;
+	struct sun4i_dma_contract	*contract;
+	u8				endpoint;
+	int				is_dedicated;
+};
+
+struct sun4i_dma_promise {
+	u32				cfg;
+	u32				para;
+	dma_addr_t			src;
+	dma_addr_t			dst;
+	size_t				len;
+	struct list_head		list;
+};
+
+/* A contract is a set of promises */
+struct sun4i_dma_contract {
+	struct virt_dma_desc		vd;
+	struct list_head		demands;
+	struct list_head		completed_demands;
+	int				is_cyclic;
+};
+
+struct sun4i_dma_dev {
+	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+	struct dma_device		slave;
+	struct sun4i_dma_pchan		*pchans;
+	struct sun4i_dma_vchan		*vchans;
+	void __iomem			*base;
+	struct clk			*clk;
+	int				irq;
+	spinlock_t			lock;
+};
+
+static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
+{
+	return container_of(dev, struct sun4i_dma_dev, slave);
+}
+
+static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
+}
+
+static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct sun4i_dma_contract, vd);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static int convert_burst(u32 maxburst)
+{
+	if (maxburst > 8)
+		return -EINVAL;
+
+	/* 1 -> 0, 4 -> 1, 8 -> 2 */
+	return (maxburst >> 2);
+}
+
+static int convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
+		return -EINVAL;
+
+	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
+	return (addr_width >> 1);
+}
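+
+/*
+ * Both helpers rely on the register encoding being the argument shifted
+ * down: e.g. convert_burst(8) returns 8 >> 2 = 2, and convert_buswidth()
+ * maps DMA_SLAVE_BUSWIDTH_4_BYTES (4) to 4 >> 1 = 2.
+ */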
+
+static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	vchan_free_chan_resources(&vchan->vc);
+}
+
+static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
+						  struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
+	unsigned long flags;
+	int i, max;
+
+	/*
+	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
+	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+	 */
+	if (vchan->is_dedicated) {
+		i = SUN4I_NDMA_NR_MAX_CHANNELS;
+		max = SUN4I_DMA_NR_MAX_CHANNELS;
+	} else {
+		i = 0;
+		max = SUN4I_NDMA_NR_MAX_CHANNELS;
+	}
+
+	spin_lock_irqsave(&priv->lock, flags);
+	for_each_clear_bit_from(i, &priv->pchans_used, max) {
+		pchan = &pchans[i];
+		pchan->vchan = vchan;
+		set_bit(i, priv->pchans_used);
+		break;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return pchan;
+}
+
+static void release_pchan(struct sun4i_dma_dev *priv,
+			  struct sun4i_dma_pchan *pchan)
+{
+	unsigned long flags;
+	int nr = pchan - priv->pchans;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	pchan->vchan = NULL;
+	clear_bit(nr, priv->pchans_used);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void configure_pchan(struct sun4i_dma_pchan *pchan,
+			    struct sun4i_dma_promise *d)
+{
+	/*
+	 * Configure addresses and misc parameters depending on type
+	 * SUN4I_DDMA has an extra field with timing parameters
+	 */
+	if (pchan->is_dedicated) {
+		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
+	} else {
+		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
+	}
+}
+
+static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
+				struct sun4i_dma_pchan *pchan,
+				int half, int end)
+{
+	u32 reg;
+	int pchan_number = pchan - priv->pchans;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	if (half)
+		reg |= BIT(pchan_number * 2);
+	else
+		reg &= ~BIT(pchan_number * 2);
+
+	if (end)
+		reg |= BIT(pchan_number * 2 + 1);
+	else
+		reg &= ~BIT(pchan_number * 2 + 1);
+
+	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
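+
+/*
+ * Each pchan owns two adjacent bits in the IRQ enable register: bit 2n
+ * for the half-done interrupt and bit 2n + 1 for the end-of-transfer
+ * interrupt, so e.g. pchan 3 is controlled through bits 6 and 7.
+ */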
+
+/**
+ * Execute pending operations on a vchan
+ *
+ * When given a vchan, this function will try to acquire a suitable
+ * pchan and, if successful, will configure it to fulfill a promise
+ * from the next pending contract.
+ *
+ * This function must be called with &vchan->vc.lock held.
+ */
+static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
+				   struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_promise *promise = NULL;
+	struct sun4i_dma_contract *contract = NULL;
+	struct sun4i_dma_pchan *pchan;
+	struct virt_dma_desc *vd;
+	int ret;
+
+	lockdep_assert_held(&vchan->vc.lock);
+
+	/* We need a pchan to do anything, so secure one if available */
+	pchan = find_and_use_pchan(priv, vchan);
+	if (!pchan)
+		return -EBUSY;
+
+	/*
+	 * Channel endpoints must not be repeated, so if this vchan
+	 * has already submitted some work, we can't do anything else
+	 */
+	if (vchan->processing) {
+		dev_dbg(chan2dev(&vchan->vc.chan),
+			"processing something to this endpoint already\n");
+		ret = -EBUSY;
+		goto release_pchan;
+	}
+
+	do {
+		/* Figure out which contract we're working with today */
+		vd = vchan_next_desc(&vchan->vc);
+		if (!vd) {
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"No pending contract found");
+			ret = 0;
+			goto release_pchan;
+		}
+
+		contract = to_sun4i_dma_contract(vd);
+		if (list_empty(&contract->demands)) {
+			/* The contract has been completed so mark it as such */
+			list_del(&contract->vd.node);
+			vchan_cookie_complete(&contract->vd);
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"Empty contract found and marked complete");
+		}
+	} while (list_empty(&contract->demands));
+
+	/* Now find out what we need to do */
+	promise = list_first_entry(&contract->demands,
+				   struct sun4i_dma_promise, list);
+	vchan->processing = promise;
+
+	/* ... and make it reality */
+	if (promise) {
+		vchan->contract = contract;
+		vchan->pchan = pchan;
+		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+		configure_pchan(pchan, promise);
+	}
+
+	return 0;
+
+release_pchan:
+	release_pchan(priv, pchan);
+	return ret;
+}
+
+static int sanitize_config(struct dma_slave_config *sconfig,
+			   enum dma_transfer_direction direction)
+{
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->dst_maxburst)
+			return -EINVAL;
+
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->src_addr_width = sconfig->dst_addr_width;
+
+		if (!sconfig->src_maxburst)
+			sconfig->src_maxburst = sconfig->dst_maxburst;
+
+		break;
+
+	case DMA_DEV_TO_MEM:
+		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->src_maxburst)
+			return -EINVAL;
+
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->dst_addr_width = sconfig->src_addr_width;
+
+		if (!sconfig->dst_maxburst)
+			sconfig->dst_maxburst = sconfig->src_maxburst;
+
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Generate a promise, to be used in a normal DMA contract.
+ *
+ * An NDMA promise contains all the information required to program the
+ * normal part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig,
+		      enum dma_transfer_direction direction)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	ret = sanitize_config(sconfig, direction);
+	if (ret)
+		return NULL;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	dev_dbg(chan2dev(chan),
+		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
+		sconfig->src_maxburst, sconfig->dst_maxburst,
+		sconfig->src_addr_width, sconfig->dst_addr_width);
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a promise, to be used in a dedicated DMA contract.
+ *
+ * A DDMA promise contains all the information required to program the
+ * Dedicated part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a contract
+ *
+ * Contracts function as DMA descriptors. As our hardware does not support
+ * linked lists, we need to implement SG via software. We use a contract
+ * to hold all the pieces of the request and process them serially one
+ * after another. Each piece is represented as a promise.
+ */
+static struct sun4i_dma_contract *generate_dma_contract(void)
+{
+	struct sun4i_dma_contract *contract;
+
+	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
+	if (!contract)
+		return NULL;
+
+	INIT_LIST_HEAD(&contract->demands);
+	INIT_LIST_HEAD(&contract->completed_demands);
+
+	return contract;
+}
+
+/**
+ * Get next promise on a cyclic transfer
+ *
+ * Cyclic contracts contain a series of promises which are executed on a
+ * loop. This function returns the next promise from a cyclic contract,
+ * so it can be programmed into the hardware.
+ */
+static struct sun4i_dma_promise *
+get_next_cyclic_promise(struct sun4i_dma_contract *contract)
+{
+	struct sun4i_dma_promise *promise;
+
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (!promise) {
+		list_splice_init(&contract->completed_demands,
+				 &contract->demands);
+		promise = list_first_entry(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	}
+
+	return promise;
+}
+
+/**
+ * Free a contract and all its associated promises
+ */
+static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
+{
+	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
+	struct sun4i_dma_promise *promise;
+
+	/* Free all the demands and completed demands */
+	list_for_each_entry(promise, &contract->demands, list)
+		kfree(promise);
+
+	list_for_each_entry(promise, &contract->completed_demands, list)
+		kfree(promise);
+
+	kfree(contract);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			  dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	/*
+	 * We can only do the copy to bus-aligned addresses, so choose
+	 * the widest bus width to get decent performance. We also
+	 * maximize the burst size for the same reason.
+	 */
+	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->src_maxburst = 8;
+	sconfig->dst_maxburst = 8;
+
+	if (vchan->is_dedicated)
+		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
+	else
+		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
+						DMA_MEM_TO_MEM);
+
+	if (!promise) {
+		kfree(contract);
+		return NULL;
+	}
+
+	/* Configure memcpy mode */
+	if (vchan->is_dedicated) {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+	} else {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/* Fill the contract with our only promise */
+	list_add_tail(&promise->list, &contract->demands);
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
+			  size_t period_len, enum dma_transfer_direction dir,
+			  unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	dma_addr_t src, dest;
+	u32 endpoints;
+	int nr_periods, offset, plength, i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	if (vchan->is_dedicated) {
+		/*
+		 * As we are using this just for audio data, we only need
+		 * normal DMA. Nothing stops us from supporting dedicated
+		 * DMA here as well, so if a client ever requires it, it
+		 * will be simple to implement.
+		 */
+		dev_err(chan2dev(chan),
+			"Cyclic transfers are only supported on Normal DMA\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	contract->is_cyclic = 1;
+
+	/* Figure out the endpoints and the address we need */
+	if (dir == DMA_MEM_TO_DEV) {
+		src = buf;
+		dest = sconfig->dst_addr;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+	} else {
+		src = sconfig->src_addr;
+		dest = buf;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/*
+	 * We will be using half-done interrupts to make two periods
+	 * out of a promise, so we need to program the DMA engine less
+	 * often.
+	 *
+	 * The engine can interrupt on half-transfer, so we can use
+	 * this feature to program the engine half as often as if we
+	 * didn't use it (keep in mind the hardware doesn't support
+	 * linked lists).
+	 *
+	 * Say you have a set of periods (| marks the start/end, I for
+	 * interrupt, P for programming the engine to do a new
+	 * transfer), the easy but slow way would be to do
+	 *
+	 *  |---|---|---|---| (periods / promises)
+	 *  P  I,P I,P I,P  I
+	 *
+	 * Using half transfer interrupts you can do
+	 *
+	 *  |-------|-------| (promises as configured on hw)
+	 *  |---|---|---|---| (periods)
+	 *  P   I  I,P  I   I
+	 *
+	 * Which requires half the engine programming for the same
+	 * functionality.
+	 */
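+	/*
+	 * e.g. four periods of period_len bytes each become two
+	 * promises covering 2 * period_len bytes apiece.
+	 */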
+	nr_periods = DIV_ROUND_UP(len / period_len, 2);
+	for (i = 0; i < nr_periods; i++) {
+		/* Calculate the offset in the buffer and the length needed */
+		offset = i * period_len * 2;
+		plength = min((len - offset), (period_len * 2));
+		if (dir == DMA_MEM_TO_DEV)
+			src = buf + offset;
+		else
+			dest = buf + offset;
+
+		/* Make the promise */
+		promise = generate_ndma_promise(chan, src, dest,
+						plength, sconfig, dir);
+		if (!promise) {
+			sun4i_dma_free_contract(&contract->vd);
+			return NULL;
+		}
+		promise->cfg |= endpoints;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_transfer_direction dir,
+			unsigned long flags, void *context)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	u8 ram_type, io_mode, linear_mode;
+	struct scatterlist *sg;
+	dma_addr_t srcaddr, dstaddr;
+	u32 endpoints, para;
+	int i;
+
+	if (!sgl)
+		return NULL;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	if (vchan->is_dedicated) {
+		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+	} else {
+		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+	}
+
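+	/*
+	 * The device end of the transfer uses IO addressing while the
+	 * memory end is linear; which end is source or destination
+	 * depends on the direction.
+	 */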
+	if (dir == DMA_MEM_TO_DEV)
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
+	else
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		/* Figure out addresses */
+		if (dir == DMA_MEM_TO_DEV) {
+			srcaddr = sg_dma_address(sg);
+			dstaddr = sconfig->dst_addr;
+		} else {
+			srcaddr = sconfig->src_addr;
+			dstaddr = sg_dma_address(sg);
+		}
+
+		/*
+		 * These are the magic DMA engine timings that keep SPI going.
+		 * I haven't seen any interface on DMAEngine to configure
+		 * timings, and so far they seem to work for everything we
+		 * support, so I've kept them here. I don't know if other
+		 * devices need different timings because, as usual, we only
+		 * have the "para" bitfield meanings, but no comment on what
+		 * the values should be when doing a certain operation :|
+		 */
+		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
+
+		/* And make a suitable promise */
+		if (vchan->is_dedicated)
+			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig);
+		else
+			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig, dir);
+
+		if (!promise) {
+			sun4i_dma_free_contract(&contract->vd);
+			return NULL;
+		}
+
+		promise->cfg |= endpoints;
+		promise->para = para;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/*
+	 * Once we've got all the promises ready, add the contract
+	 * to the pending list on the vchan
+	 */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static int sun4i_dma_terminate_all(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	LIST_HEAD(head);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_get_all_descriptors(&vchan->vc, &head);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	/*
+	 * Clearing the configuration register will halt the pchan. Interrupts
+	 * may still trigger, so don't forget to disable them.
+	 */
+	if (pchan) {
+		if (pchan->is_dedicated)
+			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
+		else
+			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
+		set_pchan_interrupt(priv, pchan, 0, 0);
+		release_pchan(priv, pchan);
+	}
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_dma_desc_free_list(&vchan->vc, &head);
+	/* Clear these so the vchan is usable again */
+	vchan->processing = NULL;
+	vchan->pchan = NULL;
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int sun4i_dma_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	memcpy(&vchan->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
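+/*
+ * Translate a two-cell device tree DMA specifier into a channel. Cell 0
+ * selects normal (0) or dedicated (1) DMA, and cell 1 is the endpoint
+ * (DRQ) number, so a client node would contain something like this
+ * illustrative snippet:
+ *
+ *	dmas = <&dma 1 27>;
+ */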
+static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
+	struct sun4i_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 is_dedicated = dma_spec->args[0];
+	u8 endpoint = dma_spec->args[1];
+
+	/* Check if type is Normal or Dedicated */
+	if (is_dedicated != 0 && is_dedicated != 1)
+		return NULL;
+
+	/* Make sure the endpoint looks sane */
+	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
+	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&priv->slave);
+	if (!chan)
+		return NULL;
+
+	/* Assign the endpoint to the vchan */
+	vchan = to_sun4i_dma_vchan(chan);
+	vchan->is_dedicated = is_dedicated;
+	vchan->endpoint = endpoint;
+
+	return chan;
+}
+
+static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *state)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (!state || (ret == DMA_COMPLETE))
+		return ret;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vd = vchan_find_desc(&vchan->vc, cookie);
+	if (!vd)
+		goto exit;
+	contract = to_sun4i_dma_contract(vd);
+
+	list_for_each_entry(promise, &contract->demands, list)
+		bytes += promise->len;
+
+	/*
+	 * The hardware reports the remaining byte count. If possible,
+	 * replace the first listed element's full size with the actual
+	 * remaining amount.
+	 */
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (promise && pchan) {
+		bytes -= promise->len;
+		if (pchan->is_dedicated)
+			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		else
+			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+	}
+
+exit:
+	dma_set_residue(state, bytes);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return ret;
+}
+
+static void sun4i_dma_issue_pending(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	/*
+	 * If there are pending transactions for this vchan, push one of
+	 * them into the engine to get the ball rolling.
+	 */
+	if (vchan_issue_pending(&vchan->vc))
+		__execute_vchan_pending(priv, vchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
+{
+	struct sun4i_dma_dev *priv = dev_id;
+	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
+	struct sun4i_dma_vchan *vchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	unsigned long pendirq, irqs, disableirqs;
+	int bit, i, free_room, allow_mitigation = 1;
+
+	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+handle_pending:
+
+	disableirqs = 0;
+	free_room = 0;
+
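+	/*
+	 * Each pchan owns two consecutive pending bits: the even bit
+	 * signals half done, the odd bit signals end of transfer.
+	 */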
+	for_each_set_bit(bit, &pendirq, 32) {
+		pchan = &pchans[bit >> 1];
+		vchan = pchan->vchan;
+		if (!vchan) /* a terminated channel may still interrupt */
+			continue;
+		contract = vchan->contract;
+
+		/*
+		 * Disable the IRQ and free the pchan if it's an end
+		 * interrupt (odd bit)
+		 */
+		if (bit & 1) {
+			spin_lock(&vchan->vc.lock);
+
+			/*
+			 * Move the promise into the completed list now that
+			 * we're done with it
+			 */
+			list_move_tail(&vchan->processing->list,
+				       &contract->completed_demands);
+
+			/*
+			 * Cyclic DMA transfers are special:
+			 * - There's always something we can dispatch
+			 * - We need to run the callback
+			 * - Latency is very important, as this is used by audio
+			 * We therefore just cycle through the list and dispatch
+			 * whatever we have here, reusing the pchan. There's
+			 * no need to run the thread after this.
+			 *
+			 * For non-cyclic transfers we need to look around,
+			 * so we can program some more work, or notify the
+			 * client that their transfers have been completed.
+			 */
+			if (contract->is_cyclic) {
+				promise = get_next_cyclic_promise(contract);
+				vchan->processing = promise;
+				configure_pchan(pchan, promise);
+				vchan_cyclic_callback(&contract->vd);
+			} else {
+				vchan->processing = NULL;
+				vchan->pchan = NULL;
+
+				free_room = 1;
+				disableirqs |= BIT(bit);
+				release_pchan(priv, pchan);
+			}
+
+			spin_unlock(&vchan->vc.lock);
+		} else {
+			/* Half done interrupt */
+			if (contract->is_cyclic)
+				vchan_cyclic_callback(&contract->vd);
+			else
+				disableirqs |= BIT(bit);
+		}
+	}
+
+	/* Disable the IRQs for events we handled */
+	spin_lock(&priv->lock);
+	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel_relaxed(irqs & ~disableirqs,
+		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	spin_unlock(&priv->lock);
+
+	/* Writing 1 to the pending field will clear the pending interrupt */
+	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	/*
+	 * If a pchan was freed, we may be able to schedule something else,
+	 * so have a look around
+	 */
+	if (free_room) {
+		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+			vchan = &priv->vchans[i];
+			spin_lock(&vchan->vc.lock);
+			__execute_vchan_pending(priv, vchan);
+			spin_unlock(&vchan->vc.lock);
+		}
+	}
+
+	/*
+	 * Handle newer interrupts if some showed up, but only do it once
+	 * to avoid looping for too long.
+	 */
+	if (allow_mitigation) {
+		pendirq = readl_relaxed(priv->base +
+					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+		if (pendirq) {
+			allow_mitigation = 0;
+			goto handle_pending;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int sun4i_dma_probe(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv;
+	struct resource *res;
+	int i, j, ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "Cannot claim IRQ\n");
+		return priv->irq;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "No clock specified\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	platform_set_drvdata(pdev, priv);
+	spin_lock_init(&priv->lock);
+
+	dma_cap_zero(priv->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
+
+	INIT_LIST_HEAD(&priv->slave.channels);
+	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
+	priv->slave.device_tx_status		= sun4i_dma_tx_status;
+	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
+	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
+	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
+	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
+	priv->slave.device_config		= sun4i_dma_config;
+	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
+	priv->slave.copy_align			= 2;
+	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
+						  BIT(DMA_MEM_TO_DEV);
+	priv->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
+
+	priv->slave.dev = &pdev->dev;
+
+	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
+	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
+				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
+	if (!priv->vchans || !priv->pchans)
+		return -ENOMEM;
+
+	/*
+	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
+	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+	 * dedicated ones
+	 */
+	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+		priv->pchans[i].base = priv->base +
+			SUN4I_NDMA_CHANNEL_REG_BASE(i);
+
+	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+		priv->pchans[i].base = priv->base +
+			SUN4I_DDMA_CHANNEL_REG_BASE(j);
+		priv->pchans[i].is_dedicated = 1;
+	}
+
+	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+		struct sun4i_dma_vchan *vchan = &priv->vchans[i];
+
+		spin_lock_init(&vchan->vc.lock);
+		vchan->vc.desc_free = sun4i_dma_free_contract;
+		vchan_init(&vchan->vc, &priv->slave);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Couldn't enable the clock\n");
+		return ret;
+	}
+
+	/*
+	 * Make sure the IRQs are all disabled and accounted for. The bootloader
+	 * likes to leave these dirty
+	 */
+	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
+			       0, dev_name(&pdev->dev), priv);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot request IRQ\n");
+		goto err_clk_disable;
+	}
+
+	ret = dma_async_device_register(&priv->slave);
+	if (ret) {
+		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+		goto err_clk_disable;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
+					 priv);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
+	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
+
+	return 0;
+
+err_dma_unregister:
+	dma_async_device_unregister(&priv->slave);
+err_clk_disable:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int sun4i_dma_remove(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
+
+	/* Disable IRQ so no more work is scheduled */
+	disable_irq(priv->irq);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&priv->slave);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_dma_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-dma" },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver sun4i_dma_driver = {
+	.probe	= sun4i_dma_probe,
+	.remove	= sun4i_dma_remove,
+	.driver	= {
+		.name		= "sun4i-dma",
+		.of_match_table	= sun4i_dma_match,
+	},
+};
+
+module_platform_driver(sun4i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
+MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 842ff97..73e0be6 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -969,7 +969,7 @@
 	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
-	sdc->slave.copy_align			= 4;
+	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
 	sdc->slave.device_config		= sun6i_dma_config;
 	sdc->slave.device_pause			= sun6i_dma_pause;
 	sdc->slave.device_resume		= sun6i_dma_resume;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index eaf585e..c8f79dc 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -155,7 +155,6 @@
 	int				req_len;
 	bool				configured;
 	bool				last_sg;
-	bool				half_done;
 	struct list_head		node;
 	struct tegra_dma_desc		*dma_desc;
 };
@@ -188,7 +187,7 @@
 	bool			config_init;
 	int			id;
 	int			irq;
-	unsigned long		chan_base_offset;
+	void __iomem		*chan_addr;
 	spinlock_t		lock;
 	bool			busy;
 	struct tegra_dma	*tdma;
@@ -203,8 +202,6 @@
 	/* ISR handler and tasklet for bottom half of isr handling */
 	dma_isr_handler		isr_handler;
 	struct tasklet_struct	tasklet;
-	dma_async_tx_callback	callback;
-	void			*callback_param;
 
 	/* Channel-slave specific configuration */
 	unsigned int slave_id;
@@ -222,6 +219,13 @@
 	void __iomem			*base_addr;
 	const struct tegra_dma_chip_data *chip_data;
 
+	/*
+	 * Counter for managing global pausing of the DMA controller.
+	 * Only applicable for devices that don't support individual
+	 * channel pausing.
+	 */
+	u32				global_pause_count;
+
 	/* Some register need to be cache before suspend */
 	u32				reg_gen;
 
@@ -242,12 +246,12 @@
 static inline void tdc_write(struct tegra_dma_channel *tdc,
 		u32 reg, u32 val)
 {
-	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	writel(val, tdc->chan_addr + reg);
 }
 
 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
 {
-	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	return readl(tdc->chan_addr + reg);
 }
 
 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
@@ -361,16 +365,32 @@
 	struct tegra_dma *tdma = tdc->tdma;
 
 	spin_lock(&tdma->global_lock);
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
-	if (wait_for_burst_complete)
-		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+
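+	/*
+	 * Only the first pauser actually halts the controller; nested
+	 * callers just increment the count.
+	 */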
+	if (tdc->tdma->global_pause_count == 0) {
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+		if (wait_for_burst_complete)
+			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+	}
+
+	tdc->tdma->global_pause_count++;
+
+	spin_unlock(&tdma->global_lock);
 }
 
 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 {
 	struct tegra_dma *tdma = tdc->tdma;
 
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+	spin_lock(&tdma->global_lock);
+
+	if (WARN_ON(tdc->tdma->global_pause_count == 0))
+		goto out;
+
+	if (--tdc->tdma->global_pause_count == 0)
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
+			   TEGRA_APBDMA_GENERAL_ENABLE);
+
+out:
 	spin_unlock(&tdma->global_lock);
 }
 
@@ -601,7 +621,6 @@
 		return;
 
 	tdc_start_head_req(tdc);
-	return;
 }
 
 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
@@ -628,7 +647,6 @@
 		if (!st)
 			dma_desc->dma_status = DMA_ERROR;
 	}
-	return;
 }
 
 static void tegra_dma_tasklet(unsigned long data)
@@ -720,7 +738,6 @@
 	}
 end:
 	spin_unlock_irqrestore(&tdc->lock, flags);
-	return;
 }
 
 static int tegra_dma_terminate_all(struct dma_chan *dc)
@@ -932,7 +949,6 @@
 	struct tegra_dma_sg_req  *sg_req = NULL;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!tdc->config_init) {
 		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
@@ -943,9 +959,8 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
 	INIT_LIST_HEAD(&req_list);
@@ -1048,7 +1063,6 @@
 	dma_addr_t mem = buf_addr;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!buf_len || !period_len) {
 		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1087,12 +1101,10 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
-
 	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
 					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
@@ -1136,7 +1148,6 @@
 		sg_req->ch_regs.apb_seq = apb_seq;
 		sg_req->ch_regs.ahb_seq = ahb_seq;
 		sg_req->configured = false;
-		sg_req->half_done = false;
 		sg_req->last_sg = false;
 		sg_req->dma_desc = dma_desc;
 		sg_req->req_len = len;
@@ -1377,8 +1388,9 @@
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
-		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-					i * cdata->channel_reg_size;
+		tdc->chan_addr = tdma->base_addr +
+				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
+				 (i * cdata->channel_reg_size);
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
@@ -1418,6 +1430,7 @@
 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
 
+	tdma->global_pause_count = 0;
 	tdma->dma_dev.dev = &pdev->dev;
 	tdma->dma_dev.device_alloc_chan_resources =
 					tegra_dma_alloc_chan_resources;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 24f5ca2..5cce8c9 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -20,16 +20,19 @@
 #define TI_XBAR_OUTPUTS	127
 #define TI_XBAR_INPUTS	256
 
-static DEFINE_IDR(map_idr);
+#define TI_XBAR_EDMA_OFFSET	0
+#define TI_XBAR_SDMA_OFFSET	1
 
 struct ti_dma_xbar_data {
 	void __iomem *iomem;
 
 	struct dma_router dmarouter;
+	struct idr map_idr;
 
 	u16 safe_val; /* Value to rest the crossbar lines */
 	u32 xbar_requests; /* number of DMA requests connected to XBAR */
 	u32 dma_requests; /* number of DMA requests forwarded to DMA */
+	u32 dma_offset;
 };
 
 struct ti_dma_xbar_map {
@@ -51,7 +54,7 @@
 		map->xbar_in, map->xbar_out);
 
 	ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
-	idr_remove(&map_idr, map->xbar_out);
+	idr_remove(&xbar->map_idr, map->xbar_out);
 	kfree(map);
 }
 
@@ -81,12 +84,11 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	map->xbar_out = idr_alloc(&map_idr, NULL, 0, xbar->dma_requests,
+	map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests,
 				  GFP_KERNEL);
 	map->xbar_in = (u16)dma_spec->args[0];
 
-	/* The DMA request is 1 based in sDMA */
-	dma_spec->args[0] = map->xbar_out + 1;
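+	/*
+	 * The request number is 1-based on sDMA (offset 1) and 0-based
+	 * on eDMA (offset 0), per the matched DMA master's data.
+	 */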
+	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
 
 	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
 		map->xbar_in, map->xbar_out);
@@ -96,9 +98,22 @@
 	return map;
 }
 
+static const struct of_device_id ti_dma_master_match[] = {
+	{
+		.compatible = "ti,omap4430-sdma",
+		.data = (void *)TI_XBAR_SDMA_OFFSET,
+	},
+	{
+		.compatible = "ti,edma3",
+		.data = (void *)TI_XBAR_EDMA_OFFSET,
+	},
+	{},
+};
+
 static int ti_dma_xbar_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
 	struct device_node *dma_node;
 	struct ti_dma_xbar_data *xbar;
 	struct resource *res;
@@ -113,12 +128,20 @@
 	if (!xbar)
 		return -ENOMEM;
 
+	idr_init(&xbar->map_idr);
+
 	dma_node = of_parse_phandle(node, "dma-masters", 0);
 	if (!dma_node) {
 		dev_err(&pdev->dev, "Can't get DMA master node\n");
 		return -ENODEV;
 	}
 
+	match = of_match_node(ti_dma_master_match, dma_node);
+	if (!match) {
+		dev_err(&pdev->dev, "DMA master is not supported\n");
+		return -EINVAL;
+	}
+
 	if (of_property_read_u32(dma_node, "dma-requests",
 				 &xbar->dma_requests)) {
 		dev_info(&pdev->dev,
@@ -139,17 +162,15 @@
 		xbar->safe_val = (u16)safe_val;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	iomem = devm_ioremap_resource(&pdev->dev, res);
-	if (!iomem)
-		return -ENOMEM;
+	if (IS_ERR(iomem))
+		return PTR_ERR(iomem);
 
 	xbar->iomem = iomem;
 
 	xbar->dmarouter.dev = &pdev->dev;
 	xbar->dmarouter.route_free = ti_dma_xbar_free;
+	xbar->dma_offset = (u32)match->data;
 
 	platform_set_drvdata(pdev, xbar);
 
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index c4c3d93..559cd40 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -10,10 +10,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 /* Supports:
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index dff22ab..b23e8d5 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -21,6 +21,7 @@
  * NOTE: PM support is currently not available.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -151,7 +152,6 @@
 #define XGENE_DMA_PQ_CHANNEL		1
 #define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT		6	/* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC		5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
 #define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
@@ -764,12 +764,17 @@
 	struct xgene_dma_ring *ring = &chan->rx_ring;
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 	struct xgene_dma_desc_hw *desc_hw;
+	struct list_head ld_completed;
 	u8 status;
 
+	INIT_LIST_HEAD(&ld_completed);
+
+	spin_lock_bh(&chan->lock);
+
 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
 
-	/* Run the callback for each descriptor, in order */
+	/* Move all completed descriptors to ld completed queue, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 		/* Get subsequent hw descriptor from DMA rx ring */
 		desc_hw = &ring->desc_hw[ring->head];
@@ -812,15 +817,17 @@
 		/* Mark this hw descriptor as processed */
 		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-		xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-		xgene_dma_clean_running_descriptor(chan, desc_sw);
-
 		/*
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
 		chan->pending--;
+
+		/*
+		 * Delete this node from ld running queue and append it to
+		 * ld completed queue for further processing
+		 */
+		list_move_tail(&desc_sw->node, &ld_completed);
 	}
 
 	/*
@@ -829,6 +836,14 @@
 	 * ahead and free the descriptors below.
 	 */
 	xgene_chan_xfer_ld_pending(chan);
+
+	spin_unlock_bh(&chan->lock);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+		xgene_dma_run_tx_complete_actions(chan, desc_sw);
+		xgene_dma_clean_running_descriptor(chan, desc_sw);
+	}
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -877,11 +892,11 @@
 	if (!chan->desc_pool)
 		return;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Process all running descriptor */
 	xgene_dma_cleanup_descriptors(chan);
 
+	spin_lock_bh(&chan->lock);
+
 	/* Clean all link descriptor queues */
 	xgene_dma_free_desc_list(chan, &chan->ld_pending);
 	xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1201,15 +1216,11 @@
 {
 	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Run all cleanup for descriptors which have been completed */
 	xgene_dma_cleanup_descriptors(chan);
 
 	/* Re-enable DMA channel IRQ */
 	enable_irq(chan->rx_irq);
-
-	spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
@@ -1741,13 +1752,13 @@
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
 		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 }
 
@@ -1944,16 +1955,18 @@
 		return ret;
 
 	pdma->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pdma->clk)) {
+	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
 		dev_err(&pdev->dev, "Failed to get clk\n");
 		return PTR_ERR(pdma->clk);
 	}
 
 	/* Enable clk before accessing registers */
-	ret = clk_prepare_enable(pdma->clk);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
-		return ret;
+	if (!IS_ERR(pdma->clk)) {
+		ret = clk_prepare_enable(pdma->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+			return ret;
+		}
 	}
 
 	/* Remove DMA RAM out of shutdown */
@@ -1998,7 +2011,8 @@
 
 err_dma_mask:
 err_clk_enable:
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return ret;
 }
@@ -2022,11 +2036,20 @@
 		xgene_dma_delete_chan_rings(chan);
 	}
 
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+	{"APMC0D43", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
+
 static const struct of_device_id xgene_dma_of_match_ptr[] = {
 	{.compatible = "apm,xgene-storm-dma",},
 	{},
@@ -2039,6 +2062,7 @@
 	.driver = {
 		.name = "X-Gene-DMA",
 		.of_match_table = xgene_dma_of_match_ptr,
+		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
 	},
 };
 
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
new file mode 100644
index 0000000..39915a6
--- /dev/null
+++ b/drivers/dma/zx296702_dma.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright 2015 Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME		"zx-dma"
+#define DMA_ALIGN		4
+#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
+#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
+
+#define REG_ZX_SRC_ADDR			0x00
+#define REG_ZX_DST_ADDR			0x04
+#define REG_ZX_TX_X_COUNT		0x08
+#define REG_ZX_TX_ZY_COUNT		0x0c
+#define REG_ZX_SRC_ZY_STEP		0x10
+#define REG_ZX_DST_ZY_STEP		0x14
+#define REG_ZX_LLI_ADDR			0x1c
+#define REG_ZX_CTRL			0x20
+#define REG_ZX_TC_IRQ			0x800
+#define REG_ZX_SRC_ERR_IRQ		0x804
+#define REG_ZX_DST_ERR_IRQ		0x808
+#define REG_ZX_CFG_ERR_IRQ		0x80c
+#define REG_ZX_TC_IRQ_RAW		0x810
+#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
+#define REG_ZX_DST_ERR_IRQ_RAW		0x818
+#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
+#define REG_ZX_STATUS			0x820
+#define REG_ZX_DMA_GRP_PRIO		0x824
+#define REG_ZX_DMA_ARB			0x828
+
+#define ZX_FORCE_CLOSE			BIT(31)
+#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
+#define ZX_MAX_BURST_LEN		16
+#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
+#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
+#define ZX_IRQ_ENABLE_ALL		(3 << 4)
+#define ZX_DST_FIFO_MODE		BIT(3)
+#define ZX_SRC_FIFO_MODE		BIT(2)
+#define ZX_SOFT_REQ			BIT(1)
+#define ZX_CH_ENABLE			BIT(0)
+
+#define ZX_DMA_BUSWIDTHS \
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum zx_dma_burst_width {
+	ZX_DMA_WIDTH_8BIT	= 0,
+	ZX_DMA_WIDTH_16BIT	= 1,
+	ZX_DMA_WIDTH_32BIT	= 2,
+	ZX_DMA_WIDTH_64BIT	= 3,
+};
+
+struct zx_desc_hw {
+	u32 saddr;
+	u32 daddr;
+	u32 src_x;
+	u32 src_zy;
+	u32 src_zy_step;
+	u32 dst_zy_step;
+	u32 reserved1;
+	u32 lli;
+	u32 ctr;
+	u32 reserved[7]; /* pack as hardware registers region size */
+} __aligned(32);
+
+struct zx_dma_desc_sw {
+	struct virt_dma_desc	vd;
+	dma_addr_t		desc_hw_lli;
+	size_t			desc_num;
+	size_t			size;
+	struct zx_desc_hw	*desc_hw;
+};
+
+struct zx_dma_phy;
+
+struct zx_dma_chan {
+	struct dma_slave_config slave_cfg;
+	int			id; /* Request phy chan id */
+	u32			ccfg;
+	u32			cyclic;
+	struct virt_dma_chan	vc;
+	struct zx_dma_phy	*phy;
+	struct list_head	node;
+	dma_addr_t		dev_addr;
+	enum dma_status		status;
+};
+
+struct zx_dma_phy {
+	u32			idx;
+	void __iomem		*base;
+	struct zx_dma_chan	*vchan;
+	struct zx_dma_desc_sw	*ds_run;
+	struct zx_dma_desc_sw	*ds_done;
+};
+
+struct zx_dma_dev {
+	struct dma_device	slave;
+	void __iomem		*base;
+	spinlock_t		lock; /* lock for ch and phy */
+	struct list_head	chan_pending;
+	struct zx_dma_phy	*phy;
+	struct zx_dma_chan	*chans;
+	struct clk		*clk;
+	struct dma_pool		*pool;
+	u32			dma_channels;
+	u32			dma_requests;
+	int			irq;
+};
+
+#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
+
+static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct zx_dma_chan, vc.chan);
+}
+
+static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
+{
+	u32 val = 0;
+
+	val = readl_relaxed(phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	val |= ZX_FORCE_CLOSE;
+	writel_relaxed(val, phy->base + REG_ZX_CTRL);
+
+	val = 0x1 << phy->idx;
+	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
+{
+	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
+	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
+	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
+	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
+	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
+	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
+}
+
+static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
+{
+	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
+}
+
+static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
+{
+	return readl_relaxed(d->base + REG_ZX_STATUS);
+}
+
+static void zx_dma_init_state(struct zx_dma_dev *d)
+{
+	/* set same priority */
+	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
+	/* clear all irq */
+	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static int zx_dma_start_txd(struct zx_dma_chan *c)
+{
+	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	if (!c->phy)
+		return -EAGAIN;
+
+	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
+		return -EAGAIN;
+
+	if (vd) {
+		struct zx_dma_desc_sw *ds =
+			container_of(vd, struct zx_dma_desc_sw, vd);
+		/*
+		 * fetch and remove request from vc->desc_issued
+		 * so vc->desc_issued only contains desc pending
+		 */
+		list_del(&ds->vd.node);
+		c->phy->ds_run = ds;
+		c->phy->ds_done = NULL;
+		/* start dma */
+		zx_dma_set_desc(c->phy, ds->desc_hw);
+		return 0;
+	}
+	c->phy->ds_done = NULL;
+	c->phy->ds_run = NULL;
+	return -EAGAIN;
+}
+
+static void zx_dma_task(struct zx_dma_dev *d)
+{
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c, *cn;
+	unsigned pch, pch_alloc = 0;
+	unsigned long flags;
+
+	/* check new dma request of running channel in vc->desc_issued */
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		spin_lock_irqsave(&c->vc.lock, flags);
+		p = c->phy;
+		if (p && p->ds_done && zx_dma_start_txd(c)) {
+			/* No current txd associated with this channel */
+			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+			/* Mark this channel free */
+			c->phy = NULL;
+			p->vchan = NULL;
+		}
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+	}
+
+	/* check new channel request in d->chan_pending */
+	spin_lock_irqsave(&d->lock, flags);
+	while (!list_empty(&d->chan_pending)) {
+		c = list_first_entry(&d->chan_pending,
+				     struct zx_dma_chan, node);
+		p = &d->phy[c->id];
+		if (!p->vchan) {
+			/* remove from d->chan_pending */
+			list_del_init(&c->node);
+			pch_alloc |= 1 << c->id;
+			/* Mark this channel allocated */
+			p->vchan = c;
+			c->phy = p;
+		} else {
+			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
+		}
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	for (pch = 0; pch < d->dma_channels; pch++) {
+		if (pch_alloc & (1 << pch)) {
+			p = &d->phy[pch];
+			c = p->vchan;
+			if (c) {
+				spin_lock_irqsave(&c->vc.lock, flags);
+				zx_dma_start_txd(c);
+				spin_unlock_irqrestore(&c->vc.lock, flags);
+			}
+		}
+	}
+}
+
+static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
+{
+	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c;
+	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
+	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
+	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
+	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
+	u32 i, irq_chan = 0, task = 0;
+
+	while (tc) {
+		i = __ffs(tc);
+		tc &= ~BIT(i);
+		p = &d->phy[i];
+		c = p->vchan;
+		if (c) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&c->vc.lock, flags);
+			if (c->cyclic) {
+				vchan_cyclic_callback(&p->ds_run->vd);
+			} else {
+				vchan_cookie_complete(&p->ds_run->vd);
+				p->ds_done = p->ds_run;
+				task = 1;
+			}
+			spin_unlock_irqrestore(&c->vc.lock, flags);
+			irq_chan |= BIT(i);
+		}
+	}
+
+	if (serr || derr || cfg)
+		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
+			 serr, derr, cfg);
+
+	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+
+	if (task)
+		zx_dma_task(d);
+	return IRQ_HANDLED;
+}
+
+static void zx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&d->lock, flags);
+	list_del_init(&c->node);
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	vchan_free_chan_resources(&c->vc);
+	c->ccfg = 0;
+}
+
+static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *state)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_phy *p;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_COMPLETE || !state)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	p = c->phy;
+	ret = c->status;
+
+	/*
+	 * If the cookie is on our issue queue, then the residue is
+	 * its total size.
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
+	} else if ((!p) || (!p->ds_run)) {
+		bytes = 0;
+	} else {
+		struct zx_dma_desc_sw *ds = p->ds_run;
+		u32 clli = 0, index = 0;
+
+		bytes = 0;
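+		/* sum from the LLI the hardware is on through end of chain */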
+		clli = zx_dma_get_curr_lli(p);
+		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
+		for (; index < ds->desc_num; index++) {
+			bytes += ds->desc_hw[index].src_x;
+			/* end of lli */
+			if (!ds->desc_hw[index].lli)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	dma_set_residue(state, bytes);
+	return ret;
+}
+
+static void zx_dma_issue_pending(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+	int issue = 0;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	/* add request to vc->desc_issued */
+	if (vchan_issue_pending(&c->vc)) {
+		spin_lock(&d->lock);
+		if (!c->phy && list_empty(&c->node)) {
+			/* if new channel, add chan_pending */
+			list_add_tail(&c->node, &d->chan_pending);
+			issue = 1;
+			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+		}
+		spin_unlock(&d->lock);
+	} else {
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	if (issue)
+		zx_dma_task(d);
+}
+
+static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
+			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
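+	/*
+	 * Chain this LLI to the next one unless it is the last; the caller
+	 * fixes up the final lli field (0 ends the chain, or it points back
+	 * to the first LLI for cyclic transfers).
+	 */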
+	if ((num + 1) < ds->desc_num)
+		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+			sizeof(struct zx_desc_hw);
+	ds->desc_hw[num].saddr = src;
+	ds->desc_hw[num].daddr = dst;
+	ds->desc_hw[num].src_x = len;
+	ds->desc_hw[num].ctr = ccfg;
+}
+
+static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
+						     struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
+
+	if (num > lli_limit) {
+		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
+			&c->vc, num, lli_limit);
+		return NULL;
+	}
+
+	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+	if (!ds)
+		return NULL;
+
+	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	if (!ds->desc_hw) {
+		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+		kfree(ds);
+		return NULL;
+	}
+	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
+	ds->desc_num = num;
+	return ds;
+}
+
+static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
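+		/* 1/2/4/8-byte widths map to enum values 0/1/2/3 (log2) */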
+		return ffs(width) - 1;
+	default:
+		return ZX_DMA_WIDTH_32BIT;
+	}
+}
+
+static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
+{
+	struct dma_slave_config *cfg = &c->slave_cfg;
+	enum zx_dma_burst_width src_width;
+	enum zx_dma_burst_width dst_width;
+	u32 maxburst = 0;
+
+	switch (dir) {
+	case DMA_MEM_TO_MEM:
+		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
+			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
+			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
+			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
+		break;
+	case DMA_MEM_TO_DEV:
+		c->dev_addr = cfg->dst_addr;
+		/*
+		 * The dst length is calculated from the src width, the
+		 * length and the dst width, and we need to make sure it
+		 * does not exceed the maximum length. A trailing single
+		 * transaction that does not fill a full burst also
+		 * requires identical src/dst data widths.
+		 */
+		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
+		maxburst = cfg->dst_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(dst_width)
+			| ZX_DST_BURST_WIDTH(dst_width);
+		break;
+	case DMA_DEV_TO_MEM:
+		c->dev_addr = cfg->src_addr;
+		src_width = zx_dma_burst_width(cfg->src_addr_width);
+		maxburst = cfg->src_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(src_width)
+			| ZX_DST_BURST_WIDTH(src_width);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
+	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t copy = 0;
+	int num = 0;
+
+	if (!len)
+		return NULL;
+
+	if (zx_pre_config(c, DMA_MEM_TO_MEM))
+		return NULL;
+
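+	/* one hardware LLI per DMA_MAX_SIZE-sized chunk of the copy */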
+	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	ds->size = len;
+	num = 0;
+
+	do {
+		copy = min_t(size_t, len, DMA_MAX_SIZE);
+		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+		src += copy;
+		dst += copy;
+		len -= copy;
+	} while (len);
+
+	c->cyclic = 0;
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+	enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t len, avail, total = 0;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	int num = sglen, i;
+
+	if (!sgl)
+		return NULL;
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	c->cyclic = 0;
+	num = 0;
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				src = addr;
+				dst = c->dev_addr;
+			} else if (dir == DMA_DEV_TO_MEM) {
+				src = c->dev_addr;
+				dst = addr;
+			}
+
+			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	ds->size = total;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction dir,
+		unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	dma_addr_t src = 0, dst = 0;
+	int num_periods = buf_len / period_len;
+	int buf = 0, num = 0;
+
+	if (period_len > DMA_MAX_SIZE) {
+		dev_err(chan->device->dev, "maximum period size exceeded\n");
+		return NULL;
+	}
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	ds = zx_alloc_desc_resource(num_periods, chan);
+	if (!ds)
+		return NULL;
+	c->cyclic = 1;
+
+	while (buf < buf_len) {
+		if (dir == DMA_MEM_TO_DEV) {
+			src = dma_addr;
+			dst = c->dev_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = c->dev_addr;
+			dst = dma_addr;
+		}
+		zx_dma_fill_desc(ds, dst, src, period_len, num++,
+				 c->ccfg | ZX_IRQ_ENABLE_ALL);
+		dma_addr += period_len;
+		buf += period_len;
+	}
+
+	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+	ds->size = buf_len;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int zx_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+
+	if (!cfg)
+		return -EINVAL;
+
+	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
+
+	return 0;
+}
+
+static int zx_dma_terminate_all(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	struct zx_dma_phy *p = c->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		zx_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = NULL;
+		p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val |= ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static void zx_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct zx_dma_desc_sw *ds =
+		container_of(vd, struct zx_dma_desc_sw, vd);
+	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
+
+	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+	kfree(ds);
+}
+
+static const struct of_device_id zx6702_dma_dt_ids[] = {
+	{ .compatible = "zte,zx296702-dma", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
+
+static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+					       struct of_dma *ofdma)
+{
+	struct zx_dma_dev *d = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+	struct dma_chan *chan;
+	struct zx_dma_chan *c;
+
+	if (request >= d->dma_requests)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&d->slave);
+	if (!chan) {
+		dev_err(d->slave.dev, "failed to get a channel in %s\n", __func__);
+		return NULL;
+	}
+	c = to_zx_chan(chan);
+	c->id = request;
+	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
+		 c->id, &c->vc);
+	return chan;
+}
+
+static int zx_dma_probe(struct platform_device *op)
+{
+	struct zx_dma_dev *d;
+	struct resource *iores;
+	int i, ret = 0;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->base = devm_ioremap_resource(&op->dev, iores);
+	if (IS_ERR(d->base))
+		return PTR_ERR(d->base);
+
+	of_property_read_u32(op->dev.of_node,
+			     "dma-channels", &d->dma_channels);
+	of_property_read_u32(op->dev.of_node,
+			     "dma-requests", &d->dma_requests);
+	if (!d->dma_requests || !d->dma_channels)
+		return -EINVAL;
+
+	d->clk = devm_clk_get(&op->dev, NULL);
+	if (IS_ERR(d->clk)) {
+		dev_err(&op->dev, "no dma clk\n");
+		return PTR_ERR(d->clk);
+	}
+
+	d->irq = platform_get_irq(op, 0);
+	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
+			       0, DRIVER_NAME, d);
+	if (ret)
+		return ret;
+
+	/* A DMA memory pool for LLIs, align on 32-byte boundary */
+	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+			LLI_BLOCK_SIZE, 32, 0);
+	if (!d->pool)
+		return -ENOMEM;
+
+	/* init phy channel */
+	d->phy = devm_kzalloc(&op->dev,
+		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
+	if (!d->phy)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_channels; i++) {
+		struct zx_dma_phy *p = &d->phy[i];
+
+		p->idx = i;
+		p->base = d->base + i * 0x40;
+	}
+
+	INIT_LIST_HEAD(&d->slave.channels);
+	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
+	d->slave.dev = &op->dev;
+	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
+	d->slave.device_tx_status = zx_dma_tx_status;
+	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
+	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
+	d->slave.device_issue_pending = zx_dma_issue_pending;
+	d->slave.device_config = zx_dma_config;
+	d->slave.device_terminate_all = zx_dma_terminate_all;
+	d->slave.device_pause = zx_dma_transfer_pause;
+	d->slave.device_resume = zx_dma_transfer_resume;
+	d->slave.copy_align = DMA_ALIGN;
+	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
+			| BIT(DMA_DEV_TO_MEM);
+	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	/* init virtual channel */
+	d->chans = devm_kzalloc(&op->dev,
+		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
+	if (!d->chans)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_requests; i++) {
+		struct zx_dma_chan *c = &d->chans[i];
+
+		c->status = DMA_IN_PROGRESS;
+		INIT_LIST_HEAD(&c->node);
+		c->vc.desc_free = zx_dma_free_desc;
+		vchan_init(&c->vc, &d->slave);
+	}
+
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+		goto zx_dma_out;
+	}
+
+	zx_dma_init_state(d);
+
+	spin_lock_init(&d->lock);
+	INIT_LIST_HEAD(&d->chan_pending);
+	platform_set_drvdata(op, d);
+
+	ret = dma_async_device_register(&d->slave);
+	if (ret)
+		goto clk_dis;
+
+	ret = of_dma_controller_register(op->dev.of_node,
+					 zx_of_dma_simple_xlate, d);
+	if (ret)
+		goto of_dma_register_fail;
+
+	dev_info(&op->dev, "initialized\n");
+	return 0;
+
+of_dma_register_fail:
+	dma_async_device_unregister(&d->slave);
+clk_dis:
+	clk_disable_unprepare(d->clk);
+zx_dma_out:
+	return ret;
+}
+
+static int zx_dma_remove(struct platform_device *op)
+{
+	struct zx_dma_chan *c, *cn;
+	struct zx_dma_dev *d = platform_get_drvdata(op);
+
+	/* explicitly free the irq */
+	devm_free_irq(&op->dev, d->irq, d);
+
+	dma_async_device_unregister(&d->slave);
+	of_dma_controller_free(op->dev.of_node);
+
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+	}
+	clk_disable_unprepare(d->clk);
+	dmam_pool_destroy(d->pool);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zx_dma_suspend_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	u32 stat = 0;
+
+	stat = zx_dma_get_chan_stat(d);
+	if (stat) {
+		dev_warn(d->slave.dev,
+			 "chan %d is running fail to suspend\n", stat);
+		return -1;
+	}
+	clk_disable_unprepare(d->clk);
+	return 0;
+}
+
+static int zx_dma_resume_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+	zx_dma_init_state(d);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
+
+static struct platform_driver zx_pdma_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.pm	= &zx_dma_pmops,
+		.of_match_table = zx6702_dma_dt_ids,
+	},
+	.probe		= zx_dma_probe,
+	.remove		= zx_dma_remove,
+};
+
+module_platform_driver(zx_pdma_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
+MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
+MODULE_LICENSE("GPL v2");
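
The error path in zx_dma_probe() above follows the usual dmaengine probe
pattern: teardown labels mirror the setup order in reverse, so a failure
at any stage releases exactly what was acquired before it.  A minimal
sketch of that pattern; the step_*() helpers are hypothetical stand-ins
for clk_prepare_enable(), dma_async_device_register() and
of_dma_controller_register():

/* Hypothetical stand-ins for the real setup/teardown calls. */
int step_enable_clock(void);
int step_register_device(void);
int step_register_of(void);
void step_unregister_device(void);
void step_disable_clock(void);

static int probe_unwind_sketch(void)
{
	int ret;

	ret = step_enable_clock();		/* stage 1 */
	if (ret)
		return ret;			/* nothing to undo yet */

	ret = step_register_device();		/* stage 2 */
	if (ret)
		goto err_clock;

	ret = step_register_of();		/* stage 3 */
	if (ret)
		goto err_device;

	return 0;

err_device:
	step_unregister_device();		/* undo stage 2 */
err_clock:
	step_disable_clock();			/* undo stage 1 */
	return ret;
}
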
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 99c69a3..d8de6a8 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,6 +5,9 @@
 
 menu "Firmware Drivers"
 
+config ARM_PSCI_FW
+	bool
+
 config EDD
 	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
 	depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4a4b897..000830f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
 obj-$(CONFIG_DMI)		+= dmi_scan.o
 obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
 obj-$(CONFIG_EDD)		+= edd.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 0000000..42700f0
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,382 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values. For such
+ * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
+ * function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN64_##name
+#else
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN_##name
+#endif
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops;
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+	PSCI_FN_CPU_SUSPEND,
+	PSCI_FN_CPU_ON,
+	PSCI_FN_CPU_OFF,
+	PSCI_FN_MIGRATE,
+	PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+static int psci_to_linux_errno(int errno)
+{
+	switch (errno) {
+	case PSCI_RET_SUCCESS:
+		return 0;
+	case PSCI_RET_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case PSCI_RET_INVALID_PARAMS:
+		return -EINVAL;
+	case PSCI_RET_DENIED:
+		return -EPERM;
+	}
+
+	return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+	err = invoke_psci_fn(fn, state, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_OFF];
+	err = invoke_psci_fn(fn, state, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_ON];
+	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_MIGRATE];
+	err = invoke_psci_fn(fn, cpuid, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+		unsigned long lowest_affinity_level)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
+			      target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
+			      0, 0, 0);
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	} else if (!strcmp("smc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_smc;
+	} else {
+		pr_warn("invalid \"method\" property: %s\n", method);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu = -1;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
+	psci_ops.cpu_suspend = psci_cpu_suspend;
+
+	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+	psci_ops.cpu_off = psci_cpu_off;
+
+	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
+	psci_ops.cpu_on = psci_cpu_on;
+
+	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
+	psci_ops.migrate = psci_migrate;
+
+	psci_ops.affinity_info = psci_affinity_info;
+
+	psci_ops.migrate_info_type = psci_migrate_info_type;
+
+	arm_pm_restart = psci_sys_reset;
+
+	pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+			PSCI_VERSION_MAJOR(ver),
+			PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
+	}
+
+	psci_0_2_set_functions();
+
+	psci_init_migrate();
+
+	return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >= 0.2
+ *
+ * Probe based on the PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+	/*
+	 * Starting with v0.2, the PSCI specification introduced a call
+	 * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version specific initialization
+	 * can be carried out according to the specific version reported
+	 * by firmware.
+	 */
+	err = psci_probe();
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+/*
+ * For PSCI < v0.2, get the PSCI function IDs from DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+	u32 id;
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+
+	pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+		psci_ops.cpu_suspend = psci_cpu_suspend;
+	}
+
+	if (!of_property_read_u32(np, "cpu_off", &id)) {
+		psci_function_id[PSCI_FN_CPU_OFF] = id;
+		psci_ops.cpu_off = psci_cpu_off;
+	}
+
+	if (!of_property_read_u32(np, "cpu_on", &id)) {
+		psci_function_id[PSCI_FN_CPU_ON] = id;
+		psci_ops.cpu_on = psci_cpu_on;
+	}
+
+	if (!of_property_read_u32(np, "migrate", &id)) {
+		psci_function_id[PSCI_FN_MIGRATE] = id;
+		psci_ops.migrate = psci_migrate;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci",	.data = psci_0_1_init},
+	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
+	{},
+};
+
+int __init psci_dt_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *matched_np;
+	psci_initcall_t init_fn;
+
+	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+	if (!np)
+		return -ENODEV;
+
+	init_fn = (psci_initcall_t)matched_np->data;
+	return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
+ * required by the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+	if (!acpi_psci_present()) {
+		pr_info("is not implemented in ACPI.\n");
+		return -EOPNOTSUPP;
+	}
+
+	pr_info("probing for conduit method from ACPI.\n");
+
+	if (acpi_psci_use_hvc())
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	else
+		invoke_psci_fn = __invoke_psci_fn_smc;
+
+	return psci_probe();
+}
+#endif
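
Everything outside this file consumes the interface through psci_ops.  A
hedged sketch of a secondary-CPU bringup path, loosely modelled on the
arm64 SMP code; secondary_entry is an assumption borrowed from that code
(cpu_logical_map() comes from <asm/smp_plat.h>, already included above):

extern void secondary_entry(void);	/* assumed arch entry stub */

static int boot_secondary_sketch(unsigned int cpu)
{
	if (!psci_ops.cpu_on)		/* no CPU_ON method discovered */
		return -ENODEV;

	/*
	 * CPU_ON takes the physical (MPIDR) id of the target CPU and
	 * the physical address at which it should enter the kernel.
	 */
	return psci_ops.cpu_on(cpu_logical_map(cpu),
			       __pa(secondary_entry));
}
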
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c..29e6850 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
 
-#include <asm/outercache.h>
 #include <asm/cacheflush.h>
 
 #include "qcom_scm.h"
@@ -219,8 +218,7 @@
 	 * Flush the command buffer so that the secure world sees
 	 * the correct data.
 	 */
-	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
-	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+	secure_flush_area(cmd, cmd->len);
 
 	ret = smc(cmd_addr);
 	if (ret < 0)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8f1fe73..b4fc9e4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -113,7 +113,6 @@
 config GPIO_ALTERA
 	tristate "Altera GPIO"
 	depends on OF_GPIO
-	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
 	help
 	  Say Y or M here to build support for the Altera PIO device.
@@ -131,6 +130,7 @@
 	default y if ARCH_BRCMSTB
 	depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST)
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
 
@@ -172,6 +172,7 @@
 	depends on CRIS || COMPILE_TEST
 	depends on OF
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
 
@@ -308,7 +309,6 @@
 	def_bool y
 	depends on PLAT_ORION
 	depends on OF
-	select GPIO_GENERIC
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -1005,6 +1005,12 @@
 	  SPI driver for Freescale MC33880 high-side/low-side switch.
 	  This provides GPIO interface supporting inputs and outputs.
 
+config GPIO_ZX
+	bool "ZTE ZX GPIO support"
+	select GPIOLIB_IRQCHIP
+	help
+	  Say yes here to support the GPIO device on ZTE ZX SoCs.
+
 endmenu
 
 menu "USB GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f82cd67..f79a7c4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_GPIO_ALTERA)  	+= gpio-altera.o
 obj-$(CONFIG_GPIO_AMD8111)	+= gpio-amd8111.o
 obj-$(CONFIG_GPIO_ARIZONA)	+= gpio-arizona.o
+obj-$(CONFIG_ATH79)		+= gpio-ath79.o
 obj-$(CONFIG_GPIO_BCM_KONA)	+= gpio-bcm-kona.o
 obj-$(CONFIG_GPIO_BRCMSTB)	+= gpio-brcmstb.o
 obj-$(CONFIG_GPIO_BT8XX)	+= gpio-bt8xx.o
@@ -116,3 +117,4 @@
 obj-$(CONFIG_GPIO_XTENSA)	+= gpio-xtensa.o
 obj-$(CONFIG_GPIO_ZEVIO)	+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
+obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 07ba823..903fcf4 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -59,13 +59,13 @@
  * automatically disposed on driver detach. See gpiod_get() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get);
+EXPORT_SYMBOL(devm_gpiod_get);
 
 /**
  * devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,13 +77,13 @@
  * are automatically disposed on driver detach. See gpiod_get_optional() for
  * detailed information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get_optional);
+EXPORT_SYMBOL(devm_gpiod_get_optional);
 
 /**
  * devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -96,7 +96,7 @@
  * automatically disposed on driver detach. See gpiod_get_index() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags)
@@ -120,7 +120,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index);
+EXPORT_SYMBOL(devm_gpiod_get_index);
 
 /**
  * devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
@@ -182,10 +182,10 @@
  * gpiod_get_index_optional() for detailed information about behavior and
  * return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
 							     const char *con_id,
 							     unsigned int index,
-							 enum gpiod_flags flags)
+							     enum gpiod_flags flags)
 {
 	struct gpio_desc *desc;
 
@@ -197,7 +197,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
+EXPORT_SYMBOL(devm_gpiod_get_index_optional);
 
 /**
  * devm_gpiod_get_array - Resource-managed gpiod_get_array()
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0763655..6ed7c0f 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -129,7 +129,7 @@
 	if (IS_ERR(dat))
 		return PTR_ERR(dat);
 
-	priv->flags = (unsigned)of_id->data;
+	priv->flags = (uintptr_t) of_id->data;
 
 	err = bgpio_init(&priv->bgc, &pdev->dev,
 			 DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d3fe6a6..984186e 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -305,15 +305,7 @@
 		irq_set_chip_and_handler(irq, &adp5588_irq_chip,
 					 handle_level_irq);
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-		/*
-		 * ARM needs us to explicitly flag the IRQ as VALID,
-		 * once we do so, it will also set the noprobe.
-		 */
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
+		irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(client->irq,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 0f3d336..9b7e0b3d 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -338,9 +338,9 @@
 {
 	struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
 
-	gpiochip_remove(&altera_gc->mmchip.gc);
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
 
-	return -EIO;
+	return 0;
 }
 
 static const struct of_device_id altera_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
new file mode 100644
index 0000000..03b9953
--- /dev/null
+++ b/drivers/gpio/gpio-ath79.c
@@ -0,0 +1,204 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO API support
+ *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-ath79.h>
+#include <linux/of_device.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+static void __iomem *ath79_gpio_base;
+static u32 ath79_gpio_count;
+static DEFINE_SPINLOCK(ath79_gpio_lock);
+
+static void __ath79_gpio_set_value(unsigned gpio, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+
+	if (value)
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+}
+
+static int __ath79_gpio_get_value(unsigned gpio)
+{
+	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+}
+
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+	return __ath79_gpio_get_value(offset);
+}
+
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+				  unsigned offset, int value)
+{
+	__ath79_gpio_set_value(offset, value);
+}
+
+static int ath79_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ath79_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+					int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static struct gpio_chip ath79_gpio_chip = {
+	.label			= "ath79",
+	.get			= ath79_gpio_get_value,
+	.set			= ath79_gpio_set_value,
+	.direction_input	= ath79_gpio_direction_input,
+	.direction_output	= ath79_gpio_direction_output,
+	.base			= 0,
+};
+
+static const struct of_device_id ath79_gpio_of_match[] = {
+	{ .compatible = "qca,ar7100-gpio" },
+	{ .compatible = "qca,ar9340-gpio" },
+	{},
+};
+
+static int ath79_gpio_probe(struct platform_device *pdev)
+{
+	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	bool oe_inverted;
+	int err;
+
+	if (np) {
+		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
+		if (err) {
+			dev_err(&pdev->dev, "ngpios property is not valid\n");
+			return err;
+		}
+		if (ath79_gpio_count >= 32) {
+			dev_err(&pdev->dev, "ngpios must be less than 32\n");
+			return -EINVAL;
+		}
+		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
+	} else if (pdata) {
+		ath79_gpio_count = pdata->ngpios;
+		oe_inverted = pdata->oe_inverted;
+	} else {
+		dev_err(&pdev->dev, "No DT node or platform data found\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+	ath79_gpio_base = devm_ioremap_nocache(
+		&pdev->dev, res->start, resource_size(res));
+	if (!ath79_gpio_base)
+		return -ENOMEM;
+
+	ath79_gpio_chip.dev = &pdev->dev;
+	ath79_gpio_chip.ngpio = ath79_gpio_count;
+	if (oe_inverted) {
+		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+	}
+
+	err = gpiochip_add(&ath79_gpio_chip);
+	if (err) {
+		dev_err(&pdev->dev,
+			"cannot add AR71xx GPIO chip, error=%d", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static struct platform_driver ath79_gpio_driver = {
+	.driver = {
+		.name = "ath79-gpio",
+		.of_match_table	= ath79_gpio_of_match,
+	},
+	.probe = ath79_gpio_probe,
+};
+
+module_platform_driver(ath79_gpio_driver);
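
Note the two register styles in the driver above: writing a one-hot mask
to the SET/CLEAR pair needs no locking because the hardware applies the
write atomically, while the shared OE (output-enable) register must be
updated read-modify-write under the spinlock.  A minimal sketch of the
distinction, with hypothetical register offsets:

#define SK_REG_OE	0x00	/* hypothetical: shared output-enable */
#define SK_REG_SET	0x0c	/* hypothetical: write 1 = drive high */
#define SK_REG_CLEAR	0x10	/* hypothetical: write 1 = drive low */

static DEFINE_SPINLOCK(sk_lock);

/* Lock-free: one write touches only this line's bit. */
static void sk_set_value(void __iomem *base, unsigned int gpio, int val)
{
	__raw_writel(BIT(gpio), base + (val ? SK_REG_SET : SK_REG_CLEAR));
}

/* Read-modify-write on a shared register: must hold the lock. */
static void sk_dir_output(void __iomem *base, unsigned int gpio)
{
	unsigned long flags;

	spin_lock_irqsave(&sk_lock, flags);
	__raw_writel(__raw_readl(base + SK_REG_OE) | BIT(gpio),
		     base + SK_REG_OE);
	spin_unlock_irqrestore(&sk_lock, flags);
}
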
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 40343fa..31b90ac 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -438,7 +438,7 @@
 	void __iomem *reg_base;
 	int bit, bank_id;
 	unsigned long sta;
-	struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq);
+	struct bcm_kona_gpio_bank *bank = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
@@ -525,11 +525,7 @@
 		return ret;
 	irq_set_lockdep_class(irq, &gpio_lock_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -644,17 +640,6 @@
 		dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
 		goto err_irq_domain;
 	}
-	for (i = 0; i < chip->ngpio; i++) {
-		int irq = bcm_kona_gpio_to_irq(chip, i);
-		irq_set_lockdep_class(irq, &gpio_lock_class);
-		irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip,
-					 handle_simple_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
-	}
 	for (i = 0; i < kona_gpio->num_bank; i++) {
 		bank = &kona_gpio->banks[i];
 		irq_set_chained_handler_and_data(bank->irq,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 4630a81..9ea86d2 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -17,6 +17,10 @@
 #include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/basic_mmio_gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
 
 #define GIO_BANK_SIZE           0x20
 #define GIO_ODEN(bank)          (((bank) * GIO_BANK_SIZE) + 0x00)
@@ -34,14 +38,18 @@
 	struct bgpio_chip bgc;
 	struct brcmstb_gpio_priv *parent_priv;
 	u32 width;
+	struct irq_chip irq_chip;
 };
 
 struct brcmstb_gpio_priv {
 	struct list_head bank_list;
 	void __iomem *reg_base;
-	int num_banks;
 	struct platform_device *pdev;
+	int parent_irq;
 	int gpio_base;
+	bool can_wake;
+	int parent_wake_irq;
+	struct notifier_block reboot_notifier;
 };
 
 #define MAX_GPIO_PER_BANK           32
@@ -63,6 +71,203 @@
 	return bank->parent_priv;
 }
 
+static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
+		unsigned int offset, bool enable)
+{
+	struct bgpio_chip *bgc = &bank->bgc;
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = bgc->pin2mask(bgc, offset);
+	u32 imask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bgc->lock, flags);
+	imask = bgc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+	if (enable)
+		imask |= mask;
+	else
+		imask &= ~mask;
+	bgc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
+	spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+/* -------------------- IRQ chip functions -------------------- */
+
+static void brcmstb_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, false);
+}
+
+static void brcmstb_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, true);
+}
+
+static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = BIT(d->hwirq);
+	u32 edge_insensitive, iedge_insensitive;
+	u32 edge_config, iedge_config;
+	u32 level, ilevel;
+	unsigned long flags;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		level = mask;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		level = 0;
+		edge_config = mask;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		level = 0;
+		edge_config = 0;  /* don't care, but want known value */
+		edge_insensitive = mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+
+	iedge_config = bank->bgc.read_reg(priv->reg_base +
+			GIO_EC(bank->id)) & ~mask;
+	iedge_insensitive = bank->bgc.read_reg(priv->reg_base +
+			GIO_EI(bank->id)) & ~mask;
+	ilevel = bank->bgc.read_reg(priv->reg_base +
+			GIO_LEVEL(bank->id)) & ~mask;
+
+	bank->bgc.write_reg(priv->reg_base + GIO_EC(bank->id),
+			iedge_config | edge_config);
+	bank->bgc.write_reg(priv->reg_base + GIO_EI(bank->id),
+			iedge_insensitive | edge_insensitive);
+	bank->bgc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
+			ilevel | level);
+
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+	return 0;
+}
+
+static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
+		unsigned int enable)
+{
+	int ret = 0;
+
+	/*
+	 * Only enable wake IRQ once for however many hwirqs can wake
+	 * since they all use the same wake IRQ.  Mask will be set
+	 * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
+	 */
+	if (enable)
+		ret = enable_irq_wake(priv->parent_wake_irq);
+	else
+		ret = disable_irq_wake(priv->parent_wake_irq);
+	if (ret)
+		dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
+				enable ? "enable" : "disable");
+	return ret;
+}
+
+static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+
+	return brcmstb_gpio_priv_set_wake(priv, enable);
+}
+
+static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
+{
+	struct brcmstb_gpio_priv *priv = data;
+
+	if (!priv || irq != priv->parent_wake_irq)
+		return IRQ_NONE;
+	pm_wakeup_event(&priv->pdev->dev, 0);
+	return IRQ_HANDLED;
+}
+
+static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct irq_domain *irq_domain = bank->bgc.gc.irqdomain;
+	void __iomem *reg_base = priv->reg_base;
+	unsigned long status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+	while ((status = bank->bgc.read_reg(reg_base + GIO_STAT(bank->id)) &
+			 bank->bgc.read_reg(reg_base + GIO_MASK(bank->id)))) {
+		int bit;
+
+		for_each_set_bit(bit, &status, 32) {
+			u32 stat = bank->bgc.read_reg(reg_base +
+						      GIO_STAT(bank->id));
+			if (bit >= bank->width)
+				dev_warn(&priv->pdev->dev,
+					 "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
+					 bank->id, bit);
+			bank->bgc.write_reg(reg_base + GIO_STAT(bank->id),
+					    stat | BIT(bit));
+			generic_handle_irq(irq_find_mapping(irq_domain, bit));
+		}
+	}
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+}
+
+/* Each UPG GIO block has one IRQ for all banks */
+static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct list_head *pos;
+
+	/* Interrupts weren't properly cleared during probe */
+	BUG_ON(!priv || !chip);
+
+	chained_irq_enter(chip, desc);
+	list_for_each(pos, &priv->bank_list) {
+		struct brcmstb_gpio_bank *bank =
+			list_entry(pos, struct brcmstb_gpio_bank, node);
+		brcmstb_gpio_irq_bank_handler(bank);
+	}
+	chained_irq_exit(chip, desc);
+}
+
+static int brcmstb_gpio_reboot(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct brcmstb_gpio_priv *priv =
+		container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+
+	/* Enable GPIO for S5 cold boot */
+	if (action == SYS_POWER_OFF)
+		brcmstb_gpio_priv_set_wake(priv, 1);
+
+	return NOTIFY_DONE;
+}
+
 /* Make sure that the number of banks matches up between properties */
 static int brcmstb_gpio_sanity_check_banks(struct device *dev,
 		struct device_node *np, struct resource *res)
@@ -100,7 +305,13 @@
 		bank = list_entry(pos, struct brcmstb_gpio_bank, node);
 		ret = bgpio_remove(&bank->bgc);
 		if (ret)
-			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup");
+			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup\n");
+	}
+	if (priv->reboot_notifier.notifier_call) {
+		ret = unregister_reboot_notifier(&priv->reboot_notifier);
+		if (ret)
+			dev_err(&pdev->dev,
+				"failed to unregister reboot notifier\n");
 	}
 	return ret;
 }
@@ -121,7 +332,7 @@
 		return -EINVAL;
 
 	offset = gpiospec->args[0] - (gc->base - priv->gpio_base);
-	if (offset >= gc->ngpio)
+	if (offset >= gc->ngpio || offset < 0)
 		return -EINVAL;
 
 	if (unlikely(offset >= bank->width)) {
@@ -136,6 +347,65 @@
 	return offset;
 }
 
+/* Before calling, must have bank->parent_irq set and gpiochip registered */
+static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
+		struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+
+	bank->irq_chip.name = dev_name(dev);
+	bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+	bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+	bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+	/* Ensures that all non-wakeup IRQs are disabled at suspend */
+	bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+	if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
+			of_property_read_bool(np, "wakeup-source")) {
+		priv->parent_wake_irq = platform_get_irq(pdev, 1);
+		if (priv->parent_wake_irq < 0) {
+			dev_warn(dev,
+				"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
+		} else {
+			int err;
+
+			/*
+			 * Set wakeup capability before requesting wakeup
+			 * interrupt, so we can process boot-time "wakeups"
+			 * (e.g., from S5 cold boot)
+			 */
+			device_set_wakeup_capable(dev, true);
+			device_wakeup_enable(dev);
+			err = devm_request_irq(dev, priv->parent_wake_irq,
+					brcmstb_gpio_wake_irq_handler, 0,
+					"brcmstb-gpio-wake", priv);
+
+			if (err < 0) {
+				dev_err(dev, "Couldn't request wake IRQ");
+				return err;
+			}
+
+			priv->reboot_notifier.notifier_call =
+				brcmstb_gpio_reboot;
+			register_reboot_notifier(&priv->reboot_notifier);
+			priv->can_wake = true;
+		}
+	}
+
+	if (priv->can_wake)
+		bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+	gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0,
+			handle_simple_irq, IRQ_TYPE_NONE);
+	gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip,
+			priv->parent_irq, brcmstb_gpio_irq_handler);
+
+	return 0;
+}
+
 static int brcmstb_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -146,6 +416,7 @@
 	struct property *prop;
 	const __be32 *p;
 	u32 bank_width;
+	int num_banks = 0;
 	int err;
 	static int gpio_base;
 
@@ -164,6 +435,16 @@
 	priv->reg_base = reg_base;
 	priv->pdev = pdev;
 
+	if (of_property_read_bool(np, "interrupt-controller")) {
+		priv->parent_irq = platform_get_irq(pdev, 0);
+		if (priv->parent_irq <= 0) {
+			dev_err(dev, "Couldn't get IRQ");
+			return -ENOENT;
+		}
+	} else {
+		priv->parent_irq = -ENOENT;
+	}
+
 	if (brcmstb_gpio_sanity_check_banks(dev, np, res))
 		return -EINVAL;
 
@@ -180,7 +461,7 @@
 		}
 
 		bank->parent_priv = priv;
-		bank->id = priv->num_banks;
+		bank->id = num_banks;
 		if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
 			dev_err(dev, "Invalid bank width %d\n", bank_width);
 			goto fail;
@@ -212,6 +493,12 @@
 		/* not all ngpio lines are valid, will use bank width later */
 		gc->ngpio = MAX_GPIO_PER_BANK;
 
+		/*
+		 * Mask all interrupts by default, since wakeup interrupts may
+		 * be retained from S5 cold boot
+		 */
+		bank->bgc.write_reg(reg_base + GIO_MASK(bank->id), 0);
+
 		err = gpiochip_add(gc);
 		if (err) {
 			dev_err(dev, "Could not add gpiochip for bank %d\n",
@@ -219,17 +506,24 @@
 			goto fail;
 		}
 		gpio_base += gc->ngpio;
+
+		if (priv->parent_irq > 0) {
+			err = brcmstb_gpio_irq_setup(pdev, bank);
+			if (err)
+				goto fail;
+		}
+
 		dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
 			gc->base, gc->ngpio, bank->width);
 
 		/* Everything looks good, so add bank to list */
 		list_add(&bank->node, &priv->bank_list);
 
-		priv->num_banks++;
+		num_banks++;
 	}
 
 	dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
-			priv->num_banks, priv->gpio_base, gpio_base - 1);
+			num_banks, priv->gpio_base, gpio_base - 1);
 
 	return 0;
 
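
The brcmstb handler above is an instance of the standard chained-IRQ
fan-out: the parent interrupt is bracketed with chained_irq_enter() and
chained_irq_exit(), and every pending status bit is forwarded to the
virq previously mapped in the bank's irq_domain.  A condensed sketch,
with read_pending_sketch() as a hypothetical masked status read:

static unsigned long read_pending_sketch(void);	/* hypothetical */

static void fanout_handler_sketch(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned long pending;
	int hwirq;

	chained_irq_enter(chip, desc);		/* mask/ack the parent */
	pending = read_pending_sketch();	/* status & mask */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_find_mapping(domain, hwirq));
	chained_irq_exit(chip, desc);		/* eoi/unmask the parent */
}
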
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c246ac3..94b0ab7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -65,11 +65,11 @@
 	return ptr;
 }
 
-static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
+static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
 {
 	struct davinci_gpio_regs __iomem *g;
 
-	g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
+	g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
 
 	return g;
 }
@@ -287,7 +287,7 @@
 
 static void gpio_irq_disable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 
 	writel_relaxed(mask, &g->clr_falling);
@@ -296,7 +296,7 @@
 
 static void gpio_irq_enable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 	unsigned status = irqd_get_trigger_type(d);
 
@@ -327,8 +327,9 @@
 };
 
 static void
-gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask = 0xffff;
 	struct davinci_gpio_controller *d;
@@ -396,7 +397,7 @@
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask;
 
-	d = (struct davinci_gpio_controller *)data->handler_data;
+	d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
 	g = (struct davinci_gpio_regs __iomem *)d->regs;
 	mask = __gpio_mask(data->irq - d->gpio_irq);
 
@@ -422,7 +423,6 @@
 	irq_set_irq_type(irq, IRQ_TYPE_NONE);
 	irq_set_chip_data(irq, (__force void *)g);
 	irq_set_handler_data(irq, (void *)__gpio_mask(hw));
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -545,7 +545,7 @@
 		chips[0].chip.to_irq = gpio_to_irq_unbanked;
 		chips[0].gpio_irq = bank_irq;
 		chips[0].gpio_unbanked = pdata->gpio_unbanked;
-		binten = BIT(0);
+		binten = GENMASK(pdata->gpio_unbanked / 16, 0);
 
 		/* AINTC handles mask/unmask; GPIO handles triggering */
 		irq = bank_irq;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 55fa985..c5be4b9 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -149,7 +149,7 @@
 
 static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct dwapb_gpio *gpio = irq_get_handler_data(irq);
+	struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	dwapb_do_irq(gpio);
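
The dwapb, davinci and bcm-kona hunks all make the same mechanical
conversion: chained handlers stop looking their data up by bare IRQ
number and take it from the irq_desc they are handed.  A before/after
sketch; struct sk_gpio and sk_demux() are hypothetical:

struct sk_gpio;				/* hypothetical driver state */
void sk_demux(struct sk_gpio *g);	/* hypothetical fan-out */

/* Old style: re-derive the handler data from the irq number. */
static void demux_old(unsigned int irq, struct irq_desc *desc)
{
	struct sk_gpio *g = irq_get_handler_data(irq);

	sk_demux(g);
}

/* New style: everything comes straight from the descriptor. */
static void demux_new(unsigned int irq, struct irq_desc *desc)
{
	struct sk_gpio *g = irq_desc_get_handler_data(desc);

	sk_demux(g);
}
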
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index fbf2873..6bca1e1 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/gpio-em.h>
 
 struct em_gio_priv {
 	void __iomem *base0;
@@ -262,7 +261,6 @@
 
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID); /* kill me now */
 	return 0;
 }
 
@@ -273,13 +271,12 @@
 
 static int em_gio_probe(struct platform_device *pdev)
 {
-	struct gpio_em_config pdata_dt;
-	struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev);
 	struct em_gio_priv *p;
 	struct resource *io[2], *irq[2];
 	struct gpio_chip *gpio_chip;
 	struct irq_chip *irq_chip;
 	const char *name = dev_name(&pdev->dev);
+	unsigned int ngpios;
 	int ret;
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -319,18 +316,10 @@
 		goto err0;
 	}
 
-	if (!pdata) {
-		memset(&pdata_dt, 0, sizeof(pdata_dt));
-		pdata = &pdata_dt;
-
-		if (of_property_read_u32(pdev->dev.of_node, "ngpios",
-					 &pdata->number_of_pins)) {
-			dev_err(&pdev->dev, "Missing ngpios OF property\n");
-			ret = -EINVAL;
-			goto err0;
-		}
-
-		pdata->gpio_base = -1;
+	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+		dev_err(&pdev->dev, "Missing ngpios OF property\n");
+		ret = -EINVAL;
+		goto err0;
 	}
 
 	gpio_chip = &p->gpio_chip;
@@ -345,8 +334,8 @@
 	gpio_chip->label = name;
 	gpio_chip->dev = &pdev->dev;
 	gpio_chip->owner = THIS_MODULE;
-	gpio_chip->base = pdata->gpio_base;
-	gpio_chip->ngpio = pdata->number_of_pins;
+	gpio_chip->base = -1;
+	gpio_chip->ngpio = ngpios;
 
 	irq_chip = &p->irq_chip;
 	irq_chip->name = name;
@@ -357,9 +346,7 @@
 	irq_chip->irq_release_resources = em_gio_irq_relres;
 	irq_chip->flags	= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
 
-	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
-					      pdata->number_of_pins,
-					      pdata->irq_base,
+	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
 					      &em_gio_irq_domain_ops, p);
 	if (!p->irq_domain) {
 		ret = -ENXIO;
@@ -387,12 +374,6 @@
 		goto err1;
 	}
 
-	if (pdata->pctl_name) {
-		ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0,
-					     gpio_chip->base, gpio_chip->ngpio);
-		if (ret < 0)
-			dev_warn(&pdev->dev, "failed to add pin range\n");
-	}
 	return 0;
 
 err1:
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 45684f3..9d90366 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -100,13 +100,15 @@
 	}
 }
 
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
+				      struct irq_desc *desc)
 {
 	/*
 	 * map discontiguous hw irq range to continuous sw irq range:
 	 *
 	 *  IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
 	 */
+	unsigned int irq = irq_desc_get_irq(desc);
 	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
 	int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
 
@@ -208,7 +210,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_handler_locked(d->irq, handler);
+	irq_set_handler_locked(d, handler);
 
 	gpio_int_enabled[port] |= port_mask;
 
@@ -234,7 +236,7 @@
 	     gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
 		irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(gpio_irq, IRQF_VALID);
+		irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 28071f4..2ffcd9f 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -1,8 +1,10 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/of_gpio.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/basic_mmio_gpio.h>
 
@@ -13,6 +15,7 @@
 #define ETRAX_FS_rw_intr_mask	16
 #define ETRAX_FS_rw_ack_intr	20
 #define ETRAX_FS_r_intr		24
+#define ETRAX_FS_r_masked_intr	28
 #define ETRAX_FS_rw_pb_dout	32
 #define ETRAX_FS_r_pb_din	36
 #define ETRAX_FS_rw_pb_oe	40
@@ -26,6 +29,48 @@
 #define ETRAX_FS_r_pe_din	84
 #define ETRAX_FS_rw_pe_oe	88
 
+#define ARTPEC3_r_pa_din	0
+#define ARTPEC3_rw_pa_dout	4
+#define ARTPEC3_rw_pa_oe	8
+#define ARTPEC3_r_pb_din	44
+#define ARTPEC3_rw_pb_dout	48
+#define ARTPEC3_rw_pb_oe	52
+#define ARTPEC3_r_pc_din	88
+#define ARTPEC3_rw_pc_dout	92
+#define ARTPEC3_rw_pc_oe	96
+#define ARTPEC3_r_pd_din	116
+#define ARTPEC3_rw_intr_cfg	120
+#define ARTPEC3_rw_intr_pins	124
+#define ARTPEC3_rw_intr_mask	128
+#define ARTPEC3_rw_ack_intr	132
+#define ARTPEC3_r_masked_intr	140
+
+#define GIO_CFG_OFF		0
+#define GIO_CFG_HI		1
+#define GIO_CFG_LO		2
+#define GIO_CFG_SET		3
+#define GIO_CFG_POSEDGE		5
+#define GIO_CFG_NEGEDGE		6
+#define GIO_CFG_ANYEDGE		7
+
+struct etraxfs_gpio_info;
+
+struct etraxfs_gpio_block {
+	spinlock_t lock;
+	u32 mask;
+	u32 cfg;
+	u32 pins;
+	unsigned int group[8];
+
+	void __iomem *regs;
+	const struct etraxfs_gpio_info *info;
+};
+
+struct etraxfs_gpio_chip {
+	struct bgpio_chip bgc;
+	struct etraxfs_gpio_block *block;
+};
+
 struct etraxfs_gpio_port {
 	const char *label;
 	unsigned int oe;
@@ -37,6 +82,12 @@
 struct etraxfs_gpio_info {
 	unsigned int num_ports;
 	const struct etraxfs_gpio_port *ports;
+
+	unsigned int rw_ack_intr;
+	unsigned int rw_intr_mask;
+	unsigned int rw_intr_cfg;
+	unsigned int rw_intr_pins;
+	unsigned int r_masked_intr;
 };
 
 static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = {
@@ -80,8 +131,56 @@
 static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = {
 	.num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports),
 	.ports = etraxfs_gpio_etraxfs_ports,
+	.rw_ack_intr	= ETRAX_FS_rw_ack_intr,
+	.rw_intr_mask	= ETRAX_FS_rw_intr_mask,
+	.rw_intr_cfg	= ETRAX_FS_rw_intr_cfg,
+	.r_masked_intr	= ETRAX_FS_r_masked_intr,
 };
 
+static const struct etraxfs_gpio_port etraxfs_gpio_artpec3_ports[] = {
+	{
+		.label	= "A",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pa_oe,
+		.dout	= ARTPEC3_rw_pa_dout,
+		.din	= ARTPEC3_r_pa_din,
+	},
+	{
+		.label	= "B",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pb_oe,
+		.dout	= ARTPEC3_rw_pb_dout,
+		.din	= ARTPEC3_r_pb_din,
+	},
+	{
+		.label	= "C",
+		.ngpio	= 16,
+		.oe	= ARTPEC3_rw_pc_oe,
+		.dout	= ARTPEC3_rw_pc_dout,
+		.din	= ARTPEC3_r_pc_din,
+	},
+	{
+		.label	= "D",
+		.ngpio	= 32,
+		.din	= ARTPEC3_r_pd_din,
+	},
+};
+
+static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
+	.num_ports = ARRAY_SIZE(etraxfs_gpio_artpec3_ports),
+	.ports = etraxfs_gpio_artpec3_ports,
+	.rw_ack_intr	= ARTPEC3_rw_ack_intr,
+	.rw_intr_mask	= ARTPEC3_rw_intr_mask,
+	.rw_intr_cfg	= ARTPEC3_rw_intr_cfg,
+	.r_masked_intr	= ARTPEC3_r_masked_intr,
+	.rw_intr_pins	= ARTPEC3_rw_intr_pins,
+};
+
+static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
+{
+	return gc->label[0] - 'A';
+}
+
 static int etraxfs_gpio_of_xlate(struct gpio_chip *gc,
 			       const struct of_phandle_args *gpiospec,
 			       u32 *flags)
@@ -90,7 +189,7 @@
 	 * Port numbers are A to E, and the properties are integers, so we
 	 * specify them as 0xA - 0xE.
 	 */
-	if (gc->label[0] - 'A' + 0xA != gpiospec->args[2])
+	if (etraxfs_gpio_chip_to_port(gc) + 0xA != gpiospec->args[2])
 		return -EINVAL;
 
 	return of_gpio_simple_xlate(gc, gpiospec, flags);
@@ -101,24 +200,174 @@
 		.compatible = "axis,etraxfs-gio",
 		.data = &etraxfs_gpio_etraxfs,
 	},
+	{
+		.compatible = "axis,artpec3-gio",
+		.data = &etraxfs_gpio_artpec3,
+	},
 	{},
 };
 
+static unsigned int etraxfs_gpio_to_group_irq(unsigned int gpio)
+{
+	return gpio % 8;
+}
+
+static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
+					      unsigned int gpio)
+{
+	return 4 * etraxfs_gpio_chip_to_port(&chip->bgc.gc) + gpio / 8;
+}
+
+static void etraxfs_gpio_irq_ack(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	writel(BIT(grpirq), block->regs + block->info->rw_ack_intr);
+}
+
+static void etraxfs_gpio_irq_mask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask &= ~BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static void etraxfs_gpio_irq_unmask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask |= BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	u32 cfg;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		cfg = GIO_CFG_POSEDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		cfg = GIO_CFG_NEGEDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		cfg = GIO_CFG_ANYEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		cfg = GIO_CFG_LO;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		cfg = GIO_CFG_HI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock(&block->lock);
+	block->cfg &= ~(0x7 << (grpirq * 3));
+	block->cfg |= (cfg << (grpirq * 3));
+	writel(block->cfg, block->regs + block->info->rw_intr_cfg);
+	spin_unlock(&block->lock);
+
+	return 0;
+}
+
+static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	int ret = -EBUSY;
+
+	spin_lock(&block->lock);
+	if (block->group[grpirq])
+		goto out;
+
+	ret = gpiochip_lock_as_irq(&chip->bgc.gc, d->hwirq);
+	if (ret)
+		goto out;
+
+	block->group[grpirq] = d->irq;
+	if (block->info->rw_intr_pins) {
+		unsigned int pin = etraxfs_gpio_to_group_pin(chip, d->hwirq);
+
+		block->pins &= ~(0xf << (grpirq * 4));
+		block->pins |= (pin << (grpirq * 4));
+
+		writel(block->pins, block->regs + block->info->rw_intr_pins);
+	}
+
+out:
+	spin_unlock(&block->lock);
+	return ret;
+}
+
+static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->group[grpirq] = 0;
+	gpiochip_unlock_as_irq(&chip->bgc.gc, d->hwirq);
+	spin_unlock(&block->lock);
+}
+
+static struct irq_chip etraxfs_gpio_irq_chip = {
+	.name		= "gpio-etraxfs",
+	.irq_ack	= etraxfs_gpio_irq_ack,
+	.irq_mask	= etraxfs_gpio_irq_mask,
+	.irq_unmask	= etraxfs_gpio_irq_unmask,
+	.irq_set_type	= etraxfs_gpio_irq_set_type,
+	.irq_request_resources = etraxfs_gpio_irq_request_resources,
+	.irq_release_resources = etraxfs_gpio_irq_release_resources,
+};
+
+static irqreturn_t etraxfs_gpio_interrupt(int irq, void *dev_id)
+{
+	struct etraxfs_gpio_block *block = dev_id;
+	unsigned long intr = readl(block->regs + block->info->r_masked_intr);
+	int bit;
+
+	for_each_set_bit(bit, &intr, 8)
+		generic_handle_irq(block->group[bit]);
+
+	return IRQ_RETVAL(intr & 0xff);
+}
+
 static int etraxfs_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	const struct etraxfs_gpio_info *info;
 	const struct of_device_id *match;
-	struct bgpio_chip *chips;
-	struct resource *res;
+	struct etraxfs_gpio_block *block;
+	struct etraxfs_gpio_chip *chips;
+	struct resource *res, *irq;
+	bool allportsirq = false;
 	void __iomem *regs;
 	int ret;
 	int i;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (!regs)
-		return -ENOMEM;
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	match = of_match_node(etraxfs_gpio_of_table, dev->of_node);
 	if (!match)
@@ -130,19 +379,57 @@
 	if (!chips)
 		return -ENOMEM;
 
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq)
+		return -EINVAL;
+
+	block = devm_kzalloc(dev, sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	spin_lock_init(&block->lock);
+
+	block->regs = regs;
+	block->info = info;
+
+	writel(0, block->regs + info->rw_intr_mask);
+	writel(0, block->regs + info->rw_intr_cfg);
+	if (info->rw_intr_pins) {
+		allportsirq = true;
+		writel(0, block->regs + info->rw_intr_pins);
+	}
+
+	ret = devm_request_irq(dev, irq->start, etraxfs_gpio_interrupt,
+			       IRQF_SHARED, dev_name(dev), block);
+	if (ret) {
+		dev_err(dev, "Unable to request irq %d\n", ret);
+		return ret;
+	}
+
 	for (i = 0; i < info->num_ports; i++) {
-		struct bgpio_chip *bgc = &chips[i];
+		struct etraxfs_gpio_chip *chip = &chips[i];
+		struct bgpio_chip *bgc = &chip->bgc;
 		const struct etraxfs_gpio_port *port = &info->ports[i];
+		unsigned long flags = BGPIOF_READ_OUTPUT_REG_SET;
+		void __iomem *dat = regs + port->din;
+		void __iomem *set = regs + port->dout;
+		void __iomem *dirout = regs + port->oe;
+
+		chip->block = block;
+
+		if (dirout == set) {
+			dirout = set = NULL;
+			flags = BGPIOF_NO_OUTPUT;
+		}
 
 		ret = bgpio_init(bgc, dev, 4,
-				 regs + port->din,	/* dat */
-				 regs + port->dout,	/* set */
-				 NULL,			/* clr */
-				 regs + port->oe,	/* dirout */
-				 NULL,			/* dirin */
-				 BGPIOF_UNREADABLE_REG_SET);
-		if (ret)
-			return ret;
+				 dat, set, NULL, dirout, NULL,
+				 flags);
+		if (ret) {
+			dev_err(dev, "Unable to init port %s\n",
+				port->label);
+			continue;
+		}
 
 		bgc->gc.ngpio = port->ngpio;
 		bgc->gc.label = port->label;
@@ -152,9 +439,21 @@
 		bgc->gc.of_xlate = etraxfs_gpio_of_xlate;
 
 		ret = gpiochip_add(&bgc->gc);
-		if (ret)
+		if (ret) {
 			dev_err(dev, "Unable to register port %s\n",
 				bgc->gc.label);
+			continue;
+		}
+
+		if (i > 0 && !allportsirq)
+			continue;
+
+		ret = gpiochip_irqchip_add(&bgc->gc, &etraxfs_gpio_irq_chip, 0,
+					   handle_level_irq, IRQ_TYPE_NONE);
+		if (ret) {
+			dev_err(dev, "Unable to add irqchip to port %s\n",
+				bgc->gc.label);
+		}
 	}
 
 	return 0;
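
The trigger configuration in etraxfs_gpio_irq_set_type() above packs one
3-bit GIO_CFG_* field per group IRQ into a single register, so updating
one group is a masked read-modify-write of its slot.  A sketch of just
that field update:

/* Return cfg with group 'grpirq' set to the 3-bit code 'trig'. */
static u32 set_trig_field_sketch(u32 cfg, unsigned int grpirq, u32 trig)
{
	unsigned int shift = grpirq * 3;	/* 8 groups x 3 bits */

	cfg &= ~(0x7u << shift);		/* clear the old code */
	cfg |= (trig & 0x7u) << shift;		/* e.g. GIO_CFG_ANYEDGE */
	return cfg;
}
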
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 9bda372..a3f0753 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -153,6 +153,10 @@
 	return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio));
 }
 
+static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+}
+
 static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -279,6 +283,12 @@
 	return 0;
 }
 
+static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+				int val)
+{
+	return -EINVAL;
+}
+
 static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
 				int val)
 {
@@ -302,6 +312,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
 static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -351,6 +369,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
 static int bgpio_setup_accessors(struct device *dev,
 				 struct bgpio_chip *bgc,
 				 bool bit_be,
@@ -444,6 +470,9 @@
 		bgc->reg_set = set;
 		bgc->gc.set = bgpio_set_set;
 		bgc->gc.set_multiple = bgpio_set_multiple_set;
+	} else if (flags & BGPIOF_NO_OUTPUT) {
+		bgc->gc.set = bgpio_set_none;
+		bgc->gc.set_multiple = NULL;
 	} else {
 		bgc->gc.set = bgpio_set;
 		bgc->gc.set_multiple = bgpio_set_multiple;
@@ -460,7 +489,8 @@
 
 static int bgpio_setup_direction(struct bgpio_chip *bgc,
 				 void __iomem *dirout,
-				 void __iomem *dirin)
+				 void __iomem *dirin,
+				 unsigned long flags)
 {
 	if (dirout && dirin) {
 		return -EINVAL;
@@ -468,12 +498,17 @@
 		bgc->reg_dir = dirout;
 		bgc->gc.direction_output = bgpio_dir_out;
 		bgc->gc.direction_input = bgpio_dir_in;
+		bgc->gc.get_direction = bgpio_get_dir;
 	} else if (dirin) {
 		bgc->reg_dir = dirin;
 		bgc->gc.direction_output = bgpio_dir_out_inv;
 		bgc->gc.direction_input = bgpio_dir_in_inv;
+		bgc->gc.get_direction = bgpio_get_dir_inv;
 	} else {
-		bgc->gc.direction_output = bgpio_simple_dir_out;
+		if (flags & BGPIOF_NO_OUTPUT)
+			bgc->gc.direction_output = bgpio_dir_out_err;
+		else
+			bgc->gc.direction_output = bgpio_simple_dir_out;
 		bgc->gc.direction_input = bgpio_simple_dir_in;
 	}
 
@@ -525,7 +560,7 @@
 	if (ret)
 		return ret;
 
-	ret = bgpio_setup_direction(bgc, dirout, dirin);
+	ret = bgpio_setup_direction(bgc, dirout, dirin, flags);
 	if (ret)
 		return ret;
 
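
The BGPIOF_NO_OUTPUT handling added above gives input-only hardware
callbacks that fail (or no-op) cleanly instead of silently pretending to
drive the line.  A condensed sketch of the direction-callback selection,
mirroring bgpio_setup_direction() but omitting the in/out callback
pairs:

static int setup_direction_sketch(struct bgpio_chip *bgc,
				  void __iomem *dirout, void __iomem *dirin,
				  unsigned long flags)
{
	if (dirout && dirin)
		return -EINVAL;		/* at most one direction register */

	if (dirout) {			/* register bit 1 = output */
		bgc->reg_dir = dirout;
		bgc->gc.get_direction = bgpio_get_dir;
	} else if (dirin) {		/* register bit 1 = input */
		bgc->reg_dir = dirin;
		bgc->gc.get_direction = bgpio_get_dir_inv;
	} else if (flags & BGPIOF_NO_OUTPUT) {
		bgc->gc.direction_output = bgpio_dir_out_err;	/* -EINVAL */
	} else {
		bgc->gc.direction_output = bgpio_simple_dir_out;
	}
	return 0;
}
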
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 0a8f761..801423f 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -104,17 +104,12 @@
 {
 	struct bgpio_chip *bgc = &priv->bgc;
 	unsigned long mask = bgc->pin2mask(bgc, offset);
-	unsigned long flags;
-
-	spin_lock_irqsave(&bgc->lock, flags);
 
 	if (val)
 		priv->imask |= mask;
 	else
 		priv->imask &= ~mask;
 	bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
-
-	spin_unlock_irqrestore(&bgc->lock, flags);
 }
 
 static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -180,16 +175,26 @@
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 0);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static void grgpio_irq_unmask(struct irq_data *d)
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 1);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static struct irq_chip grgpio_irq_chip = {
@@ -281,12 +286,7 @@
 	irq_set_chip_data(irq, priv);
 	irq_set_chip_and_handler(irq, &grgpio_irq_chip,
 				 handle_simple_irq);
-	irq_clear_status_flags(irq, IRQ_NOREQUEST);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return ret;
 }
@@ -301,9 +301,6 @@
 	int ngpio = priv->bgc.gc.ngpio;
 	int i;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	irq_set_chip_and_handler(irq, NULL, NULL);
 	irq_set_chip_data(irq, NULL);
 
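Two things change in grgpio: the bgc->lock is pulled out of grgpio_set_imask() and into the irq_mask/irq_unmask callbacks, which keeps the helper usable from any context that already holds the lock while still serializing the read-modify-write of the shadow imask; and the ARM-only set_irq_flags() dance goes away (more on that below, under gpio-mcp23s08). The locking pattern, restated with the reasoning spelled out (names mirror the driver above):

	static void annotated_irq_unmask(struct irq_data *d)
	{
		struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
		unsigned long flags;

		/*
		 * priv->imask is a shadow of the IMASK register shared by
		 * all lines; the lock must cover both the shadow update and
		 * the register write, or two CPUs unmasking different lines
		 * could lose each other's bits.
		 */
		spin_lock_irqsave(&priv->bgc.lock, flags);
		grgpio_set_imask(priv, d->hwirq, 1);	/* helper is now lock-free */
		spin_unlock_irqrestore(&priv->bgc.lock, flags);
	}
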
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7d3c90e..8c5252c 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -685,9 +685,14 @@
 
 	mutex_init(&chip->lock);
 
-	max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
-	if (nr_port > 8)
-		max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+	ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+	if (ret)
+		goto out_failed;
+	if (nr_port > 8) {
+		ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+		if (ret)
+			goto out_failed;
+	}
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
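The max732x probe previously ignored the return value of its initial register reads, so a missing or dead chip could still register a gpiochip with garbage in its output shadows. A generic version of the hardened pattern, as one might write it for any I2C expander (the helper name is illustrative):

	#include <linux/i2c.h>

	static int read_shadow(struct i2c_client *client, u8 reg, u8 *out)
	{
		int ret = i2c_smbus_read_byte_data(client, reg);

		if (ret < 0)
			return ret;	/* propagate -EIO etc. so probe fails */

		*out = ret;
		return 0;
	}
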
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 2fc7ff8..73db7ec 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -507,11 +507,7 @@
 		irq_set_chip_data(irq, mcp);
 		irq_set_chip(irq, &mcp23s08_irq_chip);
 		irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
 		irq_set_noprobe(irq);
-#endif
 	}
 	return 0;
 }
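This hunk is one instance of a tree-wide conversion that recurs through this pull (grgpio above; msm, pxa, sa1100, tegra, timberdale and gpiolib below): the ARM-specific set_irq_flags() is removed. As I understand the genirq cleanup, irq descriptors are now requestable by default, so IRQF_VALID needs no direct replacement; only the autoprobe behavior still has to be stated, via irq_set_noprobe() or irq_clear_status_flags(). A sketch of the two conversions seen in these patches:

	#include <linux/irq.h>

	static void map_one_irq(unsigned int irq)
	{
		/* was: set_irq_flags(irq, IRQF_VALID);  (ARM only)
		 * valid by default now; keep autoprobing disabled: */
		irq_set_noprobe(irq);
	}

	static void map_probeable_irq(unsigned int irq)
	{
		/* was: set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		 * make the line both requestable and autoprobe-capable: */
		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
	}
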
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 20aa66f..8ef7a12 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -32,7 +32,7 @@
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * shadowed data register to be able to clear/set output pins in
@@ -95,7 +95,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	if (val)
 		mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
@@ -104,7 +104,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
@@ -115,7 +115,7 @@
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	for (i = 0; i < gc->ngpio; i++) {
 		if (*mask == 0)
@@ -130,7 +130,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -139,11 +139,11 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -156,11 +156,11 @@
 
 	mpc8xxx_gpio_set(gc, gpio, val);
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -174,6 +174,15 @@
 	return mpc8xxx_gpio_dir_out(gc, gpio, val);
 }
 
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 0..3 are input only on MPC5125 */
+	if (gpio <= 3)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -206,11 +215,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_mask(struct irq_data *d)
@@ -219,11 +228,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -242,17 +251,17 @@
 
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		setbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -282,22 +291,22 @@
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
 	case IRQ_TYPE_LEVEL_LOW:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 2 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_RISING:
 	case IRQ_TYPE_LEVEL_HIGH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 1 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(reg, 3 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -312,17 +321,13 @@
 	.irq_unmask	= mpc8xxx_irq_unmask,
 	.irq_mask	= mpc8xxx_irq_mask,
 	.irq_ack	= mpc8xxx_irq_ack,
+	/* this might get overwritten in mpc8xxx_probe() */
 	.irq_set_type	= mpc8xxx_irq_set_type,
 };
 
 static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
 				irq_hw_number_t hwirq)
 {
-	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
-
-	if (mpc8xxx_gc->of_dev_id_data)
-		mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
-
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
 
@@ -334,11 +339,38 @@
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] = {
+struct mpc8xxx_gpio_devtype {
+	int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
+	int (*gpio_get)(struct gpio_chip *, unsigned int);
+	int (*irq_set_type)(struct irq_data *, unsigned int);
+};
+
+static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
+	.gpio_dir_out = mpc5121_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
+	.gpio_dir_out = mpc5125_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
+	.gpio_get = mpc8572_gpio_get,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
+	.gpio_dir_out = mpc8xxx_gpio_dir_out,
+	.gpio_get = mpc8xxx_gpio_get,
+	.irq_set_type = mpc8xxx_irq_set_type,
+};
+
+static const struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
-	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
 	{ .compatible = "fsl,mpc8610-gpio", },
-	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{ .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
+	{ .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
 	{ .compatible = "fsl,pq3-gpio",     },
 	{ .compatible = "fsl,qoriq-gpio",   },
 	{}
@@ -351,6 +383,8 @@
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
 	const struct of_device_id *id;
+	const struct mpc8xxx_gpio_devtype *devtype =
+		of_device_get_match_data(&pdev->dev);
 	int ret;
 
 	mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -359,7 +393,7 @@
 
 	platform_set_drvdata(pdev, mpc8xxx_gc);
 
-	spin_lock_init(&mpc8xxx_gc->lock);
+	raw_spin_lock_init(&mpc8xxx_gc->lock);
 
 	mm_gc = &mpc8xxx_gc->mm_gc;
 	gc = &mm_gc->gc;
@@ -367,10 +401,18 @@
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
-		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
-	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
-		mpc8572_gpio_get : mpc8xxx_gpio_get;
+
+	if (!devtype)
+		devtype = &mpc8xxx_gpio_devtype_default;
+
+	/*
+	 * It's assumed that only a single type of gpio controller is available
+	 * on the current machine, so overwriting global data is fine.
+	 */
+	mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+
+	gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
+	gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->set_multiple = mpc8xxx_gpio_set_multiple;
 	gc->to_irq = mpc8xxx_gpio_to_irq;
@@ -396,8 +438,8 @@
 	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
 	out_be32(mm_gc->regs + GPIO_IMR, 0);
 
-	irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
-	irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);
+	irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
+					 mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
 
 	return 0;
 }
@@ -407,8 +449,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);
 
 	if (mpc8xxx_gc->irq) {
-		irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
-		irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
+		irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, NULL, NULL);
 		irq_domain_remove(mpc8xxx_gc->irq);
 	}
 
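The mpc8xxx rework replaces per-compatible of_device_is_compatible() checks with a const devtype structure hung off each of_device_id, and moves the one-time irq_set_type fixup from the irq-map path into probe. A condensed sketch of the match-data pattern (the helper is illustrative; the identifiers mirror the driver above):

	#include <linux/gpio/driver.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static void apply_devtype(struct platform_device *pdev, struct gpio_chip *gc)
	{
		const struct mpc8xxx_gpio_devtype *devtype =
			of_device_get_match_data(&pdev->dev);

		if (!devtype)				/* entry had no .data */
			devtype = &mpc8xxx_gpio_devtype_default;

		/* GNU ?: keeps each hook optional within a devtype as well */
		gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
		gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
	}
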
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 52ff182..d2012cf 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -187,14 +187,6 @@
 	return irq_create_mapping(domain, offset);
 }
 
-static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(irq);
-
-	return irq_data->hwirq;
-}
-
-
 /* For dual-edge interrupts in software, since the hardware has no
  * such support:
  *
@@ -238,7 +230,7 @@
 
 static void msm_gpio_irq_ack(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
 	if (test_bit(gpio, msm_gpio.dual_edge_irqs))
@@ -247,8 +239,8 @@
 
 static void msm_gpio_irq_mask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
@@ -259,8 +251,8 @@
 
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
@@ -271,8 +263,8 @@
 
 static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 	uint32_t bits;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
@@ -281,14 +273,14 @@
 
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
 		bits |= BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
 			__set_bit(gpio, msm_gpio.dual_edge_irqs);
 		else
 			__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	} else {
 		bits &= ~BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	}
 
@@ -331,7 +323,7 @@
 
 static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	if (on) {
 		if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
@@ -363,7 +355,6 @@
 	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
 	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
 			handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
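When a gpiochip owns its own irqdomain, the hwirq number carried in struct irq_data is already the GPIO offset, so the deleted msm_irq_to_gpio() was doing a reverse lookup to recover a value the core already had on hand. The whole helper reduces to:

	#include <linux/irq.h>

	/* Equivalent of the removed helper; irqd_to_hwirq(d) == d->hwirq. */
	static int irq_to_gpio_offset(struct irq_data *d)
	{
		return irqd_to_hwirq(d);
	}
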
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 1a54205..b396bf3 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,9 +458,9 @@
 	return 0;
 }
 
-static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+	struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 cause, type;
 	int i;
@@ -787,8 +787,8 @@
 
 		if (irq < 0)
 			continue;
-		irq_set_handler_data(irq, mvchip);
-		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
+		irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+						 mvchip);
 	}
 
 	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
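Another recurring conversion in this pull (mvebu here; mpc8xxx above; timberdale, tz1090 and gpiolib below): installing a chained handler and its handler_data in one call. The old two-call sequence left a window in which the parent interrupt could fire with the handler installed but the data still unset; irq_set_chained_handler_and_data() updates both under the descriptor lock. A self-contained sketch (struct demux_chip and both functions are illustrative):

	#include <linux/irq.h>
	#include <linux/irqdesc.h>

	struct demux_chip {
		unsigned int first_child_irq;
	};

	static void demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct demux_chip *chip = irq_desc_get_handler_data(desc);

		generic_handle_irq(chip->first_child_irq);	/* demux, simplified */
	}

	static void wire_up_parent(unsigned int parent_irq, struct demux_chip *chip)
	{
		/* was (racy):
		 *	irq_set_handler_data(parent_irq, chip);
		 *	irq_set_chained_handler(parent_irq, demux_handler);
		 */
		irq_set_chained_handler_and_data(parent_irq, demux_handler, chip);
	}
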
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index ec1eb1b..b752b56 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -275,8 +275,8 @@
 static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxc_gpio_port *port = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -292,7 +292,7 @@
 {
 	u32 irq_msk, irq_stat;
 	struct mxc_gpio_port *port;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -339,7 +339,7 @@
 	return 0;
 }
 
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
@@ -354,6 +354,7 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;
 
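mxc_gpio_init_gc loses its __init annotation, presumably so it stays callable from probe paths that can run after init memory is discarded, and IRQCHIP_MASK_ON_SUSPEND makes the core mask every GPIO interrupt that is not wake-enabled across suspend. What the flag buys, seen from the consumer side (gpio_irq is a hypothetical requested line):

	#include <linux/interrupt.h>

	/* With IRQCHIP_MASK_ON_SUSPEND set, only lines armed as wakeup
	 * sources stay live across suspend; the core masks the rest. */
	static int arm_wakeup(unsigned int gpio_irq)
	{
		/* ends up in the chip's .irq_set_wake, gpio_set_wake_irq() here */
		return enable_irq_wake(gpio_irq);
	}
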
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 551d15d..b7f383e 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -157,7 +157,7 @@
 static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxs_gpio_port *port = irq_get_handler_data(irq);
+	struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 61a731f..2ae0d47 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #define OFF_MODE	1
+#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 static LIST_HEAD(omap_gpio_list);
 
@@ -57,7 +58,7 @@
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
@@ -175,7 +176,7 @@
 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
 {
 	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
-		clk_prepare_enable(bank->dbck);
+		clk_enable(bank->dbck);
 		bank->dbck_enabled = true;
 
 		writel_relaxed(bank->dbck_enable_mask,
@@ -193,7 +194,7 @@
 		 */
 		writel_relaxed(0, bank->base + bank->regs->debounce_en);
 
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
@@ -204,8 +205,9 @@
  * @offset: the gpio number on this @bank
  * @debounce: debounce time to use
  *
- * OMAP's debounce time is in 31us steps so we need
- * to convert and round up to the closest unit.
+ * OMAP's debounce time is in 31us steps
+ *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
+ * so we need to convert and round up to the closest unit.
  */
 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
 				    unsigned debounce)
@@ -213,34 +215,33 @@
 	void __iomem		*reg;
 	u32			val;
 	u32			l;
+	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
 		return;
 
-	if (debounce < 32)
-		debounce = 0x01;
-	else if (debounce > 7936)
-		debounce = 0xff;
-	else
-		debounce = (debounce / 0x1f) - 1;
+	if (enable) {
+		debounce = DIV_ROUND_UP(debounce, 31) - 1;
+		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+	}
 
 	l = BIT(offset);
 
-	clk_prepare_enable(bank->dbck);
+	clk_enable(bank->dbck);
 	reg = bank->base + bank->regs->debounce;
 	writel_relaxed(debounce, reg);
 
 	reg = bank->base + bank->regs->debounce_en;
 	val = readl_relaxed(reg);
 
-	if (debounce)
+	if (enable)
 		val |= l;
 	else
 		val &= ~l;
 	bank->dbck_enable_mask = val;
 
 	writel_relaxed(val, reg);
-	clk_disable_unprepare(bank->dbck);
+	clk_disable(bank->dbck);
 	/*
 	 * Enable debounce clock per module.
 	 * This call is mandatory because in omap_gpio_request() when
@@ -285,7 +286,7 @@
 		bank->context.debounce = 0;
 		writel_relaxed(bank->context.debounce, bank->base +
 			     bank->regs->debounce);
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
@@ -498,24 +499,24 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	retval = omap_set_gpio_triggering(bank, offset, type);
 	if (retval) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		goto error;
 	}
 	omap_gpio_init_irq(bank, offset);
 	if (!omap_gpio_is_input(bank, offset)) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		retval = -EINVAL;
 		goto error;
 	}
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 
@@ -636,14 +637,14 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (enable)
 		bank->context.wake_en |= gpio_bit;
 	else
 		bank->context.wake_en &= ~gpio_bit;
 
 	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -669,10 +670,10 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_enable_gpio_module(bank, offset);
 	bank->mod_usage |= BIT(offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -682,14 +683,14 @@
 	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->mod_usage &= ~(BIT(offset));
 	if (!LINE_USED(bank->irq_usage, offset)) {
 		omap_set_gpio_direction(bank, offset, 1);
 		omap_clear_gpio_debounce(bank, offset);
 	}
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last gpio to be freed in the bank,
@@ -716,7 +717,8 @@
 	struct gpio_bank *bank;
 	int unmasked = 0;
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+	unsigned long lock_flags;
 
 	chained_irq_enter(irqchip, desc);
 
@@ -731,6 +733,8 @@
 		u32 isr_saved, level_mask = 0;
 		u32 enabled;
 
+		raw_spin_lock_irqsave(&bank->lock, lock_flags);
+
 		enabled = omap_get_gpio_irqbank_mask(bank);
 		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
 
@@ -744,6 +748,8 @@
 		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
 		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
 
+		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 		/* if there is only edge sensitive GPIO pin interrupts
 		configured, we could unmask GPIO bank interrupt immediately */
 		if (!level_mask && !unmasked) {
@@ -758,6 +764,7 @@
 			bit = __ffs(isr);
 			isr &= ~(BIT(bit));
 
+			raw_spin_lock_irqsave(&bank->lock, lock_flags);
 			/*
 			 * Some chips can't respond to both rising and falling
 			 * at the same time.  If this irq was requested with
@@ -768,6 +775,8 @@
 			if (bank->toggle_mask & (BIT(bit)))
 				omap_toggle_gpio_edge_triggering(bank, bit);
 
+			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
 							    bit));
 		}
@@ -791,7 +800,7 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_set_gpio_direction(bank, offset, 1);
@@ -800,12 +809,12 @@
 	omap_enable_gpio_module(bank, offset);
 	bank->irq_usage |= BIT(offset);
 
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	omap_gpio_unmask_irq(d);
 
 	return 0;
 err:
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	if (!BANK_USED(bank))
 		pm_runtime_put(bank->dev);
 	return -EINVAL;
@@ -817,7 +826,7 @@
 	unsigned long flags;
 	unsigned offset = d->hwirq;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->irq_usage &= ~(BIT(offset));
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_clear_gpio_irqstatus(bank, offset);
@@ -825,7 +834,7 @@
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_clear_gpio_debounce(bank, offset);
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last IRQ to be freed in the bank,
@@ -849,10 +858,10 @@
 	unsigned offset = d->hwirq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void omap_gpio_unmask_irq(struct irq_data *d)
@@ -862,7 +871,7 @@
 	u32 trigger = irqd_get_trigger_type(d);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (trigger)
 		omap_set_gpio_triggering(bank, offset, trigger);
 
@@ -874,7 +883,7 @@
 	}
 
 	omap_set_gpio_irqenable(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -887,9 +896,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -902,9 +911,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -950,9 +959,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 	reg = bank->base + bank->regs->direction;
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	dir = !!(readl_relaxed(reg) & BIT(offset));
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return dir;
 }
 
@@ -962,9 +971,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_direction(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -986,10 +995,10 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
 	omap_set_gpio_direction(bank, offset, 0);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -1001,9 +1010,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap2_set_gpio_debounce(bank, offset, debounce);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1014,9 +1023,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -1061,10 +1070,6 @@
 	 /* Initialize interface clk ungated, module enabled */
 	if (bank->regs->ctrl)
 		writel_relaxed(0, base + bank->regs->ctrl);
-
-	bank->dbck = clk_get(bank->dev, "dbclk");
-	if (IS_ERR(bank->dbck))
-		dev_err(bank->dev, "Could not get gpio dbck\n");
 }
 
 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
@@ -1178,13 +1183,16 @@
 	irqc->irq_set_wake = omap_gpio_wake_enable,
 	irqc->name = dev_name(&pdev->dev);
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (unlikely(!res)) {
-		dev_err(dev, "Invalid IRQ resource\n");
-		return -ENODEV;
+	bank->irq = platform_get_irq(pdev, 0);
+	if (bank->irq <= 0) {
+		if (!bank->irq)
+			bank->irq = -ENXIO;
+		if (bank->irq != -EPROBE_DEFER)
+			dev_err(dev,
+				"can't get irq resource ret=%d\n", bank->irq);
+		return bank->irq;
 	}
 
-	bank->irq = res->start;
 	bank->dev = dev;
 	bank->chip.dev = dev;
 	bank->chip.owner = THIS_MODULE;
@@ -1213,16 +1221,26 @@
 	else
 		bank->set_dataout = omap_set_gpio_dataout_mask;
 
-	spin_lock_init(&bank->lock);
+	raw_spin_lock_init(&bank->lock);
 
 	/* Static mapping, never released */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	bank->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(bank->base)) {
-		irq_domain_remove(bank->chip.irqdomain);
 		return PTR_ERR(bank->base);
 	}
 
+	if (bank->dbck_flag) {
+		bank->dbck = devm_clk_get(bank->dev, "dbclk");
+		if (IS_ERR(bank->dbck)) {
+			dev_err(bank->dev,
+				"Could not get gpio dbck. Disable debounce\n");
+			bank->dbck_flag = false;
+		} else {
+			clk_prepare(bank->dbck);
+		}
+	}
+
 	platform_set_drvdata(pdev, bank);
 
 	pm_runtime_enable(bank->dev);
@@ -1254,6 +1272,8 @@
 	list_del(&bank->node);
 	gpiochip_remove(&bank->chip);
 	pm_runtime_disable(bank->dev);
+	if (bank->dbck_flag)
+		clk_unprepare(bank->dbck);
 
 	return 0;
 }
@@ -1271,7 +1291,7 @@
 	unsigned long flags;
 	u32 wake_low, wake_hi;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * Only edges can generate a wakeup event to the PRCM.
@@ -1324,7 +1344,7 @@
 				bank->get_context_loss_count(bank->dev);
 
 	omap_gpio_dbck_disable(bank);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1339,7 +1359,7 @@
 	unsigned long flags;
 	int c;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * On the first resume during the probe, the context has not
@@ -1375,14 +1395,14 @@
 			if (c != bank->context_loss_count) {
 				omap_gpio_restore_context(bank);
 			} else {
-				spin_unlock_irqrestore(&bank->lock, flags);
+				raw_spin_unlock_irqrestore(&bank->lock, flags);
 				return 0;
 			}
 		}
 	}
 
 	if (!bank->workaround_enabled) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		return 0;
 	}
 
@@ -1437,7 +1457,7 @@
 	}
 
 	bank->workaround_enabled = false;
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
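The OMAP changes are the biggest of the lot: bank->lock becomes a raw spinlock (ordinary spinlocks turn into sleeping locks on PREEMPT_RT and cannot be taken from the non-threaded irq flow paths used here), the debounce clock is prepared once at probe so the hot paths can use the non-sleeping clk_enable()/clk_disable() pair, the IRQ is taken via platform_get_irq() so -EPROBE_DEFER propagates, and the debounce conversion now rounds up instead of truncating. The register encodes (DEBOUNCETIME + 1) * 31 us, so the new conversion behaves as below (hypothetical helper; the mask is the one defined in the hunk above, values worked out in the comments):

	#include <linux/kernel.h>	/* DIV_ROUND_UP */

	/*
	 * debounce time = (DEBOUNCINGTIME[7:0] + 1) * 31 us
	 *
	 *   request 100 us: DIV_ROUND_UP(100, 31) - 1 = 3 -> (3 + 1) * 31 = 124 us
	 *   request  31 us: DIV_ROUND_UP(31, 31) - 1  = 0 -> (0 + 1) * 31 =  31 us
	 */
	static u32 omap_debounce_reg_value(u32 debounce_us)
	{
		return (DIV_ROUND_UP(debounce_us, 31) - 1) &
			OMAP4_GPIO_DEBOUNCINGTIME_MASK;	/* 8-bit field */
	}
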
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 404f3c6..1d4d9bc 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -88,7 +88,6 @@
 	struct gpio_chip	chip;
 	struct i2c_client	*client;
 	struct mutex		lock;		/* protect 'out' */
-	spinlock_t		slock;		/* protect irq demux */
 	unsigned		out;		/* software latch */
 	unsigned		status;		/* current status */
 	unsigned int		irq_parent;
@@ -185,23 +184,21 @@
 static irqreturn_t pcf857x_irq(int irq, void *data)
 {
 	struct pcf857x  *gpio = data;
-	unsigned long change, i, status, flags;
+	unsigned long change, i, status;
 
 	status = gpio->read(gpio->client);
 
-	spin_lock_irqsave(&gpio->slock, flags);
-
 	/*
 	 * call the interrupt handler iff gpio is used as
 	 * interrupt source, just to avoid bad irqs
 	 */
-
+	mutex_lock(&gpio->lock);
 	change = (gpio->status ^ status) & gpio->irq_enabled;
+	gpio->status = status;
+	mutex_unlock(&gpio->lock);
+
 	for_each_set_bit(i, &change, gpio->chip.ngpio)
 		handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
-	gpio->status = status;
-
-	spin_unlock_irqrestore(&gpio->slock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -293,7 +290,6 @@
 		return -ENOMEM;
 
 	mutex_init(&gpio->lock);
-	spin_lock_init(&gpio->slock);
 
 	gpio->chip.base			= pdata ? pdata->gpio_base : -1;
 	gpio->chip.can_sleep		= true;
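pcf857x_irq is a threaded handler (the expander sits on I2C, so gpio->read() sleeps), which means it always runs in process context: the extra spinlock bought nothing, and holding it across handle_nested_irq() was risky, since nested handlers may sleep. The driver's existing mutex suffices for the shared status latch. For reference, this is the registration shape that makes the handler thread-context by construction (the flags shown are typical for such expanders, an assumption rather than a quote from this driver):

	#include <linux/interrupt.h>

	static int setup_expander_irq(struct i2c_client *client, void *gpio)
	{
		/* NULL hard-irq half + IRQF_ONESHOT: the handler runs in a
		 * kernel thread, so it may sleep and may take mutexes. */
		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, pcf857x_irq, IRQF_ONESHOT,
						 dev_name(&client->dev), gpio);
	}
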
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2d9a950..34ed176 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -281,9 +281,9 @@
 
 	/* And the handler */
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 unlock:
 	spin_unlock_irqrestore(&chip->spinlock, flags);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cdbbcf0..55a11de 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -524,7 +524,7 @@
 {
 	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 	return 0;
 }
 
@@ -643,20 +643,20 @@
 			irq = gpio_to_irq(0);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		if (irq1 > 0) {
 			irq = gpio_to_irq(1);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 
 		for (irq  = gpio_to_irq(gpio_offset);
 			irq <= gpio_to_irq(pxa_last_gpio); irq++) {
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 	}
 
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1e14a6c..2a81224 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -251,17 +251,32 @@
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-	return pinctrl_request_gpio(chip->base + offset);
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+	int error;
+
+	error = pm_runtime_get_sync(&p->pdev->dev);
+	if (error < 0)
+		return error;
+
+	error = pinctrl_request_gpio(chip->base + offset);
+	if (error)
+		pm_runtime_put(&p->pdev->dev);
+
+	return error;
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+
 	pinctrl_free_gpio(chip->base + offset);
 
 	/* Set the GPIO as an input to ensure that the next GPIO request won't
 	 * drive the GPIO pin as an output.
 	 */
 	gpio_rcar_config_general_input_output_mode(chip, offset, false);
+
+	pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -327,6 +342,10 @@
 		.compatible = "renesas,gpio-r8a7794",
 		.data = &gpio_rcar_info_gen2,
 	}, {
+		.compatible = "renesas,gpio-r8a7795",
+		/* Gen3 GPIO is identical to Gen2. */
+		.data = &gpio_rcar_info_gen2,
+	}, {
 		.compatible = "renesas,gpio-rcar",
 		.data = &gpio_rcar_info_gen1,
 	}, {
@@ -405,7 +424,6 @@
 	}
 
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -487,7 +505,6 @@
 err1:
 	gpiochip_remove(gpio_chip);
 err0:
-	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -498,7 +515,6 @@
 
 	gpiochip_remove(&p->gpio_chip);
 
-	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
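gpio-rcar stops holding a runtime-PM reference for the whole probe-to-remove lifetime and instead takes one per requested GPIO, letting an idle controller power down. Note the return convention that shapes the error handling: pm_runtime_get_sync() returns a negative errno on failure but may return 1 (already active) on success, hence the "< 0" test, and a failure of the later pinctrl request must drop the reference again. An annotated restatement of the request path above:

	#include <linux/pinctrl/consumer.h>
	#include <linux/pm_runtime.h>

	static int rcar_request_annotated(struct gpio_chip *chip, unsigned offset)
	{
		struct gpio_rcar_priv *p = gpio_to_priv(chip);
		int error;

		error = pm_runtime_get_sync(&p->pdev->dev);
		if (error < 0)			/* 0 and 1 both mean success */
			return error;

		error = pinctrl_request_gpio(chip->base + offset);
		if (error)
			pm_runtime_put(&p->pdev->dev);	/* balance the get */

		return error;
	}
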
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3fa22da..67bd2f5 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@
 {
 	irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 
 	return 0;
 }
@@ -173,9 +173,9 @@
  * and call the handler.
  */
 static void
-sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc)
+sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	unsigned int mask;
+	unsigned int irq, mask;
 
 	mask = GEDR;
 	do {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 18579ac..55e47828 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -346,7 +346,7 @@
 			i = chip->irq_base + j;
 			irq_set_chip_and_handler(i, &ct->chip, ct->handler);
 			irq_set_chip_data(i, gc);
-			irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+			irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		gc->irq_cnt = i - gc->irq_base;
 	}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 31b244c..d1d585d 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -102,7 +102,7 @@
 static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -130,7 +130,7 @@
 static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
@@ -138,7 +138,7 @@
 static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -167,7 +167,7 @@
 static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -178,7 +178,7 @@
 static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b25c90..9b14aaf 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -252,9 +252,9 @@
 	tegra_gpio_enable(gpio);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -268,16 +268,14 @@
 
 static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct tegra_gpio_bank *bank;
 	int port;
 	int pin;
 	int unmasked = 0;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(chip, desc);
 
-	bank = irq_get_handler_data(irq);
-
 	for (port = 0; port < 4; port++) {
 		int gpio = tegra_gpio_compose(bank->bank, port, 0);
 		unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
@@ -509,7 +507,6 @@
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
 					 handle_simple_irq);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < tegra_gpio_bank_count; i++) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index e8f97e0..5a49205 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -194,11 +194,12 @@
 
 static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct timbgpio *tgpio = irq_get_handler_data(irq);
+	struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	unsigned long ipr;
 	int offset;
 
-	desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
+	data->chip->irq_ack(data);
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -294,13 +295,10 @@
 		irq_set_chip_and_handler(tgpio->irq_base + i,
 			&timbgpio_irqchip, handle_simple_irq);
 		irq_set_chip_data(tgpio->irq_base + i, tgpio);
-#ifdef CONFIG_ARM
-		set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(irq, tgpio);
-	irq_set_chained_handler(irq, timbgpio_irq);
+	irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 445660a..bbac92a 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -510,8 +510,8 @@
 	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;
 
 	/* Setup chained handler for this GPIO bank */
-	irq_set_handler_data(bank->irq, bank);
-	irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);
+	irq_set_chained_handler_and_data(bank->irq, tz1090_gpio_irq_handler,
+					 bank);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7bd9f20..3d5714d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -60,6 +60,8 @@
 #define PORT_INT_EITHER_EDGE	0xb
 #define PORT_INT_LOGIC_ONE	0xc
 
+static struct irq_chip vf610_gpio_irq_chip;
+
 static const struct of_device_id vf610_gpio_dt_ids[] = {
 	{ .compatible = "fsl,vf610-gpio" },
 	{ /* sentinel */ }
@@ -120,7 +122,7 @@
 
 static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct vf610_gpio_port *port = irq_get_handler_data(irq);
+	struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int pin;
 	unsigned long irq_isfr;
@@ -173,6 +175,11 @@
 
 	port->irqc[d->hwirq] = irqc;
 
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+
 	return 0;
 }
 
@@ -263,7 +270,7 @@
 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
 	ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
-				   handle_simple_irq, IRQ_TYPE_NONE);
+				   handle_edge_irq, IRQ_TYPE_NONE);
 	if (ret) {
 		dev_err(dev, "failed to add irqchip\n");
 		gpiochip_remove(gc);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 9bdab72..e02499a 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -387,7 +387,7 @@
 	irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
 	if (irq_base < 0) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
-		return err;
+		return -ENODEV;
 	}
 
 	err = gpiochip_add(gc);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
new file mode 100644
index 0000000..12ee196
--- /dev/null
+++ b/drivers/gpio/gpio-zx.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define ZX_GPIO_DIR	0x00
+#define ZX_GPIO_IVE	0x04
+#define ZX_GPIO_IV	0x08
+#define ZX_GPIO_IEP	0x0C
+#define ZX_GPIO_IEN	0x10
+#define ZX_GPIO_DI	0x14
+#define ZX_GPIO_DO1	0x18
+#define ZX_GPIO_DO0	0x1C
+#define ZX_GPIO_DO	0x20
+
+#define ZX_GPIO_IM	0x28
+#define ZX_GPIO_IE	0x2C
+
+#define ZX_GPIO_MIS	0x30
+#define ZX_GPIO_IC	0x34
+
+#define ZX_GPIO_NR	16
+
+struct zx_gpio {
+	spinlock_t		lock;
+
+	void __iomem		*base;
+	struct gpio_chip	gc;
+	bool			uses_pinctrl;
+};
+
+static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
+{
+	return container_of(gc, struct zx_gpio, gc);
+}
+
+static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		return pinctrl_request_gpio(gpio);
+	return 0;
+}
+
+static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		pinctrl_free_gpio(gpio);
+}
+
+static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir &= ~BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
+		int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir |= BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_get_value(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
+}
+
+static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+}
+
+static int zx_irq_type(struct irq_data *d, unsigned trigger)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	int offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
+	u16 bit = BIT(offset);
+
+	if (offset < 0 || offset >= ZX_GPIO_NR)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
+	gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
+	gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
+	gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
+
+	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+		gpiois |= bit;
+		if (trigger & IRQ_TYPE_LEVEL_HIGH)
+			gpioiev |= bit;
+		else
+			gpioiev &= ~bit;
+	} else
+		gpiois &= ~bit;
+
+	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+		gpioi_epos |= bit;
+		gpioi_eneg |= bit;
+	} else {
+		if (trigger & IRQ_TYPE_EDGE_RISING) {
+			gpioi_epos |= bit;
+			gpioi_eneg &= ~bit;
+		} else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+			gpioi_eneg |= bit;
+			gpioi_epos &= ~bit;
+		}
+	}
+
+	writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
+	writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
+	writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
+	writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+	unsigned long pending;
+	int offset;
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct zx_gpio *chip = to_zx(gc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(irqchip, desc);
+
+	pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
+	writew_relaxed(pending, chip->base + ZX_GPIO_IC);
+	if (pending) {
+		for_each_set_bit(offset, &pending, ZX_GPIO_NR)
+			generic_handle_irq(irq_find_mapping(gc->irqdomain,
+							    offset));
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static void zx_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static void zx_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static struct irq_chip zx_irqchip = {
+	.name		= "zx-gpio",
+	.irq_mask	= zx_irq_mask,
+	.irq_unmask	= zx_irq_unmask,
+	.irq_set_type	= zx_irq_type,
+};
+
+static int zx_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zx_gpio *chip;
+	struct resource *res;
+	int irq, id, ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chip->base))
+		return PTR_ERR(chip->base);
+
+	spin_lock_init(&chip->lock);
+	if (of_property_read_bool(dev->of_node, "gpio-ranges"))
+		chip->uses_pinctrl = true;
+
+	id = of_alias_get_id(dev->of_node, "gpio");
+	chip->gc.request = zx_gpio_request;
+	chip->gc.free = zx_gpio_free;
+	chip->gc.direction_input = zx_direction_input;
+	chip->gc.direction_output = zx_direction_output;
+	chip->gc.get = zx_get_value;
+	chip->gc.set = zx_set_value;
+	chip->gc.base = ZX_GPIO_NR * id;
+	chip->gc.ngpio = ZX_GPIO_NR;
+	chip->gc.label = dev_name(dev);
+	chip->gc.dev = dev;
+	chip->gc.owner = THIS_MODULE;
+
+	ret = gpiochip_add(&chip->gc);
+	if (ret)
+		return ret;
+
+	/*
+	 * irq_chip support
+	 */
+	writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
+	writew_relaxed(0, chip->base + ZX_GPIO_IE);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "invalid IRQ\n");
+		gpiochip_remove(&chip->gc);
+		return -ENODEV;
+	}
+
+	ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
+				   0, handle_simple_irq,
+				   IRQ_TYPE_NONE);
+	if (ret) {
+		dev_err(dev, "could not add irqchip\n");
+		gpiochip_remove(&chip->gc);
+		return ret;
+	}
+	gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
+				     irq, zx_irq_handler);
+
+	platform_set_drvdata(pdev, chip);
+	dev_info(dev, "ZX GPIO chip registered\n");
+
+	return 0;
+}
+
+static const struct of_device_id zx_gpio_match[] = {
+	{
+		.compatible = "zte,zx296702-gpio",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, zx_gpio_match);
+
+static struct platform_driver zx_gpio_driver = {
+	.probe		= zx_gpio_probe,
+	.driver = {
+		.name	= "zx_gpio",
+		.of_match_table = of_match_ptr(zx_gpio_match),
+	},
+};
+
+module_platform_driver(zx_gpio_driver);
+
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index a788823..27348e7 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -441,10 +441,10 @@
 		       gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
 
 	if (type & IRQ_TYPE_LEVEL_MASK) {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
 	} else {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_edge_irqchip, handle_level_irq, NULL);
 	}
 
@@ -518,7 +518,7 @@
 {
 	u32 int_sts, int_enb;
 	unsigned int bank_num;
-	struct zynq_gpio *gpio = irq_get_handler_data(irq);
+	struct zynq_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(irqchip, desc);
@@ -782,6 +782,12 @@
 }
 postcore_initcall(zynq_gpio_init);
 
+static void __exit zynq_gpio_exit(void)
+{
+	platform_driver_unregister(&zynq_gpio_driver);
+}
+module_exit(zynq_gpio_exit);
+
 MODULE_AUTHOR("Xilinx Inc.");
 MODULE_DESCRIPTION("Zynq GPIO driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 9a0ec48..fa6e3c8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -136,7 +136,6 @@
 {
 	struct device_node *chip_np;
 	enum of_gpio_flags xlate_flags;
-	struct gpio_desc *desc;
 	struct gg_data gg_data = {
 		.flags = &xlate_flags,
 	};
@@ -193,9 +192,7 @@
 	if (name && of_property_read_string(np, "line-name", name))
 		*name = np->name;
 
-	desc = gg_data.out_gpio;
-
-	return desc;
+	return gg_data.out_gpio;
 }
 
 /**
@@ -338,7 +335,7 @@
 EXPORT_SYMBOL(of_mm_gpiochip_remove);
 
 #ifdef CONFIG_PINCTRL
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
 {
 	struct device_node *np = chip->of_node;
 	struct of_phandle_args pinspec;
@@ -349,7 +346,7 @@
 	struct property *group_names;
 
 	if (!np)
-		return;
+		return 0;
 
 	group_names = of_find_property(np, group_names_propname, NULL);
 
@@ -361,11 +358,11 @@
 
 		pctldev = of_pinctrl_get(pinspec.np);
 		if (!pctldev)
-			break;
+			return -EPROBE_DEFER;
 
 		if (pinspec.args[2]) {
 			if (group_names) {
-				ret = of_property_read_string_index(np,
+				of_property_read_string_index(np,
 						group_names_propname,
 						index, &name);
 				if (strlen(name)) {
@@ -381,7 +378,7 @@
 					pinspec.args[1],
 					pinspec.args[2]);
 			if (ret)
-				break;
+				return ret;
 		} else {
 			/* npins == 0: special range */
 			if (pinspec.args[1]) {
@@ -411,32 +408,41 @@
 			ret = gpiochip_add_pingroup_range(chip, pctldev,
 						pinspec.args[0], name);
 			if (ret)
-				break;
+				return ret;
 		}
 	}
+
+	return 0;
 }
 
 #else
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip) {}
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
 #endif
 
-void of_gpiochip_add(struct gpio_chip *chip)
+int of_gpiochip_add(struct gpio_chip *chip)
 {
+	int status;
+
 	if ((!chip->of_node) && (chip->dev))
 		chip->of_node = chip->dev->of_node;
 
 	if (!chip->of_node)
-		return;
+		return 0;
 
 	if (!chip->of_xlate) {
 		chip->of_gpio_n_cells = 2;
 		chip->of_xlate = of_gpio_simple_xlate;
 	}
 
-	of_gpiochip_add_pin_range(chip);
+	status = of_gpiochip_add_pin_range(chip);
+	if (status)
+		return status;
+
 	of_node_get(chip->of_node);
 
 	of_gpiochip_scan_hogs(chip);
+
+	return 0;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
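Making of_gpiochip_add() return an error turns a previously silent failure mode into deferred probing: if a gpio-ranges phandle points at a pin controller that has not probed yet, of_pinctrl_get() yields NULL, the new code returns -EPROBE_DEFER, and (with the gpiolib.c change below) gpiochip_add() unwinds so the GPIO driver's probe is retried later. From a driver's perspective nothing changes except that the return value now matters (my_chip is a hypothetical, already-initialized chip):

	#include <linux/gpio/driver.h>
	#include <linux/platform_device.h>

	static struct gpio_chip my_chip;	/* hypothetical, set up elsewhere */

	static int example_gpio_probe(struct platform_device *pdev)
	{
		int ret = gpiochip_add(&my_chip);

		/* may now be -EPROBE_DEFER if our pinctrl provider is late */
		if (ret)
			return ret;

		return 0;
	}
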
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bf4bd1d..980c1f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -190,7 +190,7 @@
  */
 static int gpiochip_add_to_list(struct gpio_chip *chip)
 {
-	struct list_head *pos = &gpio_chips;
+	struct list_head *pos;
 	struct gpio_chip *_chip;
 	int err = 0;
 
@@ -287,7 +287,13 @@
 	INIT_LIST_HEAD(&chip->pin_ranges);
 #endif
 
-	of_gpiochip_add(chip);
+	if (!chip->owner && chip->dev && chip->dev->driver)
+		chip->owner = chip->dev->driver->owner;
+
+	status = of_gpiochip_add(chip);
+	if (status)
+		goto err_remove_chip;
+
 	acpi_gpiochip_add(chip);
 
 	status = gpiochip_sysfs_register(chip);
@@ -443,8 +449,8 @@
 		 * The parent irqchip is already using the chip_data for this
 		 * irqchip, so our callbacks simply use the handler_data.
 		 */
-		irq_set_handler_data(parent_irq, gpiochip);
-		irq_set_chained_handler(parent_irq, parent_handler);
+		irq_set_chained_handler_and_data(parent_irq, parent_handler,
+						 gpiochip);
 
 		gpiochip->irq_parent = parent_irq;
 	}
@@ -456,12 +462,6 @@
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
 
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpiochip_irq_lock_class;
-
 /**
  * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
  * @d: the irqdomain used by this irqchip
@@ -478,16 +478,17 @@
 	struct gpio_chip *chip = d->host_data;
 
 	irq_set_chip_data(irq, chip);
-	irq_set_lockdep_class(irq, &gpiochip_irq_lock_class);
+	/*
+	 * This lock class tells lockdep that GPIO irqs are in a different
+	 * category than their parents, so it won't report false recursion.
+	 */
+	irq_set_lockdep_class(irq, chip->lock_key);
 	irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
 	/* Chips that can sleep need nested thread handlers */
 	if (chip->can_sleep && !chip->irq_not_threaded)
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	/*
 	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
 	 * is passed as default type.
@@ -502,9 +503,6 @@
 {
 	struct gpio_chip *chip = d->host_data;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	if (chip->can_sleep)
 		irq_set_nested_thread(irq, 0);
 	irq_set_chip_and_handler(irq, NULL, NULL);
@@ -522,10 +520,14 @@
 {
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+	if (!try_module_get(chip->owner))
+		return -ENODEV;
+
 	if (gpiochip_lock_as_irq(chip, d->hwirq)) {
 		chip_err(chip,
 			"unable to lock HW IRQ %lu for IRQ\n",
 			d->hwirq);
+		module_put(chip->owner);
 		return -EINVAL;
 	}
 	return 0;
@@ -536,6 +538,7 @@
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
 	gpiochip_unlock_as_irq(chip, d->hwirq);
+	module_put(chip->owner);
 }
 
 static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -584,6 +587,7 @@
  * @handler: the irq handler to use (often a predefined irq core function)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
+ * @lock_key: lockdep class
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -599,11 +603,12 @@
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-			 struct irq_chip *irqchip,
-			 unsigned int first_irq,
-			 irq_flow_handler_t handler,
-			 unsigned int type)
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key)
 {
 	struct device_node *of_node;
 	unsigned int offset;
@@ -629,6 +634,7 @@
 	gpiochip->irq_handler = handler;
 	gpiochip->irq_default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
+	gpiochip->lock_key = lock_key;
 	gpiochip->irqdomain = irq_domain_add_simple(of_node,
 					gpiochip->ngpio, first_irq,
 					&gpiochip_domain_ops, gpiochip);
@@ -636,8 +642,16 @@
 		gpiochip->irqchip = NULL;
 		return -EINVAL;
 	}
-	irqchip->irq_request_resources = gpiochip_irq_reqres;
-	irqchip->irq_release_resources = gpiochip_irq_relres;
+
+	/*
+	 * It is possible for a driver to override this, but only if the
+	 * alternative functions are both implemented.
+	 */
+	if (!irqchip->irq_request_resources &&
+	    !irqchip->irq_release_resources) {
+		irqchip->irq_request_resources = gpiochip_irq_reqres;
+		irqchip->irq_release_resources = gpiochip_irq_relres;
+	}
 
 	/*
 	 * Prepare the mapping since the irqchip shall be orthogonal to
@@ -658,7 +672,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
@@ -671,7 +685,7 @@
 /**
  * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
  * @chip: the gpiochip to add the range for
- * @pinctrl: the dev_name() of the pin controller to map to
+ * @pctldev: the pin controller to map to
  * @gpio_offset: the start offset in the current gpio_chip number space
  * @pin_group: name of the pin group inside the pin controller
  */
@@ -1672,6 +1686,19 @@
 	mutex_unlock(&gpio_lookup_lock);
 }
 
+/**
+ * gpiod_remove_lookup_table() - unregister GPIO device consumers
+ * @table: table of consumers to unregister
+ */
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
+{
+	mutex_lock(&gpio_lookup_lock);
+
+	list_del(&table->list);
+
+	mutex_unlock(&gpio_lookup_lock);
+}
+
 static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 				      unsigned int idx,
 				      enum gpio_lookup_flags *flags)
@@ -1894,12 +1921,12 @@
  * dev, -ENOENT if no GPIO has been assigned to the requested function, or
  * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id,
+struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id,
 					 enum gpiod_flags flags)
 {
 	return gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get);
+EXPORT_SYMBOL_GPL(gpiod_get);
 
 /**
  * gpiod_get_optional - obtain an optional GPIO for a given GPIO function
@@ -1911,13 +1938,13 @@
  * the requested function it will return NULL. This is convenient for drivers
  * that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags)
 {
 	return gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
 
 /**
@@ -1974,7 +2001,7 @@
  * requested function and/or index, or another IS_ERR() code if an error
  * occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags)
@@ -2023,7 +2050,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
 
 /**
  * fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -2092,7 +2119,7 @@
  * specified index was assigned to the requested function it will return NULL.
  * This is convenient for drivers that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags)
@@ -2107,7 +2134,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
 
 /**
  * gpiod_hog - Hog the specified GPIO desc given the provided flags
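
[Two of the gpiolib.c changes above are worth spelling out. First, the new
irq_request/release resource hooks pin the gpiochip's module, so a GPIO
controller can no longer be unloaded while one of its lines is claimed as an
interrupt. Second, with the old varargs compatibility wrappers gone,
consumers call gpiod_get() and friends directly and the flags argument is
now mandatory. A minimal consumer sketch -- the "reset" con_id is
illustrative:

        struct gpio_desc *reset;

        reset = gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_value(reset, 1);      /* drive the line to its active state */
        gpiod_put(reset);
]
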
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca31..1a0a8df 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
 	help
 	  FBDEV helpers for KMS drivers.
 
+config DRM_FBDEV_EMULATION
+	bool "Enable legacy fbdev support for your modesetting driver"
+	depends on DRM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	default y
+	help
+	  Choose this option if you need legacy fbdev support. Note that
+	  this option also provides the Linux console on top of your
+	  modesetting driver.
+
+	  If in doubt, say "Y".
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM_KMS_HELPER
@@ -79,8 +99,6 @@
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-source "drivers/gpu/drm/bridge/Kconfig"
-
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -110,6 +128,7 @@
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
@@ -133,6 +152,7 @@
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@
 
 source "drivers/gpu/drm/msm/Kconfig"
 
+source "drivers/gpu/drm/fsl-dcu/Kconfig"
+
 source "drivers/gpu/drm/tegra/Kconfig"
 
 source "drivers/gpu/drm/panel/Kconfig"
 
+source "drivers/gpu/drm/bridge/Kconfig"
+
 source "drivers/gpu/drm/sti/Kconfig"
 
 source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d053..45e7719 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@
 obj-y			+= i2c/
 obj-y			+= panel/
 obj-y			+= bridge/
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 616dfd4..04c2707 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-	-Idrivers/gpu/drm/amd/include
+	-Idrivers/gpu/drm/amd/include \
+	-Idrivers/gpu/drm/amd/amdgpu \
+	-Idrivers/gpu/drm/amd/scheduler
 
 amdgpu-y := amdgpu_drv.o
 
@@ -21,7 +23,8 @@
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+	amdgpu_amdkfd_gfx_v7.o
 
 amdgpu-y += \
 	vi.o
@@ -43,6 +46,7 @@
 	amdgpu_dpm.o \
 	cz_smc.o cz_dpm.o \
 	tonga_smc.o tonga_dpm.o \
+	fiji_smc.o fiji_dpm.o \
 	iceland_smc.o iceland_dpm.o
 
 # add DCE block
@@ -71,6 +75,20 @@
 	amdgpu_vce.o \
 	vce_v3_0.o
 
+# add amdkfd interfaces
+amdgpu-y += \
+	 amdgpu_amdkfd.o \
+	 amdgpu_amdkfd_gfx_v8.o
+
+# add cgs
+amdgpu-y += amdgpu_cgs.o
+
+# GPU scheduler
+amdgpu-y += \
+	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
+	amdgpu_sched.o
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f7b49d5c..668939a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
  */
@@ -77,7 +79,11 @@
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -92,6 +98,9 @@
 #define AMDGPU_MAX_COMPUTE_RINGS		8
 #define AMDGPU_MAX_VCE_RINGS			2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES		2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS			4
 
@@ -177,7 +186,9 @@
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
+struct amdgpu_fpriv;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -239,7 +250,7 @@
 	unsigned	copy_num_dw;
 
 	/* used for buffer migration */
-	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
 				 /* src addr in bytes */
 				 uint64_t src_offset,
 				 /* dst addr in bytes */
@@ -254,7 +265,7 @@
 	unsigned	fill_num_dw;
 
 	/* used for buffer clearing */
-	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
 				 /* value to write to memory */
 				 uint32_t src_data,
 				 /* dst addr in bytes */
@@ -332,6 +343,8 @@
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
 	bool (*is_lockup)(struct amdgpu_ring *ring);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
@@ -381,10 +394,10 @@
 	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
-	bool				delayed_irq;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work             lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -423,20 +436,20 @@
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array,
+				  uint32_t count,
+				  bool intr,
+				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -481,7 +494,7 @@
 	return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
 			   void *owner, struct amdgpu_fence **fence);
 
 /*
@@ -509,7 +522,7 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence);
+		       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -532,14 +545,16 @@
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	uint64_t			addr;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence		        *last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex */
-	struct list_head		mappings;
+	/* protected by vm mutex and spinlock */
 	struct list_head		vm_status;
 
+	/* mappings for this bo_va */
+	struct list_head		invalids;
+	struct list_head		valids;
+
 	/* constant after initialization */
 	struct amdgpu_vm		*vm;
 	struct amdgpu_bo		*bo;
@@ -643,7 +658,7 @@
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct amdgpu_fence		*fence;
+	struct fence		        *fence;
 };
 
 /*
@@ -685,7 +700,7 @@
 				struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence);
+			   struct fence *fence);
 
 /*
  * Synchronization
@@ -693,20 +708,23 @@
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
-	struct amdgpu_fence	*last_vm_update;
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence	        *last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence);
+		      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
@@ -821,7 +839,9 @@
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
 
 
@@ -844,6 +864,8 @@
 	uint32_t			gws_base, gws_size;
 	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
+	/* resulting sequence number */
+	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
@@ -854,11 +876,23 @@
 	AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **fence);
+
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler 	*scheduler;
 
+	spinlock_t              fence_lock;
 	struct mutex		*ring_lock;
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -892,6 +926,7 @@
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	bool                    is_pte_ring;
 };
 
 /*
@@ -933,7 +968,7 @@
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence	        *flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -943,18 +978,22 @@
 
 	struct rb_root		va;
 
-	/* protecting invalidated and freed */
+	/* protecting invalidated */
 	spinlock_t		status_lock;
 
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	invalidated;
 
-	/* BOs freed, but not yet updated in the PT */
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -983,27 +1022,47 @@
  * context related structures
  */
 
-struct amdgpu_ctx_state {
-	uint64_t flags;
-	uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING	16
+
+struct amdgpu_ctx_ring {
+	uint64_t		sequence;
+	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_sched_entity	entity;
 };
 
 struct amdgpu_ctx {
-	/* call kref_get()before CS start and kref_put() after CS fence signaled */
-	struct kref refcount;
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_state state;
-	uint32_t id;
-	unsigned reset_counter;
+	struct kref		refcount;
+	struct amdgpu_device    *adev;
+	unsigned		reset_counter;
+	spinlock_t		ring_lock;
+	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
-	struct amdgpu_device *adev;
-	struct idr ctx_handles;
-	/* lock for IDR system */
-	struct mutex lock;
+	struct amdgpu_device	*adev;
+	struct mutex		lock;
+	/* protected by lock */
+	struct idr		ctx_handles;
 };
 
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx);
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
 /*
  * file private structure
  */
@@ -1012,7 +1071,7 @@
 	struct amdgpu_vm	vm;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
-	struct amdgpu_ctx_mgr ctx_mgr;
+	struct amdgpu_ctx_mgr	ctx_mgr;
 };
 
 /*
@@ -1160,6 +1219,7 @@
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1207,6 +1267,16 @@
 	struct amdgpu_user_fence uf;
 };
 
+struct amdgpu_job {
+	struct amd_sched_job    base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex            job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
 	return p->ibs[ib_idx].ptr[idx];
@@ -1601,7 +1671,6 @@
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1645,6 +1714,7 @@
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	bool			burst_nop;
 };
 
 /*
@@ -1849,17 +1919,12 @@
 	struct amdgpu_atcs_functions functions;
 };
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
-							uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-						  uint32_t id);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
-						 struct drm_file *filp);
 
 /*
  * Core structure, functions and helpers.
@@ -1883,7 +1948,7 @@
 	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
-	enum amdgpu_asic_type           asic_type;
+	enum amd_asic_type		asic_type;
 	uint32_t			family;
 	uint32_t			rev_id;
 	uint32_t			external_rev_id;
@@ -1976,7 +2041,6 @@
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
@@ -1999,7 +2063,7 @@
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src		sdma_trap_irq;
 	struct amdgpu_irq_src		sdma_illegal_inst_irq;
 
@@ -2025,6 +2089,12 @@
 	/* tracking pinned memory */
 	u64 vram_pin_size;
 	u64 gart_pin_size;
+
+	/* amdkfd interface */
+	struct kfd_dev          *kfd;
+
+	/* kernel context for IB submission */
+	struct amdgpu_ctx	kernel_ctx;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2132,6 +2202,21 @@
 	ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma *amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+		if (&adev->sdma[i].ring == ring)
+			break;
+
+	if (i < AMDGPU_MAX_SDMA_INSTANCES)
+		return &adev->sdma[i];
+	else
+		return NULL;
+}
+
 /*
  * ASICs macro.
  */
@@ -2183,8 +2268,8 @@
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2212,6 +2297,12 @@
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
@@ -2275,11 +2366,11 @@
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					  struct amdgpu_vm *vm,
 					  struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2309,7 +2400,7 @@
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */
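
[The amdgpu_ctx rework above keeps the last AMDGPU_CTX_MAX_CS_PENDING fences
per ring in what is effectively a circular buffer indexed by the submission
sequence number. A sketch of the lookup amdgpu_ctx_get_fence() has to
perform -- illustrative only, with cring standing in for the context's
per-ring amdgpu_ctx_ring:

        struct fence *fence = NULL;

        spin_lock(&ctx->ring_lock);
        if (seq < cring->sequence &&
            seq + AMDGPU_CTX_MAX_CS_PENDING >= cring->sequence)
                /* slot not yet recycled: take a reference under the lock */
                fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
        spin_unlock(&ctx->ring_lock);
]
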
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 0000000..496ed21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amd_shared.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL)
+		return false;
+#endif
+	return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+	switch (rdev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_KAVERI:
+		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+		break;
+#endif
+	case CHIP_CARRIZO:
+		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+		break;
+	default:
+		return false;
+	}
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL) {
+		kfd2kgd = NULL;
+		return false;
+	}
+
+	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		symbol_put(kgd2kfd_init);
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+
+		return false;
+	}
+
+	return true;
+#elif defined(CONFIG_HSA_AMD)
+	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+		return false;
+	}
+
+	return true;
+#else
+	kfd2kgd = NULL;
+	return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+	if (kgd2kfd) {
+		kgd2kfd->exit();
+		symbol_put(kgd2kfd_init);
+	}
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+	if (kgd2kfd)
+		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+					rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		struct kgd2kfd_shared_resources gpu_resources = {
+			.compute_vmid_bitmap = 0xFF00,
+
+			.first_compute_pipe = 1,
+			.compute_pipe_count = 4 - 1,
+		};
+
+		amdgpu_doorbell_get_kfd_info(rdev,
+				&gpu_resources.doorbell_physical_address,
+				&gpu_resources.doorbell_aperture_size,
+				&gpu_resources.doorbell_start_offset);
+
+		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+	}
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		kgd2kfd->device_exit(rdev->kfd);
+		rdev->kfd = NULL;
+	}
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+		const void *ih_ring_entry)
+{
+	if (rdev->kfd)
+		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd)
+		kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+	int r = 0;
+
+	if (rdev->kfd)
+		r = kgd2kfd->resume(rdev->kfd);
+
+	return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+	switch (p) {
+	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+	default: return AMDGPU_GEM_DOMAIN_GTT;
+	}
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+	int r;
+
+	BUG_ON(kgd == NULL);
+	BUG_ON(gpu_addr == NULL);
+	BUG_ON(cpu_ptr == NULL);
+
+	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+	if ((*mem) == NULL)
+		return -ENOMEM;
+
+	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+	if (r) {
+		dev_err(rdev->dev,
+			"failed to allocate BO for amdkfd (%d)\n", r);
+		return r;
+	}
+
+	/* map the buffer */
+	r = amdgpu_bo_reserve((*mem)->bo, true);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+		goto allocate_mem_reserve_bo_failed;
+	}
+
+	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+				&(*mem)->gpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+		goto allocate_mem_pin_bo_failed;
+	}
+	*gpu_addr = (*mem)->gpu_addr;
+
+	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+	if (r) {
+		dev_err(rdev->dev,
+			"(%d) failed to map bo to kernel for amdkfd\n", r);
+		goto allocate_mem_kmap_bo_failed;
+	}
+	*cpu_ptr = (*mem)->cpu_ptr;
+
+	amdgpu_bo_unreserve((*mem)->bo);
+
+	return 0;
+
+allocate_mem_kmap_bo_failed:
+	amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+	amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+	amdgpu_bo_unref(&(*mem)->bo);
+
+	return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+	BUG_ON(mem == NULL);
+
+	amdgpu_bo_reserve(mem->bo, true);
+	amdgpu_bo_kunmap(mem->bo);
+	amdgpu_bo_unpin(mem->bo);
+	amdgpu_bo_unreserve(mem->bo);
+	amdgpu_bo_unref(&(mem->bo));
+	kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev =
+		(struct amdgpu_device *)kgd;
+
+	BUG_ON(kgd == NULL);
+
+	return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	if (rdev->asic_funcs->get_gpu_clock_counter)
+		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+	return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	/* The sclk is in quanta of 10 kHz */
+	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
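
[alloc_gtt_mem()/free_gtt_mem() above hand amdkfd pinned, kernel-mapped GTT
buffers without exposing amdgpu's BO internals; amdkfd reaches them through
the kfd2kgd function table. A hedged usage sketch from the kfd side, with
the local variable names purely illustrative:

        void *mem_obj;
        uint64_t gpu_addr;
        void *cpu_ptr;
        int r;

        r = kfd2kgd->init_gtt_mem_allocation(kgd, PAGE_SIZE,
                                             &mem_obj, &gpu_addr, &cpu_ptr);
        if (r)
                return r;

        memset(cpu_ptr, 0, PAGE_SIZE);  /* CPU view of the pinned buffer */
        /* ... hand gpu_addr to the hardware ... */
        kfd2kgd->free_gtt_mem(kgd, mem_obj);
]
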
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644
index 0000000..a8be765
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
+struct kgd_mem {
+	struct amdgpu_bo *bo;
+	uint64_t gpu_addr;
+	void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+			const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
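
[The kgd2kfd_init() handshake seen in amdgpu_amdkfd.c lets amdgpu refuse an
amdkfd whose interface version does not match its own. A sketch of what the
amdkfd side of that entry point has to look like -- the body and the
kgd2kfd_calls name are hypothetical, only the signature is fixed by this
interface:

        bool kgd2kfd_init(unsigned interface_version,
                          const struct kgd2kfd_calls **g2f)
        {
                if (interface_version != KFD_INTERFACE_VERSION)
                        return false;   /* reject mismatched amdgpu/amdkfd pair */

                *g2f = &kgd2kfd_calls;  /* hand back kfd's function table */
                return true;
        }
]
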
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 0000000..dd2037b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC	(4)
+
+enum {
+	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
+	MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+	ADDRESS_WATCH_REG_ADDR_HI = 0,
+	ADDRESS_WATCH_REG_ADDR_LO,
+	ADDRESS_WATCH_REG_CNTL,
+	ADDRESS_WATCH_REG_MAX
+};
+
+/*  not defined in the CI/KV reg file  */
+enum {
+	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+	/* extend the mask to 26 bits to match the low address field */
+	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+	struct {
+		uint32_t mask:24;
+		uint32_t vmid:4;
+		uint32_t atc:1;
+		uint32_t mode:2;
+		uint32_t valid:1;
+	} bitfields, bits;
+	uint32_t u32All;
+	signed int i32All;
+	float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
+		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished and the
+	 * SW cleared it. So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_VMID, 0);
+	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	uint32_t retval;
+
+	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+	pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+	return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+	return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t wptr_shadow, is_wptr_shadow_valid;
+	struct cik_mqd *m;
+
+	m = get_mqd(mqd);
+
+	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+	if (is_wptr_shadow_valid)
+		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+			m->sdma_rlc_virtual_addr);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+			m->sdma_rlc_rb_base);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdma_rlc_rb_base_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdma_rlc_rb_rptr_addr_lo);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdma_rlc_rb_rptr_addr_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+			m->sdma_rlc_doorbell);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+			m->sdma_rlc_rb_cntl);
+
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption time out (%dms)\n",
+				temp);
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+	unsigned int i;
+
+	cntl.u32All = 0;
+
+	cntl.bitfields.valid = 0;
+	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+	cntl.bitfields.atc = 1;
+
+	/* Turn off all watch points */
+	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+			ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+
+	cntl.u32All = cntl_val;
+
+	/* Turning off this watch point until we set all the registers */
+	cntl.bitfields.valid = 0;
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+	/* Enable the watch point */
+	cntl.bitfields.valid = 1;
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	/*  Restore the GRBM_GFX_INDEX register  */
+
+	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
+
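
[One subtlety in the CIK glue above: acquire_queue() and kgd_init_pipeline()
pre-increment pipe_id before decoding it into an MEC/pipe pair, while
kgd_init_interrupts() does not. Worked out for CIK_PIPE_PER_MEC == 4 and
pipe_id == 3:

        /* acquire_queue() / kgd_init_pipeline(): */
        mec  = (++pipe_id / 4) + 1;     /* pipe_id becomes 4, mec == 2 */
        pipe = pipe_id % 4;             /* pipe == 0 */

        /* kgd_init_interrupts(): */
        mec  = (pipe_id / 4) + 1;       /* mec == 1 */
        pipe = pipe_id % 4;             /* pipe == 3 */

The one-pipe offset matches .first_compute_pipe = 1 in the shared resources
(pipe 0 of the first MEC is presumably kept for amdgpu's own compute rings),
but the asymmetry means the same pipe_id addresses different hardware pipes
in the two paths -- worth keeping in mind when reading the VI variant below.]
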
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 0000000..dfd1d50
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC	(4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+		uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+		uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
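+	/* SRBM_GFX_CNTL selects which mec/pipe/queue/vmid the indexed
+	 * registers accessed under this lock will address */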
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
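+	/* note: the pre-increment of pipe_id feeds both the mec and the
+	 * pipe computations below */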
+	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished
+	 * and the SW cleared it.
+	 * So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
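+	/* enable timestamp interrupt generation for this mec/pipe */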
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
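+	/* presumably a stub: VI SDMA queue register offsets are not wired up here */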
+	return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct vi_mqd *m;
+	uint32_t shadow_wptr, valid_wptr;
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	m = get_mqd(mqd);
+
+	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+	acquire_queue(kgd, pipe_id, queue_id);
+
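+	/* mirror the MQD contents into the HQD registers */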
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	/* copy_from_user() returns the number of bytes left uncopied */
+	if (valid_wptr == 0)
+		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
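+	/* non-SMU ucode is already resident in GPU memory; SMU firmware
+	 * is fetched from the filesystem below */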
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		/* the dequeue is complete once the HQD deasserts ACTIVE */
+		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out\n");
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		INSTANCE_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SH_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SE_BROADCAST_WRITES, 1);
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a58837..77f1d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -897,7 +897,7 @@
 					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					    (id == ASIC_INTERNAL_MEMORY_SS))
 						ss->rate /= 100;
-					if (adev->flags & AMDGPU_IS_APU)
+					if (adev->flags & AMD_IS_APU)
 						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
 					return true;
 				}
@@ -1058,7 +1058,7 @@
 	SET_MEMORY_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a..98d59ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -42,17 +42,17 @@
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
 		if (r)
 			goto exit_do_move;
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
 	if (fence)
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f..02add0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
 
-	if (!(adev->flags & AMDGPU_IS_APU))
+	if (!(adev->flags & AMD_IS_APU))
 		if (!amdgpu_card_posted(adev))
 			return false;
 
@@ -184,7 +184,7 @@
 	bool found = false;
 
 	/* ATRM is for the discrete card only */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return false;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@
 
 static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
 		return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 0000000..6b1243f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <drm/drmP.h>
+#include <linux/firmware.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "cgs_linux.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+
+
+struct amdgpu_cgs_device {
+	struct cgs_device base;
+	struct amdgpu_device *adev;
+};
+
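+/* resolve the amdgpu_device behind an opaque cgs_device handle */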
+#define CGS_FUNC_ADEV							\
+	struct amdgpu_device *adev =					\
+		((struct amdgpu_cgs_device *)cgs_device)->adev
+
+static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t *mc_start, uint64_t *mc_size,
+				   uint64_t *mem_size)
+{
+	CGS_FUNC_ADEV;
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		*mc_start = 0;
+		*mc_size = adev->mc.visible_vram_size;
+		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		*mc_start = adev->mc.visible_vram_size;
+		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
+		*mem_size = *mc_size;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		*mc_start = adev->mc.gtt_start;
+		*mc_size = adev->mc.gtt_size;
+		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+				uint64_t size,
+				uint64_t min_offset, uint64_t max_offset,
+				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
+{
+	CGS_FUNC_ADEV;
+	int ret;
+	struct amdgpu_bo *bo;
+	struct page *kmem_page = vmalloc_to_page(kmem);
+	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
+	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+	if (ret)
+		return ret;
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
+				       min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(bo);
+
+	*kmem_handle = (cgs_handle_t)bo;
+	return ret;
+}
+
+static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+				    enum cgs_gpu_mem_type type,
+				    uint64_t size, uint64_t align,
+				    uint64_t min_offset, uint64_t max_offset,
+				    cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	uint16_t flags = 0;
+	int ret = 0;
+	uint32_t domain = 0;
+	struct amdgpu_bo *obj;
+	struct ttm_placement placement;
+	struct ttm_place place;
+
+	if (min_offset > max_offset) {
+		BUG_ON(1);
+		return -EINVAL;
+	}
+
+	/* fail if the alignment is not a power of 2 */
+	if (((align != 1) && (align & (align - 1)))
+	    || size == 0 || align == 0)
+		return -EINVAL;
+
+
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (max_offset > adev->mc.real_vram_size)
+			return -EINVAL;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_VRAM;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			place.fpfn =
+				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
+			place.lpfn =
+				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
+			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_VRAM;
+		}
+
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+			TTM_PL_FLAG_UNCACHED;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	*handle = 0;
+
+	placement.placement = &place;
+	placement.num_placement = 1;
+	placement.busy_placement = &place;
+	placement.num_busy_placement = 1;
+
+	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
+					  true, domain, flags,
+					  NULL, &placement, &obj);
+	if (ret) {
+		DRM_ERROR("(%d) bo create failed\n", ret);
+		return ret;
+	}
+	*handle = (cgs_handle_t)obj;
+
+	return ret;
+}
+
+static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
+				     cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	int r;
+	uint32_t dma_handle;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	struct drm_device *dev = adev->ddev;
+	struct drm_file *file_priv = NULL, *priv;
+
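+	/* find the drm_file that belongs to the calling process */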
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		rcu_read_lock();
+		if (priv->pid == get_pid(task_pid(current)))
+			file_priv = priv;
+		rcu_read_unlock();
+		if (file_priv)
+			break;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	r = dev->driver->prime_fd_to_handle(dev,
+					    file_priv, dmabuf_fd,
+					    &dma_handle);
+	spin_lock(&file_priv->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&file_priv->object_idr, dma_handle);
+	if (obj == NULL) {
+		spin_unlock(&file_priv->table_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&file_priv->table_lock);
+	bo = gem_to_amdgpu_bo(obj);
+	*handle = (cgs_handle_t)bo;
+	return 0;
+}
+
+static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_kunmap(obj);
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   uint64_t *mcaddr)
+{
+	int r;
+	u64 min_offset, max_offset;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	WARN_ON_ONCE(obj->placement.num_placement > 1);
+
+	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
+	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
+
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+				     min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_unpin(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   void **map)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_kmap(obj, map);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	amdgpu_bo_kunmap(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+{
+	CGS_FUNC_ADEV;
+	return RREG32(offset);
+}
+
+static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+				      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	WREG32(offset, value);
+}
+
+static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+					     enum cgs_ind_reg space,
+					     unsigned index)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return RREG32_IDX(index);
+	case CGS_IND_REG__PCIE:
+		return RREG32_PCIE(index);
+	case CGS_IND_REG__SMC:
+		return RREG32_SMC(index);
+	case CGS_IND_REG__UVD_CTX:
+		return RREG32_UVD_CTX(index);
+	case CGS_IND_REG__DIDT:
+		return RREG32_DIDT(index);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return 0;
+	}
+	WARN(1, "Invalid indirect register space");
+	return 0;
+}
+
+static void amdgpu_cgs_write_ind_register(void *cgs_device,
+					  enum cgs_ind_reg space,
+					  unsigned index, uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return WREG32_IDX(index, value);
+	case CGS_IND_REG__PCIE:
+		return WREG32_PCIE(index, value);
+	case CGS_IND_REG__SMC:
+		return WREG32_SMC(index, value);
+	case CGS_IND_REG__UVD_CTX:
+		return WREG32_UVD_CTX(index, value);
+	case CGS_IND_REG__DIDT:
+		return WREG32_DIDT(index, value);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return;
+	}
+	WARN(1, "Invalid indirect register space");
+}
+
+static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint8_t val;
+	int ret = pci_read_config_byte(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_byte error"))
+		return 0;
+	return val;
+}
+
+static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint16_t val;
+	int ret = pci_read_config_word(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_word error"))
+		return 0;
+	return val;
+}
+
+static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+						 unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint32_t val;
+	int ret = pci_read_config_dword(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_dword error"))
+		return 0;
+	return val;
+}
+
+static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+					     uint8_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_byte(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_byte error");
+}
+
+static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+					     uint16_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_word(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_word error");
+}
+
+static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+					      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_dword(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_dword error");
+}
+
+static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+						  unsigned table, uint16_t *size,
+						  uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+	uint16_t data_start;
+
+	if (amdgpu_atom_parse_data_header(
+		    adev->mode_info.atom_context, table, size,
+		    frev, crev, &data_start))
+		return (uint8_t*)adev->mode_info.atom_context->bios +
+			data_start;
+
+	return NULL;
+}
+
+static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+					      uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+
+	if (amdgpu_atom_parse_cmd_header(
+		    adev->mode_info.atom_context, table,
+		    frev, crev))
+		return 0;
+
+	return -EINVAL;
+}
+
+static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+					  void *args)
+{
+	CGS_FUNC_ADEV;
+
+	return amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, table, args);
+}
+
+static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+				     int active)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+				       enum cgs_clock clock, unsigned freq)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+					enum cgs_engine engine, int powered)
+{
+	/* TODO */
+	return 0;
+}
+
+
+
+static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+					    enum cgs_clock clock,
+					    struct cgs_clock_limits *limits)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+					  const uint32_t *voltages)
+{
+	DRM_ERROR("not implemented");
+	return -EPERM;
+}
+
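+/* bookkeeping for an interrupt source registered through the CGS interface */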
+struct cgs_irq_params {
+	unsigned src_id;
+	cgs_irq_source_set_func_t set;
+	cgs_irq_handler_func_t handler;
+	void *private_data;
+};
+
+static int cgs_set_irq_state(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *src,
+			     unsigned type,
+			     enum amdgpu_interrupt_state state)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)src->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->set)
+		return -EINVAL;
+	return irq_params->set(irq_params->private_data,
+			       irq_params->src_id,
+			       type,
+			       (int)state);
+}
+
+static int cgs_process_irq(struct amdgpu_device *adev,
+			   struct amdgpu_irq_src *source,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)source->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->handler)
+		return -EINVAL;
+	return irq_params->handler(irq_params->private_data,
+				   irq_params->src_id,
+				   entry->iv_entry);
+}
+
+static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
+	.set = cgs_set_irq_state,
+	.process = cgs_process_irq,
+};
+
+static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+				     unsigned num_types,
+				     cgs_irq_source_set_func_t set,
+				     cgs_irq_handler_func_t handler,
+				     void *private_data)
+{
+	CGS_FUNC_ADEV;
+	int ret = 0;
+	struct cgs_irq_params *irq_params;
+	struct amdgpu_irq_src *source =
+		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+	if (!source)
+		return -ENOMEM;
+	irq_params =
+		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
+	if (!irq_params) {
+		kfree(source);
+		return -ENOMEM;
+	}
+	source->num_types = num_types;
+	source->funcs = &cgs_irq_funcs;
+	irq_params->src_id = src_id;
+	irq_params->set = set;
+	irq_params->handler = handler;
+	irq_params->private_data = private_data;
+	source->data = (void *)irq_params;
+	ret = amdgpu_irq_add_id(adev, src_id, source);
+	if (ret) {
+		kfree(irq_params);
+		kfree(source);
+	}
+
+	return ret;
+}
+
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+}
+
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+}
+
+int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+int amdgpu_cgs_set_powergating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_powergating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+
+static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+{
+	CGS_FUNC_ADEV;
+	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case CGS_UCODE_ID_SDMA0:
+		result = AMDGPU_UCODE_ID_SDMA0;
+		break;
+	case CGS_UCODE_ID_SDMA1:
+		result = AMDGPU_UCODE_ID_SDMA1;
+		break;
+	case CGS_UCODE_ID_CP_CE:
+		result = AMDGPU_UCODE_ID_CP_CE;
+		break;
+	case CGS_UCODE_ID_CP_PFP:
+		result = AMDGPU_UCODE_ID_CP_PFP;
+		break;
+	case CGS_UCODE_ID_CP_ME:
+		result = AMDGPU_UCODE_ID_CP_ME;
+		break;
+	case CGS_UCODE_ID_CP_MEC:
+	case CGS_UCODE_ID_CP_MEC_JT1:
+		result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_CP_MEC_JT2:
+		if (adev->asic_type == CHIP_TONGA)
+			result = AMDGPU_UCODE_ID_CP_MEC2;
+		else if (adev->asic_type == CHIP_CARRIZO)
+			result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_RLC_G:
+		result = AMDGPU_UCODE_ID_RLC_G;
+		break;
+	default:
+		DRM_ERROR("Firmware type not supported\n");
+	}
+	return result;
+}
+
+static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+					enum cgs_ucode_id type,
+					struct cgs_firmware_info *info)
+{
+	CGS_FUNC_ADEV;
+
+	if (CGS_UCODE_ID_SMU != type) {
+		uint64_t gpu_addr;
+		uint32_t data_size;
+		const struct gfx_firmware_header_v1_0 *header;
+		enum AMDGPU_UCODE_ID id;
+		struct amdgpu_firmware_info *ucode;
+
+		id = fw_type_convert(cgs_device, type);
+		ucode = &adev->firmware.ucode[id];
+		if (ucode->fw == NULL)
+			return -EINVAL;
+
+		gpu_addr  = ucode->mc_addr;
+		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+		data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
+		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
+			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			data_size = le32_to_cpu(header->jt_size) << 2;
+		}
+		info->mc_addr = gpu_addr;
+		info->image_size = data_size;
+		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
+	} else {
+		char fw_name[30] = {0};
+		int err = 0;
+		uint32_t ucode_size;
+		uint32_t ucode_start_address;
+		const uint8_t *src;
+		const struct smc_firmware_header_v1_0 *hdr;
+
+		switch (adev->asic_type) {
+		case CHIP_TONGA:
+			strcpy(fw_name, "amdgpu/tonga_smc.bin");
+			break;
+		default:
+			DRM_ERROR("SMC firmware not supported\n");
+			return -EINVAL;
+		}
+
+		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+		if (err) {
+			DRM_ERROR("Failed to request firmware\n");
+			return err;
+		}
+
+		err = amdgpu_ucode_validate(adev->pm.fw);
+		if (err) {
+			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+			release_firmware(adev->pm.fw);
+			adev->pm.fw = NULL;
+			return err;
+		}
+
+		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		src = (const uint8_t *)(adev->pm.fw->data +
+		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		info->version = adev->pm.fw_version;
+		info->image_size = ucode_size;
+		info->kptr = (void *)src;
+	}
+	return 0;
+}
+
+static const struct cgs_ops amdgpu_cgs_ops = {
+	amdgpu_cgs_gpu_mem_info,
+	amdgpu_cgs_gmap_kmem,
+	amdgpu_cgs_gunmap_kmem,
+	amdgpu_cgs_alloc_gpu_mem,
+	amdgpu_cgs_free_gpu_mem,
+	amdgpu_cgs_gmap_gpu_mem,
+	amdgpu_cgs_gunmap_gpu_mem,
+	amdgpu_cgs_kmap_gpu_mem,
+	amdgpu_cgs_kunmap_gpu_mem,
+	amdgpu_cgs_read_register,
+	amdgpu_cgs_write_register,
+	amdgpu_cgs_read_ind_register,
+	amdgpu_cgs_write_ind_register,
+	amdgpu_cgs_read_pci_config_byte,
+	amdgpu_cgs_read_pci_config_word,
+	amdgpu_cgs_read_pci_config_dword,
+	amdgpu_cgs_write_pci_config_byte,
+	amdgpu_cgs_write_pci_config_word,
+	amdgpu_cgs_write_pci_config_dword,
+	amdgpu_cgs_atom_get_data_table,
+	amdgpu_cgs_atom_get_cmd_table_revs,
+	amdgpu_cgs_atom_exec_cmd_table,
+	amdgpu_cgs_create_pm_request,
+	amdgpu_cgs_destroy_pm_request,
+	amdgpu_cgs_set_pm_request,
+	amdgpu_cgs_pm_request_clock,
+	amdgpu_cgs_pm_request_engine,
+	amdgpu_cgs_pm_query_clock_limits,
+	amdgpu_cgs_set_camera_voltages,
+	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_set_powergating_state,
+	amdgpu_cgs_set_clockgating_state
+};
+
+static const struct cgs_os_ops amdgpu_cgs_os_ops = {
+	amdgpu_cgs_import_gpu_mem,
+	amdgpu_cgs_add_irq_source,
+	amdgpu_cgs_irq_get,
+	amdgpu_cgs_irq_put
+};
+
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_cgs_device *cgs_device =
+		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
+
+	if (!cgs_device) {
+		DRM_ERROR("Couldn't allocate CGS device structure\n");
+		return NULL;
+	}
+
+	cgs_device->base.ops = &amdgpu_cgs_ops;
+	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
+	cgs_device->adev = adev;
+
+	return cgs_device;
+}
+
+void amdgpu_cgs_destroy_device(void *cgs_device)
+{
+	kfree(cgs_device);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 27df17a..89c3dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -75,6 +75,11 @@
 			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+				/* Don't try to start link training before we
+				 * have the dpcd */
+				if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1f040d8..3b355ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,6 +126,30 @@
 	return 0;
 }
 
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -147,13 +171,13 @@
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (chunk_array == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		r = -EFAULT;
@@ -161,7 +185,7 @@
 	}
 
 	p->nchunks = cs->in.num_chunks;
-	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
 	if (p->chunks == NULL) {
 		r = -ENOMEM;
@@ -173,7 +197,7 @@
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			r = -EFAULT;
@@ -183,7 +207,7 @@
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +259,10 @@
 		}
 	}
 
+
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
+	if (!p->ibs)
 		r = -ENOMEM;
-		goto out;
-	}
 
 out:
 	kfree(chunk_array);
@@ -331,7 +354,7 @@
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+			if ((lobj->allowed_domains & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -415,18 +438,8 @@
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
-
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -447,21 +460,45 @@
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
+
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	if (!amdgpu_enable_scheduler)
+	{
+		if (parser->ibs)
+			for (i = 0; i < parser->num_ibs; i++)
+				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+		kfree(parser->ibs);
+		if (parser->uf.bo)
+			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	}
+
+	kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, unvalidate the buffer; otherwise just free the memory
+ * used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -476,12 +513,18 @@
 	if (r)
 		return r;
 
+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +538,10 @@
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = bo_va->last_pt_update;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -529,9 +575,9 @@
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -650,7 +696,6 @@
 			ib->oa_size = amdgpu_bo_size(oa);
 		}
 	}
-
 	/* wrap the last IB with user fence */
 	if (parser->uf.bo) {
 		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -693,9 +738,9 @@
 			sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
+			struct fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -707,85 +752,137 @@
 			if (ctx == NULL)
 				return -EINVAL;
 
-			r = amdgpu_fence_recreate(ring, p->filp,
-						  deps[j].handle,
-						  &fence);
-			if (r) {
+			fence = amdgpu_ctx_get_fence(ctx, ring,
+						     deps[j].handle);
+			if (IS_ERR(fence)) {
+				r = PTR_ERR(fence);
 				amdgpu_ctx_put(ctx);
 				return r;
-			}
 
-			amdgpu_sync_fence(&ib->sync, fence);
-			amdgpu_fence_unref(&fence);
-			amdgpu_ctx_put(ctx);
+			} else if (fence) {
+				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				fence_put(fence);
+				amdgpu_ctx_put(ctx);
+				if (r)
+					return r;
+			}
 		}
 	}
 
 	return 0;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	if (sched_job->ibs)
+		for (i = 0; i < sched_job->num_ibs; i++)
+			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	if (sched_job->uf.bo)
+		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
-	int r, i;
+	struct amdgpu_cs_parser *parser;
 	bool reserved_buffers = false;
+	int i, r;
 
 	down_read(&adev->exclusive_lock);
 	if (!adev->accel_working) {
 		up_read(&adev->exclusive_lock);
 		return -EBUSY;
 	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
+
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	r = amdgpu_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (r == -ENOMEM)
-				DRM_ERROR("Not enough memory for command submission!\n");
-			else
-				DRM_ERROR("Failed to process the buffer list %d!\n", r);
-		}
+	r = amdgpu_cs_parser_relocs(parser);
+	if (r == -ENOMEM)
+		DRM_ERROR("Not enough memory for command submission!\n");
+	else if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Failed to process the buffer list %d!\n", r);
+	else if (!r) {
+		reserved_buffers = true;
+		r = amdgpu_cs_ib_fill(adev, parser);
 	}
 
 	if (!r) {
-		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_dependencies(adev, parser);
+		if (r)
+			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
 
-	if (!r)
-		r = amdgpu_cs_dependencies(adev, &parser);
-
-	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
-
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
-
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
+	if (r)
 		goto out;
+
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	if (r)
+		goto out;
+
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
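+		/* hand the IBs to the GPU scheduler; the job takes ownership of them */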
+		struct amdgpu_job *job;
+		struct amdgpu_ring *ring = parser->ibs->ring;
+
+		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job) {
+			r = -ENOMEM;
+			goto out;
+		}
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+		job->adev = parser->adev;
+		job->ibs = parser->ibs;
+		job->num_ibs = parser->num_ibs;
+		job->base.owner = parser->filp;
+		mutex_init(&job->job_lock);
+		if (job->ibs[job->num_ibs - 1].user) {
+			memcpy(&job->uf,  &parser->uf,
+			       sizeof(struct amdgpu_user_fence));
+			job->ibs[job->num_ibs - 1].user = &job->uf;
+		}
+
+		job->free_job = amdgpu_cs_free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			amdgpu_cs_free_job(job);
+			kfree(job);
+			goto out;
+		}
+		cs->out.handle =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &job->base.s_fence->base);
+		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+				&parser->validated,
+				&job->base.s_fence->base);
+
+		mutex_unlock(&job->job_lock);
+		amdgpu_cs_parser_fini_late(parser);
+		up_read(&adev->exclusive_lock);
+		return 0;
 	}
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -806,30 +903,29 @@
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
+	struct fence *fence;
 	long r;
 
+	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
+			       wait->in.ring, &ring);
+	if (r)
+		return r;
+
 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
 	if (ctx == NULL)
 		return -EINVAL;
 
-	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-			       wait->in.ring, &ring);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
+	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	if (IS_ERR(fence))
+		r = PTR_ERR(fence);
+	else if (fence) {
+		r = fence_wait_timeout(fence, true, timeout);
+		fence_put(fence);
+	} else
+		r = 1;
 
-	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
-
-	r = fence_wait_timeout(&fence->base, true, timeout);
-	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
@@ -864,7 +960,16 @@
 		if (!reloc->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+
+		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8..20cbc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr;
+	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	mgr = &ctx->fpriv->ctx_mgr;
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ctx->rings[i].sequence = 1;
 
-	idr_remove(&mgr->ctx_handles, ctx->id);
-	kfree(ctx);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_sched_rq *rq;
+			if (kernel)
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			else
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+						  &ctx->rings[i].entity,
+						  rq, amdgpu_sched_jobs);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_sched_entity_fini(adev->rings[j]->scheduler,
+						      &ctx->rings[j].entity);
+			kfree(ctx);
+			return r;
+		}
+	}
+	return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
-	int r;
-	struct amdgpu_ctx *ctx;
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_sched_entity_fini(adev->rings[i]->scheduler,
+					      &ctx->rings[i].entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+	int r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 	*id = (uint32_t)r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->id = *id;
-	ctx->fpriv = fpriv;
-	kref_init(&ctx->refcount);
+	r = amdgpu_ctx_init(adev, false, ctx);
 	mutex_unlock(&mgr->lock);
 
-	return 0;
+	return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 		mutex_unlock(&mgr->lock);
 		return 0;
@@ -86,9 +139,13 @@
 			    union drm_amdgpu_ctx_out *out)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
 	unsigned reset_counter;
 
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (!ctx) {
@@ -97,8 +154,8 @@
 	}
 
 	/* TODO: these two are always zero */
-	out->state.flags = ctx->state.flags;
-	out->state.hangs = ctx->state.hangs;
+	out->state.flags = 0x0;
+	out->state.hangs = 0x0;
 
 	/* determine if a GPU reset has occurred since the last call */
 	reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-	struct idr *idp;
-	struct amdgpu_ctx *ctx;
-	uint32_t id;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp,ctx,id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-			DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-	}
-
-	mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
 	uint32_t id;
-	uint32_t flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@
 
 	r = 0;
 	id = args->in.ctx_id;
-	flags = args->in.flags;
 
 	switch (args->in.op) {
 		case AMDGPU_CTX_OP_ALLOC_CTX:
-			r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+			r = amdgpu_ctx_alloc(adev, fpriv, &id);
 			args->out.alloc.ctx_id = id;
 			break;
 		case AMDGPU_CTX_OP_FREE_CTX:
-			r = amdgpu_ctx_free(adev, fpriv, id);
+			r = amdgpu_ctx_free(fpriv, id);
 			break;
 		case AMDGPU_CTX_OP_QUERY_STATE:
 			r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (!fpriv)
+		return NULL;
+
+	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,86 @@
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_mgr *mgr;
-
 	if (ctx == NULL)
 		return -EINVAL;
 
-	fpriv = ctx->fpriv;
-	mgr = &fpriv->ctx_mgr;
-	mutex_lock(&mgr->lock);
 	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-	mutex_unlock(&mgr->lock);
-
 	return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+	uint64_t seq = cring->sequence;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
+	if (other) {
+		signed long r;
+		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0)
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	}
+
+	fence_get(fence);
+
+	spin_lock(&ctx->ring_lock);
+	cring->fences[idx] = fence;
+	cring->sequence++;
+	spin_unlock(&ctx->ring_lock);
+
+	fence_put(other);
+
+	return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct fence *fence;
+
+	spin_lock(&ctx->ring_lock);
+
+	if (seq >= cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return NULL;
+	}
+
+	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	spin_unlock(&ctx->ring_lock);
+
+	return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+	mutex_init(&mgr->lock);
+	idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 99f158e..6ff6ae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@
 	"MULLINS",
 	"TOPAZ",
 	"TONGA",
+	"FIJI",
 	"CARRIZO",
 	"LAST",
 };
@@ -63,7 +64,7 @@
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (adev->flags & AMDGPU_IS_PX)
+	if (adev->flags & AMD_IS_PX)
 		return true;
 	return false;
 }
@@ -243,7 +244,8 @@
 
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -1160,6 +1162,7 @@
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 		if (adev->asic_type == CHIP_CARRIZO)
 			adev->family = AMDGPU_FAMILY_CZ;
@@ -1377,7 +1380,7 @@
 	adev->ddev = ddev;
 	adev->pdev = pdev;
 	adev->flags = flags;
-	adev->asic_type = flags & AMDGPU_ASIC_MASK;
+	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->is_atom_bios = false;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1523,6 +1526,11 @@
 		return r;
 	}
 
+	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	if (r) {
+		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1584,6 +1592,7 @@
 	adev->shutdown = true;
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
+	amdgpu_ctx_fini(&adev->kernel_ctx);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
@@ -1627,8 +1636,7 @@
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	int i, r;
-	bool force_completion = false;
+	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1667,21 +1675,7 @@
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring)
-			continue;
-
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			force_completion = true;
-		}
-	}
-	if (force_completion) {
-		amdgpu_fence_driver_force_completion(adev);
-	}
+	amdgpu_fence_driver_suspend(adev);
 
 	r = amdgpu_suspend(adev);
 
@@ -1739,6 +1733,8 @@
 
 	r = amdgpu_resume(adev);
 
+	amdgpu_fence_driver_resume(adev);
+
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b925..e3d7077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
+{
+	struct amdgpu_fence *fence;
+	long r;
+
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
+	if (fence) {
+		r = fence_wait(&fence->base, false);
+		if (r == -EDEADLK) {
+			up_read(&adev->exclusive_lock);
+			r = amdgpu_gpu_reset(adev);
+			down_read(&adev->exclusive_lock);
+		}
+	} else
+		r = fence_wait(*f, false);
+
+	if (r)
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+
+	/* We continue with the page flip even if we failed to wait on
+	 * the fence, otherwise the DRM core and userspace will be
+	 * confused about which BO the CRTC is scanning out
+	 */
+	fence_put(*f);
+	*f = NULL;
+}
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
@@ -44,34 +74,13 @@
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
-	struct amdgpu_fence *fence;
 	unsigned long flags;
-	int r;
+	unsigned i;
 
 	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
-		if (fence) {
-			r = amdgpu_fence_wait(fence, false);
-			if (r == -EDEADLK) {
-				up_read(&adev->exclusive_lock);
-				r = amdgpu_gpu_reset(adev);
-				down_read(&adev->exclusive_lock);
-			}
-		} else
-			r = fence_wait(work->fence, false);
-
-		if (r)
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
-		/* We continue with the page flip even if we failed to wait on
-		 * the fence, otherwise the DRM core and userspace will be
-		 * confused about which BO the CRTC is scanning out
-		 */
-
-		fence_put(work->fence);
-		work->fence = NULL;
-	}
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
 
@@ -127,7 +137,7 @@
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
 
@@ -212,7 +234,10 @@
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;
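
With reservation_object_get_fences_rcu() the flip path collects the exclusive fence plus every shared fence attached to the new BO and waits on each of them before programming the flip. Reduced to its essentials, and ignoring the return value of fence_wait() for brevity, the wait-and-release pattern looks roughly like this (a sketch, not the driver code):

	static void wait_and_drop_fences(struct fence *excl,
					 struct fence **shared, unsigned count)
	{
		unsigned i;

		if (excl) {
			fence_wait(excl, false);	/* error handling omitted */
			fence_put(excl);
		}
		for (i = 0; i < count; ++i) {
			fence_wait(shared[i], false);
			fence_put(shared[i]);
		}
		kfree(shared);	/* array allocated by the rcu helper */
	}
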
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 56da962..0fcc0bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -44,12 +44,15 @@
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * KMS wrapper.
  * - 3.0.0 - initial driver
+ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	0
+#define KMS_DRIVER_MINOR	1
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -61,7 +64,7 @@
 int amdgpu_hw_i2c = 0;
 int amdgpu_pcie_gen2 = -1;
 int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 10000;
+int amdgpu_lockup_timeout = 0;
 int amdgpu_dpm = -1;
 int amdgpu_smc_load_fw = 1;
 int amdgpu_aspm = -1;
@@ -73,6 +76,9 @@
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
 int amdgpu_exp_hw_support = 0;
+int amdgpu_enable_scheduler = 0;
+int amdgpu_sched_jobs = 16;
+int amdgpu_sched_hw_submission = 2;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -101,7 +107,7 @@
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -137,36 +143,45 @@
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
+module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
+
+MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
+module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+
+MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
+module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
-	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
 	/* Bonaire */
-	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
 	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -188,39 +203,39 @@
 	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	/* Kabini */
-	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
 	/* mullins */
-	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
 	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -238,12 +253,14 @@
 	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	/* fiji */
+	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 	/* carrizo */
-	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 
 	{0, 0, 0}
 };
@@ -279,7 +296,7 @@
 	unsigned long flags = ent->driver_data;
 	int ret;
 
-	if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
+	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
 		DRM_INFO("This hardware requires experimental hardware support.\n"
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
@@ -527,12 +544,15 @@
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
+	amdgpu_amdkfd_init();
+
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
 
 static void __exit amdgpu_exit(void)
 {
+	amdgpu_amdkfd_fini();
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 }
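
Every pciidlist entry packs the ASIC type and the AMD_IS_* capability bits into the single driver_data word; probe code splits them back apart with AMD_ASIC_MASK, just as the amdgpu_device.c hunk above does with adev->asic_type = flags & AMD_ASIC_MASK. Assuming the renamed constants keep the old amdgpu_family.h values (ASIC type in the low 16 bits, flags above), the decode is simply:

	unsigned long data = CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU;

	unsigned long asic = data & AMD_ASIC_MASK;	/* CHIP_KAVERI */
	bool is_apu = !!(data & AMD_IS_APU);		/* true */
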
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index cceeb33..e3a4f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -31,7 +31,7 @@
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
 
-#include "amdgpu_family.h"
+#include "amd_shared.h"
 
 /* General customization:
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
deleted file mode 100644
index 0698764..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-
-/* this file defines the CHIP_  and family flags used in the pciids,
- * its is common between kms and non-kms because duplicating it and
- * changing one place is fail.
- */
-#ifndef AMDGPU_FAMILY_H
-#define AMDGPU_FAMILY_H
-/*
- * Supported ASIC types
- */
-enum amdgpu_asic_type {
-	CHIP_BONAIRE = 0,
-	CHIP_KAVERI,
-	CHIP_KABINI,
-	CHIP_HAWAII,
-	CHIP_MULLINS,
-	CHIP_TOPAZ,
-	CHIP_TONGA,
-	CHIP_CARRIZO,
-	CHIP_LAST,
-};
-
-/*
- * Chip flags
- */
-enum amdgpu_chip_flags {
-	AMDGPU_ASIC_MASK = 0x0000ffffUL,
-	AMDGPU_FLAGS_MASK  = 0xffff0000UL,
-	AMDGPU_IS_MOBILITY = 0x00010000UL,
-	AMDGPU_IS_APU      = 0x00020000UL,
-	AMDGPU_IS_PX       = 0x00040000UL,
-	AMDGPU_EXP_HW_SUPPORT = 0x00080000UL,
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d2..8a122b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -126,8 +126,8 @@
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 				       AMDGPU_GEM_DOMAIN_VRAM,
-				       0, true,
-				       &gobj);
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       true, &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
 		       aligned_size);
@@ -179,7 +179,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct amdgpu_bo *rbo = NULL;
-	struct device *device = &adev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -201,9 +200,9 @@
 	rbo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -212,14 +211,13 @@
 	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
 
@@ -239,11 +237,6 @@
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = adev->mc.aper_size;
 
@@ -251,13 +244,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -269,6 +256,8 @@
 	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -290,17 +279,10 @@
 
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
 	if (adev->mode_info.rfbdev)
-		fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+			state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index a7189a1..1be2bd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@
 	(*fence)->ring = ring;
 	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		&ring->fence_drv.fence_queue.lock,
+		adev->fence_context + ring->idx,
 		(*fence)->seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
@@ -136,38 +137,6 @@
 }
 
 /**
- * amdgpu_fence_recreate - recreate a fence from an user fence
- *
- * @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @seq: user fence sequence number
- * @fence: resulting amdgpu fence object
- *
- * Recreates a fence command from the user fence sequence number (all asics).
- * Returns 0 on success, -ENOMEM on failure.
- */
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (seq > ring->fence_drv.sync_seq[ring->idx])
-		return -EINVAL;
-
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
-	if ((*fence) == NULL)
-		return -ENOMEM;
-
-	(*fence)->seq = seq;
-	(*fence)->ring = ring;
-	(*fence)->owner = owner;
-	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
-		(*fence)->seq);
-	return 0;
-}
-
-/**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
  * this function is called with fence_queue lock held, which is also used
@@ -196,9 +165,7 @@
 		else
 			FENCE_TRACE(&fence->base, "was already signaled\n");
 
-		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
-				fence->ring->fence_drv.irq_type);
-		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
 		fence_put(&fence->base);
 	} else
 		FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@
 		return;
 	}
 
-	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
-		fence_drv->delayed_irq = false;
-		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
-				fence_drv->irq_type);
+	if (amdgpu_fence_activity(ring)) {
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
-
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->adev->fence_queue);
 	else if (amdgpu_ring_is_lockup(ring)) {
 		/* good news we believe it's a lockup */
 		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@
 
 		/* remember that we need a reset */
 		ring->adev->needs_reset = true;
-		wake_up_all(&ring->adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
 	up_read(&ring->adev->exclusive_lock);
 }
@@ -332,62 +294,8 @@
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq[ring->idx];
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (wake)
-		wake_up_all(&ring->adev->fence_queue);
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
@@ -447,284 +355,49 @@
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type);
-		if (amdgpu_fence_activity(ring))
-			wake_up_all_locked(&adev->fence_queue);
-
-		/* did fence get signaled after we enabled the sw irq? */
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				ring->fence_drv.irq_type);
-			up_read(&adev->exclusive_lock);
-			return false;
-		}
-
-		up_read(&adev->exclusive_lock);
-	} else {
-		/* we're probably in a lockup, lets not fiddle too much */
-		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type))
-			ring->fence_drv.delayed_irq = true;
-		amdgpu_fence_schedule_check(ring);
-	}
-
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
-/**
- * amdgpu_fence_signaled - check if a fence has signaled
+/*
+ * amdgpu_fence_ring_wait_seq - wait for a specific sequence number on a ring
+ * @ring: ring to wait on for the seq number
+ * @seq: sequence number to wait for
  *
- * @fence: amdgpu fence object
- *
- * Check if the requested fence has signaled (all asics).
- * Returns true if the fence has signaled or false if it has not.
+ * return value:
+ * 0: seq signaled, and GPU is not hung
+ * -EDEADLK: GPU hang detected
+ * -EINVAL: some parameter is not valid
  */
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
+static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-	if (!fence)
-		return true;
+	struct amdgpu_device *adev = ring->adev;
+	bool signaled = false;
 
-	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
-		if (!fence_signal(&fence->base))
-			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
-		return true;
-	}
+	BUG_ON(!ring);
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
 
-	return false;
-}
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return 0;
 
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
-	unsigned i;
+	wait_event(ring->fence_drv.fence_queue, (
+		   (signaled = amdgpu_fence_seq_signaled(ring, seq))
+		   || adev->needs_reset));
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!adev->rings[i] || !seq[i])
-			continue;
-
-		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
-			return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
- */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-					  u64 *target_seq, bool intr,
-					  long timeout)
-{
-	uint64_t last_seq[AMDGPU_MAX_RINGS];
-	bool signaled;
-	int i;
-	long r;
-
-	if (timeout == 0) {
-		return amdgpu_fence_any_seq_signaled(adev, target_seq);
-	}
-
-	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
-			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-		}
-
-		if (intr) {
-			r = wait_event_interruptible_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		} else {
-			r = wait_event_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		}
-
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
-		}
-
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-
-			if (adev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				struct amdgpu_ring *ring = adev->rings[i];
-
-				if (!ring || !target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
-					break;
-			}
-
-			if (i != AMDGPU_MAX_RINGS)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				if (!adev->rings[i] || !target_seq[i])
-					continue;
-
-				if (amdgpu_ring_is_lockup(adev->rings[i]))
-					break;
-			}
-
-			if (i < AMDGPU_MAX_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(adev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				adev->needs_reset = true;
-				wake_up_all(&adev->fence_queue);
-				return -EDEADLK;
-			}
-
-			if (timeout < MAX_SCHEDULE_TIMEOUT) {
-				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
-				if (timeout <= 0) {
-					return 0;
-				}
-			}
-		}
-	}
-	return timeout;
-}
-
-/**
- * amdgpu_fence_wait - wait for a fence to signal
- *
- * @fence: amdgpu fence object
- * @intr: use interruptable sleep
- *
- * Wait for the requested fence to signal (all asics).
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
-
-	seq[fence->ring->idx] = fence->seq;
-	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-
-	r = fence_signal(&fence->base);
-	if (!r)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
-	return 0;
-}
-
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics).  Fence
- * array is indexed by ring id.  @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS];
-	unsigned i, num_rings = 0;
-	long r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		seq[i] = 0;
-
-		if (!fences[i]) {
-			continue;
-		}
-
-		seq[i] = fences[i]->seq;
-		++num_rings;
-	}
-
-	/* nothing to wait for ? */
-	if (num_rings == 0)
-		return -ENOENT;
-
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-	return 0;
+	if (signaled)
+		return 0;
+	else
+		return -EDEADLK;
 }
 
 /**
@@ -739,19 +412,12 @@
  */
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
 
-	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
+	if (seq >= ring->fence_drv.sync_seq[ring->idx])
 		return -ENOENT;
-	}
-	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0)
-		return r;
-	return 0;
+
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -766,23 +432,12 @@
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
 
-	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
-	if (!seq[ring->idx])
+	if (!seq)
 		return 0;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		if (r == -EDEADLK)
-			return -EDEADLK;
-
-		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
-			ring->idx, r);
-	}
-	return 0;
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -933,9 +588,12 @@
 		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
-	ring->fence_drv.initialized = true;
+	amdgpu_irq_get(adev, irq_src, irq_type);
+
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
+	ring->fence_drv.initialized = true;
+
 	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
 		 "cpu addr 0x%p\n", ring->idx,
 		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@
 	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
 			amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
+
+	if (amdgpu_enable_scheduler) {
+		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+						   ring->idx,
+						   amdgpu_sched_hw_submission,
+						   (void *)ring->adev);
+		if (!ring->scheduler)
+			DRM_ERROR("Failed to create scheduler on ring %d.\n",
+				  ring->idx);
+	}
 }
 
 /**
@@ -982,7 +650,6 @@
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	init_waitqueue_head(&adev->fence_queue);
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -1011,13 +678,78 @@
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(adev);
 		}
-		wake_up_all(&adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+		if (ring->scheduler)
+			amd_sched_destroy(ring->scheduler);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
 }
 
 /**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* wait for gpu to finish processing current batch */
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay GPU reset to resume */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+
+		/* disable the interrupt */
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+	int i;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* enable the interrupt */
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
  * amdgpu_fence_driver_force_completion - force all fence waiter to complete
  *
  * @adev: amdgpu device pointer
@@ -1104,6 +836,21 @@
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
+{
+	int idx;
+	struct fence *fence;
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+				return true;
+		}
+	}
+	return false;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -1121,12 +868,48 @@
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_device *adev = fence->ring->adev;
-	struct amdgpu_wait_cb cb;
 
-	cb.task = current;
+	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+}
 
-	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
-		return t;
+/**
+ * Wait on an array of fences, with a timeout
+ *
+ * @adev:     amdgpu device
+ * @array:    the array of amdgpu fence pointers
+ * @count:    the number of fences in the array
+ * @intr:     whether the sleep while waiting should be interruptible
+ * @t:        timeout to wait
+ *
+ * Returns when any fence in the array is signaled or the timeout expires.
+ */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array, uint32_t count,
+				  bool intr, signed long t)
+{
+	struct amdgpu_wait_cb *cb;
+	struct fence *fence;
+	unsigned idx;
+
+	BUG_ON(!array);
+
+	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		t = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence) {
+			cb[idx].task = current;
+			if (fence_add_callback(fence,
+					&cb[idx].base, amdgpu_fence_wait_cb)) {
+				/* The fence is already signaled */
+				goto fence_rm_cb;
+			}
+		}
+	}
 
 	while (t > 0) {
 		if (intr)
@@ -1135,10 +918,10 @@
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
 		/*
-		 * amdgpu_test_signaled must be called after
+		 * amdgpu_test_signaled_any must be called after
 		 * set_current_state to prevent a race with wake_up_process
 		 */
-		if (amdgpu_test_signaled(fence))
+		if (amdgpu_test_signaled_any(array, count))
 			break;
 
 		if (adev->needs_reset) {
@@ -1153,7 +936,16 @@
 	}
 
 	__set_current_state(TASK_RUNNING);
-	fence_remove_callback(f, &cb.base);
+
+fence_rm_cb:
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence && cb[idx].base.func)
+			fence_remove_callback(fence, &cb[idx].base);
+	}
+
+err_free_cb:
+	kfree(cb);
 
 	return t;
 }
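
amdgpu_fence_wait_any() generalizes the old single-fence wait: it arms one callback per fence, sleeps in TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE, and re-checks amdgpu_test_signaled_any() after every wakeup until a fence signals, a reset is flagged, or the timeout lapses. The callback side uses the driver's amdgpu_wait_cb shown above and simply wakes the task that armed it; a sketch of that piece (amdgpu_fence_wait_cb itself is not visible in these hunks, so the body below is assumed):

	static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
	{
		struct amdgpu_wait_cb *wait =
			container_of(cb, struct amdgpu_wait_cb, base);

		wake_up_process(wait->task);
	}
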
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e02db0b..cbd3a48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -125,7 +125,8 @@
 
 	if (adev->gart.robj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->gart.robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4afc507..5839fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -615,6 +615,7 @@
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
 		info.domains = robj->initial_domain;
 		info.domain_flags = robj->flags;
+		amdgpu_bo_unreserve(robj);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -622,17 +623,19 @@
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
 		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
 			r = -EPERM;
+			amdgpu_bo_unreserve(robj);
 			break;
 		}
 		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 						      AMDGPU_GEM_DOMAIN_GTT |
 						      AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_unreserve(robj);
 		break;
 	default:
+		amdgpu_bo_unreserve(robj);
 		r = -EINVAL;
 	}
 
-	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
@@ -653,7 +656,8 @@
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     0, ttm_bo_type_device,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     ttm_bo_type_device,
 				     &gobj);
 	if (r)
 		return -ENOMEM;
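
The unreserve reordering above is a lock-ordering fix: copy_to_user() may fault and sleep, so the BO reservation has to be dropped before the reply is copied out, and each switch arm now unreserves before (or instead of) touching user memory. The safe shape of the pattern, in the abstract:

	/* Gather the reply while the BO is still reserved ... */
	info.domains = robj->initial_domain;
	/* ... then drop the reservation before any user-memory access,
	 * since copy_to_user() may fault and sleep. */
	amdgpu_bo_unreserve(robj);
	if (copy_to_user(out, &info, sizeof(info)))
		r = -EFAULT;
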
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc0fac6..c439735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -73,28 +73,12 @@
 
 		if (!vm)
 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
-		else
-			ib->gpu_addr = 0;
-
-	} else {
-		ib->sa_bo = NULL;
-		ib->ptr = NULL;
-		ib->gpu_addr = 0;
 	}
 
 	amdgpu_sync_create(&ib->sync);
 
 	ib->ring = ring;
-	ib->fence = NULL;
-	ib->user = NULL;
 	ib->vm = vm;
-	ib->gds_base = 0;
-	ib->gds_size = 0;
-	ib->gws_base = 0;
-	ib->gws_size = 0;
-	ib->oa_base = 0;
-	ib->oa_size = 0;
-	ib->flags = 0;
 
 	return 0;
 }
@@ -109,8 +93,8 @@
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-	amdgpu_sync_free(adev, &ib->sync, ib->fence);
-	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
 	amdgpu_fence_unref(&ib->fence);
 }
 
@@ -156,7 +140,11 @@
 		dev_err(adev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
-
+	r = amdgpu_sync_wait(&ibs->sync);
+	if (r) {
+		dev_err(adev->dev, "IB sync failed (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
@@ -165,9 +153,11 @@
 
 	if (vm) {
 		/* grab a vm id if necessary */
-		struct amdgpu_fence *vm_id_fence = NULL;
-		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
+		if (r) {
+			amdgpu_ring_unlock_undo(ring);
+			return r;
+		}
 	}
 
 	r = amdgpu_sync_rings(&ibs->sync, ring);
@@ -212,11 +202,15 @@
 		return r;
 	}
 
+	if (!amdgpu_enable_scheduler && ib->ctx)
+		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+						    &ib->fence->base);
+
 	/* wrap the last IB with fence */
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
 		addr += ib->user->offset;
-		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index db5422e..5c8a803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
 
 /**
  * amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -97,18 +98,12 @@
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
+			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
+								 adev->irq.ih.ring_size + 8,
+								 &adev->irq.ih.rb_dma_addr);
 			if (adev->irq.ih.ring == NULL)
 				return -ENOMEM;
-			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
-								  (void *)adev->irq.ih.ring,
-								  adev->irq.ih.ring_size,
-								  PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
-				dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
-				kfree((void *)adev->irq.ih.ring);
-				return -ENOMEM;
-			}
+			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
 			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
 			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
 		}
@@ -148,9 +143,9 @@
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
-					 adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
-			kfree((void *)adev->irq.ih.ring);
+			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
+					    (void *)adev->irq.ih.ring,
+					    adev->irq.ih.rb_dma_addr);
 			adev->irq.ih.ring = NULL;
 		}
 	} else {
@@ -199,6 +194,14 @@
 	rmb();
 
 	while (adev->irq.ih.rptr != wptr) {
+		u32 ring_index = adev->irq.ih.rptr >> 2;
+
+		/* Before dispatching irq to IP blocks, send it to amdkfd */
+		amdgpu_amdkfd_interrupt(adev,
+				(const void *) &adev->irq.ih.ring[ring_index]);
+
+		entry.iv_entry = (const uint32_t *)
+			&adev->irq.ih.ring[ring_index];
 		amdgpu_ih_decode_iv(adev, &entry);
 		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
 
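
The IH ring change above replaces a kzalloc() plus pci_map_single()
streaming mapping with a single coherent allocation, which drops the
pci_dma_mapping_error() failure path entirely. A hedged sketch of the
resulting pattern, not the driver code (ih_ring_alloc/ih_ring_free are
invented wrappers; pci_alloc_consistent() is the legacy PCI front end
for dma_alloc_coherent()):

/* Sketch under the assumptions above: allocate, zero, and later free
 * one coherent DMA buffer for a ring plus its rptr/wptr shadows. */
static void *ih_ring_alloc(struct pci_dev *pdev, size_t size,
			   dma_addr_t *dma)
{
	void *ring = pci_alloc_consistent(pdev, size, dma);

	if (!ring)
		return NULL;		/* only failure mode left */
	memset(ring, 0, size);		/* match the old kzalloc() behavior */
	return ring;
}

static void ih_ring_free(struct pci_dev *pdev, size_t size,
			 void *ring, dma_addr_t dma)
{
	pci_free_consistent(pdev, size, ring, dma);
}
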
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index c62b09e..ba38ae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -52,6 +52,7 @@
 	unsigned ring_id;
 	unsigned vm_id;
 	unsigned pas_id;
+	const uint32_t *iv_entry;
 };
 
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4d36f0..0aba8e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -272,6 +272,11 @@
 
 		kfree(src->enabled_types);
 		src->enabled_types = NULL;
+		if (src->data) {
+			kfree(src->data);
+			kfree(src);
+			adev->irq.sources[i] = NULL;
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 8299795..17b01aef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -40,6 +40,7 @@
 	unsigned				num_types;
 	atomic_t				*enabled_types;
 	const struct amdgpu_irq_src_funcs	*funcs;
+	void *data;
 };
 
 /* provided by interrupt generating IP blocks */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3bfe67d..2236793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -34,6 +34,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
 
 #if defined(CONFIG_VGA_SWITCHEROO)
 bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@
 
 	pm_runtime_get_sync(dev->dev);
 
+	amdgpu_amdkfd_device_fini(adev);
+
 	amdgpu_acpi_fini(adev);
 
 	amdgpu_device_fini(adev);
@@ -93,8 +96,8 @@
 
 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
-	    ((flags & AMDGPU_IS_APU) == 0))
-		flags |= AMDGPU_IS_PX;
+	    ((flags & AMD_IS_APU) == 0))
+		flags |= AMD_IS_PX;
 
 	/* amdgpu_device_init should report only fatal error
 	 * like memory allocation failure or iomapping failure,
@@ -118,6 +121,10 @@
 				"Error during ACPI methods call\n");
 	}
 
+	amdgpu_amdkfd_load_interface(adev);
+	amdgpu_amdkfd_device_probe(adev);
+	amdgpu_amdkfd_device_init(adev);
+
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -444,11 +451,11 @@
 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
 		dev_info._pad = 0;
 		dev_info.ids_flags = 0;
-		if (adev->flags & AMDGPU_IS_APU)
+		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
+		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
 					     AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -520,10 +527,7 @@
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
 
-	/* init context manager */
-	mutex_init(&fpriv->ctx_mgr.lock);
-	idr_init(&fpriv->ctx_mgr.ctx_handles);
-	fpriv->ctx_mgr.adev = adev;
+	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
 	file_priv->driver_priv = fpriv;
 
@@ -556,6 +560,8 @@
 	if (!fpriv)
 		return;
 
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -564,9 +570,6 @@
 	idr_destroy(&fpriv->bo_list_handles);
 	mutex_destroy(&fpriv->bo_list_lock);
 
-	/* release context */
-	amdgpu_ctx_fini(fpriv);
-
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da6424..08b09d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -127,7 +127,7 @@
 			placements[c].fpfn =
 				adev->mc.visible_vram_size >> PAGE_SHIFT;
 			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM;
+				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
 		}
 		placements[c].fpfn = 0;
 		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
@@ -223,18 +223,6 @@
 	size_t acc_size;
 	int r;
 
-	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
-	 * do this as a temporary workaround
-	 */
-	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		if (adev->asic_type >= CHIP_TOPAZ) {
-			if (byte_align & 0x7fff)
-				byte_align = ALIGN(byte_align, 0x8000);
-			if (size & 0x7fff)
-				size = ALIGN(size, 0x8000);
-		}
-	}
-
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
@@ -462,7 +450,7 @@
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
-	if (0 && (adev->flags & AMDGPU_IS_APU)) {
+	if (0 && (adev->flags & AMD_IS_APU)) {
 		/* Useless to evict on IGP chips */
 		return 0;
 	}
@@ -478,7 +466,6 @@
 	}
 	dev_err(adev->dev, "Userspace still has active objects!\n");
 	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-		mutex_lock(&adev->ddev->struct_mutex);
 		dev_err(adev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->adev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&adev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
@@ -658,13 +644,13 @@
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		reservation_object_add_excl_fence(resv, fence);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 675bdc3..6ea18dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -161,7 +161,7 @@
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
 
 /*
@@ -193,7 +193,7 @@
 			    unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			      struct amdgpu_sa_bo **sa_bo,
-			      struct amdgpu_fence *fence);
+			      struct fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ed13baa..efed115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -82,7 +82,7 @@
 	mutex_unlock(&adev->pm.mutex);
 
 	/* Can't set dpm state when the card is off */
-	if (!(adev->flags & AMDGPU_IS_PX) ||
+	if (!(adev->flags & AMD_IS_PX) ||
 	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
 		amdgpu_pm_compute_clocks(adev);
 fail:
@@ -538,7 +538,7 @@
 		/* vce just modifies an existing state so force a change */
 		if (ps->vce_active != adev->pm.dpm.vce_active)
 			goto force;
-		if (adev->flags & AMDGPU_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			/* for APUs if the num crtcs changed but state is the same,
 			 * all we need to do is update the display configuration.
 			 */
@@ -580,7 +580,6 @@
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&adev->ddev->struct_mutex);
 	mutex_lock(&adev->ring_lock);
 
 	/* update whether vce is active */
@@ -628,7 +627,6 @@
 
 done:
 	mutex_unlock(&adev->ring_lock);
-	mutex_unlock(&adev->ddev->struct_mutex);
 }
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 855e219..9bec914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -131,6 +131,22 @@
 	return 0;
 }
 
+/**
+ * amdgpu_ring_insert_nop - insert NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @count: the number of NOP packets to insert
+ *
+ * This is the generic insert_nop function for rings except SDMA
+ */
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
@@ -143,10 +158,13 @@
  */
 void amdgpu_ring_commit(struct amdgpu_ring *ring)
 {
+	uint32_t count;
+
 	/* We pad to match fetch size */
-	while (ring->wptr & ring->align_mask) {
-		amdgpu_ring_write(ring, ring->nop);
-	}
+	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
+	count %= ring->align_mask + 1;
+	ring->funcs->insert_nop(ring, count);
+
 	mb();
 	amdgpu_ring_set_wptr(ring);
 }
@@ -342,6 +360,8 @@
 		amdgpu_fence_driver_init_ring(ring);
 	}
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -367,7 +387,7 @@
 	}
 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
+	spin_lock_init(&ring->fence_lock);
 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 	if (r) {
 		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
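
amdgpu_ring_commit() above now computes the whole pad length up front and
hands it to the ring's insert_nop hook, instead of writing NOPs one at a
time in a loop. The arithmetic is "distance to the next alignment
boundary, modulo the alignment"; a runnable check (pad_count is an
invented name for the inline computation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as amdgpu_ring_commit(): NOPs needed to round wptr
 * up to the next (align_mask + 1) boundary, or 0 if already aligned. */
static uint32_t pad_count(uint32_t wptr, uint32_t align_mask)
{
	uint32_t count = align_mask + 1 - (wptr & align_mask);

	return count % (align_mask + 1);
}

int main(void)
{
	assert(pad_count(16, 7) == 0);	/* already aligned: no padding */
	assert(pad_count(17, 7) == 7);	/* one word past the boundary */
	assert(pad_count(23, 7) == 1);	/* one word short of the boundary */
	printf("ok\n");
	return 0;
}
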
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987..74dad27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -160,7 +174,8 @@
 
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
-		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+		if (sa_bo->fence == NULL ||
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -245,7 +260,7 @@
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -274,7 +289,7 @@
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!amdgpu_fence_signaled(sa_bo->fence)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -298,7 +313,8 @@
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -314,9 +330,10 @@
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
+	signed long t;
 
 	BUG_ON(align > sa_manager->align);
 	BUG_ON(size > sa_manager->size);
@@ -350,7 +367,9 @@
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 		spin_unlock(&sa_manager->wq.lock);
-		r = amdgpu_fence_wait_any(adev, fences, false);
+		t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+					  false, MAX_SCHEDULE_TIMEOUT);
+		r = (t > 0) ? 0 : t;
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
@@ -369,7 +388,7 @@
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -379,10 +398,11 @@
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !amdgpu_fence_signaled(fence)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -409,8 +429,16 @@
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016x on ring %d",
+					   s_fence->base.seqno,
+					   s_fence->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}
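
amdgpu_sa_get_ring_from_fence() above recovers a ring index from a bare
struct fence by probing which wrapper type the fence is embedded in (a
scheduler fence or a hardware fence) and downcasting accordingly. A
minimal userspace model of that probe-and-downcast pattern, using an ops
pointer as the type tag the way to_amdgpu_fence()/to_amd_sched_fence()
do (all names below are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence { const void *ops; };	/* base, identified by its ops */

static const int hw_ops, sched_ops;	/* two distinct tag addresses */

struct hw_fence    { struct fence base; unsigned ring_idx; };
struct sched_fence { struct fence base; unsigned ring_id; };

/* Probe the concrete type, downcast with container_of(), and read the
 * ring from whichever field the wrapper actually has. */
static unsigned ring_from_fence(struct fence *f)
{
	if (f->ops == &sched_ops)
		return container_of(f, struct sched_fence, base)->ring_id;
	if (f->ops == &hw_ops)
		return container_of(f, struct hw_fence, base)->ring_idx;
	return 0;	/* unknown fence type: fall back to ring 0 */
}

int main(void)
{
	struct hw_fence hw = { { &hw_ops }, 3 };

	printf("ring %u\n", ring_from_fence(&hw.base));	/* ring 3 */
	return 0;
}
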
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 0000000..de98fbd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+}
+
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+	struct amdgpu_fence *fence;
+	int r;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return NULL;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	mutex_lock(&sched_job->job_lock);
+	r = amdgpu_ib_schedule(sched_job->adev,
+			       sched_job->num_ibs,
+			       sched_job->ibs,
+			       sched_job->base.owner);
+	if (r)
+		goto err;
+	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
+
+	mutex_unlock(&sched_job->job_lock);
+	return &fence->base;
+
+err:
+	DRM_ERROR("Run job error\n");
+	mutex_unlock(&sched_job->job_lock);
+	job->sched->ops->process_job(job);
+	return NULL;
+}
+
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	/* after processing job, free memory */
+	fence_put(&sched_job->base.s_fence->base);
+	kfree(sched_job);
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+	.dependency = amdgpu_sched_dependency,
+	.run_job = amdgpu_sched_run_job,
+	.process_job = amdgpu_sched_process_job
+};
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **f)
+{
+	int r = 0;
+	if (amdgpu_enable_scheduler) {
+		struct amdgpu_job *job =
+			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
+			return -ENOMEM;
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->adev = adev;
+		job->ibs = ibs;
+		job->num_ibs = num_ibs;
+		job->base.owner = owner;
+		mutex_init(&job->job_lock);
+		job->free_job = free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			kfree(job);
+			return r;
+		}
+		*f = fence_get(&job->base.s_fence->base);
+		mutex_unlock(&job->job_lock);
+	} else {
+		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
+		if (r)
+			return r;
+		*f = fence_get(&ibs[num_ibs - 1].fence->base);
+	}
+
+	return 0;
+}
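
amdgpu_sched_run_job() above turns the generic amd_sched_job pointer back
into the driver's amdgpu_job with a plain cast, which is only safe because
the base member sits first in the wrapper struct. A small illustration of
why that first-member downcast works (names are invented):

#include <stdio.h>

struct base_job { int owner; };

/* The cast below is legal only because 'base' is the FIRST member: a
 * pointer to a struct and a pointer to its first member share the same
 * address (C11 6.7.2.1p15). */
struct driver_job {
	struct base_job base;
	int num_ibs;
};

int main(void)
{
	struct driver_job job = { .base = { .owner = 7 }, .num_ibs = 2 };
	struct base_job *generic = &job.base;	/* what the scheduler sees */
	struct driver_job *back = (struct driver_job *)generic;

	printf("num_ibs=%d owner=%d\n", back->num_ibs, back->base.owner);
	return 0;
}
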
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index d6d41a4..ff3ca52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -87,7 +87,7 @@
 
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence)
+			   struct fence *fence)
 {
 	if (semaphore == NULL || *semaphore == NULL) {
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbd..068aeaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -32,6 +32,11 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+struct amdgpu_sync_entry {
+	struct hlist_node	node;
+	struct fence		*fence;
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,36 +54,104 @@
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		sync->sync_to[i] = NULL;
 
+	hash_init(sync->fences);
 	sync->last_vm_update = NULL;
 }
 
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (a_fence)
+		return a_fence->ring->adev == adev;
+	if (s_fence)
+		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+	return false;
+}
+
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->owner == owner;
+	if (a_fence)
+		return a_fence->owner == owner;
+	return false;
+}
+
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f)
 {
+	struct amdgpu_sync_entry *e;
+	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
+	struct fence *tmp, *later;
 
-	if (!fence)
-		return;
+	if (!f)
+		return 0;
+
+	if (amdgpu_sync_same_dev(adev, f) &&
+	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+		if (sync->last_vm_update) {
+			tmp = sync->last_vm_update;
+			BUG_ON(f->context != tmp->context);
+			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+			sync->last_vm_update = fence_get(later);
+			fence_put(tmp);
+		} else
+			sync->last_vm_update = fence_get(f);
+	}
+
+	fence = to_amdgpu_fence(f);
+	if (!fence || fence->ring->adev != adev) {
+		hash_for_each_possible(sync->fences, e, node, f->context) {
+			struct fence *new;
+			if (unlikely(e->fence->context != f->context))
+				continue;
+			new = fence_get(fence_later(e->fence, f));
+			if (new) {
+				fence_put(e->fence);
+				e->fence = new;
+			}
+			return 0;
+		}
+
+		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+		if (!e)
+			return -ENOMEM;
+
+		hash_add(sync->fences, &e->node, f->context);
+		e->fence = fence_get(f);
+		return 0;
+	}
 
 	other = sync->sync_to[fence->ring->idx];
 	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
 		amdgpu_fence_later(fence, other));
 	amdgpu_fence_unref(&other);
 
-	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-		other = sync->last_vm_update;
-		sync->last_vm_update = amdgpu_fence_ref(
-			amdgpu_fence_later(fence, other));
-		amdgpu_fence_unref(&other);
-	}
+	return 0;
+}
+
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (s_fence)
+		return s_fence->owner;
+	else if (a_fence)
+		return a_fence->owner;
+	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
 /**
@@ -97,7 +170,7 @@
 {
 	struct reservation_object_list *flist;
 	struct fence *f;
-	struct amdgpu_fence *fence;
+	void *fence_owner;
 	unsigned i;
 	int r = 0;
 
@@ -106,11 +179,7 @@
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	fence = f ? to_amdgpu_fence(f) : NULL;
-	if (fence && fence->ring->adev == adev)
-		amdgpu_sync_fence(sync, fence);
-	else if (f)
-		r = fence_wait(f, true);
+	r = amdgpu_sync_fence(adev, sync, f);
 
 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -119,20 +188,72 @@
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
 					      reservation_object_held(resv));
-		fence = f ? to_amdgpu_fence(f) : NULL;
-		if (fence && fence->ring->adev == adev) {
-			if (fence->owner != owner ||
-			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-				amdgpu_sync_fence(sync, fence);
-		} else if (f) {
-			r = fence_wait(f, true);
-			if (r)
-				break;
+		if (amdgpu_sync_same_dev(adev, f)) {
+			/* VM updates are only interesting
+			 * for other VM updates and moves.
+			 */
+			fence_owner = amdgpu_sync_get_owner(f);
+			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
+			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+				continue;
+
+			/* Ignore fence from the same owner as
+			 * long as it isn't undefined.
+			 */
+			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+			    fence_owner == owner)
+				continue;
 		}
+
+		r = amdgpu_sync_fence(adev, sync, f);
+		if (r)
+			break;
 	}
 	return r;
 }
 
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct fence *f;
+	int i;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+		f = e->fence;
+
+		hash_del(&e->node);
+		kfree(e);
+
+		if (!fence_is_signaled(f))
+			return f;
+
+		fence_put(f);
+	}
+	return NULL;
+}
+
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		r = fence_wait(e->fence, false);
+		if (r)
+			return r;
+
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+	return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -164,9 +285,9 @@
 			return -EINVAL;
 		}
 
-		if (count >= AMDGPU_NUM_SYNCS) {
+		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
 			/* not enough room, wait manually */
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -186,7 +307,7 @@
 		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
 			/* signaling wasn't successful, wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -196,7 +317,7 @@
 		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
 			/* waiting wasn't successful, wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -220,15 +341,23 @@
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
 		      struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence)
+		      struct fence *fence)
 {
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
 	unsigned i;
 
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+
 	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
 		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		amdgpu_fence_unref(&sync->sync_to[i]);
 
-	amdgpu_fence_unref(&sync->last_vm_update);
+	fence_put(sync->last_vm_update);
 }
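
The new amdgpu_sync_entry hash above keeps at most one fence per fence
context, always preferring the later one, so a sync object never piles up
stale dependencies for the same context. A runnable model with a flat
array standing in for the kernel hashtable (toy_fence, seq_later, and
sync_add are invented names); the "later" test matches the wraparound-safe
seqno comparison used in the patch:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fence { uint64_t context; uint32_t seqno; };

/* b is later than (or equal to) a when it is at most INT_MAX steps
 * ahead -- the same unsigned trick as f->seqno - tmp->seqno <= INT_MAX
 * above, which stays correct across 32-bit wraparound. */
static int seq_later(uint32_t a, uint32_t b)
{
	return b - a <= (uint32_t)INT_MAX;
}

#define MAX_SLOTS 8
static struct toy_fence slots[MAX_SLOTS];
static unsigned nslots;

/* Keep one fence per context, replacing it only with a later one. */
static void sync_add(struct toy_fence f)
{
	unsigned i;

	for (i = 0; i < nslots; ++i) {
		if (slots[i].context != f.context)
			continue;
		if (seq_later(slots[i].seqno, f.seqno))
			slots[i] = f;
		return;
	}
	slots[nslots++] = f;	/* first fence seen for this context */
}

int main(void)
{
	sync_add((struct toy_fence){ .context = 1, .seqno = 5 });
	sync_add((struct toy_fence){ .context = 1, .seqno = 9 });
	sync_add((struct toy_fence){ .context = 2, .seqno = 1 });
	printf("ctx 1 keeps seqno %u, %u contexts\n",
	       slots[0].seqno, nslots);	/* 9, 2 */
	return 0;
}
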
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index df20299..f80b1a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -77,7 +77,7 @@
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
-		struct amdgpu_fence *fence = NULL;
+		struct fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
@@ -116,13 +116,13 @@
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(vram_obj, &vram_map);
 		if (r) {
@@ -161,13 +161,13 @@
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -214,7 +214,7 @@
 			amdgpu_bo_unref(&gtt_obj[i]);
 		}
 		if (fence)
-			amdgpu_fence_unref(&fence);
+			fence_put(fence);
 		break;
 	}
 
@@ -238,7 +238,7 @@
 
 static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
 					     struct amdgpu_ring *ring,
-					     struct amdgpu_fence **fence)
+					     struct fence **fence)
 {
 	uint32_t handle = ring->idx ^ 0xdeafbeef;
 	int r;
@@ -269,15 +269,16 @@
 			DRM_ERROR("Failed to get dummy destroy msg\n");
 			return r;
 		}
-
 	} else {
+		struct amdgpu_fence *a_fence = NULL;
 		r = amdgpu_ring_lock(ring, 64);
 		if (r) {
 			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
 			return r;
 		}
-		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
 		amdgpu_ring_unlock_commit(ring);
+		*fence = &a_fence->base;
 	}
 	return 0;
 }
@@ -286,7 +287,7 @@
 			   struct amdgpu_ring *ringA,
 			   struct amdgpu_ring *ringB)
 {
-	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
+	struct fence *fence1 = NULL, *fence2 = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	int r;
 
@@ -322,7 +323,7 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence1)) {
+	if (fence_is_signaled(fence1)) {
 		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -335,7 +336,7 @@
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence1, false);
+	r = fence_wait(fence1, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 1\n");
 		goto out_cleanup;
@@ -343,7 +344,7 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence2)) {
+	if (fence_is_signaled(fence2)) {
 		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -356,7 +357,7 @@
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence2, false);
+	r = fence_wait(fence2, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 2\n");
 		goto out_cleanup;
@@ -366,10 +367,10 @@
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fence1)
-		amdgpu_fence_unref(&fence1);
+		fence_put(fence1);
 
 	if (fence2)
-		amdgpu_fence_unref(&fence2);
+		fence_put(fence2);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
@@ -380,7 +381,7 @@
 			    struct amdgpu_ring *ringB,
 			    struct amdgpu_ring *ringC)
 {
-	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
+	struct fence *fenceA = NULL, *fenceB = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	bool sigA, sigB;
 	int i, r;
@@ -416,11 +417,11 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fenceA)) {
+	if (fence_is_signaled(fenceA)) {
 		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
-	if (amdgpu_fence_signaled(fenceB)) {
+	if (fence_is_signaled(fenceB)) {
 		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -435,8 +436,8 @@
 
 	for (i = 0; i < 30; ++i) {
 		mdelay(100);
-		sigA = amdgpu_fence_signaled(fenceA);
-		sigB = amdgpu_fence_signaled(fenceB);
+		sigA = fence_is_signaled(fenceA);
+		sigB = fence_is_signaled(fenceB);
 		if (sigA || sigB)
 			break;
 	}
@@ -461,12 +462,12 @@
 
 	mdelay(1000);
 
-	r = amdgpu_fence_wait(fenceA, false);
+	r = fence_wait(fenceA, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence A\n");
 		goto out_cleanup;
 	}
-	r = amdgpu_fence_wait(fenceB, false);
+	r = fence_wait(fenceB, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence B\n");
 		goto out_cleanup;
@@ -476,10 +477,10 @@
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fenceA)
-		amdgpu_fence_unref(&fenceA);
+		fence_put(fenceA);
 
 	if (fenceB)
-		amdgpu_fence_unref(&fenceB);
+		fence_put(fenceB);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dd3415d..b5abd5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@
 	struct amdgpu_device *adev;
 	struct amdgpu_ring *ring;
 	uint64_t old_start, new_start;
-	struct amdgpu_fence *fence;
+	struct fence *fence;
 	int r;
 
 	adev = amdgpu_get_adev(bo->bdev);
@@ -269,9 +269,9 @@
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
 			       bo->resv, &fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+	r = ttm_bo_move_accel_cleanup(bo, fence,
 				      evict, no_wait_gpu, new_mem);
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -859,7 +859,8 @@
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 			     NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
@@ -987,46 +988,48 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence)
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_sync sync;
 	uint32_t max_bytes;
 	unsigned num_loops, num_dw;
+	struct amdgpu_ib *ib;
 	unsigned i;
 	int r;
 
-	/* sync other rings */
-	amdgpu_sync_create(&sync);
-	if (resv) {
-		r = amdgpu_sync_resv(adev, &sync, resv, false);
-		if (r) {
-			DRM_ERROR("sync failed (%d).\n", r);
-			amdgpu_sync_free(adev, &sync, NULL);
-			return r;
-		}
-	}
-
 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
 
-	/* for fence and sync */
-	num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
 
-	r = amdgpu_ring_lock(ring, num_dw);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
 	if (r) {
-		DRM_ERROR("ring lock failed (%d).\n", r);
-		amdgpu_sync_free(adev, &sync, NULL);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_sync_rings(&sync, ring);
+	ib->length_dw = 0;
+
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &ib->sync, resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
+	}
 
 	for (i = 0; i < num_loops; i++) {
 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-		amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+		amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
 					cur_size_in_bytes);
 
 		src_offset += cur_size_in_bytes;
@@ -1034,17 +1037,24 @@
 		byte_count -= cur_size_in_bytes;
 	}
 
-	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
-	if (r) {
-		amdgpu_ring_unlock_undo(ring);
-		amdgpu_sync_free(adev, &sync, NULL);
-		return r;
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > num_dw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_MOVE,
+						 fence);
+	if (r)
+		goto error_free;
+
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 	}
-
-	amdgpu_ring_unlock_commit(ring);
-	amdgpu_sync_free(adev, &sync, *fence);
-
 	return 0;
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f5c2255..2cf6c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@
 	case CHIP_TONGA:
 		fw_name = FIRMWARE_TONGA;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
@@ -149,7 +154,9 @@
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
@@ -216,31 +223,32 @@
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	unsigned size;
-	void *ptr;
-	const struct common_firmware_header *hdr;
-	int i;
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0) {
+			struct fence *fence;
 
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+			amdgpu_uvd_note_usage(adev);
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
+			fence_wait(fence, false);
+			fence_put(fence);
 
-	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
-
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(adev->uvd.saved_bo, ptr, size);
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
 
 	return 0;
 }
@@ -265,12 +273,7 @@
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else
-		memset(ptr, 0, size);
+	memset(ptr, 0, size);
 
 	return 0;
 }
@@ -283,7 +286,7 @@
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;
 
 			amdgpu_uvd_note_usage(adev);
 
@@ -293,8 +296,8 @@
 				continue;
 			}
 
-			amdgpu_fence_wait(fence, false);
-			amdgpu_fence_unref(&fence);
+			fence_wait(fence, false);
+			fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -374,7 +377,8 @@
 	unsigned height_in_mb = ALIGN(height / 16, 2);
 	unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
+	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+	unsigned min_ctx_size = 0;
 
 	image_size = width * height;
 	image_size += image_size / 2;
@@ -507,28 +511,25 @@
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
 	int32_t *msg, msg_type, handle;
-	struct fence *f;
 	void *ptr;
-
-	int i, r;
+	long r;
+	int i;
 
 	if (offset & 0x3F) {
 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.resv);
-	if (f) {
-		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
-		if (r) {
-			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
-			return r;
-		}
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
+		return r;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
 		return r;
 	}
 
@@ -803,14 +804,24 @@
 	return 0;
 }
 
+static int amdgpu_uvd_free_job(
+	struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
@@ -832,34 +843,49 @@
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-
-	r = amdgpu_ib_get(ring, NULL, 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib) {
+		r = -ENOMEM;
 		goto err;
+	}
+	r = amdgpu_ib_get(ring, NULL, 64, ib);
+	if (r)
+		goto err1;
 
 	addr = amdgpu_bo_gpu_offset(bo);
-	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-	ib.ptr[1] = addr;
-	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-	ib.ptr[3] = addr >> 32;
-	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-	ib.ptr[5] = 0;
+	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib->ptr[1] = addr;
+	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib->ptr[3] = addr >> 32;
+	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib->ptr[5] = 0;
 	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
-	ib.length_dw = 16;
+		ib->ptr[i] = PACKET2(0);
+	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_uvd_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
 	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+		goto err2;
+
+	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
+		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
-	return 0;
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
 
+	amdgpu_ib_free(ring->adev, ib);
+	kfree(ib);
+	return 0;
+err2:
+	amdgpu_ib_free(ring->adev, ib);
+err1:
+	kfree(ib);
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
@@ -869,7 +895,7 @@
   crash the vcpu so just try to emit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -877,7 +903,9 @@
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
@@ -916,7 +944,7 @@
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -924,7 +952,9 @@
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
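
The send_msg rework above follows a common reference handoff: the submit
helper hands back a fence that already holds one reference, the caller
takes an extra reference for its optional out parameter, then drops its
own with fence_put(). A tiny refcount model of that handoff (toy_get and
toy_put stand in for fence_get()/fence_put()):

#include <stdio.h>

struct toy_fence { int refcount; };

static struct toy_fence *toy_get(struct toy_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void toy_put(struct toy_fence *f)
{
	if (f && --f->refcount == 0)
		printf("fence freed\n");
}

/* As in amdgpu_uvd_send_msg(): give the out parameter its own
 * reference, then release the reference the submit path returned. */
static void handoff(struct toy_fence *f, struct toy_fence **fence)
{
	if (fence)
		*fence = toy_get(f);
	toy_put(f);
}

int main(void)
{
	struct toy_fence f = { .refcount = 1 };	/* as returned by submit */
	struct toy_fence *out = NULL;

	handoff(&f, &out);
	printf("out refcount: %d\n", out->refcount);	/* 1 */
	toy_put(out);	/* last reference: prints "fence freed" */
	return 0;
}
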
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 2255aa7..1724c2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d3ca730..3cab96c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -48,6 +48,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -58,6 +59,7 @@
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -101,6 +103,9 @@
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 
 	default:
 		return -EINVAL;
@@ -136,7 +141,9 @@
 	/* allocate firmware, stack and heap BO */
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 		return r;
@@ -334,6 +341,14 @@
 	}
 }
 
+static int amdgpu_vce_free_job(
+	struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vce_get_create_msg - generate a VCE create msg
  *
@@ -345,59 +360,69 @@
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		kfree(ib);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together an VCE create msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
 
-	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
-	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
-	ib.ptr[ib.length_dw++] = 0x00000000;
-	ib.ptr[ib.length_dw++] = 0x00000042;
-	ib.ptr[ib.length_dw++] = 0x0000000a;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-	ib.ptr[ib.length_dw++] = 0x00000080;
-	ib.ptr[ib.length_dw++] = 0x00000060;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x0000000c;
-	ib.ptr[ib.length_dw++] = 0x00000000;
+	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
+	ib->ptr[ib->length_dw++] = 0x00000000;
+	ib->ptr[ib->length_dw++] = 0x00000042;
+	ib->ptr[ib->length_dw++] = 0x0000000a;
+	ib->ptr[ib->length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000080;
+	ib->ptr[ib->length_dw++] = 0x00000060;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x0000000c;
+	ib->ptr[ib->length_dw++] = 0x00000000;
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
 
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -412,49 +437,59 @@
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
+		kfree(ib);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together an VCE destroy msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
 
-	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
-	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
+	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
 
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
-
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -800,9 +835,13 @@
  */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
+	/* skip vce ring1 ib test for now, since it's not reliable */
+	if (ring == &ring->adev->vce.ring[1])
+		return 0;
+
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
@@ -815,13 +854,13 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 	} else {
 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	}
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 7ccdb59..ba2da8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b6..f68b7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync)
 {
 	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-		return NULL;
+		return 0;
 
 	/* we definitely need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(i, ring->idx);
-			return NULL;
+			return 0;
 		}
 
 		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@
 
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
+			struct amdgpu_fence *fence;
+
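+			/* the id is still in flight; make the caller's sync
+			 * object wait for its last use before it is reused */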
+			fence = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
+
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return adev->vm_manager.active[choices[i]];
+			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
 		}
 	}
 
 	/* should never happen */
 	BUG();
-	return NULL;
+	return -EINVAL;
 }
 
 /**
@@ -196,17 +200,29 @@
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates)
+		     struct fence *updates)
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	struct fence *flushed_updates = vm_id->flushed_updates;
+	bool is_earlier = false;
 
-	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
-	    amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+	if (flushed_updates && updates) {
+		BUG_ON(flushed_updates->context != updates->context);
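+		/* fences from the same context carry a monotonically
+		 * increasing seqno, so an unsigned difference <= INT_MAX
+		 * means "updates" was emitted after the last flush, even
+		 * across seqno wraparound */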
+		is_earlier = updates->seqno - flushed_updates->seqno <=
+			     INT_MAX;
+	}
+
+	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
+	    is_earlier) {
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		amdgpu_fence_unref(&vm_id->flushed_updates);
-		vm_id->flushed_updates = amdgpu_fence_ref(updates);
+		if (is_earlier) {
+			vm_id->flushed_updates = fence_get(updates);
+			fence_put(flushed_updates);
+		}
+		if (!flushed_updates)
+			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
@@ -300,6 +316,15 @@
 	}
 }
 
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+{
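+	/* scheduler free_job callback: release the IBs once the job's
+	 * fence has signaled */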
+	int i;
+
+	for (i = 0; i < sched_job->num_ibs; i++)
+		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -310,7 +335,8 @@
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_ib ib;
+	struct fence *fence = NULL;
+	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -330,24 +356,33 @@
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
 		goto error_unreserve;
 
-	ib.length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > 64);
-
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(bo, ib.fence, true);
+	ib->length_dw = 0;
 
+	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > 64);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &fence);
+	if (!r)
+		amdgpu_bo_fence(bo, fence, true);
+	fence_put(fence);
+	if (amdgpu_enable_scheduler) {
+		amdgpu_bo_unreserve(bo);
+		return 0;
+	}
 error_free:
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 
 error_unreserve:
 	amdgpu_bo_unreserve(bo);
@@ -400,7 +435,9 @@
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *fence = NULL;
+
 	int r;
 
 	/* padding, etc. */
@@ -413,10 +450,14 @@
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	if (r)
 		return r;
-	ib.length_dw = 0;
+	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +477,7 @@
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &ib, last_pde,
+				amdgpu_vm_update_pages(adev, ib, last_pde,
 						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID, 0);
 			}
@@ -450,23 +491,37 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
 				       incr, AMDGPU_PTE_VALID, 0);
 
-	if (ib.length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, &ib);
-		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib.length_dw > ndw);
-		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, &ib);
-			return r;
-		}
-		amdgpu_bo_fence(pd, ib.fence, true);
+	if (ib->length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, ib);
+		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib->length_dw > ndw);
+		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+							 &amdgpu_vm_free_job,
+							 AMDGPU_FENCE_OWNER_VM,
+							 &fence);
+		if (r)
+			goto error_free;
+
+		amdgpu_bo_fence(pd, fence, true);
+		fence_put(vm->page_directory_fence);
+		vm->page_directory_fence = fence_get(fence);
+		fence_put(fence);
 	}
-	amdgpu_ib_free(adev, &ib);
+
+	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -572,9 +627,14 @@
 {
 	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 	uint64_t last_pte = ~0, last_dst = ~0;
+	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned count = 0;
 	uint64_t addr;
 
+	/* sync to everything on unmapping */
+	if (!(flags & AMDGPU_PTE_VALID))
+		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -583,8 +643,7 @@
 		uint64_t pte;
 		int r;
 
-		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
+		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
 		r = reservation_object_reserve_shared(pt->tbo.resv);
 		if (r)
 			return r;
@@ -640,7 +699,7 @@
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -670,12 +729,13 @@
 				       struct amdgpu_vm *vm,
 				       struct amdgpu_bo_va_mapping *mapping,
 				       uint64_t addr, uint32_t gtt_flags,
-				       struct amdgpu_fence **fence)
+				       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *f = NULL;
 	int r;
 
 	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits that go here
@@ -722,46 +782,54 @@
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+	if (r) {
+		kfree(ib);
 		return r;
-	ib.length_dw = 0;
-
-	if (!(flags & AMDGPU_PTE_VALID)) {
-		unsigned i;
-
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_fence *f = vm->ids[i].last_id_use;
-			amdgpu_sync_fence(&ib.sync, f);
-		}
 	}
 
-	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+	ib->length_dw = 0;
+
+	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
 				  mapping->it.last + 1, addr + mapping->offset,
 				  flags, gtt_flags);
 
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > ndw);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > ndw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &f);
+	if (r)
+		goto error_free;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		return r;
-	}
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib.fence);
+			    mapping->it.last + 1, f);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib.fence);
+		fence_put(*fence);
+		*fence = fence_get(f);
 	}
-	amdgpu_ib_free(adev, &ib);
-
+	fence_put(f);
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -794,21 +862,25 @@
 		addr = 0;
 	}
 
-	if (addr == bo_va->addr)
-		return 0;
-
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	spin_lock(&vm->status_lock);
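+	/* an invalidated bo_va needs all of its previously valid
+	 * mappings re-written, so fold them back into the invalids list */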
+	if (!list_empty(&bo_va->vm_status))
+		list_splice_init(&bo_va->valids, &bo_va->invalids);
+	spin_unlock(&vm->status_lock);
+
+	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
 						flags, &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
-	bo_va->addr = addr;
 	spin_lock(&vm->status_lock);
+	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
+	if (!mem)
+		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	return 0;
@@ -861,7 +933,7 @@
 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = NULL;
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
@@ -878,8 +950,9 @@
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		amdgpu_sync_fence(sync, bo_va->last_pt_update);
-	return 0;
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+	return r;
 }
 
 /**
@@ -907,10 +980,10 @@
 	}
 	bo_va->vm = vm;
 	bo_va->bo = bo;
-	bo_va->addr = 0;
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->mappings);
+	INIT_LIST_HEAD(&bo_va->valids);
+	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
@@ -999,12 +1072,10 @@
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	list_add(&mapping->list, &bo_va->mappings);
+	list_add(&mapping->list, &bo_va->invalids);
 	interval_tree_insert(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
-	bo_va->addr = 0;
-
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
@@ -1028,7 +1099,9 @@
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     NULL, &pt);
 		if (r)
 			goto error_free;
 
@@ -1085,17 +1158,27 @@
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
+	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
 	}
 
-	if (&mapping->list == &bo_va->mappings) {
-		amdgpu_bo_unreserve(bo_va->bo);
-		return -ENOENT;
+	if (&mapping->list == &bo_va->valids) {
+		valid = false;
+
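+		/* not found among the valid mappings; it may still be
+		 * queued on the invalids list awaiting a page-table update */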
+		list_for_each_entry(mapping, &bo_va->invalids, list) {
+			if (mapping->it.start == saddr)
+				break;
+		}
+
+		if (&mapping->list == &bo_va->invalids) {
+			amdgpu_bo_unreserve(bo_va->bo);
+			return -ENOENT;
+		}
 	}
 
 	mutex_lock(&vm->mutex);
@@ -1103,12 +1186,10 @@
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (bo_va->addr) {
-		/* clear the old address */
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-	} else {
+	else
 		kfree(mapping);
-	}
 	mutex_unlock(&vm->mutex);
 	amdgpu_bo_unreserve(bo_va->bo);
 
@@ -1139,16 +1220,19 @@
 	list_del(&bo_va->vm_status);
 	spin_unlock(&vm->status_lock);
 
-	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
 		interval_tree_remove(&mapping->it, &vm->va);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		if (bo_va->addr)
-			list_add(&mapping->list, &vm->freed);
-		else
-			kfree(mapping);
+		list_add(&mapping->list, &vm->freed);
 	}
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		kfree(mapping);
+	}
+
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);
@@ -1169,12 +1253,10 @@
 	struct amdgpu_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->addr) {
-			spin_lock(&bo_va->vm->status_lock);
-			list_del(&bo_va->vm_status);
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			spin_unlock(&bo_va->vm->status_lock);
-		}
+		spin_unlock(&bo_va->vm->status_lock);
 	}
 }
 
@@ -1202,6 +1284,7 @@
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,8 +1298,11 @@
 		return -ENOMEM;
 	}
 
+	vm->page_directory_fence = NULL;
+
 	r = amdgpu_bo_create(adev, pd_size, align, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 			     NULL, &vm->page_directory);
 	if (r)
 		return r;
@@ -1263,9 +1349,10 @@
 	kfree(vm->page_tables);
 
 	amdgpu_bo_unref(&vm->page_directory);
+	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+		fence_put(vm->ids[i].flushed_updates);
 		amdgpu_fence_unref(&vm->ids[i].last_id_use);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9ba0a7d..92b6aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -139,7 +139,8 @@
 
 	tx_buf[0] = msg->address & 0xff;
 	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
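+	/* AUX addresses are 20 bits wide; bits 19:16 share tx_buf[2]
+	 * with the 4-bit request code */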
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index ae8caca..cd6edc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -812,7 +812,7 @@
 			else
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
-			if ((adev->flags & AMDGPU_IS_APU) &&
+			if ((adev->flags & AMD_IS_APU) &&
 			    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
 				if (is_dp ||
 				    !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 341c566..4b6ce74 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -64,6 +64,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * Indirect registers accessor
  */
@@ -836,7 +838,7 @@
 {
 	u32 reference_clock = adev->clock.spll.reference_freq;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
 			return reference_clock / 2;
 	} else {
@@ -1233,7 +1235,7 @@
 	if (reset_mask & AMDGPU_RESET_VMC)
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
 	}
@@ -1409,7 +1411,7 @@
 		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
 	}
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);
 
 	/* disable BM */
@@ -1427,7 +1429,7 @@
 	}
 
 	/* does asic init need to be run first??? */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_restore_regs_for_reset(adev, &kv_save);
 }
 
@@ -1568,7 +1570,7 @@
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -1728,7 +1730,7 @@
 		return;
 
 	/* XXX double check APUs */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
@@ -2448,14 +2450,21 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_amdkfd_suspend(adev);
+
 	return cik_common_hw_fini(adev);
 }
 
 static int cik_common_resume(void *handle)
 {
+	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return cik_common_hw_init(adev);
+	r = cik_common_hw_init(adev);
+	if (r)
+		return r;
+
+	return amdgpu_amdkfd_resume(adev);
 }
 
 static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 15df46c..9ea9de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -188,6 +188,19 @@
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
+static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
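+	/* a single burst NOP packet (opcode plus a count field) is
+	 * cheaper than writing one NOP per dword, but only firmware
+	 * with feature_version >= 20 supports it */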
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+					  SDMA_NOP_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -213,8 +226,8 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on an 8 DW boundary */
-	while ((ring->wptr & 7) != 4)
-		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
@@ -501,6 +514,8 @@
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 		fw_data = (const __le32 *)
 			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
@@ -614,6 +629,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -629,12 +645,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -643,20 +658,16 @@
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -666,12 +677,17 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -814,8 +830,19 @@
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
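+	/* pad to the next 8-dword boundary; the final % 8 keeps an
+	 * already aligned IB from growing by a full 8-dword NOP */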
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
+					SDMA_NOP_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
 
 /**
@@ -1302,6 +1329,7 @@
 	.test_ring = cik_sdma_ring_test_ring,
 	.test_ib = cik_sdma_ring_test_ib,
 	.is_lockup = cik_sdma_ring_is_lockup,
+	.insert_nop = cik_sdma_ring_insert_nop,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1338,18 +1366,18 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
 				      uint64_t src_offset,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1362,16 +1390,16 @@
  *
  * Fill GPU buffers using the DMA engine (CIK).
  */
-static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
 				      uint32_t src_data,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
@@ -1404,5 +1432,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index d19085a..7f6d457 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -487,6 +487,7 @@
 					 (((op) & 0xFF) << 0))
 /* sDMA opcodes */
 #define	SDMA_OPCODE_NOP					  0
+#	define SDMA_NOP_COUNT(x)			  (((x) & 0x3FFF) << 16)
 #define	SDMA_OPCODE_COPY				  1
 #       define SDMA_COPY_SUB_OPCODE_LINEAR                0
 #       define SDMA_COPY_SUB_OPCODE_TILED                 1
@@ -552,6 +553,12 @@
 #define VCE_CMD_IB_AUTO		0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+/* if PTR32, these are the bases for scratch and lds */
+#define	PRIVATE_BASE(x)	((x) << 0) /* scratch */
+#define	SHARED_BASE(x)	((x) << 16) /* LDS */
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET	0x200
+
 /* valid for both DEFAULT_MTYPE and APE1_MTYPE */
 enum {
 	MTYPE_CACHED = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index ace870a..44fa96a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1596,9 +1596,9 @@
 
 	if (pi->sys_info.nb_dpm_enable) {
 		if (ps->force_high)
-			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
-		else
 			cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+		else
+			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e70a26f..e4d101b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -126,9 +126,31 @@
 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+	mmFBC_MISC, 0x1f311fff, 0x12300000,
+	mmHDMI_CONTROL, 0x31000111, 0x00000011,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -803,11 +825,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1331,7 +1353,7 @@
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
@@ -2888,6 +2910,7 @@
 	dce_v10_0_set_irq_funcs(adev);
 
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->mode_info.num_crtc = 6; /* XXX 7??? */
 		adev->mode_info.num_hpd = 6;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index dcb402e..6411e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -801,11 +801,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1329,7 +1329,7 @@
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index cc050a3..c86911c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -770,11 +770,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		tmp = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
new file mode 100644
index 0000000..8f9845d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_smumgr.h"
+
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int fiji_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_set_funcs(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	char fw_name[30] = "amdgpu/fiji_smc.bin";
+	int err;
+
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+}
+
+static int fiji_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ret = fiji_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int fiji_dpm_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static int fiji_dpm_hw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+
+	ret = fiji_smu_init(adev);
+	if (ret) {
+		DRM_ERROR("SMU initialization failed\n");
+		goto fail;
+	}
+
+	ret = fiji_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("SMU start failed\n");
+		goto fail;
+	}
+
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+
+fail:
+	adev->firmware.smu_load = false;
+	mutex_unlock(&adev->pm.mutex);
+	return -EINVAL;
+}
+
+static int fiji_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	mutex_lock(&adev->pm.mutex);
+	fiji_smu_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+}
+
+static int fiji_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_fini(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_init(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_set_clockgating_state(void *handle,
+			enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int fiji_dpm_set_powergating_state(void *handle,
+			enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+	.early_init = fiji_dpm_early_init,
+	.late_init = NULL,
+	.sw_init = fiji_dpm_sw_init,
+	.sw_fini = fiji_dpm_sw_fini,
+	.hw_init = fiji_dpm_hw_init,
+	.hw_fini = fiji_dpm_hw_fini,
+	.suspend = fiji_dpm_suspend,
+	.resume = fiji_dpm_resume,
+	.is_idle = NULL,
+	.wait_for_idle = NULL,
+	.soft_reset = NULL,
+	.print_status = NULL,
+	.set_clockgating_state = fiji_dpm_set_clockgating_state,
+	.set_powergating_state = fiji_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
+	.get_temperature = NULL,
+	.pre_set_power_state = NULL,
+	.set_power_state = NULL,
+	.post_set_power_state = NULL,
+	.display_configuration_changed = NULL,
+	.get_sclk = NULL,
+	.get_mclk = NULL,
+	.print_power_state = NULL,
+	.debugfs_print_current_performance_level = NULL,
+	.force_performance_level = NULL,
+	.vblank_too_short = NULL,
+	.powergate_uvd = NULL,
+};
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
+{
+	if (NULL == adev->pm.funcs)
+		adev->pm.funcs = &fiji_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
new file mode 100644
index 0000000..3c48240
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_PP_SMC_H
+#define FIJI_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    		0x01
+#define PPSMC_STATEFLAG_POWERBOOST         		0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 		0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         		0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   		0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 		0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   		0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+//Gemini Modes
+#define PPSMC_GeminiModeNone   0  //Single GPU board
+#define PPSMC_GeminiModeMaster 1  //Master GPU on a Gemini board
+#define PPSMC_GeminiModeSlave  2  //Slave GPU on a Gemini board
+
+#define PPSMC_Result_OK             			((uint16_t)0x01)
+#define PPSMC_Result_NoMore         			((uint16_t)0x02)
+#define PPSMC_Result_NotNow         			((uint16_t)0x03)
+#define PPSMC_Result_Failed         			((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     			((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      			((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+#define PPSMC_MSG_Halt                      		((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    		((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            		((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        		((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         		((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         		((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    		((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               		((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   		((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 		((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          		((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           		((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       		((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      		((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             		((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 		((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         		((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      		((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    		((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 		((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                		((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          		((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           		((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               		((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                		((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             		((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           		((uint16_t)0x5A)
+#define PPSMC_StartFanControl               		((uint16_t)0x5B)
+#define PPSMC_StopFanControl                		((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     		((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    		((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               		((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                		((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 		((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                		((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  		((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   		((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              		((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            		((uint16_t)0x6B)
+#define PPSMC_OCPActive                     		((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   		((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          		((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         		((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  		((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   		((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    		((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       		((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       		((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        		((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   		((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  		((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        		((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          		((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   		((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    		((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         		((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr 		((uint16_t)0x7D)
+#define PPSMC_MSG_ExtremitiesTest_Start     		((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      		((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                		((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               		((uint16_t)0x81)
+#define PPSMC_MSG_SetEnabledLevels          		((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           		((uint16_t)0x83)
+#define PPSMC_MSG_ResetToDefaults           		((uint16_t)0x84)
+#define PPSMC_MSG_SetForcedLevelsAndJump    		((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode         		((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                 		((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                		((uint16_t)0x88)
+#define PPSMC_MSG_SmcSpaceSetAddress        		((uint16_t)0x89)
+#define PPSMC_MSG_SmcSpaceWriteDWordInc     		((uint16_t)0x8A)
+#define PPSMC_MSG_SmcSpaceWriteWordInc      		((uint16_t)0x8B)
+#define PPSMC_MSG_SmcSpaceWriteByteInc      		((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                     		((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                      		((uint16_t)0x100)
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            		((uint16_t)0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            		((uint16_t)0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            		((uint16_t)0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            		((uint16_t)0x253)
+#define PPSMC_MSG_LoadUcodes                  		((uint16_t)0x254)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          		0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     		0x00000002
+#define PPSMC_EVENT_STATUS_DC               		0x00000004
+#define PPSMC_EVENT_STATUS_GPIO17           		0x00000008
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
new file mode 100644
index 0000000..322edea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_ppsmc.h"
+#include "fiji_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+#define FIJI_SMC_SIZE 0x20000
+
+static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+	uint32_t val;
+
+	if (smc_address & 3)
+		return -EINVAL;
+
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(mmSMC_IND_INDEX_0, smc_address);
+
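+	/* disable auto-increment so repeated SMC_IND_DATA_0 accesses
+	 * target the same SRAM address */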
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	return 0;
+}
+
+static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+	unsigned long flags;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* Bytes are written into the SMC address space with the MSB first */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+		/* Now write odd bytes left, do a read modify write cycle */
+		data = 0;
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = RREG32(mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
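+		/* left-align the trailing bytes and merge in the untouched
+		 * low bytes of the original dword */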
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+	fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+	return 0;
+}
+
+static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
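+	/* poll SMC_RESP_0 until the SMU acknowledges the previous
+	 * message, giving up after adev->usec_timeout microseconds */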
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32(mmSMC_RESP_0);
+		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, 0x20000);
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+						PPSMC_Msg msg)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						PPSMC_Msg msg,
+						uint32_t parameter)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc(adev, msg);
+}
+
+static int fiji_send_msg_to_smc_with_parameter_without_waiting(
+					struct amdgpu_device *adev,
+					PPSMC_Msg msg, uint32_t parameter)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	uint32_t ucode_size;
+	uint32_t ucode_start_address;
+	const uint8_t *src;
+	uint32_t val;
+	uint32_t byte_count;
+	uint32_t *data;
+	unsigned long flags;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	src = (const uint8_t *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	if (ucode_size & 3) {
+		DRM_ERROR("SMC ucode is not 4-byte aligned\n");
+		return -EINVAL;
+	}
+
+	if (ucode_size > FIJI_SMC_SIZE) {
+		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	byte_count = ucode_size;
+	data = (uint32_t *)src;
+	for (; byte_count >= 4; data++, byte_count -= 4)
+		WREG32(mmSMC_IND_DATA_0, data[0]);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+#if 0 /* not used yet */
+static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t *value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		*value = RREG32(mmSMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		WREG32(mmSMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_smu_stop_smc(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	return 0;
+}
+#endif
+
+static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+		case UCODE_ID_SDMA0:
+			return AMDGPU_UCODE_ID_SDMA0;
+		case UCODE_ID_SDMA1:
+			return AMDGPU_UCODE_ID_SDMA1;
+		case UCODE_ID_CP_CE:
+			return AMDGPU_UCODE_ID_CP_CE;
+		case UCODE_ID_CP_PFP:
+			return AMDGPU_UCODE_ID_CP_PFP;
+		case UCODE_ID_CP_ME:
+			return AMDGPU_UCODE_ID_CP_ME;
+		case UCODE_ID_CP_MEC:
+		case UCODE_ID_CP_MEC_JT1:
+		case UCODE_ID_CP_MEC_JT2:
+			return AMDGPU_UCODE_ID_CP_MEC1;
+		case UCODE_ID_RLC_G:
+			return AMDGPU_UCODE_ID_RLC_G;
+		default:
+			DRM_ERROR("ucode type is out of range!\n");
+			return AMDGPU_UCODE_ID_MAXIMUM;
+	}
+}
+
+static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+						uint32_t fw_type,
+						struct SMU_Entry *entry)
+{
+	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
+	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	uint64_t gpu_addr;
+	uint32_t data_size;
+
+	if (ucode->fw == NULL)
+		return -EINVAL;
+	gpu_addr  = ucode->mc_addr;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
+		(fw_type == UCODE_ID_CP_MEC_JT2)) {
+		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+		data_size = le32_to_cpu(header->jt_size) << 2;
+	}
+
+	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+	entry->id = (uint16_t)fw_type;
+	entry->image_addr_high = upper_32_bits(gpu_addr);
+	entry->image_addr_low = lower_32_bits(gpu_addr);
+	entry->meta_data_addr_high = 0;
+	entry->meta_data_addr_low = 0;
+	entry->data_size_byte = data_size;
+	entry->num_register_entries = 0;
+
+	if (fw_type == UCODE_ID_RLC_G)
+		entry->flags = 1;
+	else
+		entry->flags = 0;
+
+	return 0;
+}
+
+static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
+	struct SMU_DRAMData_TOC *toc;
+	uint32_t fw_to_load;
+
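+	/* clear the firmware-load status register; it is polled later by
+	 * fiji_smu_check_fw_load_finish() to confirm each ucode loaded */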
+	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
+
+	toc = (struct SMU_DRAMData_TOC *)private->header;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	if (!adev->firmware.smu_load)
+		return 0;
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for RLC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for CE\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for PFP\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for ME\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+		return -EINVAL;
+	}
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+	fw_to_load = UCODE_ID_RLC_G_MASK |
+			UCODE_ID_SDMA0_MASK |
+			UCODE_ID_SDMA1_MASK |
+			UCODE_ID_CP_CE_MASK |
+			UCODE_ID_CP_ME_MASK |
+			UCODE_ID_CP_PFP_MASK |
+			UCODE_ID_CP_MEC_MASK;
+
+	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+		DRM_ERROR("Failed to request SMU ucode load\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
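+/*
+ * Translate a driver ucode id into the completion bit the SMU sets in
+ * SOFT_REGISTERS_TABLE_28 once the corresponding image has been loaded.
+ */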
+static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+	case AMDGPU_UCODE_ID_SDMA0:
+		return UCODE_ID_SDMA0_MASK;
+	case AMDGPU_UCODE_ID_SDMA1:
+		return UCODE_ID_SDMA1_MASK;
+	case AMDGPU_UCODE_ID_CP_CE:
+		return UCODE_ID_CP_CE_MASK;
+	case AMDGPU_UCODE_ID_CP_PFP:
+		return UCODE_ID_CP_PFP_MASK;
+	case AMDGPU_UCODE_ID_CP_ME:
+		return UCODE_ID_CP_ME_MASK;
+	case AMDGPU_UCODE_ID_CP_MEC1:
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		return UCODE_ID_CP_MEC_MASK;
+	case AMDGPU_UCODE_ID_RLC_G:
+		return UCODE_ID_RLC_G_MASK;
+	default:
+		DRM_ERROR("ucode type is out of range!\n");
+		return 0;
+	}
+}
+
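+/*
+ * Poll SOFT_REGISTERS_TABLE_28 until the SMU reports the requested
+ * ucode as loaded, or give up after adev->usec_timeout microseconds.
+ */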
+static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
+					uint32_t fw_type)
+{
+	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
+	int i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("firmware load check timed out\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
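+/*
+ * Start the SMC in protected mode: upload the firmware while the SMC is
+ * held in reset, enable auto-start, then poll the done, pass and
+ * interrupts-enabled status bits before declaring success.
+ */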
+static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+	int i;
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Clear status */
+	WREG32_SMC(ixSMU_STATUS, 0);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Set SMU Auto Start */
+	val = RREG32_SMC(ixSMU_INPUT_DATA);
+	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
+	WREG32_SMC(ixSMU_INPUT_DATA, val);
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Interrupts are not enabled by firmware\n");
+		return -EINVAL;
+	}
+
+	/* Call Test SMU message with 0x20000 offset
+	 * to trigger SMU start
+	 */
+	fiji_send_msg_to_smc_offset(adev);
+	DRM_INFO("[FM]try to trigger smu start\n");
+	/* Wait for done bit to be set */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMU_STATUS);
+		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMU start\n");
+		return -EINVAL;
+	}
+
+	/* Check pass/failed indicator */
+	val = RREG32_SMC(ixSMU_STATUS);
+	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
+		DRM_ERROR("SMU Firmware start failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu started\n");
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMU firmware initialization failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu initialized\n");
+
+	return 0;
+}
+
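+/*
+ * Start the SMC in non-protected mode: wait for the boot sequence,
+ * upload the firmware, point the instruction start at 0x0, release the
+ * SMC from reset and wait for it to flag its interrupts as enabled.
+ */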
+static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
+{
+	int i, result;
+	uint32_t val;
+
+	/* wait for smc boot up */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
+		if (val)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMC boot sequence is not completed\n");
+		return -EINVAL;
+	}
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Set smc instruct start point at 0x0 */
+	fiji_program_jump_on_start(adev);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMC firmware initialization\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
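+/*
+ * Bring the SMC up if it is not already running, choosing the protected
+ * or non-protected start path based on SMU_FIRMWARE.SMU_MODE, and then
+ * ask it to load the ucode images listed in the TOC.
+ */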
+int fiji_smu_start(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev)) {
+		val = RREG32_SMC(ixSMU_FIRMWARE);
+		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
+			DRM_INFO("[FM]start smu in nonprotection mode\n");
+			result = fiji_smu_start_in_non_protection_mode(adev);
+			if (result)
+				return result;
+		} else {
+			DRM_INFO("[FM]start smu in protection mode\n");
+			result = fiji_smu_start_in_protection_mode(adev);
+			if (result)
+				return result;
+		}
+	}
+
+	return fiji_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
+	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
+	.request_smu_load_fw = NULL,
+	.request_smu_specific_fw = NULL,
+};
+
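+/*
+ * Allocate and pin the two VRAM buffers the SMU needs (the driver TOC
+ * and the SMU internal buffer), record their GPU addresses in the
+ * private data and register the smumgr callbacks.
+ */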
+int fiji_smu_init(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private;
+	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+	uint32_t smu_internal_buffer_size = 200 * 4096;
+	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+	uint64_t mc_addr;
+	void *toc_buf_ptr;
+	void *smu_buf_ptr;
+	int ret;
+
+	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	/* allocate firmware buffers */
+	if (adev->firmware.smu_load)
+		amdgpu_ucode_init_bo(adev);
+
+	adev->smu.priv = private;
+	adev->smu.fw_flags = 0;
+
+	/* Allocate FW image data structure and header buffer */
+	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
+	if (ret) {
+		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate buffer for SMU internal buffer */
+	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
+	if (ret) {
+		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Retrieve GPU address for header buffer and internal buffer */
+	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.toc_buf);
+	private->header_addr_low = lower_32_bits(mc_addr);
+	private->header_addr_high = upper_32_bits(mc_addr);
+	private->header = toc_buf_ptr;
+
+	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.smu_buf);
+	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
+	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
+
+	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
+
+	return 0;
+}
+
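+/*
+ * Tear down everything fiji_smu_init() set up: the TOC and internal
+ * buffers, the private data and, if it was used, the shared ucode BO.
+ */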
+int fiji_smu_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_unref(&adev->smu.toc_buf);
+	amdgpu_bo_unref(&adev->smu.smu_buf);
+	kfree(adev->smu.priv);
+	adev->smu.priv = NULL;
+	if (adev->firmware.fw_buf)
+		amdgpu_ucode_fini_bo(adev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
new file mode 100644
index 0000000..1cef03d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_SMUMGR_H
+#define FIJI_SMUMGR_H
+
+#include "fiji_ppsmc.h"
+
+int fiji_smu_init(struct amdgpu_device *adev);
+int fiji_smu_fini(struct amdgpu_device *adev);
+int fiji_smu_start(struct amdgpu_device *adev);
+
+struct fiji_smu_private_data {
+	uint8_t *header;
+	uint32_t smu_buffer_addr_high;
+	uint32_t smu_buffer_addr_low;
+	uint32_t header_addr_high;
+	uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0d8bf2c..4bd1e5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2173,7 +2173,7 @@
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2648,6 +2648,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2659,29 +2660,27 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2691,14 +2690,19 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -3758,7 +3762,7 @@
 	int r;
 
 	/* allocate rlc buffers */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (adev->asic_type == CHIP_KAVERI) {
 			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
 			adev->gfx.rlc.reg_list_size =
@@ -3782,7 +3786,9 @@
 		/* save restore block */
 		if (adev->gfx.rlc.save_restore_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.save_restore_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
 				return r;
@@ -3823,7 +3829,9 @@
 
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.clear_state_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -3860,7 +3868,9 @@
 	if (adev->gfx.rlc.cp_table_size) {
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.cp_table_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -5594,6 +5604,7 @@
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5610,6 +5621,7 @@
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 20e2cfd..53f0743 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -87,6 +87,13 @@
 MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
+MODULE_FIRMWARE("amdgpu/fiji_me.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
+MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -217,6 +224,71 @@
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 fiji_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
+	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf30fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
+	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
@@ -439,6 +511,18 @@
 						 iceland_golden_common_all,
 						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		amdgpu_program_register_sequence(adev,
+						 fiji_golden_common_all,
+						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -526,6 +610,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -537,29 +622,27 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -569,14 +652,18 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -601,6 +688,9 @@
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	default:
 		BUG();
 	}
@@ -1236,6 +1326,7 @@
 			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
 		}
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
@@ -1914,7 +2005,7 @@
 }
 
 /**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - gart enable
  *
  * @rdev: amdgpu_device pointer
  *
@@ -1924,7 +2015,7 @@
 #define DEFAULT_SH_MEM_BASES	(0x6000)
 #define FIRST_COMPUTE_VMID	(8)
 #define LAST_COMPUTE_VMID	(16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
 	uint32_t sh_mem_config;
@@ -1984,6 +2075,23 @@
 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
 		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
 		break;
+	case CHIP_FIJI:
+		adev->gfx.config.max_shader_engines = 4;
+		adev->gfx.config.max_tile_pipes = 16;
+		adev->gfx.config.max_cu_per_sh = 16;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+		break;
 	case CHIP_TONGA:
 		adev->gfx.config.max_shader_engines = 4;
 		adev->gfx.config.max_tile_pipes = 8;
@@ -2078,7 +2186,7 @@
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2174,7 +2282,7 @@
 	vi_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
-	gmc_v8_0_init_compute_vmid(adev);
+	gfx_v8_0_init_compute_vmid(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	/*
@@ -2490,6 +2598,7 @@
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
@@ -3131,7 +3240,8 @@
 
 		/* enable the doorbell if requested */
 		if (use_doorbell) {
-			if (adev->asic_type == CHIP_CARRIZO) {
+			if ((adev->asic_type == CHIP_CARRIZO) ||
+			    (adev->asic_type == CHIP_FIJI)) {
 				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
 				       AMDGPU_DOORBELL_KIQ << 2);
 				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3875,7 +3985,8 @@
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA)
+	    ring->adev->asic_type == CHIP_TONGA ||
+	    ring->adev->asic_type == CHIP_FIJI)
 		/* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
 		return false;
 	else {
@@ -4268,6 +4379,7 @@
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -4284,6 +4396,7 @@
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae37fce..774528a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -523,17 +523,11 @@
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -636,7 +630,7 @@
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -841,7 +835,7 @@
 	gmc_v7_0_set_gart_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -852,6 +846,13 @@
 	return 0;
 }
 
+static int gmc_v7_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
@@ -957,7 +958,7 @@
 
 	gmc_v7_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v7_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -976,6 +977,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v7_0_gart_disable(adev);
 
 	return 0;
@@ -1172,7 +1174,7 @@
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1282,7 +1284,7 @@
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		gmc_v7_0_enable_mc_mgcg(adev, gate);
 		gmc_v7_0_enable_mc_ls(adev, gate);
 	}
@@ -1301,7 +1303,7 @@
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.early_init = gmc_v7_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v7_0_late_init,
 	.sw_init = gmc_v7_0_sw_init,
 	.sw_fini = gmc_v7_0_sw_fini,
 	.hw_init = gmc_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8135963..9a07742 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@
 
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -61,6 +62,19 @@
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -90,6 +104,14 @@
 						 golden_settings_iceland_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -202,6 +224,9 @@
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		return 0;
 	default: BUG();
@@ -628,19 +653,12 @@
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -737,7 +755,7 @@
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -816,7 +834,7 @@
 	gmc_v8_0_set_gart_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -827,6 +845,13 @@
 	return 0;
 }
 
+static int gmc_v8_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
@@ -934,7 +959,7 @@
 
 	gmc_v8_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v8_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -953,6 +978,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v8_0_gart_disable(adev);
 
 	return 0;
@@ -1147,7 +1173,7 @@
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1263,7 +1289,7 @@
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.early_init = gmc_v8_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v8_0_late_init,
 	.sw_init = gmc_v8_0_sw_init,
 	.sw_fini = gmc_v8_0_sw_fini,
 	.hw_init = gmc_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
index c723602..ee6a041 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
@@ -2163,5 +2163,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c6f1e2f..c900aa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -623,7 +623,9 @@
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/amdgpu/pptable.h
deleted file mode 100644
index 0030f72..0000000
--- a/drivers/gpu/drm/amd/amdgpu/pptable.h
+++ /dev/null
@@ -1,698 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _PPTABLE_H
-#define _PPTABLE_H
-
-#pragma pack(1)
-
-typedef struct _ATOM_PPLIB_THERMALCONTROLLER
-
-{
-    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
-    UCHAR ucI2cLine;        // as interpreted by DAL I2C
-    UCHAR ucI2cAddress;
-    UCHAR ucFanParameters;  // Fan Control Parameters.
-    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
-    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
-    UCHAR ucReserved;       // ----
-    UCHAR ucFlags;          // to be defined
-} ATOM_PPLIB_THERMALCONTROLLER;
-
-#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
-
-#define ATOM_PP_THERMALCONTROLLER_NONE      0
-#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_LM64      5
-#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
-#define ATOM_PP_THERMALCONTROLLER_RV770     8
-#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
-#define ATOM_PP_THERMALCONTROLLER_KONG      10
-#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
-#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
-#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
-#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
-#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
-#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
-#define ATOM_PP_THERMALCONTROLLER_LM96163   17
-#define ATOM_PP_THERMALCONTROLLER_CISLANDS  18
-#define ATOM_PP_THERMALCONTROLLER_KAVERI    19
-
-
-// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
-// We probably should reserve the bit 0x80 for this use.
-// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
-// The driver can pick the correct internal controller based on the ASIC.
-
-#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
-#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
-
-typedef struct _ATOM_PPLIB_STATE
-{
-    UCHAR ucNonClockStateIndex;
-    UCHAR ucClockStateIndices[1]; // variable-sized
-} ATOM_PPLIB_STATE;
-
-
-typedef struct _ATOM_PPLIB_FANTABLE
-{
-    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
-    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
-    USHORT  usTMin;                          // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
-    USHORT  usTMed;                          // The middle temperature where we change slopes.
-    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
-    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
-    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
-    USHORT  usPWMHigh;                       // The PWM value at THigh.
-} ATOM_PPLIB_FANTABLE;
-
-typedef struct _ATOM_PPLIB_FANTABLE2
-{
-    ATOM_PPLIB_FANTABLE basicTable;
-    USHORT  usTMax;                          // The max temperature
-} ATOM_PPLIB_FANTABLE2;
-
-typedef struct _ATOM_PPLIB_FANTABLE3
-{
-	ATOM_PPLIB_FANTABLE2 basicTable2;
-	UCHAR ucFanControlMode;
-	USHORT usFanPWMMax;
-	USHORT usFanOutputSensitivity;
-} ATOM_PPLIB_FANTABLE3;
-
-typedef struct _ATOM_PPLIB_EXTENDEDHEADER
-{
-    USHORT  usSize;
-    ULONG   ulMaxEngineClock;   // For Overdrive.
-    ULONG   ulMaxMemoryClock;   // For Overdrive.
-    // Add extra system parameters here, always adjust size to include all fields.
-    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
-    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
-    USHORT  usSAMUTableOffset;  //points to ATOM_PPLIB_SAMU_Table
-    USHORT  usPPMTableOffset;   //points to ATOM_PPLIB_PPM_Table
-    USHORT  usACPTableOffset;  //points to ATOM_PPLIB_ACP_Table   
-    /* points to ATOM_PPLIB_POWERTUNE_Table */
-    USHORT  usPowerTuneTableOffset;
-    /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */
-    USHORT  usSclkVddgfxTableOffset;
-} ATOM_PPLIB_EXTENDEDHEADER;
-
-//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
-#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
-#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
-#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
-#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
-#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
-#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
-#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
-#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
-#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
-#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
-#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
-#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
-#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
-#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
-#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
-#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Does the driver control VDDCI independently from VDDC.
-#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
-#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Does the driver supports BACO state.
-#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE   0x00040000           // Does the driver supports new CAC voltage table.
-#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY   0x00080000     // Does the driver supports revert GPIO5 polarity.
-#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17   0x00100000     // Does the driver supports thermal2GPIO17.
-#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Does the driver supports VR HOT GPIO Configurable.
-#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Does the driver supports Temp Inversion feature.
-#define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE
-{
-      ATOM_COMMON_TABLE_HEADER sHeader;
-
-      UCHAR ucDataRevision;
-
-      UCHAR ucNumStates;
-      UCHAR ucStateEntrySize;
-      UCHAR ucClockInfoSize;
-      UCHAR ucNonClockSize;
-
-      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
-      USHORT usStateArrayOffset;
-
-      // offset from start of this table to array of ASIC-specific structures,
-      // currently ATOM_PPLIB_CLOCK_INFO.
-      USHORT usClockInfoArrayOffset;
-
-      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
-      USHORT usNonClockInfoArrayOffset;
-
-      USHORT usBackbiasTime;    // in microseconds
-      USHORT usVoltageTime;     // in microseconds
-      USHORT usTableSize;       //the size of this structure, or the extended structure
-
-      ULONG ulPlatformCaps;            // See ATOM_PPLIB_CAPS_*
-
-      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
-
-      USHORT usBootClockInfoOffset;
-      USHORT usBootNonClockInfoOffset;
-
-} ATOM_PPLIB_POWERPLAYTABLE;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
-{
-    ATOM_PPLIB_POWERPLAYTABLE basicTable;
-    UCHAR   ucNumCustomThermalPolicy;
-    USHORT  usCustomThermalPolicyArrayOffset;
-}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
-{
-    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
-    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
-    USHORT                     usFanTableOffset;
-    USHORT                     usExtendendedHeaderOffset;
-} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
-{
-    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
-    ULONG                      ulGoldenPPID;                    // PPGen use only     
-    ULONG                      ulGoldenRevision;                // PPGen use only
-    USHORT                     usVddcDependencyOnSCLKOffset;
-    USHORT                     usVddciDependencyOnMCLKOffset;
-    USHORT                     usVddcDependencyOnMCLKOffset;
-    USHORT                     usMaxClockVoltageOnDCOffset;
-    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
-    USHORT                     usMvddDependencyOnMCLKOffset;  
-} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
-{
-    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
-    ULONG                      ulTDPLimit;
-    ULONG                      ulNearTDPLimit;
-    ULONG                      ulSQRampingThreshold;
-    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
-    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
-    USHORT                     usTDPODLimit;
-    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
-} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
-// 2, 4, 6, 7 are reserved
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
-#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
-#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
-#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
-#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
-#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
-#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
-#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
-#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
-#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
-#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
-
-//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
-#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
-#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
-
-// 0 is 2.5Gb/s, 1 is 5Gb/s
-#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
-#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
-
-// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
-
-// lookup into reduced refresh-rate table
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
-
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
-// 2-15 TBD as needed.
-
-#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
-#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
-
-#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
-
-#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
-
-//memory related flags
-#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x000010000
-
-//M3 Arb    //2bits, current 3 sets of parameters in total
-#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
-#define ATOM_PPLIB_M3ARB_SHIFT                      17
-
-#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
-
-// remaining 16 bits are reserved
-typedef struct _ATOM_PPLIB_THERMAL_STATE
-{
-    UCHAR   ucMinTemperature;
-    UCHAR   ucMaxTemperature;
-    UCHAR   ucThermalAction;
-}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
-#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
-typedef struct _ATOM_PPLIB_NONCLOCK_INFO
-{
-      USHORT usClassification;
-      UCHAR  ucMinTemperature;
-      UCHAR  ucMaxTemperature;
-      ULONG  ulCapsAndSettings;
-      UCHAR  ucRequiredPower;
-      USHORT usClassification2;
-      ULONG  ulVCLK;
-      ULONG  ulDCLK;
-      UCHAR  ucUnused[5];
-} ATOM_PPLIB_NONCLOCK_INFO;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usUnused1;
-      USHORT usUnused2;
-
-      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_R600_CLOCK_INFO;
-
-// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
-#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
-#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
-#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
-#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
-
-typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
-
-{
-      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
-      UCHAR  ucLowEngineClockHigh;
-      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
-      UCHAR  ucHighEngineClockHigh;
-      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
-      UCHAR  ucMemoryClockHigh;           // Currentyl unused.
-      UCHAR  ucPadding;                   // For proper alignment and size.
-      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
-      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
-      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could 
-      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
-      ULONG  ulFlags; 
-} ATOM_PPLIB_RS780_CLOCK_INFO;
-
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
-
-#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
-#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
-#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
-
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
-
-typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usVDDCI;
-      USHORT usUnused;
-
-      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usVDDCI;
-      UCHAR  ucPCIEGen;
-      UCHAR  ucUnused1;
-
-      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
-
-} ATOM_PPLIB_SI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-      
-      UCHAR  ucPCIEGen;
-      USHORT usPCIELane;
-} ATOM_PPLIB_CI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
-      USHORT usEngineClockLow;  //clockfrequency & 0xFFFF. The unit is in 10khz
-      UCHAR  ucEngineClockHigh; //clockfrequency >> 16. 
-      UCHAR  vddcIndex;         //2-bit vddc index;
-      USHORT tdpLimit;
-      //please initalize to 0
-      USHORT rsv1;
-      //please initialize to 0s
-      ULONG rsv2[2];
-}ATOM_PPLIB_SUMO_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO {
-      UCHAR index;
-      UCHAR rsv[3];
-} ATOM_PPLIB_CZ_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_STATE_V2
-{
-      //number of valid dpm levels in this state; Driver uses it to calculate the whole 
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
-      UCHAR ucNumDPMLevels;
-      
-      //an index to the array of nonClockInfos
-      UCHAR nonClockInfoIndex;
-      /**
-      * Driver will read the first ucNumDPMLevels in this array
-      */
-      UCHAR clockInfoIndex[1];
-} ATOM_PPLIB_STATE_V2;
-
-typedef struct _StateArray{
-    //how many states we have 
-    UCHAR ucNumEntries;
-    
-    ATOM_PPLIB_STATE_V2 states[1];
-}StateArray;
-
-
-typedef struct _ClockInfoArray{
-    //how many clock levels we have
-    UCHAR ucNumEntries;
-    
-    //sizeof(ATOM_PPLIB_CLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    UCHAR clockInfo[1];
-}ClockInfoArray;
-
-typedef struct _NonClockInfoArray{
-
-    //how many non-clock levels we have. Normally the same as the number of states
-    UCHAR ucNumEntries;
-    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
-}NonClockInfoArray;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
-{
-    USHORT usClockLow;
-    UCHAR  ucClockHigh;
-    USHORT usVoltage;
-}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
-{
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
-{
-    USHORT usSclkLow;
-    UCHAR  ucSclkHigh;
-    USHORT usMclkLow;
-    UCHAR  ucMclkHigh;
-    USHORT usVddc;
-    USHORT usVddci;
-}ATOM_PPLIB_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
-{
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Limit_Table;
-
-union _ATOM_PPLIB_CAC_Leakage_Record
-{
-    struct
-    {
-        USHORT usVddc;          // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd
-        ULONG  ulLeakageValue;  // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd
-
-    };
-    struct
-     {
-        USHORT usVddc1;
-        USHORT usVddc2;
-        USHORT usVddc3;
-     };
-};
-
-typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Table
-{
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
-}ATOM_PPLIB_CAC_Leakage_Table;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
-{
-    USHORT usVoltage;
-    USHORT usSclkLow;
-    UCHAR  ucSclkHigh;
-    USHORT usMclkLow;
-    UCHAR  ucMclkHigh;
-}ATOM_PPLIB_PhaseSheddingLimits_Record;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
-{
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
-}ATOM_PPLIB_PhaseSheddingLimits_Table;
-
-typedef struct _VCEClockInfo{
-    USHORT usEVClkLow;
-    UCHAR  ucEVClkHigh;
-    USHORT usECClkLow;
-    UCHAR  ucECClkHigh;
-}VCEClockInfo;
-
-typedef struct _VCEClockInfoArray{
-    UCHAR ucNumEntries;
-    VCEClockInfo entries[1];
-}VCEClockInfoArray;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
-{
-    USHORT usVoltage;
-    UCHAR  ucVCEClockInfoIndex;
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_VCE_State_Record
-{
-    UCHAR  ucVCEClockInfoIndex;
-    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_VCE_State_Record;
-
-typedef struct _ATOM_PPLIB_VCE_State_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_State_Record entries[1];
-}ATOM_PPLIB_VCE_State_Table;
-
-
-typedef struct _ATOM_PPLIB_VCE_Table
-{
-      UCHAR revid;
-//    VCEClockInfoArray array;
-//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
-//    ATOM_PPLIB_VCE_State_Table states;
-}ATOM_PPLIB_VCE_Table;
-
-
-typedef struct _UVDClockInfo{
-    USHORT usVClkLow;
-    UCHAR  ucVClkHigh;
-    USHORT usDClkLow;
-    UCHAR  ucDClkHigh;
-}UVDClockInfo;
-
-typedef struct _UVDClockInfoArray{
-    UCHAR ucNumEntries;
-    UVDClockInfo entries[1];
-}UVDClockInfoArray;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
-{
-    USHORT usVoltage;
-    UCHAR  ucUVDClockInfoIndex;
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_UVD_Table
-{
-      UCHAR revid;
-//    UVDClockInfoArray array;
-//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
-}ATOM_PPLIB_UVD_Table;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
-{
-      USHORT usVoltage;
-      USHORT usSAMClockLow;
-      UCHAR  ucSAMClockHigh;
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_SAMU_Table
-{
-      UCHAR revid;
-      ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_SAMU_Table;
-
-typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
-{
-      USHORT usVoltage;
-      USHORT usACPClockLow;
-      UCHAR  ucACPClockHigh;
-}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_ACP_Table
-{
-      UCHAR revid;
-      ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_ACP_Table;
-
-typedef struct _ATOM_PowerTune_Table{
-    USHORT usTDP;
-    USHORT usConfigurableTDP;
-    USHORT usTDC;
-    USHORT usBatteryPowerLimit;
-    USHORT usSmallPowerLimit;
-    USHORT usLowCACLeakage;
-    USHORT usHighCACLeakage;
-}ATOM_PowerTune_Table;
-
-typedef struct _ATOM_PPLIB_POWERTUNE_Table
-{
-      UCHAR revid;
-      ATOM_PowerTune_Table power_tune_table;
-}ATOM_PPLIB_POWERTUNE_Table;
-
-typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
-{
-      UCHAR revid;
-      ATOM_PowerTune_Table power_tune_table;
-      USHORT usMaximumPowerDeliveryLimit;
-      USHORT usReserve[7];
-} ATOM_PPLIB_POWERTUNE_Table_V1;
-
-#define ATOM_PPM_A_A    1
-#define ATOM_PPM_A_I    2
-typedef struct _ATOM_PPLIB_PPM_Table
-{
-      UCHAR  ucRevId;
-      UCHAR  ucPpmDesign;          //A+I or A+A
-      USHORT usCpuCoreNumber;
-      ULONG  ulPlatformTDP;
-      ULONG  ulSmallACPlatformTDP;
-      ULONG  ulPlatformTDC;
-      ULONG  ulSmallACPlatformTDC;
-      ULONG  ulApuTDP;
-      ULONG  ulDGpuTDP;  
-      ULONG  ulDGpuUlvPower;
-      ULONG  ulTjmax;
-} ATOM_PPLIB_PPM_Table;
-
-#pragma pack()
-
-#endif
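
Every table removed above uses the pre-C99 one-element-array idiom: the
struct declares entries[1] plus a ucNumEntries byte, and the real records
are laid out contiguously past the declared bound. A minimal standalone
sketch of how such a table is sized and walked (illustrative names and
values, not from the patch):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char  UCHAR;
typedef unsigned short USHORT;

typedef struct {
	USHORT usClockLow;
	UCHAR  ucClockHigh;
	USHORT usVoltage;
} Record;

typedef struct {
	UCHAR  ucNumEntries;	/* how many records actually follow */
	Record entries[1];	/* real length is ucNumEntries */
} Table;

int main(void)
{
	/* room for two records: the struct itself already holds one */
	Table *t = malloc(sizeof(Table) + sizeof(Record));
	int i;

	if (!t)
		return 1;
	t->ucNumEntries = 2;
	t->entries[0].usVoltage = 900;
	t->entries[1].usVoltage = 1100;	/* deliberately past the declared bound */

	for (i = 0; i < t->ucNumEntries; i++)
		printf("entry %d: vddc=%u\n", i, t->entries[i].usVoltage);
	free(t);
	return 0;
}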
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a988dfb..14e8723 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -146,6 +146,8 @@
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -218,6 +220,19 @@
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
+static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -245,8 +260,8 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
 	/* base must be 32 byte aligned */
@@ -673,6 +688,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -688,12 +704,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -707,19 +722,16 @@
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -729,12 +741,17 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -877,8 +894,19 @@
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1312,6 +1340,7 @@
 	.test_ring = sdma_v2_4_ring_test_ring,
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.is_lockup = sdma_v2_4_ring_is_lockup,
+	.insert_nop = sdma_v2_4_ring_insert_nop,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1348,19 +1377,19 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1373,16 +1402,16 @@
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
@@ -1415,5 +1444,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
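
Two things change in the padding logic above. First, SDMA firmware that
reports feature_version >= 20 understands burst NOPs, so insert_nop can
emit a single NOP header carrying a count instead of count individual NOP
dwords. Second, the pad before the INDIRECT_BUFFER packet is now computed
as (10 - (ring->wptr & 7)) % 8, which leaves wptr at 2 mod 8, the same
invariant the removed while loop enforced; the 6-dword IB packet that
follows then ends exactly on an 8-dword boundary (2 + 6 == 8). A
standalone check of that arithmetic (plain userspace C, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wptr;

	for (wptr = 0; wptr < 16; wptr++) {
		uint32_t pad = (10 - (wptr & 7)) % 8;

		/* after padding, wptr must sit at 2 mod 8 */
		assert(((wptr + pad) & 7) == 2);
		printf("wptr %% 8 = %u -> %u nop dword(s)\n", wptr & 7, pad);
	}
	return 0;
}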
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2b86569..9bfe92d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -53,6 +53,8 @@
 MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 {
@@ -80,6 +82,24 @@
 	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
 static const u32 cz_golden_settings_a11[] =
 {
 	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -122,6 +142,14 @@
 static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -167,6 +195,9 @@
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
@@ -187,6 +218,8 @@
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -273,6 +306,19 @@
 	}
 }
 
+static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -299,8 +345,7 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
@@ -763,6 +808,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -778,12 +824,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -797,19 +842,16 @@
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -819,12 +861,16 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -967,8 +1013,19 @@
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1406,6 +1463,7 @@
 	.test_ring = sdma_v3_0_ring_test_ring,
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.is_lockup = sdma_v3_0_ring_is_lockup,
+	.insert_nop = sdma_v3_0_ring_insert_nop,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1442,19 +1500,19 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1467,16 +1525,16 @@
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
@@ -1509,5 +1567,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
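
sdma_v3_0_vm_pad_ib() gets the same treatment: pad_count =
(8 - (ib->length_dw & 0x7)) % 8 rounds the IB up to the next multiple of
8 dwords without ever appending a full extra group, and on burst-capable
firmware the first pad dword advertises the remaining pad_count - 1
dwords in its COUNT field. A quick standalone check of the rounding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t len;

	for (len = 1; len <= 64; len++) {
		uint32_t pad = (8 - (len & 0x7)) % 8;

		assert((len + pad) % 8 == 0);	/* rounded up to a multiple of 8 */
		assert(pad < 8);	/* aligned IBs get no padding at all */
	}
	return 0;
}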
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
index 099b7b5..e5ebd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
@@ -2236,5 +2236,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __TONGA_SDMA_PKT_OPEN_H_ */
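
The count field added here occupies bits 16..29 of the NOP packet header
(a 14-bit field, so one burst NOP can stand in for up to 0x3FFF following
pad dwords). A standalone encode/decode check using only the macros from
this hunk:

#include <assert.h>
#include <stdint.h>

#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
#define SDMA_PKT_NOP_HEADER_count_shift  16
#define SDMA_PKT_NOP_HEADER_COUNT(x) \
	(((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)

int main(void)
{
	uint32_t hdr = SDMA_PKT_NOP_HEADER_COUNT(6);	/* 6 extra pad dwords */

	assert(hdr == (6u << 16));
	/* decoding recovers the count from the header dword */
	assert(((hdr >> 16) & SDMA_PKT_NOP_HEADER_count_mask) == 6);
	return 0;
}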
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 5fc53a4..1f5ac94 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -761,7 +761,9 @@
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -769,7 +771,9 @@
 
 	/* Allocate buffer for SMU internal buffer */
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 4efd671..5fac5da 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -534,7 +534,7 @@
 static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -886,6 +886,7 @@
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = uvd_v4_2_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
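
The UVD IB test (and the matching UVD v5/v6 and VCE hunks below) moves
from the driver-private amdgpu_fence API to the generic struct fence one:
fence_wait() replaces amdgpu_fence_wait() and fence_put() replaces
amdgpu_fence_unref(). A hedged sketch of the resulting wait-then-release
shape (kernel-style fragment against the 4.3-era <linux/fence.h> names,
not a complete driver function):

#include <linux/fence.h>

/* Wait for f, then drop our reference exactly once, success or failure. */
static int wait_and_release(struct fence *f)
{
	int r;

	r = fence_wait(f, false);	/* false: non-interruptible wait */
	fence_put(f);
	return r;
}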
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index b756bd9..2d5c59c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,7 +580,7 @@
 static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -825,6 +825,7 @@
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = uvd_v5_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 49aa931..d9f553f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -575,7 +575,7 @@
  */
 static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -805,6 +805,7 @@
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = uvd_v6_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 303d961d..cd16df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -643,6 +643,7 @@
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d1064ca..f0656df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -32,8 +32,8 @@
 #include "vid.h"
 #include "vce/vce_3_0_d.h"
 #include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
@@ -205,7 +205,14 @@
 	u32 tmp;
 	unsigned ret;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	/* Fiji is single pipe */
+	if (adev->asic_type == CHIP_FIJI) {
+		ret = AMDGPU_VCE_HARVEST_VCE1;
+		return ret;
+	}
+
+	/* Tonga and CZ are dual or single pipe */
+	if (adev->flags & AMD_IS_APU)
 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
 			VCE_HARVEST_FUSE_MACRO__SHIFT;
@@ -419,17 +426,41 @@
 static bool vce_v3_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
 
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
+
+	return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
 static int vce_v3_0_wait_for_idle(void *handle)
 {
 	unsigned i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
+
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+		if (!(RREG32(mmSRBM_STATUS2) & mask))
 			return 0;
 	}
 	return -ETIMEDOUT;
@@ -438,9 +469,21 @@
 static int vce_v3_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+		else
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+	}
+	WREG32_P(mmSRBM_SOFT_RESET, mask,
+		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
 	mdelay(5);
 
 	return vce_v3_0_start(adev);
@@ -601,6 +644,7 @@
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
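
The idle, wait-for-idle and soft-reset paths above all build their
register mask the same way: one loop over both VCE instances, skipping
any instance flagged in harvest_config. A standalone sketch of that loop
(bit positions are illustrative placeholders, and the assumption that
VCE1's harvest flag is bit 1 follows amdgpu's AMDGPU_VCE_HARVEST_*
defines, which are not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define VCE0_BUSY (1u << 7)	/* placeholder for the SRBM_STATUS2 VCE0 bit */
#define VCE1_BUSY (1u << 8)	/* placeholder for the SRBM_STATUS2 VCE1 bit */

static uint32_t vce_busy_mask(uint32_t harvest_config)
{
	uint32_t mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (harvest_config & (1 << idx))
			continue;	/* instance fused off: ignore it */
		mask |= (idx == 0) ? VCE0_BUSY : VCE1_BUSY;
	}
	return mask;
}

int main(void)
{
	/* Fiji is single pipe, so VCE1 is harvested and only VCE0 remains */
	printf("0x%08x\n", vce_busy_mask(1u << 1));
	return 0;
}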
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 68552da..552d9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,17 @@
 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
 
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+	mmPCIE_DATA, 0x000f0000, 0x00000000,
+	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
 static const u32 iceland_mgcg_cgcg_init[] =
 {
 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
@@ -232,6 +243,11 @@
 						 iceland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -261,7 +277,7 @@
 	u32 reference_clock = adev->clock.spll.reference_freq;
 	u32 tmp;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return reference_clock;
 
 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
@@ -362,6 +378,26 @@
 
 static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
+	{mmGRBM_STATUS2, false},
+	{mmGRBM_STATUS_SE0, false},
+	{mmGRBM_STATUS_SE1, false},
+	{mmGRBM_STATUS_SE2, false},
+	{mmGRBM_STATUS_SE3, false},
+	{mmSRBM_STATUS, false},
+	{mmSRBM_STATUS2, false},
+	{mmSRBM_STATUS3, false},
+	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
+	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
+	{mmCP_STAT, false},
+	{mmCP_STALLED_STAT1, false},
+	{mmCP_STALLED_STAT2, false},
+	{mmCP_STALLED_STAT3, false},
+	{mmCP_CPF_BUSY_STAT, false},
+	{mmCP_CPF_STALLED_STAT1, false},
+	{mmCP_CPF_STATUS, false},
+	{mmCP_CPC_BUSY_STAT, false},
+	{mmCP_CPC_STALLED_STAT1, false},
+	{mmCP_CPC_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
 	{mmMC_ARB_RAMCFG, false},
 	{mmGB_TILE_MODE0, false},
@@ -449,6 +485,7 @@
 		asic_register_table = tonga_allowed_read_registers;
 		size = ARRAY_SIZE(tonga_allowed_read_registers);
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 	case CHIP_CARRIZO:
 		asic_register_table = cz_allowed_read_registers;
@@ -751,7 +788,7 @@
 		srbm_soft_reset =
 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 		srbm_soft_reset =
 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
@@ -971,7 +1008,7 @@
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -999,7 +1036,7 @@
 	u32 tmp;
 
 	/* not necessary on CZ */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
@@ -1127,6 +1164,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &fiji_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_v10_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1202,6 +1307,10 @@
 		adev->ip_blocks = topaz_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
 		break;
+	case CHIP_FIJI:
+		adev->ip_blocks = fiji_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+		break;
 	case CHIP_TONGA:
 		adev->ip_blocks = tonga_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
@@ -1248,7 +1357,7 @@
 	bool smc_enabled = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->smc_rreg = &cz_smc_rreg;
 		adev->smc_wreg = &cz_smc_wreg;
 	} else {
@@ -1279,6 +1388,7 @@
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index 3b45332..fc120ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -30,7 +30,7 @@
 int cz_smu_fini(struct amdgpu_device *adev);
 
 extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
-
+extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
 extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 31bb894..d98aa9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -66,6 +66,11 @@
 
 #define AMDGPU_NUM_OF_VMIDS			8
 
+#define		PIPEID(x)					((x) << 0)
+#define		MEID(x)						((x) << 2)
+#define		VMID(x)						((x) << 4)
+#define		QUEUEID(x)					((x) << 8)
+
 #define RB_BITMAP_WIDTH_PER_SH     2
 
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
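
The new PIPEID/MEID/VMID/QUEUEID helpers pack a compute queue selector;
the shifts imply pipe in bits 0-1, ME in bits 2-3, VMID in bits 4-7 and
queue from bit 8 up (field widths inferred from the shifts, not stated in
the header). A standalone composition example:

#include <stdint.h>
#include <stdio.h>

#define PIPEID(x)	((x) << 0)
#define MEID(x)		((x) << 2)
#define VMID(x)		((x) << 4)
#define QUEUEID(x)	((x) << 8)

int main(void)
{
	uint32_t sel = PIPEID(1) | MEID(1) | VMID(3) | QUEUEID(2);

	printf("0x%08x\n", sel);	/* prints 0x00000235 */
	return 0;
}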
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 8dfac37..e13c67c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
 
 config HSA_AMD
 	tristate "HSA kernel driver for AMD GPU devices"
-	depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+	depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 2855115..7fc9b0f 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,8 @@
 # Makefile for Heterogeneous System Architecture support for AMD GPU devices
 #
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/  \
+		-Idrivers/gpu/drm/amd/include/asic_reg
 
 amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 183be5b..48769d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -65,17 +65,6 @@
 
 #define	AQL_ENABLE					1
 
-#define	SDMA_RB_VMID(x)					(x << 24)
-#define	SDMA_RB_ENABLE					(1 << 0)
-#define	SDMA_RB_SIZE(x)					((x) << 1) /* log2 */
-#define	SDMA_RPTR_WRITEBACK_ENABLE			(1 << 12)
-#define	SDMA_RPTR_WRITEBACK_TIMER(x)			((x) << 16) /* log2 */
-#define	SDMA_OFFSET(x)					(x << 0)
-#define	SDMA_DB_ENABLE					(1 << 28)
-#define	SDMA_ATC					(1 << 0)
-#define	SDMA_VA_PTR32					(1 << 4)
-#define	SDMA_VA_SHARED_BASE(x)				(x << 8)
-
 #define GRBM_GFX_INDEX					0x30800
 
 #define	ATC_VMID_PASID_MAPPING_VALID			(1U << 31)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index c991973..c6a1b4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,7 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 75312c8..3f95f7c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -80,7 +80,12 @@
 	{ 0x1318, &kaveri_device_info },	/* Kaveri */
 	{ 0x131B, &kaveri_device_info },	/* Kaveri */
 	{ 0x131C, &kaveri_device_info },	/* Kaveri */
-	{ 0x131D, &kaveri_device_info }		/* Kaveri */
+	{ 0x131D, &kaveri_device_info },	/* Kaveri */
+	{ 0x9870, &carrizo_device_info },	/* Carrizo */
+	{ 0x9874, &carrizo_device_info },	/* Carrizo */
+	{ 0x9875, &carrizo_device_info },	/* Carrizo */
+	{ 0x9876, &carrizo_device_info },	/* Carrizo */
+	{ 0x9877, &carrizo_device_info }	/* Carrizo */
 };
 
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 9ce8a20..c6f435a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -23,6 +23,7 @@
 
 #include "kfd_device_queue_manager.h"
 #include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
-	uint32_t value = SDMA_ATC;
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
 
 	if (q->process->is_32bit_user_mode)
-		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
 	else
-		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
-							qpd_to_pdd(qpd)));
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
 	q->properties.sdma_vm_addr = value;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 4c15212..7e9cae9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -22,6 +22,10 @@
  */
 
 #include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -37,14 +41,40 @@
 
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
 {
-	pr_warn("amdkfd: VI DQM is not currently supported\n");
-
 	ops->set_cache_memory_policy = set_cache_memory_policy_vi;
 	ops->register_process = register_process_vi;
 	ops->initialize = initialize_cpsch_vi;
 	ops->init_sdma_vm = init_sdma_vm;
 }
 
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
+	 * scratch and GPUVM apertures.
+	 * The hardware fills in the remaining 59 bits according to the
+	 * following pattern:
+	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
+	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
+	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
+	 *
+	 * (where X/Y is the configurable nybble with the low-bit 0)
+	 *
+	 * LDS and scratch will have the same top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+	 * GPUVM can have a different top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+	 * We don't bother to support different top nybbles
+	 * for LDS/Scratch and GPUVM.
+	 */
+
+	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+		top_address_nybble == 0);
+
+	return top_address_nybble << 12 |
+			(top_address_nybble << 12) <<
+			SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
+
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
 				   enum cache_policy default_policy,
@@ -52,18 +82,83 @@
 				   void __user *alternate_aperture_base,
 				   uint64_t alternate_aperture_size)
 {
-	return false;
+	uint32_t default_mtype;
+	uint32_t ape1_mtype;
+
+	default_mtype = (default_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	qpd->sh_mem_config = (qpd->sh_mem_config &
+			SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+		SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+	return true;
 }
 
 static int register_process_vi(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
-	return -1;
+	struct kfd_process_device *pdd;
+	unsigned int temp;
+
+	BUG_ON(!dqm || !qpd);
+
+	pdd = qpd_to_pdd(qpd);
+
+	/* check if sh_mem_config register already configured */
+	if (qpd->sh_mem_config == 0) {
+		qpd->sh_mem_config =
+			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+		qpd->sh_mem_ape1_limit = 0;
+		qpd->sh_mem_ape1_base = 0;
+	}
+
+	if (qpd->pqm->process->is_32bit_user_mode) {
+		temp = get_sh_mem_bases_32(pdd);
+		qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+					SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	} else {
+		temp = get_sh_mem_bases_nybble_64(pdd);
+		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	}
+
+	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+	return 0;
 }
 
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+	if (q->process->is_32bit_user_mode)
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
+	else
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+	q->properties.sdma_vm_addr = value;
 }
 
 static int initialize_cpsch_vi(struct device_queue_manager *dqm)
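
compute_sh_mem_bases_64bit() above mirrors the top address nybble into
both the PRIVATE_BASE bits (shift 12) and the SHARED_BASE bits of
SH_MEM_BASES. A standalone recomputation; the value 16 for
SH_MEM_BASES__SHARED_BASE__SHIFT is an assumption taken from the gfx8
asic_reg headers, which are not part of this diff:

#include <assert.h>
#include <stdint.h>

#define SH_MEM_BASES__SHARED_BASE__SHIFT 16	/* assumed gfx8 value */

static uint32_t compute_sh_mem_bases_64bit(unsigned int nybble)
{
	return nybble << 12 |
	       (nybble << 12) << SH_MEM_BASES__SHARED_BASE__SHIFT;
}

int main(void)
{
	/* nybble 0x2: PRIVATE_BASE and SHARED_BASE both read back 0x2000 */
	assert(compute_sh_mem_bases_64bit(0x2) == 0x20002000);
	return 0;
}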
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 35b9875..2b65510 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -33,7 +33,7 @@
 #include <linux/time.h>
 #include "kfd_priv.h"
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 
 /*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4349794..d83de98 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -27,6 +27,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static inline struct cik_mqd *get_mqd(void *mqd)
 {
@@ -214,17 +215,20 @@
 	BUG_ON(!mm || !mqd || !q);
 
 	m = get_sdma_mqd(mqd);
-	m->sdma_rlc_rb_cntl =
-		SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
-		SDMA_RB_VMID(q->vmid) |
-		SDMA_RPTR_WRITEBACK_ENABLE |
-		SDMA_RPTR_WRITEBACK_TIMER(6);
+	m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+			SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
 
 	m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
 	m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
-	m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+	m->sdma_rlc_doorbell = q->doorbell_off <<
+			SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+			1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+
 	m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
 
 	m->sdma_engine_id = q->sdma_engine_id;
@@ -234,7 +238,9 @@
 	if (q->queue_size > 0 &&
 			q->queue_address != 0 &&
 			q->queue_percent > 0) {
-		m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+		m->sdma_rlc_rb_cntl |=
+				1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
 		q->is_active = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b3a7e3b..fa32c32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -22,12 +22,255 @@
  */
 
 #include <linux/printk.h>
+#include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	int retval;
+	uint64_t addr;
+	struct vi_mqd *m;
+
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+			mqd_mem_obj);
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+	addr = (*mqd_mem_obj)->gpu_addr;
+
+	memset(m, 0, sizeof(struct vi_mqd));
+
+	m->header = 0xC0310800;
+	m->compute_pipelinestat_enable = 1;
+	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+			MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+	m->cp_hqd_pipe_priority = 1;
+	m->cp_hqd_queue_priority = 15;
+
+	m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL)
+		m->cp_hqd_iq_rptr = 1;
+
+	*mqd = m;
+	if (gart_addr != NULL)
+		*gart_addr = addr;
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr)
+{
+	return mm->dev->kfd2kgd->hqd_load
+		(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q, unsigned int mtype,
+			unsigned int atc_bit)
+{
+	struct vi_mqd *m;
+
+	BUG_ON(!mm || !q || !mqd);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	m = get_mqd(mqd);
+
+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+			atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+			mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
+	m->cp_hqd_pq_control |=
+			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+	pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+	m->cp_hqd_pq_doorbell_control =
+		1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+		q->doorbell_off <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+	pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+			m->cp_hqd_pq_doorbell_control);
+
+	m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+			mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+			3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+			mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_eop_control |=
+		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+	m->cp_hqd_eop_base_addr_lo =
+			lower_32_bits(q->eop_ring_buffer_address >> 8);
+	m->cp_hqd_eop_base_addr_hi =
+			upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+	m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+			mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+	m->cp_hqd_vmid = q->vmid;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+	}
+
+	m->cp_hqd_active = 0;
+	q->is_active = false;
+	if (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0) {
+		m->cp_hqd_active = 1;
+		q->is_active = true;
+	}
+
+	return 0;
+}
+
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+			enum kfd_preempt_type type,
+			unsigned int timeout, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_destroy
+		(mm->dev->kgd, type, timeout,
+		pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+			struct kfd_mem_obj *mqd_mem_obj)
+{
+	BUG_ON(!mm || !mqd);
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+			uint64_t queue_address,	uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_is_occupied(
+		mm->dev->kgd, queue_address,
+		pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(*mqd);
+
+	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+	return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(mqd);
+	m->cp_hqd_vmid = q->vmid;
+	return retval;
+}
 
 struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
-					struct kfd_dev *dev)
+		struct kfd_dev *dev)
 {
-	pr_warn("amdkfd: VI MQD is not currently supported\n");
-	return NULL;
+	struct mqd_manager *mqd;
+
+	BUG_ON(!dev);
+	BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+	if (!mqd)
+		return NULL;
+
+	mqd->dev = dev;
+
+	switch (type) {
+	case KFD_MQD_TYPE_CP:
+	case KFD_MQD_TYPE_COMPUTE:
+		mqd->init_mqd = init_mqd;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_HIQ:
+		mqd->init_mqd = init_mqd_hiq;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd_hiq;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_SDMA:
+		break;
+	default:
+		kfree(mqd);
+		return NULL;
+	}
+
+	return mqd;
 }
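
The ring-size encoding used twice above ("ffs(...) - 1 - 1" for
CP_HQD_PQ_CONTROL and CP_HQD_EOP_CONTROL) relies on queue sizes being
powers of two: ffs() on such a value returns log2 + 1, so subtracting 1
twice yields log2(size in dwords) - 1, which appears to be what the
hardware size field expects. A standalone check of the arithmetic:

#include <assert.h>
#include <strings.h>	/* userspace ffs() */

int main(void)
{
	unsigned int queue_size = 4096;	/* bytes, example value */
	unsigned int dwords = queue_size / sizeof(unsigned int);
	int enc = ffs(dwords) - 1 - 1;

	assert(dwords == 1024);
	assert(enc == 9);	/* field encodes log2(1024) - 1 */
	return 0;
}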
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 99b6d28..90f3914 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -27,6 +27,7 @@
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
 #include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
 #include "kfd_pm4_opcodes.h"
 
 static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@
 				bool *over_subscription)
 {
 	unsigned int process_count, queue_count;
+	unsigned int map_queue_size;
 
 	BUG_ON(!pm || !rlib_size || !over_subscription);
 
@@ -69,9 +71,13 @@
 		pr_debug("kfd: over subscribed runlist\n");
 	}
 
+	map_queue_size =
+		(pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+		sizeof(struct pm4_mes_map_queues) :
+		sizeof(struct pm4_map_queues);
 	/* calculate run list ib allocation size */
 	*rlib_size = process_count * sizeof(struct pm4_map_process) +
-		     queue_count * sizeof(struct pm4_map_queues);
+		     queue_count * map_queue_size;
 
 	/*
 	 * Increase the allocation size in case we need a chained run list
@@ -176,6 +182,71 @@
 	return 0;
 }
 
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	BUG_ON(!pm || !buffer || !q);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.alloc_format =
+		alloc_format__mes_map_queues__one_per_pipe_vi;
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel =
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		pr_err("kfd: in %s: invalid queue type %d\n", __func__,
+				q->properties.type);
+		BUG();
+		break;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
 static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 				struct queue *q, bool is_static)
 {
@@ -292,8 +363,17 @@
 			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						kq->queue, qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
 			if (retval != 0)
 				return retval;
 
@@ -309,8 +389,17 @@
 			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						q,  qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
 
 			if (retval != 0)
 				return retval;
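
The packet-manager changes above do two things: size the runlist IB using the VI
MAP_QUEUES packet on Carrizo, and dispatch to pm_create_map_queue_vi() for that
ASIC family. A small self-contained sketch of the sizing logic follows; the
demo_* struct sizes are placeholders, not the real PM4 layouts.

#include <stddef.h>
#include <stdio.h>

struct demo_map_process    { unsigned int dw[10]; }; /* stand-in packet */
struct demo_map_queues_cik { unsigned int dw[7];  }; /* stand-in packet */
struct demo_map_queues_vi  { unsigned int dw[7];  }; /* stand-in packet */

enum demo_asic { DEMO_CHIP_KAVERI, DEMO_CHIP_CARRIZO };

/* Mirrors pm_calc_rlib_size(): pick the MAP_QUEUES size per ASIC family,
 * then charge one MAP_PROCESS per process and one MAP_QUEUES per queue. */
static size_t demo_rlib_size(enum demo_asic asic, unsigned int process_count,
			     unsigned int queue_count)
{
	size_t map_queue_size = (asic == DEMO_CHIP_CARRIZO) ?
		sizeof(struct demo_map_queues_vi) :
		sizeof(struct demo_map_queues_cik);

	return process_count * sizeof(struct demo_map_process) +
	       queue_count * map_queue_size;
}

int main(void)
{
	printf("runlist IB bytes (2 processes, 8 queues, VI): %zu\n",
	       demo_rlib_size(DEMO_CHIP_CARRIZO, 2, 8));
	return 0;
}
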
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644
index 0000000..08c7219
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+	struct {
+		uint32_t reserved1 : 8; /* < reserved */
+		uint32_t opcode    : 8; /* < IT opcode */
+		uint32_t count     : 14;/* < number of DWORDs - 1 in the
+		information body. */
+		uint32_t type      : 2; /* < packet identifier.
+					It should be 3 for type 3 packets */
+	};
+	uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+	queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+	queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+	queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+	union {
+		union PM4_MES_TYPE_3_HEADER	header;		/* header */
+		uint32_t			ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t vmid_mask:16;
+			uint32_t unmap_latency:8;
+			uint32_t reserved1:5;
+			enum mes_set_resources_queue_type_enum queue_type:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t queue_mask_lo;
+	uint32_t queue_mask_hi;
+	uint32_t gws_mask_lo;
+	uint32_t gws_mask_hi;
+
+	union {
+		struct {
+			uint32_t oac_mask:16;
+			uint32_t reserved2:16;
+		} bitfields7;
+		uint32_t ordinal7;
+	};
+
+	union {
+		struct {
+			uint32_t gds_heap_base:6;
+			uint32_t reserved3:5;
+			uint32_t gds_heap_size:6;
+			uint32_t reserved4:15;
+		} bitfields8;
+		uint32_t ordinal8;
+	};
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:2;
+			uint32_t ib_base_lo:30;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t ib_base_hi:16;
+			uint32_t reserved2:16;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t ib_size:20;
+			uint32_t chain:1;
+			uint32_t offload_polling:1;
+			uint32_t reserved3:1;
+			uint32_t valid:1;
+			uint32_t reserved4:8;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:8;
+			uint32_t diq_enable:1;
+			uint32_t process_quantum:7;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t page_table_base:28;
+			uint32_t reserved2:4;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t sh_mem_bases;
+	uint32_t sh_mem_ape1_base;
+	uint32_t sh_mem_ape1_limit;
+	uint32_t sh_mem_config;
+	uint32_t gds_addr_lo;
+	uint32_t gds_addr_hi;
+
+	union {
+		struct {
+			uint32_t num_gws:6;
+			uint32_t reserved3:2;
+			uint32_t num_oac:4;
+			uint32_t reserved4:4;
+			uint32_t gds_size:6;
+			uint32_t num_queues:10;
+		} bitfields10;
+		uint32_t ordinal10;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+	queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+	queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+	queue_type__mes_map_queues__normal_compute_vi = 0,
+	queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+	queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+	queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+	alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+	alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+	engine_sel__mes_map_queues__compute_vi = 0,
+	engine_sel__mes_map_queues__sdma0_vi = 2,
+	engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:4;
+			enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+			uint32_t reserved2:15;
+			enum mes_map_queues_queue_type_vi_enum queue_type:3;
+			enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+			enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t reserved3:1;
+			uint32_t check_disable:1;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved4:3;
+			uint32_t queue:6;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t mqd_addr_lo;
+	uint32_t mqd_addr_hi;
+	uint32_t wptr_addr_lo;
+	uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+	interrupt_sel__mes_query_status__completion_status = 0,
+	interrupt_sel__mes_query_status__process_status = 1,
+	interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+	command__mes_query_status__interrupt_only = 0,
+	command__mes_query_status__fence_only_immediate = 1,
+	command__mes_query_status__fence_only_after_write_ack = 2,
+	command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+	engine_sel__mes_query_status__compute = 0,
+	engine_sel__mes_query_status__sdma0_queue = 2,
+	engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t context_id:28;
+			enum mes_query_status_interrupt_sel_enum
+				interrupt_sel:2;
+			enum mes_query_status_command_enum command:2;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved2:2;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved3:2;
+			enum mes_query_status_engine_sel_enum engine_sel:3;
+			uint32_t reserved4:4;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint32_t data_lo;
+	uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+	action__mes_unmap_queues__preempt_queues = 0,
+	action__mes_unmap_queues__reset_queues = 1,
+	action__mes_unmap_queues__disable_process_queues = 2,
+	action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+	queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+	queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+	queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+	queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+	engine_sel__mes_unmap_queues__compute = 0,
+	engine_sel__mes_unmap_queues__sdma0 = 2,
+	engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_mes_unmap_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			enum mes_unmap_queues_action_enum action:2;
+			uint32_t reserved1:2;
+			enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+			uint32_t reserved2:20;
+			enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved3:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved4:2;
+			uint32_t doorbell_offset0:21;
+			uint32_t reserved5:9;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t reserved6:2;
+			uint32_t doorbell_offset1:21;
+			uint32_t reserved7:9;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+	union {
+		struct {
+			uint32_t reserved8:2;
+			uint32_t doorbell_offset2:21;
+			uint32_t reserved9:9;
+		} bitfields5;
+		uint32_t ordinal5;
+	};
+
+	union {
+		struct {
+			uint32_t reserved10:2;
+			uint32_t doorbell_offset3:21;
+			uint32_t reserved11:9;
+		} bitfields6;
+		uint32_t ordinal6;
+	};
+};
+#endif
+
+#endif
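
The header above encodes every PM4 dword as a union of bitfields plus a raw
ordinalN (or u32All) view, so packets can be filled field-by-field and then
emitted as plain 32-bit words. A toy illustration of that encoding, using a
PM4_MES_TYPE_3_HEADER-like layout; bit placement of C bitfields is compiler-
and endianness-dependent, which is why hardware headers like this one only
target a fixed ABI.

#include <stdint.h>
#include <stdio.h>

union demo_type_3_header {
	struct {
		uint32_t reserved1 : 8;  /* reserved */
		uint32_t opcode    : 8;  /* IT opcode */
		uint32_t count     : 14; /* DWORDs - 1 in the packet body */
		uint32_t type      : 2;  /* 3 for type-3 packets */
	};
	uint32_t u32All; /* raw view of the same dword */
};

int main(void)
{
	union demo_type_3_header h = {0};

	h.opcode = 0xA1; /* hypothetical opcode, not a real IT_* value */
	h.count  = 6;
	h.type   = 3;

	printf("encoded header dword: 0x%08x\n", (unsigned int)h.u32All);
	return 0;
}
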
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c25728b..74909e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1186,6 +1186,11 @@
 	 * TODO: Retrieve max engine clock values from KGD
 	 */
 
+	if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+		dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+		pr_info("amdkfd: adding doorbell packet type capability\n");
+	}
+
 	res = 0;
 
 err:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 989624b..c3ddb9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -40,6 +40,7 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK	0x00000f00
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT	8
 #define HSA_CAP_RESERVED			0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE		0x00001000
 
 struct kfd_node_properties {
 	uint32_t cpu_cores_count;
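
HSA_CAP_DOORBELL_PACKET_TYPE claims bit 12 of the node capability word; note
that bit still falls inside the existing HSA_CAP_RESERVED mask (0xfffff000),
which this hunk does not shrink. A trivial sketch of setting and testing such
a capability bit; the demo_ struct is a stand-in for kfd_node_properties.

#include <stdint.h>
#include <stdio.h>

#define DEMO_HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000 /* value from the hunk */

struct demo_node_props { uint32_t capability; };

int main(void)
{
	struct demo_node_props props = { .capability = 0 };

	/* kfd_topology.c sets this bit only when the ASIC is CHIP_CARRIZO */
	props.capability |= DEMO_HSA_CAP_DOORBELL_PACKET_TYPE;

	if (props.capability & DEMO_HSA_CAP_DOORBELL_PACKET_TYPE)
		puts("node advertises the VI doorbell packet type");
	return 0;
}
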
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 5bdf1b4..68a8eaa 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,6 +23,45 @@
 #ifndef __AMD_SHARED_H__
 #define __AMD_SHARED_H__
 
+#define AMD_MAX_USEC_TIMEOUT		100000  /* 100 ms */
+
+/*
+ * Supported GPU families (aligned with amdgpu_drm.h)
+ */
+#define AMD_FAMILY_UNKNOWN              0
+#define AMD_FAMILY_CI                   120 /* Bonaire, Hawaii */
+#define AMD_FAMILY_KV                   125 /* Kaveri, Kabini, Mullins */
+#define AMD_FAMILY_VI                   130 /* Iceland, Tonga */
+#define AMD_FAMILY_CZ                   135 /* Carrizo */
+
+/*
+ * Supported ASIC types
+ */
+enum amd_asic_type {
+	CHIP_BONAIRE = 0,
+	CHIP_KAVERI,
+	CHIP_KABINI,
+	CHIP_HAWAII,
+	CHIP_MULLINS,
+	CHIP_TOPAZ,
+	CHIP_TONGA,
+	CHIP_FIJI,
+	CHIP_CARRIZO,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum amd_chip_flags {
+	AMD_ASIC_MASK = 0x0000ffffUL,
+	AMD_FLAGS_MASK  = 0xffff0000UL,
+	AMD_IS_MOBILITY = 0x00010000UL,
+	AMD_IS_APU      = 0x00020000UL,
+	AMD_IS_PX       = 0x00040000UL,
+	AMD_EXP_HW_SUPPORT = 0x00080000UL,
+};
+
 enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_COMMON,
 	AMD_IP_BLOCK_TYPE_GMC,
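
The shared header above packs the ASIC type into the low 16 bits of a flags
word (AMD_ASIC_MASK) and feature bits into the high 16 (AMD_FLAGS_MASK), so a
single unsigned long identifies both the chip and its traits. A sketch under
those definitions; the demo_ names are copies for illustration only.

#include <stdio.h>

enum demo_asic_type { DEMO_CHIP_BONAIRE = 0, DEMO_CHIP_CARRIZO = 8 };

#define DEMO_ASIC_MASK  0x0000ffffUL /* low 16 bits: ASIC type */
#define DEMO_FLAGS_MASK 0xffff0000UL /* high 16 bits: feature flags */
#define DEMO_IS_APU     0x00020000UL

int main(void)
{
	unsigned long flags = DEMO_CHIP_CARRIZO | DEMO_IS_APU;

	printf("asic type: %lu, is APU: %s\n",
	       flags & DEMO_ASIC_MASK,
	       (flags & DEMO_IS_APU) ? "yes" : "no");
	return 0;
}
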
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
new file mode 100644
index 0000000..44b1855
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -0,0 +1,1246 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_D_H
+#define SMU_7_1_3_D_H
+
+#define mmGCK_SMC_IND_INDEX                                                     0x80
+#define mmGCK0_GCK_SMC_IND_INDEX                                                0x80
+#define mmGCK1_GCK_SMC_IND_INDEX                                                0x82
+#define mmGCK2_GCK_SMC_IND_INDEX                                                0x84
+#define mmGCK3_GCK_SMC_IND_INDEX                                                0x86
+#define mmGCK_SMC_IND_DATA                                                      0x81
+#define mmGCK0_GCK_SMC_IND_DATA                                                 0x81
+#define mmGCK1_GCK_SMC_IND_DATA                                                 0x83
+#define mmGCK2_GCK_SMC_IND_DATA                                                 0x85
+#define mmGCK3_GCK_SMC_IND_DATA                                                 0x87
+#define ixGCK_MCLK_FUSES                                                        0xc0500008
+#define ixCG_DCLK_CNTL                                                          0xc050009c
+#define ixCG_DCLK_STATUS                                                        0xc05000a0
+#define ixCG_VCLK_CNTL                                                          0xc05000a4
+#define ixCG_VCLK_STATUS                                                        0xc05000a8
+#define ixCG_ECLK_CNTL                                                          0xc05000ac
+#define ixCG_ECLK_STATUS                                                        0xc05000b0
+#define ixCG_ACLK_CNTL                                                          0xc05000dc
+#define ixCG_MCLK_CNTL                                                          0xc0500120
+#define ixCG_MCLK_STATUS                                                        0xc0500124
+#define ixGCK_DFS_BYPASS_CNTL                                                   0xc0500118
+#define ixCG_SPLL_FUNC_CNTL                                                     0xc0500140
+#define ixCG_SPLL_FUNC_CNTL_2                                                   0xc0500144
+#define ixCG_SPLL_FUNC_CNTL_3                                                   0xc0500148
+#define ixCG_SPLL_FUNC_CNTL_4                                                   0xc050014c
+#define ixCG_SPLL_FUNC_CNTL_5                                                   0xc0500150
+#define ixCG_SPLL_FUNC_CNTL_6                                                   0xc0500154
+#define ixCG_SPLL_FUNC_CNTL_7                                                   0xc0500158
+#define ixSPLL_CNTL_MODE                                                        0xc0500160
+#define ixCG_SPLL_SPREAD_SPECTRUM                                               0xc0500164
+#define ixCG_SPLL_SPREAD_SPECTRUM_2                                             0xc0500168
+#define ixMPLL_BYPASSCLK_SEL                                                    0xc050019c
+#define ixCG_CLKPIN_CNTL                                                        0xc05001a0
+#define ixCG_CLKPIN_CNTL_2                                                      0xc05001a4
+#define ixCG_CLKPIN_CNTL_DC                                                     0xc0500204
+#define ixTHM_CLK_CNTL                                                          0xc05001a8
+#define ixMISC_CLK_CTRL                                                         0xc05001ac
+#define ixGCK_PLL_TEST_CNTL                                                     0xc05001c0
+#define ixGCK_PLL_TEST_CNTL_2                                                   0xc05001c4
+#define ixGCK_ADFS_CLK_BYPASS_CNTL1                                             0xc05001c8
+#define mmSMC_IND_INDEX                                                         0x80
+#define mmSMC0_SMC_IND_INDEX                                                    0x80
+#define mmSMC1_SMC_IND_INDEX                                                    0x82
+#define mmSMC2_SMC_IND_INDEX                                                    0x84
+#define mmSMC3_SMC_IND_INDEX                                                    0x86
+#define mmSMC_IND_DATA                                                          0x81
+#define mmSMC0_SMC_IND_DATA                                                     0x81
+#define mmSMC1_SMC_IND_DATA                                                     0x83
+#define mmSMC2_SMC_IND_DATA                                                     0x85
+#define mmSMC3_SMC_IND_DATA                                                     0x87
+#define mmSMC_IND_INDEX_0                                                       0x80
+#define mmSMC_IND_DATA_0                                                        0x81
+#define mmSMC_IND_INDEX_1                                                       0x82
+#define mmSMC_IND_DATA_1                                                        0x83
+#define mmSMC_IND_INDEX_2                                                       0x84
+#define mmSMC_IND_DATA_2                                                        0x85
+#define mmSMC_IND_INDEX_3                                                       0x86
+#define mmSMC_IND_DATA_3                                                        0x87
+#define mmSMC_IND_INDEX_4                                                       0x88
+#define mmSMC_IND_DATA_4                                                        0x89
+#define mmSMC_IND_INDEX_5                                                       0x8a
+#define mmSMC_IND_DATA_5                                                        0x8b
+#define mmSMC_IND_INDEX_6                                                       0x8c
+#define mmSMC_IND_DATA_6                                                        0x8d
+#define mmSMC_IND_INDEX_7                                                       0x8e
+#define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_ACCESS_CNTL                                                   0x92
+#define mmSMC_MESSAGE_0                                                         0x94
+#define mmSMC_RESP_0                                                            0x95
+#define mmSMC_MESSAGE_1                                                         0x96
+#define mmSMC_RESP_1                                                            0x97
+#define mmSMC_MESSAGE_2                                                         0x98
+#define mmSMC_RESP_2                                                            0x99
+#define mmSMC_MESSAGE_3                                                         0x9a
+#define mmSMC_RESP_3                                                            0x9b
+#define mmSMC_MESSAGE_4                                                         0x9c
+#define mmSMC_RESP_4                                                            0x9d
+#define mmSMC_MESSAGE_5                                                         0x9e
+#define mmSMC_RESP_5                                                            0x9f
+#define mmSMC_MESSAGE_6                                                         0xa0
+#define mmSMC_RESP_6                                                            0xa1
+#define mmSMC_MESSAGE_7                                                         0xa2
+#define mmSMC_RESP_7                                                            0xa3
+#define mmSMC_MSG_ARG_0                                                         0xa4
+#define mmSMC_MSG_ARG_1                                                         0xa5
+#define mmSMC_MSG_ARG_2                                                         0xa6
+#define mmSMC_MSG_ARG_3                                                         0xa7
+#define mmSMC_MSG_ARG_4                                                         0xa8
+#define mmSMC_MSG_ARG_5                                                         0xa9
+#define mmSMC_MSG_ARG_6                                                         0xaa
+#define mmSMC_MSG_ARG_7                                                         0xab
+#define mmSMC_MESSAGE_8                                                         0xb5
+#define mmSMC_RESP_8                                                            0xb6
+#define mmSMC_MESSAGE_9                                                         0xb7
+#define mmSMC_RESP_9                                                            0xb8
+#define mmSMC_MESSAGE_10                                                        0xb9
+#define mmSMC_RESP_10                                                           0xba
+#define mmSMC_MESSAGE_11                                                        0xbb
+#define mmSMC_RESP_11                                                           0xbc
+#define mmSMC_MSG_ARG_8                                                         0xbd
+#define mmSMC_MSG_ARG_9                                                         0xbe
+#define mmSMC_MSG_ARG_10                                                        0xbf
+#define mmSMC_MSG_ARG_11                                                        0x93
+#define ixSMC_SYSCON_RESET_CNTL                                                 0x80000000
+#define ixSMC_SYSCON_CLOCK_CNTL_0                                               0x80000004
+#define ixSMC_SYSCON_CLOCK_CNTL_1                                               0x80000008
+#define ixSMC_SYSCON_CLOCK_CNTL_2                                               0x8000000c
+#define ixSMC_SYSCON_MISC_CNTL                                                  0x80000010
+#define ixSMC_SYSCON_MSG_ARG_0                                                  0x80000068
+#define ixSMC_PC_C                                                              0x80000370
+#define ixSMC_SCRATCH9                                                          0x80000424
+#define mmGPIOPAD_SW_INT_STAT                                                   0x180
+#define mmGPIOPAD_STRENGTH                                                      0x181
+#define mmGPIOPAD_MASK                                                          0x182
+#define mmGPIOPAD_A                                                             0x183
+#define mmGPIOPAD_EN                                                            0x184
+#define mmGPIOPAD_Y                                                             0x185
+#define mmGPIOPAD_PINSTRAPS                                                     0x186
+#define mmGPIOPAD_INT_STAT_EN                                                   0x187
+#define mmGPIOPAD_INT_STAT                                                      0x188
+#define mmGPIOPAD_INT_STAT_AK                                                   0x189
+#define mmGPIOPAD_INT_EN                                                        0x18a
+#define mmGPIOPAD_INT_TYPE                                                      0x18b
+#define mmGPIOPAD_INT_POLARITY                                                  0x18c
+#define mmGPIOPAD_EXTERN_TRIG_CNTL                                              0x18d
+#define mmGPIOPAD_RCVR_SEL                                                      0x191
+#define mmGPIOPAD_PU_EN                                                         0x192
+#define mmGPIOPAD_PD_EN                                                         0x193
+#define mmCG_FPS_CNT                                                            0x1b6
+#define mmSMU_IND_INDEX_0                                                       0x1a6
+#define mmSMU_IND_DATA_0                                                        0x1a7
+#define mmSMU_IND_INDEX_1                                                       0x1a8
+#define mmSMU_IND_DATA_1                                                        0x1a9
+#define mmSMU_IND_INDEX_2                                                       0x1aa
+#define mmSMU_IND_DATA_2                                                        0x1ab
+#define mmSMU_IND_INDEX_3                                                       0x1ac
+#define mmSMU_IND_DATA_3                                                        0x1ad
+#define mmSMU_IND_INDEX_4                                                       0x1ae
+#define mmSMU_IND_DATA_4                                                        0x1af
+#define mmSMU_IND_INDEX_5                                                       0x1b0
+#define mmSMU_IND_DATA_5                                                        0x1b1
+#define mmSMU_IND_INDEX_6                                                       0x1b2
+#define mmSMU_IND_DATA_6                                                        0x1b3
+#define mmSMU_IND_INDEX_7                                                       0x1b4
+#define mmSMU_IND_DATA_7                                                        0x1b5
+#define mmSMU_SMC_IND_INDEX                                                     0x80
+#define mmSMU0_SMU_SMC_IND_INDEX                                                0x80
+#define mmSMU1_SMU_SMC_IND_INDEX                                                0x82
+#define mmSMU2_SMU_SMC_IND_INDEX                                                0x84
+#define mmSMU3_SMU_SMC_IND_INDEX                                                0x86
+#define mmSMU_SMC_IND_DATA                                                      0x81
+#define mmSMU0_SMU_SMC_IND_DATA                                                 0x81
+#define mmSMU1_SMU_SMC_IND_DATA                                                 0x83
+#define mmSMU2_SMU_SMC_IND_DATA                                                 0x85
+#define mmSMU3_SMU_SMC_IND_DATA                                                 0x87
+#define ixRCU_UC_EVENTS                                                         0xc0000004
+#define ixRCU_MISC_CTRL                                                         0xc0000010
+#define ixRCU_VIRT_RESET_REQ                                                    0xc0000024
+#define ixCC_RCU_FUSES                                                          0xc00c0000
+#define ixCC_SMU_MISC_FUSES                                                     0xc00c0004
+#define ixCC_SCLK_VID_FUSES                                                     0xc00c0008
+#define ixCC_GIO_IOCCFG_FUSES                                                   0xc00c000c
+#define ixCC_GIO_IOC_FUSES                                                      0xc00c0010
+#define ixCC_SMU_TST_EFUSE1_MISC                                                0xc00c001c
+#define ixCC_TST_ID_STRAPS                                                      0xc00c0020
+#define ixCC_FCTRL_FUSES                                                        0xc00c0024
+#define ixCC_HARVEST_FUSES                                                      0xc00c0028
+#define ixSMU_MAIN_PLL_OP_FREQ                                                  0xe0003020
+#define ixSMU_STATUS                                                            0xe0003088
+#define ixSMU_FIRMWARE                                                          0xe00030a4
+#define ixSMU_INPUT_DATA                                                        0xe00030b8
+#define ixSMU_EFUSE_0                                                           0xc0100000
+#define ixFIRMWARE_FLAGS                                                        0x3f000
+#define ixTDC_STATUS                                                            0x3f004
+#define ixTDC_MV_AVERAGE                                                        0x3f008
+#define ixTDC_VRM_LIMIT                                                         0x3f00c
+#define ixFEATURE_STATUS                                                        0x3f010
+#define ixENTITY_TEMPERATURES_1                                                 0x3f014
+#define ixMCARB_DRAM_TIMING_TABLE_1                                             0x3f018
+#define ixMCARB_DRAM_TIMING_TABLE_2                                             0x3f01c
+#define ixMCARB_DRAM_TIMING_TABLE_3                                             0x3f020
+#define ixMCARB_DRAM_TIMING_TABLE_4                                             0x3f024
+#define ixMCARB_DRAM_TIMING_TABLE_5                                             0x3f028
+#define ixMCARB_DRAM_TIMING_TABLE_6                                             0x3f02c
+#define ixMCARB_DRAM_TIMING_TABLE_7                                             0x3f030
+#define ixMCARB_DRAM_TIMING_TABLE_8                                             0x3f034
+#define ixMCARB_DRAM_TIMING_TABLE_9                                             0x3f038
+#define ixMCARB_DRAM_TIMING_TABLE_10                                            0x3f03c
+#define ixMCARB_DRAM_TIMING_TABLE_11                                            0x3f040
+#define ixMCARB_DRAM_TIMING_TABLE_12                                            0x3f044
+#define ixMCARB_DRAM_TIMING_TABLE_13                                            0x3f048
+#define ixMCARB_DRAM_TIMING_TABLE_14                                            0x3f04c
+#define ixMCARB_DRAM_TIMING_TABLE_15                                            0x3f050
+#define ixMCARB_DRAM_TIMING_TABLE_16                                            0x3f054
+#define ixMCARB_DRAM_TIMING_TABLE_17                                            0x3f058
+#define ixMCARB_DRAM_TIMING_TABLE_18                                            0x3f05c
+#define ixMCARB_DRAM_TIMING_TABLE_19                                            0x3f060
+#define ixMCARB_DRAM_TIMING_TABLE_20                                            0x3f064
+#define ixMCARB_DRAM_TIMING_TABLE_21                                            0x3f068
+#define ixMCARB_DRAM_TIMING_TABLE_22                                            0x3f06c
+#define ixMCARB_DRAM_TIMING_TABLE_23                                            0x3f070
+#define ixMCARB_DRAM_TIMING_TABLE_24                                            0x3f074
+#define ixMCARB_DRAM_TIMING_TABLE_25                                            0x3f078
+#define ixMCARB_DRAM_TIMING_TABLE_26                                            0x3f07c
+#define ixMCARB_DRAM_TIMING_TABLE_27                                            0x3f080
+#define ixMCARB_DRAM_TIMING_TABLE_28                                            0x3f084
+#define ixMCARB_DRAM_TIMING_TABLE_29                                            0x3f088
+#define ixMCARB_DRAM_TIMING_TABLE_30                                            0x3f08c
+#define ixMCARB_DRAM_TIMING_TABLE_31                                            0x3f090
+#define ixMCARB_DRAM_TIMING_TABLE_32                                            0x3f094
+#define ixMCARB_DRAM_TIMING_TABLE_33                                            0x3f098
+#define ixMCARB_DRAM_TIMING_TABLE_34                                            0x3f09c
+#define ixMCARB_DRAM_TIMING_TABLE_35                                            0x3f0a0
+#define ixMCARB_DRAM_TIMING_TABLE_36                                            0x3f0a4
+#define ixMCARB_DRAM_TIMING_TABLE_37                                            0x3f0a8
+#define ixMCARB_DRAM_TIMING_TABLE_38                                            0x3f0ac
+#define ixMCARB_DRAM_TIMING_TABLE_39                                            0x3f0b0
+#define ixMCARB_DRAM_TIMING_TABLE_40                                            0x3f0b4
+#define ixMCARB_DRAM_TIMING_TABLE_41                                            0x3f0b8
+#define ixMCARB_DRAM_TIMING_TABLE_42                                            0x3f0bc
+#define ixMCARB_DRAM_TIMING_TABLE_43                                            0x3f0c0
+#define ixMCARB_DRAM_TIMING_TABLE_44                                            0x3f0c4
+#define ixMCARB_DRAM_TIMING_TABLE_45                                            0x3f0c8
+#define ixMCARB_DRAM_TIMING_TABLE_46                                            0x3f0cc
+#define ixMCARB_DRAM_TIMING_TABLE_47                                            0x3f0d0
+#define ixMCARB_DRAM_TIMING_TABLE_48                                            0x3f0d4
+#define ixMCARB_DRAM_TIMING_TABLE_49                                            0x3f0d8
+#define ixMCARB_DRAM_TIMING_TABLE_50                                            0x3f0dc
+#define ixMCARB_DRAM_TIMING_TABLE_51                                            0x3f0e0
+#define ixMCARB_DRAM_TIMING_TABLE_52                                            0x3f0e4
+#define ixMCARB_DRAM_TIMING_TABLE_53                                            0x3f0e8
+#define ixMCARB_DRAM_TIMING_TABLE_54                                            0x3f0ec
+#define ixMCARB_DRAM_TIMING_TABLE_55                                            0x3f0f0
+#define ixMCARB_DRAM_TIMING_TABLE_56                                            0x3f0f4
+#define ixMCARB_DRAM_TIMING_TABLE_57                                            0x3f0f8
+#define ixMCARB_DRAM_TIMING_TABLE_58                                            0x3f0fc
+#define ixMCARB_DRAM_TIMING_TABLE_59                                            0x3f100
+#define ixMCARB_DRAM_TIMING_TABLE_60                                            0x3f104
+#define ixMCARB_DRAM_TIMING_TABLE_61                                            0x3f108
+#define ixMCARB_DRAM_TIMING_TABLE_62                                            0x3f10c
+#define ixMCARB_DRAM_TIMING_TABLE_63                                            0x3f110
+#define ixMCARB_DRAM_TIMING_TABLE_64                                            0x3f114
+#define ixMCARB_DRAM_TIMING_TABLE_65                                            0x3f118
+#define ixMCARB_DRAM_TIMING_TABLE_66                                            0x3f11c
+#define ixMCARB_DRAM_TIMING_TABLE_67                                            0x3f120
+#define ixMCARB_DRAM_TIMING_TABLE_68                                            0x3f124
+#define ixMCARB_DRAM_TIMING_TABLE_69                                            0x3f128
+#define ixMCARB_DRAM_TIMING_TABLE_70                                            0x3f12c
+#define ixMCARB_DRAM_TIMING_TABLE_71                                            0x3f130
+#define ixMCARB_DRAM_TIMING_TABLE_72                                            0x3f134
+#define ixMCARB_DRAM_TIMING_TABLE_73                                            0x3f138
+#define ixMCARB_DRAM_TIMING_TABLE_74                                            0x3f13c
+#define ixMCARB_DRAM_TIMING_TABLE_75                                            0x3f140
+#define ixMCARB_DRAM_TIMING_TABLE_76                                            0x3f144
+#define ixMCARB_DRAM_TIMING_TABLE_77                                            0x3f148
+#define ixMCARB_DRAM_TIMING_TABLE_78                                            0x3f14c
+#define ixMCARB_DRAM_TIMING_TABLE_79                                            0x3f150
+#define ixMCARB_DRAM_TIMING_TABLE_80                                            0x3f154
+#define ixMCARB_DRAM_TIMING_TABLE_81                                            0x3f158
+#define ixMCARB_DRAM_TIMING_TABLE_82                                            0x3f15c
+#define ixMCARB_DRAM_TIMING_TABLE_83                                            0x3f160
+#define ixMCARB_DRAM_TIMING_TABLE_84                                            0x3f164
+#define ixMCARB_DRAM_TIMING_TABLE_85                                            0x3f168
+#define ixMCARB_DRAM_TIMING_TABLE_86                                            0x3f16c
+#define ixMCARB_DRAM_TIMING_TABLE_87                                            0x3f170
+#define ixMCARB_DRAM_TIMING_TABLE_88                                            0x3f174
+#define ixMCARB_DRAM_TIMING_TABLE_89                                            0x3f178
+#define ixMCARB_DRAM_TIMING_TABLE_90                                            0x3f17c
+#define ixMCARB_DRAM_TIMING_TABLE_91                                            0x3f180
+#define ixMCARB_DRAM_TIMING_TABLE_92                                            0x3f184
+#define ixMCARB_DRAM_TIMING_TABLE_93                                            0x3f188
+#define ixMCARB_DRAM_TIMING_TABLE_94                                            0x3f18c
+#define ixMCARB_DRAM_TIMING_TABLE_95                                            0x3f190
+#define ixMCARB_DRAM_TIMING_TABLE_96                                            0x3f194
+#define ixDPM_TABLE_1                                                           0x3f198
+#define ixDPM_TABLE_2                                                           0x3f19c
+#define ixDPM_TABLE_3                                                           0x3f1a0
+#define ixDPM_TABLE_4                                                           0x3f1a4
+#define ixDPM_TABLE_5                                                           0x3f1a8
+#define ixDPM_TABLE_6                                                           0x3f1ac
+#define ixDPM_TABLE_7                                                           0x3f1b0
+#define ixDPM_TABLE_8                                                           0x3f1b4
+#define ixDPM_TABLE_9                                                           0x3f1b8
+#define ixDPM_TABLE_10                                                          0x3f1bc
+#define ixDPM_TABLE_11                                                          0x3f1c0
+#define ixDPM_TABLE_12                                                          0x3f1c4
+#define ixDPM_TABLE_13                                                          0x3f1c8
+#define ixDPM_TABLE_14                                                          0x3f1cc
+#define ixDPM_TABLE_15                                                          0x3f1d0
+#define ixDPM_TABLE_16                                                          0x3f1d4
+#define ixDPM_TABLE_17                                                          0x3f1d8
+#define ixDPM_TABLE_18                                                          0x3f1dc
+#define ixDPM_TABLE_19                                                          0x3f1e0
+#define ixDPM_TABLE_20                                                          0x3f1e4
+#define ixDPM_TABLE_21                                                          0x3f1e8
+#define ixDPM_TABLE_22                                                          0x3f1ec
+#define ixDPM_TABLE_23                                                          0x3f1f0
+#define ixDPM_TABLE_24                                                          0x3f1f4
+#define ixDPM_TABLE_25                                                          0x3f1f8
+#define ixDPM_TABLE_26                                                          0x3f1fc
+#define ixDPM_TABLE_27                                                          0x3f200
+#define ixDPM_TABLE_28                                                          0x3f204
+#define ixDPM_TABLE_29                                                          0x3f208
+#define ixDPM_TABLE_30                                                          0x3f20c
+#define ixDPM_TABLE_31                                                          0x3f210
+#define ixDPM_TABLE_32                                                          0x3f214
+#define ixDPM_TABLE_33                                                          0x3f218
+#define ixDPM_TABLE_34                                                          0x3f21c
+#define ixDPM_TABLE_35                                                          0x3f220
+#define ixDPM_TABLE_36                                                          0x3f224
+#define ixDPM_TABLE_37                                                          0x3f228
+#define ixDPM_TABLE_38                                                          0x3f22c
+#define ixDPM_TABLE_39                                                          0x3f230
+#define ixDPM_TABLE_40                                                          0x3f234
+#define ixDPM_TABLE_41                                                          0x3f238
+#define ixDPM_TABLE_42                                                          0x3f23c
+#define ixDPM_TABLE_43                                                          0x3f240
+#define ixDPM_TABLE_44                                                          0x3f244
+#define ixDPM_TABLE_45                                                          0x3f248
+#define ixDPM_TABLE_46                                                          0x3f24c
+#define ixDPM_TABLE_47                                                          0x3f250
+#define ixDPM_TABLE_48                                                          0x3f254
+#define ixDPM_TABLE_49                                                          0x3f258
+#define ixDPM_TABLE_50                                                          0x3f25c
+#define ixDPM_TABLE_51                                                          0x3f260
+#define ixDPM_TABLE_52                                                          0x3f264
+#define ixDPM_TABLE_53                                                          0x3f268
+#define ixDPM_TABLE_54                                                          0x3f26c
+#define ixDPM_TABLE_55                                                          0x3f270
+#define ixDPM_TABLE_56                                                          0x3f274
+#define ixDPM_TABLE_57                                                          0x3f278
+#define ixDPM_TABLE_58                                                          0x3f27c
+#define ixDPM_TABLE_59                                                          0x3f280
+#define ixDPM_TABLE_60                                                          0x3f284
+#define ixDPM_TABLE_61                                                          0x3f288
+#define ixDPM_TABLE_62                                                          0x3f28c
+#define ixDPM_TABLE_63                                                          0x3f290
+#define ixDPM_TABLE_64                                                          0x3f294
+#define ixDPM_TABLE_65                                                          0x3f298
+#define ixDPM_TABLE_66                                                          0x3f29c
+#define ixDPM_TABLE_67                                                          0x3f2a0
+#define ixDPM_TABLE_68                                                          0x3f2a4
+#define ixDPM_TABLE_69                                                          0x3f2a8
+#define ixDPM_TABLE_70                                                          0x3f2ac
+#define ixDPM_TABLE_71                                                          0x3f2b0
+#define ixDPM_TABLE_72                                                          0x3f2b4
+#define ixDPM_TABLE_73                                                          0x3f2b8
+#define ixDPM_TABLE_74                                                          0x3f2bc
+#define ixDPM_TABLE_75                                                          0x3f2c0
+#define ixDPM_TABLE_76                                                          0x3f2c4
+#define ixDPM_TABLE_77                                                          0x3f2c8
+#define ixDPM_TABLE_78                                                          0x3f2cc
+#define ixDPM_TABLE_79                                                          0x3f2d0
+#define ixDPM_TABLE_80                                                          0x3f2d4
+#define ixDPM_TABLE_81                                                          0x3f2d8
+#define ixDPM_TABLE_82                                                          0x3f2dc
+#define ixDPM_TABLE_83                                                          0x3f2e0
+#define ixDPM_TABLE_84                                                          0x3f2e4
+#define ixDPM_TABLE_85                                                          0x3f2e8
+#define ixDPM_TABLE_86                                                          0x3f2ec
+#define ixDPM_TABLE_87                                                          0x3f2f0
+#define ixDPM_TABLE_88                                                          0x3f2f4
+#define ixDPM_TABLE_89                                                          0x3f2f8
+#define ixDPM_TABLE_90                                                          0x3f2fc
+#define ixDPM_TABLE_91                                                          0x3f300
+#define ixDPM_TABLE_92                                                          0x3f304
+#define ixDPM_TABLE_93                                                          0x3f308
+#define ixDPM_TABLE_94                                                          0x3f30c
+#define ixDPM_TABLE_95                                                          0x3f310
+#define ixDPM_TABLE_96                                                          0x3f314
+#define ixDPM_TABLE_97                                                          0x3f318
+#define ixDPM_TABLE_98                                                          0x3f31c
+#define ixDPM_TABLE_99                                                          0x3f320
+#define ixDPM_TABLE_100                                                         0x3f324
+#define ixDPM_TABLE_101                                                         0x3f328
+#define ixDPM_TABLE_102                                                         0x3f32c
+#define ixDPM_TABLE_103                                                         0x3f330
+#define ixDPM_TABLE_104                                                         0x3f334
+#define ixDPM_TABLE_105                                                         0x3f338
+#define ixDPM_TABLE_106                                                         0x3f33c
+#define ixDPM_TABLE_107                                                         0x3f340
+#define ixDPM_TABLE_108                                                         0x3f344
+#define ixDPM_TABLE_109                                                         0x3f348
+#define ixDPM_TABLE_110                                                         0x3f34c
+#define ixDPM_TABLE_111                                                         0x3f350
+#define ixDPM_TABLE_112                                                         0x3f354
+#define ixDPM_TABLE_113                                                         0x3f358
+#define ixDPM_TABLE_114                                                         0x3f35c
+#define ixDPM_TABLE_115                                                         0x3f360
+#define ixDPM_TABLE_116                                                         0x3f364
+#define ixDPM_TABLE_117                                                         0x3f368
+#define ixDPM_TABLE_118                                                         0x3f36c
+#define ixDPM_TABLE_119                                                         0x3f370
+#define ixDPM_TABLE_120                                                         0x3f374
+#define ixDPM_TABLE_121                                                         0x3f378
+#define ixDPM_TABLE_122                                                         0x3f37c
+#define ixDPM_TABLE_123                                                         0x3f380
+#define ixDPM_TABLE_124                                                         0x3f384
+#define ixDPM_TABLE_125                                                         0x3f388
+#define ixDPM_TABLE_126                                                         0x3f38c
+#define ixDPM_TABLE_127                                                         0x3f390
+#define ixDPM_TABLE_128                                                         0x3f394
+#define ixDPM_TABLE_129                                                         0x3f398
+#define ixDPM_TABLE_130                                                         0x3f39c
+#define ixDPM_TABLE_131                                                         0x3f3a0
+#define ixDPM_TABLE_132                                                         0x3f3a4
+#define ixDPM_TABLE_133                                                         0x3f3a8
+#define ixDPM_TABLE_134                                                         0x3f3ac
+#define ixDPM_TABLE_135                                                         0x3f3b0
+#define ixDPM_TABLE_136                                                         0x3f3b4
+#define ixDPM_TABLE_137                                                         0x3f3b8
+#define ixDPM_TABLE_138                                                         0x3f3bc
+#define ixDPM_TABLE_139                                                         0x3f3c0
+#define ixDPM_TABLE_140                                                         0x3f3c4
+#define ixDPM_TABLE_141                                                         0x3f3c8
+#define ixDPM_TABLE_142                                                         0x3f3cc
+#define ixDPM_TABLE_143                                                         0x3f3d0
+#define ixDPM_TABLE_144                                                         0x3f3d4
+#define ixDPM_TABLE_145                                                         0x3f3d8
+#define ixDPM_TABLE_146                                                         0x3f3dc
+#define ixDPM_TABLE_147                                                         0x3f3e0
+#define ixDPM_TABLE_148                                                         0x3f3e4
+#define ixDPM_TABLE_149                                                         0x3f3e8
+#define ixDPM_TABLE_150                                                         0x3f3ec
+#define ixDPM_TABLE_151                                                         0x3f3f0
+#define ixDPM_TABLE_152                                                         0x3f3f4
+#define ixDPM_TABLE_153                                                         0x3f3f8
+#define ixDPM_TABLE_154                                                         0x3f3fc
+#define ixDPM_TABLE_155                                                         0x3f400
+#define ixDPM_TABLE_156                                                         0x3f404
+#define ixDPM_TABLE_157                                                         0x3f408
+#define ixDPM_TABLE_158                                                         0x3f40c
+#define ixDPM_TABLE_159                                                         0x3f410
+#define ixDPM_TABLE_160                                                         0x3f414
+#define ixDPM_TABLE_161                                                         0x3f418
+#define ixDPM_TABLE_162                                                         0x3f41c
+#define ixDPM_TABLE_163                                                         0x3f420
+#define ixDPM_TABLE_164                                                         0x3f424
+#define ixDPM_TABLE_165                                                         0x3f428
+#define ixDPM_TABLE_166                                                         0x3f42c
+#define ixDPM_TABLE_167                                                         0x3f430
+#define ixDPM_TABLE_168                                                         0x3f434
+#define ixDPM_TABLE_169                                                         0x3f438
+#define ixDPM_TABLE_170                                                         0x3f43c
+#define ixDPM_TABLE_171                                                         0x3f440
+#define ixDPM_TABLE_172                                                         0x3f444
+#define ixDPM_TABLE_173                                                         0x3f448
+#define ixDPM_TABLE_174                                                         0x3f44c
+#define ixDPM_TABLE_175                                                         0x3f450
+#define ixDPM_TABLE_176                                                         0x3f454
+#define ixDPM_TABLE_177                                                         0x3f458
+#define ixDPM_TABLE_178                                                         0x3f45c
+#define ixDPM_TABLE_179                                                         0x3f460
+#define ixDPM_TABLE_180                                                         0x3f464
+#define ixDPM_TABLE_181                                                         0x3f468
+#define ixDPM_TABLE_182                                                         0x3f46c
+#define ixDPM_TABLE_183                                                         0x3f470
+#define ixDPM_TABLE_184                                                         0x3f474
+#define ixDPM_TABLE_185                                                         0x3f478
+#define ixDPM_TABLE_186                                                         0x3f47c
+#define ixDPM_TABLE_187                                                         0x3f480
+#define ixDPM_TABLE_188                                                         0x3f484
+#define ixDPM_TABLE_189                                                         0x3f488
+#define ixDPM_TABLE_190                                                         0x3f48c
+#define ixDPM_TABLE_191                                                         0x3f490
+#define ixDPM_TABLE_192                                                         0x3f494
+#define ixDPM_TABLE_193                                                         0x3f498
+#define ixDPM_TABLE_194                                                         0x3f49c
+#define ixDPM_TABLE_195                                                         0x3f4a0
+#define ixDPM_TABLE_196                                                         0x3f4a4
+#define ixDPM_TABLE_197                                                         0x3f4a8
+#define ixDPM_TABLE_198                                                         0x3f4ac
+#define ixDPM_TABLE_199                                                         0x3f4b0
+#define ixDPM_TABLE_200                                                         0x3f4b4
+#define ixDPM_TABLE_201                                                         0x3f4b8
+#define ixDPM_TABLE_202                                                         0x3f4bc
+#define ixDPM_TABLE_203                                                         0x3f4c0
+#define ixDPM_TABLE_204                                                         0x3f4c4
+#define ixDPM_TABLE_205                                                         0x3f4c8
+#define ixDPM_TABLE_206                                                         0x3f4cc
+#define ixDPM_TABLE_207                                                         0x3f4d0
+#define ixDPM_TABLE_208                                                         0x3f4d4
+#define ixDPM_TABLE_209                                                         0x3f4d8
+#define ixDPM_TABLE_210                                                         0x3f4dc
+#define ixDPM_TABLE_211                                                         0x3f4e0
+#define ixDPM_TABLE_212                                                         0x3f4e4
+#define ixDPM_TABLE_213                                                         0x3f4e8
+#define ixDPM_TABLE_214                                                         0x3f4ec
+#define ixDPM_TABLE_215                                                         0x3f4f0
+#define ixDPM_TABLE_216                                                         0x3f4f4
+#define ixDPM_TABLE_217                                                         0x3f4f8
+#define ixDPM_TABLE_218                                                         0x3f4fc
+#define ixDPM_TABLE_219                                                         0x3f500
+#define ixDPM_TABLE_220                                                         0x3f504
+#define ixDPM_TABLE_221                                                         0x3f508
+#define ixDPM_TABLE_222                                                         0x3f50c
+#define ixDPM_TABLE_223                                                         0x3f510
+#define ixDPM_TABLE_224                                                         0x3f514
+#define ixDPM_TABLE_225                                                         0x3f518
+#define ixDPM_TABLE_226                                                         0x3f51c
+#define ixDPM_TABLE_227                                                         0x3f520
+#define ixDPM_TABLE_228                                                         0x3f524
+#define ixDPM_TABLE_229                                                         0x3f528
+#define ixDPM_TABLE_230                                                         0x3f52c
+#define ixDPM_TABLE_231                                                         0x3f530
+#define ixDPM_TABLE_232                                                         0x3f534
+#define ixDPM_TABLE_233                                                         0x3f538
+#define ixDPM_TABLE_234                                                         0x3f53c
+#define ixDPM_TABLE_235                                                         0x3f540
+#define ixDPM_TABLE_236                                                         0x3f544
+#define ixDPM_TABLE_237                                                         0x3f548
+#define ixDPM_TABLE_238                                                         0x3f54c
+#define ixDPM_TABLE_239                                                         0x3f550
+#define ixDPM_TABLE_240                                                         0x3f554
+#define ixDPM_TABLE_241                                                         0x3f558
+#define ixDPM_TABLE_242                                                         0x3f55c
+#define ixDPM_TABLE_243                                                         0x3f560
+#define ixDPM_TABLE_244                                                         0x3f564
+#define ixDPM_TABLE_245                                                         0x3f568
+#define ixDPM_TABLE_246                                                         0x3f56c
+#define ixDPM_TABLE_247                                                         0x3f570
+#define ixDPM_TABLE_248                                                         0x3f574
+#define ixDPM_TABLE_249                                                         0x3f578
+#define ixDPM_TABLE_250                                                         0x3f57c
+#define ixDPM_TABLE_251                                                         0x3f580
+#define ixDPM_TABLE_252                                                         0x3f584
+#define ixDPM_TABLE_253                                                         0x3f588
+#define ixDPM_TABLE_254                                                         0x3f58c
+#define ixDPM_TABLE_255                                                         0x3f590
+#define ixDPM_TABLE_256                                                         0x3f594
+#define ixDPM_TABLE_257                                                         0x3f598
+#define ixDPM_TABLE_258                                                         0x3f59c
+#define ixDPM_TABLE_259                                                         0x3f5a0
+#define ixDPM_TABLE_260                                                         0x3f5a4
+#define ixDPM_TABLE_261                                                         0x3f5a8
+#define ixDPM_TABLE_262                                                         0x3f5ac
+#define ixDPM_TABLE_263                                                         0x3f5b0
+#define ixDPM_TABLE_264                                                         0x3f5b4
+#define ixDPM_TABLE_265                                                         0x3f5b8
+#define ixDPM_TABLE_266                                                         0x3f5bc
+#define ixDPM_TABLE_267                                                         0x3f5c0
+#define ixDPM_TABLE_268                                                         0x3f5c4
+#define ixDPM_TABLE_269                                                         0x3f5c8
+#define ixDPM_TABLE_270                                                         0x3f5cc
+#define ixDPM_TABLE_271                                                         0x3f5d0
+#define ixDPM_TABLE_272                                                         0x3f5d4
+#define ixDPM_TABLE_273                                                         0x3f5d8
+#define ixDPM_TABLE_274                                                         0x3f5dc
+#define ixDPM_TABLE_275                                                         0x3f5e0
+#define ixDPM_TABLE_276                                                         0x3f5e4
+#define ixDPM_TABLE_277                                                         0x3f5e8
+#define ixDPM_TABLE_278                                                         0x3f5ec
+#define ixDPM_TABLE_279                                                         0x3f5f0
+#define ixDPM_TABLE_280                                                         0x3f5f4
+#define ixDPM_TABLE_281                                                         0x3f5f8
+#define ixDPM_TABLE_282                                                         0x3f5fc
+#define ixDPM_TABLE_283                                                         0x3f600
+#define ixDPM_TABLE_284                                                         0x3f604
+#define ixDPM_TABLE_285                                                         0x3f608
+#define ixDPM_TABLE_286                                                         0x3f60c
+#define ixDPM_TABLE_287                                                         0x3f610
+#define ixDPM_TABLE_288                                                         0x3f614
+#define ixDPM_TABLE_289                                                         0x3f618
+#define ixDPM_TABLE_290                                                         0x3f61c
+#define ixDPM_TABLE_291                                                         0x3f620
+#define ixDPM_TABLE_292                                                         0x3f624
+#define ixDPM_TABLE_293                                                         0x3f628
+#define ixDPM_TABLE_294                                                         0x3f62c
+#define ixDPM_TABLE_295                                                         0x3f630
+#define ixDPM_TABLE_296                                                         0x3f634
+#define ixDPM_TABLE_297                                                         0x3f638
+#define ixDPM_TABLE_298                                                         0x3f63c
+#define ixDPM_TABLE_299                                                         0x3f640
+#define ixDPM_TABLE_300                                                         0x3f644
+#define ixDPM_TABLE_301                                                         0x3f648
+#define ixDPM_TABLE_302                                                         0x3f64c
+#define ixDPM_TABLE_303                                                         0x3f650
+#define ixDPM_TABLE_304                                                         0x3f654
+#define ixDPM_TABLE_305                                                         0x3f658
+#define ixDPM_TABLE_306                                                         0x3f65c
+#define ixDPM_TABLE_307                                                         0x3f660
+#define ixDPM_TABLE_308                                                         0x3f664
+#define ixDPM_TABLE_309                                                         0x3f668
+#define ixDPM_TABLE_310                                                         0x3f66c
+#define ixDPM_TABLE_311                                                         0x3f670
+#define ixDPM_TABLE_312                                                         0x3f674
+#define ixDPM_TABLE_313                                                         0x3f678
+#define ixDPM_TABLE_314                                                         0x3f67c
+#define ixDPM_TABLE_315                                                         0x3f680
+#define ixDPM_TABLE_316                                                         0x3f684
+#define ixDPM_TABLE_317                                                         0x3f688
+#define ixDPM_TABLE_318                                                         0x3f68c
+#define ixDPM_TABLE_319                                                         0x3f690
+#define ixDPM_TABLE_320                                                         0x3f694
+#define ixDPM_TABLE_321                                                         0x3f698
+#define ixDPM_TABLE_322                                                         0x3f69c
+#define ixDPM_TABLE_323                                                         0x3f6a0
+#define ixDPM_TABLE_324                                                         0x3f6a4
+#define ixDPM_TABLE_325                                                         0x3f6a8
+#define ixDPM_TABLE_326                                                         0x3f6ac
+#define ixDPM_TABLE_327                                                         0x3f6b0
+#define ixDPM_TABLE_328                                                         0x3f6b4
+#define ixDPM_TABLE_329                                                         0x3f6b8
+#define ixDPM_TABLE_330                                                         0x3f6bc
+#define ixDPM_TABLE_331                                                         0x3f6c0
+#define ixDPM_TABLE_332                                                         0x3f6c4
+#define ixDPM_TABLE_333                                                         0x3f6c8
+#define ixDPM_TABLE_334                                                         0x3f6cc
+#define ixDPM_TABLE_335                                                         0x3f6d0
+#define ixDPM_TABLE_336                                                         0x3f6d4
+#define ixDPM_TABLE_337                                                         0x3f6d8
+#define ixDPM_TABLE_338                                                         0x3f6dc
+#define ixDPM_TABLE_339                                                         0x3f6e0
+#define ixDPM_TABLE_340                                                         0x3f6e4
+#define ixDPM_TABLE_341                                                         0x3f6e8
+#define ixDPM_TABLE_342                                                         0x3f6ec
+#define ixDPM_TABLE_343                                                         0x3f6f0
+#define ixDPM_TABLE_344                                                         0x3f6f4
+#define ixDPM_TABLE_345                                                         0x3f6f8
+#define ixDPM_TABLE_346                                                         0x3f6fc
+#define ixDPM_TABLE_347                                                         0x3f700
+#define ixDPM_TABLE_348                                                         0x3f704
+#define ixDPM_TABLE_349                                                         0x3f708
+#define ixDPM_TABLE_350                                                         0x3f70c
+#define ixDPM_TABLE_351                                                         0x3f710
+#define ixDPM_TABLE_352                                                         0x3f714
+#define ixDPM_TABLE_353                                                         0x3f718
+#define ixDPM_TABLE_354                                                         0x3f71c
+#define ixDPM_TABLE_355                                                         0x3f720
+#define ixDPM_TABLE_356                                                         0x3f724
+#define ixDPM_TABLE_357                                                         0x3f728
+#define ixDPM_TABLE_358                                                         0x3f72c
+#define ixDPM_TABLE_359                                                         0x3f730
+#define ixDPM_TABLE_360                                                         0x3f734
+#define ixDPM_TABLE_361                                                         0x3f738
+#define ixDPM_TABLE_362                                                         0x3f73c
+#define ixDPM_TABLE_363                                                         0x3f740
+#define ixDPM_TABLE_364                                                         0x3f744
+#define ixDPM_TABLE_365                                                         0x3f748
+#define ixDPM_TABLE_366                                                         0x3f74c
+#define ixDPM_TABLE_367                                                         0x3f750
+#define ixDPM_TABLE_368                                                         0x3f754
+#define ixDPM_TABLE_369                                                         0x3f758
+#define ixDPM_TABLE_370                                                         0x3f75c
+#define ixDPM_TABLE_371                                                         0x3f760
+#define ixDPM_TABLE_372                                                         0x3f764
+#define ixDPM_TABLE_373                                                         0x3f768
+#define ixDPM_TABLE_374                                                         0x3f76c
+#define ixDPM_TABLE_375                                                         0x3f770
+#define ixDPM_TABLE_376                                                         0x3f774
+#define ixDPM_TABLE_377                                                         0x3f778
+#define ixDPM_TABLE_378                                                         0x3f77c
+#define ixDPM_TABLE_379                                                         0x3f780
+#define ixDPM_TABLE_380                                                         0x3f784
+#define ixDPM_TABLE_381                                                         0x3f788
+#define ixDPM_TABLE_382                                                         0x3f78c
+#define ixDPM_TABLE_383                                                         0x3f790
+#define ixDPM_TABLE_384                                                         0x3f794
+#define ixDPM_TABLE_385                                                         0x3f798
+#define ixDPM_TABLE_386                                                         0x3f79c
+#define ixDPM_TABLE_387                                                         0x3f7a0
+#define ixDPM_TABLE_388                                                         0x3f7a4
+#define ixDPM_TABLE_389                                                         0x3f7a8
+#define ixDPM_TABLE_390                                                         0x3f7ac
+#define ixDPM_TABLE_391                                                         0x3f7b0
+#define ixDPM_TABLE_392                                                         0x3f7b4
+#define ixDPM_TABLE_393                                                         0x3f7b8
+#define ixDPM_TABLE_394                                                         0x3f7bc
+#define ixDPM_TABLE_395                                                         0x3f7c0
+#define ixDPM_TABLE_396                                                         0x3f7c4
+#define ixDPM_TABLE_397                                                         0x3f7c8
+#define ixDPM_TABLE_398                                                         0x3f7cc
+#define ixDPM_TABLE_399                                                         0x3f7d0
+#define ixDPM_TABLE_400                                                         0x3f7d4
+#define ixDPM_TABLE_401                                                         0x3f7d8
+#define ixDPM_TABLE_402                                                         0x3f7dc
+#define ixDPM_TABLE_403                                                         0x3f7e0
+#define ixDPM_TABLE_404                                                         0x3f7e4
+#define ixDPM_TABLE_405                                                         0x3f7e8
+#define ixDPM_TABLE_406                                                         0x3f7ec
+#define ixDPM_TABLE_407                                                         0x3f7f0
+#define ixDPM_TABLE_408                                                         0x3f7f4
+#define ixDPM_TABLE_409                                                         0x3f7f8
+#define ixDPM_TABLE_410                                                         0x3f7fc
+#define ixDPM_TABLE_411                                                         0x3f800
+#define ixDPM_TABLE_412                                                         0x3f804
+#define ixDPM_TABLE_413                                                         0x3f808
+#define ixDPM_TABLE_414                                                         0x3f80c
+#define ixDPM_TABLE_415                                                         0x3f810
+#define ixDPM_TABLE_416                                                         0x3f814
+#define ixDPM_TABLE_417                                                         0x3f818
+#define ixDPM_TABLE_418                                                         0x3f81c
+#define ixDPM_TABLE_419                                                         0x3f820
+#define ixDPM_TABLE_420                                                         0x3f824
+#define ixDPM_TABLE_421                                                         0x3f828
+#define ixDPM_TABLE_422                                                         0x3f82c
+#define ixDPM_TABLE_423                                                         0x3f830
+#define ixDPM_TABLE_424                                                         0x3f834
+#define ixDPM_TABLE_425                                                         0x3f838
+#define ixDPM_TABLE_426                                                         0x3f83c
+#define ixDPM_TABLE_427                                                         0x3f840
+#define ixDPM_TABLE_428                                                         0x3f844
+#define ixDPM_TABLE_429                                                         0x3f848
+#define ixDPM_TABLE_430                                                         0x3f84c
+#define ixDPM_TABLE_431                                                         0x3f850
+#define ixDPM_TABLE_432                                                         0x3f854
+#define ixDPM_TABLE_433                                                         0x3f858
+#define ixDPM_TABLE_434                                                         0x3f85c
+#define ixDPM_TABLE_435                                                         0x3f860
+#define ixDPM_TABLE_436                                                         0x3f864
+#define ixDPM_TABLE_437                                                         0x3f868
+#define ixDPM_TABLE_438                                                         0x3f86c
+#define ixDPM_TABLE_439                                                         0x3f870
+#define ixDPM_TABLE_440                                                         0x3f874
+#define ixSOFT_REGISTERS_TABLE_1                                                0x3f89c
+#define ixSOFT_REGISTERS_TABLE_2                                                0x3f8a0
+#define ixSOFT_REGISTERS_TABLE_3                                                0x3f8a4
+#define ixSOFT_REGISTERS_TABLE_4                                                0x3f8a8
+#define ixSOFT_REGISTERS_TABLE_5                                                0x3f8ac
+#define ixSOFT_REGISTERS_TABLE_6                                                0x3f8b0
+#define ixSOFT_REGISTERS_TABLE_7                                                0x3f8b4
+#define ixSOFT_REGISTERS_TABLE_8                                                0x3f8b8
+#define ixSOFT_REGISTERS_TABLE_9                                                0x3f8bc
+#define ixSOFT_REGISTERS_TABLE_10                                               0x3f8c0
+#define ixSOFT_REGISTERS_TABLE_11                                               0x3f8c4
+#define ixSOFT_REGISTERS_TABLE_12                                               0x3f8c8
+#define ixSOFT_REGISTERS_TABLE_13                                               0x3f8cc
+#define ixSOFT_REGISTERS_TABLE_14                                               0x3f8d0
+#define ixSOFT_REGISTERS_TABLE_15                                               0x3f8d4
+#define ixSOFT_REGISTERS_TABLE_16                                               0x3f8d8
+#define ixSOFT_REGISTERS_TABLE_17                                               0x3f8dc
+#define ixSOFT_REGISTERS_TABLE_18                                               0x3f8e0
+#define ixSOFT_REGISTERS_TABLE_19                                               0x3f8e4
+#define ixSOFT_REGISTERS_TABLE_20                                               0x3f8e8
+#define ixSOFT_REGISTERS_TABLE_21                                               0x3f8ec
+#define ixSOFT_REGISTERS_TABLE_22                                               0x3f8f0
+#define ixSOFT_REGISTERS_TABLE_23                                               0x3f8f4
+#define ixSOFT_REGISTERS_TABLE_24                                               0x3f8f8
+#define ixSOFT_REGISTERS_TABLE_25                                               0x3f8fc
+#define ixSOFT_REGISTERS_TABLE_26                                               0x3f900
+#define ixSOFT_REGISTERS_TABLE_27                                               0x3f904
+#define ixSOFT_REGISTERS_TABLE_28                                               0x3f888
+#define ixSOFT_REGISTERS_TABLE_29                                               0x3f90c
+#define ixSOFT_REGISTERS_TABLE_30                                               0x3f910
+#define ixPM_FUSES_1                                                            0x3f914
+#define ixPM_FUSES_2                                                            0x3f918
+#define ixPM_FUSES_3                                                            0x3f91c
+#define ixPM_FUSES_4                                                            0x3f920
+#define ixPM_FUSES_5                                                            0x3f924
+#define ixPM_FUSES_6                                                            0x3f928
+#define ixPM_FUSES_7                                                            0x3f92c
+#define ixPM_FUSES_8                                                            0x3f930
+#define ixPM_FUSES_9                                                            0x3f934
+#define ixPM_FUSES_10                                                           0x3f938
+#define ixPM_FUSES_11                                                           0x3f93c
+#define ixPM_FUSES_12                                                           0x3f940
+#define ixPM_FUSES_13                                                           0x3f944
+#define ixPM_FUSES_14                                                           0x3f948
+#define ixPM_FUSES_15                                                           0x3f94c
+#define ixSMU_PM_STATUS_0                                                       0x3fe00
+#define ixSMU_PM_STATUS_1                                                       0x3fe04
+#define ixSMU_PM_STATUS_2                                                       0x3fe08
+#define ixSMU_PM_STATUS_3                                                       0x3fe0c
+#define ixSMU_PM_STATUS_4                                                       0x3fe10
+#define ixSMU_PM_STATUS_5                                                       0x3fe14
+#define ixSMU_PM_STATUS_6                                                       0x3fe18
+#define ixSMU_PM_STATUS_7                                                       0x3fe1c
+#define ixSMU_PM_STATUS_8                                                       0x3fe20
+#define ixSMU_PM_STATUS_9                                                       0x3fe24
+#define ixSMU_PM_STATUS_10                                                      0x3fe28
+#define ixSMU_PM_STATUS_11                                                      0x3fe2c
+#define ixSMU_PM_STATUS_12                                                      0x3fe30
+#define ixSMU_PM_STATUS_13                                                      0x3fe34
+#define ixSMU_PM_STATUS_14                                                      0x3fe38
+#define ixSMU_PM_STATUS_15                                                      0x3fe3c
+#define ixSMU_PM_STATUS_16                                                      0x3fe40
+#define ixSMU_PM_STATUS_17                                                      0x3fe44
+#define ixSMU_PM_STATUS_18                                                      0x3fe48
+#define ixSMU_PM_STATUS_19                                                      0x3fe4c
+#define ixSMU_PM_STATUS_20                                                      0x3fe50
+#define ixSMU_PM_STATUS_21                                                      0x3fe54
+#define ixSMU_PM_STATUS_22                                                      0x3fe58
+#define ixSMU_PM_STATUS_23                                                      0x3fe5c
+#define ixSMU_PM_STATUS_24                                                      0x3fe60
+#define ixSMU_PM_STATUS_25                                                      0x3fe64
+#define ixSMU_PM_STATUS_26                                                      0x3fe68
+#define ixSMU_PM_STATUS_27                                                      0x3fe6c
+#define ixSMU_PM_STATUS_28                                                      0x3fe70
+#define ixSMU_PM_STATUS_29                                                      0x3fe74
+#define ixSMU_PM_STATUS_30                                                      0x3fe78
+#define ixSMU_PM_STATUS_31                                                      0x3fe7c
+#define ixSMU_PM_STATUS_32                                                      0x3fe80
+#define ixSMU_PM_STATUS_33                                                      0x3fe84
+#define ixSMU_PM_STATUS_34                                                      0x3fe88
+#define ixSMU_PM_STATUS_35                                                      0x3fe8c
+#define ixSMU_PM_STATUS_36                                                      0x3fe90
+#define ixSMU_PM_STATUS_37                                                      0x3fe94
+#define ixSMU_PM_STATUS_38                                                      0x3fe98
+#define ixSMU_PM_STATUS_39                                                      0x3fe9c
+#define ixSMU_PM_STATUS_40                                                      0x3fea0
+#define ixSMU_PM_STATUS_41                                                      0x3fea4
+#define ixSMU_PM_STATUS_42                                                      0x3fea8
+#define ixSMU_PM_STATUS_43                                                      0x3feac
+#define ixSMU_PM_STATUS_44                                                      0x3feb0
+#define ixSMU_PM_STATUS_45                                                      0x3feb4
+#define ixSMU_PM_STATUS_46                                                      0x3feb8
+#define ixSMU_PM_STATUS_47                                                      0x3febc
+#define ixSMU_PM_STATUS_48                                                      0x3fec0
+#define ixSMU_PM_STATUS_49                                                      0x3fec4
+#define ixSMU_PM_STATUS_50                                                      0x3fec8
+#define ixSMU_PM_STATUS_51                                                      0x3fecc
+#define ixSMU_PM_STATUS_52                                                      0x3fed0
+#define ixSMU_PM_STATUS_53                                                      0x3fed4
+#define ixSMU_PM_STATUS_54                                                      0x3fed8
+#define ixSMU_PM_STATUS_55                                                      0x3fedc
+#define ixSMU_PM_STATUS_56                                                      0x3fee0
+#define ixSMU_PM_STATUS_57                                                      0x3fee4
+#define ixSMU_PM_STATUS_58                                                      0x3fee8
+#define ixSMU_PM_STATUS_59                                                      0x3feec
+#define ixSMU_PM_STATUS_60                                                      0x3fef0
+#define ixSMU_PM_STATUS_61                                                      0x3fef4
+#define ixSMU_PM_STATUS_62                                                      0x3fef8
+#define ixSMU_PM_STATUS_63                                                      0x3fefc
+#define ixSMU_PM_STATUS_64                                                      0x3ff00
+#define ixSMU_PM_STATUS_65                                                      0x3ff04
+#define ixSMU_PM_STATUS_66                                                      0x3ff08
+#define ixSMU_PM_STATUS_67                                                      0x3ff0c
+#define ixSMU_PM_STATUS_68                                                      0x3ff10
+#define ixSMU_PM_STATUS_69                                                      0x3ff14
+#define ixSMU_PM_STATUS_70                                                      0x3ff18
+#define ixSMU_PM_STATUS_71                                                      0x3ff1c
+#define ixSMU_PM_STATUS_72                                                      0x3ff20
+#define ixSMU_PM_STATUS_73                                                      0x3ff24
+#define ixSMU_PM_STATUS_74                                                      0x3ff28
+#define ixSMU_PM_STATUS_75                                                      0x3ff2c
+#define ixSMU_PM_STATUS_76                                                      0x3ff30
+#define ixSMU_PM_STATUS_77                                                      0x3ff34
+#define ixSMU_PM_STATUS_78                                                      0x3ff38
+#define ixSMU_PM_STATUS_79                                                      0x3ff3c
+#define ixSMU_PM_STATUS_80                                                      0x3ff40
+#define ixSMU_PM_STATUS_81                                                      0x3ff44
+#define ixSMU_PM_STATUS_82                                                      0x3ff48
+#define ixSMU_PM_STATUS_83                                                      0x3ff4c
+#define ixSMU_PM_STATUS_84                                                      0x3ff50
+#define ixSMU_PM_STATUS_85                                                      0x3ff54
+#define ixSMU_PM_STATUS_86                                                      0x3ff58
+#define ixSMU_PM_STATUS_87                                                      0x3ff5c
+#define ixSMU_PM_STATUS_88                                                      0x3ff60
+#define ixSMU_PM_STATUS_89                                                      0x3ff64
+#define ixSMU_PM_STATUS_90                                                      0x3ff68
+#define ixSMU_PM_STATUS_91                                                      0x3ff6c
+#define ixSMU_PM_STATUS_92                                                      0x3ff70
+#define ixSMU_PM_STATUS_93                                                      0x3ff74
+#define ixSMU_PM_STATUS_94                                                      0x3ff78
+#define ixSMU_PM_STATUS_95                                                      0x3ff7c
+#define ixSMU_PM_STATUS_96                                                      0x3ff80
+#define ixSMU_PM_STATUS_97                                                      0x3ff84
+#define ixSMU_PM_STATUS_98                                                      0x3ff88
+#define ixSMU_PM_STATUS_99                                                      0x3ff8c
+#define ixSMU_PM_STATUS_100                                                     0x3ff90
+#define ixSMU_PM_STATUS_101                                                     0x3ff94
+#define ixSMU_PM_STATUS_102                                                     0x3ff98
+#define ixSMU_PM_STATUS_103                                                     0x3ff9c
+#define ixSMU_PM_STATUS_104                                                     0x3ffa0
+#define ixSMU_PM_STATUS_105                                                     0x3ffa4
+#define ixSMU_PM_STATUS_106                                                     0x3ffa8
+#define ixSMU_PM_STATUS_107                                                     0x3ffac
+#define ixSMU_PM_STATUS_108                                                     0x3ffb0
+#define ixSMU_PM_STATUS_109                                                     0x3ffb4
+#define ixSMU_PM_STATUS_110                                                     0x3ffb8
+#define ixSMU_PM_STATUS_111                                                     0x3ffbc
+#define ixSMU_PM_STATUS_112                                                     0x3ffc0
+#define ixSMU_PM_STATUS_113                                                     0x3ffc4
+#define ixSMU_PM_STATUS_114                                                     0x3ffc8
+#define ixSMU_PM_STATUS_115                                                     0x3ffcc
+#define ixSMU_PM_STATUS_116                                                     0x3ffd0
+#define ixSMU_PM_STATUS_117                                                     0x3ffd4
+#define ixSMU_PM_STATUS_118                                                     0x3ffd8
+#define ixSMU_PM_STATUS_119                                                     0x3ffdc
+#define ixSMU_PM_STATUS_120                                                     0x3ffe0
+#define ixSMU_PM_STATUS_121                                                     0x3ffe4
+#define ixSMU_PM_STATUS_122                                                     0x3ffe8
+#define ixSMU_PM_STATUS_123                                                     0x3ffec
+#define ixSMU_PM_STATUS_124                                                     0x3fff0
+#define ixSMU_PM_STATUS_125                                                     0x3fff4
+#define ixSMU_PM_STATUS_126                                                     0x3fff8
+#define ixSMU_PM_STATUS_127                                                     0x3fffc
+#define ixCG_THERMAL_INT_ENA                                                    0xc2100024
+#define ixCG_THERMAL_INT_CTRL                                                   0xc2100028
+#define ixCG_THERMAL_INT_STATUS                                                 0xc210002c
+#define ixCG_THERMAL_CTRL                                                       0xc0300004
+#define ixCG_THERMAL_STATUS                                                     0xc0300008
+#define ixCG_THERMAL_INT                                                        0xc030000c
+#define ixCG_MULT_THERMAL_CTRL                                                  0xc0300010
+#define ixCG_MULT_THERMAL_STATUS                                                0xc0300014
+#define ixTHM_TMON2_CTRL                                                        0xc0300034
+#define ixTHM_TMON2_CTRL2                                                       0xc0300038
+#define ixTHM_TMON2_CSR_WR                                                      0xc0300054
+#define ixTHM_TMON2_CSR_RD                                                      0xc0300058
+#define ixCG_FDO_CTRL0                                                          0xc0300064
+#define ixCG_FDO_CTRL1                                                          0xc0300068
+#define ixCG_FDO_CTRL2                                                          0xc030006c
+#define ixCG_TACH_CTRL                                                          0xc0300070
+#define ixCG_TACH_STATUS                                                        0xc0300074
+#define ixCC_THM_STRAPS0                                                        0xc0300080
+#define ixTHM_TMON0_RDIL0_DATA                                                  0xc0300100
+#define ixTHM_TMON0_RDIL1_DATA                                                  0xc0300104
+#define ixTHM_TMON0_RDIL2_DATA                                                  0xc0300108
+#define ixTHM_TMON0_RDIL3_DATA                                                  0xc030010c
+#define ixTHM_TMON0_RDIL4_DATA                                                  0xc0300110
+#define ixTHM_TMON0_RDIL5_DATA                                                  0xc0300114
+#define ixTHM_TMON0_RDIL6_DATA                                                  0xc0300118
+#define ixTHM_TMON0_RDIL7_DATA                                                  0xc030011c
+#define ixTHM_TMON0_RDIL8_DATA                                                  0xc0300120
+#define ixTHM_TMON0_RDIL9_DATA                                                  0xc0300124
+#define ixTHM_TMON0_RDIL10_DATA                                                 0xc0300128
+#define ixTHM_TMON0_RDIL11_DATA                                                 0xc030012c
+#define ixTHM_TMON0_RDIL12_DATA                                                 0xc0300130
+#define ixTHM_TMON0_RDIL13_DATA                                                 0xc0300134
+#define ixTHM_TMON0_RDIL14_DATA                                                 0xc0300138
+#define ixTHM_TMON0_RDIL15_DATA                                                 0xc030013c
+#define ixTHM_TMON0_RDIR0_DATA                                                  0xc0300140
+#define ixTHM_TMON0_RDIR1_DATA                                                  0xc0300144
+#define ixTHM_TMON0_RDIR2_DATA                                                  0xc0300148
+#define ixTHM_TMON0_RDIR3_DATA                                                  0xc030014c
+#define ixTHM_TMON0_RDIR4_DATA                                                  0xc0300150
+#define ixTHM_TMON0_RDIR5_DATA                                                  0xc0300154
+#define ixTHM_TMON0_RDIR6_DATA                                                  0xc0300158
+#define ixTHM_TMON0_RDIR7_DATA                                                  0xc030015c
+#define ixTHM_TMON0_RDIR8_DATA                                                  0xc0300160
+#define ixTHM_TMON0_RDIR9_DATA                                                  0xc0300164
+#define ixTHM_TMON0_RDIR10_DATA                                                 0xc0300168
+#define ixTHM_TMON0_RDIR11_DATA                                                 0xc030016c
+#define ixTHM_TMON0_RDIR12_DATA                                                 0xc0300170
+#define ixTHM_TMON0_RDIR13_DATA                                                 0xc0300174
+#define ixTHM_TMON0_RDIR14_DATA                                                 0xc0300178
+#define ixTHM_TMON0_RDIR15_DATA                                                 0xc030017c
+#define ixTHM_TMON1_RDIL0_DATA                                                  0xc0300180
+#define ixTHM_TMON1_RDIL1_DATA                                                  0xc0300184
+#define ixTHM_TMON1_RDIL2_DATA                                                  0xc0300188
+#define ixTHM_TMON1_RDIL3_DATA                                                  0xc030018c
+#define ixTHM_TMON1_RDIL4_DATA                                                  0xc0300190
+#define ixTHM_TMON1_RDIL5_DATA                                                  0xc0300194
+#define ixTHM_TMON1_RDIL6_DATA                                                  0xc0300198
+#define ixTHM_TMON1_RDIL7_DATA                                                  0xc030019c
+#define ixTHM_TMON1_RDIL8_DATA                                                  0xc03001a0
+#define ixTHM_TMON1_RDIL9_DATA                                                  0xc03001a4
+#define ixTHM_TMON1_RDIL10_DATA                                                 0xc03001a8
+#define ixTHM_TMON1_RDIL11_DATA                                                 0xc03001ac
+#define ixTHM_TMON1_RDIL12_DATA                                                 0xc03001b0
+#define ixTHM_TMON1_RDIL13_DATA                                                 0xc03001b4
+#define ixTHM_TMON1_RDIL14_DATA                                                 0xc03001b8
+#define ixTHM_TMON1_RDIL15_DATA                                                 0xc03001bc
+#define ixTHM_TMON1_RDIR0_DATA                                                  0xc03001c0
+#define ixTHM_TMON1_RDIR1_DATA                                                  0xc03001c4
+#define ixTHM_TMON1_RDIR2_DATA                                                  0xc03001c8
+#define ixTHM_TMON1_RDIR3_DATA                                                  0xc03001cc
+#define ixTHM_TMON1_RDIR4_DATA                                                  0xc03001d0
+#define ixTHM_TMON1_RDIR5_DATA                                                  0xc03001d4
+#define ixTHM_TMON1_RDIR6_DATA                                                  0xc03001d8
+#define ixTHM_TMON1_RDIR7_DATA                                                  0xc03001dc
+#define ixTHM_TMON1_RDIR8_DATA                                                  0xc03001e0
+#define ixTHM_TMON1_RDIR9_DATA                                                  0xc03001e4
+#define ixTHM_TMON1_RDIR10_DATA                                                 0xc03001e8
+#define ixTHM_TMON1_RDIR11_DATA                                                 0xc03001ec
+#define ixTHM_TMON1_RDIR12_DATA                                                 0xc03001f0
+#define ixTHM_TMON1_RDIR13_DATA                                                 0xc03001f4
+#define ixTHM_TMON1_RDIR14_DATA                                                 0xc03001f8
+#define ixTHM_TMON1_RDIR15_DATA                                                 0xc03001fc
+#define ixTHM_TMON2_RDIL0_DATA                                                  0xc0300200
+#define ixTHM_TMON2_RDIL1_DATA                                                  0xc0300204
+#define ixTHM_TMON2_RDIL2_DATA                                                  0xc0300208
+#define ixTHM_TMON2_RDIL3_DATA                                                  0xc030020c
+#define ixTHM_TMON2_RDIL4_DATA                                                  0xc0300210
+#define ixTHM_TMON2_RDIL5_DATA                                                  0xc0300214
+#define ixTHM_TMON2_RDIL6_DATA                                                  0xc0300218
+#define ixTHM_TMON2_RDIL7_DATA                                                  0xc030021c
+#define ixTHM_TMON2_RDIL8_DATA                                                  0xc0300220
+#define ixTHM_TMON2_RDIL9_DATA                                                  0xc0300224
+#define ixTHM_TMON2_RDIL10_DATA                                                 0xc0300228
+#define ixTHM_TMON2_RDIL11_DATA                                                 0xc030022c
+#define ixTHM_TMON2_RDIL12_DATA                                                 0xc0300230
+#define ixTHM_TMON2_RDIL13_DATA                                                 0xc0300234
+#define ixTHM_TMON2_RDIL14_DATA                                                 0xc0300238
+#define ixTHM_TMON2_RDIL15_DATA                                                 0xc030023c
+#define ixTHM_TMON2_RDIR0_DATA                                                  0xc0300240
+#define ixTHM_TMON2_RDIR1_DATA                                                  0xc0300244
+#define ixTHM_TMON2_RDIR2_DATA                                                  0xc0300248
+#define ixTHM_TMON2_RDIR3_DATA                                                  0xc030024c
+#define ixTHM_TMON2_RDIR4_DATA                                                  0xc0300250
+#define ixTHM_TMON2_RDIR5_DATA                                                  0xc0300254
+#define ixTHM_TMON2_RDIR6_DATA                                                  0xc0300258
+#define ixTHM_TMON2_RDIR7_DATA                                                  0xc030025c
+#define ixTHM_TMON2_RDIR8_DATA                                                  0xc0300260
+#define ixTHM_TMON2_RDIR9_DATA                                                  0xc0300264
+#define ixTHM_TMON2_RDIR10_DATA                                                 0xc0300268
+#define ixTHM_TMON2_RDIR11_DATA                                                 0xc030026c
+#define ixTHM_TMON2_RDIR12_DATA                                                 0xc0300270
+#define ixTHM_TMON2_RDIR13_DATA                                                 0xc0300274
+#define ixTHM_TMON2_RDIR14_DATA                                                 0xc0300278
+#define ixTHM_TMON2_RDIR15_DATA                                                 0xc030027c
+#define ixTHM_TMON0_INT_DATA                                                    0xc0300300
+#define ixTHM_TMON1_INT_DATA                                                    0xc0300304
+#define ixTHM_TMON2_INT_DATA                                                    0xc0300308
+#define ixTHM_TMON0_DEBUG                                                       0xc0300310
+#define ixTHM_TMON1_DEBUG                                                       0xc0300314
+#define ixTHM_TMON2_DEBUG                                                       0xc0300318
+#define ixTHM_TMON0_STATUS                                                      0xc0300320
+#define ixTHM_TMON1_STATUS                                                      0xc0300324
+#define ixTHM_TMON2_STATUS                                                      0xc0300328
+#define ixGENERAL_PWRMGT                                                        0xc0200000
+#define ixCNB_PWRMGT_CNTL                                                       0xc0200004
+#define ixSCLK_PWRMGT_CNTL                                                      0xc0200008
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX                                      0xc0200014
+#define ixPWR_PCC_CONTROL                                                       0xc0200018
+#define ixPWR_PCC_GPIO_SELECT                                                   0xc020001c
+#define ixCG_FREQ_TRAN_VOTING_0                                                 0xc02001a8
+#define ixCG_FREQ_TRAN_VOTING_1                                                 0xc02001ac
+#define ixCG_FREQ_TRAN_VOTING_2                                                 0xc02001b0
+#define ixCG_FREQ_TRAN_VOTING_3                                                 0xc02001b4
+#define ixCG_FREQ_TRAN_VOTING_4                                                 0xc02001b8
+#define ixCG_FREQ_TRAN_VOTING_5                                                 0xc02001bc
+#define ixCG_FREQ_TRAN_VOTING_6                                                 0xc02001c0
+#define ixCG_FREQ_TRAN_VOTING_7                                                 0xc02001c4
+#define ixPLL_TEST_CNTL                                                         0xc020003c
+#define ixCG_STATIC_SCREEN_PARAMETER                                            0xc0200044
+#define ixCG_DISPLAY_GAP_CNTL                                                   0xc0200060
+#define ixCG_DISPLAY_GAP_CNTL2                                                  0xc0200230
+#define ixCG_ACPI_CNTL                                                          0xc0200064
+#define ixSCLK_DEEP_SLEEP_CNTL                                                  0xc0200080
+#define ixSCLK_DEEP_SLEEP_CNTL2                                                 0xc0200084
+#define ixSCLK_DEEP_SLEEP_CNTL3                                                 0xc020009c
+#define ixSCLK_DEEP_SLEEP_MISC_CNTL                                             0xc0200088
+#define ixLCLK_DEEP_SLEEP_CNTL                                                  0xc020008c
+#define ixLCLK_DEEP_SLEEP_CNTL2                                                 0xc0200310
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1                                    0xc02000f0
+#define ixCG_ULV_PARAMETER                                                      0xc020015c
+#define ixSCLK_MIN_DIV                                                          0xc02003ac
+#define ixPWR_AVFS_SEL                                                          0xc0200384
+#define ixPWR_AVFS_CNTL                                                         0xc0200388
+#define ixPWR_AVFS0_CNTL_STATUS                                                 0xc0200400
+#define ixPWR_AVFS1_CNTL_STATUS                                                 0xc0200404
+#define ixPWR_AVFS2_CNTL_STATUS                                                 0xc0200408
+#define ixPWR_AVFS3_CNTL_STATUS                                                 0xc020040c
+#define ixPWR_AVFS4_CNTL_STATUS                                                 0xc0200410
+#define ixPWR_AVFS5_CNTL_STATUS                                                 0xc0200414
+#define ixPWR_AVFS6_CNTL_STATUS                                                 0xc0200418
+#define ixPWR_AVFS7_CNTL_STATUS                                                 0xc020041c
+#define ixPWR_AVFS8_CNTL_STATUS                                                 0xc0200420
+#define ixPWR_AVFS9_CNTL_STATUS                                                 0xc0200424
+#define ixPWR_AVFS10_CNTL_STATUS                                                0xc0200428
+#define ixPWR_AVFS11_CNTL_STATUS                                                0xc020042c
+#define ixPWR_AVFS12_CNTL_STATUS                                                0xc0200430
+#define ixPWR_AVFS13_CNTL_STATUS                                                0xc0200434
+#define ixPWR_AVFS14_CNTL_STATUS                                                0xc0200438
+#define ixPWR_AVFS15_CNTL_STATUS                                                0xc020043c
+#define ixPWR_AVFS16_CNTL_STATUS                                                0xc0200440
+#define ixPWR_AVFS17_CNTL_STATUS                                                0xc0200444
+#define ixPWR_AVFS18_CNTL_STATUS                                                0xc0200448
+#define ixPWR_AVFS19_CNTL_STATUS                                                0xc020044c
+#define ixPWR_AVFS20_CNTL_STATUS                                                0xc0200450
+#define ixPWR_AVFS21_CNTL_STATUS                                                0xc0200454
+#define ixPWR_AVFS22_CNTL_STATUS                                                0xc0200458
+#define ixPWR_AVFS23_CNTL_STATUS                                                0xc020045c
+#define ixPWR_AVFS24_CNTL_STATUS                                                0xc0200460
+#define ixPWR_AVFS25_CNTL_STATUS                                                0xc0200464
+#define ixPWR_AVFS26_CNTL_STATUS                                                0xc0200468
+#define ixPWR_AVFS27_CNTL_STATUS                                                0xc020046c
+#define ixPWR_CKS_ENABLE                                                        0xc020034c
+#define ixPWR_CKS_CNTL                                                          0xc0200350
+#define ixPWR_DISP_TIMER_CONTROL                                                0xc02003c0
+#define ixPWR_DISP_TIMER_DEBUG                                                  0xc02003c4
+#define ixPWR_DISP_TIMER2_CONTROL                                               0xc02003c8
+#define ixPWR_DISP_TIMER2_DEBUG                                                 0xc02003cc
+#define ixPWR_DISP_TIMER_CONTROL2                                               0xc0200378
+#define ixVDDGFX_IDLE_PARAMETER                                                 0xc020036c
+#define ixVDDGFX_IDLE_CONTROL                                                   0xc0200370
+#define ixVDDGFX_IDLE_EXIT                                                      0xc0200374
+#define ixLCAC_MC0_CNTL                                                         0xc0400130
+#define ixLCAC_MC0_OVR_SEL                                                      0xc0400134
+#define ixLCAC_MC0_OVR_VAL                                                      0xc0400138
+#define ixLCAC_MC1_CNTL                                                         0xc040013c
+#define ixLCAC_MC1_OVR_SEL                                                      0xc0400140
+#define ixLCAC_MC1_OVR_VAL                                                      0xc0400144
+#define ixLCAC_MC2_CNTL                                                         0xc0400148
+#define ixLCAC_MC2_OVR_SEL                                                      0xc040014c
+#define ixLCAC_MC2_OVR_VAL                                                      0xc0400150
+#define ixLCAC_MC3_CNTL                                                         0xc0400154
+#define ixLCAC_MC3_OVR_SEL                                                      0xc0400158
+#define ixLCAC_MC3_OVR_VAL                                                      0xc040015c
+#define ixLCAC_MC4_CNTL                                                         0xc0400d60
+#define ixLCAC_MC4_OVR_SEL                                                      0xc0400d64
+#define ixLCAC_MC4_OVR_VAL                                                      0xc0400d68
+#define ixLCAC_MC5_CNTL                                                         0xc0400d6c
+#define ixLCAC_MC5_OVR_SEL                                                      0xc0400d70
+#define ixLCAC_MC5_OVR_VAL                                                      0xc0400d74
+#define ixLCAC_MC6_CNTL                                                         0xc0400d78
+#define ixLCAC_MC6_OVR_SEL                                                      0xc0400d7c
+#define ixLCAC_MC6_OVR_VAL                                                      0xc0400d80
+#define ixLCAC_MC7_CNTL                                                         0xc0400d84
+#define ixLCAC_MC7_OVR_SEL                                                      0xc0400d88
+#define ixLCAC_MC7_OVR_VAL                                                      0xc0400d8c
+#define ixLCAC_CPL_CNTL                                                         0xc0400160
+#define ixLCAC_CPL_OVR_SEL                                                      0xc0400164
+#define ixLCAC_CPL_OVR_VAL                                                      0xc0400168
+#define mmROM_SMC_IND_INDEX                                                     0x80
+#define mmROM0_ROM_SMC_IND_INDEX                                                0x80
+#define mmROM1_ROM_SMC_IND_INDEX                                                0x82
+#define mmROM2_ROM_SMC_IND_INDEX                                                0x84
+#define mmROM3_ROM_SMC_IND_INDEX                                                0x86
+#define mmROM_SMC_IND_DATA                                                      0x81
+#define mmROM0_ROM_SMC_IND_DATA                                                 0x81
+#define mmROM1_ROM_SMC_IND_DATA                                                 0x83
+#define mmROM2_ROM_SMC_IND_DATA                                                 0x85
+#define mmROM3_ROM_SMC_IND_DATA                                                 0x87
+#define ixROM_CNTL                                                              0xc0600000
+#define ixPAGE_MIRROR_CNTL                                                      0xc0600004
+#define ixROM_STATUS                                                            0xc0600008
+#define ixCGTT_ROM_CLK_CTRL0                                                    0xc060000c
+#define ixROM_INDEX                                                             0xc0600010
+#define ixROM_DATA                                                              0xc0600014
+#define ixROM_START                                                             0xc0600018
+#define ixROM_SW_CNTL                                                           0xc060001c
+#define ixROM_SW_STATUS                                                         0xc0600020
+#define ixROM_SW_COMMAND                                                        0xc0600024
+#define ixROM_SW_DATA_1                                                         0xc0600028
+#define ixROM_SW_DATA_2                                                         0xc060002c
+#define ixROM_SW_DATA_3                                                         0xc0600030
+#define ixROM_SW_DATA_4                                                         0xc0600034
+#define ixROM_SW_DATA_5                                                         0xc0600038
+#define ixROM_SW_DATA_6                                                         0xc060003c
+#define ixROM_SW_DATA_7                                                         0xc0600040
+#define ixROM_SW_DATA_8                                                         0xc0600044
+#define ixROM_SW_DATA_9                                                         0xc0600048
+#define ixROM_SW_DATA_10                                                        0xc060004c
+#define ixROM_SW_DATA_11                                                        0xc0600050
+#define ixROM_SW_DATA_12                                                        0xc0600054
+#define ixROM_SW_DATA_13                                                        0xc0600058
+#define ixROM_SW_DATA_14                                                        0xc060005c
+#define ixROM_SW_DATA_15                                                        0xc0600060
+#define ixROM_SW_DATA_16                                                        0xc0600064
+#define ixROM_SW_DATA_17                                                        0xc0600068
+#define ixROM_SW_DATA_18                                                        0xc060006c
+#define ixROM_SW_DATA_19                                                        0xc0600070
+#define ixROM_SW_DATA_20                                                        0xc0600074
+#define ixROM_SW_DATA_21                                                        0xc0600078
+#define ixROM_SW_DATA_22                                                        0xc060007c
+#define ixROM_SW_DATA_23                                                        0xc0600080
+#define ixROM_SW_DATA_24                                                        0xc0600084
+#define ixROM_SW_DATA_25                                                        0xc0600088
+#define ixROM_SW_DATA_26                                                        0xc060008c
+#define ixROM_SW_DATA_27                                                        0xc0600090
+#define ixROM_SW_DATA_28                                                        0xc0600094
+#define ixROM_SW_DATA_29                                                        0xc0600098
+#define ixROM_SW_DATA_30                                                        0xc060009c
+#define ixROM_SW_DATA_31                                                        0xc06000a0
+#define ixROM_SW_DATA_32                                                        0xc06000a4
+#define ixROM_SW_DATA_33                                                        0xc06000a8
+#define ixROM_SW_DATA_34                                                        0xc06000ac
+#define ixROM_SW_DATA_35                                                        0xc06000b0
+#define ixROM_SW_DATA_36                                                        0xc06000b4
+#define ixROM_SW_DATA_37                                                        0xc06000b8
+#define ixROM_SW_DATA_38                                                        0xc06000bc
+#define ixROM_SW_DATA_39                                                        0xc06000c0
+#define ixROM_SW_DATA_40                                                        0xc06000c4
+#define ixROM_SW_DATA_41                                                        0xc06000c8
+#define ixROM_SW_DATA_42                                                        0xc06000cc
+#define ixROM_SW_DATA_43                                                        0xc06000d0
+#define ixROM_SW_DATA_44                                                        0xc06000d4
+#define ixROM_SW_DATA_45                                                        0xc06000d8
+#define ixROM_SW_DATA_46                                                        0xc06000dc
+#define ixROM_SW_DATA_47                                                        0xc06000e0
+#define ixROM_SW_DATA_48                                                        0xc06000e4
+#define ixROM_SW_DATA_49                                                        0xc06000e8
+#define ixROM_SW_DATA_50                                                        0xc06000ec
+#define ixROM_SW_DATA_51                                                        0xc06000f0
+#define ixROM_SW_DATA_52                                                        0xc06000f4
+#define ixROM_SW_DATA_53                                                        0xc06000f8
+#define ixROM_SW_DATA_54                                                        0xc06000fc
+#define ixROM_SW_DATA_55                                                        0xc0600100
+#define ixROM_SW_DATA_56                                                        0xc0600104
+#define ixROM_SW_DATA_57                                                        0xc0600108
+#define ixROM_SW_DATA_58                                                        0xc060010c
+#define ixROM_SW_DATA_59                                                        0xc0600110
+#define ixROM_SW_DATA_60                                                        0xc0600114
+#define ixROM_SW_DATA_61                                                        0xc0600118
+#define ixROM_SW_DATA_62                                                        0xc060011c
+#define ixROM_SW_DATA_63                                                        0xc0600120
+#define ixROM_SW_DATA_64                                                        0xc0600124
+#define mmGC_CAC_CGTT_CLK_CTRL                                                  0x3292
+#define mmSE_CAC_CGTT_CLK_CTRL                                                  0x3293
+#define mmGC_CAC_LKG_AGGR_LOWER                                                 0x3296
+#define mmGC_CAC_LKG_AGGR_UPPER                                                 0x3297
+#define ixGC_CAC_WEIGHT_CU_0                                                    0x32
+#define ixGC_CAC_WEIGHT_CU_1                                                    0x33
+#define ixGC_CAC_WEIGHT_CU_2                                                    0x34
+#define ixGC_CAC_WEIGHT_CU_3                                                    0x35
+#define ixGC_CAC_WEIGHT_CU_4                                                    0x36
+#define ixGC_CAC_WEIGHT_CU_5                                                    0x37
+#define ixGC_CAC_WEIGHT_CU_6                                                    0x38
+#define ixGC_CAC_WEIGHT_CU_7                                                    0x39
+#define ixGC_CAC_ACC_CU0                                                        0xba
+#define ixGC_CAC_ACC_CU1                                                        0xbb
+#define ixGC_CAC_ACC_CU2                                                        0xbc
+#define ixGC_CAC_ACC_CU3                                                        0xbd
+#define ixGC_CAC_ACC_CU4                                                        0xbe
+#define ixGC_CAC_ACC_CU5                                                        0xbf
+#define ixGC_CAC_ACC_CU6                                                        0xc0
+#define ixGC_CAC_ACC_CU7                                                        0xc1
+#define ixGC_CAC_ACC_CU8                                                        0xc2
+#define ixGC_CAC_ACC_CU9                                                        0xc3
+#define ixGC_CAC_ACC_CU10                                                       0xc4
+#define ixGC_CAC_ACC_CU11                                                       0xc5
+#define ixGC_CAC_ACC_CU12                                                       0xc6
+#define ixGC_CAC_ACC_CU13                                                       0xc7
+#define ixGC_CAC_ACC_CU14                                                       0xc8
+#define ixGC_CAC_ACC_CU15                                                       0xc9
+#define ixGC_CAC_OVRD_CU                                                        0xe7
+
+#endif /* SMU_7_1_3_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
new file mode 100644
index 0000000..f19c420
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
@@ -0,0 +1,1282 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_ENUM_H
+#define SMU_7_1_3_ENUM_H
+
+#define CG_SRBM_START_ADDR                        0x600
+#define CG_SRBM_END_ADDR                          0x8ff
+#define RCU_CCF_DWORDS0                           0xa0
+#define RCU_CCF_BITS0                             0x1400
+#define RCU_SAM_BYTES                             0x2c
+#define RCU_SAM_RTL_BYTES                         0x2c
+#define RCU_SMU_BYTES                             0x14
+#define RCU_SMU_RTL_BYTES                         0x14
+#define SFP_CHAIN_ADDR                            0x1
+#define SFP_SADR                                  0x0
+#define SFP_EADR                                  0x37f
+#define SAMU_KEY_CHAIN_ADR                        0x0
+#define SAMU_KEY_SADR                             0x280
+#define SAMU_KEY_EADR                             0x2ab
+#define SMU_KEY_CHAIN_ADR                         0x0
+#define SMU_KEY_SADR                              0x2ac
+#define SMU_KEY_EADR                              0x2bf
+#define SMC_MSG_TEST                              0x1
+#define SMC_MSG_PHY_LN_OFF                        0x2
+#define SMC_MSG_PHY_LN_ON                         0x3
+#define SMC_MSG_DDI_PHY_OFF                       0x4
+#define SMC_MSG_DDI_PHY_ON                        0x5
+#define SMC_MSG_CASCADE_PLL_OFF                   0x6
+#define SMC_MSG_CASCADE_PLL_ON                    0x7
+#define SMC_MSG_PWR_OFF_x16                       0x8
+#define SMC_MSG_CONFIG_LCLK_DPM                   0x9
+#define SMC_MSG_FLUSH_DATA_CACHE                  0xa
+#define SMC_MSG_FLUSH_INSTRUCTION_CACHE           0xb
+#define SMC_MSG_CONFIG_VPC_ACCUMULATOR            0xc
+#define SMC_MSG_CONFIG_BAPM                       0xd
+#define SMC_MSG_CONFIG_TDC_LIMIT                  0xe
+#define SMC_MSG_CONFIG_LPMx                       0xf
+#define SMC_MSG_CONFIG_HTC_LIMIT                  0x10
+#define SMC_MSG_CONFIG_THERMAL_CNTL               0x11
+#define SMC_MSG_CONFIG_VOLTAGE_CNTL               0x12
+#define SMC_MSG_CONFIG_TDP_CNTL                   0x13
+#define SMC_MSG_EN_PM_CNTL                        0x14
+#define SMC_MSG_DIS_PM_CNTL                       0x15
+#define SMC_MSG_CONFIG_NBDPM                      0x16
+#define SMC_MSG_CONFIG_LOADLINE                   0x17
+#define SMC_MSG_ADJUST_LOADLINE                   0x18
+#define SMC_MSG_RESET                             0x20
+#define SMC_MSG_VOLTAGE                           0x25
+#define SMC_VERSION_MAJOR                         0x7
+#define SMC_VERSION_MINOR                         0x0
+#define SMC_HEADER_SIZE                           0x40
+#define ROM_SIGNATURE                             0xaa55
+typedef enum SurfaceEndian {
+	ENDIAN_NONE                                      = 0x0,
+	ENDIAN_8IN16                                     = 0x1,
+	ENDIAN_8IN32                                     = 0x2,
+	ENDIAN_8IN64                                     = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+	ARRAY_LINEAR_GENERAL                             = 0x0,
+	ARRAY_LINEAR_ALIGNED                             = 0x1,
+	ARRAY_1D_TILED_THIN1                             = 0x2,
+	ARRAY_1D_TILED_THICK                             = 0x3,
+	ARRAY_2D_TILED_THIN1                             = 0x4,
+	ARRAY_PRT_TILED_THIN1                            = 0x5,
+	ARRAY_PRT_2D_TILED_THIN1                         = 0x6,
+	ARRAY_2D_TILED_THICK                             = 0x7,
+	ARRAY_2D_TILED_XTHICK                            = 0x8,
+	ARRAY_PRT_TILED_THICK                            = 0x9,
+	ARRAY_PRT_2D_TILED_THICK                         = 0xa,
+	ARRAY_PRT_3D_TILED_THIN1                         = 0xb,
+	ARRAY_3D_TILED_THIN1                             = 0xc,
+	ARRAY_3D_TILED_THICK                             = 0xd,
+	ARRAY_3D_TILED_XTHICK                            = 0xe,
+	ARRAY_PRT_3D_TILED_THICK                         = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+	CONFIG_1_PIPE                                    = 0x0,
+	CONFIG_2_PIPE                                    = 0x1,
+	CONFIG_4_PIPE                                    = 0x2,
+	CONFIG_8_PIPE                                    = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+	CONFIG_4_BANK                                    = 0x0,
+	CONFIG_8_BANK                                    = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+	CONFIG_256B_GROUP                                = 0x0,
+	CONFIG_512B_GROUP                                = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+	CONFIG_1KB_ROW                                   = 0x0,
+	CONFIG_2KB_ROW                                   = 0x1,
+	CONFIG_4KB_ROW                                   = 0x2,
+	CONFIG_8KB_ROW                                   = 0x3,
+	CONFIG_1KB_ROW_OPT                               = 0x4,
+	CONFIG_2KB_ROW_OPT                               = 0x5,
+	CONFIG_4KB_ROW_OPT                               = 0x6,
+	CONFIG_8KB_ROW_OPT                               = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+	CONFIG_128B_SWAPS                                = 0x0,
+	CONFIG_256B_SWAPS                                = 0x1,
+	CONFIG_512B_SWAPS                                = 0x2,
+	CONFIG_1KB_SWAPS                                 = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+	CONFIG_1KB_SPLIT                                 = 0x0,
+	CONFIG_2KB_SPLIT                                 = 0x1,
+	CONFIG_4KB_SPLIT                                 = 0x2,
+	CONFIG_8KB_SPLIT                                 = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+	ADDR_CONFIG_1_PIPE                               = 0x0,
+	ADDR_CONFIG_2_PIPE                               = 0x1,
+	ADDR_CONFIG_4_PIPE                               = 0x2,
+	ADDR_CONFIG_8_PIPE                               = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+	ADDR_CONFIG_PIPE_INTERLEAVE_256B                 = 0x0,
+	ADDR_CONFIG_PIPE_INTERLEAVE_512B                 = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+	ADDR_CONFIG_BANK_INTERLEAVE_1                    = 0x0,
+	ADDR_CONFIG_BANK_INTERLEAVE_2                    = 0x1,
+	ADDR_CONFIG_BANK_INTERLEAVE_4                    = 0x2,
+	ADDR_CONFIG_BANK_INTERLEAVE_8                    = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+	ADDR_CONFIG_1_SHADER_ENGINE                      = 0x0,
+	ADDR_CONFIG_2_SHADER_ENGINE                      = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+	ADDR_CONFIG_SE_TILE_16                           = 0x0,
+	ADDR_CONFIG_SE_TILE_32                           = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+	ADDR_CONFIG_1_GPU                                = 0x0,
+	ADDR_CONFIG_2_GPU                                = 0x1,
+	ADDR_CONFIG_4_GPU                                = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+	ADDR_CONFIG_GPU_TILE_16                          = 0x0,
+	ADDR_CONFIG_GPU_TILE_32                          = 0x1,
+	ADDR_CONFIG_GPU_TILE_64                          = 0x2,
+	ADDR_CONFIG_GPU_TILE_128                         = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+	ADDR_CONFIG_1KB_ROW                              = 0x0,
+	ADDR_CONFIG_2KB_ROW                              = 0x1,
+	ADDR_CONFIG_4KB_ROW                              = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+	ADDR_CONFIG_1_LOWER_PIPES                        = 0x0,
+	ADDR_CONFIG_2_LOWER_PIPES                        = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+	DBG_CLIENT_BLKID_RESERVED                        = 0x0,
+	DBG_CLIENT_BLKID_dbg                             = 0x1,
+	DBG_CLIENT_BLKID_scf2                            = 0x2,
+	DBG_CLIENT_BLKID_mcd5_0                          = 0x3,
+	DBG_CLIENT_BLKID_mcd5_1                          = 0x4,
+	DBG_CLIENT_BLKID_mcd6_0                          = 0x5,
+	DBG_CLIENT_BLKID_mcd6_1                          = 0x6,
+	DBG_CLIENT_BLKID_mcd7_0                          = 0x7,
+	DBG_CLIENT_BLKID_mcd7_1                          = 0x8,
+	DBG_CLIENT_BLKID_vmc                             = 0x9,
+	DBG_CLIENT_BLKID_sx30                            = 0xa,
+	DBG_CLIENT_BLKID_mcd2_0                          = 0xb,
+	DBG_CLIENT_BLKID_mcd2_1                          = 0xc,
+	DBG_CLIENT_BLKID_bci1                            = 0xd,
+	DBG_CLIENT_BLKID_xdma_dbg_client_wrapper         = 0xe,
+	DBG_CLIENT_BLKID_mcc0                            = 0xf,
+	DBG_CLIENT_BLKID_uvdf_0                          = 0x10,
+	DBG_CLIENT_BLKID_uvdf_1                          = 0x11,
+	DBG_CLIENT_BLKID_uvdf_2                          = 0x12,
+	DBG_CLIENT_BLKID_bci0                            = 0x13,
+	DBG_CLIENT_BLKID_vcec0_0                         = 0x14,
+	DBG_CLIENT_BLKID_cb100                           = 0x15,
+	DBG_CLIENT_BLKID_cb001                           = 0x16,
+	DBG_CLIENT_BLKID_cb002                           = 0x17,
+	DBG_CLIENT_BLKID_cb003                           = 0x18,
+	DBG_CLIENT_BLKID_mcd4_0                          = 0x19,
+	DBG_CLIENT_BLKID_mcd4_1                          = 0x1a,
+	DBG_CLIENT_BLKID_tmonw00                         = 0x1b,
+	DBG_CLIENT_BLKID_cb101                           = 0x1c,
+	DBG_CLIENT_BLKID_cb102                           = 0x1d,
+	DBG_CLIENT_BLKID_cb103                           = 0x1e,
+	DBG_CLIENT_BLKID_sx10                            = 0x1f,
+	DBG_CLIENT_BLKID_cb301                           = 0x20,
+	DBG_CLIENT_BLKID_cb302                           = 0x21,
+	DBG_CLIENT_BLKID_cb303                           = 0x22,
+	DBG_CLIENT_BLKID_tmonw01                         = 0x23,
+	DBG_CLIENT_BLKID_tmonw02                         = 0x24,
+	DBG_CLIENT_BLKID_vcea0_0                         = 0x25,
+	DBG_CLIENT_BLKID_vcea0_1                         = 0x26,
+	DBG_CLIENT_BLKID_vcea0_2                         = 0x27,
+	DBG_CLIENT_BLKID_vcea0_3                         = 0x28,
+	DBG_CLIENT_BLKID_scf1                            = 0x29,
+	DBG_CLIENT_BLKID_sx20                            = 0x2a,
+	DBG_CLIENT_BLKID_spim1                           = 0x2b,
+	DBG_CLIENT_BLKID_scb1                            = 0x2c,
+	DBG_CLIENT_BLKID_pa10                            = 0x2d,
+	DBG_CLIENT_BLKID_pa00                            = 0x2e,
+	DBG_CLIENT_BLKID_gmcon                           = 0x2f,
+	DBG_CLIENT_BLKID_mcb                             = 0x30,
+	DBG_CLIENT_BLKID_vgt0                            = 0x31,
+	DBG_CLIENT_BLKID_pc0                             = 0x32,
+	DBG_CLIENT_BLKID_bci2                            = 0x33,
+	DBG_CLIENT_BLKID_uvdb_0                          = 0x34,
+	DBG_CLIENT_BLKID_spim3                           = 0x35,
+	DBG_CLIENT_BLKID_scb3                            = 0x36,
+	DBG_CLIENT_BLKID_cpc_0                           = 0x37,
+	DBG_CLIENT_BLKID_cpc_1                           = 0x38,
+	DBG_CLIENT_BLKID_uvdm_0                          = 0x39,
+	DBG_CLIENT_BLKID_uvdm_1                          = 0x3a,
+	DBG_CLIENT_BLKID_uvdm_2                          = 0x3b,
+	DBG_CLIENT_BLKID_uvdm_3                          = 0x3c,
+	DBG_CLIENT_BLKID_cb000                           = 0x3d,
+	DBG_CLIENT_BLKID_spim0                           = 0x3e,
+	DBG_CLIENT_BLKID_scb0                            = 0x3f,
+	DBG_CLIENT_BLKID_mcc2                            = 0x40,
+	DBG_CLIENT_BLKID_ds0                             = 0x41,
+	DBG_CLIENT_BLKID_srbm                            = 0x42,
+	DBG_CLIENT_BLKID_ih                              = 0x43,
+	DBG_CLIENT_BLKID_sem                             = 0x44,
+	DBG_CLIENT_BLKID_sdma_0                          = 0x45,
+	DBG_CLIENT_BLKID_sdma_1                          = 0x46,
+	DBG_CLIENT_BLKID_hdp                             = 0x47,
+	DBG_CLIENT_BLKID_acp_0                           = 0x48,
+	DBG_CLIENT_BLKID_acp_1                           = 0x49,
+	DBG_CLIENT_BLKID_cb200                           = 0x4a,
+	DBG_CLIENT_BLKID_scf3                            = 0x4b,
+	DBG_CLIENT_BLKID_bci3                            = 0x4c,
+	DBG_CLIENT_BLKID_mcd0_0                          = 0x4d,
+	DBG_CLIENT_BLKID_mcd0_1                          = 0x4e,
+	DBG_CLIENT_BLKID_pa11                            = 0x4f,
+	DBG_CLIENT_BLKID_pa01                            = 0x50,
+	DBG_CLIENT_BLKID_cb201                           = 0x51,
+	DBG_CLIENT_BLKID_cb202                           = 0x52,
+	DBG_CLIENT_BLKID_cb203                           = 0x53,
+	DBG_CLIENT_BLKID_spim2                           = 0x54,
+	DBG_CLIENT_BLKID_scb2                            = 0x55,
+	DBG_CLIENT_BLKID_vgt2                            = 0x56,
+	DBG_CLIENT_BLKID_pc2                             = 0x57,
+	DBG_CLIENT_BLKID_smu_0                           = 0x58,
+	DBG_CLIENT_BLKID_smu_1                           = 0x59,
+	DBG_CLIENT_BLKID_smu_2                           = 0x5a,
+	DBG_CLIENT_BLKID_cb1                             = 0x5b,
+	DBG_CLIENT_BLKID_ia0                             = 0x5c,
+	DBG_CLIENT_BLKID_wd                              = 0x5d,
+	DBG_CLIENT_BLKID_ia1                             = 0x5e,
+	DBG_CLIENT_BLKID_scf0                            = 0x5f,
+	DBG_CLIENT_BLKID_vgt1                            = 0x60,
+	DBG_CLIENT_BLKID_pc1                             = 0x61,
+	DBG_CLIENT_BLKID_cb0                             = 0x62,
+	DBG_CLIENT_BLKID_gdc_one_0                       = 0x63,
+	DBG_CLIENT_BLKID_gdc_one_1                       = 0x64,
+	DBG_CLIENT_BLKID_gdc_one_2                       = 0x65,
+	DBG_CLIENT_BLKID_gdc_one_3                       = 0x66,
+	DBG_CLIENT_BLKID_gdc_one_4                       = 0x67,
+	DBG_CLIENT_BLKID_gdc_one_5                       = 0x68,
+	DBG_CLIENT_BLKID_gdc_one_6                       = 0x69,
+	DBG_CLIENT_BLKID_gdc_one_7                       = 0x6a,
+	DBG_CLIENT_BLKID_gdc_one_8                       = 0x6b,
+	DBG_CLIENT_BLKID_gdc_one_9                       = 0x6c,
+	DBG_CLIENT_BLKID_gdc_one_10                      = 0x6d,
+	DBG_CLIENT_BLKID_gdc_one_11                      = 0x6e,
+	DBG_CLIENT_BLKID_gdc_one_12                      = 0x6f,
+	DBG_CLIENT_BLKID_gdc_one_13                      = 0x70,
+	DBG_CLIENT_BLKID_gdc_one_14                      = 0x71,
+	DBG_CLIENT_BLKID_gdc_one_15                      = 0x72,
+	DBG_CLIENT_BLKID_gdc_one_16                      = 0x73,
+	DBG_CLIENT_BLKID_gdc_one_17                      = 0x74,
+	DBG_CLIENT_BLKID_gdc_one_18                      = 0x75,
+	DBG_CLIENT_BLKID_gdc_one_19                      = 0x76,
+	DBG_CLIENT_BLKID_gdc_one_20                      = 0x77,
+	DBG_CLIENT_BLKID_gdc_one_21                      = 0x78,
+	DBG_CLIENT_BLKID_gdc_one_22                      = 0x79,
+	DBG_CLIENT_BLKID_gdc_one_23                      = 0x7a,
+	DBG_CLIENT_BLKID_gdc_one_24                      = 0x7b,
+	DBG_CLIENT_BLKID_gdc_one_25                      = 0x7c,
+	DBG_CLIENT_BLKID_gdc_one_26                      = 0x7d,
+	DBG_CLIENT_BLKID_gdc_one_27                      = 0x7e,
+	DBG_CLIENT_BLKID_gdc_one_28                      = 0x7f,
+	DBG_CLIENT_BLKID_gdc_one_29                      = 0x80,
+	DBG_CLIENT_BLKID_gdc_one_30                      = 0x81,
+	DBG_CLIENT_BLKID_gdc_one_31                      = 0x82,
+	DBG_CLIENT_BLKID_gdc_one_32                      = 0x83,
+	DBG_CLIENT_BLKID_gdc_one_33                      = 0x84,
+	DBG_CLIENT_BLKID_gdc_one_34                      = 0x85,
+	DBG_CLIENT_BLKID_gdc_one_35                      = 0x86,
+	DBG_CLIENT_BLKID_vceb0_0                         = 0x87,
+	DBG_CLIENT_BLKID_vgt3                            = 0x88,
+	DBG_CLIENT_BLKID_pc3                             = 0x89,
+	DBG_CLIENT_BLKID_mcd3_0                          = 0x8a,
+	DBG_CLIENT_BLKID_mcd3_1                          = 0x8b,
+	DBG_CLIENT_BLKID_uvdu_0                          = 0x8c,
+	DBG_CLIENT_BLKID_uvdu_1                          = 0x8d,
+	DBG_CLIENT_BLKID_uvdu_2                          = 0x8e,
+	DBG_CLIENT_BLKID_uvdu_3                          = 0x8f,
+	DBG_CLIENT_BLKID_uvdu_4                          = 0x90,
+	DBG_CLIENT_BLKID_uvdu_5                          = 0x91,
+	DBG_CLIENT_BLKID_uvdu_6                          = 0x92,
+	DBG_CLIENT_BLKID_cb300                           = 0x93,
+	DBG_CLIENT_BLKID_mcd1_0                          = 0x94,
+	DBG_CLIENT_BLKID_mcd1_1                          = 0x95,
+	DBG_CLIENT_BLKID_sx00                            = 0x96,
+	DBG_CLIENT_BLKID_uvdc_0                          = 0x97,
+	DBG_CLIENT_BLKID_uvdc_1                          = 0x98,
+	DBG_CLIENT_BLKID_mcc3                            = 0x99,
+	DBG_CLIENT_BLKID_mcc4                            = 0x9a,
+	DBG_CLIENT_BLKID_mcc5                            = 0x9b,
+	DBG_CLIENT_BLKID_mcc6                            = 0x9c,
+	DBG_CLIENT_BLKID_mcc7                            = 0x9d,
+	DBG_CLIENT_BLKID_cpg_0                           = 0x9e,
+	DBG_CLIENT_BLKID_cpg_1                           = 0x9f,
+	DBG_CLIENT_BLKID_gck                             = 0xa0,
+	DBG_CLIENT_BLKID_mcc1                            = 0xa1,
+	DBG_CLIENT_BLKID_cpf_0                           = 0xa2,
+	DBG_CLIENT_BLKID_cpf_1                           = 0xa3,
+	DBG_CLIENT_BLKID_rlc                             = 0xa4,
+	DBG_CLIENT_BLKID_grbm                            = 0xa5,
+	DBG_CLIENT_BLKID_sammsp                          = 0xa6,
+	DBG_CLIENT_BLKID_dci_pg                          = 0xa7,
+	DBG_CLIENT_BLKID_dci_0                           = 0xa8,
+	DBG_CLIENT_BLKID_dccg0_0                         = 0xa9,
+	DBG_CLIENT_BLKID_dccg0_1                         = 0xaa,
+	DBG_CLIENT_BLKID_dcfe01_0                        = 0xab,
+	DBG_CLIENT_BLKID_dcfe02_0                        = 0xac,
+	DBG_CLIENT_BLKID_dcfe03_0                        = 0xad,
+	DBG_CLIENT_BLKID_dcfe04_0                        = 0xae,
+	DBG_CLIENT_BLKID_dcfe05_0                        = 0xaf,
+	DBG_CLIENT_BLKID_dcfe06_0                        = 0xb0,
+	DBG_CLIENT_BLKID_mcq0_0                          = 0xb1,
+	DBG_CLIENT_BLKID_mcq0_1                          = 0xb2,
+	DBG_CLIENT_BLKID_mcq1_0                          = 0xb3,
+	DBG_CLIENT_BLKID_mcq1_1                          = 0xb4,
+	DBG_CLIENT_BLKID_mcq2_0                          = 0xb5,
+	DBG_CLIENT_BLKID_mcq2_1                          = 0xb6,
+	DBG_CLIENT_BLKID_mcq3_0                          = 0xb7,
+	DBG_CLIENT_BLKID_mcq3_1                          = 0xb8,
+	DBG_CLIENT_BLKID_mcq4_0                          = 0xb9,
+	DBG_CLIENT_BLKID_mcq4_1                          = 0xba,
+	DBG_CLIENT_BLKID_mcq5_0                          = 0xbb,
+	DBG_CLIENT_BLKID_mcq5_1                          = 0xbc,
+	DBG_CLIENT_BLKID_mcq6_0                          = 0xbd,
+	DBG_CLIENT_BLKID_mcq6_1                          = 0xbe,
+	DBG_CLIENT_BLKID_mcq7_0                          = 0xbf,
+	DBG_CLIENT_BLKID_mcq7_1                          = 0xc0,
+	DBG_CLIENT_BLKID_uvdi_0                          = 0xc1,
+	DBG_CLIENT_BLKID_RESERVED_LAST                   = 0xc2,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+	DBG_BLOCK_ID_RESERVED                            = 0x0,
+	DBG_BLOCK_ID_DBG                                 = 0x1,
+	DBG_BLOCK_ID_VMC                                 = 0x2,
+	DBG_BLOCK_ID_PDMA                                = 0x3,
+	DBG_BLOCK_ID_CG                                  = 0x4,
+	DBG_BLOCK_ID_SRBM                                = 0x5,
+	DBG_BLOCK_ID_GRBM                                = 0x6,
+	DBG_BLOCK_ID_RLC                                 = 0x7,
+	DBG_BLOCK_ID_CSC                                 = 0x8,
+	DBG_BLOCK_ID_SEM                                 = 0x9,
+	DBG_BLOCK_ID_IH                                  = 0xa,
+	DBG_BLOCK_ID_SC                                  = 0xb,
+	DBG_BLOCK_ID_SQ                                  = 0xc,
+	DBG_BLOCK_ID_AVP                                 = 0xd,
+	DBG_BLOCK_ID_GMCON                               = 0xe,
+	DBG_BLOCK_ID_SMU                                 = 0xf,
+	DBG_BLOCK_ID_DMA0                                = 0x10,
+	DBG_BLOCK_ID_DMA1                                = 0x11,
+	DBG_BLOCK_ID_SPIM                                = 0x12,
+	DBG_BLOCK_ID_GDS                                 = 0x13,
+	DBG_BLOCK_ID_SPIS                                = 0x14,
+	DBG_BLOCK_ID_UNUSED0                             = 0x15,
+	DBG_BLOCK_ID_PA0                                 = 0x16,
+	DBG_BLOCK_ID_PA1                                 = 0x17,
+	DBG_BLOCK_ID_CP0                                 = 0x18,
+	DBG_BLOCK_ID_CP1                                 = 0x19,
+	DBG_BLOCK_ID_CP2                                 = 0x1a,
+	DBG_BLOCK_ID_UNUSED1                             = 0x1b,
+	DBG_BLOCK_ID_UVDU                                = 0x1c,
+	DBG_BLOCK_ID_UVDM                                = 0x1d,
+	DBG_BLOCK_ID_VCE                                 = 0x1e,
+	DBG_BLOCK_ID_UNUSED2                             = 0x1f,
+	DBG_BLOCK_ID_VGT0                                = 0x20,
+	DBG_BLOCK_ID_VGT1                                = 0x21,
+	DBG_BLOCK_ID_IA                                  = 0x22,
+	DBG_BLOCK_ID_UNUSED3                             = 0x23,
+	DBG_BLOCK_ID_SCT0                                = 0x24,
+	DBG_BLOCK_ID_SCT1                                = 0x25,
+	DBG_BLOCK_ID_SPM0                                = 0x26,
+	DBG_BLOCK_ID_SPM1                                = 0x27,
+	DBG_BLOCK_ID_TCAA                                = 0x28,
+	DBG_BLOCK_ID_TCAB                                = 0x29,
+	DBG_BLOCK_ID_TCCA                                = 0x2a,
+	DBG_BLOCK_ID_TCCB                                = 0x2b,
+	DBG_BLOCK_ID_MCC0                                = 0x2c,
+	DBG_BLOCK_ID_MCC1                                = 0x2d,
+	DBG_BLOCK_ID_MCC2                                = 0x2e,
+	DBG_BLOCK_ID_MCC3                                = 0x2f,
+	DBG_BLOCK_ID_SX0                                 = 0x30,
+	DBG_BLOCK_ID_SX1                                 = 0x31,
+	DBG_BLOCK_ID_SX2                                 = 0x32,
+	DBG_BLOCK_ID_SX3                                 = 0x33,
+	DBG_BLOCK_ID_UNUSED4                             = 0x34,
+	DBG_BLOCK_ID_UNUSED5                             = 0x35,
+	DBG_BLOCK_ID_UNUSED6                             = 0x36,
+	DBG_BLOCK_ID_UNUSED7                             = 0x37,
+	DBG_BLOCK_ID_PC0                                 = 0x38,
+	DBG_BLOCK_ID_PC1                                 = 0x39,
+	DBG_BLOCK_ID_UNUSED8                             = 0x3a,
+	DBG_BLOCK_ID_UNUSED9                             = 0x3b,
+	DBG_BLOCK_ID_UNUSED10                            = 0x3c,
+	DBG_BLOCK_ID_UNUSED11                            = 0x3d,
+	DBG_BLOCK_ID_MCB                                 = 0x3e,
+	DBG_BLOCK_ID_UNUSED12                            = 0x3f,
+	DBG_BLOCK_ID_SCB0                                = 0x40,
+	DBG_BLOCK_ID_SCB1                                = 0x41,
+	DBG_BLOCK_ID_UNUSED13                            = 0x42,
+	DBG_BLOCK_ID_UNUSED14                            = 0x43,
+	DBG_BLOCK_ID_SCF0                                = 0x44,
+	DBG_BLOCK_ID_SCF1                                = 0x45,
+	DBG_BLOCK_ID_UNUSED15                            = 0x46,
+	DBG_BLOCK_ID_UNUSED16                            = 0x47,
+	DBG_BLOCK_ID_BCI0                                = 0x48,
+	DBG_BLOCK_ID_BCI1                                = 0x49,
+	DBG_BLOCK_ID_BCI2                                = 0x4a,
+	DBG_BLOCK_ID_BCI3                                = 0x4b,
+	DBG_BLOCK_ID_UNUSED17                            = 0x4c,
+	DBG_BLOCK_ID_UNUSED18                            = 0x4d,
+	DBG_BLOCK_ID_UNUSED19                            = 0x4e,
+	DBG_BLOCK_ID_UNUSED20                            = 0x4f,
+	DBG_BLOCK_ID_CB00                                = 0x50,
+	DBG_BLOCK_ID_CB01                                = 0x51,
+	DBG_BLOCK_ID_CB02                                = 0x52,
+	DBG_BLOCK_ID_CB03                                = 0x53,
+	DBG_BLOCK_ID_CB04                                = 0x54,
+	DBG_BLOCK_ID_UNUSED21                            = 0x55,
+	DBG_BLOCK_ID_UNUSED22                            = 0x56,
+	DBG_BLOCK_ID_UNUSED23                            = 0x57,
+	DBG_BLOCK_ID_CB10                                = 0x58,
+	DBG_BLOCK_ID_CB11                                = 0x59,
+	DBG_BLOCK_ID_CB12                                = 0x5a,
+	DBG_BLOCK_ID_CB13                                = 0x5b,
+	DBG_BLOCK_ID_CB14                                = 0x5c,
+	DBG_BLOCK_ID_UNUSED24                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED25                            = 0x5e,
+	DBG_BLOCK_ID_UNUSED26                            = 0x5f,
+	DBG_BLOCK_ID_TCP0                                = 0x60,
+	DBG_BLOCK_ID_TCP1                                = 0x61,
+	DBG_BLOCK_ID_TCP2                                = 0x62,
+	DBG_BLOCK_ID_TCP3                                = 0x63,
+	DBG_BLOCK_ID_TCP4                                = 0x64,
+	DBG_BLOCK_ID_TCP5                                = 0x65,
+	DBG_BLOCK_ID_TCP6                                = 0x66,
+	DBG_BLOCK_ID_TCP7                                = 0x67,
+	DBG_BLOCK_ID_TCP8                                = 0x68,
+	DBG_BLOCK_ID_TCP9                                = 0x69,
+	DBG_BLOCK_ID_TCP10                               = 0x6a,
+	DBG_BLOCK_ID_TCP11                               = 0x6b,
+	DBG_BLOCK_ID_TCP12                               = 0x6c,
+	DBG_BLOCK_ID_TCP13                               = 0x6d,
+	DBG_BLOCK_ID_TCP14                               = 0x6e,
+	DBG_BLOCK_ID_TCP15                               = 0x6f,
+	DBG_BLOCK_ID_TCP16                               = 0x70,
+	DBG_BLOCK_ID_TCP17                               = 0x71,
+	DBG_BLOCK_ID_TCP18                               = 0x72,
+	DBG_BLOCK_ID_TCP19                               = 0x73,
+	DBG_BLOCK_ID_TCP20                               = 0x74,
+	DBG_BLOCK_ID_TCP21                               = 0x75,
+	DBG_BLOCK_ID_TCP22                               = 0x76,
+	DBG_BLOCK_ID_TCP23                               = 0x77,
+	DBG_BLOCK_ID_TCP_RESERVED0                       = 0x78,
+	DBG_BLOCK_ID_TCP_RESERVED1                       = 0x79,
+	DBG_BLOCK_ID_TCP_RESERVED2                       = 0x7a,
+	DBG_BLOCK_ID_TCP_RESERVED3                       = 0x7b,
+	DBG_BLOCK_ID_TCP_RESERVED4                       = 0x7c,
+	DBG_BLOCK_ID_TCP_RESERVED5                       = 0x7d,
+	DBG_BLOCK_ID_TCP_RESERVED6                       = 0x7e,
+	DBG_BLOCK_ID_TCP_RESERVED7                       = 0x7f,
+	DBG_BLOCK_ID_DB00                                = 0x80,
+	DBG_BLOCK_ID_DB01                                = 0x81,
+	DBG_BLOCK_ID_DB02                                = 0x82,
+	DBG_BLOCK_ID_DB03                                = 0x83,
+	DBG_BLOCK_ID_DB04                                = 0x84,
+	DBG_BLOCK_ID_UNUSED27                            = 0x85,
+	DBG_BLOCK_ID_UNUSED28                            = 0x86,
+	DBG_BLOCK_ID_UNUSED29                            = 0x87,
+	DBG_BLOCK_ID_DB10                                = 0x88,
+	DBG_BLOCK_ID_DB11                                = 0x89,
+	DBG_BLOCK_ID_DB12                                = 0x8a,
+	DBG_BLOCK_ID_DB13                                = 0x8b,
+	DBG_BLOCK_ID_DB14                                = 0x8c,
+	DBG_BLOCK_ID_UNUSED30                            = 0x8d,
+	DBG_BLOCK_ID_UNUSED31                            = 0x8e,
+	DBG_BLOCK_ID_UNUSED32                            = 0x8f,
+	DBG_BLOCK_ID_TCC0                                = 0x90,
+	DBG_BLOCK_ID_TCC1                                = 0x91,
+	DBG_BLOCK_ID_TCC2                                = 0x92,
+	DBG_BLOCK_ID_TCC3                                = 0x93,
+	DBG_BLOCK_ID_TCC4                                = 0x94,
+	DBG_BLOCK_ID_TCC5                                = 0x95,
+	DBG_BLOCK_ID_TCC6                                = 0x96,
+	DBG_BLOCK_ID_TCC7                                = 0x97,
+	DBG_BLOCK_ID_SPS00                               = 0x98,
+	DBG_BLOCK_ID_SPS01                               = 0x99,
+	DBG_BLOCK_ID_SPS02                               = 0x9a,
+	DBG_BLOCK_ID_SPS10                               = 0x9b,
+	DBG_BLOCK_ID_SPS11                               = 0x9c,
+	DBG_BLOCK_ID_SPS12                               = 0x9d,
+	DBG_BLOCK_ID_UNUSED33                            = 0x9e,
+	DBG_BLOCK_ID_UNUSED34                            = 0x9f,
+	DBG_BLOCK_ID_TA00                                = 0xa0,
+	DBG_BLOCK_ID_TA01                                = 0xa1,
+	DBG_BLOCK_ID_TA02                                = 0xa2,
+	DBG_BLOCK_ID_TA03                                = 0xa3,
+	DBG_BLOCK_ID_TA04                                = 0xa4,
+	DBG_BLOCK_ID_TA05                                = 0xa5,
+	DBG_BLOCK_ID_TA06                                = 0xa6,
+	DBG_BLOCK_ID_TA07                                = 0xa7,
+	DBG_BLOCK_ID_TA08                                = 0xa8,
+	DBG_BLOCK_ID_TA09                                = 0xa9,
+	DBG_BLOCK_ID_TA0A                                = 0xaa,
+	DBG_BLOCK_ID_TA0B                                = 0xab,
+	DBG_BLOCK_ID_UNUSED35                            = 0xac,
+	DBG_BLOCK_ID_UNUSED36                            = 0xad,
+	DBG_BLOCK_ID_UNUSED37                            = 0xae,
+	DBG_BLOCK_ID_UNUSED38                            = 0xaf,
+	DBG_BLOCK_ID_TA10                                = 0xb0,
+	DBG_BLOCK_ID_TA11                                = 0xb1,
+	DBG_BLOCK_ID_TA12                                = 0xb2,
+	DBG_BLOCK_ID_TA13                                = 0xb3,
+	DBG_BLOCK_ID_TA14                                = 0xb4,
+	DBG_BLOCK_ID_TA15                                = 0xb5,
+	DBG_BLOCK_ID_TA16                                = 0xb6,
+	DBG_BLOCK_ID_TA17                                = 0xb7,
+	DBG_BLOCK_ID_TA18                                = 0xb8,
+	DBG_BLOCK_ID_TA19                                = 0xb9,
+	DBG_BLOCK_ID_TA1A                                = 0xba,
+	DBG_BLOCK_ID_TA1B                                = 0xbb,
+	DBG_BLOCK_ID_UNUSED39                            = 0xbc,
+	DBG_BLOCK_ID_UNUSED40                            = 0xbd,
+	DBG_BLOCK_ID_UNUSED41                            = 0xbe,
+	DBG_BLOCK_ID_UNUSED42                            = 0xbf,
+	DBG_BLOCK_ID_TD00                                = 0xc0,
+	DBG_BLOCK_ID_TD01                                = 0xc1,
+	DBG_BLOCK_ID_TD02                                = 0xc2,
+	DBG_BLOCK_ID_TD03                                = 0xc3,
+	DBG_BLOCK_ID_TD04                                = 0xc4,
+	DBG_BLOCK_ID_TD05                                = 0xc5,
+	DBG_BLOCK_ID_TD06                                = 0xc6,
+	DBG_BLOCK_ID_TD07                                = 0xc7,
+	DBG_BLOCK_ID_TD08                                = 0xc8,
+	DBG_BLOCK_ID_TD09                                = 0xc9,
+	DBG_BLOCK_ID_TD0A                                = 0xca,
+	DBG_BLOCK_ID_TD0B                                = 0xcb,
+	DBG_BLOCK_ID_UNUSED43                            = 0xcc,
+	DBG_BLOCK_ID_UNUSED44                            = 0xcd,
+	DBG_BLOCK_ID_UNUSED45                            = 0xce,
+	DBG_BLOCK_ID_UNUSED46                            = 0xcf,
+	DBG_BLOCK_ID_TD10                                = 0xd0,
+	DBG_BLOCK_ID_TD11                                = 0xd1,
+	DBG_BLOCK_ID_TD12                                = 0xd2,
+	DBG_BLOCK_ID_TD13                                = 0xd3,
+	DBG_BLOCK_ID_TD14                                = 0xd4,
+	DBG_BLOCK_ID_TD15                                = 0xd5,
+	DBG_BLOCK_ID_TD16                                = 0xd6,
+	DBG_BLOCK_ID_TD17                                = 0xd7,
+	DBG_BLOCK_ID_TD18                                = 0xd8,
+	DBG_BLOCK_ID_TD19                                = 0xd9,
+	DBG_BLOCK_ID_TD1A                                = 0xda,
+	DBG_BLOCK_ID_TD1B                                = 0xdb,
+	DBG_BLOCK_ID_UNUSED47                            = 0xdc,
+	DBG_BLOCK_ID_UNUSED48                            = 0xdd,
+	DBG_BLOCK_ID_UNUSED49                            = 0xde,
+	DBG_BLOCK_ID_UNUSED50                            = 0xdf,
+	DBG_BLOCK_ID_MCD0                                = 0xe0,
+	DBG_BLOCK_ID_MCD1                                = 0xe1,
+	DBG_BLOCK_ID_MCD2                                = 0xe2,
+	DBG_BLOCK_ID_MCD3                                = 0xe3,
+	DBG_BLOCK_ID_MCD4                                = 0xe4,
+	DBG_BLOCK_ID_MCD5                                = 0xe5,
+	DBG_BLOCK_ID_UNUSED51                            = 0xe6,
+	DBG_BLOCK_ID_UNUSED52                            = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+	DBG_BLOCK_ID_RESERVED_BY2                        = 0x0,
+	DBG_BLOCK_ID_VMC_BY2                             = 0x1,
+	DBG_BLOCK_ID_CG_BY2                              = 0x2,
+	DBG_BLOCK_ID_GRBM_BY2                            = 0x3,
+	DBG_BLOCK_ID_CSC_BY2                             = 0x4,
+	DBG_BLOCK_ID_IH_BY2                              = 0x5,
+	DBG_BLOCK_ID_SQ_BY2                              = 0x6,
+	DBG_BLOCK_ID_GMCON_BY2                           = 0x7,
+	DBG_BLOCK_ID_DMA0_BY2                            = 0x8,
+	DBG_BLOCK_ID_SPIM_BY2                            = 0x9,
+	DBG_BLOCK_ID_SPIS_BY2                            = 0xa,
+	DBG_BLOCK_ID_PA0_BY2                             = 0xb,
+	DBG_BLOCK_ID_CP0_BY2                             = 0xc,
+	DBG_BLOCK_ID_CP2_BY2                             = 0xd,
+	DBG_BLOCK_ID_UVDU_BY2                            = 0xe,
+	DBG_BLOCK_ID_VCE_BY2                             = 0xf,
+	DBG_BLOCK_ID_VGT0_BY2                            = 0x10,
+	DBG_BLOCK_ID_IA_BY2                              = 0x11,
+	DBG_BLOCK_ID_SCT0_BY2                            = 0x12,
+	DBG_BLOCK_ID_SPM0_BY2                            = 0x13,
+	DBG_BLOCK_ID_TCAA_BY2                            = 0x14,
+	DBG_BLOCK_ID_TCCA_BY2                            = 0x15,
+	DBG_BLOCK_ID_MCC0_BY2                            = 0x16,
+	DBG_BLOCK_ID_MCC2_BY2                            = 0x17,
+	DBG_BLOCK_ID_SX0_BY2                             = 0x18,
+	DBG_BLOCK_ID_SX2_BY2                             = 0x19,
+	DBG_BLOCK_ID_UNUSED4_BY2                         = 0x1a,
+	DBG_BLOCK_ID_UNUSED6_BY2                         = 0x1b,
+	DBG_BLOCK_ID_PC0_BY2                             = 0x1c,
+	DBG_BLOCK_ID_UNUSED8_BY2                         = 0x1d,
+	DBG_BLOCK_ID_UNUSED10_BY2                        = 0x1e,
+	DBG_BLOCK_ID_MCB_BY2                             = 0x1f,
+	DBG_BLOCK_ID_SCB0_BY2                            = 0x20,
+	DBG_BLOCK_ID_UNUSED13_BY2                        = 0x21,
+	DBG_BLOCK_ID_SCF0_BY2                            = 0x22,
+	DBG_BLOCK_ID_UNUSED15_BY2                        = 0x23,
+	DBG_BLOCK_ID_BCI0_BY2                            = 0x24,
+	DBG_BLOCK_ID_BCI2_BY2                            = 0x25,
+	DBG_BLOCK_ID_UNUSED17_BY2                        = 0x26,
+	DBG_BLOCK_ID_UNUSED19_BY2                        = 0x27,
+	DBG_BLOCK_ID_CB00_BY2                            = 0x28,
+	DBG_BLOCK_ID_CB02_BY2                            = 0x29,
+	DBG_BLOCK_ID_CB04_BY2                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED22_BY2                        = 0x2b,
+	DBG_BLOCK_ID_CB10_BY2                            = 0x2c,
+	DBG_BLOCK_ID_CB12_BY2                            = 0x2d,
+	DBG_BLOCK_ID_CB14_BY2                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED25_BY2                        = 0x2f,
+	DBG_BLOCK_ID_TCP0_BY2                            = 0x30,
+	DBG_BLOCK_ID_TCP2_BY2                            = 0x31,
+	DBG_BLOCK_ID_TCP4_BY2                            = 0x32,
+	DBG_BLOCK_ID_TCP6_BY2                            = 0x33,
+	DBG_BLOCK_ID_TCP8_BY2                            = 0x34,
+	DBG_BLOCK_ID_TCP10_BY2                           = 0x35,
+	DBG_BLOCK_ID_TCP12_BY2                           = 0x36,
+	DBG_BLOCK_ID_TCP14_BY2                           = 0x37,
+	DBG_BLOCK_ID_TCP16_BY2                           = 0x38,
+	DBG_BLOCK_ID_TCP18_BY2                           = 0x39,
+	DBG_BLOCK_ID_TCP20_BY2                           = 0x3a,
+	DBG_BLOCK_ID_TCP22_BY2                           = 0x3b,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY2                   = 0x3c,
+	DBG_BLOCK_ID_TCP_RESERVED2_BY2                   = 0x3d,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY2                   = 0x3e,
+	DBG_BLOCK_ID_TCP_RESERVED6_BY2                   = 0x3f,
+	DBG_BLOCK_ID_DB00_BY2                            = 0x40,
+	DBG_BLOCK_ID_DB02_BY2                            = 0x41,
+	DBG_BLOCK_ID_DB04_BY2                            = 0x42,
+	DBG_BLOCK_ID_UNUSED28_BY2                        = 0x43,
+	DBG_BLOCK_ID_DB10_BY2                            = 0x44,
+	DBG_BLOCK_ID_DB12_BY2                            = 0x45,
+	DBG_BLOCK_ID_DB14_BY2                            = 0x46,
+	DBG_BLOCK_ID_UNUSED31_BY2                        = 0x47,
+	DBG_BLOCK_ID_TCC0_BY2                            = 0x48,
+	DBG_BLOCK_ID_TCC2_BY2                            = 0x49,
+	DBG_BLOCK_ID_TCC4_BY2                            = 0x4a,
+	DBG_BLOCK_ID_TCC6_BY2                            = 0x4b,
+	DBG_BLOCK_ID_SPS00_BY2                           = 0x4c,
+	DBG_BLOCK_ID_SPS02_BY2                           = 0x4d,
+	DBG_BLOCK_ID_SPS11_BY2                           = 0x4e,
+	DBG_BLOCK_ID_UNUSED33_BY2                        = 0x4f,
+	DBG_BLOCK_ID_TA00_BY2                            = 0x50,
+	DBG_BLOCK_ID_TA02_BY2                            = 0x51,
+	DBG_BLOCK_ID_TA04_BY2                            = 0x52,
+	DBG_BLOCK_ID_TA06_BY2                            = 0x53,
+	DBG_BLOCK_ID_TA08_BY2                            = 0x54,
+	DBG_BLOCK_ID_TA0A_BY2                            = 0x55,
+	DBG_BLOCK_ID_UNUSED35_BY2                        = 0x56,
+	DBG_BLOCK_ID_UNUSED37_BY2                        = 0x57,
+	DBG_BLOCK_ID_TA10_BY2                            = 0x58,
+	DBG_BLOCK_ID_TA12_BY2                            = 0x59,
+	DBG_BLOCK_ID_TA14_BY2                            = 0x5a,
+	DBG_BLOCK_ID_TA16_BY2                            = 0x5b,
+	DBG_BLOCK_ID_TA18_BY2                            = 0x5c,
+	DBG_BLOCK_ID_TA1A_BY2                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED39_BY2                        = 0x5e,
+	DBG_BLOCK_ID_UNUSED41_BY2                        = 0x5f,
+	DBG_BLOCK_ID_TD00_BY2                            = 0x60,
+	DBG_BLOCK_ID_TD02_BY2                            = 0x61,
+	DBG_BLOCK_ID_TD04_BY2                            = 0x62,
+	DBG_BLOCK_ID_TD06_BY2                            = 0x63,
+	DBG_BLOCK_ID_TD08_BY2                            = 0x64,
+	DBG_BLOCK_ID_TD0A_BY2                            = 0x65,
+	DBG_BLOCK_ID_UNUSED43_BY2                        = 0x66,
+	DBG_BLOCK_ID_UNUSED45_BY2                        = 0x67,
+	DBG_BLOCK_ID_TD10_BY2                            = 0x68,
+	DBG_BLOCK_ID_TD12_BY2                            = 0x69,
+	DBG_BLOCK_ID_TD14_BY2                            = 0x6a,
+	DBG_BLOCK_ID_TD16_BY2                            = 0x6b,
+	DBG_BLOCK_ID_TD18_BY2                            = 0x6c,
+	DBG_BLOCK_ID_TD1A_BY2                            = 0x6d,
+	DBG_BLOCK_ID_UNUSED47_BY2                        = 0x6e,
+	DBG_BLOCK_ID_UNUSED49_BY2                        = 0x6f,
+	DBG_BLOCK_ID_MCD0_BY2                            = 0x70,
+	DBG_BLOCK_ID_MCD2_BY2                            = 0x71,
+	DBG_BLOCK_ID_MCD4_BY2                            = 0x72,
+	DBG_BLOCK_ID_UNUSED51_BY2                        = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+	DBG_BLOCK_ID_RESERVED_BY4                        = 0x0,
+	DBG_BLOCK_ID_CG_BY4                              = 0x1,
+	DBG_BLOCK_ID_CSC_BY4                             = 0x2,
+	DBG_BLOCK_ID_SQ_BY4                              = 0x3,
+	DBG_BLOCK_ID_DMA0_BY4                            = 0x4,
+	DBG_BLOCK_ID_SPIS_BY4                            = 0x5,
+	DBG_BLOCK_ID_CP0_BY4                             = 0x6,
+	DBG_BLOCK_ID_UVDU_BY4                            = 0x7,
+	DBG_BLOCK_ID_VGT0_BY4                            = 0x8,
+	DBG_BLOCK_ID_SCT0_BY4                            = 0x9,
+	DBG_BLOCK_ID_TCAA_BY4                            = 0xa,
+	DBG_BLOCK_ID_MCC0_BY4                            = 0xb,
+	DBG_BLOCK_ID_SX0_BY4                             = 0xc,
+	DBG_BLOCK_ID_UNUSED4_BY4                         = 0xd,
+	DBG_BLOCK_ID_PC0_BY4                             = 0xe,
+	DBG_BLOCK_ID_UNUSED10_BY4                        = 0xf,
+	DBG_BLOCK_ID_SCB0_BY4                            = 0x10,
+	DBG_BLOCK_ID_SCF0_BY4                            = 0x11,
+	DBG_BLOCK_ID_BCI0_BY4                            = 0x12,
+	DBG_BLOCK_ID_UNUSED17_BY4                        = 0x13,
+	DBG_BLOCK_ID_CB00_BY4                            = 0x14,
+	DBG_BLOCK_ID_CB04_BY4                            = 0x15,
+	DBG_BLOCK_ID_CB10_BY4                            = 0x16,
+	DBG_BLOCK_ID_CB14_BY4                            = 0x17,
+	DBG_BLOCK_ID_TCP0_BY4                            = 0x18,
+	DBG_BLOCK_ID_TCP4_BY4                            = 0x19,
+	DBG_BLOCK_ID_TCP8_BY4                            = 0x1a,
+	DBG_BLOCK_ID_TCP12_BY4                           = 0x1b,
+	DBG_BLOCK_ID_TCP16_BY4                           = 0x1c,
+	DBG_BLOCK_ID_TCP20_BY4                           = 0x1d,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY4                   = 0x1e,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY4                   = 0x1f,
+	DBG_BLOCK_ID_DB_BY4                              = 0x20,
+	DBG_BLOCK_ID_DB04_BY4                            = 0x21,
+	DBG_BLOCK_ID_DB10_BY4                            = 0x22,
+	DBG_BLOCK_ID_DB14_BY4                            = 0x23,
+	DBG_BLOCK_ID_TCC0_BY4                            = 0x24,
+	DBG_BLOCK_ID_TCC4_BY4                            = 0x25,
+	DBG_BLOCK_ID_SPS00_BY4                           = 0x26,
+	DBG_BLOCK_ID_SPS11_BY4                           = 0x27,
+	DBG_BLOCK_ID_TA00_BY4                            = 0x28,
+	DBG_BLOCK_ID_TA04_BY4                            = 0x29,
+	DBG_BLOCK_ID_TA08_BY4                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED35_BY4                        = 0x2b,
+	DBG_BLOCK_ID_TA10_BY4                            = 0x2c,
+	DBG_BLOCK_ID_TA14_BY4                            = 0x2d,
+	DBG_BLOCK_ID_TA18_BY4                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED39_BY4                        = 0x2f,
+	DBG_BLOCK_ID_TD00_BY4                            = 0x30,
+	DBG_BLOCK_ID_TD04_BY4                            = 0x31,
+	DBG_BLOCK_ID_TD08_BY4                            = 0x32,
+	DBG_BLOCK_ID_UNUSED43_BY4                        = 0x33,
+	DBG_BLOCK_ID_TD10_BY4                            = 0x34,
+	DBG_BLOCK_ID_TD14_BY4                            = 0x35,
+	DBG_BLOCK_ID_TD18_BY4                            = 0x36,
+	DBG_BLOCK_ID_UNUSED47_BY4                        = 0x37,
+	DBG_BLOCK_ID_MCD0_BY4                            = 0x38,
+	DBG_BLOCK_ID_MCD4_BY4                            = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+	DBG_BLOCK_ID_RESERVED_BY8                        = 0x0,
+	DBG_BLOCK_ID_CSC_BY8                             = 0x1,
+	DBG_BLOCK_ID_DMA0_BY8                            = 0x2,
+	DBG_BLOCK_ID_CP0_BY8                             = 0x3,
+	DBG_BLOCK_ID_VGT0_BY8                            = 0x4,
+	DBG_BLOCK_ID_TCAA_BY8                            = 0x5,
+	DBG_BLOCK_ID_SX0_BY8                             = 0x6,
+	DBG_BLOCK_ID_PC0_BY8                             = 0x7,
+	DBG_BLOCK_ID_SCB0_BY8                            = 0x8,
+	DBG_BLOCK_ID_BCI0_BY8                            = 0x9,
+	DBG_BLOCK_ID_CB00_BY8                            = 0xa,
+	DBG_BLOCK_ID_CB10_BY8                            = 0xb,
+	DBG_BLOCK_ID_TCP0_BY8                            = 0xc,
+	DBG_BLOCK_ID_TCP8_BY8                            = 0xd,
+	DBG_BLOCK_ID_TCP16_BY8                           = 0xe,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY8                   = 0xf,
+	DBG_BLOCK_ID_DB00_BY8                            = 0x10,
+	DBG_BLOCK_ID_DB10_BY8                            = 0x11,
+	DBG_BLOCK_ID_TCC0_BY8                            = 0x12,
+	DBG_BLOCK_ID_SPS00_BY8                           = 0x13,
+	DBG_BLOCK_ID_TA00_BY8                            = 0x14,
+	DBG_BLOCK_ID_TA08_BY8                            = 0x15,
+	DBG_BLOCK_ID_TA10_BY8                            = 0x16,
+	DBG_BLOCK_ID_TA18_BY8                            = 0x17,
+	DBG_BLOCK_ID_TD00_BY8                            = 0x18,
+	DBG_BLOCK_ID_TD08_BY8                            = 0x19,
+	DBG_BLOCK_ID_TD10_BY8                            = 0x1a,
+	DBG_BLOCK_ID_TD18_BY8                            = 0x1b,
+	DBG_BLOCK_ID_MCD0_BY8                            = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+	DBG_BLOCK_ID_RESERVED_BY16                       = 0x0,
+	DBG_BLOCK_ID_DMA0_BY16                           = 0x1,
+	DBG_BLOCK_ID_VGT0_BY16                           = 0x2,
+	DBG_BLOCK_ID_SX0_BY16                            = 0x3,
+	DBG_BLOCK_ID_SCB0_BY16                           = 0x4,
+	DBG_BLOCK_ID_CB00_BY16                           = 0x5,
+	DBG_BLOCK_ID_TCP0_BY16                           = 0x6,
+	DBG_BLOCK_ID_TCP16_BY16                          = 0x7,
+	DBG_BLOCK_ID_DB00_BY16                           = 0x8,
+	DBG_BLOCK_ID_TCC0_BY16                           = 0x9,
+	DBG_BLOCK_ID_TA00_BY16                           = 0xa,
+	DBG_BLOCK_ID_TA10_BY16                           = 0xb,
+	DBG_BLOCK_ID_TD00_BY16                           = 0xc,
+	DBG_BLOCK_ID_TD10_BY16                           = 0xd,
+	DBG_BLOCK_ID_MCD0_BY16                           = 0xe,
+} DebugBlockId_BY16;
+typedef enum ColorTransform {
+	DCC_CT_AUTO                                      = 0x0,
+	DCC_CT_NONE                                      = 0x1,
+	ABGR_TO_A_BG_G_RB                                = 0x2,
+	BGRA_TO_BG_G_RB_A                                = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+	REF_NEVER                                        = 0x0,
+	REF_LESS                                         = 0x1,
+	REF_EQUAL                                        = 0x2,
+	REF_LEQUAL                                       = 0x3,
+	REF_GREATER                                      = 0x4,
+	REF_NOTEQUAL                                     = 0x5,
+	REF_GEQUAL                                       = 0x6,
+	REF_ALWAYS                                       = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+	READ_256_BITS                                    = 0x0,
+	READ_512_BITS                                    = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+	DEPTH_INVALID                                    = 0x0,
+	DEPTH_16                                         = 0x1,
+	DEPTH_X8_24                                      = 0x2,
+	DEPTH_8_24                                       = 0x3,
+	DEPTH_X8_24_FLOAT                                = 0x4,
+	DEPTH_8_24_FLOAT                                 = 0x5,
+	DEPTH_32_FLOAT                                   = 0x6,
+	DEPTH_X24_8_32_FLOAT                             = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+	Z_INVALID                                        = 0x0,
+	Z_16                                             = 0x1,
+	Z_24                                             = 0x2,
+	Z_32_FLOAT                                       = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+	STENCIL_INVALID                                  = 0x0,
+	STENCIL_8                                        = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+	CMASK_CLEAR_NONE                                 = 0x0,
+	CMASK_CLEAR_ONE                                  = 0x1,
+	CMASK_CLEAR_ALL                                  = 0x2,
+	CMASK_ANY_EXPANDED                               = 0x3,
+	CMASK_ALPHA0_FRAG1                               = 0x4,
+	CMASK_ALPHA0_FRAG2                               = 0x5,
+	CMASK_ALPHA0_FRAG4                               = 0x6,
+	CMASK_ALPHA0_FRAGS                               = 0x7,
+	CMASK_ALPHA1_FRAG1                               = 0x8,
+	CMASK_ALPHA1_FRAG2                               = 0x9,
+	CMASK_ALPHA1_FRAG4                               = 0xa,
+	CMASK_ALPHA1_FRAGS                               = 0xb,
+	CMASK_ALPHAX_FRAG1                               = 0xc,
+	CMASK_ALPHAX_FRAG2                               = 0xd,
+	CMASK_ALPHAX_FRAG4                               = 0xe,
+	CMASK_ALPHAX_FRAGS                               = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+	EXPORT_UNUSED                                    = 0x0,
+	EXPORT_32_R                                      = 0x1,
+	EXPORT_32_GR                                     = 0x2,
+	EXPORT_32_AR                                     = 0x3,
+	EXPORT_FP16_ABGR                                 = 0x4,
+	EXPORT_UNSIGNED16_ABGR                           = 0x5,
+	EXPORT_SIGNED16_ABGR                             = 0x6,
+	EXPORT_32_ABGR                                   = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+	EXPORT_4P_32BPC_ABGR                             = 0x0,
+	EXPORT_4P_16BPC_ABGR                             = 0x1,
+	EXPORT_4P_32BPC_GR                               = 0x2,
+	EXPORT_4P_32BPC_AR                               = 0x3,
+	EXPORT_2P_32BPC_ABGR                             = 0x4,
+	EXPORT_8P_32BPC_R                                = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+	COLOR_INVALID                                    = 0x0,
+	COLOR_8                                          = 0x1,
+	COLOR_16                                         = 0x2,
+	COLOR_8_8                                        = 0x3,
+	COLOR_32                                         = 0x4,
+	COLOR_16_16                                      = 0x5,
+	COLOR_10_11_11                                   = 0x6,
+	COLOR_11_11_10                                   = 0x7,
+	COLOR_10_10_10_2                                 = 0x8,
+	COLOR_2_10_10_10                                 = 0x9,
+	COLOR_8_8_8_8                                    = 0xa,
+	COLOR_32_32                                      = 0xb,
+	COLOR_16_16_16_16                                = 0xc,
+	COLOR_RESERVED_13                                = 0xd,
+	COLOR_32_32_32_32                                = 0xe,
+	COLOR_RESERVED_15                                = 0xf,
+	COLOR_5_6_5                                      = 0x10,
+	COLOR_1_5_5_5                                    = 0x11,
+	COLOR_5_5_5_1                                    = 0x12,
+	COLOR_4_4_4_4                                    = 0x13,
+	COLOR_8_24                                       = 0x14,
+	COLOR_24_8                                       = 0x15,
+	COLOR_X24_8_32_FLOAT                             = 0x16,
+	COLOR_RESERVED_23                                = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+	FMT_INVALID                                      = 0x0,
+	FMT_8                                            = 0x1,
+	FMT_16                                           = 0x2,
+	FMT_8_8                                          = 0x3,
+	FMT_32                                           = 0x4,
+	FMT_16_16                                        = 0x5,
+	FMT_10_11_11                                     = 0x6,
+	FMT_11_11_10                                     = 0x7,
+	FMT_10_10_10_2                                   = 0x8,
+	FMT_2_10_10_10                                   = 0x9,
+	FMT_8_8_8_8                                      = 0xa,
+	FMT_32_32                                        = 0xb,
+	FMT_16_16_16_16                                  = 0xc,
+	FMT_32_32_32                                     = 0xd,
+	FMT_32_32_32_32                                  = 0xe,
+	FMT_RESERVED_4                                   = 0xf,
+	FMT_5_6_5                                        = 0x10,
+	FMT_1_5_5_5                                      = 0x11,
+	FMT_5_5_5_1                                      = 0x12,
+	FMT_4_4_4_4                                      = 0x13,
+	FMT_8_24                                         = 0x14,
+	FMT_24_8                                         = 0x15,
+	FMT_X24_8_32_FLOAT                               = 0x16,
+	FMT_RESERVED_33                                  = 0x17,
+	FMT_11_11_10_FLOAT                               = 0x18,
+	FMT_16_FLOAT                                     = 0x19,
+	FMT_32_FLOAT                                     = 0x1a,
+	FMT_16_16_FLOAT                                  = 0x1b,
+	FMT_8_24_FLOAT                                   = 0x1c,
+	FMT_24_8_FLOAT                                   = 0x1d,
+	FMT_32_32_FLOAT                                  = 0x1e,
+	FMT_10_11_11_FLOAT                               = 0x1f,
+	FMT_16_16_16_16_FLOAT                            = 0x20,
+	FMT_3_3_2                                        = 0x21,
+	FMT_6_5_5                                        = 0x22,
+	FMT_32_32_32_32_FLOAT                            = 0x23,
+	FMT_RESERVED_36                                  = 0x24,
+	FMT_1                                            = 0x25,
+	FMT_1_REVERSED                                   = 0x26,
+	FMT_GB_GR                                        = 0x27,
+	FMT_BG_RG                                        = 0x28,
+	FMT_32_AS_8                                      = 0x29,
+	FMT_32_AS_8_8                                    = 0x2a,
+	FMT_5_9_9_9_SHAREDEXP                            = 0x2b,
+	FMT_8_8_8                                        = 0x2c,
+	FMT_16_16_16                                     = 0x2d,
+	FMT_16_16_16_FLOAT                               = 0x2e,
+	FMT_4_4                                          = 0x2f,
+	FMT_32_32_32_FLOAT                               = 0x30,
+	FMT_BC1                                          = 0x31,
+	FMT_BC2                                          = 0x32,
+	FMT_BC3                                          = 0x33,
+	FMT_BC4                                          = 0x34,
+	FMT_BC5                                          = 0x35,
+	FMT_BC6                                          = 0x36,
+	FMT_BC7                                          = 0x37,
+	FMT_32_AS_32_32_32_32                            = 0x38,
+	FMT_APC3                                         = 0x39,
+	FMT_APC4                                         = 0x3a,
+	FMT_APC5                                         = 0x3b,
+	FMT_APC6                                         = 0x3c,
+	FMT_APC7                                         = 0x3d,
+	FMT_CTX1                                         = 0x3e,
+	FMT_RESERVED_63                                  = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+	BUF_DATA_FORMAT_INVALID                          = 0x0,
+	BUF_DATA_FORMAT_8                                = 0x1,
+	BUF_DATA_FORMAT_16                               = 0x2,
+	BUF_DATA_FORMAT_8_8                              = 0x3,
+	BUF_DATA_FORMAT_32                               = 0x4,
+	BUF_DATA_FORMAT_16_16                            = 0x5,
+	BUF_DATA_FORMAT_10_11_11                         = 0x6,
+	BUF_DATA_FORMAT_11_11_10                         = 0x7,
+	BUF_DATA_FORMAT_10_10_10_2                       = 0x8,
+	BUF_DATA_FORMAT_2_10_10_10                       = 0x9,
+	BUF_DATA_FORMAT_8_8_8_8                          = 0xa,
+	BUF_DATA_FORMAT_32_32                            = 0xb,
+	BUF_DATA_FORMAT_16_16_16_16                      = 0xc,
+	BUF_DATA_FORMAT_32_32_32                         = 0xd,
+	BUF_DATA_FORMAT_32_32_32_32                      = 0xe,
+	BUF_DATA_FORMAT_RESERVED_15                      = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+	IMG_DATA_FORMAT_INVALID                          = 0x0,
+	IMG_DATA_FORMAT_8                                = 0x1,
+	IMG_DATA_FORMAT_16                               = 0x2,
+	IMG_DATA_FORMAT_8_8                              = 0x3,
+	IMG_DATA_FORMAT_32                               = 0x4,
+	IMG_DATA_FORMAT_16_16                            = 0x5,
+	IMG_DATA_FORMAT_10_11_11                         = 0x6,
+	IMG_DATA_FORMAT_11_11_10                         = 0x7,
+	IMG_DATA_FORMAT_10_10_10_2                       = 0x8,
+	IMG_DATA_FORMAT_2_10_10_10                       = 0x9,
+	IMG_DATA_FORMAT_8_8_8_8                          = 0xa,
+	IMG_DATA_FORMAT_32_32                            = 0xb,
+	IMG_DATA_FORMAT_16_16_16_16                      = 0xc,
+	IMG_DATA_FORMAT_32_32_32                         = 0xd,
+	IMG_DATA_FORMAT_32_32_32_32                      = 0xe,
+	IMG_DATA_FORMAT_RESERVED_15                      = 0xf,
+	IMG_DATA_FORMAT_5_6_5                            = 0x10,
+	IMG_DATA_FORMAT_1_5_5_5                          = 0x11,
+	IMG_DATA_FORMAT_5_5_5_1                          = 0x12,
+	IMG_DATA_FORMAT_4_4_4_4                          = 0x13,
+	IMG_DATA_FORMAT_8_24                             = 0x14,
+	IMG_DATA_FORMAT_24_8                             = 0x15,
+	IMG_DATA_FORMAT_X24_8_32                         = 0x16,
+	IMG_DATA_FORMAT_RESERVED_23                      = 0x17,
+	IMG_DATA_FORMAT_RESERVED_24                      = 0x18,
+	IMG_DATA_FORMAT_RESERVED_25                      = 0x19,
+	IMG_DATA_FORMAT_RESERVED_26                      = 0x1a,
+	IMG_DATA_FORMAT_RESERVED_27                      = 0x1b,
+	IMG_DATA_FORMAT_RESERVED_28                      = 0x1c,
+	IMG_DATA_FORMAT_RESERVED_29                      = 0x1d,
+	IMG_DATA_FORMAT_RESERVED_30                      = 0x1e,
+	IMG_DATA_FORMAT_RESERVED_31                      = 0x1f,
+	IMG_DATA_FORMAT_GB_GR                            = 0x20,
+	IMG_DATA_FORMAT_BG_RG                            = 0x21,
+	IMG_DATA_FORMAT_5_9_9_9                          = 0x22,
+	IMG_DATA_FORMAT_BC1                              = 0x23,
+	IMG_DATA_FORMAT_BC2                              = 0x24,
+	IMG_DATA_FORMAT_BC3                              = 0x25,
+	IMG_DATA_FORMAT_BC4                              = 0x26,
+	IMG_DATA_FORMAT_BC5                              = 0x27,
+	IMG_DATA_FORMAT_BC6                              = 0x28,
+	IMG_DATA_FORMAT_BC7                              = 0x29,
+	IMG_DATA_FORMAT_RESERVED_42                      = 0x2a,
+	IMG_DATA_FORMAT_RESERVED_43                      = 0x2b,
+	IMG_DATA_FORMAT_FMASK8_S2_F1                     = 0x2c,
+	IMG_DATA_FORMAT_FMASK8_S4_F1                     = 0x2d,
+	IMG_DATA_FORMAT_FMASK8_S8_F1                     = 0x2e,
+	IMG_DATA_FORMAT_FMASK8_S2_F2                     = 0x2f,
+	IMG_DATA_FORMAT_FMASK8_S4_F2                     = 0x30,
+	IMG_DATA_FORMAT_FMASK8_S4_F4                     = 0x31,
+	IMG_DATA_FORMAT_FMASK16_S16_F1                   = 0x32,
+	IMG_DATA_FORMAT_FMASK16_S8_F2                    = 0x33,
+	IMG_DATA_FORMAT_FMASK32_S16_F2                   = 0x34,
+	IMG_DATA_FORMAT_FMASK32_S8_F4                    = 0x35,
+	IMG_DATA_FORMAT_FMASK32_S8_F8                    = 0x36,
+	IMG_DATA_FORMAT_FMASK64_S16_F4                   = 0x37,
+	IMG_DATA_FORMAT_FMASK64_S16_F8                   = 0x38,
+	IMG_DATA_FORMAT_4_4                              = 0x39,
+	IMG_DATA_FORMAT_6_5_5                            = 0x3a,
+	IMG_DATA_FORMAT_1                                = 0x3b,
+	IMG_DATA_FORMAT_1_REVERSED                       = 0x3c,
+	IMG_DATA_FORMAT_32_AS_8                          = 0x3d,
+	IMG_DATA_FORMAT_32_AS_8_8                        = 0x3e,
+	IMG_DATA_FORMAT_32_AS_32_32_32_32                = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+	BUF_NUM_FORMAT_UNORM                             = 0x0,
+	BUF_NUM_FORMAT_SNORM                             = 0x1,
+	BUF_NUM_FORMAT_USCALED                           = 0x2,
+	BUF_NUM_FORMAT_SSCALED                           = 0x3,
+	BUF_NUM_FORMAT_UINT                              = 0x4,
+	BUF_NUM_FORMAT_SINT                              = 0x5,
+	BUF_NUM_FORMAT_RESERVED_6                        = 0x6,
+	BUF_NUM_FORMAT_FLOAT                             = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+	IMG_NUM_FORMAT_UNORM                             = 0x0,
+	IMG_NUM_FORMAT_SNORM                             = 0x1,
+	IMG_NUM_FORMAT_USCALED                           = 0x2,
+	IMG_NUM_FORMAT_SSCALED                           = 0x3,
+	IMG_NUM_FORMAT_UINT                              = 0x4,
+	IMG_NUM_FORMAT_SINT                              = 0x5,
+	IMG_NUM_FORMAT_RESERVED_6                        = 0x6,
+	IMG_NUM_FORMAT_FLOAT                             = 0x7,
+	IMG_NUM_FORMAT_RESERVED_8                        = 0x8,
+	IMG_NUM_FORMAT_SRGB                              = 0x9,
+	IMG_NUM_FORMAT_RESERVED_10                       = 0xa,
+	IMG_NUM_FORMAT_RESERVED_11                       = 0xb,
+	IMG_NUM_FORMAT_RESERVED_12                       = 0xc,
+	IMG_NUM_FORMAT_RESERVED_13                       = 0xd,
+	IMG_NUM_FORMAT_RESERVED_14                       = 0xe,
+	IMG_NUM_FORMAT_RESERVED_15                       = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+	ARRAY_COLOR_TILE                                 = 0x0,
+	ARRAY_DEPTH_TILE                                 = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+	ADDR_SURF_MICRO_TILING_DISPLAY                   = 0x0,
+	ADDR_SURF_MICRO_TILING_NON_DISPLAY               = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+	ADDR_SURF_DISPLAY_MICRO_TILING                   = 0x0,
+	ADDR_SURF_THIN_MICRO_TILING                      = 0x1,
+	ADDR_SURF_DEPTH_MICRO_TILING                     = 0x2,
+	ADDR_SURF_ROTATED_MICRO_TILING                   = 0x3,
+	ADDR_SURF_THICK_MICRO_TILING                     = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+	ADDR_SURF_TILE_SPLIT_64B                         = 0x0,
+	ADDR_SURF_TILE_SPLIT_128B                        = 0x1,
+	ADDR_SURF_TILE_SPLIT_256B                        = 0x2,
+	ADDR_SURF_TILE_SPLIT_512B                        = 0x3,
+	ADDR_SURF_TILE_SPLIT_1KB                         = 0x4,
+	ADDR_SURF_TILE_SPLIT_2KB                         = 0x5,
+	ADDR_SURF_TILE_SPLIT_4KB                         = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+	ADDR_SURF_SAMPLE_SPLIT_1                         = 0x0,
+	ADDR_SURF_SAMPLE_SPLIT_2                         = 0x1,
+	ADDR_SURF_SAMPLE_SPLIT_4                         = 0x2,
+	ADDR_SURF_SAMPLE_SPLIT_8                         = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+	ADDR_SURF_P2                                     = 0x0,
+	ADDR_SURF_P2_RESERVED0                           = 0x1,
+	ADDR_SURF_P2_RESERVED1                           = 0x2,
+	ADDR_SURF_P2_RESERVED2                           = 0x3,
+	ADDR_SURF_P4_8x16                                = 0x4,
+	ADDR_SURF_P4_16x16                               = 0x5,
+	ADDR_SURF_P4_16x32                               = 0x6,
+	ADDR_SURF_P4_32x32                               = 0x7,
+	ADDR_SURF_P8_16x16_8x16                          = 0x8,
+	ADDR_SURF_P8_16x32_8x16                          = 0x9,
+	ADDR_SURF_P8_32x32_8x16                          = 0xa,
+	ADDR_SURF_P8_16x32_16x16                         = 0xb,
+	ADDR_SURF_P8_32x32_16x16                         = 0xc,
+	ADDR_SURF_P8_32x32_16x32                         = 0xd,
+	ADDR_SURF_P8_32x64_32x32                         = 0xe,
+	ADDR_SURF_P8_RESERVED0                           = 0xf,
+	ADDR_SURF_P16_32x32_8x16                         = 0x10,
+	ADDR_SURF_P16_32x32_16x16                        = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+	ADDR_SURF_2_BANK                                 = 0x0,
+	ADDR_SURF_4_BANK                                 = 0x1,
+	ADDR_SURF_8_BANK                                 = 0x2,
+	ADDR_SURF_16_BANK                                = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+	ADDR_SURF_BANK_WIDTH_1                           = 0x0,
+	ADDR_SURF_BANK_WIDTH_2                           = 0x1,
+	ADDR_SURF_BANK_WIDTH_4                           = 0x2,
+	ADDR_SURF_BANK_WIDTH_8                           = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+	ADDR_SURF_BANK_HEIGHT_1                          = 0x0,
+	ADDR_SURF_BANK_HEIGHT_2                          = 0x1,
+	ADDR_SURF_BANK_HEIGHT_4                          = 0x2,
+	ADDR_SURF_BANK_HEIGHT_8                          = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+	ADDR_SURF_BANK_WH_1                              = 0x0,
+	ADDR_SURF_BANK_WH_2                              = 0x1,
+	ADDR_SURF_BANK_WH_4                              = 0x2,
+	ADDR_SURF_BANK_WH_8                              = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+	ADDR_SURF_MACRO_ASPECT_1                         = 0x0,
+	ADDR_SURF_MACRO_ASPECT_2                         = 0x1,
+	ADDR_SURF_MACRO_ASPECT_4                         = 0x2,
+	ADDR_SURF_MACRO_ASPECT_8                         = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+	GATCL1_TYPE_NORMAL                               = 0x0,
+	GATCL1_TYPE_SHOOTDOWN                            = 0x1,
+	GATCL1_TYPE_BYPASS                               = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+	TCC_CACHE_POLICY_LRU                             = 0x0,
+	TCC_CACHE_POLICY_STREAM                          = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV                                      = 0x0,
+	MTYPE_NC                                         = 0x1,
+	MTYPE_CC                                         = 0x2,
+	MTYPE_UC                                         = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
+	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
+	PERFMON_COUNTER_MODE_MAX                         = 0x2,
+	PERFMON_COUNTER_MODE_DIRTY                       = 0x3,
+	PERFMON_COUNTER_MODE_SAMPLE                      = 0x4,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT    = 0x5,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT     = 0x6,
+	PERFMON_COUNTER_MODE_CYCLES_GE_HI                = 0x7,
+	PERFMON_COUNTER_MODE_CYCLES_EQ_HI                = 0x8,
+	PERFMON_COUNTER_MODE_INACTIVE_CYCLES             = 0x9,
+	PERFMON_COUNTER_MODE_RESERVED                    = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+	PERFMON_SPM_MODE_OFF                             = 0x0,
+	PERFMON_SPM_MODE_16BIT_CLAMP                     = 0x1,
+	PERFMON_SPM_MODE_16BIT_NO_CLAMP                  = 0x2,
+	PERFMON_SPM_MODE_32BIT_CLAMP                     = 0x3,
+	PERFMON_SPM_MODE_32BIT_NO_CLAMP                  = 0x4,
+	PERFMON_SPM_MODE_RESERVED_5                      = 0x5,
+	PERFMON_SPM_MODE_RESERVED_6                      = 0x6,
+	PERFMON_SPM_MODE_RESERVED_7                      = 0x7,
+	PERFMON_SPM_MODE_TEST_MODE_0                     = 0x8,
+	PERFMON_SPM_MODE_TEST_MODE_1                     = 0x9,
+	PERFMON_SPM_MODE_TEST_MODE_2                     = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+	ARRAY_LINEAR                                     = 0x0,
+	ARRAY_TILED                                      = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+	ARRAY_1D                                         = 0x0,
+	ARRAY_2D                                         = 0x1,
+	ARRAY_3D                                         = 0x2,
+	ARRAY_3D_SLICE                                   = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+	ARRAY_2D_ALT_COLOR                               = 0x0,
+	ARRAY_2D_COLOR                                   = 0x1,
+	ARRAY_3D_SLICE_COLOR                             = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+	ARRAY_2D_ALT_DEPTH                               = 0x0,
+	ARRAY_2D_DEPTH                                   = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+	NUM_SIMD_PER_CU                                  = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
+typedef enum MEM_PWR_FORCE_CTRL {
+	NO_FORCE_REQUEST                                 = 0x0,
+	FORCE_LIGHT_SLEEP_REQUEST                        = 0x1,
+	FORCE_DEEP_SLEEP_REQUEST                         = 0x2,
+	FORCE_SHUT_DOWN_REQUEST                          = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+	NO_FORCE_REQ                                     = 0x0,
+	FORCE_LIGHT_SLEEP_REQ                            = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+	ENABLE_MEM_PWR_CTRL                              = 0x0,
+	DISABLE_MEM_PWR_CTRL                             = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+	DYNAMIC_SHUT_DOWN_ENABLE                         = 0x0,
+	DYNAMIC_DEEP_SLEEP_ENABLE                        = 0x1,
+	DYNAMIC_LIGHT_SLEEP_ENABLE                       = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+	DYNAMIC_DEEP_SLEEP_EN                            = 0x0,
+	DYNAMIC_LIGHT_SLEEP_EN                           = 0x1,
+} MEM_PWR_SEL_CTRL2;
+
+#endif /* SMU_7_1_3_ENUM_H */
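
For orientation before the companion sh_mask header below: it pairs every
register field with a <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT
definition, and drivers access fields with the usual mask-and-shift idiom
(amdgpu wraps the same pattern in its REG_GET_FIELD()/REG_SET_FIELD()
macros). A minimal self-contained sketch; FIELD_GET/FIELD_SET here are
illustrative stand-ins, not the driver's helpers, and the two #defines are
copied from smu_7_1_3_sh_mask.h:

	#include <stdint.h>

	/* Copied from smu_7_1_3_sh_mask.h so the sketch stands alone. */
	#define SMC_RESP_0__SMC_RESP_MASK 0xffff
	#define SMC_RESP_0__SMC_RESP__SHIFT 0x0

	/* Extract a field: mask it out, then shift it down. */
	#define FIELD_GET(val, reg, field) \
		(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

	/* Replace a field: clear its bits, then OR in the shifted value. */
	#define FIELD_SET(val, reg, field, fv) \
		(((val) & ~reg##__##field##_MASK) | \
		 (((uint32_t)(fv) << reg##__##field##__SHIFT) & \
		  reg##__##field##_MASK))

	/* Pull the 16-bit response code out of an SMC_RESP_0 readback. */
	static inline uint32_t smc_resp(uint32_t regval)
	{
		return FIELD_GET(regval, SMC_RESP_0, SMC_RESP);
	}

The token pasting reuses the generated names verbatim, so call sites stay
readable (FIELD_GET(v, SMC_RESP_0, SMC_RESP)) without hand-writing each
mask/shift pair.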
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
new file mode 100644
index 0000000..1ede9e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -0,0 +1,6080 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_SH_MASK_H
+#define SMU_7_1_3_SH_MASK_H
+
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f
+#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0
+#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780
+#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7
+#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800
+#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb
+#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000
+#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd
+#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000
+#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10
+#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f
+#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1
+#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f
+#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1
+#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f
+#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1
+#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f
+#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f
+#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1
+#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
+#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
+#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
+#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
+#define DPM_TABLE_29__VRConfig__SHIFT 0x0
+#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
+#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
+#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
+#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff
+#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0
+#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff
+#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0
+#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff
+#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0
+#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff
+#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_1_MASK 0xffff
+#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000
+#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10
+#define DPM_TABLE_45__VddcTable_3_MASK 0xffff
+#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0
+#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000
+#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10
+#define DPM_TABLE_46__VddcTable_5_MASK 0xffff
+#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0
+#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000
+#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10
+#define DPM_TABLE_47__VddcTable_7_MASK 0xffff
+#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0
+#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000
+#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10
+#define DPM_TABLE_48__VddcTable_9_MASK 0xffff
+#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0
+#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000
+#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10
+#define DPM_TABLE_49__VddcTable_11_MASK 0xffff
+#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0
+#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000
+#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10
+#define DPM_TABLE_50__VddcTable_13_MASK 0xffff
+#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0
+#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000
+#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10
+#define DPM_TABLE_51__VddcTable_15_MASK 0xffff
+#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0
+#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000
+#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10
+#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff
+#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0
+#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000
+#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10
+#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff
+#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0
+#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000
+#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10
+#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff
+#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0
+#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000
+#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10
+#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff
+#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0
+#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000
+#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10
+#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff
+#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0
+#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000
+#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10
+#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff
+#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0
+#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000
+#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10
+#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff
+#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0
+#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000
+#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10
+#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff
+#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0
+#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000
+#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10
+#define DPM_TABLE_60__VddciTable_1_MASK 0xffff
+#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0
+#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000
+#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10
+#define DPM_TABLE_61__VddciTable_3_MASK 0xffff
+#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0
+#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000
+#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10
+#define DPM_TABLE_62__VddciTable_5_MASK 0xffff
+#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0
+#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000
+#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10
+#define DPM_TABLE_63__VddciTable_7_MASK 0xffff
+#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0
+#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000
+#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff
+#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0
+#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00
+#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8
+#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000
+#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10
+#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000
+#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18
+#define DPM_TABLE_89__SamuLevelCount_MASK 0xff
+#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0
+#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00
+#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8
+#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000
+#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10
+#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000
+#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18
+#define DPM_TABLE_90__Reserved_0_MASK 0xff
+#define DPM_TABLE_90__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_90__ThermOutMode_MASK 0xff00
+#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8
+#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000
+#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10
+#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000
+#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18
+#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff
+#define DPM_TABLE_91__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff
+#define DPM_TABLE_92__Reserved_1__SHIFT 0x0
+#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff
+#define DPM_TABLE_93__Reserved_2__SHIFT 0x0
+#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff
+#define DPM_TABLE_94__Reserved_3__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff
+#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff
+#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff
+#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff
+#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff
+#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff
+#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff
+#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff
+#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff
+#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff
+#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff
+#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff
+#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff
+#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff
+#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff
+#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff
+#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff
+#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff
+#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff
+#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff
+#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff
+#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0
+#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff
+#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff
+#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000
+#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff
+#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00
+#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff
+#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00
+#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff
+#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00
+#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff
+#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00
+#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff
+#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0
+#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00
+#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff
+#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0
+#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00
+#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff
+#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0
+#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00
+#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff
+#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0
+#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00
+#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff
+#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00
+#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8
+#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000
+#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10
+#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff
+#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0
+#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff
+#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0
+#define DPM_TABLE_366__Smio_0_MASK 0xffffffff
+#define DPM_TABLE_366__Smio_0__SHIFT 0x0
+#define DPM_TABLE_367__Smio_1_MASK 0xffffffff
+#define DPM_TABLE_367__Smio_1__SHIFT 0x0
+#define DPM_TABLE_368__Smio_2_MASK 0xffffffff
+#define DPM_TABLE_368__Smio_2__SHIFT 0x0
+#define DPM_TABLE_369__Smio_3_MASK 0xffffffff
+#define DPM_TABLE_369__Smio_3__SHIFT 0x0
+#define DPM_TABLE_370__Smio_4_MASK 0xffffffff
+#define DPM_TABLE_370__Smio_4__SHIFT 0x0
+#define DPM_TABLE_371__Smio_5_MASK 0xffffffff
+#define DPM_TABLE_371__Smio_5__SHIFT 0x0
+#define DPM_TABLE_372__Smio_6_MASK 0xffffffff
+#define DPM_TABLE_372__Smio_6__SHIFT 0x0
+#define DPM_TABLE_373__Smio_7_MASK 0xffffffff
+#define DPM_TABLE_373__Smio_7__SHIFT 0x0
+#define DPM_TABLE_374__Smio_8_MASK 0xffffffff
+#define DPM_TABLE_374__Smio_8__SHIFT 0x0
+#define DPM_TABLE_375__Smio_9_MASK 0xffffffff
+#define DPM_TABLE_375__Smio_9__SHIFT 0x0
+#define DPM_TABLE_376__Smio_10_MASK 0xffffffff
+#define DPM_TABLE_376__Smio_10__SHIFT 0x0
+#define DPM_TABLE_377__Smio_11_MASK 0xffffffff
+#define DPM_TABLE_377__Smio_11__SHIFT 0x0
+#define DPM_TABLE_378__Smio_12_MASK 0xffffffff
+#define DPM_TABLE_378__Smio_12__SHIFT 0x0
+#define DPM_TABLE_379__Smio_13_MASK 0xffffffff
+#define DPM_TABLE_379__Smio_13__SHIFT 0x0
+#define DPM_TABLE_380__Smio_14_MASK 0xffffffff
+#define DPM_TABLE_380__Smio_14__SHIFT 0x0
+#define DPM_TABLE_381__Smio_15_MASK 0xffffffff
+#define DPM_TABLE_381__Smio_15__SHIFT 0x0
+#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
+#define DPM_TABLE_382__Smio_16__SHIFT 0x0
+#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
+#define DPM_TABLE_383__Smio_17__SHIFT 0x0
+#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
+#define DPM_TABLE_384__Smio_18__SHIFT 0x0
+#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
+#define DPM_TABLE_385__Smio_19__SHIFT 0x0
+#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
+#define DPM_TABLE_386__Smio_20__SHIFT 0x0
+#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
+#define DPM_TABLE_387__Smio_21__SHIFT 0x0
+#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
+#define DPM_TABLE_388__Smio_22__SHIFT 0x0
+#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
+#define DPM_TABLE_389__Smio_23__SHIFT 0x0
+#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
+#define DPM_TABLE_390__Smio_24__SHIFT 0x0
+#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
+#define DPM_TABLE_391__Smio_25__SHIFT 0x0
+#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
+#define DPM_TABLE_392__Smio_26__SHIFT 0x0
+#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
+#define DPM_TABLE_393__Smio_27__SHIFT 0x0
+#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
+#define DPM_TABLE_394__Smio_28__SHIFT 0x0
+#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
+#define DPM_TABLE_395__Smio_29__SHIFT 0x0
+#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
+#define DPM_TABLE_396__Smio_30__SHIFT 0x0
+#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
+#define DPM_TABLE_397__Smio_31__SHIFT 0x0
+#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
+#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
+#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
+#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
+#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
+#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
+#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
+#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
+#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
+#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
+#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
+#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
+#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
+#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
+#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
+#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
+#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
+#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
+#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
+#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
+#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
+#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
+#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
+#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
+#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
+#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
+#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
+#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
+#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
+#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
+#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
+#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
+#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
+#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
+#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
+#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
+#define DPM_TABLE_404__DTEMode_MASK 0xff
+#define DPM_TABLE_404__DTEMode__SHIFT 0x0
+#define DPM_TABLE_404__DTEInterval_MASK 0xff00
+#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
+#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
+#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
+#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
+#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
+#define DPM_TABLE_405__ThermGpio_MASK 0xff
+#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
+#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
+#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
+#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
+#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
+#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
+#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
+#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
+#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
+#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
+#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
+#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
+#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
+#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
+#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
+#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
+#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
+#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
+#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
+#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
+#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
+#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
+#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
+#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
+#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
+#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
+#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
+#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
+#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
+#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
+#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
+#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
+#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
+#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
+#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
+#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
+#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
+#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
+#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
+#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
+#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
+#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
+#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
+#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
+#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
+#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
+#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
+#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
+#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
+#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
+#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
+#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
+#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
+#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
+#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
+#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
+#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
+#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
+#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
+#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL1__M_MASK 0xff0000
+#define CG_FDO_CTRL1__M__SHIFT 0x10
+#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
+#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
+#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
+#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
+#define CG_FDO_CTRL2__TMIN_MASK 0xff
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
+#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
+#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
+#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
+#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
+#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
+#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
+#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
+#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
+#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
+#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
+#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
+#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
+#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
+#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
+#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
+#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
+#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
+#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
+#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
+#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
+#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
+#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
+#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON0_INT_DATA__VALID_MASK 0x800
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON1_INT_DATA__VALID_MASK 0x800
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON2_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON2_INT_DATA__VALID_MASK 0x800
+#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
+#define GENERAL_PWRMGT__SPARE11_MASK 0x800
+#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
+#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
+#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
+#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
+#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
+#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
+#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
+#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
+#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
+#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
+#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
+#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
+#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
+#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
+#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
+#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
+#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
+#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf
+#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0
+#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4
+#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00
+#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8
+#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000
+#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf
+#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000
+#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff
+#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10
+#define SCLK_MIN_DIV__FRACV_MASK 0xfff
+#define SCLK_MIN_DIV__FRACV__SHIFT 0x0
+#define SCLK_MIN_DIV__INTV_MASK 0x7f000
+#define SCLK_MIN_DIV__INTV__SHIFT 0xc
+#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff
+#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff
+#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100
+#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8
+#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200
+#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9
+#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00
+#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa
+#define PWR_AVFS_CNTL__MmState_MASK 0x3f000
+#define PWR_AVFS_CNTL__MmState__SHIFT 0xc
+#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000
+#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12
+#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000
+#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13
+#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000
+#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14
+#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000
+#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15
+#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000
+#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16
+#define PWR_AVFS_CNTL__Isolate_MASK 0x800000
+#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17
+#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000
+#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18
+#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000
+#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1
+#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0
+#define PWR_CKS_ENABLE__masterReset_MASK 0x2
+#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1
+#define PWR_CKS_ENABLE__staticEnable_MASK 0x4
+#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4
+#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60
+#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1
+#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0
+#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2
+#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1
+#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4
+#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1
+#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1
+#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1
+#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe
+#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2
+#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define ROM_STATUS__ROM_BUSY_MASK 0x1
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define ROM_INDEX__ROM_INDEX_MASK 0xffffff
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xffffffff
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0xffffff
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff
+#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
+#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+
+#endif /* SMU_7_1_3_SH_MASK_H */
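
The _MASK/__SHIFT pairs above follow the usual sh_mask convention: a field is
read by masking and shifting, and updated with a read-modify-write.  A minimal
sketch of the pattern, using one of the fields defined above; reg_read() and
reg_write() are hypothetical MMIO helpers, and mmPWR_CKS_CNTL is assumed to
come from the matching smu_7_1_3_d.h register-offset header:

	/* extract the CKS_STRETCH_AMOUNT field */
	u32 v = reg_read(mmPWR_CKS_CNTL);
	u32 stretch = (v & PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK) >>
		      PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT;

	/* write an updated value back into the same field */
	v &= ~PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK;
	v |= (stretch << PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT) &
	     PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK;
	reg_write(mmPWR_CKS_CNTL, v);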
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-bits.h
rename to drivers/gpu/drm/amd/include/atom-bits.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-names.h
rename to drivers/gpu/drm/amd/include/atom-names.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-types.h
rename to drivers/gpu/drm/amd/include/atom-types.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atombios.h
rename to drivers/gpu/drm/amd/include/atombios.h
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
new file mode 100644
index 0000000..992dcd8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_COMMON_H
+#define _CGS_COMMON_H
+
+#include "amd_shared.h"
+
+/**
+ * enum cgs_gpu_mem_type - GPU memory types
+ */
+enum cgs_gpu_mem_type {
+	CGS_GPU_MEM_TYPE__VISIBLE_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_FB,
+	CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__GART_CACHEABLE,
+	CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
+};
+
+/**
+ * enum cgs_ind_reg - Indirect register spaces
+ */
+enum cgs_ind_reg {
+	CGS_IND_REG__MMIO,
+	CGS_IND_REG__PCIE,
+	CGS_IND_REG__SMC,
+	CGS_IND_REG__UVD_CTX,
+	CGS_IND_REG__DIDT,
+	CGS_IND_REG__AUDIO_ENDPT
+};
+
+/**
+ * enum cgs_clock - Clocks controlled by the SMU
+ */
+enum cgs_clock {
+	CGS_CLOCK__SCLK,
+	CGS_CLOCK__MCLK,
+	CGS_CLOCK__VCLK,
+	CGS_CLOCK__DCLK,
+	CGS_CLOCK__ECLK,
+	CGS_CLOCK__ACLK,
+	CGS_CLOCK__ICLK,
+	/* ... */
+};
+
+/**
+ * enum cgs_engine - Engines that can be statically power-gated
+ */
+enum cgs_engine {
+	CGS_ENGINE__UVD,
+	CGS_ENGINE__VCE,
+	CGS_ENGINE__VP8,
+	CGS_ENGINE__ACP_DMA,
+	CGS_ENGINE__ACP_DSP0,
+	CGS_ENGINE__ACP_DSP1,
+	CGS_ENGINE__ISP,
+	/* ... */
+};
+
+/**
+ * enum cgs_voltage_planes - Voltage planes for external camera HW
+ */
+enum cgs_voltage_planes {
+	CGS_VOLTAGE_PLANE__SENSOR0,
+	CGS_VOLTAGE_PLANE__SENSOR1,
+	/* ... */
+};
+
+/**
+ * enum cgs_ucode_id - Firmware types for different IPs
+ */
+enum cgs_ucode_id {
+	CGS_UCODE_ID_SMU = 0,
+	CGS_UCODE_ID_SDMA0,
+	CGS_UCODE_ID_SDMA1,
+	CGS_UCODE_ID_CP_CE,
+	CGS_UCODE_ID_CP_PFP,
+	CGS_UCODE_ID_CP_ME,
+	CGS_UCODE_ID_CP_MEC,
+	CGS_UCODE_ID_CP_MEC_JT1,
+	CGS_UCODE_ID_CP_MEC_JT2,
+	CGS_UCODE_ID_GMCON_RENG,
+	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_MAXIMUM,
+};
+
+/**
+ * struct cgs_clock_limits - Clock limits
+ *
+ * Clocks are specified in 10KHz units.
+ */
+struct cgs_clock_limits {
+	unsigned min;		/**< Minimum supported frequency */
+	unsigned max;		/**< Maximum supported frequency */
+	unsigned sustainable;	/**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+	uint16_t		version;
+	uint16_t		feature_version;
+	uint32_t		image_size;
+	uint64_t		mc_addr;
+	void			*kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type:	memory type
+ * @mc_start:	Start MC address of the heap (output)
+ * @mc_size:	MC address space size (output)
+ * @mem_size:	maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				  uint64_t *mc_start, uint64_t *mc_size,
+				  uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device:	opaque device handle
+ * @kmem:	pointer to kernel memory
+ * @size:	size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr:	MC address (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+			       uint64_t min_offset, uint64_t max_offset,
+			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device:	opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device:	opaque device handle
+ * @type:	memory type
+ * @size:	size in bytes
+ * @align:	alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle:	memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to, depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*:	    GART aperture base address
+ * - others:			    undefined, don't use with max_offset
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t size, uint64_t align,
+				   uint64_t min_offset, uint64_t max_offset,
+				   cgs_handle_t *handle);
+
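+/*
+ * A minimal usage sketch (hypothetical; error handling omitted), built on the
+ * calls above via the convenience macros defined at the end of this header:
+ *
+ *	cgs_handle_t handle;
+ *	uint64_t mcaddr;
+ *	void *cpu_ptr;
+ *
+ *	cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
+ *			  4096, 4096, 0, 0, &handle);
+ *	cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);	(GPU-visible MC address)
+ *	cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);	(CPU mapping)
+ *
+ *	cgs_kunmap_gpu_mem(cgs_device, handle);
+ *	cgs_gunmap_gpu_mem(cgs_device, handle);
+ *	cgs_free_gpu_mem(cgs_device, handle);
+ */
+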
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @mcaddr:	MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @map:	Kernel virtual address the memory was mapped to (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ * @value:	register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+				     uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					    unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ * @value:	register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					 unsigned index, uint32_t value);
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ *
+ * Return:  Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+						unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+					    uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+					    uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+					     uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device:	opaque device handle
+ * @table:	data table index
+ * @size:	size of the table (output, may be NULL)
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+	void *cgs_device, unsigned table,
+	uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device:	opaque device handle
+ * @table:	command table index
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+					     uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table:	command table index
+ * @args:	arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+					 unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @active:	0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account and the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+				    int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @clock:	which clock?
+ * @freq:	requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+				      enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @engine:	which engine?
+ * @powered:	0 = powered down, non-0 = powered up
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+				       enum cgs_engine engine, int powered);
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device:	opaque device handle
+ * @clock:	which clock?
+ * @limits:	clock limits
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+					   enum cgs_clock clock,
+					   struct cgs_clock_limits *limits);
+
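+/*
+ * Sketch of the intended PM request flow (hypothetical; error handling
+ * omitted), assuming the matching convenience macros that mirror the
+ * typedefs above:
+ *
+ *	cgs_handle_t req;
+ *
+ *	cgs_create_pm_request(cgs_device, &req);
+ *	cgs_pm_request_clock(cgs_device, req, CGS_CLOCK__SCLK, 30000);	(300 MHz)
+ *	cgs_pm_request_engine(cgs_device, req, CGS_ENGINE__UVD, 1);
+ *	cgs_set_pm_request(cgs_device, req, 1);		(activate)
+ *	...
+ *	cgs_set_pm_request(cgs_device, req, 0);		(deactivate)
+ *	cgs_destroy_pm_request(cgs_device, req);
+ */
+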
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device:	opaque device handle
+ * @mask:	bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
+ * @voltages:	pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+					 const uint32_t *voltages);
+/**
+ * cgs_get_firmware_info() - Get firmware information from the core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info)(void *cgs_device,
+				     enum cgs_ucode_id type,
+				     struct cgs_firmware_info *info);
+
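+/**
+ * cgs_set_powergating_state() - Set the powergating state of an IP block
+ * @cgs_device: opaque device handle
+ * @block_type: the IP block to change
+ * @state:	the requested powergating state
+ *
+ * Return: 0 on success, -errno otherwise
+ */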
+typedef int (*cgs_set_powergating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_powergating_state state);
+
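+/**
+ * cgs_set_clockgating_state() - Set the clockgating state of an IP block
+ * @cgs_device: opaque device handle
+ * @block_type: the IP block to change
+ * @state:	the requested clockgating state
+ *
+ * Return: 0 on success, -errno otherwise
+ */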
+typedef int (*cgs_set_clockgating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_clockgating_state state);
+
+struct cgs_ops {
+	/* memory management calls (similar to KFD interface) */
+	cgs_gpu_mem_info_t gpu_mem_info;
+	cgs_gmap_kmem_t gmap_kmem;
+	cgs_gunmap_kmem_t gunmap_kmem;
+	cgs_alloc_gpu_mem_t alloc_gpu_mem;
+	cgs_free_gpu_mem_t free_gpu_mem;
+	cgs_gmap_gpu_mem_t gmap_gpu_mem;
+	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+	cgs_kmap_gpu_mem_t kmap_gpu_mem;
+	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+	/* MMIO access */
+	cgs_read_register_t read_register;
+	cgs_write_register_t write_register;
+	cgs_read_ind_register_t read_ind_register;
+	cgs_write_ind_register_t write_ind_register;
+	/* PCI configuration space access */
+	cgs_read_pci_config_byte_t read_pci_config_byte;
+	cgs_read_pci_config_word_t read_pci_config_word;
+	cgs_read_pci_config_dword_t read_pci_config_dword;
+	cgs_write_pci_config_byte_t write_pci_config_byte;
+	cgs_write_pci_config_word_t write_pci_config_word;
+	cgs_write_pci_config_dword_t write_pci_config_dword;
+	/* ATOM BIOS */
+	cgs_atom_get_data_table_t atom_get_data_table;
+	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+	/* Power management */
+	cgs_create_pm_request_t create_pm_request;
+	cgs_destroy_pm_request_t destroy_pm_request;
+	cgs_set_pm_request_t set_pm_request;
+	cgs_pm_request_clock_t pm_request_clock;
+	cgs_pm_request_engine_t pm_request_engine;
+	cgs_pm_query_clock_limits_t pm_query_clock_limits;
+	cgs_set_camera_voltages_t set_camera_voltages;
+	/* Firmware Info */
+	cgs_get_firmware_info get_firmware_info;
+	/* cg pg interface*/
+	cgs_set_powergating_state set_powergating_state;
+	cgs_set_clockgating_state set_clockgating_state;
+	/* ACPI (TODO) */
+};
+
+struct cgs_os_ops; /* To be defined in the OS-specific CGS header */
+
+struct cgs_device
+{
+	const struct cgs_ops *ops;
+	const struct cgs_os_ops *os_ops;
+	/* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size)		\
+	CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)	\
+	CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle)	\
+	CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle)	\
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle)		\
+	CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr)	\
+	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map)	\
+	CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset)		\
+	CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value)		\
+	CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index)		\
+	CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value)		\
+	CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr)	\
+	CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr)	\
+	CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr)		\
+	CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value)	\
+	CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value)	\
+	CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value)	\
+	CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev)	\
+	CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
+#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev)	\
+	CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
+#define cgs_atom_exec_cmd_table(dev,table,args)		\
+	CGS_CALL(atom_exec_cmd_table,dev,table,args)
+
+#define cgs_create_pm_request(dev,request)	\
+	CGS_CALL(create_pm_request,dev,request)
+#define cgs_destroy_pm_request(dev,request)		\
+	CGS_CALL(destroy_pm_request,dev,request)
+#define cgs_set_pm_request(dev,request,active)		\
+	CGS_CALL(set_pm_request,dev,request,active)
+#define cgs_pm_request_clock(dev,request,clock,freq)		\
+	CGS_CALL(pm_request_clock,dev,request,clock,freq)
+#define cgs_pm_request_engine(dev,request,engine,powered)	\
+	CGS_CALL(pm_request_engine,dev,request,engine,powered)
+#define cgs_pm_query_clock_limits(dev,clock,limits)		\
+	CGS_CALL(pm_query_clock_limits,dev,clock,limits)
+#define cgs_set_camera_voltages(dev,mask,voltages)	\
+	CGS_CALL(set_camera_voltages,dev,mask,voltages)
+#define cgs_get_firmware_info(dev, type, info)	\
+	CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_set_powergating_state(dev, block_type, state)	\
+	CGS_CALL(set_powergating_state, dev, block_type, state)
+#define cgs_set_clockgating_state(dev, block_type, state)	\
+	CGS_CALL(set_clockgating_state, dev, block_type, state)
+
+#endif /* _CGS_COMMON_H */
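
To make the indirection above concrete, a client component would drive the PM
request interface through the convenience macros roughly as sketched below.
This is an illustrative sketch only: the CGS_CLOCK__SCLK enumerator and the
frequency value are assumptions made for the example, not definitions from
this part of the header.

/* Hypothetical usage sketch, not part of this patch. Assumes
 * CGS_CLOCK__SCLK names the engine clock in enum cgs_clock. */
static int example_request_min_sclk(void *cgs_device)
{
	cgs_handle_t req;
	int ret;

	ret = cgs_create_pm_request(cgs_device, &req);
	if (ret)
		return ret;

	/* Request at least 300 MHz engine clock (freq is in 10 kHz units). */
	ret = cgs_pm_request_clock(cgs_device, req, CGS_CLOCK__SCLK, 30000);
	if (!ret)
		/* The minimum only applies while the request is active. */
		ret = cgs_set_pm_request(cgs_device, req, 1);

	if (ret)
		cgs_destroy_pm_request(cgs_device, req);
	return ret;
}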
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
new file mode 100644
index 0000000..488642f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_LINUX_H
+#define _CGS_LINUX_H
+
+#include "cgs_common.h"
+
+/**
+ * cgs_import_gpu_mem() - Import dmabuf handle
+ * @cgs_device:  opaque device handle
+ * @dmabuf_fd:   DMABuf file descriptor
+ * @handle:      memory handle (output)
+ *
+ * Must be called in the process context that dmabuf_fd belongs to.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
+				    cgs_handle_t *handle);
+
+/**
+ * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @type:          interrupt type
+ * @enabled:       0 = disable source, non-0 = enable source
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_source_set_func_t)(void *private_data,
+					 unsigned src_id, unsigned type,
+					 int enabled);
+
+/**
+ * cgs_irq_handler_func() - Interrupt handler callback
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @iv_entry:      pointer to raw ih ring entry
+ *
+ * This callback runs in interrupt context.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_handler_func_t)(void *private_data,
+				      unsigned src_id, const uint32_t *iv_entry);
+
+/**
+ * cgs_add_irq_source() - Add an IRQ source
+ * @cgs_device:    opaque device handle
+ * @src_id:        interrupt source ID
+ * @num_types:     number of interrupt types that can be independently enabled
+ * @set:           callback function to enable/disable an interrupt type
+ * @handler:       interrupt handler callback
+ * @private_data:  private data to pass to callback functions
+ *
+ * The same IRQ source can be added only once. Adding an IRQ source
+ * indicates ownership of that IRQ source and all its IRQ types.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+				    unsigned num_types,
+				    cgs_irq_source_set_func_t set,
+				    cgs_irq_handler_func_t handler,
+				    void *private_data);
+
+/**
+ * cgs_irq_get() - Request enabling an IRQ source and type
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+/**
+ * cgs_irq_put() - Indicate IRQ source is no longer needed
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources. Even after cgs_irq_put is called, the
+ * IRQ handler may still be called if there are more references to
+ * the IRQ source.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+struct cgs_os_ops {
+	cgs_import_gpu_mem_t import_gpu_mem;
+
+	/* IRQ handling */
+	cgs_add_irq_source_t add_irq_source;
+	cgs_irq_get_t irq_get;
+	cgs_irq_put_t irq_put;
+};
+
+#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)		\
+	CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
+#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
+	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,	\
+		    private_data)
+#define cgs_irq_get(dev,src_id,type)		\
+	CGS_OS_CALL(irq_get,dev,src_id,type)
+#define cgs_irq_put(dev,src_id,type)		\
+	CGS_OS_CALL(irq_put,dev,src_id,type)
+
+#endif /* _CGS_LINUX_H */
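
As a usage note for the IRQ half of this interface: a component registers a
source once with cgs_add_irq_source(), then brackets the period it needs
interrupts with balanced cgs_irq_get()/cgs_irq_put() calls. A hedged sketch,
with a made-up source ID and trivial callbacks:

/* Hypothetical usage sketch, not part of this patch. */
static int example_set_func(void *private_data, unsigned src_id,
			    unsigned type, int enabled)
{
	/* Mask or unmask the interrupt type in hardware here. */
	return 0;
}

static int example_handler(void *private_data, unsigned src_id,
			   const uint32_t *iv_entry)
{
	/* Interrupt context: decode the raw ih ring entry here. */
	return 0;
}

static int example_enable_irq(void *cgs_device, void *private_data)
{
	const unsigned src_id = 42;	/* made-up source ID */
	int ret;

	ret = cgs_add_irq_source(cgs_device, src_id, 1, example_set_func,
				 example_handler, private_data);
	if (ret)
		return ret;

	/* Reference counted; must later be balanced by cgs_irq_put(). */
	return cgs_irq_get(cgs_device, src_id, 0);
}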
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa..888250b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@
 	KGD_ENGINE_MEC1,
 	KGD_ENGINE_MEC2,
 	KGD_ENGINE_RLC,
-	KGD_ENGINE_SDMA,
+	KGD_ENGINE_SDMA1,
+	KGD_ENGINE_SDMA2,
 	KGD_ENGINE_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
new file mode 100644
index 0000000..ee6978b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
+    UCHAR ucI2cLine;        // as interpreted by DAL I2C
+    UCHAR ucI2cAddress;
+    UCHAR ucFanParameters;  // Fan Control Parameters.
+    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
+    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
+    UCHAR ucReserved;       // ----
+    UCHAR ucFlags;          // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64      5
+#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
+#define ATOM_PP_THERMALCONTROLLER_RV770     8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_KONG      10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
+#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS  18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI    19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT  usTMax;                          // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+	ATOM_PPLIB_FANTABLE2 basicTable2;
+	UCHAR ucFanControlMode;
+	USHORT usFanPWMMax;
+	USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+    USHORT  usSAMUTableOffset;  //points to ATOM_PPLIB_SAMU_Table
+    USHORT  usPPMTableOffset;   //points to ATOM_PPLIB_PPM_Table
+    USHORT  usACPTableOffset;  //points to ATOM_PPLIB_ACP_Table   
+    /* points to ATOM_PPLIB_POWERTUNE_Table */
+    USHORT  usPowerTuneTableOffset;
+    /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */
+    USHORT  usSclkVddgfxTableOffset;
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Whether the driver controls VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE   0x00040000           // Whether the driver supports the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY   0x00080000     // Whether the driver supports reverted GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17   0x00100000     // Whether the driver supports thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Whether the driver supports a configurable VR HOT GPIO.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Whether the driver supports the Temp Inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL    0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE      0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+      ATOM_COMMON_TABLE_HEADER sHeader;
+
+      UCHAR ucDataRevision;
+
+      UCHAR ucNumStates;
+      UCHAR ucStateEntrySize;
+      UCHAR ucClockInfoSize;
+      UCHAR ucNonClockSize;
+
+      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+      USHORT usStateArrayOffset;
+
+      // offset from start of this table to array of ASIC-specific structures,
+      // currently ATOM_PPLIB_CLOCK_INFO.
+      USHORT usClockInfoArrayOffset;
+
+      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+      USHORT usNonClockInfoArrayOffset;
+
+      USHORT usBackbiasTime;    // in microseconds
+      USHORT usVoltageTime;     // in microseconds
+      USHORT usTableSize;       //the size of this structure, or the extended structure
+
+      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
+
+      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
+
+      USHORT usBootClockInfoOffset;
+      USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+    USHORT                     usMvddDependencyOnMCLKOffset;  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
+    USHORT                     usTDPODLimit;
+    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+//M3 Arb    //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
+
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+      USHORT usClassification;
+      UCHAR  ucMinTemperature;
+      UCHAR  ucMaxTemperature;
+      ULONG  ulCapsAndSettings;
+      UCHAR  ucRequiredPower;
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usUnused1;
+      USHORT usUnused2;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
+      UCHAR  ucLowEngineClockHigh;
+      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
+      UCHAR  ucHighEngineClockHigh;
+      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+      UCHAR  ucMemoryClockHigh;           // Currently unused.
+      UCHAR  ucPadding;                   // For proper alignment and size.
+      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could 
+      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+      ULONG  ulFlags; 
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      UCHAR  ucPCIEGen;
+      UCHAR  ucUnused1;
+
+      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+      
+      UCHAR  ucPCIEGen;
+      USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF. The unit is 10 kHz.
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16.
+      UCHAR  vddcIndex;         //2-bit vddc index;
+      USHORT tdpLimit;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO {
+      UCHAR index;
+      UCHAR rsv[3];
+} ATOM_PPLIB_CZ_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; Driver uses it to calculate the whole 
+      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+      
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * Driver will read the first ucNumDPMLevels in this array
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+    //how many non-clock levels we have. normally should be same as number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+    struct
+    {
+        USHORT usVddc;          // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd.
+        ULONG  ulLeakageValue;  // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd.
+
+    };
+    struct
+     {
+        USHORT usVddc1;
+        USHORT usVddc2;
+        USHORT usVddc3;
+     };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR  ucVCEClockInfoIndex;
+    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+      UCHAR revid;
+//    VCEClockInfoArray array;
+//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+      UCHAR revid;
+//    UVDClockInfoArray array;
+//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usSAMClockLow;
+      UCHAR  ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usACPClockLow;
+      UCHAR  ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+    USHORT usTDP;
+    USHORT usConfigurableTDP;
+    USHORT usTDC;
+    USHORT usBatteryPowerLimit;
+    USHORT usSmallPowerLimit;
+    USHORT usLowCACLeakage;
+    USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+      USHORT usMaximumPowerDeliveryLimit;
+      USHORT usTjMax;
+      USHORT usReserve[6];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A    1
+#define ATOM_PPM_A_I    2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+      UCHAR  ucRevId;
+      UCHAR  ucPpmDesign;          //A+I or A+A
+      USHORT usCpuCoreNumber;
+      ULONG  ulPlatformTDP;
+      ULONG  ulSmallACPlatformTDP;
+      ULONG  ulPlatformTDC;
+      ULONG  ulSmallACPlatformTDC;
+      ULONG  ulApuTDP;
+      ULONG  ulDGpuTDP;  
+      ULONG  ulDGpuUlvPower;
+      ULONG  ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack()
+
+#endif
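
Everything in this header is located by a byte offset from the start of
ATOM_PPLIB_POWERPLAYTABLE, and clocks are stored as a 16-bit low word plus an
8-bit high byte. A consumer therefore walks the table roughly as sketched
below. This is illustrative only: the helper names are invented, and the
endianness conversion and bounds checking a real parser needs are elided.

/* Hypothetical parsing sketch, not part of this patch. */
static uint32_t pp_clock(USHORT low, UCHAR high)
{
	/* 24-bit clock value, in 10 kHz units. */
	return ((uint32_t)high << 16) | low;
}

static void pp_walk_states(const ATOM_PPLIB_POWERPLAYTABLE *table)
{
	const uint8_t *base = (const uint8_t *)table;
	const ATOM_PPLIB_STATE *state =
		(const ATOM_PPLIB_STATE *)(base + table->usStateArrayOffset);
	UCHAR i;

	for (i = 0; i < table->ucNumStates; i++) {
		/* ... look up the non-clock info for this state via
		 * state->ucNonClockStateIndex and
		 * usNonClockInfoArrayOffset here ... */

		/* Entries are ucStateEntrySize bytes apart because
		 * ucClockStateIndices[] is variable-sized. */
		state = (const ATOM_PPLIB_STATE *)
			((const uint8_t *)state + table->ucStateEntrySize);
	}
}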
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644
index 0000000..65cfacd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+	uint32_t sdmax_rlcx_rb_cntl;
+	uint32_t sdmax_rlcx_rb_base;
+	uint32_t sdmax_rlcx_rb_base_hi;
+	uint32_t sdmax_rlcx_rb_rptr;
+	uint32_t sdmax_rlcx_rb_wptr;
+	uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+	uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+	uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+	uint32_t sdmax_rlcx_ib_cntl;
+	uint32_t sdmax_rlcx_ib_rptr;
+	uint32_t sdmax_rlcx_ib_offset;
+	uint32_t sdmax_rlcx_ib_base_lo;
+	uint32_t sdmax_rlcx_ib_base_hi;
+	uint32_t sdmax_rlcx_ib_size;
+	uint32_t sdmax_rlcx_skip_cntl;
+	uint32_t sdmax_rlcx_context_status;
+	uint32_t sdmax_rlcx_doorbell;
+	uint32_t sdmax_rlcx_virtual_addr;
+	uint32_t sdmax_rlcx_ape1_cntl;
+	uint32_t sdmax_rlcx_doorbell_log;
+	uint32_t reserved_22;
+	uint32_t reserved_23;
+	uint32_t reserved_24;
+	uint32_t reserved_25;
+	uint32_t reserved_26;
+	uint32_t reserved_27;
+	uint32_t reserved_28;
+	uint32_t reserved_29;
+	uint32_t reserved_30;
+	uint32_t reserved_31;
+	uint32_t reserved_32;
+	uint32_t reserved_33;
+	uint32_t reserved_34;
+	uint32_t reserved_35;
+	uint32_t reserved_36;
+	uint32_t reserved_37;
+	uint32_t reserved_38;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t reserved_65;
+	uint32_t reserved_66;
+	uint32_t reserved_67;
+	uint32_t reserved_68;
+	uint32_t reserved_69;
+	uint32_t reserved_70;
+	uint32_t reserved_71;
+	uint32_t reserved_72;
+	uint32_t reserved_73;
+	uint32_t reserved_74;
+	uint32_t reserved_75;
+	uint32_t reserved_76;
+	uint32_t reserved_77;
+	uint32_t reserved_78;
+	uint32_t reserved_79;
+	uint32_t reserved_80;
+	uint32_t reserved_81;
+	uint32_t reserved_82;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t reserved_86;
+	uint32_t reserved_87;
+	uint32_t reserved_88;
+	uint32_t reserved_89;
+	uint32_t reserved_90;
+	uint32_t reserved_91;
+	uint32_t reserved_92;
+	uint32_t reserved_93;
+	uint32_t reserved_94;
+	uint32_t reserved_95;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t reserved_98;
+	uint32_t reserved_99;
+	uint32_t reserved_100;
+	uint32_t reserved_101;
+	uint32_t reserved_102;
+	uint32_t reserved_103;
+	uint32_t reserved_104;
+	uint32_t reserved_105;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t reserved_108;
+	uint32_t reserved_109;
+	uint32_t reserved_110;
+	uint32_t reserved_111;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t reserved_114;
+	uint32_t reserved_115;
+	uint32_t reserved_116;
+	uint32_t reserved_117;
+	uint32_t reserved_118;
+	uint32_t reserved_119;
+	uint32_t reserved_120;
+	uint32_t reserved_121;
+	uint32_t reserved_122;
+	uint32_t reserved_123;
+	uint32_t reserved_124;
+	uint32_t reserved_125;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+};
+
+struct vi_mqd {
+	uint32_t header;
+	uint32_t compute_dispatch_initiator;
+	uint32_t compute_dim_x;
+	uint32_t compute_dim_y;
+	uint32_t compute_dim_z;
+	uint32_t compute_start_x;
+	uint32_t compute_start_y;
+	uint32_t compute_start_z;
+	uint32_t compute_num_thread_x;
+	uint32_t compute_num_thread_y;
+	uint32_t compute_num_thread_z;
+	uint32_t compute_pipelinestat_enable;
+	uint32_t compute_perfcount_enable;
+	uint32_t compute_pgm_lo;
+	uint32_t compute_pgm_hi;
+	uint32_t compute_tba_lo;
+	uint32_t compute_tba_hi;
+	uint32_t compute_tma_lo;
+	uint32_t compute_tma_hi;
+	uint32_t compute_pgm_rsrc1;
+	uint32_t compute_pgm_rsrc2;
+	uint32_t compute_vmid;
+	uint32_t compute_resource_limits;
+	uint32_t compute_static_thread_mgmt_se0;
+	uint32_t compute_static_thread_mgmt_se1;
+	uint32_t compute_tmpring_size;
+	uint32_t compute_static_thread_mgmt_se2;
+	uint32_t compute_static_thread_mgmt_se3;
+	uint32_t compute_restart_x;
+	uint32_t compute_restart_y;
+	uint32_t compute_restart_z;
+	uint32_t compute_thread_trace_enable;
+	uint32_t compute_misc_reserved;
+	uint32_t compute_dispatch_id;
+	uint32_t compute_threadgroup_id;
+	uint32_t compute_relaunch;
+	uint32_t compute_wave_restore_addr_lo;
+	uint32_t compute_wave_restore_addr_hi;
+	uint32_t compute_wave_restore_control;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t compute_user_data_0;
+	uint32_t compute_user_data_1;
+	uint32_t compute_user_data_2;
+	uint32_t compute_user_data_3;
+	uint32_t compute_user_data_4;
+	uint32_t compute_user_data_5;
+	uint32_t compute_user_data_6;
+	uint32_t compute_user_data_7;
+	uint32_t compute_user_data_8;
+	uint32_t compute_user_data_9;
+	uint32_t compute_user_data_10;
+	uint32_t compute_user_data_11;
+	uint32_t compute_user_data_12;
+	uint32_t compute_user_data_13;
+	uint32_t compute_user_data_14;
+	uint32_t compute_user_data_15;
+	uint32_t cp_compute_csinvoc_count_lo;
+	uint32_t cp_compute_csinvoc_count_hi;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t cp_mqd_query_time_lo;
+	uint32_t cp_mqd_query_time_hi;
+	uint32_t cp_mqd_connect_start_time_lo;
+	uint32_t cp_mqd_connect_start_time_hi;
+	uint32_t cp_mqd_connect_end_time_lo;
+	uint32_t cp_mqd_connect_end_time_hi;
+	uint32_t cp_mqd_connect_end_wf_count;
+	uint32_t cp_mqd_connect_end_pq_rptr;
+	uint32_t cp_mqd_connect_end_pq_wptr;
+	uint32_t cp_mqd_connect_end_ib_rptr;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t cp_mqd_save_start_time_lo;
+	uint32_t cp_mqd_save_start_time_hi;
+	uint32_t cp_mqd_save_end_time_lo;
+	uint32_t cp_mqd_save_end_time_hi;
+	uint32_t cp_mqd_restore_start_time_lo;
+	uint32_t cp_mqd_restore_start_time_hi;
+	uint32_t cp_mqd_restore_end_time_lo;
+	uint32_t cp_mqd_restore_end_time_hi;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t gds_cs_ctxsw_cnt0;
+	uint32_t gds_cs_ctxsw_cnt1;
+	uint32_t gds_cs_ctxsw_cnt2;
+	uint32_t gds_cs_ctxsw_cnt3;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t cp_pq_exe_status_lo;
+	uint32_t cp_pq_exe_status_hi;
+	uint32_t cp_packet_id_lo;
+	uint32_t cp_packet_id_hi;
+	uint32_t cp_packet_exe_status_lo;
+	uint32_t cp_packet_exe_status_hi;
+	uint32_t gds_save_base_addr_lo;
+	uint32_t gds_save_base_addr_hi;
+	uint32_t gds_save_mask_lo;
+	uint32_t gds_save_mask_hi;
+	uint32_t ctx_save_base_addr_lo;
+	uint32_t ctx_save_base_addr_hi;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+	uint32_t cp_mqd_base_addr_lo;
+	uint32_t cp_mqd_base_addr_hi;
+	uint32_t cp_hqd_active;
+	uint32_t cp_hqd_vmid;
+	uint32_t cp_hqd_persistent_state;
+	uint32_t cp_hqd_pipe_priority;
+	uint32_t cp_hqd_queue_priority;
+	uint32_t cp_hqd_quantum;
+	uint32_t cp_hqd_pq_base_lo;
+	uint32_t cp_hqd_pq_base_hi;
+	uint32_t cp_hqd_pq_rptr;
+	uint32_t cp_hqd_pq_rptr_report_addr_lo;
+	uint32_t cp_hqd_pq_rptr_report_addr_hi;
+	uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+	uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+	uint32_t cp_hqd_pq_doorbell_control;
+	uint32_t cp_hqd_pq_wptr;
+	uint32_t cp_hqd_pq_control;
+	uint32_t cp_hqd_ib_base_addr_lo;
+	uint32_t cp_hqd_ib_base_addr_hi;
+	uint32_t cp_hqd_ib_rptr;
+	uint32_t cp_hqd_ib_control;
+	uint32_t cp_hqd_iq_timer;
+	uint32_t cp_hqd_iq_rptr;
+	uint32_t cp_hqd_dequeue_request;
+	uint32_t cp_hqd_dma_offload;
+	uint32_t cp_hqd_sema_cmd;
+	uint32_t cp_hqd_msg_type;
+	uint32_t cp_hqd_atomic0_preop_lo;
+	uint32_t cp_hqd_atomic0_preop_hi;
+	uint32_t cp_hqd_atomic1_preop_lo;
+	uint32_t cp_hqd_atomic1_preop_hi;
+	uint32_t cp_hqd_hq_status0;
+	uint32_t cp_hqd_hq_control0;
+	uint32_t cp_mqd_control;
+	uint32_t cp_hqd_hq_status1;
+	uint32_t cp_hqd_hq_control1;
+	uint32_t cp_hqd_eop_base_addr_lo;
+	uint32_t cp_hqd_eop_base_addr_hi;
+	uint32_t cp_hqd_eop_control;
+	uint32_t cp_hqd_eop_rptr;
+	uint32_t cp_hqd_eop_wptr;
+	uint32_t cp_hqd_eop_done_events;
+	uint32_t cp_hqd_ctx_save_base_addr_lo;
+	uint32_t cp_hqd_ctx_save_base_addr_hi;
+	uint32_t cp_hqd_ctx_save_control;
+	uint32_t cp_hqd_cntl_stack_offset;
+	uint32_t cp_hqd_cntl_stack_size;
+	uint32_t cp_hqd_wg_state_offset;
+	uint32_t cp_hqd_ctx_save_size;
+	uint32_t cp_hqd_gds_resource_state;
+	uint32_t cp_hqd_error;
+	uint32_t cp_hqd_eop_wptr_mem;
+	uint32_t cp_hqd_eop_dones;
+	uint32_t reserved_182;
+	uint32_t reserved_183;
+	uint32_t reserved_184;
+	uint32_t reserved_185;
+	uint32_t reserved_186;
+	uint32_t reserved_187;
+	uint32_t reserved_188;
+	uint32_t reserved_189;
+	uint32_t reserved_190;
+	uint32_t reserved_191;
+	uint32_t iqtimer_pkt_header;
+	uint32_t iqtimer_pkt_dw0;
+	uint32_t iqtimer_pkt_dw1;
+	uint32_t iqtimer_pkt_dw2;
+	uint32_t iqtimer_pkt_dw3;
+	uint32_t iqtimer_pkt_dw4;
+	uint32_t iqtimer_pkt_dw5;
+	uint32_t iqtimer_pkt_dw6;
+	uint32_t iqtimer_pkt_dw7;
+	uint32_t iqtimer_pkt_dw8;
+	uint32_t iqtimer_pkt_dw9;
+	uint32_t iqtimer_pkt_dw10;
+	uint32_t iqtimer_pkt_dw11;
+	uint32_t iqtimer_pkt_dw12;
+	uint32_t iqtimer_pkt_dw13;
+	uint32_t iqtimer_pkt_dw14;
+	uint32_t iqtimer_pkt_dw15;
+	uint32_t iqtimer_pkt_dw16;
+	uint32_t iqtimer_pkt_dw17;
+	uint32_t iqtimer_pkt_dw18;
+	uint32_t iqtimer_pkt_dw19;
+	uint32_t iqtimer_pkt_dw20;
+	uint32_t iqtimer_pkt_dw21;
+	uint32_t iqtimer_pkt_dw22;
+	uint32_t iqtimer_pkt_dw23;
+	uint32_t iqtimer_pkt_dw24;
+	uint32_t iqtimer_pkt_dw25;
+	uint32_t iqtimer_pkt_dw26;
+	uint32_t iqtimer_pkt_dw27;
+	uint32_t iqtimer_pkt_dw28;
+	uint32_t iqtimer_pkt_dw29;
+	uint32_t iqtimer_pkt_dw30;
+	uint32_t iqtimer_pkt_dw31;
+	uint32_t reserved_225;
+	uint32_t reserved_226;
+	uint32_t reserved_227;
+	uint32_t set_resources_header;
+	uint32_t set_resources_dw1;
+	uint32_t set_resources_dw2;
+	uint32_t set_resources_dw3;
+	uint32_t set_resources_dw4;
+	uint32_t set_resources_dw5;
+	uint32_t set_resources_dw6;
+	uint32_t set_resources_dw7;
+	uint32_t reserved_236;
+	uint32_t reserved_237;
+	uint32_t reserved_238;
+	uint32_t reserved_239;
+	uint32_t queue_doorbell_id0;
+	uint32_t queue_doorbell_id1;
+	uint32_t queue_doorbell_id2;
+	uint32_t queue_doorbell_id3;
+	uint32_t queue_doorbell_id4;
+	uint32_t queue_doorbell_id5;
+	uint32_t queue_doorbell_id6;
+	uint32_t queue_doorbell_id7;
+	uint32_t queue_doorbell_id8;
+	uint32_t queue_doorbell_id9;
+	uint32_t queue_doorbell_id10;
+	uint32_t queue_doorbell_id11;
+	uint32_t queue_doorbell_id12;
+	uint32_t queue_doorbell_id13;
+	uint32_t queue_doorbell_id14;
+	uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 0000000..9259f1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select next job from a specified run queue with round robin policy.
+ * Return NULL if nothing available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+	struct amd_sched_entity *entity;
+	struct amd_sched_job *job;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			job = amd_sched_entity_pop_job(entity);
+			if (job) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return job;
+			}
+		}
+	}
+
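+	/* Nothing runnable after the current entity: wrap around and scan
+	 * from the head of the list, stopping once we are back at the
+	 * entity we started from. */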
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		job = amd_sched_entity_pop_job(entity);
+		if (job) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return job;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
+
+/**
+ * Init a context entity used by the scheduler when submitting to the HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid amd_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ *
+ * return 0 if succeeded, negative error code on failure
+ */
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs)
+{
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct amd_sched_entity));
+	entity->belongto_rq = rq;
+	entity->scheduler = sched;
+	entity->fence_context = fence_context_alloc(1);
+	if (kfifo_alloc(&entity->job_queue,
+		       jobs * sizeof(void *),
+		       GFP_KERNEL))
+		return -EINVAL;
+
+	spin_lock_init(&entity->queue_lock);
+	atomic_set(&entity->fence_seq, 0);
+
+	/* Add the entity to the run queue */
+	amd_sched_rq_add_entity(rq, entity);
+	return 0;
+}
+
+/**
+ * Query if entity is initialized
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * return true if entity is initialized, false otherwise
+ */
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+					    struct amd_sched_entity *entity)
+{
+	return entity->scheduler == sched &&
+		entity->belongto_rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity doesn't have any unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
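+	/* Make sure we see the most recent job_queue update. */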
+	rmb();
+	if (kfifo_is_empty(&entity->job_queue))
+		return true;
+
+	return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity)
+{
+	struct amd_sched_rq *rq = entity->belongto_rq;
+
+	if (!amd_sched_entity_is_initialized(sched, entity))
+		return;
+
+	/*
+	 * The client will not queue more IBs during this fini; consume
+	 * the ones already queued.
+	 */
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
+	kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+	amd_sched_wakeup(entity->scheduler);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->scheduler;
+	struct amd_sched_job *job;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return NULL;
+
+	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+		return NULL;
+
+	while ((entity->dependency = sched->ops->dependency(job))) {
+
+		if (fence_add_callback(entity->dependency, &entity->cb,
+				       amd_sched_entity_wakeup))
+			fence_put(entity->dependency);
+		else
+			return NULL;
+	}
+
+	return job;
+}
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @job		The pointer to the job to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *job)
+{
+	struct amd_sched_entity *entity = job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		amd_sched_wakeup(job->sched);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(
+		entity, sched_job->owner);
+
+	if (!fence)
+		return -ENOMEM;
+
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+
+	wait_event(entity->scheduler->job_scheduled,
+		   amd_sched_entity_in(sched_job));
+
+	return 0;
+}
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+	if (amd_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run.
+ */
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *job;
+
+	if (!amd_sched_ready(sched))
+		return NULL;
+
+	/* The kernel run queue has higher priority than the normal run queue */
+	job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (job == NULL)
+		job = amd_sched_rq_select_job(&sched->sched_rq);
+
+	return job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *sched_job =
+		container_of(cb, struct amd_sched_job, cb);
+	struct amd_gpu_scheduler *sched;
+
+	sched = sched_job->sched;
+	amd_sched_fence_signal(sched_job->s_fence);
+	atomic_dec(&sched->hw_rq_count);
+	fence_put(&sched_job->s_fence->base);
+	sched->ops->process_job(sched_job);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+	int r, count;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct amd_sched_entity *entity;
+		struct amd_sched_job *job;
+		struct fence *fence;
+
+		wait_event_interruptible(sched->wake_up_worker,
+			kthread_should_stop() ||
+			(job = amd_sched_select_job(sched)));
+
+		if (!job)
+			continue;
+
+		entity = job->s_entity;
+		atomic_inc(&sched->hw_rq_count);
+		fence = sched->ops->run_job(job);
+		if (fence) {
+			r = fence_add_callback(fence, &job->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		}
+
+		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+		WARN_ON(count != sizeof(job));
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
+
+/**
+ * Create a gpu scheduler
+ *
+ * @ops			The backend operations for this scheduler.
+ * @ring		The ring id for the scheduler.
+ * @hw_submission	Max number of in-flight hw submissions.
+ * @priv		Driver private data, stored in the scheduler.
+ *
+ * Return the pointer to the scheduler on success, NULL otherwise.
+ */
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+					   unsigned ring, unsigned hw_submission,
+					   void *priv)
+{
+	struct amd_gpu_scheduler *sched;
+
+	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
+	if (!sched)
+		return NULL;
+
+	sched->ops = ops;
+	sched->ring_id = ring;
+	sched->hw_submission_limit = hw_submission;
+	sched->priv = priv;
+	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	atomic_set(&sched->hw_rq_count, 0);
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
+		kfree(sched);
+		return NULL;
+	}
+
+	return sched;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ *
+ * Return 0 on success (this function currently cannot fail).
+ */
+int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+{
+	kthread_stop(sched->thread);
+	kfree(sched);
+	return 0;
+}
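
For orientation, the pieces above compose as follows. This is a minimal
usage sketch against the API declared in gpu_scheduler.h; my_ops, my_job
and the queue sizes are illustrative assumptions, not code from an
in-tree driver:

    struct amd_gpu_scheduler *sched;
    struct amd_sched_entity entity;

    sched = amd_sched_create(&my_ops, 0 /* ring */, 16 /* hw_submission */, NULL);
    if (!sched)
            return -ENOMEM;

    /* One entity per client context, feeding the normal run queue. */
    if (amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32))
            goto err_sched;

    my_job->s_entity = &entity;
    my_job->sched = sched;
    amd_sched_entity_push_job(my_job); /* may block until queue space frees */

    /* Teardown: fini waits for the entity's job queue to drain. */
    amd_sched_entity_fini(sched, &entity);
    amd_sched_destroy(sched);
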
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 0000000..2af0e4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct amd_sched_entity {
+	struct list_head		list;
+	struct amd_sched_rq		*belongto_rq;
+	atomic_t			fence_seq;
+	/* the job_queue maintains the jobs submitted by clients */
+	struct kfifo                    job_queue;
+	spinlock_t			queue_lock;
+	struct amd_gpu_scheduler	*scheduler;
+	uint64_t                        fence_context;
+	struct fence			*dependency;
+	struct fence_cb			cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct amd_sched_rq {
+	spinlock_t		lock;
+	struct list_head	entities;
+	struct amd_sched_entity	*current_entity;
+};
+
+struct amd_sched_fence {
+	struct fence                    base;
+	struct amd_gpu_scheduler	*scheduler;
+	spinlock_t			lock;
+	void                            *owner;
+};
+
+struct amd_sched_job {
+	struct fence_cb                 cb;
+	struct amd_gpu_scheduler        *sched;
+	struct amd_sched_entity         *s_entity;
+	struct amd_sched_fence          *s_fence;
+	void		                *owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+ */
+struct amd_sched_backend_ops {
+	struct fence *(*dependency)(struct amd_sched_job *job);
+	struct fence *(*run_job)(struct amd_sched_job *job);
+	void (*process_job)(struct amd_sched_job *job);
+};
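
A hedged sketch of the driver side of this contract; every my_* name
below is hypothetical. The scheduler calls ->dependency() until it
returns NULL, then ->run_job() to emit to hardware, and ->process_job()
once the returned fence signals:

    static struct fence *my_dependency(struct amd_sched_job *job)
    {
            return NULL;    /* NULL: no unmet dependency, job may run */
    }

    static struct fence *my_run_job(struct amd_sched_job *job)
    {
            /* Emit to the hardware ring and return its completion fence. */
            return my_hw_submit(job);       /* hypothetical helper */
    }

    static void my_process_job(struct amd_sched_job *job)
    {
            kfree(job);     /* hardware is done; release per-job resources */
    }

    static struct amd_sched_backend_ops my_ops = {
            .dependency  = my_dependency,
            .run_job     = my_run_job,
            .process_job = my_process_job,
    };
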
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct amd_gpu_scheduler {
+	struct task_struct		*thread;
+	struct amd_sched_rq		sched_rq;
+	struct amd_sched_rq		kernel_rq;
+	atomic_t			hw_rq_count;
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			ring_id;
+	wait_queue_head_t		wake_up_worker;
+	wait_queue_head_t		job_scheduled;
+	uint32_t                        hw_submission_limit;
+	char                            name[20];
+	void                            *priv;
+};
+
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+		 uint32_t ring, uint32_t hw_submission, void *priv);
+int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 0000000..e62c379
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+	struct amd_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->owner = owner;
+	fence->scheduler = s_entity->scheduler;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&s_entity->fence_seq);
+	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+		   s_entity->fence_context, seq);
+
+	return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	int ret = fence_signal(&fence->base);
+	if (!ret)
+		FENCE_TRACE(&fence->base, "signaled from irq context\n");
+	else
+		FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	return (const char *)fence->scheduler->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
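
To illustrate the container_of-based downcast declared in
gpu_scheduler.h: a consumer holding a plain struct fence *f can recover
the scheduler fence safely, since to_amd_sched_fence() returns NULL for
foreign fences (the pr_info is illustrative only):

    struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

    if (s_fence)
            pr_info("fence on %s, owner %p\n",
                    s_fence->scheduler->name, s_fence->owner);
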
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e73..7d03c51 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@
 	.owner		= THIS_MODULE,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_pan_display	= drm_fb_helper_pan_display,
 	.fb_blank	= drm_fb_helper_blank,
 	.fb_setcmap	= drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@
 	if (IS_ERR(dfb))
 		return PTR_ERR(dfb);
 
-	info = framebuffer_alloc(0, dev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(fbh);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fballoc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fbcmap;
-	}
-
 	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
 	info->par = fbh;
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@
 	info->screen_size = obj->obj.size;
 	info->screen_base = ptr;
 	fbh->fb = &dfb->fb;
-	fbh->fbdev = info;
+
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
@@ -111,8 +105,6 @@
 
 	return 0;
 
- err_fbcmap:
-	framebuffer_release(info);
  err_fballoc:
 	dfb->fb.funcs->destroy(&dfb->fb);
 	return ret;
@@ -171,6 +163,7 @@
 
 	return 0;
  err_fb_setup:
+	drm_fb_helper_release_fbi(fbh);
 	drm_fb_helper_fini(fbh);
  err_fb_helper:
 	priv->fbdev = NULL;
@@ -191,14 +184,8 @@
 	struct drm_fb_helper *fbh = priv->fbdev;
 
 	if (fbh) {
-		struct fb_info *info = fbh->fbdev;
-
-		if (info) {
-			unregister_framebuffer(info);
-			if (info->cmap.len)
-				fb_dealloc_cmap(&info->cmap);
-			framebuffer_release(info);
-		}
+		drm_fb_helper_unregister_fbi(fbh);
+		drm_fb_helper_release_fbi(fbh);
 
 		drm_fb_helper_fini(fbh);
 
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eef..f31db28 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -134,7 +134,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -143,7 +143,7 @@
 			  const struct fb_image *image)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -193,7 +193,6 @@
 	struct drm_framebuffer *fb;
 	struct fb_info *info;
 	int size, ret;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
-		goto out;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto err_free_vram;
 	}
 	info->par = afbdev;
 
 	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
 	if (ret)
-		goto out;
+		goto err_release_fbi;
 
 	afbdev->sysram = sysram;
 	afbdev->size = size;
 
 	fb = &afbdev->afb.base;
 	afbdev->helper.fb = fb;
-	afbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "astdrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &astfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
 	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
@@ -266,7 +253,11 @@
 		      fb->width, fb->height);
 
 	return 0;
-out:
+
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
+err_free_vram:
+	vfree(afbdev->sysram);
 	return ret;
 }
 
@@ -297,15 +288,10 @@
 static void ast_fbdev_destroy(struct drm_device *dev,
 			      struct ast_fbdev *afbdev)
 {
-	struct fb_info *info;
 	struct ast_framebuffer *afb = &afbdev->afb;
-	if (afbdev->helper.fbdev) {
-		info = afbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+
+	drm_fb_helper_unregister_fbi(&afbdev->helper);
+	drm_fb_helper_release_fbi(&afbdev->helper);
 
 	if (afb->obj) {
 		drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@
 	if (!ast->fbdev)
 		return;
 
-	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
 }
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc..838217f 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct ast_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_ast_bo(obj);
 	*offset = ast_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
 
 }
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 5ae5c69..9f6e234 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -239,7 +239,8 @@
 	return atmel_hlcdc_plane_prepare_disc_area(s);
 }
 
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+					  struct drm_crtc_state *old_s)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
@@ -253,7 +254,8 @@
 	}
 }
 
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_s)
 {
 	/* TODO: write common plane control register if available */
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index ef6182b..8bc62ec 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -29,6 +29,115 @@
 
 #define ATMEL_HLCDC_LAYER_IRQS_OFFSET		8
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 1280,
+	.max_height = 860,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
+	.layers = atmel_hlcdc_at91sam9n12_layers,
+};
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x100,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x280,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 17,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+	{
+		.name = "cursor",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_CURSOR_LAYER,
+		.nconfigs = 10,
+		.max_width = 128,
+		.max_height = 128,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 800,
+	.max_height = 600,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
+	.layers = atmel_hlcdc_at91sam9x5_layers,
+};
+
 static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
 	{
 		.name = "base",
@@ -132,11 +241,105 @@
 	.layers = atmel_hlcdc_sama5d3_layers,
 };
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 7,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x140,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "overlay2",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x240,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 42,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 2048,
+	.max_height = 2048,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
+	.layers = atmel_hlcdc_sama5d4_layers,
+};
 static const struct of_device_id atmel_hlcdc_of_match[] = {
 	{
+		.compatible = "atmel,at91sam9n12-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9n12,
+	},
+	{
+		.compatible = "atmel,at91sam9x5-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9x5,
+	},
+	{
 		.compatible = "atmel,sama5d3-hlcdc",
 		.data = &atmel_hlcdc_dc_sama5d3,
 	},
+	{
+		.compatible = "atmel,sama5d4-hlcdc",
+		.data = &atmel_hlcdc_dc_sama5d4,
+	},
 	{ /* sentinel */ },
 };
 
@@ -485,7 +688,9 @@
 };
 
 static struct drm_driver atmel_hlcdc_dc_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+			   DRIVER_MODESET | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
 	.preclose = atmel_hlcdc_dc_preclose,
 	.lastclose = atmel_hlcdc_dc_lastclose,
 	.irq_handler = atmel_hlcdc_dc_irq_handler,
@@ -497,6 +702,15 @@
 	.disable_vblank = atmel_hlcdc_dc_disable_vblank,
 	.gem_free_object = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 9c45130..067e4c1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -126,12 +126,16 @@
 
 	if (info->num_bus_formats) {
 		switch (info->bus_formats[0]) {
+		case MEDIA_BUS_FMT_RGB565_1X16:
+			cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
+			break;
 		case MEDIA_BUS_FMT_RGB666_1X18:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
 			break;
 		case MEDIA_BUS_FMT_RGB888_1X24:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
 			break;
+		case MEDIA_BUS_FMT_RGB444_1X12:
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bd..7f1a360 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
 		console_unlock();
 	}
 
@@ -126,7 +126,7 @@
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
 		console_unlock();
 	}
 
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d979..09a0637 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@
 {
 	struct bochs_device *bochs =
 		container_of(helper, struct bochs_device, fb.helper);
-	struct drm_device *dev = bochs->dev;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	struct drm_gem_object *gobj = NULL;
 	struct bochs_bo *bo = NULL;
 	int size, ret;
@@ -106,22 +104,23 @@
 	ttm_bo_unreserve(&bo->bo);
 
 	/* init fb device */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = &bochs->fb.helper;
 
 	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
-	if (ret)
+	if (ret) {
+		drm_fb_helper_release_fbi(helper);
 		return ret;
+	}
 
 	bochs->fb.size = size;
 
 	/* setup helper */
 	fb = &bochs->fb.gfb.base;
 	bochs->fb.helper.fb = fb;
-	bochs->fb.helper.fbdev = info;
 
 	strcpy(info->fix.id, "bochsdrmfb");
 
@@ -139,30 +138,17 @@
 	info->fix.smem_start = 0;
 	info->fix.smem_len = size;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static int bochs_fbdev_destroy(struct bochs_device *bochs)
 {
 	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
-	struct fb_info *info;
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (bochs->fb.helper.fbdev) {
-		info = bochs->fb.helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&bochs->fb.helper);
+	drm_fb_helper_release_fbi(&bochs->fb.helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff..f69e6bf 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@
 			   uint32_t handle, uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct bochs_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_bochs_bo(obj);
 	*offset = bochs_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index acef322..2de52a5 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,24 +1,32 @@
+config DRM_BRIDGE
+	def_bool y
+	depends on DRM
+	help
+	  Bridge registration and lookup framework.
+
+menu "Display Interface Bridges"
+	depends on DRM && DRM_BRIDGE
+
 config DRM_DW_HDMI
 	tristate
-	depends on DRM
 	select DRM_KMS_HELPER
 
-config DRM_PTN3460
-	tristate "PTN3460 DP/LVDS bridge"
-	depends on DRM
+config DRM_NXP_PTN3460
+	tristate "NXP PTN3460 DP/LVDS bridge"
 	depends on OF
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	---help---
-	  ptn3460 eDP-LVDS bridge chip driver.
+	  NXP PTN3460 eDP-LVDS bridge chip driver.
 
-config DRM_PS8622
+config DRM_PARADE_PS8622
 	tristate "Parade eDP/LVDS bridge"
-	depends on DRM
 	depends on OF
 	select DRM_PANEL
 	select DRM_KMS_HELPER
 	select BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
-	  parade eDP-LVDS bridge chip driver.
+	  Parade eDP-LVDS bridge chip driver.
+
+endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 8dfebd9..e2eef1c 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm
 
-obj-$(CONFIG_DRM_PS8622) += ps8622.o
-obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
+obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
+obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 816d104..0083d4e 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -18,6 +18,7 @@
 #include <linux/hdmi.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
+#include <linux/spinlock.h>
 
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
@@ -81,10 +82,6 @@
 };
 
 struct hdmi_vmode {
-	bool mdvi;
-	bool mhsyncpolarity;
-	bool mvsyncpolarity;
-	bool minterlaced;
 	bool mdataenablepolarity;
 
 	unsigned int mpixelclock;
@@ -123,12 +120,20 @@
 	bool phy_enabled;
 	struct drm_display_mode previous_mode;
 
-	struct regmap *regmap;
 	struct i2c_adapter *ddc;
 	void __iomem *regs;
+	bool sink_is_hdmi;
+	bool sink_has_audio;
 
+	struct mutex mutex;		/* for state below and previous_mode */
+	bool disabled;			/* DRM has disabled our bridge */
+
+	spinlock_t audio_lock;
 	struct mutex audio_mutex;
 	unsigned int sample_rate;
+	unsigned int audio_cts;
+	unsigned int audio_n;
+	bool audio_enable;
 	int ratio;
 
 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
@@ -335,42 +340,76 @@
 }
 
 static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
-				     unsigned long pixel_clk)
+	unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
 {
-	unsigned int clk_n, clk_cts;
+	unsigned int n, cts;
 
-	clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
-			       hdmi->ratio);
-	clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
-				   hdmi->ratio);
-
-	if (!clk_cts) {
-		dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
-			__func__, pixel_clk);
-		return;
+	n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
+	cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
+	if (!cts) {
+		dev_err(hdmi->dev,
+			"%s: pixel clock/sample rate not supported: %luHz / %uHz\n",
+			__func__, pixel_clk, sample_rate);
 	}
 
-	dev_dbg(hdmi->dev, "%s: samplerate=%d  ratio=%d  pixelclk=%lu  N=%d cts=%d\n",
-		__func__, hdmi->sample_rate, hdmi->ratio,
-		pixel_clk, clk_n, clk_cts);
+	dev_dbg(hdmi->dev, "%s: samplerate=%uHz ratio=%d pixelclk=%luHz N=%d cts=%d\n",
+		__func__, sample_rate, ratio, pixel_clk, n, cts);
 
-	hdmi_set_cts_n(hdmi, clk_cts, clk_n);
+	spin_lock_irq(&hdmi->audio_lock);
+	hdmi->audio_n = n;
+	hdmi->audio_cts = cts;
+	hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0);
+	spin_unlock_irq(&hdmi->audio_lock);
 }
 
 static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, 74250000);
+	hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
+				 hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
 static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
+{
+	mutex_lock(&hdmi->audio_mutex);
+	hdmi->sample_rate = rate;
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
+	mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = true;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable);
+
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = false;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
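
These three exports give an audio driver atomic control over the CTS/N
pair (audio_lock serialises them against mode-set updates). A hedged
sketch of the expected calling sequence; the hdmi pointer plumbing is
hypothetical, since this hunk adds no in-tree user:

    dw_hdmi_set_sample_rate(hdmi, 44100);   /* recompute N/CTS for the stream */
    dw_hdmi_audio_enable(hdmi);             /* program CTS with the real N */
    /* ... stream plays ... */
    dw_hdmi_audio_disable(hdmi);            /* keep CTS, write N = 0 to mute */
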
+
 /*
  * this submodule is responsible for the video data synchronization.
  * for example, for RGB 4:4:4 input, the data map is defined as
@@ -701,9 +740,9 @@
 	return 0;
 }
 
-static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
 {
-	hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+	hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
 			 HDMI_PHY_CONF0_PDZ_OFFSET,
 			 HDMI_PHY_CONF0_PDZ_MASK);
 }
@@ -753,12 +792,12 @@
 static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 			      unsigned char res, int cscon)
 {
-	unsigned res_idx, i;
+	unsigned res_idx;
 	u8 val, msec;
-	const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
-	const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
-	const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
-	const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
+	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+	const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
+	const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
+	const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
 
 	if (prep)
 		return -EINVAL;
@@ -778,6 +817,30 @@
 		return -EINVAL;
 	}
 
+	/* PLL/MPLL Cfg - always match on final entry */
+	for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    mpll_config->mpixelclock)
+			break;
+
+	for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    curr_ctrl->mpixelclock)
+			break;
+
+	for (; phy_config->mpixelclock != ~0UL; phy_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    phy_config->mpixelclock)
+			break;
+
+	if (mpll_config->mpixelclock == ~0UL ||
+	    curr_ctrl->mpixelclock == ~0UL ||
+	    phy_config->mpixelclock == ~0UL) {
+		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
+			hdmi->hdmi_data.video_mode.mpixelclock);
+		return -EINVAL;
+	}
+
 	/* Enable csc path */
 	if (cscon)
 		val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
@@ -803,48 +866,23 @@
 		    HDMI_PHY_I2CM_SLAVE_ADDR);
 	hdmi_phy_test_clear(hdmi, 0);
 
-	/* PLL/MPLL Cfg - always match on final entry */
-	for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    mpll_config[i].mpixelclock)
-			break;
-
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
-
-	for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    curr_ctrl[i].mpixelclock)
-			break;
-
-	if (curr_ctrl[i].mpixelclock == (~0UL)) {
-		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
-			hdmi->hdmi_data.video_mode.mpixelclock);
-		return -EINVAL;
-	}
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06);
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15);
 
 	/* CURRCTRL */
-	hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+	hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10);
 
 	hdmi_phy_i2c_write(hdmi, 0x0000, 0x13);  /* PLLPHBYCTRL */
 	hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
 
-	for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    phy_config[i].mpixelclock)
-			break;
-
-	/* RESISTANCE TERM 133Ohm Cfg */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19);  /* TXTERM */
-	/* PREEMP Cgf 0.00 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
-	/* TX/CK LVL 10 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19);  /* TXTERM */
+	hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */
 
 	/* REMOVE CLK TERM */
 	hdmi_phy_i2c_write(hdmi, 0x8000, 0x05);  /* CKCALCTRL */
 
-	dw_hdmi_phy_enable_power(hdmi, 1);
+	dw_hdmi_phy_enable_powerdown(hdmi, false);
 
 	/* toggle TMDS enable */
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
@@ -879,18 +917,17 @@
 static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
 {
 	int i, ret;
-	bool cscon = false;
+	bool cscon;
 
 	/*check csc whether needed activated in HDMI mode */
-	cscon = (is_color_space_conversion(hdmi) &&
-			!hdmi->hdmi_data.video_mode.mdvi);
+	cscon = hdmi->sink_is_hdmi && is_color_space_conversion(hdmi);
 
 	/* HDMI Phy spec says to do the phy initialization sequence twice */
 	for (i = 0; i < 2; i++) {
 		dw_hdmi_phy_sel_data_en_pol(hdmi, 1);
 		dw_hdmi_phy_sel_interface_control(hdmi, 0);
 		dw_hdmi_phy_enable_tmds(hdmi, 0);
-		dw_hdmi_phy_enable_power(hdmi, 0);
+		dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 		/* Enable CSC */
 		ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
@@ -921,74 +958,76 @@
 		  HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
 }
 
-static void hdmi_config_AVI(struct dw_hdmi *hdmi)
+static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 {
-	u8 val, pix_fmt, under_scan;
-	u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
-	bool aspect_16_9;
+	struct hdmi_avi_infoframe frame;
+	u8 val;
 
-	aspect_16_9 = false; /* FIXME */
+	/* Initialise info frame from DRM mode */
+	drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 
-	/* AVI Data Byte 1 */
 	if (hdmi->hdmi_data.enc_out_format == YCBCR444)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+		frame.colorspace = HDMI_COLORSPACE_YUV444;
 	else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+		frame.colorspace = HDMI_COLORSPACE_YUV422;
 	else
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
-
-		under_scan =  HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
-
-	/*
-	 * Active format identification data is present in the AVI InfoFrame.
-	 * Under scan info, no bar data
-	 */
-	val = pix_fmt | under_scan |
-		HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
-		HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
-
-	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
-
-	/* AVI Data Byte 2 -Set the Aspect Ratio */
-	if (aspect_16_9) {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
-	} else {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
-	}
+		frame.colorspace = HDMI_COLORSPACE_RGB;
 
 	/* Set up colorimetry */
 	if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+		frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
 		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
 	} else if (hdmi->hdmi_data.enc_out_format != RGB) {
-		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
-		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = hdmi->hdmi_data.colorimetry;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	} else { /* Carries no data */
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = HDMI_COLORIMETRY_NONE;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	}
 
-	val = colorimetry | coded_ratio | act_ratio;
+	frame.scan_mode = HDMI_SCAN_MODE_NONE;
+
+	/*
+	 * The Designware IP uses a different byte format from standard
+	 * AVI info frames, though generally the bits are in the correct
+	 * bytes.
+	 */
+
+	/*
+	 * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
+	 * active aspect present in bit 6 rather than 4.
+	 */
+	val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+	if (frame.active_aspect & 15)
+		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
+	if (frame.top_bar || frame.bottom_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR;
+	if (frame.left_bar || frame.right_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+	/* AVI data byte 2 differences: none */
+	val = ((frame.colorimetry & 0x3) << 6) |
+	      ((frame.picture_aspect & 0x3) << 4) |
+	      (frame.active_aspect & 0xf);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
 
-	/* AVI Data Byte 3 */
-	val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
-		HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
-		HDMI_FC_AVICONF2_SCALING_NONE;
+	/* AVI data byte 3 differences: none */
+	val = ((frame.extended_colorimetry & 0x7) << 4) |
+	      ((frame.quantization_range & 0x3) << 2) |
+	      (frame.nups & 0x3);
+	if (frame.itc)
+		val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID;
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
 
-	/* AVI Data Byte 4 */
-	hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+	/* AVI data byte 4 differences: none */
+	val = frame.video_code & 0x7f;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVIVID);
 
 	/* AVI Data Byte 5- set up input and output pixel repetition */
 	val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
@@ -999,20 +1038,23 @@
 		HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
 	hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
 
-	/* IT Content and quantization range = don't care */
-	val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
-		HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+	/*
+	 * AVI data byte 5 differences: content type in 0,1 rather than 4,5,
+	 * ycc range in bits 2,3 rather than 6,7
+	 */
+	val = ((frame.ycc_quantization_range & 0x3) << 2) |
+	      (frame.content_type & 0x3);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
 
 	/* AVI Data Bytes 6-13 */
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+	hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0);
+	hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1);
+	hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0);
+	hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1);
+	hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0);
+	hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1);
+	hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0);
+	hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1);
 }
 
 static void hdmi_av_composer(struct dw_hdmi *hdmi,
@@ -1022,9 +1064,6 @@
 	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
 	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
 
-	vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
-	vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
-	vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
 	vmode->mpixelclock = mode->clock * 1000;
 
 	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
@@ -1034,13 +1073,13 @@
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
-	inv_val |= (vmode->mvsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
 		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW;
 
-	inv_val |= (vmode->mhsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
 		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW;
 
 	inv_val |= (vmode->mdataenablepolarity ?
 		HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
@@ -1049,17 +1088,17 @@
 	if (hdmi->vic == 39)
 		inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
 	else
-		inv_val |= (vmode->minterlaced ?
+		inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
-			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW;
 
-	inv_val |= (vmode->minterlaced ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 		HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
-		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE;
 
-	inv_val |= (vmode->mdvi ?
-		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
-		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+	inv_val |= hdmi->sink_is_hdmi ?
+		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE :
+		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE;
 
 	hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
@@ -1105,7 +1144,7 @@
 		return;
 
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
-	dw_hdmi_phy_enable_power(hdmi, 0);
+	dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 	hdmi->phy_enabled = false;
 }
@@ -1186,10 +1225,8 @@
 
 	if (!hdmi->vic) {
 		dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
-		hdmi->hdmi_data.video_mode.mdvi = true;
 	} else {
 		dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
-		hdmi->hdmi_data.video_mode.mdvi = false;
 	}
 
 	if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
@@ -1200,18 +1237,7 @@
 	else
 		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
 
-	if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
-	    (hdmi->vic == 12) || (hdmi->vic == 13) ||
-	    (hdmi->vic == 14) || (hdmi->vic == 15) ||
-	    (hdmi->vic == 25) || (hdmi->vic == 26) ||
-	    (hdmi->vic == 27) || (hdmi->vic == 28) ||
-	    (hdmi->vic == 29) || (hdmi->vic == 30) ||
-	    (hdmi->vic == 35) || (hdmi->vic == 36) ||
-	    (hdmi->vic == 37) || (hdmi->vic == 38))
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
-	else
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
-
+	hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
 	hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
 
 	/* TODO: Get input format from IPU (via FB driver interface) */
@@ -1235,18 +1261,22 @@
 	/* HDMI Initialization Step B.3 */
 	dw_hdmi_enable_video_path(hdmi);
 
-	/* not for DVI mode */
-	if (hdmi->hdmi_data.video_mode.mdvi) {
-		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
-	} else {
-		dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+	if (hdmi->sink_has_audio) {
+		dev_dbg(hdmi->dev, "sink has audio support\n");
 
 		/* HDMI Initialization Step E - Configure audio */
 		hdmi_clk_regenerator_update_pixel_clock(hdmi);
 		hdmi_enable_audio_clk(hdmi);
+	}
+
+	/* not for DVI mode */
+	if (hdmi->sink_is_hdmi) {
+		dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__);
 
 		/* HDMI Initialization Step F - Configure AVI InfoFrame */
-		hdmi_config_AVI(hdmi);
+		hdmi_config_AVI(hdmi, mode);
+	} else {
+		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
 	}
 
 	hdmi_video_packetize(hdmi);
@@ -1255,7 +1285,7 @@
 	hdmi_tx_hdcp_config(hdmi);
 
 	dw_hdmi_clear_overflow(hdmi);
-	if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+	if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
 		hdmi_enable_overflow_interrupts(hdmi);
 
 	return 0;
@@ -1348,10 +1378,12 @@
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
-	dw_hdmi_setup(hdmi, mode);
+	mutex_lock(&hdmi->mutex);
 
 	/* Store the display mode for plugin/DKMS poweron events */
 	memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+
+	mutex_unlock(&hdmi->mutex);
 }
 
 static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -1365,14 +1397,20 @@
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
+	hdmi->disabled = true;
 	dw_hdmi_poweroff(hdmi);
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
 	dw_hdmi_poweron(hdmi);
+	hdmi->disabled = false;
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
@@ -1405,6 +1443,8 @@
 		dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
 			edid->width_cm, edid->height_cm);
 
+		hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+		hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 		kfree(edid);
@@ -1423,6 +1463,10 @@
 					   struct dw_hdmi, connector);
 	enum drm_mode_status mode_status = MODE_OK;
 
+	/* We don't support double-clocked modes */
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_BAD;
+
 	if (hdmi->plat_data->mode_valid)
 		mode_status = hdmi->plat_data->mode_valid(connector, mode);
 
@@ -1489,21 +1533,21 @@
 	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
 
 	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+		hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+		mutex_lock(&hdmi->mutex);
 		if (phy_int_pol & HDMI_PHY_HPD) {
 			dev_dbg(hdmi->dev, "EVENT=plugin\n");
 
-			hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
-
-			dw_hdmi_poweron(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweron(hdmi);
 		} else {
 			dev_dbg(hdmi->dev, "EVENT=plugout\n");
 
-			hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
-				  HDMI_PHY_POL0);
-
-			dw_hdmi_poweroff(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweroff(hdmi);
 		}
-		drm_helper_hpd_irq_event(hdmi->connector.dev);
+		mutex_unlock(&hdmi->mutex);
+		drm_helper_hpd_irq_event(hdmi->bridge->dev);
 	}
 
 	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
@@ -1570,8 +1614,11 @@
 	hdmi->sample_rate = 48000;
 	hdmi->ratio = 100;
 	hdmi->encoder = encoder;
+	hdmi->disabled = true;
 
+	mutex_init(&hdmi->mutex);
 	mutex_init(&hdmi->audio_mutex);
+	spin_lock_init(&hdmi->audio_lock);
 
 	of_property_read_u32(np, "reg-io-width", &val);
 
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index 175dbc8..ee7f7ed 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -7,8 +7,8 @@
  * (at your option) any later version.
  */
 
-#ifndef __IMX_HDMI_H__
-#define __IMX_HDMI_H__
+#ifndef __DW_HDMI_H__
+#define __DW_HDMI_H__
 
 /* Identification Registers */
 #define HDMI_DESIGN_ID                          0x0000
@@ -525,7 +525,7 @@
 
 /* I2C Master Registers (E-DDC) */
 #define HDMI_I2CM_SLAVE                         0x7E00
-#define HDMI_I2CMESS                            0x7E01
+#define HDMI_I2CM_ADDRESS                       0x7E01
 #define HDMI_I2CM_DATAO                         0x7E02
 #define HDMI_I2CM_DATAI                         0x7E03
 #define HDMI_I2CM_OPERATION                     0x7E04
@@ -1031,4 +1031,4 @@
 	HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
 };
 
-#endif /* __IMX_HDMI_H__ */
+#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
similarity index 100%
rename from drivers/gpu/drm/bridge/ptn3460.c
rename to drivers/gpu/drm/bridge/nxp-ptn3460.c
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
similarity index 100%
rename from drivers/gpu/drm/bridge/ps8622.c
rename to drivers/gpu/drm/bridge/parade-ps8622.c
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b914003..b1619e2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
 		console_unlock();
 	}
 
@@ -109,7 +109,7 @@
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
 		console_unlock();
 	}
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c..589103b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -107,7 +107,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -116,7 +116,7 @@
 			  const struct fb_image *image)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -165,12 +165,10 @@
 {
 	struct cirrus_fbdev *gfbdev =
 		container_of(helper, struct cirrus_fbdev, helper);
-	struct drm_device *dev = gfbdev->helper.dev;
 	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = gfbdev;
 
@@ -216,11 +214,9 @@
 
 	/* setup helper */
 	gfbdev->helper.fb = fb;
-	gfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "cirrusdrmfb");
 
-
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &cirrusfb_ops;
 
@@ -229,11 +225,6 @@
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
 	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = cdev->mc.vram_size;
 
@@ -246,13 +237,6 @@
 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
-
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
 	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
 	return 0;
-out_iounmap:
-	return ret;
 }
 
 static int cirrus_fbdev_destroy(struct drm_device *dev,
 				struct cirrus_fbdev *gfbdev)
 {
-	struct fb_info *info;
 	struct cirrus_framebuffer *gfb = &gfbdev->gfb;
 
-	if (gfbdev->helper.fbdev) {
-		info = gfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&gfbdev->helper);
+	drm_fb_helper_release_fbi(&gfbdev->helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
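
The conversion above concentrates the fb_info bookkeeping in the new helpers: drm_fb_helper_alloc_fbi() replaces the framebuffer_alloc()/fb_alloc_cmap()/alloc_apertures() triple, and the unregister/release pair replaces the hand-rolled teardown. A hypothetical fb_probe body using the new API might look like this (example_* names are placeholders, and the framebuffer creation step is elided):

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_framebuffer *fb,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	/* allocates fb_info plus its cmap and apertures in one call */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	helper->fb = fb;
	info->par = helper;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);
	return 0;
}
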
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b9766..055fd86 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct cirrus_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_cirrus_bo(obj);
 	*offset = cirrus_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
 
+	return 0;
 }
 
 bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
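
The dumb-mmap hunk above is safe without struct_mutex because drm_gem_object_lookup() takes its own reference on the object; the reference, not the lock, is what keeps the bo and its mmap offset stable. The same lines, annotated:

	obj = drm_gem_object_lookup(dev, file, handle);	/* grabs a reference */
	if (obj == NULL)
		return -ENOENT;

	bo = gem_to_cirrus_bo(obj);
	*offset = cirrus_bo_mmap_offset(bo);	/* stable while the ref is held */

	drm_gem_object_unreference_unlocked(obj);	/* drops it without struct_mutex */
	return 0;
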
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f6f2fb5..4349154 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@
 		if (!connector)
 			continue;
 
-		WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-		connector->funcs->atomic_destroy_state(connector,
+		/*
+		 * FIXME: Async commits can race with connector unplugging and
+		 * there's currently nothing that prevents cleaning up state for
+		 * deleted connectors. As long as the callback doesn't look at
+		 * the connector we'll be fine though, so make sure that's the
+		 * case by setting all connector pointers to NULL.
+		 */
+		state->connector_states[i]->connector = NULL;
+		connector->funcs->atomic_destroy_state(NULL,
 						       state->connector_states[i]);
 		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
@@ -1063,7 +1069,7 @@
 	 * Changed connectors are already in @state, so only need to look at the
 	 * current configuration.
 	 */
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, state->dev) {
 		if (connector->state->crtc != crtc)
 			continue;
 
@@ -1463,24 +1469,18 @@
 
 		if (get_user(obj_id, objs_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
 		if (!obj || !obj->properties) {
 			ret = -ENOENT;
-			goto fail;
-		}
-
-		if (obj->type == DRM_MODE_OBJECT_PLANE) {
-			plane = obj_to_plane(obj);
-			plane_mask |= (1 << drm_plane_index(plane));
-			plane->old_fb = plane->fb;
+			goto out;
 		}
 
 		if (get_user(count_props, count_props_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		copied_objs++;
@@ -1492,28 +1492,34 @@
 
 			if (get_user(prop_id, props_ptr + copied_props)) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			prop = drm_property_find(dev, prop_id);
 			if (!prop) {
 				ret = -ENOENT;
-				goto fail;
+				goto out;
 			}
 
 			if (copy_from_user(&prop_value,
 					   prop_values_ptr + copied_props,
 					   sizeof(prop_value))) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			ret = atomic_set_prop(state, obj, prop, prop_value);
 			if (ret)
-				goto fail;
+				goto out;
 
 			copied_props++;
 		}
+
+		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+			plane = obj_to_plane(obj);
+			plane_mask |= (1 << drm_plane_index(plane));
+			plane->old_fb = plane->fb;
+		}
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1529,7 @@
 			e = create_vblank_event(dev, file_priv, arg->user_data);
 			if (!e) {
 				ret = -ENOMEM;
-				goto fail;
+				goto out;
 			}
 
 			crtc_state->event = e;
@@ -1533,13 +1539,15 @@
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		ret = drm_atomic_check_only(state);
 		/* _check_only() does not free state, unlike _commit() */
-		drm_atomic_state_free(state);
+		if (!ret)
+			drm_atomic_state_free(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
 		ret = drm_atomic_async_commit(state);
 	} else {
 		ret = drm_atomic_commit(state);
 	}
 
+out:
 	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
 	 * locks (ie. while it is still safe to deref plane->state).  We
 	 * need to do this here because the driver entry points cannot
@@ -1552,41 +1560,35 @@
 				drm_framebuffer_reference(new_fb);
 			plane->fb = new_fb;
 			plane->crtc = plane->state->crtc;
-		} else {
-			plane->old_fb = NULL;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
 		}
-		if (plane->old_fb) {
-			drm_framebuffer_unreference(plane->old_fb);
-			plane->old_fb = NULL;
+		plane->old_fb = NULL;
+	}
+
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	if (ret) {
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			for_each_crtc_in_state(state, crtc, crtc_state, i) {
+				if (!crtc_state->event)
+					continue;
+
+				destroy_vblank_event(dev, file_priv,
+						     crtc_state->event);
+			}
 		}
+
+		drm_atomic_state_free(state);
 	}
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
 	return ret;
-
-fail:
-	if (ret == -EDEADLK)
-		goto backoff;
-
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			destroy_vblank_event(dev, file_priv, crtc_state->event);
-			crtc_state->event = NULL;
-		}
-	}
-
-	drm_atomic_state_free(state);
-
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	return ret;
-
-backoff:
-	drm_atomic_state_clear(state);
-	drm_modeset_backoff(&ctx);
-
-	goto retry;
 }
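
The restructured ioctl above folds the old fail:/backoff: labels into one exit path; the underlying deadlock-avoidance idiom is unchanged. As a sketch, stripped of the property parsing (build_and_commit() stands in for the body that takes locks through the acquire context):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = build_and_commit(state, &ctx);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* drop stale duplicated state */
		drm_modeset_backoff(&ctx);	/* release locks, wait for contender */
		goto retry;
	}
	if (ret)
		drm_atomic_state_free(state);	/* on success the driver owns state */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
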
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3e65daa3..aecb5d6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -89,7 +89,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (connector->state->best_encoder != encoder)
 			continue;
 
@@ -124,7 +124,7 @@
 	if (IS_ERR(crtc_state))
 		return PTR_ERR(crtc_state);
 
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	list_for_each_entry(connector, &config->connector_list, head) {
 		if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@
 			idx = drm_crtc_index(connector->state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 
 		if (connector_state->crtc) {
 			idx = drm_crtc_index(connector_state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -241,7 +241,7 @@
 	idx = drm_crtc_index(connector_state->crtc);
 
 	crtc_state = state->crtc_states[idx];
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
 			 connector->base.id,
@@ -264,7 +264,8 @@
 	bool ret;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -306,7 +307,7 @@
 						 encoder->base.id, encoder->name);
 				return ret;
 			}
-		} else {
+		} else if (funcs->mode_fixup) {
 			ret = funcs->mode_fixup(encoder, &crtc_state->mode,
 						&crtc_state->adjusted_mode);
 			if (!ret) {
@@ -320,7 +321,8 @@
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
 
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		funcs = crtc->helper_private;
@@ -346,9 +348,14 @@
  *
  * Check the state object to see if the requested state is physically possible.
  * This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update, adds any additional connectors needed for full modesets, and calls
+ * down into the ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added to or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
  *
  * IMPORTANT:
  *
@@ -381,7 +388,17 @@
 		if (crtc->state->enable != crtc_state->enable) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
 					 crtc->base.id);
+
+			/*
+			 * For clarity this assignment is done here, but
+			 * enable == 0 is only true when there are no
+			 * connectors and a NULL mode.
+			 *
+			 * The converse also holds: enable != 0 iff
+			 * connectors are attached and a mode is set.
+			 */
 			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -456,6 +473,9 @@
  * This does all the plane update related checks by calling into the
  * ->atomic_check hooks provided by the driver.
  *
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
  * RETURNS
  * Zero for success or -errno
  */
@@ -648,15 +668,29 @@
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	/* clear out existing links */
+	/* clear out existing links and update dpms */
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
-		if (!connector->encoder)
-			continue;
+		if (connector->encoder) {
+			WARN_ON(!connector->encoder->crtc);
 
-		WARN_ON(!connector->encoder->crtc);
+			connector->encoder->crtc = NULL;
+			connector->encoder = NULL;
+		}
 
-		connector->encoder->crtc = NULL;
-		connector->encoder = NULL;
+		crtc = connector->state->crtc;
+		if ((!crtc && old_conn_state->crtc) ||
+		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+			struct drm_property *dpms_prop =
+				dev->mode_config.dpms_property;
+			int mode = DRM_MODE_DPMS_OFF;
+
+			if (crtc && crtc->state->active)
+				mode = DRM_MODE_DPMS_ON;
+
+			connector->dpms = mode;
+			drm_object_property_set_value(&connector->base,
+						      dpms_prop, mode);
+		}
 	}
 
 	/* set new links */
@@ -673,10 +707,16 @@
 
 	/* set legacy state in the crtc structure */
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct drm_plane *primary = crtc->primary;
+
 		crtc->mode = crtc->state->mode;
 		crtc->enabled = crtc->state->enable;
-		crtc->x = crtc->primary->state->src_x >> 16;
-		crtc->y = crtc->primary->state->src_y >> 16;
+
+		if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+		    primary->state->crtc == crtc) {
+			crtc->x = primary->state->src_x >> 16;
+			crtc->y = primary->state->src_y >> 16;
+		}
 
 		if (crtc->state->enable)
 			drm_calc_timestamping_constants(crtc,
@@ -926,7 +966,7 @@
 			continue;
 
 		old_crtc_state->enable = true;
-		old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+		old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -935,7 +975,7 @@
 
 		ret = wait_event_timeout(dev->vblank[i].queue,
 				old_crtc_state->last_vblank_count !=
-					drm_vblank_count(dev, i),
+					drm_crtc_vblank_count(crtc),
 				msecs_to_jiffies(50));
 
 		drm_crtc_vblank_put(crtc);
@@ -1146,7 +1186,7 @@
 		if (!funcs || !funcs->atomic_begin)
 			continue;
 
-		funcs->atomic_begin(crtc);
+		funcs->atomic_begin(crtc, old_crtc_state);
 	}
 
 	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1176,7 +1216,7 @@
 		if (!funcs || !funcs->atomic_flush)
 			continue;
 
-		funcs->atomic_flush(crtc);
+		funcs->atomic_flush(crtc, old_crtc_state);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1212,7 +1252,7 @@
 
 	crtc_funcs = crtc->helper_private;
 	if (crtc_funcs && crtc_funcs->atomic_begin)
-		crtc_funcs->atomic_begin(crtc);
+		crtc_funcs->atomic_begin(crtc, old_crtc_state);
 
 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
 		struct drm_plane_state *old_plane_state =
@@ -1235,7 +1275,7 @@
 	}
 
 	if (crtc_funcs && crtc_funcs->atomic_flush)
-		crtc_funcs->atomic_flush(crtc);
+		crtc_funcs->atomic_flush(crtc, old_crtc_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
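Since both commit_planes variants above now pass the old CRTC state, driver vtables must grow the extra parameter. A minimal sketch of the updated hook signature (example_* names are placeholders):

static void example_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	/* old_crtc_state lets the hook diff against the previous config */
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.atomic_begin = example_crtc_atomic_begin,
	/* ->atomic_flush takes the same extra argument */
};
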
@@ -1923,10 +1963,6 @@
 	if (ret != 0)
 		goto fail;
 
-	/* TODO: ->page_flip is the only driver callback where the core
-	 * doesn't update plane->fb. For now patch it up here. */
-	plane->fb = plane->state->fb;
-
 	/* Driver takes ownership of state on successful async commit. */
 	return 0;
 fail:
@@ -1960,9 +1996,12 @@
  * implementing the legacy DPMS connector interface. It computes the new desired
  * ->active state for the corresponding CRTC (if the connector is enabled) and
  *  updates it.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
  */
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode)
 {
 	struct drm_mode_config *config = &connector->dev->mode_config;
 	struct drm_atomic_state *state;
@@ -1971,6 +2010,7 @@
 	struct drm_connector *tmp_connector;
 	int ret;
 	bool active = false;
+	int old_mode = connector->dpms;
 
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
@@ -1979,22 +2019,23 @@
 	crtc = connector->state->crtc;
 
 	if (!crtc)
-		return;
+		return 0;
 
-	/* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
 	state = drm_atomic_state_alloc(connector->dev);
 	if (!state)
-		return;
+		return -ENOMEM;
 
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state))
-		return;
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp_connector, &config->connector_list, head) {
+	drm_for_each_connector(tmp_connector, connector->dev) {
 		if (tmp_connector->state->crtc != crtc)
 			continue;
 
@@ -2009,17 +2050,16 @@
 	if (ret != 0)
 		goto fail;
 
-	/* Driver takes ownership of state on successful async commit. */
-	return;
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
+	connector->dpms = old_mode;
 	drm_atomic_state_free(state);
 
-	WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
-	return;
+	return ret;
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2080,6 +2120,7 @@
 	state->mode_changed = false;
 	state->active_changed = false;
 	state->planes_changed = false;
+	state->connectors_changed = false;
 	state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
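
With drm_atomic_helper_connector_dpms() now returning an errno, atomic drivers keep plugging it straight into their connector funcs and get error propagation for free; a sketch (hypothetical connector):

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,	/* now returns 0 or -errno */
	/* ... destroy, detect, fill_modes, etc. as before ... */
};
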
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525..192a5f9 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -53,6 +53,10 @@
  */
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	idr_init(&dev->ctx_idr);
-	return 0;
 }
 
 /**
@@ -101,6 +108,10 @@
  */
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
 	mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@
 {
 	struct drm_ctx_list *pos, *tmp;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->ctxlist_mutex);
 
 	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -305,6 +328,10 @@
 	struct drm_ctx ctx;
 	int i;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	ctx->handle = drm_legacy_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -400,6 +435,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -420,6 +459,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -442,6 +485,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
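
Each legacy-context entry point above now opens with the same two-feature test. Read as a predicate, the guard condenses to the following (drm_legacy_ctx_allowed() is an illustrative name, not an API added by the patch):

static bool drm_legacy_ctx_allowed(struct drm_device *dev)
{
	/* allow if the driver opted into legacy contexts explicitly,
	 * or if it is not a modesetting driver at all */
	return drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) ||
	       !drm_core_check_feature(dev, DRIVER_MODESET);
}
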
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fed7483..33d877c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -615,7 +615,7 @@
 	if (atomic_read(&fb->refcount.refcount) > 1) {
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		drm_for_each_crtc(crtc, dev) {
 			if (crtc->primary->fb == fb) {
 				/* should turn off the crtc */
 				memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@
 			}
 		}
 
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			if (plane->fb == fb)
 				drm_plane_force_disable(plane);
 		}
@@ -736,7 +736,7 @@
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp == crtc)
 			return index;
 
@@ -988,7 +988,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+	drm_for_each_connector(tmp, connector->dev) {
 		if (tmp == connector)
 			return index;
 
@@ -1054,7 +1054,7 @@
 {
 	struct drm_connector *connector;
 
-	/* taking the mode config mutex ends up in a clash with sysfs */
+	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		drm_connector_unregister(connector);
 
@@ -1151,7 +1151,7 @@
 int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 			     unsigned long possible_crtcs,
 			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats, uint32_t format_count,
+			     const uint32_t *formats, unsigned int format_count,
 			     enum drm_plane_type type)
 {
 	struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
-		   const uint32_t *formats, uint32_t format_count,
+		   const uint32_t *formats, unsigned int format_count,
 		   bool is_primary)
 {
 	enum drm_plane_type type;
@@ -1280,7 +1280,7 @@
 	unsigned int index = 0;
 	struct drm_plane *tmp;
 
-	list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+	drm_for_each_plane(tmp, plane->dev) {
 		if (tmp == plane)
 			return index;
 
@@ -1305,7 +1305,7 @@
 	struct drm_plane *plane;
 	unsigned int i = 0;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (i == idx)
 			return plane;
 		i++;
@@ -1679,70 +1679,6 @@
 }
 EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
-	uint32_t total_objects = 0;
-
-	total_objects += dev->mode_config.num_crtc;
-	total_objects += dev->mode_config.num_connector;
-	total_objects += dev->mode_config.num_encoder;
-
-	group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
-	if (!group->id_list)
-		return -ENOMEM;
-
-	group->num_crtcs = 0;
-	group->num_connectors = 0;
-	group->num_encoders = 0;
-	return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
-	kfree(group->id_list);
-	group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
-				     struct drm_mode_group *group)
-{
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	int ret;
-
-	ret = drm_mode_group_init(dev, group);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		group->id_list[group->num_crtcs++] = crtc->base.id;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders++] =
-		encoder->base.id;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders +
-			       group->num_connectors++] = connector->base.id;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
-	drm_modeset_lock_all(dev);
-	drm_mode_group_destroy(&dev->primary->mode_group);
-	drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
-	drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
 /**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@
 	int crtc_count = 0;
 	int fb_count = 0;
 	int encoder_count = 0;
-	int copied = 0, i;
+	int copied = 0;
 	uint32_t __user *fb_id;
 	uint32_t __user *crtc_id;
 	uint32_t __user *connector_id;
 	uint32_t __user *encoder_id;
-	struct drm_mode_group *mode_group;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1809,24 +1744,14 @@
 	/* mode_config.mutex protects the connector list against e.g. DP MST
 	 * connector hot-adding. CRTC/Plane lists are invariant. */
 	mutex_lock(&dev->mode_config.mutex);
-	if (!drm_is_primary_client(file_priv)) {
+	drm_for_each_crtc(crtc, dev)
+		crtc_count++;
 
-		mode_group = NULL;
-		list_for_each(lh, &dev->mode_config.crtc_list)
-			crtc_count++;
+	drm_for_each_connector(connector, dev)
+		connector_count++;
 
-		list_for_each(lh, &dev->mode_config.connector_list)
-			connector_count++;
-
-		list_for_each(lh, &dev->mode_config.encoder_list)
-			encoder_count++;
-	} else {
-
-		mode_group = &file_priv->master->minor->mode_group;
-		crtc_count = mode_group->num_crtcs;
-		connector_count = mode_group->num_connectors;
-		encoder_count = mode_group->num_encoders;
-	}
+	drm_for_each_encoder(encoder, dev)
+		encoder_count++;
 
 	card_res->max_height = dev->mode_config.max_height;
 	card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@
 	if (card_res->count_crtcs >= crtc_count) {
 		copied = 0;
 		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-					    head) {
-				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-				if (put_user(crtc->base.id, crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_crtc(crtc, dev) {
+			DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+			if (put_user(crtc->base.id, crtc_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			for (i = 0; i < mode_group->num_crtcs; i++) {
-				if (put_user(mode_group->id_list[i],
-					     crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
+			copied++;
 		}
 	}
 	card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@
 	if (card_res->count_encoders >= encoder_count) {
 		copied = 0;
 		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(encoder,
-					    &dev->mode_config.encoder_list,
-					    head) {
-				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-						encoder->name);
-				if (put_user(encoder->base.id, encoder_id +
-					     copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_encoder(encoder, dev) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+					encoder->name);
+			if (put_user(encoder->base.id, encoder_id +
+				     copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
-				if (put_user(mode_group->id_list[i],
-					     encoder_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-
+			copied++;
 		}
 	}
 	card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@
 	if (card_res->count_connectors >= connector_count) {
 		copied = 0;
 		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(connector,
-					    &dev->mode_config.connector_list,
-					    head) {
-				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-					connector->base.id,
-					connector->name);
-				if (put_user(connector->base.id,
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_connector(connector, dev) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+				connector->base.id,
+				connector->name);
+			if (put_user(connector->base.id,
+				     connector_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			int start = mode_group->num_crtcs +
-				mode_group->num_encoders;
-			for (i = start; i < start + mode_group->num_connectors; i++) {
-				if (put_user(mode_group->id_list[i],
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
+			copied++;
 		}
 	}
 	card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@
 
 	/* For atomic drivers only state objects are synchronously updated and
 	 * protected by modeset locks, so check those first. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->state)
 			continue;
 
@@ -2291,7 +2175,7 @@
 		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
 		/* Plane lists are invariant, no locking needed. */
-		list_for_each_entry(plane, &config->plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			/*
 			 * Unless userspace set the 'universal planes'
 			 * capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@
 	 * connectors from it), hence we need to refcount the fbs across all
 	 * crtcs. Atomic modeset will have saner semantics ...
 	 */
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(tmp, crtc->dev)
 		tmp->primary->old_fb = tmp->primary->fb;
 
 	fb = set->fb;
@@ -2607,7 +2491,7 @@
 		crtc->primary->fb = fb;
 	}
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp->primary->fb)
 			drm_framebuffer_reference(tmp->primary->fb);
 		if (tmp->primary->old_fb)
@@ -4301,7 +4185,6 @@
 		mutex_unlock(&dev->mode_config.blob_lock);
 	else
 		might_lock(&dev->mode_config.blob_lock);
-
 }
 EXPORT_SYMBOL(drm_property_unreference_blob);
 
@@ -4472,9 +4355,7 @@
 			goto err_created;
 	}
 
-	if (old_blob)
-		drm_property_unreference_blob(old_blob);
-
+	drm_property_unreference_blob(old_blob);
 	*replace = new_blob;
 
 	return 0;
@@ -4872,9 +4753,9 @@
 
 	/* Do DPMS ourselves */
 	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int)value);
 		ret = 0;
+		if (connector->funcs->dpms)
+			ret = (*connector->funcs->dpms)(connector, (int)value);
 	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, value);
 
@@ -5349,13 +5230,7 @@
 		/* Keep the old fb, don't unref it. */
 		crtc->primary->old_fb = NULL;
 	} else {
-		/*
-		 * Warn if the driver hasn't properly updated the crtc->fb
-		 * field to reflect that the new framebuffer is now used.
-		 * Failing to do so will screw with the reference counting
-		 * on framebuffers.
-		 */
-		WARN_ON(crtc->primary->fb != fb);
+		crtc->primary->fb = fb;
 		/* Unref only the old framebuffer. */
 		fb = NULL;
 	}
@@ -5386,21 +5261,23 @@
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+	drm_for_each_plane(plane, dev)
 		if (plane->funcs->reset)
 			plane->funcs->reset(plane);
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		if (crtc->funcs->reset)
 			crtc->funcs->reset(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev)
 		if (connector->funcs->reset)
 			connector->funcs->reset(connector);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
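The drm_for_each_*() conversions in this file are behavior-neutral, but the connector iterator also makes the locking rule explicit where the raw list walk left it implicit, as the drm_mode_config_reset() hunk above shows. Typical use, sketched (handle_connector() is a stand-in):

	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);	/* guards the connector list */
	drm_for_each_connector(connector, dev) {
		if (connector->status == connector_status_connected)
			handle_connector(connector);
	}
	mutex_unlock(&dev->mode_config.mutex);
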
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 393114d..ef53475 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,7 +121,7 @@
 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	}
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			return true;
 	return false;
@@ -151,7 +151,7 @@
 	if (!oops_in_progress)
 		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
 	return false;
@@ -180,7 +180,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (!drm_helper_encoder_in_use(encoder)) {
 			drm_encoder_disable(encoder);
 			/* disconnect encoder from any connector */
@@ -188,7 +188,7 @@
 		}
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		if (!crtc->enabled) {
@@ -230,7 +230,7 @@
 	const struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_encoder *encoder;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		encoder_funcs = encoder->helper_private;
 		/* Disable unused encoders */
 		if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -334,7 +334,7 @@
 	crtc->hwmode = *adjusted_mode;
 
 	/* Prepare the encoders and CRTCs before setting the mode. */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -359,7 +359,7 @@
 	if (!ret)
 	    goto done;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -376,7 +376,7 @@
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	crtc_funcs->commit(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -418,11 +418,11 @@
 	struct drm_encoder *encoder;
 
 	/* Decouple all encoders and their attached connectors from this crtc */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_for_each_connector(connector, dev) {
 			if (connector->encoder != encoder)
 				continue;
 
@@ -519,12 +519,12 @@
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		save_encoders[count++] = *encoder;
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		save_connectors[count++] = *connector;
 	}
 
@@ -562,7 +562,7 @@
 
 	/* a) traverse passed in connector list and get encoders for them */
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		const struct drm_connector_helper_funcs *connector_funcs =
 			connector->helper_private;
 		new_encoder = connector->encoder;
@@ -602,7 +602,7 @@
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->encoder)
 			continue;
 
@@ -685,12 +685,12 @@
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		*encoder = save_encoders[count++];
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		*connector = save_connectors[count++];
 	}
 
@@ -712,7 +712,7 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -746,7 +746,7 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = crtc->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder && connector->encoder->crtc == crtc)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -762,15 +762,18 @@
  * implementing the DPMS connector attribute. It computes the new desired DPMS
  * state for all encoders and crtcs in the output mesh and calls the ->dpms()
  * callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
  */
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_encoder *encoder = connector->encoder;
 	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
 	int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
 	if (mode == connector->dpms)
-		return;
+		return 0;
 
 	old_dpms = connector->dpms;
 	connector->dpms = mode;
@@ -802,7 +805,7 @@
 		}
 	}
 
-	return;
+	return 0;
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
@@ -862,7 +865,7 @@
 	bool ret;
 
 	drm_modeset_lock_all(dev);
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 
 		if (!crtc->enabled)
 			continue;
@@ -876,7 +879,7 @@
 
 		/* Turn off outputs that were already powered off */
 		if (drm_helper_choose_crtc_dpms(crtc)) {
-			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			drm_for_each_encoder(encoder, dev) {
 
 				if(encoder->crtc != crtc)
 					continue;
@@ -928,15 +931,15 @@
 	if (crtc->funcs->atomic_duplicate_state)
 		crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
 	else {
-		crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-		if (!crtc_state)
-			return -ENOMEM;
-		if (crtc->state)
-			__drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
-		else
-			crtc_state->crtc = crtc;
+		if (!crtc->state)
+			drm_atomic_helper_crtc_reset(crtc);
+
+		crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
 	}
 
+	if (!crtc_state)
+		return -ENOMEM;
+
 	crtc_state->planes_changed = true;
 	crtc_state->mode_changed = true;
 	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@
 	ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
 out:
-	if (crtc->funcs->atomic_destroy_state)
-		crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-	else {
-		__drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-		kfree(crtc_state);
+	if (crtc_state) {
+		if (crtc->funcs->atomic_destroy_state)
+			crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+		else
+			drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index eb603f1de..e23df5f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2632,6 +2632,16 @@
 			seq_printf(m, "%02x ", buf[i]);
 		seq_printf(m, "\n");
 
+		/* dump the standard OUI branch header */
+		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+		seq_printf(m, "branch oui: ");
+		for (i = 0; i < 0x3; i++)
+			seq_printf(m, "%02x", buf[i]);
+		seq_printf(m, " devid: ");
+		for (i = 0x3; i < 0x8; i++)
+			seq_printf(m, "%c", buf[i]);
+		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+		seq_printf(m, "\n");
 		bret = dump_dp_payload_table(mgr, buf);
 		if (bret == true) {
 			seq_printf(m, "payload table: ");
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7bf4ce..53d09a1 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -285,7 +285,6 @@
 	if (!minor)
 		return;
 
-	drm_mode_group_destroy(&minor->mode_group);
 	put_device(minor->kdev);
 
 	spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@
 	if (drm_ht_create(&dev->map_hash, 12))
 		goto err_minors;
 
-	ret = drm_legacy_ctxbitmap_init(dev);
-	if (ret) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto err_ht;
-	}
+	drm_legacy_ctxbitmap_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
 		ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@
 
 err_ctxbitmap:
 	drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
 	drm_ht_remove(&dev->map_hash);
 err_minors:
 	drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@
 			goto err_minors;
 	}
 
-	/* setup grouping for legacy outputs */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_unload;
-	}
-
 	ret = 0;
 	goto out_unlock;
 
-err_unload:
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
 err_minors:
 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7087da3..05bb731 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3413,7 +3413,7 @@
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder && connector->eld[0])
 			return connector;
 
@@ -3802,7 +3802,7 @@
 	struct drm_display_mode *mode;
 	struct drm_device *dev = connector->dev;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+	count = ARRAY_SIZE(drm_dmt_modes);
 	if (hdisplay < 0)
 		hdisplay = 0;
 	if (vdisplay < 0)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5c1aca4..c19a625 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -209,23 +209,11 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-	if (ret)
-		return ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret) {
-		mutex_unlock(&dev->mode_config.mutex);
-		return ret;
-	}
-
-	list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+	mutex_lock(&dev->mode_config.fb_lock);
+	drm_for_each_fb(fb, dev)
 		drm_fb_cma_describe(fb, m);
-
-	mutex_unlock(&dev->struct_mutex);
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev->mode_config.fb_lock);
 
 	return 0;
 }
@@ -234,9 +222,9 @@
 
 static struct fb_ops drm_fbdev_cma_ops = {
 	.owner		= THIS_MODULE,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	.fb_fillrect	= drm_fb_helper_sys_fillrect,
+	.fb_copyarea	= drm_fb_helper_sys_copyarea,
+	.fb_imageblit	= drm_fb_helper_sys_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -275,10 +263,9 @@
 	if (IS_ERR(obj))
 		return -ENOMEM;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		ret = PTR_ERR(fbi);
 		goto err_drm_gem_cma_free_object;
 	}
 
@@ -286,23 +273,16 @@
 	if (IS_ERR(fbdev_cma->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(fbdev_cma->fb);
-		goto err_framebuffer_release;
+		goto err_fb_info_destroy;
 	}
 
 	fb = &fbdev_cma->fb->fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &drm_fbdev_cma_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_fb_cma_destroy;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -317,11 +297,8 @@
 
 	return 0;
 
-err_drm_fb_cma_destroy:
-	drm_framebuffer_unregister_private(fb);
-	drm_fb_cma_destroy(fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_fb_info_destroy:
+	drm_fb_helper_release_fbi(helper);
 err_drm_gem_cma_free_object:
 	drm_gem_cma_free_object(&obj->base);
 	return ret;
@@ -397,20 +374,8 @@
  */
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
-	if (fbdev_cma->fb_helper.fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fbdev_cma->fb_helper.fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
 	if (fbdev_cma->fb) {
 		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cac4229..418d299 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@
  * Teardown is done with drm_fb_helper_fini().
  *
  * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
- * should also notify the fb helper code from updates to the output
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
+ * They should also notify the fb helper code of updates to the output
  * configuration by calling drm_fb_helper_hotplug_event(). For easier
  * integration with the output polling code in drm_crtc_helper.c the modeset
  * code provides a ->output_poll_changed callback.
@@ -89,8 +89,9 @@
  * connectors to the fbdev, e.g. if some are reserved for special purposes or
  * not adequate to be used for the fbcon.
  *
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
  */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -98,7 +99,8 @@
 	struct drm_connector *connector;
 	int i;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
 		struct drm_fb_helper_connector *fb_helper_connector;
 
 		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@
 		fb_helper_connector->connector = connector;
 		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 fail:
 	for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@
 		fb_helper->connector_info[i] = NULL;
 	}
 	fb_helper->connector_count = 0;
+	mutex_unlock(&dev->mode_config.mutex);
+
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -163,11 +168,14 @@
 	}
 	set->num_connectors--;
 
-	/* because i915 is pissy about this..
+	/*
 	 * TODO maybe we need to make sure we set it back to !=NULL somewhere?
 	 */
-	if (set->num_connectors == 0)
+	if (set->num_connectors == 0) {
 		set->fb = NULL;
+		drm_mode_destroy(connector->dev, set->mode);
+		set->mode = NULL;
+	}
 }
 
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -269,7 +277,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *c;
 
-	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(c, dev) {
 		if (crtc->base.id == c->base.id)
 			return c->primary->fb;
 	}
@@ -321,7 +329,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);
 
@@ -349,21 +357,6 @@
 	}
 	return error;
 }
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * Use this variant if you need to bypass locking (panic), or already
- * hold all modeset locks.  Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
- */
-static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
-	return restore_fbdev_mode(fb_helper);
-}
 
 /**
  * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -393,6 +386,31 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc *crtc;
+	int bound = 0, crtcs_bound = 0;
+
+	/* Sometimes user space wants everything disabled, so don't steal the
+	 * display if there's a master. */
+	if (dev->primary->master)
+		return false;
+
+	drm_for_each_crtc(crtc, dev) {
+		if (crtc->primary->fb)
+			crtcs_bound++;
+		if (crtc->primary->fb == fb_helper->fb)
+			bound++;
+	}
+
+	if (bound < crtcs_bound)
+		return false;
+
+	return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
 /*
  * restore fbcon display for all kms drivers using this helper, used for sysrq
  * and panic handling.
@@ -411,67 +429,15 @@
 		if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 			continue;
 
-		/*
-		 * NOTE: Use trylock mode to avoid deadlocks and sleeping in
-		 * panic context.
-		 */
-		if (__drm_modeset_lock_all(dev, true) != 0) {
-			error = true;
-			continue;
-		}
-
-		ret = drm_fb_helper_restore_fbdev_mode(helper);
+		drm_modeset_lock_all(dev);
+		ret = restore_fbdev_mode(helper);
 		if (ret)
 			error = true;
-
 		drm_modeset_unlock_all(dev);
 	}
 	return error;
 }
 
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
-			void *panic_str)
-{
-	/*
-	 * It's a waste of time and effort to switch back to text console
-	 * if the kernel should reboot before panic messages can be seen.
-	 */
-	if (panic_timeout < 0)
-		return 0;
-
-	pr_err("panic occurred, switching back to text console\n");
-	return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
-	.notifier_call = drm_fb_helper_panic,
-};
-
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
-	int bound = 0, crtcs_bound = 0;
-
-	/* Sometimes user space wants everything disabled, so don't steal the
-	 * display if there's a master. */
-	if (dev->primary->master)
-		return false;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb)
-			crtcs_bound++;
-		if (crtc->primary->fb == fb_helper->fb)
-			bound++;
-	}
-
-	if (bound < crtcs_bound)
-		return false;
-
-	return true;
-}
-
-#ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
 	bool ret;
@@ -504,14 +470,6 @@
 	int i, j;
 
 	/*
-	 * fbdev->blank can be called from irq context in case of a panic.
-	 * Since we already have our own special panic handler which will
-	 * restore the fbdev console mode completely, just bail out early.
-	 */
-	if (oops_in_progress)
-		return;
-
-	/*
 	 * For each CRTC in this fb, turn the connectors on/off.
 	 */
 	drm_modeset_lock_all(dev);
@@ -544,6 +502,9 @@
  */
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
+	if (oops_in_progress)
+		return -EBUSY;
+
 	switch (blank) {
 	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
@@ -655,7 +616,7 @@
 	}
 
 	i = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		fb_helper->crtc_info[i].mode_set.crtc = crtc;
 		i++;
 	}
@@ -667,14 +628,91 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_init);
 
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function.
+ *
+ * RETURNS:
+ * fb_info pointer on success, or an ERR_PTR()-encoded error code on
+ * failure
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	struct device *dev = fb_helper->dev->dev;
+	struct fb_info *info;
+	int ret;
+
+	info = framebuffer_alloc(0, dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret)
+		goto err_release;
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_free_cmap;
+	}
+
+	fb_helper->fbdev = info;
+
+	return info;
+
+err_free_cmap:
+	fb_dealloc_cmap(&info->cmap);
+err_release:
+	framebuffer_release(info);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_release_fbi - dealloc fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to free memory taken by fb_info and the members cmap and
+ * apertures
+ */
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper) {
+		struct fb_info *info = fb_helper->fbdev;
+
+		if (info) {
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		fb_helper->fbdev = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_fb_helper_release_fbi);
+
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
 		if (list_empty(&kernel_fb_helper_list)) {
-			pr_info("drm: unregistered panic notifier\n");
-			atomic_notifier_chain_unregister(&panic_notifier_list,
-							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 		}
 	}
@@ -684,6 +722,149 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+/**
+ * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_read implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	return fb_sys_read(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_read);
+
+/**
+ * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_write implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return fb_sys_write(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_write);
+
+/**
+ * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around sys_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	sys_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
+
+/**
+ * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around sys_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	sys_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
+
+/**
+ * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around sys_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	sys_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	cfb_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+{
+	if (fb_helper && fb_helper->fbdev)
+		fb_set_suspend(fb_helper->fbdev, state);
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -771,9 +952,10 @@
 	int i, j, rc = 0;
 	int start;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -922,6 +1104,9 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 
+	if (oops_in_progress)
+		return -EBUSY;
+
 	if (var->pixclock != 0) {
 		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
@@ -947,9 +1132,10 @@
 	int ret = 0;
 	int i;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -1109,12 +1295,7 @@
 	dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
 			info->node, info->fix.id);
 
-	/* Switch back to kernel console on panic */
-	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
-		atomic_notifier_chain_register(&panic_notifier_list,
-					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
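The drm_fb_helper_alloc_fbi()/drm_fb_helper_release_fbi() pair introduced
above replaces open-coded framebuffer_alloc()/fb_alloc_cmap() sequences in
drivers. A minimal sketch of the intended call sites (the driver function
names are illustrative; assumes <drm/drm_fb_helper.h>):

	/* sketch: allocation from a driver's fb_probe callback */
	static int foo_fbdev_probe(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info;

		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		/* ... fill info->var/info->fix and set info->fbops ... */
		return 0;
	}

	/* sketch: teardown mirrors the allocation */
	static void foo_fbdev_fini(struct drm_fb_helper *helper)
	{
		drm_fb_helper_unregister_fbi(helper);
		drm_fb_helper_release_fbi(helper);
	}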
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 16a1647..3c2d4ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@
 	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
 	struct drm_device *dev = obj->dev;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
@@ -778,22 +778,14 @@
 	struct drm_gem_object *obj = vma->vm_private_data;
 
 	drm_gem_object_reference(obj);
-
-	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(obj->dev, vma);
-	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(obj->dev, vma);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
@@ -850,7 +842,6 @@
 	 */
 	drm_gem_object_reference(obj);
 
-	drm_vm_open_locked(dev, vma);
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
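Since drm_gem_vm_open()/drm_gem_vm_close() now manage the object reference
without taking struct_mutex (via drm_gem_object_unreference_unlocked()),
drivers keep wiring them up unchanged; roughly (the fault handler name is
illustrative):

	/* sketch: typical driver vm_operations_struct using the helpers */
	static const struct vm_operations_struct foo_gem_vm_ops = {
		.fault = foo_gem_fault,
		.open = drm_gem_vm_open,
		.close = drm_gem_vm_close,
	};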
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bd75f30..86cc793 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@
 {
 	struct drm_gem_object *gem_obj;
 
-	mutex_lock(&drm->struct_mutex);
-
 	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
 	if (!gem_obj) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
-		mutex_unlock(&drm->struct_mutex);
 		return -EINVAL;
 	}
 
 	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
-	drm_gem_object_unreference(gem_obj);
-
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem_obj);
 
 	return 0;
 }
@@ -381,11 +376,8 @@
 			  struct seq_file *m)
 {
 	struct drm_gem_object *obj = &cma_obj->base;
-	struct drm_device *dev = obj->dev;
 	uint64_t off;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 9cfcd0a..ddfa601 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -95,7 +95,7 @@
 		return -EFAULT;
 
 	version = compat_alloc_user_space(sizeof(*version));
-	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+	if (!version)
 		return -EFAULT;
 	if (__put_user(v32.name_len, &version->name_len)
 	    || __put_user((void __user *)(unsigned long)v32.name,
@@ -142,7 +142,7 @@
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -170,7 +170,7 @@
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -202,7 +202,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(idx, &map->offset))
 		return -EFAULT;
@@ -239,7 +239,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(m32.offset, &map->offset)
 	    || __put_user(m32.size, &map->size)
@@ -279,7 +279,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user((void *)(unsigned long)handle, &map->handle))
 		return -EFAULT;
@@ -308,7 +308,7 @@
 		return -EFAULT;
 
 	client = compat_alloc_user_space(sizeof(*client));
-	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+	if (!client)
 		return -EFAULT;
 	if (__put_user(idx, &client->idx))
 		return -EFAULT;
@@ -347,7 +347,7 @@
 	int i, err;
 
 	stats = compat_alloc_user_space(sizeof(*stats));
-	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+	if (!stats)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -384,8 +384,7 @@
 	unsigned long agp_start;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+	if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
 		return -EFAULT;
 
 	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -416,7 +415,7 @@
 		return -EFAULT;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+	if (!buf)
 		return -EFAULT;
 
 	if (__put_user(b32.size, &buf->size)
@@ -457,7 +456,7 @@
 
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_desc *) (request + 1);
 
@@ -518,7 +517,7 @@
 		return -EINVAL;
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_pub *) (request + 1);
 
@@ -565,7 +564,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.count, &request->count)
 	    || __put_user((int __user *)(unsigned long)req32.list,
@@ -591,7 +590,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.ctx_id, &request->ctx_id)
 	    || __put_user((void *)(unsigned long)req32.handle,
@@ -615,7 +614,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(ctx_id, &request->ctx_id))
 		return -EFAULT;
@@ -648,7 +647,7 @@
 		return -EFAULT;
 
 	res = compat_alloc_user_space(sizeof(*res));
-	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+	if (!res)
 		return -EFAULT;
 	if (__put_user(res32.count, &res->count)
 	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -691,7 +690,7 @@
 		return -EFAULT;
 
 	d = compat_alloc_user_space(sizeof(*d));
-	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+	if (!d)
 		return -EFAULT;
 
 	if (__put_user(d32.context, &d->context)
@@ -766,7 +765,7 @@
 	int err;
 
 	info = compat_alloc_user_space(sizeof(*info));
-	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+	if (!info)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -809,7 +808,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.size, &request->size)
 	    || __put_user(req32.type, &request->type))
 		return -EFAULT;
@@ -836,7 +835,7 @@
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -860,7 +859,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.handle, &request->handle)
 	    || __put_user(req32.offset, &request->offset))
 		return -EFAULT;
@@ -876,7 +875,7 @@
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -899,8 +898,7 @@
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->size)
 	    || __put_user(x, &request->size))
 		return -EFAULT;
@@ -925,8 +923,7 @@
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->handle)
 	    || __put_user(x << PAGE_SHIFT, &request->handle))
 		return -EFAULT;
@@ -954,7 +951,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+	if (!request ||
 	    __put_user(update32.handle, &request->handle) ||
 	    __put_user(update32.type, &request->type) ||
 	    __put_user(update32.num, &request->num) ||
@@ -996,7 +993,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.request.type, &request->request.type)
 	    || __put_user(req32.request.sequence, &request->request.sequence)
 	    || __put_user(req32.request.signal, &request->request.signal))
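Every drm_ioc32.c hunk above is the same substitution: since commit
c41d68a513c7 ("compat: Make compat_alloc_user_space() incorporate the
access_ok()"), compat_alloc_user_space() validates the range it returns and
yields NULL on failure, so re-checking its result with access_ok() is
redundant. The idiom reduces to:

	/* before: allocate, then re-validate the range by hand */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;

	/* after: a NULL check suffices, the range is pre-validated */
	request = compat_alloc_user_space(sizeof(*request));
	if (!request)
		return -EFAULT;

The access_ok() calls on @argp, a genuine user pointer from the ioctl
argument, are kept, as in the hunks above that still check argp.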
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303f..9a860ca 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@
  * indicated permissions. If so, returns zero. Otherwise returns an
  * error code suitable for ioctl return.
  */
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_ioctl_permit);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
 	[DRM_IOCTL_NR(ioctl)] = {		\
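With drm_ioctl_permit() exported, drivers can reuse the core permission
check when they dispatch ioctls themselves. A sketch of a caller (the
handler shape and names are hypothetical; DRM_AUTH/DRM_MASTER are the usual
ioctl flags):

	/* sketch: driver-private ioctl reusing the core check */
	static int foo_priv_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
	{
		int ret;

		ret = drm_ioctl_permit(DRM_AUTH | DRM_MASTER, file_priv);
		if (ret)
			return ret;

		/* ... privileged work ... */
		return 0;
	}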
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b50fa0a..22d207e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) \
-	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+#define vblanktimestamp(dev, pipe, count) \
+	((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags);
 
 static unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
@@ -107,7 +107,7 @@
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
- * @crtc: counter to update
+ * @pipe: counter to update
  *
  * Call back into the driver to update the appropriate vblank counter
  * (specified by @pipe).  Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@
  * Note: caller must hold dev->vbl_lock since this reads & writes
  * device vblank fields.
  */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank, diff;
 	bool rc;
 	struct timeval t_vblank;
@@ -140,21 +140,21 @@
 	 * corresponding vblank timestamp.
 	 */
 	do {
-		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
-	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+		cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+		rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
 
 	/* Deal with counter wrap */
 	diff = cur_vblank - vblank->last;
 	if (cur_vblank < vblank->last) {
 		diff += dev->max_vblank_count + 1;
 
-		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, vblank->last, cur_vblank, diff);
+		DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  pipe, vblank->last, cur_vblank, diff);
 	}
 
-	DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
-		  crtc, diff);
+	DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
+		  pipe, diff);
 
 	if (diff == 0)
 		return;
@@ -167,7 +167,7 @@
 	if (!rc)
 		t_vblank = (struct timeval) {0, 0};
 
-	store_vblank(dev, crtc, diff, &t_vblank);
+	store_vblank(dev, pipe, diff, &t_vblank);
 }
 
 /*
@@ -176,9 +176,9 @@
  * are preserved, even if there are any spurious vblank irq's after
  * disable.
  */
-static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	u32 vblcount;
 	s64 diff_ns;
@@ -206,8 +206,8 @@
 	 * vblank interrupt is disabled.
 	 */
 	if (!vblank->enabled &&
-	    drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
-		drm_update_vblank_count(dev, crtc);
+	    drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
+		drm_update_vblank_count(dev, pipe);
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return;
 	}
@@ -218,7 +218,7 @@
 	 * hardware potentially runtime suspended.
 	 */
 	if (vblank->enabled) {
-		dev->driver->disable_vblank(dev, crtc);
+		dev->driver->disable_vblank(dev, pipe);
 		vblank->enabled = false;
 	}
 
@@ -235,9 +235,9 @@
 	 * delayed gpu counter increment.
 	 */
 	do {
-		vblank->last = dev->driver->get_vblank_counter(dev, crtc);
-		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+		vblank->last = dev->driver->get_vblank_counter(dev, pipe);
+		vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
+	} while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
 
 	if (!count)
 		vblrc = 0;
@@ -247,7 +247,7 @@
 	 */
 	vblcount = vblank->count;
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* If there is at least 1 msec difference between the last stored
 	 * timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@
 	 * hope for the best.
 	 */
 	if (vblrc && (abs64(diff_ns) > 1000000))
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
@@ -271,16 +271,16 @@
 {
 	struct drm_vblank_crtc *vblank = (void *)arg;
 	struct drm_device *dev = vblank->dev;
+	unsigned int pipe = vblank->pipe;
 	unsigned long irqflags;
-	int crtc = vblank->crtc;
 
 	if (!dev->vblank_disable_allowed)
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
-		DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
-		vblank_disable_and_save(dev, crtc);
+		DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+		vblank_disable_and_save(dev, pipe);
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -293,14 +293,14 @@
  */
 void drm_vblank_cleanup(struct drm_device *dev)
 {
-	int crtc;
+	unsigned int pipe;
 
 	/* Bail if the driver didn't call drm_vblank_init() */
 	if (dev->num_crtcs == 0)
 		return;
 
-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-		struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 		WARN_ON(vblank->enabled &&
 			drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@
 
 /**
  * drm_vblank_init - initialize vblank support
- * @dev: drm_device
- * @num_crtcs: number of crtcs supported by @dev
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
  *
  * This function initializes vblank support for @num_crtcs display pipelines.
  *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 {
-	int i, ret = -ENOMEM;
+	int ret = -ENOMEM;
+	unsigned int i;
 
 	spin_lock_init(&dev->vbl_lock);
 	spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@
 		struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
 		vblank->dev = dev;
-		vblank->crtc = i;
+		vblank->pipe = i;
 		init_waitqueue_head(&vblank->queue);
 		setup_timer(&vblank->disable_timer, vblank_disable_fn,
 			    (unsigned long)vblank);
@@ -624,17 +625,17 @@
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 			framedur_ns /= 2;
 	} else
-		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+		DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
 			  crtc->base.id);
 
 	crtc->pixeldur_ns = pixeldur_ns;
 	crtc->linedur_ns  = linedur_ns;
 	crtc->framedur_ns = framedur_ns;
 
-	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+	DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
 		  crtc->base.id, mode->crtc_htotal,
 		  mode->crtc_vtotal, mode->crtc_vdisplay);
-	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+	DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
 		  crtc->base.id, dotclock, framedur_ns,
 		  linedur_ns, pixeldur_ns);
 }
@@ -643,7 +644,7 @@
 /**
  * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
  * @dev: DRM device
- * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @max_error: Desired maximum allowable error in timestamps (nanosecs)
  *             On return contains true maximum error of timestamp
  * @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@
  * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
  *
  */
-int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+					  unsigned int pipe,
 					  int *max_error,
 					  struct timeval *vblank_time,
 					  unsigned flags,
@@ -700,8 +702,8 @@
 	int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
 	bool invbl;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
@@ -720,7 +722,7 @@
 	 * Happens during initial modesetting of a crtc.
 	 */
 	if (framedur_ns == 0) {
-		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
 		return -EAGAIN;
 	}
 
@@ -736,13 +738,13 @@
 		 * Get vertical and horizontal scanout position vpos, hpos,
 		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+		vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
 							       &hpos, &stime, &etime);
 
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
-			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
-				  crtc, vbl_status);
+			DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+				  pipe, vbl_status);
 			return -EIO;
 		}
 
@@ -756,8 +758,8 @@
 
 	/* Noisy system timing? */
 	if (i == DRM_TIMESTAMP_MAXRETRIES) {
-		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
-			  crtc, duration_ns/1000, *max_error/1000, i);
+		DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+			  pipe, duration_ns/1000, *max_error/1000, i);
 	}
 
 	/* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@
 		etime = ktime_sub_ns(etime, delta_ns);
 	*vblank_time = ktime_to_timeval(etime);
 
-	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
-		  crtc, (int)vbl_status, hpos, vpos,
+	DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+		  pipe, (int)vbl_status, hpos, vpos,
 		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
 		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
 		  duration_ns/1000, i);
@@ -816,7 +818,7 @@
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  *                             vblank interval
  * @dev: DRM device
- * @crtc: which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @tvblank: Pointer to target struct timeval which should receive the timestamp
  * @flags: Flags to pass to driver:
  *         0 = Default,
@@ -833,7 +835,7 @@
  * True if timestamp is considered to be very precise, false otherwise.
  */
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags)
 {
 	int ret;
@@ -843,7 +845,7 @@
 
 	/* Query driver if possible and precision timestamping enabled. */
 	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
-		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+		ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
 							tvblank, flags);
 		if (ret > 0)
 			return true;
@@ -860,7 +862,7 @@
 /**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC for which to retrieve the counter
  *
  * Fetches the "cooked" vblank count value that represents the number of
  * vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@
  * Returns:
  * The software vblank counter.
  */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
+u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
+
 	return vblank->count;
 }
 EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@
 EXPORT_SYMBOL(drm_crtc_vblank_count);
 
 /**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value.
- *
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ *     system timestamp corresponding to that vblank counter value.
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC whose counter to retrieve
  * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
  *
  * Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 			      struct timeval *vblanktime)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
 
 	/*
@@ -930,7 +932,7 @@
 	do {
 		cur_vblank = vblank->count;
 		smp_rmb();
-		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
 		smp_rmb();
 	} while (cur_vblank != vblank->count);
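The read/re-read loop above is the reader half of a seqcount-like protocol:
the smp_rmb() pairs with write barriers in store_vblank(), which is not
visible in this hunk. The writer side presumably has this shape:

	/* assumed shape of store_vblank(): publish the timestamp before
	 * bumping the counter, so a reader that observes the new count
	 * also observes the matching timestamp. */
	vblanktimestamp(dev, pipe, vblank->count + vblank_count_inc) = *t_vblank;
	smp_wmb();
	vblank->count += vblank_count_inc;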
 
@@ -957,7 +959,7 @@
 /**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  * @e: the event to send
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@
  *
  * This is the legacy version of drm_crtc_send_vblank_event().
  */
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
-		struct drm_pending_vblank_event *e)
+void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+			   struct drm_pending_vblank_event *e)
 {
 	struct timeval now;
 	unsigned int seq;
 
-	if (crtc >= 0) {
-		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	if (dev->num_crtcs > 0) {
+		seq = drm_vblank_count_and_time(dev, pipe, &now);
 	} else {
 		seq = 0;
 
 		now = get_drm_timestamp();
 	}
-	e->pipe = crtc;
+	e->pipe = pipe;
 	send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@
 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
  */
-static int drm_vblank_enable(struct drm_device *dev, int crtc)
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret = 0;
 
 	assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@
 		 * timestamps. Filtercode in drm_handle_vblank() will
 		 * prevent double-accounting of same vblank interval.
 		 */
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		ret = dev->driver->enable_vblank(dev, pipe);
+		DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
 		if (ret)
 			atomic_dec(&vblank->refcount);
 		else {
 			vblank->enabled = true;
-			drm_update_vblank_count(dev, crtc);
+			drm_update_vblank_count(dev, pipe);
 		}
 	}
 
@@ -1040,7 +1045,7 @@
 /**
  * drm_vblank_get - get a reference count on vblank events
  * @dev: DRM device
- * @crtc: which CRTC to own
+ * @pipe: index of CRTC to own
  *
  * Acquire a reference count on vblank events to avoid having them disabled
  * while in use.
@@ -1048,24 +1053,24 @@
  * This is the legacy version of drm_crtc_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
-int drm_vblank_get(struct drm_device *dev, int crtc)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	int ret = 0;
 
 	if (!dev->num_crtcs)
 		return -EINVAL;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return -EINVAL;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &vblank->refcount) == 1) {
-		ret = drm_vblank_enable(dev, crtc);
+		ret = drm_vblank_enable(dev, pipe);
 	} else {
 		if (!vblank->enabled) {
 			atomic_dec(&vblank->refcount);
@@ -1088,7 +1093,7 @@
  * This is the native kms version of drm_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
 int drm_crtc_vblank_get(struct drm_crtc *crtc)
 {
@@ -1097,23 +1102,23 @@
 EXPORT_SYMBOL(drm_crtc_vblank_get);
 
 /**
- * drm_vblank_put - give up ownership of vblank events
+ * drm_vblank_put - release ownership of vblank events
  * @dev: DRM device
- * @crtc: which counter to give up
+ * @pipe: index of CRTC to release
  *
  * Release ownership of a given vblank counter, turning off interrupts
  * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  *
  * This is the legacy version of drm_crtc_vblank_put().
  */
-void drm_vblank_put(struct drm_device *dev, int crtc)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
 		return;
 
 	/* Last user schedules interrupt disable */
@@ -1147,30 +1152,34 @@
 /**
  * drm_wait_one_vblank - wait for one vblank
  * @dev: DRM device
- * @crtc: crtc index
+ * @pipe: CRTC index
  *
  * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
  * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
  * due to lack of driver support or because the crtc is off.
  */
-void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
 {
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret;
 	u32 last;
 
-	ret = drm_vblank_get(dev, crtc);
-	if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
-	last = drm_vblank_count(dev, crtc);
+	ret = drm_vblank_get(dev, pipe);
+	if (WARN(ret, "vblank not available on crtc %u, ret=%i\n", pipe, ret))
+		return;
 
-	ret = wait_event_timeout(dev->vblank[crtc].queue,
-				 last != drm_vblank_count(dev, crtc),
+	last = drm_vblank_count(dev, pipe);
+
+	ret = wait_event_timeout(vblank->queue,
+				 last != drm_vblank_count(dev, pipe),
 				 msecs_to_jiffies(100));
 
-	WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+	WARN(ret == 0, "vblank wait timed out on crtc %u\n", pipe);
 
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 }
 EXPORT_SYMBOL(drm_wait_one_vblank);
 
@@ -1191,7 +1200,7 @@
 /**
  * drm_vblank_off - disable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Drivers can use this function to shut down the vblank interrupt handling when
  * disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@
  *
  * This is the legacy version of drm_crtc_vblank_off().
  */
-void drm_vblank_off(struct drm_device *dev, int crtc)
+void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long irqflags;
 	unsigned int seq;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
 
 	spin_lock(&dev->vbl_lock);
-	vblank_disable_and_save(dev, crtc);
+	vblank_disable_and_save(dev, pipe);
 	wake_up(&vblank->queue);
 
 	/*
@@ -1230,16 +1239,16 @@
 	spin_unlock(&dev->vbl_lock);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		DRM_DEBUG("Sending premature vblank event on disable: \
 			  wanted %d, current %d\n",
 			  e->event.sequence, seq);
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1267,7 +1276,7 @@
 
 /**
  * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
  *
  * Drivers can use this function to reset the vblank state to off at load time.
  * Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1300,7 +1309,7 @@
 /**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * This function restores the vblank interrupt state captured with
  * drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@
  *
  * This is the legacy version of drm_crtc_vblank_on().
  */
-void drm_vblank_on(struct drm_device *dev, int crtc)
+void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@
 	 * vblank counter value before and after a modeset
 	 */
 	vblank->last =
-		(dev->driver->get_vblank_counter(dev, crtc) - 1) &
+		(dev->driver->get_vblank_counter(dev, pipe) - 1) &
 		dev->max_vblank_count;
 	/*
 	 * re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@
 	 */
 	if (atomic_read(&vblank->refcount) != 0 ||
 	    (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
-		WARN_ON(drm_vblank_enable(dev, crtc));
+		WARN_ON(drm_vblank_enable(dev, pipe));
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_on);
@@ -1365,7 +1374,7 @@
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Account for vblank events across mode setting events, which will likely
  * reset the hardware frame counter.
@@ -1385,15 +1394,15 @@
  * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
  * again.
  */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 	/* vblank is not initialized (IRQ not installed?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	/*
@@ -1405,7 +1414,7 @@
 	 */
 	if (!vblank->inmodeset) {
 		vblank->inmodeset = 0x1;
-		if (drm_vblank_get(dev, crtc) == 0)
+		if (drm_vblank_get(dev, pipe) == 0)
 			vblank->inmodeset |= 0x2;
 	}
 }
@@ -1414,27 +1423,30 @@
 /**
  * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * This function again drops the temporary vblank reference acquired in
  * drm_vblank_pre_modeset.
  */
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
 	/* vblank is not initialized (IRQ not installed?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
+	if (WARN_ON(pipe >= dev->num_crtcs))
+		return;
+
 	if (vblank->inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
 		if (vblank->inmodeset & 0x2)
-			drm_vblank_put(dev, crtc);
+			drm_vblank_put(dev, pipe);
 
 		vblank->inmodeset = 0;
 	}
@@ -1456,7 +1468,7 @@
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	unsigned int crtc;
+	unsigned int pipe;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
-	crtc = modeset->crtc;
-	if (crtc >= dev->num_crtcs)
+	pipe = modeset->crtc;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
 	switch (modeset->cmd) {
 	case _DRM_PRE_MODESET:
-		drm_vblank_pre_modeset(dev, crtc);
+		drm_vblank_pre_modeset(dev, pipe);
 		break;
 	case _DRM_POST_MODESET:
-		drm_vblank_post_modeset(dev, crtc);
+		drm_vblank_post_modeset(dev, pipe);
 		break;
 	default:
 		return -EINVAL;
@@ -1484,7 +1496,7 @@
 	return 0;
 }
 
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 				  union drm_wait_vblank *vblwait,
 				  struct drm_file *file_priv)
 {
@@ -1538,7 +1550,7 @@
 		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
-	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
 
 	trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@
 	struct drm_vblank_crtc *vblank;
 	union drm_wait_vblank *vblwait = data;
 	int ret;
-	unsigned int flags, seq, crtc, high_crtc;
+	unsigned int flags, seq, pipe, high_pipe;
 
 	if (!dev->irq_enabled)
 		return -EINVAL;
@@ -1606,22 +1618,22 @@
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
-	high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
-	if (high_crtc)
-		crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+	high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+	if (high_pipe)
+		pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
 	else
-		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-	if (crtc >= dev->num_crtcs)
+		pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
-	vblank = &dev->vblank[crtc];
+	vblank = &dev->vblank[pipe];
 
-	ret = drm_vblank_get(dev, crtc);
+	ret = drm_vblank_get(dev, pipe);
 	if (ret) {
 		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
 		return ret;
 	}
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count(dev, pipe);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@
 		/* must hold on to the vblank ref until the event fires
 		 * drm_vblank_put will be called asynchronously
 		 */
-		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@
 		vblwait->request.sequence = seq + 1;
 	}
 
-	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-		  vblwait->request.sequence, crtc);
+	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+		  vblwait->request.sequence, pipe);
 	vblank->last_wait = vblwait->request.sequence;
 	DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
-		    (((drm_vblank_count(dev, crtc) -
+		    (((drm_vblank_count(dev, pipe) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !vblank->enabled ||
 		     !dev->irq_enabled));
@@ -1658,7 +1670,7 @@
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
 
@@ -1669,11 +1681,11 @@
 	}
 
 done:
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 	return ret;
 }
 
-static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
@@ -1681,10 +1693,10 @@
 
 	assert_spin_locked(&dev->event_lock);
 
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		if ((seq - e->event.sequence) > (1<<23))
 			continue;
@@ -1693,26 +1705,26 @@
 			  e->event.sequence, seq);
 
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 
-	trace_drm_vblank_event(crtc, seq);
+	trace_drm_vblank_event(pipe, seq);
 }
 
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
- * @crtc: where this event occurred
+ * @pipe: index of CRTC where this event occurred
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  *
  * This is the legacy version of drm_crtc_handle_vblank().
  */
-bool drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 vblcount;
 	s64 diff_ns;
 	struct timeval tvblank;
@@ -1721,7 +1733,7 @@
 	if (WARN_ON_ONCE(!dev->num_crtcs))
 		return false;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return false;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@
 
 	/* Get current timestamp and count. */
 	vblcount = vblank->count;
-	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+	drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* Update vblank timestamp and count if at least
 	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@
 	 * ignore those for accounting.
 	 */
 	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 	else
-		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
-			  crtc, (int) diff_ns);
+		DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
+			  pipe, (int) diff_ns);
 
 	spin_unlock(&dev->vblank_time_lock);
 
 	wake_up(&vblank->queue);
-	drm_handle_vblank_events(dev, crtc);
+	drm_handle_vblank_events(dev, pipe);
 
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
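Per the kerneldoc above, drivers call drm_handle_vblank() from their vblank
interrupt handlers; after the int -> unsigned int conversion that looks
roughly like this (the pending-check helper is illustrative; assumes
<linux/interrupt.h>):

	/* sketch: driver irq handler feeding the vblank machinery */
	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		unsigned int pipe;

		for (pipe = 0; pipe < dev->num_crtcs; pipe++)
			if (foo_vblank_pending(dev, pipe))
				drm_handle_vblank(dev, pipe);

		return IRQ_HANDLED;
	}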
 
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c1dc614..9b73178 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,7 +42,7 @@
 #define DRM_KERNEL_CONTEXT		0
 #define DRM_RESERVED_CONTEXTS		1
 
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
 void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
 void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361..4924d381 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@
 	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	++file_priv->lock_count;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@
 	struct drm_lock *lock = data;
 	struct drm_master *master = file_priv->master;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
 			  task_pid_nr(current), lock->context);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c0a5cd8..fba321c 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
  *     drm_modeset_acquire_fini(&ctx);
  */
 
-
 /**
- * __drm_modeset_lock_all - internal helper to grab all modeset locks
- * @dev: DRM device
- * @trylock: trylock mode for atomic contexts
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
  *
- * This is a special version of drm_modeset_lock_all() which can also be used in
- * atomic contexts. Then @trylock must be set to true.
- *
- * Returns:
- * 0 on success or negative error code on failure.
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
  */
-int __drm_modeset_lock_all(struct drm_device *dev,
-			   bool trylock)
+void drm_modeset_lock_all(struct drm_device *dev)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_modeset_acquire_ctx *ctx;
 	int ret;
 
-	ctx = kzalloc(sizeof(*ctx),
-		      trylock ? GFP_ATOMIC : GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (WARN_ON(!ctx))
+		return;
 
-	if (trylock) {
-		if (!mutex_trylock(&config->mutex)) {
-			ret = -EBUSY;
-			goto out;
-		}
-	} else {
-		mutex_lock(&config->mutex);
-	}
+	mutex_lock(&config->mutex);
 
 	drm_modeset_acquire_init(ctx, 0);
-	ctx->trylock_only = trylock;
 
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	return 0;
+	return;
 
 fail:
 	if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@
 		goto retry;
 	}
 
-out:
 	kfree(ctx);
-	return ret;
-}
-EXPORT_SYMBOL(__drm_modeset_lock_all);
-
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
-	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
 }
 EXPORT_SYMBOL(drm_modeset_lock_all);
 
@@ -276,7 +246,7 @@
 	if (oops_in_progress)
 		return;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +434,17 @@
 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
 		struct drm_modeset_acquire_ctx *ctx)
 {
-	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	int ret = 0;
 
-	list_for_each_entry(crtc, &config->crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
 			return ret;
 	}
 
-	list_for_each_entry(plane, &config->plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		ret = drm_modeset_lock(&plane->mutex, ctx);
 		if (ret)
 			return ret;
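drm_modeset_lock_all() above shows the acquire-context pattern that
fine-grained callers use directly: take locks under a
drm_modeset_acquire_ctx and back off on -EDEADLK. A condensed sketch of
that idiom, locking a single crtc (error paths other than -EDEADLK are
omitted):

	/* sketch: w/w-mutex style retry loop for modeset locks */
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	/* ... modeset work ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);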
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index aaa1307..be38840 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -19,7 +19,7 @@
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, dev) {
 		if (tmp->port == port)
 			return 1 << index;
 
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2f0ed11..5e5a07a 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -91,13 +91,14 @@
 	 */
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev) {
 		if (connector->encoder && connector->encoder->crtc == crtc) {
 			if (connector_list != NULL && count < num_connectors)
 				*(connector_list++) = connector;
 
 			count++;
 		}
+	}
 
 	return count;
 }
@@ -436,7 +437,7 @@
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
-			crtc_funcs[i]->atomic_begin(crtc[i]);
+			crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -451,7 +452,7 @@
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
-			crtc_funcs[i]->atomic_flush(crtc[i]);
+			crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -525,10 +526,12 @@
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
@@ -572,10 +575,12 @@
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 04203c0..d734780 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -93,6 +93,27 @@
 	return 1;
 }
 
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+	bool poll = false;
+	struct drm_connector *connector;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
+	drm_for_each_connector(connector, dev) {
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
+			poll = true;
+	}
+
+	if (poll)
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							      uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -153,7 +174,7 @@
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable(dev);
+		__drm_kms_helper_poll_enable(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -295,7 +316,6 @@
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
 	struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@
 		goto out;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Ignore forced connectors. */
 		if (connector->force)
@@ -407,20 +427,9 @@
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	bool poll = false;
-	struct drm_connector *connector;
-
-	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
-		return;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-					 DRM_CONNECTOR_POLL_DISCONNECT))
-			poll = true;
-	}
-
-	if (poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+	mutex_lock(&dev->mode_config.mutex);
+	__drm_kms_helper_poll_enable(dev);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -495,7 +504,7 @@
 		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Only handle HPD capable connectors. */
 		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
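
The probe-helper change is the usual locked/unlocked split: the body of drm_kms_helper_poll_enable() moves into __drm_kms_helper_poll_enable(), which asserts that mode_config.mutex is held, so callers that already own the lock (such as the single-connector probe path above) can re-enable polling without deadlocking, while the exported symbol becomes a lock/call/unlock wrapper. A generic sketch of the pattern, with hypothetical names:

	#include <linux/mutex.h>

	struct foo {
		struct mutex lock;
	};

	/* double-underscore variant: caller must already hold foo->lock */
	static void __foo_do_work(struct foo *f)
	{
		WARN_ON(!mutex_is_locked(&f->lock));
		/* ... the actual work ... */
	}

	/* exported variant: takes the lock itself */
	void foo_do_work(struct foo *f)
	{
		mutex_lock(&f->lock);
		__foo_do_work(f);
		mutex_unlock(&f->lock);
	}
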
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4..df0b61a 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@
 
 config DRM_EXYNOS_DP
 	bool "EXYNOS DRM DP driver support"
-	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
 	default DRM_EXYNOS
 	select DRM_PANEL
 	help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b10..02aecfe 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
-		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
-		exynos_drm_plane.o exynos_drm_dmabuf.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
+		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f..b3c7307 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -54,6 +54,13 @@
 	"sclk_decon_eclk",
 };
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -152,15 +159,15 @@
 #define OFFSIZE(x)		(((x) & 0x3fff) << 14)
 #define PAGEWIDTH(x)		((x) & 0x3fff)
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB1555:
 		val |= WINCONx_BPPMODE_16BPP_I1555;
 		val |= WINCONx_HAWSWP_F;
@@ -186,7 +193,7 @@
 		return;
 	}
 
-	DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +203,7 @@
 	 * movement causes unstable DMA, which results in an iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -219,27 +226,35 @@
 	writel(val, ctx->addr + DECON_SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-	u32 val;
-
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
 
 	if (ctx->suspended)
 		return;
 
-	decon_shadow_protect_win(ctx, win, true);
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
+	u32 val;
+
+	if (ctx->suspended)
+		return;
 
 	val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
 	writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
-	val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
-		COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
+	val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
+		COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
 	writel(val, ctx->addr + DECON_VIDOSDxB(win));
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,42 +267,33 @@
 
 	writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
 
-	val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
+	val = plane->dma_addr[0] + pitch * plane->crtc_h;
 	writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
 
-	val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
-		| PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
+	val = OFFSIZE(pitch - plane->crtc_w * bpp)
+		| PAGEWIDTH(plane->crtc_w * bpp);
 	writel(val, ctx->addr + DECON_VIDW0xADD2(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* window enable */
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val |= WINCONx_ENWIN_F;
 	writel(val, ctx->addr + DECON_WINCONx(win));
 
-	decon_shadow_protect_win(ctx, win, false);
-
 	/* standalone update */
 	val = readl(ctx->addr + DECON_UPDATE);
 	val |= STANDALONE_UPDATE_F;
 	writel(val, ctx->addr + DECON_UPDATE);
-
-	if (ctx->i80_if)
-		atomic_set(&ctx->win_updated, 1);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -306,6 +312,20 @@
 	writel(val, ctx->addr + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+
+	if (ctx->i80_if)
+		atomic_set(&ctx->win_updated, 1);
+}
+
 static void decon_swreset(struct decon_context *ctx)
 {
 	unsigned int tries;
@@ -378,7 +398,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	decon_swreset(ctx);
 
@@ -407,7 +427,7 @@
 		writel(val, ctx->addr + DECON_TRIGCON);
 	}
 
-	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+	drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +480,11 @@
 	.enable_vblank		= decon_enable_vblank,
 	.disable_vblank		= decon_disable_vblank,
 	.commit			= decon_commit,
-	.win_commit		= decon_win_commit,
-	.win_disable		= decon_win_disable,
+	.atomic_begin		= decon_atomic_begin,
+	.update_plane		= decon_update_plane,
+	.disable_plane		= decon_disable_plane,
+	.atomic_flush		= decon_atomic_flush,
 	.te_handler		= decon_te_irq_handler,
-	.clear_channels		= decon_clear_channels,
 };
 
 static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -483,7 +504,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 							DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-				1 << ctx->pipe, type, zpos);
+				1 << ctx->pipe, type, decon_formats,
+				ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -497,7 +519,9 @@
 		goto err;
 	}
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		goto err;
 
@@ -514,8 +538,7 @@
 	decon_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static const struct component_ops decon_component_ops = {
@@ -533,7 +556,7 @@
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMPEND) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -547,13 +570,21 @@
 {
 	struct decon_context *ctx = dev_id;
 	u32 val;
+	int win;
 
 	if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
 		goto out;
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMDONEPEND) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMDONEPEND,
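
The exynos5433 DECON conversion splits the old decon_win_commit() along the atomic helper phases: decon_atomic_begin() raises shadow-register protection for the window, decon_update_plane() programs coordinates, buffer address and pixel format (taking the framebuffer from plane->base.state rather than from fields cached on the exynos plane), and decon_atomic_flush() drops the protection and, on i80 interfaces, marks the window updated. The window bounds checks disappear because the plane passed in already maps to a valid window via plane->zpos. The shape of the split, condensed into a sketch where struct ctx and the helpers stand in for the driver's context and register writes:

	/* begin: hardware keeps scanning the old configuration */
	static void sketch_atomic_begin(struct ctx *c, unsigned int win)
	{
		shadow_protect(c, win, true);
	}

	/* update: writes land in shadow registers only */
	static void sketch_update_plane(struct ctx *c, unsigned int win)
	{
		program_position(c, win);
		program_address(c, win);
		program_format(c, win);
	}

	/* flush: buffered values latch together at the next vblank */
	static void sketch_atomic_flush(struct ctx *c, unsigned int win)
	{
		shadow_protect(c, win, false);
	}
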
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532a..cbdb78e 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@
 	atomic_t			wait_vsync_event;
 
 	struct exynos_drm_panel_info panel;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id decon_driver_dt_match[] = {
@@ -70,6 +70,18 @@
 };
 MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_BGRA8888,
+};
+
 static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -126,7 +138,9 @@
 	ctx->drm_dev = drm_dev;
 	ctx->pipe = priv->pipe++;
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -136,8 +150,7 @@
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +284,16 @@
 	}
 }
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 	int padding;
 
 	val = readl(ctx->regs + WINCON(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_RGB565:
 		val |= WINCONx_BPPMODE_16BPP_565;
 		val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +342,7 @@
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +352,8 @@
 	 * movement causes unstable DMA, which results in an iommu crash/tear.
 	 */
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
-	if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+	if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -382,23 +395,30 @@
 	writel(val, ctx->regs + SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
-	struct exynos_drm_plane *plane;
-	int padding;
-	unsigned long val, alpha;
-	unsigned int last_x;
-	unsigned int last_y;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
 
-	plane = &ctx->planes[win];
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+	struct drm_plane_state *state = plane->base.state;
+	int padding;
+	unsigned long val, alpha;
+	unsigned int last_x;
+	unsigned int last_y;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
 	if (ctx->suspended)
 		return;
@@ -413,18 +433,15 @@
 	 * is set.
 	 */
 
-	/* protect windows */
-	decon_shadow_protect_win(ctx, win, true);
-
 	/* buffer start address */
 	val = (unsigned long)plane->dma_addr[0];
 	writel(val, ctx->regs + VIDW_BUF_START(win));
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+	padding = (pitch / bpp) - state->fb->width;
 
 	/* buffer size */
-	writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
-	writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+	writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+	writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
 
 	/* offset from the start of the buffer to read */
 	writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
@@ -433,25 +450,25 @@
 	DRM_DEBUG_KMS("start addr = 0x%lx\n",
 			(unsigned long)val);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/*
 	 * OSD position.
 	 * In case the window layout goes out of the LCD layout, DECON fails.
 	 */
-	if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
-		plane->crtc_x = mode->hdisplay - plane->crtc_width;
-	if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
-		plane->crtc_y = mode->vdisplay - plane->crtc_height;
+	if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
+		plane->crtc_x = mode->hdisplay - plane->crtc_w;
+	if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
+		plane->crtc_y = mode->vdisplay - plane->crtc_h;
 
 	val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
 		VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -475,7 +492,7 @@
 
 	writel(alpha, ctx->regs + VIDOSD_D(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -495,17 +512,13 @@
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -517,14 +530,22 @@
 	val &= ~WINCONx_ENWIN;
 	writel(val, ctx->regs + WINCON(win));
 
-	/* unprotect windows */
-	decon_shadow_protect_win(ctx, win, false);
-
 	val = readl(ctx->regs + DECON_UPDATE);
 	val |= DECON_UPDATE_STANDALONE_F;
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+}
+
 static void decon_init(struct decon_context *ctx)
 {
 	u32 val;
@@ -601,7 +622,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	clk_disable_unprepare(ctx->vclk);
 	clk_disable_unprepare(ctx->eclk);
@@ -621,9 +642,10 @@
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
 	.wait_for_vblank = decon_wait_for_vblank,
-	.win_commit = decon_win_commit,
-	.win_disable = decon_win_disable,
-	.clear_channels = decon_clear_channels,
+	.atomic_begin = decon_atomic_begin,
+	.update_plane = decon_update_plane,
+	.disable_plane = decon_disable_plane,
+	.atomic_flush = decon_atomic_flush,
 };
 
 
@@ -631,6 +653,7 @@
 {
 	struct decon_context *ctx = (struct decon_context *)dev_id;
 	u32 val, clear_bit;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -643,8 +666,15 @@
 		goto out;
 
 	if (!ctx->i80_if) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -675,7 +705,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, decon_formats,
+					ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -689,8 +720,8 @@
 		return PTR_ERR(ctx->crtc);
 	}
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
 
 	return 0;
 
@@ -703,8 +734,8 @@
 
 	decon_disable(ctx->crtc);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 
 	decon_ctx_remove(ctx);
 }
@@ -789,9 +820,9 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		ret = PTR_ERR(ctx->display);
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder)) {
+		ret = PTR_ERR(ctx->encoder);
 		goto err_iounmap;
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b800..d66ade0 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
 #include <drm/drm_panel.h>
 
 #include "exynos_dp_core.h"
+#include "exynos_drm_crtc.h"
 
 #define ctx_from_connector(c)	container_of(c, struct exynos_dp_device, \
 					connector)
 
 static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
 {
-	return to_exynos_crtc(dp->encoder->crtc);
+	return to_exynos_crtc(dp->encoder.crtc);
 }
 
-static inline struct exynos_dp_device *
-display_to_dp(struct exynos_drm_display *d)
+static inline struct exynos_dp_device *encoder_to_dp(
+						struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dp_device, display);
+	return container_of(e, struct exynos_dp_device, encoder);
 }
 
 struct bridge_init {
@@ -795,9 +796,6 @@
 	/* Configure video slave mode */
 	exynos_dp_enable_video_master(dp, 0);
 
-	/* Enable video */
-	exynos_dp_start_video(dp);
-
 	timeout_loop = 0;
 
 	for (;;) {
@@ -891,9 +889,9 @@
 		drm_helper_hpd_irq_event(dp->drm_dev);
 }
 
-static void exynos_dp_commit(struct exynos_drm_display *display)
+static void exynos_dp_commit(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	int ret;
 
 	/* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@
 		if (drm_panel_enable(dp->panel))
 			DRM_ERROR("failed to enable the panel\n");
 	}
+
+	/* Enable video */
+	exynos_dp_start_video(dp);
 }
 
 static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@
 {
 	struct exynos_dp_device *dp = ctx_from_connector(connector);
 
-	return dp->encoder;
+	return &dp->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@
 	return 0;
 }
 
-static int exynos_dp_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct drm_connector *connector = &dp->connector;
 	int ret;
 
-	dp->encoder = encoder;
-
 	/* Pre-empt DP connector creation if there's a bridge */
 	if (dp->bridge) {
 		ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@
 	return ret;
 }
 
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_on(dp->phy);
+	return true;
 }
 
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_off(dp->phy);
 }
 
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@
 		crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
 	clk_prepare_enable(dp->clock);
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 	exynos_dp_init_dp(dp);
 	enable_irq(dp->irq);
-	exynos_dp_commit(&dp->display);
+	exynos_dp_commit(&dp->encoder);
+
+	dp->dpms_mode = DRM_MODE_DPMS_ON;
 }
 
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@
 
 	disable_irq(dp->irq);
 	flush_work(&dp->hotplug_work);
-	exynos_dp_phy_exit(dp);
+	phy_power_off(dp->phy);
 	clk_disable_unprepare(dp->clock);
 
 	if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@
 		if (drm_panel_unprepare(dp->panel))
 			DRM_ERROR("failed to turnoff the panel\n");
 	}
+
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
-static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dp_device *dp = display_to_dp(display);
+static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+	.mode_fixup = exynos_dp_mode_fixup,
+	.mode_set = exynos_dp_mode_set,
+	.enable = exynos_dp_enable,
+	.disable = exynos_dp_disable,
+};
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		exynos_dp_poweron(dp);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		exynos_dp_poweroff(dp);
-		break;
-	default:
-		break;
-	}
-	dp->dpms_mode = mode;
-}
-
-static struct exynos_drm_display_ops exynos_dp_display_ops = {
-	.create_connector = exynos_dp_create_connector,
-	.dpms = exynos_dp_dpms,
-	.commit = exynos_dp_commit,
+static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &dp->encoder;
 	struct resource *res;
 	unsigned int irq_flags;
-	int ret = 0;
+	int pipe, ret = 0;
 
 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@
 
 	INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
 
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 
 	exynos_dp_init_dp(dp);
 
@@ -1311,7 +1303,28 @@
 
 	dp->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &dp->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+	ret = exynos_dp_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 }
 
 static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@
 	if (!dp)
 		return -ENOMEM;
 
-	dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dp->display.ops = &exynos_dp_display_ops;
 	platform_set_drvdata(pdev, dp);
 
 	panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 	return 0;
 }
 
@@ -1385,7 +1396,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
+	exynos_dp_enable(&dp->encoder);
 	return 0;
 }
 #endif
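
exynos_dp stops going through the driver-private exynos_drm_display abstraction: the drm_encoder is embedded in exynos_dp_device, registered directly with the core, and driven by standard encoder helper callbacks, so enable/disable replace the old DPMS switch and track dpms_mode themselves. One functional change rides along: exynos_dp_start_video() moves out of the link-training path to the end of exynos_dp_commit(), so video starts only after the panel has been enabled. The bind-time registration, condensed from the hunk above:

	encoder->possible_crtcs = 1 << pipe;	/* pipe looked up by display type */

	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);

	ret = exynos_dp_create_connector(encoder);
	if (ret)
		drm_encoder_cleanup(encoder);	/* undo registration on failure */
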
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e7996..e413b6f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@
 };
 
 struct exynos_dp_device {
-	struct exynos_drm_display display;
+	struct drm_encoder	encoder;
 	struct device		*dev;
 	struct drm_device	*drm_dev;
 	struct drm_connector	connector;
-	struct drm_encoder	*encoder;
 	struct drm_panel	*panel;
 	struct drm_bridge	*bridge;
 	struct clk		*clock;
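
Because the encoder is now embedded by value, encoder_to_dp() can recover the containing device with container_of() and the old back-pointer field disappears. A self-contained userspace rendition of the idiom (the kernel's container_of is equivalent up to extra type checking; the struct names here are illustrative):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };

	struct dp_device {
		struct encoder enc;	/* embedded, as in exynos_dp_device */
		int state;
	};

	static struct dp_device *encoder_to_dp(struct encoder *e)
	{
		return container_of(e, struct dp_device, enc);
	}
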
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	int ret = 0;
-	enum dma_attr attr;
-	unsigned int nr_pages;
-
-	if (buf->dma_addr) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return 0;
-	}
-
-	init_dma_attrs(&buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
-	 * region will be allocated else physically contiguous
-	 * as possible.
-	 */
-	if (!(flags & EXYNOS_BO_NONCONTIG))
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
-	 * else cachable mapping.
-	 */
-	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
-	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	dma_set_attr(attr, &buf->dma_attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
-	nr_pages = buf->size >> PAGE_SHIFT;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_addr_t start_addr;
-		unsigned int i = 0;
-
-		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			return -ENOMEM;
-		}
-
-		buf->cookie = dma_alloc_attrs(dev->dev,
-					buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->cookie) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err_free;
-		}
-
-		start_addr = buf->dma_addr;
-		while (i < nr_pages) {
-			buf->pages[i] = phys_to_page(start_addr);
-			start_addr += PAGE_SIZE;
-			i++;
-		}
-	} else {
-
-		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			return -ENOMEM;
-		}
-	}
-
-	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-	if (IS_ERR(buf->sgt)) {
-		DRM_ERROR("failed to get sg table.\n");
-		ret = PTR_ERR(buf->sgt);
-		goto err_free_attrs;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	return ret;
-
-err_free_attrs:
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
-			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-	buf->dma_addr = (dma_addr_t)NULL;
-err_free:
-	if (!is_drm_iommu_supported(dev))
-		drm_free_large(buf->pages);
-
-	return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	if (!buf->dma_addr) {
-		DRM_DEBUG_KMS("dma_addr is invalid.\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	sg_free_table(buf->sgt);
-
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_free_attrs(dev->dev, buf->size, buf->cookie,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-		drm_free_large(buf->pages);
-	} else
-		dma_free_attrs(dev->dev, buf->size, buf->pages,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
-	buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size)
-{
-	struct exynos_drm_gem_buf *buffer;
-
-	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer)
-		return NULL;
-
-	buffer->size = size;
-	return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer)
-{
-	kfree(buffer);
-	buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
-	/*
-	 * allocate memory region and set the memory information
-	 * to vaddr and dma_addr of a buffer object.
-	 */
-	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
-	lowlevel_buffer_deallocate(dev, flags, buffer);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f1..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buf,
-				unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
-				unsigned int flags,
-				struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972..c68a6a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-					struct exynos_drm_display *display)
-{
-	struct drm_encoder *encoder;
-	int ret;
-	unsigned long possible_crtcs = 0;
-
-	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
-	if (ret < 0)
-		return ret;
-
-	possible_crtcs |= 1 << ret;
-
-	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
-	if (!encoder) {
-		DRM_ERROR("failed to create encoder\n");
-		return -EFAULT;
-	}
-
-	display->encoder = encoder;
-
-	ret = display->ops->create_connector(display, encoder);
-	if (ret) {
-		DRM_ERROR("failed to create connector ret = %d\n", ret);
-		goto err_destroy_encoder;
-	}
-
-	return 0;
-
-err_destroy_encoder:
-	encoder->funcs->destroy(encoder);
-	return ret;
-}
-
 int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
 	if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b7..0872aa2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,21 +19,15 @@
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
 
 static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->enable)
 		exynos_crtc->ops->enable(exynos_crtc);
 
-	exynos_crtc->enabled = true;
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -41,20 +35,10 @@
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (!exynos_crtc->enabled)
-		return;
-
-	/* wait for the completion of page flip. */
-	if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-				(exynos_crtc->event == NULL), HZ/20))
-		exynos_crtc->event = NULL;
-
 	drm_crtc_vblank_off(crtc);
 
 	if (exynos_crtc->ops->disable)
 		exynos_crtc->ops->disable(exynos_crtc);
-
-	exynos_crtc->enabled = false;
 }
 
 static bool
@@ -80,18 +64,36 @@
 		exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
 
-	if (crtc->state->event) {
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		exynos_crtc->event = crtc->state->event;
+	exynos_crtc->event = crtc->state->event;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_begin)
+			exynos_crtc->ops->atomic_begin(exynos_crtc,
+							exynos_plane);
 	}
 }
 
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_flush)
+			exynos_crtc->ops->atomic_flush(exynos_crtc,
+							exynos_plane);
+	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -139,13 +141,13 @@
 	if (!exynos_crtc)
 		return ERR_PTR(-ENOMEM);
 
-	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
 	exynos_crtc->pipe = pipe;
 	exynos_crtc->type = type;
 	exynos_crtc->ops = ops;
 	exynos_crtc->ctx = ctx;
 
+	init_waitqueue_head(&exynos_crtc->wait_update);
+
 	crtc = &exynos_crtc->base;
 
 	private->crtc[pipe] = crtc;
@@ -171,11 +173,8 @@
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return -EPERM;
-
 	if (exynos_crtc->ops->enable_vblank)
-		exynos_crtc->ops->enable_vblank(exynos_crtc);
+		return exynos_crtc->ops->enable_vblank(exynos_crtc);
 
 	return 0;
 }
@@ -186,31 +185,34 @@
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->disable_vblank)
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
 {
-	struct exynos_drm_private *dev_priv = dev->dev_private;
-	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+	wait_event_timeout(exynos_crtc->wait_update,
+			   (atomic_read(&exynos_crtc->pending_update) == 0),
+			   msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				struct exynos_drm_plane *exynos_plane)
+{
+	struct drm_crtc *crtc = &exynos_crtc->base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (exynos_crtc->event) {
+	exynos_plane->pending_fb = NULL;
 
-		drm_send_vblank_event(dev, -1, exynos_crtc->event);
-		drm_vblank_put(dev, pipe);
-		wake_up(&exynos_crtc->pending_flip_queue);
+	if (atomic_dec_and_test(&exynos_crtc->pending_update))
+		wake_up(&exynos_crtc->wait_update);
 
-	}
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (exynos_crtc->event)
+		drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
 
 	exynos_crtc->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -237,7 +239,7 @@
 }
 
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type)
+				       enum exynos_drm_output_type out_type)
 {
 	struct drm_crtc *crtc;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70..f87d4ab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,14 @@
 					void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				   struct exynos_drm_plane *exynos_plane);
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
 
 /* This function returns the pipe of the crtc device matching out_type. */
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type);
+				       enum exynos_drm_output_type out_type);
 
 /*
  * This function calls the crtc device(manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c0..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include <linux/dma-buf.h>
-
-struct exynos_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
-	return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
-					struct device *dev,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach;
-
-	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
-	if (!exynos_attach)
-		return -ENOMEM;
-
-	exynos_attach->dir = DMA_NONE;
-	attach->priv = exynos_attach;
-
-	return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!exynos_attach)
-		return;
-
-	sgt = &exynos_attach->sgt;
-
-	if (exynos_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				exynos_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(exynos_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *
-		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
-	struct drm_device *dev = gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buf;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int nents, ret;
-
-	/* just return current sgt if already requested. */
-	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
-		return &exynos_attach->sgt;
-
-	buf = gem_obj->buffer;
-	if (!buf) {
-		DRM_ERROR("buffer is null.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sgt = &exynos_attach->sgt;
-
-	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	rd = buf->sgt->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	exynos_attach->is_mapped = true;
-	exynos_attach->dir = dir;
-	attach->priv = exynos_attach;
-
-	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return sgt;
-}
-
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-						struct sg_table *sgt,
-						enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num,
-						void *addr)
-{
-	/* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
-					unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
-					unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
-	struct vm_area_struct *vma)
-{
-	return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
-	.attach			= exynos_gem_attach_dma_buf,
-	.detach			= exynos_gem_detach_dma_buf,
-	.map_dma_buf		= exynos_gem_map_dma_buf,
-	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
-	.kmap			= exynos_gem_dmabuf_kmap,
-	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
-	.kunmap			= exynos_gem_dmabuf_kunmap,
-	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
-	.mmap			= exynos_gem_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &exynos_dmabuf_ops;
-	exp_info.size = exynos_gem_obj->base.size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return dma_buf_export(&exp_info);
-}
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	struct scatterlist *sgl;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer;
-	int ret;
-
-	/* is this one of own objects? */
-	if (dma_buf->ops == &exynos_dmabuf_ops) {
-		struct drm_gem_object *obj;
-
-		obj = dma_buf->priv;
-
-		/* is it from our device? */
-		if (obj->dev == drm_dev) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_reference(obj);
-			return obj;
-		}
-	}
-
-	attach = dma_buf_attach(dma_buf, drm_dev->dev);
-	if (IS_ERR(attach))
-		return ERR_PTR(-EINVAL);
-
-	get_dma_buf(dma_buf);
-
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		goto err_buf_detach;
-	}
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		ret = -ENOMEM;
-		goto err_unmap_attach;
-	}
-
-	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
-	sgl = sgt->sgl;
-
-	buffer->size = dma_buf->size;
-	buffer->dma_addr = sg_dma_address(sgl);
-
-	if (sgt->nents == 1) {
-		/* always physically continuous memory if sgt->nents is 1. */
-		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
-	} else {
-		/*
-		 * this case could be CONTIG or NONCONTIG type but for now
-		 * sets NONCONTIG.
-		 * TODO. we have to find a way that exporter can notify
-		 * the type of its own buffer to importer.
-		 */
-		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
-	}
-
-	exynos_gem_obj->buffer = buffer;
-	buffer->sgt = sgt;
-	exynos_gem_obj->base.import_attach = attach;
-
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
-								buffer->size);
-
-	return &exynos_gem_obj->base;
-
-err_free_buffer:
-	kfree(buffer);
-	buffer = NULL;
-err_unmap_attach:
-	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9f..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-						struct dma_buf *dma_buf);
-#endif
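
The hand-rolled dma-buf exporter/importer is removed outright. Its replacement is not visible in these hunks, but the deletion only makes sense if PRIME is routed through the core's generic helpers, which already provide the self-import short-circuit (taking a GEM reference when a buffer is reimported into the exporting device) that the deleted code implemented by hand. The way drivers typically wire those helpers up, shown as an illustrative fragment rather than the actual exynos change:

	static struct drm_driver sketch_driver = {
		/* ... */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_prime_export,
		.gem_prime_import	= drm_gem_prime_import,
		/* plus per-driver get_sg_table/import_sg_table hooks */
	};
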
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595..c748b87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
 #include <video/of_videomode.h>
 #include <video/videomode.h>
 
-#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 
 struct exynos_dpi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct device *dev;
 	struct device_node *panel_node;
 
 	struct drm_panel *panel;
 	struct drm_connector connector;
-	struct drm_encoder *encoder;
 
 	struct videomode *vm;
-	int dpms_mode;
 };
 
 #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
 
-static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dpi, display);
+	return container_of(e, struct exynos_dpi, encoder);
 }
 
 static enum drm_connector_status
@@ -99,7 +97,7 @@
 {
 	struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@
 	.best_encoder = exynos_dpi_best_encoder,
 };
 
-static int exynos_dpi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dpi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
-
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@
 	return 0;
 }
 
-static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
 {
+	return true;
+}
+
+static void exynos_dpi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_dpi_enable(struct drm_encoder *encoder)
+{
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_prepare(ctx->panel);
 		drm_panel_enable(ctx->panel);
 	}
 }
 
-static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+static void exynos_dpi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_disable(ctx->panel);
 		drm_panel_unprepare(ctx->panel);
 	}
 }
 
-static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dpi *ctx = display_to_dpi(display);
+static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dpi_mode_fixup,
+	.mode_set = exynos_dpi_mode_set,
+	.enable = exynos_dpi_enable,
+	.disable = exynos_dpi_disable,
+};
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
-				exynos_dpi_poweron(ctx);
-			break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
-			exynos_dpi_poweroff(ctx);
-		break;
-	default:
-		break;
-	}
-	ctx->dpms_mode = mode;
-}
-
-static struct exynos_drm_display_ops exynos_dpi_display_ops = {
-	.create_connector = exynos_dpi_create_connector,
-	.dpms = exynos_dpi_dpms
+static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 /* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@
 	return 0;
 }
 
-struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
+{
+	int ret;
+
+	ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+
+	ret = exynos_dpi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct drm_encoder *exynos_dpi_probe(struct device *dev)
 {
 	struct exynos_dpi *ctx;
 	int ret;
@@ -308,10 +332,7 @@
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	ctx->display.ops = &exynos_dpi_display_ops;
 	ctx->dev = dev;
-	ctx->dpms_mode = DRM_MODE_DPMS_OFF;
 
 	ret = exynos_dpi_parse_dt(ctx);
 	if (ret < 0) {
@@ -325,14 +346,14 @@
 			return ERR_PTR(-EPROBE_DEFER);
 	}
 
-	return &ctx->display;
+	return &ctx->encoder;
 }
 
-int exynos_dpi_remove(struct exynos_drm_display *display)
+int exynos_dpi_remove(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 
-	exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
+	exynos_dpi_disable(&ctx->encoder);
 
 	if (ctx->panel)
 		drm_panel_detach(ctx->panel);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c6..831d2e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,6 +13,8 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 
 #include <linux/component.h>
@@ -21,13 +23,11 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_vidi.h"
-#include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_iommu.h"
@@ -38,15 +38,112 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+struct exynos_atomic_commit {
+	struct work_struct	work;
+	struct drm_device	*dev;
+	struct drm_atomic_state *state;
+	u32			crtcs;
+};
+
+static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		if (!crtc->state->enable)
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		if (ret)
+			continue;
+
+		exynos_drm_crtc_wait_pending_update(exynos_crtc);
+		drm_crtc_vblank_put(crtc);
+	}
+}
+
+static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct drm_atomic_state *state = commit->state;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_plane_state *plane_state;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/*
+	 * Exynos can't update planes while CRTCs and encoders are disabled:
+	 * its update routines, especially for FIMD, require the clocks
+	 * to be enabled. So it is necessary to handle the modeset operations
+	 * *before* the commit_planes() step; this way they will always
+	 * have the relevant clocks enabled to perform the update.
+	 */
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		atomic_set(&exynos_crtc->pending_update, 0);
+	}
+
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		struct exynos_drm_crtc *exynos_crtc =
+						to_exynos_crtc(plane->crtc);
+
+		if (!plane->crtc)
+			continue;
+
+		atomic_inc(&exynos_crtc->pending_update);
+	}
+
+	drm_atomic_helper_commit_planes(dev, state);
+
+	exynos_atomic_wait_for_commit(state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_free(state);
+
+	spin_lock(&priv->lock);
+	priv->pending &= ~commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	wake_up_all(&priv->wait);
+
+	kfree(commit);
+}
+
+static void exynos_drm_atomic_work(struct work_struct *work)
+{
+	struct exynos_atomic_commit *commit = container_of(work,
+				struct exynos_atomic_commit, work);
+
+	exynos_atomic_commit_complete(commit);
+}
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
-	int ret;
+	struct drm_encoder *encoder;
+	unsigned int clone_mask;
+	int cnt, ret;
 
 	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
 	if (!private)
 		return -ENOMEM;
 
+	init_waitqueue_head(&private->wait);
+	spin_lock_init(&private->lock);
+
 	dev_set_drvdata(dev->dev, dev);
 	dev->dev_private = (void *)private;
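
The helpers above implement the commit-completion protocol: each CRTC's
pending_update counter is zeroed, bumped once for every plane the commit
touches, and exynos_atomic_wait_for_commit() then holds a vblank reference
while the counters drain (the fimd interrupt handler further down calls
exynos_drm_crtc_finish_update() once the hardware has latched a plane's new
buffer address, which is what lets a counter reach zero). A minimal
userspace model of that counting scheme, with illustrative names rather
than the driver's API:

    /* Toy model of the pending_update protocol; single-threaded, so the
     * atomics only document intent.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct model_crtc { atomic_int pending_update; };

    static void queue_plane(struct model_crtc *c)
    {
        atomic_fetch_add(&c->pending_update, 1);    /* commit_planes() */
    }

    static void plane_latched(struct model_crtc *c)
    {
        atomic_fetch_sub(&c->pending_update, 1);    /* vblank IRQ */
    }

    int main(void)
    {
        struct model_crtc c = { 0 };

        queue_plane(&c);
        queue_plane(&c);        /* two planes queued on one CRTC */
        plane_latched(&c);
        plane_latched(&c);      /* both latched at vsync */

        /* the wait in exynos_atomic_wait_for_commit() ends here */
        printf("pending = %d\n", atomic_load(&c.pending_update));
        return 0;
    }
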
 
@@ -67,7 +164,13 @@
 	exynos_drm_mode_config_init(dev);
 
 	/* setup possible_clones. */
-	exynos_drm_encoder_setup(dev);
+	cnt = 0;
+	clone_mask = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		clone_mask |= (1 << (cnt++));
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		encoder->possible_clones = clone_mask;
 
 	platform_set_drvdata(dev->platformdev, dev);
 
@@ -143,6 +246,64 @@
 	return 0;
 }
 
+static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
+{
+	bool pending;
+
+	spin_lock(&priv->lock);
+	pending = priv->pending & crtcs;
+	spin_unlock(&priv->lock);
+
+	return pending;
+}
+
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async)
+{
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct exynos_atomic_commit *commit;
+	int i, ret;
+
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (!commit)
+		return -ENOMEM;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		kfree(commit);
+		return ret;
+	}
+
+	/* This is the point of no return */
+
+	INIT_WORK(&commit->work, exynos_drm_atomic_work);
+	commit->dev = dev;
+	commit->state = state;
+
+	/*
+	 * Wait until all affected CRTCs have completed previous commits and
+	 * mark them as pending.
+	 */
+	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+		if (state->crtcs[i])
+			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+	}
+
+	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
+
+	spin_lock(&priv->lock);
+	priv->pending |= commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (async)
+		schedule_work(&commit->work);
+	else
+		exynos_atomic_commit_complete(commit);
+
+	return 0;
+}
+
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
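
exynos_atomic_commit() serializes commits per CRTC with a bitmask: a commit
sleeps until none of its CRTCs are marked in priv->pending, marks them, and
exynos_atomic_commit_complete() clears the bits and wakes every sleeper,
whose commit_is_pending() then re-checks the condition under priv->lock.
Commits touching disjoint CRTC sets can therefore run concurrently while
overlapping ones queue up. The gating logic, sketched in isolation with
the locking elided:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t pending;    /* bit n set: CRTC n has a commit in flight */

    static bool commit_may_start(uint32_t crtcs)
    {
        return (pending & crtcs) == 0;    /* !commit_is_pending() */
    }

    static void commit_begin(uint32_t crtcs) { pending |= crtcs; }  /* mark  */
    static void commit_end(uint32_t crtcs)   { pending &= ~crtcs; } /* clear */
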
@@ -242,25 +403,25 @@
 
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
+			DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
-			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
-			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
-			exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
-			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
-			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
-			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
-			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
-			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
-			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -277,11 +438,10 @@
 };
 
 static struct drm_driver exynos_drm_driver = {
-	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+				  | DRIVER_ATOMIC | DRIVER_RENDER,
 	.load			= exynos_drm_load,
 	.unload			= exynos_drm_unload,
-	.suspend		= exynos_drm_suspend,
-	.resume			= exynos_drm_resume,
 	.open			= exynos_drm_open,
 	.preclose		= exynos_drm_preclose,
 	.lastclose		= exynos_drm_lastclose,
@@ -297,8 +457,12 @@
 	.dumb_destroy		= drm_gem_dumb_destroy,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
-	.gem_prime_export	= exynos_dmabuf_prime_export,
-	.gem_prime_import	= exynos_dmabuf_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table	= exynos_drm_gem_prime_import_sg_table,
+	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
+	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
 	.ioctls			= exynos_ioctls,
 	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
 	.fops			= &exynos_drm_driver_fops,
@@ -345,9 +509,6 @@
  * because connector requires pipe number of its crtc during initialization.
  */
 static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-	&vidi_driver,
-#endif
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	&fimd_driver,
 #endif
@@ -370,6 +531,9 @@
 	&mixer_driver,
 	&hdmi_driver,
 #endif
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	&vidi_driver,
+#endif
 };
 
 static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f16..b7ba21d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@
  *	- the unit is screen coordinates.
  * @src_y: offset y on a framebuffer to be displayed.
  *	- the unit is screen coordinates.
- * @src_width: width of a partial image to be displayed from framebuffer.
- * @src_height: height of a partial image to be displayed from framebuffer.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
+ * @src_w: width of a partial image to be displayed from framebuffer.
+ * @src_h: height of a partial image to be displayed from framebuffer.
  * @crtc_x: offset x on hardware screen.
  * @crtc_y: offset y on hardware screen.
- * @crtc_width: window width to be displayed (hardware screen).
- * @crtc_height: window height to be displayed (hardware screen).
- * @mode_width: width of screen mode.
- * @mode_height: height of screen mode.
+ * @crtc_w: window width to be displayed (hardware screen).
+ * @crtc_h: window height to be displayed (hardware screen).
  * @h_ratio: horizontal scaling ratio, 16.16 fixed point
  * @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @refresh: refresh rate.
- * @scan_flag: interlace or progressive way.
- *	(it could be DRM_MODE_FLAG_*)
- * @bpp: pixel size.(in bit)
- * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus (i.e. DMA) addresses of the memory region
  *	      allocated for an overlay.
  * @zpos: order of overlay layer (z position).
@@ -73,73 +64,17 @@
 	struct drm_plane base;
 	unsigned int src_x;
 	unsigned int src_y;
-	unsigned int src_width;
-	unsigned int src_height;
-	unsigned int fb_width;
-	unsigned int fb_height;
+	unsigned int src_w;
+	unsigned int src_h;
 	unsigned int crtc_x;
 	unsigned int crtc_y;
-	unsigned int crtc_width;
-	unsigned int crtc_height;
-	unsigned int mode_width;
-	unsigned int mode_height;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
 	unsigned int h_ratio;
 	unsigned int v_ratio;
-	unsigned int refresh;
-	unsigned int scan_flag;
-	unsigned int bpp;
-	unsigned int pitch;
-	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
 	unsigned int zpos;
-};
-
-/*
- * Exynos DRM Display Structure.
- *	- this structure is common to analog tv, digital tv and lcd panel.
- *
- * @create_connector: initialize and register a new connector
- * @remove: cleans up the display for removal
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- *	      would be called by encoder->mode_set().
- * @check_mode: check if mode is valid or not.
- * @dpms: display device on or off.
- * @commit: apply changes to hw
- */
-struct exynos_drm_display;
-struct exynos_drm_display_ops {
-	int (*create_connector)(struct exynos_drm_display *display,
-				struct drm_encoder *encoder);
-	void (*remove)(struct exynos_drm_display *display);
-	void (*mode_fixup)(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
-	void (*mode_set)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	int (*check_mode)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	void (*dpms)(struct exynos_drm_display *display, int mode);
-	void (*commit)(struct exynos_drm_display *display);
-};
-
-/*
- * Exynos drm display structure, maps 1:1 with an encoder/connector
- *
- * @list: the list entry for this manager
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @encoder: encoder object this display maps to
- * @connector: connector object this display maps to
- * @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the display's implementation specific context
- */
-struct exynos_drm_display {
-	struct list_head list;
-	enum exynos_drm_output_type type;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	struct exynos_drm_display_ops *ops;
+	struct drm_framebuffer *pending_fb;
 };
 
 /*
@@ -153,8 +88,10 @@
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  * @wait_for_vblank: wait for vblank interrupt to make sure that
  *	hardware overlay is updated.
- * @win_commit: apply hardware specific overlay data to registers.
- * @win_disable: disable hardware specific overlay.
+ * @atomic_begin: prepare a window to receive an update
+ * @atomic_flush: mark the end of a window update
+ * @update_plane: apply hardware specific overlay data to registers.
+ * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *	synchronization signal if there is a page flip request.
  * @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +110,16 @@
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
-	void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
-	void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+	void (*atomic_begin)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*update_plane)(struct exynos_drm_crtc *crtc,
+			     struct exynos_drm_plane *plane);
+	void (*disable_plane)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*atomic_flush)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
 	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
-	void (*clear_channels)(struct exynos_drm_crtc *crtc);
 };
 
 /*
@@ -194,6 +136,8 @@
  *	this pipe value.
  * @enabled: if the crtc is enabled or not
  * @event: vblank event that is currently queued for flip
+ * @wait_update: wait for all pending plane updates to finish
+ * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
  */
@@ -201,9 +145,9 @@
 	struct drm_crtc			base;
 	enum exynos_drm_output_type	type;
 	unsigned int			pipe;
-	bool				enabled;
-	wait_queue_head_t		pending_flip_queue;
 	struct drm_pending_vblank_event	*event;
+	wait_queue_head_t		wait_update;
+	atomic_t			pending_update;
 	const struct exynos_drm_crtc_ops	*ops;
 	void				*ctx;
 };
@@ -229,6 +173,9 @@
  * @da_space_size: size of device address space.
  *	if 0 then default value is used for it.
  * @pipe: the pipe number for this crtc/manager.
+ * @pending: bitmask of crtcs that still have updates in flight
+ * @lock: protect access to @pending
+ * @wait: wait for an atomic commit to finish
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -244,6 +191,11 @@
 	unsigned long da_space_size;
 
 	unsigned int pipe;
+
+	/* for atomic commit */
+	u32			pending;
+	spinlock_t		lock;
+	wait_queue_head_t	wait;
 };
 
 /*
@@ -285,20 +237,26 @@
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
-struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct exynos_drm_display *display);
+struct drm_encoder *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct drm_encoder *encoder);
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
 #else
-static inline struct exynos_drm_display *
+static inline struct drm_encoder *
 exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+static inline int exynos_dpi_remove(struct drm_encoder *encoder)
+{
+	return 0;
+}
+static inline int exynos_dpi_bind(struct drm_device *dev,
+				  struct drm_encoder *encoder)
 {
 	return 0;
 }
 #endif
 
-/* This function creates a encoder and a connector, and initializes them. */
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-				struct exynos_drm_display *display);
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async);
+
 
 extern struct platform_driver fimd_driver;
 extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36..12b03b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@
 };
 
 struct exynos_dsi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct mipi_dsi_host dsi_host;
 	struct drm_connector connector;
 	struct device_node *panel_node;
@@ -295,9 +295,9 @@
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
-static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dsi, display);
+	return container_of(e, struct exynos_dsi, encoder);
 }
 
 enum reg_idx {
@@ -1272,7 +1272,7 @@
 static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
 {
 	struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
-	struct drm_encoder *encoder = dsi->display.encoder;
+	struct drm_encoder *encoder = &dsi->encoder;
 
 	if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
 		exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@
 		dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
 }
 
-static int exynos_dsi_enable(struct exynos_dsi *dsi)
+static void exynos_dsi_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	int ret;
 
 	if (dsi->state & DSIM_STATE_ENABLED)
-		return 0;
+		return;
 
 	ret = exynos_dsi_poweron(dsi);
 	if (ret < 0)
-		return ret;
+		return;
 
 	dsi->state |= DSIM_STATE_ENABLED;
 
@@ -1535,7 +1536,7 @@
 	if (ret < 0) {
 		dsi->state &= ~DSIM_STATE_ENABLED;
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@
 		exynos_dsi_set_display_enable(dsi, false);
 		drm_panel_unprepare(dsi->panel);
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
-	return 0;
 }
 
-static void exynos_dsi_disable(struct exynos_dsi *dsi)
+static void exynos_dsi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
 	if (!(dsi->state & DSIM_STATE_ENABLED))
 		return;
 
@@ -1571,26 +1572,6 @@
 	exynos_dsi_poweroff(dsi);
 }
 
-static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dsi *dsi = display_to_dsi(display);
-
-	if (dsi->panel) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			exynos_dsi_enable(dsi);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			exynos_dsi_disable(dsi);
-			break;
-		default:
-			break;
-		}
-	}
-}
-
 static enum drm_connector_status
 exynos_dsi_detect(struct drm_connector *connector, bool force)
 {
@@ -1601,10 +1582,10 @@
 		if (dsi->panel)
 			drm_panel_attach(dsi->panel, &dsi->connector);
 	} else if (!dsi->panel_node) {
-		struct exynos_drm_display *display;
+		struct drm_encoder *encoder;
 
-		display = platform_get_drvdata(to_platform_device(dsi->dev));
-		exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+		encoder = platform_get_drvdata(to_platform_device(dsi->dev));
+		exynos_dsi_disable(encoder);
 		drm_panel_detach(dsi->panel);
 		dsi->panel = NULL;
 	}
@@ -1647,7 +1628,7 @@
 {
 	struct exynos_dsi *dsi = connector_to_dsi(connector);
 
-	return dsi->display.encoder;
+	return &dsi->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@
 	.best_encoder = exynos_dsi_best_encoder,
 };
 
-static int exynos_dsi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_connector *connector = &dsi->connector;
 	int ret;
 
@@ -1679,26 +1659,40 @@
 	return 0;
 }
 
-static void exynos_dsi_mode_set(struct exynos_drm_display *display,
-			 struct drm_display_mode *mode)
+static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
-	struct videomode *vm = &dsi->vm;
-
-	vm->hactive = mode->hdisplay;
-	vm->vactive = mode->vdisplay;
-	vm->vfront_porch = mode->vsync_start - mode->vdisplay;
-	vm->vback_porch = mode->vtotal - mode->vsync_end;
-	vm->vsync_len = mode->vsync_end - mode->vsync_start;
-	vm->hfront_porch = mode->hsync_start - mode->hdisplay;
-	vm->hback_porch = mode->htotal - mode->hsync_end;
-	vm->hsync_len = mode->hsync_end - mode->hsync_start;
+	return true;
 }
 
-static struct exynos_drm_display_ops exynos_dsi_display_ops = {
-	.create_connector = exynos_dsi_create_connector,
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+	struct videomode *vm = &dsi->vm;
+	struct drm_display_mode *m = adjusted_mode;
+
+	vm->hactive = m->hdisplay;
+	vm->vactive = m->vdisplay;
+	vm->vfront_porch = m->vsync_start - m->vdisplay;
+	vm->vback_porch = m->vtotal - m->vsync_end;
+	vm->vsync_len = m->vsync_end - m->vsync_start;
+	vm->hfront_porch = m->hsync_start - m->hdisplay;
+	vm->hback_porch = m->htotal - m->hsync_end;
+	vm->hsync_len = m->hsync_end - m->hsync_start;
+}
+
+static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dsi_mode_fixup,
 	.mode_set = exynos_dsi_mode_set,
-	.dpms = exynos_dsi_dpms
+	.enable = exynos_dsi_enable,
+	.disable = exynos_dsi_disable,
+};
+
+static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
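
The new mode_set hook only records the adjusted mode's timings in the
videomode structure the hardware code consumes; each porch is a plain
difference of DRM timing fields. A worked example, using the standard
1920x1080@60 CEA timings (picked purely for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* CEA-861 1080p60: hdisplay 1920, hsync 2008..2052, htotal 2200 */
        int hdisplay = 1920, hsync_start = 2008,
            hsync_end = 2052, htotal = 2200;

        printf("hfront_porch = %d\n", hsync_start - hdisplay);  /* 88  */
        printf("hsync_len    = %d\n", hsync_end - hsync_start); /* 44  */
        printf("hback_porch  = %d\n", htotal - hsync_end);      /* 148 */
        return 0;
    }
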
@@ -1821,22 +1815,35 @@
 static int exynos_dsi_bind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_device *drm_dev = data;
 	struct drm_bridge *bridge;
 	int ret;
 
-	ret = exynos_drm_create_enc_conn(drm_dev, display);
+	ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+
+	ret = exynos_dsi_create_connector(encoder);
 	if (ret) {
-		DRM_ERROR("Encoder create [%d] failed with %d\n",
-			  display->type, ret);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
 
 	bridge = of_drm_find_bridge(dsi->bridge_node);
 	if (bridge) {
-		display->encoder->bridge = bridge;
 		drm_bridge_attach(drm_dev, bridge);
 	}
 
@@ -1846,10 +1853,10 @@
 static void exynos_dsi_unbind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 
-	exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+	exynos_dsi_disable(encoder);
 
 	mipi_dsi_host_unregister(&dsi->dsi_host);
 }
@@ -1870,9 +1877,6 @@
 	if (!dsi)
 		return -ENOMEM;
 
-	dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dsi->display.ops = &exynos_dsi_display_ops;
-
 	/* To be checked as invalid one */
 	dsi->te_gpio = -ENOENT;
 
@@ -1948,7 +1952,7 @@
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, &dsi->display);
+	platform_set_drvdata(pdev, &dsi->encoder);
 
 	return component_add(dev, &exynos_dsi_component_ops);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd5..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* exynos_drm_encoder.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-
-#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
-				drm_encoder)
-
-/*
- * exynos specific encoder structure.
- *
- * @drm_encoder: encoder object.
- * @display: the display structure that maps to this encoder
- */
-struct exynos_drm_encoder {
-	struct drm_encoder		drm_encoder;
-	struct exynos_drm_display	*display;
-};
-
-static bool
-exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
-			       const struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	struct drm_connector *connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder != encoder)
-			continue;
-
-		if (display->ops->mode_fixup)
-			display->ops->mode_fixup(display, connector, mode,
-					adjusted_mode);
-	}
-
-	return true;
-}
-
-static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
-					 struct drm_display_mode *mode,
-					 struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->mode_set)
-		display->ops->mode_set(display, adjusted_mode);
-}
-
-static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_ON);
-
-	if (display->ops->commit)
-		display->ops->commit(display);
-}
-
-static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_OFF);
-}
-
-static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
-	.mode_fixup	= exynos_drm_encoder_mode_fixup,
-	.mode_set	= exynos_drm_encoder_mode_set,
-	.enable		= exynos_drm_encoder_enable,
-	.disable	= exynos_drm_encoder_disable,
-};
-
-static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-
-	drm_encoder_cleanup(encoder);
-	kfree(exynos_encoder);
-}
-
-static struct drm_encoder_funcs exynos_encoder_funcs = {
-	.destroy = exynos_drm_encoder_destroy,
-};
-
-static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
-{
-	struct drm_encoder *clone;
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	unsigned int clone_mask = 0;
-	int cnt = 0;
-
-	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
-		switch (display->type) {
-		case EXYNOS_DISPLAY_TYPE_LCD:
-		case EXYNOS_DISPLAY_TYPE_HDMI:
-		case EXYNOS_DISPLAY_TYPE_VIDI:
-			clone_mask |= (1 << (cnt++));
-			break;
-		default:
-			continue;
-		}
-	}
-
-	return clone_mask;
-}
-
-void exynos_drm_encoder_setup(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		encoder->possible_clones = exynos_drm_encoder_clones(encoder);
-}
-
-struct drm_encoder *
-exynos_drm_encoder_create(struct drm_device *dev,
-			   struct exynos_drm_display *display,
-			   unsigned long possible_crtcs)
-{
-	struct drm_encoder *encoder;
-	struct exynos_drm_encoder *exynos_encoder;
-
-	if (!possible_crtcs)
-		return NULL;
-
-	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
-	if (!exynos_encoder)
-		return NULL;
-
-	exynos_encoder->display = display;
-	encoder = &exynos_encoder->drm_encoder;
-	encoder->possible_crtcs = possible_crtcs;
-
-	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
-	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
-			DRM_MODE_ENCODER_TMDS);
-
-	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
-
-	DRM_DEBUG_KMS("encoder has been created\n");
-
-	return encoder;
-}
-
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
-{
-	return to_exynos_encoder(encoder)->display;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ENCODER_H_
-#define _EXYNOS_DRM_ENCODER_H_
-
-void exynos_drm_encoder_setup(struct drm_device *dev);
-struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
-			struct exynos_drm_display *mgr,
-			unsigned long possible_crtcs);
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e..0842808 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
@@ -33,12 +32,10 @@
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer object.
- * @buf_cnt: a buffer count to drm framebuffer.
  * @exynos_gem_obj: array of exynos specific gem objects.
  */
 struct exynos_drm_fb {
 	struct drm_framebuffer		fb;
-	unsigned int			buf_cnt;
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
@@ -98,10 +95,6 @@
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
 
-	/* This fb should have only one gem object. */
-	if (WARN_ON(exynos_fb->buf_cnt != 1))
-		return -EINVAL;
-
 	return drm_gem_handle_create(file_priv,
 			&exynos_fb->exynos_gem_obj[0]->base, handle);
 }
@@ -122,138 +115,96 @@
 	.dirty		= exynos_drm_fb_dirty,
 };
 
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	exynos_fb->buf_cnt = cnt;
-}
-
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	return exynos_fb->buf_cnt;
-}
-
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj)
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count)
 {
 	struct exynos_drm_fb *exynos_fb;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int i;
 	int ret;
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
 
+	for (i = 0; i < count; i++) {
+		ret = check_fb_gem_memory_type(dev, gem_obj[i]);
+		if (ret < 0)
+			goto err;
+
+		exynos_fb->exynos_gem_obj[i] = gem_obj[i];
+	}
+
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
 
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		kfree(exynos_fb);
+	if (ret < 0) {
 		DRM_ERROR("failed to initialize framebuffer\n");
-		return ERR_PTR(ret);
+		goto err;
 	}
 
 	return &exynos_fb->fb;
+
+err:
+	kfree(exynos_fb);
+	return ERR_PTR(ret);
 }
 
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_fb *exynos_fb;
-	int i, ret;
+	struct drm_framebuffer *fb;
+	int i;
+	int ret;
 
-	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb)
-		return ERR_PTR(-ENOMEM);
-
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object\n");
-		ret = -ENOENT;
-		goto err_free;
-	}
-
-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-	exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
-
-	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
-
-	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
 		obj = drm_gem_object_lookup(dev, file_priv,
-				mode_cmd->handles[i]);
+					    mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
 			ret = -ENOENT;
-			exynos_fb->buf_cnt = i;
-			goto err_unreference;
+			goto err;
 		}
 
-		exynos_gem_obj = to_exynos_gem_obj(obj);
-		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
-
-		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-		if (ret < 0)
-			goto err_unreference;
+		gem_objs[i] = to_exynos_gem_obj(obj);
 	}
 
-	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		DRM_ERROR("failed to init framebuffer.\n");
-		goto err_unreference;
+	fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto err;
 	}
 
-	return &exynos_fb->fb;
+	return fb;
 
-err_unreference:
-	for (i = 0; i < exynos_fb->buf_cnt; i++) {
-		struct drm_gem_object *obj;
+err:
+	while (i--)
+		drm_gem_object_unreference_unlocked(&gem_objs[i]->base);
 
-		obj = &exynos_fb->exynos_gem_obj[i]->base;
-		if (obj)
-			drm_gem_object_unreference_unlocked(obj);
-	}
-err_free:
-	kfree(exynos_fb);
 	return ERR_PTR(ret);
 }
 
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
-						int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+						 int index)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *obj;
 
 	if (index >= MAX_FB_BUFFER)
 		return NULL;
 
-	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
-	if (!buffer)
+	obj = exynos_fb->exynos_gem_obj[index];
+	if (!obj)
 		return NULL;
 
-	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
 
-	return buffer;
+	return obj;
 }
 
 static void exynos_drm_output_poll_changed(struct drm_device *dev)
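
The rewritten exynos_user_fb_create() takes one GEM reference per colour
plane reported by drm_format_num_planes() and relies on the while (i--)
idiom to drop exactly the references acquired before a failure. The shape
of that acquire-or-roll-back pattern as a self-contained sketch, with
acquire()/release() standing in for the GEM lookup/unreference calls:

    #include <stdbool.h>
    #include <stdio.h>

    static bool acquire(int i) { printf("acquire %d\n", i); return i < 2; }
    static void release(int i) { printf("release %d\n", i); }

    static bool acquire_all(int count)
    {
        for (int i = 0; i < count; i++) {
            if (!acquire(i)) {
                while (i--)        /* drops i-1 .. 0; nothing if i == 0 */
                    release(i);
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        acquire_all(4);        /* fails at i == 2, then releases 1 and 0 */
        return 0;
    }
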
@@ -267,41 +218,6 @@
 		exynos_drm_fbdev_init(dev);
 }
 
-static int exynos_atomic_commit(struct drm_device *dev,
-				struct drm_atomic_state *state,
-				bool async)
-{
-	int ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* This is the point of no return */
-
-	drm_atomic_helper_swap_state(dev, state);
-
-	drm_atomic_helper_commit_modeset_disables(dev, state);
-
-	drm_atomic_helper_commit_modeset_enables(dev, state);
-
-	/*
-	 * Exynos can't update planes with CRTCs and encoders disabled,
-	 * its updates routines, specially for FIMD, requires the clocks
-	 * to be enabled. So it is necessary to handle the modeset operations
-	 * *before* the commit_planes() step, this way it will always
-	 * have the relevant clocks enabled to perform the update.
-	 */
-
-	drm_atomic_helper_commit_planes(dev, state);
-
-	drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b..85e4445 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,22 +14,18 @@
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
+#include "exynos_drm_gem.h"
+
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj);
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count);
 
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
 						 int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
-/* set a buffer count to drm framebuffer. */
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt);
-
-/* get a buffer count to drm framebuffer. */
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
-
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b..a221f75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -21,7 +21,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR		4
@@ -32,7 +31,7 @@
 
 struct exynos_drm_fbdev {
 	struct drm_fb_helper		drm_fb_helper;
-	struct exynos_drm_gem_obj	*exynos_gem_obj;
+	struct exynos_drm_gem_obj	*obj;
 };
 
 static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -40,8 +39,7 @@
 {
 	struct drm_fb_helper *helper = info->par;
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	unsigned long vm_size;
 	int ret;
 
@@ -49,11 +47,11 @@
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	if (vm_size > buffer->size)
+	if (vm_size > obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
-		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+			     obj->size, &obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -65,9 +63,9 @@
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap        = exynos_drm_fb_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -76,42 +74,42 @@
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-				     struct drm_fb_helper_surface_size *sizes,
-				     struct drm_framebuffer *fb)
+				   struct drm_fb_helper_surface_size *sizes,
+				   struct exynos_drm_gem_obj *obj)
 {
-	struct fb_info *fbi = helper->fbdev;
-	struct exynos_drm_gem_buf *buffer;
+	struct fb_info *fbi;
+	struct drm_framebuffer *fb = helper->fb;
 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
 	unsigned int nr_pages;
 	unsigned long offset;
 
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		DRM_ERROR("failed to allocate fb info.\n");
+		return PTR_ERR(fbi);
+	}
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &exynos_drm_fb_ops;
+
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
-	/* RGB formats use only one buffer */
-	buffer = exynos_drm_fb_buffer(fb, 0);
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
-		return -EFAULT;
-	}
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-	nr_pages = buffer->size >> PAGE_SHIFT;
-
-	buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
-			nr_pages, VM_MAP,
+	obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
 			pgprot_writecombine(PAGE_KERNEL));
-	if (!buffer->kvaddr) {
+	if (!obj->kvaddr) {
 		DRM_ERROR("failed to map pages to kernel space.\n");
+		drm_fb_helper_release_fbi(helper);
 		return -EIO;
 	}
 
-	/* buffer count to framebuffer always is 1 at booting time. */
-	exynos_drm_fb_set_buf_cnt(fb, 1);
-
 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
 	offset += fbi->var.yoffset * fb->pitches[0];
 
-	fbi->screen_base = buffer->kvaddr + offset;
+	fbi->screen_base = obj->kvaddr + offset;
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
@@ -122,9 +120,8 @@
 				    struct drm_fb_helper_surface_size *sizes)
 {
 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj;
 	struct drm_device *dev = helper->dev;
-	struct fb_info *fbi;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct platform_device *pdev = dev->platformdev;
 	unsigned long size;
@@ -142,69 +139,44 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, &pdev->dev);
-	if (!fbi) {
-		DRM_ERROR("failed to allocate fb info.\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+	obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
 	/*
 	 * If physically contiguous memory allocation fails and if IOMMU is
 	 * supported then try to get buffer from non physically contiguous
 	 * memory area.
 	 */
-	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+	if (IS_ERR(obj) && is_drm_iommu_supported(dev)) {
 		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-							size);
+		obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
 	}
 
-	if (IS_ERR(exynos_gem_obj)) {
-		ret = PTR_ERR(exynos_gem_obj);
-		goto err_release_framebuffer;
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out;
 	}
 
-	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+	exynos_fbdev->obj = obj;
 
-	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
-			&exynos_gem_obj->base);
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1);
 	if (IS_ERR(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
 		goto err_destroy_gem;
 	}
 
-	helper->fbdev = fbi;
-
-	fbi->par = helper;
-	fbi->flags = FBINFO_FLAG_DEFAULT;
-	fbi->fbops = &exynos_drm_fb_ops;
-
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("failed to allocate cmap.\n");
-		goto err_destroy_framebuffer;
-	}
-
-	ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
+	ret = exynos_drm_fbdev_update(helper, sizes, obj);
 	if (ret < 0)
-		goto err_dealloc_cmap;
+		goto err_destroy_framebuffer;
 
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
-err_dealloc_cmap:
-	fb_dealloc_cmap(&fbi->cmap);
 err_destroy_framebuffer:
 	drm_framebuffer_cleanup(helper->fb);
 err_destroy_gem:
-	exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_framebuffer:
-	framebuffer_release(fbi);
+	exynos_drm_gem_destroy(obj);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -297,11 +269,11 @@
 				      struct drm_fb_helper *fb_helper)
 {
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	struct drm_framebuffer *fb;
 
-	if (exynos_gem_obj->buffer->kvaddr)
-		vunmap(exynos_gem_obj->buffer->kvaddr);
+	if (obj->kvaddr)
+		vunmap(obj->kvaddr);
 
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +284,8 @@
 		}
 	}
 
-	/* release linux framebuffer */
-	if (fb_helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fb_helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(fb_helper);
+	drm_fb_helper_release_fbi(fb_helper);
 
 	drm_fb_helper_fini(fb_helper);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c..750a9e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -59,6 +59,7 @@
 #define VIDWnALPHA1(win)	(VIDW_ALPHA + 0x04 + (win) * 8)
 
 #define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_START_S(win, buf)	(VIDW_BUF_START_S(buf) + (win) * 8)
 #define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8)
 #define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
 
@@ -169,7 +170,7 @@
 
 	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id fimd_driver_dt_match[] = {
@@ -187,6 +188,14 @@
 };
 MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 
+static const uint32_t fimd_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
@@ -348,13 +357,6 @@
 	pm_runtime_put(ctx->dev);
 }
 
-static void fimd_iommu_detach_devices(struct fimd_context *ctx)
-{
-	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
-}
-
 static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
 		const struct drm_display_mode *mode)
 {
@@ -486,9 +488,9 @@
 }
 
 
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+				struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = WINCONx_ENWIN;
@@ -498,11 +500,11 @@
 	 * So the request format is ARGB8888 then change it to XRGB8888.
 	 */
 	if (ctx->driver_data->has_limited_fmt && !win) {
-		if (plane->pixel_format == DRM_FORMAT_ARGB8888)
-			plane->pixel_format = DRM_FORMAT_XRGB8888;
+		if (fb->pixel_format == DRM_FORMAT_ARGB8888)
+			fb->pixel_format = DRM_FORMAT_XRGB8888;
 	}
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		val |= WINCON0_BPPMODE_8BPP_PALETTE;
 		val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +540,7 @@
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +550,7 @@
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_4WORD;
 	}
@@ -598,6 +600,16 @@
 {
 	u32 reg, bits, val;
 
+	/*
+	 * The SHADOWCON/PRTCON register controls when register updates
+	 * take effect.
+	 *
+	 * For example, if DMA is started while only the width field of a
+	 * register has been set, the FIMD hardware could malfunction. With
+	 * the protect window setting, the register fields with the '_F'
+	 * prefix are not updated at vsync but are latched once the window
+	 * is unprotected.
+	 */
+
 	if (ctx->driver_data->has_shadowcon) {
 		reg = SHADOWCON;
 		bits = SHADOWCON_WINx_PROTECT(win);
@@ -614,41 +626,45 @@
 	writel(val, ctx->regs + reg);
 }
 
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_atomic_begin(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+
+	if (ctx->suspended)
+		return;
+
+	fimd_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	fimd_shadow_protect_win(ctx, plane->zpos, false);
+}
+
+static void fimd_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
 	dma_addr_t dma_addr;
 	unsigned long val, size, offset;
 	unsigned int last_x, last_y, buf_offsize, line_size;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
-	if (ctx->suspended)
-		return;
-
-	/*
-	 * SHADOWCON/PRTCON register is used for enabling timing.
-	 *
-	 * for example, once only width value of a register is set,
-	 * if the dma is started then fimd hardware could malfunction so
-	 * with protect window setting, the register fields with prefix '_F'
-	 * wouldn't be updated at vsync also but updated once unprotect window
-	 * is set.
-	 */
-
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
-
-
-	offset = plane->src_x * (plane->bpp >> 3);
-	offset += plane->src_y * plane->pitch;
+	offset = plane->src_x * bpp;
+	offset += plane->src_y * pitch;
 
 	/* buffer start address */
 	dma_addr = plane->dma_addr[0] + offset;
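
The new atomic hooks split the protect/program/unprotect sequence that
fimd_win_commit() used to perform per update: atomic_begin() protects the
window's shadow registers, update_plane() then programs them freely, and
atomic_flush() unprotects so that every '_F' field latches together at the
next vsync. A toy model of such shadowed registers (entirely illustrative,
the field names are made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Writes land in the shadow copy; the live copy only picks them up
     * at "vsync", and only while the window is unprotected.
     */
    struct win { unsigned int shadow, live; bool protect; };

    static void write_reg(struct win *w, unsigned int v) { w->shadow = v; }

    static void vsync(struct win *w)
    {
        if (!w->protect)
            w->live = w->shadow;    /* all fields latch at once */
    }

    int main(void)
    {
        struct win w = { .live = 1 };

        w.protect = true;     /* fimd_atomic_begin()             */
        write_reg(&w, 2);     /* partial update in flight...     */
        vsync(&w);            /* ...must not reach the hardware  */
        w.protect = false;    /* fimd_atomic_flush()             */
        vsync(&w);            /* the complete update latches now */
        printf("live = %u\n", w.live);    /* 2 */
        return 0;
    }
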
@@ -656,18 +672,18 @@
 	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
 	/* buffer end address */
-	size = plane->pitch * plane->crtc_height;
+	size = pitch * plane->crtc_h;
 	val = (unsigned long)(dma_addr + size);
 	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
 	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
 			(unsigned long)dma_addr, val, size);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/* buffer size */
-	buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
-	line_size = plane->crtc_width * (plane->bpp >> 3);
+	buf_offsize = pitch - (plane->crtc_w * bpp);
+	line_size = plane->crtc_w * bpp;
 	val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
 		VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
 		VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +697,10 @@
 		VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -701,13 +717,13 @@
 		u32 offset = VIDOSD_D(win);
 		if (win == 0)
 			offset = VIDOSD_C(win);
-		val = plane->crtc_width * plane->crtc_height;
+		val = plane->crtc_w * plane->crtc_h;
 		writel(val, ctx->regs + offset);
 
 		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
 	}
 
-	fimd_win_set_pixfmt(ctx, win);
+	fimd_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -718,36 +734,23 @@
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, true);
 
-	/* Enable DMA channel and unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
-
 	if (ctx->i80_if)
 		atomic_set(&ctx->win_updated, 1);
 }
 
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
+	unsigned int win = plane->zpos;
 
 	if (ctx->suspended)
 		return;
 
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
-
 	fimd_enable_video_output(ctx, win, false);
 
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, false);
-
-	/* unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
 }
 
 static void fimd_enable(struct exynos_drm_crtc *crtc)
@@ -795,7 +798,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		fimd_win_disable(crtc, i);
+		fimd_disable_plane(crtc, &ctx->planes[i]);
 
 	fimd_enable_vblank(crtc);
 	fimd_wait_for_vblank(crtc);
@@ -862,7 +865,7 @@
 	}
 
 	if (test_bit(0, &ctx->irq_flags))
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,17 +893,19 @@
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
 	.wait_for_vblank = fimd_wait_for_vblank,
-	.win_commit = fimd_win_commit,
-	.win_disable = fimd_win_disable,
+	.atomic_begin = fimd_atomic_begin,
+	.update_plane = fimd_update_plane,
+	.disable_plane = fimd_disable_plane,
+	.atomic_flush = fimd_atomic_flush,
 	.te_handler = fimd_te_handler,
 	.clock_enable = fimd_dp_clock_enable,
-	.clear_channels = fimd_clear_channels,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
-	u32 val, clear_bit;
+	u32 val, clear_bit, start, start_s;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -912,15 +917,25 @@
 	if (ctx->pipe < 0 || !ctx->drm_dev)
 		goto out;
 
-	if (ctx->i80_if) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	if (!ctx->i80_if)
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 
+	for (win = 0 ; win < WINDOWS_NR ; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
+
+		start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
+		start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
+		if (start == start_s)
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
+
+	if (ctx->i80_if) {
 		/* Exits triggering mode */
 		atomic_set(&ctx->triggering, 0);
 	} else {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
-
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
 			atomic_set(&ctx->wait_vsync_event, 0);
@@ -949,7 +964,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, fimd_formats,
+					ARRAY_SIZE(fimd_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -961,10 +977,13 @@
 	if (IS_ERR(ctx->crtc))
 		return PTR_ERR(ctx->crtc);
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	if (is_drm_iommu_supported(drm_dev))
+		fimd_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		priv->pipe--;
 
@@ -978,10 +997,10 @@
 
 	fimd_disable(ctx->crtc);
 
-	fimd_iommu_detach_devices(ctx);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 }
 
 static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1107,9 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		return PTR_ERR(ctx->display);
-	}
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder))
+		return PTR_ERR(ctx->encoder);
 
 	pm_runtime_enable(dev);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a2508..535b4ad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,11 +48,13 @@
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR		0x0304
+#define G2D_SRC_STRIDE_REG		0x0308
 #define G2D_SRC_COLOR_MODE		0x030C
 #define G2D_SRC_LEFT_TOP		0x0310
 #define G2D_SRC_RIGHT_BOTTOM		0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR	0x0318
 #define G2D_DST_BASE_ADDR		0x0404
+#define G2D_DST_STRIDE_REG		0x0408
 #define G2D_DST_COLOR_MODE		0x040C
 #define G2D_DST_LEFT_TOP		0x0410
 #define G2D_DST_RIGHT_BOTTOM		0x0414
@@ -148,6 +150,7 @@
  * A structure of buffer description
  *
  * @format: color format
+ * @stride: buffer stride/pitch in bytes
  * @left_x: the x coordinates of left top corner
  * @top_y: the y coordinates of left top corner
  * @right_x: the x coordinates of right bottom corner
@@ -156,6 +159,7 @@
  */
 struct g2d_buf_desc {
 	unsigned int	format;
+	unsigned int	stride;
 	unsigned int	left_x;
 	unsigned int	top_y;
 	unsigned int	right_x;
@@ -589,6 +593,7 @@
 
 	switch (reg_offset) {
 	case G2D_SRC_BASE_ADDR:
+	case G2D_SRC_STRIDE_REG:
 	case G2D_SRC_COLOR_MODE:
 	case G2D_SRC_LEFT_TOP:
 	case G2D_SRC_RIGHT_BOTTOM:
@@ -598,6 +603,7 @@
 		reg_type = REG_TYPE_SRC_PLANE2;
 		break;
 	case G2D_DST_BASE_ADDR:
+	case G2D_DST_STRIDE_REG:
 	case G2D_DST_COLOR_MODE:
 	case G2D_DST_LEFT_TOP:
 	case G2D_DST_RIGHT_BOTTOM:
@@ -652,8 +658,8 @@
 						enum g2d_reg_type reg_type,
 						unsigned long size)
 {
-	unsigned int width, height;
-	unsigned long area;
+	int width, height;
+	unsigned long bpp, last_pos;
 
 	/*
 	 * check source and destination buffers only.
@@ -662,22 +668,37 @@
 	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
 		return true;
 
-	width = buf_desc->right_x - buf_desc->left_x;
+	/* This check also makes sure that right_x > left_x. */
+	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
 	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
-		DRM_ERROR("width[%u] is out of range!\n", width);
+		DRM_ERROR("width[%d] is out of range!\n", width);
 		return false;
 	}
 
-	height = buf_desc->bottom_y - buf_desc->top_y;
+	/* This check also makes sure that bottom_y > top_y. */
+	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
 	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
-		DRM_ERROR("height[%u] is out of range!\n", height);
+		DRM_ERROR("height[%d] is out of range!\n", height);
 		return false;
 	}
 
-	area = (unsigned long)width * (unsigned long)height *
-					g2d_get_buf_bpp(buf_desc->format);
-	if (area > size) {
-		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+	bpp = g2d_get_buf_bpp(buf_desc->format);
+
+	/* Compute the position of the last byte that the engine accesses. */
+	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
+		(unsigned long)buf_desc->stride +
+		(unsigned long)buf_desc->right_x * bpp - 1;
+
+	/*
+	 * Since right_x > left_x and bottom_y > top_y, we already know
+	 * that first_pos < last_pos (first_pos being the position of
+	 * the first byte the engine accesses); it only remains to check
+	 * that last_pos is smaller than the buffer size.
+	 */
+
+	if (last_pos >= size) {
+		DRM_ERROR("last engine access position [%lu] "
+			"is out of range [%lu]!\n", last_pos, size);
 		return false;
 	}
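
The new size check computes the offset of the last byte the engine will access, (bottom_y - 1) * stride + right_x * bpp - 1, instead of the old width * height * bpp estimate, which ignored the stride and could pass buffers the engine would overrun. A standalone sketch of the arithmetic with invented geometry:

/* Standalone sketch; the geometry below is an invented example. */
#include <stdio.h>

static unsigned long g2d_last_pos(unsigned int bottom_y, unsigned int right_x,
				  unsigned long stride, unsigned long bpp)
{
	/* last byte touched: end of the last pixel on the last line */
	return (unsigned long)(bottom_y - 1) * stride +
	       (unsigned long)right_x * bpp - 1;
}

int main(void)
{
	/* 64x64 ARGB8888 rectangle in a 256-byte-stride, 16 KiB buffer */
	unsigned long last = g2d_last_pos(64, 64, 256, 4);
	unsigned long size = 16384;

	printf("last_pos=%lu, in range: %d\n", last, last < size);	/* 16383, 1 */
	return 0;
}
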
 
@@ -973,8 +994,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			/* check userptr buffer type. */
 			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -983,14 +1002,22 @@
 			} else
 				buf_info->types[reg_type] = BUF_TYPE_GEM;
 			break;
+		case G2D_SRC_STRIDE_REG:
+		case G2D_DST_STRIDE_REG:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+
+			buf_desc = &buf_info->descs[reg_type];
+			buf_desc->stride = cmdlist->data[index + 1];
+			break;
 		case G2D_SRC_COLOR_MODE:
 		case G2D_DST_COLOR_MODE:
 			if (for_addr)
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1003,8 +1030,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1018,8 +1043,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1319,9 +1342,6 @@
 		return ret;
 	}
 
-	if (!is_drm_iommu_supported(drm_dev))
-		return 0;
-
 	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1354,6 @@
 
 static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
-	if (!is_drm_iommu_supported(drm_dev))
-		return;
-
 	drm_iommu_detach_device(drm_dev, dev);
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b969..62b9ea1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
 #include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
 {
-	unsigned int out_msg;
+	struct drm_device *dev = obj->base.dev;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
-	switch (msg) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		out_msg = VM_FAULT_NOPAGE;
-		break;
-
-	case -ENOMEM:
-		out_msg = VM_FAULT_OOM;
-		break;
-
-	default:
-		out_msg = VM_FAULT_SIGBUS;
-		break;
+	if (obj->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
 	}
 
-	return out_msg;
-}
+	init_dma_attrs(&obj->dma_attrs);
 
-static int check_gem_flags(unsigned int flags)
-{
-	if (flags & ~(EXYNOS_BO_MASK)) {
-		DRM_ERROR("invalid flags.\n");
-		return -EINVAL;
+	/*
+	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region is allocated; otherwise the allocation is made as
+	 * physically contiguous as possible.
+	 */
+	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
+
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
+	 * mapping; otherwise use a cacheable mapping.
+	 */
+	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
+
+	dma_set_attr(attr, &obj->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
+
+	nr_pages = obj->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		obj->cookie = dma_alloc_attrs(dev->dev,
+					obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->cookie) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			drm_free_large(obj->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = obj->dma_addr;
+		while (i < nr_pages) {
+			obj->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
 	}
 
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr,
+			obj->size);
+
 	return 0;
 }
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-					struct vm_area_struct *vma)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
 {
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+	struct drm_device *dev = obj->base.dev;
 
-	/* non-cachable as default. */
-	if (obj->flags & EXYNOS_BO_CACHABLE)
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	else if (obj->flags & EXYNOS_BO_WC)
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	else
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
-
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-	/* TODO */
-
-	return roundup(size, PAGE_SIZE);
-}
-
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	unsigned long pfn;
-	int i;
-
-	if (!buf->sgt)
-		return -EINTR;
-
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return -EINVAL;
+	if (!obj->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
 	}
 
-	sgl = buf->sgt->sgl;
-	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-		if (page_offset < (sgl->length >> PAGE_SHIFT))
-			break;
-		page_offset -=	(sgl->length >> PAGE_SHIFT);
-	}
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr, obj->size);
 
-	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, obj->size, obj->cookie,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+		drm_free_large(obj->pages);
+	} else
+		dma_free_attrs(dev->dev, obj->size, obj->pages,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-	return vm_insert_mixed(vma, f_vaddr, pfn);
+	obj->dma_addr = (dma_addr_t)NULL;
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
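
exynos_drm_alloc_buf() above folds the removed buffer helpers into the GEM object and derives the DMA attributes directly from the userspace flags. A compilable sketch of just that selection logic; the EXYNOS_BO_* and ATTR_* values here are illustrative stand-ins for the real uapi flags and struct dma_attrs bits:

/* Sketch with illustrative values, not the kernel's definitions. */
#include <stdio.h>

#define EXYNOS_BO_NONCONTIG	(1u << 0)
#define EXYNOS_BO_WC		(1u << 1)
#define EXYNOS_BO_CACHABLE	(1u << 2)

#define ATTR_FORCE_CONTIGUOUS	(1u << 0)
#define ATTR_WRITE_COMBINE	(1u << 1)
#define ATTR_NON_CONSISTENT	(1u << 2)
#define ATTR_NO_KERNEL_MAPPING	(1u << 3)

static unsigned int pick_dma_attrs(unsigned int flags)
{
	unsigned int attrs = ATTR_NO_KERNEL_MAPPING;	/* no kernel mapping by default */

	if (!(flags & EXYNOS_BO_NONCONTIG))	/* contiguous unless told otherwise */
		attrs |= ATTR_FORCE_CONTIGUOUS;

	if ((flags & EXYNOS_BO_WC) || !(flags & EXYNOS_BO_CACHABLE))
		attrs |= ATTR_WRITE_COMBINE;	/* write-combine is also the fallback */
	else
		attrs |= ATTR_NON_CONSISTENT;

	return attrs;
}

int main(void)
{
	printf("contig WC buffer:        %#x\n", pick_dma_attrs(EXYNOS_BO_WC));
	printf("noncontig cached buffer: %#x\n",
	       pick_dma_attrs(EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE));
	return 0;
}
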
@@ -131,11 +145,7 @@
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
-
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
+	struct drm_gem_object *obj = &exynos_gem_obj->base;
 
 	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -148,12 +158,9 @@
 	if (obj->import_attach)
 		goto out;
 
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
 	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -180,7 +187,7 @@
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	return exynos_gem_obj->buffer->size;
+	return exynos_gem_obj->size;
 }
 
 
@@ -193,7 +200,7 @@
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
 	if (!exynos_gem_obj)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize gem object\n");
 		kfree(exynos_gem_obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@
 						unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int ret;
 
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!size) {
 		DRM_ERROR("invalid size.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = roundup_gem_size(size, flags);
-
-	ret = check_gem_flags(flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	size = roundup(size, PAGE_SIZE);
 
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_fini_buf;
-	}
-
-	exynos_gem_obj->buffer = buf;
+	if (IS_ERR(exynos_gem_obj))
+		return exynos_gem_obj;
 
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
+	ret = exynos_drm_alloc_buf(exynos_gem_obj);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
 
 	return exynos_gem_obj;
-
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@
 				      struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
@@ -331,19 +325,13 @@
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem_obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-				buffer->dma_addr, buffer->size,
-				&buffer->dma_attrs);
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+				&exynos_gem_obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -503,15 +491,6 @@
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, buf->sgt);
-
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -595,24 +574,34 @@
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	unsigned long f_vaddr;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	unsigned long pfn;
 	pgoff_t page_offset;
 	int ret;
 
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
-	f_vaddr = (unsigned long)vmf->virtual_address;
 
-	mutex_lock(&dev->struct_mutex);
+	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
-	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-	if (ret < 0)
-		DRM_ERROR("failed to map a buffer with user.\n");
+	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
-	mutex_unlock(&dev->struct_mutex);
-
-	return convert_to_vm_err_msg(ret);
+out:
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
 }
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
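
With convert_to_vm_err_msg() gone, the fault handler above translates the error code inline. The mapping on its own, as a tiny runnable sketch in which the FAULT_* enum stands in for the kernel's VM_FAULT_* codes:

#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512	/* kernel-internal value; not in userspace errno.h */

enum vm_fault { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

static enum vm_fault errno_to_fault(int err)
{
	switch (err) {
	case 0:
	case -ERESTARTSYS:	/* interrupted; let the caller retry the fault */
	case -EINTR:
		return FAULT_NOPAGE;
	case -ENOMEM:
		return FAULT_OOM;
	default:
		return FAULT_SIGBUS;
	}
}

int main(void)
{
	printf("-ENOMEM -> %d (FAULT_OOM)\n", errno_to_fault(-ENOMEM));
	return 0;
}
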
@@ -631,11 +620,17 @@
 	obj = vma->vm_private_data;
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret)
-		goto err_close_vm;
+	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
 
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	/* non-cacheable by default. */
+	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
 	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
 	if (ret)
@@ -649,3 +644,76 @@
 
 	return ret;
 }
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	int npages;
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int npages;
+	int ret;
+
+	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
+
+	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!exynos_gem_obj->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+			npages);
+	if (ret < 0)
+		goto err_free_large;
+
+	if (sgt->nents == 1) {
+		/* always physically contiguous memory if sgt->nents is 1. */
+		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+	} else {
+		/*
+		 * this case could be CONTIG or NONCONTIG type, but for now
+		 * we set NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of the type of its own buffer.
+		 */
+		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+	}
+
+	return &exynos_gem_obj->base;
+
+err_free_large:
+	drm_free_large(exynos_gem_obj->pages);
+err:
+	drm_gem_object_release(&exynos_gem_obj->base);
+	kfree(exynos_gem_obj);
+	return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* Nothing to do */
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e22..cd62f84 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
 #define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
 
 /*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- *	- this address could be physical address without IOMMU and
- *	device address with IOMMU.
- * @write: whether pages will be written to by the caller.
- * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
- * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- *	VM_PFNMAP or not.
- */
-struct exynos_drm_gem_buf {
-	void 			*cookie;
-	void __iomem		*kvaddr;
-	unsigned long		userptr;
-	dma_addr_t		dma_addr;
-	struct dma_attrs	dma_attrs;
-	unsigned int		write;
-	struct page		**pages;
-	struct sg_table		*sgt;
-	unsigned long		size;
-	bool			pfnmap;
-};
-
-/*
  * exynos drm buffer structure.
  *
  * @base: a gem object.
@@ -59,18 +30,28 @@
  *	by user request or at framebuffer creation.
  *	continuous memory region allocated by user request
  *	or at framebuffer creation.
+ * @flags: indicate memory type and cache attribute of the allocated buffer.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
+ *	- this is a physical address without an IOMMU and a device
+ *	address with an IOMMU.
+ * @pages: Array of backing pages.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *	user can access the buffer through kms_bo.handle.
  */
 struct exynos_drm_gem_obj {
-	struct drm_gem_object		base;
-	struct exynos_drm_gem_buf	*buffer;
-	unsigned long			size;
-	unsigned int			flags;
+	struct drm_gem_object	base;
+	unsigned int		flags;
+	unsigned long		size;
+	void			*cookie;
+	void __iomem		*kvaddr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	struct page		**pages;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@
 				struct sg_table *sgt,
 				enum dma_data_direction dir);
 
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt);
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f1c6b76..808a0a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
@@ -845,9 +853,17 @@
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
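
Both hunks encode the same identity: a 180-degree rotation is the composition of a horizontal and a vertical flip, so a user-requested flip on top of ROT_180 (or the flipped part of ROT_270) must clear the matching flip bit rather than set it. A sketch of that composition; the bit values are invented, and the premise that the rotation is realized through the flip bits is inferred from the clearing logic above:

/* Illustrative sketch only; bit values are made up. */
#include <stdio.h>

#define ROT_XFLIP	(1u << 0)
#define ROT_YFLIP	(1u << 1)
#define FLIP_VERTICAL	(1u << 0)
#define FLIP_HORIZONTAL	(1u << 1)

static unsigned int rot180_cfg(unsigned int flip)
{
	unsigned int cfg = ROT_XFLIP | ROT_YFLIP;	/* 180 degrees = flip both axes */

	if (flip & FLIP_VERTICAL)	/* extra vertical flip cancels the X flip */
		cfg &= ~ROT_XFLIP;
	if (flip & FLIP_HORIZONTAL)	/* extra horizontal flip cancels the Y flip */
		cfg &= ~ROT_YFLIP;

	return cfg;
}

int main(void)
{
	printf("180 + vflip -> %#x\n", rot180_cfg(FLIP_VERTICAL));	/* only YFLIP left */
	return 0;
}
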
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec746..055e8ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@
 	struct device *dev = drm_dev->dev;
 	int ret;
 
-	if (!dev->archdata.mapping) {
-		DRM_ERROR("iommu_mapping is null.\n");
-		return -EFAULT;
-	}
+	if (!dev->archdata.mapping)
+		return 0;
 
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
 					sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@
 	iommu_detach_device(mapping->domain, subdrv_dev);
 	drm_release_iommu_mapping(drm_dev);
 }
-
-int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
-			struct drm_device *drm_dev, struct device *subdrv_dev)
-{
-	int ret = 0;
-
-	if (is_drm_iommu_supported(drm_dev)) {
-		if (exynos_crtc->ops->clear_channels)
-			exynos_crtc->ops->clear_channels(exynos_crtc);
-		return drm_iommu_attach_device(drm_dev, subdrv_dev);
-	}
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a..dc1b544 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@
 
 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 {
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct device *dev = drm_dev->dev;
 
 	return dev->archdata.mapping ? true : false;
-#else
-	return false;
-#endif
 }
 
-int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev);
-
 #else
 
 static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@
 	return false;
 }
 
-static inline int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev)
-{
-	return 0;
-}
-
 #endif
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451..67d2423 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@
 		INIT_LIST_HEAD(&ippdrv->cmd_list);
 		mutex_init(&ippdrv->cmd_lock);
 
-		if (is_drm_iommu_supported(drm_dev)) {
-			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
-			if (ret) {
-				DRM_ERROR("failed to activate iommu\n");
-				goto err;
-			}
+		ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+		if (ret) {
+			DRM_ERROR("failed to activate iommu\n");
+			goto err;
 		}
 	}
 
@@ -1637,8 +1635,7 @@
 	/* get ipp driver entry */
 	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
 						drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@
 
 	/* get ipp driver entry */
 	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980..7148224 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -20,12 +20,6 @@
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 
-static const uint32_t formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_NV12,
-};
-
 /*
  * This function is to get X or Y size shown via screen. This needs length and
  * start position of CRTC.
@@ -97,29 +91,18 @@
 	/* set drm framebuffer data. */
 	exynos_plane->src_x = src_x;
 	exynos_plane->src_y = src_y;
-	exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
-	exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
-	exynos_plane->fb_width = fb->width;
-	exynos_plane->fb_height = fb->height;
-	exynos_plane->bpp = fb->bits_per_pixel;
-	exynos_plane->pitch = fb->pitches[0];
-	exynos_plane->pixel_format = fb->pixel_format;
+	exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
+	exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
 
 	/* set plane range to be displayed. */
 	exynos_plane->crtc_x = crtc_x;
 	exynos_plane->crtc_y = crtc_y;
-	exynos_plane->crtc_width = actual_w;
-	exynos_plane->crtc_height = actual_h;
-
-	/* set drm mode data. */
-	exynos_plane->mode_width = mode->hdisplay;
-	exynos_plane->mode_height = mode->vdisplay;
-	exynos_plane->refresh = mode->vrefresh;
-	exynos_plane->scan_flag = mode->flags;
+	exynos_plane->crtc_w = actual_w;
+	exynos_plane->crtc_h = actual_h;
 
 	DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
 			exynos_plane->crtc_x, exynos_plane->crtc_y,
-			exynos_plane->crtc_width, exynos_plane->crtc_height);
+			exynos_plane->crtc_w, exynos_plane->crtc_h);
 
 	plane->crtc = crtc;
 }
@@ -143,17 +126,17 @@
 	if (!state->fb)
 		return 0;
 
-	nr = exynos_drm_fb_get_buf_cnt(state->fb);
+	nr = drm_format_num_planes(state->fb->pixel_format);
 	for (i = 0; i < nr; i++) {
-		struct exynos_drm_gem_buf *buffer =
-					exynos_drm_fb_buffer(state->fb, i);
+		struct exynos_drm_gem_obj *obj =
+					exynos_drm_fb_gem_obj(state->fb, i);
 
-		if (!buffer) {
-			DRM_DEBUG_KMS("buffer is null\n");
+		if (!obj) {
+			DRM_DEBUG_KMS("gem object is null\n");
 			return -EFAULT;
 		}
 
-		exynos_plane->dma_addr[i] = buffer->dma_addr +
+		exynos_plane->dma_addr[i] = obj->dma_addr +
 					    state->fb->offsets[i];
 
 		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +162,10 @@
 			      state->src_x >> 16, state->src_y >> 16,
 			      state->src_w >> 16, state->src_h >> 16);
 
-	if (exynos_crtc->ops->win_commit)
-		exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+	exynos_plane->pending_fb = state->fb;
+
+	if (exynos_crtc->ops->update_plane)
+		exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
 
 static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +177,9 @@
 	if (!old_state->crtc)
 		return;
 
-	if (exynos_crtc->ops->win_disable)
-		exynos_crtc->ops->win_disable(exynos_crtc,
-					      exynos_plane->zpos);
+	if (exynos_crtc->ops->disable_plane)
+		exynos_crtc->ops->disable_plane(exynos_crtc,
+						exynos_plane);
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -226,13 +211,14 @@
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos)
 {
 	int err;
 
 	err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
-				       &exynos_plane_funcs, formats,
-				       ARRAY_SIZE(formats), type);
+				       &exynos_plane_funcs, formats, fcount,
+				       type);
 	if (err) {
 		DRM_ERROR("failed to initialize plane\n");
 		return err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 8c88ae9..476c934 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -12,4 +12,5 @@
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393..75718e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_vidi.h"
 
 /* vidi has totally three virtual windows. */
@@ -35,11 +34,10 @@
 					connector)
 
 struct vidi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct platform_device		*pdev;
 	struct drm_device		*drm_dev;
 	struct exynos_drm_crtc		*crtc;
-	struct drm_encoder		*encoder;
 	struct drm_connector		connector;
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct edid			*raw_edid;
@@ -55,9 +53,9 @@
 	int				pipe;
 };
 
-static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
 {
-	return container_of(d, struct vidi_context, display);
+	return container_of(e, struct vidi_context, encoder);
 }
 
 static const char fake_edid_info[] = {
@@ -85,6 +83,12 @@
 	0x00, 0x00, 0x00, 0x06
 };
 
+static const uint32_t formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+};
+
 static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
@@ -100,7 +104,7 @@
 	/*
 	 * in case of page flip request, vidi_finish_pageflip function
 	 * will not be called because direct_vblank is true and then
-	 * that function will be called by crtc_ops->win_commit callback
+	 * that function will be called by crtc_ops->update_plane callback
 	 */
 	schedule_work(&ctx->work);
 
@@ -118,19 +122,14 @@
 		ctx->vblank_on = false;
 }
 
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void vidi_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
 {
 	struct vidi_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
 
 	if (ctx->vblank_on)
@@ -179,13 +178,14 @@
 	.disable = vidi_disable,
 	.enable_vblank = vidi_enable_vblank,
 	.disable_vblank = vidi_disable_vblank,
-	.win_commit = vidi_win_commit,
+	.update_plane = vidi_update_plane,
 };
 
 static void vidi_fake_vblank_handler(struct work_struct *work)
 {
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
 					work);
+	int win;
 
 	if (ctx->pipe < 0)
 		return;
@@ -196,7 +196,7 @@
 	mutex_lock(&ctx->lock);
 
 	if (ctx->direct_vblank) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 		ctx->direct_vblank = false;
 		mutex_unlock(&ctx->lock);
 		return;
@@ -204,7 +204,14 @@
 
 	mutex_unlock(&ctx->lock);
 
-	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	for (win = 0; win < WINDOWS_NR; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
+
+		exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
 }
 
 static int vidi_show_connection(struct device *dev,
@@ -259,9 +266,7 @@
 int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 				struct drm_file *file_priv)
 {
-	struct vidi_context *ctx = NULL;
-	struct drm_encoder *encoder;
-	struct exynos_drm_display *display;
+	struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
 	struct drm_exynos_vidi_connection *vidi = data;
 
 	if (!vidi) {
@@ -274,21 +279,6 @@
 		return -EINVAL;
 	}
 
-	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
-								head) {
-		display = exynos_drm_get_display(encoder);
-
-		if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
-			ctx = display_to_vidi(display);
-			break;
-		}
-	}
-
-	if (!ctx) {
-		DRM_DEBUG_KMS("not found virtual device type encoder.\n");
-		return -EINVAL;
-	}
-
 	if (ctx->connected == vidi->connection) {
 		DRM_DEBUG_KMS("same connection request.\n");
 		return -EINVAL;
@@ -381,7 +371,7 @@
 {
 	struct vidi_context *ctx = ctx_from_connector(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +379,12 @@
 	.best_encoder = vidi_best_encoder,
 };
 
-static int vidi_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int vidi_create_connector(struct drm_encoder *encoder)
 {
-	struct vidi_context *ctx = display_to_vidi(display);
+	struct vidi_context *ctx = encoder_to_vidi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +401,47 @@
 	return 0;
 }
 
+static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
 
-static struct exynos_drm_display_ops vidi_display_ops = {
-	.create_connector = vidi_create_connector,
+static void exynos_vidi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_vidi_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_vidi_disable(struct drm_encoder *encoder)
+{
+}
+
+static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+	.mode_fixup = exynos_vidi_mode_fixup,
+	.mode_set = exynos_vidi_mode_set,
+	.enable = exynos_vidi_enable,
+	.disable = exynos_vidi_disable,
+};
+
+static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct vidi_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &ctx->encoder;
 	struct exynos_drm_plane *exynos_plane;
 	enum drm_plane_type type;
 	unsigned int zpos;
-	int ret;
+	int pipe, ret;
 
 	vidi_ctx_initialize(ctx, drm_dev);
 
@@ -433,7 +449,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats,
+					ARRAY_SIZE(formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -447,9 +464,24 @@
 		return PTR_ERR(ctx->crtc);
 	}
 
-	ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_VIDI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+
+	ret = vidi_create_connector(encoder);
 	if (ret) {
-		ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
 
@@ -475,8 +507,6 @@
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
-	ctx->display.ops = &vidi_display_ops;
 	ctx->default_win = 0;
 	ctx->pdev = pdev;
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 4a00990..932f7fa 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
 #include "regs-hdmi.h"
 
 #include <linux/kernel.h>
-#include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -33,8 +32,8 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/hdmi.h>
 #include <linux/component.h>
@@ -48,7 +47,6 @@
 #include "exynos_mixer.h"
 
 #include <linux/gpio.h>
-#include <media/s5p_hdmi.h>
 
 #define ctx_from_connector(c)	container_of(c, struct hdmi_context, connector)
 
@@ -88,109 +86,14 @@
 	int				regul_count;
 };
 
-struct hdmi_tg_regs {
-	u8 cmd[1];
-	u8 h_fsz[2];
-	u8 hact_st[2];
-	u8 hact_sz[2];
-	u8 v_fsz[2];
-	u8 vsync[2];
-	u8 vsync2[2];
-	u8 vact_st[2];
-	u8 vact_sz[2];
-	u8 field_chg[2];
-	u8 vact_st2[2];
-	u8 vact_st3[2];
-	u8 vact_st4[2];
-	u8 vsync_top_hdmi[2];
-	u8 vsync_bot_hdmi[2];
-	u8 field_top_hdmi[2];
-	u8 field_bot_hdmi[2];
-	u8 tg_3d[1];
-};
-
-struct hdmi_v13_core_regs {
-	u8 h_blank[2];
-	u8 v_blank[3];
-	u8 h_v_line[3];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f[3];
-	u8 h_sync_gen[3];
-	u8 v_sync_gen1[3];
-	u8 v_sync_gen2[3];
-	u8 v_sync_gen3[3];
-};
-
-struct hdmi_v14_core_regs {
-	u8 h_blank[2];
-	u8 v2_blank[2];
-	u8 v1_blank[2];
-	u8 v_line[2];
-	u8 h_line[2];
-	u8 hsync_pol[1];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f0[2];
-	u8 v_blank_f1[2];
-	u8 h_sync_start[2];
-	u8 h_sync_end[2];
-	u8 v_sync_line_bef_2[2];
-	u8 v_sync_line_bef_1[2];
-	u8 v_sync_line_aft_2[2];
-	u8 v_sync_line_aft_1[2];
-	u8 v_sync_line_aft_pxl_2[2];
-	u8 v_sync_line_aft_pxl_1[2];
-	u8 v_blank_f2[2]; /* for 3D mode */
-	u8 v_blank_f3[2]; /* for 3D mode */
-	u8 v_blank_f4[2]; /* for 3D mode */
-	u8 v_blank_f5[2]; /* for 3D mode */
-	u8 v_sync_line_aft_3[2];
-	u8 v_sync_line_aft_4[2];
-	u8 v_sync_line_aft_5[2];
-	u8 v_sync_line_aft_6[2];
-	u8 v_sync_line_aft_pxl_3[2];
-	u8 v_sync_line_aft_pxl_4[2];
-	u8 v_sync_line_aft_pxl_5[2];
-	u8 v_sync_line_aft_pxl_6[2];
-	u8 vact_space_1[2];
-	u8 vact_space_2[2];
-	u8 vact_space_3[2];
-	u8 vact_space_4[2];
-	u8 vact_space_5[2];
-	u8 vact_space_6[2];
-};
-
-struct hdmi_v13_conf {
-	struct hdmi_v13_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_v14_conf {
-	struct hdmi_v14_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf_regs {
-	int pixel_clock;
-	int cea_video_id;
-	enum hdmi_picture_aspect aspect_ratio;
-	union {
-		struct hdmi_v13_conf v13_conf;
-		struct hdmi_v14_conf v14_conf;
-	} conf;
-};
-
 struct hdmi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct device			*dev;
 	struct drm_device		*drm_dev;
 	struct drm_connector		connector;
-	struct drm_encoder		*encoder;
 	bool				hpd;
 	bool				powered;
 	bool				dvi_mode;
-	struct mutex			hdmi_mutex;
 
 	void __iomem			*regs;
 	int				irq;
@@ -201,22 +104,20 @@
 
 	/* current hdmiphy conf regs */
 	struct drm_display_mode		current_mode;
-	struct hdmi_conf_regs		mode_conf;
+	u8				cea_video_id;
 
 	struct hdmi_resources		res;
+	const struct hdmi_driver_data	*drv_data;
 
 	int				hpd_gpio;
 	void __iomem			*regs_hdmiphy;
-	const struct hdmiphy_config		*phy_confs;
-	unsigned int			phy_conf_count;
 
 	struct regmap			*pmureg;
-	enum hdmi_type			type;
 };
 
-static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
 {
-	return container_of(d, struct hdmi_context, display);
+	return container_of(e, struct hdmi_context, encoder);
 }
 
 struct hdmiphy_config {
@@ -624,6 +525,16 @@
 	writeb(value, hdata->regs + reg_id);
 }
 
+static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
+				   int bytes, u32 val)
+{
+	while (--bytes >= 0) {
+		writeb(val & 0xff, hdata->regs + reg_id);
+		val >>= 8;
+		reg_id += 4;
+	}
+}
+
 static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
 				 u32 reg_id, u32 value, u32 mask)
 {
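
hdmi_reg_writev() replaces the long per-byte hdmi_reg_writeb() sequences removed later in this patch: it splits a value into bytes, LSB first, across byte-wide registers spaced 4 bytes apart. A self-contained simulation against a mock register file; the packed 720p H_V_LINE value is just an example input:

/* Self-contained simulation; regs[] is a mock register file. */
#include <stdint.h>
#include <stdio.h>

static uint8_t regs[64];	/* byte-wide registers at 4-byte stride */

static void reg_writev(uint32_t reg_id, int bytes, uint32_t val)
{
	while (--bytes >= 0) {
		regs[reg_id] = val & 0xff;	/* LSB first */
		val >>= 8;
		reg_id += 4;			/* consecutive regs sit 4 bytes apart */
	}
}

int main(void)
{
	/* e.g. the v13 H_V_LINE packing for 720p: htotal 1650, vtotal 750 */
	reg_writev(0, 3, (1650u << 12) | 750u);
	printf("%02x %02x %02x\n", regs[0], regs[4], regs[8]);	/* ee 22 67 */
	return 0;
}
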
@@ -930,7 +841,7 @@
 
 static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_regs_dump(hdata, prefix);
 	else
 		hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@
 	u32 hdr_sum;
 	u8 chksum;
 	u32 mod;
-	u32 vic;
+	u8 ar;
 
 	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
 	if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@
 		 * Set the aspect ratio as per the mode, mentioned in
 		 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
 		 */
-		switch (hdata->mode_conf.aspect_ratio) {
+		ar = hdata->current_mode.picture_aspect_ratio;
+		switch (ar) {
 		case HDMI_PICTURE_ASPECT_4_3:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_4_3_CENTER_RATIO);
+			ar |= AVI_4_3_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_16_9:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_16_9_CENTER_RATIO);
+			ar |= AVI_16_9_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_NONE:
 		default:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_SAME_AS_PIC_ASPECT_RATIO);
+			ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
 			break;
 		}
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
 
-		vic = hdata->mode_conf.cea_video_id;
-		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
 
 		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
 					infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+	if (gpio_get_value(hdata->hpd_gpio))
+		return connector_status_connected;
 
-	return hdata->hpd ? connector_status_connected :
-			connector_status_disconnected;
+	return connector_status_disconnected;
 }
 
 static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1091,8 +997,8 @@
 {
 	int i;
 
-	for (i = 0; i < hdata->phy_conf_count; i++)
-		if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+	for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
+		if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
 			return i;
 
 	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1125,7 +1031,7 @@
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	return hdata->encoder;
+	return &hdata->encoder;
 }
 
 static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1134,14 +1040,12 @@
 	.best_encoder = hdmi_best_encoder,
 };
 
-static int hdmi_create_connector(struct exynos_drm_display *display,
-			struct drm_encoder *encoder)
+static int hdmi_create_connector(struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display_to_hdmi(display);
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct drm_connector *connector = &hdata->connector;
 	int ret;
 
-	hdata->encoder = encoder;
 	connector->interlace_allowed = true;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -1159,23 +1063,30 @@
 	return 0;
 }
 
-static void hdmi_mode_fixup(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static bool hdmi_mode_fixup(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
 	struct drm_display_mode *m;
 	int mode_ok;
 
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			break;
+	}
+
+	if (connector->encoder != encoder)
+		return true;
+
 	mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
 	/* just return if user desired mode exists. */
 	if (mode_ok == MODE_OK)
-		return;
+		return true;
 
 	/*
 	 * otherwise, find the most suitable mode among modes and change it
@@ -1195,6 +1106,8 @@
 			break;
 		}
 	}
+
+	return true;
 }
 
 static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1257,7 +1170,7 @@
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
 	else
 		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1391,7 +1304,7 @@
 				HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
 	}
 
-	if (hdata->type == HDMI_TYPE13) {
+	if (hdata->drv_data->type == HDMI_TYPE13) {
 		/* choose bluescreen (fecal) color */
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1424,66 +1337,94 @@
 
 static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	const struct hdmi_v13_core_regs *core =
-		&hdata->mode_conf.conf.v13_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
+	unsigned int val;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
+			(m->htotal << 12) | m->vtotal);
+
+	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
+
+	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
+
+	val = (m->hsync_start - m->hdisplay - 2);
+	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0) << 20;
+	hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
+
+	/*
+	 * Quirk of the exynos HDMI IP design: hsync_start and hsync_end
+	 * are programmed 2 pixels less than the actual calculation.
+	 */
+
+	/* The following values & calculations differ for different types of modes */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		val = ((m->vsync_end - m->vdisplay) / 2);
+		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		val = m->vtotal / 2;
+		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		val = (m->vtotal +
+			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+		val |= m->vtotal << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
+
+		val = ((m->vtotal / 2) + 7);
+		val |= ((m->vtotal / 2) + 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
+
+		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		val |= ((m->htotal / 2) +
+			(m->hsync_start - m->hdisplay)) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
+	} else {
+		/* Progressive Mode */
+
+		val = m->vtotal;
+		val |= (m->vtotal - m->vdisplay) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
+
+		val = (m->vsync_end - m->vdisplay);
+		val |= ((m->vsync_start - m->vdisplay) << 12);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+	}
+
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
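
The timing values are now computed from the drm_display_mode instead of the dropped precomputed register tables, with each hdmi_reg_writev() call packing one or more fields. A worked, runnable example of the progressive V_BLANK packing for a 1280x720p mode (vtotal 750, vdisplay 720):

/* Runnable example; 1280x720p timings are assumed for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int vtotal = 750, vdisplay = 720;

	/* low bits: total lines; bits 11 and up: vertical blanking length */
	unsigned int v_blank = vtotal | ((vtotal - vdisplay) << 11);

	printf("HDMI_V13_V_BLANK = %#x\n", v_blank);	/* 0xf2ee */
	return 0;
}
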
@@ -1508,144 +1449,119 @@
 
 static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	const struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
-			core->v_sync_line_bef_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
-			core->v_sync_line_bef_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
-			core->v_sync_line_bef_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
-			core->v_sync_line_bef_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
-			core->v_sync_line_aft_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
-			core->v_sync_line_aft_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
-			core->v_sync_line_aft_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
-			core->v_sync_line_aft_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
-			core->v_sync_line_aft_pxl_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
-			core->v_sync_line_aft_pxl_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
-			core->v_sync_line_aft_pxl_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
-			core->v_sync_line_aft_pxl_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
-			core->v_sync_line_aft_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
-			core->v_sync_line_aft_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
-			core->v_sync_line_aft_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
-			core->v_sync_line_aft_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
-			core->v_sync_line_aft_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
-			core->v_sync_line_aft_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
-			core->v_sync_line_aft_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
-			core->v_sync_line_aft_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
-			core->v_sync_line_aft_pxl_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
-			core->v_sync_line_aft_pxl_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
-			core->v_sync_line_aft_pxl_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
-			core->v_sync_line_aft_pxl_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
-			core->v_sync_line_aft_pxl_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
-			core->v_sync_line_aft_pxl_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
-			core->v_sync_line_aft_pxl_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
-			core->v_sync_line_aft_pxl_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
+			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+	/*
+	 * Quirk of the exynos 5 HDMI IP: hsync_start and hsync_end must be
+	 * programmed 2 pixels less than the calculated values.
+	 */
+
+	/* The following values and calculations differ by mode type */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			(m->vsync_end - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			(m->vsync_start - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
+				(m->vtotal / 2) + 7);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
+				(m->vtotal / 2) + 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
+	} else {
+		/* Progressive Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			m->vsync_end - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			m->vsync_start - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
+	}
+
+	/* The following values and calculations are the same for both mode types */
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
+			m->hsync_start - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
+			m->hsync_end - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
 
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
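
For reference, the hunk above collapses each pair of hdmi_reg_writeb()
calls into a single hdmi_reg_writev(). The helper itself is introduced in
an earlier hunk of this patch; a minimal sketch of its likely shape,
assuming the _0/_1 byte registers sit 4 bytes apart (the little-endian
packing mirrors the removed hdmi_set_reg() further down):

	static inline void hdmi_reg_writev(struct hdmi_context *hdata,
					   u32 reg_id, int bytes, u32 val)
	{
		while (--bytes >= 0) {
			/* low byte first, one register per byte */
			hdmi_reg_writeb(hdata, reg_id, val & 0xff);
			val >>= 8;
			reg_id += 4;	/* assumed register stride */
		}
	}
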
@@ -1670,7 +1586,7 @@
 
 static void hdmi_mode_apply(struct hdmi_context *hdata)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_mode_apply(hdata);
 	else
 		hdmi_v14_mode_apply(hdata);
@@ -1688,7 +1604,7 @@
 	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
 				HDMI_PHY_ENABLE_MODE_SET);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		reg = HDMI_V13_PHY_RSTOUT;
 	else
 		reg = HDMI_PHY_RSTOUT;
@@ -1702,7 +1618,7 @@
 
 static void hdmiphy_poweron(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1722,7 +1638,7 @@
 
 static void hdmiphy_poweroff(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1748,13 +1664,14 @@
 	int i;
 
 	/* pixel clock */
-	i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+	i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
 	if (i < 0) {
 		DRM_ERROR("failed to find hdmiphy conf\n");
 		return;
 	}
 
-	ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+	ret = hdmiphy_reg_write_buf(hdata, 0,
+			hdata->drv_data->phy_confs[i].conf, 32);
 	if (ret) {
 		DRM_ERROR("failed to configure hdmiphy\n");
 		return;
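
The pixel clock now comes straight from the cached mode (m->clock is in
kHz, hence the * 1000). A hedged sketch of the lookup helper, assuming
each phy_confs[] entry carries a pixel_clock field next to its 32-byte
conf buffer:

	static int hdmi_find_phy_conf(struct hdmi_context *hdata,
				      u32 pixel_clock)
	{
		int i;

		for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
			if (hdata->drv_data->phy_confs[i].pixel_clock ==
			    pixel_clock)
				return i;

		return -EINVAL;	/* caller logs the failure */
	}
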
@@ -1776,10 +1693,8 @@
 	hdmiphy_conf_reset(hdata);
 	hdmiphy_conf_apply(hdata);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdmi_start(hdata, false);
 	hdmi_conf_init(hdata);
-	mutex_unlock(&hdata->hdmi_mutex);
 
 	hdmi_audio_init(hdata);
 
@@ -1790,271 +1705,32 @@
 	hdmi_regs_dump(hdata, "start");
 }
 
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+static void hdmi_mode_set(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
 {
-	int i;
-	BUG_ON(num_bytes > 4);
-	for (i = 0; i < num_bytes; i++)
-		reg_pair[i] = (value >> (8 * i)) & 0xff;
-}
-
-static void hdmi_v13_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
-{
-	struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	unsigned int val;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
-
-	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
-	hdmi_set_reg(core->vsync_pol, 1, val);
-
-	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
-	hdmi_set_reg(core->int_pro_mode, 1, val);
-
-	val = (m->hsync_start - m->hdisplay - 2);
-	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
-	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
-	hdmi_set_reg(core->h_sync_gen, 3, val);
-
-	/*
-	 * Quirk requirement for exynos HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		val = ((m->vsync_end - m->vdisplay) / 2);
-		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		val = m->vtotal / 2;
-		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		val = (m->vtotal +
-			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
-		val |= m->vtotal << 11;
-		hdmi_set_reg(core->v_blank_f, 3, val);
-
-		val = ((m->vtotal / 2) + 7);
-		val |= ((m->vtotal / 2) + 2) << 12;
-		hdmi_set_reg(core->v_sync_gen2, 3, val);
-
-		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		val |= ((m->htotal / 2) +
-			(m->hsync_start - m->hdisplay)) << 12;
-		hdmi_set_reg(core->v_sync_gen3, 3, val);
-
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-
-		hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
-	} else {
-		/* Progressive Mode */
-
-		val = m->vtotal;
-		val |= (m->vtotal - m->vdisplay) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		hdmi_set_reg(core->v_blank_f, 3, 0);
-
-		val = (m->vsync_end - m->vdisplay);
-		val |= ((m->vsync_start - m->vdisplay) << 12);
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-	}
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
-}
-
-static void hdmi_v14_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
-{
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->v_line, 2, m->vtotal);
-	hdmi_set_reg(core->h_line, 2, m->htotal);
-	hdmi_set_reg(core->hsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
-	hdmi_set_reg(core->vsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
-	hdmi_set_reg(core->int_pro_mode, 1,
-			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
-	/*
-	 * Quirk requirement for exynos 5 HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			(m->vsync_end - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			(m->vsync_start - m->vdisplay) / 2);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
-		hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-		hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vact_st3, 2, 0x0);
-		hdmi_set_reg(tg->vact_st4, 2, 0x0);
-	} else {
-		/* Progressive Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			m->vsync_end - m->vdisplay);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			m->vsync_start - m->vdisplay);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal);
-		hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
-		hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-		hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
-		hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
-		hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	}
-
-	/* Following values & calculations are same irrespective of mode type */
-	hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
-	hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
-	hdmi_set_reg(core->vact_space_1, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_2, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_3, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_4, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_5, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_6, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0);
-}
-
-static void hdmi_mode_set(struct exynos_drm_display *display,
-			struct drm_display_mode *mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_display_mode *m = mode;
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+	struct drm_display_mode *m = adjusted_mode;
 
 	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
 		m->hdisplay, m->vdisplay,
 		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
 		"INTERLACED" : "PROGRESSIVE");
 
-	/* preserve mode information for later use. */
-	drm_mode_copy(&hdata->current_mode, mode);
-
-	if (hdata->type == HDMI_TYPE13)
-		hdmi_v13_mode_set(hdata, mode);
-	else
-		hdmi_v14_mode_set(hdata, mode);
+	drm_mode_copy(&hdata->current_mode, m);
+	hdata->cea_video_id = drm_match_cea_mode(mode);
 }
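
The new mode_set only caches state; the actual register programming now
happens in hdmi_mode_apply() at enable time. For a CEA mode the stored
id is its VIC, e.g. (illustrative, not part of the patch):

	u8 vic = drm_match_cea_mode(mode);	/* 16 for 1920x1080@60, 0 if non-CEA */

and is presumably consumed later when the AVI infoframe is built.
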
 
-static void hdmi_commit(struct exynos_drm_display *display)
+static void hdmi_enable(struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display_to_hdmi(display);
-
-	mutex_lock(&hdata->hdmi_mutex);
-	if (!hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
-		return;
-	}
-	mutex_unlock(&hdata->hdmi_mutex);
-
-	hdmi_conf_apply(hdata);
-}
-
-static void hdmi_poweron(struct hdmi_context *hdata)
-{
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
 
-	mutex_lock(&hdata->hdmi_mutex);
-	if (hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
+	if (hdata->powered)
 		return;
-	}
 
 	hdata->powered = true;
 
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	pm_runtime_get_sync(hdata->dev);
 
 	if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2068,17 +1744,32 @@
 	clk_prepare_enable(res->sclk_hdmi);
 
 	hdmiphy_poweron(hdata);
-	hdmi_commit(&hdata->display);
+	hdmi_conf_apply(hdata);
 }
 
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_disable(struct drm_encoder *encoder)
 {
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
+	struct drm_crtc *crtc = encoder->crtc;
+	const struct drm_crtc_helper_funcs *funcs = NULL;
 
-	mutex_lock(&hdata->hdmi_mutex);
 	if (!hdata->powered)
-		goto out;
-	mutex_unlock(&hdata->hdmi_mutex);
+		return;
+
+	/*
+	 * The SFRs of VP and Mixer are updated by the vertical sync of the
+	 * timing generator, which is part of HDMI, so the sequence for
+	 * disabling the TV subsystem should be as follows:
+	 *	VP -> Mixer -> HDMI
+	 *
+	 * The code below disables Mixer and VP (if used) prior to
+	 * disabling HDMI.
+	 */
+	if (crtc)
+		funcs = crtc->helper_private;
+	if (funcs && funcs->disable)
+		(*funcs->disable)(crtc);
 
 	/* HDMI System Disable */
 	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2098,57 +1789,18 @@
 
 	pm_runtime_put_sync(hdata->dev);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdata->powered = false;
-
-out:
-	mutex_unlock(&hdata->hdmi_mutex);
 }
 
-static void hdmi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_encoder *encoder = hdata->encoder;
-	struct drm_crtc *crtc = encoder->crtc;
-	const struct drm_crtc_helper_funcs *funcs = NULL;
-
-	DRM_DEBUG_KMS("mode %d\n", mode);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		hdmi_poweron(hdata);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		/*
-		 * The SFRs of VP and Mixer are updated by Vertical Sync of
-		 * Timing generator which is a part of HDMI so the sequence
-		 * to disable TV Subsystem should be as following,
-		 *	VP -> Mixer -> HDMI
-		 *
-		 * Below codes will try to disable Mixer and VP(if used)
-		 * prior to disabling HDMI.
-		 */
-		if (crtc)
-			funcs = crtc->helper_private;
-		if (funcs && funcs->disable)
-			(*funcs->disable)(crtc);
-
-		hdmi_poweroff(hdata);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
-}
-
-static struct exynos_drm_display_ops hdmi_display_ops = {
-	.create_connector = hdmi_create_connector,
+static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
-	.dpms		= hdmi_dpms,
-	.commit		= hdmi_commit,
+	.enable		= hdmi_enable,
+	.disable	= hdmi_disable,
+};
+
+static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
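
encoder_to_hdmi() supersedes the old display_to_hdmi() accessor. Since
hdmi_bind() below hands out &hdata->encoder, it is presumably the usual
container_of() wrapper; a sketch under that assumption:

	static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
	{
		return container_of(e, struct hdmi_context, encoder);
	}
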
 
 static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2157,10 +1809,6 @@
 
 	hdata = container_of(work, struct hdmi_context, hotplug_work.work);
 
-	mutex_lock(&hdata->hdmi_mutex);
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	if (hdata->drm_dev)
 		drm_helper_hpd_irq_event(hdata->drm_dev);
 }
@@ -2259,30 +1907,6 @@
 	return ret;
 }
 
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
-					(struct device *dev)
-{
-	struct device_node *np = dev->of_node;
-	struct s5p_hdmi_platform_data *pd;
-	u32 value;
-
-	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		goto err_data;
-
-	if (!of_find_property(np, "hpd-gpio", &value)) {
-		DRM_ERROR("no hpd gpio property found\n");
-		goto err_data;
-	}
-
-	pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
-
-	return pd;
-
-err_data:
-	return NULL;
-}
-
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -2306,10 +1930,33 @@
 {
 	struct drm_device *drm_dev = data;
 	struct hdmi_context *hdata = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &hdata->encoder;
+	int ret, pipe;
 
 	hdata->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_HDMI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+
+	ret = hdmi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2343,43 +1990,30 @@
 static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device_node *ddc_node, *phy_node;
-	struct s5p_hdmi_platform_data *pdata;
-	struct hdmi_driver_data *drv_data;
 	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct hdmi_context *hdata;
 	struct resource *res;
 	int ret;
 
-	if (!dev->of_node)
-		return -ENODEV;
-
-	pdata = drm_hdmi_dt_parse_pdata(dev);
-	if (!pdata)
-		return -EINVAL;
-
 	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
 	if (!hdata)
 		return -ENOMEM;
 
-	hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
-	hdata->display.ops = &hdmi_display_ops;
-
-	mutex_init(&hdata->hdmi_mutex);
-
-	platform_set_drvdata(pdev, hdata);
-
-	match = of_match_node(hdmi_match_types, dev->of_node);
+	match = of_match_device(hdmi_match_types, dev);
 	if (!match)
 		return -ENODEV;
 
-	drv_data = (struct hdmi_driver_data *)match->data;
-	hdata->type = drv_data->type;
-	hdata->phy_confs = drv_data->phy_confs;
-	hdata->phy_conf_count = drv_data->phy_conf_count;
+	hdata->drv_data = match->data;
 
-	hdata->hpd_gpio = pdata->hpd_gpio;
+	platform_set_drvdata(pdev, hdata);
+
 	hdata->dev = dev;
+	hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+	if (hdata->hpd_gpio < 0) {
+		DRM_ERROR("cannot get hpd gpio property\n");
+		return hdata->hpd_gpio;
+	}
 
 	ret = hdmi_resources_init(hdata);
 	if (ret) {
@@ -2431,7 +2065,7 @@
 	}
 
 out_get_phy_port:
-	if (drv_data->is_apb_phy) {
+	if (hdata->drv_data->is_apb_phy) {
 		hdata->regs_hdmiphy = of_iomap(phy_node, 0);
 		if (!hdata->regs_hdmiphy) {
 			DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2454,8 +2088,6 @@
 		goto err_hdmiphy;
 	}
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
 	INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
 
 	ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4706b56..7f81cce 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -43,6 +43,7 @@
 
 #define MIXER_WIN_NR		3
 #define MIXER_DEFAULT_WIN	0
+#define VP_DEFAULT_WIN		2
 
 /* The pixelformats that are natively supported by the mixer. */
 #define MXR_FORMAT_RGB565	4
@@ -69,6 +70,24 @@
 	MXR_VER_128_0_0_184,
 };
 
+enum mixer_flag_bits {
+	MXR_BIT_POWERED,
+	MXR_BIT_VSYNC,
+};
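
These bits replace the mutex-guarded powered/int_en pair: test_bit(),
set_bit() and clear_bit() are atomic, so the checks in the update paths
need no lock, and the vblank state survives a power cycle as a flag
rather than a saved MXR_INT_EN snapshot. An illustrative helper (the
name is hypothetical) for the replay done in the power-on path further
down:

	static void mixer_replay_vsync(struct mixer_context *ctx)
	{
		struct mixer_resources *res = &ctx->mixer_res;

		if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
			/* ack any stale interrupt, then re-arm it */
			mixer_reg_writemask(res, MXR_INT_STATUS, ~0,
					    MXR_INT_CLEAR_VSYNC);
			mixer_reg_writemask(res, MXR_INT_EN, ~0,
					    MXR_INT_EN_VSYNC);
		}
	}
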
+
+static const uint32_t mixer_formats[] = {
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vp_formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+};
+
 struct mixer_context {
 	struct platform_device *pdev;
 	struct device		*dev;
@@ -76,13 +95,11 @@
 	struct exynos_drm_crtc	*crtc;
 	struct exynos_drm_plane	planes[MIXER_WIN_NR];
 	int			pipe;
+	unsigned long		flags;
 	bool			interlace;
-	bool			powered;
 	bool			vp_enabled;
 	bool			has_sclk;
-	u32			int_en;
 
-	struct mutex		mixer_mutex;
 	struct mixer_resources	mixer_res;
 	enum mixer_version_id	mxr_ver;
 	wait_queue_head_t	wait_vsync_queue;
@@ -380,19 +397,20 @@
 		usleep_range(10000, 12000);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
+static void vp_video_buffer(struct mixer_context *ctx,
+			    struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
 	dma_addr_t luma_addr[2], chroma_addr[2];
 	bool tiled_mode = false;
 	bool crcb_mode = false;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_NV12:
 		crcb_mode = false;
 		break;
@@ -401,21 +419,21 @@
 		break;
 	default:
 		DRM_ERROR("pixel format for vp is wrong [%d].\n",
-				plane->pixel_format);
+				fb->pixel_format);
 		return;
 	}
 
 	luma_addr[0] = plane->dma_addr[0];
 	chroma_addr[0] = plane->dma_addr[1];
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		ctx->interlace = true;
 		if (tiled_mode) {
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
 		} else {
-			luma_addr[1] = luma_addr[0] + plane->pitch;
-			chroma_addr[1] = chroma_addr[0] + plane->pitch;
+			luma_addr[1] = luma_addr[0] + fb->pitches[0];
+			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
 		}
 	} else {
 		ctx->interlace = false;
@@ -436,25 +454,25 @@
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
 	/* setting size of input image */
-	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height));
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height));
 	/* chroma height has to be reduced by 2 to avoid chroma distortions */
-	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height / 2));
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height / 2));
 
-	vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
-	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
+	vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
+	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
 	vp_reg_write(res, VP_SRC_H_POSITION,
 			VP_SRC_H_POSITION_VAL(plane->src_x));
 	vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
 
-	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
 	vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
 	if (ctx->interlace) {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
 	} else {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
 	}
 
@@ -469,9 +487,9 @@
 	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
 	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
-	mixer_cfg_layer(ctx, win, true);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+	mixer_cfg_layer(ctx, plane->zpos, true);
 	mixer_run(ctx);
 
 	mixer_vsync_set_update(ctx, true);
@@ -491,15 +509,15 @@
 static int mixer_setup_scale(const struct exynos_drm_plane *plane,
 		unsigned int *x_ratio, unsigned int *y_ratio)
 {
-	if (plane->crtc_width != plane->src_width) {
-		if (plane->crtc_width == 2 * plane->src_width)
+	if (plane->crtc_w != plane->src_w) {
+		if (plane->crtc_w == 2 * plane->src_w)
 			*x_ratio = 1;
 		else
 			goto fail;
 	}
 
-	if (plane->crtc_height != plane->src_height) {
-		if (plane->crtc_height == 2 * plane->src_height)
+	if (plane->crtc_h != plane->src_h) {
+		if (plane->crtc_h == 2 * plane->src_h)
 			*y_ratio = 1;
 		else
 			goto fail;
@@ -512,20 +530,22 @@
 	return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
+static void mixer_graph_buffer(struct mixer_context *ctx,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	unsigned int x_ratio = 0, y_ratio = 0;
 	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
 	dma_addr_t dma_addr;
 	unsigned int fmt;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB4444:
 		fmt = MXR_FORMAT_ARGB4444;
 		break;
@@ -557,12 +577,12 @@
 
 	/* converting dma address base and source offset */
 	dma_addr = plane->dma_addr[0]
-		+ (plane->src_x * plane->bpp >> 3)
-		+ (plane->src_y * plane->pitch);
+		+ (plane->src_x * fb->bits_per_pixel >> 3)
+		+ (plane->src_y * fb->pitches[0]);
 	src_x_offset = 0;
 	src_y_offset = 0;
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		ctx->interlace = true;
 	else
 		ctx->interlace = false;
@@ -576,18 +596,18 @@
 
 	/* setup geometry */
 	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
-			plane->pitch / (plane->bpp >> 3));
+			fb->pitches[0] / (fb->bits_per_pixel >> 3));
 
 	/* setup display size */
 	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
 		win == MIXER_DEFAULT_WIN) {
-		val  = MXR_MXR_RES_HEIGHT(plane->mode_height);
-		val |= MXR_MXR_RES_WIDTH(plane->mode_width);
+		val  = MXR_MXR_RES_HEIGHT(mode->vdisplay);
+		val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
 		mixer_reg_write(res, MXR_RESOLUTION, val);
 	}
 
-	val  = MXR_GRP_WH_WIDTH(plane->src_width);
-	val |= MXR_GRP_WH_HEIGHT(plane->src_height);
+	val  = MXR_GRP_WH_WIDTH(plane->src_w);
+	val |= MXR_GRP_WH_HEIGHT(plane->src_h);
 	val |= MXR_GRP_WH_H_SCALE(x_ratio);
 	val |= MXR_GRP_WH_V_SCALE(y_ratio);
 	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +625,8 @@
 	/* set buffer address to mixer */
 	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
 	mixer_cfg_layer(ctx, win, true);
 
 	/* layer update mandatory for mixer 16.0.33.0 */
@@ -710,6 +730,7 @@
 	struct mixer_context *ctx = arg;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, base, shadow;
+	int win;
 
 	spin_lock(&res->reg_slock);
 
@@ -735,8 +756,15 @@
 				goto out;
 		}
 
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
+		for (win = 0; win < MIXER_WIN_NR; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -881,8 +909,7 @@
 		}
 	}
 
-	ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
-								mixer_ctx->dev);
+	ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -891,8 +918,7 @@
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
-		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -900,10 +926,9 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+	__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return 0;
-	}
 
 	/* enable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
@@ -917,54 +942,48 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+	__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
 
 	/* disable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
-	if (win > 1 && mixer_ctx->vp_enabled)
-		vp_video_buffer(mixer_ctx, win);
+	if (plane->zpos > 1 && mixer_ctx->vp_enabled)
+		vp_video_buffer(mixer_ctx, plane);
 	else
-		mixer_graph_buffer(mixer_ctx, win);
+		mixer_graph_buffer(mixer_ctx, plane);
 }
 
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 	unsigned long flags;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	spin_lock_irqsave(&res->reg_slock, flags);
 	mixer_vsync_set_update(mixer_ctx, false);
 
-	mixer_cfg_layer(mixer_ctx, win, false);
+	mixer_cfg_layer(mixer_ctx, plane->zpos, false);
 
 	mixer_vsync_set_update(mixer_ctx, true);
 	spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -975,12 +994,8 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	int err;
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
 	if (err < 0) {
@@ -1008,13 +1023,8 @@
 	struct mixer_resources *res = &ctx->mixer_res;
 	int ret;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-
-	mutex_unlock(&ctx->mixer_mutex);
 
 	pm_runtime_get_sync(ctx->dev);
 
@@ -1046,15 +1056,14 @@
 		}
 	}
 
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = true;
-	mutex_unlock(&ctx->mixer_mutex);
+	set_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
-	if (ctx->int_en & MXR_INT_EN_VSYNC)
+	if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
 		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
-	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+		mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+	}
 	mixer_win_reset(ctx);
 }
 
@@ -1064,24 +1073,16 @@
 	struct mixer_resources *res = &ctx->mixer_res;
 	int i;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (!ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-	mutex_unlock(&ctx->mixer_mutex);
 
 	mixer_stop(ctx);
 	mixer_regs_dump(ctx);
 
 	for (i = 0; i < MIXER_WIN_NR; i++)
-		mixer_win_disable(crtc, i);
+		mixer_disable_plane(crtc, &ctx->planes[i]);
 
-	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = false;
-	mutex_unlock(&ctx->mixer_mutex);
+	clear_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	clk_disable_unprepare(res->hdmi);
 	clk_disable_unprepare(res->mixer);
@@ -1120,8 +1121,8 @@
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
 	.wait_for_vblank	= mixer_wait_for_vblank,
-	.win_commit		= mixer_win_commit,
-	.win_disable		= mixer_win_disable,
+	.update_plane		= mixer_update_plane,
+	.disable_plane		= mixer_disable_plane,
 };
 
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1184,7 +1185,6 @@
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	struct exynos_drm_plane *exynos_plane;
-	enum drm_plane_type type;
 	unsigned int zpos;
 	int ret;
 
@@ -1193,10 +1193,23 @@
 		return ret;
 
 	for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+		enum drm_plane_type type;
+		const uint32_t *formats;
+		unsigned int fcount;
+
 		type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
+		if (zpos < VP_DEFAULT_WIN) {
+			formats = mixer_formats;
+			fcount = ARRAY_SIZE(mixer_formats);
+		} else {
+			formats = vp_formats;
+			fcount = ARRAY_SIZE(vp_formats);
+		}
+
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats, fcount,
+					zpos);
 		if (ret)
 			return ret;
 	}
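
exynos_plane_init() grows formats/fcount parameters so each window only
advertises what its hardware path can scan out (the RGB windows vs. the
VP window). A hypothetical shape of the updated helper, inferred from
this call site; exynos_plane_funcs is an assumed name:

	int exynos_plane_init(struct drm_device *dev,
			      struct exynos_drm_plane *exynos_plane,
			      unsigned long possible_crtcs,
			      enum drm_plane_type type,
			      const uint32_t *formats, unsigned int fcount,
			      unsigned int zpos)
	{
		int err;

		err = drm_universal_plane_init(dev, &exynos_plane->base,
					       possible_crtcs,
					       &exynos_plane_funcs,
					       formats, fcount, type);
		if (err)
			return err;

		exynos_plane->zpos = zpos;
		return 0;
	}
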
@@ -1243,8 +1256,6 @@
 		return -ENOMEM;
 	}
 
-	mutex_init(&ctx->mixer_mutex);
-
 	if (dev->of_node) {
 		const struct of_device_id *match;
 
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
new file mode 100644
index 0000000..c78cf3f
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -0,0 +1,18 @@
+config DRM_FSL_DCU
+	tristate "DRM Support for Freescale DCU"
+	depends on DRM && OF && ARM
+	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_PANEL
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select REGMAP_MMIO
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you have a Freescale DCU chipset.
+	  If M is selected, the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
new file mode 100644
index 0000000..6ea1523
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -0,0 +1,7 @@
+fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
+		 fsl_dcu_drm_kms.o \
+		 fsl_dcu_drm_rgb.o \
+		 fsl_dcu_drm_plane.o \
+		 fsl_dcu_drm_crtc.o \
+		 fsl_dcu_drm_fbdev.o
+obj-$(CONFIG_DRM_FSL_DCU)	+= fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
new file mode 100644
index 0000000..82a3d31
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
+static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
+					 struct drm_crtc_state *state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		dev_err(fsl_dev->dev, "Disable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+	unsigned long dcuclk;
+	int ret;
+
+	index = drm_crtc_index(crtc);
+	dcuclk = clk_get_rate(fsl_dev->clk);
+	div = dcuclk / mode->clock / 1000;
+
+	/* Configure timings: */
+	hbp = mode->htotal - mode->hsync_end;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+
+	ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+			   DCU_HSYN_PARA_BP(hbp) |
+			   DCU_HSYN_PARA_PW(hsw) |
+			   DCU_HSYN_PARA_FP(hfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+			   DCU_VSYN_PARA_BP(vbp) |
+			   DCU_VSYN_PARA_PW(vsw) |
+			   DCU_VSYN_PARA_FP(vfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+			   DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+			   DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
+			   DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+			   DCU_BGND_G(0) | DCU_BGND_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+			   DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+set_failed:
+	dev_err(dev->dev, "set DCU register failed\n");
+}
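
A quick sanity check of the timing math with the standard CEA-861
1920x1080@60 mode (the 594 MHz DCU clock is an assumed figure):

	/*
	 *   hdisplay=1920  hsync_start=2008  hsync_end=2052  htotal=2200
	 *   hfp = 2008 - 1920 =  88
	 *   hsw = 2052 - 2008 =  44
	 *   hbp = 2200 - 2052 = 148
	 *   div = 594000000 / 148500 / 1000 = 4	(mode->clock is in kHz)
	 */
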
+
+static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+	.atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
+	.atomic_check = fsl_dcu_drm_crtc_atomic_check,
+	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
+	.disable = fsl_dcu_drm_disable_crtc,
+	.enable = fsl_dcu_drm_crtc_enable,
+	.mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
+	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
+};
+
+static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.destroy = drm_crtc_cleanup,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.set_config = drm_atomic_helper_set_config,
+};
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
+{
+	struct drm_plane *primary;
+	struct drm_crtc *crtc = &fsl_dev->crtc;
+	unsigned int i, j, reg_num;
+	int ret;
+
+	primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+	ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
+					&fsl_dcu_drm_crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
+
+	if (!strcmp(fsl_dev->soc->name, "ls1021a"))
+		reg_num = LS1021A_LAYER_REG_NUM;
+	else
+		reg_num = VF610_LAYER_REG_NUM;
+	for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
+		for (j = 0; j < reg_num; j++) {
+			ret = regmap_write(fsl_dev->regmap,
+					   DCU_CTRLDESCLN(i, j), 0);
+			if (ret)
+				goto init_failed;
+		}
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		goto init_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto init_failed;
+
+	return 0;
+init_failed:
+	dev_err(fsl_dev->dev, "init DCU register failed\n");
+	return ret;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
new file mode 100644
index 0000000..43d4da2
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CRTC_H__
+#define __FSL_DCU_DRM_CRTC_H__
+
+struct fsl_dcu_drm_device;
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
new file mode 100644
index 0000000..9a8e2da
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct regmap_config fsl_dcu_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static int fsl_dcu_drm_irq_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = drm_irq_install(dev, fsl_dev->irq);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to install IRQ handler\n");
+		return ret;
+	}
+
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return ret;
+}
+
+static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+{
+	struct device *dev = drm->dev;
+	struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+	int ret;
+
+	ret = fsl_dcu_drm_modeset_init(fsl_dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize mode setting\n");
+		return ret;
+	}
+
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize vblank\n");
+		goto done;
+	}
+	drm->vblank_disable_allowed = true;
+
+	ret = fsl_dcu_drm_irq_init(drm);
+	if (ret < 0)
+		goto done;
+	drm->irq_enabled = true;
+
+	fsl_dcu_fbdev_init(drm);
+
+	return 0;
+done:
+	if (ret) {
+		drm_mode_config_cleanup(drm);
+		drm_vblank_cleanup(drm);
+		drm_irq_uninstall(drm);
+		drm->dev_private = NULL;
+	}
+
+	return ret;
+}
+
+static int fsl_dcu_unload(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+}
+
+static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int int_status;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	if (int_status & DCU_INT_STATUS_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	return 0;
+}
+
+static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value |= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+}
+
+static const struct file_operations fsl_dcu_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver fsl_dcu_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+				| DRIVER_PRIME | DRIVER_ATOMIC,
+	.load			= fsl_dcu_load,
+	.unload			= fsl_dcu_unload,
+	.preclose		= fsl_dcu_drm_preclose,
+	.irq_handler		= fsl_dcu_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= fsl_dcu_drm_enable_vblank,
+	.disable_vblank		= fsl_dcu_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_dumb_destroy,
+	.fops			= &fsl_dcu_drm_fops,
+	.name			= "fsl-dcu-drm",
+	.desc			= "Freescale DCU DRM",
+	.date			= "20150213",
+	.major			= 1,
+	.minor			= 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int fsl_dcu_drm_pm_suspend(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+
+	if (!fsl_dev)
+		return 0;
+
+	drm_kms_helper_poll_disable(fsl_dev->drm);
+	regcache_cache_only(fsl_dev->regmap, true);
+	regcache_mark_dirty(fsl_dev->regmap);
+	clk_disable(fsl_dev->clk);
+	clk_unprepare(fsl_dev->clk);
+
+	return 0;
+}
+
+static int fsl_dcu_drm_pm_resume(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+	int ret;
+
+	if (!fsl_dev)
+		return 0;
+
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	drm_kms_helper_poll_enable(fsl_dev->drm);
+	regcache_cache_only(fsl_dev->regmap, false);
+	regcache_sync(fsl_dev->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_dcu_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume)
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
+	.name = "ls1021a",
+	.total_layer = 16,
+	.max_layer = 4,
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
+	.name = "vf610",
+	.total_layer = 64,
+	.max_layer = 6,
+};
+
+static const struct of_device_id fsl_dcu_of_match[] = {
+	{
+		.compatible = "fsl,ls1021a-dcu",
+		.data = &fsl_dcu_ls1021a_data,
+	}, {
+		.compatible = "fsl,vf610-dcu",
+		.data = &fsl_dcu_vf610_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, fsl_dcu_of_match);
+
+static int fsl_dcu_drm_probe(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev;
+	struct drm_device *drm;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *base;
+	struct drm_driver *driver = &fsl_dcu_drm_driver;
+	const struct of_device_id *id;
+	int ret;
+
+	fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
+	if (!fsl_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "could not get memory IO resource\n");
+		return -ENODEV;
+	}
+
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
+		ret = PTR_ERR(base);
+		return ret;
+	}
+
+	fsl_dev->irq = platform_get_irq(pdev, 0);
+	if (fsl_dev->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return -ENXIO;
+	}
+
+	fsl_dev->clk = devm_clk_get(dev, "dcu");
+	if (IS_ERR(fsl_dev->clk)) {
+		ret = PTR_ERR(fsl_dev->clk);
+		dev_err(dev, "failed to get dcu clock\n");
+		return ret;
+	}
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
+			&fsl_dcu_regmap_config);
+	if (IS_ERR(fsl_dev->regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(fsl_dev->regmap);
+	}
+
+	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+	fsl_dev->soc = id->data;
+
+	drm = drm_dev_alloc(driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	fsl_dev->dev = dev;
+	fsl_dev->drm = drm;
+	fsl_dev->np = dev->of_node;
+	drm->dev_private = fsl_dev;
+	dev_set_drvdata(dev, fsl_dev);
+	drm_dev_set_unique(drm, dev_name(dev));
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto unref;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+unref:
+	drm_dev_unref(drm);
+	return ret;
+}
+
+static int fsl_dcu_drm_remove(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+
+	drm_put_dev(fsl_dev->drm);
+
+	return 0;
+}
+
+static struct platform_driver fsl_dcu_drm_platform_driver = {
+	.probe		= fsl_dcu_drm_probe,
+	.remove		= fsl_dcu_drm_remove,
+	.driver		= {
+		.name	= "fsl-dcu",
+		.pm	= &fsl_dcu_drm_pm_ops,
+		.of_match_table = fsl_dcu_of_match,
+	},
+};
+
+module_platform_driver(fsl_dcu_drm_platform_driver);
+
+MODULE_DESCRIPTION("Freescale DCU DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
new file mode 100644
index 0000000..579b9e4
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_DRV_H__
+#define __FSL_DCU_DRM_DRV_H__
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_output.h"
+#include "fsl_dcu_drm_plane.h"
+
+#define DCU_DCU_MODE			0x0010
+#define DCU_MODE_BLEND_ITER(x)		((x) << 20)
+#define DCU_MODE_RASTER_EN		BIT(14)
+#define DCU_MODE_DCU_MODE(x)		(x)
+#define DCU_MODE_DCU_MODE_MASK		0x03
+#define DCU_MODE_OFF			0
+#define DCU_MODE_NORMAL			1
+#define DCU_MODE_TEST			2
+#define DCU_MODE_COLORBAR		3
+
+#define DCU_BGND			0x0014
+#define DCU_BGND_R(x)			((x) << 16)
+#define DCU_BGND_G(x)			((x) << 8)
+#define DCU_BGND_B(x)			(x)
+
+#define DCU_DISP_SIZE			0x0018
+#define DCU_DISP_SIZE_DELTA_Y(x)	((x) << 16)
+/* Register value is 1/16 of the horizontal resolution */
+#define DCU_DISP_SIZE_DELTA_X(x)	((x) >> 4)
+
+#define DCU_HSYN_PARA			0x001c
+#define DCU_HSYN_PARA_BP(x)		((x) << 22)
+#define DCU_HSYN_PARA_PW(x)		((x) << 11)
+#define DCU_HSYN_PARA_FP(x)		(x)
+
+#define DCU_VSYN_PARA			0x0020
+#define DCU_VSYN_PARA_BP(x)		((x) << 22)
+#define DCU_VSYN_PARA_PW(x)		((x) << 11)
+#define DCU_VSYN_PARA_FP(x)		(x)
+
+#define DCU_SYN_POL			0x0024
+#define DCU_SYN_POL_INV_PXCK_FALL	(0 << 6)
+#define DCU_SYN_POL_NEG_REMAIN		(0 << 5)
+#define DCU_SYN_POL_INV_VS_LOW		BIT(1)
+#define DCU_SYN_POL_INV_HS_LOW		BIT(0)
+
+#define DCU_THRESHOLD			0x0028
+#define DCU_THRESHOLD_LS_BF_VS(x)	((x) << 16)
+#define DCU_THRESHOLD_OUT_BUF_HIGH(x)	((x) << 8)
+#define DCU_THRESHOLD_OUT_BUF_LOW(x)	(x)
+#define BF_VS_VAL			0x03
+#define BUF_MAX_VAL			0x78
+#define BUF_MIN_VAL			0x0a
+
+#define DCU_INT_STATUS			0x002C
+#define DCU_INT_STATUS_VSYNC		BIT(0)
+#define DCU_INT_STATUS_UNDRUN		BIT(1)
+#define DCU_INT_STATUS_LSBFVS		BIT(2)
+#define DCU_INT_STATUS_VBLANK		BIT(3)
+#define DCU_INT_STATUS_CRCREADY		BIT(4)
+#define DCU_INT_STATUS_CRCOVERFLOW	BIT(5)
+#define DCU_INT_STATUS_P1FIFOLO		BIT(6)
+#define DCU_INT_STATUS_P1FIFOHI		BIT(7)
+#define DCU_INT_STATUS_P2FIFOLO		BIT(8)
+#define DCU_INT_STATUS_P2FIFOHI		BIT(9)
+#define DCU_INT_STATUS_PROGEND		BIT(10)
+#define DCU_INT_STATUS_IPMERROR		BIT(11)
+#define DCU_INT_STATUS_LYRTRANS		BIT(12)
+#define DCU_INT_STATUS_DMATRANS		BIT(14)
+#define DCU_INT_STATUS_P3FIFOLO		BIT(16)
+#define DCU_INT_STATUS_P3FIFOHI		BIT(17)
+#define DCU_INT_STATUS_P4FIFOLO		BIT(18)
+#define DCU_INT_STATUS_P4FIFOHI		BIT(19)
+#define DCU_INT_STATUS_P1EMPTY		BIT(26)
+#define DCU_INT_STATUS_P2EMPTY		BIT(27)
+#define DCU_INT_STATUS_P3EMPTY		BIT(28)
+#define DCU_INT_STATUS_P4EMPTY		BIT(29)
+
+#define DCU_INT_MASK			0x0030
+#define DCU_INT_MASK_VSYNC		BIT(0)
+#define DCU_INT_MASK_UNDRUN		BIT(1)
+#define DCU_INT_MASK_LSBFVS		BIT(2)
+#define DCU_INT_MASK_VBLANK		BIT(3)
+#define DCU_INT_MASK_CRCREADY		BIT(4)
+#define DCU_INT_MASK_CRCOVERFLOW	BIT(5)
+#define DCU_INT_MASK_P1FIFOLO		BIT(6)
+#define DCU_INT_MASK_P1FIFOHI		BIT(7)
+#define DCU_INT_MASK_P2FIFOLO		BIT(8)
+#define DCU_INT_MASK_P2FIFOHI		BIT(9)
+#define DCU_INT_MASK_PROGEND		BIT(10)
+#define DCU_INT_MASK_IPMERROR		BIT(11)
+#define DCU_INT_MASK_LYRTRANS		BIT(12)
+#define DCU_INT_MASK_DMATRANS		BIT(14)
+#define DCU_INT_MASK_P3FIFOLO		BIT(16)
+#define DCU_INT_MASK_P3FIFOHI		BIT(17)
+#define DCU_INT_MASK_P4FIFOLO		BIT(18)
+#define DCU_INT_MASK_P4FIFOHI		BIT(19)
+#define DCU_INT_MASK_P1EMPTY		BIT(26)
+#define DCU_INT_MASK_P2EMPTY		BIT(27)
+#define DCU_INT_MASK_P3EMPTY		BIT(28)
+#define DCU_INT_MASK_P4EMPTY		BIT(29)
+
+#define DCU_DIV_RATIO			0x0054
+
+#define DCU_UPDATE_MODE			0x00cc
+#define DCU_UPDATE_MODE_MODE		BIT(31)
+#define DCU_UPDATE_MODE_READREG		BIT(30)
+
+#define DCU_DCFB_MAX			0x300
+
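+/* Per-layer control descriptor registers: each layer owns a 0x40-byte
+ * block of 32-bit registers starting at 0x200 and numbered from 1, so
+ * e.g. DCU_CTRLDESCLN(0, 1) = 0x200 and DCU_CTRLDESCLN(1, 1) = 0x240.
+ */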
+#define DCU_CTRLDESCLN(layer, reg)	(0x200 + ((reg) - 1) * 4 + (layer) * 0x40)
+
+#define DCU_LAYER_HEIGHT(x)		((x) << 16)
+#define DCU_LAYER_WIDTH(x)		(x)
+
+#define DCU_LAYER_POSY(x)		((x) << 16)
+#define DCU_LAYER_POSX(x)		(x)
+
+#define DCU_LAYER_EN			BIT(31)
+#define DCU_LAYER_TILE_EN		BIT(30)
+#define DCU_LAYER_DATA_SEL_CLUT		BIT(29)
+#define DCU_LAYER_SAFETY_EN		BIT(28)
+#define DCU_LAYER_TRANS(x)		((x) << 20)
+#define DCU_LAYER_BPP(x)		((x) << 16)
+#define DCU_LAYER_RLE_EN		BIT(15)
+#define DCU_LAYER_LUOFFS(x)		((x) << 4)
+#define DCU_LAYER_BB_ON			BIT(2)
+#define DCU_LAYER_AB(x)			(x)
+
+#define DCU_LAYER_CKMAX_R(x)		((x) << 16)
+#define DCU_LAYER_CKMAX_G(x)		((x) << 8)
+#define DCU_LAYER_CKMAX_B(x)		(x)
+
+#define DCU_LAYER_CKMIN_R(x)		((x) << 16)
+#define DCU_LAYER_CKMIN_G(x)		((x) << 8)
+#define DCU_LAYER_CKMIN_B(x)		(x)
+
+#define DCU_LAYER_TILE_VER(x)		((x) << 16)
+#define DCU_LAYER_TILE_HOR(x)		(x)
+
+#define DCU_LAYER_FG_FCOLOR(x)		(x)
+
+#define DCU_LAYER_BG_BCOLOR(x)		(x)
+
+#define DCU_LAYER_POST_SKIP(x)		((x) << 16)
+#define DCU_LAYER_PRE_SKIP(x)		(x)
+
+#define FSL_DCU_RGB565			4
+#define FSL_DCU_RGB888			5
+#define FSL_DCU_ARGB8888		6
+#define FSL_DCU_ARGB1555		11
+#define FSL_DCU_ARGB4444		12
+#define FSL_DCU_YUV422			14
+
+#define VF610_LAYER_REG_NUM		9
+#define LS1021A_LAYER_REG_NUM		10
+
+struct clk;
+struct device;
+struct drm_device;
+
+struct fsl_dcu_soc_data {
+	const char *name;
+	/* total number of layers */
+	unsigned int total_layer;
+	/* max number of layers the DCU supports */
+	unsigned int max_layer;
+};
+
+struct fsl_dcu_drm_device {
+	struct device *dev;
+	struct device_node *np;
+	struct regmap *regmap;
+	int irq;
+	struct clk *clk;
+	/* protects hardware registers */
+	spinlock_t irq_lock;
+	struct drm_device *drm;
+	struct drm_fbdev_cma *fbdev;
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct fsl_dcu_drm_connector connector;
+	const struct fsl_dcu_soc_data *soc;
+};
+
+void fsl_dcu_fbdev_init(struct drm_device *dev);
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
new file mode 100644
index 0000000..8b8b819
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+/* initialize fbdev helper */
+void fsl_dcu_fbdev_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
+
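+	/* 24 bpp preferred, one CRTC, at most one connector */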
+	fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
new file mode 100644
index 0000000..0ef5959
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_fb_cma_create,
+};
+
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
+{
+	drm_mode_config_init(fsl_dev->drm);
+
+	fsl_dev->drm->mode_config.min_width = 0;
+	fsl_dev->drm->mode_config.min_height = 0;
+	fsl_dev->drm->mode_config.max_width = 2031;
+	fsl_dev->drm->mode_config.max_height = 2047;
+	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
+
+	drm_kms_helper_poll_init(fsl_dev->drm);
+	fsl_dcu_drm_crtc_create(fsl_dev);
+	fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+	fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+	drm_mode_config_reset(fsl_dev->drm);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
new file mode 100644
index 0000000..7093109
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CONNECTOR_H__
+#define __FSL_DCU_DRM_CONNECTOR_H__
+
+struct fsl_dcu_drm_connector {
+	struct drm_connector base;
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+};
+
+static inline struct fsl_dcu_drm_connector *
+to_fsl_dcu_connector(struct drm_connector *con)
+{
+	return con ? container_of(con, struct fsl_dcu_drm_connector, base)
+		     : NULL;
+}
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder);
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc);
+
+#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
new file mode 100644
index 0000000..82be6b8
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
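+/*
+ * Map a DRM plane index onto a DCU layer number by mirroring it; e.g.
+ * on ls1021a (total_layer = 16) DRM plane 0 becomes DCU layer 15.
+ */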
+static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int total_layer = fsl_dev->soc->total_layer;
+	unsigned int index;
+
+	index = drm_plane_index(plane);
+	if (index < total_layer)
+		return total_layer - index - 1;
+
+	dev_err(fsl_dev->dev, "No more layer left\n");
+	return -EINVAL;
+}
+
+static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
+					  struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_YUV422:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
+					     struct drm_plane_state *old_state)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int value;
+	int index, ret;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
+	if (ret)
+		dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_LAYER_EN;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
+	if (ret)
+		dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
+					    struct drm_plane_state *old_state)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct drm_gem_cma_object *gem;
+	unsigned int alpha, bpp;
+	int index, ret;
+
+	if (!fb)
+		return;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+		bpp = FSL_DCU_RGB565;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_RGB888:
+		bpp = FSL_DCU_RGB888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		bpp = FSL_DCU_ARGB8888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_BGRA4444:
+		bpp = FSL_DCU_ARGB4444;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB1555:
+		bpp = FSL_DCU_ARGB1555;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_YUV422:
+		bpp = FSL_DCU_YUV422;
+		alpha = 0xff;
+		break;
+	default:
+		return;
+	}
+
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+			   DCU_LAYER_HEIGHT(state->crtc_h) |
+			   DCU_LAYER_WIDTH(state->crtc_w));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+			   DCU_LAYER_POSY(state->crtc_y) |
+			   DCU_LAYER_POSX(state->crtc_x));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_CTRLDESCLN(index, 3), gem->paddr);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+			   DCU_LAYER_EN |
+			   DCU_LAYER_TRANS(alpha) |
+			   DCU_LAYER_BPP(bpp) |
+			   DCU_LAYER_AB(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+			   DCU_LAYER_CKMAX_R(0xFF) |
+			   DCU_LAYER_CKMAX_G(0xFF) |
+			   DCU_LAYER_CKMAX_B(0xFF));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+			   DCU_LAYER_CKMIN_R(0) |
+			   DCU_LAYER_CKMIN_G(0) |
+			   DCU_LAYER_CKMIN_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+			   DCU_LAYER_FG_FCOLOR(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+			   DCU_LAYER_BG_BCOLOR(0));
+	if (ret)
+		goto set_failed;
+	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
+		ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+				   DCU_LAYER_POST_SKIP(0) |
+				   DCU_LAYER_PRE_SKIP(0));
+		if (ret)
+			goto set_failed;
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+
+set_failed:
+	dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void
+fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+}
+
+static int
+fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+	return 0;
+}
+
+static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_plane_atomic_check,
+	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
+	.atomic_update = fsl_dcu_drm_plane_atomic_update,
+	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
+	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
+};
+
+static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.destroy = fsl_dcu_drm_plane_destroy,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = drm_atomic_helper_plane_reset,
+	.update_plane = drm_atomic_helper_update_plane,
+};
+
+static const u32 fsl_dcu_drm_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_YUV422,
+};
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
+{
+	struct drm_plane *primary;
+	int ret;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (!primary) {
+		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+		return NULL;
+	}
+
+	/* possible_crtcs will be filled in later by crtc_init */
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &fsl_dcu_drm_plane_funcs,
+				       fsl_dcu_drm_plane_formats,
+				       ARRAY_SIZE(fsl_dcu_drm_plane_formats),
+				       DRM_PLANE_TYPE_PRIMARY);
+	if (ret) {
+		kfree(primary);
+		return NULL;
+	}
+	drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);
+
+	return primary;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
new file mode 100644
index 0000000..d657f08
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_PLANE_H__
+#define __FSL_DCU_DRM_PLANE_H__
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
+
+#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
new file mode 100644
index 0000000..fe8ab5d
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+static int
+fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
+	.disable = fsl_dcu_drm_encoder_disable,
+	.enable = fsl_dcu_drm_encoder_enable,
+};
+
+static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = fsl_dcu_drm_encoder_destroy,
+};
+
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder = &fsl_dev->encoder;
+	int ret;
+
+	encoder->possible_crtcs = 1;
+	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.destroy = fsl_dcu_drm_connector_destroy,
+	.detect = fsl_dcu_drm_connector_detect,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_encoder *
+fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
+	return fsl_con->encoder;
+}
+
+static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_connector;
+	int (*get_modes)(struct drm_panel *panel);
+	int num_modes = 0;
+
+	fsl_connector = to_fsl_dcu_connector(connector);
+	if (fsl_connector->panel && fsl_connector->panel->funcs &&
+	    fsl_connector->panel->funcs->get_modes) {
+		get_modes = fsl_connector->panel->funcs->get_modes;
+		num_modes = get_modes(fsl_connector->panel);
+	}
+
+	return num_modes;
+}
+
+static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
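+	/*
+	 * DCU_DISP_SIZE_DELTA_X stores hdisplay / 16, so only modes whose
+	 * horizontal resolution is a multiple of 16 can be programmed.
+	 */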
+	if (mode->hdisplay & 0xf)
+		return MODE_ERROR;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.best_encoder = fsl_dcu_drm_connector_best_encoder,
+	.get_modes = fsl_dcu_drm_connector_get_modes,
+	.mode_valid = fsl_dcu_drm_connector_mode_valid,
+};
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &fsl_dev->connector.base;
+	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
+	struct device_node *panel_node;
+	int ret;
+
+	fsl_dev->connector.encoder = encoder;
+
+	ret = drm_connector_init(fsl_dev->drm, connector,
+				 &fsl_dcu_drm_connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_connector_register(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_sysfs;
+
+	drm_object_property_set_value(&connector->base,
+				      mode_config->dpms_property,
+				      DRM_MODE_DPMS_OFF);
+
+	panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+	if (!panel_node) {
+		ret = -ENODEV;
+		goto err_sysfs;
+	}
+	fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+	of_node_put(panel_node);
+	if (!fsl_dev->connector.panel) {
+		ret = -EPROBE_DEFER;
+		goto err_sysfs;
+	}
+
+	ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+	if (ret) {
+		dev_err(fsl_dev->dev, "failed to attach panel\n");
+		goto err_sysfs;
+	}
+
+	return 0;
+
+err_sysfs:
+	drm_connector_unregister(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
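
The connector code above resolves the panel through the "fsl,panel" phandle and defers probing until the panel driver has bound. A minimal sketch of that find-or-defer pattern (hypothetical helper, assuming the same binding):

static struct drm_panel *example_find_panel(struct device_node *np)
{
	struct device_node *panel_node;
	struct drm_panel *panel = NULL;

	panel_node = of_parse_phandle(np, "fsl,panel", 0);
	if (panel_node) {
		panel = of_drm_find_panel(panel_node);
		of_node_put(panel_node);
	}

	return panel;	/* NULL: not bound yet, caller defers with -EPROBE_DEFER */
}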
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a..db9f7d0 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@
 		break;
 	default:
 		/* software fallback */
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 
 	if (!gma_power_begin(dev, false)) {
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 	psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@
 	/* Avoid the 8 pixel erratum */
 	if (region->width == 8 || region->height == 8 ||
 		(info->flags & FBINFO_HWACCEL_DISABLED))
-		return cfb_copyarea(info, region);
+		return drm_fb_helper_cfb_copyarea(info, region);
 
 	psbfb_copyarea_accel(info, region);
 }
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6..2eaf1b3 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
 	.fb_copyarea = psbfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_sync = psbfb_sync,
 	.fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = psbfb_pan,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
 };
@@ -343,7 +343,6 @@
 	struct drm_framebuffer *fb;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	int size;
 	int ret;
 	struct gtt_range *backing;
@@ -409,9 +408,9 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_err1;
 	}
 	info->par = fbdev;
@@ -426,7 +425,6 @@
 	psbfb->fbdev = info;
 
 	fbdev->psb_fb_helper.fb = fb;
-	fbdev->psb_fb_helper.fbdev = info;
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@
 	} else	/* Software */
 		info->fbops = &psbfb_unaccel_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
-
 	info->fix.smem_start = dev->mode_config.fb_base;
 	info->fix.smem_len = size;
 	info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@
 	info->screen_size = size;
 
 	if (dev_priv->gtt.stolen_size) {
-		info->apertures = alloc_apertures(1);
-		if (!info->apertures) {
-			ret = -ENOMEM;
-			goto out_unref;
-		}
 		info->apertures->ranges[0].base = dev->mode_config.fb_base;
 		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
 	}
@@ -483,6 +470,8 @@
 		psb_gtt_free_range(dev, backing);
 	else
 		drm_gem_object_unreference(&backing->gem);
+
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
 out_err1:
 	mutex_unlock(&dev->struct_mutex);
 	psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@
 
 static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
 {
-	struct fb_info *info;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 
-	if (fbdev->psb_fb_helper.fbdev) {
-		info = fbdev->psb_fb_helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
+
 	drm_fb_helper_fini(&fbdev->psb_fb_helper);
 	drm_framebuffer_unregister_private(&psbfb->base);
 	drm_framebuffer_cleanup(&psbfb->base);
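
For reference, the conversion pattern applied above: drm_fb_helper_alloc_fbi() bundles what used to be open-coded framebuffer_alloc(), fb_alloc_cmap() and alloc_apertures() calls, and wires helper->fbdev itself; drm_fb_helper_release_fbi() undoes all of it on the error and teardown paths. A minimal sketch under those assumptions (illustrative, not gma500 code):

static int example_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *info;

	/* Replaces framebuffer_alloc() + fb_alloc_cmap() +
	 * alloc_apertures(), and sets helper->fbdev for us.
	 */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... driver-specific fb_info setup goes here ... */

	return 0;
}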
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 2aaa3c8..00416f2 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -54,7 +54,7 @@
 }
 
 /* ADI recommended values for proper operation. */
-static const struct reg_default adv7511_fixed_registers[] = {
+static const struct reg_sequence adv7511_fixed_registers[] = {
 	{ 0x98, 0x03 },
 	{ 0x9a, 0xe0 },
 	{ 0x9c, 0x30 },
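
The rename above tracks a regmap API split: struct reg_default describes a register's power-on default, while struct reg_sequence describes an ordered write sequence and additionally carries an optional per-write delay_us, consumed by e.g. regmap_multi_reg_write(). A sketch with illustrative values:

static const struct reg_sequence example_init_sequence[] = {
	{ 0x98, 0x03 },
	{ 0x9a, 0xe0, 10 },	/* delay_us: wait 10 us after this write */
};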
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9..051eab3 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,30 +36,6 @@
 	  i810 driver instead, and the Atom z5xx series has an entirely
 	  different implementation.
 
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	default y
-	help
-	  Choose this option if you want kernel modesetting enabled by default.
-
-	  If in doubt, say "Y".
-
-config DRM_I915_FBDEV
-	bool "Enable legacy fbdev support for the modesetting intel driver"
-	depends on DRM_I915
-	select DRM_KMS_FB_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	default y
-	help
-	  Choose this option if you have a need for the legacy fbdev
-	  support. Note that this support also provide the linux console
-	  support on top of the intel modesetting driver.
-
-	  If in doubt, say "Y".
-
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48..998b464 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -6,12 +6,13 @@
 
 # core driver code
 i915-y := i915_drv.o \
+	  i915_irq.o \
 	  i915_params.o \
           i915_suspend.o \
 	  i915_sysfs.o \
+	  intel_csr.o \
 	  intel_pm.o \
-	  intel_runtime_pm.o \
-	  intel_csr.o
+	  intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@
 i915-y += i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
-	  i915_gem_render_state.o \
 	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
+	  i915_gem_fence.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
+	  i915_gem_render_state.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
 	  i915_gpu_error.o \
-	  i915_irq.o \
 	  i915_trace_points.o \
 	  intel_lrc.o \
+	  intel_mocs.o \
 	  intel_ringbuffer.o \
 	  intel_uncore.o
 
@@ -46,18 +48,21 @@
 
 # modesetting core code
 i915-y += intel_audio.o \
+	  intel_atomic.o \
+	  intel_atomic_plane.o \
 	  intel_bios.o \
 	  intel_display.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
+	  intel_hotplug.o \
 	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_psr.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_I915_FBDEV)	+= intel_fbdev.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION)	+= intel_fbdev.o
 
 # modesetting output/encoder code
 i915-y += dvo_ch7017.o \
@@ -66,15 +71,13 @@
 	  dvo_ns2501.o \
 	  dvo_sil164.o \
 	  dvo_tfp410.o \
-	  intel_atomic.o \
-	  intel_atomic_plane.o \
 	  intel_crt.o \
 	  intel_ddi.o \
-	  intel_dp.o \
 	  intel_dp_mst.o \
+	  intel_dp.o \
 	  intel_dsi.o \
-	  intel_dsi_pll.o \
 	  intel_dsi_panel_vbt.o \
+	  intel_dsi_pll.o \
 	  intel_dvo.o \
 	  intel_hdmi.o \
 	  intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a8..732ce87 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Eric Anholt <eric@anholt.net>
+ *    Thomas Richter <thor@math.tu-berlin.de>
  *
  * Minor modifications (Dithering enable):
  *    Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
 /*
  * LCD Vertical Display Size
  */
-#define VR21	0x20
+#define VR21	0x21
 
 /*
  * Panel power down status
@@ -155,16 +156,33 @@
 # define VR8F_POWER_MASK		(0x3c)
 # define VR8F_POWER_POS			(2)
 
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+	0x11, 0x12,
+	0x18, 0x19, 0x1a, 0x1f,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x8e, 0x8f,
+	0x10		/* this must come last */
+};
+
 
 struct ivch_priv {
 	bool quiet;
 
 	uint16_t width, height;
+
+	/* Register backup */
+
+	uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
 /**
  * Reads a register on the ivch.
  *
@@ -246,6 +264,7 @@
 {
 	struct ivch_priv *priv;
 	uint16_t temp;
+	int i;
 
 	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -273,6 +292,14 @@
 	ivch_read(dvo, VR20, &priv->width);
 	ivch_read(dvo, VR21, &priv->height);
 
+	/* Make a backup of the registers to be able to restore them
+	 * upon suspend.
+	 */
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+	ivch_dump_regs(dvo);
+
 	return true;
 
 out:
@@ -294,12 +321,31 @@
 	return MODE_OK;
 }
 
+/* Restore the DVO registers after a resume from
+ * suspend-to-RAM. The registers were saved during
+ * initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	int i;
+
+	DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+	ivch_write(dvo, VR10, 0x0000);
+
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
 /** Sets the power state of the panel connected to the ivch */
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
 	uint16_t vr01, vr30, backlight;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return;
@@ -308,6 +354,7 @@
 		backlight = 1;
 	else
 		backlight = 0;
+
 	ivch_write(dvo, VR80, backlight);
 
 	if (enable)
@@ -334,6 +381,8 @@
 {
 	uint16_t vr01;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return false;
@@ -348,11 +397,15 @@
 			  struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
+	struct ivch_priv *priv = dvo->dev_priv;
 	uint16_t vr40 = 0;
 	uint16_t vr01 = 0;
 	uint16_t vr10;
 
-	ivch_read(dvo, VR10, &vr10);
+	ivch_reset(dvo);
+
+	vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
 	/* Enable dithering for 18 bpp pipelines */
 	vr10 &= VR10_INTERFACE_DEPTH_MASK;
 	if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@
 		uint16_t x_ratio, y_ratio;
 
 		vr01 |= VR01_PANEL_FIT_ENABLE;
-		vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+		vr40 |= VR40_CLOCK_GATING_ENABLE;
 		x_ratio = (((mode->hdisplay - 1) << 16) /
 			   (adjusted_mode->hdisplay - 1)) >> 2;
 		y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@
 
 	ivch_write(dvo, VR01, vr01);
 	ivch_write(dvo, VR40, vr40);
-
-	ivch_dump_regs(dvo);
 }
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4..237ff68 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -131,7 +131,7 @@
 			.mask = MI_GLOBAL_GTT,
 			.expected = 0,
 	      }},						       ),
-	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
+	CMD(  MI_LOAD_REGISTER_MEM(1),             SMI,   !F,  0xFF,   W | B,
 	      .reg = { .offset = 1, .mask = 0x007FFFFC },
 	      .bits = {{
 			.offset = 0,
@@ -151,8 +151,8 @@
 	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
 	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
 	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
-	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
 	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
@@ -564,7 +564,7 @@
 
 		for (j = 0; j < table->count; j++) {
 			const struct drm_i915_cmd_descriptor *desc =
-				&table->table[i];
+				&table->table[j];
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
 			if (curr < previous) {
@@ -1021,7 +1021,7 @@
 			 * only MI_LOAD_REGISTER_IMM commands.
 			 */
 			if (reg_addr == OACONTROL) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 					return false;
 				}
@@ -1035,7 +1035,7 @@
 			 * allowed mask/value pair given in the whitelist entry.
 			 */
 			if (reg->mask) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
 							 reg_addr);
 					return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f..e3ec904 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@
 	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+	u64 size = 0;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    drm_mm_node_allocated(&vma->node))
+			size += vma->node.size;
+	}
+
+	return size;
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -156,13 +170,13 @@
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!i915_is_ggtt(vma->vm))
-			seq_puts(m, " (pp");
+		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->node.start, vma->node.size);
+		if (i915_is_ggtt(vma->vm))
+			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
 		else
-			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
-			   vma->node.start, vma->node.size,
-			   vma->ggtt_view.type);
+			seq_puts(m, ")");
 	}
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct i915_vma *vma;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
@@ -253,7 +267,7 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
 	int count, ret;
 
@@ -269,7 +283,7 @@
 		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
 #define count_objects(list, member) do { \
 	list_for_each_entry(obj, list, member) { \
-		size += i915_gem_obj_ggtt_size(obj); \
+		size += i915_gem_obj_total_ggtt_size(obj); \
 		++count; \
 		if (obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@
 
 struct file_stats {
 	struct drm_i915_file_private *file_priv;
-	int count;
-	size_t total, unbound;
-	size_t global, shared;
-	size_t active, inactive;
+	unsigned long count;
+	u64 total, unbound;
+	u64 global, shared;
+	u64 active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
@@ -405,7 +419,7 @@
 
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
-		size += i915_gem_obj_ggtt_size(vma->obj); \
+		size += i915_gem_obj_total_ggtt_size(vma->obj); \
 		++count; \
 		if (vma->obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 count, mappable_count, purgeable_count;
-	size_t size, mappable_size, purgeable_size;
+	u64 size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_file *file;
@@ -437,17 +451,17 @@
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.bound_list, global_list);
-	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->active_list, mm_list);
-	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->inactive_list, mm_list);
-	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@
 		if (obj->madv == I915_MADV_DONTNEED)
 			purgeable_size += obj->base.size, ++purgeable_count;
 	}
-	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@
 			++purgeable_count;
 		}
 	}
-	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 		   purgeable_count, purgeable_size);
-	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
 		   mappable_count, mappable_size);
-	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 		   count, size);
 
-	seq_printf(m, "%zu [%lu] gtt total\n",
+	seq_printf(m, "%llu [%llu] gtt total\n",
 		   dev_priv->gtt.base.total,
-		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 
 	return 0;
@@ -1132,9 +1146,9 @@
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
 		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
-		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rp_state_limits;
+		u32 gt_perf_status;
+		u32 rp_state_cap;
 		u32 rpmodectl, rpinclimit, rpdeclimit;
 		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@
 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 		int max_freq;
 
+		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		if (IS_BROXTON(dev)) {
+			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+		} else {
+			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		}
+
 		/* RPSTAT1 is in the GT power well */
 		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
@@ -1229,7 +1252,8 @@
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+			    rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = rp_state_cap & 0xff;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@
 		return ironlake_drpc_info(m);
 }
 
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+		   dev_priv->fb_tracking.busy_bits);
+
+	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+		   dev_priv->fb_tracking.flip_bits);
+
+	return 0;
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@
 	}
 
 	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&dev_priv->fbc.lock);
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv))
 		seq_puts(m, "FBC enabled\n");
-	} else {
-		seq_puts(m, "FBC disabled: ");
-		switch (dev_priv->fbc.no_fbc_reason) {
-		case FBC_OK:
-			seq_puts(m, "FBC actived, but currently disabled in hardware");
-			break;
-		case FBC_UNSUPPORTED:
-			seq_puts(m, "unsupported by this chipset");
-			break;
-		case FBC_NO_OUTPUT:
-			seq_puts(m, "no outputs");
-			break;
-		case FBC_STOLEN_TOO_SMALL:
-			seq_puts(m, "not enough stolen memory");
-			break;
-		case FBC_UNSUPPORTED_MODE:
-			seq_puts(m, "mode not supported");
-			break;
-		case FBC_MODE_TOO_LARGE:
-			seq_puts(m, "mode too large");
-			break;
-		case FBC_BAD_PLANE:
-			seq_puts(m, "FBC unsupported on plane");
-			break;
-		case FBC_NOT_TILED:
-			seq_puts(m, "scanout buffer not tiled");
-			break;
-		case FBC_MULTIPLE_PIPES:
-			seq_puts(m, "multiple pipes are enabled");
-			break;
-		case FBC_MODULE_PARAM:
-			seq_puts(m, "disabled per module param (default off)");
-			break;
-		case FBC_CHIP_DEFAULT:
-			seq_puts(m, "disabled per chip default");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_putc(m, '\n');
-	}
+	else
+		seq_printf(m, "FBC disabled: %s\n",
+			  intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
 
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		seq_printf(m, "Compressing: %s\n",
+			   yesno(I915_READ(FBC_STATUS2) &
+				 FBC_COMPRESSION_MASK));
+
+	mutex_unlock(&dev_priv->fbc.lock);
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
@@ -1651,9 +1660,7 @@
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
 	*val = dev_priv->fbc.false_color;
-	drm_modeset_unlock_all(dev);
 
 	return 0;
 }
@@ -1667,7 +1674,7 @@
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
+	mutex_lock(&dev_priv->fbc.lock);
 
 	reg = I915_READ(ILK_DPFC_CONTROL);
 	dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@
 		   (reg | FBC_CTL_FALSE_COLOR) :
 		   (reg & ~FBC_CTL_FALSE_COLOR));
 
-	drm_modeset_unlock_all(dev);
+	mutex_unlock(&dev_priv->fbc.lock);
 	return 0;
 }
 
@@ -1778,8 +1785,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 	int gpu_freq, ia_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+	if (!HAS_CORE_RING_FREQ(dev)) {
 		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
@@ -1792,17 +1800,27 @@
 	if (ret)
 		goto out;
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 HZ units */
+		min_gpu_freq =
+			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq =
+			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+	}
+
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
-	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
-	     gpu_freq++) {
+	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, gpu_freq),
+			   intel_gpu_freq(dev_priv, (gpu_freq *
+				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
@@ -1848,8 +1866,9 @@
 	struct drm_device *dev = node->minor->dev;
 	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
+	struct drm_framebuffer *drm_fb;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
-	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+	drm_for_each_fb(drm_fb, dev) {
+		fb = to_intel_framebuffer(drm_fb);
 		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
@@ -2248,7 +2268,7 @@
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
@@ -2479,13 +2499,13 @@
 	return 0;
 }
 
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+	if (!HAS_RUNTIME_PM(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2493,6 +2513,12 @@
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+	seq_printf(m, "Usage count: %d\n",
+		   atomic_read(&dev->dev->power.usage_count));
+#else
+	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
 
 	return 0;
 }
@@ -2536,6 +2562,8 @@
 		return "PORT_DDI_D_2_LANES";
 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
 		return "PORT_DDI_D_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+		return "PORT_DDI_E_2_LANES";
 	case POWER_DOMAIN_PORT_DSI:
 		return "PORT_DSI";
 	case POWER_DOMAIN_PORT_CRT:
@@ -2780,13 +2808,16 @@
 	seq_printf(m, "---------\n");
 	for_each_intel_crtc(dev, crtc) {
 		bool active;
+		struct intel_crtc_state *pipe_config;
 		int x, y;
 
+		pipe_config = to_intel_crtc_state(crtc->base.state);
+
 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active), crtc->config->pipe_src_w,
-			   crtc->config->pipe_src_h);
-		if (crtc->active) {
+			   yesno(pipe_config->base.active),
+			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3058,7 @@
 
 	seq_puts(m, "\n\n");
 
-	if (intel_crtc->config->has_drrs) {
+	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
 		struct intel_panel *panel;
 
 		mutex_lock(&drrs->mutex);
@@ -3079,7 +3110,7 @@
 	for_each_intel_crtc(dev, intel_crtc) {
 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
 
-		if (intel_crtc->active) {
+		if (intel_crtc->base.state->active) {
 			active_crtc_cnt++;
 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
@@ -3616,53 +3647,40 @@
 	return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+	struct intel_crtc_state *pipe_config;
+	struct drm_atomic_state *state;
+	int ret = 0;
 
 	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
-	    !crtc->config->pch_pfit.enabled) {
-		crtc->config->pch_pfit.force_thru = true;
-
-		intel_display_power_get(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
-		intel_crtc_reset(crtc);
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
-	drm_modeset_unlock_all(dev);
-}
 
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
-
-	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->pch_pfit.force_thru) {
-		crtc->config->pch_pfit.force_thru = false;
-
-		intel_crtc_reset(crtc);
-
-		intel_display_power_put(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+	pipe_config = intel_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(pipe_config)) {
+		ret = PTR_ERR(pipe_config);
+		goto out;
 	}
+
+	pipe_config->pch_pfit.force_thru = enable;
+	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+	    pipe_config->pch_pfit.enabled != enable)
+		pipe_config->base.connectors_changed = true;
+
+	ret = drm_atomic_commit(state);
+out:
 	drm_modeset_unlock_all(dev);
+	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+	if (ret)
+		drm_atomic_state_free(state);
 }
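
The rewrite above follows the usual pattern for a kernel-internal atomic commit; a minimal sketch of that pattern using the core drm_atomic helpers, with the pipe-config details elided:

	/* Hedged sketch, not part of the patch: on success drm_atomic_commit()
	 * consumes the state, so the caller frees it only on failure. */
	static void sketch_toggle_crtc_flag(struct drm_device *dev,
					    struct drm_crtc *crtc)
	{
		struct drm_atomic_state *state;
		struct drm_crtc_state *crtc_state;
		int ret = 0;

		drm_modeset_lock_all(dev);

		state = drm_atomic_state_alloc(dev);
		if (!state) {
			ret = -ENOMEM;
			goto out;
		}

		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		/* ... modify crtc_state here ... */

		ret = drm_atomic_commit(state);	/* consumes state on success */
	out:
		drm_modeset_unlock_all(dev);
		if (ret)
			drm_atomic_state_free(state);	/* free only on failure */
	}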
 
 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3700,7 @@
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
 		if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, true);
 
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
@@ -3776,7 +3794,7 @@
 				 pipe_name(pipe));
 
 		drm_modeset_lock(&crtc->base.mutex, NULL);
-		if (crtc->active)
+		if (crtc->base.state->active)
 			intel_wait_for_vblank(dev, pipe);
 		drm_modeset_unlock(&crtc->base.mutex);
 
@@ -3794,7 +3812,7 @@
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, false);
 
 		hsw_enable_ips(crtc);
 	}
@@ -3980,24 +3998,14 @@
 {
 	char *input_buffer;
 	int status = 0;
-	struct seq_file *m;
 	struct drm_device *dev;
 	struct drm_connector *connector;
 	struct list_head *connector_list;
 	struct intel_dp *intel_dp;
 	int val = 0;
 
-	m = file->private_data;
-	if (!m) {
-		status = -ENODEV;
-		return status;
-	}
-	dev = m->private;
+	dev = ((struct seq_file *)file->private_data)->private;
 
-	if (!dev) {
-		status = -ENODEV;
-		return status;
-	}
 	connector_list = &dev->mode_config.connector_list;
 
 	if (len == 0)
@@ -4021,9 +4029,7 @@
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
 
-		if (connector->connector_type ==
-		    DRM_MODE_CONNECTOR_DisplayPort &&
-		    connector->status == connector_status_connected &&
+		if (connector->status == connector_status_connected &&
 		    connector->encoder != NULL) {
 			intel_dp = enc_to_intel_dp(connector->encoder);
 			status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4061,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4102,9 +4105,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4144,9 +4144,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4183,8 +4180,15 @@
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_device *dev = m->private;
-	int num_levels = ilk_wm_max_level(dev) + 1;
 	int level;
+	int num_levels;
+
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
 
 	drm_modeset_lock_all(dev);
 
@@ -4193,9 +4197,9 @@
 
 		/*
 		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9
+		 * - latencies are in us on gen9/vlv/chv
 		 */
-		if (INTEL_INFO(dev)->gen >= 9)
+		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
@@ -4259,7 +4263,7 @@
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4295,18 @@
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
 	uint16_t new[8] = { 0 };
-	int num_levels = ilk_wm_max_level(dev) + 1;
+	int num_levels;
 	int level;
 	int ret;
 	char tmp[32];
 
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
+
 	if (len >= sizeof(tmp))
 		return -EINVAL;
 
@@ -5027,6 +5038,7 @@
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
+	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5054,7 @@
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
-	{"i915_pc8_status", i915_pc8_status, 0},
+	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321..ab37d11 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -163,6 +163,13 @@
 		if (!value)
 			return -ENODEV;
 		break;
+	case I915_PARAM_HAS_GPU_RESET:
+		value = i915.enable_hangcheck &&
+			intel_has_gpu_reset(dev);
+		break;
+	case I915_PARAM_HAS_RESOURCE_STREAMER:
+		value = HAS_RESOURCE_STREAMER(dev);
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -719,11 +726,19 @@
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
 	if (IS_BROXTON(dev)) {
-		info->num_sprites[PIPE_A] = 3;
-		info->num_sprites[PIPE_B] = 3;
-		info->num_sprites[PIPE_C] = 2;
-	} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev))
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ -933,8 +948,8 @@
 		goto out_mtrrfree;
 	}
 
-	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->dp_wq == NULL) {
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL) {
 		DRM_ERROR("Failed to create our dp workqueue.\n");
 		ret = -ENOMEM;
 		goto out_freewq;
@@ -1029,7 +1044,7 @@
 	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 out_freedpwq:
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_freewq:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
@@ -1116,6 +1131,7 @@
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_cleanup_cfb(dev_priv);
 	i915_gem_cleanup_stolen(dev);
 
 	intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 	destroy_workqueue(dev_priv->wq);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp.  For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
-	return 1;
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 884b4f9..ab64d68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,7 +356,6 @@
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@
 	{0, 0, 0}
 };
 
-#if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
 
 void intel_detect_pch(struct drm_device *dev)
 {
@@ -541,21 +537,6 @@
 	return true;
 }
 
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	dev_priv->long_hpd_port_mask = 0;
-	dev_priv->short_hpd_port_mask = 0;
-	dev_priv->hpd_event_bits = 0;
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	cancel_work_sync(&dev_priv->dig_port_work);
-	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
 void i915_firmware_load_error_print(const char *fw_path, int err)
 {
 	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 	pci_power_t opregion_target_state;
 	int error;
 
@@ -632,8 +612,7 @@
 	 * for _thaw. Also, power gate the CRTC power wells.
 	 */
 	drm_modeset_lock_all(dev);
-	for_each_crtc(dev, crtc)
-		intel_crtc_control(crtc, false);
+	intel_display_suspend(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -683,15 +662,18 @@
 
 	pci_disable_device(drm_dev->pdev);
 	/*
-	 * During hibernation on some GEN4 platforms the BIOS may try to access
+	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
 	 * leave the device in D0 on those platforms and hope the BIOS will
-	 * power down the device properly. Platforms where this was seen:
-	 * Lenovo Thinkpad X301, X61s
+	 * power down the device properly. The issue was seen on multiple old
+	 * GENs with different BIOS vendors, so having an explicit blacklist
+	 * is impractical; apply the workaround on everything pre-GEN6. The
+	 * platforms where the issue was seen:
+	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
+	 * Fujitsu FSC S7110
+	 * Acer Aspire 1830T
 	 */
-	if (!(hibernation &&
-	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
-	      INTEL_INFO(dev_priv)->gen == 4))
+	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
@@ -748,7 +730,7 @@
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -760,7 +742,7 @@
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_resume(dev);
@@ -865,9 +847,6 @@
 	bool simulated;
 	int ret;
 
-	if (!i915.reset)
-		return 0;
-
 	intel_reset_gt_powersave(dev);
 
 	mutex_lock(&dev->struct_mutex);
@@ -959,8 +938,6 @@
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
-	driver.driver_features &= ~(DRIVER_USE_AGP);
-
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1515,7 +1492,15 @@
 	 * FIXME: We really should find a document that references the arguments
 	 * used below!
 	 */
-	if (IS_HASWELL(dev)) {
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+		 * being detected, and the call we do at intel_runtime_resume()
+		 * won't be able to restore them. Since PCI_D3hot matches the
+		 * actual specification and appears to be working, use it.
+		 */
+		intel_opregion_notify_adapter(dev, PCI_D3hot);
+	} else {
 		/*
 		 * current versions of firmware which depend on this opregion
 		 * notification have repurposed the D1 definition to mean
@@ -1524,16 +1509,6 @@
 		 * the suspend path.
 		 */
 		intel_opregion_notify_adapter(dev, PCI_D1);
-	} else {
-		/*
-		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-		 * being detected, and the call we do at intel_runtime_resume()
-		 * won't be able to restore them. Since PCI_D3hot matches the
-		 * actual specification and appears to be working, use it. Let's
-		 * assume the other non-Haswell platforms will stay the same as
-		 * Broadwell.
-		 */
-		intel_opregion_notify_adapter(dev, PCI_D3hot);
 	}
 
 	assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1648,6 @@
 	 * deal with them for Intel hardware.
 	 */
 	.driver_features =
-	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
 	    DRIVER_RENDER,
 	.load = i915_driver_load,
@@ -1688,7 +1662,6 @@
 	.suspend = i915_suspend_legacy,
 	.resume = i915_resume_legacy,
 
-	.device_is_agp = i915_driver_device_is_agp,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1700,14 @@
 	driver.num_ioctls = i915_max_ioctl;
 
 	/*
-	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
-	 * explicitly disabled with the module pararmeter.
-	 *
-	 * Otherwise, just follow the parameter (defaulting to off).
-	 *
-	 * Allow optional vga_text_mode_force boot option to override
-	 * the default behavior.
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or the
+	 * vga_text_mode_force boot option.
 	 */
-#if defined(CONFIG_DRM_I915_KMS)
-	if (i915.modeset != 0)
-		driver.driver_features |= DRIVER_MODESET;
-#endif
-	if (i915.modeset == 1)
-		driver.driver_features |= DRIVER_MODESET;
+	driver.driver_features |= DRIVER_MODESET;
+
+	if (i915.modeset == 0)
+		driver.driver_features &= ~DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1726,7 @@
 	 * to the atomic ioctl and the atomic properties.  Only plane operations on
 	 * a single CRTC will actually work.
 	 */
-	if (i915.nuclear_pageflip)
+	if (driver.driver_features & DRIVER_MODESET)
 		driver.driver_features |= DRIVER_ATOMIC;
 
 	return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fd1de45..81adf89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150522"
+#define DRIVER_DATE		"20150731"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -182,6 +182,7 @@
 	POWER_DOMAIN_PORT_DDI_C_4_LANES,
 	POWER_DOMAIN_PORT_DDI_D_2_LANES,
 	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DDI_E_2_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,
@@ -206,17 +207,51 @@
 
 enum hpd_pin {
 	HPD_NONE = 0,
-	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
 	HPD_CRT,
 	HPD_SDVO_B,
 	HPD_SDVO_C,
+	HPD_PORT_A,
 	HPD_PORT_B,
 	HPD_PORT_C,
 	HPD_PORT_D,
+	HPD_PORT_E,
 	HPD_NUM_PINS
 };
 
+#define for_each_hpd_pin(__pin) \
+	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+	struct work_struct hotplug_work;
+
+	struct {
+		unsigned long last_jiffies;
+		int count;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} state;
+	} stats[HPD_NUM_PINS];
+	u32 event_bits;
+	struct delayed_work reenable_work;
+
+	struct intel_digital_port *irq_port[I915_MAX_PORTS];
+	u32 long_port_mask;
+	u32 short_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP,
+	 * the non-DP HPD could block the workqueue on acquiring a
+	 * mode config mutex that userspace may have taken. However,
+	 * userspace is waiting on the DP workqueue to run, which is
+	 * blocked behind the non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+};
+
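All hotplug bookkeeping now lives in one container; a hedged sketch of iterating it with the new for_each_hpd_pin() helper (the function name is illustrative):

	/* Hedged sketch, not part of the patch: walk the consolidated
	 * per-pin state. for_each_hpd_pin() deliberately skips HPD_NONE. */
	static void sketch_reset_hpd_state(struct drm_i915_private *dev_priv)
	{
		enum hpd_pin pin;

		for_each_hpd_pin(pin) {
			dev_priv->hotplug.stats[pin].count = 0;
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
		}
		dev_priv->hotplug.event_bits = 0;
	}
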
 #define I915_GEM_GPU_DOMAINS \
 	(I915_GEM_DOMAIN_RENDER | \
 	 I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +278,12 @@
 			    &dev->mode_config.plane_list,	\
 			    base.head)
 
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
+	list_for_each_entry(intel_plane,				\
+			    &(dev)->mode_config.plane_list,		\
+			    base.head)					\
+		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
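A hedged usage sketch for the new iterator (helper names are illustrative):

	/* Hedged sketch, not part of the patch: the trailing if inside the
	 * macro filters by pipe, so an ordinary loop body attaches directly. */
	static void sketch_walk_crtc_planes(struct drm_device *dev,
					    struct intel_crtc *intel_crtc)
	{
		struct intel_plane *intel_plane;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			/* intel_plane->pipe == intel_crtc->pipe holds here */
			sketch_update_plane(intel_plane);	/* hypothetical helper */
		}
	}
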
 #define for_each_intel_crtc(dev, intel_crtc) \
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
@@ -333,7 +374,8 @@
 	uint32_t cfgcr1, cfgcr2;
 
 	/* bxt */
-	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
 };
 
 struct intel_shared_dpll_config {
@@ -343,7 +385,6 @@
 
 struct intel_shared_dpll {
 	struct intel_shared_dpll_config config;
-	struct intel_shared_dpll_config *new_config;
 
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +486,7 @@
 	struct timeval time;
 
 	char error_msg[128];
+	int iommu;
 	u32 reset_count;
 	u32 suspend_count;
 
@@ -559,9 +601,6 @@
 struct dpll;
 
 struct drm_i915_display_funcs {
-	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc);
-	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	/**
@@ -587,7 +626,8 @@
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, uint32_t sprite_height,
 				 int pixel_size, bool enable, bool scaled);
-	void (*modeset_global_resources)(struct drm_atomic_state *state);
+	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +638,6 @@
 				  struct intel_crtc_state *crtc_state);
 	void (*crtc_enable)(struct drm_crtc *crtc);
 	void (*crtc_disable)(struct drm_crtc *crtc);
-	void (*off)(struct drm_crtc *crtc);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   struct drm_display_mode *mode);
@@ -608,7 +647,7 @@
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
-			  struct intel_engine_cs *ring,
+			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
 	void (*update_primary_plane)(struct drm_crtc *crtc,
 				     struct drm_framebuffer *fb,
@@ -706,7 +745,7 @@
 
 struct intel_csr {
 	const char *fw_path;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
 	uint32_t mmio_count;
 	uint32_t mmioaddr[8];
@@ -805,11 +844,15 @@
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
  * struct intel_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
  * @file_priv: filp associated with this context (NULL for global default
  *	       context).
  * @hang_stats: information about the role of this context in possible GPU
@@ -827,6 +870,7 @@
 	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_private *i915;
+	int flags;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -853,9 +897,13 @@
 	ORIGIN_CPU,
 	ORIGIN_CS,
 	ORIGIN_FLIP,
+	ORIGIN_DIRTYFB,
 };
 
 struct i915_fbc {
+	/* This is always the inner lock when overlapping with struct_mutex and
+	 * it's the outer lock when overlapping with stolen_lock. */
+	struct mutex lock;
 	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
@@ -875,7 +923,7 @@
 
 	struct intel_fbc_work {
 		struct delayed_work work;
-		struct drm_crtc *crtc;
+		struct intel_crtc *crtc;
 		struct drm_framebuffer *fb;
 	} *fbc_work;
 
@@ -891,7 +939,13 @@
 		FBC_MULTIPLE_PIPES, /* more than one pipe active */
 		FBC_MODULE_PARAM,
 		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+		FBC_ROTATION, /* rotation is not supported */
+		FBC_IN_DBG_MASTER, /* kernel debugger is active */
 	} no_fbc_reason;
+
+	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+	void (*enable_fbc)(struct intel_crtc *crtc);
+	void (*disable_fbc)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1201,6 +1255,10 @@
 struct i915_gem_mm {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
+	/** Protects the usage of the GTT stolen memory allocator. This is
+	 * always the inner lock when overlapping with struct_mutex. */
+	struct mutex stolen_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
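
Together with the fbc.lock comment above, the documented ordering is struct_mutex, then fbc.lock, then stolen_lock; a hedged sketch of a path taking all three:

	/* Hedged sketch, not part of the patch: struct_mutex is outermost;
	 * fbc.lock nests inside it but outside mm.stolen_lock. */
	static void sketch_touch_compressed_fb(struct drm_i915_private *dev_priv)
	{
		mutex_lock(&dev_priv->dev->struct_mutex);
		mutex_lock(&dev_priv->fbc.lock);
		mutex_lock(&dev_priv->mm.stolen_lock);

		/* ... allocate or free the compressed fb in stolen memory ... */

		mutex_unlock(&dev_priv->mm.stolen_lock);
		mutex_unlock(&dev_priv->fbc.lock);
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}
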
@@ -1354,6 +1412,15 @@
 	MODESET_SUSPENDED,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
 struct ddi_vbt_port_info {
 	/*
 	 * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1366,6 +1433,12 @@
 	uint8_t supports_dvi:1;
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
+
+	uint8_t alternate_aux_channel;
+	uint8_t alternate_ddc_pin;
+
+	uint8_t dp_boost_level;
+	uint8_t hdmi_boost_level;
 };
 
 enum psr_lines_to_wait {
@@ -1461,23 +1534,27 @@
 	enum intel_ddb_partitioning partitioning;
 };
 
+struct vlv_pipe_wm {
+	uint16_t primary;
+	uint16_t sprite[2];
+	uint8_t cursor;
+};
+
+struct vlv_sr_wm {
+	uint16_t plane;
+	uint8_t cursor;
+};
+
 struct vlv_wm_values {
-	struct {
-		uint16_t primary;
-		uint16_t sprite[2];
-		uint8_t cursor;
-	} pipe[3];
-
-	struct {
-		uint16_t plane;
-		uint8_t cursor;
-	} sr;
-
+	struct vlv_pipe_wm pipe[3];
+	struct vlv_sr_wm sr;
 	struct {
 		uint8_t cursor;
 		uint8_t sprite[2];
 		uint8_t primary;
 	} ddl[3];
+	uint8_t level;
+	bool cxsr;
 };
 
 struct skl_ddb_entry {
@@ -1611,6 +1688,18 @@
 	bool active;
 };
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	uint32_t                        dispatch_flags;
+	uint32_t                        args_batch_start_offset;
+	uint32_t                        batch_obj_vm_offset;
+	struct intel_engine_cs          *ring;
+	struct drm_i915_gem_object      *batch_obj;
+	struct intel_context            *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
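A hedged sketch of how a caller might fill the bundle (field values and surrounding locals are illustrative):

	/* Hedged sketch, not part of the patch: a caller bundles its execbuf
	 * arguments once instead of threading a long parameter list through. */
	struct i915_execbuffer_params params = {
		.dev            = dev,
		.file           = file,
		.ring           = ring,
		.ctx            = ctx,
		.batch_obj      = batch_obj,
		.dispatch_flags = dispatch_flags,
	};

	ret = i915_gem_ringbuffer_submission(&params, args, &vmas);
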
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;
@@ -1680,19 +1769,7 @@
 	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
-	struct work_struct hotplug_work;
-	struct {
-		unsigned long hpd_last_jiffies;
-		int hpd_cnt;
-		enum {
-			HPD_ENABLED = 0,
-			HPD_DISABLED = 1,
-			HPD_MARK_DISABLED = 2
-		} hpd_mark;
-	} hpd_stats[HPD_NUM_PINS];
-	u32 hpd_event_bits;
-	struct delayed_work hotplug_reenable_work;
-
+	struct i915_hotplug hotplug;
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
@@ -1718,7 +1795,7 @@
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq;
 	unsigned int hpll_freq;
 
 	/**
@@ -1769,9 +1846,6 @@
 
 	/* Reclocking support */
 	bool render_reclock_avail;
-	bool lvds_downclock_avail;
-	/* indicates the reduced downclock for LVDS*/
-	int lvds_downclock;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1799,7 +1873,7 @@
 
 	struct drm_i915_gem_object *vlv_pctx;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct work_struct fbdev_suspend_work;
@@ -1809,6 +1883,7 @@
 	struct drm_property *force_audio_property;
 
 	/* hda/i915 audio component */
+	struct i915_audio_component *audio_component;
 	bool audio_component_registered;
 
 	uint32_t hw_context_size;
@@ -1858,29 +1933,11 @@
 
 	struct i915_runtime_pm pm;
 
-	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-	u32 long_hpd_port_mask;
-	u32 short_hpd_port_mask;
-	struct work_struct dig_port_work;
-
-	/*
-	 * if we get a HPD irq from DP and a HPD irq from non-DP
-	 * the non-DP HPD could block the workqueue on a mode config
-	 * mutex getting, that userspace may have taken. However
-	 * userspace is waiting on the DP workqueue to run which is
-	 * blocked behind the non-DP one.
-	 */
-	struct workqueue_struct *dp_wq;
-
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
-				      struct intel_engine_cs *ring,
-				      struct intel_context *ctx,
+		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas,
-				      struct drm_i915_gem_object *batch_obj,
-				      u64 exec_start, u32 flags);
+				      struct list_head *vmas);
 		int (*init_rings)(struct drm_device *dev);
 		void (*cleanup_ring)(struct intel_engine_cs *ring);
 		void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2148,7 +2205,8 @@
 	struct intel_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
-	/** Batch buffer related to this request if any */
+	/** Batch buffer related to this request, if any (used for
+	 * error state dump only) */
 	struct drm_i915_gem_object *batch_obj;
 
 	/** Time at which this request was emitted, in jiffies. */
@@ -2186,8 +2244,12 @@
 };
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx);
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2391,6 +2453,9 @@
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
@@ -2400,6 +2465,14 @@
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
 				 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
+				 INTEL_DEVID(dev) == 0x1913 || \
+				 INTEL_DEVID(dev) == 0x1916 || \
+				 INTEL_DEVID(dev) == 0x1921 || \
+				 INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
+				 INTEL_DEVID(dev) == 0x1915 || \
+				 INTEL_DEVID(dev) == 0x191E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0		(0x0)
@@ -2466,9 +2539,6 @@
  */
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 
@@ -2494,6 +2564,12 @@
 
 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))
 
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+				    INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -2533,7 +2609,6 @@
 	int modeset;
 	int panel_ignore_lid;
 	int semaphores;
-	unsigned int lvds_downclock;
 	int lvds_channel_mode;
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
@@ -2555,10 +2630,11 @@
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	int guc_log_level;
 	int use_mmio_flip;
 	int mmio_debug;
 	bool verbose_state_checks;
-	bool nuclear_pageflip;
 	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2572,21 +2648,27 @@
 				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
 				  struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
 extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 void i915_firmware_load_error_print(const char *fw_path, int err);
 
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2594,7 +2676,6 @@
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
@@ -2661,19 +2742,11 @@
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
-				   struct drm_file *file,
-				   struct intel_engine_cs *ring,
-				   struct intel_context *ctx,
+					struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas,
-				   struct drm_i915_gem_object *batch_obj,
-				   u64 exec_start, u32 flags);
+				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2706,6 +2779,8 @@
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+		struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
 		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2780,9 +2855,10 @@
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to);
+			 struct intel_engine_cs *to,
+			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -2811,11 +2887,6 @@
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2824,7 +2895,6 @@
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2859,16 +2929,18 @@
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
@@ -2888,6 +2960,7 @@
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 					      const struct i915_ggtt_view *view);
@@ -2911,8 +2984,6 @@
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 			      const struct i915_ggtt_view *view);
@@ -3007,15 +3078,27 @@
 	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
@@ -3065,9 +3148,12 @@
 }
 
 /* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3097,10 +3183,6 @@
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
 /* i915_gem_debug.c */
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
@@ -3222,8 +3304,7 @@
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
-					 bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52b446b..4d631a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -66,18 +61,6 @@
 	return obj->pin_display;
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	/* As we do not have an associated fence register, we will force
-	 * a tiling change if we ever need to acquire one.
-	 */
-	obj->fence_dirty = false;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -149,14 +132,18 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
-	struct drm_i915_gem_object *obj;
+	struct i915_gtt *ggtt = &dev_priv->gtt;
+	struct i915_vma *vma;
 	size_t pinned;
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (i915_gem_obj_is_pinned(obj))
-			pinned += i915_gem_obj_ggtt_size(obj);
+	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
+	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->gtt.base.total;
@@ -347,7 +334,7 @@
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -368,7 +355,7 @@
 	i915_gem_chipset_flush(dev);
 
 out:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -801,7 +788,7 @@
 
 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -832,7 +819,7 @@
 	}
 
 out_flush:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 out:
@@ -945,7 +932,7 @@
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	i915_gem_object_pin_pages(obj);
 
@@ -1025,7 +1012,7 @@
 	if (needs_clflush_after)
 		i915_gem_chipset_flush(dev);
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -1146,23 +1133,6 @@
 	return 0;
 }
 
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
-	ret = 0;
-	if (req == req->ring->outstanding_lazy_request)
-		ret = i915_add_request(req->ring);
-
-	return ret;
-}
-
 static void fake_irq(unsigned long data)
 {
 	wake_up_process((struct task_struct *)data);
@@ -1334,6 +1304,33 @@
 	return ret;
 }
 
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	req->pid = get_pid(task_pid(current));
+
+	return 0;
+}
+
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
@@ -1346,6 +1343,9 @@
 	list_del(&request->client_list);
 	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
+
+	put_pid(request->pid);
+	request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1365,8 +1365,6 @@
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	put_pid(request->pid);
-
 	i915_gem_request_unreference(request);
 }
 
@@ -1415,10 +1413,6 @@
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	ret = __i915_wait_request(req,
 				  atomic_read(&dev_priv->gpu_error.reset_counter),
 				  interruptible, NULL, NULL);
@@ -1518,10 +1512,6 @@
 		if (req == NULL)
 			return 0;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			goto err;
-
 		requests[n++] = i915_gem_request_reference(req);
 	} else {
 		for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1531,10 +1521,6 @@
 			if (req == NULL)
 				continue;
 
-			ret = i915_gem_check_olr(req);
-			if (ret)
-				goto err;
-
 			requests[n++] = i915_gem_request_reference(req);
 		}
 	}
@@ -1545,7 +1531,6 @@
 					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
-err:
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			i915_gem_object_retire_request(obj, requests[i]);
@@ -1613,6 +1598,11 @@
 	else
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
+	if (write_domain != 0)
+		intel_fb_obj_invalidate(obj,
+					write_domain == I915_GEM_DOMAIN_GTT ?
+					ORIGIN_GTT : ORIGIN_CPU);
+
 unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -2349,9 +2339,12 @@
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
@@ -2359,8 +2352,7 @@
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id],
-				intel_ring_get_request(ring));
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2372,7 +2364,7 @@
 	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
-	intel_fb_obj_flush(obj, true);
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
@@ -2393,6 +2385,13 @@
 	if (obj->active)
 		return;
 
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to, of course!)
+	 */
+	list_move_tail(&obj->global_list,
+		       &to_i915(obj->base.dev)->mm.bound_list);
+
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2472,24 +2471,34 @@
 	return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_gem_request *request;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	int ret;
 
-	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+		return;
 
-	if (i915.enable_execlists) {
-		ringbuf = request->ctx->engine[ring->id].ringbuf;
-	} else
-		ringbuf = ring->buffer;
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	intel_ring_reserved_space_use(ringbuf);
 
 	request_start = intel_ring_get_tail(ringbuf);
 	/*
@@ -2499,14 +2508,13 @@
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists) {
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-		if (ret)
-			return ret;
-	} else {
-		ret = intel_ring_flush_all_caches(ring);
-		if (ret)
-			return ret;
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(request);
+		else
+			ret = intel_ring_flush_all_caches(request);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
 
 	/* Record the position of the start of the request so that
@@ -2516,17 +2524,15 @@
 	 */
 	request->postfix = intel_ring_get_tail(ringbuf);
 
-	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf, request);
-		if (ret)
-			return ret;
-	} else {
-		ret = ring->add_request(ring);
-		if (ret)
-			return ret;
+	if (i915.enable_execlists) {
+		ret = ring->emit_request(request);
+	} else {
+		ret = ring->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
 	request->head = request_start;
 
@@ -2538,34 +2544,11 @@
 	 */
 	request->batch_obj = obj;
 
-	if (!i915.enable_execlists) {
-		/* Hold a reference to the current context so that we can inspect
-		 * it later in case a hangcheck error event fires.
-		 */
-		request->ctx = ring->last_context;
-		if (request->ctx)
-			i915_gem_context_reference(request->ctx);
-	}
-
 	request->emitted_jiffies = jiffies;
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
-	request->file_priv = NULL;
-
-	if (file) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-
-		spin_lock(&file_priv->mm.lock);
-		request->file_priv = file_priv;
-		list_add_tail(&request->client_list,
-			      &file_priv->mm.request_list);
-		spin_unlock(&file_priv->mm.lock);
-
-		request->pid = get_pid(task_pid(current));
-	}
 
 	trace_i915_gem_request_add(request);
-	ring->outstanding_lazy_request = NULL;
 
 	i915_queue_hangcheck(ring->dev);
 
@@ -2574,7 +2557,8 @@
 			   round_jiffies_up_relative(HZ));
 	intel_mark_busy(dev_priv->dev);
 
-	return 0;
+	/* Sanity check that the reserved size was large enough. */
+	intel_ring_reserved_space_end(ringbuf);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2628,12 +2612,13 @@
 						 typeof(*req), ref);
 	struct intel_context *ctx = req->ctx;
 
+	if (req->file_priv)
+		i915_gem_request_remove_from_client(req);
+
 	if (ctx) {
 		if (i915.enable_execlists) {
-			struct intel_engine_cs *ring = req->ring;
-
-			if (ctx != ring->default_context)
-				intel_lr_context_unpin(ring, ctx);
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
 		}
 
 		i915_gem_context_unreference(ctx);
@@ -2643,36 +2628,63 @@
 }
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx)
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
 
-	if (ring->outstanding_lazy_request)
-		return 0;
+	if (!req_out)
+		return -EINVAL;
+
+	*req_out = NULL;
 
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
 
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-
 	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
 	if (ret)
 		goto err;
 
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
 	req->ring = ring;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
 
 	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req, ctx);
+		ret = intel_logical_ring_alloc_request_extras(req);
 	else
 		ret = intel_ring_alloc_request_extras(req);
-	if (ret)
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
 		goto err;
+	}
 
-	ring->outstanding_lazy_request = req;
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_reserve_space(req);
+	else
+		ret = intel_ring_reserve_space(req);
+	if (ret) {
+		/*
+		 * At this point, the request is fully allocated even if not
+		 * fully prepared. Thus it can be cleaned up using the proper
+		 * free code.
+		 */
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	*req_out = req;
 	return 0;
 
 err:
@@ -2680,6 +2692,13 @@
 	return ret;
 }
 
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+	intel_ring_reserved_space_cancel(req->ringbuf);
+
+	i915_gem_request_unreference(req);
+}
+
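The reservation comments above amount to a contract: allocating a request reserves ring space so that adding it later cannot fail, and cancelling returns the space. A hedged sketch of the caller pattern this enables (emit_commands() is a hypothetical stand-in):

	/* Hedged sketch, not part of the patch: the alloc/emit/add lifecycle
	 * the reservation scheme is built for. */
	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;			/* nothing reserved on failure */

	ret = emit_commands(req);		/* hypothetical command emission */
	if (ret) {
		i915_gem_request_cancel(req);	/* hands the reservation back */
		return ret;
	}

	i915_add_request(req);			/* must not fail: space pre-reserved */
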
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
@@ -2741,7 +2760,7 @@
 		list_del(&submit_req->execlist_link);
 
 		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(ring, submit_req->ctx);
+			intel_lr_context_unpin(submit_req);
 
 		i915_gem_request_unreference(submit_req);
 	}
@@ -2762,30 +2781,6 @@
 
 		i915_gem_request_retire(request);
 	}
-
-	/* This may not have been flushed before the reset, so clean it now */
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		/*
-		 * Commit delayed tiling changes if we have an object still
-		 * attached to the fence, otherwise just clear the fence.
-		 */
-		if (reg->obj) {
-			i915_gem_object_update_fence(reg->obj, reg,
-						     reg->obj->tiling_mode);
-		} else {
-			i915_gem_write_fence(dev, i, NULL);
-		}
-	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2947,7 +2942,7 @@
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-	int ret, i;
+	int i;
 
 	if (!obj->active)
 		return 0;
@@ -2962,10 +2957,6 @@
 		if (list_empty(&req->list))
 			goto retire;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			return ret;
-
 		if (i915_gem_request_completed(req, true)) {
 			__i915_gem_request_retire__upto(req);
 retire:
@@ -3068,25 +3059,22 @@
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *req)
+		       struct drm_i915_gem_request *from_req,
+		       struct drm_i915_gem_request **to_req)
 {
 	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_ring(req);
+	from = i915_gem_request_get_ring(from_req);
 	if (to == from)
 		return 0;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(req,
+		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
 					  NULL,
@@ -3094,16 +3082,24 @@
 		if (ret)
 			return ret;
 
-		i915_gem_object_retire_request(obj, req);
+		i915_gem_object_retire_request(obj, from_req);
 	} else {
 		int idx = intel_ring_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(req);
+		u32 seqno = i915_gem_request_get_seqno(from_req);
+
+		WARN_ON(!to_req);
 
 		if (seqno <= from->semaphore.sync_seqno[idx])
 			return 0;
 
-		trace_i915_gem_ring_sync_to(from, to, req);
-		ret = to->semaphore.sync_to(to, from, seqno);
+		if (*to_req == NULL) {
+			ret = i915_gem_request_alloc(to, to->default_context, to_req);
+			if (ret)
+				return ret;
+		}
+
+		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+		ret = to->semaphore.sync_to(*to_req, from, seqno);
 		if (ret)
 			return ret;
 
@@ -3123,11 +3119,14 @@
  *
  * @obj: object which may be in use on another ring.
  * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
  * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
  * into a buffer at any time, but multiple readers. To ensure each has
  * a coherent view of memory, we must:
  *
@@ -3138,11 +3137,22 @@
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to)
+		     struct intel_engine_cs *to,
+		     struct drm_i915_gem_request **to_req)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
 	struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3164,7 +3174,7 @@
 				req[n++] = obj->last_read_req[i];
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i]);
+		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
 		if (ret)
 			return ret;
 	}
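
The to_req contract is easiest to see from the caller's side. A hedged sketch (obj and to are assumed to already be in scope; error handling trimmed):

	struct drm_i915_gem_request *to_req = NULL;
	int ret;

	ret = i915_gem_object_sync(obj, to, &to_req);
	if (ret)
		return ret;

	/* A request is only allocated if semaphore commands were actually
	 * emitted; if one comes back, the caller owns it and must submit it. */
	if (to_req)
		i915_add_request_no_flush(to_req);
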
@@ -3275,9 +3285,19 @@
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
 		if (!i915.enable_execlists) {
-			ret = i915_switch_context(ring, ring->default_context);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 			if (ret)
 				return ret;
+
+			ret = i915_switch_context(req);
+			if (ret) {
+				i915_gem_request_cancel(req);
+				return ret;
+			}
+
+			i915_add_request_no_flush(req);
 		}
 
 		ret = intel_ring_idle(ring);
@@ -3289,343 +3309,6 @@
 	return 0;
 }
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg;
-	int fence_pitch_shift;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		fence_reg = FENCE_REG_SANDYBRIDGE_0;
-		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
-	} else {
-		fence_reg = FENCE_REG_965_0;
-		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
-	}
-
-	fence_reg += reg * 8;
-
-	/* To w/a incoherency with non-atomic 64-bit register updates,
-	 * we split the 64-bit update into two 32-bit writes. In order
-	 * for a partial fence not to be evaluated between writes, we
-	 * precede the update with write to turn off the fence register,
-	 * and only enable the fence as the last step.
-	 *
-	 * For extra levels of paranoia, we make sure each step lands
-	 * before applying the next step.
-	 */
-	I915_WRITE(fence_reg, 0);
-	POSTING_READ(fence_reg);
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
-		}
-
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-
-		I915_WRITE(fence_reg + 4, val >> 32);
-		POSTING_READ(fence_reg + 4);
-
-		I915_WRITE(fence_reg + 0, val);
-		POSTING_READ(fence_reg);
-	} else {
-		I915_WRITE(fence_reg + 4, 0);
-		POSTING_READ(fence_reg + 4);
-	}
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		int pitch_val;
-		int tile_width;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-			tile_width = 128;
-		else
-			tile_width = 512;
-
-		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	if (reg < 8)
-		reg = FENCE_REG_830_0 + reg * 4;
-	else
-		reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-				struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint32_t pitch_val;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
-
-		pitch_val = obj->stride / 128;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-	POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-		mb();
-
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
-	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
-
-	if (IS_GEN2(dev))
-		i830_write_fence_reg(dev, reg, obj);
-	else if (IS_GEN3(dev))
-		i915_write_fence_reg(dev, reg, obj);
-	else if (INTEL_INFO(dev)->gen >= 4)
-		i965_write_fence_reg(dev, reg, obj);
-
-	/* And similarly be paranoid that no direct access to this region
-	 * is reordered to before the fence is installed.
-	 */
-	if (i915_gem_object_needs_mb(obj))
-		mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-			       struct drm_i915_fence_reg *fence)
-{
-	return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-	if (enable) {
-		obj->fence_reg = reg;
-		fence->obj = obj;
-		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-	} else {
-		obj->fence_reg = I915_FENCE_REG_NONE;
-		fence->obj = NULL;
-		list_del_init(&fence->lru_list);
-	}
-	obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
-		if (ret)
-			return ret;
-
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
-	}
-
-	return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_fence_reg *fence;
-	int ret;
-
-	ret = i915_gem_object_wait_fence(obj);
-	if (ret)
-		return ret;
-
-	if (obj->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	fence = &dev_priv->fence_regs[obj->fence_reg];
-
-	if (WARN_ON(fence->pin_count))
-		return -EBUSY;
-
-	i915_gem_object_fence_lost(obj);
-	i915_gem_object_update_fence(obj, fence, false);
-
-	return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *avail;
-	int i;
-
-	/* First try to find a free reg */
-	avail = NULL;
-	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			return reg;
-
-		if (!reg->pin_count)
-			avail = reg;
-	}
-
-	if (avail == NULL)
-		goto deadlock;
-
-	/* None available, try to steal one or wait for a user to finish */
-	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->pin_count)
-			continue;
-
-		return reg;
-	}
-
-deadlock:
-	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev))
-		return ERR_PTR(-EAGAIN);
-
-	return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
-	struct drm_i915_fence_reg *reg;
-	int ret;
-
-	/* Have we updated the tiling parameters upon the object and so
-	 * will need to serialise the write to the associated fence register?
-	 */
-	if (obj->fence_dirty) {
-		ret = i915_gem_object_wait_fence(obj);
-		if (ret)
-			return ret;
-	}
-
-	/* Just update our place in the LRU if our fence is getting reused. */
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->fence_dirty) {
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-			return 0;
-		}
-	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
-		reg = i915_find_fence_reg(dev);
-		if (IS_ERR(reg))
-			return PTR_ERR(reg);
-
-		if (reg->obj) {
-			struct drm_i915_gem_object *old = reg->obj;
-
-			ret = i915_gem_object_wait_fence(old);
-			if (ret)
-				return ret;
-
-			i915_gem_object_fence_lost(old);
-		}
-	} else
-		return 0;
-
-	i915_gem_object_update_fence(obj, reg, enable);
-
-	return 0;
-}
-
 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
@@ -3673,9 +3356,9 @@
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	unsigned long start =
+	u64 start =
 		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	unsigned long end =
+	u64 end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3731,7 +3414,7 @@
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
 			  ggtt_view ? ggtt_view->type : 0,
 			  size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3853,7 +3536,7 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3875,7 +3558,7 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3937,9 +3620,6 @@
 		obj->dirty = 1;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4094,12 +3774,13 @@
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined);
+	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
 	if (ret)
 		return ret;
 
@@ -4210,9 +3891,6 @@
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4253,6 +3931,13 @@
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
+		/*
+		 * Note that the request might not have been submitted yet,
+		 * in which case emitted_jiffies will be zero.
+		 */
+		if (!request->emitted_jiffies)
+			continue;
+
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4423,32 +4108,6 @@
 	--vma->pin_count;
 }
 
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
@@ -4810,8 +4469,9 @@
 	return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4821,7 +4481,7 @@
 	if (!HAS_L3_DPF(dev) || !remap_info)
 		return 0;
 
-	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
 	if (ret)
 		return ret;
 
@@ -4967,7 +4627,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i;
+	int ret, i, j;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -5004,27 +4664,55 @@
 	 */
 	init_unused_rings(dev);
 
+	BUG_ON(!dev_priv->ring[RCS].default_context);
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret) {
+		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+		goto out;
+	}
+
+	/* Need to do basic initialisation of all rings first: */
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
 			goto out;
 	}
 
-	for (i = 0; i < NUM_L3_SLICES(dev); i++)
-		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+	/* Now it is safe to go back round and do everything else: */
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_request *req;
 
-	ret = i915_ppgtt_init_hw(dev);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
-	}
+		WARN_ON(!ring->default_context);
 
-	ret = i915_gem_context_enable(dev_priv);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+		if (ret) {
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
 
-		goto out;
+		if (ring->id == RCS) {
+			for (j = 0; j < NUM_L3_SLICES(dev); j++)
+				i915_gem_l3_remap(req, j);
+		}
+
+		ret = i915_ppgtt_init_ring(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		ret = i915_gem_context_enable(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		i915_add_request_no_flush(req);
 	}
 
 out:
@@ -5091,7 +4779,7 @@
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
 
@@ -5111,6 +4799,14 @@
 
 	for_each_ring(ring, dev_priv, i)
 		dev_priv->gt.cleanup_ring(ring);
+
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves nor any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
 }
 
 static void
@@ -5388,3 +5084,42 @@
 	return false;
 }
 
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+			         const void *data, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct sg_table *sg;
+	size_t bytes;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR_OR_NULL(obj))
+		return obj;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret)
+		goto fail;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto fail;
+
+	i915_gem_object_pin_pages(obj);
+	sg = obj->pages;
+	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(bytes != size)) {
+		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	return obj;
+
+fail:
+	drm_gem_object_unreference(&obj->base);
+	return ERR_PTR(ret);
+}
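
A sketch of how a caller might wrap a firmware-style blob with the new helper (blob_data and blob_size are hypothetical):

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, blob_data, blob_size);
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOMEM;

	/* obj now holds a private, page-aligned copy of the blob,
	 * already moved to the CPU domain. */
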
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 48afa77..8e893b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -287,6 +287,7 @@
 	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
 		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
 }
@@ -407,32 +408,23 @@
 	i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring;
-	int ret, i;
-
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
 
 	if (i915.enable_execlists) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->init_context) {
-				ret = ring->init_context(ring,
-						ring->default_context);
-				if (ret) {
-					DRM_ERROR("ring init context: %d\n",
-							ret);
-					return ret;
-				}
-			}
-		}
+		if (ring->init_context == NULL)
+			return 0;
 
+		ret = ring->init_context(req);
 	} else
-		for_each_ring(ring, dev_priv, i) {
-			ret = i915_switch_context(ring, ring->default_context);
-			if (ret)
-				return ret;
-		}
+		ret = i915_switch_context(req);
+
+	if (ret) {
+		DRM_ERROR("ring init context: %d\n", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -485,10 +477,9 @@
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-	       struct intel_context *new_context,
-	       u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -503,13 +494,15 @@
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+	else if (INTEL_INFO(ring->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
@@ -517,7 +510,7 @@
 	if (INTEL_INFO(ring->dev)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -540,7 +533,7 @@
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -621,9 +614,10 @@
 	return false;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-		     struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
 {
+	struct intel_context *to = req->ctx;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
@@ -659,7 +653,7 @@
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;
 
@@ -701,7 +695,7 @@
 	WARN_ON(needs_pd_load_pre(ring, to) &&
 		needs_pd_load_post(ring, to, hw_flags));
 
-	ret = mi_set_context(ring, to, hw_flags);
+	ret = mi_set_context(req, hw_flags);
 	if (ret)
 		goto unpin_out;
 
@@ -710,7 +704,7 @@
 	 */
 	if (needs_pd_load_post(ring, to, hw_flags)) {
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -726,7 +720,7 @@
 		if (!(to->remap_slice & (1<<i)))
 			continue;
 
-		ret = i915_gem_l3_remap(ring, i);
+		ret = i915_gem_l3_remap(req, i);
 		/* If it failed, try again next round */
 		if (ret)
 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -742,7 +736,7 @@
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -766,7 +760,7 @@
 
 	if (uninitialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, to);
+			ret = ring->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -782,8 +776,7 @@
 
 /**
  * i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -794,25 +787,25 @@
  * switched by writing to the ELSP and requests keep a reference to their
  * context.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (to != ring->last_context) {
-			i915_gem_context_reference(to);
+	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (req->ctx != ring->last_context) {
+			i915_gem_context_reference(req->ctx);
 			if (ring->last_context)
 				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = to;
+			ring->last_context = req->ctx;
 		}
 		return 0;
 	}
 
-	return do_switch(ring, to);
+	return do_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -898,6 +891,9 @@
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 		args->value = ctx->hang_stats.ban_period_seconds;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -935,6 +931,14 @@
 		else
 			ctx->hang_stats.ban_period_seconds = args->value;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		if (args->size) {
+			ret = -EINVAL;
+		} else {
+			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 		break;
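
Userspace flips this per-context bit through the existing context setparam ioctl. A hedged sketch (fd and ctx_id assumed; note the size field must be zero, as enforced above):

	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size   = 0,
		.param  = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value  = 1,	/* bias mappings away from offset zero */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
		return -errno;
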
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7fa145..923a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -677,6 +677,7 @@
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
+			    struct intel_context *ctx,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (ctx->flags & CONTEXT_NO_ZEROMAP)
+			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
@@ -776,7 +780,8 @@
 				  struct drm_file *file,
 				  struct intel_engine_cs *ring,
 				  struct eb_vmas *eb,
-				  struct drm_i915_gem_exec_object2 *exec)
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct intel_context *ctx)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct i915_address_space *vm;
@@ -862,7 +867,7 @@
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -887,10 +892,10 @@
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -900,7 +905,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -912,7 +917,7 @@
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -920,7 +925,7 @@
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req);
 }
 
 static bool
@@ -953,6 +958,9 @@
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+			return -EINVAL;
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
@@ -1013,9 +1021,9 @@
 
 void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct intel_engine_cs *ring)
+				   struct drm_i915_gem_request *req)
 {
-	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1029,12 +1037,12 @@
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
 
-			intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+			intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@
 }
 
 void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-				    struct drm_file *file,
-				    struct intel_engine_cs *ring,
-				    struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	ring->gpu_caches_dirty = true;
+	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj);
+	__i915_add_request(params->request, params->batch_obj, true);
 }
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_engine_cs *ring)
+			    struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
@@ -1077,7 +1083,7 @@
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(ring, 4 * 3);
+	ret = intel_ring_begin(req, 4 * 3);
 	if (ret)
 		return ret;
 
@@ -1093,10 +1099,11 @@
 }
 
 static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
 	      struct drm_clip_rect *box,
 	      int DR1, int DR4)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@
 	}
 
 	if (INTEL_INFO(ring->dev)->gen >= 4) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 		if (ret)
 			return ret;
 
@@ -1116,7 +1123,7 @@
 		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
 		intel_ring_emit(ring, DR4);
 	} else {
-		ret = intel_ring_begin(ring, 6);
+		ret = intel_ring_begin(req, 6);
 		if (ret)
 			return ret;
 
@@ -1186,17 +1193,15 @@
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
 	struct drm_clip_rect *cliprects = NULL;
+	struct drm_device *dev = params->dev;
+	struct intel_engine_cs *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 exec_len;
+	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
 	int i, ret = 0;
@@ -1244,15 +1249,15 @@
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
-	ret = i915_switch_context(ring, ctx);
+	ret = i915_switch_context(params->request);
 	if (ret)
 		goto error;
 
-	WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
 	     "%s didn't clear reload\n", ring->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@
 
 	if (ring == &dev_priv->ring[RCS] &&
 			instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			goto error;
 
@@ -1308,37 +1313,40 @@
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		ret = i915_reset_gen7_sol_offsets(dev, params->request);
 		if (ret)
 			goto error;
 	}
 
-	exec_len = args->batch_len;
+	exec_len   = args->batch_len;
+	exec_start = params->batch_obj_vm_offset +
+		     params->args_batch_start_offset;
+
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(ring, &cliprects[i],
+			ret = i915_emit_box(params->request, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
 				goto error;
 
-			ret = ring->dispatch_execbuffer(ring,
+			ret = ring->dispatch_execbuffer(params->request,
 							exec_start, exec_len,
-							dispatch_flags);
+							params->dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring,
+		ret = ring->dispatch_execbuffer(params->request,
 						exec_start, exec_len,
-						dispatch_flags);
+						params->dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 error:
 	kfree(cliprects);
@@ -1408,8 +1416,9 @@
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
+	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+	struct i915_execbuffer_params *params = &params_master;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset;
 	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
@@ -1482,6 +1491,20 @@
 		return -EINVAL;
 	}
 
+	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+		if (!HAS_RESOURCE_STREAMER(dev)) {
+			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+			return -EINVAL;
+		}
+		if (ring->id != RCS) {
+			DRM_DEBUG("RS is not available on %s\n",
+				 ring->name);
+			return -EINVAL;
+		}
+
+		dispatch_flags |= I915_DISPATCH_RS;
+	}
+
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
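
Userspace opts in per batch with the new flag on the execbuffer ioctl. A hedged sketch (the rest of the execbuffer setup is elided):

	struct drm_i915_gem_execbuffer2 eb2;

	memset(&eb2, 0, sizeof(eb2));
	/* ... buffers_ptr, buffer_count, batch_len etc. as usual ... */

	/* Only legal on the render ring of Haswell/gen8+, per the checks above. */
	eb2.flags |= I915_EXEC_RESOURCE_STREAMER;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2))
		return -errno;
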
@@ -1502,6 +1525,8 @@
 	else
 		vm = &dev_priv->gtt.base;
 
+	memset(&params_master, 0x00, sizeof(params_master));
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1530,7 +1555,7 @@
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec);
+								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1544,6 +1569,7 @@
 		goto err;
 	}
 
+	params->args_batch_start_offset = args->batch_start_offset;
 	if (i915_needs_cmd_parser(ring) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
@@ -1575,7 +1601,7 @@
 			 * command parser has accepted.
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
-			exec_start = 0;
+			params->args_batch_start_offset = 0;
 			batch_obj = parsed_batch_obj;
 		}
 	}
@@ -1600,14 +1626,35 @@
 		if (ret)
 			goto err;
 
-		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
 	} else
-		exec_start += i915_gem_obj_offset(batch_obj, vm);
+		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-	ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-					  &eb->vmas, batch_obj, exec_start,
-					  dispatch_flags);
+	/* Allocate a request for this batch buffer nice and early. */
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
+	if (ret)
+		goto err_batch_unpin;
 
+	ret = i915_gem_request_add_to_client(params->request, file);
+	if (ret)
+		goto err_batch_unpin;
+
+	/*
+	 * Save assorted stuff away to pass through to *_submission().
+	 * NB: This data should be 'persistent' and not local as it will
+	 * be kept around beyond the duration of the IOCTL once the GPU
+	 * scheduler arrives.
+	 */
+	params->dev                     = dev;
+	params->file                    = file;
+	params->ring                    = ring;
+	params->dispatch_flags          = dispatch_flags;
+	params->batch_obj               = batch_obj;
+	params->ctx                     = ctx;
+
+	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
 	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
 	 * batch vma for correctness. For less ugly and less fragility this
@@ -1616,11 +1663,20 @@
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
 		i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request)
+		i915_gem_request_cancel(params->request);
+
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
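
The net effect on this path: everything *_submission() needs now travels in a single i915_execbuffer_params block alongside the pre-allocated request, and an unsubmitted request is explicitly cancelled. A condensed sketch of the resulting flow (error paths trimmed to the one that matters):

	struct i915_execbuffer_params params_master = { };
	struct i915_execbuffer_params *params = &params_master;

	ret = i915_gem_request_alloc(ring, ctx, &params->request);
	if (ret)
		goto err;

	params->dev            = dev;
	params->ring           = ring;
	params->dispatch_flags = dispatch_flags;
	params->batch_obj      = batch_obj;
	params->ctx            = ctx;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
err:
	if (ret && params->request)
		i915_gem_request_cancel(params->request);
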
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 0000000..af1f8c4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
+ * explictly call i915_gem_object_get_fence() to synchronize fencing status
+ * for cpu access. Also note that some code wants an unfenced view, for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
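+ *
+ * As an illustrative example, a caller that wants coherent CPU access
+ * through a GTT mmap of a tiled object first synchronizes the fence
+ * state before touching the pages:
+ *
+ *	ret = i915_gem_object_get_fence(obj);
+ *	if (ret)
+ *		return ret;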
+ */
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint64_t val;
+
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+				 0xfffff000) << 32;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		int pitch_val;
+		int tile_width;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint32_t pitch_val;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	if (IS_GEN2(dev))
+		i830_write_fence_reg(dev, reg, obj);
+	else if (IS_GEN3(dev))
+		i915_write_fence_reg(dev, reg, obj);
+	else if (INTEL_INFO(dev)->gen >= 4)
+		i965_write_fence_reg(dev, reg, obj);
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_request(obj->last_fenced_req);
+		if (ret)
+			return ret;
+
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	if (WARN_ON(fence->pin_count))
+		return -EBUSY;
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		goto deadlock;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
+		reg = i915_find_fence_reg(dev);
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+		WARN_ON(!ggtt_vma ||
+			dev_priv->fence_regs[obj->fence_reg].pin_count >
+			ggtt_vma->pin_count);
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence. It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish this.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffers are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
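/*
 * Editorial sketch, not part of this patch: one way the cumulative
 * bit-6 swizzle described above can be applied to a CPU-visible
 * offset under the common 9/10 swizzle. Plain C, no i915 helpers;
 * the function name is hypothetical.
 */
static inline unsigned long swizzle_addr_bit6_9_10(unsigned long addr)
{
	unsigned long bit9 = (addr >> 9) & 1;
	unsigned long bit10 = (addr >> 10) & 1;

	/* New bit 6 is old bit 6 XOR bit 9 XOR bit 10. */
	return addr ^ ((bit9 ^ bit10) << 6);
}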
+
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+		/*
+		 * On BDW+, swizzling is not used. We leave the CPU memory
+		 * controller in charge of optimizing memory accesses without
+		 * the extra address manipulation on the GPU side.
+		 *
+		 * VLV and CHV don't have GPU swizzling.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		} else {
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized dimms. We don't need to check
+			 * the 3rd channel because no cpu with gpu attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		}
+	} else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always does
+		 * the same swizzling setup.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+		swizzle_y = I915_BIT_6_SWIZZLE_9;
+	} else if (IS_GEN2(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+		uint32_t dcc;
+
+		/* On 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU do swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+			}
+			break;
+		}
+
+		/* check for L-shaped memory aka modified enhanced addressing */
+		if (IS_GEN4(dev)) {
+			uint32_t ddc2 = I915_READ(DCC2);
+
+			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		}
+
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+	char temp[64];
+	char *vaddr;
+	int i;
+
+	vaddr = kmap(page);
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+}
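/*
 * Editorial worked example: with the 128-byte stride above, bytes
 * 0-63 swap with bytes 64-127, bytes 128-191 with 192-255, and so on.
 * The two 64-byte halves of every 128-byte chunk trade places, which
 * is exactly the effect of bit 6 flipping when bit 17 of the page's
 * physical address has changed.
 */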
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int i;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
+		}
+		i++;
+	}
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int page_count = obj->base.size >> PAGE_SHIFT;
+	int i;
+
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
+		if (obj->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i++;
+	}
+}
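For orientation, a hedged sketch (not part of this patch) of how the save/do
pair above brackets a backing-storage cycle; i915_gem_object_needs_bit17_swizzle()
is the driver's existing predicate for the bit-17 case, and locking and page
pinning details are elided:

	/* When releasing the pages (put_pages path): */
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	/* After re-acquiring the pages (get_pages path), fix up in place: */
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);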
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 31e8269..96054a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -192,9 +192,8 @@
 	return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
-				  dma_addr_t addr,
-				  enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+				  const enum i915_cache_level level)
 {
 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
@@ -301,75 +300,120 @@
 	return pte;
 }
 
-#define i915_dma_unmap_single(px, dev) \
-	__i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
-				    struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+			    struct i915_page_dma *p, gfp_t flags)
 {
 	struct device *device = &dev->pdev->dev;
 
-	dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-}
-
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px:	Page table/dir/etc to get a DMA map for
- * @dev:	drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
-	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
-
-static int i915_dma_map_page_single(struct page *page,
-				    struct drm_device *dev,
-				    dma_addr_t *daddr)
-{
-	struct device *device = &dev->pdev->dev;
-
-	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(device, *daddr))
+	p->page = alloc_page(flags);
+	if (!p->page)
 		return -ENOMEM;
 
+	p->daddr = dma_map_page(device,
+				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(device, p->daddr)) {
+		__free_page(p->page);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-static void unmap_and_free_pt(struct i915_page_table *pt,
-			       struct drm_device *dev)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	if (WARN_ON(!pt->page))
+	return __setup_page_dma(dev, p, GFP_KERNEL);
+}
+
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+	if (WARN_ON(!p->page))
 		return;
 
-	i915_dma_unmap_single(pt, dev);
-	__free_page(pt->page);
-	kfree(pt->used_ptes);
-	kfree(pt);
+	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
+	memset(p, 0, sizeof(*p));
 }
 
-static void gen8_initialize_pt(struct i915_address_space *vm,
-			       struct i915_page_table *pt)
+static void *kmap_page_dma(struct i915_page_dma *p)
 {
-	gen8_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	pt_vaddr = kmap_atomic(pt->page);
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
-				      I915_CACHE_LLC, true);
-
-	for (i = 0; i < GEN8_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-	kunmap_atomic(pt_vaddr);
+	return kmap_atomic(p->page);
 }
 
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+{
+	/* There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play it safe for now.
+	 */
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+	kunmap_atomic(vaddr);
+}
+
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
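/*
 * Editorial sketch: the kmap_px()/kunmap_px() pair defined above is
 * used like kmap_atomic(), but folds in the conditional clflush, as
 * the converted call sites later in this patch do:
 *
 *	gen8_pte_t *vaddr = kmap_px(pt);
 *	vaddr[pte] = scratch_pte;
 *	kunmap_px(ppgtt, vaddr);
 */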
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
+{
+	int i;
+	uint64_t * const vaddr = kmap_page_dma(p);
+
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
+
+	v = v << 32 | val32;
+
+	fill_page_dma(dev, p, v);
+}
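/*
 * Editorial note: the replication above turns, for example,
 * val32 == 0xdeadbeef into v == 0xdeadbeefdeadbeef, so a page of
 * 32-bit gen6 PTEs can reuse the 64-bit fill_page_dma() loop
 * (512 x 8 bytes == one 4K page).
 */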
+
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+	struct i915_page_scratch *sp;
+	int ret;
+
+	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+	if (sp == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+	if (ret) {
+		kfree(sp);
+		return ERR_PTR(ret);
+	}
+
+	set_pages_uc(px_page(sp), 1);
+
+	return sp;
+}
+
+static void free_scratch_page(struct drm_device *dev,
+			      struct i915_page_scratch *sp)
+{
+	set_pages_wb(px_page(sp), 1);
+
+	cleanup_px(dev, sp);
+	kfree(sp);
+}
+
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
 {
 	struct i915_page_table *pt;
 	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@
 	if (!pt->used_ptes)
 		goto fail_bitmap;
 
-	pt->page = alloc_page(GFP_KERNEL);
-	if (!pt->page)
-		goto fail_page;
-
-	ret = i915_dma_map_single(pt, dev);
+	ret = setup_px(dev, pt);
 	if (ret)
-		goto fail_dma;
+		goto fail_page_m;
 
 	return pt;
 
-fail_dma:
-	__free_page(pt->page);
-fail_page:
+fail_page_m:
 	kfree(pt->used_ptes);
 fail_bitmap:
 	kfree(pt);
@@ -406,18 +444,38 @@
 	return ERR_PTR(ret);
 }
 
-static void unmap_and_free_pd(struct i915_page_directory *pd,
-			      struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
-	if (pd->page) {
-		i915_dma_unmap_single(pd, dev);
-		__free_page(pd->page);
-		kfree(pd->used_pdes);
-		kfree(pd);
-	}
+	cleanup_px(dev, pt);
+	kfree(pt->used_ptes);
+	kfree(pt);
 }
 
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen8_pte_t scratch_pte;
+
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+				      I915_CACHE_LLC, true);
+
+	fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen6_pte_t scratch_pte;
+
+	WARN_ON(px_dma(vm->scratch_page) == 0);
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	fill32_px(vm->dev, pt, scratch_pte);
+}
+
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 {
 	struct i915_page_directory *pd;
 	int ret = -ENOMEM;
@@ -429,38 +487,52 @@
 	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
 				sizeof(*pd->used_pdes), GFP_KERNEL);
 	if (!pd->used_pdes)
-		goto free_pd;
+		goto fail_bitmap;
 
-	pd->page = alloc_page(GFP_KERNEL);
-	if (!pd->page)
-		goto free_bitmap;
-
-	ret = i915_dma_map_single(pd, dev);
+	ret = setup_px(dev, pd);
 	if (ret)
-		goto free_page;
+		goto fail_page_m;
 
 	return pd;
 
-free_page:
-	__free_page(pd->page);
-free_bitmap:
+fail_page_m:
 	kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
 	kfree(pd);
 
 	return ERR_PTR(ret);
 }
 
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+	if (px_page(pd)) {
+		cleanup_px(dev, pd);
+		kfree(pd->used_pdes);
+		kfree(pd);
+	}
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	gen8_pde_t scratch_pde;
+
+	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+	fill_px(vm->dev, pd, scratch_pde);
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -476,16 +548,14 @@
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
 	int i, ret;
 
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
-		struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
-		dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
-		/* The page directory might be NULL, but we need to clear out
-		 * whatever the previous context might have used. */
-		ret = gen8_write_pdp(ring, i, pd_daddr);
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		ret = gen8_write_pdp(req, i, pd_daddr);
 		if (ret)
 			return ret;
 	}
@@ -507,13 +577,12 @@
 	unsigned num_entries = length >> PAGE_SHIFT;
 	unsigned last_pte, i;
 
-	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
 		struct i915_page_directory *pd;
 		struct i915_page_table *pt;
-		struct page *page_table;
 
 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
 			break;
@@ -525,25 +594,21 @@
 
 		pt = pd->page_table[pde];
 
-		if (WARN_ON(!pt->page))
+		if (WARN_ON(!px_page(pt)))
 			break;
 
-		page_table = pt->page;
-
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES)
 			last_pte = GEN8_PTES;
 
-		pt_vaddr = kmap_atomic(page_table);
+		pt_vaddr = kmap_px(pt);
 
 		for (i = pte; i < last_pte; i++) {
 			pt_vaddr[i] = scratch_pte;
 			num_entries--;
 		}
 
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt);
 
 		pte = 0;
 		if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
-			struct page *page_table = pt->page;
-
-			pt_vaddr = kmap_atomic(page_table);
+			pt_vaddr = kmap_px(pt);
 		}
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
 					cache_level, true);
 		if (++pte == GEN8_PTES) {
-			if (!HAS_LLC(ppgtt->base.dev))
-				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			if (++pde == I915_PDES) {
 				pdpe++;
@@ -595,58 +656,64 @@
 			pte = 0;
 		}
 	}
-	if (pt_vaddr) {
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
-	}
+
+	if (pt_vaddr)
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
-			     struct i915_page_table *pt,
-			     struct drm_device *dev)
-{
-	gen8_pde_t entry =
-		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
-	*pde = entry;
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
-			       struct i915_page_directory *pd)
-{
-	struct i915_hw_ppgtt *ppgtt =
-			container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_pde_t *page_directory;
-	struct i915_page_table *pt;
-	int i;
-
-	page_directory = kmap_atomic(pd->page);
-	pt = ppgtt->scratch_pt;
-	for (i = 0; i < I915_PDES; i++)
-		/* Map the PDE to the page table */
-		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(page_directory, PAGE_SIZE);
-	kunmap_atomic(page_directory);
-}
-
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+				  struct i915_page_directory *pd)
 {
 	int i;
 
-	if (!pd->page)
+	if (!px_page(pd))
 		return;
 
 	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
 		if (WARN_ON(!pd->page_table[i]))
 			continue;
 
-		unmap_and_free_pt(pd->page_table[i], dev);
+		free_pt(dev, pd->page_table[i]);
 		pd->page_table[i] = NULL;
 	}
 }
 
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	vm->scratch_pd = alloc_pd(dev);
+	if (IS_ERR(vm->scratch_pd)) {
+		free_pt(dev, vm->scratch_pt);
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pd);
+	}
+
+	gen8_initialize_pt(vm, vm->scratch_pt);
+	gen8_initialize_pd(vm, vm->scratch_pd);
+
+	return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pd(dev, vm->scratch_pd);
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@
 		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
 			continue;
 
-		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		gen8_free_page_tables(ppgtt->base.dev,
+				      ppgtt->pdp.page_directory[i]);
+		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
 	}
 
-	unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen8_free_scratch(vm);
 }
 
 /**
@@ -698,24 +765,24 @@
 		/* Don't reallocate page tables */
 		if (pt) {
 			/* Scratch is never allocated this way */
-			WARN_ON(pt == ppgtt->scratch_pt);
+			WARN_ON(pt == ppgtt->base.scratch_pt);
 			continue;
 		}
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
 		gen8_initialize_pt(&ppgtt->base, pt);
 		pd->page_table[pde] = pt;
-		set_bit(pde, new_pts);
+		__set_bit(pde, new_pts);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, I915_PDES)
-		unmap_and_free_pt(pd->page_table[pde], dev);
+		free_pt(dev, pd->page_table[pde]);
 
 	return -ENOMEM;
 }
@@ -756,27 +823,24 @@
 
 	WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 
-	/* FIXME: upper bound must not overflow 32 bits  */
-	WARN_ON((start + length) > (1ULL << 32));
-
 	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(dev);
+		pd = alloc_pd(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
 		gen8_initialize_pd(&ppgtt->base, pd);
 		pdp->page_directory[pdpe] = pd;
-		set_bit(pdpe, new_pds);
+		__set_bit(pdpe, new_pds);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+		free_pd(dev, pdp->page_directory[pdpe]);
 
 	return -ENOMEM;
 }
@@ -830,6 +894,16 @@
 	return -ENOMEM;
 }
 
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that the TLBs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
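/*
 * Editorial sketch of the consumer side: before submitting on a ring,
 * the execbuffer path tests and clears that ring's dirty bit, roughly
 * as below. intel_ring_flag() is the driver's 1 << ring->id helper;
 * the function name here is hypothetical.
 */
static bool ppgtt_pd_reload_needed(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *ring)
{
	if (!(ppgtt->pd_dirty_rings & intel_ring_flag(ring)))
		return false;

	/* Reload the page directories and drop the dirty mark. */
	ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	return true;
}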
+
 static int gen8_alloc_va_range(struct i915_address_space *vm,
 			       uint64_t start,
 			       uint64_t length)
@@ -848,7 +922,10 @@
 	 * actually use the other side of the canonical address space.
 	 */
 	if (WARN_ON(start + length < start))
-		return -ERANGE;
+		return -ENODEV;
+
+	if (WARN_ON(start + length > ppgtt->base.total))
+		return -ENODEV;
 
 	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
 	if (ret)
@@ -876,7 +953,7 @@
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -897,36 +974,36 @@
 				   gen8_pte_count(pd_start, pd_len));
 
 			/* Our pde is now pointing to the pagetable, pt */
-			set_bit(pde, pd->used_pdes);
+			__set_bit(pde, pd->used_pdes);
 
 			/* Map the PDE to the page table */
-			__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+			page_directory[pde] = gen8_pde_encode(px_dma(pt),
+							      I915_CACHE_LLC);
 
 			/* NB: We haven't yet mapped ptes to pages. At this
 			 * point we're still relying on insert_entries() */
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(page_directory, PAGE_SIZE);
+		kunmap_px(ppgtt, page_directory);
 
-		kunmap_atomic(page_directory);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
+		__set_bit(pdpe, ppgtt->pdp.used_pdpes);
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return 0;
 
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-			unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+			free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+		free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
@@ -939,16 +1016,11 @@
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
+	int ret;
 
-	ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pd))
-		return PTR_ERR(ppgtt->scratch_pd);
-
-	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
-	gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret)
+		return ret;
 
 	ppgtt->base.start = 0;
 	ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@
 	uint32_t  pte, pde, temp;
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
 		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -996,7 +1069,8 @@
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@
 			}
 			seq_puts(m, "\n");
 		}
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 	}
 }
 
@@ -1031,7 +1105,7 @@
 		container_of(pd, struct i915_hw_ppgtt, pd);
 	u32 pd_entry;
 
-	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
 	pd_entry |= GEN6_PDE_VALID;
 
 	writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
-	return (ppgtt->pd.pd_offset / 64) << 16;
+	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring)
+			 struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1087,8 +1162,9 @@
 }
 
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
 	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1120,7 +1197,7 @@
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (ring->id != RCS) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1129,8 +1206,9 @@
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1214,19 +1292,20 @@
 	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
 
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
@@ -1250,54 +1329,25 @@
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
 				       cache_level, true, flags);
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			act_pt++;
 			act_pte = 0;
 		}
 	}
 	if (pt_vaddr)
-		kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
-	/* If current vm != vm, */
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
-		struct i915_page_table *pt)
-{
-	gen6_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	WARN_ON(vm->scratch.addr == 0);
-
-	scratch_pte = vm->pte_encode(vm->scratch.addr,
-			I915_CACHE_LLC, true, 0);
-
-	pt_vaddr = kmap_atomic(pt->page);
-
-	for (i = 0; i < GEN6_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start, uint64_t length)
+			       uint64_t start_in, uint64_t length_in)
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES);
 	struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@
 	struct i915_hw_ppgtt *ppgtt =
 				container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_table *pt;
-	const uint32_t start_save = start, length_save = length;
+	uint32_t start, length, start_save, length_save;
 	uint32_t pde, temp;
 	int ret;
 
-	WARN_ON(upper_32_bits(start));
+	if (WARN_ON(start_in + length_in > ppgtt->base.total))
+		return -ENODEV;
+
+	start = start_save = start_in;
+	length = length_save = length_in;
 
 	bitmap_zero(new_page_tables, I915_PDES);
 
@@ -1319,7 +1373,7 @@
 	 * tables.
 	 */
 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
-		if (pt != ppgtt->scratch_pt) {
+		if (pt != vm->scratch_pt) {
 			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
 			continue;
 		}
@@ -1327,7 +1381,7 @@
 		/* We've already allocated a page table */
 		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt)) {
 			ret = PTR_ERR(pt);
 			goto unwind_out;
@@ -1336,7 +1390,7 @@
 		gen6_initialize_pt(vm, pt);
 
 		ppgtt->pd.page_table[pde] = pt;
-		set_bit(pde, new_page_tables);
+		__set_bit(pde, new_page_tables);
 		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
 	}
 
@@ -1350,7 +1404,7 @@
 		bitmap_set(tmp_bitmap, gen6_pte_index(start),
 			   gen6_pte_count(start, length));
 
-		if (test_and_clear_bit(pde, new_page_tables))
+		if (__test_and_clear_bit(pde, new_page_tables))
 			gen6_write_pde(&ppgtt->pd, pde, pt);
 
 		trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@
 	for_each_set_bit(pde, new_page_tables, I915_PDES) {
 		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
-		unmap_and_free_pt(pt, vm->dev);
+		ppgtt->pd.page_table[pde] = vm->scratch_pt;
+		free_pt(vm->dev, pt);
 	}
 
 	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	gen6_initialize_pt(vm, vm->scratch_pt);
+
+	return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@
 	struct i915_page_table *pt;
 	uint32_t pde;
 
-
 	drm_mm_remove_node(&ppgtt->node);
 
 	gen6_for_all_pdes(pt, ppgtt, pde) {
-		if (pt != ppgtt->scratch_pt)
-			unmap_and_free_pt(pt, ppgtt->base.dev);
+		if (pt != vm->scratch_pt)
+			free_pt(ppgtt->base.dev, pt);
 	}
 
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
-	unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
+	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool retried = false;
@@ -1413,11 +1493,10 @@
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
 	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
 
-	gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+	ret = gen6_init_scratch(vm);
+	if (ret)
+		return ret;
 
 alloc:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@
 	return 0;
 
 err_out:
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 	return ret;
 }
 
@@ -1464,7 +1543,7 @@
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@
 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd.pd_offset =
+	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
 	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
@@ -1515,23 +1594,21 @@
 			 ppgtt->node.start / PAGE_SIZE);
 
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd.pd_offset << 10);
+		  ppgtt->pd.base.ggtt_offset << 10);
 
 	return 0;
 }
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	ppgtt->base.dev = dev;
-	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int i, ret = 0;
-
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
@@ -1573,16 +1645,23 @@
 	else
 		MISSING_CASE(INTEL_INFO(dev)->gen);
 
-	if (ppgtt) {
-		for_each_ring(ring, dev_priv, i) {
-			ret = ppgtt->switch_mm(ppgtt, ring);
-			if (ret != 0)
-				return ret;
-		}
-	}
-
-	return ret;
+	return 0;
 }
+
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	if (!ppgtt)
+		return 0;
+
+	return ppgtt->switch_mm(ppgtt, req);
+}
+
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -1843,7 +1922,7 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -1869,7 +1948,8 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -2105,7 +2185,7 @@
 void i915_gem_init_global_gtt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
+	u64 gtt_size, mappable_size;
 
 	gtt_size = dev_priv->gtt.base.total;
 	mappable_size = dev_priv->gtt.mappable_end;
@@ -2135,42 +2215,6 @@
 	vm->cleanup(vm);
 }
 
-static int setup_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page;
-	dma_addr_t dma_addr;
-
-	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-	if (page == NULL)
-		return -ENOMEM;
-	set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
-	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
-				PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, dma_addr))
-		return -EINVAL;
-#else
-	dma_addr = page_to_phys(page);
-#endif
-	dev_priv->gtt.base.scratch.page = page;
-	dev_priv->gtt.base.scratch.addr = dma_addr;
-
-	return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page = dev_priv->gtt.base.scratch.page;
-
-	set_pages_wb(page, 1);
-	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	__free_page(page);
-}
-
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2253,8 +2297,8 @@
 			     size_t gtt_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_page_scratch *scratch_page;
 	phys_addr_t gtt_phys_addr;
-	int ret;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
 	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2276,14 +2320,17 @@
 		return -ENOMEM;
 	}
 
-	ret = setup_scratch_page(dev);
-	if (ret) {
+	scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(scratch_page)) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(dev_priv->gtt.gsm);
+		return PTR_ERR(scratch_page);
 	}
 
-	return ret;
+	dev_priv->gtt.base.scratch_page = scratch_page;
+
+	return 0;
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2360,13 +2407,13 @@
 }
 
 static int gen8_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int gtt_size;
+	u64 gtt_size;
 	u16 snb_gmch_ctl;
 	int ret;
 
@@ -2408,10 +2455,10 @@
 }
 
 static int gen6_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int gtt_size;
@@ -2425,7 +2472,7 @@
 	 * a coarse sanity check.
 	 */
 	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-		DRM_ERROR("Unknown GMADR size (%lx)\n",
+		DRM_ERROR("Unknown GMADR size (%llx)\n",
 			  dev_priv->gtt.mappable_end);
 		return -ENXIO;
 	}
@@ -2455,14 +2502,14 @@
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
 	iounmap(gtt->gsm);
-	teardown_scratch_page(vm->dev);
+	free_scratch_page(vm->dev, vm->scratch_page);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -2519,17 +2566,17 @@
 		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
+	gtt->base.dev = dev;
+
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
 			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
 		return ret;
 
-	gtt->base.dev = dev;
-
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_INFO("Memory usable by graphics device = %zdM\n",
+	DRM_INFO("Memory usable by graphics device = %lluM\n",
 		 gtt->base.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped)
@@ -2706,30 +2753,17 @@
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 			  struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
-	unsigned long size, pages, rot_pages;
+	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	struct sg_page_iter sg_iter;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
-	unsigned int tile_pitch, tile_height;
-	unsigned int width_pages, height_pages;
 	int ret = -ENOMEM;
 
-	pages = obj->base.size / PAGE_SIZE;
-
-	/* Calculate tiling geometry. */
-	tile_height = intel_tile_height(dev, rot_info->pixel_format,
-					rot_info->fb_modifier);
-	tile_pitch = PAGE_SIZE / tile_height;
-	width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
-	height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
-	rot_pages = width_pages * height_pages;
-	size = rot_pages * PAGE_SIZE;
-
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+				       sizeof(dma_addr_t));
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
@@ -2738,7 +2772,7 @@
 	if (!st)
 		goto err_st_alloc;
 
-	ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+	ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
 	if (ret)
 		goto err_sg_alloc;
 
@@ -2750,13 +2784,15 @@
 	}
 
 	/* Rotate the pages. */
-	rotate_pages(page_addr_list, width_pages, height_pages, st);
+	rotate_pages(page_addr_list,
+		     rot_info->width_pages, rot_info->height_pages,
+		     st);
 
 	DRM_DEBUG_KMS(
-		      "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
-		      size, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+		      obj->base.size, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 
 	drm_free_large(page_addr_list);
 
@@ -2768,10 +2804,10 @@
 	drm_free_large(page_addr_list);
 
 	DRM_DEBUG_KMS(
-		      "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
-		      size, ret, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+		      obj->base.size, ret, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 	return ERR_PTR(ret);
 }
 
@@ -2889,9 +2925,12 @@
 				    vma->node.size,
 				    VM_TO_TRACE_NAME(vma->vm));
 
+		/* XXX: i915_vma_pin() will fix this +- hack */
+		vma->pin_count++;
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
+		vma->pin_count--;
 		if (ret)
 			return ret;
 	}
@@ -2916,9 +2955,10 @@
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 		    const struct i915_ggtt_view *view)
 {
-	if (view->type == I915_GGTT_VIEW_NORMAL ||
-	    view->type == I915_GGTT_VIEW_ROTATED) {
+	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
+	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+		return view->rotation_info.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0d46dd2..e1cfa29 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,6 +126,8 @@
 	unsigned int pitch;
 	uint32_t pixel_format;
 	uint64_t fb_modifier;
+	unsigned int width_pages, height_pages;
+	uint64_t size;
 };
 
 struct i915_ggtt_view {
@@ -205,19 +207,34 @@
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
-struct i915_page_table {
+struct i915_page_dma {
 	struct page *page;
-	dma_addr_t daddr;
+	union {
+		dma_addr_t daddr;
+
+		/* For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		uint32_t ggtt_offset;
+	};
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
 
 	unsigned long *used_ptes;
 };
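/*
 * Editorial note: with an i915_page_dma embedded as a member named
 * "base", the px_*() helpers above work uniformly for page tables,
 * page directories and the scratch page; px_dma(pt), for example,
 * expands to (&(pt)->base)->daddr.
 */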
 
 struct i915_page_directory {
-	struct page *page; /* NULL for GEN6-GEN7 */
-	union {
-		uint32_t pd_offset;
-		dma_addr_t daddr;
-	};
+	struct i915_page_dma base;
 
 	unsigned long *used_pdes;
 	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -233,13 +250,12 @@
 	struct drm_mm mm;
 	struct drm_device *dev;
 	struct list_head global_link;
-	unsigned long start;		/* Start offset always 0 for dri2 */
-	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 start;		/* Start offset always 0 for dri2 */
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
+	struct i915_page_scratch *scratch_page;
+	struct i915_page_table *scratch_pt;
+	struct i915_page_directory *scratch_pd;
 
 	/**
 	 * List of objects currently involved in rendering.
@@ -300,9 +316,9 @@
  */
 struct i915_gtt {
 	struct i915_address_space base;
-	size_t stolen_size;		/* Total size of stolen memory */
 
-	unsigned long mappable_end;	/* End offset that we can CPU map */
+	size_t stolen_size;		/* Total size of stolen memory */
+	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
@@ -314,9 +330,9 @@
 	int mtrr;
 
 	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
 			  size_t *stolen, phys_addr_t *mappable_base,
-			  unsigned long *mappable_end);
+			  u64 *mappable_end);
 };
 
 struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@
 		struct i915_page_directory pd;
 	};
 
-	struct i915_page_table *scratch_pt;
-	struct i915_page_directory *scratch_pd;
-
 	struct drm_i915_file_private *file_priv;
 
 	gen6_pte_t __iomem *pd_addr;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring);
+			 struct drm_i915_gem_request *req);
 	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
@@ -468,6 +481,14 @@
 	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
 }
 
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+	return test_bit(n, ppgtt->pdp.used_pdpes) ?
+		px_dma(ppgtt->pdp.page_directory[n]) :
+		px_dma(ppgtt->base.scratch_pd);
+}
+
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a..5026a62 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -73,6 +73,24 @@
 	return ret;
 }
 
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient because the null state generator makes the final batch
+ * with two passes to build commands and state separately. At this point
+ * the sizes of both are known, and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val)				\
+	do {							\
+		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
+			ret = -ENOSPC;				\
+			goto err_out;				\
+		}						\
+		(batch)[(i)++] = (val);				\
+	} while (0)
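/*
 * Editorial note: OUT_BATCH() relies on the enclosing function to
 * declare "ret" and provide an "err_out" label; render_state_setup()
 * below uses it to pad the batch to a cacheline with MI_NOOPs and to
 * append the auxiliary MI_BATCH_BUFFER_END.
 */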
+
 static int render_state_setup(struct render_state *so)
 {
 	const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@
 			s = lower_32_bits(r);
 			if (so->gen >= 8) {
 				if (i + 1 >= rodata->batch_items ||
-				    rodata->batch[i + 1] != 0)
-					return -EINVAL;
+				    rodata->batch[i + 1] != 0) {
+					ret = -EINVAL;
+					goto err_out;
+				}
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -108,6 +128,21 @@
 
 		d[i++] = s;
 	}
+
+	while (i % CACHELINE_DWORDS)
+		OUT_BATCH(d, i, MI_NOOP);
+
+	so->aux_batch_offset = i * sizeof(u32);
+
+	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+	/*
+	 * Since we are sending a length, we need to strictly conform to
+	 * all requirements. For Gen2 this must be a multiple of 8.
+	 */
+	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
 	kunmap(page);
 
 	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@
 	}
 
 	return 0;
+
+err_out:
+	kunmap(page);
+	return ret;
 }
 
+#undef OUT_BATCH
+
 void i915_gem_render_state_fini(struct render_state *so)
 {
 	i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@
 	return 0;
 }
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->dispatch_execbuffer(ring,
-					so.ggtt_offset,
-					so.rodata->batch_items * 4,
-					I915_DISPATCH_SECURE);
+	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+					     so.rodata->batch_items * 4,
+					     I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	if (so.aux_batch_size > 8) {
+		ret = req->ring->dispatch_execbuffer(req,
+						     (so.ggtt_offset +
+						      so.aux_batch_offset),
+						     so.aux_batch_size,
+						     I915_DISPATCH_SECURE);
+		if (ret)
+			goto out;
+	}
 
-	ret = __i915_add_request(ring, NULL, so.obj);
-	/* __i915_add_request moves object to inactive if it fails */
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961e..e641bb0 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -37,9 +37,11 @@
 	struct drm_i915_gem_object *obj;
 	u64 ggtt_offset;
 	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
 };
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
 int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
 				  struct render_state *so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8b5b784..f361c4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,6 +42,31 @@
  * for is a boon.
  */
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment)
+{
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return -ENODEV;
+
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+				 DRM_MM_SEARCH_DEFAULT);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+
+	return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node)
+{
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	drm_mm_remove_node(node);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+}
+
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,134 +176,6 @@
 	return base;
 }
 
-static int find_compression_threshold(struct drm_device *dev,
-				      struct drm_mm_node *node,
-				      int size,
-				      int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int compression_threshold = 1;
-	int ret;
-
-	/* HACK: This code depends on what we will do in *_enable_fbc. If that
-	 * code changes, this code needs to change as well.
-	 *
-	 * The enable_fbc code will attempt to use one of our 2 compression
-	 * thresholds, therefore, in that case, we only have 1 resort.
-	 */
-
-	/* Try to over-allocate to reduce reallocations and fragmentation. */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret == 0)
-		return compression_threshold;
-
-again:
-	/* HW's ability to limit the CFB is 1:4 */
-	if (compression_threshold > 4 ||
-	    (fb_cpp == 2 && compression_threshold == 2))
-		return 0;
-
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size >>= 1, 4096,
-				 DRM_MM_SEARCH_DEFAULT);
-	if (ret && INTEL_INFO(dev)->gen <= 4) {
-		return 0;
-	} else if (ret) {
-		compression_threshold <<= 1;
-		goto again;
-	} else {
-		return compression_threshold;
-	}
-}
-
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
-
-	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
-					 size, fb_cpp);
-	if (!ret)
-		goto err_llb;
-	else if (ret > 1) {
-		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
-	}
-
-	dev_priv->fbc.threshold = ret;
-
-	if (INTEL_INFO(dev_priv)->gen >= 5)
-		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	} else {
-		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
-		if (!compressed_llb)
-			goto err_fb;
-
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
-					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
-		if (ret)
-			goto err_fb;
-
-		dev_priv->fbc.compressed_llb = compressed_llb;
-
-		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
-		I915_WRITE(FBC_LL_BASE,
-			   dev_priv->mm.stolen_base + compressed_llb->start);
-	}
-
-	dev_priv->fbc.uncompressed_size = size;
-
-	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
-		      size);
-
-	return 0;
-
-err_fb:
-	kfree(compressed_llb);
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
-	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
-	return -ENOSPC;
-}
-
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return -ENODEV;
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	i915_gem_stolen_cleanup_compression(dev);
-
-	return i915_setup_compression(dev, size, fb_cpp);
-}
-
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
-	if (dev_priv->fbc.compressed_llb) {
-		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
-		kfree(dev_priv->fbc.compressed_llb);
-	}
-
-	dev_priv->fbc.uncompressed_size = 0;
-}
-
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -286,15 +183,108 @@
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return;
 
-	i915_gem_stolen_cleanup_compression(dev);
 	drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN6_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_512K:
+		*size = 512 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_128K:
+		*size = 128 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN7_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN7_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN8_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_2M:
+		*size = 2 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_4M:
+		*size = 4 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_8M:
+		*size = 8 * 1024 * 1024;
+		break;
+	default:
+		*size = 8 * 1024 * 1024;
+		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	unsigned long stolen_top;
+
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	/* On these platforms, the register doesn't have a size field, so the
+	 * size is the distance between the base and the top of the stolen
+	 * memory. We also have the genuine case where base is zero and there's
+	 * nothing reserved. */
+	if (*base == 0)
+		*size = 0;
+	else
+		*size = stolen_top - *base;
+}
+
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
-	int bios_reserved = 0;
+	unsigned long reserved_total, reserved_base, reserved_size;
+	unsigned long stolen_top;
+
+	mutex_init(&dev_priv->mm.stolen_lock);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
 
-	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
-		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
-	if (INTEL_INFO(dev)->gen >= 8) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
-		tmp &= GEN8_BIOS_RESERVED_MASK;
-		bios_reserved = (1024*1024) << tmp;
-	} else if (IS_GEN7(dev)) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
-			256*1024 : 1024*1024;
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		/* Assume the gen6 maximum for the older platforms. */
+		reserved_size = 1024 * 1024;
+		reserved_base = stolen_top - reserved_size;
+		break;
+	case 6:
+		gen6_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	case 7:
+		gen7_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	default:
+		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+			bdw_get_stolen_reserved(dev_priv, &reserved_base,
+						&reserved_size);
+		else
+			gen8_get_stolen_reserved(dev_priv, &reserved_base,
+						 &reserved_size);
+		break;
 	}
 
-	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+	/* It is possible for the reserved base to be zero, but the register
+	 * field for size doesn't have a zero option. */
+	if (reserved_base == 0) {
+		reserved_size = 0;
+		reserved_base = stolen_top;
+	}
+
+	if (reserved_base < dev_priv->mm.stolen_base ||
+	    reserved_base + reserved_size > stolen_top) {
+		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+			      reserved_base, reserved_base + reserved_size,
+			      dev_priv->mm.stolen_base, stolen_top);
 		return 0;
+	}
+
+	/* It is possible for the reserved area to end before the end of stolen
+	 * memory, so just consider the start. */
+	reserved_total = stolen_top - reserved_base;
+
+	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+		      dev_priv->gtt.stolen_size >> 10,
+		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-		    bios_reserved);
+		    reserved_total);
 
 	return 0;
 }
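
For clarity, the new bookkeeping in i915_gem_init_stolen() boils down to a few range checks; a standalone sketch with invented example addresses (the real values come from the stolen-base detection and the *_get_stolen_reserved() helpers above):

#include <stdio.h>

int main(void)
{
	/* Example values only; the real ones come from hardware registers. */
	unsigned long stolen_base = 0x7a000000, stolen_size = 64UL << 20;
	unsigned long reserved_base = 0x7df00000, reserved_size = 1UL << 20;

	unsigned long stolen_top = stolen_base + stolen_size;

	/* A zero base genuinely means nothing is reserved. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		printf("reserved area outside stolen memory, bailing\n");
		return 0;
	}

	/* The reserved window may end before stolen_top; only the start
	 * matters because everything above it is treated as unusable. */
	unsigned long reserved_total = stolen_top - reserved_base;

	printf("stolen: %luK, usable: %luK\n",
	       stolen_size >> 10, (stolen_size - reserved_total) >> 10);
	return 0;
}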
@@ -386,8 +411,10 @@
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 	if (obj->stolen) {
-		drm_mm_remove_node(obj->stolen);
+		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
@@ -448,8 +475,7 @@
 	if (!stolen)
 		return NULL;
 
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
-				 4096, DRM_MM_SEARCH_DEFAULT);
+	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
 	if (ret) {
 		kfree(stolen);
 		return NULL;
@@ -459,7 +485,7 @@
 	if (obj)
 		return obj;
 
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	return NULL;
 }
@@ -494,7 +520,9 @@
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
+	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
 		kfree(stolen);
@@ -504,7 +532,7 @@
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (obj == NULL) {
 		DRM_DEBUG_KMS("failed to allocate stolen object\n");
-		drm_mm_remove_node(stolen);
+		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
 		return NULL;
 	}
@@ -545,7 +573,7 @@
 err_vma:
 	i915_gem_vma_destroy(vma);
 err_out:
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	drm_gem_object_unreference(&obj->base);
 	return NULL;
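
The insert/remove wrappers above put every drm_mm operation on the stolen allocator under mm.stolen_lock. A userspace analogue of that locking discipline, using a toy bump allocator in place of drm_mm (all names here are invented for illustration):

#include <pthread.h>
#include <stdio.h>

/* Toy stand-ins for drm_mm; the point is the locking discipline,
 * not the allocator itself. */
struct toy_mm { pthread_mutex_t lock; unsigned long next_free, end; };
struct toy_node { unsigned long start, size; };

static int toy_insert_node(struct toy_mm *mm, struct toy_node *node,
			   unsigned long size)
{
	int ret = 0;

	pthread_mutex_lock(&mm->lock);
	if (mm->next_free + size > mm->end) {
		ret = -1;	/* -ENOSPC in the real code */
	} else {
		node->start = mm->next_free;
		node->size = size;
		mm->next_free += size;
	}
	pthread_mutex_unlock(&mm->lock);
	return ret;
}

int main(void)
{
	struct toy_mm mm = { PTHREAD_MUTEX_INITIALIZER, 0, 1 << 20 };
	struct toy_node node;

	if (!toy_insert_node(&mm, &node, 4096))
		printf("node at %lu, %lu bytes\n", node.start, node.size);
	return 0;
}

Presumably the dedicated lock is there so stolen-memory management no longer depends on broader driver locking; every caller funnels through the two wrappers rather than touching dev_priv->mm.stolen directly.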
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d19c9db..8a6717c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -31,201 +31,32 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled.  However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y.  So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip --  Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics.  This
- * is called "Channel XOR Randomization" in the MCH documentation.  The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
- *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
- *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
- *
- * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
- *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
 /**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * DOC: buffer object tiling
+ *
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface
+ * used to declare fence register requirements.
+ *
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are
+ * two exceptions:
+ *
+ * - For X and Y tiling the hardware provides detilers for CPU access, so-called
+ *   fences. Since there's only a limited amount of them the kernel must manage
+ *   these, and therefore userspace must tell the kernel the object tiling if it
+ *   wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ *   depends upon the physical page frame number. When swapping such objects the
+ *   page frame number might change and the kernel must be able to fix this up
+ *   and hence needs to know the tiling. Note that on a subset of platforms with
+ *   asymmetric memory channel population the swizzling pattern changes in an
+ *   unknown way, and for those the kernel simply forbids swapping completely.
+ *
+ * Since neither of these applies to new tiling layouts on modern platforms like
+ * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
+ * Anything else can be handled entirely in userspace without the kernel's
+ * involvement.
  */
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
-	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
-		/*
-		 * On BDW+, swizzling is not used. We leave the CPU memory
-		 * controller in charge of optimizing memory accesses without
-		 * the extra address manipulation GPU side.
-		 *
-		 * VLV and CHV don't have GPU swizzling.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		if (dev_priv->preserve_bios_swizzle) {
-			if (I915_READ(DISP_ARB_CTL) &
-			    DISP_TILE_SURFACE_SWIZZLING) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		} else {
-			uint32_t dimm_c0, dimm_c1;
-			dimm_c0 = I915_READ(MAD_DIMM_C0);
-			dimm_c1 = I915_READ(MAD_DIMM_C1);
-			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			/* Enable swizzling when the channels are populated
-			 * with identically sized dimms. We don't need to check
-			 * the 3rd channel because no cpu with gpu attached
-			 * ships in that configuration. Also, swizzling only
-			 * makes sense for 2 channels anyway. */
-			if (dimm_c0 == dimm_c1) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		}
-	} else if (IS_GEN5(dev)) {
-		/* On Ironlake whatever DRAM config, GPU always do
-		 * same swizzling setup.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN2(dev)) {
-		/* As far as we know, the 865 doesn't have these bit 6
-		 * swizzling issues.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
-		uint32_t dcc;
-
-		/* On 9xx chipsets, channel interleave by the CPU is
-		 * determined by DCC.  For single-channel, neither the CPU
-		 * nor the GPU do swizzling.  For dual channel interleaved,
-		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
-		 * 9 for Y tiled.  The CPU's interleave is independent, and
-		 * can be based on either bit 11 (haven't seen this yet) or
-		 * bit 17 (common).
-		 */
-		dcc = I915_READ(DCC);
-		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
-		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			break;
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
-				/* This is the base swizzling by the GPU for
-				 * tiled buffers.
-				 */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-				/* Bit 11 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
-			} else {
-				/* Bit 17 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
-			}
-			break;
-		}
-
-		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN4(dev)) {
-			uint32_t ddc2 = I915_READ(DCC2);
-
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-		}
-
-		if (dcc == 0xffffffff) {
-			DRM_ERROR("Couldn't read from MCHBAR.  "
-				  "Disabling tiling.\n");
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-		}
-	} else {
-		/* The 965, G33, and newer, have a very flexible memory
-		 * configuration.  It will enable dual-channel mode
-		 * (interleaving) on as much memory as it can, and the GPU
-		 * will additionally sometimes enable different bit 6
-		 * swizzling for tiled objects from the CPU.
-		 *
-		 * Here's what I found on the G965:
-		 *    slot fill         memory size  swizzling
-		 * 0A   0B   1A   1B    1-ch   2-ch
-		 * 512  0    0    0     512    0     O
-		 * 512  0    512  0     16     1008  X
-		 * 512  0    0    512   16     1008  X
-		 * 0    512  0    512   16     1008  X
-		 * 1024 1024 1024 0     2048   1024  O
-		 *
-		 * We could probably detect this based on either the DRB
-		 * matching, which was the case for the swizzling required in
-		 * the table above, or from the 1-ch value being less than
-		 * the minimum size of a rank.
-		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		}
-	}
-
-	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
 
 /* Check pitch constriants for all chips & tiling formats */
 static bool
@@ -313,8 +144,18 @@
 }
 
 /**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Sets the tiling mode of an object, returning the required swizzling of
  * bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@
 }
 
 /**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -478,75 +329,3 @@
 
 	return 0;
 }
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
-	char temp[64];
-	char *vaddr;
-	int i;
-
-	vaddr = kmap(page);
-
-	for (i = 0; i < PAGE_SIZE; i += 128) {
-		memcpy(temp, &vaddr[i], 64);
-		memcpy(&vaddr[i], &vaddr[i + 64], 64);
-		memcpy(&vaddr[i + 64], temp, 64);
-	}
-
-	kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int i;
-
-	if (obj->bit_17 == NULL)
-		return;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(page);
-			set_page_dirty(page);
-		}
-		i++;
-	}
-}
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int page_count = obj->base.size >> PAGE_SHIFT;
-	int i;
-
-	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
-				      sizeof(long), GFP_KERNEL);
-		if (obj->bit_17 == NULL) {
-			DRM_ERROR("Failed to allocate memory for bit 17 "
-				  "record\n");
-			return;
-		}
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
-			__set_bit(i, obj->bit_17);
-		else
-			__clear_bit(i, obj->bit_17);
-		i++;
-	}
-}
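
The bit-17 swizzle fixup that moves out of this file (see the removed i915_gem_swizzle_page() above) swaps every pair of adjacent 64-byte blocks within a page. A self-contained version of that loop, operating on a plain buffer instead of a kmapped page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Exchange the two 64-byte halves of every 128-byte chunk, exactly as
 * the removed helper did, to compensate for a flipped bit 17 in the
 * page's physical address. */
static void swizzle_page(char *vaddr)
{
	char temp[64];
	int i;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
}

int main(void)
{
	static char page[PAGE_SIZE];
	int i;

	for (i = 0; i < PAGE_SIZE; i++)
		page[i] = i / 64;	/* label each 64-byte block */

	swizzle_page(page);
	printf("block 0 now holds label %d\n", page[0]);	/* prints 1 */
	return 0;
}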
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6f42569..41d0739 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -369,6 +369,7 @@
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@
 static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
+	error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+	error->iommu = intel_iommu_gfx_mapped;
+#endif
 	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	error->suspend_count = dev_priv->suspend_count;
 }
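
The capture side records IOMMU state as a tri-state so the error dump can distinguish "not built in" from "off". A compressed sketch of that convention (CONFIG_INTEL_IOMMU_DEMO is a hypothetical stand-in for the real Kconfig symbol):

#include <stdio.h>

int main(void)
{
	int iommu = -1;	/* -1 == "unknown / not compiled in" */

#ifdef CONFIG_INTEL_IOMMU_DEMO	/* hypothetical Kconfig stand-in */
	iommu = 1;
#endif

	printf("IOMMU enabled?: %d\n", iommu);
	return 0;
}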
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 0000000..ccdc6c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS			0xc000
+#define   GS_BOOTROM_SHIFT		1
+#define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_RSA_FAILED		  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_UKERNEL_SHIFT		8
+#define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_DPC_ERROR		  (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
+#define   GS_MIA_SHIFT			16
+#define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE			0xc050
+#define   GUC_WOPCM_SIZE_VALUE  	  (0x80 << 12)	/* 512KB */
+#define GUC_WOPCM_OFFSET		0x80000		/* 512KB */
+
+#define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0		0xc200
+#define DMA_ADDR_0_LOW			0xc300
+#define DMA_ADDR_0_HIGH			0xc304
+#define DMA_ADDR_1_LOW			0xc308
+#define DMA_ADDR_1_HIGH			0xc30c
+#define   DMA_ADDRESS_SPACE_WOPCM	  (7 << 16)
+#define   DMA_ADDRESS_SPACE_GTT		  (8 << 16)
+#define DMA_COPY_SIZE			0xc310
+#define DMA_CTRL			0xc314
+#define   UOS_MOVE			  (1<<4)
+#define   START_DMA			  (1<<0)
+#define DMA_GUC_WOPCM_OFFSET		0xc340
+
+#define GEN8_GT_PM_CONFIG		0x138140
+#define GEN9_GT_PM_CONFIG		0x13816c
+#define   GEN8_GT_DOORBELL_ENABLE	  (1<<0)
+
+#define GEN8_GTCR			0x4274
+#define   GEN8_GTCR_INVALIDATE		  (1<<0)
+
+#define GUC_ARAT_C6DIS			0xA178
+
+#define GUC_SHIM_CONTROL		0xc064
+#define   GUC_DISABLE_SRAM_INIT_TO_ZEROES	(1<<0)
+#define   GUC_ENABLE_READ_CACHE_LOGIC		(1<<1)
+#define   GUC_ENABLE_MIA_CACHING		(1<<2)
+#define   GUC_GEN10_MSGCH_ENABLE		(1<<4)
+#define   GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	(1<<9)
+#define   GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	(1<<10)
+#define   GUC_ENABLE_MIA_CLOCK_GATING		(1<<15)
+#define   GUC_GEN10_SHIM_WC_ENABLE		(1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE	(GUC_DISABLE_SRAM_INIT_TO_ZEROES	| \
+				 GUC_ENABLE_READ_CACHE_LOGIC		| \
+				 GUC_ENABLE_MIA_CACHING			| \
+				 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	| \
+				 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT		0xc4c8
+#define   HOST2GUC_TRIGGER		  (1<<0)
+
+#define DRBMISC1			0x1984
+#define   DOORBELL_ENABLE		  (1<<0)
+
+#define GEN8_DRBREGL(x)			(0x1000 + (x) * 8)
+#define   GEN8_DRB_VALID		  (1<<0)
+#define GEN8_DRBREGU(x)			(GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR			0x44054
+
+#define GUC_BCS_RCS_IER			0xC550
+#define GUC_VCS2_VCS1_IER		0xC554
+#define GUC_WD_VECS_IER			0xC558
+#define GUC_PM_P24C_IER			0xC55C
+
+#endif
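
A small decoder for the GUC_STATUS fields defined above, reusing the header's masks and shifts; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

#define GS_BOOTROM_SHIFT	1
#define GS_BOOTROM_MASK		(0x7F << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT	8
#define GS_UKERNEL_MASK		(0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY	(0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT		16
#define GS_MIA_MASK		(0x07 << GS_MIA_SHIFT)

int main(void)
{
	uint32_t status = 0x0002f042;	/* made-up sample readout */

	/* Each field is isolated with its mask, then shifted down. */
	printf("bootrom %#x, ukernel %#x, mia %#x, ready=%d\n",
	       (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT,
	       (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT,
	       (status & GS_MIA_MASK) >> GS_MIA_SHIFT,
	       (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY);
	return 0;
}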
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 23aa04c..97f3a56 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -35,107 +35,20 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-typedef struct _drm_i915_batchbuffer32 {
-	int start;		/* agp offset */
-	int used;		/* nr bytes in use */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
-				   unsigned long arg)
-{
-	drm_i915_batchbuffer32_t batchbuffer32;
-	drm_i915_batchbuffer_t __user *batchbuffer;
-
-	if (copy_from_user
-	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
-		return -EFAULT;
-
-	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
-	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
-	    || __put_user(batchbuffer32.start, &batchbuffer->start)
-	    || __put_user(batchbuffer32.used, &batchbuffer->used)
-	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
-	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
-	    || __put_user(batchbuffer32.num_cliprects,
-			  &batchbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
-			  &batchbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
-			 (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
-	u32 buf;		/* pointer to userspace command buffer */
-	int sz;			/* nr bytes in buf */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
-				 unsigned long arg)
-{
-	drm_i915_cmdbuffer32_t cmdbuffer32;
-	drm_i915_cmdbuffer_t __user *cmdbuffer;
-
-	if (copy_from_user
-	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
-		return -EFAULT;
-
-	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
-	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
-			  &cmdbuffer->buf)
-	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
-	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
-	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
-	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
-			  &cmdbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
-			 (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
-	u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
-				unsigned long arg)
-{
-	drm_i915_irq_emit32_t req32;
-	drm_i915_irq_emit_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
-			  &request->irq_seq))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
-			 (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
-	int param;
+struct drm_i915_getparam32 {
+	s32 param;
+	/*
+	 * We screwed up the generic ioctl struct here and used a variable-sized
+	 * pointer. Use u32 in the compat struct to match the 32-bit pointer
+	 * userspace expects.
+	 */
 	u32 value;
-} drm_i915_getparam32_t;
+};
 
 static int compat_i915_getparam(struct file *file, unsigned int cmd,
 				unsigned long arg)
 {
-	drm_i915_getparam32_t req32;
+	struct drm_i915_getparam32 req32;
 	drm_i915_getparam_t __user *request;
 
 	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@
 			 (unsigned long)request);
 }
 
-typedef struct drm_i915_mem_alloc32 {
-	int region;
-	int alignment;
-	int size;
-	u32 region_offset;	/* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_i915_mem_alloc32_t req32;
-	drm_i915_mem_alloc_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user(req32.region, &request->region)
-	    || __put_user(req32.alignment, &request->alignment)
-	    || __put_user(req32.size, &request->size)
-	    || __put_user((void __user *)(unsigned long)req32.region_offset,
-			  &request->region_offset))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
-			 (unsigned long)request);
-}
-
 static drm_ioctl_compat_t *i915_compat_ioctls[] = {
-	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
-	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
-	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
-	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
 /**
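
The getparam shim is kept because its struct embeds a pointer: on a 64-bit kernel the native struct and the 32-bit userspace struct disagree on size and layout, so the compat copy must use a fixed-width u32. A quick demonstration with simplified types:

#include <stdint.h>
#include <stdio.h>

/* What a 64-bit kernel sees for the native ioctl argument. */
struct getparam_native {
	int param;
	int *value;	/* 8 bytes plus padding on 64-bit */
};

/* What 32-bit userspace actually passed: the pointer stored as a u32. */
struct getparam_compat {
	int32_t param;
	uint32_t value;
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct getparam_native),
	       sizeof(struct getparam_compat));	/* e.g. 16 vs 8 on x86-64 */
	return 0;
}

This is why the compat handler copies the u32 into a properly sized kernel-side struct before forwarding to drm_ioctl(), while the other shims (for long-dead legacy ioctls) can simply be deleted.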
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 984e2fe..b5fb143 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -61,6 +61,13 @@
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
 };
 
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -564,8 +571,7 @@
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-	const struct drm_display_mode *mode =
-		&intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
 	htotal = mode->crtc_htotal;
 	hsync_start = mode->crtc_hsync_start;
@@ -620,7 +626,7 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &crtc->base.hwmode;
 	enum pipe pipe = crtc->pipe;
 	int position, vtotal;
 
@@ -647,14 +653,14 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 	int position;
 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
 	unsigned long irqflags;
 
-	if (!intel_crtc->active) {
+	if (WARN_ON(!mode->crtc_clock)) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
@@ -796,7 +802,7 @@
 		return -EINVAL;
 	}
 
-	if (!crtc->state->enable) {
+	if (!crtc->hwmode.crtc_clock) {
 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 		return -EBUSY;
 	}
@@ -805,151 +811,7 @@
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
 						     crtc,
-						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
-				struct drm_connector *connector)
-{
-	enum drm_connector_status old_status;
-
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-	old_status = connector->status;
-
-	connector->status = connector->funcs->detect(connector, false);
-	if (old_status == connector->status)
-		return false;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-		      connector->base.id,
-		      connector->name,
-		      drm_get_connector_status_name(old_status),
-		      drm_get_connector_status_name(connector->status));
-
-	return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, dig_port_work);
-	u32 long_port_mask, short_port_mask;
-	struct intel_digital_port *intel_dig_port;
-	int i;
-	u32 old_bits = 0;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	long_port_mask = dev_priv->long_hpd_port_mask;
-	dev_priv->long_hpd_port_mask = 0;
-	short_port_mask = dev_priv->short_hpd_port_mask;
-	dev_priv->short_hpd_port_mask = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		bool valid = false;
-		bool long_hpd = false;
-		intel_dig_port = dev_priv->hpd_irq_port[i];
-		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
-			continue;
-
-		if (long_port_mask & (1 << i))  {
-			valid = true;
-			long_hpd = true;
-		} else if (short_port_mask & (1 << i))
-			valid = true;
-
-		if (valid) {
-			enum irqreturn ret;
-
-			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-			if (ret == IRQ_NONE) {
-				/* fall back to old school hpd */
-				old_bits |= (1 << intel_dig_port->base.hpd_pin);
-			}
-		}
-	}
-
-	if (old_bits) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->hpd_event_bits |= old_bits;
-		spin_unlock_irq(&dev_priv->irq_lock);
-		schedule_work(&dev_priv->hotplug_work);
-	}
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, hotplug_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct intel_connector *intel_connector;
-	struct intel_encoder *intel_encoder;
-	struct drm_connector *connector;
-	bool hpd_disabled = false;
-	bool changed = false;
-	u32 hpd_event_bits;
-
-	mutex_lock(&mode_config->mutex);
-	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	hpd_event_bits = dev_priv->hpd_event_bits;
-	dev_priv->hpd_event_bits = 0;
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (intel_encoder->hpd_pin > HPD_NONE &&
-		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
-		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
-			DRM_INFO("HPD interrupt storm detected on connector %s: "
-				 "switching from hotplug detection to polling\n",
-				connector->name);
-			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT
-				| DRM_CONNECTOR_POLL_DISCONNECT;
-			hpd_disabled = true;
-		}
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-				      connector->name, intel_encoder->hpd_pin);
-		}
-	}
-	 /* if there were no outputs to poll, poll was disabled,
-	  * therefore make sure it's enabled when disabling HPD on
-	  * some connectors */
-	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
-		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
-				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
-	}
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			if (intel_encoder->hot_plug)
-				intel_encoder->hot_plug(intel_encoder);
-			if (intel_hpd_irq_event(dev, connector))
-				changed = true;
-		}
-	}
-	mutex_unlock(&mode_config->mutex);
-
-	if (changed)
-		drm_kms_helper_hotplug_event(dev);
+						     &crtc->hwmode);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1234,80 @@
 	return ret;
 }
 
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
 	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
+		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
 	case PORT_B:
-		return 0;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 8;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 16;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
 	}
 }
 
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
-	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
 	case PORT_B:
-		return 17;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 19;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 21;
-	}
-}
-
-static enum port get_port_from_pin(enum hpd_pin pin)
-{
-	switch (pin) {
-	case HPD_PORT_B:
-		return PORT_B;
-	case HPD_PORT_C:
-		return PORT_C;
-	case HPD_PORT_D:
-		return PORT_D;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	case PORT_E:
+		return val & PORTE_HOTPLUG_LONG_DETECT;
 	default:
-		return PORT_A; /* no hpd */
+		return false;
 	}
 }
 
-static void intel_hpd_irq_handler(struct drm_device *dev,
-				  u32 hotplug_trigger,
-				  u32 dig_hotplug_reg,
-				  const u32 hpd[HPD_NUM_PINS])
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	switch (port) {
+	case PORT_B:
+		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
+	default:
+		return false;
+	}
+}
+
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+			     u32 hotplug_trigger, u32 dig_hotplug_reg,
+			     const u32 hpd[HPD_NUM_PINS],
+			     bool long_pulse_detect(enum port port, u32 val))
+{
 	enum port port;
-	bool storm_detected = false;
-	bool queue_dig = false, queue_hp = false;
-	u32 dig_shift;
-	u32 dig_port_mask = 0;
+	int i;
 
-	if (!hotplug_trigger)
-		return;
+	*pin_mask = 0;
+	*long_mask = 0;
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
-			 hotplug_trigger, dig_hotplug_reg);
-
-	spin_lock(&dev_priv->irq_lock);
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (!(hpd[i] & hotplug_trigger))
+	for_each_hpd_pin(i) {
+		if ((hpd[i] & hotplug_trigger) == 0)
 			continue;
 
-		port = get_port_from_pin(i);
-		if (port && dev_priv->hpd_irq_port[port]) {
-			bool long_hpd;
+		*pin_mask |= BIT(i);
 
-			if (!HAS_GMCH_DISPLAY(dev_priv)) {
-				dig_shift = pch_port_to_hotplug_shift(port);
-				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			} else {
-				dig_shift = i915_port_to_hotplug_shift(port);
-				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			}
+		if (!intel_hpd_pin_to_port(i, &port))
+			continue;
 
-			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
-					 port_name(port),
-					 long_hpd ? "long" : "short");
-			/* for long HPD pulses we want to have the digital queue happen,
-			   but we still want HPD storm detection to function. */
-			if (long_hpd) {
-				dev_priv->long_hpd_port_mask |= (1 << port);
-				dig_port_mask |= hpd[i];
-			} else {
-				/* for short HPD just trigger the digital queue */
-				dev_priv->short_hpd_port_mask |= (1 << port);
-				hotplug_trigger &= ~hpd[i];
-			}
-			queue_dig = true;
-		}
+		if (long_pulse_detect(port, dig_hotplug_reg))
+			*long_mask |= BIT(i);
 	}
 
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (hpd[i] & hotplug_trigger &&
-		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
-			/*
-			 * On GMCH platforms the interrupt mask bits only
-			 * prevent irq generation, not the setting of the
-			 * hotplug bits itself. So only WARN about unexpected
-			 * interrupts on saner platforms.
-			 */
-			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
-				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-				  hotplug_trigger, i, hpd[i]);
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
 
-			continue;
-		}
-
-		if (!(hpd[i] & hotplug_trigger) ||
-		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
-			continue;
-
-		if (!(dig_port_mask & hpd[i])) {
-			dev_priv->hpd_event_bits |= (1 << i);
-			queue_hp = true;
-		}
-
-		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
-				   dev_priv->hpd_stats[i].hpd_last_jiffies
-				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
-			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
-			dev_priv->hpd_stats[i].hpd_cnt = 0;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
-		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
-			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
-			dev_priv->hpd_event_bits &= ~(1 << i);
-			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
-			storm_detected = true;
-		} else {
-			dev_priv->hpd_stats[i].hpd_cnt++;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
-				      dev_priv->hpd_stats[i].hpd_cnt);
-		}
-	}
-
-	if (storm_detected)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock(&dev_priv->irq_lock);
-
-	/*
-	 * Our hotplug handler can grab modeset locks (by calling down into the
-	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
-	 * queue for otherwise the flush_work in the pageflip code will
-	 * deadlock.
-	 */
-	if (queue_dig)
-		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
-	if (queue_hp)
-		schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
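
A self-contained model of the new two-mask scheme: one pass over the pin table turns the raw trigger bits into a pin mask plus a "long pulse" mask via a per-platform detect callback. All tables and bit positions below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum pin { PIN_B, PIN_C, PIN_D, NUM_PINS };

/* Invented trigger bits per pin, standing in for the hpd_*[] tables. */
static const uint32_t hpd_table[NUM_PINS] = {
	[PIN_B] = BIT(0), [PIN_C] = BIT(4), [PIN_D] = BIT(8),
};

/* Invented "long pulse" status bits, one per pin. */
static int long_detect(enum pin pin, uint32_t status)
{
	return status & BIT(16 + pin);
}

static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			 uint32_t trigger, uint32_t status)
{
	enum pin pin;

	*pin_mask = 0;
	*long_mask = 0;

	for (pin = 0; pin < NUM_PINS; pin++) {
		if (!(hpd_table[pin] & trigger))
			continue;
		*pin_mask |= BIT(pin);
		if (long_detect(pin, status))
			*long_mask |= BIT(pin);
	}
}

int main(void)
{
	uint32_t pins, longs;

	/* Pins B and D triggered; only D reports a long pulse. */
	get_hpd_pins(&pins, &longs, BIT(0) | BIT(8), BIT(18));
	printf("pins 0x%x, long 0x%x\n", pins, longs);	/* 0x5, 0x4 */
	return 0;
}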
@@ -1755,28 +1532,35 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 pin_mask, long_mask;
 
-	if (hotplug_status) {
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-		/*
-		 * Make sure hotplug status is cleared before we clear IIR, or else we
-		 * may miss hotplug events.
-		 */
-		POSTING_READ(PORT_HOTPLUG_STAT);
+	if (!hotplug_status)
+		return;
 
-		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
-		} else {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
-		}
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_g4x,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 
-		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
 			dp_aux_irq_handler(dev);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_i915,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 	}
 }
 
@@ -1875,12 +1659,18 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
-	u32 dig_hotplug_reg;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   dig_hotplug_reg, hpd_ibx,
+				   pch_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1971,13 +1761,38 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-	u32 dig_hotplug_reg;
+	u32 hotplug_trigger;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (HAS_PCH_SPT(dev))
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+	else
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
+
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		if (HAS_PCH_SPT(dev)) {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_spt,
+					   pch_port_hotplug_long_detect);
+
+			/* detect PORTE HP event */
+			dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+			if (pch_port_hotplug_long_detect(PORT_E,
+							 dig_hotplug_reg))
+				long_mask |= 1 << HPD_PORT_E;
+		} else {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_cpt,
+					   pch_port_hotplug_long_detect);
+		}
+
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1991,8 @@
 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t hp_control;
-	uint32_t hp_trigger;
+	u32 hp_control, hp_trigger;
+	u32 pin_mask, long_mask;
 
 	/* Get the status */
 	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +2004,12 @@
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-		hp_control & BXT_HOTPLUG_CTL_MASK);
-
-	/* Check for HPD storm and schedule bottom half */
-	intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
-	/*
-	 * FIXME: Save the hot plug status for bottom half before
-	 * clearing the sticky status bits, else the status will be
-	 * lost.
-	 */
-
 	/* Clear sticky bits in hpd status */
 	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+			   hpd_bxt, bxt_port_hotplug_long_detect);
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
@@ -2446,7 +2253,7 @@
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
-			atomic_set_mask(I915_WEDGED, &error->reset_counter);
+			atomic_or(I915_WEDGED, &error->reset_counter);
 		}
 
 		/*
@@ -2574,7 +2381,7 @@
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
 				&dev_priv->gpu_error.reset_counter);
 
 		/*
@@ -3203,12 +3010,17 @@
 	if (HAS_PCH_IBX(dev)) {
 		hotplug_irqs = SDE_HOTPLUG_MASK;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+	} else if (HAS_PCH_SPT(dev)) {
+		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+		for_each_intel_encoder(dev, intel_encoder)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
 	} else {
 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
 	}
 
@@ -3226,6 +3038,13 @@
 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+	/* enable SPT PORTE hot plug */
+	if (HAS_PCH_SPT(dev)) {
+		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+		hotplug |= PORTE_HOTPLUG_ENABLE;
+		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+	}
 }
 
 static void bxt_hpd_irq_setup(struct drm_device *dev)
@@ -3237,7 +3056,7 @@
 
 	/* Now, enable HPD */
 	for_each_intel_encoder(dev, intel_encoder) {
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
 				== HPD_ENABLED)
 			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
 	}
@@ -4130,7 +3949,7 @@
 	/* Note HDMI and DP share hotplug bits */
 	/* enable bits are the same for all generations */
 	for_each_intel_encoder(dev, intel_encoder)
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
 	/* Programming the CRT detection parameters tends
 	   to generate a spurious hotplug event about three
@@ -4270,46 +4089,6 @@
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
-			     hotplug_reenable_work.work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	int i;
-
-	intel_runtime_pm_get(dev_priv);
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
-		struct drm_connector *connector;
-
-		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
-			continue;
-
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_connector *intel_connector = to_intel_connector(connector);
-
-			if (intel_connector->encoder->hpd_pin == i) {
-				if (connector->polled != intel_connector->polled)
-					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-							 connector->name);
-				connector->polled = intel_connector->polled;
-				if (!connector->polled)
-					connector->polled = DRM_CONNECTOR_POLL_HPD;
-			}
-		}
-	}
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	intel_runtime_pm_put(dev_priv);
-}
-
 /**
  * intel_irq_init - initializes irq support
  * @dev_priv: i915 device instance
@@ -4321,8 +4100,8 @@
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+	intel_hpd_init_work(dev_priv);
+
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
@@ -4335,8 +4114,6 @@
 
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
-	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-			  intel_hpd_irq_reenable_work);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
@@ -4422,46 +4199,6 @@
 }
 
 /**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
-	int i;
-
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		dev_priv->hpd_stats[i].hpd_cnt = 0;
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-	}
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-		if (intel_connector->mst_port)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-	}
-
-	/* Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked checks happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
  *
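The intel_hpd_init() and intel_hpd_irq_reenable_work() bodies deleted above are relocated rather than dropped: the hotplug machinery moves into its own file, and the individual INIT_WORK calls in intel_irq_init() collapse into the single intel_hpd_init_work() seen earlier. A plausible shape for that helper, assuming it merely gathers the setup it replaces (the dev_priv->hotplug member names are assumptions):

/* Sketch only; not quoted from the new hotplug file. */
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_reenable_work);
}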
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ac5a1b..5ae4b0a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -28,7 +28,6 @@
 	.modeset = -1,
 	.panel_ignore_lid = 1,
 	.semaphores = -1,
-	.lvds_downclock = 0,
 	.lvds_channel_mode = 0,
 	.panel_use_ssc = -1,
 	.vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
 	.verbose_state_checks = 1,
-	.nuclear_pageflip = 0,
 	.edp_vswing = 0,
+	.enable_guc_submission = false,
+	.guc_log_level = -1,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
 MODULE_PARM_DESC(modeset,
-	"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+	"Use kernel modesetting [KMS] (0=disable, "
 	"1=on, -1=force vga console preference [default])");
 
 module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@
 	"Enable frame buffer compression for power savings "
 	"(default: -1 (use per-chip default))");
 
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-	"Use panel (LVDS/eDP) downclocking for power savings "
-	"(default: false)");
-
 module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
 MODULE_PARM_DESC(lvds_channel_mode,
 	 "Specify LVDS channel mode "
@@ -104,7 +99,7 @@
 	"Override/Ignore selection of SDVO panel mode in the VBT "
 	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
 module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@
 MODULE_PARM_DESC(verbose_state_checks,
 	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
-		 "Force atomic modeset functionality; only planes work for now (default: false).");
-
 /* WA to get away with the default setting in VBT for early platforms. Will be removed */
 module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
 MODULE_PARM_DESC(edp_vswing,
 		 "Ignore/Override vswing pre-emph table selection from VBT "
 		 "(0=use value from vbt [default], 1=low power swing(200mV),"
 		 "2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
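The reset parameter switches to module_param_named_unsafe(), the variant that taints the kernel when the option is actually set, and the experimental enable_guc_submission knob gets the same treatment, while guc_log_level stays a plain parameter. A hedged illustration of the declaration pattern (the "foo" knob is hypothetical, not a real i915 option):

/* Hypothetical parameter, shown only for the declaration pattern. */
module_param_named_unsafe(foo, i915.foo, int, 0400);
MODULE_PARM_DESC(foo,
	"Example experimental knob (default: 0); the _unsafe variant "
	"taints the kernel when set");

In use, booting with i915.guc_log_level=3 would request the most verbose GuC firmware logging per the description above.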
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2030f60..83a0888 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -50,12 +50,17 @@
 
 /* PCI config space */
 
-#define HPLLCC	0xc0 /* 855 only */
-#define   GC_CLOCK_CONTROL_MASK		(0xf << 0)
+#define HPLLCC	0xc0 /* 85x only */
+#define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
 #define   GC_CLOCK_133_200		(0 << 0)
 #define   GC_CLOCK_100_200		(1 << 0)
 #define   GC_CLOCK_100_133		(2 << 0)
-#define   GC_CLOCK_166_250		(3 << 0)
+#define   GC_CLOCK_133_266		(3 << 0)
+#define   GC_CLOCK_133_200_2		(4 << 0)
+#define   GC_CLOCK_133_266_2		(5 << 0)
+#define   GC_CLOCK_166_266		(6 << 0)
+#define   GC_CLOCK_166_250		(7 << 0)
+
 #define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
@@ -155,6 +160,7 @@
 #define GAM_ECOCHK			0x4090
 #define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
 #define   ECOCHK_SNB_BIT		(1<<10)
+#define   ECOCHK_DIS_TLB		(1<<8)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
 #define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
@@ -172,13 +178,22 @@
 #define GAB_CTL				0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
 
-#define GEN7_BIOS_RESERVED		0x1082C0
-#define GEN7_BIOS_RESERVED_1M		(0 << 5)
-#define GEN7_BIOS_RESERVED_256K		(1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT       7
-#define GEN7_BIOS_RESERVED_MASK        0x1
-#define GEN8_BIOS_RESERVED_MASK        0x3
-
+#define GEN6_STOLEN_RESERVED		0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK	(0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK	(0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK	(3 << 4)
+#define GEN6_STOLEN_RESERVED_1M		(0 << 4)
+#define GEN6_STOLEN_RESERVED_512K	(1 << 4)
+#define GEN6_STOLEN_RESERVED_256K	(2 << 4)
+#define GEN6_STOLEN_RESERVED_128K	(3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK	(1 << 5)
+#define GEN7_STOLEN_RESERVED_1M		(0 << 5)
+#define GEN7_STOLEN_RESERVED_256K	(1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK	(3 << 7)
+#define GEN8_STOLEN_RESERVED_1M		(0 << 7)
+#define GEN8_STOLEN_RESERVED_2M		(1 << 7)
+#define GEN8_STOLEN_RESERVED_4M		(2 << 7)
+#define GEN8_STOLEN_RESERVED_8M		(3 << 7)
 
 /* VGA stuff */
 
@@ -316,6 +331,8 @@
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
 #define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
@@ -347,6 +364,8 @@
 #define   MI_INVALIDATE_BSD		(1<<7)
 #define   MI_FLUSH_DW_USE_GTT		(1<<2)
 #define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE		(1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
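The parameterised MI_LOAD_REGISTER_MEM encodings fold the DWord arithmetic into the macro: the MI length field carries (total DWords - 2), and each register/address pair costs a register-offset DWord plus a 32-bit address before gen8 or a 64-bit address on gen8+. Worked out for x pairs (a sketch following the macro arithmetic above):

/* pre-gen8: 1 header + x * (reg + 32-bit addr) = 1 + 2x DWords,
 *           length field = (1 + 2x) - 2 = 2*(x) - 1
 * gen8+:    1 header + x * (reg + 64-bit addr) = 1 + 3x DWords,
 *           length field = (1 + 3x) - 2 = 3*(x) - 1 */
u32 cmd      = MI_LOAD_REGISTER_MEM(1);      /* 3-DWord packet */
u32 cmd_gen8 = MI_LOAD_REGISTER_MEM_GEN8(1); /* 4-DWord packet */

This is also why the old fixed-length MI_LOAD_REGISTER_MEM define is removed further down.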
@@ -356,6 +375,7 @@
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
 
 #define MI_PREDICATE_SRC0	(0x2400)
 #define MI_PREDICATE_SRC1	(0x2408)
@@ -410,6 +430,7 @@
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
 #define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
 #define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
 #define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
 #define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
@@ -426,6 +447,7 @@
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
 #define   PIPE_CONTROL_NOTIFY				(1<<8)
 #define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
 #define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
 #define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
 #define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
@@ -449,7 +471,6 @@
 #define MI_CLFLUSH              MI_INSTR(0x27, 0)
 #define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
 #define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM    MI_INSTR(0x29, 0)
 #define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
 #define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
 #define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@
 #define _PORT_PLL_EBB_0_A		0x162034
 #define _PORT_PLL_EBB_0_B		0x6C034
 #define _PORT_PLL_EBB_0_C		0x6C340
-#define   PORT_PLL_P1_MASK		(0x07 << 13)
-#define   PORT_PLL_P1(x)		((x)  << 13)
-#define   PORT_PLL_P2_MASK		(0x1f << 8)
-#define   PORT_PLL_P2(x)		((x)  << 8)
+#define   PORT_PLL_P1_SHIFT		13
+#define   PORT_PLL_P1_MASK		(0x07 << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P1(x)		((x)  << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P2_SHIFT		8
+#define   PORT_PLL_P2_MASK		(0x1f << PORT_PLL_P2_SHIFT)
+#define   PORT_PLL_P2(x)		((x)  << PORT_PLL_P2_SHIFT)
 #define BXT_PORT_PLL_EBB_0(port)	_PORT3(port, _PORT_PLL_EBB_0_A, \
 						_PORT_PLL_EBB_0_B,	\
 						_PORT_PLL_EBB_0_C)
@@ -1186,8 +1209,9 @@
 /* PORT_PLL_0_A */
 #define   PORT_PLL_M2_MASK		0xFF
 /* PORT_PLL_1_A */
-#define   PORT_PLL_N_MASK		(0x0F << 8)
-#define   PORT_PLL_N(x)			((x) << 8)
+#define   PORT_PLL_N_SHIFT		8
+#define   PORT_PLL_N_MASK		(0x0F << PORT_PLL_N_SHIFT)
+#define   PORT_PLL_N(x)			((x) << PORT_PLL_N_SHIFT)
 /* PORT_PLL_2_A */
 #define   PORT_PLL_M2_FRAC_MASK		0x3FFFFF
 /* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@
 /* PORT_PLL_8_A */
 #define   PORT_PLL_TARGET_CNT_MASK	0x3FF
 /* PORT_PLL_9_A */
-#define  PORT_PLL_LOCK_THRESHOLD_MASK	0xe
+#define  PORT_PLL_LOCK_THRESHOLD_SHIFT	1
+#define  PORT_PLL_LOCK_THRESHOLD_MASK	(0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
 #define  PORT_PLL_DCO_AMP_OVR_EN_H	(1<<27)
+#define  PORT_PLL_DCO_AMP_DEFAULT	15
 #define  PORT_PLL_DCO_AMP_MASK		0x3c00
 #define  PORT_PLL_DCO_AMP(x)		(x<<10)
 #define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A,	\
@@ -1377,6 +1403,18 @@
 							_PORT_TX_DW14_LN0_C) + \
 					 _BXT_LANE_OFFSET(lane))
 
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1			0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK			0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0		0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port)		(8+3*(port))
+#define BALANCE_LEG_MASK(port)		(7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT	23
+
 /*
  * Fence registers
  */
@@ -1456,6 +1494,9 @@
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define RING_RESET_CTL(base)	((base)+0xd0)
+#define   RESET_CTL_REQUEST_RESET  (1 << 0)
+#define   RESET_CTL_READY_TO_RESET (1 << 1)
 
 #define HSW_GTT_CACHE_EN	0x4024
 #define   GTT_CACHE_EN_ALL	0xF0007FFF
@@ -1946,6 +1987,9 @@
 #define FBC_FENCE_OFF		0x03218 /* BSpec typo has 321Bh */
 #define FBC_TAG			0x03300
 
+#define FBC_STATUS2		0x43214
+#define  FBC_COMPRESSION_MASK	0x7ff
+
 #define FBC_LL_SIZE		(1536)
 
 /* Framebuffer compression for GM45+ */
@@ -2116,7 +2160,7 @@
 #define   DPLL_DVO_2X_MODE		(1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
-#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
+#define   DPLL_REF_CLK_ENABLE_VLV	(1 << 29)
 #define   DPLL_VGA_MODE_DIS		(1 << 28)
 #define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
 #define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 #define   DPLL_LOCK_VLV			(1<<15)
 #define   DPLL_INTEGRATED_CRI_CLK_VLV	(1<<14)
-#define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
-#define   DPLL_SSC_REF_CLOCK_CHV	(1<<13)
+#define   DPLL_INTEGRATED_REF_CLK_VLV	(1<<13)
+#define   DPLL_SSC_REF_CLK_CHV		(1<<13)
 #define   DPLL_PORTC_READY_MASK		(0xf << 4)
 #define   DPLL_PORTB_READY_MASK		(0xf)
 
@@ -2488,6 +2532,9 @@
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
+#define HPLLVCO                 (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE          (MCHBAR_MIRROR_BASE + 0xc0f)
+
 #define TSC1			0x11001
 #define   TSE			(1<<0)
 #define TR1			0x11006
@@ -2718,8 +2765,10 @@
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
 #define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS      (MCHBAR_MIRROR_BASE_SNB + 0x7070)
 #define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP        0x138170
 
 #define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
 #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
@@ -2767,7 +2816,8 @@
  * valid. Now, docs explain in dwords what is in the context object. The full
  * size is 70720 bytes, however, the power context and execlist context will
  * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
 /* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@
 #define   DSPARB_BSTART_SHIFT	0
 #define   DSPARB_BEND_SHIFT	9 /* on 855 */
 #define   DSPARB_AEND_SHIFT	0
-
+#define   DSPARB_SPRITEA_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEB_SHIFT_VLV	8
+#define   DSPARB_SPRITEB_MASK_VLV	(0xff << 8)
+#define   DSPARB_SPRITEC_SHIFT_VLV	16
+#define   DSPARB_SPRITEC_MASK_VLV	(0xff << 16)
+#define   DSPARB_SPRITED_SHIFT_VLV	24
+#define   DSPARB_SPRITED_MASK_VLV	(0xff << 24)
 #define DSPARB2			(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define   DSPARB_SPRITEA_HI_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_HI_MASK_VLV	(0x1 << 0)
+#define   DSPARB_SPRITEB_HI_SHIFT_VLV	4
+#define   DSPARB_SPRITEB_HI_MASK_VLV	(0x1 << 4)
+#define   DSPARB_SPRITEC_HI_SHIFT_VLV	8
+#define   DSPARB_SPRITEC_HI_MASK_VLV	(0x1 << 8)
+#define   DSPARB_SPRITED_HI_SHIFT_VLV	12
+#define   DSPARB_SPRITED_HI_MASK_VLV	(0x1 << 12)
+#define   DSPARB_SPRITEE_HI_SHIFT_VLV	16
+#define   DSPARB_SPRITEE_HI_MASK_VLV	(0x1 << 16)
+#define   DSPARB_SPRITEF_HI_SHIFT_VLV	20
+#define   DSPARB_SPRITEF_HI_MASK_VLV	(0x1 << 20)
 #define DSPARB3			(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define   DSPARB_SPRITEE_SHIFT_VLV	0
+#define   DSPARB_SPRITEE_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEF_SHIFT_VLV	8
+#define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1			(dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@
 #define HSW_NDE_RSTWRN_OPT	0x46408
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
 
+#define SKL_DFSM			0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK	(3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675	(0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540	(1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450	(2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5	(3 << 23)
+
 #define FF_SLICE_CS_CHICKEN2			0x20e4
 #define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
 
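SKL_DFSM exposes fused-off display limits, and the two-bit CDCLK field decodes to a ceiling on the display clock. A hedged sketch of how a consumer might turn the fuse into a limit in kHz (the function is illustrative, not the driver's actual reader):

/* Illustrative decode only. */
static int skl_max_cdclk_khz(struct drm_i915_private *dev_priv)
{
	u32 dfsm = I915_READ(SKL_DFSM);

	switch (dfsm & SKL_DFSM_CDCLK_LIMIT_MASK) {
	case SKL_DFSM_CDCLK_LIMIT_675:	return 675000;
	case SKL_DFSM_CDCLK_LIMIT_540:	return 540000;
	case SKL_DFSM_CDCLK_LIMIT_450:	return 450000;
	case SKL_DFSM_CDCLK_LIMIT_337_5:
	default:			return 337500;
	}
}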
@@ -5791,6 +5871,7 @@
 
 #define GEN8_L3SQCREG4				0xb118
 #define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1<<21)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				0x7300
@@ -5868,6 +5949,7 @@
 #define SDE_AUXC_CPT		(1 << 26)
 #define SDE_AUXB_CPT		(1 << 25)
 #define SDE_AUX_MASK_CPT	(7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT	(1 << 25)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
@@ -5878,6 +5960,10 @@
 				 SDE_PORTD_HOTPLUG_CPT |	\
 				 SDE_PORTC_HOTPLUG_CPT |	\
 				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT	(SDE_PORTE_HOTPLUG_SPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
 #define SDE_GMBUS_CPT		(1 << 17)
 #define SDE_ERROR_CPT		(1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
@@ -5913,6 +5999,11 @@
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE	(1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK	(0x3 << 24)
+#define  BXT_PORTA_HOTPLUG_NO_DETECT	(0 << 24)
+#define  BXT_PORTA_HOTPLUG_SHORT_DETECT	(1 << 24)
+#define  BXT_PORTA_HOTPLUG_LONG_DETECT	(2 << 24)
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
 #define PORTD_PULSE_DURATION_2ms        (0)
 #define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
@@ -5944,6 +6035,13 @@
 #define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
 
+#define PCH_PORT_HOTPLUG2        0xc403C		/* SHOTPLUG_CTL2 */
+#define PORTE_HOTPLUG_ENABLE            (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK	(0x3 << 0)
+#define  PORTE_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTE_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTE_HOTPLUG_LONG_DETECT	(2 << 0)
+
 #define PCH_GPIOA               0xc5010
 #define PCH_GPIOB               0xc5014
 #define PCH_GPIOC               0xc5018
@@ -6047,6 +6145,9 @@
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
+#define  GCP_COLOR_INDICATION		(1 << 2)
+#define  GCP_DEFAULT_PHASE_ENABLE	(1 << 1)
+#define  GCP_AV_MUTE			(1 << 0)
 
 #define _VIDEO_DIP_CTL_B         0xe1200
 #define _VIDEO_DIP_DATA_B        0xe1208
@@ -6186,6 +6287,7 @@
 #define _TRANSA_CHICKEN1	 0xf0060
 #define _TRANSB_CHICKEN1	 0xf1060
 #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE	(1<<10)
 #define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
 #define _TRANSA_CHICKEN2	 0xf0064
 #define _TRANSB_CHICKEN2	 0xf1064
@@ -6370,6 +6472,8 @@
 #define PCH_PP_CONTROL		0xc7204
 #define  PANEL_UNLOCK_REGS	(0xabcd << 16)
 #define  PANEL_UNLOCK_MASK	(0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
+#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
 #define  EDP_FORCE_VDD		(1 << 3)
 #define  EDP_BLC_ENABLE		(1 << 2)
 #define  PANEL_POWER_RESET	(1 << 1)
@@ -6398,6 +6502,17 @@
 #define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2 	0xc7300
+#define _BXT_PP_CONTROL2 	0xc7304
+#define _BXT_PP_ON_DELAYS2	0xc7308
+#define _BXT_PP_OFF_DELAYS2	0xc730c
+
+#define BXT_PP_STATUS(n)	((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)	((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)	((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)	((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
 #define PCH_DP_B		0xe4100
 #define PCH_DPB_AUX_CH_CTL	0xe4110
 #define PCH_DPB_AUX_CH_DATA1	0xe4114
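Broxton gains a second panel power sequencer, so the BXT_PP_*(n) helpers select between the legacy 0xc72xx block and the new 0xc73xx block by instance. A hedged usage sketch (the helper and the delay value are illustrative; pps_idx is 0 or 1):

/* Illustrative only: program the power-cycle delay on one sequencer. */
static void bxt_set_power_cycle_delay(struct drm_i915_private *dev_priv,
				      int pps_idx, u32 delay)
{
	u32 pp_ctl = I915_READ(BXT_PP_CONTROL(pps_idx));

	pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
	pp_ctl |= delay << BXT_POWER_CYCLE_DELAY_SHIFT;
	I915_WRITE(BXT_PP_CONTROL(pps_idx), pp_ctl);
}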
@@ -6698,6 +6813,7 @@
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ	0x18
 #define   GEN9_PCODE_READ_MEM_LATENCY		0x6
 #define     GEN9_MEM_LATENCY_LEVEL_MASK		0xFF
 #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
@@ -6756,6 +6872,9 @@
 #define GEN7_MISCCPCTL			(0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
 
+#define GEN8_GARBCNTL                   0xB004
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
 #define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7282,7 @@
 #define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
 #define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE	(1<<24)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
 #define  LCPLL_CD_SOURCE_FCLK		(1<<21)
@@ -7265,12 +7385,6 @@
 #define DC_STATE_EN			0x45504
 #define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_DC9		(1<<3)
-
-/*
-* SKL DC
-*/
-#define  DC_STATE_EN			0x45504
-#define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_UPTO_DC6		(2<<0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
@@ -7822,4 +7936,13 @@
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0		0xb020	/* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0		0xc800	/* Graphics MOCS base register*/
+#define GEN9_MFX0_MOCS_0	0xc900	/* Media 0 MOCS base register*/
+#define GEN9_MFX1_MOCS_0	0xca00	/* Media 1 MOCS base register*/
+#define GEN9_VEBOX_MOCS_0	0xcb00	/* Video MOCS base register*/
+#define GEN9_BLT_MOCS_0		0xcc00	/* Blitter MOCS base register*/
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index cf67f82..1ccac61 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@
 	}
 
 	/* only restore FBC info on platforms that support FBC */
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2476268..55bd04c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -64,24 +64,16 @@
 			goto out;
 		}
 
-		units = 0;
-		div = 1000000ULL;
-
-		if (IS_CHERRYVIEW(dev)) {
+		if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
 			/* Special case for 320Mhz */
-			if (czcount_30ns == 1) {
-				div = 10000000ULL;
-				units = 3125ULL;
-			} else {
-				/* chv counts are one less */
-				czcount_30ns += 1;
-			}
+			div = 10000000ULL;
+			units = 3125ULL;
+		} else {
+			czcount_30ns += 1;
+			div = 1000000ULL;
+			units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
 		}
 
-		if (units == 0)
-			units = DIV_ROUND_UP_ULL(30ULL * bias,
-						 (u64)czcount_30ns);
-
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
 
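The rewrite keeps the math but makes the branches explicit: Cherryview's 320 MHz case (czcount_30ns == 1) uses the fixed 3125 units against a 10^7 divisor, and every other configuration first corrects the hardware's off-by-one count and then derives the unit. As a worked check with hypothetical inputs, bias = 100 and a raw czcount_30ns of 39 becomes 40 after the increment, giving units = DIV_ROUND_UP_ULL(30 * 100, 40) = 75 against div = 10^6.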
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 849a259..2f34c47 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -424,10 +424,10 @@
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct intel_engine_cs *from,
-		     struct intel_engine_cs *to,
+	    TP_PROTO(struct drm_i915_gem_request *to_req,
+		     struct intel_engine_cs *from,
 		     struct drm_i915_gem_request *req),
-	    TP_ARGS(from, to, req),
+	    TP_ARGS(to_req, from, req),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -439,7 +439,7 @@
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to->id;
+			   __entry->sync_to = to_req->ring->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -475,8 +475,8 @@
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
-	    TP_ARGS(ring, invalidate, flush),
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+	    TP_ARGS(req, invalidate, flush),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -486,8 +486,8 @@
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   __entry->dev = req->ring->dev->primary->index;
+			   __entry->ring = req->ring->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8e35e0d..e2531cf 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,133 +35,6 @@
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state)
-{
-	int nplanes = dev->mode_config.num_total_plane;
-	int ncrtcs = dev->mode_config.num_crtc;
-	int nconnectors = dev->mode_config.num_connector;
-	enum pipe nuclear_pipe = INVALID_PIPE;
-	struct intel_crtc *nuclear_crtc = NULL;
-	struct intel_crtc_state *crtc_state = NULL;
-	int ret;
-	int i;
-	bool not_nuclear = false;
-
-	/*
-	 * FIXME:  At the moment, we only support "nuclear pageflip" on a
-	 * single CRTC.  Cross-crtc updates will be added later.
-	 */
-	for (i = 0; i < nplanes; i++) {
-		struct intel_plane *plane = to_intel_plane(state->planes[i]);
-		if (!plane)
-			continue;
-
-		if (nuclear_pipe == INVALID_PIPE) {
-			nuclear_pipe = plane->pipe;
-		} else if (nuclear_pipe != plane->pipe) {
-			DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * FIXME:  We only handle planes for now; make sure there are no CRTC's
-	 * or connectors involved.
-	 */
-	state->allow_modeset = false;
-	for (i = 0; i < ncrtcs; i++) {
-		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
-		if (crtc)
-			memset(&crtc->atomic, 0, sizeof(crtc->atomic));
-		if (crtc && crtc->pipe != nuclear_pipe)
-			not_nuclear = true;
-		if (crtc && crtc->pipe == nuclear_pipe) {
-			nuclear_crtc = crtc;
-			crtc_state = to_intel_crtc_state(state->crtc_states[i]);
-		}
-	}
-	for (i = 0; i < nconnectors; i++)
-		if (state->connectors[i] != NULL)
-			not_nuclear = true;
-
-	if (not_nuclear) {
-		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* FIXME: move to crtc atomic check function once it is ready */
-	ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async)
-{
-	struct drm_crtc_state *crtc_state;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	if (async) {
-		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* Point of no return */
-	drm_atomic_helper_swap_state(dev, state);
-
-	/* swap crtc_scaler_state */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
-
-		if (INTEL_INFO(dev)->gen >= 9)
-			skl_detach_scalers(to_intel_crtc(crtc));
-
-		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-	}
-
-	drm_atomic_helper_wait_for_vblanks(dev, state);
-	drm_atomic_helper_cleanup_planes(dev, state);
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 /**
  * intel_connector_atomic_get_property - fetch connector property value
  * @connector: connector to fetch property for
@@ -269,17 +142,12 @@
 	struct drm_plane *plane = NULL;
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *plane_state = NULL;
-	struct intel_crtc_scaler_state *scaler_state;
-	struct drm_atomic_state *drm_state;
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct drm_atomic_state *drm_state = crtc_state->base.state;
 	int num_scalers_need;
 	int i, j;
 
-	if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-	drm_state = crtc_state->base.state;
-
 	num_scalers_need = hweight32(scaler_state->scaler_users);
 	DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
 		crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -307,17 +175,21 @@
 	/* walk through scaler_users bits and start assigning scalers */
 	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
 		int *scaler_id;
+		const char *name;
+		int idx;
 
 		/* skip if scaler not required */
 		if (!(scaler_state->scaler_users & (1 << i)))
 			continue;
 
 		if (i == SKL_CRTC_INDEX) {
+			name = "CRTC";
+			idx = intel_crtc->base.base.id;
+
 			/* panel fitter case: assign as a crtc scaler */
 			scaler_id = &scaler_state->scaler_id;
 		} else {
-			if (!drm_state)
-				continue;
+			name = "PLANE";
 
 			/* plane scaler case: assign as a plane scaler */
 			/* find the plane that set the bit as scaler_user */
@@ -336,9 +208,19 @@
 						plane->base.id);
 					return PTR_ERR(state);
 				}
+
+				/*
+				 * The plane is added after the plane checks have
+				 * run, but since this plane is unchanged, just do
+				 * the minimum required validation.
+				 */
+				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+					intel_crtc->atomic.wait_for_flips = true;
+				crtc_state->base.planes_changed = true;
 			}
 
 			intel_plane = to_intel_plane(plane);
+			idx = plane->base.id;
 
 			/* plane on different crtc cannot be a scaler user of this crtc */
 			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -354,23 +236,16 @@
 			for (j = 0; j < intel_crtc->num_scalers; j++) {
 				if (!scaler_state->scalers[j].in_use) {
 					scaler_state->scalers[j].in_use = 1;
-					*scaler_id = scaler_state->scalers[j].id;
+					*scaler_id = j;
 					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-						intel_crtc->pipe,
-						i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
-							plane_state->scaler_id,
-						i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-						i == SKL_CRTC_INDEX ?  intel_crtc->base.base.id :
-						plane->base.id);
+						intel_crtc->pipe, *scaler_id, name, idx);
 					break;
 				}
 			}
 		}
 
 		if (WARN_ON(*scaler_id < 0)) {
-			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
-				i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-				i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
 			continue;
 		}
 
@@ -392,3 +267,54 @@
 
 	return 0;
 }
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll_config *shared_dpll)
+{
+	enum intel_dpll_id i;
+
+	/* Copy shared dpll state */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		shared_dpll[i] = pll->config;
+	}
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+	if (!state->dpll_set) {
+		state->dpll_set = true;
+
+		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+						  state->shared_dpll);
+	}
+
+	return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+	drm_atomic_state_default_clear(&state->base);
+	state->dpll_set = false;
+}
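The new alloc/clear hooks let i915 subclass drm_atomic_state with a shared-DPLL snapshot, but they only take effect once registered in the driver's drm_mode_config_funcs. A sketch of that hookup, assuming the conventional funcs table (the table name here is illustrative):

/* Sketch: wiring the subclassed state into the mode_config funcs. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

With that in place every drm_atomic_state handed to the driver is really an intel_atomic_state, and intel_atomic_get_shared_dpll_state() can lazily copy the current DPLL configs the first time a transaction asks for them.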
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 86ba4b2..f1ab8e4 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,6 +56,7 @@
 
 	state->base.plane = plane;
 	state->base.rotation = BIT(DRM_ROTATE_0);
+	state->ckey.flags = I915_SET_COLORKEY_NONE;
 
 	return state;
 }
@@ -114,8 +115,10 @@
 	struct intel_crtc_state *crtc_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *intel_state = to_intel_plane_state(state);
+	struct drm_crtc_state *drm_crtc_state;
+	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
+	crtc = crtc ? crtc : plane->state->crtc;
 	intel_crtc = to_intel_crtc(crtc);
 
 	/*
@@ -127,16 +130,11 @@
 	if (!crtc)
 		return 0;
 
-	/* FIXME: temporary hack necessary while we still use the plane update
-	 * helper. */
-	if (state->state) {
-		crtc_state =
-			intel_atomic_get_crtc_state(state->state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	} else {
-		crtc_state = intel_crtc->config;
-	}
+	drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!drm_crtc_state))
+		return -EINVAL;
+
+	crtc_state = to_intel_crtc_state(drm_crtc_state);
 
 	/*
 	 * The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@
 	intel_state->clip.y2 =
 		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
 
-	/*
-	 * Disabling a plane is always okay; we just need to update
-	 * fb tracking in a special way since cleanup_fb() won't
-	 * get called by the plane helpers.
-	 */
-	if (state->fb == NULL && plane->state->fb != NULL) {
-		/*
-		 * 'prepare' is never called when plane is being disabled, so
-		 * we need to handle frontbuffer tracking as a special case
-		 */
-		intel_crtc->atomic.disabled_planes |=
-			(1 << drm_plane_index(plane));
-	}
-
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@
 		}
 	}
 
-	return intel_plane->check_plane(plane, intel_state);
+	intel_state->visible = false;
+	ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+	if (ret)
+		return ret;
+
+	return intel_plane_atomic_calc_changes(&crtc_state->base, state);
 }
 
 static void intel_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3da9b84..89c1a8ce 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -41,7 +41,8 @@
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
  *
  * The codec and controller sequences could be done either parallel or serial,
  * but generally the ELDV/PD change in the codec sequence indicates to the audio
@@ -399,6 +400,9 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	connector = drm_select_eld(encoder, mode);
 	if (!connector)
@@ -419,6 +423,9 @@
 
 	if (dev_priv->display.audio_codec_enable)
 		dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -428,13 +435,20 @@
  * The disable sequences must be performed before disabling the transcoder or
  * port.
  */
-void intel_audio_codec_disable(struct intel_encoder *encoder)
+void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	if (dev_priv->display.audio_codec_disable)
-		dev_priv->display.audio_codec_disable(encoder);
+		dev_priv->display.audio_codec_disable(intel_encoder);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -525,12 +539,16 @@
 				     struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
 	if (WARN_ON(acomp->ops || acomp->dev))
 		return -EEXIST;
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = &i915_audio_component_ops;
 	acomp->dev = i915_dev;
+	dev_priv->audio_component = acomp;
+	drm_modeset_unlock_all(dev_priv->dev);
 
 	return 0;
 }
@@ -539,9 +557,13 @@
 					struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = NULL;
 	acomp->dev = NULL;
+	dev_priv->audio_component = NULL;
+	drm_modeset_unlock_all(dev_priv->dev);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
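Caching the component in dev_priv->audio_component (under the modeset locks, as the bind/unbind hunks show) is what lets the codec enable/disable paths poke the HDA side through pin_eld_notify. The optional-callback guard repeated above generalises into a small helper; a hedged sketch (the helper name is invented):

/* Sketch of the guard pattern used by both audio paths above. */
static void i915_audio_pin_notify(struct drm_i915_private *dev_priv,
				  enum port port)
{
	struct i915_audio_component *acomp = dev_priv->audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 (int)port);
}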
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c..b3e437b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -122,42 +122,6 @@
 	drm_mode_set_name(panel_fixed_mode);
 }
 
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
-			   const struct lvds_dvo_timing *b)
-{
-	if (a->hactive_hi != b->hactive_hi ||
-	    a->hactive_lo != b->hactive_lo)
-		return false;
-
-	if (a->hsync_off_hi != b->hsync_off_hi ||
-	    a->hsync_off_lo != b->hsync_off_lo)
-		return false;
-
-	if (a->hsync_pulse_width != b->hsync_pulse_width)
-		return false;
-
-	if (a->hblank_hi != b->hblank_hi ||
-	    a->hblank_lo != b->hblank_lo)
-		return false;
-
-	if (a->vactive_hi != b->vactive_hi ||
-	    a->vactive_lo != b->vactive_lo)
-		return false;
-
-	if (a->vsync_off != b->vsync_off)
-		return false;
-
-	if (a->vsync_pulse_width != b->vsync_pulse_width)
-		return false;
-
-	if (a->vblank_hi != b->vblank_hi ||
-	    a->vblank_lo != b->vblank_lo)
-		return false;
-
-	return true;
-}
-
 static const struct lvds_dvo_timing *
 get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
 		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
-	int i, downclock, drrs_mode;
+	int drrs_mode;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
@@ -272,30 +236,6 @@
 	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
-	/*
-	 * Iterate over the LVDS panel timing info to find the lowest clock
-	 * for the native resolution.
-	 */
-	downclock = panel_dvo_timing->clock;
-	for (i = 0; i < 16; i++) {
-		const struct lvds_dvo_timing *dvo_timing;
-
-		dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
-						 lvds_lfp_data_ptrs,
-						 i);
-		if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
-		    dvo_timing->clock < downclock)
-			downclock = dvo_timing->clock;
-	}
-
-	if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
-		dev_priv->lvds_downclock_avail = 1;
-		dev_priv->lvds_downclock = downclock * 10;
-		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
-			      "Normal Clock %dKHz, downclock %dKHz\n",
-			      panel_fixed_mode->clock, 10*downclock);
-	}
-
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
 				       lvds_lfp_data_ptrs,
 				       lvds_options->panel_type);
@@ -461,7 +401,7 @@
 {
 	struct sdvo_device_mapping *p_mapping;
 	const struct bdb_general_definitions *p_defs;
-	const union child_device_config *p_child;
+	const struct old_child_dev_config *child; /* legacy */
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -470,14 +410,14 @@
 		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
 		return;
 	}
-	/* judge whether the size of child device meets the requirements.
-	 * If the child device size obtained from general definition block
-	 * is different with sizeof(struct child_device_config), skip the
-	 * parsing of sdvo device info
+
+	/*
+	 * Only parse SDVO mappings when the general definitions block child
+	 * device size matches that of the *legacy* child device config
+	 * struct. Thus, SDVO mapping will be skipped for newer VBT.
 	 */
-	if (p_defs->child_dev_size != sizeof(*p_child)) {
-		/* different child dev size . Ignore it */
-		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+	if (p_defs->child_dev_size != sizeof(*child)) {
+		DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -487,37 +427,37 @@
 		p_defs->child_dev_size;
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
-		p_child = child_device_ptr(p_defs, i);
-		if (!p_child->old.device_type) {
+		child = &child_device_ptr(p_defs, i)->old;
+		if (!child->device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		if (p_child->old.slave_addr != SLAVE_ADDR1 &&
-			p_child->old.slave_addr != SLAVE_ADDR2) {
+		if (child->slave_addr != SLAVE_ADDR1 &&
+		    child->slave_addr != SLAVE_ADDR2) {
 			/*
 			 * If the slave address is neither 0x70 nor 0x72,
 			 * it is not a SDVO device. Skip it.
 			 */
 			continue;
 		}
-		if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
-			p_child->old.dvo_port != DEVICE_PORT_DVOC) {
+		if (child->dvo_port != DEVICE_PORT_DVOB &&
+		    child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
 			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
-				" %s port\n",
-				p_child->old.slave_addr,
-				(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
-					"SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
+			      " %s port\n",
+			      child->slave_addr,
+			      (child->dvo_port == DEVICE_PORT_DVOB) ?
+			      "SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
 		if (!p_mapping->initialized) {
-			p_mapping->dvo_port = p_child->old.dvo_port;
-			p_mapping->slave_addr = p_child->old.slave_addr;
-			p_mapping->dvo_wiring = p_child->old.dvo_wiring;
-			p_mapping->ddc_pin = p_child->old.ddc_pin;
-			p_mapping->i2c_pin = p_child->old.i2c_pin;
+			p_mapping->dvo_port = child->dvo_port;
+			p_mapping->slave_addr = child->slave_addr;
+			p_mapping->dvo_wiring = child->dvo_wiring;
+			p_mapping->ddc_pin = child->ddc_pin;
+			p_mapping->i2c_pin = child->i2c_pin;
 			p_mapping->initialized = 1;
 			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
@@ -529,7 +469,7 @@
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO devices.\n");
 		}
-		if (p_child->old.slave2_addr) {
+		if (child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
 			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -946,6 +886,17 @@
 	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
+static u8 translate_iboost(u8 val)
+{
+	static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+	if (val >= ARRAY_SIZE(mapping)) {
+		DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+		return 0;
+	}
+	return mapping[val];
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   const struct bdb_header *bdb)
 {
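translate_iboost() maps the 2-bit VBT encoding onto the hardware's legal I_boost values (0 -> 1, 1 -> 3, 2 -> 7) and clamps anything else to 0 with a warning. Since the parser below splits one byte into two such fields, a worked example with a hypothetical VBT byte:

/* Hypothetical iboost_level byte, low nibble (e)DP, high nibble HDMI: */
u8 iboost_level = 0x21;
u8 dp_boost   = translate_iboost(iboost_level & 0xF);	/* 0x1 -> 3 */
u8 hdmi_boost = translate_iboost(iboost_level >> 4);	/* 0x2 -> 7 */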
@@ -954,23 +905,23 @@
 	uint8_t hdmi_level_shift;
 	int i, j;
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-	uint8_t aux_channel;
+	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
 	 * so look for all the possible values for each port and abort if more
 	 * than one is found. */
-	int dvo_ports[][2] = {
-		{DVO_PORT_HDMIA, DVO_PORT_DPA},
-		{DVO_PORT_HDMIB, DVO_PORT_DPB},
-		{DVO_PORT_HDMIC, DVO_PORT_DPC},
-		{DVO_PORT_HDMID, DVO_PORT_DPD},
-		{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+	int dvo_ports[][3] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+		{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
 	};
 
 	/* Find the child device to use, abort if more than one found. */
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		it = dev_priv->vbt.child_dev + i;
 
-		for (j = 0; j < 2; j++) {
+		for (j = 0; j < 3; j++) {
 			if (dvo_ports[port][j] == -1)
 				break;
 
@@ -988,6 +939,7 @@
 		return;
 
 	aux_channel = child->raw[25];
+	ddc_pin = child->common.ddc_pin;
 
 	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1019,22 +971,53 @@
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		if (child->common.ddc_pin == 0x05 && port != PORT_B)
+		if (port == PORT_E) {
+			info->alternate_ddc_pin = ddc_pin;
+			/* If DDI E shares a DDC pin with another port, then
+			 * DVI/HDMI cannot exist on the shared port: they would
+			 * use the same DDC pin and the system could not
+			 * communicate with them separately. */
+			if (ddc_pin == DDC_PIN_B) {
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_C) {
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_D) {
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+			}
+		} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
-		if (child->common.ddc_pin == 0x04 && port != PORT_C)
+		else if (ddc_pin == DDC_PIN_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
-		if (child->common.ddc_pin == 0x06 && port != PORT_D)
+		else if (ddc_pin == DDC_PIN_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
 	}
 
 	if (is_dp) {
-		if (aux_channel == 0x40 && port != PORT_A)
+		if (port == PORT_E) {
+			info->alternate_aux_channel = aux_channel;
+			/* If DDI E shares an AUX channel with another port,
+			 * then DP cannot exist on the shared port: they would
+			 * use the same AUX channel and the system could not
+			 * communicate with them separately. */
+			if (aux_channel == DP_AUX_A)
+				dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+			else if (aux_channel == DP_AUX_B)
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+			else if (aux_channel == DP_AUX_C)
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+			else if (aux_channel == DP_AUX_D)
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+		} else if (aux_channel == DP_AUX_A && port != PORT_A)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
-		if (aux_channel == 0x10 && port != PORT_B)
+		else if (aux_channel == DP_AUX_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
-		if (aux_channel == 0x20 && port != PORT_C)
+		else if (aux_channel == DP_AUX_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
-		if (aux_channel == 0x30 && port != PORT_D)
+		else if (aux_channel == DP_AUX_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
 	}
 
@@ -1046,6 +1029,16 @@
 			      hdmi_level_shift);
 		info->hdmi_level_shift = hdmi_level_shift;
 	}
+
+	/* Parse the I_boost config for SKL and above */
+	if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+		info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+			      port_name(port), info->dp_boost_level);
+		info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+			      port_name(port), info->hdmi_boost_level);
+	}
 }
 
 static void parse_ddi_ports(struct drm_i915_private *dev_priv,
@@ -1075,17 +1068,39 @@
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u16	block_size;
+	u8 expected_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (p_defs->child_dev_size < sizeof(*p_child)) {
-		DRM_ERROR("General definiton block child device size is too small.\n");
+	if (bdb->version < 195) {
+		expected_size = sizeof(struct old_child_dev_config);
+	} else if (bdb->version == 195) {
+		expected_size = 37;
+	} else if (bdb->version <= 197) {
+		expected_size = 38;
+	} else {
+		expected_size = 38;
+		BUILD_BUG_ON(sizeof(*p_child) < 38);
+		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+	}
+
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_ERROR("Child device config size %u is too small.\n",
+			  p_defs->child_dev_size);
 		return;
 	}
+
+	/* Flag an error for unexpected size, but continue anyway. */
+	if (p_defs->child_dev_size != expected_size)
+		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+			  p_defs->child_dev_size, expected_size, bdb->version);
+
 	/* get the block size of general definitions */
 	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
@@ -1130,7 +1145,14 @@
 
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+
+		/*
+		 * Copy as much as we know (sizeof) and is available
+		 * (child_dev_size) of the child device. Accessing the data must
+		 * depend on VBT version.
+		 */
+		memcpy(child_dev_ptr, p_child,
+		       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
 	}
 	return;
 }
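The min_t() bound on the memcpy is the heart of the compatibility fix: an old VBT whose child_dev_size is smaller than the driver's union copies only what exists, and a future VBT with a larger child device never overruns the union. For example, a version-195 VBT (child_dev_size = 37) copies 37 bytes into the at-least-38-byte union, while a hypothetical child_dev_size of 64 would still copy only sizeof(*p_child) bytes; either way, accessors must gate on bdb->version before trusting the newer fields.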
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index af0b476..46cd5c7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -203,9 +203,11 @@
 #define DEVICE_PORT_DVOB	0x01
 #define DEVICE_PORT_DVOC	0x02
 
-/* We used to keep this struct but without any version control. We should avoid
+/*
+ * We used to keep this struct but without any version control. We should avoid
  * using it in the future, but it should be safe to keep using it in the old
- * code. */
+ * code. Do not change; we rely on its size.
+ */
 struct old_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -231,6 +233,10 @@
 /* This one contains field offsets that are known to be common for all BDB
  * versions. Notice that the meaning of the contents may still change,
  * but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
 struct common_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -239,8 +245,13 @@
 	u8 not_common2[2];
 	u8 ddc_pin;
 	u16 edid_ptr;
+	u8 obsolete;
+	u8 flags_1;
+	u8 not_common3[13];
+	u8 iboost_level;
 } __packed;
 
+
 /* This field changes depending on the BDB version, so the most reliable way to
  * read it is by checking the BDB version and reading the raw pointer. */
 union child_device_config {
@@ -747,11 +758,6 @@
 #define		DVO_C		2
 #define		DVO_D		3
 
-/* define the PORT for DP output type */
-#define		PORT_IDPB	7
-#define		PORT_IDPC	8
-#define		PORT_IDPD	9
-
 /* Possible values for the "DVO Port" field for versions >= 155: */
 #define DVO_PORT_HDMIA	0
 #define DVO_PORT_HDMIB	1
@@ -764,6 +770,8 @@
 #define DVO_PORT_DPC	8
 #define DVO_PORT_DPD	9
 #define DVO_PORT_DPA	10
+#define DVO_PORT_DPE	11
+#define DVO_PORT_HDMIE	12
 #define DVO_PORT_MIPIA	21
 #define DVO_PORT_MIPIB	22
 #define DVO_PORT_MIPIC	23
@@ -778,6 +786,13 @@
 #define MIPI_DSI_UNDEFINED_PANEL_ID	0
 #define MIPI_DSI_GENERIC_PANEL_ID	1
 
+/*
+ * PMIC vs. SoC backlight support, specified in the pwm_blc
+ * field of the mipi_config block below.
+ */
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+
 struct mipi_config {
 	u16 panel_id;
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 521af2c..af5e43b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -236,53 +236,6 @@
 	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-	struct drm_crtc *crtc;
-	int old_dpms;
-
-	/* PCH platforms and VLV only support on/off. */
-	if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	old_dpms = connector->dpms;
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = encoder->base.crtc;
-	if (!crtc) {
-		encoder->connectors_active = false;
-		return;
-	}
-
-	/* We need the pipe to run for anything but OFF. */
-	if (mode == DRM_MODE_DPMS_OFF)
-		encoder->connectors_active = false;
-	else
-		encoder->connectors_active = true;
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode < old_dpms) {
-		/* From off to on, enable the pipe first. */
-		intel_crtc_update_dpms(crtc);
-
-		intel_crt_set_dpms(encoder, mode);
-	} else {
-		intel_crt_set_dpms(encoder, mode);
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_crt_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -798,7 +751,7 @@
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
-	.dpms = intel_crt_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bcb41e6..ba1ae03 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -244,7 +244,7 @@
 void intel_csr_load_program(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	__be32 *payload = dev_priv->csr.dmc_payload;
+	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
 	if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@
 	fw_size = dev_priv->csr.dmc_fw_size;
 	for (i = 0; i < fw_size; i++)
 		I915_WRITE(CSR_PROGRAM_BASE + i * 4,
-			(u32 __force)payload[i]);
+			payload[i]);
 
 	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
 		I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@
 	char substepping = intel_get_substepping(dev);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	bool fw_loaded = false;
 
 	if (!fw) {
@@ -375,20 +375,13 @@
 	}
 
 	dmc_payload = csr->dmc_payload;
-	for (i = 0; i < dmc_header->fw_size; i++) {
-		uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
-		/*
-		 * The firmware payload is an array of 32 bit words stored in
-		 * little-endian format in the firmware image and programmed
-		 * as 32 bit big-endian format to memory.
-		 */
-		dmc_payload[i] = cpu_to_be32(*tmp);
-	}
+	memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
 	/* load csr program during system boot, as needed for DC states */
 	intel_csr_load_program(dev);
 	fw_loaded = true;
 
+	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
 out:
 	if (fw_loaded)
 		intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@
 		return;
 	}
 
+	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
 	/*
 	 * Obtain a runtime pm reference, until CSR is loaded,
 	 * to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@
 
 void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-	WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+	WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+	     "CSR is not loaded.\n");
 	WARN(!I915_READ(CSR_PROGRAM_BASE),
 				"CSR program storage start is NULL\n");
 	WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cacb07b..61575f6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -31,6 +31,7 @@
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
 	u32 trans2;	/* vref sel, vswing */
+	u8 i_boost;	/* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
 };
 
 /* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@
  * automatically adapt to HDMI connections as well
  */
 static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0006000E },
-	{ 0x00D75FFF, 0x0005000A },
-	{ 0x00C30FFF, 0x00040006 },
-	{ 0x80AAAFFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x0005000A },
-	{ 0x00D75FFF, 0x000C0004 },
-	{ 0x80C30FFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x00040006 },
-	{ 0x80D75FFF, 0x000B0000 },
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },
+	{ 0x00D75FFF, 0x0005000A, 0x0 },
+	{ 0x00C30FFF, 0x00040006, 0x0 },
+	{ 0x80AAAFFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },
+	{ 0x00D75FFF, 0x000C0004, 0x0 },
+	{ 0x80C30FFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x00040006, 0x0 },
+	{ 0x80D75FFF, 0x000B0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000F000A },
-	{ 0x00C30FFF, 0x00060006 },
-	{ 0x00AAAFFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x000F000A },
-	{ 0x00D75FFF, 0x00160004 },
-	{ 0x00C30FFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x00060006 },
-	{ 0x00D75FFF, 0x001E0000 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000F000A, 0x0 },
+	{ 0x00C30FFF, 0x00060006, 0x0 },
+	{ 0x00AAAFFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x000F000A, 0x0 },
+	{ 0x00D75FFF, 0x00160004, 0x0 },
+	{ 0x00C30FFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x00060006, 0x0 },
+	{ 0x00D75FFF, 0x001E0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV d	db	*/
-	{ 0x00FFFFFF, 0x0006000E },	/* 0:	400	400	0	*/
-	{ 0x00E79FFF, 0x000E000C },	/* 1:	400	500	2	*/
-	{ 0x00D75FFF, 0x0005000A },	/* 2:	400	600	3.5	*/
-	{ 0x00FFFFFF, 0x0005000A },	/* 3:	600	600	0	*/
-	{ 0x00E79FFF, 0x001D0007 },	/* 4:	600	750	2	*/
-	{ 0x00D75FFF, 0x000C0004 },	/* 5:	600	900	3.5	*/
-	{ 0x00FFFFFF, 0x00040006 },	/* 6:	800	800	0	*/
-	{ 0x80E79FFF, 0x00030002 },	/* 7:	800	1000	2	*/
-	{ 0x00FFFFFF, 0x00140005 },	/* 8:	850	850	0	*/
-	{ 0x00FFFFFF, 0x000C0004 },	/* 9:	900	900	0	*/
-	{ 0x00FFFFFF, 0x001C0003 },	/* 10:	950	950	0	*/
-	{ 0x80FFFFFF, 0x00030002 },	/* 11:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00E79FFF, 0x000E000C, 0x0 },/* 1:	400	500	2	*/
+	{ 0x00D75FFF, 0x0005000A, 0x0 },/* 2:	400	600	3.5	*/
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },/* 3:	600	600	0	*/
+	{ 0x00E79FFF, 0x001D0007, 0x0 },/* 4:	600	750	2	*/
+	{ 0x00D75FFF, 0x000C0004, 0x0 },/* 5:	600	900	3.5	*/
+	{ 0x00FFFFFF, 0x00040006, 0x0 },/* 6:	800	800	0	*/
+	{ 0x80E79FFF, 0x00030002, 0x0 },/* 7:	800	1000	2	*/
+	{ 0x00FFFFFF, 0x00140005, 0x0 },/* 8:	850	850	0	*/
+	{ 0x00FFFFFF, 0x000C0004, 0x0 },/* 9:	900	900	0	*/
+	{ 0x00FFFFFF, 0x001C0003, 0x0 },/* 10:	950	950	0	*/
+	{ 0x80FFFFFF, 0x00030002, 0x0 },/* 11:	1000	1000	0	*/
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
-	{ 0x00FFFFFF, 0x00000012 },
-	{ 0x00EBAFFF, 0x00020011 },
-	{ 0x00C71FFF, 0x0006000F },
-	{ 0x00AAAFFF, 0x000E000A },
-	{ 0x00FFFFFF, 0x00020011 },
-	{ 0x00DB6FFF, 0x0005000F },
-	{ 0x00BEEFFF, 0x000A000C },
-	{ 0x00FFFFFF, 0x0005000F },
-	{ 0x00DB6FFF, 0x000A000C },
+	{ 0x00FFFFFF, 0x00000012, 0x0 },
+	{ 0x00EBAFFF, 0x00020011, 0x0 },
+	{ 0x00C71FFF, 0x0006000F, 0x0 },
+	{ 0x00AAAFFF, 0x000E000A, 0x0 },
+	{ 0x00FFFFFF, 0x00020011, 0x0 },
+	{ 0x00DB6FFF, 0x0005000F, 0x0 },
+	{ 0x00BEEFFF, 0x000A000C, 0x0 },
+	{ 0x00FFFFFF, 0x0005000F, 0x0 },
+	{ 0x00DB6FFF, 0x000A000C, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000E000A },
-	{ 0x00BEFFFF, 0x00140006 },
-	{ 0x80B2CFFF, 0x001B0002 },
-	{ 0x00FFFFFF, 0x000E000A },
-	{ 0x00DB6FFF, 0x00160005 },
-	{ 0x80C71FFF, 0x001A0002 },
-	{ 0x00F7DFFF, 0x00180004 },
-	{ 0x80D75FFF, 0x001B0002 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000E000A, 0x0 },
+	{ 0x00BEFFFF, 0x00140006, 0x0 },
+	{ 0x80B2CFFF, 0x001B0002, 0x0 },
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },
+	{ 0x00DB6FFF, 0x00160005, 0x0 },
+	{ 0x80C71FFF, 0x001A0002, 0x0 },
+	{ 0x00F7DFFF, 0x00180004, 0x0 },
+	{ 0x80D75FFF, 0x001B0002, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0001000E },
-	{ 0x00D75FFF, 0x0004000A },
-	{ 0x00C30FFF, 0x00070006 },
-	{ 0x00AAAFFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x0004000A },
-	{ 0x00D75FFF, 0x00090004 },
-	{ 0x00C30FFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x00070006 },
-	{ 0x00D75FFF, 0x000C0000 },
+	{ 0x00FFFFFF, 0x0001000E, 0x0 },
+	{ 0x00D75FFF, 0x0004000A, 0x0 },
+	{ 0x00C30FFF, 0x00070006, 0x0 },
+	{ 0x00AAAFFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x0004000A, 0x0 },
+	{ 0x00D75FFF, 0x00090004, 0x0 },
+	{ 0x00C30FFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x00070006, 0x0 },
+	{ 0x00D75FFF, 0x000C0000, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV df	db	*/
-	{ 0x00FFFFFF, 0x0007000E },	/* 0:	400	400	0	*/
-	{ 0x00D75FFF, 0x000E000A },	/* 1:	400	600	3.5	*/
-	{ 0x00BEFFFF, 0x00140006 },	/* 2:	400	800	6	*/
-	{ 0x00FFFFFF, 0x0009000D },	/* 3:	450	450	0	*/
-	{ 0x00FFFFFF, 0x000E000A },	/* 4:	600	600	0	*/
-	{ 0x00D7FFFF, 0x00140006 },	/* 5:	600	800	2.5	*/
-	{ 0x80CB2FFF, 0x001B0002 },	/* 6:	600	1000	4.5	*/
-	{ 0x00FFFFFF, 0x00140006 },	/* 7:	800	800	0	*/
-	{ 0x80E79FFF, 0x001B0002 },	/* 8:	800	1000	2	*/
-	{ 0x80FFFFFF, 0x001B0002 },	/* 9:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00D75FFF, 0x000E000A, 0x0 },/* 1:	400	600	3.5	*/
+	{ 0x00BEFFFF, 0x00140006, 0x0 },/* 2:	400	800	6	*/
+	{ 0x00FFFFFF, 0x0009000D, 0x0 },/* 3:	450	450	0	*/
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },/* 4:	600	600	0	*/
+	{ 0x00D7FFFF, 0x00140006, 0x0 },/* 5:	600	800	2.5	*/
+	{ 0x80CB2FFF, 0x001B0002, 0x0 },/* 6:	600	1000	4.5	*/
+	{ 0x00FFFFFF, 0x00140006, 0x0 },/* 7:	800	800	0	*/
+	{ 0x80E79FFF, 0x001B0002, 0x0 },/* 8:	800	1000	2	*/
+	{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:	1000	1000	0	*/
 };
 
+/* Skylake H and S */
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00004014, 0x0000009B },
-	{ 0x00006012, 0x00000088 },
-	{ 0x00008010, 0x00000087 },
-	{ 0x00000018, 0x0000009B },
-	{ 0x00004014, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x00000088 },
-	{ 0x00004014, 0x00000087 },
+	{ 0x00002016, 0x000000A0, 0x0 },
+	{ 0x00005012, 0x0000009B, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x0000009B, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x000000DF, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
 };
 
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+	{ 0x0000201B, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x0000201B, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+	{ 0x00000018, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
-	{ 0x00000018, 0x000000a8 },
-	{ 0x00002016, 0x000000ab },
-	{ 0x00006012, 0x000000a2 },
-	{ 0x00008010, 0x00000088 },
-	{ 0x00000018, 0x000000ab },
-	{ 0x00004014, 0x000000a2 },
-	{ 0x00006012, 0x000000a6 },
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00005013, 0x0000009c },
-	{ 0x00000018, 0x00000088 },
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00000018, 0x000000AB, 0x0 },
+	{ 0x00007013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
 };
 
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00002016, 0x000000AB, 0x0 },
+	{ 0x00005013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+};
 
+/*
+ * Skylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000AB, 0x0 },
+	{ 0x00007011, 0x000000A4, 0x0 },
+	{ 0x00009010, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x000000AA, 0x0 },
+	{ 0x00006013, 0x000000A4, 0x0 },
+	{ 0x00007011, 0x0000009D, 0x0 },
+	{ 0x00000018, 0x000000A0, 0x0 },
+	{ 0x00006012, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake U, H and S */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
-	{ 0x00000018, 0x000000ac },
-	{ 0x00005012, 0x0000009d },
-	{ 0x00007011, 0x00000088 },
-	{ 0x00000018, 0x000000a1 },
-	{ 0x00000018, 0x00000098 },
-	{ 0x00004013, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x000000df },
-	{ 0x00003015, 0x00000087 },
-	{ 0x00003015, 0x000000c7 },
-	{ 0x00000018, 0x000000c7 },
+	{ 0x00000018, 0x000000AC, 0x0 },
+	{ 0x00005012, 0x0000009D, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00000018, 0x00000098, 0x0 },
+	{ 0x00004013, 0x00000088, 0x0 },
+	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
+	{ 0x00003015, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x000000DF, 0x0 },
+	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x00000018, 0x000000A4, 0x0 },
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00004013, 0x00000080, 0x0 },
+	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
+	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
+	{ 0x00000018, 0x000000C7, 0x0 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -181,16 +261,16 @@
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, true  },	/* 0:	400		0   */
-	{ 78,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 104, 0,    0, 64,  false },	/* 2:	400		6   */
-	{ 154, 0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 116, 0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 154, 0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 154, 0,    0, 85,  false },	/* 8:	800		3.5 */
-	{ 154, 0x9A, 1, 128, false },  /* 9:	1200		0   */
+	{ 52,  0x9A, 0, 128, true  },	/* 0:	400		0   */
+	{ 78,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 104, 0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 154, 0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 116, 0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 154, 0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 154, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, false },	/* 9:	1200		0   */
 };
 
 /* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, false },	/* 0:	400		0   */
-	{ 52,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 52,  0,    0, 64,  false },	/* 2:	400		6   */
-	{ 42,  0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 77,  0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 77,  0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 102, 0,    0, 85,  false },	/* 8:	800		3.5 */
+	{ 52,  0x9A, 0, 128, false },	/* 0:	400		0   */
+	{ 52,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 52,  0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 42,  0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 77,  0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 77,  0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 102, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type);
+
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
 				 struct intel_digital_port **dig_port,
 				 enum port *port)
@@ -249,6 +332,77 @@
 	return intel_dig_port->hdmi.hdmi_reg;
 }
 
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+							int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+	} else if (IS_SKL_ULT(dev)) {
+		ddi_translations = skl_u_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+	} else {
+		ddi_translations = skl_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+							 int *n_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_y_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_y_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+		}
+	} else if (IS_SKL_ULT(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_u_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_u_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		}
+	} else {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+		       int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+	} else {
+		ddi_translations = skl_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+	}
+
+	return ddi_translations;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -261,6 +415,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
+	u32 iboost_bit = 0;
 	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
 	    size;
 	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
@@ -280,19 +435,17 @@
 		return;
 	} else if (IS_SKYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
-		ddi_translations_dp = skl_ddi_translations_dp;
-		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations_edp = skl_ddi_translations_edp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-		} else {
-			ddi_translations_edp = skl_ddi_translations_dp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		}
-
-		ddi_translations_hdmi = skl_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
+		ddi_translations_dp =
+				skl_get_buf_trans_dp(dev, &n_dp_entries);
+		ddi_translations_edp =
+				skl_get_buf_trans_edp(dev, &n_edp_entries);
+		ddi_translations_hdmi =
+				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = 1<<31;
 	} else if (IS_BROADWELL(dev)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
@@ -353,7 +506,7 @@
 	}
 
 	for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
-		I915_WRITE(reg, ddi_translations[i].trans1);
+		I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
 		reg += 4;
 		I915_WRITE(reg, ddi_translations[i].trans2);
 		reg += 4;
@@ -368,7 +521,7 @@
 		hdmi_level = hdmi_default_entry;
 
 	/* Entry 9 is for HDMI: */
-	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
+	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
 	reg += 4;
 	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
 	reg += 4;
@@ -625,11 +778,11 @@
 	(void) (&__a == &__b);			\
 	__a > __b ? (__a - __b) : (__b - __a); })
 
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
 	unsigned p, n2, r2;
 };
 
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 {
 	unsigned budget;
 
@@ -703,9 +856,9 @@
 	return budget;
 }
 
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-			     unsigned r2, unsigned n2, unsigned p,
-			     struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+				 unsigned r2, unsigned n2, unsigned p,
+				 struct hsw_wrpll_rnp *best)
 {
 	uint64_t a, b, c, d, diff, diff_best;
 
@@ -762,8 +915,7 @@
 	/* Otherwise a < c && b >= d, do nothing */
 }
 
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
-				     int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 {
 	int refclk = LC_FREQ;
 	int n, p, r;
@@ -856,6 +1008,26 @@
 	return dco_freq / (p0 * p1 * p2 * 5);
 }
 
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+	int dotclock;
+
+	if (pipe_config->has_pch_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->fdi_m_n);
+	else if (pipe_config->has_dp_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->dp_m_n);
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
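ddi_dotclock_get() consolidates the dotclock derivation that the skl/hsw/bxt
clock_get paths below used to duplicate. A standalone sketch with hypothetical
numbers: a 12bpc HDMI link clocks 1.5x faster than the pipe, so the dotclock
is port_clock * 2 / 3:

	#include <stdio.h>

	int main(void)
	{
		int port_clock = 445500;	/* kHz, hypothetical */
		int pipe_bpp = 36;		/* 12 bpc x 3 channels */
		int pixel_multiplier = 1;
		int dotclock;

		/* 12bpc HDMI runs the link at 1.5x the pipe clock. */
		if (pipe_bpp == 36)
			dotclock = port_clock * 2 / 3;
		else
			dotclock = port_clock;
		dotclock /= pixel_multiplier;

		printf("crtc_clock = %d kHz\n", dotclock);	/* 297000 */
		return 0;
	}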
 
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
 				struct intel_crtc_state *pipe_config)
@@ -902,12 +1074,7 @@
 
 	pipe_config->port_clock = link_clock;
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
@@ -929,10 +1096,10 @@
 		link_clock = 270000;
 		break;
 	case PORT_CLK_SEL_WRPLL1:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1121,32 @@
 
 	pipe_config->port_clock = link_clock * 2;
 
-	if (pipe_config->has_pch_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->fdi_m_n);
-	else if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 				enum intel_dpll_id dpll)
 {
-	/* FIXME formula not available in bspec */
-	return 0;
+	struct intel_shared_dpll *pll;
+	struct intel_dpll_hw_state *state;
+	intel_clock_t clock;
+
+	/* For DDI ports we always use a shared PLL. */
+	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+		return 0;
+
+	pll = &dev_priv->shared_dplls[dpll];
+	state = &pll->config.hw_state;
+
+	clock.m1 = 2;
+	clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+	return chv_calc_dpll_params(100000, &clock);
 }
 
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
@@ -980,16 +1156,9 @@
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	uint32_t dpll = port;
 
-	pipe_config->port_clock =
-		bxt_calc_pll_link(dev_priv, dpll);
+	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-							&pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock =
-							pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1180,12 @@
 {
 	uint64_t freq2k;
 	unsigned p, n2, r2;
-	struct wrpll_rnp best = { 0, 0, 0 };
+	struct hsw_wrpll_rnp best = { 0, 0, 0 };
 	unsigned budget;
 
 	freq2k = clock / 100;
 
-	budget = wrpll_get_budget_for_freq(clock);
+	budget = hsw_wrpll_get_budget_for_freq(clock);
 
 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
 	 * and directly pass the LC PLL to it. */
@@ -1060,8 +1229,8 @@
 		     n2++) {
 
 			for (p = P_MIN; p <= P_MAX; p += P_INC)
-				wrpll_update_rnp(freq2k, budget,
-						 r2, n2, p, &best);
+				hsw_wrpll_update_rnp(freq2k, budget,
+						     r2, n2, p, &best);
 		}
 	}
 
@@ -1105,6 +1274,102 @@
 	return true;
 }
 
+struct skl_wrpll_context {
+	uint64_t min_deviation;		/* current minimal deviation */
+	uint64_t central_freq;		/* chosen central freq */
+	uint64_t dco_freq;		/* chosen dco freq */
+	unsigned int p;			/* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION	100
+#define SKL_DCO_MAX_NDEVIATION	600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+				  uint64_t central_freq,
+				  uint64_t dco_freq,
+				  unsigned int divider)
+{
+	uint64_t deviation;
+
+	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+			      central_freq);
+
+	/* positive deviation */
+	if (dco_freq >= central_freq) {
+		if (deviation < SKL_DCO_MAX_PDEVIATION &&
+		    deviation < ctx->min_deviation) {
+			ctx->min_deviation = deviation;
+			ctx->central_freq = central_freq;
+			ctx->dco_freq = dco_freq;
+			ctx->p = divider;
+		}
+	/* negative deviation */
+	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+		   deviation < ctx->min_deviation) {
+		ctx->min_deviation = deviation;
+		ctx->central_freq = central_freq;
+		ctx->dco_freq = dco_freq;
+		ctx->p = divider;
+	}
+}
+
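The deviation is expressed in units of 0.01%, which is why the +1%/-6% limits
above become 100 and 600. A quick check of the formula with hypothetical
frequencies:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t central = 9000000000ULL;	/* Hz */
		uint64_t dco = 9072000000ULL;		/* hypothetical candidate */
		uint64_t deviation = 10000 * (dco - central) / central;

		/* 80 -> +0.8%, inside the +1% (100) positive limit */
		printf("deviation = %llu\n", (unsigned long long)deviation);
		return 0;
	}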
+static void skl_wrpll_get_multipliers(unsigned int p,
+				      unsigned int *p0 /* out */,
+				      unsigned int *p1 /* out */,
+				      unsigned int *p2 /* out */)
+{
+	/* even dividers */
+	if (p % 2 == 0) {
+		unsigned int half = p / 2;
+
+		if (half == 1 || half == 2 || half == 3 || half == 5) {
+			*p0 = 2;
+			*p1 = 1;
+			*p2 = half;
+		} else if (half % 2 == 0) {
+			*p0 = 2;
+			*p1 = half / 2;
+			*p2 = 2;
+		} else if (half % 3 == 0) {
+			*p0 = 3;
+			*p1 = half / 3;
+			*p2 = 2;
+		} else if (half % 7 == 0) {
+			*p0 = 7;
+			*p1 = half / 7;
+			*p2 = 2;
+		}
+	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = p / 3;
+	} else if (p == 5 || p == 7) {
+		*p0 = p;
+		*p1 = 1;
+		*p2 = 1;
+	} else if (p == 15) {
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = 5;
+	} else if (p == 21) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 3;
+	} else if (p == 35) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 5;
+	}
+}
+
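skl_wrpll_get_multipliers() splits the overall divider p into the hardware's
P0/P1/P2 stages. For instance, p = 24 takes the even path: half = 12 is itself
even, giving p0 = 2, p1 = 6, p2 = 2, and 2 * 6 * 2 == 24. A sketch of just
that case:

	#include <assert.h>

	int main(void)
	{
		unsigned int p = 24, half = p / 2;	/* even divider */
		unsigned int p0 = 0, p1 = 0, p2 = 0;

		if (half % 2 == 0) {	/* half = 12 */
			p0 = 2;
			p1 = half / 2;	/* 6 */
			p2 = 2;
		}
		assert(p0 * p1 * p2 == p);	/* 2 * 6 * 2 == 24 */
		return 0;
	}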
 struct skl_wrpll_params {
 	uint32_t        dco_fraction;
 	uint32_t        dco_integer;
@@ -1115,7 +1380,74 @@
 	uint32_t        central_freq;
 };
 
-static void
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+				      uint64_t afe_clock,
+				      uint64_t central_freq,
+				      uint32_t p0, uint32_t p1, uint32_t p2)
+{
+	uint64_t dco_freq;
+
+	switch (central_freq) {
+	case 9600000000ULL:
+		params->central_freq = 0;
+		break;
+	case 9000000000ULL:
+		params->central_freq = 1;
+		break;
+	case 8400000000ULL:
+		params->central_freq = 3;
+	}
+
+	switch (p0) {
+	case 1:
+		params->pdiv = 0;
+		break;
+	case 2:
+		params->pdiv = 1;
+		break;
+	case 3:
+		params->pdiv = 2;
+		break;
+	case 7:
+		params->pdiv = 4;
+		break;
+	default:
+		WARN(1, "Incorrect PDiv\n");
+	}
+
+	switch (p2) {
+	case 5:
+		params->kdiv = 0;
+		break;
+	case 2:
+		params->kdiv = 1;
+		break;
+	case 3:
+		params->kdiv = 2;
+		break;
+	case 1:
+		params->kdiv = 3;
+		break;
+	default:
+		WARN(1, "Incorrect KDiv\n");
+	}
+
+	params->qdiv_ratio = p1;
+	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+	dco_freq = p0 * p1 * p2 * afe_clock;
+
+	/*
+	 * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec
+	 */
+	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+	params->dco_fraction =
+		div_u64((div_u64(dco_freq, 24) -
+			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
+
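The DCO frequency is programmed as a multiple of 24 MHz plus a fraction in
0x8000 (15-bit) units. Worked numbers, hypothetical: dco_freq = 8.1 GHz gives
dco_freq / 24 = 337500000, so dco_integer = 337 and the 500000 Hz remainder
becomes a fraction of 0x4000:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dco_freq = 8100000000ULL;	/* Hz, hypothetical */
		uint32_t dco_integer = dco_freq / 24 / 1000000;
		uint32_t dco_fraction =
			(dco_freq / 24 - dco_integer * 1000000ULL)
			* 0x8000 / 1000000;

		/* prints integer=337 fraction=0x4000 */
		printf("integer=%u fraction=0x%x\n", dco_integer, dco_fraction);
		return 0;
	}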
+static bool
 skl_ddi_calculate_wrpll(int clock /* in Hz */,
 			struct skl_wrpll_params *wrpll_params)
 {
@@ -1123,142 +1455,70 @@
 	uint64_t dco_central_freq[3] = {8400000000ULL,
 					9000000000ULL,
 					9600000000ULL};
-	uint32_t min_dco_deviation = 400;
-	uint32_t min_dco_index = 3;
-	uint32_t P0[4] = {1, 2, 3, 7};
-	uint32_t P2[4] = {1, 2, 3, 5};
-	bool found = false;
-	uint32_t candidate_p = 0;
-	uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
-	uint32_t candidate_p2[3] = {0};
-	uint32_t dco_central_freq_deviation[3];
-	uint32_t i, P1, k, dco_count;
-	bool retry_with_odd = false;
-	uint64_t dco_freq;
+	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+					     24, 28, 30, 32, 36, 40, 42, 44,
+					     48, 52, 54, 56, 60, 64, 66, 68,
+					     70, 72, 76, 78, 80, 84, 88, 90,
+					     92, 96, 98 };
+	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+	static const struct {
+		const int *list;
+		int n_dividers;
+	} dividers[] = {
+		{ even_dividers, ARRAY_SIZE(even_dividers) },
+		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
+	};
+	struct skl_wrpll_context ctx;
+	unsigned int dco, d, i;
+	unsigned int p0, p1, p2;
 
-	/* Determine P0, P1 or P2 */
-	for (dco_count = 0; dco_count < 3; dco_count++) {
-		found = false;
-		candidate_p =
-			div64_u64(dco_central_freq[dco_count], afe_clock);
-		if (retry_with_odd == false)
-			candidate_p = (candidate_p % 2 == 0 ?
-				candidate_p : candidate_p + 1);
+	skl_wrpll_context_init(&ctx);
 
-		for (P1 = 1; P1 < candidate_p; P1++) {
-			for (i = 0; i < 4; i++) {
-				if (!(P0[i] != 1 || P1 == 1))
-					continue;
+	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+			for (i = 0; i < dividers[d].n_dividers; i++) {
+				unsigned int p = dividers[d].list[i];
+				uint64_t dco_freq = p * afe_clock;
 
-				for (k = 0; k < 4; k++) {
-					if (P1 != 1 && P2[k] != 2)
-						continue;
-
-					if (candidate_p == P0[i] * P1 * P2[k]) {
-						/* Found possible P0, P1, P2 */
-						found = true;
-						candidate_p0[dco_count] = P0[i];
-						candidate_p1[dco_count] = P1;
-						candidate_p2[dco_count] = P2[k];
-						goto found;
-					}
-
-				}
+				skl_wrpll_try_divider(&ctx,
+						      dco_central_freq[dco],
+						      dco_freq,
+						      p);
+				/*
+				 * Skip the remaining dividers once we are sure
+				 * to have found the best divider; we can't
+				 * improve on a 0 deviation.
+				 */
+				if (ctx.min_deviation == 0)
+					goto skip_remaining_dividers;
 			}
 		}
 
-found:
-		if (found) {
-			dco_central_freq_deviation[dco_count] =
-				div64_u64(10000 *
-					  abs_diff((candidate_p * afe_clock),
-						   dco_central_freq[dco_count]),
-					  dco_central_freq[dco_count]);
-
-			if (dco_central_freq_deviation[dco_count] <
-				min_dco_deviation) {
-				min_dco_deviation =
-					dco_central_freq_deviation[dco_count];
-				min_dco_index = dco_count;
-			}
-		}
-
-		if (min_dco_index > 2 && dco_count == 2) {
-			retry_with_odd = true;
-			dco_count = 0;
-		}
-	}
-
-	if (min_dco_index > 2) {
-		WARN(1, "No valid values found for the given pixel clock\n");
-	} else {
-		wrpll_params->central_freq = dco_central_freq[min_dco_index];
-
-		switch (dco_central_freq[min_dco_index]) {
-		case 9600000000ULL:
-			wrpll_params->central_freq = 0;
-			break;
-		case 9000000000ULL:
-			wrpll_params->central_freq = 1;
-			break;
-		case 8400000000ULL:
-			wrpll_params->central_freq = 3;
-		}
-
-		switch (candidate_p0[min_dco_index]) {
-		case 1:
-			wrpll_params->pdiv = 0;
-			break;
-		case 2:
-			wrpll_params->pdiv = 1;
-			break;
-		case 3:
-			wrpll_params->pdiv = 2;
-			break;
-		case 7:
-			wrpll_params->pdiv = 4;
-			break;
-		default:
-			WARN(1, "Incorrect PDiv\n");
-		}
-
-		switch (candidate_p2[min_dco_index]) {
-		case 5:
-			wrpll_params->kdiv = 0;
-			break;
-		case 2:
-			wrpll_params->kdiv = 1;
-			break;
-		case 3:
-			wrpll_params->kdiv = 2;
-			break;
-		case 1:
-			wrpll_params->kdiv = 3;
-			break;
-		default:
-			WARN(1, "Incorrect KDiv\n");
-		}
-
-		wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
-		wrpll_params->qdiv_mode =
-			(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
-		dco_freq = candidate_p0[min_dco_index] *
-			candidate_p1[min_dco_index] *
-			candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
 		/*
-		 * Intermediate values are in Hz.
-		 * Divide by MHz to match bsepc
+		 * If a solution is found with an even divider, prefer
+		 * this one.
 		 */
-		wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
-		wrpll_params->dco_fraction =
-			div_u64(((div_u64(dco_freq, 24) -
-				  wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
-
+		if (d == 0 && ctx.p)
+			break;
 	}
-}
 
+	if (!ctx.p) {
+		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+		return false;
+	}
+
+	/*
+	 * gcc incorrectly warns that these may be used without being
+	 * initialized. To be fair, it's hard for it to tell.
+	 */
+	p0 = p1 = p2 = 0;
+	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+				  p0, p1, p2);
+
+	return true;
+}
 
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1541,8 @@
 
 		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-		skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+			return false;
 
 		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
 			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1293,17 +1554,14 @@
 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
 			 wrpll_params.central_freq;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
+		switch (crtc_state->port_clock / 2) {
+		case 81000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
 			break;
-		case DP_LINK_BW_2_7:
+		case 135000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
 			break;
-		case DP_LINK_BW_5_4:
+		case 270000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
 			break;
 		}
@@ -1334,6 +1592,7 @@
 
 /* bxt clock parameters */
 struct bxt_clk_div {
+	int clock;
 	uint32_t p1;
 	uint32_t p2;
 	uint32_t m2_int;
@@ -1343,14 +1602,14 @@
 };
 
 /* pre-calculated values for DP linkrates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
-	/* 162 */ {4, 2, 32, 1677722, 1, 1},
-	/* 270 */ {4, 1, 27,       0, 0, 1},
-	/* 540 */ {2, 1, 27,       0, 0, 1},
-	/* 216 */ {3, 2, 32, 1677722, 1, 1},
-	/* 243 */ {4, 1, 24, 1258291, 1, 1},
-	/* 324 */ {4, 1, 32, 1677722, 1, 1},
-	/* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+	{162000, 4, 2, 32, 1677722, 1, 1},
+	{270000, 4, 1, 27,       0, 0, 1},
+	{540000, 2, 1, 27,       0, 0, 1},
+	{216000, 3, 2, 32, 1677722, 1, 1},
+	{243000, 4, 1, 24, 1258291, 1, 1},
+	{324000, 4, 1, 32, 1677722, 1, 1},
+	{432000, 3, 1, 32, 1677722, 1, 1}
 };
 
 static bool
@@ -1363,7 +1622,7 @@
 	struct bxt_clk_div clk_div = {0};
 	int vco = 0;
 	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-	uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+	uint32_t lanestagger;
 
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		intel_clock_t best_clock;
@@ -1390,29 +1649,19 @@
 		vco = best_clock.vco;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
 			intel_encoder->type == INTEL_OUTPUT_EDP) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		int i;
 
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			clk_div = bxt_dp_clk_val[0];
-			break;
-		case DP_LINK_BW_2_7:
-			clk_div = bxt_dp_clk_val[1];
-			break;
-		case DP_LINK_BW_5_4:
-			clk_div = bxt_dp_clk_val[2];
-			break;
-		default:
-			clk_div = bxt_dp_clk_val[0];
-			DRM_ERROR("Unknown link rate\n");
+		clk_div = bxt_dp_clk_val[0];
+		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+			if (bxt_dp_clk_val[i].clock == clock) {
+				clk_div = bxt_dp_clk_val[i];
+				break;
+			}
 		}
 		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
 	}
 
-	dco_amp = 15;
-	dcoampovr_en_h = 0;
-	if (vco >= 6200000 && vco <= 6480000) {
+	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
 		int_coef = 9;
 		gain_ctl = 3;
@@ -1423,8 +1672,6 @@
 		int_coef = 11;
 		gain_ctl = 3;
 		targ_cnt = 9;
-		if (vco >= 4800000 && vco < 5400000)
-			dcoampovr_en_h = 1;
 	} else if (vco == 5400000) {
 		prop_coef = 3;
 		int_coef = 8;
@@ -1466,10 +1713,13 @@
 
 	crtc_state->dpll_hw_state.pll8 = targ_cnt;
 
-	if (dcoampovr_en_h)
-		crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-	crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+	crtc_state->dpll_hw_state.pll10 =
+		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+		| PORT_PLL_DCO_AMP_OVR_EN_H;
+
+	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
 	crtc_state->dpll_hw_state.pcsdw12 =
 		LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2049,65 @@
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-			     enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+			       enum port port, int type)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+	uint8_t iboost;
+	uint8_t dp_iboost, hdmi_iboost;
+	int n_entries;
+	u32 reg;
+
+	/* VBT may override standard boost values */
+	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+	hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_EDP) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		if (hdmi_iboost) {
+			iboost = hdmi_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else {
+		return;
+	}
+
+	/* Make sure that the requested I_boost is valid */
+	if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+		DRM_ERROR("Invalid I_boost value %u\n", iboost);
+		return;
+	}
+
+	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	reg &= ~BALANCE_LEG_MASK(port);
+	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+	if (iboost)
+		reg |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2167,73 @@
 	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
 }
 
+static uint32_t translate_signal_level(int signal_levels)
+{
+	uint32_t level;
+
+	switch (signal_levels) {
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+			      signal_levels);
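+		/* fall through to the lowest supported level */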
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 0;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 1;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 2;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+		level = 3;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 5;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 6;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 7;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 8;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 9;
+		break;
+	}
+
+	return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dport->base.base.dev;
+	struct intel_encoder *encoder = &dport->base;
+	uint8_t train_set = intel_dp->train_set[0];
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	enum port port = dport->port;
+	uint32_t level;
+
+	level = translate_signal_level(signal_levels);
+
+	if (IS_SKYLAKE(dev))
+		skl_ddi_set_iboost(dev, level, port, encoder->type);
+	else if (IS_BROXTON(dev))
+		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+	return DDI_BUF_TRANS_SELECT(level);
+}
+
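ddi_signal_levels() maps the DP training byte to a buffer translation index.
A sketch of the decode, assuming the usual DPCD layout (swing in bits 1:0,
pre-emphasis in bits 4:3); the two defines here are stand-ins for the drm
DP_TRAIN_* masks, and the train_set value is hypothetical:

	#include <stdio.h>

	#define VSWING_MASK	0x03	/* assumed: DP_TRAIN_VOLTAGE_SWING_MASK */
	#define PREEMPH_SHIFT	3	/* assumed: DP_TRAIN_PRE_EMPHASIS_SHIFT */

	int main(void)
	{
		unsigned char train_set = 0x09;	/* hypothetical */

		/* swing level 1 + pre-emphasis level 1 -> table index 5 */
		printf("vswing=%u pre-emph=%u\n",
		       train_set & VSWING_MASK,
		       (train_set >> PREEMPH_SHIFT) & 0x3);
		return 0;
	}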
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2778,7 @@
 
 	temp = I915_READ(BXT_PORT_PLL(port, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-	temp |= (5 << 1);
+	temp |= pll->config.hw_state.pll9;
 	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
 
 	temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2791,8 @@
 	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
 	temp |= PORT_PLL_RECALIBRATE;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-	/* Enable 10 bit clock */
-	temp |= PORT_PLL_10BIT_CLK_ENABLE;
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	temp |= pll->config.hw_state.ebb4;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
 	/* Enable PLL */
@@ -2469,13 +2843,38 @@
 		return false;
 
 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 &= PORT_PLL_M2_MASK;
+
 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 &= PORT_PLL_N_MASK;
+
 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+			  PORT_PLL_INT_COEFF_MASK |
+			  PORT_PLL_GAIN_CTL_MASK;
+
 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+			   PORT_PLL_DCO_AMP_MASK;
+
 	/*
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2885,7 @@
 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
 				 hw_state->pcsdw12,
 				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
 	return true;
 }
@@ -2510,7 +2910,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
-	int cdclk_freq;
 
 	if (IS_SKYLAKE(dev))
 		skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2918,10 @@
 	else
 		hsw_shared_dplls_init(dev_priv);
 
-	cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
 	if (IS_SKYLAKE(dev)) {
+		int cdclk_freq;
+
+		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 		dev_priv->skl_boot_cdclk = cdclk_freq;
 		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
 			DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3017,6 @@
 	I915_WRITE(_FDI_RXA_CTL, val);
 }
 
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-	int type = intel_dig_port->base.type;
-
-	if (type != INTEL_OUTPUT_DISPLAYPORT &&
-	    type != INTEL_OUTPUT_EDP &&
-	    type != INTEL_OUTPUT_UNKNOWN) {
-		return;
-	}
-
-	intel_dp_hot_plug(intel_encoder);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -2793,10 +3178,9 @@
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respecting it\n",
 			      port_name(port));
-		init_hdmi = true;
-		init_dp = true;
+		return;
 	}
 
 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3209,13 @@
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
 	if (init_dp) {
 		if (!intel_ddi_init_dp_connector(intel_dig_port))
 			goto err;
 
 		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-		dev_priv->hpd_irq_port[port] = intel_dig_port;
+		dev_priv->hotplug.irq_port[port] = intel_dig_port;
 	}
 
 	/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 87476ff..ca9278b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -86,9 +86,6 @@
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config);
 
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore);
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
 	struct intel_crtc_state *crtc_state);
 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
 			   int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
-	if (!connector->mst_port)
-		return connector->encoder;
-	else
-		return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
 
 typedef struct {
 	int	min, max;
@@ -413,7 +401,7 @@
 static const intel_limit_t intel_limits_bxt = {
 	/* FIXME: find real dot limits */
 	.dot = { .min = 0, .max = INT_MAX },
-	.vco = { .min = 4800000, .max = 6480000 },
+	.vco = { .min = 4800000, .max = 6700000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	/* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@
 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
 {
-	clock->m = clock->m1 * clock->m2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+	return drm_atomic_crtc_needs_modeset(state);
 }
 
 /**
@@ -561,15 +545,25 @@
 	return limit;
 }
 
+/*
+ * Platform-specific helpers to calculate the port PLL loopback (clock.m)
+ * and post-divider (clock.p) values, plus the pre- (clock.vco) and
+ * post-divided fast (clock.dot) clock rates. The fast dot clock is fed to
+ * the port's IO logic. The helpers' return value is the rate of the clock
+ * that is fed to the display engine's pipe, which can be the fast dot
+ * clock rate itself or a divided-down version of it.
+ */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 			clock->n << 22);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
 }
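chv_calc_dpll_params() is also what bxt_calc_pll_link() earlier in this pull
feeds with a 100 MHz refclk; m2 is a 22.22 fixed-point value, hence the
n << 22 divisor, and the final /5 converts the fast dot clock to the link
rate. Reproducing the DP 2.7 GHz case with the bxt_dp_clk_val numbers (the
driver rounds to closest; these values divide exactly):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 270000 kHz entry: p1 = 4, p2 = 1, m2_int = 27, n = 1 */
		uint64_t refclk = 100000;		/* kHz */
		uint64_t m = 2 * (27ULL << 22);		/* m1 * m2 (22.22 fixed) */
		uint64_t n = 1, p = 4 * 1;
		uint64_t vco = refclk * m / (n << 22);	/* 5400000 kHz */
		uint64_t dot = vco / p / 5;

		printf("port clock = %llu kHz\n",	/* 270000 */
		       (unsigned long long)dot);
		return 0;
	}

The same entry cross-checks against the driver's forward computation,
vco = clock * 10 / 2 * p1 * p2 = 270000 * 10 / 2 * 4 = 5400000 kHz.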
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@
 	return true;
 }
 
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
-		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, intel_clock_t *match_clock,
-		    intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+		   const struct intel_crtc_state *crtc_state,
+		   int target)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock;
-	int err = target;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 
 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
@@ -657,18 +663,31 @@
 		 * single/dual channel state, if we even can.
 		 */
 		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 		else
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 		else
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 	}
+}
+
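i9xx_select_p2_div() centralizes the p2 choice that the three find_best_dpll
variants below used to duplicate: dual-link LVDS keeps the fast divider,
single-link the slow one, and everything else switches on the dot-clock
limit. A sketch of the same decision with hypothetical limit values; the
struct and function names are stand-ins, not the driver's:

	#include <stdio.h>

	struct p2_limit { int dot_limit, slow, fast; };

	static int select_p2(const struct p2_limit *l, int target,
			     int is_lvds, int dual_link)
	{
		if (is_lvds)	/* rely on the current channel mode */
			return dual_link ? l->fast : l->slow;
		return target < l->dot_limit ? l->slow : l->fast;
	}

	int main(void)
	{
		struct p2_limit l = { 165000, 14, 7 };	/* hypothetical */

		/* non-LVDS below the limit -> slow divider (14) */
		printf("p2 = %d\n", select_p2(&l, 112000, 0, 0));
		return 0;
	}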
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+		    struct intel_crtc_state *crtc_state,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -708,30 +727,14 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		/*
-		 * For LVDS just rely on its current settings for dual-channel.
-		 * We haven't figured out how to reliably set up different
-		 * single/dual channel state, if we even can.
-		 */
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					pineview_clock(refclk, &clock);
+					pnv_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -769,28 +772,17 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int max_n;
-	bool found;
+	bool found = false;
 	/* approximately equals target * 0.00585 */
 	int err_most = (target >> 8) + (target >> 9);
-	found = false;
-
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	max_n = limit->n.max;
 	/* based on hardware requirement, prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@
 				     clock.p1 >= limit->p1.min; clock.p1--) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -893,7 +885,7 @@
 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 								     refclk * clock.m1);
 
-					vlv_clock(refclk, &clock);
+					vlv_calc_dpll_params(refclk, &clock);
 
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
@@ -956,7 +948,7 @@
 
 			clock.m2 = m2;
 
-			chv_clock(refclk, &clock);
+			chv_calc_dpll_params(refclk, &clock);
 
 			if (!intel_PLL_is_valid(dev, limit, &clock))
 				continue;
@@ -1026,7 +1018,7 @@
 		line_mask = DSL_LINEMASK_GEN3;
 
 	line1 = I915_READ(reg) & line_mask;
-	mdelay(5);
+	msleep(5);
 	line2 = I915_READ(reg) & line_mask;
 
 	return line1 == line2;
@@ -1106,6 +1098,9 @@
 		case PORT_D:
 			bit = SDE_PORTD_HOTPLUG_CPT;
 			break;
+		case PORT_E:
+			bit = SDE_PORTE_HOTPLUG_SPT;
+			break;
 		default:
 			return true;
 		}
@@ -1694,7 +1689,7 @@
 	int count = 0;
 
 	for_each_intel_crtc(dev, crtc)
-		count += crtc->active &&
+		count += crtc->base.state->active &&
 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
 	return count;
@@ -1775,7 +1770,7 @@
 	/* Disable DVO 2x clock on both PLLs if necessary */
 	if (IS_I830(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
-	    intel_num_dvo_pipes(dev) == 1) {
+	    !intel_num_dvo_pipes(dev)) {
 		I915_WRITE(DPLL(PIPE_B),
 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
 		I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1785,13 @@
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	I915_WRITE(DPLL(pipe), 0);
+	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
 	POSTING_READ(DPLL(pipe));
 }
 
 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	u32 val = 0;
+	u32 val;
 
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1800,9 @@
 	 * Leave integrated clock source and reference clock enabled for pipe B.
 	 * The latter is needed for VGA hotplug / manual detection.
 	 */
+	val = DPLL_VGA_MODE_DIS;
 	if (pipe == PIPE_B)
-		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
 
@@ -1821,7 +1817,8 @@
 	assert_pipe_disabled(dev_priv, pipe);
 
 	/* Set PLL en = 0 */
-	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+	val = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 	I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1939,13 @@
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
 	/* PCH only available on ILK+ */
-	BUG_ON(INTEL_INFO(dev)->gen < 5);
-	if (WARN_ON(pll == NULL))
-	       return;
+	if (INTEL_INFO(dev)->gen < 5)
+		return;
 
-	if (WARN_ON(pll->config.crtc_mask == 0))
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
 		return;
 
 	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2003,15 @@
 
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/*
-		 * make the BPC in transcoder be consistent with
-		 * that in pipeconf reg.
+		 * Keep the BPC in the transcoder consistent with
+		 * that in the pipeconf reg. For HDMI we must use 8bpc
+		 * here for both 8bpc and 12bpc.
 		 */
 		val &= ~PIPECONF_BPC_MASK;
-		val |= pipeconf_val & PIPECONF_BPC_MASK;
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+			val |= PIPECONF_8BPC;
+		else
+			val |= pipeconf_val & PIPECONF_BPC_MASK;
 	}
 
 	val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2125,8 @@
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
 	assert_planes_disabled(dev_priv, pipe);
 	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2186,8 @@
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
 	/*
 	 * Make sure planes won't keep trying to pump pixels to us,
 	 * or we might hang the display.
@@ -2211,28 +2218,6 @@
 		intel_wait_for_pipe_off(crtc);
 }
 
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane:  plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
-					  struct drm_crtc *crtc)
-{
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	/* If the pipe isn't enabled, we can't pump pixels and may hang */
-	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-	to_intel_plane_state(plane->state)->visible = true;
-
-	dev_priv->display.update_primary_plane(crtc, plane->fb,
-					       crtc->x, crtc->y);
-}
-
 static bool need_vtd_wa(struct drm_device *dev)
 {
 #ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2287,7 @@
 			const struct drm_plane_state *plane_state)
 {
 	struct intel_rotation_info *info = &view->rotation_info;
+	unsigned int tile_height, tile_pitch;
 
 	*view = i915_ggtt_view_normal;
 
@@ -2318,14 +2304,35 @@
 	info->pitch = fb->pitches[0];
 	info->fb_modifier = fb->modifier[0];
 
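+	/*
+	 * Each tile maps to exactly one GTT page, so round the fb up to
+	 * whole tiles in both dimensions to size the rotated mapping.
+	 */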
+	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+					fb->modifier[0]);
+	tile_pitch = PAGE_SIZE / tile_height;
+	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
 	return 0;
 }
 
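+/*
+ * Minimum GTT alignment required for a linearly mapped (untiled)
+ * scanout buffer on the given platform.
+ */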
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return 256 * 1024;
+	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+		 IS_VALLEYVIEW(dev_priv))
+		return 128 * 1024;
+	else if (INTEL_INFO(dev_priv)->gen >= 4)
+		return 4 * 1024;
+	else
+		return 0;
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
 			   const struct drm_plane_state *plane_state,
-			   struct intel_engine_cs *pipelined)
+			   struct intel_engine_cs *pipelined,
+			   struct drm_i915_gem_request **pipelined_request)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2345,7 @@
 
 	switch (fb->modifier[0]) {
 	case DRM_FORMAT_MOD_NONE:
-		if (INTEL_INFO(dev)->gen >= 9)
-			alignment = 256 * 1024;
-		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-			alignment = 128 * 1024;
-		else if (INTEL_INFO(dev)->gen >= 4)
-			alignment = 4 * 1024;
-		else
-			alignment = 64 * 1024;
+		alignment = intel_linear_alignment(dev_priv);
 		break;
 	case I915_FORMAT_MOD_X_TILED:
 		if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2390,7 @@
 
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
-						   &view);
+						   pipelined_request, &view);
 	if (ret)
 		goto err_interruptible;
 
@@ -2400,7 +2400,18 @@
 	 * a fence as the cost is not that onerous.
 	 */
 	ret = i915_gem_object_get_fence(obj);
-	if (ret)
+	if (ret == -EDEADLK) {
+		/*
+		 * -EDEADLK means there are no free fences and
+		 * no pending flips.
+		 *
+		 * This is propagated to atomic, but it uses
+		 * -EDEADLK to force a locking recovery, so
+		 * change the returned error to -EBUSY.
+		 */
+		ret = -EBUSY;
+		goto err_unpin;
+	} else if (ret)
 		goto err_unpin;
 
 	i915_gem_object_pin_fence(obj);
@@ -2435,7 +2446,8 @@
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int cpp,
 					     unsigned int pitch)
@@ -2451,12 +2463,13 @@
 
 		return tile_rows * pitch * 8 + tiles * 4096;
 	} else {
+		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
 		unsigned int offset;
 
 		offset = *y * pitch + *x * cpp;
-		*y = 0;
-		*x = (offset & 4095) / cpp;
-		return offset & -4096;
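+		/*
+		 * Return the alignment-sized base of the offset and fold
+		 * the remainder back into x/y coordinates.
+		 */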
+		*y = (offset & alignment) / pitch;
+		*x = ((offset & alignment) - *y * pitch) / cpp;
+		return offset & ~alignment;
 	}
 }
 
@@ -2583,6 +2596,7 @@
 	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
+	struct drm_plane_state *plane_state = primary->state;
 	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
@@ -2622,15 +2636,23 @@
 	return;
 
 valid_fb:
+	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_w = fb->width << 16;
+	plane_state->src_h = fb->height << 16;
+
+	plane_state->crtc_x = plane_state->crtc_y = 0;
+	plane_state->crtc_w = fb->width;
+	plane_state->crtc_h = fb->height;
+
 	obj = intel_fb_obj(fb);
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dev_priv->preserve_bios_swizzle = true;
 
-	primary->fb = fb;
-	primary->state->crtc = &intel_crtc->base;
-	primary->crtc = &intel_crtc->base;
-	update_state_fb(primary);
-	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+	drm_framebuffer_reference(fb);
+	primary->fb = primary->state->fb = fb;
+	primary->crtc = primary->state->crtc = &intel_crtc->base;
+	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2747,8 @@
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+			intel_gen4_compute_page_offset(dev_priv,
+						       &x, &y, obj->tiling_mode,
 						       pixel_size,
 						       fb->pitches[0]);
 		linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2849,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	intel_crtc->dspaddr_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size,
 					       fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2928,32 @@
 	return i915_gem_obj_ggtt_offset_view(obj, view);
 }
 
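+/* Unbind one pipe scaler by clearing its control and window registers. */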
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+	DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+		intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
 	struct intel_crtc_scaler_state *scaler_state;
 	int i;
 
-	if (!intel_crtc || !intel_crtc->config)
-		return;
-
-	dev = intel_crtc->base.dev;
-	dev_priv = dev->dev_private;
 	scaler_state = &intel_crtc->config->scaler_state;
 
 	/* loop through and disable scalers that aren't in use */
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
-		if (!scaler_state->scalers[i].in_use) {
-			I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
-			DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
-				intel_crtc->base.base.id, intel_crtc->pipe, i);
-		}
+		if (!scaler_state->scalers[i].in_use)
+			skl_detach_scaler(intel_crtc, i);
 	}
 }
 
@@ -3132,8 +3156,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->display.disable_fbc)
-		dev_priv->display.disable_fbc(dev);
+	if (dev_priv->fbc.disable_fbc)
+		dev_priv->fbc.disable_fbc(dev_priv);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3176,24 +3200,8 @@
 	}
 }
 
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (!crtc->active)
-		return;
-
-	intel_crtc_disable_planes(&crtc->base);
-	dev_priv->display.crtc_disable(&crtc->base);
-	dev_priv->display.crtc_enable(&crtc->base);
-	intel_crtc_enable_planes(&crtc->base);
-}
-
 void intel_prepare_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc;
-
 	/* no reset support for gen2 */
 	if (IS_GEN2(dev))
 		return;
@@ -3203,18 +3211,11 @@
 		return;
 
 	drm_modeset_lock_all(dev);
-
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
-	}
+	intel_display_suspend(dev);
 }
 
 void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3259,7 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 
 	intel_hpd_init(dev_priv);
 
@@ -4200,34 +4201,16 @@
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	if (pll == NULL)
-		return;
-
-	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
-		WARN(1, "bad %s crtc mask\n", pll->name);
-		return;
-	}
-
-	pll->config.crtc_mask &= ~(1 << crtc->pipe);
-	if (pll->config.crtc_mask == 0) {
-		WARN_ON(pll->on);
-		WARN_ON(pll->active);
-	}
-
-	crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll;
+	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
 
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 		i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4219,7 @@
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			      crtc->base.base.id, pll->name);
 
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4256,7 +4239,7 @@
 		pll = &dev_priv->shared_dplls[i];
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			crtc->base.base.id, pll->name);
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4265,15 +4248,15 @@
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
-		if (pll->new_config->crtc_mask == 0)
+		if (shared_dpll[i].crtc_mask == 0)
 			continue;
 
 		if (memcmp(&crtc_state->dpll_hw_state,
-			   &pll->new_config->hw_state,
-			   sizeof(pll->new_config->hw_state)) == 0) {
+			   &shared_dpll[i].hw_state,
+			   sizeof(crtc_state->dpll_hw_state)) == 0) {
 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
 				      crtc->base.base.id, pll->name,
-				      pll->new_config->crtc_mask,
+				      shared_dpll[i].crtc_mask,
 				      pll->active);
 			goto found;
 		}
@@ -4282,7 +4265,7 @@
 	/* Ok no matching timings, maybe there's a free one? */
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-		if (pll->new_config->crtc_mask == 0) {
+		if (shared_dpll[i].crtc_mask == 0) {
 			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
 				      crtc->base.base.id, pll->name);
 			goto found;
@@ -4292,83 +4275,33 @@
 	return NULL;
 
 found:
-	if (pll->new_config->crtc_mask == 0)
-		pll->new_config->hw_state = crtc_state->dpll_hw_state;
+	if (shared_dpll[i].crtc_mask == 0)
+		shared_dpll[i].hw_state =
+			crtc_state->dpll_hw_state;
 
 	crtc_state->shared_dpll = i;
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
 			 pipe_name(crtc->pipe));
 
-	pll->new_config->crtc_mask |= 1 << crtc->pipe;
+	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
 
 	return pll;
 }
 
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
-					  unsigned clear_pipes)
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
 {
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_shared_dpll_config *shared_dpll;
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
 
+	if (!to_intel_atomic_state(state)->dpll_set)
+		return;
+
+	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-
-		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
-					  GFP_KERNEL);
-		if (!pll->new_config)
-			goto cleanup;
-
-		pll->new_config->crtc_mask &= ~clear_pipes;
-	}
-
-	return 0;
-
-cleanup:
-	while (--i >= 0) {
-		pll = &dev_priv->shared_dplls[i];
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-
-	return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		pll->config = *pll->new_config;
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		kfree(pll->new_config);
-		pll->new_config = NULL;
+		pll->config = shared_dpll[i];
 	}
 }
 
@@ -4386,62 +4319,16 @@
 	}
 }
 
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
-	struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
-	struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
-	int force_detach)
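+/*
+ * Stage a scaler allocation or release for one scaler user (the crtc
+ * itself or one of its planes) in the given crtc state.
+ */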
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  int src_w, int src_h, int dst_w, int dst_h)
 {
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
-	int idx;
-	int src_w, src_h, dst_w, dst_h;
-	int *scaler_id;
-	struct drm_framebuffer *fb;
-	struct intel_crtc_scaler_state *scaler_state;
-	unsigned int rotation;
-
-	if (!intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-
-	idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
-	fb = intel_plane ? plane_state->base.fb : NULL;
-
-	if (intel_plane) {
-		src_w = drm_rect_width(&plane_state->src) >> 16;
-		src_h = drm_rect_height(&plane_state->src) >> 16;
-		dst_w = drm_rect_width(&plane_state->dst);
-		dst_h = drm_rect_height(&plane_state->dst);
-		scaler_id = &plane_state->scaler_id;
-		rotation = plane_state->base.rotation;
-	} else {
-		struct drm_display_mode *adjusted_mode =
-			&crtc_state->base.adjusted_mode;
-		src_w = crtc_state->pipe_src_w;
-		src_h = crtc_state->pipe_src_h;
-		dst_w = adjusted_mode->hdisplay;
-		dst_h = adjusted_mode->vdisplay;
-		scaler_id = &scaler_state->scaler_id;
-		rotation = DRM_ROTATE_0;
-	}
 
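+	/* 90/270 rotation swaps the source axes relative to the destination */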
 	need_scaling = intel_rotation_90_or_270(rotation) ?
 		(src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4344,14 @@
 	 * update to free the scaler is done in plane/panel-fit programming.
 	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
 	 */
-	if (force_detach || !need_scaling || (intel_plane &&
-		(!fb || !plane_state->visible))) {
+	if (force_detach || !need_scaling) {
 		if (*scaler_id >= 0) {
-			scaler_state->scaler_users &= ~(1 << idx);
+			scaler_state->scaler_users &= ~(1 << scaler_user);
 			scaler_state->scalers[*scaler_id].in_use = 0;
 
-			DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
-				"crtc_state = %p scaler_users = 0x%x\n",
-				intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
-				intel_plane ? intel_plane->base.base.id :
-				intel_crtc->base.base.id, crtc_state,
+			DRM_DEBUG_KMS("scaler_user index %u.%u: "
+				"Staged freeing scaler id %d scaler_users = 0x%x\n",
+				intel_crtc->pipe, scaler_user, *scaler_id,
 				scaler_state->scaler_users);
 			*scaler_id = -1;
 		}
@@ -4480,55 +4364,123 @@
 
 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
-		DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
 			"size is out of scaler range\n",
-			intel_plane ? "PLANE" : "CRTC",
-			intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-			intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
 		return -EINVAL;
 	}
 
+	/* mark this plane as a scaler user in crtc_state */
+	scaler_state->scaler_users |= (1 << scaler_user);
+	DRM_DEBUG_KMS("scaler_user index %u.%u: "
+		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+		scaler_state->scaler_users);
+
+	return 0;
+}
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc state
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&state->base.adjusted_mode;
+
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		state->pipe_src_w, state->pipe_src_h,
+		adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc state
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+				   struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *intel_plane =
+		to_intel_plane(plane_state->base.plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	int ret;
+	bool force_detach = !fb || !plane_state->visible;
+
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_crtc->pipe,
+		      drm_plane_index(&intel_plane->base));
+
+	ret = skl_update_scaler(crtc_state, force_detach,
+				drm_plane_index(&intel_plane->base),
+				&plane_state->scaler_id,
+				plane_state->base.rotation,
+				drm_rect_width(&plane_state->src) >> 16,
+				drm_rect_height(&plane_state->src) >> 16,
+				drm_rect_width(&plane_state->dst),
+				drm_rect_height(&plane_state->dst));
+
+	if (ret || plane_state->scaler_id < 0)
+		return ret;
+
 	/* check colorkey */
-	if (WARN_ON(intel_plane &&
-		intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
-		DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
-			intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+			      intel_plane->base.base.id);
 		return -EINVAL;
 	}
 
 	/* Check src format */
-	if (intel_plane) {
-		switch (fb->pixel_format) {
-		case DRM_FORMAT_RGB565:
-		case DRM_FORMAT_XBGR8888:
-		case DRM_FORMAT_XRGB8888:
-		case DRM_FORMAT_ABGR8888:
-		case DRM_FORMAT_ARGB8888:
-		case DRM_FORMAT_XRGB2101010:
-		case DRM_FORMAT_XBGR2101010:
-		case DRM_FORMAT_YUYV:
-		case DRM_FORMAT_YVYU:
-		case DRM_FORMAT_UYVY:
-		case DRM_FORMAT_VYUY:
-			break;
-		default:
-			DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
-				intel_plane->base.base.id, fb->base.id, fb->pixel_format);
-			return -EINVAL;
-		}
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		break;
+	default:
+		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		return -EINVAL;
 	}
 
-	/* mark this plane as a scaler user in crtc_state */
-	scaler_state->scaler_users |= (1 << idx);
-	DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
-		"crtc_state = %p scaler_users = 0x%x\n",
-		intel_plane ? "PLANE" : "CRTC",
-		intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-		src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
 	return 0;
 }
 
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < crtc->num_scalers; i++)
+		skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4538,13 +4490,6 @@
 
 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
 
-	/* To update pfit, first update scaler state */
-	skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
-	intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
-	skl_detach_scalers(crtc);
-	if (!enable)
-		return;
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
@@ -4584,20 +4529,6 @@
 	}
 }
 
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
-	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
-
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
-		intel_plane = to_intel_plane(plane);
-		if (intel_plane->pipe == pipe)
-			intel_plane_restore(&intel_plane->base);
-	}
-}
-
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4599,7 @@
 	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
-	if (!crtc->state->enable || !intel_crtc->active)
+	if (!crtc->state->active)
 		return;
 
 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4686,6 @@
 	 */
 	hsw_enable_ips(intel_crtc);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
 	/*
 	 * Gen2 reports pipe underruns whenever all planes are disabled.
 	 * So don't enable underrun reporting before at least some planes
@@ -4810,13 +4737,11 @@
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev)) {
 		intel_set_memory_cxsr(dev_priv, false);
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->fbc.crtc == intel_crtc)
-		intel_fbc_disable(dev);
-	mutex_unlock(&dev->struct_mutex);
+		dev_priv->wm.vlv.cxsr = false;
+		intel_wait_for_vblank(dev, pipe);
+	}
 
 	/*
 	 * FIXME IPS should be fine as long as one plane is
@@ -4827,49 +4752,83 @@
 	hsw_disable_ips(intel_crtc);
 }
 
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
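+/* Flush out the side effects staged in crtc->atomic after a plane update. */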
+static void intel_post_plane_update(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_plane *plane;
 
-	intel_enable_primary_hw_plane(crtc->primary, crtc);
-	intel_enable_sprite_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
+	if (atomic->wait_vblank)
+		intel_wait_for_vblank(dev, crtc->pipe);
 
-	intel_post_enable_primary(crtc);
+	intel_frontbuffer_flip(dev, atomic->fb_bits);
 
-	/*
-	 * FIXME: Once we grow proper nuclear flip support out of this we need
-	 * to compute the mask of flip planes precisely. For the time being
-	 * consider this a flip to a NULL plane.
-	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	if (atomic->disable_cxsr)
+		crtc->wm.cxsr_allowed = true;
+
+	if (crtc->atomic.update_wm_post)
+		intel_update_watermarks(&crtc->base);
+
+	if (atomic->update_fbc)
+		intel_fbc_update(dev_priv);
+
+	if (atomic->post_enable_primary)
+		intel_post_enable_primary(&crtc->base);
+
+	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+		intel_update_sprite_watermarks(plane, &crtc->base,
+					       0, 0, 0, false, false);
+
+	memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
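+/* Apply the preparation steps staged in crtc->atomic before the update. */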
+static void intel_pre_plane_update(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_plane *p;
+
+	/* Track fb's for any planes being disabled */
+	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+		struct intel_plane *plane = to_intel_plane(p);
+
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+				  plane->frontbuffer_bit);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (atomic->wait_for_flips)
+		intel_crtc_wait_for_pending_flips(&crtc->base);
+
+	if (atomic->disable_fbc)
+		intel_fbc_disable_crtc(crtc);
+
+	if (crtc->atomic.disable_ips)
+		hsw_disable_ips(crtc);
+
+	if (atomic->pre_disable_primary)
+		intel_pre_disable_primary(&crtc->base);
+
+	if (atomic->disable_cxsr) {
+		crtc->wm.cxsr_allowed = false;
+		intel_set_memory_cxsr(dev_priv, false);
+	}
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
+	struct drm_plane *p;
 	int pipe = intel_crtc->pipe;
 
-	if (!intel_crtc->active)
-		return;
-
-	intel_crtc_wait_for_pending_flips(crtc);
-
-	intel_pre_disable_primary(crtc);
-
 	intel_crtc_dpms_overlay_disable(intel_crtc);
-	for_each_intel_plane(dev, intel_plane) {
-		if (intel_plane->pipe == pipe) {
-			struct drm_crtc *from = intel_plane->base.crtc;
 
-			intel_plane->disable_plane(&intel_plane->base,
-						   from ?: crtc, true);
-		}
-	}
+	drm_for_each_plane_mask(p, dev, plane_mask)
+		to_intel_plane(p)->disable_plane(p, crtc);
 
 	/*
 	 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4887,9 +4846,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc->config->has_pch_encoder)
@@ -4956,46 +4913,17 @@
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
-	/* We want to get the other_active_crtc only if there's only 1 other
-	 * active crtc. */
-	for_each_intel_crtc(dev, crtc_it) {
-		if (!crtc_it->active || crtc_it == crtc)
-			continue;
-
-		if (other_active_crtc)
-			return;
-
-		other_active_crtc = crtc_it;
-	}
-	if (!other_active_crtc)
-		return;
-
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
+	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5036,7 +4964,7 @@
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 1);
+		skylake_pfit_enable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_enable(intel_crtc);
 	else
@@ -5070,7 +4998,11 @@
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
-	haswell_mode_set_planes_workaround(intel_crtc);
+	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+	}
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5097,9 +5029,6 @@
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
 
@@ -5138,18 +5067,11 @@
 			I915_WRITE(PCH_DPLL_SEL, temp);
 		}
 
-		/* disable PCH DPLL */
-		intel_disable_shared_dpll(intel_crtc);
-
 		ironlake_fdi_pll_disable(intel_crtc);
 	}
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5160,9 +5082,6 @@
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
@@ -5182,7 +5101,7 @@
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 0);
+		skylake_scaler_disable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_disable(intel_crtc);
 	else
@@ -5201,22 +5120,8 @@
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (intel_crtc_to_shared_dpll(intel_crtc))
-		intel_disable_shared_dpll(intel_crtc);
 }
 
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	intel_put_shared_dpll(intel_crtc);
-}
-
-
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -5252,6 +5157,8 @@
 		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
 	case PORT_D:
 		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+	case PORT_E:
+		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
 	default:
 		WARN_ON_ONCE(1);
 		return POWER_DOMAIN_PORT_OTHER;
@@ -5298,6 +5205,9 @@
 	unsigned long mask;
 	enum transcoder transcoder;
 
+	if (!crtc->state->active)
+		return 0;
+
 	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5312,45 +5222,131 @@
 	return mask;
 }
 
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum intel_display_power_domain domain;
+	unsigned long domains, new_domains, old_domains;
+
+	old_domains = intel_crtc->enabled_power_domains;
+	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
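+	/* Grab newly required domains and report the ones to be released. */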
+	domains = new_domains & ~old_domains;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_get(dev_priv, domain);
+
+	return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+				      unsigned long domains)
+{
+	enum intel_display_power_domain domain;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+}
+
 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-	struct intel_crtc *crtc;
+	unsigned long put_domains[I915_MAX_PIPES] = {};
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	/*
-	 * First get all needed power domains, then put all unneeded, to avoid
-	 * any unnecessary toggling of the power wells.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
-
-		if (!crtc->base.state->enable)
-			continue;
-
-		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
-
-		for_each_power_domain(domain, pipe_domains[crtc->pipe])
-			intel_display_power_get(dev_priv, domain);
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (needs_modeset(crtc->state))
+			put_domains[to_intel_crtc(crtc)->pipe] =
+				modeset_get_crtc_power_domains(crtc);
 	}
 
-	if (dev_priv->display.modeset_global_resources)
-		dev_priv->display.modeset_global_resources(state);
+	if (dev_priv->display.modeset_commit_cdclk) {
+		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
 
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
-
-		for_each_power_domain(domain, crtc->enabled_power_domains)
-			intel_display_power_put(dev_priv, domain);
-
-		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+		if (cdclk != dev_priv->cdclk_freq &&
+		    !WARN_ON(!state->allow_modeset))
+			dev_priv->display.modeset_commit_cdclk(state);
 	}
 
-	intel_display_set_init_power(dev_priv, false);
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		if (put_domains[i])
+			modeset_put_power_domains(dev_priv, put_domains[i]);
 }
 
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_SKYLAKE(dev)) {
+		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+
+		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+			dev_priv->max_cdclk_freq = 675000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+			dev_priv->max_cdclk_freq = 540000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+			dev_priv->max_cdclk_freq = 450000;
+		else
+			dev_priv->max_cdclk_freq = 337500;
+	} else if (IS_BROADWELL(dev))  {
+		/*
+		 * FIXME with extra cooling we can allow
+	 * 540 MHz for ULX and 675 MHz for ULT.
+		 * How can we know if extra cooling is
+		 * available? PCI ID, VTB, something else?
+		 */
+		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULX(dev))
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULT(dev))
+			dev_priv->max_cdclk_freq = 540000;
+		else
+			dev_priv->max_cdclk_freq = 675000;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 320000;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 400000;
+	} else {
+		/* otherwise assume cdclk is fixed */
+		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
+	}
+
+	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+			 dev_priv->max_cdclk_freq);
+}
+
+static void intel_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+			 dev_priv->cdclk_freq);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
+	}
+
+	if (dev_priv->max_cdclk_freq == 0)
+		intel_update_max_cdclk(dev);
+}
+
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t divider;
@@ -5466,7 +5462,7 @@
 		return;
 	}
 
-	dev_priv->cdclk_freq = frequency;
+	intel_update_cdclk(dev);
 }
 
 void broxton_init_cdclk(struct drm_device *dev)
@@ -5641,6 +5637,7 @@
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 {
+	struct drm_device *dev = dev_priv->dev;
 	u32 freq_select, pcu_ack;
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5681,6 +5678,8 @@
 	mutex_lock(&dev_priv->rps.hw_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
 }
 
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5714,16 +5713,13 @@
 	/* enable PG1 and Misc I/O */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
-	/* DPLL0 already enabed !? */
-	if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
-		DRM_DEBUG_DRIVER("DPLL0 already running\n");
-		return;
+	/* DPLL0 not enabled (happens on early BIOS versions) */
+	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+		/* enable DPLL0 */
+		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+		skl_dpll0_enable(dev_priv, required_vco);
 	}
 
-	/* enable DPLL0 */
-	required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-	skl_dpll0_enable(dev_priv, required_vco);
-
 	/* set CDCLK to the frequency the BIOS chose */
 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
 
@@ -5751,22 +5747,6 @@
 	return vco_freq[hpll_freq] * 1000;
 }
 
-static void vlv_update_cdclk(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-			 dev_priv->cdclk_freq);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
@@ -5830,7 +5810,7 @@
 
 	mutex_unlock(&dev_priv->sb_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5871,7 +5851,7 @@
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5934,11 +5914,7 @@
 	int max_pixclk = 0;
 
 	for_each_intel_crtc(dev, intel_crtc) {
-		if (state)
-			crtc_state =
-				intel_atomic_get_crtc_state(state, intel_crtc);
-		else
-			crtc_state = intel_crtc->config;
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
@@ -5952,39 +5928,32 @@
 	return max_pixclk;
 }
 
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int max_pixclk = intel_mode_max_pixclk(state->dev, state);
-	int cdclk, i;
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
 	if (max_pixclk < 0)
 		return max_pixclk;
 
-	if (IS_VALLEYVIEW(dev_priv))
-		cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-	else
-		cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	to_intel_atomic_state(state)->cdclk =
+		valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-	if (cdclk == dev_priv->cdclk_freq)
-		return 0;
+	return 0;
+}
 
-	/* add all active pipes to the state */
-	for_each_crtc(state->dev, crtc) {
-		if (!crtc->state->enable)
-			continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
+	if (max_pixclk < 0)
+		return max_pixclk;
 
-	/* disable/enable all currently active pipes while we change cdclk */
-	for_each_crtc_in_state(state, crtc, crtc_state, i)
-		if (crtc_state->enable)
-			crtc_state->mode_changed = true;
+	to_intel_atomic_state(state)->cdclk =
+		broxton_calc_cdclk(dev_priv, max_pixclk);
 
 	return 0;
 }
@@ -6001,7 +5970,7 @@
 	if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
 		/* CHV suggested value is 31 or 63 */
 		if (IS_CHERRYVIEW(dev_priv))
-			credits = PFI_CREDIT_31;
+			credits = PFI_CREDIT_63;
 		else
 			credits = PFI_CREDIT(15);
 	} else {
@@ -6025,41 +5994,31 @@
 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
 
-	/* The path in intel_mode_max_pixclk() with a NULL atomic state should
-	 * never fail. */
-	if (WARN_ON(max_pixclk < 0))
-		return;
+	/*
+	 * FIXME: We can end up here with all power domains off, yet
+	 * with a CDCLK frequency other than the minimum. To account
+	 * for this take the PIPE-A power domain, which covers the HW
+	 * blocks needed for the following programming. This can be
+	 * removed once it's guaranteed that we get here either with
+	 * the minimum CDCLK set, or the required power domains
+	 * enabled.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
-	req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+	if (IS_CHERRYVIEW(dev))
+		cherryview_set_cdclk(dev, req_cdclk);
+	else
+		valleyview_set_cdclk(dev, req_cdclk);
 
-	if (req_cdclk != dev_priv->cdclk_freq) {
-		/*
-		 * FIXME: We can end up here with all power domains off, yet
-		 * with a CDCLK frequency other than the minimum. To account
-		 * for this take the PIPE-A power domain, which covers the HW
-		 * blocks needed for the following programming. This can be
-		 * removed once it's guaranteed that we get here either with
-		 * the minimum CDCLK set, or the required power domains
-		 * enabled.
-		 */
-		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+	vlv_program_pfi_credits(dev_priv);
 
-		if (IS_CHERRYVIEW(dev))
-			cherryview_set_cdclk(dev, req_cdclk);
-		else
-			valleyview_set_cdclk(dev, req_cdclk);
-
-		vlv_program_pfi_credits(dev_priv);
-
-		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
-	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6071,9 +6030,7 @@
 	int pipe = intel_crtc->pipe;
 	bool is_dsi;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6122,7 +6079,6 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6149,9 +6105,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	i9xx_set_pll_dividers(intel_crtc);
@@ -6211,9 +6165,6 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	if (!intel_crtc->active)
-		return;
-
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
@@ -6250,88 +6201,89 @@
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
-static void i9xx_crtc_off(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 {
-}
-
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
 	unsigned long domains;
 
-	if (enable) {
-		if (!intel_crtc->active) {
-			domains = get_crtc_power_domains(crtc);
-			for_each_power_domain(domain, domains)
-				intel_display_power_get(dev_priv, domain);
-			intel_crtc->enabled_power_domains = domains;
+	if (!intel_crtc->active)
+		return;
 
-			dev_priv->display.crtc_enable(crtc);
-			intel_crtc_enable_planes(crtc);
-		}
-	} else {
-		if (intel_crtc->active) {
-			intel_crtc_disable_planes(crtc);
-			dev_priv->display.crtc_disable(crtc);
-
-			domains = intel_crtc->enabled_power_domains;
-			for_each_power_domain(domain, domains)
-				intel_display_power_put(dev_priv, domain);
-			intel_crtc->enabled_power_domains = 0;
-		}
+	if (to_intel_plane_state(crtc->primary->state)->visible) {
+		intel_crtc_wait_for_pending_flips(crtc);
+		intel_pre_disable_primary(crtc);
 	}
-}
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
-	bool enable = false;
-
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-		enable |= intel_encoder->connectors_active;
-
-	intel_crtc_control(crtc, enable);
-
-	crtc->state->active = enable;
-}
-
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_connector *connector;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	intel_crtc_disable_planes(crtc);
+	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
 	dev_priv->display.crtc_disable(crtc);
-	dev_priv->display.off(crtc);
+	intel_disable_shared_dpll(intel_crtc);
 
-	drm_plane_helper_disable(crtc->primary);
+	domains = intel_crtc->enabled_power_domains;
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+	intel_crtc->enabled_power_domains = 0;
+}
 
-	/* Update computed state. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
+/*
+ * Turn all crtcs off, but do not adjust the software state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_suspend(struct drm_device *dev)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	unsigned crtc_mask = 0;
+	int ret = 0;
+
+	if (WARN_ON(!ctx))
+		return 0;
+
+	lockdep_assert_held(&ctx->ww_ctx);
+	state = drm_atomic_state_alloc(dev);
+	if (WARN_ON(!state))
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	state->allow_modeset = true;
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto free;
+
+		if (!crtc_state->active)
 			continue;
 
-		if (connector->encoder->crtc != crtc)
-			continue;
-
-		connector->dpms = DRM_MODE_DPMS_OFF;
-		to_intel_encoder(connector->encoder)->connectors_active = false;
+		crtc_state->active = false;
+		crtc_mask |= 1 << drm_crtc_index(crtc);
 	}
+
+	if (crtc_mask) {
+		ret = drm_atomic_commit(state);
+
+		if (!ret) {
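+			/*
+			 * Keep ->active set in the software state so the
+			 * paired resume knows which crtcs to re-enable.
+			 */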
+			for_each_crtc(dev, crtc)
+				if (crtc_mask & (1 << drm_crtc_index(crtc)))
+					crtc->state->active = true;
+
+			return ret;
+		}
+	}
+
+free:
+	if (ret)
+		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
+	drm_atomic_state_free(state);
+	return ret;
 }
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6294,42 @@
 	kfree(intel_encoder);
 }
 
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
-	if (mode == DRM_MODE_DPMS_ON) {
-		encoder->connectors_active = true;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	} else {
-		encoder->connectors_active = false;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	}
-}
-
 /* Cross check the actual hw state with our own modeset state tracking (and its
  * internal consistency). */
 static void intel_connector_check_state(struct intel_connector *connector)
 {
+	struct drm_crtc *crtc = connector->base.state->crtc;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.base.id,
+		      connector->base.name);
+
 	if (connector->get_hw_state(connector)) {
-		struct intel_encoder *encoder = connector->encoder;
-		struct drm_crtc *crtc;
-		bool encoder_enabled;
-		enum pipe pipe;
+		struct drm_encoder *encoder = &connector->encoder->base;
+		struct drm_connector_state *conn_state = connector->base.state;
 
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-			      connector->base.base.id,
-			      connector->base.name);
+		I915_STATE_WARN(!crtc,
+			 "connector enabled without attached crtc\n");
 
-		/* there is no real hw state for MST connectors */
-		if (connector->mst_port)
+		if (!crtc)
 			return;
 
-		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
-		     "wrong connector dpms state\n");
-		I915_STATE_WARN(connector->base.encoder != &encoder->base,
-		     "active connector not linked to encoder\n");
+		I915_STATE_WARN(!crtc->state->active,
+		      "connector is active, but attached crtc isn't\n");
 
-		if (encoder) {
-			I915_STATE_WARN(!encoder->connectors_active,
-			     "encoder->connectors_active not set\n");
+		if (!encoder)
+			return;
 
-			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
-			if (I915_STATE_WARN_ON(!encoder->base.crtc))
-				return;
+		I915_STATE_WARN(conn_state->best_encoder != encoder,
+			"atomic encoder doesn't match attached encoder\n");
 
-			crtc = encoder->base.crtc;
-
-			I915_STATE_WARN(!crtc->state->enable,
-					"crtc not enabled\n");
-			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
-			     "encoder active on the wrong pipe\n");
-		}
+		I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+			"attached encoder crtc differs from connector crtc\n");
+	} else {
+		I915_STATE_WARN(crtc && crtc->state->active,
+			"attached crtc is active, but connector isn't\n");
+		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+			"best encoder set without crtc!\n");
 	}
 }
 
@@ -6429,26 +6361,6 @@
 	return connector;
 }
 
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
-	/* All the simple cases only support two dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	if (connector->encoder)
-		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
-	intel_modeset_check_state(connector->dev);
-}
-
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6586,12 +6498,36 @@
 	return ret;
 }
 
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+				     struct intel_crtc_state *pipe_config)
+{
+	if (pipe_config->pipe_bpp > 24)
+		return false;
+
+	/* HSW can handle pixel rate up to cdclk? */
+	if (IS_HASWELL(dev_priv->dev))
+		return true;
+
+	/*
+	 * We compare against max which means we must take
+	 * the increased cdclk requirement into account when
+	 * calculating the new cdclk.
+	 *
+	 * Should measure whether using a lower cdclk w/o IPS
+	 */
+	return ilk_pipe_pixel_rate(pipe_config) <=
+		dev_priv->max_cdclk_freq * 95 / 100;
+}
+
 static void hsw_compute_ips_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pipe_config->ips_enabled = i915.enable_ips &&
-				   hsw_crtc_supports_ips(crtc) &&
-				   pipe_config->pipe_bpp <= 24;
+		hsw_crtc_supports_ips(crtc) &&
+		pipe_config_supports_ips(dev_priv, pipe_config);
 }
 
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6536,10 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int ret;
 
 	/* FIXME should check pixel clock limits on all platforms */
 	if (INTEL_INFO(dev)->gen < 4) {
-		int clock_limit =
-			dev_priv->display.get_display_clock_speed(dev);
+		int clock_limit = dev_priv->max_cdclk_freq;
 
 		/*
 		 * Enable pixel doubling when the dot clock
@@ -6647,14 +6581,7 @@
 	if (pipe_config->has_pch_encoder)
 		return ironlake_fdi_compute_config(crtc, pipe_config);
 
-	/* FIXME: remove below call once atomic mode set is place and all crtc
-	 * related checks called from atomic_crtc_check function */
-	ret = 0;
-	DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
-		crtc, pipe_config->base.state);
-	ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
-	return ret;
+	return 0;
 }
 
 static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6591,8 @@
 	uint32_t cdctl = I915_READ(CDCLK_CTL);
 	uint32_t linkrate;
 
-	if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
-		WARN(1, "LCPLL1 not enabled\n");
+	if (!(lcpll1 & LCPLL_PLL_ENABLE))
 		return 24000; /* 24MHz is the cd freq with NSSC ref */
-	}
 
 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
 		return 540000;
@@ -6706,6 +6631,34 @@
 	return 24000;
 }
 
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+	int cdclk;
+
+	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+		return 19200;
+
+	cdclk = 19200 * pll_ratio / 2;
+
+	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+	case BXT_CDCLK_CD2X_DIV_SEL_1:
+		return cdclk;  /* 576MHz or 624MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+		return cdclk * 2 / 3; /* 384MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_2:
+		return cdclk / 2; /* 288MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_4:
+		return cdclk / 4; /* 144MHz */
+	}
+
+	/* error case, behave as if the DE PLL isn't enabled */
+	return 19200;
+}
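
A worked decode of the Broxton path, assuming the DE PLL ratio field reads back 60:

	/*
	 *   vco   = 19200 kHz * 60 = 1152000 kHz
	 *   cdclk = vco / 2        = 576000 kHz
	 *   CD2X divider 1.5      -> 576000 * 2 / 3 = 384000 kHz
	 */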
+
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6787,37 @@
 	return 266667;
 }
 
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
 	u16 hpllcc = 0;
+
+	/*
+	 * 852GM/852GMV only support 133 MHz and the HPLLCC
+	 * encoding is different :(
+	 * FIXME is this the right way to detect 852GM/852GMV?
+	 */
+	if (dev->pdev->revision == 0x1)
+		return 133333;
+
+	pci_bus_read_config_word(dev->pdev->bus,
+				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
 	/* Assume that the hardware is in the high speed state.  This
 	 * should be the default.
 	 */
 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
 	case GC_CLOCK_133_200:
+	case GC_CLOCK_133_200_2:
 	case GC_CLOCK_100_200:
 		return 200000;
 	case GC_CLOCK_166_250:
 		return 250000;
 	case GC_CLOCK_100_133:
 		return 133333;
+	case GC_CLOCK_133_266:
+	case GC_CLOCK_133_266_2:
+	case GC_CLOCK_166_266:
+		return 266667;
 	}
 
 	/* Shouldn't happen */
@@ -6859,6 +6829,175 @@
 	return 133333;
 }
 
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	static const unsigned int blb_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 6400000,
+	};
+	static const unsigned int pnv_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 2666667,
+	};
+	static const unsigned int cl_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 3333333,
+		[5] = 3566667,
+		[6] = 4266667,
+	};
+	static const unsigned int elk_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+	};
+	static const unsigned int ctg_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 2666667,
+		[5] = 4266667,
+	};
+	const unsigned int *vco_table;
+	unsigned int vco;
+	uint8_t tmp = 0;
+
+	/* FIXME other chipsets? */
+	if (IS_GM45(dev))
+		vco_table = ctg_vco;
+	else if (IS_G4X(dev))
+		vco_table = elk_vco;
+	else if (IS_CRESTLINE(dev))
+		vco_table = cl_vco;
+	else if (IS_PINEVIEW(dev))
+		vco_table = pnv_vco;
+	else if (IS_G33(dev))
+		vco_table = blb_vco;
+	else
+		return 0;
+
+	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+	vco = vco_table[tmp & 0x7];
+	if (vco == 0)
+		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+	else
+		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+	return vco;
+}
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 12) & 0x1;
+
+	switch (vco) {
+	case 2666667:
+	case 4000000:
+	case 5333333:
+		return cdclk_sel ? 333333 : 222222;
+	case 3200000:
+		return cdclk_sel ? 320000 : 228571;
+	default:
+		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+		return 222222;
+	}
+}
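
For illustration, a hypothetical GM45 decode with GCFGC = 0x1000 and an HPLL VCO of 3200000 kHz:

	/*
	 *   cdclk_sel = (0x1000 >> 12) & 1 = 1  ->  cdclk = 320000 kHz
	 *   with bit 12 clear the same VCO gives 228571 kHz (3200000 / 14)
	 */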
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 16, 10,  8 };
+	static const uint8_t div_4000[] = { 20, 12, 10 };
+	static const uint8_t div_5333[] = { 24, 16, 14 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+	return 200000;
+}
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
+	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
+	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 4) & 0x7;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 4800000:
+		div_table = div_4800;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+	return 190476;
+}
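
i965gm and g33 share the same pattern: GCFGC picks an index into a per-VCO divider table and cdclk = VCO / divider. A sample g33 decode (values illustrative):

	/*
	 *   HPLL VCO = 4000000 kHz, cdclk_sel = 2
	 *   divider  = div_4000[2] = 10
	 *   cdclk    = DIV_ROUND_CLOSEST(4000000, 10) = 400000 kHz
	 */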
+
 static void
 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 {
@@ -7064,8 +7203,8 @@
 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
-static void vlv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
 	u32 dpll, dpll_md;
 
@@ -7074,8 +7213,8 @@
 	 * clock for pipe B, since VGA hotplug / manual detection depends
 	 * on it.
 	 */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
 	/* We should never disable this, set it here for state tracking */
 	if (crtc->pipe == PIPE_B)
 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7317,11 @@
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
-	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
 		DPLL_VCO_ENABLE;
 	if (crtc->pipe != PIPE_A)
 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7457,11 @@
 	};
 
 	if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, &pipe_config);
+		chv_compute_dpll(crtc, &pipe_config);
 		chv_prepare_pll(crtc, &pipe_config);
 		chv_enable_pll(crtc, &pipe_config);
 	} else {
-		vlv_update_pll(crtc, &pipe_config);
+		vlv_compute_dpll(crtc, &pipe_config);
 		vlv_prepare_pll(crtc, &pipe_config);
 		vlv_enable_pll(crtc, &pipe_config);
 	}
@@ -7344,10 +7483,10 @@
 		vlv_disable_pll(to_i915(dev), pipe);
 }
 
-static void i9xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7560,10 @@
 	}
 }
 
-static void i8xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7723,14 @@
 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 
 	mode->flags = pipe_config->base.adjusted_mode.flags;
+	mode->type = DRM_MODE_TYPE_DRIVER;
 
 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
 	mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+	mode->hsync = drm_mode_hsync(mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
 }
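
The added calls fill in the derived fields of the read-out mode; drm_mode_vrefresh() computes, for a plain progressive mode, roughly:

	/*
	 *   vrefresh = clock * 1000 / (htotal * vtotal)
	 *   e.g. 148500 * 1000 / (2200 * 1125) = 60 Hz
	 */

and drm_mode_set_name() derives the name from the resolution ("1920x1080").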
 
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7802,9 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk, num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	bool ok, has_reduced_clock = false;
-	bool is_lvds = false, is_dsi = false;
+	intel_clock_t clock;
+	bool ok;
+	bool is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7822,6 @@
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
 		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
 		case INTEL_OUTPUT_DSI:
 			is_dsi = true;
 			break;
@@ -7712,19 +7853,6 @@
 			return -EINVAL;
 		}
 
-		if (is_lvds && dev_priv->lvds_downclock_avail) {
-			/*
-			 * Ensure we match the reduced clock's P to the target
-			 * clock.  If the clocks don't match, we can't switch
-			 * the display clock by using the FP0/FP1. In such case
-			 * we will disable the LVDS downclock feature.
-			 */
-			has_reduced_clock =
-				dev_priv->display.find_dpll(limit, crtc_state,
-							    dev_priv->lvds_downclock,
-							    refclk, &clock,
-							    &reduced_clock);
-		}
 		/* Compat-code for transition, will disappear. */
 		crtc_state->dpll.n = clock.n;
 		crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7862,15 @@
 	}
 
 	if (IS_GEN2(dev)) {
-		i8xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i8xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	} else if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, crtc_state);
+		chv_compute_dpll(crtc, crtc_state);
 	} else if (IS_VALLEYVIEW(dev)) {
-		vlv_update_pll(crtc, crtc_state);
+		vlv_compute_dpll(crtc, crtc_state);
 	} else {
-		i9xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i9xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	}
 
 	return 0;
@@ -7804,10 +7930,7 @@
 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
-	vlv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
 }
 
 static void
@@ -7906,10 +8029,7 @@
 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
-	chv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8558,9 +8678,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk;
 	const intel_limit_t *limit;
-	bool ret, is_lvds = false;
-
-	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+	bool ret;
 
 	refclk = ironlake_get_refclk(crtc_state);
 
@@ -8576,20 +8694,6 @@
 	if (!ret)
 		return false;
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		*has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc_state,
-						    dev_priv->lvds_downclock,
-						    refclk, clock,
-						    reduced_clock);
-	}
-
 	return true;
 }
 
@@ -9297,6 +9401,7 @@
 	}
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_update_cdclk(dev_priv->dev);
 }
 
 /*
@@ -9358,21 +9463,160 @@
 	intel_prepare_ddi(dev);
 }
 
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 
-	/* see the comment in valleyview_modeset_global_resources */
-	if (WARN_ON(max_pixclk < 0))
+	broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max pixel rate for the new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	int max_pixel_rate = 0;
+
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		int pixel_rate;
+
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->base.enable)
+			continue;
+
+		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+		max_pixel_rate = max(max_pixel_rate, pixel_rate);
+	}
+
+	return max_pixel_rate;
+}
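
An example of the IPS adjustment in the loop above: a 450000 kHz pipe with IPS enabled contributes

	/*
	 *   DIV_ROUND_UP(450000 * 100, 95) = 473685 kHz
	 */

to the maximum, preserving the 5% cdclk headroom IPS requires.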
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val, data;
+	int ret;
+
+	if (WARN((I915_READ(LCPLL_CTL) &
+		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+		 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
-	req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_write(dev_priv,
+				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("failed to inform pcode about cdclk change\n");
+		return;
+	}
 
-	if (req_cdclk != dev_priv->cdclk_freq)
-		broxton_set_cdclk(dev, req_cdclk);
+	val = I915_READ(LCPLL_CTL);
+	val |= LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+		DRM_ERROR("Switching to FCLK failed\n");
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CLK_FREQ_MASK;
+
+	switch (cdclk) {
+	case 450000:
+		val |= LCPLL_CLK_FREQ_450;
+		data = 0;
+		break;
+	case 540000:
+		val |= LCPLL_CLK_FREQ_54O_BDW;
+		data = 1;
+		break;
+	case 337500:
+		val |= LCPLL_CLK_FREQ_337_5_BDW;
+		data = 2;
+		break;
+	case 675000:
+		val |= LCPLL_CLK_FREQ_675_BDW;
+		data = 3;
+		break;
+	default:
+		WARN(1, "invalid cdclk frequency\n");
+		return;
+	}
+
+	I915_WRITE(LCPLL_CTL, val);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+		DRM_ERROR("Switching back to LCPLL failed\n");
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
+
+	WARN(cdclk != dev_priv->cdclk_freq,
+	     "cdclk requested %d kHz but got %d kHz\n",
+	     cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	int max_pixclk = ilk_max_pixel_rate(state);
+	int cdclk;
+
+	/*
+	 * FIXME should also account for plane ratio
+	 * once 64bpp pixel formats are supported.
+	 */
+	if (max_pixclk > 540000)
+		cdclk = 675000;
+	else if (max_pixclk > 450000)
+		cdclk = 540000;
+	else if (max_pixclk > 337500)
+		cdclk = 450000;
+	else
+		cdclk = 337500;
+
+	/*
+	 * FIXME move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+	 */
+	if (cdclk > dev_priv->max_cdclk_freq) {
+		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			  cdclk, dev_priv->max_cdclk_freq);
+		cdclk = dev_priv->max_cdclk_freq;
+	}
+
+	to_intel_atomic_state(state)->cdclk = cdclk;
+
+	return 0;
+}
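
The resulting cdclk for a few sample max pixel rates (kHz), following the thresholds above:

	/*
	 *   300000 -> 337500
	 *   400000 -> 450000
	 *   500000 -> 540000
	 *   600000 -> 675000
	 */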
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broadwell_set_cdclk(dev, req_cdclk);
 }
 
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@ -9889,7 +10133,7 @@
 mode_fits_in_fbdev(struct drm_device *dev,
 		   struct drm_display_mode *mode)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_framebuffer *fb;
@@ -9978,7 +10222,7 @@
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 
 	/*
 	 * Algorithm gets a little messy:
@@ -9996,10 +10240,10 @@
 
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 
 		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
@@ -10018,9 +10262,6 @@
 			continue;
 		if (possible_crtc->state->enable)
 			continue;
-		/* This can occur when applying the pipe A quirk on resume. */
-		if (to_intel_crtc(possible_crtc)->new_enabled)
-			continue;
 
 		crtc = possible_crtc;
 		break;
@@ -10031,20 +10272,17 @@
 	 */
 	if (!crtc) {
 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
-		goto fail_unlock;
+		goto fail;
 	}
 
 	ret = drm_modeset_lock(&crtc->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
-	intel_encoder->new_crtc = to_intel_crtc(crtc);
-	to_intel_connector(connector)->new_encoder = intel_encoder;
+		goto fail;
 
 	intel_crtc = to_intel_crtc(crtc);
-	intel_crtc->new_enabled = true;
 	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
@@ -10100,7 +10338,7 @@
 
 	drm_mode_copy(&crtc_state->base.mode, mode);
 
-	if (intel_set_mode(crtc, state, true)) {
+	if (drm_atomic_commit(state)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -10112,9 +10350,7 @@
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	return true;
 
- fail:
-	intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
 	drm_atomic_state_free(state);
 	state = NULL;
 
@@ -10160,10 +10396,6 @@
 		if (IS_ERR(crtc_state))
 			goto fail;
 
-		to_intel_connector(connector)->new_encoder = NULL;
-		intel_encoder->new_crtc = NULL;
-		intel_crtc->new_enabled = false;
-
 		connector_state->best_encoder = NULL;
 		connector_state->crtc = NULL;
 
@@ -10174,7 +10406,7 @@
 		if (ret)
 			goto fail;
 
-		ret = intel_set_mode(crtc, state, true);
+		ret = drm_atomic_commit(state);
 		if (ret)
 			goto fail;
 
@@ -10222,6 +10454,7 @@
 	u32 dpll = pipe_config->dpll_hw_state.dpll;
 	u32 fp;
 	intel_clock_t clock;
+	int port_clock;
 	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10262,9 +10495,9 @@
 		}
 
 		if (IS_PINEVIEW(dev))
-			pineview_clock(refclk, &clock);
+			port_clock = pnv_calc_dpll_params(refclk, &clock);
 		else
-			i9xx_clock(refclk, &clock);
+			port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	} else {
 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10290,7 +10523,7 @@
 				clock.p2 = 2;
 		}
 
-		i9xx_clock(refclk, &clock);
+		port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	}
 
 	/*
@@ -10298,7 +10531,7 @@
 	 * port_clock to compute adjusted_mode.crtc_clock in the
 	 * encoder's get_config() function.
 	 */
-	pipe_config->port_clock = clock.dot;
+	pipe_config->port_clock = port_clock;
 }
 
 int intel_dotclock_calculate(int link_freq,
@@ -10387,42 +10620,6 @@
 	return mode;
 }
 
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	/*
-	 * Since this is called by a timer, we should never get here in
-	 * the manual case.
-	 */
-	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-		int pipe = intel_crtc->pipe;
-		int dpll_reg = DPLL(pipe);
-		int dpll;
-
-		DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		dpll |= DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-		dpll = I915_READ(dpll_reg);
-		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-	}
-
-}
-
 void intel_mark_busy(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10440,20 +10637,12 @@
 void intel_mark_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 
 	if (!dev_priv->mm.busy)
 		return;
 
 	dev_priv->mm.busy = false;
 
-	for_each_crtc(dev, crtc) {
-		if (!crtc->primary->fb)
-			continue;
-
-		intel_decrease_pllclock(crtc);
-	}
-
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_idle(dev->dev_private);
 
@@ -10485,24 +10674,23 @@
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *primary = crtc->base.primary;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+	intel_unpin_fb_obj(work->old_fb, primary->state);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 
-	intel_fbc_update(dev);
-
 	if (work->flip_queued_req)
 		i915_gem_request_assign(&work->flip_queued_req, NULL);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
 	drm_framebuffer_unreference(work->old_fb);
 
-	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
-	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+	atomic_dec(&crtc->unpin_work_count);
 
 	kfree(work);
 }
@@ -10635,14 +10823,15 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10662,7 +10851,6 @@
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10670,14 +10858,15 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10694,7 +10883,6 @@
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10702,15 +10890,16 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10733,7 +10922,6 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10741,15 +10929,16 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10769,7 +10958,6 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10777,9 +10965,10 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -10821,11 +11010,11 @@
 	 * then do the cacheline alignment, and finally emit the
 	 * MI_DISPLAY_FLIP.
 	 */
-	ret = intel_ring_cacheline_align(ring);
+	ret = intel_ring_cacheline_align(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -10864,7 +11053,6 @@
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10973,12 +11161,11 @@
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	bool atomic_update;
 	u32 start_vbl_count;
 
 	intel_mark_page_flip_active(intel_crtc);
 
-	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+	intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
 	if (INTEL_INFO(dev)->gen >= 9)
 		skl_do_mmio_flip(intel_crtc);
@@ -10986,8 +11173,7 @@
 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
 		ilk_do_mmio_flip(intel_crtc);
 
-	if (atomic_update)
-		intel_pipe_update_end(intel_crtc, start_vbl_count);
+	intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11034,7 +11220,7 @@
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
 				    struct drm_i915_gem_object *obj,
-				    struct intel_engine_cs *ring,
+				    struct drm_i915_gem_request *req,
 				    uint32_t flags)
 {
 	return -ENODEV;
@@ -11120,6 +11306,7 @@
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
 	bool mmio_flip;
+	struct drm_i915_gem_request *request = NULL;
 	int ret;
 
 	/*
@@ -11226,7 +11413,7 @@
 	 */
 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state,
-					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
 	if (ret)
 		goto cleanup_pending;
 
@@ -11242,31 +11429,34 @@
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
 	} else {
-		if (obj->last_write_req) {
-			ret = i915_gem_check_olr(obj->last_write_req);
+		if (!request) {
+			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
 			if (ret)
 				goto cleanup_unpin;
 		}
 
-		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
 			goto cleanup_unpin;
 
-		i915_gem_request_assign(&work->flip_queued_req,
-					intel_ring_get_request(ring));
+		i915_gem_request_assign(&work->flip_queued_req, request);
 	}
 
+	if (request)
+		i915_add_request_no_flush(request);
+
 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;
 
 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
-			  INTEL_FRONTBUFFER_PRIMARY(pipe));
-
-	intel_fbc_disable(dev);
-	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_fbc_disable_crtc(intel_crtc);
+	intel_frontbuffer_flip_prepare(dev,
+				       to_intel_plane(primary)->frontbuffer_bit);
+
 	trace_i915_flip_request(intel_crtc->plane, obj);
 
 	return 0;
@@ -11274,6 +11464,8 @@
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
+	if (request)
+		i915_gem_request_cancel(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11292,8 +11484,35 @@
 	kfree(work);
 
 	if (ret == -EIO) {
+		struct drm_atomic_state *state;
+		struct drm_plane_state *plane_state;
+
 out_hang:
-		ret = intel_plane_restore(primary);
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			return -ENOMEM;
+		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+		plane_state = drm_atomic_get_plane_state(state, primary);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			drm_atomic_set_fb_for_plane(plane_state, fb);
+
+			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+			if (!ret)
+				ret = drm_atomic_commit(state);
+		}
+
+		if (ret == -EDEADLK) {
+			drm_modeset_backoff(state->acquire_ctx);
+			drm_atomic_state_clear(state);
+			goto retry;
+		}
+
+		if (ret)
+			drm_atomic_state_free(state);
+
 		if (ret == 0 && event) {
 			spin_lock_irq(&dev->event_lock);
 			drm_send_vblank_event(dev, pipe, event);
@@ -11303,44 +11522,274 @@
 	return ret;
 }
 
+
+/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
+ */
+static bool intel_wm_need_update(struct drm_plane *plane,
+				 struct drm_plane_state *state)
+{
+	/* Update watermarks on tiling changes. */
+	if (!plane->state->fb || !state->fb ||
+	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+	    plane->state->rotation != state->rotation)
+		return true;
+
+	if (plane->state->crtc_w != state->crtc_w)
+		return true;
+
+	return false;
+}
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *plane = plane_state->plane;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(plane->state);
+	int idx = intel_crtc->base.base.id, ret;
+	int i = drm_plane_index(plane);
+	bool mode_changed = needs_modeset(crtc_state);
+	bool was_crtc_enabled = crtc->state->active;
+	bool is_crtc_enabled = crtc_state->active;
+
+	bool turn_off, turn_on, visible, was_visible;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+	    plane->type != DRM_PLANE_TYPE_CURSOR) {
+		ret = skl_update_scaler_plane(
+			to_intel_crtc_state(crtc_state),
+			to_intel_plane_state(plane_state));
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Disabling a plane is always okay; we just need to update
+	 * fb tracking in a special way since cleanup_fb() won't
+	 * get called by the plane helpers.
+	 */
+	if (old_plane_state->base.fb && !fb)
+		intel_crtc->atomic.disabled_planes |= 1 << i;
+
+	was_visible = old_plane_state->visible;
+	visible = to_intel_plane_state(plane_state)->visible;
+
+	if (!was_crtc_enabled && WARN_ON(was_visible))
+		was_visible = false;
+
+	if (!is_crtc_enabled && WARN_ON(visible))
+		visible = false;
+
+	if (!was_visible && !visible)
+		return 0;
+
+	turn_off = was_visible && (!visible || mode_changed);
+	turn_on = visible && (!was_visible || mode_changed);
+
+	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+			 plane->base.id, fb ? fb->base.id : -1);
+
+	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+			 plane->base.id, was_visible, visible,
+			 turn_off, turn_on, mode_changed);
+
+	if (turn_on) {
+		intel_crtc->atomic.update_wm_pre = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			intel_crtc->atomic.disable_cxsr = true;
+			/* to potentially re-enable cxsr */
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_wm_post = true;
+		}
+	} else if (turn_off) {
+		intel_crtc->atomic.update_wm_post = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			if (is_crtc_enabled)
+				intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.disable_cxsr = true;
+		}
+	} else if (intel_wm_need_update(plane, plane_state)) {
+		intel_crtc->atomic.update_wm_pre = true;
+	}
+
+	if (visible)
+		intel_crtc->atomic.fb_bits |=
+			to_intel_plane(plane)->frontbuffer_bit;
+
+	switch (plane->type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		intel_crtc->atomic.wait_for_flips = true;
+		intel_crtc->atomic.pre_disable_primary = turn_off;
+		intel_crtc->atomic.post_enable_primary = turn_on;
+
+		if (turn_off) {
+			/*
+			 * FIXME: Actually, if we still have any other plane
+			 * enabled on the pipe we could leave IPS enabled, but
+			 * for now let's consider that when we make the
+			 * primary plane invisible by setting DSPCNTR to 0 in
+			 * the update_primary_plane function, IPS needs to be
+			 * disabled.
+			 */
+			intel_crtc->atomic.disable_ips = true;
+
+			intel_crtc->atomic.disable_fbc = true;
+		}
+
+		/*
+		 * FBC does not work on some platforms for rotated
+		 * planes, so disable it when rotation is not 0 and
+		 * update it when rotation is set back to 0.
+		 *
+		 * FIXME: This is redundant with the fbc update done in
+		 * the primary plane enable function except that that
+		 * one is done too late. We eventually need to unify
+		 * this.
+		 */
+
+		if (visible &&
+		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+		    dev_priv->fbc.crtc == intel_crtc &&
+		    plane_state->rotation != BIT(DRM_ROTATE_0))
+			intel_crtc->atomic.disable_fbc = true;
+
+		/*
+		 * BDW signals flip done immediately if the plane
+		 * is disabled, even if the plane enable is already
+		 * armed to occur at the next vblank :(
+		 */
+		if (turn_on && IS_BROADWELL(dev))
+			intel_crtc->atomic.wait_vblank = true;
+
+		intel_crtc->atomic.update_fbc |= visible || mode_changed;
+		break;
+	case DRM_PLANE_TYPE_CURSOR:
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+		if (turn_off && !mode_changed) {
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_sprite_watermarks |=
+				1 << i;
+		}
+	}
+	return 0;
+}
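
A quick truth table for the turn_off/turn_on derivation above (a modeset forces both when the plane stays visible):

	/*
	 *   was_visible  visible  mode_changed  ->  turn_off  turn_on
	 *        1          1          0                0        0
	 *        1          1          1                1        1
	 *        1          0          -                1        0
	 *        0          1          -                0        1
	 */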
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+			       const struct intel_encoder *b)
+{
+	/* masks could be asymmetric, so check both ways */
+	return a == b || (a->cloneable & (1 << b->type) &&
+			  b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+					 struct intel_crtc *crtc,
+					 struct intel_encoder *encoder)
+{
+	struct intel_encoder *source_encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		source_encoder =
+			to_intel_encoder(connector_state->best_encoder);
+		if (!encoders_cloneable(encoder, source_encoder))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	struct intel_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+		if (!check_single_encoder_cloning(state, crtc, encoder))
+			return false;
+	}
+
+	return true;
+}
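
The cloneable check is deliberately symmetric; a hypothetical pair that fails it:

	/*
	 * Example: analog encoder a with cloneable = 1 << INTEL_OUTPUT_HDMI
	 * and HDMI encoder b with cloneable = 0 may not share a crtc:
	 * a permits b's type, but b permits nothing, and
	 * encoders_cloneable() requires the masks to agree both ways.
	 */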
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc_state);
+	struct drm_atomic_state *state = crtc_state->state;
+	int ret;
+	bool mode_changed = needs_modeset(crtc_state);
+
+	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+		return -EINVAL;
+	}
+
+	if (mode_changed && !crtc_state->active)
+		intel_crtc->atomic.update_wm_post = true;
+
+	if (mode_changed && crtc_state->enable &&
+	    dev_priv->display.crtc_compute_clock &&
+	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+							   pipe_config);
+		if (ret)
+			return ret;
+	}
+
+	ret = 0;
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (mode_changed)
+			ret = skl_update_scaler_crtc(pipe_config);
+
+		if (!ret)
+			ret = intel_atomic_setup_scalers(dev, intel_crtc,
+							 pipe_config);
+	}
+
+	return ret;
+}
+
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
 	.load_lut = intel_crtc_load_lut,
 	.atomic_begin = intel_begin_crtc_commit,
 	.atomic_flush = intel_finish_crtc_commit,
+	.atomic_check = intel_crtc_atomic_check,
 };
 
-/**
- * intel_modeset_update_staged_output_state
- *
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
- */
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
-{
-	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-
-	for_each_intel_connector(dev, connector) {
-		connector->new_encoder =
-			to_intel_encoder(connector->base.encoder);
-	}
-
-	for_each_intel_encoder(dev, encoder) {
-		encoder->new_crtc =
-			to_intel_crtc(encoder->base.crtc);
-	}
-
-	for_each_intel_crtc(dev, crtc) {
-		crtc->new_enabled = crtc->base.state->enable;
-	}
-}
-
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 {
 	struct intel_connector *connector;
@@ -11358,39 +11807,6 @@
 	}
 }
 
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
-	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-
-	for_each_intel_connector(state->dev, connector) {
-		connector->base.encoder = connector->base.state->best_encoder;
-		if (connector->base.encoder)
-			connector->base.encoder->crtc =
-				connector->base.state->crtc;
-	}
-
-	/* Update crtc of disabled encoders */
-	for_each_intel_encoder(state->dev, encoder) {
-		int num_connectors = 0;
-
-		for_each_intel_connector(state->dev, connector)
-			if (connector->base.encoder == &encoder->base)
-				num_connectors++;
-
-		if (num_connectors == 0)
-			encoder->base.crtc = NULL;
-	}
-
-	for_each_intel_crtc(state->dev, crtc) {
-		crtc->base.enabled = crtc->base.state->enable;
-		crtc->config = to_intel_crtc_state(crtc->base.state);
-	}
-}
-
 static void
 connected_sink_compute_bpp(struct intel_connector *connector,
 			   struct intel_crtc_state *pipe_config)
@@ -11526,17 +11942,20 @@
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
 	if (IS_BROXTON(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-			      "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ebb0,
+			      pipe_config->dpll_hw_state.ebb4,
 			      pipe_config->dpll_hw_state.pll0,
 			      pipe_config->dpll_hw_state.pll1,
 			      pipe_config->dpll_hw_state.pll2,
 			      pipe_config->dpll_hw_state.pll3,
 			      pipe_config->dpll_hw_state.pll6,
 			      pipe_config->dpll_hw_state.pll8,
+			      pipe_config->dpll_hw_state.pll9,
+			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
 	} else if (IS_SKYLAKE(dev)) {
 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11593,56 +12012,6 @@
 	}
 }
 
-static bool encoders_cloneable(const struct intel_encoder *a,
-			       const struct intel_encoder *b)
-{
-	/* masks could be asymmetric, so check both ways */
-	return a == b || (a->cloneable & (1 << b->type) &&
-			  b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
-					 struct intel_crtc *crtc,
-					 struct intel_encoder *encoder)
-{
-	struct intel_encoder *source_encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		source_encoder =
-			to_intel_encoder(connector_state->best_encoder);
-		if (!encoders_cloneable(encoder, source_encoder))
-			return false;
-	}
-
-	return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-				  struct intel_crtc *crtc)
-{
-	struct intel_encoder *encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-		if (!check_single_encoder_cloning(state, crtc, encoder))
-			return false;
-	}
-
-	return true;
-}
-
 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -11696,6 +12065,7 @@
 	struct intel_dpll_hw_state dpll_hw_state;
 	enum intel_dpll_id shared_dpll;
 	uint32_t ddi_pll_sel;
+	bool force_thru;
 
 	/* FIXME: before the switch to atomic started, a new pipe_config was
 	 * kzalloc'd. Code that depends on any field being zero should be
@@ -11707,6 +12077,7 @@
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
 	ddi_pll_sel = crtc_state->ddi_pll_sel;
+	force_thru = crtc_state->pch_pfit.force_thru;
 
 	memset(crtc_state, 0, sizeof *crtc_state);
 
@@ -11715,13 +12086,14 @@
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
 	crtc_state->ddi_pll_sel = ddi_pll_sel;
+	crtc_state->pch_pfit.force_thru = force_thru;
 }
 
 static int
 intel_modeset_pipe_config(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
 			  struct intel_crtc_state *pipe_config)
 {
+	struct drm_atomic_state *state = pipe_config->base.state;
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -11729,16 +12101,6 @@
 	int i;
 	bool retry = true;
 
-	if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
-		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-		return -EINVAL;
-	}
-
-	if (!check_digital_port_conflicts(state)) {
-		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-		return -EINVAL;
-	}
-
 	clear_intel_crtc_state(pipe_config);
 
 	pipe_config->cpu_transcoder =
@@ -11832,90 +12194,27 @@
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-	return 0;
 fail:
 	return ret;
 }
 
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc)
-			return true;
-
-	return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
-	return state->mode_changed || state->active_changed;
-}
-
 static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
 	int i;
 
-	intel_shared_dpll_commit(dev_priv);
-
-	for_each_intel_encoder(dev, intel_encoder) {
-		if (!intel_encoder->base.crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != intel_encoder->base.crtc)
-				continue;
-
-			if (crtc_state->enable && needs_modeset(crtc_state))
-				intel_encoder->connectors_active = false;
-
-			break;
-		}
-	}
-
-	drm_atomic_helper_swap_state(state->dev, state);
-	intel_modeset_fixup_state(state);
-
 	/* Double check state. */
-	for_each_crtc(dev, crtc) {
-		WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
+
+		/* Update hwmode for vblank functions */
+		if (crtc->state->active)
+			crtc->hwmode = crtc->state->adjusted_mode;
+		else
+			crtc->hwmode.crtc_clock = 0;
 	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != connector->encoder->crtc)
-				continue;
-
-			if (crtc->state->enable && needs_modeset(crtc->state)) {
-				struct drm_property *dpms_property =
-					dev->mode_config.dpms_property;
-
-				connector->dpms = DRM_MODE_DPMS_ON;
-				drm_object_property_set_value(&connector->base,
-								 dpms_property,
-								 DRM_MODE_DPMS_ON);
-
-				intel_encoder = to_intel_encoder(connector->encoder);
-				intel_encoder->connectors_active = true;
-			}
-
-			break;
-		}
-	}
-
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11942,27 +12241,133 @@
 			    base.head) \
 		if (mask & (1 <<(intel_crtc)->pipe))
 
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+		  unsigned int m2, unsigned int n2,
+		  bool exact)
+{
+	if (m == m2 && n == n2)
+		return true;
+
+	if (exact || !m || !n || !m2 || !n2)
+		return false;
+
+	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+	if (m > m2) {
+		while (m > m2) {
+			m2 <<= 1;
+			n2 <<= 1;
+		}
+	} else if (m < m2) {
+		while (m < m2) {
+			m <<= 1;
+			n <<= 1;
+		}
+	}
+
+	return m == m2 && n == n2;
+}
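
The inexact path only equates ratios that match after power-of-two scaling. For example, comparing m/n = 123/456 against m2/n2 = 246/912 with exact == false:

	/*
	 *   m < m2, so m/n is doubled once: 123/456 -> 246/912
	 *   246/912 == 246/912  ->  the ratios are considered equal
	 */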
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+		       struct intel_link_m_n *m2_n2,
+		       bool adjust)
+{
+	if (m_n->tu == m2_n2->tu &&
+	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+	    intel_compare_m_n(m_n->link_m, m_n->link_n,
+			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
+		if (adjust)
+			*m2_n2 = *m_n;
+
+		return true;
+	}
+
+	return false;
+}
+
 static bool
 intel_pipe_config_compare(struct drm_device *dev,
 			  struct intel_crtc_state *current_config,
-			  struct intel_crtc_state *pipe_config)
+			  struct intel_crtc_state *pipe_config,
+			  bool adjust)
 {
+	bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+	do { \
+		if (!adjust) \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else \
+			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+	} while (0)
+
 #define PIPE_CONF_CHECK_X(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected 0x%08x, found 0x%08x)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_I(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N(name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name,\
+				    adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name, adjust) && \
+	    !intel_compare_link_m_n(&current_config->alt_name, \
+				    &pipe_config->name, adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "or tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  current_config->alt_name.tu, \
+			  current_config->alt_name.gmch_m, \
+			  current_config->alt_name.gmch_n, \
+			  current_config->alt_name.link_m, \
+			  current_config->alt_name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
 	}
 
 /* This is required for BDW+ where there is only one set of registers for
@@ -11973,30 +12378,30 @@
 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
 	if ((current_config->name != pipe_config->name) && \
 		(current_config->alt_name != pipe_config->name)) { \
-			DRM_ERROR("mismatch in " #name " " \
+			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 				  "(expected %i or %i, found %i)\n", \
 				  current_config->name, \
 				  current_config->alt_name, \
 				  pipe_config->name); \
-			return false; \
+			ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
-		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name & (mask), \
 			  pipe_config->name & (mask)); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_QUIRK(quirk)	\
@@ -12006,35 +12411,18 @@
 
 	PIPE_CONF_CHECK_I(has_pch_encoder);
 	PIPE_CONF_CHECK_I(fdi_lanes);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.tu);
+	PIPE_CONF_CHECK_M_N(fdi_m_n);
 
 	PIPE_CONF_CHECK_I(has_dp_encoder);
 
 	if (INTEL_INFO(dev)->gen < 8) {
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
-		PIPE_CONF_CHECK_I(dp_m_n.link_m);
-		PIPE_CONF_CHECK_I(dp_m_n.link_n);
-		PIPE_CONF_CHECK_I(dp_m_n.tu);
+		PIPE_CONF_CHECK_M_N(dp_m_n);
 
-		if (current_config->has_drrs) {
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
-		}
-	} else {
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
-	}
+		PIPE_CONF_CHECK_I(has_drrs);
+		if (current_config->has_drrs)
+			PIPE_CONF_CHECK_M_N(dp_m2_n2);
+	} else
+		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12076,21 +12464,11 @@
 	PIPE_CONF_CHECK_I(pipe_src_w);
 	PIPE_CONF_CHECK_I(pipe_src_h);
 
-	/*
-	 * FIXME: BIOS likes to set up a cloned config with lvds+external
-	 * screen. Since we don't yet re-compute the pipe config when moving
-	 * just the lvds port away to another pipe the sw tracking won't match.
-	 *
-	 * Proper atomic modesets with recomputed global state will fix this.
-	 * Until then just don't check gmch state for inherited modes.
-	 */
-	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
-		PIPE_CONF_CHECK_I(gmch_pfit.control);
-		/* pfit ratios are autocomputed by the hw on gen4+ */
-		if (INTEL_INFO(dev)->gen < 4)
-			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-	}
+	PIPE_CONF_CHECK_I(gmch_pfit.control);
+	/* pfit ratios are autocomputed by the hw on gen4+ */
+	if (INTEL_INFO(dev)->gen < 4)
+		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
 
 	PIPE_CONF_CHECK_I(pch_pfit.enabled);
 	if (current_config->pch_pfit.enabled) {
@@ -12130,8 +12508,9 @@
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
 
-	return true;
+	return ret;
 }
 
 static void check_wm_state(struct drm_device *dev)
@@ -12185,17 +12564,23 @@
 }
 
 static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+		      struct drm_atomic_state *old_state)
 {
-	struct intel_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *connector;
+	int i;
 
-	for_each_intel_connector(dev, connector) {
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct drm_encoder *encoder = connector->encoder;
+		struct drm_connector_state *state = connector->state;
+
 		/* This also checks the encoder/connector hw state with the
 		 * ->get_hw_state callbacks. */
-		intel_connector_check_state(connector);
+		intel_connector_check_state(to_intel_connector(connector));
 
-		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
-		     "connector's staged encoder doesn't match current encoder\n");
+		I915_STATE_WARN(state->best_encoder != encoder,
+		     "connector's atomic encoder doesn't match legacy encoder\n");
 	}
 }
 
@@ -12207,124 +12592,106 @@
 
 	for_each_intel_encoder(dev, encoder) {
 		bool enabled = false;
-		bool active = false;
-		enum pipe pipe, tracked_pipe;
+		enum pipe pipe;
 
 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
 
-		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
-		     "encoder's stage crtc doesn't match current crtc\n");
-		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
-		     "encoder's active_connectors set, but no crtc\n");
-
 		for_each_intel_connector(dev, connector) {
-			if (connector->base.encoder != &encoder->base)
+			if (connector->base.state->best_encoder != &encoder->base)
 				continue;
 			enabled = true;
-			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
-				active = true;
+
+			I915_STATE_WARN(connector->base.state->crtc !=
+					encoder->base.crtc,
+			     "connector's crtc doesn't match encoder crtc\n");
 		}
-		/*
-		 * for MST connectors if we unplug the connector is gone
-		 * away but the encoder is still connected to a crtc
-		 * until a modeset happens in response to the hotplug.
-		 */
-		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
-			continue;
 
 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
 		     "encoder's enabled state mismatch "
 		     "(expected %i, found %i)\n",
 		     !!encoder->base.crtc, enabled);
-		I915_STATE_WARN(active && !encoder->base.crtc,
-		     "active encoder with no crtc\n");
 
-		I915_STATE_WARN(encoder->connectors_active != active,
-		     "encoder's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, encoder->connectors_active);
+		if (!encoder->base.crtc) {
+			bool active;
 
-		active = encoder->get_hw_state(encoder, &pipe);
-		I915_STATE_WARN(active != encoder->connectors_active,
-		     "encoder's hw state doesn't match sw tracking "
-		     "(expected %i, found %i)\n",
-		     encoder->connectors_active, active);
-
-		if (!encoder->base.crtc)
-			continue;
-
-		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-		I915_STATE_WARN(active && pipe != tracked_pipe,
-		     "active encoder's pipe doesn't match"
-		     "(expected %i, found %i)\n",
-		     tracked_pipe, pipe);
-
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active,
+			     "encoder detached but still enabled on pipe %c.\n",
+			     pipe_name(pipe));
+		}
 	}
 }
 
 static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
-	struct intel_crtc_state pipe_config;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	for_each_intel_crtc(dev, crtc) {
-		bool enabled = false;
-		bool active = false;
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_crtc_state *pipe_config, *sw_config;
+		bool active;
 
-		memset(&pipe_config, 0, sizeof(pipe_config));
+		if (!needs_modeset(crtc->state))
+			continue;
+
+		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+		pipe_config = to_intel_crtc_state(old_crtc_state);
+		memset(pipe_config, 0, sizeof(*pipe_config));
+		pipe_config->base.crtc = crtc;
+		pipe_config->base.state = old_state;
 
 		DRM_DEBUG_KMS("[CRTC:%d]\n",
-			      crtc->base.base.id);
+			      crtc->base.id);
 
-		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
-		     "active crtc, but not enabled in sw tracking\n");
-
-		for_each_intel_encoder(dev, encoder) {
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			enabled = true;
-			if (encoder->connectors_active)
-				active = true;
-		}
-
-		I915_STATE_WARN(active != crtc->active,
-		     "crtc's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, crtc->active);
-		I915_STATE_WARN(enabled != crtc->base.state->enable,
-		     "crtc's computed enabled state doesn't match tracked enabled state "
-		     "(expected %i, found %i)\n", enabled,
-				crtc->base.state->enable);
-
-		active = dev_priv->display.get_pipe_config(crtc,
-							   &pipe_config);
+		active = dev_priv->display.get_pipe_config(intel_crtc,
+							   pipe_config);
 
 		/* hw state is inconsistent with the pipe quirk */
-		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-			active = crtc->active;
+		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+			active = crtc->state->active;
 
-		for_each_intel_encoder(dev, encoder) {
+		I915_STATE_WARN(crtc->state->active != active,
+		     "crtc active state doesn't match with hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, active);
+
+		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+		     "transitional active state does not match atomic hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+
+		for_each_encoder_on_crtc(dev, crtc, encoder) {
 			enum pipe pipe;
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			if (encoder->get_hw_state(encoder, &pipe))
-				encoder->get_config(encoder, &pipe_config);
+
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active != crtc->state->active,
+				"[ENCODER:%i] active %i with crtc active %i\n",
+				encoder->base.base.id, active, crtc->state->active);
+
+			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+					"Encoder connected to wrong pipe %c\n",
+					pipe_name(pipe));
+
+			if (active)
+				encoder->get_config(encoder, pipe_config);
 		}
 
-		I915_STATE_WARN(crtc->active != active,
-		     "crtc active state doesn't match with hw state "
-		     "(expected %i, found %i)\n", crtc->active, active);
+		if (!crtc->state->active)
+			continue;
 
-		if (active &&
-		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+		sw_config = to_intel_crtc_state(crtc->state);
+		if (!intel_pipe_config_compare(dev, sw_config,
+					       pipe_config, false)) {
 			I915_STATE_WARN(1, "pipe state doesn't match!\n");
-			intel_dump_pipe_config(crtc, &pipe_config,
+			intel_dump_pipe_config(intel_crtc, pipe_config,
 					       "[hw state]");
-			intel_dump_pipe_config(crtc, crtc->config,
+			intel_dump_pipe_config(intel_crtc, sw_config,
 					       "[sw state]");
 		}
 	}
@@ -12379,13 +12746,14 @@
 	}
 }
 
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+			  struct drm_atomic_state *old_state)
 {
 	check_wm_state(dev);
-	check_connector_state(dev);
+	check_connector_state(dev, old_state);
 	check_encoder_state(dev);
-	check_crtc_state(dev);
+	check_crtc_state(dev, old_state);
 	check_shared_dpll_state(dev);
 }
 
@@ -12439,113 +12807,145 @@
 		crtc->scanline_offset = 1;
 }
 
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
-			     struct drm_atomic_state *state)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
-
-	ret = drm_atomic_add_affected_connectors(state, crtc);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = drm_atomic_helper_check_modeset(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/*
-	 * Note this needs changes when we start tracking multiple modes
-	 * and crtcs.  At that point we'll need to compute the whole config
-	 * (i.e. one pipe_config for each crtc) rather than just the one
-	 * for this crtc.
-	 */
-	pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(pipe_config))
-		return pipe_config;
-
-	if (!pipe_config->base.enable)
-		return pipe_config;
-
-	ret = intel_modeset_pipe_config(crtc, state, pipe_config);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/* Check things that can only be changed through modeset */
-	if (pipe_config->has_audio !=
-	    to_intel_crtc(crtc)->config->has_audio)
-		pipe_config->base.mode_changed = true;
-
-	/*
-	 * Note we have an issue here with infoframes: current code
-	 * only updates them on the full mode set path per hw
-	 * requirements.  So here we should be checking for any
-	 * required changes and forcing a mode set.
-	 */
-
-	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
-	ret = drm_atomic_helper_check_planes(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned clear_pipes = 0;
+	struct intel_shared_dpll_config *shared_dpll = NULL;
 	struct intel_crtc *intel_crtc;
 	struct intel_crtc_state *intel_crtc_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	int ret = 0;
 	int i;
 
 	if (!dev_priv->display.crtc_compute_clock)
-		return 0;
+		return;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		int dpll;
+
 		intel_crtc = to_intel_crtc(crtc);
 		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		dpll = intel_crtc_state->shared_dpll;
 
-		if (needs_modeset(crtc_state)) {
-			clear_pipes |= 1 << intel_crtc->pipe;
-			intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-		}
-	}
-
-	ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
-	if (ret)
-		goto done;
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state) || !crtc_state->enable)
+		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
 
-		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-							   intel_crtc_state);
-		if (ret) {
-			intel_shared_dpll_abort_config(dev_priv);
-			goto done;
+		if (!shared_dpll)
+			shared_dpll = intel_atomic_get_shared_dpll_state(state);
+
+		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+	}
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct intel_crtc *intel_crtc;
+	struct drm_crtc *crtc;
+	struct intel_crtc_state *first_crtc_state = NULL;
+	struct intel_crtc_state *other_crtc_state = NULL;
+	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
+	int i;
+
+	/* look at all crtcs that are going to be enabled during the modeset */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc_state->active || !needs_modeset(crtc_state))
+			continue;
+
+		if (first_crtc_state) {
+			other_crtc_state = to_intel_crtc_state(crtc_state);
+			break;
+		} else {
+			first_crtc_state = to_intel_crtc_state(crtc_state);
+			first_pipe = intel_crtc->pipe;
 		}
 	}
 
-done:
+	/* No workaround needed? */
+	if (!first_crtc_state)
+		return 0;
+
+	/* w/a possibly needed, check how many crtc's are already enabled. */
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		struct intel_crtc_state *pipe_config;
+
+		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(pipe_config))
+			return PTR_ERR(pipe_config);
+
+		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
+
+		if (!pipe_config->base.active ||
+		    needs_modeset(&pipe_config->base))
+			continue;
+
+		/* 2 or more enabled crtcs means no need for w/a */
+		if (enabled_pipe != INVALID_PIPE)
+			return 0;
+
+		enabled_pipe = intel_crtc->pipe;
+	}
+
+	if (enabled_pipe != INVALID_PIPE)
+		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+	else if (other_crtc_state)
+		other_crtc_state->hsw_workaround_pipe = first_pipe;
+
+	return 0;
+}
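
haswell_mode_set_planes_workaround() only records which pipe needs the delay in hsw_workaround_pipe; the crtc enable path is expected to consume it. A hedged sketch of that consuming side, assuming the dev and pipe_config locals of the enable function (the exact hunk is not shown here):

	enum pipe hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;

	if (hsw_workaround_pipe != INVALID_PIPE) {
		/* Mode set sequence w/a: at least two vblanks on the
		 * already-running pipe before enabling planes on this one. */
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}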
+
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	/* add all active pipes to the state */
+	for_each_crtc(state->dev, crtc) {
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->active || needs_modeset(crtc_state))
+			continue;
+
+		crtc_state->mode_changed = true;
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			break;
+
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			break;
+	}
+
 	return ret;
 }
 
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (!check_digital_port_conflicts(state)) {
+		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * See if the config requires any additional preparation, e.g.
 	 * to adjust global state with pipes off.  We need to do this
@@ -12553,405 +12953,244 @@
 	 * mode set on this crtc.  For other crtcs we need to use the
 	 * adjusted_mode bits in the crtc directly.
 	 */
-	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
-		ret = valleyview_modeset_global_pipes(state);
-		if (ret)
-			return ret;
-	}
+	if (dev_priv->display.modeset_calc_cdclk) {
+		unsigned int cdclk;
 
-	ret = __intel_set_mode_setup_plls(state);
-	if (ret)
-		return ret;
+		ret = dev_priv->display.modeset_calc_cdclk(state);
+
+		cdclk = to_intel_atomic_state(state)->cdclk;
+		if (!ret && cdclk != dev_priv->cdclk_freq)
+			ret = intel_modeset_all_pipes(state);
+
+		if (ret < 0)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+
+	intel_modeset_clear_plls(state);
+
+	if (IS_HASWELL(dev))
+		return haswell_mode_set_planes_workaround(state);
 
 	return 0;
 }
 
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
-			    struct intel_crtc_state *pipe_config)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
-	struct drm_device *dev = modeset_crtc->dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret, i;
+	bool any_ms = false;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc_state *pipe_config =
+			to_intel_crtc_state(crtc_state);
+
+		/* Catch I915_MODE_FLAG_INHERITED */
+		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+			crtc_state->mode_changed = true;
+
+		if (!crtc_state->enable) {
+			if (needs_modeset(crtc_state))
+				any_ms = true;
+			continue;
+		}
+
+		if (!needs_modeset(crtc_state))
+			continue;
+
+		/* FIXME: If only active_changed is set, we shouldn't need to
+		 * do any state recomputation at all. */
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			return ret;
+
+		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret)
+			return ret;
+
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
+					to_intel_crtc_state(crtc->state),
+					pipe_config, true)) {
+			crtc_state->mode_changed = false;
+		}
+
+		if (needs_modeset(crtc_state)) {
+			any_ms = true;
+
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				return ret;
+		}
+
+		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+				       needs_modeset(crtc_state) ?
+				       "[modeset]" : "[fastset]");
+	}
+
+	if (any_ms) {
+		ret = intel_modeset_checks(state);
+
+		if (ret)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk =
+			to_i915(state->dev)->cdclk_freq;
+
+	return drm_atomic_helper_check_planes(state->dev, state);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state,
+			       bool async)
+{
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_atomic_state *state = pipe_config->base.state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	int ret = 0;
 	int i;
+	bool any_ms = false;
 
-	ret = __intel_set_mode_checks(state);
-	if (ret < 0)
-		return ret;
+	if (async) {
+		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+		return -EINVAL;
+	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 	if (ret)
 		return ret;
 
+	drm_atomic_helper_swap_state(dev, state);
+
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state))
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		if (!needs_modeset(crtc->state))
 			continue;
 
-		if (!crtc_state->enable) {
-			if (crtc->state->enable)
-				intel_crtc_disable(crtc);
-		} else if (crtc->state->enable) {
-			intel_crtc_disable_planes(crtc);
+		any_ms = true;
+		intel_pre_plane_update(intel_crtc);
+
+		if (crtc_state->active) {
+			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
 			dev_priv->display.crtc_disable(crtc);
+			intel_crtc->active = false;
+			intel_disable_shared_dpll(intel_crtc);
 		}
 	}
 
-	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
-	 * to set it here already despite that we pass it down the callchain.
-	 *
-	 * Note we'll need to fix this up when we start tracking multiple
-	 * pipes; here we assume a single modeset_pipe and only track the
-	 * single crtc and mode.
-	 */
-	if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
-		modeset_crtc->mode = pipe_config->base.mode;
-
-		/*
-		 * Calculate and store various constants which
-		 * are later needed by vblank and swap-completion
-		 * timestamping. They are derived from true hwmode.
-		 */
-		drm_calc_timestamping_constants(modeset_crtc,
-						&pipe_config->base.adjusted_mode);
-	}
-
 	/* Only after disabling all output pipelines that will be changed can we
 	 * update the output configuration. */
-	intel_modeset_update_state(state);
+	intel_modeset_update_crtc_state(state);
 
-	/* The state has been swaped above, so state actually contains the
-	 * old state now. */
+	if (any_ms) {
+		intel_shared_dpll_commit(state);
 
-	modeset_update_crtc_power_domains(state);
+		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+		modeset_update_crtc_power_domains(state);
+	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
-			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-			continue;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		bool modeset = needs_modeset(crtc->state);
+
+		if (modeset && crtc->state->active) {
+			update_scanline_offset(to_intel_crtc(crtc));
+			dev_priv->display.crtc_enable(crtc);
 		}
 
-		update_scanline_offset(to_intel_crtc(crtc));
+		if (!modeset)
+			intel_pre_plane_update(intel_crtc);
 
-		dev_priv->display.crtc_enable(crtc);
 		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+		intel_post_plane_update(intel_crtc);
 	}
 
 	/* FIXME: add subpixel order */
 
+	drm_atomic_helper_wait_for_vblanks(dev, state);
 	drm_atomic_helper_cleanup_planes(dev, state);
 
+	if (any_ms)
+		intel_modeset_check_state(dev, state);
+
 	drm_atomic_state_free(state);
 
 	return 0;
 }
 
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
-				      struct intel_crtc_state *pipe_config,
-				      bool force_restore)
-{
-	int ret;
-
-	ret = __intel_set_mode(crtc, pipe_config);
-
-	if (ret == 0 && force_restore) {
-		intel_modeset_update_staged_output_state(crtc->dev);
-		intel_modeset_check_state(crtc->dev);
-	}
-
-	return ret;
-}
-
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
-
-	pipe_config = intel_modeset_compute_config(crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
-	}
-
-	ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
-	if (ret)
-		goto out;
-
-out:
-	return ret;
-}
-
 void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_atomic_state *state;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-	struct drm_connector_state *connector_state;
-	struct intel_crtc_state *crtc_state;
+	struct drm_crtc_state *crtc_state;
 	int ret;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state) {
-		DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
 			      crtc->base.id);
 		return;
 	}
 
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
-	/* The force restore path in the HW readout code relies on the staged
-	 * config still keeping the user requested config while the actual
-	 * state has been overwritten by the configuration read from HW. We
-	 * need to copy the staged config to the atomic state, otherwise the
-	 * mode set will just reapply the state the HW is already in. */
-	for_each_intel_encoder(dev, encoder) {
-		if (&encoder->new_crtc->base != crtc)
-			continue;
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	ret = PTR_ERR_OR_ZERO(crtc_state);
+	if (!ret) {
+		if (!crtc_state->active)
+			goto out;
 
-		for_each_intel_connector(dev, connector) {
-			if (connector->new_encoder != encoder)
-				continue;
-
-			connector_state = drm_atomic_get_connector_state(state, &connector->base);
-			if (IS_ERR(connector_state)) {
-				DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
-					      connector->base.base.id,
-					      connector->base.name,
-					      PTR_ERR(connector_state));
-				continue;
-			}
-
-			connector_state->crtc = crtc;
-			connector_state->best_encoder = &encoder->base;
-		}
+		crtc_state->mode_changed = true;
+		ret = drm_atomic_commit(state);
 	}
 
-	crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(crtc_state)) {
-		DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
-			      crtc->base.id, PTR_ERR(crtc_state));
-		drm_atomic_state_free(state);
-		return;
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(state->acquire_ctx);
+		goto retry;
 	}
 
-	crtc_state->base.active = crtc_state->base.enable =
-		to_intel_crtc(crtc)->new_enabled;
-
-	drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
-
-	intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
-					crtc->primary->fb, crtc->x, crtc->y);
-
-	ret = intel_set_mode(crtc, state, false);
 	if (ret)
+out:
 		drm_atomic_state_free(state);
 }
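
The retry label above is the canonical atomic locking pattern: -EDEADLK from the acquire context means another thread holds a lock we need, so the state is cleared, the locks are dropped via drm_modeset_backoff(), and the whole sequence is replayed. Stripped of the i915 specifics, the skeleton looks like this (a sketch; ctx is assumed to be a held drm_modeset_acquire_ctx, and on success drm_atomic_commit() takes ownership of the state):

	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	state->acquire_ctx = ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret)
		ret = drm_atomic_commit(state);

	if (ret == -EDEADLK) {
		/* Lock contention: unwind this state and replay from scratch. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	if (ret)
		drm_atomic_state_free(state);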
 
 #undef for_each_intel_crtc_masked
 
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
-					struct drm_mode_set *set)
-{
-	int ro;
-
-	for (ro = 0; ro < set->num_connectors; ro++)
-		if (set->connectors[ro] == &connector->base)
-			return true;
-
-	return false;
-}
-
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
-				 struct drm_mode_set *set,
-				 struct drm_atomic_state *state)
-{
-	struct intel_connector *connector;
-	struct drm_connector *drm_connector;
-	struct drm_connector_state *connector_state;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int i, ret;
-
-	/* The upper layers ensure that we either disable a crtc or have a list
-	 * of connectors. For paranoia, double-check this. */
-	WARN_ON(!set->fb && (set->num_connectors != 0));
-	WARN_ON(set->fb && (set->num_connectors == 0));
-
-	for_each_intel_connector(dev, connector) {
-		bool in_mode_set = intel_connector_in_mode_set(connector, set);
-
-		if (!in_mode_set && connector->base.state->crtc != set->crtc)
-			continue;
-
-		connector_state =
-			drm_atomic_get_connector_state(state, &connector->base);
-		if (IS_ERR(connector_state))
-			return PTR_ERR(connector_state);
-
-		if (in_mode_set) {
-			int pipe = to_intel_crtc(set->crtc)->pipe;
-			connector_state->best_encoder =
-				&intel_find_encoder(connector, pipe)->base;
-		}
-
-		if (connector->base.state->crtc != set->crtc)
-			continue;
-
-		/* If we disable the crtc, disable all its connectors. Also, if
-		 * the connector is on the changing crtc but not on the new
-		 * connector list, disable it. */
-		if (!set->fb || !in_mode_set) {
-			connector_state->best_encoder = NULL;
-
-			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
-				connector->base.base.id,
-				connector->base.name);
-		}
-	}
-	/* connector->new_encoder is now updated for all connectors. */
-
-	for_each_connector_in_state(state, drm_connector, connector_state, i) {
-		connector = to_intel_connector(drm_connector);
-
-		if (!connector_state->best_encoder) {
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								NULL);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		if (intel_connector_in_mode_set(connector, set)) {
-			struct drm_crtc *crtc = connector->base.state->crtc;
-
-			/* If this connector was in a previous crtc, add it
-			 * to the state. We might need to disable it. */
-			if (crtc) {
-				crtc_state =
-					drm_atomic_get_crtc_state(state, crtc);
-				if (IS_ERR(crtc_state))
-					return PTR_ERR(crtc_state);
-			}
-
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								set->crtc);
-			if (ret)
-				return ret;
-		}
-
-		/* Make sure the new CRTC will work with the encoder */
-		if (!drm_encoder_crtc_ok(connector_state->best_encoder,
-					 connector_state->crtc)) {
-			return -EINVAL;
-		}
-
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
-			connector->base.base.id,
-			connector->base.name,
-			connector_state->crtc->base.id);
-
-		if (connector_state->best_encoder != &connector->encoder->base)
-			connector->encoder =
-				to_intel_encoder(connector_state->best_encoder);
-	}
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		bool has_connectors;
-
-		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
-			return ret;
-
-		has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
-		if (has_connectors != crtc_state->enable)
-			crtc_state->enable =
-			crtc_state->active = has_connectors;
-	}
-
-	ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
-					      set->fb, set->x, set->y);
-	if (ret)
-		return ret;
-
-	crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
-	if (IS_ERR(crtc_state))
-		return PTR_ERR(crtc_state);
-
-	if (set->mode)
-		drm_mode_copy(&crtc_state->mode, set->mode);
-
-	if (set->num_connectors)
-		crtc_state->active = true;
-
-	return 0;
-}
-
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
-	struct drm_device *dev;
-	struct drm_atomic_state *state = NULL;
-	struct intel_crtc_state *pipe_config;
-	int ret;
-
-	BUG_ON(!set);
-	BUG_ON(!set->crtc);
-	BUG_ON(!set->crtc->helper_private);
-
-	/* Enforce sane interface api - has been abused by the fb helper. */
-	BUG_ON(!set->mode && set->fb);
-	BUG_ON(set->fb && set->num_connectors == 0);
-
-	if (set->fb) {
-		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, set->fb->base.id,
-				(int)set->num_connectors, set->x, set->y);
-	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-	}
-
-	dev = set->crtc->dev;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return -ENOMEM;
-
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-	ret = intel_modeset_stage_output_state(dev, set, state);
-	if (ret)
-		goto out;
-
-	pipe_config = intel_modeset_compute_config(set->crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
-	}
-
-	intel_update_pipe_size(to_intel_crtc(set->crtc));
-
-	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
-
-	if (ret) {
-		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
-			      set->crtc->base.id, ret);
-	}
-
-out:
-	if (ret)
-		drm_atomic_state_free(state);
-	return ret;
-}
-
 static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = intel_crtc_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 	.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13048,6 +13287,8 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	intel_update_cdclk(dev);
+
 	if (HAS_DDI(dev))
 		intel_ddi_pll_init(dev);
 	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13059,28 +13300,6 @@
 }
 
 /**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state)
-{
-	/* Update watermarks on tiling changes. */
-	if (!plane->state->fb || !state->fb ||
-	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
-	    plane->state->rotation != state->rotation)
-		return true;
-
-	return false;
-}
-
-/**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
  * @fb: framebuffer to prepare for presentation
@@ -13099,27 +13318,13 @@
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	enum pipe pipe = intel_plane->pipe;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-	unsigned frontbuffer_bits = 0;
 	int ret = 0;
 
 	if (!obj)
 		return 0;
 
-	switch (plane->type) {
-	case DRM_PLANE_TYPE_PRIMARY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
-		break;
-	case DRM_PLANE_TYPE_CURSOR:
-		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
-		break;
-	case DRM_PLANE_TYPE_OVERLAY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
-		break;
-	}
-
 	mutex_lock(&dev->struct_mutex);
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13129,11 +13334,11 @@
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
 	}
 
 	if (ret == 0)
-		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -13180,7 +13385,7 @@
 	dev = intel_crtc->base.dev;
 	dev_priv = dev->dev_private;
 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-	cdclk = dev_priv->display.get_display_clock_speed(dev);
+	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
 	if (!crtc_clock || !cdclk)
 		return DRM_PLANE_HELPER_NO_SCALING;
@@ -13198,112 +13403,28 @@
 
 static int
 intel_check_primary_plane(struct drm_plane *plane,
+			  struct intel_crtc_state *crtc_state,
 			  struct intel_plane_state *state)
 {
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *crtc_state;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
-	bool can_position = false;
-	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-	int ret;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	bool can_position = false;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* use scaler when colorkey is not required */
-		if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
-			min_scale = 1;
-			max_scale = skl_max_scale(intel_crtc, crtc_state);
-		}
+	/* use scaler when colorkey is not required */
+	if (INTEL_INFO(plane->dev)->gen >= 9 &&
+	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
+		min_scale = 1;
+		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
 		can_position = true;
 	}
 
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
-					    min_scale,
-					    max_scale,
-					    can_position, true,
-					    &state->visible);
-	if (ret)
-		return ret;
-
-	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
-		struct intel_plane_state *old_state =
-			to_intel_plane_state(plane->state);
-
-		intel_crtc->atomic.wait_for_flips = true;
-
-		/*
-		 * FBC does not work on some platforms for rotated
-		 * planes, so disable it when rotation is not 0 and
-		 * update it when rotation is set back to 0.
-		 *
-		 * FIXME: This is redundant with the fbc update done in
-		 * the primary plane enable function except that that
-		 * one is done too late. We eventually need to unify
-		 * this.
-		 */
-		if (state->visible &&
-		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-		    dev_priv->fbc.crtc == intel_crtc &&
-		    state->base.rotation != BIT(DRM_ROTATE_0)) {
-			intel_crtc->atomic.disable_fbc = true;
-		}
-
-		if (state->visible && !old_state->visible) {
-			/*
-			 * BDW signals flip done immediately if the plane
-			 * is disabled, even if the plane enable is already
-			 * armed to occur at the next vblank :(
-			 */
-			if (IS_BROADWELL(dev))
-				intel_crtc->atomic.wait_vblank = true;
-
-			if (crtc_state)
-				intel_crtc->atomic.post_enable_primary = true;
-		}
-
-		/*
-		 * FIXME: Actually if we will still have any other plane enabled
-		 * on the pipe we could let IPS enabled still, but for
-		 * now lets consider that when we make primary invisible
-		 * by setting DSPCNTR to 0 on update_primary_plane function
-		 * IPS needs to be disable.
-		 */
-		if (!state->visible || !fb)
-			intel_crtc->atomic.disable_ips = true;
-
-		if (!state->visible && old_state->visible &&
-		    crtc_state && !needs_modeset(&crtc_state->base))
-			intel_crtc->atomic.pre_disable_primary = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
-		intel_crtc->atomic.update_fbc = true;
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state,
-			to_intel_plane(plane), state, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					     &state->dst, &state->clip,
+					     min_scale, max_scale,
+					     can_position, true,
+					     &state->visible);
 }
 
 static void
@@ -13324,20 +13445,19 @@
 	crtc->x = src->x1 >> 16;
 	crtc->y = src->y1 >> 16;
 
-	if (intel_crtc->active) {
-		if (state->visible)
-			/* FIXME: kill this fastboot hack */
-			intel_update_pipe_size(intel_crtc);
+	if (!crtc->state->active)
+		return;
 
-		dev_priv->display.update_primary_plane(crtc, plane->fb,
-						       crtc->x, crtc->y);
-	}
+	if (state->visible)
+		/* FIXME: kill this fastboot hack */
+		intel_update_pipe_size(intel_crtc);
+
+	dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
 }
 
 static void
 intel_disable_primary_plane(struct drm_plane *plane,
-			    struct drm_crtc *crtc,
-			    bool force)
+			    struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13345,96 +13465,30 @@
 	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
 }
 
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-	struct drm_plane *p;
-	unsigned fb_bits = 0;
 
-	/* Track fb's for any planes being disabled */
-	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
-		intel_plane = to_intel_plane(p);
-
-		if (intel_crtc->atomic.disabled_planes &
-		    (1 << drm_plane_index(p))) {
-			switch (p->type) {
-			case DRM_PLANE_TYPE_PRIMARY:
-				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_CURSOR:
-				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_OVERLAY:
-				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
-				break;
-			}
-
-			mutex_lock(&dev->struct_mutex);
-			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
-			mutex_unlock(&dev->struct_mutex);
-		}
-	}
-
-	if (intel_crtc->atomic.wait_for_flips)
-		intel_crtc_wait_for_pending_flips(crtc);
-
-	if (intel_crtc->atomic.disable_fbc)
-		intel_fbc_disable(dev);
-
-	if (intel_crtc->atomic.disable_ips)
-		hsw_disable_ips(intel_crtc);
-
-	if (intel_crtc->atomic.pre_disable_primary)
-		intel_pre_disable_primary(crtc);
-
-	if (intel_crtc->atomic.update_wm)
+	if (intel_crtc->atomic.update_wm_pre)
 		intel_update_watermarks(crtc);
 
-	intel_runtime_pm_get(dev_priv);
-
 	/* Perform vblank evasion around commit operation */
-	if (intel_crtc->active)
-		intel_crtc->atomic.evade =
-			intel_pipe_update_start(intel_crtc,
-						&intel_crtc->atomic.start_vbl_count);
+	if (crtc->state->active)
+		intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
+	if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+		skl_detach_scalers(intel_crtc);
 }
 
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *p;
 
-	if (intel_crtc->atomic.evade)
-		intel_pipe_update_end(intel_crtc,
-				      intel_crtc->atomic.start_vbl_count);
-
-	intel_runtime_pm_put(dev_priv);
-
-	if (intel_crtc->atomic.wait_vblank)
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-
-	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
-	if (intel_crtc->atomic.update_fbc) {
-		mutex_lock(&dev->struct_mutex);
-		intel_fbc_update(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	if (intel_crtc->atomic.post_enable_primary)
-		intel_post_enable_primary(crtc);
-
-	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
-		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
-			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
-						       false, false);
-
-	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+	if (crtc->state->active)
+		intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
 }
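
intel_begin_crtc_commit() and intel_finish_crtc_commit() now bracket only the vblank evasion: intel_pipe_update_start() busy-waits until just after vblank start so that all plane register writes land within one frame, and intel_pipe_update_end() checks that the window was not overrun. Roughly, as a simplified rendering of what drm_atomic_helper_commit_planes_on_crtc() does with these hooks (not the verbatim helper core):

	crtc_funcs->atomic_begin(crtc, old_crtc_state);	/* -> intel_begin_crtc_commit */

	/* ... the helper calls ->atomic_update on each plane of this crtc ... */

	crtc_funcs->atomic_flush(crtc, old_crtc_state);	/* -> intel_finish_crtc_commit */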
 
 /**
@@ -13469,7 +13523,7 @@
 	struct intel_plane *primary;
 	struct intel_plane_state *state;
 	const uint32_t *intel_primary_formats;
-	int num_formats;
+	unsigned int num_formats;
 
 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
 	if (primary == NULL)
@@ -13490,10 +13544,10 @@
 	}
 	primary->pipe = pipe;
 	primary->plane = pipe;
+	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
 	primary->commit_plane = intel_commit_primary_plane;
 	primary->disable_plane = intel_disable_primary_plane;
-	primary->ckey.flags = I915_SET_COLORKEY_NONE;
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
 		primary->plane = !pipe;
 
@@ -13541,37 +13595,29 @@
 
 static int
 intel_check_cursor_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
-	struct drm_crtc *crtc = state->base.crtc;
-	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc;
 	unsigned stride;
 	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
+	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					    &state->dst, &state->clip,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    true, true, &state->visible);
 	if (ret)
 		return ret;
 
-
 	/* if we want to turn off the cursor, ignore width and height */
 	if (!obj)
-		goto finish;
+		return 0;
 
 	/* Check for which cursor types we support */
-	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
 			  state->base.crtc_w, state->base.crtc_h);
 		return -EINVAL;
@@ -13585,34 +13631,16 @@
 
 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-finish:
-	if (intel_crtc->active) {
-		if (plane->state->crtc_w != state->base.crtc_w)
-			intel_crtc->atomic.update_wm = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
-	}
-
-	return ret;
+	return 0;
 }
 
 static void
 intel_disable_cursor_plane(struct drm_plane *plane,
-			   struct drm_crtc *crtc,
-			   bool force)
+			   struct drm_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!force) {
-		plane->fb = NULL;
-		intel_crtc->cursor_bo = NULL;
-		intel_crtc->cursor_addr = 0;
-	}
-
 	intel_crtc_update_cursor(crtc, false);
 }
 
@@ -13645,9 +13673,9 @@
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = obj;
-update:
 
-	if (intel_crtc->active)
+update:
+	if (crtc->state->active)
 		intel_crtc_update_cursor(crtc, state->visible);
 }
 
@@ -13672,6 +13700,7 @@
 	cursor->max_downscale = 1;
 	cursor->pipe = pipe;
 	cursor->plane = pipe;
+	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
 	cursor->check_plane = intel_check_cursor_plane;
 	cursor->commit_plane = intel_commit_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
@@ -13712,8 +13741,6 @@
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
 		intel_scaler = &scaler_state->scalers[i];
 		intel_scaler->in_use = 0;
-		intel_scaler->id = i;
-
 		intel_scaler->mode = PS_SCALER_MODE_DYN;
 	}
 
@@ -13785,6 +13812,8 @@
 	intel_crtc->cursor_cntl = ~0;
 	intel_crtc->cursor_size = ~0;
 
+	intel_crtc->wm.cxsr_allowed = true;
+
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13919,8 +13948,7 @@
 		 */
 		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
-		if (found ||
-		    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+		if (found || IS_SKYLAKE(dev))
 			intel_ddi_init(dev, PORT_A);
 
 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13933,6 +13961,15 @@
 			intel_ddi_init(dev, PORT_C);
 		if (found & SFUSE_STRAP_DDID_DETECTED)
 			intel_ddi_init(dev, PORT_D);
+		/*
+		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+		 */
+		if (IS_SKYLAKE(dev) &&
+		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+			intel_ddi_init(dev, PORT_E);
+
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -13996,18 +14033,18 @@
 		}
 
 		intel_dsi_init(dev);
-	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
 		bool found = false;
 
 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (!found && IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
 			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (!found && IS_G4X(dev))
 				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
@@ -14020,15 +14057,15 @@
 
 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
-			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
 			}
-			if (SUPPORTS_INTEGRATED_DP(dev))
+			if (IS_G4X(dev))
 				intel_dp_init(dev, DP_C, PORT_C);
 		}
 
-		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		if (IS_G4X(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D, PORT_D);
 	} else if (IS_GEN2(dev))
@@ -14073,9 +14110,27 @@
 	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+					struct drm_file *file,
+					unsigned flags, unsigned color,
+					struct drm_clip_rect *clips,
+					unsigned num_clips)
+{
+	struct drm_device *dev = fb->dev;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
 	.destroy = intel_user_framebuffer_destroy,
 	.create_handle = intel_user_framebuffer_create_handle,
+	.dirty = intel_user_framebuffer_dirty,
 };
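
Wiring .dirty into intel_fb_funcs means frontbuffer-rendering clients can flush through the DIRTYFB ioctl, which i915 now maps to an intel_fb_obj_flush() with ORIGIN_DIRTYFB. A minimal libdrm sketch of the userspace side, assuming fd is an open DRM device and fb_id a framebuffer created on it (the 1920x1080 clip is a stand-in for the real mode size):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Report the whole framebuffer as dirty so the driver flushes it. */
static int flush_whole_fb(int fd, uint32_t fb_id)
{
	drmModeClip clip = {
		.x1 = 0,
		.y1 = 0,
		.x2 = 1920,	/* assumed mode width */
		.y2 = 1080,	/* assumed mode height */
	};

	return drmModeDirtyFB(fd, fb_id, &clip, 1);
}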
 
 static
@@ -14270,7 +14325,7 @@
 	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
-#ifndef CONFIG_DRM_I915_FBDEV
+#ifndef CONFIG_DRM_FBDEV_EMULATION
 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 }
@@ -14281,6 +14336,8 @@
 	.output_poll_changed = intel_fbdev_output_poll_changed,
 	.atomic_check = intel_atomic_check,
 	.atomic_commit = intel_atomic_commit,
+	.atomic_state_alloc = intel_atomic_state_alloc,
+	.atomic_state_clear = intel_atomic_state_clear,
 };
 
 /* Set up chip specific display functions */
@@ -14307,7 +14364,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			skylake_update_primary_plane;
 	} else if (HAS_DDI(dev)) {
@@ -14318,7 +14374,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
@@ -14329,7 +14384,6 @@
 			ironlake_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -14339,7 +14393,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	} else {
@@ -14349,7 +14402,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	}
@@ -14358,6 +14410,9 @@
 	if (IS_SKYLAKE(dev))
 		dev_priv->display.get_display_clock_speed =
 			skylake_get_display_clock_speed;
+	else if (IS_BROXTON(dev))
+		dev_priv->display.get_display_clock_speed =
+			broxton_get_display_clock_speed;
 	else if (IS_BROADWELL(dev))
 		dev_priv->display.get_display_clock_speed =
 			broadwell_get_display_clock_speed;
@@ -14371,9 +14426,21 @@
 		dev_priv->display.get_display_clock_speed =
 			ilk_get_display_clock_speed;
 	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
-		 IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
 		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
+	else if (IS_GM45(dev))
+		dev_priv->display.get_display_clock_speed =
+			gm45_get_display_clock_speed;
+	else if (IS_CRESTLINE(dev))
+		dev_priv->display.get_display_clock_speed =
+			i965gm_get_display_clock_speed;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			pnv_get_display_clock_speed;
+	else if (IS_G33(dev) || IS_G4X(dev))
+		dev_priv->display.get_display_clock_speed =
+			g33_get_display_clock_speed;
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
@@ -14391,10 +14458,12 @@
 			i865_get_display_clock_speed;
 	else if (IS_I85X(dev))
 		dev_priv->display.get_display_clock_speed =
-			i855_get_display_clock_speed;
-	else /* 852, 830 */
+			i85x_get_display_clock_speed;
+	else { /* 830 */
+		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
+	}
 
 	if (IS_GEN5(dev)) {
 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14405,12 +14474,22 @@
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		if (IS_BROADWELL(dev)) {
+			dev_priv->display.modeset_commit_cdclk =
+				broadwell_modeset_commit_cdclk;
+			dev_priv->display.modeset_calc_cdclk =
+				broadwell_modeset_calc_cdclk;
+		}
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.modeset_global_resources =
-			valleyview_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			valleyview_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			valleyview_modeset_calc_cdclk;
 	} else if (IS_BROXTON(dev)) {
-		dev_priv->display.modeset_global_resources =
-			broxton_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			broxton_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			broxton_modeset_calc_cdclk;
 	}
 
 	switch (INTEL_INFO(dev)->gen) {
@@ -14629,13 +14708,9 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+	intel_update_cdclk(dev);
 	intel_prepare_ddi(dev);
-
-	if (IS_VALLEYVIEW(dev))
-		vlv_update_cdclk(dev);
-
 	intel_init_clock_gating(dev);
-
 	intel_enable_gt_powersave(dev);
 }
 
@@ -14665,6 +14740,24 @@
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return;
 
+	/*
+	 * There may be no VBT; and if the BIOS enabled SSC we can
+	 * just keep using it to avoid unnecessary flicker.  But if the
+	 * BIOS isn't using it, don't assume it will work even if the VBT
+	 * indicates as much.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+					    DREF_SSC1_ENABLE);
+
+		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+				     bios_lvds_use_ssc ? "en" : "dis",
+				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+		}
+	}
+
 	intel_init_display(dev);
 	intel_init_audio(dev);
 
@@ -14715,13 +14808,15 @@
 	intel_setup_outputs(dev);
 
 	/* Just in case the BIOS is doing something questionable. */
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, false);
+	intel_modeset_setup_hw_state(dev);
 	drm_modeset_unlock_all(dev);
 
 	for_each_intel_crtc(dev, crtc) {
+		struct intel_initial_plane_config plane_config = {};
+
 		if (!crtc->active)
 			continue;
 
@@ -14732,15 +14827,14 @@
 		 * can even allow for smooth boot transitions if the BIOS
 		 * fb is large enough for the active pipe configuration.
 		 */
-		if (dev_priv->display.get_initial_plane_config) {
-			dev_priv->display.get_initial_plane_config(crtc,
-							   &crtc->plane_config);
-			/*
-			 * If the fb is shared between multiple heads, we'll
-			 * just get the first one.
-			 */
-			intel_find_initial_plane_obj(crtc, &crtc->plane_config);
-		}
+		dev_priv->display.get_initial_plane_config(crtc,
+							   &plane_config);
+
+		/*
+		 * If the fb is shared between multiple heads, we'll
+		 * just get the first one.
+		 */
+		intel_find_initial_plane_obj(crtc, &plane_config);
 	}
 }
 
@@ -14792,7 +14886,9 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
 	u32 reg;
+	bool enable;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
 	reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14801,6 +14897,7 @@
 	/* restore vblank interrupts to correct state */
 	drm_crtc_vblank_reset(&crtc->base);
 	if (crtc->active) {
+		drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 		update_scanline_offset(crtc);
 		drm_crtc_vblank_on(&crtc->base);
 	}
@@ -14809,7 +14906,6 @@
 	 * disable the crtc (and hence change the state) if it is wrong. Note
 	 * that gen4+ has a fixed plane -> pipe mapping.  */
 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
-		struct intel_connector *connector;
 		bool plane;
 
 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14821,30 +14917,8 @@
 		plane = crtc->plane;
 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
 		crtc->plane = !plane;
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base);
 		crtc->plane = plane;
-
-		/* ... and break all links. */
-		for_each_intel_connector(dev, connector) {
-			if (connector->encoder->base.crtc != &crtc->base)
-				continue;
-
-			connector->base.dpms = DRM_MODE_DPMS_OFF;
-			connector->base.encoder = NULL;
-		}
-		/* multiple connectors may have the same encoder:
-		 *  handle them and break crtc link separately */
-		for_each_intel_connector(dev, connector)
-			if (connector->encoder->base.crtc == &crtc->base) {
-				connector->encoder->base.crtc = NULL;
-				connector->encoder->connectors_active = false;
-			}
-
-		WARN_ON(crtc->active);
-		crtc->base.state->enable = false;
-		crtc->base.state->active = false;
-		crtc->base.enabled = false;
 	}
 
 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14858,20 +14932,27 @@
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	intel_crtc_update_dpms(&crtc->base);
+	enable = false;
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+		enable = true;
+		break;
+	}
 
-	if (crtc->active != crtc->base.state->enable) {
-		struct intel_encoder *encoder;
+	if (!enable)
+		intel_crtc_disable_noatomic(&crtc->base);
+
+	if (crtc->active != crtc->base.state->active) {
 
 		/* This can happen either due to bugs in the get_hw_state
-		 * functions or because the pipe is force-enabled due to the
+		 * functions or because of calls to intel_crtc_disable_noatomic,
+		 * or because the pipe is force-enabled due to the
 		 * pipe A quirk. */
 		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
 			      crtc->base.base.id,
 			      crtc->base.state->enable ? "enabled" : "disabled",
 			      crtc->active ? "enabled" : "disabled");
 
-		crtc->base.state->enable = crtc->active;
+		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
@@ -14882,10 +14963,8 @@
 		 *  actually up, hence no need to break them. */
 		WARN_ON(crtc->active);
 
-		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-			WARN_ON(encoder->connectors_active);
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 			encoder->base.crtc = NULL;
-		}
 	}
 
 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14911,6 +14990,7 @@
 {
 	struct intel_connector *connector;
 	struct drm_device *dev = encoder->base.dev;
+	bool active = false;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -14918,7 +14998,15 @@
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	if (encoder->connectors_active && !has_active_crtc) {
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder != &encoder->base)
+			continue;
+
+		active = true;
+		break;
+	}
+
+	if (active && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -14935,7 +15023,6 @@
 				encoder->post_disable(encoder);
 		}
 		encoder->base.crtc = NULL;
-		encoder->connectors_active = false;
 
 		/* Inconsistent output/port/pipe state happens presumably due to
 		 * a bug in one of the get_hw_state functions. Or someplace else
@@ -14984,10 +15071,31 @@
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!crtc->active)
-		return false;
+	return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
 
-	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+static void readout_plane_state(struct intel_crtc *crtc,
+				struct intel_crtc_state *crtc_state)
+{
+	struct intel_plane *p;
+	struct intel_plane_state *plane_state;
+	bool active = crtc_state->base.active;
+
+	for_each_intel_plane(crtc->base.dev, p) {
+		if (crtc->pipe != p->pipe)
+			continue;
+
+		plane_state = to_intel_plane_state(p->base.state);
+
+		if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+			plane_state->visible = primary_get_hw_state(crtc);
+		else {
+			if (active)
+				p->disable_plane(&p->base, &crtc->base);
+
+			plane_state->visible = false;
+		}
+	}
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15000,23 +15108,44 @@
 	int i;
 
 	for_each_intel_crtc(dev, crtc) {
-		struct drm_plane *primary = crtc->base.primary;
-		struct intel_plane_state *plane_state;
-
+		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
 		memset(crtc->config, 0, sizeof(*crtc->config));
 		crtc->config->base.crtc = &crtc->base;
 
-		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
-
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 crtc->config);
 
-		crtc->base.state->enable = crtc->active;
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
-		plane_state = to_intel_plane_state(primary->state);
-		plane_state->visible = primary_get_hw_state(crtc);
+		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+		if (crtc->base.state->active) {
+			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+			/*
+			 * The initial mode needs to be set in order to keep
+			 * the atomic core happy. It wants a valid mode if the
+			 * crtc is enabled, so we do the above call.
+			 *
+			 * At this point some state updated by the connectors
+			 * in their ->detect() callback has not run yet, so
+			 * no recalculation can be done yet.
+			 *
+			 * Even if we could do a recalculation and modeset
+			 * right now it would cause a double modeset if
+			 * fbdev or userspace chooses a different initial mode.
+			 *
+			 * If that happens, someone indicated they wanted a
+			 * mode change, which means it's safe to do a full
+			 * recalculation.
+			 */
+			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+		}
+
+		crtc->base.hwmode = crtc->config->base.adjusted_mode;
+		readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -15055,7 +15184,6 @@
 			encoder->base.crtc = NULL;
 		}
 
-		encoder->connectors_active = false;
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
 			      encoder->base.base.id,
 			      encoder->base.name,
@@ -15066,7 +15194,6 @@
 	for_each_intel_connector(dev, connector) {
 		if (connector->get_hw_state(connector)) {
 			connector->base.dpms = DRM_MODE_DPMS_ON;
-			connector->encoder->connectors_active = true;
 			connector->base.encoder = &connector->encoder->base;
 		} else {
 			connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15079,10 +15206,11 @@
 	}
 }
 
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
-				  bool force_restore)
+/* Scan out the current hw modeset state,
+ * and sanitize it to the current state
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
@@ -15092,21 +15220,6 @@
 
 	intel_modeset_readout_hw_state(dev);
 
-	/*
-	 * Now that we have the config, copy it to each CRTC struct
-	 * Note that this could go away if we move to using crtc_config
-	 * checking everywhere.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (crtc->active && i915.fastboot) {
-			intel_mode_from_pipe_config(&crtc->base.mode,
-						    crtc->config);
-			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
-				      crtc->base.base.id);
-			drm_mode_debug_printmodeline(&crtc->base.mode);
-		}
-	}
-
 	/* HW state is read out, now we need to sanitize this mess. */
 	for_each_intel_encoder(dev, encoder) {
 		intel_sanitize_encoder(encoder);
@@ -15133,34 +15246,77 @@
 		pll->on = false;
 	}
 
-	if (IS_GEN9(dev))
+	if (IS_VALLEYVIEW(dev))
+		vlv_wm_get_hw_state(dev);
+	else if (IS_GEN9(dev))
 		skl_wm_get_hw_state(dev);
 	else if (HAS_PCH_SPLIT(dev))
 		ilk_wm_get_hw_state(dev);
 
-	if (force_restore) {
-		i915_redisable_vga(dev);
+	for_each_intel_crtc(dev, crtc) {
+		unsigned long put_domains;
 
-		/*
-		 * We need to use raw interfaces for restoring state to avoid
-		 * checking (bogus) intermediate states.
-		 */
-		for_each_pipe(dev_priv, pipe) {
-			struct drm_crtc *crtc =
-				dev_priv->pipe_to_crtc_mapping[pipe];
+		put_domains = modeset_get_crtc_power_domains(&crtc->base);
+		if (WARN_ON(put_domains))
+			modeset_put_power_domains(dev_priv, put_domains);
+	}
+	intel_display_set_init_power(dev_priv, false);
+}
 
-			intel_crtc_restore_mode(crtc);
-		}
-	} else {
-		intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+	struct intel_connector *conn;
+	struct intel_plane *plane;
+	struct drm_crtc *crtc;
+	int ret;
+
+	if (!state)
+		return;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+	/* preserve complete old state, including dpll */
+	intel_atomic_get_shared_dpll_state(state);
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
 	}
 
-	intel_modeset_check_state(dev);
+	for_each_intel_plane(dev, plane) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+		if (ret)
+			goto err;
+	}
+
+	for_each_intel_connector(dev, conn) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+		if (ret)
+			goto err;
+	}
+
+	intel_modeset_setup_hw_state(dev);
+
+	i915_redisable_vga(dev);
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_free(state);
 }
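
For readers unfamiliar with the convention in intel_display_resume() above: the atomic getters (drm_atomic_get_crtc_state() and friends) return ERR_PTR-encoded pointers, and PTR_ERR_OR_ZERO() collapses that to zero or a negative errno. A minimal sketch of the encoding, assuming the kernel's usual MAX_ERRNO of 4095 (plain C, not the kernel's actual headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* The top MAX_ERRNO addresses are reserved to carry -errno values. */
    static int is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static long ptr_err_or_zero(const void *p)
    {
        return is_err(p) ? (long)p : 0;
    }

    int main(void)
    {
        void *err = (void *)(long)-12;          /* like ERR_PTR(-ENOMEM) */

        printf("%ld\n", ptr_err_or_zero(err));  /* prints -12 */
        return 0;
    }
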
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -15169,16 +15325,6 @@
 	intel_init_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	/*
-	 * There may be no VBT; and if the BIOS enabled SSC we can
-	 * just keep using it to avoid unnecessary flicker.  Whereas if the
-	 * BIOS isn't using it, don't assume it will work even if the VBT
-	 * indicates as much.
-	 */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-						DREF_SSC1_ENABLE);
-
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
@@ -15197,14 +15343,16 @@
 		ret = intel_pin_and_fence_fb_obj(c->primary,
 						 c->primary->fb,
 						 c->primary->state,
-						 NULL);
+						 NULL, NULL);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
 			c->primary->fb = NULL;
+			c->primary->crtc = c->primary->state->crtc = NULL;
 			update_state_fb(c->primary);
+			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
 		}
 	}
 
@@ -15241,13 +15389,9 @@
 	 */
 	drm_kms_helper_poll_fini(dev);
 
-	mutex_lock(&dev->struct_mutex);
-
 	intel_unregister_dsm_handler();
 
-	intel_fbc_disable(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_disable(dev_priv);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1df0e1f..0a2e33f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,28 +48,28 @@
 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
 
 struct dp_link_dpll {
-	int link_bw;
+	int clock;
 	struct dpll dpll;
 };
 
 static const struct dp_link_dpll gen4_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
 };
 
 static const struct dp_link_dpll pch_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
 };
 
 static const struct dp_link_dpll vlv_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
 };
 
@@ -83,14 +83,16 @@
 	 * m2 is stored in fixed point format using formula below
 	 * (m2_int << 22) | m2_fraction
 	 */
-	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
+	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
-	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
+	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
+	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+				  324000, 432000, 540000 };
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
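
The fixed-point comment above is easy to verify: 0x819999a splits into m2_int = 32 and m2_fraction = 1677722, i.e. m2 is roughly 32.4. A stand-alone check of the (m2_int << 22) | m2_fraction format (plain C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t m2 = 0x819999a;
        uint32_t m2_int = m2 >> 22;                 /* integer part */
        uint32_t m2_frac = m2 & ((1u << 22) - 1);   /* 22-bit fraction */

        /* prints: m2_int = 32, m2_fraction = 1677722 (~32.400000) */
        printf("m2_int = %u, m2_fraction = %u (~%f)\n",
               m2_int, m2_frac, m2_int + m2_frac / 4194304.0);
        return 0;
    }
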
@@ -562,7 +564,9 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_CONTROL(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_CONTROL;
 	else
 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -572,7 +576,9 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_STATUS(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_STATUS;
 	else
 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -705,7 +711,8 @@
 		return 0;
 
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
 	} else {
 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 	}
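
The hunk above only swaps the indirect get_display_clock_speed() call for the cached dev_priv->cdclk_freq; the arithmetic is unchanged. cdclk_freq is in kHz and the AUX channel wants a roughly 2 MHz clock, so the divider is a ceiling division by 2000. A sketch of that arithmetic (unit handling assumed, not the driver's code):

    /* e.g. a 337500 kHz (337.5 MHz) cdclk gives (337500 + 1999) / 2000 == 169 */
    static inline unsigned int aux_clock_divider(unsigned int cdclk_khz)
    {
        return (cdclk_khz + 1999) / 2000;   /* DIV_ROUND_UP(cdclk, 2000) */
    }
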
@@ -720,7 +727,7 @@
 	if (intel_dig_port->port == PORT_A) {
 		if (index)
 			return 0;
-		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
@@ -839,8 +846,15 @@
 	}
 
 	if (try == 3) {
-		WARN(1, "dp_aux_ch not started status 0x%08x\n",
-		     I915_READ(ch_ctl));
+		static u32 last_status = -1;
+		const u32 status = I915_READ(ch_ctl);
+
+		if (status != last_status) {
+			WARN(1, "dp_aux_ch not started status 0x%08x\n",
+			     status);
+			last_status = status;
+		}
+
 		ret = -EBUSY;
 		goto out;
 	}
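
The hunk above is a small rate-limiting pattern worth naming: rather than WARN on every AUX timeout, it keeps a function-local static of the last observed value and warns only when the register contents change. The same idea stripped to its core (a sketch, not the driver's code):

    #include <stdint.h>
    #include <stdio.h>

    static void warn_once_per_status(uint32_t status)
    {
        static uint32_t last_status = (uint32_t)-1;

        if (status != last_status) {
            fprintf(stderr, "dp_aux_ch not started status 0x%08x\n", status);
            last_status = status;
        }
    }
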
@@ -1016,11 +1030,34 @@
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
 	const char *name = NULL;
+	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
 	int ret;
 
+	/* On SKL we don't have AUX for port E, so we rely on VBT to set
+	 * a proper alternate AUX channel.
+	 */
+	if (IS_SKYLAKE(dev) && port == PORT_E) {
+		switch (info->alternate_aux_channel) {
+		case DP_AUX_B:
+			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+			break;
+		case DP_AUX_C:
+			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+			break;
+		case DP_AUX_D:
+			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+			break;
+		case DP_AUX_A:
+		default:
+			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+		}
+	}
+
 	switch (port) {
 	case PORT_A:
 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1038,6 +1075,10 @@
 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
 		name = "DPDDC-D";
 		break;
+	case PORT_E:
+		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+		name = "DPDDC-E";
+		break;
 	default:
 		BUG();
 	}
@@ -1051,7 +1092,7 @@
 	 *
 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
 	 */
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
 	intel_dp->aux.name = name;
@@ -1089,7 +1130,7 @@
 }
 
 static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
 {
 	u32 ctrl1;
 
@@ -1101,7 +1142,7 @@
 	pipe_config->dpll_hw_state.cfgcr2 = 0;
 
 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	switch (link_clock / 2) {
+	switch (pipe_config->port_clock / 2) {
 	case 81000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
 					      SKL_DPLL0);
@@ -1134,20 +1175,20 @@
 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
 }
 
-static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
 {
 	memset(&pipe_config->dpll_hw_state, 0,
 	       sizeof(pipe_config->dpll_hw_state));
 
-	switch (link_bw) {
-	case DP_LINK_BW_1_62:
+	switch (pipe_config->port_clock / 2) {
+	case 81000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
 		break;
-	case DP_LINK_BW_2_7:
+	case 135000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
 		break;
-	case DP_LINK_BW_5_4:
+	case 270000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
 		break;
 	}
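
This series consistently replaces DP_LINK_BW_* bandwidth codes with port clocks in kHz; the two are related by the DisplayPort rule that the link rate is the code times 27000 kHz, which is what drm_dp_bw_code_to_link_rate() computes. For reference, a sketch of the mapping behind the switch cases above:

    /* 0x06 (DP_LINK_BW_1_62) -> 162000, 0x0a (2_7) -> 270000,
     * 0x14 (5_4) -> 540000; port_clock / 2 then gives 81000/135000/270000. */
    static int bw_code_to_link_rate_khz(int link_bw)
    {
        return link_bw * 27000;
    }
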
@@ -1182,23 +1223,29 @@
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-	if (IS_SKYLAKE(dev)) {
+	int size;
+
+	if (IS_BROXTON(dev)) {
+		*source_rates = bxt_rates;
+		size = ARRAY_SIZE(bxt_rates);
+	} else if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
-		return ARRAY_SIZE(skl_rates);
+		size = ARRAY_SIZE(skl_rates);
+	} else {
+		*source_rates = default_rates;
+		size = ARRAY_SIZE(default_rates);
 	}
 
-	*source_rates = default_rates;
-
 	/* This depends on the fact that 5.4 is last value in the array */
-	if (intel_dp_source_supports_hbr2(dev))
-		return (DP_LINK_BW_5_4 >> 3) + 1;
-	else
-		return (DP_LINK_BW_2_7 >> 3) + 1;
+	if (!intel_dp_source_supports_hbr2(dev))
+		size--;
+
+	return size;
 }
 
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
-		   struct intel_crtc_state *pipe_config, int link_bw)
+		   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
 	const struct dp_link_dpll *divisor = NULL;
@@ -1220,7 +1267,7 @@
 
 	if (divisor && count) {
 		for (i = 0; i < count; i++) {
-			if (link_bw == divisor[i].link_bw) {
+			if (pipe_config->port_clock == divisor[i].clock) {
 				pipe_config->dpll = divisor[i].dpll;
 				pipe_config->clock_set = true;
 				break;
@@ -1378,7 +1425,7 @@
 
 		if (INTEL_INFO(dev)->gen >= 9) {
 			int ret;
-			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+			ret = skl_update_scaler_crtc(pipe_config);
 			if (ret)
 				return ret;
 		}
@@ -1403,7 +1450,10 @@
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
 	if (is_edp(intel_dp)) {
-		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+		/* Get bpp from VBT only for panels that don't have bpp in EDID */
+		if (intel_connector->base.display_info.bpc == 0 &&
+			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 				      dev_priv->vbt.edp_bpp);
 			bpp = dev_priv->vbt.edp_bpp;
@@ -1494,13 +1544,13 @@
 	}
 
 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
-		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+		skl_edp_set_pll_config(pipe_config);
 	else if (IS_BROXTON(dev))
 		/* handled in ddi */;
 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+		hsw_dp_set_ddi_pll_sel(pipe_config);
 	else
-		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+		intel_dp_set_clock(encoder, pipe_config);
 
 	return true;
 }
@@ -1703,8 +1753,10 @@
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	control = I915_READ(_pp_ctrl_reg(intel_dp));
-	control &= ~PANEL_UNLOCK_MASK;
-	control |= PANEL_UNLOCK_REGS;
+	if (!IS_BROXTON(dev)) {
+		control &= ~PANEL_UNLOCK_MASK;
+		control |= PANEL_UNLOCK_REGS;
+	}
 	return control;
 }
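
For context on the new Broxton branch above: on earlier platforms PP_CONTROL carries a write-protect key in its upper 16 bits, which ironlake_get_pp_control() forces to the unlock value; Broxton has no such key. Assuming the usual register layout (a 0xffff0000 mask and the 0xabcd key), the masking amounts to:

    /* Replace the top 16 bits of PP_CONTROL with the unlock key. */
    static unsigned int unlock_pp_control(unsigned int control)
    {
        return (control & ~0xffff0000u) | (0xabcdu << 16);
    }
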
 
@@ -2616,7 +2668,7 @@
 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
 			      pipe_name(pipe), port_name(port));
 
-		WARN(encoder->connectors_active,
+		WARN(encoder->base.crtc,
 		     "stealing pipe %c power sequencer from active eDP port %c\n",
 		     pipe_name(pipe), port_name(port));
 
@@ -3418,92 +3470,6 @@
 	}
 }
 
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(0);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(1);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(2);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		return DDI_BUF_TRANS_SELECT(3);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(4);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(5);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(6);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(7);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(8);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(9);
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-			      "0x%x\n", signal_levels);
-		return DDI_BUF_TRANS_SELECT(0);
-	}
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	enum port port = dport->port;
-	struct drm_device *dev = dport->base.base.dev;
-	struct intel_encoder *encoder = &dport->base;
-	uint8_t train_set = intel_dp->train_set[0];
-	uint32_t level = 0;
-
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 0;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 1;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 2;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		level = 3;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 4;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 5;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 6;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 7;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 8;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 9;
-		break;
-	}
-
-	bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3511,22 +3477,20 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	uint32_t signal_levels, mask;
+	uint32_t signal_levels, mask = 0;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (IS_BROXTON(dev)) {
-		signal_levels = 0;
-		bxt_signal_levels(intel_dp);
-		mask = 0;
-	} else if (HAS_DDI(dev)) {
-		signal_levels = hsw_signal_levels(train_set);
-		mask = DDI_BUF_EMP_MASK;
+	if (HAS_DDI(dev)) {
+		signal_levels = ddi_signal_levels(intel_dp);
+
+		if (IS_BROXTON(dev))
+			signal_levels = 0;
+		else
+			mask = DDI_BUF_EMP_MASK;
 	} else if (IS_CHERRYVIEW(dev)) {
 		signal_levels = chv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_VALLEYVIEW(dev)) {
 		signal_levels = vlv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_GEN7(dev) && port == PORT_A) {
 		signal_levels = gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -4043,43 +4007,67 @@
 	return intel_dp->is_mst;
 }
 
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
-	int test_crc_count;
-	int attempts = 6;
-	int ret = 0;
-
-	hsw_disable_ips(intel_crtc);
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
-		ret = -ENOTTY;
-		goto out;
-	}
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+		return;
 	}
 
 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-				buf | DP_TEST_SINK_START) < 0) {
-		ret = -EIO;
-		goto out;
+			       buf & ~DP_TEST_SINK_START) < 0)
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+
+	hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+		return -EIO;
+
+	if (!(buf & DP_TEST_CRC_SUPPORTED))
+		return -ENOTTY;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+		return -EIO;
+
+	hsw_disable_ips(intel_crtc);
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+			       buf | DP_TEST_SINK_START) < 0) {
+		hsw_enable_ips(intel_crtc);
+		return -EIO;
 	}
 
+	return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int test_crc_count;
+	int attempts = 6;
+	int ret;
+
+	ret = intel_dp_sink_crc_start(intel_dp);
+	if (ret)
+		return ret;
+
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
 		ret = -EIO;
-		goto out;
+		goto stop;
 	}
 
 	test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4088,7 +4076,7 @@
 		if (drm_dp_dpcd_readb(&intel_dp->aux,
 				      DP_TEST_SINK_MISC, &buf) < 0) {
 			ret = -EIO;
-			goto out;
+			goto stop;
 		}
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
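
The refactored CRC path keeps the same bounded polling shape as before: sample the DP_TEST_SINK_MISC count once per vblank and give up after six attempts if it never advances. The skeleton of that loop, with the DPCD read and vblank wait reduced to hypothetical stubs:

    #include <stdbool.h>
    #include <stdint.h>

    extern uint8_t read_test_count(void);  /* stand-in for the DPCD read */
    extern void wait_for_vblank(void);     /* stand-in for the vblank wait */

    static bool wait_count_change(uint8_t old_count, int attempts)
    {
        do {
            if (read_test_count() != old_count)
                return true;
            wait_for_vblank();
        } while (--attempts);

        return false;                      /* maps to -ETIMEDOUT above */
    }
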
@@ -4096,25 +4084,13 @@
 	if (attempts == 0) {
 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
 		ret = -ETIMEDOUT;
-		goto out;
+		goto stop;
 	}
 
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
 		ret = -EIO;
-		goto out;
-	}
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-			       buf & ~DP_TEST_SINK_START) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-out:
-	hsw_enable_ips(intel_crtc);
+stop:
+	intel_dp_sink_crc_stop(intel_dp);
 	return ret;
 }
 
@@ -4175,9 +4151,16 @@
 				      intel_dp->aux.i2c_defer_count);
 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
 	} else {
+		struct edid *block = intel_connector->detect_edid;
+
+		/* We have to write the checksum
+		 * of the last block read
+		 */
+		block += intel_connector->detect_edid->extensions;
+
 		if (!drm_dp_dpcd_write(&intel_dp->aux,
 					DP_TEST_EDID_CHECKSUM,
-					&intel_connector->detect_edid->checksum,
+					&block->checksum,
 					1))
 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
 
@@ -4325,10 +4308,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	if (!intel_encoder->connectors_active)
-		return;
-
-	if (WARN_ON(!intel_encoder->base.crtc))
+	if (!intel_encoder->base.crtc)
 		return;
 
 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4909,7 +4889,7 @@
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_detect,
 	.force = intel_dp_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4931,12 +4911,6 @@
 	.destroy = intel_dp_encoder_destroy,
 };
 
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
-	return;
-}
-
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
@@ -4987,9 +4961,12 @@
 
 		intel_dp_probe_oui(intel_dp);
 
-		if (!intel_dp_probe_mst(intel_dp))
+		if (!intel_dp_probe_mst(intel_dp)) {
+			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+			intel_dp_check_link_status(intel_dp);
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
 			goto mst_fail;
-
+		}
 	} else {
 		if (intel_dp->is_mst) {
 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4997,10 +4974,6 @@
 		}
 
 		if (!intel_dp->is_mst) {
-			/*
-			 * we'll check the link status via the normal hot plug path later -
-			 * but for short hpds we should check it now
-			 */
 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 			intel_dp_check_link_status(intel_dp);
 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5042,16 +5015,17 @@
 	return -1;
 }
 
-/* check the VBT to see whether the eDP is on DP-D port */
+/* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
 	static const short port_mapping[] = {
-		[PORT_B] = PORT_IDPB,
-		[PORT_C] = PORT_IDPC,
-		[PORT_D] = PORT_IDPD,
+		[PORT_B] = DVO_PORT_DPB,
+		[PORT_C] = DVO_PORT_DPC,
+		[PORT_D] = DVO_PORT_DPD,
+		[PORT_E] = DVO_PORT_DPE,
 	};
 
 	if (port == PORT_A)
@@ -5104,8 +5078,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec,
 		*final = &intel_dp->pps_delays;
-	u32 pp_on, pp_off, pp_div, pp;
-	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5113,7 +5087,16 @@
 	if (final->t11_t12 != 0)
 		return;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register for Broxton needs to be
+		 * identified using VBT; hardcoding for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5129,12 +5112,14 @@
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
-	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_ctrl_reg, pp);
+	pp_ctl = ironlake_get_pp_control(intel_dp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
-	pp_div = I915_READ(pp_div_reg);
+	if (!IS_BROXTON(dev)) {
+		I915_WRITE(pp_ctrl_reg, pp_ctl);
+		pp_div = I915_READ(pp_div_reg);
+	}
 
 	/* Pull timing values out of registers */
 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5149,8 +5134,17 @@
 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
 		PANEL_POWER_DOWN_DELAY_SHIFT;
 
-	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+	if (IS_BROXTON(dev)) {
+		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+			BXT_POWER_CYCLE_DELAY_SHIFT;
+		if (tmp > 0)
+			cur.t11_t12 = (tmp - 1) * 1000;
+		else
+			cur.t11_t12 = 0;
+	} else {
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+	}
 
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
@@ -5207,13 +5201,23 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
-	int pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register for Broxton needs to be
+		 * identified using VBT; hardcoding for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
@@ -5239,9 +5243,16 @@
 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
 	/* Compute the divisor for the pp clock, simply match the Bspec
 	 * formula. */
-	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
-	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	if (IS_BROXTON(dev)) {
+		pp_div = I915_READ(pp_ctrl_reg);
+		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+				<< BXT_POWER_CYCLE_DELAY_SHIFT);
+	} else {
+		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	}
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
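
In both branches above, DIV_ROUND_UP is the kernel's ceiling-division macro, so a t11_t12 value with any remainder rounds the programmed power cycle delay up rather than silently shortening it:

    /* As defined in include/linux/kernel.h. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* e.g. DIV_ROUND_UP(4500, 1000) == 5: a 4500-unit delay never
     * programs a shorter power cycle than requested. */
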
@@ -5258,11 +5269,16 @@
 
 	I915_WRITE(pp_on_reg, pp_on);
 	I915_WRITE(pp_off_reg, pp_off);
-	I915_WRITE(pp_div_reg, pp_div);
+	if (IS_BROXTON(dev))
+		I915_WRITE(pp_ctrl_reg, pp_div);
+	else
+		I915_WRITE(pp_div_reg, pp_div);
 
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
 		      I915_READ(pp_on_reg),
 		      I915_READ(pp_off_reg),
+		      IS_BROXTON(dev) ?
+		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
 		      I915_READ(pp_div_reg));
 }
 
@@ -5467,13 +5483,12 @@
 }
 
 /**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. switched from LOW_RR to HIGH_RR.
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5498,26 +5513,27 @@
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	/* invalidate means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
 		intel_dp_set_drrs_state(dev_priv->dev,
 				dev_priv->drrs.dp->attached_connector->panel.
 				fixed_mode->vrefresh);
-	}
 
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
-	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed, or a flip on a crtc has completed. So DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection should also be restarted
+ * if no other planes are dirty.
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5541,10 +5557,21 @@
 
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
-			!dev_priv->drrs.busy_frontbuffer_bits)
+	/* flush means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+				dev_priv->drrs.dp->attached_connector->panel.
+				fixed_mode->vrefresh);
+
+	/*
+	 * flush also means no more activity hence schedule downclock, if all
+	 * other fbs are quiescent too
+	 */
+	if (!dev_priv->drrs.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->drrs.work,
 				msecs_to_jiffies(1000));
 	mutex_unlock(&dev_priv->drrs.mutex);
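
Taken together, the two reworked hooks form a simple busy-bit protocol: invalidate ORs the plane's frontbuffer bits into busy_frontbuffer_bits and upclocks; flush clears them, also upclocks (a flush means the screen just changed), and re-arms the one-second idleness work only when no bits remain. A minimal model of that bookkeeping, with upclock() and schedule_idle_work() as hypothetical stand-ins:

    extern void upclock(void);                        /* LOW_RR -> HIGH_RR */
    extern void schedule_idle_work(unsigned int ms);  /* the delayed work */

    static unsigned int busy_frontbuffer_bits;

    static void drrs_invalidate(unsigned int bits)
    {
        busy_frontbuffer_bits |= bits;
        if (bits)
            upclock();
    }

    static void drrs_flush(unsigned int bits)
    {
        busy_frontbuffer_bits &= ~bits;
        if (bits)
            upclock();
        if (!busy_frontbuffer_bits)
            schedule_idle_work(1000);
    }
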
@@ -5833,6 +5860,9 @@
 	case PORT_D:
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	default:
 		BUG();
 	}
@@ -5948,10 +5978,9 @@
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	}
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_dp_hot_plug;
 
 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-	dev_priv->hpd_irq_port[port] = intel_dig_port;
+	dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
 		drm_encoder_cleanup(encoder);
@@ -5967,7 +5996,7 @@
 
 	/* disable MST */
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 
@@ -5986,7 +6015,7 @@
 	int i;
 
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 600afdb..983553c 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,6 +33,7 @@
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -97,6 +98,10 @@
 			       &pipe_config->dp_m_n);
 
 	pipe_config->dp_m_n.tu = slots;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config);
+
 	return true;
 
 }
@@ -328,7 +333,7 @@
 }
 
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_mst_set_property,
@@ -406,7 +411,7 @@
 
 static void intel_connector_add_to_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -414,7 +419,7 @@
 
 static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -452,10 +457,9 @@
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_add_to_fbdev(intel_connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 	drm_connector_register(&intel_connector->base);
 	return connector;
 }
@@ -465,19 +469,28 @@
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
+
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
-	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
+	if (connector->state->crtc) {
+		struct drm_mode_set set;
+		int ret;
+
+		memset(&set, 0, sizeof(set));
+		set.crtc = connector->state->crtc;
+
+		ret = drm_atomic_helper_set_config(&set);
+
+		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+	}
+	drm_modeset_unlock_all(dev);
 
 	intel_connector->unregister(intel_connector);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-
-	drm_reinit_primary_mode_group(dev);
+	drm_modeset_unlock_all(dev);
 
 	kfree(intel_connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1059283..2b9e6f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -130,15 +130,9 @@
 
 struct intel_encoder {
 	struct drm_encoder base;
-	/*
-	 * The new crtc this encoder will be driven from. Only differs from
-	 * base->crtc while a modeset is in progress.
-	 */
-	struct intel_crtc *new_crtc;
 
 	enum intel_output_type type;
 	unsigned int cloneable;
-	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
 			       struct intel_crtc_state *);
@@ -182,6 +176,10 @@
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
 		bool active_low_pwm;
+
+		/* PWM chip */
+		struct pwm_device *pwm;
+
 		struct backlight_device *device;
 	} backlight;
 
@@ -195,12 +193,6 @@
 	 */
 	struct intel_encoder *encoder;
 
-	/*
-	 * The new encoder this connector will be driven. Only differs from
-	 * encoder while a modeset is in progress.
-	 */
-	struct intel_encoder *new_encoder;
-
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@
 	int	p;
 } intel_clock_t;
 
+struct intel_atomic_state {
+	struct drm_atomic_state base;
+
+	unsigned int cdclk;
+	bool dpll_set;
+	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect src;
@@ -256,7 +256,7 @@
 	 * plane requiring a scaler:
 	 *   - During check_plane, its bit is set in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 *   - scaler_id indicates the scaler it got assigned.
 	 *
 	 * plane doesn't require a scaler:
@@ -264,9 +264,11 @@
 	 *     got disabled.
 	 *   - During check_plane, corresponding bit is reset in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 */
 	int scaler_id;
+
+	struct drm_intel_sprite_colorkey ckey;
 };
 
 struct intel_initial_plane_config {
@@ -286,7 +288,6 @@
 #define SKL_MAX_DST_H 4096
 
 struct intel_scaler {
-	int id;
 	int in_use;
 	uint32_t mode;
 };
@@ -319,6 +320,9 @@
 	int scaler_id;
 };
 
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -331,7 +335,6 @@
 	 * accordingly.
 	 */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE	(1<<1) /* mode inherited from firmware */
 	unsigned long quirks;
 
 	/* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@
 	int pbn;
 
 	struct intel_crtc_scaler_state scaler_state;
+
+	/* w/a for waiting 2 vblanks during crtc enable */
+	enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+	struct vlv_pipe_wm wm[3];
+	struct vlv_sr_wm sr[3];
+	uint8_t num_active_planes;
+	uint8_t num_levels;
+	uint8_t level;
+	bool cxsr;
 };
 
 struct intel_pipe_wm {
@@ -478,16 +493,13 @@
  * and thus can't be run with interrupts disabled.
  */
 struct intel_crtc_atomic_commit {
-	/* vblank evasion */
-	bool evade;
-	unsigned start_vbl_count;
-
 	/* Sleepable operations to perform before commit */
 	bool wait_for_flips;
 	bool disable_fbc;
 	bool disable_ips;
+	bool disable_cxsr;
 	bool pre_disable_primary;
-	bool update_wm;
+	bool update_wm_pre, update_wm_post;
 	unsigned disabled_planes;
 
 	/* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@
 	uint32_t cursor_size;
 	uint32_t cursor_base;
 
-	struct intel_initial_plane_config plane_config;
 	struct intel_crtc_state *config;
-	bool new_enabled;
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
@@ -544,14 +554,19 @@
 		struct intel_pipe_wm active;
 		/* SKL wm values currently in use */
 		struct skl_pipe_wm skl_active;
+		/* allow CxSR on this pipe */
+		bool cxsr_allowed;
 	} wm;
 
 	int scanline_offset;
 
+	unsigned start_vbl_count;
 	struct intel_crtc_atomic_commit atomic;
 
 	/* scalers available on this crtc */
 	int num_scalers;
+
+	struct vlv_wm_state wm_state;
 };
 
 struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@
 	bool scaled;
 	u64 tiling;
 	unsigned int rotation;
+	uint16_t fifo_size;
 };
 
 struct intel_plane {
@@ -578,9 +594,7 @@
 	enum pipe pipe;
 	bool can_scale;
 	int max_downscale;
-
-	/* FIXME convert to properties */
-	struct drm_intel_sprite_colorkey ckey;
+	uint32_t frontbuffer_bit;
 
 	/* Since we need to change the watermarks before/after
 	 * enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@
 			     uint32_t x, uint32_t y,
 			     uint32_t src_w, uint32_t src_h);
 	void (*disable_plane)(struct drm_plane *plane,
-			      struct drm_crtc *crtc, bool force);
+			      struct drm_crtc *crtc);
 	int (*check_plane)(struct drm_plane *plane,
+			   struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *state);
 	void (*commit_plane)(struct drm_plane *plane,
 			     struct intel_plane_state *state);
@@ -629,6 +644,7 @@
 	unsigned long cursor_hpll_disable;
 };
 
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
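
The new to_intel_atomic_state() follows the same container_of() pattern as its neighbours: given a pointer to the embedded base member, subtract the member's offset to recover the wrapping structure. Stripped of the kernel's type checking, container_of() is essentially:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
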
@@ -940,43 +956,23 @@
 void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-				enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
 void intel_frontbuffer_flip(struct drm_device *dev,
-			    unsigned frontbuffer_bits)
-{
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+			    unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
 			      uint32_t pixel_format);
 
@@ -994,15 +990,11 @@
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
 void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			       struct drm_framebuffer *fb,
 			       const struct drm_plane_state *plane_state,
-			       struct intel_engine_cs *pipelined);
+			       struct intel_engine_cs *pipelined,
+			       struct drm_i915_gem_request **pipelined_request);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@
 				    struct drm_plane_state *state,
 				    struct drm_property *property,
 				    uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state);
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@
 void intel_create_rotation_property(struct drm_device *dev,
 					struct intel_plane *plane);
 
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state);
-
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 		      const struct dpll *dpll);
@@ -1104,7 +1095,8 @@
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int bpp,
 					     unsigned int pitch);
@@ -1114,7 +1106,6 @@
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_device *dev);
 void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
 void broxton_ddi_phy_init(struct drm_device *dev);
 void broxton_ddi_phy_uninit(struct drm_device *dev);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@
 				int dotclock);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
-	struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
-	struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1194,6 +1185,7 @@
 void intel_edp_drrs_invalidate(struct drm_device *dev,
 		unsigned frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1207,7 +1199,7 @@
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -1238,15 +1230,18 @@
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin);
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits);
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1309,13 @@
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits);
+			  unsigned frontbuffer_bits);
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits);
+		     unsigned frontbuffer_bits,
+		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1369,12 @@
 		    unsigned long submitted);
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
 				       struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1382,9 @@
 
 /* intel_sprite.c */
 int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
 			     uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
@@ -1395,11 +1392,6 @@
 void intel_tv_init(struct drm_device *dev);
 
 /* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async);
 int intel_connector_atomic_get_property(struct drm_connector *connector,
 					const struct drm_connector_state *state,
 					struct drm_property *property,
@@ -1407,6 +1399,11 @@
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			       struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
 static inline struct intel_crtc_state *
 intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 			    struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b5a5558..4a601cf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -31,6 +31,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
 #include <linux/slab.h>
+#include <linux/gpio/consumer.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -261,11 +262,6 @@
 	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *config)
 {
@@ -401,6 +397,8 @@
 
 		intel_dsi_port_enable(encoder);
 	}
+
+	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@
 
 	DRM_DEBUG_KMS("\n");
 
+	/* Panel Enable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+	msleep(intel_dsi->panel_on_delay);
+
 	/* Disable DPOunit clock gating: it can stall the pipe,
 	 * and we need DPLL REFA always enabled */
 	tmp = I915_READ(DPLL(pipe));
-	tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+	tmp |= DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), tmp);
 
 	/* update the hw state for DPLL */
-	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
-		DPLL_REFA_CLK_ENABLE_VLV;
+	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 
 	tmp = I915_READ(DSPCLK_GATE_D);
 	tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@
 	/* put device in ready state */
 	intel_dsi_device_ready(encoder);
 
-	msleep(intel_dsi->panel_on_delay);
-
 	drm_panel_prepare(intel_dsi->panel);
 
 	for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@
 
 	DRM_DEBUG_KMS("\n");
 
+	intel_panel_disable_backlight(intel_dsi->attached_connector);
+
 	if (is_vid_mode(intel_dsi)) {
 		/* Send Shutdown command to the panel in LP mode */
 		for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@
 
 	msleep(intel_dsi->panel_off_delay);
 	msleep(intel_dsi->panel_pwr_cycle_delay);
+
+	/* Panel Disable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@
 		/* XXX: Logically this call belongs in the panel driver. */
 		drm_panel_remove(intel_dsi->panel);
 	}
+
+	/* dispose of the gpios */
+	if (intel_dsi->gpio_panel)
+		gpiod_put(intel_dsi->gpio_panel);
+
 	intel_encoder_destroy(encoder);
 }
 
@@ -969,7 +982,7 @@
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dsi_detect,
 	.destroy = intel_dsi_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
 	/* XXX: very likely not all of these are needed */
-	intel_encoder->hot_plug = intel_dsi_hot_plug;
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1071,6 +1083,20 @@
 		goto err;
 	}
 
+	/*
+	 * On BYT with the CRC PMIC, we need to use a GPIO for
+	 * panel control.
+	 */
+	if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+		intel_dsi->gpio_panel =
+			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to get GPIO for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
 	intel_encoder->type = INTEL_OUTPUT_DSI;
 	intel_encoder->cloneable = 0;
 	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1130,7 @@
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 2784ac4..42a6859 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -42,6 +42,9 @@
 	struct drm_panel *panel;
 	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
+	/* GPIO Desc for CRC based Panel control */
+	struct gpio_desc *gpio_panel;
+
 	struct intel_connector *attached_connector;
 
 	/* bit mask of ports being driven */
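
The intel_dsi changes above give Baytrail machines with the Crystal Cove (CRC)
PMIC explicit panel power control: a "panel" GPIO descriptor is requested once
at encoder init, asserted before the panel-on delay in the pre-enable path,
deasserted after the power-off/power-cycle delays in post-disable, and released
when the encoder is destroyed. A minimal sketch of that lifecycle against the
standard gpiod consumer API (the device pointer and the delay value are
illustrative, not the driver's exact flow):

	#include <linux/gpio/consumer.h>
	#include <linux/delay.h>

	static struct gpio_desc *panel_gpio;

	static int panel_power_init(struct device *dev)
	{
		/* Request the descriptor, driven high (panel on) from the start. */
		panel_gpio = gpiod_get(dev, "panel", GPIOD_OUT_HIGH);
		if (IS_ERR(panel_gpio)) {
			panel_gpio = NULL;	/* fall back to no PMIC control */
			return -ENODEV;
		}
		return 0;
	}

	static void panel_power_on(void)
	{
		if (panel_gpio)
			gpiod_set_value_cansleep(panel_gpio, 1);
		msleep(100);		/* stand-in for intel_dsi->panel_on_delay */
	}

	static void panel_power_off(void)
	{
		if (panel_gpio)
			gpiod_set_value_cansleep(panel_gpio, 0);
	}

	static void panel_power_fini(void)
	{
		if (panel_gpio)
			gpiod_put(panel_gpio);
	}

The _cansleep variant matters here: PMIC GPIOs typically sit behind I2C, so
they must not be toggled from atomic context.
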
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d20cf37..c6a8975 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -38,6 +38,27 @@
 #define DSI_HFP_PACKET_EXTRA_SIZE	6
 #define DSI_EOTP_PACKET_SIZE		4
 
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+	int bpp;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	return bpp;
+}
+
 struct dsi_mnp {
 	u32 dsi_pll_ctrl;
 	u32 dsi_pll_div;
@@ -46,8 +67,8 @@
 static const u32 lfsr_converts[] = {
 	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
 	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
-	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
-	71, 35							/* 91 - 92 */
+	106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506		/* 91 - 100 */
 };
 
 #ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@
 	u32 dsi_bit_clock_hz;
 	u32 dsi_clk;
 
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	bpp = dsi_pixel_format_bpp(pixel_format);
 
 	hactive = mode->hdisplay;
 	vactive = mode->vdisplay;
@@ -137,21 +146,7 @@
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
 	u32 dsi_clk_khz;
-	u32 bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	u32 bpp = dsi_pixel_format_bpp(pixel_format);
 
 	/* DSI data rate = pixel clock * bits per pixel / lane count;
 	   the pixel clock is converted from kHz to Hz */
@@ -162,11 +157,13 @@
 
 #endif
 
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
 	unsigned int calc_m = 0, calc_p = 0;
-	unsigned int m, n = 1, p;
-	int ref_clk = 25000;
+	unsigned int m_min, m_max, p_min = 2, p_max = 6;
+	unsigned int m, n, p;
+	int ref_clk;
 	int delta = target_dsi_clk;
 	u32 m_seed;
 
@@ -176,8 +173,20 @@
 		return -ECHRNG;
 	}
 
-	for (m = 62; m <= 92 && delta; m++) {
-		for (p = 2; p <= 6 && delta; p++) {
+	if (IS_CHERRYVIEW(dev_priv)) {
+		ref_clk = 100000;
+		n = 4;
+		m_min = 70;
+		m_max = 96;
+	} else {
+		ref_clk = 25000;
+		n = 1;
+		m_min = 62;
+		m_max = 92;
+	}
+
+	for (m = m_min; m <= m_max && delta; m++) {
+		for (p = p_min; p <= p_max && delta; p++) {
 			/*
 			 * Find the optimal m and p divisors with minimal delta
 			 * +/- the required clock
@@ -217,7 +226,7 @@
 	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
 				    intel_dsi->lane_count);
 
-	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+	ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
 	if (ret) {
 		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
 		return;
@@ -286,21 +295,7 @@
 
 static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
 {
-	int bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	int bpp = dsi_pixel_format_bpp(pixel_format);
 
 	WARN(bpp != pipe_bpp,
 	     "bpp match assertion failure (expected %d, current %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ece5bd7..dc532bb 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -196,50 +196,6 @@
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-	struct drm_crtc *crtc;
-	struct intel_crtc_state *config;
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_dvo->base.base.crtc;
-	if (!crtc) {
-		intel_dvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode == DRM_MODE_DPMS_ON) {
-		config = to_intel_crtc(crtc)->config;
-
-		intel_dvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
-	} else {
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
-		intel_dvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -387,7 +343,7 @@
 }
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-	.dpms = intel_dvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
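
Dropping intel_dvo_dpms() is part of the atomic conversion:
drm_atomic_helper_connector_dpms() translates the legacy DPMS property into a
full atomic commit that enables or disables the CRTC behind the connector, so
the hand-rolled clone handling (connectors_active, intel_crtc_update_dpms(),
intel_modeset_check_state()) is no longer needed. The pattern, sketched for a
hypothetical atomic connector (the helpers are the 4.3-era DRM core API; a
real driver would also set .detect):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_crtc_helper.h>

	static const struct drm_connector_funcs example_connector_funcs = {
		/* DPMS on/off becomes an atomic enable/disable of the pipe. */
		.dpms			= drm_atomic_helper_connector_dpms,
		.destroy		= drm_connector_cleanup,
		.fill_modes		= drm_helper_probe_single_connector_modes,
		/* Atomic connectors must provide state duplicate/destroy. */
		.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
	};
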
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6abb834..1f97fb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,9 +41,8 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 fbc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
 	u32 fbc_ctl;
@@ -84,7 +81,7 @@
 		cfb_pitch = fb->pitches[0];
 
 	/* FBC_CTL wants 32B or 64B units */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		cfb_pitch = (cfb_pitch / 32) - 1;
 	else
 		cfb_pitch = (cfb_pitch / 64) - 1;
@@ -93,66 +90,61 @@
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
-	if (IS_GEN4(dev)) {
+	if (IS_GEN4(dev_priv)) {
 		u32 fbc_ctl2;
 
 		/* Set it up... */
 		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
 		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-		I915_WRITE(FBC_FENCE_OFF, crtc->y);
+		I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
 	}
 
 	/* enable it... */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-	if (IS_I945GM(dev))
+	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
-	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
@@ -180,22 +170,21 @@
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+		threshold++;
 
-	switch (dev_priv->fbc.threshold) {
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@
 		break;
 	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		dpfc_ctl |= obj->fence_reg;
 
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
 	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 	}
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
-	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+	dpfc_ctl = 0;
+	if (IS_IVYBRIDGE(dev_priv))
+		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
 
-	switch (dev_priv->fbc.threshold) {
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		threshold++;
+
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_IVYBRIDGE(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
 			   ILK_FBCQ_DIS);
 	} else {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
-			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
 			   HSW_FBCQ_DIS);
 	}
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
 		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
  * intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return dev_priv->fbc.enabled;
 }
 
@@ -327,31 +313,33 @@
 	struct intel_fbc_work *work =
 		container_of(to_delayed_work(__work),
 			     struct intel_fbc_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->fbc.lock);
 	if (work == dev_priv->fbc.fbc_work) {
 		/* Double check that we haven't switched fb without cancelling
 		 * the prior work.
 		 */
-		if (work->crtc->primary->fb == work->fb) {
-			dev_priv->display.enable_fbc(work->crtc);
+		if (crtc_fb == work->fb) {
+			dev_priv->fbc.enable_fbc(work->crtc);
 
-			dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
-			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
-			dev_priv->fbc.y = work->crtc->y;
+			dev_priv->fbc.crtc = work->crtc;
+			dev_priv->fbc.fb_id = crtc_fb->base.id;
+			dev_priv->fbc.y = work->crtc->base.y;
 		}
 
 		dev_priv->fbc.fbc_work = NULL;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->fbc.lock);
 
 	kfree(work);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 {
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
 	if (dev_priv->fbc.fbc_work == NULL)
 		return;
 
@@ -373,26 +361,24 @@
 	dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
 {
 	struct intel_fbc_work *work;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!dev_priv->display.enable_fbc)
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
-		dev_priv->display.enable_fbc(crtc);
+		dev_priv->fbc.enable_fbc(crtc);
 		return;
 	}
 
 	work->crtc = crtc;
-	work->fb = crtc->primary->fb;
+	work->fb = crtc->base.primary->fb;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
 	dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@
 	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
 
-/**
- * intel_fbc_disable - disable FBC
- * @dev: the drm_device
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_device *dev)
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
-	if (!dev_priv->display.disable_fbc)
-		return;
-
-	dev_priv->display.disable_fbc(dev);
+	dev_priv->fbc.disable_fbc(dev_priv);
 	dev_priv->fbc.crtc = NULL;
 }
 
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/*
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+	switch (reason) {
+	case FBC_OK:
+		return "FBC enabled but currently disabled in hardware";
+	case FBC_UNSUPPORTED:
+		return "unsupported by this chipset";
+	case FBC_NO_OUTPUT:
+		return "no output";
+	case FBC_STOLEN_TOO_SMALL:
+		return "not enough stolen memory";
+	case FBC_UNSUPPORTED_MODE:
+		return "mode incompatible with compression";
+	case FBC_MODE_TOO_LARGE:
+		return "mode too large for compression";
+	case FBC_BAD_PLANE:
+		return "FBC unsupported on plane";
+	case FBC_NOT_TILED:
+		return "framebuffer not tiled or fenced";
+	case FBC_MULTIPLE_PIPES:
+		return "more than one pipe active";
+	case FBC_MODULE_PARAM:
+		return "disabled per module param";
+	case FBC_CHIP_DEFAULT:
+		return "disabled per chip default";
+	case FBC_ROTATION:
+		return "rotation unsupported";
+	case FBC_IN_DBG_MASTER:
+		return "kernel debugger is active";
+	default:
+		MISSING_CASE(reason);
+		return "unknown reason";
+	}
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
 			      enum no_fbc_reason reason)
 {
 	if (dev_priv->fbc.no_fbc_reason == reason)
-		return false;
+		return;
 
 	dev_priv->fbc.no_fbc_reason = reason;
-	return true;
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
 }
 
 static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
 {
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	enum pipe pipe;
-	bool pipe_a_only = false, one_pipe_only = false;
+	bool pipe_a_only = false;
 
 	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
 		pipe_a_only = true;
-	else if (INTEL_INFO(dev_priv)->gen <= 4)
-		one_pipe_only = true;
 
 	for_each_pipe(dev_priv, pipe) {
 		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
 		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_plane_state(tmp_crtc->primary->state)->visible) {
-			if (one_pipe_only && crtc) {
-				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				return NULL;
-			}
+		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
 			crtc = tmp_crtc;
-		}
 
 		if (pipe_a_only)
 			break;
 	}
 
-	if (!crtc || crtc->primary->fb == NULL) {
-		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-			DRM_DEBUG_KMS("no output, disabling\n");
+	if (!crtc || crtc->primary->fb == NULL)
 		return NULL;
-	}
 
 	return crtc;
 }
 
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+	int n_pipes = 0;
+	struct drm_crtc *crtc;
+
+	if (INTEL_INFO(dev_priv)->gen > 4)
+		return true;
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(crtc) &&
+		    to_intel_plane_state(crtc->primary->state)->visible)
+			n_pipes++;
+	}
+
+	return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
+{
+	int compression_threshold = 1;
+	int ret;
+
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds; in that case, only one fallback remains.
+	 */
+
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
+		goto err_llb;
+	else if (ret > 1)
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a full-size buffer. Try to increase stolen memory size if available in the BIOS.\n");
+
+	dev_priv->fbc.threshold = ret;
+
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	else if (IS_GM45(dev_priv)) {
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	} else {
+		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+		if (!compressed_llb)
+			goto err_fb;
+
+		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+						  4096, 4096);
+		if (ret)
+			goto err_fb;
+
+		dev_priv->fbc.compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
+	}
+
+	dev_priv->fbc.uncompressed_size = size;
+
+	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+		      size);
+
+	return 0;
+
+err_fb:
+	kfree(compressed_llb);
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+	pr_info_once("drm: not enough stolen space for compressed buffer (need %d bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+	return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->fbc.uncompressed_size == 0)
+		return;
+
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+	if (dev_priv->fbc.compressed_llb) {
+		i915_gem_stolen_remove_node(dev_priv,
+					    dev_priv->fbc.compressed_llb);
+		kfree(dev_priv->fbc.compressed_llb);
+	}
+
+	dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_cleanup_cfb(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	if (size <= dev_priv->fbc.uncompressed_size)
+		return 0;
+
+	/* Release any current block */
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
 /**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -498,9 +683,8 @@
  *
  * We need to enable/disable FBC on a global basis.
  */
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
 
-	if (!HAS_FBC(dev))
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	/* disable framebuffer compression in vGPU */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv->dev))
 		i915.enable_fbc = 0;
 
 	if (i915.enable_fbc < 0) {
-		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-			DRM_DEBUG_KMS("disabled per chip default\n");
+		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
 		goto out_disable;
 	}
 
 	if (!i915.enable_fbc) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-			DRM_DEBUG_KMS("fbc disabled per module param\n");
+		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
 		goto out_disable;
 	}
 
@@ -537,8 +718,15 @@
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	crtc = intel_fbc_find_crtc(dev_priv);
-	if (!crtc)
+	if (!crtc) {
+		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
 		goto out_disable;
+	}
+
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+		goto out_disable;
+	}
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
@@ -547,16 +735,14 @@
 
 	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("mode incompatible with compression, "
-				      "disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
 		goto out_disable;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
 		max_width = 4096;
 		max_height = 4096;
-	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		max_width = 4096;
 		max_height = 2048;
 	} else {
@@ -565,14 +751,12 @@
 	}
 	if (intel_crtc->config->pipe_src_w > max_width ||
 	    intel_crtc->config->pipe_src_h > max_height) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
-			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
 		goto out_disable;
 	}
-	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
 	    intel_crtc->plane != PLANE_A) {
-		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-			DRM_DEBUG_KMS("plane not A, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
 		goto out_disable;
 	}
 
@@ -581,25 +765,24 @@
 	 */
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
-			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
 		goto out_disable;
 	}
-	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
 	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_ROTATION);
 		goto out_disable;
 	}
 
 	/* If the kernel debugger is active, always disable compression */
-	if (in_dbg_master())
+	if (in_dbg_master()) {
+		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
 		goto out_disable;
+	}
 
-	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
-					      drm_format_plane_cpp(fb->pixel_format, 0))) {
-		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
-			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+	if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+				drm_format_plane_cpp(fb->pixel_format, 0))) {
+		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
 		goto out_disable;
 	}
 
@@ -613,7 +796,7 @@
 	    dev_priv->fbc.y == crtc->y)
 		return;
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		/* We update FBC along two paths, after changing fb/crtc
 		 * configuration (modeswitching) and after page-flipping
 		 * finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@
 		 * some point. And we wait before enabling FBC anyway.
 		 */
 		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
 
-	intel_fbc_enable(crtc);
+	intel_fbc_enable(intel_crtc);
 	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
 out_disable:
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
-	i915_gem_stolen_cleanup_compression(dev);
+	__intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/*
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_update(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
 	unsigned int fbc_bits;
 
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
 	if (origin == ORIGIN_GTT)
 		return;
 
+	mutex_lock(&dev_priv->fbc.lock);
+
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
 	else if (dev_priv->fbc.fbc_work)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-			to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits)
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
-
-	if (!dev_priv->fbc.busy_bits)
+	if (!dev_priv->fbc.enable_fbc)
 		return;
 
+	if (origin == ORIGIN_GTT)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
 	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits)
-		intel_fbc_update(dev);
+	if (!dev_priv->fbc.busy_bits) {
+		__intel_fbc_disable(dev_priv);
+		__intel_fbc_update(dev_priv);
+	}
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 /**
@@ -702,6 +914,8 @@
 {
 	enum pipe pipe;
 
+	mutex_init(&dev_priv->fbc.lock);
+
 	if (!HAS_FBC(dev_priv)) {
 		dev_priv->fbc.enabled = false;
 		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = gen7_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = ilk_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (IS_GM45(dev_priv)) {
-		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->display.enable_fbc = g4x_fbc_enable;
-		dev_priv->display.disable_fbc = g4x_fbc_disable;
+		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
 	} else {
-		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->display.enable_fbc = i8xx_fbc_enable;
-		dev_priv->display.disable_fbc = i8xx_fbc_disable;
+		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
 
 		/* This value was pulled out of someone's hat */
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 	}
 
-	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
 }
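
Three things happen in the intel_fbc.c rework: the enable/disable/enabled
vfuncs move from dev_priv->display into dev_priv->fbc, all FBC state becomes
guarded by a dedicated dev_priv->fbc.lock instead of struct_mutex, and the
compressed framebuffer (CFB) allocation from stolen memory moves in from the
GEM stolen code together with its threshold search. That search first
over-allocates 2x to fight fragmentation, then keeps halving the request and
doubling the compression threshold until stolen memory can hold it; the
hardware caps compression at 1:4, and 16bpp scanout costs one extra step in
the enable code. A 16 MiB framebuffer that only fits at 4 MiB thus ends up
with threshold 4. A stand-alone model of that policy (the real code calls
i915_gem_stolen_insert_node() instead of comparing against a free byte count):

	#include <stddef.h>

	static int threshold_model(size_t size, size_t stolen_free, int fb_cpp)
	{
		int threshold = 1;

		/* Over-allocate first to reduce reallocations/fragmentation. */
		if (size * 2 <= stolen_free)
			return threshold;

		for (;; size /= 2, threshold *= 2) {
			/* HW limits the CFB to 1:4; 16bpp burns one step. */
			if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
				return 0;	/* give up, FBC stays off */
			if (size <= stolen_free)
				return threshold;
		}
	}
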
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6372cfc..8c6a6fa 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,16 +55,8 @@
 	ret = drm_fb_helper_set_par(info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
-							true);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -81,15 +73,8 @@
 	ret = drm_fb_helper_blank(blank, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -107,15 +92,8 @@
 	ret = drm_fb_helper_pan_display(var, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -126,9 +104,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = intel_fbdev_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = intel_fbdev_pan_display,
 	.fb_blank = intel_fbdev_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -177,7 +155,7 @@
 	}
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin obj: %d\n", ret);
 		goto out_fb;
@@ -237,9 +215,9 @@
 	obj = intel_fb->obj;
 	size = obj->base.size;
 
-	info = framebuffer_alloc(0, &dev->pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unpin;
 	}
 
@@ -248,24 +226,13 @@
 	fb = &ifbdev->fb->base;
 
 	ifbdev->helper.fb = fb;
-	ifbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "inteldrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
@@ -277,7 +244,7 @@
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unpin;
+		goto out_destroy_fbi;
 	}
 	info->screen_size = size;
 
@@ -304,6 +271,8 @@
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
@@ -484,18 +453,13 @@
 			 * IMPORTANT: We want to use the adjusted mode (i.e.
 			 * after the panel fitter upscaling) as the initial
 			 * config, not the input mode, which is what crtc->mode
-			 * usually contains. But since our current fastboot
+			 * usually contains. But since our current
 			 * code puts a mode derived from the post-pfit timings
-			 * into crtc->mode this works out correctly. We don't
-			 * use hwmode anywhere right now, so use it for this
-			 * since the fb helper layer wants a pointer to
-			 * something we own.
+			 * into crtc->mode, this works out correctly.
 			 */
 			DRM_DEBUG_KMS("looking for current mode on connector %s\n",
 				      connector->name);
-			intel_mode_from_pipe_config(&encoder->crtc->hwmode,
-						    to_intel_crtc(encoder->crtc)->config);
-			modes[i] = &encoder->crtc->hwmode;
+			modes[i] = &encoder->crtc->mode;
 		}
 		crtcs[i] = new_crtc;
 
@@ -550,16 +514,9 @@
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-	if (ifbdev->helper.fbdev) {
-		struct fb_info *info = ifbdev->helper.fbdev;
 
-		unregister_framebuffer(info);
-		iounmap(info->screen_base);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ifbdev->helper);
+	drm_fb_helper_release_fbi(&ifbdev->helper);
 
 	drm_fb_helper_fini(&ifbdev->helper);
 
@@ -582,7 +539,6 @@
 	struct intel_framebuffer *fb = NULL;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	struct intel_initial_plane_config *plane_config = NULL;
 	unsigned int max_size = 0;
 
 	if (!i915.fastboot)
@@ -590,20 +546,21 @@
 
 	/* Find the largest fb */
 	for_each_crtc(dev, crtc) {
+		struct drm_i915_gem_object *obj =
+			intel_fb_obj(crtc->primary->state->fb);
 		intel_crtc = to_intel_crtc(crtc);
 
-		if (!intel_crtc->active || !crtc->primary->fb) {
+		if (!intel_crtc->active || !obj) {
 			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
 				      pipe_name(intel_crtc->pipe));
 			continue;
 		}
 
-		if (intel_crtc->plane_config.size > max_size) {
+		if (obj->base.size > max_size) {
 			DRM_DEBUG_KMS("found possible fb from plane %c\n",
 				      pipe_name(intel_crtc->pipe));
-			plane_config = &intel_crtc->plane_config;
-			fb = to_intel_framebuffer(crtc->primary->fb);
-			max_size = plane_config->size;
+			fb = to_intel_framebuffer(crtc->primary->state->fb);
+			max_size = obj->base.size;
 		}
 	}
 
@@ -638,7 +595,6 @@
 			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, fb->base.pitches[0]);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -659,7 +615,6 @@
 			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, max_size);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -810,7 +765,7 @@
 	if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
 		memset_io(info->screen_base, 0, info->screen_size);
 
-	fb_set_suspend(info, state);
+	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();
 }
 
@@ -825,11 +780,20 @@
 {
 	int ret;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct drm_fb_helper *fb_helper;
 
-	if (!dev_priv->fbdev)
+	if (!ifbdev)
 		return;
 
-	ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
-	if (ret)
+	fb_helper = &ifbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret) {
 		DRM_DEBUG("failed to restore crtc mode\n");
+	} else {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
 }
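
intel_fbdev.c is converted to the drm_fb_helper allocation helpers:
drm_fb_helper_alloc_fbi() replaces the framebuffer_alloc() /
fb_alloc_cmap() / alloc_apertures() boilerplate, the unregister/release pair
replaces the open-coded teardown, and the cfb_* drawing ops become their
drm_fb_helper_cfb_* wrappers. The resulting probe/teardown pairing, reduced
to a skeleton (the framebuffer mapping step is elided):

	#include <linux/err.h>
	#include <drm/drm_fb_helper.h>

	static int example_fb_probe(struct drm_fb_helper *helper)
	{
		struct fb_info *info;

		/* Allocates fb_info, cmap and apertures in one call. */
		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		/* ... fill info->fbops, map the framebuffer, etc. ... */
		return 0;
	}

	static void example_fb_destroy(struct drm_fb_helper *helper)
	{
		drm_fb_helper_unregister_fbi(helper);	/* unregister_framebuffer() */
		drm_fb_helper_release_fbi(helper);	/* frees cmap and fb_info */
	}
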
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 57095f5..ac85357 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -65,84 +65,29 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dpll_reg = DPLL(pipe);
-	int dpll;
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	dpll = I915_READ(dpll_reg);
-	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-		DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-	}
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-			       unsigned frontbuffer_bits,
-			       struct intel_engine_cs *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-			continue;
-
-		intel_increase_pllclock(dev, pipe);
-	}
-}
-
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS any subsequent flush will be delayed
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (!obj->frontbuffer_bits)
 		return;
 
-	if (ring) {
+	if (origin == ORIGIN_CS) {
 		mutex_lock(&dev_priv->fb_tracking.lock);
 		dev_priv->fb_tracking.busy_bits
 			|= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
 	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+				    unsigned frontbuffer_bits,
+				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy. */
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+	if (!frontbuffer_bits)
+		return;
 
 	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits);
-	intel_fbc_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_fb_obj_flush - flush frontbuffer object
  * @obj: GEM object to flush
  * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire)
+			bool retire, enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned frontbuffer_bits;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -236,7 +183,7 @@
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev);
+	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
 
 /**
@@ -260,7 +207,7 @@
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
@@ -268,5 +215,29 @@
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. This
+ * is for synchronous plane updates which will happen on the next vblank and
+ * which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Remove stale busy bits due to the old buffer. */
+	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
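
The frontbuffer tracking changes drop the old LVDS upclocking side effect and
thread the fb_op_origin through invalidate and flush, so the consumers (PSR,
FBC, DRRS) can tell CPU writes, GTT writes, CS rendering and flips apart. The
core bookkeeping is two bitmasks under fb_tracking.lock: busy_bits (planes
with CS rendering outstanding; set on ORIGIN_CS invalidation, they hold back
flushes until retire) and flip_bits (planes with a flip scheduled). A toy
model of the delay logic, with locking left to the caller:

	/* One bit per frontbuffer plane. */
	struct fb_tracking_model {
		unsigned int busy_bits;
		unsigned int flip_bits;
	};

	static void model_invalidate(struct fb_tracking_model *t,
				     unsigned int bits, int from_cs)
	{
		if (from_cs)		/* ORIGIN_CS: delay later flushes */
			t->busy_bits |= bits;
	}

	/* Returns the bits that may actually be flushed to PSR/FBC/DRRS. */
	static unsigned int model_flush(struct fb_tracking_model *t,
					unsigned int bits, int retired)
	{
		if (retired)
			t->busy_bits &= ~bits;
		return bits & ~t->busy_bits;
	}
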
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 0000000..18d7f20
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8		11
+#define GFXCORE_FAMILY_GEN9		12
+#define GFXCORE_FAMILY_FORCE_ULONG	0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL	0
+#define GUC_CTX_PRIORITY_HIGH		1
+#define GUC_CTX_PRIORITY_NORMAL		2
+#define GUC_CTX_PRIORITY_LOW		3
+
+#define GUC_MAX_GPU_CONTEXTS		1024
+#define	GUC_INVALID_CTX_ID		(GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE		1
+#define WQ_STATUS_SUSPENDED		2
+#define WQ_STATUS_CMD_ERROR		3
+#define WQ_STATUS_ENGINE_ID_NOT_USED	4
+#define WQ_STATUS_SUSPENDED_FROM_RESET	5
+#define WQ_TYPE_SHIFT			0
+#define   WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_PSEUDO		(0x2 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT			10
+#define WQ_LEN_SHIFT			16
+#define WQ_NO_WCFLUSH_WAIT		(1 << 27)
+#define WQ_PRESENT_WORKLOAD		(1 << 28)
+#define WQ_WORKLOAD_SHIFT		29
+#define   WQ_WORKLOAD_GENERAL		(0 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_GPGPU		(1 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_TOUCH		(2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT		20
+#define WQ_RING_TAIL_MASK		(0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED		1
+#define GUC_DOORBELL_DISABLED		0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE	(1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB	(1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL	(1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT	(1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
+
+/* The guc control data is 9 DWORDs (GUC_CTL_MAX_DWORDS) */
+#define GUC_CTL_CTXINFO			0
+#define   GUC_CTL_CTXNUM_IN16_SHIFT	0
+#define   GUC_CTL_BASE_ADDR_SHIFT	12
+#define GUC_CTL_ARAT_HIGH		1
+#define GUC_CTL_ARAT_LOW		2
+#define GUC_CTL_DEVICE_INFO		3
+#define   GUC_CTL_GTTYPE_SHIFT		0
+#define   GUC_CTL_COREFAMILY_SHIFT	7
+#define GUC_CTL_LOG_PARAMS		4
+#define   GUC_LOG_VALID			(1 << 0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
+#define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
+#define   GUC_LOG_CRASH_PAGES		1
+#define   GUC_LOG_CRASH_SHIFT		4
+#define   GUC_LOG_DPC_PAGES		3
+#define   GUC_LOG_DPC_SHIFT		6
+#define   GUC_LOG_ISR_PAGES		3
+#define   GUC_LOG_ISR_SHIFT		9
+#define   GUC_LOG_BUF_ADDR_SHIFT	12
+#define GUC_CTL_PAGE_FAULT_CONTROL	5
+#define GUC_CTL_WA			6
+#define   GUC_CTL_WA_UK_BY_DRIVER	(1 << 3)
+#define GUC_CTL_FEATURE			7
+#define   GUC_CTL_VCS2_ENABLED		(1 << 0)
+#define   GUC_CTL_KERNEL_SUBMISSIONS	(1 << 1)
+#define   GUC_CTL_FEATURE2		(1 << 2)
+#define   GUC_CTL_POWER_GATING		(1 << 3)
+#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 4)
+#define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
+#define   GUC_CTL_ENABLE_SLPC		(1 << 7)
+#define GUC_CTL_DEBUG			8
+#define   GUC_LOG_VERBOSITY_SHIFT	0
+#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define	  GUC_LOG_VERBOSITY_MIN		0
+#define	  GUC_LOG_VERBOSITY_MAX		3
+
+#define GUC_CTL_MAX_DWORDS		(GUC_CTL_DEBUG + 1)
+
+struct guc_doorbell_info {
+	u32 db_status;
+	u32 cookie;
+	u32 reserved[14];
+} __packed;
+
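+/*
+ * Presumably lets db_status and cookie be read or written atomically as a
+ * single quadword.
+ */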
+union guc_doorbell_qw {
+	struct {
+		u32 db_status;
+		u32 cookie;
+	};
+	u64 value_qw;
+} __packed;
+
+#define GUC_MAX_DOORBELLS		256
+#define GUC_INVALID_DOORBELL_ID		(GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE			(PAGE_SIZE)
+#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+	u32 header;
+	u32 context_desc;
+	u32 ring_tail;
+	u32 fence_id;
+} __packed;
+
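+/*
+ * Illustrative sketch only (not part of the firmware interface): a work
+ * queue item header could plausibly be assembled from the fields above,
+ * e.g. for an in-order item of len DWORDs targeting a given engine:
+ *
+ *	header = WQ_TYPE_INORDER |
+ *		 (engine << WQ_TARGET_SHIFT) |
+ *		 (len << WQ_LEN_SHIFT);
+ */
+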
+struct guc_process_desc {
+	u32 context_id;
+	u64 db_base_addr;
+	u32 head;
+	u32 tail;
+	u32 error_offset;
+	u64 wq_base_addr;
+	u32 wq_size_bytes;
+	u32 wq_status;
+	u32 engine_presence;
+	u32 priority;
+	u32 reserved[30];
+} __packed;
+
+/* Engine ID and context ID are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET		0
+#define GUC_ELC_ENGINE_OFFSET		29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+	u32 context_desc;
+	u32 context_id;
+	u32 ring_status;
+	u32 ring_lcra;
+	u32 ring_begin;
+	u32 ring_end;
+	u32 ring_next_free_location;
+	u32 ring_current_tail_pointer_value;
+	u8 engine_state_submit_value;
+	u8 engine_state_wait_value;
+	u16 pagefault_count;
+	u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and Driver */
+struct guc_context_desc {
+	u32 sched_common_area;
+	u32 context_id;
+	u32 pas_id;
+	u8 engines_used;
+	u64 db_trigger_cpu;
+	u32 db_trigger_uk;
+	u64 db_trigger_phy;
+	u16 db_id;
+
+	struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+	u8 attribute;
+
+	u32 priority;
+
+	u32 wq_sampled_tail_offset;
+	u32 wq_total_submit_enqueues;
+
+	u32 process_desc;
+	u32 wq_addr;
+	u32 wq_size;
+
+	u32 engine_presence;
+
+	u32 reserved0[1];
+	u64 reserved1[1];
+
+	u64 desc_private;
+} __packed;
+
+/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
+enum host2guc_action {
+	HOST2GUC_ACTION_DEFAULT = 0x0,
+	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+	HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define	GUC2HOST_RESPONSE_MASK		((u32)0xF0000000)
+#define	GUC2HOST_IS_RESPONSE(x) 	((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define	GUC2HOST_STATUS(x)		(GUC2HOST_RESPONSE_MASK | (x))
+
+/* GUC will return status back to SOFT_SCRATCH_O_REG */
+enum guc2host_status {
+	GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+	GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+	GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
+
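+/*
+ * Illustrative only: a sketch of how the driver might interpret the value
+ * read back from the scratch register after issuing a host2guc action
+ * (assuming a SOFT_SCRATCH(0) accessor for the C180 register):
+ *
+ *	u32 status = I915_READ(SOFT_SCRATCH(0));
+ *
+ *	if (GUC2HOST_IS_RESPONSE(status) && status == GUC2HOST_STATUS_SUCCESS)
+ *		the action completed successfully;
+ */
+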
+#endif
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e97731a..dcd336b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -174,10 +174,14 @@
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@
 	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@
 	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
 	u32 val = I915_READ(ctl_reg);
 
-	return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
-		      VIDEO_DIP_ENABLE_VS_HSW);
+	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		      VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 }
 
 /*
@@ -514,7 +534,13 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		if (port != (val & VIDEO_DIP_PORT_MASK)) {
+			DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
+		}
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -522,16 +548,17 @@
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
 		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
+			DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
 		}
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~VIDEO_DIP_ENABLE_VENDOR;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -541,6 +568,97 @@
 	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	/*
+	 * HDMI cloning is only supported on g4x, which doesn't
+	 * support deep color or GCP infoframes anyway, so there is
+	 * no need to worry about multiple HDMI sinks here.
+	 */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return connector->display_info.bpc > 8;
+
+	return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ *   phase of 0
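+ *
+ * For example (illustrative): at 30 bpp, pixels are packed 4 per group, so
+ * default_phase can only be indicated when crtc_hdisplay, crtc_htotal and
+ * every blank/sync boundary below are multiples of 4 pixels.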
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+				       const struct drm_display_mode *mode)
+{
+	unsigned int pixels_per_group;
+
+	switch (pipe_bpp) {
+	case 30:
+		/* 4 pixels in 5 clocks */
+		pixels_per_group = 4;
+		break;
+	case 36:
+		/* 2 pixels in 3 clocks */
+		pixels_per_group = 2;
+		break;
+	case 48:
+		/* 1 pixel in 2 clocks */
+		pixels_per_group = 1;
+		break;
+	default:
+		/* phase information not relevant for 8bpc */
+		return false;
+	}
+
+	return mode->crtc_hdisplay % pixels_per_group == 0 &&
+		mode->crtc_htotal % pixels_per_group == 0 &&
+		mode->crtc_hblank_start % pixels_per_group == 0 &&
+		mode->crtc_hblank_end % pixels_per_group == 0 &&
+		mode->crtc_hsync_start % pixels_per_group == 0 &&
+		mode->crtc_hsync_end % pixels_per_group == 0 &&
+		((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+		 mode->crtc_htotal/2 % pixels_per_group == 0);
+}
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	u32 reg, val = 0;
+
+	if (HAS_DDI(dev_priv))
+		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+	else if (IS_VALLEYVIEW(dev_priv))
+		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+	else if (HAS_PCH_SPLIT(dev_priv->dev))
+		reg = TVIDEO_DIP_GCP(crtc->pipe);
+	else
+		return false;
+
+	/* Indicate color depth whenever the sink supports deep color */
+	if (hdmi_sink_is_deep_color(encoder))
+		val |= GCP_COLOR_INDICATION;
+
+	/* Enable default_phase whenever the display mode is suitably aligned */
+	if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+				       &crtc->config->base.adjusted_mode))
+		val |= GCP_DEFAULT_PHASE_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	return val != 0;
+}
+
 static void ibx_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -607,7 +729,9 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -616,7 +740,10 @@
 	/* Set both together, unset both together: see the spec. */
 	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -646,25 +773,29 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
-		 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -686,14 +817,18 @@
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
+	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		 VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
 	if (!enable) {
-		I915_WRITE(reg, 0);
+		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
-	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
-		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -808,58 +943,146 @@
 	else
 		dotclock = pipe_config->port_clock;
 
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
 	if (HAS_PCH_SPLIT(dev_priv->dev))
 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	WARN_ON(!crtc->config->has_hdmi_sink);
+	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+			 pipe_name(crtc->pipe));
+	intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
-	u32 enable_bits = SDVO_ENABLE;
-
-	if (intel_crtc->config->has_audio)
-		enable_bits |= SDVO_AUDIO_ENABLE;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround for IBX, we need to move the port to transcoder A
-	 * before disabling it, so restore the transcoder select bit here. */
-	if (HAS_PCH_IBX(dev))
-		enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
-
-	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
-	 * we do this anyway which shows more stable in testing.
-	 */
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
-		POSTING_READ(intel_hdmi->hdmi_reg);
-	}
-
-	temp |= enable_bits;
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
 
 	I915_WRITE(intel_hdmi->hdmi_reg, temp);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround, need to write this twice for issue that may result
-	 * in first write getting masked.
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * HW workaround, need to write this twice for issue
+	 * that may result in first write getting masked.
 	 */
-	if (HAS_PCH_SPLIT(dev)) {
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround, need to toggle enable bit off and on
+	 * for 12bpc with pixel repeat.
+	 *
+	 * FIXME: BSpec says this should be done at the end of
+	 * the modeset sequence, so not sure if this isn't too soon.
+	 */
+	if (crtc->config->pipe_bpp > 24 &&
+	    crtc->config->pixel_multiplier > 1) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		/*
+		 * HW workaround, need to write this twice for issue
+		 * that may result in first write getting masked.
+		 */
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
 
-	if (intel_crtc->config->has_audio) {
-		WARN_ON(!intel_crtc->config->has_hdmi_sink);
-		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-				 pipe_name(intel_crtc->pipe));
-		intel_audio_codec_enable(encoder);
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum pipe pipe = crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+	 *
+	 * The procedure for 12bpc is as follows:
+	 * 1. disable HDMI clock gating
+	 * 2. enable HDMI with 8bpc
+	 * 3. enable HDMI with 12bpc
+	 * 4. enable HDMI clock gating
+	 */
+
+	if (crtc->config->pipe_bpp > 24) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) |
+			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= SDVO_COLOR_FORMAT_8bpc;
 	}
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	if (crtc->config->pipe_bpp > 24) {
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= HDMI_COLOR_FORMAT_12bpc;
+
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) &
+			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+	}
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
+
+	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
 }
 
 static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@
 	intel_disable_hdmi(encoder);
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
@@ -939,24 +1164,51 @@
 }
 
 static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+		      int clock, bool respect_dvi_limit)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+	if (clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+		return MODE_CLOCK_HIGH;
+
+	/* BXT DPLL can't generate 223-240 MHz */
+	if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	/* CHV DPLL can't generate 216-240 MHz */
+	if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	int clock = mode->clock;
-
-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
-		clock *= 2;
-
-	if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
-					 true))
-		return MODE_CLOCK_HIGH;
-	if (clock < 20000)
-		return MODE_CLOCK_LOW;
+	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	enum drm_mode_status status;
+	int clock;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	return MODE_OK;
+	clock = mode->clock;
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		clock *= 2;
+
+	/* check if we can do 8bpc */
+	status = hdmi_port_clock_valid(hdmi, clock, true);
+
+	/* if we can't do 8bpc we may still be able to do 12bpc */
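+	/* (12bpc carries 50% more data than 8bpc, hence the 3/2 clock scaling) */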
+	if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+		status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
+
+	return status;
 }
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
-	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+	int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+	int clock_12bpc = clock_8bpc * 3 / 2;
 	int desired_bpp;
 
 	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
 		pipe_config->pixel_multiplier = 2;
+		clock_8bpc *= 2;
+		clock_12bpc *= 2;
 	}
 
 	if (intel_hdmi->color_range)
@@ -1035,9 +1289,8 @@
 	 * within limits.
 	 */
 	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
-	    clock_12bpc <= portclock_limit &&
-	    hdmi_12bpc_possible(pipe_config) &&
-	    0 /* FIXME 12bpc support totally broken */) {
+	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+	    hdmi_12bpc_possible(pipe_config)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
 
@@ -1046,6 +1299,8 @@
 	} else {
 		DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
 		desired_bpp = 8*3;
+
+		pipe_config->port_clock = clock_8bpc;
 	}
 
 	if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->crtc_clock > portclock_limit) {
-		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+	if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+				  false) != MODE_OK) {
+		DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
 		return false;
 	}
 
@@ -1323,7 +1579,7 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1640,7 +1896,7 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1653,7 +1909,7 @@
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.force = intel_hdmi_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1702,6 +1958,7 @@
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
+	uint8_t alternate_ddc_pin;
 
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
@@ -1735,6 +1992,26 @@
 			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		/* On SKL, PORT E doesn't have a separate GMBUS pin.
+		 * We rely on VBT to set a proper alternate GMBUS pin. */
+		alternate_ddc_pin =
+			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+		switch (alternate_ddc_pin) {
+		case DDC_PIN_B:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+			break;
+		case DDC_PIN_C:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+			break;
+		case DDC_PIN_D:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+			break;
+		default:
+			MISSING_CASE(alternate_ddc_pin);
+		}
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	case PORT_A:
 		intel_encoder->hpd_pin = HPD_PORT_A;
 		/* Internal port only for eDP. */
@@ -1827,7 +2104,12 @@
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->pre_enable = intel_hdmi_pre_enable;
-		intel_encoder->enable = intel_enable_hdmi;
+		if (HAS_PCH_CPT(dev))
+			intel_encoder->enable = cpt_enable_hdmi;
+		else if (HAS_PCH_IBX(dev))
+			intel_encoder->enable = ibx_enable_hdmi;
+		else
+			intel_encoder->enable = g4x_enable_hdmi;
 	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 0000000..53c0173
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled at many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen while a display port sink is connected, hence on platforms whose DP
+ * callback is handled by i915_digport_work_func, re-enabling of hpd is not
+ * performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by that routine; any other display,
+ * such as HDMI or DVI, enabled on the same port will have the proper logic,
+ * since it uses i915_hotplug_work_func, where this logic is handled.
+ */
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+	switch (pin) {
+	case HPD_PORT_A:
+		*port = PORT_A;
+		return true;
+	case HPD_PORT_B:
+		*port = PORT_B;
+		return true;
+	case HPD_PORT_C:
+		*port = PORT_C;
+		return true;
+	case HPD_PORT_D:
+		*port = PORT_D;
+		return true;
+	case HPD_PORT_E:
+		*port = PORT_E;
+		return true;
+	default:
+		return false;	/* no hpd */
+	}
+}
+
+#define HPD_STORM_DETECT_PERIOD		1000
+#define HPD_STORM_THRESHOLD		5
+#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+				       enum hpd_pin pin)
+{
+	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+	bool storm = false;
+
+	if (!time_in_range(jiffies, start, end)) {
+		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+		dev_priv->hotplug.stats[pin].count = 0;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+	} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+		storm = true;
+	} else {
+		dev_priv->hotplug.stats[pin].count++;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+			      dev_priv->hotplug.stats[pin].count);
+	}
+
+	return storm;
+}
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	enum hpd_pin pin;
+	bool hpd_disabled = false;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (!intel_encoder)
+			continue;
+
+		pin = intel_encoder->hpd_pin;
+		if (pin == HPD_NONE ||
+		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+			continue;
+
+		DRM_INFO("HPD interrupt storm detected on connector %s: "
+			 "switching from hotplug detection to polling\n",
+			 connector->name);
+
+		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT
+			| DRM_CONNECTOR_POLL_DISCONNECT;
+		hpd_disabled = true;
+	}
+
+	/* Enable polling and queue hotplug re-enabling. */
+	if (hpd_disabled) {
+		drm_kms_helper_poll_enable(dev);
+		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+	}
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug.reenable_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	int i;
+
+	intel_runtime_pm_get(dev_priv);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+			continue;
+
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 connector->name);
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	enum drm_connector_status old_status;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	old_status = connector->status;
+
+	connector->status = connector->funcs->detect(connector, false);
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+		      connector->base.id,
+		      connector->name,
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+	u32 long_port_mask, short_port_mask;
+	struct intel_digital_port *intel_dig_port;
+	int i;
+	u32 old_bits = 0;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	long_port_mask = dev_priv->hotplug.long_port_mask;
+	dev_priv->hotplug.long_port_mask = 0;
+	short_port_mask = dev_priv->hotplug.short_port_mask;
+	dev_priv->hotplug.short_port_mask = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		bool valid = false;
+		bool long_hpd = false;
+		intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+			continue;
+
+		if (long_port_mask & (1 << i))  {
+			valid = true;
+			long_hpd = true;
+		} else if (short_port_mask & (1 << i))
+			valid = true;
+
+		if (valid) {
+			enum irqreturn ret;
+
+			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret == IRQ_NONE) {
+				/* fall back to old school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+		}
+	}
+
+	if (old_bits) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		dev_priv->hotplug.event_bits |= old_bits;
+		spin_unlock_irq(&dev_priv->irq_lock);
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+	}
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	bool changed = false;
+	u32 hpd_event_bits;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	hpd_event_bits = dev_priv->hotplug.event_bits;
+	dev_priv->hotplug.event_bits = 0;
+
+	/* Disable hotplug on connectors that hit an irq storm. */
+	intel_hpd_irq_storm_disable(dev_priv);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
+		intel_encoder = intel_connector->encoder;
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+				      connector->name, intel_encoder->hpd_pin);
+			if (intel_encoder->hot_plug)
+				intel_encoder->hot_plug(intel_encoder);
+			if (intel_hpd_irq_event(dev, connector))
+				changed = true;
+		}
+	}
+	mutex_unlock(&mode_config->mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+			   u32 pin_mask, u32 long_mask)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	enum port port;
+	bool storm_detected = false;
+	bool queue_dig = false, queue_hp = false;
+	bool is_dig_port;
+
+	if (!pin_mask)
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		if (!(BIT(i) & pin_mask))
+			continue;
+
+		is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+			      dev_priv->hotplug.irq_port[port];
+
+		if (is_dig_port) {
+			bool long_hpd = long_mask & BIT(i);
+
+			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+					 long_hpd ? "long" : "short");
+			/*
+			 * For long HPD pulses we want to have the digital queue happen,
+			 * but we still want HPD storm detection to function.
+			 */
+			queue_dig = true;
+			if (long_hpd) {
+				dev_priv->hotplug.long_port_mask |= (1 << port);
+			} else {
+				/* for short HPD just trigger the digital queue */
+				dev_priv->hotplug.short_port_mask |= (1 << port);
+				continue;
+			}
+		}
+
+		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits itself. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt on pin %d although disabled\n", i);
+			continue;
+		}
+
+		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+			continue;
+
+		if (!is_dig_port) {
+			dev_priv->hotplug.event_bits |= BIT(i);
+			queue_hp = true;
+		}
+
+		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+			dev_priv->hotplug.event_bits &= ~BIT(i);
+			storm_detected = true;
+		}
+	}
+
+	if (storm_detected)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock(&dev_priv->irq_lock);
+
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue, because otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	if (queue_dig)
+		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+	if (queue_hp)
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+}
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_hpd_pin(i) {
+		dev_priv->hotplug.stats[i].count = 0;
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+		if (connector->encoder && !connector->polled &&
+		    I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		if (intel_connector->mst_port)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	/*
+	 * Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked checks happy.
+	 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+			  intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->hotplug.long_port_mask = 0;
+	dev_priv->hotplug.short_port_mask = 0;
+	dev_priv->hotplug.event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2161a..72e0edd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -135,6 +135,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "intel_mocs.h"
 
 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
@@ -190,9 +191,7 @@
 #define GEN8_CTX_PRIVILEGE (1<<8)
 
 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
-	const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
-		ppgtt->pdp.page_directory[n]->daddr : \
-		ppgtt->scratch_pd->daddr; \
+	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
 	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
 	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
@@ -211,9 +210,9 @@
 	FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
@@ -294,55 +294,59 @@
 	return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
-				 struct drm_i915_gem_object *ctx_obj0,
-				 struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+				 struct drm_i915_gem_request *rq1)
 {
+	struct intel_engine_cs *ring = rq0->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint64_t temp = 0;
-	uint32_t desc[4];
+	uint64_t desc[2];
 
-	/* XXX: You must always write both descriptors in the order below. */
-	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ring, ctx_obj1);
-	else
-		temp = 0;
-	desc[1] = (u32)(temp >> 32);
-	desc[0] = (u32)temp;
+	if (rq1) {
+		desc[1] = execlists_ctx_descriptor(rq1);
+		rq1->elsp_submitted++;
+	} else {
+		desc[1] = 0;
+	}
 
-	temp = execlists_ctx_descriptor(ring, ctx_obj0);
-	desc[3] = (u32)(temp >> 32);
-	desc[2] = (u32)temp;
+	desc[0] = execlists_ctx_descriptor(rq0);
+	rq0->elsp_submitted++;
 
+	/* You must always write both descriptors in the order below. */
 	spin_lock(&dev_priv->uncore.lock);
 	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-	I915_WRITE_FW(RING_ELSP(ring), desc[1]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[0]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
 
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
-	/* ELSP is a wo register, so use another nearby reg for posting instead */
+	/* ELSP is a wo register, use another nearby reg for posting */
 	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
 	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	spin_unlock(&dev_priv->uncore.lock);
 }
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    struct i915_hw_ppgtt *ppgtt,
-				    u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
 	struct page *page;
 	uint32_t *reg_state;
 
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
 	page = i915_gem_object_get_page(ctx_obj, 1);
 	reg_state = kmap_atomic(page);
 
-	reg_state[CTX_RING_TAIL+1] = tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
 	/* True PPGTT with dynamic page allocation: update PDP registers and
 	 * point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@
 	return 0;
 }
 
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
-				      struct intel_context *to0, u32 tail0,
-				      struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
-	struct drm_i915_gem_object *ctx_obj1 = NULL;
-	struct intel_ringbuffer *ringbuf1 = NULL;
+	execlists_update_context(rq0);
 
-	BUG_ON(!ctx_obj0);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
+	if (rq1)
+		execlists_update_context(rq1);
 
-	execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
-
-	if (to1) {
-		ringbuf1 = to1->engine[ring->id].ringbuf;
-		ctx_obj1 = to1->engine[ring->id].state;
-		BUG_ON(!ctx_obj1);
-		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
-
-		execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
-	}
-
-	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+	execlists_elsp_write(rq0, rq1);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	execlists_submit_contexts(ring, req0->ctx, req0->tail,
-				  req1 ? req1->ctx : NULL,
-				  req1 ? req1->tail : 0);
-
-	req0->elsp_submitted++;
-	if (req1)
-		req1->elsp_submitted++;
+	execlists_submit_requests(req0, req1);
 }
 
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
 				(read_pointer % 6) * 8 + 4);
 
+		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+			continue;
+
 		if (status & GEN8_CTX_STATUS_PREEMPTED) {
 			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
 				if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@
 	ring->next_context_status_buffer = write_pointer % 6;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
+		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
 }
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-				   struct intel_context *to,
-				   u32 tail,
-				   struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *ring = request->ring;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(request);
 
-	if (!request) {
-		/*
-		 * If there isn't a request associated with this submission,
-		 * create one as a temporary holder.
-		 */
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-		request->ring = ring;
-		request->ctx = to;
-		kref_init(&request->ref);
-		i915_gem_context_reference(request->ctx);
-	} else {
-		i915_gem_request_reference(request);
-		WARN_ON(to != request->ctx);
-	}
-	request->tail = tail;
+	i915_gem_request_reference(request);
+
+	request->tail = request->ringbuf->tail;
 
 	spin_lock_irq(&ring->execlist_lock);
 
@@ -585,7 +553,7 @@
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->ctx) {
+		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
-					      struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -614,8 +581,7 @@
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, ctx,
-			       I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
@@ -623,12 +589,10 @@
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -638,7 +602,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -655,59 +619,59 @@
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req);
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	int ret;
 
-	if (ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, ctx);
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+	if (request->ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
-	request->ctx     = ctx;
-	i915_gem_context_reference(request->ctx);
-
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
 
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (request->ringbuf != ringbuf)
+		if (target->ringbuf != ringbuf)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
 					   ringbuf->size);
 		if (space >= bytes)
 			break;
 	}
 
-	if (WARN_ON(&request->list == &ring->request_list))
+	if (WARN_ON(&target->list == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(request);
+	ret = i915_wait_request(target);
 	if (ret)
 		return ret;
 
@@ -717,7 +681,7 @@
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request whose logical ringbuffer is to be advanced.

  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
-				      struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = request->ring;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_logical_ring_advance(request->ringbuf);
 
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail, request);
+	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -759,25 +713,50 @@
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-				struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-	int ret;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
-		if (unlikely(ret))
-			return ret;
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request, so we need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+	if (wait_bytes) {
+		ret = logical_ring_wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
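The three-way space accounting above can be condensed into a standalone
sketch (illustrative only, not part of the patch; it reuses the ringbuf
field names from the function and omits the reserved_in_use special case):

	/* Sketch: decide how many bytes logical_ring_prepare() must wait
	 * for, and whether the tail has to wrap to the start of the ring. */
	static void prepare_decision(const struct intel_ringbuffer *ringbuf,
				     int bytes, int *wait_bytes, bool *need_wrap)
	{
		int remain_usable = ringbuf->effective_size - ringbuf->tail;
		int remain_actual = ringbuf->size - ringbuf->tail;
		int total_bytes = bytes + ringbuf->reserved_size;

		*wait_bytes = 0;
		*need_wrap = false;

		if (bytes > remain_usable) {
			/* Even the base request does not fit before the
			 * effective end: wrap, then wait for base + reserved
			 * measured from the start of the ring. */
			*wait_bytes = remain_actual + total_bytes;
			*need_wrap = true;
		} else if (total_bytes > remain_usable) {
			/* The base request fits but the reserved space falls
			 * off the end: wrap, then only the reserved size
			 * still needs to be waited for. */
			*wait_bytes = remain_actual + ringbuf->reserved_size;
			*need_wrap = true;
		} else if (total_bytes > ringbuf->space) {
			/* No wrap required, just wait for space. */
			*wait_bytes = total_bytes;
		}
	}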
@@ -786,7 +765,8 @@
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @ringbuf: Logical ringbuffer.
+ * @request: The request to start some new work for.
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	dev_priv = req->ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ctx);
-	if (ret)
-		return ret;
-
-	ringbuf->space -= num_dwords * sizeof(uint32_t);
+	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_logical_ring_begin(request, 0);
+}
+
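A hypothetical request creator following the two-phase contract described
in the comment above would look roughly like this (everything except the
reserve call itself is illustrative):

	/* Sketch, not from the patch: reserve the add-request space up
	 * front so that emitting the submission commands later can never
	 * fail for lack of ring space. */
	ret = intel_logical_ring_reserve_space(req);
	if (ret)
		return ret;

	/* ... emit the commands that make up the request ... */

	/* submission then consumes the reserved space (the _no_flush
	 * variant seen elsewhere in this series also works here) */
	i915_add_request(req);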
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
@@ -839,16 +829,15 @@
  *
  * Return: non-zero if the submission fails.
  */
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
+	struct drm_device       *dev = params->dev;
+	struct intel_engine_cs  *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
 	int ret;
@@ -899,13 +888,13 @@
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+		ret = intel_logical_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
@@ -918,14 +907,17 @@
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+	exec_start = params->batch_obj_vm_offset +
+		     args->batch_start_offset;
+
+	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -950,7 +942,7 @@
 				ctx->engine[ring->id].state;
 
 		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(ring, ctx);
+			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -978,16 +970,15 @@
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -995,15 +986,15 @@
 	return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (ctx->engine[ring->id].pin_count++ == 0) {
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
@@ -1021,31 +1012,31 @@
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-	ctx->engine[ring->id].pin_count = 0;
+	rq->ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
 
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].pin_count == 0) {
+		if (--rq->ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
 	}
 }
 
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
-					       struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1054,11 +1045,11 @@
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1072,13 +1063,361 @@
 	intel_logical_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
+#define wa_ctx_emit(batch, index, cmd)					\
+	do {								\
+		int __index = (index)++;				\
+		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+			return -ENOSPC;					\
+		}							\
+		batch[__index] = (cmd);					\
+	} while (0)
+
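Note that the macro bails out of the enclosing function with -ENOSPC when
the page overflows, so it can only be used inside helpers that return int.
A minimal usage sketch (illustrative, not part of the patch):

	/* Emit two NOOPs into a WA batch page; wa_ctx_emit() advances
	 * index and returns -ENOSPC from this function on overflow. */
	static int emit_two_noops(uint32_t *batch, uint32_t index)
	{
		wa_ctx_emit(batch, index, MI_NOOP);
		wa_ctx_emit(batch, index, MI_NOOP);
		return index;	/* new write position on success */
	}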
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: this is applied in a WA
+ * batch where the values are only initialized once, so we cannot read the
+ * register at the beginning and reuse it later. Hence we save its value
+ * to memory, upload a constant value with bit21 set, and then restore it
+ * from the saved value. To simplify the WA, the constant is formed from
+ * the default value of this register. This shouldn't be a problem because
+ * we only modify it for a short period and this batch is non-preemptible.
+ * We could of course use additional instructions that read the actual
+ * value of the register at that time and set our bit of interest, but
+ * that makes the WA more complicated.
+ *
+ * This WA is also required for Gen9 so extracting as a function avoids
+ * code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+						uint32_t *const batch,
+						uint32_t index)
+{
+	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/*
+	 * WaDisableLSQCROPERFforOCL:skl
+	 * This WA is implemented in skl_init_clock_gating() but since
+	 * this batch updates GEN8_L3SQCREG4 with default value we need to
+	 * set this bit here to retain the WA during flush.
+	 */
+	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, l3sqc4_flush);
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_DC_FLUSH_ENABLE));
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	return index;
+}
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t offset,
+				    uint32_t start_alignment)
+{
+	return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+			     uint32_t offset,
+			     uint32_t size_alignment)
+{
+	wa_ctx->size = offset - wa_ctx->offset;
+
+	WARN(wa_ctx->size % size_alignment,
+	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+	     wa_ctx->size, size_alignment);
+	return 0;
+}
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies start of the batch, should be cache-aligned. This is updated
+ *    with the offset value received as input.
+ *  size: size of the batch in DWORDs, but HW expects it in terms of cachelines
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of the batch, it should be
+ *  cache-aligned otherwise it is adjusted accordingly.
+ *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
+ *  initialized at the beginning and shared across all contexts, but this field
+ *  helps us to have multiple batches at different offsets and select them based
+ *  on some criterion. At the moment this batch always starts at the beginning
+ *  of the page and we don't have multiple wa_ctx batch buffers.
+ *
+ *  The number of WAs applied is not known at the beginning; we use this field
+ *  to return the number of DWORDs written.
+ *
+ *  Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ *  added as padding to make it cacheline-aligned. MI_BATCH_BUFFER_END will
+ *  be added to the per-ctx batch, and the two together make a complete
+ *  batch buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	uint32_t scratch_addr;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+	if (IS_BROADWELL(ring->dev)) {
+		index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+		if (index < 0)
+			return index;
+	}
+
+	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+	/* Actual scratch location is at 128 bytes offset */
+	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+				   PIPE_CONTROL_GLOBAL_GTT_IVB |
+				   PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_QW_WRITE));
+	wa_ctx_emit(batch, index, scratch_addr);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	/*
+	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+	 * execution depends on the length specified in terms of cache lines
+	 * in the register CTX_RCS_INDIRECT_CTX
+	 */
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies start of the batch, should be cache-aligned.
+ *  size: size of the batch in DWORDs, but HW expects it in terms of cachelines
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of this batch.
+ *   This batch is started immediately after indirect_ctx batch. Since we ensure
+ *   that indirect_ctx ends on a cacheline this batch is aligned automatically.
+ *
+ *   The number of DWORDs written is returned using this field.
+ *
+ *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add
+ *  padding to align it to a cacheline, as padding after MI_BATCH_BUFFER_END
+ *  is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	if (ret < 0)
+		return ret;
+	index = ret;
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+		wa_ctx_emit(batch, index,
+			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+		wa_ctx_emit(batch, index, MI_NOOP);
+	}
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+	int ret;
+
+	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+	if (!ring->wa_ctx.obj) {
+		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+		return -ENOMEM;
+	}
+
+	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+	if (ret) {
+		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+				 ret);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+	if (ring->wa_ctx.obj) {
+		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		ring->wa_ctx.obj = NULL;
+	}
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+	int ret;
+	uint32_t *batch;
+	uint32_t offset;
+	struct page *page;
+	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+	WARN_ON(ring->id != RCS);
+
+	/* update this when WA for higher Gen are added */
+	if (INTEL_INFO(ring->dev)->gen > 9) {
+		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+			  INTEL_INFO(ring->dev)->gen);
+		return 0;
+	}
+
+	/* some WA perform writes to scratch page, ensure it is valid */
+	if (ring->scratch.obj == NULL) {
+		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+		return -EINVAL;
+	}
+
+	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+		return ret;
+	}
+
+	page = i915_gem_object_get_page(wa_ctx->obj, 0);
+	batch = kmap_atomic(page);
+	offset = 0;
+
+	if (INTEL_INFO(ring->dev)->gen == 8) {
+		ret = gen8_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen8_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	} else if (INTEL_INFO(ring->dev)->gen == 9) {
+		ret = gen9_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen9_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	}
+
+out:
+	kunmap_atomic(batch);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1139,19 +1478,64 @@
 	return init_workarounds_ring(ring);
 }
 
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
-			      struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+	int i, ret;
+
+	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	/* Don't rely on hw updating PDPs, especially in lite-restore.
+	 * Ideally, we should set Force PD Restore in ctx descriptor,
+	 * but we can't. Force Restore would be a second option, but
+	 * it is unsafe in case of lite-restore (because the ctx is
+	 * not idle). */
+	if (req->ctx->ppgtt &&
+	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+		ret = intel_logical_ring_emit_pdps(req);
+		if (ret)
+			return ret;
+
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+	}
+
+	ret = intel_logical_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+				(ppgtt<<8) |
+				(dispatch_flags & I915_DISPATCH_RS ?
+				 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1193,18 +1577,18 @@
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
-			   struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	ret = intel_logical_ring_begin(request, 4);
 	if (ret)
 		return ret;
 
@@ -1234,11 +1618,11 @@
 	return 0;
 }
 
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa;
@@ -1270,7 +1654,7 @@
 	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
 		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;
 
@@ -1304,9 +1688,9 @@
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
-			     struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 cmd;
 	int ret;
@@ -1316,7 +1700,7 @@
 	 * used as a workaround for not being allowed to do lite
 	 * restore with HEAD==TAIL (WaIdleLiteRestore).
 	 */
-	ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+	ret = intel_logical_ring_begin(request, 8);
 	if (ret)
 		return ret;
 
@@ -1328,11 +1712,10 @@
 				(ring->status_page.gfx_addr +
 				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf,
-		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -1345,49 +1728,53 @@
 	return 0;
 }
 
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-					      struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	struct render_state so;
-	struct drm_i915_file_private *file_priv = ctx->file_priv;
-	struct drm_file *file = file_priv ? file_priv->file : NULL;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->emit_bb_start(ringbuf,
-			ctx,
-			so.ggtt_offset,
-			I915_DISPATCH_SECURE);
+	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+				       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	ret = req->ring->emit_bb_start(req,
+				       (so.ggtt_offset + so.aux_batch_offset),
+				       I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
 
-	ret = __i915_add_request(ring, file, so.obj);
-	/* intel_logical_ring_add_request moves object to inactive if it
-	 * fails */
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
 }
 
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
-		       struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_logical_ring_workarounds_emit(ring, ctx);
+	ret = intel_logical_ring_workarounds_emit(req);
 	if (ret)
 		return ret;
 
-	return intel_lr_context_render_state_init(ring, ctx);
+	ret = intel_rcs_context_init_mocs(req);
+	/*
+	 * Failing to program the MOCS is non-fatal. The system will not
+	 * run at peak performance. So generate an error and carry on.
+	 */
+	if (ret)
+		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+	return intel_lr_context_render_state_init(req);
 }
 
 /**
@@ -1407,7 +1794,6 @@
 
 	intel_logical_ring_stop(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1419,6 +1805,8 @@
 		kunmap(sg_page(ring->status_page.obj->pages->sgl));
 		ring->status_page.obj = NULL;
 	}
+
+	lrc_destroy_wa_ctx_obj(ring);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1478,11 +1866,28 @@
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	ring->dev = dev;
-	ret = logical_ring_init(dev, ring);
+
+	ret = intel_init_pipe_control(ring);
 	if (ret)
 		return ret;
 
-	return intel_init_pipe_control(ring);
+	ret = intel_init_workaround_bb(ring);
+	if (ret) {
+		/*
+		 * We continue even if we fail to initialize WA batch
+		 * because we only expect rare glitches but nothing
+		 * critical to prevent us from using GPU
+		 */
+		DRM_ERROR("WA batch buffer initialization failed: %d\n",
+			  ret);
+	}
+
+	ret = logical_ring_init(dev, ring);
+	if (ret) {
+		lrc_destroy_wa_ctx_obj(ring);
+	}
+
+	return ret;
 }
 
 static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1737,7 +2142,8 @@
 	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
 	reg_state[CTX_CONTEXT_CONTROL+1] =
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-				CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+				   CTX_CTRL_RS_CTX_ENABLE);
 	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
 	reg_state[CTX_RING_HEAD+1] = 0;
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1762,15 +2168,27 @@
 	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
 	reg_state[CTX_SECOND_BB_STATE+1] = 0;
 	if (ring->id == RCS) {
-		/* TODO: according to BSpec, the register state context
-		 * for CHV does not have these. OTOH, these registers do
-		 * exist in CHV. I'm waiting for a clarification */
 		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
 		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
 		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		if (ring->wa_ctx.obj) {
+			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+			reg_state[CTX_RCS_INDIRECT_CTX+1] =
+				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+				CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+			reg_state[CTX_BB_PER_CTX_PTR+1] =
+				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+				0x01;
+		}
 	}
 	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
 	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
@@ -1975,13 +2393,22 @@
 		lrc_setup_hardware_status_page(ring, ctx_obj);
 	else if (ring->id == RCS && !ctx->rcs_initialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, ctx);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ctx, &req);
+			if (ret)
+				return ret;
+
+			ret = ring->init_context(req);
 			if (ret) {
 				DRM_ERROR("ring init context: %d\n", ret);
+				i915_gem_request_cancel(req);
 				ctx->engine[ring->id].ringbuf = NULL;
 				ctx->engine[ring->id].state = NULL;
 				goto error;
 			}
+
+			i915_add_request_no_flush(req);
 		}
 
 		ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d..64f89f99 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -32,18 +32,19 @@
 #define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
+#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
 #define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
 #define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags);
+			       struct list_head *vmas);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 161ab26..881b5d1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,6 @@
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct intel_connector *intel_connector =
-		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -252,8 +250,6 @@
 		stat_reg = PP_STATUS;
 	}
 
-	intel_panel_disable_backlight(intel_connector);
-
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@
 	POSTING_READ(lvds_encoder->reg);
 }
 
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+
+	intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+	intel_disable_lvds(encoder);
+}
+
 static enum drm_mode_status
 intel_lvds_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -452,7 +473,7 @@
 	 */
 	if (!HAS_PCH_SPLIT(dev)) {
 		drm_modeset_lock_all(dev);
-		intel_modeset_setup_hw_state(dev, true);
+		intel_display_resume(dev);
 		drm_modeset_unlock_all(dev);
 	}
 
@@ -528,7 +549,7 @@
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	pin = GMBUS_PIN_PANEL;
-	if (!lvds_is_present_in_vbt(dev, &pin)) {
-		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-		return;
-	}
-
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
@@ -957,6 +972,16 @@
 		}
 	}
 
+	pin = GMBUS_PIN_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+		if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+			return;
+		}
+		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+	}
+
 	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
@@ -988,7 +1013,12 @@
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
-	intel_encoder->disable = intel_disable_lvds;
+	if (HAS_PCH_SPLIT(dev_priv)) {
+		intel_encoder->disable = pch_disable_lvds;
+		intel_encoder->post_disable = pch_post_disable_lvds;
+	} else {
+		intel_encoder->disable = gmch_disable_lvds;
+	}
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@
 			drm_mode_debug_printmodeline(scan);
 
 			fixed_mode = drm_mode_duplicate(dev, scan);
-			if (fixed_mode) {
-				downclock_mode =
-					intel_find_panel_downclock(dev,
-					fixed_mode, connector);
-				if (downclock_mode != NULL &&
-					i915.lvds_downclock) {
-					/* We found the downclock for LVDS. */
-					dev_priv->lvds_downclock_avail = true;
-					dev_priv->lvds_downclock =
-						downclock_mode->clock;
-					DRM_DEBUG_KMS("LVDS downclock is found"
-					" in EDID. Normal clock %dKhz, "
-					"downclock %dKhz\n",
-					fixed_mode->clock,
-					dev_priv->lvds_downclock);
-				}
+			if (fixed_mode)
 				goto out;
-			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 0000000..6d3c6c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+	u32 control_value;
+	u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+	u32 size;
+	const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value)	((value) << 0)
+#define LE_TGT_CACHE(value)	((value) << 2)
+#define LE_LRUM(value)		((value) << 4)
+#define LE_AOM(value)		((value) << 6)
+#define LE_RSC(value)		((value) << 7)
+#define LE_SCC(value)		((value) << 8)
+#define LE_PFM(value)		((value) << 11)
+#define LE_SCF(value)		((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value)		((value) << 0)
+#define L3_SCC(value)		((value) << 1)
+#define L3_CACHEABILITY(value)	((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE		0
+#define LE_UC			1
+#define LE_WT			2
+#define LE_WB			3
+
+/* L3 caching options */
+#define L3_DIRECT		0
+#define L3_UC			1
+#define L3_RESERVED		2
+#define L3_WB			3
+
+/* Target cache */
+#define ELLC			0
+#define LLC			1
+#define LLC_ELLC		2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon.  For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with being uncached and the length
+ *       MUST be less than 63 as the last two registers are reserved
+ *       by the hardware.  These tables are part of the kernel ABI and
+ *       may only be updated incrementally by adding entries at the
+ *       end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
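As a quick cross-check of the annotations above (not part of the patch),
the third entry's pair can be rebuilt from the field macros:

	/* LE_CACHEABILITY(LE_WB)  = 3 << 0 = 0x03
	 * LE_TGT_CACHE(LLC_ELLC)  = 2 << 2 = 0x08
	 * LE_LRUM(3)              = 3 << 4 = 0x30
	 *   => control_value      = 0x3b
	 *
	 * L3_CACHEABILITY(L3_WB)  = 3 << 4 = 0x30
	 *   => l3cc_value         = 0x0030
	 * matching the { 0x0000003b, 0x0030 } annotation. */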
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/**
+ * get_mocs_settings()
+ * @dev:        DRM device.
+ * @table:      Output table that will be made to point at appropriate
+ *              MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed for
+ * the platform, returning both the values to program and whether any
+ * programming is needed at all.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+			      struct drm_i915_mocs_table *table)
+{
+	bool result = false;
+
+	if (IS_SKYLAKE(dev)) {
+		table->size  = ARRAY_SIZE(skylake_mocs_table);
+		table->table = skylake_mocs_table;
+		result = true;
+	} else if (IS_BROXTON(dev)) {
+		table->size  = ARRAY_SIZE(broxton_mocs_table);
+		table->table = broxton_mocs_table;
+		result = true;
+	} else {
+		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+			  "Platform that should have a MOCS table does not.\n");
+	}
+
+	return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ * @reg_base:	The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+				   const struct drm_i915_mocs_table *table,
+				   u32 reg_base)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int index;
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+	for (index = 0; index < table->size; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf,
+					table->table[index].control_value);
+	}
+
+	/*
+	 * Ok, now set the unused entries to uncached. These entries
+	 * are officially undefined and no contract for the contents
+	 * and settings is given for these entries.
+	 *
+	 * Entry 0 in the table is uncached - so we are just writing
+	 * that value to all the unused entries.
+	 */
+	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table, starting at GEN9_LNCFCMOCS0. This register set is
+ * programmed in pairs.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+				const struct drm_i915_mocs_table *table)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int count;
+	unsigned int i;
+	u32 value;
+	u32 filler = (table->table[0].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+	}
+
+	if (table->size & 0x01) {
+		/* Odd table size - 1 left over */
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	} else
+		value = filler;
+
+	/*
+	 * Now set the rest of the table to uncached - use entry 0 as
+	 * this will be uncached. Leave the last pair uninitialised as
+	 * they are reserved by the hardware.
+	 */
+	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+
+		value = filler;
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
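The pairing scheme above packs two 16-bit l3cc values into each 32-bit
LNCFCMOCS register, low-numbered entry in the low half. A minimal helper
equivalent to the in-loop expression (a sketch, not part of the patch):

	/* Combine two adjacent l3cc table entries into one register value,
	 * as emit_mocs_l3cc_table() does inline. */
	static u32 l3cc_combine(u16 low, u16 high)
	{
		return (u32)low | ((u32)high << 16);
	}

	/* e.g. entries { 0x0010, 0x0030 } yield 0x00300010, written to
	 * GEN9_LNCFCMOCS0 */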
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS register.
+ * @req:	Request to set up the MOCS tables for.
+ *
+ * This function will emit a batch buffer with the values required for
+ * programming the MOCS register values for all the currently supported
+ * rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted at context creation time to make sure they are set up. They
+ * have to be emitted at the start of the context, as setting the ELSP
+ * will re-initialize some of them back to their hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_mocs_table t;
+	int ret;
+
+	if (get_mocs_settings(req->ring->dev, &t)) {
+		/* Program the control registers */
+		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+		if (ret)
+			return ret;
+
+		/* Now program the l3cc registers */
+		ret = emit_mocs_l3cc_table(req, &t);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 0000000..76e45b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous Gens the MOCS setting was a value set by userland as
+ * part of the batch. In Gen9 this has changed to be a single table (per ring)
+ * that all batches now reference by index instead of programming the MOCS
+ * directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in the context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
+ * registers). The rest are not (the settings for the other rings).
+ *
+ * This table needs to be set at system start-up because of the way the table
+ * interacts with the contexts and the GmmLib interface.
+ *
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4813374..cb1c657 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,8 +25,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/acpi.h>
 #include <acpi/video.h>
 
@@ -53,6 +51,7 @@
 #define MBOX_ACPI      (1<<0)
 #define MBOX_SWSCI     (1<<1)
 #define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
 
 struct opregion_header {
 	u8 signature[16];
@@ -62,7 +61,10 @@
 	u8 vbios_ver[16];
 	u8 driver_ver[16];
 	u32 mboxes;
-	u8 reserved[164];
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
 } __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@
 	u32 evts;       /* ASL supported events */
 	u32 cnot;       /* current OS notification */
 	u32 nrdy;       /* driver status */
-	u8 rsvd2[60];
+	u32 did2[7];	/* extended supported display devices ID list */
+	u32 cpd2[7];	/* extended attached display devices list */
+	u8 rsvd2[4];
 } __packed;
 
 /* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@
 	u32 pcft;       /* power conservation features */
 	u32 srot;       /* supported rotation angles */
 	u32 iuer;       /* IUER events */
-	u8 rsvd[86];
+	u64 fdss;
+	u32 fdsp;
+	u32 stat;
+	u8 rsvd[70];
 } __packed;
 
 /* Driver readiness indicator */
@@ -611,6 +618,38 @@
  * (version 3)
  */
 
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+	u32 did;
+
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		did = ioread32(&opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return 0;
+
+		did = ioread32(&opregion->acpi->did2[i]);
+	}
+
+	return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		iowrite32(val, &opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return;
+
+		iowrite32(val, &opregion->acpi->did2[i]);
+	}
+}
+
 static void intel_didl_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
 	unsigned long long device_id;
 	acpi_status status;
-	u32 temp;
+	u32 temp, max_outputs;
 	int i = 0;
 
 	handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@
 	}
 
 	if (!acpi_video_bus) {
-		pr_warn("No ACPI video bus found\n");
+		DRM_ERROR("No ACPI video bus found\n");
 		return;
 	}
 
+	/*
+	 * In theory, did2, the extended didl, gets added at opregion version
+	 * 3.0. In practice, however, we're supposed to set it for earlier
+	 * versions as well, since a BIOS that doesn't understand did2 should
+	 * not look at it anyway. Use a variable so we can tweak this if a need
+	 * arises later.
+	 */
+	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+		ARRAY_SIZE(opregion->acpi->did2);
+
 	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs detected via ACPI\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+				      max_outputs);
 			return;
 		}
-		status =
-			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-						NULL, &device_id);
+		status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+					       NULL, &device_id);
 		if (ACPI_SUCCESS(status)) {
 			if (!device_id)
 				goto blind_set;
-			iowrite32((u32)(device_id & 0x0f0f),
-				  &opregion->acpi->didl[i]);
-			i++;
+			set_did(opregion, i++, (u32)(device_id & 0x0f0f));
 		}
 	}
 
 end:
-	/* If fewer than 8 outputs, the list must be null terminated */
-	if (i < 8)
-		iowrite32(0, &opregion->acpi->didl[i]);
+	DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+	/* If fewer than max outputs, the list must be null terminated */
+	if (i < max_outputs)
+		set_did(opregion, i, 0);
 	return;
 
 blind_set:
 	i = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs in connector list\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+				      max_outputs);
 			return;
 		}
 		switch (connector->connector_type) {
@@ -698,9 +746,8 @@
 			output_type = ACPI_LVDS_OUTPUT;
 			break;
 		}
-		temp = ioread32(&opregion->acpi->didl[i]);
-		iowrite32(temp | (1<<31) | output_type | i,
-			  &opregion->acpi->didl[i]);
+		temp = get_did(opregion, i);
+		set_did(opregion, i, temp | (1 << 31) | output_type | i);
 		i++;
 	}
 	goto end;
@@ -720,7 +767,7 @@
 	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
 	 * there are less than eight devices. */
 	do {
-		disp_id = ioread32(&opregion->acpi->didl[i]);
+		disp_id = get_did(opregion, i);
 		iowrite32(disp_id, &opregion->acpi->cadl[i]);
 	} while (++i < 8 && disp_id != 0);
 }
@@ -852,6 +899,11 @@
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
 
+	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
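
The get_did()/set_did() helpers above hide the didl/did2 split behind one flat index, which lets the rest of the code pretend there is a single list of up to 15 outputs. The same pattern in a self-contained form (sizes match the 8-entry didl plus 7-entry did2; plain arrays stand in for the iomem accessors):

#include <stdio.h>

#define DIDL_SIZE 8	/* original display ID list */
#define DID2_SIZE 7	/* extended list added for opregion 3.0 */

static unsigned int didl[DIDL_SIZE];
static unsigned int did2[DID2_SIZE];

/* Map a flat index 0..14 onto whichever backing array holds it. */
static int set_output_id(int i, unsigned int val)
{
	if (i < DIDL_SIZE) {
		didl[i] = val;
		return 0;
	}
	i -= DIDL_SIZE;
	if (i >= DID2_SIZE)
		return -1;	/* out of range, mirrors the WARN_ON */
	did2[i] = val;
	return 0;
}

int main(void)
{
	set_output_id(3, 0x0301);	/* lands in didl[3] */
	set_output_id(10, 0x0502);	/* lands in did2[2] */
	printf("%x %x\n", didl[3], did2[2]);
	return 0;
}
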
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25c8ec6..4445426 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,19 +210,14 @@
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
 					 void (*tail)(struct intel_overlay *))
 {
-	struct drm_device *dev = overlay->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	ret = i915_add_request(ring);
-	if (ret)
-		return ret;
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
 
 	overlay->flip_tail = tail;
 	ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = intel_ring_begin(ring, 4);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 4);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	overlay->active = true;
 
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, NULL);
+	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -280,18 +283,25 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = intel_ring_begin(ring, 2);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 2);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	return i915_add_request(ring);
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
+
+	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -338,10 +349,16 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 6);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@
 	}
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -404,15 +421,23 @@
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
-		ret = intel_ring_begin(ring, 2);
+		struct drm_i915_gem_request *req;
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 		if (ret)
 			return ret;
 
+		ret = intel_ring_begin(req, 2);
+		if (ret) {
+			i915_gem_request_cancel(req);
+			return ret;
+		}
+
 		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 
-		ret = intel_overlay_do_wait_request(overlay,
+		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
@@ -724,7 +749,7 @@
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
 						   &i915_ggtt_view_normal);
 	if (ret != 0)
 		return ret;
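
Every overlay path above now follows the same allocate/emit/cancel shape: the request is created up front, and any failure between allocation and submission must cancel it instead of leaking it. The skeleton of that pattern, with toy stand-ins for i915_gem_request_alloc(), intel_ring_begin() and i915_gem_request_cancel():

#include <stdio.h>
#include <stdlib.h>

struct request { int reserved; };

static int alloc_request(struct request **req)
{
	*req = calloc(1, sizeof(**req));
	return *req ? 0 : -1;
}

static int begin_emission(struct request *req, int dwords)
{
	req->reserved = dwords;	/* pretend ring space was reserved */
	return 0;
}

static void cancel_request(struct request *req)
{
	free(req);		/* failure after alloc: give it back */
}

static int do_flip(void)
{
	struct request *req;
	int ret;

	ret = alloc_request(&req);
	if (ret)
		return ret;	/* nothing to clean up yet */

	ret = begin_emission(req, 2);
	if (ret) {
		cancel_request(req);	/* must not leak the request */
		return ret;
	}

	/* emit + submit go here; on real submission ownership passes on */
	printf("submitted with %d dwords reserved\n", req->reserved);
	free(req);
	return 0;
}

int main(void) { return do_flip(); }
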
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 55aad23..e2ab3f6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -32,8 +32,11 @@
 
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
+#include <linux/pwm.h>
 #include "intel_drv.h"
 
+#define CRC_PMIC_PWM_PERIOD_NS	21333
+
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@
 	return I915_READ(BXT_BLC_PWM_DUTY1);
 }
 
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns;
+
+	duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+	return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
+
 static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@
 	I915_WRITE(BXT_BLC_PWM_DUTY1, level);
 }
 
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+	pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
+
 static void
 intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
 {
@@ -769,6 +789,16 @@
 	I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	/* Disable the backlight */
+	pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+	usleep_range(2000, 3000);
+	pwm_disable(panel->backlight.pwm);
+}
+
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@
 	I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	pwm_enable(panel->backlight.pwm);
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 void intel_panel_enable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@
 	return 0;
 }
 
+static int pwm_setup_backlight(struct intel_connector *connector,
+			       enum pipe pipe)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	int retval;
+
+	/* Get the PWM chip for backlight control */
+	panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+	if (IS_ERR(panel->backlight.pwm)) {
+		DRM_ERROR("Failed to own the pwm chip\n");
+		panel->backlight.pwm = NULL;
+		return -ENODEV;
+	}
+
+	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+			    CRC_PMIC_PWM_PERIOD_NS);
+	if (retval < 0) {
+		DRM_ERROR("Failed to configure the pwm chip\n");
+		pwm_put(panel->backlight.pwm);
+		panel->backlight.pwm = NULL;
+		return retval;
+	}
+
+	panel->backlight.min = 0; /* 0% */
+	panel->backlight.max = 100; /* 100% */
+	panel->backlight.level = DIV_ROUND_UP(
+				 pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+				 CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
 	struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_panel *panel = &intel_connector->panel;
 
+	/* dispose of the pwm */
+	if (panel->backlight.pwm)
+		pwm_put(panel->backlight.pwm);
+
 	panel->backlight.present = false;
 }
 
@@ -1456,11 +1532,19 @@
 		dev_priv->display.set_backlight = pch_set_backlight;
 		dev_priv->display.get_backlight = pch_get_backlight;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.setup_backlight = vlv_setup_backlight;
-		dev_priv->display.enable_backlight = vlv_enable_backlight;
-		dev_priv->display.disable_backlight = vlv_disable_backlight;
-		dev_priv->display.set_backlight = vlv_set_backlight;
-		dev_priv->display.get_backlight = vlv_get_backlight;
+		if (dev_priv->vbt.has_mipi) {
+			dev_priv->display.setup_backlight = pwm_setup_backlight;
+			dev_priv->display.enable_backlight = pwm_enable_backlight;
+			dev_priv->display.disable_backlight = pwm_disable_backlight;
+			dev_priv->display.set_backlight = pwm_set_backlight;
+			dev_priv->display.get_backlight = pwm_get_backlight;
+		} else {
+			dev_priv->display.setup_backlight = vlv_setup_backlight;
+			dev_priv->display.enable_backlight = vlv_enable_backlight;
+			dev_priv->display.disable_backlight = vlv_disable_backlight;
+			dev_priv->display.set_backlight = vlv_set_backlight;
+			dev_priv->display.get_backlight = vlv_get_backlight;
+		}
 	} else if (IS_GEN4(dev)) {
 		dev_priv->display.setup_backlight = i965_setup_backlight;
 		dev_priv->display.enable_backlight = i965_enable_backlight;
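
The new PWM hooks map the 0-100 backlight level onto a duty cycle inside the fixed 21333 ns period (roughly 46.9 kHz), rounding up in both directions. The arithmetic on its own:

#include <stdio.h>

#define PERIOD_NS 21333	/* CRC_PMIC_PWM_PERIOD_NS */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int level_to_duty_ns(int level)		/* 0..100 -> ns */
{
	return DIV_ROUND_UP(level * PERIOD_NS, 100);
}

static int duty_ns_to_level(int duty_ns)	/* ns -> 0..100 */
{
	return DIV_ROUND_UP(duty_ns * 100, PERIOD_NS);
}

int main(void)
{
	int duty = level_to_duty_ns(50);

	/* 50% -> 10667 ns, which reads back as 51 because both
	 * directions round up; exact round-trips aren't guaranteed. */
	printf("%d ns -> level %d\n", duty, duty_ns_to_level(duty));
	return 0;
}
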
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eadc15c..fff0c22 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -59,6 +59,10 @@
 	/* WaEnableLbsSlaRetryTimerDecrement:skl */
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   ECOCHK_DIS_TLB);
 }
 
 static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
+	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+	 * involving this register should also be added to the WA batch as
+	 * required.
+	 */
 	if (INTEL_REVID(dev) <= SKL_REVID_E0)
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+					   GEN9_GAPS_TSV_CREDIT_DISABLE));
+	}
 }
 
 static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@
 
 	if (IS_VALLEYVIEW(dev)) {
 		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		if (IS_CHERRYVIEW(dev))
-			chv_set_memory_pm5(dev_priv, enable);
+		POSTING_READ(FW_BLC_SELF_VLV);
+		dev_priv->wm.vlv.cxsr = enable;
 	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_PINEVIEW(dev)) {
 		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
 		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
 		I915_WRITE(DSPFW3, val);
+		POSTING_READ(DSPFW3);
 	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
 		I915_WRITE(FW_BLC_SELF, val);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_I915GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
 		I915_WRITE(INSTPM, val);
+		POSTING_READ(INSTPM);
 	} else {
 		return;
 	}
@@ -923,223 +940,484 @@
 			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
 	}
 
-	POSTING_READ(DSPFW1);
+	/* zero (unused) WM1 watermarks */
+	I915_WRITE(DSPFW4, 0);
+	I915_WRITE(DSPFW5, 0);
+	I915_WRITE(DSPFW6, 0);
+	I915_WRITE(DSPHOWM1, 0);
 
-	dev_priv->wm.vlv = *wm;
+	POSTING_READ(DSPFW1);
 }
 
 #undef FW_WM_VLV
 
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
-					 struct drm_plane *plane)
+enum vlv_wm_level {
+	VLV_WM_LEVEL_PM2,
+	VLV_WM_LEVEL_PM5,
+	VLV_WM_LEVEL_DDR_DVFS,
+	CHV_WM_NUM_LEVELS,
+	VLV_WM_NUM_LEVELS = 1,
+};
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+				   unsigned int pipe_htotal,
+				   unsigned int horiz_pixels,
+				   unsigned int bytes_per_pixel,
+				   unsigned int latency)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int entries, prec_mult, drain_latency, pixel_size;
-	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
-	const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
+	unsigned int ret;
 
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!intel_crtc->active || !plane->state->fb)
-		return 0;
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64);
 
-	if (WARN(clock == 0, "Pixel clock is zero!\n"))
-		return 0;
-
-	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
-
-	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
-		return 0;
-
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
-
-	prec_mult = high_precision;
-	drain_latency = 64 * prec_mult * 4 / entries;
-
-	if (drain_latency > DRAIN_LATENCY_MASK) {
-		prec_mult /= 2;
-		drain_latency = 64 * prec_mult * 4 / entries;
-	}
-
-	if (drain_latency > DRAIN_LATENCY_MASK)
-		drain_latency = DRAIN_LATENCY_MASK;
-
-	return drain_latency | (prec_mult == high_precision ?
-				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+	return ret;
 }
 
-static int vlv_compute_wm(struct intel_crtc *crtc,
-			  struct intel_plane *plane,
-			  int fifo_size)
+static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-	int clock, entries, pixel_size;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!crtc->active || !plane->base.state->fb)
+	/* all latencies in usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+	}
+}
+
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+				     struct intel_crtc *crtc,
+				     const struct intel_plane_state *state,
+				     int level)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int clock, htotal, pixel_size, width, wm;
+
+	if (dev_priv->wm.pri_latency[level] == 0)
+		return USHRT_MAX;
+
+	if (!state->visible)
 		return 0;
 
-	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
 	clock = crtc->config->base.adjusted_mode.crtc_clock;
+	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+	width = crtc->config->pipe_src_w;
+	if (WARN_ON(htotal == 0))
+		htotal = 1;
 
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
-
-	/*
-	 * Set up the watermark such that we don't start issuing memory
-	 * requests until we are within PND's max deadline value (256us).
-	 * Idea being to be idle as long as possible while still taking
-	 * advatange of PND's deadline scheduling. The limit of 8
-	 * cachelines (used when the FIFO will anyway drain in less time
-	 * than 256us) should match what we would be done if trickle
-	 * feed were enabled.
-	 */
-	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
-}
-
-static bool vlv_compute_sr_wm(struct drm_device *dev,
-			      struct vlv_wm_values *wm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
-	enum pipe pipe = INVALID_PIPE;
-	int num_planes = 0;
-	int fifo_size = 0;
-	struct intel_plane *plane;
-
-	wm->sr.cursor = wm->sr.plane = 0;
-
-	crtc = single_enabled_crtc(dev);
-	/* maxfifo not supported on pipe C */
-	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
-		pipe = to_intel_crtc(crtc)->pipe;
-		num_planes = !!wm->pipe[pipe].primary +
-			!!wm->pipe[pipe].sprite[0] +
-			!!wm->pipe[pipe].sprite[1];
-		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+		/*
+		 * FIXME the formula gives values that are
+		 * too big for the cursor FIFO, and hence we
+		 * would never be able to use cursors. For
+		 * now just hardcode the watermark.
+		 */
+		wm = 63;
+	} else {
+		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+				    dev_priv->wm.pri_latency[level] * 10);
 	}
 
-	if (fifo_size == 0 || num_planes > 1)
-		return false;
+	return min_t(int, wm, USHRT_MAX);
+}
 
-	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
-				       to_intel_plane(crtc->cursor), 0x3f);
+static void vlv_compute_fifo(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	unsigned int total_rate = 0;
+	const int fifo_size = 512 - 1;
+	int fifo_extra, fifo_left = fifo_size;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
 		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		if (plane->pipe != pipe)
+		if (state->visible) {
+			wm_state->num_active_planes++;
+			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+		unsigned int rate;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			plane->wm.fifo_size = 63;
+			continue;
+		}
+
+		if (!state->visible) {
+			plane->wm.fifo_size = 0;
+			continue;
+		}
+
+		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		plane->wm.fifo_size = fifo_size * rate / total_rate;
+		fifo_left -= plane->wm.fifo_size;
+	}
+
+	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
+
+	/* spread the remainder evenly */
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		int plane_extra;
+
+		if (fifo_left == 0)
+			break;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
-					      plane, fifo_size);
-		if (wm->sr.plane != 0)
-			break;
+		/* give it all to the first plane if none are active */
+		if (plane->wm.fifo_size == 0 &&
+		    wm_state->num_active_planes)
+			continue;
+
+		plane_extra = min(fifo_extra, fifo_left);
+		plane->wm.fifo_size += plane_extra;
+		fifo_left -= plane_extra;
 	}
 
-	return true;
+	WARN_ON(fifo_left != 0);
 }
 
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	int level;
+
+	for (level = 0; level < wm_state->num_levels; level++) {
+		struct drm_device *dev = crtc->base.dev;
+		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+		struct intel_plane *plane;
+
+		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+		for_each_intel_plane_on_crtc(dev, crtc, plane) {
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = plane->wm.fifo_size -
+					wm_state->wm[level].cursor;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = plane->wm.fifo_size -
+					wm_state->wm[level].primary;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+					wm_state->wm[level].sprite[sprite];
+				break;
+			}
+		}
+	}
+}
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+	int level;
+
+	memset(wm_state, 0, sizeof(*wm_state));
+
+	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+	if (IS_CHERRYVIEW(dev))
+		wm_state->num_levels = CHV_WM_NUM_LEVELS;
+	else
+		wm_state->num_levels = VLV_WM_NUM_LEVELS;
+
+	wm_state->num_active_planes = 0;
+
+	vlv_compute_fifo(crtc);
+
+	if (wm_state->num_active_planes != 1)
+		wm_state->cxsr = false;
+
+	if (wm_state->cxsr) {
+		for (level = 0; level < wm_state->num_levels; level++) {
+			wm_state->sr[level].plane = sr_fifo_size;
+			wm_state->sr[level].cursor = 63;
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
+		if (!state->visible)
+			continue;
+
+		/* normal watermarks */
+		for (level = 0; level < wm_state->num_levels; level++) {
+			int wm = vlv_compute_wm_level(plane, crtc, state, level);
+			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+			/* hack */
+			if (WARN_ON(level == 0 && wm > max_wm))
+				wm = max_wm;
+
+			if (wm > plane->wm.fifo_size)
+				break;
+
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = wm;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = wm;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = wm;
+				break;
+			}
+		}
+
+		wm_state->num_levels = level;
+
+		if (!wm_state->cxsr)
+			continue;
+
+		/* maxfifo watermarks */
+		switch (plane->base.type) {
+			int sprite, level;
+		case DRM_PLANE_TYPE_CURSOR:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].cursor =
+					wm_state->wm[level].cursor;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].primary);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].sprite[sprite]);
+			break;
+		}
+	}
+
+	/* clear any (partially) filled invalid levels */
+	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+	}
+
+	vlv_invert_wms(crtc);
+}
+
+#define VLV_FIFO(plane, value) \
+	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_plane *plane;
+	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			WARN_ON(plane->wm.fifo_size != 63);
+			continue;
+		}
+
+		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			sprite0_start = plane->wm.fifo_size;
+		else if (plane->plane == 0)
+			sprite1_start = sprite0_start + plane->wm.fifo_size;
+		else
+			fifo_size = sprite1_start + plane->wm.fifo_size;
+	}
+
+	WARN_ON(fifo_size != 512 - 1);
+
+	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+		      pipe_name(crtc->pipe), sprite0_start,
+		      sprite1_start, fifo_size);
+
+	switch (crtc->pipe) {
+		uint32_t dsparb, dsparb2, dsparb3;
+	case PIPE_A:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+			    VLV_FIFO(SPRITEB, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+			   VLV_FIFO(SPRITEB, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+			     VLV_FIFO(SPRITEB_HI, 0x1));
+		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_B:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+			    VLV_FIFO(SPRITED, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+			   VLV_FIFO(SPRITED, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+			     VLV_FIFO(SPRITED_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_C:
+		dsparb3 = I915_READ(DSPARB3);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+			     VLV_FIFO(SPRITEF, 0xff));
+		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+			    VLV_FIFO(SPRITEF, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+			     VLV_FIFO(SPRITEF_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB3, dsparb3);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	default:
+		break;
+	}
+}
+
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+			 struct vlv_wm_values *wm)
+{
+	struct intel_crtc *crtc;
+	int num_active_crtcs = 0;
+
+	if (IS_CHERRYVIEW(dev))
+		wm->level = VLV_WM_LEVEL_DDR_DVFS;
+	else
+		wm->level = VLV_WM_LEVEL_PM2;
+	wm->cxsr = true;
+
+	for_each_intel_crtc(dev, crtc) {
+		const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+		if (!crtc->active)
+			continue;
+
+		if (!wm_state->cxsr)
+			wm->cxsr = false;
+
+		num_active_crtcs++;
+		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+	}
+
+	if (num_active_crtcs != 1)
+		wm->cxsr = false;
+
+	if (num_active_crtcs > 1)
+		wm->level = VLV_WM_LEVEL_PM2;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct vlv_wm_state *wm_state = &crtc->wm_state;
+		enum pipe pipe = crtc->pipe;
+
+		if (!crtc->active)
+			continue;
+
+		wm->pipe[pipe] = wm_state->wm[wm->level];
+		if (wm->cxsr)
+			wm->sr = wm_state->sr[wm->level];
+
+		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+	}
+}
+
+static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	struct vlv_wm_values wm = {};
 
-	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
-	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
-					       to_intel_plane(crtc->primary),
-					       vlv_get_fifo_size(dev, pipe, 0));
+	vlv_compute_wm(intel_crtc);
+	vlv_merge_wm(dev, &wm);
 
-	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
-	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
-					      to_intel_plane(crtc->cursor),
-					      0x3f);
-
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+		/* FIXME should be part of crtc atomic commit */
+		vlv_pipe_set_fifo_size(intel_crtc);
 		return;
+	}
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
-		      wm.sr.plane, wm.sr.cursor);
-
-	/*
-	 * FIXME DDR DVFS introduces massive memory latencies which
-	 * are not known to system agent so any deadline specified
-	 * by the display may not be respected. To support DDR DVFS
-	 * the watermark code needs to be rewritten to essentially
-	 * bypass deadline mechanism and rely solely on the
-	 * watermarks. For now disable DDR DVFS.
-	 */
-	if (IS_CHERRYVIEW(dev_priv))
+	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
 		chv_set_memory_dvfs(dev_priv, false);
 
-	if (!cxsr_enabled)
+	if (wm.level < VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, false);
+
+	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, false);
 
+	/* FIXME should be part of crtc atomic commit */
+	vlv_pipe_set_fifo_size(intel_crtc);
+
 	vlv_write_wm_values(intel_crtc, &wm);
 
-	if (cxsr_enabled)
+	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, true);
-}
 
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
-					struct drm_crtc *crtc,
-					uint32_t sprite_width,
-					uint32_t sprite_height,
-					int pixel_size,
-					bool enabled, bool scaled)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	int sprite = to_intel_plane(plane)->plane;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	if (wm.level >= VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, true);
 
-	if (enabled) {
-		wm.ddl[pipe].sprite[sprite] =
-			vlv_compute_drain_latency(crtc, plane);
+	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, true);
 
-		wm.pipe[pipe].sprite[sprite] =
-			vlv_compute_wm(intel_crtc,
-				       to_intel_plane(plane),
-				       vlv_get_fifo_size(dev, pipe, sprite+1));
-	} else {
-		wm.ddl[pipe].sprite[sprite] = 0;
-		wm.pipe[pipe].sprite[sprite] = 0;
-	}
-
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-		return;
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      sprite_name(pipe, sprite),
-		      wm.pipe[pipe].sprite[sprite],
-		      wm.sr.plane, wm.sr.cursor);
-
-	if (!cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, false);
-
-	vlv_write_wm_values(intel_crtc, &wm);
-
-	if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+	dev_priv->wm.vlv = wm;
 }
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
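
vlv_wm_method2() above turns a latency budget into a FIFO watermark in 64-byte cachelines: how many display lines pass while the latency elapses, rounded up to whole lines, converted to bytes, then divided by the cacheline size. Stand-alone, with the driver's units (latency in 0.1 us steps, pixel rate in kHz):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int wm_method2(unsigned int pixel_rate,
			       unsigned int pipe_htotal,
			       unsigned int width,
			       unsigned int bytes_per_pixel,
			       unsigned int latency)
{
	unsigned int lines, bytes;

	/* lines scanned out while the latency elapses */
	lines = (latency * pixel_rate) / (pipe_htotal * 10000);
	/* round up to whole lines, then convert to bytes */
	bytes = (lines + 1) * width * bytes_per_pixel;
	/* the FIFO is allocated in 64-byte cachelines */
	return DIV_ROUND_UP(bytes, 64);
}

int main(void)
{
	/* e.g. 1920x1080@60: ~148500 kHz, htotal 2200, 4 bpp, 3 us */
	printf("%u cachelines\n", wm_method2(148500, 2200, 1920, 4, 30));
	return 0;
}
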
@@ -1434,23 +1712,22 @@
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
-				    struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
 
-	if (intel_crtc->config->pch_pfit.enabled) {
+	if (pipe_config->pch_pfit.enabled) {
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
-		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+		uint32_t pfit_size = pipe_config->pch_pfit.size;
 
-		pipe_w = intel_crtc->config->pipe_src_w;
-		pipe_h = intel_crtc->config->pipe_src_h;
+		pipe_w = pipe_config->pipe_src_w;
+		pipe_h = pipe_config->pipe_src_h;
+
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
@@ -1815,7 +2092,7 @@
 	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 				     mode->crtc_clock);
 	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
-					 dev_priv->display.get_display_clock_speed(dev_priv->dev));
+					 dev_priv->cdclk_freq);
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
 	       PIPE_WM_LINETIME_TIME(linetime);
@@ -2066,7 +2343,7 @@
 
 	p->active = true;
 	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
 
 	if (crtc->primary->state->fb)
 		p->pri.bytes_per_pixel =
@@ -2085,7 +2362,7 @@
 	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
 	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
 
 		if (intel_plane->pipe == pipe) {
@@ -2215,6 +2492,7 @@
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 
@@ -2255,7 +2533,8 @@
 	 * What we should check here is whether FBC can be
 	 * enabled sometime later.
 	 */
-	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+	    intel_fbc_enabled(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
@@ -3043,8 +3322,10 @@
 	if (!to_intel_crtc(crtc)->active)
 		return 0;
 
-	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+	if (WARN_ON(p->pixel_rate == 0))
+		return 0;
 
+	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 }
 
 static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3966,139 @@
 	}
 }
 
+#define _FW_WM(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+			       struct vlv_wm_values *wm)
+{
+	enum pipe pipe;
+	uint32_t tmp;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp = I915_READ(VLV_DDL(pipe));
+
+		wm->ddl[pipe].primary =
+			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].cursor =
+			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[0] =
+			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[1] =
+			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+	}
+
+	tmp = I915_READ(DSPFW1);
+	wm->sr.plane = _FW_WM(tmp, SR);
+	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+	tmp = I915_READ(DSPFW2);
+	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+	tmp = I915_READ(DSPFW3);
+	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		tmp = I915_READ(DSPFW7_CHV);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPFW8_CHV);
+		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+		tmp = I915_READ(DSPFW9_CHV);
+		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	} else {
+		tmp = I915_READ(DSPFW7);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	}
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	struct intel_plane *plane;
+	enum pipe pipe;
+	u32 val;
+
+	vlv_read_wm_values(dev_priv, wm);
+
+	for_each_intel_plane(dev, plane) {
+		switch (plane->base.type) {
+			int sprite;
+		case DRM_PLANE_TYPE_CURSOR:
+			plane->wm.fifo_size = 63;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+			break;
+		}
+	}
+
+	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->level = VLV_WM_LEVEL_PM2;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+		if (val & DSP_MAXFIFO_PM5_ENABLE)
+			wm->level = VLV_WM_LEVEL_PM5;
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+			wm->level = VLV_WM_LEVEL_DDR_DVFS;
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+
+	for_each_pipe(dev_priv, pipe)
+		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4497,14 @@
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
 	if (val != dev_priv->rps.cur_freq) {
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 		if (!IS_CHERRYVIEW(dev_priv))
 			gen6_set_rps_thresholds(dev_priv, val);
 	}
 
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
 	dev_priv->rps.cur_freq = val;
 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
@@ -4250,12 +4664,8 @@
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
-	/* No RC6 before Ironlake */
-	if (INTEL_INFO(dev)->gen < 5)
-		return 0;
-
-	/* RC6 is only on Ironlake mobile not on desktop */
-	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+	/* No RC6 before Ironlake and code is gone for ilk. */
+	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
 	/* Respect the kernel parameter if it is set */
@@ -4275,10 +4685,6 @@
 		return enable_rc6 & mask;
 	}
 
-	/* Disable RC6 on Ironlake */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
 	if (IS_IVYBRIDGE(dev))
 		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
@@ -4297,25 +4703,26 @@
 	u32 ddcc_status = 0;
 	int ret;
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	/* All of these values are in units of 50MHz */
 	dev_priv->rps.cur_freq		= 0;
 	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
-	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
-	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
-	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
-	if (IS_SKYLAKE(dev)) {
-		/* Store the frequency values in 16.66 MHZ units, which is
-		   the natural hardware unit for SKL */
-		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+	if (IS_BROXTON(dev)) {
+		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+	} else {
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
 	}
+
 	/* hw_max = RP0 until we check for overclocking */
 	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
 
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
 		ret = sandybridge_pcode_read(dev_priv,
 					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
 					&ddcc_status);
@@ -4327,6 +4734,16 @@
 					dev_priv->rps.max_freq);
 	}
 
+	if (IS_SKYLAKE(dev)) {
+		/* Store the frequency values in 16.66 MHz units, which is
+		   the natural hardware unit for SKL */
+		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+	}
+
 	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
 	/* Preserve min/max settings in case of re-init */
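
Per the comment above, SKL keeps RPS frequencies in 16.66 MHz units rather than the 50 MHz units of the raw RP fields; assuming GEN9_FREQ_SCALER is 3 (consistent with 50/3 = 16.66, not confirmed by this diff), the conversion is a plain scale in each direction:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumed: 50 MHz / 3 = 16.66 MHz */

static int to_skl_units(int rp_freq)	/* RP0/RP1/RPn in 50 MHz units */
{
	return rp_freq * GEN9_FREQ_SCALER;
}

static int to_hw_units(int skl_freq)	/* back to 50 MHz units */
{
	return skl_freq / GEN9_FREQ_SCALER;
}

int main(void)
{
	int rp0 = 23;	/* 23 * 50 MHz = 1150 MHz */
	int skl = to_skl_units(rp0);

	printf("rp0=%d -> %d units of 16.66 MHz -> back to %d\n",
	       rp0, skl, to_hw_units(skl));
	return 0;
}
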
@@ -4619,6 +5036,7 @@
 	int min_freq = 15;
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 	int scaling_factor = 180;
 	struct cpufreq_policy *policy;
 
@@ -4643,17 +5061,31 @@
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 HZ units */
+		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq;
+		max_gpu_freq = dev_priv->rps.max_freq;
+	}
+
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
-	     gpu_freq--) {
-		int diff = dev_priv->rps.max_freq - gpu_freq;
+	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+		int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (IS_SKYLAKE(dev)) {
+			/*
+			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
+			 * No floor required for ring frequency on SKL.
+			 */
+			ring_freq = gpu_freq;
+		} else if (INTEL_INFO(dev)->gen >= 8) {
 			/* max(2 * GT, DDR). NB: GT is 50MHz units */
 			ring_freq = max(min_ring_freq, gpu_freq);
 		} else if (IS_HASWELL(dev)) {
@@ -4687,7 +5119,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+	if (!HAS_CORE_RING_FREQ(dev))
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6234,8 @@
 	} else if (INTEL_INFO(dev)->gen >= 9) {
 		gen9_enable_rc6(dev);
 		gen9_enable_rps(dev);
-		__gen6_update_ring_freq(dev);
+		if (IS_SKYLAKE(dev))
+			__gen6_update_ring_freq(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
 		__gen6_update_ring_freq(dev);
@@ -6686,13 +7119,15 @@
 		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
 	} else if (IS_CHERRYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			valleyview_init_clock_gating;
 	} else if (IS_PINEVIEW(dev)) {
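
vlv_compute_fifo() earlier in this file splits the 511-entry pipe FIFO among visible planes in proportion to their bytes per pixel, then hands out the integer-division remainder. A reduced version of the proportional step (the driver spreads the remainder evenly across planes; here it simply goes to the first one):

#include <stdio.h>

#define FIFO_SIZE (512 - 1)

/* Split the FIFO proportionally to each plane's byte rate (cpp),
 * mirroring the proportional step of vlv_compute_fifo(). */
static void split_fifo(const int *cpp, int n, int *fifo)
{
	int total = 0, left = FIFO_SIZE, i;

	for (i = 0; i < n; i++)
		total += cpp[i];

	for (i = 0; i < n; i++) {
		fifo[i] = FIFO_SIZE * cpp[i] / total;
		left -= fifo[i];
	}

	/* hand the integer-division remainder to the first plane */
	fifo[0] += left;
}

int main(void)
{
	int cpp[2] = { 4, 2 };	/* e.g. XRGB8888 primary, RGB565 sprite */
	int fifo[2];

	split_fifo(cpp, 2, fifo);
	printf("%d + %d = %d\n", fifo[0], fifo[1], fifo[0] + fifo[1]);
	return 0;
}
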
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ee0fa5..a04b4dc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -254,10 +254,13 @@
 	uint32_t max_sleep_time = 0x1f;
 	/* Lately it was identified that depending on panel idle frame count
 	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1 and at minimum 2 to be on the safe side.
+	 * from VBT + 1.
+	 * There are also other cases where the panel demands at least 4
+	 * but VBT is not set. To cover both cases, let's use at least 5
+	 * when VBT isn't set, to be on the safe side.
 	 */
 	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-			       dev_priv->vbt.psr.idle_frames + 1 : 2;
+			       dev_priv->vbt.psr.idle_frames + 1 : 5;
 	uint32_t val = 0x0;
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
@@ -400,7 +403,7 @@
 
 		/* Avoid continuous PSR exit by masking memup and hpd */
 		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+			   EDP_PSR_DEBUG_MASK_HPD);
 
 		/* Enable PSR on the panel */
 		hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@
 /**
  * intel_psr_single_frame_update - Single Frame Update
  * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
  * send and update only one frame on Remote Frame Buffer.
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -624,14 +629,16 @@
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
-	val = I915_READ(VLV_PSRCTL(pipe));
 
-	/*
-	 * We need to set this bit before writing registers for a flip.
-	 * This bit will be self-clear when it gets to the PSR active state.
-	 */
-	I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+		val = I915_READ(VLV_PSRCTL(pipe));
 
+		/*
+		 * We need to set this bit before writing registers for a flip.
+		 * This bit will be self-clear when it gets to the PSR active state.
+		 */
+		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	}
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -648,7 +655,7 @@
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -663,11 +670,12 @@
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	intel_psr_exit(dev);
-
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -675,6 +683,7 @@
  * intel_psr_flush - Flush PSR
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits)
+		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
+	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Haswell sprite plane updates don't result in a psr invalidating
-	 * signal in the hardware. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering.
-	 */
-	if (IS_HASWELL(dev) &&
-	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-		intel_psr_exit(dev);
-
-	/*
-	 * On Valleyview and Cherryview we don't use hardware tracking so
-	 * any plane updates or cursor moves don't result in a PSR
-	 * invalidating. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering. */
-	if (!HAS_DDI(dev))
-		intel_psr_exit(dev);
+	if (HAS_DDI(dev)) {
+		/*
+		 * By definition every flush should mean invalidate + flush,
+		 * however on core platforms let's minimize the
+		 * disable/re-enable so we can avoid the invalidate when a
+		 * flip originated the flush.
+		 */
+		if (frontbuffer_bits && origin != ORIGIN_FLIP)
+			intel_psr_exit(dev);
+	} else {
+		/*
+		 * On Valleyview and Cherryview we don't use hardware tracking
+		 * so any plane updates or cursor moves don't result in a PSR
+		 * invalidating. Which means we need to manually fake this in
+		 * software for all flushes.
+		 */
+		if (frontbuffer_bits)
+			intel_psr_exit(dev);
+	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+				      msecs_to_jiffies(delay_ms));
 	mutex_unlock(&dev_priv->psr.lock);
 }
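
The flush path now filters the caller's frontbuffer bits down to the PSR pipe via INTEL_FRONTBUFFER_ALL_MASK() before deciding to exit, so activity on other pipes no longer disturbs PSR. The filtering itself is a one-line mask; a tiny illustration (the 4-bits-per-pipe layout is hypothetical):

#include <stdio.h>

/* Hypothetical layout: 4 plane bits per pipe. */
#define FRONTBUFFER_ALL_MASK(pipe)	(0xfu << ((pipe) * 4))

static int psr_should_exit(unsigned int bits, int psr_pipe)
{
	bits &= FRONTBUFFER_ALL_MASK(psr_pipe);
	return bits != 0;	/* only dirt on the PSR pipe forces an exit */
}

int main(void)
{
	printf("%d\n", psr_should_exit(0x010, 0));	/* pipe 1 dirty: no  */
	printf("%d\n", psr_should_exit(0x003, 0));	/* pipe 0 dirty: yes */
	return 0;
}
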
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3817a6f..6e6b8db 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -81,7 +81,7 @@
 	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
 }
 
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -105,7 +106,7 @@
 	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -117,10 +118,11 @@
 }
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
@@ -163,7 +165,7 @@
 	    (IS_G4X(dev) || IS_GEN5(dev)))
 		cmd |= MI_INVALIDATE_ISP;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -212,13 +214,13 @@
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
-
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -231,7 +233,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -247,15 +249,16 @@
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
-                         u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
+	ret = intel_emit_post_sync_nonzero_flush(req);
 	if (ret)
 		return ret;
 
@@ -285,7 +288,7 @@
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -299,11 +302,12 @@
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -318,9 +322,10 @@
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -362,10 +367,10 @@
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
-		gen7_render_ring_cs_stall_wa(ring);
+		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -379,12 +384,13 @@
 }
 
 static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -400,11 +406,11 @@
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-		ret = gen8_emit_pipe_control(ring,
+		ret = gen8_emit_pipe_control(req,
 					     PIPE_CONTROL_CS_STALL |
 					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
 					     0);
@@ -432,7 +438,7 @@
 			return ret;
 	}
 
-	return gen8_emit_pipe_control(ring, flags, scratch_addr);
+	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@
 	return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
-				       struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, (w->count * 2 + 2));
+	ret = intel_ring_begin(req, (w->count * 2 + 2));
 	if (ret)
 		return ret;
 
@@ -733,7 +739,7 @@
 	intel_ring_advance(ring);
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -742,16 +748,15 @@
 	return 0;
 }
 
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
-			      struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(ring, ctx);
+	ret = intel_ring_workarounds_emit(req);
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_init(ring);
+	ret = i915_gem_render_state_init(req);
 	if (ret)
 		DRM_ERROR("init render state: %d\n", ret);
 
@@ -775,11 +780,11 @@
 	return 0;
 }
 
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
 		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
 		if (r) \
 			return r; \
-	}
+	} while (0)
 
 #define WA_SET_BIT_MASKED(addr, mask) \
 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
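
The do { } while (0) form is the standard fix for statement-like macros: the old bare-block expansion plus the caller's trailing semicolon produced two statements, which silently breaks if/else nesting. A minimal illustration of the failure mode the change avoids (addr, mask and do_something_else() are placeholders):

	/* Old expansion: { ... };  -- the extra ';' ends the if statement */
	if (INTEL_INFO(dev)->gen >= 9)
		WA_SET_BIT_MASKED(addr, mask);
	else				/* error: 'else' without a previous 'if' */
		do_something_else();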
@@ -800,6 +805,11 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:bdw */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:bdw */
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:chv */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:chv */
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@
 		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
-		WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
-				  DISABLE_PIXEL_MASK_CAMMING);
+		/*
+		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+		 * but we do that in the per-ctx batchbuffer as there is an issue
+		 * with this register not getting restored on ctx restore
+		 */
 	}
 
 	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE |
-				  HDC_BARRIER_PERFORMANCE_DISABLE);
-
 	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@
 				  HDC_FORCE_NON_COHERENT);
 	}
 
+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:skl */
+	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+		WA_SET_BIT_MASKED(
+			GEN7_HALF_SLICE_CHICKEN1,
+			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	}
+
 	return skl_tune_iz_hashing(ring);
 }
 
@@ -1105,9 +1130,9 @@
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
 	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@
 	intel_fini_pipe_control(ring);
 }
 
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1178,8 +1204,7 @@
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
 		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@
 	return 0;
 }
 
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1219,8 +1245,7 @@
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
 					   MI_FLUSH_DW_OP_STOREDW);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@
 	return 0;
 }
 
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
-			u32 seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+			u32 seqno = i915_gem_request_get_seqno(signaller_req);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
- * 
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(ring, 4);
+		ret = ring->semaphore.signal(req, 4);
 	else
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1318,14 +1342,15 @@
  */
 
 static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1343,10 +1368,11 @@
 }
 
 static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1392,8 +1418,9 @@
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1405,7 +1432,7 @@
 	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
 	 * memory before requesting an interrupt.
 	 */
-	ret = intel_ring_begin(ring, 32);
+	ret = intel_ring_begin(req, 32);
 	if (ret)
 		return ret;
 
@@ -1413,8 +1440,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	__intel_ring_advance(ring);
 
@@ -1585,13 +1610,14 @@
 }
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1602,18 +1628,18 @@
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1745,13 +1771,14 @@
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1771,14 +1798,15 @@
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1795,7 +1823,7 @@
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 6 + 2);
+		ret = intel_ring_begin(req, 6 + 2);
 		if (ret)
 			return ret;
 
@@ -1818,7 +1846,7 @@
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1833,13 +1861,14 @@
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2082,7 +2111,6 @@
 
 	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2106,6 +2134,9 @@
 	if (intel_ring_space(ringbuf) >= n)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(request, &ring->request_list, list) {
 		space = __intel_ring_space(request->postfix, ringbuf->tail,
 					   ringbuf->size);
@@ -2124,18 +2155,11 @@
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -2143,21 +2167,11 @@
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;
-	int ret;
-
-	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
-		ret = i915_add_request(ring);
-		if (ret)
-			return ret;
-	}
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@
 	return 0;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
-				int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
+	return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+	WARN_ON(ringbuf->reserved_size);
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_in_use = true;
+	ringbuf->reserved_tail   = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(!ringbuf->reserved_in_use);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is much simpler to accept that skipping the sanity
+		 * check every once in a while is not a big issue.
+		 */
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = ring_wait_for_space(ring, bytes);
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
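
Taken together, the WARN_ONs above imply a strict lifecycle for the reserved space. A sketch of the assumed calling sequence, not a verbatim driver path:

	/*
	 * intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
	 *   ... request creator emits its own commands via intel_ring_begin() ...
	 * intel_ring_reserved_space_use(ringbuf);     / * i915_add_request() only * /
	 *   ... emit the add-request commands from the reserved words ...
	 * intel_ring_reserved_space_end(ringbuf);
	 *
	 * or, if the request is abandoned before submission:
	 *
	 * intel_ring_reserved_space_cancel(ringbuf);
	 */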
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So we need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
+	}
+
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
 }
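
A worked example may make the three-way split above concrete; the numbers are illustrative only:

	/*
	 * size = 4096, effective_size = 4032, tail = 4000,
	 * reserved_size = 160, reserved_in_use = false, bytes = 64:
	 *
	 *   remain_usable = 4032 - 4000 = 32
	 *   remain_actual = 4096 - 4000 = 96
	 *   total_bytes   = 64 + 160   = 224
	 *
	 * bytes (64) > remain_usable (32), so the first branch applies:
	 *   wait_bytes = 96 + 224 = 320, need_wrap = true
	 * i.e. wait until 320 bytes are free, no-op fill the 96-byte
	 * remainder, and continue emitting from tail = 0.
	 */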
 
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
 		     int num_dwords)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	ring = req->ring;
+	dev_priv = ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
@@ -2216,18 +2323,14 @@
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ring->default_context);
-	if (ret)
-		return ret;
-
 	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
@@ -2235,7 +2338,7 @@
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(ring, num_dwords);
+	ret = intel_ring_begin(req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -2252,8 +2355,6 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
-
 	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@
 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2342,20 +2444,23 @@
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	bool ppgtt = USES_PPGTT(ring->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2410,14 +2519,15 @@
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2818,26 +2928,28 @@
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -2845,11 +2957,11 @@
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
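
The two helpers above are asymmetric: intel_ring_invalidate_all_caches() always emits an invalidate of the GPU domains and adds a flush only when gpu_caches_dirty is set, while intel_ring_flush_all_caches() is a no-op unless gpu_caches_dirty is set. A usage sketch under that assumed pairing, with call sites simplified:

	ret = intel_ring_invalidate_all_caches(req);	/* before dispatch */
	if (ret)
		return ret;

	/* ... dispatch the batch via req ... */

	ring->gpu_caches_dirty = true;		/* GPU may have written */
	ret = intel_ring_flush_all_caches(req);	/* before completing req */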
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4be66f6..2e85fda 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -12,6 +12,7 @@
  * workarounds!
  */
 #define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@
 	int space;
 	int size;
 	int effective_size;
+	int reserved_size;
+	int reserved_tail;
+	bool reserved_in_use;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@
 struct	intel_context;
 struct drm_i915_reg_descriptor;
 
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in terms of dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies batch starting position, also helpful in case
+ *    we want to have multiple batches at different offsets based on
+ *    some criteria. It is not a requirement at the moment but provides
+ *    an option for future use.
+ *  size: size of the batch in DWORDS
+ */
+struct  i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct drm_i915_gem_object *obj;
+};
+
 struct  intel_engine_cs {
 	const char	*name;
 	enum intel_ring_id {
@@ -143,6 +166,7 @@
 	struct i915_gem_batch_pool batch_pool;
 
 	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;
 
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
 
-	int		(*init_context)(struct intel_engine_cs *ring,
-					struct intel_context *ctx);
+	int		(*init_context)(struct drm_i915_gem_request *req);
 
 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct intel_engine_cs *ring,
+	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@
 				     bool lazy_coherency);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
 					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
-				   struct intel_engine_cs *to,
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct intel_engine_cs *signaller,
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
 				  /* num_dwords needed by caller */
 				  unsigned int num_dwords);
 	} semaphore;
@@ -243,14 +267,11 @@
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf,
-					struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
+	int		(*emit_request)(struct drm_i915_gem_request *request);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
-	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-					 struct intel_context *ctx,
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
 
 	/**
@@ -272,10 +293,6 @@
 	struct list_head request_list;
 
 	/**
-	 * Do we have some not yet emitted requests outstanding?
-	 */
-	struct drm_i915_gem_request *outstanding_lazy_request;
-	/**
 	 * Seqno of request most recently submitted to request_list.
 	 * Used exclusively by hang checker to avoid grabbing lock while
 	 * inspecting request list.
@@ -408,8 +425,8 @@
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
@@ -426,12 +443,11 @@
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);
 int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -451,11 +467,29 @@
 	return ringbuf->tail;
 }
 
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
-	BUG_ON(ring->outstanding_lazy_request == NULL);
-	return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST	160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
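
The signature changes in this header show the mechanical pattern applied throughout the series: every emission path now takes the request rather than the engine and derives the engine from req->ring internally. A sketch of the resulting shape; some_ring_op is hypothetical, not a driver function:

	static int some_ring_op(struct drm_i915_gem_request *req, u32 value)
	{
		struct intel_engine_cs *ring = req->ring;	/* derive engine */
		int ret;

		ret = intel_ring_begin(req, 2);	/* was: (ring, 2) */
		if (ret)
			return ret;

		intel_ring_emit(ring, value);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
		return 0;
	}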
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a45385..af7fdb3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,22 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 				    int power_well_id);
 
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+				    struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+	power_well->ops->enable(dev_priv, power_well);
+	power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+	power_well->hw_enabled = false;
+	power_well->ops->disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -281,6 +297,7 @@
 	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |                       \
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
@@ -300,6 +317,7 @@
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
@@ -835,12 +853,8 @@
 	return enabled;
 }
 
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-					  struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-	vlv_set_power_well(dev_priv, power_well, true);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +872,33 @@
 	i915_redisable_vga_power_on(dev_priv->dev);
 }
 
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	valleyview_disable_display_irqs(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+					  struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	vlv_display_power_well_init(dev_priv);
+}
+
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
 	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	valleyview_disable_display_irqs(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	vlv_display_power_well_deinit(dev_priv);
 
 	vlv_set_power_well(dev_priv, power_well, false);
-
-	vlv_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +911,8 @@
 	 * display and the reference clock for VGA
 	 * hotplug / manual detection.
 	 */
-	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+		   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +962,14 @@
 	 */
 	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		phy = DPIO_PHY0;
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV);
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	} else {
 		phy = DPIO_PHY1;
-		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	}
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1071,29 @@
 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
 	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
 	chv_set_pipe_power_well(dev_priv, power_well, true);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_enable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-
-		/*
-		 * During driver initialization/resume we can avoid restoring the
-		 * part of the HW/SW state that will be inited anyway explicitly.
-		 */
-		if (dev_priv->power_domains.initializing)
-			return;
-
-		intel_hpd_init(dev_priv);
-
-		i915_redisable_vga_power_on(dev_priv->dev);
-	}
+	vlv_display_power_well_init(dev_priv);
 }
 
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_disable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-	}
+	vlv_display_power_well_deinit(dev_priv);
 
 	chv_set_pipe_power_well(dev_priv, power_well, false);
-
-	if (power_well->data == PIPE_A)
-		vlv_power_sequencer_reset(dev_priv);
 }
 
 /**
@@ -1117,11 +1122,8 @@
 	mutex_lock(&power_domains->lock);
 
 	for_each_power_well(i, power_well, BIT(domain), power_domains) {
-		if (!power_well->count++) {
-			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-			power_well->ops->enable(dev_priv, power_well);
-			power_well->hw_enabled = true;
-		}
+		if (!power_well->count++)
+			intel_power_well_enable(dev_priv, power_well);
 	}
 
 	power_domains->domain_use_count[domain]++;
@@ -1155,11 +1157,8 @@
 	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 		WARN_ON(!power_well->count);
 
-		if (!--power_well->count && i915.disable_power_well) {
-			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-			power_well->hw_enabled = false;
-			power_well->ops->disable(dev_priv, power_well);
-		}
+		if (!--power_well->count && i915.disable_power_well)
+			intel_power_well_disable(dev_priv, power_well);
 	}
 
 	mutex_unlock(&power_domains->lock);
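
Two things fall out of the factored helpers above, as written: hw_enabled is set true only after ->enable() returns and cleared before ->disable() runs, so a reader of hw_enabled never sees it set while the hardware is off or mid-transition; and the refcounting contract stays at the call sites. A sketch of that contract, mirroring the two loops:

	if (!power_well->count++)	/* first user powers the well up */
		intel_power_well_enable(dev_priv, power_well);
	/* ... */
	if (!--power_well->count && i915.disable_power_well)
		intel_power_well_disable(dev_priv, power_well);	/* last user */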
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aa2fd75..c98098e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1508,51 +1508,6 @@
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_crtc *crtc;
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_sdvo->base.base.crtc;
-	if (!crtc) {
-		intel_sdvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We set active outputs manually below in case pipe dpms doesn't change
-	 * due to cloning. */
-	if (mode != DRM_MODE_DPMS_ON) {
-		intel_sdvo_set_active_outputs(intel_sdvo, 0);
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
-		intel_sdvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	} else {
-		intel_sdvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@
 }
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-	.dpms = intel_sdvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8193a35..9d8af2f 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -75,10 +75,8 @@
  * until a subsequent call to intel_pipe_update_end(). That is done to
  * avoid random delays. The value written to @start_vbl_count should be
  * supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
  */
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 {
 	struct drm_device *dev = crtc->base.dev;
 	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@
 	min = vblank_start - usecs_to_scanlines(mode, 100);
 	max = vblank_start - 1;
 
+	local_irq_disable();
+	*start_vbl_count = 0;
+
 	if (min <= 0 || max <= 0)
-		return false;
+		return;
 
 	if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-		return false;
-
-	local_irq_disable();
+		return;
 
 	trace_i915_pipe_update_start(crtc, min, max);
 
@@ -138,8 +137,6 @@
 	*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
 	trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
-	return true;
 }
 
 /**
@@ -161,7 +158,7 @@
 
 	local_irq_enable();
 
-	if (start_vbl_count != end_vbl_count)
+	if (start_vbl_count && start_vbl_count != end_vbl_count)
 		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
 			  pipe_name(pipe), start_vbl_count, end_vbl_count);
 }
@@ -182,7 +179,8 @@
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl, stride_div, stride;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(drm_plane->state)->ckey;
 	unsigned long surf_addr;
 	u32 tile_height, plane_offset, plane_size;
 	unsigned int rotation;
@@ -272,7 +270,7 @@
 }
 
 static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@
 	u32 sprctl;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(dplane->state)->ckey;
 
 	sprctl = SP_ENABLE;
 
@@ -400,10 +399,6 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SP_TILED;
 
-	intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
-				       pixel_size, true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -411,7 +406,8 @@
 	crtc_h--;
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+	sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+							&x, &y,
 							obj->tiling_mode,
 							pixel_size,
 							fb->pitches[0]);
@@ -455,7 +451,7 @@
 }
 
 static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@
 
 	I915_WRITE(SPSURF(pipe, plane), 0);
 	POSTING_READ(SPSURF(pipe, plane));
-
-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -487,7 +481,8 @@
 	u32 sprctl, sprscale = 0;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -546,7 +541,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	sprsurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
@@ -595,7 +591,7 @@
 }
 
 static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@
 	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	dvscntr = DVS_ENABLE;
 
@@ -682,7 +679,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	dvssurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= dvssurf_offset;
 
@@ -722,7 +720,7 @@
 }
 
 static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@
 
 static int
 intel_check_sprite_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
 	struct drm_device *dev = plane->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
-	struct intel_crtc_state *crtc_state;
+	struct drm_crtc *crtc = state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
 	int crtc_x, crtc_y;
@@ -756,15 +755,10 @@
 	int max_scale, min_scale;
 	bool can_scale;
 	int pixel_size;
-	int ret;
-
-	intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
 
 	if (!fb) {
 		state->visible = false;
-		goto finish;
+		return 0;
 	}
 
 	/* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@
 	/* setup can_scale, min_scale, max_scale */
 	if (INTEL_INFO(dev)->gen >= 9) {
 		/* use scaler when colorkey is not required */
-		if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
 			can_scale = 1;
 			min_scale = 1;
 			max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@
 	 * coordinates and sizes. We probably need some way to decide whether
 	 * more strict checking should be done instead.
 	 */
-
 	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
 			state->base.rotation);
 
@@ -812,7 +805,7 @@
 	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
 	BUG_ON(vscale < 0);
 
-	state->visible =  drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
 	crtc_x = dst->x1;
 	crtc_y = dst->y1;
@@ -917,36 +910,6 @@
 	dst->y1 = crtc_y;
 	dst->y2 = crtc_y + crtc_h;
 
-finish:
-	/*
-	 * If the sprite is completely covering the primary plane,
-	 * we can disable the primary and save power.
-	 */
-	if (intel_crtc->active) {
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-
-		if (!state->visible) {
-			/*
-			 * Avoid underruns when disabling the sprite.
-			 * FIXME remove once watermark updates are done properly.
-			 */
-			intel_crtc->atomic.wait_vblank = true;
-			intel_crtc->atomic.update_sprite_watermarks |=
-				(1 << drm_plane_index(plane));
-		}
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
-			state, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
@@ -955,34 +918,27 @@
 			  struct intel_plane_state *state)
 {
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
-	int crtc_x, crtc_y;
-	unsigned int crtc_w, crtc_h;
-	uint32_t src_x, src_y, src_w, src_h;
 
 	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
 
 	plane->fb = fb;
 
-	if (intel_crtc->active) {
-		if (state->visible) {
-			crtc_x = state->dst.x1;
-			crtc_y = state->dst.y1;
-			crtc_w = drm_rect_width(&state->dst);
-			crtc_h = drm_rect_height(&state->dst);
-			src_x = state->src.x1 >> 16;
-			src_y = state->src.y1 >> 16;
-			src_w = drm_rect_width(&state->src) >> 16;
-			src_h = drm_rect_height(&state->src) >> 16;
-			intel_plane->update_plane(plane, crtc, fb,
-						  crtc_x, crtc_y, crtc_w, crtc_h,
-						  src_x, src_y, src_w, src_h);
-		} else {
-			intel_plane->disable_plane(plane, crtc, false);
-		}
+	if (!crtc->state->active)
+		return;
+
+	if (state->visible) {
+		intel_plane->update_plane(plane, crtc, fb,
+					  state->dst.x1, state->dst.y1,
+					  drm_rect_width(&state->dst),
+					  drm_rect_height(&state->dst),
+					  state->src.x1 >> 16,
+					  state->src.y1 >> 16,
+					  drm_rect_width(&state->src) >> 16,
+					  drm_rect_height(&state->src) >> 16);
+	} else {
+		intel_plane->disable_plane(plane, crtc);
 	}
 }
 
@@ -991,7 +947,9 @@
 {
 	struct drm_intel_sprite_colorkey *set = data;
 	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
+	struct drm_plane_state *plane_state;
+	struct drm_atomic_state *state;
+	struct drm_modeset_acquire_ctx ctx;
 	int ret = 0;
 
 	/* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,52 +960,43 @@
 	    set->flags & I915_SET_COLORKEY_DESTINATION)
 		return -EINVAL;
 
-	drm_modeset_lock_all(dev);
-
 	plane = drm_plane_find(dev, set->plane_id);
-	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
-		ret = -ENOENT;
-		goto out_unlock;
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+		return -ENOENT;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
+	state->acquire_ctx = &ctx;
 
-	intel_plane = to_intel_plane(plane);
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* plane scaling and colorkey are mutually exclusive */
-		if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
-			DRM_ERROR("colorkey not allowed with scaler\n");
-			ret = -EINVAL;
-			goto out_unlock;
+	while (1) {
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			to_intel_plane_state(plane_state)->ckey = *set;
+			ret = drm_atomic_commit(state);
 		}
+
+		if (ret != -EDEADLK)
+			break;
+
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
 	}
 
-	intel_plane->ckey = *set;
+	if (ret)
+		drm_atomic_state_free(state);
 
-	/*
-	 * The only way this could fail would be due to
-	 * the current plane state being unsupportable already,
-	 * and we dont't consider that an error for the
-	 * colorkey ioctl. So just ignore any error.
-	 */
-	intel_plane_restore(plane);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
+out:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 	return ret;
 }
 
-int intel_plane_restore(struct drm_plane *plane)
-{
-	if (!plane->crtc || !plane->state->fb)
-		return 0;
-
-	return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
-				       plane->state->crtc_x, plane->state->crtc_y,
-				       plane->state->crtc_w, plane->state->crtc_h,
-				       plane->state->src_x, plane->state->src_y,
-				       plane->state->src_w, plane->state->src_h);
-}
-
 static const uint32_t ilk_plane_formats[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_YUYV,
@@ -1172,9 +1121,9 @@
 
 	intel_plane->pipe = pipe;
 	intel_plane->plane = plane;
+	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
 	intel_plane->check_plane = intel_check_sprite_plane;
 	intel_plane->commit_plane = intel_commit_sprite_plane;
-	intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
 	possible_crtcs = (1 << pipe);
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
@@ -1189,6 +1138,6 @@
 
 	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
- out:
+out:
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 8b9d325..0568ae6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1509,7 +1509,7 @@
 }
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 260389a..9d3c2e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1467,20 +1467,80 @@
 	return ret;
 }
 
+static int wait_for_register(struct drm_i915_private *dev_priv,
+			     const u32 reg,
+			     const u32 mask,
+			     const u32 value,
+			     const unsigned long timeout_ms)
+{
+	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_ring(engine, dev_priv, i) {
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+		if (wait_for_register(dev_priv,
+				      RING_RESET_CTL(engine->mmio_base),
+				      RESET_CTL_READY_TO_RESET,
+				      RESET_CTL_READY_TO_RESET,
+				      700)) {
+			DRM_ERROR("%s: reset request timeout\n", engine->name);
+			goto not_ready;
+		}
+	}
+
+	return gen6_do_reset(dev);
+
+not_ready:
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+	return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+	if (!i915.reset)
+		return NULL;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		return gen8_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset;
+	else if (IS_GEN5(dev))
+		return ironlake_do_reset;
+	else if (IS_G4X(dev))
+		return g4x_do_reset;
+	else if (IS_G33(dev))
+		return g33_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 3)
+		return i915_do_reset;
+	else
+		return NULL;
+}
+
 int intel_gpu_reset(struct drm_device *dev)
 {
-	if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset(dev);
-	else if (IS_GEN5(dev))
-		return ironlake_do_reset(dev);
-	else if (IS_G4X(dev))
-		return g4x_do_reset(dev);
-	else if (IS_G33(dev))
-		return g33_do_reset(dev);
-	else if (INTEL_INFO(dev)->gen >= 3)
-		return i915_do_reset(dev);
-	else
+	int (*reset)(struct drm_device *);
+
+	reset = intel_get_gpu_reset(dev);
+	if (reset == NULL)
 		return -ENODEV;
+
+	return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+	return intel_get_gpu_reset(dev) != NULL;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
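
intel_get_gpu_reset() above is declared with the seldom-seen "function
returning a pointer to function" syntax: static int
(*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *) reads as a
function taking a dev and returning a pointer to a function that takes a dev
and returns int. The same contract is often written through a typedef; a
sketch of the equivalent form (reset_fn_t is my own name, not the driver's):

/* Equivalent declaration via a typedef; reset_fn_t is illustrative. */
typedef int (*reset_fn_t)(struct drm_device *);

static reset_fn_t intel_get_gpu_reset(struct drm_device *dev);

int intel_gpu_reset(struct drm_device *dev)
{
	reset_fn_t reset = intel_get_gpu_reset(dev);

	if (!reset)
		return -ENODEV;

	return reset(dev);
}
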
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a3ecf10..644edf6 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,6 +75,11 @@
 	},
 };
 
+/*
+ * Resistance term 133 Ohm Cfg
+ * PREEMP config 0.00
+ * TX/CK level 10
+ */
 static const struct dw_hdmi_phy_config imx_phy_config[] = {
 	/*pixelclk   symbol   term   vlev */
 	{ 148500000, 0x800d, 0x0005, 0x01ad},
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b..4f2068f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
 	ret = mgag200_bo_reserve(pixels_1, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
-		return ret;
+		goto out_unref;
 	}
 	ret = mgag200_bo_reserve(pixels_2, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
 		mgag200_bo_unreserve(pixels_1);
-		return ret;
+		goto out_unreserve1;
 	}
 
 	if (!handle) {
@@ -106,16 +110,6 @@
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -ENOENT;
-		goto out1;
-	}
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
 	bo = gem_to_mga_bo(obj);
 	ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
@@ -252,7 +246,11 @@
 	if (ret)
 		mga_hide_cursor(mdev);
 	mgag200_bo_unreserve(pixels_1);
+out_unreserve1:
 	mgag200_bo_unreserve(pixels_2);
+out_unref:
+	drm_gem_object_unreference_unlocked(obj);
+
 	return ret;
 }
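
The cursor rework above follows the standard kernel unwind idiom: acquire
resources in order, release them in reverse through stacked goto labels, so
each error path cleans up exactly what has been taken so far. The general
shape, as a self-contained sketch with stub helpers (the struct and helper
names are illustrative only):

struct res { int dummy; };

static int acquire(struct res *r) { return 0; }			/* stub */
static void release(struct res *r) { (void)r; }			/* stub */
static int do_work(struct res *a, struct res *b) { return 0; }	/* stub */

static int do_with_both(struct res *a, struct res *b)
{
	int ret;

	ret = acquire(a);
	if (ret)
		return ret;		/* nothing else to undo yet */

	ret = acquire(b);
	if (ret)
		goto out_release_a;	/* undo only what exists */

	ret = do_work(a, b);

	release(b);
out_release_a:
	release(a);
	return ret;
}
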
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 9774599..b0af774 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -35,6 +35,7 @@
 	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
 	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
 	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+	{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
 	{0,}
 };
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index e9eea1d..912151c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -180,6 +180,7 @@
 	G200_EV,
 	G200_EH,
 	G200_ER,
+	G200_EW3,
 };
 
 #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c36b830..87de15e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -110,7 +110,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -119,7 +119,7 @@
 			  const struct fb_image *image)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -166,8 +166,6 @@
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_gem_object *gobj = NULL;
-	struct device *device = &dev->pdev->dev;
-	struct mgag200_bo *bo;
 	int ret;
 	void *sysram;
 	int size;
@@ -185,15 +183,14 @@
 		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
 		return ret;
 	}
-	bo = gem_to_mga_bo(gobj);
 
 	sysram = vmalloc(size);
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = mfbdev;
 
@@ -208,14 +205,6 @@
 
 	/* setup helper */
 	mfbdev->helper.fb = fb;
-	mfbdev->helper.fbdev = info;
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out;
-	}
 
 	strcpy(info->fix.id, "mgadrmfb");
 
@@ -223,11 +212,6 @@
 	info->fbops = &mgag200fb_ops;
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = mdev->mc.vram_size;
 
@@ -242,24 +226,15 @@
 	DRM_DEBUG_KMS("allocated %dx%d\n",
 		      fb->width, fb->height);
 	return 0;
-out:
-	return ret;
 }
 
 static int mga_fbdev_destroy(struct drm_device *dev,
 				struct mga_fbdev *mfbdev)
 {
-	struct fb_info *info;
 	struct mga_framebuffer *mfb = &mfbdev->mfb;
 
-	if (mfbdev->helper.fbdev) {
-		info = mfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&mfbdev->helper);
+	drm_fb_helper_release_fbi(&mfbdev->helper);
 
 	if (mfb->obj) {
 		drm_gem_object_unreference_unlocked(mfb->obj);
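
The fbdev conversion above replaces the manual framebuffer_alloc(),
fb_alloc_cmap() and alloc_apertures() sequence with the drm_fb_helper_*
wrappers, which allocate those pieces in one call and pair setup with
teardown. A condensed sketch of the resulting pairing, using only the
helpers visible in the hunk:

static int fbdev_setup(struct drm_fb_helper *helper)
{
	struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

	if (IS_ERR(info))
		return PTR_ERR(info);	/* cmap and apertures come pre-allocated */

	/* ... fill info->fbops, fix/var and aperture ranges ... */
	return 0;
}

static void fbdev_teardown(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
}
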
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index d3dcf54..10535e3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -101,6 +101,7 @@
 	case G200_SE_B:
 	case G200_EV:
 	case G200_WB:
+	case G200_EW3:
 		data = 1;
 		clock = 2;
 		break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b..de06388 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -82,12 +82,19 @@
 	int orig;
 	int test1, test2;
 	int orig1, orig2;
+	unsigned int vram_size;
 
 	/* Probe */
 	orig = ioread16(mem);
 	iowrite16(0, mem);
 
-	for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+	vram_size = mdev->mc.vram_window;
+
+	if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
+		vram_size = vram_size - 0x400000;
+	}
+
+	for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
 		orig1 = ioread8(mem + offset);
 		orig2 = ioread8(mem + offset + 0x100);
 
@@ -345,23 +352,15 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct mgag200_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_mga_bo(obj);
 	*offset = mgag200_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ad4b901..c99d3fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -104,6 +104,8 @@
 	return true;
 }
 
+#define P_ARRAY_SIZE 9
+
 static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
@@ -111,37 +113,97 @@
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
+	unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+	unsigned int fvv;
+	unsigned int i;
 
-	m = n = p = 0;
-	vcomax = 320000;
-	vcomin = 160000;
-	pllreffreq = 25000;
+	if (mdev->unique_rev_id <= 0x03) {
 
-	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
+		m = n = p = 0;
+		vcomax = 320000;
+		vcomin = 160000;
+		pllreffreq = 25000;
 
-	for (testp = 8; testp > 0; testp /= 2) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+		delta = 0xffffffff;
+		permitteddelta = clock * 5 / 1000;
 
-		for (testn = 17; testn < 256; testn++) {
-			for (testm = 1; testm < 32; testm++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					m = testm - 1;
-					n = testn - 1;
-					p = testp - 1;
+		for (testp = 8; testp > 0; testp /= 2) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testn = 17; testn < 256; testn++) {
+				for (testm = 1; testm < 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
 				}
 			}
 		}
+	} else {
+
+
+		m = n = p = 0;
+		vcomax        = 1600000;
+		vcomin        = 800000;
+		pllreffreq    = 25000;
+
+		if (clock < 25000)
+			clock = 25000;
+
+		clock = clock * 2;
+
+		delta = 0xFFFFFFFF;
+		/* Permitted delta is 0.5% per the VESA specification */
+		permitteddelta = clock * 5 / 1000;
+
+		for (i = 0 ; i < P_ARRAY_SIZE ; i++) {
+			testp = pvalues_e4[i];
+
+			if ((clock * testp) > vcomax)
+				continue;
+			if ((clock * testp) < vcomin)
+				continue;
+
+			for (testn = 50; testn <= 256; testn++) {
+				for (testm = 1; testm <= 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
+				}
+			}
+		}
+
+		fvv = pllreffreq * testn / testm;
+		fvv = (fvv - 800000) / 50000;
+
+		if (fvv > 15)
+			fvv = 15;
+
+		p |= (fvv << 4);
+		m |= 0x80;
+
+		clock = clock / 2;
 	}
 
 	if (delta > permitteddelta) {
@@ -158,8 +220,8 @@
 static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
-	unsigned int testp, testm, testn;
+	unsigned int delta, tmpdelta;
+	unsigned int testp, testm, testn, testp2;
 	unsigned int p, m, n;
 	unsigned int computed;
 	int i, j, tmpcount, vcount;
@@ -167,32 +229,71 @@
 	u8 tmp;
 
 	m = n = p = 0;
-	vcomax = 550000;
-	vcomin = 150000;
-	pllreffreq = 48000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
-	for (testp = 1; testp < 9; testp++) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+	if (mdev->type == G200_EW3) {
 
-		for (testm = 1; testm < 17; testm++) {
-			for (testn = 1; testn < 151; testn++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					n = testn - 1;
-					m = (testm - 1) | ((n >> 1) & 0x80);
-					p = testp - 1;
+		vcomax = 800000;
+		vcomin = 400000;
+		pllreffreq = 25000;
+
+		for (testp = 1; testp < 8; testp++) {
+			for (testp2 = 1; testp2 < 8; testp2++) {
+				if (testp < testp2)
+					continue;
+				if ((clock * testp * testp2) > vcomax)
+					continue;
+				if ((clock * testp * testp2) < vcomin)
+					continue;
+				for (testm = 1; testm < 26; testm++) {
+					for (testn = 32; testn < 2048 ; testn++) {
+						computed = (pllreffreq * testn) /
+							(testm * testp * testp2);
+						if (computed > clock)
+							tmpdelta = computed - clock;
+						else
+							tmpdelta = clock - computed;
+						if (tmpdelta < delta) {
+							delta = tmpdelta;
+							m = ((testn & 0x100) >> 1) |
+								(testm);
+							n = (testn & 0xFF);
+							p = ((testn & 0x600) >> 3) |
+								(testp2 << 3) |
+								(testp);
+						}
+					}
+				}
+			}
+		}
+	} else {
+
+		vcomax = 550000;
+		vcomin = 150000;
+		pllreffreq = 48000;
+
+		for (testp = 1; testp < 9; testp++) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testm = 1; testm < 17; testm++) {
+				for (testn = 1; testn < 151; testn++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						n = testn - 1;
+						m = (testm - 1) |
+							((n >> 1) & 0x80);
+						p = testp - 1;
+					}
 				}
 			}
 		}
@@ -298,7 +399,7 @@
 static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -310,7 +411,6 @@
 	pllreffreq = 50000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp--) {
 		if (clock * testp > vcomax)
@@ -392,7 +492,7 @@
 static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -406,7 +506,6 @@
 	pllreffreq = 33333;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp >>= 1) {
 		if (clock * testp > vcomax)
@@ -572,6 +671,7 @@
 		return mga_g200se_set_plls(mdev, clock);
 		break;
 	case G200_WB:
+	case G200_EW3:
 		return mga_g200wb_set_plls(mdev, clock);
 		break;
 	case G200_EV:
@@ -823,6 +923,7 @@
 		option2 = 0x00008000;
 		break;
 	case G200_WB:
+	case G200_EW3:
 		dacvalue[MGA1064_VREF_CTL] = 0x07;
 		option = 0x41049120;
 		option2 = 0x0000b000;
@@ -878,7 +979,10 @@
 		if (IS_G200_SE(mdev) &&
 		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
 			continue;
-		if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+		if ((mdev->type == G200_EV ||
+		    mdev->type == G200_WB ||
+		    mdev->type == G200_EH ||
+		    mdev->type == G200_EW3) &&
 		    (i >= 0x44) && (i <= 0x4e))
 			continue;
 
@@ -980,7 +1084,7 @@
 	else
 		ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
 	ext_vga[4] = 0;
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		ext_vga[1] |= 0x88;
 
 	/* Set pixel clocks */
@@ -996,6 +1100,9 @@
 	if (mdev->type == G200_ER)
 		WREG_ECRT(0x24, 0x5);
 
+	if (mdev->type == G200_EW3)
+		WREG_ECRT(0x34, 0x5);
+
 	if (mdev->type == G200_EV) {
 		WREG_ECRT(6, 0);
 	}
@@ -1208,7 +1315,7 @@
 		WREG_SEQ(1, tmp | 0x20);
 	}
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_prepare(crtc);
 
 	WREG_CRT(17, 0);
@@ -1225,7 +1332,7 @@
 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	u8 tmp;
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_commit(crtc);
 
 	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
@@ -1495,7 +1602,7 @@
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (24400 * 1024))
 				return MODE_BANDWIDTH;
-		} else if (mdev->unique_rev_id >= 0x02) {
+		} else if (mdev->unique_rev_id == 0x02) {
 			if (mode->hdisplay > 1920)
 				return MODE_VIRTUAL_X;
 			if (mode->vdisplay > 1200)
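
All of the mga_g200*_set_plls() variants above share one algorithm: walk the
legal (p, m, n) divider space exhaustively, compute pllreffreq * n / (m * p),
and keep the triple whose output is closest to the requested clock, subject to
the VCO staying inside its band. A self-contained userspace model of that
search; the bounds mirror the pre-EW3 G200WB numbers from the hunk, and the
variant-specific register packing of m/n/p is deliberately omitted:

#include <stdio.h>

static unsigned int pll_search(unsigned int clock, /* kHz */
			       unsigned int *best_m, unsigned int *best_n,
			       unsigned int *best_p)
{
	const unsigned int vcomax = 550000, vcomin = 150000, pllreffreq = 48000;
	unsigned int delta = 0xffffffff;
	unsigned int testp, testm, testn;

	for (testp = 1; testp < 9; testp++) {
		if (clock * testp > vcomax || clock * testp < vcomin)
			continue;	/* VCO must stay inside its legal band */

		for (testm = 1; testm < 17; testm++) {
			for (testn = 1; testn < 151; testn++) {
				unsigned int computed =
					(pllreffreq * testn) / (testm * testp);
				unsigned int tmpdelta = computed > clock ?
					computed - clock : clock - computed;

				if (tmpdelta < delta) {
					delta = tmpdelta;
					*best_m = testm - 1;
					*best_n = testn - 1;
					*best_p = testp - 1;
				}
			}
		}
	}
	return delta;	/* caller compares against the permitted 0.5% */
}

int main(void)
{
	unsigned int m = 0, n = 0, p = 0;
	unsigned int delta = pll_search(65000, &m, &n, &p);

	printf("delta=%u m=%u n=%u p=%u\n", delta, m, n, p);
	return 0;
}
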
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d16964e..05108b5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -378,7 +378,7 @@
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
-	int i, ret;
+	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
 		return 0;
@@ -389,11 +389,7 @@
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-	if (ret)
-		return ret;
-
-	return 0;
+	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0..8e6c7c6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@
 	select DRM_PANEL
 	select SHMEM
 	select TMPFS
+	select QCOM_SCM
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@
 	help
 	  Choose this option to enable DSI PLL driver which provides DSI
 	  source clocks under common clock framework.
+
+config DRM_MSM_DSI_28NM_PHY
+	bool "Enable DSI 28nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+	bool "Enable DSI 20nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b9..0a543eb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 
 msm-y := \
 	adreno/adreno_device.o \
@@ -10,6 +10,7 @@
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
+	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
 
 msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_cfg.o \
 			dsi/dsi_host.o \
 			dsi/dsi_manager.o \
-			dsi/dsi_phy.o \
+			dsi/phy/dsi_phy.o \
 			mdp/mdp5/mdp5_cmd_encoder.o
 
-msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
-				dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+endif
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e4..0261f0d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5..48d1337 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@
 	A3XX_TEX_3D = 3,
 };
 
+enum a3xx_tex_msaa {
+	A3XX_TPL1_MSAA1X = 0,
+	A3XX_TPL1_MSAA2X = 1,
+	A3XX_TPL1_MSAA4X = 2,
+	A3XX_TPL1_MSAA8X = 3,
+};
+
 #define A3XX_INT0_RBBM_GPU_IDLE					0x00000001
 #define A3XX_INT0_RBBM_AHB_ERROR				0x00000002
 #define A3XX_INT0_RBBM_REG_TIMEOUT				0x00000004
@@ -2652,6 +2659,7 @@
 #define REG_A3XX_VGT_IMMED_DATA					0x000021fd
 
 #define REG_A3XX_TEX_SAMP_0					0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE				0x00000001
 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR			0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK				0x0000000c
 #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT				2
@@ -2695,6 +2703,7 @@
 {
 	return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
 }
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF			0x01000000
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS				0x80000000
 
 #define REG_A3XX_TEX_SAMP_1					0x00000001
@@ -2750,6 +2759,12 @@
 {
 	return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A3XX_TEX_CONST_0_MSAATEX__MASK				0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT				20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+	return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
 #define A3XX_TEX_CONST_0_FMT__MASK				0x1fc00000
 #define A3XX_TEX_CONST_0_FMT__SHIFT				22
 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@
 }
 
 #define REG_A3XX_TEX_CONST_2					0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK				0x000000ff
+#define A3XX_TEX_CONST_2_INDX__MASK				0x000001ff
 #define A3XX_TEX_CONST_2_INDX__SHIFT				0
 static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
 {
@@ -2805,7 +2820,7 @@
 }
 
 #define REG_A3XX_TEX_CONST_3					0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x00007fff
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x0001ffff
 #define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT			0
 static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
 {
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf..ac55066 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@
 	DEPTH4_NONE = 0,
 	DEPTH4_16 = 1,
 	DEPTH4_24_8 = 2,
+	DEPTH4_32 = 3,
 };
 
 enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@
 	return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
 #define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB				0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0xffffc000
 #define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT		14
 static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
 {
@@ -439,7 +440,7 @@
 static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
 
 static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x03fffff8
 #define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT			3
 static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
 {
@@ -570,6 +571,15 @@
 	return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
 }
 
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL			0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY			0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK			0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT		2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+	return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
 #define REG_A4XX_RB_RENDER_COMPONENTS				0x000020fb
 #define A4XX_RB_RENDER_COMPONENTS_RT0__MASK			0x0000000f
 #define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT			0
@@ -811,6 +821,23 @@
 #define REG_A4XX_RB_STENCIL_CONTROL2				0x00002107
 #define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER			0x00000001
 
+#define REG_A4XX_RB_STENCIL_INFO				0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL			0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK			0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT		12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+	return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH				0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK				0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT				0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
 #define REG_A4XX_RB_STENCILREFMASK				0x0000210b
 #define A4XX_RB_STENCILREFMASK_STENCILREF__MASK			0x000000ff
 #define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT		0
@@ -1433,6 +1460,7 @@
 {
 	return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
 }
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB				0x00040000
 
 #define REG_A4XX_SP_CS_CTRL_REG0				0x00002300
 
@@ -1470,6 +1498,76 @@
 
 #define REG_A4XX_SP_HS_LENGTH_REG				0x00002312
 
+#define REG_A4XX_SP_DS_PARAM_REG				0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_DS_OBJ_OFFSET_REG				0x00002334
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1492,6 +1590,82 @@
 
 #define REG_A4XX_SP_DS_LENGTH_REG				0x00002339
 
+#define REG_A4XX_SP_GS_PARAM_REG				0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK			0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT			8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_GS_OBJ_OFFSET_REG				0x0000235b
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1693,6 +1867,18 @@
 {
 	return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
 }
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK			0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT			16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK			0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT			24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
 
 #define REG_A4XX_VFD_CONTROL_4					0x00002204
 
@@ -2489,6 +2675,8 @@
 
 #define REG_A4XX_UNKNOWN_22D7					0x000022d7
 
+#define REG_A4XX_UNKNOWN_2352					0x00002352
+
 #define REG_A4XX_TEX_SAMP_0					0x00000000
 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR			0x00000001
 #define A4XX_TEX_SAMP_0_XY_MAG__MASK				0x00000006
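
Note that the new address-field helpers above differ from the plain value
helpers: A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR() and friends shift the value right
before packing, because the register stores an aligned address whose low bits
are implied zero. A tiny self-contained check of that round-trip (macro names
here are illustrative stand-ins):

#include <assert.h>
#include <stdint.h>

/* Mirrors A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(): the register keeps
 * bits [31:2] of a 4-byte-aligned address; bits [1:0] are implied 0. */
#define ADDR_MASK	0xfffffffcu
#define ADDR_SHIFT	2

static inline uint32_t pack_addr(uint32_t val)
{
	return ((val >> 2) << ADDR_SHIFT) & ADDR_MASK;
}

int main(void)
{
	/* A 4-byte-aligned address round-trips unchanged. */
	assert(pack_addr(0x1000) == 0x1000);
	/* Misaligned low bits are dropped, not shifted into the field. */
	assert(pack_addr(0x1003) == 0x1000);
	return 0;
}
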
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1f..399a9e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23b..41904fe 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@
 
 enum pc_di_primtype {
 	DI_PT_NONE = 0,
-	DI_PT_POINTLIST_A2XX = 1,
+	DI_PT_POINTLIST_PSIZE = 1,
 	DI_PT_LINELIST = 2,
 	DI_PT_LINESTRIP = 3,
 	DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@
 	DI_PT_TRISTRIP = 6,
 	DI_PT_LINELOOP = 7,
 	DI_PT_RECTLIST = 8,
-	DI_PT_POINTLIST_A3XX = 9,
+	DI_PT_POINTLIST = 9,
 	DI_PT_LINE_ADJ = 10,
 	DI_PT_LINESTRIP_ADJ = 11,
 	DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e..6edcd6f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
 
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 {
-	if (!msm_dsi || !msm_dsi->panel)
+	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;
 
-	return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
 		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
 		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
 }
@@ -74,19 +74,15 @@
 
 static struct msm_dsi *dsi_init(struct platform_device *pdev)
 {
-	struct msm_dsi *msm_dsi = NULL;
+	struct msm_dsi *msm_dsi;
 	int ret;
 
-	if (!pdev) {
-		ret = -ENXIO;
-		goto fail;
-	}
+	if (!pdev)
+		return ERR_PTR(-ENXIO);
 
 	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
-	if (!msm_dsi) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!msm_dsi)
+		return ERR_PTR(-ENOMEM);
 	DBG("dsi probed=%p", msm_dsi);
 
 	msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@
 	/* Init dsi host */
 	ret = msm_dsi_host_init(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* GET dsi PHY */
 	ret = dsi_get_phy(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* Register to dsi manager */
 	ret = msm_dsi_manager_register(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	return msm_dsi;
 
-fail:
-	if (msm_dsi)
-		dsi_destroy(msm_dsi);
-
+destroy_dsi:
+	dsi_destroy(msm_dsi);
 	return ERR_PTR(ret);
 }
 
@@ -196,6 +190,7 @@
 		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_bridge *ext_bridge;
 	int ret, i;
 
 	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@
 		msm_dsi->encoders[i] = encoders[i];
 	}
 
-	msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+	/*
+	 * Check if the dsi encoder output is connected to a panel or an
+	 * external bridge. We create a connector only if we're connected to a
+	 * drm_panel device. When we're connected to an external bridge, we
+	 * assume that the drm_bridge driver will create the connector itself.
+	 */
+	ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+	if (ext_bridge)
+		msm_dsi->connector =
+			msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+	else
+		msm_dsi->connector =
+			msm_dsi_manager_connector_init(msm_dsi->id);
+
 	if (IS_ERR(msm_dsi->connector)) {
 		ret = PTR_ERR(msm_dsi->connector);
-		dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+		dev_err(dev->dev,
+			"failed to create dsi connector: %d\n", ret);
 		msm_dsi->connector = NULL;
 		goto fail;
 	}
@@ -242,10 +252,12 @@
 			msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
 			msm_dsi->bridge = NULL;
 		}
-		if (msm_dsi->connector) {
+
+		/* don't destroy connector if we didn't make it */
+		if (msm_dsi->connector && !msm_dsi->external_bridge)
 			msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-			msm_dsi->connector = NULL;
-		}
+
+		msm_dsi->connector = NULL;
 	}
 
 	return ret;
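
The dsi_init() rework above converts an all-purpose "fail:" label into the
more idiomatic shape: early returns while nothing needs unwinding, a named
label once cleanup is required, and ERR_PTR() so callers can test the result
with IS_ERR()/PTR_ERR(). A minimal sketch of that shape (the foo_* names are
illustrative, not the driver's):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static int foo_setup(struct foo *f) { return 0; }	/* stub */
static void foo_destroy(struct foo *f) { kfree(f); }

static struct foo *foo_init(void)
{
	struct foo *f;
	int ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);	/* nothing to unwind yet */

	ret = foo_setup(f);
	if (ret)
		goto destroy_foo;		/* unwind exactly what exists */

	return f;

destroy_foo:
	foo_destroy(f);
	return ERR_PTR(ret);
}
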
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697d..5f5a373 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
 #define DSI_1	1
 #define DSI_MAX	2
 
-#define DSI_CLOCK_MASTER	DSI_0
-#define DSI_CLOCK_SLAVE		DSI_1
-
-#define DSI_LEFT		DSI_0
-#define DSI_RIGHT		DSI_1
-
-/* According to the current drm framework sequence, take the encoder of
- * DSI_1 as master encoder
- */
-#define DSI_ENCODER_MASTER	DSI_1
-#define DSI_ENCODER_SLAVE	DSI_0
-
 enum msm_dsi_phy_type {
 	MSM_DSI_PHY_28NM_HPM,
 	MSM_DSI_PHY_28NM_LP,
+	MSM_DSI_PHY_20NM,
 	MSM_DSI_PHY_MAX
 };
 
@@ -65,13 +54,21 @@
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
+	/* connector managed by us when we're connected to a drm_panel */
 	struct drm_connector *connector;
+	/* internal dsi bridge attached to MDP interface */
 	struct drm_bridge *bridge;
 
 	struct mipi_dsi_host *host;
 	struct msm_dsi_phy *phy;
+
+	/*
+	 * panel/external_bridge connected to dsi bridge output; only one of the
+	 * two can be valid at a time
+	 */
 	struct drm_panel *panel;
-	unsigned long panel_flags;
+	struct drm_bridge *external_bridge;
+	unsigned long device_flags;
 
 	struct device *phy_dev;
 	bool phy_enabled;
@@ -86,6 +83,7 @@
 struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
 struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
 int msm_dsi_manager_phy_enable(int id,
 		const unsigned long bit_rate, const unsigned long esc_rate,
 		u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
 /* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+	return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
 
 /* dsi pll */
@@ -106,6 +109,8 @@
 void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
 int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
 #else
 static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			 enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@
 {
 	return -ENODEV;
 }
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	return 0;
+}
 #endif
 
 /* dsi host */
@@ -140,6 +152,7 @@
 					struct drm_display_mode *mode);
 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
 					unsigned long *panel_flags);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@
 struct msm_dsi_phy;
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	const unsigned long bit_rate, const unsigned long esc_rate);
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
 					u32 *clk_pre, u32 *clk_post);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
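
The new msm_dsi_pll_save_state()/msm_dsi_pll_restore_state() pair extends the
usual Kconfig stub idiom already used in this header: real prototypes when the
feature is built in, empty static inlines otherwise, so callers never need
#ifdefs of their own. The general pattern, sketched with illustrative names
(CONFIG_FOO and foo_* are not real symbols):

struct foo;

#ifdef CONFIG_FOO
void foo_save_state(struct foo *f);
int foo_restore_state(struct foo *f);
#else
static inline void foo_save_state(struct foo *f)
{
}
static inline int foo_restore_state(struct foo *f)
{
	return 0;	/* feature absent: restoring nothing succeeds */
}
#endif
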
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea0..1d2e32f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@
 #define REG_DSI_TRIG_DMA					0x0000008c
 
 #define REG_DSI_DLN0_PHY_ERR					0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC				0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC			0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL			0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0		0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1		0x00010000
 
 #define REG_DSI_TIMEOUT_STATUS					0x000000bc
 
@@ -435,6 +440,9 @@
 #define REG_DSI_PHY_RESET					0x00000128
 #define DSI_PHY_RESET_RESET					0x00000001
 
+#define REG_DSI_T_CLK_PRE_EXTEND				0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK			0x00000001
+
 #define REG_DSI_RDBK_DATA_CTRL					0x000001d0
 #define DSI_RDBK_DATA_CTRL_COUNT__MASK				0x00ff0000
 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT				16
@@ -830,6 +838,7 @@
 #define REG_DSI_28nm_PHY_BIST_CTRL_5				0x000001c8
 
 #define REG_DSI_28nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
 
 #define REG_DSI_28nm_PHY_LDO_CNTRL				0x000001dc
 
@@ -994,5 +1003,185 @@
 
 #define REG_DSI_28nm_PHY_PLL_CTRL_54				0x000000d4
 
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL				0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
 
 #endif /* DSI_XML */
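
In the generated header above, every multi-bit field pairs a __MASK/__SHIFT define with an inline helper that shifts a value into position and masks it, so an out-of-range input cannot spill into a neighbouring field; single-bit fields get a plain bit define. A minimal sketch of composing a register value from these helpers (the field values are made up for illustration, not recommended settings):

  #include <stdint.h>
  #include "dsi.xml.h"	/* the generated header above */

  /* illustrative only: pack both fields of the 20nm PHY TIMING_CTRL_9
   * register, TA_GO in bits [2:0] and TA_SURE in bits [6:4] */
  static uint32_t example_timing_ctrl_9(void)
  {
  	return DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(3) |
  	       DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(0);
  }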
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 0000000..5872d5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_cfg.h"
+
+/* DSI v2 is not supported yet */
+static const struct msm_dsi_config dsi_v2_cfg = {
+	.io_offset = 0,
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 3000000, 3000000, 150000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 2850000, 2850000, 100000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 7,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdda", 1250000, 1250000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+			{"vdd", 1800000, 1800000, 100000, 100},
+			{"lab_reg", -1, -1, -1, -1},
+			{"ibb_reg", -1, -1, -1, -1},
+		},
+	}
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+	{MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+	int i;
+
+	for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+		if ((dsi_cfg_handlers[i].major == major) &&
+			(dsi_cfg_handlers[i].minor == minor)) {
+			cfg_hnd = &dsi_cfg_handlers[i];
+			break;
+		}
+	}
+
+	return cfg_hnd;
+}
+
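
msm_dsi_cfg_get() scans the handler table from the last entry back to the first and returns NULL when no entry matches the major/minor pair, so an unknown controller revision surfaces as a lookup failure instead of a silent fallback. A minimal usage sketch (hypothetical caller; the dsi_host.c hunk below performs the same NULL check):

  #include "dsi_cfg.h"

  /* illustrative only: resolve the per-SoC config for a DSI 6G v1.3.1 block */
  static int example_cfg_lookup(void)
  {
  	const struct msm_dsi_cfg_handler *cfg_hnd;

  	cfg_hnd = msm_dsi_cfg_get(MSM_DSI_VER_MAJOR_6G,
  				  MSM_DSI_6G_VER_MINOR_V1_3_1);
  	if (!cfg_hnd)
  		return -EINVAL;	/* unknown hardware revision */

  	/* all 6G entries shift register I/O by DSI_6G_REG_SHIFT */
  	return cfg_hnd->cfg->io_offset;
  }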
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 0000000..4cf8872
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2	0x02
+#define MSM_DSI_VER_MAJOR_6G	0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+
+#define DSI_6G_REG_SHIFT	4
+
+struct msm_dsi_config {
+	u32 io_offset;
+	struct dsi_reg_config reg_cfg;
+};
+
+struct msm_dsi_cfg_handler {
+	u32 major;
+	u32 minor;
+	const struct msm_dsi_config *cfg;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de04009..8d82973 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <video/mipi_display.h>
 
 #include "dsi.h"
 #include "dsi.xml.h"
-
-#define MSM_DSI_VER_MAJOR_V2	0x02
-#define MSM_DSI_VER_MAJOR_6G	0x03
-#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
-#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
-#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
-#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
-#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
-
-#define DSI_6G_REG_SHIFT	4
-
-struct dsi_config {
-	u32 major;
-	u32 minor;
-	u32 io_offset;
-	struct dsi_reg_config reg_cfg;
-};
-
-static const struct dsi_config dsi_cfgs[] = {
-	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
-	{ /* 8974 v1 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v2 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v3 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8084 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8916 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 2850000, 2850000, 100000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-};
+#include "dsi_cfg.h"
 
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
@@ -194,7 +106,7 @@
 	struct gpio_desc *disp_en_gpio;
 	struct gpio_desc *te_gpio;
 
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd;
 
 	struct completion dma_comp;
 	struct completion video_comp;
@@ -212,8 +124,8 @@
 
 	struct drm_display_mode *mode;
 
-	/* Panel info */
-	struct device_node *panel_node;
+	/* connected device info */
+	struct device_node *device_node;
 	unsigned int channel;
 	unsigned int lanes;
 	enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@
 
 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 {
-	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	return msm_readl(msm_host->ctrl_base + reg);
 }
 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 {
-	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	msm_writel(data, msm_host->ctrl_base + reg);
 }
 
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 
-static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+						struct msm_dsi_host *msm_host)
 {
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 	struct regulator *gdsc_reg;
-	int i, ret;
+	int ret;
 	u32 major = 0, minor = 0;
 
 	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
 	if (IS_ERR(gdsc_reg)) {
 		pr_err("%s: cannot get gdsc\n", __func__);
-		goto fail;
+		goto exit;
 	}
 	ret = regulator_enable(gdsc_reg);
 	if (ret) {
 		pr_err("%s: unable to enable gdsc\n", __func__);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto put_gdsc;
 	}
 	ret = clk_prepare_enable(msm_host->ahb_clk);
 	if (ret) {
 		pr_err("%s: unable to enable ahb_clk\n", __func__);
-		regulator_disable(gdsc_reg);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto disable_gdsc;
 	}
 
 	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
-
-	clk_disable_unprepare(msm_host->ahb_clk);
-	regulator_disable(gdsc_reg);
-	regulator_put(gdsc_reg);
 	if (ret) {
 		pr_err("%s: Invalid version\n", __func__);
-		goto fail;
+		goto disable_clks;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
-		cfg = dsi_cfgs + i;
-		if ((cfg->major == major) && (cfg->minor == minor))
-			return cfg;
-	}
-	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+	cfg_hnd = msm_dsi_cfg_get(major, minor);
 
-fail:
-	return NULL;
+	DBG("%s: Version %x:%x\n", __func__, major, minor);
+
+disable_clks:
+	clk_disable_unprepare(msm_host->ahb_clk);
+disable_gdsc:
+	regulator_disable(gdsc_reg);
+put_gdsc:
+	regulator_put(gdsc_reg);
+exit:
+	return cfg_hnd;
 }
 
 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
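
The reworked dsi_get_config() above drops the duplicated regulator_disable()/regulator_put() calls from each error path in favour of the kernel's label-unwind idiom: resources are released in reverse order of acquisition, an early failure jumps past the releases for anything it never acquired, and the success path falls through the whole unwind. The general shape, with hypothetical acquire_*/release_* helpers standing in for the regulator and clock calls:

  #include <errno.h>

  int acquire_a(void);		/* hypothetical helpers */
  int acquire_b(void);
  int do_work(void);		/* needs both a and b held */
  void release_b_res(void);
  void release_a_res(void);

  static int example_unwind(void)
  {
  	int result = -ENODEV;	/* like dsi_get_config(), a failure just
  				 * leaves the default result in place */
  	int ret;

  	ret = acquire_a();
  	if (ret)
  		goto exit;
  	ret = acquire_b();
  	if (ret)
  		goto release_a;
  	if (do_work())
  		goto release_b;

  	result = 0;		/* success falls through the unwind */

  release_b:
  	release_b_res();
  release_a:
  	release_a_res();
  exit:
  	return result;
  }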
@@ -304,8 +213,8 @@
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i;
 
 	DBG("");
@@ -320,8 +229,8 @@
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int ret, i;
 
 	DBG("");
@@ -354,8 +263,8 @@
 static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i, ret;
 
 	for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@
 {
 	u32 flags = msm_host->mode_flags;
 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	u32 data = 0;
 
 	if (!enable) {
@@ -750,8 +660,8 @@
 	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
-	if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-		(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 
@@ -1257,7 +1167,11 @@
 
 	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
 
-	if (status) {
+	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
 		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
 		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
 	}
@@ -1359,7 +1273,8 @@
 		return PTR_ERR(msm_host->disp_en_gpio);
 	}
 
-	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
+	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+								GPIOD_IN);
 	if (IS_ERR(msm_host->te_gpio)) {
 		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
 		return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@
 	msm_host->format = dsi->format;
 	msm_host->mode_flags = dsi->mode_flags;
 
-	msm_host->panel_node = dsi->dev.of_node;
+	WARN_ON(dsi->dev.of_node != msm_host->device_node);
 
 	/* Some gpios defined in panel DT need to be controlled by host */
 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
-	msm_host->panel_node = NULL;
+	msm_host->device_node = NULL;
 
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
@@ -1429,6 +1344,48 @@
 	.transfer = dsi_host_transfer,
 };
 
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *endpoint, *device_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
+	if (ret) {
+		dev_err(dev, "%s: host index not specified, ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/*
+	 * Get the first endpoint node. In our case, dsi has one output port
+	 * to which the panel is connected. Don't return an error if a port
+	 * isn't defined. It's possible that there is nothing connected to
+	 * the dsi output.
+	 */
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_dbg(dev, "%s: no endpoint\n", __func__);
+		return 0;
+	}
+
+	/* Get panel node from the output port's endpoint data */
+	device_node = of_graph_get_remote_port_parent(endpoint);
+	if (!device_node) {
+		dev_err(dev, "%s: no valid device\n", __func__);
+		of_node_put(endpoint);
+		return -ENODEV;
+	}
+
+	of_node_put(endpoint);
+	of_node_put(device_node);
+
+	msm_host->device_node = device_node;
+
+	return 0;
+}
+
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@
 		goto fail;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,dsi-host-index", &msm_host->id);
+	msm_host->pdev = pdev;
+
+	ret = dsi_host_parse_dt(msm_host);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: host index not specified, ret=%d\n",
-			__func__, ret);
+		pr_err("%s: failed to parse dt\n", __func__);
 		goto fail;
 	}
-	msm_host->pdev = pdev;
 
 	ret = dsi_clk_init(msm_host);
 	if (ret) {
@@ -1466,13 +1421,16 @@
 		goto fail;
 	}
 
-	msm_host->cfg = dsi_get_config(msm_host);
-	if (!msm_host->cfg) {
+	msm_host->cfg_hnd = dsi_get_config(msm_host);
+	if (!msm_host->cfg_hnd) {
 		ret = -EINVAL;
 		pr_err("%s: get config failed\n", __func__);
 		goto fail;
 	}
 
+	/* fixup base address by io offset */
+	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
 	ret = dsi_regulator_init(msm_host);
 	if (ret) {
 		pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	struct device_node *node;
 	int ret;
 
 	/* Register mipi dsi host */
@@ -1577,14 +1534,13 @@
 		 * It makes sure panel is connected when fbcon detects
 		 * connector status and gets the proper display mode to
 		 * create framebuffer.
+		 * Don't try to defer if there is nothing connected to the dsi
+		 * output.
 		 */
-		if (check_defer) {
-			node = of_get_child_by_name(msm_host->pdev->dev.of_node,
-							"panel");
-			if (node) {
-				if (!of_drm_find_panel(node))
+		if (check_defer && msm_host->device_node) {
+			if (!of_drm_find_panel(msm_host->device_node))
+				if (!of_drm_find_bridge(msm_host->device_node))
 					return -EPROBE_DEFER;
-			}
 		}
 	}
 
@@ -1663,6 +1619,7 @@
 				const struct mipi_dsi_msg *msg)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	int data_byte, rx_byte, dlen, end;
 	int short_response, diff, pkt_size, ret = 0;
 	char cmd;
@@ -1704,8 +1661,8 @@
 			return -EINVAL;
 		}
 
-		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
 			/* Clear the RDBK_DATA registers */
 			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
 					DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@
 		goto fail_disable_reg;
 	}
 
+	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+	if (ret) {
+		pr_err("%s: failed to set pinctrl default state, %d\n",
+			__func__, ret);
+		goto fail_disable_clk;
+	}
+
 	dsi_timing_setup(msm_host);
 	dsi_sw_reset(msm_host);
 	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@
 
 	return 0;
 
+fail_disable_clk:
+	dsi_clk_ctrl(msm_host, 0);
 fail_disable_reg:
 	dsi_host_regulator_disable(msm_host);
 unlock_ret:
@@ -1953,6 +1919,8 @@
 	if (msm_host->disp_en_gpio)
 		gpiod_set_value(msm_host->disp_en_gpio, 0);
 
+	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
 	msm_dsi_manager_phy_disable(msm_host->id);
 
 	dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 	struct drm_panel *panel;
 
-	panel = of_drm_find_panel(msm_host->panel_node);
+	panel = of_drm_find_panel(msm_host->device_node);
 	if (panel_flags)
 			*panel_flags = msm_host->mode_flags;
 
 	return panel;
 }
 
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return of_drm_find_bridge(msm_host->device_node);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac661..0455ff7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
 #include "msm_kms.h"
 #include "dsi.h"
 
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as the master encoder.
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
 struct msm_dsi_manager {
 	struct msm_dsi *dsi[DSI_MAX];
 
-	bool is_dual_panel;
+	bool is_dual_dsi;
 	bool is_sync_needed;
-	int master_panel_id;
+	int master_dsi_link_id;
 };
 
 static struct msm_dsi_manager msm_dsim_glb;
 
-#define IS_DUAL_PANEL()		(msm_dsim_glb.is_dual_panel)
+#define IS_DUAL_DSI()		(msm_dsim_glb.is_dual_dsi)
 #define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
-#define IS_MASTER_PANEL(id)	(msm_dsim_glb.master_panel_id == id)
+#define IS_MASTER_DSI_LINK(id)	(msm_dsim_glb.master_dsi_link_id == id)
 
 static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
 {
@@ -38,23 +50,23 @@
 	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
 }
 
-static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
 {
 	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
 
-	/* We assume 2 dsi nodes have the same information of dual-panel and
+	/* We assume 2 dsi nodes have the same information of dual-dsi and
 	 * sync-mode, and only one node specifies master in case of dual mode.
 	 */
-	if (!msm_dsim->is_dual_panel)
-		msm_dsim->is_dual_panel = of_property_read_bool(
-						np, "qcom,dual-panel-mode");
+	if (!msm_dsim->is_dual_dsi)
+		msm_dsim->is_dual_dsi = of_property_read_bool(
+						np, "qcom,dual-dsi-mode");
 
-	if (msm_dsim->is_dual_panel) {
-		if (of_property_read_bool(np, "qcom,master-panel"))
-			msm_dsim->master_panel_id = id;
+	if (msm_dsim->is_dual_dsi) {
+		if (of_property_read_bool(np, "qcom,master-dsi"))
+			msm_dsim->master_dsi_link_id = id;
 		if (!msm_dsim->is_sync_needed)
 			msm_dsim->is_sync_needed = of_property_read_bool(
-					np, "qcom,sync-dual-panel");
+					np, "qcom,sync-dual-dsi");
 	}
 
 	return 0;
@@ -68,7 +80,7 @@
 	struct msm_dsi_pll *src_pll;
 	int ret;
 
-	if (!IS_DUAL_PANEL()) {
+	if (!IS_DUAL_DSI()) {
 		ret = msm_dsi_host_register(msm_dsi->host, true);
 		if (ret)
 			return ret;
@@ -78,9 +90,9 @@
 	} else if (!other_dsi) {
 		ret = 0;
 	} else {
-		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
 					msm_dsi : other_dsi;
-		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
 					other_dsi : msm_dsi;
 		/* Register slave host first, so that slave DSI device
 		 * has a chance to probe, and do not block the master
@@ -144,28 +156,28 @@
 	DBG("id=%d", id);
 	if (!msm_dsi->panel) {
 		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
-						&msm_dsi->panel_flags);
+						&msm_dsi->device_flags);
 
 		/* There is only 1 panel in the global panel list
-		 * for dual panel mode. Therefore slave dsi should get
+		 * for dual DSI mode. Therefore slave dsi should get
 		 * the drm_panel instance from master dsi, and
 		 * keep using the panel flags got from the current DSI link.
 		 */
-		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
-			!IS_MASTER_PANEL(id) && other_dsi)
+		if (!msm_dsi->panel && IS_DUAL_DSI() &&
+			!IS_MASTER_DSI_LINK(id) && other_dsi)
 			msm_dsi->panel = msm_dsi_host_get_panel(
 					other_dsi->host, NULL);
 
-		if (msm_dsi->panel && IS_DUAL_PANEL())
+		if (msm_dsi->panel && IS_DUAL_DSI())
 			drm_object_attach_property(&connector->base,
 				connector->dev->mode_config.tile_property, 0);
 
-		/* Set split display info to kms once dual panel is connected
-		 * to both hosts
+		/* Set split display info to kms once dual DSI panel is
+		 * connected to both hosts.
 		 */
-		if (msm_dsi->panel && IS_DUAL_PANEL() &&
+		if (msm_dsi->panel && IS_DUAL_DSI() &&
 			other_dsi && other_dsi->panel) {
-			bool cmd_mode = !(msm_dsi->panel_flags &
+			bool cmd_mode = !(msm_dsi->device_flags &
 						MIPI_DSI_MODE_VIDEO);
 			struct drm_encoder *encoder = msm_dsi_get_encoder(
 					dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@
 				kms->funcs->set_split_display(kms, encoder,
 							slave_enc, cmd_mode);
 			else
-				pr_err("mdp does not support dual panel\n");
+				pr_err("mdp does not support dual DSI\n");
 		}
 	}
 
@@ -273,7 +285,7 @@
 	if (!num)
 		return 0;
 
-	if (IS_DUAL_PANEL()) {
+	if (IS_DUAL_DSI()) {
 		/* report half resolution to user */
 		dsi_dual_connector_fix_modes(connector);
 		ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
 	ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@
 		goto host_on_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_on(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@
 	/* Always call panel functions once, because even for dual panels,
 	 * there is only one drm_panel instance.
 	 */
-	ret = drm_panel_prepare(panel);
-	if (ret) {
-		pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
-		goto panel_prep_fail;
+	if (panel) {
+		ret = drm_panel_prepare(panel);
+		if (ret) {
+			pr_err("%s: prepare panel %d failed, %d\n", __func__,
+								id, ret);
+			goto panel_prep_fail;
+		}
 	}
 
 	ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@
 		goto host_en_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_enable(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@
 		}
 	}
 
-	ret = drm_panel_enable(panel);
-	if (ret) {
-		pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
-		goto panel_en_fail;
+	if (panel) {
+		ret = drm_panel_enable(panel);
+		if (ret) {
+			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+									ret);
+			goto panel_en_fail;
+		}
 	}
 
 	return;
 
 panel_en_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_disable(msm_dsi1->host);
 host1_en_fail:
 	msm_dsi_host_disable(host);
 host_en_fail:
-	drm_panel_unprepare(panel);
+	if (panel)
+		drm_panel_unprepare(panel);
 panel_prep_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_power_off(msm_dsi1->host);
 host1_on_fail:
 	msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
 
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
-	ret = drm_panel_disable(panel);
-	if (ret)
-		pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_disable(panel);
+		if (ret)
+			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+									ret);
+	}
 
 	ret = msm_dsi_host_disable(host);
 	if (ret)
 		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_disable(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
 	}
 
-	ret = drm_panel_unprepare(panel);
-	if (ret)
-		pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_unprepare(panel);
+		if (ret)
+			pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+								id, ret);
+	}
 
 	ret = msm_dsi_host_power_off(host);
 	if (ret)
 		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_off(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
 	struct mipi_dsi_host *host = msm_dsi->host;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 
 	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mode->base.id, mode->name,
@@ -471,11 +498,11 @@
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	if (is_dual_panel && (DSI_1 == id))
+	if (is_dual_dsi && (DSI_1 == id))
 		return;
 
 	msm_dsi_host_set_display_mode(host, adjusted_mode);
-	if (is_dual_panel && other_dsi)
+	if (is_dual_dsi && other_dsi)
 		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
 }
 
@@ -503,7 +530,7 @@
 	.mode_set = dsi_mgr_bridge_mode_set,
 };
 
-/* initialize connector */
+/* initialize connector when we're connected to a drm_panel */
 struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@
 	return ERR_PTR(ret);
 }
 
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_device *dev = msm_dsi->dev;
+	struct drm_encoder *encoder;
+	struct drm_bridge *int_bridge, *ext_bridge;
+	struct drm_connector *connector;
+	struct list_head *connector_list;
+
+	int_bridge = msm_dsi->bridge;
+	ext_bridge = msm_dsi->external_bridge =
+			msm_dsi_host_get_bridge(msm_dsi->host);
+
+	/*
+	 * HACK: we may not know the external DSI bridge device's mode
+	 * flags here. We'll get to know them only when the device
+	 * attaches to the dsi host. For now, assume the bridge supports
+	 * DSI video mode
+	 */
+	encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+
+	/* link the internal dsi bridge to the external bridge */
+	int_bridge->next = ext_bridge;
+	/* set the external bridge's encoder as dsi's encoder */
+	ext_bridge->encoder = encoder;
+
+	drm_bridge_attach(dev, ext_bridge);
+
+	/*
+	 * We need the drm_connector created by the external bridge
+	 * driver (or someone else) so that we can add it to our
+	 * driver's priv->connector[] list, mainly for msm_fbdev_init().
+	 */
+	connector_list = &dev->mode_config.connector_list;
+
+	list_for_each_entry(connector, connector_list, head) {
+		int i;
+
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == encoder->base.id)
+				return connector;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
 {
 }
@@ -598,12 +672,29 @@
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
 	int ret;
 
-	ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
 	if (ret)
 		return ret;
 
+	/*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset values, which confuses the clock driver and results in a
+	 * wrong output rate for the link clocks. Restore the PLL state if
+	 * this link's PLL is being used as a clock source.
+	 */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
+		ret = msm_dsi_pll_restore_state(pll);
+		if (ret) {
+			pr_err("%s: failed to restore pll state\n", __func__);
+			msm_dsi_phy_disable(phy);
+			return ret;
+		}
+	}
+
 	msm_dsi->phy_enabled = true;
 	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
 
@@ -616,13 +707,18 @@
 	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
 	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+
+	/* Save the PLL state if it is a clock source */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
+		msm_dsi_pll_save_state(pll);
 
 	/* disable DSI phy
 	 * In dual-dsi configuration, the phy should be disabled for the
 	 * first controller only when the second controller is disabled.
 	 */
 	msm_dsi->phy_enabled = false;
-	if (IS_DUAL_PANEL() && mdsi && sdsi) {
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
 		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
 			msm_dsi_phy_disable(sdsi->phy);
 			msm_dsi_phy_disable(mdsi->phy);
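
The save/restore pair added above keeps the clock framework's view of the PLL coherent across a PHY power cycle: a PHY reset silently rewrites the PLL registers, so the link whose PLL feeds the link clocks (DSI_CLOCK_MASTER in the dual-DSI case) saves the PLL state before the PHY goes down and restores it once the PHY is back up. A sketch of the pairing (hypothetical call site; in the patch the save sits in the manager's phy-disable path and the restore in its phy-enable path):

  /* illustrative only; pll comes from msm_dsi_phy_get_pll(phy) */
  static int example_phy_power_cycle(struct msm_dsi_phy *phy,
  				   struct msm_dsi_pll *pll, int src_pll_id,
  				   unsigned long bit_rate,
  				   unsigned long esc_rate)
  {
  	int ret;

  	msm_dsi_pll_save_state(pll);	/* before the reset wipes PLL regs */
  	msm_dsi_phy_disable(phy);

  	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
  	if (ret)
  		return ret;

  	/* put the PLL registers back so the link clock rates stay valid */
  	return msm_dsi_pll_restore_state(pll);
  }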
@@ -713,9 +809,9 @@
 
 	msm_dsim->dsi[id] = msm_dsi;
 
-	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+	ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
 	if (ret) {
-		pr_err("%s: failed to parse dual panel info\n", __func__);
+		pr_err("%s: failed to parse dual DSI info\n", __func__);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
deleted file mode 100644
index 2d3b33c..0000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#include "dsi.h"
-#include "dsi.xml.h"
-
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-struct dsi_phy_ops {
-	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate);
-	int (*disable)(struct msm_dsi_phy *phy);
-};
-
-struct dsi_phy_cfg {
-	enum msm_dsi_phy_type type;
-	struct dsi_reg_config reg_cfg;
-	struct dsi_phy_ops ops;
-};
-
-struct dsi_dphy_timing {
-	u32 clk_pre;
-	u32 clk_post;
-	u32 clk_zero;
-	u32 clk_trail;
-	u32 clk_prepare;
-	u32 hs_exit;
-	u32 hs_zero;
-	u32 hs_prepare;
-	u32 hs_trail;
-	u32 hs_rqst;
-	u32 ta_go;
-	u32 ta_sure;
-	u32 ta_get;
-};
-
-struct msm_dsi_phy {
-	struct platform_device *pdev;
-	void __iomem *base;
-	void __iomem *reg_base;
-	int id;
-
-	struct clk *ahb_clk;
-	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
-
-	struct dsi_dphy_timing timing;
-	const struct dsi_phy_cfg *cfg;
-
-	struct msm_dsi_pll *pll;
-};
-
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int i, ret;
-
-	for (i = 0; i < num; i++)
-		s[i].supply = regs[i].name;
-
-	ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
-	if (ret < 0) {
-		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
-						__func__, ret);
-		return ret;
-	}
-
-	for (i = 0; i < num; i++) {
-		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
-			ret = regulator_set_voltage(s[i].consumer,
-				regs[i].min_voltage, regs[i].max_voltage);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set voltage failed, %d\n",
-					i, ret);
-				return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	int num = phy->cfg->reg_cfg.num;
-	int i;
-
-	DBG("");
-	for (i = num - 1; i >= 0; i--)
-		if (regs[i].disable_load >= 0)
-			regulator_set_load(s[i].consumer,
-						regs[i].disable_load);
-
-	regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int ret, i;
-
-	DBG("");
-	for (i = 0; i < num; i++) {
-		if (regs[i].enable_load >= 0) {
-			ret = regulator_set_load(s[i].consumer,
-							regs[i].enable_load);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set op mode failed, %d\n",
-					i, ret);
-				goto fail;
-			}
-		}
-	}
-
-	ret = regulator_bulk_enable(num, s);
-	if (ret < 0) {
-		dev_err(dev, "regulator enable failed, %d\n", ret);
-		goto fail;
-	}
-
-	return 0;
-
-fail:
-	for (i--; i >= 0; i--)
-		regulator_set_load(s[i].consumer, regs[i].disable_load);
-	return ret;
-}
-
-#define S_DIV_ROUND_UP(n, d)	\
-	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
-
-static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
-				s32 min_result, bool even)
-{
-	s32 v;
-	v = (tmax - tmin) * percent;
-	v = S_DIV_ROUND_UP(v, 100) + tmin;
-	if (even && (v & 0x1))
-		return max_t(s32, min_result, v - 1);
-	else
-		return max_t(s32, min_result, v);
-}
-
-static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
-					s32 ui, s32 coeff, s32 pcnt)
-{
-	s32 tmax, tmin, clk_z;
-	s32 temp;
-
-	/* reset */
-	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	if (tmin > 255) {
-		tmax = 511;
-		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
-	} else {
-		tmax = 255;
-		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
-	}
-
-	/* adjust */
-	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
-	timing->clk_zero = clk_z + 8 - temp;
-}
-
-static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
-	const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	s32 ui, lpx;
-	s32 tmax, tmin;
-	s32 pcnt0 = 10;
-	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
-	s32 pcnt2 = 10;
-	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
-	s32 coeff = 1000; /* Precision, should avoid overflow */
-	s32 temp;
-
-	if (!bit_rate || !esc_rate)
-		return -EINVAL;
-
-	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
-
-	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
-	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
-	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
-
-	temp = lpx / ui;
-	if (temp & 0x1)
-		timing->hs_rqst = temp;
-	else
-		timing->hs_rqst = max_t(s32, 0, temp - 2);
-
-	/* Calculate clk_zero after clk_prepare and hs_rqst */
-	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
-
-	temp = 105 * coeff + 12 * ui - 20 * coeff;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
-	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
-
-	temp = 85 * coeff + 6 * ui;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	temp = 40 * coeff + 4 * ui;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
-
-	tmax = 255;
-	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
-	temp = 145 * coeff + 10 * ui - temp;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
-
-	temp = 105 * coeff + 12 * ui - 20 * coeff;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	temp = 60 * coeff + 4 * ui;
-	tmin = DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
-
-	tmax = 255;
-	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
-	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
-
-	tmax = 63;
-	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
-	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
-	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
-	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
-
-	tmax = 63;
-	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
-	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
-	temp += 8 * ui + lpx;
-	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
-	if (tmin > tmax) {
-		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
-		timing->clk_pre = temp >> 1;
-		temp = (2 * tmax - tmin) * pcnt2;
-	} else {
-		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
-	}
-
-	timing->ta_go = 3;
-	timing->ta_sure = 0;
-	timing->ta_get = 4;
-
-	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
-		timing->clk_pre, timing->clk_post, timing->clk_zero,
-		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
-		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
-		timing->hs_rqst);
-
-	return 0;
-}
-
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
-{
-	void __iomem *base = phy->reg_base;
-
-	if (!enable) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
-		return;
-	}
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
-}
-
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	struct dsi_dphy_timing *timing = &phy->timing;
-	int i;
-	void __iomem *base = phy->base;
-
-	DBG("");
-
-	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
-		pr_err("%s: D-PHY timing calculation failed\n", __func__);
-		return -EINVAL;
-	}
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
-
-	dsi_28nm_phy_regulator_ctrl(phy, true);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
-		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
-		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
-		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
-	if (timing->clk_zero & BIT(8))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
-			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
-		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
-		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
-		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
-		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
-		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
-		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
-		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
-
-	for (i = 0; i < 4; i++) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
-	}
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
-	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
-	else
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
-
-	return 0;
-}
-
-static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
-{
-	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
-	dsi_28nm_phy_regulator_ctrl(phy, false);
-
-	/*
-	 * Wait for the registers writes to complete in order to
-	 * ensure that the phy is completely disabled
-	 */
-	wmb();
-
-	return 0;
-}
-
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
-	int ret;
-
-	pm_runtime_get_sync(&phy->pdev->dev);
-
-	ret = clk_prepare_enable(phy->ahb_clk);
-	if (ret) {
-		pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
-		pm_runtime_put_sync(&phy->pdev->dev);
-	}
-
-	return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
-	clk_disable_unprepare(phy->ahb_clk);
-	pm_runtime_put_sync(&phy->pdev->dev);
-}
-
-static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
-	[MSM_DSI_PHY_28NM_HPM] = {
-		.type = MSM_DSI_PHY_28NM_HPM,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-	[MSM_DSI_PHY_28NM_LP] = {
-		.type = MSM_DSI_PHY_28NM_LP,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-};
-
-static const struct of_device_id dsi_phy_dt_match[] = {
-	{ .compatible = "qcom,dsi-phy-28nm-hpm",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
-	{ .compatible = "qcom,dsi-phy-28nm-lp",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
-	{}
-};
-
-static int dsi_phy_driver_probe(struct platform_device *pdev)
-{
-	struct msm_dsi_phy *phy;
-	const struct of_device_id *match;
-	int ret;
-
-	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy)
-		return -ENOMEM;
-
-	match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
-	if (!match)
-		return -ENODEV;
-
-	phy->cfg = match->data;
-	phy->pdev = pdev;
-
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,dsi-phy-index", &phy->id);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: PHY index not specified, ret=%d\n",
-			__func__, ret);
-		goto fail;
-	}
-
-	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR(phy->base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
-		ret = -ENOMEM;
-		goto fail;
-	}
-	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
-	if (IS_ERR(phy->reg_base)) {
-		dev_err(&pdev->dev,
-			"%s: failed to map phy regulator base\n", __func__);
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	ret = dsi_phy_regulator_init(phy);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
-		goto fail;
-	}
-
-	phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(phy->ahb_clk)) {
-		pr_err("%s: Unable to get ahb clk\n", __func__);
-		ret = PTR_ERR(phy->ahb_clk);
-		goto fail;
-	}
-
-	/* PLL init will call into clk_register which requires
-	 * register access, so we need to enable power and ahb clock.
-	 */
-	ret = dsi_phy_enable_resource(phy);
-	if (ret)
-		goto fail;
-
-	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
-	if (!phy->pll)
-		dev_info(&pdev->dev,
-			"%s: pll init failed, need separate pll clk driver\n",
-			__func__);
-
-	dsi_phy_disable_resource(phy);
-
-	platform_set_drvdata(pdev, phy);
-
-	return 0;
-
-fail:
-	return ret;
-}
-
-static int dsi_phy_driver_remove(struct platform_device *pdev)
-{
-	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
-
-	if (phy && phy->pll) {
-		msm_dsi_pll_destroy(phy->pll);
-		phy->pll = NULL;
-	}
-
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static struct platform_driver dsi_phy_platform_driver = {
-	.probe      = dsi_phy_driver_probe,
-	.remove     = dsi_phy_driver_remove,
-	.driver     = {
-		.name   = "msm_dsi_phy",
-		.of_match_table = dsi_phy_dt_match,
-	},
-};
-
-void __init msm_dsi_phy_driver_register(void)
-{
-	platform_driver_register(&dsi_phy_platform_driver);
-}
-
-void __exit msm_dsi_phy_driver_unregister(void)
-{
-	platform_driver_unregister(&dsi_phy_platform_driver);
-}
-
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
-	const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	int ret;
-
-	if (!phy || !phy->cfg->ops.enable)
-		return -EINVAL;
-
-	ret = dsi_phy_regulator_enable(phy);
-	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
-			__func__, ret);
-		return ret;
-	}
-
-	return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
-}
-
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
-{
-	if (!phy || !phy->cfg->ops.disable)
-		return -EINVAL;
-
-	phy->cfg->ops.disable(phy);
-	dsi_phy_regulator_disable(phy);
-
-	return 0;
-}
-
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
-	u32 *clk_pre, u32 *clk_post)
-{
-	if (!phy)
-		return;
-	if (clk_pre)
-		*clk_pre = phy->timing.clk_pre;
-	if (clk_post)
-		*clk_post = phy->timing.clk_post;
-}
-
-struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
-{
-	if (!phy)
-		return NULL;
-
-	return phy->pll;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f..5de505e 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
new file mode 100644
index 0000000..401ff58
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "dsi_phy.h"
+
+#define S_DIV_ROUND_UP(n, d)	\
+	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+				s32 min_result, bool even)
+{
+	s32 v;
+
+	v = (tmax - tmin) * percent;
+	v = S_DIV_ROUND_UP(v, 100) + tmin;
+	if (even && (v & 0x1))
+		return max_t(s32, min_result, v - 1);
+	else
+		return max_t(s32, min_result, v);
+}
+
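+/*
+ * Worked example (illustration only, values chosen arbitrarily):
+ * linear_inter(61, 40, 10, 0, true) computes
+ * 40 + S_DIV_ROUND_UP((61 - 40) * 10, 100) = 43, then rounds down to
+ * 42 so the result stays even; min_result acts as a lower bound.
+ */
+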
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
+					s32 ui, s32 coeff, s32 pcnt)
+{
+	s32 tmax, tmin, clk_z;
+	s32 temp;
+
+	/* reset */
+	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	if (tmin > 255) {
+		tmax = 511;
+		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+	} else {
+		tmax = 255;
+		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+	}
+
+	/* adjust */
+	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+	timing->clk_zero = clk_z + 8 - temp;
+}
+
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	s32 ui, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 10;
+	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+	s32 coeff = 1000; /* precision factor, sized to avoid s32 overflow */
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+	temp = lpx / ui;
+	if (temp & 0x1)
+		timing->hs_rqst = temp;
+	else
+		timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+	/* Calculate clk_zero after clk_prepare and hs_rqst */
+	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	temp = 85 * coeff + 6 * ui;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 40 * coeff + 4 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+	tmax = 255;
+	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+	temp = 145 * coeff + 10 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 60 * coeff + 4 * ui;
+	tmin = DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	tmax = 255;
+	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+	tmax = 63;
+	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	tmax = 63;
+	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+	temp += 8 * ui + lpx;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	if (tmin > tmax) {
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+		timing->clk_pre = temp >> 1;
+	} else {
+		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->clk_pre, timing->clk_post, timing->clk_zero,
+		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+		timing->hs_rqst);
+
+	return 0;
+}
+
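+/*
+ * Worked example (derived from the code above, for illustration):
+ * with bit_rate = 500000000 and esc_rate = 19200000, ui = 2000 and
+ * lpx = 52083; since coeff = 1000, both are effectively expressed in
+ * picoseconds (one UI at 500Mbps = 2ns).  clk_prepare then comes out
+ * to linear_inter(46, 17, 10, 0, true) = 20.
+ */
+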
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask)
+{
+	int phy_id = phy->id;
+	u32 val;
+
+	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
+		return;
+
+	val = dsi_phy_read(phy->base + reg);
+
+	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+		dsi_phy_write(phy->base + reg, val | bit_mask);
+	else
+		dsi_phy_write(phy->base + reg, val & (~bit_mask));
+}
+
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int i, ret;
+
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
+
+	ret = devm_regulator_bulk_get(dev, num, s);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+						__func__, ret);
+		return ret;
+	}
+
+	for (i = 0; i < num; i++) {
+		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+			ret = regulator_set_voltage(s[i].consumer,
+				regs[i].min_voltage, regs[i].max_voltage);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set voltage failed, %d\n",
+					i, ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	int num = phy->cfg->reg_cfg.num;
+	int i;
+
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_load(s[i].consumer, regs[i].disable_load);
+
+	regulator_bulk_disable(num, s);
+}
+
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int ret, i;
+
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_load(s[i].consumer,
+							regs[i].enable_load);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		dev_err(dev, "regulator enable failed, %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		regulator_set_load(s[i].consumer, regs[i].disable_load);
+	return ret;
+}
+
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+	struct device *dev = &phy->pdev->dev;
+	int ret;
+
+	pm_runtime_get_sync(dev);
+
+	ret = clk_prepare_enable(phy->ahb_clk);
+	if (ret) {
+		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		pm_runtime_put_sync(dev);
+	}
+
+	return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+	clk_disable_unprepare(phy->ahb_clk);
+	pm_runtime_put_sync(&phy->pdev->dev);
+}
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+	{ .compatible = "qcom,dsi-phy-28nm-hpm",
+	  .data = &dsi_phy_28nm_hpm_cfgs },
+	{ .compatible = "qcom,dsi-phy-28nm-lp",
+	  .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+	{ .compatible = "qcom,dsi-phy-20nm",
+	  .data = &dsi_phy_20nm_cfgs },
+#endif
+	{}
+};
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy;
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	int ret;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	match = of_match_node(dsi_phy_dt_match, dev->of_node);
+	if (!match)
+		return -ENODEV;
+
+	phy->cfg = match->data;
+	phy->pdev = pdev;
+
+	ret = of_property_read_u32(dev->of_node,
+				"qcom,dsi-phy-index", &phy->id);
+	if (ret) {
+		dev_err(dev, "%s: PHY index not specified, %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+				"qcom,dsi-phy-regulator-ldo-mode");
+
+	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR(phy->base)) {
+		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+				"DSI_PHY_REG");
+	if (IS_ERR(phy->reg_base)) {
+		dev_err(dev, "%s: failed to map phy regulator base\n",
+			__func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = dsi_phy_regulator_init(phy);
+	if (ret) {
+		dev_err(dev, "%s: failed to init regulator\n", __func__);
+		goto fail;
+	}
+
+	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
+	if (IS_ERR(phy->ahb_clk)) {
+		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+		ret = PTR_ERR(phy->ahb_clk);
+		goto fail;
+	}
+
+	/*
+	 * PLL init will call into clk_register which requires
+	 * register access, so we need to enable power and ahb clock.
+	 */
+	ret = dsi_phy_enable_resource(phy);
+	if (ret)
+		goto fail;
+
+	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
+	if (!phy->pll)
+		dev_info(dev,
+			"%s: pll init failed, need separate pll clk driver\n",
+			__func__);
+
+	dsi_phy_disable_resource(phy);
+
+	platform_set_drvdata(pdev, phy);
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
+
+	if (phy && phy->pll) {
+		msm_dsi_pll_destroy(phy->pll);
+		phy->pll = NULL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe      = dsi_phy_driver_probe,
+	.remove     = dsi_phy_driver_remove,
+	.driver     = {
+		.name   = "msm_dsi_phy",
+		.of_match_table = dsi_phy_dt_match,
+	},
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct device *dev = &phy->pdev->dev;
+	int ret;
+
+	if (!phy || !phy->cfg->ops.enable)
+		return -EINVAL;
+
+	ret = dsi_phy_regulator_enable(phy);
+	if (ret) {
+		dev_err(dev, "%s: regulator enable failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+	if (ret) {
+		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		dsi_phy_regulator_disable(phy);
+		return ret;
+	}
+
+	return 0;
+}
+
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	if (!phy || !phy->cfg->ops.disable)
+		return;
+
+	phy->cfg->ops.disable(phy);
+
+	dsi_phy_regulator_disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+					u32 *clk_pre, u32 *clk_post)
+{
+	if (!phy)
+		return;
+
+	if (clk_pre)
+		*clk_pre = phy->timing.clk_pre;
+	if (clk_post)
+		*clk_post = phy->timing.clk_post;
+}
+
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return NULL;
+
+	return phy->pll;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 0000000..0456b25
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct msm_dsi_phy_ops {
+	int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate);
+	void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+	enum msm_dsi_phy_type type;
+	struct dsi_reg_config reg_cfg;
+	struct msm_dsi_phy_ops ops;
+
+	/*
+	 * Each cell {phy_id, pll_id} of the truth table indicates
+	 * whether the source PLL selection bit should be set for that PHY.
+	 * Fill default H/W values in illegal cells, e.g. cell {0, 1}.
+	 */
+	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+};
+
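+/*
+ * For illustration: dsi_phy_28nm_hpm_cfgs (dsi_phy_28nm.c) uses
+ * { {true, true}, {false, true} }, i.e. PHY 0 sets the selection bit
+ * for either PLL, while PHY 1 sets it only when sourcing from PLL 1;
+ * msm_dsi_phy_set_src_pll() consults this table.
+ */
+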
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+
+struct msm_dsi_dphy_timing {
+	u32 clk_pre;
+	u32 clk_post;
+	u32 clk_zero;
+	u32 clk_trail;
+	u32 clk_prepare;
+	u32 hs_exit;
+	u32 hs_zero;
+	u32 hs_prepare;
+	u32 hs_trail;
+	u32 hs_rqst;
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+};
+
+struct msm_dsi_phy {
+	struct platform_device *pdev;
+	void __iomem *base;
+	void __iomem *reg_base;
+	int id;
+
+	struct clk *ahb_clk;
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+	struct msm_dsi_dphy_timing timing;
+	const struct msm_dsi_phy_cfg *cfg;
+
+	bool regulator_ldo_mode;
+
+	struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask);
+
+#endif /* __DSI_PHY_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 0000000..2e9ba11
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+		DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+		DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+		DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+			DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+		DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+		DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+		DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+		DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+		DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+		DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+		DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	if (phy->regulator_ldo_mode) {
+		dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+		return;
+	}
+
+	/* non LDO mode */
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+	u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_20nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+				DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+							(i >> 1) * 0x40);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+	}
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+	dsi_20nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+	/* make sure everything is written before enable */
+	wmb();
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+	return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+	dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+	.type = MSM_DSI_PHY_20NM,
+	.src_pll_truthtable = { {false, true}, {false, true} },
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_20nm_phy_enable,
+		.disable = dsi_20nm_phy_disable,
+	}
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 0000000..f1a7c7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+	dsi_28nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+	dsi_28nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	}
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+				DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+	dsi_28nm_phy_regulator_ctrl(phy, false);
+
+	/*
+	 * Wait for the registers writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+	.type = MSM_DSI_PHY_28NM_HPM,
+	.src_pll_truthtable = { {true, true}, {false, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+	.type = MSM_DSI_PHY_28NM_LP,
+	.src_pll_truthtable = { {true, true}, {true, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376f..5104fc9 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@
 int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	int ret;
 
-	/*
-	 * Certain PLLs need to update the same VCO rate and registers
-	 * after resume in suspend/resume scenario.
-	 */
-	if (pll->restore_state) {
-		ret = pll->restore_state(pll);
-		if (ret)
-			goto error;
-	}
-
-	ret = dsi_pll_enable(pll);
-
-error:
-	return ret;
+	return dsi_pll_enable(pll);
 }
 
 void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 
-	if (pll->save_state)
-		pll->save_state(pll);
-
 	dsi_pll_disable(pll);
 }
 
@@ -134,6 +117,29 @@
 		pll->destroy(pll);
 }
 
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+	if (pll->save_state) {
+		pll->save_state(pll);
+		pll->state_saved = true;
+	}
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	int ret;
+
+	if (pll->restore_state && pll->state_saved) {
+		ret = pll->restore_state(pll);
+		if (ret)
+			return ret;
+
+		pll->state_saved = false;
+	}
+
+	return 0;
+}
+
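+/*
+ * Intended call order (a sketch, not enforced by the code):
+ *
+ *	msm_dsi_pll_save_state(pll);
+ *	... PLL powered down and back up across suspend/resume ...
+ *	ret = msm_dsi_pll_restore_state(pll);
+ *
+ * restore is a no-op unless state was actually saved, so a resume
+ * without a prior save cannot replay stale register values.
+ */
+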
 struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			enum msm_dsi_phy_type type, int id)
 {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb24..063caa2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@
 
 	struct clk_hw	clk_hw;
 	bool		pll_on;
+	bool		state_saved;
 
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -82,8 +83,16 @@
 /*
  * Initialization for Each PLL Type
  */
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
 struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 					enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+	struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
 
 #endif /* __DSI_PLL_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 18b7727..598fdaf 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -465,26 +465,21 @@
 	void __iomem *base = pll_28nm->mmio;
 	int ret;
 
-	if ((cached_state->vco_rate != 0) &&
-		(cached_state->vco_rate == clk_hw_get_rate(&pll->clk_hw))) {
-		ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-						cached_state->vco_rate, 0);
-		if (ret) {
-			dev_err(&pll_28nm->pdev->dev,
-				"restore vco rate failed. ret=%d\n", ret);
-			return ret;
-		}
-
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-				cached_state->postdiv3);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-				cached_state->postdiv1);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
-				cached_state->byte_mux);
-
-		cached_state->vco_rate = 0;
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		dev_err(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
 	}
 
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			cached_state->postdiv1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+			cached_state->byte_mux);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e..06cbddf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dc..bef1d65 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7991069..81200e9 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -373,7 +373,7 @@
 	struct device *dev = &ctrl->pdev->dev;
 	int ret;
 
-	ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd");
+	ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
 	if (IS_ERR(ctrl->panel_hpd_gpio)) {
 		ret = PTR_ERR(ctrl->panel_hpd_gpio);
 		ctrl->panel_hpd_gpio = NULL;
@@ -381,13 +381,7 @@
 		return ret;
 	}
 
-	ret = gpiod_direction_input(ctrl->panel_hpd_gpio);
-	if (ret) {
-		pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
-		return ret;
-	}
-
-	ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
+	ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
 	if (IS_ERR(ctrl->panel_en_gpio)) {
 		ret = PTR_ERR(ctrl->panel_en_gpio);
 		ctrl->panel_en_gpio = NULL;
@@ -395,13 +389,6 @@
 		return ret;
 	}
 
-	ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
-	if (ret) {
-		pr_err("%s: Set direction for panel_en failed, %d\n",
-				__func__, ret);
-		return ret;
-	}
-
 	DBG("gpio on");
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 8145362..101b324 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 {
 	uint32_t ctrl = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	if (power_on) {
 		ctrl |= HDMI_CTRL_ENABLE;
 		if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@
 	}
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 	DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
 			power_on ? "Enable" : "Disable", ctrl);
 }
@@ -51,6 +54,10 @@
 	/* Process DDC: */
 	hdmi_i2c_irq(hdmi->i2c);
 
+	/* Process HDCP: */
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
 	/* TODO audio.. */
 
 	return IRQ_HANDLED;
@@ -60,6 +67,15 @@
 {
 	struct hdmi_phy *phy = hdmi->phy;
 
+	/*
+	 * HPD has been disabled by this point; once the workqueue is
+	 * flushed it is safe to tear down HDCP.
+	 */
+	if (hdmi->workq) {
+		flush_workqueue(hdmi->workq);
+		destroy_workqueue(hdmi->workq);
+	}
+	hdmi_hdcp_destroy(hdmi);
 	if (phy)
 		phy->funcs->destroy(phy);
 
@@ -77,6 +93,7 @@
 {
 	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	struct hdmi *hdmi = NULL;
+	struct resource *res;
 	int i, ret;
 
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@
 
 	hdmi->pdev = pdev;
 	hdmi->config = config;
+	spin_lock_init(&hdmi->reg_lock);
 
 	/* not sure about which phy maps to which msm.. probably I miss some */
-	if (config->phy_init)
+	if (config->phy_init) {
 		hdmi->phy = config->phy_init(hdmi);
-	else
-		hdmi->phy = ERR_PTR(-ENXIO);
 
-	if (IS_ERR(hdmi->phy)) {
-		ret = PTR_ERR(hdmi->phy);
-		dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
-		hdmi->phy = NULL;
-		goto fail;
+		if (IS_ERR(hdmi->phy)) {
+			ret = PTR_ERR(hdmi->phy);
+			dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
+			hdmi->phy = NULL;
+			goto fail;
+		}
 	}
 
 	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@
 		goto fail;
 	}
 
+	/* HDCP needs physical address of hdmi register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		config->mmio_name);
+	hdmi->mmio_phy_addr = res->start;
+
+	hdmi->qfprom_mmio = msm_ioremap(pdev,
+		config->qfprom_mmio_name, "HDMI_QFPROM");
+	if (IS_ERR(hdmi->qfprom_mmio)) {
+		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		hdmi->qfprom_mmio = NULL;
+	}
+
 	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
 			config->hpd_reg_cnt, GFP_KERNEL);
 	if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@
 		hdmi->pwr_clks[i] = clk;
 	}
 
+	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
 	hdmi->i2c = hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@
 		goto fail;
 	}
 
+	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	if (IS_ERR(hdmi->hdcp_ctrl)) {
+		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+		hdmi->hdcp_ctrl = NULL;
+	}
+
 	return hdmi;
 
 fail:
@@ -310,7 +347,7 @@
 static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
 static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
 
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
 		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@
 		.hpd_freq      = hpd_clk_freq_8x74,
 };
 
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+		.phy_init = NULL, /* nothing to do for this 20nm HDMI PHY */
+		HDMI_CFG(pwr_reg, 8x74),
+		HDMI_CFG(hpd_reg, 8x94),
+		HDMI_CFG(pwr_clk, 8x74),
+		HDMI_CFG(hpd_clk, 8x74),
+		.hpd_freq      = hpd_clk_freq_8x74,
+};
+
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
-	{ .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+	{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
 	{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
 	{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
 	{}
@@ -347,8 +396,7 @@
 		snprintf(name2, sizeof(name2), "%s-gpio", name);
 		gpio = of_get_named_gpio(of_node, name2, 0);
 		if (gpio < 0) {
-			dev_err(dev, "failed to get gpio: %s (%d)\n",
-					name, gpio);
+			DBG("failed to get gpio: %s (%d)", name, gpio);
 			gpio = -1;
 		}
 	}
@@ -376,6 +424,7 @@
 	}
 
 	hdmi_cfg->mmio_name     = "core_physical";
+	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
 	hdmi_cfg->ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
 	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
 	hdmi_cfg->hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@
 	if (cpu_is_apq8064()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@
 	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@
 				"8901_hdmi_mvs", "8901_mpp0"
 		};
 		config.phy_init      = hdmi_phy_8x60_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@
 		config.mux_en_gpio   = -1;
 		config.mux_sel_gpio  = -1;
 	}
+	config.mmio_name     = "hdmi_msm_hdmi_addr";
+	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
 	hdmi_cfg = &config;
 #endif
 	dev->platform_data = hdmi_cfg;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3..d0e6631 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@
 	int rate;
 };
 
+struct hdmi_hdcp_ctrl;
+
 struct hdmi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
@@ -51,6 +53,8 @@
 	unsigned long int pixclock;
 
 	void __iomem *mmio;
+	void __iomem *qfprom_mmio;
+	phys_addr_t mmio_phy_addr;
 
 	struct regulator **hpd_regs;
 	struct regulator **pwr_regs;
@@ -68,12 +72,25 @@
 	bool hdmi_mode;               /* are we in hdmi mode? */
 
 	int irq;
+	struct workqueue_struct *workq;
+
+	struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+	/*
+	 * Spinlock to protect registers shared by different execution paths:
+	 * REG_HDMI_CTRL
+	 * REG_HDMI_DDC_ARBITRATION
+	 * REG_HDMI_HDCP_INT_CTRL
+	 * REG_HDMI_HPD_CTRL
+	 */
+	spinlock_t reg_lock;
 };
 
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
+	const char *qfprom_mmio_name;
 
 	/* regulators that need to be on for hpd: */
 	const char **hpd_reg_names;
@@ -109,6 +126,11 @@
 	return msm_readl(hdmi->mmio + reg);
 }
 
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+	return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@
 
 struct hdmi_phy_funcs {
 	void (*destroy)(struct hdmi_phy *phy);
-	void (*reset)(struct hdmi_phy *phy);
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
 };
@@ -163,4 +184,13 @@
 void hdmi_i2c_destroy(struct i2c_adapter *i2c);
 struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
 
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
 #endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f0348..0b1b558 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@
 
 #define REG_HDMI_HDCP_SW_LOWER_AKSV				0x00000288
 
+#define REG_HDMI_CEC_CTRL					0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA					0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT				0x00000294
+
 #define REG_HDMI_CEC_STATUS					0x00000298
 
 #define REG_HDMI_CEC_INT					0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f..df232e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@
 		audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
 		audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
 	} else {
-		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
 		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d82..92b69ae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@
 		hdmi_audio_update(hdmi);
 	}
 
-	phy->funcs->powerup(phy, hdmi->pixclock);
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
 	hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_on(hdmi->hdcp_ctrl);
 }
 
 static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
 	struct hdmi_phy *phy = hdmi->phy;
 
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->powerdown(phy);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
 
 	if (hdmi->power_on) {
 		power_off(bridge);
@@ -142,8 +152,6 @@
 
 	hdmi->pixclock = mode->clock * 1000;
 
-	hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
 	hstart = mode->htotal - mode->hsync_start;
 	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93f..a3b05ae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
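+/*
+ * Note: the HDMI_PHY_CTRL_SW_RESET_LOW bits indicate an active-low
+ * reset, so the function above asserts reset by driving the opposite
+ * of the current polarity, waits 100ms, then deasserts it again.
+ */
+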
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
 	struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@
 	int ret;
 
 	if (on) {
-		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
-			goto error1;
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+				goto error1;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 
-		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
-			goto error2;
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+				goto error2;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
@@ -94,8 +147,12 @@
 		}
 		DBG("gpio on");
 	} else {
-		gpio_free(config->ddc_clk_gpio);
-		gpio_free(config->ddc_data_gpio);
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
 		gpio_free(config->hpd_gpio);
 
 		if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@
 error4:
 	gpio_free(config->hpd_gpio);
 error3:
-	gpio_free(config->ddc_data_gpio);
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
 error2:
-	gpio_free(config->ddc_clk_gpio);
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
 error1:
 	return ret;
 }
@@ -138,9 +197,9 @@
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	const struct hdmi_platform_config *config = hdmi->config;
 	struct device *dev = &hdmi->pdev->dev;
-	struct hdmi_phy *phy = hdmi->phy;
 	uint32_t hpd_ctrl;
 	int i, ret;
+	unsigned long flags;
 
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@
 	}
 
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->reset(phy);
+	hdmi_phy_reset(hdmi);
 	hdmi_set_mode(hdmi, true);
 
 	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@
 			HDMI_HPD_INT_CTRL_INT_EN);
 
 	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
 	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
 
@@ -200,6 +260,7 @@
 			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
 	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
 			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 
 	return 0;
 
@@ -250,7 +311,6 @@
 void hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -274,7 +334,7 @@
 			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
 
-		queue_work(priv->wq, &hdmi_connector->hpd_work);
+		queue_work(hdmi->workq, &hdmi_connector->hpd_work);
 	}
 }
 
@@ -350,6 +410,7 @@
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
 
+	hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
 	drm_mode_connector_update_edid_property(connector, edid);
 
 	if (edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 0000000..1dc9c34
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  0x000000FC
+#define HDCP_KSV_LSB                     0x000060D8
+#define HDCP_KSV_MSB                     0x000060DC
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+	HDCP_STATE_NO_AKSV,
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;
+	u32 off;
+	char *name;
+	u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+	struct hdmi *hdmi;
+	u32 auth_retries;
+	bool tz_hdcp;
+	enum hdmi_hdcp_state hdcp_state;
+	struct work_struct hdcp_auth_work;
+	struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+	unsigned long auth_event;
+	wait_queue_head_t auth_event_queue;
+
+	u32 ksv_fifo_w_index;
+	/*
+	 * store aksv from qfprom
+	 */
+	u32 aksv_lsb;
+	u32 aksv_msb;
+	bool aksv_valid;
+	u32 ds_type;
+	u32 bksv_lsb;
+	u32 bksv_msb;
+	u8 dev_count;
+	u8 depth;
+	u8 ksv_list[5 * 127];
+	bool max_cascade_exceeded;
+	bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 5;
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= &offset,
+		}, {
+			.addr	= addr >> 1,
+			.flags	= I2C_M_RD,
+			.len	= data_len,
+			.buf	= data,
+		}
+	};
+
+	DBG("Start DDC read");
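+	/*
+	 * Retry the full offset-write + data-read sequence up to 5 times;
+	 * i2c_transfer() returns the number of messages transferred on
+	 * success.
+	 */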
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+	retry--;
+	if (rc == 2)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC read %d", rc);
+
+	return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 10;
+	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+		}
+	};
+
+	DBG("Start DDC write");
+	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+		pr_err("%s: write size too big\n", __func__);
+		return -ERANGE;
+	}
+
+	buf[0] = offset;
+	memcpy(&buf[1], data, data_len);
+	msgs[0].buf = buf;
+	msgs[0].len = data_len + 1;
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+	retry--;
+	if (rc == 1)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC write %d", rc);
+
+	return rc;
+}
+
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+	u32 *pdata, u32 count)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+	u32 resp, phy_addr, idx = 0;
+	int i, ret = 0;
+
+	WARN_ON(!pdata || !preg || (count == 0));
+
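+	/*
+	 * When HDCP is handled by TrustZone, the HDCP registers can only
+	 * be written via an SCM call; otherwise write them directly.
+	 */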
+	if (hdcp_ctrl->tz_hdcp) {
+		phy_addr = (u32)hdmi->mmio_phy_addr;
+
+		while (count) {
+			memset(scm_buf, 0, sizeof(scm_buf));
+			for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+				i++) {
+				scm_buf[i].addr = phy_addr + preg[idx];
+				scm_buf[i].val  = pdata[idx];
+				idx++;
+			}
+			ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+			if (ret || resp) {
+				pr_err("%s: error: scm_call ret=%d resp=%u\n",
+					__func__, ret, resp);
+				ret = -EINVAL;
+				break;
+			}
+
+			count -= i;
+		}
+	} else {
+		for (i = 0; i < count; i++)
+			hdmi_write(hdmi, preg[i], pdata[i]);
+	}
+
+	return ret;
+}
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, hdcp_int_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+	hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+	if (!hdcp_int_status) {
+		spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+		return;
+	}
+	/* Clear Interrupts */
+	reg_val |= hdcp_int_status << 1;
+	/* Clear AUTH_FAIL_INFO as well */
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+		reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	DBG("hdcp irq %x", hdcp_int_status);
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+		pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, reg_val);
+		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+			queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+		else if (HDCP_STATE_AUTHENTICATING ==
+				hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+}
+
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+	int rc;
+
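+	/*
+	 * Sleep for up to @ms milliseconds, but wake up early (and return
+	 * -ECANCELED) if the event @ev is signalled, e.g. on an auth abort.
+	 */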
+	rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+		!!test_bit(ev, &hdcp_ctrl->auth_event),
+		msecs_to_jiffies(ms));
+	if (rc) {
+		pr_info("%s: msleep is canceled by event %d\n",
+				__func__, ev);
+		clear_bit(ev, &hdcp_ctrl->auth_event);
+		return -ECANCELED;
+	}
+
+	return 0;
+}
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	/* Fetch AKSV from QFPROM; this info should be public. */
+	hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+	hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+	/* check there are 20 ones in AKSV */
+	if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+			!= 20) {
+		pr_err("%s: AKSV QFPROM doesn't have 20 1's and 20 0's\n",
+			__func__);
+		pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+			__func__, hdcp_ctrl->aksv_msb,
+			hdcp_ctrl->aksv_lsb);
+		return -EINVAL;
+	}
+	DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+	return 0;
+}
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, failure, nack0;
+	int rc = 0;
+
+	/* Check for any DDC transfer failures */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+	failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+	nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+	DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+		reg_val, failure, nack0);
+
+	if (failure) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred, let's clear it.
+		 */
+		DBG("DDC failure detected");
+
+		/* First, Disable DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+			HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+		/* ACK the Failure to Clear it */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+		reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+		/* Check if the FAILURE got Cleared */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+			pr_info("%s: Unable to clear HDCP DDC Failure\n",
+				__func__);
+
+		/* Re-Enable HDCP DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0) {
+		DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* Reset HDMI DDC Controller */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* If previous msleep is aborted, skip this msleep */
+		if (!rc)
+			rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+		DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+	}
+
+	return rc;
+}
+
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 xfer_done, xfer_req, hw_done;
+	bool hw_not_ready;
+	u32 timeout_count;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+		return 0;
+
+	/* Wait for the DDC HW engine to be clean */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+		xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+		xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+		hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+		hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+		if (!hw_not_ready)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_warn("%s: hw_ddc_clean failed\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	DBG("HDCP REAUTH WORK");
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/* Disable HDCP interrupts */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Wait for the DDC HW engine to be clean */
+	if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+		pr_info("%s: reauth work aborted\n", __func__);
+		return;
+	}
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	/* Enable HPD circuitry */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Only retry a defined number of times, then abort the current
+	 * authentication process.
+	 */
+	if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+		hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+		hdcp_ctrl->auth_retries = 0;
+		pr_info("%s: abort reauthentication!\n", __func__);
+
+		return;
+	}
+
+	DBG("Queue AUTH WORK");
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 reg_val;
+	unsigned long flags;
+	int rc;
+
+	if (!hdcp_ctrl->aksv_valid) {
+		rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+		if (rc) {
+			pr_err("%s: AKSV validation failed\n", __func__);
+			hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+			return -ENOTSUPP;
+		}
+		hdcp_ctrl->aksv_valid = true;
+	}
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	/* disable HDMI Encrypt */
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enabling Software DDC */
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Write AKSV read from QFPROM to the HDCP registers.
+	 * This step is needed for HDCP authentication and must be
+	 * written before enabling HDCP.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+	/*
+	 * HDCP setup prior to enabling HDCP_CTRL.
+	 * Setup seed values for random number An.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+	reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+	hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+	DBG("HDCP_DEBUG_CTRL=0x%08x",
+		hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+	/*
+	 * Ensure that all register writes are completed before
+	 * enabling HDCP cipher
+	 */
+	wmb();
+
+	/*
+	 * Enable HDCP
+	 * This needs to be done as early as possible in order for the
+	 * hardware to make An available to read
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+	/*
+	 * If we had stale values for the An ready bit, it should most
+	 * likely be cleared now after enabling HDCP cipher
+	 */
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+	if (!(link0_status &
+		(HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+		HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+		DBG("An not ready after enabling HDCP");
+
+	/* Clear any DDC failures from previous tries before enabling HDCP */
+	rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	DBG("hdcp auth failed, queue reauth work");
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	/*
+	 * Disable software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDC
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/* enable HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val |= HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+	hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * HDCP authentication part 1:
+ * Wait for Key/An to be ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status, keys_state;
+	u32 timeout_count;
+	bool an_ready;
+
+	/* Wait for HDCP keys to be checked and validated */
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
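+		/* the HDCP key state is reported in bits [30:28] of LINK0_STATUS */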
+		keys_state = (link0_status >> 28) & 0x7;
+		if (keys_state == HDCP_KEYS_STATE_VALID)
+			break;
+
+		DBG("Keys not ready(%d). s=%d, l0=0x%08x",
+			timeout_count, keys_state, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait key state timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+			&& (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+		if (an_ready)
+			break;
+
+		DBG("An not ready(%d). l0_status=0x%08x",
+			timeout_count, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait An timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_an[2];
+	u8 aksv[5];
+
+	/* Read An0 and An1 */
+	link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+	link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+	DBG("Link AKSV=%08x%08x", link0_aksv_0, link0_aksv_1);
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	/* Write An to offset 0x18 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+		(u16)sizeof(link0_an));
+	if (rc) {
+		pr_err("%s:An write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+	/* Write AKSV to offset 0x10 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+	if (rc) {
+		pr_err("%s:AKSV write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u8 bksv[5];
+	u32 reg[2], data[2];
+
+	/* Read BKSV at offset 0x00 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+	if (rc) {
+		pr_err("%s:BKSV read failed\n", __func__);
+		return rc;
+	}
+
+	hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+		(bksv[2] << 16) | (bksv[3] << 24);
+	hdcp_ctrl->bksv_msb = bksv[4];
+	DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+	/* check there are 20 ones in BKSV */
+	if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+			!= 20) {
+		pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+		pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+		return -EINVAL;
+	}
+
+	/* Write BKSV read from sink to HDCP registers */
+	reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+	data[0] = hdcp_ctrl->bksv_lsb;
+	reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+	data[1] = hdcp_ctrl->bksv_msb;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u8 bcaps;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+	if (rc) {
+		pr_err("%s:BCAPS read failed\n", __func__);
+		return rc;
+	}
+	DBG("BCAPS=%02x", bcaps);
+
+	/* receiver (0), repeater (1) */
+	hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+	/* Write BCAPS to the hardware */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = (u32)bcaps;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	int rc;
+
+	/* Wait for AKSV key and An ready */
+	rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait key and an ready failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BCAPS and send to HDCP engine */
+	rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: read bcaps error, abort\n", __func__);
+		return rc;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Send AKSV and An to sink */
+	rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:An/Aksv write failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BKSV and send to HDCP engine*/
+	rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:BKSV Process failed\n", __func__);
+		return rc;
+	}
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	u8 buf[2];
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+	if (rc)
+		return rc;
+
+	/* Read R0' at offset 0x08 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+	if (rc) {
+		pr_err("%s:R0' read failed\n", __func__);
+		return rc;
+	}
+	DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+		(((u32)buf[1]) << 8) | buf[0]);
+
+	return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	int rc;
+
+	/* wait for hdcp irq, 10 sec should be long enough */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
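+	/*
+	 * A non-zero return means the result-ready event fired; zero means
+	 * the full timeout elapsed without an auth interrupt.
+	 */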
+	if (!rc) {
+		pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+		pr_err("%s: Authentication Part I failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable HDCP Encryption */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+		HDMI_HDCP_CTRL_ENABLE |
+		HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+	u16 *pbstatus)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	bool max_devs_exceeded = false, max_cascade_exceeded = false;
+	u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+	u16 bstatus;
+	u8 buf[2];
+
+	/* Read BSTATUS at offset 0x41 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+	if (rc) {
+		pr_err("%s: BSTATUS read failed\n", __func__);
+		goto error;
+	}
+	*pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
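+	/*
+	 * BSTATUS layout: bits [6:0] device count, bit 7 max devices
+	 * exceeded, bits [10:8] repeater cascade depth, bit 11 max
+	 * cascade exceeded.
+	 */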
+	down_stream_devices = bstatus & 0x7F;
+	repeater_cascade_depth = (bstatus >> 8) & 0x7;
+	max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+	max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		pr_err("%s: No downstream devices\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	if (max_devs_exceeded) {
+		pr_err("%s: no. of devs connected exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	if (max_cascade_exceeded) {
+		pr_err("%s: no. of cascade conn exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	hdcp_ctrl->dev_count = down_stream_devices;
+	hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+	hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+	hdcp_ctrl->depth = repeater_cascade_depth;
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u32 timeout_count;
+	u16 bstatus;
+	u8 bcaps;
+
+	/*
+	 * Wait until the READY bit is set in BCAPS; per the HDCP
+	 * specification, the maximum permitted time to poll for the
+	 * READY bit is five seconds.
+	 */
+	timeout_count = 100;
+	do {
+		/* Read BCAPS at offset 0x40 */
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		if (rc) {
+			pr_err("%s: BCAPS read failed\n", __func__);
+			return rc;
+		}
+
+		if (bcaps & BIT(5))
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	if (rc) {
+		pr_err("%s: bstatus error\n", __func__);
+		return rc;
+	}
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = bcaps | (bstatus << 8);
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	if (rc) {
+		pr_err("%s: BSTATUS write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * HDCP authentication part 2, second stage:
+ * Read the KSV FIFO from the sink
+ * Transfer V' from the sink to the HDCP engine
+ * Reset the SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	struct hdmi_hdcp_reg_data reg_data[]  = {
+		{REG_HDMI_HDCP_RCVPORT_DATA7,  0x20, "V' H0"},
+		{REG_HDMI_HDCP_RCVPORT_DATA8,  0x24, "V' H1"},
+		{REG_HDMI_HDCP_RCVPORT_DATA9,  0x28, "V' H2"},
+		{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	struct hdmi_hdcp_reg_data *rd;
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 reg[ARRAY_SIZE(reg_data)];
+	u32 data[ARRAY_SIZE(reg_data)];
+	int i;
+
+	for (i = 0; i < size; i++) {
+		rd = &reg_data[i];
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+		if (rc) {
+			pr_err("%s: Read %s failed\n", __func__, rd->name);
+			goto error;
+		}
+
+		DBG("%s =%x", rd->name, data[i]);
+		reg[i] = reg_data[i].reg_id;
+	}
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+		hdcp_ctrl->ksv_list, ksv_bytes);
+	if (rc)
+		pr_err("%s: KSV FIFO read failed\n", __func__);
+
+	return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	u32 reg[2], data[2];
+	u32 rc  = 0;
+
+	reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+	data[0] = HDCP_REG_ENABLE;
+	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+	data[1] = HDCP_REG_DISABLE;
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * The Key Selection Vector FIFO is used to pull downstream KSVs
+	 * from HDCP Repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto-incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0).
+	 */
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: transfer V failed\n", __func__);
+		return rc;
+	}
+
+	/* reset SHA engine before write ksv fifo */
+	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: fail to reset sha engine\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * Once the last byte has been written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until the HW finishes.
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int i;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes, last_byte = 0;
+	u8 *ksv_fifo = NULL;
+	u32 reg_val, data, reg;
+	u32 rc  = 0;
+
+	ksv_bytes  = 5 * hdcp_ctrl->dev_count;
+
+	/* Check if need to wait for HW completion */
+	if (hdcp_ctrl->ksv_fifo_w_index) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+		DBG("HDCP_SHA_STATUS=%08x", reg_val);
+		if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+			/* check COMP_DONE if last write */
+			if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+				DBG("COMP_DONE");
+				return 0;
+			} else {
+				return -EAGAIN;
+			}
+		} else {
+			/* check BLOCK_DONE if not last write */
+			if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+				return -EAGAIN;
+
+			DBG("BLOCK_DONE");
+		}
+	}
+
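+	/* the SHA engine consumes the KSV list in blocks of at most 64 bytes */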
+	ksv_bytes  -= hdcp_ctrl->ksv_fifo_w_index;
+	if (ksv_bytes <= 64)
+		last_byte = 1;
+	else
+		ksv_bytes = 64;
+
+	ksv_fifo = hdcp_ctrl->ksv_list;
+	ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+	for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for last byte*/
+		reg_val = ksv_fifo[i] << 16;
+		if ((i == (ksv_bytes - 1)) && last_byte)
+			reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+		reg = REG_HDMI_HDCP_SHA_DATA;
+		data = reg_val;
+		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+		if (rc)
+			return rc;
+	}
+
+	hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 * Return -EAGAIN to notify the caller to wait for COMP_DONE or
+	 * BLOCK_DONE.
+	 */
+	return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	hdcp_ctrl->ksv_fifo_w_index = 0;
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		if (rc != -EAGAIN)
+			return rc;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Write KSV fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 timeout_count = 100;
+
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: HDCP V Match timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_auth_work);
+	int rc;
+
+	rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: auth prepare failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	/* HDCP PartI */
+	rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: key exchange failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: receive r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: verify r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+	pr_info("%s: Authentication Part I successful\n", __func__);
+	if (hdcp_ctrl->ds_type == DS_RECEIVER)
+		goto end;
+
+	/* HDCP PartII */
+	rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+	if (rc)
+		pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+	if (rc == -ECANCELED) {
+		pr_info("%s: hdcp authentication canceled\n", __func__);
+	} else if (rc == -ENOTSUPP) {
+		pr_info("%s: hdcp is not supported\n", __func__);
+	} else if (rc) {
+		pr_err("%s: hdcp authentication failed\n", __func__);
+		hdmi_hdcp_auth_fail(hdcp_ctrl);
+	} else {
+		hdmi_hdcp_auth_done(hdcp_ctrl);
+	}
+}
+
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("still active or activating or no aksv. returning");
+		return;
+	}
+
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->auth_event = 0;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	hdcp_ctrl->auth_retries = 0;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("hdcp inactive or no aksv. returning");
+		return;
+	}
+
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+	wake_up_all(&hdcp_ctrl->auth_event_queue);
+	cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+	cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enable HPD circuitry */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+	DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+	if (!hdmi->qfprom_mmio) {
+		pr_err("%s: HDCP is not supported without qfprom\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+	init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+	hdcp_ctrl->hdmi = hdmi;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	hdcp_ctrl->aksv_valid = false;
+
+	if (qcom_scm_hdcp_available())
+		hdcp_ctrl->tz_hdcp = true;
+	else
+		hdcp_ctrl->tz_hdcp = false;
+
+	return hdcp_ctrl;
+}
+
+void hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+	if (hdmi && hdmi->hdcp_ctrl) {
+		kfree(hdmi->hdcp_ctrl);
+		hdmi->hdcp_ctrl = NULL;
+	}
+}
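The new file exposes the HDCP lifecycle through a small API: hdmi_hdcp_init()
at driver setup, hdmi_hdcp_on()/hdmi_hdcp_off() around link enable/disable,
hdmi_hdcp_irq() from the HDMI interrupt handler, and hdmi_hdcp_destroy() at
teardown. A minimal sketch of the expected call flow, assuming the surrounding
probe/hotplug/ISR glue (which lives outside this hunk) and eliding locking and
error handling:

	/* sketch only -- the driver glue around these calls is assumed */
	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);		/* at driver init */
	if (IS_ERR(hdmi->hdcp_ctrl))
		hdmi->hdcp_ctrl = NULL;			/* run without HDCP */

	if (hdmi->hdcp_ctrl) {
		hdmi_hdcp_on(hdmi->hdcp_ctrl);		/* link enabled */
		hdmi_hdcp_irq(hdmi->hdcp_ctrl);		/* from the HDMI ISR */
		hdmi_hdcp_off(hdmi->hdcp_ctrl);		/* link about to go down */
	}
	hdmi_hdcp_destroy(hdmi);			/* at teardown */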
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec6..3a01cb5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@
 	kfree(phy_8960);
 }
 
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -511,7 +460,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
 		.destroy = hdmi_phy_8960_destroy,
-		.reset = hdmi_phy_8960_reset,
 		.powerup = hdmi_phy_8960_powerup,
 		.powerdown = hdmi_phy_8960_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c..cb01421 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@
 	kfree(phy_8x60);
 }
 
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-}
-
 static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -182,7 +151,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
 		.destroy = hdmi_phy_8x60_destroy,
-		.reset = hdmi_phy_8x60_reset,
 		.powerup = hdmi_phy_8x60_powerup,
 		.powerdown = hdmi_phy_8x60_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cd..56ab891 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
 
 struct hdmi_phy_8x74 {
 	struct hdmi_phy base;
-	struct hdmi *hdmi;
 	void __iomem *mmio;
 };
 #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@
 	kfree(phy_8x74);
 }
 
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	struct hdmi *hdmi = phy_8x74->hdmi;
-	unsigned int val;
-
-	/* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -117,7 +63,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
 		.destroy = hdmi_phy_8x74_destroy,
-		.reset = hdmi_phy_8x74_reset,
 		.powerup = hdmi_phy_8x74_powerup,
 		.powerdown = hdmi_phy_8x74_powerdown,
 };
@@ -138,8 +83,6 @@
 
 	phy->funcs = &hdmi_phy_8x74_funcs;
 
-	phy_8x74->hdmi = hdmi;
-
 	/* for 8x74, the phy mmio is mapped separately: */
 	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
 			"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f7..2aa23b9 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc48..74b8673 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c4bb9d9..6ac9aa1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -334,13 +334,15 @@
 	return 0;
 }
 
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	DBG("%s: begin", mdp4_crtc->name);
 }
 
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -680,7 +682,5 @@
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp4_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7..5ed38cf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
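+	/*
+	 * Ack/clear only the interrupts being newly enabled;
+	 * irqmask ^ (irqmask & old_irqmask) is equivalent to
+	 * irqmask & ~old_irqmask.
+	 */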
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+		irqmask ^ (irqmask & old_irqmask));
 	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
@@ -68,9 +71,10 @@
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+	enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
 
 	VERB("status=%08x", status);
@@ -86,13 +90,22 @@
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), true);
+	mdp4_disable(mdp4_kms);
+
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), false);
+	mdp4_disable(mdp4_kms);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4ac..077f752 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@
 }
 
 #ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
-	struct device_node *n;
+	struct device_node *endpoint, *panel_node;
+	struct device_node *np = dev->dev->of_node;
 	struct drm_panel *panel = NULL;
 
-	n = of_parse_phandle(dev->dev->of_node, name, 0);
-	if (n) {
-		panel = of_drm_find_panel(n);
-		if (!panel)
-			panel = ERR_PTR(-EPROBE_DEFER);
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_err(dev->dev, "no valid endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	if (!panel_node) {
+		dev_err(dev->dev, "no valid panel node\n");
+		of_node_put(endpoint);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(endpoint);
+
+	panel = of_drm_find_panel(panel_node);
+	if (!panel) {
+		of_node_put(panel_node);
+		return ERR_PTR(-EPROBE_DEFER);
 	}
 
 	return panel;
 }
 #else
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
 	// ??? maybe use a module param to specify which panel is attached?
 }
@@ -294,7 +309,7 @@
 	 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
 	 */
 
-	panel = detect_panel(dev, "qcom,lvds-panel");
+	panel = detect_panel(dev);
 	if (IS_ERR(panel)) {
 		ret = PTR_ERR(panel);
 		dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d..8a7f6e1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
 {
 	switch (pipe) {
 	case VG1:
 	case VG2:
 	case VG3:
 	case VG4:
-		return true;
+		return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+	case RGB1:
+	case RGB2:
+	case RGB3:
+		return MDP_PIPE_CAP_SCALE;
 	default:
-		return false;
+		return 0;
 	}
 }
 
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c048433..4cd6e72 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@
 
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
-	if (panel)
+	if (panel) {
 		drm_panel_disable(panel);
+		drm_panel_unprepare(panel);
+	}
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@
 	if (ret)
 		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
-	if (panel)
+	if (panel) {
+		drm_panel_prepare(panel);
 		drm_panel_enable(panel);
+	}
 
 	setup_phy(encoder);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 247a424..e9dee36 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@
 
 	enum mdp4_pipe pipe;
 
+	uint32_t caps;
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -74,7 +75,7 @@
 }
 
 /* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
 {
 	// XXX
@@ -382,9 +383,11 @@
 
 	mdp4_plane->pipe = pipe_id;
 	mdp4_plane->name = pipe_names[pipe_id];
+	mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
 
-	mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
-			ARRAY_SIZE(mdp4_plane->formats));
+	mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+			ARRAY_SIZE(mdp4_plane->formats),
+			!pipe_supports_yuv(mdp4_plane->caps));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e1752..3469f50d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@
 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
 #define MDP5_CTL_LAYER_REG_VIG0__MASK				0x00000007
 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT				0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG1__MASK				0x00000038
 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT				3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG2__MASK				0x000001c0
 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT				6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB0__MASK				0x00000e00
 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT				9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB1__MASK				0x00007000
 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT				12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB2__MASK				0x00038000
 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT				15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA0__MASK				0x001c0000
 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT				18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA1__MASK				0x00e00000
 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT				21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
 }
@@ -431,13 +431,13 @@
 #define MDP5_CTL_LAYER_REG_CURSOR_OUT				0x02000000
 #define MDP5_CTL_LAYER_REG_VIG3__MASK				0x1c000000
 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT				26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB3__MASK				0xe0000000
 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT				29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
 }
@@ -499,6 +499,44 @@
 
 static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
 
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000040;
+		case 1: return 0x00000044;
+		case 2: return 0x00000048;
+		case 3: return 0x0000004c;
+		case 4: return 0x00000050;
+		case 5: return 0x00000054;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3			0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3			0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3			0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3			0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3			0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3			0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3			0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3			0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3			0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3			0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK			0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT			20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK			0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT			26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
 static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 {
 	switch (idx) {
@@ -803,11 +841,11 @@
 }
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT			0x00020000
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB			0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK			0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT			19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK			0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT			19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
 {
-	return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+	return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
 }
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK			0x01800000
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT			23
@@ -897,41 +935,41 @@
 static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
 #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN			0x00000001
 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN			0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK		0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT		8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK	0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT	8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK		0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT		10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK	0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT	10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK		0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT		12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK	0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT	12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK		0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT		14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK	0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT	14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK		0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT		16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK	0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT	16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK		0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT		18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK	0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT	18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
 }
 
 static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@
 
 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
 
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000020;
+		case 1: return 0x00000050;
+		case 2: return 0x00000080;
+		case 3: return 0x000000b0;
+		case 4: return 0x00000230;
+		case 5: return 0x00000260;
+		case 6: return 0x00000290;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK			0x00000003
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT			0
 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@
 #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA			0x00001000
 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN			0x00002000
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
 
 static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
 #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK			0x0000ffff
@@ -1260,6 +1311,13 @@
 static inline uint32_t __offset_WB(uint32_t idx)
 {
 	switch (idx) {
+#if 0  /* TEMPORARY until patch that adds wb.base[] is merged */
+		case 0: return (mdp5_cfg->wb.base[0]);
+		case 1: return (mdp5_cfg->wb.base[1]);
+		case 2: return (mdp5_cfg->wb.base[2]);
+		case 3: return (mdp5_cfg->wb.base[3]);
+		case 4: return (mdp5_cfg->wb.base[4]);
+#endif
 		default: return INVALID_IDX(idx);
 	}
 }
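
The lookup tables above replace the old linear "base + 0x30*i1" computation because the blend units are not evenly spaced: stages 0-3 sit 0x30 apart starting at 0x20, while stages 4-6 start at 0x230. A worked example, assuming an LM0 base of 0x45000 as in the msm8x16/msm8x94 configs:

	/*
	 * FG_ALPHA of blend stage 4 on layer mixer 0:
	 *   REG_MDP5_LM_BLEND_FG_ALPHA(0, 4)
	 *     = 0x04 + __offset_LM(0) + __offset_BLEND(4)
	 *     = 0x04 + 0x45000 + 0x230 = 0x45234
	 * The old 0x24 + 0x30*4 formula would have yielded 0x450e4, which is
	 * only valid for stages 0-3.
	 */
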
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a793..a1e26f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@
 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */
 const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+	.name = "msm8x74v1",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4, [SSPP_VIG2] =  7,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			MDP_PIPE_CAP_CSC   |
+			0,
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			0,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			0,
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.pp = {
+		.count = 3,
+		.base = { 0x21b00, 0x21c00, 0x21d00 },
+	},
+	.intf = {
+		.base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+		.connect = {
+			[0] = INTF_eDP,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
 	.name = "msm8x74",
 	.mdp = {
 		.count = 1,
@@ -45,19 +114,27 @@
 	.pipe_vig = {
 		.count = 3,
 		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 3,
 		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 5,
 		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 3,
@@ -65,7 +142,7 @@
 	},
 	.ad = {
 		.count = 2,
-		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+		.base = { 0x13100, 0x13300 },
 	},
 	.pp = {
 		.count = 3,
@@ -113,19 +190,27 @@
 	.pipe_vig = {
 		.count = 4,
 		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 4,
 		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x03200, 0x03600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 6,
 		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 4,
@@ -174,19 +259,27 @@
 	.pipe_vig = {
 		.count = 1,
 		.base = { 0x05000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 2,
 		.base = { 0x15000, 0x17000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
 		.base = { 0x25000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x45000, 0x48000 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 1,
@@ -203,13 +296,90 @@
 	.max_clk = 320000000,
 };
 
-static const struct mdp5_cfg_handler cfg_handlers[] = {
-	{ .revision = 0, .config = { .hw = &msm8x74_config } },
-	{ .revision = 2, .config = { .hw = &msm8x74_config } },
-	{ .revision = 3, .config = { .hw = &apq8084_config } },
-	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+const struct mdp5_cfg_hw msm8x94_config = {
+	.name = "msm8x94",
+	.mdp = {
+		.count = 1,
+		.base = { 0x01000 },
+	},
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4,
+			[SSPP_VIG2] =  7, [SSPP_VIG3] = 19,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+			[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+		},
+		.reserved_state[0] = GENMASK(23, 0),	/* first 24 MMBs */
+		.reserved = {
+			 [1] = 1,  [4] = 1,  [7] = 1, [19] = 1,
+			[16] = 5, [17] = 5, [18] = 5, [22] = 5,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+		.flush_hw_mask = 0xf0ffffff,
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x25000, 0x27000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+		.nb_stages = 8,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x79000, 0x79800, 0x7a000 },
+	},
+	.pp = {
+		.count = 4,
+		.base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+	},
+	.intf = {
+		.base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 320000000,
 };
 
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+	{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
+	{ .revision = 3, .config = { .hw = &apq8084_config } },
+	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+	{ .revision = 9, .config = { .hw = &msm8x94_config } },
+};
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349ab..efb918d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@
 struct mdp5_lm_block {
 	MDP5_SUB_BLOCK_DEFINITION;
 	uint32_t nb_stages;		/* number of stages per blender */
+	uint32_t max_width;		/* Maximum output resolution */
+	uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t caps;			/* pipe capabilities */
 };
 
 struct mdp5_ctl_block {
@@ -70,9 +77,9 @@
 	struct mdp5_sub_block mdp;
 	struct mdp5_smp_block smp;
 	struct mdp5_ctl_block ctl;
-	struct mdp5_sub_block pipe_vig;
-	struct mdp5_sub_block pipe_rgb;
-	struct mdp5_sub_block pipe_dma;
+	struct mdp5_pipe_block pipe_vig;
+	struct mdp5_pipe_block pipe_rgb;
+	struct mdp5_pipe_block pipe_dma;
 	struct mdp5_lm_block  lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16..8e6c9b5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@
 	struct mdp5_interface intf;
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
 
@@ -210,13 +212,14 @@
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+				mdp5_cmd_enc->ctl);
 }
 
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@
 static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@
 
 /* initialize command mode encoder */
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@
 
 	memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
 	encoder = &mdp5_cmd_enc->base;
+	mdp5_cmd_enc->ctl = ctl;
 
 	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
 			DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e..7f9f4ac 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@
 
 	if (mdp5_crtc->ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
-		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
 		mdp5_crtc->ctl = NULL;
 	}
 }
@@ -196,13 +195,9 @@
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, the border color is enabled as the base
+ * layer.  All other layers are then blended according to the stage
+ * assigned to them in mdp5_crtc_atomic_check.
  */
 static void blend_setup(struct drm_crtc *crtc)
 {
@@ -210,9 +205,14 @@
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	const struct mdp5_cfg_hw *hw_cfg;
-	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+	const struct mdp_format *format;
+	uint32_t lm = mdp5_crtc->lm;
+	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-#define blender(stage)	((stage) - STAGE_BASE)
+	uint8_t stage[STAGE_MAX + 1];
+	int i, plane_cnt = 0;
+#define blender(stage)	((stage) - STAGE0)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
@@ -222,33 +222,73 @@
 	if (!mdp5_crtc->ctl)
 		goto out;
 
+	/* Collect all plane information */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		enum mdp_mixer_stage_id stage =
-			to_mdp5_plane_state(plane->state)->stage;
-
-		/*
-		 * Note: This cannot happen with current implementation but
-		 * we need to check this condition once z property is added
-		 */
-		BUG_ON(stage > hw_cfg->lm.nb_stages);
-
-		/* LM */
-		mdp5_write(mdp5_kms,
-				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
-				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
-		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
-				blender(stage)), 0xff);
-		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
-				blender(stage)), 0x00);
-		/* CTL */
-		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
-		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
-				pipe2name(mdp5_plane_pipe(plane)), stage);
+		pstate = to_mdp5_plane_state(plane->state);
+		pstates[pstate->stage] = pstate;
+		stage[pstate->stage] = mdp5_plane_pipe(plane);
+		plane_cnt++;
 	}
 
-	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
-	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+	/*
+	 * If there is no base layer, enable border color.
+	 * Although it's not possible in current blend logic,
+	 * put it here as a reminder.
+	 */
+	if (!pstates[STAGE_BASE] && plane_cnt) {
+		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+		DBG("Border Color is enabled");
+	}
+
+	/* Program the blend configuration for each staged plane */
+	for (i = STAGE0; i <= STAGE_MAX; i++) {
+		if (!pstates[i])
+			continue;
+
+		format = to_mdp_format(
+			msm_framebuffer_format(pstates[i]->base.fb));
+		plane = pstates[i]->base.plane;
+		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+		fg_alpha = pstates[i]->alpha;
+		bg_alpha = 0xFF - pstates[i]->alpha;
+		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+		if (format->alpha_enable && pstates[i]->premultiplied) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		} else if (format->alpha_enable) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		}
+
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+				blender(i)), blend_op);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+				blender(i)), fg_alpha);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+				blender(i)), bg_alpha);
+	}
+
+	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
 
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -339,25 +379,19 @@
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	struct drm_device *dev = crtc->dev;
-	struct plane_state pstates[STAGE3 + 1];
+	struct plane_state pstates[STAGE_MAX + 1];
+	const struct mdp5_cfg_hw *hw_cfg;
 	int cnt = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* request a free CTL, if none is already allocated for this CRTC */
-	if (state->enable && !mdp5_crtc->ctl) {
-		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
-		if (WARN_ON(!mdp5_crtc->ctl))
-			return -EINVAL;
-	}
-
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane(plane, state) {
 		struct drm_plane_state *pstate;
-
-		if (cnt >= ARRAY_SIZE(pstates)) {
+		if (cnt >= (hw_cfg->lm.nb_stages)) {
 			dev_err(dev->dev, "too many planes!\n");
 			return -EINVAL;
 		}
@@ -369,13 +403,13 @@
 		 */
 		if (!pstate)
 			pstate = plane->state;
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
 		cnt++;
 	}
 
+	/* assign stages based on the sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
 	for (i = 0; i < cnt; i++) {
@@ -388,13 +422,15 @@
 	return 0;
 }
 
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	DBG("%s: begin", mdp5_crtc->name);
 }
 
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -691,8 +727,8 @@
 	complete_flip(crtc, file);
 }
 
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +751,8 @@
 
 	mdp_irq_update(&mdp5_kms->base);
 
-	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	mdp5_crtc->ctl = ctl;
+	mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +761,6 @@
 	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
 }
 
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -774,7 +805,5 @@
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp5_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
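
For reference, the flag combinations chosen in blend_setup() map onto the standard compositing equations (this restates common alpha-blending definitions, not new driver behavior):

	/*
	 * Straight alpha (format has alpha, not premultiplied):
	 *   FG_ALPHA(FG_PIXEL) | BG_ALPHA(FG_PIXEL) | BG_INV_ALPHA
	 *   out = fg.a * fg.rgb + (1 - fg.a) * bg.rgb
	 *
	 * Premultiplied alpha:
	 *   FG_ALPHA(FG_CONST) | BG_ALPHA(FG_PIXEL) | BG_INV_ALPHA
	 *   out = fg.rgb + (1 - fg.a) * bg.rgb
	 *
	 * A constant plane alpha != 0xff additionally scales these terms
	 * through the *_MOD_ALPHA flag combinations.
	 */
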
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f2..4e81ca4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
 /*
  * CTL - MDP Control Pool Manager
  *
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
  *
  * They are intended to be used for data path configuration.
  * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
  *
  * In certain use cases (high-resolution dual pipe), one single CTL can be
  * shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
  */
 
+#define CTL_STAT_BUSY		0x1
+#define CTL_STAT_BOOKED		0x2
+
 struct op_mode {
 	struct mdp5_interface intf;
 
@@ -46,8 +45,8 @@
 	u32 id;
 	int lm;
 
-	/* whether this CTL has been allocated or not: */
-	bool busy;
+	/* CTL status bitmask */
+	u32 status;
 
 	/* Operation Mode Configuration for the Pipeline */
 	struct op_mode pipeline;
@@ -61,7 +60,10 @@
 
 	bool cursor_on;
 
-	struct drm_crtc *crtc;
+	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
+	bool flush_pending;
+
+	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
 struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@
 	/* to filter out non-present bits in the current hardware config */
 	u32 flush_hw_mask;
 
+	/* status for single FLUSH */
+	bool single_flush_supported;
+	u32 single_flush_pending_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -168,11 +174,21 @@
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+		struct mdp5_interface *intf, int lm)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 
+	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+		dev_err(mdp5_kms->dev->dev,
+			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
+			ctl->id, ctl->pipeline.intf.num, intf->num);
+		return -EINVAL;
+	}
+
+	ctl->lm = lm;
+
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
 	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
 	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl->cursor_on = enable;
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
 	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
-	ctl->cursor_on = enable;
 
 	return 0;
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+	default:	return 0;
+	}
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	if (stage < STAGE6)
+		return 0;
+
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+	default:	return 0;
+	}
+}
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags)
 {
 	unsigned long flags;
+	u32 blend_cfg = 0, blend_ext_cfg = 0;
+	int i, start_stage;
 
-	if (ctl->cursor_on)
-		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
-	else
-		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+		start_stage = STAGE0;
+		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+	} else {
+		start_stage = STAGE_BASE;
+	}
+
+	for (i = start_stage; i < start_stage + stage_cnt; i++) {
+		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
+		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+	}
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	if (ctl->cursor_on)
+		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+
+	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+		blend_cfg, blend_ext_cfg);
 
 	return 0;
 }
@@ -379,6 +451,31 @@
 	return sw_mask;
 }
 
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+		u32 *flush_id)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+	if (ctl->pair) {
+		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+		ctl->flush_pending = true;
+		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+		*flush_mask = 0;
+
+		if (ctl->pair->flush_pending) {
+			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
+			*flush_mask = ctl_mgr->single_flush_pending_mask;
+
+			ctl->flush_pending = false;
+			ctl->pair->flush_pending = false;
+			ctl_mgr->single_flush_pending_mask = 0;
+
+			DBG("Single FLUSH mask %x, ID %d", *flush_mask,
+				*flush_id);
+		}
+	}
+}
+
 /**
  * mdp5_ctl_commit() - Register Flush
  *
@@ -400,6 +497,8 @@
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
+	u32 flush_id = ctl->id;
+	u32 curr_ctl_flush_mask;
 
 	pipeline->start_mask &= ~flush_mask;
 
@@ -415,9 +514,13 @@
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
+	curr_ctl_flush_mask = flush_mask;
+
+	fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
 	if (flush_mask) {
 		spin_lock_irqsave(&ctl->hw_lock, flags);
-		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
@@ -426,7 +529,7 @@
 		refill_start_mask(ctl);
 	}
 
-	return flush_mask;
+	return curr_ctl_flush_mask;
 }
 
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@
 	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 }
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
-{
-	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	unsigned long flags;
-
-	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
-				ctl->id, ctl->busy);
-		return;
-	}
-
-	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
-	ctl->busy = false;
-	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
-
-	DBG("CTL %d released", ctl->id);
-}
-
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 {
 	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
 }
 
 /*
- * mdp5_ctl_request() - CTL dynamic allocation
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing, silently, if the hw doesn't support single FLUSH */
+	if (!ctl_mgr->single_flush_supported)
+		return 0;
+
+	if (!enable) {
+		ctlx->pair = NULL;
+		ctly->pair = NULL;
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+		return 0;
+	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		return -EINVAL;
+	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		return -EINVAL;
+	}
+
+	ctlx->pair = ctly;
+	ctly->pair = ctlx;
+
+	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+	return 0;
+}
+
+/*
+ * mdp5_ctl_request() - CTL allocation
  *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2, and an unbooked one for
+ * other INTFs.  If no CTL is available in the preferred category, allocate
+ * from the other one.
  *
- * @return first free CTL
+ * @return NULL if no CTL is available.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
-		struct drm_crtc *crtc)
+		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
+	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 	unsigned long flags;
 	int c;
 
 	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 
+	/* search the preferred category first */
 	for (c = 0; c < ctl_mgr->nctl; c++)
-		if (!ctl_mgr->ctls[c].busy)
-			break;
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	if (unlikely(c >= ctl_mgr->nctl)) {
-		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
-		goto unlock;
-	}
+	dev_warn(ctl_mgr->dev->dev,
+		"fall back to the other CTL category for INTF %d!\n", intf_num);
 
+	match ^= CTL_STAT_BOOKED;
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
+
+	dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
+	goto unlock;
+
+found:
 	ctl = &ctl_mgr->ctls[c];
-
-	ctl->lm = mdp5_crtc_get_lm(crtc);
-	ctl->crtc = crtc;
-	ctl->busy = true;
+	ctl->pipeline.intf.num = intf_num;
+	ctl->lm = -1;
+	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
@@ -515,9 +644,11 @@
 }
 
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
+	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
@@ -551,14 +682,28 @@
 		if (WARN_ON(!ctl_cfg->base[c])) {
 			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
+			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
 		}
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->busy = false;
+		ctl->status = 0;
 		spin_lock_init(&ctl->hw_lock);
 	}
+
+	/*
+	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
+	 * DSI interfaces to support the single FLUSH feature: both CTL0 and
+	 * CTL1 are flushed by a single write to CTL0's FLUSH register, which
+	 * keeps the two DSI pipes in sync.
+	 * Single FLUSH is supported from hw rev v3.0.
+	 */
+	if (rev >= 3) {
+		ctl_mgr->single_flush_supported = true;
+		/* Reserve CTL0/1 for INTF1/2 */
+		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+	}
 	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 
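Taken together, booking and pairing give the following dual DSI flow; a condensed sketch of what the mdp5_kms.c and mdp5_encoder.c changes below do:

	/* INTF1 and INTF2 each prefer one of the booked CTLs (CTL0/CTL1)... */
	struct mdp5_ctl *ctl0 = mdp5_ctlm_request(ctlm, 1);
	struct mdp5_ctl *ctl1 = mdp5_ctlm_request(ctlm, 2);

	/*
	 * ...which are then ganged so one write to CTL0's FLUSH register
	 * commits both pipes (see fix_for_single_flush() above).
	 */
	mdp5_ctl_pair(ctl0, ctl1, true);
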
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228..96148c6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
  */
 struct mdp5_ctl_manager;
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 
@@ -32,49 +32,32 @@
  * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+				int lm);
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
-
-/*
- * blend_cfg (LM blender config):
- *
- * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
- * are being blended according to their stage (z-order), through @blend_cfg arg.
- */
-static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
-		enum mdp_mixer_stage_id stage)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
-	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
-	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
-	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
-	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
-	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
-	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
-	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
-	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
-	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
-	default:	return 0;
-	}
-}
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
 /*
  * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
  *
- * @blend_cfg: see LM blender config definition below
+ * @stage: array holding the pipe number for each stage
+ * @stage_cnt: number of valid stages in the @stage array
+ * @ctl_blend_op_flags: blender operation mode flags
  *
  * Note:
  * CTL registers need to be flushed after calling this function
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@
 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
 
 
 #endif /* __MDP5_CTL_H__ */
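
A minimal usage sketch of the reworked blend interface, mirroring blend_setup() in mdp5_crtc.c (the exact argument of the flush-mask helper is assumed here):

	u8 stage[STAGE_MAX + 1] = {
		[STAGE_BASE] = SSPP_RGB0,	/* base layer */
		[STAGE0]     = SSPP_VIG0,	/* first blended layer */
	};

	mdp5_ctl_blend(ctl, stage, 2, 0);	/* no border-out: stages start at STAGE_BASE */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_ctl(ctl));
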
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08..c9e32b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@
 	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
 
@@ -222,14 +224,15 @@
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+				mdp5_encoder->ctl);
 }
 
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	int lm = mdp5_crtc_get_lm(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
@@ -294,6 +297,7 @@
 					struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -316,12 +320,13 @@
 
 	/* Make sure clocks are on when connectors calling this function. */
 	mdp5_enable(mdp5_kms);
-	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
 	mdp5_disable(mdp5_kms);
 
 	return 0;
@@ -329,7 +334,7 @@
 
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@
 
 	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
 	encoder = &mdp5_encoder->base;
+	mdp5_encoder->ctl = ctl;
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6..b1f73be 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
 #include "msm_drv.h"
 #include "mdp5_kms.h"
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
+	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
+		irqmask ^ (irqmask & old_irqmask));
 	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
 }
 
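The CLEAR write added above acks any interrupt that is about to be newly enabled, so a stale latched status bit cannot fire the moment it is unmasked; the expression is a plain bit identity:

	/* bits set in irqmask but not in old_irqmask, i.e. newly enabled */
	irqmask ^ (irqmask & old_irqmask)	/* == irqmask & ~old_irqmask */
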
@@ -71,9 +74,10 @@
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+	enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
+	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
 
 	VERB("status=%08x", status);
@@ -112,15 +116,24 @@
 
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), true);
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), false);
+	mdp5_disable(mdp5_kms);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e253db5..047cb04 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -177,7 +177,8 @@
 	clk_disable_unprepare(mdp5_kms->ahb_clk);
 	clk_disable_unprepare(mdp5_kms->axi_clk);
 	clk_disable_unprepare(mdp5_kms->core_clk);
-	clk_disable_unprepare(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_disable_unprepare(mdp5_kms->lut_clk);
 
 	return 0;
 }
@@ -189,14 +190,15 @@
 	clk_prepare_enable(mdp5_kms->ahb_clk);
 	clk_prepare_enable(mdp5_kms->axi_clk);
 	clk_prepare_enable(mdp5_kms->core_clk);
-	clk_prepare_enable(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_prepare_enable(mdp5_kms->lut_clk);
 
 	return 0;
 }
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 		enum mdp5_intf_type intf_type, int intf_num,
-		enum mdp5_intf_mode intf_mode)
+		enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -209,9 +211,9 @@
 
 	if ((intf_type == INTF_DSI) &&
 		(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
-		encoder = mdp5_cmd_encoder_init(dev, &intf);
+		encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
 	else
-		encoder = mdp5_encoder_init(dev, &intf);
+		encoder = mdp5_encoder_init(dev, &intf, ctl);
 
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
@@ -249,6 +251,8 @@
 	const struct mdp5_cfg_hw *hw_cfg =
 					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+	struct mdp5_ctl *ctl;
 	struct drm_encoder *encoder;
 	int ret = 0;
 
@@ -259,8 +263,14 @@
 		if (!priv->edp)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -272,8 +282,14 @@
 		if (!priv->hdmi)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -298,14 +314,20 @@
 		if (!priv->dsi[dsi_id])
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
 			mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
 				MDP5_INTF_DSI_MODE_COMMAND :
 				MDP5_INTF_DSI_MODE_VIDEO;
 			dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
-							intf_num, mode);
-			if (IS_ERR(dsi_encs)) {
-				ret = PTR_ERR(dsi_encs);
+							intf_num, mode, ctl);
+			if (IS_ERR(dsi_encs[i])) {
+				ret = PTR_ERR(dsi_encs[i]);
 				break;
 			}
 		}
@@ -327,9 +349,12 @@
 	static const enum mdp5_pipe crtcs[] = {
 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
 	};
-	static const enum mdp5_pipe pub_planes[] = {
+	static const enum mdp5_pipe vig_planes[] = {
 			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
 	};
+	static const enum mdp5_pipe dma_planes[] = {
+			SSPP_DMA0, SSPP_DMA1,
+	};
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	const struct mdp5_cfg_hw *hw_cfg;
@@ -350,7 +375,7 @@
 		struct drm_crtc *crtc;
 
 		plane = mdp5_plane_init(dev, crtcs[i], true,
-				hw_cfg->pipe_rgb.base[i]);
+			hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -368,16 +393,30 @@
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
-	/* Construct public planes: */
+	/* Construct video planes: */
 	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
 		struct drm_plane *plane;
 
-		plane = mdp5_plane_init(dev, pub_planes[i], false,
-				hw_cfg->pipe_vig.base[i]);
+		plane = mdp5_plane_init(dev, vig_planes[i], false,
+			hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct %s plane: %d\n",
-					pipe2name(pub_planes[i]), ret);
+					pipe2name(vig_planes[i]), ret);
+			goto fail;
+		}
+	}
+
+	/* DMA planes */
+	for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
+		struct drm_plane *plane;
+
+		plane = mdp5_plane_init(dev, dma_planes[i], false,
+				hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct %s plane: %d\n",
+					pipe2name(dma_planes[i]), ret);
 			goto fail;
 		}
 	}
@@ -489,7 +528,7 @@
 		goto fail;
 	ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
 	if (ret)
-		goto fail;
+		DBG("failed to get (optional) lut_clk clock");
 	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
 	if (ret)
 		goto fail;
@@ -521,7 +560,7 @@
 		goto fail;
 	}
 
-	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
 	if (IS_ERR(mdp5_kms->ctlm)) {
 		ret = PTR_ERR(mdp5_kms->ctlm);
 		mdp5_kms->ctlm = NULL;
@@ -577,6 +616,11 @@
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = config->hw->lm.max_width;
+	dev->mode_config.max_height = config->hw->lm.max_height;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e79ac09..0bb6242 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@
 struct mdp5_plane_state {
 	struct drm_plane_state base;
 
-	/* "virtual" zpos.. we calculate actual mixer-stage at runtime
-	 * by sorting the attached planes by zpos and then assigning
-	 * mixer stage lowest to highest.  Private planes get default
-	 * zpos of zero, and public planes a unique value that is
-	 * greater than zero.  This way, things work out if a naive
-	 * userspace assigns planes to a crtc without setting zpos.
-	 */
-	int zpos;
+	/* aligned with property */
+	uint8_t premultiplied;
+	uint8_t zpos;
+	uint8_t alpha;
 
-	/* the actual mixer stage, calculated in crtc->atomic_check()
-	 * NOTE: this should move to mdp5_crtc_state, when that exists
-	 */
+	/* assigned by crtc blender */
 	enum mdp_mixer_stage_id stage;
 
 	/* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@
 int mdp5_disable(struct mdp5_kms *mdp5_kms);
 int mdp5_enable(struct mdp5_kms *mdp5_kms);
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp5_irq_preinstall(struct msm_kms *kms);
 int mdp5_irq_postinstall(struct msm_kms *kms);
 void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,60 +197,38 @@
 int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
-static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0:
-	case SSPP_VIG1:
-	case SSPP_VIG2:
-	case SSPP_VIG3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static inline
-uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe));
-}
-
-void mdp5_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 void mdp5_plane_complete_commit(struct drm_plane *plane,
 	struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+		enum mdp5_pipe pipe, bool private_plane,
+		uint32_t reg_offset, uint32_t caps);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 #else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(
-			struct drm_device *dev, struct mdp5_interface *intf)
+static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	return ERR_PTR(-EINVAL);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 2227556..07fb62f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -26,6 +26,7 @@
 
 	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
 	uint32_t reg_offset;
+	uint32_t caps;
 
 	uint32_t flush_mask;	/* used to commit pipe registers */
 
@@ -40,6 +41,7 @@
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
 		uint32_t src_w, uint32_t src_h);
+
 static void set_scanout_locked(struct drm_plane *plane,
 		struct drm_framebuffer *fb);
 
@@ -64,18 +66,122 @@
 	kfree(mdp5_plane);
 }
 
-/* helper to install properties which are common to planes and crtcs */
-void mdp5_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj)
+static void mdp5_plane_install_rotation_property(struct drm_device *dev,
+		struct drm_plane *plane)
 {
-	// XXX
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
+		!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
+		return;
+
+	if (!dev->mode_config.rotation_property)
+		dev->mode_config.rotation_property =
+			drm_mode_create_rotation_property(dev,
+			BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+
+	if (dev->mode_config.rotation_property)
+		drm_object_attach_property(&plane->base,
+			dev->mode_config.rotation_property,
+			0);
 }
 
-int mdp5_plane_set_property(struct drm_plane *plane,
-		struct drm_property *property, uint64_t val)
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj)
 {
-	// XXX
-	return -EINVAL;
+	struct drm_device *dev = plane->dev;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+		prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+		if (!prop) { \
+			prop = drm_property_##fnc(dev, 0, #name, \
+				##__VA_ARGS__); \
+			if (!prop) { \
+				dev_warn(dev->dev, \
+					"Create property %s failed\n", \
+					#name); \
+				return; \
+			} \
+			dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+		} \
+		drm_object_attach_property(&plane->base, prop, init_val); \
+	} while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_enum, name##_prop_enum_list, \
+				ARRAY_SIZE(name##_prop_enum_list))
+
+	INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+	mdp5_plane_install_rotation_property(dev, plane);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+}
+
+static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
+		struct drm_plane_state *state, struct drm_property *property,
+		uint64_t val)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			pstate->name = (type)val; \
+			DBG("Set property %s %d", #name, (type)val); \
+			goto done; \
+		} \
+	} while (0)
+
+	SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef SET_PROPERTY
+}
+
+static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
+		const struct drm_plane_state *state,
+		struct drm_property *property, uint64_t *val)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			*val = pstate->name; \
+			DBG("Get property %s %lld", #name, *val); \
+			goto done; \
+		} \
+	} while (0)
+
+	GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef GET_PROPERTY
 }
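
The INSTALL_*/SET_/GET_PROPERTY macros all key off the drm_property
pointers cached in msm_drm_private::plane_property, so extending the
property set is mechanical. This patch only wires up zpos, although
PLANE_PROP_ALPHA and the mdp5_plane_state alpha field already exist; a
hypothetical wiring of alpha (not part of this patch) would look like:

	/* hypothetical extension, not in this patch */
	/* in mdp5_plane_install_properties(): */
	INSTALL_RANGE_PROPERTY(alpha, ALPHA, 0, 255, 255);

	/* in mdp5_plane_atomic_set_property(): */
	SET_PROPERTY(alpha, ALPHA, uint8_t);

	/* in mdp5_plane_atomic_get_property(): */
	GET_PROPERTY(alpha, ALPHA, uint8_t);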
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -88,11 +194,15 @@
 	kfree(to_mdp5_plane_state(plane->state));
 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
 
-	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-		mdp5_state->zpos = 0;
-	} else {
-		mdp5_state->zpos = 1 + drm_plane_index(plane);
-	}
+	/* assign default blend parameters */
+	mdp5_state->alpha = 255;
+	mdp5_state->premultiplied = 0;
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		mdp5_state->zpos = STAGE_BASE;
+	else
+		mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
 	mdp5_state->base.plane = plane;
 
 	plane->state = &mdp5_state->base;
@@ -131,7 +241,9 @@
 		.update_plane = drm_atomic_helper_update_plane,
 		.disable_plane = drm_atomic_helper_disable_plane,
 		.destroy = mdp5_plane_destroy,
-		.set_property = mdp5_plane_set_property,
+		.set_property = drm_atomic_helper_plane_set_property,
+		.atomic_set_property = mdp5_plane_atomic_set_property,
+		.atomic_get_property = mdp5_plane_atomic_get_property,
 		.reset = mdp5_plane_reset,
 		.atomic_duplicate_state = mdp5_plane_duplicate_state,
 		.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -164,10 +276,44 @@
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct drm_plane_state *old_state = plane->state;
+	const struct mdp_format *format;
+	bool vflip, hflip;
 
 	DBG("%s: check (%d -> %d)", mdp5_plane->name,
 			plane_enabled(old_state), plane_enabled(state));
 
+	if (plane_enabled(state)) {
+		format = to_mdp_format(msm_framebuffer_format(state->fb));
+		if (MDP_FORMAT_IS_YUV(format) &&
+			!pipe_supports_yuv(mdp5_plane->caps)) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support YUV\n");
+
+			return -EINVAL;
+		}
+
+		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
+			(((state->src_w >> 16) != state->crtc_w) ||
+			((state->src_h >> 16) != state->crtc_h))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+				state->src_w >> 16, state->src_h >> 16,
+				state->crtc_w, state->crtc_h);
+
+			return -EINVAL;
+		}
+
+		hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
+		vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
+			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support flip\n");
+
+			return -EINVAL;
+		}
+	}
+
 	if (plane_enabled(state) && plane_enabled(old_state)) {
 		/* we cannot change SMP block configuration during scanout: */
 		bool full_modeset = false;
@@ -346,16 +492,21 @@
 	return 0;
 }
 
-static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scalex_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasex_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasex_step;
 	unsigned int hsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasex_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	hsub = drm_format_horz_chroma_subsampling(pixel_format);
 
@@ -365,16 +516,21 @@
 	return 0;
 }
 
-static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scaley_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasey_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasey_step;
 	unsigned int vsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasey_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	vsub = drm_format_vert_chroma_subsampling(pixel_format);
 
@@ -384,28 +540,38 @@
 	return 0;
 }
 
-static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
+static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
+		uint32_t src, uint32_t dest, bool hor)
 {
-	uint32_t filter;
+	uint32_t y_filter =   (src <= dest) ? SCALE_FILTER_CA  : SCALE_FILTER_PCMN;
+	uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
+			SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t value = 0;
 
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
+	} else if (src != dest) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
+	}
 
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
-}
-
-static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
-{
-	uint32_t filter;
-
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
-
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+	return value;
 }
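
get_scale_config() folds the old per-axis helpers into one and makes the
YUV case explicit: for formats with sub-sampled chroma (CHROMA_420,
CHROMA_H2V1) the scaler is enabled even at 1:1, since chroma always needs
up-sampling, while other formats only scale when src != dest. Worked
example, 1280 -> 1920 horizontal on NV12 (CHROMA_420): luma gets
SCALE_FILTER_CA (src <= dest), alpha SCALE_FILTER_BIL, and chroma
SCALE_FILTER_BIL since 1280/2 = 640 <= 1920; the same geometry on an RGB
source takes the src != dest branch with SCALE_FILTER_BIL throughout. The
selection, restated as standalone helpers (illustrative only):

	enum filt { PCMN, BIL, CA };

	static enum filt luma_filt(unsigned s, unsigned d)   { return s <= d ? CA : PCMN; }
	static enum filt alpha_filt(unsigned s, unsigned d)  { return s <= d ? BIL : PCMN; }
	static enum filt chroma_filt(unsigned s, unsigned d) { return s / 2 <= d ? BIL : PCMN; }
	/* luma_filt(1280, 1920) == CA; alpha_filt and chroma_filt both give BIL */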
 
 static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -416,8 +582,8 @@
 		uint32_t src_w, uint32_t src_h)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct drm_plane_state *pstate = plane->state;
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	struct device *dev = mdp5_kms->dev->dev;
 	enum mdp5_pipe pipe = mdp5_plane->pipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
@@ -425,6 +591,7 @@
 	uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
+	bool vflip, hflip;
 	unsigned long flags;
 	int ret;
 
@@ -449,7 +616,7 @@
 
 	/* Request some memory from the SMP: */
 	ret = mdp5_smp_request(mdp5_kms->smp,
-			mdp5_plane->pipe, fb->pixel_format, src_w);
+			mdp5_plane->pipe, format, src_w, false);
 	if (ret)
 		return ret;
 
@@ -461,29 +628,23 @@
 	 */
 	mdp5_smp_configure(mdp5_kms->smp, pipe);
 
+	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+	if (ret)
+		return ret;
+
+	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+	if (ret)
+		return ret;
+
+	/* TODO calc hdecm, vdecm */
+
 	/* SCALE is used to both scale and up-sample chroma components */
+	config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
+	config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
+	DBG("scale config = %x", config);
 
-	if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc hdecm */
-		ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
-		if (ret) {
-			dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
-					src_w, crtc_w, ret);
-			return ret;
-		}
-		config |= get_scalex_config(src_w, crtc_w);
-	}
-
-	if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc vdecm */
-		ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
-		if (ret) {
-			dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
-					src_h, crtc_h, ret);
-			return ret;
-		}
-		config |= get_scaley_config(src_h, crtc_h);
-	}
+	hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
+	vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
@@ -516,7 +677,7 @@
 			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
 			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
 			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
-			MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
 			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -526,29 +687,35 @@
 			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
 			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
 
 	/* not using secure mode: */
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
-			phasex_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
-			phasey_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
-			phasex_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
-			phasey_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
-			MDP5_PIPE_DECIMATION_VERT(vdecm) |
-			MDP5_PIPE_DECIMATION_HORZ(hdecm));
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+	if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+				phasex_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+				phasey_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+				phasex_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+				phasey_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+				MDP5_PIPE_DECIMATION_VERT(vdecm) |
+				MDP5_PIPE_DECIMATION_HORZ(hdecm));
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+	}
 
-	if (MDP_FORMAT_IS_YUV(format))
-		csc_enable(mdp5_kms, pipe,
-				mdp_get_default_csc_cfg(CSC_YUV2RGB));
-	else
-		csc_disable(mdp5_kms, pipe);
+	if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+		if (MDP_FORMAT_IS_YUV(format))
+			csc_enable(mdp5_kms, pipe,
+					mdp_get_default_csc_cfg(CSC_YUV2RGB));
+		else
+			csc_disable(mdp5_kms, pipe);
+	}
 
 	set_scanout_locked(plane, fb);
 
@@ -599,7 +766,8 @@
 
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
+		uint32_t caps)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
@@ -616,9 +784,11 @@
 
 	mdp5_plane->pipe = pipe;
 	mdp5_plane->name = pipe2name(pipe);
+	mdp5_plane->caps = caps;
 
-	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
-			ARRAY_SIZE(mdp5_plane->formats));
+	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+		ARRAY_SIZE(mdp5_plane->formats),
+		!pipe_supports_yuv(mdp5_plane->caps));
 
 	mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
 	mdp5_plane->reg_offset = reg_offset;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 64a27d8..563cca9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -90,6 +90,8 @@
 struct mdp5_smp {
 	struct drm_device *dev;
 
+	const struct mdp5_smp_block *cfg;
+
 	int blk_cnt;
 	int blk_size;
 
@@ -137,14 +139,12 @@
 		u32 cid, int nblks)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
-	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
 	int reserved;
 	unsigned long flags;
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	reserved = hw_cfg->smp.reserved[cid];
+	reserved = smp->cfg->reserved[cid];
 
 	spin_lock_irqsave(&smp->state_lock, flags);
 
@@ -209,12 +209,14 @@
  * decimated width.  I.e. SMP buffering sits downstream of decimation (which
  * presumably happens during the dma from scanout buffer).
  */
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	struct drm_device *dev = mdp5_kms->dev;
 	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
 	int i, hsub, nplanes, nlines, nblks, ret;
+	u32 fmt = format->base.pixel_format;
 
 	nplanes = drm_format_num_planes(fmt);
 	hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -222,6 +224,21 @@
 	/* different if BWC (compressed framebuffer?) enabled: */
 	nlines = 2;
 
+	/* Newer MDPs have split/packing logic, which fetches sub-sampled
+	 * U and V components (splits them from Y if necessary) and packs
+	 * them together, and writes to SMP using a single client.
+	 */
+	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+		fmt = DRM_FORMAT_NV24;
+		nplanes = 2;
+
+		/* if decimation is enabled, HW decimates less on the
+		 * sub-sampled chroma components
+		 */
+		if (hdecim && (hsub > 1))
+			hsub = 1;
+	}
+
 	for (i = 0, nblks = 0; i < nplanes; i++) {
 		int n, fetch_stride, cpp;
 
@@ -388,6 +405,7 @@
 	}
 
 	smp->dev = dev;
+	smp->cfg = cfg;
 	smp->blk_cnt = cfg->mmb_count;
 	smp->blk_size = cfg->mmb_size;
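
On hardware revisions with the split/packing block (rev > 0), both chroma
components are fetched full-width through a single SMP client, so the
request is recomputed as if the format were DRM_FORMAT_NV24: two planes, no
chroma subsampling. The new hdecim parameter covers the related quirk that
decimation shrinks chroma less, dropping horizontal subsampling from the
calculation. That adjustment in isolation (a sketch of the logic above, not
the driver function):

	static void smp_adjust_for_packing(int rev, int chroma_sample, bool hdecim,
					   u32 *fmt, int *nplanes, int *hsub)
	{
		if (rev > 0 && chroma_sample > CHROMA_FULL) {
			*fmt = DRM_FORMAT_NV24;	/* 2-plane, full-width chroma */
			*nplanes = 2;
			if (hdecim && *hsub > 1)
				*hsub = 1;	/* HW decimates chroma less */
		}
	}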
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 5b6c236..20b87e8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -39,7 +39,8 @@
 struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
 void  mdp5_smp_destroy(struct mdp5_smp *smp);
 
-int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim);
 void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036..4f792c4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@
 
 
 enum mdp_chroma_samp_type {
-	CHROMA_RGB = 0,
+	CHROMA_FULL = 0,
 	CHROMA_H2V1 = 1,
 	CHROMA_H1V2 = 2,
 	CHROMA_420 = 3,
@@ -65,6 +65,10 @@
 	STAGE1 = 3,
 	STAGE2 = 4,
 	STAGE3 = 5,
+	STAGE4 = 6,
+	STAGE5 = 7,
+	STAGE6 = 8,
+	STAGE_MAX = 8,
 };
 
 enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524d..1c2caff 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@
 	},
 };
 
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
 		.base = { .pixel_format = DRM_FORMAT_ ## name }, \
 		.bpc_a = BPC ## a ## A,                          \
 		.bpc_r = BPC ## r,                               \
@@ -83,7 +83,8 @@
 		.cpp = c,                                        \
 		.unpack_count = cnt,                             \
 		.fetch_type = fp,                                \
-		.chroma_sample = cs                              \
+		.chroma_sample = cs,                             \
+		.is_yuv = yuv,                                   \
 }
 
 #define BPC0A 0
@@ -95,30 +96,49 @@
 static const struct mdp_format formats[] = {
 	/*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt ... */
 	FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(ABGR8888, 8, 8, 8, 8,  2, 0, 1, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGBA8888, 8, 8, 8, 8,  3, 1, 0, 2,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGRA8888, 8, 8, 8, 8,  3, 2, 0, 1,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 
 	/* --- RGB formats above / YUV formats below this line --- */
 
+	/* 2 plane YUV */
 	FMT(NV12,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
 	FMT(NV21,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+	FMT(NV16,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	FMT(NV61,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	/* 1 plane YUV */
+	FMT(VYUY,     0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(UYVY,     0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YUYV,     0, 8, 8, 8,  0, 1, 0, 2,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YVYU,     0, 8, 8, 8,  0, 2, 0, 1,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	/* 3 plane YUV */
+	FMT(YUV420,   0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
+	FMT(YVU420,   0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c24..6428730 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@
 	list_for_each_entry(irq, &mdp_kms->irq_list, node)
 		irqmask |= irq->irqmask;
 
-	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+	mdp_kms->cur_irq_mask = irqmask;
 }
 
 /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428c..46a94e7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@
 
 struct mdp_kms_funcs {
 	struct msm_kms_funcs base;
-	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 };
 
 struct mdp_kms {
@@ -42,6 +43,7 @@
 	bool in_irq;
 	struct list_head irq_list;    /* list of mdp4_irq */
 	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+	uint32_t cur_irq_mask;        /* current irq mask */
 };
 #define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
 
@@ -90,13 +92,27 @@
 	uint8_t cpp, unpack_count;
 	enum mdp_fetch_type fetch_type;
 	enum mdp_chroma_samp_type chroma_sample;
+	bool is_yuv;
 };
 #define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP			BIT(0)
+#define MDP_PIPE_CAP_VFLIP			BIT(1)
+#define MDP_PIPE_CAP_SCALE			BIT(2)
+#define MDP_PIPE_CAP_CSC			BIT(3)
+#define MDP_PIPE_CAP_DECIMATION			BIT(4)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+	return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+		(pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
 enum csc_type {
 	CSC_RGB2RGB = 0,
 	CSC_YUV2RGB,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d3467b1..0339c5d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@
 	return val;
 }
 
+struct vblank_event {
+	struct list_head node;
+	int crtc_id;
+	bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+						struct msm_vblank_ctrl, work);
+	struct msm_drm_private *priv = container_of(vbl_ctrl,
+					struct msm_drm_private, vblank_ctrl);
+	struct msm_kms *kms = priv->kms;
+	struct vblank_event *vbl_ev, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+		if (vbl_ev->enable)
+			kms->funcs->enable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+		else
+			kms->funcs->disable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+
+		kfree(vbl_ev);
+
+		spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+					int crtc_id, bool enable)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev;
+	unsigned long flags;
+
+	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+	if (!vbl_ev)
+		return -ENOMEM;
+
+	vbl_ev->crtc_id = crtc_id;
+	vbl_ev->enable = enable;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+	queue_work(priv->wq, &vbl_ctrl->work);
+
+	return 0;
+}
+
 /*
  * DRM operations:
  */
@@ -125,6 +184,18 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev, *tmp;
+
+	/* We must cancel and clean up any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+	cancel_work_sync(&vbl_ctrl->work);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		kfree(vbl_ev);
+	}
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
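
The vblank enable/disable requests now go through vblank_ctrl_queue_work()
rather than calling the kms hooks directly: the mdp5 hooks bracket their
register writes with mdp5_enable()/mdp5_disable() (clk_prepare_enable(),
which may sleep), while the DRM core can invoke these callbacks from atomic
context. Hence the GFP_ATOMIC allocation, the spinlock-guarded event list,
and the drain above, which keeps a queued event from re-enabling an
interrupt after drm_irq_uninstall(). The deferral pattern in miniature (a
sketch under those assumptions, not the driver code):

	struct req  { struct list_head node; int arg; };
	struct ctrl { spinlock_t lock; struct list_head list; struct work_struct work; };

	static int queue_req(struct ctrl *c, int arg)
	{
		struct req *r = kzalloc(sizeof(*r), GFP_ATOMIC); /* caller may be atomic */
		unsigned long flags;

		if (!r)
			return -ENOMEM;
		r->arg = arg;

		spin_lock_irqsave(&c->lock, flags);
		list_add_tail(&r->node, &c->list);
		spin_unlock_irqrestore(&c->lock, flags);

		schedule_work(&c->work);	/* handled later in process context */
		return 0;
	}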
@@ -282,6 +353,9 @@
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->fence_cbs);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(dev);
 
@@ -331,10 +405,6 @@
 		}
 	}
 
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-	dev->mode_config.max_width = 2048;
-	dev->mode_config.max_height = 2048;
 	dev->mode_config.funcs = &mode_config_funcs;
 
 	ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@
 	if (!kms)
 		return -ENXIO;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+	return vblank_ctrl_queue_work(priv, crtc_id, true);
 }
 
 static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@
 	if (!kms)
 		return;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+	vblank_ctrl_queue_work(priv, crtc_id, false);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4ff0ec9..3be7a56 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
 #include <linux/list.h>
 #include <linux/iommu.h>
 #include <linux/types.h>
+#include <linux/of_graph.h>
 #include <asm/sizes.h>
 
 #ifndef CONFIG_OF
@@ -64,6 +65,19 @@
 	int dummy;
 };
 
+enum msm_mdp_plane_property {
+	PLANE_PROP_ZPOS,
+	PLANE_PROP_ALPHA,
+	PLANE_PROP_PREMULTIPLIED,
+	PLANE_PROP_MAX_NUM
+};
+
+struct msm_vblank_ctrl {
+	struct work_struct work;
+	struct list_head event_list;
+	spinlock_t lock;
+};
+
 struct msm_drm_private {
 
 	struct msm_kms *kms;
@@ -128,6 +142,9 @@
 	unsigned int num_connectors;
 	struct drm_connector *connectors[8];
 
+	/* Properties */
+	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
 		unsigned long size;
@@ -137,6 +154,8 @@
 		 */
 		struct drm_mm mm;
 	} vram;
+
+	struct msm_vblank_ctrl vblank_ctrl;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532..f97a196 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_mmap = msm_fbdev_mmap,
 
 	.fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@
 		goto fail_unlock;
 	}
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -155,7 +155,6 @@
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@
 
 	strcpy(fbi->fix.id, "msm");
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -191,7 +184,6 @@
 fail:
 
 	if (ret) {
-		framebuffer_release(fbi);
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct msm_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
 
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2b76566..a34b437 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -18,7 +18,6 @@
 ifdef CONFIG_X86
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
 endif
-nouveau-y += nouveau_agp.o
 nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
 nouveau-y += nouveau_drm.o
 nouveau-y += nouveau_hwmon.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c636142..82bd465 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@
 		int *burst, int *lwm)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index af7249c..78cb033 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@
 
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -80,17 +80,22 @@
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+
+		if (nvif_msec(&drm->device, 10,
+			if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
 
 		udelay(100);
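
nvif_msec() replaces the nvkm_timer_wait_eq() calls here: as used, it
re-evaluates the statement body until the body breaks out (condition met)
or the millisecond budget expires, returning a negative value on timeout.
An open-coded equivalent of the first wait (illustrative; the real macro
presumably polls the device timer rather than jiffies):

	static int wait_blank_low(struct nvif_object *device)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(10);

		while (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1) {
			if (time_after(jiffies, timeout))
				return -EBUSY;	/* blank state never changed */
			cpu_relax();
		}
		return 0;	/* bit cleared within 10ms */
	}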
@@ -128,7 +133,7 @@
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
@@ -231,8 +236,8 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_gpio *gpio = nvxx_gpio(device);
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -265,10 +270,10 @@
 	}
 
 	if (gpio) {
-		saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-		saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+		saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+		saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
 	}
 
 	msleep(4);
@@ -320,8 +325,8 @@
 	nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
 	if (gpio) {
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
 	}
 
 	return sample;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7cfb0cb..429ab5e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -485,7 +485,7 @@
 {
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
@@ -493,11 +493,11 @@
 	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
 	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
 		}
 	}
 #endif
@@ -624,8 +624,8 @@
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, 2);
-	struct nvkm_i2c_board_info info[] = {
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	struct nvkm_i2c_bus_probe info[] = {
 		{
 		    {
 		        .type = "sil164",
@@ -639,16 +639,15 @@
 	};
 	int type;
 
-	if (!nv_gf4_disp_arch(dev) || !port ||
-	    get_tmds_slave(encoder))
+	if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
 		return;
 
-	type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
+	type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
 	if (type < 0)
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type].dev);
+			     &bus->i2c, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4131be55..9e65008 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -47,7 +47,7 @@
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(nvif_object(&drm->device));
+	nvif_object_map(&drm->device.object);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -101,7 +101,9 @@
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-		nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
+		nv_encoder->i2c = bus ? &bus->i2c : NULL;
 	}
 
 	/* Save previous state */
@@ -151,7 +153,7 @@
 	nouveau_display(dev)->priv = NULL;
 	kfree(disp);
 
-	nvif_object_unmap(nvif_object(&drm->device));
+	nvif_object_unmap(&drm->device.object);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c910c5d..6c9a1e8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -172,7 +172,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	struct nvbios_init init = {
-		.subdev = nv_subdev(bios),
+		.subdev = &bios->subdev,
 		.bios = bios,
 		.offset = table,
 		.outp = outp,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 42e07af..956a833 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -165,8 +165,8 @@
 		       struct nvkm_pll_vals *pllvals)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_bios *bios = nvxx_bios(device);
+	struct nvif_object *device = &drm->device.object;
+	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	uint32_t reg1, pll1, pll2 = 0;
 	struct nvbios_pll pll_lim;
 	int ret;
@@ -660,8 +660,7 @@
 		  struct nv04_mode_state *state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nvif_object *device = &drm->device.object;
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -678,10 +677,10 @@
 		nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
 		nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -741,8 +740,14 @@
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nvif_msec(&drm->device, 650,
+				if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
+			nvif_msec(&drm->device, 650,
+				if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -765,7 +770,7 @@
 nv_save_state_palette(struct drm_device *dev, int head,
 		      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -784,7 +789,7 @@
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 			      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 6c79617..3bded60 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@
 static inline void NVWriteCRTC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@
 static inline void NVWriteRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
@@ -128,7 +128,7 @@
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,7 +165,7 @@
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t val;
 
@@ -181,7 +181,7 @@
 static inline void NVWritePRMVIO(struct drm_device *dev,
 					int head, uint32_t reg, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
@@ -194,14 +194,14 @@
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
@@ -209,7 +209,7 @@
 static inline void NVWriteVgaAttr(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
@@ -223,7 +223,7 @@
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
@@ -259,7 +259,7 @@
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (drm->device.info.chipset == 0x11)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f24985..aeebdd4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,8 @@
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	struct nvif_object *dev = &drm->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -118,7 +119,7 @@
 	if (format > 0xffff)
 		return -ERANGE;
 
-	if (dev->info.chipset >= 0x30) {
+	if (drm->device.info.chipset >= 0x30) {
 		if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
 			return -ERANGE;
 	} else {
@@ -173,7 +174,7 @@
 static int
 nv10_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
@@ -197,7 +198,7 @@
 static void
 nv10_set_params(struct nouveau_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
 	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
 	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
 		(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -261,7 +262,7 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(device);
 	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
-	int num_formats = ARRAY_SIZE(formats);
+	unsigned int num_formats = ARRAY_SIZE(formats);
 	int ret;
 
 	if (!plane)
@@ -346,7 +347,7 @@
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -426,7 +427,7 @@
 static int
 nv04_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 70e95cf..5345eb5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
 
 #include <drm/i2c/ch7006.h>
 
-static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = {
+static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
 	{
 		{
 			I2C_BOARD_INFO("ch7006", 0x75),
@@ -55,9 +55,13 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-
-	return i2c->identify(i2c, i2c_index, "TV encoder",
-			     nv04_tv_encoder_info, NULL, NULL);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
+	if (bus) {
+		return nvkm_i2c_bus_probe(bus, "TV encoder",
+					  nv04_tv_encoder_info,
+					  NULL, NULL);
+	}
+	return -ENODEV;
 }
 
 
@@ -205,7 +209,7 @@
 	struct drm_device *dev = connector->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
 	int type, ret;
 
 	/* Ensure that we can talk to this encoder */
@@ -231,7 +235,7 @@
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter,
+				   &bus->i2c,
 				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
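With the i2c rework above, the old per-port identify() hook becomes an explicit bus lookup followed by a probe against an nvkm_i2c_bus_probe table, and the i2c_adapter now lives at bus->i2c. A hedged sketch of the resulting calling pattern (the table and device names are illustrative):

	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
	int type = -ENODEV;

	if (bus)
		type = nvkm_i2c_bus_probe(bus, "my encoder", probe_table,
					  NULL, NULL);
	if (type < 0)
		return type;	/* no device answered on this bus */
	/* bus->i2c is the struct i2c_adapter handed to slave encoders */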
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9720dd..b734195 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -62,8 +62,8 @@
 	head = (dacclk & 0x100) >> 8;
 
 	/* Save the previous state. */
-	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+	gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -74,8 +74,8 @@
 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
 	/* Prepare the DAC for load detection.  */
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -120,8 +120,8 @@
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
 
 	return sample;
 }
@@ -130,18 +130,10 @@
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvkm_device *device = nvxx_device(&drm->device);
 
-	/* Zotac FX5200 */
-	if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) ||
-	    nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
-		*pin_mask = 0xc;
-		return false;
-	}
-
-	/* MSI nForce2 IGP */
-	if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
-		*pin_mask = 0xc;
+	if (device->quirk && device->quirk->tv_pin_mask) {
+		*pin_mask = device->quirk->tv_pin_mask;
 		return false;
 	}
 
@@ -395,8 +387,8 @@
 
 	nv_load_ptv(dev, regs, 200);
 
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 225894c..459910b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -131,13 +131,13 @@
 				uint32_t val)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	nvif_wr32(device, reg, val);
+	nvif_wr32(&device->object, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	return nvif_rd32(device, reg);
+	return nvif_rd32(&device->object, reg);
 }
 
 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 64f8b2f..95a64d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -45,6 +45,11 @@
 #define GM107_DISP                                                   0x00009470
 #define GM204_DISP                                                   0x00009570
 
+#define NV31_MPEG                                                    0x00003174
+#define G82_MPEG                                                     0x00008274
+
+#define NV74_VP2                                                     0x00007476
+
 #define NV50_DISP_CURSOR                                             0x0000507a
 #define G82_DISP_CURSOR                                              0x0000827a
 #define GT214_DISP_CURSOR                                            0x0000857a
@@ -94,15 +99,40 @@
 #define MAXWELL_A                                                    0x0000b097
 #define MAXWELL_B                                                    0x0000b197
 
+#define NV74_BSP                                                     0x000074b0
+
+#define GT212_MSVLD                                                  0x000085b1
+#define IGT21A_MSVLD                                                 0x000086b1
+#define G98_MSVLD                                                    0x000088b1
+#define GF100_MSVLD                                                  0x000090b1
+#define GK104_MSVLD                                                  0x000095b1
+
+#define GT212_MSPDEC                                                 0x000085b2
+#define G98_MSPDEC                                                   0x000088b2
+#define GF100_MSPDEC                                                 0x000090b2
+#define GK104_MSPDEC                                                 0x000095b2
+
+#define GT212_MSPPP                                                  0x000085b3
+#define G98_MSPPP                                                    0x000088b3
+#define GF100_MSPPP                                                  0x000090b3
+
+#define G98_SEC                                                      0x000088b4
+
+#define GT212_DMA                                                    0x000085b5
+#define FERMI_DMA                                                    0x000090b5
+#define KEPLER_DMA_COPY_A                                            0x0000a0b5
+#define MAXWELL_DMA_COPY_A                                           0x0000b0b5
+
+#define FERMI_DECOMPRESS                                             0x000090b8
+
 #define FERMI_COMPUTE_A                                              0x000090c0
 #define FERMI_COMPUTE_B                                              0x000091c0
-
 #define KEPLER_COMPUTE_A                                             0x0000a0c0
 #define KEPLER_COMPUTE_B                                             0x0000a1c0
-
 #define MAXWELL_COMPUTE_A                                            0x0000b0c0
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
 
+#define NV74_CIPHER                                                  0x000074c1
 
 /*******************************************************************************
  * client
@@ -126,32 +156,10 @@
 	__u8  version;
 	__u8  pad01[7];
 	__u64 device;	/* device identifier, ~0 for client default */
-#define NV_DEVICE_V0_DISABLE_IDENTIFY                     0x0000000000000001ULL
-#define NV_DEVICE_V0_DISABLE_MMIO                         0x0000000000000002ULL
-#define NV_DEVICE_V0_DISABLE_VBIOS                        0x0000000000000004ULL
-#define NV_DEVICE_V0_DISABLE_CORE                         0x0000000000000008ULL
-#define NV_DEVICE_V0_DISABLE_DISP                         0x0000000000010000ULL
-#define NV_DEVICE_V0_DISABLE_FIFO                         0x0000000000020000ULL
-#define NV_DEVICE_V0_DISABLE_GR                           0x0000000100000000ULL
-#define NV_DEVICE_V0_DISABLE_MPEG                         0x0000000200000000ULL
-#define NV_DEVICE_V0_DISABLE_ME                           0x0000000400000000ULL
-#define NV_DEVICE_V0_DISABLE_VP                           0x0000000800000000ULL
-#define NV_DEVICE_V0_DISABLE_CIPHER                       0x0000001000000000ULL
-#define NV_DEVICE_V0_DISABLE_BSP                          0x0000002000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPPP                        0x0000004000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE0                          0x0000008000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE1                          0x0000010000000000ULL
-#define NV_DEVICE_V0_DISABLE_VIC                          0x0000020000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSENC                        0x0000040000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE2                          0x0000080000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSVLD                        0x0000100000000000ULL
-#define NV_DEVICE_V0_DISABLE_SEC                          0x0000200000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPDEC                       0x0000400000000000ULL
-	__u64 disable;	/* disable particular subsystems */
-	__u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
 };
 
 #define NV_DEVICE_V0_INFO                                                  0x00
+#define NV_DEVICE_V0_TIME                                                  0x01
 
 struct nv_device_info_v0 {
 	__u8  version;
@@ -176,6 +184,14 @@
 	__u8  pad06[2];
 	__u64 ram_size;
 	__u64 ram_user;
+	char  chip[16];
+	char  name[64];
+};
+
+struct nv_device_time_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 time;
 };
 
 
@@ -235,13 +251,13 @@
 	__u8  pad03[5];
 };
 
-struct gf110_dma_v0 {
+struct gf119_dma_v0 {
 	__u8  version;
-#define GF110_DMA_V0_PAGE_LP                                               0x00
-#define GF110_DMA_V0_PAGE_SP                                               0x01
+#define GF119_DMA_V0_PAGE_LP                                               0x00
+#define GF119_DMA_V0_PAGE_SP                                               0x01
 	__u8  page;
-#define GF110_DMA_V0_KIND_PITCH                                            0x00
-#define GF110_DMA_V0_KIND_VM                                               0xff
+#define GF119_DMA_V0_KIND_PITCH                                            0x00
+#define GF119_DMA_V0_KIND_VM                                               0xff
 	__u8  kind;
 	__u8  pad03[5];
 };
@@ -251,33 +267,74 @@
  * perfmon
  ******************************************************************************/
 
-struct nvif_perfctr_v0 {
-	__u8  version;
-	__u8  pad01[1];
-	__u16 logic_op;
-	__u8  pad04[4];
-	char  name[4][64];
-};
+#define NVIF_PERFMON_V0_QUERY_DOMAIN                                       0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL                                       0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE                                       0x02
 
-#define NVIF_PERFCTR_V0_QUERY                                              0x00
-#define NVIF_PERFCTR_V0_SAMPLE                                             0x01
-#define NVIF_PERFCTR_V0_READ                                               0x02
-
-struct nvif_perfctr_query_v0 {
+struct nvif_perfmon_query_domain_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 iter;
+	__u8  id;
+	__u8  counter_nr;
+	__u8  iter;
+	__u16 signal_nr;
+	__u8  pad05[2];
 	char  name[64];
 };
 
-struct nvif_perfctr_sample {
+struct nvif_perfmon_query_signal_v0 {
+	__u8  version;
+	__u8  domain;
+	__u16 iter;
+	__u8  signal;
+	__u8  source_nr;
+	__u8  pad05[2];
+	char  name[64];
 };
 
-struct nvif_perfctr_read_v0 {
+struct nvif_perfmon_query_source_v0 {
+	__u8  version;
+	__u8  domain;
+	__u8  signal;
+	__u8  iter;
+	__u8  pad04[4];
+	__u32 source;
+	__u32 mask;
+	char  name[64];
+};
+
+
+/*******************************************************************************
+ * perfdom
+ ******************************************************************************/
+
+struct nvif_perfdom_v0 {
+	__u8  version;
+	__u8  domain;
+	__u8  mode;
+	__u8  pad03[1];
+	struct {
+		__u8  signal[4];
+		__u64 source[4][8];
+		__u16 logic_op;
+	} ctr[4];
+};
+
+#define NVIF_PERFDOM_V0_INIT                                               0x00
+#define NVIF_PERFDOM_V0_SAMPLE                                             0x01
+#define NVIF_PERFDOM_V0_READ                                               0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
 	__u8  version;
 	__u8  pad01[7];
-	__u32 ctr;
+	__u32 ctr[4];
 	__u32 clk;
+	__u8  pad04[4];
 };
 
 
@@ -337,7 +394,16 @@
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[2];
-	__u32 pushbuf;
+	__u32 offset;
+	__u64 pushbuf;
+};
+
+struct nv50_channel_dma_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[6];
+	__u64 vm;
+	__u64 pushbuf;
 	__u64 offset;
 };
 
@@ -350,10 +416,20 @@
 struct nv50_channel_gpfifo_v0 {
 	__u8  version;
 	__u8  chid;
-	__u8  pad01[6];
-	__u32 pushbuf;
+	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 pushbuf;
+	__u64 vm;
+};
+
+struct fermi_channel_gpfifo_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[2];
+	__u32 ilength;
+	__u64 ioffset;
+	__u64 vm;
 };
 
 struct kepler_channel_gpfifo_a_v0 {
@@ -367,10 +443,9 @@
 #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC                              0x40
 	__u8  engine;
 	__u16 chid;
-	__u8  pad04[4];
-	__u32 pushbuf;
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 vm;
 };
 
 /*******************************************************************************
@@ -491,8 +566,8 @@
 /* core */
 struct nv50_disp_core_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 pushbuf;
+	__u8  pad01[7];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -509,9 +584,9 @@
 /* base */
 struct nv50_disp_base_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -519,9 +594,9 @@
 /* overlay */
 struct nv50_disp_overlay_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT                       0x00
@@ -536,6 +611,20 @@
 #define NV50_DISP_OVERLAY_V0_NTFY_UEVENT                                   0x00
 
 /*******************************************************************************
+ * software
+ ******************************************************************************/
+
+#define NVSW_NTFY_UEVENT                                                   0x00
+
+#define NV04_NVSW_GET_REF                                                  0x00
+
+struct nv04_nvsw_get_ref_v0 {
+	__u8  version;
+	__u8  pad01[3];
+	__u32 ref;
+};
+
+/*******************************************************************************
  * fermi
  ******************************************************************************/
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index eca648e..4a7f6f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -4,36 +4,25 @@
 #include <nvif/object.h>
 
 struct nvif_client {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	const struct nvif_driver *driver;
+	u64 version;
+	u8 route;
 	bool super;
 };
 
-static inline struct nvif_client *
-nvif_client(struct nvif_object *object)
-{
-	while (object && object->parent != object)
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
-		      const char *, u64, const char *, const char *,
+int  nvif_client_init(const char *drv, const char *name, u64 device,
+		      const char *cfg, const char *dbg,
 		      struct nvif_client *);
 void nvif_client_fini(struct nvif_client *);
-int  nvif_client_new(const char *, const char *, u64, const char *,
-		     const char *, struct nvif_client **);
-void nvif_client_ref(struct nvif_client *, struct nvif_client **);
 int  nvif_client_ioctl(struct nvif_client *, void *, u32);
 int  nvif_client_suspend(struct nvif_client *);
 int  nvif_client_resume(struct nvif_client *);
 
 /*XXX*/
 #include <core/client.h>
-#define nvxx_client(a) ({ \
-	struct nvif_client *_client = nvif_client(nvif_object(a)); \
-	nvkm_client(_client->base.priv); \
+#define nvxx_client(a) ({                                                      \
+	struct nvif_client *_client = (a);                                     \
+	(struct nvkm_client *)_client->object.priv;                            \
 })
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 88553a7..700a9b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -5,26 +5,35 @@
 #include <nvif/class.h>
 
 struct nvif_device {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	struct nv_device_info_v0 info;
 };
 
-static inline struct nvif_device *
-nvif_device(struct nvif_object *object)
-{
-	while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_device *);
 void nvif_device_fini(struct nvif_device *);
-int  nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_device **);
-void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+u64  nvif_device_time(struct nvif_device *);
+
+/* Delay based on GPU time (i.e. PTIMER).
+ *
+ * Returns -ETIMEDOUT unless the loop was terminated with 'break', in
+ * which case the number of nanoseconds taken is returned instead.
+ */
+#define nvif_nsec(d,n,cond...) ({                                              \
+	struct nvif_device *_device = (d);                                     \
+	u64 _nsecs = (n), _time0 = nvif_device_time(_device);                  \
+	s64 _taken = 0;                                                        \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
+                                                                               \
+	if (_taken >= _nsecs)                                                  \
+		_taken = -ETIMEDOUT;                                           \
+	_taken;                                                                \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
 
 /*XXX*/
 #include <subdev/bios.h>
@@ -36,26 +45,30 @@
 #include <subdev/i2c.h>
 #include <subdev/timer.h>
 #include <subdev/therm.h>
+#include <subdev/pci.h>
 
-#define nvxx_device(a) nv_device(nvxx_object((a)))
-#define nvxx_bios(a) nvkm_bios(nvxx_device(a))
-#define nvxx_fb(a) nvkm_fb(nvxx_device(a))
-#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a))
-#define nvxx_bar(a) nvkm_bar(nvxx_device(a))
-#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a))
-#define nvxx_clk(a) nvkm_clk(nvxx_device(a))
-#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a))
-#define nvxx_timer(a) nvkm_timer(nvxx_device(a))
-#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d))
-#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c))
-#define nvxx_therm(a) nvkm_therm(nvxx_device(a))
+#define nvxx_device(a) ({                                                      \
+	struct nvif_device *_device = (a);                                     \
+	struct {                                                               \
+		struct nvkm_object object;                                     \
+		struct nvkm_device *device;                                    \
+	} *_udevice = _device->object.priv;                                    \
+	_udevice->device;                                                      \
+})
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_mmu(a) nvxx_device(a)->mmu
+#define nvxx_bar(a) nvxx_device(a)->bar
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_therm(a) nvxx_device(a)->therm
 
 #include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
 #include <engine/sw.h>
 
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
-#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a))
-#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
+#define nvxx_gr(a) nvxx_device(a)->gr
 #endif
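nvif_usec() and nvif_msec() layer on nvif_nsec() above, which times the loop with nvif_device_time() (backed by the new NV_DEVICE_V0_TIME method) rather than a CPU clock; the whole expression evaluates to the nanoseconds taken on 'break', or -ETIMEDOUT. A minimal usage sketch, assuming a made-up status offset:

	s64 taken = nvif_msec(device, 2000,
		if (nvif_rd32(&device->object, 0x00b000) & 0x00000001) /* hypothetical */
			break;
	);
	if (taken < 0)
		return -EBUSY;	/* never reported ready within 2000ms */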
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e32..b0ac021 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,11 +1,10 @@
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
+#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+
 struct nvif_ioctl_v0 {
 	__u8  version;
-#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
-#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
-	__u8  owner;
 #define NVIF_IOCTL_V0_NOP                                                  0x00
 #define NVIF_IOCTL_V0_SCLASS                                               0x01
 #define NVIF_IOCTL_V0_NEW                                                  0x02
@@ -20,17 +19,20 @@
 #define NVIF_IOCTL_V0_NTFY_GET                                             0x0b
 #define NVIF_IOCTL_V0_NTFY_PUT                                             0x0c
 	__u8  type;
-	__u8  path_nr;
+	__u8  pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
+#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
+	__u8  owner;
 #define NVIF_IOCTL_V0_ROUTE_NVIF                                           0x00
 #define NVIF_IOCTL_V0_ROUTE_HIDDEN                                         0xff
-	__u8  pad04[3];
 	__u8  route;
 	__u64 token;
-	__u32 path[8];		/* in reverse */
+	__u64 object;
 	__u8  data[];		/* ioctl data (below) */
 };
 
-struct nvif_ioctl_nop {
+struct nvif_ioctl_nop_v0 {
+	__u64 version;
 };
 
 struct nvif_ioctl_sclass_v0 {
@@ -38,7 +40,11 @@
 	__u8  version;
 	__u8  count;
 	__u8  pad02[6];
-	__u32 oclass[];
+	struct nvif_ioctl_sclass_oclass_v0 {
+		__s32 oclass;
+		__s16 minver;
+		__s16 maxver;
+	} oclass[];
 };
 
 struct nvif_ioctl_new_v0 {
@@ -47,11 +53,17 @@
 	__u8  pad01[6];
 	__u8  route;
 	__u64 token;
+	__u64 object;
 	__u32 handle;
 /* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_IOCTL_NEW_V0_PERFCTR                                    0x0000ffff
-#define NVIF_IOCTL_NEW_V0_CONTROL                                    0x0000fffe
-	__u32 oclass;
+#define NVIF_IOCTL_NEW_V0_CONTROL                                            -1
+#define NVIF_IOCTL_NEW_V0_PERFMON                                            -2
+#define NVIF_IOCTL_NEW_V0_PERFDOM                                            -3
+#define NVIF_IOCTL_NEW_V0_SW_NV04                                            -4
+#define NVIF_IOCTL_NEW_V0_SW_NV10                                            -5
+#define NVIF_IOCTL_NEW_V0_SW_NV50                                            -6
+#define NVIF_IOCTL_NEW_V0_SW_GF100                                           -7
+	__s32 oclass;
 	__u8  data[];		/* class data (class.h) */
 };
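The ioctl header no longer carries a path[8] route to the target; callers now name the object directly with a 64-bit token, and nvif-internal classes are distinguished by negative oclass values. A rough sketch of building a NEW request under the reworked layout (field usage here is my reading of the structs above, not taken from the patch; parent_object and handle are assumed locals):

	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} args = {
		.ioctl.type   = NVIF_IOCTL_V0_NEW,
		.ioctl.object = parent_object,	/* replaces path[] in reverse */
		.new.handle   = handle,
		.new.oclass   = NVIF_IOCTL_NEW_V0_CONTROL, /* negative: nvif-made-up */
	};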
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b..51e2eb5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -23,17 +23,11 @@
 	struct work_struct work;
 };
 
-int  nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
-		      int (*func)(struct nvif_notify *), bool work, u8 type,
-		      void *data, u32 size, u32 reply, struct nvif_notify *);
+int  nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
+		      bool work, u8 type, void *data, u32 size, u32 reply,
+		      struct nvif_notify *);
 int  nvif_notify_fini(struct nvif_notify *);
 int  nvif_notify_get(struct nvif_notify *);
 int  nvif_notify_put(struct nvif_notify *);
 int  nvif_notify(const void *, u32, const void *, u32);
-
-int  nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
-		     bool work, u8 type, void *data, u32 size, u32 reply,
-		     struct nvif_notify **);
-void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 04c8747..8d81596 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -3,73 +3,73 @@
 
 #include <nvif/os.h>
 
+struct nvif_sclass {
+	s32 oclass;
+	int minver;
+	int maxver;
+};
+
 struct nvif_object {
-	struct nvif_object *parent;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
-	struct kref refcount;
+	struct nvif_client *client;
 	u32 handle;
-	u32 oclass;
-	void *data;
-	u32   size;
+	s32 oclass;
 	void *priv; /*XXX: hack */
-	void (*dtor)(struct nvif_object *);
 	struct {
 		void __iomem *ptr;
 		u32 size;
 	} map;
 };
 
-int  nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_object *);
 void nvif_object_fini(struct nvif_object *);
-int  nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_object **);
-void nvif_object_ref(struct nvif_object *, struct nvif_object **);
 int  nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
-int  nvif_object_sclass(struct nvif_object *, u32 *, int);
+int  nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
+void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
 int  nvif_object_map(struct nvif_object *);
 void nvif_object_unmap(struct nvif_object *);
 
+#define nvif_handle(a) (unsigned long)(void *)(a)
 #define nvif_object(a) (a)->object
 
-#define ioread8_native ioread8
-#define iowrite8_native iowrite8
-#define nvif_rd(a,b,c) ({                                                      \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_rd(a,f,b,c) ({                                                    \
+	struct nvif_object *_object = (a);                                     \
 	u32 _data;                                                             \
 	if (likely(_object->map.ptr))                                          \
-		_data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c));      \
+		_data = f((u8 __iomem *)_object->map.ptr + (c));               \
 	else                                                                   \
-		_data = nvif_object_rd(_object, (b) / 8, (c));                 \
+		_data = nvif_object_rd(_object, (b), (c));                     \
 	_data;                                                                 \
 })
-#define nvif_wr(a,b,c,d) ({                                                    \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_wr(a,f,b,c,d) ({                                                  \
+	struct nvif_object *_object = (a);                                     \
 	if (likely(_object->map.ptr))                                          \
-		iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c));        \
+		f((d), (u8 __iomem *)_object->map.ptr + (c));                  \
 	else                                                                   \
-		nvif_object_wr(_object, (b) / 8, (c), (d));                    \
+		nvif_object_wr(_object, (b), (c), (d));                        \
 })
-#define nvif_rd08(a,b) ({ u8  _v = nvif_rd((a), 8, (b)); _v; })
-#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
-#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
-#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
-#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
-#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
+#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
+#define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); })
+#define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
 #define nvif_mask(a,b,c,d) ({                                                  \
-	u32 _v = nvif_rd32(nvif_object(a), (b));                               \
-	nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d));                     \
-	_v;                                                                    \
+	struct nvif_object *__object = (a);                                    \
+	u32 _addr = (b), _data = nvif_rd32(__object, _addr);                   \
+	nvif_wr32(__object, _addr, (_data & ~(c)) | (d));                      \
+	_data;                                                                 \
 })
 
-#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
 
 /*XXX*/
 #include <core/object.h>
-#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv)
-
+#define nvxx_object(a) ({                                                      \
+	struct nvif_object *_object = (a);                                     \
+	(struct nvkm_object *)_object->priv;                                   \
+})
 #endif
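The accessor rework drops the parent-walking nvif_object() indirection: nvif_rd32()/nvif_wr32() take the nvif_object itself, use the ioread/iowrite fast path only when object->map.ptr is mapped, and otherwise fall back to the nvif_object_rd()/wr() ioctls. Like nvkm_mask() later in the series, nvif_mask() now returns the pre-modification value. A short sketch with made-up offsets:

	struct nvif_object *object = &device->object;
	u32 old;

	nvif_wr32(object, 0x001700, 0x00000000);	/* offsets illustrative */
	old = nvif_mask(object, 0x001704, 0x00000001, 0x00000001);
	/* 'old' holds the register value from before the bit was set */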
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee..3accc99 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -24,9 +24,15 @@
 #include <linux/power_supply.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <linux/agp_backend.h>
+#include <linux/reset.h>
+#include <linux/iommu.h>
 
 #include <asm/unaligned.h>
 
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
@@ -40,5 +46,4 @@
 #define iowrite32_native iowrite32
 #endif /* def __BIG_ENDIAN else */
 #endif /* !ioread32_native */
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index a35b382..eaf5905 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,55 +1,52 @@
 #ifndef __NVKM_CLIENT_H__
 #define __NVKM_CLIENT_H__
-#include <core/namedb.h>
+#include <core/object.h>
 
 struct nvkm_client {
-	struct nvkm_namedb namedb;
-	struct nvkm_handle *root;
-	struct nvkm_object *device;
+	struct nvkm_object object;
 	char name[32];
+	u64 device;
 	u32 debug;
-	struct nvkm_vm *vm;
+
+	struct nvkm_client_notify *notify[16];
+	struct rb_root objroot;
+	struct rb_root dmaroot;
+
 	bool super;
 	void *data;
-
 	int (*ntfy)(const void *, u32, const void *, u32);
-	struct nvkm_client_notify *notify[16];
+
+	struct nvkm_vm *vm;
 };
 
-static inline struct nvkm_client *
-nv_client(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
-		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
+void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
+struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
 
-static inline struct nvkm_client *
-nvkm_client(void *obj)
-{
-	struct nvkm_object *client = nv_object(obj);
-	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
-		client = client->parent;
-	return (void *)client;
-}
-
-#define nvkm_client_create(n,c,oc,od,d)                                     \
-	nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
-
-int  nvkm_client_create_(const char *name, u64 device, const char *cfg,
-			    const char *dbg, int, void **);
-#define nvkm_client_destroy(p)                                              \
-	nvkm_namedb_destroy(&(p)->base)
-
+int  nvkm_client_new(const char *name, u64 device, const char *cfg,
+		     const char *dbg, struct nvkm_client **);
+void nvkm_client_del(struct nvkm_client **);
 int  nvkm_client_init(struct nvkm_client *);
 int  nvkm_client_fini(struct nvkm_client *, bool suspend);
-const char *nvkm_client_name(void *obj);
 
 int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
 			   void *data, u32 size);
 int nvkm_client_notify_del(struct nvkm_client *, int index);
 int nvkm_client_notify_get(struct nvkm_client *, int index);
 int nvkm_client_notify_put(struct nvkm_client *, int index);
+
+/* logging for client-facing objects */
+#define nvif_printk(o,l,p,f,a...) do {                                         \
+	struct nvkm_object *_object = (o);                                     \
+	struct nvkm_client *_client = _object->client;                         \
+	if (_client->debug >= NV_DBG_##l)                                      \
+		printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name,     \
+		       _object->handle, _object->oclass, ##a);                 \
+} while(0)
+#define nvif_fatal(o,f,a...) nvif_printk((o), FATAL, CRIT, f, ##a)
+#define nvif_error(o,f,a...) nvif_printk((o), ERROR,  ERR, f, ##a)
+#define nvif_debug(o,f,a...) nvif_printk((o), DEBUG, INFO, f, ##a)
+#define nvif_trace(o,f,a...) nvif_printk((o), TRACE, INFO, f, ##a)
+#define nvif_info(o,f,a...)  nvif_printk((o),  INFO, INFO, f, ##a)
+#define nvif_ioctl(o,f,a...) nvif_trace((o), "ioctl: "f, ##a)
 #endif
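These client-facing macros gate on the per-client debug level and tag every line with the client name plus the object's handle and class, so a misbehaving userspace client can be traced without enabling global debug output. Typical use, sketched against a hypothetical method handler (args, mthd and size are assumed locals):

	/* Sketch: object is the nvkm_object a client method arrived on. */
	nvif_ioctl(object, "mthd v%d size %d\n", args->version, size);
	if (mthd != NV_DEVICE_V0_INFO)
		nvif_error(object, "unknown mthd %02x\n", mthd);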
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index d07cb86..c59fd4e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,18 +1,11 @@
 #ifndef __NVKM_DEBUG_H__
 #define __NVKM_DEBUG_H__
-extern int nv_info_debug_level;
-
 #define NV_DBG_FATAL    0
 #define NV_DBG_ERROR    1
 #define NV_DBG_WARN     2
-#define NV_DBG_INFO     nv_info_debug_level
+#define NV_DBG_INFO     3
 #define NV_DBG_DEBUG    4
 #define NV_DBG_TRACE    5
 #define NV_DBG_PARANOIA 6
 #define NV_DBG_SPAM     7
-
-#define NV_DBG_INFO_NORMAL 3
-#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
-
-#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 333db33..8f76000 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,24 +1,84 @@
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
-#include <core/engine.h>
 #include <core/event.h>
+#include <core/object.h>
+
+enum nvkm_devidx {
+	NVKM_SUBDEV_PCI,
+	NVKM_SUBDEV_VBIOS,
+	NVKM_SUBDEV_DEVINIT,
+	NVKM_SUBDEV_IBUS,
+	NVKM_SUBDEV_GPIO,
+	NVKM_SUBDEV_I2C,
+	NVKM_SUBDEV_FUSE,
+	NVKM_SUBDEV_MXM,
+	NVKM_SUBDEV_MC,
+	NVKM_SUBDEV_BUS,
+	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_FB,
+	NVKM_SUBDEV_LTC,
+	NVKM_SUBDEV_INSTMEM,
+	NVKM_SUBDEV_MMU,
+	NVKM_SUBDEV_BAR,
+	NVKM_SUBDEV_PMU,
+	NVKM_SUBDEV_VOLT,
+	NVKM_SUBDEV_THERM,
+	NVKM_SUBDEV_CLK,
+
+	NVKM_ENGINE_DMAOBJ,
+	NVKM_ENGINE_IFB,
+	NVKM_ENGINE_FIFO,
+	NVKM_ENGINE_SW,
+	NVKM_ENGINE_GR,
+	NVKM_ENGINE_MPEG,
+	NVKM_ENGINE_ME,
+	NVKM_ENGINE_VP,
+	NVKM_ENGINE_CIPHER,
+	NVKM_ENGINE_BSP,
+	NVKM_ENGINE_MSPPP,
+	NVKM_ENGINE_CE0,
+	NVKM_ENGINE_CE1,
+	NVKM_ENGINE_CE2,
+	NVKM_ENGINE_VIC,
+	NVKM_ENGINE_MSENC,
+	NVKM_ENGINE_DISP,
+	NVKM_ENGINE_PM,
+	NVKM_ENGINE_MSVLD,
+	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_MSPDEC,
+
+	NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+	NVKM_DEVICE_PCI,
+	NVKM_DEVICE_AGP,
+	NVKM_DEVICE_PCIE,
+	NVKM_DEVICE_TEGRA,
+};
 
 struct nvkm_device {
-	struct nvkm_engine engine;
-	struct list_head head;
-
-	struct pci_dev *pdev;
-	struct platform_device *platformdev;
+	const struct nvkm_device_func *func;
+	const struct nvkm_device_quirk *quirk;
+	struct device *dev;
+	enum nvkm_device_type type;
 	u64 handle;
+	const char *name;
+	const char *cfgopt;
+	const char *dbgopt;
+
+	struct list_head head;
+	struct mutex mutex;
+	int refcount;
+
+	void __iomem *pri;
 
 	struct nvkm_event event;
 
-	const char *cfgopt;
-	const char *dbgopt;
-	const char *name;
-	const char *cname;
 	u64 disable_mask;
+	u32 debug;
 
+	const struct nvkm_device_chip *chip;
 	enum {
 		NV_04    = 0x04,
 		NV_10    = 0x10,
@@ -35,67 +95,157 @@
 	u8  chiprev;
 	u32 crystal;
 
-	struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
-
 	struct {
 		struct notifier_block nb;
 	} acpi;
+
+	struct nvkm_bar *bar;
+	struct nvkm_bios *bios;
+	struct nvkm_bus *bus;
+	struct nvkm_clk *clk;
+	struct nvkm_devinit *devinit;
+	struct nvkm_fb *fb;
+	struct nvkm_fuse *fuse;
+	struct nvkm_gpio *gpio;
+	struct nvkm_i2c *i2c;
+	struct nvkm_subdev *ibus;
+	struct nvkm_instmem *imem;
+	struct nvkm_ltc *ltc;
+	struct nvkm_mc *mc;
+	struct nvkm_mmu *mmu;
+	struct nvkm_subdev *mxm;
+	struct nvkm_pci *pci;
+	struct nvkm_pmu *pmu;
+	struct nvkm_therm *therm;
+	struct nvkm_timer *timer;
+	struct nvkm_volt *volt;
+
+	struct nvkm_engine *bsp;
+	struct nvkm_engine *ce[3];
+	struct nvkm_engine *cipher;
+	struct nvkm_disp *disp;
+	struct nvkm_dma *dma;
+	struct nvkm_fifo *fifo;
+	struct nvkm_gr *gr;
+	struct nvkm_engine *ifb;
+	struct nvkm_engine *me;
+	struct nvkm_engine *mpeg;
+	struct nvkm_engine *msenc;
+	struct nvkm_engine *mspdec;
+	struct nvkm_engine *msppp;
+	struct nvkm_engine *msvld;
+	struct nvkm_pm *pm;
+	struct nvkm_engine *sec;
+	struct nvkm_sw *sw;
+	struct nvkm_engine *vic;
+	struct nvkm_engine *vp;
+};
+
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+
+struct nvkm_device_func {
+	struct nvkm_device_pci *(*pci)(struct nvkm_device *);
+	struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
+	void *(*dtor)(struct nvkm_device *);
+	int (*preinit)(struct nvkm_device *);
+	int (*init)(struct nvkm_device *);
+	void (*fini)(struct nvkm_device *, bool suspend);
+	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
+	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
+};
+
+struct nvkm_device_quirk {
+	u8 tv_pin_mask;
+	u8 tv_gpio;
+	bool War00C800_0;
+};
+
+struct nvkm_device_chip {
+	const char *name;
+
+	int (*bar    )(struct nvkm_device *, int idx, struct nvkm_bar **);
+	int (*bios   )(struct nvkm_device *, int idx, struct nvkm_bios **);
+	int (*bus    )(struct nvkm_device *, int idx, struct nvkm_bus **);
+	int (*clk    )(struct nvkm_device *, int idx, struct nvkm_clk **);
+	int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
+	int (*fb     )(struct nvkm_device *, int idx, struct nvkm_fb **);
+	int (*fuse   )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+	int (*gpio   )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+	int (*i2c    )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+	int (*ibus   )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*imem   )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+	int (*ltc    )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+	int (*mc     )(struct nvkm_device *, int idx, struct nvkm_mc **);
+	int (*mmu    )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+	int (*mxm    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*pci    )(struct nvkm_device *, int idx, struct nvkm_pci **);
+	int (*pmu    )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+	int (*therm  )(struct nvkm_device *, int idx, struct nvkm_therm **);
+	int (*timer  )(struct nvkm_device *, int idx, struct nvkm_timer **);
+	int (*volt   )(struct nvkm_device *, int idx, struct nvkm_volt **);
+
+	int (*bsp    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*ce[3]  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*disp   )(struct nvkm_device *, int idx, struct nvkm_disp **);
+	int (*dma    )(struct nvkm_device *, int idx, struct nvkm_dma **);
+	int (*fifo   )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+	int (*gr     )(struct nvkm_device *, int idx, struct nvkm_gr **);
+	int (*ifb    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*me     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mpeg   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msenc  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msppp  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msvld  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*pm     )(struct nvkm_device *, int idx, struct nvkm_pm **);
+	int (*sec    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sw     )(struct nvkm_device *, int idx, struct nvkm_sw **);
+	int (*vic    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*vp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
 };
 
 struct nvkm_device *nvkm_device_find(u64 name);
 int nvkm_device_list(u64 *name, int size);
 
-struct nvkm_device *nv_device(void *obj);
+/* privileged register interface accessor macros */
+#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
+#define nvkm_rd16(d,a) ioread16_native((d)->pri + (a))
+#define nvkm_rd32(d,a) ioread32_native((d)->pri + (a))
+#define nvkm_wr08(d,a,v) iowrite8((v), (d)->pri + (a))
+#define nvkm_wr16(d,a,v) iowrite16_native((v), (d)->pri + (a))
+#define nvkm_wr32(d,a,v) iowrite32_native((v), (d)->pri + (a))
+#define nvkm_mask(d,a,m,v) ({                                                  \
+	struct nvkm_device *_device = (d);                                     \
+	u32 _addr = (a), _temp = nvkm_rd32(_device, _addr);                    \
+	nvkm_wr32(_device, _addr, (_temp & ~(m)) | (v));                       \
+	_temp;                                                                 \
+})
 
-static inline bool
-nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
-{
-	struct nvkm_device *device = nv_device(object);
-	return device->pdev->device == dev &&
-	       device->pdev->subsystem_vendor == ven &&
-	       device->pdev->subsystem_device == sub;
-}
+void nvkm_device_del(struct nvkm_device **);
 
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
-	return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
-	return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
-	return nv_device_is_pci(device) ? &device->pdev->dev :
-					  &device->platformdev->dev;
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall);
-
-struct platform_device;
-
-enum nv_bus_type {
-	NVKM_BUS_PCI,
-	NVKM_BUS_PLATFORM,
+struct nvkm_device_oclass {
+	int (*ctor)(struct nvkm_device *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
 };
 
-#define nvkm_device_create(p,t,n,s,c,d,u)                                   \
-	nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-int  nvkm_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
+extern const struct nvkm_sclass nvkm_udevice_sclass;
+
+/* device logging */
+#define nvdev_printk_(d,l,p,f,a...) do {                                       \
+	struct nvkm_device *_device = (d);                                     \
+	if (_device->debug >= (l))                                             \
+		dev_##p(_device->dev, f, ##a);                                 \
+} while(0)
+#define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
+#define nvdev_fatal(d,f,a...) nvdev_printk((d), FATAL,   crit, f, ##a)
+#define nvdev_error(d,f,a...) nvdev_printk((d), ERROR,    err, f, ##a)
+#define nvdev_warn(d,f,a...)  nvdev_printk((d),  WARN, notice, f, ##a)
+#define nvdev_info(d,f,a...)  nvdev_printk((d),  INFO,   info, f, ##a)
+#define nvdev_debug(d,f,a...) nvdev_printk((d), DEBUG,   info, f, ##a)
+#define nvdev_trace(d,f,a...) nvdev_printk((d), TRACE,   info, f, ##a)
+#define nvdev_spam(d,f,a...)  nvdev_printk((d),  SPAM,    dbg, f, ##a)
 #endif
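With subdevs now plain members of nvkm_device, the privileged accessors read and write through device->pri directly; nvkm_mask() is a read-modify-write that hands back the old value, which makes save/restore sequences one-liners. A minimal sketch (the register offset and delay are placeholders):

	/* Sketch: pulse a bit in a priv register, then restore the old state. */
	u32 save = nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);

	udelay(10);	/* hypothetical settle time */
	nvkm_wr32(device, 0x000200, save);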
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
deleted file mode 100644
index 60c5888..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NVKM_DEVIDX_H__
-#define __NVKM_DEVIDX_H__
-enum nvkm_devidx {
-	NVDEV_ENGINE_DEVICE,
-	NVDEV_SUBDEV_VBIOS,
-
-	/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
-	 * *any* of them are initialised.  This subdev category is used
-	 * for any subdevs that the VBIOS init table parsing may call out
-	 * to during POST.
-	 */
-	NVDEV_SUBDEV_DEVINIT,
-	NVDEV_SUBDEV_IBUS,
-	NVDEV_SUBDEV_GPIO,
-	NVDEV_SUBDEV_I2C,
-	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
-
-	/* This grouping of subdevs are initialised right after they've
-	 * been created, and are allowed to assume any subdevs in the
-	 * list above them exist and have been initialised.
-	 */
-	NVDEV_SUBDEV_FUSE,
-	NVDEV_SUBDEV_MXM,
-	NVDEV_SUBDEV_MC,
-	NVDEV_SUBDEV_BUS,
-	NVDEV_SUBDEV_TIMER,
-	NVDEV_SUBDEV_FB,
-	NVDEV_SUBDEV_LTC,
-	NVDEV_SUBDEV_INSTMEM,
-	NVDEV_SUBDEV_MMU,
-	NVDEV_SUBDEV_BAR,
-	NVDEV_SUBDEV_PMU,
-	NVDEV_SUBDEV_VOLT,
-	NVDEV_SUBDEV_THERM,
-	NVDEV_SUBDEV_CLK,
-
-	NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_IFB,
-	NVDEV_ENGINE_FIFO,
-	NVDEV_ENGINE_SW,
-	NVDEV_ENGINE_GR,
-	NVDEV_ENGINE_MPEG,
-	NVDEV_ENGINE_ME,
-	NVDEV_ENGINE_VP,
-	NVDEV_ENGINE_CIPHER,
-	NVDEV_ENGINE_BSP,
-	NVDEV_ENGINE_MSPPP,
-	NVDEV_ENGINE_CE0,
-	NVDEV_ENGINE_CE1,
-	NVDEV_ENGINE_CE2,
-	NVDEV_ENGINE_VIC,
-	NVDEV_ENGINE_MSENC,
-	NVDEV_ENGINE_DISP,
-	NVDEV_ENGINE_PM,
-	NVDEV_ENGINE_MSVLD,
-	NVDEV_ENGINE_SEC,
-	NVDEV_ENGINE_MSPDEC,
-
-	NVDEV_SUBDEV_NR,
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
deleted file mode 100644
index 1bf2e8e..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __NVKM_ENGCTX_H__
-#define __NVKM_ENGCTX_H__
-#include <core/gpuobj.h>
-
-#include <subdev/mmu.h>
-
-#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
-#define NV_ENGCTX(name,var)  NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
-
-struct nvkm_engctx {
-	struct nvkm_gpuobj gpuobj;
-	struct nvkm_vma vma;
-	struct list_head head;
-	unsigned long save;
-	u64 addr;
-};
-
-static inline struct nvkm_engctx *
-nv_engctx(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
-		nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_engctx_create(p,e,c,g,s,a,f,d)                                 \
-	nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f),              \
-			       sizeof(**d), (void **)d)
-
-int  nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, struct nvkm_object *,
-			    u32 size, u32 align, u32 flags,
-			    int length, void **data);
-void nvkm_engctx_destroy(struct nvkm_engctx *);
-int  nvkm_engctx_init(struct nvkm_engctx *);
-int  nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
-
-int  _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_engctx_dtor(struct nvkm_object *);
-int  _nvkm_engctx_init(struct nvkm_object *);
-int  _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
-#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index faf0fd2..48bf128 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,56 +1,49 @@
 #ifndef __NVKM_ENGINE_H__
 #define __NVKM_ENGINE_H__
+#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
 #include <core/subdev.h>
-
-#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
-#define NV_ENGINE(name,var)  NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+struct nvkm_fifo_chan;
+struct nvkm_fb_tile;
 
 struct nvkm_engine {
+	const struct nvkm_engine_func *func;
 	struct nvkm_subdev subdev;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-
-	struct list_head contexts;
 	spinlock_t lock;
 
-	void (*tile_prog)(struct nvkm_engine *, int region);
-	int  (*tlb_flush)(struct nvkm_engine *);
+	int usecount;
 };
 
-static inline struct nvkm_engine *
-nv_engine(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
-		nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_engine_func {
+	void *(*dtor)(struct nvkm_engine *);
+	int (*oneinit)(struct nvkm_engine *);
+	int (*init)(struct nvkm_engine *);
+	int (*fini)(struct nvkm_engine *, bool suspend);
+	void (*intr)(struct nvkm_engine *);
+	void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
 
-static inline int
-nv_engidx(struct nvkm_engine *engine)
-{
-	return nv_subidx(&engine->subdev);
-}
+	struct {
+		int (*sclass)(struct nvkm_oclass *, int index,
+			      const struct nvkm_device_oclass **);
+	} base;
 
-struct nvkm_engine *nvkm_engine(void *obj, int idx);
+	struct {
+		int (*cclass)(struct nvkm_fifo_chan *,
+			      const struct nvkm_oclass *,
+			      struct nvkm_object **);
+		int (*sclass)(struct nvkm_oclass *, int index);
+	} fifo;
 
-#define nvkm_engine_create(p,e,c,d,i,f,r)                                   \
-	nvkm_engine_create_((p), (e), (c), (d), (i), (f),                   \
-			       sizeof(**r),(void **)r)
+	const struct nvkm_object_func *cclass;
+	struct nvkm_sclass sclass[];
+};
 
-#define nvkm_engine_destroy(p)                                              \
-	nvkm_subdev_destroy(&(p)->subdev)
-#define nvkm_engine_init(p)                                                 \
-	nvkm_subdev_init(&(p)->subdev)
-#define nvkm_engine_fini(p,s)                                               \
-	nvkm_subdev_fini(&(p)->subdev, (s))
-
-int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, bool, const char *,
-			   const char *, int, void **);
-
-#define _nvkm_engine_dtor _nvkm_subdev_dtor
-#define _nvkm_engine_init _nvkm_subdev_init
-#define _nvkm_engine_fini _nvkm_subdev_fini
+int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine *);
+int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine **);
+struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
+void nvkm_engine_unref(struct nvkm_engine **);
+void nvkm_engine_tile(struct nvkm_engine *, int region);
 #endif
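
The nvkm_engine_func table above replaces the old oclass/ofuncs machinery: an engine now supplies one static function table and hands it to a common constructor. A minimal sketch of the new pattern, assuming unused hooks may be left NULL (the xxx_ names are illustrative, not part of the patch):

  static const struct nvkm_engine_func
  xxx_engine_func = {
          /* a real engine would fill in init/fini/intr here, plus
           * an sclass[] list of the classes it exposes
           */
  };

  int
  xxx_engine_new(struct nvkm_device *device, int index,
                 struct nvkm_engine **pengine)
  {
          /* pmc_enable = 0: no PMC enable bit; enable = true */
          return nvkm_engine_new_(&xxx_engine_func, device, index,
                                  0x00000000, true, pengine);
  }

The new usecount field is managed through nvkm_engine_ref()/nvkm_engine_unref(), which take over from the deleted nvkm_object_inc()/nvkm_object_dec() paths.
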
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index e76f76f..40429a8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -10,12 +10,11 @@
 };
 
 const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
-const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
 
 struct nvkm_bitfield {
 	u32 mask;
 	const char *name;
 };
 
-void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
+void nvkm_snprintbf(char *, int, const struct nvkm_bitfield *, u32 value);
 #endif
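
nvkm_snprintbf() replaces the direct-printing nvkm_bitfield_print(), letting callers compose the decoded bits into their own log line. A sketch of the intended use, assuming bitfield tables are zero-terminated like the enum tables (the mask values and names are illustrative, and nvkm_error() is the subdev logging macro introduced later in this series):

  static const struct nvkm_bitfield
  xxx_intr_bitfield[] = {
          { 0x00000001, "FAULT" },
          { 0x00000010, "RING" },
          {}
  };

  static void
  xxx_intr_report(struct nvkm_subdev *subdev, u32 stat)
  {
          char msg[128];

          nvkm_snprintbf(msg, sizeof(msg), xxx_intr_bitfield, stat);
          nvkm_error(subdev, "intr %08x [%s]\n", stat, msg);
  }
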
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index e0187e7..d4f56ea 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,64 +1,40 @@
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
 #include <core/object.h>
+#include <core/memory.h>
 #include <core/mm.h>
 struct nvkm_vma;
 struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE  0x00000002
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
 	struct nvkm_object object;
-	struct nvkm_object *parent;
+	const struct nvkm_gpuobj_func *func;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
-	struct nvkm_mm heap;
 
-	u32 flags;
 	u64 addr;
 	u32 size;
+	struct nvkm_mm heap;
+
+	void __iomem *map;
 };
 
-static inline struct nvkm_gpuobj *
-nv_gpuobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_gpuobj_func {
+	void *(*acquire)(struct nvkm_gpuobj *);
+	void (*release)(struct nvkm_gpuobj *);
+	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
+	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+};
 
-#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d)                               \
-	nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f),         \
-			       sizeof(**d), (void **)d)
-#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
-#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
-int  nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_object *, u32 size, u32 align,
-			    u32 flags, int length, void **);
-void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
-
-int  nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		     u32 align, u32 flags, struct nvkm_gpuobj **);
-int  nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
-		     struct nvkm_gpuobj **);
-int  nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
-int  nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-			struct nvkm_vma *);
+int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+		    struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
-
-static inline void
-nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
-{
-	nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
-}
-
-void _nvkm_gpuobj_dtor(struct nvkm_object *);
-int  _nvkm_gpuobj_init(struct nvkm_object *);
-int  _nvkm_gpuobj_fini(struct nvkm_object *, bool);
-u32  _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
-void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
deleted file mode 100644
index 67f384d..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NVKM_HANDLE_H__
-#define __NVKM_HANDLE_H__
-#include <core/os.h>
-struct nvkm_object;
-
-struct nvkm_handle {
-	struct nvkm_namedb *namedb;
-	struct list_head node;
-
-	struct list_head head;
-	struct list_head tree;
-	u32 name;
-	u32 priv;
-
-	u8  route;
-	u64 token;
-
-	struct nvkm_handle *parent;
-	struct nvkm_object *object;
-};
-
-int  nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
-			struct nvkm_object *, struct nvkm_handle **);
-void nvkm_handle_destroy(struct nvkm_handle *);
-int  nvkm_handle_init(struct nvkm_handle *);
-int  nvkm_handle_fini(struct nvkm_handle *, bool suspend);
-
-struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
-
-struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
-struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
-struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
-void nvkm_handle_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
new file mode 100644
index 0000000..9363b83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_MEMORY_H__
+#define __NVKM_MEMORY_H__
+#include <core/os.h>
+struct nvkm_device;
+struct nvkm_vma;
+struct nvkm_vm;
+
+enum nvkm_memory_target {
+	NVKM_MEM_TARGET_INST,
+	NVKM_MEM_TARGET_VRAM,
+	NVKM_MEM_TARGET_HOST,
+};
+
+struct nvkm_memory {
+	const struct nvkm_memory_func *func;
+};
+
+struct nvkm_memory_func {
+	void *(*dtor)(struct nvkm_memory *);
+	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u64 (*addr)(struct nvkm_memory *);
+	u64 (*size)(struct nvkm_memory *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void __iomem *(*acquire)(struct nvkm_memory *);
+	void (*release)(struct nvkm_memory *);
+	u32 (*rd32)(struct nvkm_memory *, u64 offset);
+	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
+	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
+int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
+		    u64 size, u32 align, bool zero, struct nvkm_memory **);
+void nvkm_memory_del(struct nvkm_memory **);
+#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_addr(p) (p)->func->addr(p)
+#define nvkm_memory_size(p) (p)->func->size(p)
+#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
+#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+
+/* accessor macros - kmap()/done() must bracket use of the other accessor
+ * macros to guarantee correct behaviour across all chipsets
+ */
+#define nvkm_kmap(o)     (o)->func->acquire(o)
+#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_mo32(o,a,m,d) ({                                                  \
+	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
+	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
+	_data;                                                                 \
+})
+#define nvkm_done(o)     (o)->func->release(o)
+#endif
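
The kmap()/done() bracketing rule in the comment above is the one ordering constraint in this interface, so a short sketch may help. It assumes a struct nvkm_device pointer is already in hand; the size, alignment and offsets are illustrative:

  struct nvkm_memory *inst;
  int ret;

  /* 4KiB of zeroed instance memory, 256-byte aligned */
  ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                        0x1000, 0x100, true, &inst);
  if (ret)
          return ret;

  nvkm_kmap(inst);                         /* acquire */
  nvkm_wo32(inst, 0x00, 0x00000001);
  nvkm_mo32(inst, 0x04, 0x000000ff, 0x2a); /* read-modify-write */
  nvkm_done(inst);                         /* release */

  nvkm_memory_del(&inst);

As the macro body shows, nvkm_mo32() evaluates to the pre-modification word, so callers can inspect the old bits after the write.
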
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 096eb1a..d92fd41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -27,7 +27,7 @@
 static inline bool
 nvkm_mm_initialised(struct nvkm_mm *mm)
 {
-	return mm->block_size != 0;
+	return mm->heap_nodes;
 }
 
 int  nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@
 int  nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		  u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
+void nvkm_mm_dump(struct nvkm_mm *, const char *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 4cfe16f..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_NAMEDB_H__
-#define __NVKM_NAMEDB_H__
-#include <core/parent.h>
-struct nvkm_handle;
-
-struct nvkm_namedb {
-	struct nvkm_parent parent;
-	rwlock_t lock;
-	struct list_head list;
-};
-
-static inline struct nvkm_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
-		nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_namedb_create(p,e,c,v,s,m,d)                                   \
-	nvkm_namedb_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_namedb_init(p)                                                 \
-	nvkm_parent_init(&(p)->parent)
-#define nvkm_namedb_fini(p,s)                                               \
-	nvkm_parent_fini(&(p)->parent, (s))
-#define nvkm_namedb_destroy(p)                                              \
-	nvkm_parent_destroy(&(p)->parent)
-
-int  nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-
-int  _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-#define _nvkm_namedb_dtor _nvkm_parent_dtor
-#define _nvkm_namedb_init _nvkm_parent_init
-#define _nvkm_namedb_fini _nvkm_parent_fini
-
-int  nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
-			struct nvkm_handle *);
-void nvkm_namedb_remove(struct nvkm_handle *);
-
-struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
-struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
-struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
-struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
-void nvkm_namedb_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 6e3cd39..dcd048b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,203 +1,88 @@
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
 #include <core/os.h>
-#include <core/printk.h>
-
-#define NV_PARENT_CLASS 0x80000000
-#define NV_NAMEDB_CLASS 0x40000000
-#define NV_CLIENT_CLASS 0x20000000
-#define NV_SUBDEV_CLASS 0x10000000
-#define NV_ENGINE_CLASS 0x08000000
-#define NV_MEMOBJ_CLASS 0x04000000
-#define NV_GPUOBJ_CLASS 0x02000000
-#define NV_ENGCTX_CLASS 0x01000000
-#define NV_OBJECT_CLASS 0x0000ffff
+#include <core/debug.h>
+struct nvkm_event;
+struct nvkm_gpuobj;
+struct nvkm_oclass;
 
 struct nvkm_object {
-	struct nvkm_oclass *oclass;
+	const struct nvkm_object_func *func;
+	struct nvkm_client *client;
+	struct nvkm_engine *engine;
+	s32 oclass;
+	u32 handle;
+
+	struct list_head head;
+	struct list_head tree;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct rb_node node;
+};
+
+struct nvkm_object_func {
+	void *(*dtor)(struct nvkm_object *);
+	int (*init)(struct nvkm_object *);
+	int (*fini)(struct nvkm_object *, bool suspend);
+	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
+	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
+	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
+	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
+	int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
+	int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
+	int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
+	int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+	int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
+};
+
+void nvkm_object_ctor(const struct nvkm_object_func *,
+		      const struct nvkm_oclass *, struct nvkm_object *);
+int nvkm_object_new_(const struct nvkm_object_func *,
+		     const struct nvkm_oclass *, void *data, u32 size,
+		     struct nvkm_object **);
+int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+void nvkm_object_del(struct nvkm_object **);
+void *nvkm_object_dtor(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
+int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
+int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8  *data);
+int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
+int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
+int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8   data);
+int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16  data);
+int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32  data);
+int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		     struct nvkm_gpuobj **);
+
+struct nvkm_sclass {
+	int minver;
+	int maxver;
+	s32 oclass;
+	const struct nvkm_object_func *func;
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const void *priv;
+	const void *engn;
+	u32 handle;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct nvkm_client *client;
 	struct nvkm_object *parent;
 	struct nvkm_engine *engine;
-	atomic_t refcount;
-	atomic_t usecount;
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NVKM_OBJECT_MAGIC 0x75ef0bad
-	struct list_head list;
-	u32 _magic;
-#endif
 };
-
-static inline struct nvkm_object *
-nv_object(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (likely(obj)) {
-		struct nvkm_object *object = obj;
-		if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
-			nv_assert("BAD CAST -> NvObject, invalid magic");
-	}
-#endif
-	return obj;
-}
-
-#define nvkm_object_create(p,e,c,s,d)                                       \
-	nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int  nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32, int size, void **);
-void nvkm_object_destroy(struct nvkm_object *);
-int  nvkm_object_init(struct nvkm_object *);
-int  nvkm_object_fini(struct nvkm_object *, bool suspend);
-
-int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-
-extern struct nvkm_ofuncs nvkm_object_ofuncs;
-
-/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
- * ".data". */
-struct nvkm_oclass {
-	u32 handle;
-	struct nvkm_ofuncs * const ofuncs;
-	struct nvkm_omthds * const omthds;
-	struct lock_class_key lock_class_key;
-};
-
-#define nv_oclass(o)    nv_object(o)->oclass
-#define nv_hclass(o)    nv_oclass(o)->handle
-#define nv_iclass(o,i) (nv_hclass(o) & (i))
-#define nv_mclass(o)    nv_iclass(o, NV_OBJECT_CLASS)
-
-static inline struct nvkm_object *
-nv_pclass(struct nvkm_object *parent, u32 oclass)
-{
-	while (parent && !nv_iclass(parent, oclass))
-		parent = parent->parent;
-	return parent;
-}
-
-struct nvkm_omthds {
-	u32 start;
-	u32 limit;
-	int (*call)(struct nvkm_object *, u32, void *, u32);
-};
-
-struct nvkm_event;
-struct nvkm_ofuncs {
-	int  (*ctor)(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-	void (*dtor)(struct nvkm_object *);
-	int  (*init)(struct nvkm_object *);
-	int  (*fini)(struct nvkm_object *, bool suspend);
-	int  (*mthd)(struct nvkm_object *, u32, void *, u32);
-	int  (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
-	int  (* map)(struct nvkm_object *, u64 *, u32 *);
-	u8   (*rd08)(struct nvkm_object *, u64 offset);
-	u16  (*rd16)(struct nvkm_object *, u64 offset);
-	u32  (*rd32)(struct nvkm_object *, u64 offset);
-	void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
-	void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
-	void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
-};
-
-static inline struct nvkm_ofuncs *
-nv_ofuncs(void *obj)
-{
-	return nv_oclass(obj)->ofuncs;
-}
-
-int  nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
-int  nvkm_object_inc(struct nvkm_object *);
-int  nvkm_object_dec(struct nvkm_object *, bool suspend);
-void nvkm_object_debug(void);
-
-static inline int
-nv_exec(void *obj, u32 mthd, void *data, u32 size)
-{
-	struct nvkm_omthds *method = nv_oclass(obj)->omthds;
-
-	while (method && method->call) {
-		if (mthd >= method->start && mthd <= method->limit)
-			return method->call(obj, mthd, data, size);
-		method++;
-	}
-
-	return -EINVAL;
-}
-
-static inline int
-nv_call(void *obj, u32 mthd, u32 data)
-{
-	return nv_exec(obj, mthd, &data, sizeof(data));
-}
-
-static inline u8
-nv_ro08(void *obj, u64 addr)
-{
-	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
-	nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_ro16(void *obj, u64 addr)
-{
-	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
-	nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_ro32(void *obj, u64 addr)
-{
-	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
-	nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wo08(void *obj, u64 addr, u8 data)
-{
-	nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
-	nv_ofuncs(obj)->wr08(obj, addr, data);
-}
-
-static inline void
-nv_wo16(void *obj, u64 addr, u16 data)
-{
-	nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
-	nv_ofuncs(obj)->wr16(obj, addr, data);
-}
-
-static inline void
-nv_wo32(void *obj, u64 addr, u32 data)
-{
-	nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
-	nv_ofuncs(obj)->wr32(obj, addr, data);
-}
-
-static inline u32
-nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_ro32(obj, addr);
-	nv_wo32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
-
-static inline int
-nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
-{
-	unsigned char c1, c2;
-
-	while (len--) {
-		c1 = nv_ro08(obj, addr++);
-		c2 = *(str++);
-		if (c1 != c2)
-			return c1 - c2;
-	}
-	return 0;
-}
 #endif
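
With nvkm_ofuncs/nvkm_omthds gone, an object is now an embedded struct nvkm_object initialised against a static nvkm_object_func table. A minimal sketch of the new shape; the xxx_ names are illustrative, and the assumption that dtor returns the allocation for the core to kfree() is inferred from nvkm_object_dtor()'s void * return:

  struct xxx_object {
          struct nvkm_object object;
          /* implementation state */
  };

  static void *
  xxx_dtor(struct nvkm_object *object)
  {
          return container_of(object, struct xxx_object, object);
  }

  static const struct nvkm_object_func
  xxx_object_func = {
          .dtor = xxx_dtor,
  };

  static int
  xxx_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                 struct nvkm_object **pobject)
  {
          struct xxx_object *xxx = kzalloc(sizeof(*xxx), GFP_KERNEL);

          if (!xxx)
                  return -ENOMEM;
          nvkm_object_ctor(&xxx_object_func, oclass, &xxx->object);
          *pobject = &xxx->object;
          return 0;
  }
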
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
new file mode 100644
index 0000000..bd52236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -0,0 +1,22 @@
+#ifndef __NVKM_OPROXY_H__
+#define __NVKM_OPROXY_H__
+#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
+#include <core/object.h>
+
+struct nvkm_oproxy {
+	const struct nvkm_oproxy_func *func;
+	struct nvkm_object base;
+	struct nvkm_object *object;
+};
+
+struct nvkm_oproxy_func {
+	void (*dtor[2])(struct nvkm_oproxy *);
+	int  (*init[2])(struct nvkm_oproxy *);
+	int  (*fini[2])(struct nvkm_oproxy *, bool suspend);
+};
+
+void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy *);
+int  nvkm_oproxy_new_(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 532bfa8..80fdc14 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -4,6 +4,7 @@
 
 const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
 bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+long nvkm_longopt(const char *optstr, const char *opt, long value);
 int  nvkm_dbgopt(const char *optstr, const char *sub);
 
 /* compares unterminated string 'str' with zero-terminated string 'cmp' */
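
nvkm_longopt() follows the same convention as nvkm_boolopt() above: the final argument is the fallback returned when the option is absent. A one-line sketch (the option name is made up for illustration):

  /* parse "NvSomeLimit=<n>" out of a config string, defaulting to 16 */
  long limit = nvkm_longopt(cfgopt, "NvSomeLimit", 16);
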
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
deleted file mode 100644
index 837e4fe..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef __NVKM_PARENT_H__
-#define __NVKM_PARENT_H__
-#include <core/object.h>
-
-struct nvkm_sclass {
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-};
-
-struct nvkm_parent {
-	struct nvkm_object object;
-
-	struct nvkm_sclass *sclass;
-	u64 engine;
-
-	int  (*context_attach)(struct nvkm_object *, struct nvkm_object *);
-	int  (*context_detach)(struct nvkm_object *, bool suspend,
-			       struct nvkm_object *);
-
-	int  (*object_attach)(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name);
-	void (*object_detach)(struct nvkm_object *parent, int cookie);
-};
-
-static inline struct nvkm_parent *
-nv_parent(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
-		nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_parent_create(p,e,c,v,s,m,d)                                   \
-	nvkm_parent_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_parent_init(p)                                                 \
-	nvkm_object_init(&(p)->object)
-#define nvkm_parent_fini(p,s)                                               \
-	nvkm_object_fini(&(p)->object, (s))
-
-int  nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-void nvkm_parent_destroy(struct nvkm_parent *);
-
-void _nvkm_parent_dtor(struct nvkm_object *);
-#define _nvkm_parent_init nvkm_object_init
-#define _nvkm_parent_fini nvkm_object_fini
-
-int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
-		       struct nvkm_object **pengine,
-		       struct nvkm_oclass **poclass);
-int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
new file mode 100644
index 0000000..78d41be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_DEVICE_PCI_H__
+#define __NVKM_DEVICE_PCI_H__
+#include <core/device.h>
+
+struct nvkm_device_pci {
+	struct nvkm_device device;
+	struct pci_dev *pdev;
+	bool suspend;
+};
+
+int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
+			bool detect, bool mmio, u64 subdev_mask,
+			struct nvkm_device **);
+#endif
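
A sketch of how a PCI probe path might call the new constructor. The flag meanings are inferred from the parameter names, and treating cfg/dbg as optional NULLs is an assumption:

  struct nvkm_device *device;
  int ret;

  /* detect=true: identify the chipset; mmio=true: map registers;
   * subdev_mask ~0ULL: permit every subdev
   */
  ret = nvkm_device_pci_new(pdev, NULL, NULL,
                            true, true, ~0ULL, &device);
  if (ret)
          return ret;
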
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
deleted file mode 100644
index 8364817..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __NVKM_PRINTK_H__
-#define __NVKM_PRINTK_H__
-#include <core/os.h>
-#include <core/debug.h>
-struct nvkm_object;
-
-void __printf(3, 4)
-nv_printk_(struct nvkm_object *, int, const char *, ...);
-
-#define nv_printk(o,l,f,a...) do {                                             \
-	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
-		nv_printk_(nv_object(o), NV_DBG_##l, f, ##a);                  \
-} while(0)
-
-#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
-#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
-#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
-#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
-#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
-#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
-#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
-
-#define nv_assert(f,a...) do {                                                 \
-	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
-		nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a);                   \
-	BUG_ON(1);                                                             \
-} while(0)
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index cc132ea..5ee6298 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,19 +2,27 @@
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
 
-struct nvkm_ramht {
-	struct nvkm_gpuobj gpuobj;
-	int bits;
+struct nvkm_ramht_data {
+	struct nvkm_gpuobj *inst;
+	int chid;
+	u32 handle;
 };
 
-int  nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
-void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
-int  nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		    u32 align, struct nvkm_ramht **);
+struct nvkm_ramht {
+	struct nvkm_device *device;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_gpuobj *gpuobj;
+	int size;
+	int bits;
+	struct nvkm_ramht_data data[];
+};
 
-static inline void
-nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
-{
-	nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
-}
+int  nvkm_ramht_new(struct nvkm_device *, u32 size, u32 align,
+		    struct nvkm_gpuobj *, struct nvkm_ramht **);
+void nvkm_ramht_del(struct nvkm_ramht **);
+int  nvkm_ramht_insert(struct nvkm_ramht *, struct nvkm_object *,
+		       int chid, int addr, u32 handle, u32 context);
+void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *, int chid, u32 handle);
 #endif
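
The reworked RAMHT is built against a device (with an optional parent gpuobj) rather than arbitrary objects, and nvkm_ramht_insert() now takes the owning object plus channel/address context directly. A hedged sketch; the size, chid, addr, handle and context values are illustrative, and the assumption that insert returns a non-negative cookie on success is inferred from nvkm_ramht_remove() taking one back:

  struct nvkm_ramht *ramht;
  int hash, ret;

  ret = nvkm_ramht_new(device, 0x8000, 0, parent, &ramht);
  if (ret)
          return ret;

  hash = nvkm_ramht_insert(ramht, object, chid, 4, handle, context);
  if (hash >= 0)
          nvkm_ramht_remove(ramht, hash);

  nvkm_ramht_del(&ramht);
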
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 6fdc391..3b5dc9c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,119 +1,50 @@
 #ifndef __NVKM_SUBDEV_H__
 #define __NVKM_SUBDEV_H__
-#include <core/object.h>
-#include <core/devidx.h>
-
-#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
-#define NV_SUBDEV(name,var)  NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+#include <core/device.h>
 
 struct nvkm_subdev {
-	struct nvkm_object object;
+	const struct nvkm_subdev_func *func;
+	struct nvkm_device *device;
+	enum nvkm_devidx index;
+	u32 pmc_enable;
 	struct mutex mutex;
-	const char *name;
-	void __iomem *mmio;
 	u32 debug;
-	u32 unit;
 
+	bool oneinit;
+};
+
+struct nvkm_subdev_func {
+	void *(*dtor)(struct nvkm_subdev *);
+	int (*preinit)(struct nvkm_subdev *);
+	int (*oneinit)(struct nvkm_subdev *);
+	int (*init)(struct nvkm_subdev *);
+	int (*fini)(struct nvkm_subdev *, bool suspend);
 	void (*intr)(struct nvkm_subdev *);
 };
 
-static inline struct nvkm_subdev *
-nv_subdev(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
-		nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-static inline int
-nv_subidx(struct nvkm_subdev *subdev)
-{
-	return nv_hclass(subdev) & 0xff;
-}
-
-struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
-
-#define nvkm_subdev_create(p,e,o,v,s,f,d)                                   \
-	nvkm_subdev_create_((p), (e), (o), (v), (s), (f),                   \
-			       sizeof(**d),(void **)d)
-
-int  nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    const char *sname, const char *fname,
-			    int size, void **);
-void nvkm_subdev_destroy(struct nvkm_subdev *);
+extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
+		      int index, u32 pmc_enable, struct nvkm_subdev *);
+void nvkm_subdev_del(struct nvkm_subdev **);
+int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
 int  nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
-void nvkm_subdev_reset(struct nvkm_object *);
+void nvkm_subdev_intr(struct nvkm_subdev *);
 
-void _nvkm_subdev_dtor(struct nvkm_object *);
-int  _nvkm_subdev_init(struct nvkm_object *);
-int  _nvkm_subdev_fini(struct nvkm_object *, bool suspend);
-
-#define s_printk(s,l,f,a...) do {                                              \
-	if ((s)->debug >= OS_DBG_##l) {                                        \
-		nv_printk((s)->base.parent, (s)->name, l, f, ##a);             \
+/* subdev logging */
+#define nvkm_printk_(s,l,p,f,a...) do {                                        \
+	struct nvkm_subdev *_subdev = (s);                                     \
+	if (_subdev->debug >= (l)) {                                           \
+		dev_##p(_subdev->device->dev, "%s: "f,                         \
+			nvkm_subdev_name[_subdev->index], ##a);                \
 	}                                                                      \
 } while(0)
-
-static inline u8
-nv_rd08(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u8 data = ioread8(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_rd16(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u16 data = ioread16_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_rd32(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u32 data = ioread32_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wr08(void *obj, u32 addr, u8 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
-	iowrite8(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr16(void *obj, u32 addr, u16 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
-	iowrite16_native(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr32(void *obj, u32 addr, u32 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
-	iowrite32_native(data, subdev->mmio + addr);
-}
-
-static inline u32
-nv_mask(void *obj, u32 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_rd32(obj, addr);
-	nv_wr32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
+#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
+#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL,   crit, f, ##a)
+#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR,    err, f, ##a)
+#define nvkm_warn(s,f,a...)  nvkm_printk((s),  WARN, notice, f, ##a)
+#define nvkm_info(s,f,a...)  nvkm_printk((s),  INFO,   info, f, ##a)
+#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG,   info, f, ##a)
+#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE,   info, f, ##a)
+#define nvkm_spam(s,f,a...)  nvkm_printk((s),  SPAM,    dbg, f, ##a)
 #endif
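
The new logging macros key off the subdev's debug level and index rather than the deleted object hierarchy, so any code holding a struct nvkm_subdev pointer can log directly. A small sketch (the status value is a placeholder, since the MMIO accessors have moved out of this header):

  static void
  xxx_subdev_intr(struct nvkm_subdev *subdev)
  {
          u32 stat = 0;   /* would be read from the device's MMIO */

          if (stat)
                  nvkm_error(subdev, "intr %08x\n", stat);
          else
                  nvkm_debug(subdev, "spurious intr\n");
  }
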
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
new file mode 100644
index 0000000..5aa2480
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -0,0 +1,35 @@
+#ifndef __NVKM_DEVICE_TEGRA_H__
+#define __NVKM_DEVICE_TEGRA_H__
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nvkm_device_tegra {
+	struct nvkm_device device;
+	struct platform_device *pdev;
+	int irq;
+
+	struct reset_control *rst;
+	struct clk *clk;
+	struct clk *clk_pwr;
+
+	struct regulator *vdd;
+
+	struct {
+		/*
+		 * Protects accesses to mm from subsystems
+		 */
+		struct mutex mutex;
+
+		struct nvkm_mm mm;
+		struct iommu_domain *domain;
+		unsigned long pgshift;
+	} iommu;
+
+	int gpu_speedo;
+};
+
+int nvkm_device_tegra_new(struct platform_device *,
+			  const char *cfg, const char *dbg,
+			  bool detect, bool mmio, u64 subdev_mask,
+			  struct nvkm_device **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index e489bee..9048205 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_BSP_H__
 #define __NVKM_BSP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_bsp_oclass;
+#include <engine/xtensa.h>
+int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e832f72..e2e22cd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,16 +1,9 @@
 #ifndef __NVKM_CE_H__
 #define __NVKM_CE_H__
-#include <core/engine.h>
+#include <engine/falcon.h>
 
-void gt215_ce_intr(struct nvkm_subdev *);
-
-extern struct nvkm_oclass gt215_ce_oclass;
-extern struct nvkm_oclass gf100_ce0_oclass;
-extern struct nvkm_oclass gf100_ce1_oclass;
-extern struct nvkm_oclass gk104_ce0_oclass;
-extern struct nvkm_oclass gk104_ce1_oclass;
-extern struct nvkm_oclass gk104_ce2_oclass;
-extern struct nvkm_oclass gm204_ce0_oclass;
-extern struct nvkm_oclass gm204_ce1_oclass;
-extern struct nvkm_oclass gm204_ce2_oclass;
+int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 57c29e9..03fa57a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_CIPHER_H__
 #define __NVKM_CIPHER_H__
 #include <core/engine.h>
-extern struct nvkm_oclass g84_cipher_oclass;
+int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
deleted file mode 100644
index 5d4805e..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
-#define __NOUVEAU_SUBDEV_DEVICE_H__
-
-#include <core/device.h>
-
-struct platform_device;
-
-enum nv_bus_type {
-	NOUVEAU_BUS_PCI,
-	NOUVEAU_BUS_PLATFORM,
-};
-
-#define nouveau_device_create(p,t,n,s,c,d,u)                                   \
-	nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-
-int  nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
-
-int nv04_identify(struct nouveau_device *);
-int nv10_identify(struct nouveau_device *);
-int nv20_identify(struct nouveau_device *);
-int nv30_identify(struct nouveau_device *);
-int nv40_identify(struct nouveau_device *);
-int nv50_identify(struct nouveau_device *);
-int nvc0_identify(struct nouveau_device *);
-int nve0_identify(struct nouveau_device *);
-int gm100_identify(struct nouveau_device *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index a5e1ed8..efc74d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,32 +1,35 @@
 #ifndef __NVKM_DISP_H__
 #define __NVKM_DISP_H__
+#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
 #include <core/engine.h>
 #include <core/event.h>
 
 struct nvkm_disp {
-	struct nvkm_engine base;
+	const struct nvkm_disp_func *func;
+	struct nvkm_engine engine;
+
+	struct nvkm_oproxy *client;
 
 	struct list_head outp;
+	struct list_head conn;
 
 	struct nvkm_event hpd;
 	struct nvkm_event vblank;
+
+	struct {
+		int nr;
+	} head;
 };
 
-static inline struct nvkm_disp *
-nvkm_disp(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP);
-}
-
-extern struct nvkm_oclass *nv04_disp_oclass;
-extern struct nvkm_oclass *nv50_disp_oclass;
-extern struct nvkm_oclass *g84_disp_oclass;
-extern struct nvkm_oclass *gt200_disp_oclass;
-extern struct nvkm_oclass *g94_disp_oclass;
-extern struct nvkm_oclass *gt215_disp_oclass;
-extern struct nvkm_oclass *gf110_disp_oclass;
-extern struct nvkm_oclass *gk104_disp_oclass;
-extern struct nvkm_oclass *gk110_disp_oclass;
-extern struct nvkm_oclass *gm107_disp_oclass;
-extern struct nvkm_oclass *gm204_disp_oclass;
+int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
new file mode 100644
index 0000000..114bfb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DMA_H__
+#define __NVKM_DMA_H__
+#include <core/engine.h>
+struct nvkm_client;
+
+struct nvkm_dmaobj {
+	const struct nvkm_dmaobj_func *func;
+	struct nvkm_dma *dma;
+
+	struct nvkm_object object;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+
+	struct rb_node rb;
+	u64 handle; /*XXX HANDLE MERGE */
+};
+
+struct nvkm_dma {
+	const struct nvkm_dma_func *func;
+	struct nvkm_engine engine;
+};
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+
+int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
deleted file mode 100644
index c4fce8a..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVKM_DMAOBJ_H__
-#define __NVKM_DMAOBJ_H__
-#include <core/engine.h>
-struct nvkm_gpuobj;
-
-struct nvkm_dmaobj {
-	struct nvkm_object base;
-	u32 target;
-	u32 access;
-	u64 start;
-	u64 limit;
-};
-
-struct nvkm_dmaeng {
-	struct nvkm_engine base;
-
-	/* creates a "physical" dma object from a struct nvkm_dmaobj */
-	int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		    struct nvkm_gpuobj **);
-};
-
-extern struct nvkm_oclass *nv04_dmaeng_oclass;
-extern struct nvkm_oclass *nv50_dmaeng_oclass;
-extern struct nvkm_oclass *gf100_dmaeng_oclass;
-extern struct nvkm_oclass *gf110_dmaeng_oclass;
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index bd38cf9..81c0bc6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,41 +1,18 @@
 #ifndef __NVKM_FALCON_H__
 #define __NVKM_FALCON_H__
-#include <core/engctx.h>
-
-struct nvkm_falcon_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d)                         \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_falcon_context_destroy(d)                                      \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_falcon_context_init(d)                                         \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_falcon_context_fini(d,s)                                       \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
-#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
-#define _nvkm_falcon_context_init _nvkm_engctx_init
-#define _nvkm_falcon_context_fini _nvkm_engctx_fini
-#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
-
-struct nvkm_falcon_data {
-	bool external;
-};
-
+#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
+struct nvkm_fifo_chan;
 
 struct nvkm_falcon {
-	struct nvkm_engine base;
+	const struct nvkm_falcon_func *func;
+	struct nvkm_engine engine;
 
 	u32 addr;
 	u8  version;
 	u8  secret;
 
-	struct nvkm_gpuobj *core;
+	struct nvkm_memory *core;
 	bool external;
 
 	struct {
@@ -51,31 +28,21 @@
 	} data;
 };
 
-#define nv_falcon(priv) (&(priv)->base)
+int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-#define nvkm_falcon_create(p,e,c,b,d,i,f,r)                                 \
-	nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
-			       sizeof(**r),(void **)r)
-#define nvkm_falcon_destroy(p)                                              \
-	nvkm_engine_destroy(&(p)->base)
-#define nvkm_falcon_init(p) ({                                              \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_init(nv_object(falcon));                               \
-})
-#define nvkm_falcon_fini(p,s) ({                                            \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_fini(nv_object(falcon), (s));                          \
-})
-
-int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool, const char *,
-			   const char *, int, void **);
-
-void nvkm_falcon_intr(struct nvkm_subdev *subdev);
-
-#define _nvkm_falcon_dtor _nvkm_engine_dtor
-int  _nvkm_falcon_init(struct nvkm_object *);
-int  _nvkm_falcon_fini(struct nvkm_object *, bool);
-u32  _nvkm_falcon_rd32(struct nvkm_object *, u64);
-void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_falcon_func {
+	struct {
+		u32 *data;
+		u32  size;
+	} code;
+	struct {
+		u32 *data;
+		u32  size;
+	} data;
+	u32 pmc_enable;
+	void (*init)(struct nvkm_falcon *);
+	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+	struct nvkm_sclass sclass[];
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 97cdeab..9e664495 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,127 +1,67 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
-#include <core/namedb.h>
-
-struct nvkm_fifo_chan {
-	struct nvkm_namedb namedb;
-	struct nvkm_dmaobj *pushdma;
-	struct nvkm_gpuobj *pushgpu;
-	void __iomem *user;
-	u64 addr;
-	u32 size;
-	u16 chid;
-	atomic_t refcnt; /* NV04_NVSW_SET_REF */
-};
-
-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
-	return (void *)nv_namedb(obj);
-}
-
-#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d)                         \
-	nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n),        \
-				     (m), sizeof(**d), (void **)d)
-#define nvkm_fifo_channel_init(p)                                           \
-	nvkm_namedb_init(&(p)->namedb)
-#define nvkm_fifo_channel_fini(p,s)                                         \
-	nvkm_namedb_fini(&(p)->namedb, (s))
-
-int  nvkm_fifo_channel_create_(struct nvkm_object *,
-				  struct nvkm_object *,
-				  struct nvkm_oclass *,
-				  int bar, u32 addr, u32 size, u32 push,
-				  u64 engmask, int len, void **);
-void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
-
-#define _nvkm_fifo_channel_init _nvkm_namedb_init
-#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
-
-void _nvkm_fifo_channel_dtor(struct nvkm_object *);
-int  _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
-u32  _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
-void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
-int  _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-
-#include <core/gpuobj.h>
-
-struct nvkm_fifo_base {
-	struct nvkm_gpuobj gpuobj;
-};
-
-#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
-#define nvkm_fifo_context_destroy(p)                                        \
-	nvkm_gpuobj_destroy(&(p)->gpuobj)
-#define nvkm_fifo_context_init(p)                                           \
-	nvkm_gpuobj_init(&(p)->gpuobj)
-#define nvkm_fifo_context_fini(p,s)                                         \
-	nvkm_gpuobj_fini(&(p)->gpuobj, (s))
-
-#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
-#define _nvkm_fifo_context_init _nvkm_gpuobj_init
-#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
-#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
-
 #include <core/engine.h>
 #include <core/event.h>
 
-struct nvkm_fifo {
-	struct nvkm_engine base;
+#define NVKM_FIFO_CHID_NR 4096
 
-	struct nvkm_event cevent; /* channel creation event */
-	struct nvkm_event uevent; /* async user trigger */
-
-	struct nvkm_object **channel;
-	spinlock_t lock;
-	u16 min;
-	u16 max;
-
-	int  (*chid)(struct nvkm_fifo *, struct nvkm_object *);
-	void (*pause)(struct nvkm_fifo *, unsigned long *);
-	void (*start)(struct nvkm_fifo *, unsigned long *);
+struct nvkm_fifo_engn {
+	struct nvkm_object *object;
+	int refcount;
+	int usecount;
 };
 
-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
+struct nvkm_fifo_chan {
+	const struct nvkm_fifo_chan_func *func;
+	struct nvkm_fifo *fifo;
+	u64 engines;
+	struct nvkm_object object;
 
-#define nvkm_fifo_create(o,e,c,fc,lc,d)                                     \
-	nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
-#define nvkm_fifo_init(p)                                                   \
-	nvkm_engine_init(&(p)->base)
-#define nvkm_fifo_fini(p,s)                                                 \
-	nvkm_engine_fini(&(p)->base, (s))
+	struct list_head head;
+	u16 chid;
+	struct nvkm_gpuobj *inst;
+	struct nvkm_gpuobj *push;
+	struct nvkm_vm *vm;
+	void __iomem *user;
+	u64 addr;
+	u32 size;
 
-int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int min, int max,
-			 int size, void **);
-void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
+	struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
+};
 
-#define _nvkm_fifo_init _nvkm_engine_init
-#define _nvkm_fifo_fini _nvkm_engine_fini
+struct nvkm_fifo {
+	const struct nvkm_fifo_func *func;
+	struct nvkm_engine engine;
 
-extern struct nvkm_oclass *nv04_fifo_oclass;
-extern struct nvkm_oclass *nv10_fifo_oclass;
-extern struct nvkm_oclass *nv17_fifo_oclass;
-extern struct nvkm_oclass *nv40_fifo_oclass;
-extern struct nvkm_oclass *nv50_fifo_oclass;
-extern struct nvkm_oclass *g84_fifo_oclass;
-extern struct nvkm_oclass *gf100_fifo_oclass;
-extern struct nvkm_oclass *gk104_fifo_oclass;
-extern struct nvkm_oclass *gk20a_fifo_oclass;
-extern struct nvkm_oclass *gk208_fifo_oclass;
-extern struct nvkm_oclass *gm204_fifo_oclass;
+	DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+	int nr;
+	struct list_head chan;
+	spinlock_t lock;
 
-int  nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
-			   struct nvkm_notify *);
-void nvkm_fifo_uevent(struct nvkm_fifo *);
+	struct nvkm_event uevent; /* async user trigger */
+	struct nvkm_event cevent; /* channel creation event */
+};
 
-void nv04_fifo_intr(struct nvkm_subdev *);
-int  nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
+void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
+			struct nvkm_fifo_chan **);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
+
+int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
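
nvkm_fifo_chan_inst()/nvkm_fifo_chan_chid() pair with nvkm_fifo_chan_put(). The unsigned long *flags argument suggests the channel is returned with fifo->lock held via spin_lock_irqsave(); that locking detail is an inference from the signatures, not stated by the patch. A sketch:

  unsigned long flags;
  struct nvkm_fifo_chan *chan;

  chan = nvkm_fifo_chan_chid(fifo, chid, &flags);
  if (chan) {
          /* channel is valid (and locked) here */
          nvkm_fifo_chan_put(fifo, flags, &chan);
  }
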
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 7cbe202..f126e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,88 +1,46 @@
 #ifndef __NVKM_GR_H__
 #define __NVKM_GR_H__
-#include <core/engctx.h>
-
-struct nvkm_gr_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_gr_context_create(p,e,c,g,s,a,f,d)                          \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_gr_context_destroy(d)                                       \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_gr_context_init(d)                                          \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_gr_context_fini(d,s)                                        \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
-#define _nvkm_gr_context_init _nvkm_engctx_init
-#define _nvkm_gr_context_fini _nvkm_engctx_fini
-#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
 
 struct nvkm_gr {
-	struct nvkm_engine base;
-
-	/* Returns chipset-specific counts of units packed into an u64.
-	 */
-	u64 (*units)(struct nvkm_gr *);
+	const struct nvkm_gr_func *func;
+	struct nvkm_engine engine;
 };
 
-static inline struct nvkm_gr *
-nvkm_gr(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
-}
+u64 nvkm_gr_units(struct nvkm_gr *);
+int nvkm_gr_tlb_flush(struct nvkm_gr *);
 
-#define nvkm_gr_create(p,e,c,y,d)                                        \
-	nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
-#define nvkm_gr_destroy(d)                                               \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_gr_init(d)                                                  \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_gr_fini(d,s)                                                \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_gr_dtor _nvkm_engine_dtor
-#define _nvkm_gr_init _nvkm_engine_init
-#define _nvkm_gr_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv04_gr_oclass;
-extern struct nvkm_oclass nv10_gr_oclass;
-extern struct nvkm_oclass nv20_gr_oclass;
-extern struct nvkm_oclass nv25_gr_oclass;
-extern struct nvkm_oclass nv2a_gr_oclass;
-extern struct nvkm_oclass nv30_gr_oclass;
-extern struct nvkm_oclass nv34_gr_oclass;
-extern struct nvkm_oclass nv35_gr_oclass;
-extern struct nvkm_oclass nv40_gr_oclass;
-extern struct nvkm_oclass nv50_gr_oclass;
-extern struct nvkm_oclass *gf100_gr_oclass;
-extern struct nvkm_oclass *gf108_gr_oclass;
-extern struct nvkm_oclass *gf104_gr_oclass;
-extern struct nvkm_oclass *gf110_gr_oclass;
-extern struct nvkm_oclass *gf117_gr_oclass;
-extern struct nvkm_oclass *gf119_gr_oclass;
-extern struct nvkm_oclass *gk104_gr_oclass;
-extern struct nvkm_oclass *gk20a_gr_oclass;
-extern struct nvkm_oclass *gk110_gr_oclass;
-extern struct nvkm_oclass *gk110b_gr_oclass;
-extern struct nvkm_oclass *gk208_gr_oclass;
-extern struct nvkm_oclass *gm107_gr_oclass;
-extern struct nvkm_oclass *gm204_gr_oclass;
-extern struct nvkm_oclass *gm206_gr_oclass;
-
-#include <core/enum.h>
-
-extern const struct nvkm_bitfield nv04_gr_nsource[];
-extern struct nvkm_ofuncs nv04_gr_ofuncs;
-bool nv04_gr_idle(void *obj);
-
-extern const struct nvkm_bitfield nv10_gr_intr_name[];
-extern const struct nvkm_bitfield nv10_gr_nstatus[];
-
-extern const struct nvkm_enum nv50_data_error_names[];
+int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4e500b3..257738e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,62 +1,9 @@
 #ifndef __NVKM_MPEG_H__
 #define __NVKM_MPEG_H__
-#include <core/engctx.h>
-
-struct nvkm_mpeg_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_mpeg_context_destroy(d)                                        \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_mpeg_context_init(d)                                           \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_mpeg_context_fini(d,s)                                         \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
-#define _nvkm_mpeg_context_init _nvkm_engctx_init
-#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
-#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
-
-struct nvkm_mpeg {
-	struct nvkm_engine base;
-};
-
-#define nvkm_mpeg_create(p,e,c,d)                                           \
-	nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
-#define nvkm_mpeg_destroy(d)                                                \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_mpeg_init(d)                                                   \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_mpeg_fini(d,s)                                                 \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_dtor _nvkm_engine_dtor
-#define _nvkm_mpeg_init _nvkm_engine_init
-#define _nvkm_mpeg_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv31_mpeg_oclass;
-extern struct nvkm_oclass nv40_mpeg_oclass;
-extern struct nvkm_oclass nv44_mpeg_oclass;
-extern struct nvkm_oclass nv50_mpeg_oclass;
-extern struct nvkm_oclass g84_mpeg_oclass;
-extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
-extern struct nvkm_oclass nv31_mpeg_cclass;
-extern struct nvkm_oclass nv31_mpeg_sclass[];
-extern struct nvkm_oclass nv40_mpeg_sclass[];
-void nv31_mpeg_intr(struct nvkm_subdev *);
-void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
-int  nv31_mpeg_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
-int  nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-void nv50_mpeg_intr(struct nvkm_subdev *);
-int  nv50_mpeg_init(struct nvkm_object *);
+int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 54b7672e..08516ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,7 +1,8 @@
 #ifndef __NVKM_MSPDEC_H__
 #define __NVKM_MSPDEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_mspdec_oclass;
-extern struct nvkm_oclass gf100_mspdec_oclass;
-extern struct nvkm_oclass gk104_mspdec_oclass;
+#include <engine/falcon.h>
+int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index c6c69d0..85fd306 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_MSPPP_H__
 #define __NVKM_MSPPP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msppp_oclass;
-extern struct nvkm_oclass gf100_msppp_oclass;
+#include <engine/falcon.h>
+int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 1f193b7..99757ed 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,7 +1,9 @@
 #ifndef __NVKM_MSVLD_H__
 #define __NVKM_MSVLD_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msvld_oclass;
-extern struct nvkm_oclass gf100_msvld_oclass;
-extern struct nvkm_oclass gk104_msvld_oclass;
+#include <engine/falcon.h>
+int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 93181bb..240855a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -2,33 +2,24 @@
 #define __NVKM_PM_H__
 #include <core/engine.h>
 
-struct nvkm_perfdom;
-struct nvkm_perfctr;
 struct nvkm_pm {
-	struct nvkm_engine base;
+	const struct nvkm_pm_func *func;
+	struct nvkm_engine engine;
 
-	struct nvkm_perfctx *context;
-	void *profile_data;
+	struct nvkm_object *perfmon;
 
 	struct list_head domains;
+	struct list_head sources;
 	u32 sequence;
-
-	/*XXX: temp for daemon backend */
-	u32 pwr[8];
-	u32 last;
 };
 
-static inline struct nvkm_pm *
-nvkm_pm(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM);
-}
-
-extern struct nvkm_oclass *nv40_pm_oclass;
-extern struct nvkm_oclass *nv50_pm_oclass;
-extern struct nvkm_oclass *g84_pm_oclass;
-extern struct nvkm_oclass *gt215_pm_oclass;
-extern struct nvkm_oclass gf100_pm_oclass;
-extern struct nvkm_oclass gk104_pm_oclass;
-extern struct nvkm_oclass gk110_pm_oclass;
+int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 44590a2..7317ef4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_SEC_H__
 #define __NVKM_SEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_sec_oclass;
+#include <engine/falcon.h>
+int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index a529013..096e7db 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,50 +1,18 @@
 #ifndef __NVKM_SW_H__
 #define __NVKM_SW_H__
-#include <core/engctx.h>
-
-struct nvkm_sw_chan {
-	struct nvkm_engctx base;
-
-	int (*flip)(void *);
-	void *flip_data;
-};
-
-#define nvkm_sw_context_create(p,e,c,d)                               \
-	nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
-#define nvkm_sw_context_destroy(d)                                    \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_sw_context_init(d)                                       \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_sw_context_fini(d,s)                                     \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
-#define _nvkm_sw_context_init _nvkm_engctx_init
-#define _nvkm_sw_context_fini _nvkm_engctx_fini
-
 #include <core/engine.h>
 
 struct nvkm_sw {
-	struct nvkm_engine base;
+	const struct nvkm_sw_func *func;
+	struct nvkm_engine engine;
+
+	struct list_head chan;
 };
 
-#define nvkm_sw_create(p,e,c,d)                                       \
-	nvkm_engine_create((p), (e), (c), true, "SW", "software", (d))
-#define nvkm_sw_destroy(d)                                            \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_sw_init(d)                                               \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_sw_fini(d,s)                                             \
-	nvkm_engine_fini(&(d)->base, (s))
+bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
 
-#define _nvkm_sw_dtor _nvkm_engine_dtor
-#define _nvkm_sw_init _nvkm_engine_init
-#define _nvkm_sw_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass *nv04_sw_oclass;
-extern struct nvkm_oclass *nv10_sw_oclass;
-extern struct nvkm_oclass *nv50_sw_oclass;
-extern struct nvkm_oclass *gf100_sw_oclass;
-
-void nv04_sw_intr(struct nvkm_subdev *);
+int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 7851f18..616ea91 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_VP_H__
 #define __NVKM_VP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_vp_oclass;
+#include <engine/xtensa.h>
+int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 7a216cc..3128d21 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,35 +1,23 @@
 #ifndef __NVKM_XTENSA_H__
 #define __NVKM_XTENSA_H__
+#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
 #include <core/engine.h>
-struct nvkm_gpuobj;
 
 struct nvkm_xtensa {
-	struct nvkm_engine base;
-
+	const struct nvkm_xtensa_func *func;
 	u32 addr;
-	struct nvkm_gpuobj *gpu_fw;
-	u32 fifo_val;
-	u32 unkd28;
+	struct nvkm_engine engine;
+
+	struct nvkm_memory *gpu_fw;
 };
 
-#define nvkm_xtensa_create(p,e,c,b,d,i,f,r)				\
-	nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f),	\
-			       sizeof(**r),(void **)r)
+int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-int _nvkm_xtensa_engctx_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
-
-void _nvkm_xtensa_intr(struct nvkm_subdev *);
-int nvkm_xtensa_create_(struct nvkm_object *,
-			   struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool,
-			   const char *, const char *,
-			   int, void **);
-#define _nvkm_xtensa_dtor _nvkm_engine_dtor
-int _nvkm_xtensa_init(struct nvkm_object *);
-int _nvkm_xtensa_fini(struct nvkm_object *, bool);
-u32  _nvkm_xtensa_rd32(struct nvkm_object *, u64);
-void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_xtensa_func {
+	u32 pmc_enable;
+	u32 fifo_val;
+	u32 unkd28;
+	struct nvkm_sclass sclass[];
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index c7a007b..d3071b5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,33 +1,24 @@
 #ifndef __NVKM_BAR_H__
 #define __NVKM_BAR_H__
 #include <core/subdev.h>
-struct nvkm_mem;
 struct nvkm_vma;
 
 struct nvkm_bar {
-	struct nvkm_subdev base;
+	const struct nvkm_bar_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*alloc)(struct nvkm_bar *, struct nvkm_object *,
-		      struct nvkm_mem *, struct nvkm_object **);
-
-	int  (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	int  (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
-	void (*flush)(struct nvkm_bar *);
+	spinlock_t lock;
 
 	/* whether the BAR supports to be ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
-static inline struct nvkm_bar *
-nvkm_bar(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
-}
+void nvkm_bar_flush(struct nvkm_bar *);
+struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
+int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
-extern struct nvkm_oclass nv50_bar_oclass;
-extern struct nvkm_oclass gf100_bar_oclass;
-extern struct nvkm_oclass gk20a_bar_oclass;
+int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index cef287e..e39a1fea 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -3,7 +3,7 @@
 #include <core/subdev.h>
 
 struct nvkm_bios {
-	struct nvkm_subdev base;
+	struct nvkm_subdev subdev;
 	u32 size;
 	u8 *data;
 
@@ -19,14 +19,13 @@
 	} version;
 };
 
-static inline struct nvkm_bios *
-nvkm_bios(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
-}
-
 u8  nvbios_checksum(const u8 *data, int size);
 u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
 
-extern struct nvkm_oclass nvkm_bios_oclass;
+#define nvbios_rd08(b,o) (b)->data[(o)]
+#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
+#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+
+int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 4107aa5..3f0c7c4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -4,8 +4,8 @@
 bmp_version(struct nvkm_bios *bios)
 {
 	if (bios->bmp_offset) {
-		return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
-		       nv_ro08(bios, bios->bmp_offset + 6);
+		return nvbios_rd08(bios, bios->bmp_offset + 5) << 8 |
+		       nvbios_rd08(bios, bios->bmp_offset + 6);
 	}
 
 	return 0x0000;
@@ -15,7 +15,7 @@
 bmp_mem_init_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 24);
+		return nvbios_rd16(bios, bios->bmp_offset + 24);
 	return 0x0000;
 }
 
@@ -23,7 +23,7 @@
 bmp_sdr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 26);
+		return nvbios_rd16(bios, bios->bmp_offset + 26);
 	return 0x0000;
 }
 
@@ -31,7 +31,7 @@
 bmp_ddr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 28);
+		return nvbios_rd16(bios, bios->bmp_offset + 28);
 	return 0x0000;
 }
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 578a667..4dc1c8a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,5 +1,6 @@
 #ifndef __NVBIOS_INIT_H__
 #define __NVBIOS_INIT_H__
+
 struct nvbios_init {
 	struct nvkm_subdev *subdev;
 	struct nvkm_bios *bios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 4204267..3a9abd3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -7,6 +7,11 @@
 	unsigned rammap_max;
 	union {
 		struct {
+			unsigned rammap_00_16_20:1;
+			unsigned rammap_00_16_40:1;
+			unsigned rammap_00_17_02:1;
+		};
+		struct {
 			unsigned rammap_10_04_02:1;
 			unsigned rammap_10_04_08:1;
 		};
@@ -32,15 +37,32 @@
 	unsigned ramcfg_ver;
 	unsigned ramcfg_hdr;
 	unsigned ramcfg_timing;
+	unsigned ramcfg_DLLoff;
+	unsigned ramcfg_RON;
 	union {
 		struct {
+			unsigned ramcfg_00_03_01:1;
+			unsigned ramcfg_00_03_02:1;
+			unsigned ramcfg_00_03_08:1;
+			unsigned ramcfg_00_03_10:1;
+			unsigned ramcfg_00_04_02:1;
+			unsigned ramcfg_00_04_04:1;
+			unsigned ramcfg_00_04_20:1;
+			unsigned ramcfg_00_05:8;
+			unsigned ramcfg_00_06:8;
+			unsigned ramcfg_00_07:8;
+			unsigned ramcfg_00_08:8;
+			unsigned ramcfg_00_09:8;
+			unsigned ramcfg_00_0a_0f:4;
+			unsigned ramcfg_00_0a_f0:4;
+		};
+		struct {
 			unsigned ramcfg_10_02_01:1;
 			unsigned ramcfg_10_02_02:1;
 			unsigned ramcfg_10_02_04:1;
 			unsigned ramcfg_10_02_08:1;
 			unsigned ramcfg_10_02_10:1;
 			unsigned ramcfg_10_02_20:1;
-			unsigned ramcfg_10_DLLoff:1;
 			unsigned ramcfg_10_03_0f:4;
 			unsigned ramcfg_10_04_01:1;
 			unsigned ramcfg_10_05:8;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 609a905..8d8ee13 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -7,6 +7,8 @@
 
 u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
 u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
@@ -15,6 +17,8 @@
 u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index fba83c0..6a04d9c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -2,49 +2,23 @@
 #define __NVKM_BUS_H__
 #include <core/subdev.h>
 
-struct nvkm_bus_intr {
-	u32 stat;
-	u32 unit;
-};
-
 struct nvkm_bus {
-	struct nvkm_subdev base;
-	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32 hwsq_size;
+	const struct nvkm_bus_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_bus *
-nvkm_bus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
-}
-
-#define nvkm_bus_create(p, e, o, d)                                         \
-	nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master",             \
-			       sizeof(**d), (void **)d)
-#define nvkm_bus_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_bus_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bus_fini(p, s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_bus_dtor _nvkm_subdev_dtor
-#define _nvkm_bus_init _nvkm_subdev_init
-#define _nvkm_bus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass *nv04_bus_oclass;
-extern struct nvkm_oclass *nv31_bus_oclass;
-extern struct nvkm_oclass *nv50_bus_oclass;
-extern struct nvkm_oclass *g94_bus_oclass;
-extern struct nvkm_oclass *gf100_bus_oclass;
-
 /* interface to sequencer */
 struct nvkm_hwsq;
-int  nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **);
+int  nvkm_hwsq_init(struct nvkm_subdev *, struct nvkm_hwsq **);
 int  nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
 void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
 void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
 void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
 void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
+
+int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index f5d30385..8708f0a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -71,9 +71,10 @@
 };
 
 struct nvkm_clk {
-	struct nvkm_subdev base;
+	const struct nvkm_clk_func *func;
+	struct nvkm_subdev subdev;
 
-	struct nvkm_domain *domains;
+	const struct nvkm_domain *domains;
 	struct nvkm_pstate bstate;
 
 	struct list_head states;
@@ -94,68 +95,27 @@
 
 	bool allow_reclock;
 
-	int  (*read)(struct nvkm_clk *, enum nv_clk_src);
-	int  (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
-	int  (*prog)(struct nvkm_clk *);
-	void (*tidy)(struct nvkm_clk *);
-
 	/*XXX: die, these are here *only* to support the completely
-	 *     bat-shit insane what-was-nvkm_hw.c code
+	 *     bat-shit insane what-was-nouveau_hw.c code
 	 */
 	int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
 			struct nvkm_pll_vals *pv);
 	int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
 };
 
-static inline struct nvkm_clk *
-nvkm_clk(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
-}
-
-#define nvkm_clk_create(p,e,o,i,r,s,n,d)                                  \
-	nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d),  \
-			      (void **)d)
-#define nvkm_clk_destroy(p) ({                                            \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_dtor(nv_object(clk));                                   \
-})
-#define nvkm_clk_init(p) ({                                               \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_init(nv_object(clk));                                   \
-})
-#define nvkm_clk_fini(p,s) ({                                             \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_fini(nv_object(clk), (s));                              \
-})
-
-int  nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *,
-			   struct nvkm_domain *, struct nvkm_pstate *,
-			   int, bool, int, void **);
-void _nvkm_clk_dtor(struct nvkm_object *);
-int  _nvkm_clk_init(struct nvkm_object *);
-int  _nvkm_clk_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nv04_clk_oclass;
-extern struct nvkm_oclass nv40_clk_oclass;
-extern struct nvkm_oclass *nv50_clk_oclass;
-extern struct nvkm_oclass *g84_clk_oclass;
-extern struct nvkm_oclass *mcp77_clk_oclass;
-extern struct nvkm_oclass gt215_clk_oclass;
-extern struct nvkm_oclass gf100_clk_oclass;
-extern struct nvkm_oclass gk104_clk_oclass;
-extern struct nvkm_oclass gk20a_clk_oclass;
-
-int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
-int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
-		      struct nvkm_pll_vals *);
-int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
-int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
-		       int clk, struct nvkm_pll_vals *);
-
+int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
 int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
 int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
 int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+
+int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index d1bbe0d..6c1407f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,32 +1,31 @@
 #ifndef __NVKM_DEVINIT_H__
 #define __NVKM_DEVINIT_H__
 #include <core/subdev.h>
+struct nvkm_devinit;
 
 struct nvkm_devinit {
-	struct nvkm_subdev base;
+	const struct nvkm_devinit_func *func;
+	struct nvkm_subdev subdev;
 	bool post;
-	void (*meminit)(struct nvkm_devinit *);
-	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
-	u32  (*mmio)(struct nvkm_devinit *, u32 addr);
 };
 
-static inline struct nvkm_devinit *
-nvkm_devinit(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT);
-}
+u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
+int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
+void nvkm_devinit_meminit(struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
+int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
 
-extern struct nvkm_oclass *nv04_devinit_oclass;
-extern struct nvkm_oclass *nv05_devinit_oclass;
-extern struct nvkm_oclass *nv10_devinit_oclass;
-extern struct nvkm_oclass *nv1a_devinit_oclass;
-extern struct nvkm_oclass *nv20_devinit_oclass;
-extern struct nvkm_oclass *nv50_devinit_oclass;
-extern struct nvkm_oclass *g84_devinit_oclass;
-extern struct nvkm_oclass *g98_devinit_oclass;
-extern struct nvkm_oclass *gt215_devinit_oclass;
-extern struct nvkm_oclass *mcp89_devinit_oclass;
-extern struct nvkm_oclass *gf100_devinit_oclass;
-extern struct nvkm_oclass *gm107_devinit_oclass;
-extern struct nvkm_oclass *gm204_devinit_oclass;
+int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 16da56c..85ab72c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -18,7 +18,7 @@
 #define NV_MEM_TARGET_VM          3
 #define NV_MEM_TARGET_GART        4
 
-#define NV_MEM_TYPE_VM 0x7f
+#define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
 struct nvkm_mem {
@@ -46,62 +46,47 @@
 };
 
 struct nvkm_fb {
-	struct nvkm_subdev base;
-
-	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+	const struct nvkm_fb_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
 
-	struct nvkm_mm vram;
-	struct nvkm_mm tags;
-
 	struct {
 		struct nvkm_fb_tile region[16];
 		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
 	} tile;
 };
 
-static inline struct nvkm_fb *
-nvkm_fb(void *obj)
-{
-	/* fbram uses this before device subdev pointer is valid */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_FB)
-		return obj;
+bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
 
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB);
-}
-
-extern struct nvkm_oclass *nv04_fb_oclass;
-extern struct nvkm_oclass *nv10_fb_oclass;
-extern struct nvkm_oclass *nv1a_fb_oclass;
-extern struct nvkm_oclass *nv20_fb_oclass;
-extern struct nvkm_oclass *nv25_fb_oclass;
-extern struct nvkm_oclass *nv30_fb_oclass;
-extern struct nvkm_oclass *nv35_fb_oclass;
-extern struct nvkm_oclass *nv36_fb_oclass;
-extern struct nvkm_oclass *nv40_fb_oclass;
-extern struct nvkm_oclass *nv41_fb_oclass;
-extern struct nvkm_oclass *nv44_fb_oclass;
-extern struct nvkm_oclass *nv46_fb_oclass;
-extern struct nvkm_oclass *nv47_fb_oclass;
-extern struct nvkm_oclass *nv49_fb_oclass;
-extern struct nvkm_oclass *nv4e_fb_oclass;
-extern struct nvkm_oclass *nv50_fb_oclass;
-extern struct nvkm_oclass *g84_fb_oclass;
-extern struct nvkm_oclass *gt215_fb_oclass;
-extern struct nvkm_oclass *mcp77_fb_oclass;
-extern struct nvkm_oclass *mcp89_fb_oclass;
-extern struct nvkm_oclass *gf100_fb_oclass;
-extern struct nvkm_oclass *gk104_fb_oclass;
-extern struct nvkm_oclass *gk20a_fb_oclass;
-extern struct nvkm_oclass *gm107_fb_oclass;
+int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
@@ -112,36 +97,35 @@
 	u32 freq;
 };
 
+enum nvkm_ram_type {
+	NVKM_RAM_TYPE_UNKNOWN = 0,
+	NVKM_RAM_TYPE_STOLEN,
+	NVKM_RAM_TYPE_SGRAM,
+	NVKM_RAM_TYPE_SDRAM,
+	NVKM_RAM_TYPE_DDR1,
+	NVKM_RAM_TYPE_DDR2,
+	NVKM_RAM_TYPE_DDR3,
+	NVKM_RAM_TYPE_GDDR2,
+	NVKM_RAM_TYPE_GDDR3,
+	NVKM_RAM_TYPE_GDDR4,
+	NVKM_RAM_TYPE_GDDR5
+};
+
 struct nvkm_ram {
-	struct nvkm_object base;
-	enum {
-		NV_MEM_TYPE_UNKNOWN = 0,
-		NV_MEM_TYPE_STOLEN,
-		NV_MEM_TYPE_SGRAM,
-		NV_MEM_TYPE_SDRAM,
-		NV_MEM_TYPE_DDR1,
-		NV_MEM_TYPE_DDR2,
-		NV_MEM_TYPE_DDR3,
-		NV_MEM_TYPE_GDDR2,
-		NV_MEM_TYPE_GDDR3,
-		NV_MEM_TYPE_GDDR4,
-		NV_MEM_TYPE_GDDR5
-	} type;
-	u64 stolen;
+	const struct nvkm_ram_func *func;
+	struct nvkm_fb *fb;
+	enum nvkm_ram_type type;
 	u64 size;
-	u32 tags;
+
+#define NVKM_RAM_MM_SHIFT 12
+	struct nvkm_mm vram;
+	struct nvkm_mm tags;
+	u64 stolen;
 
 	int ranks;
 	int parts;
 	int part_mask;
 
-	int  (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
-		    u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_fb *, struct nvkm_mem **);
-
-	int  (*calc)(struct nvkm_fb *, u32 freq);
-	int  (*prog)(struct nvkm_fb *);
-	void (*tidy)(struct nvkm_fb *);
 	u32 freq;
 	u32 mr[16];
 	u32 mr1_nuts;
@@ -151,4 +135,17 @@
 	struct nvkm_ram_data xition;
 	struct nvkm_ram_data target;
 };
+
+struct nvkm_ram_func {
+	void *(*dtor)(struct nvkm_ram *);
+	int (*init)(struct nvkm_ram *);
+
+	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
+		   u32 type, struct nvkm_mem **);
+	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
+
+	int (*calc)(struct nvkm_ram *, u32 freq);
+	int (*prog)(struct nvkm_ram *);
+	void (*tidy)(struct nvkm_ram *);
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index a138478..ae201e3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,28 +1,16 @@
 #ifndef __NVKM_FUSE_H__
 #define __NVKM_FUSE_H__
 #include <core/subdev.h>
-#include <core/device.h>
 
 struct nvkm_fuse {
-	struct nvkm_subdev base;
+	const struct nvkm_fuse_func *func;
+	struct nvkm_subdev subdev;
+	spinlock_t lock;
 };
 
-static inline struct nvkm_fuse *
-nvkm_fuse(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
-}
+u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
 
-#define nvkm_fuse_create(p, e, o, d)                                        \
-	nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
-
-int  nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_fuse_dtor(struct nvkm_object *);
-int  _nvkm_fuse_init(struct nvkm_object *);
-#define _nvkm_fuse_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_fuse_oclass;
-extern struct nvkm_oclass gf100_fuse_oclass;
-extern struct nvkm_oclass gm107_fuse_oclass;
+int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ca5099a..9b9c6d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -19,26 +19,21 @@
 };
 
 struct nvkm_gpio {
-	struct nvkm_subdev base;
+	const struct nvkm_gpio_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_event event;
-
-	void (*reset)(struct nvkm_gpio *, u8 func);
-	int  (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
-		     struct dcb_gpio_func *);
-	int  (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
-	int  (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 };
 
-static inline struct nvkm_gpio *
-nvkm_gpio(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO);
-}
+void nvkm_gpio_reset(struct nvkm_gpio *, u8 func);
+int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
+		   struct dcb_gpio_func *);
+int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
+int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 
-extern struct nvkm_oclass *nv10_gpio_oclass;
-extern struct nvkm_oclass *nv50_gpio_oclass;
-extern struct nvkm_oclass *g94_gpio_oclass;
-extern struct nvkm_oclass *gf110_gpio_oclass;
-extern struct nvkm_oclass *gk104_gpio_oclass;
+int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a2e3373..6b6224d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -6,15 +6,6 @@
 #include <subdev/bios.h>
 #include <subdev/bios/i2c.h>
 
-#define NV_I2C_PORT(n)    (0x00 + (n))
-#define NV_I2C_AUX(n)     (0x10 + (n))
-#define NV_I2C_EXT(n)     (0x20 + (n))
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
-#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
-#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-
 struct nvkm_i2c_ntfy_req {
 #define NVKM_I2C_PLUG                                                      0x01
 #define NVKM_I2C_UNPLUG                                                    0x02
@@ -29,72 +20,79 @@
 	u8 mask;
 };
 
-struct nvkm_i2c_port {
-	struct nvkm_object base;
-	struct i2c_adapter adapter;
-	struct mutex mutex;
-
-	struct list_head head;
-	u8  index;
-	int aux;
-
-	const struct nvkm_i2c_func *func;
-};
-
-struct nvkm_i2c_func {
-	void (*drive_scl)(struct nvkm_i2c_port *, int);
-	void (*drive_sda)(struct nvkm_i2c_port *, int);
-	int  (*sense_scl)(struct nvkm_i2c_port *);
-	int  (*sense_sda)(struct nvkm_i2c_port *);
-
-	int  (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8);
-	int  (*pattern)(struct nvkm_i2c_port *, int pattern);
-	int  (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh);
-	int  (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe);
-};
-
-struct nvkm_i2c_board_info {
+struct nvkm_i2c_bus_probe {
 	struct i2c_board_info dev;
 	u8 udelay; /* set to 0 to use the standard delay */
 };
 
-struct nvkm_i2c {
-	struct nvkm_subdev base;
-	struct nvkm_event event;
+struct nvkm_i2c_bus {
+	const struct nvkm_i2c_bus_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */                        -1
+#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */                      -2
+	int id;
 
-	struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index);
-	struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type);
-	int  (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release_pad)(struct nvkm_i2c_port *);
-	int  (*acquire)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release)(struct nvkm_i2c_port *);
-	int  (*identify)(struct nvkm_i2c *, int index,
-			 const char *what, struct nvkm_i2c_board_info *,
-			 bool (*match)(struct nvkm_i2c_port *,
-				       struct i2c_board_info *, void *),
-			 void *);
-
-	wait_queue_head_t wait;
-	struct list_head ports;
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
 };
 
-static inline struct nvkm_i2c *
-nvkm_i2c(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
-}
+int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_release(struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_probe(struct nvkm_i2c_bus *, const char *,
+		       struct nvkm_i2c_bus_probe *,
+		       bool (*)(struct nvkm_i2c_bus *,
+			        struct i2c_board_info *, void *), void *);
 
-extern struct nvkm_oclass *nv04_i2c_oclass;
-extern struct nvkm_oclass *nv4e_i2c_oclass;
-extern struct nvkm_oclass *nv50_i2c_oclass;
-extern struct nvkm_oclass *g94_i2c_oclass;
-extern struct nvkm_oclass *gf110_i2c_oclass;
-extern struct nvkm_oclass *gf117_i2c_oclass;
-extern struct nvkm_oclass *gk104_i2c_oclass;
-extern struct nvkm_oclass *gm204_i2c_oclass;
+struct nvkm_i2c_aux {
+	const struct nvkm_i2c_aux_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+	int id;
+
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
+
+	u32 intr;
+};
+
+void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
+int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			 bool enhanced_framing);
+
+struct nvkm_i2c {
+	const struct nvkm_i2c_func *func;
+	struct nvkm_subdev subdev;
+
+	struct list_head pad;
+	struct list_head bus;
+	struct list_head aux;
+
+	struct nvkm_event event;
+};
+
+struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
+struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
+
+int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
-nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
+nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
 {
 	u8 val;
 	struct i2c_msg msgs[] = {
@@ -102,7 +100,7 @@
 		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 2)
 		return -EIO;
 
@@ -110,14 +108,14 @@
 }
 
 static inline int
-nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
+nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
 {
 	u8 buf[2] = { reg, val };
 	struct i2c_msg msgs[] = {
 		{ .addr = addr, .flags = 0, .len = 2, .buf = buf },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 1);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 1)
 		return -EIO;
 
@@ -125,11 +123,30 @@
 }
 
 static inline bool
-nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr)
+nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 {
-	return nv_rdi2cr(port, addr, 0) >= 0;
+	return nvkm_rdi2cr(adap, addr, 0) >= 0;
 }
 
-int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+static inline int
+nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
+
+static inline int
+nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 2150d8a..9d512cd5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -2,31 +2,7 @@
 #define __NVKM_IBUS_H__
 #include <core/subdev.h>
 
-struct nvkm_ibus {
-	struct nvkm_subdev base;
-};
-
-static inline struct nvkm_ibus *
-nvkm_ibus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
-}
-
-#define nvkm_ibus_create(p,e,o,d)                                           \
-	nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus",              \
-			       sizeof(**d), (void **)d)
-#define nvkm_ibus_destroy(p)                                                \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_ibus_init(p)                                                   \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_ibus_fini(p,s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_ibus_dtor _nvkm_subdev_dtor
-#define _nvkm_ibus_init _nvkm_subdev_init
-#define _nvkm_ibus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass gf100_ibus_oclass;
-extern struct nvkm_oclass gk104_ibus_oclass;
-extern struct nvkm_oclass gk20a_ibus_oclass;
+int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 1bcb763..28bc202 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,49 +1,29 @@
 #ifndef __NVKM_INSTMEM_H__
 #define __NVKM_INSTMEM_H__
 #include <core/subdev.h>
-
-struct nvkm_instobj {
-	struct nvkm_object base;
-	struct list_head head;
-	u32 *suspend;
-	u64 addr;
-	u32 size;
-};
-
-static inline struct nvkm_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_memory;
 
 struct nvkm_instmem {
-	struct nvkm_subdev base;
-	struct list_head list;
+	const struct nvkm_instmem_func *func;
+	struct nvkm_subdev subdev;
 
+	struct list_head list;
 	u32 reserved;
-	int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
-		     u32 size, u32 align, struct nvkm_object **);
+
+	struct nvkm_memory *vbios;
+	struct nvkm_ramht  *ramht;
+	struct nvkm_memory *ramro;
+	struct nvkm_memory *ramfc;
 };
 
-static inline struct nvkm_instmem *
-nvkm_instmem(void *obj)
-{
-	/* nv04/nv40 impls need to create objects in their constructor,
-	 * which is before the subdev pointer is valid
-	 */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
-		return obj;
+u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
+void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
+int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+		     struct nvkm_memory **);
 
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
-}
 
-extern struct nvkm_oclass *nv04_instmem_oclass;
-extern struct nvkm_oclass *nv40_instmem_oclass;
-extern struct nvkm_oclass *nv50_instmem_oclass;
-extern struct nvkm_oclass *gk20a_instmem_oclass;
+int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd5d29f..c773b5e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,31 +1,36 @@
 #ifndef __NVKM_LTC_H__
 #define __NVKM_LTC_H__
 #include <core/subdev.h>
-struct nvkm_mm_node;
+#include <core/mm.h>
 
 #define NVKM_LTC_MAX_ZBC_CNT 16
 
 struct nvkm_ltc {
-	struct nvkm_subdev base;
+	const struct nvkm_ltc_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*tags_alloc)(struct nvkm_ltc *, u32 count,
-			   struct nvkm_mm_node **);
-	void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **);
-	void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count);
+	u32 ltc_nr;
+	u32 lts_nr;
+
+	u32 num_tags;
+	u32 tag_base;
+	struct nvkm_mm tags;
+	struct nvkm_mm_node *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
-	int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]);
-	int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32);
+	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
+	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-static inline struct nvkm_ltc *
-nvkm_ltc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC);
-}
+int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
+void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
+void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
 
-extern struct nvkm_oclass *gf100_ltc_oclass;
-extern struct nvkm_oclass *gk104_ltc_oclass;
-extern struct nvkm_oclass *gm107_ltc_oclass;
+int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
+int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
+
+int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 055bea7..4de05e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -3,26 +3,19 @@
 #include <core/subdev.h>
 
 struct nvkm_mc {
-	struct nvkm_subdev base;
-	bool use_msi;
-	unsigned int irq;
-	void (*unk260)(struct nvkm_mc *, u32);
+	const struct nvkm_mc_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_mc *
-nvkm_mc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC);
-}
+void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_mc *);
+void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
 
-extern struct nvkm_oclass *nv04_mc_oclass;
-extern struct nvkm_oclass *nv40_mc_oclass;
-extern struct nvkm_oclass *nv44_mc_oclass;
-extern struct nvkm_oclass *nv4c_mc_oclass;
-extern struct nvkm_oclass *nv50_mc_oclass;
-extern struct nvkm_oclass *g94_mc_oclass;
-extern struct nvkm_oclass *g98_mc_oclass;
-extern struct nvkm_oclass *gf100_mc_oclass;
-extern struct nvkm_oclass *gf106_mc_oclass;
-extern struct nvkm_oclass *gk20a_mc_oclass;
+int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a53687..dcd3def 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -6,7 +6,7 @@
 struct nvkm_mem;
 
 struct nvkm_vm_pgt {
-	struct nvkm_gpuobj *obj[2];
+	struct nvkm_memory *mem[2];
 	u32 refcount[2];
 };
 
@@ -26,74 +26,23 @@
 
 struct nvkm_vm {
 	struct nvkm_mmu *mmu;
+
+	struct mutex mutex;
 	struct nvkm_mm mm;
 	struct kref refcount;
 
 	struct list_head pgd_list;
-	atomic_t engref[NVDEV_SUBDEV_NR];
+	atomic_t engref[NVKM_SUBDEV_NR];
 
 	struct nvkm_vm_pgt *pgt;
 	u32 fpde;
 	u32 lpde;
 };
 
-struct nvkm_mmu {
-	struct nvkm_subdev base;
-
-	u64 limit;
-	u8  dma_bits;
-	u32 pgt_bits;
-	u8  spg_shift;
-	u8  lpg_shift;
-
-	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
-		       u64 mm_offset, struct nvkm_vm **);
-
-	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
-			struct nvkm_gpuobj *pgt[2]);
-	void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		    struct nvkm_mem *, u32 pte, u32 cnt,
-		    u64 phys, u64 delta);
-	void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-	void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
-	void (*flush)(struct nvkm_vm *);
-};
-
-static inline struct nvkm_mmu *
-nvkm_mmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
-}
-
-#define nvkm_mmu_create(p,e,o,i,f,d)                                      \
-	nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
-#define nvkm_mmu_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_mmu_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mmu_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_mmu_dtor _nvkm_subdev_dtor
-#define _nvkm_mmu_init _nvkm_subdev_init
-#define _nvkm_mmu_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv04_mmu_oclass;
-extern struct nvkm_oclass nv41_mmu_oclass;
-extern struct nvkm_oclass nv44_mmu_oclass;
-extern struct nvkm_oclass nv50_mmu_oclass;
-extern struct nvkm_oclass gf100_mmu_oclass;
-
-int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
-		    struct nvkm_vm **);
-void nv04_mmu_dtor(struct nvkm_object *);
-
-int  nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
-		    u32 block, struct nvkm_vm **);
 int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct nvkm_vm **);
+		 struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
 int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		 struct nvkm_vma *);
 void nvkm_vm_put(struct nvkm_vma *);
@@ -101,4 +50,19 @@
 void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
 void nvkm_vm_unmap(struct nvkm_vma *);
 void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+
+struct nvkm_mmu {
+	const struct nvkm_mmu_func *func;
+	struct nvkm_subdev subdev;
+
+	u64 limit;
+	u8  dma_bits;
+	u8  lpg_shift;
+};
+
+int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index fba6134..ed02501 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -2,33 +2,5 @@
 #define __NVKM_MXM_H__
 #include <core/subdev.h>
 
-#define MXM_SANITISE_DCB 0x00000001
-
-struct nvkm_mxm {
-	struct nvkm_subdev base;
-	u32 action;
-	u8 *mxms;
-};
-
-static inline struct nvkm_mxm *
-nvkm_mxm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
-}
-
-#define nvkm_mxm_create(p,e,o,d)                                            \
-	nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mxm_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mxm_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
-int  nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int, void **);
-void nvkm_mxm_destroy(struct nvkm_mxm *);
-
-#define _nvkm_mxm_dtor _nvkm_subdev_dtor
-#define _nvkm_mxm_init _nvkm_subdev_init
-#define _nvkm_mxm_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_mxm_oclass;
+int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
new file mode 100644
index 0000000..5b3c054
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_PCI_H__
+#define __NVKM_PCI_H__
+#include <core/subdev.h>
+
+struct nvkm_pci {
+	const struct nvkm_pci_func *func;
+	struct nvkm_subdev subdev;
+	struct pci_dev *pdev;
+	int irq;
+
+	struct {
+		struct agp_bridge_data *bridge;
+		u32 mode;
+		u64 base;
+		u64 size;
+		int mtrr;
+		bool cma;
+		bool acquired;
+	} agp;
+
+	bool msi;
+};
+
+u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
+void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
+void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
+void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
+
+int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 7559423..e61923d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -3,7 +3,8 @@
 #include <core/subdev.h>
 
 struct nvkm_pmu {
-	struct nvkm_subdev base;
+	const struct nvkm_pmu_func *func;
+	struct nvkm_subdev subdev;
 
 	struct {
 		u32 base;
@@ -20,24 +21,20 @@
 		u32 message;
 		u32 data[2];
 	} recv;
-
-	int  (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
-	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
-static inline struct nvkm_pmu *
-nvkm_pmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU);
-}
+int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
+		  u32 message, u32 data0, u32 data1);
+void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
 
-extern struct nvkm_oclass *gt215_pmu_oclass;
-extern struct nvkm_oclass *gf100_pmu_oclass;
-extern struct nvkm_oclass *gf110_pmu_oclass;
-extern struct nvkm_oclass *gk104_pmu_oclass;
-extern struct nvkm_oclass *gk110_pmu_oclass;
-extern struct nvkm_oclass *gk208_pmu_oclass;
-extern struct nvkm_oclass *gk20a_pmu_oclass;
+int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
 /* interface to MEMX process running on PMU */
 struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 6662829..b268b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -2,6 +2,28 @@
 #define __NVKM_THERM_H__
 #include <core/subdev.h>
 
+#include <subdev/bios.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+enum nvkm_therm_thrs_direction {
+	NVKM_THERM_THRS_FALLING = 0,
+	NVKM_THERM_THRS_RISING = 1
+};
+
+enum nvkm_therm_thrs_state {
+	NVKM_THERM_THRS_LOWER = 0,
+	NVKM_THERM_THRS_HIGHER = 1
+};
+
+enum nvkm_therm_thrs {
+	NVKM_THERM_THRS_FANBOOST = 0,
+	NVKM_THERM_THRS_DOWNCLOCK = 1,
+	NVKM_THERM_THRS_CRITICAL = 2,
+	NVKM_THERM_THRS_SHUTDOWN = 3,
+	NVKM_THERM_THRS_NR
+};
+
 enum nvkm_therm_fan_mode {
 	NVKM_THERM_CTRL_NONE = 0,
 	NVKM_THERM_CTRL_MANUAL = 1,
@@ -24,56 +46,54 @@
 };
 
 struct nvkm_therm {
-	struct nvkm_subdev base;
+	const struct nvkm_therm_func *func;
+	struct nvkm_subdev subdev;
 
-	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
-	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
-	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
-	int (*pwm_clock)(struct nvkm_therm *, int line);
+	/* automatic thermal management */
+	struct nvkm_alarm alarm;
+	spinlock_t lock;
+	struct nvbios_therm_trip_point *last_trip;
+	int mode;
+	int cstate;
+	int suspend;
+
+	/* bios */
+	struct nvbios_therm_sensor bios_sensor;
+
+	/* fan priv */
+	struct nvkm_fan *fan;
+
+	/* alarms priv */
+	struct {
+		spinlock_t alarm_program_lock;
+		struct nvkm_alarm therm_poll_alarm;
+		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
+	} sensor;
+
+	/* what should be done if the card overheats */
+	struct {
+		void (*downclock)(struct nvkm_therm *, bool active);
+		void (*pause)(struct nvkm_therm *, bool active);
+	} emergency;
+
+	/* ic */
+	struct i2c_client *ic;
 
 	int (*fan_get)(struct nvkm_therm *);
 	int (*fan_set)(struct nvkm_therm *, int);
-	int (*fan_sense)(struct nvkm_therm *);
-
-	int (*temp_get)(struct nvkm_therm *);
 
 	int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 	int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
 };
 
-static inline struct nvkm_therm *
-nvkm_therm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM);
-}
+int nvkm_therm_temp_get(struct nvkm_therm *);
+int nvkm_therm_fan_sense(struct nvkm_therm *);
+int nvkm_therm_cstate(struct nvkm_therm *, int, int);
 
-#define nvkm_therm_create(p,e,o,d)                                          \
-	nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_therm_destroy(p) ({                                            \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_dtor(nv_object(therm));                                 \
-})
-#define nvkm_therm_init(p) ({                                               \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm));                                 \
-})
-#define nvkm_therm_fini(p,s) ({                                             \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm), (s));                            \
-})
-
-int  nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, int, void **);
-void _nvkm_therm_dtor(struct nvkm_object *);
-int  _nvkm_therm_init(struct nvkm_object *);
-int  _nvkm_therm_fini(struct nvkm_object *, bool);
-
-int  nvkm_therm_cstate(struct nvkm_therm *, int, int);
-
-extern struct nvkm_oclass nv40_therm_oclass;
-extern struct nvkm_oclass nv50_therm_oclass;
-extern struct nvkm_oclass g84_therm_oclass;
-extern struct nvkm_oclass gt215_therm_oclass;
-extern struct nvkm_oclass gf110_therm_oclass;
-extern struct nvkm_oclass gm107_therm_oclass;
+int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 4ad5508..62ed088 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -9,53 +9,58 @@
 };
 
 static inline void
-nvkm_alarm_init(struct nvkm_alarm *alarm,
-		   void (*func)(struct nvkm_alarm *))
+nvkm_alarm_init(struct nvkm_alarm *alarm, void (*func)(struct nvkm_alarm *))
 {
 	INIT_LIST_HEAD(&alarm->head);
 	alarm->func = func;
 }
 
-bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
-void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
-void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
-
-#define NV_WAIT_DEFAULT 2000000000ULL
-#define nv_wait(o,a,m,v)                                                       \
-	nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_ne(o,a,m,v)                                                    \
-	nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_cb(o,c,d)                                                      \
-	nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
-
 struct nvkm_timer {
-	struct nvkm_subdev base;
-	u64  (*read)(struct nvkm_timer *);
-	void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
-	void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
+	const struct nvkm_timer_func *func;
+	struct nvkm_subdev subdev;
+
+	struct list_head alarms;
+	spinlock_t lock;
 };
 
-static inline struct nvkm_timer *
-nvkm_timer(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
-}
+u64 nvkm_timer_read(struct nvkm_timer *);
+void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
 
-#define nvkm_timer_create(p,e,o,d)                                          \
-	nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer",            \
-			       sizeof(**d), (void **)d)
-#define nvkm_timer_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_timer_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_timer_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
+/* Delay based on GPU time (i.e. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop is terminated with 'break',
+ * in which case it returns the number of nanoseconds taken instead.
+ *
+ * NVKM_DELAY can be passed for 'cond' to disable the timeout warning,
+ * which is useful for unconditional delay loops.
+ */
+#define NVKM_DELAY _warn = false;
+#define nvkm_nsec(d,n,cond...) ({                                              \
+	struct nvkm_device *_device = (d);                                     \
+	struct nvkm_timer *_tmr = _device->timer;                              \
+	u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr);                      \
+	s64 _taken = 0;                                                        \
+	bool _warn = true;                                                     \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs);    \
+                                                                               \
+	if (_taken >= _nsecs) {                                                \
+		if (_warn) {                                                   \
+			dev_warn(_device->dev, "timeout at %s:%d/%s()!\n",     \
+				 __FILE__, __LINE__, __func__);                \
+		}                                                              \
+		_taken = -ETIMEDOUT;                                           \
+	}                                                                      \
+	_taken;                                                                \
+})
+#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
+#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
 
-int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *,
-			  struct nvkm_oclass *, int size, void **);
-
-extern struct nvkm_oclass nv04_timer_oclass;
-extern struct nvkm_oclass gk20a_timer_oclass;
+int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
 #endif
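
The nvkm_nsec()/nvkm_usec()/nvkm_msec() macros introduced above replace the
old nv_wait*() helpers: the condition body runs repeatedly until it executes
'break' or the time budget expires.  A minimal usage sketch, assuming the
nvkm_rd32() register accessor from the same rework and a made-up status
register address:

    static int
    example_wait_idle(struct nvkm_device *device)
    {
        /* Poll until bit 0 of the (hypothetical) status register at
         * 0x002140 clears, giving up after two seconds.
         */
        s64 time = nvkm_msec(device, 2000,
            if (!(nvkm_rd32(device, 0x002140) & 0x00000001))
                break;
        );
        if (time < 0)
            return -ETIMEDOUT; /* the macro already printed the warning */
        return 0; /* on success, 'time' holds the nanoseconds waited */
    }

An unconditional delay passes NVKM_DELAY as the condition, e.g.
nvkm_usec(device, 100, NVKM_DELAY), which spins for the full budget and
suppresses the timeout warning.
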
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad..ce5636f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,30 +1,28 @@
 #ifndef __NOUVEAU_VGA_H__
 #define __NOUVEAU_VGA_H__
-
-#include <core/os.h>
+#include <core/subdev.h>
 
 /* access to various legacy io ports */
-u8   nv_rdport(void *obj, int head, u16 port);
-void nv_wrport(void *obj, int head, u16 port, u8 value);
+u8   nvkm_rdport(struct nvkm_device *, int head, u16 port);
+void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
 
 /* VGA Sequencer */
-u8   nv_rdvgas(void *obj, int head, u8 index);
-void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA Graphics */
-u8   nv_rdvgag(void *obj, int head, u8 index);
-void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA CRTC */
-u8   nv_rdvgac(void *obj, int head, u8 index);
-void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA indexed port access dispatcher */
-u8   nv_rdvgai(void *obj, int head, u16 port, u8 index);
-void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
+u8   nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
+void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
 
-bool nv_lockvgac(void *obj, bool lock);
-u8   nv_rdvgaowner(void *obj);
-void nv_wrvgaowner(void *obj, u8);
-
+bool nvkm_lockvgac(struct nvkm_device *, bool lock);
+u8   nvkm_rdvgaowner(struct nvkm_device *);
+void nvkm_wrvgaowner(struct nvkm_device *, u8);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index e3d7243..5c8a3f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -2,19 +2,9 @@
 #define __NVKM_VOLT_H__
 #include <core/subdev.h>
 
-struct nvkm_voltage {
-	u32 uv;
-	u8  id;
-};
-
 struct nvkm_volt {
-	struct nvkm_subdev base;
-
-	int (*vid_get)(struct nvkm_volt *);
-	int (*get)(struct nvkm_volt *);
-	int (*vid_set)(struct nvkm_volt *, u8 vid);
-	int (*set)(struct nvkm_volt *, u32 uv);
-	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+	const struct nvkm_volt_func *func;
+	struct nvkm_subdev subdev;
 
 	u8 vid_mask;
 	u8 vid_nr;
@@ -24,35 +14,9 @@
 	} vid[256];
 };
 
-static inline struct nvkm_volt *
-nvkm_volt(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
-}
+int nvkm_volt_get(struct nvkm_volt *);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
 
-#define nvkm_volt_create(p, e, o, d)                                        \
-	nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_volt_destroy(p) ({                                             \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_dtor(nv_object(v));                                      \
-})
-#define nvkm_volt_init(p) ({                                                \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_init(nv_object(v));                                      \
-})
-#define nvkm_volt_fini(p,s)                                                 \
-	nvkm_subdev_fini((p), (s))
-
-int  nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_volt_dtor(struct nvkm_object *);
-int  _nvkm_volt_init(struct nvkm_object *);
-#define _nvkm_volt_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv40_volt_oclass;
-extern struct nvkm_oclass gk20a_volt_oclass;
-
-int nvkm_voltgpio_init(struct nvkm_volt *);
-int nvkm_voltgpio_get(struct nvkm_volt *);
-int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d8b0891..d336c22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -51,7 +51,7 @@
 			 * device (ie. the one that belongs to the fd it
 			 * opened)
 			 */
-			if (nvif_device_init(&cli->base.base, NULL,
+			if (nvif_device_init(&cli->base.object,
 					     NOUVEAU_ABI16_DEVICE, NV_DEVICE,
 					     &args, sizeof(args),
 					     &abi16->device) == 0)
@@ -69,28 +69,28 @@
 int
 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
-u16
+s32
 nouveau_abi16_swclass(struct nouveau_drm *drm)
 {
 	switch (drm->device.info.family) {
 	case NV_DEVICE_INFO_V0_TNT:
-		return 0x006e;
+		return NVIF_IOCTL_NEW_V0_SW_NV04;
 	case NV_DEVICE_INFO_V0_CELSIUS:
 	case NV_DEVICE_INFO_V0_KELVIN:
 	case NV_DEVICE_INFO_V0_RANKINE:
 	case NV_DEVICE_INFO_V0_CURIE:
-		return 0x016e;
+		return NVIF_IOCTL_NEW_V0_SW_NV10;
 	case NV_DEVICE_INFO_V0_TESLA:
-		return 0x506e;
+		return NVIF_IOCTL_NEW_V0_SW_NV50;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
-		return 0x906e;
+		return NVIF_IOCTL_NEW_V0_SW_GF100;
 	}
 
 	return 0x0000;
@@ -100,6 +100,7 @@
 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
 			struct nouveau_abi16_ntfy *ntfy)
 {
+	nvif_object_fini(&ntfy->object);
 	nvkm_mm_free(&chan->heap, &ntfy->node);
 	list_del(&ntfy->head);
 	kfree(ntfy);
@@ -132,7 +133,8 @@
 
 	/* destroy channel object, all children will be killed too */
 	if (chan->chan) {
-		abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
+		abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
+		nouveau_channel_idle(chan->chan);
 		nouveau_channel_del(&chan->chan);
 	}
 
@@ -143,7 +145,7 @@
 void
 nouveau_abi16_fini(struct nouveau_abi16 *abi16)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	struct nouveau_abi16_chan *chan, *temp;
 
 	/* cleanup channels */
@@ -164,7 +166,6 @@
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
 	struct nvkm_gr *gr = nvxx_gr(device);
 	struct drm_nouveau_getparam *getparam = data;
 
@@ -173,19 +174,19 @@
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nv_device_is_pci(nvxx_device(device)))
+		if (!nvxx_device(device)->func->pci)
 			getparam->value = 3;
 		else
 		if (drm_pci_device_is_agp(dev))
@@ -206,7 +207,7 @@
 		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
-		getparam->value = ptimer->read(ptimer);
+		getparam->value = nvif_device_time(device);
 		break;
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
@@ -215,10 +216,10 @@
 		getparam->value = 1;
 		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
-		getparam->value = gr->units ? gr->units(gr) : 0;
+		getparam->value = nvkm_gr_units(gr);
 		break;
 	default:
-		NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
+		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
 	}
 
@@ -337,7 +338,7 @@
 	struct nouveau_abi16_chan *chan;
 
 	list_for_each_entry(chan, &abi16->channels, head) {
-		if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+		if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel))
 			return chan;
 	}
 
@@ -365,40 +366,91 @@
 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = init->handle,
-		.new.oclass = init->class,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_abi16_chan *chan;
+	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_client *client;
-	int ret;
+	struct nvif_sclass *sclass;
+	s32 oclass = 0;
+	int ret, i;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	if (init->handle == ~0)
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
 
-	/* compatibility with userspace that assumes 506e for all chipsets */
-	if (init->class == 0x506e) {
-		init->class = nouveau_abi16_swclass(drm);
-		if (init->class == 0x906e)
-			return nouveau_abi16_put(abi16, 0);
+	chan = nouveau_abi16_chan(abi16, init->channel);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+	if (ret < 0)
+		return nouveau_abi16_put(abi16, ret);
+
+	if ((init->class & 0x00ff) == 0x006e) {
+		/* nvsw: compatibility with older 0x*6e class identifier */
+		for (i = 0; !oclass && i < ret; i++) {
+			switch (sclass[i].oclass) {
+			case NVIF_IOCTL_NEW_V0_SW_NV04:
+			case NVIF_IOCTL_NEW_V0_SW_NV10:
+			case NVIF_IOCTL_NEW_V0_SW_NV50:
+			case NVIF_IOCTL_NEW_V0_SW_GF100:
+				oclass = sclass[i].oclass;
+				break;
+			default:
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b1) {
+		/* msvld: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b2) {
+		/* mspdec: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
+		/* msppp: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else {
+		oclass = init->class;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	nvif_object_sclass_put(&sclass);
+	if (!oclass)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+	if (!ntfy)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	list_add(&ntfy->head, &chan->notifiers);
+
+	client->route = NVDRM_OBJECT_ABI16;
+	ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
+			       NULL, 0, &ntfy->object);
+	client->route = NVDRM_OBJECT_NVIF;
+
+	if (ret)
+		nouveau_abi16_ntfy_fini(chan, ntfy);
 	return nouveau_abi16_put(abi16, ret);
 }
 
@@ -406,27 +458,13 @@
 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_notifierobj_alloc *info = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-		struct nv_dma_v0 ctxdma;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = info->handle,
-		.new.oclass = NV_DMA_IN_MEMORY,
-	};
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_device *device = &abi16->device;
 	struct nvif_client *client;
+	struct nv_dma_v0 args = {};
 	int ret;
 
 	if (unlikely(!abi16))
@@ -435,7 +473,7 @@
 	/* completely unnecessary for these chipsets... */
 	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
 
 	chan = nouveau_abi16_chan(abi16, info->channel);
 	if (!chan)
@@ -446,41 +484,43 @@
 		return nouveau_abi16_put(abi16, -ENOMEM);
 
 	list_add(&ntfy->head, &chan->notifiers);
-	ntfy->handle = info->handle;
 
 	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
 			   &ntfy->node);
 	if (ret)
 		goto done;
 
-	args.ctxdma.start = ntfy->node->offset;
-	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+	args.start = ntfy->node->offset;
+	args.limit = ntfy->node->offset + ntfy->node->length - 1;
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
-		args.ctxdma.start += chan->ntfy_vma.offset;
-		args.ctxdma.limit += chan->ntfy_vma.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_VM;
+		args.start += chan->ntfy_vma.offset;
+		args.limit += chan->ntfy_vma.offset;
 	} else
-	if (drm->agp.stat == ENABLED) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
-		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
-		client->super = true;
+	if (drm->agp.bridge) {
+		args.target = NV_DMA_V0_TARGET_AGP;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += drm->agp.base + chan->ntfy->bo.offset;
+		args.limit += drm->agp.base + chan->ntfy->bo.offset;
 	} else {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += chan->ntfy->bo.offset;
-		args.ctxdma.limit += chan->ntfy->bo.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += chan->ntfy->bo.offset;
+		args.limit += chan->ntfy->bo.offset;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	client->route = NVDRM_OBJECT_ABI16;
+	client->super = true;
+	ret = nvif_object_init(&chan->chan->user, info->handle,
+			       NV_DMA_IN_MEMORY, &args, sizeof(args),
+			       &ntfy->object);
 	client->super = false;
+	client->route = NVDRM_OBJECT_NVIF;
 	if (ret)
 		goto done;
 
 	info->offset = ntfy->node->offset;
-
 done:
 	if (ret)
 		nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -491,47 +531,28 @@
 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_gpuobj_free *fini = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_del del;
-	} args = {
-		.ioctl.owner = NVDRM_OBJECT_ABI16,
-		.ioctl.type = NVIF_IOCTL_V0_DEL,
-		.ioctl.path_nr = 4,
-		.ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
-		.ioctl.path[0] = fini->handle,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
-	struct nvif_client *client;
-	int ret;
+	int ret = -ENOENT;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	chan = nouveau_abi16_chan(abi16, fini->channel);
 	if (!chan)
-		return nouveau_abi16_put(abi16, -ENOENT);
-	client = nvif_client(nvif_object(&abi16->device));
+		return nouveau_abi16_put(abi16, -EINVAL);
 
 	/* synchronize with the user channel and destroy the gpu object */
 	nouveau_channel_idle(chan->chan);
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
-	if (ret)
-		return nouveau_abi16_put(abi16, ret);
-
-	/* cleanup extra state if this object was a notifier */
 	list_for_each_entry(ntfy, &chan->notifiers, head) {
-		if (ntfy->handle == fini->handle) {
-			nvkm_mm_free(&chan->heap, &ntfy->node);
-			list_del(&ntfy->head);
+		if (ntfy->object.handle == fini->handle) {
+			nouveau_abi16_ntfy_fini(chan, ntfy);
+			ret = 0;
 			break;
 		}
 	}
 
-	return nouveau_abi16_put(abi16, 0);
+	return nouveau_abi16_put(abi16, ret);
 }
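
The msvld/mspdec/msppp compatibility paths in nouveau_abi16_ioctl_grobj_alloc()
above all reduce to the same pattern: scan the classes the channel actually
advertises for the first one whose low byte matches the legacy class userspace
requested (the nvsw path differs only in matching against the fixed
NVIF_IOCTL_NEW_V0_SW_* whitelist).  A standalone sketch of that idea, using
simplified types rather than the real struct nvif_sclass:

    /* Hypothetical helper: return the first advertised class whose
     * bottom byte matches the requested (possibly stale) class, or 0
     * if none does -- the caller then fails with -EINVAL.
     */
    static s32
    match_class_by_low_byte(const s32 *advertised, int count, s32 requested)
    {
        int i;
        for (i = 0; i < count; i++) {
            if ((advertised[i] & 0x00ff) == (requested & 0x00ff))
                return advertised[i];
        }
        return 0;
    }
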
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 86eb1ca..6584557 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -13,9 +13,9 @@
 int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
 
 struct nouveau_abi16_ntfy {
+	struct nvif_object object;
 	struct list_head head;
 	struct nvkm_mm_node *node;
-	u32 handle;
 };
 
 struct nouveau_abi16_chan {
@@ -37,7 +37,7 @@
 struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
 int  nouveau_abi16_put(struct nouveau_abi16 *, int);
 void nouveau_abi16_fini(struct nouveau_abi16 *);
-u16  nouveau_abi16_swclass(struct nouveau_drm *);
+s32  nouveau_abi16_swclass(struct nouveau_drm *);
 
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 6224246..df2d981 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -372,12 +372,12 @@
 	return len;
 }
 
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
 {
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	dhandle = ACPI_HANDLE(&pdev->dev);
+	dhandle = ACPI_HANDLE(dev);
 	if (!dhandle)
 		return false;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 74acf0f..2f03653 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,7 +10,7 @@
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };
@@ -18,7 +18,7 @@
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
 static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
deleted file mode 100644
index 0b59709..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ /dev/null
@@ -1,195 +0,0 @@
-#include <linux/module.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_agp.h"
-#include "nouveau_reg.h"
-
-#if __OS_HAS_AGP
-MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
-static int nouveau_agpmode = -1;
-module_param_named(agpmode, nouveau_agpmode, int, 0400);
-
-struct nouveau_agpmode_quirk {
-	u16 hostbridge_vendor;
-	u16 hostbridge_device;
-	u16 chip_vendor;
-	u16 chip_device;
-	int mode;
-};
-
-static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
-	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
-	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
-
-	{},
-};
-
-static unsigned long
-get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
-{
-	struct nvif_device *device = &drm->device;
-	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
-	int agpmode = nouveau_agpmode;
-	unsigned long mode = info->mode;
-
-	/*
-	 * FW seems to be broken on nv18, it makes the card lock up
-	 * randomly.
-	 */
-	if (device->info.chipset == 0x18)
-		mode &= ~PCI_AGP_COMMAND_FW;
-
-	/*
-	 * Go through the quirks list and adjust the agpmode accordingly.
-	 */
-	while (agpmode == -1 && quirk->hostbridge_vendor) {
-		if (info->id_vendor == quirk->hostbridge_vendor &&
-		    info->id_device == quirk->hostbridge_device &&
-		    nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
-		    nvxx_device(device)->pdev->device == quirk->chip_device) {
-			agpmode = quirk->mode;
-			NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
-				agpmode);
-			break;
-		}
-		++quirk;
-	}
-
-	/*
-	 * AGP mode set in the command line.
-	 */
-	if (agpmode > 0) {
-		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? agpmode / 4 : agpmode;
-
-		mode = (mode & ~0x7) | (rate & 0x7);
-	}
-
-	return mode;
-}
-
-static bool
-nouveau_agp_enabled(struct nouveau_drm *drm)
-{
-	struct drm_device *dev = drm->dev;
-
-	if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
-		return false;
-
-	if (drm->agp.stat == UNKNOWN) {
-		if (!nouveau_agpmode)
-			return false;
-#ifdef __powerpc__
-		/* Disable AGP by default on all PowerPC machines for
-		 * now -- At least some UniNorth-2 AGP bridges are
-		 * known to be broken: DMA from the host to the card
-		 * works just fine, but writeback from the card to the
-		 * host goes straight to memory untranslated bypassing
-		 * the GATT somehow, making them quite painful to deal
-		 * with...
-		 */
-		if (nouveau_agpmode == -1)
-			return false;
-#endif
-		return true;
-	}
-
-	return (drm->agp.stat == ENABLED);
-}
-#endif
-
-void
-nouveau_agp_reset(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct nvif_device *device = &drm->device;
-	struct drm_device *dev = drm->dev;
-	u32 save[2];
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-
-	/* First of all, disable fast writes, otherwise if it's
-	 * already enabled in the AGP bridge and we disable the card's
-	 * AGP controller we might be locking ourselves out of it. */
-	if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
-	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
-		struct drm_agp_info info;
-		struct drm_agp_mode mode;
-
-		ret = drm_agp_info(dev, &info);
-		if (ret)
-			return;
-
-		mode.mode  = get_agp_mode(drm, &info);
-		mode.mode &= ~PCI_AGP_COMMAND_FW;
-
-		ret = drm_agp_enable(dev, mode);
-		if (ret)
-			return;
-	}
-
-
-	/* clear busmaster bit, and disable AGP */
-	save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
-	nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
-
-	/* reset PGRAPH, PFIFO and PTIMER */
-	save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
-	nvif_mask(device, 0x000200, 0x00011100, save[1]);
-
-	/* and restore bustmaster bit (gives effect of resetting AGP) */
-	nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
-#endif
-}
-
-void
-nouveau_agp_init(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	struct drm_agp_info info;
-	struct drm_agp_mode mode;
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-	drm->agp.stat = DISABLE;
-
-	ret = drm_agp_acquire(dev);
-	if (ret) {
-		NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
-		return;
-	}
-
-	ret = drm_agp_info(dev, &info);
-	if (ret) {
-		NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
-		return;
-	}
-
-	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, &info);
-
-	ret = drm_agp_enable(dev, mode);
-	if (ret) {
-		NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
-		return;
-	}
-
-	drm->agp.stat = ENABLED;
-	drm->agp.base = info.aperture_base;
-	drm->agp.size = info.aperture_size;
-#endif
-}
-
-void
-nouveau_agp_fini(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	if (dev->agp && dev->agp->acquired)
-		drm_agp_release(dev);
-#endif
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
deleted file mode 100644
index b55c086..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __NOUVEAU_AGP_H__
-#define __NOUVEAU_AGP_H__
-
-struct nouveau_drm;
-
-void nouveau_agp_reset(struct nouveau_drm *);
-void nouveau_agp_init(struct nouveau_drm *);
-void nouveau_agp_fini(struct nouveau_drm *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index e566c5b..89eb460 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,7 +40,7 @@
 nv40_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
 				   NV40_PMC_BACKLIGHT_MASK) >> 16;
 
@@ -51,7 +51,7 @@
 nv40_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = bd->props.brightness;
 	int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
 
@@ -71,7 +71,7 @@
 nv40_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct backlight_properties props;
 	struct backlight_device *bd;
 
@@ -97,7 +97,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val;
@@ -112,7 +112,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
@@ -133,7 +133,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -151,7 +151,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -177,7 +177,7 @@
 nv50_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nouveau_encoder *nv_encoder;
 	struct backlight_properties props;
 	struct backlight_device *bd;
@@ -193,9 +193,9 @@
 	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
 		return 0;
 
-	if (device->info.chipset <= 0xa0 ||
-	    device->info.chipset == 0xaa ||
-	    device->info.chipset == 0xac)
+	if (drm->device.info.chipset <= 0xa0 ||
+	    drm->device.info.chipset == 0xaa ||
+	    drm->device.info.chipset == 0xac)
 		ops = &nv50_bl_ops;
 	else
 		ops = &nva3_bl_ops;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0190b69..4dca65a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
 	uint32_t sel_clk_binding, sel_clk;
@@ -318,7 +318,8 @@
 static int
 get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 
 	/*
 	 * The fp strap is normally dictated by the "User Strap" in
@@ -332,7 +333,7 @@
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
 
-	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -634,7 +635,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	int cv = bios->chip_version;
 	uint16_t clktable = 0, scriptptr;
@@ -1481,22 +1482,20 @@
 			entry->dpconf.link_bw = 540000;
 			break;
 		}
-		entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-		if (dcb->version < 0x41) {
-			switch (entry->dpconf.link_nr) {
-			case 0xf:
-				entry->dpconf.link_nr = 4;
-				break;
-			case 0x3:
-				entry->dpconf.link_nr = 2;
-				break;
-			default:
-				entry->dpconf.link_nr = 1;
-				break;
-			}
+		switch ((conf & 0x0f000000) >> 24) {
+		case 0xf:
+		case 0x4:
+			entry->dpconf.link_nr = 4;
+			break;
+		case 0x3:
+		case 0x2:
+			entry->dpconf.link_nr = 2;
+			break;
+		default:
+			entry->dpconf.link_nr = 1;
+			break;
 		}
 		link = entry->dpconf.sor.link;
-		entry->i2c_index += NV_I2C_AUX(0);
 		break;
 	case DCB_OUTPUT_TMDS:
 		if (dcb->version >= 0x40) {
@@ -1892,11 +1891,12 @@
 	idx = -1;
 	while ((conn = olddcb_conn(dev, ++idx))) {
 		if (conn[0] != 0xff) {
-			NV_INFO(drm, "DCB conn %02d: ", idx);
 			if (olddcb_conntab(dev)[3] < 4)
-				pr_cont("%04x\n", ROM16(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %04x\n",
+					idx, ROM16(conn[0]));
 			else
-				pr_cont("%08x\n", ROM32(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %08x\n",
+					idx, ROM32(conn[0]));
 		}
 	}
 	dcb_fake_connectors(bios);
@@ -1915,7 +1915,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	uint8_t bytes_to_write;
 	uint16_t hwsq_entry_offset;
 	int i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1..15057b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	struct nvkm_fb_tile *tile = &pfb->tile.region[i];
-	struct nvkm_engine *engine;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
 
 	if (tile->pitch)
-		pfb->tile.fini(pfb, i, tile);
+		nvkm_fb_tile_fini(fb, i, tile);
 
 	if (pitch)
-		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-	pfb->tile.prog(pfb, i, tile);
-
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
-		engine->tile_prog(engine, i);
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
-		engine->tile_prog(engine, i);
+	nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++) {
+	for (i = 0; i < fb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);
 
 		if (pitch && !found) {
 			found = tile;
 			continue;
 
-		} else if (tile && pfb->tile.region[i].pitch) {
+		} else if (tile && fb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -214,7 +209,7 @@
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;
@@ -471,8 +466,8 @@
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void
@@ -491,8 +486,8 @@
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int
@@ -581,10 +576,9 @@
 {
 #if __OS_HAS_AGP
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
 
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+	if (drm->agp.bridge) {
+		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
 					 page_flags, dummy_read);
 	}
 #endif
@@ -636,12 +630,12 @@
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
-		if (drm->agp.stat != ENABLED)
+		if (!drm->agp.bridge)
 			man->func = &nv04_gart_manager;
 		else
 			man->func = &ttm_bo_manager_func;
 
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -1104,7 +1098,7 @@
 	static const struct {
 		const char *name;
 		int engine;
-		u32 oclass;
+		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@
 		if (chan == NULL)
 			continue;
 
-		ret = nvif_object_init(chan->object, NULL,
+		ret = nvif_object_init(&chan->user,
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
@@ -1356,6 +1350,7 @@
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 	int ret;
 
@@ -1372,10 +1367,10 @@
 		return 0;
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+			mem->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+		mem->bus.base = device->func->resource_addr(device, 1);
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->device);
+			int page_shift = 12;
+			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				page_shift = node->page_shift;
 
-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-					&node->bar_vma);
+			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+					    &node->bar_vma);
 			if (ret)
 				return ret;
 
+			nvkm_vm_map(&node->bar_vma, node);
 			mem->bus.offset = node->bar_vma.offset;
 		}
 		break;
@@ -1406,14 +1405,13 @@
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvkm_bar *bar = nvxx_bar(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
 
-	bar->unmap(bar, &node->bar_vma);
+	nvkm_vm_unmap(&node->bar_vma);
+	nvkm_vm_put(&node->bar_vma);
 }
 
 static int
@@ -1421,8 +1419,8 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvif_device *device = &drm->device;
-	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached)
 		return ttm_dma_populate(ttm_dma, dev->dev);
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		return ttm_agp_tt_populate(ttm);
 	}
 #endif
@@ -1553,20 +1551,20 @@
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached) {
 		ttm_dma_unpopulate(ttm_dma, dev->dev);
 		return;
 	}
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589bab..ff5e59d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
 
 #include <nvif/os.h>
 #include <nvif/class.h>
+#include <nvif/ioctl.h>
 
 /*XXX*/
 #include <core/client.h>
@@ -42,20 +43,26 @@
 int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
-	struct nouveau_fence *fence = NULL;
-	int ret;
+	if (likely(chan && chan->fence)) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		struct nouveau_fence *fence = NULL;
+		int ret;
 
-	ret = nouveau_fence_new(chan, false, &fence);
-	if (!ret) {
-		ret = nouveau_fence_wait(fence, false, false);
-		nouveau_fence_unref(&fence);
+		ret = nouveau_fence_new(chan, false, &fence);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret) {
+			NV_PRINTK(err, cli, "failed to idle channel "
+					    "0x%08x [%s]\n",
+				  chan->user.handle,
+				  nvxx_client(&cli->base)->name);
+			return ret;
+		}
 	}
-
-	if (ret)
-		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
-			  chan->object->handle, nvxx_client(&cli->base)->name);
-	return ret;
+	return 0;
 }
 
 void
@@ -63,21 +70,18 @@
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
-		if (chan->fence) {
-			nouveau_channel_idle(chan);
+		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
-		}
 		nvif_object_fini(&chan->nvsw);
 		nvif_object_fini(&chan->gart);
 		nvif_object_fini(&chan->vram);
-		nvif_object_ref(NULL, &chan->object);
+		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
 		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
-		nvif_device_ref(NULL, &chan->device);
 		kfree(chan);
 	}
 	*pchan = NULL;
@@ -87,7 +91,7 @@
 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		     u32 handle, u32 size, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
@@ -98,7 +102,7 @@
 	if (!chan)
 		return -ENOMEM;
 
-	nvif_device_ref(device, &chan->device);
+	chan->device = device;
 	chan->drm = drm;
 
 	/* allocate memory for dma push buffer */
@@ -146,7 +150,8 @@
 			 */
 			args.target = NV_DMA_V0_TARGET_PCI;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
-			args.start = nv_device_resource_start(nvxx_device(device), 1);
+			args.start = nvxx_device(device)->func->
+				resource_addr(nvxx_device(device), 1);
 			args.limit = args.start + device->info.ram_user - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +160,7 @@
 			args.limit = device->info.ram_user - 1;
 		}
 	} else {
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -169,7 +174,7 @@
 		}
 	}
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+	ret = nvif_object_init(&device->object, NVDRM_PUSH |
 			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
 			       &args, sizeof(args), &chan->push.ctxdma);
 	if (ret) {
@@ -193,8 +198,9 @@
 	const u16 *oclass = oclasses;
 	union {
 		struct nv50_channel_gpfifo_v0 nv50;
+		struct fermi_channel_gpfifo_v0 fermi;
 		struct kepler_channel_gpfifo_a_v0 kepler;
-	} args, *retn;
+	} args;
 	struct nouveau_channel *chan;
 	u32 size;
 	int ret;
@@ -210,26 +216,36 @@
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
 			args.kepler.engine  = engine;
-			args.kepler.pushbuf = chan->push.ctxdma.handle;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+			args.kepler.vm = 0;
 			size = sizeof(args.kepler);
+		} else
+		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
+			args.fermi.version = 0;
+			args.fermi.ilength = 0x02000;
+			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
+			args.fermi.vm = 0;
+			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
-			args.nv50.pushbuf = chan->push.ctxdma.handle;
 			args.nv50.ilength = 0x02000;
 			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
+			args.nv50.vm = 0;
 			size = sizeof(args.nv50);
 		}
 
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, size, &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, size, &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
-				chan->chid = retn->kepler.chid;
+			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+				chan->chid = args.kepler.chid;
 			else
-				chan->chid = retn->nv50.chid;
+			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+				chan->chid = args.fermi.chid;
+			else
+				chan->chid = args.nv50.chid;
 			return ret;
 		}
 	} while (*oclass);
@@ -248,7 +264,7 @@
 					NV03_CHANNEL_DMA,
 					0 };
 	const u16 *oclass = oclasses;
-	struct nv03_channel_dma_v0 args, *retn;
+	struct nv03_channel_dma_v0 args;
 	struct nouveau_channel *chan;
 	int ret;
 
@@ -260,15 +276,14 @@
 
 	/* create channel object */
 	args.version = 0;
-	args.pushbuf = chan->push.ctxdma.handle;
+	args.pushbuf = nvif_handle(&chan->push.ctxdma);
 	args.offset = chan->push.vma.offset;
 
 	do {
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, sizeof(args), &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, sizeof(args), &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			chan->chid = retn->chid;
+			chan->chid = args.chid;
 			return ret;
 		}
 	} while (ret && *oclass);
@@ -281,13 +296,12 @@
 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 {
 	struct nvif_device *device = chan->device;
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
-	struct nvkm_sw_chan *swch;
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(chan->object);
+	nvif_object_map(&chan->user);
 
 	/* allocate dma objects to cover all allowed vram, and gart */
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +317,8 @@
 			args.limit = device->info.ram_user - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, vram,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->vram);
+		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->vram);
 		if (ret)
 			return ret;
 
@@ -315,7 +328,7 @@
 			args.start = 0;
 			args.limit = cli->vm->mmu->limit - 1;
 		} else
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -328,15 +341,14 @@
 			args.limit = mmu->limit - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, gart,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->gart);
+		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->gart);
 		if (ret)
 			return ret;
 	}
 
 	/* initialise dma tracking parameters */
-	switch (chan->object->oclass & 0x00ff) {
+	switch (chan->user.oclass & 0x00ff) {
 	case 0x006b:
 	case 0x006e:
 		chan->user_put = 0x40;
@@ -368,15 +380,12 @@
 
 	/* allocate software object class (used for fences on <= nv05) */
 	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
-		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+		ret = nvif_object_init(&chan->user, 0x006e,
+				       NVIF_IOCTL_NEW_V0_SW_NV04,
 				       NULL, 0, &chan->nvsw);
 		if (ret)
 			return ret;
 
-		swch = (void *)nvxx_object(&chan->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = chan;
-
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
@@ -395,7 +404,7 @@
 		    u32 handle, u32 arg0, u32 arg1,
 		    struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	bool super;
 	int ret;
 
@@ -405,17 +414,17 @@
 
 	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
 	if (ret) {
-		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
 		ret = nouveau_channel_dma(drm, device, handle, pchan);
 		if (ret) {
-			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
 			goto done;
 		}
 	}
 
 	ret = nouveau_channel_init(*pchan, arg0, arg1);
 	if (ret) {
-		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
 		nouveau_channel_del(pchan);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8b3640f..2ed3241 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -37,7 +37,7 @@
 	u32 user_get;
 	u32 user_put;
 
-	struct nvif_object *object;
+	struct nvif_object user;
 };
 
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3162040..2e7cbe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -125,9 +125,9 @@
 	 * is handled by the SOR itself, and not required for LVDS DDC.
 	 */
 	if (nv_connector->type == DCB_CONNECTOR_eDP) {
-		panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+		panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
 		if (panel == 0) {
-			gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
 			msleep(300);
 		}
 	}
@@ -148,7 +148,7 @@
 				break;
 		} else
 		if (nv_encoder->i2c) {
-			if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+			if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
 				break;
 		}
 	}
@@ -157,7 +157,7 @@
 	 * state to avoid confusing the SOR for other output types.
 	 */
 	if (!nv_encoder && panel == 0)
-		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
 
 	return nv_encoder;
 }
@@ -241,7 +241,7 @@
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
 	struct nouveau_encoder *nv_partner;
-	struct nvkm_i2c_port *i2c;
+	struct i2c_adapter *i2c;
 	int type;
 	int ret;
 	enum drm_connector_status conn_status = connector_status_disconnected;
@@ -259,7 +259,7 @@
 
 	nv_encoder = nouveau_connector_ddc_detect(connector);
 	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
-		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		nv_connector->edid = drm_get_edid(connector, i2c);
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
@@ -919,7 +919,7 @@
 	.force = nouveau_connector_force
 };
 
-static void
+static int
 nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 {
 	struct nouveau_encoder *nv_encoder = NULL;
@@ -930,15 +930,15 @@
 	    nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		if (mode == DRM_MODE_DPMS_ON) {
 			u8 data = DP_SET_POWER_D0;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 			usleep_range(1000, 2000);
 		} else {
 			u8 data = DP_SET_POWER_D3;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 		}
 	}
 
-	drm_helper_connector_dpms(connector, mode);
+	return drm_helper_connector_dpms(connector, mode);
 }
 
 static const struct drm_connector_funcs
@@ -980,29 +980,29 @@
 }
 
 static ssize_t
-nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 {
 	struct nouveau_connector *nv_connector =
-		container_of(aux, typeof(*nv_connector), aux);
+		container_of(obj, typeof(*nv_connector), aux);
 	struct nouveau_encoder *nv_encoder;
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 	int ret;
 
 	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
-	if (!nv_encoder || !(port = nv_encoder->i2c))
+	if (!nv_encoder || !(aux = nv_encoder->aux))
 		return -ENODEV;
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
 	if (msg->size == 0)
 		return msg->size;
 
-	ret = nvkm_i2c(port)->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
-	ret = port->func->aux(port, false, msg->request, msg->address,
-			      msg->buffer, msg->size);
-	nvkm_i2c(port)->release(port);
+	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
+				msg->buffer, msg->size);
+	nvkm_i2c_aux_release(aux);
 	if (ret >= 0) {
 		msg->reply = ret;
 		return msg->size;
@@ -1256,8 +1256,8 @@
 		break;
 	}
 
-	ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
-				true, NV04_DISP_NTFY_CONN,
+	ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
+			       NV04_DISP_NTFY_CONN,
 			       &(struct nvif_notify_conn_req_v0) {
 				.mask = NVIF_NOTIFY_CONN_V0_ANY,
 				.conn = index,
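
The connector changes above split the old nvkm_i2c_port interface in two: plain DDC now goes through a standard struct i2c_adapter, while DP AUX transactions use the dedicated nvkm_i2c_aux helpers bracketed by an explicit acquire/release pair. A minimal sketch of the new transfer pattern, distilled from the nouveau_connector_aux_xfer() hunk above (error handling abbreviated, reply bookkeeping omitted):

/* Sketch only, not part of the patch: acquire/xfer/release as used
 * by nouveau_connector_aux_xfer() above.
 */
static ssize_t
example_aux_xfer(struct nvkm_i2c_aux *aux, struct drm_dp_aux_msg *msg)
{
	int ret = nvkm_i2c_aux_acquire(aux);	/* take the AUX channel */
	if (ret)
		return ret;
	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
				msg->buffer, msg->size);
	nvkm_i2c_aux_release(aux);		/* always drop it again */
	return ret < 0 ? ret : msg->size;
}
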
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8670d90..cc6c228 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -185,7 +185,7 @@
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		ret = nvif_notify_init(&disp->disp, NULL,
+		ret = nvif_notify_init(&disp->disp,
 				       nouveau_display_vblank_handler, false,
 				       NV04_DISP_NTFY_VBLANK,
 				       &(struct nvif_notify_head_req_v0) {
@@ -358,6 +358,7 @@
 nouveau_display_init(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int ret;
 
@@ -374,6 +375,8 @@
 		nvif_notify_get(&conn->hpd);
 	}
 
+	/* enable flip completion events */
+	nvif_notify_get(&drm->flip);
 	return ret;
 }
 
@@ -381,6 +384,7 @@
 nouveau_display_fini(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int head;
 
@@ -388,6 +392,9 @@
 	for (head = 0; head < dev->mode_config.num_crtc; head++)
 		drm_vblank_off(dev, head);
 
+	/* disable flip completion events */
+	nvif_notify_put(&drm->flip);
+
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
@@ -438,6 +445,7 @@
 nouveau_display_create(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nouveau_display *disp;
 	int ret;
 
@@ -450,7 +458,7 @@
 	drm_mode_create_dvi_i_properties(dev);
 
 	dev->mode_config.funcs = &nouveau_mode_config_funcs;
-	dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+	dev->mode_config.fb_base = device->func->resource_addr(device, 1);
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
@@ -494,7 +502,7 @@
 		int i;
 
 		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
-			ret = nvif_object_init(nvif_object(&drm->device), NULL,
+			ret = nvif_object_init(&drm->device.object,
 					       NVDRM_DISPLAY, oclass[i],
 					       NULL, 0, &disp->disp);
 		}
@@ -711,7 +719,7 @@
 	chan = drm->channel;
 	if (!chan)
 		return -ENODEV;
-	cli = (void *)nvif_client(&chan->device->base);
+	cli = (void *)chan->user.client;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -847,10 +855,10 @@
 }
 
 int
-nouveau_flip_complete(void *data)
+nouveau_flip_complete(struct nvif_notify *notify)
 {
-	struct nouveau_channel *chan = data;
-	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_page_flip_state state;
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
@@ -861,7 +869,7 @@
 		}
 	}
 
-	return 0;
+	return NVIF_NOTIFY_KEEP;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 6d9245a..d168c63 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -52,9 +52,9 @@
 {
 	uint64_t val;
 
-	val = nvif_rd32(chan, chan->user_get);
+	val = nvif_rd32(&chan->user, chan->user_get);
         if (chan->user_get_hi)
-                val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
+                val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;
 
 	/* reset counter as long as GET is still advancing, this is
 	 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -82,7 +82,7 @@
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
 	struct nvkm_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
@@ -103,7 +103,7 @@
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
-	nvif_wr32(chan, 0x8c, chan->dma.ib_put);
+	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
 
@@ -113,7 +113,7 @@
 	uint32_t cnt = 0, prev_get = 0;
 
 	while (chan->dma.ib_free < count) {
-		uint32_t get = nvif_rd32(chan, 0x88);
+		uint32_t get = nvif_rd32(&chan->user, 0x88);
 		if (get != prev_get) {
 			prev_get = get;
 			cnt = 0;
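
The DMA accessor hunks above follow the same NVIF rework as the rest of the series: nvif_rd32()/nvif_wr32() now take the struct nvif_object being poked instead of inferring it from a channel pointer. In sketch form, with the offsets from the hunks above:

/* Sketch: object-based register access after the rework. */
u32 get = nvif_rd32(&chan->user, 0x88);		/* was nvif_rd32(chan, 0x88) */
nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);	/* was nvif_wr32(chan, ...) */
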
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8da0a27..aff3a9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@
 #define WRITE_PUT(val) do {                                                    \
 	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
-	nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
 } while (0)
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c3ef30b..e17e15e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,8 +31,7 @@
 #include "nouveau_crtc.h"
 
 static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
-		     u8 *dpcd)
+nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 buf[3];
@@ -40,11 +39,11 @@
 	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
 		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
-	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
 		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
@@ -55,15 +54,15 @@
 {
 	struct drm_device *dev = nv_encoder->base.base.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c_port *auxch;
+	struct nvkm_i2c_aux *aux;
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret;
 
-	auxch = nv_encoder->i2c;
-	if (!auxch)
+	aux = nv_encoder->aux;
+	if (!aux)
 		return -ENODEV;
 
-	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
+	ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
 	if (ret)
 		return ret;
 
@@ -84,6 +83,6 @@
 	NV_DEBUG(drm, "maximum: %dx%d\n",
 		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
-	nouveau_dp_probe_oui(dev, auxch, dpcd);
+	nouveau_dp_probe_oui(dev, aux, dpcd);
 	return 0;
 }
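
The nouveau_drm.c hunks that follow complete the pattern already visible in nouveau_chan.c: NVIF objects are embedded in their owning structures and set up with nvif_object_init() and torn down with nvif_object_fini(), replacing the old nvif_object_new() allocation and nvif_object_ref(NULL, ...) release. A minimal sketch of the new lifetime, using a hypothetical owner structure:

/* Sketch with a hypothetical struct; init/fini replace new/ref. */
struct example {
	struct nvif_object user;	/* embedded, not a pointer */
};

static int
example_ctor(struct nvif_device *device, u32 handle, u32 oclass,
	     void *argv, u32 argc, struct example *ex)
{
	/* parent, handle, class, class args, arg size, object to fill */
	return nvif_object_init(&device->object, handle, oclass,
				argv, argc, &ex->user);
}

static void
example_dtor(struct example *ex)
{
	nvif_object_fini(&ex->user);	/* was nvif_object_ref(NULL, &ptr) */
}
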
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 477cbb1..ccefb64 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -32,15 +32,15 @@
 #include "drmP.h"
 #include "drm_crtc_helper.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
+#include <core/pci.h>
+#include <core/tegra.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
-#include "nouveau_agp.h"
 #include "nouveau_vga.h"
 #include "nouveau_sysfs.h"
 #include "nouveau_hwmon.h"
@@ -105,14 +105,18 @@
 }
 
 static int
-nouveau_cli_create(u64 name, const char *sname,
+nouveau_cli_create(struct drm_device *dev, const char *sname,
 		   int size, void **pcli)
 {
 	struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+	int ret;
 	if (cli) {
-		int ret = nvif_client_init(NULL, NULL, sname, name,
-					   nouveau_config, nouveau_debug,
-					  &cli->base);
+		snprintf(cli->name, sizeof(cli->name), "%s", sname);
+		cli->dev = dev;
+
+		ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
+				       nouveau_config, nouveau_debug,
+				       &cli->base);
 		if (ret == 0) {
 			mutex_init(&cli->mutex);
 			usif_client_init(cli);
@@ -134,12 +138,17 @@
 static void
 nouveau_accel_fini(struct nouveau_drm *drm)
 {
-	nouveau_channel_del(&drm->channel);
+	nouveau_channel_idle(drm->channel);
 	nvif_object_fini(&drm->ntfy);
-	nvkm_gpuobj_ref(NULL, &drm->notify);
+	nvkm_gpuobj_del(&drm->notify);
+	nvif_notify_fini(&drm->flip);
 	nvif_object_fini(&drm->nvsw);
-	nouveau_channel_del(&drm->cechan);
+	nouveau_channel_del(&drm->channel);
+
+	nouveau_channel_idle(drm->cechan);
 	nvif_object_fini(&drm->ttm.copy);
+	nouveau_channel_del(&drm->cechan);
+
 	if (drm->fence)
 		nouveau_fence(drm)->dtor(drm);
 }
@@ -148,9 +157,9 @@
 nouveau_accel_init(struct nouveau_drm *drm)
 {
 	struct nvif_device *device = &drm->device;
+	struct nvif_sclass *sclass;
 	u32 arg0, arg1;
-	u32 sclass[16];
-	int ret, i;
+	int ret, i, n;
 
 	if (nouveau_noaccel)
 		return;
@@ -159,12 +168,12 @@
 	/*XXX: this is crap, but the fence/channel stuff is a little
 	 *     backwards in some places.  this will be fixed.
 	 */
-	ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+	ret = n = nvif_object_sclass_get(&device->object, &sclass);
 	if (ret < 0)
 		return;
 
-	for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
-		switch (sclass[i]) {
+	for (ret = -ENOSYS, i = 0; i < n; i++) {
+		switch (sclass[i].oclass) {
 		case NV03_CHANNEL_DMA:
 			ret = nv04_fence_create(drm);
 			break;
@@ -191,6 +200,7 @@
 		}
 	}
 
+	nvif_object_sclass_put(&sclass);
 	if (ret) {
 		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
 		nouveau_accel_fini(drm);
@@ -231,10 +241,9 @@
 		return;
 	}
 
-	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
 			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
 	if (ret == 0) {
-		struct nvkm_sw_chan *swch;
 		ret = RING_SPACE(drm->channel, 2);
 		if (ret == 0) {
 			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -246,9 +255,16 @@
 				OUT_RING  (drm->channel, 0x001f0000);
 			}
 		}
-		swch = (void *)nvxx_object(&drm->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = drm->channel;
+
+		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
+				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
+				       &drm->flip);
+		if (ret == 0)
+			ret = nvif_notify_get(&drm->flip);
+		if (ret) {
+			nouveau_accel_fini(drm);
+			return;
+		}
 	}
 
 	if (ret) {
@@ -258,15 +274,15 @@
 	}
 
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-		ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32,
-				      0, 0, &drm->notify);
+		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+				      NULL, &drm->notify);
 		if (ret) {
 			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 			nouveau_accel_fini(drm);
 			return;
 		}
 
-		ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+		ret = nvif_object_init(&drm->channel->user, NvNotify0,
 				       NV_DMA_IN_MEMORY,
 				       &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
@@ -321,9 +337,8 @@
 		remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 	kfree(aper);
 
-	ret = nvkm_device_create(pdev, NVKM_BUS_PCI,
-				 nouveau_pci_name(pdev), pci_name(pdev),
-				 nouveau_config, nouveau_debug, &device);
+	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
+				  true, true, ~0ULL, &device);
 	if (ret)
 		return ret;
 
@@ -331,7 +346,7 @@
 
 	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 	if (ret) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&device);
+		nvkm_device_del(&device);
 		return ret;
 	}
 
@@ -371,12 +386,10 @@
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_drm *drm;
 	int ret;
 
-	ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
-				 (void **)&drm);
+	ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
 	if (ret)
 		return ret;
 
@@ -390,36 +403,10 @@
 
 	nouveau_get_hdmi_dev(drm);
 
-	/* make sure AGP controller is in a consistent state before we
-	 * (possibly) execute vbios init tables (see nouveau_agp.h)
-	 */
-	if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
-		const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
-				    NV_DEVICE_V0_DISABLE_MMIO;
-		/* dummy device object, doesn't init anything, but allows
-		 * agp code access to registers
-		 */
-		ret = nvif_device_init(&drm->client.base.base, NULL,
-				       NVDRM_DEVICE, NV_DEVICE,
-				       &(struct nv_device_v0) {
-						.device = ~0,
-						.disable = ~enables,
-						.debug0 = ~0,
-				       }, sizeof(struct nv_device_v0),
-				       &drm->device);
-		if (ret)
-			goto fail_device;
-
-		nouveau_agp_reset(drm);
-		nvif_device_fini(&drm->device);
-	}
-
-	ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
-			       NV_DEVICE,
+	ret = nvif_device_init(&drm->client.base.object,
+			       NVDRM_DEVICE, NV_DEVICE,
 			       &(struct nv_device_v0) {
 					.device = ~0,
-					.disable = 0,
-					.debug0 = 0,
 			       }, sizeof(struct nv_device_v0),
 			       &drm->device);
 	if (ret)
@@ -432,14 +419,13 @@
 	 * better fix is found - assuming there is one...
 	 */
 	if (drm->device.info.chipset == 0xc1)
-		nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
+		nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
 
 	nouveau_vga_init(drm);
-	nouveau_agp_init(drm);
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &drm->client.vm);
+				  0x1000, NULL, &drm->client.vm);
 		if (ret)
 			goto fail_device;
 
@@ -486,7 +472,6 @@
 fail_bios:
 	nouveau_ttm_fini(drm);
 fail_ttm:
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 fail_device:
 	nvif_device_fini(&drm->device);
@@ -512,7 +497,6 @@
 	nouveau_bios_takedown(dev);
 
 	nouveau_ttm_fini(drm);
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 
 	nvif_device_fini(&drm->device);
@@ -527,15 +511,14 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_client *client;
-	struct nvkm_object *device;
+	struct nvkm_device *device;
 
 	dev->irq_enabled = false;
 	client = nvxx_client(&drm->client.base);
-	device = client->device;
+	device = nvkm_device_find(client->device);
 	drm_put_dev(dev);
 
-	nvkm_object_ref(NULL, &device);
-	nvkm_object_debug();
+	nvkm_device_del(&device);
 }
 
 static void
@@ -597,7 +580,6 @@
 	if (ret)
 		goto fail_client;
 
-	nouveau_agp_fini(drm);
 	return 0;
 
 fail_client:
@@ -622,13 +604,8 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_INFO(drm, "re-enabling device...\n");
-
-	nouveau_agp_reset(drm);
-
 	NV_INFO(drm, "resuming kernel object tree...\n");
 	nvif_client_resume(&drm->client.base);
-	nouveau_agp_init(drm);
 
 	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
@@ -728,7 +705,6 @@
 		return -EBUSY;
 	}
 
-	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -762,10 +738,9 @@
 	ret = nouveau_do_resume(drm_dev, true);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
-	nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
-	nv_debug_level(NORMAL);
 	return ret;
 }
 
@@ -826,8 +801,7 @@
 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-	ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
-			(void **)&cli);
+	ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
 
 	if (ret)
 		goto out_suspend;
@@ -836,7 +810,7 @@
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &cli->vm);
+				  0x1000, NULL, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
@@ -945,8 +919,8 @@
 static struct drm_driver
 driver_stub = {
 	.driver_features =
-		DRIVER_USE_AGP |
-		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+		DRIVER_KMS_LEGACY_CONTEXT,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,
@@ -1056,18 +1030,16 @@
 };
 
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev, int size,
-				void **pobject)
+nouveau_platform_device_create(struct platform_device *pdev,
+			       struct nvkm_device **pdevice)
 {
 	struct drm_device *drm;
 	int err;
 
-	err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM,
-				  nouveau_platform_name(pdev),
-				  dev_name(&pdev->dev), nouveau_config,
-				  nouveau_debug, size, pobject);
+	err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug,
+				    true, true, ~0ULL, pdevice);
 	if (err)
-		return ERR_PTR(err);
+		goto err_free;
 
 	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
 	if (!drm) {
@@ -1085,7 +1057,7 @@
 	return drm;
 
 err_free:
-	nvkm_object_ref(NULL, (struct nvkm_object **)pobject);
+	nvkm_device_del(pdevice);
 
 	return ERR_PTR(err);
 }
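
One more interface shift worth calling out from the nouveau_drm.c hunks above: class enumeration no longer fills a caller-provided array. nvif_object_sclass_get() allocates the list itself and returns its length (or a negative error), and the caller hands it back with nvif_object_sclass_put(). As a fragment, mirroring nouveau_accel_init() above:

/* Sketch fragment of the new sclass enumeration. */
struct nvif_sclass *sclass;
int i, n;

n = nvif_object_sclass_get(&device->object, &sclass);
if (n < 0)
	return;				/* error; nothing was allocated */
for (i = 0; i < n; i++) {
	if (sclass[i].oclass == NV03_CHANNEL_DMA)
		break;			/* device supports this class */
}
nvif_object_sclass_put(&sclass);	/* releases the list */
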
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index dd72652..3c902c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
 #define DRIVER_DATE		"20120801"
 
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	2
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	0
 
 /*
  * 1.1.1:
@@ -30,6 +30,9 @@
  *      - allow concurrent access to bo's mapped read/write.
  * 1.2.2:
  *      - add NOUVEAU_GEM_DOMAIN_COHERENT flag
+ * 1.3.0:
+ *      - NVIF ABI modified, safe because only (current) users are test
+ *        programs that get directly linked with NVKM.
  */
 
 #include <nvif/client.h>
@@ -88,6 +91,8 @@
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
+	char name[32];
+	struct drm_device *dev;
 };
 
 static inline struct nouveau_cli *
@@ -109,13 +114,10 @@
 	struct list_head clients;
 
 	struct {
-		enum {
-			UNKNOWN = 0,
-			DISABLE = 1,
-			ENABLED = 2
-		} stat;
+		struct agp_bridge_data *bridge;
 		u32 base;
 		u32 size;
+		bool cma;
 	} agp;
 
 	/* TTM interface support */
@@ -148,6 +150,7 @@
 	struct nouveau_fbdev *fbcon;
 	struct nvif_object nvsw;
 	struct nvif_object ntfy;
+	struct nvif_notify flip;
 
 	/* nv10-nv40 tiling regions */
 	struct {
@@ -180,22 +183,22 @@
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define nouveau_platform_device_create(p, u)                                   \
-	nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev,
-				int size, void **pobject);
+nouveau_platform_device_create(struct platform_device *, struct nvkm_device **);
 void nouveau_drm_device_remove(struct drm_device *dev);
 
 #define NV_PRINTK(l,c,f,a...) do {                                             \
 	struct nouveau_cli *_cli = (c);                                        \
-	nv_##l(_cli->base.base.priv, f, ##a);                                  \
+	dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a);                     \
 } while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
 #define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
 #define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
-#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) do {                                              \
+	if (unlikely(drm_debug & DRM_UT_DRIVER))                               \
+		NV_PRINTK(info, &(drm)->client, f, ##a);                       \
+} while(0)
 
 extern int nouveau_modeset;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c57a37e..b37da95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -41,7 +41,9 @@
 
 	struct dcb_output *dcb;
 	int or;
-	struct nvkm_i2c_port *i2c;
+
+	struct i2c_adapter *i2c;
+	struct nvkm_i2c_aux *aux;
 
 	/* different to drm_encoder.crtc, this reflects what's
 	 * actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553..2791701 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_fillrect(info, rect);
+	drm_fb_helper_cfb_fillrect(info, rect);
 }
 
 static void
@@ -116,7 +116,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_copyarea(info, image);
+	drm_fb_helper_cfb_copyarea(info, image);
 }
 
 static void
@@ -148,7 +148,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_imageblit(info, image);
+	drm_fb_helper_cfb_imageblit(info, image);
 }
 
 static int
@@ -197,9 +197,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@
 	struct nouveau_channel *chan;
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct pci_dev *pdev = dev->pdev;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, &pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unlock;
 	}
 	info->skip_vt_switch = 1;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		framebuffer_release(info);
-		goto out_unlock;
-	}
-
 	info->par = fbcon;
 
 	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@
 
 	/* setup helper */
 	fbcon->helper.fb = fb;
-	fbcon->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (!chan)
@@ -450,15 +441,9 @@
 nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
 	struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
-	struct fb_info *info;
 
-	if (fbcon->helper.fbdev) {
-		info = fbcon->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbcon->helper);
+	drm_fb_helper_release_fbi(&fbcon->helper);
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@
 		console_lock();
 		if (state == FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_restore(dev);
-		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
 		if (state != FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_save_disable(dev);
 		console_unlock();
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c6d56be..574c36b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -169,7 +169,7 @@
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	int ret;
 
 	INIT_LIST_HEAD(&fctx->flip);
@@ -188,13 +188,12 @@
 	if (!priv->uevent)
 		return;
 
-	ret = nvif_notify_init(chan->object, NULL,
-			 nouveau_fence_wait_uevent_handler, false,
-			 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
-			 &(struct nvif_notify_uevent_req) { },
-			 sizeof(struct nvif_notify_uevent_req),
-			 sizeof(struct nvif_notify_uevent_rep),
-			 &fctx->notify);
+	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) { },
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &fctx->notify);
 
 	WARN_ON(ret);
 }
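
The fence and page-flip changes above also track the reworked notify interface: nvif_notify_init() drops its old second argument, and the handler now receives the struct nvif_notify itself, returning NVIF_NOTIFY_KEEP to stay armed. A sketch of the full lifecycle assembled from the hunks above, with a hypothetical owner structure:

/* Sketch: notify lifecycle after the rework (owner struct hypothetical). */
struct owner {
	struct nvif_notify notify;
};

static int
owner_handler(struct nvif_notify *notify)
{
	struct owner *o = container_of(notify, typeof(*o), notify);
	/* ... consume the event via o ... */
	return NVIF_NOTIFY_KEEP;	/* keep the notifier armed */
}

static int
owner_open(struct nouveau_channel *chan, struct owner *o)
{
	/* parent object, handler, do-work flag, event type, request,
	 * request size, reply size, notify to fill in */
	int ret = nvif_notify_init(&chan->user, owner_handler, false,
				   G82_CHANNEL_DMA_V0_NTFY_UEVENT,
				   &(struct nvif_notify_uevent_req) { },
				   sizeof(struct nvif_notify_uevent_req),
				   sizeof(struct nvif_notify_uevent_rep),
				   &o->notify);
	if (ret == 0)
		ret = nvif_notify_get(&o->notify);	/* arm it */
	return ret;
}

static void
owner_close(struct owner *o)
{
	nvif_notify_put(&o->notify);	/* disarm */
	nvif_notify_fini(&o->notify);	/* destroy */
}
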
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d9241d8..2e3a62d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -85,7 +85,7 @@
 int nv84_fence_create(struct nouveau_drm *);
 int nvc0_fence_create(struct nouveau_drm *);
 
-int nouveau_flip_complete(void *chan);
+int nouveau_flip_complete(struct nvif_notify *);
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af1ee51..2c99815 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -254,13 +254,13 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
-		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
+		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -376,7 +376,7 @@
 	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
 	if (++trycnt > 100000) {
-		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
+		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 			ret = -ENOENT;
 			break;
 		}
@@ -399,7 +399,7 @@
 		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
+			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
 			ret = -EINVAL;
@@ -420,7 +420,7 @@
 			}
 			if (unlikely(ret)) {
 				if (ret != -ERESTARTSYS)
-					NV_PRINTK(error, cli, "fail reserve\n");
+					NV_PRINTK(err, cli, "fail reserve\n");
 				break;
 			}
 		}
@@ -438,7 +438,7 @@
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &gart_list);
 		else {
-			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
+			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &both_list);
 			ret = -EINVAL;
@@ -476,21 +476,21 @@
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_PRINTK(error, cli, "fail set_domain\n");
+			NV_PRINTK(err, cli, "fail set_domain\n");
 			return ret;
 		}
 
 		ret = nouveau_bo_validate(nvbo, true, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail ttm_validate\n");
+				NV_PRINTK(err, cli, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail post-validate sync\n");
+				NV_PRINTK(err, cli, "fail post-validate sync\n");
 			return ret;
 		}
 
@@ -537,14 +537,14 @@
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate_init\n");
+			NV_PRINTK(err, cli, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validating bo list\n");
+			NV_PRINTK(err, cli, "validating bo list\n");
 		validate_fini(op, NULL, NULL);
 		return ret;
 	}
@@ -600,7 +600,7 @@
 		uint32_t data;
 
 		if (unlikely(r->bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -610,7 +610,7 @@
 			continue;
 
 		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -618,7 +618,7 @@
 
 		if (unlikely(r->reloc_bo_offset + 4 >
 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-			NV_PRINTK(error, cli, "reloc outside of bo\n");
+			NV_PRINTK(err, cli, "reloc outside of bo\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -627,7 +627,7 @@
 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
 					  &nvbo->kmap);
 			if (ret) {
-				NV_PRINTK(error, cli, "failed kmap for reloc\n");
+				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 				break;
 			}
 			nvbo->validate_mapped = true;
@@ -650,7 +650,7 @@
 
 		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
 
@@ -681,7 +681,7 @@
 		return -ENOMEM;
 
 	list_for_each_entry(temp, &abi16->channels, head) {
-		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
+		if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
 			chan = temp->chan;
 			break;
 		}
@@ -696,19 +696,19 @@
 		goto out_next;
 
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
@@ -726,7 +726,7 @@
 	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
-			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
+			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
@@ -737,7 +737,7 @@
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate: %d\n", ret);
+			NV_PRINTK(err, cli, "validate: %d\n", ret);
 		goto out_prevalid;
 	}
 
@@ -745,7 +745,7 @@
 	if (do_reloc) {
 		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 			goto out;
 		}
 	}
@@ -753,7 +753,7 @@
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -768,7 +768,7 @@
 	if (drm->device.info.chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -782,7 +782,7 @@
 	} else {
 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
+			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
@@ -820,7 +820,7 @@
 
 	ret = nouveau_fence_new(chan, false, &fence);
 	if (ret) {
-		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
+		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 0dbe006..491c714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
-	int temp = therm->temp_get(therm);
+	int temp = nvkm_therm_temp_get(therm);
 
 	if (temp < 0)
 		return temp;
@@ -348,7 +348,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+	return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
 }
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
 			  NULL, 0);
@@ -571,7 +571,7 @@
 		return -ENOMEM;
 	hwmon->dev = dev;
 
-	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+	if (!therm || !therm->attr_get || !therm->attr_set)
 		return -ENODEV;
 
 	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -588,7 +588,7 @@
 		goto error;
 
 	/* if the card has a working thermal sensor */
-	if (therm->temp_get(therm) >= 0) {
+	if (nvkm_therm_temp_get(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
 		if (ret)
 			goto error;
@@ -606,7 +606,7 @@
 	}
 
 	/* if the card can read the fan rpm */
-	if (therm->fan_sense(therm) >= 0) {
+	if (nvkm_therm_fan_sense(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_fan_rpm_attrgroup);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index ca0ad9d..55eb942 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,10 +72,8 @@
 static void
 nvkm_client_driver_fini(void *priv)
 {
-	struct nvkm_object *client = priv;
-	nvkm_client_fini(nv_client(client), false);
-	atomic_set(&client->refcount, 1);
-	nvkm_object_ref(NULL, &client);
+	struct nvkm_client *client = priv;
+	nvkm_client_del(&client);
 }
 
 static int
@@ -113,7 +111,7 @@
 	struct nvkm_client *client;
 	int ret;
 
-	ret = nvkm_client_create(name, device, cfg, dbg, &client);
+	ret = nvkm_client_new(name, device, cfg, dbg, &client);
 	*ppriv = client;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dcfbbfa..3eb6654 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -19,239 +19,38 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
-#include <linux/iommu.h>
-#include <soc/tegra/fuse.h>
-#include <soc/tegra/pmc.h>
-
-#include "nouveau_drm.h"
 #include "nouveau_platform.h"
 
-static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	err = regulator_enable(gpu->vdd);
-	if (err)
-		goto err_power;
-
-	err = clk_prepare_enable(gpu->clk);
-	if (err)
-		goto err_clk;
-	err = clk_prepare_enable(gpu->clk_pwr);
-	if (err)
-		goto err_clk_pwr;
-	clk_set_rate(gpu->clk_pwr, 204000000);
-	udelay(10);
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
-	if (err)
-		goto err_clamp;
-	udelay(10);
-
-	reset_control_deassert(gpu->rst);
-	udelay(10);
-
-	return 0;
-
-err_clamp:
-	clk_disable_unprepare(gpu->clk_pwr);
-err_clk_pwr:
-	clk_disable_unprepare(gpu->clk);
-err_clk:
-	regulator_disable(gpu->vdd);
-err_power:
-	return err;
-}
-
-static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	clk_disable_unprepare(gpu->clk_pwr);
-	clk_disable_unprepare(gpu->clk);
-	udelay(10);
-
-	err = regulator_disable(gpu->vdd);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-#if IS_ENABLED(CONFIG_IOMMU_API)
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-	int err;
-	unsigned long pgsize_bitmap;
-
-	mutex_init(&gpu->iommu.mutex);
-
-	if (iommu_present(&platform_bus_type)) {
-		gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
-		if (IS_ERR(gpu->iommu.domain))
-			goto error;
-
-		/*
-		 * A IOMMU is only usable if it supports page sizes smaller
-		 * or equal to the system's PAGE_SIZE, with a preference if
-		 * both are equal.
-		 */
-		pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
-		if (pgsize_bitmap & PAGE_SIZE) {
-			gpu->iommu.pgshift = PAGE_SHIFT;
-		} else {
-			gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
-			if (gpu->iommu.pgshift == 0) {
-				dev_warn(dev, "unsupported IOMMU page size\n");
-				goto free_domain;
-			}
-			gpu->iommu.pgshift -= 1;
-		}
-
-		err = iommu_attach_device(gpu->iommu.domain, dev);
-		if (err)
-			goto free_domain;
-
-		err = nvkm_mm_init(&gpu->iommu._mm, 0,
-				   (1ULL << 40) >> gpu->iommu.pgshift, 1);
-		if (err)
-			goto detach_device;
-
-		gpu->iommu.mm = &gpu->iommu._mm;
-	}
-
-	return;
-
-detach_device:
-	iommu_detach_device(gpu->iommu.domain, dev);
-
-free_domain:
-	iommu_domain_free(gpu->iommu.domain);
-
-error:
-	gpu->iommu.domain = NULL;
-	gpu->iommu.pgshift = 0;
-	dev_err(dev, "cannot initialize IOMMU MM\n");
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-	if (gpu->iommu.domain) {
-		nvkm_mm_fini(&gpu->iommu._mm);
-		iommu_detach_device(gpu->iommu.domain, dev);
-		iommu_domain_free(gpu->iommu.domain);
-	}
-}
-
-#else
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-}
-
-#endif
-
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
-	struct nouveau_platform_gpu *gpu;
-	struct nouveau_platform_device *device;
+	struct nvkm_device *device;
 	struct drm_device *drm;
-	int err;
-
-	gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
-	if (!gpu)
-		return -ENOMEM;
-
-	gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(gpu->vdd))
-		return PTR_ERR(gpu->vdd);
-
-	gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->rst))
-		return PTR_ERR(gpu->rst);
-
-	gpu->clk = devm_clk_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->clk))
-		return PTR_ERR(gpu->clk);
-
-	gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
-	if (IS_ERR(gpu->clk_pwr))
-		return PTR_ERR(gpu->clk_pwr);
-
-	nouveau_platform_probe_iommu(&pdev->dev, gpu);
-
-	err = nouveau_platform_power_up(gpu);
-	if (err)
-		return err;
+	int ret;
 
 	drm = nouveau_platform_device_create(pdev, &device);
-	if (IS_ERR(drm)) {
-		err = PTR_ERR(drm);
-		goto power_down;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0) {
+		drm_dev_unref(drm);
+		return ret;
 	}
 
-	device->gpu = gpu;
-	device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
-
-	err = drm_dev_register(drm, 0);
-	if (err < 0)
-		goto err_unref;
-
 	return 0;
-
-err_unref:
-	drm_dev_unref(drm);
-
-power_down:
-	nouveau_platform_power_down(gpu);
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
 }
 
 static int nouveau_platform_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
-	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct nvkm_device *device = nvxx_device(&drm->device);
-	struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
-	int err;
-
-	nouveau_drm_device_remove(drm_dev);
-
-	err = nouveau_platform_power_down(gpu);
-
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
+	struct drm_device *dev = platform_get_drvdata(pdev);
+	nouveau_drm_device_remove(dev);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id nouveau_platform_match[] = {
 	{ .compatible = "nvidia,gk20a" },
+	{ .compatible = "nvidia,gm20b" },
 	{ }
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 392874c..f41056d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -19,54 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #ifndef __NOUVEAU_PLATFORM_H__
 #define __NOUVEAU_PLATFORM_H__
-
-#include "core/device.h"
-#include "core/mm.h"
-
-struct reset_control;
-struct clk;
-struct regulator;
-struct iommu_domain;
-struct platform_driver;
-
-struct nouveau_platform_gpu {
-	struct reset_control *rst;
-	struct clk *clk;
-	struct clk *clk_pwr;
-
-	struct regulator *vdd;
-
-	struct {
-		/*
-		 * Protects accesses to mm from subsystems
-		 */
-		struct mutex mutex;
-
-		struct nvkm_mm _mm;
-		/*
-		 * Just points to _mm. We need this to avoid embedding
-		 * struct nvkm_mm in os.h
-		 */
-		struct nvkm_mm *mm;
-		struct iommu_domain *domain;
-		unsigned long pgshift;
-	} iommu;
-};
-
-struct nouveau_platform_device {
-	struct nvkm_device device;
-
-	struct nouveau_platform_gpu *gpu;
-
-	int gpu_speedo;
-};
-
-#define nv_device_to_platform(d)                                               \
-	container_of(d, struct nouveau_platform_device, device)
+#include "nouveau_drm.h"
 
 extern struct platform_driver nouveau_platform_driver;
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 1ec8f38..d12a5fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@
 	struct nvif_device *device = &drm->device;
 
 	if (sysfs && sysfs->ctrl.priv) {
-		device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
 		nvif_object_fini(&sysfs->ctrl);
 	}
 
@@ -188,11 +188,11 @@
 	if (!sysfs)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+	ret = nvif_object_init(&device->object, NVDRM_CONTROL,
 			       NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
-			      &sysfs->ctrl);
+			       &sysfs->ctrl);
 	if (ret == 0)
-		device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7464aef3..3f0fb55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,8 +33,8 @@
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	man->priv = pfb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
+	man->priv = fb;
 	return 0;
 }
 
@@ -64,9 +64,9 @@
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	nvkm_mem_node_cleanup(mem->mm_node);
-	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
 }
 
 static int
@@ -76,7 +76,7 @@
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_mem *node;
 	u32 size_nc = 0;
@@ -88,9 +88,9 @@
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
-			   mem->page_alignment << PAGE_SHIFT, size_nc,
-			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
+			     mem->page_alignment << PAGE_SHIFT, size_nc,
+			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
 		mem->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@
 	return 0;
 }
 
-static void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-	struct nvkm_fb *pfb = man->priv;
-	struct nvkm_mm *mm = &pfb->vram;
-	struct nvkm_mm_node *r;
-	u32 total = 0, free = 0;
-
-	mutex_lock(&nv_subdev(pfb)->mutex);
-	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-		       prefix, r->type, ((u64)r->offset << 12),
-		       (((u64)r->offset + r->length) << 12));
-
-		total += r->length;
-		if (!r->type)
-			free += r->length;
-	}
-	mutex_unlock(&nv_subdev(pfb)->mutex);
-
-	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
-	       prefix, (u64)total << 12, (u64)free << 12);
-	printk(KERN_DEBUG "%s  block: 0x%08x\n",
-	       prefix, mm->block_size << 12);
-}
-
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
 	nouveau_vram_manager_init,
 	nouveau_vram_manager_fini,
 	nouveau_vram_manager_new,
 	nouveau_vram_manager_del,
-	nouveau_vram_manager_debug
 };
 
 static int
@@ -221,7 +194,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
-	struct nv04_mmu_priv *priv = (void *)mmu;
+	struct nv04_mmu *priv = (void *)mmu;
 	struct nvkm_vm *vm = NULL;
 	nvkm_vm_ref(priv->vm, &vm, NULL);
 	man->priv = vm;
@@ -362,13 +335,22 @@
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_pci *pci = device->pci;
 	struct drm_device *dev = drm->dev;
 	u32 bits;
 	int ret;
 
+	if (pci && pci->agp.bridge) {
+		drm->agp.bridge = pci->agp.bridge;
+		drm->agp.base = pci->agp.base;
+		drm->agp.size = pci->agp.size;
+		drm->agp.cma = pci->agp.cma;
+	}
+
 	bits = nvxx_mmu(&drm->device)->dma_bits;
-	if (nv_device_is_pci(nvxx_device(&drm->device))) {
-		if (drm->agp.stat == ENABLED ||
+	if (nvxx_device(&drm->device)->func->pci) {
+		if (drm->agp.bridge ||
 		     !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
 			bits = 32;
 
@@ -408,11 +390,11 @@
 		return ret;
 	}
 
-	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
-					 nv_device_resource_len(nvxx_device(&drm->device), 1));
+	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
+					 device->func->resource_size(device, 1));
 
 	/* GART init */
-	if (drm->agp.stat != ENABLED) {
+	if (!drm->agp.bridge) {
 		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
@@ -433,10 +415,8 @@
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
-	mutex_lock(&drm->dev->struct_mutex);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
-	mutex_unlock(&drm->dev->struct_mutex);
 
 	ttm_bo_device_release(&drm->ttm.bdev);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c7592ec..af89c36 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,13 +12,14 @@
 static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
-	struct nvif_device *device = &nouveau_drm(priv)->device;
+	struct nouveau_drm *drm = nouveau_drm(priv);
+	struct nvif_object *device = &drm->device.object;
 
-	if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
-	    device->info.chipset >= 0x4c)
+	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+	    drm->device.info.chipset >= 0x4c)
 		nvif_wr32(device, 0x088060, state);
 	else
-	if (device->info.chipset >= 0x40)
+	if (drm->device.info.chipset >= 0x40)
 		nvif_wr32(device, 0x088054, state);
 	else
 		nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 495c576..789dc29 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -171,33 +171,33 @@
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x0062,
+	ret = nvif_object_init(&chan->user, 0x0062,
 			       device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
 			       0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
 			       &nfbdev->clip);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
 			       &nfbdev->rop);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
 			       &nfbdev->patt);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
 			       &nfbdev->gdi);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x005f,
+	ret = nvif_object_init(&chan->user, 0x005f,
 			       device->info.chipset >= 0x11 ? 0x009f : 0x005f,
 			       NULL, 0, &nfbdev->blit);
 	if (ret)
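
All of the fbcon hunks above follow the same conversion: nvif_object_init() loses its destructor argument, and objects are created against the channel's embedded nvif_object (chan->user) instead of the old chan->object pointer. The calling convention in isolation, a sketch reusing the rop object's handle/class values from the hunk above:

    struct nvif_object rop;
    int ret;

    ret = nvif_object_init(&chan->user, 0x0043 /* handle */,
    			   0x0043 /* oclass */, NULL, 0, &rop);
    if (ret)
    	return ret;
    /* ... use the object ... */
    nvif_object_fini(&rop);
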
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c2e05e6..f3d705d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,8 +57,10 @@
 static u32
 nv04_fence_read(struct nouveau_channel *chan)
 {
-	struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);;
-	return atomic_read(&fifo->refcnt);
+	struct nv04_nvsw_get_ref_v0 args = {};
+	WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
+				 &args, sizeof(args)));
+	return args.ref;
 }
 
 static void
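
nv04_fence_read() now queries the fence sequence through the software object's method interface instead of casting to the kernel-side fifo channel. The nvif_object_mthd() pattern shown standalone (the error handling here is illustrative, not from the patch):

    struct nv04_nvsw_get_ref_v0 args = {};
    int ret;

    ret = nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
    			   &args, sizeof(args));
    if (WARN_ON(ret))
    	args.ref = 0;	/* treat a failed query as sequence 0 */
    return args.ref;
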
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 5e1ea1c..2c35213 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -50,7 +50,7 @@
 u32
 nv10_fence_read(struct nouveau_channel *chan)
 {
-	return nvif_rd32(chan, 0x0048);
+	return nvif_rd32(&chan->user, 0x0048);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 57860cf..80b6eb8 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -33,7 +33,7 @@
 nv17_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
+	struct nouveau_cli *cli = (void *)prev->user.client;
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx = chan->fence;
 	u32 value;
@@ -89,7 +89,7 @@
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 981342d..4ae87ae 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -60,35 +60,39 @@
 
 struct nv50_chan {
 	struct nvif_object user;
+	struct nvif_device *device;
 };
 
 static int
-nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_chan *chan)
+nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_chan *chan)
 {
 	const u32 handle = (oclass[0] << 16) | head;
-	u32 sclass[8];
-	int ret, i;
+	struct nvif_sclass *sclass;
+	int ret, i, n;
 
-	ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
-	WARN_ON(ret > ARRAY_SIZE(sclass));
+	chan->device = device;
+
+	ret = n = nvif_object_sclass_get(disp, &sclass);
 	if (ret < 0)
 		return ret;
 
 	while (oclass[0]) {
-		for (i = 0; i < ARRAY_SIZE(sclass); i++) {
-			if (sclass[i] == oclass[0]) {
-				ret = nvif_object_init(disp, NULL, handle,
-						       oclass[0], data, size,
-						       &chan->user);
+		for (i = 0; i < n; i++) {
+			if (sclass[i].oclass == oclass[0]) {
+				ret = nvif_object_init(disp, handle, oclass[0],
+						       data, size, &chan->user);
 				if (ret == 0)
 					nvif_object_map(&chan->user);
+				nvif_object_sclass_put(&sclass);
 				return ret;
 			}
 		}
 		oclass++;
 	}
 
+	nvif_object_sclass_put(&sclass);
 	return -ENOSYS;
 }
 
@@ -113,10 +117,12 @@
 }
 
 static int
-nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_pioc *pioc)
+nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_pioc *pioc)
 {
-	return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+	return nv50_chan_create(device, disp, oclass, head, data, size,
+				&pioc->base);
 }
 
 /******************************************************************************
@@ -128,12 +134,13 @@
 };
 
 static int
-nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_curs *curs)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_CURSOR,
 		GF110_DISP_CURSOR,
 		GT214_DISP_CURSOR,
@@ -142,8 +149,8 @@
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &curs->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&curs->base);
 }
 
 /******************************************************************************
@@ -155,12 +162,13 @@
 };
 
 static int
-nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_oimm *oimm)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY,
 		GF110_DISP_OVERLAY,
 		GT214_DISP_OVERLAY,
@@ -169,8 +177,8 @@
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &oimm->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&oimm->base);
 }
 
 /******************************************************************************
@@ -194,37 +202,37 @@
 static void
 nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
 {
+	struct nvif_device *device = dmac->base.device;
+
 	nvif_object_fini(&dmac->vram);
 	nvif_object_fini(&dmac->sync);
 
 	nv50_chan_destroy(&dmac->base);
 
 	if (dmac->ptr) {
-		struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev;
-		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+		struct device *dev = nvxx_device(device)->dev;
+		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
 	}
 }
 
 static int
-nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, u64 syncbuf,
+nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
 		 struct nv50_dmac *dmac)
 {
-	struct nvif_device *device = nvif_device(disp);
 	struct nv50_disp_core_channel_dma_v0 *args = data;
 	struct nvif_object pushbuf;
 	int ret;
 
 	mutex_init(&dmac->lock);
 
-	dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
-					 PAGE_SIZE, &dmac->handle);
+	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+				       &dmac->handle, GFP_KERNEL);
 	if (!dmac->ptr)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL,
-			       args->pushbuf, NV_DMA_FROM_MEMORY,
-			       &(struct nv_dma_v0) {
+	ret = nvif_object_init(&device->object, 0xd0000000,
+			       NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_PCI_US,
 					.access = NV_DMA_V0_ACCESS_RD,
 					.start = dmac->handle + 0x0000,
@@ -233,13 +241,15 @@
 	if (ret)
 		return ret;
 
-	ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+	args->pushbuf = nvif_handle(&pushbuf);
+
+	ret = nv50_chan_create(device, disp, oclass, head, data, size,
+			       &dmac->base);
 	nvif_object_fini(&pushbuf);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -250,8 +260,7 @@
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -274,12 +283,13 @@
 };
 
 static int
-nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
+		 u64 syncbuf, struct nv50_mast *core)
 {
 	struct nv50_disp_core_channel_dma_v0 args = {
 		.pushbuf = 0xb0007d00,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GM204_DISP_CORE_CHANNEL_DMA,
 		GM107_DISP_CORE_CHANNEL_DMA,
 		GK110_DISP_CORE_CHANNEL_DMA,
@@ -293,8 +303,8 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
-			       &core->base);
+	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
+				syncbuf, &core->base);
 }
 
 /******************************************************************************
@@ -308,14 +318,14 @@
 };
 
 static int
-nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_sync *base)
+nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_sync *base)
 {
 	struct nv50_disp_base_channel_dma_v0 args = {
 		.pushbuf = 0xb0007c00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK110_DISP_BASE_CHANNEL_DMA,
 		GK104_DISP_BASE_CHANNEL_DMA,
 		GF110_DISP_BASE_CHANNEL_DMA,
@@ -326,7 +336,7 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &base->base);
 }
 
@@ -339,14 +349,14 @@
 };
 
 static int
-nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_ovly *ovly)
+nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_ovly *ovly)
 {
 	struct nv50_disp_overlay_channel_dma_v0 args = {
 		.pushbuf = 0xb0007e00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY_CONTROL_DMA,
 		GF110_DISP_OVERLAY_CONTROL_DMA,
 		GT214_DISP_OVERLAY_CHANNEL_DMA,
@@ -356,7 +366,7 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &ovly->base);
 }
 
@@ -413,6 +423,7 @@
 evo_wait(void *evoc, int nr)
 {
 	struct nv50_dmac *dmac = evoc;
+	struct nvif_device *device = dmac->base.device;
 	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
 
 	mutex_lock(&dmac->lock);
@@ -420,9 +431,12 @@
 		dmac->ptr[put] = 0x20000000;
 
 		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
-		if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
+		if (nvif_msec(device, 2000,
+			if (!nvif_rd32(&dmac->base.user, 0x0004))
+				break;
+		) < 0) {
 			mutex_unlock(&dmac->lock);
-			nv_error(nvxx_object(&dmac->base.user), "channel stalled\n");
+			printk(KERN_ERR "nouveau: evo channel stalled\n");
 			return NULL;
 		}
 
@@ -480,7 +494,10 @@
 		evo_data(push, 0x00000000);
 		evo_data(push, 0x00000000);
 		evo_kick(push, mast);
-		if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync))
+		if (nvif_msec(device, 2000,
+			if (evo_sync_wait(disp->sync))
+				break;
+		) >= 0)
 			return 0;
 	}
 
@@ -535,7 +552,10 @@
 		evo_kick(push, flip.chan);
 	}
 
-	nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip);
+	nvif_msec(device, 2000,
+		if (nv50_display_flip_wait(&flip))
+			break;
+	);
 }
 
 int
@@ -563,7 +583,7 @@
 	if (unlikely(push == NULL))
 		return -EBUSY;
 
-	if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
 		ret = RING_SPACE(chan, 8);
 		if (ret)
 			return ret;
@@ -577,7 +597,7 @@
 		OUT_RING  (chan, sync->addr);
 		OUT_RING  (chan, sync->data);
 	} else
-	if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
 		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
 		ret = RING_SPACE(chan, 12);
 		if (ret)
@@ -1408,6 +1428,8 @@
 static int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_device *device = &drm->device;
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct drm_crtc *crtc;
@@ -1452,13 +1474,13 @@
 		goto out;
 
 	/* allocate cursor resources */
-	ret = nv50_curs_create(disp->disp, index, &head->curs);
+	ret = nv50_curs_create(device, disp->disp, index, &head->curs);
 	if (ret)
 		goto out;
 
 	/* allocate page flip / sync resources */
-	ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->sync);
+	ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->sync);
 	if (ret)
 		goto out;
 
@@ -1466,12 +1488,12 @@
 	head->sync.data = 0x00000000;
 
 	/* allocate overlay resources */
-	ret = nv50_oimm_create(disp->disp, index, &head->oimm);
+	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
 	if (ret)
 		goto out;
 
-	ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->ovly);
+	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->ovly);
 	if (ret)
 		goto out;
 
@@ -1678,6 +1700,7 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c_bus *bus;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type = DRM_MODE_ENCODER_DAC;
@@ -1687,7 +1710,10 @@
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+
+	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+	if (bus)
+		nv_encoder->i2c = &bus->i2c;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2081,9 +2107,22 @@
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 
+	if (dcbe->type == DCB_OUTPUT_DP) {
+		struct nvkm_i2c_aux *aux =
+			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+		if (aux) {
+			nv_encoder->i2c = &aux->i2c;
+			nv_encoder->aux = aux;
+		}
+	} else {
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+		if (bus)
+			nv_encoder->i2c = &bus->i2c;
+	}
+
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
@@ -2234,18 +2273,22 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *ddc = NULL;
+	struct nvkm_i2c_bus *bus = NULL;
+	struct nvkm_i2c_aux *aux = NULL;
+	struct i2c_adapter *ddc;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type;
 
 	switch (dcbe->type) {
 	case DCB_OUTPUT_TMDS:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+		ddc  = bus ? &bus->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	case DCB_OUTPUT_DP:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+		ddc  = aux ? &aux->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:
@@ -2258,6 +2301,7 @@
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
 	nv_encoder->i2c = ddc;
+	nv_encoder->aux = aux;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2295,7 +2339,7 @@
 		union {
 			struct nv50_dma_v0 nv50;
 			struct gf100_dma_v0 gf100;
-			struct gf110_dma_v0 gf110;
+			struct gf119_dma_v0 gf119;
 		};
 	} args = {};
 	struct nv50_fbdma *fbdma;
@@ -2331,15 +2375,15 @@
 		args.gf100.kind = kind;
 		size += sizeof(args.gf100);
 	} else {
-		args.gf110.page = GF110_DMA_V0_PAGE_LP;
-		args.gf110.kind = kind;
-		size += sizeof(args.gf110);
+		args.gf119.page = GF119_DMA_V0_PAGE_LP;
+		args.gf119.kind = kind;
+		size += sizeof(args.gf119);
 	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nv50_head *head = nv50_head(crtc);
-		int ret = nvif_object_init(&head->sync.base.base.user, NULL,
-					    name, NV_DMA_IN_MEMORY, &args, size,
+		int ret = nvif_object_init(&head->sync.base.base.user, name,
+					   NV_DMA_IN_MEMORY, &args, size,
 					   &fbdma->base[head->base.index]);
 		if (ret) {
 			nv50_fbdma_fini(fbdma);
@@ -2347,9 +2391,8 @@
 		}
 	}
 
-	ret = nvif_object_init(&mast->base.base.user, NULL, name,
-				NV_DMA_IN_MEMORY, &args, size,
-			       &fbdma->core);
+	ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
+			       &args, size, &fbdma->core);
 	if (ret) {
 		nv50_fbdma_fini(fbdma);
 		return ret;
@@ -2502,14 +2545,14 @@
 		goto out;
 
 	/* allocate master evo channel */
-	ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
 			      &disp->mast);
 	if (ret)
 		goto out;
 
 	/* create crtc objects to represent the hw heads */
 	if (disp->disp->oclass >= GF110_DISP)
-		crtcs = nvif_rd32(device, 0x022448);
+		crtcs = nvif_rd32(&device->object, 0x022448);
 	else
 		crtcs = 2;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 901130b..e05499d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -183,7 +183,7 @@
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a82d9ea..f0d96e5 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,7 @@
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -66,7 +66,7 @@
 		u32 start = bo->bo.mem.start * PAGE_SIZE;
 		u32 limit = start + bo->bo.mem.size - 1;
 
-		ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+		ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
 				       NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
 						.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index a03db43..412c5be 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -131,7 +131,7 @@
 int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
 	int ret, i;
@@ -213,7 +213,7 @@
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
+	struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
 	struct nv84_fence_priv *priv;
 	u32 domain;
 	int ret;
@@ -228,7 +228,7 @@
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	priv->base.contexts = pfifo->max + 1;
+	priv->base.contexts = fifo->nr;
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fcd2e5f..c97395b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -156,7 +156,7 @@
 	struct nouveau_channel *chan = drm->channel;
 	int ret, format;
 
-	ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 80b9684..1ee9294 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -29,29 +29,29 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-	return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+	return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
 }
 
 int
 nvif_client_suspend(struct nvif_client *client)
 {
-	return client->driver->suspend(client->base.priv);
+	return client->driver->suspend(client->object.priv);
 }
 
 int
 nvif_client_resume(struct nvif_client *client)
 {
-	return client->driver->resume(client->base.priv);
+	return client->driver->resume(client->object.priv);
 }
 
 void
 nvif_client_fini(struct nvif_client *client)
 {
 	if (client->driver) {
-		client->driver->fini(client->base.priv);
+		client->driver->fini(client->object.priv);
 		client->driver = NULL;
-		client->base.parent = NULL;
-		nvif_object_fini(&client->base);
+		client->object.client = NULL;
+		nvif_object_fini(&client->object);
 	}
 }
 
@@ -68,63 +68,39 @@
 };
 
 int
-nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
-		 const char *name, u64 device, const char *cfg, const char *dbg,
-		 struct nvif_client *client)
+nvif_client_init(const char *driver, const char *name, u64 device,
+		 const char *cfg, const char *dbg, struct nvif_client *client)
 {
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_nop_v0 nop;
+	} args = {};
 	int ret, i;
 
-	ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+	ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
 	if (ret)
 		return ret;
 
-	client->base.parent = &client->base;
-	client->base.handle = ~0;
-	client->object = &client->base;
+	client->object.client = client;
+	client->object.handle = ~0;
+	client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
 	client->super = true;
 
 	for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
 		if (!driver || !strcmp(client->driver->name, driver)) {
 			ret = client->driver->init(name, device, cfg, dbg,
-						  &client->base.priv);
+						  &client->object.priv);
 			if (!ret || driver)
 				break;
 		}
 	}
 
+	if (ret == 0) {
+		ret = nvif_client_ioctl(client, &args, sizeof(args));
+		client->version = args.nop.version;
+	}
+
 	if (ret)
 		nvif_client_fini(client);
 	return ret;
 }
-
-static void
-nvif_client_del(struct nvif_client *client)
-{
-	nvif_client_fini(client);
-	kfree(client);
-}
-
-int
-nvif_client_new(const char *driver, const char *name, u64 device,
-		const char *cfg, const char *dbg,
-		struct nvif_client **pclient)
-{
-	struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (client) {
-		int ret = nvif_client_init(nvif_client_del, driver, name,
-					   device, cfg, dbg, client);
-		if (ret) {
-			kfree(client);
-			client = NULL;
-		}
-		*pclient = client;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
-{
-	nvif_object_ref(&client->base, (struct nvif_object **)pclient);
-}
-}
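
With nvif_client_new(), _ref() and _del() gone, lifetime management moves to the caller, which embeds the client and pairs init with fini explicitly. A minimal sketch against the six-argument nvif_client_init() above (the name string and device id are illustrative; passing NULL for driver tries each registered driver in turn, per the loop in the hunk):

    struct nvif_client client;
    int ret;

    ret = nvif_client_init(NULL /* any driver */, "kms", 0 /* device */,
    			   NULL /* cfg */, NULL /* dbg */, &client);
    if (ret)
    	return ret;
    /* ... use the client ... */
    nvif_client_fini(&client);
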
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 6f72244..252d8c3 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -24,55 +24,32 @@
 
 #include <nvif/device.h>
 
+u64
+nvif_device_time(struct nvif_device *device)
+{
+	struct nv_device_time_v0 args = {};
+	int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+				   &args, sizeof(args));
+	WARN_ON_ONCE(ret != 0);
+	return args.time;
+}
+
 void
 nvif_device_fini(struct nvif_device *device)
 {
-	nvif_object_fini(&device->base);
+	nvif_object_fini(&device->object);
 }
 
 int
-nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_device *device)
+nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_device *device)
 {
-	int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
-				   data, size, &device->base);
+	int ret = nvif_object_init(parent, handle, oclass, data, size,
+				   &device->object);
 	if (ret == 0) {
-		device->object = &device->base;
 		device->info.version = 0;
-		ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+		ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
 				       &device->info, sizeof(device->info));
 	}
 	return ret;
 }
-
-static void
-nvif_device_del(struct nvif_device *device)
-{
-	nvif_device_fini(device);
-	kfree(device);
-}
-
-int
-nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_device **pdevice)
-{
-	struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (device) {
-		int ret = nvif_device_init(parent, nvif_device_del, handle,
-					   oclass, data, size, device);
-		if (ret) {
-			kfree(device);
-			device = NULL;
-		}
-		*pdevice = device;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
-{
-	nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
-}
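
The new nvif_device_time() samples the device's timer through the NV_DEVICE_V0_TIME method rather than reaching into ptimer internals. A usage sketch; the unit of the returned value is whatever the method reports and is not specified by this hunk:

    u64 start, elapsed;

    start = nvif_device_time(device);
    /* ... poll hardware ... */
    elapsed = nvif_device_time(device) - start;
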
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 8e34748..b0787ff 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -124,7 +124,7 @@
 	}
 
 	if (!WARN_ON(notify == NULL)) {
-		struct nvif_client *client = nvif_client(notify->object);
+		struct nvif_client *client = notify->object->client;
 		if (!WARN_ON(notify->size != size)) {
 			atomic_inc(&notify->putcnt);
 			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
@@ -156,7 +156,7 @@
 	if (ret >= 0 && object) {
 		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
 		if (ret == 0) {
-			nvif_object_ref(NULL, &notify->object);
+			notify->object = NULL;
 			kfree((void *)notify->data);
 		}
 	}
@@ -164,9 +164,9 @@
 }
 
 int
-nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
-		 int (*func)(struct nvif_notify *), bool work, u8 event,
-		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
+nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
+		 bool work, u8 event, void *data, u32 size, u32 reply,
+		 struct nvif_notify *notify)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
@@ -175,11 +175,9 @@
 	} *args;
 	int ret = -ENOMEM;
 
-	notify->object = NULL;
-	nvif_object_ref(object, &notify->object);
+	notify->object = object;
 	notify->flags = 0;
 	atomic_set(&notify->putcnt, 1);
-	notify->dtor = dtor;
 	notify->func = func;
 	notify->data = NULL;
 	notify->size = reply;
@@ -211,38 +209,3 @@
 		nvif_notify_fini(notify);
 	return ret;
 }
-
-static void
-nvif_notify_del(struct nvif_notify *notify)
-{
-	nvif_notify_fini(notify);
-	kfree(notify);
-}
-
-void
-nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
-{
-	BUG_ON(notify != NULL);
-	if (*pnotify)
-		(*pnotify)->dtor(*pnotify);
-	*pnotify = notify;
-}
-
-int
-nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
-		bool work, u8 type, void *data, u32 size, u32 reply,
-		struct nvif_notify **pnotify)
-{
-	struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
-	if (notify) {
-		int ret = nvif_notify_init(object, nvif_notify_del, func, work,
-					   type, data, size, reply, notify);
-		if (ret) {
-			kfree(notify);
-			notify = NULL;
-		}
-		*pnotify = notify;
-		return ret;
-	}
-	return -ENOMEM;
-}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 3ab4e2f..c3fb6a2 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -30,47 +30,71 @@
 int
 nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 
 	if (size >= sizeof(*args) && args->v0.version == 0) {
+		if (object != &client->object)
+			args->v0.object = nvif_handle(object);
+		else
+			args->v0.object = 0;
 		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
-		args->v0.path_nr = 0;
-		while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
-			args->v0.path[args->v0.path_nr++] = object->handle;
-			if (object->parent == object)
-				break;
-			object = object->parent;
-		}
 	} else
 		return -ENOSYS;
 
-	return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+	return client->driver->ioctl(client->object.priv, client->super,
+				     data, size, hack);
+}
+
+void
+nvif_object_sclass_put(struct nvif_sclass **psclass)
+{
+	kfree(*psclass);
+	*psclass = NULL;
 }
 
 int
-nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_sclass_v0 sclass;
-	} *args;
-	u32 size = count * sizeof(args->sclass.oclass[0]);
-	int ret;
+	} *args = NULL;
+	int ret, cnt = 0, i;
+	u32 size;
 
-	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
-		return -ENOMEM;
-	args->ioctl.version = 0;
-	args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
-	args->sclass.version = 0;
-	args->sclass.count = count;
+	while (1) {
+		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
+		if (!(args = kmalloc(size, GFP_KERNEL)))
+			return -ENOMEM;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+		args->sclass.version = 0;
+		args->sclass.count = cnt;
 
-	memcpy(args->sclass.oclass, oclass, size);
-	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
-	ret = ret ? ret : args->sclass.count;
-	memcpy(oclass, args->sclass.oclass, size);
+		ret = nvif_object_ioctl(object, args, size, NULL);
+		if (ret == 0 && args->sclass.count <= cnt)
+			break;
+		cnt = args->sclass.count;
+		kfree(args);
+		if (ret != 0)
+			return ret;
+	}
+
+	*psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+	if (*psclass) {
+		for (i = 0; i < args->sclass.count; i++) {
+			(*psclass)[i].oclass = args->sclass.oclass[i].oclass;
+			(*psclass)[i].minver = args->sclass.oclass[i].minver;
+			(*psclass)[i].maxver = args->sclass.oclass[i].maxver;
+		}
+		ret = args->sclass.count;
+	} else {
+		ret = -ENOMEM;
+	}
+
 	kfree(args);
 	return ret;
 }
@@ -145,7 +169,7 @@
 nvif_object_unmap(struct nvif_object *object)
 {
 	if (object->map.size) {
-		struct nvif_client *client = nvif_client(object);
+		struct nvif_client *client = object->client;
 		struct {
 			struct nvif_ioctl_v0 ioctl;
 			struct nvif_ioctl_unmap unmap;
@@ -167,7 +191,7 @@
 int
 nvif_object_map(struct nvif_object *object)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_map_v0 map;
@@ -186,119 +210,65 @@
 	return ret;
 }
 
-struct ctor {
-	struct nvif_ioctl_v0 ioctl;
-	struct nvif_ioctl_new_v0 new;
-};
-
 void
 nvif_object_fini(struct nvif_object *object)
 {
-	struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
-	if (object->parent) {
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_del del;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_DEL,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_del del;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_DEL,
+	};
 
-		nvif_object_unmap(object);
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		if (object->data) {
-			object->size = 0;
-			object->data = NULL;
-			kfree(ctor);
-		}
-		nvif_object_ref(NULL, &object->parent);
-	}
+	if (!object->client)
+		return;
+
+	nvif_object_unmap(object);
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+	object->client = NULL;
 }
 
 int
-nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_object *object)
+nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_object *object)
 {
-	struct ctor *ctor;
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_new_v0 new;
+	} *args;
 	int ret = 0;
 
-	object->parent = NULL;
-	object->object = object;
-	nvif_object_ref(parent, &object->parent);
-	kref_init(&object->refcount);
+	object->client = NULL;
 	object->handle = handle;
 	object->oclass = oclass;
-	object->data = NULL;
-	object->size = 0;
-	object->dtor = dtor;
 	object->map.ptr = NULL;
 	object->map.size = 0;
 
-	if (object->parent) {
-		if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+	if (parent) {
+		if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
 			nvif_object_fini(object);
 			return -ENOMEM;
 		}
-		object->data = ctor->new.data;
-		object->size = size;
-		memcpy(object->data, data, size);
 
-		ctor->ioctl.version = 0;
-		ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
-		ctor->new.version = 0;
-		ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
-		ctor->new.token = (unsigned long)(void *)object;
-		ctor->new.handle = handle;
-		ctor->new.oclass = oclass;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_NEW;
+		args->new.version = 0;
+		args->new.route = parent->client->route;
+		args->new.token = nvif_handle(object);
+		args->new.object = nvif_handle(object);
+		args->new.handle = handle;
+		args->new.oclass = oclass;
 
-		ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
-					object->size, &object->priv);
+		memcpy(args->new.data, data, size);
+		ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
+					&object->priv);
+		memcpy(data, args->new.data, size);
+		kfree(args);
+		if (ret == 0)
+			object->client = parent->client;
 	}
 
 	if (ret)
 		nvif_object_fini(object);
 	return ret;
 }
-
-static void
-nvif_object_del(struct nvif_object *object)
-{
-	nvif_object_fini(object);
-	kfree(object);
-}
-
-int
-nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_object **pobject)
-{
-	struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
-	if (object) {
-		int ret = nvif_object_init(parent, nvif_object_del, handle,
-					   oclass, data, size, object);
-		if (ret) {
-			kfree(object);
-			object = NULL;
-		}
-		*pobject = object;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-static void
-nvif_object_put(struct kref *kref)
-{
-	struct nvif_object *object =
-		container_of(kref, typeof(*object), refcount);
-	object->dtor(object);
-}
-
-void
-nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
-{
-	if (object)
-		kref_get(&object->refcount);
-	if (*pobject)
-		kref_put(&(*pobject)->refcount, nvif_object_put);
-	*pobject = object;
-}
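
nvif_object_sclass_get() above hides the probe-then-grow retry loop and returns a heap-allocated array of nvif_sclass entries that the caller releases with the matching put. Usage mirrors nv50_chan_create() earlier in the series ("wanted" is an illustrative class number):

    struct nvif_sclass *sclass;
    int i, n;

    n = nvif_object_sclass_get(disp, &sclass);
    if (n < 0)
    	return n;

    for (i = 0; i < n; i++) {
    	if (sclass[i].oclass == wanted)
    		break;
    }

    nvif_object_sclass_put(&sclass);
    return (i < n) ? 0 : -ENOSYS;
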
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb20..7f66963 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
 nvkm-y := nvkm/core/client.o
-nvkm-y += nvkm/core/engctx.o
 nvkm-y += nvkm/core/engine.o
 nvkm-y += nvkm/core/enum.o
 nvkm-y += nvkm/core/event.o
 nvkm-y += nvkm/core/gpuobj.o
-nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/oproxy.o
 nvkm-y += nvkm/core/option.o
-nvkm-y += nvkm/core/parent.o
-nvkm-y += nvkm/core/printk.o
 nvkm-y += nvkm/core/ramht.o
 nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f..297e1e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
  */
 #include <core/client.h>
 #include <core/device.h>
-#include <core/handle.h>
 #include <core/notify.h>
 #include <core/option.h>
 
@@ -91,7 +90,7 @@
 nvkm_client_notify_new(struct nvkm_object *object,
 		       struct nvkm_event *event, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(object);
+	struct nvkm_client *client = object->client;
 	struct nvkm_client_notify *notify;
 	union {
 		struct nvif_notify_req_v0 v0;
@@ -111,11 +110,11 @@
 	if (!notify)
 		return -ENOMEM;
 
-	nv_ioctl(client, "notify new size %d\n", size);
+	nvif_ioctl(object, "notify new size %d\n", size);
 	if (nvif_unpack(req->v0, 0, 0, true)) {
-		nv_ioctl(client, "notify new vers %d reply %d route %02x "
-				 "token %llx\n", req->v0.version,
-			 req->v0.reply, req->v0.route, req->v0.token);
+		nvif_ioctl(object, "notify new vers %d reply %d route %02x "
+				   "token %llx\n", req->v0.version,
+			   req->v0.reply, req->v0.route, req->v0.token);
 		notify->version = req->v0.version;
 		notify->size = sizeof(notify->rep.v0);
 		notify->rep.v0.version = req->v0.version;
@@ -146,10 +145,10 @@
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "client devlist size %d\n", size);
+	nvif_ioctl(object, "client devlist size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "client devlist vers %d count %d\n",
-			 args->v0.version, args->v0.count);
+		nvif_ioctl(object, "client devlist vers %d count %d\n",
+			   args->v0.version, args->v0.count);
 		if (size == sizeof(args->v0.device[0]) * args->v0.count) {
 			ret = nvkm_device_list(args->v0.device, args->v0.count);
 			if (ret >= 0) {
@@ -176,91 +175,134 @@
 	return -EINVAL;
 }
 
-static void
-nvkm_client_dtor(struct nvkm_object *object)
+static int
+nvkm_client_child_new(const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
 {
-	struct nvkm_client *client = (void *)object;
-	int i;
-	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
-		nvkm_client_notify_del(client, i);
-	nvkm_object_ref(NULL, &client->device);
-	nvkm_handle_destroy(client->root);
-	nvkm_namedb_destroy(&client->namedb);
+	return oclass->base.ctor(oclass, data, size, pobject);
 }
 
-static struct nvkm_oclass
-nvkm_client_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_client_dtor,
-		.mthd = nvkm_client_mthd,
-	},
-};
-
-int
-nvkm_client_create_(const char *name, u64 devname, const char *cfg,
-		    const char *dbg, int length, void **pobject)
+static int
+nvkm_client_child_get(struct nvkm_object *object, int index,
+		      struct nvkm_oclass *oclass)
 {
-	struct nvkm_object *device;
-	struct nvkm_client *client;
-	int ret;
+	const struct nvkm_sclass *sclass;
 
-	device = (void *)nvkm_device_find(devname);
-	if (!device)
-		return -ENODEV;
+	switch (index) {
+	case 0: sclass = &nvkm_udevice_sclass; break;
+	default:
+		return -EINVAL;
+	}
 
-	ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass,
-				  NV_CLIENT_CLASS, NULL,
-				  (1ULL << NVDEV_ENGINE_DEVICE),
-				  length, pobject);
-	client = *pobject;
-	if (ret)
-		return ret;
-
-	ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client),
-				 &client->root);
-	if (ret)
-		return ret;
-
-	/* prevent init/fini being called, os in in charge of this */
-	atomic_set(&nv_object(client)->usecount, 2);
-
-	nvkm_object_ref(device, &client->device);
-	snprintf(client->name, sizeof(client->name), "%s", name);
-	client->debug = nvkm_dbgopt(dbg, "CLIENT");
+	oclass->ctor = nvkm_client_child_new;
+	oclass->base = *sclass;
 	return 0;
 }
 
-int
-nvkm_client_init(struct nvkm_client *client)
+static const struct nvkm_object_func
+nvkm_client_object_func = {
+	.mthd = nvkm_client_mthd,
+	.sclass = nvkm_client_child_get,
+};
+
+void
+nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
 {
-	int ret;
-	nv_debug(client, "init running\n");
-	ret = nvkm_handle_init(client->root);
-	nv_debug(client, "init completed with %d\n", ret);
-	return ret;
+	if (!RB_EMPTY_NODE(&object->node))
+		rb_erase(&object->node, &client->objroot);
+}
+
+bool
+nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
+{
+	struct rb_node **ptr = &client->objroot.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_object *this =
+			container_of(*ptr, typeof(*this), node);
+		parent = *ptr;
+		if (object->object < this->object)
+			ptr = &parent->rb_left;
+		else
+		if (object->object > this->object)
+			ptr = &parent->rb_right;
+		else
+			return false;
+	}
+
+	rb_link_node(&object->node, parent, ptr);
+	rb_insert_color(&object->node, &client->objroot);
+	return true;
+}
+
+struct nvkm_object *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
+{
+	struct rb_node *node = client->objroot.rb_node;
+	while (node) {
+		struct nvkm_object *object =
+			container_of(node, typeof(*object), node);
+		if (handle < object->object)
+			node = node->rb_left;
+		else
+		if (handle > object->object)
+			node = node->rb_right;
+		else
+			return object;
+	}
+	return NULL;
 }
 
 int
 nvkm_client_fini(struct nvkm_client *client, bool suspend)
 {
+	struct nvkm_object *object = &client->object;
 	const char *name[2] = { "fini", "suspend" };
-	int ret, i;
-	nv_debug(client, "%s running\n", name[suspend]);
-	nv_debug(client, "%s notify\n", name[suspend]);
+	int i;
+	nvif_debug(object, "%s notify\n", name[suspend]);
 	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
 		nvkm_client_notify_put(client, i);
-	nv_debug(client, "%s object\n", name[suspend]);
-	ret = nvkm_handle_fini(client->root, suspend);
-	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
-	return ret;
+	return nvkm_object_fini(&client->object, suspend);
 }
 
-const char *
-nvkm_client_name(void *obj)
+int
+nvkm_client_init(struct nvkm_client *client)
 {
-	const char *client_name = "unknown";
-	struct nvkm_client *client = nvkm_client(obj);
-	if (client)
-		client_name = client->name;
-	return client_name;
+	return nvkm_object_init(&client->object);
+}
+
+void
+nvkm_client_del(struct nvkm_client **pclient)
+{
+	struct nvkm_client *client = *pclient;
+	int i;
+	if (client) {
+		nvkm_client_fini(client, false);
+		for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+			nvkm_client_notify_del(client, i);
+		nvkm_object_dtor(&client->object);
+		kfree(*pclient);
+		*pclient = NULL;
+	}
+}
+
+int
+nvkm_client_new(const char *name, u64 device, const char *cfg,
+		const char *dbg, struct nvkm_client **pclient)
+{
+	struct nvkm_oclass oclass = {};
+	struct nvkm_client *client;
+
+	if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
+		return -ENOMEM;
+	oclass.client = client;
+
+	nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+	snprintf(client->name, sizeof(client->name), "%s", name);
+	client->device = device;
+	client->debug = nvkm_dbgopt(dbg, "CLIENT");
+	client->objroot = RB_ROOT;
+	client->dmaroot = RB_ROOT;
+	return 0;
 }
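
nvkm_client now indexes its objects in an rbtree keyed on the 64-bit object token, replacing the old handle/namedb machinery. The insert/search pair in use (the error codes chosen here are illustrative):

    /* register an object; duplicate tokens are rejected */
    if (!nvkm_client_insert(client, object))
    	return -EEXIST;

    /* ... later, resolve a token back to its object ... */
    object = nvkm_client_search(client, handle);
    if (!object)
    	return -ENOENT;
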
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
deleted file mode 100644
index fb2acbc..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/engctx.h>
-#include <core/engine.h>
-#include <core/client.h>
-
-static inline int
-nvkm_engctx_exists(struct nvkm_object *parent,
-		   struct nvkm_engine *engine, void **pobject)
-{
-	struct nvkm_engctx *engctx;
-	struct nvkm_object *parctx;
-
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
-		if (parctx == parent) {
-			atomic_inc(&nv_object(engctx)->refcount);
-			*pobject = engctx;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-int
-nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
-		    u32 size, u32 align, u32 flags, int length, void **pobject)
-{
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_engine *engine = nv_engine(engobj);
-	struct nvkm_object *engctx;
-	unsigned long save;
-	int ret;
-
-	/* check if this engine already has a context for the parent object,
-	 * and reference it instead of creating a new one
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	spin_unlock_irqrestore(&engine->lock, save);
-	if (ret)
-		return ret;
-
-	/* create the new context, supports creating both raw objects and
-	 * objects backed by instance memory
-	 */
-	if (size) {
-		ret = nvkm_gpuobj_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, pargpu, size,
-					  align, flags, length, pobject);
-	} else {
-		ret = nvkm_object_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, length, pobject);
-	}
-
-	engctx = *pobject;
-	if (ret)
-		return ret;
-
-	/* must take the lock again and re-check a context doesn't already
-	 * exist (in case of a race) - the lock had to be dropped before as
-	 * it's not possible to allocate the object with it held.
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	if (ret) {
-		spin_unlock_irqrestore(&engine->lock, save);
-		nvkm_object_ref(NULL, &engctx);
-		return ret;
-	}
-
-	if (client->vm)
-		atomic_inc(&client->vm->engref[nv_engidx(engine)]);
-	list_add(&nv_engctx(engctx)->head, &engine->contexts);
-	nv_engctx(engctx)->addr = ~0ULL;
-	spin_unlock_irqrestore(&engine->lock, save);
-	return 0;
-}
-
-void
-nvkm_engctx_destroy(struct nvkm_engctx *engctx)
-{
-	struct nvkm_engine *engine = engctx->gpuobj.object.engine;
-	struct nvkm_client *client = nvkm_client(engctx);
-	unsigned long save;
-
-	nvkm_gpuobj_unmap(&engctx->vma);
-	spin_lock_irqsave(&engine->lock, save);
-	list_del(&engctx->head);
-	spin_unlock_irqrestore(&engine->lock, save);
-
-	if (client->vm)
-		atomic_dec(&client->vm->engref[nv_engidx(engine)]);
-
-	if (engctx->gpuobj.size)
-		nvkm_gpuobj_destroy(&engctx->gpuobj);
-	else
-		nvkm_object_destroy(&engctx->gpuobj.object);
-}
-
-int
-nvkm_engctx_init(struct nvkm_engctx *engctx)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret;
-
-	ret = nvkm_gpuobj_init(&engctx->gpuobj);
-	if (ret)
-		return ret;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_attach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_attach(parent, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to attach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "attached %s context\n", subdev->name);
-	return 0;
-}
-
-int
-nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret = 0;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_detach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_detach(parent, suspend, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to detach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "detached %s context\n", subdev->name);
-	return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
-}
-
-int
-_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_engctx *engctx;
-	int ret;
-
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
-}
-
-void
-_nvkm_engctx_dtor(struct nvkm_object *object)
-{
-	nvkm_engctx_destroy(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_init(struct nvkm_object *object)
-{
-	return nvkm_engctx_init(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_engctx_fini(nv_engctx(object), suspend);
-}
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
-	struct nvkm_engctx *engctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&engine->lock, flags);
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		if (engctx->addr == addr) {
-			engctx->save = flags;
-			return nv_object(engctx);
-		}
-	}
-	spin_unlock_irqrestore(&engine->lock, flags);
-	return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
-	if (object) {
-		struct nvkm_engine *engine = nv_engine(object->engine);
-		struct nvkm_engctx *engctx = nv_engctx(object);
-		spin_unlock_irqrestore(&engine->lock, engctx->save);
-	}
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 6082017..8a7bae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -25,51 +25,141 @@
 #include <core/device.h>
 #include <core/option.h>
 
-struct nvkm_engine *
-nvkm_engine(void *obj, int idx)
+#include <subdev/fb.h>
+
+void
+nvkm_engine_unref(struct nvkm_engine **pengine)
 {
-	obj = nvkm_subdev(obj, idx);
-	if (obj && nv_iclass(obj, NV_ENGINE_CLASS))
-		return nv_engine(obj);
-	return NULL;
+	struct nvkm_engine *engine = *pengine;
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (--engine->usecount == 0)
+			nvkm_subdev_fini(&engine->subdev, false);
+		mutex_unlock(&engine->subdev.mutex);
+		*pengine = NULL;
+	}
+}
+
+struct nvkm_engine *
+nvkm_engine_ref(struct nvkm_engine *engine)
+{
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (++engine->usecount == 1) {
+			int ret = nvkm_subdev_init(&engine->subdev);
+			if (ret) {
+				engine->usecount--;
+				mutex_unlock(&engine->subdev.mutex);
+				return ERR_PTR(ret);
+			}
+		}
+		mutex_unlock(&engine->subdev.mutex);
+	}
+	return engine;
+}
+
+void
+nvkm_engine_tile(struct nvkm_engine *engine, int region)
+{
+	struct nvkm_fb *fb = engine->subdev.device->fb;
+	if (engine->func->tile)
+		engine->func->tile(engine, region, &fb->tile.region[region]);
+}
+
+static void
+nvkm_engine_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->intr)
+		engine->func->intr(engine);
+}
+
+static int
+nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->fini)
+		return engine->func->fini(engine, suspend);
+	return 0;
+}
+
+static int
+nvkm_engine_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	struct nvkm_fb *fb = subdev->device->fb;
+	int ret = 0, i;
+	s64 time;
+
+	if (!engine->usecount) {
+		nvkm_trace(subdev, "init skipped, engine has no users\n");
+		return ret;
+	}
+
+	if (engine->func->oneinit && !engine->subdev.oneinit) {
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = engine->func->oneinit(engine);
+		if (ret) {
+			nvkm_trace(subdev, "one-time init failed, %d\n", ret);
+			return ret;
+		}
+
+		engine->subdev.oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+	}
+
+	if (engine->func->init)
+		ret = engine->func->init(engine);
+
+	for (i = 0; fb && i < fb->tile.regions; i++)
+		nvkm_engine_tile(engine, i);
+	return ret;
+}
+
+static void *
+nvkm_engine_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->dtor)
+		return engine->func->dtor(engine);
+	return engine;
+}
+
+static const struct nvkm_subdev_func
+nvkm_engine_func = {
+	.dtor = nvkm_engine_dtor,
+	.init = nvkm_engine_init,
+	.fini = nvkm_engine_fini,
+	.intr = nvkm_engine_intr,
+};
+
+int
+nvkm_engine_ctor(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine *engine)
+{
+	nvkm_subdev_ctor(&nvkm_engine_func, device, index,
+			 pmc_enable, &engine->subdev);
+	engine->func = func;
+
+	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+		nvkm_debug(&engine->subdev, "disabled\n");
+		return -ENODEV;
+	}
+
+	spin_lock_init(&engine->lock);
+	return 0;
 }
 
 int
-nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+nvkm_engine_new_(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine **pengine)
 {
-	struct nvkm_engine *engine;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
-				  iname, fname, length, pobject);
-	engine = *pobject;
-	if (ret)
-		return ret;
-
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		int engidx = nv_engidx(engine);
-
-		if (device->disable_mask & (1ULL << engidx)) {
-			if (!nvkm_boolopt(device->cfgopt, iname, false)) {
-				nv_debug(engine, "engine disabled by hw/fw\n");
-				return -ENODEV;
-			}
-
-			nv_warn(engine, "ignoring hw/fw engine disable\n");
-		}
-
-		if (!nvkm_boolopt(device->cfgopt, iname, enable)) {
-			if (!enable)
-				nv_warn(engine, "disabled, %s=1 to enable\n", iname);
-			return -ENODEV;
-		}
-	}
-
-	INIT_LIST_HEAD(&engine->contexts);
-	spin_lock_init(&engine->lock);
-	return 0;
+	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_engine_ctor(func, device, index, pmc_enable,
+				enable, *pengine);
 }
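
The reworked engine code above follows a ctor/new_ split that recurs throughout this series: *_ctor() initialises caller-provided storage (so an engine can be embedded in a larger object), *_new_() is just kzalloc() plus the ctor, and optional per-engine hooks are reached through thin forwarders that check the func table. A minimal userspace sketch of that idiom, with entirely hypothetical names (this is not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct widget;

struct widget_func {
	void (*intr)(struct widget *);	/* optional hook */
};

struct widget {
	const struct widget_func *func;
	int index;
};

/* fills in caller-provided storage; usable for embedded instances */
static void
widget_ctor(const struct widget_func *func, int index, struct widget *w)
{
	w->func = func;
	w->index = index;
}

/* standalone allocation is just allocate-then-ctor */
static int
widget_new_(const struct widget_func *func, int index, struct widget **pw)
{
	if (!(*pw = calloc(1, sizeof(**pw))))
		return -1;
	widget_ctor(func, index, *pw);
	return 0;
}

/* forwarder: only calls the hook if the implementation provides one */
static void
widget_intr(struct widget *w)
{
	if (w->func->intr)
		w->func->intr(w);
}

static void
my_intr(struct widget *w)
{
	printf("intr on widget %d\n", w->index);
}

int
main(void)
{
	static const struct widget_func my_func = { .intr = my_intr };
	struct widget *w;

	if (widget_new_(&my_func, 3, &w) == 0) {
		widget_intr(w);		/* hook present: prints */
		free(w);
	}
	return 0;
}

The same ctor/new_ shape appears again below for gpuobjs, memory objects and generic objects.
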
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index 4f92bfc..b9581fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -38,29 +38,19 @@
 	return NULL;
 }
 
-const struct nvkm_enum *
-nvkm_enum_print(const struct nvkm_enum *en, u32 value)
-{
-	en = nvkm_enum_find(en, value);
-	if (en)
-		pr_cont("%s", en->name);
-	else
-		pr_cont("(unknown enum 0x%08x)", value);
-	return en;
-}
-
 void
-nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value)
+nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
 {
-	while (bf->name) {
+	bool space = false;
+	while (size >= 1 && bf->name) {
 		if (value & bf->mask) {
-			pr_cont(" %s", bf->name);
-			value &= ~bf->mask;
+			int this = snprintf(data, size, "%s%s",
+					    space ? " " : "", bf->name);
+			size -= this;
+			data += this;
+			space = true;
 		}
-
 		bf++;
 	}
-
-	if (value)
-		pr_cont(" (unknown bits 0x%08x)", value);
+	data[0] = '\0';
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 2eba801..c3a790e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -28,240 +28,205 @@
 #include <subdev/bar.h>
 #include <subdev/mmu.h>
 
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+/* fast path, where the backend can provide a direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-	int i;
-
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
-	}
-
-	if (gpuobj->node)
-		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
-
-	if (gpuobj->heap.block_size)
-		nvkm_mm_fini(&gpuobj->heap);
-
-	nvkm_object_destroy(&gpuobj->object);
+	return ioread32_native(gpuobj->map + offset);
 }
 
-int
-nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
-		    int length, void **pobject)
+static void
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nvkm_gpuobj *gpuobj;
-	struct nvkm_mm *heap = NULL;
-	int ret, i;
-	u64 addr;
+	iowrite32_native(data, gpuobj->map + offset);
+}
 
-	*pobject = NULL;
+/* accessor functions for gpuobjs allocated directly from instmem */
+static u32
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->memory, offset);
+}
 
-	if (pargpu) {
-		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
-			if (nv_gpuobj(pargpu)->heap.block_size)
-				break;
-			pargpu = pargpu->parent;
-		}
+static void
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	nvkm_wo32(gpuobj->memory, offset, data);
+}
 
-		if (unlikely(pargpu == NULL)) {
-			nv_error(parent, "no gpuobj heap\n");
-			return -EINVAL;
-		}
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_heap;
+	nvkm_done(gpuobj->memory);
+}
 
-		addr =  nv_gpuobj(pargpu)->addr;
-		heap = &nv_gpuobj(pargpu)->heap;
-		atomic_inc(&parent->refcount);
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_heap_rd32,
+	.wr32 = nvkm_gpuobj_heap_wr32,
+};
+
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->memory);
+	if (likely(gpuobj->map))
+		gpuobj->func = &nvkm_gpuobj_heap_fast;
+	else
+		gpuobj->func = &nvkm_gpuobj_heap_slow;
+	return gpuobj->map;
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+	.acquire = nvkm_gpuobj_heap_acquire,
+};
+
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+}
+
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
+
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_func;
+	nvkm_done(gpuobj->parent);
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_fast = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32,
+	.wr32 = nvkm_gpuobj_wr32,
+};
+
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->parent);
+	if (likely(gpuobj->map)) {
+		gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
+		gpuobj->func = &nvkm_gpuobj_fast;
 	} else {
-		ret = imem->alloc(imem, parent, size, align, &parent);
-		pargpu = parent;
-		if (ret)
-			return ret;
-
-		addr = nv_memobj(pargpu)->addr;
-		size = nv_memobj(pargpu)->size;
-
-		if (bar && bar->alloc) {
-			struct nvkm_instobj *iobj = (void *)parent;
-			struct nvkm_mem **mem = (void *)(iobj + 1);
-			struct nvkm_mem *node = *mem;
-			if (!bar->alloc(bar, parent, node, &pargpu)) {
-				nvkm_object_ref(NULL, &parent);
-				parent = pargpu;
-			}
-		}
+		gpuobj->func = &nvkm_gpuobj_slow;
 	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_GPUOBJ_CLASS, length, pobject);
-	nvkm_object_ref(NULL, &parent);
-	gpuobj = *pobject;
-	if (ret)
-		return ret;
-
-	gpuobj->parent = pargpu;
-	gpuobj->flags = flags;
-	gpuobj->addr = addr;
-	gpuobj->size = size;
-
-	if (heap) {
-		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
-				   &gpuobj->node);
-		if (ret)
-			return ret;
-
-		gpuobj->addr += gpuobj->node->offset;
-	}
-
-	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
-		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
-	}
-
-	return ret;
+	return gpuobj->map;
 }
 
-struct nvkm_gpuobj_class {
-	struct nvkm_object *pargpu;
-	u64 size;
-	u32 align;
-	u32 flags;
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+	.acquire = nvkm_gpuobj_acquire,
 };
 
 static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
 {
-	struct nvkm_gpuobj_class *args = data;
-	struct nvkm_gpuobj *object;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
-				 args->size, args->align, args->flags,
-				 &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
+	if (parent) {
+		if (align >= 0) {
+			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+					   max(align, 1), &gpuobj->node);
+		} else {
+			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+					   -align, &gpuobj->node);
+		}
+		if (ret)
+			return ret;
 
-	return 0;
-}
+		gpuobj->parent = parent;
+		gpuobj->func = &nvkm_gpuobj_func;
+		gpuobj->addr = parent->addr + gpuobj->node->offset;
+		gpuobj->size = gpuobj->node->length;
 
-void
-_nvkm_gpuobj_dtor(struct nvkm_object *object)
-{
-	nvkm_gpuobj_destroy(nv_gpuobj(object));
-}
+		if (zero) {
+			nvkm_kmap(gpuobj);
+			for (offset = 0; offset < gpuobj->size; offset += 4)
+				nvkm_wo32(gpuobj, offset, 0x00000000);
+			nvkm_done(gpuobj);
+		}
+	} else {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+				      abs(align), zero, &gpuobj->memory);
+		if (ret)
+			return ret;
 
-int
-_nvkm_gpuobj_init(struct nvkm_object *object)
-{
-	return nvkm_gpuobj_init(nv_gpuobj(object));
-}
-
-int
-_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
-}
-
-u32
-_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	return pfuncs->rd32(gpuobj->parent, addr);
-}
-
-void
-_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	pfuncs->wr32(gpuobj->parent, addr, data);
-}
-
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
-	.handle = 0x00000000,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpuobj_ctor,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
-
-int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-		u32 size, u32 align, u32 flags,
-		struct nvkm_gpuobj **pgpuobj)
-{
-	struct nvkm_object *engine = parent;
-	struct nvkm_gpuobj_class args = {
-		.pargpu = pargpu,
-		.size = size,
-		.align = align,
-		.flags = flags,
-	};
-
-	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
-		engine = &engine->engine->subdev.object;
-	BUG_ON(engine == NULL);
-
-	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
-				&args, sizeof(args),
-				(struct nvkm_object **)pgpuobj);
-}
-
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_bar *bar = nvkm_bar(gpuobj);
-	int ret = -EINVAL;
-
-	if (bar && bar->umap) {
-		struct nvkm_instobj *iobj = (void *)
-			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-		struct nvkm_mem **mem = (void *)(iobj + 1);
-		ret = bar->umap(bar, *mem, access, vma);
+		gpuobj->func = &nvkm_gpuobj_heap;
+		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
+	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+}
+
+void
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj = *pgpuobj;
+	if (gpuobj) {
+		if (gpuobj->parent)
+			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+		nvkm_mm_fini(&gpuobj->heap);
+		nvkm_memory_del(&gpuobj->memory);
+		kfree(*pgpuobj);
+		*pgpuobj = NULL;
+	}
+}
+
+int
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj;
+	int ret;
+
+	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+	if (ret)
+		nvkm_gpuobj_del(pgpuobj);
 	return ret;
 }
 
 int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		   u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+		u32 access, struct nvkm_vma *vma)
 {
-	struct nvkm_instobj *iobj = (void *)
-		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-	struct nvkm_mem **mem = (void *)(iobj + 1);
-	int ret;
-
-	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, *mem);
-	return 0;
+	struct nvkm_memory *memory = gpuobj->memory;
+	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret == 0)
+		nvkm_memory_map(memory, vma, 0);
+	return ret;
 }
 
 void
@@ -278,39 +243,13 @@
  * anywhere else.
  */
 
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *gpuobj = (void *)object;
-	nvkm_object_ref(NULL, &gpuobj->parent);
-	nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
-	.handle = NV_GPUOBJ_CLASS,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_gpudup_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
-		struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *gpuobj;
-	int ret;
+	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
-				 &nvkm_gpudup_oclass, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret)
-		return ret;
-
-	nvkm_object_ref(nv_object(base), &gpuobj->parent);
-	gpuobj->addr = base->addr;
-	gpuobj->size = base->size;
+	(*pgpuobj)->addr = nvkm_memory_addr(memory);
+	(*pgpuobj)->size = nvkm_memory_size(memory);
 	return 0;
 }
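
The acquire/release pairs above use a state-pattern trick: while a mapping is held, the gpuobj's func pointer is swapped to a table whose rd32/wr32 hit the mapped pointer directly (or fall back to indirect accessors when no CPU mapping is available), and release() swaps the acquire-only table back in, so no per-access "am I mapped?" branch is needed. A self-contained sketch of the same mechanism, all names hypothetical:

#include <stdint.h>
#include <stdio.h>

struct obj;

struct obj_func {
	void *(*acquire)(struct obj *);
	void (*release)(struct obj *);
	uint32_t (*rd32)(struct obj *, uint32_t offset);
};

struct obj {
	const struct obj_func *func;
	uint32_t *backing;	/* stand-in for instance memory */
	uint32_t *map;		/* valid only between acquire/release */
};

static const struct obj_func obj_unmapped;

static uint32_t
obj_rd32_fast(struct obj *obj, uint32_t offset)
{
	return obj->map[offset / 4];	/* direct load, no state check */
}

static void
obj_release(struct obj *obj)
{
	obj->map = NULL;
	obj->func = &obj_unmapped;	/* rd32 must not be used again */
}

static const struct obj_func obj_mapped = {
	.release = obj_release,
	.rd32 = obj_rd32_fast,
};

static void *
obj_acquire(struct obj *obj)
{
	obj->map = obj->backing;	/* stands in for nvkm_kmap() */
	obj->func = &obj_mapped;
	return obj->map;
}

static const struct obj_func obj_unmapped = {
	.acquire = obj_acquire,
};

int
main(void)
{
	uint32_t mem[4] = { 0xdeadbeef, 1, 2, 3 };
	struct obj obj = { .func = &obj_unmapped, .backing = mem };

	obj.func->acquire(&obj);
	printf("%08x\n", obj.func->rd32(&obj, 0));
	obj.func->release(&obj);
	return 0;
}

As in the kernel code, correctness relies on callers only touching rd32/wr32 between an acquire and its matching release.
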
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
deleted file mode 100644
index dc7ff10..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/handle.h>
-#include <core/client.h>
-
-#define hprintk(h,l,f,a...) do {                                               \
-	struct nvkm_client *c = nvkm_client((h)->object);                      \
-	struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0;         \
-	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);               \
-} while(0)
-
-int
-nvkm_handle_init(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "init running\n");
-	ret = nvkm_object_inc(handle->object);
-	if (ret)
-		return ret;
-
-	hprintk(handle, TRACE, "init children\n");
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_init(item);
-		if (ret)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "init completed\n");
-	return 0;
-fail:
-	hprintk(handle, ERROR, "init failed with %d\n", ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		nvkm_handle_fini(item, false);
-	}
-
-	nvkm_object_dec(handle->object, false);
-	return ret;
-}
-
-int
-nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
-{
-	static char *name[2] = { "fini", "suspend" };
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "%s children\n", name[suspend]);
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_fini(item, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s running\n", name[suspend]);
-	if (handle->object) {
-		ret = nvkm_object_dec(handle->object, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
-	return 0;
-fail:
-	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		int rret = nvkm_handle_init(item);
-		if (rret)
-			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
-	}
-
-	return ret;
-}
-
-int
-nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
-		   struct nvkm_object *object, struct nvkm_handle **phandle)
-{
-	struct nvkm_object *namedb;
-	struct nvkm_handle *handle;
-	int ret;
-
-	namedb = parent;
-	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
-		namedb = namedb->parent;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&handle->head);
-	INIT_LIST_HEAD(&handle->tree);
-	handle->name = _handle;
-	handle->priv = ~0;
-
-	ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
-	if (ret) {
-		kfree(handle);
-		return ret;
-	}
-
-	if (nv_parent(parent)->object_attach) {
-		ret = nv_parent(parent)->object_attach(parent, object, _handle);
-		if (ret < 0) {
-			nvkm_handle_destroy(handle);
-			return ret;
-		}
-
-		handle->priv = ret;
-	}
-
-	if (object != namedb) {
-		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
-			namedb = namedb->parent;
-
-		handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
-		if (handle->parent) {
-			list_add(&handle->head, &handle->parent->tree);
-			nvkm_namedb_put(handle->parent);
-		}
-	}
-
-	hprintk(handle, TRACE, "created\n");
-	*phandle = handle;
-	return 0;
-}
-
-void
-nvkm_handle_destroy(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item, *temp;
-
-	hprintk(handle, TRACE, "destroy running\n");
-	list_for_each_entry_safe(item, temp, &handle->tree, head) {
-		nvkm_handle_destroy(item);
-	}
-	list_del(&handle->head);
-
-	if (handle->priv != ~0) {
-		struct nvkm_object *parent = handle->parent->object;
-		nv_parent(parent)->object_detach(parent, handle->priv);
-	}
-
-	hprintk(handle, TRACE, "destroy completed\n");
-	nvkm_namedb_remove(handle);
-	kfree(handle);
-}
-
-struct nvkm_object *
-nvkm_handle_ref(struct nvkm_object *parent, u32 name)
-{
-	struct nvkm_object *object = NULL;
-	struct nvkm_handle *handle;
-
-	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
-		parent = parent->parent;
-
-	handle = nvkm_namedb_get(nv_namedb(parent), name);
-	if (handle) {
-		nvkm_object_ref(handle->object, &object);
-		nvkm_namedb_put(handle);
-	}
-
-	return object;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_class(namedb, oclass);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_vinst(namedb, vinst);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_cinst(namedb, cinst);
-	return NULL;
-}
-
-void
-nvkm_handle_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		nvkm_namedb_put(handle);
-}
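
handle.c goes away entirely; its recursive init/fini walk, including the rollback via list_for_each_entry_continue_reverse() when a child fails mid-walk, survives in the new nvkm_object_init()/nvkm_object_fini() later in this diff. The rollback idiom itself, sketched with a plain array instead of kernel lists (names invented):

#include <stdio.h>

struct child {
	const char *name;
	int fail;	/* simulate an init failure */
};

static int
child_init(struct child *c)
{
	if (c->fail)
		return -1;
	printf("init %s\n", c->name);
	return 0;
}

static void
child_fini(struct child *c)
{
	printf("fini %s\n", c->name);
}

static int
parent_init(struct child *children, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (child_init(&children[i]))
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)	/* undo only what succeeded, newest first */
		child_fini(&children[i]);
	return -1;
}

int
main(void)
{
	struct child kids[] = { { "a", 0 }, { "b", 0 }, { "c", 1 } };

	if (parent_init(kids, 3))
		printf("parent_init failed; a and b were rolled back\n");
	return 0;
}
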
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 4459ff5..d87d6ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,196 +24,154 @@
 #include <core/ioctl.h>
 #include <core/client.h>
 #include <core/engine.h>
-#include <core/handle.h>
-#include <core/namedb.h>
 
 #include <nvif/unpack.h>
 #include <nvif/ioctl.h>
 
 static int
-nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
-		struct nvif_ioctl_nop none;
+		struct nvif_ioctl_nop_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "nop size %d\n", size);
-	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "nop\n");
+	nvif_ioctl(object, "nop size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
+		args->v0.version = NVIF_VERSION_LATEST;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	if (!nv_iclass(object, NV_PARENT_CLASS)) {
-		nv_debug(object, "cannot have children (sclass)\n");
-		return -ENODEV;
-	}
-
-	nv_ioctl(object, "sclass size %d\n", size);
+	nvif_ioctl(object, "sclass size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "sclass vers %d count %d\n",
-			 args->v0.version, args->v0.count);
-		if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
-			ret = nvkm_parent_lclass(object, args->v0.oclass,
-							 args->v0.count);
-			if (ret >= 0) {
-				args->v0.count = ret;
-				ret = 0;
+		nvif_ioctl(object, "sclass vers %d count %d\n",
+			   args->v0.version, args->v0.count);
+		if (size != args->v0.count * sizeof(args->v0.oclass[0]))
+			return -EINVAL;
+
+		while (object->func->sclass &&
+		       object->func->sclass(object, i, &oclass) >= 0) {
+			if (i < args->v0.count) {
+				args->v0.oclass[i].oclass = oclass.base.oclass;
+				args->v0.oclass[i].minver = oclass.base.minver;
+				args->v0.oclass[i].maxver = oclass.base.maxver;
 			}
-		} else {
-			ret = -EINVAL;
+			i++;
 		}
+
+		args->v0.count = i;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_new_v0 v0;
 	} *args = data;
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *engctx = NULL;
+	struct nvkm_client *client = parent->client;
 	struct nvkm_object *object = NULL;
-	struct nvkm_parent *parent;
-	struct nvkm_object *engine;
-	struct nvkm_oclass *oclass;
-	u32 _handle, _oclass;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	nv_ioctl(client, "new size %d\n", size);
+	nvif_ioctl(parent, "new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		_handle = args->v0.handle;
-		_oclass = args->v0.oclass;
+		nvif_ioctl(parent, "new vers %d handle %08x class %08x "
+				   "route %02x token %llx object %016llx\n",
+			   args->v0.version, args->v0.handle, args->v0.oclass,
+			   args->v0.route, args->v0.token, args->v0.object);
 	} else
 		return ret;
 
-	nv_ioctl(client, "new vers %d handle %08x class %08x "
-			 "route %02x token %llx\n",
-		 args->v0.version, _handle, _oclass,
-		 args->v0.route, args->v0.token);
-
-	if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
-		nv_debug(handle->object, "cannot have children (ctor)\n");
-		ret = -ENODEV;
-		goto fail_class;
+	if (!parent->func->sclass) {
+		nvif_ioctl(parent, "cannot have children\n");
+		return -EINVAL;
 	}
 
-	parent = nv_parent(handle->object);
-
-	/* check that parent supports the requested subclass */
-	ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass);
-	if (ret) {
-		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
-		goto fail_class;
-	}
-
-	/* make sure engine init has been completed *before* any objects
-	 * it controls are created - the constructors may depend on
-	 * state calculated at init (ie. default context construction)
-	 */
-	if (engine) {
-		ret = nvkm_object_inc(engine);
+	do {
+		memset(&oclass, 0x00, sizeof(oclass));
+		oclass.client = client;
+		oclass.handle = args->v0.handle;
+		oclass.object = args->v0.object;
+		oclass.parent = parent;
+		ret = parent->func->sclass(parent, i++, &oclass);
 		if (ret)
-			goto fail_class;
+			return ret;
+	} while (oclass.base.oclass != args->v0.oclass);
+
+	if (oclass.engine) {
+		oclass.engine = nvkm_engine_ref(oclass.engine);
+		if (IS_ERR(oclass.engine))
+			return PTR_ERR(oclass.engine);
 	}
 
-	/* if engine requires it, create a context object to insert
-	 * between the parent and its children (eg. PGRAPH context)
-	 */
-	if (engine && nv_engine(engine)->cclass) {
-		ret = nvkm_object_ctor(&parent->object, engine,
-				       nv_engine(engine)->cclass,
-				       data, size, &engctx);
-		if (ret)
-			goto fail_engctx;
-	} else {
-		nvkm_object_ref(&parent->object, &engctx);
+	ret = oclass.ctor(&oclass, data, size, &object);
+	nvkm_engine_unref(&oclass.engine);
+	if (ret == 0) {
+		ret = nvkm_object_init(object);
+		if (ret == 0) {
+			list_add(&object->head, &parent->tree);
+			object->route = args->v0.route;
+			object->token = args->v0.token;
+			object->object = args->v0.object;
+			if (nvkm_client_insert(client, object)) {
+				client->data = object;
+				return 0;
+			}
+			ret = -EEXIST;
+		}
+		nvkm_object_fini(object, false);
 	}
 
-	/* finally, create new object and bind it to its handle */
-	ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
-	client->data = object;
-	if (ret)
-		goto fail_ctor;
-
-	ret = nvkm_object_inc(object);
-	if (ret)
-		goto fail_init;
-
-	ret = nvkm_handle_create(&parent->object, handle->name,
-				 _handle, object, &handle);
-	if (ret)
-		goto fail_handle;
-
-	ret = nvkm_handle_init(handle);
-	handle->route = args->v0.route;
-	handle->token = args->v0.token;
-	if (ret)
-		nvkm_handle_destroy(handle);
-
-fail_handle:
-	nvkm_object_dec(object, false);
-fail_init:
-	nvkm_object_ref(NULL, &object);
-fail_ctor:
-	nvkm_object_ref(NULL, &engctx);
-fail_engctx:
-	if (engine)
-		nvkm_object_dec(engine, false);
-fail_class:
+	nvkm_object_del(&object);
 	return ret;
 }
 
 static int
-nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_del none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "delete size %d\n", size);
+	nvif_ioctl(object, "delete size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "delete\n");
-		nvkm_handle_fini(handle, false);
-		nvkm_handle_destroy(handle);
+		nvif_ioctl(object, "delete\n");
+		nvkm_object_fini(object, false);
+		nvkm_object_del(&object);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_mthd_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "mthd size %d\n", size);
+	nvif_ioctl(object, "mthd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "mthd vers %d mthd %02x\n",
-			 args->v0.version, args->v0.method);
-		if (ret = -ENODEV, ofuncs->mthd)
-			ret = ofuncs->mthd(object, args->v0.method, data, size);
+		nvif_ioctl(object, "mthd vers %d mthd %02x\n",
+			   args->v0.version, args->v0.method);
+		ret = nvkm_object_mthd(object, args->v0.method, data, size);
 	}
 
 	return ret;
@@ -221,37 +179,34 @@
 
 
 static int
-nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_rd_v0 v0;
 	} *args = data;
+	union {
+		u8  b08;
+		u16 b16;
+		u32 b32;
+	} v;
 	int ret;
 
-	nv_ioctl(object, "rd size %d\n", size);
+	nvif_ioctl(object, "rd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
-			 args->v0.version, args->v0.size, args->v0.addr);
+		nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
+			   args->v0.version, args->v0.size, args->v0.addr);
 		switch (args->v0.size) {
 		case 1:
-			if (ret = -ENODEV, ofuncs->rd08) {
-				args->v0.data = nv_ro08(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
+			args->v0.data = v.b08;
 			break;
 		case 2:
-			if (ret = -ENODEV, ofuncs->rd16) {
-				args->v0.data = nv_ro16(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
+			args->v0.data = v.b16;
 			break;
 		case 4:
-			if (ret = -ENODEV, ofuncs->rd32) {
-				args->v0.data = nv_ro32(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
+			args->v0.data = v.b32;
 			break;
 		default:
 			ret = -EINVAL;
@@ -263,104 +218,81 @@
 }
 
 static int
-nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_wr_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "wr size %d\n", size);
+	nvif_ioctl(object, "wr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
-			 args->v0.version, args->v0.size, args->v0.addr,
-			 args->v0.data);
-		switch (args->v0.size) {
-		case 1:
-			if (ret = -ENODEV, ofuncs->wr08) {
-				nv_wo08(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 2:
-			if (ret = -ENODEV, ofuncs->wr16) {
-				nv_wo16(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 4:
-			if (ret = -ENODEV, ofuncs->wr32) {
-				nv_wo32(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		default:
-			ret = -EINVAL;
-			break;
-		}
+		nvif_ioctl(object,
+			   "wr vers %d size %d addr %016llx data %08x\n",
+			   args->v0.version, args->v0.size, args->v0.addr,
+			   args->v0.data);
+	} else
+		return ret;
+
+	switch (args->v0.size) {
+	case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
+	case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
+	case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
+	default:
+		break;
 	}
 
-	return ret;
+	return -EINVAL;
 }
 
 static int
-nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "map size %d\n", size);
+	nvif_ioctl(object, "map size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "map vers %d\n", args->v0.version);
-		if (ret = -ENODEV, ofuncs->map) {
-			ret = ofuncs->map(object, &args->v0.handle,
-						  &args->v0.length);
-		}
+		nvif_ioctl(object, "map vers %d\n", args->v0.version);
+		ret = nvkm_object_map(object, &args->v0.handle,
+					      &args->v0.length);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_unmap none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "unmap size %d\n", size);
+	nvif_ioctl(object, "unmap size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "unmap\n");
+		nvif_ioctl(object, "unmap\n");
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_ntfy_new_v0 v0;
 	} *args = data;
 	struct nvkm_event *event;
 	int ret;
 
-	nv_ioctl(object, "ntfy new size %d\n", size);
+	nvif_ioctl(object, "ntfy new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "ntfy new vers %d event %02x\n",
-			 args->v0.version, args->v0.event);
-		if (ret = -ENODEV, ofuncs->ntfy)
-			ret = ofuncs->ntfy(object, args->v0.event, &event);
+		nvif_ioctl(object, "ntfy new vers %d event %02x\n",
+			   args->v0.version, args->v0.event);
+		ret = nvkm_object_ntfy(object, args->v0.event, &event);
 		if (ret == 0) {
 			ret = nvkm_client_notify_new(object, event, data, size);
 			if (ret >= 0) {
@@ -374,19 +306,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_del_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy del size %d\n", size);
+	nvif_ioctl(object, "ntfy del size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy del vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy del vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_del(client, args->v0.index);
 	}
 
@@ -394,19 +325,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_get_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy get size %d\n", size);
+	nvif_ioctl(object, "ntfy get size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy get vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy get vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_get(client, args->v0.index);
 	}
 
@@ -414,19 +344,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_put_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy put size %d\n", size);
+	nvif_ioctl(object, "ntfy put size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy put vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy put vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_put(client, args->v0.index);
 	}
 
@@ -435,7 +364,7 @@
 
 static struct {
 	int version;
-	int (*func)(struct nvkm_handle *, void *, u32);
+	int (*func)(struct nvkm_object *, void *, u32);
 }
 nvkm_ioctl_v0[] = {
 	{ 0x00, nvkm_ioctl_nop },
@@ -454,40 +383,31 @@
 };
 
 static int
-nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path,
+nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 		void *data, u32 size, u8 owner, u8 *route, u64 *token)
 {
-	struct nvkm_handle *handle = parent;
-	struct nvkm_namedb *namedb;
 	struct nvkm_object *object;
 	int ret;
 
-	while ((object = parent->object), nr--) {
-		nv_ioctl(object, "path 0x%08x\n", path[nr]);
-		if (!nv_iclass(object, NV_PARENT_CLASS)) {
-			nv_debug(object, "cannot have children (path)\n");
-			return -EINVAL;
-		}
-
-		if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
-		    !(handle = nvkm_namedb_get(namedb, path[nr]))) {
-			nv_debug(object, "handle 0x%08x not found\n", path[nr]);
-			return -ENOENT;
-		}
-		nvkm_namedb_put(handle);
-		parent = handle;
+	if (handle)
+		object = nvkm_client_search(client, handle);
+	else
+		object = &client->object;
+	if (unlikely(!object)) {
+		nvif_ioctl(&client->object, "object not found\n");
+		return -ENOENT;
 	}
 
-	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
-		nv_ioctl(object, "object route != owner\n");
+	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
+		nvif_ioctl(&client->object, "route != owner\n");
 		return -EACCES;
 	}
-	*route = handle->route;
-	*token = handle->token;
+	*route = object->route;
+	*token = object->token;
 
 	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
 		if (nvkm_ioctl_v0[type].version == 0)
-			ret = nvkm_ioctl_v0[type].func(handle, data, size);
+			ret = nvkm_ioctl_v0[type].func(object, data, size);
 	}
 
 	return ret;
@@ -497,25 +417,26 @@
 nvkm_ioctl(struct nvkm_client *client, bool supervisor,
 	   void *data, u32 size, void **hack)
 {
+	struct nvkm_object *object = &client->object;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 	int ret;
 
 	client->super = supervisor;
-	nv_ioctl(client, "size %d\n", size);
+	nvif_ioctl(object, "size %d\n", size);
 
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
-			 args->v0.version, args->v0.type, args->v0.path_nr,
-			 args->v0.owner);
-		ret = nvkm_ioctl_path(client->root, args->v0.type,
-				      args->v0.path_nr, args->v0.path,
+		nvif_ioctl(object,
+			   "vers %d type %02x object %016llx owner %02x\n",
+			   args->v0.version, args->v0.type, args->v0.object,
+			   args->v0.owner);
+		ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
 				      data, size, args->v0.owner,
 				      &args->v0.route, &args->v0.token);
 	}
 
-	nv_ioctl(client, "return %d\n", ret);
+	nvif_ioctl(object, "return %d\n", ret);
 	if (hack) {
 		*hack = client->data;
 		client->data = NULL;
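
The ioctl path above now resolves objects by 64-bit handle rather than walking a path array, and dispatches through the nvkm_ioctl_v0[] table, indexing handler functions by the ioctl type with a bounds check. A minimal sketch of that table-dispatch shape (handlers illustrative only):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct object { int id; };

static int
ioctl_nop(struct object *object, void *data, unsigned int size)
{
	(void)object; (void)data; (void)size;
	return 0;
}

/* the type value indexes the table; version gates future revisions */
static struct {
	int version;
	int (*func)(struct object *, void *, unsigned int);
} ioctl_v0[] = {
	{ 0x00, ioctl_nop },
	/* further entries follow in type order */
};

static int
ioctl_dispatch(struct object *object, unsigned int type,
	       void *data, unsigned int size)
{
	int ret = -EINVAL;

	if (type < ARRAY_SIZE(ioctl_v0)) {
		if (ioctl_v0[type].version == 0)
			ret = ioctl_v0[type].func(object, data, size);
	}
	return ret;
}

int
main(void)
{
	struct object obj = { 1 };

	printf("nop -> %d, unknown -> %d\n",
	       ioctl_dispatch(&obj, 0, NULL, 0),
	       ioctl_dispatch(&obj, 7, NULL, 0));
	return 0;
}
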
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
new file mode 100644
index 0000000..8903c04
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/memory.h>
+#include <subdev/instmem.h>
+
+void
+nvkm_memory_ctor(const struct nvkm_memory_func *func,
+		 struct nvkm_memory *memory)
+{
+	memory->func = func;
+}
+
+void
+nvkm_memory_del(struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = *pmemory;
+	if (memory && !WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			*pmemory = memory->func->dtor(memory);
+		kfree(*pmemory);
+		*pmemory = NULL;
+	}
+}
+
+int
+nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
+		u64 size, u32 align, bool zero,
+		struct nvkm_memory **pmemory)
+{
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_memory *memory;
+	int ret;
+
+	if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+		return -ENOSYS;
+
+	ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+	if (ret)
+		return ret;
+
+	*pmemory = memory;
+	return 0;
+}
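
nvkm_memory_del() above (like nvkm_gpuobj_del() and nvkm_object_del() elsewhere in this diff) takes the address of the caller's pointer so it can free the object and clear the pointer in one step, which also makes a second del a harmless no-op; the optional dtor may hand back a different pointer to free. A sketch of the idiom with made-up types:

#include <stdio.h>
#include <stdlib.h>

struct memory {
	void *(*dtor)(struct memory *);	/* may return what to free() */
};

static void
memory_del(struct memory **pmemory)
{
	struct memory *memory = *pmemory;
	if (memory) {
		void *data = memory;
		if (memory->dtor)
			data = memory->dtor(memory);
		free(data);
		*pmemory = NULL;	/* caller's pointer can't dangle */
	}
}

int
main(void)
{
	struct memory *mem = calloc(1, sizeof(*mem));

	memory_del(&mem);
	memory_del(&mem);	/* harmless no-op the second time */
	printf("mem = %p\n", (void *)mem);
	return 0;
}
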
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 7f458df..09a1eee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -26,7 +26,7 @@
 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
 	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
 
-static void
+void
 nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
 {
 	struct nvkm_mm_node *node;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 6400767..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/handle.h>
-
-static struct nvkm_handle *
-nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (handle->name == name)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_mclass(handle->object) == oclass)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->addr == vinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->node &&
-			    nv_gpuobj(handle->object)->node->offset == cinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-int
-nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
-		   struct nvkm_object *object,
-		   struct nvkm_handle *handle)
-{
-	int ret = -EEXIST;
-	write_lock_irq(&namedb->lock);
-	if (!nvkm_namedb_lookup(namedb, name)) {
-		nvkm_object_ref(object, &handle->object);
-		handle->namedb = namedb;
-		list_add(&handle->node, &namedb->list);
-		ret = 0;
-	}
-	write_unlock_irq(&namedb->lock);
-	return ret;
-}
-
-void
-nvkm_namedb_remove(struct nvkm_handle *handle)
-{
-	struct nvkm_namedb *namedb = handle->namedb;
-	struct nvkm_object *object = handle->object;
-	write_lock_irq(&namedb->lock);
-	list_del(&handle->node);
-	write_unlock_irq(&namedb->lock);
-	nvkm_object_ref(NULL, &object);
-}
-
-struct nvkm_handle *
-nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup(namedb, name);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_class(namedb, oclass);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_vinst(namedb, vinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_cinst(namedb, cinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-void
-nvkm_namedb_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		read_unlock(&handle->namedb->lock);
-}
-
-int
-nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int length, void **pobject)
-{
-	struct nvkm_namedb *namedb;
-	int ret;
-
-	ret = nvkm_parent_create_(parent, engine, oclass, pclass |
-				  NV_NAMEDB_CLASS, sclass, engcls,
-				  length, pobject);
-	namedb = *pobject;
-	if (ret)
-		return ret;
-
-	rwlock_init(&namedb->lock);
-	INIT_LIST_HEAD(&namedb->list);
-	return 0;
-}
-
-int
-_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_namedb *object;
-	int ret;
-
-	ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
-
-	return 0;
-}
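
Note the locking contract the deleted namedb had: on a successful lookup, nvkm_namedb_get() returned with the read lock still held, and nvkm_namedb_put() released it, pinning the handle against removal in between. A pthread sketch of that "lookup holds the lock until put" idiom (types invented):

#include <pthread.h>
#include <stdio.h>

struct entry {
	unsigned int name;
	const char *data;
};

static pthread_rwlock_t db_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry db[] = { { 1, "one" }, { 2, "two" } };

static struct entry *
db_get(unsigned int name)
{
	size_t i;

	pthread_rwlock_rdlock(&db_lock);
	for (i = 0; i < sizeof(db) / sizeof(db[0]); i++) {
		if (db[i].name == name)
			return &db[i];	/* NB: read lock still held */
	}
	pthread_rwlock_unlock(&db_lock);	/* miss: drop it ourselves */
	return NULL;
}

static void
db_put(struct entry *e)
{
	if (e)
		pthread_rwlock_unlock(&db_lock);
}

int
main(void)
{
	struct entry *e = db_get(2);

	if (e) {
		printf("%s\n", e->data);	/* writers excluded until put */
		db_put(e);
	}
	return 0;
}
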
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 979f362..67aa722 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -22,309 +22,243 @@
  * Authors: Ben Skeggs
  */
 #include <core/object.h>
+#include <core/client.h>
 #include <core/engine.h>
 
-#ifdef NVKM_OBJECT_MAGIC
-static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
-static DEFINE_SPINLOCK(_objlist_lock);
-#endif
-
 int
-nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    int size, void **pobject)
+nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
-	struct nvkm_object *object;
-
-	object = *pobject = kzalloc(size, GFP_KERNEL);
-	if (!object)
-		return -ENOMEM;
-
-	nvkm_object_ref(parent, &object->parent);
-	nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
-	object->oclass = oclass;
-	object->oclass->handle |= pclass;
-	atomic_set(&object->refcount, 1);
-	atomic_set(&object->usecount, 0);
-
-#ifdef NVKM_OBJECT_MAGIC
-	object->_magic = NVKM_OBJECT_MAGIC;
-	spin_lock(&_objlist_lock);
-	list_add(&object->list, &_objlist);
-	spin_unlock(&_objlist_lock);
-#endif
-	return 0;
+	if (likely(object->func->mthd))
+		return object->func->mthd(object, mthd, data, size);
+	return -ENODEV;
 }
 
 int
-_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
 {
-	if (size != 0)
-		return -ENOSYS;
-	return nvkm_object_create(parent, engine, oclass, 0, pobject);
-}
-
-void
-nvkm_object_destroy(struct nvkm_object *object)
-{
-#ifdef NVKM_OBJECT_MAGIC
-	spin_lock(&_objlist_lock);
-	list_del(&object->list);
-	spin_unlock(&_objlist_lock);
-#endif
-	nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
-	nvkm_object_ref(NULL, &object->parent);
-	kfree(object);
+	if (likely(object->func->ntfy))
+		return object->func->ntfy(object, mthd, pevent);
+	return -ENODEV;
 }
 
 int
-nvkm_object_init(struct nvkm_object *object)
+nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
 {
-	return 0;
+	if (likely(object->func->map))
+		return object->func->map(object, addr, size);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	if (likely(object->func->rd08))
+		return object->func->rd08(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	if (likely(object->func->rd16))
+		return object->func->rd16(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	if (likely(object->func->rd32))
+		return object->func->rd32(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	if (likely(object->func->wr08))
+		return object->func->wr08(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	if (likely(object->func->wr16))
+		return object->func->wr16(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	if (likely(object->func->wr32))
+		return object->func->wr32(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	if (object->func->bind)
+		return object->func->bind(object, gpuobj, align, pgpuobj);
+	return -ENODEV;
 }
 
 int
 nvkm_object_fini(struct nvkm_object *object, bool suspend)
 {
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_object *child;
+	s64 time;
+	int ret;
+
+	nvif_debug(object, "%s children...\n", action);
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_fini(child, suspend);
+		if (ret && suspend)
+			goto fail_child;
+	}
+
+	nvif_debug(object, "%s running...\n", action);
+	if (object->func->fini) {
+		ret = object->func->fini(object, suspend);
+		if (ret) {
+			nvif_error(object, "%s failed with %d\n", action, ret);
+			if (suspend)
+				goto fail;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "%s completed in %lldus\n", action, time);
 	return 0;
+
+fail:
+	if (object->func->init) {
+		int rret = object->func->init(object);
+		if (rret)
+			nvif_fatal(object, "failed to restart, %d\n", rret);
+	}
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head) {
+		nvkm_object_init(child);
+	}
+	return ret;
 }
 
-struct nvkm_ofuncs
-nvkm_object_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+int
+nvkm_object_init(struct nvkm_object *object)
+{
+	struct nvkm_object *child;
+	s64 time;
+	int ret;
+
+	nvif_debug(object, "init running...\n");
+	time = ktime_to_us(ktime_get());
+	if (object->func->init) {
+		ret = object->func->init(object);
+		if (ret)
+			goto fail;
+	}
+
+	nvif_debug(object, "init children...\n");
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_init(child);
+		if (ret)
+			goto fail_child;
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "init completed in %lldus\n", time);
+	return 0;
+
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head)
+		nvkm_object_fini(child, false);
+fail:
+	nvif_error(object, "init failed with %d\n", ret);
+	if (object->func->fini)
+		object->func->fini(object, false);
+	return ret;
+}
+
+void *
+nvkm_object_dtor(struct nvkm_object *object)
+{
+	struct nvkm_object *child, *ctemp;
+	void *data = object;
+	s64 time;
+
+	nvif_debug(object, "destroy children...\n");
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry_safe(child, ctemp, &object->tree, head) {
+		nvkm_object_del(&child);
+	}
+
+	nvif_debug(object, "destroy running...\n");
+	if (object->func->dtor)
+		data = object->func->dtor(object);
+	nvkm_engine_unref(&object->engine);
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "destroy completed in %lldus...\n", time);
+	return data;
+}
+
+void
+nvkm_object_del(struct nvkm_object **pobject)
+{
+	struct nvkm_object *object = *pobject;
+	if (object && !WARN_ON(!object->func)) {
+		*pobject = nvkm_object_dtor(object);
+		nvkm_client_remove(object->client, object);
+		list_del(&object->head);
+		kfree(*pobject);
+		*pobject = NULL;
+	}
+}
+
+void
+nvkm_object_ctor(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_object *object)
+{
+	object->func = func;
+	object->client = oclass->client;
+	object->engine = nvkm_engine_ref(oclass->engine);
+	object->oclass = oclass->base.oclass;
+	object->handle = oclass->handle;
+	INIT_LIST_HEAD(&object->head);
+	INIT_LIST_HEAD(&object->tree);
+	RB_CLEAR_NODE(&object->node);
+	WARN_ON(oclass->engine && !object->engine);
+}
+
+int
+nvkm_object_new_(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
+{
+	if (size == 0) {
+		if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
+			return -ENOMEM;
+		nvkm_object_ctor(func, oclass, *pobject);
+		return 0;
+	}
+	return -ENOSYS;
+}
+
+static const struct nvkm_object_func
+nvkm_object_func = {
 };
 
 int
-nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		struct nvkm_object **pobject)
 {
-	struct nvkm_ofuncs *ofuncs = oclass->ofuncs;
-	struct nvkm_object *object = NULL;
-	int ret;
-
-	ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
-	*pobject = object;
-	if (ret < 0) {
-		if (ret != -ENODEV) {
-			nv_error(parent, "failed to create 0x%08x, %d\n",
-				 oclass->handle, ret);
-		}
-
-		if (object) {
-			ofuncs->dtor(object);
-			*pobject = NULL;
-		}
-
-		return ret;
-	}
-
-	if (ret == 0) {
-		nv_trace(object, "created\n");
-		atomic_set(&object->refcount, 1);
-	}
-
-	return 0;
-}
-
-static void
-nvkm_object_dtor(struct nvkm_object *object)
-{
-	nv_trace(object, "destroying\n");
-	nv_ofuncs(object)->dtor(object);
-}
-
-void
-nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref)
-{
-	if (obj) {
-		atomic_inc(&obj->refcount);
-		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
-	}
-
-	if (*ref) {
-		int dead = atomic_dec_and_test(&(*ref)->refcount);
-		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
-		if (dead)
-			nvkm_object_dtor(*ref);
-	}
-
-	*ref = obj;
-}
-
-int
-nvkm_object_inc(struct nvkm_object *object)
-{
-	int ref = atomic_add_return(1, &object->usecount);
-	int ret;
-
-	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
-	if (ref != 1)
-		return 0;
-
-	nv_trace(object, "initialising...\n");
-	if (object->parent) {
-		ret = nvkm_object_inc(object->parent);
-		if (ret) {
-			nv_error(object, "parent failed, %d\n", ret);
-			goto fail_parent;
-		}
-	}
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (ret) {
-			nv_error(object, "engine failed, %d\n", ret);
-			goto fail_engine;
-		}
-	}
-
-	ret = nv_ofuncs(object)->init(object);
-	atomic_set(&object->usecount, 1);
-	if (ret) {
-		nv_error(object, "init failed, %d\n", ret);
-		goto fail_self;
-	}
-
-	nv_trace(object, "initialised\n");
-	return 0;
-
-fail_self:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-	}
-fail_engine:
-	if (object->parent)
-		 nvkm_object_dec(object->parent, false);
-fail_parent:
-	atomic_dec(&object->usecount);
-	return ret;
-}
-
-static int
-nvkm_object_decf(struct nvkm_object *object)
-{
-	int ret;
-
-	nv_trace(object, "stopping...\n");
-
-	ret = nv_ofuncs(object)->fini(object, false);
-	atomic_set(&object->usecount, 0);
-	if (ret)
-		nv_warn(object, "failed fini, %d\n", ret);
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-	}
-
-	if (object->parent)
-		nvkm_object_dec(object->parent, false);
-
-	nv_trace(object, "stopped\n");
-	return 0;
-}
-
-static int
-nvkm_object_decs(struct nvkm_object *object)
-{
-	int ret, rret;
-
-	nv_trace(object, "suspending...\n");
-
-	ret = nv_ofuncs(object)->fini(object, true);
-	atomic_set(&object->usecount, 0);
-	if (ret) {
-		nv_error(object, "failed suspend, %d\n", ret);
-		return ret;
-	}
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_dec(&object->engine->subdev.object, true);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (ret) {
-			nv_warn(object, "engine failed suspend, %d\n", ret);
-			goto fail_engine;
-		}
-	}
-
-	if (object->parent) {
-		ret = nvkm_object_dec(object->parent, true);
-		if (ret) {
-			nv_warn(object, "parent failed suspend, %d\n", ret);
-			goto fail_parent;
-		}
-	}
-
-	nv_trace(object, "suspended\n");
-	return 0;
-
-fail_parent:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		rret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (rret)
-			nv_fatal(object, "engine failed to reinit, %d\n", rret);
-	}
-
-fail_engine:
-	rret = nv_ofuncs(object)->init(object);
-	if (rret)
-		nv_fatal(object, "failed to reinit, %d\n", rret);
-
-	return ret;
-}
-
-int
-nvkm_object_dec(struct nvkm_object *object, bool suspend)
-{
-	int ref = atomic_add_return(-1, &object->usecount);
-	int ret;
-
-	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
-
-	if (ref == 0) {
-		if (suspend)
-			ret = nvkm_object_decs(object);
-		else
-			ret = nvkm_object_decf(object);
-
-		if (ret) {
-			atomic_inc(&object->usecount);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-void
-nvkm_object_debug(void)
-{
-#ifdef NVKM_OBJECT_MAGIC
-	struct nvkm_object *object;
-	if (!list_empty(&_objlist)) {
-		nv_fatal(NULL, "*******************************************\n");
-		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
-		nv_fatal(NULL, "*******************************************\n");
-		list_for_each_entry(object, &_objlist, list) {
-			nv_fatal(object, "%p/%p/%d/%d\n",
-				 object->parent, object->engine,
-				 atomic_read(&object->refcount),
-				 atomic_read(&object->usecount));
-		}
-	}
-#endif
+	const struct nvkm_object_func *func =
+		oclass->base.func ? oclass->base.func : &nvkm_object_func;
+	return nvkm_object_new_(func, oclass, data, size, pobject);
 }
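
For reference, a minimal sketch of the rewritten construction path for a class that
supplies no constructor of its own. Only nvkm_object_new() and its argument-size
check come from the hunk above; the wrapper is hypothetical:

/* Hypothetical caller, illustration only.  With an empty argument
 * buffer, the default nvkm_object_func path kzalloc()s a bare object
 * and runs nvkm_object_ctor() on it; any non-empty argument payload
 * makes the default constructor return -ENOSYS. */
static int
example_object_new(const struct nvkm_oclass *oclass,
		   struct nvkm_object **pobject)
{
	return nvkm_object_new(oclass, NULL, 0, pobject);
}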
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
new file mode 100644
index 0000000..e31a047
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/oproxy.h>
+
+static int
+nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
+}
+
+static int
+nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
+{
+	return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
+}
+
+static int
+nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+}
+
+static int
+nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_object_bind(nvkm_oproxy(object)->object,
+				parent, align, pgpuobj);
+}
+
+static int
+nvkm_oproxy_sclass(struct nvkm_object *object, int index,
+		   struct nvkm_oclass *oclass)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	oclass->parent = oproxy->object;
+	if (!oproxy->object->func->sclass)
+		return -ENODEV;
+	return oproxy->object->func->sclass(oproxy->object, index, oclass);
+}
+
+static int
+nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->fini[0]) {
+		ret = oproxy->func->fini[0](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->object->func->fini) {
+		ret = oproxy->object->func->fini(oproxy->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->func->fini[1]) {
+		ret = oproxy->func->fini[1](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_oproxy_init(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->init[0]) {
+		ret = oproxy->func->init[0](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->object->func->init) {
+		ret = oproxy->object->func->init(oproxy->object);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->func->init[1]) {
+		ret = oproxy->func->init[1](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_oproxy_dtor(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	if (oproxy->func->dtor[0])
+		oproxy->func->dtor[0](oproxy);
+	nvkm_object_del(&oproxy->object);
+	if (oproxy->func->dtor[1])
+		oproxy->func->dtor[1](oproxy);
+	return oproxy;
+}
+
+static const struct nvkm_object_func
+nvkm_oproxy_func = {
+	.dtor = nvkm_oproxy_dtor,
+	.init = nvkm_oproxy_init,
+	.fini = nvkm_oproxy_fini,
+	.mthd = nvkm_oproxy_mthd,
+	.ntfy = nvkm_oproxy_ntfy,
+	.map = nvkm_oproxy_map,
+	.rd08 = nvkm_oproxy_rd08,
+	.rd16 = nvkm_oproxy_rd16,
+	.rd32 = nvkm_oproxy_rd32,
+	.wr08 = nvkm_oproxy_wr08,
+	.wr16 = nvkm_oproxy_wr16,
+	.wr32 = nvkm_oproxy_wr32,
+	.bind = nvkm_oproxy_bind,
+	.sclass = nvkm_oproxy_sclass,
+};
+
+void
+nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
+{
+	nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
+	oproxy->func = func;
+}
+
+int
+nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
+{
+	if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(func, oclass, *poproxy);
+	return 0;
+}
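
The func table below is a hypothetical sketch of how a wrapper class would hook the
proxy's phases; only the nvkm_oproxy entry points and the [0]-before/[1]-after
ordering are taken from the file above:

/* Illustration only: slot [0] runs before the wrapped object's own
 * hook and slot [1] after it, per nvkm_oproxy_init()/_fini()/_dtor(). */
static int
example_oproxy_pre_init(struct nvkm_oproxy *oproxy)
{
	/* e.g. acquire a resource before oproxy->object is initialised */
	return 0;
}

static void
example_oproxy_post_dtor(struct nvkm_oproxy *oproxy)
{
	/* runs after nvkm_object_del(&oproxy->object) has torn it down */
}

static const struct nvkm_oproxy_func
example_oproxy_func = {
	.init[0] = example_oproxy_pre_init,
	.dtor[1] = example_oproxy_post_dtor,
};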
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 19d153f..3e62cf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -73,6 +73,24 @@
 	return value;
 }
 
+long
+nvkm_longopt(const char *optstr, const char *opt, long value)
+{
+	long result = value;
+	int arglen;
+	char *s;
+
+	optstr = nvkm_stropt(optstr, opt, &arglen);
+	if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
+		int ret = kstrtol(s, 0, &value);
+		if (ret == 0)
+			result = value;
+		kfree(s);
+	}
+
+	return result;
+}
+
 int
 nvkm_dbgopt(const char *optstr, const char *sub)
 {
@@ -95,7 +113,7 @@
 				else if (!strncasecmpz(optstr, "warn", len))
 					level = NV_DBG_WARN;
 				else if (!strncasecmpz(optstr, "info", len))
-					level = NV_DBG_INFO_NORMAL;
+					level = NV_DBG_INFO;
 				else if (!strncasecmpz(optstr, "debug", len))
 					level = NV_DBG_DEBUG;
 				else if (!strncasecmpz(optstr, "trace", len))
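
A one-line usage sketch of the new helper; the option name is made up, and optstr
would typically be a module-option string:

/* Illustration only: returns 64 when "NvExampleOpt" is absent from
 * optstr or its value fails kstrtol() parsing. */
static long
example_longopt(const char *optstr)
{
	return nvkm_longopt(optstr, "NvExampleOpt", 64);
}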
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
deleted file mode 100644
index dd56cd1..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/parent.h>
-#include <core/client.h>
-#include <core/engine.h>
-
-int
-nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
-		   struct nvkm_object **pengine,
-		   struct nvkm_oclass **poclass)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if ((sclass->oclass->handle & 0xffff) == handle) {
-			*pengine = &parent->engine->subdev.object;
-			*poclass = sclass->oclass;
-			return 0;
-		}
-
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (mask) {
-		int i = __ffs64(mask);
-
-		if (nv_iclass(parent, NV_CLIENT_CLASS))
-			engine = nv_engine(nv_client(parent)->device);
-		else
-			engine = nvkm_engine(parent, i);
-
-		if (engine) {
-			oclass = engine->sclass;
-			while (oclass->ofuncs) {
-				if ((oclass->handle & 0xffff) == handle) {
-					*pengine = nv_object(engine);
-					*poclass = oclass;
-					return 0;
-				}
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return -EINVAL;
-}
-
-int
-nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	int nr = -1, i;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if (++nr < size)
-			lclass[nr] = sclass->oclass->handle & 0xffff;
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (i = __ffs64(mask), mask) {
-		engine = nvkm_engine(parent, i);
-		if (engine && (oclass = engine->sclass)) {
-			while (oclass->ofuncs) {
-				if (++nr < size)
-					lclass[nr] = oclass->handle & 0xffff;
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return nr + 1;
-}
-
-int
-nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int size, void **pobject)
-{
-	struct nvkm_parent *object;
-	struct nvkm_sclass *nclass;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_PARENT_CLASS, size, pobject);
-	object = *pobject;
-	if (ret)
-		return ret;
-
-	while (sclass && sclass->ofuncs) {
-		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
-		if (!nclass)
-			return -ENOMEM;
-
-		nclass->sclass = object->sclass;
-		object->sclass = nclass;
-		nclass->engine = engine ? nv_engine(engine) : NULL;
-		nclass->oclass = sclass;
-		sclass++;
-	}
-
-	object->engine = engcls;
-	return 0;
-}
-
-void
-nvkm_parent_destroy(struct nvkm_parent *parent)
-{
-	struct nvkm_sclass *sclass;
-
-	while ((sclass = parent->sclass)) {
-		parent->sclass = sclass->sclass;
-		kfree(sclass);
-	}
-
-	nvkm_object_destroy(&parent->object);
-}
-
-
-void
-_nvkm_parent_dtor(struct nvkm_object *object)
-{
-	nvkm_parent_destroy(nv_parent(object));
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
deleted file mode 100644
index 4a220eb..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/printk.h>
-#include <core/client.h>
-#include <core/device.h>
-
-int nv_info_debug_level = NV_DBG_INFO_NORMAL;
-
-void
-nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
-{
-	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
-	const char *pfx;
-	char mfmt[256];
-	va_list args;
-
-	switch (level) {
-	case NV_DBG_FATAL:
-		pfx = KERN_CRIT;
-		break;
-	case NV_DBG_ERROR:
-		pfx = KERN_ERR;
-		break;
-	case NV_DBG_WARN:
-		pfx = KERN_WARNING;
-		break;
-	case NV_DBG_INFO_NORMAL:
-		pfx = KERN_INFO;
-		break;
-	case NV_DBG_DEBUG:
-	case NV_DBG_PARANOIA:
-	case NV_DBG_TRACE:
-	case NV_DBG_SPAM:
-	default:
-		pfx = KERN_DEBUG;
-		break;
-	}
-
-	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
-		struct nvkm_object *device;
-		struct nvkm_object *subdev;
-		char obuf[64], *ofmt = "";
-
-		if (object->engine == NULL) {
-			subdev = object;
-			while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
-				subdev = subdev->parent;
-		} else {
-			subdev = &object->engine->subdev.object;
-		}
-
-		device = subdev;
-		if (device->parent)
-			device = device->parent;
-
-		if (object != subdev) {
-			snprintf(obuf, sizeof(obuf), "[0x%08x]",
-				 nv_hclass(object));
-			ofmt = obuf;
-		}
-
-		if (level > nv_subdev(subdev)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
-			 name[level], nv_subdev(subdev)->name,
-			 nv_device(device)->name, ofmt, fmt);
-	} else
-	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
-		if (level > nv_client(object)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
-			 name[level], nv_client(object)->name, fmt);
-	} else {
-		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
-	}
-
-	va_start(args, fmt);
-	vprintk(mfmt, args);
-	va_end(args);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ebd4d15..3216e15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -22,8 +22,6 @@
 #include <core/ramht.h>
 #include <core/engine.h>
 
-#include <subdev/bar.h>
-
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
@@ -35,72 +33,130 @@
 	}
 
 	hash ^= chid << (ramht->bits - 4);
-	hash  = hash << 3;
 	return hash;
 }
 
-int
-nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
 	u32 co, ho;
 
 	co = ho = nvkm_ramht_hash(ramht, chid, handle);
 	do {
-		if (!nv_ro32(ramht, co + 4)) {
-			nv_wo32(ramht, co + 0, handle);
-			nv_wo32(ramht, co + 4, context);
-			if (bar)
-				bar->flush(bar);
-			return co;
+		if (ramht->data[co].chid == chid) {
+			if (ramht->data[co].handle == handle)
+				return ramht->data[co].inst;
 		}
 
-		co += 8;
-		if (co >= nv_gpuobj(ramht)->size)
+		if (++co >= ramht->size)
 			co = 0;
 	} while (co != ho);
 
-	return -ENOMEM;
+	return NULL;
+}
+
+static int
+nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	struct nvkm_ramht_data *data = &ramht->data[co];
+	u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
+	int ret;
+
+	nvkm_gpuobj_del(&data->inst);
+	data->chid = chid;
+	data->handle = handle;
+
+	if (object) {
+		ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
+		if (ret) {
+			if (ret != -ENODEV) {
+				data->chid = -1;
+				return ret;
+			}
+			data->inst = NULL;
+		}
+
+		if (data->inst) {
+			if (ramht->device->card_type >= NV_50)
+				inst = data->inst->node->offset;
+			else
+				inst = data->inst->addr;
+		}
+
+		if (addr < 0) context |= inst << -addr;
+		else          context |= inst >>  addr;
+	}
+
+	nvkm_kmap(ramht->gpuobj);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
+	nvkm_done(ramht->gpuobj);
+	return co + 1;
 }
 
 void
 nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
-	nv_wo32(ramht, cookie + 0, 0x00000000);
-	nv_wo32(ramht, cookie + 4, 0x00000000);
-	if (bar)
-		bar->flush(bar);
+	if (--cookie >= 0)
+		nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
 }
 
-static struct nvkm_oclass
-nvkm_ramht_oclass = {
-	.handle = 0x0000abcd,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = NULL,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
+int
+nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	u32 co, ho;
+
+	if (nvkm_ramht_search(ramht, chid, handle))
+		return -EEXIST;
+
+	co = ho = nvkm_ramht_hash(ramht, chid, handle);
+	do {
+		if (ramht->data[co].chid < 0) {
+			return nvkm_ramht_update(ramht, co, object, chid,
+						 addr, handle, context);
+		}
+
+		if (++co >= ramht->size)
+			co = 0;
+	} while (co != ho);
+
+	return -ENOSPC;
+}
+
+void
+nvkm_ramht_del(struct nvkm_ramht **pramht)
+{
+	struct nvkm_ramht *ramht = *pramht;
+	if (ramht) {
+		nvkm_gpuobj_del(&ramht->gpuobj);
+		kfree(*pramht);
+		*pramht = NULL;
+	}
+}
 
 int
-nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-	       u32 size, u32 align, struct nvkm_ramht **pramht)
+nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+	       struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
 {
 	struct nvkm_ramht *ramht;
-	int ret;
+	int ret, i;
 
-	ret = nvkm_gpuobj_create(parent, parent->engine ?
-				 &parent->engine->subdev.object : parent, /* <nv50 ramht */
-				 &nvkm_ramht_oclass, 0, pargpu, size,
-				 align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
-	*pramht = ramht;
+	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+					sizeof(*ramht->data), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ramht->device = device;
+	ramht->parent = parent;
+	ramht->size = size >> 3;
+	ramht->bits = order_base_2(ramht->size);
+	for (i = 0; i < ramht->size; i++)
+		ramht->data[i].chid = -1;
+
+	ret = nvkm_gpuobj_new(ramht->device, size, align, true,
+			      ramht->parent, &ramht->gpuobj);
 	if (ret)
-		return ret;
-
-	ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
-	return 0;
+		nvkm_ramht_del(pramht);
+	return ret;
 }
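
To make the new cookie convention concrete, a hypothetical caller; the size and the
zero addr/context arguments are placeholders, and only the nvkm_ramht_* signatures
come from the hunks above:

/* Illustration only.  nvkm_ramht_insert() returns co + 1 on success,
 * so 0 can never be a valid cookie and nvkm_ramht_remove(ramht, 0)
 * is a harmless no-op. */
static int
example_ramht_bind(struct nvkm_device *device, struct nvkm_gpuobj *parent,
		   struct nvkm_object *object, int chid, u32 handle)
{
	struct nvkm_ramht *ramht;
	int ret, cookie;

	ret = nvkm_ramht_new(device, 0x8000, 0, parent, &ramht);
	if (ret)
		return ret;

	/* -EEXIST on a duplicate handle for this chid, -ENOSPC if full */
	cookie = nvkm_ramht_insert(ramht, object, chid, 0, handle, 0);
	if (cookie < 0) {
		nvkm_ramht_del(&ramht);
		return cookie;
	}

	nvkm_ramht_remove(ramht, cookie);
	nvkm_ramht_del(&ramht);
	return 0;
}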
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c5fb3a79..7de9847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -25,96 +25,178 @@
 #include <core/device.h>
 #include <core/option.h>
 
-struct nvkm_subdev *
-nvkm_subdev(void *obj, int idx)
-{
-	struct nvkm_object *object = nv_object(obj);
-	while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
-		object = object->parent;
-	if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
-		object = nv_device(obj)->subdev[idx];
-	return object ? nv_subdev(object) : NULL;
-}
+static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
+
+const char *
+nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+	[NVKM_SUBDEV_BAR    ] = "bar",
+	[NVKM_SUBDEV_VBIOS  ] = "bios",
+	[NVKM_SUBDEV_BUS    ] = "bus",
+	[NVKM_SUBDEV_CLK    ] = "clk",
+	[NVKM_SUBDEV_DEVINIT] = "devinit",
+	[NVKM_SUBDEV_FB     ] = "fb",
+	[NVKM_SUBDEV_FUSE   ] = "fuse",
+	[NVKM_SUBDEV_GPIO   ] = "gpio",
+	[NVKM_SUBDEV_I2C    ] = "i2c",
+	[NVKM_SUBDEV_IBUS   ] = "priv",
+	[NVKM_SUBDEV_INSTMEM] = "imem",
+	[NVKM_SUBDEV_LTC    ] = "ltc",
+	[NVKM_SUBDEV_MC     ] = "mc",
+	[NVKM_SUBDEV_MMU    ] = "mmu",
+	[NVKM_SUBDEV_MXM    ] = "mxm",
+	[NVKM_SUBDEV_PCI    ] = "pci",
+	[NVKM_SUBDEV_PMU    ] = "pmu",
+	[NVKM_SUBDEV_THERM  ] = "therm",
+	[NVKM_SUBDEV_TIMER  ] = "tmr",
+	[NVKM_SUBDEV_VOLT   ] = "volt",
+	[NVKM_ENGINE_BSP    ] = "bsp",
+	[NVKM_ENGINE_CE0    ] = "ce0",
+	[NVKM_ENGINE_CE1    ] = "ce1",
+	[NVKM_ENGINE_CE2    ] = "ce2",
+	[NVKM_ENGINE_CIPHER ] = "cipher",
+	[NVKM_ENGINE_DISP   ] = "disp",
+	[NVKM_ENGINE_DMAOBJ ] = "dma",
+	[NVKM_ENGINE_FIFO   ] = "fifo",
+	[NVKM_ENGINE_GR     ] = "gr",
+	[NVKM_ENGINE_IFB    ] = "ifb",
+	[NVKM_ENGINE_ME     ] = "me",
+	[NVKM_ENGINE_MPEG   ] = "mpeg",
+	[NVKM_ENGINE_MSENC  ] = "msenc",
+	[NVKM_ENGINE_MSPDEC ] = "mspdec",
+	[NVKM_ENGINE_MSPPP  ] = "msppp",
+	[NVKM_ENGINE_MSVLD  ] = "msvld",
+	[NVKM_ENGINE_PM     ] = "pm",
+	[NVKM_ENGINE_SEC    ] = "sec",
+	[NVKM_ENGINE_SW     ] = "sw",
+	[NVKM_ENGINE_VIC    ] = "vic",
+	[NVKM_ENGINE_VP     ] = "vp",
+};
 
 void
-nvkm_subdev_reset(struct nvkm_object *subdev)
+nvkm_subdev_intr(struct nvkm_subdev *subdev)
 {
-	nv_trace(subdev, "resetting...\n");
-	nv_ofuncs(subdev)->fini(subdev, false);
-	nv_debug(subdev, "reset\n");
-}
-
-int
-nvkm_subdev_init(struct nvkm_subdev *subdev)
-{
-	int ret = nvkm_object_init(&subdev->object);
-	if (ret)
-		return ret;
-
-	nvkm_subdev_reset(&subdev->object);
-	return 0;
-}
-
-int
-_nvkm_subdev_init(struct nvkm_object *object)
-{
-	return nvkm_subdev_init(nv_subdev(object));
+	if (subdev->func->intr)
+		subdev->func->intr(subdev);
 }
 
 int
 nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	if (subdev->unit) {
-		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
-		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+	struct nvkm_device *device = subdev->device;
+	const char *action = suspend ? "suspend" : "fini";
+	u32 pmc_enable = subdev->pmc_enable;
+	s64 time;
+
+	nvkm_trace(subdev, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
+
+	if (subdev->func->fini) {
+		int ret = subdev->func->fini(subdev, suspend);
+		if (ret) {
+			nvkm_error(subdev, "%s failed, %d\n", action, ret);
+			if (suspend)
+				return ret;
+		}
 	}
 
-	return nvkm_object_fini(&subdev->object, suspend);
+	if (pmc_enable) {
+		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+		nvkm_rd32(device, 0x000200);
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
+	return 0;
 }
 
 int
-_nvkm_subdev_fini(struct nvkm_object *object, bool suspend)
+nvkm_subdev_preinit(struct nvkm_subdev *subdev)
 {
-	return nvkm_subdev_fini(nv_subdev(object), suspend);
-}
+	s64 time;
 
-void
-nvkm_subdev_destroy(struct nvkm_subdev *subdev)
-{
-	int subidx = nv_hclass(subdev) & 0xff;
-	nv_device(subdev)->subdev[subidx] = NULL;
-	nvkm_object_destroy(&subdev->object);
-}
+	nvkm_trace(subdev, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
 
-void
-_nvkm_subdev_dtor(struct nvkm_object *object)
-{
-	nvkm_subdev_destroy(nv_subdev(object));
+	if (subdev->func->preinit) {
+		int ret = subdev->func->preinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "preinit failed, %d\n", ret);
+			return ret;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "preinit completed in %lldus\n", time);
+	return 0;
 }
 
 int
-nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    const char *subname, const char *sysname,
-		    int size, void **pobject)
+nvkm_subdev_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_subdev *subdev;
+	s64 time;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_SUBDEV_CLASS, size, pobject);
-	subdev = *pobject;
-	if (ret)
-		return ret;
+	nvkm_trace(subdev, "init running...\n");
+	time = ktime_to_us(ktime_get());
 
-	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
-	subdev->name = subname;
+	if (subdev->func->oneinit && !subdev->oneinit) {
+		s64 time;
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = subdev->func->oneinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "one-time init failed, %d\n", ret);
+			return ret;
+		}
 
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		subdev->debug = nvkm_dbgopt(device->dbgopt, subname);
-		subdev->mmio  = nv_subdev(device)->mmio;
+		subdev->oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
 	}
 
+	if (subdev->func->init) {
+		ret = subdev->func->init(subdev);
+		if (ret) {
+			nvkm_error(subdev, "init failed, %d\n", ret);
+			return ret;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "init completed in %lldus\n", time);
 	return 0;
 }
+
+void
+nvkm_subdev_del(struct nvkm_subdev **psubdev)
+{
+	struct nvkm_subdev *subdev = *psubdev;
+	s64 time;
+
+	if (subdev && !WARN_ON(!subdev->func)) {
+		nvkm_trace(subdev, "destroy running...\n");
+		time = ktime_to_us(ktime_get());
+		if (subdev->func->dtor)
+			*psubdev = subdev->func->dtor(subdev);
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "destroy completed in %lldus\n", time);
+		kfree(*psubdev);
+		*psubdev = NULL;
+	}
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 struct nvkm_subdev *subdev)
+{
+	const char *name = nvkm_subdev_name[index];
+	subdev->func = func;
+	subdev->device = device;
+	subdev->index = index;
+	subdev->pmc_enable = pmc_enable;
+
+	__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
+	subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+}
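
A skeletal subdev against the new interface; everything named example_* is
hypothetical, and every hook is optional, as the NULL checks above show:

/* Illustration only: a do-nothing subdev.  Passing pmc_enable == 0
 * makes nvkm_subdev_fini() skip the PMC disable/re-enable dance. */
static int
example_subdev_init(struct nvkm_subdev *subdev)
{
	return 0;
}

static const struct nvkm_subdev_func
example_subdev_func = {
	.init = example_subdev_init,	/* .preinit/.oneinit/.fini/.intr/.dtor also available */
};

static void
example_subdev_ctor(struct nvkm_device *device, int index,
		    struct nvkm_subdev *subdev)
{
	nvkm_subdev_ctor(&example_subdev_func, device, index, 0, subdev);
}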
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 6bd3d75..36f7247 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -6,7 +6,7 @@
 include $(src)/nvkm/engine/cipher/Kbuild
 include $(src)/nvkm/engine/device/Kbuild
 include $(src)/nvkm/engine/disp/Kbuild
-include $(src)/nvkm/engine/dmaobj/Kbuild
+include $(src)/nvkm/engine/dma/Kbuild
 include $(src)/nvkm/engine/fifo/Kbuild
 include $(src)/nvkm/engine/gr/Kbuild
 include $(src)/nvkm/engine/mpeg/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index a0b1fd8..3ef0107 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/bsp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_sclass[] = {
-	{ 0x74b0, &nvkm_object_ofuncs },
-	{},
+static const struct nvkm_xtensa_func
+g84_bsp = {
+	.pmc_enable = 0x04008000,
+	.fifo_val = 0x1111,
+	.unkd28 = 0x90044,
+	.sclass = {
+		{ -1, -1, NV74_BSP },
+		{}
+	}
 };
 
-/*******************************************************************************
- * BSP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_cclass = {
-	.handle = NV_ENGCTX(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * BSP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
-				 "PBSP", "bsp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g84_bsp_cclass;
-	nv_engine(priv)->sclass = g84_bsp_sclass;
-	priv->fifo_val = 0x1111;
-	priv->unkd28 = 0x90044;
-	return 0;
+	return nvkm_xtensa_new_(&g84_bsp, device, index,
+				true, 0x103000, pengine);
 }
-
-struct nvkm_oclass
-g84_bsp_oclass = {
-	.handle = NV_ENGINE(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_bsp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index a558dfa..6226bcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -24,9 +24,9 @@
  */
 
 #ifdef GT215
-.section #gt215_pce_data
+.section #gt215_ce_data
 #else
-.section #gf100_pce_data
+.section #gf100_ce_data
 #endif
 
 ctx_object:                   .b32 0
@@ -128,9 +128,9 @@
 .b16 0x800 0
 
 #ifdef GT215
-.section #gt215_pce_code
+.section #gt215_ce_code
 #else
-.section #gf100_pce_code
+.section #gf100_ce_code
 #endif
 
 main:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9af6e4..05bb656 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pce_data[] = {
+uint32_t gf100_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@
 	0x00000800,
 };
 
-uint32_t gf100_pce_code[] = {
+uint32_t gf100_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f42c0d0d..972281d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pce_data[] = {
+uint32_t gt215_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@
 	0x00000800,
 };
 
-uint32_t gt215_pce_code[] = {
+uint32_t gt215_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 2d2e549..92a9f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -21,146 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
+#include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct gf100_ce_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_ce0_sclass[] = {
-	{ 0x90b5, &nvkm_object_ofuncs },
-	{},
-};
-
-static struct nvkm_oclass
-gf100_ce1_sclass[] = {
-	{ 0x90b8, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gf100_ce_context_ofuncs = {
-	.ctor = _nvkm_falcon_context_ctor,
-	.dtor = _nvkm_falcon_context_dtor,
-	.init = _nvkm_falcon_context_init,
-	.fini = _nvkm_falcon_context_fini,
-	.rd32 = _nvkm_falcon_context_rd32,
-	.wr32 = _nvkm_falcon_context_wr32,
-};
-
-static struct nvkm_oclass
-gf100_ce0_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-static struct nvkm_oclass
-gf100_ce1_cclass = {
-	.handle = NV_ENGCTX(CE1, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_ce_init(struct nvkm_object *object)
+static void
+gf100_ce_init(struct nvkm_falcon *ce)
 {
-	struct gf100_ce_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
-	return 0;
+	struct nvkm_device *device = ce->engine.subdev.device;
+	const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
+	nvkm_wr32(device, ce->addr + 0x084, index);
 }
 
-static int
-gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce0_cclass;
-	nv_engine(priv)->sclass = gf100_ce0_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
-}
-
-static int
-gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce1_cclass;
-	nv_engine(priv)->sclass = gf100_ce1_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce0_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_ce0 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000040,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DMA },
+		{}
+	}
 };
 
-struct nvkm_oclass
-gf100_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce1_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_ce1 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000080,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DECOMPRESS },
+		{}
+	}
 };
+
+int
+gf100_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_falcon_new_(&gf100_ce0, device, index, true,
+					0x104000, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_falcon_new_(&gf100_ce1, device, index, true,
+					0x105000, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index a998932..c541a1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -21,153 +21,47 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gk104_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_ce_sclass[] = {
-	{ 0xa0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gk104_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gk104_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gk104_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gk104_ce_intr(struct nvkm_subdev *subdev)
+void
+gk104_ce_intr(struct nvkm_engine *ce)
 {
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gk104_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
+	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
+	struct nvkm_subdev *subdev = &ce->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x104908 + base);
 	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x104908 + base, stat);
 	}
 }
 
-static int
-gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-static int
-gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-static int
-gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
+static const struct nvkm_engine_func
+gk104_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, KEPLER_DMA_COPY_A },
+		{}
+	}
 };
 
-struct nvkm_oclass
-gk104_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gk104_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
+int
+gk104_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00200000, true, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
index 577eb2e..8eaa72a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -21,153 +21,34 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gm204_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_ce_sclass[] = {
-	{ 0xb0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gm204_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gm204_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0x24),
-	.ofuncs = &gm204_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gm204_ce_intr(struct nvkm_subdev *subdev)
-{
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gm204_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
-	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+static const struct nvkm_engine_func
+gm204_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, MAXWELL_DMA_COPY_A },
+		{}
 	}
-}
-
-static int
-gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
 };
 
-struct nvkm_oclass
-gm204_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gm204_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
+int
+gm204_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00200000, true, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index d8bb429..402dcbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,50 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
-#include <engine/fifo.h>
+#include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-struct gt215_ce_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_sclass[] = {
-	{ 0x85b5, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-
-	},
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum
 gt215_ce_isr_error_name[] = {
@@ -75,78 +40,45 @@
 };
 
 void
-gt215_ce_intr(struct nvkm_subdev *subdev)
+gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_falcon *falcon = (void *)subdev;
-	struct nvkm_object *engctx;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
-	u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
-	u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
-	u32 addr = nv_ro32(falcon, 0x040) >> 16;
+	struct nvkm_subdev *subdev = &ce->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+	u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_ro32(falcon, 0x044);
-	int chid;
+	u32 data = nvkm_rd32(device, 0x104044 + base);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gt215_ce_isr_error_name, ssta);
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(falcon, "DISPATCH_ERROR [");
-		nvkm_enum_print(gt215_ce_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, inst << 12, nvkm_client_name(engctx), subc,
-		       mthd, data);
-		nv_wo32(falcon, 0x004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		nv_error(falcon, "unhandled intr 0x%08x\n", stat);
-		nv_wo32(falcon, 0x004, stat);
-	}
-
-	nvkm_engctx_put(engctx);
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
 }
 
-static int
-gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	bool enable = (nv_device(parent)->chipset != 0xaf);
-	struct gt215_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00802000;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gt215_ce_cclass;
-	nv_engine(priv)->sclass = gt215_ce_sclass;
-	nv_falcon(priv)->code.data = gt215_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
-	nv_falcon(priv)->data.data = gt215_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
-	return 0;
-}
-
-struct nvkm_oclass
-gt215_ce_oclass = {
-	.handle = NV_ENGINE(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ce_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gt215_ce = {
+	.code.data = gt215_ce_code,
+	.code.size = sizeof(gt215_ce_code),
+	.data.data = gt215_ce_data,
+	.data.size = sizeof(gt215_ce_data),
+	.pmc_enable = 0x00802000,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, GT212_DMA },
+		{}
+	}
 };
+
+int
+gt215_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(&gt215_ce, device, index,
+				(device->chipset != 0xaf), 0x104000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
new file mode 100644
index 0000000..e2fa8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_CE_PRIV_H__
+#define __NVKM_CE_PRIV_H__
+#include <engine/ce.h>
+
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gk104_ce_intr(struct nvkm_engine *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 13f3042..bfd0162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -25,76 +25,47 @@
 #include <engine/fifo.h>
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
 
-struct g84_cipher_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
 static int
-g84_cipher_object_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+g84_cipher_oclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-g84_cipher_ofuncs = {
-	.ctor = g84_cipher_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
+static const struct nvkm_object_func
+g84_cipher_oclass_func = {
+	.bind = g84_cipher_oclass_bind,
 };
 
-static struct nvkm_oclass
-g84_cipher_sclass[] = {
-	{ 0x74c1, &g84_cipher_ofuncs },
-	{}
-};
+static int
+g84_cipher_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
 
-/*******************************************************************************
- * PCIPHER context
- ******************************************************************************/
+}
 
-static struct nvkm_oclass
+static const struct nvkm_object_func
 g84_cipher_cclass = {
-	.handle = NV_ENGCTX(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
+	.bind = g84_cipher_cclass_bind,
 };
 
-/*******************************************************************************
- * PCIPHER engine/subdev functions
- ******************************************************************************/
-
 static const struct nvkm_bitfield
 g84_cipher_intr_mask[] = {
 	{ 0x00000001, "INVALID_STATE" },
@@ -106,79 +77,59 @@
 };
 
 static void
-g84_cipher_intr(struct nvkm_subdev *subdev)
+g84_cipher_intr(struct nvkm_engine *cipher)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g84_cipher_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x102130);
-	u32 mthd = nv_rd32(priv, 0x102190);
-	u32 data = nv_rd32(priv, 0x102194);
-	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
-	int chid;
+	struct nvkm_subdev *subdev = &cipher->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x102130);
+	u32 mthd = nvkm_rd32(device, 0x102190);
+	u32 data = nvkm_rd32(device, 0x102194);
+	u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff;
+	unsigned long flags;
+	char msg[128];
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
+	chan = nvkm_fifo_chan_inst(fifo, (u64)inst << 12, &flags);
 	if (stat) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(g84_cipher_intr_mask, stat);
-		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
+		nvkm_error(subdev,  "%08x [%s] ch %d [%010llx %s] "
+				    "mthd %04x data %08x\n", stat, msg,
+			   chan ? chan->chid : -1, (u64)inst << 12,
+			   chan ? chan->object.client->name : "unknown",
+			   mthd, data);
 	}
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 
-	nv_wr32(priv, 0x102130, stat);
-	nv_wr32(priv, 0x10200c, 0x10);
-
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x102130, stat);
+	nvkm_wr32(device, 0x10200c, 0x10);
 }
 
 static int
-g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+g84_cipher_init(struct nvkm_engine *cipher)
 {
-	struct g84_cipher_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCIPHER", "cipher", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g84_cipher_intr;
-	nv_engine(priv)->cclass = &g84_cipher_cclass;
-	nv_engine(priv)->sclass = g84_cipher_sclass;
+	struct nvkm_device *device = cipher->subdev.device;
+	nvkm_wr32(device, 0x102130, 0xffffffff);
+	nvkm_wr32(device, 0x102140, 0xffffffbf);
+	nvkm_wr32(device, 0x10200c, 0x00000010);
 	return 0;
 }
 
-static int
-g84_cipher_init(struct nvkm_object *object)
-{
-	struct g84_cipher_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_engine_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x102130, 0xffffffff);
-	nv_wr32(priv, 0x102140, 0xffffffbf);
-	nv_wr32(priv, 0x10200c, 0x00000010);
-	return 0;
-}
-
-struct nvkm_oclass
-g84_cipher_oclass = {
-	.handle = NV_ENGINE(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_cipher_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = g84_cipher_init,
-		.fini = _nvkm_engine_fini,
-	},
+static const struct nvkm_engine_func
+g84_cipher = {
+	.init = g84_cipher_init,
+	.intr = g84_cipher_intr,
+	.cclass = &g84_cipher_cclass,
+	.sclass = {
+		{ -1, -1, NV74_CIPHER, &g84_cipher_oclass_func },
+		{}
+	}
 };
+
+int
+g84_cipher_new(struct nvkm_device *device, int index,
+	       struct nvkm_engine **pengine)
+{
+	return nvkm_engine_new_(&g84_cipher, device, index,
+				0x00004000, true, pengine);
+}
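
In the rewritten g84_cipher_intr(), the status word is decoded against the
g84_cipher_intr_mask table with nvkm_snprintbf() before logging, replacing
the old nvkm_bitfield_print()/pr_cont() pair. A rough standalone
approximation of that decode step (the helper below guesses at the
behaviour, it is not the driver's implementation):

#include <stdio.h>

struct bitfield {
	unsigned int mask;
	const char *name;
};

/* Name table in the style of g84_cipher_intr_mask above. */
static const struct bitfield intr_mask[] = {
	{ 0x00000001, "INVALID_STATE" },
	{ 0x00000002, "ILLEGAL_MTHD" },
	{}
};

/* Append the names of all set bits to buf, nvkm_snprintbf-style. */
static void snprintbf(char *buf, size_t size,
		      const struct bitfield *bf, unsigned int value)
{
	size_t len = 0;
	buf[0] = '\0';
	for (; bf->name && len < size; bf++) {
		if (value & bf->mask)
			len += snprintf(buf + len, size - len, "%s%s",
					len ? " " : "", bf->name);
	}
}

int main(void)
{
	char msg[128];
	snprintbf(msg, sizeof(msg), intr_mask, 0x3);
	printf("stat 00000003 [%s]\n", msg); /* INVALID_STATE ILLEGAL_MTHD */
	return 0;
}
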
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index de1bf09..09032ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,12 +1,6 @@
 nvkm-y += nvkm/engine/device/acpi.o
 nvkm-y += nvkm/engine/device/base.o
 nvkm-y += nvkm/engine/device/ctrl.o
-nvkm-y += nvkm/engine/device/nv04.o
-nvkm-y += nvkm/engine/device/nv10.o
-nvkm-y += nvkm/engine/device/nv20.o
-nvkm-y += nvkm/engine/device/nv30.o
-nvkm-y += nvkm/engine/device/nv40.o
-nvkm-y += nvkm/engine/device/nv50.o
-nvkm-y += nvkm/engine/device/gf100.o
-nvkm-y += nvkm/engine/device/gk104.o
-nvkm-y += nvkm/engine/device/gm100.o
+nvkm-y += nvkm/engine/device/pci.o
+nvkm-y += nvkm/engine/device/tegra.o
+nvkm-y += nvkm/engine/device/user.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index f42706e..fdca90b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -40,21 +40,19 @@
 }
 #endif
 
-int
-nvkm_acpi_fini(struct nvkm_device *device, bool suspend)
+void
+nvkm_acpi_fini(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	unregister_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
 
-int
+void
 nvkm_acpi_init(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
 	register_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 82dd359..1bbe76e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -3,6 +3,6 @@
 #include <core/os.h>
 struct nvkm_device;
 
-int nvkm_acpi_init(struct nvkm_device *);
-int nvkm_acpi_fini(struct nvkm_device *, bool);
+void nvkm_acpi_init(struct nvkm_device *);
+void nvkm_acpi_fini(struct nvkm_device *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 63d8e52..94a906b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,33 +24,33 @@
 #include "priv.h"
 #include "acpi.h"
 
-#include <core/client.h>
-#include <core/option.h>
 #include <core/notify.h>
-#include <core/parent.h>
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
+#include <core/option.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
+#include <subdev/bios.h>
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
 
-struct nvkm_device *
-nvkm_device_find(u64 name)
+static struct nvkm_device *
+nvkm_device_find_locked(u64 handle)
 {
-	struct nvkm_device *device, *match = NULL;
-	mutex_lock(&nv_devices_mutex);
+	struct nvkm_device *device;
 	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name) {
-			match = device;
-			break;
-		}
+		if (device->handle == handle)
+			return device;
 	}
+	return NULL;
+}
+
+struct nvkm_device *
+nvkm_device_find(u64 handle)
+{
+	struct nvkm_device *device;
+	mutex_lock(&nv_devices_mutex);
+	device = nvkm_device_find_locked(handle);
 	mutex_unlock(&nv_devices_mutex);
-	return match;
+	return device;
 }
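
nvkm_device_find() is split into a _locked worker plus a public wrapper
that only manages nv_devices_mutex, so code paths that already hold the
mutex can reuse the lookup. A minimal pthread rendition of the same split,
with invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

struct node { unsigned long long handle; struct node *next; };
static struct node tail = { 2, NULL }, head = { 1, &tail };

/* Worker: caller must hold list_mutex. */
static struct node *find_locked(unsigned long long handle)
{
	struct node *n;
	for (n = &head; n; n = n->next) {
		if (n->handle == handle)
			return n;
	}
	return NULL;
}

/* Public wrapper: only takes and drops the lock around the worker. */
static struct node *find(unsigned long long handle)
{
	struct node *n;
	pthread_mutex_lock(&list_mutex);
	n = find_locked(handle);
	pthread_mutex_unlock(&list_mutex);
	return n;
}

int main(void)
{
	printf("found: %d\n", find(2) != NULL);
	return 0;
}
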
 
 int
@@ -67,280 +67,2272 @@
 	return nr;
 }
 
-/******************************************************************************
- * nvkm_devobj (0x0080): class implementation
- *****************************************************************************/
+static const struct nvkm_device_chip
+null_chipset = {
+	.name = "NULL",
+	.bios = nvkm_bios_new,
+};
 
-struct nvkm_devobj {
-	struct nvkm_parent base;
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
+static const struct nvkm_device_chip
+nv4_chipset = {
+	.name = "NV04",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv04_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv5_chipset = {
+	.name = "NV05",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv05_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv10_chipset = {
+	.name = "NV10",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.gr = nv10_gr_new,
+};
+
+static const struct nvkm_device_chip
+nv11_chipset = {
+	.name = "NV11",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv15_chipset = {
+	.name = "NV15",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv17_chipset = {
+	.name = "NV17",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv18_chipset = {
+	.name = "NV18",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1a_chipset = {
+	.name = "nForce",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1f_chipset = {
+	.name = "nForce2",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv20_chipset = {
+	.name = "NV20",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv20_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv20_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv25_chipset = {
+	.name = "NV25",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv28_chipset = {
+	.name = "NV28",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv2a_chipset = {
+	.name = "NV2A",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv2a_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv30_chipset = {
+	.name = "NV30",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv31_chipset = {
+	.name = "NV31",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv34_chipset = {
+	.name = "NV34",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv34_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv35_chipset = {
+	.name = "NV35",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv35_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv36_chipset = {
+	.name = "NV36",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv36_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv40_chipset = {
+	.name = "NV40",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv40_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv41_chipset = {
+	.name = "NV41",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv42_chipset = {
+	.name = "NV42",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv43_chipset = {
+	.name = "NV43",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv44_chipset = {
+	.name = "NV44",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv45_chipset = {
+	.name = "NV45",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv46_chipset = {
+	.name = "G72",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv47_chipset = {
+	.name = "G70",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv47_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv49_chipset = {
+	.name = "G71",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4a_chipset = {
+	.name = "NV44A",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4b_chipset = {
+	.name = "G73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4c_chipset = {
+	.name = "C61",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4e_chipset = {
+	.name = "C51",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv4e_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv4e_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv50_chipset = {
+	.name = "G80",
+	.bar = nv50_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = nv50_clk_new,
+	.devinit = nv50_devinit_new,
+	.fb = nv50_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = nv50_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv50_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = nv50_fifo_new,
+	.gr = nv50_gr_new,
+	.mpeg = nv50_mpeg_new,
+	.pm = nv50_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv63_chipset = {
+	.name = "C73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv67_chipset = {
+	.name = "C67",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv68_chipset = {
+	.name = "C68",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv84_chipset = {
+	.name = "G84",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv86_chipset = {
+	.name = "G86",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv92_chipset = {
+	.name = "G92",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv94_chipset = {
+	.name = "G94",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv96_chipset = {
+	.name = "G96",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv98_chipset = {
+	.name = "G98",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva0_chipset = {
+	.name = "GT200",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = gt200_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = gt200_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nva3_chipset = {
+	.name = "GT215",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mpeg = g84_mpeg_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva5_chipset = {
+	.name = "GT216",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva8_chipset = {
+	.name = "GT218",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaa_chipset = {
+	.name = "MCP77/MCP78",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvac_chipset = {
+	.name = "MCP79/MCP7A",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp79_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaf_chipset = {
+	.name = "MCP89",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = mcp89_devinit_new,
+	.fb = mcp89_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp89_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = mcp89_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc0_chipset = {
+	.name = "GF100",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf100_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc1_chipset = {
+	.name = "GF108",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf108_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf108_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc3_chipset = {
+	.name = "GF106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc4_chipset = {
+	.name = "GF104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc8_chipset = {
+	.name = "GF110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf110_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvce_chipset = {
+	.name = "GF114",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvcf_chipset = {
+	.name = "GF116",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd7_chipset = {
+	.name = "GF117",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf117_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf117_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd9_chipset = {
+	.name = "GF119",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf119_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve4_chipset = {
+	.name = "GK104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve6_chipset = {
+	.name = "GK106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve7_chipset = {
+	.name = "GK107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvea_chipset = {
+	.name = "GK20A",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.clk = gk20a_clk_new,
+	.fb = gk20a_fb_new,
+	.fuse = gf100_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.pmu = gk20a_pmu_new,
+	.timer = gk20a_timer_new,
+	.volt = gk20a_volt_new,
+	.ce[2] = gk104_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gk20a_fifo_new,
+	.gr = gk20a_gr_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf0_chipset = {
+	.name = "GK110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf1_chipset = {
+	.name = "GK110B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110b_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv106_chipset = {
+	.name = "GK208B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv108_chipset = {
+	.name = "GK208",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv117_chipset = {
+	.name = "GM107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gm107_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.therm = gm107_therm_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gm107_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gm107_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv124_chipset = {
+	.name = "GM204",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm204_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv126_chipset = {
+	.name = "GM206",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm206_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv12b_chipset = {
+	.name = "GM20B",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.fb = gk20a_fb_new,
+	.fuse = gm107_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.timer = gk20a_timer_new,
+	.ce[2] = gm204_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gm20b_fifo_new,
+	.gr = gm20b_gr_new,
+	.sw = gf100_sw_new,
 };
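
Each nvXX_chipset table above lists exactly which subdev and engine
constructors exist for that chip; a missing slot means the unit is absent.
Device construction presumably picks one table by chipset id and invokes
the non-NULL entries (the selection logic itself lies outside this hunk).
The table-driven idea in miniature, with invented names:

#include <stdio.h>

typedef int (*ctor_t)(void);

static int demo_gr_ctor(void) { puts("gr: constructed"); return 0; }

/* One const table per chipset; a NULL slot means the chip lacks that unit. */
struct chip {
	const char *name;
	ctor_t fifo;
	ctor_t gr;
};

static const struct chip demo_chip = {
	.name = "DEMO",
	/* .fifo left NULL, like the chipsets above that omit an entry */
	.gr = demo_gr_ctor,
};

static int device_ctor(const struct chip *chip)
{
	int ret;
	printf("probing %s\n", chip->name);
	if (chip->fifo && (ret = chip->fifo()))
		return ret;
	if (chip->gr && (ret = chip->gr()))
		return ret;
	return 0;
}

int main(void)
{
	return device_ctor(&demo_chip);
}
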
 
 static int
-nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
+		       struct nvkm_notify *notify)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_fb *pfb = nvkm_fb(device);
-	struct nvkm_instmem *imem = nvkm_instmem(device);
-	union {
-		struct nv_device_info_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "device info size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "device info vers %d\n", args->v0.version);
-	} else
-		return ret;
-
-	switch (device->chipset) {
-	case 0x01a:
-	case 0x01f:
-	case 0x04c:
-	case 0x04e:
-	case 0x063:
-	case 0x067:
-	case 0x068:
-	case 0x0aa:
-	case 0x0ac:
-	case 0x0af:
-		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
-		break;
-	default:
-		if (device->pdev) {
-			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
-				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
-			else
-			if (pci_is_pcie(device->pdev))
-				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
-			else
-				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
-		} else {
-			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
-		}
-		break;
-	}
-
-	switch (device->card_type) {
-	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
-	case NV_10:
-	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
-	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
-	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
-	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
-	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
-	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
-	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
-	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
-	default:
-		args->v0.family = 0;
-		break;
-	}
-
-	args->v0.chipset  = device->chipset;
-	args->v0.revision = device->chiprev;
-	if (pfb && pfb->ram)
-		args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
-	else
-		args->v0.ram_size = args->v0.ram_user = 0;
-	if (imem && args->v0.ram_size > 0)
-		args->v0.ram_user = args->v0.ram_user - imem->reserved;
-
-	return 0;
-}
-
-static int
-nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	switch (mthd) {
-	case NV_DEVICE_V0_INFO:
-		return nvkm_devobj_info(object, data, size);
-	default:
-		break;
+	if (!WARN_ON(size != 0)) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = 0;
+		return 0;
 	}
 	return -EINVAL;
 }
 
-static u8
-nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
+static const struct nvkm_event_func
+nvkm_device_event_func = {
+	.ctor = nvkm_device_event_ctor,
+};
+
+struct nvkm_subdev *
+nvkm_device_subdev(struct nvkm_device *device, int index)
 {
-	return nv_rd08(object->engine, addr);
+	struct nvkm_engine *engine;
+
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
+	switch (index) {
+#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+	_(BAR    , device->bar    , &device->bar->subdev);
+	_(VBIOS  , device->bios   , &device->bios->subdev);
+	_(BUS    , device->bus    , &device->bus->subdev);
+	_(CLK    , device->clk    , &device->clk->subdev);
+	_(DEVINIT, device->devinit, &device->devinit->subdev);
+	_(FB     , device->fb     , &device->fb->subdev);
+	_(FUSE   , device->fuse   , &device->fuse->subdev);
+	_(GPIO   , device->gpio   , &device->gpio->subdev);
+	_(I2C    , device->i2c    , &device->i2c->subdev);
+	_(IBUS   , device->ibus   ,  device->ibus);
+	_(INSTMEM, device->imem   , &device->imem->subdev);
+	_(LTC    , device->ltc    , &device->ltc->subdev);
+	_(MC     , device->mc     , &device->mc->subdev);
+	_(MMU    , device->mmu    , &device->mmu->subdev);
+	_(MXM    , device->mxm    ,  device->mxm);
+	_(PCI    , device->pci    , &device->pci->subdev);
+	_(PMU    , device->pmu    , &device->pmu->subdev);
+	_(THERM  , device->therm  , &device->therm->subdev);
+	_(TIMER  , device->timer  , &device->timer->subdev);
+	_(VOLT   , device->volt   , &device->volt->subdev);
+#undef _
+	default:
+		engine = nvkm_device_engine(device, index);
+		if (engine)
+			return &engine->subdev;
+		break;
+	}
+	return NULL;
 }
 
-static u16
-nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
+struct nvkm_engine *
+nvkm_device_engine(struct nvkm_device *device, int index)
 {
-	return nv_rd16(object->engine, addr);
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
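+	/* as nvkm_device_subdev(), but for engine indices */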
+	switch (index) {
+#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
+	_(BSP    , device->bsp    ,  device->bsp);
+	_(CE0    , device->ce[0]  ,  device->ce[0]);
+	_(CE1    , device->ce[1]  ,  device->ce[1]);
+	_(CE2    , device->ce[2]  ,  device->ce[2]);
+	_(CIPHER , device->cipher ,  device->cipher);
+	_(DISP   , device->disp   , &device->disp->engine);
+	_(DMAOBJ , device->dma    , &device->dma->engine);
+	_(FIFO   , device->fifo   , &device->fifo->engine);
+	_(GR     , device->gr     , &device->gr->engine);
+	_(IFB    , device->ifb    ,  device->ifb);
+	_(ME     , device->me     ,  device->me);
+	_(MPEG   , device->mpeg   ,  device->mpeg);
+	_(MSENC  , device->msenc  ,  device->msenc);
+	_(MSPDEC , device->mspdec ,  device->mspdec);
+	_(MSPPP  , device->msppp  ,  device->msppp);
+	_(MSVLD  , device->msvld  ,  device->msvld);
+	_(PM     , device->pm     , &device->pm->engine);
+	_(SEC    , device->sec    ,  device->sec);
+	_(SW     , device->sw     , &device->sw->engine);
+	_(VIC    , device->vic    ,  device->vic);
+	_(VP     , device->vp     ,  device->vp);
+#undef _
+	default:
+		WARN_ON(1);
+		break;
+	}
+	return NULL;
 }
 
-static u32
-nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
+int
+nvkm_device_fini(struct nvkm_device *device, bool suspend)
 {
-	return nv_rd32(object->engine, addr);
-}
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
 
-static void
-nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	nv_wr08(object->engine, addr, data);
-}
+	nvdev_trace(device, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
 
-static void
-nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
-	nv_wr16(object->engine, addr, data);
-}
+	nvkm_acpi_fini(device);
 
-static void
-nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	nv_wr32(object->engine, addr, data);
-}
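+	/* tear subdevs down in the reverse of initialisation order */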
+	for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_fini(subdev, suspend);
+			if (ret && suspend)
+				goto fail;
+		}
+	}
 
-static int
-nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_device *device = nv_device(object);
-	*addr = nv_device_resource_start(device, 0);
-	*size = nv_device_resource_len(device, 0);
+	if (device->func->fini)
+		device->func->fini(device, suspend);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "%s completed in %lldus...\n", action, time);
 	return 0;
+
+fail:
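+	/* suspend failed part-way: bring already-stopped subdevs back up */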
+	do {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			int rret = nvkm_subdev_init(subdev);
+			if (rret)
+				nvkm_fatal(subdev, "failed restart, %d\n", rret);
+		}
+	} while (++i < NVKM_SUBDEV_NR);
+
+	nvdev_trace(device, "%s failed with %d\n", action, ret);
+	return ret;
 }
 
-static const u64 disable_map[] = {
-	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
-	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_CLK  ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_PM     ]  = NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
-	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
-	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
-	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
-	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
-	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
-	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
-	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
-	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
-	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
-	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
-	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
-	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
-	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
-	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
-	[NVDEV_SUBDEV_NR]	= 0,
-};
-
-static void
-nvkm_devobj_dtor(struct nvkm_object *object)
-{
-	struct nvkm_devobj *devobj = (void *)object;
-	int i;
-
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
-		nvkm_object_ref(NULL, &devobj->subdev[i]);
-
-	nvkm_parent_destroy(&devobj->base);
-}
-
-static struct nvkm_oclass
-nvkm_devobj_oclass_super = {
-	.handle = NV_DEVICE,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_devobj_dtor,
-		.init = _nvkm_parent_init,
-		.fini = _nvkm_parent_fini,
-		.mthd = nvkm_devobj_mthd,
-		.map  = nvkm_devobj_map,
-		.rd08 = nvkm_devobj_rd08,
-		.rd16 = nvkm_devobj_rd16,
-		.rd32 = nvkm_devobj_rd32,
-		.wr08 = nvkm_devobj_wr08,
-		.wr16 = nvkm_devobj_wr16,
-		.wr32 = nvkm_devobj_wr32,
-	}
-};
-
 static int
-nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_device_preinit(struct nvkm_device *device)
 {
-	union {
-		struct nv_device_v0 v0;
-	} *args = data;
-	struct nvkm_client *client = nv_client(parent);
-	struct nvkm_device *device;
-	struct nvkm_devobj *devobj;
-	u32 boot0, strap;
-	u64 disable, mmio_base, mmio_size;
-	void __iomem *map;
-	int ret, i, c;
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
 
-	nv_ioctl(parent, "create device size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create device v%d device %016llx "
-				 "disable %016llx debug0 %016llx\n",
-			 args->v0.version, args->v0.device,
-			 args->v0.disable, args->v0.debug0);
-	} else
-		return ret;
+	nvdev_trace(device, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
 
-	/* give privileged clients register access */
-	if (client->super)
-		oclass = &nvkm_devobj_oclass_super;
-
-	/* find the device subdev that matches what the client requested */
-	device = nv_device(client->device);
-	if (args->v0.device != ~0) {
-		device = nvkm_device_find(args->v0.device);
-		if (!device)
-			return -ENODEV;
+	if (device->func->preinit) {
+		ret = device->func->preinit(device);
+		if (ret)
+			goto fail;
 	}
 
-	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
-				 nvkm_control_oclass,
-				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				 (1ULL << NVDEV_ENGINE_FIFO) |
-				 (1ULL << NVDEV_ENGINE_DISP) |
-				 (1ULL << NVDEV_ENGINE_PM), &devobj);
-	*pobject = nv_object(devobj);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_preinit(subdev);
+			if (ret)
+				goto fail;
+		}
+	}
+
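+	/* POST the device; devinit may ask for subdevs to be disabled */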
+	ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+	if (ret)
+		goto fail;
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "preinit completed in %lldus\n", time);
+	return 0;
+
+fail:
+	nvdev_error(device, "preinit failed with %d\n", ret);
+	return ret;
+}
+
+int
+nvkm_device_init(struct nvkm_device *device)
+{
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
+
+	ret = nvkm_device_preinit(device);
 	if (ret)
 		return ret;
 
-	mmio_base = nv_device_resource_start(device, 0);
-	mmio_size = nv_device_resource_len(device, 0);
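+	/* make sure the device is down before bringing it up */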
+	nvkm_device_fini(device, false);
 
-	/* translate api disable mask into internal mapping */
-	disable = args->v0.debug0;
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if (args->v0.disable & disable_map[i])
-			disable |= (1ULL << i);
+	nvdev_trace(device, "init running...\n");
+	time = ktime_to_us(ktime_get());
+
+	if (device->func->init) {
+		ret = device->func->init(device);
+		if (ret)
+			goto fail;
 	}
 
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_init(subdev);
+			if (ret)
+				goto fail_subdev;
+		}
+	}
+
+	nvkm_acpi_init(device);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "init completed in %lldus\n", time);
+	return 0;
+
+fail_subdev:
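+	/* unwind: stop each subdev that had already been started */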
+	do {
+		if ((subdev = nvkm_device_subdev(device, i)))
+			nvkm_subdev_fini(subdev, false);
+	} while (--i >= 0);
+
+fail:
+	nvdev_error(device, "init failed with %d\n", ret);
+	return ret;
+}
+
+void
+nvkm_device_del(struct nvkm_device **pdevice)
+{
+	struct nvkm_device *device = *pdevice;
+	int i;
+	if (device) {
+		mutex_lock(&nv_devices_mutex);
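+		/* clear disable_mask so the loop below reaches every subdev */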
+		device->disable_mask = 0;
+		for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+			struct nvkm_subdev *subdev =
+				nvkm_device_subdev(device, i);
+			nvkm_subdev_del(&subdev);
+		}
+
+		nvkm_event_fini(&device->event);
+
+		if (device->pri)
+			iounmap(device->pri);
+		list_del(&device->head);
+
+		if (device->func->dtor)
+			*pdevice = device->func->dtor(device);
+		mutex_unlock(&nv_devices_mutex);
+
+		kfree(*pdevice);
+		*pdevice = NULL;
+	}
+}
+
+int
+nvkm_device_ctor(const struct nvkm_device_func *func,
+		 const struct nvkm_device_quirk *quirk,
+		 struct device *dev, enum nvkm_device_type type, u64 handle,
+		 const char *name, const char *cfg, const char *dbg,
+		 bool detect, bool mmio, u64 subdev_mask,
+		 struct nvkm_device *device)
+{
+	struct nvkm_subdev *subdev;
+	u64 mmio_base, mmio_size;
+	u32 boot0, strap;
+	void __iomem *map;
+	int ret = -EEXIST;
+	int i;
+
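+	/* refuse to register two devices under the same handle */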
+	mutex_lock(&nv_devices_mutex);
+	if (nvkm_device_find_locked(handle))
+		goto done;
+
+	device->func = func;
+	device->quirk = quirk;
+	device->dev = dev;
+	device->type = type;
+	device->handle = handle;
+	device->cfgopt = cfg;
+	device->dbgopt = dbg;
+	device->name = name;
+	list_add_tail(&device->head, &nv_devices);
+	device->debug = nvkm_dbgopt(device->dbgopt, "device");
+
+	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
+	if (ret)
+		goto done;
+
+	mmio_base = device->func->resource_addr(device, 0);
+	mmio_size = device->func->resource_size(device, 0);
+
 	/* identify the chipset, and determine classes of subdev/engines */
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
-	    !device->card_type) {
+	if (detect) {
 		map = ioremap(mmio_base, 0x102000);
-		if (map == NULL)
-			return -ENOMEM;
+		if (ret = -ENOMEM, map == NULL)
+			goto done;
 
 		/* switch mmio to cpu's native endianness */
 #ifndef __BIG_ENDIAN
@@ -397,31 +2389,83 @@
 			device->card_type = NV_04;
 		}
 
-		switch (device->card_type) {
-		case NV_04: ret = nv04_identify(device); break;
-		case NV_10:
-		case NV_11: ret = nv10_identify(device); break;
-		case NV_20: ret = nv20_identify(device); break;
-		case NV_30: ret = nv30_identify(device); break;
-		case NV_40: ret = nv40_identify(device); break;
-		case NV_50: ret = nv50_identify(device); break;
-		case NV_C0: ret = gf100_identify(device); break;
-		case NV_E0: ret = gk104_identify(device); break;
-		case GM100: ret = gm100_identify(device); break;
+		switch (device->chipset) {
+		case 0x004: device->chip = &nv4_chipset; break;
+		case 0x005: device->chip = &nv5_chipset; break;
+		case 0x010: device->chip = &nv10_chipset; break;
+		case 0x011: device->chip = &nv11_chipset; break;
+		case 0x015: device->chip = &nv15_chipset; break;
+		case 0x017: device->chip = &nv17_chipset; break;
+		case 0x018: device->chip = &nv18_chipset; break;
+		case 0x01a: device->chip = &nv1a_chipset; break;
+		case 0x01f: device->chip = &nv1f_chipset; break;
+		case 0x020: device->chip = &nv20_chipset; break;
+		case 0x025: device->chip = &nv25_chipset; break;
+		case 0x028: device->chip = &nv28_chipset; break;
+		case 0x02a: device->chip = &nv2a_chipset; break;
+		case 0x030: device->chip = &nv30_chipset; break;
+		case 0x031: device->chip = &nv31_chipset; break;
+		case 0x034: device->chip = &nv34_chipset; break;
+		case 0x035: device->chip = &nv35_chipset; break;
+		case 0x036: device->chip = &nv36_chipset; break;
+		case 0x040: device->chip = &nv40_chipset; break;
+		case 0x041: device->chip = &nv41_chipset; break;
+		case 0x042: device->chip = &nv42_chipset; break;
+		case 0x043: device->chip = &nv43_chipset; break;
+		case 0x044: device->chip = &nv44_chipset; break;
+		case 0x045: device->chip = &nv45_chipset; break;
+		case 0x046: device->chip = &nv46_chipset; break;
+		case 0x047: device->chip = &nv47_chipset; break;
+		case 0x049: device->chip = &nv49_chipset; break;
+		case 0x04a: device->chip = &nv4a_chipset; break;
+		case 0x04b: device->chip = &nv4b_chipset; break;
+		case 0x04c: device->chip = &nv4c_chipset; break;
+		case 0x04e: device->chip = &nv4e_chipset; break;
+		case 0x050: device->chip = &nv50_chipset; break;
+		case 0x063: device->chip = &nv63_chipset; break;
+		case 0x067: device->chip = &nv67_chipset; break;
+		case 0x068: device->chip = &nv68_chipset; break;
+		case 0x084: device->chip = &nv84_chipset; break;
+		case 0x086: device->chip = &nv86_chipset; break;
+		case 0x092: device->chip = &nv92_chipset; break;
+		case 0x094: device->chip = &nv94_chipset; break;
+		case 0x096: device->chip = &nv96_chipset; break;
+		case 0x098: device->chip = &nv98_chipset; break;
+		case 0x0a0: device->chip = &nva0_chipset; break;
+		case 0x0a3: device->chip = &nva3_chipset; break;
+		case 0x0a5: device->chip = &nva5_chipset; break;
+		case 0x0a8: device->chip = &nva8_chipset; break;
+		case 0x0aa: device->chip = &nvaa_chipset; break;
+		case 0x0ac: device->chip = &nvac_chipset; break;
+		case 0x0af: device->chip = &nvaf_chipset; break;
+		case 0x0c0: device->chip = &nvc0_chipset; break;
+		case 0x0c1: device->chip = &nvc1_chipset; break;
+		case 0x0c3: device->chip = &nvc3_chipset; break;
+		case 0x0c4: device->chip = &nvc4_chipset; break;
+		case 0x0c8: device->chip = &nvc8_chipset; break;
+		case 0x0ce: device->chip = &nvce_chipset; break;
+		case 0x0cf: device->chip = &nvcf_chipset; break;
+		case 0x0d7: device->chip = &nvd7_chipset; break;
+		case 0x0d9: device->chip = &nvd9_chipset; break;
+		case 0x0e4: device->chip = &nve4_chipset; break;
+		case 0x0e6: device->chip = &nve6_chipset; break;
+		case 0x0e7: device->chip = &nve7_chipset; break;
+		case 0x0ea: device->chip = &nvea_chipset; break;
+		case 0x0f0: device->chip = &nvf0_chipset; break;
+		case 0x0f1: device->chip = &nvf1_chipset; break;
+		case 0x106: device->chip = &nv106_chipset; break;
+		case 0x108: device->chip = &nv108_chipset; break;
+		case 0x117: device->chip = &nv117_chipset; break;
+		case 0x124: device->chip = &nv124_chipset; break;
+		case 0x126: device->chip = &nv126_chipset; break;
+		case 0x12b: device->chip = &nv12b_chipset; break;
 		default:
-			ret = -EINVAL;
-			break;
+			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+			goto done;
 		}
 
-		if (ret) {
-			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
-			return ret;
-		}
-
-		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
-		nv_info(device, "Chipset: %s (NV%02X)\n",
-			device->cname, device->chipset);
-		nv_info(device, "Family : NV%02X\n", device->card_type);
+		nvdev_info(device, "NVIDIA %s (%08x)\n",
+			   device->chip->name, boot0);
 
 		/* determine frequency of timing crystal */
 		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
@@ -436,300 +2480,89 @@
 		case 0x00400000: device->crystal = 27000; break;
 		case 0x00400040: device->crystal = 25000; break;
 		}
-
-		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
-	} else
-	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
-		device->cname = "NULL";
-		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
+	} else {
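+		/* detection skipped: fall back to the stub chipset entry */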
+		device->chip = &null_chipset;
 	}
 
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
-	    !nv_subdev(device)->mmio) {
-		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
-		if (!nv_subdev(device)->mmio) {
-			nv_error(device, "unable to map device registers\n");
+	if (!device->name)
+		device->name = device->chip->name;
+
+	if (mmio) {
+		device->pri = ioremap(mmio_base, mmio_size);
+		if (!device->pri) {
+			nvdev_error(device, "unable to map PRI\n");
-			return -ENOMEM;
+			/* don't return with nv_devices_mutex held */
+			ret = -ENOMEM;
+			goto done;
 		}
 	}
 
-	/* ensure requested subsystems are available for use */
-	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
-		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
-			continue;
+	mutex_init(&device->mutex);
 
-		if (device->subdev[i]) {
-			nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
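+		/* call the chipset's constructor for each subdev/engine the
+		 * caller requested; a ctor returning -ENODEV is tolerated
+		 */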
+#define _(s,m) case s:                                                         \
+	if (device->chip->m && (subdev_mask & (1ULL << (s)))) {                \
+		ret = device->chip->m(device, (s), &device->m);                \
+		if (ret) {                                                     \
+			subdev = nvkm_device_subdev(device, (s));              \
+			nvkm_subdev_del(&subdev);                              \
+			device->m = NULL;                                      \
+			if (ret != -ENODEV) {                                  \
+				nvdev_error(device, "%s ctor failed, %d\n",    \
+					    nvkm_subdev_name[s], ret);         \
+				goto done;                                     \
+			}                                                      \
+		}                                                              \
+	}                                                                      \
+	break
+		switch (i) {
+		_(NVKM_SUBDEV_BAR    ,     bar);
+		_(NVKM_SUBDEV_VBIOS  ,    bios);
+		_(NVKM_SUBDEV_BUS    ,     bus);
+		_(NVKM_SUBDEV_CLK    ,     clk);
+		_(NVKM_SUBDEV_DEVINIT, devinit);
+		_(NVKM_SUBDEV_FB     ,      fb);
+		_(NVKM_SUBDEV_FUSE   ,    fuse);
+		_(NVKM_SUBDEV_GPIO   ,    gpio);
+		_(NVKM_SUBDEV_I2C    ,     i2c);
+		_(NVKM_SUBDEV_IBUS   ,    ibus);
+		_(NVKM_SUBDEV_INSTMEM,    imem);
+		_(NVKM_SUBDEV_LTC    ,     ltc);
+		_(NVKM_SUBDEV_MC     ,      mc);
+		_(NVKM_SUBDEV_MMU    ,     mmu);
+		_(NVKM_SUBDEV_MXM    ,     mxm);
+		_(NVKM_SUBDEV_PCI    ,     pci);
+		_(NVKM_SUBDEV_PMU    ,     pmu);
+		_(NVKM_SUBDEV_THERM  ,   therm);
+		_(NVKM_SUBDEV_TIMER  ,   timer);
+		_(NVKM_SUBDEV_VOLT   ,    volt);
+		_(NVKM_ENGINE_BSP    ,     bsp);
+		_(NVKM_ENGINE_CE0    ,   ce[0]);
+		_(NVKM_ENGINE_CE1    ,   ce[1]);
+		_(NVKM_ENGINE_CE2    ,   ce[2]);
+		_(NVKM_ENGINE_CIPHER ,  cipher);
+		_(NVKM_ENGINE_DISP   ,    disp);
+		_(NVKM_ENGINE_DMAOBJ ,     dma);
+		_(NVKM_ENGINE_FIFO   ,    fifo);
+		_(NVKM_ENGINE_GR     ,      gr);
+		_(NVKM_ENGINE_IFB    ,     ifb);
+		_(NVKM_ENGINE_ME     ,      me);
+		_(NVKM_ENGINE_MPEG   ,    mpeg);
+		_(NVKM_ENGINE_MSENC  ,   msenc);
+		_(NVKM_ENGINE_MSPDEC ,  mspdec);
+		_(NVKM_ENGINE_MSPPP  ,   msppp);
+		_(NVKM_ENGINE_MSVLD  ,   msvld);
+		_(NVKM_ENGINE_PM     ,      pm);
+		_(NVKM_ENGINE_SEC    ,     sec);
+		_(NVKM_ENGINE_SW     ,      sw);
+		_(NVKM_ENGINE_VIC    ,     vic);
+		_(NVKM_ENGINE_VP     ,      vp);
+		default:
+			WARN_ON(1);
 			continue;
 		}
-
-		ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
-				       NULL, i, &devobj->subdev[i]);
-		if (ret == -ENODEV)
-			continue;
-		if (ret)
-			return ret;
-
-		device->subdev[i] = devobj->subdev[i];
-
-		/* note: can't init *any* subdevs until devinit has been run
-		 * due to not knowing exactly what the vbios init tables will
-		 * mess with.  devinit also can't be run until all of its
-		 * dependencies have been created.
-		 *
-		 * this code delays init of any subdev until all of devinit's
-		 * dependencies have been created, and then initialises each
-		 * subdev in turn as they're created.
-		 */
-		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
-			struct nvkm_object *subdev = devobj->subdev[c++];
-			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					return ret;
-				atomic_dec(&nv_object(device)->usecount);
-			} else
-			if (subdev) {
-				nvkm_subdev_reset(subdev);
-			}
-		}
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nvkm_devobj_ofuncs = {
-	.ctor = nvkm_devobj_ctor,
-	.dtor = nvkm_devobj_dtor,
-	.init = _nvkm_parent_init,
-	.fini = _nvkm_parent_fini,
-	.mthd = nvkm_devobj_mthd,
-};
-
-/******************************************************************************
- * nvkm_device: engine functions
- *****************************************************************************/
-
-struct nvkm_device *
-nv_device(void *obj)
-{
-	struct nvkm_object *device = nv_object(obj);
-	if (device->engine == NULL) {
-		while (device && device->parent)
-			device = device->parent;
-	} else {
-		device = &nv_object(obj)->engine->subdev.object;
-		if (device && device->parent)
-			device = device->parent;
-	}
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!device))
-		nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
-#endif
-	return (void *)device;
-}
-
-static struct nvkm_oclass
-nvkm_device_sclass[] = {
-	{ 0x0080, &nvkm_devobj_ofuncs },
-	{}
-};
-
-static int
-nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		       struct nvkm_notify *notify)
-{
-	if (!WARN_ON(size != 0)) {
-		notify->size  = 0;
-		notify->types = 1;
-		notify->index = 0;
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static const struct nvkm_event_func
-nvkm_device_event_func = {
-	.ctor = nvkm_device_event_ctor,
-};
-
-static int
-nvkm_device_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i;
-
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_dec(subdev, suspend);
-				if (ret && suspend)
-					goto fail;
-			}
-		}
-	}
-
-	ret = nvkm_acpi_fini(device, suspend);
-fail:
-	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret) {
-					/* XXX */
-				}
-			}
-		}
-	}
-
-	return ret;
-}
-
-static int
-nvkm_device_init(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i = 0;
-
-	ret = nvkm_acpi_init(device);
-	if (ret)
-		goto fail;
-
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					goto fail;
-			} else {
-				nvkm_subdev_reset(subdev);
-			}
-		}
+#undef _
 	}
 
 	ret = 0;
-fail:
-	for (--i; ret && i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
-				nvkm_object_dec(subdev, false);
-		}
-	}
-
-	if (ret)
-		nvkm_acpi_fini(device, false);
-	return ret;
-}
-
-static void
-nvkm_device_dtor(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-
-	nvkm_event_fini(&device->event);
-
-	mutex_lock(&nv_devices_mutex);
-	list_del(&device->head);
-	mutex_unlock(&nv_devices_mutex);
-
-	if (nv_subdev(device)->mmio)
-		iounmap(nv_subdev(device)->mmio);
-
-	nvkm_engine_destroy(&device->engine);
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_start(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return res->start;
-	}
-}
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_len(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return resource_size(res);
-	}
-}
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall)
-{
-	if (nv_device_is_pci(device)) {
-		return device->pdev->irq;
-	} else {
-		return platform_get_irq_byname(device->platformdev,
-					       stall ? "stall" : "nonstall");
-	}
-}
-
-static struct nvkm_oclass
-nvkm_device_oclass = {
-	.handle = NV_ENGINE(DEVICE, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_device_dtor,
-		.init = nvkm_device_init,
-		.fini = nvkm_device_fini,
-	},
-};
-
-int
-nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
-		    const char *sname, const char *cfg, const char *dbg,
-		    int length, void **pobject)
-{
-	struct nvkm_device *device;
-	int ret = -EEXIST;
-
-	mutex_lock(&nv_devices_mutex);
-	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name)
-			goto done;
-	}
-
-	ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
-				  "DEVICE", "device", length, pobject);
-	device = *pobject;
-	if (ret)
-		goto done;
-
-	switch (type) {
-	case NVKM_BUS_PCI:
-		device->pdev = dev;
-		break;
-	case NVKM_BUS_PLATFORM:
-		device->platformdev = dev;
-		break;
-	}
-	device->handle = name;
-	device->cfgopt = cfg;
-	device->dbgopt = dbg;
-	device->name = sname;
-
-	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
-	nv_engine(device)->sclass = nvkm_device_sclass;
-	list_add(&device->head, &nv_devices);
-
-	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
 done:
 	mutex_unlock(&nv_devices_mutex);
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index 0b794b1..cf8bc06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ctrl.h"
 
 #include <core/client.h>
 #include <subdev/clk.h>
@@ -31,18 +31,18 @@
 #include <nvif/unpack.h>
 
 static int
-nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_info_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate info size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate info vers %d\n",
-			 args->v0.version);
+		nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
+			   args->v0.version);
 	} else
 		return ret;
 
@@ -64,24 +64,24 @@
 }
 
 static int
-nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_attr_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
-	struct nvkm_domain *domain;
+	struct nvkm_clk *clk = ctrl->device->clk;
+	const struct nvkm_domain *domain;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	int i = 0, j = -1;
 	u32 lo, hi;
 	int ret;
 
-	nv_ioctl(object, "control pstate attr size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate attr vers %d state %d "
-				 "index %d\n",
-			 args->v0.version, args->v0.state, args->v0.index);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate attr vers %d state %d index %d\n",
+			   args->v0.version, args->v0.state, args->v0.index);
 		if (!clk)
 			return -ENODEV;
 		if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
@@ -116,7 +116,7 @@
 
 		args->v0.state = pstate->pstate;
 	} else {
-		lo = max(clk->read(clk, domain->name), 0);
+		lo = max(nvkm_clk_read(clk, domain->name), 0);
 		hi = lo;
 	}
 
@@ -137,19 +137,19 @@
 }
 
 static int
-nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_user_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate user size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate user vers %d ustate %d "
-				 "pwrsrc %d\n", args->v0.version,
-			 args->v0.ustate, args->v0.pwrsrc);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate user vers %d ustate %d pwrsrc %d\n",
+			   args->v0.version, args->v0.ustate, args->v0.pwrsrc);
 		if (!clk)
 			return -ENODEV;
 	} else
@@ -168,32 +168,44 @@
 static int
 nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_control *ctrl = nvkm_control(object);
 	switch (mthd) {
 	case NVIF_CONTROL_PSTATE_INFO:
-		return nvkm_control_mthd_pstate_info(object, data, size);
+		return nvkm_control_mthd_pstate_info(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_ATTR:
-		return nvkm_control_mthd_pstate_attr(object, data, size);
+		return nvkm_control_mthd_pstate_attr(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_USER:
-		return nvkm_control_mthd_pstate_user(object, data, size);
+		return nvkm_control_mthd_pstate_user(ctrl, data, size);
 	default:
 		break;
 	}
 	return -EINVAL;
 }
 
-static struct nvkm_ofuncs
-nvkm_control_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+static const struct nvkm_object_func
+nvkm_control = {
 	.mthd = nvkm_control_mthd,
 };
 
-struct nvkm_oclass
-nvkm_control_oclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_CONTROL,
-	  .ofuncs = &nvkm_control_ofuncs
-	},
-	{}
+static int
+nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_control *ctrl;
+
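+	/* allocate a control object and remember the device it belongs to */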
+	if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &ctrl->object;
+	ctrl->device = device;
+
+	nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
+	return 0;
+}
+
+const struct nvkm_device_oclass
+nvkm_control_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_CONTROL,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_control_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
new file mode 100644
index 0000000..20249d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -0,0 +1,12 @@
+#ifndef __NVKM_DEVICE_CTRL_H__
+#define __NVKM_DEVICE_CTRL_H__
+#define nvkm_control(p) container_of((p), struct nvkm_control, object)
+#include <core/device.h>
+
+struct nvkm_control {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+extern const struct nvkm_device_oclass nvkm_control_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
deleted file mode 100644
index 82b38d7..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mspdec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-gf100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xc0:
-		device->cname = "GF100";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf100_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc4:
-		device->cname = "GF104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc3:
-		device->cname = "GF106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xce:
-		device->cname = "GF114";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xcf:
-		device->cname = "GF116";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc1:
-		device->cname = "GF108";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf108_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc8:
-		device->cname = "GF110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd9:
-		device->cname = "GF119";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf119_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd7:
-		device->cname = "GF117";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf117_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf117_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Fermi chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
deleted file mode 100644
index 6a9483f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gk104_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xe4:
-		device->cname = "GK104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe7:
-		device->cname = "GK107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe6:
-		device->cname = "GK106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xea:
-		device->cname = "GK20A";
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk20a_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk20a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk20a_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gk20a_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk20a_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk20a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &gk20a_volt_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk20a_pmu_oclass;
-		break;
-	case 0xf0:
-		device->cname = "GK110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0xf1:
-		device->cname = "GK110B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110b_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0x106:
-		device->cname = "GK208B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	case 0x108:
-		device->cname = "GK208";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kepler chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
deleted file mode 100644
index 70abf1e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gm100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x117:
-		device->cname = "GM107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm107_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm107_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm107_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x124:
-		device->cname = "GM204";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm204_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x126:
-		device->cname = "GM206";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm206_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	default:
-		nv_fatal(device, "unknown Maxwell chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
deleted file mode 100644
index 5a2ae04..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv04_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x04:
-		device->cname = "NV04";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x05:
-		device->cname = "NV05";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown RIVA chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
deleted file mode 100644
index 94a1ca4..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv10_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x10:
-		device->cname = "NV10";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x15:
-		device->cname = "NV15";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x16:
-		device->cname = "NV16";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1a:
-		device->cname = "nForce";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x11:
-		device->cname = "NV11";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x17:
-		device->cname = "NV17";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1f:
-		device->cname = "nForce2";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x18:
-		device->cname = "NV18";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Celsius chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
deleted file mode 100644
index d5ec893..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv20_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x20:
-		device->cname = "NV20";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x25:
-		device->cname = "NV25";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x28:
-		device->cname = "NV28";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x2a:
-		device->cname = "NV2A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kelvin chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
deleted file mode 100644
index dda0962..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-
-int
-nv30_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x30:
-		device->cname = "NV30";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x35:
-		device->cname = "NV35";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x31:
-		device->cname = "NV31";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x36:
-		device->cname = "NV36";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x34:
-		device->cname = "NV34";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Rankine chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
deleted file mode 100644
index c630136..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/mmu.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv40_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x40:
-		device->cname = "NV40";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x41:
-		device->cname = "NV41";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x42:
-		device->cname = "NV42";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x43:
-		device->cname = "NV43";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x45:
-		device->cname = "NV45";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x47:
-		device->cname = "G70";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x49:
-		device->cname = "G71";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4b:
-		device->cname = "G73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x44:
-		device->cname = "NV44";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x46:
-		device->cname = "G72";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4a:
-		device->cname = "NV44A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4c:
-		device->cname = "C61";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4e:
-		device->cname = "C51";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv4e_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x63:
-		device->cname = "C73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x67:
-		device->cname = "C67";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x68:
-		device->cname = "C68";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Curie chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
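
Every per-family file removed in this series — the Curie identify() ending above and the Tesla one in nv50.c below — has the same shape: a switch on device->chipset that names the chip and fills the oclass table with one object class per subdev and engine, failing hard on unknown ids. A condensed sketch of that pattern (the symbols shown are taken from the deleted code; the reduced case body and the nvxx_identify name are illustrative only, not part of the patch):

	int
	nvxx_identify(struct nvkm_device *device)
	{
		switch (device->chipset) {
		case 0x50:
			device->cname = "G80";
			/* one assignment per subdev and engine ... */
			device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
			break;
		default:
			nv_fatal(device, "unknown chipset\n");
			return -EINVAL;
		}
		return 0;
	}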
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
deleted file mode 100644
index 249b844..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/vp.h>
-#include <engine/cipher.h>
-#include <engine/sec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv50_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x50:
-		device->cname = "G80";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  nv50_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv50_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv50_pm_oclass;
-		break;
-	case 0x84:
-		device->cname = "G84";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x86:
-		device->cname = "G86";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x92:
-		device->cname = "G92";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x94:
-		device->cname = "G94";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x96:
-		device->cname = "G96";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x98:
-		device->cname = "G98";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa0:
-		device->cname = "G200";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt200_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xaa:
-		device->cname = "MCP77/MCP78";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xac:
-		device->cname = "MCP79/MCP7A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa3:
-		device->cname = "GT215";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa5:
-		device->cname = "GT216";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa8:
-		device->cname = "GT218";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xaf:
-		device->cname = "MCP89";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  mcp89_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp89_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Tesla chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
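
The new pci.c added below moves board-specific naming into static PCI-id tables: each device id gets an optional vendor list, keyed on subsystem vendor/device, carrying an override name plus optional nvkm_device_quirk data and terminated by an empty sentinel entry. A hedged sketch of how such a two-level table could be walked (struct nvkm_device_pci_vendor is the layout added below; the lookup helper and its name are hypothetical, not part of the patch):

	/* Hypothetical: resolve a board-specific entry for one PCI device.
	 * The tables below end with an all-zero sentinel, so a zero vendor
	 * id terminates the scan; NULL means "use the generic device name".
	 */
	static const struct nvkm_device_pci_vendor *
	nvkm_device_pci_vendor_find(const struct nvkm_device_pci_vendor *list,
				    u16 vendor, u16 device)
	{
		while (list && list->vendor) {
			if (list->vendor == vendor && list->device == device)
				return list;
			list++;
		}
		return NULL;
	}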
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
new file mode 100644
index 0000000..9dd1cac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -0,0 +1,1685 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/pci.h>
+#include "priv.h"
+
+struct nvkm_device_pci_device {
+	u16 device;
+	const char *name;
+	const struct nvkm_device_pci_vendor *vendor;
+};
+
+struct nvkm_device_pci_vendor {
+	u16 vendor;
+	u16 device;
+	const char *name;
+	const struct nvkm_device_quirk quirk;
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0189[] = {
+	/* Apple iMac G4 NV18 */
+	{ 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_01f0[] = {
+	/* MSI nForce2 IGP */
+	{ 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0322[] = {
+	/* Zotac FX5200 */
+	{ 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
+	{ 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_05e7[] = {
+	{ 0x10de, 0x0595, "Tesla T10 Processor" },
+	{ 0x10de, 0x068f, "Tesla T10 Processor" },
+	{ 0x10de, 0x0697, "Tesla M1060" },
+	{ 0x10de, 0x0714, "Tesla M1060" },
+	{ 0x10de, 0x0743, "Tesla M1060" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0609[] = {
+	{ 0x106b, 0x00a7, "GeForce 8800 GS" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_062e[] = {
+	{ 0x106b, 0x0605, "GeForce GT 130" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0649[] = {
+	{ 0x1043, 0x202d, "GeForce GT 220M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0652[] = {
+	{ 0x152d, 0x0850, "GeForce GT 240M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0654[] = {
+	{ 0x1043, 0x14a2, "GeForce GT 320M" },
+	{ 0x1043, 0x14d2, "GeForce GT 320M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0655[] = {
+	{ 0x106b, 0x0633, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0656[] = {
+	{ 0x106b, 0x0693, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d1[] = {
+	{ 0x10de, 0x0771, "Tesla C2050" },
+	{ 0x10de, 0x0772, "Tesla C2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d2[] = {
+	{ 0x10de, 0x088f, "Tesla X2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06de[] = {
+	{ 0x10de, 0x0773, "Tesla S2050" },
+	{ 0x10de, 0x082f, "Tesla M2050" },
+	{ 0x10de, 0x0840, "Tesla X2070" },
+	{ 0x10de, 0x0842, "Tesla M2050" },
+	{ 0x10de, 0x0846, "Tesla M2050" },
+	{ 0x10de, 0x0866, "Tesla M2050" },
+	{ 0x10de, 0x0907, "Tesla M2050" },
+	{ 0x10de, 0x091e, "Tesla M2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06e8[] = {
+	{ 0x103c, 0x360b, "GeForce 9200M GE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06f9[] = {
+	{ 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06ff[] = {
+	{ 0x10de, 0x0711, "HICx8 + Graphics" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0866[] = {
+	{ 0x106b, 0x00b1, "GeForce 9400M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0872[] = {
+	{ 0x1043, 0x1c42, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0873[] = {
+	{ 0x1043, 0x1c52, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a6e[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a70[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{ 0x17aa, 0x3617, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a73[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{ 0x17aa, 0x3610, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a74[] = {
+	{ 0x17aa, 0x903a, "GeForce G210" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a75[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a7a[] = {
+	{ 0x1462, 0xaa51, "GeForce 405" },
+	{ 0x1462, 0xaa58, "GeForce 405" },
+	{ 0x1462, 0xac71, "GeForce 405" },
+	{ 0x1462, 0xac82, "GeForce 405" },
+	{ 0x1642, 0x3980, "GeForce 405" },
+	{ 0x17aa, 0x3950, "GeForce 405M" },
+	{ 0x17aa, 0x397d, "GeForce 405M" },
+	{ 0x1b0a, 0x90b4, "GeForce 405" },
+	{ 0x1bfd, 0x0003, "GeForce 405" },
+	{ 0x1bfd, 0x8006, "GeForce 405" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dd8[] = {
+	{ 0x10de, 0x0914, "Quadro 2000D" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0de9[] = {
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0753, "GeForce GT 620M" },
+	{ 0x1025, 0x0754, "GeForce GT 620M" },
+	{ 0x17aa, 0x3977, "GeForce GT 640M LE" },
+	{ 0x1b0a, 0x2210, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dea[] = {
+	{ 0x17aa, 0x365a, "GeForce 615" },
+	{ 0x17aa, 0x365b, "GeForce 615" },
+	{ 0x17aa, 0x365e, "GeForce 615" },
+	{ 0x17aa, 0x3660, "GeForce 615" },
+	{ 0x17aa, 0x366c, "GeForce 615" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0df4[] = {
+	{ 0x152d, 0x0952, "GeForce GT 630M" },
+	{ 0x152d, 0x0953, "GeForce GT 630M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fd2[] = {
+	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
+	{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe3[] = {
+	{ 0x103c, 0x2b16, "GeForce GT 745A" },
+	{ 0x17aa, 0x3675, "GeForce GT 745A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_104b[] = {
+	{ 0x1043, 0x844c, "GeForce GT 625" },
+	{ 0x1043, 0x846b, "GeForce GT 625" },
+	{ 0x1462, 0xb590, "GeForce GT 625" },
+	{ 0x174b, 0x0625, "GeForce GT 625" },
+	{ 0x174b, 0xa625, "GeForce GT 625" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1058[] = {
+	{ 0x103c, 0x2af1, "GeForce 610" },
+	{ 0x17aa, 0x3682, "GeForce 800A" },
+	{ 0x17aa, 0x3692, "GeForce 705A" },
+	{ 0x17aa, 0x3695, "GeForce 800A" },
+	{ 0x17aa, 0x36a8, "GeForce 800A" },
+	{ 0x17aa, 0x36ac, "GeForce 800A" },
+	{ 0x17aa, 0x36ad, "GeForce 800A" },
+	{ 0x705a, 0x3682, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_105b[] = {
+	{ 0x103c, 0x2afb, "GeForce 705A" },
+	{ 0x17aa, 0x36a1, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1091[] = {
+	{ 0x10de, 0x088e, "Tesla X2090" },
+	{ 0x10de, 0x0891, "Tesla X2090" },
+	{ 0x10de, 0x0974, "Tesla X2090" },
+	{ 0x10de, 0x098d, "Tesla X2090" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1096[] = {
+	{ 0x10de, 0x0911, "Tesla C2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1140[] = {
+	{ 0x1019, 0x999f, "GeForce GT 720M" },
+	{ 0x1025, 0x0600, "GeForce GT 620M" },
+	{ 0x1025, 0x0606, "GeForce GT 620M" },
+	{ 0x1025, 0x064a, "GeForce GT 620M" },
+	{ 0x1025, 0x064c, "GeForce GT 620M" },
+	{ 0x1025, 0x067a, "GeForce GT 620M" },
+	{ 0x1025, 0x0680, "GeForce GT 620M" },
+	{ 0x1025, 0x0686, "GeForce 710M" },
+	{ 0x1025, 0x0689, "GeForce 710M" },
+	{ 0x1025, 0x068b, "GeForce 710M" },
+	{ 0x1025, 0x068d, "GeForce 710M" },
+	{ 0x1025, 0x068e, "GeForce 710M" },
+	{ 0x1025, 0x0691, "GeForce 710M" },
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0694, "GeForce GT 620M" },
+	{ 0x1025, 0x0702, "GeForce GT 620M" },
+	{ 0x1025, 0x0719, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0732, "GeForce GT 620M" },
+	{ 0x1025, 0x0763, "GeForce GT 720M" },
+	{ 0x1025, 0x0773, "GeForce 710M" },
+	{ 0x1025, 0x0774, "GeForce 710M" },
+	{ 0x1025, 0x0776, "GeForce GT 720M" },
+	{ 0x1025, 0x077a, "GeForce 710M" },
+	{ 0x1025, 0x077b, "GeForce 710M" },
+	{ 0x1025, 0x077c, "GeForce 710M" },
+	{ 0x1025, 0x077d, "GeForce 710M" },
+	{ 0x1025, 0x077e, "GeForce 710M" },
+	{ 0x1025, 0x077f, "GeForce 710M" },
+	{ 0x1025, 0x0781, "GeForce GT 720M" },
+	{ 0x1025, 0x0798, "GeForce GT 720M" },
+	{ 0x1025, 0x0799, "GeForce GT 720M" },
+	{ 0x1025, 0x079b, "GeForce GT 720M" },
+	{ 0x1025, 0x079c, "GeForce GT 720M" },
+	{ 0x1025, 0x0807, "GeForce GT 720M" },
+	{ 0x1025, 0x0821, "GeForce 820M" },
+	{ 0x1025, 0x0823, "GeForce GT 720M" },
+	{ 0x1025, 0x0830, "GeForce GT 720M" },
+	{ 0x1025, 0x0833, "GeForce GT 720M" },
+	{ 0x1025, 0x0837, "GeForce GT 720M" },
+	{ 0x1025, 0x083e, "GeForce 820M" },
+	{ 0x1025, 0x0841, "GeForce 710M" },
+	{ 0x1025, 0x0853, "GeForce 820M" },
+	{ 0x1025, 0x0854, "GeForce 820M" },
+	{ 0x1025, 0x0855, "GeForce 820M" },
+	{ 0x1025, 0x0856, "GeForce 820M" },
+	{ 0x1025, 0x0857, "GeForce 820M" },
+	{ 0x1025, 0x0858, "GeForce 820M" },
+	{ 0x1025, 0x0863, "GeForce 820M" },
+	{ 0x1025, 0x0868, "GeForce 820M" },
+	{ 0x1025, 0x0869, "GeForce 810M" },
+	{ 0x1025, 0x0873, "GeForce 820M" },
+	{ 0x1025, 0x0878, "GeForce 820M" },
+	{ 0x1025, 0x087b, "GeForce 820M" },
+	{ 0x1025, 0x087f, "GeForce 820M" },
+	{ 0x1025, 0x0881, "GeForce 820M" },
+	{ 0x1025, 0x0885, "GeForce 820M" },
+	{ 0x1025, 0x088a, "GeForce 820M" },
+	{ 0x1025, 0x089b, "GeForce 820M" },
+	{ 0x1025, 0x0921, "GeForce 820M" },
+	{ 0x1025, 0x092e, "GeForce 810M" },
+	{ 0x1025, 0x092f, "GeForce 820M" },
+	{ 0x1025, 0x0932, "GeForce 820M" },
+	{ 0x1025, 0x093a, "GeForce 820M" },
+	{ 0x1025, 0x093c, "GeForce 820M" },
+	{ 0x1025, 0x093f, "GeForce 820M" },
+	{ 0x1025, 0x0941, "GeForce 820M" },
+	{ 0x1025, 0x0945, "GeForce 820M" },
+	{ 0x1025, 0x0954, "GeForce 820M" },
+	{ 0x1025, 0x0965, "GeForce 820M" },
+	{ 0x1028, 0x054d, "GeForce GT 630M" },
+	{ 0x1028, 0x054e, "GeForce GT 630M" },
+	{ 0x1028, 0x0554, "GeForce GT 620M" },
+	{ 0x1028, 0x0557, "GeForce GT 620M" },
+	{ 0x1028, 0x0562, "GeForce GT625M" },
+	{ 0x1028, 0x0565, "GeForce GT 630M" },
+	{ 0x1028, 0x0568, "GeForce GT 630M" },
+	{ 0x1028, 0x0590, "GeForce GT 630M" },
+	{ 0x1028, 0x0592, "GeForce GT625M" },
+	{ 0x1028, 0x0594, "GeForce GT625M" },
+	{ 0x1028, 0x0595, "GeForce GT625M" },
+	{ 0x1028, 0x05a2, "GeForce GT625M" },
+	{ 0x1028, 0x05b1, "GeForce GT625M" },
+	{ 0x1028, 0x05b3, "GeForce GT625M" },
+	{ 0x1028, 0x05da, "GeForce GT 630M" },
+	{ 0x1028, 0x05de, "GeForce GT 720M" },
+	{ 0x1028, 0x05e0, "GeForce GT 720M" },
+	{ 0x1028, 0x05e8, "GeForce GT 630M" },
+	{ 0x1028, 0x05f4, "GeForce GT 720M" },
+	{ 0x1028, 0x060f, "GeForce GT 720M" },
+	{ 0x1028, 0x062f, "GeForce GT 720M" },
+	{ 0x1028, 0x064e, "GeForce 820M" },
+	{ 0x1028, 0x0652, "GeForce 820M" },
+	{ 0x1028, 0x0653, "GeForce 820M" },
+	{ 0x1028, 0x0655, "GeForce 820M" },
+	{ 0x1028, 0x065e, "GeForce 820M" },
+	{ 0x1028, 0x0662, "GeForce 820M" },
+	{ 0x1028, 0x068d, "GeForce 820M" },
+	{ 0x1028, 0x06ad, "GeForce 820M" },
+	{ 0x1028, 0x06ae, "GeForce 820M" },
+	{ 0x1028, 0x06af, "GeForce 820M" },
+	{ 0x1028, 0x06b0, "GeForce 820M" },
+	{ 0x1028, 0x06c0, "GeForce 820M" },
+	{ 0x1028, 0x06c1, "GeForce 820M" },
+	{ 0x103c, 0x18ef, "GeForce GT 630M" },
+	{ 0x103c, 0x18f9, "GeForce GT 630M" },
+	{ 0x103c, 0x18fb, "GeForce GT 630M" },
+	{ 0x103c, 0x18fd, "GeForce GT 630M" },
+	{ 0x103c, 0x18ff, "GeForce GT 630M" },
+	{ 0x103c, 0x218a, "GeForce 820M" },
+	{ 0x103c, 0x21bb, "GeForce 820M" },
+	{ 0x103c, 0x21bc, "GeForce 820M" },
+	{ 0x103c, 0x220e, "GeForce 820M" },
+	{ 0x103c, 0x2210, "GeForce 820M" },
+	{ 0x103c, 0x2212, "GeForce 820M" },
+	{ 0x103c, 0x2214, "GeForce 820M" },
+	{ 0x103c, 0x2218, "GeForce 820M" },
+	{ 0x103c, 0x225b, "GeForce 820M" },
+	{ 0x103c, 0x225d, "GeForce 820M" },
+	{ 0x103c, 0x226d, "GeForce 820M" },
+	{ 0x103c, 0x226f, "GeForce 820M" },
+	{ 0x103c, 0x22d2, "GeForce 820M" },
+	{ 0x103c, 0x22d9, "GeForce 820M" },
+	{ 0x103c, 0x2335, "GeForce 820M" },
+	{ 0x103c, 0x2337, "GeForce 820M" },
+	{ 0x103c, 0x2aef, "GeForce GT 720A" },
+	{ 0x103c, 0x2af9, "GeForce 710A" },
+	{ 0x1043, 0x10dd, "NVS 5200M" },
+	{ 0x1043, 0x10ed, "NVS 5200M" },
+	{ 0x1043, 0x11fd, "GeForce GT 720M" },
+	{ 0x1043, 0x124d, "GeForce GT 720M" },
+	{ 0x1043, 0x126d, "GeForce GT 720M" },
+	{ 0x1043, 0x131d, "GeForce GT 720M" },
+	{ 0x1043, 0x13fd, "GeForce GT 720M" },
+	{ 0x1043, 0x14c7, "GeForce GT 720M" },
+	{ 0x1043, 0x1507, "GeForce GT 620M" },
+	{ 0x1043, 0x15ad, "GeForce 820M" },
+	{ 0x1043, 0x15ed, "GeForce 820M" },
+	{ 0x1043, 0x160d, "GeForce 820M" },
+	{ 0x1043, 0x163d, "GeForce 820M" },
+	{ 0x1043, 0x165d, "GeForce 820M" },
+	{ 0x1043, 0x166d, "GeForce 820M" },
+	{ 0x1043, 0x16cd, "GeForce 820M" },
+	{ 0x1043, 0x16dd, "GeForce 820M" },
+	{ 0x1043, 0x170d, "GeForce 820M" },
+	{ 0x1043, 0x176d, "GeForce 820M" },
+	{ 0x1043, 0x178d, "GeForce 820M" },
+	{ 0x1043, 0x179d, "GeForce 820M" },
+	{ 0x1043, 0x2132, "GeForce GT 620M" },
+	{ 0x1043, 0x2136, "NVS 5200M" },
+	{ 0x1043, 0x21ba, "GeForce GT 720M" },
+	{ 0x1043, 0x21fa, "GeForce GT 720M" },
+	{ 0x1043, 0x220a, "GeForce GT 720M" },
+	{ 0x1043, 0x221a, "GeForce GT 720M" },
+	{ 0x1043, 0x223a, "GeForce GT 710M" },
+	{ 0x1043, 0x224a, "GeForce GT 710M" },
+	{ 0x1043, 0x227a, "GeForce 820M" },
+	{ 0x1043, 0x228a, "GeForce 820M" },
+	{ 0x1043, 0x22fa, "GeForce 820M" },
+	{ 0x1043, 0x232a, "GeForce 820M" },
+	{ 0x1043, 0x233a, "GeForce 820M" },
+	{ 0x1043, 0x235a, "GeForce 820M" },
+	{ 0x1043, 0x236a, "GeForce 820M" },
+	{ 0x1043, 0x238a, "GeForce 820M" },
+	{ 0x1043, 0x8595, "GeForce GT 720M" },
+	{ 0x1043, 0x85ea, "GeForce GT 720M" },
+	{ 0x1043, 0x85eb, "GeForce 820M" },
+	{ 0x1043, 0x85ec, "GeForce 820M" },
+	{ 0x1043, 0x85ee, "GeForce GT 720M" },
+	{ 0x1043, 0x85f3, "GeForce 820M" },
+	{ 0x1043, 0x860e, "GeForce 820M" },
+	{ 0x1043, 0x861a, "GeForce 820M" },
+	{ 0x1043, 0x861b, "GeForce 820M" },
+	{ 0x1043, 0x8628, "GeForce 820M" },
+	{ 0x1043, 0x8643, "GeForce 820M" },
+	{ 0x1043, 0x864c, "GeForce 820M" },
+	{ 0x1043, 0x8652, "GeForce 820M" },
+	{ 0x1043, 0x8660, "GeForce 820M" },
+	{ 0x1043, 0x8661, "GeForce 820M" },
+	{ 0x105b, 0x0dac, "GeForce GT 720M" },
+	{ 0x105b, 0x0dad, "GeForce GT 720M" },
+	{ 0x105b, 0x0ef3, "GeForce GT 720M" },
+	{ 0x10cf, 0x17f5, "GeForce GT 720M" },
+	{ 0x1179, 0xfa01, "GeForce 710M" },
+	{ 0x1179, 0xfa02, "GeForce 710M" },
+	{ 0x1179, 0xfa03, "GeForce 710M" },
+	{ 0x1179, 0xfa05, "GeForce 710M" },
+	{ 0x1179, 0xfa11, "GeForce 710M" },
+	{ 0x1179, 0xfa13, "GeForce 710M" },
+	{ 0x1179, 0xfa18, "GeForce 710M" },
+	{ 0x1179, 0xfa19, "GeForce 710M" },
+	{ 0x1179, 0xfa21, "GeForce 710M" },
+	{ 0x1179, 0xfa23, "GeForce 710M" },
+	{ 0x1179, 0xfa2a, "GeForce 710M" },
+	{ 0x1179, 0xfa32, "GeForce 710M" },
+	{ 0x1179, 0xfa33, "GeForce 710M" },
+	{ 0x1179, 0xfa36, "GeForce 710M" },
+	{ 0x1179, 0xfa38, "GeForce 710M" },
+	{ 0x1179, 0xfa42, "GeForce 710M" },
+	{ 0x1179, 0xfa43, "GeForce 710M" },
+	{ 0x1179, 0xfa45, "GeForce 710M" },
+	{ 0x1179, 0xfa47, "GeForce 710M" },
+	{ 0x1179, 0xfa49, "GeForce 710M" },
+	{ 0x1179, 0xfa58, "GeForce 710M" },
+	{ 0x1179, 0xfa59, "GeForce 710M" },
+	{ 0x1179, 0xfa88, "GeForce 710M" },
+	{ 0x1179, 0xfa89, "GeForce 710M" },
+	{ 0x144d, 0xb092, "GeForce GT 620M" },
+	{ 0x144d, 0xc0d5, "GeForce GT 630M" },
+	{ 0x144d, 0xc0d7, "GeForce GT 620M" },
+	{ 0x144d, 0xc0e2, "NVS 5200M" },
+	{ 0x144d, 0xc0e3, "NVS 5200M" },
+	{ 0x144d, 0xc0e4, "NVS 5200M" },
+	{ 0x144d, 0xc10d, "GeForce 820M" },
+	{ 0x144d, 0xc652, "GeForce GT 620M" },
+	{ 0x144d, 0xc709, "GeForce 710M" },
+	{ 0x144d, 0xc711, "GeForce 710M" },
+	{ 0x144d, 0xc736, "GeForce 710M" },
+	{ 0x144d, 0xc737, "GeForce 710M" },
+	{ 0x144d, 0xc745, "GeForce 820M" },
+	{ 0x144d, 0xc750, "GeForce 820M" },
+	{ 0x1462, 0x10b8, "GeForce GT 710M" },
+	{ 0x1462, 0x10e9, "GeForce GT 720M" },
+	{ 0x1462, 0x1116, "GeForce 820M" },
+	{ 0x1462, 0xaa33, "GeForce 720M" },
+	{ 0x1462, 0xaaa2, "GeForce GT 720M" },
+	{ 0x1462, 0xaaa3, "GeForce 820M" },
+	{ 0x1462, 0xacb2, "GeForce GT 720M" },
+	{ 0x1462, 0xacc1, "GeForce GT 720M" },
+	{ 0x1462, 0xae61, "GeForce 720M" },
+	{ 0x1462, 0xae65, "GeForce GT 720M" },
+	{ 0x1462, 0xae6a, "GeForce 820M" },
+	{ 0x1462, 0xae71, "GeForce GT 720M" },
+	{ 0x14c0, 0x0083, "GeForce 820M" },
+	{ 0x152d, 0x0926, "GeForce 620M" },
+	{ 0x152d, 0x0982, "GeForce GT 630M" },
+	{ 0x152d, 0x0983, "GeForce GT 630M" },
+	{ 0x152d, 0x1005, "GeForce GT820M" },
+	{ 0x152d, 0x1012, "GeForce 710M" },
+	{ 0x152d, 0x1019, "GeForce 820M" },
+	{ 0x152d, 0x1030, "GeForce GT 630M" },
+	{ 0x152d, 0x1055, "GeForce 710M" },
+	{ 0x152d, 0x1067, "GeForce GT 720M" },
+	{ 0x152d, 0x1092, "GeForce 820M" },
+	{ 0x17aa, 0x2200, "NVS 5200M" },
+	{ 0x17aa, 0x2213, "GeForce GT 720M" },
+	{ 0x17aa, 0x2220, "GeForce GT 720M" },
+	{ 0x17aa, 0x309c, "GeForce GT 720A" },
+	{ 0x17aa, 0x30b4, "GeForce 820A" },
+	{ 0x17aa, 0x30b7, "GeForce 720A" },
+	{ 0x17aa, 0x30e4, "GeForce 820A" },
+	{ 0x17aa, 0x361b, "GeForce 820A" },
+	{ 0x17aa, 0x361c, "GeForce 820A" },
+	{ 0x17aa, 0x361d, "GeForce 820A" },
+	{ 0x17aa, 0x3656, "GeForce GT620M" },
+	{ 0x17aa, 0x365a, "GeForce 705M" },
+	{ 0x17aa, 0x365e, "GeForce 800M" },
+	{ 0x17aa, 0x3661, "GeForce 820A" },
+	{ 0x17aa, 0x366c, "GeForce 800M" },
+	{ 0x17aa, 0x3685, "GeForce 800M" },
+	{ 0x17aa, 0x3686, "GeForce 800M" },
+	{ 0x17aa, 0x3687, "GeForce 705A" },
+	{ 0x17aa, 0x3696, "GeForce 820A" },
+	{ 0x17aa, 0x369b, "GeForce 820A" },
+	{ 0x17aa, 0x369c, "GeForce 820A" },
+	{ 0x17aa, 0x369d, "GeForce 820A" },
+	{ 0x17aa, 0x369e, "GeForce 820A" },
+	{ 0x17aa, 0x36a6, "GeForce 820A" },
+	{ 0x17aa, 0x36a7, "GeForce 820A" },
+	{ 0x17aa, 0x36a9, "GeForce 820A" },
+	{ 0x17aa, 0x36af, "GeForce 820A" },
+	{ 0x17aa, 0x36b0, "GeForce 820A" },
+	{ 0x17aa, 0x36b6, "GeForce 820A" },
+	{ 0x17aa, 0x3800, "GeForce GT 720M" },
+	{ 0x17aa, 0x3801, "GeForce GT 720M" },
+	{ 0x17aa, 0x3802, "GeForce GT 720M" },
+	{ 0x17aa, 0x3803, "GeForce GT 720M" },
+	{ 0x17aa, 0x3804, "GeForce GT 720M" },
+	{ 0x17aa, 0x3806, "GeForce GT 720M" },
+	{ 0x17aa, 0x3808, "GeForce GT 720M" },
+	{ 0x17aa, 0x380d, "GeForce 820M" },
+	{ 0x17aa, 0x380e, "GeForce 820M" },
+	{ 0x17aa, 0x380f, "GeForce 820M" },
+	{ 0x17aa, 0x3811, "GeForce 820M" },
+	{ 0x17aa, 0x3812, "GeForce 820M" },
+	{ 0x17aa, 0x3813, "GeForce 820M" },
+	{ 0x17aa, 0x3816, "GeForce 820M" },
+	{ 0x17aa, 0x3817, "GeForce 820M" },
+	{ 0x17aa, 0x3818, "GeForce 820M" },
+	{ 0x17aa, 0x381a, "GeForce 820M" },
+	{ 0x17aa, 0x381c, "GeForce 820M" },
+	{ 0x17aa, 0x381d, "GeForce 820M" },
+	{ 0x17aa, 0x3901, "GeForce 610M" },
+	{ 0x17aa, 0x3902, "GeForce 710M" },
+	{ 0x17aa, 0x3903, "GeForce 710M" },
+	{ 0x17aa, 0x3904, "GeForce GT 625M" },
+	{ 0x17aa, 0x3905, "GeForce GT 720M" },
+	{ 0x17aa, 0x3907, "GeForce 820M" },
+	{ 0x17aa, 0x3910, "GeForce GT 720M" },
+	{ 0x17aa, 0x3912, "GeForce GT 720M" },
+	{ 0x17aa, 0x3913, "GeForce 820M" },
+	{ 0x17aa, 0x3915, "GeForce 820M" },
+	{ 0x17aa, 0x3983, "GeForce 610M" },
+	{ 0x17aa, 0x5001, "GeForce 610M" },
+	{ 0x17aa, 0x5003, "GeForce GT 720M" },
+	{ 0x17aa, 0x5005, "GeForce 705M" },
+	{ 0x17aa, 0x500d, "GeForce GT 620M" },
+	{ 0x17aa, 0x5014, "GeForce 710M" },
+	{ 0x17aa, 0x5017, "GeForce 710M" },
+	{ 0x17aa, 0x5019, "GeForce 710M" },
+	{ 0x17aa, 0x501a, "GeForce 710M" },
+	{ 0x17aa, 0x501f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5025, "GeForce 710M" },
+	{ 0x17aa, 0x5027, "GeForce 710M" },
+	{ 0x17aa, 0x502a, "GeForce 710M" },
+	{ 0x17aa, 0x502b, "GeForce GT 720M" },
+	{ 0x17aa, 0x502d, "GeForce 710M" },
+	{ 0x17aa, 0x502e, "GeForce GT 720M" },
+	{ 0x17aa, 0x502f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5030, "GeForce 705M" },
+	{ 0x17aa, 0x5031, "GeForce 705M" },
+	{ 0x17aa, 0x5032, "GeForce 820M" },
+	{ 0x17aa, 0x5033, "GeForce 820M" },
+	{ 0x17aa, 0x503e, "GeForce 710M" },
+	{ 0x17aa, 0x503f, "GeForce 820M" },
+	{ 0x17aa, 0x5040, "GeForce 820M" },
+	{ 0x1854, 0x0177, "GeForce 710M" },
+	{ 0x1854, 0x0180, "GeForce 710M" },
+	{ 0x1854, 0x0190, "GeForce GT 720M" },
+	{ 0x1854, 0x0192, "GeForce GT 720M" },
+	{ 0x1854, 0x0224, "GeForce 820M" },
+	{ 0x1b0a, 0x20dd, "GeForce GT 620M" },
+	{ 0x1b0a, 0x20df, "GeForce GT 620M" },
+	{ 0x1b0a, 0x210e, "GeForce 820M" },
+	{ 0x1b0a, 0x2202, "GeForce GT 720M" },
+	{ 0x1b0a, 0x90d7, "GeForce 820M" },
+	{ 0x1b0a, 0x90dd, "GeForce 820M" },
+	{ 0x1b50, 0x5530, "GeForce 820M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1185[] = {
+	{ 0x10de, 0x106f, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1189[] = {
+	{ 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1199[] = {
+	{ 0x1458, 0xd001, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e3[] = {
+	{ 0x17aa, 0x3683, "GeForce GTX 760A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11fc[] = {
+	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1247[] = {
+	{ 0x1043, 0x212a, "GeForce GT 635M" },
+	{ 0x1043, 0x212b, "GeForce GT 635M" },
+	{ 0x1043, 0x212c, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_124d[] = {
+	{ 0x1462, 0x10cc, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1290[] = {
+	{ 0x103c, 0x2afa, "GeForce 730A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1292[] = {
+	{ 0x17aa, 0x3675, "GeForce GT 740A" },
+	{ 0x17aa, 0x367c, "GeForce GT 740A" },
+	{ 0x17aa, 0x3684, "GeForce GT 740A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1295[] = {
+	{ 0x103c, 0x2b0d, "GeForce 710A" },
+	{ 0x103c, 0x2b0f, "GeForce 710A" },
+	{ 0x103c, 0x2b20, "GeForce 810A" },
+	{ 0x103c, 0x2b21, "GeForce 810A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1299[] = {
+	{ 0x17aa, 0x369b, "GeForce 920A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1340[] = {
+	{ 0x103c, 0x2b2b, "GeForce 830A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1341[] = {
+	{ 0x17aa, 0x3697, "GeForce 840A" },
+	{ 0x17aa, 0x3699, "GeForce 840A" },
+	{ 0x17aa, 0x369c, "GeForce 840A" },
+	{ 0x17aa, 0x36af, "GeForce 840A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1346[] = {
+	{ 0x17aa, 0x30ba, "GeForce 930A" },
+	{ 0x17aa, 0x362c, "GeForce 930A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1347[] = {
+	{ 0x17aa, 0x36b9, "GeForce 940A" },
+	{ 0x17aa, 0x36ba, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137a[] = {
+	{ 0x17aa, 0x2225, "Quadro K620M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137d[] = {
+	{ 0x17aa, 0x3699, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1391[] = {
+	{ 0x17aa, 0x3697, "GeForce GTX 850A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1392[] = {
+	{ 0x1028, 0x066a, "GeForce GPU" },
+	{ 0x1043, 0x861e, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139a[] = {
+	{ 0x17aa, 0x36b9, "GeForce GTX 950A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139b[] = {
+	{ 0x1028, 0x06a3, "GeForce GTX 860M" },
+	{ 0x19da, 0xc248, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_device
+nvkm_device_pci_10de[] = {
+	{ 0x0020, "RIVA TNT" },
+	{ 0x0028, "RIVA TNT2/TNT2 Pro" },
+	{ 0x0029, "RIVA TNT2 Ultra" },
+	{ 0x002c, "Vanta/Vanta LT" },
+	{ 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
+	{ 0x0040, "GeForce 6800 Ultra" },
+	{ 0x0041, "GeForce 6800" },
+	{ 0x0042, "GeForce 6800 LE" },
+	{ 0x0043, "GeForce 6800 XE" },
+	{ 0x0044, "GeForce 6800 XT" },
+	{ 0x0045, "GeForce 6800 GT" },
+	{ 0x0046, "GeForce 6800 GT" },
+	{ 0x0047, "GeForce 6800 GS" },
+	{ 0x0048, "GeForce 6800 XT" },
+	{ 0x004e, "Quadro FX 4000" },
+	{ 0x0090, "GeForce 7800 GTX" },
+	{ 0x0091, "GeForce 7800 GTX" },
+	{ 0x0092, "GeForce 7800 GT" },
+	{ 0x0093, "GeForce 7800 GS" },
+	{ 0x0095, "GeForce 7800 SLI" },
+	{ 0x0098, "GeForce Go 7800" },
+	{ 0x0099, "GeForce Go 7800 GTX" },
+	{ 0x009d, "Quadro FX 4500" },
+	{ 0x00a0, "Aladdin TNT2" },
+	{ 0x00c0, "GeForce 6800 GS" },
+	{ 0x00c1, "GeForce 6800" },
+	{ 0x00c2, "GeForce 6800 LE" },
+	{ 0x00c3, "GeForce 6800 XT" },
+	{ 0x00c8, "GeForce Go 6800" },
+	{ 0x00c9, "GeForce Go 6800 Ultra" },
+	{ 0x00cc, "Quadro FX Go1400" },
+	{ 0x00cd, "Quadro FX 3450/4000 SDI" },
+	{ 0x00ce, "Quadro FX 1400" },
+	{ 0x00f1, "GeForce 6600 GT" },
+	{ 0x00f2, "GeForce 6600" },
+	{ 0x00f3, "GeForce 6200" },
+	{ 0x00f4, "GeForce 6600 LE" },
+	{ 0x00f5, "GeForce 7800 GS" },
+	{ 0x00f6, "GeForce 6800 GS" },
+	{ 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
+	{ 0x00f9, "GeForce 6800 Ultra" },
+	{ 0x00fa, "GeForce PCX 5750" },
+	{ 0x00fb, "GeForce PCX 5900" },
+	{ 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
+	{ 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
+	{ 0x00fe, "Quadro FX 1300" },
+	{ 0x0100, "GeForce 256" },
+	{ 0x0101, "GeForce DDR" },
+	{ 0x0103, "Quadro" },
+	{ 0x0110, "GeForce2 MX/MX 400" },
+	{ 0x0111, "GeForce2 MX 100/200" },
+	{ 0x0112, "GeForce2 Go" },
+	{ 0x0113, "Quadro2 MXR/EX/Go" },
+	{ 0x0140, "GeForce 6600 GT" },
+	{ 0x0141, "GeForce 6600" },
+	{ 0x0142, "GeForce 6600 LE" },
+	{ 0x0143, "GeForce 6600 VE" },
+	{ 0x0144, "GeForce Go 6600" },
+	{ 0x0145, "GeForce 6610 XL" },
+	{ 0x0146, "GeForce Go 6600 TE/6200 TE" },
+	{ 0x0147, "GeForce 6700 XL" },
+	{ 0x0148, "GeForce Go 6600" },
+	{ 0x0149, "GeForce Go 6600 GT" },
+	{ 0x014a, "Quadro NVS 440" },
+	{ 0x014c, "Quadro FX 540M" },
+	{ 0x014d, "Quadro FX 550" },
+	{ 0x014e, "Quadro FX 540" },
+	{ 0x014f, "GeForce 6200" },
+	{ 0x0150, "GeForce2 GTS/GeForce2 Pro" },
+	{ 0x0151, "GeForce2 Ti" },
+	{ 0x0152, "GeForce2 Ultra" },
+	{ 0x0153, "Quadro2 Pro" },
+	{ 0x0160, "GeForce 6500" },
+	{ 0x0161, "GeForce 6200 TurboCache(TM)" },
+	{ 0x0162, "GeForce 6200SE TurboCache(TM)" },
+	{ 0x0163, "GeForce 6200 LE" },
+	{ 0x0164, "GeForce Go 6200" },
+	{ 0x0165, "Quadro NVS 285" },
+	{ 0x0166, "GeForce Go 6400" },
+	{ 0x0167, "GeForce Go 6200" },
+	{ 0x0168, "GeForce Go 6400" },
+	{ 0x0169, "GeForce 6250" },
+	{ 0x016a, "GeForce 7100 GS" },
+	{ 0x0170, "GeForce4 MX 460" },
+	{ 0x0171, "GeForce4 MX 440" },
+	{ 0x0172, "GeForce4 MX 420" },
+	{ 0x0173, "GeForce4 MX 440-SE" },
+	{ 0x0174, "GeForce4 440 Go" },
+	{ 0x0175, "GeForce4 420 Go" },
+	{ 0x0176, "GeForce4 420 Go 32M" },
+	{ 0x0177, "GeForce4 460 Go" },
+	{ 0x0178, "Quadro4 550 XGL" },
+	{ 0x0179, "GeForce4 440 Go 64M" },
+	{ 0x017a, "Quadro NVS 400" },
+	{ 0x017c, "Quadro4 500 GoGL" },
+	{ 0x017d, "GeForce4 410 Go 16M" },
+	{ 0x0181, "GeForce4 MX 440 with AGP8X" },
+	{ 0x0182, "GeForce4 MX 440SE with AGP8X" },
+	{ 0x0183, "GeForce4 MX 420 with AGP8X" },
+	{ 0x0185, "GeForce4 MX 4000" },
+	{ 0x0188, "Quadro4 580 XGL" },
+	{ 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
+	{ 0x018a, "Quadro NVS 280 SD" },
+	{ 0x018b, "Quadro4 380 XGL" },
+	{ 0x018c, "Quadro NVS 50 PCI" },
+	{ 0x0191, "GeForce 8800 GTX" },
+	{ 0x0193, "GeForce 8800 GTS" },
+	{ 0x0194, "GeForce 8800 Ultra" },
+	{ 0x0197, "Tesla C870" },
+	{ 0x019d, "Quadro FX 5600" },
+	{ 0x019e, "Quadro FX 4600" },
+	{ 0x01a0, "GeForce2 Integrated GPU" },
+	{ 0x01d0, "GeForce 7350 LE" },
+	{ 0x01d1, "GeForce 7300 LE" },
+	{ 0x01d2, "GeForce 7550 LE" },
+	{ 0x01d3, "GeForce 7300 SE/7200 GS" },
+	{ 0x01d6, "GeForce Go 7200" },
+	{ 0x01d7, "GeForce Go 7300" },
+	{ 0x01d8, "GeForce Go 7400" },
+	{ 0x01da, "Quadro NVS 110M" },
+	{ 0x01db, "Quadro NVS 120M" },
+	{ 0x01dc, "Quadro FX 350M" },
+	{ 0x01dd, "GeForce 7500 LE" },
+	{ 0x01de, "Quadro FX 350" },
+	{ 0x01df, "GeForce 7300 GS" },
+	{ 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
+	{ 0x0200, "GeForce3" },
+	{ 0x0201, "GeForce3 Ti 200" },
+	{ 0x0202, "GeForce3 Ti 500" },
+	{ 0x0203, "Quadro DCC" },
+	{ 0x0211, "GeForce 6800" },
+	{ 0x0212, "GeForce 6800 LE" },
+	{ 0x0215, "GeForce 6800 GT" },
+	{ 0x0218, "GeForce 6800 XT" },
+	{ 0x0221, "GeForce 6200" },
+	{ 0x0222, "GeForce 6200 A-LE" },
+	{ 0x0240, "GeForce 6150" },
+	{ 0x0241, "GeForce 6150 LE" },
+	{ 0x0242, "GeForce 6100" },
+	{ 0x0244, "GeForce Go 6150" },
+	{ 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
+	{ 0x0247, "GeForce Go 6100" },
+	{ 0x0250, "GeForce4 Ti 4600" },
+	{ 0x0251, "GeForce4 Ti 4400" },
+	{ 0x0253, "GeForce4 Ti 4200" },
+	{ 0x0258, "Quadro4 900 XGL" },
+	{ 0x0259, "Quadro4 750 XGL" },
+	{ 0x025b, "Quadro4 700 XGL" },
+	{ 0x0280, "GeForce4 Ti 4800" },
+	{ 0x0281, "GeForce4 Ti 4200 with AGP8X" },
+	{ 0x0282, "GeForce4 Ti 4800 SE" },
+	{ 0x0286, "GeForce4 4200 Go" },
+	{ 0x0288, "Quadro4 980 XGL" },
+	{ 0x0289, "Quadro4 780 XGL" },
+	{ 0x028c, "Quadro4 700 GoGL" },
+	{ 0x0290, "GeForce 7900 GTX" },
+	{ 0x0291, "GeForce 7900 GT/GTO" },
+	{ 0x0292, "GeForce 7900 GS" },
+	{ 0x0293, "GeForce 7950 GX2" },
+	{ 0x0294, "GeForce 7950 GX2" },
+	{ 0x0295, "GeForce 7950 GT" },
+	{ 0x0297, "GeForce Go 7950 GTX" },
+	{ 0x0298, "GeForce Go 7900 GS" },
+	{ 0x0299, "Quadro NVS 510M" },
+	{ 0x029a, "Quadro FX 2500M" },
+	{ 0x029b, "Quadro FX 1500M" },
+	{ 0x029c, "Quadro FX 5500" },
+	{ 0x029d, "Quadro FX 3500" },
+	{ 0x029e, "Quadro FX 1500" },
+	{ 0x029f, "Quadro FX 4500 X2" },
+	{ 0x02e0, "GeForce 7600 GT" },
+	{ 0x02e1, "GeForce 7600 GS" },
+	{ 0x02e2, "GeForce 7300 GT" },
+	{ 0x02e3, "GeForce 7900 GS" },
+	{ 0x02e4, "GeForce 7950 GT" },
+	{ 0x0301, "GeForce FX 5800 Ultra" },
+	{ 0x0302, "GeForce FX 5800" },
+	{ 0x0308, "Quadro FX 2000" },
+	{ 0x0309, "Quadro FX 1000" },
+	{ 0x0311, "GeForce FX 5600 Ultra" },
+	{ 0x0312, "GeForce FX 5600" },
+	{ 0x0314, "GeForce FX 5600XT" },
+	{ 0x031a, "GeForce FX Go5600" },
+	{ 0x031b, "GeForce FX Go5650" },
+	{ 0x031c, "Quadro FX Go700" },
+	{ 0x0320, "GeForce FX 5200" },
+	{ 0x0321, "GeForce FX 5200 Ultra" },
+	{ 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
+	{ 0x0323, "GeForce FX 5200LE" },
+	{ 0x0324, "GeForce FX Go5200" },
+	{ 0x0325, "GeForce FX Go5250" },
+	{ 0x0326, "GeForce FX 5500" },
+	{ 0x0327, "GeForce FX 5100" },
+	{ 0x0328, "GeForce FX Go5200 32M/64M" },
+	{ 0x032a, "Quadro NVS 55/280 PCI" },
+	{ 0x032b, "Quadro FX 500/FX 600" },
+	{ 0x032c, "GeForce FX Go53xx" },
+	{ 0x032d, "GeForce FX Go5100" },
+	{ 0x0330, "GeForce FX 5900 Ultra" },
+	{ 0x0331, "GeForce FX 5900" },
+	{ 0x0332, "GeForce FX 5900XT" },
+	{ 0x0333, "GeForce FX 5950 Ultra" },
+	{ 0x0334, "GeForce FX 5900ZT" },
+	{ 0x0338, "Quadro FX 3000" },
+	{ 0x033f, "Quadro FX 700" },
+	{ 0x0341, "GeForce FX 5700 Ultra" },
+	{ 0x0342, "GeForce FX 5700" },
+	{ 0x0343, "GeForce FX 5700LE" },
+	{ 0x0344, "GeForce FX 5700VE" },
+	{ 0x0347, "GeForce FX Go5700" },
+	{ 0x0348, "GeForce FX Go5700" },
+	{ 0x034c, "Quadro FX Go1000" },
+	{ 0x034e, "Quadro FX 1100" },
+	{ 0x038b, "GeForce 7650 GS" },
+	{ 0x0390, "GeForce 7650 GS" },
+	{ 0x0391, "GeForce 7600 GT" },
+	{ 0x0392, "GeForce 7600 GS" },
+	{ 0x0393, "GeForce 7300 GT" },
+	{ 0x0394, "GeForce 7600 LE" },
+	{ 0x0395, "GeForce 7300 GT" },
+	{ 0x0397, "GeForce Go 7700" },
+	{ 0x0398, "GeForce Go 7600" },
+	{ 0x0399, "GeForce Go 7600 GT" },
+	{ 0x039c, "Quadro FX 560M" },
+	{ 0x039e, "Quadro FX 560" },
+	{ 0x03d0, "GeForce 6150SE nForce 430" },
+	{ 0x03d1, "GeForce 6100 nForce 405" },
+	{ 0x03d2, "GeForce 6100 nForce 400" },
+	{ 0x03d5, "GeForce 6100 nForce 420" },
+	{ 0x03d6, "GeForce 7025 / nForce 630a" },
+	{ 0x0400, "GeForce 8600 GTS" },
+	{ 0x0401, "GeForce 8600 GT" },
+	{ 0x0402, "GeForce 8600 GT" },
+	{ 0x0403, "GeForce 8600 GS" },
+	{ 0x0404, "GeForce 8400 GS" },
+	{ 0x0405, "GeForce 9500M GS" },
+	{ 0x0406, "GeForce 8300 GS" },
+	{ 0x0407, "GeForce 8600M GT" },
+	{ 0x0408, "GeForce 9650M GS" },
+	{ 0x0409, "GeForce 8700M GT" },
+	{ 0x040a, "Quadro FX 370" },
+	{ 0x040b, "Quadro NVS 320M" },
+	{ 0x040c, "Quadro FX 570M" },
+	{ 0x040d, "Quadro FX 1600M" },
+	{ 0x040e, "Quadro FX 570" },
+	{ 0x040f, "Quadro FX 1700" },
+	{ 0x0410, "GeForce GT 330" },
+	{ 0x0420, "GeForce 8400 SE" },
+	{ 0x0421, "GeForce 8500 GT" },
+	{ 0x0422, "GeForce 8400 GS" },
+	{ 0x0423, "GeForce 8300 GS" },
+	{ 0x0424, "GeForce 8400 GS" },
+	{ 0x0425, "GeForce 8600M GS" },
+	{ 0x0426, "GeForce 8400M GT" },
+	{ 0x0427, "GeForce 8400M GS" },
+	{ 0x0428, "GeForce 8400M G" },
+	{ 0x0429, "Quadro NVS 140M" },
+	{ 0x042a, "Quadro NVS 130M" },
+	{ 0x042b, "Quadro NVS 135M" },
+	{ 0x042c, "GeForce 9400 GT" },
+	{ 0x042d, "Quadro FX 360M" },
+	{ 0x042e, "GeForce 9300M G" },
+	{ 0x042f, "Quadro NVS 290" },
+	{ 0x0531, "GeForce 7150M / nForce 630M" },
+	{ 0x0533, "GeForce 7000M / nForce 610M" },
+	{ 0x053a, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053b, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053e, "GeForce 7025 / nForce 630a" },
+	{ 0x05e0, "GeForce GTX 295" },
+	{ 0x05e1, "GeForce GTX 280" },
+	{ 0x05e2, "GeForce GTX 260" },
+	{ 0x05e3, "GeForce GTX 285" },
+	{ 0x05e6, "GeForce GTX 275" },
+	{ 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
+	{ 0x05ea, "GeForce GTX 260" },
+	{ 0x05eb, "GeForce GTX 295" },
+	{ 0x05ed, "Quadroplex 2200 D2" },
+	{ 0x05f8, "Quadroplex 2200 S4" },
+	{ 0x05f9, "Quadro CX" },
+	{ 0x05fd, "Quadro FX 5800" },
+	{ 0x05fe, "Quadro FX 4800" },
+	{ 0x05ff, "Quadro FX 3800" },
+	{ 0x0600, "GeForce 8800 GTS 512" },
+	{ 0x0601, "GeForce 9800 GT" },
+	{ 0x0602, "GeForce 8800 GT" },
+	{ 0x0603, "GeForce GT 230" },
+	{ 0x0604, "GeForce 9800 GX2" },
+	{ 0x0605, "GeForce 9800 GT" },
+	{ 0x0606, "GeForce 8800 GS" },
+	{ 0x0607, "GeForce GTS 240" },
+	{ 0x0608, "GeForce 9800M GTX" },
+	{ 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
+	{ 0x060a, "GeForce GTX 280M" },
+	{ 0x060b, "GeForce 9800M GT" },
+	{ 0x060c, "GeForce 8800M GTX" },
+	{ 0x060d, "GeForce 8800 GS" },
+	{ 0x060f, "GeForce GTX 285M" },
+	{ 0x0610, "GeForce 9600 GSO" },
+	{ 0x0611, "GeForce 8800 GT" },
+	{ 0x0612, "GeForce 9800 GTX/9800 GTX+" },
+	{ 0x0613, "GeForce 9800 GTX+" },
+	{ 0x0614, "GeForce 9800 GT" },
+	{ 0x0615, "GeForce GTS 250" },
+	{ 0x0617, "GeForce 9800M GTX" },
+	{ 0x0618, "GeForce GTX 260M" },
+	{ 0x0619, "Quadro FX 4700 X2" },
+	{ 0x061a, "Quadro FX 3700" },
+	{ 0x061b, "Quadro VX 200" },
+	{ 0x061c, "Quadro FX 3600M" },
+	{ 0x061d, "Quadro FX 2800M" },
+	{ 0x061e, "Quadro FX 3700M" },
+	{ 0x061f, "Quadro FX 3800M" },
+	{ 0x0621, "GeForce GT 230" },
+	{ 0x0622, "GeForce 9600 GT" },
+	{ 0x0623, "GeForce 9600 GS" },
+	{ 0x0625, "GeForce 9600 GSO 512" },
+	{ 0x0626, "GeForce GT 130" },
+	{ 0x0627, "GeForce GT 140" },
+	{ 0x0628, "GeForce 9800M GTS" },
+	{ 0x062a, "GeForce 9700M GTS" },
+	{ 0x062b, "GeForce 9800M GS" },
+	{ 0x062c, "GeForce 9800M GTS" },
+	{ 0x062d, "GeForce 9600 GT" },
+	{ 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
+	{ 0x0630, "GeForce 9700 S" },
+	{ 0x0631, "GeForce GTS 160M" },
+	{ 0x0632, "GeForce GTS 150M" },
+	{ 0x0635, "GeForce 9600 GSO" },
+	{ 0x0637, "GeForce 9600 GT" },
+	{ 0x0638, "Quadro FX 1800" },
+	{ 0x063a, "Quadro FX 2700M" },
+	{ 0x0640, "GeForce 9500 GT" },
+	{ 0x0641, "GeForce 9400 GT" },
+	{ 0x0643, "GeForce 9500 GT" },
+	{ 0x0644, "GeForce 9500 GS" },
+	{ 0x0645, "GeForce 9500 GS" },
+	{ 0x0646, "GeForce GT 120" },
+	{ 0x0647, "GeForce 9600M GT" },
+	{ 0x0648, "GeForce 9600M GS" },
+	{ 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
+	{ 0x064a, "GeForce 9700M GT" },
+	{ 0x064b, "GeForce 9500M G" },
+	{ 0x064c, "GeForce 9650M GT" },
+	{ 0x0651, "GeForce G 110M" },
+	{ 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
+	{ 0x0653, "GeForce GT 120M" },
+	{ 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
+	{ 0x0655, NULL, nvkm_device_pci_10de_0655 },
+	{ 0x0656, NULL, nvkm_device_pci_10de_0656 },
+	{ 0x0658, "Quadro FX 380" },
+	{ 0x0659, "Quadro FX 580" },
+	{ 0x065a, "Quadro FX 1700M" },
+	{ 0x065b, "GeForce 9400 GT" },
+	{ 0x065c, "Quadro FX 770M" },
+	{ 0x06c0, "GeForce GTX 480" },
+	{ 0x06c4, "GeForce GTX 465" },
+	{ 0x06ca, "GeForce GTX 480M" },
+	{ 0x06cd, "GeForce GTX 470" },
+	{ 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
+	{ 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
+	{ 0x06d8, "Quadro 6000" },
+	{ 0x06d9, "Quadro 5000" },
+	{ 0x06da, "Quadro 5000M" },
+	{ 0x06dc, "Quadro 6000" },
+	{ 0x06dd, "Quadro 4000" },
+	{ 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
+	{ 0x06df, "Tesla M2070-Q" },
+	{ 0x06e0, "GeForce 9300 GE" },
+	{ 0x06e1, "GeForce 9300 GS" },
+	{ 0x06e2, "GeForce 8400" },
+	{ 0x06e3, "GeForce 8400 SE" },
+	{ 0x06e4, "GeForce 8400 GS" },
+	{ 0x06e5, "GeForce 9300M GS" },
+	{ 0x06e6, "GeForce G100" },
+	{ 0x06e7, "GeForce 9300 SE" },
+	{ 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
+	{ 0x06e9, "GeForce 9300M GS" },
+	{ 0x06ea, "Quadro NVS 150M" },
+	{ 0x06eb, "Quadro NVS 160M" },
+	{ 0x06ec, "GeForce G 105M" },
+	{ 0x06ef, "GeForce G 103M" },
+	{ 0x06f1, "GeForce G105M" },
+	{ 0x06f8, "Quadro NVS 420" },
+	{ 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
+	{ 0x06fa, "Quadro NVS 450" },
+	{ 0x06fb, "Quadro FX 370M" },
+	{ 0x06fd, "Quadro NVS 295" },
+	{ 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
+	{ 0x07e0, "GeForce 7150 / nForce 630i" },
+	{ 0x07e1, "GeForce 7100 / nForce 630i" },
+	{ 0x07e2, "GeForce 7050 / nForce 630i" },
+	{ 0x07e3, "GeForce 7050 / nForce 610i" },
+	{ 0x07e5, "GeForce 7050 / nForce 620i" },
+	{ 0x0840, "GeForce 8200M" },
+	{ 0x0844, "GeForce 9100M G" },
+	{ 0x0845, "GeForce 8200M G" },
+	{ 0x0846, "GeForce 9200" },
+	{ 0x0847, "GeForce 9100" },
+	{ 0x0848, "GeForce 8300" },
+	{ 0x0849, "GeForce 8200" },
+	{ 0x084a, "nForce 730a" },
+	{ 0x084b, "GeForce 9200" },
+	{ 0x084c, "nForce 980a/780a SLI" },
+	{ 0x084d, "nForce 750a SLI" },
+	{ 0x084f, "GeForce 8100 / nForce 720a" },
+	{ 0x0860, "GeForce 9400" },
+	{ 0x0861, "GeForce 9400" },
+	{ 0x0862, "GeForce 9400M G" },
+	{ 0x0863, "GeForce 9400M" },
+	{ 0x0864, "GeForce 9300" },
+	{ 0x0865, "ION" },
+	{ 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
+	{ 0x0867, "GeForce 9400" },
+	{ 0x0868, "nForce 760i SLI" },
+	{ 0x0869, "GeForce 9400" },
+	{ 0x086a, "GeForce 9400" },
+	{ 0x086c, "GeForce 9300 / nForce 730i" },
+	{ 0x086d, "GeForce 9200" },
+	{ 0x086e, "GeForce 9100M G" },
+	{ 0x086f, "GeForce 8200M G" },
+	{ 0x0870, "GeForce 9400M" },
+	{ 0x0871, "GeForce 9200" },
+	{ 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
+	{ 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
+	{ 0x0874, "ION" },
+	{ 0x0876, "ION" },
+	{ 0x087a, "GeForce 9400" },
+	{ 0x087d, "ION" },
+	{ 0x087e, "ION LE" },
+	{ 0x087f, "ION LE" },
+	{ 0x08a0, "GeForce 320M" },
+	{ 0x08a2, "GeForce 320M" },
+	{ 0x08a3, "GeForce 320M" },
+	{ 0x08a4, "GeForce 320M" },
+	{ 0x08a5, "GeForce 320M" },
+	{ 0x0a20, "GeForce GT 220" },
+	{ 0x0a22, "GeForce 315" },
+	{ 0x0a23, "GeForce 210" },
+	{ 0x0a26, "GeForce 405" },
+	{ 0x0a27, "GeForce 405" },
+	{ 0x0a28, "GeForce GT 230M" },
+	{ 0x0a29, "GeForce GT 330M" },
+	{ 0x0a2a, "GeForce GT 230M" },
+	{ 0x0a2b, "GeForce GT 330M" },
+	{ 0x0a2c, "NVS 5100M" },
+	{ 0x0a2d, "GeForce GT 320M" },
+	{ 0x0a32, "GeForce GT 415" },
+	{ 0x0a34, "GeForce GT 240M" },
+	{ 0x0a35, "GeForce GT 325M" },
+	{ 0x0a38, "Quadro 400" },
+	{ 0x0a3c, "Quadro FX 880M" },
+	{ 0x0a60, "GeForce G210" },
+	{ 0x0a62, "GeForce 205" },
+	{ 0x0a63, "GeForce 310" },
+	{ 0x0a64, "Second Generation ION" },
+	{ 0x0a65, "GeForce 210" },
+	{ 0x0a66, "GeForce 310" },
+	{ 0x0a67, "GeForce 315" },
+	{ 0x0a68, "GeForce G105M" },
+	{ 0x0a69, "GeForce G105M" },
+	{ 0x0a6a, "NVS 2100M" },
+	{ 0x0a6c, "NVS 3100M" },
+	{ 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
+	{ 0x0a6f, "Second Generation ION" },
+	{ 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
+	{ 0x0a71, "GeForce 305M" },
+	{ 0x0a72, "GeForce 310M" },
+	{ 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
+	{ 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
+	{ 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
+	{ 0x0a76, "Second Generation ION" },
+	{ 0x0a78, "Quadro FX 380 LP" },
+	{ 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
+	{ 0x0a7c, "Quadro FX 380M" },
+	{ 0x0ca0, "GeForce GT 330" },
+	{ 0x0ca2, "GeForce GT 320" },
+	{ 0x0ca3, "GeForce GT 240" },
+	{ 0x0ca4, "GeForce GT 340" },
+	{ 0x0ca5, "GeForce GT 220" },
+	{ 0x0ca7, "GeForce GT 330" },
+	{ 0x0ca8, "GeForce GTS 260M" },
+	{ 0x0ca9, "GeForce GTS 250M" },
+	{ 0x0cac, "GeForce GT 220" },
+	{ 0x0caf, "GeForce GT 335M" },
+	{ 0x0cb0, "GeForce GTS 350M" },
+	{ 0x0cb1, "GeForce GTS 360M" },
+	{ 0x0cbc, "Quadro FX 1800M" },
+	{ 0x0dc0, "GeForce GT 440" },
+	{ 0x0dc4, "GeForce GTS 450" },
+	{ 0x0dc5, "GeForce GTS 450" },
+	{ 0x0dc6, "GeForce GTS 450" },
+	{ 0x0dcd, "GeForce GT 555M" },
+	{ 0x0dce, "GeForce GT 555M" },
+	{ 0x0dd1, "GeForce GTX 460M" },
+	{ 0x0dd2, "GeForce GT 445M" },
+	{ 0x0dd3, "GeForce GT 435M" },
+	{ 0x0dd6, "GeForce GT 550M" },
+	{ 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
+	{ 0x0dda, "Quadro 2000M" },
+	{ 0x0de0, "GeForce GT 440" },
+	{ 0x0de1, "GeForce GT 430" },
+	{ 0x0de2, "GeForce GT 420" },
+	{ 0x0de3, "GeForce GT 635M" },
+	{ 0x0de4, "GeForce GT 520" },
+	{ 0x0de5, "GeForce GT 530" },
+	{ 0x0de7, "GeForce GT 610" },
+	{ 0x0de8, "GeForce GT 620M" },
+	{ 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
+	{ 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
+	{ 0x0deb, "GeForce GT 555M" },
+	{ 0x0dec, "GeForce GT 525M" },
+	{ 0x0ded, "GeForce GT 520M" },
+	{ 0x0dee, "GeForce GT 415M" },
+	{ 0x0def, "NVS 5400M" },
+	{ 0x0df0, "GeForce GT 425M" },
+	{ 0x0df1, "GeForce GT 420M" },
+	{ 0x0df2, "GeForce GT 435M" },
+	{ 0x0df3, "GeForce GT 420M" },
+	{ 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
+	{ 0x0df5, "GeForce GT 525M" },
+	{ 0x0df6, "GeForce GT 550M" },
+	{ 0x0df7, "GeForce GT 520M" },
+	{ 0x0df8, "Quadro 600" },
+	{ 0x0df9, "Quadro 500M" },
+	{ 0x0dfa, "Quadro 1000M" },
+	{ 0x0dfc, "NVS 5200M" },
+	{ 0x0e22, "GeForce GTX 460" },
+	{ 0x0e23, "GeForce GTX 460 SE" },
+	{ 0x0e24, "GeForce GTX 460" },
+	{ 0x0e30, "GeForce GTX 470M" },
+	{ 0x0e31, "GeForce GTX 485M" },
+	{ 0x0e3a, "Quadro 3000M" },
+	{ 0x0e3b, "Quadro 4000M" },
+	{ 0x0f00, "GeForce GT 630" },
+	{ 0x0f01, "GeForce GT 620" },
+	{ 0x0f02, "GeForce GT 730" },
+	{ 0x0fc0, "GeForce GT 640" },
+	{ 0x0fc1, "GeForce GT 640" },
+	{ 0x0fc2, "GeForce GT 630" },
+	{ 0x0fc6, "GeForce GTX 650" },
+	{ 0x0fc8, "GeForce GT 740" },
+	{ 0x0fc9, "GeForce GT 730" },
+	{ 0x0fcd, "GeForce GT 755M" },
+	{ 0x0fce, "GeForce GT 640M LE" },
+	{ 0x0fd1, "GeForce GT 650M" },
+	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
+	{ 0x0fd3, "GeForce GT 640M LE" },
+	{ 0x0fd4, "GeForce GTX 660M" },
+	{ 0x0fd5, "GeForce GT 650M" },
+	{ 0x0fd8, "GeForce GT 640M" },
+	{ 0x0fd9, "GeForce GT 645M" },
+	{ 0x0fdf, "GeForce GT 740M" },
+	{ 0x0fe0, "GeForce GTX 660M" },
+	{ 0x0fe1, "GeForce GT 730M" },
+	{ 0x0fe2, "GeForce GT 745M" },
+	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
+	{ 0x0fe4, "GeForce GT 750M" },
+	{ 0x0fe9, "GeForce GT 750M" },
+	{ 0x0fea, "GeForce GT 755M" },
+	{ 0x0fec, "GeForce 710A" },
+	{ 0x0fef, "GRID K340" },
+	{ 0x0ff2, "GRID K1" },
+	{ 0x0ff3, "Quadro K420" },
+	{ 0x0ff6, "Quadro K1100M" },
+	{ 0x0ff8, "Quadro K500M" },
+	{ 0x0ff9, "Quadro K2000D" },
+	{ 0x0ffa, "Quadro K600" },
+	{ 0x0ffb, "Quadro K2000M" },
+	{ 0x0ffc, "Quadro K1000M" },
+	{ 0x0ffd, "NVS 510" },
+	{ 0x0ffe, "Quadro K2000" },
+	{ 0x0fff, "Quadro 410" },
+	{ 0x1001, "GeForce GTX TITAN Z" },
+	{ 0x1004, "GeForce GTX 780" },
+	{ 0x1005, "GeForce GTX TITAN" },
+	{ 0x1007, "GeForce GTX 780" },
+	{ 0x1008, "GeForce GTX 780 Ti" },
+	{ 0x100a, "GeForce GTX 780 Ti" },
+	{ 0x100c, "GeForce GTX TITAN Black" },
+	{ 0x1021, "Tesla K20Xm" },
+	{ 0x1022, "Tesla K20c" },
+	{ 0x1023, "Tesla K40m" },
+	{ 0x1024, "Tesla K40c" },
+	{ 0x1026, "Tesla K20s" },
+	{ 0x1027, "Tesla K40st" },
+	{ 0x1028, "Tesla K20m" },
+	{ 0x1029, "Tesla K40s" },
+	{ 0x102a, "Tesla K40t" },
+	{ 0x102d, "Tesla K80" },
+	{ 0x103a, "Quadro K6000" },
+	{ 0x103c, "Quadro K5200" },
+	{ 0x1040, "GeForce GT 520" },
+	{ 0x1042, "GeForce 510" },
+	{ 0x1048, "GeForce 605" },
+	{ 0x1049, "GeForce GT 620" },
+	{ 0x104a, "GeForce GT 610" },
+	{ 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
+	{ 0x104c, "GeForce GT 705" },
+	{ 0x1050, "GeForce GT 520M" },
+	{ 0x1051, "GeForce GT 520MX" },
+	{ 0x1052, "GeForce GT 520M" },
+	{ 0x1054, "GeForce 410M" },
+	{ 0x1055, "GeForce 410M" },
+	{ 0x1056, "NVS 4200M" },
+	{ 0x1057, "NVS 4200M" },
+	{ 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
+	{ 0x1059, "GeForce 610M" },
+	{ 0x105a, "GeForce 610M" },
+	{ 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
+	{ 0x107c, "NVS 315" },
+	{ 0x107d, "NVS 310" },
+	{ 0x1080, "GeForce GTX 580" },
+	{ 0x1081, "GeForce GTX 570" },
+	{ 0x1082, "GeForce GTX 560 Ti" },
+	{ 0x1084, "GeForce GTX 560" },
+	{ 0x1086, "GeForce GTX 570" },
+	{ 0x1087, "GeForce GTX 560 Ti" },
+	{ 0x1088, "GeForce GTX 590" },
+	{ 0x1089, "GeForce GTX 580" },
+	{ 0x108b, "GeForce GTX 580" },
+	{ 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
+	{ 0x1094, "Tesla M2075" },
+	{ 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
+	{ 0x109a, "Quadro 5010M" },
+	{ 0x109b, "Quadro 7000" },
+	{ 0x10c0, "GeForce 9300 GS" },
+	{ 0x10c3, "GeForce 8400GS" },
+	{ 0x10c5, "GeForce 405" },
+	{ 0x10d8, "NVS 300" },
+	{ 0x1140, NULL, nvkm_device_pci_10de_1140 },
+	{ 0x1180, "GeForce GTX 680" },
+	{ 0x1183, "GeForce GTX 660 Ti" },
+	{ 0x1184, "GeForce GTX 770" },
+	{ 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
+	{ 0x1187, "GeForce GTX 760" },
+	{ 0x1188, "GeForce GTX 690" },
+	{ 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
+	{ 0x118a, "GRID K520" },
+	{ 0x118e, "GeForce GTX 760 (192-bit)" },
+	{ 0x118f, "Tesla K10" },
+	{ 0x1193, "GeForce GTX 760 Ti OEM" },
+	{ 0x1194, "Tesla K8" },
+	{ 0x1195, "GeForce GTX 660" },
+	{ 0x1198, "GeForce GTX 880M" },
+	{ 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
+	{ 0x119a, "GeForce GTX 860M" },
+	{ 0x119d, "GeForce GTX 775M" },
+	{ 0x119e, "GeForce GTX 780M" },
+	{ 0x119f, "GeForce GTX 780M" },
+	{ 0x11a0, "GeForce GTX 680M" },
+	{ 0x11a1, "GeForce GTX 670MX" },
+	{ 0x11a2, "GeForce GTX 675MX" },
+	{ 0x11a3, "GeForce GTX 680MX" },
+	{ 0x11a7, "GeForce GTX 675MX" },
+	{ 0x11b4, "Quadro K4200" },
+	{ 0x11b6, "Quadro K3100M" },
+	{ 0x11b7, "Quadro K4100M" },
+	{ 0x11b8, "Quadro K5100M" },
+	{ 0x11ba, "Quadro K5000" },
+	{ 0x11bc, "Quadro K5000M" },
+	{ 0x11bd, "Quadro K4000M" },
+	{ 0x11be, "Quadro K3000M" },
+	{ 0x11bf, "GRID K2" },
+	{ 0x11c0, "GeForce GTX 660" },
+	{ 0x11c2, "GeForce GTX 650 Ti BOOST" },
+	{ 0x11c3, "GeForce GTX 650 Ti" },
+	{ 0x11c4, "GeForce GTX 645" },
+	{ 0x11c5, "GeForce GT 740" },
+	{ 0x11c6, "GeForce GTX 650 Ti" },
+	{ 0x11c8, "GeForce GTX 650" },
+	{ 0x11cb, "GeForce GT 740" },
+	{ 0x11e0, "GeForce GTX 770M" },
+	{ 0x11e1, "GeForce GTX 765M" },
+	{ 0x11e2, "GeForce GTX 765M" },
+	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
+	{ 0x11fa, "Quadro K4000" },
+	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x1200, "GeForce GTX 560 Ti" },
+	{ 0x1201, "GeForce GTX 560" },
+	{ 0x1203, "GeForce GTX 460 SE v2" },
+	{ 0x1205, "GeForce GTX 460 v2" },
+	{ 0x1206, "GeForce GTX 555" },
+	{ 0x1207, "GeForce GT 645" },
+	{ 0x1208, "GeForce GTX 560 SE" },
+	{ 0x1210, "GeForce GTX 570M" },
+	{ 0x1211, "GeForce GTX 580M" },
+	{ 0x1212, "GeForce GTX 675M" },
+	{ 0x1213, "GeForce GTX 670M" },
+	{ 0x1241, "GeForce GT 545" },
+	{ 0x1243, "GeForce GT 545" },
+	{ 0x1244, "GeForce GTX 550 Ti" },
+	{ 0x1245, "GeForce GTS 450" },
+	{ 0x1246, "GeForce GT 550M" },
+	{ 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
+	{ 0x1248, "GeForce GT 555M" },
+	{ 0x1249, "GeForce GTS 450" },
+	{ 0x124b, "GeForce GT 640" },
+	{ 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
+	{ 0x1251, "GeForce GTX 560M" },
+	{ 0x1280, "GeForce GT 635" },
+	{ 0x1281, "GeForce GT 710" },
+	{ 0x1282, "GeForce GT 640" },
+	{ 0x1284, "GeForce GT 630" },
+	{ 0x1286, "GeForce GT 720" },
+	{ 0x1287, "GeForce GT 730" },
+	{ 0x1288, "GeForce GT 720" },
+	{ 0x1289, "GeForce GT 710" },
+	{ 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
+	{ 0x1291, "GeForce GT 735M" },
+	{ 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
+	{ 0x1293, "GeForce GT 730M" },
+	{ 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
+	{ 0x1296, "GeForce 825M" },
+	{ 0x1298, "GeForce GT 720M" },
+	{ 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
+	{ 0x129a, "GeForce 910M" },
+	{ 0x12b9, "Quadro K610M" },
+	{ 0x12ba, "Quadro K510M" },
+	{ 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
+	{ 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
+	{ 0x1344, "GeForce 845M" },
+	{ 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
+	{ 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
+	{ 0x137a, NULL, nvkm_device_pci_10de_137a },
+	{ 0x137d, NULL, nvkm_device_pci_10de_137d },
+	{ 0x1380, "GeForce GTX 750 Ti" },
+	{ 0x1381, "GeForce GTX 750" },
+	{ 0x1382, "GeForce GTX 745" },
+	{ 0x1390, "GeForce 845M" },
+	{ 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
+	{ 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
+	{ 0x1393, "GeForce 840M" },
+	{ 0x1398, "GeForce 845M" },
+	{ 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
+	{ 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
+	{ 0x139c, "GeForce 940M" },
+	{ 0x13b3, "Quadro K2200M" },
+	{ 0x13ba, "Quadro K2200" },
+	{ 0x13bb, "Quadro K620" },
+	{ 0x13bc, "Quadro K1200" },
+	{ 0x13c0, "GeForce GTX 980" },
+	{ 0x13c2, "GeForce GTX 970" },
+	{ 0x13d7, "GeForce GTX 980M" },
+	{ 0x13d8, "GeForce GTX 970M" },
+	{ 0x13d9, "GeForce GTX 965M" },
+	{ 0x1401, "GeForce GTX 960" },
+	{ 0x1617, "GeForce GTX 980M" },
+	{ 0x1618, "GeForce GTX 970M" },
+	{ 0x1619, "GeForce GTX 965M" },
+	{ 0x17c2, "GeForce GTX TITAN X" },
+	{ 0x17c8, "GeForce GTX 980 Ti" },
+	{ 0x17f0, "Quadro M6000" },
+	{}
+};
+
+static struct nvkm_device_pci *
+nvkm_device_pci(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_pci, device);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_start(pdev->pdev, bar);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_len(pdev->pdev, bar);
+}
+
+static void
+nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
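+	/* Disable the PCI device across suspend; preinit re-enables it
+	 * and restores bus mastering on the way back up.
+	 */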
+	if (suspend) {
+		pci_disable_device(pdev->pdev);
+		pdev->suspend = true;
+	}
+}
+
+static int
+nvkm_device_pci_preinit(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	if (pdev->suspend) {
+		int ret = pci_enable_device(pdev->pdev);
+		if (ret)
+			return ret;
+		pci_set_master(pdev->pdev);
+		pdev->suspend = false;
+	}
+	return 0;
+}
+
+static void *
+nvkm_device_pci_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	pci_disable_device(pdev->pdev);
+	return pdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_pci_func = {
+	.pci = nvkm_device_pci,
+	.dtor = nvkm_device_pci_dtor,
+	.preinit = nvkm_device_pci_preinit,
+	.fini = nvkm_device_pci_fini,
+	.resource_addr = nvkm_device_pci_resource_addr,
+	.resource_size = nvkm_device_pci_resource_size,
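+	/* Treat CPU mappings as cache-coherent with the bus everywhere
+	 * except 32-bit ARM.
+	 */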
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+};
+
+int
+nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
+		    bool detect, bool mmio, u64 subdev_mask,
+		    struct nvkm_device **pdevice)
+{
+	const struct nvkm_device_quirk *quirk = NULL;
+	const struct nvkm_device_pci_device *pcid;
+	const struct nvkm_device_pci_vendor *pciv;
+	const char *name = NULL;
+	struct nvkm_device_pci *pdev;
+	int ret;
+
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		return ret;
+
+	switch (pci_dev->vendor) {
+	case 0x10de: pcid = nvkm_device_pci_10de; break;
+	default:
+		pcid = NULL;
+		break;
+	}
+
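+	/*
+	 * Resolve a human-readable name and any board-specific quirks:
+	 * match the PCI device ID first, then search that entry's
+	 * (subsystem vendor, subsystem device) list for an exact board.
+	 */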
+	while (pcid && pcid->device) {
+		if (pcid->device == pci_dev->device) {
+			pciv = pcid->vendor;
+			while (pciv && pciv->vendor) {
+				if (pciv->vendor == pci_dev->subsystem_vendor &&
+				    pciv->device == pci_dev->subsystem_device) {
+					quirk = &pciv->quirk;
+					name  =  pciv->name;
+					break;
+				}
+				pciv++;
+			}
+			if (!name)
+				name = pcid->name;
+			break;
+		}
+		pcid++;
+	}
+
+	if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
+		pci_disable_device(pci_dev);
+		return -ENOMEM;
+	}
+	*pdevice = &pdev->device;
+	pdev->pdev = pci_dev;
+
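+	/* Pack the PCI address (domain:bus:slot.func) into a unique handle. */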
+	return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+				pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+				pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+				NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+				(u64)pci_domain_nr(pci_dev->bus) << 32 |
+				     pci_dev->bus->number << 16 |
+				     PCI_SLOT(pci_dev->devfn) << 8 |
+				     PCI_FUNC(pci_dev->devfn), name,
+				cfg, dbg, detect, mmio, subdev_mask,
+				&pdev->device);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 8d3590e..ed3ad2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -2,15 +2,49 @@
 #define __NVKM_DEVICE_PRIV_H__
 #include <core/device.h>
 
-extern struct nvkm_oclass nvkm_control_oclass[];
+#include <subdev/bar.h>
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/clk.h>
+#include <subdev/devinit.h>
+#include <subdev/fb.h>
+#include <subdev/fuse.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/ltc.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/mxm.h>
+#include <subdev/pci.h>
+#include <subdev/pmu.h>
+#include <subdev/therm.h>
+#include <subdev/timer.h>
+#include <subdev/volt.h>
 
-int nv04_identify(struct nvkm_device *);
-int nv10_identify(struct nvkm_device *);
-int nv20_identify(struct nvkm_device *);
-int nv30_identify(struct nvkm_device *);
-int nv40_identify(struct nvkm_device *);
-int nv50_identify(struct nvkm_device *);
-int gf100_identify(struct nvkm_device *);
-int gk104_identify(struct nvkm_device *);
-int gm100_identify(struct nvkm_device *);
+#include <engine/bsp.h>
+#include <engine/ce.h>
+#include <engine/cipher.h>
+#include <engine/disp.h>
+#include <engine/dma.h>
+#include <engine/fifo.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/msvld.h>
+#include <engine/pm.h>
+#include <engine/sec.h>
+#include <engine/sw.h>
+#include <engine/vp.h>
+
+int  nvkm_device_ctor(const struct nvkm_device_func *,
+		      const struct nvkm_device_quirk *,
+		      struct device *, enum nvkm_device_type, u64 handle,
+		      const char *name, const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device *);
+int  nvkm_device_init(struct nvkm_device *);
+int  nvkm_device_fini(struct nvkm_device *, bool suspend);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
new file mode 100644
index 0000000..da57c8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/tegra.h>
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+#include "priv.h"
+
+static int
+nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
+{
+	int ret;
+
+	ret = regulator_enable(tdev->vdd);
+	if (ret)
+		goto err_power;
+
+	ret = clk_prepare_enable(tdev->clk);
+	if (ret)
+		goto err_clk;
+	ret = clk_prepare_enable(tdev->clk_pwr);
+	if (ret)
+		goto err_clk_pwr;
+	clk_set_rate(tdev->clk_pwr, 204000000);
+	udelay(10);
+
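+	/*
+	 * With the rail powered and clocks running, sequence the GPU
+	 * through reset while removing the 3D partition's rail clamp.
+	 */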
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+	if (ret)
+		goto err_clamp;
+	udelay(10);
+
+	reset_control_deassert(tdev->rst);
+	udelay(10);
+
+	return 0;
+
+err_clamp:
+	clk_disable_unprepare(tdev->clk_pwr);
+err_clk_pwr:
+	clk_disable_unprepare(tdev->clk);
+err_clk:
+	regulator_disable(tdev->vdd);
+err_power:
+	return ret;
+}
+
+static int
+nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
+{
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	clk_disable_unprepare(tdev->clk_pwr);
+	clk_disable_unprepare(tdev->clk);
+	udelay(10);
+
+	return regulator_disable(tdev->vdd);
+}
+
+static void
+nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	struct device *dev = &tdev->pdev->dev;
+	unsigned long pgsize_bitmap;
+	int ret;
+
+	mutex_init(&tdev->iommu.mutex);
+
+	if (iommu_present(&platform_bus_type)) {
+		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		if (!tdev->iommu.domain)
+			goto error;
+
+		/*
+		 * An IOMMU is only usable if it supports page sizes smaller
+		 * than or equal to the system's PAGE_SIZE, with a preference
+		 * for an exact match.
+		 */
+		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+		if (pgsize_bitmap & PAGE_SIZE) {
+			tdev->iommu.pgshift = PAGE_SHIFT;
+		} else {
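+			/*
+			 * Fall back to the largest supported page size
+			 * below PAGE_SIZE; fls() is 1-based, hence the
+			 * -1 to turn it into a shift.
+			 */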
+			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+			if (tdev->iommu.pgshift == 0) {
+				dev_warn(dev, "unsupported IOMMU page size\n");
+				goto free_domain;
+			}
+			tdev->iommu.pgshift -= 1;
+		}
+
+		ret = iommu_attach_device(tdev->iommu.domain, dev);
+		if (ret)
+			goto free_domain;
+
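+		/* Manage a 40-bit IOVA space in units of IOMMU pages. */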
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
+		if (ret)
+			goto detach_device;
+	}
+
+	return;
+
+detach_device:
+	iommu_detach_device(tdev->iommu.domain, dev);
+
+free_domain:
+	iommu_domain_free(tdev->iommu.domain);
+
+error:
+	tdev->iommu.domain = NULL;
+	tdev->iommu.pgshift = 0;
+	dev_err(dev, "cannot initialize IOMMU MM\n");
+#endif
+}
+
+static void
+nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	if (tdev->iommu.domain) {
+		nvkm_mm_fini(&tdev->iommu.mm);
+		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
+		iommu_domain_free(tdev->iommu.domain);
+	}
+#endif
+}
+
+static struct nvkm_device_tegra *
+nvkm_device_tegra(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_tegra, device);
+}
+
+static struct resource *
+nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? res->start : 0;
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? resource_size(res) : 0;
+}
+
+static irqreturn_t
+nvkm_device_tegra_intr(int irq, void *arg)
+{
+	struct nvkm_device_tegra *tdev = arg;
+	struct nvkm_mc *mc = tdev->device.mc;
+	bool handled = false;
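+	/* Mask MC interrupts, dispatch them, then re-arm the line. */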
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void
+nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	if (tdev->irq) {
+		free_irq(tdev->irq, tdev);
+		tdev->irq = 0;
+	}
+}
+
+static int
+nvkm_device_tegra_init(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	int irq, ret;
+
+	irq = platform_get_irq_byname(tdev->pdev, "stall");
+	if (irq < 0)
+		return irq;
+
+	ret = request_irq(irq, nvkm_device_tegra_intr,
+			  IRQF_SHARED, "nvkm", tdev);
+	if (ret)
+		return ret;
+
+	tdev->irq = irq;
+	return 0;
+}
+
+static void *
+nvkm_device_tegra_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	nvkm_device_tegra_power_down(tdev);
+	nvkm_device_tegra_remove_iommu(tdev);
+	return tdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_tegra_func = {
+	.tegra = nvkm_device_tegra,
+	.dtor = nvkm_device_tegra_dtor,
+	.init = nvkm_device_tegra_init,
+	.fini = nvkm_device_tegra_fini,
+	.resource_addr = nvkm_device_tegra_resource_addr,
+	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
+};
+
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	struct nvkm_device_tegra *tdev;
+	int ret;
+
+	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdevice = &tdev->device;
+	tdev->pdev = pdev;
+	tdev->irq = -1;
+
+	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(tdev->vdd))
+		return PTR_ERR(tdev->vdd);
+
+	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->rst))
+		return PTR_ERR(tdev->rst);
+
+	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->clk))
+		return PTR_ERR(tdev->clk);
+
+	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+	if (IS_ERR(tdev->clk_pwr))
+		return PTR_ERR(tdev->clk_pwr);
+
+	nvkm_device_tegra_probe_iommu(tdev);
+
+	ret = nvkm_device_tegra_power_up(tdev);
+	if (ret)
+		return ret;
+
+	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
+			       cfg, dbg, detect, mmio, subdev_mask,
+			       &tdev->device);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#else
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	return -ENOSYS;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
new file mode 100644
index 0000000..1ae48f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
+#include "priv.h"
+#include "ctrl.h"
+
+#include <core/client.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nvkm_udevice {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+static int
+nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_object *object = &udev->object;
+	struct nvkm_device *device = udev->device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	union {
+		struct nv_device_info_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "device info size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "device info vers %d\n", args->v0.version);
+	} else
+		return ret;
+
+	switch (device->chipset) {
+	case 0x01a:
+	case 0x01f:
+	case 0x04c:
+	case 0x04e:
+	case 0x063:
+	case 0x067:
+	case 0x068:
+	case 0x0aa:
+	case 0x0ac:
+	case 0x0af:
+		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+		break;
+	default:
+		switch (device->type) {
+		case NVKM_DEVICE_PCI:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+			break;
+		case NVKM_DEVICE_AGP:
+			args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+			break;
+		case NVKM_DEVICE_PCIE:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+			break;
+		case NVKM_DEVICE_TEGRA:
+			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		break;
+	}
+
+	switch (device->card_type) {
+	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+	case NV_10:
+	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+	default:
+		args->v0.family = 0;
+		break;
+	}
+
+	args->v0.chipset  = device->chipset;
+	args->v0.revision = device->chiprev;
+	if (fb && fb->ram)
+		args->v0.ram_size = args->v0.ram_user = fb->ram->size;
+	else
+		args->v0.ram_size = args->v0.ram_user = 0;
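+	/* The instance-memory carve-out is not available to clients. */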
+	if (imem && args->v0.ram_size > 0)
+		args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
+	strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
+	strncpy(args->v0.name, device->name, sizeof(args->v0.name));
+	return 0;
+}
+
+static int
+nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_device *device = udev->device;
+	union {
+		struct nv_device_time_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.time = nvkm_timer_read(device->timer);
+	}
+
+	return ret;
+}
+
+static int
+nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	switch (mthd) {
+	case NV_DEVICE_V0_INFO:
+		return nvkm_udevice_info(udev, data, size);
+	case NV_DEVICE_V0_TIME:
+		return nvkm_udevice_time(udev, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd08(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd16(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd32(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr08(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr16(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr32(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
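+	/* Map resource 0, the device's register aperture (BAR0 on PCI). */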
+	*addr = device->func->resource_addr(device, 0);
+	*size = device->func->resource_size(device, 0);
+	return 0;
+}
+
+static int
+nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
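+	/* Only the last reference actually finalises the hardware; if a
+	 * suspend-time fini fails, keep the reference so state stays sane.
+	 */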
+	mutex_lock(&device->mutex);
+	if (!--device->refcount) {
+		ret = nvkm_device_fini(device, suspend);
+		if (ret && suspend) {
+			device->refcount++;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_init(struct nvkm_object *object)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
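+	/* Only the first reference actually initialises the hardware. */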
+	mutex_lock(&device->mutex);
+	if (!device->refcount++) {
+		ret = nvkm_device_init(device);
+		if (ret) {
+			device->refcount--;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
+		       void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	return sclass->ctor(udev->device, oclass, data, size, pobject);
+}
+
+static int
+nvkm_udevice_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	struct nvkm_engine *engine;
+	u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
+		   (1ULL << NVKM_ENGINE_FIFO) |
+		   (1ULL << NVKM_ENGINE_DISP) |
+		   (1ULL << NVKM_ENGINE_PM);
+	const struct nvkm_device_oclass *sclass = NULL;
+	int i;
+
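+	/* Walk the whitelisted engines, lowest bit first, letting each
+	 * consume indices from its class list until one matches.
+	 */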
+	for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)) ||
+		    !(engine->func->base.sclass))
+			continue;
+		oclass->engine = engine;
+
+		index -= engine->func->base.sclass(oclass, index, &sclass);
+	}
+
+	if (!sclass) {
+		switch (index) {
+		case 0: sclass = &nvkm_control_oclass; break;
+		default:
+			return -EINVAL;
+		}
+		oclass->base = sclass->base;
+	}
+
+	oclass->ctor = nvkm_udevice_child_new;
+	oclass->priv = sclass;
+	return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_udevice_super = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.map = nvkm_udevice_map,
+	.rd08 = nvkm_udevice_rd08,
+	.rd16 = nvkm_udevice_rd16,
+	.rd32 = nvkm_udevice_rd32,
+	.wr08 = nvkm_udevice_wr08,
+	.wr16 = nvkm_udevice_wr16,
+	.wr32 = nvkm_udevice_wr32,
+	.sclass = nvkm_udevice_child_get,
+};
+
+static const struct nvkm_object_func
+nvkm_udevice = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.sclass = nvkm_udevice_child_get,
+};
+
+int
+nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
+{
+	union {
+		struct nv_device_v0 v0;
+	} *args = data;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = &client->object;
+	const struct nvkm_object_func *func;
+	struct nvkm_udevice *udev;
+	int ret;
+
+	nvif_ioctl(parent, "create device size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create device v%d device %016llx\n",
+			   args->v0.version, args->v0.device);
+	} else
+		return ret;
+
+	/* give privileged clients register access */
+	if (client->super)
+		func = &nvkm_udevice_super;
+	else
+		func = &nvkm_udevice;
+
+	if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(func, oclass, &udev->object);
+	*pobject = &udev->object;
+
+	/* find the device that matches what the client requested */
+	if (args->v0.device != ~0)
+		udev->device = nvkm_device_find(args->v0.device);
+	else
+		udev->device = nvkm_device_find(client->device);
+	if (!udev->device)
+		return -ENODEV;
+
+	return 0;
+}
+
+const struct nvkm_sclass
+nvkm_udevice_sclass = {
+	.oclass = NV_DEVICE,
+	.minver = 0,
+	.maxver = 0,
+	.ctor = nvkm_udevice_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 16a4e2a..04f6045 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,29 +1,93 @@
 nvkm-y += nvkm/engine/disp/base.o
-nvkm-y += nvkm/engine/disp/conn.o
-nvkm-y += nvkm/engine/disp/outp.o
-nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/nv04.o
 nvkm-y += nvkm/engine/disp/nv50.o
 nvkm-y += nvkm/engine/disp/g84.o
 nvkm-y += nvkm/engine/disp/g94.o
 nvkm-y += nvkm/engine/disp/gt200.o
 nvkm-y += nvkm/engine/disp/gt215.o
-nvkm-y += nvkm/engine/disp/gf110.o
+nvkm-y += nvkm/engine/disp/gf119.o
 nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm204.o
+
+nvkm-y += nvkm/engine/disp/outp.o
+nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/dacnv50.o
-nvkm-y += nvkm/engine/disp/dport.o
-nvkm-y += nvkm/engine/disp/hdagt215.o
-nvkm-y += nvkm/engine/disp/hdagf110.o
-nvkm-y += nvkm/engine/disp/hdmig84.o
-nvkm-y += nvkm/engine/disp/hdmigt215.o
-nvkm-y += nvkm/engine/disp/hdmigf110.o
-nvkm-y += nvkm/engine/disp/hdmigk104.o
 nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
-nvkm-y += nvkm/engine/disp/sorgf110.o
+nvkm-y += nvkm/engine/disp/sorgf119.o
 nvkm-y += nvkm/engine/disp/sorgm204.o
+nvkm-y += nvkm/engine/disp/dport.o
+
+nvkm-y += nvkm/engine/disp/conn.o
+
+nvkm-y += nvkm/engine/disp/hdagt215.o
+nvkm-y += nvkm/engine/disp/hdagf119.o
+
+nvkm-y += nvkm/engine/disp/hdmig84.o
+nvkm-y += nvkm/engine/disp/hdmigt215.o
+nvkm-y += nvkm/engine/disp/hdmigf119.o
+nvkm-y += nvkm/engine/disp/hdmigk104.o
+
 nvkm-y += nvkm/engine/disp/vga.o
+
+nvkm-y += nvkm/engine/disp/rootnv04.o
+nvkm-y += nvkm/engine/disp/rootnv50.o
+nvkm-y += nvkm/engine/disp/rootg84.o
+nvkm-y += nvkm/engine/disp/rootg94.o
+nvkm-y += nvkm/engine/disp/rootgt200.o
+nvkm-y += nvkm/engine/disp/rootgt215.o
+nvkm-y += nvkm/engine/disp/rootgf119.o
+nvkm-y += nvkm/engine/disp/rootgk104.o
+nvkm-y += nvkm/engine/disp/rootgk110.o
+nvkm-y += nvkm/engine/disp/rootgm107.o
+nvkm-y += nvkm/engine/disp/rootgm204.o
+
+nvkm-y += nvkm/engine/disp/channv50.o
+nvkm-y += nvkm/engine/disp/changf119.o
+
+nvkm-y += nvkm/engine/disp/dmacnv50.o
+nvkm-y += nvkm/engine/disp/dmacgf119.o
+
+nvkm-y += nvkm/engine/disp/basenv50.o
+nvkm-y += nvkm/engine/disp/baseg84.o
+nvkm-y += nvkm/engine/disp/basegt200.o
+nvkm-y += nvkm/engine/disp/basegt215.o
+nvkm-y += nvkm/engine/disp/basegf119.o
+nvkm-y += nvkm/engine/disp/basegk104.o
+nvkm-y += nvkm/engine/disp/basegk110.o
+
+nvkm-y += nvkm/engine/disp/corenv50.o
+nvkm-y += nvkm/engine/disp/coreg84.o
+nvkm-y += nvkm/engine/disp/coreg94.o
+nvkm-y += nvkm/engine/disp/coregt200.o
+nvkm-y += nvkm/engine/disp/coregt215.o
+nvkm-y += nvkm/engine/disp/coregf119.o
+nvkm-y += nvkm/engine/disp/coregk104.o
+nvkm-y += nvkm/engine/disp/coregk110.o
+nvkm-y += nvkm/engine/disp/coregm107.o
+nvkm-y += nvkm/engine/disp/coregm204.o
+
+nvkm-y += nvkm/engine/disp/ovlynv50.o
+nvkm-y += nvkm/engine/disp/ovlyg84.o
+nvkm-y += nvkm/engine/disp/ovlygt200.o
+nvkm-y += nvkm/engine/disp/ovlygt215.o
+nvkm-y += nvkm/engine/disp/ovlygf119.o
+nvkm-y += nvkm/engine/disp/ovlygk104.o
+
+nvkm-y += nvkm/engine/disp/piocnv50.o
+nvkm-y += nvkm/engine/disp/piocgf119.o
+
+nvkm-y += nvkm/engine/disp/cursnv50.o
+nvkm-y += nvkm/engine/disp/cursg84.o
+nvkm-y += nvkm/engine/disp/cursgt215.o
+nvkm-y += nvkm/engine/disp/cursgf119.o
+nvkm-y += nvkm/engine/disp/cursgk104.o
+
+nvkm-y += nvkm/engine/disp/oimmnv50.o
+nvkm-y += nvkm/engine/disp/oimmg84.o
+nvkm-y += nvkm/engine/disp/oimmgt215.o
+nvkm-y += nvkm/engine/disp/oimmgf119.o
+nvkm-y += nvkm/engine/disp/oimmgk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 23d1b5c..44b6771 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -25,7 +25,9 @@
 #include "conn.h"
 #include "outp.h"
 
+#include <core/client.h>
 #include <core/notify.h>
+#include <core/oproxy.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
@@ -33,7 +35,21 @@
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
-int
+static void
+nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_init(disp, head);
+}
+
+static int
 nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -56,6 +72,13 @@
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_disp_vblank_func = {
+	.ctor = nvkm_disp_vblank_ctor,
+	.init = nvkm_disp_vblank_init,
+	.fini = nvkm_disp_vblank_fini,
+};
+
 void
 nvkm_disp_vblank(struct nvkm_disp *disp, int head)
 {
@@ -100,7 +123,7 @@
 int
 nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
 {
-	struct nvkm_disp *disp = (void *)object->engine;
+	struct nvkm_disp *disp = nvkm_disp(object->engine);
 	switch (type) {
 	case NV04_DISP_NTFY_VBLANK:
 		*event = &disp->vblank;
@@ -114,127 +137,303 @@
 	return -EINVAL;
 }
 
-int
-_nvkm_disp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp;
-	int ret;
-
-	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
-		if (ret && suspend)
-			goto fail_outp;
-	}
-
-	return nvkm_engine_fini(&disp->base, suspend);
-
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->init(nv_object(outp));
-	}
-
-	return ret;
+	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client == oproxy)
+		disp->client = NULL;
+	mutex_unlock(&disp->engine.subdev.mutex);
 }
 
-int
-_nvkm_disp_init(struct nvkm_object *object)
+static const struct nvkm_oproxy_func
+nvkm_disp_class = {
+	.dtor[1] = nvkm_disp_class_del,
+};
+
+static int
+nvkm_disp_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp;
+	const struct nvkm_disp_oclass *sclass = oclass->engn;
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	struct nvkm_oproxy *oproxy;
 	int ret;
 
-	ret = nvkm_engine_init(&disp->base);
+	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
 	if (ret)
 		return ret;
+	*pobject = &oproxy->base;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->init(nv_object(outp));
-		if (ret)
-			goto fail_outp;
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client) {
+		mutex_unlock(&disp->engine.subdev.mutex);
+		return -EBUSY;
 	}
+	disp->client = oproxy;
+	mutex_unlock(&disp->engine.subdev.mutex);
 
-	return ret;
-
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->fini(nv_object(outp), false);
-	}
-
-	return ret;
+	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
 }
 
-void
-_nvkm_disp_dtor(struct nvkm_object *object)
+static const struct nvkm_device_oclass
+nvkm_disp_sclass = {
+	.ctor = nvkm_disp_class_new,
+};
+
+static int
+nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp, *outt;
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	if (index == 0) {
+		const struct nvkm_disp_oclass *root = disp->func->root(disp);
+		oclass->base = root->base;
+		oclass->engn = root;
+		*class = &nvkm_disp_sclass;
+		return 0;
+	}
+	return 1;
+}
+
+static void
+nvkm_disp_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	disp->func->intr(disp);
+}
+
+static int
+nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		nvkm_output_fini(outp);
+	}
+
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_fini(conn);
+	}
+
+	return 0;
+}
+
+static int
+nvkm_disp_init(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_init(conn);
+	}
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		nvkm_output_init(outp);
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_disp_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+	void *data = disp;
+
+	if (disp->func->dtor)
+		data = disp->func->dtor(disp);
 
 	nvkm_event_fini(&disp->vblank);
 	nvkm_event_fini(&disp->hpd);
 
-	if (disp->outp.next) {
-		list_for_each_entry_safe(outp, outt, &disp->outp, head) {
-			nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
-		}
+	while (!list_empty(&disp->outp)) {
+		outp = list_first_entry(&disp->outp, typeof(*outp), head);
+		list_del(&outp->head);
+		nvkm_output_del(&outp);
 	}
 
-	nvkm_engine_destroy(&disp->base);
+	while (!list_empty(&disp->conn)) {
+		conn = list_first_entry(&disp->conn, typeof(*conn), head);
+		list_del(&conn->head);
+		nvkm_connector_del(&conn);
+	}
+
+	return data;
 }
 
+static const struct nvkm_engine_func
+nvkm_disp = {
+	.dtor = nvkm_disp_dtor,
+	.init = nvkm_disp_init,
+	.fini = nvkm_disp_fini,
+	.intr = nvkm_disp_intr,
+	.base.sclass = nvkm_disp_class_get,
+};
+
 int
-nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int heads, const char *intname,
-		  const char *extname, int length, void **pobject)
+nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp *disp)
 {
-	struct nvkm_disp_impl *impl = (void *)oclass;
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_disp *disp;
-	struct nvkm_oclass **sclass;
-	struct nvkm_object *object;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp, *outt, *pair;
+	struct nvkm_connector *conn;
+	struct nvbios_connE connE;
 	struct dcb_output dcbE;
 	u8  hpd = 0, ver, hdr;
 	u32 data;
 	int ret, i;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
-				  extname, length, pobject);
-	disp = *pobject;
+	INIT_LIST_HEAD(&disp->outp);
+	INIT_LIST_HEAD(&disp->conn);
+	disp->func = func;
+	disp->head.nr = heads;
+
+	ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
+			       true, &disp->engine);
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&disp->outp);
-
 	/* create output objects for each display path in the vbios */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		const struct nvkm_disp_func_outp *outps;
+		int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
+			    struct nvkm_output **);
+
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
 			break;
-		data = dcbE.location << 4 | dcbE.type;
+		outp = NULL;
 
-		oclass = nvkm_output_oclass;
-		sclass = impl->outp;
-		while (sclass && sclass[0]) {
-			if (sclass[0]->handle == data) {
-				oclass = sclass[0];
-				break;
-			}
-			sclass++;
+		switch (dcbE.location) {
+		case 0: outps = &disp->func->outp.internal; break;
+		case 1: outps = &disp->func->outp.external; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d locn %d unknown\n", i, dcbE.location);
+			continue;
 		}
 
-		nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
+		switch (dcbE.type) {
+		case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
+		case DCB_OUTPUT_TV    : ctor = outps->tv  ; break;
+		case DCB_OUTPUT_TMDS  : ctor = outps->tmds; break;
+		case DCB_OUTPUT_LVDS  : ctor = outps->lvds; break;
+		case DCB_OUTPUT_DP    : ctor = outps->dp  ; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d type %d unknown\n", i, dcbE.type);
+			continue;
+		}
+
+		if (ctor)
+			ret = ctor(disp, i, &dcbE, &outp);
+		else
+			ret = -ENODEV;
+
+		if (ret) {
+			if (ret == -ENODEV) {
+				nvkm_debug(&disp->engine.subdev,
+					   "dcb %d %d/%d not supported\n",
+					   i, dcbE.location, dcbE.type);
+				continue;
+			}
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d\n", i);
+			nvkm_output_del(&outp);
+			continue;
+		}
+
+		list_add_tail(&outp->head, &disp->outp);
 		hpd = max(hpd, (u8)(dcbE.connector + 1));
 	}
 
+	/* create connector objects based on the outputs we support */
+	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+		/* bios data *should* give us the most useful information */
+		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
+				     &connE);
+
+		/* no bios connector data... */
+		if (!data) {
+			/* heuristic: anything with the same ccb index is
+			 * considered to be on the same connector; any
+			 * output path without an associated ccb entry is
+			 * put on its own connector
+			 */
+			int ccb_index = outp->info.i2c_index;
+			if (ccb_index != 0xf) {
+				list_for_each_entry(pair, &disp->outp, head) {
+					if (pair->info.i2c_index == ccb_index) {
+						outp->conn = pair->conn;
+						break;
+					}
+				}
+			}
+
+			/* connector shared with another output path */
+			if (outp->conn)
+				continue;
+
+			memset(&connE, 0x00, sizeof(connE));
+			connE.type = DCB_CONNECTOR_NONE;
+			i = -1;
+		} else {
+			i = outp->info.connector;
+		}
+
+		/* check that we haven't already created this connector */
+		list_for_each_entry(conn, &disp->conn, head) {
+			if (conn->index == outp->info.connector) {
+				outp->conn = conn;
+				break;
+			}
+		}
+
+		if (outp->conn)
+			continue;
+
+		/* apparently we need to create a new one! */
+		ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
+		if (ret) {
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d conn: %d\n",
+				   outp->index, ret);
+			nvkm_connector_del(&outp->conn);
+			list_del(&outp->head);
+			nvkm_output_del(&outp);
+			continue;
+		}
+
+		list_add_tail(&outp->conn->head, &disp->conn);
+	}
+
 	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
 	if (ret)
 		return ret;
 
-	ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
+	ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
 	if (ret)
 		return ret;
 
 	return 0;
 }
+
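+/* The _ctor()/_new_() split mirrors the pattern used elsewhere in nvkm:
+ * chip-specific code that embeds struct nvkm_disp in a larger object
+ * allocates that object itself and calls nvkm_disp_ctor() directly,
+ * rather than going through this helper.
+ */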
+int
+nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_disp_ctor(func, device, index, heads, *pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
new file mode 100644
index 0000000..6d17630
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00c4, 0x610800 },
+		{ 0x00c8, 0x61080c },
+		{ 0x00cc, 0x610818 },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x00fc, 0x610824 },
+		{ 0x0100, 0x610894 },
+		{ 0x0104, 0x61082c },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_base_oclass = {
+	.base.oclass = G82_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
new file mode 100644
index 0000000..ebcb925
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x661080 },
+		{ 0x0084, 0x661084 },
+		{ 0x0088, 0x661088 },
+		{ 0x008c, 0x66108c },
+		{ 0x0090, 0x661090 },
+		{ 0x0094, 0x661094 },
+		{ 0x00a0, 0x6610a0 },
+		{ 0x00a4, 0x6610a4 },
+		{ 0x00c0, 0x6610c0 },
+		{ 0x00c4, 0x6610c4 },
+		{ 0x00c8, 0x6610c8 },
+		{ 0x00cc, 0x6610cc },
+		{ 0x00e0, 0x6610e0 },
+		{ 0x00e4, 0x6610e4 },
+		{ 0x00e8, 0x6610e8 },
+		{ 0x00ec, 0x6610ec },
+		{ 0x00fc, 0x6610fc },
+		{ 0x0100, 0x661100 },
+		{ 0x0104, 0x661104 },
+		{ 0x0108, 0x661108 },
+		{ 0x010c, 0x66110c },
+		{ 0x0110, 0x661110 },
+		{ 0x0114, 0x661114 },
+		{ 0x0118, 0x661118 },
+		{ 0x011c, 0x66111c },
+		{ 0x0130, 0x661130 },
+		{ 0x0134, 0x661134 },
+		{ 0x0138, 0x661138 },
+		{ 0x013c, 0x66113c },
+		{ 0x0140, 0x661140 },
+		{ 0x0144, 0x661144 },
+		{ 0x0148, 0x661148 },
+		{ 0x014c, 0x66114c },
+		{ 0x0150, 0x661150 },
+		{ 0x0154, 0x661154 },
+		{ 0x0158, 0x661158 },
+		{ 0x015c, 0x66115c },
+		{ 0x0160, 0x661160 },
+		{ 0x0164, 0x661164 },
+		{ 0x0168, 0x661168 },
+		{ 0x016c, 0x66116c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_image = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0400, 0x661400 },
+		{ 0x0404, 0x661404 },
+		{ 0x0408, 0x661408 },
+		{ 0x040c, 0x66140c },
+		{ 0x0410, 0x661410 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gf119_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_base_mthd_base },
+		{  "Image", 2, &gf119_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_base_oclass = {
+	.base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
new file mode 100644
index 0000000..780a1d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_base_oclass = {
+	.base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
new file mode 100644
index 0000000..d8bdd24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_base_oclass = {
+	.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
new file mode 100644
index 0000000..93451e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_base_oclass = {
+	.base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
new file mode 100644
index 0000000..08e2b1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_base_oclass = {
+	.base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
new file mode 100644
index 0000000..1fd89ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_base_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp base channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head >= disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x0100, 0x610894 },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_image = {
+	.mthd = 0x0400,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0800, 0x6108f0 },
+		{ 0x0804, 0x6108fc },
+		{ 0x0808, 0x61090c },
+		{ 0x080c, 0x610914 },
+		{ 0x0810, 0x610904 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_base_oclass = {
+	.base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
new file mode 100644
index 0000000..17a3d83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
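+/* The init/fini pair below arms and disarms the per-channel completion
+ * interrupt: bit <index> of 0x610090 is assumed to be the enable, and a
+ * write of the same bit to 0x61008c the acknowledge.
+ */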
+static void
+gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+}
+
+static void
+gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
+}
+
+const struct nvkm_event_func
+gf119_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = gf119_disp_chan_uevent_init,
+	.fini = gf119_disp_chan_uevent_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
new file mode 100644
index 0000000..01803c0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <engine/dma.h>
+
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
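+/* Debug helper: dump every known method of a display channel, printing the
+ * value at the channel's "previous" offset and, where it differs, an arrow
+ * to the most recently written value.
+ */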
+static void
+nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
+		    const struct nv50_disp_mthd_list *list, int inst)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
+
+	for (i = 0; list->data[i].mthd; i++) {
+		if (list->data[i].addr) {
+			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
+			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
+			u32 mthd = list->data[i].mthd + (list->mthd * inst);
+			const char *name = list->data[i].name;
+			char mods[16];
+
+			if (prev != next)
+				snprintf(mods, sizeof(mods), "-> %08x", next);
+			else
+				snprintf(mods, sizeof(mods), "%13c", ' ');
+
+			nvkm_printk_(subdev, debug, info,
+				     "\t%04x: %08x %s%s%s\n",
+				     mthd, prev, mods, name ? " // " : "",
+				     name ? name : "");
+		}
+	}
+}
+
+void
+nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
+	const struct nv50_disp_mthd_list *list;
+	int i, j;
+
+	if (debug > subdev->debug)
+		return;
+
+	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+		u32 base = chan->head * mthd->addr;
+		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
+			const char *cname = mthd->name;
+			const char *sname = "";
+			char cname_[16], sname_[16];
+
+			if (mthd->addr) {
+				snprintf(cname_, sizeof(cname_), "%s %d",
+					 mthd->name, chan->chid);
+				cname = cname_;
+			}
+
+			if (mthd->data[i].nr > 1) {
+				snprintf(sname_, sizeof(sname_), " - %s %d",
+					 mthd->data[i].name, j);
+				sname = sname_;
+			}
+
+			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
+			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
+					    list, j);
+		}
+	}
+}
+
+static void
+nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+}
+
+static void
+nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
+}
+
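+/* completion notifications carry no payload; an empty reply is still sent
+ * so that waiters are woken
+ */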
+void
+nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
+{
+	struct nvif_notify_uevent_rep {
+	} rep;
+
+	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
+}
+
+int
+nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+			   struct nvkm_notify *notify)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	union {
+		struct nvif_notify_uevent_req none;
+	} *args = data;
+	int ret;
+
+	if (nvif_unvers(args->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = chan->chid;
+		return 0;
+	}
+
+	return ret;
+}
+
+const struct nvkm_event_func
+nv50_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = nv50_disp_chan_uevent_init,
+	.fini = nv50_disp_chan_uevent_fini,
+};
+
+int
+nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	return 0;
+}
+
+int
+nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	return 0;
+}
+
+int
+nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	switch (type) {
+	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &disp->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
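+/* each channel's user registers occupy a 0x1000-byte window starting at
+ * 0x640000 in register resource 0, mirroring the rd32/wr32 accessors above
+ */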
+int
+nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*addr = device->func->resource_addr(device, 0) +
+		0x640000 + (chan->chid * 0x1000);
+	*size = 0x001000;
+	return 0;
+}
+
+static int
+nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
+	return chan->func->child_new(chan, oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	if (chan->func->child_get) {
+		int ret = chan->func->child_get(chan, index, oclass);
+		if (ret == 0)
+			oclass->ctor = nv50_disp_chan_child_new;
+		return ret;
+	}
+	return -EINVAL;
+}
+
+static int
+nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nv50_disp_chan_init(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_chan_dtor(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	if (chan->chid >= 0)
+		disp->chan[chan->chid] = NULL;
+	return chan->func->dtor ? chan->func->dtor(chan) : chan;
+}
+
+static const struct nvkm_object_func
+nv50_disp_chan = {
+	.dtor = nv50_disp_chan_dtor,
+	.init = nv50_disp_chan_init,
+	.fini = nv50_disp_chan_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+	.ntfy = nv50_disp_chan_ntfy,
+	.map = nv50_disp_chan_map,
+	.sclass = nv50_disp_chan_child_get,
+};
+
+int
+nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = root->disp;
+
+	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->mthd = mthd;
+	chan->root = root;
+	chan->chid = chid;
+	chan->head = head;
+
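+	/* a channel id has a single owner; mark chid invalid on failure so
+	 * the dtor doesn't clear a slot belonging to someone else
+	 */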
+	if (disp->chan[chan->chid]) {
+		chan->chid = -1;
+		return -EBUSY;
+	}
+	disp->chan[chan->chid] = chan;
+	return 0;
+}
+
+int
+nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
+	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
new file mode 100644
index 0000000..aee3748
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -0,0 +1,127 @@
+#ifndef __NV50_DISP_CHAN_H__
+#define __NV50_DISP_CHAN_H__
+#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include "nv50.h"
+
+struct nv50_disp_chan {
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	struct nv50_disp_root *root;
+	int chid;
+	int head;
+
+	struct nvkm_object object;
+};
+
+struct nv50_disp_chan_func {
+	void *(*dtor)(struct nv50_disp_chan *);
+	int (*init)(struct nv50_disp_chan *);
+	void (*fini)(struct nv50_disp_chan *);
+	int (*child_get)(struct nv50_disp_chan *, int index,
+			 struct nvkm_oclass *);
+	int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_object **);
+};
+
+int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nv50_disp_chan *);
+int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
+
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+				struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
+
+extern const struct nvkm_event_func gf119_disp_chan_uevent;
+
+struct nv50_disp_mthd_list {
+	u32 mthd;
+	u32 addr;
+	struct {
+		u32 mthd;
+		u32 addr;
+		const char *name;
+	} data[];
+};
+
+struct nv50_disp_chan_mthd {
+	const char *name;
+	u32 addr;
+	s32 prev;
+	struct {
+		const char *name;
+		int nr;
+		const struct nv50_disp_mthd_list *mthd;
+	} data[];
+};
+
+void nv50_disp_chan_mthd(struct nv50_disp_chan *, int debug);
+
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
+
+extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
+extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
+extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
+
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
+extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+
+struct nv50_disp_pioc_oclass {
+	int (*ctor)(const struct nv50_disp_chan_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+
+int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index cf03e02..c6910d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -33,15 +33,15 @@
 nvkm_connector_hpd(struct nvkm_notify *notify)
 {
 	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_disp *disp = nvkm_disp(conn);
-	struct nvkm_gpio *gpio = nvkm_gpio(conn);
+	struct nvkm_disp *disp = conn->disp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	const struct nvkm_gpio_ntfy_rep *line = notify->data;
 	struct nvif_notify_conn_rep_v0 rep;
 	int index = conn->index;
 
-	DBG("HPD: %d\n", line->mask);
+	CONN_DBG(conn, "HPD: %d", line->mask);
 
-	if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+	if (!nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
 		rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
 	else
 		rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
@@ -51,78 +51,58 @@
 	return NVKM_NOTIFY_KEEP;
 }
 
-int
-_nvkm_connector_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_connector_fini(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
 	nvkm_notify_put(&conn->hpd);
-	return nvkm_object_fini(&conn->base, suspend);
-}
-
-int
-_nvkm_connector_init(struct nvkm_object *object)
-{
-	struct nvkm_connector *conn = (void *)object;
-	int ret = nvkm_object_init(&conn->base);
-	if (ret == 0)
-		nvkm_notify_get(&conn->hpd);
-	return ret;
 }
 
 void
-_nvkm_connector_dtor(struct nvkm_object *object)
+nvkm_connector_init(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
-	nvkm_notify_fini(&conn->hpd);
-	nvkm_object_destroy(&conn->base);
+	nvkm_notify_get(&conn->hpd);
 }
 
-int
-nvkm_connector_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct nvbios_connE *info, int index,
-		       int length, void **pobject)
+void
+nvkm_connector_del(struct nvkm_connector **pconn)
+{
+	struct nvkm_connector *conn = *pconn;
+	if (conn) {
+		nvkm_notify_fini(&conn->hpd);
+		kfree(*pconn);
+		*pconn = NULL;
+	}
+}
+
+static void
+nvkm_connector_ctor(struct nvkm_disp *disp, int index,
+		    struct nvbios_connE *info, struct nvkm_connector *conn)
 {
 	static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(parent);
-	struct nvkm_connector *conn;
-	struct nvkm_output *outp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	struct dcb_gpio_func func;
 	int ret;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		if (outp->conn && outp->conn->index == index) {
-			atomic_inc(&nv_object(outp->conn)->refcount);
-			*pobject = outp->conn;
-			return 1;
-		}
-	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	conn = *pobject;
-	if (ret)
-		return ret;
-
-	conn->info = *info;
+	conn->disp = disp;
 	conn->index = index;
+	conn->info = *info;
 
-	DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
-	    info->type, info->location, info->hpd, info->dp,
-	    info->di, info->sr, info->lcdid);
+	CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
+		 info->type, info->location, info->hpd, info->dp,
+		 info->di, info->sr, info->lcdid);
 
 	if ((info->hpd = ffs(info->hpd))) {
 		if (--info->hpd >= ARRAY_SIZE(hpd)) {
-			ERR("hpd %02x unknown\n", info->hpd);
-			return 0;
+			CONN_ERR(conn, "hpd %02x unknown", info->hpd);
+			return;
 		}
 		info->hpd = hpd[info->hpd];
 
-		ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+		ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
 		if (ret) {
-			ERR("func %02x lookup failed, %d\n", info->hpd, ret);
-			return 0;
+			CONN_ERR(conn, "func %02x lookup failed, %d",
+				 info->hpd, ret);
+			return;
 		}
 
 		ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
@@ -134,41 +114,19 @@
 				       sizeof(struct nvkm_gpio_ntfy_rep),
 				       &conn->hpd);
 		if (ret) {
-			ERR("func %02x failed, %d\n", info->hpd, ret);
+			CONN_ERR(conn, "func %02x failed, %d", info->hpd, ret);
 		} else {
-			DBG("func %02x (HPD)\n", info->hpd);
+			CONN_DBG(conn, "func %02x (HPD)", info->hpd);
 		}
 	}
-
-	return 0;
 }
 
 int
-_nvkm_connector_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_connector_new(struct nvkm_disp *disp, int index,
+		   struct nvbios_connE *info, struct nvkm_connector **pconn)
 {
-	struct nvkm_connector *conn;
-	int ret;
-
-	ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
-	*pobject = nv_object(conn);
-	if (ret)
-		return ret;
-
+	if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_connector_ctor(disp, index, info, *pconn);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_connector_oclass = &(struct nvkm_connector_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_connector_ctor,
-			.dtor = _nvkm_connector_dtor,
-			.init = _nvkm_connector_init,
-			.fini = _nvkm_connector_fini,
-		},
-	},
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index c87a061..ed32fe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,58 +1,33 @@
 #ifndef __NVKM_DISP_CONN_H__
 #define __NVKM_DISP_CONN_H__
-#include <core/object.h>
-#include <core/notify.h>
+#include <engine/disp.h>
 
+#include <core/notify.h>
 #include <subdev/bios.h>
 #include <subdev/bios/conn.h>
 
 struct nvkm_connector {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct nvbios_connE info;
+	struct nvkm_disp *disp;
 	int index;
+	struct nvbios_connE info;
 
 	struct nvkm_notify hpd;
+
+	struct list_head head;
 };
 
-#define nvkm_connector_create(p,e,c,b,i,d)                                     \
-	nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_connector_destroy(d) ({                                           \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_dtor(nv_object(disp));                                 \
-})
-#define nvkm_connector_init(d) ({                                              \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_init(nv_object(disp));                                 \
-})
-#define nvkm_connector_fini(d,s) ({                                            \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_fini(nv_object(disp), (s));                            \
-})
+int  nvkm_connector_new(struct nvkm_disp *, int index, struct nvbios_connE *,
+			struct nvkm_connector **);
+void nvkm_connector_del(struct nvkm_connector **);
+void nvkm_connector_init(struct nvkm_connector *);
+void nvkm_connector_fini(struct nvkm_connector *);
 
-int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct nvbios_connE *,
-			   int, int, void **);
-
-int  _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_connector_dtor(struct nvkm_object *);
-int  _nvkm_connector_init(struct nvkm_object *);
-int  _nvkm_connector_fini(struct nvkm_object *, bool);
-
-struct nvkm_connector_impl {
-	struct nvkm_oclass base;
-};
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_connector *_conn = (void *)conn;                           \
-	nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index,                        \
-	       _conn->info.location, _conn->info.type, ##a);                   \
+#define CONN_MSG(c,l,f,a...) do {                                              \
+	struct nvkm_connector *_conn = (c);                                    \
+	nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n",     \
+		 _conn->index, _conn->info.location, _conn->info.type, ##a);   \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a)
+#define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a)
+#define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
new file mode 100644
index 0000000..1baa5c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610bc4 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x085c, 0x610c5c },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0878, 0x610c50 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x089c, 0x610c68 },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{ 0x0910, 0x610c70 },
+		{ 0x0914, 0x610c78 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_core_oclass = {
+	.base.oclass = G82_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
new file mode 100644
index 0000000..019379a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g94_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610794 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g94_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 4, &g94_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g94_disp_core_oclass = {
+	.base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
new file mode 100644
index 0000000..6b1dc70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x660080 },
+		{ 0x0084, 0x660084 },
+		{ 0x0088, 0x660088 },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_dac = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0180, 0x660180 },
+		{ 0x0184, 0x660184 },
+		{ 0x0188, 0x660188 },
+		{ 0x0190, 0x660190 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_sor = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0200, 0x660200 },
+		{ 0x0204, 0x660204 },
+		{ 0x0208, 0x660208 },
+		{ 0x0210, 0x660210 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_pior = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0300, 0x660300 },
+		{ 0x0304, 0x660304 },
+		{ 0x0308, 0x660308 },
+		{ 0x0310, 0x660310 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gf119_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+gf119_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
+}
+
+static int
+gf119_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494, chan->push);
+	nvkm_wr32(device, 0x610498, 0x00010000);
+	nvkm_wr32(device, 0x61049c, 0x00000001);
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610490, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_core_func = {
+	.init = gf119_disp_core_init,
+	.fini = gf119_disp_core_fini,
+	.bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_core_oclass = {
+	.base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gf119_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
new file mode 100644
index 0000000..088ab22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x047c, 0x66047c },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x0488, 0x660488 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04a0, 0x6604a0 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x0520, 0x660520 },
+		{ 0x0524, 0x660524 },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gk104_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gk104_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_core_oclass = {
+	.base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
new file mode 100644
index 0000000..df0f45c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_core_oclass = {
+	.base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
new file mode 100644
index 0000000..9e27f8f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm107_disp_core_oclass = {
+	.base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
new file mode 100644
index 0000000..222f4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm204_disp_core_oclass = {
+	.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
new file mode 100644
index 0000000..b234547
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_core_oclass = {
+	.base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
new file mode 100644
index 0000000..8f5ba20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_core_oclass = {
+	.base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
new file mode 100644
index 0000000..db4a9b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_core_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	u64 push;
+	int ret;
+
+	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp core channel dma vers %d "
+				   "pushbuf %016llx\n",
+			   args->v0.version, args->v0.pushbuf);
+		push = args->v0.pushbuf;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
+				   push, oclass, pobject);
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x610bb8 },
+		{ 0x0088, 0x610b9c },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610828 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610b70 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_pior = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0700, 0x610b80 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &nv50_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+nv50_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
+}
+
+static int
+nv50_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
+
+	/* attempt to unstick channel from some unknown state */
+	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
+		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
+	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
+		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204, chan->push);
+	nvkm_wr32(device, 0x610208, 0x00010000);
+	nvkm_wr32(device, 0x61020c, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610200, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_core_func = {
+	.init = nv50_disp_core_init,
+	.fini = nv50_disp_core_fini,
+	.bind = nv50_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_core_oclass = {
+	.base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &nv50_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
new file mode 100644
index 0000000..dd99fc7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_curs_oclass = {
+	.base.oclass = G82_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
new file mode 100644
index 0000000..2a1574e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_curs_oclass = {
+	.base.oclass = GF110_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
new file mode 100644
index 0000000..28e8f06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_curs_oclass = {
+	.base.oclass = GK104_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
new file mode 100644
index 0000000..d8a4b9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_curs_oclass = {
+	.base.oclass = GT214_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
new file mode 100644
index 0000000..225858e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_cursor_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp cursor size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_curs_oclass = {
+	.base.oclass = NV50_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 0f7d1ec..9bfa9e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -33,6 +33,7 @@
 int
 nv50_dac_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 doff = outp->or * 0x800;
 	union {
 		struct nv50_disp_dac_pwr_v0 v0;
@@ -40,12 +41,12 @@
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp dac pwr size %d\n", size);
+	nvif_ioctl(object, "disp dac pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
-				 "vsync %d hsync %d\n",
-			 args->v0.version, args->v0.state, args->v0.data,
-			 args->v0.vsync, args->v0.hsync);
+		nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
+				   "vsync %d hsync %d\n",
+			   args->v0.version, args->v0.state, args->v0.data,
+			   args->v0.vsync, args->v0.hsync);
 		stat  = 0x00000040 * !args->v0.state;
 		stat |= 0x00000010 * !args->v0.data;
 		stat |= 0x00000004 * !args->v0.vsync;
@@ -53,15 +54,23 @@
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 int
 nv50_dac_sense(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	union {
 		struct nv50_disp_dac_load_v0 v0;
 	} *args = data;
@@ -69,31 +78,49 @@
 	u32 loadval;
 	int ret;
 
-	nv_ioctl(object, "disp dac load size %d\n", size);
+	nvif_ioctl(object, "disp dac load size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac load vers %d data %08x\n",
-			 args->v0.version, args->v0.data);
+		nvif_ioctl(object, "disp dac load vers %d data %08x\n",
+			   args->v0.version, args->v0.data);
 		if (args->v0.data & 0xfff00000)
 			return -EINVAL;
 		loadval = args->v0.data;
 	} else
 		return ret;
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+	nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
 	mdelay(9);
 	udelay(500);
-	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+	loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
+	nvkm_debug(subdev, "DAC%d sense: %08x\n", outp->or, loadval);
 	if (!(loadval & 0x80000000))
 		return -ETIMEDOUT;
 
 	args->v0.load = (loadval & 0x38000000) >> 27;
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_dac_output_func = {
+};
+
+int
+nv50_dac_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_dac_output_func, disp,
+				index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
new file mode 100644
index 0000000..876b145
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+int
+gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		     struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -9, handle,
+				 chan->base.chid << 27 | 0x00000001);
+}
+
+static void
+gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_dmac_func = {
+	.init = gf119_disp_dmac_init,
+	.fini = gf119_disp_dmac_fini,
+	.bind = gf119_disp_dmac_bind,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
new file mode 100644
index 0000000..9c6645a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/oproxy.h>
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/dma.h>
+
+struct nv50_disp_dmac_object {
+	struct nvkm_oproxy oproxy;
+	struct nv50_disp_root *root;
+	int hash;
+};
+
+static void
+nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
+{
+	struct nv50_disp_dmac_object *object =
+		container_of(base, typeof(*object), oproxy);
+	nvkm_ramht_remove(object->root->ramht, object->hash);
+}
+
+static const struct nvkm_oproxy_func
+nv50_disp_dmac_child_func_ = {
+	.dtor[0] = nv50_disp_dmac_child_del_,
+};
+
+static int
+nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
+			  const struct nvkm_oclass *oclass,
+			  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp_root *root = chan->base.root;
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	struct nv50_disp_dmac_object *object;
+	int ret;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
+	object->root = root;
+	*pobject = &object->oproxy.base;
+
+	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	object->hash = chan->func->bind(chan, object->oproxy.object,
+					      oclass->handle);
+	if (object->hash < 0)
+		return object->hash;
+
+	return 0;
+}
+
+static int
+nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *oclass = NULL;
+
+	sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+	if (sclass->engine && sclass->engine->func->base.sclass) {
+		sclass->engine->func->base.sclass(sclass, index, &oclass);
+		if (oclass) {
+			sclass->priv = oclass;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void
+nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	chan->func->fini(chan);
+}
+
+static int
+nv50_disp_dmac_init_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
+{
+	return nv50_disp_dmac(base);
+}
+
+static const struct nv50_disp_chan_func
+nv50_disp_dmac_func_ = {
+	.dtor = nv50_disp_dmac_dtor_,
+	.init = nv50_disp_dmac_init_,
+	.fini = nv50_disp_dmac_fini_,
+	.child_get = nv50_disp_dmac_child_get_,
+	.child_new = nv50_disp_dmac_child_new_,
+};
+
+int
+nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_dmaobj *dmaobj;
+	struct nv50_disp_dmac *chan;
+	int ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->func = func;
+
+	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+				  chid, head, oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	dmaobj = nvkm_dma_search(device->dma, client, push);
+	if (!dmaobj)
+		return -ENOENT;
+
+	if (dmaobj->limit - dmaobj->start != 0xfff)
+		return -EINVAL;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		chan->push = 0x00000001 | dmaobj->start >> 8;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		chan->push = 0x00000003 | dmaobj->start >> 8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		    struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -10, handle,
+				 chan->base.chid << 28 |
+				 chan->base.chid);
+}
+
+static void
+nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+}
+
+static int
+nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_dmac_func = {
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.bind = nv50_disp_dmac_bind,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
new file mode 100644
index 0000000..c748ca2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -0,0 +1,91 @@
+#ifndef __NV50_DISP_DMAC_H__
+#define __NV50_DISP_DMAC_H__
+#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
+#include "channv50.h"
+
+struct nv50_disp_dmac {
+	const struct nv50_disp_dmac_func *func;
+	struct nv50_disp_chan base;
+	u32 push;
+};
+
+struct nv50_disp_dmac_func {
+	int  (*init)(struct nv50_disp_dmac *);
+	void (*fini)(struct nv50_disp_dmac *);
+	int  (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
+};
+
+int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head, u64 push,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
+int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func nv50_disp_core_func;
+
+extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+
+struct nv50_disp_dmac_oclass {
+	int (*ctor)(const struct nv50_disp_dmac_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_dmac_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+#endif
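
[Note: the header above keeps chip-agnostic channel state (nv50_disp_dmac) separate from the per-chipset hooks (nv50_disp_dmac_func), so common code only ever dispatches through chan->func. A hedged usage sketch, wrapper name hypothetical and error paths trimmed, not taken from the patch:

	/* Sketch: driving a channel purely through its func table. */
	static int
	example_dmac_cycle(struct nv50_disp_dmac *chan)
	{
		int ret = chan->func->init(chan);  /* e.g. nv50_disp_dmac_init */
		if (ret)
			return ret;
		/* ... methods are submitted via the bound push buffer ... */
		chan->func->fini(chan);            /* e.g. nv50_disp_dmac_fini */
		return 0;
	}
]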
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 6834766..74e2f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -48,12 +48,12 @@
 static int
 dp_set_link_config(struct dp_state *dp)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
+		.subdev = subdev,
 		.bios = bios,
 		.offset = 0x0000,
 		.outp = &outp->base.info,
@@ -64,33 +64,33 @@
 	u8 sink[2];
 	int ret;
 
-	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+	OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw);
 
 	/* set desired link configuration on the source */
 	if ((lnkcmp = dp->outp->info.lnkcmp)) {
 		if (outp->version < 0x30) {
-			while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+			while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp))
 				lnkcmp += 4;
-			init.offset = nv_ro16(bios, lnkcmp + 2);
+			init.offset = nvbios_rd16(bios, lnkcmp + 2);
 		} else {
-			while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+			while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp))
 				lnkcmp += 3;
-			init.offset = nv_ro16(bios, lnkcmp + 1);
+			init.offset = nvbios_rd16(bios, lnkcmp + 1);
 		}
 
 		nvbios_exec(&init);
 	}
 
-	ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
-			    outp->dpcd[DPCD_RC02] &
-				       DPCD_RC02_ENHANCED_FRAME_CAP);
+	ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+				  outp->dpcd[DPCD_RC02] &
+					     DPCD_RC02_ENHANCED_FRAME_CAP);
 	if (ret) {
 		if (ret < 0)
-			ERR("lnk_ctl failed with %d\n", ret);
+			OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret);
 		return ret;
 	}
 
-	impl->lnk_pwr(outp, dp->link_nr);
+	outp->func->lnk_pwr(outp, dp->link_nr);
 
 	/* set desired link configuration on the sink */
 	sink[0] = dp->link_bw / 27000;
@@ -98,29 +98,27 @@
 	if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
 		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
 
-	return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
+	return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
 }
 
 static void
 dp_set_training_pattern(struct dp_state *dp, u8 pattern)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	u8 sink_tp;
 
-	DBG("training pattern %d\n", pattern);
-	impl->pattern(outp, pattern);
+	OUTP_DBG(&outp->base, "training pattern %d", pattern);
+	outp->func->pattern(outp, pattern);
 
-	nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1);
 	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
 	sink_tp |= pattern;
-	nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1);
 }
 
 static int
 dp_link_train_commit(struct dp_state *dp, bool pc)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	int ret, i;
 
@@ -146,16 +144,17 @@
 		dp->conf[i] = (lpre << 3) | lvsw;
 		dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
 
-		DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
-		impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+		OUTP_DBG(&outp->base, "config lane %d %02x %02x",
+			 i, dp->conf[i], lpc2);
+		outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
 	}
 
-	ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+	ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+		ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2);
 		if (ret)
 			return ret;
 	}
@@ -174,17 +173,18 @@
 	else
 		udelay(delay);
 
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+		ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1);
 		if (ret)
 			dp->pc2stat = 0x00;
-		DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+		OUTP_DBG(&outp->base, "status %6ph pc2 %02x",
+			 dp->stat, dp->pc2stat);
 	} else {
-		DBG("status %6ph\n", dp->stat);
+		OUTP_DBG(&outp->base, "status %6ph", dp->stat);
 	}
 
 	return 0;
@@ -260,11 +260,11 @@
 dp_link_train_init(struct dp_state *dp, bool spread)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -286,11 +286,11 @@
 dp_link_train_fini(struct dp_state *dp)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -322,7 +322,7 @@
 nvkm_dp_train(struct work_struct *w)
 {
 	struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nv50_disp *disp = nv50_disp(outp->base.disp);
 	const struct dp_rates *cfg = nvkm_dp_rates;
 	struct dp_state _dp = {
 		.outp = outp,
@@ -330,11 +330,11 @@
 	u32 datarate = 0;
 	int ret;
 
-	if (!outp->base.info.location && priv->sor.magic)
-		priv->sor.magic(&outp->base);
+	if (!outp->base.info.location && disp->func->sor.magic)
+		disp->func->sor.magic(&outp->base);
 
 	/* bring capabilities within encoder limits */
-	if (nv_mclass(priv) < GF110_DISP)
+	if (disp->base.engine.subdev.device->chipset < 0xd0)
 		outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
 	if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
 		outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -386,12 +386,12 @@
 	/* finish link training and execute post-train script from vbios */
 	dp_set_training_pattern(dp, 0);
 	if (ret < 0)
-		ERR("link training failed\n");
+		OUTP_ERR(&outp->base, "link training failed");
 
 	dp_link_train_fini(dp);
 
 	/* signal completion and enable link interrupt handling */
-	DBG("training complete\n");
+	OUTP_DBG(&outp->base, "training complete");
 	atomic_set(&outp->lt.done, 1);
 	wake_up(&outp->lt.wait);
 	nvkm_notify_get(&outp->irq);
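
[Note on units in the hunk above: dp->link_bw is carried in 10 kHz units throughout, so the repeated division by 27000 converts it to the DPCD LINK_BW_SET encoding (multiples of 270 Mb/s per lane). A trivial sketch of the mapping, helper name invented:

	/* Sketch: 10kHz link rate -> DPCD LINK_BW_SET value, as used above.
	 * 162000 -> 0x06 (1.62 Gb/s), 270000 -> 0x0a (2.70),
	 * 540000 -> 0x14 (5.40).
	 */
	static inline u8
	link_bw_to_dpcd(u32 link_bw_10khz)
	{
		return link_bw_10khz / 27000;
	}
]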
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a0dcf53..3e3e592 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -22,251 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610bc4 },
-		{}
-	}
+static const struct nv50_disp_func
+g84_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g84_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x085c, 0x610c5c },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0878, 0x610c50 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x089c, 0x610c68 },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{ 0x0910, 0x610c70 },
-		{ 0x0914, 0x610c78 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00c4, 0x610800 },
-		{ 0x00c8, 0x61080c },
-		{ 0x00cc, 0x610818 },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x00fc, 0x610824 },
-		{ 0x0100, 0x610894 },
-		{ 0x0104, 0x61082c },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_disp_sclass[] = {
-	{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g84_disp_main_oclass[] = {
-	{ G82_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g84_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g84_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g84_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x82),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
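
[Note: the g84.c diff above shows the shape every chipset file converges on after this rework: the old oclass/ofuncs ctor boilerplate collapses into one const nv50_disp_func table plus a one-line constructor. A hedged template with a placeholder chipset name; the 2 passed to nv50_disp_new_() appears to be the head count, mirroring the old priv->head.nr = 2:

	/* Sketch of the per-chipset pattern; "gxx" is a placeholder,
	 * not a chipset touched by this patch.
	 */
	static const struct nv50_disp_func
	gxx_disp = {
		.intr = nv50_disp_intr,
		.root = &g84_disp_root_oclass,	/* per-chipset root class */
		/* ... head/outp/dac/sor/pior hooks as in g84_disp above ... */
	};

	int
	gxx_disp_new(struct nvkm_device *device, int index,
		     struct nvkm_disp **pdisp)
	{
		return nv50_disp_new_(&gxx_disp, device, index,
				      2 /* heads, assumed */, pdisp);
	}
]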
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 1ab0d0a..7a7af3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -22,118 +22,35 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g94_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610794 },
-		{}
-	}
+static const struct nv50_disp_func
+g94_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g94_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-const struct nv50_disp_mthd_chan
-g94_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 4, &g94_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g94_disp_sclass[] = {
-	{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g94_disp_main_oclass[] = {
-	{ GT206_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g94_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g94_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g94_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	&g94_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-g94_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x88),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
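
[Note: worth flagging in the g94 conversion is that the g94_disp_outp_sclass[] array that used to enumerate output-path classes disappears, replaced by typed constructor hooks in the func table (.outp.internal.dp = g94_sor_dp_new, and so on). A sketch of how common code can then pick a constructor; the caller and its argument list are assumptions for illustration, not the real prototype:

	/* Sketch: output creation becomes a table lookup rather than an
	 * oclass array walk.
	 */
	static int
	example_make_dp_output(struct nv50_disp *disp, int index,
			       struct dcb_output *dcbE,
			       struct nvkm_output **poutp)
	{
		if (!disp->func->outp.internal.dp)
			return -ENODEV;	/* chipset has no internal DP */
		return disp->func->outp.internal.dp(&disp->base, index,
						    dcbE, poutp);
	}
]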
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
deleted file mode 100644
index 7f2f05f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/disp.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/pll.h>
-#include <subdev/devinit.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static void
-gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-}
-
-static void
-gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
-}
-
-const struct nvkm_event_func
-gf110_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = gf110_disp_chan_uevent_init,
-	.fini = gf110_disp_chan_uevent_fini,
-};
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_dmac_object_attach(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
-	return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
-}
-
-static void
-gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-gf110_disp_dmac_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x660080 },
-		{ 0x0084, 0x660084 },
-		{ 0x0088, 0x660088 },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_dac = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0180, 0x660180 },
-		{ 0x0184, 0x660184 },
-		{ 0x0188, 0x660188 },
-		{ 0x0190, 0x660190 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_sor = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0200, 0x660200 },
-		{ 0x0204, 0x660204 },
-		{ 0x0208, 0x660208 },
-		{ 0x0210, 0x660210 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_pior = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0300, 0x660300 },
-		{ 0x0304, 0x660304 },
-		{ 0x0308, 0x660308 },
-		{ 0x0310, 0x660310 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gf110_disp_core_mthd_head },
-		{}
-	}
-};
-
-static int
-gf110_disp_core_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494, mast->push);
-	nv_wr32(priv, 0x610498, 0x00010000);
-	nv_wr32(priv, 0x61049c, 0x00000001);
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610490, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-gf110_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_core_init,
-	.base.fini = gf110_disp_core_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x661080 },
-		{ 0x0084, 0x661084 },
-		{ 0x0088, 0x661088 },
-		{ 0x008c, 0x66108c },
-		{ 0x0090, 0x661090 },
-		{ 0x0094, 0x661094 },
-		{ 0x00a0, 0x6610a0 },
-		{ 0x00a4, 0x6610a4 },
-		{ 0x00c0, 0x6610c0 },
-		{ 0x00c4, 0x6610c4 },
-		{ 0x00c8, 0x6610c8 },
-		{ 0x00cc, 0x6610cc },
-		{ 0x00e0, 0x6610e0 },
-		{ 0x00e4, 0x6610e4 },
-		{ 0x00e8, 0x6610e8 },
-		{ 0x00ec, 0x6610ec },
-		{ 0x00fc, 0x6610fc },
-		{ 0x0100, 0x661100 },
-		{ 0x0104, 0x661104 },
-		{ 0x0108, 0x661108 },
-		{ 0x010c, 0x66110c },
-		{ 0x0110, 0x661110 },
-		{ 0x0114, 0x661114 },
-		{ 0x0118, 0x661118 },
-		{ 0x011c, 0x66111c },
-		{ 0x0130, 0x661130 },
-		{ 0x0134, 0x661134 },
-		{ 0x0138, 0x661138 },
-		{ 0x013c, 0x66113c },
-		{ 0x0140, 0x661140 },
-		{ 0x0144, 0x661144 },
-		{ 0x0148, 0x661148 },
-		{ 0x014c, 0x66114c },
-		{ 0x0150, 0x661150 },
-		{ 0x0154, 0x661154 },
-		{ 0x0158, 0x661158 },
-		{ 0x015c, 0x66115c },
-		{ 0x0160, 0x661160 },
-		{ 0x0164, 0x661164 },
-		{ 0x0168, 0x661168 },
-		{ 0x016c, 0x66116c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_image = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0400, 0x661400 },
-		{ 0x0404, 0x661404 },
-		{ 0x0408, 0x661408 },
-		{ 0x040c, 0x66140c },
-		{ 0x0410, 0x661410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gf110_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_base_mthd_base },
-		{  "Image", 2, &gf110_disp_base_mthd_image },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_pioc_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* activate channel */
-	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 9,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 13,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
-	const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
-	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
-	const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
-static int
-gf110_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.
-	 */
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
-		nv_wr32(priv, 0x6100ac, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x610090, 0x00000000);
-	nv_wr32(priv, 0x6100a0, 0x00000000);
-	nv_wr32(priv, 0x6100b0, 0x00000307);
-
-	/* disable underflow reporting, preventing an intermittent issue
-	 * on some gk104 boards where the production vbios left this
-	 * setting enabled by default.
-	 *
-	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
-	 */
-	for (i = 0; i < priv->head.nr; i++)
-		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
-
-	return 0;
-}
-
-static int
-gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x6100b0, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-gf110_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = gf110_disp_main_init,
-	.fini = gf110_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-gf110_disp_main_oclass[] = {
-	{ GF110_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gf110_disp_sclass[] = {
-	{ GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-const struct nvkm_event_func
-gf110_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = gf110_disp_vblank_init,
-	.fini = gf110_disp_vblank_fini,
-};
-
-static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
-	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
-	    struct nvbios_outp *info)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	u16 mask, type;
-
-	if (or < 4) {
-		type = DCB_OUTPUT_ANALOG;
-		mask = 0;
-	} else {
-		or -= 4;
-		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
-		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
-		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
-		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
-		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
-		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
-		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return NULL;
-		}
-	}
-
-	mask  = 0x00c0 & (mask << 6);
-	mask |= 0x0001 << or;
-	mask |= 0x0100 << head;
-
-	list_for_each_entry(outp, &priv->base.outp, head) {
-		if ((outp->info.hasht & 0xff) == type &&
-		    (outp->info.hashm & mask) == mask) {
-			*data = nvbios_outp_match(bios, outp->info.hasht,
-							outp->info.hashm,
-						  ver, hdr, cnt, len, info);
-			if (!*data)
-				return NULL;
-			return outp;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
-	if (outp) {
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = bios,
-			.offset = info.script[id],
-			.outp = &outp->info,
-			.crtc = head,
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-	}
-
-	return outp;
-}
-
-static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info1;
-	struct nvbios_ocfg info2;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
-	if (!outp)
-		return NULL;
-
-	switch (outp->info.type) {
-	case DCB_OUTPUT_TMDS:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		if (pclk >= 165000)
-			*conf |= 0x0100;
-		break;
-	case DCB_OUTPUT_LVDS:
-		*conf = priv->sor.lvdsconf;
-		break;
-	case DCB_OUTPUT_DP:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		break;
-	case DCB_OUTPUT_ANALOG:
-	default:
-		*conf = 0x00ff;
-		break;
-	}
-
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
-	if (data && id < 0xff) {
-		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
-		if (data) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
-				.bios = bios,
-				.offset = data,
-				.outp = &outp->info,
-				.crtc = head,
-				.execute = 1,
-			};
-
-			nvbios_exec(&init);
-		}
-	}
-
-	return outp;
-}
-
-static void
-gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
-{
-	exec_script(priv, head, 1);
-}
-
-static void
-gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp = exec_script(priv, head, 2);
-
-	/* see note in nv50_disp_intr_unk20_0() */
-	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
-			.outp = &outp->info,
-			.crtc = head,
-			.offset = outpdp->info.script[4],
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-		atomic_set(&outpdp->lt.done, 0);
-	}
-}
-
-static void
-gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
-	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
-}
-
-static void
-gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
-			  struct dcb_output *outp)
-{
-	const int or = ffs(outp->or) - 1;
-	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
-	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
-	const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
-	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
-	const u32 hoff = (head * 0x800);
-	const u32 soff = (  or * 0x800);
-	const u32 loff = (link * 0x080) + soff;
-	const u32 symbol = 100000;
-	const u32 TU = 64;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x612300 + soff);
-	u32 datarate, link_nr, link_bw, bits;
-	u64 ratio, value;
-
-	link_nr  = hweight32(dpctrl & 0x000f0000);
-	link_bw  = (clksor & 0x007c0000) >> 18;
-	link_bw *= 27000;
-
-	/* symbols/hblank - algorithm taken from comments in tegra driver */
-	value = vblanke + vactive - vblanks - 7;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
-
-	/* symbols/vblank - algorithm taken from comments in tegra driver */
-	value = vblanks - vblanke - 25;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
-
-	/* watermark */
-	if      ((conf & 0x3c0) == 0x180) bits = 30;
-	else if ((conf & 0x3c0) == 0x140) bits = 24;
-	else                              bits = 18;
-	datarate = (pclk * bits) / 8;
-
-	ratio  = datarate;
-	ratio *= symbol;
-	do_div(ratio, link_nr * link_bw);
-
-	value  = (symbol - ratio) * TU;
-	value *= ratio;
-	do_div(value, symbol);
-	do_div(value, symbol);
-
-	value += 5;
-	value |= 0x08000000;
-
-	nv_wr32(priv, 0x616610 + hoff, value);
-}
-
-static void
-gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf, addr, data;
-
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
-	if (!outp)
-		return;
-
-	/* see note in nv50_disp_intr_unk20_2() */
-	if (outp->info.type == DCB_OUTPUT_DP) {
-		u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
-		switch ((sync & 0x000003c0) >> 6) {
-		case 6: pclk = pclk * 30; break;
-		case 5: pclk = pclk * 24; break;
-		case 2:
-		default:
-			pclk = pclk * 18;
-			break;
-		}
-
-		if (nvkm_output_dp_train(outp, pclk, true))
-			ERR("link not trained before attach\n");
-	} else {
-		if (priv->sor.magic)
-			priv->sor.magic(outp);
-	}
-
-	exec_clkcmp(priv, head, 0, pclk, &conf);
-
-	if (outp->info.type == DCB_OUTPUT_ANALOG) {
-		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
-		data = 0x00000000;
-	} else {
-		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
-		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
-		switch (outp->info.type) {
-		case DCB_OUTPUT_TMDS:
-			nv_mask(priv, addr, 0x007c0000, 0x00280000);
-			break;
-		case DCB_OUTPUT_DP:
-			gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_mask(priv, addr, 0x00000707, data);
-}
-
-static void
-gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
-{
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf;
-
-	exec_clkcmp(priv, head, 1, pclk, &conf);
-}
-
-void
-gf110_disp_intr_supervisor(struct work_struct *work)
-{
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mask[4];
-	int head;
-
-	nv_debug(priv, "supervisor %d\n", ffs(priv->super));
-	for (head = 0; head < priv->head.nr; head++) {
-		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
-		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
-	}
-
-	if (priv->super & 0x00000001) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 1.0 - head %d\n", head);
-			gf110_disp_intr_unk1_0(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000002) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.0 - head %d\n", head);
-			gf110_disp_intr_unk2_0(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00010000))
-				continue;
-			nv_debug(priv, "supervisor 2.1 - head %d\n", head);
-			gf110_disp_intr_unk2_1(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.2 - head %d\n", head);
-			gf110_disp_intr_unk2_2(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000004) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 3.0 - head %d\n", head);
-			gf110_disp_intr_unk4_0(priv, head);
-		}
-	}
-
-	for (head = 0; head < priv->head.nr; head++)
-		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
-	nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
-	u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
-	u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
-
-	nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
-		       "0x%08x 0x%08x\n",
-		 chid, (mthd & 0x0000ffc), data, mthd, unkn);
-
-	if (chid == 0) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 8) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
-					    impl->mthd.ovly);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_wr32(priv, 0x61009c, (1 << chid));
-	nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
-}
-
-void
-gf110_disp_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x610088);
-	int i;
-
-	if (intr & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x61008c);
-		while (stat) {
-			int chid = __ffs(stat); stat &= ~(1 << chid);
-			nv50_disp_chan_uevent_send(priv, chid);
-			nv_wr32(priv, 0x61008c, 1 << chid);
-		}
-		intr &= ~0x00000001;
-	}
-
-	if (intr & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x61009c);
-		int chid = ffs(stat) - 1;
-		if (chid >= 0)
-			gf110_disp_intr_error(priv, chid);
-		intr &= ~0x00000002;
-	}
-
-	if (intr & 0x00100000) {
-		u32 stat = nv_rd32(priv, 0x6100ac);
-		if (stat & 0x00000007) {
-			priv->super = (stat & 0x00000007);
-			schedule_work(&priv->supervisor);
-			nv_wr32(priv, 0x6100ac, priv->super);
-			stat &= ~0x00000007;
-		}
-
-		if (stat) {
-			nv_info(priv, "unknown intr24 0x%08x\n", stat);
-			nv_wr32(priv, 0x6100ac, stat);
-		}
-
-		intr &= ~0x00100000;
-	}
-
-	for (i = 0; i < priv->head.nr; i++) {
-		u32 mask = 0x01000000 << i;
-		if (mask & intr) {
-			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
-			if (stat & 0x00000001)
-				nvkm_disp_vblank(&priv->base, i);
-			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
-			nv_rd32(priv, 0x6100c0 + (i * 0x800));
-		}
-	}
-}
-
-static int
-gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gf110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gf110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	return 0;
-}
-
-struct nvkm_oclass *
-gf110_disp_outp_sclass[] = {
-	&gf110_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gf110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x90),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gf110_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gf110_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
new file mode 100644
index 0000000..186fd3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+
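+/* Per-head vblank interrupts are gated by bit 0 of 0x6100c0 + (head * 0x800). */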
+void
+gf119_disp_vblank_init(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+void
+gf119_disp_vblank_fini(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
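+/*
+ * Decode an OR's method-control value into a DCB device type and
+ * link mask, then find the matching output on the disp's list and
+ * fetch its script table from the VBIOS.  The hash layout assumed
+ * by the mask below: OR index in the low bits, sublink selection at
+ * bits 6-7, head at bit 8 and up.
+ */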
+static struct nvkm_output *
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
+	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvkm_output *outp;
+	u16 mask, type;
+
+	if (or < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		or -= 4;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
+			return NULL;
+		}
+	}
+
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << or;
+	mask |= 0x0100 << head;
+
+	list_for_each_entry(outp, &disp->base.outp, head) {
+		if ((outp->info.hasht & 0xff) == type &&
+		    (outp->info.hashm & mask) == mask) {
+			*data = nvbios_outp_match(bios, outp->info.hasht,
+							outp->info.hashm,
+						  ver, hdr, cnt, len, info);
+			if (!*data)
+				return NULL;
+			return outp;
+		}
+	}
+
+	return NULL;
+}
+
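+/*
+ * Execute VBIOS output script 'id' for whichever OR the per-OR
+ * method-control state at 0x640180 says is driving the given head.
+ */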
+static struct nvkm_output *
+exec_script(struct nv50_disp *disp, int head, int id)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &outp->info,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	return outp;
+}
+
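+/*
+ * As exec_script(), but for the clock-comparison scripts: build the
+ * output config word (sublink config, plus a >=165MHz flag for TMDS),
+ * then match and run the VBIOS clock script appropriate for pclk.
+ */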
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	if (!outp)
+		return NULL;
+
+	switch (outp->info.type) {
+	case DCB_OUTPUT_TMDS:
+		*conf = (ctrl & 0x00000f00) >> 8;
+		if (pclk >= 165000)
+			*conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		*conf = disp->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		*conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		*conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data && id < 0xff) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = subdev,
+				.bios = bios,
+				.offset = data,
+				.outp = &outp->info,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return outp;
+}
+
+static void
+gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
+{
+	exec_script(disp, head, 1);
+}
+
+static void
+gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
+
+	/* see note in nv50_disp_intr_unk20_0() */
+	if (outp && outp->info.type == DCB_OUTPUT_DP) {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = subdev->device->bios,
+			.outp = &outp->info,
+			.crtc = head,
+			.offset = outpdp->info.script[4],
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+		atomic_set(&outpdp->lt.done, 0);
+	}
+}
+
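+/* Supervisor 2.1: program the head's VPLL to the new pixel clock. */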
+static void
+gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	if (pclk)
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
+	nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
+			  struct dcb_output *outp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const int or = ffs(outp->or) - 1;
+	const u32 ctrl = nvkm_rd32(device, 0x660200 + (or   * 0x020));
+	const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
+	const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
+	const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+	const u32 hoff = (head * 0x800);
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 symbol = 100000;
+	const u32 TU = 64;
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x612300 + soff);
+	u32 datarate, link_nr, link_bw, bits;
+	u64 ratio, value;
+
+	link_nr  = hweight32(dpctrl & 0x000f0000);
+	link_bw  = (clksor & 0x007c0000) >> 18;
+	link_bw *= 27000;
+
+	/* symbols/hblank - algorithm taken from comments in tegra driver */
+	value = vblanke + vactive - vblanks - 7;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
+	nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
+
+	/* symbols/vblank - algorithm taken from comments in tegra driver */
+	value = vblanks - vblanke - 25;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - ((36 / link_nr) + 3) - 1;
+	nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
+
+	/* watermark */
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
+	datarate = (pclk * bits) / 8;
+
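+	/*
+	 * Watermark: with link utilisation u = datarate / (link_nr *
+	 * link_bw), the block below works out to TU * u * (1 - u),
+	 * using 'symbol' (100000) as a fixed-point scale, so that
+	 * ratio = u * symbol.  Bit 27 presumably latches the new value.
+	 */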
+	ratio  = datarate;
+	ratio *= symbol;
+	do_div(ratio, link_nr * link_bw);
+
+	value  = (symbol - ratio) * TU;
+	value *= ratio;
+	do_div(value, symbol);
+	do_div(value, symbol);
+
+	value += 5;
+	value |= 0x08000000;
+
+	nvkm_wr32(device, 0x616610 + hoff, value);
+}
+
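+/*
+ * Supervisor 2.2: run the clock script for the new pixel clock,
+ * (re)training DP links at the bandwidth implied by the head's bpp
+ * before the output is attached, then update what appears to be the
+ * OR's clock/divider control at 0x612280/0x612300.
+ */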
+static void
+gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_output *outp;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf, addr, data;
+
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
+	if (!outp)
+		return;
+
+	/* see note in nv50_disp_intr_unk20_2() */
+	if (outp->info.type == DCB_OUTPUT_DP) {
+		u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
+		switch ((sync & 0x000003c0) >> 6) {
+		case 6: pclk = pclk * 30; break;
+		case 5: pclk = pclk * 24; break;
+		case 2:
+		default:
+			pclk = pclk * 18;
+			break;
+		}
+
+		if (nvkm_output_dp_train(outp, pclk, true))
+			OUTP_ERR(outp, "link not trained before attach");
+	} else {
+		if (disp->func->sor.magic)
+			disp->func->sor.magic(outp);
+	}
+
+	exec_clkcmp(disp, head, 0, pclk, &conf);
+
+	if (outp->info.type == DCB_OUTPUT_ANALOG) {
+		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+		data = 0x00000000;
+	} else {
+		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+		switch (outp->info.type) {
+		case DCB_OUTPUT_TMDS:
+			nvkm_mask(device, addr, 0x007c0000, 0x00280000);
+			break;
+		case DCB_OUTPUT_DP:
+			gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_mask(device, addr, 0x00000707, data);
+}
+
+static void
+gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf;
+
+	exec_clkcmp(disp, head, 1, pclk, &conf);
+}
+
+void
+gf119_disp_intr_supervisor(struct work_struct *work)
+{
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask[4];
+	int head;
+
+	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
+	for (head = 0; head < disp->base.head.nr; head++) {
+		mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
+		nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
+	}
+
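+	/*
+	 * The supervisor appears to run in three stages per modeset:
+	 * 1 disconnects outputs, 2 reprograms clocks and retrains
+	 * links, 3 runs the final enable scripts.  Heads needing
+	 * attention are flagged in the 0x6101d4 masks read above.
+	 */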
+	if (disp->super & 0x00000001) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
+			gf119_disp_intr_unk1_0(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000002) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
+			gf119_disp_intr_unk2_0(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00010000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
+			gf119_disp_intr_unk2_1(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
+			gf119_disp_intr_unk2_2(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000004) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
+			gf119_disp_intr_unk4_0(disp, head);
+		}
+	}
+
+	for (head = 0; head < disp->base.head.nr; head++)
+		nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
+	nvkm_wr32(device, 0x6101d0, 0x80000000);
+}
+
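+/*
+ * Channel exception: log the offending method and, for a failing
+ * 0x0080 (update, presumably) on a tracked channel, dump the
+ * channel's method state at error level.
+ */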
+static void
+gf119_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
+	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+
+	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+		   chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+	if (chid < ARRAY_SIZE(disp->chan)) {
+		switch (mthd & 0xffc) {
+		case 0x0080:
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_wr32(device, 0x61009c, (1 << chid));
+	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
+void
+gf119_disp_intr(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x610088);
+	int i;
+
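+	/*
+	 * Dispatch: bit 0 signals channel completion events, bit 1 a
+	 * channel exception, bit 20 a supervisor request, and bits
+	 * 24+ per-head vblank.
+	 */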
+	if (intr & 0x00000001) {
+		u32 stat = nvkm_rd32(device, 0x61008c);
+		while (stat) {
+			int chid = __ffs(stat); stat &= ~(1 << chid);
+			nv50_disp_chan_uevent_send(disp, chid);
+			nvkm_wr32(device, 0x61008c, 1 << chid);
+		}
+		intr &= ~0x00000001;
+	}
+
+	if (intr & 0x00000002) {
+		u32 stat = nvkm_rd32(device, 0x61009c);
+		int chid = ffs(stat) - 1;
+		if (chid >= 0)
+			gf119_disp_intr_error(disp, chid);
+		intr &= ~0x00000002;
+	}
+
+	if (intr & 0x00100000) {
+		u32 stat = nvkm_rd32(device, 0x6100ac);
+		if (stat & 0x00000007) {
+			disp->super = (stat & 0x00000007);
+			schedule_work(&disp->supervisor);
+			nvkm_wr32(device, 0x6100ac, disp->super);
+			stat &= ~0x00000007;
+		}
+
+		if (stat) {
+			nvkm_warn(subdev, "intr24 %08x\n", stat);
+			nvkm_wr32(device, 0x6100ac, stat);
+		}
+
+		intr &= ~0x00100000;
+	}
+
+	for (i = 0; i < disp->base.head.nr; i++) {
+		u32 mask = 0x01000000 << i;
+		if (mask & intr) {
+			u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
+			if (stat & 0x00000001)
+				nvkm_disp_vblank(&disp->base, i);
+			nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
+			nvkm_rd32(device, 0x6100c0 + (i * 0x800));
+		}
+	}
+}
+
+int
+gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+		int index, struct nvkm_disp **pdisp)
+{
+	u32 heads = nvkm_rd32(device, 0x022448);
+	return nv50_disp_new_(func, device, index, heads, pdisp);
+}
+
+static const struct nv50_disp_func
+gf119_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gf119_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gf119_hdmi_ctrl,
+};
+
+int
+gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6f4019a..a86384b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -22,247 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x047c, 0x66047c },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x0488, 0x660488 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04a0, 0x6604a0 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x0520, 0x660520 },
-		{ 0x0524, 0x660524 },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
+static const struct nv50_disp_func
+gk104_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk104_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-const struct nv50_disp_mthd_chan
-gk104_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gk104_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00c4, 0x6650c4 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0404, 0x665404 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gk104_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_disp_sclass[] = {
-	{ GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gk104_disp_main_oclass[] = {
-	{ GK104_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk104_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk104_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk104_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk104_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x91),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index daa4b46..0d574c7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk110_disp_sclass[] = {
-	{ GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gk110_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk110_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-static struct nvkm_oclass
-gk110_disp_main_oclass[] = {
-	{ GK110_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk110_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x92),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 881cc94..b694414 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_disp_sclass[] = {
-	{ GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gm107_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm107_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-static struct nvkm_oclass
-gm107_disp_main_oclass[] = {
-	{ GM107_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm107_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm107_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gm107_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm107_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 67004f8..30f1987 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -22,90 +22,33 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_disp_sclass[] = {
-	{ GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gm204_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm204_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gm204_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
+	.sor.magic = gm204_sor_magic,
 };
 
-static struct nvkm_oclass
-gm204_disp_main_oclass[] = {
-	{ GM204_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm204_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm204_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	priv->sor.magic = gm204_sor_magic;
-	return 0;
+	return gf119_disp_new_(&gm204_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm204_disp_outp_sclass[] = {
-	&gm204_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gm204_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gm204_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index a453072..6bc3bf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -22,127 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gt200_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00b0, 0x610c98 },
-		{ 0x00b4, 0x610ca4 },
-		{ 0x00b8, 0x610cac },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
+static const struct nv50_disp_func
+gt200_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt200_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-static const struct nv50_disp_mthd_chan
-gt200_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &gt200_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt200_disp_sclass[] = {
-	{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gt200_disp_main_oclass[] = {
-	{ GT200_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt200_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt200_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt200_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x83),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt200_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &gt200_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 55f0d3a..9402628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -22,83 +22,36 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_disp_sclass[] = {
-	{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gt215_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt215_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gt215_hda_eld,
+	.sor.hdmi = gt215_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-static struct nvkm_oclass
-gt215_disp_main_oclass[] = {
-	{ GT214_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt215_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt215_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gt215_hda_eld;
-	priv->sor.hdmi = gt215_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt215_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x85),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
deleted file mode 100644
index b9813d2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-
-#include <core/client.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-int
-gf110_hda_eld(NV50_DISP_MTHD_V1)
-{
-	union {
-		struct nv50_disp_sor_hda_eld_v0 v0;
-	} *args = data;
-	const u32 soff = outp->or * 0x030;
-	const u32 hoff = head * 0x800;
-	int ret, i;
-
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
-		if (size > 0x60)
-			return -E2BIG;
-	} else
-		return ret;
-
-	if (size && args->v0.data[0]) {
-		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
-		}
-		nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
-		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
-		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8));
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
-	} else {
-		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
-		}
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
new file mode 100644
index 0000000..af99efb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "outp.h"
+
+#include <core/client.h>
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_hda_eld(NV50_DISP_MTHD_V1)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	union {
+		struct nv50_disp_sor_hda_eld_v0 v0;
+	} *args = data;
+	const u32 soff = outp->or * 0x030;
+	const u32 hoff = head * 0x800;
+	int ret, i;
+
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
+		if (size > 0x60)
+			return -E2BIG;
+	} else
+		return ret;
+
+	if (size && args->v0.data[0]) {
+		if (outp->info.type == DCB_OUTPUT_DP) {
+			nvkm_mask(device, 0x616618 + hoff, 0x8000000c, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
+		}
+		nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+		for (i = 0; i < size; i++)
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
+		for (; i < 0x60; i++)
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000003);
+	} else {
+		if (outp->info.type == DCB_OUTPUT_DP) {
+			nvkm_mask(device, 0x616618 + hoff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
+		}
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 891d1e7..c1590b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -33,15 +33,17 @@
 int
 gt215_hda_eld(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_hda_eld_v0 v0;
 	} *args = data;
 	const u32 soff = outp->or * 0x800;
 	int ret, i;
 
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
 		if (size > 0x60)
 			return -E2BIG;
 	} else
@@ -49,20 +51,28 @@
 
 	if (size && args->v0.data[0]) {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
 		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
 		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8));
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
 	} else {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index 621cb0b..ee9e800 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -31,6 +31,7 @@
 int
 g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,38 +55,38 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
-	nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616530 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616534 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616538 + hoff, 0x00000000);
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616528 + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x61652c + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616530 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616534 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616538 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
-	nv_wr32(priv, 0x61650c + hoff, 0x00000071);
-	nv_wr32(priv, 0x616510 + hoff, 0x00000000);
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
+	nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
+	nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
 	return 0;
 }
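
Every file in this series gets the same mechanical treatment: each method
fetches an explicit struct nvkm_device pointer from
disp->base.engine.subdev.device, and the priv-relative
nv_rd32()/nv_wr32()/nv_mask() accessors become nvkm_rd32()/nvkm_wr32()/
nvkm_mask() against that device.  nvkm_mask() is a read-modify-write (clear
the bits in mask, OR in data) that returns the prior register value.  A
self-contained model of the semantics, with a plain array standing in for the
MMIO aperture:

#include <assert.h>
#include <stdint.h>

static uint32_t regs[0x1000];	/* toy register file, not real MMIO */

static uint32_t rd32(uint32_t addr) { return regs[addr >> 2]; }
static void     wr32(uint32_t addr, uint32_t data) { regs[addr >> 2] = data; }

/* Model of nvkm_mask(): clear `mask' bits, OR in `data', return old. */
static uint32_t
mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32(addr);
	wr32(addr, (temp & ~mask) | data);
	return temp;
}

int
main(void)
{
	wr32(0x100, 0x40000003);
	/* mirror the HDMI disable path above: clear bit 30, keep the rest */
	mask32(0x100, 0x40000000, 0x00000000);
	assert(rd32(0x100) == 0x00000003);
	return 0;
}
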
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
deleted file mode 100644
index c284490..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-#include <core/client.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-int
-gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
-{
-	const u32 hoff = (head * 0x800);
-	union {
-		struct nv50_disp_sor_hdmi_pwr_v0 v0;
-	} *args = data;
-	u32 ctrl;
-	int ret;
-
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
-		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
-			return -EINVAL;
-		ctrl  = 0x40000000 * !!args->v0.state;
-		ctrl |= args->v0.max_ac_packet << 16;
-		ctrl |= args->v0.rekey;
-	} else
-		return ret;
-
-	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
-		return 0;
-	}
-
-	/* AVI InfoFrame */
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
-	nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616724 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616728 + hoff, 0x00000000);
-	nv_wr32(priv, 0x61672c + hoff, 0x00000000);
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
-
-	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
-
-	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
new file mode 100644
index 0000000..b5af025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 hoff = (head * 0x800);
+	union {
+		struct nv50_disp_sor_hdmi_pwr_v0 v0;
+	} *args = data;
+	u32 ctrl;
+	int ret;
+
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
+		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+			return -EINVAL;
+		ctrl  = 0x40000000 * !!args->v0.state;
+		ctrl |= args->v0.max_ac_packet << 16;
+		ctrl |= args->v0.rekey;
+	} else
+		return ret;
+
+	if (!(ctrl & 0x40000000)) {
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61671c + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x616720 + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616724 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616728 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x61672c + hoff, 0x00000000);
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+	/* ??? InfoFrame? */
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+	/* HDMI_CTRL */
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
+	return 0;
+}
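
The deleted gf110_hdmi_ctrl() reappears above as gf119_hdmi_ctrl(), identical
except for the device-pointer conversion (the rename tracks the chipset naming
used elsewhere in the rework).  Argument handling in all of these methods
follows the versioned nvif idiom: the first byte of the blob is a version
number, and nvif_unpack() accepts it only within a supported range, assigning
ret as a side effect, which is why the bare "else return ret" is well-defined.
A rough standalone sketch of the idiom, with illustrative names and a
simplified size check rather than the real nvif macros:

#include <stdio.h>
#include <string.h>

/* mirrors the layout of nv50_disp_sor_hdmi_pwr_v0, minus padding */
struct hdmi_pwr_v0 {
	unsigned char version;
	unsigned char state;
	unsigned char max_ac_packet;
	unsigned char rekey;
};

static int
hdmi_ctrl(const void *data, unsigned int size)
{
	struct hdmi_pwr_v0 args;

	if (size < sizeof(args))
		return -22;			/* -EINVAL */
	memcpy(&args, data, sizeof(args));
	if (args.version != 0)			/* only v0 handled here */
		return -38;			/* -ENOSYS, roughly */
	if (args.max_ac_packet > 0x1f || args.rekey > 0x7f)
		return -22;
	printf("state %d max_ac_packet %d rekey %d\n",
	       args.state, args.max_ac_packet, args.rekey);
	return 0;
}

int
main(void)
{
	struct hdmi_pwr_v0 req = {
		.version = 0, .state = 1,
		.max_ac_packet = 0x10, .rekey = 0x38,
	};
	return hdmi_ctrl(&req, sizeof(req));
}
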
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index ca34ff8..110dc19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -31,6 +31,7 @@
 int
 gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	const u32 hdmi = (head * 0x400);
 	union {
@@ -39,12 +40,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,30 +55,30 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x690008 + hdmi, 0x000d0282);
-	nv_wr32(priv, 0x69000c + hdmi, 0x0000006f);
-	nv_wr32(priv, 0x690010 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690014 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690018 + hdmi, 0x00000000);
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
+	nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
+	nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6900cc + hdmi, 0x00000010);
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? */
-	nv_wr32(priv, 0x690080 + hdmi, 0x82000000);
+	nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
 	return 0;
 }
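
On gk104 the infoframe registers move out of the per-head 0x800-stride block
into a separate region at 0x690000 with a 0x400 per-head stride, hence the two
offsets hoff and hdmi above.  The value written to HDMI_CTRL is the same
packed word on every chipset here: bit 30 enables, max_ac_packet sits at bits
20:16, rekey at bits 6:0.  A quick standalone check of that packing, using the
field limits enforced above:

#include <assert.h>
#include <stdint.h>

static uint32_t
hdmi_ctrl_word(int state, uint32_t max_ac_packet, uint32_t rekey)
{
	uint32_t ctrl;

	ctrl  = 0x40000000 * !!state;	/* bit 30: enable */
	ctrl |= max_ac_packet << 16;	/* <= 0x1f, bits 20:16 */
	ctrl |= rekey;			/* <= 0x7f, bits 6:0 */
	return ctrl;
}

int
main(void)
{
	assert(hdmi_ctrl_word(1, 0x10, 0x38) == 0x40100038);
	assert(hdmi_ctrl_word(0, 0x10, 0x38) == 0x00100038);
	return 0;
}
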
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index b641c16..61237db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -32,6 +32,7 @@
 int
 gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 soff = outp->or * 0x800;
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -39,12 +40,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -55,38 +56,38 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
-	nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
-	nv_wr32(priv, 0x61c530 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c534 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c538 + soff, 0x00000000);
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c528 + soff, 0x000d0282);
+	nvkm_wr32(device, 0x61c52c + soff, 0x0000006f);
+	nvkm_wr32(device, 0x61c530 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c534 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c538 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
-	nv_wr32(priv, 0x61c50c + soff, 0x00000071);
-	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
+	nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
+	nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
 	return 0;
 }
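
gt215 differs from the later parts in indexing its HDMI registers by output
rather than head (soff = outp->or * 0x800), but the 0x800-per-block stride is
the same.  A trivial sanity check of the address arithmetic used above:

#include <assert.h>

int
main(void)
{
	/* gt215: HDMI_CTRL at 0x61c5a4 + or * 0x800 (per SOR) */
	assert(0x61c5a4 + 1 * 0x800 == 0x61cda4);
	/* g84: HDMI_CTRL at 0x6165a4 + head * 0x800 (per head) */
	assert(0x6165a4 + 1 * 0x800 == 0x616da4);
	return 0;
}
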
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index ff09b26..67254ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -23,183 +23,63 @@
  */
 #include "priv.h"
 
-#include <core/client.h>
-#include <core/device.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv04_disp_priv {
-	struct nvkm_disp base;
-};
-
-static int
-nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
-		     void *data, u32 size, int head)
+static const struct nvkm_disp_oclass *
+nv04_disp_root(struct nvkm_disp *disp)
 {
-	const u32 hoff = head * 0x2000;
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	u32 line;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
-		args->v0.vtotal  = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
-		args->v0.vblanke = args->v0.vtotal - 1;
-
-		args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
-		args->v0.htotal  = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
-		args->v0.hblanke = args->v0.htotal - 1;
-
-		/*
-		 * If output is vga instead of digital then vtotal/htotal is
-		 * invalid so we have to give up and trigger the timestamping
-		 * fallback in the drm core.
-		 */
-		if (!args->v0.vtotal || !args->v0.htotal)
-			return -ENOTSUPP;
-
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		line = nv_rd32(priv, 0x600868 + hoff);
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline = (line & 0xffff0000) >> 16;
-		args->v0.vline = (line & 0x0000ffff);
-	} else
-		return ret;
-
-	return 0;
-}
-
-static int
-nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	union {
-		struct nv04_disp_mthd_v0 v0;
-	} *args = data;
-	struct nv04_disp_priv *priv = (void *)object->engine;
-	int head, ret;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-		return ret;
-
-	if (head < 0 || head >= 2)
-		return -ENXIO;
-
-	switch (mthd) {
-	case NV04_DISP_SCANOUTPOS:
-		return nv04_disp_scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-static struct nvkm_ofuncs
-nv04_disp_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nv04_disp_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv04_disp_sclass[] = {
-	{ NV04_DISP, &nv04_disp_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
+	return &nv04_disp_root_oclass;
 }
 
 static void
-nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000), 0x00000001);

 }
 
-static const struct nvkm_event_func
-nv04_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv04_disp_vblank_init,
-	.fini = nv04_disp_vblank_fini,
-};
+static void
+nv04_disp_vblank_fini(struct nvkm_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000), 0x00000000);
+}
 
 static void
-nv04_disp_intr(struct nvkm_subdev *subdev)
+nv04_disp_intr(struct nvkm_disp *disp)
 {
-	struct nv04_disp_priv *priv = (void *)subdev;
-	u32 crtc0 = nv_rd32(priv, 0x600100);
-	u32 crtc1 = nv_rd32(priv, 0x602100);
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 crtc0 = nvkm_rd32(device, 0x600100);
+	u32 crtc1 = nvkm_rd32(device, 0x602100);
 	u32 pvideo;
 
 	if (crtc0 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x600100, 0x00000001);
+		nvkm_disp_vblank(disp, 0);
+		nvkm_wr32(device, 0x600100, 0x00000001);
 	}
 
 	if (crtc1 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x602100, 0x00000001);
+		nvkm_disp_vblank(disp, 1);
+		nvkm_wr32(device, 0x602100, 0x00000001);
 	}
 
-	if (nv_device(priv)->chipset >= 0x10 &&
-	    nv_device(priv)->chipset <= 0x40) {
-		pvideo = nv_rd32(priv, 0x8100);
+	if (device->chipset >= 0x10 && device->chipset <= 0x40) {
+		pvideo = nvkm_rd32(device, 0x8100);
 		if (pvideo & ~0x11)
-			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
-		nv_wr32(priv, 0x8100, pvideo);
+			nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
+		nvkm_wr32(device, 0x8100, pvideo);
 	}
 }
 
-static int
-nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_disp_func
+nv04_disp = {
+	.intr = nv04_disp_intr,
+	.root = nv04_disp_root,
+	.head.vblank_init = nv04_disp_vblank_init,
+	.head.vblank_fini = nv04_disp_vblank_fini,
+};
+
+int
+nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv04_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = nv04_disp_sclass;
-	nv_subdev(priv)->intr = nv04_disp_intr;
-	return 0;
+	return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-nv04_disp_oclass = &(struct nvkm_disp_impl) {
-	.base.handle = NV_ENGINE(DISP, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.vblank = &nv04_disp_vblank_func,
-}.base;
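
The nv04 display engine above is the clearest picture of the whole rework:
the nvkm_object ctor/dtor boilerplate, oclass tables and container_of()
gymnastics collapse into a const struct nvkm_disp_func with intr, root and
per-head vblank hooks, handed to a common nvkm_disp_new_() constructor.  A
self-contained model of that shape, with illustrative names rather than the
real nvkm types:

#include <stdio.h>
#include <stdlib.h>

struct disp;

struct disp_func {				/* cf. nvkm_disp_func */
	void (*intr)(struct disp *);
	void (*vblank_init)(struct disp *, int head);
	void (*vblank_fini)(struct disp *, int head);
};

struct disp {
	const struct disp_func *func;
	int heads;
};

/* cf. nvkm_disp_new_(): allocate, wire up the func table, return */
static int
disp_new(const struct disp_func *func, int heads, struct disp **pdisp)
{
	struct disp *disp = calloc(1, sizeof(*disp));

	if (!disp)
		return -12;			/* -ENOMEM */
	disp->func = func;
	disp->heads = heads;
	*pdisp = disp;
	return 0;
}

static void demo_intr(struct disp *disp)
{
	printf("intr\n");
}

static void demo_vblank_init(struct disp *disp, int head)
{
	printf("vblank on, head %d\n", head);
}

static void demo_vblank_fini(struct disp *disp, int head)
{
	printf("vblank off, head %d\n", head);
}

static const struct disp_func demo_disp = {	/* cf. nv04_disp */
	.intr = demo_intr,
	.vblank_init = demo_vblank_init,
	.vblank_fini = demo_vblank_fini,
};

int
main(void)
{
	struct disp *disp;

	if (disp_new(&demo_disp, 2, &disp))
		return 1;
	disp->func->vblank_init(disp, 0);
	disp->func->intr(disp);
	disp->func->vblank_fini(disp, 0);
	free(disp);
	return 0;
}
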
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 8ba808d..32e73a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -22,1291 +22,158 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
-#include <core/ramht.h>
-#include <engine/dmaobj.h>
+#include <core/gpuobj.h>
 #include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
 
-#include <nvif/class.h>
-#include <nvif/event.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
+static const struct nvkm_disp_oclass *
+nv50_disp_root_(struct nvkm_disp *base)
+{
+	return nv50_disp(base)->func->root;
+}
 
 static int
-nv50_disp_chan_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
+nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
+			     struct dcb_output *dcb, struct nvkm_output **poutp)
 {
-	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
-	struct nv50_disp_base *base = (void *)parent;
-	struct nv50_disp_chan *chan;
-	int chid = impl->chid + head;
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.crt(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.tmds(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.lvds(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.internal.dp)
+		return disp->func->outp.internal.dp(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static int
+nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.tmds)
+		return disp->func->outp.external.tmds(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static int
+nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.dp)
+		return disp->func->outp.external.dp(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static void
+nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_init(disp, head);
+}
+
+static void
+nv50_disp_intr_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->intr(disp);
+}
+
+static void *
+nv50_disp_dtor_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	nvkm_event_fini(&disp->uevent);
+	return disp;
+}
+
+static const struct nvkm_disp_func
+nv50_disp_ = {
+	.dtor = nv50_disp_dtor_,
+	.intr = nv50_disp_intr_,
+	.root = nv50_disp_root_,
+	.outp.internal.crt = nv50_disp_outp_internal_crt_,
+	.outp.internal.tmds = nv50_disp_outp_internal_tmds_,
+	.outp.internal.lvds = nv50_disp_outp_internal_lvds_,
+	.outp.internal.dp = nv50_disp_outp_internal_dp_,
+	.outp.external.tmds = nv50_disp_outp_external_tmds_,
+	.outp.external.dp = nv50_disp_outp_external_dp_,
+	.head.vblank_init = nv50_disp_vblank_init_,
+	.head.vblank_fini = nv50_disp_vblank_fini_,
+};
+
+int
+nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	struct nv50_disp *disp;
 	int ret;
 
-	if (base->chan & (1 << chid))
-		return -EBUSY;
-	base->chan |= (1 << chid);
+	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&disp->supervisor, func->super);
+	disp->func = func;
+	*pdisp = &disp->base;
 
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  (1ULL << NVDEV_ENGINE_DMAOBJ),
-				  length, pobject);
-	chan = *pobject;
+	ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
 	if (ret)
 		return ret;
-	chan->chid = chid;
 
-	nv_parent(chan)->object_attach = impl->attach;
-	nv_parent(chan)->object_detach = impl->detach;
-	return 0;
-}
-
-static void
-nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
-{
-	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
-	base->chan &= ~(1 << chan->chid);
-	nvkm_namedb_destroy(&chan->base);
-}
-
-static void
-nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-}
-
-static void
-nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
+	return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
 }
 
 void
-nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
+nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
 {
-	struct nvif_notify_uevent_rep {
-	} rep;
-
-	nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
-}
-
-int
-nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
-			   struct nvkm_notify *notify)
-{
-	struct nv50_disp_dmac *dmac = (void *)object;
-	union {
-		struct nvif_notify_uevent_req none;
-	} *args = data;
-	int ret;
-
-	if (nvif_unvers(args->none)) {
-		notify->size  = sizeof(struct nvif_notify_uevent_rep);
-		notify->types = 1;
-		notify->index = dmac->base.chid;
-		return 0;
-	}
-
-	return ret;
-}
-
-const struct nvkm_event_func
-nv50_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = nv50_disp_chan_uevent_init,
-	.fini = nv50_disp_chan_uevent_fini,
-};
-
-int
-nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
-		    struct nvkm_event **pevent)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	switch (type) {
-	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
-		*pevent = &priv->uevent;
-		return 0;
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nv50_disp_chan *chan = (void *)object;
-	*addr = nv_device_resource_start(nv_device(object), 0) +
-		0x640000 + (chan->chid * 0x1000);
-	*size = 0x001000;
-	return 0;
-}
-
-u32
-nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), 0);
 }
 
 void
-nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv50_disp_vblank_init(struct nv50_disp *disp, int head)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
 }
 
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_dmac_object_attach(struct nvkm_object *parent,
-			     struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 chid = chan->chid;
-	u32 data = (chid << 28) | (addr << 10) | chid;
-	return nvkm_ramht_insert(base->ramht, chid, name, data);
-}
-
-static void
-nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-nv50_disp_dmac_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 pushbuf, int head,
-		       int length, void **pobject)
-{
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
-				     length, pobject);
-	dmac = *pobject;
-	if (ret)
-		return ret;
-
-	dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!dmac->pushdma)
-		return -ENOENT;
-
-	switch (nv_mclass(dmac->pushdma)) {
-	case 0x0002:
-	case 0x003d:
-		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
-			return -EINVAL;
-
-		switch (dmac->pushdma->target) {
-		case NV_MEM_TARGET_VRAM:
-			dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
-			break;
-		case NV_MEM_TARGET_PCI_NOSNOOP:
-			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-void
-nv50_disp_dmac_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_dmac *dmac = (void *)object;
-	nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
-	nv50_disp_chan_destroy(&dmac->base);
-}
-
-static int
-nv50_disp_dmac_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static void
-nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
-		    const struct nv50_disp_mthd_list *list, int inst)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	int i;
-
-	for (i = 0; list->data[i].mthd; i++) {
-		if (list->data[i].addr) {
-			u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
-			u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
-			u32 mthd = list->data[i].mthd + (list->mthd * inst);
-			const char *name = list->data[i].name;
-			char mods[16];
-
-			if (prev != next)
-				snprintf(mods, sizeof(mods), "-> 0x%08x", next);
-			else
-				snprintf(mods, sizeof(mods), "%13c", ' ');
-
-			nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
-				   mthd, prev, mods, name ? " // " : "",
-				   name ? name : "");
-		}
-	}
-}
-
-void
-nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
-		    const struct nv50_disp_mthd_chan *chan)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	const struct nv50_disp_impl *impl = (void *)disp->oclass;
-	const struct nv50_disp_mthd_list *list;
-	int i, j;
-
-	if (debug > nv_subdev(priv)->debug)
-		return;
-
-	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
-		u32 base = head * chan->addr;
-		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
-			const char *cname = chan->name;
-			const char *sname = "";
-			char cname_[16], sname_[16];
-
-			if (chan->addr) {
-				snprintf(cname_, sizeof(cname_), "%s %d",
-					 chan->name, head);
-				cname = cname_;
-			}
-
-			if (chan->data[i].nr > 1) {
-				snprintf(sname_, sizeof(sname_), " - %s %d",
-					 chan->data[i].name, j);
-				sname = sname_;
-			}
-
-			nv_printk_(disp, debug, "%s%s:\n", cname, sname);
-			nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
-					    list, j);
-		}
-	}
-}
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x610bb8 },
-		{ 0x0088, 0x610b9c },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610828 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610b70 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_pior = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0700, 0x610b80 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &nv50_disp_core_mthd_head },
-		{}
-	}
-};
-
-int
-nv50_disp_core_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_core_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_dmac *mast;
-	int ret;
-
-	nv_ioctl(parent, "create disp core channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp core channel dma vers %d "
-				 "pushbuf %08x\n",
-			 args->v0.version, args->v0.pushbuf);
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     0, sizeof(*mast), (void **)&mast);
-	*pobject = nv_object(mast);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv50_disp_core_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
-
-	/* attempt to unstick channel from some unknown state */
-	if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
-		nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
-	if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
-		nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204, mast->push);
-	nv_wr32(priv, 0x610208, 0x00010000);
-	nv_wr32(priv, 0x61020c, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610200, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_core_init,
-	.base.fini = nv50_disp_core_fini,
-	.base.map  = nv50_disp_chan_map,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x0100, 0x610894 },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_image = {
-	.mthd = 0x0400,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0800, 0x6108f0 },
-		{ 0x0804, 0x6108fc },
-		{ 0x0808, 0x61090c },
-		{ 0x080c, 0x610914 },
-		{ 0x0810, 0x610904 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-int
-nv50_disp_base_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_base_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp base channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp base channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-nv50_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0009a0 },
-		{ 0x0088, 0x0009c0 },
-		{ 0x008c, 0x0009c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-int
-nv50_disp_ovly_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_overlay_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 3,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_pioc_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
-{
-	return nv50_disp_chan_create_(parent, engine, oclass, head,
-				      length, pobject);
-}
-
-void
-nv50_disp_pioc_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_pioc *pioc = (void *)object;
-	nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
-nv50_disp_pioc_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
-		nv_error(pioc, "timeout0: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "timeout1: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-int
-nv50_disp_oimm_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_overlay_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-int
-nv50_disp_curs_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_cursor_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp cursor size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp cursor vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 7,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
-	const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
-	const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
-	const u32 total  = nv_rd32(priv, 0x610afc + (head * 0x540));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
-int
-nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
-	union {
-		struct nv50_disp_mthd_v0 v0;
-		struct nv50_disp_mthd_v1 v1;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nvkm_output *outp = NULL;
-	struct nvkm_output *temp;
-	u16 type, mask = 0;
-	int head, ret;
-
-	if (mthd != NV50_DISP_MTHD)
-		return -EINVAL;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-	if (nvif_unpack(args->v1, 1, 1, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x "
-				 "type %04x mask %04x\n",
-			 args->v1.version, args->v1.method,
-			 args->v1.hasht, args->v1.hashm);
-		mthd = args->v1.method;
-		type = args->v1.hasht;
-		mask = args->v1.hashm;
-		head = ffs((mask >> 8) & 0x0f) - 1;
-	} else
-		return ret;
-
-	if (head < 0 || head >= priv->head.nr)
-		return -ENXIO;
-
-	if (mask) {
-		list_for_each_entry(temp, &priv->base.outp, head) {
-			if ((temp->info.hasht         == type) &&
-			    (temp->info.hashm & mask) == mask) {
-				outp = temp;
-				break;
-			}
-		}
-		if (outp == NULL)
-			return -ENXIO;
-	}
-
-	switch (mthd) {
-	case NV50_DISP_SCANOUTPOS:
-		return impl->head.scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	switch (mthd * !!outp) {
-	case NV50_DISP_MTHD_V1_DAC_PWR:
-		return priv->dac.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_DAC_LOAD:
-		return priv->dac.sense(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_PWR:
-		return priv->sor.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
-		if (!priv->sor.hda_eld)
-			return -ENODEV;
-		return priv->sor.hda_eld(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
-		if (!priv->sor.hdmi)
-			return -ENODEV;
-		return priv->sor.hdmi(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
-		union {
-			struct nv50_disp_sor_lvds_script_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor lvds script size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor lvds script "
-					 "vers %d name %04x\n",
-				 args->v0.version, args->v0.script);
-			priv->sor.lvdsconf = args->v0.script;
-			return 0;
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		union {
-			struct nv50_disp_sor_dp_pwr_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor dp pwr size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
-				 args->v0.version, args->v0.state);
-			if (args->v0.state == 0) {
-				nvkm_notify_put(&outpdp->irq);
-				((struct nvkm_output_dp_impl *)nv_oclass(outp))
-					->lnk_pwr(outpdp, 0);
-				atomic_set(&outpdp->lt.done, 0);
-				return 0;
-			} else
-			if (args->v0.state != 0) {
-				nvkm_output_dp_train(&outpdp->base, 0, true);
-				return 0;
-			}
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_PIOR_PWR:
-		if (!priv->pior.power)
-			return -ENODEV;
-		return priv->pior.power(object, priv, data, size, head, outp);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv50_disp_main_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_base *base;
-	int ret;
-
-	ret = nvkm_parent_create(parent, engine, oclass, 0,
-				 priv->sclass, 0, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
-			      &base->ramht);
-}
-
-void
-nv50_disp_main_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_base *base = (void *)object;
-	nvkm_ramht_ref(NULL, &base->ramht);
-	nvkm_parent_destroy(&base->base);
-}
-
-static int
-nv50_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.  NFI what the 0x614004 caps are for..
-	 */
-	tmp = nv_rd32(priv, 0x614004);
-	nv_wr32(priv, 0x610184, tmp);
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
-		nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
-	}
-
-	/* ... PIOR caps */
-	for (i = 0; i < priv->pior.nr; i++) {
-		tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
-		nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x610024) & 0x00000100) {
-		nv_wr32(priv, 0x610024, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x61002c, 0x00000370);
-	nv_wr32(priv, 0x610028, 0x00000000);
-	return 0;
-}
-
-static int
-nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x610024, 0x00000000);
-	nv_wr32(priv, 0x610020, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-nv50_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = nv50_disp_main_init,
-	.fini = nv50_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv50_disp_main_oclass[] = {
-	{ NV50_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_disp_sclass[] = {
-	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display context, tracks instmem allocation and prevents more than one
- * client using the display hardware at any time.
- ******************************************************************************/
-
-static int
-nv50_disp_data_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nvkm_engctx *ectx;
-	int ret = -EBUSY;
-
-	/* no context needed for channel objects... */
-	if (nv_mclass(parent) != NV_DEVICE) {
-		atomic_inc(&parent->refcount);
-		*pobject = parent;
-		return 1;
-	}
-
-	/* allocate display hardware to client */
-	mutex_lock(&nv_subdev(priv)->mutex);
-	if (list_empty(&nv_engine(priv)->contexts)) {
-		ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
-					 0x10000, NVOBJ_FLAG_HEAP, &ectx);
-		*pobject = nv_object(ectx);
-	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-struct nvkm_oclass
-nv50_disp_cclass = {
-	.handle = NV_ENGCTX(DISP, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_data_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), 0);
-}
-
-static void
-nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), (4 << head));
-}
-
-const struct nvkm_event_func
-nv50_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv50_disp_vblank_init,
-	.fini = nv50_disp_vblank_fini,
-};
-
 static const struct nvkm_enum
 nv50_disp_intr_error_type[] = {
 	{ 3, "ILLEGAL_MTHD" },
@@ -1323,70 +190,46 @@
 };
 
 static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+nv50_disp_intr_error(struct nv50_disp *disp, int chid)
 {
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
-	u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
+	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
 	u32 code = (addr & 0x00ff0000) >> 16;
 	u32 type = (addr & 0x00007000) >> 12;
 	u32 mthd = (addr & 0x00000ffc);
 	const struct nvkm_enum *ec, *et;
-	char ecunk[6], etunk[6];
 
 	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
-	if (!et)
-		snprintf(etunk, sizeof(etunk), "UNK%02X", type);
-
 	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
 
-	nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
-		 et ? et->name : etunk, ec ? ec->name : ecunk,
-		 chid, mthd, data);
+	nvkm_error(subdev,
+		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
+		   type, et ? et->name : "", code, ec ? ec->name : "",
+		   chid, mthd, data);
 
-	if (chid == 0) {
+	if (chid < ARRAY_SIZE(disp->chan)) {
 		switch (mthd) {
 		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 2) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
-					    impl->mthd.ovly);
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
 			break;
 		default:
 			break;
 		}
 	}
 
-	nv_wr32(priv, 0x610020, 0x00010000 << chid);
-	nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
+	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
 }
 
 static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
 	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	    struct nvbios_outp *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_output *outp;
 	u16 mask, type;
 
@@ -1403,7 +246,7 @@
 		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
 		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
 		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
 			return NULL;
 		}
 		or  -= 4;
@@ -1412,9 +255,9 @@
 		type = 0x0010;
 		mask = 0;
 		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type |= priv->pior.type[or]; break;
+		case 0x00000000: type |= disp->pior.type[or]; break;
 		default:
-			nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
 			return NULL;
 		}
 	}
@@ -1423,7 +266,7 @@
 	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;
 
-	list_for_each_entry(outp, &priv->base.outp, head) {
+	list_for_each_entry(outp, &disp->base.outp, head) {
 		if ((outp->info.hasht & 0xff) == type &&
 		    (outp->info.hashm & mask) == mask) {
 			*data = nvbios_outp_match(bios, outp->info.hasht,
@@ -1439,9 +282,11 @@
 }
 
 static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
+exec_script(struct nv50_disp *disp, int head, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info;
 	u8  ver, hdr, cnt, len;
@@ -1450,27 +295,27 @@
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b74;
 		} else {
 			reg = 0x610798;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
 		i += 8;
 	}
 
@@ -1478,10 +323,10 @@
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
 	if (outp) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
+			.subdev = subdev,
 			.bios = bios,
 			.offset = info.script[id],
 			.outp = &outp->info,
@@ -1496,9 +341,11 @@
 }
 
 static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info1;
 	struct nvbios_ocfg info2;
@@ -1508,27 +355,27 @@
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b70;
 		} else {
 			reg = 0x610794;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
 		i += 8;
 	}
 
@@ -1536,7 +383,7 @@
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
 	if (!outp)
 		return NULL;
 
@@ -1548,7 +395,7 @@
 				*conf |= 0x0100;
 			break;
 		case DCB_OUTPUT_LVDS:
-			*conf = priv->sor.lvdsconf;
+			*conf = disp->sor.lvdsconf;
 			break;
 		case DCB_OUTPUT_DP:
 			*conf = (ctrl & 0x00000f00) >> 8;
@@ -1568,7 +415,7 @@
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
 			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+				.subdev = subdev,
 				.bios = bios,
 				.offset = data,
 				.outp = &outp->info,
@@ -1584,15 +431,16 @@
 }
 
 static void
-nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
 {
-	exec_script(priv, head, 1);
+	exec_script(disp, head, 1);
 }
 
 static void
-nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
 {
-	struct nvkm_output *outp = exec_script(priv, head, 2);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
 
 	/* the binary driver does this outside of the supervisor handling
 	 * (after the third supervisor from a detach).  we (currently?)
@@ -1608,10 +456,10 @@
 	 * in a blank screen (SOR_PWR off/on can restore it)
 	 */
 	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
+			.subdev = subdev,
+			.bios = subdev->device->bios,
 			.outp = &outp->info,
 			.crtc = head,
 			.offset = outpdp->info.script[4],
@@ -1624,29 +472,32 @@
 }
 
 static void
-nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
 }
 
 static void
-nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
+nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
 			  struct dcb_output *outp, u32 pclk)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 soff = (  or * 0x800);
 	const u32 loff = (link * 0x080) + soff;
-	const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
 	const u32 symbol = 100000;
-	const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x614300 + soff);
+	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
 	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
 	int TU, VTUi, VTUf, VTUa;
 	u64 link_data_rate, link_ratio, unk;
@@ -1662,14 +513,14 @@
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
+	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
 
 	/* symbols/vblank - algorithm taken from comments in tegra driver */
 	value = vblanks - vblanke - 25;
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
+	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
 
 	/* watermark / activesym */
 	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
@@ -1734,7 +585,7 @@
 	}
 
 	if (!bestTU) {
-		nv_error(priv, "unable to find suitable dp config\n");
+		nvkm_error(subdev, "unable to find suitable dp config\n");
 		return;
 	}
 
@@ -1745,22 +596,23 @@
 	do_div(unk, symbol);
 	unk += 6;
 
-	nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
-	nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
 						   bestVTUf << 16 |
 						   bestVTUi << 8 | unk);
 }
 
 static void
-nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 hval, hreg = 0x614200 + (head * 0x800);
 	u32 oval, oreg;
 	u32 mask, conf;
 
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
 	if (!outp)
 		return;
 
@@ -1787,10 +639,10 @@
 		u32 ctrl, datarate;
 
 		if (outp->info.location == 0) {
-			ctrl = nv_rd32(priv, 0x610794 + soff);
+			ctrl = nvkm_rd32(device, 0x610794 + soff);
 			soff = 1;
 		} else {
-			ctrl = nv_rd32(priv, 0x610b80 + soff);
+			ctrl = nvkm_rd32(device, 0x610b80 + soff);
 			soff = 2;
 		}
 
@@ -1804,10 +656,10 @@
 		}
 
 		if (nvkm_output_dp_train(outp, datarate / soff, true))
-			ERR("link not trained before attach\n");
+			OUTP_ERR(outp, "link not trained before attach");
 	}
 
-	exec_clkcmp(priv, head, 0, pclk, &conf);
+	exec_clkcmp(disp, head, 0, pclk, &conf);
 
 	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
 		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
@@ -1817,7 +669,7 @@
 	} else
 	if (!outp->info.location) {
 		if (outp->info.type == DCB_OUTPUT_DP)
-			nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
+			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
 		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
 		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
 		hval = 0x00000000;
@@ -1829,8 +681,8 @@
 		mask = 0x00000707;
 	}
 
-	nv_mask(priv, hreg, 0x0000000f, hval);
-	nv_mask(priv, oreg, mask, oval);
+	nvkm_mask(device, hreg, 0x0000000f, hval);
+	nvkm_mask(device, oreg, mask, oval);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -1842,10 +694,11 @@
  * programmed for DisplayPort.
  */
 static void
-nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
 			    struct dcb_output *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1854,166 +707,136 @@
 	u8  ver, hdr;
 
 	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
-		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
 }
 
 static void
-nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 conf;
 
-	outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
 	if (!outp)
 		return;
 
 	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
-		nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
+		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
 }
 
 void
 nv50_disp_intr_supervisor(struct work_struct *work)
 {
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 super = nv_rd32(priv, 0x610030);
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 super = nvkm_rd32(device, 0x610030);
 	int head;
 
-	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
 
-	if (priv->super & 0x00000010) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000010) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000020 << head)))
 				continue;
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk10_0(priv, head);
+			nv50_disp_intr_unk10_0(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000020) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000020) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_0(priv, head);
+			nv50_disp_intr_unk20_0(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000200 << head)))
 				continue;
-			nv50_disp_intr_unk20_1(priv, head);
+			nv50_disp_intr_unk20_1(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_2(priv, head);
+			nv50_disp_intr_unk20_2(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000040) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000040) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk40_0(priv, head);
+			nv50_disp_intr_unk40_0(disp, head);
 		}
 	}
 
-	nv_wr32(priv, 0x610030, 0x80000000);
+	nvkm_wr32(device, 0x610030, 0x80000000);
 }
 
 void
-nv50_disp_intr(struct nvkm_subdev *subdev)
+nv50_disp_intr(struct nv50_disp *disp)
 {
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x610020);
-	u32 intr1 = nv_rd32(priv, 0x610024);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x610020);
+	u32 intr1 = nvkm_rd32(device, 0x610024);
 
 	while (intr0 & 0x001f0000) {
 		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
-		nv50_disp_intr_error(priv, chid);
+		nv50_disp_intr_error(disp, chid);
 		intr0 &= ~(0x00010000 << chid);
 	}
 
 	while (intr0 & 0x0000001f) {
 		u32 chid = __ffs(intr0 & 0x0000001f);
-		nv50_disp_chan_uevent_send(priv, chid);
+		nv50_disp_chan_uevent_send(disp, chid);
 		intr0 &= ~(0x00000001 << chid);
 	}
 
 	if (intr1 & 0x00000004) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x610024, 0x00000004);
-		intr1 &= ~0x00000004;
+		nvkm_disp_vblank(&disp->base, 0);
+		nvkm_wr32(device, 0x610024, 0x00000004);
 	}
 
 	if (intr1 & 0x00000008) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x610024, 0x00000008);
-		intr1 &= ~0x00000008;
+		nvkm_disp_vblank(&disp->base, 1);
+		nvkm_wr32(device, 0x610024, 0x00000008);
 	}
 
 	if (intr1 & 0x00000070) {
-		priv->super = (intr1 & 0x00000070);
-		schedule_work(&priv->supervisor);
-		nv_wr32(priv, 0x610024, priv->super);
-		intr1 &= ~0x00000070;
+		disp->super = (intr1 & 0x00000070);
+		schedule_work(&disp->supervisor);
+		nvkm_wr32(device, 0x610024, disp->super);
 	}
 }
 
-static int
-nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = nv50_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = nv50_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->pior.power = nv50_pior_power;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv50_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	NULL
+static const struct nv50_disp_func
+nv50_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &nv50_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-struct nvkm_oclass *
-nv50_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &nv50_disp_core_mthd_chan,
-	.mthd.base = &nv50_disp_base_mthd_chan,
-	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
+int
+nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
+}
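With the object-model boilerplate removed, wiring up another display variant in this scheme reduces to supplying a function table and a head count. A minimal sketch, assuming a hypothetical gXX chipset that reuses the nv50 hooks from this series (the gXX_* names are placeholders, not symbols in the tree):

/* Hypothetical gXX variant; every hook referenced here is introduced
 * elsewhere in this series, only the gXX_* names are invented. */
static const struct nv50_disp_func
gXX_disp = {
	.intr = nv50_disp_intr,
	.uevent = &nv50_disp_chan_uevent,
	.super = nv50_disp_intr_supervisor,
	.root = &nv50_disp_root_oclass,
	.head.vblank_init = nv50_disp_vblank_init,
	.head.vblank_fini = nv50_disp_vblank_fini,
	.head.scanoutpos = nv50_disp_root_scanoutpos,
	.dac.nr = 3,
	.dac.power = nv50_dac_power,
	.sor.nr = 2,
	.sor.power = nv50_sor_power,
	.pior.nr = 3,
	.pior.power = nv50_pior_power,
};

int
gXX_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	/* same constructor path as nv50, two heads */
	return nv50_disp_new_(&gXX_disp, device, index, 2, pdisp);
}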
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index b4ed620..aecebd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,17 +1,18 @@
 #ifndef __NV50_DISP_H__
 #define __NV50_DISP_H__
+#define nv50_disp(p) container_of((p), struct nv50_disp, base)
 #include "priv.h"
 struct nvkm_output;
 struct nvkm_output_dp;
 
 #define NV50_DISP_MTHD_ struct nvkm_object *object,                            \
-	struct nv50_disp_priv *priv, void *data, u32 size
+	struct nv50_disp *disp, void *data, u32 size
 #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
 #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
 
-struct nv50_disp_priv {
+struct nv50_disp {
+	const struct nv50_disp_func *func;
 	struct nvkm_disp base;
-	struct nvkm_oclass *sclass;
 
 	struct work_struct supervisor;
 	u32 super;
@@ -19,208 +20,98 @@
 	struct nvkm_event uevent;
 
 	struct {
-		int nr;
-	} head;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*sense)(NV50_DISP_MTHD_V1);
-	} dac;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*hda_eld)(NV50_DISP_MTHD_V1);
-		int (*hdmi)(NV50_DISP_MTHD_V1);
 		u32 lvdsconf;
-		void (*magic)(struct nvkm_output *);
 	} sor;
+
 	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
 		u8 type[3];
 	} pior;
+
+	struct nv50_disp_chan *chan[17];
 };
 
-struct nv50_disp_impl {
-	struct nvkm_disp_impl base;
-	struct {
-		const struct nv50_disp_mthd_chan *core;
-		const struct nv50_disp_mthd_chan *base;
-		const struct nv50_disp_mthd_chan *ovly;
-		int prev;
-	} mthd;
-	struct {
-		int (*scanoutpos)(NV50_DISP_MTHD_V0);
-	} head;
-};
+int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
-int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
-
-int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
 int nv50_dac_power(NV50_DISP_MTHD_V1);
 int nv50_dac_sense(NV50_DISP_MTHD_V1);
 
 int gt215_hda_eld(NV50_DISP_MTHD_V1);
-int gf110_hda_eld(NV50_DISP_MTHD_V1);
+int gf119_hda_eld(NV50_DISP_MTHD_V1);
 
 int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gf119_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
 
 int nv50_sor_power(NV50_DISP_MTHD_V1);
 int nv50_pior_power(NV50_DISP_MTHD_V1);
 
-#include <core/parent.h>
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		    int index, struct nvkm_disp **);
 
-struct nv50_disp_base {
-	struct nvkm_parent base;
-	struct nvkm_ramht *ramht;
-	u32 chan;
+struct nv50_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
 };
 
-struct nv50_disp_chan_impl {
-	struct nvkm_ofuncs base;
-	int chid;
-	int  (*attach)(struct nvkm_object *, struct nvkm_object *, u32);
-	void (*detach)(struct nvkm_object *, int);
-};
+struct nv50_disp_func {
+	void (*intr)(struct nv50_disp *);
 
-#include <core/namedb.h>
+	const struct nvkm_event_func *uevent;
+	void (*super)(struct work_struct *);
 
-struct nv50_disp_chan {
-	struct nvkm_namedb base;
-	int chid;
-};
+	const struct nvkm_disp_oclass *root;
 
-int  nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-int  nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
-u32  nv50_disp_chan_rd32(struct nvkm_object *, u64);
-void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
-				struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
-
-extern const struct nvkm_event_func gf110_disp_chan_uevent;
-
-#define nv50_disp_chan_init(a)                                                 \
-	nvkm_namedb_init(&(a)->base)
-#define nv50_disp_chan_fini(a,b)                                               \
-	nvkm_namedb_fini(&(a)->base, (b))
-
-struct nv50_disp_dmac {
-	struct nv50_disp_chan base;
-	struct nvkm_dmaobj *pushdma;
-	u32 push;
-};
-
-void nv50_disp_dmac_dtor(struct nvkm_object *);
-
-struct nv50_disp_pioc {
-	struct nv50_disp_chan base;
-};
-
-void nv50_disp_pioc_dtor(struct nvkm_object *);
-
-struct nv50_disp_mthd_list {
-	u32 mthd;
-	u32 addr;
 	struct {
-		u32 mthd;
-		u32 addr;
-		const char *name;
-	} data[];
-};
+		void (*vblank_init)(struct nv50_disp *, int head);
+		void (*vblank_fini)(struct nv50_disp *, int head);
+		int (*scanoutpos)(NV50_DISP_MTHD_V0);
+	} head;
 
-struct nv50_disp_mthd_chan {
-	const char *name;
-	u32 addr;
 	struct {
-		const char *name;
+		const struct nv50_disp_func_outp internal;
+		const struct nv50_disp_func_outp external;
+	} outp;
+
+	struct {
 		int nr;
-		const struct nv50_disp_mthd_list *mthd;
-	} data[];
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*sense)(NV50_DISP_MTHD_V1);
+	} dac;
+
+	struct {
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*hda_eld)(NV50_DISP_MTHD_V1);
+		int (*hdmi)(NV50_DISP_MTHD_V1);
+		void (*magic)(struct nvkm_output *);
+	} sor;
+
+	struct {
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+	} pior;
 };
 
-extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
-int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
-extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
-int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
-int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
-int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
-int  nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-void nv50_disp_main_dtor(struct nvkm_object *);
-extern struct nvkm_omthds nv50_disp_main_omthds[];
-extern struct nvkm_oclass nv50_disp_cclass;
-void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
-			 const struct nv50_disp_mthd_chan *);
+void nv50_disp_vblank_init(struct nv50_disp *, int);
+void nv50_disp_vblank_fini(struct nv50_disp *, int);
+void nv50_disp_intr(struct nv50_disp *);
 void nv50_disp_intr_supervisor(struct work_struct *);
-void nv50_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func nv50_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
-extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
-extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
-
-extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
-
-extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
-extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
-extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
-extern struct nvkm_oclass gf110_disp_cclass;
-void gf110_disp_intr_supervisor(struct work_struct *);
-void gf110_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func gf110_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
-
-extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
-extern struct nvkm_oclass *nv50_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl g94_sor_dp_impl;
-int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
-extern struct nvkm_oclass *g94_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
-int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-extern struct nvkm_oclass *gf110_disp_outp_sclass[];
-
-void gm204_sor_magic(struct nvkm_output *outp);
-extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+void gf119_disp_vblank_init(struct nv50_disp *, int);
+void gf119_disp_vblank_fini(struct nv50_disp *, int);
+void gf119_disp_intr(struct nv50_disp *);
+void gf119_disp_intr_supervisor(struct work_struct *);
 #endif
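The nv50_disp(p) definition at the top of this header is the usual container_of() downcast from the embedded struct nvkm_disp base back to the wrapping struct nv50_disp. A sketch of the idiom (the caller name is invented):

/* Sketch only: sketch_intr() is a hypothetical caller; the macro and
 * struct layout are the ones defined in nv50.h above. */
static void
sketch_intr(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);	/* container_of */

	if (disp->func->intr)
		disp->func->intr(disp);	/* per-chipset dispatch */
}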
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
new file mode 100644
index 0000000..54a4ae8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_oimm_oclass = {
+	.base.oclass = G82_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
new file mode 100644
index 0000000..c658db5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_oimm_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
new file mode 100644
index 0000000..b1fde8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_oimm_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
new file mode 100644
index 0000000..f4e7eb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_oimm_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
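The four overlay-immediate files above are deliberately identical apart from three fields: the class id exposed to userspace, the per-generation channel functions, and the fixed channel id. A hypothetical further entry would follow the same shape (the gXXX names and class id are invented for illustration):

const struct nv50_disp_pioc_oclass
gXXX_disp_oimm_oclass = {
	.base.oclass = GXXX_DISP_OVERLAY,	/* hypothetical class id */
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv50_disp_oimm_new,		/* shared constructor */
	.func = &gf119_disp_pioc_func,		/* per-generation channel ops */
	.chid = 9,				/* fixed channel id */
};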
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
new file mode 100644
index 0000000..cd888a1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp overlay size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head >= disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_oimm_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
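The "else return ret" in nv50_disp_oimm_new() above looks like a use of an uninitialized variable, but nvif_unpack() is a statement-expression macro that assigns the function-local ret before yielding its truth value. A reduced model of the idiom, not the real <nvif/unpack.h> definition:

/* Reduced sketch of the unpack idiom; the real macro additionally
 * checks version ranges and variable-length tails. */
#define sketch_unpack(d, size) ({                                      \
	ret = ((size) == sizeof(d)) ? 0 : -ENOSYS;                     \
	(ret == 0);	/* the macro has already assigned ret */       \
})

static int
sketch_ctor(struct nv50_disp_overlay_v0 *args, u32 size)
{
	int ret;

	if (sketch_unpack(*args, size)) {
		/* arguments validated; construction continues here */
		return 0;
	}
	return ret;	/* set by the macro on the failure path */
}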
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 9224bcb..bbe5ec0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,121 +22,66 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
-#include "priv.h"
 
 #include <subdev/bios.h>
-#include <subdev/bios/conn.h>
 #include <subdev/bios/dcb.h>
 #include <subdev/i2c.h>
 
-int
-_nvkm_output_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_output_fini(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
-	return nvkm_object_fini(&outp->base, suspend);
-}
-
-int
-_nvkm_output_init(struct nvkm_object *object)
-{
-	struct nvkm_output *outp = (void *)object;
-	int ret = nvkm_object_init(&outp->base);
-	if (ret == 0)
-		nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
-	return 0;
+	if (outp->func->fini)
+		outp->func->fini(outp);
 }
 
 void
-_nvkm_output_dtor(struct nvkm_object *object)
+nvkm_output_init(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	list_del(&outp->head);
-	nvkm_object_ref(NULL, (void *)&outp->conn);
-	nvkm_object_destroy(&outp->base);
+	if (outp->func->init)
+		outp->func->init(outp);
 }
 
-int
-nvkm_output_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass,
-		    struct dcb_output *dcbE, int index,
-		    int length, void **pobject)
+void
+nvkm_output_del(struct nvkm_output **poutp)
 {
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvbios_connE connE;
-	struct nvkm_output *outp;
-	u8  ver, hdr;
-	u32 data;
-	int ret;
+	struct nvkm_output *outp = *poutp;
+	if (outp && !WARN_ON(!outp->func)) {
+		if (outp->func->dtor)
+			*poutp = outp->func->dtor(outp);
+		kfree(*poutp);
+		*poutp = NULL;
+	}
+}
 
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
+void
+nvkm_output_ctor(const struct nvkm_output_func *func, struct nvkm_disp *disp,
+		 int index, struct dcb_output *dcbE, struct nvkm_output *outp)
+{
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
 
-	outp->info = *dcbE;
+	outp->func = func;
+	outp->disp = disp;
 	outp->index = index;
+	outp->info = *dcbE;
+	outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
 	outp->or = ffs(outp->info.or) - 1;
 
-	DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
-	    dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
-	    dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
-	    dcbE->bus, dcbE->heads);
-
-	if (outp->info.type != DCB_OUTPUT_DP)
-		outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
-	else
-		outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
-	outp->edid = outp->port;
-
-	data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
-	if (!data) {
-		DBG("vbios connector data not found\n");
-		memset(&connE, 0x00, sizeof(connE));
-		connE.type = DCB_CONNECTOR_NONE;
-	}
-
-	ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
-			       &connE, outp->info.connector,
-			       (struct nvkm_object **)&outp->conn);
-	if (ret < 0) {
-		ERR("error %d creating connector, disabling\n", ret);
-		return ret;
-	}
-
-	list_add_tail(&outp->head, &disp->outp);
-	return 0;
+	OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
+		       "edid %x bus %d head %x",
+		 outp->info.type, outp->info.location, outp->info.or,
+		 outp->info.type >= 2 ? outp->info.sorconf.link : 0,
+		 outp->info.connector, outp->info.i2c_index,
+		 outp->info.bus, outp->info.heads);
 }
 
 int
-_nvkm_output_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *dcbE, u32 index,
-		  struct nvkm_object **pobject)
+nvkm_output_new_(const struct nvkm_output_func *func,
+		 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
 {
-	struct nvkm_output *outp;
-	int ret;
+	if (!(*poutp = kzalloc(sizeof(**poutp), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
+	nvkm_output_ctor(func, disp, index, dcbE, *poutp);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_output_oclass = &(struct nvkm_output_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_output_ctor,
-			.dtor = _nvkm_output_dtor,
-			.init = _nvkm_output_init,
-			.fini = _nvkm_output_fini,
-		},
-	},
-}.base;
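Taken together, the helpers above replace the nvkm_object constructor/destructor pair with an explicit lifecycle. A sketch of a caller, assuming a hypothetical sketch_output_func table and an already-parsed DCB entry:

/* Sketch only: sketch_output_func and the caller are invented; the
 * nvkm_output_* helpers are the ones defined in outp.c above. */
static const struct nvkm_output_func sketch_output_func = {
	/* .dtor, .init and .fini are all optional and may be NULL */
};

static int
sketch_output_probe(struct nvkm_disp *disp, struct dcb_output *dcbE)
{
	struct nvkm_output *outp;
	int ret;

	ret = nvkm_output_new_(&sketch_output_func, disp, 0, dcbE, &outp);
	if (ret)
		return ret;

	nvkm_output_init(outp);	/* forwards to func->init when set */
	nvkm_output_fini(outp);	/* forwards to func->fini when set */
	nvkm_output_del(&outp);	/* func->dtor (if any), kfree, NULL */
	return 0;
}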
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index d9253d2..2590fec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,61 +1,55 @@
 #ifndef __NVKM_DISP_OUTP_H__
 #define __NVKM_DISP_OUTP_H__
-#include <core/object.h>
+#include <engine/disp.h>
 
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
 struct nvkm_output {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct dcb_output info;
+	const struct nvkm_output_func *func;
+	struct nvkm_disp *disp;
 	int index;
+	struct dcb_output info;
+
+	/* whatever (if anything) is pointed at by the dcb device entry */
+	struct nvkm_i2c_bus *i2c;
 	int or;
 
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *edid;
-
+	struct list_head head;
 	struct nvkm_connector *conn;
 };
 
-#define nvkm_output_create(p,e,c,b,i,d)                                        \
-	nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_destroy(d) ({                                              \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_dtor(nv_object(_outp));                                   \
-})
-#define nvkm_output_init(d) ({                                                 \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_init(nv_object(_outp));                                   \
-})
-#define nvkm_output_fini(d,s) ({                                               \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_fini(nv_object(_outp), (s));                              \
-})
-
-int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, struct dcb_output *,
-			int, int, void **);
-
-int  _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_output_dtor(struct nvkm_object *);
-int  _nvkm_output_init(struct nvkm_object *);
-int  _nvkm_output_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_impl {
-	struct nvkm_oclass base;
+struct nvkm_output_func {
+	void *(*dtor)(struct nvkm_output *);
+	void (*init)(struct nvkm_output *);
+	void (*fini)(struct nvkm_output *);
 };
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_output *_outp = (void *)outp;                              \
-	nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index,                       \
-	       _outp->info.hasht, _outp->info.hashm, ##a);                     \
+void nvkm_output_ctor(const struct nvkm_output_func *, struct nvkm_disp *,
+		      int index, struct dcb_output *, struct nvkm_output *);
+int nvkm_output_new_(const struct nvkm_output_func *, struct nvkm_disp *,
+		     int index, struct dcb_output *, struct nvkm_output **);
+void nvkm_output_del(struct nvkm_output **);
+void nvkm_output_init(struct nvkm_output *);
+void nvkm_output_fini(struct nvkm_output *);
+
+int nv50_dac_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_sor_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			 struct nvkm_output **);
+
+u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
+
+void gm204_sor_magic(struct nvkm_output *outp);
+
+#define OUTP_MSG(o,l,f,a...) do {                                              \
+	struct nvkm_output *_outp = (o);                                       \
+	nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n",    \
+		 _outp->index, _outp->info.hasht, _outp->info.hashm, ##a);     \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define OUTP_ERR(o,f,a...) OUTP_MSG((o), error, f, ##a)
+#define OUTP_DBG(o,f,a...) OUTP_MSG((o), debug, f, ##a)
+#define OUTP_TRACE(o,f,a...) OUTP_MSG((o), trace, f, ##a)
 #endif
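The OUTP_* macros above take over the "%02x:%04x:%04x" prefix and the trailing newline, which is why call sites converted in this series drop their "\n". A usage sketch (the function is invented):

/* Sketch: logs via nvkm_debug()/nvkm_error() on the output's subdev,
 * prefixed with "outp <index>:<hasht>:<hashm>:". */
static void
sketch_report(struct nvkm_output *outp, int lanes)
{
	OUTP_DBG(outp, "training %d lanes", lanes);
	if (lanes == 0)
		OUTP_ERR(outp, "link not trained before attach");
}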
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 0bde0fa..3b7a9e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -33,16 +33,17 @@
 int
 nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
 {
-	struct nvkm_output_dp *outp = (void *)base;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
 	bool retrain = true;
 	u8 link[2], stat[3];
 	u32 linkrate;
 	int ret, i;
 
 	/* check that the link is trained at a high enough rate */
-	ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+	ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
 	if (ret) {
-		DBG("failed to read link config, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link config, assuming no sink");
 		goto done;
 	}
 
@@ -50,14 +51,15 @@
 	linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
 	datarate = (datarate + 9) / 10; /* -> decakilobits */
 	if (linkrate < datarate) {
-		DBG("link not trained at sufficient rate\n");
+		OUTP_DBG(&outp->base, "link not trained at sufficient rate");
 		goto done;
 	}
 
 	/* check that link is still trained */
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, stat, 3);
 	if (ret) {
-		DBG("failed to read link status, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link status, assuming no sink");
 		goto done;
 	}
 
@@ -67,13 +69,14 @@
 			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
-				DBG("lane %d not equalised\n", lane);
+				OUTP_DBG(&outp->base,
+					 "lane %d not equalised", lane);
 				goto done;
 			}
 		}
 		retrain = false;
 	} else {
-		DBG("no inter-lane alignment\n");
+		OUTP_DBG(&outp->base, "no inter-lane alignment");
 	}
 
 done:
@@ -102,150 +105,138 @@
 }
 
 static void
-nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
 {
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (present) {
+	struct nvkm_i2c_aux *aux = outp->aux;
+
+	if (enable) {
 		if (!outp->present) {
-			nvkm_i2c(port)->acquire_pad(port, 0);
-			DBG("aux power -> always\n");
+			OUTP_DBG(&outp->base, "aux power -> always");
+			nvkm_i2c_aux_monitor(aux, true);
 			outp->present = true;
 		}
-		nvkm_output_dp_train(&outp->base, 0, true);
-	} else {
-		if (outp->present) {
-			nvkm_i2c(port)->release_pad(port);
-			DBG("aux power -> demand\n");
-			outp->present = false;
-		}
-		atomic_set(&outp->lt.done, 0);
-	}
-}
 
-static void
-nvkm_output_dp_detect(struct nvkm_output_dp *outp)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	int ret = nvkm_i2c(port)->acquire_pad(port, 0);
-	if (ret == 0) {
-		ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
-			       outp->dpcd, sizeof(outp->dpcd));
-		nvkm_output_dp_enable(outp, ret == 0);
-		nvkm_i2c(port)->release_pad(port);
+		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
+				sizeof(outp->dpcd))) {
+			nvkm_output_dp_train(&outp->base, 0, true);
+			return;
+		}
 	}
+
+	if (outp->present) {
+		OUTP_DBG(&outp->base, "aux power -> demand");
+		nvkm_i2c_aux_monitor(aux, false);
+		outp->present = false;
+	}
+
+	atomic_set(&outp->lt.done, 0);
 }
 
 static int
 nvkm_output_dp_hpd(struct nvkm_notify *notify)
 {
-	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_output_dp *outp;
-	struct nvkm_disp *disp = nvkm_disp(conn);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), hpd);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {};
 
-	list_for_each_entry(outp, &disp->outp, base.head) {
-		if (outp->base.conn == conn &&
-		    outp->info.type == DCB_OUTPUT_DP) {
-			DBG("HPD: %d\n", line->mask);
-			nvkm_output_dp_detect(outp);
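+	/* re-probe the sink before forwarding the plug/unplug event to
+	 * the connector's event stream
+	 */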
+	OUTP_DBG(&outp->base, "HPD: %d", line->mask);
+	nvkm_output_dp_enable(outp, true);
 
-			if (line->mask & NVKM_I2C_UNPLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
-			if (line->mask & NVKM_I2C_PLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+	if (line->mask & NVKM_I2C_UNPLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+	if (line->mask & NVKM_I2C_PLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
 
-			nvkm_event_send(&disp->hpd, rep.mask, conn->index,
-					&rep, sizeof(rep));
-			return NVKM_NOTIFY_KEEP;
-		}
-	}
-
-	WARN_ON(1);
-	return NVKM_NOTIFY_DROP;
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
+	return NVKM_NOTIFY_KEEP;
 }
 
 static int
 nvkm_output_dp_irq(struct nvkm_notify *notify)
 {
-	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
-	struct nvkm_disp *disp = nvkm_disp(outp);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {
 		.mask = NVIF_NOTIFY_CONN_V0_IRQ,
 	};
-	int index = outp->base.info.connector;
 
-	DBG("IRQ: %d\n", line->mask);
+	OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
 	nvkm_output_dp_train(&outp->base, 0, true);
 
-	nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
 	return NVKM_NOTIFY_DROP;
 }
 
-int
-_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_output_dp_fini(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
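+	/* stop listening for hpd/irq events, wait for any in-flight link
+	 * training to finish, then power down the aux channel
+	 */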
+	nvkm_notify_put(&outp->hpd);
 	nvkm_notify_put(&outp->irq);
+	flush_work(&outp->lt.work);
 	nvkm_output_dp_enable(outp, false);
-	return nvkm_output_fini(&outp->base, suspend);
 }
 
-int
-_nvkm_output_dp_init(struct nvkm_object *object)
+static void
+nvkm_output_dp_init(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
-	nvkm_output_dp_detect(outp);
-	return nvkm_output_init(&outp->base);
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
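+	/* dp outputs handle hpd events themselves, so disable the
+	 * connector's notifier in favour of our own
+	 */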
+	nvkm_notify_put(&outp->base.conn->hpd);
+	nvkm_output_dp_enable(outp, true);
+	nvkm_notify_get(&outp->hpd);
 }
 
-void
-_nvkm_output_dp_dtor(struct nvkm_object *object)
+static void *
+nvkm_output_dp_dtor(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_fini(&outp->hpd);
 	nvkm_notify_fini(&outp->irq);
-	nvkm_output_destroy(&outp->base);
+	return outp;
 }
 
+static const struct nvkm_output_func
+nvkm_output_dp_func = {
+	.dtor = nvkm_output_dp_dtor,
+	.init = nvkm_output_dp_init,
+	.fini = nvkm_output_dp_fini,
+};
+
 int
-nvkm_output_dp_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct dcb_output *info, int index,
-		       int length, void **pobject)
+nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_i2c_aux *aux, struct nvkm_output_dp *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
+	struct nvkm_device *device = disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
 	u8  hdr, cnt, len;
 	u32 data;
 	int ret;
 
-	ret = nvkm_output_create_(parent, engine, oclass, info, index,
-				  length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_notify_fini(&outp->base.conn->hpd);
-
-	/* access to the aux channel is not optional... */
-	if (!outp->base.edid) {
-		ERR("aux channel not found\n");
+	nvkm_output_ctor(&nvkm_output_dp_func, disp, index, dcbE, &outp->base);
+	outp->func = func;
+	outp->aux = aux;
+	if (!outp->aux) {
+		OUTP_ERR(&outp->base, "no aux");
 		return -ENODEV;
 	}
 
-	/* nor is the bios data for this output... */
+	/* bios data is not optional */
 	data = nvbios_dpout_match(bios, outp->base.info.hasht,
 				  outp->base.info.hashm, &outp->version,
 				  &hdr, &cnt, &len, &outp->info);
 	if (!data) {
-		ERR("no bios dp data\n");
+		OUTP_ERR(&outp->base, "no bios dp data");
 		return -ENODEV;
 	}
 
-	DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+	OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
+		 outp->version, hdr, cnt, len);
 
 	/* link training */
 	INIT_WORK(&outp->lt.work, nvkm_dp_train);
@@ -256,13 +247,13 @@
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_IRQ,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
 			       &outp->irq);
 	if (ret) {
-		ERR("error monitoring aux irq event: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux irq: %d", ret);
 		return ret;
 	}
 
@@ -270,13 +261,13 @@
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
-			       &outp->base.conn->hpd);
+			       &outp->hpd);
 	if (ret) {
-		ERR("error monitoring aux hpd events: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux hpd: %d", ret);
 		return ret;
 	}
 
@@ -284,18 +275,17 @@
 }
 
 int
-_nvkm_output_dp_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_output_dp_new_(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_output **poutp)
 {
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbE->i2c_index);
 	struct nvkm_output_dp *outp;
-	int ret;
 
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
 
-	return 0;
+	return nvkm_output_dp_ctor(func, disp, index, dcbE, aux, outp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 70c77ae..731136d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,5 +1,14 @@
 #ifndef __NVKM_DISP_OUTP_DP_H__
 #define __NVKM_DISP_OUTP_DP_H__
+#define nvkm_output_dp(p) container_of((p), struct nvkm_output_dp, base)
+#ifndef MSG
+#define MSG(l,f,a...)                                                          \
+	nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f,         \
+		 outp->base.index, outp->base.info.hasht,                      \
+		 outp->base.info.hashm, ##a)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
 #include "outp.h"
 
 #include <core/notify.h>
@@ -7,12 +16,16 @@
 #include <subdev/bios/dp.h>
 
 struct nvkm_output_dp {
+	const struct nvkm_output_dp_func *func;
 	struct nvkm_output base;
 
 	struct nvbios_dpout info;
 	u8 version;
 
+	struct nvkm_i2c_aux *aux;
+
 	struct nvkm_notify irq;
+	struct nvkm_notify hpd;
 	bool present;
 	u8 dpcd[16];
 
@@ -23,34 +36,7 @@
 	} lt;
 };
 
-#define nvkm_output_dp_create(p,e,c,b,i,d)                                     \
-	nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_dp_destroy(d) ({                                           \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_dtor(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_init(d) ({                                              \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_init(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_fini(d,s) ({                                            \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_fini(nv_object(_outp), (s));                           \
-})
-
-int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct dcb_output *,
-			   int, int, void **);
-
-int  _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_output_dp_dtor(struct nvkm_object *);
-int  _nvkm_output_dp_init(struct nvkm_object *);
-int  _nvkm_output_dp_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_dp_impl {
-	struct nvkm_output_impl base;
+struct nvkm_output_dp_func {
 	int (*pattern)(struct nvkm_output_dp *, int);
 	int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
 	int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
@@ -58,4 +44,25 @@
 };
 
 int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *, struct nvkm_i2c_aux *,
+			struct nvkm_output_dp *);
+int nvkm_output_dp_new_(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *,
+			struct nvkm_output **);
+
+int nv50_pior_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+
+int g94_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		   struct nvkm_output **);
+int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+
+int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+
+int  gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		      struct nvkm_output **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
new file mode 100644
index 0000000..db6234e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_ovly_oclass = {
+	.base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
new file mode 100644
index 0000000..5985879
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_ovly_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
new file mode 100644
index 0000000..2e2dc06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00c4, 0x6650c4 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0404, 0x665404 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gk104_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gk104_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_ovly_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gk104_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
new file mode 100644
index 0000000..f858053
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gt200_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00b0, 0x610c98 },
+		{ 0x00b4, 0x610ca4 },
+		{ 0x00b8, 0x610cac },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gt200_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &gt200_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_ovly_oclass = {
+	.base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &gt200_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
new file mode 100644
index 0000000..c947e1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_ovly_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
new file mode 100644
index 0000000..6fa296c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0009a0 },
+		{ 0x0088, 0x0009c0 },
+		{ 0x008c, 0x0009c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_ovly_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
new file mode 100644
index 0000000..a625a98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
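+	/* deactivate channel, and wait for it to go idle */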
+	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* activate channel */
+	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+gf119_disp_pioc_func = {
+	.init = gf119_disp_pioc_init,
+	.fini = gf119_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
new file mode 100644
index 0000000..9d2618d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+}
+
+static int
+nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
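+	/* this appears to be some kind of reset request, which we wait on
+	 * before activating the channel below
+	 */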
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+nv50_disp_pioc_func = {
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 2a1d887..ab524bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,8 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
 #include "outpdp.h"
+#include "nv50.h"
 
 #include <core/client.h>
 #include <subdev/i2c.h>
@@ -31,119 +31,10 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
-/******************************************************************************
- * TMDS
- *****************************************************************************/
-
-static int
-nv50_pior_tmds_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *info, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output *outp;
-	int ret;
-
-	ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
-	outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
-	return 0;
-}
-
-struct nvkm_output_impl
-nv50_pior_tmds_impl = {
-	.base.handle = DCB_OUTPUT_TMDS | 0x0100,
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_tmds_ctor,
-		.dtor = _nvkm_output_dtor,
-		.init = _nvkm_output_init,
-		.fini = _nvkm_output_fini,
-	},
-};
-
-/******************************************************************************
- * DisplayPort
- *****************************************************************************/
-
-static int
-nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->pattern)
-		return port->func->pattern(port, pattern);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
-{
-	return 0;
-}
-
-static int
-nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->lnk_ctl)
-		return port->func->lnk_ctl(port, nr, bw, ef);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->drv_ctl)
-		return port->func->drv_ctl(port, ln, vs, pe);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *info, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
-	int ret;
-
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
-	outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
-					 outp->base.info.extdev));
-	return 0;
-}
-
-struct nvkm_output_dp_impl
-nv50_pior_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP | 0x0010,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = nv50_pior_dp_pattern,
-	.lnk_pwr = nv50_pior_dp_lnk_pwr,
-	.lnk_ctl = nv50_pior_dp_lnk_ctl,
-	.drv_ctl = nv50_pior_dp_drv_ctl,
-};
-
-/******************************************************************************
- * General PIOR handling
- *****************************************************************************/
-
 int
 nv50_pior_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 soff = outp->or * 0x800;
 	union {
 		struct nv50_disp_pior_pwr_v0 v0;
@@ -151,10 +42,10 @@
 	u32 ctrl, type;
 	int ret;
 
-	nv_ioctl(object, "disp pior pwr size %d\n", size);
+	nvif_ioctl(object, "disp pior pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
-			 args->v0.version, args->v0.state, args->v0.type);
+		nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+			   args->v0.version, args->v0.state, args->v0.type);
 		if (args->v0.type > 0x0f)
 			return -EINVAL;
 		ctrl = !!args->v0.state;
@@ -162,9 +53,79 @@
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	priv->pior.type[outp->or] = type;
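+	/* wait for the or to go idle before, and after, flipping its
+	 * power state
+	 */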
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	disp->pior.type[outp->or] = type;
 	return 0;
 }
+
+/******************************************************************************
+ * TMDS
+ *****************************************************************************/
+static const struct nvkm_output_func
+nv50_pior_output_func = {
+};
+
+int
+nv50_pior_output_new(struct nvkm_disp *disp, int index,
+		     struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_pior_output_func, disp,
+				index, dcbE, poutp);
+}
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+static int
+nv50_pior_output_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	return 0;
+}
+
+static int
+nv50_pior_output_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+	return 0;
+}
+
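+/* the external encoder handles link control itself over the aux channel;
+ * returning 1 here presumably signals this to the common dp code
+ */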
+static int
+nv50_pior_output_dp_lnk_ctl(struct nvkm_output_dp *outp,
+			    int nr, int bw, bool ef)
+{
+	int ret = nvkm_i2c_aux_lnk_ctl(outp->aux, nr, bw, ef);
+	if (ret)
+		return ret;
+	return 1;
+}
+
+static const struct nvkm_output_dp_func
+nv50_pior_output_dp_func = {
+	.pattern = nv50_pior_output_dp_pattern,
+	.lnk_pwr = nv50_pior_output_dp_lnk_pwr,
+	.lnk_ctl = nv50_pior_output_dp_lnk_ctl,
+};
+
+int
+nv50_pior_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
+{
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux =
+		nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
+	struct nvkm_output_dp *outp;
+
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
+
+	return nvkm_output_dp_ctor(&nv50_pior_output_dp_func, disp,
+				   index, dcbE, aux, outp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 961ce8b..c245295 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,42 +1,52 @@
 #ifndef __NVKM_DISP_PRIV_H__
 #define __NVKM_DISP_PRIV_H__
 #include <engine/disp.h>
+#include "outp.h"
+#include "outpdp.h"
 
-struct nvkm_disp_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **outp;
-	struct nvkm_oclass **conn;
-	const struct nvkm_event_func *vblank;
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+void nvkm_disp_vblank(struct nvkm_disp *, int head);
+
+struct nvkm_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
 };
 
-#define nvkm_disp_create(p,e,c,h,i,x,d)                                     \
-	nvkm_disp_create_((p), (e), (c), (h), (i), (x),                     \
-			     sizeof(**d), (void **)d)
-#define nvkm_disp_destroy(d) ({                                             \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_dtor(nv_object(disp));                                   \
-})
-#define nvkm_disp_init(d) ({                                                \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_init(nv_object(disp));                                   \
-})
-#define nvkm_disp_fini(d,s) ({                                              \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_fini(nv_object(disp), (s));                              \
-})
+struct nvkm_disp_func {
+	void *(*dtor)(struct nvkm_disp *);
+	void (*intr)(struct nvkm_disp *);
 
-int  nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int heads,
-			  const char *, const char *, int, void **);
-void _nvkm_disp_dtor(struct nvkm_object *);
-int  _nvkm_disp_init(struct nvkm_object *);
-int  _nvkm_disp_fini(struct nvkm_object *, bool);
+	const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
 
-extern struct nvkm_oclass *nvkm_output_oclass;
-extern struct nvkm_oclass *nvkm_connector_oclass;
+	struct {
+		void (*vblank_init)(struct nvkm_disp *, int head);
+		void (*vblank_fini)(struct nvkm_disp *, int head);
+	} head;
 
-int  nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
-			   struct nvkm_notify *);
-void nvkm_disp_vblank(struct nvkm_disp *, int head);
+	struct {
+		const struct nvkm_disp_func_outp internal;
+		const struct nvkm_disp_func_outp external;
+	} outp;
+};
+
 int  nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+
+extern const struct nvkm_disp_oclass nv04_disp_root_oclass;
+
+struct nvkm_disp_oclass {
+	int (*ctor)(struct nvkm_disp *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
new file mode 100644
index 0000000..721e4f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g84_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g84_disp_core_oclass,
+		&g84_disp_base_oclass,
+		&g84_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+g84_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g84_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g84_disp_root_oclass = {
+	.base.oclass = G82_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g84_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
new file mode 100644
index 0000000..9493f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g94_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g94_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+g94_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g94_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g94_disp_root_oclass = {
+	.base.oclass = GT206_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g94_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
new file mode 100644
index 0000000..8591726
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 total  = nvkm_rd32(device, 0x640414 + (head * 0x300));
+	const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
+	const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+void
+gf119_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x6100b0, 0x00000000);
+}
+
+int
+gf119_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.
+	 */
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
+		nvkm_wr32(device, 0x6100ac, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x610090, 0x00000000);
+	nvkm_wr32(device, 0x6100a0, 0x00000000);
+	nvkm_wr32(device, 0x6100b0, 0x00000307);
+
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some gk104 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	for (i = 0; i < disp->base.head.nr; i++)
+		nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+gf119_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gf119_disp_core_oclass,
+		&gf119_disp_base_oclass,
+		&gf119_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gf119_disp_oimm_oclass,
+		&gf119_disp_curs_oclass,
+	},
+};
+
+static int
+gf119_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gf119_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gf119_disp_root_oclass = {
+	.base.oclass = GF110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gf119_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
new file mode 100644
index 0000000..0bfdb1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk104_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk104_disp_core_oclass,
+		&gk104_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk104_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk104_disp_root_oclass = {
+	.base.oclass = GK104_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk104_disp_root_new,
+};
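
The gk104 file above (and the gk110/gm107/gm204 variants that follow)
contains no new logic: each chipset supplies only a const table naming
its init/fini routines and channel classes, and the shared
nv50_disp_root_new_() constructor consumes that table. A minimal sketch
of the pattern, with made-up names standing in for the driver types:

#include <stdio.h>

struct ops {
	int (*init)(void);
	void (*fini)(void);
};

static int shared_init(void) { puts("gf119-style init"); return 0; }
static void shared_fini(void) { puts("gf119-style fini"); }

/* Per-chipset table: only the data differs between chipsets. */
static const struct ops gk104_ops = {
	.init = shared_init,
	.fini = shared_fini,
};

/* Shared constructor, parameterized by the table it is handed. */
static int object_new(const struct ops *ops)
{
	return ops->init();
}

int main(void)
{
	int ret = object_new(&gk104_ops);
	gk104_ops.fini();
	return ret;
}
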
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
new file mode 100644
index 0000000..1e8dbed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk110_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk110_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk110_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk110_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk110_disp_root_oclass = {
+	.base.oclass = GK110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk110_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
new file mode 100644
index 0000000..44c55be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm107_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm107_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm107_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm107_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm107_disp_root_oclass = {
+	.base.oclass = GM107_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm107_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
new file mode 100644
index 0000000..168bffe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm204_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm204_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm204_disp_root_oclass = {
+	.base.oclass = GM204_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm204_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
new file mode 100644
index 0000000..124a0c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt200_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt200_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+gt200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt200_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt200_disp_root_oclass = {
+	.base.oclass = GT200_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt200_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
new file mode 100644
index 0000000..dff52f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt215_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt215_disp_core_oclass,
+		&gt215_disp_base_oclass,
+		&gt215_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gt215_disp_oimm_oclass,
+		&gt215_disp_curs_oclass,
+	},
+};
+
+static int
+gt215_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt215_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt215_disp_root_oclass = {
+	.base.oclass = GT214_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt215_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
new file mode 100644
index 0000000..62d3fb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
+#include "priv.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv04_disp_root {
+	struct nvkm_object object;
+	struct nvkm_disp *disp;
+};
+
+static int
+nv04_disp_scanoutpos(struct nv04_disp_root *root,
+		     void *data, u32 size, int head)
+{
+	struct nvkm_device *device = root->disp->engine.subdev.device;
+	struct nvkm_object *object = &root->object;
+	const u32 hoff = head * 0x2000;
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	u32 line;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
+		args->v0.vtotal  = nvkm_rd32(device, 0x680804 + hoff) & 0xffff;
+		args->v0.vblanke = args->v0.vtotal - 1;
+
+		args->v0.hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0xffff;
+		args->v0.htotal  = nvkm_rd32(device, 0x680824 + hoff) & 0xffff;
+		args->v0.hblanke = args->v0.htotal - 1;
+
+		/*
+		 * If the output is VGA instead of digital, vtotal/htotal are
+		 * invalid, so we have to give up and trigger the timestamping
+		 * fallback in the DRM core.
+		 */
+		if (!args->v0.vtotal || !args->v0.htotal)
+			return -ENOTSUPP;
+
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		line = nvkm_rd32(device, 0x600868 + hoff);
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline = (line & 0xffff0000) >> 16;
+		args->v0.vline = (line & 0x0000ffff);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nv04_disp_root *root = nv04_disp_root(object);
+	union {
+		struct nv04_disp_mthd_v0 v0;
+	} *args = data;
+	int head, ret;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	if (head < 0 || head >= 2)
+		return -ENXIO;
+
+	switch (mthd) {
+	case NV04_DISP_SCANOUTPOS:
+		return nv04_disp_scanoutpos(root, data, size, head);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static struct nvkm_object_func
+nv04_disp_root = {
+	.mthd = nv04_disp_mthd,
+	.ntfy = nvkm_disp_ntfy,
+};
+
+static int
+nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv04_disp_root *root;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	root->disp = disp;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
+	return 0;
+}
+
+const struct nvkm_disp_oclass
+nv04_disp_root_oclass = {
+	.base.oclass = NV04_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv04_disp_root_new,
+};
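
nv04_disp_scanoutpos() above samples the system clock immediately
before and after reading the scanline register, so the caller knows the
hardware value was captured somewhere between time[0] and time[1]; the
DRM core uses that bound when computing vblank timestamps. A
self-contained sketch of the bracketing (read_scanline() is a
hypothetical stand-in for the MMIO read):

#include <stdint.h>
#include <time.h>

static uint32_t read_scanline(void) { return 0; } /* stand-in MMIO read */

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

struct scanout_sample {
	int64_t time[2];	/* taken just before/after the read */
	uint32_t vline;
};

static void sample_scanout(struct scanout_sample *s)
{
	s->time[0] = now_ns();
	s->vline   = read_scanline();	/* value lies between the stamps */
	s->time[1] = now_ns();
}
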
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
new file mode 100644
index 0000000..06fb24d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
+	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
+	const u32 total  = nvkm_rd32(device, 0x610afc + (head * 0x540));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+int
+nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	union {
+		struct nv50_disp_mthd_v0 v0;
+		struct nv50_disp_mthd_v1 v1;
+	} *args = data;
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	struct nv50_disp *disp = root->disp;
+	const struct nv50_disp_func *func = disp->func;
+	struct nvkm_output *outp = NULL;
+	struct nvkm_output *temp;
+	u16 type, mask = 0;
+	int head, ret;
+
+	if (mthd != NV50_DISP_MTHD)
+		return -EINVAL;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+	if (nvif_unpack(args->v1, 1, 1, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
+				   "type %04x mask %04x\n",
+			   args->v1.version, args->v1.method,
+			   args->v1.hasht, args->v1.hashm);
+		mthd = args->v1.method;
+		type = args->v1.hasht;
+		mask = args->v1.hashm;
+		head = ffs((mask >> 8) & 0x0f) - 1;
+	} else
+		return ret;
+
+	if (head < 0 || head >= disp->base.head.nr)
+		return -ENXIO;
+
+	if (mask) {
+		list_for_each_entry(temp, &disp->base.outp, head) {
+			if ((temp->info.hasht         == type) &&
+			    (temp->info.hashm & mask) == mask) {
+				outp = temp;
+				break;
+			}
+		}
+		if (outp == NULL)
+			return -ENXIO;
+	}
+
+	switch (mthd) {
+	case NV50_DISP_SCANOUTPOS:
+		return func->head.scanoutpos(object, disp, data, size, head);
+	default:
+		break;
+	}
+
+	switch (mthd * !!outp) {
+	case NV50_DISP_MTHD_V1_DAC_PWR:
+		return func->dac.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_DAC_LOAD:
+		return func->dac.sense(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_PWR:
+		return func->sor.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+		if (!func->sor.hda_eld)
+			return -ENODEV;
+		return func->sor.hda_eld(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+		if (!func->sor.hdmi)
+			return -ENODEV;
+		return func->sor.hdmi(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+		union {
+			struct nv50_disp_sor_lvds_script_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor lvds script "
+					   "vers %d name %04x\n",
+				   args->v0.version, args->v0.script);
+			disp->sor.lvdsconf = args->v0.script;
+			return 0;
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		union {
+			struct nv50_disp_sor_dp_pwr_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+				   args->v0.version, args->v0.state);
+			if (args->v0.state == 0) {
+				nvkm_notify_put(&outpdp->irq);
+				outpdp->func->lnk_pwr(outpdp, 0);
+				atomic_set(&outpdp->lt.done, 0);
+				return 0;
+			} else
+			if (args->v0.state != 0) {
+				nvkm_output_dp_train(&outpdp->base, 0, true);
+				return 0;
+			}
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_PIOR_PWR:
+		if (!func->pior.power)
+			return -ENODEV;
+		return func->pior.power(object, disp, data, size, head, outp);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_child_get_(struct nvkm_object *object, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+
+	if (index < ARRAY_SIZE(root->func->dmac)) {
+		sclass->base = root->func->dmac[index]->base;
+		sclass->priv = root->func->dmac[index];
+		sclass->ctor = nv50_disp_root_dmac_new_;
+		return 0;
+	}
+
+	index -= ARRAY_SIZE(root->func->dmac);
+
+	if (index < ARRAY_SIZE(root->func->pioc)) {
+		sclass->base = root->func->pioc[index]->base;
+		sclass->priv = root->func->pioc[index];
+		sclass->ctor = nv50_disp_root_pioc_new_;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	root->func->fini(root);
+	return 0;
+}
+
+static int
+nv50_disp_root_init_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	return root->func->init(root);
+}
+
+static void *
+nv50_disp_root_dtor_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	nvkm_ramht_del(&root->ramht);
+	nvkm_gpuobj_del(&root->instmem);
+	return root;
+}
+
+static const struct nvkm_object_func
+nv50_disp_root_ = {
+	.dtor = nv50_disp_root_dtor_,
+	.init = nv50_disp_root_init_,
+	.fini = nv50_disp_root_fini_,
+	.mthd = nv50_disp_root_mthd_,
+	.ntfy = nvkm_disp_ntfy,
+	.sclass = nv50_disp_root_child_get_,
+};
+
+int
+nv50_disp_root_new_(const struct nv50_disp_root_func *func,
+		    struct nvkm_disp *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	struct nv50_disp_root *root;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	int ret;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
+	root->func = func;
+	root->disp = disp;
+
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
+			      &root->instmem);
+	if (ret)
+		return ret;
+
+	return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
+}
+
+void
+nv50_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x610024, 0x00000000);
+	nvkm_wr32(device, 0x610020, 0x00000000);
+}
+
+int
+nv50_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.  It's not clear what the 0x614004 caps are for...
+	 */
+	tmp = nvkm_rd32(device, 0x614004);
+	nvkm_wr32(device, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
+		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... PIOR caps */
+	for (i = 0; i < disp->func->pior.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
+		nvkm_wr32(device, 0x610024, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x61002c, 0x00000370);
+	nvkm_wr32(device, 0x610028, 0x00000000);
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+nv50_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&nv50_disp_core_oclass,
+		&nv50_disp_base_oclass,
+		&nv50_disp_ovly_oclass,
+	},
+	.pioc = {
+		&nv50_disp_oimm_oclass,
+		&nv50_disp_curs_oclass,
+	},
+};
+
+static int
+nv50_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&nv50_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+nv50_disp_root_oclass = {
+	.base.oclass = NV50_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv50_disp_root_new,
+};
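
nv50_disp_root_mthd_() above accepts two argument layouts through a
single union: nvif_unpack() checks the leading version byte and only
the matching branch runs (v0 addresses a head directly, v1 locates an
output by DCB hash type/mask and derives the head from the mask). A
loose sketch of that version-dispatch idea; the struct layouts here are
illustrative only, not the real nvif ABI:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>

struct mthd_v0 { uint8_t version, method, head, pad; };
struct mthd_v1 { uint8_t version, method; uint16_t hasht, hashm; };

/* Return the head the request addresses, or a negative errno.
 * The v1 case may return -1 for an empty mask; callers validate. */
static int disp_mthd_head(const void *data, uint32_t size)
{
	union {
		struct mthd_v0 v0;
		struct mthd_v1 v1;
	} args;

	if (size < 1 || size > sizeof(args))
		return -ENOSYS;
	memcpy(&args, data, size);

	switch (((const uint8_t *)data)[0]) {	/* leading version byte */
	case 0:
		return args.v0.head;		/* v0: head given directly */
	case 1:
		return ffs((args.v1.hashm >> 8) & 0x0f) - 1; /* v1: from mask */
	default:
		return -ENOSYS;		/* unknown version, as nvif_unpack */
	}
}
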
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
new file mode 100644
index 0000000..5b2c903
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -0,0 +1,43 @@
+#ifndef __NV50_DISP_ROOT_H__
+#define __NV50_DISP_ROOT_H__
+#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
+#include "nv50.h"
+#include "channv50.h"
+#include "dmacnv50.h"
+
+struct nv50_disp_root {
+	const struct nv50_disp_root_func *func;
+	struct nv50_disp *disp;
+	struct nvkm_object object;
+
+	struct nvkm_gpuobj *instmem;
+	struct nvkm_ramht *ramht;
+};
+
+struct nv50_disp_root_func {
+	int (*init)(struct nv50_disp_root *);
+	void (*fini)(struct nv50_disp_root *);
+	const struct nv50_disp_dmac_oclass *dmac[3];
+	const struct nv50_disp_pioc_oclass *pioc[2];
+};
+
+int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
+			 const struct nvkm_oclass *, void *data, u32 size,
+			 struct nvkm_object **);
+int  nv50_disp_root_init(struct nv50_disp_root *);
+void nv50_disp_root_fini(struct nv50_disp_root *);
+
+int  gf119_disp_root_init(struct nv50_disp_root *);
+void gf119_disp_root_fini(struct nv50_disp_root *);
+
+extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
+extern const struct nvkm_disp_oclass g84_disp_root_oclass;
+extern const struct nvkm_disp_oclass g94_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt215_disp_root_oclass;
+extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+#endif
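
The nv50_disp_root() macro in this header (like its nv04 counterpart
earlier) recovers the containing structure from a pointer to the
embedded nvkm_object using container_of(). A standalone illustration of
that downcast with plain stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct object { int handle; };

struct disp_root {
	const void *func;
	struct object object;	/* embedded base object */
};

int main(void)
{
	struct disp_root root = { .object.handle = 42 };
	struct object *base = &root.object;

	/* subtract the member offset to recover the container */
	struct disp_root *r = container_of(base, struct disp_root, object);
	printf("%d\n", r->object.handle);
	return 0;
}
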
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 8918da7..1bb9d66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -24,7 +24,6 @@
 #include "nv50.h"
 #include "outpdp.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 
 static inline u32
@@ -39,12 +38,33 @@
 	return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
-static inline u32
-g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+/*******************************************************************************
+ * TMDS/LVDS
+ ******************************************************************************/
+static const struct nvkm_output_func
+g94_sor_output_func = {
+};
+
+int
+g94_sor_output_new(struct nvkm_disp *disp, int index,
+		   struct dcb_output *dcbE, struct nvkm_output **poutp)
 {
+	return nvkm_output_new_(&g94_sor_output_func, disp,
+				index, dcbE, poutp);
+}
+
+/*******************************************************************************
+ * DisplayPort
+ ******************************************************************************/
+u32
+g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
+{
+	static const u8 gm100[] = { 0, 8, 16, 24 };
 	static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-	static const u8 g94[] = { 16, 8, 0, 24 };
-	if (nv_device(priv)->chipset == 0xaf)
+	static const u8   g94[] = { 16, 8, 0, 24 };
+	if (device->chipset >= 0x110)
+		return gm100[lane];
+	if (device->chipset == 0xaf)
 		return mcp89[lane];
 	return g94[lane];
 }
@@ -52,33 +72,36 @@
 static int
 g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 loff = g94_sor_loff(outp);
-	nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
+	nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
 	return 0;
 }
 
 int
 g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3);
+		mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
 
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 static int
 g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
@@ -90,17 +113,17 @@
 	if (bw > 0x06)
 		clksor |= 0x00040000;
 
-	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
 	return 0;
 }
 
 static int
 g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = g94_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
 	const u32 loff = g94_sor_loff(outp);
 	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
@@ -109,37 +132,37 @@
 
 	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
 					outp->base.info.hashm,
-				 &ver, &hdr, &cnt, &len, &info);
+				  &ver, &hdr, &cnt, &len, &info);
 	if (!addr)
 		return -ENODEV;
 
 	addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
-				 &ver, &hdr, &cnt, &len, &ocfg);
+				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
 	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
 		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-g94_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+g94_sor_dp_func = {
 	.pattern = g94_sor_dp_pattern,
 	.lnk_pwr = g94_sor_dp_lnk_pwr,
 	.lnk_ctl = g94_sor_dp_lnk_ctl,
 	.drv_ctl = g94_sor_dp_drv_ctl,
 };
+
+int
+g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+	       struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
+}
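
g94_sor_dp_lane_map() above is now shared (the new gf119 code reuses
it) and gains a gm100 table: each chipset family wires the four DP
lanes to different bit offsets within the drive-control registers, and
the chipset id picks the right wiring. The same selection rewritten as
a standalone function (table values copied from the diff; the function
name is a stand-in):

#include <stdint.h>

static uint32_t dp_lane_offset(int chipset, uint8_t lane)
{
	static const uint8_t gm100[] = {  0,  8, 16, 24 };
	static const uint8_t mcp89[] = { 24, 16,  8,  0 };
	static const uint8_t g94[]   = { 16,  8,  0, 24 };

	if (chipset >= 0x110)
		return gm100[lane];
	if (chipset == 0xaf)	/* mcp89 wires the lanes in reverse */
		return mcp89[lane];
	return g94[lane];
}
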
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
deleted file mode 100644
index 52fbe48..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outpdp.h"
-
-static inline u32
-gf110_sor_soff(struct nvkm_output_dp *outp)
-{
-	return (ffs(outp->base.info.or) - 1) * 0x800;
-}
-
-static inline u32
-gf110_sor_loff(struct nvkm_output_dp *outp)
-{
-	return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
-}
-
-static inline u32
-gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
-{
-	static const u8 gf110[] = { 16, 8, 0, 24 };
-	return gf110[lane];
-}
-
-static int
-gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 loff = gf110_sor_loff(outp);
-	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-	return 0;
-}
-
-int
-gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 soff = gf110_sor_soff(outp);
-	const u32 loff = gf110_sor_loff(outp);
-	u32 dpctrl = 0x00000000;
-	u32 clksor = 0x00000000;
-
-	clksor |= bw << 18;
-	dpctrl |= ((1 << nr) - 1) << 16;
-	if (ef)
-		dpctrl |= 0x00004000;
-
-	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
-	return 0;
-}
-
-static int
-gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
-		     int ln, int vs, int pe, int pc)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gf110_sor_dp_lane_map(priv, ln);
-	const u32 loff = gf110_sor_loff(outp);
-	u32 addr, data[4];
-	u8  ver, hdr, cnt, len;
-	struct nvbios_dpout info;
-	struct nvbios_dpcfg ocfg;
-
-	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
-					outp->base.info.hashm,
-				  &ver, &hdr, &cnt, &len, &info);
-	if (!addr)
-		return -ENODEV;
-
-	addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
-				  &ver, &hdr, &cnt, &len, &ocfg);
-	if (!addr)
-		return -EINVAL;
-
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
-	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
-		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
-	return 0;
-}
-
-struct nvkm_output_dp_impl
-gf110_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = gf110_sor_dp_pattern,
-	.lnk_pwr = g94_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
-	.drv_ctl = gf110_sor_dp_drv_ctl,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
new file mode 100644
index 0000000..b4b41b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+static inline u32
+gf119_sor_soff(struct nvkm_output_dp *outp)
+{
+	return (ffs(outp->base.info.or) - 1) * 0x800;
+}
+
+static inline u32
+gf119_sor_loff(struct nvkm_output_dp *outp)
+{
+	return gf119_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+}
+
+static int
+gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 loff = gf119_sor_loff(outp);
+	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	return 0;
+}
+
+int
+gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 soff = gf119_sor_soff(outp);
+	const u32 loff = gf119_sor_loff(outp);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+
+	clksor |= bw << 18;
+	dpctrl |= ((1 << nr) - 1) << 16;
+	if (ef)
+		dpctrl |= 0x00004000;
+
+	nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+	return 0;
+}
+
+static int
+gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+		     int ln, int vs, int pe, int pc)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
+	const u32 loff = gf119_sor_loff(outp);
+	u32 addr, data[4];
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	struct nvbios_dpcfg ocfg;
+
+	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+					outp->base.info.hashm,
+				  &ver, &hdr, &cnt, &len, &info);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
+				  &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
+	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	return 0;
+}
+
+static const struct nvkm_output_dp_func
+gf119_sor_dp_func = {
+	.pattern = gf119_sor_dp_pattern,
+	.lnk_pwr = g94_sor_dp_lnk_pwr,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+	.drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gf119_sor_dp_new(struct nvkm_disp *disp, int index,
+		 struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gf119_sor_dp_func, disp, index, dcbE, poutp);
+}
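
The drv_ctl implementations here all share one shape: match the output
against the vbios DP tables for drive-current/pre-emphasis values, then
read-modify-write an 8-bit per-lane field in each register. A minimal
sketch of that field update, using a plain array in place of the
0x61c1xx registers:

#include <stdint.h>

static uint32_t regs[2];	/* stand-ins for 0x61c118/0x61c120 */

/* Clear one lane's byte-wide field and insert the new values. */
static void set_lane_drive(int lane_shift, uint8_t dc, uint8_t pe)
{
	const uint32_t mask = 0x000000ffu << lane_shift;

	regs[0] = (regs[0] & ~mask) | ((uint32_t)dc << lane_shift);
	regs[1] = (regs[1] & ~mask) | ((uint32_t)pe << lane_shift);
}
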
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 1e40dfe..029e5f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -41,17 +41,17 @@
 void
 gm204_sor_magic(struct nvkm_output *outp)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->disp->engine.subdev.device;
 	const u32 soff = outp->or * 0x100;
 	const u32 data = outp->or + 1;
 	if (outp->info.sorconf.link & 1)
-		nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+		nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
 	if (outp->info.sorconf.link & 2)
-		nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+		nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
 }
 
 static inline u32
-gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 {
 	return lane * 0x08;
 }
@@ -59,30 +59,33 @@
 static int
 gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 data = 0x01010101 * pattern;
 	if (outp->base.info.sorconf.link & 1)
-		nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
 	else
-		nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
 	return 0;
 }
 
 static int
 gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+		mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
 
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
@@ -90,9 +93,9 @@
 gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = gm204_sor_dp_lane_map(device, ln);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
@@ -109,31 +112,32 @@
 				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
+	ocfg.tx_pu &= 0x0f;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
-	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
-		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
+	if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8);
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-gm204_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+gm204_sor_dp_func = {
 	.pattern = gm204_sor_dp_pattern,
 	.lnk_pwr = gm204_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
 	.drv_ctl = gm204_sor_dp_drv_ctl,
 };
+
+int
+gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+}
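
Beyond the mechanical nvkm_*() conversion, the gm204 hunk above also
tightens the tx_pu handling: the vbios value is pre-masked with 0x0f
and the register comparison/update now uses the 4-bit field at
0x00000f00 rather than the whole byte. A sketch of just that update,
assuming a plain variable in place of register 0x61c130:

#include <stdint.h>

static uint32_t update_tx_pu(uint32_t reg, uint8_t tx_pu, int first_lane)
{
	tx_pu &= 0x0f;	/* field is only 4 bits wide here */
	if ((reg & 0x00000f00) < ((uint32_t)tx_pu << 8) || first_lane)
		reg = (reg & ~0x00000f00) | ((uint32_t)tx_pu << 8);
	return reg;
}
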
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index b229a31..29e0d2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -33,6 +33,7 @@
 int
 nv50_sor_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_pwr_v0 v0;
 	} *args = data;
@@ -40,17 +41,39 @@
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp sor pwr size %d\n", size);
+	nvif_ioctl(object, "disp sor pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor pwr vers %d state %d\n",
-			 args->v0.version, args->v0.state);
+		nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
+			   args->v0.version, args->v0.state);
 		stat = !!args->v0.state;
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+			break;
+	);
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_sor_output_func = {
+};
+
+int
+nv50_sor_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_sor_output_func, disp,
+				index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index c4622c7..8bff95c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -23,131 +23,119 @@
  */
 #include <subdev/vga.h>
 
-#include <core/device.h>
-
 u8
-nv_rdport(void *obj, int head, u16 port)
+nvkm_rdport(struct nvkm_device *device, int head, u16 port)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		return nv_rd08(obj, 0x601000 + port);
+		return nvkm_rd08(device, 0x601000 + port);
 
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
 
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
 	}
 
-	nv_error(obj, "unknown vga port 0x%04x\n", port);
 	return 0x00;
 }
 
 void
-nv_wrport(void *obj, int head, u16 port, u8 data)
+nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		nv_wr08(obj, 0x601000 + port, data);
+		nvkm_wr08(device, 0x601000 + port, data);
 	else
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+		nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
 	else
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
-	} else
-		nv_error(obj, "unknown vga port 0x%04x\n", port);
+		nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
+	}
 }
 
 u8
-nv_rdvgas(void *obj, int head, u8 index)
+nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	return nv_rdport(obj, head, 0x03c5);
+	nvkm_wrport(device, head, 0x03c4, index);
+	return nvkm_rdport(device, head, 0x03c5);
 }
 
 void
-nv_wrvgas(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	nv_wrport(obj, head, 0x03c5, value);
+	nvkm_wrport(device, head, 0x03c4, index);
+	nvkm_wrport(device, head, 0x03c5, value);
 }
 
 u8
-nv_rdvgag(void *obj, int head, u8 index)
+nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	return nv_rdport(obj, head, 0x03cf);
+	nvkm_wrport(device, head, 0x03ce, index);
+	return nvkm_rdport(device, head, 0x03cf);
 }
 
 void
-nv_wrvgag(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	nv_wrport(obj, head, 0x03cf, value);
+	nvkm_wrport(device, head, 0x03ce, index);
+	nvkm_wrport(device, head, 0x03cf, value);
 }
 
 u8
-nv_rdvgac(void *obj, int head, u8 index)
+nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	return nv_rdport(obj, head, 0x03d5);
+	nvkm_wrport(device, head, 0x03d4, index);
+	return nvkm_rdport(device, head, 0x03d5);
 }
 
 void
-nv_wrvgac(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	nv_wrport(obj, head, 0x03d5, value);
+	nvkm_wrport(device, head, 0x03d4, index);
+	nvkm_wrport(device, head, 0x03d5, value);
 }
 
 u8
-nv_rdvgai(void *obj, int head, u16 port, u8 index)
+nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
 {
-	if (port == 0x03c4) return nv_rdvgas(obj, head, index);
-	if (port == 0x03ce) return nv_rdvgag(obj, head, index);
-	if (port == 0x03d4) return nv_rdvgac(obj, head, index);
-	nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
+	if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
+	if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
 	return 0x00;
 }
 
 void
-nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
 {
-	if      (port == 0x03c4) nv_wrvgas(obj, head, index, value);
-	else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
-	else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
-	else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if      (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
+	else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
+	else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
 }
 
 bool
-nv_lockvgac(void *obj, bool lock)
+nvkm_lockvgac(struct nvkm_device *device, bool lock)
 {
-	struct nvkm_device *dev = nv_device(obj);
-
-	bool locked = !nv_rdvgac(obj, 0, 0x1f);
+	bool locked = !nvkm_rdvgac(device, 0, 0x1f);
 	u8 data = lock ? 0x99 : 0x57;
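+	/* 0x99 locks, 0x57 unlocks (the conventional NVIDIA extended CRTC
+	 * lock values)
+	 */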
-	if (dev->card_type < NV_50)
-		nv_wrvgac(obj, 0, 0x1f, data);
+	if (device->card_type < NV_50)
+		nvkm_wrvgac(device, 0, 0x1f, data);
 	else
-		nv_wrvgac(obj, 0, 0x3f, data);
-	if (dev->chipset == 0x11) {
-		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
-			nv_wrvgac(obj, 1, 0x1f, data);
+		nvkm_wrvgac(device, 0, 0x3f, data);
+	if (device->chipset == 0x11) {
+		if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
+			nvkm_wrvgac(device, 1, 0x1f, data);
 	}
 	return locked;
 }
@@ -171,16 +159,16 @@
  * other values are treated as literal values to set
  */
 u8
-nv_rdvgaowner(void *obj)
+nvkm_rdvgaowner(struct nvkm_device *device)
 {
-	if (nv_device(obj)->card_type < NV_50) {
-		if (nv_device(obj)->chipset == 0x11) {
-			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+	if (device->card_type < NV_50) {
+		if (device->chipset == 0x11) {
+			u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
 			if (tied == 0) {
-				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
-				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
-				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
-				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+				u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
+				u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
+				u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
+				u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
 				if (slA && !tvA) return 0x00;
 				if (slB && !tvB) return 0x03;
 				if (slA) return 0x00;
@@ -190,30 +178,28 @@
 			return 0x04;
 		}
 
-		return nv_rdvgac(obj, 0, 0x44);
+		return nvkm_rdvgac(device, 0, 0x44);
 	}
 
-	nv_error(obj, "rdvgaowner after nv4x\n");
 	return 0x00;
 }
 
 void
-nv_wrvgaowner(void *obj, u8 select)
+nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
 {
-	if (nv_device(obj)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		u8 owner = (select == 1) ? 3 : select;
-		if (nv_device(obj)->chipset == 0x11) {
+		if (device->chipset == 0x11) {
 			/* workaround hw lockup bug */
-			nv_rdvgac(obj, 0, 0x1f);
-			nv_rdvgac(obj, 1, 0x1f);
+			nvkm_rdvgac(device, 0, 0x1f);
+			nvkm_rdvgac(device, 1, 0x1f);
 		}
 
-		nv_wrvgac(obj, 0, 0x44, owner);
+		nvkm_wrvgac(device, 0, 0x44, owner);
 
-		if (nv_device(obj)->chipset == 0x11) {
-			nv_wrvgac(obj, 0, 0x2e, owner);
-			nv_wrvgac(obj, 0, 0x2e, owner);
+		if (device->chipset == 0x11) {
+			nvkm_wrvgac(device, 0, 0x2e, owner);
+			nvkm_wrvgac(device, 0, 0x2e, owner);
 		}
-	} else
-		nv_error(obj, "wrvgaowner after nv4x\n");
+	}
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
new file mode 100644
index 0000000..c4a2ce9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/engine/dma/base.o
+nvkm-y += nvkm/engine/dma/nv04.o
+nvkm-y += nvkm/engine/dma/nv50.o
+nvkm-y += nvkm/engine/dma/gf100.o
+nvkm-y += nvkm/engine/dma/gf119.o
+
+nvkm-y += nvkm/engine/dma/user.o
+nvkm-y += nvkm/engine/dma/usernv04.o
+nvkm-y += nvkm/engine/dma/usernv50.o
+nvkm-y += nvkm/engine/dma/usergf100.o
+nvkm-y += nvkm/engine/dma/usergf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
new file mode 100644
index 0000000..9769fc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/client.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
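+/* Per-client DMA objects live in a red-black tree keyed by object handle;
+ * search is a standard rbtree walk.
+ */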
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
+{
+	struct rb_node *node = client->dmaroot.rb_node;
+	while (node) {
+		struct nvkm_dmaobj *dmaobj =
+			container_of(node, typeof(*dmaobj), rb);
+		if (object < dmaobj->handle)
+			node = node->rb_left;
+		else
+		if (object > dmaobj->handle)
+			node = node->rb_right;
+		else
+			return dmaobj;
+	}
+	return NULL;
+}
+
+static int
+nvkm_dma_oclass_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
+	struct nvkm_dmaobj *dmaobj = NULL;
+	struct nvkm_client *client = oclass->client;
+	struct rb_node **ptr = &client->dmaroot.rb_node;
+	struct rb_node *parent = NULL;
+	int ret;
+
+	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
+	if (dmaobj)
+		*pobject = &dmaobj->object;
+	if (ret)
+		return ret;
+
+	dmaobj->handle = oclass->object;
+
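+	/* insert into the client's handle tree, rejecting duplicates */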
+	while (*ptr) {
+		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
+		parent = *ptr;
+		if (dmaobj->handle < obj->handle)
+			ptr = &parent->rb_left;
+		else
+		if (dmaobj->handle > obj->handle)
+			ptr = &parent->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&dmaobj->rb, parent, ptr);
+	rb_insert_color(&dmaobj->rb, &client->dmaroot);
+	return 0;
+}
+
+static const struct nvkm_device_oclass
+nvkm_dma_oclass_base = {
+	.ctor = nvkm_dma_oclass_new,
+};
+
+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+				   oclass, data, size, pobject);
+}
+
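+/* All three DMA object classes share a constructor; per-chipset behaviour
+ * comes from the dma->func->class_new() hook.
+ */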
+static const struct nvkm_sclass
+nvkm_dma_sclass[] = {
+	{ 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+};
+
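+/* class enumeration: return the requested index while valid, otherwise the
+ * total count so the caller knows when to stop iterating
+ */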
+static int
+nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
+			 const struct nvkm_device_oclass **class)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
+		sclass->base = oclass[0];
+		sclass->engn = oclass;
+		*class = &nvkm_dma_oclass_base;
+		return index;
+	}
+	return count;
+}
+
+static int
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		oclass->base = nvkm_dma_sclass[index];
+		return index;
+	}
+	return count;
+}
+
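+/* the dtor returns the allocation to be freed; nvkm_dma embeds the engine,
+ * so hand back the containing structure
+ */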
+static void *
+nvkm_dma_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_dma(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_dma = {
+	.dtor = nvkm_dma_dtor,
+	.base.sclass = nvkm_dma_oclass_base_get,
+	.fifo.sclass = nvkm_dma_oclass_fifo_get,
+};
+
+int
+nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_dma **pdma)
+{
+	struct nvkm_dma *dma;
+
+	if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
+		return -ENOMEM;
+	dma->func = func;
+
+	return nvkm_engine_ctor(&nvkm_dma, device, index,
+				0, true, &dma->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
new file mode 100644
index 0000000..efec5d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf100_dma = {
+	.class_new = gf100_dmaobj_new,
+};
+
+int
+gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
new file mode 100644
index 0000000..34c7660
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf119_dma = {
+	.class_new = gf119_dmaobj_new,
+};
+
+int
+gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
new file mode 100644
index 0000000..30747a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv04_dma = {
+	.class_new = nv04_dmaobj_new,
+};
+
+int
+nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
new file mode 100644
index 0000000..77aca7b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv50_dma = {
+	.class_new = nv50_dmaobj_new,
+};
+
+int
+nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
new file mode 100644
index 0000000..deb37ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_PRIV_H__
+#define __NVKM_DMA_PRIV_H__
+#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
+#include <engine/dma.h>
+
+struct nvkm_dmaobj_func {
+	int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+};
+
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
+		  int index, struct nvkm_dma **);
+
+struct nvkm_dma_func {
+	int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_dmaobj **);
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
new file mode 100644
index 0000000..45ab062
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+	return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
+}
+
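+/* the rb node is cleared at construction time, so it is safe to destroy an
+ * object that never made it into the client's tree
+ */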
+static void *
+nvkm_dmaobj_dtor(struct nvkm_object *base)
+{
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+	if (!RB_EMPTY_NODE(&dmaobj->rb))
+		rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
+	return dmaobj;
+}
+
+static const struct nvkm_object_func
+nvkm_dmaobj_func = {
+	.dtor = nvkm_dmaobj_dtor,
+	.bind = nvkm_dmaobj_bind,
+};
+
+int
+nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
+		 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
+		 struct nvkm_dmaobj *dmaobj)
+{
+	union {
+		struct nv_dma_v0 v0;
+	} *args = *pdata;
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_instmem *instmem = device->imem;
+	struct nvkm_fb *fb = device->fb;
+	void *data = *pdata;
+	u32 size = *psize;
+	int ret;
+
+	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
+	dmaobj->func = func;
+	dmaobj->dma = dma;
+	RB_CLEAR_NODE(&dmaobj->rb);
+
+	nvif_ioctl(parent, "create dma size %d\n", *psize);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(parent, "create dma vers %d target %d access %d "
+				   "start %016llx limit %016llx\n",
+			   args->v0.version, args->v0.target, args->v0.access,
+			   args->v0.start, args->v0.limit);
+		dmaobj->target = args->v0.target;
+		dmaobj->access = args->v0.access;
+		dmaobj->start  = args->v0.start;
+		dmaobj->limit  = args->v0.limit;
+	} else
+		return ret;
+
+	*pdata = data;
+	*psize = size;
+
+	if (dmaobj->start > dmaobj->limit)
+		return -EINVAL;
+
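+	/* translate the NVIF target/access enums into internal NV_MEM_*
+	 * values; raw memory targets require a privileged client
+	 */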
+	switch (dmaobj->target) {
+	case NV_DMA_V0_TARGET_VM:
+		dmaobj->target = NV_MEM_TARGET_VM;
+		break;
+	case NV_DMA_V0_TARGET_VRAM:
+		if (!client->super) {
+			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
+				return -EACCES;
+			if (device->card_type >= NV_50)
+				return -EACCES;
+		}
+		dmaobj->target = NV_MEM_TARGET_VRAM;
+		break;
+	case NV_DMA_V0_TARGET_PCI:
+		if (!client->super)
+			return -EACCES;
+		dmaobj->target = NV_MEM_TARGET_PCI;
+		break;
+	case NV_DMA_V0_TARGET_PCI_US:
+	case NV_DMA_V0_TARGET_AGP:
+		if (!client->super)
+			return -EACCES;
+		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_DMA_V0_ACCESS_VM:
+		dmaobj->access = NV_MEM_ACCESS_VM;
+		break;
+	case NV_DMA_V0_ACCESS_RD:
+		dmaobj->access = NV_MEM_ACCESS_RO;
+		break;
+	case NV_DMA_V0_ACCESS_WR:
+		dmaobj->access = NV_MEM_ACCESS_WO;
+		break;
+	case NV_DMA_V0_ACCESS_RDWR:
+		dmaobj->access = NV_MEM_ACCESS_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
new file mode 100644
index 0000000..69a7f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_USER_H__
+#define __NVKM_DMA_USER_H__
+#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
+#include "priv.h"
+
+int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
+		     const struct nvkm_oclass *, void **data, u32 *size,
+		     struct nvkm_dmaobj *);
+
+int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
new file mode 100644
index 0000000..13e341c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf100_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
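+	/* six-word object: flags, limit/start low bits, then the high bits
+	 * of limit and start packed together in word 0x0c
+	 */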
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf100_dmaobj_func = {
+	.bind = gf100_dmaobj_bind,
+};
+
+int
+gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf100_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_dmaobj *dmaobj;
+	u32 kind, user, unkn;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf100 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf100 dma vers %d priv %d kind %02x\n",
+			   args->v0.version, args->v0.priv, args->v0.kind);
+		kind = args->v0.kind;
+		user = args->v0.priv;
+		unkn = 0;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF100_DMA_V0_KIND_PITCH;
+			user = GF100_DMA_V0_PRIV_US;
+			unkn = 2;
+		} else {
+			kind = GF100_DMA_V0_KIND_VM;
+			user = GF100_DMA_V0_PRIV_VM;
+			unkn = 0;
+		}
+	} else
+		return ret;
+
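+	/* pack kind/priv and the object class into flags0; flags5 carries an
+	 * unknown field mirrored from the previous implementation
+	 */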
+	if (user > 2)
+		return -EINVAL;
+	dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
+	dmaobj->flags5 |= (unkn << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
new file mode 100644
index 0000000..0e1af8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf119_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+};
+
+static int
+gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
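+	/* gf119-style objects store start/limit as 256-byte units (>> 8) */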
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf119_dmaobj_func = {
+	.bind = gf119_dmaobj_bind,
+};
+
+int
+gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf119_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf119_dmaobj *dmaobj;
+	u32 kind, page;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf119 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf119 dma vers %d page %d kind %02x\n",
+			   args->v0.version, args->v0.page, args->v0.kind);
+		kind = args->v0.kind;
+		page = args->v0.page;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF119_DMA_V0_KIND_PITCH;
+			page = GF119_DMA_V0_PAGE_SP;
+		} else {
+			kind = GF119_DMA_V0_KIND_VM;
+			page = GF119_DMA_V0_PAGE_LP;
+		}
+	} else
+		return ret;
+
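+	/* flags0 packs the storage kind and page-size select */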
+	if (page > 1)
+		return -EINVAL;
+	dmaobj->flags0 = (kind << 20) | (page << 6);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00000009;
+		break;
+	case NV_MEM_TARGET_VM:
+	case NV_MEM_TARGET_PCI:
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		/* XXX: don't currently know how to construct a real one
+		 *      of these.  we only use them to represent pushbufs
+		 *      on these chipsets, and the classes that use them
+		 *      deal with the target themselves.
+		 */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
new file mode 100644
index 0000000..c95942e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
+#include "user.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu/nv04.h>
+
+#include <nvif/class.h>
+
+struct nv04_dmaobj {
+	struct nvkm_dmaobj base;
+	bool clone;
+	u32 flags0;
+	u32 flags2;
+};
+
+static int
+nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	u64 offset = dmaobj->base.start & 0xfffff000;
+	u64 adjust = dmaobj->base.start & 0x00000fff;
+	u32 length = dmaobj->base.limit - dmaobj->base.start;
+	int ret;
+
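+	/* VM-targeted objects on nv04-class MMUs are cloned from the MMU's
+	 * single page table: resolve the start address through it to the
+	 * backing page
+	 */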
+	if (dmaobj->clone) {
+		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
+		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+		if (!dmaobj->base.start)
+			return nvkm_gpuobj_wrap(pgt, pgpuobj);
+		nvkm_kmap(pgt);
+		offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
+		offset &= 0xfffff000;
+		nvkm_done(pgt);
+	}
+
+	ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
+		nvkm_wo32(*pgpuobj, 0x04, length);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
+		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv04_dmaobj_func = {
+	.bind = nv04_dmaobj_bind,
+};
+
+int
+nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nv04_dmaobj *dmaobj;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	if (dmaobj->base.target == NV_MEM_TARGET_VM) {
+		if (device->mmu->func == &nv04_mmu)
+			dmaobj->clone = true;
+		dmaobj->base.target = NV_MEM_TARGET_PCI;
+		dmaobj->base.access = NV_MEM_ACCESS_RW;
+	}
+
+	dmaobj->flags0 = oclass->base.oclass;
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00003000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00023000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00033000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		dmaobj->flags0 |= 0x00008000;
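+		/* fall through */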
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags2 |= 0x00000002;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
new file mode 100644
index 0000000..5b7ce31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv50_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv50_dmaobj_func = {
+	.bind = nv50_dmaobj_bind,
+};
+
+int
+nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct nv50_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_dmaobj *dmaobj;
+	u32 user, part, comp, kind;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create nv50 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+				   "comp %d kind %02x\n", args->v0.version,
+			   args->v0.priv, args->v0.part, args->v0.comp,
+			   args->v0.kind);
+		user = args->v0.priv;
+		part = args->v0.part;
+		comp = args->v0.comp;
+		kind = args->v0.kind;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			user = NV50_DMA_V0_PRIV_US;
+			part = NV50_DMA_V0_PART_256;
+			comp = NV50_DMA_V0_COMP_NONE;
+			kind = NV50_DMA_V0_KIND_PITCH;
+		} else {
+			user = NV50_DMA_V0_PRIV_VM;
+			part = NV50_DMA_V0_PART_VM;
+			comp = NV50_DMA_V0_COMP_VM;
+			kind = NV50_DMA_V0_KIND_VM;
+		}
+	} else
+		return ret;
+
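+	/* pack comp/kind/priv and the object class into flags0, and the
+	 * partition mode into flags5
+	 */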
+	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+		return -EINVAL;
+	dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
+			 oclass->base.oclass;
+	dmaobj->flags5 = (part << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
deleted file mode 100644
index 7529632..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-nvkm-y += nvkm/engine/dmaobj/base.o
-nvkm-y += nvkm/engine/dmaobj/nv04.o
-nvkm-y += nvkm/engine/dmaobj/nv50.o
-nvkm-y += nvkm/engine/dmaobj/gf100.o
-nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
deleted file mode 100644
index a2b60d8..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	const struct nvkm_dmaeng_impl *impl = (void *)
-		nv_oclass(nv_object(dmaobj)->engine);
-	int ret = 0;
-
-	if (nv_object(dmaobj) == parent) { /* ctor bind */
-		if (nv_mclass(parent->parent) == NV_DEVICE) {
-			/* delayed, or no, binding */
-			return 0;
-		}
-		ret = impl->bind(dmaobj, parent, pgpuobj);
-		if (ret == 0)
-			nvkm_object_ref(NULL, &parent);
-		return ret;
-	}
-
-	return impl->bind(dmaobj, parent, pgpuobj);
-}
-
-int
-nvkm_dmaobj_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void **pdata, u32 *psize,
-		    int length, void **pobject)
-{
-	union {
-		struct nv_dma_v0 v0;
-	} *args = *pdata;
-	struct nvkm_instmem *instmem = nvkm_instmem(parent);
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_dmaobj *dmaobj;
-	void *data = *pdata;
-	u32 size = *psize;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	dmaobj = *pobject;
-	if (ret)
-		return ret;
-
-	nv_ioctl(parent, "create dma size %d\n", *psize);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(parent, "create dma vers %d target %d access %d "
-				 "start %016llx limit %016llx\n",
-			 args->v0.version, args->v0.target, args->v0.access,
-			 args->v0.start, args->v0.limit);
-		dmaobj->target = args->v0.target;
-		dmaobj->access = args->v0.access;
-		dmaobj->start  = args->v0.start;
-		dmaobj->limit  = args->v0.limit;
-	} else
-		return ret;
-
-	*pdata = data;
-	*psize = size;
-
-	if (dmaobj->start > dmaobj->limit)
-		return -EINVAL;
-
-	switch (dmaobj->target) {
-	case NV_DMA_V0_TARGET_VM:
-		dmaobj->target = NV_MEM_TARGET_VM;
-		break;
-	case NV_DMA_V0_TARGET_VRAM:
-		if (!client->super) {
-			if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
-				return -EACCES;
-			if (device->card_type >= NV_50)
-				return -EACCES;
-		}
-		dmaobj->target = NV_MEM_TARGET_VRAM;
-		break;
-	case NV_DMA_V0_TARGET_PCI:
-		if (!client->super)
-			return -EACCES;
-		dmaobj->target = NV_MEM_TARGET_PCI;
-		break;
-	case NV_DMA_V0_TARGET_PCI_US:
-	case NV_DMA_V0_TARGET_AGP:
-		if (!client->super)
-			return -EACCES;
-		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (dmaobj->access) {
-	case NV_DMA_V0_ACCESS_VM:
-		dmaobj->access = NV_MEM_ACCESS_VM;
-		break;
-	case NV_DMA_V0_ACCESS_RD:
-		dmaobj->access = NV_MEM_ACCESS_RO;
-		break;
-	case NV_DMA_V0_ACCESS_WR:
-		dmaobj->access = NV_MEM_ACCESS_WO;
-		break;
-	case NV_DMA_V0_ACCESS_RDWR:
-		dmaobj->access = NV_MEM_ACCESS_RW;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-int
-_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	const struct nvkm_dmaeng_impl *impl = (void *)oclass;
-	struct nvkm_dmaeng *dmaeng;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
-				 "dmaobj", &dmaeng);
-	*pobject = nv_object(dmaeng);
-	if (ret)
-		return ret;
-
-	nv_engine(dmaeng)->sclass = impl->sclass;
-	dmaeng->bind = nvkm_dmaobj_bind;
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
deleted file mode 100644
index f880e51..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf100_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf100_dma_v0 v0;
-	} *args;
-	struct gf100_dmaobj_priv *priv;
-	u32 kind, user, unkn;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf100 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
-			 args->v0.version, args->v0.priv, args->v0.kind);
-		kind = args->v0.kind;
-		user = args->v0.priv;
-		unkn = 0;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF100_DMA_V0_KIND_PITCH;
-			user = GF100_DMA_V0_PRIV_US;
-			unkn = 2;
-		} else {
-			kind = GF100_DMA_V0_KIND_VM;
-			user = GF100_DMA_V0_PRIV_VM;
-			unkn = 0;
-		}
-	} else
-		return ret;
-
-	if (user > 2)
-		return -EINVAL;
-	priv->flags0 |= (kind << 22) | (user << 20);
-	priv->flags5 |= (unkn << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf100_dmaobj_ofuncs = {
-	.ctor =  gf100_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf100_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf100_dmaeng_sclass,
-	.bind = gf100_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
deleted file mode 100644
index bf8f0f2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf110_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-};
-
-static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf110_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GF110_DISP_CORE_CHANNEL_DMA:
-		case GK104_DISP_CORE_CHANNEL_DMA:
-		case GK110_DISP_CORE_CHANNEL_DMA:
-		case GM107_DISP_CORE_CHANNEL_DMA:
-		case GM204_DISP_CORE_CHANNEL_DMA:
-		case GF110_DISP_BASE_CHANNEL_DMA:
-		case GK104_DISP_BASE_CHANNEL_DMA:
-		case GK110_DISP_BASE_CHANNEL_DMA:
-		case GF110_DISP_OVERLAY_CONTROL_DMA:
-		case GK104_DISP_OVERLAY_CONTROL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0);
-		nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
-		nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
-		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
-	}
-
-	return ret;
-}
-
-static int
-gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf110_dma_v0 v0;
-	} *args;
-	struct gf110_dmaobj_priv *priv;
-	u32 kind, page;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf110 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
-			 args->v0.version, args->v0.page, args->v0.kind);
-		kind = args->v0.kind;
-		page = args->v0.page;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF110_DMA_V0_KIND_PITCH;
-			page = GF110_DMA_V0_PAGE_SP;
-		} else {
-			kind = GF110_DMA_V0_KIND_VM;
-			page = GF110_DMA_V0_PAGE_LP;
-		}
-	} else
-		return ret;
-
-	if (page > 1)
-		return -EINVAL;
-	priv->flags0 = (kind << 20) | (page << 6);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00000009;
-		break;
-	case NV_MEM_TARGET_VM:
-	case NV_MEM_TARGET_PCI:
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		/* XXX: don't currently know how to construct a real one
-		 *      of these.  we only use them to represent pushbufs
-		 *      on these chipsets, and the classes that use them
-		 *      deal with the target themselves.
-		 */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf110_dmaobj_ofuncs = {
-	.ctor =  gf110_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf110_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf110_dmaeng_sclass,
-	.bind = gf110_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
deleted file mode 100644
index b4379c2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
-
-#include <nvif/class.h>
-
-struct nv04_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	bool clone;
-	u32 flags0;
-	u32 flags2;
-};
-
-static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv04_dmaobj_priv *priv = (void *)dmaobj;
-	struct nvkm_gpuobj *gpuobj;
-	u64 offset = priv->base.start & 0xfffff000;
-	u64 adjust = priv->base.start & 0x00000fff;
-	u32 length = priv->base.limit - priv->base.start;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV03_CHANNEL_DMA:
-		case NV10_CHANNEL_DMA:
-		case NV17_CHANNEL_DMA:
-		case NV40_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	if (priv->clone) {
-		struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
-		struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
-		if (!dmaobj->start)
-			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
-		offset  = nv_ro32(pgt, 8 + (offset >> 10));
-		offset &= 0xfffff000;
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
-		nv_wo32(*pgpuobj, 0x04, length);
-		nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
-		nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
-	}
-
-	return ret;
-}
-
-static int
-nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	struct nv04_mmu_priv *mmu = nv04_mmu(engine);
-	struct nv04_dmaobj_priv *priv;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret || (ret = -ENOSYS, size))
-		return ret;
-
-	if (priv->base.target == NV_MEM_TARGET_VM) {
-		if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
-			priv->clone = true;
-		priv->base.target = NV_MEM_TARGET_PCI;
-		priv->base.access = NV_MEM_ACCESS_RW;
-	}
-
-	priv->flags0 = nv_mclass(priv);
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00003000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00023000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00033000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00004000;
-		break;
-	case NV_MEM_ACCESS_WO:
-		priv->flags0 |= 0x00008000;
-	case NV_MEM_ACCESS_RW:
-		priv->flags2 |= 0x00000002;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv04_dmaobj_ofuncs = {
-	.ctor =  nv04_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv04_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv04_dmaeng_sclass,
-	.bind = nv04_dmaobj_bind,
-}.base;
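For reference, the four 32-bit words that `nv04_dmaobj_bind()` writes into the context DMA object can be modelled in isolation. The sketch below packs them the same way; the class and target values are copied from the constructor above but should be treated as illustrative, not authoritative hardware documentation:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* pack an nv04-style ctxdma as nv04_dmaobj_bind() does:
     * word 0: flags0 | (byte adjust << 20), word 1: length,
     * words 2/3: flags2 | page-aligned offset */
    static void pack_ctxdma(uint32_t w[4], uint32_t flags0, uint32_t flags2,
                            uint64_t start, uint64_t limit)
    {
            uint64_t offset = start & 0xfffff000;
            uint64_t adjust = start & 0x00000fff;
            uint32_t length = (uint32_t)(limit - start);

            w[0] = flags0 | (uint32_t)(adjust << 20);
            w[1] = length;
            w[2] = flags2 | (uint32_t)offset;
            w[3] = flags2 | (uint32_t)offset;
    }

    int main(void)
    {
            uint32_t w[4];

            /* 0x0000003d: NV_DMA_IN_MEMORY class (illustrative),
             * 0x00003000: VRAM target, 0x00000002: read/write */
            pack_ctxdma(w, 0x0000003d | 0x00003000, 0x00000002,
                        0x1080, 0x20000);
            for (int i = 0; i < 4; i++)
                    printf("w[%d] = %08" PRIx32 "\n", i, w[i]);
            return 0;
    }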
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
deleted file mode 100644
index 4d3c828..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv50_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv50_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV40_CHANNEL_DMA:
-		case NV50_CHANNEL_GPFIFO:
-		case G82_CHANNEL_GPFIFO:
-		case NV50_DISP_CORE_CHANNEL_DMA:
-		case G82_DISP_CORE_CHANNEL_DMA:
-		case GT206_DISP_CORE_CHANNEL_DMA:
-		case GT200_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case NV50_DISP_BASE_CHANNEL_DMA:
-		case G82_DISP_BASE_CHANNEL_DMA:
-		case GT200_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case NV50_DISP_OVERLAY_CHANNEL_DMA:
-		case G82_DISP_OVERLAY_CHANNEL_DMA:
-		case GT200_DISP_OVERLAY_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct nv50_dma_v0 v0;
-	} *args;
-	struct nv50_dmaobj_priv *priv;
-	u32 user, part, comp, kind;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create nv50 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
-				 "comp %d kind %02x\n", args->v0.version,
-			 args->v0.priv, args->v0.part, args->v0.comp,
-			 args->v0.kind);
-		user = args->v0.priv;
-		part = args->v0.part;
-		comp = args->v0.comp;
-		kind = args->v0.kind;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			user = NV50_DMA_V0_PRIV_US;
-			part = NV50_DMA_V0_PART_256;
-			comp = NV50_DMA_V0_COMP_NONE;
-			kind = NV50_DMA_V0_KIND_PITCH;
-		} else {
-			user = NV50_DMA_V0_PRIV_VM;
-			part = NV50_DMA_V0_PART_VM;
-			comp = NV50_DMA_V0_COMP_VM;
-			kind = NV50_DMA_V0_KIND_VM;
-		}
-	} else
-		return ret;
-
-	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
-		return -EINVAL;
-	priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
-	priv->flags5 = (part << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv50_dmaobj_ofuncs = {
-	.ctor =  nv50_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv50_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv50_dmaeng_sclass,
-	.bind = nv50_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
deleted file mode 100644
index 44ae8a0..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __NVKM_DMAOBJ_PRIV_H__
-#define __NVKM_DMAOBJ_PRIV_H__
-#include <engine/dmaobj.h>
-
-#define nvkm_dmaobj_create(p,e,c,pa,sa,d)                                      \
-	nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
-
-int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void **, u32 *,
-			int, void **);
-#define _nvkm_dmaobj_dtor nvkm_object_destroy
-#define _nvkm_dmaobj_init nvkm_object_init
-#define _nvkm_dmaobj_fini nvkm_object_fini
-
-int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
-#define _nvkm_dmaeng_init _nvkm_engine_init
-#define _nvkm_dmaeng_fini _nvkm_engine_fini
-
-struct nvkm_dmaeng_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *sclass;
-	int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
-		    struct nvkm_gpuobj **);
-};
-#endif
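The deleted `nvkm_dmaobj_create()` wrapper relied on the `sizeof(**d)` trick: the macro measures the caller's derived type through its pointer-to-pointer argument, so one generic constructor can allocate any subclass at the correct size. A standalone sketch of that pattern (names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    /* generic constructor: the caller's macro supplies the real size */
    static int obj_create_(size_t size, void **pobject)
    {
            if (!(*pobject = calloc(1, size)))
                    return -1;
            return 0;
    }

    /* sizeof(**(d)) is sizeof(struct derived) when d is struct derived ** */
    #define obj_create(d) obj_create_(sizeof(**(d)), (void **)(d))

    struct derived {
            int base;
            char payload[64];
    };

    int main(void)
    {
            struct derived *priv;

            if (obj_create(&priv))
                    return 1;
            printf("allocated %zu bytes\n", sizeof(*priv));
            free(priv);
            return 0;
    }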
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 30958c1..7400060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -21,40 +21,95 @@
  */
 #include <engine/falcon.h>
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
-void
-nvkm_falcon_intr(struct nvkm_subdev *subdev)
+static int
+nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_falcon *falcon = (void *)subdev;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
+	int c = 0;
+
+	while (falcon->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = falcon->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
+}
+
+static const struct nvkm_object_func
+nvkm_falcon_cclass = {
+	.bind = nvkm_falcon_cclass_bind,
+};
+
+static void
+nvkm_falcon_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
+	u32 dest = nvkm_rd32(device, base + 0x01c);
+	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
+	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+
+	if (intr & 0x00000040) {
+		if (falcon->func->intr) {
+			falcon->func->intr(falcon, chan);
+			nvkm_wr32(device, base + 0x004, 0x00000040);
+			intr &= ~0x00000040;
+		}
+	}
 
 	if (intr & 0x00000010) {
-		nv_debug(falcon, "ucode halted\n");
-		nv_wo32(falcon, 0x004, 0x00000010);
+		nvkm_debug(subdev, "ucode halted\n");
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 		intr &= ~0x00000010;
 	}
 
 	if (intr)  {
-		nv_error(falcon, "unhandled intr 0x%08x\n", intr);
-		nv_wo32(falcon, 0x004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, base + 0x004, intr);
 	}
+
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-u32
-_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-	return nv_rd32(falcon, falcon->addr + addr);
-}
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_device *device = falcon->engine.subdev.device;
+	const u32 base = falcon->addr;
 
-void
-_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_falcon *falcon = (void *)object;
-	nv_wr32(falcon, falcon->addr + addr, data);
+	if (!suspend) {
+		nvkm_memory_del(&falcon->core);
+		if (falcon->external) {
+			vfree(falcon->data.data);
+			vfree(falcon->code.data);
+			falcon->code.data = NULL;
+		}
+	}
+
+	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
+	return 0;
 }
 
 static void *
@@ -67,51 +122,66 @@
 	return p;
 }
 
-int
-_nvkm_falcon_init(struct nvkm_object *object)
+static int
+nvkm_falcon_oneinit(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_falcon *falcon = (void *)object;
-	const struct firmware *fw;
-	char name[32] = "internal";
-	int ret, i;
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
 	u32 caps;
 
-	/* enable engine, and determine its capabilities */
-	ret = nvkm_engine_init(&falcon->base);
-	if (ret)
-		return ret;
-
+	/* determine falcon capabilities */
 	if (device->chipset <  0xa3 ||
 	    device->chipset == 0xaa || device->chipset == 0xac) {
 		falcon->version = 0;
 		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
 	} else {
-		caps = nv_ro32(falcon, 0x12c);
+		caps = nvkm_rd32(device, base + 0x12c);
 		falcon->version = (caps & 0x0000000f);
 		falcon->secret  = (caps & 0x00000030) >> 4;
 	}
 
-	caps = nv_ro32(falcon, 0x108);
+	caps = nvkm_rd32(device, base + 0x108);
 	falcon->code.limit = (caps & 0x000001ff) << 8;
 	falcon->data.limit = (caps & 0x0003fe00) >> 1;
 
-	nv_debug(falcon, "falcon version: %d\n", falcon->version);
-	nv_debug(falcon, "secret level: %d\n", falcon->secret);
-	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
-	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
+	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
+	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
+	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
+	return 0;
+}
+
+static int
+nvkm_falcon_init(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct firmware *fw;
+	char name[32] = "internal";
+	const u32 base = falcon->addr;
+	int ret, i;
 
 	/* wait for 'uc halted' to be signalled before continuing */
 	if (falcon->secret && falcon->version < 4) {
-		if (!falcon->version)
-			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
-		else
-			nv_wait(falcon, 0x180, 0x80000000, 0);
-		nv_wo32(falcon, 0x004, 0x00000010);
+		if (!falcon->version) {
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
+					break;
+			);
+		} else {
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
+					break;
+			);
+		}
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 	}
 
 	/* disable all interrupts */
-	nv_wo32(falcon, 0x014, 0xffffffff);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
 
 	/* no default ucode provided by the engine implementation, try and
 	 * locate a "self-bootstrapping" firmware image for the engine
@@ -120,7 +190,7 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret == 0) {
 			falcon->code.data = vmemdup(fw->data, fw->size);
 			falcon->code.size = fw->size;
@@ -139,10 +209,10 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware data\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware data\n");
+			return -ENODEV;
 		}
 
 		falcon->data.data = vmemdup(fw->data, fw->size);
@@ -154,10 +224,10 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware code\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware code\n");
+			return -ENODEV;
 		}
 
 		falcon->code.data = vmemdup(fw->data, fw->size);
@@ -167,111 +237,117 @@
 			return -ENOMEM;
 	}
 
-	nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
-		 "static code/data segments" : "self-bootstrapping");
+	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
+		   "static code/data segments" : "self-bootstrapping");
 
 	/* ensure any "self-bootstrapping" firmware image is in vram */
 	if (!falcon->data.data && !falcon->core) {
-		ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size,
-				      256, 0, &falcon->core);
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      falcon->code.size, 256, false,
+				      &falcon->core);
 		if (ret) {
-			nv_error(falcon, "core allocation failed, %d\n", ret);
+			nvkm_error(subdev, "core allocation failed, %d\n", ret);
 			return ret;
 		}
 
+		nvkm_kmap(falcon->core);
 		for (i = 0; i < falcon->code.size; i += 4)
-			nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
+		nvkm_done(falcon->core);
 	}
 
 	/* upload firmware bootloader (or the full code segments) */
 	if (falcon->core) {
+		u64 addr = nvkm_memory_addr(falcon->core);
 		if (device->card_type < NV_C0)
-			nv_wo32(falcon, 0x618, 0x04000000);
+			nvkm_wr32(device, base + 0x618, 0x04000000);
 		else
-			nv_wo32(falcon, 0x618, 0x00000114);
-		nv_wo32(falcon, 0x11c, 0);
-		nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
-		nv_wo32(falcon, 0x114, 0);
-		nv_wo32(falcon, 0x118, 0x00006610);
+			nvkm_wr32(device, base + 0x618, 0x00000114);
+		nvkm_wr32(device, base + 0x11c, 0);
+		nvkm_wr32(device, base + 0x110, addr >> 8);
+		nvkm_wr32(device, base + 0x114, 0);
+		nvkm_wr32(device, base + 0x118, 0x00006610);
 	} else {
 		if (falcon->code.size > falcon->code.limit ||
 		    falcon->data.size > falcon->data.limit) {
-			nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
 			return -EINVAL;
 		}
 
 		if (falcon->version < 3) {
-			nv_wo32(falcon, 0xff8, 0x00100000);
+			nvkm_wr32(device, base + 0xff8, 0x00100000);
 			for (i = 0; i < falcon->code.size / 4; i++)
-				nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
 		} else {
-			nv_wo32(falcon, 0x180, 0x01000000);
+			nvkm_wr32(device, base + 0x180, 0x01000000);
 			for (i = 0; i < falcon->code.size / 4; i++) {
 				if ((i & 0x3f) == 0)
-					nv_wo32(falcon, 0x188, i >> 6);
-				nv_wo32(falcon, 0x184, falcon->code.data[i]);
+					nvkm_wr32(device, base + 0x188, i >> 6);
+				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
 			}
 		}
 	}
 
 	/* upload data segment (if necessary), zeroing the remainder */
 	if (falcon->version < 3) {
-		nv_wo32(falcon, 0xff8, 0x00000000);
+		nvkm_wr32(device, base + 0xff8, 0x00000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
 		for (; i < falcon->data.limit; i += 4)
-			nv_wo32(falcon, 0xff4, 0x00000000);
+			nvkm_wr32(device, base + 0xff4, 0x00000000);
 	} else {
-		nv_wo32(falcon, 0x1c0, 0x01000000);
+		nvkm_wr32(device, base + 0x1c0, 0x01000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
 		for (; i < falcon->data.limit / 4; i++)
-			nv_wo32(falcon, 0x1c4, 0x00000000);
+			nvkm_wr32(device, base + 0x1c4, 0x00000000);
 	}
 
 	/* start it running */
-	nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
-	nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
-	nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
-	nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
+	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
+	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
+
+	if (falcon->func->init)
+		falcon->func->init(falcon);
 	return 0;
 }
 
-int
-_nvkm_falcon_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_falcon_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-
-	if (!suspend) {
-		nvkm_gpuobj_ref(NULL, &falcon->core);
-		if (falcon->external) {
-			vfree(falcon->data.data);
-			vfree(falcon->code.data);
-			falcon->code.data = NULL;
-		}
-	}
-
-	nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
-	nv_wo32(falcon, 0x014, 0xffffffff);
-
-	return nvkm_engine_fini(&falcon->base, suspend);
+	return nvkm_falcon(engine);
 }
 
+static const struct nvkm_engine_func
+nvkm_falcon = {
+	.dtor = nvkm_falcon_dtor,
+	.oneinit = nvkm_falcon_oneinit,
+	.init = nvkm_falcon_init,
+	.fini = nvkm_falcon_fini,
+	.intr = nvkm_falcon_intr,
+	.fifo.sclass = nvkm_falcon_oclass_get,
+	.cclass = &nvkm_falcon_cclass,
+};
+
 int
-nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
 {
 	struct nvkm_falcon *falcon;
-	int ret;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	falcon = *pobject;
-	if (ret)
-		return ret;
-
+	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
+		return -ENOMEM;
+	falcon->func = func;
 	falcon->addr = addr;
-	return 0;
+	falcon->code.data = func->code.data;
+	falcon->code.size = func->code.size;
+	falcon->data.data = func->data.data;
+	falcon->data.size = func->data.size;
+	*pengine = &falcon->engine;
+
+	return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+				enable, &falcon->engine);
 }
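The `nv_wait()` calls in the old init path become open-coded `nvkm_msec()` polling loops with a 2000ms budget. A rough userspace approximation of that macro shape, assuming GCC statement expressions (which the kernel also uses); the register value and bit below are stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    /* poll the body until it breaks out or the budget expires;
     * the result is negative on timeout, like nvkm_msec() */
    #define poll_msec(ms, ...) ({                                        \
            int64_t _end = now_ns() + (int64_t)(ms) * 1000000, _left;    \
            for (;;) {                                                   \
                    _left = _end - now_ns();                             \
                    if (_left < 0)                                       \
                            break;  /* timed out */                      \
                    __VA_ARGS__;                                         \
            }                                                            \
            _left;                                                       \
    })

    static volatile uint32_t fake_reg = 0x00000010; /* 'ucode halted' set */

    int main(void)
    {
            int64_t left = poll_msec(2000,
                    if (fake_reg & 0x00000010)
                            break;
            );

            printf("%s\n", left >= 0 ? "condition met" : "timeout");
            return 0;
    }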
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 42891cb..74993c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,6 +7,24 @@
 nvkm-y += nvkm/engine/fifo/g84.o
 nvkm-y += nvkm/engine/fifo/gf100.o
 nvkm-y += nvkm/engine/fifo/gk104.o
-nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm20b.o
+
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/channv50.o
+nvkm-y += nvkm/engine/fifo/chang84.o
+
+nvkm-y += nvkm/engine/fifo/dmanv04.o
+nvkm-y += nvkm/engine/fifo/dmanv10.o
+nvkm-y += nvkm/engine/fifo/dmanv17.o
+nvkm-y += nvkm/engine/fifo/dmanv40.o
+nvkm-y += nvkm/engine/fifo/dmanv50.o
+nvkm-y += nvkm/engine/fifo/dmag84.o
+
+nvkm-y += nvkm/engine/fifo/gpfifonv50.o
+nvkm-y += nvkm/engine/fifo/gpfifog84.o
+nvkm-y += nvkm/engine/fifo/gpfifogf100.o
+nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fa223f8..1fbbfbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,18 +21,75 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "priv.h"
+#include "chan.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <core/notify.h>
-#include <engine/dmaobj.h>
 
-#include <nvif/class.h>
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+	return fifo->func->pause(fifo, flags);
+}
+
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+	return fifo->func->start(fifo, flags);
+}
+
+void
+nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
+		   struct nvkm_fifo_chan **pchan)
+{
+	struct nvkm_fifo_chan *chan = *pchan;
+	if (likely(chan)) {
+		*pchan = NULL;
+		spin_unlock_irqrestore(&fifo->lock, flags);
+	}
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->inst->addr == inst) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
+		}
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->chid == chid) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
+		}
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
+}
+
 static int
 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		     struct nvkm_notify *notify)
@@ -51,126 +108,21 @@
 	.ctor = nvkm_fifo_event_ctor,
 };
 
-int
-nvkm_fifo_channel_create_(struct nvkm_object *parent,
-			  struct nvkm_object *engine,
-			  struct nvkm_oclass *oclass,
-			  int bar, u32 addr, u32 size, u32 pushbuf,
-			  u64 engmask, int len, void **ptr)
+static void
+nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_device *device = nv_device(engine);
-	struct nvkm_fifo *priv = (void *)engine;
-	struct nvkm_fifo_chan *chan;
-	struct nvkm_dmaeng *dmaeng;
-	unsigned long flags;
-	int ret;
-
-	/* create base object class */
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  engmask, len, ptr);
-	chan = *ptr;
-	if (ret)
-		return ret;
-
-	/* validate dma object representing push buffer */
-	chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!chan->pushdma)
-		return -ENOENT;
-
-	dmaeng = (void *)chan->pushdma->base.engine;
-	switch (chan->pushdma->base.oclass->handle) {
-	case NV_DMA_FROM_MEMORY:
-	case NV_DMA_IN_MEMORY:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
-	if (ret)
-		return ret;
-
-	/* find a free fifo channel */
-	spin_lock_irqsave(&priv->lock, flags);
-	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
-		if (!priv->channel[chan->chid]) {
-			priv->channel[chan->chid] = nv_object(chan);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (chan->chid == priv->max) {
-		nv_error(priv, "no free channels\n");
-		return -ENOSPC;
-	}
-
-	chan->addr = nv_device_resource_start(device, bar) +
-		     addr + size * chan->chid;
-	chan->size = size;
-	nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
-	return 0;
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_fini(fifo);
 }
 
-void
-nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
+static void
+nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
-	unsigned long flags;
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->channel[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_gpuobj_ref(NULL, &chan->pushgpu);
-	nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
-	nvkm_namedb_destroy(&chan->namedb);
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_init(fifo);
 }
 
-void
-_nvkm_fifo_channel_dtor(struct nvkm_object *object)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	nvkm_fifo_channel_destroy(chan);
-}
-
-int
-_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	*addr = chan->addr;
-	*size = chan->size;
-	return 0;
-}
-
-u32
-_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return 0;
-	}
-	return ioread32_native(chan->user + addr);
-}
-
-void
-_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return;
-	}
-	iowrite32_native(data, chan->user + addr);
-}
-
-int
+static int
 nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -188,6 +140,13 @@
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_fifo_uevent_func = {
+	.ctor = nvkm_fifo_uevent_ctor,
+	.init = nvkm_fifo_uevent_init,
+	.fini = nvkm_fifo_uevent_fini,
+};
+
 void
 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
 {
@@ -196,87 +155,123 @@
 	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }
 
-int
-_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
-			struct nvkm_event **event)
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
 {
-	struct nvkm_fifo *fifo = (void *)object->engine;
-	switch (type) {
-	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
-		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
-			*event = &fifo->uevent;
+	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	return sclass->ctor(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+	.ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	const struct nvkm_fifo_chan_oclass *sclass;
+	int c = 0;
+
+	while ((sclass = fifo->func->chan[c])) {
+		if (c++ == index) {
+			oclass->base = sclass->base;
+			oclass->engn = sclass;
+			*class = &nvkm_fifo_class;
 			return 0;
 		}
-		break;
-	default:
-		break;
 	}
-	return -EINVAL;
+
+	return c;
+}
+
+static void
+nvkm_fifo_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->intr(fifo);
 }
 
 static int
-nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
 {
-	int engidx = nv_hclass(priv) & 0xff;
-
-	while (object && object->parent) {
-		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
-		    (nv_hclass(object->parent) & 0xff) == engidx)
-			return nvkm_fifo_chan(object)->chid;
-		object = object->parent;
-	}
-
-	return -1;
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->fini)
+		fifo->func->fini(fifo);
+	return 0;
 }
 
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
 {
-	struct nvkm_fifo_chan *chan = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&fifo->lock, flags);
-	if (chid >= fifo->min && chid <= fifo->max)
-		chan = (void *)fifo->channel[chid];
-	spin_unlock_irqrestore(&fifo->lock, flags);
-
-	return nvkm_client_name(chan);
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->oneinit)
+		return fifo->func->oneinit(fifo);
+	return 0;
 }
 
-void
-nvkm_fifo_destroy(struct nvkm_fifo *priv)
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
 {
-	kfree(priv->channel);
-	nvkm_event_fini(&priv->uevent);
-	nvkm_event_fini(&priv->cevent);
-	nvkm_engine_destroy(&priv->base);
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->init(fifo);
+	return 0;
 }
 
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	void *data = fifo;
+	if (fifo->func->dtor)
+		data = fifo->func->dtor(fifo);
+	nvkm_event_fini(&fifo->cevent);
+	nvkm_event_fini(&fifo->uevent);
+	return data;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+	.dtor = nvkm_fifo_dtor,
+	.oneinit = nvkm_fifo_oneinit,
+	.init = nvkm_fifo_init,
+	.fini = nvkm_fifo_fini,
+	.intr = nvkm_fifo_intr,
+	.base.sclass = nvkm_fifo_class_get,
+};
+
 int
-nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass,
-		  int min, int max, int length, void **pobject)
+nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *priv;
 	int ret;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
-				  "fifo", length, pobject);
-	priv = *pobject;
+	fifo->func = func;
+	INIT_LIST_HEAD(&fifo->chan);
+	spin_lock_init(&fifo->lock);
+
+	if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
+		fifo->nr = NVKM_FIFO_CHID_NR;
+	else
+		fifo->nr = nr;
+	bitmap_clear(fifo->mask, 0, fifo->nr);
+
+	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
+			       true, &fifo->engine);
 	if (ret)
 		return ret;
 
-	priv->min = min;
-	priv->max = max;
-	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
-	if (!priv->channel)
-		return -ENOMEM;
+	if (func->uevent_init) {
+		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
+				      &fifo->uevent);
+		if (ret)
+			return ret;
+	}
 
-	ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
-	if (ret)
-		return ret;
-
-	priv->chid = nvkm_fifo_chid;
-	spin_lock_init(&priv->lock);
-	return 0;
+	return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
 }
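Note the locking contract of the new lookup helpers: `nvkm_fifo_chan_inst()` and `nvkm_fifo_chan_chid()` return with `fifo->lock` still held (the saved IRQ flags travel back via `*rflags`), and the caller releases it through `nvkm_fifo_chan_put()`. A simplified userspace sketch of that hand-off, with a pthread mutex standing in for the spinlock and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;

    struct chan { int chid; };
    static struct chan channels[2] = { { 0 }, { 1 } };

    /* on success the lock stays held, so the channel cannot go away
     * while the caller uses it; on failure the lock is dropped */
    static struct chan *chan_get(int chid)
    {
            pthread_mutex_lock(&fifo_lock);
            if (chid >= 0 && chid < 2)
                    return &channels[chid];
            pthread_mutex_unlock(&fifo_lock);
            return NULL;
    }

    static void chan_put(struct chan **pchan)
    {
            if (*pchan) {
                    *pchan = NULL;
                    pthread_mutex_unlock(&fifo_lock);
            }
    }

    int main(void)
    {
            struct chan *chan = chan_get(1);

            if (chan)
                    printf("using channel %d under the lock\n", chan->chid);
            chan_put(&chan);
            return 0;
    }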
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 0000000..dc6d467
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+struct nvkm_fifo_chan_object {
+	struct nvkm_oproxy oproxy;
+	struct nvkm_fifo_chan *chan;
+	int hash;
+};
+
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret = 0;
+
+	if (--engn->usecount)
+		return 0;
+
+	if (chan->func->engine_fini) {
+		ret = chan->func->engine_fini(chan, engine, suspend);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "detach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	if (engn->object) {
+		ret = nvkm_object_fini(engn->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	nvif_trace(&chan->object, "detached %s\n", name);
+	return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret;
+
+	if (engn->usecount++)
+		return 0;
+
+	if (engn->object) {
+		ret = nvkm_object_init(engn->object);
+		if (ret)
+			return ret;
+	}
+
+	if (chan->func->engine_init) {
+		ret = chan->func->engine_init(chan, engine);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "attach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	nvif_trace(&chan->object, "attached %s\n", name);
+	return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.base.engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+
+	if (chan->func->object_dtor)
+		chan->func->object_dtor(chan, object->hash);
+
+	if (!--engn->refcount) {
+		if (chan->func->engine_dtor)
+			chan->func->engine_dtor(chan, engine);
+		nvkm_object_del(&engn->object);
+		if (chan->vm)
+			atomic_dec(&chan->vm->engref[engine->subdev.index]);
+	}
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+	.dtor[0] = nvkm_fifo_chan_child_del,
+	.init[0] = nvkm_fifo_chan_child_init,
+	.fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	struct nvkm_engine *engine = oclass->engine;
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	struct nvkm_fifo_chan_object *object;
+	int ret = 0;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+	object->chan = chan;
+	*pobject = &object->oproxy.base;
+
+	if (!engn->refcount++) {
+		struct nvkm_oclass cclass = {
+			.client = oclass->client,
+			.engine = oclass->engine,
+		};
+
+		if (chan->vm)
+			atomic_inc(&chan->vm->engref[engine->subdev.index]);
+
+		if (engine->func->fifo.cclass) {
+			ret = engine->func->fifo.cclass(chan, &cclass,
+							&engn->object);
+		} else
+		if (engine->func->cclass) {
+			ret = nvkm_object_new_(engine->func->cclass, &cclass,
+					       NULL, 0, &engn->object);
+		}
+		if (ret)
+			return ret;
+
+		if (chan->func->engine_ctor) {
+			ret = chan->func->engine_ctor(chan, oclass->engine,
+						      engn->object);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+					.base = oclass->base,
+					.engn = oclass->engn,
+					.handle = oclass->handle,
+					.object = oclass->object,
+					.client = oclass->client,
+					.parent = engn->object ?
+						  engn->object :
+						  oclass->parent,
+					.engine = engine,
+				}, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	if (chan->func->object_ctor) {
+		object->hash =
+			chan->func->object_ctor(chan, object->oproxy.object);
+		if (object->hash < 0)
+			return object->hash;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_engine *engine;
+	u64 mask = chan->engines;
+	int ret, i, c;
+
+	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)))
+			continue;
+		oclass->engine = engine;
+		oclass->base.oclass = 0;
+
+		if (engine->func->fifo.sclass) {
+			ret = engine->func->fifo.sclass(oclass, index);
+			if (oclass->base.oclass) {
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+
+			index -= ret;
+			continue;
+		}
+
+		while (engine->func->sclass[c].oclass) {
+			if (c++ == index) {
+				oclass->base = engine->func->sclass[index];
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+		}
+		index -= c;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (chan->func->ntfy)
+		return chan->func->ntfy(chan, type, pevent);
+	return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	*addr = chan->addr;
+	*size = chan->size;
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	*data = ioread32_native(chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	iowrite32_native(data, chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->init(chan);
+	return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	void *data = chan->func->dtor(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (!list_empty(&chan->head)) {
+		__clear_bit(chan->chid, fifo->mask);
+		list_del(&chan->head);
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	if (chan->user)
+		iounmap(chan->user);
+
+	nvkm_vm_ref(NULL, &chan->vm, NULL);
+
+	nvkm_gpuobj_del(&chan->push);
+	nvkm_gpuobj_del(&chan->inst);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_fifo_chan_func = {
+	.dtor = nvkm_fifo_chan_dtor,
+	.init = nvkm_fifo_chan_init,
+	.fini = nvkm_fifo_chan_fini,
+	.ntfy = nvkm_fifo_chan_ntfy,
+	.map = nvkm_fifo_chan_map,
+	.rd32 = nvkm_fifo_chan_rd32,
+	.wr32 = nvkm_fifo_chan_wr32,
+	.sclass = nvkm_fifo_chan_child_get,
+};
+
+int
+nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
+		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
+		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_fifo_chan *chan)
+{
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_mmu *mmu = device->mmu;
+	struct nvkm_dmaobj *dmaobj;
+	unsigned long flags;
+	int ret;
+
+	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
+	chan->func = func;
+	chan->fifo = fifo;
+	chan->engines = engines;
+	INIT_LIST_HEAD(&chan->head);
+
+	/* instance memory */
+	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
+	if (ret)
+		return ret;
+
+	/* allocate push buffer ctxdma instance */
+	if (push) {
+		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
+		if (!dmaobj)
+			return -ENOENT;
+
+		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+				       &chan->push);
+		if (ret)
+			return ret;
+	}
+
+	/* channel address space */
+	if (!vm && mmu) {
+		if (!client->vm || client->vm->mmu == mmu) {
+			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
+			if (ret)
+				return ret;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -ENOENT;
+	}
+
+	/* allocate channel id */
+	spin_lock_irqsave(&fifo->lock, flags);
+	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
+	if (chan->chid >= NVKM_FIFO_CHID_NR) {
+		spin_unlock_irqrestore(&fifo->lock, flags);
+		return -ENOSPC;
+	}
+	list_add(&chan->head, &fifo->chan);
+	__set_bit(chan->chid, fifo->mask);
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	/* determine address of this channel's user registers */
+	chan->addr = device->func->resource_addr(device, bar) +
+		     base + user * chan->chid;
+	chan->size = user;
+
+	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+	return 0;
+}
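Channel IDs are now claimed from a bitmap under `fifo->lock` (`find_first_zero_bit()` plus `__set_bit()`), replacing the old scan over a channel pointer array. A minimal sketch of that allocation step; `NR` and the error value are illustrative:

    #include <stdio.h>

    #define NR 8                    /* stands in for NVKM_FIFO_CHID_NR */
    static unsigned long mask;      /* assumes NR fits in one word */

    /* first-fit allocation, as find_first_zero_bit() would do */
    static int chid_alloc(void)
    {
            for (int i = 0; i < NR; i++) {
                    if (!(mask & (1UL << i))) {
                            mask |= 1UL << i;
                            return i;
                    }
            }
            return -1;              /* the kernel returns -ENOSPC */
    }

    int main(void)
    {
            for (int i = 0; i < NR + 1; i++)
                    printf("chid = %d\n", chid_alloc());
            return 0;
    }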
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 0000000..55dc415
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FIFO_CHAN_H__
+#define __NVKM_FIFO_CHAN_H__
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#include "priv.h"
+
+struct nvkm_fifo_chan_func {
+	void *(*dtor)(struct nvkm_fifo_chan *);
+	void (*init)(struct nvkm_fifo_chan *);
+	void (*fini)(struct nvkm_fifo_chan *);
+	int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
+	int  (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    struct nvkm_object *);
+	void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    bool suspend);
+	int  (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
+	void (*object_dtor)(struct nvkm_fifo_chan *, int);
+};
+
+int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
+			u32 size, u32 align, bool zero, u64 vm, u64 push,
+			u64 engines, int bar, u32 base, u32 user,
+			const struct nvkm_oclass *, struct nvkm_fifo_chan *);
+
+struct nvkm_fifo_chan_oclass {
+	int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+#endif
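The `nvkm_fifo_chan(p)` macro above is the standard `container_of()` idiom: given a pointer to the embedded `object` member, recover the containing channel. A self-contained sketch:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct object { int handle; };

    struct fifo_chan {
            int chid;
            struct object object;   /* embedded, as in nvkm_fifo_chan */
    };

    int main(void)
    {
            struct fifo_chan chan = { .chid = 3 };
            struct object *obj = &chan.object;
            struct fifo_chan *back =
                    container_of(obj, struct fifo_chan, object);

            printf("chid = %d\n", back->chid);
            return 0;
    }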
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 0000000..0430524
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+int
+g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+		   struct nvkm_event **pevent)
+{
+	switch (type) {
+	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &chan->fifo->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+g84_fifo_chan_engine(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_GR    : return 0;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 1;
+	case NVKM_ENGINE_CE0   : return 2;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 3;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 4;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 5;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0020;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 0x0040;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 0x0060;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 0x0080;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 0x00a0;
+	case NVKM_ENGINE_CE0   : return 0x00c0;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 engn, save;
+	int offset;
+	bool done;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	engn = g84_fifo_chan_engine(engine);
+	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	done = nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) >= 0;
+	nvkm_wr32(device, 0x002520, save);
+	if (!done) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+
+int
+g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+static int
+g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (g84_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+int
+g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
+	case NVKM_ENGINE_ME    :
+	case NVKM_ENGINE_CE0   : context = 0x00300000; break;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   :
+	case NVKM_ENGINE_VIC   : context = 0x00500000; break;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+static void
+g84_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 8;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+static const struct nvkm_fifo_chan_func
+g84_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = g84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = g84_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = g84_fifo_chan_engine_init,
+	.engine_fini = g84_fifo_chan_engine_fini,
+	.object_ctor = g84_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		   const struct nvkm_oclass *oclass,
+		   struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_BSP) |
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CIPHER) |
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_ME) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SEC) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_VIC) |
+				  (1ULL << NVKM_ENGINE_VP),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
+			      &chan->cache);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 0000000..7d697e2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,24 @@
+#ifndef __GF100_FIFO_CHAN_H__
+#define __GF100_FIFO_CHAN_H__
+#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
+#include "chan.h"
+#include "gf100.h"
+
+struct gf100_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gf100_fifo *fifo;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 0000000..97bdddb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,29 @@
+#ifndef __GK104_FIFO_CHAN_H__
+#define __GK104_FIFO_CHAN_H__
+#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
+#include "chan.h"
+#include "gk104.h"
+
+struct gk104_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gk104_fifo *fifo;
+	int engine;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
+			  void *data, u32 size, struct nvkm_object **);
+
+extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 0000000..3361a1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,24 @@
+#ifndef __NV04_FIFO_CHAN_H__
+#define __NV04_FIFO_CHAN_H__
+#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
+#include "chan.h"
+#include "nv04.h"
+
+struct nv04_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct nv04_fifo *fifo;
+	u32 ramfc;
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
+void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
+
+extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 0000000..25b60af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+static int
+nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0000;
+	case NVKM_ENGINE_MPEG  : return 0x0060;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int offset, ret = 0;
+	u32 me;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue; they also
+	 * have an "ignore these engines" bitmask register we could fall
+	 * back on if the issue ever showed up there.
+	 */
+	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			ret = -EBUSY;
+	}
+	nvkm_wr32(device, 0x00b860, me);
+
+	if (ret == 0) {
+		nvkm_kmap(chan->eng);
+		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+		nvkm_done(chan->eng);
+	}
+
+	return ret;
+}
+
+static int
+nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
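+	/* fill the engine's context entry: a flags word, then the context
+	 * limit/base with the upper bits of both packed into one word
+	 */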
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+void
+nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (nv50_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+void
+nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+void
+nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 chid = chan->base.chid;
+
+	/* remove channel from runlist, fifo will unload context */
+	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+	nv50_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
+}
+
+static void
+nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 12;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+void *
+nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_ramht_del(&chan->ramht);
+	nvkm_gpuobj_del(&chan->pgd);
+	nvkm_gpuobj_del(&chan->eng);
+	nvkm_gpuobj_del(&chan->cache);
+	nvkm_gpuobj_del(&chan->ramfc);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+nv50_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.engine_ctor = nv50_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = nv50_fifo_chan_engine_init,
+	.engine_fini = nv50_fifo_chan_engine_fini,
+	.object_ctor = nv50_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
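+	/* sub-allocate the ramfc, engine context and page directory
+	 * objects from the channel instance created above
+	 */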
+	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 0000000..4b9da46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,35 @@
+#ifndef __NV50_FIFO_CHAN_H__
+#define __NV50_FIFO_CHAN_H__
+#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
+#include "chan.h"
+#include "nv50.h"
+
+struct nv50_fifo_chan {
+	struct nv50_fifo *fifo;
+	struct nvkm_fifo_chan base;
+
+	struct nvkm_gpuobj *ramfc;
+	struct nvkm_gpuobj *cache;
+	struct nvkm_gpuobj *eng;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_ramht *ramht;
+	struct nvkm_vm *vm;
+
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+			const struct nvkm_oclass *, struct nv50_fifo_chan *);
+void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
+
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+		       const struct nvkm_oclass *, struct nv50_fifo_chan *);
+
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
new file mode 100644
index 0000000..a5ca52c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
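+	/* fill in RAMFC: DMA put/get at the requested offset, push buffer
+	 * instance, RAMHT base/size, and the cache/instance pointers
+	 */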
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_dma_oclass = {
+	.base.oclass = G82_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 0000000..bfcc640
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+void
+nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	nvkm_ramht_remove(imem->ramht, cookie);
+}
+
+static int
+nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = 0x80000000 | chan->base.chid << 24;
+	u32 handle  = object->handle;
+	int hash;
+
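+	/* RAMHT context word: bit 31 marks the entry valid, the channel id
+	 * sits at bit 24, and the engine bits are filled in below
+	 */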
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00010000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00020000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+void
+nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *fctx = device->imem->ramfc;
+	const struct nv04_fifo_ramfc *c;
+	unsigned long flags;
+	u32 mask = fifo->base.nr - 1;
+	u32 data = chan->ramfc;
+	u32 chid;
+
+	/* prevent fifo context switches */
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+	/* if this channel is active, replace it with a null context */
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
+	if (chid == chan->base.chid) {
+		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
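+		/* copy each RAMFC field from the live PFIFO registers back
+		 * into this channel's RAMFC entry...
+		 */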
+		c = fifo->ramfc;
+		do {
+			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
+			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+		} while ((++c)->bits);
+
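+		/* ...then zero the live registers, leaving a null context */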
+		c = fifo->ramfc;
+		do {
+			nvkm_wr32(device, c->regp, 0x00000000);
+		} while ((++c)->bits);
+
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	}
+
+	/* restore normal operation after disabling the channel's dma mode */
+	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void
+nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = 1 << chan->base.chid;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void *
+nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
+	const struct nv04_fifo_ramfc *c = fifo->ramfc;
+
+	nvkm_kmap(imem->ramfc);
+	do {
+		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+	} while ((++c)->bits);
+	nvkm_done(imem->ramfc);
+	return chan;
+}
+
+const struct nvkm_fifo_chan_func
+nv04_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.object_ctor = nv04_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
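+	/* initialise the channel's RAMFC slot: DMA put/get at the requested
+	 * offset, the push buffer instance, and default fetch parameters
+	 */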
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+	.base.oclass = NV03_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv04_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 0000000..34f68e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+	.base.oclass = NV10_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv10_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 0000000..ed7cc9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 64;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+	.base.oclass = NV17_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv17_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 0000000..043b6c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
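+	/* map an engine to the PFIFO register that holds its current
+	 * context (reg) and to the matching slot in the channel's RAMFC
+	 * entry (ctx); engines with no context entry return false
+	 */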
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW:
+		return false;
+	case NVKM_ENGINE_GR:
+		*reg = 0x0032e0;
+		*ctx = 0x38;
+		return true;
+	case NVKM_ENGINE_MPEG:
+		*reg = 0x00330c;
+		*ctx = 0x54;
+		return true;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+}
+
+static int
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, 0x00000000);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 inst, reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+	inst = chan->engn[engine->subdev.index]->addr >> 4;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, inst);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	const int engn = engine->subdev.index;
+	u32 reg, ctx;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = chan->base.chid << 23;
+	u32 handle  = object->handle;
+	int hash;
+
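+	/* nv40 RAMHT context word: channel id at bit 23, engine bits
+	 * filled in below
+	 */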
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.engine_ctor = nv40_fifo_dma_engine_ctor,
+	.engine_dtor = nv40_fifo_dma_engine_dtor,
+	.engine_init = nv40_fifo_dma_engine_init,
+	.engine_fini = nv40_fifo_dma_engine_fini,
+	.object_ctor = nv40_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0xc00000, 0x1000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 128;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+	.base.oclass = NV40_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv40_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
new file mode 100644
index 0000000..6b3b15f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_dma_oclass = {
+	.base.oclass = NV50_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index a04920b..ff7b529 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -22,466 +22,41 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
-
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, save, engn;
-	bool done;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : engn = 0; addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : engn = 1; addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : engn = 4; addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : engn = 2; addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
-	nv_wr32(priv, 0x002520, save);
-	if (!done) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_object_attach(struct nvkm_object *parent,
-		       struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  :
-	case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
-	case NVDEV_ENGINE_ME    :
-	case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   :
-	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
-	default:
-		return -EINVAL;
-	}
-
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-static int
-g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
-	nv50_fifo_playlist_update(priv);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_dma = {
-	.ctor = g84_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_ind = {
-	.ctor = g84_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-g84_fifo_sclass[] = {
-	{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
-	{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nv50_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
-			      0, &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
-			      0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
-			      0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-g84_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
+#include "channv50.h"
 
 static void
-g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
 }
 
 static void
-g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
 }
 
-static const struct nvkm_event_func
-g84_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = g84_fifo_uevent_init,
-	.fini = g84_fifo_uevent_fini,
-};
-
-static int
-g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &g84_fifo_cclass;
-	nv_engine(priv)->sclass = g84_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
-}
-
-struct nvkm_oclass *
-g84_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+g84_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.uevent_init = g84_fifo_uevent_init,
+	.uevent_fini = g84_fifo_uevent_fini,
+	.chan = {
+		&g84_fifo_dma_oclass,
+		&g84_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b745252..ff6fcbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,365 +21,72 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "gf100.h"
+#include "changf100.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct {
-		struct nvkm_gpuobj *mem[2];
-		int active;
-		wait_queue_head_t wait;
-	} runlist;
-
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gf100_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_fifo_chan {
-	struct nvkm_fifo_chan base;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
 
 static void
-gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
+gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
-	int i, p;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	cur = priv->runlist.mem[priv->runlist.active];
-	priv->runlist.active = !priv->runlist.active;
-
-	for (i = 0, p = 0; i < 128; i++) {
-		struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000004);
-			p += 8;
-		}
-	}
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
-
-	if (wait_event_timeout(priv->runlist.wait,
-			       !(nv_rd32(priv, 0x00227c) & 0x00100000),
-			       msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist update timeout\n");
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }
 
-static int
-gf100_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
+static void
+gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
-gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+void
+gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)parent->engine;
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct gf100_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base, addr + 0x00, 0x00000000);
-	nv_wo32(base, addr + 0x04, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)engine;
-	struct gf100_fifo_base *base = (void *)parent;
 	struct gf100_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
 
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
+	mutex_lock(&subdev->mutex);
+	cur = fifo->runlist.mem[fifo->runlist.active];
+	fifo->runlist.active = !fifo->runlist.active;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x1000,
-				       args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_CE1) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
-	nv_parent(chan)->context_detach = gf100_fifo_context_detach;
-
-	usermem = chan->base.chid * 0x1000;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x1000; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x54, 0x00000002);
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xa4, 0x1f1f1f1f);
-	nv_wo32(base, 0xa8, 0x1f1f1f1f);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
-		gf100_fifo_runlist_update(priv);
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+		nr++;
 	}
+	nvkm_done(cur);
 
-	return 0;
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
+
+	if (wait_event_timeout(fifo->runlist.wait,
+			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
+			       msecs_to_jiffies(2000)) == 0)
+		nvkm_error(subdev, "runlist update timeout\n");
+	mutex_unlock(&subdev->mutex);
 }
 
-static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
-
-static int
-gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
-		gf100_fifo_runlist_update(priv);
-	}
-
-	gf100_fifo_intr_engine(priv);
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-gf100_fifo_ofuncs = {
-	.ctor = gf100_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gf100_fifo_chan_init,
-	.fini = gf100_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gf100_fifo_sclass[] = {
-	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gf100_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-				       NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gf100_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gf100_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_context_ctor,
-		.dtor = gf100_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
 static inline int
-gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
 {
 	switch (engn) {
-	case NVDEV_ENGINE_GR    : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
+	case NVKM_ENGINE_GR    : engn = 0; break;
+	case NVKM_ENGINE_MSVLD : engn = 1; break;
+	case NVKM_ENGINE_MSPPP : engn = 2; break;
+	case NVKM_ENGINE_MSPDEC: engn = 3; break;
+	case NVKM_ENGINE_CE0   : engn = 4; break;
+	case NVKM_ENGINE_CE1   : engn = 5; break;
 	default:
 		return -1;
 	}
@@ -388,95 +95,73 @@
 }
 
 static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+
 	switch (engn) {
-	case 0: engn = NVDEV_ENGINE_GR; break;
-	case 1: engn = NVDEV_ENGINE_MSVLD; break;
-	case 2: engn = NVDEV_ENGINE_MSPPP; break;
-	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
-	case 4: engn = NVDEV_ENGINE_CE0; break;
-	case 5: engn = NVDEV_ENGINE_CE1; break;
+	case 0: engn = NVKM_ENGINE_GR; break;
+	case 1: engn = NVKM_ENGINE_MSVLD; break;
+	case 2: engn = NVKM_ENGINE_MSPPP; break;
+	case 3: engn = NVKM_ENGINE_MSPDEC; break;
+	case 4: engn = NVKM_ENGINE_CE0; break;
+	case 5: engn = NVKM_ENGINE_CE1; break;
 	default:
 		return NULL;
 	}
 
-	return nvkm_engine(priv, engn);
+	return nvkm_device_engine(device, engn);
 }
 
 static void
 gf100_fifo_recover_work(struct work_struct *work)
 {
-	struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gf100_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gf100_fifo_engidx(fifo, engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
 	}
 
-	gf100_fifo_runlist_update(priv);
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	gf100_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
+gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 		   struct gf100_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
-	chan->state = KILLED;
+	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gf100_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -486,14 +171,17 @@
 };
 
 static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gf100_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
+	spin_lock_irqsave(&fifo->base.lock, flags);
 	for (engn = 0; engn < 6; engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 save = (stat & 0x00100000); /* maybe? */
 		u32 unk0 = (stat & 0x00040000);
@@ -502,32 +190,36 @@
 		(void)save;
 
 		if (busy && unk0 && unk1) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gf100_fifo_engine(priv, engn)))
-				continue;
-			gf100_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gf100_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gf100_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static void
-gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
 	const struct nvkm_enum *en;
-	char enunk[6] = "";
 
 	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
 
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gf100_fifo_intr_sched_ctxsw(priv);
+		gf100_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -536,17 +228,17 @@
 
 static const struct nvkm_enum
 gf100_fifo_fault_engine[] = {
-	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PCOUNTER" },
-	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PDAEMON" },
 	{}
 };
@@ -594,79 +286,65 @@
 };
 
 static void
-gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
-	if (eu) {
-		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
-			break;
-		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
-			break;
-		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
-	}
-
 	if (hub) {
 		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
 	} else {
 		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case FERMI_CHANNEL_GPFIFO:
-			gf100_fifo_recover(priv, engine, (void *)object);
+	if (eu) {
+		switch (eu->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-		object = object->parent;
 	}
 
-	nvkm_engctx_put(engctx);
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
+
+	if (engine && chan)
+		gf100_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
 
 static const struct nvkm_bitfield
@@ -678,290 +356,288 @@
 };
 
 static void
-gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 {
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
-	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u32 show = stat;
+	char msg[128];
 
 	if (stat & 0x00800000) {
-		if (!gf100_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
 
 static void
-gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x002a00);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x002a00);
 
 	if (intr & 0x10000000) {
-		wake_up(&priv->runlist.wait);
-		nv_wr32(priv, 0x002a00, 0x10000000);
+		wake_up(&fifo->runlist.wait);
+		nvkm_wr32(device, 0x002a00, 0x10000000);
 		intr &= ~0x10000000;
 	}
 
 	if (intr) {
-		nv_error(priv, "RUNLIST 0x%08x\n", intr);
-		nv_wr32(priv, 0x002a00, intr);
+		nvkm_error(subdev, "RUNLIST %08x\n", intr);
+		nvkm_wr32(device, 0x002a00, intr);
 	}
 }
 
 static void
-gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
+gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 {
-	u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
-	u32 inte = nv_rd32(priv, 0x002628);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+	u32 inte = nvkm_rd32(device, 0x002628);
 	u32 unkn;
 
-	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
 
 	for (unkn = 0; unkn < 8; unkn++) {
 		u32 ints = (intr >> (unkn * 0x04)) & inte;
 		if (ints & 0x1) {
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_fifo_uevent(&fifo->base);
 			ints &= ~1;
 		}
 		if (ints) {
-			nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
-			nv_mask(priv, 0x002628, ints, 0);
+			nvkm_error(subdev, "ENGINE %d %d %01x",
+				   engn, unkn, ints);
+			nvkm_mask(device, 0x002628, ints, 0);
 		}
 	}
 }
 
-static void
-gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
+void
+gf100_fifo_intr_engine(struct gf100_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x0025a4);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x0025a4);
 	while (mask) {
 		u32 unit = __ffs(mask);
-		gf100_fifo_intr_engine_unit(priv, unit);
+		gf100_fifo_intr_engine_unit(fifo, unit);
 		mask &= ~(1 << unit);
 	}
 }
 
 static void
-gf100_fifo_intr(struct nvkm_subdev *subdev)
+gf100_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		u32 intr = nv_rd32(priv, 0x00252c);
-		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		u32 intr = nvkm_rd32(device, 0x00252c);
+		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000100) {
-		gf100_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gf100_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		u32 intr = nv_rd32(priv, 0x00256c);
-		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		u32 intr = nvkm_rd32(device, 0x00256c);
+		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x01000000) {
-		u32 intr = nv_rd32(priv, 0x00258c);
-		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x01000000);
+		u32 intr = nvkm_rd32(device, 0x00258c);
+		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gf100_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_pbdma(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gf100_fifo_intr_pbdma(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gf100_fifo_intr_runlist(priv);
+		gf100_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		gf100_fifo_intr_engine(priv);
+		gf100_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
 
-static void
-gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gf100_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gf100_fifo_uevent_init,
-	.fini = gf100_fifo_uevent_fini,
-};
-
 static int
-gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gf100_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[0]);
 	if (ret)
 		return ret;
 
-	INIT_WORK(&priv->fault, gf100_fifo_recover_work);
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[1]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[1]);
+	init_waitqueue_head(&fifo->runlist.wait);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+			      0x1000, false, &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	init_waitqueue_head(&priv->runlist.wait);
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
-			      &priv->user.mem);
+	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gf100_fifo_intr;
-	nv_engine(priv)->cclass = &gf100_fifo_cclass;
-	nv_engine(priv)->sclass = gf100_fifo_sclass;
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
 
 static void
-gf100_fifo_dtor(struct nvkm_object *object)
+gf100_fifo_fini(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
-
-	nvkm_fifo_destroy(&priv->base);
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	flush_work(&fifo->fault);
 }
 
-static int
-gf100_fifo_init(struct nvkm_object *object)
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-	int ret, i;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
 
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	nvkm_wr32(device, 0x002204, 0xffffffff);
 
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	nv_wr32(priv, 0x002204, 0xffffffff);
-
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
 	/* assign engines to PBDMAs */
-	if (priv->spoon_nr >= 3) {
-		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
-		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
-		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */
-		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
-		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
-		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+	if (fifo->spoon_nr >= 3) {
+		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
+		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
 	}
 
 	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
-	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
 
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
-	nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
-	return 0;
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
+	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
 }
 
-struct nvkm_oclass *
-gf100_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_ctor,
-		.dtor = gf100_fifo_dtor,
-		.init = gf100_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static void *
+gf100_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+	nvkm_memory_del(&fifo->runlist.mem[0]);
+	nvkm_memory_del(&fifo->runlist.mem[1]);
+	return fifo;
+}
+
+static const struct nvkm_fifo_func
+gf100_fifo = {
+	.dtor = gf100_fifo_dtor,
+	.oneinit = gf100_fifo_oneinit,
+	.init = gf100_fifo_init,
+	.fini = gf100_fifo_fini,
+	.intr = gf100_fifo_intr,
+	.uevent_init = gf100_fifo_uevent_init,
+	.uevent_fini = gf100_fifo_uevent_fini,
+	.chan = {
+		&gf100_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	struct gf100_fifo *fifo;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&fifo->chan);
+	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+	*pfifo = &fifo->base;
+
+	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+}
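
The runlist code above double-buffers: gf100_fifo_runlist_update() fills
whichever of runlist.mem[0]/[1] the hardware is not currently reading,
points 0x002270/0x002274 at it, and then waits on runlist.wait for the
pending bit in 0x00227c to clear. A minimal sketch of the buffer-flip
idiom, using hypothetical names rather than the driver's types:

	struct runlist {
		void *mem[2];	/* two runlist buffers */
		int active;	/* index of the buffer to submit next */
	};

	/* Return the buffer to fill and submit, and flip 'active' so the
	 * next update uses the other one -- the same flip performed at
	 * the top of gf100_fifo_runlist_update().
	 */
	static void *runlist_swap(struct runlist *rl)
	{
		void *cur = rl->mem[rl->active];
		rl->active = !rl->active;
		return cur;
	}
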
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 0000000..c649ca9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,31 @@
+#ifndef __GF100_FIFO_H__
+#define __GF100_FIFO_H__
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gf100_fifo {
+	struct nvkm_fifo base;
+
+	struct list_head chan;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct {
+		struct nvkm_memory *mem[2];
+		int active;
+		wait_queue_head_t wait;
+	} runlist;
+
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+void gf100_fifo_intr_engine(struct gf100_fifo *);
+void gf100_fifo_runlist_update(struct gf100_fifo *);
+#endif
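
The gf100_fifo(p) macro defined at the top of this new header is the
standard container_of() downcast: struct nvkm_fifo is embedded as the
first member ("base") of the chipset-private structure, so the generic
pointer handed to the nvkm_fifo_func callbacks can be converted back to
the outer type. A self-contained sketch of the idiom, with illustrative
names (not driver code):

	#include <stddef.h>

	struct generic { int common; };

	struct chip {
		struct generic base;	/* embedded generic object */
		int chip_private;
	};

	/* Recover the containing 'struct chip' from a 'struct generic *',
	 * as gf100_fifo(p) does for struct gf100_fifo.
	 */
	#define to_chip(p) \
		((struct chip *)((char *)(p) - offsetof(struct chip, base)))
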
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f964..98970a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,486 +22,121 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
 
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
-	u64 subdev;
-	u64 mask;
-} fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
-				 (1ULL << NVDEV_ENGINE_CE2)),
-	_(NVDEV_ENGINE_MSPDEC  , 0),
-	_(NVDEV_ENGINE_MSPPP   , 0),
-	_(NVDEV_ENGINE_MSVLD   , 0),
-	_(NVDEV_ENGINE_CE0     , 0),
-	_(NVDEV_ENGINE_CE1     , 0),
-	_(NVDEV_ENGINE_MSENC   , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
-struct gk104_fifo_engn {
-	struct nvkm_gpuobj *runlist[2];
-	int cur_runlist;
-	wait_queue_head_t wait;
-};
-
-struct gk104_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gk104_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gk104_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 engine;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static void
-gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
+void
+gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct gk104_fifo_engn *engn = &priv->engine[engine];
-	struct nvkm_gpuobj *cur;
-	int i, p;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
+}
 
-	mutex_lock(&nv_subdev(priv)->mutex);
+void
+gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
+{
+	struct gk104_fifo_engn *engn = &fifo->engine[engine];
+	struct gk104_fifo_chan *chan;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
+
+	mutex_lock(&subdev->mutex);
 	cur = engn->runlist[engn->cur_runlist];
 	engn->cur_runlist = !engn->cur_runlist;
 
-	for (i = 0, p = 0; i < priv->base.max; i++) {
-		struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING && chan->engine == engine) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000000);
-			p += 8;
-		}
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &engn->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+		nr++;
 	}
-	bar->flush(bar);
+	nvkm_done(cur);
 
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
 
-	if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
+	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
 			       (engine * 0x08)) & 0x00100000),
 				msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist %d update timeout\n", engine);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   :
-		return 0;
-	case NVDEV_ENGINE_CE0:
-	case NVDEV_ENGINE_CE1:
-	case NVDEV_ENGINE_CE2:
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-		return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct gk104_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_CE0   :
-	case NVDEV_ENGINE_CE1   :
-	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	if (addr) {
-		nv_wo32(base, addr + 0x00, 0x00000000);
-		nv_wo32(base, addr + 0x04, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return 0;
-}
-
-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	union {
-		struct kepler_channel_gpfifo_a_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)engine;
-	struct gk104_fifo_base *base = (void *)parent;
-	struct gk104_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x engine %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength, args->v0.engine);
-	} else
-		return ret;
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		if (args->v0.engine & (1 << i)) {
-			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
-				args->v0.engine = (1 << i);
-				break;
-			}
-		}
-	}
-
-	if (i == FIFO_ENGINE_NR) {
-		nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
-		return -ENODEV;
-	}
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x200,
-				       args->v0.pushbuf,
-				       fifo_engine[i].mask, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
-	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
-	chan->engine = i;
-
-	usermem = chan->base.chid * 0x200;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x200; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xe8, chan->base.chid);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-		gk104_fifo_runlist_update(priv, chan->engine);
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-	}
-
-	return 0;
-}
-
-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
-		gk104_fifo_runlist_update(priv, chan->engine);
-	}
-
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
-	.ctor = gk104_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gk104_fifo_chan_init,
-	.fini = gk104_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gk104_fifo_sclass[] = {
-	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gk104_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gk104_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gk104_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_context_ctor,
-		.dtor = gk104_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static inline int
-gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
-{
-	switch (engn) {
-	case NVDEV_ENGINE_GR    :
-	case NVDEV_ENGINE_CE2   : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
-	case NVDEV_ENGINE_MSENC : engn = 6; break;
-	default:
-		return -1;
-	}
-
-	return engn;
+		nvkm_error(subdev, "runlist %d update timeout\n", engine);
+	mutex_unlock(&subdev->mutex);
 }
 
 static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
+gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
 {
-	if (engn >= ARRAY_SIZE(fifo_engine))
-		return NULL;
-	return nvkm_engine(priv, fifo_engine[engn].subdev);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 subdevs = gk104_fifo_engine_subdev(engn);
+	if (subdevs)
+		return nvkm_device_engine(device, __ffs(subdevs));
+	return NULL;
 }
 
 static void
 gk104_fifo_recover_work(struct work_struct *work)
 {
-	struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gk104_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gk104_fifo_subdev_engine(engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
-		gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
+		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
 	}
 
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
+gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
 		  struct gk104_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
-	chan->state = KILLED;
+	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gk104_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -516,18 +151,16 @@
 };
 
 static void
-gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_bind(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00252c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00252c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_bind_reason, code);
 
-	en = nvkm_enum_find(gk104_fifo_bind_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
-
-	nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
 }
 
 static const struct nvkm_enum
@@ -537,14 +170,17 @@
 };
 
 static void
-gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gk104_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
-	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 next = (stat & 0x07ff0000) >> 16;
 		u32 chsw = (stat & 0x00008000);
@@ -555,32 +191,35 @@
 		(void)save;
 
 		if (busy && chsw) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gk104_fifo_engine(priv, engn)))
-				continue;
-			gk104_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gk104_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gk104_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
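
The 0x002640 status word is decoded field by field: bit 31 is the engine-busy
flag, bits 26:16 name the next channel, bit 15 flags a pending channel
switch, and the save/load bits (in unchanged context the diff elides) select
whether next or prev identifies the faulting channel. The rework also
replaces the flat channel[] table with a search of the per-engine chan list,
so only channels resident on that runlist are considered. A sketch of the
visible part of the decode, with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_engn_status(uint32_t stat)
    {
        unsigned busy = (stat & 0x80000000) != 0;
        unsigned next = (stat & 0x07ff0000) >> 16;
        unsigned chsw = (stat & 0x00008000) != 0;

        printf("busy=%u chsw=%u next=%u\n", busy, chsw, next);
    }

    int main(void)
    {
        decode_engn_status(0x8005800a); /* hypothetical: busy, chsw, next=5 */
        return 0;
    }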
 
 static void
-gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_sched_reason, code);
 
-	en = nvkm_enum_find(gk104_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
-
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gk104_fifo_intr_sched_ctxsw(priv);
+		gk104_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -588,38 +227,42 @@
 }
 
 static void
-gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00256c);
-	nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
-	nv_wr32(priv, 0x00256c, stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00256c);
+	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+	nvkm_wr32(device, 0x00256c, stat);
 }
 
 static void
-gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00259c);
-	nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00259c);
+	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
 }
 
 static const struct nvkm_enum
 gk104_fifo_fault_engine[] = {
-	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
+	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
+	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PERF" },
-	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PMU" },
-	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
-	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
+	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
 	{}
 };
 
@@ -708,80 +351,65 @@
 };
 
 static void
-gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
-	if (eu) {
-		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
-			break;
-		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
-			break;
-		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
-	}
-
 	if (hub) {
 		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
 	} else {
 		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case KEPLER_CHANNEL_GPFIFO_A:
-		case MAXWELL_CHANNEL_GPFIFO_A:
-			gk104_fifo_recover(priv, engine, (void *)object);
+	if (eu) {
+		switch (eu->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-		object = object->parent;
 	}
 
-	nvkm_engctx_put(engctx);
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
+
+	if (engine && chan)
+		gk104_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
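
The fault decode leans on three nvkm_enum tables (reason, engine/unit,
client). The old code fabricated "UNKxx" strings when a lookup missed; the
new message always prints the raw code next to the name, so a miss simply
shows an empty bracket. The lookup itself is a linear scan over a
zero-terminated table, roughly as below (table contents are a small
illustrative subset, not the driver's full tables):

    #include <stdio.h>

    struct lookup { unsigned value; const char *name; };

    static const struct lookup fault_engine[] = {
        { 0x00, "GR" },
        { 0x04, "BAR1" },
        { 0x15, "CE0" },
        {} /* zero terminator, as in the driver tables */
    };

    static const char *enum_find(const struct lookup *en, unsigned value)
    {
        for (; en->name; en++)
            if (en->value == value)
                return en->name;
        return NULL;
    }

    int main(void)
    {
        unsigned unit = 0x15;
        const char *name = enum_find(fault_engine, unit);

        printf("engine %02x [%s]\n", unit, name ? name : "");
        return 0;
    }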
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -819,35 +447,42 @@
 };
 
 static void
-gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char msg[128];
 
 	if (stat & 0x00800000) {
-		if (!gk104_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
-		nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
+		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
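
nvkm_bitfield_print() wrote straight to the console mid-line; nvkm_snprintbf()
instead renders the set bits into a caller-supplied buffer so the whole error
fits in a single nvkm_error() call. A rough userspace equivalent (bit names
are illustrative, not the driver's full table):

    #include <stdint.h>
    #include <stdio.h>

    struct bitfield { uint32_t mask; const char *name; };

    static const struct bitfield pbdma_intr[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00800000, "DEVICE" },
        {}
    };

    static void snprintbf(char *buf, size_t size,
                          const struct bitfield *bf, uint32_t value)
    {
        size_t len = 0;
        buf[0] = '\0';
        for (; bf->name; bf++) {
            if (!(value & bf->mask))
                continue;
            int n = snprintf(buf + len, size - len, "%s%s",
                             len ? " " : "", bf->name);
            if (n < 0 || (size_t)n >= size - len)
                break; /* truncated */
            len += n;
        }
    }

    int main(void)
    {
        char msg[128];
        snprintbf(msg, sizeof(msg), pbdma_intr, 0x00800001);
        printf("PBDMA0: 00800001 [%s]\n", msg);
        return 0;
    }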
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
@@ -860,280 +495,266 @@
 };
 
 static void
-gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+	char msg[128];
 
 	if (stat) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
-		pr_cont("\n");
-		nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
-			 nv_rd32(priv, 0x040150 + (unit * 0x2000)),
-			 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+			   unit, stat, msg, chid,
+			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
 	}
 
-	nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
 }
 
 static void
-gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x002a00);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x002a00);
 	while (mask) {
 		u32 engn = __ffs(mask);
-		wake_up(&priv->engine[engn].wait);
-		nv_wr32(priv, 0x002a00, 1 << engn);
+		wake_up(&fifo->engine[engn].wait);
+		nvkm_wr32(device, 0x002a00, 1 << engn);
 		mask &= ~(1 << engn);
 	}
 }
 
 static void
-gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_engine(struct gk104_fifo *fifo)
 {
-	nvkm_fifo_uevent(&priv->base);
+	nvkm_fifo_uevent(&fifo->base);
 }
 
-static void
-gk104_fifo_intr(struct nvkm_subdev *subdev)
+void
+gk104_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		gk104_fifo_intr_bind(priv);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		gk104_fifo_intr_bind(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		nv_error(priv, "PIO_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x00000010);
+		nvkm_error(subdev, "PIO_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000100) {
-		gk104_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gk104_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		gk104_fifo_intr_chsw(priv);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		gk104_fifo_intr_chsw(fifo);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x00800000) {
-		nv_error(priv, "FB_FLUSH_TIMEOUT\n");
-		nv_wr32(priv, 0x002100, 0x00800000);
+		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+		nvkm_wr32(device, 0x002100, 0x00800000);
 		stat &= ~0x00800000;
 	}
 
 	if (stat & 0x01000000) {
-		nv_error(priv, "LB_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x01000000);
+		nvkm_error(subdev, "LB_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x08000000) {
-		gk104_fifo_intr_dropped_fault(priv);
-		nv_wr32(priv, 0x002100, 0x08000000);
+		gk104_fifo_intr_dropped_fault(fifo);
+		nvkm_wr32(device, 0x002100, 0x08000000);
 		stat &= ~0x08000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gk104_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_pbdma_0(priv, unit);
-			gk104_fifo_intr_pbdma_1(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gk104_fifo_intr_pbdma_0(fifo, unit);
+			gk104_fifo_intr_pbdma_1(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gk104_fifo_intr_runlist(priv);
+		gk104_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		nv_wr32(priv, 0x002100, 0x80000000);
-		gk104_fifo_intr_engine(priv);
+		nvkm_wr32(device, 0x002100, 0x80000000);
+		gk104_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
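
The top-level handler follows a handle-and-clear discipline: every recognised
source clears its bit in the local stat copy once serviced and acknowledged,
so whatever is left at the end is by construction unexpected and gets masked
off in 0x002140 to prevent an interrupt storm. The control flow reduces to
this shape (register reads and writes stubbed with variables):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t intr_en = 0x7fffffff;        /* stand-in for 0x002140 */
        uint32_t stat = 0x00000101 & intr_en; /* stand-in for 0x002100 */

        if (stat & 0x00000001) {
            printf("bind error handled\n");
            stat &= ~0x00000001;
        }
        if (stat & 0x00000100) {
            printf("sched error handled\n");
            stat &= ~0x00000100;
        }
        if (stat) {
            printf("INTR %08x unhandled, masking\n", stat);
            intr_en &= ~stat; /* nvkm_mask(device, 0x002140, stat, 0) */
        }
        return 0;
    }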
 
-static void
-gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+void
+gk104_fifo_fini(struct nvkm_fifo *base)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gk104_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gk104_fifo_uevent_init,
-	.fini = gk104_fifo_uevent_fini,
-};
-
-int
-gk104_fifo_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fifo_fini(&priv->base, suspend);
-	if (ret)
-		return ret;
-
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	flush_work(&fifo->fault);
 	/* allow mmu fault interrupts, even when we're not using fifo */
-	nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
-	return 0;
+	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
 int
-gk104_fifo_init(struct nvkm_object *object)
+gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret, i;
 
-	ret = nvkm_fifo_init(&priv->base);
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[0]);
+		if (ret)
+			return ret;
+
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[1]);
+		if (ret)
+			return ret;
+
+		init_waitqueue_head(&fifo->engine[i].wait);
+		INIT_LIST_HEAD(&fifo->engine[i].chan);
+	}
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      fifo->base.nr * 0x200, 0x1000, true,
+			      &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	/* enable all available PBDMA units */
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
+			    &fifo->user.bar);
+	if (ret)
+		return ret;
 
-	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
-	}
-
-	/* PBDMA[n].HCE */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
-	}
-
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
-
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
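
Each engine gets two runlist buffers plus the cur_runlist index declared in
gk104.h. The point of the pair is that gk104_fifo_runlist_update() can build
the next list in the idle buffer while the hardware may still be fetching the
previous one, then flip. The update code itself sits outside this hunk, so
the flip below is a sketch of the presumed scheme, with build-and-submit
reduced to a printf:

    #include <stdio.h>

    struct engn {
        const char *runlist[2];
        int cur_runlist;
    };

    static void runlist_update(struct engn *e)
    {
        e->cur_runlist = !e->cur_runlist; /* flip to the idle buffer */
        printf("build+submit %s\n", e->runlist[e->cur_runlist]);
    }

    int main(void)
    {
        struct engn e = { { "buffer A", "buffer B" }, 0 };
        runlist_update(&e); /* writes buffer B while A may still be live */
        runlist_update(&e);
        return 0;
    }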
 
 void
-gk104_fifo_dtor(struct nvkm_object *object)
+gk104_fifo_init(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i;
 
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
+	/* enable all available PBDMA units */
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+	/* PBDMA[n] */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
-	nvkm_fifo_destroy(&priv->base);
+	/* PBDMA[n].HCE */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+	}
+
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
+}
+
+void *
+gk104_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	int i;
+
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		nvkm_memory_del(&fifo->engine[i].runlist[1]);
+		nvkm_memory_del(&fifo->engine[i].runlist[0]);
+	}
+
+	return fifo;
 }
 
 int
-gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+		int index, int nr, struct nvkm_fifo **pfifo)
 {
-	struct gk104_fifo_impl *impl = (void *)oclass;
-	struct gk104_fifo_priv *priv;
-	int ret, i;
+	struct gk104_fifo *fifo;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0,
-			       impl->channels - 1, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+	*pfifo = &fifo->base;
 
-	INIT_WORK(&priv->fault, gk104_fifo_recover_work);
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[0]);
-		if (ret)
-			return ret;
-
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[1]);
-		if (ret)
-			return ret;
-
-		init_waitqueue_head(&priv->engine[i].wait);
-	}
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gk104_fifo_intr;
-	nv_engine(priv)->cclass = &gk104_fifo_cclass;
-	nv_engine(priv)->sclass = gk104_fifo_sclass;
-	return 0;
+	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 }
 
-struct nvkm_oclass *
-gk104_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk104_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 4096,
-}.base;
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 318d30d..5afd9b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,18 +1,77 @@
-#ifndef __NVKM_FIFO_NVE0_H__
-#define __NVKM_FIFO_NVE0_H__
-#include <engine/fifo.h>
+#ifndef __GK104_FIFO_H__
+#define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
+#include "priv.h"
 
-int  gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gk104_fifo_dtor(struct nvkm_object *);
-int  gk104_fifo_init(struct nvkm_object *);
-int  gk104_fifo_fini(struct nvkm_object *, bool);
+#include <subdev/mmu.h>
 
-struct gk104_fifo_impl {
-	struct nvkm_oclass base;
-	u32 channels;
+struct gk104_fifo_engn {
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
+	wait_queue_head_t wait;
+	struct list_head chan;
 };
 
-extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
+struct gk104_fifo {
+	struct nvkm_fifo base;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct gk104_fifo_engn engine[7];
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		    int index, int nr, struct nvkm_fifo **);
+void *gk104_fifo_dtor(struct nvkm_fifo *);
+int gk104_fifo_oneinit(struct nvkm_fifo *);
+void gk104_fifo_init(struct nvkm_fifo *);
+void gk104_fifo_fini(struct nvkm_fifo *);
+void gk104_fifo_intr(struct nvkm_fifo *);
+void gk104_fifo_uevent_init(struct nvkm_fifo *);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *);
+void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
+
+static inline u64
+gk104_fifo_engine_subdev(int engine)
+{
+	switch (engine) {
+	case 0: return (1ULL << NVKM_ENGINE_GR) |
+		       (1ULL << NVKM_ENGINE_SW) |
+		       (1ULL << NVKM_ENGINE_CE2);
+	case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
+	case 2: return (1ULL << NVKM_ENGINE_MSPPP);
+	case 3: return (1ULL << NVKM_ENGINE_MSVLD);
+	case 4: return (1ULL << NVKM_ENGINE_CE0);
+	case 5: return (1ULL << NVKM_ENGINE_CE1);
+	case 6: return (1ULL << NVKM_ENGINE_MSENC);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static inline int
+gk104_fifo_subdev_engine(int subdev)
+{
+	switch (subdev) {
+	case NVKM_ENGINE_GR:
+	case NVKM_ENGINE_SW:
+	case NVKM_ENGINE_CE2   : return 0;
+	case NVKM_ENGINE_MSPDEC: return 1;
+	case NVKM_ENGINE_MSPPP : return 2;
+	case NVKM_ENGINE_MSVLD : return 3;
+	case NVKM_ENGINE_CE0   : return 4;
+	case NVKM_ENGINE_CE1   : return 5;
+	case NVKM_ENGINE_MSENC : return 6;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
 #endif
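
The two inline helpers above are inverses: gk104_fifo_subdev_engine() maps a
subdev index onto one of the seven runlist engines, and
gk104_fifo_engine_subdev() expands a runlist engine back into the mask of
subdev bits it serves (runlist 0 covers GR, SW, and CE2 together). A
round-trip demo with a local enum standing in for the NVKM_ENGINE_* indices
(the values here are arbitrary):

    #include <stdio.h>

    enum { ENG_GR = 10, ENG_MSPDEC = 11, ENG_CE0 = 12 };

    static int subdev_to_runlist(int subdev)
    {
        switch (subdev) {
        case ENG_GR:     return 0;
        case ENG_MSPDEC: return 1;
        case ENG_CE0:    return 4;
        default:         return 0;
        }
    }

    static unsigned long long runlist_to_subdevs(int engine)
    {
        switch (engine) {
        case 0: return 1ULL << ENG_GR;
        case 1: return 1ULL << ENG_MSPDEC;
        case 4: return 1ULL << ENG_CE0;
        default: return 0;
        }
    }

    int main(void)
    {
        int rl = subdev_to_runlist(ENG_CE0);
        printf("CE0 -> runlist %d -> subdev mask %llx\n",
               rl, runlist_to_subdevs(rl));
        return 0;
    }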
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 9270922..ce01c1a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -22,15 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk208_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gk208_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 1024,
-}.base;
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b30dc87..b47fe98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -20,15 +20,25 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk20a_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk20a_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 128,
-}.base;
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
index 749d525..2db629f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -22,36 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-#include <nvif/class.h>
-
-static struct nvkm_oclass
-gm204_fifo_sclass[] = {
-	{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
+static const struct nvkm_fifo_func
+gm204_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
 };
 
-static int
-gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret == 0) {
-		struct gk104_fifo_priv *priv = (void *)*pobject;
-		nv_engine(priv)->sclass = gm204_fifo_sclass;
-	}
-	return ret;
+	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
 }
-
-struct nvkm_oclass *
-gm204_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-	.channels = 4096,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 0000000..ae6375d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_fifo_func
+gm20b_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
+int
+gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+}
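
gk208, gk20a, gm204, and gm20b now differ from gk104 only in their const
nvkm_fifo_func table (the gm204-class chips swap in the Maxwell gpfifo class)
and in the channel count passed to gk104_fifo_new_(): 4096 on gk104/gm204,
1024 on gk208, 512 on gm20b, 128 on gk20a. A stripped-down sketch of that
share-the-implementation, parameterise-by-table pattern (types and names are
stand-ins, not the nvkm ones):

    #include <stdio.h>

    struct fifo_func { const char *chan_class; };
    struct fifo { const struct fifo_func *func; int nr; };

    static void fifo_new_(const struct fifo_func *func, int nr,
                          struct fifo *fifo)
    {
        fifo->func = func; /* per-chip vtable */
        fifo->nr = nr;     /* per-chip channel count */
    }

    static const struct fifo_func gk104_like = { "KEPLER_CHANNEL_GPFIFO_A" };
    static const struct fifo_func gm204_like = { "MAXWELL_CHANNEL_GPFIFO_A" };

    int main(void)
    {
        struct fifo a, b;
        fifo_new_(&gk104_like, 4096, &a); /* discrete Kepler */
        fifo_new_(&gm204_like, 4096, &b); /* second-gen Maxwell */
        printf("%s x%d / %s x%d\n", a.func->chan_class, a.nr,
               b.func->chan_class, b.nr);
        return 0;
    }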
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 0000000..82013236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+	.base.oclass = G82_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_gpfifo_new,
+};
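
In the RAMFC writes above, the ring length is stored as a power-of-two
exponent: ilength = order_base_2(args->v0.ilength / 8) counts 8-byte GPFIFO
entries, and that exponent is packed into bits 16+ of the same word as the
upper half of the ring's GPU virtual address. A small demo of the packing,
with a hypothetical ring address and order_base_2() open-coded as
ceil(log2(n)):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned order_base_2(uint64_t n) /* ceil(log2(n)) */
    {
        return n <= 1 ? 0 : 64 - __builtin_clzll(n - 1);
    }

    int main(void)
    {
        uint64_t ioffset = 0x20000000ULL;             /* hypothetical ring VA */
        unsigned ilength = order_base_2(0x10000 / 8); /* 64KiB / 8B entries */
        uint32_t word = (uint32_t)(ioffset >> 32) | (ilength << 16);

        printf("ilength=%u word=%08x\n", ilength, word);
        return 0;
    }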
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 0000000..e7cbc13
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    : return 0;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_CE0   : return 0x0230;
+	case NVKM_ENGINE_CE1   : return 0x0240;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret = 0;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		ret = -EBUSY;
+		if (suspend)
+			return ret;
+	}
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gf100_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
+	if (!list_empty(&chan->head) && !chan->killed) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
+		gf100_fifo_runlist_update(fifo);
+	}
+
+	gf100_fifo_intr_engine(fifo);
+
+	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
+}
+
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->chan);
+		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
+		gf100_fifo_runlist_update(fifo);
+	}
+}
+
+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+	.dtor = gf100_fifo_gpfifo_dtor,
+	.init = gf100_fifo_gpfifo_init,
+	.fini = gf100_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+	.engine_init = gf100_fifo_gpfifo_engine_init,
+	.engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct fermi_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength);
+	} else
+		return ret;
+
+	/* allocate channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CE1) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  1, fifo->user.bar.offset, 0x1000,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+
+	usermem = chan->base.chid * 0x1000;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+	.base.oclass = FERMI_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gf100_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 0000000..0b81754
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_client *client = chan->base.object.client;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, client->name);
+		return -EBUSY;
+	}
+
+	return 0;
+}
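
nvkm_msec() is a poll-until-condition-or-deadline construct: the body runs
repeatedly and the expression appears to go negative on timeout, which is why
the result is compared against 0 and turned into -EBUSY. The same shape in
portable C, with the 0x002634 readback replaced by a stub predicate:

    #define _POSIX_C_SOURCE 199309L
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool poll_until(bool (*cond)(void), long budget_ms)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond())
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
                           (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed >= budget_ms)
                return false; /* the driver returns -EBUSY here */
        }
    }

    static bool kick_done(void) { return true; } /* stand-in for reg read */

    int main(void)
    {
        printf(poll_until(kick_done, 2000) ? "kicked\n" : "timeout\n");
        return 0;
    }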
+
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    :
+	case NVKM_ENGINE_CE0   :
+	case NVKM_ENGINE_CE1   :
+	case NVKM_ENGINE_CE2   : return 0x0000;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret;
+
+	ret = gk104_fifo_gpfifo_kick(chan);
+	if (ret && suspend)
+		return ret;
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gk104_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
+	if (!list_empty(&chan->head)) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+	}
+
+	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
+}
+
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+	}
+}
+
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gk104_fifo_gpfifo_engine_init,
+	.engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct kepler_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gk104_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	u32 engines;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x engine %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.engine);
+	} else
+		return ret;
+
+	/* determine which downstream engines are present */
+	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		u64 subdevs = gk104_fifo_engine_subdev(i);
+		if (!nvkm_device_engine(device, __ffs64(subdevs)))
+			continue;
+		engines |= (1 << i);
+	}
+
+	/* if this is an engine mask query, we're done */
+	if (!args->v0.engine) {
+		args->v0.engine = engines;
+		return nvkm_object_new(oclass, NULL, 0, pobject);
+	}
+
+	/* check that we support a requested engine - note that the user
+	 * argument is a mask in order to allow the user to request (for
+	 * example) *any* copy engine without caring which one it gets.
+	 */
+	args->v0.engine &= engines;
+	if (!args->v0.engine) {
+		nvif_ioctl(parent, "no supported engine\n");
+		return -ENODEV;
+	}
+
+	/* allocate the channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	chan->engine = __ffs(args->v0.engine);
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  gk104_fifo_engine_subdev(chan->engine),
+				  1, fifo->user.bar.offset, 0x200,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x200; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
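
Channel creation above doubles as capability negotiation: engine == 0 is a
query that fills in the supported mask and completes with a bare object
rather than a channel, while a non-zero request is intersected with the
supported mask and __ffs() picks the lowest surviving runlist engine, so
"any copy engine" requests resolve naturally. Reduced to its core, with an
illustrative supported mask:

    #include <stdint.h>
    #include <stdio.h>

    static int choose_engine(uint32_t *engine, uint32_t supported)
    {
        if (*engine == 0) {    /* mask query */
            *engine = supported;
            return -2;         /* driver returns a plain object here */
        }
        *engine &= supported;
        if (*engine == 0)
            return -1;         /* -ENODEV in the driver */
        return __builtin_ctz(*engine); /* like __ffs() */
    }

    int main(void)
    {
        uint32_t req = 0x30;   /* hypothetically "any of CE0/CE1" */
        int engn = choose_engine(&req, 0x3f);

        printf("selected runlist engine %d\n", engn);
        return 0;
    }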
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
new file mode 100644
index 0000000..6511d6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
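+/* behaviourally identical to gk104; only the exposed object class differs */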
+const struct nvkm_fifo_chan_oclass
+gm204_fifo_gpfifo_oclass = {
+	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 0000000..a8c69f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		     void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+	.base.oclass = NV50_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 043e429..ad707ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -22,20 +22,17 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
-#include <core/handle.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 #include <subdev/timer.h>
+#include <engine/sw.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv04_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv04_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -47,268 +44,19 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-int
-nv04_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00010000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00020000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	context |= 0x80000000; /* valid */
-	context |= chid << 24;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
 void
-nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
+nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
+__acquires(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nvkm_ramht_remove(priv->ramht, cookie);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-int
-nv04_fifo_context_attach(struct nvkm_object *parent,
-			 struct nvkm_object *object)
-{
-	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
-	return 0;
-}
-
-static int
-nv04_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-void
-nv04_fifo_chan_dtor(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct ramfc_desc *c = priv->ramfc_desc;
-
-	do {
-		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
-	} while ((++c)->bits);
-
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-int
-nv04_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	u32 mask = 1 << chan->base.chid;
-	unsigned long flags;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-int
-nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *fctx = priv->ramfc;
-	struct ramfc_desc *c;
-	unsigned long flags;
-	u32 data = chan->ramfc;
-	u32 chid;
-
-	/* prevent fifo context switches */
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
-
-	/* if this channel is active, replace it with a null context */
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	if (chid == chan->base.chid) {
-		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
-		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
-		c = priv->ramfc_desc;
-		do {
-			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
-			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
-			u32 rv = (nv_rd32(priv, c->regp) &  rm) >> c->regs;
-			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
-			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
-		} while ((++c)->bits);
-
-		c = priv->ramfc_desc;
-		do {
-			nv_wr32(priv, c->regp, 0x00000000);
-		} while ((++c)->bits);
-
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	}
-
-	/* restore normal operation, after disabling dma mode */
-	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv04_fifo_ofuncs = {
-	.ctor = nv04_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv04_fifo_sclass[] = {
-	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-int
-nv04_fifo_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv04_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-nv04_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-void
-nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__acquires(priv->base.lock)
-{
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
 	*pflags = flags;
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
 
 	/* in some cases the puller may be left in an inconsistent state
 	 * if you try to stop it while it's busy translating handles.
@@ -319,28 +67,31 @@
 	 * to avoid this, we invalidate the most recently calculated
 	 * instance.
 	 */
-	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
-			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
-		nv_warn(priv, "timeout idling puller\n");
+	nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+			break;
+	);
 
-	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
 			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
 }
 
 void
-nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__releases(priv->base.lock)
+nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
+__releases(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags = *pflags;
 
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
 
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static const char *
@@ -354,61 +105,40 @@
 }
 
 static bool
-nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
 {
-	struct nv04_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
+	struct nvkm_sw *sw = device->sw;
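+	/* a CACHE1 address encodes the subchannel in bits 15:13, method in 12:2 */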
+	const int subc = (addr & 0x0000e000) >> 13;
+	const int mthd = (addr & 0x00001ffc);
+	const u32 mask = 0x0000000f << (subc * 4);
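+	/* NV04_PFIFO_CACHE1_ENGINE: four bits of engine binding per subchannel */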
+	u32 engine = nvkm_rd32(device, 0x003280);
 	bool handled = false;
-	unsigned long flags;
-	u32 engine;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
 
 	switch (mthd) {
-	case 0x0000:
-		bind = nvkm_namedb_get(nv_namedb(chan), data);
-		if (unlikely(!bind))
-			break;
-
-		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
-			engine = 0x0000000f << (subc * 4);
-			chan->subc[subc] = data;
-			handled = true;
-
-			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
-		}
-
-		nvkm_namedb_put(bind);
+	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+		nvkm_wr32(device, 0x003280, (engine &= ~mask));
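+		/* fallthrough */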
+	case 0x0180 ... 0x01fc: /* handle -> instance */
+		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
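+		/* fallthrough */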
+	case 0x0100 ... 0x017c:
+	case 0x0200 ... 0x1ffc: /* pass method down to sw */
+		if (!(engine & mask) && sw)
+			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
 		break;
 	default:
-		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
-		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
-			break;
-
-		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
-		if (likely(bind)) {
-			if (!nv_call(bind->object, mthd, data))
-				handled = true;
-			nvkm_namedb_put(bind);
-		}
 		break;
 	}
 
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
 	return handled;
 }
 
 static void
-nv04_fifo_cache_error(struct nvkm_device *device,
-		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
+nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
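+	/* NV04_PFIFO_CACHE1_PULL0 */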
+	u32 pull0 = nvkm_rd32(device, 0x003250);
 	u32 mthd, data;
 	int ptr;
 
@@ -420,216 +150,214 @@
 	ptr = (get & 0x7ff) >> 2;
 
 	if (device->card_type < NV_40) {
-		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
 	} else {
-		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
 	}
 
-	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-		const char *client_name =
-			nvkm_client_name_for_fifo_chid(&priv->base, chid);
-		nv_error(priv,
-			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
-			 data);
+	if (!(pull0 & 0x00000100) ||
+	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "CACHE_ERROR - "
+			   "ch %d [%s] subc %d mthd %04x data %08x\n",
+			   chid, chan ? chan->object.client->name : "unknown",
+			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
-		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 }
 
 static void
-nv04_fifo_dma_pusher(struct nvkm_device *device,
-		     struct nv04_fifo_priv *priv, u32 chid)
+nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 {
-	const char *client_name;
-	u32 dma_get = nv_rd32(priv, 0x003244);
-	u32 dma_put = nv_rd32(priv, 0x003240);
-	u32 push = nv_rd32(priv, 0x003220);
-	u32 state = nv_rd32(priv, 0x003228);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 dma_get = nvkm_rd32(device, 0x003244);
+	u32 dma_put = nvkm_rd32(device, 0x003240);
+	u32 push = nvkm_rd32(device, 0x003220);
+	u32 state = nvkm_rd32(device, 0x003228);
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	const char *name;
 
-	client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);
-
+	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+	name = chan ? chan->object.client->name : "unknown";
 	if (device->card_type == NV_50) {
-		u32 ho_get = nv_rd32(priv, 0x003328);
-		u32 ho_put = nv_rd32(priv, 0x003320);
-		u32 ib_get = nv_rd32(priv, 0x003334);
-		u32 ib_put = nv_rd32(priv, 0x003330);
+		u32 ho_get = nvkm_rd32(device, 0x003328);
+		u32 ho_put = nvkm_rd32(device, 0x003320);
+		u32 ib_get = nvkm_rd32(device, 0x003334);
+		u32 ib_put = nvkm_rd32(device, 0x003330);
 
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
-			 ib_get, ib_put, state, nv_dma_state_err(state), push);
+		nvkm_error(subdev, "DMA_PUSHER - "
+			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+			   "ib_put %08x state %08x (err: %s) push %08x\n",
+			   chid, name, ho_get, dma_get, ho_put, dma_put,
+			   ib_get, ib_put, state, nv_dma_state_err(state),
+			   push);
 
 		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-		nv_wr32(priv, 0x003364, 0x00000000);
+		nvkm_wr32(device, 0x003364, 0x00000000);
 		if (dma_get != dma_put || ho_get != ho_put) {
-			nv_wr32(priv, 0x003244, dma_put);
-			nv_wr32(priv, 0x003328, ho_put);
+			nvkm_wr32(device, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003328, ho_put);
 		} else
 		if (ib_get != ib_put)
-			nv_wr32(priv, 0x003334, ib_put);
+			nvkm_wr32(device, 0x003334, ib_put);
 	} else {
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, dma_get, dma_put, state,
-			 nv_dma_state_err(state), push);
+		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+				   "state %08x (err: %s) push %08x\n",
+			   chid, name, dma_get, dma_put, state,
+			   nv_dma_state_err(state), push);
 
 		if (dma_get != dma_put)
-			nv_wr32(priv, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003244, dma_put);
 	}
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 
-	nv_wr32(priv, 0x003228, 0x00000000);
-	nv_wr32(priv, 0x003220, 0x00000001);
-	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+	nvkm_wr32(device, 0x003228, 0x00000000);
+	nvkm_wr32(device, 0x003220, 0x00000001);
+	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
 }
 
 void
-nv04_fifo_intr(struct nvkm_subdev *subdev)
+nv04_fifo_intr(struct nvkm_fifo *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nv04_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
-	u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
 	u32 reassign, chid, get, sem;
 
-	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
 
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	get  = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
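+	/* the channel count is a power of two, so nr - 1 masks a valid chid */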
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
+	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
 
 	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
-		nv04_fifo_cache_error(device, priv, chid, get);
+		nv04_fifo_cache_error(fifo, chid, get);
 		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
 	}
 
 	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
-		nv04_fifo_dma_pusher(device, priv, chid);
+		nv04_fifo_dma_pusher(fifo, chid);
 		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
 	}
 
 	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
 		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
 
-		sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
-		nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
 
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 	}
 
 	if (device->card_type == NV_50) {
 		if (stat & 0x00000010) {
 			stat &= ~0x00000010;
-			nv_wr32(priv, 0x002100, 0x00000010);
+			nvkm_wr32(device, 0x002100, 0x00000010);
 		}
 
 		if (stat & 0x40000000) {
-			nv_wr32(priv, 0x002100, 0x40000000);
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_wr32(device, 0x002100, 0x40000000);
+			nvkm_fifo_uevent(&fifo->base);
 			stat &= ~0x40000000;
 		}
 	}
 
 	if (stat) {
-		nv_warn(priv, "unknown intr 0x%08x\n", stat);
-		nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
-		nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
-}
-
-static int
-nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv04_fifo_cclass;
-	nv_engine(priv)->sclass = nv04_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv04_ramfc;
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
 }
 
 void
-nv04_fifo_dtor(struct nvkm_object *object)
+nv04_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_fifo_destroy(&priv->base);
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
+
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
 int
-nv04_fifo_init(struct nvkm_object *object)
+nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+	       struct nvkm_fifo **pfifo)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
+	struct nv04_fifo *fifo;
 	int ret;
 
-	ret = nvkm_fifo_init(&priv->base);
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	fifo->ramfc = ramfc;
+	*pfifo = &fifo->base;
+
+	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv04_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv04_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
+			      nv04_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e0e0c47..03f6000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,137 +1,9 @@
 #ifndef __NV04_FIFO_H__
 #define __NV04_FIFO_H__
-#include <engine/fifo.h>
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
+#include "priv.h"
 
-#define NV04_PFIFO_DELAY_0                                 0x00002040
-#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
-#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
-#define NV03_PFIFO_INTR_0                                  0x00002100
-#define NV03_PFIFO_INTR_EN_0                               0x00002140
-#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
-#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
-#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
-#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
-#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
-#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
-#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
-#define NV03_PFIFO_RAMHT                                   0x00002210
-#define NV03_PFIFO_RAMFC                                   0x00002214
-#define NV03_PFIFO_RAMRO                                   0x00002218
-#define NV40_PFIFO_RAMFC                                   0x00002220
-#define NV03_PFIFO_CACHES                                  0x00002500
-#define NV04_PFIFO_MODE                                    0x00002504
-#define NV04_PFIFO_DMA                                     0x00002508
-#define NV04_PFIFO_SIZE                                    0x0000250c
-#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
-#define NV50_PFIFO_CTX_TABLE__SIZE                                128
-#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
-#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
-#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
-#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
-#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
-#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
-#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
-#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
-#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
-#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
-#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
-#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
-#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
-#define NV03_PFIFO_CACHE1_PUT                              0x00003210
-#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
-#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
-#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
-#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
-#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
-#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
-#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
-#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
-#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
-#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
-#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
-#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
-#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
-#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
-#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
-#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
-#define NV04_PFIFO_CACHE1_HASH                             0x00003258
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
-#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
-#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
-#define NV03_PFIFO_CACHE1_GET                              0x00003270
-#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
-#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
-#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
-#define NV40_PFIFO_UNK32E4                                 0x000032E4
-#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
-#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
-#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
-#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
-
-struct ramfc_desc {
+struct nv04_fifo_ramfc {
 	unsigned bits:6;
 	unsigned ctxs:5;
 	unsigned ctxp:8;
@@ -139,37 +11,13 @@
 	unsigned regp;
 };
 
-struct nv04_fifo_priv {
+struct nv04_fifo {
 	struct nvkm_fifo base;
-	struct ramfc_desc *ramfc_desc;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
+	const struct nv04_fifo_ramfc *ramfc;
 };
 
-struct nv04_fifo_base {
-	struct nvkm_fifo_base base;
-};
-
-struct nv04_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	u32 ramfc;
-};
-
-int  nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
-void nv04_fifo_object_detach(struct nvkm_object *, int);
-
-void nv04_fifo_chan_dtor(struct nvkm_object *);
-int  nv04_fifo_chan_init(struct nvkm_object *);
-int  nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
-
-int  nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-
-void nv04_fifo_dtor(struct nvkm_object *);
-int  nv04_fifo_init(struct nvkm_object *);
-void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
-void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, const struct nv04_fifo_ramfc *,
+		   struct nvkm_fifo **);
+void nv04_fifo_init(struct nvkm_fifo *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 48ce4af..f9a87de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -22,17 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv10_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv10_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -45,134 +39,21 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv10_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv10_fifo_ofuncs = {
-	.ctor = nv10_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv10_fifo_sclass[] = {
-	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv10_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv10_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv10_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv10_fifo_cclass;
-	nv_engine(priv)->sclass = nv10_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv10_ramfc;
-	return 0;
+	return nv04_fifo_new_(&nv10_fifo, device, index, 32,
+			      nv10_fifo_ramfc, pfifo);
 }
-
-struct nvkm_oclass *
-nv10_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 4a20a6f..f6d383a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -22,17 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv17_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv17_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,166 +47,51 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv17_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static void
+nv17_fifo_init(struct nvkm_fifo *base)
 {
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
-				       &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+					    0x00010000);
 
-	args->v0.chid = chan->base.chid;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
 
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 64;
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
 
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-static struct nvkm_ofuncs
-nv17_fifo_ofuncs = {
-	.ctor = nv17_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv17_fifo_sclass[] = {
-	{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv17_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv17_fifo = {
+	.init = nv17_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv17_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv17_fifo_cclass;
-	nv_engine(priv)->sclass = nv17_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv17_ramfc;
-	return 0;
+	return nv04_fifo_new_(&nv17_fifo, device, index, 32,
+			      nv17_fifo_ramfc, pfifo);
 }
-
-static int
-nv17_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
-}
-
-struct nvkm_oclass *
-nv17_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv17_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv17_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 5bfc962..8c7ba32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -22,19 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv40_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv40_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -60,297 +56,72 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv40_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
+static void
+nv40_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
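+	/* NV04_PFIFO_DELAY_0, NV04_PFIFO_DMA_TIMESLICE, and an unnamed 0x2058 */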
+	nvkm_wr32(device, 0x002040, 0x000000ff);
+	nvkm_wr32(device, 0x002044, 0x2101ffff);
+	nvkm_wr32(device, 0x002058, 0x00000001);
 
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00100000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00200000;
-		break;
-	default:
-		return -EINVAL;
-	}
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
 
-	context |= chid << 23;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-static int
-nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *engctx)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, 0x00000000);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x1000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	chan->ramfc = chan->base.chid * 128;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv40_fifo_ofuncs = {
-	.ctor = nv40_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv40_fifo_sclass[] = {
-	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv40_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv40_fifo_cclass;
-	nv_engine(priv)->sclass = nv40_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv40_ramfc;
-	return 0;
-}
-
-static int
-nv40_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002040, 0x000000ff);
-	nv_wr32(priv, 0x002044, 0x2101ffff);
-	nv_wr32(priv, 0x002058, 0x00000001);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(priv, 0x002230, 0x00000001);
+		nvkm_wr32(device, 0x002230, 0x00000001);
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
 	case 0x48:
-		nv_wr32(priv, 0x002220, 0x00030002);
+		nvkm_wr32(device, 0x002220, 0x00030002);
 		break;
 	default:
-		nv_wr32(priv, 0x002230, 0x00000000);
-		nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
-					 priv->ramfc->addr) >> 16) |
-					0x00030000);
+		nvkm_wr32(device, 0x002230, 0x00000000);
+		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+					      nvkm_memory_addr(ramfc)) >> 16) |
+					    0x00030000);
 		break;
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
 
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-struct nvkm_oclass *
-nv40_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv40_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv40_fifo = {
+	.init = nv40_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv40_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv40_fifo, device, index, 32,
+			      nv40_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index f25f0fd..66eb12c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -22,513 +22,126 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
+#include "channv50.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
+#include <core/gpuobj.h>
 
 static void
-nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *cur;
 	int i, p;
 
-	cur = priv->playlist[priv->cur_playlist];
-	priv->cur_playlist = !priv->cur_playlist;
+	cur = fifo->runlist[fifo->cur_runlist];
+	fifo->cur_runlist = !fifo->cur_runlist;
 
-	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
-		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
-			nv_wo32(cur, p++ * 4, i);
+	nvkm_kmap(cur);
+	for (i = 0, p = 0; i < fifo->base.nr; i++) {
+		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
+			nvkm_wo32(cur, p++ * 4, i);
 	}
+	nvkm_done(cur);
 
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
-	nv_wr32(priv, 0x0032ec, p);
-	nv_wr32(priv, 0x002500, 0x00000101);
+	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x0032ec, p);
+	nvkm_wr32(device, 0x002500, 0x00000101);
 }
 
 void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update(struct nv50_fifo *fifo)
 {
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv50_fifo_playlist_update_locked(priv);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, me;
-	int ret = 0;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	/* HW bug workaround:
-	 *
-	 * PFIFO will hang forever if the connected engines don't report
-	 * that they've processed the context switch request.
-	 *
-	 * In order for the kickoff to work, we need to ensure all the
-	 * connected engines are in a state where they can answer.
-	 *
-	 * Newer chipsets don't seem to suffer from this issue, and well,
-	 * there's also a "ignore these engines" bitmask reg we can use
-	 * if we hit the issue there..
-	 */
-	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
-
-	/* do the kickoff... */
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			ret = -EBUSY;
-	}
-	nv_wr32(priv, 0x00b860, me);
-
-	if (ret == 0) {
-		nv_wo32(base->eng, addr + 0x00, 0x00000000);
-		nv_wo32(base->eng, addr + 0x04, 0x00000000);
-		nv_wo32(base->eng, addr + 0x08, 0x00000000);
-		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-		nv_wo32(base->eng, addr + 0x10, 0x00000000);
-		nv_wo32(base->eng, addr + 0x14, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return ret;
-}
-
-static int
-nv50_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
-	default:
-		return -EINVAL;
-	}
-
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-void
-nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	nvkm_ramht_remove(chan->ramht, cookie);
-}
-
-static int
-nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
-}
-
-void
-nv50_fifo_chan_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_chan *chan = (void *)object;
-	nvkm_ramht_ref(NULL, &chan->ramht);
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-static int
-nv50_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
-	nv50_fifo_playlist_update(priv);
-	return 0;
+	mutex_lock(&fifo->base.engine.subdev.mutex);
+	nv50_fifo_runlist_update_locked(fifo);
+	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
 int
-nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+nv50_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	/* remove channel from playlist, fifo will unload context */
-	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
-	nv50_fifo_playlist_update(priv);
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_dma = {
-	.ctor = nv50_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_ind = {
-	.ctor = nv50_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv50_fifo_sclass[] = {
-	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
-	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv50_fifo_base *base;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->runlist[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
+	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			       false, &fifo->runlist[1]);
 }
 
 void
-nv50_fifo_context_dtor(struct nvkm_object *object)
+nv50_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv50_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->eng);
-	nvkm_gpuobj_ref(NULL, &base->ramfc);
-	nvkm_gpuobj_ref(NULL, &base->cache);
-	nvkm_fifo_context_destroy(&base->base);
-}
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int i;
 
-static struct nvkm_oclass
-nv50_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+	nvkm_wr32(device, 0x002044, 0x01003fff);
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv50_fifo_cclass;
-	nv_engine(priv)->sclass = nv50_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
-}
-
-void
-nv50_fifo_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
-	nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
-
-	nvkm_fifo_destroy(&priv->base);
-}
-
-int
-nv50_fifo_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
-	nv_wr32(priv, 0x002044, 0x01003fff);
-
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0xbfffffff);
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0xbfffffff);
 
 	for (i = 0; i < 128; i++)
-		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
-	nv50_fifo_playlist_update_locked(priv);
+		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+	nv50_fifo_runlist_update_locked(fifo);
 
-	nv_wr32(priv, 0x003200, 0x00000001);
-	nv_wr32(priv, 0x003250, 0x00000001);
-	nv_wr32(priv, 0x002500, 0x00000001);
+	nvkm_wr32(device, 0x003200, 0x00000001);
+	nvkm_wr32(device, 0x003250, 0x00000001);
+	nvkm_wr32(device, 0x002500, 0x00000001);
+}
+
+void *
+nv50_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	nvkm_memory_del(&fifo->runlist[1]);
+	nvkm_memory_del(&fifo->runlist[0]);
+	return fifo;
+}
+
+int
+nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fifo **pfifo)
+{
+	struct nv50_fifo *fifo;
+	int ret;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	*pfifo = &fifo->base;
+
+	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
+	if (ret)
+		return ret;
+
+	set_bit(0, fifo->base.mask); /* PIO channel */
+	set_bit(127, fifo->base.mask); /* inactive channel */
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv50_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv50_fifo_dma_oclass,
+		&nv50_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 09ed93c..8ab5394 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,36 +1,19 @@
 #ifndef __NV50_FIFO_H__
 #define __NV50_FIFO_H__
-#include <engine/fifo.h>
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
+#include "priv.h"
 
-struct nv50_fifo_priv {
+struct nv50_fifo {
 	struct nvkm_fifo base;
-	struct nvkm_gpuobj *playlist[2];
-	int cur_playlist;
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
 };
 
-struct nv50_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *ramfc;
-	struct nvkm_gpuobj *cache;
-	struct nvkm_gpuobj *eng;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, struct nvkm_fifo **);
 
-struct nv50_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	struct nvkm_ramht *ramht;
-};
-
-void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
-
-void nv50_fifo_object_detach(struct nvkm_object *, int);
-void nv50_fifo_chan_dtor(struct nvkm_object *);
-int  nv50_fifo_chan_fini(struct nvkm_object *, bool);
-
-void nv50_fifo_context_dtor(struct nvkm_object *);
-
-void nv50_fifo_dtor(struct nvkm_object *);
-int  nv50_fifo_init(struct nvkm_object *);
+void *nv50_fifo_dtor(struct nvkm_fifo *);
+int nv50_fifo_oneinit(struct nvkm_fifo *);
+void nv50_fifo_init(struct nvkm_fifo *);
+void nv50_fifo_runlist_update(struct nv50_fifo *);
 #endif
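The new nv50_fifo(p) macro in this header is a container_of() downcast: struct nvkm_fifo is embedded in struct nv50_fifo as "base", and the macro recovers the outer structure from a pointer to that member, replacing the old cast-through-void style. A self-contained sketch, assuming demo types (user space has no container_of, so one is defined locally):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_demo {		/* stands in for struct nvkm_fifo */
		int nr;
	};

	struct nv50_demo {		/* stands in for struct nv50_fifo */
		struct base_demo base;
		int cur_runlist;
	};

	#define nv50_demo(p) container_of((p), struct nv50_demo, base)

	int main(void)
	{
		struct nv50_demo fifo = { .base.nr = 128, .cur_runlist = 1 };
		struct base_demo *base = &fifo.base;

		/* recover the derived object from the base pointer */
		printf("cur_runlist = %d\n", nv50_demo(base)->cur_runlist);
		return 0;
	}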
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 0000000..cb1432e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, struct nvkm_fifo *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+
+struct nvkm_fifo_func {
+	void *(*dtor)(struct nvkm_fifo *);
+	int (*oneinit)(struct nvkm_fifo *);
+	void (*init)(struct nvkm_fifo *);
+	void (*fini)(struct nvkm_fifo *);
+	void (*intr)(struct nvkm_fifo *);
+	void (*pause)(struct nvkm_fifo *, unsigned long *);
+	void (*start)(struct nvkm_fifo *, unsigned long *);
+	void (*uevent_init)(struct nvkm_fifo *);
+	void (*uevent_fini)(struct nvkm_fifo *);
+	const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+void nv04_fifo_intr(struct nvkm_fifo *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+#endif
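Note that nvkm_fifo_func ends with a flexible array of channel-class pointers, chan[], which the nv40 and nv50 tables above terminate with NULL. Consumers can therefore walk the list until they hit the sentinel without knowing its length. A small sketch of that iteration, with demo stand-ins for the oclass type:

	#include <stdio.h>

	struct chan_oclass_demo {	/* stands in for nvkm_fifo_chan_oclass */
		const char *name;
	};

	static const struct chan_oclass_demo dma_demo    = { "dma" };
	static const struct chan_oclass_demo gpfifo_demo = { "gpfifo" };

	static const struct chan_oclass_demo *chan_demo[] = {
		&dma_demo,
		&gpfifo_demo,
		NULL,			/* sentinel, as in the tables above */
	};

	int main(void)
	{
		const struct chan_oclass_demo *const *oclass;

		for (oclass = chan_demo; *oclass; oclass++)
			printf("channel class: %s\n", (*oclass)->name);
		return 0;
	}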
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 0000000..92d5622
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,132 @@
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+#endif
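regsnv04.h is a plain register map: absolute PFIFO register offsets, with field masks and enumerated field values indented beneath the register they belong to. Use is ordinary read-mask-test C. A user-space sketch with two of the constants above and a faked register read standing in for nvkm_rd32():

	#include <stdint.h>
	#include <stdio.h>

	#define NV03_PFIFO_CACHE1_PUSH1            0x00003204
	#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK  0x0000007f

	static uint32_t fake_rd32(uint32_t reg)
	{
		(void)reg;
		return 0x0000002a;	/* pretend channel 42 is active */
	}

	int main(void)
	{
		uint32_t push1 = fake_rd32(NV03_PFIFO_CACHE1_PUSH1);

		printf("active channel: %u\n",
		       push1 & NV50_PFIFO_CACHE1_PUSH1_CHID_MASK);
		return 0;
	}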
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2e1b92f..9ad0d0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,21 +1,8 @@
-nvkm-y += nvkm/engine/gr/ctxnv40.o
-nvkm-y += nvkm/engine/gr/ctxnv50.o
-nvkm-y += nvkm/engine/gr/ctxgf100.o
-nvkm-y += nvkm/engine/gr/ctxgf108.o
-nvkm-y += nvkm/engine/gr/ctxgf104.o
-nvkm-y += nvkm/engine/gr/ctxgf110.o
-nvkm-y += nvkm/engine/gr/ctxgf117.o
-nvkm-y += nvkm/engine/gr/ctxgf119.o
-nvkm-y += nvkm/engine/gr/ctxgk104.o
-nvkm-y += nvkm/engine/gr/ctxgk20a.o
-nvkm-y += nvkm/engine/gr/ctxgk110.o
-nvkm-y += nvkm/engine/gr/ctxgk110b.o
-nvkm-y += nvkm/engine/gr/ctxgk208.o
-nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/base.o
 nvkm-y += nvkm/engine/gr/nv04.o
 nvkm-y += nvkm/engine/gr/nv10.o
+nvkm-y += nvkm/engine/gr/nv15.o
+nvkm-y += nvkm/engine/gr/nv17.o
 nvkm-y += nvkm/engine/gr/nv20.o
 nvkm-y += nvkm/engine/gr/nv25.o
 nvkm-y += nvkm/engine/gr/nv2a.o
@@ -23,18 +10,43 @@
 nvkm-y += nvkm/engine/gr/nv34.o
 nvkm-y += nvkm/engine/gr/nv35.o
 nvkm-y += nvkm/engine/gr/nv40.o
+nvkm-y += nvkm/engine/gr/nv44.o
 nvkm-y += nvkm/engine/gr/nv50.o
+nvkm-y += nvkm/engine/gr/g84.o
+nvkm-y += nvkm/engine/gr/gt200.o
+nvkm-y += nvkm/engine/gr/mcp79.o
+nvkm-y += nvkm/engine/gr/gt215.o
+nvkm-y += nvkm/engine/gr/mcp89.o
 nvkm-y += nvkm/engine/gr/gf100.o
-nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf104.o
+nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf110.o
 nvkm-y += nvkm/engine/gr/gf117.o
 nvkm-y += nvkm/engine/gr/gf119.o
 nvkm-y += nvkm/engine/gr/gk104.o
-nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gk110.o
 nvkm-y += nvkm/engine/gr/gk110b.o
 nvkm-y += nvkm/engine/gr/gk208.o
+nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm204.o
 nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm20b.o
+
+nvkm-y += nvkm/engine/gr/ctxnv40.o
+nvkm-y += nvkm/engine/gr/ctxnv50.o
+nvkm-y += nvkm/engine/gr/ctxgf100.o
+nvkm-y += nvkm/engine/gr/ctxgf104.o
+nvkm-y += nvkm/engine/gr/ctxgf108.o
+nvkm-y += nvkm/engine/gr/ctxgf110.o
+nvkm-y += nvkm/engine/gr/ctxgf117.o
+nvkm-y += nvkm/engine/gr/ctxgf119.o
+nvkm-y += nvkm/engine/gr/ctxgk104.o
+nvkm-y += nvkm/engine/gr/ctxgk110.o
+nvkm-y += nvkm/engine/gr/ctxgk110b.o
+nvkm-y += nvkm/engine/gr/ctxgk208.o
+nvkm-y += nvkm/engine/gr/ctxgk20a.o
+nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
new file mode 100644
index 0000000..090765f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <engine/fifo.h>
+
+static void
+nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->tile)
+		gr->func->tile(gr, region, tile);
+}
+
+u64
+nvkm_gr_units(struct nvkm_gr *gr)
+{
+	if (gr->func->units)
+		return gr->func->units(gr);
+	return 0;
+}
+
+int
+nvkm_gr_tlb_flush(struct nvkm_gr *gr)
+{
+	if (gr->func->tlb_flush)
+		return gr->func->tlb_flush(gr);
+	return -ENODEV;
+}
+
+static int
+nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	int c = 0;
+
+	if (gr->func->object_get) {
+		int ret = gr->func->object_get(gr, index, &oclass->base);
+		if (oclass->base.oclass)
+			return index;
+		return ret;
+	}
+
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = gr->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	if (gr->func->chan_new)
+		return gr->func->chan_new(gr, chan, oclass, pobject);
+	return 0;
+}
+
+static void
+nvkm_gr_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	gr->func->intr(gr);
+}
+
+static int
+nvkm_gr_oneinit(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->oneinit)
+		return gr->func->oneinit(gr);
+	return 0;
+}
+
+static int
+nvkm_gr_init(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	return gr->func->init(gr);
+}
+
+static void *
+nvkm_gr_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->dtor)
+		return gr->func->dtor(gr);
+	return gr;
+}
+
+static const struct nvkm_engine_func
+nvkm_gr = {
+	.dtor = nvkm_gr_dtor,
+	.oneinit = nvkm_gr_oneinit,
+	.init = nvkm_gr_init,
+	.intr = nvkm_gr_intr,
+	.tile = nvkm_gr_tile,
+	.fifo.cclass = nvkm_gr_cclass_new,
+	.fifo.sclass = nvkm_gr_oclass_get,
+};
+
+int
+nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+{
+	gr->func = func;
+	return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
+				enable, &gr->engine);
+}
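The new gr/base.c above dispatches every engine operation through gr->func, treating some hooks as mandatory (init, intr) and NULL-checking the optional ones with a sensible default, e.g. -ENODEV when a chipset provides no tlb_flush. A compilable sketch of that optional-hook style, using demo names rather than the kernel's:

	#include <errno.h>
	#include <stdio.h>

	struct gr_demo;

	struct gr_func_demo {
		int (*init)(struct gr_demo *);		/* mandatory */
		int (*tlb_flush)(struct gr_demo *);	/* optional */
	};

	struct gr_demo {
		const struct gr_func_demo *func;
	};

	static int gr_demo_init(struct gr_demo *gr)
	{
		return gr->func->init(gr);	/* assumed non-NULL */
	}

	static int gr_demo_tlb_flush(struct gr_demo *gr)
	{
		if (gr->func->tlb_flush)
			return gr->func->tlb_flush(gr);
		return -ENODEV;		/* default when the hook is absent */
	}

	static int demo_init(struct gr_demo *gr)
	{
		(void)gr;
		printf("gr init\n");
		return 0;
	}

	static const struct gr_func_demo demo_func = {
		.init = demo_init,
		/* .tlb_flush intentionally left NULL */
	};

	int main(void)
	{
		struct gr_demo gr = { .func = &demo_func };

		gr_demo_init(&gr);
		printf("tlb_flush -> %d\n", gr_demo_tlb_flush(&gr));
		return 0;
	}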
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 57e2c5b..56f392d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -23,7 +23,6 @@
  */
 #include "ctxgf100.h"
 
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/timer.h>
@@ -1005,6 +1004,7 @@
 gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
 		      int shift, int buffer)
 {
+	struct nvkm_device *device = info->gr->base.engine.subdev.device;
 	if (info->data) {
 		if (shift >= 0) {
 			info->mmio->addr = addr;
@@ -1021,29 +1021,29 @@
 			return;
 	}
 
-	nv_wr32(info->priv, addr, data);
+	nvkm_wr32(device, addr, data);
 }
 
 void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 }
 
 void
 gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1053,13 +1053,13 @@
 void
 gf100_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	int gpc, tpc;
 	u32 bo = 0;
 
@@ -1067,91 +1067,95 @@
 	mmio_refn(info, 0x419848, 0x10000000, s, b);
 	mmio_wr32(info, 0x405830, (attrib << 16));
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
 			mmio_skip(info, o, (attrib << 16) | ++bo);
 			mmio_wr32(info, o, (attrib << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 		}
 	}
 }
 
 void
-gf100_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 }
 
 void
-gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+gf100_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 void
-gf100_grctx_generate_r406028(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406028(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp[GPC_MAX / 8] = {}, i = 0;
-	for (i = 0; i < priv->gpc_nr; i++)
-		tmp[i / 8] |= priv->tpc_nr[i] << ((i % 8) * 4);
+	for (i = 0; i < gr->gpc_nr; i++)
+		tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
 	for (i = 0; i < 4; i++) {
-		nv_wr32(priv, 0x406028 + (i * 4), tmp[i]);
-		nv_wr32(priv, 0x405870 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
 	}
 }
 
 void
-gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u8  tpcnr[GPC_MAX], data[TPC_MAX];
 	int gpc, tpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	memset(data, 0x1f, sizeof(data));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 		data[tpc] = gpc;
 	}
 
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+		nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
 }
 
 void
-gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -1163,7 +1167,7 @@
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -1176,202 +1180,211 @@
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x419be4, data2[1]);
+	nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x419be4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gf100_grctx_generate_r406800(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406800(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u64 tpc_mask = 0, tpc_set = 0;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc;
 	int i, a, b;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
-		tpc_mask |= ((1ULL << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
 
 	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-		a = (i * (priv->tpc_total - 1)) / 32;
+		a = (i * (gr->tpc_total - 1)) / 32;
 		if (a != b) {
 			b = a;
 			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
+				gpc = (gpc + 1) % gr->gpc_nr;
 			} while (!tpcnr[gpc]);
-			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+			tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 			tpc_set |= 1ULL << ((gpc * 8) + tpc);
 		}
 
-		nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
-		nv_wr32(priv, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
-		if (priv->gpc_nr > 4) {
-			nv_wr32(priv, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
-			nv_wr32(priv, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
+		nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
+		nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
+		if (gr->gpc_nr > 4) {
+			nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
+			nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
 		}
 	}
 }
 
 void
-gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gf100_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gf100_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
 int
-gf100_grctx_generate(struct gf100_gr_priv *priv)
+gf100_grctx_generate(struct gf100_gr *gr)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *chan;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *chan;
 	struct gf100_grctx info;
 	int ret, i;
+	u64 addr;
 
 	/* allocate memory for a "channel", which we'll use to generate
 	 * the default context values
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
+			      0x1000, true, &chan);
 	if (ret) {
-		nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+		nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
 		return ret;
 	}
 
+	addr = nvkm_memory_addr(chan);
+
 	/* PGD pointer */
-	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0208, 0xffffffff);
-	nv_wo32(chan, 0x020c, 0x000000ff);
+	nvkm_kmap(chan);
+	nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0208, 0xffffffff);
+	nvkm_wo32(chan, 0x020c, 0x000000ff);
 
 	/* PGT[0] pointer */
-	nv_wo32(chan, 0x1000, 0x00000000);
-	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+	nvkm_wo32(chan, 0x1000, 0x00000000);
+	nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
 
 	/* identity-map the whole "channel" into its own vm */
-	for (i = 0; i < chan->size / 4096; i++) {
-		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
-		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+	for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
+		u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
+		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
 	}
 
 	/* context pointer (virt) */
-	nv_wo32(chan, 0x0210, 0x00080004);
-	nv_wo32(chan, 0x0214, 0x00000000);
+	nvkm_wo32(chan, 0x0210, 0x00080004);
+	nvkm_wo32(chan, 0x0214, 0x00000000);
+	nvkm_done(chan);
 
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
-	nv_wr32(priv, 0x100cbc, 0x80000001);
-	nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+	nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
+	nvkm_wr32(device, 0x100cbc, 0x80000001);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 
 	/* setup default state for mmio list construction */
-	info.priv = priv;
-	info.data = priv->mmio_data;
-	info.mmio = priv->mmio_list;
+	info.gr = gr;
+	info.data = gr->mmio_data;
+	info.mmio = gr->mmio_list;
 	info.addr = 0x2000 + (i * 8);
 	info.buffer_nr = 0;
 
 	/* make channel current */
-	if (priv->firmware) {
-		nv_wr32(priv, 0x409840, 0x00000030);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000003);
-		if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
-			nv_error(priv, "load_ctx timeout\n");
+	if (gr->firmware) {
+		nvkm_wr32(device, 0x409840, 0x00000030);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000003);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000010)
+				break;
+		);
 
-		nv_wo32(chan, 0x8001c, 1);
-		nv_wo32(chan, 0x80020, 0);
-		nv_wo32(chan, 0x80028, 0);
-		nv_wo32(chan, 0x8002c, 0);
-		bar->flush(bar);
+		nvkm_kmap(chan);
+		nvkm_wo32(chan, 0x8001c, 1);
+		nvkm_wo32(chan, 0x80020, 0);
+		nvkm_wo32(chan, 0x80028, 0);
+		nvkm_wo32(chan, 0x8002c, 0);
+		nvkm_done(chan);
 	} else {
-		nv_wr32(priv, 0x409840, 0x80000000);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000001);
-		if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000))
-			nv_error(priv, "HUB_SET_CHAN timeout\n");
+		nvkm_wr32(device, 0x409840, 0x80000000);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x80000000)
+				break;
+		);
 	}
 
-	oclass->main(priv, &info);
+	grctx->main(gr, &info);
 
 	/* trigger a context unload by unsetting the "next channel valid" bit
 	 * and faking a context switch interrupt
 	 */
-	nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
-	nv_wr32(priv, 0x409000, 0x00000100);
-	if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
-		nv_error(priv, "grctx template channel unload timeout\n");
+	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
+	nvkm_wr32(device, 0x409000, 0x00000100);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+			break;
+	) < 0) {
 		ret = -EBUSY;
 		goto done;
 	}
 
-	priv->data = kmalloc(priv->size, GFP_KERNEL);
-	if (priv->data) {
-		for (i = 0; i < priv->size; i += 4)
-			priv->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+	gr->data = kmalloc(gr->size, GFP_KERNEL);
+	if (gr->data) {
+		nvkm_kmap(chan);
+		for (i = 0; i < gr->size; i += 4)
+			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+		nvkm_done(chan);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
 	}
 
 done:
-	nvkm_gpuobj_ref(NULL, &chan);
+	nvkm_memory_del(&chan);
 	return ret;
 }
 
-struct nvkm_oclass *
-gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf100_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -1387,4 +1400,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
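
The hunk above shows the two core conversions of this series side by side: the
old nv_wait(priv, addr, mask, data) helper becomes the nvkm_msec() statement
expression, and every nvkm_wo32()/nvkm_ro32() access to a struct nvkm_memory
is now bracketed by nvkm_kmap()/nvkm_done().  A minimal sketch of both idioms,
assuming the nvkm core headers of this era (the nvkm_msec() body runs
repeatedly until "break" or until the millisecond budget lapses, and the whole
expression evaluates to a negative value on timeout):

	/* sketch only, not part of the patch */
	nvkm_kmap(chan);                      /* map backing store for CPU access  */
	nvkm_wo32(chan, 0x0200, 0x00000000);  /* 32-bit write at byte offset 0x200 */
	nvkm_done(chan);                      /* unmap, flushing the writes        */

	if (nvkm_msec(device, 2000,           /* poll for up to 2000ms ...         */
		if (nvkm_rd32(device, 0x409800) & 0x80000000)
			break;                /* ... until bit 31 comes up         */
	) < 0)
		nvkm_error(subdev, "timeout\n"); /* negative result == timed out   */
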
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3676a33..3c64040 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -3,7 +3,7 @@
 #include "gf100.h"
 
 struct gf100_grctx {
-	struct gf100_gr_priv *priv;
+	struct gf100_gr *gr;
 	struct gf100_gr_data *data;
 	struct gf100_gr_mmio *mmio;
 	int buffer_nr;
@@ -19,12 +19,11 @@
 #define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
 #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c),  0, -1)
 
-struct gf100_grctx_oclass {
-	struct nvkm_oclass base;
+struct gf100_grctx_func {
 	/* main context generation function */
-	void  (*main)(struct gf100_gr_priv *, struct gf100_grctx *);
+	void  (*main)(struct gf100_gr *, struct gf100_grctx *);
 	/* context-specific modify-on-first-load list generation function */
-	void  (*unkn)(struct gf100_gr_priv *);
+	void  (*unkn)(struct gf100_gr *);
 	/* mmio context data */
 	const struct gf100_gr_pack *hub;
 	const struct gf100_gr_pack *gpc;
@@ -50,60 +49,61 @@
 	u32 alpha_nr;
 };
 
-static inline const struct gf100_grctx_oclass *
-gf100_grctx_impl(struct gf100_gr_priv *priv)
-{
-	return (void *)nv_engine(priv)->cclass;
-}
-
-extern struct nvkm_oclass *gf100_grctx_oclass;
-int  gf100_grctx_generate(struct gf100_gr_priv *);
-void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gf100_grctx;
+int  gf100_grctx_generate(struct gf100_gr *);
+void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gf100_grctx_generate_bundle(struct gf100_grctx *);
 void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
-void gf100_grctx_generate_unkn(struct gf100_gr_priv *);
-void gf100_grctx_generate_tpcid(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406028(struct gf100_gr_priv *);
-void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406800(struct gf100_gr_priv *);
+void gf100_grctx_generate_unkn(struct gf100_gr *);
+void gf100_grctx_generate_tpcid(struct gf100_gr *);
+void gf100_grctx_generate_r406028(struct gf100_gr *);
+void gf100_grctx_generate_r4060a8(struct gf100_gr *);
+void gf100_grctx_generate_r418bb8(struct gf100_gr *);
+void gf100_grctx_generate_r406800(struct gf100_gr *);
 
-extern struct nvkm_oclass *gf108_grctx_oclass;
+extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
-void gf108_grctx_generate_unkn(struct gf100_gr_priv *);
+void gf108_grctx_generate_unkn(struct gf100_gr *);
 
-extern struct nvkm_oclass *gf104_grctx_oclass;
-extern struct nvkm_oclass *gf110_grctx_oclass;
+extern const struct gf100_grctx_func gf104_grctx;
+extern const struct gf100_grctx_func gf110_grctx;
 
-extern struct nvkm_oclass *gf117_grctx_oclass;
+extern const struct gf100_grctx_func gf117_grctx;
 void gf117_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gf119_grctx_oclass;
+extern const struct gf100_grctx_func gf119_grctx;
 
-extern struct nvkm_oclass *gk104_grctx_oclass;
-extern struct nvkm_oclass *gk20a_grctx_oclass;
-void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk104_grctx;
+extern const struct gf100_grctx_func gk20a_grctx;
+void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
-void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
-void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+void gk104_grctx_generate_unkn(struct gf100_gr *);
+void gk104_grctx_generate_r418bb8(struct gf100_gr *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
 
 
-extern struct nvkm_oclass *gk110_grctx_oclass;
-extern struct nvkm_oclass *gk110b_grctx_oclass;
-extern struct nvkm_oclass *gk208_grctx_oclass;
-
-extern struct nvkm_oclass *gm107_grctx_oclass;
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gm204_grctx_oclass;
-void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk110_grctx;
+extern const struct gf100_grctx_func gk110b_grctx;
+extern const struct gf100_grctx_func gk208_grctx;
 
-extern struct nvkm_oclass *gm206_grctx_oclass;
+extern const struct gf100_grctx_func gm107_grctx;
+void gm107_grctx_generate_bundle(struct gf100_grctx *);
+void gm107_grctx_generate_pagepool(struct gf100_grctx *);
+void gm107_grctx_generate_attrib(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gm204_grctx;
+void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm204_grctx_generate_tpcid(struct gf100_gr *);
+void gm204_grctx_generate_405b60(struct gf100_gr *);
+
+extern const struct gf100_grctx_func gm206_grctx;
+extern const struct gf100_grctx_func gm20b_grctx;
 
 /* context init value lists */
 
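Note the shape change in this header: gf100_grctx_oclass embedded a whole
nvkm_oclass (ctor/dtor/rd32/wr32 hooks) and callers reached it by downcasting
nv_engine(priv)->cclass, whereas gf100_grctx_func is a plain const table of
function pointers and buffer limits.  A sketch of the new dispatch path,
assuming gf100_gr_func in gf100.h carries the grctx pointer used throughout
these hunks:

	/* sketch only: per-chipset dispatch through the new table */
	const struct gf100_grctx_func *grctx = gr->func->grctx;
	const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
	grctx->main(gr, &info);	/* chip-specific context generation */
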
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index c5a8d55..54fd74e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -79,17 +79,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf104_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -105,4 +96,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 87c844a..505cdcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -730,18 +730,18 @@
 void
 gf108_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, tpc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -749,43 +749,35 @@
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 a = alpha;
 			const u32 b =  beta;
 			const u32 t = timeslice_mode;
 			const u32 o = TPC_UNIT(gpc, tpc, 0x500);
 			mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 			mmio_wr32(info, o + 0x44, (a << 16) | ao);
-			ao += impl->alpha_nr_max;
+			ao += grctx->alpha_nr_max;
 		}
 	}
 }
 
 void
-gf108_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf108_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x419814, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x419814, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
-struct nvkm_oclass *
-gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf108_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf108_grctx_pack_hub,
@@ -803,4 +795,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b3acd93..7df398b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -330,17 +330,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf110_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -356,4 +347,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 9bbe2c9..b5b8759 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -182,18 +182,18 @@
 void
 gf117_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -201,68 +201,60 @@
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
-			const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 b =  beta * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+			const u32 a = alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 b =  beta * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 t = timeslice_mode;
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, (a << 16) | ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 		}
 	}
 }
 
 void
-gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
-struct nvkm_oclass *
-gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf117_grctx = {
 	.main  = gf117_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gf117_grctx_pack_hub,
@@ -281,4 +273,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x324,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 8d87614..605185b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -498,17 +498,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf119_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf119_grctx_pack_hub,
@@ -526,4 +517,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b12f6a9..a843e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -843,27 +843,27 @@
 void
 gk104_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -872,31 +872,33 @@
 }
 
 void
-gk104_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gk104_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x41be08, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
 void
-gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -908,7 +910,7 @@
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -921,86 +923,79 @@
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x41bfd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x41bfe4, data2[1]);
+	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x41bfe4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x120074);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x120074);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
 void
-gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
-	gk104_grctx_generate_rop_active_fbps(priv);
-	nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 
-	nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
-	nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
+	nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
+	nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
 }
 
-struct nvkm_oclass *
-gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk104_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk104_grctx_pack_hub,
@@ -1021,4 +1016,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
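
gk104_grctx_generate_bundle() packs two limits into register 0x4064c8; the
state limit is clamped to the bundle buffer size expressed in 0x20-byte units.
A worked example using the gm20b numbers that appear later in this series
(bundle_size = 0x1800, bundle_min_gpm_fifo_depth = 0x182,
bundle_token_limit = 0x1c0):

	/* sketch only: 0x1800 / 0x20 = 0xc0 < 0x182, so the size term wins */
	const u32 state_limit = min(0x182u, 0x1800u / 0x20);	/* = 0x000000c0 */
	const u32 token_limit = 0x1c0;
	const u32 val = (state_limit << 16) | token_limit;	/* = 0x00c001c0 */
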
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index b3f58be..7b95ec2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,17 +808,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -839,4 +830,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index b11c267..048b115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -69,17 +69,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110b_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -100,4 +91,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 6e8ce9f..67b7a1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -530,17 +530,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk208_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk208_grctx_pack_hub,
@@ -561,4 +552,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 2f241f6..ddaa16a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,34 +20,60 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "ctxgf100.h"
+#include "gf100.h"
 
-static const struct gf100_gr_pack
-gk20a_grctx_pack_mthd[] = {
-	{ gk104_grctx_init_a097_0, 0xa297 },
-	{ gf100_grctx_init_902d_0, 0x902d },
-	{}
-};
+#include <subdev/mc.h>
 
-struct nvkm_oclass *
-gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-	.main  = gk104_grctx_generate_main,
+static void
+gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+
+	nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gk20a_grctx = {
+	.main  = gk20a_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
-	.hub   = gk104_grctx_pack_hub,
-	.gpc   = gk104_grctx_pack_gpc,
-	.zcull = gf100_grctx_pack_zcull,
-	.tpc   = gk104_grctx_pack_tpc,
-	.ppc   = gk104_grctx_pack_ppc,
-	.icmd  = gk104_grctx_pack_icmd,
-	.mthd  = gk20a_grctx_pack_mthd,
 	.bundle = gk104_grctx_generate_bundle,
 	.bundle_size = 0x1800,
 	.bundle_min_gpm_fifo_depth = 0x62,
@@ -59,4 +85,4 @@
 	.attrib_nr = 0x240,
 	.alpha_nr_max = 0x648 + (0x648 / 2),
 	.alpha_nr = 0x648,
-}.base;
+};
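
gk20a carries no static pack lists; its new main() instead replays the
firmware-provided gr->fuc_sw_ctx/fuc_method/fuc_bundle lists, waiting for idle
between phases and parking register 0x404154 (stashed in idle_timeout_save) at
zero while they run.  The save/restore skeleton, reduced to its essentials:

	/* sketch only */
	int save = nvkm_rd32(device, 0x404154);	/* stash the idle timeout  */
	nvkm_wr32(device, 0x404154, 0x00000000);	/* park it during replay   */
	/* ... replay lists, gf100_gr_wait_idle(gr) between phases ... */
	nvkm_wr32(device, 0x404154, save);	/* restore the saved value */
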
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fbeaae3..95f59e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -863,27 +863,27 @@
 void
 gm107_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418e24, 0x00000000, s, b);
-	mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -895,17 +895,17 @@
 void
 gm107_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc, n = 0;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -914,97 +914,90 @@
 	mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
-			const u32 as =  alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_wr32(info, o + 0xc0, bs);
 			mmio_wr32(info, o + 0xf4, bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
 		}
 	}
 }
 
-static void
-gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm107_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 static void
-gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm107_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gm107_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
-	nv_wr32(priv, 0x4064d0, 0x00000001);
+	nvkm_wr32(device, 0x4064d0, 0x00000001);
 	for (i = 1; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000001);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000001);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(priv);
+	gk104_grctx_generate_rop_active_fbps(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
-	nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
+	nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
 }
 
-struct nvkm_oclass *
-gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm107_grctx = {
 	.main  = gm107_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm107_grctx_pack_hub,
@@ -1025,4 +1018,4 @@
 	.attrib_nr = 0xaa0,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
index ea8e661..170cbfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -918,17 +918,18 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-static void
-gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 		}
@@ -936,101 +937,95 @@
 }
 
 static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x12006c);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-static void
-gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_405b60(struct gf100_gr *gr)
 {
-	const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
-	u32 dist[TPC_MAX] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+	u32 dist[TPC_MAX / 4] = {};
 	u32 gpcs[GPC_MAX] = {};
 	u8  tpcnr[GPC_MAX];
 	int tpc, gpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	/* won't result in the same distribution as the binary driver where
 	 * some of the gpcs have more tpcs than others, but this shall do
 	 * for the moment.  the code for earlier gpus has this issue too.
 	 */
-	for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
 		gpcs[gpc] |= i << (tpc * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
-		nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
-	for (i = 0; i < priv->gpc_nr; i++)
-		nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+	for (i = 0; i < gr->gpc_nr; i++)
+		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
 void
-gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 tmp;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm204_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
+	gm204_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gm204_grctx_generate_rop_active_fbps(priv);
+	gm204_grctx_generate_rop_active_fbps(gr);
 
-	for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
-		tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
-	nv_wr32(priv, 0x4041c4, tmp);
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
 
-	gm204_grctx_generate_405b60(priv);
+	gm204_grctx_generate_405b60(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000800);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000800);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
-	nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
 }
 
-struct nvkm_oclass *
-gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm204_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -1051,4 +1046,4 @@
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
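
gm204_grctx_generate_405b60() round-robins TPCs across GPCs and packs one
(gpc << 4 | tpc) byte per TPC, four to a 32-bit word.  A worked example for a
hypothetical two-GPC, two-TPC-per-GPC layout:

	/* sketch only: tpc_nr = {2, 2}, tpc_total = 4
	 * round-robin order: (gpc0,tpc0) (gpc1,tpc0) (gpc0,tpc1) (gpc1,tpc1)
	 * packed bytes:       0x00       0x10        0x01        0x11       */
	u32 dist0 = 0x00 | (0x10 << 8) | (0x01 << 16) | (0x11 << 24);	/* 0x11011000 */
	u32 gpcs0 = (0 << 0) | (2 << 8);	/* gpc0 hosts global ids 0 and 2 */
	u32 gpcs1 = (1 << 0) | (3 << 8);	/* gpc1 hosts global ids 1 and 3 */
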
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
index 91ec416..d6be603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -49,17 +49,8 @@
 	{}
 };
 
-struct nvkm_oclass *
-gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm206_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -80,4 +71,4 @@
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
new file mode 100644
index 0000000..6702604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+gm20b_grctx_generate_r406028(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 tpc_per_gpc = 0;
+	int i;
+
+	for (i = 0; i < gr->gpc_nr; i++)
+		tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
+
+	nvkm_wr32(device, 0x406028, tpc_per_gpc);
+	nvkm_wr32(device, 0x405870, tpc_per_gpc);
+}
+
+static void
+gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i, tmp;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gm204_grctx_generate_tpcid(gr);
+	gm20b_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
+
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
+
+	gm204_grctx_generate_405b60(gr);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gm20b_grctx = {
+	.main  = gm20b_grctx_generate_main,
+	.unkn  = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x1800,
+	.bundle_min_gpm_fifo_depth = 0x182,
+	.bundle_token_limit = 0x1c0,
+	.pagepool = gm107_grctx_generate_pagepool,
+	.pagepool_size = 0x8000,
+	.attrib = gm107_grctx_generate_attrib,
+	.attrib_nr_max = 0x600,
+	.attrib_nr = 0x400,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+};
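
gm20b_grctx_generate_r406028() packs one 4-bit TPC count per GPC into a single
word and mirrors it to 0x405870.  A worked example:

	/* sketch only: tpc_nr = {2, 1} yields (1 << 4) | 2 = 0x12 */
	u32 tpc_per_gpc = 0;
	tpc_per_gpc |= 2 << (4 * 0);	/* gpc0 has 2 TPCs */
	tpc_per_gpc |= 1 << (4 * 1);	/* gpc1 has 1 TPC  */
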
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index dc31462..80a6b01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -111,7 +111,6 @@
 
 #include "ctxnv40.h"
 #include "nv40.h"
-#include <core/device.h>
 
 /* TODO:
  *  - get vs count from 0x1540
@@ -583,13 +582,13 @@
 
 	offset += 0x0280/4;
 	for (i = 0; i < 16; i++, offset += 2)
-		nv_wo32(obj, offset * 4, 0x3f800000);
+		nvkm_wo32(obj, offset * 4, 0x3f800000);
 
 	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
 		for (i = 0; i < vs_nr_b0 * 6; i += 6)
-			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+			nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
 		for (i = 0; i < vs_nr_b1 * 4; i += 4)
-			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+			nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
 	}
 }
 
@@ -675,7 +674,7 @@
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 256,
 	};
 
@@ -684,9 +683,9 @@
 
 	nv40_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 
 	kfree(ctxprog);
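
The upload loop above follows the usual index/data register-pair idiom:
writing 0 to 0x400324 is assumed to reset the microcode index, after which
each write to 0x400328 stores one ctxprog word and advances the index
automatically:

	/* sketch only */
	nvkm_wr32(device, 0x400324, 0);			/* reset upload index */
	for (i = 0; i < ctx.ctxprog_len; i++)
		nvkm_wr32(device, 0x400328, ctxprog[i]);	/* one word per write */
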
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961..50e808e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@
 		NVKM_GRCTX_PROG,
 		NVKM_GRCTX_VALS
 	} mode;
-	void *data;
+	u32 *ucode;
+	struct nvkm_gpuobj *data;
 
 	u32 ctxprog_max;
 	u32 ctxprog_len;
@@ -22,7 +23,7 @@
 static inline void
 cp_out(struct nvkm_grctx *ctx, u32 inst)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
 		return;
@@ -56,7 +57,7 @@
 static inline void
 cp_name(struct nvkm_grctx *ctx, int name)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 	int i;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,6 @@
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
-	nv_wo32(ctx->data, reg * 4, val);
+	nvkm_wo32(ctx->data, reg * 4, val);
 }
 #endif
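
The header change splits the old catch-all "void *data" into two typed
fields: u32 *ucode receives ctxprog words in NVKM_GRCTX_PROG mode, while
struct nvkm_gpuobj *data is poked with default context values via nvkm_wo32()
in NVKM_GRCTX_VALS mode.  A sketch of the microcode-generation configuration,
mirroring the nv40/nv50 callers above:

	/* sketch only */
	struct nvkm_grctx ctx = {
		.device = device,
		.mode   = NVKM_GRCTX_PROG,
		.ucode  = ctxprog,	/* plain u32[] scratch buffer */
		.ctxprog_max = 256,
	};
	/* a values pass would instead set .mode = NVKM_GRCTX_VALS and
	 * point .data at the target gpuobj */
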
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 9c9528d..1e13278 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -107,7 +107,6 @@
 
 #include "ctxnv40.h"
 
-#include <core/device.h>
 #include <subdev/fb.h>
 
 #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -269,7 +268,7 @@
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 512,
 	};
 
@@ -277,9 +276,9 @@
 		return -ENOMEM;
 	nv50_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 	kfree(ctxprog);
 	return 0;
@@ -299,7 +298,7 @@
 	struct nvkm_device *device = ctx->device;
 	int i, j;
 	int offset, base;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
@@ -570,7 +569,7 @@
 		else if (device->chipset < 0xa0)
 			gr_def(ctx, 0x407d08, 0x00390040);
 		else {
-			if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
+			if (device->fb->ram->type != NVKM_RAM_TYPE_GDDR5)
 				gr_def(ctx, 0x407d08, 0x003d0040);
 			else
 				gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,9 +783,10 @@
 static void
 dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+	}
 	ctx->ctxvals_pos += num;
 }
 
@@ -1156,9 +1156,10 @@
 static void
 xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+	}
 	ctx->ctxvals_pos += num << 3;
 }
 
@@ -1190,7 +1191,7 @@
 	int i;
 	int offset;
 	int size = 0;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 	ctx->ctxvals_base = offset;
@@ -3273,7 +3274,7 @@
 	struct nvkm_device *device = ctx->device;
 	int i;
 	u32 offset;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 	int size = 0;
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
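
The dd_emit()/xf_emit() helpers touched above implement a two-pass scheme: the same walk over the context layout either just advances ctx->ctxvals_pos (sizing pass) or additionally writes values via nvkm_wo32() (NVKM_GRCTX_VALS pass); the added braces merely make the conditional loop explicit. A standalone sketch of the idiom, with illustrative names:

/* Two-pass emit: one routine both measures and fills the context
 * image depending on mode; ctx/emit are hypothetical names. */
#include <stdint.h>
#include <stdio.h>

enum mode { MEASURE, FILL };

struct ctx {
	enum mode mode;
	uint32_t *image;	/* only dereferenced in FILL mode */
	unsigned pos;
};

static void emit(struct ctx *c, unsigned num, uint32_t val)
{
	unsigned i;

	if (val && c->mode == FILL) {
		for (i = 0; i < num; i++)
			c->image[c->pos + i] = val;
	}
	c->pos += num;		/* cursor advances in both passes */
}

int main(void)
{
	uint32_t image[32] = { 0 };
	struct ctx pass1 = { .mode = MEASURE };
	struct ctx pass2 = { .mode = FILL, .image = image };

	emit(&pass1, 16, 0x3f800000);	/* sizing pass */
	emit(&pass2, 16, 0x3f800000);	/* filling pass */
	printf("size = %u, first = %#x\n", pass1.pos, image[0]);
	return 0;
}
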
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
new file mode 100644
index 0000000..ce91330
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+static const struct nvkm_bitfield nv50_gr_status[] = {
+	{ 0x00000001, "BUSY" }, /* set when any bit is set */
+	{ 0x00000002, "DISPATCH" },
+	{ 0x00000004, "UNK2" },
+	{ 0x00000008, "UNK3" },
+	{ 0x00000010, "UNK4" },
+	{ 0x00000020, "UNK5" },
+	{ 0x00000040, "M2MF" },
+	{ 0x00000080, "UNK7" },
+	{ 0x00000100, "CTXPROG" },
+	{ 0x00000200, "VFETCH" },
+	{ 0x00000400, "CCACHE_PREGEOM" },
+	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
+	{ 0x00001000, "VCLIP" },
+	{ 0x00002000, "RATTR_APLANE" },
+	{ 0x00004000, "TRAST" },
+	{ 0x00008000, "CLIPID" },
+	{ 0x00010000, "ZCULL" },
+	{ 0x00020000, "ENG2D" },
+	{ 0x00040000, "RMASK" },
+	{ 0x00080000, "TPC_RAST" },
+	{ 0x00100000, "TPC_PROP" },
+	{ 0x00200000, "TPC_TEX" },
+	{ 0x00400000, "TPC_GEOM" },
+	{ 0x00800000, "TPC_MP" },
+	{ 0x01000000, "ROP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_0[] = {
+	{ 0x01, "VFETCH" },
+	{ 0x02, "CCACHE" },
+	{ 0x04, "PREGEOM" },
+	{ 0x08, "POSTGEOM" },
+	{ 0x10, "VATTR" },
+	{ 0x20, "STRMOUT" },
+	{ 0x40, "VCLIP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_1[] = {
+	{ 0x01, "TPC_RAST" },
+	{ 0x02, "TPC_PROP" },
+	{ 0x04, "TPC_TEX" },
+	{ 0x08, "TPC_GEOM" },
+	{ 0x10, "TPC_MP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_2[] = {
+	{ 0x01, "RATTR" },
+	{ 0x02, "APLANE" },
+	{ 0x04, "TRAST" },
+	{ 0x08, "CLIPID" },
+	{ 0x10, "ZCULL" },
+	{ 0x20, "ENG2D" },
+	{ 0x40, "RMASK" },
+	{ 0x80, "ROP" },
+	{}
+};
+
+static void
+nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
+		      const struct nvkm_bitfield *units, u32 status)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	u32 stat = status;
+	u8  mask = 0x00;
+	char msg[64];
+	int i;
+
+	for (i = 0; units[i].name && status; i++) {
+		if ((status & 7) == 1)
+			mask |= (1 << i);
+		status >>= 3;
+	}
+
+	nvkm_snprintbf(msg, sizeof(msg), units, mask);
+	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
+}
+
+int
+g84_gr_tlb_flush(struct nvkm_gr *base)
+{
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_timer *tmr = device->timer;
+	bool idle, timeout = false;
+	unsigned long flags;
+	char status[128];
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
+
+	start = nvkm_timer_read(tmr);
+	do {
+		idle = true;
+
+		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle &&
+		 !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
+
+	if (timeout) {
+		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
+
+		tmp = nvkm_rd32(device, 0x400700);
+		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
+		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
+
+		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
+				       nvkm_rd32(device, 0x400380));
+		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
+				       nvkm_rd32(device, 0x400384));
+		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
+				       nvkm_rd32(device, 0x400388));
+	}
+
+
+	nvkm_wr32(device, 0x100c80, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return timeout ? -EBUSY : 0;
+}
+
+static const struct nvkm_gr_func
+g84_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8297, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&g84_gr, device, index, pgr);
+}
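
g84_gr_tlb_flush() above is built on a poll-with-timeout idiom: keep re-reading the PGRAPH status registers, give up once nvkm_timer_read() shows more than two seconds elapsed, and decode the stuck units on failure before returning -EBUSY. The same idiom in a self-contained form, with clock_gettime() standing in for the nvkm timer and hw_busy() for the MMIO status reads:

/* Poll-with-timeout sketch; hw_busy() is a hypothetical stand-in for
 * the nvkm_rd32() status checks in g84_gr_tlb_flush(). */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hw_busy(void)
{
	return false;	/* pretend the engine went idle immediately */
}

static int wait_idle(long long timeout_ns)
{
	struct timespec t0, t;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	do {
		if (!hw_busy())
			return 0;	/* idle before the deadline */
		clock_gettime(CLOCK_MONOTONIC, &t);
	} while ((t.tv_sec - t0.tv_sec) * 1000000000LL +
		 (t.tv_nsec - t0.tv_nsec) < timeout_ns);

	return -1;		/* the driver returns -EBUSY here */
}

int main(void)
{
	printf("wait_idle: %d\n", wait_idle(2000000000LL));	/* 2s */
	return 0;
}
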
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ca11ddb..f1358a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,13 +26,12 @@
 #include "fuc/os.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
 #include <core/option.h>
-#include <engine/fifo.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
+#include <subdev/pmu.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
 #include <nvif/class.h>
 #include <nvif/unpack.h>
@@ -42,35 +41,36 @@
  ******************************************************************************/
 
 static void
-gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_color[zbc].format) {
-		nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
-		nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
-		nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
-		nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_color[zbc].format) {
+		nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
+		nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
+		nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
+		nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
 	}
-	nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+	nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
 }
 
 static int
-gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 		       const u32 ds[4], const u32 l2[4])
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_color[i].format) {
-			if (priv->zbc_color[i].format != format)
+		if (gr->zbc_color[i].format) {
+			if (gr->zbc_color[i].format != format)
 				continue;
-			if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
-				   priv->zbc_color[i].ds)))
+			if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
+				   gr->zbc_color[i].ds)))
 				continue;
-			if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
-				   priv->zbc_color[i].l2))) {
+			if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
+				   gr->zbc_color[i].l2))) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -83,38 +83,39 @@
 	if (zbc < 0)
 		return zbc;
 
-	memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
-	memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
-	priv->zbc_color[zbc].format = format;
-	ltc->zbc_color_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_color(priv, zbc);
+	memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
+	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
+	gr->zbc_color[zbc].format = format;
+	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_color(gr, zbc);
 	return zbc;
 }
 
 static void
-gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_depth[zbc].format)
-		nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
-	nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_depth[zbc].format)
+		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
+	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
 }
 
 static int
-gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 		       const u32 ds, const u32 l2)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_depth[i].format) {
-			if (priv->zbc_depth[i].format != format)
+		if (gr->zbc_depth[i].format) {
+			if (gr->zbc_depth[i].format != format)
 				continue;
-			if (priv->zbc_depth[i].ds != ds)
+			if (gr->zbc_depth[i].ds != ds)
 				continue;
-			if (priv->zbc_depth[i].l2 != l2) {
+			if (gr->zbc_depth[i].l2 != l2) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -127,11 +128,11 @@
 	if (zbc < 0)
 		return zbc;
 
-	priv->zbc_depth[zbc].format = format;
-	priv->zbc_depth[zbc].ds = ds;
-	priv->zbc_depth[zbc].l2 = l2;
-	ltc->zbc_depth_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_depth(priv, zbc);
+	gr->zbc_depth[zbc].format = format;
+	gr->zbc_depth[zbc].ds = ds;
+	gr->zbc_depth[zbc].l2 = l2;
+	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_depth(gr, zbc);
 	return zbc;
 }
 
@@ -142,7 +143,7 @@
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_color_v0 v0;
 	} *args = data;
@@ -169,7 +170,7 @@
 		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
 		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
 		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
-			ret = gf100_gr_zbc_color_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_color_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			if (ret >= 0) {
@@ -188,7 +189,7 @@
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_depth_v0 v0;
 	} *args = data;
@@ -197,7 +198,7 @@
 	if (nvif_unpack(args->v0, 0, 0, false)) {
 		switch (args->v0.format) {
 		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
-			ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			return (ret >= 0) ? 0 : -ENOSPC;
@@ -223,106 +224,176 @@
 	return -EINVAL;
 }
 
-struct nvkm_ofuncs
-gf100_fermi_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+const struct nvkm_object_func
+gf100_fermi = {
 	.mthd = gf100_fermi_mthd,
 };
 
-static int
-gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
-			       void *pdata, u32 size)
+static void
+gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
-	if (size >= sizeof(u32)) {
-		u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
-		nv_wr32(priv, 0x419e44, data);
-		nv_wr32(priv, 0x419e4c, data);
-		return 0;
-	}
-	return -EINVAL;
+	nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
+	nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
 }
 
-struct nvkm_omthds
-gf100_gr_9097_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+static bool
+gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
+{
+	switch (class & 0x00ff) {
+	case 0x97:
+	case 0xc0:
+		switch (mthd) {
+		case 0x1528:
+			gf100_gr_mthd_set_shader_exceptions(device, data);
+			return true;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return false;
+}
 
-struct nvkm_omthds
-gf100_gr_90c0_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+static int
+gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int c = 0;
 
-struct nvkm_oclass
-gf100_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			*sclass = gr->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
-int
-gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *args, u32 size,
-		      struct nvkm_object **pobject)
+static int
+gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		   int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_vm *vm = nvkm_client(parent)->vm;
-	struct gf100_gr_priv *priv = (void *)engine;
-	struct gf100_gr_data *data = priv->mmio_data;
-	struct gf100_gr_mmio *mmio = priv->mmio_list;
-	struct gf100_gr_chan *chan;
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	struct gf100_gr *gr = chan->gr;
 	int ret, i;
 
-	/* allocate memory for context, and fill with default values */
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
-				     priv->size, 0x100,
-				     NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+			      align, false, parent, pgpuobj);
 	if (ret)
 		return ret;
 
+	nvkm_kmap(*pgpuobj);
+	for (i = 0; i < gr->size; i += 4)
+		nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
+
+	if (!gr->firmware) {
+		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+	} else {
+		nvkm_wo32(*pgpuobj, 0xf4, 0);
+		nvkm_wo32(*pgpuobj, 0xf8, 0);
+		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x1c, 1);
+		nvkm_wo32(*pgpuobj, 0x20, 0);
+		nvkm_wo32(*pgpuobj, 0x28, 0);
+		nvkm_wo32(*pgpuobj, 0x2c, 0);
+	}
+	nvkm_done(*pgpuobj);
+	return 0;
+}
+
+static void *
+gf100_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+		if (chan->data[i].vma.node) {
+			nvkm_vm_unmap(&chan->data[i].vma);
+			nvkm_vm_put(&chan->data[i].vma);
+		}
+		nvkm_memory_del(&chan->data[i].mem);
+	}
+
+	if (chan->mmio_vma.node) {
+		nvkm_vm_unmap(&chan->mmio_vma);
+		nvkm_vm_put(&chan->mmio_vma);
+	}
+	nvkm_memory_del(&chan->mmio);
+	return chan;
+}
+
+static const struct nvkm_object_func
+gf100_gr_chan = {
+	.dtor = gf100_gr_chan_dtor,
+	.bind = gf100_gr_chan_bind,
+};
+
+static int
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	struct gf100_gr_data *data = gr->mmio_data;
+	struct gf100_gr_mmio *mmio = gr->mmio_list;
+	struct gf100_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+
 	/* allocate memory for a "mmio list" buffer that's used by the HUB
 	 * fuc to modify some per-context register settings on first load
 	 * of the context.
 	 */
-	ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
-			      &chan->mmio);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+			      false, &chan->mmio);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
-				 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
-				 &chan->mmio_vma);
+	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
+			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
 	if (ret)
 		return ret;
 
+	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+
 	/* allocate buffers referenced by mmio list */
-	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
-		ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
-				      data->align, 0, &chan->data[i].mem);
+	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      data->size, data->align, false,
+				      &chan->data[i].mem);
 		if (ret)
 			return ret;
 
-		ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
-					 &chan->data[i].vma);
+		ret = nvkm_vm_get(fifoch->vm,
+				  nvkm_memory_size(chan->data[i].mem), 12,
+				  data->access, &chan->data[i].vma);
 		if (ret)
 			return ret;
 
+		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
 		data++;
 	}
 
 	/* finally, fill in the mmio list and point the context at it */
-	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+	nvkm_kmap(chan->mmio);
+	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
 		u32 addr = mmio->addr;
 		u32 data = mmio->data;
 
@@ -331,49 +402,14 @@
 			data |= info >> mmio->shift;
 		}
 
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
 		mmio++;
 	}
-
-	for (i = 0; i < priv->size; i += 4)
-		nv_wo32(chan, i, priv->data[i / 4]);
-
-	if (!priv->firmware) {
-		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
-	} else {
-		nv_wo32(chan, 0xf4, 0);
-		nv_wo32(chan, 0xf8, 0);
-		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x1c, 1);
-		nv_wo32(chan, 0x20, 0);
-		nv_wo32(chan, 0x28, 0);
-		nv_wo32(chan, 0x2c, 0);
-	}
-
+	nvkm_done(chan->mmio);
 	return 0;
 }
 
-void
-gf100_gr_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_gr_chan *chan = (void *)object;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-		nvkm_gpuobj_unmap(&chan->data[i].vma);
-		nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
-	}
-
-	nvkm_gpuobj_unmap(&chan->mmio_vma);
-	nvkm_gpuobj_ref(NULL, &chan->mmio);
-
-	nvkm_gr_context_destroy(&chan->base);
-}
-
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -635,7 +671,7 @@
  ******************************************************************************/
 
 void
-gf100_gr_zbc_init(struct gf100_gr_priv *priv)
+gf100_gr_zbc_init(struct gf100_gr *gr)
 {
 	const u32  zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -645,22 +681,22 @@
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int index;
 
-	if (!priv->zbc_color[0].format) {
-		gf100_gr_zbc_color_get(priv, 1,  & zero[0],   &zero[4]);
-		gf100_gr_zbc_color_get(priv, 2,  &  one[0],    &one[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_0[0],  &f32_0[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_1[0],  &f32_1[4]);
-		gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
-		gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+	if (!gr->zbc_color[0].format) {
+		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]);
+		gf100_gr_zbc_color_get(gr, 2,  &  one[0],    &one[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_0[0],  &f32_0[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]);
+		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
+		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
 	}
 
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_color(priv, index);
+		gf100_gr_zbc_clear_color(gr, index);
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_depth(priv, index);
+		gf100_gr_zbc_clear_depth(gr, index);
 }
 
 /**
@@ -669,8 +705,10 @@
  * progress.
  */
 int
-gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+gf100_gr_wait_idle(struct gf100_gr *gr)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
 	bool gr_enabled, ctxsw_active, gr_busy;
 
@@ -679,24 +717,26 @@
 		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
 		 * up-to-date
 		 */
-		nv_rd32(priv, 0x400700);
+		nvkm_rd32(device, 0x400700);
 
-		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
-		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
-		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
+		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
+		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active))
 			return 0;
 	} while (time_before(jiffies, end_jiffies));
 
-	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
-		 gr_enabled, ctxsw_active, gr_busy);
+	nvkm_error(subdev,
+		   "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		   gr_enabled, ctxsw_active, gr_busy);
 	return -EAGAIN;
 }
 
 void
-gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 
@@ -704,49 +744,54 @@
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 		while (addr < next) {
-			nv_wr32(priv, addr, init->data);
+			nvkm_wr32(device, addr, init->data);
 			addr += init->pitch;
 		}
 	}
 }
 
 void
-gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
 
-	nv_wr32(priv, 0x400208, 0x80000000);
+	nvkm_wr32(device, 0x400208, 0x80000000);
 
 	pack_for_each_init(init, pack, p) {
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x400204, init->data);
+			nvkm_wr32(device, 0x400204, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x400200, addr);
+			nvkm_wr32(device, 0x400200, addr);
 			/**
 			 * Wait for GR to go idle after submitting a
 			 * GO_IDLE bundle
 			 */
 			if ((addr & 0xffff) == 0xe100)
-				gf100_gr_wait_idle(priv);
-			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
+				gf100_gr_wait_idle(gr);
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+					break;
+			);
 			addr += init->pitch;
 		}
 	}
 
-	nv_wr32(priv, 0x400208, 0x00000000);
+	nvkm_wr32(device, 0x400208, 0x00000000);
 }
 
 void
-gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
@@ -757,79 +802,75 @@
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x40448c, init->data);
+			nvkm_wr32(device, 0x40448c, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x404488, ctrl | (addr << 14));
+			nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
 			addr += init->pitch;
 		}
 	}
 }
 
 u64
-gf100_gr_units(struct nvkm_gr *gr)
+gf100_gr_units(struct nvkm_gr *base)
 {
-	struct gf100_gr_priv *priv = (void *)gr;
+	struct gf100_gr *gr = gf100_gr(base);
 	u64 cfg;
 
-	cfg  = (u32)priv->gpc_nr;
-	cfg |= (u32)priv->tpc_total << 8;
-	cfg |= (u64)priv->rop_nr << 32;
+	cfg  = (u32)gr->gpc_nr;
+	cfg |= (u32)gr->tpc_total << 8;
+	cfg |= (u64)gr->rop_nr << 32;
 
 	return cfg;
 }
 
-static const struct nvkm_enum gk104_sked_error[] = {
-	{ 7, "CONSTANT_BUFFER_SIZE" },
-	{ 9, "LOCAL_MEMORY_SIZE_POS" },
-	{ 10, "LOCAL_MEMORY_SIZE_NEG" },
-	{ 11, "WARP_CSTACK_SIZE" },
-	{ 12, "TOTAL_TEMP_SIZE" },
-	{ 13, "REGISTER_COUNT" },
-	{ 18, "TOTAL_THREADS" },
-	{ 20, "PROGRAM_OFFSET" },
-	{ 21, "SHARED_MEMORY_SIZE" },
-	{ 25, "SHARED_CONFIG_TOO_SMALL" },
-	{ 26, "TOTAL_REGISTER_COUNT" },
+static const struct nvkm_bitfield gk104_sked_error[] = {
+	{ 0x00000080, "CONSTANT_BUFFER_SIZE" },
+	{ 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
+	{ 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
+	{ 0x00000800, "WARP_CSTACK_SIZE" },
+	{ 0x00001000, "TOTAL_TEMP_SIZE" },
+	{ 0x00002000, "REGISTER_COUNT" },
+	{ 0x00040000, "TOTAL_THREADS" },
+	{ 0x00100000, "PROGRAM_OFFSET" },
+	{ 0x00200000, "SHARED_MEMORY_SIZE" },
+	{ 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
+	{ 0x04000000, "TOTAL_REGISTER_COUNT" },
 	{}
 };
 
-static const struct nvkm_enum gf100_gpc_rop_error[] = {
-	{ 1, "RT_PITCH_OVERRUN" },
-	{ 4, "RT_WIDTH_OVERRUN" },
-	{ 5, "RT_HEIGHT_OVERRUN" },
-	{ 7, "ZETA_STORAGE_TYPE_MISMATCH" },
-	{ 8, "RT_STORAGE_TYPE_MISMATCH" },
-	{ 10, "RT_LINEAR_MISMATCH" },
+static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
+	{ 0x00000002, "RT_PITCH_OVERRUN" },
+	{ 0x00000010, "RT_WIDTH_OVERRUN" },
+	{ 0x00000020, "RT_HEIGHT_OVERRUN" },
+	{ 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000400, "RT_LINEAR_MISMATCH" },
 	{}
 };
 
 static void
-gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	char error[128];
 	u32 trap[4];
-	int i;
 
-	trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
-	trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
-	trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
-	trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+	trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
+	trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
+	trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
+	trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));
 
-	nv_error(priv, "GPC%d/PROP trap:", gpc);
-	for (i = 0; i <= 29; ++i) {
-		if (!(trap[0] & (1 << i)))
-			continue;
-		pr_cont(" ");
-		nvkm_enum_print(gf100_gpc_rop_error, i);
-	}
-	pr_cont("\n");
+	nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);
 
-	nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n",
-		 trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f,
-		 trap[3] & 0xff);
-	nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+	nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
+			   "format = %x, storage type = %x\n",
+		   gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
+		   (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
@@ -852,401 +893,418 @@
 };
 
 static void
-gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
-	u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
+	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
+	const struct nvkm_enum *warp;
+	char glob[128];
 
-	nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
-	nvkm_bitfield_print(gf100_mp_global_error, gerr);
-	if (werr) {
-		pr_cont(" ");
-		nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
-	}
-	pr_cont("\n");
+	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
+	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
 
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr);
+	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+			   "global %08x [%s] warp %04x [%s]\n",
+		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
 }
 
 static void
-gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));
 
 	if (stat & 0x00000001) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
-		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
+		nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		gf100_gr_trap_mp(priv, gpc, tpc);
+		gf100_gr_trap_mp(gr, gpc, tpc);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
-		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
+		nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
-		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
+		nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
 		stat &= ~0x00000008;
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+		nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
 {
-	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
 	int tpc;
 
 	if (stat & 0x00000001) {
-		gf100_gr_trap_gpc_rop(priv, gpc);
+		gf100_gr_trap_gpc_rop(gr, gpc);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
-		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
+		nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
-		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
+		nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
-		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
+		nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
 		stat &= ~0x00000009;
 	}
 
-	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 		u32 mask = 0x00010000 << tpc;
 		if (stat & mask) {
-			gf100_gr_trap_tpc(priv, gpc, tpc);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+			gf100_gr_trap_tpc(gr, gpc, tpc);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
 			stat &= ~mask;
 		}
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+		nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_intr(struct gf100_gr_priv *priv)
+gf100_gr_trap_intr(struct gf100_gr *gr)
 {
-	u32 trap = nv_rd32(priv, 0x400108);
-	int rop, gpc, i;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 trap = nvkm_rd32(device, 0x400108);
+	int rop, gpc;
 
 	if (trap & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x404000);
-		nv_error(priv, "DISPATCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x404000, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000001);
+		u32 stat = nvkm_rd32(device, 0x404000);
+		nvkm_error(subdev, "DISPATCH %08x\n", stat);
+		nvkm_wr32(device, 0x404000, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000001);
 		trap &= ~0x00000001;
 	}
 
 	if (trap & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x404600);
-		nv_error(priv, "M2MF 0x%08x\n", stat);
-		nv_wr32(priv, 0x404600, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000002);
+		u32 stat = nvkm_rd32(device, 0x404600);
+		nvkm_error(subdev, "M2MF %08x\n", stat);
+		nvkm_wr32(device, 0x404600, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000002);
 		trap &= ~0x00000002;
 	}
 
 	if (trap & 0x00000008) {
-		u32 stat = nv_rd32(priv, 0x408030);
-		nv_error(priv, "CCACHE 0x%08x\n", stat);
-		nv_wr32(priv, 0x408030, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000008);
+		u32 stat = nvkm_rd32(device, 0x408030);
+		nvkm_error(subdev, "CCACHE %08x\n", stat);
+		nvkm_wr32(device, 0x408030, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000008);
 		trap &= ~0x00000008;
 	}
 
 	if (trap & 0x00000010) {
-		u32 stat = nv_rd32(priv, 0x405840);
-		nv_error(priv, "SHADER 0x%08x\n", stat);
-		nv_wr32(priv, 0x405840, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000010);
+		u32 stat = nvkm_rd32(device, 0x405840);
+		nvkm_error(subdev, "SHADER %08x\n", stat);
+		nvkm_wr32(device, 0x405840, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000010);
 		trap &= ~0x00000010;
 	}
 
 	if (trap & 0x00000040) {
-		u32 stat = nv_rd32(priv, 0x40601c);
-		nv_error(priv, "UNK6 0x%08x\n", stat);
-		nv_wr32(priv, 0x40601c, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000040);
+		u32 stat = nvkm_rd32(device, 0x40601c);
+		nvkm_error(subdev, "UNK6 %08x\n", stat);
+		nvkm_wr32(device, 0x40601c, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000040);
 		trap &= ~0x00000040;
 	}
 
 	if (trap & 0x00000080) {
-		u32 stat = nv_rd32(priv, 0x404490);
-		nv_error(priv, "MACRO 0x%08x\n", stat);
-		nv_wr32(priv, 0x404490, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000080);
+		u32 stat = nvkm_rd32(device, 0x404490);
+		nvkm_error(subdev, "MACRO %08x\n", stat);
+		nvkm_wr32(device, 0x404490, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000080);
 		trap &= ~0x00000080;
 	}
 
 	if (trap & 0x00000100) {
-		u32 stat = nv_rd32(priv, 0x407020);
+		u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
+		char sked[128];
 
-		nv_error(priv, "SKED:");
-		for (i = 0; i <= 29; ++i) {
-			if (!(stat & (1 << i)))
-				continue;
-			pr_cont(" ");
-			nvkm_enum_print(gk104_sked_error, i);
-		}
-		pr_cont("\n");
+		nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
+		nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);
 
-		if (stat & 0x3fffffff)
-			nv_wr32(priv, 0x407020, 0x40000000);
-		nv_wr32(priv, 0x400108, 0x00000100);
+		if (stat)
+			nvkm_wr32(device, 0x407020, 0x40000000);
+		nvkm_wr32(device, 0x400108, 0x00000100);
 		trap &= ~0x00000100;
 	}
 
 	if (trap & 0x01000000) {
-		u32 stat = nv_rd32(priv, 0x400118);
-		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+		u32 stat = nvkm_rd32(device, 0x400118);
+		for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
 			u32 mask = 0x00000001 << gpc;
 			if (stat & mask) {
-				gf100_gr_trap_gpc(priv, gpc);
-				nv_wr32(priv, 0x400118, mask);
+				gf100_gr_trap_gpc(gr, gpc);
+				nvkm_wr32(device, 0x400118, mask);
 				stat &= ~mask;
 			}
 		}
-		nv_wr32(priv, 0x400108, 0x01000000);
+		nvkm_wr32(device, 0x400108, 0x01000000);
 		trap &= ~0x01000000;
 	}
 
 	if (trap & 0x02000000) {
-		for (rop = 0; rop < priv->rop_nr; rop++) {
-			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
-			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
-			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+		for (rop = 0; rop < gr->rop_nr; rop++) {
+			u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
+			u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
+			nvkm_error(subdev, "ROP%d %08x %08x\n",
 				 rop, statz, statc);
-			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
 		}
-		nv_wr32(priv, 0x400108, 0x02000000);
+		nvkm_wr32(device, 0x400108, 0x02000000);
 		trap &= ~0x02000000;
 	}
 
 	if (trap) {
-		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
-		nv_wr32(priv, 0x400108, trap);
+		nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
+		nvkm_wr32(device, 0x400108, trap);
 	}
 }
 
 static void
-gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
+gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
 {
-	nv_error(priv, "%06x - done 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x400));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
-		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
-		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	nvkm_error(subdev, "%06x - done %08x\n", base,
+		   nvkm_rd32(device, base + 0x400));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x800),
+		   nvkm_rd32(device, base + 0x804),
+		   nvkm_rd32(device, base + 0x808),
+		   nvkm_rd32(device, base + 0x80c));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x810),
+		   nvkm_rd32(device, base + 0x814),
+		   nvkm_rd32(device, base + 0x818),
+		   nvkm_rd32(device, base + 0x81c));
 }
 
 void
-gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_debug(struct gf100_gr *gr)
 {
-	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
 	u32 gpc;
 
-	gf100_gr_ctxctl_debug_unit(priv, 0x409000);
+	gf100_gr_ctxctl_debug_unit(gr, 0x409000);
 	for (gpc = 0; gpc < gpcnr; gpc++)
-		gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+		gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
 }
 
 static void
-gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_isr(struct gf100_gr *gr)
 {
-	u32 stat = nv_rd32(priv, 0x409c18);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x409c18);
 
 	if (stat & 0x00000001) {
-		u32 code = nv_rd32(priv, 0x409814);
+		u32 code = nvkm_rd32(device, 0x409814);
 		if (code == E_BAD_FWMTHD) {
-			u32 class = nv_rd32(priv, 0x409808);
-			u32  addr = nv_rd32(priv, 0x40980c);
+			u32 class = nvkm_rd32(device, 0x409808);
+			u32  addr = nvkm_rd32(device, 0x40980c);
 			u32  subc = (addr & 0x00070000) >> 16;
 			u32  mthd = (addr & 0x00003ffc);
-			u32  data = nv_rd32(priv, 0x409810);
+			u32  data = nvkm_rd32(device, 0x409810);
 
-			nv_error(priv, "FECS MTHD subc %d class 0x%04x "
-				       "mthd 0x%04x data 0x%08x\n",
-				 subc, class, mthd, data);
+			nvkm_error(subdev, "FECS MTHD subc %d class %04x "
+					   "mthd %04x data %08x\n",
+				   subc, class, mthd, data);
 
-			nv_wr32(priv, 0x409c20, 0x00000001);
+			nvkm_wr32(device, 0x409c20, 0x00000001);
 			stat &= ~0x00000001;
 		} else {
-			nv_error(priv, "FECS ucode error %d\n", code);
+			nvkm_error(subdev, "FECS ucode error %d\n", code);
 		}
 	}
 
 	if (stat & 0x00080000) {
-		nv_error(priv, "FECS watchdog timeout\n");
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, 0x00080000);
+		nvkm_error(subdev, "FECS watchdog timeout\n");
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "FECS 0x%08x\n", stat);
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, stat);
+		nvkm_error(subdev, "FECS %08x\n", stat);
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, stat);
 	}
 }
 
 static void
-gf100_gr_intr(struct nvkm_subdev *subdev)
+gf100_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct gf100_gr_priv *priv = (void *)subdev;
-	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 mthd = (addr & 0x00003ffc);
 	u32 subc = (addr & 0x00070000) >> 16;
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 code = nv_rd32(priv, 0x400110);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 code = nvkm_rd32(device, 0x400110);
 	u32 class;
-	int chid;
+	const char *name = "unknown";
+	int chid = -1;
 
-	if (nv_device(priv)->card_type < NV_E0 || subc < 4)
-		class = nv_rd32(priv, 0x404200 + (subc * 4));
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
+	}
+
+	if (device->card_type < NV_E0 || subc < 4)
+		class = nvkm_rd32(device, 0x404200 + (subc * 4));
 	else
 		class = 0x0000;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
 	if (stat & 0x00000001) {
 		/*
 		 * notifier interrupt, only needed for cyclestats
 		 * can be safely ignored
 		 */
-		nv_wr32(priv, 0x400100, 0x00000001);
+		nvkm_wr32(device, 0x400100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (!handle || nv_call(handle->object, mthd, data)) {
-			nv_error(priv,
-				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-				 chid, inst << 12, nvkm_client_name(engctx),
-				 subc, class, mthd, data);
+		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
+			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+				   chid, inst << 12, name, subc,
+				   class, mthd, data);
 		}
-		nvkm_handle_put(handle);
-		nv_wr32(priv, 0x400100, 0x00000010);
+		nvkm_wr32(device, 0x400100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000020) {
-		nv_error(priv,
-			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 12, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00000020);
+		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
+			   "subc %d class %04x mthd %04x data %08x\n",
+			   chid, inst << 12, name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00000020);
 		stat &= ~0x00000020;
 	}
 
 	if (stat & 0x00100000) {
-		nv_error(priv, "DATA_ERROR [");
-		nvkm_enum_print(nv50_data_error_names, code);
-		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			chid, inst << 12, nvkm_client_name(engctx), subc,
-			class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00100000);
+		const struct nvkm_enum *en =
+			nvkm_enum_find(nv50_data_error_names, code);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+			   code, en ? en->name : "", chid, inst << 12,
+			   name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00100000);
 		stat &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
-			 nvkm_client_name(engctx));
-		gf100_gr_trap_intr(priv);
-		nv_wr32(priv, 0x400100, 0x00200000);
+		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
+			   chid, inst << 12, name);
+		gf100_gr_trap_intr(gr);
+		nvkm_wr32(device, 0x400100, 0x00200000);
 		stat &= ~0x00200000;
 	}
 
 	if (stat & 0x00080000) {
-		gf100_gr_ctxctl_isr(priv);
-		nv_wr32(priv, 0x400100, 0x00080000);
+		gf100_gr_ctxctl_isr(gr);
+		nvkm_wr32(device, 0x400100, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, 0x400100, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x400100, stat);
 	}
 
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
 void
-gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
+gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
 		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
-	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
 	for (i = 0; i < data->size / 4; i++)
-		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+		nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
 
-	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
 	for (i = 0; i < code->size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
-		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+			nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
+		nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
 	}
 
 	/* code must be padded to 0x40 words */
 	for (; i & 0x3f; i++)
-		nv_wr32(priv, fuc_base + 0x0184, 0);
+		nvkm_wr32(device, fuc_base + 0x0184, 0);
 }
 
 static void
-gf100_gr_init_csdata(struct gf100_gr_priv *priv,
+gf100_gr_init_csdata(struct gf100_gr *gr,
 		     const struct gf100_gr_pack *pack,
 		     u32 falcon, u32 starstar, u32 base)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *iter;
 	const struct gf100_gr_init *init;
 	u32 addr = ~0, prev = ~0, xfer = 0;
 	u32 star, temp;
 
-	nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
-	star = nv_rd32(priv, falcon + 0x01c4);
-	temp = nv_rd32(priv, falcon + 0x01c4);
+	nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
+	star = nvkm_rd32(device, falcon + 0x01c4);
+	temp = nvkm_rd32(device, falcon + 0x01c4);
 	if (temp > star)
 		star = temp;
-	nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);
 
 	pack_for_each_init(init, iter, pack) {
 		u32 head = init->addr - base;
@@ -1255,7 +1313,7 @@
 			if (head != prev + 4 || xfer >= 32) {
 				if (xfer) {
 					u32 data = ((--xfer << 26) | addr);
-					nv_wr32(priv, falcon + 0x01c4, data);
+					nvkm_wr32(device, falcon + 0x01c4, data);
 					star += 4;
 				}
 				addr = head;
@@ -1267,157 +1325,166 @@
 		}
 	}
 
-	nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
-	nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
-	nv_wr32(priv, falcon + 0x01c4, star + 4);
+	nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
+	nvkm_wr32(device, falcon + 0x01c4, star + 4);
 }
 
 int
-gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
-	struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i;
 
-	if (priv->firmware) {
+	if (gr->firmware) {
 		/* load fuc microcode */
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-		gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
-						 &priv->fuc409d);
-		gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
-						 &priv->fuc41ad);
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+		nvkm_mc_unk260(device->mc, 0);
+		gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
+		gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+		nvkm_mc_unk260(device->mc, 1);
 
 		/* start both of them running */
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x41a10c, 0x00000000);
-		nv_wr32(priv, 0x40910c, 0x00000000);
-		nv_wr32(priv, 0x41a100, 0x00000002);
-		nv_wr32(priv, 0x409100, 0x00000002);
-		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-			nv_warn(priv, "0x409800 wait failed\n");
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x7fffffff);
-		nv_wr32(priv, 0x409504, 0x00000021);
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000010);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x10 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x41a10c, 0x00000000);
+		nvkm_wr32(device, 0x40910c, 0x00000000);
+		nvkm_wr32(device, 0x41a100, 0x00000002);
+		nvkm_wr32(device, 0x409100, 0x00000002);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000001)
+				break;
+		) < 0)
 			return -EBUSY;
-		}
-		priv->size = nv_rd32(priv, 0x409800);
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000016);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x16 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x7fffffff);
+		nvkm_wr32(device, 0x409504, 0x00000021);
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000010);
+		if (nvkm_msec(device, 2000,
+			if ((gr->size = nvkm_rd32(device, 0x409800)))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000025);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x25 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000016);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000025);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
+			return -EBUSY;
+
+		if (device->chipset >= 0xe0) {
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000030);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409810, 0xb00095c8);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000031);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409810, 0x00080420);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000032);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409614, 0x00000070);
+			nvkm_wr32(device, 0x409614, 0x00000770);
+			nvkm_wr32(device, 0x40802c, 0x00000001);
 		}
 
-		if (nv_device(priv)->chipset >= 0xe0) {
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000030);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x30 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409810, 0xb00095c8);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000031);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x31 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409810, 0x00080420);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000032);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x32 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409614, 0x00000070);
-			nv_wr32(priv, 0x409614, 0x00000770);
-			nv_wr32(priv, 0x40802c, 0x00000001);
-		}
-
-		if (priv->data == NULL) {
-			int ret = gf100_grctx_generate(priv);
+		if (gr->data == NULL) {
+			int ret = gf100_grctx_generate(gr);
 			if (ret) {
-				nv_error(priv, "failed to construct context\n");
+				nvkm_error(subdev, "failed to construct context\n");
 				return ret;
 			}
 		}
 
 		return 0;
 	} else
-	if (!oclass->fecs.ucode) {
+	if (!gr->func->fecs.ucode) {
 		return -ENOSYS;
 	}
 
 	/* load HUB microcode */
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-	nv_wr32(priv, 0x4091c0, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
+	nvkm_mc_unk260(device->mc, 0);
+	nvkm_wr32(device, 0x4091c0, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x409180, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x409180, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x409188, i >> 6);
-		nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x409188, i >> 6);
+		nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
 	}
 
 	/* load GPC microcode */
-	nv_wr32(priv, 0x41a1c0, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]);
+	nvkm_wr32(device, 0x41a1c0, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x41a180, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x41a180, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x41a188, i >> 6);
-		nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x41a188, i >> 6);
+		nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
 	}
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	nvkm_mc_unk260(device->mc, 1);
 
 	/* load register lists */
-	gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
-	gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
-	gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
-	gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
+	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
+	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
+	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
 
 	/* start HUB ucode running, it'll init the GPCs */
-	nv_wr32(priv, 0x40910c, 0x00000000);
-	nv_wr32(priv, 0x409100, 0x00000002);
-	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
-		nv_error(priv, "HUB_INIT timed out\n");
-		gf100_gr_ctxctl_debug(priv);
+	nvkm_wr32(device, 0x40910c, 0x00000000);
+	nvkm_wr32(device, 0x409100, 0x00000002);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x80000000)
+			break;
+	) < 0) {
+		gf100_gr_ctxctl_debug(gr);
 		return -EBUSY;
 	}
 
-	priv->size = nv_rd32(priv, 0x409804);
-	if (priv->data == NULL) {
-		int ret = gf100_grctx_generate(priv);
+	gr->size = nvkm_rd32(device, 0x409804);
+	if (gr->data == NULL) {
+		int ret = gf100_grctx_generate(gr);
 		if (ret) {
-			nv_error(priv, "failed to construct context\n");
+			nvkm_error(subdev, "failed to construct context\n");
 			return ret;
 		}
 	}
@@ -1425,143 +1492,160 @@
 	return 0;
 }
 
-int
-gf100_gr_init(struct nvkm_object *object)
+static int
+gf100_gr_oneinit(struct nvkm_gr *base)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, rop;
-	int ret, i;
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i, j;
 
-	ret = nvkm_gr_init(&priv->base);
+	nvkm_pmu_pgob(device->pmu, false);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b4);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b8);
+	if (ret)
+		return ret;
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	nvkm_kmap(gr->unk4188b4);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+	nvkm_done(gr->unk4188b4);
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+	nvkm_kmap(gr->unk4188b8);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+	nvkm_done(gr->unk4188b8);
 
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	if (nv_device(priv)->chipset != 0xd7)
-		nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
-	else
-		nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-
-	nv_wr32(priv, 0x409c24, 0x000f0000);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x40601c, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+	gr->gpc_nr =  nvkm_rd32(device, 0x409604) & 0x0000001f;
+	for (i = 0; i < gr->gpc_nr; i++) {
+		gr->tpc_nr[i]  = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+		gr->tpc_total += gr->tpc_nr[i];
+		gr->ppc_nr[i]  = gr->func->ppc_nr;
+		for (j = 0; j < gr->ppc_nr[i]; j++) {
+			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			gr->ppc_tpc_nr[i][j] = hweight8(mask);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	/*XXX: these need figuring out... though it might not even matter */
+	switch (device->chipset) {
+	case 0xc0:
+		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+			gr->magic_not_rop_nr = 0x07;
+		} else
+		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+			gr->magic_not_rop_nr = 0x05;
+		} else
+		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+			gr->magic_not_rop_nr = 0x06;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc1: /* 2/0/0/0, 1 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc8: /* 4/4/3/4, 5 */
+		gr->magic_not_rop_nr = 0x06;
+		break;
+	case 0xce: /* 4/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xcf: /* 4/0/0/0, 3 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xd7:
+	case 0xd9: /* 1/0/0/0, 1 */
+	case 0xea: /* gk20a */
+	case 0x12b: /* gm20b */
+		gr->magic_not_rop_nr = 0x01;
+		break;
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
-
-	nv_wr32(priv, 0x400054, 0x34ce3464);
-
-	gf100_gr_zbc_init(priv);
-
-	return gf100_gr_init_ctxctl(priv);
+	return 0;
 }
 
-static void
+int
+gf100_gr_init_(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+	return gr->func->init(gr);
+}
+
+void
 gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
 {
 	kfree(fuc->data);
 	fuc->data = NULL;
 }
 
+void *
+gf100_gr_dtor(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+
+	if (gr->func->dtor)
+		gr->func->dtor(gr);
+	kfree(gr->data);
+
+	gf100_gr_dtor_fw(&gr->fuc409c);
+	gf100_gr_dtor_fw(&gr->fuc409d);
+	gf100_gr_dtor_fw(&gr->fuc41ac);
+	gf100_gr_dtor_fw(&gr->fuc41ad);
+
+	nvkm_memory_del(&gr->unk4188b8);
+	nvkm_memory_del(&gr->unk4188b4);
+	return gr;
+}
+
+static const struct nvkm_gr_func
+gf100_gr_ = {
+	.dtor = gf100_gr_dtor,
+	.oneinit = gf100_gr_oneinit,
+	.init = gf100_gr_init_,
+	.intr = gf100_gr_intr,
+	.units = gf100_gr_units,
+	.chan_new = gf100_gr_chan_new,
+	.object_get = gf100_gr_object_get,
+};
+
 int
-gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
+gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 		 struct gf100_gr_fuc *fuc)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
-	char f[32];
+	char f[64];
+	char cname[16];
 	int ret;
+	int i;
 
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-	ret = request_firmware(&fw, f, nv_device_base(device));
+	/* Convert device name to lowercase */
+	strncpy(cname, device->chip->name, sizeof(cname));
+	cname[sizeof(cname) - 1] = '\0';
+	i = strlen(cname);
+	while (i) {
+		--i;
+		cname[i] = tolower(cname[i]);
+	}
+
+	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+	ret = request_firmware(&fw, f, device->dev);
 	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, nv_device_base(device));
-		if (ret) {
-			nv_error(priv, "failed to load %s\n", fwname);
-			return ret;
-		}
+		nvkm_error(subdev, "failed to load %s\n", fwname);
+		return ret;
 	}
 
 	fuc->size = fw->size;
@@ -1570,126 +1654,150 @@
 	return (fuc->data != NULL) ? 0 : -ENOMEM;
 }
 
-void
-gf100_gr_dtor(struct nvkm_object *object)
+int
+gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct gf100_gr *gr)
 {
-	struct gf100_gr_priv *priv = (void *)object;
+	int ret;
 
-	kfree(priv->data);
+	gr->func = func;
+	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
+				    func->fecs.ucode == NULL);
 
-	gf100_gr_dtor_fw(&priv->fuc409c);
-	gf100_gr_dtor_fw(&priv->fuc409d);
-	gf100_gr_dtor_fw(&priv->fuc41ac);
-	gf100_gr_dtor_fw(&priv->fuc41ad);
+	ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+			   gr->firmware || func->fecs.ucode != NULL,
+			   &gr->base);
+	if (ret)
+		return ret;
 
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b4);
+	if (gr->firmware) {
+		nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
+		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+			return -ENODEV;
+	}
 
-	nvkm_gr_destroy(&priv->base);
+	return 0;
 }
 
 int
-gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *bclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
 {
-	struct gf100_gr_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_gr_priv *priv;
-	bool use_ext_fw, enable;
-	int ret, i, j;
+	struct gf100_gr *gr;
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	return gf100_gr_ctor(func, device, index, gr);
+}
 
-	use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
-				  oclass->fecs.ucode == NULL);
-	enable = use_ext_fw || oclass->fecs.ucode != NULL;
+int
+gf100_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc, rop;
+	int i;
 
-	ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_subdev(priv)->unit = 0x08001000;
-	nv_subdev(priv)->intr = gf100_gr_intr;
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	priv->base.units = gf100_gr_units;
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
-	if (use_ext_fw) {
-		nv_info(priv, "using external firmware\n");
-		if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
-		    gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
-			return -ENODEV;
-		priv->firmware = true;
+		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b4);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b8);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(priv->unk4188b4, i, 0x00000010);
-		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
-	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
-	for (i = 0; i < priv->gpc_nr; i++) {
-		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
-		priv->tpc_total += priv->tpc_nr[i];
-		priv->ppc_nr[i]  = oclass->ppc_nr;
-		for (j = 0; j < priv->ppc_nr[i]; j++) {
-			u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
-			priv->ppc_tpc_nr[i][j] = hweight8(mask);
+	if (device->chipset != 0xd7)
+		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+	else
+		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x40601c, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	/*XXX: these need figuring out... though it might not even matter */
-	switch (nv_device(priv)->chipset) {
-	case 0xc0:
-		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			priv->magic_not_rop_nr = 0x07;
-		} else
-		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			priv->magic_not_rop_nr = 0x05;
-		} else
-		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			priv->magic_not_rop_nr = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		priv->magic_not_rop_nr = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xd7:
-	case 0xd9: /* 1/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_engine(priv)->cclass = *oclass->cclass;
-	nv_engine(priv)->sclass =  oclass->sclass;
-	return 0;
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgf100.fuc3.h"
@@ -1712,18 +1820,24 @@
 	.data.size = sizeof(gf100_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf100_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf100_grctx_oclass,
-	.sclass =  gf100_gr_sclass,
+static const struct gf100_gr_func
+gf100_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf100_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf100_gr, device, index, pgr);
+}
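
The waits converted in this file all share the nvkm_msec() shape from
<subdev/timer.h>: the macro re-runs its body until the body executes
`break`, and the whole expression evaluates negative once the timeout
expires first. A rough userspace approximation of that pattern (a sketch
only -- the real macro reads the GPU timer through its device argument,
and the names poll_msec, hw_ready and wait_for_fecs below are invented
for illustration):

	#include <errno.h>
	#include <time.h>

	/*
	 * Illustrative stand-in for nvkm_msec(), using clock() instead
	 * of the GPU timer.  Requires GNU C statement expressions, as
	 * the kernel's own macro does.
	 */
	#define poll_msec(ms, body) ({                                      \
		clock_t _t0 = clock();                                      \
		long _taken;                                                \
		do {                                                        \
			body   /* may `break` once the condition is met */  \
			_taken = (clock() - _t0) * 1000 / CLOCKS_PER_SEC;   \
		} while (_taken < (ms));                                    \
		_taken = (clock() - _t0) * 1000 / CLOCKS_PER_SEC;           \
		_taken >= (ms) ? -1L : _taken;                              \
	})

	static int hw_ready; /* stands in for nvkm_rd32(device, 0x409800) */

	int
	wait_for_fecs(void)
	{
		/* Same shape as the converted waits above. */
		if (poll_msec(2000,
			if (hw_ready & 0x00000001)
				break;
		) < 0)
			return -EBUSY;
		return 0;
	}
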
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c9533fd..4611961 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -21,11 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#ifndef __NVC0_GR_H__
-#define __NVC0_GR_H__
-#include <engine/gr.h>
+#ifndef __GF100_GR_H__
+#define __GF100_GR_H__
+#define gf100_gr(p) container_of((p), struct gf100_gr, base)
+#include "priv.h"
 
+#include <core/gpuobj.h>
 #include <subdev/ltc.h>
+#include <subdev/mmu.h>
 
 #define GPC_MAX 32
 #define TPC_MAX (GPC_MAX * 8)
@@ -67,7 +70,8 @@
 	u32 l2;
 };
 
-struct gf100_gr_priv {
+struct gf100_gr {
+	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
 
 	struct gf100_gr_fuc fuc409c;
@@ -76,6 +80,15 @@
 	struct gf100_gr_fuc fuc41ad;
 	bool firmware;
 
+	/*
+	 * Used if the register packs are loaded from NVIDIA fw instead of
+	 * using hardcoded arrays.
+	 */
+	struct gf100_gr_pack *fuc_sw_nonctx;
+	struct gf100_gr_pack *fuc_sw_ctx;
+	struct gf100_gr_pack *fuc_bundle;
+	struct gf100_gr_pack *fuc_method;
+
 	struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
 	struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 
@@ -86,8 +99,8 @@
 	u8 ppc_nr[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
-	struct nvkm_gpuobj *unk4188b4;
-	struct nvkm_gpuobj *unk4188b8;
+	struct nvkm_memory *unk4188b4;
+	struct nvkm_memory *unk4188b8;
 
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
@@ -97,48 +110,65 @@
 	u8 magic_not_rop_nr;
 };
 
-struct gf100_gr_chan {
-	struct nvkm_gr_chan base;
+int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct gf100_gr *);
+int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void *gf100_gr_dtor(struct nvkm_gr *);
 
-	struct nvkm_gpuobj *mmio;
+struct gf100_gr_func {
+	void (*dtor)(struct gf100_gr *);
+	int (*init)(struct gf100_gr *);
+	void (*init_gpc_mmu)(struct gf100_gr *);
+	void (*set_hww_esr_report_mask)(struct gf100_gr *);
+	const struct gf100_gr_pack *mmio;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} fecs;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} gpccs;
+	int ppc_nr;
+	const struct gf100_grctx_func *grctx;
+	struct nvkm_sclass sclass[];
+};
+
+int gf100_gr_init(struct gf100_gr *);
+
+int gk104_gr_init(struct gf100_gr *);
+
+int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void gk20a_gr_dtor(struct gf100_gr *);
+int gk20a_gr_init(struct gf100_gr *);
+
+int gm204_gr_init(struct gf100_gr *);
+
+#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+
+struct gf100_gr_chan {
+	struct nvkm_object object;
+	struct gf100_gr *gr;
+
+	struct nvkm_memory *mmio;
 	struct nvkm_vma mmio_vma;
 	int mmio_nr;
+
 	struct {
-		struct nvkm_gpuobj *mem;
+		struct nvkm_memory *mem;
 		struct nvkm_vma vma;
 	} data[4];
 };
 
-int  gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, void *, u32,
-			     struct nvkm_object **);
-void gf100_gr_context_dtor(struct nvkm_object *);
+void gf100_gr_ctxctl_debug(struct gf100_gr *);
 
-void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
-
+void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
+int  gf100_gr_ctor_fw(struct gf100_gr *, const char *,
+		      struct gf100_gr_fuc *);
 u64  gf100_gr_units(struct nvkm_gr *);
-int  gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-void gf100_gr_dtor(struct nvkm_object *);
-int  gf100_gr_init(struct nvkm_object *);
-void gf100_gr_zbc_init(struct gf100_gr_priv *);
+void gf100_gr_zbc_init(struct gf100_gr *);
 
-int  gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-int  gk104_gr_init(struct nvkm_object *);
-
-int  gm204_gr_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs gf100_fermi_ofuncs;
-
-extern struct nvkm_oclass gf100_gr_sclass[];
-extern struct nvkm_omthds gf100_gr_9097_omthds[];
-extern struct nvkm_omthds gf100_gr_90c0_omthds[];
-extern struct nvkm_oclass gf110_gr_sclass[];
-extern struct nvkm_oclass gk110_gr_sclass[];
-extern struct nvkm_oclass gm204_gr_sclass[];
+extern const struct nvkm_object_func gf100_fermi;
 
 struct gf100_gr_init {
 	u32 addr;
@@ -167,25 +197,11 @@
 extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
 extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
 
-struct gf100_gr_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **cclass;
-	struct nvkm_oclass *sclass;
-	const struct gf100_gr_pack *mmio;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} fecs;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} gpccs;
-	int ppc_nr;
-};
-
-int  gf100_gr_wait_idle(struct gf100_gr_priv *);
-void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-int  gf100_gr_init_ctxctl(struct gf100_gr_priv *);
+int  gf100_gr_wait_idle(struct gf100_gr *);
+void gf100_gr_mmio(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
+int  gf100_gr_init_ctxctl(struct gf100_gr *);
 
 /* register init value lists */
 
@@ -261,7 +277,7 @@
 extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
-void gm107_gr_init_bios(struct gf100_gr_priv *);
+void gm107_gr_init_bios(struct gf100_gr *);
 
 extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
 #endif
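
gf100_gr_ctor_fw(), declared above, now derives firmware paths from the
chip name ("nvidia/<chip>/<fw>.bin") instead of the old
"nouveau/nv%02x_<fw>" scheme. A standalone sketch of just the name
construction, mirroring the gf100.c hunk (fw_path and the "GK20A" input
are invented for the example; the kernel takes the name from
device->chip->name and hands the result to request_firmware()):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	static void
	fw_path(char *f, size_t len, const char *chip, const char *fwname)
	{
		char cname[16];
		int i;

		/* Lowercase a bounded copy of the chip name, as the driver does. */
		strncpy(cname, chip, sizeof(cname));
		cname[sizeof(cname) - 1] = '\0';
		i = strlen(cname);
		while (i) {
			--i;
			cname[i] = tolower((unsigned char)cname[i]);
		}

		snprintf(f, len, "nvidia/%s/%s.bin", cname, fwname);
	}

	int
	main(void)
	{
		char f[64];
		fw_path(f, sizeof(f), "GK20A", "fecs_inst");
		printf("%s\n", f); /* prints: nvidia/gk20a/fecs_inst.bin */
		return 0;
	}
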
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 20d3b85..8f253e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -110,18 +112,24 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf104_grctx_oclass,
-	.sclass = gf100_gr_sclass,
+static const struct gf100_gr_func
+gf104_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8df7342..815a5aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -27,20 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf108_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -117,18 +103,25 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf108_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf108_grctx_oclass,
-	.sclass = gf108_gr_sclass,
+static const struct gf100_gr_func
+gf108_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf108_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf108_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index ef76e2d..d131874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -27,21 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gf110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -99,18 +84,26 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf110_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf110_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 871ac5f..28483d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -118,19 +120,27 @@
 	.data.size = sizeof(gf117_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf117_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf117_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf117_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gf117_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf117_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index e6dd651..9811a72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -173,18 +175,26 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf119_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf119_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf119_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf119_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 46f7844..abf5492 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,24 +24,9 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-#include <subdev/pmu.h>
-
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,132 +178,112 @@
  ******************************************************************************/
 
 int
-gk104_gr_init(struct nvkm_object *object)
+gk104_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	struct nvkm_pmu *pmu = nvkm_pmu(priv);
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int ret, i;
+	int i;
 
-	if (pmu)
-		pmu->pgob(pmu, false);
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
-
-	gf100_gr_mmio(priv, oclass->mmio);
-
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
 
-	nv_wr32(priv, 0x409ffc, 0x00000000);
-	nv_wr32(priv, 0x409c14, 0x00003e3e);
-	nv_wr32(priv, 0x409c24, 0x000f0001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+	nvkm_wr32(device, 0x409ffc, 0x00000000);
+	nvkm_wr32(device, 0x409c14, 0x00003e3e);
+	nvkm_wr32(device, 0x409c24, 0x000f0001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x34ce3464);
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
-}
-
-int
-gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(parent);
-	if (pmu)
-		pmu->pgob(pmu, false);
-	return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgk104.fuc3.h"
@@ -341,19 +306,25 @@
 	.data.size = sizeof(gk104_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk104_grctx_oclass,
-	.sclass = gk104_gr_sclass,
+static const struct gf100_gr_func
+gk104_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_A, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk104_gr, device, index, pgr);
+}
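
Every per-chip file in this series now reduces to the recipe seen here:
a const gf100_gr_func table naming the hooks, ucode, register pack,
grctx and classes, plus a one-line constructor that forwards to
gf100_gr_new_(). For some hypothetical future chip (every "gfXXX" name
below is a placeholder, not part of this patch, and assumes a register
pack and grctx defined elsewhere):

	static const struct gf100_gr_func
	gfXXX_gr = {
		.init = gf100_gr_init,          /* reuse the common init */
		.mmio = gfXXX_gr_pack_mmio,     /* placeholder register list */
		.fecs.ucode = &gf100_gr_fecs_ucode,
		.gpccs.ucode = &gf100_gr_gpccs_ucode,
		.grctx = &gfXXX_grctx,          /* placeholder context funcs */
		.sclass = {
			{ -1, -1, FERMI_TWOD_A },
			{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
			{ -1, -1, FERMI_A, &gf100_fermi },
			{ -1, -1, FERMI_COMPUTE_A },
			{}
		}
	};

	int
	gfXXX_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
	{
		return gf100_gr_new_(&gfXXX_gr, device, index, pgr);
	}
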
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4cd8e5..32aa294 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gk110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,19 +180,25 @@
 	.data.size = sizeof(gk110_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 9ff9eab..22f88af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -98,19 +100,25 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110b_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110b_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 85f44a3..ee7554f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk208_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -172,19 +159,25 @@
 	.data.size = sizeof(gk208_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk208_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk208_grctx_oclass,
-	.sclass =  gk208_gr_sclass,
+static const struct gf100_gr_func
+gk208_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk208_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk208_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 40ff5eb..b8758d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,28 +22,335 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/timer.h>
+
 #include <nvif/class.h>
 
-static struct nvkm_oclass
-gk20a_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
+static void
+gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
+{
+	vfree(pack);
+}
+
+struct gk20a_fw_av
+{
+	u32 addr;
+	u32 data;
 };
 
-struct nvkm_oclass *
-gk20a_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk20a_grctx_oclass,
-	.sclass = gk20a_gr_sclass,
-	.mmio = gk104_gr_pack_mmio,
+static struct gf100_gr_pack *
+gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i;
+
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+struct gk20a_fw_aiv
+{
+	u32 addr;
+	u32 index;
+	u32 data;
+};
+
+static struct gf100_gr_pack *
+gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+	int i;
+
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	/* We don't expect to initialize more than 16 classes here... */
+	static const unsigned int max_classes = 16;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i, classidx = 0;
+	u32 prevclass = 0;
+
+	pack = vzalloc((sizeof(*pack) * max_classes) +
+		       (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + max_classes);
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+		u32 class = av->addr & 0xffff;
+		u32 addr = (av->addr & 0xffff0000) >> 14;
+
+		if (prevclass != class) {
+			pack[classidx].init = ent;
+			pack[classidx].type = class;
+			prevclass = class;
+			if (++classidx >= max_classes) {
+				vfree(pack);
+				return ERR_PTR(-ENOSPC);
+			}
+		}
+
+		ent->addr = addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+static int
+gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void
+gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x419e44, 0x1ffffe);
+	nvkm_wr32(device, 0x419e4c, 0x7f);
+}
+
+int
+gk20a_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc;
+	int ret, i;
+
+	/* Clear SCC RAM */
+	nvkm_wr32(device, 0x40802c, 0x1);
+
+	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+	ret = gk20a_gr_wait_mem_scrubbing(gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_wait_idle(gr);
+	if (ret)
+		return ret;
+
+	/* MMU debug buffer */
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+
+	if (gr->func->init_gpc_mmu)
+		gr->func->init_gpc_mmu(gr);
+
+	/* Set the PE as stream master */
+	nvkm_mask(device, 0x503018, 0x1, 0x1);
+
+	/* Zcull init */
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			  gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	/* Enable FIFO access */
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	/* Enable interrupts */
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	/* Enable FECS error interrupts */
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+
+	/* Enable hardware warning exceptions */
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+
+	if (gr->func->set_hww_esr_report_mask)
+		gr->func->set_hww_esr_report_mask(gr);
+
+	/* Enable TPC exceptions per GPC */
+	nvkm_wr32(device, 0x419d0c, 0x2);
+	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);
+
+	/* Reset and enable all exceptions */
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
+}
+
+void
+gk20a_gr_dtor(struct gf100_gr *gr)
+{
+	gk20a_gr_init_dtor(gr->fuc_method);
+	gk20a_gr_init_dtor(gr->fuc_bundle);
+	gk20a_gr_init_dtor(gr->fuc_sw_ctx);
+	gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
+}
+
+int
+gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
+{
+	struct gf100_gr_fuc fuc;
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = gf100_gr_ctor(func, device, index, gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_nonctx))
+		return PTR_ERR(gr->fuc_sw_nonctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_ctx))
+		return PTR_ERR(gr->fuc_sw_ctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_bundle))
+		return PTR_ERR(gr->fuc_bundle);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_method = gk20a_gr_av_to_method(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_method))
+		return PTR_ERR(gr->fuc_method);
+
+	return 0;
+}
+
+static const struct gf100_gr_func
+gk20a_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk20a_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_C, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+}
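[Editor's note: the three loaders added above (av_to_init, aiv_to_init,
av_to_method) share one allocation trick: a single vzalloc() holds the
gf100_gr_pack header array immediately followed by the gf100_gr_init entries
it points at, and the zero-filled trailing pack/init slots (the "* 2" and
"nent + 1" in the allocations) act as list terminators.  A minimal sketch of
walking such a list; walk_pack() itself is hypothetical, and it assumes the
surrounding file's headers (the field names are the ones populated above):

	static void
	walk_pack(const struct gf100_gr_pack *pack)
	{
		const struct gf100_gr_pack *p;
		const struct gf100_gr_init *init;

		for (p = pack; p->init; p++)			/* zeroed pack ends list */
			for (init = p->init; init->count; init++)	/* zeroed init ends pack */
				pr_info("%06x <- %08x\n", init->addr, init->data);
	}
]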
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5ebd45..56e9602 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -30,19 +30,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -292,7 +279,7 @@
  ******************************************************************************/
 
 void
-gm107_gr_init_bios(struct gf100_gr_priv *priv)
+gm107_gr_init_bios(struct gf100_gr *gr)
 {
 	static const struct {
 		u32 ctrl;
@@ -304,7 +291,8 @@
 		{ 0x419af0, 0x419af4 },
 		{ 0x419af8, 0x419afc },
 	};
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_P0260E infoE;
 	struct nvbios_P0260X infoX;
 	int E = -1, X;
@@ -312,124 +300,119 @@
 
 	while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
 		if (X = -1, E < ARRAY_SIZE(regs)) {
-			nv_wr32(priv, regs[E].ctrl, infoE.data);
+			nvkm_wr32(device, regs[E].ctrl, infoE.data);
 			while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
-				nv_wr32(priv, regs[E].data, infoX.data);
+				nvkm_wr32(device, regs[E].data, infoX.data);
 		}
 	}
 }
 
 int
-gm107_gr_init(struct nvkm_object *object)
+gm107_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gm107_gr_init_bios(gr);
 
-	gm107_gr_init_bios(priv);
-
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
 
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgm107.fuc5.h"
@@ -452,19 +435,25 @@
 	.data.size = sizeof(gm107_grgpc_data),
 };
 
-struct nvkm_oclass *
-gm107_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm107_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm107_grctx_oclass,
-	.sclass =  gm107_gr_sclass,
+static const struct gf100_gr_func
+gm107_gr = {
+	.init = gm107_gr_init,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm107_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_A, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm107_gr, device, index, pgr);
+}
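[Editor's note: the bulk of this file's diff is one mechanical conversion:
register accessors no longer take the engine object itself (nv_rd32/nv_wr32/
nv_mask on `priv`) but an explicit struct nvkm_device, fetched once from
gr->base.engine.subdev.device.  A minimal sketch of the pattern; the offsets
mirror writes already present in the hunk above:

	static void
	example_accessor_pattern(struct gf100_gr *gr)
	{
		struct nvkm_device *device = gr->base.engine.subdev.device;

		nvkm_wr32(device, 0x400500, 0x00010001);		/* write */
		nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);	/* read/modify/write */
	}
]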
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index fdb1dcf..90381dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -27,19 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gm204_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -243,144 +230,144 @@
  ******************************************************************************/
 
 static int
-gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gm204_gr_init_ctxctl(struct gf100_gr *gr)
 {
 	return 0;
 }
 
 int
-gm204_gr_init(struct nvkm_object *object)
+gm204_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {}, tmp;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
-	u32 tmp;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
-	nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
-	nv_wr32(priv, 0x418890, 0x00000000);
-	nv_wr32(priv, 0x418894, 0x00000000);
-	nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
 
 	/*XXX: belongs in fb */
-	nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gm107_gr_init_bios(priv);
+	gm107_gr_init_bios(gr);
 
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-	nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
-	nv_wr32(priv, 0x405848, 0xc0000000);
-	nv_wr32(priv, 0x40584c, 0x00000001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x405848, 0xc0000000);
+	nvkm_wr32(device, 0x40584c, 0x00000001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gm204_gr_init_ctxctl(priv);
+	return gm204_gr_init_ctxctl(gr);
 }
 
-struct nvkm_oclass *
-gm204_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm204_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+static const struct gf100_gr_func
+gm204_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm204_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm204_gr, device, index, pgr);
+}
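[Editor's note: gm107, gm204 and gm206 now register through the same shape: a
const gf100_gr_func table (hardware hooks, mmio lists, grctx code, and a
{}-terminated sclass list) plus a one-line constructor forwarding to
gf100_gr_new_().  A skeletal sketch; the "gxxx" names are placeholders, and
the members are the ones the tables above actually populate:

	static const struct gf100_gr_func
	gxxx_gr = {
		.init = gxxx_gr_init,
		.mmio = gxxx_gr_pack_mmio,
		.ppc_nr = 2,
		.grctx = &gxxx_grctx,
		.sclass = {
			{ -1, -1, FERMI_TWOD_A },
			{}	/* empty entry terminates the list */
		}
	};

	int
	gxxx_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
	{
		return gf100_gr_new_(&gxxx_gr, device, index, pgr);
	}
]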
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
index 04b9733..341dc560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -24,17 +24,25 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-struct nvkm_oclass *
-gm206_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm206_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gm206_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm206_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm206_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
new file mode 100644
index 0000000..65b6e3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static void
+gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 val;
+
+	/* TODO this needs to be removed once secure boot works */
+	if (1) {
+		nvkm_wr32(device, 0x100ce4, 0xffffffff);
+	}
+
+	/* TODO update once secure boot works */
+	val = nvkm_rd32(device, 0x100c80);
+	val &= 0xf000087f;
+	nvkm_wr32(device, 0x418880, val);
+	nvkm_wr32(device, 0x418890, 0);
+	nvkm_wr32(device, 0x418894, 0);
+
+	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+
+	nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
+}
+
+static void
+gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x419e44, 0xdffffe);
+	nvkm_wr32(device, 0x419e4c, 0x5);
+}
+
+static const struct gf100_gr_func
+gm20b_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+	.ppc_nr = 1,
+	.grctx = &gm20b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+}
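[Editor's note: gm20b provides no init routine of its own; it reuses
gk20a_gr_init() and relies on the optional hooks that function dispatches.
The relevant dispatch, already visible in the gk20a.c hunk earlier in this
series, reduces to the following sketch:

	if (gr->func->init_gpc_mmu)			/* gm20b sets this */
		gr->func->init_gpc_mmu(gr);
	/* ... */
	if (gr->func->set_hww_esr_report_mask)		/* gk20a and gm20b set this */
		gr->func->set_hww_esr_report_mask(gr);
]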
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
new file mode 100644
index 0000000..2e68919
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt200_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt200_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
new file mode 100644
index 0000000..2bf7aac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt215_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8597, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt215_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
new file mode 100644
index 0000000..95d5219
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp79_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
new file mode 100644
index 0000000..027b58e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp89_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{ -1, -1, 0x8697, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+}
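[Editor's note: every nvkm_gr_func table added for the nv50 family above ends
its sclass list with an empty {} entry.  A hypothetical helper showing how
such a sentinel-terminated list can be scanned; the field layout is assumed
from the initializers above (minver, maxver, oclass, object funcs):

	static bool
	sclass_supports(const struct nvkm_gr_func *func, s32 oclass)
	{
		int i;

		for (i = 0; func->sclass[i].oclass; i++)
			if (func->sclass[i].oclass == oclass)
				return true;
		return false;
	}
]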
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 2614510..426ba00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "priv.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/instmem.h>
 #include <subdev/timer.h>
 
@@ -346,25 +346,23 @@
 	NV04_PGRAPH_DEBUG_3
 };
 
-struct nv04_gr_priv {
+#define nv04_gr(p) container_of((p), struct nv04_gr, base)
+
+struct nv04_gr {
 	struct nvkm_gr base;
 	struct nv04_gr_chan *chan[16];
 	spinlock_t lock;
 };
 
+#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
+
 struct nv04_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv04_gr *gr;
 	int chid;
 	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
 };
 
-
-static inline struct nv04_gr_priv *
-nv04_gr_priv(struct nv04_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -444,35 +442,34 @@
  */
 
 static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
 	u32 tmp;
 
-	tmp  = nv_ro32(object, 0x00);
+	tmp  = nvkm_rd32(device, 0x700000 + inst);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x00, tmp);
+	nvkm_wr32(device, 0x700000 + inst, tmp);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
 }
 
 static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
 	int class, op, valid = 1;
 	u32 tmp, ctx1;
 
-	ctx1 = nv_ro32(object, 0x00);
+	ctx1 = nvkm_rd32(device, 0x700000 + inst);
 	class = ctx1 & 0xff;
 	op = (ctx1 >> 15) & 7;
 
-	tmp = nv_ro32(object, 0x0c);
+	tmp = nvkm_rd32(device, 0x70000c + inst);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x0c, tmp);
+	nvkm_wr32(device, 0x70000c + inst, tmp);
 
 	/* check for valid surf2d/surf_dst/surf_color */
 	if (!(tmp & 0x02000000))
@@ -504,527 +501,567 @@
 		break;
 	}
 
-	nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
 }
 
-static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
 {
-	u32 class = nv_ro32(object, 0) & 0xff;
-	u32 data = *(u32 *)args;
+	u8 class = nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
 	if (data > 5)
-		return 1;
+		return false;
 	/* Old versions of the objects only accept first three operations. */
 	if (data > 2 && class < 0x40)
-		return 1;
-	nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+		return false;
+	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
-	nv04_gr_set_ctx_val(object, 0, 0);
-	return 0;
+	nv04_gr_set_ctx_val(device, inst, 0, 0);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x40053c, min);
-	nv_wr32(priv, 0x400544, max);
-	return 0;
+	nvkm_wr32(device, 0x40053c, min);
+	nvkm_wr32(device, 0x400544, max);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x400540, min);
-	nv_wr32(priv, 0x400548, max);
-	return 0;
+	nvkm_wr32(device, 0x400540, min);
+	nvkm_wr32(device, 0x400548, max);
+	return true;
 }
 
-static u16
-nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size)
+static u8
+nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	u32 inst = *(u32 *)args << 4;
-	return nv_ro32(imem, inst);
+	return nvkm_rd32(device, 0x700000 + (inst << 4));
 }
 
-static int
-nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd,
-				 void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	case 0x52:
-		nv04_gr_set_ctx1(object, 0x00004000, 0x00004000);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x18:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x44:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd,
-		      void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
+		return true;
 	case 0x43:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
+		return true;
 	case 0x12:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
+		return true;
 	case 0x72:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x58:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x59:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd,
-			     void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x5a:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x5b:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x2000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
+		return true;
 	case 0x19:
-		nv04_gr_set_ctx1(object, 0x2000, 0x2000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x1000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
+		return true;
 	/* Yes, for some reason even the old versions of objects
 	 * accept 0x57 and not 0x17. Consistency be damned.
 	 */
 	case 0x57:
-		nv04_gr_set_ctx1(object, 0x1000, 0x1000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static struct nvkm_omthds
-nv03_gr_gdi_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_patt },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_rop },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
+	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
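[Editor's note: each nvkm_omthds[] table in this hunk is rewritten in this
switch-based form: the dispatcher returns false for methods it does not
handle in software, and otherwise forwards to the bind/operation helpers
above.  A hypothetical caller sketch, selecting a dispatcher by object class
(the class ids shown are illustrative, not taken from this hunk):

	static bool
	example_mthd_dispatch(struct nvkm_device *device, u32 class, u32 inst,
			      u32 mthd, u32 data)
	{
		switch (class) {
		case 0x4b: return nv03_gr_mthd_gdi(device, inst, mthd, data);
		case 0x4a: return nv04_gr_mthd_gdi(device, inst, mthd, data);
		default:   return false;	/* no software handler */
		}
	}
]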
 
-static struct nvkm_omthds
-nv04_gr_gdi_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_iifc_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_chroma },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_clip },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_patt },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_rop },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 },
-	{ 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, 0x03e4, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
+	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_surf3d_omthds[] = {
-	{ 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v },
-	{}
-};
+static bool
+nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
+	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_ttri_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta },
-	{}
-};
+static bool
+nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
+
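+/* Top-level software-method dispatch: read the object's class back from
+ * instance memory and hand the method to the matching per-class handler
+ * above.
+ */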
+static bool
+nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32, u32);
+	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
+	case 0x1c ... 0x1e:
+		   func = nv01_gr_mthd_prim; break;
+	case 0x1f: func = nv01_gr_mthd_blit; break;
+	case 0x21: func = nv01_gr_mthd_ifc; break;
+	case 0x36: func = nv03_gr_mthd_sifc; break;
+	case 0x37: func = nv03_gr_mthd_sifm; break;
+	case 0x48: func = nv03_gr_mthd_ttri; break;
+	case 0x4a: func = nv04_gr_mthd_gdi; break;
+	case 0x4b: func = nv03_gr_mthd_gdi; break;
+	case 0x53: func = nv04_gr_mthd_surf3d; break;
+	case 0x5c ... 0x5e:
+		   func = nv04_gr_mthd_prim; break;
+	case 0x5f: func = nv04_gr_mthd_blit; break;
+	case 0x60: func = nv04_gr_mthd_iifc; break;
+	case 0x61: func = nv04_gr_mthd_ifc; break;
+	case 0x76: func = nv04_gr_mthd_sifc; break;
+	case 0x77: func = nv04_gr_mthd_sifm; break;
+	default:
+		return false;
+	}
+	return func(device, inst, mthd, data);
+}
 
 static int
-nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
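+		/* 16-byte object: class id in the first word (with the
+		 * big-endian flag in bit 19), remaining words zeroed. */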
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+		nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000);
 #endif
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-struct nvkm_ofuncs
-nv04_gr_ofuncs = {
-	.ctor = nv04_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv04_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0017, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
-	{ 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
-	{ 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
-	{ 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
-	{ 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
-	{ 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
-	{ 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0042, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
-	{ 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
-	{ 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
-	{ 0x0054, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0055, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0057, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
-	{ 0x0059, &nv04_gr_ofuncs }, /* surf_src */
-	{ 0x005a, &nv04_gr_ofuncs }, /* surf_color */
-	{ 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
-	{ 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
-	{ 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
-	{ 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
-	{ 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
-	{ 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
-	{ 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
-	{ 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
-	{ 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
-	{ 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
-	{ 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
-	{},
+const struct nvkm_object_func
+nv04_gr_object = {
+	.bind = nv04_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -1032,13 +1069,14 @@
  ******************************************************************************/
 
 static struct nv04_gr_chan *
-nv04_gr_channel(struct nv04_gr_priv *priv)
+nv04_gr_channel(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *chan = NULL;
-	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
-		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -1046,55 +1084,52 @@
 static int
 nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]);
+		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
 	return 0;
 }
 
 static int
 nv04_gr_unload_context(struct nv04_gr_chan *chan)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]);
+		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 	return 0;
 }
 
 static void
-nv04_gr_context_switch(struct nv04_gr_priv *priv)
+nv04_gr_context_switch(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *prev = NULL;
 	struct nv04_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv04_gr_channel(priv);
+	prev = nv04_gr_channel(gr);
 	if (prev)
 		nv04_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+	next = gr->chan[chid];
 	if (next)
 		nv04_gr_load_context(next, chid);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
@@ -1109,98 +1144,85 @@
 	return NULL;
 }
 
-static int
-nv04_gr_context_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static void *
+nv04_gr_chan_dtor(struct nvkm_object *object)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv04_gr_priv *priv = (void *)engine;
-	struct nv04_gr_chan *chan;
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
 	unsigned long flags;
-	int ret;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
+}
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
+static int
+nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	unsigned long flags;
 
-	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv04_gr_channel(gr) == chan)
+		nv04_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
 	return 0;
 }
 
-static void
-nv04_gr_context_dtor(struct nvkm_object *object)
-{
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
-}
+static const struct nvkm_object_func
+nv04_gr_chan = {
+	.dtor = nv04_gr_chan_dtor,
+	.fini = nv04_gr_chan_fini,
+};
 
 static int
-nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv04_gr_channel(priv) == chan)
-		nv04_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
-	return nvkm_object_fini(&chan->base, suspend);
+	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
 }
 
-static struct nvkm_oclass
-nv04_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_context_ctor,
-		.dtor = nv04_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv04_gr_context_fini,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 bool
-nv04_gr_idle(void *obj)
+nv04_gr_idle(struct nvkm_gr *gr)
 {
-	struct nvkm_gr *gr = nvkm_gr(obj);
+	struct nvkm_subdev *subdev = &gr->engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = 0xffffffff;
 
-	if (nv_device(obj)->card_type == NV_40)
+	if (device->card_type == NV_40)
 		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
 
-	if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
-		nv_error(gr, "idle timed out with status 0x%08x\n",
-			 nv_rd32(gr, NV04_PGRAPH_STATUS));
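+	/* Poll for up to two seconds for PGRAPH to go idle. */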
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "idle timed out with status %08x\n",
+			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
 		return false;
 	}
 
@@ -1247,136 +1269,159 @@
 };
 
 static void
-nv04_gr_intr(struct nvkm_subdev *subdev)
+nv04_gr_intr(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv = (void *)subdev;
-	struct nv04_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x0f000000) >> 24;
 	u32 subc = (addr & 0x0000e000) >> 13;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
-	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
 
 	if (stat & NV_PGRAPH_INTR_NOTIFY) {
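+		/* Offer ILLEGAL_MTHD notifications to the software-method
+		 * handlers first; only unclaimed interrupts are logged. */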
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_vinst(namedb, inst);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv04_gr_mthd(device, inst, mthd, data))
 				show &= ~NV_PGRAPH_INTR_NOTIFY;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv04_gr_context_switch(priv);
+		nv04_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv04_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv04_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
 static int
-nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_gr_init(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv04_gr_intr;
-	nv_engine(priv)->cclass = &nv04_gr_cclass;
-	nv_engine(priv)->sclass = nv04_gr_sclass;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv04_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv04_gr_priv *priv = (void *)engine;
-	int ret;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
 	/* Enable PGRAPH interrupts */
-	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
-	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
+	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
 	/*1231C000 blob, 001 haiku*/
 	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
 	/*0x72111100 blob , 01 haiku*/
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
 	/*haiku same*/
 
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
 	/*haiku and blob 10d4*/
 
-	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 
 	/* These don't belong here, they're part of a per-channel context */
-	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
 	return 0;
 }
 
-struct nvkm_oclass
-nv04_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv04_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv04_gr = {
+	.init = nv04_gr_init,
+	.intr = nv04_gr_intr,
+	.chan_new = nv04_gr_chan_new,
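+	/* Per-class omthds tables are gone; software methods are now
+	 * dispatched from nv04_gr_intr() via nv04_gr_mthd(). */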
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x001f, &nv04_gr_object },
+		{ -1, -1, 0x0021, &nv04_gr_object },
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0036, &nv04_gr_object },
+		{ -1, -1, 0x0037, &nv04_gr_object },
+		{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x0048, &nv04_gr_object },
+		{ -1, -1, 0x004a, &nv04_gr_object },
+		{ -1, -1, 0x004b, &nv04_gr_object },
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x0053, &nv04_gr_object },
+		{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
+		{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
+		{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
+		{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
+		{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x005f, &nv04_gr_object },
+		{ -1, -1, 0x0060, &nv04_gr_object },
+		{ -1, -1, 0x0061, &nv04_gr_object },
+		{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
+		{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
+		{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0076, &nv04_gr_object },
+		{ -1, -1, 0x0077, &nv04_gr_object },
+		{}
+	}
 };
+
+int
+nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	struct nv04_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
+			    true, &gr->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index d6ace41..4542867 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "nv10.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 struct pipe_state {
@@ -386,14 +386,19 @@
 	0x00400a04,
 };
 
-struct nv10_gr_priv {
+#define nv10_gr(p) container_of((p), struct nv10_gr, base)
+
+struct nv10_gr {
 	struct nvkm_gr base;
 	struct nv10_gr_chan *chan[32];
 	spinlock_t lock;
 };
 
+#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
+
 struct nv10_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv10_gr *gr;
 	int chid;
 	int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
 	int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@@ -402,214 +407,151 @@
 };
 
 
-static inline struct nv10_gr_priv *
-nv10_gr_priv(struct nv10_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
 
-#define PIPE_SAVE(priv, state, addr)					\
+#define PIPE_SAVE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+			state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
 	} while (0)
 
-#define PIPE_RESTORE(priv, state, addr)					\
+#define PIPE_RESTORE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+			nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
 	} while (0)
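+/* Note: both macros use the caller's local "device" variable; their
+ * first argument is effectively unused. */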
 
-static struct nvkm_oclass
-nv10_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0056, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static struct nvkm_oclass
-nv15_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0096, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static int
-nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 	struct pipe_state *pipe = &chan->pipe_state;
 	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	u32 xfmode0, xfmode1;
-	u32 data = *(u32 *)args;
 	int i;
 
 	chan->lma_window[(mthd - 0x1638) / 4] = data;
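+	/* Methods 0x1638-0x1644 only buffer the LMA window words here;
+	 * the pipe reprogramming below waits for the final word. */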
 
 	if (mthd != 0x1644)
-		return 0;
+		return;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(device, pipe_0x0040, 0x0040);
+	PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
 
-	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+	PIPE_RESTORE(device, chan->lma_window, 0x6790);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
 
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+	PIPE_RESTORE(device, pipe_0x0040, 0x0040);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
 
-	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv04_gr_idle(priv);
-
-	return 0;
+	nv04_gr_idle(gr);
 }
 
-static int
-nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
-	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
-	return 0;
+	nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
 }
 
-static struct nvkm_omthds
-nv17_celcius_omthds[] = {
-	{ 0x1638, 0x1638, nv17_gr_mthd_lma_window },
-	{ 0x163c, 0x163c, nv17_gr_mthd_lma_window },
-	{ 0x1640, 0x1640, nv17_gr_mthd_lma_window },
-	{ 0x1644, 0x1644, nv17_gr_mthd_lma_window },
-	{ 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
-	{}
-};
+static bool
+nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
+{
+	void (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (mthd) {
+	case 0x1638 ... 0x1644:
+		     func = nv17_gr_mthd_lma_window; break;
+	case 0x1658: func = nv17_gr_mthd_lma_enable; break;
+	default:
+		return false;
+	}
+	func(chan, mthd, data);
+	return true;
+}
 
-static struct nvkm_oclass
-nv17_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
-	{},
-};
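+/* Per-class software-method dispatch; Celsius (0x99) is the only class
+ * with driver-implemented methods on this family. */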
+static bool
+nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
+{
+	bool (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (class) {
+	case 0x99: func = nv17_gr_mthd_celcius; break;
+	default:
+		return false;
+	}
+	return func(chan, mthd, data);
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
 static struct nv10_gr_chan *
-nv10_gr_channel(struct nv10_gr_priv *priv)
+nv10_gr_channel(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *chan = NULL;
-	if (nv_rd32(priv, 0x400144) & 0x00010000) {
-		int chid = nv_rd32(priv, 0x400148) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, 0x400144) & 0x00010000) {
+		int chid = nvkm_rd32(device, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -617,75 +559,78 @@
 static void
 nv10_gr_save_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
-	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
 }
 
 static void
 nv10_gr_load_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 xfmode0, xfmode1;
 	int i;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 	/* XXX check haiku comments */
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
-	nv04_gr_idle(priv);
+	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
+	nv04_gr_idle(&gr->base);
 
 	/* restore XFMODE */
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
-	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
-	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
-	nv04_gr_idle(priv);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
+	nv04_gr_idle(&gr->base);
 }
 
 static void
 nv10_gr_create_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct pipe_state *pipe_state = &chan->pipe_state;
 	u32 *pipe_state_addr;
 	int i;
@@ -698,7 +643,7 @@
 		u32 *__end_addr = pipe_state->pipe_##addr + \
 				ARRAY_SIZE(pipe_state->pipe_##addr); \
 		if (pipe_state_addr != __end_addr) \
-			nv_error(priv, "incomplete pipe init for 0x%x :  %p/%p\n", \
+			nvkm_error(subdev, "incomplete pipe init for 0x%x :  %p/%p\n", \
 				addr, pipe_state_addr, __end_addr); \
 	} while (0)
 #define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
@@ -838,33 +783,36 @@
 }
 
 static int
-nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
 		if (nv10_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static int
-nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
 		if (nv17_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static void
 nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
 	u32 ctx_user, ctx_switch[5];
 	int i, subchan = -1;
@@ -876,7 +824,7 @@
 
 	/* Look for a celsius object */
 	for (i = 0; i < 8; i++) {
-		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+		int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
 
 		if (class == 0x56 || class == 0x96 || class == 0x99) {
 			subchan = i;
@@ -888,159 +836,183 @@
 		return;
 
 	/* Save the current ctx object */
-	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+	ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
 	for (i = 0; i < 5; i++)
-		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+		ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
 
 	/* Save the FIFO state */
-	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
-	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
-	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
-	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+	st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
 
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+		fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
 
 	/* Switch to the celsius subchannel */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
-			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
+			nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
 
 	/* Inject NV10TCL_DMA_VTXBUF */
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
 		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
-	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
 
 	/* Restore the FIFO state */
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+		nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
 
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
 
 	/* Restore the current ctx object */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
-	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
 }
 
 static int
 nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 inst;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]);
+		nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]);
+			nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
 	}
 
 	nv10_gr_load_pipe(chan);
 
-	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
 	nv10_gr_load_dma_vtxbuf(chan, chid, inst);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
 	return 0;
 }
 
 static int
 nv10_gr_unload_context(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]);
+		chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]);
+			chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
 	}
 
 	nv10_gr_save_pipe(chan);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
 	return 0;
 }
 
 static void
-nv10_gr_context_switch(struct nv10_gr_priv *priv)
+nv10_gr_context_switch(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *prev = NULL;
 	struct nv10_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv10_gr_channel(priv);
+	prev = nv10_gr_channel(gr);
 	if (prev)
 		nv10_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = gr->chan[chid];
 	if (next)
 		nv10_gr_load_context(next, chid);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+static int
+nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_gr_channel(gr) == chan)
+		nv10_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
+}
+
+static void *
+nv10_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv10_gr_chan = {
+	.dtor = nv10_gr_chan_dtor,
+	.fini = nv10_gr_chan_fini,
+};
+
 #define NV_WRITE_CTX(reg, val) do { \
-	int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv10[offset] = val; \
 	} while (0)
 
 #define NV17_WRITE_CTX(reg, val) do { \
-	int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv17[offset] = val; \
 	} while (0)
 
-static int
-nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+int
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
 	struct nv10_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	unsigned long flags;
-	int ret;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -1049,12 +1021,11 @@
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
-					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
-		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+			       nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
 		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
@@ -1064,74 +1035,32 @@
 
 	nv10_gr_create_pipe(chan);
 
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
 	return 0;
 }
 
-static void
-nv10_gr_context_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
-}
-
-static int
-nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv10_gr_channel(priv) == chan)
-		nv10_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return nvkm_object_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv10_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_context_ctor,
-		.dtor = nv10_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv10_gr_context_fini,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static void
-nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
+void
+nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
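+	/* Reprogram the tile registers only while the FIFO is paused and
+	 * PGRAPH is idle. */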
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 const struct nvkm_bitfield nv10_gr_intr_name[] = {
@@ -1148,168 +1077,145 @@
 	{}
 };
 
-static void
-nv10_gr_intr(struct nvkm_subdev *subdev)
+void
+nv10_gr_intr(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv = (void *)subdev;
-	struct nv10_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv10_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
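+	/* An ILLEGAL_MTHD error may be a software method; if nv10_gr_mthd()
+	 * handles it, it isn't reported. */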
 
 	if (stat & NV_PGRAPH_INTR_ERROR) {
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_class(namedb, class);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv10_gr_mthd(chan, class, mthd, data))
 				show &= ~NV_PGRAPH_INTR_ERROR;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv10_gr_context_switch(priv);
+		nv10_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
-static int
-nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv10_gr_init(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv;
-	int ret;
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv10_gr_intr;
-	nv_engine(priv)->cclass = &nv10_gr_cclass;
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
 
-	if (nv_device(priv)->chipset <= 0x10)
-		nv_engine(priv)->sclass = nv10_gr_sclass;
-	else
-	if (nv_device(priv)->chipset <  0x17 ||
-	    nv_device(priv)->card_type < NV_11)
-		nv_engine(priv)->sclass = nv15_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv17_gr_sclass;
-
-	nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static void
-nv10_gr_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object;
-	nvkm_gr_destroy(&priv->base);
-}
-
-static int
-nv10_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv10_gr_priv *priv = (void *)engine;
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
-
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
-		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
-		nv_wr32(priv, 0x400838, 0x002f8684);
-		nv_wr32(priv, 0x40083c, 0x00115f3f);
-		nv_wr32(priv, 0x4006b0, 0x40000020);
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
+		nvkm_wr32(device, 0x400838, 0x002f8684);
+		nvkm_wr32(device, 0x40083c, 0x00115f3f);
+		nvkm_wr32(device, 0x4006b0, 0x40000020);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
 	return 0;
 }
 
-static int
-nv10_gr_fini(struct nvkm_object *object, bool suspend)
+int
+nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
 {
-	struct nv10_gr_priv *priv = (void *)object;
-	return nvkm_gr_fini(&priv->base, suspend);
+	struct nv10_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
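+	/* 0x00001000 is the PGRAPH unit bit used for interrupt/enable
+	 * masking (the old nv_subdev()->unit value). */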
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
 }
 
-struct nvkm_oclass
-nv10_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_ctor,
-		.dtor = nv10_gr_dtor,
-		.init = nv10_gr_init,
-		.fini = nv10_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv10_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0056, &nv04_gr_object }, /* celcius */
+		{}
+	}
 };
+
+int
+nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv10_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
new file mode 100644
index 0000000..d7c3d86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -0,0 +1,13 @@
+#ifndef __NV10_GR_H__
+#define __NV10_GR_H__
+#include "priv.h"
+
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv10_gr_init(struct nvkm_gr *);
+void nv10_gr_intr(struct nvkm_gr *);
+void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
new file mode 100644
index 0000000..3e2c685
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv15_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{}
+	}
+};
+
+int
+nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv15_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
new file mode 100644
index 0000000..12437d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv17_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0099, &nv04_gr_object }, /* celcius */
+		{}
+	}
+};
+
+int
+nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv17_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 1713ffb..5caef65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -2,375 +2,374 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv20_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{},
-};
-
-/*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
-static int
-nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+int
+nv20_gr_chan_init(struct nvkm_object *object)
 {
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	u32 inst = nvkm_memory_addr(chan->inst);
+
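+	/* Point this channel's context-table slot at its instance memory;
+	 * entries are stored in 16-byte units, hence the >> 4. */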
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+int
+nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 inst = nvkm_memory_addr(chan->inst);
+	int chid = -1;
+
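+	/* Block PGRAPH FIFO access, save this channel's context if it is
+	 * resident, then re-enable access and clear the table slot. */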
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
+	if (nvkm_rd32(device, 0x400144) & 0x00010000)
+		chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
+	if (chan->chid == chid) {
+		nvkm_wr32(device, 0x400784, inst >> 4);
+		nvkm_wr32(device, 0x400788, 0x00000002);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
+		nvkm_wr32(device, 0x400144, 0x10000000);
+		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
+	}
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
+
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+void *
+nv20_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	nvkm_memory_del(&chan->inst);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv20_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
+static int
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
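+	/* Fill in the default context image; word 0x0000 tags it with the
+	 * owning channel ID. */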
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1c1c; i <= 0x248c; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x281c, 0x3f800000);
-	nv_wo32(chan, 0x2830, 0x3f800000);
-	nv_wo32(chan, 0x285c, 0x40000000);
-	nv_wo32(chan, 0x2860, 0x3f800000);
-	nv_wo32(chan, 0x2864, 0x3f000000);
-	nv_wo32(chan, 0x286c, 0x40000000);
-	nv_wo32(chan, 0x2870, 0x3f800000);
-	nv_wo32(chan, 0x2878, 0xbf800000);
-	nv_wo32(chan, 0x2880, 0xbf800000);
-	nv_wo32(chan, 0x34a4, 0x000fe000);
-	nv_wo32(chan, 0x3530, 0x000003f8);
-	nv_wo32(chan, 0x3540, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x285c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x286c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
 	for (i = 0x355c; i <= 0x3578; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-int
-nv20_gr_context_init(struct nvkm_object *object)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int ret;
-
-	ret = nvkm_gr_context_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
-	return 0;
-}
-
-int
-nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int chid = -1;
-
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x400144) & 0x00010000)
-		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
-	if (chan->chid == chid) {
-		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
-		nv_wr32(priv, 0x400788, 0x00000002);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
-		nv_wr32(priv, 0x400144, 0x10000000);
-		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
-	}
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
-	return nvkm_gr_context_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv20_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 void
-nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv20_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv20_gr_priv *priv = (void *)engine;
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
 
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-	if (nv_device(engine)->chipset != 0x34) {
-		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 void
-nv20_gr_intr(struct nvkm_subdev *subdev)
+nv20_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv20_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	engctx = nvkm_engctx_get(engine, chid);
-	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
-		}
-	}
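+	/* The channel lookup below is only used to name the owning client
+	 * in the error message. */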
+	chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(engctx), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
-}
-
-static int
-nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv20_gr_cclass;
-	nv_engine(priv)->sclass = nv20_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-void
-nv20_gr_dtor(struct nvkm_object *object)
-{
-	struct nv20_gr_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ctxtab);
-	nvkm_gr_destroy(&priv->base);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
 int
-nv20_gr_init(struct nvkm_object *object)
+nv20_gr_oneinit(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
+	struct nv20_gr *gr = nv20_gr(base);
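+	/* One-time setup: allocate the 32-entry table of per-channel
+	 * context pointers that PGRAPH reads on channel switch. */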
+	return nvkm_memory_new(gr->base.engine.subdev.device,
+			       NVKM_MEM_TARGET_INST, 32 * 4, 16,
+			       true, &gr->ctxtab);
+}
+
+int
+nv20_gr_init(struct nvkm_gr *base)
+{
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp, vramsz;
-	int ret, i;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
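+	/* Point PGRAPH at the context table allocated in nv20_gr_oneinit(). */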
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
 
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
-
-	if (nv_device(priv)->chipset == 0x20) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+	if (device->chipset == 0x20) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
 		for (i = 0; i < 15; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
 		for (i = 0; i < 32; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
-	nv_wr32(priv, 0x40009C           , 0x00000040);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nvkm_wr32(device, 0x40009C           , 0x00000040);
 
-	if (nv_device(priv)->chipset >= 0x25) {
-		nv_wr32(priv, 0x400890, 0x00a8cfff);
-		nv_wr32(priv, 0x400610, 0x304B1FB6);
-		nv_wr32(priv, 0x400B80, 0x1cbd3883);
-		nv_wr32(priv, 0x400B84, 0x44000000);
-		nv_wr32(priv, 0x400098, 0x40000080);
-		nv_wr32(priv, 0x400B88, 0x000000ff);
+	if (device->chipset >= 0x25) {
+		nvkm_wr32(device, 0x400890, 0x00a8cfff);
+		nvkm_wr32(device, 0x400610, 0x304B1FB6);
+		nvkm_wr32(device, 0x400B80, 0x1cbd3883);
+		nvkm_wr32(device, 0x400B84, 0x44000000);
+		nvkm_wr32(device, 0x400098, 0x40000080);
+		nvkm_wr32(device, 0x400B88, 0x000000ff);
 
 	} else {
-		nv_wr32(priv, 0x400880, 0x0008c7df);
-		nv_wr32(priv, 0x400094, 0x00000005);
-		nv_wr32(priv, 0x400B80, 0x45eae20e);
-		nv_wr32(priv, 0x400B84, 0x24000000);
-		nv_wr32(priv, 0x400098, 0x00000040);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, 0x400880, 0x0008c7df);
+		nvkm_wr32(device, 0x400094, 0x00000005);
+		nvkm_wr32(device, 0x400B80, 0x45eae20e);
+		nvkm_wr32(device, 0x400B84, 0x24000000);
+		nvkm_wr32(device, 0x400098, 0x00000040);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));
 
-	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
 
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
-	nv_wr32(priv, 0x400820, 0);
-	nv_wr32(priv, 0x400824, 0);
-	nv_wr32(priv, 0x400864, vramsz - 1);
-	nv_wr32(priv, 0x400868, vramsz - 1);
+	vramsz = device->func->resource_size(device, 1) - 1;
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, 0x400820, 0);
+	nvkm_wr32(device, 0x400824, 0);
+	nvkm_wr32(device, 0x400864, vramsz - 1);
+	nvkm_wr32(device, 0x400868, vramsz - 1);
 
 	/* interesting.. the below overwrites some of the tile setup above.. */
-	nv_wr32(priv, 0x400B20, 0x00000000);
-	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+	nvkm_wr32(device, 0x400B20, 0x00000000);
+	nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
 	return 0;
 }
 
-struct nvkm_oclass
-nv20_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+void *
+nv20_gr_dtor(struct nvkm_gr *base)
+{
+	struct nv20_gr *gr = nv20_gr(base);
+	nvkm_memory_del(&gr->ctxtab);
+	return gr;
+}
+
+int
+nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv20_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv20_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv20_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{}
+	}
 };
+
+int
+nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv20_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ac4dc04..cdf4501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,26 +1,33 @@
 #ifndef __NV20_GR_H__
 #define __NV20_GR_H__
-#include <engine/gr.h>
+#define nv20_gr(p) container_of((p), struct nv20_gr, base)
+#include "priv.h"
 
-struct nv20_gr_priv {
+struct nv20_gr {
 	struct nvkm_gr base;
-	struct nvkm_gpuobj *ctxtab;
+	struct nvkm_memory *ctxtab;
 };
 
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int, struct nvkm_gr **);
+void *nv20_gr_dtor(struct nvkm_gr *);
+int nv20_gr_oneinit(struct nvkm_gr *);
+int nv20_gr_init(struct nvkm_gr *);
+void nv20_gr_intr(struct nvkm_gr *);
+void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv30_gr_init(struct nvkm_gr *);
+
+#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
+
 struct nv20_gr_chan {
-	struct nvkm_gr_chan base;
+	struct nvkm_object object;
+	struct nv20_gr *gr;
 	int chid;
+	struct nvkm_memory *inst;
 };
 
-extern struct nvkm_oclass nv25_gr_sclass[];
-int  nv20_gr_context_init(struct nvkm_object *);
-int  nv20_gr_context_fini(struct nvkm_object *, bool);
-
-void nv20_gr_tile_prog(struct nvkm_engine *, int);
-void nv20_gr_intr(struct nvkm_subdev *);
-
-void nv20_gr_dtor(struct nvkm_object *);
-int  nv20_gr_init(struct nvkm_object *);
-
-int  nv30_gr_init(struct nvkm_object *);
+void *nv20_gr_chan_dtor(struct nvkm_object *);
+int nv20_gr_chan_init(struct nvkm_object *);
+int nv20_gr_chan_fini(struct nvkm_object *, bool);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index bc36251..6c4a008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,158 +1,134 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-nv25_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv25_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x3724, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x035c, 0xffff0000);
-	nv_wo32(chan, 0x03c0, 0x0fff0000);
-	nv_wo32(chan, 0x03c4, 0x0fff0000);
-	nv_wo32(chan, 0x049c, 0x00000101);
-	nv_wo32(chan, 0x04b0, 0x00000111);
-	nv_wo32(chan, 0x04c8, 0x00000080);
-	nv_wo32(chan, 0x04cc, 0xffff0000);
-	nv_wo32(chan, 0x04d0, 0x00000001);
-	nv_wo32(chan, 0x04e4, 0x44400000);
-	nv_wo32(chan, 0x04fc, 0x4b800000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x049c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
+	nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
+	nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
+	nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
 	for (i = 0x0510; i <= 0x051c; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x0530; i <= 0x053c; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x0548; i <= 0x0554; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0558; i <= 0x0564; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x0568; i <= 0x0574; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x0598; i <= 0x05d4; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05e0, 0x4b7fffff);
-	nv_wo32(chan, 0x0620, 0x00000080);
-	nv_wo32(chan, 0x0624, 0x30201000);
-	nv_wo32(chan, 0x0628, 0x70605040);
-	nv_wo32(chan, 0x062c, 0xb0a09080);
-	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
-	nv_wo32(chan, 0x0664, 0x00000001);
-	nv_wo32(chan, 0x066c, 0x00004000);
-	nv_wo32(chan, 0x0678, 0x00000001);
-	nv_wo32(chan, 0x0680, 0x00040000);
-	nv_wo32(chan, 0x0684, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0620, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0624, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0628, 0x70605040);
+	nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
+	nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
+	nvkm_wo32(chan->inst, 0x0664, 0x00000001);
+	nvkm_wo32(chan->inst, 0x066c, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0678, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0680, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0684, 0x00010000);
 	for (i = 0x1b04; i <= 0x2374; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x2704, 0x3f800000);
-	nv_wo32(chan, 0x2718, 0x3f800000);
-	nv_wo32(chan, 0x2744, 0x40000000);
-	nv_wo32(chan, 0x2748, 0x3f800000);
-	nv_wo32(chan, 0x274c, 0x3f000000);
-	nv_wo32(chan, 0x2754, 0x40000000);
-	nv_wo32(chan, 0x2758, 0x3f800000);
-	nv_wo32(chan, 0x2760, 0xbf800000);
-	nv_wo32(chan, 0x2768, 0xbf800000);
-	nv_wo32(chan, 0x308c, 0x000fe000);
-	nv_wo32(chan, 0x3108, 0x000003f8);
-	nv_wo32(chan, 0x3468, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2744, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2754, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
 	for (i = 0x3484; i <= 0x34a0; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv25_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv25_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv25_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv25_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv25_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
 };
+
+int
+nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv25_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 22a5096..3cad26d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,125 +1,125 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv2a_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x269c, 0x3f800000);
-	nv_wo32(chan, 0x26b0, 0x3f800000);
-	nv_wo32(chan, 0x26dc, 0x40000000);
-	nv_wo32(chan, 0x26e0, 0x3f800000);
-	nv_wo32(chan, 0x26e4, 0x3f000000);
-	nv_wo32(chan, 0x26ec, 0x40000000);
-	nv_wo32(chan, 0x26f0, 0x3f800000);
-	nv_wo32(chan, 0x26f8, 0xbf800000);
-	nv_wo32(chan, 0x2700, 0xbf800000);
-	nv_wo32(chan, 0x3024, 0x000fe000);
-	nv_wo32(chan, 0x30a0, 0x000003f8);
-	nv_wo32(chan, 0x33fc, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
 	for (i = 0x341c; i <= 0x3438; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv2a_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv2a_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv2a_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv2a_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv2a_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
 };
+
+int
+nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv2a_gr, device, index, pgr);
+}
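
For readers new to the reworked object model, the shape shared by the nvXX_gr_chan_new() constructors above is: allocate with kzalloc(), bind the object to its const function table, and publish it through *pobject before attempting anything that can fail, so the caller can unwind through the normal destructor path on error. Below is a minimal standalone sketch of that pattern; every name in it (object_ctor, chan_new, fallible_setup, ...) is an illustrative stand-in, not part of the nvkm API.

#include <stdio.h>
#include <stdlib.h>

struct object;

struct object_func {
	void *(*dtor)(struct object *);
};

struct object {
	const struct object_func *func;
};

struct chan {
	struct object object;	/* first member, so a cast stands in for container_of() */
	int chid;
};

static void *
chan_dtor(struct object *object)
{
	return (struct chan *)object;
}

static const struct object_func chan_func = {
	.dtor = chan_dtor,
};

static void
object_ctor(const struct object_func *func, struct object *object)
{
	object->func = func;
}

static int
fallible_setup(struct chan *chan)
{
	(void)chan;		/* stands in for nvkm_memory_new() and friends */
	return 0;
}

static int
chan_new(int chid, struct object **pobject)
{
	struct chan *chan;

	if (!(chan = calloc(1, sizeof(*chan))))
		return -1;
	object_ctor(&chan_func, &chan->object);
	chan->chid = chid;
	*pobject = &chan->object;	/* published before the first fallible step */

	return fallible_setup(chan);	/* on error the caller unwinds via ->dtor */
}

int main(void)
{
	struct object *object = NULL;

	if (chan_new(3, &object) == 0)
		printf("chid %d\n", ((struct chan *)object)->chid);
	if (object)
		free(object->func->dtor(object));
	return 0;
}
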
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index dcc84eb..69de8c62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,231 +1,198 @@
 #include "nv20.h"
 #include "regs.h"
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv30_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
-
-/*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv30_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x0410, 0x00000101);
-	nv_wo32(chan, 0x0424, 0x00000111);
-	nv_wo32(chan, 0x0428, 0x00000060);
-	nv_wo32(chan, 0x0444, 0x00000080);
-	nv_wo32(chan, 0x0448, 0xffff0000);
-	nv_wo32(chan, 0x044c, 0x00000001);
-	nv_wo32(chan, 0x0460, 0x44400000);
-	nv_wo32(chan, 0x048c, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x0410, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0428, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0444, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x044c, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0460, 0x44400000);
+	nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
 	for (i = 0x04e0; i < 0x04e8; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04ec, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
 	for (i = 0x0508; i < 0x0548; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0550, 0x4b7fffff);
-	nv_wo32(chan, 0x058c, 0x00000080);
-	nv_wo32(chan, 0x0590, 0x30201000);
-	nv_wo32(chan, 0x0594, 0x70605040);
-	nv_wo32(chan, 0x0598, 0xb8a89888);
-	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05b0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x058c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0590, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0594, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
 	for (i = 0x0600; i < 0x0640; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0640; i < 0x0680; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c0; i < 0x0700; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0700; i < 0x0740; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0740; i < 0x0780; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x085c, 0x00040000);
-	nv_wo32(chan, 0x0860, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x085c, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0860, 0x00010000);
 	for (i = 0x0864; i < 0x0874; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x30b8; i < 0x30c8; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x344c, 0x3f800000);
-	nv_wo32(chan, 0x3808, 0x3f800000);
-	nv_wo32(chan, 0x381c, 0x3f800000);
-	nv_wo32(chan, 0x3848, 0x40000000);
-	nv_wo32(chan, 0x384c, 0x3f800000);
-	nv_wo32(chan, 0x3850, 0x3f000000);
-	nv_wo32(chan, 0x3858, 0x40000000);
-	nv_wo32(chan, 0x385c, 0x3f800000);
-	nv_wo32(chan, 0x3864, 0xbf800000);
-	nv_wo32(chan, 0x386c, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3848, 0x40000000);
+	nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x3858, 0x40000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv30_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv30_gr_cclass;
-	nv_engine(priv)->sclass = nv30_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
 int
-nv30_gr_init(struct nvkm_object *object)
+nv30_gr_init(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
 
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, 0x400890, 0x01b463ff);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nvkm_wr32(device, 0x400B80, 0x1003d888);
+	nvkm_wr32(device, 0x400B84, 0x0c000000);
+	nvkm_wr32(device, 0x400098, 0x00000000);
+	nvkm_wr32(device, 0x40009C, 0x0005ad00);
+	nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nvkm_wr32(device, 0x4000a0, 0x00000000);
+	nvkm_wr32(device, 0x4000a4, 0x00000008);
+	nvkm_wr32(device, 0x4008a8, 0xb784a400);
+	nvkm_wr32(device, 0x400ba0, 0x002f8685);
+	nvkm_wr32(device, 0x400ba4, 0x00231f3f);
+	nvkm_wr32(device, 0x4008a4, 0x40000020);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, 0x400890, 0x01b463ff);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
-	nv_wr32(priv, 0x400B80, 0x1003d888);
-	nv_wr32(priv, 0x400B84, 0x0c000000);
-	nv_wr32(priv, 0x400098, 0x00000000);
-	nv_wr32(priv, 0x40009C, 0x0005ad00);
-	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
-	nv_wr32(priv, 0x4000a0, 0x00000000);
-	nv_wr32(priv, 0x4000a4, 0x00000008);
-	nv_wr32(priv, 0x4008a8, 0xb784a400);
-	nv_wr32(priv, 0x400ba0, 0x002f8685);
-	nv_wr32(priv, 0x400ba4, 0x00231f3f);
-	nv_wr32(priv, 0x4008a4, 0x40000020);
-
-	if (nv_device(priv)->chipset == 0x34) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	if (device->chipset == 0x34) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002);
 	}
 
-	nv_wr32(priv, 0x4000c0, 0x00000016);
+	nvkm_wr32(device, 0x4000c0, 0x00000016);
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-	nv_wr32(priv, 0x0040075c             , 0x00000001);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, 0x0040075c             , 0x00000001);
 
 	/* begin RAM config */
-	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	if (nv_device(priv)->chipset != 0x34) {
-		nv_wr32(priv, 0x400750, 0x00EA0000);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x400750, 0x00EA0004);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+	/* vramsz = pci_resource_len(gr->dev->pdev, 1) - 1; */
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, 0x400750, 0x00EA0000);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x400750, 0x00EA0004);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204));
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv30_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv30_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv30_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv30_gr, device, index, pgr);
+}
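
The nvkm_kmap()/nvkm_wo32()/nvkm_done() triplet that replaces the old per-word nv_wo32() calls throughout these hunks brackets a burst of writes so the backing memory is mapped once and released once. A toy model of the idea follows, under the simplifying assumption that a mapping is just a pointer held between the two calls; the real accessors operate on struct nvkm_memory and may map and unmap actual apertures.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* toy instance memory: the "mapping" is a pointer held between calls */
struct memory {
	uint32_t *map;		/* non-NULL only between kmap() and done() */
	uint32_t  backing[0x2000];
};

static void kmap(struct memory *mem) { mem->map = mem->backing; }
static void done(struct memory *mem) { mem->map = NULL; }

static void wo32(struct memory *mem, uint32_t addr, uint32_t data)
{
	mem->map[addr / 4] = data;	/* offsets are in bytes, as in the diff */
}

int main(void)
{
	struct memory *inst = calloc(1, sizeof(*inst));
	uint32_t i;

	if (!inst)
		return 1;
	kmap(inst);
	wo32(inst, 0x0028, 0x00000001 | (3 << 24));	/* chid 3, as in the ctx setup */
	for (i = 0x0410; i <= 0x0420; i += 4)		/* ranged fills, same shape */
		wo32(inst, i, 0x00000111);
	done(inst);

	printf("%08x\n", inst->backing[0x0028 / 4]);
	free(inst);
	return 0;
}
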
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 985b7f3..2207dac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv34_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv34_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x01000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0480, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x01000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
 	for (i = 0x04d4; i < 0x04dc; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e0, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
 	for (i = 0x04fc; i < 0x053c; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0544, 0x4b7fffff);
-	nv_wo32(chan, 0x057c, 0x00000080);
-	nv_wo32(chan, 0x0580, 0x30201000);
-	nv_wo32(chan, 0x0584, 0x70605040);
-	nv_wo32(chan, 0x0588, 0xb8a89888);
-	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05a0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x057c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0580, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0584, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
 	for (i = 0x05f0; i < 0x0630; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0630; i < 0x0670; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06b0; i < 0x06f0; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x06f0; i < 0x0730; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0730; i < 0x0770; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0850, 0x00040000);
-	nv_wo32(chan, 0x0854, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0850, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0854, 0x00010000);
 	for (i = 0x0858; i < 0x0868; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x274c; i < 0x275c; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x2ae0, 0x3f800000);
-	nv_wo32(chan, 0x2e9c, 0x3f800000);
-	nv_wo32(chan, 0x2eb0, 0x3f800000);
-	nv_wo32(chan, 0x2edc, 0x40000000);
-	nv_wo32(chan, 0x2ee0, 0x3f800000);
-	nv_wo32(chan, 0x2ee4, 0x3f000000);
-	nv_wo32(chan, 0x2eec, 0x40000000);
-	nv_wo32(chan, 0x2ef0, 0x3f800000);
-	nv_wo32(chan, 0x2ef8, 0xbf800000);
-	nv_wo32(chan, 0x2f00, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv34_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv34_gr_cclass;
-	nv_engine(priv)->sclass = nv34_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv34_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv34_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv34_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv34_gr, device, index, pgr);
+}
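
The sclass arrays in the new nvkm_gr_func tables end in an empty {} sentinel rather than carrying an explicit length, so a lookup simply walks entries until it hits one whose class id is zero. A minimal sketch of that style of lookup, with invented types standing in for the real sclass entries:

#include <stdio.h>

struct sclass {
	int min_ver, max_ver;	/* -1, -1 above means "any version" */
	unsigned oclass;	/* 0 only in the terminating sentinel */
};

static const struct sclass sclasses[] = {
	{ -1, -1, 0x0012 },	/* beta1 */
	{ -1, -1, 0x0039 },	/* m2mf */
	{ -1, -1, 0x0697 },	/* rankine */
	{}			/* sentinel, all fields zero */
};

static const struct sclass *
sclass_find(unsigned oclass)
{
	const struct sclass *s;

	for (s = sclasses; s->oclass; s++)
		if (s->oclass == oclass)
			return s;
	return NULL;
}

int main(void)
{
	printf("0x0697 %s\n", sclass_find(0x0697) ? "found" : "missing");
	printf("0x0097 %s\n", sclass_find(0x0097) ? "found" : "missing");
	return 0;
}
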
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 707625f..740df0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv35_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv35_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x577c, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x00000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0488, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
 	for (i = 0x04dc; i < 0x04e4; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e8, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
 	for (i = 0x0504; i < 0x0544; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x054c, 0x4b7fffff);
-	nv_wo32(chan, 0x0588, 0x00000080);
-	nv_wo32(chan, 0x058c, 0x30201000);
-	nv_wo32(chan, 0x0590, 0x70605040);
-	nv_wo32(chan, 0x0594, 0xb8a89888);
-	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05ac, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0588, 0x00000080);
+	nvkm_wo32(chan->inst, 0x058c, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0590, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
 	for (i = 0x0604; i < 0x0644; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0644; i < 0x0684; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c4; i < 0x0704; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0704; i < 0x0744; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0744; i < 0x0784; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0860, 0x00040000);
-	nv_wo32(chan, 0x0864, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0860, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0864, 0x00010000);
 	for (i = 0x0868; i < 0x0878; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 4, 0x0436086c);
-		nv_wo32(chan, i + 8, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
 	}
 	for (i = 0x30bc; i < 0x30cc; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x3450, 0x3f800000);
-	nv_wo32(chan, 0x380c, 0x3f800000);
-	nv_wo32(chan, 0x3820, 0x3f800000);
-	nv_wo32(chan, 0x384c, 0x40000000);
-	nv_wo32(chan, 0x3850, 0x3f800000);
-	nv_wo32(chan, 0x3854, 0x3f000000);
-	nv_wo32(chan, 0x385c, 0x40000000);
-	nv_wo32(chan, 0x3860, 0x3f800000);
-	nv_wo32(chan, 0x3868, 0xbf800000);
-	nv_wo32(chan, 0x3870, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x384c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv35_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv35_gr_cclass;
-	nv_engine(priv)->sclass = nv35_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv35_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv35_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv35_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv35_gr, device, index, pgr);
+}
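
Note how each of these files now reduces to a single static const function table describing the chip plus a tiny nvXX_gr_new() that hands the table to the shared nv20_gr_new_() constructor, instead of the old per-chip ctor wiring up nv_subdev/nv_engine fields by hand. A toy version of that dispatch shape, with hypothetical names standing in for the nvkm ones:

#include <stdio.h>

struct gr;

struct gr_func {
	int  (*init)(struct gr *);
	void (*intr)(struct gr *);
};

struct gr {
	const struct gr_func *func;
	int index;
};

static int common_init(struct gr *gr)
{
	printf("init unit %d\n", gr->index);
	return 0;
}

static void common_intr(struct gr *gr)
{
	printf("intr unit %d\n", gr->index);
}

/* one const table per chip; most entries point at shared code */
static const struct gr_func nv35_gr_toy = {
	.init = common_init,
	.intr = common_intr,
};

/* stands in for nv20_gr_new_(): attach a chip's table to an instance */
static int
gr_new_(const struct gr_func *func, int index, struct gr *gr)
{
	gr->func = func;
	gr->index = index;
	return gr->func->init(gr);
}

int main(void)
{
	struct gr gr;

	if (gr_new_(&nv35_gr_toy, 4, &gr) == 0)
		gr.func->intr(&gr);
	return 0;
}
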
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 7e19379..ffa902e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -25,26 +25,15 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
-struct nv40_gr_priv {
-	struct nvkm_gr base;
-	u32 size;
-};
-
-struct nv40_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv40_gr_units(struct nvkm_gr *gr)
 {
-	struct nv40_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,80 +41,29 @@
  ******************************************************************************/
 
 static int
-nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
 #endif
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nv_wo32(obj, 0x10, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv40_gr_ofuncs = {
-	.ctor = nv40_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv40_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
-};
-
-static struct nvkm_oclass
-nv44_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
+const struct nvkm_object_func
+nv40_gr_object = {
+	.bind = nv40_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -133,361 +71,334 @@
  ******************************************************************************/
 
 static int
-nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv40_gr_priv *priv = (void *)engine;
-	struct nv40_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
-	return 0;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
 static int
-nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
 {
-	struct nv40_gr_priv *priv = (void *)object->engine;
-	struct nv40_gr_chan *chan = (void *)object;
-	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = 0x01000000 | chan->inst >> 4;
 	int ret = 0;
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
 
-	if (nv_rd32(priv, 0x40032c) == inst) {
+	if (nvkm_rd32(device, 0x40032c) == inst) {
 		if (suspend) {
-			nv_wr32(priv, 0x400720, 0x00000000);
-			nv_wr32(priv, 0x400784, inst);
-			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
-			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
-			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
-				u32 insn = nv_rd32(priv, 0x400308);
-				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+			nvkm_wr32(device, 0x400720, 0x00000000);
+			nvkm_wr32(device, 0x400784, inst);
+			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
+			if (nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+					break;
+			) < 0) {
+				u32 insn = nvkm_rd32(device, 0x400308);
+				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
 				ret = -EBUSY;
 			}
 		}
 
-		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
 	}
 
-	if (nv_rd32(priv, 0x400330) == inst)
-		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+	if (nvkm_rd32(device, 0x400330) == inst)
+		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
 	return ret;
 }
 
-static struct nvkm_oclass
-nv40_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = nv40_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+static void *
+nv40_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	unsigned long flags;
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv40_gr_chan = {
+	.dtor = nv40_gr_chan_dtor,
+	.fini = nv40_gr_chan_fini,
+	.bind = nv40_gr_chan_bind,
 };
 
+int
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_add(&chan->head, &gr->chan);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 static void
-nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
-	case 0x4e:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (device->chipset) {
 		case 0x40:
 		case 0x45:
-			nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-			nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		case 0x41:
 		case 0x42:
 		case 0x43:
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		default:
 			break;
 		}
 		break;
-	case 0x44:
-	case 0x4a:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-	case 0x46:
-	case 0x4c:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-	case 0x63:
-	case 0x67:
-	case 0x68:
-		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
-			break;
-		default:
-			break;
-		}
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
 		break;
 	default:
+		WARN_ON(1);
 		break;
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
-static void
-nv40_gr_intr(struct nvkm_subdev *subdev)
+void
+nv40_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv40_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *temp, *chan = NULL;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
 	u32 show = stat;
-	int chid;
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	spin_lock_irqsave(&gr->base.engine.lock, flags);
+	list_for_each_entry(temp, &gr->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &gr->chan);
+			break;
+		}
+	}
 
 	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
-		}
-
 		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-			nv_mask(priv, 0x402000, 0, 0);
+			nvkm_mask(device, 0x402000, 0, 0);
 		}
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta,
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->fifo->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
 }
 
-static int
-nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv40_gr_init(struct nvkm_gr *base)
 {
-	struct nv40_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv40_gr_intr;
-	nv_engine(priv)->cclass = &nv40_gr_cclass;
-	if (nv44_gr_class(priv))
-		nv_engine(priv)->sclass = nv44_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv40_gr_sclass;
-	nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
-
-	priv->base.units = nv40_gr_units;
-	return 0;
-}
-
-static int
-nv40_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, i, j;
 	u32 vramsz;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* generate and upload context program */
-	ret = nv40_grctx_init(nv_device(priv), &priv->size);
+	ret = nv40_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
 	/* No context present currently */
-	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	j = nv_rd32(priv, 0x1540) & 0xff;
+	j = nvkm_rd32(device, 0x1540) & 0xff;
 	if (j) {
 		for (i = 0; !(j & 1); j >>= 1, i++)
 			;
-		nv_wr32(priv, 0x405000, i);
+		nvkm_wr32(device, 0x405000, i);
 	}
 
-	if (nv_device(priv)->chipset == 0x40) {
-		nv_wr32(priv, 0x4009b0, 0x83280fff);
-		nv_wr32(priv, 0x4009b4, 0x000000a0);
+	if (device->chipset == 0x40) {
+		nvkm_wr32(device, 0x4009b0, 0x83280fff);
+		nvkm_wr32(device, 0x4009b4, 0x000000a0);
 	} else {
-		nv_wr32(priv, 0x400820, 0x83280eff);
-		nv_wr32(priv, 0x400824, 0x000000a0);
+		nvkm_wr32(device, 0x400820, 0x83280eff);
+		nvkm_wr32(device, 0x400824, 0x000000a0);
 	}
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
-		nv_wr32(priv, 0x4009b8, 0x0078e366);
-		nv_wr32(priv, 0x4009bc, 0x0000014c);
+		nvkm_wr32(device, 0x4009b8, 0x0078e366);
+		nvkm_wr32(device, 0x4009bc, 0x0000014c);
 		break;
 	case 0x41:
 	case 0x42: /* pciid also 0x00Cx */
 	/* case 0x0120: XXX (pciid) */
-		nv_wr32(priv, 0x400828, 0x007596ff);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x007596ff);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x43:
-		nv_wr32(priv, 0x400828, 0x0072cb77);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x0072cb77);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x44:
 	case 0x46: /* G72 */
 	case 0x4a:
 	case 0x4c: /* G7x-based C51 */
 	case 0x4e:
-		nv_wr32(priv, 0x400860, 0);
-		nv_wr32(priv, 0x400864, 0);
+		nvkm_wr32(device, 0x400860, 0);
+		nvkm_wr32(device, 0x400864, 0);
 		break;
 	case 0x47: /* G70 */
 	case 0x49: /* G71 */
 	case 0x4b: /* G73 */
-		nv_wr32(priv, 0x400828, 0x07830610);
-		nv_wr32(priv, 0x40082c, 0x0000016A);
+		nvkm_wr32(device, 0x400828, 0x07830610);
+		nvkm_wr32(device, 0x40082c, 0x0000016A);
 		break;
 	default:
 		break;
 	}
 
-	nv_wr32(priv, 0x400b38, 0x2ffff800);
-	nv_wr32(priv, 0x400b3c, 0x00006000);
+	nvkm_wr32(device, 0x400b38, 0x2ffff800);
+	nvkm_wr32(device, 0x400b3c, 0x00006000);
 
 	/* Tiling related stuff. */
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x44:
 	case 0x4a:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
 		break;
 	case 0x46:
-		nv_wr32(priv, 0x400bc4, 0x0000e024);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+		nvkm_wr32(device, 0x400bc4, 0x0000e024);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
 		break;
 	case 0x4c:
 	case 0x4e:
 	case 0x67:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
 		break;
 	default:
 		break;
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	switch (nv_device(priv)->chipset) {
+	vramsz = device->func->resource_size(device, 1) - 1;
+	switch (device->chipset) {
 	case 0x40:
-		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400820, 0);
-		nv_wr32(priv, 0x400824, 0);
-		nv_wr32(priv, 0x400864, vramsz);
-		nv_wr32(priv, 0x400868, vramsz);
+		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400820, 0);
+		nvkm_wr32(device, 0x400824, 0);
+		nvkm_wr32(device, 0x400864, vramsz);
+		nvkm_wr32(device, 0x400868, vramsz);
 		break;
 	default:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x41:
 		case 0x42:
 		case 0x43:
@@ -495,33 +406,70 @@
 		case 0x4e:
 		case 0x44:
 		case 0x4a:
-			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
 			break;
 		default:
-			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
 			break;
 		}
-		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400840, 0);
-		nv_wr32(priv, 0x400844, 0);
-		nv_wr32(priv, 0x4008A0, vramsz);
-		nv_wr32(priv, 0x4008A4, vramsz);
+		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400840, 0);
+		nvkm_wr32(device, 0x400844, 0);
+		nvkm_wr32(device, 0x4008A0, vramsz);
+		nvkm_wr32(device, 0x4008A4, vramsz);
 		break;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv40_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv40_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv40_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	INIT_LIST_HEAD(&gr->chan);
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv40_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv40_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
+		{}
+	}
 };
+
+int
+nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv40_gr, device, index, pgr);
+}
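
The rewrite above replaces the old nvkm_oclass/ofuncs boilerplate with a
plain constructor, nv40_gr_new_(), that allocates the chipset-specific
wrapper struct, plus a const nvkm_gr_func table that supplies the
per-variant hooks. Each hook receives the base nvkm_gr pointer and
recovers the wrapper with the nv40_gr() container_of macro. A minimal
standalone sketch of that downcast pattern, using illustrative stand-in
types rather than the real nvkm ones:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gr {		/* stand-in for struct nvkm_gr */
		int index;
	};

	struct nv40_gr_demo {	/* stand-in for struct nv40_gr */
		struct gr base;
		unsigned size;
	};

	#define nv40_gr_demo(p) container_of((p), struct nv40_gr_demo, base)

	static int demo_init(struct gr *base)
	{
		/* hooks get the base pointer, downcast to the wrapper */
		struct nv40_gr_demo *gr = nv40_gr_demo(base);
		gr->size = 0x1000;
		return 0;
	}

	int main(void)
	{
		struct nv40_gr_demo gr = { .base = { .index = 0 } };
		demo_init(&gr.base);
		printf("size=%#x\n", gr.size);
		return 0;
	}

The same shape repeats for nv44 and nv50 below: one base-embedding
struct per family, one const function table per chipset.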
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index d852bd6..2812ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,22 +1,45 @@
 #ifndef __NV40_GR_H__
 #define __NV40_GR_H__
-#include <engine/gr.h>
+#define nv40_gr(p) container_of((p), struct nv40_gr, base)
+#include "priv.h"
 
-#include <core/device.h>
-struct nvkm_gpuobj;
+struct nv40_gr {
+	struct nvkm_gr base;
+	u32 size;
+	struct list_head chan;
+};
+
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv40_gr_init(struct nvkm_gr *);
+void nv40_gr_intr(struct nvkm_gr *);
+u64 nv40_gr_units(struct nvkm_gr *);
+
+#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+
+struct nv40_gr_chan {
+	struct nvkm_object object;
+	struct nv40_gr *gr;
+	struct nvkm_fifo_chan *fifo;
+	u32 inst;
+	struct list_head head;
+};
+
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv40_gr_object;
 
 /* returns 1 if device is one of the nv4x using the 0x4497 object class,
  * helpful to determine a number of other hardware features
  */
 static inline int
-nv44_gr_class(void *priv)
+nv44_gr_class(struct nvkm_device *device)
 {
-	struct nvkm_device *device = nv_device(priv);
-
 	if ((device->chipset & 0xf0) == 0x60)
 		return 1;
 
-	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+	return !(0x0aaf & (1 << (device->chipset & 0x0f)));
 }
 
 int  nv40_grctx_init(struct nvkm_device *, u32 *size);
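
Note the one-bit change in nv44_gr_class() above (0x0baf becomes
0x0aaf): the low nibble of the chipset number indexes a bit in the
constant, and a clear bit means the device uses the 0x4497 object
class. A small worked example (illustrative, not the driver code):

	#include <stdio.h>

	static int uses_4497(unsigned chipset)
	{
		if ((chipset & 0xf0) == 0x60)
			return 1;
		return !(0x0aaf & (1 << (chipset & 0x0f)));
	}

	int main(void)
	{
		/* 0x40: bit 0 is set in 0x0aaf  -> 0, keeps class 0x4097
		 * 0x44: bit 4 is clear in 0x0aaf -> 1, uses class 0x4497 */
		printf("0x40 -> %d\n", uses_4497(0x40));
		printf("0x44 -> %d\n", uses_4497(0x44));
		return 0;
	}

This matches the sclass tables: nv40.c registers 0x4097 ("curie")
while the new nv44.c registers 0x4497.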
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
new file mode 100644
index 0000000..45ff802
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+#include "regs.h"
+
+#include <subdev/fb.h>
+#include <engine/fifo.h>
+
+static void
+nv44_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
+	unsigned long flags;
+
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
+
+	switch (device->chipset) {
+	case 0x44:
+	case 0x4a:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x4c:
+	case 0x63:
+	case 0x67:
+	case 0x68:
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	case 0x4e:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	nvkm_fifo_start(fifo, &flags);
+}
+
+static const struct nvkm_gr_func
+nv44_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv44_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4497, &nv40_gr_object }, /* curie */
+		{}
+	}
+};
+
+int
+nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv44_gr, device, index, pgr);
+}
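
nv44_gr_tile() above brackets its register writes between
nvkm_fifo_pause()/nvkm_fifo_start() and an explicit nv04_gr_idle()
wait, so a half-programmed tile region is never visible to in-flight
work. A rough userspace analogue of that quiesce/program/resume
bracket, with a mutex standing in for the FIFO pause (illustrative
only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned tile_regs[3];

	static void wait_idle(void)
	{
		/* the real code polls an engine status register here */
	}

	static void program_tile(unsigned pitch, unsigned limit,
				 unsigned addr)
	{
		pthread_mutex_lock(&fifo_lock);	/* nvkm_fifo_pause() */
		wait_idle();			/* nv04_gr_idle() */
		tile_regs[0] = pitch;		/* must change as a set */
		tile_regs[1] = limit;
		tile_regs[2] = addr;
		pthread_mutex_unlock(&fifo_lock); /* nvkm_fifo_start() */
	}

	int main(void)
	{
		program_tile(0x100, 0xffff, 0);
		printf("pitch=%#x limit=%#x addr=%#x\n",
		       tile_regs[0], tile_regs[1], tile_regs[2]);
		return 0;
	}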
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 270d7cd..b19b912 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -24,27 +24,13 @@
 #include "nv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-#include <subdev/timer.h>
 
-struct nv50_gr_priv {
-	struct nvkm_gr base;
-	spinlock_t lock;
-	u32 size;
-};
-
-struct nv50_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv50_gr_units(struct nvkm_gr *gr)
 {
-	struct nv50_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,86 +38,25 @@
  ******************************************************************************/
 
 static int
-nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv50_gr_ofuncs = {
-	.ctor = nv50_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x5097, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-g84_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8297, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt200_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8397, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt215_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8597, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-mcp89_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{ 0x8697, &nv50_gr_ofuncs },
-	{}
+const struct nvkm_object_func
+nv50_gr_object = {
+	.bind = nv50_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -139,161 +64,44 @@
  ******************************************************************************/
 
 static int
-nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv50_gr_priv *priv = (void *)engine;
-	struct nv50_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	return 0;
+	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
-nv50_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+static const struct nvkm_object_func
+nv50_gr_chan = {
+	.bind = nv50_gr_chan_bind,
 };
 
+int
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nv50_gr_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+	return 0;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static const struct nvkm_bitfield nv50_pgr_status[] = {
-	{ 0x00000001, "BUSY" }, /* set when any bit is set */
-	{ 0x00000002, "DISPATCH" },
-	{ 0x00000004, "UNK2" },
-	{ 0x00000008, "UNK3" },
-	{ 0x00000010, "UNK4" },
-	{ 0x00000020, "UNK5" },
-	{ 0x00000040, "M2MF" },
-	{ 0x00000080, "UNK7" },
-	{ 0x00000100, "CTXPROG" },
-	{ 0x00000200, "VFETCH" },
-	{ 0x00000400, "CCACHE_PREGEOM" },
-	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
-	{ 0x00001000, "VCLIP" },
-	{ 0x00002000, "RATTR_APLANE" },
-	{ 0x00004000, "TRAST" },
-	{ 0x00008000, "CLIPID" },
-	{ 0x00010000, "ZCULL" },
-	{ 0x00020000, "ENG2D" },
-	{ 0x00040000, "RMASK" },
-	{ 0x00080000, "TPC_RAST" },
-	{ 0x00100000, "TPC_PROP" },
-	{ 0x00200000, "TPC_TEX" },
-	{ 0x00400000, "TPC_GEOM" },
-	{ 0x00800000, "TPC_MP" },
-	{ 0x01000000, "ROP" },
-	{}
-};
-
-static const char *const nv50_pgr_vstatus_0[] = {
-	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
-	NULL
-};
-
-static const char *const nv50_pgr_vstatus_1[] = {
-	"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
-};
-
-static const char *const nv50_pgr_vstatus_2[] = {
-	"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
-	"ROP", NULL
-};
-
-static void
-nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
-		       const char *const units[], u32 status)
-{
-	int i;
-
-	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
-
-	for (i = 0; units[i] && status; i++) {
-		if ((status & 7) == 1)
-			pr_cont(" %s", units[i]);
-		status >>= 3;
-	}
-	if (status)
-		pr_cont(" (invalid: 0x%x)", status);
-	pr_cont("\n");
-}
-
-static int
-g84_gr_tlb_flush(struct nvkm_engine *engine)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(engine);
-	struct nv50_gr_priv *priv = (void *)engine;
-	bool idle, timeout = false;
-	unsigned long flags;
-	u64 start;
-	u32 tmp;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
-
-	start = ptimer->read(ptimer);
-	do {
-		idle = true;
-
-		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-	} while (!idle &&
-		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
-
-	if (timeout) {
-		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
-
-		tmp = nv_rd32(priv, 0x400700);
-		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
-		nvkm_bitfield_print(nv50_pgr_status, tmp);
-		pr_cont("\n");
-
-		nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
-				       nv_rd32(priv, 0x400380));
-		nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
-				       nv_rd32(priv, 0x400384));
-		nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
-				       nv_rd32(priv, 0x400388));
-	}
-
-
-	nv_wr32(priv, 0x100c80, 0x00000001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-		nv_error(priv, "vm flush timeout\n");
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return timeout ? -EBUSY : 0;
-}
-
 static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
 	{ 0x01, "STACK_UNDERFLOW" },
 	{ 0x02, "STACK_MISMATCH" },
@@ -427,157 +235,172 @@
 };
 
 static void
-nv50_priv_prop_trap(struct nv50_gr_priv *priv,
-		    u32 ustatus_addr, u32 ustatus, u32 tp)
+nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
 {
-	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
-	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
-	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
-	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
-	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
-	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
-	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
+	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
+	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
+	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
+	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
+	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
+	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
+	char msg[128];
 
 	/* CUDA memory: l[], g[] or stack. */
 	if (ustatus & 0x00000080) {
 		if (e18 & 0x80000000) {
 			/* g[] read fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
 					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
 			e18 &= ~0x1f000000;
 		} else if (e18 & 0xc) {
 			/* g[] write fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
 				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
 			e18 &= ~0x00000f80;
 		} else {
-			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
 				 tp, e14, e10);
 		}
 		ustatus &= ~0x00000080;
 	}
 	if (ustatus) {
-		nv_error(priv, "TRAP_PROP - TP %d -", tp);
-		nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
-		pr_cont(" - Address %02x%08x\n", e14, e10);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
+		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
+				   "Address %02x%08x\n",
+			   tp, ustatus, msg, e14, e10);
 	}
-	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
 		 tp, e0c, e18, e1c, e20, e24);
 }
 
 static void
-nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
+nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
 {
-	u32 units = nv_rd32(priv, 0x1540);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	u32 addr, mp10, status, pc, oplow, ophigh;
+	char msg[128];
 	int i;
 	int mps = 0;
 	for (i = 0; i < 4; i++) {
 		if (!(units & 1 << (i+24)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			addr = 0x408200 + (tpid << 12) + (i << 7);
 		else
 			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(priv, addr + 0x10);
-		status = nv_rd32(priv, addr + 0x14);
+		mp10 = nvkm_rd32(device, addr + 0x10);
+		status = nvkm_rd32(device, addr + 0x14);
 		if (!status)
 			continue;
 		if (display) {
-			nv_rd32(priv, addr + 0x20);
-			pc = nv_rd32(priv, addr + 0x24);
-			oplow = nv_rd32(priv, addr + 0x70);
-			ophigh = nv_rd32(priv, addr + 0x74);
-			nv_error(priv, "TRAP_MP_EXEC - "
-					"TP %d MP %d:", tpid, i);
-			nvkm_bitfield_print(nv50_mp_exec_errors, status);
-			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
+			nvkm_rd32(device, addr + 0x20);
+			pc = nvkm_rd32(device, addr + 0x24);
+			oplow = nvkm_rd32(device, addr + 0x70);
+			ophigh = nvkm_rd32(device, addr + 0x74);
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_mp_exec_errors, status);
+			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
+					   "%08x [%s] at %06x warp %d, "
+					   "opcode %08x %08x\n",
+				   tpid, i, status, msg, pc & 0xffffff,
+				   pc >> 24, oplow, ophigh);
 		}
-		nv_wr32(priv, addr + 0x10, mp10);
-		nv_wr32(priv, addr + 0x14, 0);
+		nvkm_wr32(device, addr + 0x10, mp10);
+		nvkm_wr32(device, addr + 0x14, 0);
 		mps++;
 	}
 	if (!mps && display)
-		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
 				"No MPs claiming errors?\n", tpid);
 }
 
 static void
-nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
+nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
 		  u32 ustatus_new, int display, const char *name)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	int tps = 0;
-	u32 units = nv_rd32(priv, 0x1540);
 	int i, r;
+	char msg[128];
 	u32 ustatus_addr, ustatus;
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			ustatus_addr = ustatus_old + (i << 12);
 		else
 			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
 		if (!ustatus)
 			continue;
 		tps++;
 		switch (type) {
 		case 6: /* texture error... unknown for now */
 			if (display) {
-				nv_error(priv, "magic set %d:\n", i);
+				nvkm_error(subdev, "magic set %d:\n", i);
 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(priv, r));
+					nvkm_error(subdev, "\t%08x: %08x\n", r,
+						   nvkm_rd32(device, r));
 				if (ustatus) {
-					nv_error(priv, "%s - TP%d:", name, i);
-					nvkm_bitfield_print(nv50_tex_traps,
-							       ustatus);
-					pr_cont("\n");
+					nvkm_snprintbf(msg, sizeof(msg),
+						       nv50_tex_traps, ustatus);
+					nvkm_error(subdev,
+						   "%s - TP%d: %08x [%s]\n",
+						   name, i, ustatus, msg);
 					ustatus = 0;
 				}
 			}
 			break;
 		case 7: /* MP error */
 			if (ustatus & 0x04030000) {
-				nv50_priv_mp_trap(priv, i, display);
+				nv50_gr_mp_trap(gr, i, display);
 				ustatus &= ~0x04030000;
 			}
 			if (ustatus && display) {
-				nv_error(priv, "%s - TP%d:", name, i);
-				nvkm_bitfield_print(nv50_mpc_traps, ustatus);
-				pr_cont("\n");
+				nvkm_snprintbf(msg, sizeof(msg),
+					       nv50_mpc_traps, ustatus);
+				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
+					   name, i, ustatus, msg);
 				ustatus = 0;
 			}
 			break;
 		case 8: /* PROP error */
 			if (display)
-				nv50_priv_prop_trap(
-						priv, ustatus_addr, ustatus, i);
+				nv50_gr_prop_trap(
+						gr, ustatus_addr, ustatus, i);
 			ustatus = 0;
 			break;
 		}
 		if (ustatus) {
 			if (display)
-				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
 		}
-		nv_wr32(priv, ustatus_addr, 0xc0000000);
+		nvkm_wr32(device, ustatus_addr, 0xc0000000);
 	}
 
 	if (!tps && display)
-		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
+		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
-nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
-		     int chid, u64 inst, struct nvkm_object *engctx)
+nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
+		     int chid, u64 inst, const char *name)
 {
-	u32 status = nv_rd32(priv, 0x400108);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 status = nvkm_rd32(device, 0x400108);
 	u32 ustatus;
+	char msg[128];
 
 	if (!status && display) {
-		nv_error(priv, "TRAP: no units reporting traps?\n");
+		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
 		return 1;
 	}
 
@@ -585,71 +408,72 @@
 	 * COND, QUERY. If you get a trap from it, the command is still stuck
 	 * in DISPATCH and you need to do something about it. */
 	if (status & 0x001) {
-		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
 		if (!ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
 		}
 
-		nv_wr32(priv, 0x400500, 0x00000000);
+		nvkm_wr32(device, 0x400500, 0x00000000);
 
 		/* Known to be triggered by screwed up NOTIFY and COND... */
 		if (ustatus & 0x00000001) {
-			u32 addr = nv_rd32(priv, 0x400808);
+			u32 addr = nvkm_rd32(device, 0x400808);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 datal = nv_rd32(priv, 0x40080c);
-			u32 datah = nv_rd32(priv, 0x400810);
-			u32 class = nv_rd32(priv, 0x400814);
-			u32 r848 = nv_rd32(priv, 0x400848);
+			u32 datal = nvkm_rd32(device, 0x40080c);
+			u32 datah = nvkm_rd32(device, 0x400810);
+			u32 class = nvkm_rd32(device, 0x400814);
+			u32 r848 = nvkm_rd32(device, 0x400848);
 
-			nv_error(priv, "TRAP DISPATCH_FAULT\n");
+			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, datah, datal, addr, r848);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x%08x "
+					   "400808 %08x 400848 %08x\n",
+					   chid, inst, name, subc, class, mthd,
+					   datah, datal, addr, r848);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x400808, 0);
-			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
-			nv_wr32(priv, 0x400848, 0);
+			nvkm_wr32(device, 0x400808, 0);
+			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
+			nvkm_wr32(device, 0x400848, 0);
 			ustatus &= ~0x00000001;
 		}
 
 		if (ustatus & 0x00000002) {
-			u32 addr = nv_rd32(priv, 0x40084c);
+			u32 addr = nvkm_rd32(device, 0x40084c);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 data = nv_rd32(priv, 0x40085c);
-			u32 class = nv_rd32(priv, 0x400814);
+			u32 data = nvkm_rd32(device, 0x40085c);
+			u32 class = nvkm_rd32(device, 0x400814);
 
-			nv_error(priv, "TRAP DISPATCH_QUERY\n");
+			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, data, addr);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x "
+					   "40084c %08x\n", chid, inst, name,
+					   subc, class, mthd, data, addr);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x40084c, 0);
+			nvkm_wr32(device, 0x40084c, 0);
 			ustatus &= ~0x00000002;
 		}
 
 		if (ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH (unknown "
-				      "0x%08x)\n", ustatus);
+			nvkm_error(subdev, "TRAP_DISPATCH "
+					   "(unknown %08x)\n", ustatus);
 		}
 
-		nv_wr32(priv, 0x400804, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x001);
+		nvkm_wr32(device, 0x400804, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x001);
 		status &= ~0x001;
 		if (!status)
 			return 0;
@@ -657,81 +481,91 @@
 
 	/* M2MF: Memory to memory copy engine. */
 	if (status & 0x002) {
-		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_M2MF");
-			nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
-				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_m2mf, ustatus);
+			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x406804),
+				   nvkm_rd32(device, 0x406808),
+				   nvkm_rd32(device, 0x40680c),
+				   nvkm_rd32(device, 0x406810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 2);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x406800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x002);
+		nvkm_wr32(device, 0x400040, 2);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x406800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x002);
 		status &= ~0x002;
 	}
 
 	/* VFETCH: Fetches data from vertex buffers. */
 	if (status & 0x004) {
-		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_VFETCH");
-			nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
-				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_vfetch, ustatus);
+			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x400c00),
+				   nvkm_rd32(device, 0x400c08),
+				   nvkm_rd32(device, 0x400c0c),
+				   nvkm_rd32(device, 0x400c10));
 		}
 
-		nv_wr32(priv, 0x400c04, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x004);
+		nvkm_wr32(device, 0x400c04, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x004);
 		status &= ~0x004;
 	}
 
 	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
 	if (status & 0x008) {
-		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_STRMOUT");
-			nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
-				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_strmout, ustatus);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x401804),
+				   nvkm_rd32(device, 0x401808),
+				   nvkm_rd32(device, 0x40180c),
+				   nvkm_rd32(device, 0x401810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 0x80);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x401800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x008);
+		nvkm_wr32(device, 0x400040, 0x80);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x401800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x008);
 		status &= ~0x008;
 	}
 
 	/* CCACHE: Handles code and c[] caches and fills them. */
 	if (status & 0x010) {
-		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_CCACHE");
-			nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
-				     " %08x %08x %08x\n",
-				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
-				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
-				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
-				nv_rd32(priv, 0x40501c));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_ccache, ustatus);
+			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
+					   "%08x %08x %08x\n",
+				   nvkm_rd32(device, 0x405000),
+				   nvkm_rd32(device, 0x405004),
+				   nvkm_rd32(device, 0x405008),
+				   nvkm_rd32(device, 0x40500c),
+				   nvkm_rd32(device, 0x405010),
+				   nvkm_rd32(device, 0x405014),
+				   nvkm_rd32(device, 0x40501c));
 		}
 
-		nv_wr32(priv, 0x405018, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x010);
+		nvkm_wr32(device, 0x405018, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x010);
 		status &= ~0x010;
 	}
 
@@ -739,239 +573,174 @@
 	 * remaining, so try to handle it anyway. Perhaps related to that
 	 * unknown DMA slot on tesla? */
 	if (status & 0x20) {
-		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
 		if (display)
-			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
-		nv_wr32(priv, 0x402000, 0xc0000000);
+			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
+		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
 	}
 
 	/* TEXTURE: CUDA texturing units */
 	if (status & 0x040) {
-		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
 				    "TRAP_TEXTURE");
-		nv_wr32(priv, 0x400108, 0x040);
+		nvkm_wr32(device, 0x400108, 0x040);
 		status &= ~0x040;
 	}
 
 	/* MP: CUDA execution engines. */
 	if (status & 0x080) {
-		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
 				    "TRAP_MP");
-		nv_wr32(priv, 0x400108, 0x080);
+		nvkm_wr32(device, 0x400108, 0x080);
 		status &= ~0x080;
 	}
 
 	/* PROP:  Handles TP-initiated uncached memory accesses:
 	 * l[], g[], stack, 2d surfaces, render targets. */
 	if (status & 0x100) {
-		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
 				    "TRAP_PROP");
-		nv_wr32(priv, 0x400108, 0x100);
+		nvkm_wr32(device, 0x400108, 0x100);
 		status &= ~0x100;
 	}
 
 	if (status) {
 		if (display)
-			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
-		nv_wr32(priv, 0x400108, status);
+			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
+		nvkm_wr32(device, 0x400108, status);
 	}
 
 	return 1;
 }
 
-static void
-nv50_gr_intr(struct nvkm_subdev *subdev)
+void
+nv50_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv50_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 class = nv_rd32(priv, 0x400814);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 class = nvkm_rd32(device, 0x400814);
 	u32 show = stat, show_bitfield = stat;
-	int chid;
+	const struct nvkm_enum *en;
+	unsigned long flags;
+	const char *name = "unknown";
+	char msg[128];
+	int chid = -1;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (handle && !nv_call(handle->object, mthd, data))
-			show &= ~0x00000010;
-		nvkm_handle_put(handle);
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan)  {
+		name = chan->object.client->name;
+		chid = chan->chid;
 	}
 
 	if (show & 0x00100000) {
-		u32 ecode = nv_rd32(priv, 0x400110);
-		nv_error(priv, "DATA_ERROR ");
-		nvkm_enum_print(nv50_data_error_names, ecode);
-		pr_cont("\n");
+		u32 ecode = nvkm_rd32(device, 0x400110);
+		en = nvkm_enum_find(nv50_data_error_names, ecode);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
+			   ecode, en ? en->name : "");
 		show_bitfield &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
-					  engctx))
+		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
 			show &= ~0x00200000;
 		show_bitfield &= ~0x00200000;
 	}
 
-	nv_wr32(priv, 0x400100, stat);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, stat);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	if (show) {
 		show &= show_bitfield;
-		if (show) {
-			nv_error(priv, "%s", "");
-			nvkm_bitfield_print(nv50_gr_intr_name, show);
-			pr_cont("\n");
-		}
-		nv_error(priv,
-			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, (u64)inst << 12, nvkm_client_name(engctx),
-			 subc, class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
+		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   stat, msg, chid, (u64)inst << 12, name,
+			   subc, class, mthd, data);
 	}
 
-	if (nv_rd32(priv, 0x400824) & (1 << 31))
-		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+	if (nvkm_rd32(device, 0x400824) & (1 << 31))
+		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
 
-	nvkm_engctx_put(engctx);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-static int
-nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv50_gr_init(struct nvkm_gr *base)
 {
-	struct nv50_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00201000;
-	nv_subdev(priv)->intr = nv50_gr_intr;
-	nv_engine(priv)->cclass = &nv50_gr_cclass;
-
-	priv->base.units = nv50_gr_units;
-
-	switch (nv_device(priv)->chipset) {
-	case 0x50:
-		nv_engine(priv)->sclass = nv50_gr_sclass;
-		break;
-	case 0x84:
-	case 0x86:
-	case 0x92:
-	case 0x94:
-	case 0x96:
-	case 0x98:
-		nv_engine(priv)->sclass = g84_gr_sclass;
-		break;
-	case 0xa0:
-	case 0xaa:
-	case 0xac:
-		nv_engine(priv)->sclass = gt200_gr_sclass;
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_engine(priv)->sclass = gt215_gr_sclass;
-		break;
-	case 0xaf:
-		nv_engine(priv)->sclass = mcp89_gr_sclass;
-		break;
-
-	}
-
-	/* unfortunate hw bug workaround... */
-	if (nv_device(priv)->chipset != 0x50 &&
-	    nv_device(priv)->chipset != 0xac)
-		nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
-
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv50_gr_init(struct nvkm_object *object)
-{
-	struct nv50_gr_priv *priv = (void *)object;
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, units, i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
-	nv_wr32(priv, 0x40008c, 0x00000004);
+	nvkm_wr32(device, 0x40008c, 0x00000004);
 
 	/* reset/enable traps and interrupts */
-	nv_wr32(priv, 0x400804, 0xc0000000);
-	nv_wr32(priv, 0x406800, 0xc0000000);
-	nv_wr32(priv, 0x400c04, 0xc0000000);
-	nv_wr32(priv, 0x401800, 0xc0000000);
-	nv_wr32(priv, 0x405018, 0xc0000000);
-	nv_wr32(priv, 0x402000, 0xc0000000);
+	nvkm_wr32(device, 0x400804, 0xc0000000);
+	nvkm_wr32(device, 0x406800, 0xc0000000);
+	nvkm_wr32(device, 0x400c04, 0xc0000000);
+	nvkm_wr32(device, 0x401800, 0xc0000000);
+	nvkm_wr32(device, 0x405018, 0xc0000000);
+	nvkm_wr32(device, 0x402000, 0xc0000000);
 
-	units = nv_rd32(priv, 0x001540);
+	units = nvkm_rd32(device, 0x001540);
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
 
-		if (nv_device(priv)->chipset < 0xa0) {
-			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+		if (device->chipset < 0xa0) {
+			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
 		} else {
-			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
 		}
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	/* upload context program, initialise ctxctl defaults */
-	ret = nv50_grctx_init(nv_device(priv), &priv->size);
+	ret = nv50_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, 0x400824, 0x00000000);
-	nv_wr32(priv, 0x400828, 0x00000000);
-	nv_wr32(priv, 0x40082c, 0x00000000);
-	nv_wr32(priv, 0x400830, 0x00000000);
-	nv_wr32(priv, 0x40032c, 0x00000000);
-	nv_wr32(priv, 0x400330, 0x00000000);
+	nvkm_wr32(device, 0x400824, 0x00000000);
+	nvkm_wr32(device, 0x400828, 0x00000000);
+	nvkm_wr32(device, 0x40082c, 0x00000000);
+	nvkm_wr32(device, 0x400830, 0x00000000);
+	nvkm_wr32(device, 0x40032c, 0x00000000);
+	nvkm_wr32(device, 0x400330, 0x00000000);
 
 	/* some unknown zcull magic */
-	switch (nv_device(priv)->chipset & 0xf0) {
+	switch (device->chipset & 0xf0) {
 	case 0x50:
 	case 0x80:
 	case 0x90:
-		nv_wr32(priv, 0x402ca8, 0x00000800);
+		nvkm_wr32(device, 0x402ca8, 0x00000800);
 		break;
 	case 0xa0:
 	default:
-		if (nv_device(priv)->chipset == 0xa0 ||
-		    nv_device(priv)->chipset == 0xaa ||
-		    nv_device(priv)->chipset == 0xac) {
-			nv_wr32(priv, 0x402ca8, 0x00000802);
+		if (device->chipset == 0xa0 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac) {
+			nvkm_wr32(device, 0x402ca8, 0x00000802);
 		} else {
-			nv_wr32(priv, 0x402cc0, 0x00000000);
-			nv_wr32(priv, 0x402ca8, 0x00000002);
+			nvkm_wr32(device, 0x402cc0, 0x00000000);
+			nvkm_wr32(device, 0x402ca8, 0x00000002);
 		}
 
 		break;
@@ -979,21 +748,47 @@
 
 	/* zero out zcull regions */
 	for (i = 0; i < 8; i++) {
-		nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv50_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv50_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv50_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x5097, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{}
+	}
 };
+
+int
+nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&nv50_gr, device, index, pgr);
+}
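
A recurring change through nv50.c above is the error-path rework: the
trap decoders no longer chain nvkm_bitfield_print() with pr_cont()
fragments, but format the status bits into a local msg[128] via
nvkm_snprintbf() and emit a single nvkm_error() line. A minimal
analogue of such a formatter, with an illustrative table (the real
nv50_gr_trap_m2mf entries differ):

	#include <stdio.h>

	struct bitfield { unsigned mask; const char *name; };

	static const struct bitfield demo_bits[] = {
		{ 0x00000001, "NOTIFY" },
		{ 0x00000002, "IN" },
		{ 0x00000004, "OUT" },
		{}
	};

	static void snprintbf(char *buf, size_t size,
			      const struct bitfield *bf, unsigned value)
	{
		size_t len = 0;

		buf[0] = '\0';
		for (; bf->name && len + 1 < size; bf++) {
			if (value & bf->mask)
				len += snprintf(buf + len, size - len,
						"%s%s", len ? " " : "",
						bf->name);
		}
	}

	int main(void)
	{
		char msg[128];

		snprintbf(msg, sizeof(msg), demo_bits, 0x5);
		printf("TRAP_M2MF 00000005 [%s]\n", msg); /* NOTIFY OUT */
		return 0;
	}

One practical benefit of the single-line form: concurrent loggers
cannot interleave the message, which pr_cont() continuations could.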
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index bcf786f..45eec83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,8 +1,34 @@
 #ifndef __NV50_GR_H__
 #define __NV50_GR_H__
-#include <engine/gr.h>
-struct nvkm_device;
-struct nvkm_gpuobj;
+#define nv50_gr(p) container_of((p), struct nv50_gr, base)
+#include "priv.h"
+
+struct nv50_gr {
+	struct nvkm_gr base;
+	const struct nv50_gr_func *func;
+	spinlock_t lock;
+	u32 size;
+};
+
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv50_gr_init(struct nvkm_gr *);
+void nv50_gr_intr(struct nvkm_gr *);
+u64 nv50_gr_units(struct nvkm_gr *);
+
+int g84_gr_tlb_flush(struct nvkm_gr *);
+
+#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+
+struct nv50_gr_chan {
+	struct nvkm_object object;
+	struct nv50_gr *gr;
+};
+
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv50_gr_object;
 
 int  nv50_grctx_init(struct nvkm_device *, u32 *size);
 void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
new file mode 100644
index 0000000..a234590
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_GR_PRIV_H__
+#define __NVKM_GR_PRIV_H__
+#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
+#include <engine/gr.h>
+#include <core/enum.h>
+struct nvkm_fb_tile;
+struct nvkm_fifo_chan;
+
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int index, u32 pmc_enable, bool enable,
+		 struct nvkm_gr *);
+
+bool nv04_gr_idle(struct nvkm_gr *);
+
+struct nvkm_gr_func {
+	void *(*dtor)(struct nvkm_gr *);
+	int (*oneinit)(struct nvkm_gr *);
+	int (*init)(struct nvkm_gr *);
+	void (*intr)(struct nvkm_gr *);
+	void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
+	int (*tlb_flush)(struct nvkm_gr *);
+	int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
+	/* Returns chipset-specific counts of units packed into a u64.
+	 */
+	u64 (*units)(struct nvkm_gr *);
+	struct nvkm_sclass sclass[];
+};
+
+extern const struct nvkm_bitfield nv04_gr_nsource[];
+extern const struct nvkm_object_func nv04_gr_object;
+
+extern const struct nvkm_bitfield nv10_gr_intr_name[];
+extern const struct nvkm_bitfield nv10_gr_nstatus[];
+
+extern const struct nvkm_enum nv50_data_error_names[];
+#endif
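
The new priv.h above is the hinge of the whole refactor: every hook the
old nvkm_oclass/handle machinery dispatched indirectly is now an
explicit slot in nvkm_gr_func, and the common layer only ever calls
through the table. A compact sketch of that dispatch shape, with
illustrative names rather than the nvkm API:

	#include <stdio.h>

	struct gr;

	struct gr_func {
		int  (*init)(struct gr *);
		void (*intr)(struct gr *);
	};

	struct gr {
		const struct gr_func *func;
	};

	static int gr_init(struct gr *gr)
	{
		/* common code: optional hooks are NULL-checked */
		return gr->func->init ? gr->func->init(gr) : 0;
	}

	static int demo_init(struct gr *gr)
	{
		(void)gr;
		puts("nv40-style init");
		return 0;
	}

	static const struct gr_func demo_func = { .init = demo_init };

	int main(void)
	{
		struct gr gr = { .func = &demo_func };
		return gr_init(&gr);
	}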
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 0df889f..34ff001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,74 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#include "priv.h"
 
-struct g84_mpeg_priv {
-	struct nvkm_mpeg base;
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+g84_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, G82_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
 
-struct g84_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_sclass[] = {
-	{ 0x8274, &nv50_mpeg_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct g84_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv50_mpeg_intr;
-	nv_engine(priv)->cclass = &g84_mpeg_cclass;
-	nv_engine(priv)->sclass = g84_mpeg_sclass;
-	return 0;
+	return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
+				true, pmpeg);
 }
-
-struct nvkm_oclass
-g84_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
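
g84.c above collapses to a const nvkm_engine_func table and a thin
g84_mpeg_new() wrapper; the supported classes sit in a zero-terminated
sclass array rather than separate oclass lists. A small sketch of
walking such a sentinel-terminated table (struct layout illustrative):

	#include <stdio.h>

	struct sclass_demo { int minver, maxver; unsigned oclass; };

	static const struct sclass_demo sclass[] = {
		{ -1, -1, 0x8274 },
		{}	/* zeroed sentinel ends the walk */
	};

	int main(void)
	{
		const struct sclass_demo *s;

		for (s = sclass; s->oclass; s++)
			printf("class %04x\n", s->oclass);
		return 0;
	}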
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index b5bef07..d4d8942 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -24,281 +24,271 @@
 #include "nv31.h"
 
 #include <core/client.h>
-#include <core/handle.h>
-#include <engine/fifo.h>
-#include <subdev/instmem.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
 
 /*******************************************************************************
  * MPEG object classes
  ******************************************************************************/
 
 static int
-nv31_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
+nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
-}
-
-static int
-nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
-	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
-	u32 size = dma1 + 1;
-
-	/* only allow linear DMA objects */
-	if (!(dma0 & 0x00002000))
-		return -EINVAL;
-
-	if (mthd == 0x0190) {
-		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
-	} else
-	if (mthd == 0x01a0) {
-		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
-	} else {
-		/* DMA_IMAGE, VRAM only */
-		if (dma0 & 0x00030000)
-			return -EINVAL;
-
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
 	}
-
-	return 0;
+	return ret;
 }
 
-struct nvkm_ofuncs
-nv31_mpeg_ofuncs = {
-	.ctor = nv31_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_omthds
-nv31_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
-	{}
-};
-
-struct nvkm_oclass
-nv31_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
-	{}
+const struct nvkm_object_func
+nv31_mpeg_object = {
+	.bind = nv31_mpeg_object_bind,
 };
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
 
-static int
-nv31_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static void *
+nv31_mpeg_chan_dtor(struct nvkm_object *object)
 {
-	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
+	struct nv31_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (mpeg->chan == chan)
+		mpeg->chan = NULL;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv31_mpeg_chan = {
+	.dtor = nv31_mpeg_chan_dtor,
+};
+
+int
+nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
 	struct nv31_mpeg_chan *chan;
 	unsigned long flags;
-	int ret;
+	int ret = -EBUSY;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
 
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	if (priv->chan) {
-		spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		*pobject = NULL;
-		return -EBUSY;
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (!mpeg->chan) {
+		mpeg->chan = chan;
+		ret = 0;
 	}
-	priv->chan = chan;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	return 0;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return ret;
 }
 
-static void
-nv31_mpeg_context_dtor(struct nvkm_object *object)
-{
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	struct nv31_mpeg_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	priv->chan = NULL;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	nvkm_object_destroy(&chan->base);
-}
-
-struct nvkm_oclass
-nv31_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_context_ctor,
-		.dtor = nv31_mpeg_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
 /*******************************************************************************
  * PMPEG engine/subdev functions
  ******************************************************************************/
 
 void
-nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
+nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_device *device = mpeg->engine.subdev.device;
 
-	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
-	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
-	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
+	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
 }
 
-void
-nv31_mpeg_intr(struct nvkm_subdev *subdev)
+static bool
+nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_handle *handle;
-	struct nvkm_object *engctx;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
+	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
+	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
+
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return false;
+
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nvkm_mask(device, 0x00b300, 0x00010000,
+				  (dma0 & 0x00030000) ? 0x00010000 : 0);
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nvkm_mask(device, 0x00b300, 0x00020000,
+				  (dma0 & 0x00030000) ? 0x00020000 : 0);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x00030000)
+			return false;
+
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
+	}
+
+	return true;
+}
+
+static bool
+nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
+{
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return mpeg->func->mthd_dma(device, mthd, data);
+	default:
+		break;
+	}
+	return false;
+}
+
+static void
+nv31_mpeg_intr(struct nvkm_engine *engine)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 	unsigned long flags;
 
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	engctx = nv_object(priv->chan);
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
-		if (type == 0x00000010 && engctx) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+		if (type == 0x00000010) {
+			if (!nv31_mpeg_mthd(mpeg, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 pfifo->chid(pfifo, engctx),
-			 nvkm_client_name(engctx), stat, type, mthd, data);
+		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
+			   mpeg->chan ? mpeg->chan->fifo->chid : -1,
+			   mpeg->chan ? mpeg->chan->object.client->name :
+			   "unknown", stat, type, mthd, data);
 	}
 
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-}
-
-static int
-nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv31_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv31_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
 int
-nv31_mpeg_init(struct nvkm_object *object)
+nv31_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv31_mpeg_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
-
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
 
 	/* VPE init */
-	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
 
 	/* PMPEG init */
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b220, 0x00000031);
-	nv_wr32(priv, 0x00b300, 0x02001ec1);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b220, 0x00000031);
+	nvkm_wr32(device, 0x00b300, 0x02001ec1);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
 
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
 
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv31_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static void *
+nv31_mpeg_dtor(struct nvkm_engine *engine)
+{
+	return nv31_mpeg(engine);
+}
+
+static const struct nvkm_engine_func
+nv31_mpeg_ = {
+	.dtor = nv31_mpeg_dtor,
+	.init = nv31_mpeg_init,
+	.intr = nv31_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv31_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_engine **pmpeg)
+{
+	struct nv31_mpeg *mpeg;
+
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	mpeg->func = func;
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
+
+static const struct nv31_mpeg_func
+nv31_mpeg = {
+	.mthd_dma = nv31_mpeg_mthd_dma,
+};
+
+int
+nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+}
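
nv31 PMPEG supports a single active context, so nv31_mpeg_chan_new() claims an exclusive slot under the engine spinlock and returns -EBUSY when it is already taken, while the destructor clears the slot only if it still owns it (the `if (mpeg->chan == chan)` check keeps the loser's teardown from clobbering the winner). A standalone sketch of that claim/release discipline, using a pthread mutex in place of the kernel spinlock:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chan;

    struct mpeg {
            pthread_mutex_t lock;
            struct chan *chan;         /* the single active channel, or NULL */
    };

    struct chan {
            struct mpeg *mpeg;
    };

    static int
    chan_new(struct mpeg *mpeg, struct chan **pchan)
    {
            struct chan *chan;
            int ret = -EBUSY;

            if (!(chan = calloc(1, sizeof(*chan))))
                    return -ENOMEM;
            chan->mpeg = mpeg;
            *pchan = chan;             /* caller owns the object either way... */

            pthread_mutex_lock(&mpeg->lock);
            if (!mpeg->chan) {         /* ...but only one claims the engine */
                    mpeg->chan = chan;
                    ret = 0;
            }
            pthread_mutex_unlock(&mpeg->lock);
            return ret;
    }

    static void
    chan_del(struct chan *chan)
    {
            struct mpeg *mpeg = chan->mpeg;

            pthread_mutex_lock(&mpeg->lock);
            if (mpeg->chan == chan)    /* don't clobber another owner's slot */
                    mpeg->chan = NULL;
            pthread_mutex_unlock(&mpeg->lock);
            free(chan);
    }

    int
    main(void)
    {
            struct mpeg mpeg = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct chan *a, *b;

            printf("first:  %d\n", chan_new(&mpeg, &a));   /* 0       */
            printf("second: %d\n", chan_new(&mpeg, &b));   /* -EBUSY  */
            chan_del(b);
            chan_del(a);
            return 0;
    }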
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 782b796..d3bb34f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,13 +1,30 @@
 #ifndef __NV31_MPEG_H__
 #define __NV31_MPEG_H__
+#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
+#include "priv.h"
 #include <engine/mpeg.h>
 
-struct nv31_mpeg_chan {
-	struct nvkm_object base;
-};
-
-struct nv31_mpeg_priv {
-	struct nvkm_mpeg base;
+struct nv31_mpeg {
+	const struct nv31_mpeg_func *func;
+	struct nvkm_engine engine;
 	struct nv31_mpeg_chan *chan;
 };
+
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
+		   int index, struct nvkm_engine **);
+
+struct nv31_mpeg_func {
+	bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
+};
+
+#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+
+struct nv31_mpeg_chan {
+	struct nvkm_object object;
+	struct nv31_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+};
+
+int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		       struct nvkm_object **);
 #endif
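
The reworked header derives nv31_mpeg and nv31_mpeg_chan from their embedded base objects with container_of() instead of the old `(void *)` casts through a `base` member. Because container_of() recovers the outer struct from a pointer to any member, the embedded object no longer has to be the first field. A freestanding demonstration of the idiom:

    #include <stddef.h>
    #include <stdio.h>

    /* same definition the kernel uses, modulo the typeof() type check */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct engine { int index; };

    struct mpeg {
            const char *name;        /* engine is NOT the first member */
            struct engine engine;
    };

    #define to_mpeg(p) container_of((p), struct mpeg, engine)

    int
    main(void)
    {
            struct mpeg mpeg = { .name = "nv31", .engine = { .index = 7 } };
            struct engine *engine = &mpeg.engine;  /* what callbacks receive */

            printf("%s, index %d\n", to_mpeg(engine)->name, engine->index);
            return 0;
    }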
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 9508bf9..16de5bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -25,110 +25,53 @@
 
 #include <subdev/instmem.h>
 
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+bool
+nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
+	struct nvkm_instmem *imem = device->imem;
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
+	u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
+	u32 dma2 = nvkm_instmem_rd32(imem, inst + 8);
 	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
 	u32 size = dma1 + 1;
 
 	/* only allow linear DMA objects */
 	if (!(dma0 & 0x00002000))
-		return -EINVAL;
+		return false;
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
+		nvkm_mask(device, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
+		nvkm_mask(device, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
 		if (dma0 & 0x00030000)
-			return -EINVAL;
+			return false;
 
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
 	}
 
-	return 0;
+	return true;
 }
 
-static struct nvkm_omthds
-nv40_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
-	{}
+static const struct nv31_mpeg_func
+nv40_mpeg = {
+	.mthd_dma = nv40_mpeg_mthd_dma,
 };
 
-struct nvkm_oclass
-nv40_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static void
-nv40_mpeg_intr(struct nvkm_subdev *subdev)
+int
+nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv31_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
-	}
+	return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
 }
-
-static int
-nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv40_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv40_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
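
Both the nv31 and nv40 method handlers decode the same three-word DMA object layout: bit 13 of word 0 must be set (linear objects only), the base address is assembled from the page-aligned part of word 2 plus the high bits of word 0, and word 1 holds limit = size - 1. A standalone decoder for that layout; the field interpretation is my reading of the code above, not documented register semantics:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* decode one 3-word DMA object as nv31/nv40_mpeg_mthd_dma() does */
    static bool
    dma_decode(uint32_t dma0, uint32_t dma1, uint32_t dma2,
               uint32_t *base, uint32_t *size)
    {
            if (!(dma0 & 0x00002000))       /* only linear objects allowed */
                    return false;
            *base = (dma2 & 0xfffff000) | (dma0 >> 20);
            *size = dma1 + 1;               /* word 1 stores the limit */
            return true;
    }

    int
    main(void)
    {
            uint32_t base, size;

            if (dma_decode(0x12302000, 0x0000ffff, 0x00456000, &base, &size))
                    printf("base %08x size %08x\n", base, size);
            return 0;
    }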
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 4720ac8..d433cfa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,165 +21,197 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
+#include "priv.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
 
-struct nv44_mpeg_priv {
-	struct nvkm_mpeg base;
-};
+#include <nvif/class.h>
 
-struct nv44_mpeg_chan {
-	struct nvkm_mpeg_chan base;
+struct nv44_mpeg {
+	struct nvkm_engine engine;
+	struct list_head chan;
 };
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
+#define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
 
-static int
-nv44_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv44_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4,
-				       16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
-	return 0;
-}
-
-static int
-nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend)
-{
-
-	struct nv44_mpeg_priv *priv = (void *)object->engine;
-	struct nv44_mpeg_chan *chan = (void *)object;
-	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
-
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x00b318) == inst)
-		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-	return 0;
-}
-
-static struct nvkm_oclass
-nv44_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = nv44_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+struct nv44_mpeg_chan {
+	struct nvkm_object object;
+	struct nv44_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+	u32 inst;
 };
 
+static int
+nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x78, 0x02001ec1);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
+}
+
+static int
+nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
+{
+
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	u32 inst = 0x80000000 | (chan->inst >> 4);
+
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000000);
+	if (nvkm_rd32(device, 0x00b318) == inst)
+		nvkm_mask(device, 0x00b318, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static void *
+nv44_mpeg_chan_dtor(struct nvkm_object *object)
+{
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv44_mpeg_chan = {
+	.dtor = nv44_mpeg_chan_dtor,
+	.fini = nv44_mpeg_chan_fini,
+	.bind = nv44_mpeg_chan_bind,
+};
+
+static int
+nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
+	struct nv44_mpeg_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv44_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_add(&chan->head, &mpeg->chan);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PMPEG engine/subdev functions
  ******************************************************************************/
 
-static void
-nv44_mpeg_intr(struct nvkm_subdev *subdev)
+static bool
+nv44_mpeg_mthd(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
-	u32 show = stat;
-	int chid;
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return nv40_mpeg_mthd_dma(device, mthd, data);
+	default:
+		break;
+	}
+	return false;
+}
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+static void
+nv44_mpeg_intr(struct nvkm_engine *engine)
+{
+	struct nv44_mpeg *mpeg = nv44_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nv44_mpeg_chan *temp, *chan = NULL;
+	unsigned long flags;
+	u32 inst = nvkm_rd32(device, 0x00b318) & 0x000fffff;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
+	u32 show = stat;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_for_each_entry(temp, &mpeg->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &mpeg->chan);
+			break;
+		}
+	}
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
 		if (type == 0x00000010) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv44_mpeg_mthd(subdev->device, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv,
-			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), stat,
-			 type, mthd, data);
+		nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->object.client->name : "unknown",
+			   stat, type, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
-static void
-nv44_mpeg_me_intr(struct nvkm_subdev *subdev)
-{
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv44_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
+static const struct nvkm_engine_func
+nv44_mpeg = {
+	.init = nv31_mpeg_init,
+	.intr = nv44_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv44_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
 	}
-}
-
-static int
-nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv44_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv44_mpeg_me_intr;
-	nv_engine(priv)->cclass = &nv44_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv44_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
 };
+
+int
+nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	struct nv44_mpeg *mpeg;
+
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&mpeg->chan);
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
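
Unlike nv31, nv44 supports multiple contexts, so the interrupt handler matches the active instance address against a per-engine list under the engine lock, and its list_del()/list_add() pair moves the hit to the head so the most recently active channel is found first next time. The move-to-front lookup in plain standalone C:

    #include <stdint.h>
    #include <stdio.h>

    struct chan {
            uint32_t inst;           /* instance address >> 4 */
            struct chan *next;
    };

    /* find a channel by instance and move it to the front (MRU order) */
    static struct chan *
    chan_find(struct chan **head, uint32_t inst)
    {
            struct chan **link = head, *chan;

            while ((chan = *link)) {
                    if (chan->inst == inst) {
                            *link = chan->next;   /* unlink */
                            chan->next = *head;   /* relink at head */
                            *head = chan;
                            return chan;
                    }
                    link = &chan->next;
            }
            return NULL;
    }

    int
    main(void)
    {
            struct chan c2 = { 0x2000, NULL }, c1 = { 0x1000, &c2 };
            struct chan *head = &c1;

            printf("found %p\n", (void *)chan_find(&head, 0x2000));
            printf("head is now %04x\n", head->inst);   /* 2000: MRU */
            return 0;
    }

In the real handler the list is only ever walked with mpeg->engine.lock held, which is also what makes the reordering safe against concurrent channel creation and destruction.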
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index b3463f3..c3a85df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,98 +21,35 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#include "priv.h"
 
-#include <subdev/bar.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-struct nv50_mpeg_priv {
-	struct nvkm_mpeg base;
-};
-
-struct nv50_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static int
-nv50_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
-}
-
-struct nvkm_ofuncs
-nv50_mpeg_ofuncs = {
-	.ctor = nv50_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_mpeg_sclass[] = {
-	{ 0x3174, &nv50_mpeg_ofuncs },
-	{}
-};
+#include <nvif/class.h>
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
 
-int
-nv50_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static int
+nv50_mpeg_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
-				       0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(chan, 0x0070, 0x00801ec1);
-	nv_wo32(chan, 0x007c, 0x0000037c);
-	bar->flush(bar);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
+		nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
+const struct nvkm_object_func
 nv50_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+	.bind = nv50_mpeg_cclass_bind,
 };
 
 /*******************************************************************************
@@ -120,106 +57,79 @@
  ******************************************************************************/
 
 void
-nv50_mpeg_intr(struct nvkm_subdev *subdev)
+nv50_mpeg_intr(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_wr32(priv, 0x00b308, 0x00000100);
+			nvkm_wr32(device, 0x00b308, 0x00000100);
 			show &= ~0x01000000;
 		}
 	}
 
 	if (show) {
-		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			stat, type, mthd, data);
+		nvkm_info(subdev, "%08x %08x %08x %08x\n",
+			  stat, type, mthd, data);
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
-}
-
-static void
-nv50_vpe_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-
-	if (nv_rd32(priv, 0x00b100))
-		nv50_mpeg_intr(subdev);
-
-	if (nv_rd32(priv, 0x00b800)) {
-		u32 stat = nv_rd32(priv, 0x00b800);
-		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(priv, 0xb800, stat);
-	}
-}
-
-static int
-nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_subdev(priv)->intr = nv50_vpe_intr;
-	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
-	nv_engine(priv)->sclass = nv50_mpeg_sclass;
-	return 0;
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 }
 
 int
-nv50_mpeg_init(struct nvkm_object *object)
+nv50_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)object;
-	int ret;
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
 
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b0e0, 0x0000001a);
 
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b0e0, 0x0000001a);
+	nvkm_wr32(device, 0x00b220, 0x00000044);
+	nvkm_wr32(device, 0x00b300, 0x00801ec1);
+	nvkm_wr32(device, 0x00b390, 0x00000000);
+	nvkm_wr32(device, 0x00b394, 0x00000000);
+	nvkm_wr32(device, 0x00b398, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
 
-	nv_wr32(priv, 0x00b220, 0x00000044);
-	nv_wr32(priv, 0x00b300, 0x00801ec1);
-	nv_wr32(priv, 0x00b390, 0x00000000);
-	nv_wr32(priv, 0x00b394, 0x00000000);
-	nv_wr32(priv, 0x00b398, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
 
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static const struct nvkm_engine_func
+nv50_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
+				true, pmpeg);
+}
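
Both init paths above replace the old nv_wait(priv, 0x00b200, 0x00000001, 0x00000000) helper with an open-coded nvkm_msec() loop: poll until bit 0 of 0x00b200 clears or a 2000ms budget runs out, with a negative result meaning timeout. The same idea in portable C with clock_gettime(); the simulated register is a stand-in so the sketch actually terminates:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t
    now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* stand-in for the MMIO read; decrements so the loop terminates */
    static uint32_t reg_00b200 = 0x00000005;
    static uint32_t rd32(void) { return reg_00b200--; }

    /* poll until bit 0 clears or the budget expires; <0 means timeout */
    static int64_t
    wait_idle_msec(int64_t budget_ms)
    {
            int64_t start = now_ms();

            do {
                    if (!(rd32() & 0x00000001))
                            return now_ms() - start;   /* time taken */
            } while (now_ms() - start < budget_ms);
            return -1;
    }

    int
    main(void)
    {
            if (wait_idle_msec(2000) < 0)
                    printf("timeout %08x\n", reg_00b200);
            else
                    printf("idle\n");
            return 0;
    }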
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
new file mode 100644
index 0000000..d575310
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_MPEG_PRIV_H__
+#define __NVKM_MPEG_PRIV_H__
+#include <engine/mpeg.h>
+struct nvkm_fifo_chan;
+
+int nv31_mpeg_init(struct nvkm_engine *);
+void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
+extern const struct nvkm_object_func nv31_mpeg_object;
+
+bool nv40_mpeg_mthd_dma(struct nvkm_device *, u32, u32);
+
+int nv50_mpeg_init(struct nvkm_engine *);
+void nv50_mpeg_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func nv50_mpeg_cclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index c59c83a..1a71511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,3 +1,5 @@
+nvkm-y += nvkm/engine/mspdec/base.o
 nvkm-y += nvkm/engine/mspdec/g98.o
+nvkm-y += nvkm/engine/mspdec/gt215.o
 nvkm-y += nvkm/engine/mspdec/gf100.o
 nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
new file mode 100644
index 0000000..80211f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+}
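
Each falcon-based engine is now fully described by a const nvkm_falcon_func plus the unit's MMIO base passed here (0x085000 for MSPDEC; the msppp patch below uses 0x086000). The per-chip init hooks then write at fixed offsets from that base, e.g. 0x085010 = base + 0x10. A small sketch of the base+offset addressing, with illustrative names rather than the nvkm helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* one falcon unit = ops + MMIO base; registers live at base + offset */
    struct falcon {
            uint32_t addr;                    /* e.g. 0x085000 for MSPDEC */
    };

    static void
    falcon_wr32(struct falcon *falcon, uint32_t offset, uint32_t data)
    {
            /* real code would poke an ioremap'd BAR; just trace it here */
            printf("wr32 %06x <- %08x\n", falcon->addr + offset, data);
    }

    int
    main(void)
    {
            struct falcon mspdec = { 0x085000 };

            /* the two writes g98_mspdec_init() performs, as base offsets */
            falcon_wr32(&mspdec, 0x010, 0x0000ffd2);
            falcon_wr32(&mspdec, 0x01c, 0x0000fff2);
            return 0;
    }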
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 2174577..1f1a99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_mspdec_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_sclass[] = {
-	{ 0x88b2, &nvkm_object_ofuncs },
-	{ 0x85b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_mspdec_init(struct nvkm_object *object)
+void
+g98_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct g98_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000ffd2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct g98_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g98_mspdec_cclass;
-	nv_engine(priv)->sclass = g98_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, G98_MSPDEC },
+		{}
+	}
 };
+
+int
+g98_mspdec_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
+}
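
The old `nv_subdev(priv)->unit` assignment becomes the `pmc_enable` field in the func table: the bitmask this engine owns in the master enable register, so g98's 0x01020000 covers bits 17 and 24 at once. A tiny demo of driving such a mask, assuming the common disable-then-enable reset idiom (an assumption about how the core consumes the field, not code from this patch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pmc_enable_reg = 0xffffffff;   /* fake master enable */

    /* clear then set the engine's units: the usual engine-reset idiom */
    static void
    engine_reset(uint32_t pmc_enable)
    {
            pmc_enable_reg &= ~pmc_enable;
            pmc_enable_reg |=  pmc_enable;
            printf("enable %08x (mask %08x)\n", pmc_enable_reg, pmc_enable);
    }

    int
    main(void)
    {
            engine_reset(0x01020000);   /* g98 MSPDEC: bits 17 and 24 */
            return 0;
    }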
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index c814a5f..371fd6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_mspdec_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_sclass[] = {
-	{ 0x90b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_mspdec_init(struct nvkm_object *object)
+void
+gf100_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct gf100_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000fff2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gf100_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_mspdec_cclass;
-	nv_engine(priv)->sclass = gf100_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPDEC },
+		{}
+	}
 };
+
+int
+gf100_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 9799206..de804a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gk104_mspdec_priv {
-	struct nvkm_falcon base;
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GK104_MSPDEC },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_sclass[] = {
-	{ 0x95b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_mspdec_init(struct nvkm_object *object)
+int
+gk104_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
 {
-	struct gk104_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
 }
-
-static int
-gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gk104_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_mspdec_cclass;
-	nv_engine(priv)->sclass = gk104_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
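
With the ctor boilerplate gone, gk104 reduces to a func table that reuses gf100_mspdec_init verbatim and differs only in the class it exposes (0x95b2 vs gf100's 0x90b2 in the removed sclass arrays); the per-chip file is nothing but data. Sketch of two const tables sharing one hook, illustrative only:

    #include <stdio.h>

    struct func { void (*init)(void); int sclass; };

    static void gf100_init(void) { printf("common init\n"); }

    /* two generations, one init function -- only the data differs */
    static const struct func gf100 = { gf100_init, 0x90b2 };
    static const struct func gk104 = { gf100_init, 0x95b2 };

    int
    main(void)
    {
            gk104.init();
            printf("gf100 class %04x, gk104 class %04x\n",
                   gf100.sclass, gk104.sclass);
            return 0;
    }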
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
new file mode 100644
index 0000000..8356317
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPDEC },
+		{}
+	}
+};
+
+int
+gt215_mspdec_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
new file mode 100644
index 0000000..d518af4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSPDEC_PRIV_H__
+#define __NVKM_MSPDEC_PRIV_H__
+#include <engine/mspdec.h>
+
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, struct nvkm_engine **);
+
+void g98_mspdec_init(struct nvkm_falcon *);
+
+void gf100_mspdec_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 4576a9e..3ea7eaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,2 +1,4 @@
+nvkm-y += nvkm/engine/msppp/base.o
 nvkm-y += nvkm/engine/msppp/g98.o
+nvkm-y += nvkm/engine/msppp/gt215.o
 nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
new file mode 100644
index 0000000..bfae5e60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 7a602a2..73f633a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msppp_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_sclass[] = {
-	{ 0x88b3, &nvkm_object_ofuncs },
-	{ 0x85b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_msppp_init(struct nvkm_object *object)
+void
+g98_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct g98_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000ffd2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_engine(priv)->cclass = &g98_msppp_cclass;
-	nv_engine(priv)->sclass = g98_msppp_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, G98_MSPPP },
+		{}
+	}
 };
+
+int
+g98_msppp_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 6047bae..c42c0c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msppp_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_sclass[] = {
-	{ 0x90b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_msppp_init(struct nvkm_object *object)
+static void
+gf100_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct gf100_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000fff2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000fff2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msppp_cclass;
-	nv_engine(priv)->sclass = gf100_msppp_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_msppp = {
+	.pmc_enable = 0x00000002,
+	.init = gf100_msppp_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPPP },
+		{}
+	}
 };
+
+int
+gf100_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
new file mode 100644
index 0000000..00e7795
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPPP },
+		{}
+	}
+};
+
+int
+gt215_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
new file mode 100644
index 0000000..37a91f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_MSPPP_PRIV_H__
+#define __NVKM_MSPPP_PRIV_H__
+#include <engine/msppp.h>
+
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msppp_init(struct nvkm_falcon *);
+#endif
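
priv.h is now the whole internal surface: nvkm_msppp_new_() plus the g98_msppp_init() hook shared by g98 and gt215. The msppp/base.c implementing nvkm_msppp_new_() is not part of this excerpt; judging from the old constructor's 0x086000 base and the msvld/base.c hunk below, it presumably reduces to a one-liner:

	int
	nvkm_msppp_new_(const struct nvkm_falcon_func *func,
			struct nvkm_device *device, int index,
			struct nvkm_engine **pengine)
	{
		return nvkm_falcon_new_(func, device, index, true, 0x086000,
					pengine);
	}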
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index 0c98110..28c8ecd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,3 +1,6 @@
+nvkm-y += nvkm/engine/msvld/base.o
 nvkm-y += nvkm/engine/msvld/g98.o
+nvkm-y += nvkm/engine/msvld/gt215.o
+nvkm-y += nvkm/engine/msvld/mcp89.o
 nvkm-y += nvkm/engine/msvld/gf100.o
 nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
new file mode 100644
index 0000000..745bbb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+}
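
nvkm_msvld_new_() pins down what all MSVLD falcons share (enabled at init, MMIO base 0x084000), so per the wrappers above a call such as g98_msvld_new(device, index, &engine) expands to nothing more than:

	/* equivalent expansion, per the code above */
	ret = nvkm_falcon_new_(&g98_msvld, device, index,
			       true,		/* enable the engine by default */
			       0x084000,	/* PMSVLD falcon register base */
			       &engine);

which keeps every chipset file below down to a function table and a constructor.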
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index c8a6b4e..47e2929 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,90 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msvld_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_sclass[] = {
-	{ 0x88b1, &nvkm_object_ofuncs },
-	{ 0x85b1, &nvkm_object_ofuncs },
-	{ 0x86b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_msvld_init(struct nvkm_object *object)
+void
+g98_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct g98_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000ffd2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g98_msvld_cclass;
-	nv_engine(priv)->sclass = g98_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, G98_MSVLD },
+		{}
+	}
 };
+
+int
+g98_msvld_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index b8d1e0f..1ac581b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msvld_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_sclass[] = {
-	{ 0x90b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_msvld_init(struct nvkm_object *object)
+void
+gf100_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct gf100_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000fff2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msvld_cclass;
-	nv_engine(priv)->sclass = gf100_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GF100_MSVLD },
+		{}
+	}
 };
+
+int
+gf100_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index a0b0927..4bba16e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gk104_msvld_priv {
-	struct nvkm_falcon base;
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GK104_MSVLD },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_sclass[] = {
-	{ 0x95b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_msvld_init(struct nvkm_object *object)
+int
+gk104_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
 {
-	struct gk104_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
 }
-
-static int
-gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gk104_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_msvld_cclass;
-	nv_engine(priv)->sclass = gk104_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
new file mode 100644
index 0000000..e17cb56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, GT212_MSVLD },
+		{}
+	}
+};
+
+int
+gt215_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
new file mode 100644
index 0000000..511800f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+mcp89_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, IGT21A_MSVLD },
+		{}
+	}
+};
+
+int
+mcp89_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
new file mode 100644
index 0000000..9dc1da6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSVLD_PRIV_H__
+#define __NVKM_MSVLD_PRIV_H__
+#include <engine/msvld.h>
+
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msvld_init(struct nvkm_falcon *);
+
+void gf100_msvld_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index 413b609..1614d38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,9 +1,10 @@
 nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/daemon.o
 nvkm-y += nvkm/engine/pm/nv40.o
 nvkm-y += nvkm/engine/pm/nv50.o
 nvkm-y += nvkm/engine/pm/g84.o
+nvkm-y += nvkm/engine/pm/gt200.o
 nvkm-y += nvkm/engine/pm/gt215.o
 nvkm-y += nvkm/engine/pm/gf100.o
+nvkm-y += nvkm/engine/pm/gf108.o
+nvkm-y += nvkm/engine/pm/gf117.o
 nvkm-y += nvkm/engine/pm/gk104.o
-nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 4cf36a3..0db9be2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -24,370 +24,751 @@
 #include "priv.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <nvif/class.h>
 #include <nvif/ioctl.h>
 #include <nvif/unpack.h>
 
-#define QUAD_MASK 0x0f
-#define QUAD_FREE 0x01
-
-static struct nvkm_perfsig *
-nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size)
+static u8
+nvkm_pm_count_perfdom(struct nvkm_pm *pm)
 {
-	char path[64];
+	struct nvkm_perfdom *dom;
+	u8 domain_nr = 0;
+
+	list_for_each_entry(dom, &pm->domains, head)
+		domain_nr++;
+	return domain_nr;
+}
+
+static u16
+nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
+{
+	u16 signal_nr = 0;
 	int i;
 
-	if (name[0] != '/') {
+	if (dom) {
 		for (i = 0; i < dom->signal_nr; i++) {
-			if ( dom->signal[i].name &&
-			    !strncmp(name, dom->signal[i].name, size))
-				return &dom->signal[i];
+			if (dom->signal[i].name)
+				signal_nr++;
 		}
-	} else {
-		for (i = 0; i < dom->signal_nr; i++) {
-			snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
-			if (!strncmp(name, path, size))
-				return &dom->signal[i];
+	}
+	return signal_nr;
+}
+
+static struct nvkm_perfdom *
+nvkm_perfdom_find(struct nvkm_pm *pm, int di)
+{
+	struct nvkm_perfdom *dom;
+	int tmp = 0;
+
+	list_for_each_entry(dom, &pm->domains, head) {
+		if (tmp++ == di)
+			return dom;
+	}
+	return NULL;
+}
+
+struct nvkm_perfsig *
+nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
+{
+	struct nvkm_perfdom *dom = *pdom;
+
+	if (dom == NULL) {
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return NULL;
+		*pdom = dom;
+	}
+
+	if (!dom->signal[si].name)
+		return NULL;
+	return &dom->signal[si];
+}
+
+static u8
+nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
+{
+	u8 source_nr = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
+		if (sig->source[i])
+			source_nr++;
+	}
+	return source_nr;
+}
+
+static struct nvkm_perfsrc *
+nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
+{
+	struct nvkm_perfsrc *src;
+	bool found = false;
+	int tmp = 1; /* Source IDs start from 1 */
+	u8 i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
+		if (sig->source[i] == si) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		list_for_each_entry(src, &pm->sources, head) {
+			if (tmp++ == si)
+				return src;
 		}
 	}
 
 	return NULL;
 }
 
-struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size,
-		  struct nvkm_perfdom **pdom)
+static int
+nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
 {
-	struct nvkm_perfdom *dom = *pdom;
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
 	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u32 mask, value;
+	int i, j;
 
-	if (dom == NULL) {
-		list_for_each_entry(dom, &ppm->domains, head) {
-			sig = nvkm_perfsig_find_(dom, name, size);
-			if (sig) {
-				*pdom = dom;
-				return sig;
-			}
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
+
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* set enable bit if needed */
+			mask = value = 0x00000000;
+			if (src->enable)
+				mask = value = 0x80000000;
+			mask  |= (src->mask << src->shift);
+			value |= ((ctr->source[i][j] >> 32) << src->shift);
+
+			/* enable the source */
+			nvkm_mask(device, src->addr, mask, value);
+			nvkm_debug(subdev,
+				   "enabled source %08x %08x %08x\n",
+				   src->addr, mask, value);
 		}
-
-		return NULL;
 	}
-
-	return nvkm_perfsig_find_(dom, name, size);
+	return 0;
 }
 
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name,
-		  struct nvkm_perfdom **pdom)
+static int
+nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
 {
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
 	struct nvkm_perfsig *sig;
-	struct nvkm_perfctr *ctr;
+	struct nvkm_perfsrc *src;
+	u32 mask;
+	int i, j;
 
-	sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom);
-	if (!sig)
-		return NULL;
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
 
-	ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
-	if (ctr) {
-		ctr->signal[0] = sig;
-		ctr->logic_op = 0xaaaa;
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* unset enable bit if needed */
+			mask = 0x00000000;
+			if (src->enable)
+				mask = 0x80000000;
+			mask |= (src->mask << src->shift);
+
+			/* disable the source */
+			nvkm_mask(device, src->addr, mask, 0);
+			nvkm_debug(subdev, "disabled source %08x %08x\n",
+				   src->addr, mask);
+		}
 	}
-
-	return ctr;
+	return 0;
 }
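
Both helpers above treat each ctr->source[i][j] as a packed u64: the enable path shifts (ctr->source[i][j] >> 32) into the mux field while nvkm_perfsrc_find() matches on the (truncated) low word, so by that reading the low 32 bits carry the source id (ids start at 1, with 0 terminating the j loop) and the high 32 bits carry the value to program. Composing an entry under that assumption looks like:

	u64 entry = ((u64)mux_value << 32) | source_id;	/* value high, id low */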
 
 /*******************************************************************************
- * Perfmon object classes
+ * Perfdom object classes
  ******************************************************************************/
 static int
-nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_query_v0 v0;
+		struct nvif_perfdom_init none;
 	} *args = data;
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfdom *dom = NULL, *chk;
-	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
-	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
-	const char *name;
-	int tmp = 0, di, si;
-	int ret;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
 
-	nv_ioctl(object, "perfctr query size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr query vers %d iter %08x\n",
-			 args->v0.version, args->v0.iter);
-		di = (args->v0.iter & 0xff000000) >> 24;
-		si = (args->v0.iter & 0x00ffffff) - 1;
-	} else
-		return ret;
-
-	list_for_each_entry(chk, &ppm->domains, head) {
-		if (tmp++ == di) {
-			dom = chk;
-			break;
-		}
-	}
-
-	if (dom == NULL || si >= (int)dom->signal_nr)
-		return -EINVAL;
-
-	if (si >= 0) {
-		if (raw || !(name = dom->signal[si].name)) {
-			snprintf(args->v0.name, sizeof(args->v0.name),
-				 "/%s/%02x", dom->name, si);
-		} else {
-			strncpy(args->v0.name, name, sizeof(args->v0.name));
-		}
-	}
-
-	do {
-		while (++si < dom->signal_nr) {
-			if (all || dom->signal[si].name) {
-				args->v0.iter = (di << 24) | ++si;
-				return 0;
-			}
-		}
-		si = -1;
-		di = di + 1;
-		dom = list_entry(dom->head.next, typeof(*dom), head);
-	} while (&dom->head != &ppm->domains);
-
-	args->v0.iter = 0xffffffff;
-	return 0;
-}
-
-static int
-nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size)
-{
-	union {
-		struct nvif_perfctr_sample none;
-	} *args = data;
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctr *ctr, *tmp;
-	struct nvkm_perfdom *dom;
-	int ret;
-
-	nv_ioctl(object, "perfctr sample size %d\n", size);
+	nvif_ioctl(object, "perfdom init size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "perfctr sample\n");
+		nvif_ioctl(object, "perfdom init\n");
 	} else
 		return ret;
-	ppm->sequence++;
 
-	list_for_each_entry(dom, &ppm->domains, head) {
-		/* sample previous batch of counters */
-		if (dom->quad != QUAD_MASK) {
-			dom->func->next(ppm, dom);
-			tmp = NULL;
-			while (!list_empty(&dom->list)) {
-				ctr = list_first_entry(&dom->list,
-						       typeof(*ctr), head);
-				if (ctr->slot < 0) break;
-				if ( tmp && tmp == ctr) break;
-				if (!tmp) tmp = ctr;
-				dom->func->read(ppm, dom, ctr);
-				ctr->slot  = -1;
-				list_move_tail(&ctr->head, &dom->list);
-			}
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i]) {
+			dom->func->init(pm, dom, dom->ctr[i]);
+
+			/* enable sources */
+			nvkm_perfsrc_enable(pm, dom->ctr[i]);
 		}
-
-		dom->quad = QUAD_MASK;
-
-		/* setup next batch of counters for sampling */
-		list_for_each_entry(ctr, &dom->list, head) {
-			ctr->slot = ffs(dom->quad) - 1;
-			if (ctr->slot < 0)
-				break;
-			dom->quad &= ~(QUAD_FREE << ctr->slot);
-			dom->func->init(ppm, dom, ctr);
-		}
-
-		if (dom->quad != QUAD_MASK)
-			dom->func->next(ppm, dom);
 	}
 
+	/* start next batch of counters for sampling */
+	dom->func->next(pm, dom);
+	return 0;
+}
+
+static int
+nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+	union {
+		struct nvif_perfdom_sample none;
+	} *args = data;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret;
+
+	nvif_ioctl(object, "perfdom sample size %d\n", size);
+	if (nvif_unvers(args->none)) {
+		nvif_ioctl(object, "perfdom sample\n");
+	} else
+		return ret;
+	pm->sequence++;
+
+	/* sample previous batch of counters */
+	list_for_each_entry(dom, &pm->domains, head)
+		dom->func->next(pm, dom);
+
 	return 0;
 }
 
 static int
-nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_read_v0 v0;
+		struct nvif_perfdom_read_v0 v0;
 	} *args = data;
-	struct nvkm_perfctr *ctr = (void *)object;
-	int ret;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
 
-	nv_ioctl(object, "perfctr read size %d\n", size);
+	nvif_ioctl(object, "perfdom read size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
 	} else
 		return ret;
 
-	if (!ctr->clk)
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i])
+			dom->func->read(pm, dom, dom->ctr[i]);
+	}
+
+	if (!dom->clk)
 		return -EAGAIN;
 
-	args->v0.clk = ctr->clk;
-	args->v0.ctr = ctr->ctr;
+	for (i = 0; i < 4; i++)
+		if (dom->ctr[i])
+			args->v0.ctr[i] = dom->ctr[i]->ctr;
+	args->v0.clk = dom->clk;
 	return 0;
 }
 
 static int
-nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
 	switch (mthd) {
-	case NVIF_PERFCTR_V0_QUERY:
-		return nvkm_perfctr_query(object, data, size);
-	case NVIF_PERFCTR_V0_SAMPLE:
-		return nvkm_perfctr_sample(object, data, size);
-	case NVIF_PERFCTR_V0_READ:
-		return nvkm_perfctr_read(object, data, size);
+	case NVIF_PERFDOM_V0_INIT:
+		return nvkm_perfdom_init(dom, data, size);
+	case NVIF_PERFDOM_V0_SAMPLE:
+		return nvkm_perfdom_sample(dom, data, size);
+	case NVIF_PERFDOM_V0_READ:
+		return nvkm_perfdom_read(dom, data, size);
 	default:
 		break;
 	}
 	return -EINVAL;
 }
 
-static void
-nvkm_perfctr_dtor(struct nvkm_object *object)
+static void *
+nvkm_perfdom_dtor(struct nvkm_object *object)
 {
-	struct nvkm_perfctr *ctr = (void *)object;
-	if (ctr->head.next)
-		list_del(&ctr->head);
-	nvkm_object_destroy(&ctr->base);
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		struct nvkm_perfctr *ctr = dom->ctr[i];
+		if (ctr) {
+			nvkm_perfsrc_disable(pm, ctr);
+			if (ctr->head.next)
+				list_del(&ctr->head);
+		}
+		kfree(ctr);
+	}
+
+	return dom;
 }
 
 static int
-nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
+		 struct nvkm_perfsig *signal[4], u64 source[4][8],
+		 u16 logic_op, struct nvkm_perfctr **pctr)
 {
-	union {
-		struct nvif_perfctr_v0 v0;
-	} *args = data;
-	struct nvkm_pm *ppm = (void *)engine;
-	struct nvkm_perfdom *dom = NULL;
-	struct nvkm_perfsig *sig[4] = {};
 	struct nvkm_perfctr *ctr;
-	int ret, i;
+	int i, j;
 
-	nv_ioctl(parent, "create perfctr size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
-			 args->v0.version, args->v0.logic_op);
-	} else
-		return ret;
+	if (!dom)
+		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
-		sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i],
-					   strnlen(args->v0.name[i],
-						   sizeof(args->v0.name[i])),
-					   &dom);
-		if (!sig[i])
-			return -EINVAL;
+	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+	if (!ctr)
+		return -ENOMEM;
+
+	ctr->domain   = domain;
+	ctr->logic_op = logic_op;
+	ctr->slot     = slot;
+	for (i = 0; i < 4; i++) {
+		if (signal[i]) {
+			ctr->signal[i] = signal[i] - dom->signal;
+			for (j = 0; j < 8; j++)
+				ctr->source[i][j] = source[i][j];
+		}
 	}
+	list_add_tail(&ctr->head, &dom->list);
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &ctr);
-	*pobject = nv_object(ctr);
-	if (ret)
-		return ret;
-
-	ctr->slot = -1;
-	ctr->logic_op = args->v0.logic_op;
-	ctr->signal[0] = sig[0];
-	ctr->signal[1] = sig[1];
-	ctr->signal[2] = sig[2];
-	ctr->signal[3] = sig[3];
-	if (dom)
-		list_add_tail(&ctr->head, &dom->list);
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nvkm_perfctr_ofuncs = {
-	.ctor = nvkm_perfctr_ctor,
-	.dtor = nvkm_perfctr_dtor,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nvkm_perfctr_mthd,
+static const struct nvkm_object_func
+nvkm_perfdom = {
+	.dtor = nvkm_perfdom_dtor,
+	.mthd = nvkm_perfdom_mthd,
 };
 
-struct nvkm_oclass
-nvkm_pm_sclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
-	  .ofuncs = &nvkm_perfctr_ofuncs,
-	},
-	{},
-};
+static int
+nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
+		  const struct nvkm_oclass *oclass, void *data, u32 size,
+		  struct nvkm_object **pobject)
+{
+	union {
+		struct nvif_perfdom_v0 v0;
+	} *args = data;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_perfdom *sdom = NULL;
+	struct nvkm_perfctr *ctr[4] = {};
+	struct nvkm_perfdom *dom;
+	int c, s, m;
+	int ret;
+
+	nvif_ioctl(parent, "create perfdom size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.mode);
+	} else
+		return ret;
+
+	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
+		struct nvkm_perfsig *sig[4] = {};
+		u64 src[4][8] = {};
+
+		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
+			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
+						   args->v0.ctr[c].signal[s],
+						   &sdom);
+			if (args->v0.ctr[c].signal[s] && !sig[s])
+				return -EINVAL;
+
+			for (m = 0; m < 8; m++) {
+				src[s][m] = args->v0.ctr[c].source[s][m];
+				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
+							            src[s][m]))
+					return -EINVAL;
+			}
+		}
+
+		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
+				       args->v0.ctr[c].logic_op, &ctr[c]);
+		if (ret)
+			return ret;
+	}
+
+	if (!sdom)
+		return -EINVAL;
+
+	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
+	dom->perfmon = perfmon;
+	*pobject = &dom->object;
+
+	dom->func = sdom->func;
+	dom->addr = sdom->addr;
+	dom->mode = args->v0.mode;
+	for (c = 0; c < ARRAY_SIZE(ctr); c++)
+		dom->ctr[c] = ctr[c];
+	return 0;
+}
 
 /*******************************************************************************
- * PPM context
+ * Perfmon object classes
  ******************************************************************************/
-static void
-nvkm_perfctx_dtor(struct nvkm_object *object)
+static int
+nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctx *ctx = (void *)object;
+	union {
+		struct nvif_perfmon_query_domain_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_perfdom *dom;
+	u8 domain_nr;
+	int di, ret;
 
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ctx->base);
-	if (ppm->context == ctx)
-		ppm->context = NULL;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
+	nvif_ioctl(object, "perfmon query domain size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
+			   args->v0.version, args->v0.iter);
+		di = (args->v0.iter & 0xff) - 1;
+	} else
+		return ret;
+
+	domain_nr = nvkm_pm_count_perfdom(pm);
+	if (di >= (int)domain_nr)
+		return -EINVAL;
+
+	if (di >= 0) {
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return -EINVAL;
+
+		args->v0.id         = di;
+		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
+		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+
+		/* Currently only global counters (PCOUNTER) are implemented,
+		 * but this will be different for local counters (MP). */
+		args->v0.counter_nr = 4;
+	}
+
+	if (++di < domain_nr) {
+		args->v0.iter = ++di;
+		return 0;
+	}
+
+	args->v0.iter = 0xff;
+	return 0;
 }
 
 static int
-nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
-	struct nvkm_pm *ppm = (void *)engine;
-	struct nvkm_perfctx *ctx;
-	int ret;
+	union {
+		struct nvif_perfmon_query_signal_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_device *device = pm->engine.subdev.device;
+	struct nvkm_perfdom *dom;
+	struct nvkm_perfsig *sig;
+	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
+	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
+	int ret, si;
 
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
-	*pobject = nv_object(ctx);
-	if (ret)
+	nvif_ioctl(object, "perfmon query signal size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object,
+			   "perfmon query signal vers %d dom %d iter %04x\n",
+			   args->v0.version, args->v0.domain, args->v0.iter);
+		si = (args->v0.iter & 0xffff) - 1;
+	} else
 		return ret;
 
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	if (ppm->context == NULL)
-		ppm->context = ctx;
-	if (ctx != ppm->context)
-		ret = -EBUSY;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
+	dom = nvkm_perfdom_find(pm, args->v0.domain);
+	if (dom == NULL || si >= (int)dom->signal_nr)
+		return -EINVAL;
 
-	return ret;
+	if (si >= 0) {
+		sig = &dom->signal[si];
+		if (raw || !sig->name) {
+			snprintf(args->v0.name, sizeof(args->v0.name),
+				 "/%s/%02x", dom->name, si);
+		} else {
+			strncpy(args->v0.name, sig->name,
+				sizeof(args->v0.name));
+		}
+
+		args->v0.signal = si;
+		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
+	}
+
+	while (++si < dom->signal_nr) {
+		if (all || dom->signal[si].name) {
+			args->v0.iter = ++si;
+			return 0;
+		}
+	}
+
+	args->v0.iter = 0xffff;
+	return 0;
 }
 
-struct nvkm_oclass
-nvkm_pm_cclass = {
-	.handle = NV_ENGCTX(PM, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_perfctx_ctor,
-		.dtor = nvkm_perfctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-	},
+static int
+nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
+{
+	union {
+		struct nvif_perfmon_query_source_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_perfdom *dom = NULL;
+	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+	int si, ret;
+
+	nvif_ioctl(object, "perfmon query source size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object,
+			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.signal,
+			   args->v0.iter);
+		si = (args->v0.iter & 0xff) - 1;
+	} else
+		return ret;
+
+	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
+	if (!sig)
+		return -EINVAL;
+
+	source_nr = nvkm_perfsig_count_perfsrc(sig);
+	if (si >= (int)source_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
+		if (!src)
+			return -EINVAL;
+
+		args->v0.source = sig->source[si];
+		args->v0.mask   = src->mask;
+		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+	}
+
+	if (++si < source_nr) {
+		args->v0.iter = ++si;
+		return 0;
+	}
+
+	args->v0.iter = 0xff;
+	return 0;
+}
+
+static int
+nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	switch (mthd) {
+	case NVIF_PERFMON_V0_QUERY_DOMAIN:
+		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SIGNAL:
+		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SOURCE:
+		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **pobject)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
+	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
+}
+
+static int
+nvkm_perfmon_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	if (index == 0) {
+		oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
+		oclass->base.minver = 0;
+		oclass->base.maxver = 0;
+		oclass->ctor = nvkm_perfmon_child_new;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void *
+nvkm_perfmon_dtor(struct nvkm_object *object)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	struct nvkm_pm *pm = perfmon->pm;
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == &perfmon->object)
+		pm->perfmon = NULL;
+	mutex_unlock(&pm->engine.subdev.mutex);
+	return perfmon;
+}
+
+static struct nvkm_object_func
+nvkm_perfmon = {
+	.dtor = nvkm_perfmon_dtor,
+	.mthd = nvkm_perfmon_mthd,
+	.sclass = nvkm_perfmon_child_get,
 };
 
+static int
+nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_perfmon *perfmon;
+
+	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
+	perfmon->pm = pm;
+	*pobject = &perfmon->object;
+	return 0;
+}
+
 /*******************************************************************************
  * PPM engine/subdev functions
  ******************************************************************************/
+
+static int
+nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
+	int ret;
+
+	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
+	if (ret)
+		return ret;
+
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == NULL)
+		pm->perfmon = *pobject;
+	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
+	mutex_unlock(&pm->engine.subdev.mutex);
+	return ret;
+}
+
+static const struct nvkm_device_oclass
+nvkm_pm_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_pm_oclass_new,
+};
+
+static int
+nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
+		   const struct nvkm_device_oclass **class)
+{
+	if (index == 0) {
+		oclass->base = nvkm_pm_oclass.base;
+		*class = &nvkm_pm_oclass;
+		return index;
+	}
+	return 1;
+}
+
 int
-nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
+nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
+		 const struct nvkm_specsrc *spec)
+{
+	const struct nvkm_specsrc *ssrc;
+	const struct nvkm_specmux *smux;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+
+	if (!spec) {
+		/* No sources are defined for this signal. */
+		return 0;
+	}
+
+	ssrc = spec;
+	while (ssrc->name) {
+		smux = ssrc->mux;
+		while (smux->name) {
+			bool found = false;
+			u8 source_id = 0;
+			u32 len;
+
+			list_for_each_entry(src, &pm->sources, head) {
+				if (src->addr == ssrc->addr &&
+				    src->shift == smux->shift) {
+					found = true;
+					break;
+				}
+				source_id++;
+			}
+
+			if (!found) {
+				src = kzalloc(sizeof(*src), GFP_KERNEL);
+				if (!src)
+					return -ENOMEM;
+
+				src->addr   = ssrc->addr;
+				src->mask   = smux->mask;
+				src->shift  = smux->shift;
+				src->enable = smux->enable;
+
+				len = strlen(ssrc->name) +
+				      strlen(smux->name) + 2;
+				src->name = kzalloc(len, GFP_KERNEL);
+				if (!src->name) {
+					kfree(src);
+					return -ENOMEM;
+				}
+				snprintf(src->name, len, "%s_%s", ssrc->name,
+					 smux->name);
+
+				list_add_tail(&src->head, &pm->sources);
+			}
+
+			sig->source[source_nr++] = source_id + 1;
+			smux++;
+		}
+		ssrc++;
+	}
+
+	return 0;
+}
+
+int
+nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
 		 u32 base, u32 size_unit, u32 size_domain,
 		 const struct nvkm_specdom *spec)
 {
 	const struct nvkm_specdom *sdom;
 	const struct nvkm_specsig *ssig;
 	struct nvkm_perfdom *dom;
-	int i;
+	int ret, i;
 
 	for (i = 0; i == 0 || mask; i++) {
 		u32 addr = base + (i * size_unit);
@@ -410,16 +791,20 @@
 					 "%s/%02x", name, (int)(sdom - spec));
 			}
 
-			list_add_tail(&dom->head, &ppm->domains);
+			list_add_tail(&dom->head, &pm->domains);
 			INIT_LIST_HEAD(&dom->list);
 			dom->func = sdom->func;
 			dom->addr = addr;
-			dom->quad = QUAD_MASK;
 			dom->signal_nr = sdom->signal_nr;
 
 			ssig = (sdom++)->signal;
 			while (ssig->name) {
-				dom->signal[ssig->signal].name = ssig->name;
+				struct nvkm_perfsig *sig =
+					&dom->signal[ssig->signal];
+				sig->name = ssig->name;
+				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
+				if (ret)
+					return ret;
 				ssig++;
 			}
 
@@ -432,47 +817,49 @@
 	return 0;
 }
 
-int
-_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_fini(&ppm->base, suspend);
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	if (pm->func->fini)
+		pm->func->fini(pm);
+	return 0;
 }
 
-int
-_nvkm_pm_init(struct nvkm_object *object)
+static void *
+nvkm_pm_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_init(&ppm->base);
-}
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	struct nvkm_perfdom *dom, *next_dom;
+	struct nvkm_perfsrc *src, *next_src;
 
-void
-_nvkm_pm_dtor(struct nvkm_object *object)
-{
-	struct nvkm_pm *ppm = (void *)object;
-	struct nvkm_perfdom *dom, *tmp;
-
-	list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
 		list_del(&dom->head);
 		kfree(dom);
 	}
 
-	nvkm_engine_destroy(&ppm->base);
+	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
+		list_del(&src->head);
+		kfree(src->name);
+		kfree(src);
+	}
+
+	return pm;
 }
 
+static const struct nvkm_engine_func
+nvkm_pm = {
+	.dtor = nvkm_pm_dtor,
+	.fini = nvkm_pm_fini,
+	.base.sclass = nvkm_pm_oclass_get,
+};
+
 int
-nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_pm *pm)
 {
-	struct nvkm_pm *ppm;
-	int ret;
-
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
-				  "pm", length, pobject);
-	ppm = *pobject;
-	if (ret)
-		return ret;
-
-	INIT_LIST_HEAD(&ppm->domains);
-	return 0;
+	pm->func = func;
+	INIT_LIST_HEAD(&pm->domains);
+	INIT_LIST_HEAD(&pm->sources);
+	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
 }
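
The rewritten base.c replaces the old flat PERFCTR object with a two-level model: a PERFMON object (one per device, enforced through pm->perfmon under the subdev mutex in nvkm_pm_oclass_new()) answers QUERY_DOMAIN/QUERY_SIGNAL/QUERY_SOURCE, and its PERFDOM children each own up to four counters driven by INIT/SAMPLE/READ. All three query methods share one cursor convention: pass iter == 0 to obtain the first cursor, each later call fills in the entry for iter - 1 and returns the next cursor, and an all-ones value (0xff, or 0xffff for signals) marks the end. A sketch of walking the domains with that convention; the query_domain() wrapper standing in for the nvif mthd plumbing is hypothetical:

	struct nvif_perfmon_query_domain_v0 args = {};
	u8 iter = 0x00;

	for (;;) {
		args.iter = iter;
		if (query_domain(perfmon, &args, sizeof(args)))	/* hypothetical */
			break;
		if (iter)	/* a request with iter != 0 filled in a domain */
			pr_info("domain %u: %s (%u signals)\n",
				args.id, args.name, args.signal_nr);
		if (args.iter == 0xff)	/* no further domains */
			break;
		iter = args.iter;
	}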
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
deleted file mode 100644
index a7a5f3a..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-static void
-pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	u32 mask = 0x00000000;
-	u32 ctrl = 0x00000001;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
-		mask |= 1 << (ctr->signal[i] - dom->signal);
-
-	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
-	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
-	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
-}
-
-static void
-pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	ctr->ctr = ppm->pwr[ctr->slot];
-	ctr->clk = ppm->pwr[ppm->last];
-}
-
-static void
-pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
-{
-	int i;
-
-	for (i = 0; i <= ppm->last; i++) {
-		ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
-		nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
-	}
-}
-
-static const struct nvkm_funcdom
-pwr_perfctr_func = {
-	.init = pwr_perfctr_init,
-	.read = pwr_perfctr_read,
-	.next = pwr_perfctr_next,
-};
-
-const struct nvkm_specdom
-gt215_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gf100_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gk104_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{ 0x15, "pwr_ce2_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index d54c670..6e441dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -23,15 +23,121 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+g84_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x3, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_prop_sources[] = {
+	{ 0x408e50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_tex_sources[] = {
+	{ 0x408808, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
 static const struct nvkm_specdom
 g84_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xbd, "pc01_gr_idle" },
+			{ 0x5e, "pc01_strmout_00" },
+			{ 0x5f, "pc01_strmout_01" },
+			{ 0xd2, "pc01_trast_00" },
+			{ 0xd3, "pc01_trast_01" },
+			{ 0xd4, "pc01_trast_02" },
+			{ 0xd5, "pc01_trast_03" },
+			{ 0xd8, "pc01_trast_04" },
+			{ 0xd9, "pc01_trast_05" },
+			{ 0x5c, "pc01_vattr_00" },
+			{ 0x5d, "pc01_vattr_01" },
+			{ 0x66, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x67, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x68, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x69, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x76, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x77, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x78, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x79, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xa4, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xa0, (const struct nvkm_specsig[]) {
+			{ 0x30, "pc02_crop_00", g84_crop_sources },
+			{ 0x31, "pc02_crop_01", g84_crop_sources },
+			{ 0x32, "pc02_crop_02", g84_crop_sources },
+			{ 0x33, "pc02_crop_03", g84_crop_sources },
+			{ 0x00, "pc02_prop_00", g84_prop_sources },
+			{ 0x01, "pc02_prop_01", g84_prop_sources },
+			{ 0x02, "pc02_prop_02", g84_prop_sources },
+			{ 0x03, "pc02_prop_03", g84_prop_sources },
+			{ 0x04, "pc02_prop_04", g84_prop_sources },
+			{ 0x05, "pc02_prop_05", g84_prop_sources },
+			{ 0x06, "pc02_prop_06", g84_prop_sources },
+			{ 0x07, "pc02_prop_07", g84_prop_sources },
+			{ 0x48, "pc02_tex_00", g84_tex_sources },
+			{ 0x49, "pc02_tex_01", g84_tex_sources },
+			{ 0x4a, "pc02_tex_02", g84_tex_sources },
+			{ 0x4b, "pc02_tex_03", g84_tex_sources },
+			{ 0x1a, "pc02_tex_04", g84_tex_sources },
+			{ 0x1b, "pc02_tex_05", g84_tex_sources },
+			{ 0x1c, "pc02_tex_06", g84_tex_sources },
+			{ 0x44, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x45, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x46, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x47, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0x8c, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,14 +158,8 @@
 	{}
 };
 
-struct nvkm_oclass *
-g84_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = g84_pm,
-}.base;
+int
+g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(g84_pm, device, index, ppm);
+}
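
The specsig rows now take an optional third column naming a source mux table; nvkm_perfsrc_new() in the base.c hunk above flattens those tables into pm->sources and names each mux "<source>_<mux>", so "pc01_vfetch_00" ends up exposing a source called "pgraph_vfetch_unk0c_unk0". Declaring a new table follows the same shape (the register address and names below are illustrative only, not real hardware):

	static const struct nvkm_specsrc
	gxyz_example_sources[] = {
		{ 0x400000, (const struct nvkm_specmux[]) {
				/* mask, shift, name, needs-enable-bit */
				{ 0x1f, 0, "sel", true },
				{}
			}, "pgraph_example_pm_mux" },
		{}
	};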
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 008fed7..d2901e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -23,62 +23,146 @@
  */
 #include "gf100.h"
 
+const struct nvkm_specsrc
+gf100_pbfb_sources[] = {
+	{ 0x10f100, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{ 0x3f, 4, "unk4" },
+			{}
+		}, "pbfb_broadcast_pm_unk100" },
+	{}
+};
+
+const struct nvkm_specsrc
+gf100_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 29, "unk29" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_l1_sources[] = {
+	{ 0x5044a8, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_l1_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_unk400_sources[] = {
+	{ 0x50440c, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_unk400_pm_mux" },
+	{}
+};
+
 static const struct nvkm_specdom
 gf100_pm_hub[] = {
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_gpc[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc00_l1_00", gf100_l1_sources },
+			{ 0x01, "gpc00_l1_01", gf100_l1_sources },
+			{ 0x02, "gpc00_l1_02", gf100_l1_sources },
+			{ 0x03, "gpc00_l1_03", gf100_l1_sources },
+			{ 0x05, "gpc00_l1_04", gf100_l1_sources },
+			{ 0x06, "gpc00_l1_05", gf100_l1_sources },
+			{ 0x0a, "gpc00_tex_00", gf100_tex_sources },
+			{ 0x0b, "gpc00_tex_01", gf100_tex_sources },
+			{ 0x0c, "gpc00_tex_02", gf100_tex_sources },
+			{ 0x0d, "gpc00_tex_03", gf100_tex_sources },
+			{ 0x0e, "gpc00_tex_04", gf100_tex_sources },
+			{ 0x0f, "gpc00_tex_05", gf100_tex_sources },
+			{ 0x10, "gpc00_tex_06", gf100_tex_sources },
+			{ 0x11, "gpc00_tex_07", gf100_tex_sources },
+			{ 0x12, "gpc00_tex_08", gf100_tex_sources },
+			{ 0x26, "gpc00_unk400_00", gf100_unk400_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x10, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x21, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x00, "part00_pmfb_02", gf100_pmfb_sources },
+			{ 0x02, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x01, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static void
-gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
-	nv_wr32(priv, dom->addr + 0x100, 0x00000000);
-	nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
-	nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+	nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
+	nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
+	nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
+	nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
 }
 
 static void
-gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
+	case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
+	case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
+	case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
 	}
-	cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+	dom->clk = nvkm_rd32(device, dom->addr + 0x070);
 }
 
 static void
-gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
-	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+	struct nvkm_device *device = pm->engine.subdev.device;
+	nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+	nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
 }
 
 const struct nvkm_funcdom
@@ -88,72 +172,72 @@
 	.next = gf100_perfctr_next,
 };
 
-int
-gf100_pm_fini(struct nvkm_object *object, bool suspend)
+static void
+gf100_pm_fini(struct nvkm_pm *pm)
 {
-	struct gf100_pm_priv *priv = (void *)object;
-	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
-	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
-	return nvkm_pm_fini(&priv->base, suspend);
+	struct nvkm_device *device = pm->engine.subdev.device;
+	nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
 }
 
-static int
-gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+static const struct nvkm_pm_func
+gf100_pm_ = {
+	.fini = gf100_pm_fini,
+};
+
+int
+gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pm **ppm)
 {
-	struct gf100_pm_priv *priv;
+	struct nvkm_pm *pm;
 	u32 mask;
 	int ret;
 
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr);
+	ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
 	if (ret)
 		return ret;
 
 	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gf100_pm_hub);
+	ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
+			       func->doms_hub);
 	if (ret)
 		return ret;
 
 	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
+	mask  = (1 << nvkm_rd32(device, 0x022430)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022504);
+	mask &= ~nvkm_rd32(device, 0x022584);
 
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gf100_pm_gpc);
+	ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
+			       0x1000, 0x200, func->doms_gpc);
 	if (ret)
 		return ret;
 
 	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
+	mask  = (1 << nvkm_rd32(device, 0x022438)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022548);
+	mask &= ~nvkm_rd32(device, 0x0225c8);
 
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gf100_pm_part);
+	ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
+			       0x1000, 0x200, func->doms_part);
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+static const struct gf100_pm_func
+gf100_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf100_pm_hub,
+	.doms_part = gf100_pm_part,
 };
+
+int
+gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf100_pm, device, index, ppm);
+}
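
For reference, the GPC/PART mask arithmetic in gf100_pm_new_() above builds a bitmask of usable units: one bit per unit reported by the count register, with floorswept units cleared via the two disable registers. A self-contained sketch with invented register values:

#include <stdio.h>

int main(void)
{
	/* Invented values; the driver reads these with nvkm_rd32(device, ...). */
	unsigned gpc_nr = 4;          /* 0x022430: GPC count */
	unsigned fuse0  = 0x00000002; /* 0x022504: disabled GPCs */
	unsigned fuse1  = 0x00000000; /* 0x022584: disabled GPCs */
	unsigned mask   = ((1u << gpc_nr) - 1) & ~fuse0 & ~fuse1;

	printf("gpc mask = %#x\n", mask); /* 0xd: GPCs 0, 2 and 3 present */
	return 0;
}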
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 6a01fc7..56d0344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -2,14 +2,18 @@
 #define __NVKM_PM_NVC0_H__
 #include "priv.h"
 
-struct gf100_pm_priv {
-	struct nvkm_pm base;
+struct gf100_pm_func {
+	const struct nvkm_specdom *doms_hub;
+	const struct nvkm_specdom *doms_gpc;
+	const struct nvkm_specdom *doms_part;
 };
 
-struct gf100_pm_cntr {
-	struct nvkm_perfctr base;
-};
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
+		  int index, struct nvkm_pm **);
 
 extern const struct nvkm_funcdom gf100_perfctr_func;
-int gf100_pm_fini(struct nvkm_object *, bool);
+extern const struct nvkm_specdom gf100_pm_gpc[];
+
+extern const struct nvkm_specsrc gf100_pbfb_sources[];
+extern const struct nvkm_specsrc gf100_pmfb_sources[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
new file mode 100644
index 0000000..49b24c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specdom
+gf108_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf108_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x14, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x15, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x20, "part00_pbfb_02", gf100_pbfb_sources },
+			{ 0x21, "part00_pbfb_03", gf100_pbfb_sources },
+			{ 0x01, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x05, "part00_pmfb_02", gf100_pmfb_sources },
+			{ 0x07, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x13, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf108_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf108_pm_hub,
+	.doms_part = gf108_pm_part,
+};
+
+int
+gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf108_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
new file mode 100644
index 0000000..9170025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specsrc
+gf117_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x12, "part00_pmfb_00", gf117_pmfb_sources },
+			{ 0x15, "part00_pmfb_01", gf117_pmfb_sources },
+			{ 0x16, "part00_pmfb_02", gf117_pmfb_sources },
+			{ 0x18, "part00_pmfb_03", gf117_pmfb_sources },
+			{ 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
+			{ 0x23, "part00_pmfb_05", gf117_pmfb_sources },
+			{ 0x24, "part00_pmfb_06", gf117_pmfb_sources },
+			{ 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
+			{ 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
+			{ 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
+			{ 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
+			{ 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf117_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf117_pm_hub,
+	.doms_part = gf117_pm_part,
+};
+
+int
+gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf117_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 75b9ff3..07f946d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -23,6 +23,52 @@
  */
 #include "gf100.h"
 
+static const struct nvkm_specsrc
+gk104_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{ 0x14165c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp1_pm_unk25c" },
+	{ 0x141a5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp2_pm_unk25c" },
+	{ 0x141e5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp3_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gk104_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{ 0x5042c8, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
+	{ 0x5042b8, (const struct nvkm_specmux[]) {
+			{ 0xff, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
+	{}
+};
+
 static const struct nvkm_specdom
 gk104_pm_hub[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
@@ -69,12 +115,51 @@
 			{ 0xc7, "gpc00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc02_tex_00", gk104_tex_sources },
+			{ 0x01, "gpc02_tex_01", gk104_tex_sources },
+			{ 0x02, "gpc02_tex_02", gk104_tex_sources },
+			{ 0x03, "gpc02_tex_03", gk104_tex_sources },
+			{ 0x04, "gpc02_tex_04", gk104_tex_sources },
+			{ 0x05, "gpc02_tex_05", gk104_tex_sources },
+			{ 0x06, "gpc02_tex_06", gk104_tex_sources },
+			{ 0x07, "gpc02_tex_07", gk104_tex_sources },
+			{ 0x08, "gpc02_tex_08", gk104_tex_sources },
+			{ 0x0a, "gpc02_tex_0a", gk104_tex_sources },
+			{ 0x0b, "gpc02_tex_0b", gk104_tex_sources },
+			{ 0x0d, "gpc02_tex_0c", gk104_tex_sources },
+			{ 0x0c, "gpc02_tex_0d", gk104_tex_sources },
+			{ 0x0e, "gpc02_tex_0e", gk104_tex_sources },
+			{ 0x0f, "gpc02_tex_0f", gk104_tex_sources },
+			{ 0x10, "gpc02_tex_10", gk104_tex_sources },
+			{ 0x11, "gpc02_tex_11", gk104_tex_sources },
+			{ 0x12, "gpc02_tex_12", gk104_tex_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static const struct nvkm_specdom
 gk104_pm_part[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
+			{ 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
+			{ 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
+			{ 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
+			{ 0x10, "part00_pmfb_04", gk104_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gk104_pmfb_sources },
+			{ 0x15, "part00_pmfb_06", gk104_pmfb_sources },
+			{ 0x16, "part00_pmfb_07", gk104_pmfb_sources },
+			{ 0x18, "part00_pmfb_08", gk104_pmfb_sources },
+			{ 0x21, "part00_pmfb_09", gk104_pmfb_sources },
+			{ 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
+			{ 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
+			{ 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
 			{ 0x47, "part00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
@@ -85,64 +170,15 @@
 	{}
 };
 
-static int
-gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	u32 mask;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	/* PDAEMON */
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
-
-	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gk104_pm_hub);
-	if (ret)
-		return ret;
-
-	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
-
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gk104_pm_gpc);
-	if (ret)
-		return ret;
-
-	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
-
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gk104_pm_part);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+static const struct gf100_pm_func
+gk104_pm = {
+	.doms_gpc = gk104_pm_gpc,
+	.doms_hub = gk104_pm_hub,
+	.doms_part = gk104_pm_part,
 };
+
+int
+gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gk104_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
deleted file mode 100644
index 6820176..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-static int
-gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk110_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xf0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
new file mode 100644
index 0000000..5cf5dd5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Nouveau project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "nv40.h"
+
+const struct nvkm_specsrc
+gt200_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x1f, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_prop_sources[] = {
+	{ 0x408750, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_tex_sources[] = {
+	{ 0x408508, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specdom
+gt200_pm[] = {
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc9, "pc01_gr_idle" },
+			{ 0x84, "pc01_strmout_00" },
+			{ 0x85, "pc01_strmout_01" },
+			{ 0xde, "pc01_trast_00" },
+			{ 0xdf, "pc01_trast_01" },
+			{ 0xe0, "pc01_trast_02" },
+			{ 0xe1, "pc01_trast_03" },
+			{ 0xe4, "pc01_trast_04" },
+			{ 0xe5, "pc01_trast_05" },
+			{ 0x82, "pc01_vattr_00" },
+			{ 0x83, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+
+			{ 0xb0, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x55, "pc02_crop_00", gt200_crop_sources },
+			{ 0x56, "pc02_crop_01", gt200_crop_sources },
+			{ 0x57, "pc02_crop_02", gt200_crop_sources },
+			{ 0x58, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x78, "pc02_tex_00", gt200_tex_sources },
+			{ 0x79, "pc02_tex_01", gt200_tex_sources },
+			{ 0x7a, "pc02_tex_02", gt200_tex_sources },
+			{ 0x7b, "pc02_tex_03", gt200_tex_sources },
+			{ 0x32, "pc02_tex_04", gt200_tex_sources },
+			{ 0x33, "pc02_tex_05", gt200_tex_sources },
+			{ 0x34, "pc02_tex_06", gt200_tex_sources },
+			{ 0x74, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x75, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x76, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x77, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xec, "pc02_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+int
+gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(gt200_pm, device, index, ppm);
+}
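
Each nvkm_specmux entry above is a { mask, shift, name, enable } bitfield descriptor for the register named by its enclosing nvkm_specsrc. Decoding the gt200_crop_sources fields from a raw read of 0x407008 would look roughly like the fragment below; the register value is invented for illustration:

/* pgraph_rop0_crop_pm_mux (0x407008), fields as in gt200_crop_sources. */
unsigned val  = 0x00030002;         /* hypothetical read of 0x407008 */
unsigned sel0 = (val >> 0)  & 0x0f; /* { 0xf,  0,  "sel0" } -> 2 */
unsigned sel1 = (val >> 16) & 0x1f; /* { 0x1f, 16, "sel1" } -> 3 */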
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index d065bfc..c9227ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -23,15 +23,94 @@
  */
 #include "nv40.h"
 
+static const struct nvkm_specsrc
+gt215_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{ 0xff, 24, "unk24" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
 static const struct nvkm_specdom
 gt215_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xcb, "pc01_gr_idle" },
+			{ 0x86, "pc01_strmout_00" },
+			{ 0x87, "pc01_strmout_01" },
+			{ 0xe0, "pc01_trast_00" },
+			{ 0xe1, "pc01_trast_01" },
+			{ 0xe2, "pc01_trast_02" },
+			{ 0xe3, "pc01_trast_03" },
+			{ 0xe6, "pc01_trast_04" },
+			{ 0xe7, "pc01_trast_05" },
+			{ 0x84, "pc01_vattr_00" },
+			{ 0x85, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", gt215_zcull_sources },
+			{ 0x08, "pc01_zcull_01", gt215_zcull_sources },
+			{ 0x09, "pc01_zcull_02", gt215_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", gt215_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", gt215_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", gt215_zcull_sources },
+			{ 0xb2, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x64, "pc02_crop_00", gt200_crop_sources },
+			{ 0x65, "pc02_crop_01", gt200_crop_sources },
+			{ 0x66, "pc02_crop_02", gt200_crop_sources },
+			{ 0x67, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x80, "pc02_tex_00", gt200_tex_sources },
+			{ 0x81, "pc02_tex_01", gt200_tex_sources },
+			{ 0x82, "pc02_tex_02", gt200_tex_sources },
+			{ 0x83, "pc02_tex_03", gt200_tex_sources },
+			{ 0x3a, "pc02_tex_04", gt200_tex_sources },
+			{ 0x3b, "pc02_tex_05", gt200_tex_sources },
+			{ 0x3c, "pc02_tex_06", gt200_tex_sources },
+			{ 0x7c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x7d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x7e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x7f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xcc, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,32 +131,8 @@
 	{}
 };
 
-static int
-gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **object)
+int
+gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object);
-	if (ret == 0) {
-		struct nv40_pm_priv *priv = (void *)*object;
-		ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
-				       gt215_pm_pwr);
-		if (ret)
-			return ret;
-
-		priv->base.last = 3;
-	}
-	return ret;
+	return nv40_pm_new_(gt215_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-gt215_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = gt215_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index ff22f06..4bef72a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -24,46 +24,44 @@
 #include "nv40.h"
 
 static void
-nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
-	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
-	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+	nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
+	nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
+	nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
 }
 
 static void
-nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
+	case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
+	case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
+	case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
 	}
-	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+	dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
 }
 
 static void
-nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	if (priv->sequence != ppm->sequence) {
-		nv_wr32(priv, 0x400084, 0x00000020);
-		priv->sequence = ppm->sequence;
+	struct nvkm_device *device = pm->engine.subdev.device;
+	if (nv40_pm(pm)->sequence != pm->sequence) {
+		nvkm_wr32(device, 0x400084, 0x00000020);
+		nv40_pm(pm)->sequence = pm->sequence;
 	}
 }
 
@@ -74,6 +72,28 @@
 	.next = nv40_perfctr_next,
 };
 
+static const struct nvkm_pm_func
+nv40_pm_ = {
+};
+
+int
+nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
+	     int index, struct nvkm_pm **ppm)
+{
+	struct nv40_pm *pm;
+	int ret;
+
+	if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppm = &pm->base;
+
+	ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+	if (ret)
+		return ret;
+
+	return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
+}
+
 static const struct nvkm_specdom
 nv40_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -95,36 +115,7 @@
 };
 
 int
-nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	struct nv40_pm_oclass *mclass = (void *)oclass;
-	struct nv40_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
+	return nv40_pm_new_(nv40_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-nv40_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv40_pm,
-}.base;
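
Note the changed signal programming in nv40_perfctr_init() above: signals are now plain u8 indices (see the nvkm_perfctr rework in priv.h below), so all four slots pack unconditionally at one byte each rather than being derived from pointer differences. In isolation, with example signal numbers:

/* Example signals for one counter; unused slots (0x00) pack as zero bytes. */
unsigned char signal[4] = { 0x46, 0x47, 0x00, 0x00 };
unsigned src = 0;
int i;

for (i = 0; i < 4; i++)
	src |= (unsigned)signal[i] << (i * 8);
/* src == 0x4746, the value written to the domain's source-select register */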
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 2338e15..da481ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,24 +1,14 @@
 #ifndef __NVKM_PM_NV40_H__
 #define __NVKM_PM_NV40_H__
+#define nv40_pm(p) container_of((p), struct nv40_pm, base)
 #include "priv.h"
 
-struct nv40_pm_oclass {
-	struct nvkm_oclass base;
-	const struct nvkm_specdom *doms;
-};
-
-struct nv40_pm_priv {
+struct nv40_pm {
 	struct nvkm_pm base;
 	u32 sequence;
 };
 
-int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *data, u32 size,
-		      struct nvkm_object **pobject);
-
-struct nv40_pm_cntr {
-	struct nvkm_perfctr base;
-};
-
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
+		 int index, struct nvkm_pm **);
 extern const struct nvkm_funcdom nv40_perfctr_func;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index 6af83b5..cc5a41d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -23,35 +23,153 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+nv50_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
+const struct nvkm_specsrc
+nv50_zrop_sources[] = {
+	{ 0x40708c, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0xf, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_zrop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_prop_sources[] = {
+	{ 0x40be50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc3_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0x7, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_tex_sources[] = {
+	{ 0x40b808, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc3_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
 static const struct nvkm_specdom
 nv50_pm[] = {
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
-			{ 0xc8, "gr_idle" },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc8, "pc01_gr_idle" },
+			{ 0x7f, "pc01_strmout_00" },
+			{ 0x80, "pc01_strmout_01" },
+			{ 0xdc, "pc01_trast_00" },
+			{ 0xdd, "pc01_trast_01" },
+			{ 0xde, "pc01_trast_02" },
+			{ 0xdf, "pc01_trast_03" },
+			{ 0xe2, "pc01_trast_04" },
+			{ 0xe3, "pc01_trast_05" },
+			{ 0x7c, "pc01_vattr_00" },
+			{ 0x7d, "pc01_vattr_01" },
+			{ 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
+			{ 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
+			{ 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
+			{ 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
+			{ 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
+			{ 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
+			{ 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
+			{ 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
+			{ 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
+			{ 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
+			{ 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
+			{ 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
+			{ 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
+			{ 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
+			{ 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
+			{ 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
+			{ 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
+			{ 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
+			{ 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
+			{ 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
+			{ 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
+			{ 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
+			{ 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
+			{ 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
+			{ 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
+			{ 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
+			{ 0x20, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x21, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x22, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x23, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x24, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x25, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xae, "pc01_unk00" },
+			{ 0xee, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x52, "pc02_crop_00", nv50_crop_sources },
+			{ 0x53, "pc02_crop_01", nv50_crop_sources },
+			{ 0x54, "pc02_crop_02", nv50_crop_sources },
+			{ 0x55, "pc02_crop_03", nv50_crop_sources },
+			{ 0x00, "pc02_prop_00", nv50_prop_sources },
+			{ 0x01, "pc02_prop_01", nv50_prop_sources },
+			{ 0x02, "pc02_prop_02", nv50_prop_sources },
+			{ 0x03, "pc02_prop_03", nv50_prop_sources },
+			{ 0x04, "pc02_prop_04", nv50_prop_sources },
+			{ 0x05, "pc02_prop_05", nv50_prop_sources },
+			{ 0x06, "pc02_prop_06", nv50_prop_sources },
+			{ 0x07, "pc02_prop_07", nv50_prop_sources },
+			{ 0x70, "pc02_tex_00", nv50_tex_sources },
+			{ 0x71, "pc02_tex_01", nv50_tex_sources },
+			{ 0x72, "pc02_tex_02", nv50_tex_sources },
+			{ 0x73, "pc02_tex_03", nv50_tex_sources },
+			{ 0x40, "pc02_tex_04", nv50_tex_sources },
+			{ 0x41, "pc02_tex_05", nv50_tex_sources },
+			{ 0x42, "pc02_tex_06", nv50_tex_sources },
+			{ 0x6c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x6d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x6e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x6f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xee, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x020, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
 	{}
 };
 
-struct nvkm_oclass *
-nv50_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv50_pm,
-}.base;
+int
+nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(nv50_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 1e6eff2..d7b81cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,58 +1,85 @@
 #ifndef __NVKM_PM_PRIV_H__
 #define __NVKM_PM_PRIV_H__
+#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
 #include <engine/pm.h>
 
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
+		 int index, struct nvkm_pm *);
+
+struct nvkm_pm_func {
+	void (*fini)(struct nvkm_pm *);
+};
+
 struct nvkm_perfctr {
-	struct nvkm_object base;
 	struct list_head head;
-	struct nvkm_perfsig *signal[4];
+	u8 domain;
+	u8  signal[4];
+	u64 source[4][8];
 	int slot;
 	u32 logic_op;
-	u32 clk;
 	u32 ctr;
 };
 
-extern struct nvkm_oclass nvkm_pm_sclass[];
-
-#include <core/engctx.h>
-
-struct nvkm_perfctx {
-	struct nvkm_engctx base;
+struct nvkm_specmux {
+	u32 mask;
+	u8 shift;
+	const char *name;
+	bool enable;
 };
 
-extern struct nvkm_oclass nvkm_pm_cclass;
+struct nvkm_specsrc {
+	u32 addr;
+	const struct nvkm_specmux *mux;
+	const char *name;
+};
+
+struct nvkm_perfsrc {
+	struct list_head head;
+	char *name;
+	u32 addr;
+	u32 mask;
+	u8 shift;
+	bool enable;
+};
+
+extern const struct nvkm_specsrc nv50_zcull_sources[];
+extern const struct nvkm_specsrc nv50_zrop_sources[];
+extern const struct nvkm_specsrc g84_vfetch_sources[];
+extern const struct nvkm_specsrc gt200_crop_sources[];
+extern const struct nvkm_specsrc gt200_prop_sources[];
+extern const struct nvkm_specsrc gt200_tex_sources[];
 
 struct nvkm_specsig {
 	u8 signal;
 	const char *name;
+	const struct nvkm_specsrc *source;
 };
 
 struct nvkm_perfsig {
 	const char *name;
+	u8 source[8];
 };
 
-struct nvkm_perfdom;
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
-
 struct nvkm_specdom {
 	u16 signal_nr;
 	const struct nvkm_specsig *signal;
 	const struct nvkm_funcdom *func;
 };
 
-extern const struct nvkm_specdom gt215_pm_pwr[];
-extern const struct nvkm_specdom gf100_pm_pwr[];
-extern const struct nvkm_specdom gk104_pm_pwr[];
+#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
 
 struct nvkm_perfdom {
+	struct nvkm_object object;
+	struct nvkm_perfmon *perfmon;
 	struct list_head head;
 	struct list_head list;
 	const struct nvkm_funcdom *func;
+	struct nvkm_perfctr *ctr[4];
 	char name[32];
 	u32 addr;
-	u8  quad;
-	u32 signal_nr;
+	u8  mode;
+	u32 clk;
+	u16 signal_nr;
 	struct nvkm_perfsig signal[];
 };
 
@@ -67,24 +94,10 @@
 int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
 		     const struct nvkm_specdom *);
 
-#define nvkm_pm_create(p,e,o,d)                                        \
-	nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pm_dtor(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_dtor(nv_object(c));                                   \
-})
-#define nvkm_pm_init(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_init(nv_object(c));                                   \
-})
-#define nvkm_pm_fini(p,s) ({                                           \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_fini(nv_object(c), (s));                              \
-})
+#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
 
-int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_pm_dtor(struct nvkm_object *);
-int  _nvkm_pm_init(struct nvkm_object *);
-int  _nvkm_pm_fini(struct nvkm_object *, bool);
+struct nvkm_perfmon {
+	struct nvkm_object object;
+	struct nvkm_pm *pm;
+};
 #endif
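
The reworked priv.h follows the broader subdev conversion: each exposed type embeds a struct nvkm_object, and a container_of() macro recovers the wrapper, replacing the create/dtor/init/fini macro family deleted above. A sketch of how the nvkm_perfmon() macro would be used from a hypothetical object callback — the function name here is illustrative only:

/* Sketch only: 'object' would arrive via an nvkm_object function table. */
static void *
example_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	/* expands to container_of(object, struct nvkm_perfmon, object) */
	struct nvkm_pm *pm = perfmon->pm;

	(void)pm;       /* the owning engine is reachable again */
	return perfmon; /* returned so the caller can kfree() it */
}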
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 06ee060..66b147b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
 /*
- *  fuc microcode for g98 psec engine
+ *  fuc microcode for g98 sec engine
  *  Copyright (C) 2010  Marcin Kościelnicki
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-.section #g98_psec_data
+.section #g98_sec_data
 
 ctx_dma:
 ctx_dma_query:		.b32 0
@@ -94,7 +94,7 @@
 
 .align 0x100
 
-.section #g98_psec_code
+.section #g98_sec_code
 
 	// $r0 is always set to 0 in our code - this allows some space savings.
 	clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 5d65c4f..eca6222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_psec_data[] = {
+uint32_t g98_sec_data[] = {
 /* 0x0000: ctx_dma */
 /* 0x0000: ctx_dma_query */
 	0x00000000,
@@ -150,7 +150,7 @@
 	0x00000000,
 };
 
-uint32_t g98_psec_code[] = {
+uint32_t g98_sec_code[] = {
 	0x17f004bd,
 	0x0010fe35,
 	0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 9d5c1b8..995c2c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -22,47 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include <engine/sec.h>
-#include <engine/falcon.h>
+#include <engine/fifo.h>
 #include "fuc/g98.fuc0s.h"
 
 #include <core/client.h>
 #include <core/enum.h>
-#include <engine/fifo.h>
+#include <core/gpuobj.h>
 
-struct g98_sec_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_sclass[] = {
-	{ 0x88b4, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PSEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_cclass = {
-	.handle = NV_ENGCTX(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PSEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum g98_sec_isr_error_name[] = {
 	{ 0x0000, "ILLEGAL_MTHD" },
@@ -73,77 +40,44 @@
 };
 
 static void
-g98_sec_intr(struct nvkm_subdev *subdev)
+g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g98_sec_priv *priv = (void *)subdev;
-	u32 disp = nv_rd32(priv, 0x08701c);
-	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
-	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
-	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
-	u32 addr = nv_rd32(priv, 0x087040) >> 16;
+	struct nvkm_subdev *subdev = &sec->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 ssta = nvkm_rd32(device, 0x087040) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x087040) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(priv, 0x087044);
-	int chid;
+	u32 data = nvkm_rd32(device, 0x087044);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(g98_sec_isr_error_name, ssta);
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(priv, "DISPATCH_ERROR [");
-		nvkm_enum_print(g98_sec_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       subc, mthd, data);
-		nv_wr32(priv, 0x087004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		nv_error(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x087004, stat);
-	}
-
-	nvkm_engctx_put(engctx);
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "UNKNOWN", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
 }
 
-static int
-g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct g98_sec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
-				 "PSEC", "sec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g98_sec_intr;
-	nv_engine(priv)->cclass = &g98_sec_cclass;
-	nv_engine(priv)->sclass = g98_sec_sclass;
-	nv_falcon(priv)->code.data = g98_psec_code;
-	nv_falcon(priv)->code.size = sizeof(g98_psec_code);
-	nv_falcon(priv)->data.data = g98_psec_data;
-	nv_falcon(priv)->data.size = sizeof(g98_psec_data);
-	return 0;
-}
-
-struct nvkm_oclass
-g98_sec_oclass = {
-	.handle = NV_ENGINE(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_sec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_sec = {
+	.code.data = g98_sec_code,
+	.code.size = sizeof(g98_sec_code),
+	.data.data = g98_sec_data,
+	.data.size = sizeof(g98_sec_data),
+	.pmc_enable = 0x00004000,
+	.intr = g98_sec_intr,
+	.sclass = {
+		{ -1, -1, G98_SEC },
+		{}
+	}
 };
+
+int
+g98_sec_new(struct nvkm_device *device, int index,
+	    struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(&g98_sec, device, index,
+				true, 0x087000, pengine);
+}
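
The SEC conversion mirrors the PM one, with two extras: the fifo layer now resolves the channel and passes it to the interrupt handler (no more hand-rolled nvkm_engctx_get() lookup), and the per-chip details live in a const nvkm_falcon_func table. A hedged sketch of wiring up another falcon engine the same way, assuming it sits in the same file so the static firmware arrays and handler are in scope — the "gxx" name and the omitted sclass list are placeholders:

/* Placeholder "gxx" engine; values copied from g98_sec purely for shape. */
static const struct nvkm_falcon_func
gxx_sec = {
	.code.data  = g98_sec_code,
	.code.size  = sizeof(g98_sec_code),
	.data.data  = g98_sec_data,
	.data.size  = sizeof(g98_sec_data),
	.pmc_enable = 0x00004000,
	.intr       = g98_sec_intr,
};

int
gxx_sec_new(struct nvkm_device *device, int index,
	    struct nvkm_engine **pengine)
{
	return nvkm_falcon_new_(&gxx_sec, device, index, true, 0x087000,
				pengine);
}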
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index bdc3a059..1c291e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,9 @@
+nvkm-y += nvkm/engine/sw/base.o
 nvkm-y += nvkm/engine/sw/nv04.o
 nvkm-y += nvkm/engine/sw/nv10.o
 nvkm-y += nvkm/engine/sw/nv50.o
 nvkm-y += nvkm/engine/sw/gf100.o
+
+nvkm-y += nvkm/engine/sw/chan.o
+
+nvkm-y += nvkm/engine/sw/nvsw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
new file mode 100644
index 0000000..53c1f7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <engine/fifo.h>
+
+bool
+nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
+{
+	struct nvkm_sw_chan *chan;
+	bool handled = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_for_each_entry(chan, &sw->chan, head) {
+		if (chan->fifo->chid == chid) {
+			handled = nvkm_sw_chan_mthd(chan, subc, mthd, data);
+			list_del(&chan->head);
+			list_add(&chan->head, &sw->chan);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return handled;
+}
+
+static int
+nvkm_sw_oclass_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(oclass->parent);
+	const struct nvkm_sw_chan_sclass *sclass = oclass->engn;
+	return sclass->ctor(chan, oclass, data, size, pobject);
+}
+
+static int
+nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	int c = 0;
+
+	while (sw->func->sclass[c].ctor) {
+		if (c++ == index) {
+			oclass->engn = &sw->func->sclass[index];
+			oclass->base =  sw->func->sclass[index].base;
+			oclass->base.ctor = nvkm_sw_oclass_new;
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	return sw->func->chan_new(sw, fifoch, oclass, pobject);
+}
+
+static void *
+nvkm_sw_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_sw(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_sw = {
+	.dtor = nvkm_sw_dtor,
+	.fifo.cclass = nvkm_sw_cclass_get,
+	.fifo.sclass = nvkm_sw_oclass_get,
+};
+
+int
+nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_sw **psw)
+{
+	struct nvkm_sw *sw;
+
+	if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&sw->chan);
+	sw->func = func;
+
+	return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+}
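
[Note on the hunk above] The interesting detail in the new base.c is nvkm_sw_mthd(): it walks the engine's channel list under engine.lock and, on a match, splices the channel back to the head (list_del() followed by list_add()) so the most recently active channel is found first on the next lookup. Below is a minimal userspace sketch of that move-to-front search, assuming nothing beyond standard C; the list helpers mirror <linux/list.h>, and struct chan / chan_lookup are illustrative names, not the kernel code.

    /*
     * Move-to-front lookup on an intrusive doubly-linked list, as
     * nvkm_sw_mthd() does under the engine spinlock.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_add(struct list_head *e, struct list_head *h)
    {
        e->next = h->next;
        e->prev = h;
        h->next->prev = e;
        h->next = e;
    }

    struct chan {
        int chid;
        struct list_head head;
    };

    /* Find a channel by id; on a hit, rotate it to the list head. */
    static struct chan *
    chan_lookup(struct list_head *chans, int chid)
    {
        struct list_head *p;
        for (p = chans->next; p != chans; p = p->next) {
            struct chan *chan = container_of(p, struct chan, head);
            if (chan->chid == chid) {
                list_del(&chan->head);
                list_add(&chan->head, chans);
                return chan;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct list_head chans;
        struct chan a = { .chid = 1 }, b = { .chid = 2 };

        list_init(&chans);
        list_add(&a.head, &chans);
        list_add(&b.head, &chans);      /* list order is now: 2, 1 */

        printf("found %d\n", chan_lookup(&chans, 1)->chid);
        printf("front is now %d\n",
               container_of(chans.next, struct chan, head)->chid);
        return 0;
    }

The nv50_sw_chan()/nvkm_nvsw() macros added elsewhere in this series wrap the same container_of() conversion once per class.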
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
new file mode 100644
index 0000000..d082f4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "chan.h"
+
+#include <core/notify.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+bool
+nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
+{
+	switch (mthd) {
+	case 0x0000:
+		return true;
+	case 0x0500:
+		nvkm_event_send(&chan->event, 1, 0, NULL, 0);
+		return true;
+	default:
+		if (chan->func->mthd)
+			return chan->func->mthd(chan, subc, mthd, data);
+		break;
+	}
+	return false;
+}
+
+static int
+nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
+			struct nvkm_notify *notify)
+{
+	union {
+		struct nvif_notify_uevent_req none;
+	} *req = data;
+	int ret;
+
+	if (nvif_unvers(req->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = 0;
+	}
+
+	return ret;
+}
+
+static const struct nvkm_event_func
+nvkm_sw_chan_event = {
+	.ctor = nvkm_sw_chan_event_ctor,
+};
+
+static void *
+nvkm_sw_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
+	struct nvkm_sw *sw = chan->sw;
+	unsigned long flags;
+	void *data = chan;
+
+	if (chan->func->dtor)
+		data = chan->func->dtor(chan);
+	nvkm_event_fini(&chan->event);
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_sw_chan = {
+	.dtor = nvkm_sw_chan_dtor,
+};
+
+int
+nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
+		  struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+		  struct nvkm_sw_chan *chan)
+{
+	unsigned long flags;
+
+	nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->sw = sw;
+	chan->fifo = fifo;
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_add(&chan->head, &sw->chan);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+
+	return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
+}
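
[Note on the hunk above] Two points worth calling out in chan.c. First, nvkm_sw_chan_event_ctor() looks as though it can return an uninitialised ret, but the nvif_unvers()/nvif_unpack() macros of this era expand to code that assigns ret in the caller's scope, so the value is defined on every path. Second, nvkm_sw_chan_dtor() lets the subclass pick which allocation is freed (data = chan->func->dtor(chan)), so a wrapper such as nv50_sw_chan can hand back its outer struct. A userspace sketch of that dtor-returns-allocation idiom, all names illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct base;

    struct base_func {
        void *(*dtor)(struct base *);   /* optional subclass hook */
    };

    struct base {
        const struct base_func *func;
    };

    struct derived {
        struct base base;   /* embedded first, so the cast below is valid */
        int extra;
    };

    static void *
    derived_dtor(struct base *b)
    {
        struct derived *d = (struct derived *)b;
        printf("tearing down subclass state (extra=%d)\n", d->extra);
        return d;           /* hand back the outer allocation */
    }

    static const struct base_func derived_func = { .dtor = derived_dtor };

    static void
    base_del(struct base *b)
    {
        void *data = b;
        if (b->func->dtor)
            data = b->func->dtor(b);
        free(data);         /* free whatever the subclass handed back */
    }

    int main(void)
    {
        struct derived *d = calloc(1, sizeof(*d));
        if (!d)
            return 1;
        d->base.func = &derived_func;
        d->extra = 42;
        base_del(&d->base);
        return 0;
    }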
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
new file mode 100644
index 0000000..6608bf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_SW_CHAN_H__
+#define __NVKM_SW_CHAN_H__
+#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
+#include "priv.h"
+#include <core/event.h>
+
+struct nvkm_sw_chan {
+	const struct nvkm_sw_chan_func *func;
+	struct nvkm_object object;
+	struct nvkm_sw *sw;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+
+	struct nvkm_event event;
+};
+
+struct nvkm_sw_chan_func {
+	void *(*dtor)(struct nvkm_sw_chan *);
+	bool (*mthd)(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+};
+
+int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
+		      struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		      struct nvkm_sw_chan *);
+bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 533d5d8..b01ef7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -23,119 +23,133 @@
  */
 #include "nv50.h"
 
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <engine/disp.h>
+#include <engine/fifo.h>
 
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u64 data = *(u32 *)args;
-	if (mthd == 0x0400) {
-		chan->vblank.offset &= 0x00ffffffffULL;
-		chan->vblank.offset |= data << 32;
-	} else {
-		chan->vblank.offset &= 0xff00000000ULL;
-		chan->vblank.offset |= data;
-	}
-	return 0;
-}
-
-static int
-gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	u32 data = *(u32 *)args;
-
-	switch (mthd) {
-	case 0x600:
-		nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
-		break;
-	case 0x644:
-		if (data & ~0x1ffffe)
-			return -EINVAL;
-		nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
-		break;
-	case 0x6ac:
-		nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static struct nvkm_omthds
-gf100_sw_omthds[] = {
-	{ 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
-	{ 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{ 0x0600, 0x0600, gf100_sw_mthd_mp_control },
-	{ 0x0644, 0x0644, gf100_sw_mthd_mp_control },
-	{ 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
-	{}
-};
-
-static struct nvkm_oclass
-gf100_sw_sclass[] = {
-	{ 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
-	{}
-};
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
 static int
-gf100_sw_vblsem_release(struct nvkm_notify *notify)
+gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
 	struct nv50_sw_chan *chan =
 		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
+	u32 inst = chan->base.fifo->inst->addr >> 12;
 
-	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
-	bar->flush(bar);
-	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060014, chan->vblank.value);
+	nvkm_wr32(device, 0x001718, 0x80000000 | inst);
+	nvkm_bar_flush(device->bar);
+	nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060014, chan->vblank.value);
 
 	return NVKM_NOTIFY_DROP;
 }
 
-static struct nv50_sw_cclass
-gf100_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = gf100_sw_vblsem_release,
+static bool
+gf100_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
+{
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
+	switch (mthd) {
+	case 0x0400:
+		chan->vblank.offset &= 0x00ffffffffULL;
+		chan->vblank.offset |= (u64)data << 32;
+		return true;
+	case 0x0404:
+		chan->vblank.offset &= 0xff00000000ULL;
+		chan->vblank.offset |= data;
+		return true;
+	case 0x0408:
+		chan->vblank.value = data;
+		return true;
+	case 0x040c:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
+		break;
+	case 0x600: /* MP.PM_UNK000 */
+		nvkm_wr32(device, 0x419e00, data);
+		return true;
+	case 0x644: /* MP.TRAP_WARP_ERROR_EN */
+		if (!(data & ~0x001ffffe)) {
+			nvkm_wr32(device, 0x419e44, data);
+			return true;
+		}
+		break;
+	case 0x6ac: /* MP.PM_UNK0AC */
+		nvkm_wr32(device, 0x419eac, data);
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+static const struct nvkm_sw_chan_func
+gf100_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = gf100_sw_chan_mthd,
 };
 
+static int
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
+{
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
+	struct nv50_sw_chan *chan;
+	int ret, i;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,
+				&chan->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       gf100_sw_chan_vblsem_release, false,
+				       &(struct nvif_notify_head_req_v0) {
+					.head = i,
+				       },
+				       sizeof(struct nvif_notify_head_req_v0),
+				       sizeof(struct nvif_notify_head_rep_v0),
+				       &chan->vblank.notify[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf100_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &gf100_sw_cclass.base,
-	.sclass =  gf100_sw_sclass,
-}.base;
+static const struct nvkm_sw_func
+gf100_sw = {
+	.chan_new = gf100_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } },
+		{}
+	}
+};
+
+int
+gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&gf100_sw, device, index, psw);
+}
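
[Note on the hunk above] gf100_sw_chan_mthd() reassembles a 64-bit semaphore offset from two 32-bit methods: 0x0400 carries the high part, 0x0404 the low part. The (u64) cast before the shift matters; shifting a 32-bit operand left by 32 is undefined behaviour in C. A standalone sketch of the merge, using the same masks as the driver (method names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t offset;

    static void
    mthd_offset_hi(uint32_t data)           /* method 0x0400 */
    {
        offset &= 0x00ffffffffULL;          /* keep the low half */
        offset |= (uint64_t)data << 32;     /* cast first, then shift */
    }

    static void
    mthd_offset_lo(uint32_t data)           /* method 0x0404 */
    {
        offset &= 0xff00000000ULL;          /* keep the high half */
        offset |= data;
    }

    int main(void)
    {
        mthd_offset_hi(0x12);
        mthd_offset_lo(0x34567890);
        printf("offset = 0x%llx\n", (unsigned long long)offset);
        return 0;
    }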
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 8970244..445217f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,15 +21,18 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
-#include <engine/fifo.h>
+#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv04_sw_priv {
-	struct nvkm_sw base;
-};
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+#include <nvif/unpack.h>
 
 struct nv04_sw_chan {
 	struct nvkm_sw_chan base;
+	atomic_t ref;
 };
 
 /*******************************************************************************
@@ -37,103 +40,99 @@
  ******************************************************************************/
 
 static int
-nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
 {
-	struct nvkm_object *channel = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)channel->parent;
-	atomic_set(&fifo->refcnt, *(u32*)data);
-	return 0;
+	struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
+	union {
+		struct nv04_nvsw_get_ref_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.ref = atomic_read(&chan->ref);
+	}
+
+	return ret;
 }
 
 static int
-nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
+nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
 {
-	struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
+	switch (mthd) {
+	case NV04_NVSW_GET_REF:
+		return nv04_nvsw_mthd_get_ref(nvsw, data, size);
+	default:
+		break;
+	}
 	return -EINVAL;
 }
 
-static struct nvkm_omthds
-nv04_sw_omthds[] = {
-	{ 0x0150, 0x0150, nv04_sw_set_ref },
-	{ 0x0500, 0x0500, nv04_sw_flip },
-	{}
+static const struct nvkm_nvsw_func
+nv04_nvsw = {
+	.mthd = nv04_nvsw_mthd,
 };
 
-static struct nvkm_oclass
-nv04_sw_sclass[] = {
-	{ 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds },
-	{}
-};
+static int
+nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
+}
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
-static int
-nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static bool
+nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv04_sw_chan *chan;
-	int ret;
+	struct nv04_sw_chan *chan = nv04_sw_chan(base);
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	switch (mthd) {
+	case 0x0150:
+		atomic_set(&chan->ref, data);
+		return true;
+	default:
+		break;
+	}
 
-	return 0;
+	return false;
 }
 
-static struct nvkm_oclass
-nv04_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
+static const struct nvkm_sw_chan_func
+nv04_sw_chan = {
+	.mthd = nv04_sw_chan_mthd,
 };
 
+static int
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv04_sw_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	atomic_set(&chan->ref, 0);
+	*pobject = &chan->base.object;
+
+	return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-void
-nv04_sw_intr(struct nvkm_subdev *subdev)
-{
-	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
-}
-
-static int
-nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv04_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nv04_sw_cclass;
-	nv_engine(priv)->sclass = nv04_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv04_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
+static const struct nvkm_sw_func
+nv04_sw = {
+	.chan_new = nv04_sw_chan_new,
+	.sclass = {
+		{ nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
+		{}
+	}
 };
+
+int
+nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv04_sw, device, index, psw);
+}
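
[Note on the hunk above] The NV04 class keeps its "ref" in an atomic_t so the host-side NV04_NVSW_GET_REF control can read it without taking the channel lock while pushbuffer method 0x0150 stores it. A userspace sketch with C11 atomics standing in for atomic_t:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint chan_ref;

    static void
    mthd_set_ref(uint32_t data)     /* method 0x0150, from the pushbuffer */
    {
        atomic_store(&chan_ref, data);
    }

    static uint32_t
    nvsw_get_ref(void)              /* NV04_NVSW_GET_REF, from the host */
    {
        return atomic_load(&chan_ref);
    }

    int main(void)
    {
        mthd_set_ref(0xcafe);
        printf("ref = 0x%x\n", nvsw_get_ref());
        return 0;
    }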
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index c61153a..adf70d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,102 +21,48 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv10_sw_priv {
-	struct nvkm_sw base;
-};
-
-struct nv10_sw_chan {
-	struct nvkm_sw_chan base;
-};
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv10_sw_omthds[] = {
-	{ 0x0500, 0x0500, nv10_sw_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv10_sw_sclass[] = {
-	{ 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
-	{}
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
-static int
-nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	struct nv10_sw_chan *chan;
-	int ret;
-
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-nv10_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
+static const struct nvkm_sw_chan_func
+nv10_sw_chan = {
 };
 
+static int
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nvkm_sw_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
+	return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
-nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv10_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nv10_sw_cclass;
-	nv_engine(priv)->sclass = nv10_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv10_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
+static const struct nvkm_sw_func
+nv10_sw = {
+	.chan_new = nv10_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } },
+		{}
+	}
 };
+
+int
+nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv10_sw, device, index, psw);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 401fcd7..a381196 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -23,153 +23,98 @@
  */
 #include "nv50.h"
 
-#include <core/device.h>
-#include <core/handle.h>
-#include <core/namedb.h>
+#include <core/gpuobj.h>
 #include <engine/disp.h>
+#include <engine/fifo/chan.h>
 #include <subdev/bar.h>
 
 #include <nvif/event.h>
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
-	struct nvkm_handle *handle;
-	int ret = -EINVAL;
-
-	handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
-	if (!handle)
-		return -ENOENT;
-
-	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-		struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
-		ret = 0;
-	}
-	nvkm_namedb_put(handle);
-	return ret;
-}
-
-static int
-nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.offset = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
-			  void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.value = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u32 head = *(u32 *)args;
-	if (head >= nvkm_disp(chan)->vblank.index_nr)
-		return -EINVAL;
-
-	nvkm_notify_get(&chan->vblank.notify[head]);
-	return 0;
-}
-
-int
-nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv50_sw_omthds[] = {
-	{ 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
-	{ 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_sw_sclass[] = {
-	{ 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
-	{}
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
 static int
-nv50_sw_vblsem_release(struct nvkm_notify *notify)
+nv50_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
 	struct nv50_sw_chan *chan =
 		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
 
-	nv_wr32(priv, 0x001704, chan->vblank.channel);
-	nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-	bar->flush(bar);
+	nvkm_wr32(device, 0x001704, chan->base.fifo->inst->addr >> 12);
+	nvkm_wr32(device, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+	nvkm_bar_flush(device->bar);
 
-	if (nv_device(priv)->chipset == 0x50) {
-		nv_wr32(priv, 0x001570, chan->vblank.offset);
-		nv_wr32(priv, 0x001574, chan->vblank.value);
+	if (device->chipset == 0x50) {
+		nvkm_wr32(device, 0x001570, chan->vblank.offset);
+		nvkm_wr32(device, 0x001574, chan->vblank.value);
 	} else {
-		nv_wr32(priv, 0x060010, chan->vblank.offset);
-		nv_wr32(priv, 0x060014, chan->vblank.value);
+		nvkm_wr32(device, 0x060010, chan->vblank.offset);
+		nvkm_wr32(device, 0x060014, chan->vblank.value);
 	}
 
 	return NVKM_NOTIFY_DROP;
 }
 
-void
-nv50_sw_context_dtor(struct nvkm_object *object)
+static bool
+nv50_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv50_sw_chan *chan = (void *)object;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
-		nvkm_notify_fini(&chan->vblank.notify[i]);
-
-	nvkm_sw_context_destroy(&chan->base);
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
+	switch (mthd) {
+	case 0x018c: chan->vblank.ctxdma = data; return true;
+	case 0x0400: chan->vblank.offset = data; return true;
+	case 0x0404: chan->vblank.value  = data; return true;
+	case 0x0408:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
+		break;
+	default:
+		break;
+	}
+	return false;
 }
 
-int
-nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+void *
+nv50_sw_chan_dtor(struct nvkm_sw_chan *base)
 {
-	struct nvkm_disp *pdisp = nvkm_disp(parent);
-	struct nv50_sw_cclass *pclass = (void *)oclass;
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	int i;
+	for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
+		nvkm_notify_fini(&chan->vblank.notify[i]);
+	return chan;
+}
+
+static const struct nvkm_sw_chan_func
+nv50_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = nv50_sw_chan_mthd,
+};
+
+static int
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
 	struct nv50_sw_chan *chan;
 	int ret, i;
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);
 	if (ret)
 		return ret;
 
-	for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
-		ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank,
-				       false,
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       nv50_sw_chan_vblsem_release, false,
 				       &(struct nvif_notify_head_req_v0) {
 					.head = i,
 				       },
@@ -180,55 +125,24 @@
 			return ret;
 	}
 
-	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nv50_sw_cclass
-nv50_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = nv50_sw_vblsem_release,
-};
-
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
+static const struct nvkm_sw_func
+nv50_sw = {
+	.chan_new = nv50_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } },
+		{}
+	}
+};
+
 int
-nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
 {
-	struct nv50_sw_oclass *pclass = (void *)oclass;
-	struct nv50_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = pclass->cclass;
-	nv_engine(priv)->sclass = pclass->sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
+	return nvkm_sw_new_(&nv50_sw, device, index, psw);
 }
-
-struct nvkm_oclass *
-nv50_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &nv50_sw_cclass.base,
-	.sclass =  nv50_sw_sclass,
-}.base;
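
[Note on the hunk above] The vblank semaphore here is one-shot: the release handler returns NVKM_NOTIFY_DROP, which disarms the notify after it fires, and method 0x0408 re-arms it via nvkm_notify_get() only after bounds-checking the head index against disp->vblank.index_nr. A small sketch of that arm / fire-once / re-arm cycle (HEADS, arm_vblank and vblank_irq are illustrative stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define HEADS 4

    struct notify {
        bool armed;
        void (*handler)(int head);
    };

    static struct notify vblank[HEADS];

    static bool
    arm_vblank(unsigned int head)       /* what method 0x0408 does */
    {
        if (head >= HEADS)              /* reject bogus head indices */
            return false;
        vblank[head].armed = true;      /* nvkm_notify_get() */
        return true;
    }

    static void
    vblank_irq(int head)                /* what the disp engine would do */
    {
        if (vblank[head].armed) {
            vblank[head].armed = false; /* one-shot: NOTIFY_DROP */
            vblank[head].handler(head);
        }
    }

    static void release(int head) { printf("released, head %d\n", head); }

    int main(void)
    {
        vblank[0].handler = release;
        arm_vblank(0);
        vblank_irq(0);      /* fires once */
        vblank_irq(0);      /* dropped: silent until re-armed */
        return 0;
    }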
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index d8adc11..25cdfde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,45 +1,20 @@
 #ifndef __NVKM_SW_NV50_H__
 #define __NVKM_SW_NV50_H__
-#include <engine/sw.h>
+#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 #include <core/notify.h>
 
-struct nv50_sw_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-};
-
-struct nv50_sw_priv {
-	struct nvkm_sw base;
-};
-
-int  nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-
-struct nv50_sw_cclass {
-	struct nvkm_oclass base;
-	int (*vblank)(struct nvkm_notify *);
-};
-
 struct nv50_sw_chan {
 	struct nvkm_sw_chan base;
 	struct {
 		struct nvkm_notify notify[4];
-		u32 channel;
 		u32 ctxdma;
 		u64 offset;
 		u32 value;
 	} vblank;
 };
 
-int  nv50_sw_context_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
-void nv50_sw_context_dtor(struct nvkm_object *);
-
-int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
+void *nv50_sw_chan_dtor(struct nvkm_sw_chan *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
new file mode 100644
index 0000000..66cf986
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nvsw.h"
+#include "chan.h"
+
+#include <nvif/class.h>
+
+static int
+nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	if (nvsw->func->mthd)
+		return nvsw->func->mthd(nvsw, mthd, data, size);
+	return -ENODEV;
+}
+
+static int
+nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
+		struct nvkm_event **pevent)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	switch (mthd) {
+	case NVSW_NTFY_UEVENT:
+		*pevent = &nvsw->chan->event;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_nvsw_ = {
+	.mthd = nvkm_nvsw_mthd_,
+	.ntfy = nvkm_nvsw_ntfy_,
+};
+
+int
+nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
+	       const struct nvkm_oclass *oclass, void *data, u32 size,
+	       struct nvkm_object **pobject)
+{
+	struct nvkm_nvsw *nvsw;
+
+	if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &nvsw->object;
+
+	nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
+	nvsw->func = func;
+	nvsw->chan = chan;
+	return 0;
+}
+
+static const struct nvkm_nvsw_func
+nvkm_nvsw = {
+};
+
+int
+nvkm_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nvkm_nvsw, chan, oclass, data, size, pobject);
+}
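
[Note on the hunk above] nvkm_nvsw_mthd_() shows the fallback convention used throughout: the hook is optional, so the base object forwards only when the subclass installed one and otherwise returns a distinct errno (-ENODEV for "no methods at all", -EINVAL from the subclass for an unknown method). A compact userspace sketch, names illustrative:

    #include <errno.h>
    #include <stdio.h>

    struct nvsw;

    struct nvsw_func {
        int (*mthd)(struct nvsw *, unsigned mthd);  /* optional */
    };

    struct nvsw {
        const struct nvsw_func *func;
    };

    static int
    nvsw_mthd(struct nvsw *nvsw, unsigned mthd)
    {
        if (nvsw->func->mthd)
            return nvsw->func->mthd(nvsw, mthd);
        return -ENODEV;     /* class implements no methods at all */
    }

    static int
    nv04_mthd(struct nvsw *nvsw, unsigned mthd)
    {
        return mthd == 0x150 ? 0 : -EINVAL;     /* unknown method */
    }

    static const struct nvsw_func nv04 = { .mthd = nv04_mthd };
    static const struct nvsw_func plain = {};   /* like nvkm_nvsw */

    int main(void)
    {
        struct nvsw a = { .func = &nv04 }, b = { .func = &plain };
        printf("%d %d %d\n", nvsw_mthd(&a, 0x150),
               nvsw_mthd(&a, 0x999), nvsw_mthd(&b, 0x150));
        return 0;
    }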
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
new file mode 100644
index 0000000..943ef4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_NVSW_H__
+#define __NVKM_NVSW_H__
+#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
+#include "priv.h"
+
+struct nvkm_nvsw {
+	struct nvkm_object object;
+	const struct nvkm_nvsw_func *func;
+	struct nvkm_sw_chan *chan;
+};
+
+struct nvkm_nvsw_func {
+	int (*mthd)(struct nvkm_nvsw *, u32 mthd, void *data, u32 size);
+};
+
+int nvkm_nvsw_new_(const struct nvkm_nvsw_func *, struct nvkm_sw_chan *,
+		   const struct nvkm_oclass *, void *data, u32 size,
+		   struct nvkm_object **pobject);
+int nvkm_nvsw_new(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		  void *data, u32 size, struct nvkm_object **pobject);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
new file mode 100644
index 0000000..0ef1318
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_SW_PRIV_H__
+#define __NVKM_SW_PRIV_H__
+#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
+#include <engine/sw.h>
+struct nvkm_sw_chan;
+
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
+		 int index, struct nvkm_sw **);
+
+struct nvkm_sw_chan_sclass {
+	int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+struct nvkm_sw_func {
+	int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	const struct nvkm_sw_chan_sclass sclass[];
+};
+#endif
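
[Note on the hunk above] nvkm_sw_func ends in a flexible array of nvkm_sw_chan_sclass entries terminated by an empty element, which is why nvkm_sw_oclass_get() can loop on sclass[c].ctor without a separate count. A sketch of that sentinel-terminated walk, which doubles as both "get by index" and "count" (entry names and class values are illustrative):

    #include <stdio.h>

    struct entry {
        int (*ctor)(void);
        int oclass;
    };

    static int new_a(void) { return 0xa; }
    static int new_b(void) { return 0xb; }

    static const struct entry sclass[] = {
        { new_a, 0x006e },
        { new_b, 0x016e },
        {}                  /* sentinel: ctor == NULL terminates */
    };

    /* Mirrors nvkm_sw_oclass_get(): index on a hit, count on a miss. */
    static int
    sclass_get(const struct entry **pentry, int index)
    {
        int c = 0;
        while (sclass[c].ctor) {
            if (c++ == index) {
                *pentry = &sclass[index];
                return index;
            }
        }
        return c;
    }

    int main(void)
    {
        const struct entry *e;
        if (sclass_get(&e, 1) == 1)
            printf("class 0x%04x\n", e->oclass);
        printf("count %d\n", sclass_get(&e, 100));
        return 0;
    }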
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 45f4e18..4188c77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/vp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-/*******************************************************************************
- * VP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_sclass[] = {
-	{ 0x7476, &nvkm_object_ofuncs },
-	{},
+static const struct nvkm_xtensa_func
+g84_vp = {
+	.pmc_enable = 0x01020000,
+	.fifo_val = 0x111,
+	.unkd28 = 0x9c544,
+	.sclass = {
+		{ -1, -1, NV74_VP2 },
+		{}
+	}
 };
 
-/*******************************************************************************
- * PVP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_cclass = {
-	.handle = NV_ENGCTX(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * PVP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	    struct nvkm_oclass *oclass, void *data, u32 size,
-	    struct nvkm_object **pobject)
+int
+g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
-				 "PVP", "vp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g84_vp_cclass;
-	nv_engine(priv)->sclass = g84_vp_sclass;
-	priv->fifo_val = 0x111;
-	priv->unkd28 = 0x9c544;
-	return 0;
+	return nvkm_xtensa_new_(&g84_vp, device, index,
+				true, 0x00f000, pengine);
 }
-
-struct nvkm_oclass
-g84_vp_oclass = {
-	.handle = NV_ENGINE(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_vp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index cea90df..a3d4f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -20,153 +20,173 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include <engine/xtensa.h>
-#include <core/device.h>
 
-#include <core/engctx.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-u32
-_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
-	return nv_rd32(xtensa, xtensa->addr + addr);
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
+	int c = 0;
+
+	while (xtensa->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = xtensa->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
 }
 
-void
-_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
-	nv_wr32(xtensa, xtensa->addr + addr, data);
+	return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
+			       true, parent, pgpuobj);
 }
 
-int
-_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, void *data, u32 size,
-			 struct nvkm_object **pobject)
-{
-	struct nvkm_engctx *engctx;
-	int ret;
+static const struct nvkm_object_func
+nvkm_xtensa_cclass = {
+	.bind = nvkm_xtensa_cclass_bind,
+};
 
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
-}
-
-void
-_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
+static void
+nvkm_xtensa_intr(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)subdev;
-	u32 unk104 = nv_ro32(xtensa, 0xd04);
-	u32 intr = nv_ro32(xtensa, 0xc20);
-	u32 chan = nv_ro32(xtensa, 0xc28);
-	u32 unk10c = nv_ro32(xtensa, 0xd0c);
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
+	u32 unk104 = nvkm_rd32(device, base + 0xd04);
+	u32 intr = nvkm_rd32(device, base + 0xc20);
+	u32 chan = nvkm_rd32(device, base + 0xc28);
+	u32 unk10c = nvkm_rd32(device, base + 0xd0c);
 
 	if (intr & 0x10)
-		nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
-	nv_wo32(xtensa, 0xc20, intr);
-	intr = nv_ro32(xtensa, 0xc20);
+		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
+	nvkm_wr32(device, base + 0xc20, intr);
+	intr = nvkm_rd32(device, base + 0xc20);
 	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
-		nv_debug(xtensa, "Enabling FIFO_CTRL\n");
-		nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
+		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
+		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
 	}
 }
 
-int
-nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+static int
+nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_xtensa *xtensa;
-	int ret;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_device *device = xtensa->engine.subdev.device;
+	const u32 base = xtensa->addr;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	xtensa = *pobject;
-	if (ret)
-		return ret;
+	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
-	nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
-	xtensa->addr = addr;
+	if (!suspend)
+		nvkm_memory_del(&xtensa->gpu_fw);
 	return 0;
 }
 
-int
-_nvkm_xtensa_init(struct nvkm_object *object)
+static int
+nvkm_xtensa_init(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_xtensa *xtensa = (void *)object;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
 	const struct firmware *fw;
 	char name[32];
 	int i, ret;
+	u64 addr, size;
 	u32 tmp;
 
-	ret = nvkm_engine_init(&xtensa->base);
-	if (ret)
-		return ret;
-
 	if (!xtensa->gpu_fw) {
 		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
 			 xtensa->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_warn(xtensa, "unable to load firmware %s\n", name);
+			nvkm_warn(subdev, "unable to load firmware %s\n", name);
 			return ret;
 		}
 
 		if (fw->size > 0x40000) {
-			nv_warn(xtensa, "firmware %s too large\n", name);
+			nvkm_warn(subdev, "firmware %s too large\n", name);
 			release_firmware(fw);
 			return -EINVAL;
 		}
 
-		ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x40000, 0x1000, false,
 				      &xtensa->gpu_fw);
 		if (ret) {
 			release_firmware(fw);
 			return ret;
 		}
 
-		nv_debug(xtensa, "Loading firmware to address: 0x%llx\n",
-			 xtensa->gpu_fw->addr);
-
+		nvkm_kmap(xtensa->gpu_fw);
 		for (i = 0; i < fw->size / 4; i++)
-			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+		nvkm_done(xtensa->gpu_fw);
 		release_firmware(fw);
 	}
 
-	nv_wo32(xtensa, 0xd10, 0x1fffffff); /* ?? */
-	nv_wo32(xtensa, 0xd08, 0x0fffffff); /* ?? */
+	addr = nvkm_memory_addr(xtensa->gpu_fw);
+	size = nvkm_memory_size(xtensa->gpu_fw);
 
-	nv_wo32(xtensa, 0xd28, xtensa->unkd28); /* ?? */
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
+	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
 
-	nv_wo32(xtensa, 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */
-	nv_wo32(xtensa, 0xcc4, 0x1c); /* XT_REGION_SETUP */
-	nv_wo32(xtensa, 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */
+	nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 
-	tmp = nv_rd32(xtensa, 0x0);
-	nv_wo32(xtensa, 0xde0, tmp); /* SCRATCH_H2X */
+	nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
+	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
+	nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
 
-	nv_wo32(xtensa, 0xce8, 0xf); /* XT_REGION_SETUP */
+	tmp = nvkm_rd32(device, 0x0);
+	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
 
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
+
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 	return 0;
 }
 
-int
-_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_xtensa_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
+	return nvkm_xtensa(engine);
+}
 
-	nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */
-	nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */
+static const struct nvkm_engine_func
+nvkm_xtensa = {
+	.dtor = nvkm_xtensa_dtor,
+	.init = nvkm_xtensa_init,
+	.fini = nvkm_xtensa_fini,
+	.intr = nvkm_xtensa_intr,
+	.fifo.sclass = nvkm_xtensa_oclass_get,
+	.cclass = &nvkm_xtensa_cclass,
+};
 
-	if (!suspend)
-		nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);
+int
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
+{
+	struct nvkm_xtensa *xtensa;
 
-	return nvkm_engine_fini(&xtensa->base, suspend);
+	if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
+		return -ENOMEM;
+	xtensa->func = func;
+	xtensa->addr = addr;
+	*pengine = &xtensa->engine;
+
+	return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+				enable, &xtensa->engine);
 }
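
[Note on the hunk above] The firmware path in nvkm_xtensa_init() is the usual request_firmware() flow: fetch the blob, reject anything over 0x40000 bytes before touching instance memory, copy it 32 bits at a time under nvkm_kmap()/nvkm_done(), then release_firmware(). A userspace sketch of the validation and word copy, with plain buffers standing in for the kernel APIs:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FW_MAX 0x40000      /* same cap the driver enforces */

    static int
    stage_firmware(const uint8_t *data, size_t size, uint32_t *dst)
    {
        size_t i;

        if (size > FW_MAX) {
            fprintf(stderr, "firmware too large (%zu bytes)\n", size);
            return -1;          /* the driver returns -EINVAL here */
        }

        /* Copy 32 bits at a time, as the nvkm_wo32() loop does. */
        for (i = 0; i < size / 4; i++) {
            uint32_t word;
            memcpy(&word, data + i * 4, sizeof(word));  /* alignment-safe */
            dst[i] = word;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t blob[16] = { 0x01, 0x02, 0x03, 0x04 };
        uint32_t dest[4];

        if (stage_firmware(blob, sizeof(blob), dest) == 0)
            printf("staged %zu words, dest[0] = 0x%08x\n",
                   sizeof(blob) / 4, dest[0]);
        return 0;
    }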
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index a1bb3e4..ee2c38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -13,6 +13,7 @@
 include $(src)/nvkm/subdev/mc/Kbuild
 include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
+include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1ab554a..1e138b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,5 @@
 nvkm-y += nvkm/subdev/bar/base.o
 nvkm-y += nvkm/subdev/bar/nv50.o
+nvkm-y += nvkm/subdev/bar/g84.o
 nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 3502d00..a9433ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -23,122 +23,61 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
+void
+nvkm_bar_flush(struct nvkm_bar *bar)
+{
+	if (bar && bar->func->flush)
+		bar->func->flush(bar);
+}
 
-struct nvkm_barobj {
-	struct nvkm_object base;
-	struct nvkm_vma vma;
-	void __iomem *iomem;
-};
+struct nvkm_vm *
+nvkm_bar_kmap(struct nvkm_bar *bar)
+{
+	/* disallow kmap() until after vm has been bootstrapped */
+	if (bar && bar->func->kmap && bar->subdev.oneinit)
+		return bar->func->kmap(bar);
+	return NULL;
+}
+
+int
+nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+{
+	return bar->func->umap(bar, size, type, vma);
+}
 
 static int
-nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bar *bar = nvkm_bar(device);
-	struct nvkm_mem *mem = data;
-	struct nvkm_barobj *barobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
-	*pobject = nv_object(barobj);
-	if (ret)
-		return ret;
-
-	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
-	if (ret)
-		return ret;
-
-	barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
-				(u32)barobj->vma.offset, mem->size << 12);
-	if (!barobj->iomem) {
-		nv_warn(bar, "PRAMIN ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->oneinit(bar);
 }
 
-static void
-nvkm_barobj_dtor(struct nvkm_object *object)
+static int
+nvkm_bar_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bar *bar = nvkm_bar(object);
-	struct nvkm_barobj *barobj = (void *)object;
-	if (barobj->vma.node) {
-		if (barobj->iomem)
-			iounmap(barobj->iomem);
-		bar->unmap(bar, &barobj->vma);
-	}
-	nvkm_object_destroy(&barobj->base);
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->init(bar);
 }
 
-static u32
-nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_barobj *barobj = (void *)object;
-	return ioread32_native(barobj->iomem + addr);
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->dtor(bar);
 }
 
-static void
-nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_barobj *barobj = (void *)object;
-	iowrite32_native(data, barobj->iomem + addr);
-}
-
-static struct nvkm_oclass
-nvkm_barobj_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_barobj_ctor,
-		.dtor = nvkm_barobj_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-		.rd32 = nvkm_barobj_rd32,
-		.wr32 = nvkm_barobj_wr32,
-	},
+static const struct nvkm_subdev_func
+nvkm_bar = {
+	.dtor = nvkm_bar_dtor,
+	.oneinit = nvkm_bar_oneinit,
+	.init = nvkm_bar_init,
 };
 
-int
-nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
-	       struct nvkm_mem *mem, struct nvkm_object **pobject)
-{
-	struct nvkm_object *gpuobj;
-	int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				   &nvkm_barobj_oclass, mem, 0, &gpuobj);
-	if (ret == 0)
-		*pobject = gpuobj;
-	return ret;
-}
-
-int
-nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
-				  "bar", length, pobject);
-	bar = *pobject;
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 void
-nvkm_bar_destroy(struct nvkm_bar *bar)
+nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bar *bar)
 {
-	nvkm_subdev_destroy(&bar->base);
-}
-
-void
-_nvkm_bar_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = (void *)object;
-	nvkm_bar_destroy(bar);
+	nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+	bar->func = func;
+	spin_lock_init(&bar->lock);
 }
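
[Note on the hunk above] The wrappers bar/base.c now exports are deliberately NULL-tolerant: nvkm_bar_flush() checks both the subdev pointer and the hook, so callers on hardware without a BAR (or without a flush requirement) need no special casing, and nvkm_bar_kmap() additionally gates on subdev.oneinit so nothing maps through BAR3 before the VM is bootstrapped. A sketch of the no-op-on-absent pattern, names illustrative:

    #include <stdio.h>

    struct bar_func { void (*flush)(void); };
    struct bar { const struct bar_func *func; };

    static void
    bar_flush(struct bar *bar)
    {
        if (bar && bar->func->flush)    /* absent subdev or hook: no-op */
            bar->func->flush();
    }

    static void hw_flush(void) { printf("flushed\n"); }
    static const struct bar_func g84 = { .flush = hw_flush };

    int main(void)
    {
        struct bar b = { .func = &g84 };
        bar_flush(&b);      /* flushes */
        bar_flush(NULL);    /* harmless on hardware without a BAR */
        return 0;
    }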
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
new file mode 100644
index 0000000..ef71713
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+void
+g84_bar_flush(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	unsigned long flags;
+	spin_lock_irqsave(&bar->lock, flags);
+	nvkm_wr32(device, 0x070000, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->lock, flags);
+}
+
+static const struct nvkm_bar_func
+g84_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = g84_bar_flush,
+};
+
+int
+g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+}
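
[Note on the hunk above] g84_bar_flush() waits for the flush-pending bit with nvkm_msec(), polling until bit 1 of 0x070000 clears or the 2000ms budget runs out, all under bar->lock. A userspace sketch of that bounded poll, with POSIX clock_gettime() standing in for the kernel's timer and flush_busy() as an illustrative stand-in for the register read:

    #include <stdio.h>
    #include <time.h>

    static int polls;

    static int
    flush_busy(void)            /* stands in for nvkm_rd32(...) & 2 */
    {
        return ++polls < 3;     /* pretend the bit clears on poll three */
    }

    static int
    wait_flush(long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (!flush_busy())
                return 0;       /* condition met */
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
                return -1;      /* give up instead of spinning forever */
        }
    }

    int main(void)
    {
        printf("flush %s after %d polls\n",
               wait_flush(2000) ? "timed out" : "completed", polls);
        return 0;
    }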
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 12a1aeb..c794b2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -21,101 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "gf100.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 
-struct gf100_bar_priv_vm {
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct gf100_bar_priv_vm bar[2];
-};
-
-static int
-gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+static struct nvkm_vm *
+gf100_bar_kmap(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
+	return gf100_bar(base)->bar[0].vm;
+}
 
-	ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+int
+gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+{
+	struct gf100_bar *bar = gf100_bar(base);
+	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static int
-gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
+		  struct lock_class_key *key, int bar_nr)
 {
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
-			  mem->page_shift, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
-}
-
-static void
-gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
-{
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-}
-
-static int
-gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
-		  int bar_nr)
-{
-	struct nvkm_device *device = nv_device(&priv->base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	struct nvkm_vm *vm;
 	resource_size_t bar_len;
 	int ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
 			      &bar_vm->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
-			      &bar_vm->pgd);
+	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
 	if (ret)
 		return ret;
 
-	bar_len = nv_device_resource_len(device, bar_nr);
+	bar_len = device->func->resource_size(device, bar_nr);
 
-	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
 	/*
 	 * Bootstrap page table lookup.
 	 */
 	if (bar_nr == 3) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-				      (bar_len >> 12) * 8, 0x1000,
-				      NVOBJ_FLAG_ZERO_ALLOC,
-				      &vm->pgt[0].obj[0]);
-		vm->pgt[0].refcount[0] = 1;
-		if (ret)
+		ret = nvkm_vm_boot(vm, bar_len);
+		if (ret) {
+			nvkm_vm_ref(NULL, &vm, NULL);
 			return ret;
+		}
 	}
 
 	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
@@ -123,97 +82,101 @@
 	if (ret)
 		return ret;
 
-	nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
-	nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_kmap(bar_vm->mem);
+	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_done(bar_vm->mem);
 	return 0;
 }
 
 int
-gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_bar_priv *priv;
-	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
+	struct gf100_bar *bar = gf100_bar(base);
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* BAR3 */
-	if (has_bar3) {
-		ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3);
+	if (bar->base.func->kmap) {
+		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
 		if (ret)
 			return ret;
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1);
+	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
 	if (ret)
 		return ret;
 
-	if (has_bar3) {
-		priv->base.alloc = nvkm_bar_alloc;
-		priv->base.kmap = gf100_bar_kmap;
-	}
-	priv->base.umap = gf100_bar_umap;
-	priv->base.unmap = gf100_bar_unmap;
-	priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 
-void
-gf100_bar_dtor(struct nvkm_object *object)
-{
-	struct gf100_bar_priv *priv = (void *)object;
-
-	nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
-
-	if (priv->bar[0].vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
-
-	nvkm_bar_destroy(&priv->base);
-}
-
 int
-gf100_bar_init(struct nvkm_object *object)
+gf100_bar_init(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)object;
-	int ret;
+	struct gf100_bar *bar = gf100_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	u32 addr;
 
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
 
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
+	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
 
-	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
-	if (priv->bar[0].mem)
-		nv_wr32(priv, 0x001714,
-			0xc0000000 | priv->bar[0].mem->addr >> 12);
+	if (bar->bar[0].mem) {
+		addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+		nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
+void *
+gf100_bar_dtor(struct nvkm_bar *base)
+{
+	struct gf100_bar *bar = gf100_bar(base);
+
+	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
+	nvkm_gpuobj_del(&bar->bar[1].pgd);
+	nvkm_memory_del(&bar->bar[1].mem);
+
+	if (bar->bar[0].vm) {
+		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
+	}
+	nvkm_gpuobj_del(&bar->bar[0].pgd);
+	nvkm_memory_del(&bar->bar[0].mem);
+	return bar;
+}
+
+int
+gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_bar **pbar)
+{
+	struct gf100_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+gf100_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.kmap = gf100_bar_kmap,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
 };
+
+int
+gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+}
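
The gf100_bar_new_()/gf100_bar_func pairing above replaces the old
oclass/ofuncs machinery: a const ops table plus a constructor that
allocates the subclass and points the base at the table.  A compilable
sketch of just that shape -- all names below are invented, and
calloc/free stand in for kzalloc/kfree:

#include <stdio.h>
#include <stdlib.h>

struct bar;

struct bar_func {
	void *(*dtor)(struct bar *);
	int (*init)(struct bar *);
};

struct bar {
	const struct bar_func *func;
	int index;
};

static void bar_ctor(const struct bar_func *func, int index, struct bar *bar)
{
	bar->func = func;
	bar->index = index;
}

struct mybar {
	struct bar base;	/* must stay first: see mybar_dtor() */
	int extra;		/* implementation-private state */
};

static int mybar_init(struct bar *base)
{
	printf("init subdev %d\n", base->index);
	return 0;
}

static void *mybar_dtor(struct bar *base)
{
	/* base is the first member, so this is also the allocation;
	 * nvkm dtors likewise return the container for the caller to free */
	return base;
}

static const struct bar_func mybar_func = {
	.dtor = mybar_dtor,
	.init = mybar_init,
};

static int mybar_new(int index, struct bar **pbar)
{
	struct mybar *bar;
	if (!(bar = calloc(1, sizeof(*bar))))
		return -1;
	bar_ctor(&mybar_func, index, &bar->base);
	*pbar = &bar->base;
	return 0;
}

int main(void)
{
	struct bar *bar;
	if (mybar_new(3, &bar) == 0) {
		bar->func->init(bar);
		free(bar->func->dtor(bar));
	}
	return 0;
}
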
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
new file mode 100644
index 0000000..f7dea69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -0,0 +1,23 @@
+#ifndef __GF100_BAR_H__
+#define __GF100_BAR_H__
+#define gf100_bar(p) container_of((p), struct gf100_bar, base)
+#include "priv.h"
+
+struct gf100_bar_vm {
+	struct nvkm_memory *mem;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+};
+
+struct gf100_bar {
+	struct nvkm_bar base;
+	struct gf100_bar_vm bar[2];
+};
+
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar **);
+void *gf100_bar_dtor(struct nvkm_bar *);
+int gf100_bar_oneinit(struct nvkm_bar *);
+int gf100_bar_init(struct nvkm_bar *);
+int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+#endif
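
The gf100_bar() macro defined in this header is the usual
container_of() downcast: callbacks receive the embedded nvkm_bar base
and recover the wrapping gf100_bar from it.  Self-contained sketch with
invented base/derived types:

#include <stddef.h>
#include <stdio.h>

/* Recover the containing structure from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int index; };
struct derived { struct base base; int extra; };

int main(void)
{
	struct derived d = { .base = { .index = 1 }, .extra = 42 };
	struct base *b = &d.base;		/* what callbacks receive */
	struct derived *back = container_of(b, struct derived, base);
	printf("extra = %d\n", back->extra);	/* prints 42 */
	return 0;
}
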
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 148f739..9232fab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,32 +19,22 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "priv.h"
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gk20a_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
+};
 
 int
-gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
 {
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret)
-		return ret;
-
-	bar = (struct nvkm_bar *)*pobject;
-	bar->iomap_uncached = true;
-	return 0;
+	int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+	if (ret == 0)
+		(*pbar)->iomap_uncached = true;
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
-};
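
Note how gk20a reuses the gf100 routines but simply omits .kmap from its
table; gf100_bar_oneinit() then tests bar->base.func->kmap instead of
probing BAR3's resource length.  A sketch of that "capability ==
non-NULL hook" convention (ops names invented):

#include <stddef.h>
#include <stdio.h>

struct ops {
	void (*kmap)(void);	/* present only on chips with BAR3 */
};

static void gf100ish_kmap(void) { puts("BAR3 mapped"); }

static const struct ops with_bar3    = { .kmap = gf100ish_kmap };
static const struct ops without_bar3 = { .kmap = NULL };  /* gk20a-style */

static void oneinit(const struct ops *ops)
{
	if (ops->kmap)		/* capability check: hook present? */
		ops->kmap();
	else
		puts("no BAR3 on this chip");
}

int main(void)
{
	oneinit(&with_bar3);
	oneinit(&without_bar3);
	return 0;
}
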
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb..370dcd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
 
-struct nv50_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pad;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *bar1_vm;
-	struct nvkm_gpuobj *bar1;
-	struct nvkm_vm *bar3_vm;
-	struct nvkm_gpuobj *bar3;
-};
-
-static int
-nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nv50_bar(base)->bar3_vm;
 }
 
-static int
-nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+int
+nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	struct nv50_bar *bar = nv50_bar(base);
+	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static void
-nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+nv50_bar_flush(struct nvkm_bar *base)
 {
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-}
-
-static void
-nv50_bar_flush(struct nvkm_bar *bar)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(priv, 0x00330c, 0x00000001);
-	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&bar->base.lock, flags);
+	nvkm_wr32(device, 0x00330c, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->base.lock, flags);
 }
 
-void
-g84_bar_flush(struct nvkm_bar *bar)
+int
+nv50_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(bar, 0x070000, 0x00000001);
-	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int
-nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_object *heap;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_vm *vm;
-	struct nv50_bar_priv *priv;
 	u64 start, limit;
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_HEAP, &priv->mem);
-	heap = nv_object(priv->mem);
+	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
+			      &bar->pad);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
-			      0, 0, &priv->pad);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
 	if (ret)
 		return ret;
 
 	/* BAR3 */
 	start = 0x0100000000ULL;
-	limit = start + nv_device_resource_len(device, 3);
+	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &vm);
+	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      ((limit-- - start) >> 12) * 8, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_boot(vm, limit-- - start);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar3, 0x10, 0x00000000);
-	nv_wo32(priv->bar3, 0x14, 0x00000000);
+	nvkm_kmap(bar->bar3);
+	nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar3, 0x14, 0x00000000);
+	nvkm_done(bar->bar3);
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	limit = start + nv_device_resource_len(device, 1);
+	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &vm);
+	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar1, 0x10, 0x00000000);
-	nv_wo32(priv->bar1, 0x14, 0x00000000);
-
-	priv->base.alloc = nvkm_bar_alloc;
-	priv->base.kmap = nv50_bar_kmap;
-	priv->base.umap = nv50_bar_umap;
-	priv->base.unmap = nv50_bar_unmap;
-	if (device->chipset == 0x50)
-		priv->base.flush = nv50_bar_flush;
-	else
-		priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
+	nvkm_kmap(bar->bar1);
+	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
+	nvkm_done(bar->bar1);
 	return 0;
 }
 
-static void
-nv50_bar_dtor(struct nvkm_object *object)
+int
+nv50_bar_init(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->bar1);
-	nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar3);
-	if (priv->bar3_vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->pad);
-	nvkm_gpuobj_ref(NULL, &priv->mem);
-	nvkm_bar_destroy(&priv->base);
-}
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	int i;
 
-static int
-nv50_bar_init(struct nvkm_object *object)
-{
-	struct nv50_bar_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100c80, 0x00060001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
-		nv_error(priv, "vm flush timeout\n");
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100c80, 0x00060001);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
-	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
-	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
+		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
 	return 0;
 }
 
-static int
-nv50_bar_fini(struct nvkm_object *object, bool suspend)
+void *
+nv50_bar_dtor(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	return nvkm_bar_fini(&priv->base, suspend);
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_gpuobj_del(&bar->bar1);
+	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
+	nvkm_gpuobj_del(&bar->bar3);
+	if (bar->bar3_vm) {
+		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+	}
+	nvkm_gpuobj_del(&bar->pgd);
+	nvkm_gpuobj_del(&bar->pad);
+	nvkm_gpuobj_del(&bar->mem);
+	return bar;
 }
 
-struct nvkm_oclass
-nv50_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_bar_ctor,
-		.dtor = nv50_bar_dtor,
-		.init = nv50_bar_init,
-		.fini = nv50_bar_fini,
-	},
+int
+nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
+{
+	struct nv50_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	bar->pgd_addr = pgd_addr;
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+nv50_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = nv50_bar_flush,
 };
+
+int
+nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+}
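
The nvkm_kmap()/nvkm_wo32()/nvkm_done() bracketing that replaces the
bare nv_wo32() calls above opens a mapping, writes little-endian 32-bit
words at byte offsets, and closes it so any flush happens once.
Userspace sketch with a byte array standing in for instance memory; the
0x7fc00000 word mirrors the DMA-object header written above, and the
offsets are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gpuobj {
	uint8_t data[0x100];
	int mapped;
};

static void kmap(struct gpuobj *obj) { obj->mapped = 1; }

static void wo32(struct gpuobj *obj, uint32_t off, uint32_t val)
{
	/* little-endian, like VRAM-backed instance memory */
	obj->data[off + 0] = val & 0xff;
	obj->data[off + 1] = (val >> 8) & 0xff;
	obj->data[off + 2] = (val >> 16) & 0xff;
	obj->data[off + 3] = (val >> 24) & 0xff;
}

static void done(struct gpuobj *obj) { obj->mapped = 0; /* flush point */ }

int main(void)
{
	struct gpuobj ctx;
	uint64_t start = 0x0000000000ULL;
	uint64_t limit = start + 0x0fffffffULL;

	memset(&ctx, 0, sizeof(ctx));
	kmap(&ctx);
	wo32(&ctx, 0x00, 0x7fc00000);		/* DMA-object header */
	wo32(&ctx, 0x04, (uint32_t)limit);	/* lower_32_bits(limit) */
	wo32(&ctx, 0x08, (uint32_t)start);	/* lower_32_bits(start) */
	done(&ctx);
	printf("ctx[0x04] = %02x\n", ctx.data[0x04]);	/* ff */
	return 0;
}
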
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
new file mode 100644
index 0000000..1eb764f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_BAR_H__
+#define __NV50_BAR_H__
+#define nv50_bar(p) container_of((p), struct nv50_bar, base)
+#include "priv.h"
+
+struct nv50_bar {
+	struct nvkm_bar base;
+	u32 pgd_addr;
+	struct nvkm_gpuobj *mem;
+	struct nvkm_gpuobj *pad;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *bar1_vm;
+	struct nvkm_gpuobj *bar1;
+	struct nvkm_vm *bar3_vm;
+	struct nvkm_gpuobj *bar3;
+};
+
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		  int, u32 pgd_addr, struct nvkm_bar **);
+void *nv50_bar_dtor(struct nvkm_bar *);
+int nv50_bar_oneinit(struct nvkm_bar *);
+int nv50_bar_init(struct nvkm_bar *);
+struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
+int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index aa85f61..d834ef2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,30 +1,19 @@
 #ifndef __NVKM_BAR_PRIV_H__
 #define __NVKM_BAR_PRIV_H__
+#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
 #include <subdev/bar.h>
 
-#define nvkm_bar_create(p,e,o,d)                                            \
-	nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_bar_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bar_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
+void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar *);
 
-int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void nvkm_bar_destroy(struct nvkm_bar *);
-
-void _nvkm_bar_dtor(struct nvkm_object *);
-#define _nvkm_bar_init _nvkm_subdev_init
-#define _nvkm_bar_fini _nvkm_subdev_fini
-
-int  nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
-		    struct nvkm_mem *, struct nvkm_object **);
+struct nvkm_bar_func {
+	void *(*dtor)(struct nvkm_bar *);
+	int (*oneinit)(struct nvkm_bar *);
+	int (*init)(struct nvkm_bar *);
+	struct nvkm_vm *(*kmap)(struct nvkm_bar *);
+	int  (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+	void (*flush)(struct nvkm_bar *);
+};
 
 void g84_bar_flush(struct nvkm_bar *);
-
-int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_bar_dtor(struct nvkm_object *);
-int gf100_bar_init(struct nvkm_object *);
 #endif
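
Both bar init paths in this series pulse bit 8 of register 0x000200
with nvkm_mask() -- clear, then set -- to reset the unit.  nvkm_mask()
is a plain read-modify-write that returns the old value; a sketch with
a static variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg200;		/* stand-in for the 0x000200 register */

/* Clear the masked bits, OR in the new value, return the old contents. */
static uint32_t mask(uint32_t mask_bits, uint32_t value)
{
	uint32_t temp = reg200;			/* rd32 */
	reg200 = (temp & ~mask_bits) | value;	/* wr32 */
	return temp;
}

int main(void)
{
	reg200 = 0xffffffff;
	mask(0x00000100, 0x00000000);	/* hold the unit in reset */
	mask(0x00000100, 0x00000100);	/* ...and release it      */
	printf("enable = %08x\n", reg200);
	return 0;
}
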
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 08eb03f..43f0ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -33,14 +33,14 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x04)
-			data = nv_ro16(bios, bit_M.offset + 0x03);
+			data = nvbios_rd16(bios, bit_M.offset + 0x03);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*cnt = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*cnt = nvbios_rd08(bios, data + 0x03);
 				return data;
 			default:
 				break;
@@ -59,8 +59,8 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type    = nv_ro08(bios, data + 0x04);
-		info->pointer = nv_ro16(bios, data + 0x05);
+		info->type    = nvbios_rd08(bios, data + 0x04);
+		info->pointer = nvbios_rd16(bios, data + 0x05);
 		break;
 	default:
 		break;
@@ -89,9 +89,9 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type  = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
-		info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
-		info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+		info->type  = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
+		info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
+		info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
 		return data;
 	default:
 		break;
@@ -103,12 +103,13 @@
 nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
 	       struct nvbios_M0203E *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_M0203T M0203T;
 	u8  cnt, len, idx = 0xff;
 	u32 data;
 
 	if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
-		nv_warn(bios, "M0203T not found\n");
+		nvkm_warn(subdev, "M0203T not found\n");
 		return 0x00000000;
 	}
 
@@ -119,7 +120,7 @@
 				continue;
 			return data;
 		default:
-			nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+			nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
 			return 0x00000000;
 		}
 	}
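
These vbios parsers all share the "switch (!!data * *ver)" idiom visible
above: when the table pointer is zero the product is zero, so "table
missing" and "version unsupported" fall through to the same default arm.
Tiny standalone demonstration:

#include <stdio.h>

static void parse(unsigned int data, unsigned int ver)
{
	switch (!!data * ver) {
	case 0x10:
		printf("v1.0 table at %#x\n", data);
		break;
	default:
		puts("absent or unsupported");
		break;
	}
}

int main(void)
{
	parse(0x1234, 0x10);	/* present, known version */
	parse(0x0000, 0x10);	/* missing table          */
	parse(0x1234, 0x20);	/* unknown version        */
	return 0;
}
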
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index e1a8ad5..293a6af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -34,16 +34,16 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x08)
-			data = nv_ro32(bios, bit_M.offset + 0x05);
+			data = nvbios_rd32(bios, bit_M.offset + 0x05);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
-				*snr = nv_ro08(bios, data + 0x04);
-				*cnt = nv_ro08(bios, data + 0x05);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
+				*snr = nvbios_rd08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x05);
 				return data;
 			default:
 				break;
@@ -63,7 +63,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->freq = nv_ro16(bios, data + 0x06);
+		info->freq = nvbios_rd16(bios, data + 0x06);
 		break;
 	default:
 		break;
@@ -96,7 +96,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type = nv_ro08(bios, data + 0x00) & 0x0f;
+		info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
 		return data;
 	default:
 		break;
@@ -126,7 +126,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro08(bios, data + 0x00);
+		info->data = nvbios_rd08(bios, data + 0x00);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index 3026920..95d49a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -34,16 +34,16 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x0c)
-			data = nv_ro32(bios, bit_M.offset + 0x09);
+			data = nvbios_rd32(bios, bit_M.offset + 0x09);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
 				*snr = 1;
-				*cnt = nv_ro08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x04);
 				return data;
 			default:
 				break;
@@ -78,12 +78,12 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6;
-		info->bits   =  nv_ro08(bios, data + 0x00) & 0x3f;
-		info->modulo =  nv_ro08(bios, data + 0x01);
-		info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		info->v02_07 =  nv_ro08(bios, data + 0x02) & 0x07;
-		info->v03    =  nv_ro08(bios, data + 0x03);
+		info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
+		info->bits   =  nvbios_rd08(bios, data + 0x00) & 0x3f;
+		info->modulo =  nvbios_rd08(bios, data + 0x01);
+		info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		info->v02_07 =  nvbios_rd08(bios, data + 0x02) & 0x07;
+		info->v03    =  nvbios_rd08(bios, data + 0x03);
 		return data;
 	default:
 		break;
@@ -122,7 +122,7 @@
 				u32 mask = (1ULL << M0209E.bits) - 1;
 				u16  off = bits / 8;
 				u8   mod = bits % 8;
-				info->data[i] = nv_ro32(bios, data + off);
+				info->data[i] = nvbios_rd32(bios, data + off);
 				info->data[i] = info->data[i] >> mod;
 				info->data[i] = info->data[i] & mask;
 			}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index b72edcf..3f7db3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -34,15 +34,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length > 0x63)
-			data = nv_ro32(bios, bit_P.offset + 0x60);
+			data = nvbios_rd32(bios, bit_P.offset + 0x60);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 1);
-				*cnt = nv_ro08(bios, data + 2);
+				*hdr = nvbios_rd08(bios, data + 1);
+				*cnt = nvbios_rd08(bios, data + 2);
 				*len = 4;
-				*xnr = nv_ro08(bios, data + 3);
+				*xnr = nvbios_rd08(bios, data + 3);
 				*xsz = 4;
 				return data;
 			default:
@@ -72,7 +72,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
@@ -98,7 +98,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8db204f..7953689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -53,6 +53,20 @@
 }
 
 int
+nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
+{
+	unsigned char c1, c2;
+
+	while (len--) {
+		c1 = nvbios_rd08(bios, addr++);
+		c2 = *(str++);
+		if (c1 != c2)
+			return c1 - c2;
+	}
+	return 0;
+}
+
+int
 nvbios_extend(struct nvkm_bios *bios, u32 length)
 {
 	if (bios->size < length) {
@@ -69,62 +83,29 @@
 	return 0;
 }
 
-static u8
-nvkm_bios_rd08(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bios_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = (void *)object;
-	return bios->data[addr];
+	struct nvkm_bios *bios = nvkm_bios(subdev);
+	kfree(bios->data);
+	return bios;
 }
 
-static u16
-nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le16(&bios->data[addr]);
-}
+static const struct nvkm_subdev_func
+nvkm_bios = {
+	.dtor = nvkm_bios_dtor,
+};
 
-static u32
-nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le32(&bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	bios->data[addr] = data;
-}
-
-static void
-nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le16(data, &bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le32(data, &bios->data[addr]);
-}
-
-static int
-nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 {
 	struct nvkm_bios *bios;
 	struct bit_entry bit_i;
 	int ret;
 
-	ret = nvkm_subdev_create(parent, engine, oclass, 0,
-				 "VBIOS", "bios", &bios);
-	*pobject = nv_object(bios);
-	if (ret)
-		return ret;
+	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
 
 	ret = nvbios_shadow(bios);
 	if (ret)
@@ -134,73 +115,33 @@
 	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\x7f""NV\0", 5);
 	if (bios->bmp_offset) {
-		nv_info(bios, "BMP version %x.%x\n",
-			bmp_version(bios) >> 8,
-			bmp_version(bios) & 0xff);
+		nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
+			   bmp_version(bios) >> 8,
+			   bmp_version(bios) & 0xff);
 	}
 
 	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\xb8""BIT", 5);
 	if (bios->bit_offset)
-		nv_info(bios, "BIT signature found\n");
+		nvkm_debug(&bios->subdev, "BIT signature found\n");
 
 	/* determine the vbios version number */
 	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
-		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
-		bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
-		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
-		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
-		bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
+		bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
+		bios->version.chip  = nvbios_rd08(bios, bit_i.offset + 2);
+		bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
+		bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
+		bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
 	} else
 	if (bmp_version(bios)) {
-		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
-		bios->version.chip  = nv_ro08(bios, bios->bmp_offset + 12);
-		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
-		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+		bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
+		bios->version.chip  = nvbios_rd08(bios, bios->bmp_offset + 12);
+		bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
+		bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
 	}
 
-	nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
-		bios->version.major, bios->version.chip,
-		bios->version.minor, bios->version.micro, bios->version.patch);
-
+	nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
+		  bios->version.major, bios->version.chip,
+		  bios->version.minor, bios->version.micro, bios->version.patch);
 	return 0;
 }
-
-static void
-nvkm_bios_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	kfree(bios->data);
-	nvkm_subdev_destroy(&bios->base);
-}
-
-static int
-nvkm_bios_init(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_init(&bios->base);
-}
-
-static int
-nvkm_bios_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_fini(&bios->base, suspend);
-}
-
-struct nvkm_oclass
-nvkm_bios_oclass = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_bios_ctor,
-		.dtor = nvkm_bios_dtor,
-		.init = nvkm_bios_init,
-		.fini = nvkm_bios_fini,
-		.rd08 = nvkm_bios_rd08,
-		.rd16 = nvkm_bios_rd16,
-		.rd32 = nvkm_bios_rd32,
-		.wr08 = nvkm_bios_wr08,
-		.wr16 = nvkm_bios_wr16,
-		.wr32 = nvkm_bios_wr32,
-	},
-};
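
The removed rd16/rd32 methods used get_unaligned_le16/32 over the
shadowed image; the nvbios_rd* accessors that replace the nv_ro* calls
throughout this series do the same job.  Sketch of the equivalent
plain-C reads -- the sample bytes deliberately end with 0x4edcbdcb, the
DCB signature checked later in dcb_table():

#include <stdint.h>
#include <stdio.h>

static uint8_t rd08(const uint8_t *img, uint32_t addr)
{
	return img[addr];
}

static uint16_t rd16(const uint8_t *img, uint32_t addr)
{
	return (uint16_t)(img[addr] | img[addr + 1] << 8);
}

static uint32_t rd32(const uint8_t *img, uint32_t addr)
{
	return img[addr] | img[addr + 1] << 8 | img[addr + 2] << 16 |
	       (uint32_t)img[addr + 3] << 24;
}

int main(void)
{
	const uint8_t vbios[] = {
		0x55, 0xaa, 0x10, 0x00,		/* made-up header bytes */
		0xcb, 0xbd, 0xdc, 0x4e,		/* 0x4edcbdcb, LE       */
	};
	printf("%02x %04x %08x\n",
	       rd08(vbios, 0), rd16(vbios, 2), rd32(vbios, 4));
	return 0;
}
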
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index eab54049..070ff33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -28,18 +28,18 @@
 bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
 {
 	if (likely(bios->bit_offset)) {
-		u8  entries = nv_ro08(bios, bios->bit_offset + 10);
+		u8  entries = nvbios_rd08(bios, bios->bit_offset + 10);
 		u32 entry   = bios->bit_offset + 12;
 		while (entries--) {
-			if (nv_ro08(bios, entry + 0) == id) {
-				bit->id      = nv_ro08(bios, entry + 0);
-				bit->version = nv_ro08(bios, entry + 1);
-				bit->length  = nv_ro16(bios, entry + 2);
-				bit->offset  = nv_ro16(bios, entry + 4);
+			if (nvbios_rd08(bios, entry + 0) == id) {
+				bit->id      = nvbios_rd08(bios, entry + 0);
+				bit->version = nvbios_rd08(bios, entry + 1);
+				bit->length  = nvbios_rd16(bios, entry + 2);
+				bit->offset  = nvbios_rd16(bios, entry + 4);
 				return 0;
 			}
 
-			entry += nv_ro08(bios, bios->bit_offset + 9);
+			entry += nvbios_rd08(bios, bios->bit_offset + 9);
 		}
 
 		return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 12e9585..3756ec9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -34,17 +34,17 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			boost = nv_ro16(bios, bit_P.offset + 0x30);
+			boost = nvbios_rd16(bios, bit_P.offset + 0x30);
 
 		if (boost) {
-			*ver = nv_ro08(bios, boost + 0);
+			*ver = nvbios_rd08(bios, boost + 0);
 			switch (*ver) {
 			case 0x11:
-				*hdr = nv_ro08(bios, boost + 1);
-				*cnt = nv_ro08(bios, boost + 5);
-				*len = nv_ro08(bios, boost + 2);
-				*snr = nv_ro08(bios, boost + 4);
-				*ssz = nv_ro08(bios, boost + 3);
+				*hdr = nvbios_rd08(bios, boost + 1);
+				*cnt = nvbios_rd08(bios, boost + 5);
+				*len = nvbios_rd08(bios, boost + 2);
+				*snr = nvbios_rd08(bios, boost + 4);
+				*ssz = nvbios_rd08(bios, boost + 3);
 				return boost;
 			default:
 				break;
@@ -78,9 +78,9 @@
 	u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->min    =  nv_ro16(bios, data + 0x02) * 1000;
-		info->max    =  nv_ro16(bios, data + 0x04) * 1000;
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->min    =  nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max    =  nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
@@ -117,10 +117,10 @@
 	data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->domain  = nv_ro08(bios, data + 0x00);
-		info->percent = nv_ro08(bios, data + 0x01);
-		info->min     = nv_ro16(bios, data + 0x02) * 1000;
-		info->max     = nv_ro16(bios, data + 0x04) * 1000;
+		info->domain  = nvbios_rd08(bios, data + 0x00);
+		info->percent = nvbios_rd08(bios, data + 0x01);
+		info->min     = nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max     = nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 706a165..2768234 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -30,12 +30,12 @@
 {
 	u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
-		u32 data = nv_ro16(bios, dcb + 0x14);
+		u32 data = nvbios_rd16(bios, dcb + 0x14);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*cnt = nv_ro08(bios, data + 2);
-			*len = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*cnt = nvbios_rd08(bios, data + 2);
+			*len = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
@@ -77,18 +77,18 @@
 	switch (!!data * *ver) {
 	case 0x30:
 	case 0x40:
-		info->type     =  nv_ro08(bios, data + 0x00);
-		info->location =  nv_ro08(bios, data + 0x01) & 0x0f;
-		info->hpd      = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
-		info->dp       = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+		info->type     =  nvbios_rd08(bios, data + 0x00);
+		info->location =  nvbios_rd08(bios, data + 0x01) & 0x0f;
+		info->hpd      = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
+		info->dp       = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
 		if (*len < 4)
 			return data;
-		info->hpd     |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
-		info->dp      |=  nv_ro08(bios, data + 0x02) & 0x0c;
-		info->di       = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
-		info->hpd     |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
-		info->sr       = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
-		info->lcdid    = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
+		info->dp      |=  nvbios_rd08(bios, data + 0x02) & 0x0c;
+		info->di       = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
+		info->sr       = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+		info->lcdid    = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 16f7ad8..32e0162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -34,17 +34,17 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			cstep = nv_ro16(bios, bit_P.offset + 0x34);
+			cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
 
 		if (cstep) {
-			*ver = nv_ro08(bios, cstep + 0);
+			*ver = nvbios_rd08(bios, cstep + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, cstep + 1);
-				*cnt = nv_ro08(bios, cstep + 3);
-				*len = nv_ro08(bios, cstep + 2);
-				*xnr = nv_ro08(bios, cstep + 5);
-				*xsz = nv_ro08(bios, cstep + 4);
+				*hdr = nvbios_rd08(bios, cstep + 1);
+				*cnt = nvbios_rd08(bios, cstep + 3);
+				*len = nvbios_rd08(bios, cstep + 2);
+				*xnr = nvbios_rd08(bios, cstep + 5);
+				*xsz = nvbios_rd08(bios, cstep + 4);
 				return cstep;
 			default:
 				break;
@@ -75,8 +75,8 @@
 	u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->index   = nv_ro08(bios, data + 0x03);
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->index   = nvbios_rd08(bios, data + 0x03);
 	}
 	return data;
 }
@@ -113,10 +113,10 @@
 	u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->freq    = nv_ro16(bios, data + 0x00) * 1000;
-		info->unkn[0] = nv_ro08(bios, data + 0x02);
-		info->unkn[1] = nv_ro08(bios, data + 0x03);
-		info->voltage = nv_ro08(bios, data + 0x04);
+		info->freq    = nvbios_rd16(bios, data + 0x00) * 1000;
+		info->unkn[0] = nvbios_rd08(bios, data + 0x02);
+		info->unkn[1] = nvbios_rd08(bios, data + 0x03);
+		info->voltage = nvbios_rd08(bios, data + 0x04);
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8d78140..8304b80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -24,38 +24,37 @@
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
-#include <core/device.h>
-
 u16
 dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	struct nvkm_device *device = nv_device(bios);
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u16 dcb = 0x0000;
 
 	if (device->card_type > NV_04)
-		dcb = nv_ro16(bios, 0x36);
+		dcb = nvbios_rd16(bios, 0x36);
 	if (!dcb) {
-		nv_warn(bios, "DCB table not found\n");
+		nvkm_warn(subdev, "DCB table not found\n");
 		return dcb;
 	}
 
-	*ver = nv_ro08(bios, dcb);
+	*ver = nvbios_rd08(bios, dcb);
 
 	if (*ver >= 0x42) {
-		nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
+		nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
 		return 0x0000;
 	} else
 	if (*ver >= 0x30) {
-		if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
-			*hdr = nv_ro08(bios, dcb + 1);
-			*cnt = nv_ro08(bios, dcb + 2);
-			*len = nv_ro08(bios, dcb + 3);
+		if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
+			*hdr = nvbios_rd08(bios, dcb + 1);
+			*cnt = nvbios_rd08(bios, dcb + 2);
+			*len = nvbios_rd08(bios, dcb + 3);
 			return dcb;
 		}
 	} else
 	if (*ver >= 0x20) {
-		if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 8;
 			*cnt = (i2c - dcb) / 8;
 			*len = 8;
@@ -63,8 +62,8 @@
 		}
 	} else
 	if (*ver >= 0x15) {
-		if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 4;
 			*cnt = (i2c - dcb) / 10;
 			*len = 10;
@@ -88,11 +87,11 @@
 		 *
 		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
 		 */
-		nv_warn(bios, "DCB contains no useful data\n");
+		nvkm_debug(subdev, "DCB contains no useful data\n");
 		return 0x0000;
 	}
 
-	nv_warn(bios, "DCB header validation failed\n");
+	nvkm_warn(subdev, "DCB header validation failed\n");
 	return 0x0000;
 }
 
@@ -126,7 +125,7 @@
 	memset(outp, 0x00, sizeof(*outp));
 	if (dcb) {
 		if (*ver >= 0x20) {
-			u32 conn = nv_ro32(bios, dcb + 0x00);
+			u32 conn = nvbios_rd32(bios, dcb + 0x00);
 			outp->or        = (conn & 0x0f000000) >> 24;
 			outp->location  = (conn & 0x00300000) >> 20;
 			outp->bus       = (conn & 0x000f0000) >> 16;
@@ -140,7 +139,7 @@
 		}
 
 		if (*ver >= 0x40) {
-			u32 conf = nv_ro32(bios, dcb + 0x04);
+			u32 conf = nvbios_rd32(bios, dcb + 0x04);
 			switch (outp->type) {
 			case DCB_OUTPUT_DP:
 				switch (conf & 0x00e00000) {
@@ -156,20 +155,19 @@
 					break;
 				}
 
-				outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-				if (*ver < 0x41) {
-					switch (outp->dpconf.link_nr) {
-					case 0x0f:
-						outp->dpconf.link_nr = 4;
-						break;
-					case 0x03:
-						outp->dpconf.link_nr = 2;
-						break;
-					case 0x01:
-					default:
-						outp->dpconf.link_nr = 1;
-						break;
-					}
+				switch ((conf & 0x0f000000) >> 24) {
+				case 0xf:
+				case 0x4:
+					outp->dpconf.link_nr = 4;
+					break;
+				case 0x3:
+				case 0x2:
+					outp->dpconf.link_nr = 2;
+					break;
+				case 0x1:
+				default:
+					outp->dpconf.link_nr = 1;
+					break;
 				}
 
 				/* fall-through... */
@@ -215,14 +213,14 @@
 	u16 outp;
 
 	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
-		if (nv_ro32(bios, outp) == 0x00000000)
+		if (nvbios_rd32(bios, outp) == 0x00000000)
 			break; /* seen on an NV11 with DCB v1.5 */
-		if (nv_ro32(bios, outp) == 0xffffffff)
+		if (nvbios_rd32(bios, outp) == 0xffffffff)
 			break; /* seen on an NV17 with DCB v2.0 */
 
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
 			continue;
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
 			break;
 
 		ret = exec(bios, data, idx, outp);
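
dcb_outp_foreach() above walks fixed-size entries, skipping unused slots
and stopping at an end-of-list marker or when the callback asks.  A
sketch of that walk, assuming the usual DCB_OUTPUT_UNUSED (0x0f) and
DCB_OUTPUT_EOL (0x0e) encodings:

#include <stdio.h>

#define OUTPUT_UNUSED	0x0f	/* assumed DCB_OUTPUT_UNUSED encoding */
#define OUTPUT_EOL	0x0e	/* assumed DCB_OUTPUT_EOL encoding    */

static int exec(int idx, unsigned char type)
{
	printf("output %d: type %02x\n", idx, type);
	return 0;	/* non-zero would abort the walk */
}

int main(void)
{
	const unsigned char entries[] =
		{ 0x01, OUTPUT_UNUSED, 0x02, OUTPUT_EOL, 0x03 };
	int idx;

	for (idx = 0; idx < (int)sizeof(entries); idx++) {
		unsigned char type = entries[idx];
		if (type == OUTPUT_UNUSED)
			continue;	/* hole in the table */
		if (type == OUTPUT_EOL)
			break;		/* terminator        */
		if (exec(idx, type))
			break;
	}
	return 0;
}
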
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 262c410..a5e9213 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -33,17 +33,17 @@
 
 	if (!bit_entry(bios, 'U', &U)) {
 		if (U.version == 1) {
-			u16 data = nv_ro16(bios, U.offset);
+			u16 data = nvbios_rd16(bios, U.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x20:
 				case 0x21:
 				case 0x22:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
-					*sub = nv_ro08(bios, data + 0x04);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
+					*sub = nvbios_rd08(bios, data + 0x04);
 					return data;
 				default:
 					break;
@@ -72,7 +72,7 @@
 {
 	u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
 	if (data && *len >= 2) {
-		info->data = nv_ro16(bios, data + 0);
+		info->data = nvbios_rd16(bios, data + 0);
 		return data;
 	}
 	return 0x0000;
@@ -85,7 +85,7 @@
 	struct nvbios_disp info;
 	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
 	if (data) {
-		*cnt = nv_ro08(bios, info.data + 0x05);
+		*cnt = nvbios_rd08(bios, info.data + 0x05);
 		*len = 0x06;
 		data = info.data;
 	}
@@ -98,15 +98,15 @@
 {
 	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
 	if (data && *hdr >= 0x0a) {
-		info->type      = nv_ro16(bios, data + 0x00);
-		info->mask      = nv_ro32(bios, data + 0x02);
+		info->type      = nvbios_rd16(bios, data + 0x00);
+		info->mask      = nvbios_rd32(bios, data + 0x02);
 		if (*ver <= 0x20) /* match any link */
 			info->mask |= 0x00c0;
-		info->script[0] = nv_ro16(bios, data + 0x06);
-		info->script[1] = nv_ro16(bios, data + 0x08);
+		info->script[0] = nvbios_rd16(bios, data + 0x06);
+		info->script[1] = nvbios_rd16(bios, data + 0x08);
 		info->script[2] = 0x0000;
 		if (*hdr >= 0x0c)
-			info->script[2] = nv_ro16(bios, data + 0x0a);
+			info->script[2] = nvbios_rd16(bios, data + 0x0a);
 		return data;
 	}
 	return 0x0000;
@@ -141,9 +141,9 @@
 {
 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
 	if (data) {
-		info->match     = nv_ro16(bios, data + 0x00);
-		info->clkcmp[0] = nv_ro16(bios, data + 0x02);
-		info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+		info->match     = nvbios_rd16(bios, data + 0x00);
+		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
+		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
 	}
 	return data;
 }
@@ -164,8 +164,8 @@
 nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
 {
 	while (cmp) {
-		if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
-			return  nv_ro16(bios, cmp + 0x02);
+		if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
+			return  nvbios_rd16(bios, cmp + 0x02);
 		cmp += 0x04;
 	}
 	return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 95970fa..0533247 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -32,17 +32,17 @@
 
 	if (!bit_entry(bios, 'd', &d)) {
 		if (d.version == 1 && d.length >= 2) {
-			u16 data = nv_ro16(bios, d.offset);
+			u16 data = nvbios_rd16(bios, d.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x21:
 				case 0x30:
 				case 0x40:
 				case 0x41:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
 					return data;
 				default:
 					break;
@@ -60,17 +60,17 @@
 {
 	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+		u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
 		switch (*ver * !!outp) {
 		case 0x21:
 		case 0x30:
-			*hdr = nv_ro08(bios, data + 0x04);
-			*len = nv_ro08(bios, data + 0x05);
-			*cnt = nv_ro08(bios, outp + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
+			*len = nvbios_rd08(bios, data + 0x05);
+			*cnt = nvbios_rd08(bios, outp + 0x04);
 			break;
 		case 0x40:
 		case 0x41:
-			*hdr = nv_ro08(bios, data + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
 			*cnt = 0;
 			*len = 0;
 			break;
@@ -91,31 +91,31 @@
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
-		info->type = nv_ro16(bios, data + 0x00);
-		info->mask = nv_ro16(bios, data + 0x02);
+		info->type = nvbios_rd16(bios, data + 0x00);
+		info->mask = nvbios_rd16(bios, data + 0x02);
 		switch (*ver) {
 		case 0x21:
 		case 0x30:
-			info->flags     = nv_ro08(bios, data + 0x05);
-			info->script[0] = nv_ro16(bios, data + 0x06);
-			info->script[1] = nv_ro16(bios, data + 0x08);
-			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+			info->flags     = nvbios_rd08(bios, data + 0x05);
+			info->script[0] = nvbios_rd16(bios, data + 0x06);
+			info->script[1] = nvbios_rd16(bios, data + 0x08);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
 			if (*len >= 0x0f) {
-				info->script[2] = nv_ro16(bios, data + 0x0c);
-				info->script[3] = nv_ro16(bios, data + 0x0e);
+				info->script[2] = nvbios_rd16(bios, data + 0x0c);
+				info->script[3] = nvbios_rd16(bios, data + 0x0e);
 			}
 			if (*len >= 0x11)
-				info->script[4] = nv_ro16(bios, data + 0x10);
+				info->script[4] = nvbios_rd16(bios, data + 0x10);
 			break;
 		case 0x40:
 		case 0x41:
-			info->flags     = nv_ro08(bios, data + 0x04);
-			info->script[0] = nv_ro16(bios, data + 0x05);
-			info->script[1] = nv_ro16(bios, data + 0x07);
-			info->lnkcmp    = nv_ro16(bios, data + 0x09);
-			info->script[2] = nv_ro16(bios, data + 0x0b);
-			info->script[3] = nv_ro16(bios, data + 0x0d);
-			info->script[4] = nv_ro16(bios, data + 0x0f);
+			info->flags     = nvbios_rd08(bios, data + 0x04);
+			info->script[0] = nvbios_rd16(bios, data + 0x05);
+			info->script[1] = nvbios_rd16(bios, data + 0x07);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x09);
+			info->script[2] = nvbios_rd16(bios, data + 0x0b);
+			info->script[3] = nvbios_rd16(bios, data + 0x0d);
+			info->script[4] = nvbios_rd16(bios, data + 0x0f);
 			break;
 		default:
 			data = 0x0000;
@@ -147,8 +147,9 @@
 	if (*ver >= 0x40) {
 		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
 		*hdr = *hdr + (*len * * cnt);
-		*len = nv_ro08(bios, outp + 0x06);
-		*cnt = nv_ro08(bios, outp + 0x07);
+		*len = nvbios_rd08(bios, outp + 0x06);
+		*cnt = nvbios_rd08(bios, outp + 0x07) *
+		       nvbios_rd08(bios, outp + 0x05);
 	}
 
 	if (idx < *cnt)
@@ -167,17 +168,17 @@
 	if (data) {
 		switch (*ver) {
 		case 0x21:
-			info->dc    = nv_ro08(bios, data + 0x02);
-			info->pe    = nv_ro08(bios, data + 0x03);
-			info->tx_pu = nv_ro08(bios, data + 0x04);
+			info->dc    = nvbios_rd08(bios, data + 0x02);
+			info->pe    = nvbios_rd08(bios, data + 0x03);
+			info->tx_pu = nvbios_rd08(bios, data + 0x04);
 			break;
 		case 0x30:
 		case 0x40:
 		case 0x41:
-			info->pc    = nv_ro08(bios, data + 0x00);
-			info->dc    = nv_ro08(bios, data + 0x01);
-			info->pe    = nv_ro08(bios, data + 0x02);
-			info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
+			info->pc    = nvbios_rd08(bios, data + 0x00);
+			info->dc    = nvbios_rd08(bios, data + 0x01);
+			info->pe    = nvbios_rd08(bios, data + 0x02);
+			info->tx_pu = nvbios_rd08(bios, data + 0x03);
 			break;
 		default:
 			data = 0x0000;
@@ -196,17 +197,15 @@
 	u16 data;
 
 	if (*ver >= 0x30) {
-		/*XXX: there's a second set of these on at least 4.1, that
-		 *     i've witnessed nvidia using instead of the first
-		 *     on gm204.  figure out what/why
-		 */
 		const u8 vsoff[] = { 0, 4, 7, 9 };
 		idx = (pc * 10) + vsoff[vs] + pe;
+		if (*ver >= 0x40 && *hdr >= 0x12)
+			idx += nvbios_rd08(bios, outp + 0x11) * 40;
 	} else {
 		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
 						  ver, hdr, cnt, len))) {
-			if (nv_ro08(bios, data + 0x00) == vs &&
-			    nv_ro08(bios, data + 0x01) == pe)
+			if (nvbios_rd08(bios, data + 0x00) == vs &&
+			    nvbios_rd08(bios, data + 0x01) == pe)
 				break;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index a8503a1..c9e6f6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -35,14 +35,14 @@
 	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
 		return 0x0000;
 
-	extdev = nv_ro16(bios, dcb + 18);
+	extdev = nvbios_rd16(bios, dcb + 18);
 	if (!extdev)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, extdev + 0);
-	*hdr = nv_ro08(bios, extdev + 1);
-	*cnt = nv_ro08(bios, extdev + 2);
-	*len = nv_ro08(bios, extdev + 3);
+	*ver = nvbios_rd08(bios, extdev + 0);
+	*hdr = nvbios_rd08(bios, extdev + 1);
+	*cnt = nvbios_rd08(bios, extdev + 2);
+	*len = nvbios_rd08(bios, extdev + 3);
 	return extdev + *hdr;
 }
 
@@ -60,9 +60,9 @@
 extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
 		   struct nvbios_extdev_func *entry)
 {
-	entry->type = nv_ro08(bios, offset + 0);
-	entry->addr = nv_ro08(bios, offset + 1);
-	entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+	entry->type = nvbios_rd08(bios, offset + 0);
+	entry->addr = nvbios_rd08(bios, offset + 1);
+	entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 8dba70d..43006db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -33,15 +33,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length >= 0x5a)
-			fan = nv_ro16(bios, bit_P.offset + 0x58);
+			fan = nvbios_rd16(bios, bit_P.offset + 0x58);
 
 		if (fan) {
-			*ver = nv_ro08(bios, fan + 0);
+			*ver = nvbios_rd08(bios, fan + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, fan + 1);
-				*len = nv_ro08(bios, fan + 2);
-				*cnt = nv_ro08(bios, fan + 3);
+				*hdr = nvbios_rd08(bios, fan + 1);
+				*len = nvbios_rd08(bios, fan + 2);
+				*cnt = nvbios_rd08(bios, fan + 3);
 				return fan;
 			default:
 				break;
@@ -69,7 +69,7 @@
 
 	u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
 	if (data) {
-		u8 type = nv_ro08(bios, data + 0x00);
+		u8 type = nvbios_rd08(bios, data + 0x00);
 		switch (type) {
 		case 0:
 			fan->type = NVBIOS_THERM_FAN_TOGGLE;
@@ -83,10 +83,10 @@
 			fan->type = NVBIOS_THERM_FAN_UNK;
 		}
 
-		fan->min_duty = nv_ro08(bios, data + 0x02);
-		fan->max_duty = nv_ro08(bios, data + 0x03);
+		fan->min_duty = nvbios_rd08(bios, data + 0x02);
+		fan->max_duty = nvbios_rd08(bios, data + 0x03);
 
-		fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
+		fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;
 	}
 
 	return data;
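
Note the 0xffffff mask on pwm_freq above: the field is a 3-byte
little-endian value, so the driver does a 32-bit read and discards the top
byte.  A toy equivalent of that masked read (array contents invented):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	rd24(const uint8_t *img, size_t off)
	{
		return img[off] | img[off + 1] << 8 | img[off + 2] << 16;
	}

	int
	main(void)
	{
		const uint8_t img[] = { 0x40, 0x42, 0x0f, 0xff }; /* 1000000, junk */

		printf("pwm_freq = %lu\n", (unsigned long)rd24(img, 0));
		return 0;
	}
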
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 8ce154d..2107b55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -33,22 +33,22 @@
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x30 && *hdr >= 0x0c)
-			data = nv_ro16(bios, dcb + 0x0a);
+			data = nvbios_rd16(bios, dcb + 0x0a);
 		else
-		if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
-			data = nv_ro16(bios, dcb - 0x0f);
+		if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
+			data = nvbios_rd16(bios, dcb - 0x0f);
 
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			if (*ver < 0x30) {
 				*hdr = 3;
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x01);
 			} else
 			if (*ver <= 0x41) {
-				*hdr = nv_ro08(bios, data + 0x01);
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x03);
 			} else {
 				data = 0x0000;
 			}
@@ -81,7 +81,7 @@
 	u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
 	if (data) {
 		if (*ver < 0x40) {
-			u16 info = nv_ro16(bios, data);
+			u16 info = nvbios_rd16(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x001f) >> 0,
 				.func = (info & 0x07e0) >> 5,
@@ -91,7 +91,7 @@
 			};
 		} else
 		if (*ver < 0x41) {
-			u32 info = nv_ro32(bios, data);
+			u32 info = nvbios_rd32(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000001f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -100,8 +100,8 @@
 				.param = !!(info & 0x80000000),
 			};
 		} else {
-			u32 info = nv_ro32(bios, data + 0);
-			u8 info1 = nv_ro32(bios, data + 4);
+			u32 info = nvbios_rd32(bios, data + 0);
+			u8 info1 = nvbios_rd32(bios, data + 4);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000003f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -131,8 +131,8 @@
 	/* DCB 2.2, fixed TVDAC GPIO data */
 	if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
 		if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
-			u8 conf = nv_ro08(bios, data - 5);
-			u8 addr = nv_ro08(bios, data - 4);
+			u8 conf = nvbios_rd08(bios, data - 5);
+			u8 addr = nvbios_rd08(bios, data - 4);
 			if (conf & 0x01) {
 				*gpio = (struct dcb_gpio_func) {
 					.func = DCB_GPIO_TVDAC0,
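
Each dcb_gpio_func above is decoded as mask-and-shift slices of a single
word, with the field map widening as the DCB version increases.  A sketch
of the pre-v4.0 16-bit unpack; only the line and func widths are visible in
the hunk, so the log0/log1 fields below are assumptions for illustration:

	#include <stdint.h>
	#include <stdio.h>

	struct gpio_func {
		uint8_t line, func, log0, log1;
	};

	static struct gpio_func
	gpio_unpack(uint16_t info)
	{
		return (struct gpio_func) {
			.line = (info & 0x001f) >> 0,	/* bits 0:4, per the diff */
			.func = (info & 0x07e0) >> 5,	/* bits 5:10, per the diff */
			.log0 = (info & 0x1800) >> 11,	/* assumed */
			.log1 = (info & 0x6000) >> 13,	/* assumed */
		};
	}

	int
	main(void)
	{
		struct gpio_func f = gpio_unpack(0x0841);

		printf("line %d func %d log0 %d log1 %d\n",
		       f.line, f.func, f.log0, f.log1);
		return 0;
	}
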
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index c4e1f08..0fc60be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -32,21 +32,21 @@
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x15)
-			i2c = nv_ro16(bios, dcb + 2);
+			i2c = nvbios_rd16(bios, dcb + 2);
 		if (*ver >= 0x30)
-			i2c = nv_ro16(bios, dcb + 4);
+			i2c = nvbios_rd16(bios, dcb + 4);
 	}
 
 	if (i2c && *ver >= 0x42) {
-		nv_warn(bios, "ccb %02x not supported\n", *ver);
+		nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
 		return 0x0000;
 	}
 
 	if (i2c && *ver >= 0x30) {
-		*ver = nv_ro08(bios, i2c + 0);
-		*hdr = nv_ro08(bios, i2c + 1);
-		*cnt = nv_ro08(bios, i2c + 2);
-		*len = nv_ro08(bios, i2c + 3);
+		*ver = nvbios_rd08(bios, i2c + 0);
+		*hdr = nvbios_rd08(bios, i2c + 1);
+		*cnt = nvbios_rd08(bios, i2c + 2);
+		*len = nvbios_rd08(bios, i2c + 3);
 	} else {
 		*ver = *ver; /* use DCB version */
 		*hdr = 0;
@@ -70,13 +70,14 @@
 int
 dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, len;
 	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
 	if (ent) {
 		if (ver >= 0x41) {
-			u32 ent_value = nv_ro32(bios, ent);
-			u8 i2c_port = (ent_value >> 27) & 0x1f;
-			u8 dpaux_port = (ent_value >> 22) & 0x1f;
+			u32 ent_value = nvbios_rd32(bios, ent);
+			u8 i2c_port = (ent_value >> 0) & 0x1f;
+			u8 dpaux_port = (ent_value >> 5) & 0x1f;
 			/* value 0x1f means unused according to DCB 4.x spec */
 			if (i2c_port == 0x1f && dpaux_port == 0x1f)
 				info->type = DCB_I2C_UNUSED;
@@ -84,9 +85,9 @@
 				info->type = DCB_I2C_PMGR;
 		} else
 		if (ver >= 0x30) {
-			info->type = nv_ro08(bios, ent + 0x03);
+			info->type = nvbios_rd08(bios, ent + 0x03);
 		} else {
-			info->type = nv_ro08(bios, ent + 0x03) & 0x07;
+			info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
 			if (info->type == 0x07)
 				info->type = DCB_I2C_UNUSED;
 		}
@@ -98,27 +99,27 @@
 
 		switch (info->type) {
 		case DCB_I2C_NV04_BIT:
-			info->drive = nv_ro08(bios, ent + 0);
-			info->sense = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 0);
+			info->sense = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NV4E_BIT:
-			info->drive = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NVIO_BIT:
-			info->drive = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
-				info->share = nv_ro08(bios, ent + 1) >> 1;
+			info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
+				info->share = nvbios_rd08(bios, ent + 1) >> 1;
 			return 0;
 		case DCB_I2C_NVIO_AUX:
-			info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
+			info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
 					info->share = info->auxch;
 			return 0;
 		case DCB_I2C_PMGR:
-			info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+			info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
 			if (info->drive == 0x1f)
 				info->drive = DCB_I2C_UNUSED;
-			info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+			info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
 			if (info->auxch == 0x1f)
 				info->auxch = DCB_I2C_UNUSED;
 			info->share = info->auxch;
@@ -126,7 +127,7 @@
 		case DCB_I2C_UNUSED:
 			return 0;
 		default:
-			nv_warn(bios, "unknown i2c type %d\n", info->type);
+			nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
 			info->type = DCB_I2C_UNUSED;
 			return 0;
 		}
@@ -136,21 +137,21 @@
 		/* BMP (from v4.0) has i2c info in the structure; it's in a
 		 * fixed location on earlier VBIOS.
 		 */
-		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+		if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
 			ent = 0x0048;
 		else
 			ent = 0x0036 + bios->bmp_offset;
 
 		if (idx == 0) {
-			info->drive = nv_ro08(bios, ent + 4);
+			info->drive = nvbios_rd08(bios, ent + 4);
 			if (!info->drive) info->drive = 0x3f;
-			info->sense = nv_ro08(bios, ent + 5);
+			info->sense = nvbios_rd08(bios, ent + 5);
 			if (!info->sense) info->sense = 0x3e;
 		} else
 		if (idx == 1) {
-			info->drive = nv_ro08(bios, ent + 6);
+			info->drive = nvbios_rd08(bios, ent + 6);
 			if (!info->drive) info->drive = 0x37;
-			info->sense = nv_ro08(bios, ent + 7);
+			info->sense = nvbios_rd08(bios, ent + 7);
 			if (!info->sense) info->sense = 0x36;
 		}
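
The first dcb_i2c_parse() hunk above is a behavioral fix, not just a
rename: the DCB 4.1 i2c and aux port numbers move from bits 27:31 and 22:26
down to bits 0:4 and 5:9 of the entry word.  A toy decode of one such
entry, using the 0x1f "unused" sentinel the in-line comment attributes to
the DCB 4.x spec:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t ent = 0x000003ff;	/* both 5-bit fields all-ones */
		uint8_t i2c_port   = (ent >> 0) & 0x1f;
		uint8_t dpaux_port = (ent >> 5) & 0x1f;

		if (i2c_port == 0x1f && dpaux_port == 0x1f)
			printf("entry unused\n");
		else
			printf("i2c %d, aux %d\n", i2c_port, dpaux_port);
		return 0;
	}
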
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 1815540..74b14cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -29,20 +29,21 @@
 static bool
 nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_pcirT pcir;
 	struct nvbios_npdeT npde;
 	u8  ver;
 	u16 hdr;
 	u32 data;
 
-	switch ((data = nv_ro16(bios, image->base + 0x00))) {
+	switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
 	case 0xaa55:
 	case 0xbb77:
 	case 0x4e56: /* NV */
 		break;
 	default:
-		nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
-			 image->base, data);
+		nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
+			   image->base, data);
 		return false;
 	}
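
nvbios_imagen() gates all further parsing on the 16-bit signature at the
image base.  A trimmed-down version of that check; 0xaa55 is the standard
PCI option-ROM signature, 0x4e56 is ASCII "NV", and the meaning of 0xbb77
is not stated in the diff:

	#include <stdbool.h>
	#include <stdint.h>

	static bool
	rom_sig_ok(uint16_t sig)
	{
		switch (sig) {
		case 0xaa55:	/* PCI option ROM */
		case 0xbb77:
		case 0x4e56:	/* "NV" */
			return true;
		default:
			return false;
		}
	}

	int
	main(void)
	{
		return rom_sig_ok(0xaa55) ? 0 : 1;
	}
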
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f4611e3..65af314 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -31,18 +31,18 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/ramcfg.h>
 
-#include <core/device.h>
 #include <subdev/devinit.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
 
 #define bioslog(lvl, fmt, args...) do {                                        \
-	nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset,            \
-		  init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args);   \
+	nvkm_printk(init->subdev, lvl, info, "0x%04x[%c]: "fmt,                \
+		    init->offset, init_exec(init) ?                            \
+		    '0' + (init->nested - 1) : ' ', ##args);                   \
 } while(0)
 #define cont(fmt, args...) do {                                                \
-	if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE)                      \
+	if (init->subdev->debug >= NV_DBG_TRACE)                               \
 		printk(fmt, ##args);                                           \
 } while(0)
 #define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
@@ -141,7 +141,7 @@
 static inline u32
 init_nvreg(struct nvbios_init *init, u32 reg)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	/* C51 (at least) sometimes has the lower bits set which the VBIOS
 	 * interprets to mean that access needs to go through certain IO
@@ -154,7 +154,7 @@
 	/* GF8+ display scripts need register addresses mangled a bit to
 	 * select a specific CRTC/OR
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50) {
+	if (init->bios->subdev.device->card_type >= NV_50) {
 		if (reg & 0x80000000) {
 			reg += init_crtc(init) * 0x800;
 			reg &= ~0x80000000;
@@ -173,35 +173,36 @@
 	if (reg & ~0x00fffffc)
 		warn("unknown bits in register 0x%08x\n", reg);
 
-	if (devinit->mmio)
-		reg = devinit->mmio(devinit, reg);
-	return reg;
+	return nvkm_devinit_mmio(devinit, reg);
 }
 
 static u32
 init_rd32(struct nvbios_init *init, u32 reg)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		return nv_rd32(init->subdev, reg);
+		return nvkm_rd32(device, reg);
 	return 0x00000000;
 }
 
 static void
 init_wr32(struct nvbios_init *init, u32 reg, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		nv_wr32(init->subdev, reg, val);
+		nvkm_wr32(device, reg, val);
 }
 
 static u32
 init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init)) {
-		u32 tmp = nv_rd32(init->subdev, reg);
-		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+		u32 tmp = nvkm_rd32(device, reg);
+		nvkm_wr32(device, reg, (tmp & ~mask) | val);
 		return tmp;
 	}
 	return 0x00000000;
@@ -211,7 +212,7 @@
 init_rdport(struct nvbios_init *init, u16 port)
 {
 	if (init_exec(init))
-		return nv_rdport(init->subdev, init->crtc, port);
+		return nvkm_rdport(init->subdev->device, init->crtc, port);
 	return 0x00;
 }
 
@@ -219,7 +220,7 @@
 init_wrport(struct nvbios_init *init, u16 port, u8 value)
 {
 	if (init_exec(init))
-		nv_wrport(init->subdev, init->crtc, port, value);
+		nvkm_wrport(init->subdev->device, init->crtc, port, value);
 }
 
 static u8
@@ -228,7 +229,7 @@
 	struct nvkm_subdev *subdev = init->subdev;
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		return nv_rdvgai(subdev, head, port, index);
+		return nvkm_rdvgai(subdev->device, head, port, index);
 	}
 	return 0x00;
 }
@@ -236,80 +237,80 @@
 static void
 init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
 {
+	struct nvkm_device *device = init->subdev->device;
+
 	/* force head 0 for updates to cr44, it only exists on first head */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44)
 			init->crtc = 0;
 	}
 
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		nv_wrvgai(init->subdev, head, port, index, value);
+		nvkm_wrvgai(device, head, port, index, value);
 	}
 
 	/* select head 1 if cr44 write selected it */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44 && value == 3)
 			init->crtc = 1;
 	}
 }
 
-static struct nvkm_i2c_port *
+static struct i2c_adapter *
 init_i2c(struct nvbios_init *init, int index)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(init->bios);
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	struct nvkm_i2c_bus *bus;
 
 	if (index == 0xff) {
-		index = NV_I2C_DEFAULT(0);
+		index = NVKM_I2C_BUS_PRI;
 		if (init->outp && init->outp->i2c_upper_default)
-			index = NV_I2C_DEFAULT(1);
-	} else
-	if (index < 0) {
-		if (!init->outp) {
-			if (init_exec(init))
-				error("script needs output for i2c\n");
-			return NULL;
-		}
-
-		if (index == -2 && init->outp->location) {
-			index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
-			return i2c->find_type(i2c, index);
-		}
-
-		index = init->outp->i2c_index;
-		if (init->outp->type == DCB_OUTPUT_DP)
-			index += NV_I2C_AUX(0);
+			index = NVKM_I2C_BUS_SEC;
 	}
 
-	return i2c->find(i2c, index);
+	bus = nvkm_i2c_bus_find(i2c, index);
+	return bus ? &bus->i2c : NULL;
 }
 
 static int
 init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_rdi2cr(port, addr, reg);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_rdi2cr(adap, addr, reg);
 	return -ENODEV;
 }
 
 static int
 init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_wri2cr(port, addr, reg, val);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_wri2cr(adap, addr, reg, val);
 	return -ENODEV;
 }
 
+static struct nvkm_i2c_aux *
+init_aux(struct nvbios_init *init)
+{
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	if (!init->outp) {
+		if (init_exec(init))
+			error("script needs output for aux\n");
+		return NULL;
+	}
+	return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
+}
+
 static u8
 init_rdauxr(struct nvbios_init *init, u32 addr)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
+	struct nvkm_i2c_aux *aux = init_aux(init);
 	u8 data;
 
-	if (port && init_exec(init)) {
-		int ret = nv_rdaux(port, addr, &data, 1);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_rdaux(aux, addr, &data, 1);
 		if (ret == 0)
 			return data;
 		trace("auxch read failed with %d\n", ret);
@@ -321,9 +322,9 @@
 static int
 init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
-	if (port && init_exec(init)) {
-		int ret = nv_wraux(port, addr, &data, 1);
+	struct nvkm_i2c_aux *aux = init_aux(init);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_wraux(aux, addr, &data, 1);
 		if (ret)
 			trace("auxch write failed with %d\n", ret);
 		return ret;
@@ -334,9 +335,9 @@
 static void
 init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
-	if (devinit->pll_set && init_exec(init)) {
-		int ret = devinit->pll_set(devinit, id, freq);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
+	if (init_exec(init)) {
+		int ret = nvkm_devinit_pll_set(devinit, id, freq);
 		if (ret)
 			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
 	}
@@ -371,7 +372,7 @@
 	u16 len, data = init_table(bios, &len);
 	if (data) {
 		if (len >= offset + 2) {
-			data = nv_ro16(bios, data + offset);
+			data = nvbios_rd16(bios, data + offset);
 			if (data)
 				return data;
 
@@ -407,12 +408,12 @@
 			return 0x0000;
 
 		data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 	}
 
 	data = init_script_table(&init);
 	if (data)
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 
 	return 0x0000;
 }
@@ -422,7 +423,7 @@
 {
 	u16 len, data = init_table(bios, &len);
 	if (data && len >= 16)
-		return nv_ro16(bios, data + 14);
+		return nvbios_rd16(bios, data + 14);
 	return 0x0000;
 }
 
@@ -454,9 +455,9 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_xlat_table(init);
 	if (table) {
-		u16 data = nv_ro16(bios, table + (index * 2));
+		u16 data = nvbios_rd16(bios, table + (index * 2));
 		if (data)
-			return nv_ro08(bios, data + offset);
+			return nvbios_rd08(bios, data + offset);
 		warn("xlat table pointer %d invalid\n", index);
 	}
 	return 0x00;
@@ -472,9 +473,9 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_condition_table(init);
 	if (table) {
-		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
-		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
-		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+		u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
+		u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
+		u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
 		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
 		      cond, reg, msk, val);
 		return (init_rd32(init, reg) & msk) == val;
@@ -488,10 +489,10 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 5) + 3);
-		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+		u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 5) + 3);
+		u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
 		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
 		      cond, port, index, mask, value);
 		return (init_rdvgai(init, port, index) & mask) == value;
@@ -505,15 +506,15 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_flag_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 9) + 3);
-		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
-		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
-		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
-		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+		u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 9) + 3);
+		u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
+		u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
+		u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
+		u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
 		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
-		return (nv_ro08(bios, data + ioval) & dmask) == value;
+		return (nvbios_rd08(bios, data + ioval) & dmask) == value;
 	}
 	return false;
 }
@@ -573,7 +574,7 @@
 static void
 init_reserved(struct nvbios_init *init)
 {
-	u8 opcode = nv_ro08(init->bios, init->offset);
+	u8 opcode = nvbios_rd08(init->bios, init->offset);
 	u8 length, i;
 
 	switch (opcode) {
@@ -587,7 +588,7 @@
 
 	trace("RESERVED 0x%02x\t", opcode);
 	for (i = 1; i < length; i++)
-		cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+		cont(" 0x%02x", nvbios_rd08(init->bios, init->offset + i));
 	cont("\n");
 	init->offset += length;
 }
@@ -611,12 +612,12 @@
 init_io_restrict_prog(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
@@ -626,7 +627,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		if (i == conf) {
 			trace("\t0x%08x *\n", data);
@@ -648,7 +649,7 @@
 init_repeat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u16 repeat = init->repeat;
 
 	trace("REPEAT\t0x%02x\n", count);
@@ -674,13 +675,13 @@
 init_io_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	s8  iofc = nv_ro08(bios, init->offset + 6);
-	u8 count = nv_ro08(bios, init->offset + 7);
-	u32  reg = nv_ro32(bios, init->offset + 8);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	s8  iofc = nvbios_rd08(bios, init->offset + 6);
+	u8 count = nvbios_rd08(bios, init->offset + 7);
+	u32  reg = nvbios_rd32(bios, init->offset + 8);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
@@ -690,7 +691,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro16(bios, init->offset) * 10;
+		u32 freq = nvbios_rd16(bios, init->offset) * 10;
 
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
@@ -730,12 +731,12 @@
 init_copy(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 smask = nv_ro08(bios, init->offset + 6);
-	u16 port = nv_ro16(bios, init->offset + 7);
-	u8 index = nv_ro08(bios, init->offset + 9);
-	u8  mask = nv_ro08(bios, init->offset + 10);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 smask = nvbios_rd08(bios, init->offset + 6);
+	u16 port = nvbios_rd16(bios, init->offset + 7);
+	u8 index = nvbios_rd08(bios, init->offset + 9);
+	u8  mask = nvbios_rd08(bios, init->offset + 10);
 	u8  data;
 
 	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
@@ -769,7 +770,7 @@
 init_io_flag_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -787,8 +788,8 @@
 {
 	struct nvkm_bios *bios = init->bios;
 	struct nvbios_dpout info;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8  unkn = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8  unkn = nvbios_rd08(bios, init->offset + 2);
 	u8  ver, hdr, cnt, len;
 	u16 data;
 
@@ -834,7 +835,7 @@
 init_io_mask_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -853,7 +854,7 @@
 init_io_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -872,8 +873,8 @@
 init_andn_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -889,8 +890,8 @@
 init_or_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -906,19 +907,19 @@
 init_idx_addr_latched(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 creg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
-	u32 mask = nv_ro32(bios, init->offset + 9);
-	u32 data = nv_ro32(bios, init->offset + 13);
-	u8 count = nv_ro08(bios, init->offset + 17);
+	u32 creg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
+	u32 mask = nvbios_rd32(bios, init->offset + 9);
+	u32 data = nvbios_rd32(bios, init->offset + 13);
+	u8 count = nvbios_rd08(bios, init->offset + 17);
 
 	trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
 	trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
 	init->offset += 18;
 
 	while (count--) {
-		u8 iaddr = nv_ro08(bios, init->offset + 0);
-		u8 idata = nv_ro08(bios, init->offset + 1);
+		u8 iaddr = nvbios_rd08(bios, init->offset + 0);
+		u8 idata = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
 		init->offset += 2;
@@ -936,12 +937,12 @@
 init_io_restrict_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8  conf, i;
 
 	trace("IO_RESTRICT_PLL2\t"
@@ -951,7 +952,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
 			init_prog_pll(init, reg, freq);
@@ -971,8 +972,8 @@
 init_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd32(bios, init->offset + 5);
 
 	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 9;
@@ -988,17 +989,17 @@
 init_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 mask = nv_ro08(bios, init->offset + 1);
-		u8 data = nv_ro08(bios, init->offset + 2);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 mask = nvbios_rd08(bios, init->offset + 1);
+		u8 data = nvbios_rd08(bios, init->offset + 2);
 		int val;
 
 		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
@@ -1019,16 +1020,16 @@
 init_zm_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", reg, data);
 		init->offset += 2;
@@ -1045,28 +1046,28 @@
 init_zm_i2c(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 	u8 data[256], i;
 
 	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	for (i = 0; i < count; i++) {
-		data[i] = nv_ro08(bios, init->offset);
+		data[i] = nvbios_rd08(bios, init->offset);
 		trace("\t0x%02x\n", data[i]);
 		init->offset++;
 	}
 
 	if (init_exec(init)) {
-		struct nvkm_i2c_port *port = init_i2c(init, index);
+		struct i2c_adapter *adap = init_i2c(init, index);
 		struct i2c_msg msg = {
 			.addr = addr, .flags = 0, .len = count, .buf = data,
 		};
 		int ret;
 
-		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+		if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
 			warn("i2c wr failed, %d\n", ret);
 	}
 }
@@ -1079,10 +1080,10 @@
 init_tmds(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 tmds = nv_ro08(bios, init->offset + 1);
-	u8 addr = nv_ro08(bios, init->offset + 2);
-	u8 mask = nv_ro08(bios, init->offset + 3);
-	u8 data = nv_ro08(bios, init->offset + 4);
+	u8 tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 addr = nvbios_rd08(bios, init->offset + 2);
+	u8 mask = nvbios_rd08(bios, init->offset + 3);
+	u8 data = nvbios_rd08(bios, init->offset + 4);
 	u32 reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1105,16 +1106,16 @@
 init_zm_tmds_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  tmds = nv_ro08(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 2);
+	u8  tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 2);
 	u32  reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
 	init->offset += 3;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1132,10 +1133,10 @@
 init_cr_idx_adr_latch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr0 = nv_ro08(bios, init->offset + 1);
-	u8 addr1 = nv_ro08(bios, init->offset + 2);
-	u8  base = nv_ro08(bios, init->offset + 3);
-	u8 count = nv_ro08(bios, init->offset + 4);
+	u8 addr0 = nvbios_rd08(bios, init->offset + 1);
+	u8 addr1 = nvbios_rd08(bios, init->offset + 2);
+	u8  base = nvbios_rd08(bios, init->offset + 3);
+	u8 count = nvbios_rd08(bios, init->offset + 4);
 	u8 save0;
 
 	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
@@ -1143,7 +1144,7 @@
 
 	save0 = init_rdvgai(init, 0x03d4, addr0);
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset);
+		u8 data = nvbios_rd08(bios, init->offset);
 
 		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
 		init->offset += 1;
@@ -1162,9 +1163,9 @@
 init_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 mask = nv_ro08(bios, init->offset + 2);
-	u8 data = nv_ro08(bios, init->offset + 3);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 mask = nvbios_rd08(bios, init->offset + 2);
+	u8 data = nvbios_rd08(bios, init->offset + 3);
 	u8 val;
 
 	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
@@ -1182,8 +1183,8 @@
 init_zm_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 data = nv_ro08(bios, init->offset + 2);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 data = nvbios_rd08(bios, init->offset + 2);
 
 	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr,  data);
 	init->offset += 3;
@@ -1199,14 +1200,14 @@
 init_zm_cr_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 
 	trace("ZM_CR_GROUP\n");
 	init->offset += 2;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1223,8 +1224,8 @@
 init_condition_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8 retry = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8 retry = nvbios_rd08(bios, init->offset + 2);
 	u8  wait = min((u16)retry * 50, 100);
 
 	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
@@ -1250,7 +1251,7 @@
 init_ltime(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 msec = nv_ro16(bios, init->offset + 1);
+	u16 msec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("LTIME\t0x%04x\n", msec);
 	init->offset += 3;
@@ -1267,14 +1268,14 @@
 init_zm_reg_sequence(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 base = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 base = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
 		init->offset += 4;
@@ -1292,9 +1293,9 @@
 init_pll_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
 
 	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
 	      reg, addr, freq);
@@ -1311,9 +1312,9 @@
 init_zm_reg_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, addr);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, addr);
 
 	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
 	      reg, addr, data);
@@ -1330,7 +1331,7 @@
 init_sub_direct(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 addr = nv_ro16(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 1);
 	u16 save;
 
 	trace("SUB_DIRECT\t0x%04x\n", addr);
@@ -1356,7 +1357,7 @@
 init_jump(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 offset = nv_ro16(bios, init->offset + 1);
+	u16 offset = nvbios_rd16(bios, init->offset + 1);
 
 	trace("JUMP\t0x%04x\n", offset);
 
@@ -1374,11 +1375,11 @@
 init_i2c_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2);
-	u8   reg = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2);
+	u8   reg = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
@@ -1401,12 +1402,12 @@
 init_copy_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  sreg = nv_ro32(bios, init->offset + 1);
-	u8  shift = nv_ro08(bios, init->offset + 5);
-	u32 smask = nv_ro32(bios, init->offset + 6);
-	u32  sxor = nv_ro32(bios, init->offset + 10);
-	u32  dreg = nv_ro32(bios, init->offset + 14);
-	u32 dmask = nv_ro32(bios, init->offset + 18);
+	u32  sreg = nvbios_rd32(bios, init->offset + 1);
+	u8  shift = nvbios_rd08(bios, init->offset + 5);
+	u32 smask = nvbios_rd32(bios, init->offset + 6);
+	u32  sxor = nvbios_rd32(bios, init->offset + 10);
+	u32  dreg = nvbios_rd32(bios, init->offset + 14);
+	u32 dmask = nvbios_rd32(bios, init->offset + 18);
 	u32 data;
 
 	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
@@ -1427,9 +1428,9 @@
 init_zm_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  data = nv_ro08(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  data = nvbios_rd08(bios, init->offset + 4);
 
 	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
 	init->offset += 5;
@@ -1444,14 +1445,14 @@
 static void
 init_compute_mem(struct nvbios_init *init)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	trace("COMPUTE_MEM\n");
 	init->offset += 1;
 
 	init_exec_force(init, true);
-	if (init_exec(init) && devinit->meminit)
-		devinit->meminit(devinit);
+	if (init_exec(init))
+		nvkm_devinit_meminit(devinit);
 	init_exec_force(init, false);
 }
 
@@ -1463,9 +1464,9 @@
 init_reset(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32   reg = nv_ro32(bios, init->offset + 1);
-	u32 data1 = nv_ro32(bios, init->offset + 5);
-	u32 data2 = nv_ro32(bios, init->offset + 9);
+	u32   reg = nvbios_rd32(bios, init->offset + 1);
+	u32 data1 = nvbios_rd32(bios, init->offset + 5);
+	u32 data2 = nvbios_rd32(bios, init->offset + 9);
 	u32 savepci19;
 
 	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
@@ -1513,14 +1514,14 @@
 
 	mdata = init_configure_mem_clk(init);
 	sdata = bmp_sdr_seq_table(bios);
-	if (nv_ro08(bios, mdata) & 0x01)
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		sdata = bmp_ddr_seq_table(bios);
 	mdata += 6; /* skip to data */
 
 	data = init_rdvgai(init, 0x03c4, 0x01);
 	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
 
-	for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
+	for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
 		switch (addr) {
 		case 0x10021c: /* CKE_NORMAL */
 		case 0x1002d0: /* CMD_REFRESH */
@@ -1528,7 +1529,7 @@
 			data = 0x00000001;
 			break;
 		default:
-			data = nv_ro32(bios, mdata);
+			data = nvbios_rd32(bios, mdata);
 			mdata += 4;
 			if (data == 0xffffffff)
 				continue;
@@ -1563,12 +1564,12 @@
 	mdata = init_configure_mem_clk(init);
 
 	/* NVPLL */
-	clock = nv_ro16(bios, mdata + 4) * 10;
+	clock = nvbios_rd16(bios, mdata + 4) * 10;
 	init_prog_pll(init, 0x680500, clock);
 
 	/* MPLL */
-	clock = nv_ro16(bios, mdata + 2) * 10;
-	if (nv_ro08(bios, mdata) & 0x01)
+	clock = nvbios_rd16(bios, mdata + 2) * 10;
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		clock *= 2;
 	init_prog_pll(init, 0x680504, clock);
 
@@ -1609,9 +1610,9 @@
 init_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8  mask = nv_ro16(bios, init->offset + 3);
-	u8  data = nv_ro16(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8  mask = nvbios_rd16(bios, init->offset + 3);
+	u8  data = nvbios_rd16(bios, init->offset + 4);
 	u8 value;
 
 	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
@@ -1621,7 +1622,7 @@
 	 * needed some day...  it's almost certainly wrong, but it also
 	 * somehow makes things work...
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50 &&
+	if (bios->subdev.device->card_type >= NV_50 &&
 	    port == 0x03c3 && data == 0x01) {
 		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
 		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
@@ -1649,7 +1650,7 @@
 init_sub(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u16 addr, save;
 
 	trace("SUB\t0x%02x\n", index);
@@ -1676,8 +1677,8 @@
 init_ram_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  mask = nv_ro08(bios, init->offset + 1);
-	u8 value = nv_ro08(bios, init->offset + 2);
+	u8  mask = nvbios_rd08(bios, init->offset + 1);
+	u8 value = nvbios_rd08(bios, init->offset + 2);
 
 	trace("RAM_CONDITION\t"
 	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
@@ -1695,9 +1696,9 @@
 init_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, init->offset + 9);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, init->offset + 9);
 
 	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
 	init->offset += 13;
@@ -1713,15 +1714,15 @@
 init_macro(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  macro = nv_ro08(bios, init->offset + 1);
+	u8  macro = nvbios_rd08(bios, init->offset + 1);
 	u16 table;
 
 	trace("MACRO\t0x%02x\n", macro);
 
 	table = init_macro_table(init);
 	if (table) {
-		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
-		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+		u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
+		u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
 		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
 		init_wr32(init, addr, data);
 	}
@@ -1742,6 +1743,24 @@
 }
 
 /**
+ * INIT_STRAP_CONDITION - opcode 0x73
+ *
+ */
+static void
+init_strap_condition(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 mask = nvbios_rd32(bios, init->offset + 1);
+	u32 value = nvbios_rd32(bios, init->offset + 5);
+
+	trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, value);
+	init->offset += 9;
+
+	if ((init_rd32(init, 0x101000) & mask) != value)
+		init_exec_set(init, false);
+}
+
+/**
  * INIT_TIME - opcode 0x74
  *
  */
@@ -1749,7 +1768,7 @@
 init_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 usec = nv_ro16(bios, init->offset + 1);
+	u16 usec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("TIME\t0x%04x\n", usec);
 	init->offset += 3;
@@ -1770,7 +1789,7 @@
 init_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1787,7 +1806,7 @@
 init_io_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1797,6 +1816,23 @@
 }
 
 /**
+ * INIT_ZM_REG16 - opcode 0x77
+ *
+ */
+static void
+init_zm_reg16(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u16 data = nvbios_rd16(bios, init->offset + 5);
+
+	trace("ZM_REG16\tR[0x%06x] = 0x%04x\n", addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
+/**
  * INIT_INDEX_IO - opcode 0x78
  *
  */
@@ -1804,10 +1840,10 @@
 init_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro16(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd16(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1826,8 +1862,8 @@
 init_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd16(bios, init->offset + 5) * 10;
 
 	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 7;
@@ -1843,8 +1879,8 @@
 init_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 data = nv_ro32(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 data = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
 	init->offset += 9;
@@ -1863,7 +1899,7 @@
 init_ram_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  type = nv_ro08(bios, init->offset + 1);
+	u8  type = nvbios_rd08(bios, init->offset + 1);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 strap = init_ram_restrict(init);
 	u8 cconf;
@@ -1872,7 +1908,7 @@
 	init->offset += 2;
 
 	for (cconf = 0; cconf < count; cconf++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 
 		if (cconf == strap) {
 			trace("%dkHz *\n", freq);
@@ -1892,13 +1928,13 @@
 static void
 init_gpio(struct nvbios_init *init)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(init->bios);
+	struct nvkm_gpio *gpio = init->bios->subdev.device->gpio;
 
 	trace("GPIO\n");
 	init->offset += 1;
 
-	if (init_exec(init) && gpio && gpio->reset)
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
+	if (init_exec(init))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
 }
 
 /**
@@ -1909,9 +1945,9 @@
 init_ram_restrict_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8  incr = nv_ro08(bios, init->offset + 5);
-	u8   num = nv_ro08(bios, init->offset + 6);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8  incr = nvbios_rd08(bios, init->offset + 5);
+	u8   num = nvbios_rd08(bios, init->offset + 6);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 index = init_ram_restrict(init);
 	u8 i, j;
@@ -1923,7 +1959,7 @@
 	for (i = 0; i < num; i++) {
 		trace("\tR[0x%06x] = {\n", addr);
 		for (j = 0; j < count; j++) {
-			u32 data = nv_ro32(bios, init->offset);
+			u32 data = nvbios_rd32(bios, init->offset);
 
 			if (j == index) {
 				trace("\t\t0x%08x *\n", data);
@@ -1947,8 +1983,8 @@
 init_copy_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 sreg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
+	u32 sreg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
 
 	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
 	init->offset += 9;
@@ -1964,14 +2000,14 @@
 init_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 		trace("\t0x%08x\n", data);
 		init_wr32(init, addr, data);
 		init->offset += 4;
@@ -1986,13 +2022,13 @@
 init_xlat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 saddr = nv_ro32(bios, init->offset + 1);
-	u8 sshift = nv_ro08(bios, init->offset + 5);
-	u8  smask = nv_ro08(bios, init->offset + 6);
-	u8  index = nv_ro08(bios, init->offset + 7);
-	u32 daddr = nv_ro32(bios, init->offset + 8);
-	u32 dmask = nv_ro32(bios, init->offset + 12);
-	u8  shift = nv_ro08(bios, init->offset + 16);
+	u32 saddr = nvbios_rd32(bios, init->offset + 1);
+	u8 sshift = nvbios_rd08(bios, init->offset + 5);
+	u8  smask = nvbios_rd08(bios, init->offset + 6);
+	u8  index = nvbios_rd08(bios, init->offset + 7);
+	u32 daddr = nvbios_rd32(bios, init->offset + 8);
+	u32 dmask = nvbios_rd32(bios, init->offset + 12);
+	u8  shift = nvbios_rd08(bios, init->offset + 16);
 	u32 data;
 
 	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
@@ -2014,9 +2050,9 @@
 init_zm_mask_add(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32  add = nv_ro32(bios, init->offset + 9);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32  add = nvbios_rd32(bios, init->offset + 9);
 	u32 data;
 
 	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
@@ -2035,15 +2071,15 @@
 init_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 mask = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 mask = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
 		mask = init_rdauxr(init, addr) & mask;
 		init_wrauxr(init, addr, mask | data);
@@ -2059,14 +2095,14 @@
 init_zm_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 0);
 		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
 		init_wrauxr(init, addr, data);
 		init->offset += 1;
@@ -2081,21 +2117,21 @@
 init_i2c_long_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 reglo = nv_ro08(bios, init->offset + 3);
-	u8 reghi = nv_ro08(bios, init->offset + 4);
-	u8  mask = nv_ro08(bios, init->offset + 5);
-	u8  data = nv_ro08(bios, init->offset + 6);
-	struct nvkm_i2c_port *port;
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 reglo = nvbios_rd08(bios, init->offset + 3);
+	u8 reghi = nvbios_rd08(bios, init->offset + 4);
+	u8  mask = nvbios_rd08(bios, init->offset + 5);
+	u8  data = nvbios_rd08(bios, init->offset + 6);
+	struct i2c_adapter *adap;
 
 	trace("I2C_LONG_IF\t"
 	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
 	      index, addr, reglo, reghi, mask, data);
 	init->offset += 7;
 
-	port = init_i2c(init, index);
-	if (port) {
+	adap = init_i2c(init, index);
+	if (adap) {
 		u8 i[2] = { reghi, reglo };
 		u8 o[1] = {};
 		struct i2c_msg msg[] = {
@@ -2104,7 +2140,7 @@
 		};
 		int ret;
 
-		ret = i2c_transfer(&port->adapter, msg, 2);
+		ret = i2c_transfer(adap, msg, 2);
 		if (ret == 2 && ((o[0] & mask) == data))
 			return;
 	}
@@ -2120,9 +2156,9 @@
 init_gpio_ne(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	struct nvkm_gpio *gpio = nvkm_gpio(bios);
+	struct nvkm_gpio *gpio = bios->subdev.device->gpio;
 	struct dcb_gpio_func func;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u8 idx = 0, ver, len;
 	u16 data, i;
 
@@ -2130,21 +2166,21 @@
 	init->offset += 2;
 
 	for (i = init->offset; i < init->offset + count; i++)
-		cont("0x%02x ", nv_ro08(bios, i));
+		cont("0x%02x ", nvbios_rd08(bios, i));
 	cont("\n");
 
 	while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
 		if (func.func != DCB_GPIO_UNUSED) {
 			for (i = init->offset; i < init->offset + count; i++) {
-				if (func.func == nv_ro08(bios, i))
+				if (func.func == nvbios_rd08(bios, i))
 					break;
 			}
 
 			trace("\tFUNC[0x%02x]", func.func);
 			if (i == (init->offset + count)) {
 				cont(" *");
-				if (init_exec(init) && gpio && gpio->reset)
-					gpio->reset(gpio, func.func);
+				if (init_exec(init))
+					nvkm_gpio_reset(gpio, func.func);
 			}
 			cont("\n");
 		}
@@ -2202,9 +2238,11 @@
 	[0x6f] = { init_macro },
 	[0x71] = { init_done },
 	[0x72] = { init_resume },
+	[0x73] = { init_strap_condition },
 	[0x74] = { init_time },
 	[0x75] = { init_condition },
 	[0x76] = { init_io_condition },
+	[0x77] = { init_zm_reg16 },
 	[0x78] = { init_index_io },
 	[0x79] = { init_pll },
 	[0x7a] = { init_zm_reg },
@@ -2232,7 +2270,7 @@
 {
 	init->nested++;
 	while (init->offset) {
-		u8 opcode = nv_ro08(init->bios, init->offset);
+		u8 opcode = nvbios_rd08(init->bios, init->offset);
 		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
 			error("unknown opcode 0x%02x\n", opcode);
 			return -EINVAL;
@@ -2247,13 +2285,13 @@
 int
 nvbios_init(struct nvkm_subdev *subdev, bool execute)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int ret = 0;
 	int i = -1;
 	u16 data;
 
 	if (execute)
-		nv_info(bios, "running init tables\n");
+		nvkm_debug(subdev, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
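
Most of this file is handlers for a one-byte-opcode interpreter:
nvbios_exec() (in the hunk above) fetches the opcode at the current offset,
indexes the sparse init_opcode[] table, and each handler decodes its own
operands and advances the offset past them, with offset zero ending the
script.  A compact, self-contained model of that dispatch; opcodes 0x71
(DONE) and 0x74 (TIME) and their operand sizes follow the handlers above,
everything else is a simplified stand-in for the nvkm types:

	#include <stdint.h>
	#include <stdio.h>

	struct script {
		const uint8_t *img;
		uint16_t offset;	/* 0 terminates, as in the driver */
	};

	static void
	op_time(struct script *s)	/* 0x74: u16 microsecond operand */
	{
		unsigned usec = s->img[s->offset + 1] | s->img[s->offset + 2] << 8;

		printf("TIME %uus\n", usec);
		s->offset += 3;
	}

	static void
	op_done(struct script *s)	/* 0x71: terminate the script */
	{
		printf("DONE\n");
		s->offset = 0;
	}

	static void (*const op[0x100])(struct script *) = {
		[0x71] = op_done,
		[0x74] = op_time,
	};

	int
	main(void)
	{
		static const uint8_t img[] = { 0x74, 0x10, 0x27, 0x71 };
		struct script s = { .img = img, .offset = 0 };

		do {
			uint8_t opcode = s.img[s.offset];

			if (!op[opcode]) {
				fprintf(stderr, "unknown opcode 0x%02x\n", opcode);
				return 1;
			}
			op[opcode](&s);
		} while (s.offset);
		return 0;
	}
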
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index c4087df..3ddf093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -28,17 +28,18 @@
 u16
 mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct bit_entry x;
 
 	if (bit_entry(bios, 'x', &x)) {
-		nv_debug(bios, "BIT 'x' table not present\n");
+		nvkm_debug(subdev, "BIT 'x' table not present\n");
 		return 0x0000;
 	}
 
 	*ver = x.version;
 	*hdr = x.length;
 	if (*ver != 1 || *hdr < 3) {
-		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+		nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
 		return 0x0000;
 	}
 
@@ -73,23 +74,24 @@
 u8
 mxm_sor_map(struct nvkm_bios *bios, u8 conn)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 6) {
-		u16 map = nv_ro16(bios, mxm + 4);
+		u16 map = nvbios_rd16(bios, mxm + 4);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (conn < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (conn < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += conn;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown sor map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
 		}
 	}
 
@@ -102,30 +104,31 @@
 	if (bios->version.chip == 0x98)
 		return g98_sor_map[conn];
 
-	nv_warn(bios, "missing sor map\n");
+	nvkm_warn(subdev, "missing sor map\n");
 	return 0x00;
 }
 
 u8
 mxm_ddc_map(struct nvkm_bios *bios, u8 port)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 8) {
-		u16 map = nv_ro16(bios, mxm + 6);
+		u16 map = nvbios_rd16(bios, mxm + 6);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (port < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (port < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += port;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown ddc map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
 		}
 	}
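
mxm_sor_map() and mxm_ddc_map() above walk the same v1.0 sub-table layout:
byte 0 is a version, byte 1 the header size, byte 3 the entry count, and the
entries are single bytes following the header. A minimal stand-alone sketch of
that walk (img is a shadowed image; the kernel code additionally warns on
unknown versions):

    #include <stdint.h>

    static uint8_t map_lookup(const uint8_t *img, uint32_t map, uint8_t idx)
    {
            if (img[map] != 0x10)        /* version byte */
                    return 0x00;
            if (idx >= img[map + 3])     /* entry count */
                    return 0x00;
            map += img[map + 1];         /* skip header */
            return img[map + idx];       /* one byte per entry */
    }
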
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index fd7dd71..955df29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -32,12 +32,13 @@
 	u8  ver; u16 hdr;
 	u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
 	if (data = (data + hdr + 0x0f) & ~0x0f, data) {
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x4544504e: /* NPDE */
 			break;
 		default:
-			nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: NPDE signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -51,8 +52,8 @@
 	u32 data = nvbios_npdeTe(bios, base);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->image_size = nv_ro16(bios, data + 0x08) * 512;
-		info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+		info->image_size = nvbios_rd16(bios, data + 0x08) * 512;
+		info->last = nvbios_rd08(bios, data + 0x0a) & 0x80;
 	}
 	return data;
 }
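
nvbios_npdeTe() locates the NPDE header by rounding the end of the PCIR header
up to the next 16-byte boundary, the usual (x + a - 1) & ~(a - 1) round-up
idiom squeezed into a comma expression. Unpacked:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t align16(uint32_t x)
    {
            return (x + 0x0f) & ~(uint32_t)0x0f;
    }

    int main(void)
    {
            assert(align16(0x7e51) == 0x7e60);  /* rounds up */
            assert(align16(0x7e60) == 0x7e60);  /* already aligned */
            return 0;
    }
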
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index df59787..67cb3aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -27,19 +27,20 @@
 u32
 nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
 {
-	u32 data = nv_ro16(bios, base + 0x18);
+	u32 data = nvbios_rd16(bios, base + 0x18);
 	if (data) {
 		data += base;
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x52494350: /* PCIR */
 		case 0x53494752: /* RGIS */
 		case 0x5344504e: /* NPDS */
-			*hdr = nv_ro16(bios, data + 0x0a);
-			*ver = nv_ro08(bios, data + 0x0c);
+			*hdr = nvbios_rd16(bios, data + 0x0a);
+			*ver = nvbios_rd08(bios, data + 0x0c);
 			break;
 		default:
-			nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: PCIR signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -54,15 +55,15 @@
 	u32 data = nvbios_pcirTe(bios, base, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->vendor_id = nv_ro16(bios, data + 0x04);
-		info->device_id = nv_ro16(bios, data + 0x06);
-		info->class_code[0] = nv_ro08(bios, data + 0x0d);
-		info->class_code[1] = nv_ro08(bios, data + 0x0e);
-		info->class_code[2] = nv_ro08(bios, data + 0x0f);
-		info->image_size = nv_ro16(bios, data + 0x10) * 512;
-		info->image_rev = nv_ro16(bios, data + 0x12);
-		info->image_type = nv_ro08(bios, data + 0x14);
-		info->last = nv_ro08(bios, data + 0x15) & 0x80;
+		info->vendor_id = nvbios_rd16(bios, data + 0x04);
+		info->device_id = nvbios_rd16(bios, data + 0x06);
+		info->class_code[0] = nvbios_rd08(bios, data + 0x0d);
+		info->class_code[1] = nvbios_rd08(bios, data + 0x0e);
+		info->class_code[2] = nvbios_rd08(bios, data + 0x0f);
+		info->image_size = nvbios_rd16(bios, data + 0x10) * 512;
+		info->image_rev = nvbios_rd16(bios, data + 0x12);
+		info->image_type = nvbios_rd08(bios, data + 0x14);
+		info->last = nvbios_rd08(bios, data + 0x15) & 0x80;
 	}
 	return data;
 }
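
The magic constants checked in nvbios_pcirTe() and nvbios_npdeTe() are just the
four signature bytes read back as one little-endian 32-bit word, so "PCIR" in
the image compares equal to 0x52494350. A quick stand-alone check:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rd32le(const uint8_t *p, uint32_t o)
    {
            return p[o] | p[o + 1] << 8 | p[o + 2] << 16 |
                   (uint32_t)p[o + 3] << 24;
    }

    int main(void)
    {
            const uint8_t sig[4] = { 'P', 'C', 'I', 'R' };
            printf("%08x\n", rd32le(sig, 0));  /* prints 52494350 */
            return 0;
    }
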
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 382ae9c..aa7e33b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-#include <core/device.h>
-
 u16
 nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
@@ -36,22 +34,22 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version <= 2) {
-			perf = nv_ro16(bios, bit_P.offset + 0);
+			perf = nvbios_rd16(bios, bit_P.offset + 0);
 			if (perf) {
-				*ver = nv_ro08(bios, perf + 0);
-				*hdr = nv_ro08(bios, perf + 1);
+				*ver = nvbios_rd08(bios, perf + 0);
+				*hdr = nvbios_rd08(bios, perf + 1);
 				if (*ver >= 0x40 && *ver < 0x41) {
-					*cnt = nv_ro08(bios, perf + 5);
-					*len = nv_ro08(bios, perf + 2);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 3);
+					*cnt = nvbios_rd08(bios, perf + 5);
+					*len = nvbios_rd08(bios, perf + 2);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 3);
 					return perf;
 				} else
 				if (*ver >= 0x20 && *ver < 0x40) {
-					*cnt = nv_ro08(bios, perf + 2);
-					*len = nv_ro08(bios, perf + 3);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 5);
+					*cnt = nvbios_rd08(bios, perf + 2);
+					*len = nvbios_rd08(bios, perf + 3);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 5);
 					return perf;
 				}
 			}
@@ -59,13 +57,13 @@
 	}
 
 	if (bios->bmp_offset) {
-		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
-			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+		if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
+			perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
 			if (perf) {
-				*hdr = nv_ro08(bios, perf + 0);
-				*ver = nv_ro08(bios, perf + 1);
-				*cnt = nv_ro08(bios, perf + 2);
-				*len = nv_ro08(bios, perf + 3);
+				*hdr = nvbios_rd08(bios, perf + 0);
+				*ver = nvbios_rd08(bios, perf + 1);
+				*cnt = nvbios_rd08(bios, perf + 2);
+				*len = nvbios_rd08(bios, perf + 3);
 				*snr = 0;
 				*ssz = 0;
 				return perf;
@@ -98,55 +96,55 @@
 {
 	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
-	info->pstate = nv_ro08(bios, perf + 0x00);
+	info->pstate = nvbios_rd08(bios, perf + 0x00);
 	switch (!!perf * *ver) {
 	case 0x12:
 	case 0x13:
 	case 0x14:
-		info->core     = nv_ro32(bios, perf + 0x01) * 10;
-		info->memory   = nv_ro32(bios, perf + 0x05) * 20;
-		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		info->core     = nvbios_rd32(bios, perf + 0x01) * 10;
+		info->memory   = nvbios_rd32(bios, perf + 0x05) * 20;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x37);
 		if (*hdr > 0x38)
-			info->voltage = nv_ro08(bios, perf + 0x38);
+			info->voltage = nvbios_rd08(bios, perf + 0x38);
 		break;
 	case 0x21:
 	case 0x23:
 	case 0x24:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->shader   = nv_ro16(bios, perf + 0x06) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->shader   = nvbios_rd16(bios, perf + 0x06) * 1000;
 		info->core     = info->shader + (signed char)
-				 nv_ro08(bios, perf + 0x08) * 1000;
-		switch (nv_device(bios)->chipset) {
+				 nvbios_rd08(bios, perf + 0x08) * 1000;
+		switch (bios->subdev.device->chipset) {
 		case 0x49:
 		case 0x4b:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
 			break;
 		default:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
 			break;
 		}
 		break;
 	case 0x25:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->core     = nv_ro16(bios, perf + 0x06) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->core     = nvbios_rd16(bios, perf + 0x06) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
 		break;
 	case 0x30:
-		info->script   = nv_ro16(bios, perf + 0x02);
+		info->script   = nvbios_rd16(bios, perf + 0x02);
 	case 0x35:
-		info->fanspeed = nv_ro08(bios, perf + 0x06);
-		info->voltage  = nv_ro08(bios, perf + 0x07);
-		info->core     = nv_ro16(bios, perf + 0x08) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
-		info->vdec     = nv_ro16(bios, perf + 0x10) * 1000;
-		info->disp     = nv_ro16(bios, perf + 0x14) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x06);
+		info->voltage  = nvbios_rd08(bios, perf + 0x07);
+		info->core     = nvbios_rd16(bios, perf + 0x08) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
+		info->vdec     = nvbios_rd16(bios, perf + 0x10) * 1000;
+		info->disp     = nvbios_rd16(bios, perf + 0x14) * 1000;
 		break;
 	case 0x40:
-		info->voltage  = nv_ro08(bios, perf + 0x02);
+		info->voltage  = nvbios_rd08(bios, perf + 0x02);
 		break;
 	default:
 		return 0x0000;
@@ -175,7 +173,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x40:
-		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff) * 1000;
 		break;
 	default:
 		break;
@@ -193,7 +191,7 @@
 		return -ENODEV;
 
 	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
-		fan->pwm_divisor = nv_ro16(bios, perf + 6);
+		fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
 	else
 		fan->pwm_divisor = 0;
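
Most of the parsers in this series dispatch with switch (!!data * *ver), as
nvbios_perfEp() does above: !!data collapses the table offset to 0 or 1, so a
failed lookup multiplies the version down to 0 and lands in default, while a
successful one switches on the version byte unchanged. In isolation:

    #include <stdint.h>
    #include <stdio.h>

    static const char *describe(uint16_t data, uint8_t ver)
    {
            switch (!!data * ver) {
            case 0x25: return "v2.5 entry";
            case 0x40: return "v4.0 entry";
            default:   return "absent or unknown";
            }
    }

    int main(void)
    {
            printf("%s\n", describe(0x0000, 0x40)); /* absent or unknown */
            printf("%s\n", describe(0xc1e0, 0x40)); /* v4.0 entry */
            return 0;
    }
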
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index ebd402e..125ec2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -27,7 +27,6 @@
 #include <subdev/bios/pll.h>
 #include <subdev/vga.h>
 
-#include <core/device.h>
 
 struct pll_mapping {
 	u8  type;
@@ -84,20 +83,20 @@
 	struct bit_entry bit_C;
 
 	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
-		u16 data = nv_ro16(bios, bit_C.offset + 8);
+		u16 data = nvbios_rd16(bios, bit_C.offset + 8);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*len = nv_ro08(bios, data + 2);
-			*cnt = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*len = nvbios_rd08(bios, data + 2);
+			*cnt = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
 
 	if (bmp_version(bios) >= 0x0524) {
-		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+		u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			*hdr = 1;
 			*cnt = 1;
 			*len = 0x18;
@@ -112,7 +111,8 @@
 static struct pll_mapping *
 pll_map(struct nvkm_bios *bios)
 {
-	switch (nv_device(bios)->card_type) {
+	struct nvkm_device *device = bios->subdev.device;
+	switch (device->card_type) {
 	case NV_04:
 	case NV_10:
 	case NV_11:
@@ -123,12 +123,12 @@
 	case NV_40:
 		return nv40_pll_mapping;
 	case NV_50:
-		if (nv_device(bios)->chipset == 0x50)
+		if (device->chipset == 0x50)
 			return nv50_pll_mapping;
 		else
-		if (nv_device(bios)->chipset <  0xa3 ||
-		    nv_device(bios)->chipset == 0xaa ||
-		    nv_device(bios)->chipset == 0xac)
+		if (device->chipset <  0xa3 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac)
 			return g84_pll_mapping;
 	default:
 		return NULL;
@@ -146,8 +146,8 @@
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro32(bios, data + 3) == reg) {
-				*type = nv_ro08(bios, data + 0);
+			if (nvbios_rd32(bios, data + 3) == reg) {
+				*type = nvbios_rd08(bios, data + 0);
 				return data;
 			}
 			data += *len;
@@ -161,7 +161,7 @@
 			u16 addr = (data += hdr);
 			*type = map->type;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -188,8 +188,8 @@
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro08(bios, data + 0) == type) {
-				*reg = nv_ro32(bios, data + 3);
+			if (nvbios_rd08(bios, data + 0) == type) {
+				*reg = nvbios_rd32(bios, data + 3);
 				return data;
 			}
 			data += *len;
@@ -203,7 +203,7 @@
 			u16 addr = (data += hdr);
 			*reg = map->reg;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -222,6 +222,8 @@
 int
 nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u8  ver, len;
 	u32 reg = type;
 	u16 data;
@@ -245,12 +247,12 @@
 		break;
 	case 0x10:
 	case 0x11:
-		info->vco1.min_freq = nv_ro32(bios, data + 0);
-		info->vco1.max_freq = nv_ro32(bios, data + 4);
-		info->vco2.min_freq = nv_ro32(bios, data + 8);
-		info->vco2.max_freq = nv_ro32(bios, data + 12);
-		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
-		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+		info->vco1.min_freq = nvbios_rd32(bios, data + 0);
+		info->vco1.max_freq = nvbios_rd32(bios, data + 4);
+		info->vco2.min_freq = nvbios_rd32(bios, data + 8);
+		info->vco2.max_freq = nvbios_rd32(bios, data + 12);
+		info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
+		info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
 		info->vco1.max_inputfreq = INT_MAX;
 		info->vco2.max_inputfreq = INT_MAX;
 
@@ -291,82 +293,82 @@
 		break;
 	case 0x20:
 	case 0x21:
-		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 20);
-		info->vco1.max_n = nv_ro08(bios, data + 21);
-		info->vco1.min_m = nv_ro08(bios, data + 22);
-		info->vco1.max_m = nv_ro08(bios, data + 23);
-		info->vco2.min_n = nv_ro08(bios, data + 24);
-		info->vco2.max_n = nv_ro08(bios, data + 25);
-		info->vco2.min_m = nv_ro08(bios, data + 26);
-		info->vco2.max_m = nv_ro08(bios, data + 27);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 20);
+		info->vco1.max_n = nvbios_rd08(bios, data + 21);
+		info->vco1.min_m = nvbios_rd08(bios, data + 22);
+		info->vco1.max_m = nvbios_rd08(bios, data + 23);
+		info->vco2.min_n = nvbios_rd08(bios, data + 24);
+		info->vco2.max_n = nvbios_rd08(bios, data + 25);
+		info->vco2.min_m = nvbios_rd08(bios, data + 26);
+		info->vco2.max_m = nvbios_rd08(bios, data + 27);
 
-		info->max_p = nv_ro08(bios, data + 29);
+		info->max_p = nvbios_rd08(bios, data + 29);
 		info->max_p_usable = info->max_p;
 		if (bios->version.chip < 0x60)
 			info->max_p_usable = 0x6;
-		info->bias_p = nv_ro08(bios, data + 30);
+		info->bias_p = nvbios_rd08(bios, data + 30);
 
 		if (len > 0x22)
-			info->refclk = nv_ro32(bios, data + 31);
+			info->refclk = nvbios_rd32(bios, data + 31);
 		break;
 	case 0x30:
-		data = nv_ro16(bios, data + 1);
+		data = nvbios_rd16(bios, data + 1);
 
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 16);
-		info->vco1.max_n = nv_ro08(bios, data + 17);
-		info->vco1.min_m = nv_ro08(bios, data + 18);
-		info->vco1.max_m = nv_ro08(bios, data + 19);
-		info->vco2.min_n = nv_ro08(bios, data + 20);
-		info->vco2.max_n = nv_ro08(bios, data + 21);
-		info->vco2.min_m = nv_ro08(bios, data + 22);
-		info->vco2.max_m = nv_ro08(bios, data + 23);
-		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
-		info->bias_p = nv_ro08(bios, data + 27);
-		info->refclk = nv_ro32(bios, data + 28);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 16);
+		info->vco1.max_n = nvbios_rd08(bios, data + 17);
+		info->vco1.min_m = nvbios_rd08(bios, data + 18);
+		info->vco1.max_m = nvbios_rd08(bios, data + 19);
+		info->vco2.min_n = nvbios_rd08(bios, data + 20);
+		info->vco2.max_n = nvbios_rd08(bios, data + 21);
+		info->vco2.min_m = nvbios_rd08(bios, data + 22);
+		info->vco2.max_m = nvbios_rd08(bios, data + 23);
+		info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
+		info->bias_p = nvbios_rd08(bios, data + 27);
+		info->refclk = nvbios_rd32(bios, data + 28);
 		break;
 	case 0x40:
-		info->refclk = nv_ro16(bios, data + 9) * 1000;
-		data = nv_ro16(bios, data + 1);
+		info->refclk = nvbios_rd16(bios, data + 9) * 1000;
+		data = nvbios_rd16(bios, data + 1);
 
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_m = nv_ro08(bios, data + 8);
-		info->vco1.max_m = nv_ro08(bios, data + 9);
-		info->vco1.min_n = nv_ro08(bios, data + 10);
-		info->vco1.max_n = nv_ro08(bios, data + 11);
-		info->min_p = nv_ro08(bios, data + 12);
-		info->max_p = nv_ro08(bios, data + 13);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_m = nvbios_rd08(bios, data + 8);
+		info->vco1.max_m = nvbios_rd08(bios, data + 9);
+		info->vco1.min_n = nvbios_rd08(bios, data + 10);
+		info->vco1.max_n = nvbios_rd08(bios, data + 11);
+		info->min_p = nvbios_rd08(bios, data + 12);
+		info->max_p = nvbios_rd08(bios, data + 13);
 		break;
 	default:
-		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+		nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
 		return -EINVAL;
 	}
 
 	if (!info->refclk) {
-		info->refclk = nv_device(bios)->crystal;
+		info->refclk = device->crystal;
 		if (bios->version.chip == 0x51) {
-			u32 sel_clk = nv_rd32(bios, 0x680524);
+			u32 sel_clk = nvkm_rd32(device, 0x680524);
 			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
 			    (info->reg == 0x680520 && sel_clk & 0x80)) {
-				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+				if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
 					info->refclk = 200000;
 				else
 					info->refclk = 25000;
@@ -380,8 +382,8 @@
 	 * with an empty limit table (seen on nv18)
 	 */
 	if (!info->vco1.max_freq) {
-		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
-		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+		info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
+		info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
 		if (bmp_version(bios) < 0x0506) {
 			info->vco1.max_freq = 256000;
 			info->vco1.min_freq = 128000;
@@ -393,7 +395,7 @@
 		info->vco1.max_n = 0xff;
 		info->vco1.min_m = 0x1;
 
-		if (nv_device(bios)->crystal == 13500) {
+		if (device->crystal == 13500) {
 			/* nv05 does this, nv11 doesn't, nv10 unknown */
 			if (bios->version.chip < 0x11)
 				info->vco1.min_m = 0x7;
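
In the v3.0/v4.0 cases above, the limits entry only stores a 16-bit pointer
(read at +0x01) to the actual record, so the parser follows one level of
indirection inside the image before reading the fields; the 16-bit frequencies
are then multiplied by 1000, presumably MHz in the table against the kHz used
elsewhere in the driver (an assumption, the units are not spelled out in this
hunk). A sketch of the indirection:

    #include <stdint.h>

    static uint16_t rd16le(const uint8_t *p, uint32_t o)
    {
            return (uint16_t)(p[o] | p[o + 1] << 8);
    }

    static uint32_t vco1_min_freq(const uint8_t *img, uint32_t entry)
    {
            uint32_t rec = rd16le(img, entry + 1); /* pointer at +0x01 */
            return rd16le(img, rec + 0) * 1000;    /* scaled, kHz assumed */
    }
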
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 20c5ce0..441ec45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -49,12 +49,12 @@
 
 	if (!bit_entry(bios, 'p', &bit_p)) {
 		if (bit_p.version == 2 && bit_p.length >= 4)
-			data = nv_ro32(bios, bit_p.offset + 0x00);
+			data = nvbios_rd32(bios, bit_p.offset + 0x00);
 		if ((data = weirdo_pointer(bios, data))) {
-			*ver = nv_ro08(bios, data + 0x00); /* maybe? */
-			*hdr = nv_ro08(bios, data + 0x01);
-			*len = nv_ro08(bios, data + 0x02);
-			*cnt = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*len = nvbios_rd08(bios, data + 0x02);
+			*cnt = nvbios_rd08(bios, data + 0x03);
 		}
 	}
 
@@ -95,8 +95,8 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	default:
-		info->type = nv_ro08(bios, data + 0x00);
-		info->data = nv_ro32(bios, data + 0x02);
+		info->type = nvbios_rd08(bios, data + 0x00);
+		info->data = nvbios_rd32(bios, data + 0x02);
 		break;
 	}
 	return data;
@@ -112,21 +112,21 @@
 	while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
 		if ( pmuE.type == type &&
 		    (data = weirdo_pointer(bios, pmuE.data))) {
-			info->init_addr_pmu = nv_ro32(bios, data + 0x08);
-			info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+			info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
+			info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
 			info->boot_addr     = data + 0x30;
-			info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
-					      nv_ro32(bios, data + 0x18);
-			info->boot_size     = nv_ro32(bios, data + 0x1c) -
-					      nv_ro32(bios, data + 0x18);
+			info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
+					      nvbios_rd32(bios, data + 0x18);
+			info->boot_size     = nvbios_rd32(bios, data + 0x1c) -
+					      nvbios_rd32(bios, data + 0x18);
 			info->code_addr     = info->boot_addr + info->boot_size;
 			info->code_addr_pmu = info->boot_addr_pmu +
 					      info->boot_size;
-			info->code_size     = nv_ro32(bios, data + 0x20);
+			info->code_size     = nvbios_rd32(bios, data + 0x20);
 			info->data_addr     = data + 0x30 +
-					      nv_ro32(bios, data + 0x24);
-			info->data_addr_pmu = nv_ro32(bios, data + 0x28);
-			info->data_size     = nv_ro32(bios, data + 0x2c);
+					      nvbios_rd32(bios, data + 0x24);
+			info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
+			info->data_size     = nvbios_rd32(bios, data + 0x2c);
 			return true;
 		}
 	}
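
The nvbios_pmuRm() hunk above derives the PMU bootstrap layout from 32-bit
words at fixed header offsets: the payload starts 0x30 bytes in, the boot
blob's size is the difference of the words at +0x1c and +0x18, and the code
section follows the boot blob directly. The arithmetic as a stand-alone
sketch, with rd32le() standing in for nvbios_rd32():

    #include <stdint.h>

    static uint32_t rd32le(const uint8_t *p, uint32_t o)
    {
            return p[o] | p[o + 1] << 8 | p[o + 2] << 16 |
                   (uint32_t)p[o + 3] << 24;
    }

    struct pmu_layout {
            uint32_t boot_addr, boot_size;
            uint32_t code_addr, code_size;
    };

    static void pmu_layout(const uint8_t *img, uint32_t data,
                           struct pmu_layout *l)
    {
            l->boot_addr = data + 0x30;                 /* payload offset */
            l->boot_size = rd32le(img, data + 0x1c) -
                           rd32le(img, data + 0x18);
            l->code_addr = l->boot_addr + l->boot_size; /* code follows boot */
            l->code_size = rd32le(img, data + 0x20);
    }
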
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 95e4fa1..e0ec2a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVKM_BIOS_PRIV_H__
 #define __NVKM_BIOS_PRIV_H__
+#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
 #include <subdev/bios.h>
 
 struct nvbios_source {
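
The new nvkm_bios(p) macro in priv.h is the standard container_of pattern:
given a pointer to the embedded subdev member, subtract its offset to recover
the enclosing nvkm_bios. A stand-alone illustration with simplified types (the
kernel's container_of adds type checking on top of this):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct subdev { int index; };
    struct bios   { int size; struct subdev subdev; };

    static struct bios *to_bios(struct subdev *s)
    {
            return container_of(s, struct bios, subdev);
    }
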
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index a17b221..d5222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -29,7 +29,7 @@
 static u8
 nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
 {
-	return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
+	return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
 }
 
 u8
@@ -39,9 +39,9 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			return nv_ro08(bios, bit_M.offset + 2);
+			return nvbios_rd08(bios, bit_M.offset + 2);
 		if (bit_M.version == 2 && bit_M.length >= 3)
-			return nv_ro08(bios, bit_M.offset + 0);
+			return nvbios_rd08(bios, bit_M.offset + 0);
 	}
 
 	return 0x00;
@@ -50,7 +50,7 @@
 u8
 nvbios_ramcfg_index(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8 strap = nvbios_ramcfg_strap(subdev);
 	u32 xlat = 0x00000000;
 	struct bit_entry bit_M;
@@ -59,7 +59,7 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			xlat = nv_ro16(bios, bit_M.offset + 3);
+			xlat = nvbios_rd16(bios, bit_M.offset + 3);
 		if (bit_M.version == 2 && bit_M.length >= 3) {
 			/*XXX: is M ever shorter than this?
 			 *     if not - what is xlat used for now?
@@ -68,11 +68,11 @@
 			if (bit_M.length >= 7 &&
 			    nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
 				return M0203E.group;
-			xlat = nv_ro16(bios, bit_M.offset + 1);
+			xlat = nvbios_rd16(bios, bit_M.offset + 1);
 		}
 	}
 
 	if (xlat)
-		strap = nv_ro08(bios, xlat + strap);
+		strap = nvbios_rd08(bios, xlat + strap);
 	return strap;
 }
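
nvbios_ramcfg_index() above reduces to: read a 4-bit strap from bits 5:2 of
register 0x101000, then optionally remap it through a one-byte-per-strap
translation table located via the BIT 'M' entry. Isolated:

    #include <stdint.h>

    static uint8_t ramcfg_index(uint32_t reg_101000,
                                const uint8_t *img, uint16_t xlat)
    {
            uint8_t strap = (reg_101000 & 0x0000003c) >> 2;
            if (xlat)                        /* translation table present */
                    strap = img[xlat + strap];
            return strap;
    }
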
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index 8b17bb4..f0e1fc74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -34,18 +34,18 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			rammap = nv_ro16(bios, bit_P.offset + 4);
+			rammap = nvbios_rd16(bios, bit_P.offset + 4);
 
 		if (rammap) {
-			*ver = nv_ro08(bios, rammap + 0);
+			*ver = nvbios_rd08(bios, rammap + 0);
 			switch (*ver) {
 			case 0x10:
 			case 0x11:
-				*hdr = nv_ro08(bios, rammap + 1);
-				*cnt = nv_ro08(bios, rammap + 5);
-				*len = nv_ro08(bios, rammap + 2);
-				*snr = nv_ro08(bios, rammap + 4);
-				*ssz = nv_ro08(bios, rammap + 3);
+				*hdr = nvbios_rd08(bios, rammap + 1);
+				*cnt = nvbios_rd08(bios, rammap + 5);
+				*len = nvbios_rd08(bios, rammap + 2);
+				*snr = nvbios_rd08(bios, rammap + 4);
+				*ssz = nvbios_rd08(bios, rammap + 3);
 				return rammap;
 			default:
 				break;
@@ -72,6 +72,21 @@
 	return 0x0000;
 }
 
+/* Pretend a performance mode is also a rammap entry; this helps coalesce
+ * entries later on. */
+u32
+nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		struct nvbios_ramcfg *p)
+{
+	memset(p, 0x00, sizeof(*p));
+
+	p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
+	p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
+	p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
+
+	return data;
+}
+
 u32
 nvbios_rammapEp(struct nvkm_bios *bios, int idx,
 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
@@ -82,18 +97,18 @@
 	p->rammap_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
-		p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+		p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
 		break;
 	case 0x11:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
-		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		temp = nv_ro32(bios, data + 0x09);
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
+		p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		temp = nvbios_rd32(bios, data + 0x09);
 		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
 		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
 		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
@@ -102,10 +117,10 @@
 		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
 		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
 		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
-		p->rammap_11_0d    =  nv_ro08(bios, data + 0x0d);
-		p->rammap_11_0e    =  nv_ro08(bios, data + 0x0e);
-		p->rammap_11_0f    =  nv_ro08(bios, data + 0x0f);
-		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+		p->rammap_11_0d    =  nvbios_rd08(bios, data + 0x0d);
+		p->rammap_11_0e    =  nvbios_rd08(bios, data + 0x0e);
+		p->rammap_11_0f    =  nvbios_rd08(bios, data + 0x0f);
+		p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
 		break;
 	default:
 		data = 0;
@@ -141,6 +156,36 @@
 }
 
 u32
+nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		struct nvbios_ramcfg *p)
+{
+	data += (idx * size);
+
+	if (size < 11)
+		return 0x00000000;
+
+	p->ramcfg_ver = 0;
+	p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+	p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
+	p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
+	p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
+	p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+	p->ramcfg_RON      = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 3;
+	p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+	p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
+	p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
+	p->ramcfg_00_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+	p->ramcfg_00_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+	p->ramcfg_00_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+	p->ramcfg_00_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+	p->ramcfg_00_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
+	p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
+	p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
+
+	return data;
+}
+
+u32
 nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
 		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
@@ -150,58 +195,58 @@
 	p->ramcfg_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x01);
-		p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0;
-		p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1;
-		p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
-		p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
-		p->ramcfg_10_05    = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
-		p->ramcfg_10_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_10_07    = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
-		p->ramcfg_10_08    = (nv_ro08(bios, data + 0x08) & 0xff) >> 0;
-		p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0;
-		p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+		p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
+		p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
+		p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
+		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
+		p->ramcfg_10_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+		p->ramcfg_10_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_10_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+		p->ramcfg_10_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+		p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
+		p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
 		break;
 	case 0x11:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x00);
-		p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
-		p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
-		p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
-		p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
-		p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
-		p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
-		p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
-		p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
-		p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
-		p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
-		p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
-		p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
-		p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
-		p->ramcfg_11_04    = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
-		p->ramcfg_11_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
-		p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
-		p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
-		p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
-		p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
-		p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
-		p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
-		p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
-		p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
-		p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
-		p->ramcfg_11_09    = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x00);
+		p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
+		p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
+		p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
+		p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
+		p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
+		p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
+		p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
+		p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
+		p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
+		p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
+		p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
+		p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
+		p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
+		p->ramcfg_11_04    = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
+		p->ramcfg_11_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
+		p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
+		p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
+		p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
+		p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
+		p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
+		p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
+		p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
+		p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
+		p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
+		p->ramcfg_11_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
 		break;
 	default:
 		data = 0;
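
The rammap/ramcfg parsers above are long runs of a single pattern: mask one
field out of a byte, shift it down to bit 0, and store it under a name that
encodes the byte offset and mask (ramcfg_11_07_10 is the byte at +0x07, mask
0x10). The generic form:

    #include <stdint.h>

    /* extract (byte at data+off & mask) >> shift from a table entry */
    static uint8_t bios_field(const uint8_t *img, uint32_t data,
                              uint32_t off, uint8_t mask, uint8_t shift)
    {
            return (uint8_t)((img[data + off] & mask) >> shift);
    }

    /* e.g. ramcfg_11_07_10: bios_field(img, data, 0x07, 0x10, 4) */
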
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 8c2b7cb..792f017 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -23,13 +23,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/image.h>
 
 struct shadow {
-	struct nvkm_oclass base;
 	u32 skip;
 	const struct nvbios_source *func;
 	void *data;
@@ -38,9 +36,8 @@
 };
 
 static bool
-shadow_fetch(struct nvkm_bios *bios, u32 upto)
+shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
 {
-	struct shadow *mthd = (void *)nv_object(bios)->oclass;
 	const u32 limit = (upto + 3) & ~3;
 	const u32 start = bios->size;
 	void *data = mthd->data;
@@ -51,65 +48,35 @@
 	return bios->size >= limit;
 }
 
-static u8
-shadow_rd08(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 1))
-		return bios->data[addr];
-	return 0x00;
-}
-
-static u16
-shadow_rd16(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 2))
-		return get_unaligned_le16(&bios->data[addr]);
-	return 0x0000;
-}
-
-static u32
-shadow_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 4))
-		return get_unaligned_le32(&bios->data[addr]);
-	return 0x00000000;
-}
-
-static struct nvkm_oclass
-shadow_class = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.rd08 = shadow_rd08,
-		.rd16 = shadow_rd16,
-		.rd32 = shadow_rd32,
-	},
-};
-
 static int
-shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
+shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_image image;
 	int score = 1;
 
-	if (!nvbios_image(bios, idx, &image)) {
-		nv_debug(bios, "image %d invalid\n", idx);
+	if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+		nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
 		return 0;
 	}
-	nv_debug(bios, "%08x: type %02x, %d bytes\n",
-		 image.base, image.type, image.size);
 
-	if (!shadow_fetch(bios, image.size)) {
-		nv_debug(bios, "%08x: fetch failed\n", image.base);
+	if (!nvbios_image(bios, idx, &image)) {
+		nvkm_debug(subdev, "image %d invalid\n", idx);
+		return 0;
+	}
+	nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
+		   image.base, image.type, image.size);
+
+	if (!shadow_fetch(bios, mthd, image.size)) {
+		nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
 		return 0;
 	}
 
 	switch (image.type) {
 	case 0x00:
 		if (nvbios_checksum(&bios->data[image.base], image.size)) {
-			nv_debug(bios, "%08x: checksum failed\n", image.base);
+			nvkm_debug(subdev, "%08x: checksum failed\n",
+				   image.base);
 			if (mthd->func->rw)
 				score += 1;
 			score += 1;
@@ -123,28 +90,17 @@
 	}
 
 	if (!image.last)
-		score += shadow_image(bios, idx + 1, mthd);
+		score += shadow_image(bios, idx + 1, offset + image.size, mthd);
 	return score;
 }
 
 static int
-shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
-{
-	struct nvkm_oclass *oclass = nv_object(bios)->oclass;
-	int score;
-	nv_object(bios)->oclass = &mthd->base;
-	score = shadow_image(bios, 0, mthd);
-	nv_object(bios)->oclass = oclass;
-	return score;
-
-}
-
-static int
 shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
 {
 	const struct nvbios_source *func = mthd->func;
+	struct nvkm_subdev *subdev = &bios->subdev;
 	if (func->name) {
-		nv_debug(bios, "trying %s...\n", name ? name : func->name);
+		nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
 		if (func->init) {
 			mthd->data = func->init(bios, name);
 			if (IS_ERR(mthd->data)) {
@@ -152,10 +108,10 @@
 				return 0;
 			}
 		}
-		mthd->score = shadow_score(bios, mthd);
+		mthd->score = shadow_image(bios, 0, 0, mthd);
 		if (func->fini)
 			func->fini(mthd->data);
-		nv_debug(bios, "scored %d\n", mthd->score);
+		nvkm_debug(subdev, "scored %d\n", mthd->score);
 		mthd->data = bios->data;
 		mthd->size = bios->size;
 		bios->data  = NULL;
@@ -178,7 +134,7 @@
 static void *
 shadow_fw_init(struct nvkm_bios *bios, const char *name)
 {
-	struct device *dev = &nv_device(bios)->pdev->dev;
+	struct device *dev = bios->subdev.device->dev;
 	const struct firmware *fw;
 	int ret = request_firmware(&fw, name, dev);
 	if (ret)
@@ -198,22 +154,24 @@
 int
 nvbios_shadow(struct nvkm_bios *bios)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct shadow mthds[] = {
-		{ shadow_class, 0, &nvbios_of },
-		{ shadow_class, 0, &nvbios_ramin },
-		{ shadow_class, 0, &nvbios_rom },
-		{ shadow_class, 0, &nvbios_acpi_fast },
-		{ shadow_class, 4, &nvbios_acpi_slow },
-		{ shadow_class, 1, &nvbios_pcirom },
-		{ shadow_class, 1, &nvbios_platform },
-		{ shadow_class }
-	}, *mthd = mthds, *best = NULL;
+		{ 0, &nvbios_of },
+		{ 0, &nvbios_ramin },
+		{ 0, &nvbios_rom },
+		{ 0, &nvbios_acpi_fast },
+		{ 4, &nvbios_acpi_slow },
+		{ 1, &nvbios_pcirom },
+		{ 1, &nvbios_platform },
+		{}
+	}, *mthd, *best = NULL;
 	const char *optarg;
 	char *source;
 	int optlen;
 
 	/* handle user-specified bios source */
-	optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+	optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
 	source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
 	if (source) {
 		/* try to match one of the built-in methods */
@@ -234,7 +192,7 @@
 		}
 
 		if (!best->score) {
-			nv_error(bios, "%s invalid\n", source);
+			nvkm_error(subdev, "%s invalid\n", source);
 			kfree(source);
 			source = NULL;
 		}
@@ -259,12 +217,12 @@
 	}
 
 	if (!best->score) {
-		nv_fatal(bios, "unable to locate usable image\n");
+		nvkm_error(subdev, "unable to locate usable image\n");
 		return -EINVAL;
 	}
 
-	nv_info(bios, "using image from %s\n", best->func ?
-		best->func->name : source);
+	nvkm_debug(subdev, "using image from %s\n", best->func ?
+		   best->func->name : source);
 	bios->data = best->data;
 	bios->size = best->size;
 	kfree(source);
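
nvbios_shadow() keeps the pre-rework strategy: try each source in turn, score
the image it yields (structure validity, checksum, whether the source is also
writable), and keep the best-scoring one. The selection loop in miniature,
with the scoring itself elided and stand-in types:

    #include <stddef.h>

    struct method {
            const char *name;
            int (*try_shadow)(void);   /* returns a score, 0 = unusable */
            int score;
    };

    static struct method *pick_best(struct method *m, size_t n)
    {
            struct method *best = NULL;
            for (size_t i = 0; i < n; i++) {
                    m[i].score = m[i].try_shadow();
                    if (!best || m[i].score > best->score)
                            best = &m[i];
            }
            return (best && best->score) ? best : NULL;  /* NULL: no image */
    }
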
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index f9d0eb5..8fecb5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,14 +22,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86)
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 #else
 static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
 {
 	return false;
 }
@@ -90,7 +88,7 @@
 static void *
 acpi_init(struct nvkm_bios *bios, const char *name)
 {
-	if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+	if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
 		return ERR_PTR(-ENODEV);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4c19a7d..bd60d7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -21,8 +21,7 @@
  *
  */
 #include "priv.h"
-
-#include <core/device.h>
+#include <core/pci.h>
 
 #if defined(__powerpc__)
 struct priv {
@@ -44,7 +43,7 @@
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
 	struct device_node *dn;
 	struct priv *priv;
 	if (!(dn = pci_device_to_OF_node(pdev)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1b04548..9b91da0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -22,7 +22,7 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <core/pci.h>
 
 struct priv {
 	struct pci_dev *pdev;
@@ -53,10 +53,16 @@
 static void *
 pcirom_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
 	struct priv *priv = NULL;
+	struct pci_dev *pdev;
 	int ret;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if (!(ret = pci_enable_rom(pdev))) {
 		if (ret = -ENOMEM,
 		    (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
@@ -85,10 +91,16 @@
 static void *
 platform_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev;
 	struct priv *priv;
 	int ret = -ENOMEM;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
 		if (ret = -ENODEV,
 		    (priv->rom = pci_platform_rom(pdev, &priv->size)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index abe8ae4..0f537c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -22,8 +22,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 struct priv {
 	struct nvkm_bios *bios;
 	u32 bar0;
@@ -32,10 +30,11 @@
 static u32
 pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = bios->subdev.device;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
 		return length;
 	}
 	return 0;
@@ -46,7 +45,8 @@
 {
 	struct priv *priv = data;
 	if (priv) {
-		nv_wr32(priv->bios, 0x001700, priv->bar0);
+		struct nvkm_device *device = priv->bios->subdev.device;
+		nvkm_wr32(device, 0x001700, priv->bar0);
 		kfree(priv);
 	}
 }
@@ -54,21 +54,23 @@
 static void *
 pramin_init(struct nvkm_bios *bios, const char *name)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct priv *priv = NULL;
 	u64 addr = 0;
 
 	/* PRAMIN always potentially available prior to nv50 */
-	if (nv_device(bios)->card_type < NV_50)
+	if (device->card_type < NV_50)
 		return NULL;
 
 	/* we can't get the bios image pointer without PDISP */
-	if (nv_device(bios)->card_type >= GM100)
-		addr = nv_rd32(bios, 0x021c04);
+	if (device->card_type >= GM100)
+		addr = nvkm_rd32(device, 0x021c04);
 	else
-	if (nv_device(bios)->card_type >= NV_C0)
-		addr = nv_rd32(bios, 0x022500);
+	if (device->card_type >= NV_C0)
+		addr = nvkm_rd32(device, 0x022500);
 	if (addr & 0x00000001) {
-		nv_debug(bios, "... display disabled\n");
+		nvkm_debug(subdev, "... display disabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -76,32 +78,32 @@
 	 * important as we don't want to be touching vram on an
 	 * uninitialised board
 	 */
-	addr = nv_rd32(bios, 0x619f04);
+	addr = nvkm_rd32(device, 0x619f04);
 	if (!(addr & 0x00000008)) {
-		nv_debug(bios, "... not enabled\n");
+		nvkm_debug(subdev, "... not enabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 	if ( (addr & 0x00000003) != 1) {
-		nv_debug(bios, "... not in vram\n");
+		nvkm_debug(subdev, "... not in vram\n");
 		return ERR_PTR(-ENODEV);
 	}
 
 	/* some alternate method inherited from xf86-video-nv... */
 	addr = (addr & 0xffffff00) << 8;
 	if (!addr) {
-		addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+		addr  = (u64)nvkm_rd32(device, 0x001700) << 16;
 		addr += 0xf0000;
 	}
 
 	/* modify bar0 PRAMIN window to cover the bios image */
 	if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
-		nv_error(bios, "... out of memory\n");
+		nvkm_error(subdev, "... out of memory\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	priv->bios = bios;
-	priv->bar0 = nv_rd32(bios, 0x001700);
-	nv_wr32(bios, 0x001700, addr >> 16);
+	priv->bar0 = nvkm_rd32(device, 0x001700);
+	nvkm_wr32(device, 0x001700, addr >> 16);
 	return priv;
 }
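
The PRAMIN method shadows the image by re-pointing the BAR0 PRAMIN aperture at
the VBIOS copy in VRAM: register 0x001700 holds the window base in 64KiB units
(hence the >> 16 and << 16 above), the bytes are then read through
0x700000 + offset, and fini restores the saved window. The sequence as a
sketch, with extern stubs standing in for the MMIO helpers used above:

    #include <stdint.h>

    extern uint32_t nvkm_rd32(void *device, uint32_t reg);
    extern void     nvkm_wr32(void *device, uint32_t reg, uint32_t val);

    static void pramin_window(void *device, uint64_t img_addr)
    {
            uint32_t saved = nvkm_rd32(device, 0x001700); /* save window */
            nvkm_wr32(device, 0x001700, img_addr >> 16);  /* point at image */
            /* ... image visible at 0x700000 + offset ... */
            nvkm_wr32(device, 0x001700, saved);           /* restore */
    }
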
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index 6ec3b23..ffa4b39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -22,15 +22,16 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <subdev/pci.h>
 
 static u32
 prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = data;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);
 		return length;
 	}
 	return 0;
@@ -39,25 +40,18 @@
 static void
 prom_fini(void *data)
 {
-	struct nvkm_bios *bios = data;
-	if (nv_device(bios)->card_type < NV_50)
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
-	else
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+	struct nvkm_device *device = data;
+	nvkm_pci_rom_shadow(device->pci, true);
 }
 
 static void *
 prom_init(struct nvkm_bios *bios, const char *name)
 {
-	if (nv_device(bios)->card_type < NV_50) {
-		if (nv_device(bios)->card_type == NV_40 &&
-		    nv_device(bios)->chipset >= 0x4c)
-			return ERR_PTR(-ENODEV);
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
-	} else {
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
-	}
-	return bios;
+	struct nvkm_device *device = bios->subdev.device;
+	if (device->card_type == NV_40 && device->chipset >= 0x4c)
+		return ERR_PTR(-ENODEV);
+	nvkm_pci_rom_shadow(device->pci, false);
+	return device;
 }
 
 const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index 249ff6d..a54cfec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/therm.h>
 
-#include <core/device.h>
-
 static u16
 therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
 {
@@ -35,24 +33,24 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			therm = nv_ro16(bios, bit_P.offset + 12);
+			therm = nvbios_rd16(bios, bit_P.offset + 12);
 		else if (bit_P.version == 2)
-			therm = nv_ro16(bios, bit_P.offset + 16);
+			therm = nvbios_rd16(bios, bit_P.offset + 16);
 		else
-			nv_error(bios,
-				"unknown offset for thermal in BIT P %d\n",
-				bit_P.version);
+			nvkm_error(&bios->subdev,
+				   "unknown offset for thermal in BIT P %d\n",
+				   bit_P.version);
 	}
 
 	/* exit now if we haven't found the thermal table */
 	if (!therm)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, therm + 0);
-	*hdr = nv_ro08(bios, therm + 1);
-	*len = nv_ro08(bios, therm + 2);
-	*cnt = nv_ro08(bios, therm + 3);
-	return therm + nv_ro08(bios, therm + 1);
+	*ver = nvbios_rd08(bios, therm + 0);
+	*hdr = nvbios_rd08(bios, therm + 1);
+	*len = nvbios_rd08(bios, therm + 2);
+	*cnt = nvbios_rd08(bios, therm + 3);
+	return therm + nvbios_rd08(bios, therm + 1);
 }
 
 static u16
@@ -83,9 +81,9 @@
 	sensor_section = -1;
 	i = 0;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x0:
 			thrs_section = value;
 			if (value > 0)
@@ -94,7 +92,7 @@
 		case 0x01:
 			sensor_section++;
 			if (sensor_section == 0) {
-				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+				offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
 				sensor->offset_constant = offset;
 			}
 			break;
@@ -165,9 +163,9 @@
 	fan->nr_fan_trip = 0;
 	fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x22:
 			fan->min_duty = value & 0xff;
 			fan->max_duty = (value & 0xff00) >> 8;
@@ -198,14 +196,14 @@
 		case 0x46:
 			if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
 				fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
-			fan->linear_min_temp = nv_ro08(bios, entry + 1);
-			fan->linear_max_temp = nv_ro08(bios, entry + 2);
+			fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
+			fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
 			break;
 		}
 	}
 
 	/* starting from fermi, fan management is always linear */
-	if (nv_device(bios)->card_type >= NV_C0 &&
+	if (bios->subdev.device->card_type >= NV_C0 &&
 		fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
 		fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
 	}
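
The thermal table walked above is a sequence of records consisting of an id
byte followed by a signed 16-bit little-endian value; some ids pack sub-fields
instead, e.g. the type-0x01 sensor record stores its offset as a signed byte
in half-degree steps. Decoding one record, stand-alone:

    #include <stdint.h>

    struct therm_rec {
            uint8_t id;
            int16_t value;
    };

    static struct therm_rec therm_entry(const uint8_t *img, uint32_t e)
    {
            struct therm_rec r;
            r.id    = img[e + 0];
            r.value = (int16_t)(img[e + 1] | img[e + 2] << 8);
            return r;
    }

    /* type 0x01: offset is a signed byte in half-degrees Celsius */
    static int sensor_offset_c(const uint8_t *img, uint32_t e)
    {
            return ((int8_t)img[e + 2]) / 2;
    }
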
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 763fd29..99f6432 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -34,27 +34,27 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			timing = nv_ro16(bios, bit_P.offset + 4);
+			timing = nvbios_rd16(bios, bit_P.offset + 4);
 		else
 		if (bit_P.version == 2)
-			timing = nv_ro16(bios, bit_P.offset + 8);
+			timing = nvbios_rd16(bios, bit_P.offset + 8);
 
 		if (timing) {
-			*ver = nv_ro08(bios, timing + 0);
+			*ver = nvbios_rd08(bios, timing + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 2);
-				*len = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 2);
+				*len = nvbios_rd08(bios, timing + 3);
 				*snr = 0;
 				*ssz = 0;
 				return timing;
 			case 0x20:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 5);
-				*len = nv_ro08(bios, timing + 2);
-				*snr = nv_ro08(bios, timing + 4);
-				*ssz = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 5);
+				*len = nvbios_rd08(bios, timing + 2);
+				*snr = nvbios_rd08(bios, timing + 4);
+				*ssz = nvbios_rd08(bios, timing + 3);
 				return timing;
 			default:
 				break;
@@ -90,18 +90,20 @@
 	p->timing_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->timing_10_WR    = nv_ro08(bios, data + 0x00);
-		p->timing_10_WTR   = nv_ro08(bios, data + 0x01);
-		p->timing_10_CL    = nv_ro08(bios, data + 0x02);
-		p->timing_10_RC    = nv_ro08(bios, data + 0x03);
-		p->timing_10_RFC   = nv_ro08(bios, data + 0x05);
-		p->timing_10_RAS   = nv_ro08(bios, data + 0x07);
-		p->timing_10_RP    = nv_ro08(bios, data + 0x09);
-		p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
-		p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
-		p->timing_10_RRD   = nv_ro08(bios, data + 0x0c);
-		p->timing_10_13    = nv_ro08(bios, data + 0x0d);
-		p->timing_10_ODT   = nv_ro08(bios, data + 0x0e) & 0x07;
+		p->timing_10_WR    = nvbios_rd08(bios, data + 0x00);
+		p->timing_10_WTR   = nvbios_rd08(bios, data + 0x01);
+		p->timing_10_CL    = nvbios_rd08(bios, data + 0x02);
+		p->timing_10_RC    = nvbios_rd08(bios, data + 0x03);
+		p->timing_10_RFC   = nvbios_rd08(bios, data + 0x05);
+		p->timing_10_RAS   = nvbios_rd08(bios, data + 0x07);
+		p->timing_10_RP    = nvbios_rd08(bios, data + 0x09);
+		p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
+		p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
+		p->timing_10_RRD   = nvbios_rd08(bios, data + 0x0c);
+		p->timing_10_13    = nvbios_rd08(bios, data + 0x0d);
+		p->timing_10_ODT   = nvbios_rd08(bios, data + 0x0e) & 0x07;
+		if (p->ramcfg_ver >= 0x10)
+			p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
 
 		p->timing_10_24  = 0xff;
 		p->timing_10_21  = 0;
@@ -112,45 +114,45 @@
 
 		switch (min_t(u8, *hdr, 25)) {
 		case 25:
-			p->timing_10_24  = nv_ro08(bios, data + 0x18);
+			p->timing_10_24  = nvbios_rd08(bios, data + 0x18);
 		case 24:
 		case 23:
 		case 22:
-			p->timing_10_21  = nv_ro08(bios, data + 0x15);
+			p->timing_10_21  = nvbios_rd08(bios, data + 0x15);
 		case 21:
-			p->timing_10_20  = nv_ro08(bios, data + 0x14);
+			p->timing_10_20  = nvbios_rd08(bios, data + 0x14);
 		case 20:
-			p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+			p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
 		case 19:
-			p->timing_10_18  = nv_ro08(bios, data + 0x12);
+			p->timing_10_18  = nvbios_rd08(bios, data + 0x12);
 		case 18:
 		case 17:
-			p->timing_10_16  = nv_ro08(bios, data + 0x10);
+			p->timing_10_16  = nvbios_rd08(bios, data + 0x10);
 		}
 
 		break;
 	case 0x20:
-		p->timing[0] = nv_ro32(bios, data + 0x00);
-		p->timing[1] = nv_ro32(bios, data + 0x04);
-		p->timing[2] = nv_ro32(bios, data + 0x08);
-		p->timing[3] = nv_ro32(bios, data + 0x0c);
-		p->timing[4] = nv_ro32(bios, data + 0x10);
-		p->timing[5] = nv_ro32(bios, data + 0x14);
-		p->timing[6] = nv_ro32(bios, data + 0x18);
-		p->timing[7] = nv_ro32(bios, data + 0x1c);
-		p->timing[8] = nv_ro32(bios, data + 0x20);
-		p->timing[9] = nv_ro32(bios, data + 0x24);
-		p->timing[10] = nv_ro32(bios, data + 0x28);
-		p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
-		p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
-		p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
-		p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
-		temp = nv_ro16(bios, data + 0x2c);
+		p->timing[0] = nvbios_rd32(bios, data + 0x00);
+		p->timing[1] = nvbios_rd32(bios, data + 0x04);
+		p->timing[2] = nvbios_rd32(bios, data + 0x08);
+		p->timing[3] = nvbios_rd32(bios, data + 0x0c);
+		p->timing[4] = nvbios_rd32(bios, data + 0x10);
+		p->timing[5] = nvbios_rd32(bios, data + 0x14);
+		p->timing[6] = nvbios_rd32(bios, data + 0x18);
+		p->timing[7] = nvbios_rd32(bios, data + 0x1c);
+		p->timing[8] = nvbios_rd32(bios, data + 0x20);
+		p->timing[9] = nvbios_rd32(bios, data + 0x24);
+		p->timing[10] = nvbios_rd32(bios, data + 0x28);
+		p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
+		p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
+		p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
+		p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
+		temp = nvbios_rd16(bios, data + 0x2c);
 		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
 		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
-		p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
-		p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
-		temp = nv_ro16(bios, data + 0x31);
+		p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
+		p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
+		temp = nvbios_rd16(bios, data + 0x31);
 		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
 		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
 		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
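
The parsing helpers above all follow the same nvbios convention: the first bytes of a table give its version, header size, entry count, and entry stride, and entry 'idx' lives at table + hdr + idx * len (the exact byte order of cnt/len varies per table version, as the 0x10/0x20 cases show). A minimal standalone sketch of that walk, with hypothetical names and the simple ver/hdr/cnt/len ordering assumed:

    #include <stdint.h>
    #include <stdio.h>

    /* Locate entry 'idx' of a table laid out as ver/hdr/cnt/len;
     * returns 0 when the table is absent or the index out of range. */
    static uint16_t
    table_entry(const uint8_t *bios, uint16_t table, int idx, uint8_t *len)
    {
            uint8_t hdr, cnt;
            if (!table)
                    return 0;
            hdr  = bios[table + 1];
            cnt  = bios[table + 2];
            *len = bios[table + 3];
            if (idx >= cnt)
                    return 0;
            return table + hdr + idx * *len;
    }

    int main(void)
    {
            /* table at offset 2: v1.0, 4-byte header, 2 entries x 2 bytes */
            const uint8_t bios[] = { 0, 0, 0x10, 0x04, 0x02, 0x02,
                                     0xaa, 0xbb, 0xcc, 0xdd };
            uint8_t len;
            uint16_t entry = table_entry(bios, 2, 1, &len);
            printf("entry 1 at %u, first byte %02x\n",
                   (unsigned)entry, (unsigned)bios[entry]);
            return 0;
    }
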
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index e95b69f..2f13db7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -33,15 +33,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2) {
-			vmap = nv_ro16(bios, bit_P.offset + 0x20);
+			vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
 			if (vmap) {
-				*ver = nv_ro08(bios, vmap + 0);
+				*ver = nvbios_rd08(bios, vmap + 0);
 				switch (*ver) {
 				case 0x10:
 				case 0x20:
-					*hdr = nv_ro08(bios, vmap + 1);
-					*cnt = nv_ro08(bios, vmap + 3);
-					*len = nv_ro08(bios, vmap + 2);
+					*hdr = nvbios_rd08(bios, vmap + 1);
+					*cnt = nvbios_rd08(bios, vmap + 3);
+					*len = nvbios_rd08(bios, vmap + 2);
 					return vmap;
 				default:
 					break;
@@ -88,23 +88,23 @@
 	switch (!!vmap * *ver) {
 	case 0x10:
 		info->link   = 0xff;
-		info->min    = nv_ro32(bios, vmap + 0x00);
-		info->max    = nv_ro32(bios, vmap + 0x04);
-		info->arg[0] = nv_ro32(bios, vmap + 0x08);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
-		info->arg[2] = nv_ro32(bios, vmap + 0x10);
+		info->min    = nvbios_rd32(bios, vmap + 0x00);
+		info->max    = nvbios_rd32(bios, vmap + 0x04);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
 		break;
 	case 0x20:
-		info->unk0   = nv_ro08(bios, vmap + 0x00);
-		info->link   = nv_ro08(bios, vmap + 0x01);
-		info->min    = nv_ro32(bios, vmap + 0x02);
-		info->max    = nv_ro32(bios, vmap + 0x06);
-		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
-		info->arg[2] = nv_ro32(bios, vmap + 0x12);
-		info->arg[3] = nv_ro32(bios, vmap + 0x16);
-		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
-		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+		info->unk0   = nvbios_rd08(bios, vmap + 0x00);
+		info->link   = nvbios_rd08(bios, vmap + 0x01);
+		info->min    = nvbios_rd32(bios, vmap + 0x02);
+		info->max    = nvbios_rd32(bios, vmap + 0x06);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
+		info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
+		info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
+		info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
 		break;
 	}
 	return vmap;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 8454ab7..615804c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -33,30 +33,30 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			volt = nv_ro16(bios, bit_P.offset + 0x0c);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
 		else
 		if (bit_P.version == 1)
-			volt = nv_ro16(bios, bit_P.offset + 0x10);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x10);
 
 		if (volt) {
-			*ver = nv_ro08(bios, volt + 0);
+			*ver = nvbios_rd08(bios, volt + 0);
 			switch (*ver) {
 			case 0x12:
 				*hdr = 5;
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 1);
 				return volt;
 			case 0x20:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 3);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 3);
 				return volt;
 			case 0x30:
 			case 0x40:
 			case 0x50:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 3);
-				*len = nv_ro08(bios, volt + 2);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 3);
+				*len = nvbios_rd08(bios, volt + 2);
 				return volt;
 			}
 		}
@@ -73,28 +73,28 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!volt * *ver) {
 	case 0x12:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x20:
-		info->vidmask = nv_ro08(bios, volt + 0x05);
+		info->vidmask = nvbios_rd08(bios, volt + 0x05);
 		break;
 	case 0x30:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x40:
-		info->base    = nv_ro32(bios, volt + 0x04);
-		info->step    = nv_ro16(bios, volt + 0x08);
-		info->vidmask = nv_ro08(bios, volt + 0x0b);
+		info->base    = nvbios_rd32(bios, volt + 0x04);
+		info->step    = nvbios_rd16(bios, volt + 0x08);
+		info->vidmask = nvbios_rd08(bios, volt + 0x0b);
 		/*XXX*/
 		info->min     = 0;
 		info->max     = info->base;
 		break;
 	case 0x50:
-		info->vidmask = nv_ro08(bios, volt + 0x06);
-		info->min     = nv_ro32(bios, volt + 0x0a);
-		info->max     = nv_ro32(bios, volt + 0x0e);
-		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
-		info->step    = nv_ro16(bios, volt + 0x16);
+		info->vidmask = nvbios_rd08(bios, volt + 0x06);
+		info->min     = nvbios_rd32(bios, volt + 0x0a);
+		info->max     = nvbios_rd32(bios, volt + 0x0e);
+		info->base    = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
+		info->step    = nvbios_rd16(bios, volt + 0x16);
 		break;
 	}
 	return volt;
@@ -121,12 +121,12 @@
 	switch (!!volt * *ver) {
 	case 0x12:
 	case 0x20:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01);
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01);
 		break;
 	case 0x30:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01) >> 2;
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01) >> 2;
 		break;
 	case 0x40:
 	case 0x50:
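
One idiom worth noting in nvbios_volt_parse() and its siblings: switch (!!volt * *ver). Because !!volt collapses the table offset to 0 or 1, a missing table multiplies the version down to zero, no case matches, and the absent-table path needs no separate branch. A self-contained illustration with toy values, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    static void parse(uint16_t volt, uint8_t ver)
    {
            /* volt == 0 forces the switch value to 0: no case matches,
             * whatever stale version byte 'ver' might hold. */
            switch (!!volt * ver) {
            case 0x12:
                    printf("v1.2 table at %04x\n", (unsigned)volt);
                    break;
            case 0x50:
                    printf("v5.0 table at %04x\n", (unsigned)volt);
                    break;
            default:
                    printf("no usable table\n");
                    break;
            }
    }

    int main(void)
    {
            parse(0x6100, 0x50);    /* found, version 0x50 */
            parse(0x0000, 0x50);    /* absent: takes the default path */
            return 0;
    }
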
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index 63a5e1b..250fc42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -30,12 +30,12 @@
 {
 	u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
 	if (data && *ver >= 0x40 && *hdr >= 0x06) {
-		u16 xpio = nv_ro16(bios, data + 0x04);
+		u16 xpio = nvbios_rd16(bios, data + 0x04);
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -48,12 +48,12 @@
 {
 	u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+		u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -66,9 +66,9 @@
 {
 	u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
 	if (data && *len >= 6) {
-		info->type = nv_ro08(bios, data + 0x04);
-		info->addr = nv_ro08(bios, data + 0x05);
-		info->flags = nv_ro08(bios, data + 0x06);
+		info->type = nvbios_rd08(bios, data + 0x04);
+		info->addr = nvbios_rd08(bios, data + 0x05);
+		info->flags = nvbios_rd08(bios, data + 0x06);
 	}
 	return 0x0000;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 83d80b1..5fa9e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,3 +1,4 @@
+nvkm-y += nvkm/subdev/bus/base.o
 nvkm-y += nvkm/subdev/bus/hwsq.o
 nvkm-y += nvkm/subdev/bus/nv04.o
 nvkm-y += nvkm/subdev/bus/nv31.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
new file mode 100644
index 0000000..dc5a10f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+nvkm_bus_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->intr(bus);
+}
+
+static int
+nvkm_bus_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->init(bus);
+	return 0;
+}
+
+static void *
+nvkm_bus_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_bus(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_bus = {
+	.dtor = nvkm_bus_dtor,
+	.init = nvkm_bus_init,
+	.intr = nvkm_bus_intr,
+};
+
+int
+nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bus **pbus)
+{
+	struct nvkm_bus *bus;
+	if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+	bus->func = func;
+	return 0;
+}
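
The new base.c shows the shape every converted subdev now takes: a generic nvkm_subdev_func vtable whose hooks receive the base object, with nvkm_bus() (a container_of wrapper, per priv.h below) recovering the derived structure. A compilable sketch of that downcast pattern, using stand-in types rather than the real nvkm ones:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct subdev;
    struct subdev_func { void (*intr)(struct subdev *); };
    struct subdev { const struct subdev_func *func; };

    struct bus {
            struct subdev subdev;   /* embedded base object */
            int intr_count;
    };

    static void bus_intr(struct subdev *sd)
    {
            /* recover the containing bus from the embedded subdev */
            struct bus *bus = container_of(sd, struct bus, subdev);
            printf("bus intr #%d\n", ++bus->intr_count);
    }

    static const struct subdev_func bus_func = { .intr = bus_intr };

    int main(void)
    {
            struct bus bus = { .subdev.func = &bus_func, .intr_count = 0 };
            bus.subdev.func->intr(&bus.subdev);     /* dispatch via vtable */
            return 0;
    }
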
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index cbe699e..9700b5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,37 +22,43 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 #include <subdev/timer.h>
 
 static int
-g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
-	nv_wr32(pbus, 0x001318, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
+	nvkm_wr32(device, 0x001318, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x080000 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000001);
+		nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000001);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
+
+	return 0;
 }
 
-struct nvkm_oclass *
-g94_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+g94_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = g94_bus_hwsq_exec,
 	.hwsq_size = 128,
-}.base;
+};
+
+int
+g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&g94_bus, device, index, pbus);
+}
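
g94_bus_hwsq_exec() also picks up the new nvkm_msec() wait form: instead of nv_wait()'s mask/value pair, the caller supplies the poll body inline and a negative result signals timeout. A rough userspace analogue of that poll-until-clear loop, with the register access stubbed out:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* pretend register: busy bit 8 clears after a few polls */
    static uint32_t rd32_stub(void) { static int n; return ++n < 4 ? 0x100 : 0; }

    static int64_t now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    /* Poll until bit 8 of the stub register clears; -ETIMEDOUT after 'ms'. */
    static int wait_idle(int ms)
    {
            int64_t deadline = now_ns() + (int64_t)ms * 1000000;
            while (rd32_stub() & 0x00000100) {
                    if (now_ns() > deadline)
                            return -ETIMEDOUT;
            }
            return 0;
    }

    int main(void)
    {
            printf("wait_idle: %d\n", wait_idle(2000));
            return 0;
    }
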
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index ebc63ba..e0930d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,59 +22,54 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static void
-gf100_bus_intr(struct nvkm_subdev *subdev)
+gf100_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x0000000e) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc),
-			 (stat & 0x00000002) ? "!ENGINE " : "",
-			 (stat & 0x00000004) ? "IBUS " : "",
-			 (stat & 0x00000008) ? "TIMEOUT " : "");
+		nvkm_error(subdev,
+			   "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc),
+			   (stat & 0x00000002) ? "!ENGINE " : "",
+			   (stat & 0x00000004) ? "IBUS " : "",
+			   (stat & 0x00000008) ? "TIMEOUT " : "");
 
-		nv_wr32(pbus, 0x009084, 0x00000000);
-		nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+		nvkm_wr32(device, 0x009084, 0x00000000);
+		nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
 		stat &= ~0x0000000e;
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-gf100_bus_init(struct nvkm_object *object)
+static void
+gf100_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x0000000e);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x0000000e);
 }
 
-struct nvkm_oclass *
-gf100_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = gf100_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+gf100_bus = {
+	.init = gf100_bus_init,
 	.intr = gf100_bus_intr,
-}.base;
+};
+
+int
+gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 7622b41..79f1cf5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include <subdev/bus.h>
+#include "priv.h"
 
 struct nvkm_hwsq {
-	struct nvkm_bus *pbus;
+	struct nvkm_subdev *subdev;
 	u32 addr;
 	u32 data;
 	struct {
@@ -41,13 +41,13 @@
 }
 
 int
-nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq)
+nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
 {
 	struct nvkm_hwsq *hwsq;
 
 	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
 	if (hwsq) {
-		hwsq->pbus = pbus;
+		hwsq->subdev = subdev;
 		hwsq->addr = ~0;
 		hwsq->data = ~0;
 		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
@@ -63,21 +63,23 @@
 	struct nvkm_hwsq *hwsq = *phwsq;
 	int ret = 0, i;
 	if (hwsq) {
-		struct nvkm_bus *pbus = hwsq->pbus;
+		struct nvkm_subdev *subdev = hwsq->subdev;
+		struct nvkm_bus *bus = subdev->device->bus;
 		hwsq->c.size = (hwsq->c.size + 4) / 4;
-		if (hwsq->c.size <= pbus->hwsq_size) {
+		if (hwsq->c.size <= bus->func->hwsq_size) {
 			if (exec)
-				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
-						      hwsq->c.size);
+				ret = bus->func->hwsq_exec(bus,
+							   (u32 *)hwsq->c.data,
+								  hwsq->c.size);
 			if (ret)
-				nv_error(pbus, "hwsq exec failed: %d\n", ret);
+				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
 		} else {
-			nv_error(pbus, "hwsq ucode too large\n");
+			nvkm_error(subdev, "hwsq ucode too large\n");
 			ret = -ENOSPC;
 		}
 
 		for (i = 0; ret && i < hwsq->c.size; i++)
-			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
 
 		*phwsq = NULL;
 		kfree(hwsq);
@@ -88,7 +90,7 @@
 void
 nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
 {
-	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
 
 	if (hwsq->data != data) {
 		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
@@ -113,7 +115,7 @@
 void
 nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
 {
-	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
 	flag += 0x80;
 	if (data >= 0)
 		flag += 0x20;
@@ -125,7 +127,7 @@
 void
 nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
 {
-	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
 	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
 }
 
@@ -138,6 +140,6 @@
 		shift++;
 	}
 
-	nv_debug(hwsq->pbus, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(hwsq->subdev, "    DELAY = %d ns\n", nsec);
 	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index ebf709c..8117ec5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -59,10 +59,9 @@
 static inline int
 hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
 	int ret;
 
-	ret = nvkm_hwsq_init(pbus, &ram->hwsq);
+	ret = nvkm_hwsq_init(subdev, &ram->hwsq);
 	if (ret)
 		return ret;
 
@@ -85,8 +84,9 @@
 static inline u32
 hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
 {
+	struct nvkm_device *device = ram->subdev->device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->subdev, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
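
hwsq_rd32() above implements a small read cache keyed on a sequence counter: a register is only re-read from hardware once the script sequence has advanced past the cached copy's stamp. The same memoization pattern in isolation (stamping on refresh is assumed here; in the driver the stamp is managed by the surrounding hwsq machinery):

    #include <stdint.h>
    #include <stdio.h>

    struct reg { uint32_t addr, data, sequence; };
    struct ctx { uint32_t sequence; };

    static uint32_t hw_read(uint32_t addr)
    {
            printf("hardware read of %06x\n", (unsigned)addr);
            return 0x42;
    }

    static uint32_t cached_rd32(struct ctx *ctx, struct reg *reg)
    {
            if (reg->sequence != ctx->sequence) {   /* stale: refresh */
                    reg->data = hw_read(reg->addr);
                    reg->sequence = ctx->sequence;
            }
            return reg->data;
    }

    int main(void)
    {
            struct ctx ctx = { .sequence = 1 };
            struct reg reg = { .addr = 0x001100, .data = 0, .sequence = 0 };
            cached_rd32(&ctx, &reg);        /* misses: hits hardware */
            cached_rd32(&ctx, &reg);        /* served from cache */
            ctx.sequence++;
            cached_rd32(&ctx, &reg);        /* stale again: re-read */
            return 0;
    }
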
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 19c8e50..c80b967 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,73 +22,53 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
 
 static void
-nv04_bus_intr(struct nvkm_subdev *subdev)
+nv04_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x00000001) {
-		nv_error(pbus, "BUS ERROR\n");
+		nvkm_error(subdev, "BUS ERROR\n");
 		stat &= ~0x00000001;
-		nv_wr32(pbus, 0x001100, 0x00000001);
+		nvkm_wr32(device, 0x001100, 0x00000001);
 	}
 
 	if (stat & 0x00000110) {
-		subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 		stat &= ~0x00000110;
-		nv_wr32(pbus, 0x001100, 0x00000110);
+		nvkm_wr32(device, 0x001100, 0x00000110);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv04_bus_init(struct nvkm_object *object)
+static void
+nv04_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00000111);
-
-	return nvkm_bus_init(&priv->base);
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00000111);
 }
 
+static const struct nvkm_bus_func
+nv04_bus = {
+	.init = nv04_bus_init,
+	.intr = nv04_bus_intr,
+};
+
 int
-nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
 {
-	struct nv04_bus_impl *impl = (void *)oclass;
-	struct nv04_bus_priv *priv;
-	int ret;
-
-	ret = nvkm_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = impl->intr;
-	priv->base.hwsq_exec = impl->hwsq_exec;
-	priv->base.hwsq_size = impl->hwsq_size;
-	return 0;
+	return nvkm_bus_new_(&nv04_bus, device, index, pbus);
 }
-
-struct nvkm_oclass *
-nv04_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv04_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
-	.intr = nv04_bus_intr,
-}.base;
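
The rewritten intr handlers keep the long-standing discipline: read stat AND mask, service and acknowledge each known bit, then mask off whatever remains so an unhandled source cannot storm the interrupt line. Condensed into a standalone toy with simulated registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t intr_stat = 0x00000111; /* pending: error + gpio bits */
    static uint32_t intr_mask = 0xffffffff;

    static void bus_intr(void)
    {
            uint32_t stat = intr_stat & intr_mask;

            if (stat & 0x00000001) {                /* bus error */
                    printf("BUS ERROR\n");
                    stat &= ~0x00000001;
                    intr_stat &= ~0x00000001;       /* acknowledge */
            }

            if (stat & 0x00000110) {                /* forward to gpio */
                    printf("gpio intr\n");
                    stat &= ~0x00000110;
                    intr_stat &= ~0x00000110;       /* acknowledge */
            }

            if (stat) {                             /* unknown source */
                    printf("intr %08x\n", (unsigned)stat);
                    intr_mask &= ~stat;             /* mask it off */
            }
    }

    int main(void)
    {
            bus_intr();
            return 0;
    }
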
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
deleted file mode 100644
index 3ddc8f9..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_BUS_NV04_H__
-#define __NVKM_BUS_NV04_H__
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nvkm_bus base;
-};
-
-int  nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
-		   struct nvkm_oclass *, void *, u32,
-		   struct nvkm_object **);
-int  nv50_bus_init(struct nvkm_object *);
-void nv50_bus_intr(struct nvkm_subdev *);
-
-struct nv04_bus_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-	int  (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32  hwsq_size;
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index c5739bce..5153d89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,70 +22,67 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
+#include <subdev/therm.h>
 
 static void
-nv31_bus_intr(struct nvkm_subdev *subdev)
+nv31_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
-	u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+	u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
 
 	if (gpio) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 	}
 
 	if (stat & 0x00000008) {  /* NV41- */
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
 
 		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
+		nvkm_wr32(device, 0x001100, 0x00000008);
 	}
 
 	if (stat & 0x00070000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
 		stat &= ~0x00070000;
-		nv_wr32(pbus, 0x001100, 0x00070000);
+		nvkm_wr32(device, 0x001100, 0x00070000);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv31_bus_init(struct nvkm_object *object)
+static void
+nv31_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00070008);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00070008);
 }
 
-struct nvkm_oclass *
-nv31_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x31),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv31_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+nv31_bus = {
+	.init = nv31_bus_init,
 	.intr = nv31_bus_intr,
-}.base;
+};
+
+int
+nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 1987863..19e10fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,83 +22,84 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
+#include <subdev/therm.h>
 #include <subdev/timer.h>
 
 static int
-nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000003);
+		nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000003);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
-}
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
 
-void
-nv50_bus_intr(struct nvkm_subdev *subdev)
-{
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
-
-	if (stat & 0x00000008) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
-
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
-
-		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
-	}
-
-	if (stat & 0x00010000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
-		stat &= ~0x00010000;
-		nv_wr32(pbus, 0x001100, 0x00010000);
-	}
-
-	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0);
-	}
-}
-
-int
-nv50_bus_init(struct nvkm_object *object)
-{
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00010008);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+void
+nv50_bus_intr(struct nvkm_bus *bus)
+{
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+
+	if (stat & 0x00000008) {
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
+
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
+
+		stat &= ~0x00000008;
+		nvkm_wr32(device, 0x001100, 0x00000008);
+	}
+
+	if (stat & 0x00010000) {
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
+		stat &= ~0x00010000;
+		nvkm_wr32(device, 0x001100, 0x00010000);
+	}
+
+	if (stat) {
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0);
+	}
+}
+
+void
+nv50_bus_init(struct nvkm_bus *bus)
+{
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00010008);
+}
+
+static const struct nvkm_bus_func
+nv50_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = nv50_bus_hwsq_exec,
 	.hwsq_size = 64,
-}.base;
+};
+
+int
+nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
new file mode 100644
index 0000000..a130f2c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_BUS_PRIV_H__
+#define __NVKM_BUS_PRIV_H__
+#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
+#include <subdev/bus.h>
+
+struct nvkm_bus_func {
+	void (*init)(struct nvkm_bus *);
+	void (*intr)(struct nvkm_bus *);
+	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+	u32 hwsq_size;
+};
+
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+		  struct nvkm_bus **);
+
+void nv50_bus_init(struct nvkm_bus *);
+void nv50_bus_intr(struct nvkm_bus *);
+#endif
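
priv.h pins down the contract for the per-chipset tables seen above: hwsq_exec/hwsq_size are only populated on nv50/g94-class parts, so generic code treats them as optional (hwsq.c checks the size before dispatching, and nvkm_clk_fini() below does the same for an optional ->fini). A compact sketch of that optional-hook convention, stand-in types only:

    #include <stdio.h>

    struct bus;
    struct bus_func {
            void (*init)(struct bus *);     /* required on every chipset */
            int  (*hwsq_exec)(struct bus *, unsigned *, unsigned); /* optional */
            unsigned hwsq_size;             /* 0 when hwsq is unsupported */
    };

    struct bus { const struct bus_func *func; };

    static int bus_run_hwsq(struct bus *bus, unsigned *ucode, unsigned size)
    {
            /* guard before dispatch: chipsets without hwsq leave it NULL */
            if (!bus->func->hwsq_exec || size > bus->func->hwsq_size)
                    return -1;
            return bus->func->hwsq_exec(bus, ucode, size);
    }

    static void nv04_init(struct bus *bus) { (void)bus; }

    static const struct bus_func nv04_func = { .init = nv04_init };

    int main(void)
    {
            struct bus bus = { .func = &nv04_func };
            unsigned ucode[4] = { 0 };
            printf("hwsq on nv04: %d\n", bus_run_hwsq(&bus, ucode, 4));
            return 0;
    }
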
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688..ed7717b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@
 nvkm-y += nvkm/subdev/clk/gf100.o
 nvkm-y += nvkm/subdev/clk/gk104.o
 nvkm-y += nvkm/subdev/clk/gk20a.o
+
 nvkm-y += nvkm/subdev/clk/pllnv04.o
 nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d8..dc8682c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/boost.h>
 #include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
 #include <subdev/therm.h>
 #include <subdev/volt.h>
 
-#include <core/device.h>
 #include <core/option.h>
 
 /******************************************************************************
@@ -40,7 +40,7 @@
 nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
 		u8 pstate, u8 domain, u32 input)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
+	struct nvkm_bios *bios = clk->subdev.device->bios;
 	struct nvbios_boostE boostE;
 	u8  ver, hdr, cnt, len;
 	u16 data;
@@ -77,8 +77,10 @@
 static int
 nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 {
-	struct nvkm_therm *ptherm = nvkm_therm(clk);
-	struct nvkm_volt *volt = nvkm_volt(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_therm *therm = device->therm;
+	struct nvkm_volt *volt = device->volt;
 	struct nvkm_cstate *cstate;
 	int ret;
 
@@ -88,41 +90,41 @@
 		cstate = &pstate->base;
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
 			return ret;
 		}
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, +1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
 			return ret;
 		}
 	}
 
-	ret = clk->calc(clk, cstate);
+	ret = clk->func->calc(clk, cstate);
 	if (ret == 0) {
-		ret = clk->prog(clk);
-		clk->tidy(clk);
+		ret = clk->func->prog(clk);
+		clk->func->tidy(clk);
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, -1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -135,8 +137,8 @@
 static int
 nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains;
 	struct nvkm_cstate *cstate = NULL;
 	struct nvbios_cstepX cstepX;
 	u8  ver, hdr;
@@ -172,7 +174,8 @@
 static int
 nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 {
-	struct nvkm_fb *pfb = nvkm_fb(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_ram *ram = subdev->device->fb->ram;
 	struct nvkm_pstate *pstate;
 	int ret, idx = 0;
 
@@ -181,17 +184,17 @@
 			break;
 	}
 
-	nv_debug(clk, "setting performance state %d\n", pstatei);
+	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
 	clk->pstate = pstatei;
 
-	if (pfb->ram && pfb->ram->calc) {
+	if (ram && ram->func->calc) {
 		int khz = pstate->base.domain[nv_clk_src_mem];
 		do {
-			ret = pfb->ram->calc(pfb, khz);
+			ret = ram->func->calc(ram, khz);
 			if (ret == 0)
-				ret = pfb->ram->prog(pfb);
+				ret = ram->func->prog(ram);
 		} while (ret > 0);
-		pfb->ram->tidy(pfb);
+		ram->func->tidy(ram);
 	}
 
 	return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@
 nvkm_pstate_work(struct work_struct *work)
 {
 	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
+	struct nvkm_subdev *subdev = &clk->subdev;
 	int pstate;
 
 	if (!atomic_xchg(&clk->waiting, 0))
 		return;
 	clk->pwrsrc = power_supply_is_system_supplied();
 
-	nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
-		 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
-		 clk->astate, clk->tstate, clk->dstate);
+	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+		   clk->astate, clk->tstate, clk->dstate);
 
 	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
 	if (clk->state_nr && pstate != -1) {
 		pstate = (pstate < 0) ? clk->astate : pstate;
-		pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
+		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
 		pstate = max(pstate, clk->dstate);
 	} else {
 		pstate = clk->pstate = -1;
 	}
 
-	nv_trace(clk, "-> %d\n", pstate);
+	nvkm_trace(subdev, "-> %d\n", pstate);
 	if (pstate != clk->pstate) {
 		int ret = nvkm_pstate_prog(clk, pstate);
 		if (ret) {
-			nv_error(clk, "error setting pstate %d: %d\n",
-				 pstate, ret);
+			nvkm_error(subdev, "error setting pstate %d: %d\n",
+				   pstate, ret);
 		}
 	}
 
@@ -246,8 +250,9 @@
 static void
 nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
 {
-	struct nvkm_domain *clock = clk->domains - 1;
+	const struct nvkm_domain *clock = clk->domains - 1;
 	struct nvkm_cstate *cstate;
+	struct nvkm_subdev *subdev = &clk->subdev;
 	char info[3][32] = { "", "", "" };
 	char name[4] = "--";
 	int i = -1;
@@ -261,12 +266,12 @@
 		if (hi == 0)
 			continue;
 
-		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
 		list_for_each_entry(cstate, &pstate->list, head) {
 			u32 freq = cstate->domain[clock->name];
 			lo = min(lo, freq);
 			hi = max(hi, freq);
-			nv_debug(clk, "%10d KHz\n", freq);
+			nvkm_debug(subdev, "%10d KHz\n", freq);
 		}
 
 		if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@
 		}
 	}
 
-	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
 }
 
 static void
@@ -301,8 +306,8 @@
 static int
 nvkm_pstate_new(struct nvkm_clk *clk, int idx)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains - 1;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains - 1;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@
  *****************************************************************************/
 
 int
-_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
+nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 {
-	struct nvkm_clk *clk = (void *)object;
-	nvkm_notify_put(&clk->pwrsrc_ntfy);
-	return nvkm_subdev_fini(&clk->base, suspend);
+	return clk->func->read(clk, src);
 }
 
-int
-_nvkm_clk_init(struct nvkm_object *object)
+static int
+nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_clk *clk = (void *)object;
-	struct nvkm_domain *clock = clk->domains;
-	int ret;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
+	nvkm_notify_put(&clk->pwrsrc_ntfy);
+	flush_work(&clk->work);
+	if (clk->func->fini)
+		clk->func->fini(clk);
+	return 0;
+}
 
-	ret = nvkm_subdev_init(&clk->base);
-	if (ret)
-		return ret;
+static int
+nvkm_clk_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_clk *clk = nvkm_clk(subdev);
+	const struct nvkm_domain *clock = clk->domains;
+	int ret;
 
 	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
 	INIT_LIST_HEAD(&clk->bstate.list);
 	clk->bstate.pstate = 0xff;
 
 	while (clock->name != nv_clk_src_max) {
-		ret = clk->read(clk, clock->name);
+		ret = nvkm_clk_read(clk, clock->name);
 		if (ret < 0) {
-			nv_error(clk, "%02x freq unknown\n", clock->name);
+			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
 			return ret;
 		}
 		clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@
 
 	nvkm_pstate_info(clk, &clk->bstate);
 
+	if (clk->func->init)
+		return clk->func->init(clk);
+
 	clk->astate = clk->state_nr - 1;
 	clk->tstate = 0;
 	clk->dstate = 0;
@@ -513,61 +526,63 @@
 	return 0;
 }
 
-void
-_nvkm_clk_dtor(struct nvkm_object *object)
+static void *
+nvkm_clk_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_clk *clk = (void *)object;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
 	struct nvkm_pstate *pstate, *temp;
 
 	nvkm_notify_fini(&clk->pwrsrc_ntfy);
 
+	/* Early return if the pstates have been provided statically */
+	if (clk->func->pstates)
+		return clk;
+
 	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
 		nvkm_pstate_del(pstate);
 	}
 
-	nvkm_subdev_destroy(&clk->base);
+	return clk;
 }
 
+static const struct nvkm_subdev_func
+nvkm_clk = {
+	.dtor = nvkm_clk_dtor,
+	.init = nvkm_clk_init,
+	.fini = nvkm_clk_fini,
+};
+
 int
-nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
-		 struct nvkm_pstate *pstates, int nb_pstates,
-		 bool allow_reclock, int length, void **object)
+nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk *clk)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_clk *clk;
 	int ret, idx, arglen;
 	const char *mode;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
-				  "clock", length, object);
-	clk = *object;
-	if (ret)
-		return ret;
-
+	nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+	clk->func = func;
 	INIT_LIST_HEAD(&clk->states);
-	clk->domains = clocks;
+	clk->domains = func->domains;
 	clk->ustate_ac = -1;
 	clk->ustate_dc = -1;
+	clk->allow_reclock = allow_reclock;
 
 	INIT_WORK(&clk->work, nvkm_pstate_work);
 	init_waitqueue_head(&clk->wait);
 	atomic_set(&clk->waiting, 0);
 
 	/* If no pstates are provided, try and fetch them from the BIOS */
-	if (!pstates) {
+	if (!func->pstates) {
 		idx = 0;
 		do {
 			ret = nvkm_pstate_new(clk, idx++);
 		} while (ret == 0);
 	} else {
-		for (idx = 0; idx < nb_pstates; idx++)
-			list_add_tail(&pstates[idx].head, &clk->states);
-		clk->state_nr = nb_pstates;
+		for (idx = 0; idx < func->nr_pstates; idx++)
+			list_add_tail(&func->pstates[idx].head, &clk->states);
+		clk->state_nr = func->nr_pstates;
 	}
 
-	clk->allow_reclock = allow_reclock;
-
 	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
 			       NULL, 0, 0, &clk->pwrsrc_ntfy);
 	if (ret)
@@ -589,3 +604,12 @@
 
 	return 0;
 }
+
+int
+nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
+{
+	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+}
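
A convention easy to miss in nvkm_pstate_prog() above: the ram ->calc/->prog hooks return a negative errno on failure, zero on success, and a positive value to request another calc/prog pass, which the do/while (ret > 0) loop grants (memory reclocks can take several steps). The control flow on its own, with stub hooks standing in for the ram functions:

    #include <stdio.h>

    static int pass;

    static int ram_calc(int khz) { (void)khz; return 0; }
    static int ram_prog(void)    { return ++pass < 3 ? 1 : 0; }

    int main(void)
    {
            int ret;
            do {
                    ret = ram_calc(800000);
                    if (ret == 0)
                            ret = ram_prog();
            } while (ret > 0);      /* >0 requests another pass */
            printf("settled after %d passes, ret=%d\n", pass, ret);
            return 0;
    }
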
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b97..347da9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
  */
 #include "nv50.h"
 
-static struct nvkm_domain
-g84_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0xff },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+g84_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0xff },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-g84_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = g84_domains,
-}.base;
+int
+g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&g84_clk, device, index,
+			     (device->chipset == 0xa0), pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d..a52b7e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gf100_clk(p) container_of((p), struct gf100_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
@@ -38,29 +38,29 @@
 	u32 coef;
 };
 
-struct gf100_clk_priv {
+struct gf100_clk {
 	struct nvkm_clk base;
 	struct gf100_clk_info eng[16];
 };
 
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
 
 static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return clk->read(clk, nv_clk_src_sppll0);
-	return clk->read(clk, nv_clk_src_sppll1);
+		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
+	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
 }
 
 static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
 		break;
 	case 0x132020:
-		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -95,46 +95,48 @@
 }
 
 static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(priv, 0x137100);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+	u32 ssel = nvkm_rd32(device, 0x137100);
 	u32 sclk, sdiv;
 
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (ssel & (1 << idx)) {
+		if (idx < 7)
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 		else
-			sclk = read_pll(priv, 0x1370e0);
+			sclk = read_pll(clk, 0x1370e0);
 		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
 	} else {
-		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
 	}
 
@@ -145,10 +147,11 @@
 }
 
 static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -156,47 +159,47 @@
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_sppll0:
-		return read_pll(priv, 0x00e800);
+		return read_pll(clk, 0x00e800);
 	case nv_clk_src_sppll1:
-		return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e820);
 
 	case nv_clk_src_mpllsrcref:
-		return read_div(priv, 0, 0x137320, 0x137330);
+		return read_div(clk, 0, 0x137320, 0x137330);
 	case nv_clk_src_mpllsrc:
-		return read_pll(priv, 0x132020);
+		return read_pll(clk, 0x132020);
 	case nv_clk_src_mpll:
-		return read_pll(priv, 0x132000);
+		return read_pll(clk, 0x132000);
 	case nv_clk_src_mdiv:
-		return read_div(priv, 0, 0x137300, 0x137310);
+		return read_div(clk, 0, 0x137300, 0x137310);
 	case nv_clk_src_mem:
-		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
-			return clk->read(clk, nv_clk_src_mpll);
-		return clk->read(clk, nv_clk_src_mdiv);
+		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
+			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
+		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
 
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_copy:
-		return read_clk(priv, 0x09);
+		return read_clk(clk, 0x09);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -207,7 +210,7 @@
 }
 
 static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -229,28 +232,29 @@
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -259,10 +263,9 @@
 }
 
 static int
-calc_clk(struct gf100_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x00004387 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->freq = clk1;
 	}
 
@@ -311,81 +314,96 @@
 }
 
 static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	if (clk < 7 && !info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	if (idx < 7 && !info->ssel) {
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	if (clk <= 7) {
-		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	if (idx <= 7) {
+		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 		if (info->coef) {
-			nv_wr32(priv, addr + 0x04, info->coef);
-			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+			nvkm_wr32(device, addr + 0x04, info->coef);
+			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+					break;
+			);
+			nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 }
 
 static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	struct {
-		void (*exec)(struct gf100_clk_priv *, int);
+		void (*exec)(struct gf100_clk *, int);
 	} stage[] = {
 		{ gf100_clk_prog_0 }, /* div programming */
 		{ gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
-			if (!priv->eng[j].freq)
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -407,56 +425,42 @@
 }
 
 static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gf100_clk *clk = gf100_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gf100_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_hubk06 , 0x00 },
-	{ nv_clk_src_hubk01 , 0x01 },
-	{ nv_clk_src_copy   , 0x02 },
-	{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
-	{ nv_clk_src_rop    , 0x04 },
-	{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x0a },
-	{ nv_clk_src_hubk07 , 0x0b },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gf100_clk = {
+	.read = gf100_clk_read,
+	.calc = gf100_clk_calc,
+	.prog = gf100_clk_prog,
+	.tidy = gf100_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_hubk06 , 0x00 },
+		{ nv_clk_src_hubk01 , 0x01 },
+		{ nv_clk_src_copy   , 0x02 },
+		{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+		{ nv_clk_src_rop    , 0x04 },
+		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x0a },
+		{ nv_clk_src_hubk07 , 0x0b },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gf100_clk_priv *priv;
-	int ret;
+	struct gf100_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gf100_clk_read;
-	priv->base.calc = gf100_clk_calc;
-	priv->base.prog = gf100_clk_prog;
-	priv->base.tidy = gf100_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
 }
-
-struct nvkm_oclass
-gf100_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
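
The hunks above are one mechanical recipe, repeated for every function in the file: struct gf100_clk_priv becomes struct gf100_clk with struct nvkm_clk embedded as its first member base, and the old opaque (void *)clk casts give way to a container_of() downcast behind the gf100_clk() macro. For readers new to the idiom, a minimal self-contained sketch of how such a downcast macro works (every name below is illustrative, not taken from the patch):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of(): recover the enclosing struct from a pointer to one
     * of its members, by subtracting the member's offset. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_clk {
            int index;
    };

    struct impl_clk {
            struct base_clk base;   /* embedded base object */
            unsigned int eng[16];   /* implementation-private state */
    };

    #define impl_clk(p) container_of((p), struct impl_clk, base)

    /* The callback receives the base type; the downcast is now explicit
     * and type-checked instead of relying on a (void *) cast. */
    static void touch(struct base_clk *base)
    {
            struct impl_clk *clk = impl_clk(base);
            clk->eng[0] = 100;
    }

    int main(void)
    {
            struct impl_clk clk = { .base = { .index = 0 } };
            touch(&clk.base);
            printf("eng[0] = %u\n", clk.eng[0]);
            return 0;
    }
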
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310..396f7e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gk104_clk(p) container_of((p), struct gk104_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@
 	u32 coef;
 };
 
-struct gk104_clk_priv {
+struct gk104_clk {
 	struct nvkm_clk base;
 	struct gk104_clk_info eng[16];
 };
 
-static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
-static u32 read_pll(struct gk104_clk_priv *, u32);
+static u32 read_div(struct gk104_clk *, int, u32, u32);
+static u32 read_pll(struct gk104_clk *, u32);
 
 static u32
-read_vco(struct gk104_clk_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return read_pll(priv, 0x00e800);
-	return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e800);
+	return read_pll(clk, 0x00e820);
 }
 
 static u32
-read_pll(struct gk104_clk_priv *priv, u32 pll)
+read_pll(struct gk104_clk *clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = read_pll(priv, 0x132020);
+		sclk = read_pll(clk, 0x132020);
 		P = (coef & 0x10000000) ? 2 : 1;
 		break;
 	case 0x132020:
-		sclk = read_div(priv, 0, 0x137320, 0x137330);
-		fN   = nv_rd32(priv, pll + 0x10) >> 16;
+		sclk = read_div(clk, 0, 0x137320, 0x137330);
+		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -101,70 +103,73 @@
 }
 
 static u32
-read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_mem(struct gk104_clk_priv *priv)
+read_mem(struct gk104_clk *clk)
 {
-	switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
-	case 1: return read_pll(priv, 0x132020);
-	case 2: return read_pll(priv, 0x132000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
+	case 1: return read_pll(clk, 0x132020);
+	case 2: return read_pll(clk, 0x132000);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gk104_clk_priv *priv, int clk)
+read_clk(struct gk104_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
 	u32 sclk, sdiv;
 
-	if (clk < 7) {
-		u32 ssel = nv_rd32(priv, 0x137100);
-		if (ssel & (1 << clk)) {
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (idx < 7) {
+		u32 ssel = nvkm_rd32(device, 0x137100);
+		if (ssel & (1 << idx)) {
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 			sdiv = 1;
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	} else {
-		u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
 		if ((ssrc & 0x00000003) == 0x00000003) {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			if (ssrc & 0x00000100) {
 				if (ssrc & 0x40000000)
-					sclk = read_pll(priv, 0x1370e0);
+					sclk = read_pll(clk, 0x1370e0);
 				sdiv = 1;
 			} else {
 				sdiv = 0;
 			}
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	}
@@ -181,10 +186,11 @@
 }
 
 static int
-gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -192,29 +198,29 @@
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_mem:
-		return read_mem(priv);
+		return read_mem(clk);
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -225,7 +231,7 @@
 }
 
 static u32
-calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -247,28 +253,29 @@
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -277,10 +284,10 @@
 }
 
 static int
-calc_clk(struct gk104_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk *clk,
+	 struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->dsrc = 0x40000100;
 		info->freq = clk1;
 	}
@@ -329,98 +336,115 @@
 }
 
 static int
-gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (!info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }
 
 static void
-gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-	nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 	if (info->coef) {
-		nv_wr32(priv, addr + 0x04, info->coef);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-		nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-		nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+		nvkm_wr32(device, addr + 0x04, info->coef);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+				break;
+		);
+		nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 	}
 }
 
 static void
-gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel)
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
 	else
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
-gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
 	}
 }
 
 static int
-gk104_clk_prog(struct nvkm_clk *clk)
+gk104_clk_prog(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	struct {
 		u32 mask;
-		void (*exec)(struct gk104_clk_priv *, int);
+		void (*exec)(struct gk104_clk *, int);
 	} stage[] = {
 		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
 		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
 			if (!(stage[i].mask & (1 << j)))
 				continue;
-			if (!priv->eng[j].freq)
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -446,55 +470,41 @@
 }
 
 static void
-gk104_clk_tidy(struct nvkm_clk *clk)
+gk104_clk_tidy(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gk104_clk *clk = gk104_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gk104_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
-	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
-	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_hubk01 , 0x05 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x07 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gk104_clk = {
+	.read = gk104_clk_read,
+	.calc = gk104_clk_calc,
+	.prog = gk104_clk_prog,
+	.tidy = gk104_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
+		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_hubk01 , 0x05 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x07 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk104_clk_priv *priv;
-	int ret;
+	struct gk104_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gk104_clk_read;
-	priv->base.calc = gk104_clk_calc;
-	priv->base.prog = gk104_clk_prog;
-	priv->base.tidy = gk104_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gk104_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
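
Every nv_wait() in this file is converted the same way: nvkm_msec(device, 2000, <body>) re-evaluates the body until it executes break or until roughly 2000ms pass, and — as the gk20a hunks below make explicit with their < 0 checks — a negative result signals timeout. A standalone approximation of that control flow in plain C (poll_reg() and read_reg() are invented for the sketch; the real macro lives in subdev/timer.h):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Stub register read; returns the bit we are waiting on so the
     * example terminates immediately. */
    static uint32_t read_reg(uint32_t addr)
    {
            (void)addr;
            return 0x00020000;
    }

    /* Poll until (read_reg(addr) & mask) is nonzero or timeout_ms
     * elapses.  Returns 0 on success, -1 on timeout, mirroring how
     * callers treat a negative nvkm_msec()/nvkm_usec() result. */
    static int poll_reg(uint32_t addr, uint32_t mask, long timeout_ms)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    long elapsed_ms;

                    if (read_reg(addr) & mask)
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                 (now.tv_nsec - start.tv_nsec) / 1000000;
                    if (elapsed_ms >= timeout_ms)
                            return -1;
            }
    }

    int main(void)
    {
            printf("poll: %d\n", poll_reg(0x137000, 0x00020000, 2000));
            return 0;
    }
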
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c5327..254094a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,15 +22,12 @@
  * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
  *
  */
-#include <subdev/clk.h>
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+#include "priv.h"
+
+#include <core/tegra.h>
 #include <subdev/timer.h>
 
-#include <core/device.h>
-
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
-
 #define MHZ (1000 * 1000)
 
 #define MASK(w)	((1 << w) - 1)
@@ -117,41 +114,42 @@
 	.min_pl = 1, .max_pl = 32,
 };
 
-struct gk20a_clk_priv {
+struct gk20a_clk {
 	struct nvkm_clk base;
 	const struct gk20a_clk_pllg_params *params;
 	u32 m, n, pl;
 	u32 parent_rate;
 };
-#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
 
 static void
-gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
-	val = nv_rd32(priv, GPCPLL_COEFF);
-	priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-	priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
-	priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
+	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
 static u32
-gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk *clk)
 {
 	u32 rate;
 	u32 divider;
 
-	rate = priv->parent_rate * priv->n;
-	divider = priv->m * pl_to_div[priv->pl];
+	rate = clk->parent_rate * clk->n;
+	divider = clk->m * pl_to_div[clk->pl];
 	do_div(rate, divider);
 
 	return rate / 2;
 }
 
 static int
-gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 target_clk_f, ref_clk_f, target_freq;
 	u32 min_vco_f, max_vco_f;
 	u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@
 	u32 pl;
 
 	target_clk_f = rate * 2 / MHZ;
-	ref_clk_f = priv->parent_rate / MHZ;
+	ref_clk_f = clk->parent_rate / MHZ;
 
-	max_vco_f = priv->params->max_vco;
-	min_vco_f = priv->params->min_vco;
-	best_m = priv->params->max_m;
-	best_n = priv->params->min_n;
-	best_pl = priv->params->min_pl;
+	max_vco_f = clk->params->max_vco;
+	min_vco_f = clk->params->min_vco;
+	best_m = clk->params->max_m;
+	best_n = clk->params->min_n;
+	best_pl = clk->params->min_pl;
 
 	target_vco_f = target_clk_f + target_clk_f / 50;
 	if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@
 
 	/* min_pl <= high_pl <= max_pl */
 	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
-	high_pl = min(high_pl, priv->params->max_pl);
-	high_pl = max(high_pl, priv->params->min_pl);
+	high_pl = min(high_pl, clk->params->max_pl);
+	high_pl = max(high_pl, clk->params->min_pl);
 
 	/* min_pl <= low_pl <= max_pl */
 	low_pl = min_vco_f / target_vco_f;
-	low_pl = min(low_pl, priv->params->max_pl);
-	low_pl = max(low_pl, priv->params->min_pl);
+	low_pl = min(low_pl, clk->params->max_pl);
+	low_pl = max(low_pl, clk->params->min_pl);
 
 	/* Find Indices of high_pl and low_pl */
 	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@
 		}
 	}
 
-	nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
-		 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
 
 	/* Select lowest possible VCO */
 	for (pl = low_pl; pl <= high_pl; pl++) {
 		target_vco_f = target_clk_f * pl_to_div[pl];
-		for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
 			u_f = ref_clk_f / m;
 
-			if (u_f < priv->params->min_u)
+			if (u_f < clk->params->min_u)
 				break;
-			if (u_f > priv->params->max_u)
+			if (u_f > clk->params->max_u)
 				continue;
 
 			n = (target_vco_f * m) / ref_clk_f;
 			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
 
-			if (n > priv->params->max_n)
+			if (n > clk->params->max_n)
 				break;
 
 			for (; n <= n2; n++) {
-				if (n < priv->params->min_n)
+				if (n < clk->params->min_n)
 					continue;
-				if (n > priv->params->max_n)
+				if (n > clk->params->max_n)
 					break;
 
 				vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@
 	WARN_ON(best_delta == ~0);
 
 	if (best_delta != 0)
-		nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
-			 target_clk_f);
+		nvkm_debug(subdev,
+			   "no best match for target @ %dMHz on gpc_pll",
+			   target_clk_f);
 
-	priv->m = best_m;
-	priv->n = best_n;
-	priv->pl = best_pl;
+	clk->m = best_m;
+	clk->n = best_n;
+	clk->pl = best_pl;
 
-	target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
 
-	nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-		 target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+	nvkm_debug(subdev,
+		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
 	return 0;
 }
 
 static int
-gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val;
 	int ramp_timeout;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	/* do nothing if NDIV is the same */
 	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
 		return 0;
 
 	/* setup */
-	nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
 		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-	nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
 		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
 
 	/* pll slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
 	/* new ndiv ready for ramp */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
 	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
 	/* dynamic ramp to new ndiv */
-	val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
 
 	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
 		udelay(1);
-		val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
 		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
 			break;
 	}
 
 	/* exit slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
 		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-	nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
 	if (ramp_timeout <= 0) {
-		nv_error(priv, "gpcpll dynamic ramp timeout\n");
+		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
 		return -ETIMEDOUT;
 	}
 
@@ -322,149 +324,147 @@
 }
 
 static void
-_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static void
-_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
-_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val, cfg;
 	u32 m_old, pl_old, n_lo;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 
 	/* do NDIV slide if there is no change in M and PL */
-	cfg = nv_rd32(priv, GPCPLL_CFG);
-	if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+	cfg = nvkm_rd32(device, GPCPLL_CFG);
+	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
 	    (cfg & GPCPLL_CFG_ENABLE)) {
-		return gk20a_pllg_slide(priv, priv->n);
+		return gk20a_pllg_slide(clk, clk->n);
 	}
 
 	/* slide down to NDIV_LO */
-	n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
+	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
 	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-		int ret = gk20a_pllg_slide(priv, n_lo);
+		int ret = gk20a_pllg_slide(clk, n_lo);
 
 		if (ret)
 			return ret;
 	}
 
 	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
 		0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
 
 	/* put PLL in bypass before programming it */
-	val = nv_rd32(priv, SEL_VCO);
+	val = nvkm_rd32(device, SEL_VCO);
 	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 	udelay(2);
-	nv_wr32(priv, SEL_VCO, val);
+	nvkm_wr32(device, SEL_VCO, val);
 
 	/* get out from IDDQ */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_IDDQ) {
 		val &= ~GPCPLL_CFG_IDDQ;
-		nv_wr32(priv, GPCPLL_CFG, val);
-		nv_rd32(priv, GPCPLL_CFG);
+		nvkm_wr32(device, GPCPLL_CFG, val);
+		nvkm_rd32(device, GPCPLL_CFG);
 		udelay(2);
 	}
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 
-	nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
-		 priv->pl);
+	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
+		   clk->m, clk->n, clk->pl);
 
-	n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
-	val = priv->m << GPCPLL_COEFF_M_SHIFT;
-	val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
-	val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
+	val = clk->m << GPCPLL_COEFF_M_SHIFT;
+	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
+	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
-	_gk20a_pllg_enable(priv);
+	_gk20a_pllg_enable(clk);
 
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
 		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-		nv_wr32(priv, GPCPLL_CFG, val);
+		nvkm_wr32(device, GPCPLL_CFG, val);
 	}
 
-	if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
-				GPCPLL_CFG_LOCK)) {
-		nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+	if (nvkm_usec(device, 300,
+		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+			break;
+	) < 0)
 		return -ETIMEDOUT;
-	}
 
 	/* switch to VCO mode */
-	nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 
 	/* restore out divider 1:1 */
-	val = nv_rd32(priv, GPC2CLK_OUT);
+	val = nvkm_rd32(device, GPC2CLK_OUT);
 	val &= ~GPC2CLK_OUT_VCODIV_MASK;
 	udelay(2);
-	nv_wr32(priv, GPC2CLK_OUT, val);
+	nvkm_wr32(device, GPC2CLK_OUT, val);
 
 	/* slide up to new NDIV */
-	return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
 }
 
 static int
-gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 {
 	int err;
 
-	err = _gk20a_pllg_program_mnp(priv, true);
+	err = _gk20a_pllg_program_mnp(clk, true);
 	if (err)
-		err = _gk20a_pllg_program_mnp(priv, false);
+		err = _gk20a_pllg_program_mnp(clk, false);
 
 	return err;
 }
 
 static void
-gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
 	/* slide to VCO min */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_ENABLE) {
 		u32 coeff, m, n_lo;
 
-		coeff = nv_rd32(priv, GPCPLL_COEFF);
+		coeff = nvkm_rd32(device, GPCPLL_COEFF);
 		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-		n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
-				    priv->parent_rate / MHZ);
-		gk20a_pllg_slide(priv, n_lo);
+		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
+				    clk->parent_rate / MHZ);
+		gk20a_pllg_slide(clk, n_lo);
 	}
 
 	/* put PLL in bypass before disabling it */
-	nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 }
 
 #define GK20A_CLK_GPC_MDIV 1000
 
-static struct nvkm_domain
-gk20a_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
-	{ nv_clk_src_max }
-};
-
 static struct nvkm_pstate
 gk20a_pstates[] = {
 	{
@@ -560,87 +560,99 @@
 };
 
 static int
-gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_gpc:
-		gk20a_pllg_read_mnp(priv);
-		return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+		gk20a_pllg_read_mnp(clk);
+		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static int
-gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
 					 GK20A_CLK_GPC_MDIV);
 }
 
 static int
-gk20a_clk_prog(struct nvkm_clk *clk)
+gk20a_clk_prog(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_program_mnp(priv);
+	return gk20a_pllg_program_mnp(clk);
 }
 
 static void
-gk20a_clk_tidy(struct nvkm_clk *clk)
+gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static int
-gk20a_clk_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_clk_fini(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_clk_fini(&priv->base, false);
-
-	gk20a_pllg_disable(priv);
-
-	return ret;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	gk20a_pllg_disable(clk);
 }
 
 static int
-gk20a_clk_init(struct nvkm_object *object)
+gk20a_clk_init(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret;
 
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
 
-	ret = nvkm_clk_init(&priv->base);
-	if (ret)
-		return ret;
-
-	ret = gk20a_clk_prog(&priv->base);
+	ret = gk20a_clk_prog(&clk->base);
 	if (ret) {
-		nv_error(priv, "cannot initialize clock\n");
+		nvkm_error(subdev, "cannot initialize clock\n");
 		return ret;
 	}
 
 	return 0;
 }
 
-static int
-gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+gk20a_clk = {
+	.init = gk20a_clk_init,
+	.fini = gk20a_clk_fini,
+	.read = gk20a_clk_read,
+	.calc = gk20a_clk_calc,
+	.prog = gk20a_clk_prog,
+	.tidy = gk20a_clk_tidy,
+	.pstates = gk20a_pstates,
+	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+		{ nv_clk_src_max }
+	}
+};
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk20a_clk_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
-	int i;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_clk *clk;
+	int ret, i;
+
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
 	/* Finish initializing the pstates */
 	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@
 		gk20a_pstates[i].pstate = i + 1;
 	}
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
-			      gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
-			      true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	clk->params = &gk20a_pllg_params;
+	clk->parent_rate = clk_get_rate(tdev->clk);
 
-	priv->params = &gk20a_pllg_params;
-
-	plat = nv_device_to_platform(nv_device(parent));
-	priv->parent_rate = clk_get_rate(plat->gpu->clk);
-	nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
-
-	priv->base.read = gk20a_clk_read;
-	priv->base.calc = gk20a_clk_calc;
-	priv->base.prog = gk20a_clk_prog;
-	priv->base.tidy = gk20a_clk_tidy;
-	return 0;
+	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
+	nvkm_info(&clk->base.subdev, "parent clock rate: %d MHz\n",
+		  clk->parent_rate / MHZ);
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_clk_ctor,
-		.dtor = _nvkm_subdev_dtor,
-		.init = gk20a_clk_init,
-		.fini = gk20a_clk_fini,
-	},
-};
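
gk20a gets the same treatment, with two extras visible above: init/fini move out of nvkm_ofuncs into the nvkm_clk_func table, and the parent rate now comes from device->func->tegra(device) instead of the removed nouveau_platform glue. One detail shared by all three new constructors is worth flagging: *pclk is published before the fallible nvkm_clk_ctor() call, presumably so the caller's generic teardown can free a half-constructed object. A hypothetical sketch of that ownership convention (names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct clk {
            int ready;
    };

    /* Stand-in for nvkm_clk_ctor(): may fail after the caller already
     * holds the pointer. */
    static int clk_ctor(struct clk *clk)
    {
            clk->ready = 1;
            return 0;               /* or a negative errno on failure */
    }

    /* Publish through *pclk first, then run the fallible ctor; either
     * way the caller owns the object and one teardown path frees it. */
    static int clk_new(struct clk **pclk)
    {
            struct clk *clk = calloc(1, sizeof(*clk)); /* kzalloc analogue */

            if (!clk)
                    return -12;     /* -ENOMEM */
            *pclk = clk;
            return clk_ctor(clk);
    }

    int main(void)
    {
            struct clk *clk = NULL;
            int ret = clk_new(&clk);

            printf("ret=%d ready=%d\n", ret, clk ? clk->ready : -1);
            free(clk);
            return 0;
    }
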
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 065e9f5..07feae6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
  * Authors: Ben Skeggs
  *          Roy Spliet
  */
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct gt215_clk_priv {
+struct gt215_clk {
 	struct nvkm_clk base;
 	struct gt215_clk_info eng[nv_clk_src_max];
 };
 
-static u32 read_clk(struct gt215_clk_priv *, int, bool);
-static u32 read_pll(struct gt215_clk_priv *, int, u32);
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
 
 static u32
-read_vco(struct gt215_clk_priv *priv, int clk)
+read_vco(struct gt215_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 
 	switch (sctl & 0x00000030) {
 	case 0x00000000:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case 0x00000020:
-		return read_pll(priv, 0x41, 0x00e820);
+		return read_pll(clk, 0x41, 0x00e820);
 	case 0x00000030:
-		return read_pll(priv, 0x42, 0x00e8a0);
+		return read_pll(clk, 0x42, 0x00e8a0);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 sctl, sdiv, sclk;
 
 	/* refclk for the 0xe8xx plls is a fixed frequency */
-	if (clk >= 0x40) {
-		if (nv_device(priv)->chipset == 0xaf) {
+	if (idx >= 0x40) {
+		if (device->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(priv, 0x00471c) * 1000;
+			return nvkm_rd32(device, 0x00471c) * 1000;
 		}
 
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	}
 
-	sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
 
@@ -83,7 +85,7 @@
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
 		if (!(sctl & 0x00000200))
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 0;
 	case 0x00002000:
 		if (sctl & 0x00000040)
@@ -94,7 +96,7 @@
 		if (!(sctl & 0x00000001))
 			return 0;
 
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
 		return (sclk * 2) / sdiv;
 	default:
@@ -103,14 +105,15 @@
 }
 
 static u32
-read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
 
 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(priv, pll + 4);
+			u32 coef = nvkm_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@
 			if ((pll & 0x00ff00) == 0x00e800)
 				P = 1;
 
-			sclk = read_clk(priv, 0x00 + clk, false);
+			sclk = read_clk(clk, 0x00 + idx, false);
 		}
 	} else {
-		sclk = read_clk(priv, 0x10 + clk, false);
+		sclk = read_clk(clk, 0x10 + idx, false);
 	}
 
 	if (M * P)
@@ -134,41 +137,43 @@
 }
 
 static int
-gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 hsrc;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_core:
 	case nv_clk_src_core_intm:
-		return read_pll(priv, 0x00, 0x4200);
+		return read_pll(clk, 0x00, 0x4200);
 	case nv_clk_src_shader:
-		return read_pll(priv, 0x01, 0x4220);
+		return read_pll(clk, 0x01, 0x4220);
 	case nv_clk_src_mem:
-		return read_pll(priv, 0x02, 0x4000);
+		return read_pll(clk, 0x02, 0x4000);
 	case nv_clk_src_disp:
-		return read_clk(priv, 0x20, false);
+		return read_clk(clk, 0x20, false);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x21, false);
+		return read_clk(clk, 0x21, false);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x25, false);
+		return read_clk(clk, 0x25, false);
 	case nv_clk_src_host:
-		hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
+		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
 		switch (hsrc) {
 		case 0:
-			return read_clk(priv, 0x1d, false);
+			return read_clk(clk, 0x1d, false);
 		case 2:
 		case 3:
 			return 277000;
 		default:
-			nv_error(clk, "unknown HOST clock source %d\n", hsrc);
+			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
 			return -EINVAL;
 		}
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 
@@ -176,10 +181,10 @@
 }
 
 int
-gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
 	u32 oclk, sclk, sdiv;
 	s32 diff;
 
@@ -196,7 +201,7 @@
 		info->clk = 0x00002140;
 		return khz;
 	default:
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = min((sclk * 2) / khz, (u32)65);
 		oclk = (sclk * 2) / sdiv;
 		diff = ((khz + 3000) - oclk);
@@ -224,11 +229,11 @@
 }
 
 int
-gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(clock);
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll limits;
 	int P, N, M, diff;
 	int ret;
@@ -237,22 +242,22 @@
 
 	/* If we can get within [-2, 3) MHz with a divider, we'll disable the
 	 * PLL and use the divider instead. */
-	ret = gt215_clk_info(clock, clk, khz, info);
+	ret = gt215_clk_info(&clk->base, idx, khz, info);
 	diff = khz - ret;
 	if (!pll || (diff >= -2000 && diff < 3000)) {
 		goto out;
 	}
 
 	/* Try with PLL */
-	ret = nvbios_pll_parse(bios, pll, &limits);
+	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
 	if (ret)
 		return ret;
 
-	ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
+	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
 	if (ret != limits.refclk)
 		return -EINVAL;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
 	if (ret >= 0) {
 		info->pll = (P << 16) | (N << 8) | M;
 	}
@@ -263,22 +268,22 @@
 }
 
 static int
-calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
-	 int clk, u32 pll, int idx)
+calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
+	 int idx, u32 pll, int dom)
 {
-	int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
-				 &priv->eng[idx]);
+	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
+				 &clk->eng[dom]);
 	if (ret >= 0)
 		return 0;
 	return ret;
 }
 
 static int
-calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
+calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 {
 	int ret = 0;
 	u32 kHz = cstate->domain[nv_clk_src_host];
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
 
 	if (kHz == 277000) {
 		info->clk = 0;
@@ -288,7 +293,7 @@
 
 	info->host_out = NVA3_HOST_CLK;
 
-	ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
+	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
 	if (ret >= 0)
 		return 0;
 
@@ -298,21 +303,33 @@
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
 	/* halt and idle execution engines */
-	nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
 	/* Wait until the interrupt handler is finished */
-	if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x000100))
+			break;
+	) < 0)
 		return -EBUSY;
 
-	if (pfifo)
-		pfifo->pause(pfifo, flags);
+	if (fifo)
+		nvkm_fifo_pause(fifo, flags);
 
-	if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002504) & 0x00000010)
+			break;
+	) < 0)
 		return -EIO;
-	if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
+		if (tmp == 0x0000003f)
+			break;
+	) < 0)
 		return -EIO;
 
 	return 0;
@@ -321,86 +338,94 @@
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
-	if (pfifo && flags)
-		pfifo->start(pfifo, flags);
+	if (fifo && flags)
+		nvkm_fifo_start(fifo, flags);
 
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-	nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }
 
 static void
-disable_clk_src(struct gt215_clk_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-	nv_mask(priv, src, 0x00000100, 0x00000000);
-	nv_mask(priv, src, 0x00000001, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, src, 0x00000100, 0x00000000);
+	nvkm_mask(device, src, 0x00000001, 0x00000000);
 }
 
 static void
-prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	const u32 src0 = 0x004120 + (clk * 4);
-	const u32 src1 = 0x004160 + (clk * 4);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 src0 = 0x004120 + (idx * 4);
+	const u32 src1 = 0x004160 + (idx * 4);
 	const u32 ctrl = pll + 0;
 	const u32 coef = pll + 4;
 	u32 bypass;
 
 	if (info->pll) {
 		/* Always start from a non-PLL clock */
-		bypass = nv_rd32(priv, ctrl)  & 0x00000008;
+		bypass = nvkm_rd32(device, ctrl)  & 0x00000008;
 		if (!bypass) {
-			nv_mask(priv, src1, 0x00000101, 0x00000101);
-			nv_mask(priv, ctrl, 0x00000008, 0x00000008);
+			nvkm_mask(device, src1, 0x00000101, 0x00000101);
+			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
 			udelay(20);
 		}
 
-		nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
-		nv_wr32(priv, coef, info->pll);
-		nv_mask(priv, ctrl, 0x00000015, 0x00000015);
-		nv_mask(priv, ctrl, 0x00000010, 0x00000000);
-		if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
-			nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-			nv_mask(priv, src0, 0x00000101, 0x00000000);
+		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_wr32(device, coef, info->pll);
+		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, ctrl) & 0x00020000)
+				break;
+		) < 0) {
+			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+			nvkm_mask(device, src0, 0x00000101, 0x00000000);
 			return;
 		}
-		nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-		nv_mask(priv, ctrl, 0x00000008, 0x00000000);
-		disable_clk_src(priv, src1);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
+		disable_clk_src(clk, src1);
 	} else {
-		nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
-		nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(priv, ctrl, 0x00000001, 0x00000000);
-		disable_clk_src(priv, src0);
+		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
+		disable_clk_src(clk, src0);
 	}
 }
 
 static void
-prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }
 
 static void
-prog_host(struct gt215_clk_priv *priv)
+prog_host(struct gt215_clk *clk)
 {
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
-	u32 hsrc = (nv_rd32(priv, 0xc040));
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 hsrc = (nvkm_rd32(device, 0xc040));
 
 	switch (info->host_out) {
 	case NVA3_HOST_277:
 		if ((hsrc & 0x30000000) == 0) {
-			nv_wr32(priv, 0xc040, hsrc | 0x20000000);
-			disable_clk_src(priv, 0x4194);
+			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
+			disable_clk_src(clk, 0x4194);
 		}
 		break;
 	case NVA3_HOST_CLK:
-		prog_clk(priv, 0x1d, nv_clk_src_host);
+		prog_clk(clk, 0x1d, nv_clk_src_host);
 		if ((hsrc & 0x30000000) >= 0x20000000) {
-			nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
+			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
 		}
 		break;
 	default:
@@ -408,44 +433,45 @@
 	}
 
 	/* This seems to be a clock gating factor on idle, always set to 64 */
-	nv_wr32(priv, 0xc044, 0x3e);
+	nvkm_wr32(device, 0xc044, 0x3e);
 }
 
 static void
-prog_core(struct gt215_clk_priv *priv, int idx)
+prog_core(struct gt215_clk *clk, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	u32 fb_delay = nv_rd32(priv, 0x10002c);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 fb_delay = nvkm_rd32(device, 0x10002c);
 
 	if (fb_delay < info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 
-	prog_pll(priv, 0x00, 0x004200, idx);
+	prog_pll(clk, 0x00, 0x004200, dom);
 
 	if (fb_delay > info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 }
 
 static int
-gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
-	    (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
-	    (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
-	    (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
-	    (ret = calc_host(priv, cstate)))
+	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
+	    (ret = calc_host(clk, cstate)))
 		return ret;
 
 	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
 	 * whether to use a PLL or not... but using a PLL defeats the purpose */
 	if (core->pll) {
-		ret = gt215_clk_info(clk, 0x10,
+		ret = gt215_clk_info(&clk->base, 0x10,
 				     cstate->domain[nv_clk_src_core_intm],
-				     &priv->eng[nv_clk_src_core_intm]);
+				     &clk->eng[nv_clk_src_core_intm]);
 		if (ret < 0)
 			return ret;
 	}
@@ -454,81 +480,67 @@
 }
 
 static int
-gt215_clk_prog(struct nvkm_clk *clk)
+gt215_clk_prog(struct nvkm_clk *base)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret = 0;
 	unsigned long flags;
 	unsigned long *f = &flags;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	if (core->pll)
-		prog_core(priv, nv_clk_src_core_intm);
+		prog_core(clk, nv_clk_src_core_intm);
 
-	prog_core(priv,  nv_clk_src_core);
-	prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
-	prog_clk(priv, 0x20, nv_clk_src_disp);
-	prog_clk(priv, 0x21, nv_clk_src_vdec);
-	prog_host(priv);
+	prog_core(clk,  nv_clk_src_core);
+	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
+	prog_clk(clk, 0x20, nv_clk_src_disp);
+	prog_clk(clk, 0x21, nv_clk_src_vdec);
+	prog_host(clk);
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-gt215_clk_tidy(struct nvkm_clk *clk)
+gt215_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-gt215_domain[] = {
-	{ nv_clk_src_crystal  , 0xff },
-	{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
-	{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
-	{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
-	{ nv_clk_src_vdec     , 0x03 },
-	{ nv_clk_src_disp     , 0x04 },
-	{ nv_clk_src_host     , 0x05 },
-	{ nv_clk_src_core_intm, 0x06 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gt215_clk = {
+	.read = gt215_clk_read,
+	.calc = gt215_clk_calc,
+	.prog = gt215_clk_prog,
+	.tidy = gt215_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal  , 0xff },
+		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
+		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
+		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
+		{ nv_clk_src_vdec     , 0x03 },
+		{ nv_clk_src_disp     , 0x04 },
+		{ nv_clk_src_host     , 0x05 },
+		{ nv_clk_src_core_intm, 0x06 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gt215_clk_priv *priv;
-	int ret;
+	struct gt215_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gt215_clk_read;
-	priv->base.calc = gt215_clk_calc;
-	priv->base.prog = gt215_clk_prog;
-	priv->base.tidy = gt215_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gt215_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
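
[Annotation] The gt215.c conversions above replace the old nv_wait() helpers with nvkm_msec(), a macro that repeatedly executes a caller-supplied statement body until the body breaks out or a millisecond deadline expires; a negative return means the deadline passed, which is why the hunks test "< 0". A minimal jiffies-based sketch of that contract follows. It is illustrative only: the in-tree macro in <subdev/timer.h> times against the PTIMER clock, and poll_msec() is not a name from the tree.

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	/*
	 * Poll "body" for up to "ms" milliseconds; the body signals
	 * success by executing break.  Evaluates to >= 0 on success
	 * and -ETIMEDOUT once the budget is exhausted, matching the
	 * "< 0 means timeout" checks used in the hunks above.
	 */
	#define poll_msec(ms, body...) ({                                \
		unsigned long _t0 = jiffies;                             \
		s64 _taken = 0;                                          \
		do {                                                     \
			body                                             \
			cpu_relax();                                     \
			_taken = jiffies_to_msecs(jiffies - _t0);        \
		} while (_taken < (ms));                                 \
		if (_taken >= (ms))                                      \
			_taken = -ETIMEDOUT;                             \
		_taken;                                                  \
	})

With that model, the first wait in gt215_clk_pre() reads as: keep sampling register 0x000100 and succeed once it reads back zero (the interrupt handler has finished, per the comment in the hunk).
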
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9c..8865b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
 #ifndef __NVKM_CLK_NVA3_H__
 #define __NVKM_CLK_NVA3_H__
-#include <subdev/clk.h>
+#include "priv.h"
 
 struct gt215_clk_info {
 	u32 clk;
@@ -13,6 +13,6 @@
 };
 
 int  gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
-int  gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
-void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+int  gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b..1c21b8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
+#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct mcp77_clk_priv {
+struct mcp77_clk {
 	struct nvkm_clk base;
 	enum nv_clk_src csrc, ssrc, vsrc;
 	u32 cctrl, sctrl;
@@ -39,27 +39,29 @@
 };
 
 static u32
-read_div(struct nvkm_clk *clk)
+read_div(struct mcp77_clk *clk)
 {
-	return nv_rd32(clk, 0x004600);
+	struct nvkm_device *device = clk->base.subdev.device;
+	return nvkm_rd32(device, 0x004600);
 }
 
 static u32
-read_pll(struct nvkm_clk *clk, u32 base)
+read_pll(struct mcp77_clk *clk, u32 base)
 {
-	u32 ctrl = nv_rd32(clk, base + 0);
-	u32 coef = nv_rd32(clk, base + 4);
-	u32 ref = clk->read(clk, nv_clk_src_href);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	u32 post_div = 0;
 	u32 clock = 0;
 	int N1, M1;
 
 	switch (base){
 	case 0x4020:
-		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
 		break;
 	case 0x4028:
-		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
 		break;
 	default:
 		break;
@@ -76,59 +78,61 @@
 }
 
 static int
-mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(clk, 0x00c054);
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c054);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclkm4:
-		return clk->read(clk, nv_clk_src_href) * 4;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
 	case nv_clk_src_hclkm2d3:
-		return clk->read(clk, nv_clk_src_href) * 2 / 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
 	case nv_clk_src_host:
 		switch (mast & 0x000c0000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		case 0x00040000: break;
-		case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
 		}
 		break;
 	case nv_clk_src_core:
-		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000001: return 0;
-		case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
 		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_cclk:
 		if ((mast & 0x03000000) != 0x03000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		if ((mast & 0x00000200) == 0x00000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		switch (mast & 0x00000c00) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
-		case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
+		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		default: return 0;
 		}
 	case nv_clk_src_shader:
-		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000040)
-				return clk->read(clk, nv_clk_src_href) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
 		case 0x00000020: return read_pll(clk, 0x004028) >> P;
 		case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@
 
 		switch (mast & 0x00400000) {
 		case 0x00400000:
-			return clk->read(clk, nv_clk_src_core) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			break;
 		default:
 			return 500000 >> P;
@@ -153,29 +157,28 @@
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return 0;
 }
 
 static u32
-calc_pll(struct mcp77_clk_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk *clk, u32 reg,
 	 u32 clock, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
-	struct nvkm_clk *clk = &priv->base;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = clk->read(clk, nv_clk_src_href);
+	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -197,26 +200,27 @@
 }
 
 static int
-mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 out = 0, clock = 0;
 	int N, M, P1, P2 = 0;
 	int divs = 0;
 
 	/* cclk: find suitable source, disable PLL if we can */
-	if (core < clk->read(clk, nv_clk_src_hclkm4))
-		out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
+		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
 
 	/* Calculate clock * 2, so shader clock can use it too */
-	clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
 
 	if (abs(core - out) <= abs(core - (clock >> 1))) {
-		priv->csrc = nv_clk_src_hclkm4;
-		priv->cctrl = divs << 16;
+		clk->csrc = nv_clk_src_hclkm4;
+		clk->cctrl = divs << 16;
 	} else {
 		/* NVCTRL is actually used _after_ NVPOST, and after what we
 		 * call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@
 			P1 = 2;
 		}
 
-		priv->csrc = nv_clk_src_core;
-		priv->ccoef = (N << 8) | M;
+		clk->csrc = nv_clk_src_core;
+		clk->ccoef = (N << 8) | M;
 
-		priv->cctrl = (P2 + 1) << 16;
-		priv->cpost = (1 << P1) << 16;
+		clk->cctrl = (P2 + 1) << 16;
+		clk->cpost = (1 << P1) << 16;
 	}
 
 	/* sclk: nvpll + divisor, href or spll */
 	out = 0;
-	if (shader == clk->read(clk, nv_clk_src_href)) {
-		priv->ssrc = nv_clk_src_href;
+	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
+		clk->ssrc = nv_clk_src_href;
 	} else {
-		clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
-		if (priv->csrc == nv_clk_src_core)
+		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
+		if (clk->csrc == nv_clk_src_core)
 			out = calc_P((core << 1), shader, &divs);
 
 		if (abs(shader - out) <=
 		    abs(shader - clock) &&
 		   (divs + P2) <= 7) {
-			priv->ssrc = nv_clk_src_core;
-			priv->sctrl = (divs + P2) << 16;
+			clk->ssrc = nv_clk_src_core;
+			clk->sctrl = (divs + P2) << 16;
 		} else {
-			priv->ssrc = nv_clk_src_shader;
-			priv->scoef = (N << 8) | M;
-			priv->sctrl = P1 << 16;
+			clk->ssrc = nv_clk_src_shader;
+			clk->scoef = (N << 8) | M;
+			clk->sctrl = P1 << 16;
 		}
 	}
 
@@ -258,172 +262,162 @@
 	out = calc_P(core, vdec, &divs);
 	clock = calc_P(500000, vdec, &P1);
 	if(abs(vdec - out) <= abs(vdec - clock)) {
-		priv->vsrc = nv_clk_src_cclk;
-		priv->vdiv = divs << 16;
+		clk->vsrc = nv_clk_src_cclk;
+		clk->vdiv = divs << 16;
 	} else {
-		priv->vsrc = nv_clk_src_vdec;
-		priv->vdiv = P1 << 16;
+		clk->vsrc = nv_clk_src_vdec;
+		clk->vdiv = P1 << 16;
 	}
 
 	/* Print strategy! */
-	nv_debug(priv, "nvpll: %08x %08x %08x\n",
-			priv->ccoef, priv->cpost, priv->cctrl);
-	nv_debug(priv, " spll: %08x %08x %08x\n",
-			priv->scoef, priv->spost, priv->sctrl);
-	nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
-	if (priv->csrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "core: hrefm4\n");
+	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
+		   clk->ccoef, clk->cpost, clk->cctrl);
+	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
+		   clk->scoef, clk->spost, clk->sctrl);
+	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
+	if (clk->csrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "core: hrefm4\n");
 	else
-		nv_debug(priv, "core: nvpll\n");
+		nvkm_debug(subdev, "core: nvpll\n");
 
-	if (priv->ssrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "shader: hrefm4\n");
-	else if (priv->ssrc == nv_clk_src_core)
-		nv_debug(priv, "shader: nvpll\n");
+	if (clk->ssrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "shader: hrefm4\n");
+	else if (clk->ssrc == nv_clk_src_core)
+		nvkm_debug(subdev, "shader: nvpll\n");
 	else
-		nv_debug(priv, "shader: spll\n");
+		nvkm_debug(subdev, "shader: spll\n");
 
-	if (priv->vsrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "vdec: 500MHz\n");
+	if (clk->vsrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "vdec: 500MHz\n");
 	else
-		nv_debug(priv, "vdec: core\n");
+		nvkm_debug(subdev, "vdec: core\n");
 
 	return 0;
 }
 
 static int
-mcp77_clk_prog(struct nvkm_clk *clk)
+mcp77_clk_prog(struct nvkm_clk *base)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 pllmask = 0, mast;
 	unsigned long flags;
 	unsigned long *f = &flags;
 	int ret = 0;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	/* First switch to safe clocks: href */
-	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
 	mast &= ~0x00400e73;
 	mast |= 0x03000000;
 
-	switch (priv->csrc) {
+	switch (clk->csrc) {
 	case nv_clk_src_hclkm4:
-		nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
 		mast |= 0x00000002;
 		break;
 	case nv_clk_src_core:
-		nv_wr32(clk, 0x402c, priv->ccoef);
-		nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
-		nv_wr32(clk, 0x4040, priv->cpost);
+		nvkm_wr32(device, 0x402c, clk->ccoef);
+		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+		nvkm_wr32(device, 0x4040, clk->cpost);
 		pllmask |= (0x3 << 8);
 		mast |= 0x00000003;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown core clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
 		goto resume;
 	}
 
-	switch (priv->ssrc) {
+	switch (clk->ssrc) {
 	case nv_clk_src_href:
-		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
 		/* mast |= 0x00000000; */
 		break;
 	case nv_clk_src_core:
-		nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
 		mast |= 0x00000020;
 		break;
 	case nv_clk_src_shader:
-		nv_wr32(clk, 0x4024, priv->scoef);
-		nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
-		nv_wr32(clk, 0x4070, priv->spost);
+		nvkm_wr32(device, 0x4024, clk->scoef);
+		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+		nvkm_wr32(device, 0x4070, clk->spost);
 		pllmask |= (0x3 << 12);
 		mast |= 0x00000030;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
 		goto resume;
 	}
 
-	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
-		nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+		if (tmp == pllmask)
+			break;
+	) < 0)
 		goto resume;
-	}
 
-	switch (priv->vsrc) {
+	switch (clk->vsrc) {
 	case nv_clk_src_cclk:
 		mast |= 0x00400000;
 	default:
-		nv_wr32(clk, 0x4600, priv->vdiv);
+		nvkm_wr32(device, 0x4600, clk->vdiv);
 	}
 
-	nv_wr32(clk, 0xc054, mast);
+	nvkm_wr32(device, 0xc054, mast);
 
 resume:
 	/* Disable some PLLs and dividers when unused */
-	if (priv->csrc != nv_clk_src_core) {
-		nv_wr32(clk, 0x4040, 0x00000000);
-		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+	if (clk->csrc != nv_clk_src_core) {
+		nvkm_wr32(device, 0x4040, 0x00000000);
+		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
 	}
 
-	if (priv->ssrc != nv_clk_src_shader) {
-		nv_wr32(clk, 0x4070, 0x00000000);
-		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+	if (clk->ssrc != nv_clk_src_shader) {
+		nvkm_wr32(device, 0x4070, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
 	}
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-mcp77_clk_tidy(struct nvkm_clk *clk)
+mcp77_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-mcp77_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+mcp77_clk = {
+	.read = mcp77_clk_read,
+	.calc = mcp77_clk_calc,
+	.prog = mcp77_clk_prog,
+	.tidy = mcp77_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct mcp77_clk_priv *priv;
-	int ret;
+	struct mcp77_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = mcp77_clk_read;
-	priv->base.calc = mcp77_clk_calc;
-	priv->base.prog = mcp77_clk_prog;
-	priv->base.tidy = mcp77_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass *
-mcp77_clk_oclass = &(struct nvkm_oclass) {
-	.handle = NV_SUBDEV(CLK, 0xaa),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
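
[Annotation] A pattern repeated in every file of this series: the chipset struct loses its _priv suffix, embeds struct nvkm_clk as a member named base, and a one-line container_of() macro (gt215_clk(), mcp77_clk(), nv40_clk(), ...) recovers the outer struct from the base pointer that the common hooks receive. A minimal sketch of the idiom, with illustrative names (xx_clk is not in the tree) and assuming <subdev/clk.h> for struct nvkm_clk:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/errno.h>

	struct xx_clk {
		struct nvkm_clk base;	/* generic subdev state, embedded */
		u32 ctrl;		/* chipset-specific state */
	};

	#define xx_clk(p) container_of((p), struct xx_clk, base)

	static int
	xx_clk_prog(struct nvkm_clk *base)
	{
		struct xx_clk *clk = xx_clk(base);	/* explicit downcast */

		return clk->ctrl ? 0 : -EINVAL;
	}

This replaces the old "struct mcp77_clk_priv *priv = (void *)clk;" casts, which silently depended on the base object being the first member; container_of() makes the relationship explicit and keeps working even if the member moves.
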
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb5..b280f85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
 #include "pll.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit/nv04.h>
 
-struct nv04_clk_priv {
-	struct nvkm_clk base;
-};
-
 int
 nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
 		  int clk, struct nvkm_pll_vals *pv)
 {
 	int N1, M1, N2, M2, P;
-	int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
+	int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
 	if (ret) {
 		pv->refclk = info->refclk;
 		pv->N1 = N1;
@@ -52,8 +48,9 @@
 int
 nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(clk);
-	int cv = nvkm_bios(clk)->version.chip;
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	int cv = device->bios->version.chip;
 
 	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
 	    cv >= 0x40) {
@@ -67,37 +64,20 @@
 	return 0;
 }
 
-static struct nvkm_domain
-nv04_domain[] = {
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv04_clk = {
+	.domains = {
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct nv04_clk_priv *priv;
-	int ret;
-
-	ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	return 0;
+	int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+	if (ret == 0) {
+		(*pclk)->pll_calc = nv04_clk_pll_calc;
+		(*pclk)->pll_prog = nv04_clk_pll_prog;
+	}
+	return ret;
 }
-
-struct nvkm_oclass
-nv04_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
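
[Annotation] nv04 is the simplest consumer of the new constructor helpers declared in priv.h further down. This diff never shows the body of nvkm_clk_new_(); a plausible sketch, consistent with how nv04 calls it and with the nvkm_clk_ctor() prototype, would be (an assumption, not taken from this patch):

	#include <linux/slab.h>

	int
	nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
		      int index, bool allow_reclock, struct nvkm_clk **pclk)
	{
		/* allocate the bare base object... */
		if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
			return -ENOMEM;
		/* ...and run the common constructor on it */
		return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
	}

Chipsets that carry extra state (gt215, mcp77, nv40, nv50) instead allocate their wrapper struct themselves and call nvkm_clk_ctor() directly, exactly as the hunks above show.
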
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed83813..2ab9b9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define nv40_clk(p) container_of((p), struct nv40_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
-struct nv40_clk_priv {
+struct nv40_clk {
 	struct nvkm_clk base;
 	u32 ctrl;
 	u32 npll_ctrl;
@@ -36,64 +36,56 @@
 	u32 spll;
 };
 
-static struct nvkm_domain
-nv40_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
-};
-
 static u32
-read_pll_1(struct nv40_clk_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if (ctrl & 0x80000000)
-		clk = ref * N / M;
+		khz = ref * N / M;
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_pll_2(struct nv40_clk_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
-	u32 coef = nv_rd32(priv, reg + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
+	u32 coef = nvkm_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
 	int M1 = (coef & 0x000000ff) >> 0;
 	int P = (ctrl & 0x00070000) >> 16;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
+		khz = ref * N1 / M1;
 		if ((ctrl & 0x40000100) == 0x40000000) {
 			if (M2)
-				clk = clk * N2 / M2;
+				khz = khz * N2 / M2;
 			else
-				clk = 0;
+				khz = 0;
 		}
 	}
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_clk(struct nv40_clk_priv *priv, u32 src)
+read_clk(struct nv40_clk *clk, u32 src)
 {
 	switch (src) {
 	case 3:
-		return read_pll_2(priv, 0x004000);
+		return read_pll_2(clk, 0x004000);
 	case 2:
-		return read_pll_1(priv, 0x004008);
+		return read_pll_1(clk, 0x004008);
 	default:
 		break;
 	}
@@ -102,46 +94,48 @@
 }
 
 static int
-nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /*XXX: PCIE/AGP differ*/
 	case nv_clk_src_core:
-		return read_clk(priv, (mast & 0x00000003) >> 0);
+		return read_clk(clk, (mast & 0x00000003) >> 0);
 	case nv_clk_src_shader:
-		return read_clk(priv, (mast & 0x00000030) >> 4);
+		return read_clk(clk, (mast & 0x00000030) >> 4);
 	case nv_clk_src_mem:
-		return read_pll_2(priv, 0x4020);
+		return read_pll_2(clk, 0x4020);
 	default:
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static int
-nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
 		  int *N1, int *M1, int *N2, int *M2, int *log2P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return ret;
 
-	if (clk < pll.vco1.max_freq)
+	if (khz < pll.vco1.max_freq)
 		pll.vco2.max_freq = 0;
 
-	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+	ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
 	if (ret == 0)
 		return -ERANGE;
 
@@ -149,93 +143,90 @@
 }
 
 static int
-nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
+	struct nv40_clk *clk = nv40_clk(base);
 	int gclk = cstate->domain[nv_clk_src_core];
 	int sclk = cstate->domain[nv_clk_src_shader];
 	int N1, M1, N2, M2, log2P;
 	int ret;
 
 	/* core/geometric clock */
-	ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
 				&N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
 	if (N2 == M2) {
-		priv->npll_ctrl = 0x80000100 | (log2P << 16);
-		priv->npll_coef = (N1 << 8) | M1;
+		clk->npll_ctrl = 0x80000100 | (log2P << 16);
+		clk->npll_coef = (N1 << 8) | M1;
 	} else {
-		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
-		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
+		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
 	}
 
 	/* use the second pll for shader/rop clock, if it differs from core */
 	if (sclk && sclk != gclk) {
-		ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
 					&N1, &M1, NULL, NULL, &log2P);
 		if (ret < 0)
 			return ret;
 
-		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
-		priv->ctrl = 0x00000223;
+		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		clk->ctrl = 0x00000223;
 	} else {
-		priv->spll = 0x00000000;
-		priv->ctrl = 0x00000333;
+		clk->spll = 0x00000000;
+		clk->ctrl = 0x00000333;
 	}
 
 	return 0;
 }
 
 static int
-nv40_clk_prog(struct nvkm_clk *clk)
+nv40_clk_prog(struct nvkm_clk *base)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(priv, 0x004004, priv->npll_coef);
-	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
-	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nvkm_wr32(device, 0x004004, clk->npll_coef);
+	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
 	mdelay(5);
-	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
 	return 0;
 }
 
 static void
-nv40_clk_tidy(struct nvkm_clk *clk)
+nv40_clk_tidy(struct nvkm_clk *obj)
 {
 }
 
-static int
-nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv40_clk_priv *priv;
-	int ret;
-
-	ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	priv->base.read = nv40_clk_read;
-	priv->base.calc = nv40_clk_calc;
-	priv->base.prog = nv40_clk_prog;
-	priv->base.tidy = nv40_clk_tidy;
-	return 0;
-}
-
-struct nvkm_oclass
-nv40_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
+static const struct nvkm_clk_func
+nv40_clk = {
+	.read = nv40_clk_read,
+	.calc = nv40_clk_calc,
+	.prog = nv40_clk_prog,
+	.tidy = nv40_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
 };
+
+int
+nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	struct nv40_clk *clk;
+
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	clk->base.pll_calc = nv04_clk_pll_calc;
+	clk->base.pll_prog = nv04_clk_pll_prog;
+	*pclk = &clk->base;
+
+	return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
+}
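
[Annotation] The read_pll_2() arithmetic above is the classic two-stage PLL readback: crystal reference through VCO1 (N1/M1), optionally through VCO2 (N2/M2), then a power-of-two post-divider P. A worked example with made-up coefficients, 27 MHz crystal, N1/M1 = 100/4, N2/M2 = 2/1, P = 1 (the control-register gate on the second stage is omitted for brevity):

	/* ref  = 27000 kHz
	 * vco1 = 27000 * 100 / 4 =  675000 kHz
	 * vco2 = 675000 * 2 / 1  = 1350000 kHz
	 * out  = 1350000 >> 1    =  675000 kHz (675 MHz)
	 */
	static u32
	pll2_khz(u32 ref, int N1, int M1, int N2, int M2, int P)
	{
		u32 khz = 0;

		if (M1) {
			khz = ref * N1 / M1;
			if (M2)
				khz = khz * N2 / M2;
		}
		return khz >> P;
	}
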
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6..5841f29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
 #include "pll.h"
 #include "seq.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
 static u32
-read_div(struct nv50_clk_priv *priv)
+read_div(struct nv50_clk *clk)
 {
-	switch (nv_device(priv)->chipset) {
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (device->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(priv, 0x004700);
+		return nvkm_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(priv, 0x004800);
+		return nvkm_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
 }
 
 static u32
-read_pll_src(struct nv50_clk_priv *priv, u32 base)
+read_pll_src(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
-	u32 rsel = nv_rd32(priv, 0x00e18c);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+	u32 rsel = nvkm_rd32(device, 0x00e18c);
 	int P, N, M, id;
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -65,11 +66,11 @@
 		case 0x4008: id = !!(rsel & 0x00000008); break;
 		case 0x4030: id = 0; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
-		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *=  (coef & 0x01000000) ? 2 : 4;
 		P    =  (coef & 0x00070000) >> 16;
 		N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(priv, 0x00e81c);
+		coef = nvkm_rd32(device, 0x00e81c);
 		P    = (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(priv, 0x00c050);
+		rsel = nvkm_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
 		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
 		case 0x4030: rsel = 3; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
 		switch (rsel) {
 		case 0: id = 1; break;
-		case 1: return clk->read(clk, nv_clk_src_crystal);
-		case 2: return clk->read(clk, nv_clk_src_href);
+		case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+		case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 3: id = 0; break;
 		}
 
-		coef =  nv_rd32(priv, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
+		P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P   += (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@
 }
 
 static u32
-read_pll_ref(struct nv50_clk_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 src, mast = nv_rd32(priv, 0x00c040);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 src, mast = nvkm_rd32(device, 0x00c040);
 
 	switch (base) {
 	case 0x004028:
@@ -140,33 +142,33 @@
 		src = !!(mast & 0x02000000);
 		break;
 	case 0x00e810:
-		return clk->read(clk, nv_clk_src_crystal);
+		return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
 	default:
-		nv_error(priv, "bad pll 0x%06x\n", base);
+		nvkm_error(subdev, "bad pll %06x\n", base);
 		return 0;
 	}
 
 	if (src)
-		return clk->read(clk, nv_clk_src_href);
+		return nvkm_clk_read(&clk->base, nv_clk_src_href);
 
-	return read_pll_src(priv, base);
+	return read_pll_src(clk, base);
 }
 
 static u32
-read_pll(struct nv50_clk_priv *priv, u32 base)
+read_pll(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 mast = nv_rd32(priv, 0x00c040);
-	u32 ctrl = nv_rd32(priv, base + 0);
-	u32 coef = nv_rd32(priv, base + 4);
-	u32 ref = read_pll_ref(priv, base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = read_pll_ref(clk, base);
 	u32 freq = 0;
 	int N1, N2, M1, M2;
 
 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on gt200 */
-		if (nv_device(priv)->chipset != 0xa0)
-			return clk->read(clk, nv_clk_src_dom6);
+		if (device->chipset != 0xa0)
+			return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
 	}
 
 	N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@
 	return freq;
 }
 
-static int
-nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+int
+nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclk:
-		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+		return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
 	case nv_clk_src_hclkm3:
-		return clk->read(clk, nv_clk_src_hclk) * 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
 	case nv_clk_src_hclkm3d2:
-		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
 	case nv_clk_src_host:
 		switch (mast & 0x30000000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 0x10000000: break;
 		case 0x20000000: /* !0x50 */
-		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+		case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 		}
 		break;
 	case nv_clk_src_core:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
-		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
-		case 0x00000002: return read_pll(priv, 0x004020) >> P;
-		case 0x00000003: return read_pll(priv, 0x004028) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
+		case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
+		case 0x00000002: return read_pll(clk, 0x004020) >> P;
+		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_shader:
-		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
-				return clk->read(clk, nv_clk_src_host) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
-		case 0x00000020: return read_pll(priv, 0x004028) >> P;
-		case 0x00000030: return read_pll(priv, 0x004020) >> P;
+		case 0x00000020: return read_pll(clk, 0x004028) >> P;
+		case 0x00000030: return read_pll(clk, 0x004020) >> P;
 		}
 		break;
 	case nv_clk_src_mem:
-		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(priv, 0x004008) & 0x00000200) {
+		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00008000:
 			case 0x0000c000:
-				return clk->read(clk, nv_clk_src_href) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
 			}
 		} else {
-			return read_pll(priv, 0x004008) >> P;
+			return read_pll(clk, 0x004008) >> P;
 		}
 		break;
 	case nv_clk_src_vdec:
-		P = (read_div(priv) & 0x00000700) >> 8;
-		switch (nv_device(priv)->chipset) {
+		P = (read_div(clk) & 0x00000700) >> 8;
+		switch (device->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -259,51 +263,51 @@
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
-					return clk->read(clk, nv_clk_src_core) >> P;
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				if (device->chipset == 0xa0) /* wtf?? */
+					return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
 				if (mast & 0x01000000)
-					return read_pll(priv, 0x004028) >> P;
-				return read_pll(priv, 0x004030) >> P;
+					return read_pll(clk, 0x004028) >> P;
+				return read_pll(clk, 0x004030) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			}
 			break;
 		case 0x98:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
-				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_mem) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
 			}
 			break;
 		}
 		break;
 	case nv_clk_src_dom6:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0xa0:
-			return read_pll(priv, 0x00e810) >> 2;
+			return read_pll(clk, 0x00e810) >> 2;
 		case 0x84:
 		case 0x86:
 		case 0x92:
 		case 0x94:
 		case 0x96:
 		case 0x98:
-			P = (read_div(priv) & 0x00000007) >> 0;
+			P = (read_div(clk) & 0x00000007) >> 0;
 			switch (mast & 0x0c000000) {
-			case 0x00000000: return clk->read(clk, nv_clk_src_href);
+			case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 			case 0x04000000: break;
-			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+			case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 			case 0x0c000000:
-				return clk->read(clk, nv_clk_src_hclkm3) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
 			}
 			break;
 		default:
@@ -313,27 +317,27 @@
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static u32
-calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = read_pll_ref(priv, reg);
+	pll.refclk = read_pll_ref(clk, reg);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -360,11 +364,13 @@
 	return ((a / 1000) == (b / 1000));
 }
 
-static int
-nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+int
+nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	struct nv50_clk_hwsq *hwsq = &priv->hwsq;
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nv50_clk_hwsq *hwsq = &clk->hwsq;
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@
 	int freq, out;
 
 	/* prepare a hwsq script from which we'll perform the reclock */
-	out = clk_init(hwsq, nv_subdev(clk));
+	out = clk_init(hwsq, subdev);
 	if (out)
 		return out;
 
@@ -393,15 +399,15 @@
 		freq = calc_div(core, vdec, &P1);
 
 		/* see how close we can get using xpll/hclk as a source */
-		if (nv_device(priv)->chipset != 0x98)
-			out = read_pll(priv, 0x004030);
+		if (device->chipset != 0x98)
+			out = read_pll(clk, 0x004030);
 		else
-			out = clk->read(clk, nv_clk_src_hclkm3d2);
+			out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
 		out = calc_div(out, vdec, &P2);
 
 		/* select whichever gets us closest */
 		if (abs(vdec - freq) <= abs(vdec - out)) {
-			if (nv_device(priv)->chipset != 0x98)
+			if (device->chipset != 0x98)
 				mastv |= 0x00000c00;
 			divsv |= P1 << 8;
 		} else {
@@ -417,14 +423,14 @@
 	 * of the host clock frequency
 	 */
 	if (dom6) {
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
 			mastv |= 0x00000000;
 		} else
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
 			mastv |= 0x08000000;
 		} else {
-			freq = clk->read(clk, nv_clk_src_hclk) * 3;
-			freq = calc_div(freq, dom6, &P1);
+			freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
+			calc_div(freq, dom6, &P1);
 
 			mastv |= 0x0c000000;
 			divsv |= P1;
@@ -444,13 +450,13 @@
 	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
 	 * sclk to hclk) before reprogramming
 	 */
-	if (nv_device(priv)->chipset < 0x92)
+	if (device->chipset < 0x92)
 		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
 	else
 		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
 
 	/* core: for the moment at least, always use nvpll */
-	freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+	freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
 	if (freq == 0)
 		return -ERANGE;
 
@@ -468,7 +474,7 @@
 		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
 		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
 	} else {
-		freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+		freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
 		if (freq == 0)
 			return -ERANGE;
 
@@ -485,77 +491,71 @@
 	return 0;
 }
 
-static int
-nv50_clk_prog(struct nvkm_clk *clk)
+int
+nv50_clk_prog(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	return clk_exec(&priv->hwsq, true);
+	struct nv50_clk *clk = nv50_clk(base);
+	return clk_exec(&clk->hwsq, true);
 }
 
-static void
-nv50_clk_tidy(struct nvkm_clk *clk)
+void
+nv50_clk_tidy(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	clk_exec(&priv->hwsq, false);
+	struct nv50_clk *clk = nv50_clk(base);
+	clk_exec(&clk->hwsq, false);
 }
 
 int
-nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
 {
-	struct nv50_clk_oclass *pclass = (void *)oclass;
-	struct nv50_clk_priv *priv;
+	struct nv50_clk *clk;
 	int ret;
 
-	ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+	*pclk = &clk->base;
 	if (ret)
 		return ret;
 
-	priv->hwsq.r_fifo = hwsq_reg(0x002504);
-	priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
-	priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
-	priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
-	priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
-	switch (nv_device(priv)->chipset) {
+	clk->hwsq.r_fifo = hwsq_reg(0x002504);
+	clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
+	clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
+	clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+	clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+	switch (device->chipset) {
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		priv->hwsq.r_divs = hwsq_reg(0x004800);
+		clk->hwsq.r_divs = hwsq_reg(0x004800);
 		break;
 	default:
-		priv->hwsq.r_divs = hwsq_reg(0x004700);
+		clk->hwsq.r_divs = hwsq_reg(0x004700);
 		break;
 	}
-	priv->hwsq.r_mast = hwsq_reg(0x00c040);
-
-	priv->base.read = nv50_clk_read;
-	priv->base.calc = nv50_clk_calc;
-	priv->base.prog = nv50_clk_prog;
-	priv->base.tidy = nv50_clk_tidy;
+	clk->hwsq.r_mast = hwsq_reg(0x00c040);
 	return 0;
 }
 
-static struct nvkm_domain
-nv50_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv50_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-nv50_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = nv50_domains,
-}.base;
+int
+nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a..d3c7fb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
+#ifndef __NV50_CLK_H__
+#define __NV50_CLK_H__
+#define nv50_clk(p) container_of((p), struct nv50_clk, base)
+#include "priv.h"
+
 #include <subdev/bus/hwsq.h>
-#include <subdev/clk.h>
 
 struct nv50_clk_hwsq {
 	struct hwsq base;
@@ -12,17 +14,15 @@
 	struct hwsq_reg r_mast;
 };
 
-struct nv50_clk_priv {
+struct nv50_clk {
 	struct nvkm_clk base;
 	struct nv50_clk_hwsq hwsq;
 };
 
-int  nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-
-struct nv50_clk_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_domain *domains;
-};
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool, struct nvkm_clk **);
+int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int nv50_clk_prog(struct nvkm_clk *);
+void nv50_clk_tidy(struct nvkm_clk *);
 #endif
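
[Annotation] Exporting nv50_clk_new_(), nv50_clk_read() and friends from nv50.h lets later chipsets reuse the hwsq-based nv50 implementation while supplying their own func table and domain list. An assumed usage sketch for a derivative; the g84 names are illustrative and not part of this hunk:

	static const struct nvkm_clk_func
	g84_clk = {
		.read = nv50_clk_read,
		.calc = nv50_clk_calc,
		.prog = nv50_clk_prog,
		.tidy = nv50_clk_tidy,
		.domains = {
			{ nv_clk_src_crystal, 0xff },
			{ nv_clk_src_href   , 0xff },
			{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
			{ nv_clk_src_max }
		}
	};

	int
	g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
	{
		return nv50_clk_new_(&g84_clk, device, index, true, pclk);
	}
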
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e7..c6fccd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@
 	}
 
 	if (unlikely(best_err == ~0)) {
-		nv_error(subdev, "unable to find matching pll values\n");
+		nvkm_error(subdev, "unable to find matching pll values\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f229289..5ad6787 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	int chip_version = nvkm_bios(subdev)->version.chip;
+	int chip_version = subdev->device->bios->version.chip;
 	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
 	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
 	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@
 	}
 
 	if (!ret)
-		nv_error(subdev, "unable to compute acceptable pll values\n");
+		nvkm_error(subdev, "unable to compute acceptable pll values\n");
 	return ret;
 }
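
For orientation, the relation both calculators above search over is the usual two-stage PLL equation (a sketch under that assumption; the real code additionally honours the limits parsed into struct nvbios_pll): a single-stage PLL outputs refclk * N / M, a two-stage PLL feeds that VCO into a second N2/M2 stage, and the result is divided by 2^log2P. As a hypothetical helper:

static u32
pll_out_khz(u32 refclk_khz, int N1, int M1, int N2, int M2, int log2P)
{
	u64 out = (u64)refclk_khz * N1 / M1;	/* first VCO stage */
	if (N2 && M2)				/* optional second stage */
		out = out * N2 / M2;
	return (u32)(out >> log2P);		/* post divider */
}
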
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 0000000..51eafc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_CLK_PRIV_H__
+#define __NVKM_CLK_PRIV_H__
+#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
+#include <subdev/clk.h>
+
+struct nvkm_clk_func {
+	int (*init)(struct nvkm_clk *);
+	void (*fini)(struct nvkm_clk *);
+	int (*read)(struct nvkm_clk *, enum nv_clk_src);
+	int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+	int (*prog)(struct nvkm_clk *);
+	void (*tidy)(struct nvkm_clk *);
+	struct nvkm_pstate *pstates;
+	int nr_pstates;
+	struct nvkm_domain domains[];
+};
+
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk *);
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk **);
+
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+		      struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+#endif
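
Two details in this new header are worth noting: hooks left NULL in a chip's function table are guarded at the call sites (the devinit wrappers later in this patch show the same "if (func->op)" pattern), and domains[] is a flexible array member that the per-chip tables initialize statically (a GCC extension), terminated by an nv_clk_src_max sentinel rather than a count. Walking the list therefore looks like this (the field name is assumed from the positional initializers in the nv50 table above):

const struct nvkm_domain *d = func->domains;
while (d->name != nv_clk_src_max)	/* sentinel entry ends the table */
	d++;				/* e.g. count or match domains */
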
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index b0d7c5f..5f25402 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -23,74 +23,108 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/vga.h>
 
-int
-_nvkm_devinit_fini(struct nvkm_object *object, bool suspend)
+u32
+nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
 {
-	struct nvkm_devinit *devinit = (void *)object;
-
-	/* force full reinit on resume */
-	if (suspend)
-		devinit->post = true;
-
-	/* unlock the extended vga crtc regs */
-	nv_lockvgac(devinit, false);
-
-	return nvkm_subdev_fini(&devinit->base, suspend);
+	if (init->func->mmio)
+		addr = init->func->mmio(init, addr);
+	return addr;
 }
 
 int
-_nvkm_devinit_init(struct nvkm_object *object)
+nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
 {
-	struct nvkm_devinit_impl *impl = (void *)object->oclass;
-	struct nvkm_devinit *devinit = (void *)object;
-	int ret;
-
-	ret = nvkm_subdev_init(&devinit->base);
-	if (ret)
-		return ret;
-
-	ret = impl->post(&devinit->base, devinit->post);
-	if (ret)
-		return ret;
-
-	if (impl->disable)
-		nv_device(devinit)->disable_mask |= impl->disable(devinit);
-	return 0;
+	return init->func->pll_set(init, type, khz);
 }
 
 void
-_nvkm_devinit_dtor(struct nvkm_object *object)
+nvkm_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nvkm_devinit *devinit = (void *)object;
+	if (init->func->meminit)
+		init->func->meminit(init);
+}
 
-	/* lock crtc regs */
-	nv_lockvgac(devinit, true);
-
-	nvkm_subdev_destroy(&devinit->base);
+u64
+nvkm_devinit_disable(struct nvkm_devinit *init)
+{
+	if (init && init->func->disable)
+		return init->func->disable(init);
+	return 0;
 }
 
 int
-nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int size, void **pobject)
+nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
 {
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_devinit *devinit;
-	int ret;
+	int ret = 0;
+	if (init && init->func->post)
+		ret = init->func->post(init, init->post);
+	*disable = nvkm_devinit_disable(init);
+	return ret;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
-				  "init", size, pobject);
-	devinit = *pobject;
-	if (ret)
-		return ret;
-
-	devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
-	devinit->meminit = impl->meminit;
-	devinit->pll_set = impl->pll_set;
-	devinit->mmio    = impl->mmio;
+static int
+nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	/* force full reinit on resume */
+	if (suspend)
+		init->post = true;
 	return 0;
 }
+
+static int
+nvkm_devinit_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+
+	if (init->func->preinit)
+		init->func->preinit(init);
+
+	/* unlock the extended vga crtc regs */
+	nvkm_lockvgac(subdev->device, false);
+	return 0;
+}
+
+static int
+nvkm_devinit_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	if (init->func->init)
+		init->func->init(init);
+	return 0;
+}
+
+static void *
+nvkm_devinit_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	void *data = init;
+
+	if (init->func->dtor)
+		data = init->func->dtor(init);
+
+	/* lock crtc regs */
+	nvkm_lockvgac(subdev->device, true);
+	return data;
+}
+
+static const struct nvkm_subdev_func
+nvkm_devinit = {
+	.dtor = nvkm_devinit_dtor,
+	.preinit = nvkm_devinit_preinit,
+	.init = nvkm_devinit_init,
+	.fini = nvkm_devinit_fini,
+};
+
+void
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit *init)
+{
+	nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+	init->func = func;
+	init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
+}
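
Each wrapper above guards its hook before calling it, so a chip implements only what it needs; the caller-facing contract is nvkm_devinit_post(), which runs the board POST (when one is provided) and reports which engines to mask off. A sketch of the assumed caller side (the removed code stored this in the device's disable_mask; the exact field on the new nvkm_device may differ):

u64 disable;
int ret = nvkm_devinit_post(device->devinit, &disable);
if (ret)
	return ret;
device->disable_mask |= disable;	/* engines absent on this board */
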
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 36684c3..6c5bbff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,7 +23,6 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <core/device.h>
 #include <subdev/fb/regsnv04.h>
 
 #define NV04_PFB_DEBUG_0					0x00100080
@@ -48,8 +47,8 @@
 static inline struct io_mapping *
 fbmem_init(struct nvkm_device *dev)
 {
-	return io_mapping_create_wc(nv_device_resource_start(dev, 1),
-				    nv_device_resource_len(dev, 1));
+	return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
+				    dev->func->resource_size(dev, 1));
 }
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index ca776ce..e895289 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-g84_devinit_disable(struct nvkm_devinit *devinit)
+g84_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
-		disable |= (1ULL << NVDEV_ENGINE_VP);
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_VP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g84_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g84_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g84_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g84_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index d29bace..a9d4584 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -27,39 +27,41 @@
 #include <subdev/bios/init.h>
 
 static u64
-g98_devinit_disable(struct nvkm_devinit *devinit)
+g98_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_SEC);
+		disable |= (1ULL << NVKM_ENGINE_SEC);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g98_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g98_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g98_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g98_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index c61102f..22b0140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -29,19 +29,19 @@
 #include <subdev/clk/pll.h>
 
 int
-gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
@@ -50,12 +50,12 @@
 	case PLL_VPLL1:
 	case PLL_VPLL2:
 	case PLL_VPLL3:
-		nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
-		nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
-		nv_wr32(priv, info.reg + 0x10, fN << 16);
+		nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
+		nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
+		nvkm_wr32(device, info.reg + 0x10, fN << 16);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -64,64 +64,44 @@
 }
 
 static u64
-gf100_devinit_disable(struct nvkm_devinit *devinit)
+gf100_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r022500 = nv_rd32(priv, 0x022500);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r022500 = nvkm_rd32(device, 0x022500);
 	u64 disable = 0ULL;
 
 	if (r022500 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	if (r022500 & 0x00000002) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (r022500 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (r022500 & 0x00000008)
-		disable |= (1ULL << NVDEV_ENGINE_MSENC);
+		disable |= (1ULL << NVKM_ENGINE_MSENC);
 	if (r022500 & 0x00000100)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r022500 & 0x00000200)
-		disable |= (1ULL << NVDEV_ENGINE_CE1);
+		disable |= (1ULL << NVKM_ENGINE_CE1);
 
 	return disable;
 }
 
-int
-gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
-{
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nv50_devinit_priv *priv;
-	u64 disable;
-	int ret;
-
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	disable = impl->disable(&priv->base);
-	if (disable & (1ULL << NVDEV_ENGINE_DISP))
-		priv->base.post = true;
-
-	return 0;
-}
-
-struct nvkm_oclass *
-gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gf100_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gf100_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gf100_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 87ca0ec..2be98bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -27,33 +27,35 @@
 #include <subdev/bios/init.h>
 
 u64
-gm107_devinit_disable(struct nvkm_devinit *devinit)
+gm107_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r021c00 = nv_rd32(priv, 0x021c00);
-	u32 r021c04 = nv_rd32(priv, 0x021c04);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r021c00 = nvkm_rd32(device, 0x021c00);
+	u32 r021c04 = nvkm_rd32(device, 0x021c04);
 	u64 disable = 0ULL;
 
 	if (r021c00 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r021c00 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_CE2);
+		disable |= (1ULL << NVKM_ENGINE_CE2);
 	if (r021c04 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm107_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gm107_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 1076fcf..2b9c3f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -28,69 +28,74 @@
 #include <subdev/bios/pmu.h>
 
 static void
-pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+	nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
 	for (i = 0; i < len; i += 4) {
 		if ((i & 0xff) == 0)
-			nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
-		nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+			nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
+		nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
 	}
 
 	while (i & 0xff) {
-		nv_wr32(priv, 0x10a184, 0x00000000);
+		nvkm_wr32(device, 0x10a184, 0x00000000);
 		i += 4;
 	}
 }
 
 static void
-pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
 	for (i = 0; i < len; i += 4)
-		nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+		nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
 }
 
 static u32
-pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
 {
-	nv_wr32(priv, 0x10a1c0, argp);
-	nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
-	return nv_rd32(priv, 0x10a1c4);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a1c0, argp);
+	nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
+	return nvkm_rd32(device, 0x10a1c4);
 }
 
 static void
-pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+pmu_exec(struct nv50_devinit *init, u32 init_addr)
 {
-	nv_wr32(priv, 0x10a104, init_addr);
-	nv_wr32(priv, 0x10a10c, 0x00000000);
-	nv_wr32(priv, 0x10a100, 0x00000002);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a104, init_addr);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 }
 
 static int
-pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+pmu_load(struct nv50_devinit *init, u8 type, bool post,
 	 u32 *init_addr_pmu, u32 *args_addr_pmu)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pmuR pmu;
 
 	if (!nvbios_pmuRm(bios, type, &pmu)) {
-		nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+		nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
 		return -EINVAL;
 	}
 
 	if (!post)
 		return 0;
 
-	pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
-	pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
-	pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+	pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+	pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+	pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
 
 	if (init_addr_pmu) {
 		*init_addr_pmu = pmu.init_addr_pmu;
@@ -98,75 +103,79 @@
 		return 0;
 	}
 
-	return pmu_exec(priv, pmu.init_addr_pmu), 0;
+	return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
 static int
-gm204_devinit_post(struct nvkm_subdev *subdev, bool post)
+gm204_devinit_post(struct nvkm_devinit *base, bool post)
 {
-	struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev);
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry bit_I;
-	u32 init, args;
+	u32 exec, args;
 	int ret;
 
 	if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
 					    bit_I.length < 0x1c) {
-		nv_error(priv, "VBIOS PMU init data not found\n");
+		nvkm_error(subdev, "VBIOS PMU init data not found\n");
 		return -EINVAL;
 	}
 
 	/* reset PMU and load init table parser ucode */
 	if (post) {
-		nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
-		nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
-		nv_rd32(priv, 0x000200);
-		while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+		nvkm_rd32(device, 0x000200);
+		while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
 		}
 	}
 
-	ret = pmu_load(priv, 0x04, post, &init, &args);
+	ret = pmu_load(init, 0x04, post, &exec, &args);
 	if (ret)
 		return ret;
 
 	/* upload first chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x08);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x14);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x16);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x08);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* upload second chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x10);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x18);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x10);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* execute init tables */
 	if (post) {
-		nv_wr32(priv, 0x10a040, 0x00005000);
-		pmu_exec(priv, init);
-		while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+		nvkm_wr32(device, 0x10a040, 0x00005000);
+		pmu_exec(init, exec);
+		while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
 		}
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(priv, 0x01, post, NULL, NULL);
+	return pmu_load(init, 0x01, post, NULL, NULL);
 }
 
-struct nvkm_oclass *
-gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm204_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = gm204_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = gm204_devinit_post,
-}.base;
+};
+
+int
+gm204_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+}
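
In outline, the gm204 POST above moves init-script execution off the CPU and onto the PMU: reset the PMU, upload the boot/code/data segments of the VBIOS "fuc 04" image, hand it the two init-data chunks through the argument window at 0x10a1c0/0x10a1c4, then start it and spin until 0x10a040 signals completion. Condensed, using the same helpers (img0/len0 and img1/len1 stand for the chunk pointers read from the BIT 'I' table; error handling omitted):

pmu_load(init, 0x04, true, &exec, &args);	/* init-table parser ucode */
pmu_data(init, pmu_args(init, args + 0x08, 0x08), img0, len0);
pmu_data(init, pmu_args(init, args + 0x08, 0x10), img1, len1);
nvkm_wr32(device, 0x10a040, 0x00005000);	/* written before exec, as above */
pmu_exec(init, exec);
while (!(nvkm_rd32(device, 0x10a040) & 0x00002000))
	;					/* busy-wait on completion bit */
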
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 6a3e8d4..9a8522f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -29,32 +29,32 @@
 #include <subdev/clk/pll.h>
 
 int
-gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x50000610);
-		nv_mask(priv, info.reg + 4, 0x003fffff,
-					    (P << 16) | (M << 8) | N);
-		nv_wr32(priv, info.reg + 8, fN);
+		nvkm_wr32(device, info.reg + 0, 0x50000610);
+		nvkm_mask(device, info.reg + 4, 0x003fffff,
+						(P << 16) | (M << 8) | N);
+		nvkm_wr32(device, info.reg + 8, fN);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -63,24 +63,24 @@
 }
 
 static u64
-gt215_devinit_disable(struct nvkm_devinit *devinit)
+gt215_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
@@ -99,9 +99,10 @@
 };
 
 static u32
-gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
+gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_device *device = init->base.subdev.device;
 	u32 *mmio = gt215_devinit_mmio_part;
 
 	/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
@@ -113,7 +114,7 @@
 	 *
 	 * the binary driver avoids touching these registers at all, however,
 	 * the video bios doesn't care and does what the scripts say.  it's
-	 * presumed that the io-port access to priv registers isn't effected
+	 * presumed that the io-port access to init registers isn't affected
 	 * by the screw-up bug mentioned above.
 	 *
 	 * really, a new opcode should've been invented to handle these
@@ -122,9 +123,9 @@
 	while (mmio[0]) {
 		if (addr >= mmio[0] && addr <= mmio[1]) {
 			u32 part = (addr / mmio[2]) & 7;
-			if (!priv->r001540)
-				priv->r001540 = nv_rd32(priv, 0x001540);
-			if (part >= hweight8((priv->r001540 >> 16) & 0xff))
+			if (!init->r001540)
+				init->r001540 = nvkm_rd32(device, 0x001540);
+			if (part >= hweight8((init->r001540 >> 16) & 0xff))
 				return ~0;
 			return addr;
 		}
@@ -134,17 +135,19 @@
 	return addr;
 }
 
-struct nvkm_oclass *
-gt215_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gt215_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
+	.mmio = gt215_devinit_mmio,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = gt215_devinit_disable,
-	.mmio    = gt215_devinit_mmio,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gt215_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+}
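
The filter above is purely arithmetic: for each register window {start, end, stride} in gt215_devinit_mmio_part (the table ends just before this hunk), the partition a register belongs to is (addr / stride) & 7, and the access is only honoured if that partition survives the fused-off mask in bits 16..23 of 0x001540. A sketch with a hypothetical window:

u32 window[3] = { 0x100700, 0x100800, 0x20 };	/* made-up values */
u32 part = (addr / window[2]) & 7;		/* owning partition */
u32 present = hweight8((r001540 >> 16) & 0xff);	/* populated partitions */
if (part >= present)
	addr = ~0;	/* accesses to missing partitions are dropped */
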
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 55cf48b..ce4f718 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-mcp89_devinit_disable(struct nvkm_devinit *devinit)
+mcp89_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_VIC);
+		disable |= (1ULL << NVKM_ENGINE_VIC);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-mcp89_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xaf),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+mcp89_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = mcp89_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+mcp89_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 03a0da8..c8d4553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -33,25 +33,26 @@
 #include <subdev/vga.h>
 
 static void
-nv04_devinit_meminit(struct nvkm_devinit *devinit)
+nv04_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
 	/* Sequencer and refresh off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
-	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
 
-	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+	nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
 		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
 		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
@@ -62,49 +63,49 @@
 	fbmem_poke(fb, 0x400000, patt + 1);
 
 	if (fbmem_peek(fb, 0) == patt + 1) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
-		nv_mask(priv, NV04_PFB_DEBUG_0,
+		nvkm_mask(device, NV04_PFB_DEBUG_0,
 			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 		for (i = 0; i < 4; i++)
 			fbmem_poke(fb, 4 * i, patt);
 
 		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 	} else
 	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 			      NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 	} else
 	if (fbmem_peek(fb, 0) != patt) {
 		if (fbmem_readback(fb, 0x800000, patt))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 		else
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
 	} else
 	if (!fbmem_readback(fb, 0x800000, patt)) {
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	}
 
 	/* Refresh on, sequencer on */
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
@@ -139,11 +140,12 @@
 }
 
 void
-setPLL_single(struct nvkm_devinit *devinit, u32 reg,
+setPLL_single(struct nvkm_devinit *init, u32 reg,
 	      struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
-	uint32_t oldpll = nv_rd32(devinit, reg);
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
+	uint32_t oldpll = nvkm_rd32(device, reg);
 	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
 	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t saved_powerctrl_1 = 0;
@@ -153,30 +155,30 @@
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
 
 	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
 		/* upclock -- write new post divider first */
-		nv_wr32(devinit, reg, pv->log2P << 16 | (oldpll & 0xffff));
+		nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
 	else
 		/* downclock -- write new NM first */
-		nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
+		nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
 
 	if ((chip_version < 0x17 || chip_version == 0x1a) &&
 	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
-	nv_rd32(devinit, reg);
+	nvkm_rd32(device, reg);
 
 	/* then write the other half as well */
-	nv_wr32(devinit, reg, pll);
+	nvkm_wr32(device, reg, pll);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 }
 
 static uint32_t
@@ -193,14 +195,15 @@
 }
 
 void
-setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
+setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
 		       struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
 	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
 	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
-	uint32_t oldpll1 = nv_rd32(devinit, reg1);
-	uint32_t oldpll2 = !nv3035 ? nv_rd32(devinit, reg2) : 0;
+	uint32_t oldpll1 = nvkm_rd32(device, reg1);
+	uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
 	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
 	uint32_t oldramdac580 = 0, ramdac580 = 0;
@@ -215,7 +218,7 @@
 		pll2 = 0;
 	}
 	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
-		oldramdac580 = nv_rd32(devinit, 0x680580);
+		oldramdac580 = nvkm_rd32(device, 0x680580);
 		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
 		if (oldramdac580 != ramdac580)
 			oldpll1 = ~0;	/* force mismatch */
@@ -231,8 +234,8 @@
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
@@ -251,26 +254,26 @@
 			shift_c040 += 2;
 		}
 
-		savedc040 = nv_rd32(devinit, 0xc040);
+		savedc040 = nvkm_rd32(device, 0xc040);
 		if (shift_c040 != 14)
-			nv_wr32(devinit, 0xc040, savedc040 & ~(3 << shift_c040));
+			nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
 	}
 
 	if (oldramdac580 != ramdac580)
-		nv_wr32(devinit, 0x680580, ramdac580);
+		nvkm_wr32(device, 0x680580, ramdac580);
 
 	if (!nv3035)
-		nv_wr32(devinit, reg2, pll2);
-	nv_wr32(devinit, reg1, pll1);
+		nvkm_wr32(device, reg2, pll2);
+	nvkm_wr32(device, reg1, pll1);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 	if (chip_version >= 0x40)
-		nv_wr32(devinit, 0xc040, savedc040);
+		nvkm_wr32(device, 0xc040, savedc040);
 }
 
 void
-setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
+setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
 		      struct nvkm_pll_vals *pv)
 {
 	/* When setting PLLs, there is a merry game of disabling and enabling
@@ -280,10 +283,10 @@
 	 * combined herein. Without luck it deviates from each card's formula
 	 * so as to not work on any :)
 	 */
-
+	struct nvkm_device *device = init->subdev.device;
 	uint32_t Preg = NMNMreg - 4;
 	bool mpll = Preg == 0x4020;
-	uint32_t oldPval = nv_rd32(devinit, Preg);
+	uint32_t oldPval = nvkm_rd32(device, Preg);
 	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
 	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
 			0xc << 28 | pv->log2P << 16;
@@ -292,7 +295,7 @@
 	uint32_t maskc040 = ~(3 << 14), savedc040;
 	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
 
-	if (nv_rd32(devinit, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+	if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
 		return;
 
 	if (Preg == 0x4000)
@@ -304,7 +307,7 @@
 		struct nvbios_pll info;
 		uint8_t Pval2;
 
-		if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info))
+		if (nvbios_pll_parse(device->bios, Preg, &info))
 			return;
 
 		Pval2 = pv->log2P + info.bias_p;
@@ -312,47 +315,48 @@
 			Pval2 = info.max_p;
 		Pval |= 1 << 28 | Pval2 << 20;
 
-		saved4600 = nv_rd32(devinit, 0x4600);
-		nv_wr32(devinit, 0x4600, saved4600 | 8 << 28);
+		saved4600 = nvkm_rd32(device, 0x4600);
+		nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
 	}
 	if (single_stage)
 		Pval |= mpll ? 1 << 12 : 1 << 8;
 
-	nv_wr32(devinit, Preg, oldPval | 1 << 28);
-	nv_wr32(devinit, Preg, Pval & ~(4 << 28));
+	nvkm_wr32(device, Preg, oldPval | 1 << 28);
+	nvkm_wr32(device, Preg, Pval & ~(4 << 28));
 	if (mpll) {
 		Pval |= 8 << 20;
-		nv_wr32(devinit, 0x4020, Pval & ~(0xc << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
 	}
 
-	savedc040 = nv_rd32(devinit, 0xc040);
-	nv_wr32(devinit, 0xc040, savedc040 & maskc040);
+	savedc040 = nvkm_rd32(device, 0xc040);
+	nvkm_wr32(device, 0xc040, savedc040 & maskc040);
 
-	nv_wr32(devinit, NMNMreg, NMNM);
+	nvkm_wr32(device, NMNMreg, NMNM);
 	if (NMNMreg == 0x4024)
-		nv_wr32(devinit, 0x403c, NMNM);
+		nvkm_wr32(device, 0x403c, NMNM);
 
-	nv_wr32(devinit, Preg, Pval);
+	nvkm_wr32(device, Preg, Pval);
 	if (mpll) {
 		Pval &= ~(8 << 20);
-		nv_wr32(devinit, 0x4020, Pval);
-		nv_wr32(devinit, 0x4038, Pval);
-		nv_wr32(devinit, 0x4600, saved4600);
+		nvkm_wr32(device, 0x4020, Pval);
+		nvkm_wr32(device, 0x4038, Pval);
+		nvkm_wr32(device, 0x4600, saved4600);
 	}
 
-	nv_wr32(devinit, 0xc040, savedc040);
+	nvkm_wr32(device, 0xc040, savedc040);
 
 	if (mpll) {
-		nv_wr32(devinit, 0x4020, Pval & ~(1 << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
 	}
 }
 
 int
 nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(devinit);
+	struct nvkm_subdev *subdev = &devinit->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_pll_vals pv;
 	struct nvbios_pll info;
 	int cv = bios->version.chip;
@@ -363,8 +367,7 @@
 	if (ret)
 		return ret;
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
-			    &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret)
 		return -EINVAL;
 
@@ -388,83 +391,76 @@
 }
 
 int
-nv04_devinit_fini(struct nvkm_object *object, bool suspend)
+nv04_devinit_post(struct nvkm_devinit *init, bool execute)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
-	int ret;
-
-	/* make i2c busses accessible */
-	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
-
-	ret = nvkm_devinit_fini(&priv->base, suspend);
-	if (ret)
-		return ret;
-
-	/* unslave crtcs */
-	if (priv->owner < 0)
-		priv->owner = nv_rdvgaowner(priv);
-	nv_wrvgaowner(priv, 0);
-	return 0;
-}
-
-int
-nv04_devinit_init(struct nvkm_object *object)
-{
-	struct nv04_devinit_priv *priv = (void *)object;
-
-	if (!priv->base.post) {
-		u32 htotal = nv_rdvgac(priv, 0, 0x06);
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
-		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
-		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
-		if (!htotal) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
-		}
-	}
-
-	return nvkm_devinit_init(&priv->base);
+	return nvbios_init(&init->subdev, execute);
 }
 
 void
-nv04_devinit_dtor(struct nvkm_object *object)
+nv04_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
+	struct nv04_devinit *init = nv04_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
+	/* make i2c busses accessible */
+	nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
+
+	/* unslave crtcs */
+	if (init->owner < 0)
+		init->owner = nvkm_rdvgaowner(device);
+	nvkm_wrvgaowner(device, 0);
+
+	if (!init->base.post) {
+		u32 htotal = nvkm_rdvgac(device, 0, 0x06);
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
+		htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
+		htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
+		if (!htotal) {
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
+		}
+	}
+}
+
+void *
+nv04_devinit_dtor(struct nvkm_devinit *base)
+{
+	struct nv04_devinit *init = nv04_devinit(base);
 	/* restore vga owner saved at first init */
-	nv_wrvgaowner(priv, priv->owner);
-
-	nvkm_devinit_destroy(&priv->base);
+	nvkm_wrvgaowner(init->base.subdev.device, init->owner);
+	return init;
 }
 
 int
-nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv04_devinit_priv *priv;
-	int ret;
+	struct nv04_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
-	priv->owner = -1;
+	nvkm_devinit_ctor(func, device, index, &init->base);
+	init->owner = -1;
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv04_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv04_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv04_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+}
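
One ordering subtlety in nv04_devinit_new_() above, repeated by every constructor in this series: *pinit is published before nvkm_devinit_ctor() runs, so if a later step of device bring-up fails, the caller already holds an object the normal subdev dtor path can tear down (for nv04 that path also restores the saved VGA owner). A hypothetical call site (the devinit field name on nvkm_device is assumed):

int ret = nv04_devinit_new(device, index, &device->devinit);
if (ret) {
	/* device->devinit may already be set; device teardown is
	 * expected to run its dtor and free the allocation */
	return ret;
}
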
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 7c63abf..4a87c8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,19 +1,19 @@
-#ifndef __NVKM_DEVINIT_NV04_H__
-#define __NVKM_DEVINIT_NV04_H__
+#ifndef __NV04_DEVINIT_H__
+#define __NV04_DEVINIT_H__
+#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
 #include "priv.h"
 struct nvkm_pll_vals;
 
-struct nv04_devinit_priv {
+struct nv04_devinit {
 	struct nvkm_devinit base;
 	int owner;
 };
 
-int  nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void nv04_devinit_dtor(struct nvkm_object *);
-int  nv04_devinit_init(struct nvkm_object *);
-int  nv04_devinit_fini(struct nvkm_object *, bool);
+int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void *nv04_devinit_dtor(struct nvkm_devinit *);
+void nv04_devinit_preinit(struct nvkm_devinit *);
+void nv04_devinit_fini(struct nvkm_devinit *);
 int  nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index def8649..9891ead 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -32,7 +32,7 @@
 #include <subdev/vga.h>
 
 static void
-nv05_devinit_meminit(struct nvkm_devinit *devinit)
+nv05_devinit_meminit(struct nvkm_devinit *init)
 {
 	static const u8 default_config_tab[][2] = {
 		{ 0x24, 0x00 },
@@ -44,8 +44,9 @@
 		{ 0x06, 0x00 },
 		{ 0x00, 0x00 }
 	};
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct io_mapping *fb;
 	u32 patt = 0xdeadbeef;
 	u16 data;
@@ -53,88 +54,90 @@
 	int i, v;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+	strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	if ((data = bmp_mem_init_table(bios))) {
-		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
-		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+		ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
+		ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
 	} else {
 		ramcfg[0] = default_config_tab[strap][0];
 		ramcfg[1] = default_config_tab[strap][1];
 	}
 
 	/* Sequencer off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
 
-	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+	if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
 		goto out;
 
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 	/* If present load the hardcoded scrambling table */
 	if (data) {
 		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
-			u32 scramble = nv_ro32(bios, data);
-			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+			u32 scramble = nvbios_rd32(bios, data);
+			nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
 		}
 	}
 
 	/* Set memory type/width/length defaults depending on the straps */
-	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+	nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
 
 	if (ramcfg[1] & 0x80)
-		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+		nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
 
-	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
-	nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+	nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+	nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < 4; i++)
 		fbmem_poke(fb, 4 * i, patt);
 
 	if (fbmem_peek(fb, 0xc) != patt)
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
 
 	/* Probe memory length */
-	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+	v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
 	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
 	     !fbmem_readback(fb, 0, ++patt)))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
 	    !fbmem_readback(fb, 0x800000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	if (!fbmem_readback(fb, 0x400000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
 out:
 	/* Sequencer on */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv05_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x05),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv05_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv05_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv05_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 7aabc1b..570822f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -30,33 +30,33 @@
 #include <subdev/bios/init.h>
 
 static void
-nv10_devinit_meminit(struct nvkm_devinit *devinit)
+nv10_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const int mem_width[] = { 0x10, 0x00, 0x20 };
 	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17)
+	if (device->card_type >= NV_11 && device->chipset >= 0x17)
 		mem_width_count = 3;
 	else
 		mem_width_count = 2;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < mem_width_count; i++) {
-		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+		nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
 
 		for (j = 0; j < 4; j++) {
 			for (k = 0; k < 4; k++)
@@ -75,7 +75,7 @@
 
 	/* Probe amount of installed memory */
 	for (i = 0; i < 4; i++) {
-		int off = nv_rd32(priv, 0x10020c) - 0x100000;
+		int off = nvkm_rd32(device, 0x10020c) - 0x100000;
 
 		fbmem_poke(fb, off, patt);
 		fbmem_poke(fb, 0, 0);
@@ -90,22 +90,24 @@
 	}
 
 	/* IC missing - disable the upper half memory space. */
-	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+	nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
 
 amount_found:
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv10_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv10_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv10_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv10_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 9f36fff..fefafec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -26,15 +26,17 @@
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 
-struct nvkm_oclass *
-nv1a_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x1a),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv1a_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv1a_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 02fcfd9..4ef04e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -30,48 +30,50 @@
 #include <subdev/bios/init.h>
 
 static void
-nv20_devinit_meminit(struct nvkm_devinit *devinit)
+nv20_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
 	uint32_t amount, off;
 	struct io_mapping *fb;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Allow full addressing */
-	nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+	nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	for (off = amount; off > 0x2000000; off -= 0x2000000)
 		fbmem_poke(fb, off - 4, off);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	if (amount != fbmem_peek(fb, amount - 4))
 		/* IC missing - disable the upper half memory space. */
-		nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+		nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
 
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv20_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x20),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv20_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv20_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv20_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 26b7cb1..337c2c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -29,47 +29,48 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
-#include <subdev/ibus.h>
 #include <subdev/vga.h>
 
 int
-nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_pll info;
 	int N1, M1, N2, M2, P;
 	int ret;
 
 	ret = nvbios_pll_parse(bios, type, &info);
 	if (ret) {
-		nv_error(devinit, "failed to retrieve pll data, %d\n", ret);
+		nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret) {
-		nv_error(devinit, "failed pll calculation\n");
+		nvkm_error(subdev, "failed pll calculation\n");
 		return ret;
 	}
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x10000611);
-		nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
-		nv_mask(priv, info.reg + 8, 0x7fff00ff, (P  << 28) |
-							(M2 << 16) | N2);
+		nvkm_wr32(device, info.reg + 0, 0x10000611);
+		nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+		nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P  << 28) |
+							    (M2 << 16) | N2);
 		break;
 	case PLL_MEMORY:
-		nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
-						        (info.bias_p << 19) |
-							(P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x01ff0000,
+					        (P << 22) |
+						(info.bias_p << 19) |
+						(P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	default:
-		nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	}
 
@@ -77,57 +78,68 @@
 }
 
 static u64
-nv50_devinit_disable(struct nvkm_devinit *devinit)
+nv50_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000))
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
 
 	return disable;
 }
 
-int
-nv50_devinit_init(struct nvkm_object *object)
+void
+nv50_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(object);
-	struct nvkm_ibus *ibus = nvkm_ibus(object);
-	struct nv50_devinit_priv *priv = (void *)object;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* our heuristics can't detect whether the board has had its
+	 * devinit scripts executed or not if the display engine is
+	 * missing; assume it's a secondary gpu which requires post
+	 */
+	if (!init->base.post) {
+		u64 disable = nvkm_devinit_disable(&init->base);
+		if (disable & (1ULL << NVKM_ENGINE_DISP))
+			init->base.post = true;
+	}
+
+	/* magic to detect whether or not x86 vbios code has executed
+	 * the devinit scripts to initialise the board
+	 */
+	if (!init->base.post) {
+		if (!nvkm_rdvgac(device, 0, 0x00) &&
+		    !nvkm_rdvgac(device, 0, 0x1a)) {
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
+		}
+	}
+}
+
+void
+nv50_devinit_init(struct nvkm_devinit *base)
+{
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_outp info;
 	struct dcb_output outp;
 	u8  ver = 0xff, hdr, cnt, len;
-	int ret, i = 0;
-
-	if (!priv->base.post) {
-		if (!nv_rdvgac(priv, 0, 0x00) &&
-		    !nv_rdvgac(priv, 0, 0x1a)) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
-		}
-	}
-
-	/* some boards appear to require certain priv register timeouts
-	 * to be bumped before runing devinit scripts.  not a clue why
-	 * the vbios engineers didn't make the scripts just work...
-	 */
-	if (priv->base.post && ibus)
-		nv_ofuncs(ibus)->init(nv_object(ibus));
-
-	ret = nvkm_devinit_init(&priv->base);
-	if (ret)
-		return ret;
+	int i = 0;
 
 	/* if we ran the init tables, we have to execute the first script
 	 * pointer of each dcb entry's display encoder table in order
 	 * to properly initialise each encoder.
 	 */
-	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+	while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
 		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
 				      &ver, &hdr, &cnt, &len, &info)) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+			struct nvbios_init exec = {
+				.subdev = subdev,
 				.bios = bios,
 				.offset = info.script[0],
 				.outp = &outp,
@@ -135,40 +147,39 @@
 				.execute = 1,
 			};
 
-			nvbios_exec(&init);
+			nvbios_exec(&exec);
 		}
 		i++;
 	}
-
-	return 0;
 }
 
 int
-nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv50_devinit_priv *priv;
-	int ret;
+	struct nv50_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
+	nvkm_devinit_ctor(func, device, index, &init->base);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv50_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv50_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv50_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+}
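
nv50_devinit_preinit() above folds two POST-detection heuristics into the
flow; condensed into a single predicate, the decision reads roughly as
follows (a sketch reusing helpers visible in this patch, not a drop-in
function):

	static bool
	board_needs_post(struct nv50_devinit *init, struct nvkm_device *device)
	{
		/* no display engine -> likely a secondary gpu, which the
		 * x86 vbios will not have posted at boot */
		if (nvkm_devinit_disable(&init->base) & (1ULL << NVKM_ENGINE_DISP))
			return true;

		/* untouched VGA CRTC registers -> vbios never initialised
		 * the adaptor */
		if (!nvkm_rdvgac(device, 0, 0x00) &&
		    !nvkm_rdvgac(device, 0, 0x1a))
			return true;

		return false;
	}
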
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9243521c..5de70a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,16 +1,17 @@
-#ifndef __NVKM_DEVINIT_NV50_H__
-#define __NVKM_DEVINIT_NV50_H__
+#ifndef __NV50_DEVINIT_H__
+#define __NV50_DEVINIT_H__
+#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
 #include "priv.h"
 
-struct nv50_devinit_priv {
+struct nv50_devinit {
 	struct nvkm_devinit base;
 	u32 r001540;
 };
 
-int  nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-int  nv50_devinit_init(struct nvkm_object *);
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void nv50_devinit_preinit(struct nvkm_devinit *);
+void nv50_devinit_init(struct nvkm_devinit *);
 int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index bb51a95..e1f6ae5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,34 +1,21 @@
 #ifndef __NVKM_DEVINIT_PRIV_H__
 #define __NVKM_DEVINIT_PRIV_H__
+#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
 #include <subdev/devinit.h>
 
-struct nvkm_devinit_impl {
-	struct nvkm_oclass base;
+struct nvkm_devinit_func {
+	void *(*dtor)(struct nvkm_devinit *);
+	void (*preinit)(struct nvkm_devinit *);
+	void (*init)(struct nvkm_devinit *);
+	int  (*post)(struct nvkm_devinit *, bool post);
+	u32  (*mmio)(struct nvkm_devinit *, u32);
 	void (*meminit)(struct nvkm_devinit *);
 	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
 	u64  (*disable)(struct nvkm_devinit *);
-	u32  (*mmio)(struct nvkm_devinit *, u32);
-	int  (*post)(struct nvkm_subdev *, bool);
 };
 
-#define nvkm_devinit_create(p,e,o,d)                                        \
-	nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_devinit_destroy(p) ({                                          \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_dtor(nv_object(d));                                   \
-})
-#define nvkm_devinit_init(p) ({                                             \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_init(nv_object(d));                                   \
-})
-#define nvkm_devinit_fini(p,s) ({                                           \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_fini(nv_object(d), (s));                              \
-})
+void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
+		       int index, struct nvkm_devinit *);
 
-int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_devinit_dtor(struct nvkm_object *);
-int _nvkm_devinit_init(struct nvkm_object *);
-int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
+int nv04_devinit_post(struct nvkm_devinit *, bool);
 #endif
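
The new nvkm_devinit() and nv50_devinit() macros lean on the kernel's
container_of() to recover a wrapper object from a pointer to its embedded
base.  A free-standing illustration of the downcast (definitions inlined
so the sketch compiles on its own; struct names are illustrative):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base { int x; };

	struct derived {
		struct base base;	/* embedded base, first member */
		int extra;
	};

	#define derived(p) container_of((p), struct derived, base)

	int
	derived_extra(struct base *b)
	{
		/* walk back from the member to the containing struct */
		return derived(b)->extra;
	}
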
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d6be4c6c..0810570 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,8 @@
 nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
+
+nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
 nvkm-y += nvkm/subdev/fb/ramnv10.o
 nvkm-y += nvkm/subdev/fb/ramnv1a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 61fde43..a719b9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -22,144 +22,151 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "ram.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+
+bool
+nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
+{
+	return fb->func->memtype_valid(fb, memtype);
+}
+
+void
+nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.fini(fb, region, tile);
+}
+
+void
+nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
+		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
+}
+
+void
+nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	if (fb->func->tile.prog) {
+		fb->func->tile.prog(fb, region, tile);
+		if (device->gr)
+			nvkm_engine_tile(&device->gr->engine, region);
+		if (device->mpeg)
+			nvkm_engine_tile(device->mpeg, region);
+	}
+}
 
 int
 nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 {
-	const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	struct nvbios_M0203E M0203E;
 	u8 ver, hdr;
 
 	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
 		switch (M0203E.type) {
-		case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
-		case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
-		case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
-		case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+		case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+		case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+		case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
+		case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
 		default:
-			nv_warn(bios, "M0203E type %02x\n", M0203E.type);
-			return NV_MEM_TYPE_UNKNOWN;
+			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
+			return NVKM_RAM_TYPE_UNKNOWN;
 		}
 	}
 
-	nv_warn(bios, "M0203E not matched!\n");
-	return NV_MEM_TYPE_UNKNOWN;
+	nvkm_warn(subdev, "M0203E not matched!\n");
+	return NVKM_RAM_TYPE_UNKNOWN;
 }
 
-int
-_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_fb_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int ret;
-
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
-		if (ret && suspend)
-			return ret;
-	}
-
-	return nvkm_subdev_fini(&pfb->base, suspend);
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->intr)
+		fb->func->intr(fb);
 }
 
-int
-_nvkm_fb_init(struct nvkm_object *object)
+static int
+nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int ret, i;
-
-	ret = nvkm_subdev_init(&pfb->base);
-	if (ret)
-		return ret;
-
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
-		if (ret)
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->ram_new) {
+		int ret = fb->func->ram_new(fb, &fb->ram);
+		if (ret) {
+			nvkm_error(subdev, "vram setup failed, %d\n", ret);
 			return ret;
+		}
 	}
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
-
 	return 0;
 }
 
-void
-_nvkm_fb_dtor(struct nvkm_object *object)
+static int
+nvkm_fb_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int i;
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	int ret, i;
 
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-	nvkm_mm_fini(&pfb->tags);
-
-	if (pfb->ram) {
-		nvkm_mm_fini(&pfb->vram);
-		nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+	if (fb->ram) {
+		ret = nvkm_ram_init(fb->ram);
+		if (ret)
+			return ret;
 	}
 
-	nvkm_subdev_destroy(&pfb->base);
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.prog(fb, i, &fb->tile.region[i]);
+
+	if (fb->func->init)
+		fb->func->init(fb);
+	return 0;
+}
+
+static void *
+nvkm_fb_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	int i;
+
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
+
+	nvkm_ram_del(&fb->ram);
+
+	if (fb->func->dtor)
+		return fb->func->dtor(fb);
+	return fb;
+}
+
+static const struct nvkm_subdev_func
+nvkm_fb = {
+	.dtor = nvkm_fb_dtor,
+	.oneinit = nvkm_fb_oneinit,
+	.init = nvkm_fb_init,
+	.intr = nvkm_fb_intr,
+};
+
+void
+nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb *fb)
+{
+	nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+	fb->func = func;
+	fb->tile.regions = fb->func->tile.regions;
 }
 
 int
-nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
 {
-	struct nvkm_fb_impl *impl = (void *)oclass;
-	static const char *name[] = {
-		[NV_MEM_TYPE_UNKNOWN] = "unknown",
-		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-		[NV_MEM_TYPE_DDR1   ] = "DDR1",
-		[NV_MEM_TYPE_DDR2   ] = "DDR2",
-		[NV_MEM_TYPE_DDR3   ] = "DDR3",
-		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-	};
-	struct nvkm_object *ram;
-	struct nvkm_fb *pfb;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
-				  length, pobject);
-	pfb = *pobject;
-	if (ret)
-		return ret;
-
-	pfb->memtype_valid = impl->memtype;
-
-	if (!impl->ram)
-		return 0;
-
-	ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
-	if (ret) {
-		nv_fatal(pfb, "error detecting memory configuration!!\n");
-		return ret;
-	}
-
-	pfb->ram = (void *)ram;
-
-	if (!nvkm_mm_initialised(&pfb->vram)) {
-		ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (!nvkm_mm_initialised(&pfb->tags)) {
-		ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
-				   ++pfb->ram->tags : 0, 1);
-		if (ret)
-			return ret;
-	}
-
-	nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]);
-	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20));
-	nv_info(pfb, "   ZCOMP: %d tags\n", pfb->ram->tags);
+	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, *pfb);
 	return 0;
 }
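
Note the ownership convention nvkm_fb_dtor() establishes above: a
subclass dtor returns whatever pointer was originally allocated, so the
caller can free the correct object whether or not a wrapper struct is
involved.  A free-standing sketch of that idiom (simplified; the real
code routes this through the subdev):

	#include <stdlib.h>

	struct obj {
		void *(*dtor)(struct obj *);
	};

	struct wrapper {
		struct obj base;	/* base embedded first, as in gf100_fb */
		void *scratch;
	};

	static void *
	wrapper_dtor(struct obj *base)
	{
		struct wrapper *w = (struct wrapper *)base;
		free(w->scratch);	/* release subclass-only resources */
		return w;		/* hand back the real allocation */
	}

	void
	obj_del(struct obj *base)
	{
		void *data = base;
		if (base->dtor)
			data = base->dtor(base);
		free(data);		/* frees wrapper and plain obj alike */
	}
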
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 6c968d1..9c28392 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-g84_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x84),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+g84_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&g84_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 15b462a..79b523a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-#include "priv.h"
+#include "ram.h"
 
 struct ramxlat {
 	int id;
@@ -42,9 +42,9 @@
 
 static const struct ramxlat
 ramgddr3_cl_lo[] = {
-	{ 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+	{ 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 12, 4 }, { 13, 5 }, { 14, 6 },
+	{ 13, 9 }, { 14, 6 },
 	/* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
 	/* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
 	 * { 15, 11 }, */
@@ -61,24 +61,25 @@
 static const struct ramxlat
 ramgddr3_wr_lo[] = {
 	{ 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
-	{ 11, 0 },
+	{ 11, 0 }, { 13, 1 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
+	{ 4, 1 }, { 6, 3 }, { 12, 1 },
 	{ -1 }
 };
 
 int
 nvkm_gddr3_calc(struct nvkm_ram *ram)
 {
-	int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+	int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
 
 	switch (ram->next->bios.timing_ver) {
 	case 0x10:
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
+		RON = ram->next->bios.ramcfg_RON;
 		break;
 	case 0x20:
 		CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
@@ -89,6 +90,7 @@
 		ODT =  (ram->mr[1] & 0x004) >> 2 |
 		       (ram->mr[1] & 0x040) >> 5 |
 		       (ram->mr[1] & 0x200) >> 7;
+		RON = (ram->mr[1] & 0x300) >> 8;
 		break;
 	default:
 		return -ENOSYS;
@@ -107,7 +109,7 @@
 
 	ram->mr[1] &= ~0x3fc;
 	ram->mr[1] |= (ODT & 0x03) << 2;
-	ram->mr[1] |= (ODT & 0x03) << 8;
+	ram->mr[1] |= (RON & 0x03) << 8;
 	ram->mr[1] |= (WR  & 0x03) << 4;
 	ram->mr[1] |= (WR  & 0x04) << 5;
 	ram->mr[1] |= !DLL << 6;
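
The ramgddr3_* tables above pair a raw timing value with its GDDR3
mode-register encoding and end with a -1 sentinel.  A plausible
standalone reading of how such a table is walked (the actual lookup
helper lives earlier in this file; the table contents below are
truncated for illustration):

	#include <errno.h>

	struct ramxlat {
		int id;
		int enc;
	};

	static const struct ramxlat demo_cl_lo[] = {
		{ 7, 7 }, { 8, 0 }, { 9, 1 },
		{ -1 }
	};

	static int
	ramxlat(const struct ramxlat *xlat, int id)
	{
		for (; xlat->id >= 0; xlat++) {
			if (xlat->id == id)
				return xlat->enc;
		}
		return -EINVAL;	/* timing value the part can't encode */
	}

With tables like these, ramxlat(demo_cl_lo, 8) yields the encoding 0,
while an out-of-range CAS latency propagates an error instead of
programming garbage into the mode register.
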
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index f6f9eee..24f83b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ram.h"
 
 /* binary driver only executes this path if the condition (a) is true
  * for any configuration (combination of rammap+ramcfg+timing) that
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index d51aa02..008bb98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -22,101 +22,90 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
-
-#include <core/device.h>
+#include "ram.h"
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 bool
-gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((gf100_pte_storage_type_map[memtype] != 0xff));
 }
 
-static void
-gf100_fb_intr(struct nvkm_subdev *subdev)
+void
+gf100_fb_intr(struct nvkm_fb *base)
 {
-	struct gf100_fb_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x000100);
-	if (intr & 0x08000000) {
-		nv_debug(priv, "PFFB intr\n");
-		intr &= ~0x08000000;
-	}
-	if (intr & 0x00002000) {
-		nv_debug(priv, "PBFB intr\n");
-		intr &= ~0x00002000;
-	}
-}
-
-int
-gf100_fb_init(struct nvkm_object *object)
-{
-	struct gf100_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	if (priv->r100c10_page)
-		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x000100);
+	if (intr & 0x08000000)
+		nvkm_debug(subdev, "PFFB intr\n");
+	if (intr & 0x00002000)
+		nvkm_debug(subdev, "PBFB intr\n");
 }
 
 void
-gf100_fb_dtor(struct nvkm_object *object)
+gf100_fb_init(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct gf100_fb_priv *priv = (void *)object;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (priv->r100c10_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+	if (fb->r100c10_page)
+		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+}
+
+void *
+gf100_fb_dtor(struct nvkm_fb *base)
+{
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	if (fb->r100c10_page) {
+		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c10_page);
+		__free_page(fb->r100c10_page);
 	}
 
-	nvkm_fb_destroy(&priv->base);
+	return fb;
 }
 
 int
-gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_fb **pfb)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_fb_priv *priv;
-	int ret;
+	struct gf100_fb *fb;
 
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, &fb->base);
+	*pfb = &fb->base;
 
-	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c10_page) {
-		priv->r100c10 = dma_map_page(nv_device_base(device),
-					     priv->r100c10_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c10))
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
 			return -EFAULT;
 	}
 
-	nv_subdev(priv)->intr = gf100_fb_intr;
 	return 0;
 }
 
-struct nvkm_oclass *
-gf100_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gf100_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gf100_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gf100_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gf100_fb, device, index, pfb);
+}
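
The reworked ctor/dtor pair above also shows the full lifetime of the
r100c10 scratch page.  Condensed into one place, the allocate/map/teardown
sequence looks like this (a sketch using the same DMA API calls, not a
drop-in replacement for the patch):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static int
	scratch_ctor(struct device *dev, struct page **page, dma_addr_t *addr)
	{
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!*page)
			return -ENOMEM;

		*addr = dma_map_page(dev, *page, 0, PAGE_SIZE,
				     DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *addr)) {
			__free_page(*page);
			*page = NULL;
			return -EFAULT;
		}
		return 0;
	}

	static void
	scratch_dtor(struct device *dev, struct page *page, dma_addr_t addr)
	{
		if (page) {
			dma_unmap_page(dev, addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(page);
		}
	}
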
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 0af4da2..2160e5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,28 +1,17 @@
 #ifndef __NVKM_RAM_NVC0_H__
 #define __NVKM_RAM_NVC0_H__
+#define gf100_fb(p) container_of((p), struct gf100_fb, base)
 #include "priv.h"
-#include "nv50.h"
 
-struct gf100_fb_priv {
+struct gf100_fb {
 	struct nvkm_fb base;
 	struct page *r100c10_page;
 	dma_addr_t r100c10;
 };
 
-int  gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_fb_dtor(struct nvkm_object *);
-int  gf100_fb_init(struct nvkm_object *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
-
-#define gf100_ram_create(p,e,o,m,d)                                             \
-	gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
-int  gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, u32, int, void **);
-int  gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
-		  struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-
-int  gk104_ram_init(struct nvkm_object*);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
+		  int index, struct nvkm_fb **);
+void *gf100_fb_dtor(struct nvkm_fb *);
+void gf100_fb_init(struct nvkm_fb *);
+void gf100_fb_intr(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 1c08317..0edb3c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gk104_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gk104_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gk104_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gk104_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gk104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a5d7857..81447eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,50 +19,23 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "gf100.h"
+#include "priv.h"
 
-struct gk20a_fb_priv {
-	struct nvkm_fb base;
+static void
+gk20a_fb_init(struct nvkm_fb *fb)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+}
+
+static const struct nvkm_fb_func
+gk20a_fb = {
+	.init = gk20a_fb_init,
+	.memtype_valid = gf100_fb_memtype_valid,
 };
 
-static int
-gk20a_fb_init(struct nvkm_object *object)
+int
+gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	struct gk20a_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
 }
-
-static int
-gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gk20a_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nvkm_oclass *
-gk20a_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = gk20a_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 843f935..2a91df8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gm107_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gm107_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gm107_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gm107_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gm107_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dd9b8a0..ebb3060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gt215_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xa3),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &gt215_ram_oclass,
+static const struct nv50_fb_func
+gt215_fb = {
+	.ram_new = gt215_ram_new,
 	.trap = 0x000d0fff,
-}.base.base;
+};
+
+int
+gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&gt215_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 7be4a47..73b3b86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp77_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaa),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp77_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 2d00656..6d11e32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp89_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaf),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp89_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x089d1fff,
-}.base.base;
+};
+
+int
+mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c063dec..8ff2e5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,67 +21,39 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
 bool
-nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	if (!(tile_flags & 0xff00))
 		return true;
-
 	return false;
 }
 
-static int
-nv04_fb_init(struct nvkm_object *object)
+static void
+nv04_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
 
 	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
 	 * nvidia reading PFB_CFG_0, then writing back its original value.
 	 * (which was 0x701114 in this case)
 	 */
-	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
-	return 0;
+	nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
 }
 
+static const struct nvkm_fb_func
+nv04_fb = {
+	.init = nv04_fb_init,
+	.ram_new = nv04_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
 int
-nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	struct nv04_fb_impl *impl = (void *)oclass;
-	struct nv04_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.tile.regions = impl->tile.regions;
-	priv->base.tile.init = impl->tile.init;
-	priv->base.tile.comp = impl->tile.comp;
-	priv->base.tile.fini = impl->tile.fini;
-	priv->base.tile.prog = impl->tile.prog;
-	return 0;
+	return nvkm_fb_new_(&nv04_fb, device, index, pfb);
 }
-
-struct nvkm_oclass *
-nv04_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x04),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv04_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv04_ram_oclass,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
deleted file mode 100644
index caa0d03..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_FB_NV04_H__
-#define __NVKM_FB_NV04_H__
-#include "priv.h"
-
-struct nv04_fb_priv {
-	struct nvkm_fb base;
-};
-
-int  nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-struct nv04_fb_impl {
-	struct nvkm_fb_impl base;
-	struct {
-		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-	} tile;
-};
-
-void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv30_fb_init(struct nvkm_object *);
-void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-
-void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
-		       struct nvkm_fb_tile *);
-
-int  nv41_fb_init(struct nvkm_object *);
-void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv44_fb_init(struct nvkm_object *);
-void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index f3530e4a..e8c44f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x80000000 | addr;
@@ -35,7 +36,7 @@
 }
 
 void
-nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
@@ -44,27 +45,27 @@
 }
 
 void
-nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
 }
 
-struct nvkm_oclass *
-nv10_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x10),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv10_ram_oclass,
+static const struct nvkm_fb_func
+nv10_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv10_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+}
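
The trailing nvkm_rd32() of 0x100240 in nv10_fb_tile_prog() is presumably
a posted-write flush, making sure the three tile registers have landed
before the caller moves on; the nv20 and nv41 variants further down keep
the same read-back after their writes.
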
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 83bcb73..2ae0beb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,21 +23,21 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv1a_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x1a),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv1a_ram_oclass,
+static const struct nvkm_fb_func
+nv1a_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv1a_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index e37084b..126865d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,28 +23,29 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001 | addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
 	if (flags & 4) {
-		pfb->tile.comp(pfb, i, size, flags, tile);
+		fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr |= 2;
 	}
 }
 
 static void
-nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
 		else              tile->zcomp = 0x04000000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -56,39 +57,39 @@
 }
 
 void
-nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
 	tile->pitch = 0;
 	tile->zcomp = 0;
-	nvkm_mm_free(&pfb->tags, &tile->tag);
+	nvkm_mm_free(&fb->ram->tags, &tile->tag);
 }
 
 void
-nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
-	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
+	nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-struct nvkm_oclass *
-nv20_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x20),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv20_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv20_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+}
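
As a worked example of the tag arithmetic in nv20_fb_tile_comp():
compressing a 1 MiB surface on a board with 4 memory partitions needs
DIV_ROUND_UP(0x100000, 0x40) = 0x4000 tile entries, hence
round_up(0x4000 / 4, 0x40) = 0x1000 tags, carved out of the tag space
that now lives in fb->ram->tags rather than on the fb itself.
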
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index bc9f54f..c56746d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
 		else              tile->zcomp = 0x00200000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -41,20 +42,19 @@
 	}
 }
 
-struct nvkm_oclass *
-nv25_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x25),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv25_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv25_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 09ebb94..2a7c483 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,20 +23,19 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
+#include "ram.h"
 
 void
-nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
 	if (!(flags & 4)) {
 		tile->addr = (0 << 4);
 	} else {
-		if (pfb->tile.comp) /* z compression */
-			pfb->tile.comp(pfb, i, size, flags, tile);
+		if (fb->func->tile.comp) /* z compression */
+			fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr = (1 << 4);
 	}
 
@@ -47,12 +46,12 @@
 }
 
 static void
-nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
 		else           tile->zcomp |= 0x02000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -64,23 +63,24 @@
 }
 
 static int
-calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nvkm_fb *fb, int k, int i, int j)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_device *device = fb->subdev.device;
 	int b = (device->chipset > 0x30 ?
-		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+		 nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
+			(4 * (i ^ 1)) :
 		 0) & 0xf;
 
 	return 2 * (b & 0x8 ? b - 0x10 : b);
 }
 
 static int
-calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nvkm_fb *fb, int l, int k, int i)
 {
 	int j, x = 0;
 
 	for (j = 0; j < 4; j++) {
-		int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+		int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
 
 		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
 	}
@@ -88,16 +88,11 @@
 	return x;
 }
 
-int
-nv30_fb_init(struct nvkm_object *object)
+void
+nv30_fb_init(struct nvkm_fb *fb)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret, i, j;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	int i, j;
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (device->chipset == 0x30 ||
@@ -105,36 +100,34 @@
 	    device->chipset == 0x35) {
 		/* Related to ROP count */
 		int n = (device->chipset == 0x31 ? 2 : 4);
-		int l = nv_rd32(priv, 0x1003d0);
+		int l = nvkm_rd32(device, 0x1003d0);
 
 		for (i = 0; i < n; i++) {
 			for (j = 0; j < 3; j++)
-				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
-					calc_ref(priv, l, 0, j));
+				nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
+					  calc_ref(fb, l, 0, j));
 
 			for (j = 0; j < 2; j++)
-				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
-					calc_ref(priv, l, 1, j));
+				nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
+					  calc_ref(fb, l, 1, j));
 		}
 	}
-
-	return 0;
 }
 
-struct nvkm_oclass *
-nv30_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x30),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv30_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv30_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+}
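
For a concrete feel for calc_bias() above: the nibble pulled from the
0x122c register block is a signed 4-bit quantity, so b = 0xc decodes as
0xc - 0x10 = -4 and gives a bias of -8; calc_ref() then adds that bias to
the byte extracted from the 0x1003d0 value, clamps the sum to [0, 0x1f],
and sets bit 7 of each packed byte.
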
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index c01dc18..1604b37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
 		else           tile->zcomp |= 0x08000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@
 	}
 }
 
-struct nvkm_oclass *
-nv35_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x35),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv35_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv35_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index cad75a1..80cc0a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
 		else           tile->zcomp |= 0x20000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@
 	}
 }
 
-struct nvkm_oclass *
-nv36_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x36),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv36_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv36_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index dbe5c19..deec46a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,16 +23,17 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x80);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x100);
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x100);
 	if ( (flags & 2) &&
-	    !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	    !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
 		tile->zcomp |= ((tile->tag->offset           ) >> 8);
 		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -42,34 +43,26 @@
 	}
 }
 
-static int
-nv40_fb_init(struct nvkm_object *object)
+static void
+nv40_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-	return 0;
+	nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
 }
 
-struct nvkm_oclass *
-nv40_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x40),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv40_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv40_ram_oclass,
+static const struct nvkm_fb_func
+nv40_fb = {
+	.init = nv40_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv40_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
deleted file mode 100644
index 6021826..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __NVKM_FB_NV40_H__
-#define __NVKM_FB_NV40_H__
-#include "priv.h"
-
-struct nv40_ram {
-	struct nvkm_ram base;
-	u32 ctrl;
-	u32 coef;
-};
-
-int  nv40_ram_calc(struct nvkm_fb *, u32);
-int  nv40_ram_prog(struct nvkm_fb *);
-void nv40_ram_tidy(struct nvkm_fb *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index d9e1a40..79e57dd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,46 +23,40 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
-	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
+	nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
 }
 
-int
-nv41_fb_init(struct nvkm_object *object)
+void
+nv41_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv41_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x41),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv41_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 20b97c8..06246cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001; /* mode = vram */
@@ -36,42 +37,36 @@
 }
 
 void
-nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
 }
 
-int
-nv44_fb_init(struct nvkm_object *object)
+void
+nv44_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv44_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x44),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv44_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv44_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 5bfac38..3598a1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
@@ -39,19 +40,19 @@
 	tile->pitch = pitch;
 }
 
-struct nvkm_oclass *
-nv46_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x46),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv46_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index d3b3988..c505e44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv47_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x47),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv47_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 236e36c..7b91b9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv49_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x49),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv49_ram_oclass,
+static const struct nvkm_fb_func
+nv49_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv49_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 1352b6a..4e98210 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,21 +23,22 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv4e_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x4e),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv4e_ram_oclass,
+static const struct nvkm_fb_func
+nv4e_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0480ce5..f5edfad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -22,11 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <engine/fifo.h>
 
 int
 nv50_fb_memtype[0x80] = {
@@ -40,130 +40,139 @@
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-bool
-nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
+static int
+nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	return fb->func->ram_new(&fb->base, pram);
+}
+
+static bool
+nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
 }
 
 static const struct nvkm_enum vm_dispatch_subclients[] = {
-	{ 0x00000000, "GRCTX", NULL },
-	{ 0x00000001, "NOTIFY", NULL },
-	{ 0x00000002, "QUERY", NULL },
-	{ 0x00000003, "COND", NULL },
-	{ 0x00000004, "M2M_IN", NULL },
-	{ 0x00000005, "M2M_OUT", NULL },
-	{ 0x00000006, "M2M_NOTIFY", NULL },
+	{ 0x00000000, "GRCTX" },
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "QUERY" },
+	{ 0x00000003, "COND" },
+	{ 0x00000004, "M2M_IN" },
+	{ 0x00000005, "M2M_OUT" },
+	{ 0x00000006, "M2M_NOTIFY" },
 	{}
 };
 
 static const struct nvkm_enum vm_ccache_subclients[] = {
-	{ 0x00000000, "CB", NULL },
-	{ 0x00000001, "TIC", NULL },
-	{ 0x00000002, "TSC", NULL },
+	{ 0x00000000, "CB" },
+	{ 0x00000001, "TIC" },
+	{ 0x00000002, "TSC" },
 	{}
 };
 
 static const struct nvkm_enum vm_prop_subclients[] = {
-	{ 0x00000000, "RT0", NULL },
-	{ 0x00000001, "RT1", NULL },
-	{ 0x00000002, "RT2", NULL },
-	{ 0x00000003, "RT3", NULL },
-	{ 0x00000004, "RT4", NULL },
-	{ 0x00000005, "RT5", NULL },
-	{ 0x00000006, "RT6", NULL },
-	{ 0x00000007, "RT7", NULL },
-	{ 0x00000008, "ZETA", NULL },
-	{ 0x00000009, "LOCAL", NULL },
-	{ 0x0000000a, "GLOBAL", NULL },
-	{ 0x0000000b, "STACK", NULL },
-	{ 0x0000000c, "DST2D", NULL },
+	{ 0x00000000, "RT0" },
+	{ 0x00000001, "RT1" },
+	{ 0x00000002, "RT2" },
+	{ 0x00000003, "RT3" },
+	{ 0x00000004, "RT4" },
+	{ 0x00000005, "RT5" },
+	{ 0x00000006, "RT6" },
+	{ 0x00000007, "RT7" },
+	{ 0x00000008, "ZETA" },
+	{ 0x00000009, "LOCAL" },
+	{ 0x0000000a, "GLOBAL" },
+	{ 0x0000000b, "STACK" },
+	{ 0x0000000c, "DST2D" },
 	{}
 };
 
 static const struct nvkm_enum vm_pfifo_subclients[] = {
-	{ 0x00000000, "PUSHBUF", NULL },
-	{ 0x00000001, "SEMAPHORE", NULL },
+	{ 0x00000000, "PUSHBUF" },
+	{ 0x00000001, "SEMAPHORE" },
 	{}
 };
 
 static const struct nvkm_enum vm_bar_subclients[] = {
-	{ 0x00000000, "FB", NULL },
-	{ 0x00000001, "IN", NULL },
+	{ 0x00000000, "FB" },
+	{ 0x00000001, "IN" },
 	{}
 };
 
 static const struct nvkm_enum vm_client[] = {
-	{ 0x00000000, "STRMOUT", NULL },
+	{ 0x00000000, "STRMOUT" },
 	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
-	{ 0x00000004, "PFIFO_WRITE", NULL },
+	{ 0x00000004, "PFIFO_WRITE" },
 	{ 0x00000005, "CCACHE", vm_ccache_subclients },
-	{ 0x00000006, "PMSPPP", NULL },
-	{ 0x00000007, "CLIPID", NULL },
-	{ 0x00000008, "PFIFO_READ", NULL },
-	{ 0x00000009, "VFETCH", NULL },
-	{ 0x0000000a, "TEXTURE", NULL },
+	{ 0x00000006, "PMSPPP" },
+	{ 0x00000007, "CLIPID" },
+	{ 0x00000008, "PFIFO_READ" },
+	{ 0x00000009, "VFETCH" },
+	{ 0x0000000a, "TEXTURE" },
 	{ 0x0000000b, "PROP", vm_prop_subclients },
-	{ 0x0000000c, "PVP", NULL },
-	{ 0x0000000d, "PBSP", NULL },
-	{ 0x0000000e, "PCRYPT", NULL },
-	{ 0x0000000f, "PCOUNTER", NULL },
-	{ 0x00000011, "PDAEMON", NULL },
+	{ 0x0000000c, "PVP" },
+	{ 0x0000000d, "PBSP" },
+	{ 0x0000000e, "PCRYPT" },
+	{ 0x0000000f, "PCOUNTER" },
+	{ 0x00000011, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_engine[] = {
-	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
-	{ 0x00000004, "PEEPHOLE", NULL },
-	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
+	{ 0x00000000, "PGRAPH" },
+	{ 0x00000001, "PVP" },
+	{ 0x00000004, "PEEPHOLE" },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
 	{ 0x00000006, "BAR", vm_bar_subclients },
-	{ 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
-	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
-	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
-	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER },
-	{ 0x0000000b, "PCOUNTER", NULL },
-	{ 0x0000000c, "SEMAPHORE_BG", NULL },
-	{ 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x0000000e, "PDAEMON", NULL },
+	{ 0x00000008, "PMSPPP" },
+	{ 0x00000008, "PMPEG" },
+	{ 0x00000009, "PBSP" },
+	{ 0x0000000a, "PCRYPT" },
+	{ 0x0000000b, "PCOUNTER" },
+	{ 0x0000000c, "SEMAPHORE_BG" },
+	{ 0x0000000d, "PCE0" },
+	{ 0x0000000e, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_fault[] = {
-	{ 0x00000000, "PT_NOT_PRESENT", NULL },
-	{ 0x00000001, "PT_TOO_SHORT", NULL },
-	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
-	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
-	{ 0x00000004, "PAGE_READ_ONLY", NULL },
-	{ 0x00000006, "NULL_DMAOBJ", NULL },
-	{ 0x00000007, "WRONG_MEMTYPE", NULL },
-	{ 0x0000000b, "VRAM_LIMIT", NULL },
-	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
+	{ 0x00000000, "PT_NOT_PRESENT" },
+	{ 0x00000001, "PT_TOO_SHORT" },
+	{ 0x00000002, "PAGE_NOT_PRESENT" },
+	{ 0x00000003, "PAGE_SYSTEM_ONLY" },
+	{ 0x00000004, "PAGE_READ_ONLY" },
+	{ 0x00000006, "NULL_DMAOBJ" },
+	{ 0x00000007, "WRONG_MEMTYPE" },
+	{ 0x0000000b, "VRAM_LIMIT" },
+	{ 0x0000000f, "DMAOBJ_LIMIT" },
 	{}
 };
 
 static void
-nv50_fb_intr(struct nvkm_subdev *subdev)
+nv50_fb_intr(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nvkm_engine *engine;
-	struct nv50_fb_priv *priv = (void *)subdev;
-	const struct nvkm_enum *en, *cl;
-	struct nvkm_object *engctx = NULL;
-	u32 trap[6], idx, chan;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	const struct nvkm_enum *en, *re, *cl, *sc;
+	u32 trap[6], idx, inst;
 	u8 st0, st1, st2, st3;
+	unsigned long flags;
 	int i;
 
-	idx = nv_rd32(priv, 0x100c90);
+	idx = nvkm_rd32(device, 0x100c90);
 	if (!(idx & 0x80000000))
 		return;
 	idx &= 0x00ffffff;
 
 	for (i = 0; i < 6; i++) {
-		nv_wr32(priv, 0x100c90, idx | i << 24);
-		trap[i] = nv_rd32(priv, 0x100c94);
+		nvkm_wr32(device, 0x100c90, idx | i << 24);
+		trap[i] = nvkm_rd32(device, 0x100c94);
 	}
-	nv_wr32(priv, 0x100c90, idx | 0x80000000);
+	nvkm_wr32(device, 0x100c90, idx | 0x80000000);
 
 	/* decode status bits into something more useful */
 	if (device->chipset  < 0xa3 ||
@@ -178,143 +187,103 @@
 		st2 = (trap[0] & 0x00ff0000) >> 16;
 		st3 = (trap[0] & 0xff000000) >> 24;
 	}
-	chan = (trap[2] << 16) | trap[1];
+	inst = ((trap[2] << 16) | trap[1]) << 12;
 
 	en = nvkm_enum_find(vm_engine, st0);
-
-	if (en && en->data2) {
-		const struct nvkm_enum *orig_en = en;
-		while (en->name && en->value == st0 && en->data2) {
-			engine = nvkm_engine(subdev, en->data2);
-			/*XXX: clean this up */
-			if (!engine && en->data2 == NVDEV_ENGINE_BSP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
-			if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
-			if (!engine && en->data2 == NVDEV_ENGINE_VP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
-			if (engine) {
-				engctx = nvkm_engctx_get(engine, chan);
-				if (engctx)
-					break;
-			}
-			en++;
-		}
-		if (!engctx)
-			en = orig_en;
-	}
-
-	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
-		 (trap[5] & 0x00000100) ? "read" : "write",
-		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
-		 nvkm_client_name(engctx));
-
-	nvkm_engctx_put(engctx);
-
-	if (en)
-		pr_cont("%s/", en->name);
-	else
-		pr_cont("%02x/", st0);
-
+	re = nvkm_enum_find(vm_fault , st1);
 	cl = nvkm_enum_find(vm_client, st2);
-	if (cl)
-		pr_cont("%s/", cl->name);
-	else
-		pr_cont("%02x/", st2);
+	if      (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
+	else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
+	else                     sc = NULL;
 
-	if      (cl && cl->data) cl = nvkm_enum_find(cl->data, st3);
-	else if (en && en->data) cl = nvkm_enum_find(en->data, st3);
-	else                     cl = NULL;
-	if (cl)
-		pr_cont("%s", cl->name);
-	else
-		pr_cont("%02x", st3);
-
-	pr_cont(" reason: ");
-	en = nvkm_enum_find(vm_fault, st1);
-	if (en)
-		pr_cont("%s\n", en->name);
-	else
-		pr_cont("0x%08x\n", st1);
+	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
+	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
+			   "engine %02x [%s] client %02x [%s] "
+			   "subclient %02x [%s] reason %08x [%s]\n",
+		   (trap[5] & 0x00000100) ? "read" : "write",
+		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+		   chan ? chan->chid : -1, inst,
+		   chan ? chan->object.client->name : "unknown",
+		   st0, en ? en->name : "",
+		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
+		   st1, re ? re->name : "");
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 }
 
-int
-nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+static void
+nv50_fb_init(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv50_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c08_page) {
-		priv->r100c08 = dma_map_page(nv_device_base(device),
-					     priv->r100c08_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c08))
-			return -EFAULT;
-	} else {
-		nv_warn(priv, "failed 0x100c08 page alloc\n");
-	}
-
-	nv_subdev(priv)->intr = nv50_fb_intr;
-	return 0;
-}
-
-void
-nv50_fb_dtor(struct nvkm_object *object)
-{
-	struct nvkm_device *device = nv_device(object);
-	struct nv50_fb_priv *priv = (void *)object;
-
-	if (priv->r100c08_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c08_page);
-	}
-
-	nvkm_fb_destroy(&priv->base);
-}
-
-int
-nv50_fb_init(struct nvkm_object *object)
-{
-	struct nv50_fb_impl *impl = (void *)object->oclass;
-	struct nv50_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
 	/* Not a clue what this is exactly.  Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
-	nv_wr32(priv, 0x100c90, impl->trap);
+	nvkm_wr32(device, 0x100c90, fb->func->trap);
+}
+
+static void *
+nv50_fb_dtor(struct nvkm_fb *base)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	if (fb->r100c08_page) {
+		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+		__free_page(fb->r100c08_page);
+	}
+
+	return fb;
+}
+
+static const struct nvkm_fb_func
+nv50_fb_ = {
+	.dtor = nv50_fb_dtor,
+	.init = nv50_fb_init,
+	.intr = nv50_fb_intr,
+	.ram_new = nv50_fb_ram_new,
+	.memtype_valid = nv50_fb_memtype_valid,
+};
+
+int
+nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
+{
+	struct nv50_fb *fb;
+
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+	fb->func = func;
+	*pfb = &fb->base;
+
+	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c08_page) {
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
+			return -EFAULT;
+	} else {
+		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
+	}
+
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+nv50_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x000707ff,
-}.base.base;
+};
+
+int
+nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&nv50_fb, device, index, pfb);
+}
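
nv50_fb_intr() now emits the whole trap decode in a single nvkm_error() call, resolving each raw status field to a name through the const nvkm_enum tables and looking the channel up by instance address via nvkm_fifo_chan_inst(). A sketch of the lookup idiom, using only the vm_client table and the nvkm_enum fields visible above:

	/* sketch: resolve a raw client id to the name printed above */
	static const char *
	example_client_name(u32 st2)
	{
		const struct nvkm_enum *cl = nvkm_enum_find(vm_client, st2);
		return cl ? cl->name : "";	/* feeds the "%02x [%s]" format */
	}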
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index f3cde3f..faa88c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,31 +1,21 @@
 #ifndef __NVKM_FB_NV50_H__
 #define __NVKM_FB_NV50_H__
+#define nv50_fb(p) container_of((p), struct nv50_fb, base)
 #include "priv.h"
 
-struct nv50_fb_priv {
+struct nv50_fb {
+	const struct nv50_fb_func *func;
 	struct nvkm_fb base;
 	struct page *r100c08_page;
 	dma_addr_t r100c08;
 };
 
-int  nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void nv50_fb_dtor(struct nvkm_object *);
-int  nv50_fb_init(struct nvkm_object *);
-
-struct nv50_fb_impl {
-	struct nvkm_fb_impl base;
+struct nv50_fb_func {
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 	u32 trap;
 };
 
-#define nv50_ram_create(p,e,o,d)                                               \
-	nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
-int  nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-int  nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
-		  u32 memtype, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+		 struct nvkm_fb **pfb);
 extern int nv50_fb_memtype[0x80];
 #endif
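
The nv50_fb(p) macro is the usual container_of() downcast: given a pointer to the embedded struct nvkm_fb, it recovers the chipset wrapper holding the nv50_fb_func pointer and the r100c08 scratch page. nv50_fb_ram_new() in nv50.c above is exactly this round trip:

	/* sketch: base-pointer round trip, mirroring nv50_fb_ram_new() */
	static int
	example_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
	{
		struct nv50_fb *fb = nv50_fb(base);	/* container_of() */
		return fb->func->ram_new(&fb->base, pram);
	}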
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 485c4b6..62b9feb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,73 +1,62 @@
 #ifndef __NVKM_FB_PRIV_H__
 #define __NVKM_FB_PRIV_H__
+#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
 #include <subdev/fb.h>
 struct nvkm_bios;
 
-#define nvkm_ram_create(p,e,o,d)                                            \
-	nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
-#define nvkm_ram_destroy(p)                                                 \
-	nvkm_object_destroy(&(p)->base)
-#define nvkm_ram_init(p)                                                    \
-	nvkm_object_init(&(p)->base)
-#define nvkm_ram_fini(p,s)                                                  \
-	nvkm_object_fini(&(p)->base, (s))
+struct nvkm_fb_func {
+	void *(*dtor)(struct nvkm_fb *);
+	void (*init)(struct nvkm_fb *);
+	void (*intr)(struct nvkm_fb *);
 
-#define nvkm_ram_create_(p,e,o,s,d)                                         \
-	nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
-#define _nvkm_ram_dtor nvkm_object_destroy
-#define _nvkm_ram_init nvkm_object_init
-#define _nvkm_ram_fini nvkm_object_fini
+	struct {
+		int regions;
+		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
+		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+			     struct nvkm_fb_tile *);
+		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+	} tile;
 
-extern struct nvkm_oclass nv04_ram_oclass;
-extern struct nvkm_oclass nv10_ram_oclass;
-extern struct nvkm_oclass nv1a_ram_oclass;
-extern struct nvkm_oclass nv20_ram_oclass;
-extern struct nvkm_oclass nv40_ram_oclass;
-extern struct nvkm_oclass nv41_ram_oclass;
-extern struct nvkm_oclass nv44_ram_oclass;
-extern struct nvkm_oclass nv49_ram_oclass;
-extern struct nvkm_oclass nv4e_ram_oclass;
-extern struct nvkm_oclass nv50_ram_oclass;
-extern struct nvkm_oclass gt215_ram_oclass;
-extern struct nvkm_oclass mcp77_ram_oclass;
-extern struct nvkm_oclass gf100_ram_oclass;
-extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gm107_ram_oclass;
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
-int nvkm_sddr2_calc(struct nvkm_ram *ram);
-int nvkm_sddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
-
-#define nvkm_fb_create(p,e,c,d)                                             \
-	nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
-#define nvkm_fb_destroy(p) ({                                               \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_dtor(nv_object(pfb));                                      \
-})
-#define nvkm_fb_init(p) ({                                                  \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_init(nv_object(pfb));                                      \
-})
-#define nvkm_fb_fini(p,s) ({                                                \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_fini(nv_object(pfb), (s));                                 \
-})
-
-int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, int, void **);
-void _nvkm_fb_dtor(struct nvkm_object *);
-int  _nvkm_fb_init(struct nvkm_object *);
-int  _nvkm_fb_fini(struct nvkm_object *, bool);
-
-struct nvkm_fb_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *ram;
-	bool (*memtype)(struct nvkm_fb *, u32);
+	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
 };
 
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
+		  int index, struct nvkm_fb *);
+int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
+		 int index, struct nvkm_fb **);
+int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
-int  nvkm_fb_bios_memtype(struct nvkm_bios *);
+bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+
+void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv30_fb_init(struct nvkm_fb *);
+void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
+		       struct nvkm_fb_tile *);
+
+void nv41_fb_init(struct nvkm_fb *);
+void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv44_fb_init(struct nvkm_fb *);
+void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
 #endif
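
priv.h now carries the whole per-chipset contract in one struct: lifecycle hooks, the tile.{regions,init,comp,fini,prog} block, ram_new and memtype_valid. A sketch of how the tile hooks would be driven, assuming the common layer keeps per-region state in an fb->tile.region[] array (field name assumed, not shown in this patch):

	/* sketch: reprogram all tile regions via the function table */
	for (i = 0; i < fb->func->tile.regions; i++)
		fb->func->tile.prog(fb, i, &fb->tile.region[i]);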
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
new file mode 100644
index 0000000..c17d559
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+int
+nvkm_ram_init(struct nvkm_ram *ram)
+{
+	if (ram->func->init)
+		return ram->func->init(ram);
+	return 0;
+}
+
+void
+nvkm_ram_del(struct nvkm_ram **pram)
+{
+	struct nvkm_ram *ram = *pram;
+	if (ram && !WARN_ON(!ram->func)) {
+		if (ram->func->dtor)
+			*pram = ram->func->dtor(ram);
+		nvkm_mm_fini(&ram->tags);
+		nvkm_mm_fini(&ram->vram);
+		kfree(*pram);
+		*pram = NULL;
+	}
+}
+
+int
+nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram *ram)
+{
+	static const char *name[] = {
+		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
+		[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
+		[NVKM_RAM_TYPE_SGRAM  ] = "SGRAM",
+		[NVKM_RAM_TYPE_SDRAM  ] = "SDRAM",
+		[NVKM_RAM_TYPE_DDR1   ] = "DDR1",
+		[NVKM_RAM_TYPE_DDR2   ] = "DDR2",
+		[NVKM_RAM_TYPE_DDR3   ] = "DDR3",
+		[NVKM_RAM_TYPE_GDDR2  ] = "GDDR2",
+		[NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
+		[NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
+		[NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
+	};
+	struct nvkm_subdev *subdev = &fb->subdev;
+	int ret;
+
+	nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
+	ram->func = func;
+	ram->fb = fb;
+	ram->type = type;
+	ram->size = size;
+
+	if (!nvkm_mm_initialised(&ram->vram)) {
+		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (!nvkm_mm_initialised(&ram->tags)) {
+		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+		if (ret)
+			return ret;
+
+		nvkm_debug(subdev, "%d compression tags\n", tags);
+	}
+
+	return 0;
+}
+
+int
+nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram **pram)
+{
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+}
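
ram.c centralises the nvkm_ram lifecycle: nvkm_ram_new_() allocates and runs nvkm_ram_ctor(), which sets up the vram and tags allocators, and nvkm_ram_del() runs the optional dtor before tearing both down. A minimal chipset ram_new() built on these helpers, with placeholder type/size/tags values:

	/* sketch only: hypothetical chipset using the common helpers */
	static const struct nvkm_ram_func
	example_ram_func = {
	};

	int
	example_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
	{
		return nvkm_ram_new_(&example_ram_func, fb, NVKM_RAM_TYPE_SDRAM,
				     16 * 1024 * 1024, 0, pram);
	}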
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
new file mode 100644
index 0000000..f816cbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_FB_RAM_PRIV_H__
+#define __NVKM_FB_RAM_PRIV_H__
+#include "priv.h"
+
+int  nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram *);
+int  nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram **);
+void nvkm_ram_del(struct nvkm_ram **);
+int  nvkm_ram_init(struct nvkm_ram *);
+
+extern const struct nvkm_ram_func nv04_ram_func;
+
+int  nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram *);
+int  nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
+
+int  gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		    u32, struct nvkm_ram *);
+int  gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+
+int  gk104_ram_init(struct nvkm_ram *ram);
+
+/* RAM type-specific MR calculation routines */
+int nvkm_sddr2_calc(struct nvkm_ram *);
+int nvkm_sddr3_calc(struct nvkm_ram *);
+int nvkm_gddr3_calc(struct nvkm_ram *);
+int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);
+
+int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index f343682..9ef9d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,10 +1,11 @@
 #ifndef __NVKM_FBRAM_FUC_H__
 #define __NVKM_FBRAM_FUC_H__
+#include <subdev/fb.h>
 #include <subdev/pmu.h>
 
 struct ramfuc {
 	struct nvkm_memx *memx;
-	struct nvkm_fb *pfb;
+	struct nvkm_fb *fb;
 	int sequence;
 };
 
@@ -54,17 +55,14 @@
 }
 
 static inline int
-ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
+ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-	int ret;
-
-	ret = nvkm_memx_init(pmu, &ram->memx);
+	int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
 	if (ret)
 		return ret;
 
 	ram->sequence++;
-	ram->pfb = pfb;
+	ram->fb = fb;
 	return 0;
 }
 
@@ -72,9 +70,9 @@
 ramfuc_exec(struct ramfuc *ram, bool exec)
 {
 	int ret = 0;
-	if (ram->pfb) {
+	if (ram->fb) {
 		ret = nvkm_memx_fini(&ram->memx, exec);
-		ram->pfb = NULL;
+		ram->fb = NULL;
 	}
 	return ret;
 }
@@ -82,8 +80,9 @@
 static inline u32
 ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
 {
+	struct nvkm_device *device = ram->fb->subdev.device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->pfb, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
 
@@ -144,11 +143,9 @@
 }
 
 static inline int
-ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
+ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-
-	return nvkm_memx_train_result(pmu, result, rsize);
+	return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
 }
 
 static inline void
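
ramfuc_rd32() above implements a shadow-register cache: a register is read from hardware at most once per ramfuc_init() generation (ram->sequence), and served from reg->data afterwards, so a reclocking script sees a stable snapshot of the register. A sketch of the behaviour:

	/* sketch: the second read is served from the shadow copy */
	static u32
	example_read_twice(struct ramfuc *ram, struct ramfuc_reg *reg)
	{
		u32 a = ramfuc_rd32(ram, reg);	/* may hit nvkm_rd32() */
		u32 b = ramfuc_rd32(ram, reg);	/* cached: sequences match */
		return a == b;			/* always 1 within one script */
	}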
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index de9f395..772425c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#define gf100_ram(p) container_of((p), struct gf100_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -108,9 +108,10 @@
 gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 {
 	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nvkm_fb *pfb = nvkm_fb(ram);
-	u32 part = nv_rd32(pfb, 0x022438), i;
-	u32 mask = nv_rd32(pfb, 0x022554);
+	struct nvkm_fb *fb = ram->base.fb;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 part = nvkm_rd32(device, 0x022438), i;
+	u32 mask = nvkm_rd32(device, 0x022554);
 	u32 addr = 0x110974;
 
 	ram_wr32(fuc, 0x10f910, magic);
@@ -124,12 +125,14 @@
 }
 
 static int
-gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
+	struct gf100_ram *ram = gf100_ram(base);
 	struct gf100_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_ramcfg cfg;
 	u8  ver, cnt, len, strap;
 	struct {
@@ -145,37 +148,37 @@
 	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
 				      &cnt, &ramcfg.size, &cfg);
 	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
 	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
 	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	strap = nvbios_rd08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
 		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
 					      &cnt, &len);
 		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	} else {
 		timing.data = 0;
 	}
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -184,9 +187,9 @@
 
 	/* determine target mclk configuration */
 	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
-		ref = clk->read(clk, nv_clk_src_sppll0);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
 	else
-		ref = clk->read(clk, nv_clk_src_sppll1);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
 	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
 	out = (ref * 2) / (div + 2);
 	mode = freq != out;
@@ -210,10 +213,10 @@
 
 	if (mode == 1 && from == 0) {
 		/* calculate refpll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
-				     ram->mempll.refclk, &N1, NULL, &M1, &P);
+		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
+				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc refpll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -225,10 +228,10 @@
 		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
 
 		/* calculate mempll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+		ret = gt215_pll_calc(subdev, &ram->mempll, freq,
 				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -402,49 +405,48 @@
 }
 
 static int
-gf100_ram_prog(struct nvkm_fb *pfb)
+gf100_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-gf100_ram_tidy(struct nvkm_fb *pfb)
+gf100_ram_tidy(struct nvkm_ram *base)
 {
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gf100_ram *ram = gf100_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 void
-gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(pfb);
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mem *mem = *pmem;
 
 	*pmem = NULL;
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (mem->tag)
-		ltc->tags_free(ltc, &mem->tag);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+		nvkm_ltc_tags_free(ltc, &mem->tag);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	      u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *mm = &pfb->vram;
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
+	struct nvkm_mm *mm = &ram->vram;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
@@ -452,9 +454,9 @@
 	const bool comp = gf100_pte_storage_type_map[type] != type;
 	int ret;
 
-	size  >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
+	size  >>= NVKM_RAM_MM_SHIFT;
+	align >>= NVKM_RAM_MM_SHIFT;
+	ncmin >>= NVKM_RAM_MM_SHIFT;
 	if (!ncmin)
 		ncmin = size;
 
@@ -465,14 +467,12 @@
 	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		struct nvkm_ltc *ltc = nvkm_ltc(pfb);
-
 		/* compression only works with lpages */
-		if (align == (1 << (17 - 12))) {
+		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
 			int n = size >> 5;
-			ltc->tags_alloc(ltc, n, &mem->tag);
+			nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
 		}
 
 		if (unlikely(!mem->tag))
@@ -486,178 +486,173 @@
 		else
 			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
-int
-gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
-		  void **pobject)
+static int
+gf100_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_ram *ram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(pfb, 0x022438);
-	u32 pmask = nv_rd32(pfb, maskaddr);
-	u32 bsize = nv_rd32(pfb, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
+	static const u8  train0[] = {
+		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	int i;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
-	ram = *pobject;
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		break;
+	default:
+		return 0;
+	}
+
+	/* prepare for ddr link training, and load training patterns */
+	for (i = 0; i < 0x30; i++) {
+		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+	}
+
+	return 0;
+}
+
+static const struct nvkm_ram_func
+gf100_ram_func = {
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
+
+int
+gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       u32 maskaddr, struct nvkm_ram *ram)
+{
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u32 parts = nvkm_rd32(device, 0x022438);
+	u32 pmask = nvkm_rd32(device, maskaddr);
+	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
+	u64 psize, size = 0;
+	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
+	bool uniform = true;
+	int ret, i;
+
+	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
+	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
+
+	/* read amount of vram attached to each memory controller */
+	for (i = 0; i < parts; i++) {
+		if (pmask & (1 << i))
+			continue;
+
+		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
+		if (psize != bsize) {
+			if (psize < bsize)
+				bsize = psize;
+			uniform = false;
+		}
+
+		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
+		size += psize;
+	}
+
+	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
 	if (ret)
 		return ret;
 
-	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
-	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-	ram->type = nvkm_fb_bios_memtype(bios);
-	ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
-
-	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
-			ram->size += (u64)psize << 20;
-		}
-	}
+	nvkm_mm_fini(&ram->vram);
 
 	/* if all controllers have the same amount attached, there's no holes */
 	if (uniform) {
-		offset = rsvd_head;
-		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (size - rsvd_head - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
 	} else {
 		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&pfb->vram, rsvd_head,
-				   (bsize << 8) * parts - rsvd_head, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   ((bsize * parts) - rsvd_head) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 
 		/* and the rest starting from (8GiB + common_size) */
-		offset = (0x0200000000ULL >> 12) + (bsize << 8);
-		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
-
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
+				   NVKM_RAM_MM_SHIFT,
+				   (size - (bsize * parts) - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
-			nvkm_mm_fini(&pfb->vram);
+			return ret;
 	}
 
-	if (ret)
-		return ret;
-
-	ram->get = gf100_ram_get;
-	ram->put = gf100_ram_put;
+	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
 	return 0;
 }
 
-static int
-gf100_ram_init(struct nvkm_object *object)
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gf100_ram *ram = (void *)object;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	/* prepare for ddr link training, and load training patterns */
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5: {
-		static const u8  train0[] = {
-			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
-			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
-		};
-		static const u32 train1[] = {
-			0x00000000, 0xffffffff,
-			0x55555555, 0xaaaaaaaa,
-			0x33333333, 0xcccccccc,
-			0xf0f0f0f0, 0x0f0f0f0f,
-			0x00ff00ff, 0xff00ff00,
-			0x0000ffff, 0xffff0000,
-		};
-
-		for (i = 0; i < 0x30; i++) {
-			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-		}
-	}	break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int
-gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct gf100_ram *ram;
 	int ret;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
 	if (ret) {
-		nv_error(ram, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
 	if (ret) {
-		nv_error(ram, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gf100_ram_calc;
-		ram->base.prog = gf100_ram_prog;
-		ram->base.tidy = gf100_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
 	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
 	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -718,14 +713,3 @@
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gf100_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
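
gf100_ram_ctor() above maps mixed-density VRAM as up to two nvkm_mm windows (in NVKM_RAM_MM_SHIFT units): the lowest common amount per partition addressed from (almost) zero, and the remainder from 8GiB + common_size. A worked example with hypothetical partition sizes:

	/* hypothetical: parts = 2, partition sizes 1024 MiB and 768 MiB
	 *   bsize = 768 MiB (lowest common), size = 1792 MiB, uniform = false
	 *   low window : [rsvd_head, bsize * parts)
	 *              = [256 KiB, 1536 MiB)
	 *   high window: starts at 8 GiB + bsize = 8 GiB + 768 MiB,
	 *                length size - bsize * parts - rsvd_tail = 255 MiB
	 */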
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1ef15c3..9893556 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
+#define gk104_ram(p) container_of((p), struct gk104_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "gf100.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
@@ -229,8 +229,9 @@
 gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
 	       u32 _mask, u32 _data, u32 _copy)
 {
-	struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
+	struct nvkm_fb *fb = ram->base.fb;
 	struct ramfuc *fuc = &ram->fuc.base;
+	struct nvkm_device *device = fb->subdev.device;
 	u32 addr = 0x110000 + (reg->addr & 0xfff);
 	u32 mask = _mask | _copy;
 	u32 data = (_data & _mask) | (reg->data & _copy);
@@ -238,7 +239,7 @@
 
 	for (i = 0; i < 16; i++, addr += 0x1000) {
 		if (ram->pnuts & (1 << i)) {
-			u32 prev = nv_rd32(priv, addr);
+			u32 prev = nvkm_rd32(device, addr);
 			u32 next = (prev & ~mask) | data;
 			nvkm_memx_wr32(fuc->memx, addr, next);
 		}
@@ -248,9 +249,8 @@
 	gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
 
 static int
-gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	struct nvkm_ram_data *next = ram->base.next;
 	int vc = !next->bios.ramcfg_11_02_08;
@@ -674,9 +674,8 @@
  ******************************************************************************/
 
 static int
-gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
 	const u32 runk0 = ram->fN1 << 16;
@@ -926,9 +925,9 @@
  ******************************************************************************/
 
 static int
-gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
+gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = khz / 1000;
 
@@ -941,19 +940,19 @@
 		}
 	}
 
-	nv_error(ram, "ramcfg data for %dMHz not found\n", mhz);
+	nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
 	return -EINVAL;
 }
 
 static int
-gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
+gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	int refclk, i;
 	int ret;
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -973,11 +972,11 @@
 		refclk = fuc->mempll.refclk;
 
 	/* calculate refpll coefficients */
-	ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+	ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
 			     &ram->fN1, &ram->M1, &ram->P1);
 	fuc->mempll.refclk = ret;
 	if (ret <= 0) {
-		nv_error(pfb, "unable to calc refpll\n");
+		nvkm_error(subdev, "unable to calc refpll\n");
 		return -EINVAL;
 	}
 
@@ -990,10 +989,10 @@
 		fuc->mempll.min_p = 1;
 		fuc->mempll.max_p = 2;
 
-		ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
+		ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
 				     &ram->N2, NULL, &ram->M2, &ram->P2);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc mempll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return -EINVAL;
 		}
 	}
@@ -1005,15 +1004,15 @@
 	ram->base.freq = next->freq;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		if (ret == 0)
-			ret = gk104_ram_calc_sddr3(pfb, next->freq);
+			ret = gk104_ram_calc_sddr3(ram, next->freq);
 		break;
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
 		if (ret == 0)
-			ret = gk104_ram_calc_gddr5(pfb, next->freq);
+			ret = gk104_ram_calc_gddr5(ram, next->freq);
 		break;
 	default:
 		ret = -ENOSYS;
@@ -1024,21 +1023,22 @@
 }
 
 static int
-gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
+	struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
 	struct nvkm_ram_data *xits = &ram->base.xition;
 	struct nvkm_ram_data *copy;
 	int ret;
 
 	if (ram->base.next == NULL) {
-		ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+		ret = gk104_ram_calc_data(ram,
+					  nvkm_clk_read(clk, nv_clk_src_mem),
 					  &ram->base.former);
 		if (ret)
 			return ret;
 
-		ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
+		ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
 		if (ret)
 			return ret;
 
@@ -1062,13 +1062,13 @@
 		ram->base.next = &ram->base.target;
 	}
 
-	return gk104_ram_calc_xits(pfb, ram->base.next);
+	return gk104_ram_calc_xits(ram, ram->base.next);
 }
 
 static void
-gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = freq / 1000;
 	u32 mask, data;
@@ -1090,31 +1090,31 @@
 		data |= cfg->bios.rammap_11_09_01ff;
 		mask |= 0x000001ff;
 	}
-	nv_mask(pfb, 0x10f468, mask, data);
+	nvkm_mask(device, 0x10f468, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
 		data |= cfg->bios.rammap_11_0a_0400;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f420, mask, data);
+	nvkm_mask(device, 0x10f420, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
 		data |= cfg->bios.rammap_11_0a_0800;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f430, mask, data);
+	nvkm_mask(device, 0x10f430, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
 		data |= cfg->bios.rammap_11_0b_01f0;
 		mask |= 0x0000001f;
 	}
-	nv_mask(pfb, 0x10f400, mask, data);
+	nvkm_mask(device, 0x10f400, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
 		data |= cfg->bios.rammap_11_0b_0200 << 9;
 		mask |= 0x00000200;
 	}
-	nv_mask(pfb, 0x10f410, mask, data);
+	nvkm_mask(device, 0x10f410, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
 		data |= cfg->bios.rammap_11_0d << 16;
@@ -1124,7 +1124,7 @@
 		data |= cfg->bios.rammap_11_0f << 8;
 		mask |= 0x0000ff00;
 	}
-	nv_mask(pfb, 0x10f440, mask, data);
+	nvkm_mask(device, 0x10f440, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
 		data |= cfg->bios.rammap_11_0e << 8;
@@ -1138,15 +1138,15 @@
 		data |= cfg->bios.rammap_11_0b_0400 << 5;
 		mask |= 0x00000020;
 	}
-	nv_mask(pfb, 0x10f444, mask, data);
+	nvkm_mask(device, 0x10f444, mask, data);
 }
 
 static int
-gk104_ram_prog(struct nvkm_fb *pfb)
+gk104_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *next = ram->base.next;
 
 	if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
@@ -1154,20 +1154,19 @@
 		return (ram->base.next == &ram->base.xition);
 	}
 
-	gk104_ram_prog_0(pfb, 1000);
+	gk104_ram_prog_0(ram, 1000);
 	ram_exec(fuc, true);
-	gk104_ram_prog_0(pfb, next->freq);
+	gk104_ram_prog_0(ram, next->freq);
 
 	return (ram->base.next == &ram->base.xition);
 }
 
 static void
-gk104_ram_tidy(struct nvkm_fb *pfb)
+gk104_ram_tidy(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
-	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct gk104_ram *ram = gk104_ram(base);
 	ram->base.next = NULL;
-	ram_exec(fuc, false);
+	ram_exec(&ram->fuc, false);
 }
 
 struct gk104_ram_train {
@@ -1183,10 +1182,10 @@
 };
 
 static int
-gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
+gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
 		     struct gk104_ram_train *train)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->fb->subdev.device->bios;
 	struct nvbios_M0205E M0205E;
 	struct nvbios_M0205S M0205S;
 	struct nvbios_M0209E M0209E;
@@ -1244,33 +1243,35 @@
 }
 
 static int
-gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
+gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
 
 	if ((train->mask & 0x03d3) != 0x03d3) {
-		nv_warn(pfb, "missing link training data\n");
+		nvkm_warn(subdev, "missing link training data\n");
 		return -EINVAL;
 	}
 
 	for (i = 0; i < 0x30; i++) {
 		for (j = 0; j < 8; j += 4) {
-			nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
+			nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
+			nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
 						   train->type08.data[i] << 4 |
 						   train->type06.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
-			nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
+			nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
+			nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
 						   train->type09.data[i] << 4 |
 						   train->type07.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
+			nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
 		}
 	}
 
 	for (j = 0; j < 8; j += 4) {
 		for (i = 0; i < 0x100; i++) {
-			nv_wr32(pfb, 0x10f968 + j, i);
-			nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
+			nvkm_wr32(device, 0x10f968 + j, i);
+			nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
 		}
 	}
 
@@ -1278,23 +1279,24 @@
 }
 
 static int
-gk104_ram_train_init(struct nvkm_fb *pfb)
+gk104_ram_train_init(struct nvkm_ram *ram)
 {
-	u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
 	struct gk104_ram_train *train;
-	int ret = -ENOMEM, i;
+	int ret, i;
 
-	if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
-		for (i = 0; i < 0x100; i++) {
-			ret = gk104_ram_train_type(pfb, i, ramcfg, train);
-			if (ret && ret != -ENOENT)
-				break;
-		}
+	if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
+		return -ENOMEM;
+
+	for (i = 0; i < 0x100; i++) {
+		ret = gk104_ram_train_type(ram, i, ramcfg, train);
+		if (ret && ret != -ENOENT)
+			break;
 	}
 
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_GDDR5:
-		ret = gk104_ram_train_init_0(pfb, train);
+	switch (ram->type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		ret = gk104_ram_train_init_0(ram, train);
 		break;
 	default:
 		ret = 0;
@@ -1306,18 +1308,14 @@
 }
 
 int
-gk104_ram_init(struct nvkm_object *object)
+gk104_ram_init(struct nvkm_ram *ram)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gk104_ram *ram   = (void *)object;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, hdr, cnt, len, snr, ssz;
 	u32 data, save;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
+	int i;
 
 	/* run a bunch of tables from the rammap table.  there are actually
 	 * individual pointers for each rammap entry too, but, nvidia
@@ -1334,33 +1332,32 @@
 	if (!data || hdr < 0x15)
 		return -EINVAL;
 
-	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
-	data = nv_ro32(bios, data + 0x10); /* guess u32... */
-	save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
+	cnt  = nvbios_rd08(bios, data + 0x14); /* guess at count */
+	data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+	save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
 	for (i = 0; i < cnt; i++, data += 4) {
 		if (i != save >> 4) {
-			nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+			nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
 			nvbios_exec(&(struct nvbios_init) {
-					.subdev = nv_subdev(pfb),
+					.subdev = subdev,
 					.bios = bios,
-					.offset = nv_ro32(bios, data),
+					.offset = nvbios_rd32(bios, data),
 					.execute = 1,
 				    });
 		}
 	}
-	nv_mask(pfb, 0x10f65c, 0x000000f0, save);
-	nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
-	nv_wr32(pfb, 0x10ecc0, 0xffffffff);
-	nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x10f65c, 0x000000f0, save);
+	nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
+	nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+	nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
 
-	return gk104_ram_train_init(pfb);
+	return gk104_ram_train_init(ram);
 }
 
 static int
 gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
 {
-	struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
 	struct nvkm_ram_data *cfg;
 	struct nvbios_ramcfg *d = &ram->diff;
 	struct nvbios_ramcfg *p, *n;
@@ -1426,63 +1423,64 @@
 	return ret;
 }
 
-static void
-gk104_ram_dtor(struct nvkm_object *object)
+static void *
+gk104_ram_dtor(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)object;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct nvkm_ram_data *cfg, *tmp;
 
 	list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
 		kfree(cfg);
 	}
 
-	nvkm_ram_destroy(&ram->base);
+	return ram;
 }
 
-static int
-gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gk104_ram_func = {
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_gpio *gpio = device->gpio;
 	struct dcb_gpio_func func;
 	struct gk104_ram *ram;
 	int ret, i;
-	u8  ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8  ramcfg = nvbios_ramcfg_index(subdev);
 	u32 tmp;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	INIT_LIST_HEAD(&ram->cfg);
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gk104_ram_calc;
-		ram->base.prog = gk104_ram_prog;
-		ram->base.tidy = gk104_ram_tidy;
-		break;
-	default:
-		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
-		break;
-	}
-
 	/* calculate a mask of differently configured memory partitions,
 	 * because, of course, reclocking wasn't complicated enough
 	 * already without having to treat some of them differently to
 	 * the others....
 	 */
-	ram->parts = nv_rd32(pfb, 0x022438);
-	ram->pmask = nv_rd32(pfb, 0x022554);
+	ram->parts = nvkm_rd32(device, 0x022438);
+	ram->pmask = nvkm_rd32(device, 0x022554);
 	ram->pnuts = 0;
 	for (i = 0, tmp = 0; i < ram->parts; i++) {
 		if (!(ram->pmask & (1 << i))) {
-			u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+			u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
 			if (tmp && tmp != cfg1) {
 				ram->pnuts |= (1 << i);
 				continue;
@@ -1505,7 +1503,7 @@
 	for (i = 0; !ret; i++) {
 		ret = gk104_ram_ctor_data(ram, ramcfg, i);
 		if (ret && ret != -ENOENT) {
-			nv_error(pfb, "failed to parse ramcfg data\n");
+			nvkm_error(subdev, "failed to parse ramcfg data\n");
 			return ret;
 		}
 	}
@@ -1513,25 +1511,25 @@
 	/* parse bios data for both pll's */
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
 	if (ret) {
-		nv_error(pfb, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
 	/* lookup memory voltage gpios */
-	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
 		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
@@ -1588,7 +1586,7 @@
 	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
@@ -1600,7 +1598,7 @@
 		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
 		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
 		break;
@@ -1626,14 +1624,3 @@
 	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
-
-struct nvkm_oclass
-gk104_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ram_ctor,
-		.dtor = gk104_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
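
Two idioms in the gk104 conversion deserve a note. First, the old (void *)pfb->ram downcasts are replaced by a container_of() accessor macro, which is type-checked against the embedded base member. Second, the if (mask = 0, data = 0, ...) tests in gk104_ram_prog_0() are comma-operator expressions that deliberately reset mask and data before evaluating each rammap condition; they are not accidental assignments. A reduced sketch of the downcast pattern, using only names from the hunks above:

	#define gk104_ram(p) container_of((p), struct gk104_ram, base)

	static int
	gk104_ram_calc(struct nvkm_ram *base, u32 freq)
	{
		/* recover the chip-specific container from the embedded base
		 * object; replaces the old unchecked cast
		 *	struct gk104_ram *ram = (void *)pfb->ram;
		 */
		struct gk104_ram *ram = gk104_ram(base);

		/* ... reclock calculation elided, see the hunks above ... */
		return 0;
	}
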
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index a298b39f..43d807f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,35 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#include "ram.h"
 
-struct gm107_ram {
-	struct nvkm_ram base;
+static const struct nvkm_ram_func
+gm107_ram_func = {
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
 };
 
-static int
-gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct gm107_ram *ram;
-	int ret;
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	return 0;
+	return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
 }
-
-struct nvkm_oclass
-gm107_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 2417640..5c08ae8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -22,11 +22,10 @@
  * Authors: Ben Skeggs
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-
+#define gt215_ram(p) container_of((p), struct gt215_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0205.h>
@@ -154,14 +153,14 @@
  * Link training for (at least) DDR3
  */
 int
-gt215_link_train(struct nvkm_fb *pfb)
+gt215_link_train(struct gt215_ram *ram)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct nvkm_clk *clk = nvkm_clk(pfb);
 	struct gt215_ltrain *train = &ram->ltrain;
-	struct nvkm_device *device = nv_device(pfb);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_clk *clk = device->clk;
 	u32 *result, r1700;
 	int ret, i;
 	struct nvbios_M0205T M0205T = { 0 };
@@ -182,27 +181,29 @@
 
 	/* Clock speeds for training and back */
 	nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
-	if (M0205T.freq == 0)
+	if (M0205T.freq == 0) {
+		kfree(result);
 		return -ENOENT;
+	}
 
-	clk_current = clk->read(clk, nv_clk_src_mem);
+	clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
 
 	ret = gt215_clk_pre(clk, f);
 	if (ret)
 		goto out;
 
 	/* First: clock up/down */
-	ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+	ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
 	if (ret)
 		goto out;
 
 	/* Do this *after* calc, eliminates write in script */
-	nv_wr32(pfb, 0x111400, 0x00000000);
+	nvkm_wr32(device, 0x111400, 0x00000000);
 	/* XXX: Magic writes that improve train reliability? */
-	nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
-	nv_wr32(pfb, 0x100c04, 0x00000400);
+	nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
+	nvkm_wr32(device, 0x100c04, 0x00000400);
 
 	/* Now the training script */
 	r1700 = ram_rd32(fuc, 0x001700);
@@ -235,22 +236,22 @@
 
 	ram_exec(fuc, true);
 
-	ram->base.calc(pfb, clk_current);
+	ram->base.func->calc(&ram->base, clk_current);
 	ram_exec(fuc, true);
 
 	/* Post-processing, avoids flicker */
-	nv_mask(pfb, 0x616308, 0x10, 0x10);
-	nv_mask(pfb, 0x616b08, 0x10, 0x10);
+	nvkm_mask(device, 0x616308, 0x10, 0x10);
+	nvkm_mask(device, 0x616b08, 0x10, 0x10);
 
 	gt215_clk_post(clk, f);
 
-	ram_train_result(pfb, result, 64);
+	ram_train_result(ram->base.fb, result, 64);
 	for (i = 0; i < 64; i++)
-		nv_debug(pfb, "Train: %08x", result[i]);
+		nvkm_debug(subdev, "Train: %08x", result[i]);
 	gt215_link_train_calc(result, train);
 
-	nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
-			train->r_1111e0, train->r_111400);
+	nvkm_debug(subdev, "Train: %08x %08x %08x", train->r_100720,
+		   train->r_1111e0, train->r_111400);
 
 	kfree(result);
 
@@ -265,11 +266,12 @@
 	train->state = NVA3_TRAIN_UNSUPPORTED;
 
 	gt215_clk_post(clk, f);
+	kfree(result);
 	return ret;
 }
 
 int
-gt215_link_train_init(struct nvkm_fb *pfb)
+gt215_link_train_init(struct gt215_ram *ram)
 {
 	static const u32 pattern[16] = {
 		0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -277,9 +279,9 @@
 		0x33333333, 0x55555555, 0x77777777, 0x66666666,
 		0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
 	};
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mem *mem;
 	struct nvbios_M0205E M0205E;
 	u8 ver, hdr, cnt, len;
@@ -298,48 +300,47 @@
 
 	train->state = NVA3_TRAIN_ONCE;
 
-	ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+	ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
+				  &ram->ltrain.mem);
 	if (ret)
 		return ret;
 
 	mem = ram->ltrain.mem;
 
-	nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
-	nv_wr32(pfb, 0x1005a8, 0x0000ffff);
-	nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+	nvkm_wr32(device, 0x1005a8, 0x0000ffff);
+	nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f900, pattern[i % 16]);
 	}
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f920, pattern[i % 16]);
 	}
 
 	/* And upload the pattern */
-	r001700 = nv_rd32(pfb, 0x1700);
-	nv_wr32(pfb, 0x1700, mem->offset >> 16);
+	r001700 = nvkm_rd32(device, 0x1700);
+	nvkm_wr32(device, 0x1700, mem->offset >> 16);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+		nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
-	nv_wr32(pfb, 0x1700, r001700);
+		nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
+	nvkm_wr32(device, 0x1700, r001700);
 
-	train->r_100720 = nv_rd32(pfb, 0x100720);
-	train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
-	train->r_111400 = nv_rd32(pfb, 0x111400);
+	train->r_100720 = nvkm_rd32(device, 0x100720);
+	train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
+	train->r_111400 = nvkm_rd32(device, 0x111400);
 	return 0;
 }
 
 void
-gt215_link_train_fini(struct nvkm_fb *pfb)
+gt215_link_train_fini(struct gt215_ram *ram)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-
 	if (ram->ltrain.mem)
-		pfb->ram->put(pfb, &ram->ltrain.mem);
+		ram->base.func->put(&ram->base, &ram->ltrain.mem);
 }
 
 /*
@@ -347,24 +348,25 @@
  */
 #define T(t) cfg->timing_10_##t
 static int
-gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
+gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int tUNK_base, tUNK_40_0, prevCL;
 	u32 cur2, cur3, cur7, cur8;
 
-	cur2 = nv_rd32(pfb, 0x100228);
-	cur3 = nv_rd32(pfb, 0x10022c);
-	cur7 = nv_rd32(pfb, 0x10023c);
-	cur8 = nv_rd32(pfb, 0x100240);
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur3 = nvkm_rd32(device, 0x10022c);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
 
 
 	switch ((!T(CWL)) * ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		T(CWL) = T(CL) - 1;
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
 		break;
 	}
@@ -402,8 +404,8 @@
 	timing[8] = cur8 & 0xffffff00;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_GDDR3:
 		tUNK_40_0 = prevCL - (cur8 & 0xff);
 		if (tUNK_40_0 > 0)
 			timing[8] |= T(CL);
@@ -412,11 +414,11 @@
 		break;
 	}
 
-	nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
-			timing[0], timing[1], timing[2], timing[3]);
-	nv_debug(pfb, "  230: %08x %08x %08x %08x\n",
-			timing[4], timing[5], timing[6], timing[7]);
-	nv_debug(pfb, "  240: %08x\n", timing[8]);
+	nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, "  230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, "  240: %08x\n", timing[8]);
 	return 0;
 }
 #undef T
@@ -466,13 +468,13 @@
 static void
 gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
+	struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	u32 reg, sh, gpio_val;
 	int ret;
 
-	if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
-		ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+		ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 		if (ret)
 			return;
 
@@ -487,12 +489,14 @@
 }
 
 static int
-gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gt215_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct gt215_clk_info mclk;
 	struct nvkm_ram_data *next;
 	u8  ver, hdr, cnt, len, strap;
@@ -508,28 +512,27 @@
 	ram->base.next = next;
 
 	if (ram->ltrain.state == NVA3_TRAIN_ONCE)
-		gt215_link_train(pfb);
+		gt215_link_train(ram);
 
 	/* lookup memory config data relevant to the target frequency */
-	i = 0;
 	data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
 			       &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x05) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
 			       &ver, &hdr, &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x09) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
@@ -539,20 +542,20 @@
 				       &ver, &hdr, &cnt, &len,
 				       &next->bios);
 		if (!data || ver != 0x10 || hdr < 0x17) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	}
 
-	ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
+	ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
 	if (ret < 0) {
-		nv_error(pfb, "failed mclk calculation\n");
+		nvkm_error(subdev, "failed mclk calculation\n");
 		return ret;
 	}
 
-	gt215_ram_timing_calc(pfb, timing);
+	gt215_ram_timing_calc(ram, timing);
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -562,13 +565,13 @@
 	ram->base.mr[2] = ram_rd32(fuc, mr[2]);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ret = nvkm_sddr2_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		ret = nvkm_gddr3_calc(&ram->base);
 		break;
 	default:
@@ -579,7 +582,7 @@
 	if (ret)
 		return ret;
 
-	/* XXX: where the fuck does 750MHz come from? */
+	/* XXX: 750MHz seems rather arbitrary */
 	if (freq <= 750000) {
 		r004018 = 0x10000000;
 		r100760 = 0x22222222;
@@ -590,7 +593,7 @@
 		r100da0 = 0x00000000;
 	}
 
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		r004018 |= 0x00004000;
 
 	/* pll2pll requires switching to a safe clock first */
@@ -623,18 +626,18 @@
 	ram_nsec(fuc, 2000);
 
 	if (!next->bios.ramcfg_10_02_10) {
-		if (ram->base.type == NV_MEM_TYPE_GDDR3)
+		if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
 			ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
 		else
 			ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
 	}
 
 	/* If we're disabling the DLL, do it now */
-	switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	switch (next->bios.ramcfg_DLLoff * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR3:
 		nvkm_sddr3_dll_disable(fuc, ram->base.mr);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		nvkm_gddr3_dll_disable(fuc, ram->base.mr);
 		break;
 	}
@@ -650,7 +653,7 @@
 	ram_wr32(fuc, 0x1002dc, 0x00000001);
 	ram_nsec(fuc, 2000);
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+	if (device->chipset == 0xa3 && freq <= 500000)
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
 
 	/* Fiddle with clocks */
@@ -708,7 +711,7 @@
 		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
 	}
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+	if (device->chipset == 0xa3 && freq > 500000) {
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
 	}
 
@@ -752,11 +755,11 @@
 
 	if (next->bios.ramcfg_10_02_04) {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset != 0xa8)
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset != 0xa8)
 				r111100 |= 0x00000004;
 			/* no break */
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x08000000;
 			break;
 		default:
@@ -764,12 +767,12 @@
 		}
 	} else {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x1a800000;
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset == 0xa8) {
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset == 0xa8) {
 				r111100 |=  0x08000000;
 			} else {
 				r111100 &= ~0x00000004;
@@ -777,7 +780,7 @@
 			}
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_GDDR3:
+		case NVKM_RAM_TYPE_GDDR3:
 			r111100 |= 0x30000000;
 			unk714  |= 0x00000020;
 			break;
@@ -810,16 +813,16 @@
 		gt215_ram_fbvref(fuc, 1);
 
 	/* Reset DLL */
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		nvkm_sddr2_dll_reset(fuc);
 
-	if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
 		ram_nsec(fuc, 31000);
 	} else {
 		ram_nsec(fuc, 14000);
 	}
 
-	if (ram->base.type == NV_MEM_TYPE_DDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
 		ram_wr32(fuc, 0x100264, 0x1);
 		ram_nsec(fuc, 2000);
 	}
@@ -855,24 +858,24 @@
 }
 
 static int
-gt215_ram_prog(struct nvkm_fb *pfb)
+gt215_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
 
 	if (exec) {
-		nv_mask(pfb, 0x001534, 0x2, 0x2);
+		nvkm_mask(device, 0x001534, 0x2, 0x2);
 
 		ram_exec(fuc, true);
 
 		/* Post-processing, avoids flicker */
-		nv_mask(pfb, 0x002504, 0x1, 0x0);
-		nv_mask(pfb, 0x001534, 0x2, 0x0);
+		nvkm_mask(device, 0x002504, 0x1, 0x0);
+		nvkm_mask(device, 0x001534, 0x2, 0x0);
 
-		nv_mask(pfb, 0x616308, 0x10, 0x10);
-		nv_mask(pfb, 0x616b08, 0x10, 0x10);
+		nvkm_mask(device, 0x616308, 0x10, 0x10);
+		nvkm_mask(device, 0x616b08, 0x10, 0x10);
 	} else {
 		ram_exec(fuc, false);
 	}
@@ -880,69 +883,56 @@
 }
 
 static void
-gt215_ram_tidy(struct nvkm_fb *pfb)
+gt215_ram_tidy(struct nvkm_ram *base)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct gt215_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gt215_ram *ram = gt215_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 static int
-gt215_ram_init(struct nvkm_object *object)
+gt215_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gt215_ram   *ram = (void *)object;
-	int ret;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	gt215_link_train_init(pfb);
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_init(ram);
 	return 0;
 }
 
-static int
-gt215_ram_fini(struct nvkm_object *object, bool suspend)
+static void *
+gt215_ram_dtor(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-
-	if (!suspend)
-		gt215_link_train_fini(pfb);
-
-	return 0;
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_fini(ram);
+	return ram;
 }
 
-static int
-gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gt215_ram_func = {
+	.dtor = gt215_ram_dtor,
+	.init = gt215_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = gt215_ram_calc,
+	.prog = gt215_ram_prog,
+	.tidy = gt215_ram_tidy,
+};
+
+int
+gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_gpio *gpio = fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	struct gt215_ram *ram;
-	int ret, i;
 	u32 reg, shift;
+	int ret, i;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = gt215_ram_calc;
-		ram->base.prog = gt215_ram_prog;
-		ram->base.tidy = gt215_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
 	ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
 	ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
@@ -992,7 +982,7 @@
 		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		nv50_gpio_location(func.line, &reg, &shift);
 		ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
@@ -1000,13 +990,3 @@
 
 	return 0;
 }
-
-struct nvkm_oclass
-gt215_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gt215_ram_init,
-		.fini = gt215_ram_fini,
-	},
-};
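
Buried in the mechanical conversion above is a behavioural fix: gt215_link_train() used to leak its result buffer (64 u32 training words, judging from the debug loop) on both the early -ENOENT return and the training-failure path, and the patch adds a kfree(result) to each exit. Reduced to just the error handling, with the allocation and training elided since they sit outside the hunks, the corrected flow is:

	if (M0205T.freq == 0) {
		kfree(result);		/* previously leaked */
		return -ENOENT;
	}

	/* ... link training ... */

	kfree(result);			/* success path, already present */
	return 0;

out:	/* reached via goto when training fails */
	train->state = NVA3_TRAIN_UNSUPPORTED;
	gt215_clk_post(clk, f);
	kfree(result);			/* previously leaked */
	return ret;
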
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index abc18e8..0a0e44b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,81 +21,67 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
+#include "ram.h"
 
-struct mcp77_ram_priv {
+struct mcp77_ram {
 	struct nvkm_ram base;
 	u64 poller_base;
 };
 
 static int
-mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+mcp77_ram_init(struct nvkm_ram *base)
 {
-	u32 rsvd_head = ( 256 * 1024); /* vga memory */
-	u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct mcp77_ram_priv *priv;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.type   = NV_MEM_TYPE_STOLEN;
-	priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-	priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
-
-	rsvd_tail += 0x1000;
-	priv->poller_base = priv->base.size - rsvd_tail;
-
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
-			   (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
-			   1);
-	if (ret)
-		return ret;
-
-	priv->base.get = nv50_ram_get;
-	priv->base.put = nv50_ram_put;
-	return 0;
-}
-
-static int
-mcp77_ram_init(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct mcp77_ram_priv *priv = (void *)object;
-	int ret;
-	u64 dniso, hostnb, flush;
-
-	ret = nvkm_ram_init(&priv->base);
-	if (ret)
-		return ret;
-
-	dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
-	hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
-	flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+	struct mcp77_ram *ram = mcp77_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	u32 dniso  = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
+	u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
+	u32 flush  = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
 
 	/* Enable NISO poller for various clients and set their associated
 	 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
 	 */
-	nv_wr32(pfb, 0x100c18, dniso);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
-	nv_wr32(pfb, 0x100c1c, hostnb);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
-	nv_wr32(pfb, 0x100c24, flush);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+	nvkm_wr32(device, 0x100c18, dniso);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
+	nvkm_wr32(device, 0x100c1c, hostnb);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
+	nvkm_wr32(device, 0x100c24, flush);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
 	return 0;
 }
 
-struct nvkm_oclass
-mcp77_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = mcp77_ram_init,
-		.fini = _nvkm_ram_fini,
-	},
+static const struct nvkm_ram_func
+mcp77_ram_func = {
+	.init = mcp77_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
 };
+
+int
+mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
+	u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
+	u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
+	struct mcp77_ram *ram;
+	int ret;
+
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			    size, 0, &ram->base);
+	if (ret)
+		return ret;
+
+	ram->poller_base = size - rsvd_tail;
+	ram->base.stolen = base;
+	nvkm_mm_fini(&ram->base.vram);
+
+	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >>
+			    NVKM_RAM_MM_SHIFT, 1);
+}
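
The poller read addresses programmed by mcp77_ram_init() encode each slot's distance from the end of VRAM in 32-byte units, minus one. Because poller_base = size - rsvd_tail and rsvd_tail = (1024 * 1024) + 0x1000 = 0x101000 bytes, the size term cancels and the three register values are constants; working through the arithmetic from the code above:

	/*
	 *	dniso  = ((size - (poller_base + 0x00)) >> 5) - 1
	 *	       = ((0x101000 - 0x00) >> 5) - 1 = 0x8080 - 1 = 0x807f
	 *	hostnb = ((0x101000 - 0x20) >> 5) - 1 = 0x807f - 1 = 0x807e
	 *	flush  = ((0x101000 - 0x40) >> 5) - 1 = 0x807e - 1 = 0x807d
	 *
	 * i.e. the dniso/hostnb/flush slots are the 32-byte blocks at
	 * poller_base + 0x00, + 0x20 and + 0x40, which the reserved tail
	 * (1MiB vbios + 0x1000 poller page) keeps out of the allocator.
	 */
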
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 855de16..6f053a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,59 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
-static int
-nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
-	int ret;
+const struct nvkm_ram_func
+nv04_ram_func = {
+};
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+int
+nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
+	u64 size;
+	enum nvkm_ram_type type;
 
 	if (boot0 & 0x00000100) {
-		ram->size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-		ram->size *= 1024 * 1024;
+		size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		size *= 1024 * 1024;
 	} else {
 		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			ram->size = 32 * 1024 * 1024;
+			size = 32 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			ram->size = 16 * 1024 * 1024;
+			size = 16 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			ram->size = 8 * 1024 * 1024;
+			size = 8 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			ram->size = 4 * 1024 * 1024;
+			size = 4 * 1024 * 1024;
 			break;
 		}
 	}
 
 	if ((boot0 & 0x00000038) <= 0x10)
-		ram->type = NV_MEM_TYPE_SGRAM;
+		type = NVKM_RAM_TYPE_SGRAM;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv04_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
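
nv04_ram_new() derives both the amount and the type of RAM from the single NV04_PFB_BOOT_0 register. A worked example of the decode, using a hypothetical boot0 value purely for illustration:

	/*
	 * boot0 = 0x00003138 (hypothetical):
	 *
	 *	bit 8 (0x100) set	-> use the newer size encoding
	 *	(boot0 >> 12) & 0xf = 3	-> size = (3 * 2 + 2) MiB = 8 MiB
	 *
	 *	boot0 & 0x00000038 = 0x38, which is > 0x10
	 *				-> NVKM_RAM_TYPE_SDRAM
	 *	(values <= 0x10 decode as NVKM_RAM_TYPE_SGRAM)
	 */
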
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 3b8a1ed..dfd155c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,39 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 cfg0 = nv_rd32(pfb, 0x100200);
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 cfg0 = nvkm_rd32(device, 0x100200);
+	enum nvkm_ram_type type;
 
 	if (cfg0 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		type = NVKM_RAM_TYPE_DDR1;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv10_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index fbae05d..3c6a871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,33 +21,21 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-#include <core/device.h>
-
-static int
-nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
 	struct pci_dev *bridge;
 	u32 mem, mib;
-	int ret;
 
 	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
 	if (!bridge) {
-		nv_fatal(pfb, "no bridge device\n");
+		nvkm_error(&fb->subdev, "no bridge device\n");
 		return -ENODEV;
 	}
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	if (nv_device(pfb)->chipset == 0x1a) {
+	if (fb->subdev.device->chipset == 0x1a) {
 		pci_read_config_dword(bridge, 0x7c, &mem);
 		mib = ((mem >> 6) & 31) + 1;
 	} else {
@@ -55,18 +43,6 @@
 		mib = ((mem >> 4) & 127) + 1;
 	}
 
-	ram->type = NV_MEM_TYPE_STOLEN;
-	ram->size = mib * 1024 * 1024;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			     mib * 1024 * 1024, 0, pram);
 }
-
-struct nvkm_oclass
-nv1a_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv1a_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index d9e7187..747e47c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,42 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 =  nvkm_rd32(device, 0x001218);
+	u32     size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
+	u32     tags =  nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
+	}
+
+	ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
-	}
-	ram->size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  = nv_rd32(pfb, 0x100320);
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv20_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 3d31fa4..56f8cff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,9 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/init.h>
@@ -31,23 +30,23 @@
 #include <subdev/clk/pll.h>
 #include <subdev/timer.h>
 
-int
-nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
+static int
+nv40_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll pll;
 	int N1, M1, N2, M2;
 	int log2P, ret;
 
 	ret = nvbios_pll_parse(bios, 0x04, &pll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
-			    &N1, &M1, &N2, &M2, &log2P);
+	ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
@@ -64,11 +63,13 @@
 	return 0;
 }
 
-int
-nv40_ram_prog(struct nvkm_fb *pfb)
+static int
+nv40_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry M;
 	u32 crtc_mask = 0;
 	u8  sr1[2];
@@ -76,12 +77,12 @@
 
 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
 	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+		u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
 		u32 cnt = 0;
 		do {
-			if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
-				nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+			if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
+				nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
 				if (!(sr1[i] & 0x20))
 					crtc_mask |= (1 << i);
 				break;
@@ -94,55 +95,66 @@
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if (!(tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
 	}
 
 	/* prepare ram for reclocking */
-	nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+	nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
 
 	/* change the PLL of each memory partition */
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
-	switch (nv_device(pfb)->chipset) {
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
 	case 0x41:
 	case 0x42:
 	case 0x47:
-		nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
-		nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004048, ram->coef);
-		nv_wr32(pfb, 0x004030, ram->coef);
+		nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
+		nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004048, ram->coef);
+		nvkm_wr32(device, 0x004030, ram->coef);
 	case 0x43:
 	case 0x49:
 	case 0x4b:
-		nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x00403c, ram->coef);
+		nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x00403c, ram->coef);
 	default:
-		nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004024, ram->coef);
+		nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004024, ram->coef);
 		break;
 	}
 	udelay(100);
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
 
 	/* re-enable normal operation of memory controller */
-	nv_wr32(pfb, 0x1002dc, 0x00000000);
-	nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+	nvkm_wr32(device, 0x1002dc, 0x00000000);
+	nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
 	udelay(100);
 
 	/* execute memory reset script from vbios */
 	if (!bit_entry(bios, 'M', &M)) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(pfb),
+			.subdev = subdev,
 			.bios = bios,
-			.offset = nv_ro16(bios, M.offset + 0x00),
+			.offset = nvbios_rd16(bios, M.offset + 0x00),
 			.execute = 1,
 		};
 
@@ -155,58 +167,64 @@
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
 	}
 
 	return 0;
 }
 
-void
-nv40_ram_tidy(struct nvkm_fb *pfb)
+static void
+nv40_ram_tidy(struct nvkm_ram *base)
 {
 }
 
-static int
-nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+nv40_ram_func = {
+	.calc = nv40_ram_calc,
+	.prog = nv40_ram_prog,
+	.tidy = nv40_ram_tidy,
+};
+
+int
+nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
+	      u32 tags, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
 	struct nv40_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+	return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+}
+
+int
+nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 = nvkm_rd32(device, 0x001218);
+	u32     size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32     tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
+	}
+
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
-	}
-
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-
-struct nvkm_oclass
-nv40_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
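
The nv_wait() calls for vblank synchronisation become nvkm_msec() loops. As used in the hunks above, the macro repeatedly executes its statement body until the body breaks out or the timeout (second argument, in milliseconds) expires, which puts the polled condition in plain sight at the call site. The wait pair from nv40_ram_prog(), annotated with what each poll tests:

	/* poll until bit 16 of the CRTC status word reads clear ... */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
		if (!(tmp & 0x00010000))
			break;
	);

	/* ... then until it reads set again */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
		if ( (tmp & 0x00010000))
			break;
	);
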
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
new file mode 100644
index 0000000..8a05245
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -0,0 +1,14 @@
+#ifndef __NV40_FB_RAM_H__
+#define __NV40_FB_RAM_H__
+#define nv40_ram(p) container_of((p), struct nv40_ram, base)
+#include "ram.h"
+
+struct nv40_ram {
+	struct nvkm_ram base;
+	u32 ctrl;
+	u32 coef;
+};
+
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+		  struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 33c612b..114828b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
+
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv41_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index f575a72..bc56fbf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,44 +21,22 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
-	int ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
-	return 0;
+	return nv40_ram_new_(fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv44_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index 51b44cd..c01f4b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb914 = nv_rd32(pfb, 0x100914);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb914 = nvkm_rd32(device, 0x100914);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	switch (fb914 & 0x00000003) {
+	case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
+	case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	if (ret)
+		return ret;
+
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv49_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv49_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index f3ed1c6..fa3c2e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,34 +21,13 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->type = NV_MEM_TYPE_STOLEN;
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
+			     size, 0, pram);
 }
-
-struct nvkm_oclass
-nv4e_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv4e_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index d2c81dd..9197e0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,14 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define nv50_ram(p) container_of((p), struct nv50_ram, base)
+#include "ram.h"
 #include "ramseq.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/perf.h>
 #include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
 #include <subdev/bios/timing.h>
 #include <subdev/clk/pll.h>
 
@@ -38,11 +40,20 @@
 	struct hwsq_reg r_0x004008;
 	struct hwsq_reg r_0x00400c;
 	struct hwsq_reg r_0x00c040;
+	struct hwsq_reg r_0x100200;
 	struct hwsq_reg r_0x100210;
+	struct hwsq_reg r_0x10021c;
 	struct hwsq_reg r_0x1002d0;
 	struct hwsq_reg r_0x1002d4;
 	struct hwsq_reg r_0x1002dc;
-	struct hwsq_reg r_0x100da0[8];
+	struct hwsq_reg r_0x10053c;
+	struct hwsq_reg r_0x1005a0;
+	struct hwsq_reg r_0x1005a4;
+	struct hwsq_reg r_0x100710;
+	struct hwsq_reg r_0x100714;
+	struct hwsq_reg r_0x100718;
+	struct hwsq_reg r_0x10071c;
+	struct hwsq_reg r_0x100da0;
 	struct hwsq_reg r_0x100e20;
 	struct hwsq_reg r_0x100e24;
 	struct hwsq_reg r_0x611200;
@@ -55,64 +66,181 @@
 	struct nv50_ramseq hwsq;
 };
 
-#define QFX5800NVA0 1
+#define T(t) cfg->timing_10_##t
+static int
+nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
+{
+	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 cur2, cur4, cur7, cur8;
+	u8 unkt3b;
+
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur4 = nvkm_rd32(device, 0x100230);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
+
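+	/* If the timing entry supplies no CWL, derive one from the ram type;
+	 * !T(CWL) zeroes the switch expression when CWL is already known, so
+	 * neither case matches and the value is left alone.
+	 */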
+	switch ((!T(CWL)) * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR2:
+		T(CWL) = T(CL) - 1;
+		break;
+	case NVKM_RAM_TYPE_GDDR3:
+		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+		break;
+	}
+
+	/* XXX: N=1 is not proper statistics */
+	if (device->chipset == 0xa0) {
+		unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
+		timing[6] = (0x2d + T(CL) - T(CWL) +
+				ram->base.next->bios.rammap_00_16_40) << 16 |
+			    T(CWL) << 8 |
+			    (0x2f + T(CL) - T(CWL));
+	} else {
+		unkt3b = 0x16;
+		timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
+			    max_t(s8, T(CWL) - 2, 1) << 8 |
+			    (0x2e + T(CL) - T(CWL));
+	}
+
+	timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+	timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+		    max_t(u8, T(18), 1) << 16 |
+		    (T(WTR) + 1 + T(CWL)) << 8 |
+		    (3 + T(CL) - T(CWL));
+	timing[2] = (T(CWL) - 1) << 24 |
+		    (T(RRD) << 16) |
+		    (T(RCDWR) << 8) |
+		    T(RCDRD);
+	timing[3] = (unkt3b - 2 + T(CL)) << 24 |
+		    unkt3b << 16 |
+		    (T(CL) - 1) << 8 |
+		    (T(CL) - 1);
+	timing[4] = (cur4 & 0xffff0000) |
+		    T(13) << 8 |
+		    T(13);
+	timing[5] = T(RFC) << 24 |
+		    max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+		    T(RP);
+	/* Timing 6 is already done above */
+	timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
+	timing[8] = (cur8 & 0xffffff00);
+
+	/* XXX: P.version == 1 only has DDR2 and GDDR3? */
+	if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
+		timing[5] |= (T(CL) + 3) << 8;
+		timing[8] |= (T(CL) - 4);
+	} else
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
+		timing[5] |= (T(CL) + 2) << 8;
+		timing[8] |= (T(CL) - 2);
+	}
+
+	nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, " 240: %08x\n", timing[8]);
+	return 0;
+}
+#undef T
+
+static void
+nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
+{
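+	/* pulse the DLL-reset bit in MR0 (bit 8), then allow 24us to relock */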
+	ram_mask(hwsq, mr[0], 0x100, 0x100);
+	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	ram_nsec(hwsq, 24000);
+}
 
 static int
-nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
+nv50_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ram *ram = nv50_ram(base);
 	struct nv50_ramseq *hwsq = &ram->hwsq;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_perfE perfE;
 	struct nvbios_pll mpll;
-	struct {
-		u32 data;
-		u8  size;
-	} ramcfg, timing;
-	u8  ver, hdr, cnt, len, strap;
+	struct nvkm_ram_data *next;
+	u8  ver, hdr, cnt, len, strap, size;
+	u32 data;
+	u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
 	int N1, M1, N2, M2, P;
 	int ret, i;
+	u32 timing[9];
+
+	next = &ram->base.target;
+	next->freq = freq;
+	ram->base.next = next;
 
 	/* lookup closest matching performance table entry for frequency */
 	i = 0;
 	do {
-		ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
-					    &ramcfg.size, &perfE);
-		if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
-		    (ramcfg.size < 2)) {
-			nv_error(pfb, "invalid/missing perftab entry\n");
+		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+				     &size, &perfE);
+		if (!data || (ver < 0x25 || ver >= 0x40) ||
+		    (size < 2)) {
+			nvkm_error(subdev, "invalid/missing perftab entry\n");
 			return -EINVAL;
 		}
 	} while (perfE.memory < freq);
 
+	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
+
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
-	ramcfg.data += hdr + (strap * ramcfg.size);
-
-	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
-	if (strap != 0xff) {
-		timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
-					      &cnt, &len);
-		if (!timing.data || ver != 0x10 || hdr < 0x12) {
-			nv_error(pfb, "invalid/missing timing entry "
-				 "%02x %04x %02x %02x\n",
-				 strap, timing.data, ver, hdr);
-			return -EINVAL;
-		}
-	} else {
-		timing.data = 0;
+	data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
+			&next->bios);
+	if (!data) {
+		nvkm_error(subdev, "invalid/missing rammap entry ");
+		return -EINVAL;
 	}
 
-	ret = ram_init(hwsq, nv_subdev(pfb));
+	/* lookup memory timings, if bios says they're present */
+	if (next->bios.ramcfg_timing != 0xff) {
+		data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
+					&ver, &hdr, &cnt, &len, &next->bios);
+		if (!data || ver != 0x10 || hdr < 0x12) {
+			nvkm_error(subdev, "invalid/missing timing entry "
+				 "%02x %04x %02x %02x\n",
+				 strap, data, ver, hdr);
+			return -EINVAL;
+		}
+	}
+
+	nv50_ram_timing_calc(ram, timing);
+
+	ret = ram_init(hwsq, subdev);
 	if (ret)
 		return ret;
 
+	/* Determine ram-specific MR values */
+	ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
+	ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
+	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
+
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR3:
+		ret = nvkm_gddr3_calc(&ram->base);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	if (ret)
+		return ret;
+
+	/* Always disable this bit during reclock */
+	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
+
 	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
 	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
 	ram_wr32(hwsq, 0x611200, 0x00003300);
@@ -120,6 +248,7 @@
 	ram_nsec(hwsq, 8000);
 	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
 	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+	ram_nsec(hwsq, 2000);
 
 	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -129,97 +258,149 @@
 
 	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
 	mpll.vco2.max_freq = 0;
-	if (ret == 0) {
-		ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+	if (ret >= 0) {
+		ret = nv04_pll_calc(subdev, &mpll, freq,
 				    &N1, &M1, &N2, &M2, &P);
-		if (ret == 0)
+		if (ret <= 0)
 			ret = -EINVAL;
 	}
 
 	if (ret < 0)
 		return ret;
 
+	/* XXX: 750MHz seems rather arbitrary */
+	if (freq <= 750000) {
+		r100da0 = 0x00000010;
+		r004008 = 0x90000000;
+	} else {
+		r100da0 = 0x00000000;
+		r004008 = 0x80000000;
+	}
+
+	r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
+
 	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
-	ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+	/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
+	 * it have a different rammap bit from DLLoff? */
+	ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
+			next->bios.rammap_00_16_40 << 14);
 	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
-	ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
-					     (P << 22) | (P << 16));
-#if QFX5800NVA0
-	for (i = 0; i < 8; i++)
-		ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
-#endif
-	ram_nsec(hwsq, 96000); /*XXX*/
+	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
+	if (subdev->device->chipset >= 0x96)
+		ram_wr32(hwsq, 0x100da0, r100da0);
+	ram_nsec(hwsq, 64000); /*XXX*/
+	ram_nsec(hwsq, 32000); /*XXX*/
+
 	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
 
 	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
 
 	ram_nsec(hwsq, 12000);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ram_nuke(hwsq, mr[0]); /* force update */
 		ram_mask(hwsq, mr[0], 0x000, 0x000);
 		break;
-	case NV_MEM_TYPE_GDDR3:
-		ram_mask(hwsq, mr[2], 0x000, 0x000);
+	case NVKM_RAM_TYPE_GDDR3:
+		ram_nuke(hwsq, mr[1]); /* force update */
+		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
 		ram_nuke(hwsq, mr[0]); /* force update */
-		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		ram_wr32(hwsq, mr[0], ram->base.mr[0]);
 		break;
 	default:
 		break;
 	}
 
-	ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
+	ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
+	ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
+	ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
+	ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
+	ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
+	ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
+	ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
+	ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
 
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+	if (!next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
+	ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
 
-#if QFX5800NVA0
-	ram_nuke(hwsq, 0x100e24);
-	ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
-	ram_nuke(hwsq, 0x100e20);
-	ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
-#endif
+	/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
+	unk710  = ram_rd32(hwsq, 0x100710) & ~0x00000101;
+	unk714  = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
+	unk718  = ram_rd32(hwsq, 0x100718) & ~0x00000100;
+	unk71c  = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
 
-	ram_mask(hwsq, mr[0], 0x100, 0x100);
-	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	if ( next->bios.ramcfg_00_03_01)
+		unk71c |= 0x00000100;
+	if ( next->bios.ramcfg_00_03_02)
+		unk710 |= 0x00000100;
+	if (!next->bios.ramcfg_00_03_08) {
+		unk710 |= 0x1;
+		unk714 |= 0x20;
+	}
+	if ( next->bios.ramcfg_00_04_04)
+		unk714 |= 0x70000000;
+	if ( next->bios.ramcfg_00_04_20)
+		unk718 |= 0x00000100;
+
+	ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
+	ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
+	ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
+	ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
+
+	if (next->bios.rammap_00_16_20) {
+		ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
+					 next->bios.ramcfg_00_06 << 8 |
+					 next->bios.ramcfg_00_05);
+		ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
+					 next->bios.ramcfg_00_08);
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
+	} else {
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
+	}
+	ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
+
+	/* Reset DLL */
+	if (!next->bios.ramcfg_DLLoff)
+		nvkm_sddr2_dll_reset(hwsq);
 
 	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
 	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
 	ram_wr32(hwsq, 0x611200, 0x00003330);
 	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+
+	if (next->bios.rammap_00_17_02)
+		ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
+	if (!next->bios.rammap_00_16_40)
+		ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
+	if (next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
+
 	return 0;
 }
 
 static int
-nv50_ram_prog(struct nvkm_fb *pfb)
+nv50_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-
-	ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct nv50_ram *ram = nv50_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-nv50_ram_tidy(struct nvkm_fb *pfb)
+nv50_ram_tidy(struct nvkm_ram *base)
 {
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-	ram_exec(hwsq, false);
+	struct nv50_ram *ram = nv50_ram(base);
+	ram_exec(&ram->hwsq, false);
 }
 
 void
-__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
+__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
 	struct nvkm_mm_node *this;
 
@@ -227,14 +408,14 @@
 		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
 
 		list_del(&this->rl_entry);
-		nvkm_mm_free(&pfb->vram, &this);
+		nvkm_mm_free(&ram->vram, &this);
 	}
 
-	nvkm_mm_free(&pfb->tags, &mem->tag);
+	nvkm_mm_free(&ram->tags, &mem->tag);
 }
 
 void
-nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
 	struct nvkm_mem *mem = *pmem;
 
@@ -242,19 +423,19 @@
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *heap = &pfb->vram;
-	struct nvkm_mm *tags = &pfb->tags;
+	struct nvkm_mm *heap = &ram->vram;
+	struct nvkm_mm *tags = &ram->tags;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
@@ -262,17 +443,17 @@
 	int back = (memtype & 0x800);
 	int min, max, ret;
 
-	max = (size >> 12);
-	min = ncmin ? (ncmin >> 12) : max;
-	align >>= 12;
+	max = (size >> NVKM_RAM_MM_SHIFT);
+	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
+	align >>= NVKM_RAM_MM_SHIFT;
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
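+		/* tags can only be allocated for 64KiB-aligned requests */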
-		if (align == 16) {
+		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
 			int n = (max >> 4) * comp;
 
 			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@@ -295,34 +476,45 @@
 		else
 			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		max -= r->length;
 	} while (max);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
+static const struct nvkm_ram_func
+nv50_ram_func = {
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = nv50_ram_calc,
+	.prog = nv50_ram_prog,
+	.tidy = nv50_ram_tidy,
+};
+
 static u32
-nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
+nv50_fb_vram_rblock(struct nvkm_ram *ram)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int colbits, rowbitsa, rowbitsb, banks;
 	u64 rowsize, predicted;
 	u32 r0, r4, rt, rblock_size;
 
-	r0 = nv_rd32(pfb, 0x100200);
-	r4 = nv_rd32(pfb, 0x100204);
-	rt = nv_rd32(pfb, 0x100250);
-	nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		 r0, r4, rt, nv_rd32(pfb, 0x001540));
+	r0 = nvkm_rd32(device, 0x100200);
+	r4 = nvkm_rd32(device, 0x100204);
+	rt = nvkm_rd32(device, 0x100250);
+	nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
+		   r0, r4, rt, nvkm_rd32(device, 0x001540));
 
 	colbits  =  (r4 & 0x0000f000) >> 12;
 	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -335,103 +527,94 @@
 		predicted += rowsize << rowbitsb;
 
 	if (predicted != ram->size) {
-		nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
-			(u32)(ram->size >> 20));
+		nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
+			  (u32)(ram->size >> 20));
 	}
 
 	rblock_size = rowsize;
 	if (rt & 1)
 		rblock_size *= 3;
 
-	nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+	nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
 	return rblock_size;
 }
 
 int
-nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nv50_ram_ctor(const struct nvkm_ram_func *func,
+	      struct nvkm_fb *fb, struct nvkm_ram *ram)
 {
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
+	struct nvkm_device *device = fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u64 size = nvkm_rd32(device, 0x10020c);
+	u32 tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
-	ram = *pobject;
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
-
-	ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
-	ram->parts = hweight8(ram->part_mask);
-
-	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
+	case 0: type = NVKM_RAM_TYPE_DDR1; break;
 	case 1:
-		if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-			ram->type = NV_MEM_TYPE_DDR3;
+		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
+			type = NVKM_RAM_TYPE_DDR3;
 		else
-			ram->type = NV_MEM_TYPE_DDR2;
+			type = NVKM_RAM_TYPE_DDR2;
 		break;
-	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
+	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
+	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
 	default:
 		break;
 	}
 
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-			   (rsvd_head + rsvd_tail),
-			   nv50_fb_vram_rblock(pfb, ram) >> 12);
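+	/* the low byte of 0x10020c holds bits 32..39 of the vram size */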
+	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
+
+	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
 	if (ret)
 		return ret;
 
-	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
-	ram->get = nv50_ram_get;
-	ram->put = nv50_ram_put;
-	return 0;
+	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
+	ram->parts = hweight8(ram->part_mask);
+	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
+	nvkm_mm_fini(&ram->vram);
+
+	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
 
-static int
-nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 datasize,
-	      struct nvkm_object **pobject)
+int
+nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nv50_ram *ram;
 	int ret, i;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = nv50_ram_calc;
-		ram->base.prog = nv50_ram_prog;
-		ram->base.tidy = nv50_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
 	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
 	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
 	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
 	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
 	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
 	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
 	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
-	for (i = 0; i < 8; i++)
-		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
+	ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
+	ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
+	ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
+	ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
+	ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
+	ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
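+	/* one 0x100da0 instance per memory partition, at a 4-byte stride */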
+	ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
 	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
 	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
 	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
@@ -453,13 +636,3 @@
 
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index afab42d..86bf674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -65,7 +65,7 @@
 	case 0x10:
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT & 3;
 		break;
 	case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 1084435..b4edc97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -53,7 +53,7 @@
 ramddr3_wr[] = {
 	{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 14, 7 }, { 16, 0 },
+	{ 14, 7 }, { 15, 7 }, { 16, 0 },
 	{ -1 }
 };
 
@@ -61,7 +61,7 @@
 ramddr3_cwl[] = {
 	{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 9, 4 },
+	{ 9, 4 }, { 10, 5 },
 	{ -1 }
 };
 
@@ -79,7 +79,7 @@
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
 		break;
 	case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index b7b7193..f414497 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,31 +21,34 @@
  *
  * Authors: Martin Peres
  */
-#include <subdev/fuse.h>
+#include "priv.h"
 
-int
-_nvkm_fuse_init(struct nvkm_object *object)
+u32
+nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	return nvkm_subdev_init(&fuse->base);
+	return fuse->func->read(fuse, addr);
 }
 
-void
-_nvkm_fuse_dtor(struct nvkm_object *object)
+static void *
+nvkm_fuse_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	nvkm_subdev_destroy(&fuse->base);
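+	/* hand back the container so the subdev core can kfree() it */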
+	return nvkm_fuse(subdev);
 }
 
+static const struct nvkm_subdev_func
+nvkm_fuse = {
+	.dtor = nvkm_fuse_dtor,
+};
+
 int
-nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fuse **pfuse)
 {
 	struct nvkm_fuse *fuse;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE",
-				  "fuse", length, pobject);
-	fuse = *pobject;
-	return ret;
+	if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+	fuse->func = func;
+	spin_lock_init(&fuse->lock);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 393ef3a0..13671fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -23,56 +23,31 @@
  */
 #include "priv.h"
 
-struct gf100_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-gf100_fuse_rd32(struct nvkm_object *object, u64 addr)
+gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, unk, val;
 
 	/* racy if another part of nvkm starts writing to these regs */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
-	unk = nv_mask(priv, 0x21000, 0x1, 0x1);
-	val = nv_rd32(priv, 0x21100 + addr);
-	nv_wr32(priv, 0x21000, unk);
-	nv_wr32(priv, 0x22400, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
+	unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
+	val = nvkm_rd32(device, 0x021100 + addr);
+	nvkm_wr32(device, 0x021000, unk);
+	nvkm_wr32(device, 0x022400, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
-
-static int
-gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gf100_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0xC0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gf100_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+gf100_fuse = {
+	.read = gf100_fuse_read,
 };
+
+int
+gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 0b256aa..9aff4ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,40 +23,20 @@
  */
 #include "priv.h"
 
-struct gm107_fuse_priv {
-	struct nvkm_fuse base;
-};
-
 static u32
-gm107_fuse_rd32(struct nvkm_object *object, u64 addr)
+gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
-	return nv_rd32(priv, 0x21100 + addr);
+	struct nvkm_device *device = fuse->subdev.device;
+	return nvkm_rd32(device, 0x021100 + addr);
 }
 
-
-static int
-gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gm107_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-
-	return ret;
-}
-
-struct nvkm_oclass
-gm107_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gm107_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+gm107_fuse = {
+	.read = gm107_fuse_read,
 };
+
+int
+gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 0d2afc4..514c193 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -23,54 +23,29 @@
  */
 #include "priv.h"
 
-struct nv50_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-nv50_fuse_rd32(struct nvkm_object *object, u64 addr)
+nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nv50_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, val;
 
 	/* racy if another part of nvkm starts writing to this reg */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
-	val = nv_rd32(priv, 0x21000 + addr);
-	nv_wr32(priv, 0x1084, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
+	val = nvkm_rd32(device, 0x021000 + addr);
+	nvkm_wr32(device, 0x001084, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
-
-static int
-nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
-}
-
-struct nvkm_oclass
-nv50_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = nv50_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+nv50_fuse = {
+	.read = nv50_fuse_read,
 };
+
+int
+nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 7e050f7..b0390b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,7 +1,12 @@
 #ifndef __NVKM_FUSE_PRIV_H__
 #define __NVKM_FUSE_PRIV_H__
+#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
 #include <subdev/fuse.h>
 
-int _nvkm_fuse_init(struct nvkm_object *object);
-void _nvkm_fuse_dtor(struct nvkm_object *object);
+struct nvkm_fuse_func {
+	u32 (*read)(struct nvkm_fuse *, u32 addr);
+};
+
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
+		   int index, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index ea42a9e..e52c5e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -2,5 +2,5 @@
 nvkm-y += nvkm/subdev/gpio/nv10.o
 nvkm-y += nvkm/subdev/gpio/nv50.o
 nvkm-y += nvkm/subdev/gpio/g94.o
-nvkm-y += nvkm/subdev/gpio/gf110.o
+nvkm-y += nvkm/subdev/gpio/gf119.o
 nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index dea5816..d45ec99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,28 +23,33 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 
 static int
 nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
+	return gpio->func->drive(gpio, line, dir, out);
 }
 
 static int
 nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->sense ? impl->sense(gpio, line) : -ENODEV;
+	return gpio->func->sense(gpio, line);
 }
 
-static int
+void
+nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
+{
+	if (gpio->func->reset)
+		gpio->func->reset(gpio, func);
+}
+
+int
 nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
 	       struct dcb_gpio_func *func)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, len;
 	u16 data;
 
@@ -56,11 +61,11 @@
 		return 0;
 
 	/* Apple iMac G4 NV18 */
-	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+	if (device->quirk && device->quirk->tv_gpio) {
 		if (tag == DCB_GPIO_TVDAC0) {
 			*func = (struct dcb_gpio_func) {
 				.func = DCB_GPIO_TVDAC0,
-				.line = 4,
+				.line = device->quirk->tv_gpio,
 				.log[0] = 0,
 				.log[1] = 1,
 			};
@@ -71,7 +76,7 @@
 	return -ENOENT;
 }
 
-static int
+int
 nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
 {
 	struct dcb_gpio_func func;
@@ -87,7 +92,7 @@
 	return ret;
 }
 
-static int
+int
 nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
 {
 	struct dcb_gpio_func func;
@@ -107,16 +112,14 @@
 nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 0);
+	gpio->func->intr_mask(gpio, type, 1 << index, 0);
 }
 
 static void
 nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 1 << index);
+	gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
 }
 
 static int
@@ -133,16 +136,22 @@
 	return -EINVAL;
 }
 
+static const struct nvkm_event_func
+nvkm_gpio_intr_func = {
+	.ctor = nvkm_gpio_intr_ctor,
+	.init = nvkm_gpio_intr_init,
+	.fini = nvkm_gpio_intr_fini,
+};
+
 static void
 nvkm_gpio_intr(struct nvkm_subdev *subdev)
 {
 	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
 	u32 hi, lo, i;
 
-	impl->intr_stat(gpio, &hi, &lo);
+	gpio->func->intr_stat(gpio, &hi, &lo);
 
-	for (i = 0; (hi | lo) && i < impl->lines; i++) {
+	for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
 		struct nvkm_gpio_ntfy_rep rep = {
 			.mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
 				(NVKM_GPIO_LO * !!(lo & (1 << i))),
@@ -151,24 +160,15 @@
 	}
 }
 
-static const struct nvkm_event_func
-nvkm_gpio_intr_func = {
-	.ctor = nvkm_gpio_intr_ctor,
-	.init = nvkm_gpio_intr_init,
-	.fini = nvkm_gpio_intr_fini,
-};
-
-int
-_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	const struct nvkm_gpio_impl *impl = (void *)object->oclass;
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	u32 mask = (1 << impl->lines) - 1;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	u32 mask = (1 << gpio->func->lines) - 1;
 
-	impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
-	impl->intr_stat(gpio, &mask, &mask);
-
-	return nvkm_subdev_fini(&gpio->base, suspend);
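+	/* mask off every line, then read intr_stat to ack anything pending */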
+	gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+	gpio->func->intr_stat(gpio, &mask, &mask);
+	return 0;
 }
 
 static struct dmi_system_id gpio_reset_ids[] = {
@@ -182,70 +182,43 @@
 	{ }
 };
 
-int
-_nvkm_gpio_init(struct nvkm_object *object)
+static int
+nvkm_gpio_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	int ret;
-
-	ret = nvkm_subdev_init(&gpio->base);
-	if (ret)
-		return ret;
-
-	if (gpio->reset && dmi_check_system(gpio_reset_ids))
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
-
-	return ret;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	if (dmi_check_system(gpio_reset_ids))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+	return 0;
 }
 
-void
-_nvkm_gpio_dtor(struct nvkm_object *object)
+static void *
+nvkm_gpio_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = (void *)object;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
 	nvkm_event_fini(&gpio->event);
-	nvkm_subdev_destroy(&gpio->base);
+	return gpio;
 }
 
-int
-nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	const struct nvkm_gpio_impl *impl = (void *)oclass;
-	struct nvkm_gpio *gpio;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
-				  "gpio", length, pobject);
-	gpio = *pobject;
-	if (ret)
-		return ret;
-
-	gpio->find = nvkm_gpio_find;
-	gpio->set  = nvkm_gpio_set;
-	gpio->get  = nvkm_gpio_get;
-	gpio->reset = impl->reset;
-
-	ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
-			      &gpio->event);
-	if (ret)
-		return ret;
-
-	nv_subdev(gpio)->intr = nvkm_gpio_intr;
-	return 0;
-}
+static const struct nvkm_subdev_func
+nvkm_gpio = {
+	.dtor = nvkm_gpio_dtor,
+	.init = nvkm_gpio_init,
+	.fini = nvkm_gpio_fini,
+	.intr = nvkm_gpio_intr,
+};
 
 int
-_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_gpio **pgpio)
 {
 	struct nvkm_gpio *gpio;
-	int ret;
 
-	ret = nvkm_gpio_create(parent, engine, oclass, &gpio);
-	*pobject = nv_object(gpio);
-	if (ret)
-		return ret;
+	if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
+		return -ENOMEM;
 
-	return 0;
+	nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+	gpio->func = func;
+
+	return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
+			       &gpio->event);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 12b3e01..6dcda55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -26,21 +26,23 @@
 void
 g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00e054);
-	u32 intr1 = nv_rd32(gpio, 0x00e074);
-	u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00e054);
+	u32 intr1 = nvkm_rd32(device, 0x00e074);
+	u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
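+	/* each reg packs hi state in its low half, lo state in its high half */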
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr0);
-	nv_wr32(gpio, 0x00e074, intr1);
+	nvkm_wr32(device, 0x00e054, intr0);
+	nvkm_wr32(device, 0x00e074, intr1);
 }
 
 void
 g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00e050);
-	u32 inte1 = nv_rd32(gpio, 0x00e070);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00e050);
+	u32 inte1 = nvkm_rd32(device, 0x00e070);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte0);
-	nv_wr32(gpio, 0x00e070, inte1);
+	nvkm_wr32(device, 0x00e050, inte0);
+	nvkm_wr32(device, 0x00e070, inte1);
 }
 
-struct nvkm_oclass *
-g94_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+g94_gpio = {
 	.lines = 32,
 	.intr_stat = g94_gpio_intr_stat,
 	.intr_mask = g94_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
deleted file mode 100644
index 2c3bb25..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-void
-gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
-{
-	struct nvkm_bios *bios = nvkm_bios(gpio);
-	u8 ver, len;
-	u16 entry;
-	int ent = -1;
-
-	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
-		u32 data = nv_ro32(bios, entry);
-		u8  line =   (data & 0x0000003f);
-		u8  defs = !!(data & 0x00000080);
-		u8  func =   (data & 0x0000ff00) >> 8;
-		u8  unk0 =   (data & 0x00ff0000) >> 16;
-		u8  unk1 =   (data & 0x1f000000) >> 24;
-
-		if ( func  == DCB_GPIO_UNUSED ||
-		    (match != DCB_GPIO_UNUSED && match != func))
-			continue;
-
-		gpio->set(gpio, 0, func, line, defs);
-
-		nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
-		if (unk1--)
-			nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
-	}
-}
-
-int
-gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
-{
-	u32 data = ((dir ^ 1) << 13) | (out << 12);
-	nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
-	nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
-	return 0;
-}
-
-int
-gf110_gpio_sense(struct nvkm_gpio *gpio, int line)
-{
-	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
-}
-
-struct nvkm_oclass *
-gf110_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
-	.lines = 32,
-	.intr_stat = g94_gpio_intr_stat,
-	.intr_mask = g94_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
new file mode 100644
index 0000000..bb7400d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+void
+gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	u8 ver, len;
+	u16 entry;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+		u32 data = nvbios_rd32(bios, entry);
+		u8  line =   (data & 0x0000003f);
+		u8  defs = !!(data & 0x00000080);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  unk0 =   (data & 0x00ff0000) >> 16;
+		u8  unk1 =   (data & 0x1f000000) >> 24;
+
+		if ( func  == DCB_GPIO_UNUSED ||
+		    (match != DCB_GPIO_UNUSED && match != func))
+			continue;
+
+		nvkm_gpio_set(gpio, 0, func, line, defs);
+
+		nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
+		if (unk1--)
+			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+	}
+}
+
+int
+gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 data = ((dir ^ 1) << 13) | (out << 12);
+	nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000, data);
+	nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+	return 0;
+}
+
+int
+gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
+}
+
+static const struct nvkm_gpio_func
+gf119_gpio = {
+	.lines = 32,
+	.intr_stat = g94_gpio_intr_stat,
+	.intr_mask = g94_gpio_intr_mask,
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 42fd2fa..3f45afd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -26,21 +26,23 @@
 static void
 gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00dc00);
-	u32 intr1 = nv_rd32(gpio, 0x00dc80);
-	u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00dc00);
+	u32 intr1 = nvkm_rd32(device, 0x00dc80);
+	u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00dc00, intr0);
-	nv_wr32(gpio, 0x00dc80, intr1);
+	nvkm_wr32(device, 0x00dc00, intr0);
+	nvkm_wr32(device, 0x00dc80, intr1);
 }
 
 void
 gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00dc08);
-	u32 inte1 = nv_rd32(gpio, 0x00dc88);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00dc08);
+	u32 inte1 = nvkm_rd32(device, 0x00dc88);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00dc08, inte0);
-	nv_wr32(gpio, 0x00dc88, inte1);
+	nvkm_wr32(device, 0x00dc08, inte0);
+	nvkm_wr32(device, 0x00dc88, inte1);
 }
 
-struct nvkm_oclass *
-gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+gk104_gpio = {
 	.lines = 32,
 	.intr_stat = gk104_gpio_intr_stat,
 	.intr_mask = gk104_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 2b29515..ae3499b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -28,19 +28,20 @@
 static int
 nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	if (line < 2) {
 		line = line * 16;
-		line = nv_rd32(gpio, 0x600818) >> line;
+		line = nvkm_rd32(device, 0x600818) >> line;
 		return !!(line & 0x0100);
 	} else
 	if (line < 10) {
 		line = (line - 2) * 4;
-		line = nv_rd32(gpio, 0x60081c) >> line;
+		line = nvkm_rd32(device, 0x60081c) >> line;
 		return !!(line & 0x04);
 	} else
 	if (line < 14) {
 		line = (line - 10) * 4;
-		line = nv_rd32(gpio, 0x600850) >> line;
+		line = nvkm_rd32(device, 0x600850) >> line;
 		return !!(line & 0x04);
 	}
 
@@ -50,6 +51,7 @@
 static int
 nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, mask, data;
 
 	if (line < 2) {
@@ -73,43 +75,44 @@
 		return -EINVAL;
 	}
 
-	nv_mask(gpio, reg, mask << line, data << line);
+	nvkm_mask(device, reg, mask << line, data << line);
 	return 0;
 }
 
 static void
 nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x001104);
-	u32 stat = nv_rd32(gpio, 0x001144) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x001104);
+	u32 stat = nvkm_rd32(device, 0x001144) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x001104, intr);
+	nvkm_wr32(device, 0x001104, intr);
 }
 
 static void
 nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x001144);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x001144);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x001144, inte);
+	nvkm_wr32(device, 0x001144, inte);
 }
 
-struct nvkm_oclass *
-nv10_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv10_gpio = {
 	.lines = 16,
 	.intr_stat = nv10_gpio_intr_stat,
 	.intr_mask = nv10_gpio_intr_mask,
 	.drive = nv10_gpio_drive,
 	.sense = nv10_gpio_sense,
-}.base;
+};
+
+int
+nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 6a03103..8996649 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -26,14 +26,15 @@
 void
 nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
 
 	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
 		static const u32 regs[] = { 0xe100, 0xe28c };
-		u32 data = nv_ro32(bios, entry);
+		u32 data = nvbios_rd32(bios, entry);
 		u8  line =   (data & 0x0000001f);
 		u8  func =   (data & 0x0000ff00) >> 8;
 		u8  defs = !!(data & 0x01000000);
@@ -47,9 +48,9 @@
 		    (match != DCB_GPIO_UNUSED && match != func))
 			continue;
 
-		gpio->set(gpio, 0, func, line, defs);
+		nvkm_gpio_set(gpio, 0, func, line, defs);
 
-		nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
+		nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
 	}
 }
 
@@ -69,60 +70,63 @@
 int
 nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
+	nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
 	return 0;
 }
 
 int
 nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	return !!(nv_rd32(gpio, reg) & (4 << shift));
+	return !!(nvkm_rd32(device, reg) & (4 << shift));
 }
 
 static void
 nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x00e054);
-	u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e054);
+	u32 stat = nvkm_rd32(device, 0x00e050) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr);
+	nvkm_wr32(device, 0x00e054, intr);
 }
 
 static void
 nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x00e050);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x00e050);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte);
+	nvkm_wr32(device, 0x00e050, inte);
 }
 
-struct nvkm_oclass *
-nv50_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv50_gpio = {
 	.lines = 16,
 	.intr_stat = nv50_gpio_intr_stat,
 	.intr_mask = nv50_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 382f8d4..371bcdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,33 +1,9 @@
 #ifndef __NVKM_GPIO_PRIV_H__
 #define __NVKM_GPIO_PRIV_H__
+#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
 #include <subdev/gpio.h>
 
-#define nvkm_gpio_create(p,e,o,d)                                           \
-	nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_gpio_destroy(p) ({                                             \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_dtor(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_init(p) ({                                                \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_init(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_fini(p,s) ({                                              \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_fini(nv_object(gpio), (s));                              \
-})
-
-int  nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-int  _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void _nvkm_gpio_dtor(struct nvkm_object *);
-int  _nvkm_gpio_init(struct nvkm_object *);
-int  _nvkm_gpio_fini(struct nvkm_object *, bool);
-
-struct nvkm_gpio_impl {
-	struct nvkm_oclass base;
+struct nvkm_gpio_func {
 	int lines;
 
 	/* read and ack pending interrupts, returning only data
@@ -51,6 +27,9 @@
 	void (*reset)(struct nvkm_gpio *, u8);
 };
 
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
+		   int index, struct nvkm_gpio **);
+
 void nv50_gpio_reset(struct nvkm_gpio *, u8);
 int  nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
 int  nv50_gpio_sense(struct nvkm_gpio *, int);
@@ -58,7 +37,7 @@
 void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
 void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
 
-void gf110_gpio_reset(struct nvkm_gpio *, u8);
-int  gf110_gpio_drive(struct nvkm_gpio *, int, int, int);
-int  gf110_gpio_sense(struct nvkm_gpio *, int);
+void gf119_gpio_reset(struct nvkm_gpio *, u8);
+int  gf119_gpio_drive(struct nvkm_gpio *, int, int, int);
+int  gf119_gpio_sense(struct nvkm_gpio *, int);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index d683074..1f730613 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,16 +1,30 @@
 nvkm-y += nvkm/subdev/i2c/base.o
-nvkm-y += nvkm/subdev/i2c/anx9805.o
-nvkm-y += nvkm/subdev/i2c/aux.o
-nvkm-y += nvkm/subdev/i2c/bit.o
-nvkm-y += nvkm/subdev/i2c/pad.o
-nvkm-y += nvkm/subdev/i2c/padnv04.o
-nvkm-y += nvkm/subdev/i2c/padg94.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
 nvkm-y += nvkm/subdev/i2c/nv04.o
 nvkm-y += nvkm/subdev/i2c/nv4e.o
 nvkm-y += nvkm/subdev/i2c/nv50.o
 nvkm-y += nvkm/subdev/i2c/g94.o
-nvkm-y += nvkm/subdev/i2c/gf110.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
+nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
 nvkm-y += nvkm/subdev/i2c/gm204.o
+
+nvkm-y += nvkm/subdev/i2c/pad.o
+nvkm-y += nvkm/subdev/i2c/padnv04.o
+nvkm-y += nvkm/subdev/i2c/padnv4e.o
+nvkm-y += nvkm/subdev/i2c/padnv50.o
+nvkm-y += nvkm/subdev/i2c/padg94.o
+nvkm-y += nvkm/subdev/i2c/padgf119.o
+nvkm-y += nvkm/subdev/i2c/padgm204.o
+
+nvkm-y += nvkm/subdev/i2c/bus.o
+nvkm-y += nvkm/subdev/i2c/busnv04.o
+nvkm-y += nvkm/subdev/i2c/busnv4e.o
+nvkm-y += nvkm/subdev/i2c/busnv50.o
+nvkm-y += nvkm/subdev/i2c/busgf119.o
+nvkm-y += nvkm/subdev/i2c/bit.o
+
+nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgm204.o
+
+nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index d17dd1c..b7b01c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,184 +21,57 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "port.h"
+#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
+#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
+#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
+#include "aux.h"
+#include "bus.h"
 
-struct anx9805_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 ctrl;
+struct anx9805_pad {
+	struct nvkm_i2c_pad base;
+	struct nvkm_i2c_bus *bus;
+	u8 addr;
+};
+
+struct anx9805_bus {
+	struct nvkm_i2c_bus base;
+	struct anx9805_pad *pad;
+	u8 addr;
 };
 
 static int
-anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh)
+anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
 {
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
-	u8 tmp, i;
-
-	DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
-
-	nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
-	nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
-	nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
-	nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
-
-	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
-		mdelay(5);
-		if (i++ == 100) {
-			nv_error(port, "link training timed out\n");
-			return -ETIMEDOUT;
-		}
-	}
-
-	if (tmp & 0x70) {
-		nv_error(port, "link training failed: 0x%02x\n", tmp);
-		return -EIO;
-	}
-
-	return 1;
-}
-
-static int
-anx9805_aux(struct nvkm_i2c_port *port, bool retry,
-	    u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
-	int i, ret = -ETIMEDOUT;
-	u8 buf[16] = {};
-	u8 tmp;
-
-	DBG("%02x %05x %d\n", type, addr, size);
-
-	tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
-
-	nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
-	if (!(type & 1)) {
-		memcpy(buf, data, size);
-		DBG("%16ph", buf);
-		for (i = 0; i < size; i++)
-			nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
-	}
-	nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
-	nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >>  0);
-	nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >>  8);
-	nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
-	nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
-
-	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
-		mdelay(5);
-		if (i++ == 32)
-			goto done;
-	}
-
-	if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
-		ret = -EIO;
-		goto done;
-	}
-
-	if (type & 1) {
-		for (i = 0; i < size; i++)
-			buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
-		DBG("%16ph", buf);
-		memcpy(data, buf, size);
-	}
-
-	ret = 0;
-done:
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
-	return ret;
-}
-
-static const struct nvkm_i2c_func
-anx9805_aux_func = {
-	.aux = anx9805_aux,
-	.lnk_ctl = anx9805_train,
-};
-
-static int
-anx9805_aux_chan_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
-{
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *chan;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &anx9805_aux_func,
-				   &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		chan->addr = 0x38;
-		chan->ctrl = 0x39;
-		break;
-	case 0x0e:
-		chan->addr = 0x3c;
-		chan->ctrl = 0x3b;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-anx9805_aux_ofuncs = {
-	.ctor =  anx9805_aux_chan_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
-};
-
-static int
-anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct anx9805_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
+	struct anx9805_bus *bus = anx9805_bus(base);
+	struct anx9805_pad *pad = bus->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
 	struct i2c_msg *msg = msgs;
 	int ret = -ETIMEDOUT;
 	int i, j, cnt = num;
 	u8 seg = 0x00, off = 0x00, tmp;
 
-	tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, port->addr, 0x43, 0x05);
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
 	mdelay(5);
 
 	while (cnt--) {
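+		/* EDID reads (addr 0x50) go through the chip's E-DDC segment/offset regs */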
 		if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
-			nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
-			nv_wri2cr(mast, port->addr, 0x41, seg);
-			nv_wri2cr(mast, port->addr, 0x42, off);
-			nv_wri2cr(mast, port->addr, 0x44, msg->len);
-			nv_wri2cr(mast, port->addr, 0x45, 0x00);
-			nv_wri2cr(mast, port->addr, 0x43, 0x01);
+			nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
+			nvkm_wri2cr(adap, bus->addr, 0x41, seg);
+			nvkm_wri2cr(adap, bus->addr, 0x42, off);
+			nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
+			nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
+			nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
 			for (i = 0; i < msg->len; i++) {
 				j = 0;
-				while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+				while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
 					mdelay(5);
 					if (j++ == 32)
 						goto done;
 				}
-				msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+				msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
 			}
 		} else
 		if (!(msg->flags & I2C_M_RD)) {
@@ -217,76 +90,189 @@
 
 	ret = num;
 done:
-	nv_wri2cr(mast, port->addr, 0x43, 0x00);
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
 	return ret;
 }
 
-static u32
-anx9805_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm
-anx9805_i2c_algo = {
-	.master_xfer = anx9805_xfer,
-	.functionality = anx9805_func
-};
-
-static const struct nvkm_i2c_func
-anx9805_i2c_func = {
+static const struct nvkm_i2c_bus_func
+anx9805_bus_func = {
+	.xfer = anx9805_bus_xfer,
 };
 
 static int
-anx9805_ddc_port_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
+anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_bus **pbus)
 {
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *port;
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_bus *bus;
 	int ret;
 
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &anx9805_i2c_algo, &anx9805_i2c_func, &port);
-	*pobject = nv_object(port);
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+	bus->pad = pad;
+
+	ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
 	if (ret)
 		return ret;
 
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		port->addr = 0x3d;
-		port->ctrl = 0x39;
-		break;
-	case 0x0e:
-		port->addr = 0x3f;
-		port->ctrl = 0x3b;
-		break;
+	switch (pad->addr) {
+	case 0x39: bus->addr = 0x3d; break;
+	case 0x3b: bus->addr = 0x3f; break;
 	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
+		return -ENOSYS;
 	}
 
 	return 0;
 }
 
-static struct nvkm_ofuncs
-anx9805_ddc_ofuncs = {
-	.ctor =  anx9805_ddc_port_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
+struct anx9805_aux {
+	struct nvkm_i2c_aux base;
+	struct anx9805_pad *pad;
+	u8 addr;
 };
 
-struct nvkm_oclass
-nvkm_anx9805_sclass[] = {
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
-	{}
+static int
+anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	int i, ret = -ETIMEDOUT;
+	u8 buf[16] = {};
+	u8 tmp;
+
+	AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
+
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
+
+	nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
+	if (!(type & 1)) {
+		memcpy(buf, data, size);
+		AUX_DBG(&aux->base, "%16ph", buf);
+		for (i = 0; i < size; i++)
+			nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
+	}
+	nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+	nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >>  0);
+	nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >>  8);
+	nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
+	nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
+
+	i = 0;
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
+		mdelay(5);
+		if (i++ == 32)
+			goto done;
+	}
+
+	if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
+		ret = -EIO;
+		goto done;
+	}
+
+	if (type & 1) {
+		for (i = 0; i < size; i++)
+			buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
+		AUX_DBG(&aux->base, "%16ph", buf);
+		memcpy(data, buf, size);
+	}
+
+	ret = 0;
+done:
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
+	return ret;
+}
+
+static int
+anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
+		    int link_nr, int link_bw, bool enh)
+{
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	u8 tmp, i;
+
+	AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
+		link_nr, link_bw, enh);
+
+	nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
+	nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+	nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
+	nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
+
+	i = 0;
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
+		mdelay(5);
+		if (i++ == 100) {
+			AUX_ERR(&aux->base, "link training timeout");
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (tmp & 0x70) {
+		AUX_ERR(&aux->base, "link training failed");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_i2c_aux_func
+anx9805_aux_func = {
+	.xfer = anx9805_aux_xfer,
+	.lnk_ctl = anx9805_aux_lnk_ctl,
 };
+
+static int
+anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_aux **pbus)
+{
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_aux *aux;
+	int ret;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &aux->base;
+	aux->pad = pad;
+
+	ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
+	if (ret)
+		return ret;
+
+	switch (pad->addr) {
+	case 0x39: aux->addr = 0x38; break;
+	case 0x3b: aux->addr = 0x3c; break;
+	default:
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_i2c_pad_func
+anx9805_pad_func = {
+	.bus_new_4 = anx9805_bus_new,
+	.aux_new_6 = anx9805_aux_new,
+};
+
+int
+anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
+		struct nvkm_i2c_pad **ppad)
+{
+	struct anx9805_pad *pad;
+
+	if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppad = &pad->base;
+
+	nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
+	pad->bus = bus;
+	pad->addr = addr;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 1c18860..f0851d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,50 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
-
-int
-nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 9, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
-
-int
-nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 8, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
+#include "aux.h"
+#include "pad.h"
 
 static int
-aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
+	struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
 	struct i2c_msg *msg = msgs;
 	int ret, mcnt = num;
 
-	if (!port->func->aux)
-		return -ENODEV;
-
-	ret = i2c->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
@@ -84,9 +51,9 @@
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
+			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
 			if (ret < 0) {
-				i2c->release(port);
+				nvkm_i2c_aux_release(aux);
 				return ret;
 			}
 
@@ -97,17 +64,111 @@
 		msg++;
 	}
 
-	i2c->release(port);
+	nvkm_i2c_aux_release(aux);
 	return num;
 }
 
 static u32
-aux_func(struct i2c_adapter *adap)
+nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-const struct i2c_algorithm nvkm_i2c_aux_algo = {
-	.master_xfer = aux_xfer,
-	.functionality = aux_func
+const struct i2c_algorithm
+nvkm_i2c_aux_i2c_algo = {
+	.master_xfer = nvkm_i2c_aux_i2c_xfer,
+	.functionality = nvkm_i2c_aux_i2c_func
 };
+
+void
+nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
+	if (monitor)
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
+	else
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&aux->mutex);
+}
+
+int
+nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	int ret;
+	AUX_TRACE(aux, "acquire");
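+	/* serialise channel users, then switch the shared pad into AUX mode */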
+	mutex_lock(&aux->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+	if (ret)
+		mutex_unlock(&aux->mutex);
+	return ret;
+}
+
+int
+nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
+		  u32 addr, u8 *data, u8 size)
+{
+	return aux->func->xfer(aux, retry, type, addr, data, size);
+}
+
+int
+nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
+{
+	if (aux->func->lnk_ctl)
+		return aux->func->lnk_ctl(aux, nr, bw, ef);
+	return -ENODEV;
+}
+
+void
+nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
+{
+	struct nvkm_i2c_aux *aux = *paux;
+	if (aux && !WARN_ON(!aux->func)) {
+		AUX_TRACE(aux, "dtor");
+		list_del(&aux->head);
+		i2c_del_adapter(&aux->i2c);
+		kfree(*paux);
+		*paux = NULL;
+	}
+}
+
+int
+nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+
+	aux->func = func;
+	aux->pad = pad;
+	aux->id = id;
+	mutex_init(&aux->mutex);
+	list_add_tail(&aux->head, &pad->i2c->aux);
+	AUX_TRACE(aux, "ctor");
+
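+	/* expose the channel to the rest of the kernel as a standard i2c adapter */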
+	snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
+		 dev_name(device->dev), id);
+	aux->i2c.owner = THIS_MODULE;
+	aux->i2c.dev.parent = device->dev;
+	aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
+	return i2c_add_adapter(&aux->i2c);
+}
+
+int
+nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux **paux)
+{
+	if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_aux_ctor(func, pad, id, *paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
new file mode 100644
index 0000000..35a892e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_I2C_AUX_H__
+#define __NVKM_I2C_AUX_H__
+#include "pad.h"
+
+struct nvkm_i2c_aux_func {
+	int  (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+		     u32 addr, u8 *data, u8 size);
+	int  (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			bool enhanced_framing);
+};
+
+int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+
+int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+
+#define AUX_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_aux *_aux = (b);                                       \
+	nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a);   \
+} while(0)
+#define AUX_ERR(b,f,a...) AUX_MSG((b), error, f, ##a)
+#define AUX_DBG(b,f,a...) AUX_MSG((b), debug, f, ##a)
+#define AUX_TRACE(b,f,a...) AUX_MSG((b), trace, f, ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
new file mode 100644
index 0000000..954f5b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
+#include "aux.h"
+
+struct g94_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+g94_i2c_aux_fini(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+g94_i2c_aux_init(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* unclear which to use, or if it matters... */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			g94_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct g94_i2c_aux *aux = g94_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = g94_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00e4e8 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00e4e4 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00e4e0 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	g94_i2c_aux_fini(aux);
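+	/* on success, return the AUX reply status field to the caller */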
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux_func = {
+	.xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct g94_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
new file mode 100644
index 0000000..bed231b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#include "aux.h"
+
+struct gm204_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* unclear which to use, or if it matters... */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			gm204_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		   u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = gm204_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00d958 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00d954 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00d950 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00d954 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	gm204_i2c_aux_fini(aux);
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+gm204_i2c_aux_func = {
+	.xfer = gm204_i2c_aux_xfer,
+};
+
+int
+gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct gm204_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 9200f12..243a71f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,328 +22,91 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "aux.h"
+#include "bus.h"
 #include "pad.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
 
-/******************************************************************************
- * interface to linux i2c bit-banging algorithm
- *****************************************************************************/
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
-#define CSTMSEL true
-#else
-#define CSTMSEL false
-#endif
-
-static int
-nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
+static struct nvkm_i2c_pad *
+nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
 {
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->acquire(port, bit->timeout);
-}
+	struct nvkm_i2c_pad *pad;
 
-static void
-nvkm_i2c_post_xfer(struct i2c_adapter *adap)
-{
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->release(port);
-}
-
-static void
-nvkm_i2c_setscl(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_scl(port, state);
-}
-
-static void
-nvkm_i2c_setsda(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_sda(port, state);
-}
-
-static int
-nvkm_i2c_getscl(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_scl(port);
-}
-
-static int
-nvkm_i2c_getsda(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_sda(port);
-}
-
-/******************************************************************************
- * base i2c "port" class implementation
- *****************************************************************************/
-
-int
-_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	nv_ofuncs(pad)->fini(nv_object(pad), suspend);
-	return nvkm_object_fini(&port->base, suspend);
-}
-
-void
-_nvkm_i2c_port_dtor(struct nvkm_object *object)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	i2c_del_adapter(&port->adapter);
-	nvkm_object_destroy(&port->base);
-}
-
-int
-nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, u8 index,
-		      const struct i2c_algorithm *algo,
-		      const struct nvkm_i2c_func *func,
-		      int size, void **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	port = *pobject;
-	if (ret)
-		return ret;
-
-	snprintf(port->adapter.name, sizeof(port->adapter.name),
-		 "nvkm-%s-%d", device->name, index);
-	port->adapter.owner = THIS_MODULE;
-	port->adapter.dev.parent = nv_device_base(device);
-	port->index = index;
-	port->aux = -1;
-	port->func = func;
-	mutex_init(&port->mutex);
-
-	if ( algo == &nvkm_i2c_bit_algo &&
-	    !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
-		struct i2c_algo_bit_data *bit;
-
-		bit = kzalloc(sizeof(*bit), GFP_KERNEL);
-		if (!bit)
-			return -ENOMEM;
-
-		bit->udelay = 10;
-		bit->timeout = usecs_to_jiffies(2200);
-		bit->data = port;
-		bit->pre_xfer = nvkm_i2c_pre_xfer;
-		bit->post_xfer = nvkm_i2c_post_xfer;
-		bit->setsda = nvkm_i2c_setsda;
-		bit->setscl = nvkm_i2c_setscl;
-		bit->getsda = nvkm_i2c_getsda;
-		bit->getscl = nvkm_i2c_getscl;
-
-		port->adapter.algo_data = bit;
-		ret = i2c_bit_add_bus(&port->adapter);
-	} else {
-		port->adapter.algo_data = port;
-		port->adapter.algo = algo;
-		ret = i2c_add_adapter(&port->adapter);
+	list_for_each_entry(pad, &i2c->pad, head) {
+		if (pad->id == id)
+			return pad;
 	}
 
-	if (ret == 0)
-		list_add_tail(&port->head, &i2c->ports);
-	return ret;
+	return NULL;
 }
 
-/******************************************************************************
- * base i2c subdev class implementation
- *****************************************************************************/
-
-static struct nvkm_i2c_port *
-nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
+struct nvkm_i2c_bus *
+nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(i2c);
-	struct nvkm_i2c_port *port;
+	struct nvkm_bios *bios = i2c->subdev.device->bios;
+	struct nvkm_i2c_bus *bus;
 
-	if (index == NV_I2C_DEFAULT(0) ||
-	    index == NV_I2C_DEFAULT(1)) {
+	if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
 		u8  ver, hdr, cnt, len;
 		u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
 		if (i2c && ver >= 0x30) {
-			u8 auxidx = nv_ro08(bios, i2c + 4);
-			if (index == NV_I2C_DEFAULT(0))
-				index = (auxidx & 0x0f) >> 0;
+			u8 auxidx = nvbios_rd08(bios, i2c + 4);
+			if (id == NVKM_I2C_BUS_PRI)
+				id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
 			else
-				index = (auxidx & 0xf0) >> 4;
+				id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
 		} else {
-			index = 2;
+			id = NVKM_I2C_BUS_CCB(2);
 		}
 	}
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (port->index == index)
-			return port;
+	list_for_each_entry(bus, &i2c->bus, head) {
+		if (bus->id == id)
+			return bus;
 	}
 
 	return NULL;
 }
 
-static struct nvkm_i2c_port *
-nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
+struct nvkm_i2c_aux *
+nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (nv_hclass(port) == type)
-			return port;
+	list_for_each_entry(aux, &i2c->aux, head) {
+		if (aux->id == id)
+			return aux;
 	}
 
 	return NULL;
 }
 
 static void
-nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
-		nv_ofuncs(pad)->fini(nv_object(pad), false);
-		wake_up_all(&i2c->wait);
-	}
-}
-
-static int
-nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-
-	if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
-		struct nvkm_object *owner = (void *)pad->port;
-		do {
-			if (owner == (void *)port)
-				return 0;
-			owner = owner->parent;
-		} while(owner);
-		nvkm_i2c_release_pad(port);
-		return -EBUSY;
-	}
-
-	pad->next = port;
-	nv_ofuncs(pad)->init(nv_object(pad));
-	return 0;
-}
-
-static int
-nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (timeout) {
-		if (wait_event_timeout(i2c->wait,
-				       nvkm_i2c_try_acquire_pad(port) == 0,
-				       timeout) == 0)
-			return -EBUSY;
-	} else {
-		wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
-	}
-
-	return 0;
-}
-
-static void
-nvkm_i2c_release(struct nvkm_i2c_port *port)
-__releases(pad->mutex)
-{
-	nvkm_i2c(port)->release_pad(port);
-	mutex_unlock(&port->mutex);
-}
-
-static int
-nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
-__acquires(pad->mutex)
-{
-	int ret;
-	mutex_lock(&port->mutex);
-	if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
-		mutex_unlock(&port->mutex);
-	return ret;
-}
-
-static int
-nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
-		  struct nvkm_i2c_board_info *info,
-		  bool (*match)(struct nvkm_i2c_port *,
-				struct i2c_board_info *, void *), void *data)
-{
-	struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
-	int i;
-
-	if (!port) {
-		nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
-		return -ENODEV;
-	}
-
-	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
-	for (i = 0; info[i].dev.addr; i++) {
-		u8 orig_udelay = 0;
-
-		if ((port->adapter.algo == &i2c_bit_algo) &&
-		    (info[i].udelay != 0)) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			nv_debug(i2c, "using custom udelay %d instead of %d\n",
-			         info[i].udelay, algo->udelay);
-			orig_udelay = algo->udelay;
-			algo->udelay = info[i].udelay;
-		}
-
-		if (nv_probe_i2c(port, info[i].dev.addr) &&
-		    (!match || match(port, &info[i].dev, data))) {
-			nv_info(i2c, "detected %s: %s\n", what,
-				info[i].dev.type);
-			return i;
-		}
-
-		if (orig_udelay) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			algo->udelay = orig_udelay;
-		}
-	}
-
-	nv_debug(i2c, "no devices found.\n");
-	return -ENODEV;
-}
-
-static void
-nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 0);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, 0);
 }
 
 static void
-nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
 }
 
 static int
 nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
-		      struct nvkm_notify *notify)
+		   struct nvkm_notify *notify)
 {
 	struct nvkm_i2c_ntfy_req *req = data;
 	if (!WARN_ON(size != sizeof(*req))) {
@@ -355,38 +118,6 @@
 	return -EINVAL;
 }
 
-static void
-nvkm_i2c_intr(struct nvkm_subdev *subdev)
-{
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
-	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
-	struct nvkm_i2c_port *port;
-	u32 hi, lo, rq, tx, e;
-
-	if (impl->aux_stat) {
-		impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
-		if (hi || lo || rq || tx) {
-			list_for_each_entry(port, &i2c->ports, head) {
-				if (e = 0, port->aux < 0)
-					continue;
-
-				if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
-				if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
-				if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
-				if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
-				if (e) {
-					struct nvkm_i2c_ntfy_rep rep = {
-						.mask = e,
-					};
-					nvkm_event_send(&i2c->event, rep.mask,
-							port->index, &rep,
-							sizeof(rep));
-				}
-			}
-		}
-	}
-}
-
 static const struct nvkm_event_func
 nvkm_i2c_intr_func = {
 	.ctor = nvkm_i2c_intr_ctor,
@@ -394,229 +125,272 @@
 	.fini = nvkm_i2c_intr_fini,
 };
 
-int
-_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	u32 mask;
-	int ret;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_aux *aux;
+	u32 hi, lo, rq, tx;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
-		if (ret && suspend)
-			goto fail;
-	}
+	if (!i2c->func->aux_stat)
+		return;
 
-	if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
-		impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
-		impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
-	}
+	i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
+	if (!hi && !lo && !rq && !tx)
+		return;
 
-	return nvkm_subdev_fini(&i2c->base, suspend);
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->init(nv_object(port));
-	}
-
-	return ret;
-}
-
-int
-_nvkm_i2c_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_subdev_init(&i2c->base);
-	if (ret == 0) {
-		list_for_each_entry(port, &i2c->ports, head) {
-			ret = nv_ofuncs(port)->init(nv_object(port));
-			if (ret)
-				goto fail;
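+	/* fan the status bits out as notify events on each aux channel */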
+	list_for_each_entry(aux, &i2c->aux, head) {
+		u32 mask = 0;
+		if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
+		if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
+		if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
+		if (tx & aux->intr) mask |= NVKM_I2C_DONE;
+		if (mask) {
+			struct nvkm_i2c_ntfy_rep rep = {
+				.mask = mask,
+			};
+			nvkm_event_send(&i2c->event, rep.mask, aux->id,
+					&rep, sizeof(rep));
 		}
 	}
-
-	return ret;
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->fini(nv_object(port), false);
-	}
-
-	return ret;
 }
 
-void
-_nvkm_i2c_dtor(struct nvkm_object *object)
+static int
+nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port, *temp;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_pad *pad;
+	u32 mask;
+
+	if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
+		i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
+		i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
+	}
+
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_fini(pad);
+	}
+
+	return 0;
+}
+
+static int
+nvkm_i2c_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_bus *bus;
+	struct nvkm_i2c_pad *pad;
+
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_init(pad);
+	}
+
+	list_for_each_entry(bus, &i2c->bus, head) {
+		nvkm_i2c_bus_init(bus);
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_i2c_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
 
 	nvkm_event_fini(&i2c->event);
 
-	list_for_each_entry_safe(port, temp, &i2c->ports, head) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&port);
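+	/* destroy aux channels and buses before the pads they depend on */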
+	while (!list_empty(&i2c->aux)) {
+		struct nvkm_i2c_aux *aux =
+			list_first_entry(&i2c->aux, typeof(*aux), head);
+		nvkm_i2c_aux_del(&aux);
 	}
 
-	nvkm_subdev_destroy(&i2c->base);
+	while (!list_empty(&i2c->bus)) {
+		struct nvkm_i2c_bus *bus =
+			list_first_entry(&i2c->bus, typeof(*bus), head);
+		nvkm_i2c_bus_del(&bus);
+	}
+
+	while (!list_empty(&i2c->pad)) {
+		struct nvkm_i2c_pad *pad =
+			list_first_entry(&i2c->pad, typeof(*pad), head);
+		nvkm_i2c_pad_del(&pad);
+	}
+
+	return i2c;
 }
 
-static struct nvkm_oclass *
-nvkm_i2c_extdev_sclass[] = {
-	nvkm_anx9805_sclass,
+static const struct nvkm_subdev_func
+nvkm_i2c = {
+	.dtor = nvkm_i2c_dtor,
+	.init = nvkm_i2c_init,
+	.fini = nvkm_i2c_fini,
+	.intr = nvkm_i2c_intr,
 };
 
-static void
-nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
-		     struct dcb_i2c_entry *info)
-{
-	const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
-	struct nvkm_oclass *oclass;
-	struct nvkm_object *parent;
-	struct nvkm_object *object;
-	int ret, pad;
-
-	if (info->share != DCB_I2C_UNUSED) {
-		pad    = info->share;
-		oclass = impl->pad_s;
-	} else {
-		if (type != DCB_I2C_NVIO_AUX)
-			pad = 0x100 + info->drive;
-		else
-			pad = 0x100 + info->auxch;
-		oclass = impl->pad_x;
-	}
-
-	ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
-			       NULL, pad, &parent);
-	if (ret < 0)
-		return;
-
-	oclass = impl->sclass;
-	do {
-		ret = -EINVAL;
-		if (oclass->handle == type) {
-			ret = nvkm_object_ctor(parent, NULL, oclass,
-					       info, index, &object);
-		}
-	} while (ret && (++oclass)->handle);
-
-	nvkm_object_ref(NULL, &parent);
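+/* external encoder pads (ANX9805), keyed by VBIOS extdev type and i2c address */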
+static const struct nvkm_i2c_drv {
+	u8 bios;
+	u8 addr;
+	int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
+		       struct nvkm_i2c_pad **);
 }
+nvkm_i2c_drv[] = {
+	{ 0x0d, 0x39, anx9805_pad_new },
+	{ 0x0e, 0x3b, anx9805_pad_new },
+	{}
+};
 
 int
-nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_i2c **pi2c)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_i2c *i2c;
-	struct nvkm_object *object;
-	struct dcb_i2c_entry info;
-	int ret, i, j, index = -1;
-	struct dcb_output outp;
-	u8  ver, hdr;
-	u32 data;
+	struct dcb_i2c_entry ccbE;
+	struct dcb_output dcbE;
+	u8 ver, hdr;
+	int ret, i;
 
-	ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
+	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
+		return -ENOMEM;
 
-	nv_subdev(i2c)->intr = nvkm_i2c_intr;
-	i2c->find = nvkm_i2c_find;
-	i2c->find_type = nvkm_i2c_find_type;
-	i2c->acquire_pad = nvkm_i2c_acquire_pad;
-	i2c->release_pad = nvkm_i2c_release_pad;
-	i2c->acquire = nvkm_i2c_acquire;
-	i2c->release = nvkm_i2c_release;
-	i2c->identify = nvkm_i2c_identify;
-	init_waitqueue_head(&i2c->wait);
-	INIT_LIST_HEAD(&i2c->ports);
+	nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+	i2c->func = func;
+	INIT_LIST_HEAD(&i2c->pad);
+	INIT_LIST_HEAD(&i2c->bus);
+	INIT_LIST_HEAD(&i2c->aux);
 
-	while (!dcb_i2c_parse(bios, ++index, &info)) {
-		switch (info.type) {
-		case DCB_I2C_NV04_BIT:
-		case DCB_I2C_NV4E_BIT:
-		case DCB_I2C_NVIO_BIT:
-			nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_NVIO_AUX:
-			nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_PMGR:
-			if (info.drive != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-						     DCB_I2C_NVIO_BIT, &info);
-			}
-			if (info.auxch != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-						     DCB_I2C_NVIO_AUX, &info);
-			}
-			break;
-		case DCB_I2C_UNUSED:
-		default:
-			continue;
-		}
-	}
-
-	/* in addition to the busses specified in the i2c table, there
-	 * may be ddc/aux channels hiding behind external tmds/dp/etc
-	 * transmitters.
-	 */
-	index = NV_I2C_EXT(0);
 	i = -1;
-	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
-		if (!outp.location || !outp.extdev)
-			continue;
+	while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
+		struct nvkm_i2c_pad *pad = NULL;
+		struct nvkm_i2c_bus *bus = NULL;
+		struct nvkm_i2c_aux *aux = NULL;
 
-		switch (outp.type) {
-		case DCB_OUTPUT_TMDS:
-			info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
-			break;
-		case DCB_OUTPUT_DP:
-			info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
-			break;
-		default:
+		nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
+			   "sense %02x share %02x auxch %02x\n", i, ccbE.type,
+			   ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
+
+		if (ccbE.share != DCB_I2C_UNUSED) {
+			const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
+			if (!(pad = nvkm_i2c_pad_find(i2c, id)))
+				ret = func->pad_s_new(i2c, id, &pad);
+			else
+				ret = 0;
+		} else {
+			ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
+			nvkm_i2c_pad_del(&pad);
 			continue;
 		}
 
-		ret = -ENODEV;
-		j = -1;
-		while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
-			parent = nv_object(i2c->find(i2c, outp.i2c_index));
-			oclass = nvkm_i2c_extdev_sclass[j];
-			do {
-				if (oclass->handle != info.type)
-					continue;
-				ret = nvkm_object_ctor(parent, NULL, oclass,
-						       NULL, index++, &object);
-			} while (ret && (++oclass)->handle);
+		if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
+			ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive,
+						   ccbE.sense, &bus);
+		} else
+		if (pad->func->bus_new_4 &&
+		    ( ccbE.type == DCB_I2C_NV4E_BIT ||
+		      ccbE.type == DCB_I2C_NVIO_BIT ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.drive != DCB_I2C_UNUSED))) {
+			ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive, &bus);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
+			nvkm_i2c_bus_del(&bus);
+		}
+
+		if (pad->func->aux_new_6 &&
+		    ( ccbE.type == DCB_I2C_NVIO_AUX ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.auxch != DCB_I2C_UNUSED))) {
+			ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.auxch, &aux);
+		} else {
+			ret = 0;
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
+			nvkm_i2c_aux_del(&aux);
+		}
+
+		if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
+			nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
+			continue;
 		}
 	}
 
-	ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
-	if (ret)
-		return ret;
+	i = -1;
+	while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
+		const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
+		struct nvkm_i2c_bus *bus;
+		struct nvkm_i2c_pad *pad;
 
-	return 0;
-}
+		/* internal outputs handled by native i2c busses (above) */
+		if (!dcbE.location)
+			continue;
 
-int
-_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c;
-	int ret;
+		/* we need an i2c bus to talk to the external encoder */
+		bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
+		if (!bus) {
+			nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
+			continue;
+		}
 
-	ret = nvkm_i2c_create(parent, engine, oclass, &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
+		/* ... and a driver for it */
+		while (drv->pad_new) {
+			if (drv->bios == dcbE.extdev)
+				break;
+			drv++;
+		}
 
-	return 0;
+		if (!drv->pad_new) {
+			nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
+				   i, dcbE.extdev);
+			continue;
+		}
+
+		/* find/create an instance of the driver */
+		pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
+		if (!pad) {
+			const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
+			ret = drv->pad_new(bus, id, drv->addr, &pad);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
+					   i, ret);
+				nvkm_i2c_pad_del(&pad);
+				continue;
+			}
+		}
+
+		/* create any i2c bus / aux channel required by the output */
+		if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
+			const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
+			struct nvkm_i2c_aux *aux = NULL;
+			ret = pad->func->aux_new_6(pad, id, 0, &aux);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
+					   i, ret);
+				nvkm_i2c_aux_del(&aux);
+			}
+		} else
+		if (pad->func->bus_new_4) {
+			const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
+			struct nvkm_i2c_bus *bus = NULL;
+			ret = pad->func->bus_new_4(pad, id, 0, &bus);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
+					   i, ret);
+				nvkm_i2c_bus_del(&bus);
+			}
+		}
+	}
+
+	return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
 }
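
The external-encoder loop above matches dcbE.extdev against nvkm_i2c_drv[] using the table's zeroed terminator ({}) rather than an element count: the scan stops when drv->pad_new is NULL. A standalone sketch of the same sentinel-terminated-table lookup, with made-up entry values:

#include <stdio.h>

struct drv {
	unsigned char bios;	/* DCB external-encoder type to match */
	unsigned char addr;	/* i2c address the device answers on */
	const char *name;
};

/* table ends with a zeroed sentinel, like nvkm_i2c_drv[] */
static const struct drv drvs[] = {
	{ 0x0d, 0x39, "anx9805" },
	{ 0x0e, 0x3b, "anx9805" },
	{}
};

static const struct drv *
drv_find(unsigned char bios)
{
	const struct drv *drv = drvs;
	while (drv->name) {		/* stop at the sentinel */
		if (drv->bios == bios)
			return drv;
		drv++;
	}
	return NULL;
}

int
main(void)
{
	const struct drv *drv = drv_find(0x0e);
	if (drv)
		printf("extdev 0x0e -> %s @ 0x%02x\n", drv->name, drv->addr);
	else
		printf("extdev 0x0e unknown\n");
	return 0;
}
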
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 861a453..cdce11b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -9,7 +9,7 @@
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "bus.h"
 
 #ifdef CONFIG_NOUVEAU_I2C_INTERNAL
 #define T_TIMEOUT  2200000
@@ -29,205 +29,188 @@
 #define T_HOLD     5000
 
 static inline void
-i2c_drive_scl(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_scl(port, state);
+	bus->func->drive_scl(bus, state);
 }
 
 static inline void
-i2c_drive_sda(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_sda(port, state);
+	bus->func->drive_sda(bus, state);
 }
 
 static inline int
-i2c_sense_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_scl(port);
+	return bus->func->sense_scl(bus);
 }
 
 static inline int
-i2c_sense_sda(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_sda(port);
+	return bus->func->sense_sda(bus);
 }
 
 static void
-i2c_delay(struct nvkm_i2c_port *port, u32 nsec)
+nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
 {
 	udelay((nsec + 500) / 1000);
 }
 
 static bool
-i2c_raise_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
 {
 	u32 timeout = T_TIMEOUT / T_RISEFALL;
 
-	i2c_drive_scl(port, 1);
+	nvkm_i2c_drive_scl(bus, 1);
 	do {
-		i2c_delay(port, T_RISEFALL);
-	} while (!i2c_sense_scl(port) && --timeout);
+		nvkm_i2c_delay(bus, T_RISEFALL);
+	} while (!nvkm_i2c_sense_scl(bus) && --timeout);
 
 	return timeout != 0;
 }
 
 static int
-i2c_start(struct nvkm_i2c_port *port)
+i2c_start(struct nvkm_i2c_bus *bus)
 {
 	int ret = 0;
 
-	if (!i2c_sense_scl(port) ||
-	    !i2c_sense_sda(port)) {
-		i2c_drive_scl(port, 0);
-		i2c_drive_sda(port, 1);
-		if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_sense_scl(bus) ||
+	    !nvkm_i2c_sense_sda(bus)) {
+		nvkm_i2c_drive_scl(bus, 0);
+		nvkm_i2c_drive_sda(bus, 1);
+		if (!nvkm_i2c_raise_scl(bus))
 			ret = -EBUSY;
 	}
 
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return ret;
 }
 
 static void
-i2c_stop(struct nvkm_i2c_port *port)
+i2c_stop(struct nvkm_i2c_bus *bus)
 {
-	i2c_drive_scl(port, 0);
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	i2c_drive_scl(port, 1);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
 }
 
 static int
-i2c_bitw(struct nvkm_i2c_port *port, int sda)
+i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
 {
-	i2c_drive_sda(port, sda);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, sda);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return 0;
 }
 
 static int
-i2c_bitr(struct nvkm_i2c_port *port)
+i2c_bitr(struct nvkm_i2c_bus *bus)
 {
 	int sda;
 
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	sda = i2c_sense_sda(port);
+	sda = nvkm_i2c_sense_sda(bus);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return sda;
 }
 
 static int
-i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last)
+nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
 {
 	int i, bit;
 
 	*byte = 0;
 	for (i = 7; i >= 0; i--) {
-		bit = i2c_bitr(port);
+		bit = i2c_bitr(bus);
 		if (bit < 0)
 			return bit;
 		*byte |= bit << i;
 	}
 
-	return i2c_bitw(port, last ? 1 : 0);
+	return i2c_bitw(bus, last ? 1 : 0);
 }
 
 static int
-i2c_put_byte(struct nvkm_i2c_port *port, u8 byte)
+nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
 {
 	int i, ret;
 	for (i = 7; i >= 0; i--) {
-		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		ret = i2c_bitw(bus, !!(byte & (1 << i)));
 		if (ret < 0)
 			return ret;
 	}
 
-	ret = i2c_bitr(port);
+	ret = i2c_bitr(bus);
 	if (ret == 1) /* nack */
 		ret = -EIO;
 	return ret;
 }
 
 static int
-i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg)
+i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
 {
 	u32 addr = msg->addr << 1;
 	if (msg->flags & I2C_M_RD)
 		addr |= 1;
-	return i2c_put_byte(port, addr);
+	return nvkm_i2c_put_byte(bus, addr);
 }
 
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
 	struct i2c_msg *msg = msgs;
 	int ret = 0, mcnt = num;
 
-	ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
-	if (ret)
-		return ret;
-
 	while (!ret && mcnt--) {
 		u8 remaining = msg->len;
 		u8 *ptr = msg->buf;
 
-		ret = i2c_start(port);
+		ret = i2c_start(bus);
 		if (ret == 0)
-			ret = i2c_addr(port, msg);
+			ret = i2c_addr(bus, msg);
 
 		if (msg->flags & I2C_M_RD) {
 			while (!ret && remaining--)
-				ret = i2c_get_byte(port, ptr++, !remaining);
+				ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
 		} else {
 			while (!ret && remaining--)
-				ret = i2c_put_byte(port, *ptr++);
+				ret = nvkm_i2c_put_byte(bus, *ptr++);
 		}
 
 		msg++;
 	}
 
-	i2c_stop(port);
-	nvkm_i2c(port)->release(port);
+	i2c_stop(bus);
 	return (ret < 0) ? ret : num;
 }
 #else
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
 	return -ENODEV;
 }
 #endif
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nvkm_i2c_bit_algo = {
-	.master_xfer = i2c_bit_xfer,
-	.functionality = i2c_bit_func
-};
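
Despite the rename from ports to busses, bit.c keeps the same software I2C protocol: nvkm_i2c_put_byte() shifts data out MSB-first through i2c_bitw(), then i2c_bitr() clocks a ninth bit and samples SDA for the slave's acknowledge (1 means NACK, mapped to -EIO). A self-contained userspace sketch of that write-byte/ACK exchange, with simulated open-drain lines and a fake one-shot slave that only ACKs the value 0xa0 (all values illustrative):

/* gcc -o bitbang bitbang.c */
#include <stdio.h>

/* simulated open-drain lines; 1 = released (pulled up), 0 = driven low */
static int scl = 1, sda = 1;

/* fake slave, one-shot demo state: sample on SCL rising edge, ACK 0xa0 */
static int nbits, shift, ack;

static void drive_scl(int s)
{
	if (s && !scl) {		/* rising edge: slave samples SDA */
		if (nbits < 8) {
			shift = (shift << 1) | sda;
			if (++nbits == 8)
				ack = (shift == 0xa0);
		}
	}
	scl = s;
}

static void drive_sda(int s)
{
	int slave_pull = (nbits == 8 && ack);	/* slave ACK drives low */
	sda = s && !slave_pull;			/* open-drain wired-AND */
}

static int sense_sda(void) { return sda; }

/* master side: eight bits out MSB-first, then sample SDA for the ACK */
static int put_byte(unsigned char byte)
{
	int i;
	for (i = 7; i >= 0; i--) {
		drive_scl(0);
		drive_sda((byte >> i) & 1);
		drive_scl(1);
	}
	drive_scl(0);
	drive_sda(1);			/* release SDA, clock the ACK bit */
	drive_scl(1);
	return sense_sda() ? -1 : 0;	/* low = ACK, high = NACK */
}

int main(void)
{
	printf("0xa0: %s\n", put_byte(0xa0) ? "nack" : "ack");
	return 0;
}
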
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
new file mode 100644
index 0000000..807a2b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "bus.h"
+#include "pad.h"
+
+#include <core/option.h>
+
+/*******************************************************************************
+ * i2c-algo-bit
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_acquire(bus);
+}
+
+static void
+nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_release(bus);
+}
+
+static void
+nvkm_i2c_bus_setscl(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_scl(bus, state);
+}
+
+static void
+nvkm_i2c_bus_setsda(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_sda(bus, state);
+}
+
+static int
+nvkm_i2c_bus_getscl(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_scl(bus);
+}
+
+static int
+nvkm_i2c_bus_getsda(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_sda(bus);
+}
+
+/*******************************************************************************
+ * !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	int ret;
+
+	ret = nvkm_i2c_bus_acquire(bus);
+	if (ret)
+		return ret;
+
+	ret = bus->func->xfer(bus, msgs, num);
+	nvkm_i2c_bus_release(bus);
+	return ret;
+}
+
+static u32
+nvkm_i2c_bus_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+nvkm_i2c_bus_algo = {
+	.master_xfer = nvkm_i2c_bus_xfer,
+	.functionality = nvkm_i2c_bus_func,
+};
+
+/*******************************************************************************
+ * nvkm_i2c_bus base
+ ******************************************************************************/
+void
+nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
+{
+	BUS_TRACE(bus, "init");
+	if (bus->func->init)
+		bus->func->init(bus);
+}
+
+void
+nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	BUS_TRACE(bus, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&bus->mutex);
+}
+
+int
+nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	int ret;
+	BUS_TRACE(bus, "acquire");
+	mutex_lock(&bus->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+	if (ret)
+		mutex_unlock(&bus->mutex);
+	return ret;
+}
+
+int
+nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
+		   struct nvkm_i2c_bus_probe *info,
+		   bool (*match)(struct nvkm_i2c_bus *,
+				 struct i2c_board_info *, void *), void *data)
+{
+	int i;
+
+	BUS_DBG(bus, "probing %ss", what);
+	for (i = 0; info[i].dev.addr; i++) {
+		u8 orig_udelay = 0;
+
+		if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			BUS_DBG(bus, "%dus delay instead of %dus",
+				     info[i].udelay, algo->udelay);
+			orig_udelay = algo->udelay;
+			algo->udelay = info[i].udelay;
+		}
+
+		if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
+		    (!match || match(bus, &info[i].dev, data))) {
+			BUS_DBG(bus, "detected %s: %s",
+				what, info[i].dev.type);
+			return i;
+		}
+
+		if (orig_udelay) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			algo->udelay = orig_udelay;
+		}
+	}
+
+	BUS_DBG(bus, "no devices found.");
+	return -ENODEV;
+}
+
+void
+nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
+{
+	struct nvkm_i2c_bus *bus = *pbus;
+	if (bus && !WARN_ON(!bus->func)) {
+		BUS_TRACE(bus, "dtor");
+		list_del(&bus->head);
+		i2c_del_adapter(&bus->i2c);
+		kfree(bus->i2c.algo_data);
+		kfree(*pbus);
+		*pbus = NULL;
+	}
+}
+
+int
+nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+	struct i2c_algo_bit_data *bit;
+#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+	const bool internal = false;
+#else
+	const bool internal = true;
+#endif
+	int ret;
+
+	bus->func = func;
+	bus->pad = pad;
+	bus->id = id;
+	mutex_init(&bus->mutex);
+	list_add_tail(&bus->head, &pad->i2c->bus);
+	BUS_TRACE(bus, "ctor");
+
+	snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
+		 dev_name(device->dev), id);
+	bus->i2c.owner = THIS_MODULE;
+	bus->i2c.dev.parent = device->dev;
+
+	if ( bus->func->drive_scl &&
+	    !nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
+		if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
+			return -ENOMEM;
+		bit->udelay = 10;
+		bit->timeout = usecs_to_jiffies(2200);
+		bit->data = bus;
+		bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
+		bit->post_xfer = nvkm_i2c_bus_post_xfer;
+		bit->setscl = nvkm_i2c_bus_setscl;
+		bit->setsda = nvkm_i2c_bus_setsda;
+		bit->getscl = nvkm_i2c_bus_getscl;
+		bit->getsda = nvkm_i2c_bus_getsda;
+		bus->i2c.algo_data = bit;
+		ret = i2c_bit_add_bus(&bus->i2c);
+	} else {
+		bus->i2c.algo = &nvkm_i2c_bus_algo;
+		ret = i2c_add_adapter(&bus->i2c);
+	}
+
+	return ret;
+}
+
+int
+nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus **pbus)
+{
+	if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
+}
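
nvkm_i2c_bus_acquire() above stacks two resources: the bus's own mutex, then the shared pad, which may be multiplexed between I2C and AUX duty; if the pad can't be claimed, the mutex is dropped again so a failed acquire leaves nothing held. A minimal pthread sketch of that back-out-on-failure shape (the types and the -1 error code are stand-ins):

/* gcc -pthread -o acquire acquire.c */
#include <pthread.h>
#include <stdio.h>

struct pad { int busy; };
struct bus { pthread_mutex_t mutex; struct pad *pad; };

static int pad_acquire(struct pad *pad)
{
	if (pad->busy)
		return -1;	/* stands in for -EBUSY */
	pad->busy = 1;
	return 0;
}

static void pad_release(struct pad *pad)
{
	pad->busy = 0;
}

/* lock first, then claim the shared pad; if the pad is unavailable,
 * back out the lock before returning, as nvkm_i2c_bus_acquire() does */
static int bus_acquire(struct bus *bus)
{
	int ret;
	pthread_mutex_lock(&bus->mutex);
	ret = pad_acquire(bus->pad);
	if (ret)
		pthread_mutex_unlock(&bus->mutex);
	return ret;
}

static void bus_release(struct bus *bus)
{
	pad_release(bus->pad);
	pthread_mutex_unlock(&bus->mutex);
}

int main(void)
{
	struct pad pad = { 0 };
	struct bus bus = { PTHREAD_MUTEX_INITIALIZER, &pad };

	if (bus_acquire(&bus) == 0) {
		puts("bus held, pad routed to i2c");
		bus_release(&bus);
	}
	return 0;
}
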
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
new file mode 100644
index 0000000..e1be14c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -0,0 +1,37 @@
+#ifndef __NVKM_I2C_BUS_H__
+#define __NVKM_I2C_BUS_H__
+#include "pad.h"
+
+struct nvkm_i2c_bus_func {
+	void (*init)(struct nvkm_i2c_bus *);
+	void (*drive_scl)(struct nvkm_i2c_bus *, int state);
+	void (*drive_sda)(struct nvkm_i2c_bus *, int state);
+	int (*sense_scl)(struct nvkm_i2c_bus *);
+	int (*sense_sda)(struct nvkm_i2c_bus *);
+	int (*xfer)(struct nvkm_i2c_bus *, struct i2c_msg *, int num);
+};
+
+int nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+
+int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
+
+int nv04_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, u8,
+		     struct nvkm_i2c_bus **);
+
+int nv4e_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int nv50_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int gf119_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+
+#define BUS_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_bus *_bus = (b);                                       \
+	nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a);   \
+} while(0)
+#define BUS_ERR(b,f,a...) BUS_MSG((b), error, f, ##a)
+#define BUS_DBG(b,f,a...) BUS_MSG((b), debug, f, ##a)
+#define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a)
+#endif
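
BUS_MSG() evaluates its bus argument once into a local, prefixes every message with the bus id, and selects the logging function by token-pasting the level into nvkm_##l. The same macro shape in a standalone program, stringifying the level instead of pasting a real logger name (GNU named-variadic syntax, as in the header; compiles with gcc or clang):

/* gcc -o busmsg busmsg.c */
#include <stdio.h>

struct bus { unsigned int id; };

/* evaluate (b) once into a local, then prepend a per-bus prefix; the
 * kernel macro pastes nvkm_##l instead of stringifying the level */
#define bus_printf(lvl, b, f, a...) do {                              \
	struct bus *_bus = (b);                                       \
	printf("%s: bus %04x: " f "\n", #lvl, _bus->id, ##a);         \
} while (0)

#define BUS_ERR(b, f, a...)   bus_printf(error, (b), f, ##a)
#define BUS_DBG(b, f, a...)   bus_printf(debug, (b), f, ##a)
#define BUS_TRACE(b, f, a...) bus_printf(trace, (b), f, ##a)

int main(void)
{
	struct bus bus = { .id = 0x100 };
	BUS_TRACE(&bus, "init");
	BUS_DBG(&bus, "probing %ss", "eeprom");
	return 0;
}
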
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
new file mode 100644
index 0000000..96bbdda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
+#include "bus.h"
+
+struct gf119_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
+}
+
+static void
+gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
+}
+
+static int
+gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
+}
+
+static int
+gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
+}
+
+static void
+gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, 0x00000007);
+}
+
+static const struct nvkm_i2c_bus_func
+gf119_i2c_bus_func = {
+	.init = gf119_i2c_bus_init,
+	.drive_scl = gf119_i2c_bus_drive_scl,
+	.drive_sda = gf119_i2c_bus_drive_sda,
+	.sense_scl = gf119_i2c_bus_sense_scl,
+	.sense_sda = gf119_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct gf119_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x00d014 + (drive * 0x20);
+	return 0;
+}
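
As in the other per-chipset files, gf119_i2c_bus(p) recovers the wrapper struct from the embedded nvkm_i2c_bus pointer handed to the function table. A freestanding demonstration of the container_of() idiom it relies on, with a simplified macro definition and an invented register offset:

/* gcc -o contof contof.c */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct i2c_bus { int id; };		/* the common "base class" */

struct gf119_bus {			/* per-chipset wrapper */
	struct i2c_bus base;
	unsigned int addr;		/* extra chipset-specific state */
};

#define gf119_bus(p) container_of((p), struct gf119_bus, base)

/* callbacks receive the base pointer and recover the wrapper */
static void drive_scl(struct i2c_bus *base, int state)
{
	struct gf119_bus *bus = gf119_bus(base);
	printf("mask reg %#x bit0=%d\n", bus->addr, state);
}

int main(void)
{
	struct gf119_bus bus = { .base = { .id = 1 }, .addr = 0xd014 };
	drive_scl(&bus.base, 1);	/* pass the base, as nvkm does */
	return 0;
}
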
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
new file mode 100644
index 0000000..a58db15
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv04_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u8 drive;
+	u8 sense;
+};
+
+static void
+nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x20;
+	else	   val &= 0xdf;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x10;
+	else	   val &= 0xef;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
+}
+
+static int
+nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
+}
+
+static const struct nvkm_i2c_bus_func
+nv04_i2c_bus_func = {
+	.drive_scl = nv04_i2c_bus_drive_scl,
+	.drive_sda = nv04_i2c_bus_drive_sda,
+	.sense_scl = nv04_i2c_bus_sense_scl,
+	.sense_sda = nv04_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv04_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
+	bus->drive = drive;
+	bus->sense = sense;
+	return 0;
+}
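
nv04 drives the lines through VGA CRTC-style index registers: each drive_* callback is a read-modify-write that flips one bit (0x20 for SCL, 0x10 for SDA) and always forces bit 0x01 back on during the write. A tiny simulation of that read-modify-write, with the register cell replaced by a plain variable:

/* gcc -o rmw rmw.c */
#include <stdio.h>

static unsigned char reg;	/* stand-in for the VGA CRTC cell */

static unsigned char rdvgac(void) { return reg; }
static void wrvgac(unsigned char v) { reg = v; }

/* flip one line, always forcing the enable bit (0x01) on the way out,
 * as nv04_i2c_bus_drive_scl() does with nvkm_rdvgac/nvkm_wrvgac */
static void drive_scl(int state)
{
	unsigned char val = rdvgac();
	if (state)
		val |= 0x20;	/* release SCL */
	else
		val &= 0xdf;	/* drive SCL low */
	wrvgac(val | 0x01);
}

int main(void)
{
	drive_scl(1);
	printf("reg after scl=1: %#04x\n", rdvgac());	/* 0x21 */
	drive_scl(0);
	printf("reg after scl=0: %#04x\n", rdvgac());	/* 0x01 */
	return 0;
}
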
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
new file mode 100644
index 0000000..cdd73dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
+#include "bus.h"
+
+struct nv4e_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
+}
+
+static const struct nvkm_i2c_bus_func
+nv4e_i2c_bus_func = {
+	.drive_scl = nv4e_i2c_bus_drive_scl,
+	.drive_sda = nv4e_i2c_bus_drive_sda,
+	.sense_scl = nv4e_i2c_bus_sense_scl,
+	.sense_sda = nv4e_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv4e_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x600800 + drive;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
new file mode 100644
index 0000000..8db8399
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv50_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+	u32 data;
+};
+
+static void
+nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x01;
+	else	   bus->data &= 0xfe;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static void
+nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x02;
+	else	   bus->data &= 0xfd;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static int
+nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
+}
+
+static int
+nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
+}
+
+static void
+nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
+}
+
+static const struct nvkm_i2c_bus_func
+nv50_i2c_bus_func = {
+	.init = nv50_i2c_bus_init,
+	.drive_scl = nv50_i2c_bus_drive_scl,
+	.drive_sda = nv50_i2c_bus_drive_sda,
+	.sense_scl = nv50_i2c_bus_sense_scl,
+	.sense_sda = nv50_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	static const u32 addr[] = {
+		0x00e138, 0x00e150, 0x00e168, 0x00e180,
+		0x00e254, 0x00e274, 0x00e764, 0x00e780,
+		0x00e79c, 0x00e7b8
+	};
+	struct nv50_i2c_bus *bus;
+
+	if (drive >= ARRAY_SIZE(addr)) {
+		nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
+		return -ENODEV;
+	}
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = addr[drive];
+	bus->data = 0x00000007;
+	return 0;
+}
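
Unlike nv04 and gf119, nv50 never reads the register back to modify it: the drive bits are cached in bus->data and the whole value is rewritten on every change, presumably because reads of the port report the sensed line state rather than the last driven value. A short sketch of that shadow-register technique (wr32 here is a stub, the values are the ones nv50 uses):

/* gcc -o shadow shadow.c */
#include <stdio.h>

static unsigned int hw_written;			/* last value written to hw */

static void wr32(unsigned int v) { hw_written = v; }

struct bus {
	unsigned int data;	/* shadow of the drive bits, like nv50 */
};

static void drive_scl(struct bus *bus, int state)
{
	if (state)
		bus->data |= 0x01;
	else
		bus->data &= ~0x01u;
	wr32(bus->data);	/* rewrite the full cached value */
}

static void drive_sda(struct bus *bus, int state)
{
	if (state)
		bus->data |= 0x02;
	else
		bus->data &= ~0x02u;
	wr32(bus->data);
}

int main(void)
{
	struct bus bus = { .data = 0x07 };	/* init value, as in nv50 */
	drive_scl(&bus, 0);
	drive_sda(&bus, 0);
	printf("last write %#04x, shadow %#04x\n", hw_written, bus.data);
	return 0;
}
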
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index 2a2dd47..bb2a31d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00e06c);
-	u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e06c);
+	u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00e06c, intr);
+	nvkm_wr32(device, 0x00e06c, intr);
 }
 
 void
 g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00e068), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00e068), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,230 +53,20 @@
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00e068, temp);
-}
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-g94_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+	nvkm_wr32(device, 0x00e068, temp);
 }
 
 static const struct nvkm_i2c_func
-g94_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
-};
-
-static int
-g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 7;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
-
-static const struct nvkm_i2c_func
-g94_aux_func = {
-	.aux       = g94_aux,
-};
-
-int
-g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &g94_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
-}
-
-static struct nvkm_oclass
-g94_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-g94_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = g94_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
+g94_i2c = {
+	.pad_x_new = g94_i2c_pad_x_new,
+	.pad_s_new = g94_i2c_pad_s_new,
 	.aux = 4,
 	.aux_stat = g94_aux_stat,
 	.aux_mask = g94_aux_mask,
-}.base;
+};
+
+int
+g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
+}
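
g94_aux_stat() folds a register that packs four event bits per AUX channel into four per-channel masks, so the interrupt path only has to test one mask per event type (gk104 and gm204 repeat the scheme at different register offsets). The same decode lifted into a standalone program, fed a made-up status word:

/* gcc -o auxstat auxstat.c */
#include <stdio.h>

/* unpack a register holding 4 status bits per aux channel (8 channels)
 * into one bitmask per event type, as g94_aux_stat() does */
static void aux_stat(unsigned int stat, unsigned int *hi, unsigned int *lo,
		     unsigned int *rq, unsigned int *tx)
{
	int i;
	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
		if (stat & (1u << (i * 4))) *hi |= 1u << i;
		if (stat & (2u << (i * 4))) *lo |= 1u << i;
		if (stat & (4u << (i * 4))) *rq |= 1u << i;
		if (stat & (8u << (i * 4))) *tx |= 1u << i;
	}
}

int main(void)
{
	unsigned int hi, lo, rq, tx;

	/* channel 0 reports "hi", channel 2 reports "rq" (made-up value) */
	aux_stat(0x00000401, &hi, &lo, &rq, &tx);
	printf("hi=%#x lo=%#x rq=%#x tx=%#x\n", hi, lo, rq, tx);
	return 0;
}
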
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
deleted file mode 100644
index 4d4ac66..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-static int
-gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000010);
-}
-
-static int
-gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000020);
-}
-
-static const struct nvkm_i2c_func
-gf110_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = gf110_i2c_sense_scl,
-	.sense_sda = gf110_i2c_sense_sda,
-};
-
-int
-gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->state = 0x00000007;
-	port->addr = 0x00d014 + (info->drive * 0x20);
-	return 0;
-}
-
-struct nvkm_oclass
-gf110_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
-	.aux = 4,
-	.aux_stat = g94_aux_stat,
-	.aux_mask = g94_aux_mask,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index e290b40..ae4aad3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,18 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
-struct nvkm_oclass *
-gf117_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &nv04_i2c_pad_oclass,
-}.base;
+static const struct nvkm_i2c_func
+gf117_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+};
+
+int
+gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
new file mode 100644
index 0000000..6f2b02a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "pad.h"
+
+static const struct nvkm_i2c_func
+gf119_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
+	.aux = 4,
+	.aux_stat = g94_aux_stat,
+	.aux_mask = g94_aux_mask,
+};
+
+int
+gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 1a46490..f9f6bf4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00dc60);
-	u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00dc60);
+	u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00dc60, intr);
+	nvkm_wr32(device, 0x00dc60, intr);
 }
 
 void
 gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00dc68), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00dc68), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,22 +53,20 @@
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00dc68, temp);
+	nvkm_wr32(device, 0x00dc68, temp);
 }
 
-struct nvkm_oclass *
-gk104_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
+static const struct nvkm_i2c_func
+gk104_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
 	.aux = 4,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index ab64237..ff9f7d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,199 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-gm204_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-gm204_aux_func = {
-	.aux       = gm204_aux,
-};
-
-int
-gm204_aux_port_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gm204_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gm204_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &gm204_i2c_pad_oclass,
+gm204_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gm204_i2c_pad_s_new,
 	.aux = 8,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
+}
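
The conversion above collapses the old nvkm_oclass/ofuncs boilerplate into a
static const function table plus a thin per-chipset constructor.  The same
shape, reduced to a self-contained userspace sketch (widget/widget_func are
hypothetical stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct widget;

struct widget_func {
	int (*pad_x_new)(struct widget *, int id);	/* per-chipset hook */
};

struct widget {
	const struct widget_func *func;
	int index;
};

/* Common constructor: allocate the object and bind the chipset's table. */
static int
widget_new_(const struct widget_func *func, int index, struct widget **pwidget)
{
	struct widget *w;
	if (!(w = *pwidget = calloc(1, sizeof(*w))))
		return -1;	/* -ENOMEM in the kernel version */
	w->func = func;
	w->index = index;
	return 0;
}

static int
demo_pad_x_new(struct widget *w, int id)
{
	printf("widget %d: new pad %d\n", w->index, id);
	return 0;
}

static const struct widget_func
demo_widget = {
	.pad_x_new = demo_pad_x_new,
};

int
main(void)
{
	struct widget *w;
	if (widget_new_(&demo_widget, 0, &w))
		return 1;
	w->func->pad_x_new(w, 0);	/* dispatch through the table */
	free(w);
	return 0;
}

Everything chipset-specific lives in the table, so the common constructor
never needs to change when a new chipset is added.
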
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 4cdf1c4..18776f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -22,107 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv04_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv04_i2c_port {
-	struct nvkm_i2c_port base;
-	u8 drive;
-	u8 sense;
-};
-
-static void
-nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x20;
-	else	   val &= 0xdf;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static void
-nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x10;
-	else	   val &= 0xef;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static int
-nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
-}
-
-static int
-nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv04_i2c_func = {
-	.drive_scl = nv04_i2c_drive_scl,
-	.drive_sda = nv04_i2c_drive_sda,
-	.sense_scl = nv04_i2c_sense_scl,
-	.sense_sda = nv04_i2c_sense_sda,
+nv04_i2c = {
+	.pad_x_new = nv04_i2c_pad_new,
 };
 
-static int
-nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv04_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->drive = info->drive;
-	port->sense = info->sense;
-	return 0;
+	return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv04_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv04_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv04_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
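
The drive/sense callbacks deleted above bit-bang an open-drain I2C bus
through VGA registers: SCL drives bit 0x20 and SDA bit 0x10 of the drive
register, while line state is sensed from a second register.  A minimal
userspace model of the drive half (a plain variable stands in for the
register, and for brevity the model senses the same bits it drives; the
real hardware reports SCL/SDA on bits 0x04/0x08 of the separate sense
register read via nv_rdvgac()):

#include <stdio.h>

static unsigned char vga_reg;	/* fake CRTC register, starts at 0 */

static void drive_scl(int state)
{
	if (state) vga_reg |= 0x20;
	else	   vga_reg &= 0xdf;
	vga_reg |= 0x01;	/* keep the port enabled, as the code above does */
}

static void drive_sda(int state)
{
	if (state) vga_reg |= 0x10;
	else	   vga_reg &= 0xef;
	vga_reg |= 0x01;
}

static int sense_scl(void) { return !!(vga_reg & 0x20); }
static int sense_sda(void) { return !!(vga_reg & 0x10); }

int main(void)
{
	drive_scl(1);
	drive_sda(0);	/* SDA low while SCL high: an I2C start condition */
	printf("scl=%d sda=%d reg=%02x\n", sense_scl(), sense_sda(), vga_reg);
	return 0;
}
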
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 046fe5e..6b762f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -22,99 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv4e_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv4e_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-};
-
-static void
-nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
-}
-
-static void
-nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
-}
-
-static int
-nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00040000);
-}
-
-static int
-nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00080000);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv4e_i2c_func = {
-	.drive_scl = nv4e_i2c_drive_scl,
-	.drive_sda = nv4e_i2c_drive_sda,
-	.sense_scl = nv4e_i2c_sense_scl,
-	.sense_sda = nv4e_i2c_sense_sda,
+nv4e_i2c = {
+	.pad_x_new = nv4e_i2c_pad_new,
 };
 
-static int
-nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv4e_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->addr = 0x600800 + info->drive;
-	return 0;
+	return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv4e_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv4e_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x4e),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv4e_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index fba5b26..75640ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,113 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-void
-nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x01;
-	else	   port->state &= 0xfe;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-void
-nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x02;
-	else	   port->state &= 0xfd;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-int
-nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000001);
-}
-
-int
-nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000002);
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv50_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
+nv50_i2c = {
+	.pad_x_new = nv50_i2c_pad_new,
 };
 
-const u32 nv50_i2c_addr[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
-};
-const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
-
-static int
-nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 0x00000007;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
-
 int
-nv50_i2c_port_init(struct nvkm_object *object)
+nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object);
-	struct nv50_i2c_port *port = (void *)object;
-	nv_wr32(priv, port->addr, port->state);
-	return nvkm_i2c_port_init(&port->base);
+	return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv50_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv50_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv50_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
deleted file mode 100644
index b3139e7..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NV50_I2C_H__
-#define __NV50_I2C_H__
-#include "priv.h"
-
-struct nv50_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv50_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 state;
-};
-
-extern const u32 nv50_i2c_addr[];
-extern const int nv50_i2c_addr_nr;
-int  nv50_i2c_port_init(struct nvkm_object *);
-int  nv50_i2c_sense_scl(struct nvkm_i2c_port *);
-int  nv50_i2c_sense_sda(struct nvkm_i2c_port *);
-void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
-void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
-
-int  g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void g94_i2c_acquire(struct nvkm_i2c_port *);
-void g94_i2c_release(struct nvkm_i2c_port *);
-
-int  gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index a242eeb..2c5fcb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Red Hat Inc.
+ * Copyright 2015 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,65 +19,98 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "pad.h"
 
-int
-_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> NULL\n");
-	pad->port = NULL;
-	return nvkm_object_fini(&pad->base, suspend);
+	PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
+			      (mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
+	if (pad->func->mode)
+		pad->func->mode(pad, mode);
+}
+
+void
+nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+{
+	PAD_TRACE(pad, "mode %d", mode);
+	mutex_lock(&pad->mutex);
+	nvkm_i2c_pad_mode_locked(pad, mode);
+	pad->mode = mode;
+	mutex_unlock(&pad->mutex);
+}
+
+void
+nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "release");
+	if (pad->mode == NVKM_I2C_PAD_OFF)
+		nvkm_i2c_pad_mode_locked(pad, pad->mode);
+	mutex_unlock(&pad->mutex);
 }
 
 int
-_nvkm_i2c_pad_init(struct nvkm_object *object)
+nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> PORT:%02x\n", pad->next->index);
-	pad->port = pad->next;
-	return nvkm_object_init(&pad->base);
-}
-
-int
-nvkm_i2c_pad_create_(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int index,
-		     int size, void **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_pad *pad;
-	int ret;
-
-	list_for_each_entry(port, &i2c->ports, head) {
-		pad = nvkm_i2c_pad(port);
-		if (pad->index == index) {
-			atomic_inc(&nv_object(pad)->refcount);
-			*pobject = pad;
-			return 1;
+	PAD_TRACE(pad, "acquire");
+	mutex_lock(&pad->mutex);
+	if (pad->mode != mode) {
+		if (pad->mode != NVKM_I2C_PAD_OFF) {
+			mutex_unlock(&pad->mutex);
+			return -EBUSY;
 		}
+		nvkm_i2c_pad_mode_locked(pad, mode);
 	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	pad = *pobject;
-	if (ret)
-		return ret;
-
-	pad->index = index;
 	return 0;
 }
 
-int
-_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+void
+nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
 {
-	struct nvkm_i2c_pad *pad;
-	int ret;
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	return ret;
+	PAD_TRACE(pad, "fini");
+	nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "init");
+	nvkm_i2c_pad_mode_locked(pad, pad->mode);
+}
+
+void
+nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
+{
+	struct nvkm_i2c_pad *pad = *ppad;
+	if (pad) {
+		PAD_TRACE(pad, "dtor");
+		list_del(&pad->head);
+		kfree(pad);
+		pad = NULL;
+	}
+}
+
+void
+nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad *pad)
+{
+	pad->func = func;
+	pad->i2c = i2c;
+	pad->id = id;
+	pad->mode = NVKM_I2C_PAD_OFF;
+	mutex_init(&pad->mutex);
+	list_add_tail(&pad->head, &i2c->pad);
+	PAD_TRACE(pad, "ctor");
+}
+
+int
+nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad **ppad)
+{
+	if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
+	return 0;
 }
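
nvkm_i2c_pad_acquire()/release() above form a small ownership protocol:
acquire returns with pad->mutex held and fails with -EBUSY when the pad is
already committed to a conflicting mode; release powers the pad back down
(if its default mode is OFF) before unlocking.  A userspace model of that
protocol, with a pthread mutex standing in for the kernel's struct mutex
and the hardware programming reduced to comments:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum pad_mode { PAD_OFF, PAD_I2C, PAD_AUX };

struct pad {
	enum pad_mode mode;		/* default mode, set by pad_mode() */
	pthread_mutex_t mutex;
};

static int
pad_acquire(struct pad *pad, enum pad_mode mode)
{
	pthread_mutex_lock(&pad->mutex);
	if (pad->mode != mode) {
		if (pad->mode != PAD_OFF) {
			pthread_mutex_unlock(&pad->mutex);
			return -EBUSY;	/* held in a conflicting mode */
		}
		/* kernel: program the pad hardware for 'mode' here */
	}
	return 0;	/* success: caller now owns pad->mutex */
}

static void
pad_release(struct pad *pad)
{
	/* kernel: if the default mode is OFF, power the pad down again */
	pthread_mutex_unlock(&pad->mutex);
}

int
main(void)
{
	struct pad pad = { PAD_OFF, PTHREAD_MUTEX_INITIALIZER };
	printf("aux on idle pad: %d\n", pad_acquire(&pad, PAD_AUX));	/* 0 */
	pad_release(&pad);
	pad.mode = PAD_I2C;	/* as nvkm_i2c_pad_mode() would record */
	printf("aux on i2c pad:  %d\n", pad_acquire(&pad, PAD_AUX));	/* -EBUSY */
	return 0;
}
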
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index f3422cc..9eeb992 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,56 +1,67 @@
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include "priv.h"
+#include <subdev/i2c.h>
 
 struct nvkm_i2c_pad {
-	struct nvkm_object base;
-	int index;
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *next;
+	const struct nvkm_i2c_pad_func *func;
+	struct nvkm_i2c *i2c;
+#define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */                     (n)
+#define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */                 ((n) + 0x100)
+#define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200)
+	int id;
+
+	enum nvkm_i2c_pad_mode {
+		NVKM_I2C_PAD_OFF,
+		NVKM_I2C_PAD_I2C,
+		NVKM_I2C_PAD_AUX,
+	} mode;
+	struct mutex mutex;
+	struct list_head head;
 };
 
-static inline struct nvkm_i2c_pad *
-nvkm_i2c_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_object *pad = nv_object(port);
-	while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS))
-		pad = pad->parent;
-	return (void *)pad;
-}
+struct nvkm_i2c_pad_func {
+	int (*bus_new_0)(struct nvkm_i2c_pad *, int id, u8 drive, u8 sense,
+			 struct nvkm_i2c_bus **);
+	int (*bus_new_4)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_bus **);
 
-#define nvkm_i2c_pad_create(p,e,o,i,d)                                         \
-	nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
-#define nvkm_i2c_pad_destroy(p) ({                                             \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_dtor(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_init(p) ({                                                \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_init(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_fini(p,s) ({                                              \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_fini(nv_object(_p), (s));                                \
-})
+	int (*aux_new_6)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_aux **);
 
-int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int index, int, void **);
+	void (*mode)(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+};
 
-int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-#define _nvkm_i2c_pad_dtor nvkm_object_destroy
-int _nvkm_i2c_pad_init(struct nvkm_object *);
-int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
+void nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		       int id, struct nvkm_i2c_pad *);
+int nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		      int id, struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_del(struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_init(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_fini(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+int nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+void nvkm_i2c_pad_release(struct nvkm_i2c_pad *);
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_pad *_pad = (void *)pad;                               \
-	nv_##l(_pad, "PAD:%c:%02x: "f,                                         \
-	       _pad->index >= 0x100 ? 'X' : 'S',                               \
-	       _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
+void g94_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+
+int nv04_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
+
+#define PAD_MSG(p,l,f,a...) do {                                               \
+	struct nvkm_i2c_pad *_pad = (p);                                       \
+	nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a);        \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
+#define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
+#define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
 #endif
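
Unlike the old MSG()/DBG()/ERR() macros, the PAD_MSG() family above takes
the pad as an explicit argument instead of capturing a 'pad' variable from
the caller's scope, so it is usable from any function.  The same
construction as a standalone sketch, with fprintf() standing in for the
per-level nvkm_*() log helpers:

#include <stdio.h>

struct pad { int id; };

#define PAD_MSG(p, l, f, a...) do {                                           \
	struct pad *_pad = (p);                                               \
	fprintf(stderr, "%s: pad %04x: " f "\n", #l, _pad->id, ##a);          \
} while(0)
#define PAD_ERR(p, f, a...)   PAD_MSG((p), error, f, ##a)
#define PAD_TRACE(p, f, a...) PAD_MSG((p), trace, f, ##a)

int main(void)
{
	struct pad pad = { .id = 0x100 };
	PAD_TRACE(&pad, "mode %d", 2);	/* trace: pad 0100: mode 2 */
	PAD_ERR(&pad, "acquire failed");
	return 0;
}
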
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index e9832f7..5904bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct g94_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+void
+g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-static int
-g94_i2c_pad_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
-
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+		break;
 	default:
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 index,
-		 struct nvkm_object **pobject)
-{
-	struct g94_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
-
-	pad->addr = index * 0x50;
-	return 0;
-}
-
-struct nvkm_oclass
-g94_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = g94_i2c_pad_init,
-		.fini = g94_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_s_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
 };
+
+int
+g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_x_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
+}
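
Every case in g94_i2c_pad_mode() above is a read-modify-write through
nvkm_mask(): clear the field named by the mask, set the new value within
it, and hand back the old register contents.  A self-contained model of
that primitive (reg_mask() is a hypothetical name, and a plain array
stands in for the MMIO aperture):

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[0x10000];		/* fake register file, word-indexed */

static uint32_t
reg_mask(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = mmio[addr >> 2];		/* nvkm_rd32() */
	mmio[addr >> 2] = (temp & ~mask) | data;	/* nvkm_wr32() */
	return temp;	/* old value, as nvkm_mask() returns */
}

int main(void)
{
	const uint32_t base = 0 * 0x50;		/* pad 0 */
	/* the NVKM_I2C_PAD_AUX case from above */
	reg_mask(0x00e500 + base, 0x0000c003, 0x00000002);
	reg_mask(0x00e50c + base, 0x00000001, 0x00000000);
	printf("0x00e500 = %08x\n", (unsigned)mmio[0x00e500 >> 2]);
	return 0;
}
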
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
new file mode 100644
index 0000000..d53212f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "aux.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
+};
+
+int
+gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index be59040..24a4d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct gm204_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-static int
-gm204_i2c_pad_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
-
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+		break;
 	default:
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct gm204_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
-
-	pad->addr = index * 0x50;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = gm204_i2c_pad_init,
-		.fini = gm204_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
+	.mode = gm204_i2c_pad_mode,
 };
+
+int
+gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
+};
+
+int
+gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 22c7daa..310046a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -22,13 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "bus.h"
 
-struct nvkm_oclass
-nv04_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = _nvkm_i2c_pad_init,
-		.fini = _nvkm_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+nv04_i2c_pad_func = {
+	.bus_new_0 = nv04_i2c_bus_new,
 };
+
+int
+nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
new file mode 100644
index 0000000..dda6fc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv4e_i2c_pad_func = {
+	.bus_new_4 = nv4e_i2c_bus_new,
+};
+
+int
+nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
new file mode 100644
index 0000000..a03f25b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv50_i2c_pad_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+};
+
+int
+nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
deleted file mode 100644
index 586f53d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __NVKM_I2C_PORT_H__
-#define __NVKM_I2C_PORT_H__
-#include "priv.h"
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_port *_port = (void *)port;                         \
-	nv_##l(_port, "PORT:%02x: "f, _port->index, ##a);                      \
-} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index 6586e15..bf655a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,69 +1,14 @@
 #ifndef __NVKM_I2C_PRIV_H__
 #define __NVKM_I2C_PRIV_H__
+#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
 #include <subdev/i2c.h>
 
-extern struct nvkm_oclass nv04_i2c_pad_oclass;
-extern struct nvkm_oclass g94_i2c_pad_oclass;
-extern struct nvkm_oclass gm204_i2c_pad_oclass;
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
+		  int index, struct nvkm_i2c **);
 
-#define nvkm_i2c_port_create(p,e,o,i,a,f,d)                                 \
-	nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f),                 \
-				 sizeof(**d), (void **)d)
-#define nvkm_i2c_port_destroy(p) ({                                         \
-	struct nvkm_i2c_port *port = (p);                                   \
-	_nvkm_i2c_port_dtor(nv_object(port));                               \
-})
-#define nvkm_i2c_port_init(p)                                               \
-	nvkm_object_init(&(p)->base)
-#define nvkm_i2c_port_fini(p,s)                                             \
-	nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, u8,
-			     const struct i2c_algorithm *,
-			     const struct nvkm_i2c_func *,
-			     int, void **);
-void _nvkm_i2c_port_dtor(struct nvkm_object *);
-#define _nvkm_i2c_port_init nvkm_object_init
-int  _nvkm_i2c_port_fini(struct nvkm_object *, bool);
-
-#define nvkm_i2c_create(p,e,o,d)                                            \
-	nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_i2c_destroy(p) ({                                              \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_dtor(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_init(p) ({                                                 \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_init(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_fini(p,s) ({                                               \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_fini(nv_object(i2c), (s));                                \
-})
-
-int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-int  _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_i2c_dtor(struct nvkm_object *);
-int  _nvkm_i2c_init(struct nvkm_object *);
-int  _nvkm_i2c_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nvkm_anx9805_sclass[];
-extern struct nvkm_oclass gf110_i2c_sclass[];
-
-extern const struct i2c_algorithm nvkm_i2c_bit_algo;
-extern const struct i2c_algorithm nvkm_i2c_aux_algo;
-
-struct nvkm_i2c_impl {
-	struct nvkm_oclass base;
-
-	/* supported i2c port classes */
-	struct nvkm_oclass *sclass;
-	struct nvkm_oclass *pad_x;
-	struct nvkm_oclass *pad_s;
+struct nvkm_i2c_func {
+	int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
+	int (*pad_s_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
 
 	/* number of native dp aux channels present */
 	int aux;
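
The new nvkm_i2c() macro above derives the subdev's containing struct with
container_of() instead of walking an object hierarchy.  How that recovery
works, as a standalone illustration (the struct names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };

struct i2c {
	struct subdev subdev;	/* embedded base; &i2c->subdev is handed out */
	int aux_count;
};

#define to_i2c(p) container_of((p), struct i2c, subdev)

int main(void)
{
	struct i2c i2c = { .subdev = { .index = 7 }, .aux_count = 8 };
	struct subdev *sd = &i2c.subdev;	/* what a callback receives */
	printf("aux_count=%d\n", to_i2c(sd)->aux_count);
	return 0;
}
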
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 8e578f8..37a0496 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gf100_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr(struct nvkm_subdev *subdev)
+gf100_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gf100_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x121c58);
-	u32 intr1 = nv_rd32(priv, 0x121c5c);
-	u32 hubnr = nv_rd32(priv, 0x121c70);
-	u32 ropnr = nv_rd32(priv, 0x121c74);
-	u32 gpcnr = nv_rd32(priv, 0x121c78);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x121c58);
+	u32 intr1 = nvkm_rd32(device, 0x121c5c);
+	u32 hubnr = nvkm_rd32(device, 0x121c70);
+	u32 ropnr = nvkm_rd32(device, 0x121c74);
+	u32 gpcnr = nvkm_rd32(device, 0x121c78);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_hub(priv, i);
+			gf100_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_rop(priv, i);
+			gf100_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,36 +86,24 @@
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gf100_ibus_intr_gpc(priv, i);
+			gf100_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
-static int
-gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_subdev_func
+gf100_ibus = {
+	.intr = gf100_ibus_intr,
+};
+
+int
+gf100_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
 {
-	struct gf100_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gf100_ibus_intr;
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = _nvkm_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
-};
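
gf100_ibus_intr() fans the HUB/ROP/GPC status words out one unit at a
time, clearing each serviced bit from its local copy so the loop can stop
as soon as the word is empty.  The same loop shape in isolation
(handle_hub() and the sample status value are made up):

#include <stdint.h>
#include <stdio.h>

static void handle_hub(int i) { printf("HUB%d serviced\n", i); }

int main(void)
{
	uint32_t intr0 = 0x00002400;	/* pretend HUB2 and HUB5 fired */
	uint32_t hubnr = 8;		/* unit count, read from hw above */
	uint32_t i;

	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
		uint32_t stat = 0x00000100 << i;
		if (intr0 & stat) {
			handle_hub(i);
			intr0 &= ~stat;	/* drop it from the local copy */
		}
	}
	return 0;
}
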
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 7b6e9a6..ba33609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gk104_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr(struct nvkm_subdev *subdev)
+gk104_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x120058);
-	u32 intr1 = nv_rd32(priv, 0x12005c);
-	u32 hubnr = nv_rd32(priv, 0x120070);
-	u32 ropnr = nv_rd32(priv, 0x120074);
-	u32 gpcnr = nv_rd32(priv, 0x120078);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x120058);
+	u32 intr1 = nvkm_rd32(device, 0x12005c);
+	u32 hubnr = nvkm_rd32(device, 0x120070);
+	u32 ropnr = nvkm_rd32(device, 0x120074);
+	u32 gpcnr = nvkm_rd32(device, 0x120078);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_hub(priv, i);
+			gk104_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_rop(priv, i);
+			gk104_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,53 +86,40 @@
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gk104_ibus_intr_gpc(priv, i);
+			gk104_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
 static int
-gk104_ibus_init(struct nvkm_object *object)
+gk104_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)object;
-	int ret = nvkm_ibus_init(&priv->base);
-	if (ret == 0) {
-		nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
-		nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
-		nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
-	}
-	return ret;
-}
-
-static int
-gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk104_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gk104_ibus_intr;
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
+	nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
+	nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
 	return 0;
 }
 
-struct nvkm_oclass
-gk104_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk104_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
+static const struct nvkm_subdev_func
+gk104_ibus = {
+	.preinit = gk104_ibus_init,
+	.init = gk104_ibus_init,
+	.intr = gk104_ibus_intr,
 };
+
+int
+gk104_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
+	return 0;
+}
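
Note that gk104 wires gk104_ibus_init() up as both .preinit and .init,
while gf100 provides only .intr: hooks a chipset leaves NULL are skipped
by the caller, the same way the pad code above tests pad->func->mode
before using it.  A sketch of that optional-hook dispatch (subdev and
subdev_func here are hypothetical stand-ins for the nvkm types):

#include <stdio.h>

struct subdev;

struct subdev_func {
	int  (*init)(struct subdev *);
	void (*intr)(struct subdev *);
};

struct subdev { const struct subdev_func *func; };

static int subdev_init(struct subdev *sd)
{
	if (sd->func->init)	/* optional hook: skip when NULL */
		return sd->func->init(sd);
	return 0;
}

static int gk104_style_init(struct subdev *sd)
{
	(void)sd;
	puts("init hook ran");
	return 0;
}

static const struct subdev_func has_init = { .init = gk104_style_init };
static const struct subdev_func no_init  = { .init = NULL };

int main(void)
{
	struct subdev a = { &has_init }, b = { &no_init };
	subdev_init(&a);	/* runs the hook */
	subdev_init(&b);	/* silently a no-op */
	return 0;
}
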
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 24dcdfb..3484079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -22,89 +22,68 @@
 #include <subdev/ibus.h>
 #include <subdev/timer.h>
 
-struct gk20a_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
+gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
 {
-	nv_mask(priv, 0x137250, 0x3f, 0);
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x137250, 0x3f, 0);
 
-	nv_mask(priv, 0x000200, 0x20, 0);
+	nvkm_mask(device, 0x000200, 0x20, 0);
 	usleep_range(20, 30);
-	nv_mask(priv, 0x000200, 0x20, 0x20);
+	nvkm_mask(device, 0x000200, 0x20, 0x20);
 
-	nv_wr32(priv, 0x12004c, 0x4);
-	nv_wr32(priv, 0x122204, 0x2);
-	nv_rd32(priv, 0x122204);
+	nvkm_wr32(device, 0x12004c, 0x4);
+	nvkm_wr32(device, 0x122204, 0x2);
+	nvkm_rd32(device, 0x122204);
 
 	/*
 	 * Bug: increase clock timeout to avoid operation failure at high
 	 * gpcclk rate.
 	 */
-	nv_wr32(priv, 0x122354, 0x800);
-	nv_wr32(priv, 0x128328, 0x800);
-	nv_wr32(priv, 0x124320, 0x800);
+	nvkm_wr32(device, 0x122354, 0x800);
+	nvkm_wr32(device, 0x128328, 0x800);
+	nvkm_wr32(device, 0x124320, 0x800);
 }
 
 static void
-gk20a_ibus_intr(struct nvkm_subdev *subdev)
+gk20a_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)subdev;
-	u32 status0 = nv_rd32(priv, 0x120058);
+	struct nvkm_device *device = ibus->device;
+	u32 status0 = nvkm_rd32(device, 0x120058);
 
 	if (status0 & 0x7) {
-		nv_debug(priv, "resetting priv ring\n");
-		gk20a_ibus_init_priv_ring(priv);
+		nvkm_debug(ibus, "resetting ibus ring\n");
+		gk20a_ibus_init_ibus_ring(ibus);
 	}
 
 	/* Acknowledge interrupt */
-	nv_mask(priv, 0x12004c, 0x2, 0x2);
-
-	if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
-		nv_warn(priv, "timeout waiting for ringmaster ack\n");
+	nvkm_mask(device, 0x12004c, 0x2, 0x2);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+			break;
+	);
 }
 
 static int
-gk20a_ibus_init(struct nvkm_object *object)
+gk20a_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)object;
-	int ret;
-
-	ret = _nvkm_ibus_init(object);
-	if (ret)
-		return ret;
-
-	gk20a_ibus_init_priv_ring(priv);
-
+	gk20a_ibus_init_ibus_ring(ibus);
 	return 0;
 }
 
-static int
-gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk20a_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gk20a_ibus_intr;
-	return 0;
-}
-
-struct nvkm_oclass
-gk20a_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk20a_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
+static const struct nvkm_subdev_func
+gk20a_ibus = {
+	.init = gk20a_ibus_init,
+	.intr = gk20a_ibus_intr,
 };
+
+int
+gk20a_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
+	return 0;
+}
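
The gk20a interrupt handler above replaces nv_wait() with an open-coded
nvkm_msec() poll: re-read the register until the masked bits clear or a
2000ms budget runs out.  A userspace model of that poll-until-deadline
loop (clock_gettime() stands in for the kernel's timekeeping, and the
register is faked so the demo terminates):

#include <stdio.h>
#include <time.h>

static int fake_reads;
static unsigned fake_rd32(void)	/* "clears" after a few reads */
{
	return fake_reads++ < 3 ? 0x3f : 0x00;
}

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns 0 once the masked bits clear, -1 if the deadline passes first. */
static int poll_msec(long msec, unsigned mask)
{
	long long deadline = now_ns() + msec * 1000000LL;
	do {
		if (!(fake_rd32() & mask))
			return 0;
	} while (now_ns() < deadline);
	return -1;	/* e.g. the ringmaster never acked */
}

int main(void)
{
	printf("ack: %s\n", poll_msec(2000, 0x3f) ? "timeout" : "ok");
	return 0;
}
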
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index d16358c..895ba74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,124 +23,291 @@
  */
 #include "priv.h"
 
-#include <core/engine.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
 
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
+#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
 
-void
-_nvkm_instobj_dtor(struct nvkm_object *object)
+struct nvkm_instobj {
+	struct nvkm_memory memory;
+	struct nvkm_memory *parent;
+	struct nvkm_instmem *imem;
+	struct list_head head;
+	u32 *suspend;
+	void __iomem *map;
+};
+
+static enum nvkm_memory_target
+nvkm_instobj_target(struct nvkm_memory *memory)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nvkm_instobj *iobj = (void *)object;
-
-	mutex_lock(&nv_subdev(imem)->mutex);
-	list_del(&iobj->head);
-	mutex_unlock(&nv_subdev(imem)->mutex);
-
-	return nvkm_object_destroy(&iobj->base);
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_target(memory);
 }
 
-int
-nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+static u64
+nvkm_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_addr(memory);
+}
+
+static u64
+nvkm_instobj_size(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_size(memory);
+}
+
+static void
+nvkm_instobj_release(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_bar_flush(iobj->imem->subdev.device->bar);
+}
+
+static void __iomem *
+nvkm_instobj_acquire(struct nvkm_memory *memory)
+{
+	return nvkm_instobj(memory)->map;
+}
+
+static u32
+nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	return ioread32_native(nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_map(memory, vma, offset);
+}
+
+static void *
+nvkm_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	list_del(&iobj->head);
+	nvkm_memory_del(&iobj->parent);
+	return iobj;
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.acquire = nvkm_instobj_acquire,
+	.release = nvkm_instobj_release,
+	.rd32 = nvkm_instobj_rd32,
+	.wr32 = nvkm_instobj_wr32,
+	.map = nvkm_instobj_map,
+};
+
+static void
+nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_boot(memory, vm);
+}
+
+static void
+nvkm_instobj_release_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_instobj_release(memory);
+	nvkm_done(iobj->parent);
+}
+
+static void __iomem *
+nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	iobj->map = nvkm_kmap(iobj->parent);
+	if (iobj->map)
+		memory->func = &nvkm_instobj_func;
+	return iobj->map;
+}
+
+static u32
+nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_ro32(iobj->parent, offset);
+}
+
+static void
+nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_wo32(iobj->parent, offset, data);
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func_slow = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.boot = nvkm_instobj_boot,
+	.acquire = nvkm_instobj_acquire_slow,
+	.release = nvkm_instobj_release_slow,
+	.rd32 = nvkm_instobj_rd32_slow,
+	.wr32 = nvkm_instobj_wr32_slow,
+	.map = nvkm_instobj_map,
+};
+
+int
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = NULL;
 	struct nvkm_instobj *iobj;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
-				  length, pobject);
-	iobj = *pobject;
+	ret = imem->func->memory_new(imem, size, align, zero, &memory);
 	if (ret)
-		return ret;
+		goto done;
 
-	mutex_lock(&imem->base.mutex);
-	list_add(&iobj->head, &imem->list);
-	mutex_unlock(&imem->base.mutex);
-	return 0;
+	if (!imem->func->persistent) {
+		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+		iobj->parent = memory;
+		iobj->imem = imem;
+		list_add_tail(&iobj->head, &imem->list);
+		memory = &iobj->memory;
+	}
+
+	if (!imem->func->zero && zero) {
+		void __iomem *map = nvkm_kmap(memory);
+		if (unlikely(!map)) {
+			for (offset = 0; offset < size; offset += 4)
+				nvkm_wo32(memory, offset, 0x00000000);
+		} else {
+			memset_io(map, 0x00, size);
+		}
+		nvkm_done(memory);
+	}
+
+done:
+	if (ret)
+		nvkm_memory_del(&memory);
+	*pmemory = memory;
+	return ret;
 }
 
 /******************************************************************************
  * instmem subdev base implementation
  *****************************************************************************/
 
-static int
-nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
-		   u32 size, u32 align, struct nvkm_object **pobject)
+u32
+nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
-	struct nvkm_instobj_args args = { .size = size, .align = align };
-	return nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				impl->instobj, &args, sizeof(args), pobject);
+	return imem->func->rd32(imem, addr);
 }
 
-int
-_nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 {
-	struct nvkm_instmem *imem = (void *)object;
+	return imem->func->wr32(imem, addr, data);
+}
+
+static int
+nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i, ret = 0;
+	int i;
+
+	if (imem->func->fini)
+		imem->func->fini(imem);
 
 	if (suspend) {
-		mutex_lock(&imem->base.mutex);
 		list_for_each_entry(iobj, &imem->list, head) {
-			iobj->suspend = vmalloc(iobj->size);
-			if (!iobj->suspend) {
-				ret = -ENOMEM;
-				break;
-			}
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
 
-			for (i = 0; i < iobj->size; i += 4)
-				iobj->suspend[i / 4] = nv_ro32(iobj, i);
+			iobj->suspend = vmalloc(size);
+			if (!iobj->suspend)
+				return -ENOMEM;
+
+			for (i = 0; i < size; i += 4)
+				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
 		}
-		mutex_unlock(&imem->base.mutex);
-		if (ret)
-			return ret;
 	}
 
-	return nvkm_subdev_fini(&imem->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_instmem_init(struct nvkm_object *object)
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem = (void *)object;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->oneinit)
+		return imem->func->oneinit(imem);
+	return 0;
+}
+
+static int
+nvkm_instmem_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int ret, i;
+	int i;
 
-	ret = nvkm_subdev_init(&imem->base);
-	if (ret)
-		return ret;
-
-	mutex_lock(&imem->base.mutex);
 	list_for_each_entry(iobj, &imem->list, head) {
 		if (iobj->suspend) {
-			for (i = 0; i < iobj->size; i += 4)
-				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+			for (i = 0; i < size; i += 4)
+				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
 			vfree(iobj->suspend);
 			iobj->suspend = NULL;
 		}
 	}
-	mutex_unlock(&imem->base.mutex);
+
 	return 0;
 }
 
-int
-nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem;
-	int ret;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->dtor)
+		return imem->func->dtor(imem);
+	return imem;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM",
-				  "instmem", length, pobject);
-	imem = *pobject;
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+nvkm_instmem = {
+	.dtor = nvkm_instmem_dtor,
+	.oneinit = nvkm_instmem_oneinit,
+	.init = nvkm_instmem_init,
+	.fini = nvkm_instmem_fini,
+};
 
+void
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_instmem *imem)
+{
+	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+	imem->func = func;
 	INIT_LIST_HEAD(&imem->list);
-	imem->alloc = nvkm_instmem_alloc;
-	return 0;
 }
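
The nvkm_instobj_acquire_slow() path above rewrites memory->func to the fast
table once nvkm_kmap() has produced a CPU mapping, so every later accessor
skips the map test entirely. A minimal userspace sketch of that
self-promoting function-table pattern (all names here are illustrative, not
part of the nvkm API):

#include <stdio.h>

struct obj;

struct obj_func {
	int (*read)(struct obj *);
};

struct obj {
	const struct obj_func *func;
	int *map;		/* NULL until the first acquire */
	int value;
};

/* fast path: the mapping is known to exist */
static int read_fast(struct obj *o)
{
	return *o->map;
}

static const struct obj_func obj_func_fast = {
	.read = read_fast,
};

/* slow path: create the mapping, then promote to the fast table */
static int read_slow(struct obj *o)
{
	o->map = &o->value;		/* stand-in for nvkm_kmap() succeeding */
	o->func = &obj_func_fast;	/* later calls bypass this branch */
	return *o->map;
}

static const struct obj_func obj_func_slow = {
	.read = read_slow,
};

int main(void)
{
	struct obj o = { .func = &obj_func_slow, .value = 42 };
	int a = o.func->read(&o);	/* slow: maps, then promotes */
	int b = o.func->read(&o);	/* fast: plain dereference */
	printf("%d %d\n", a, b);	/* 42 42 */
	return 0;
}
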
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d..cd7feb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
  * to use more "relaxed" allocation parameters when using the DMA API, since we
  * never need a kernel mapping.
  */
-
-#include <subdev/fb.h>
-#include <core/mm.h>
-#include <core/device.h>
-
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
-
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 #include "priv.h"
 
-struct gk20a_instobj_priv {
-	struct nvkm_instobj base;
-	/* Must be second member here - see nouveau_gpuobj_map_vm() */
-	struct nvkm_mem *mem;
-	/* Pointed by mem */
-	struct nvkm_mem _mem;
+#include <core/memory.h>
+#include <core/mm.h>
+#include <core/tegra.h>
+#include <subdev/fb.h>
+
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+
+struct gk20a_instobj {
+	struct nvkm_memory memory;
+	struct gk20a_instmem *imem;
+	struct nvkm_mem mem;
 };
 
 /*
  * Used for objects allocated using the DMA API
  */
 struct gk20a_instobj_dma {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	void *cpuaddr;
 	dma_addr_t handle;
@@ -73,14 +68,15 @@
  * Used for objects flattened using the IOMMU API
  */
 struct gk20a_instobj_iommu {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	/* array of base.mem->size pages */
 	struct page *pages[];
 };
 
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 
@@ -94,6 +90,41 @@
 	struct dma_attrs attrs;
 };
 
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+	return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	unsigned long flags;
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
+
+static void
+gk20a_instobj_release(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
 /*
  * Use PRAMIN to read/write data and avoid coherency issues.
  * PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@
  */
 
 static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	nvkm_vm_map_at(vma, offset, &node->mem);
+}
+
+static void
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_dma *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-	struct device *dev = nv_device_base(nv_device(priv));
+	struct gk20a_instmem *imem = _node->imem;
+	struct device *dev = imem->base.subdev.device->dev;
 
 	if (unlikely(!node->cpuaddr))
 		return;
 
-	dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
-		       node->handle, &priv->attrs);
+	dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+		       node->handle, &imem->attrs);
 }
 
 static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_iommu *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instmem *imem = _node->imem;
 	struct nvkm_mm_node *r;
 	int i;
 
-	if (unlikely(list_empty(&_node->mem->regions)))
+	if (unlikely(list_empty(&_node->mem.regions)))
 		return;
 
-	r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+	r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
 			     rl_entry);
 
 	/* clear bit 34 to unmap pages */
-	r->offset &= ~BIT(34 - priv->iommu_pgshift);
+	r->offset &= ~BIT(34 - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < _node->mem->size; i++) {
-		iommu_unmap(priv->domain,
-			    (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+	for (i = 0; i < _node->mem.size; i++) {
+		iommu_unmap(imem->domain,
+			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		__free_page(node->pages[i]);
 	}
 
 	/* Release area from GPU address space */
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 }
 
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
+static void *
+gk20a_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct gk20a_instobj_priv *node = (void *)object;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
 
-	if (priv->domain)
+	if (imem->domain)
 		gk20a_instobj_dtor_iommu(node);
 	else
 		gk20a_instobj_dtor_dma(node);
 
-	nvkm_instobj_destroy(&node->base);
+	return node;
 }
 
+static const struct nvkm_memory_func
+gk20a_instobj_func = {
+	.dtor = gk20a_instobj_dtor,
+	.target = gk20a_instobj_target,
+	.addr = gk20a_instobj_addr,
+	.size = gk20a_instobj_size,
+	.acquire = gk20a_instobj_acquire,
+	.release = gk20a_instobj_release,
+	.rd32 = gk20a_instobj_rd32,
+	.wr32 = gk20a_instobj_wr32,
+	.map = gk20a_instobj_map,
+};
+
 static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 npages, u32 align,
-		       struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+		       struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_dma *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct device *dev = nv_device_base(nv_device(parent));
-	int ret;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
+	struct device *dev = subdev->device->dev;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
-				   (void **)&node);
+	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					&node->handle, GFP_KERNEL,
-					&priv->attrs);
+					&imem->attrs);
 	if (!node->cpuaddr) {
-		nv_error(priv, "cannot allocate DMA memory\n");
+		nvkm_error(subdev, "cannot allocate DMA memory\n");
 		return -ENOMEM;
 	}
 
 	/* alignment check */
 	if (unlikely(node->handle & (align - 1)))
-		nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
-			&node->handle, align);
+		nvkm_warn(subdev,
+			  "memory not aligned as requested: %pad (0x%x)\n",
+			  &node->handle, align);
 
 	/* present the memory as mappable using small pages */
 	node->r.type = 12;
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base._mem.offset = node->handle;
+	node->base.mem.offset = node->handle;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
 
 	return 0;
 }
 
 static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, u32 npages, u32 align,
-			 struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+			 struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_iommu *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass,
-				sizeof(*node) + sizeof(node->pages[0]) * npages,
-				(void **)&node);
+	if (!(node = kzalloc(sizeof(*node) +
+			     sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@
 		node->pages[i] = p;
 	}
 
-	mutex_lock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
 	/* Reserve area from GPU address space */
-	ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
-			   align >> priv->iommu_pgshift, &r);
-	mutex_unlock(priv->mm_mutex);
+	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+			   align >> imem->iommu_pgshift, &r);
+	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nv_error(priv, "virtual space is full!\n");
+		nvkm_error(subdev, "virtual space is full!\n");
 		goto free_pages;
 	}
 
 	/* Map into GPU address space */
 	for (i = 0; i < npages; i++) {
 		struct page *p = node->pages[i];
-		u32 offset = (r->offset + i) << priv->iommu_pgshift;
+		u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
-		ret = iommu_map(priv->domain, offset, page_to_phys(p),
+		ret = iommu_map(imem->domain, offset, page_to_phys(p),
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
-			nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
 			while (i-- > 0) {
 				offset -= PAGE_SIZE;
-				iommu_unmap(priv->domain, offset, PAGE_SIZE);
+				iommu_unmap(imem->domain, offset, PAGE_SIZE);
 			}
 			goto release_area;
 		}
 	}
 
 	/* Bit 34 indicates that an address is to be resolved through the IOMMU */
-	r->offset |= BIT(34 - priv->iommu_pgshift);
+	r->offset |= BIT(34 - imem->iommu_pgshift);
 
-	node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&r->rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&r->rl_entry, &node->base.mem.regions);
 
 	return 0;
 
 release_area:
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 
 free_pages:
 	for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@
 }
 
 static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 _size,
-		   struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		  struct nvkm_memory **pmemory)
 {
-	struct nvkm_instobj_args *args = data;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct gk20a_instobj_priv *node;
-	u32 size, align;
+	struct gk20a_instmem *imem = gk20a_instmem(base);
+	struct gk20a_instobj *node = NULL;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	int ret;
 
-	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-		 priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+		   imem->domain ? "IOMMU" : "DMA", size, align);
 
 	/* Round size and align to page bounds */
-	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
-	align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
 
-	if (priv->domain)
-		ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
-					      size >> PAGE_SHIFT, align, &node);
+	if (imem->domain)
+		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+					       align, &node);
 	else
-		ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
-					     size >> PAGE_SHIFT, align, &node);
-	*pobject = nv_object(node);
+		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+					     align, &node);
+	*pmemory = node ? &node->memory : NULL;
 	if (ret)
 		return ret;
 
-	node->mem = &node->_mem;
+	nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
+	node->imem = imem;
 
 	/* present the memory as mappable using small pages */
-	node->mem->size = size >> 12;
-	node->mem->memtype = 0;
-	node->mem->page_shift = 12;
+	node->mem.size = size >> 12;
+	node->mem.memtype = 0;
+	node->mem.page_shift = 12;
 
-	node->base.addr = node->mem->offset;
-	node->base.size = size;
-
-	nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		 size, align, node->mem->offset);
+	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+		   size, align, node->mem.offset);
 
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instobj_ctor,
-		.dtor = gk20a_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = gk20a_instobj_rd32,
-		.wr32 = gk20a_instobj_wr32,
-	},
-};
-
-
-
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_instmem_fini(struct nvkm_instmem *base)
 {
-	struct gk20a_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	gk20a_instmem(base)->addr = ~0ULL;
 }
 
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+	.fini = gk20a_instmem_fini,
+	.memory_new = gk20a_instobj_new,
+	.persistent = true,
+	.zero = false,
+};
+
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+		  struct nvkm_instmem **pimem)
 {
-	struct gk20a_instmem_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_instmem *imem;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 
-	spin_lock_init(&priv->lock);
+	if (tdev->iommu.domain) {
+		imem->domain = tdev->iommu.domain;
+		imem->mm = &tdev->iommu.mm;
+		imem->iommu_pgshift = tdev->iommu.pgshift;
+		imem->mm_mutex = &tdev->iommu.mutex;
 
-	plat = nv_device_to_platform(nv_device(parent));
-	if (plat->gpu->iommu.domain) {
-		priv->domain = plat->gpu->iommu.domain;
-		priv->mm = plat->gpu->iommu.mm;
-		priv->iommu_pgshift = plat->gpu->iommu.pgshift;
-		priv->mm_mutex = &plat->gpu->iommu.mutex;
-
-		nv_info(priv, "using IOMMU\n");
+		nvkm_info(&imem->base.subdev, "using IOMMU\n");
 	} else {
-		init_dma_attrs(&priv->attrs);
+		init_dma_attrs(&imem->attrs);
 		/*
 		 * We will access instmem through PRAMIN and thus do not need a
 		 * consistent CPU pointer or kernel mapping
 		 */
-		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
 
-		nv_info(priv, "using DMA API\n");
+		nvkm_info(&imem->base.subdev, "using DMA API\n");
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = gk20a_instmem_fini,
-	},
-	.instobj = &gk20a_instobj_oclass.base,
-}.base;
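
Throughout this rework the old (void *) downcasts are replaced by
container_of() helpers such as gk20a_instobj(p) above: the base object is
embedded in the implementation struct, and pointer arithmetic recovers the
wrapper from the base pointer the vfuncs receive. A self-contained sketch
(struct names abbreviated for illustration):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct memory {
	int placeholder;		/* stands in for struct nvkm_memory */
};

struct instobj {
	struct memory memory;		/* embedded base object */
	int extra;			/* implementation-private state */
};

int main(void)
{
	struct instobj node = { .extra = 7 };
	struct memory *memory = &node.memory;	/* what a vfunc receives */

	/* recover the wrapper, as gk20a_instobj(memory) does */
	struct instobj *back = container_of(memory, struct instobj, memory);
	printf("%d\n", back->extra);		/* prints 7 */
	return 0;
}
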
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 282143f..6133c8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,173 +21,207 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 
+struct nv04_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+};
+
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
+
+struct nv04_instobj {
+	struct nvkm_memory memory;
+	struct nv04_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->offset;
+}
+
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv04_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return device->pri + 0x700000 + iobj->node->offset;
+}
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
+{
+}
 
 static u32
-nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	return nv_ro32(priv, node->mem->offset + addr);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
 }
 
 static void
-nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	nv_wo32(priv, node->mem->offset + addr, data);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
-static void
-nv04_instobj_dtor(struct nvkm_object *object)
+static void *
+nv04_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	struct nvkm_subdev *subdev = (void *)priv;
-
-	mutex_lock(&subdev->mutex);
-	nvkm_mm_free(&priv->heap, &node->mem);
-	mutex_unlock(&subdev->mutex);
-
-	nvkm_instobj_destroy(&node->base);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
 }
 
+static const struct nvkm_memory_func
+nv04_instobj_func = {
+	.dtor = nv04_instobj_dtor,
+	.target = nv04_instobj_target,
+	.size = nv04_instobj_size,
+	.addr = nv04_instobj_addr,
+	.acquire = nv04_instobj_acquire,
+	.release = nv04_instobj_release,
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
 static int
-nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct nv04_instobj_priv *node;
-	struct nvkm_instobj_args *args = data;
-	struct nvkm_subdev *subdev = (void *)priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nv04_instobj *iobj;
 	int ret;
 
-	if (!args->align)
-		args->align = 1;
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
+	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	iobj->imem = imem;
 
-	mutex_lock(&subdev->mutex);
-	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
-			   args->align, &node->mem);
-	mutex_unlock(&subdev->mutex);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->length;
-	return 0;
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
 }
 
-struct nvkm_instobj_impl
-nv04_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instobj_ctor,
-		.dtor = nv04_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv04_instobj_rd32,
-		.wr32 = nv04_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
 static u32
-nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	return nv_rd32(object, 0x700000 + addr);
+	return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 {
-	return nv_wr32(object, 0x700000 + addr, data);
-}
-
-void
-nv04_instmem_dtor(struct nvkm_object *object)
-{
-	struct nv04_instmem_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_gpuobj_ref(NULL, &priv->vbios);
-	nvkm_mm_fini(&priv->heap);
-	if (priv->iomem)
-		iounmap(priv->iomem);
-	nvkm_instmem_destroy(&priv->base);
+	nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
 static int
-nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instmem_oneinit(struct nvkm_instmem *base)
 {
-	struct nv04_instmem_priv *priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
 	int ret;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
-	priv->base.reserved = 512 * 1024;
+	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv04_instmem_rd32,
-		.wr32 = nv04_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv04_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv04_instmem *imem = nv04_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv04_instmem = {
+	.dtor = nv04_instmem_dtor,
+	.oneinit = nv04_instmem_oneinit,
+	.rd32 = nv04_instmem_rd32,
+	.wr32 = nv04_instmem_wr32,
+	.memory_new = nv04_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv04_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv04_instmem *imem;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+	return 0;
+}
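
nv04_instobj_new() above carves each object out of a fixed PRAMIN heap with
nvkm_mm_head() while holding the subdev mutex. The real nvkm_mm tracks and
reuses freed ranges; the sketch below shows only the serialised
carve-with-alignment idea, using a simplified bump allocator and assuming a
power-of-two alignment (names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct heap {
	pthread_mutex_t mutex;
	uint32_t next;		/* first free byte */
	uint32_t size;		/* total heap size */
};

/* carve 'size' bytes at 'align' alignment; UINT32_MAX on exhaustion */
static uint32_t heap_alloc(struct heap *h, uint32_t size, uint32_t align)
{
	uint32_t offset = UINT32_MAX;

	if (!align)
		align = 1;	/* mirrors "align ? align : 1" above */

	pthread_mutex_lock(&h->mutex);
	uint32_t base = (h->next + align - 1) & ~(align - 1);
	if (base + size <= h->size) {
		h->next = base + size;
		offset = base;
	}
	pthread_mutex_unlock(&h->mutex);
	return offset;
}

int main(void)
{
	struct heap h = { PTHREAD_MUTEX_INITIALIZER, 0, 512 * 1024 };

	uint32_t a = heap_alloc(&h, 0x100, 0x100);	/* 0x0 */
	uint32_t b = heap_alloc(&h, 0x100, 0x1000);	/* rounded up to 0x1000 */
	printf("0x%x 0x%x\n", a, b);
	return 0;
}
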
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted file mode 100644
index 42b6c92..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NV04_INSTMEM_H__
-#define __NV04_INSTMEM_H__
-#include "priv.h"
-
-#include <core/mm.h>
-
-extern struct nvkm_instobj_impl nv04_instobj_oclass;
-
-struct nv04_instmem_priv {
-	struct nvkm_instmem base;
-
-	void __iomem *iomem;
-	struct nvkm_mm heap;
-
-	struct nvkm_gpuobj *vbios;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
-};
-
-static inline struct nv04_instmem_priv *
-nv04_instmem(void *obj)
-{
-	return (void *)nvkm_instmem(obj);
-}
-
-struct nv04_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mm_node *mem;
-};
-
-void nv04_instmem_dtor(struct nvkm_object *);
-
-int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
-		       u32 size, u32 align, struct nvkm_object **pobject);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b858..c054387 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 #include <engine/gr/nv40.h>
 
+struct nv40_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+	void __iomem *iomem;
+};
+
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+
+struct nv40_instobj {
+	struct nvkm_memory memory;
+	struct nv40_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->offset;
+}
+
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv40_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return iobj->imem->iomem + iobj->node->offset;
+}
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
+{
+}
+
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void *
+nv40_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv40_instobj_func = {
+	.dtor = nv40_instobj_dtor,
+	.target = nv40_instobj_target,
+	.size = nv40_instobj_size,
+	.addr = nv40_instobj_addr,
+	.acquire = nv40_instobj_acquire,
+	.release = nv40_instobj_release,
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
+static int
+nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nv40_instobj *iobj;
+	int ret;
+
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
+
+	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->imem = imem;
+
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
+}
+
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
 static u32
-nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
+	return ioread32_native(nv40_instmem(base)->iomem + addr);
 }
 
 static void
-nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
+	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
 }
 
 static int
-nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv40_instmem_oneinit(struct nvkm_instmem *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_instmem_priv *priv;
-	int ret, bar, vs;
-
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	/* map bar */
-	if (nv_device_resource_len(device, 2))
-		bar = 2;
-	else
-		bar = 3;
-
-	priv->iomem = ioremap(nv_device_resource_start(device, bar),
-			      nv_device_resource_len(device, bar));
-	if (!priv->iomem) {
-		nv_error(priv, "unable to map PRAMIN BAR\n");
-		return -EFAULT;
-	}
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
+	int ret, vs;
 
 	/* PRAMIN aperture maps over the end of VRAM; reserve enough space
 	 * to fit graphics contexts for every channel.  The magic numbers come
 	 * from engine/gr/nv40.c.
 	 */
-	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
-	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
-	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
-	else if (nv44_gr_class(priv))  priv->base.reserved = 0x4980 * vs;
-	else				  priv->base.reserved = 0x4a40 * vs;
-	priv->base.reserved += 16 * 1024;
-	priv->base.reserved *= 32;		/* per-channel */
-	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
-	priv->base.reserved += 512 * 1024;	/* object storage */
+	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
+	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
+	else				  imem->base.reserved = 0x4a40 * vs;
+	imem->base.reserved += 16 * 1024;
+	imem->base.reserved *= 32;		/* per-channel */
+	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	imem->base.reserved += 512 * 1024;	/* object storage */
+	imem->base.reserved = round_up(imem->base.reserved, 4096);
 
-	priv->base.reserved = round_up(priv->base.reserved, 4096);
-
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv40_instmem_rd32,
-		.wr32 = nv40_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv40_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	if (imem->iomem)
+		iounmap(imem->iomem);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv40_instmem = {
+	.dtor = nv40_instmem_dtor,
+	.oneinit = nv40_instmem_oneinit,
+	.rd32 = nv40_instmem_rd32,
+	.wr32 = nv40_instmem_wr32,
+	.memory_new = nv40_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv40_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv40_instmem *imem;
+	int bar;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+
+	/* map bar */
+	if (device->func->resource_size(device, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	imem->iomem = ioremap(device->func->resource_addr(device, bar),
+			      device->func->resource_size(device, bar));
+	if (!imem->iomem) {
+		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
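
nv40_instmem_oneinit() above sizes the PRAMIN reservation from the number of
enabled shader units and a per-chipset graphics-context size. A standalone
rerun of that arithmetic, with an assumed readout of register 0x001540 (on
real hardware the value comes from nvkm_rd32()):

#include <stdint.h>
#include <stdio.h>

static unsigned int hweight8(uint8_t v)	/* population count, as in the kernel */
{
	unsigned int n = 0;
	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* assume 0x001540 reads 0x00003f00: six units enabled */
	int vs = hweight8((0x00003f00 & 0x0000ff00) >> 8);

	uint32_t reserved = 0x6aa0 * vs;	/* chipset 0x40 context size */
	reserved += 16 * 1024;
	reserved *= 32;				/* per-channel */
	reserved += 512 * 1024;			/* pci(e)gart table */
	reserved += 512 * 1024;			/* object storage */
	reserved = round_up(reserved, 4096);

	printf("reserved = 0x%x bytes\n", (unsigned int)reserved);
	return 0;
}
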
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 8404143..6d512c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,149 +21,229 @@
  *
  * Authors: Ben Skeggs
  */
+#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
 #include "priv.h"
 
+#include <core/memory.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
+#include <subdev/mmu.h>
 
-struct nv50_instmem_priv {
+struct nv50_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 };
 
-struct nv50_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mem *mem;
-};
-
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
+
+struct nv50_instobj {
+	struct nvkm_memory memory;
+	struct nv50_instmem *imem;
+	struct nvkm_mem *mem;
+	struct nvkm_vma bar;
+	void *map;
+};
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_VRAM;
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv50_instobj(memory)->mem->offset;
+}
+
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+}
+
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int ret;
+
+	iobj->map = ERR_PTR(-ENOMEM);
+
+	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+	if (ret == 0) {
+		map = ioremap(device->func->resource_addr(device, 3) +
+			      (u32)iobj->bar.offset, size);
+		if (map) {
+			nvkm_memory_map(memory, &iobj->bar, 0);
+			iobj->map = map;
+		} else {
+			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+			nvkm_vm_put(&iobj->bar);
+		}
+	} else {
+		nvkm_warn(subdev, "PRAMIN exhausted\n");
+	}
+}
+
+static void
+nv50_instobj_release(struct nvkm_memory *memory)
+{
+	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
+static void __iomem *
+nv50_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_bar *bar = imem->base.subdev.device->bar;
+	struct nvkm_vm *vm;
+	unsigned long flags;
+
+	if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
+		nvkm_memory_boot(memory, vm);
+	if (!IS_ERR_OR_NULL(iobj->map))
+		return iobj->map;
+
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
 
 static u32
-nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
 static void
-nv50_instobj_dtor(struct nvkm_object *object)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
-	struct nv50_instobj_priv *node = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	pfb->ram->put(pfb, &node->mem);
-	nvkm_instobj_destroy(&node->base);
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	nvkm_vm_map_at(vma, offset, iobj->mem);
 }
 
+static void *
+nv50_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
+	if (!IS_ERR_OR_NULL(iobj->map)) {
+		nvkm_vm_put(&iobj->bar);
+		iounmap(iobj->map);
+	}
+	ram->func->put(ram, &iobj->mem);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv50_instobj_func = {
+	.dtor = nv50_instobj_dtor,
+	.target = nv50_instobj_target,
+	.size = nv50_instobj_size,
+	.addr = nv50_instobj_addr,
+	.boot = nv50_instobj_boot,
+	.acquire = nv50_instobj_acquire,
+	.release = nv50_instobj_release,
+	.rd32 = nv50_instobj_rd32,
+	.wr32 = nv50_instobj_wr32,
+	.map = nv50_instobj_map,
+};
+
 static int
-nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_instobj_args *args = data;
-	struct nv50_instobj_priv *node;
+	struct nv50_instmem *imem = nv50_instmem(base);
+	struct nv50_instobj *iobj;
+	struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
 	int ret;
 
-	args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
-	args->align = max((args->align + 4095) & ~4095, (u32)4096);
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
+	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	iobj->imem = imem;
+
+	size  = max((size  + 4095) & ~4095, (u32)4096);
+	align = max((align + 4095) & ~4095, (u32)4096);
+
+	ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
 	if (ret)
 		return ret;
 
-	ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->size << 12;
-	node->mem->page_shift = 12;
+	iobj->mem->page_shift = 12;
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-nv50_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instobj_ctor,
-		.dtor = nv50_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv50_instobj_rd32,
-		.wr32 = nv50_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
-static int
-nv50_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_instmem_fini(struct nvkm_instmem *base)
 {
-	struct nv50_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	nv50_instmem(base)->addr = ~0ULL;
 }
 
-static int
-nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+static const struct nvkm_instmem_func
+nv50_instmem = {
+	.fini = nv50_instmem_fini,
+	.memory_new = nv50_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv50_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
 {
-	struct nv50_instmem_priv *priv;
-	int ret;
+	struct nv50_instmem *imem;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->lock);
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 	return 0;
 }
-
-struct nvkm_oclass *
-nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = nv50_instmem_fini,
-	},
-	.instobj = &nv50_instobj_oclass.base,
-}.base;
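
nv50_instobj_rd32()/wr32() above reach VRAM through a sliding 1MiB PRAMIN
window: the upper bits of the target address select the window (its base,
shifted right 16 bits, is written to register 0x001700 only when it changes)
and the low 20 bits index into the aperture at 0x700000. A worked example of
the mask arithmetic, using a hypothetical object offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t obj_offset = 0x12345678ULL;	/* hypothetical VRAM address */
	uint64_t reg_offset = 0x10;		/* word within the object */

	uint64_t base = (obj_offset + reg_offset) & 0xffffff00000ULL;
	uint64_t addr = (obj_offset + reg_offset) & 0x000000fffffULL;

	/* window base 0x12300000, PRAMIN offset 0x45688 */
	printf("0x001700 <= 0x%llx, access at 0x%llx\n",
	       (unsigned long long)(base >> 16),
	       (unsigned long long)(0x700000 + addr));
	return 0;
}
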
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b10e292..ace4471 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,54 +1,20 @@
 #ifndef __NVKM_INSTMEM_PRIV_H__
 #define __NVKM_INSTMEM_PRIV_H__
+#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
 #include <subdev/instmem.h>
 
-struct nvkm_instobj_impl {
-	struct nvkm_oclass base;
+struct nvkm_instmem_func {
+	void *(*dtor)(struct nvkm_instmem *);
+	int (*oneinit)(struct nvkm_instmem *);
+	void (*fini)(struct nvkm_instmem *);
+	u32  (*rd32)(struct nvkm_instmem *, u32 addr);
+	void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
+	int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
+			  bool zero, struct nvkm_memory **);
+	bool persistent;
+	bool zero;
 };
 
-struct nvkm_instobj_args {
-	u32 size;
-	u32 align;
-};
-
-#define nvkm_instobj_create(p,e,o,d)                                        \
-	nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instobj_destroy(p) ({                                          \
-	struct nvkm_instobj *iobj = (p);                                    \
-	_nvkm_instobj_dtor(nv_object(iobj));                                \
-})
-#define nvkm_instobj_init(p)                                                \
-	nvkm_object_init(&(p)->base)
-#define nvkm_instobj_fini(p,s)                                              \
-	nvkm_object_fini(&(p)->base, (s))
-
-int  nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, int, void **);
-void _nvkm_instobj_dtor(struct nvkm_object *);
-#define _nvkm_instobj_init nvkm_object_init
-#define _nvkm_instobj_fini nvkm_object_fini
-
-struct nvkm_instmem_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *instobj;
-};
-
-#define nvkm_instmem_create(p,e,o,d)                                        \
-	nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instmem_destroy(p)                                             \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_instmem_init(p) ({                                             \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_init(nv_object(imem));                                \
-})
-#define nvkm_instmem_fini(p,s) ({                                           \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_fini(nv_object(imem), (s));                           \
-})
-
-int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-#define _nvkm_instmem_dtor _nvkm_subdev_dtor
-int _nvkm_instmem_init(struct nvkm_object *);
-int _nvkm_instmem_fini(struct nvkm_object *, bool);
+void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
+		       int index, struct nvkm_instmem *);
 #endif
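
The rewritten priv.h reduces the subdev-facing interface to the
nvkm_instmem_func table plus nvkm_instmem_ctor(), in place of the old oclass
machinery. A minimal userspace sketch of that bind-the-table constructor
pattern (struct and function names are illustrative, not the nvkm API):

#include <stdio.h>

struct imem;

struct imem_func {
	unsigned int (*rd32)(struct imem *, unsigned int addr);
	void (*wr32)(struct imem *, unsigned int addr, unsigned int data);
};

struct imem {
	const struct imem_func *func;
	unsigned int regs[64];		/* stand-in for the real aperture */
};

static unsigned int demo_rd32(struct imem *imem, unsigned int addr)
{
	return imem->regs[addr / 4];
}

static void demo_wr32(struct imem *imem, unsigned int addr, unsigned int data)
{
	imem->regs[addr / 4] = data;
}

static const struct imem_func demo_func = {
	.rd32 = demo_rd32,
	.wr32 = demo_wr32,
};

/* mirrors nvkm_instmem_ctor(): bind the per-implementation table */
static void imem_ctor(const struct imem_func *func, struct imem *imem)
{
	imem->func = func;
}

int main(void)
{
	struct imem imem = { 0 };

	imem_ctor(&demo_func, &imem);
	imem.func->wr32(&imem, 8, 0xdead);
	printf("0x%x\n", imem.func->rd32(&imem, 8));	/* 0xdead */
	return 0;
}
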
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 2fb87fb..930d25b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,102 +23,110 @@
  */
 #include "priv.h"
 
-static int
+#include <subdev/fb.h>
+
+int
 nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	int ret;
-
-	ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
+	int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
 	if (ret)
 		*pnode = NULL;
-
 	return ret;
 }
 
-static void
+void
 nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	nvkm_mm_free(&priv->tags, pnode);
+	nvkm_mm_free(&ltc->tags, pnode);
 }
 
-static void
+void
 nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
 	const u32 limit = first + count - 1;
 
-	BUG_ON((first > limit) || (limit >= priv->num_tags));
+	BUG_ON((first > limit) || (limit >= ltc->num_tags));
 
-	impl->cbc_clear(priv, first, limit);
-	impl->cbc_wait(priv);
+	ltc->func->cbc_clear(ltc, first, limit);
+	ltc->func->cbc_wait(ltc);
 }
 
-static int
+int
 nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
-	impl->zbc_clear_color(priv, index, color);
-	return index;
-}
-
-static int
-nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
-{
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	priv->zbc_depth[index] = depth;
-	impl->zbc_clear_depth(priv, index, depth);
+	memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
+	ltc->func->zbc_clear_color(ltc, index, color);
 	return index;
 }
 
 int
-_nvkm_ltc_init(struct nvkm_object *object)
+nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-	int ret, i;
+	ltc->zbc_depth[index] = depth;
+	ltc->func->zbc_clear_depth(ltc, index, depth);
+	return index;
+}
 
-	ret = nvkm_subdev_init(&priv->base.base);
-	if (ret)
-		return ret;
+static void
+nvkm_ltc_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	ltc->func->intr(ltc);
+}
 
-	for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
-		impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
-		impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+static int
+nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	return ltc->func->oneinit(ltc);
+}
+
+static int
+nvkm_ltc_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	int i;
+
+	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+		ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
+		ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
 	}
 
+	ltc->func->init(ltc);
 	return 0;
 }
 
-int
-nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_ltc_dtor(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_ltc_impl *impl = (void *)oclass;
-	struct nvkm_ltc_priv *priv;
-	int ret;
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+	nvkm_mm_fini(&ltc->tags);
+	if (ram)
+		nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+	return ltc;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG",
-				  "l2c", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+nvkm_ltc = {
+	.dtor = nvkm_ltc_dtor,
+	.oneinit = nvkm_ltc_oneinit,
+	.init = nvkm_ltc_init,
+	.intr = nvkm_ltc_intr,
+};
 
-	memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
-	memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
+int
+nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_ltc **pltc)
+{
+	struct nvkm_ltc *ltc;
 
-	priv->base.base.intr = impl->intr;
-	priv->base.tags_alloc = nvkm_ltc_tags_alloc;
-	priv->base.tags_free = nvkm_ltc_tags_free;
-	priv->base.tags_clear = nvkm_ltc_tags_clear;
-	priv->base.zbc_min = 1; /* reserve 0 for disabled */
-	priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
-	priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
-	priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+	ltc->func = func;
+	ltc->zbc_min = 1; /* reserve 0 for disabled */
+	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
 	return 0;
 }
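
With this conversion the LTC object is a plain kzalloc()'d struct: nvkm_ltc_new_() wires the generic nvkm_subdev_func (dtor/oneinit/init/intr) into the subdev core, while the per-chip nvkm_ltc_func supplies the hardware hooks. A sketch of the caller side, i.e. how device probe code would construct the subdev (NVKM_SUBDEV_LTC and device->ltc are assumptions based on the surrounding series):

	/* sketch: chipset-table code constructing the LTC subdev */
	int ret = gf100_ltc_new(device, NVKM_SUBDEV_LTC, &device->ltc);
	if (ret)
		return ret;
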
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 7fb5ea0..45ac765 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -28,38 +28,47 @@
 #include <subdev/timer.h>
 
 void
-gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e8cc, start);
-	nv_wr32(priv, 0x17e8d0, limit);
-	nv_wr32(priv, 0x17e8c8, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e8cc, start);
+	nvkm_wr32(device, 0x17e8d0, limit);
+	nvkm_wr32(device, 0x17e8c8, 0x00000004);
 }
 
 void
-gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 void
-gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea48, color[0]);
-	nv_wr32(priv, 0x17ea4c, color[1]);
-	nv_wr32(priv, 0x17ea50, color[2]);
-	nv_wr32(priv, 0x17ea54, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea48, color[0]);
+	nvkm_wr32(device, 0x17ea4c, color[1]);
+	nvkm_wr32(device, 0x17ea50, color[2]);
+	nvkm_wr32(device, 0x17ea54, color[3]);
 }
 
 void
-gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea58, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea58, depth);
 }
 
 static const struct nvkm_bitfield
@@ -81,88 +90,60 @@
 };
 
 static void
-gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 intr = nv_rd32(priv, base + 0x020);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
+	u32 intr = nvkm_rd32(device, base + 0x020);
 	u32 stat = intr & 0x0000ffff;
+	char msg[128];
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
-		nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
-		pr_cont("\n");
+		nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
 	}
 
-	nv_wr32(priv, base + 0x020, intr);
+	nvkm_wr32(device, base + 0x020, intr);
 }
 
 void
-gf100_ltc_intr(struct nvkm_subdev *subdev)
+gf100_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gf100_ltc_lts_intr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gf100_ltc_lts_intr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
-static int
-gf100_ltc_init(struct nvkm_object *object)
-{
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
-
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
-
-void
-gf100_ltc_dtor(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-
-	nvkm_mm_fini(&priv->tags);
-	if (pfb->ram)
-		nvkm_mm_free(&pfb->vram, &priv->tag_ram);
-
-	nvkm_ltc_destroy(priv);
-}
-
 /* TODO: Figure out tag memory details and drop the over-cautious allocation.
  */
 int
-gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
+gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 {
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
 	/* No VRAM, no tags for now. */
-	if (!pfb->ram) {
-		priv->num_tags = 0;
+	if (!ram) {
+		ltc->num_tags = 0;
 		goto mm_init;
 	}
 
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
-	priv->num_tags = (pfb->ram->size >> 17) / 4;
-	if (priv->num_tags > (1 << 17))
-		priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
-	priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+	ltc->num_tags = (ram->size >> 17) / 4;
+	if (ltc->num_tags > (1 << 17))
+		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
 
-	tag_align = priv->ltc_nr * 0x800;
+	tag_align = ltc->ltc_nr * 0x800;
 	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
 
 	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -173,72 +154,71 @@
 	 *
 	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
 	 */
-	tag_size  = (priv->num_tags / 64) * 0x6000 + tag_margin;
+	tag_size  = (ltc->num_tags / 64) * 0x6000 + tag_margin;
 	tag_size += tag_align;
 	tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
-			   &priv->tag_ram);
+	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
+			   &ltc->tag_ram);
 	if (ret) {
-		priv->num_tags = 0;
+		ltc->num_tags = 0;
 	} else {
-		u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
+		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
 
 		tag_base += tag_align - 1;
-		ret = do_div(tag_base, tag_align);
+		do_div(tag_base, tag_align);
 
-		priv->tag_base = tag_base;
+		ltc->tag_base = tag_base;
 	}
 
 mm_init:
-	ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
-	return ret;
+	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
 }
 
 int
-gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x022554);
+	const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
+	int i;
 
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x022554);
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+	ltc->lts_nr = slice;
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gf100_ltc_intr;
-	return 0;
+	return gf100_ltc_oneinit_tag_ram(ltc);
 }
 
-struct nvkm_oclass *
-gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gf100_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static void
+gf100_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
+
+	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
+}
+
+static const struct nvkm_ltc_func
+gf100_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gf100_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+}
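
The old nv_wait(priv, addr, ~0, 0) polls are rewritten with nvkm_msec(), a statement-expression macro that runs its body repeatedly until the body executes break or the time budget is exhausted. A deliberately simplified model of the idiom, assuming the real definition in <subdev/timer.h> (which reads the actual timer and returns a negative value on timeout):

/* illustrative model only; the in-tree macro differs in detail */
#define nvkm_msec_sketch(device, ms, ...) ({                          \
	s64 _taken = 0, _budget = (s64)(ms) * 1000000; /* ns */       \
	for (; _taken < _budget; _taken += 1000 /* ~1us per poll */) {\
		__VA_ARGS__                                            \
	}                                                              \
	_taken < _budget ? _taken : -ETIMEDOUT;                        \
})

In gf100_ltc_cbc_wait() above, the body breaks out as soon as the per-slice status register reads back zero, so the success path returns early instead of spinning for the full two seconds.
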
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index d53959b..839e6b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -23,37 +23,32 @@
  */
 #include "priv.h"
 
-static int
-gk104_ltc_init(struct nvkm_object *object)
+static void
+gk104_ltc_init(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
 
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e000, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 }
 
-struct nvkm_oclass *
-gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gk104_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static const struct nvkm_ltc_func
+gk104_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gk104_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 6b3f6f4..389331b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -27,127 +27,121 @@
 #include <subdev/timer.h>
 
 static void
-gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e270, start);
-	nv_wr32(priv, 0x17e274, limit);
-	nv_wr32(priv, 0x17e26c, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e270, start);
+	nvkm_wr32(device, 0x17e274, limit);
+	nvkm_wr32(device, 0x17e26c, 0x00000004);
 }
 
 static void
-gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 static void
-gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e33c, color[0]);
-	nv_wr32(priv, 0x17e340, color[1]);
-	nv_wr32(priv, 0x17e344, color[2]);
-	nv_wr32(priv, 0x17e348, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e33c, color[0]);
+	nvkm_wr32(device, 0x17e340, color[1]);
+	nvkm_wr32(device, 0x17e344, color[2]);
+	nvkm_wr32(device, 0x17e348, color[3]);
 }
 
 static void
-gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e34c, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e34c, depth);
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 stat = nv_rd32(priv, base + 0x00c);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
-		nv_wr32(priv, base + 0x00c, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
+		nvkm_wr32(device, base + 0x00c, stat);
 	}
 }
 
 static void
-gm107_ltc_intr(struct nvkm_subdev *subdev)
+gm107_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gm107_ltc_lts_isr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gm107_ltc_lts_isr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
 static int
-gm107_ltc_init(struct nvkm_object *object)
+gm107_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x021c14);
+	const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
+	int i;
 
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e27c, priv->ltc_nr);
-	nv_wr32(priv, 0x17e278, priv->tag_base);
-	nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
-
-static int
-gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
-
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x021c14);
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
+	ltc->lts_nr = slice;
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	return gf100_ltc_oneinit_tag_ram(ltc);
 }
 
-struct nvkm_oclass *
-gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xff),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gm107_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static void
+gm107_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
+
+	nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e278, ltc->tag_base);
+	nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
+}
+
+static const struct nvkm_ltc_func
+gm107_ltc = {
+	.oneinit = gm107_ltc_oneinit,
+	.init = gm107_ltc_init,
 	.intr = gm107_ltc_intr,
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gm107_ltc_zbc_clear_color,
 	.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+}
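
All three ZBC paths use the same index-then-data idiom: nvkm_mask() selects the table slot in the index register, then nvkm_wr32() stores the clear values. For readers new to the accessors, nvkm_mask() is a read-modify-write helper; a sketch of its assumed semantics (not the in-tree definition):

static inline u32
nvkm_mask_sketch(struct nvkm_device *device, u32 addr, u32 mask, u32 data)
{
	u32 temp = nvkm_rd32(device, addr);
	nvkm_wr32(device, addr, (temp & ~mask) | data);
	return temp; /* old value, useful for save/restore callers */
}
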
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 09537d7..4e05037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,69 +1,29 @@
 #ifndef __NVKM_LTC_PRIV_H__
 #define __NVKM_LTC_PRIV_H__
+#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
 #include <subdev/ltc.h>
 
-#include <core/mm.h>
-struct nvkm_fb;
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
+		  int index, struct nvkm_ltc **);
 
-struct nvkm_ltc_priv {
-	struct nvkm_ltc base;
-	u32 ltc_nr;
-	u32 lts_nr;
+struct nvkm_ltc_func {
+	int  (*oneinit)(struct nvkm_ltc *);
+	void (*init)(struct nvkm_ltc *);
+	void (*intr)(struct nvkm_ltc *);
 
-	u32 num_tags;
-	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
-
-	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
-	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
-};
-
-#define nvkm_ltc_create(p,e,o,d)                                               \
-	nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_ltc_destroy(p) ({                                                 \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_dtor(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_init(p) ({                                                    \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_init(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_fini(p,s) ({                                                  \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_fini(nv_object(_priv), (s));                                 \
-})
-
-int  nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-
-#define _nvkm_ltc_dtor _nvkm_subdev_dtor
-int _nvkm_ltc_init(struct nvkm_object *);
-#define _nvkm_ltc_fini _nvkm_subdev_fini
-
-int  gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gf100_ltc_dtor(struct nvkm_object *);
-int  gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
-int  gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
-void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-
-struct nvkm_ltc_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-
-	void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
-	void (*cbc_wait)(struct nvkm_ltc_priv *);
+	void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit);
+	void (*cbc_wait)(struct nvkm_ltc *);
 
 	int zbc;
-	void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
-	void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+	void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
+	void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
 };
 
-void gf100_ltc_intr(struct nvkm_subdev *);
-void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
-void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
-void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
-void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+int gf100_ltc_oneinit(struct nvkm_ltc *);
+int gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *);
+void gf100_ltc_intr(struct nvkm_ltc *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 #endif
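
The nvkm_ltc(p) macro at the top of this header replaces the old untyped (void *) downcasts from nvkm_object with a type-checked container_of() on the embedded subdev. Spelled out as a standalone illustration (this is the same pattern nvkm_ltc_intr() in base.c reaches via the macro):

static void
example_intr(struct nvkm_subdev *subdev)
{
	/* recover the enclosing LTC object from its embedded subdev */
	struct nvkm_ltc *ltc = container_of(subdev, struct nvkm_ltc, subdev);
	ltc->func->intr(ltc);
}
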
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 721643f..bef325d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,11 +1,7 @@
 nvkm-y += nvkm/subdev/mc/base.o
 nvkm-y += nvkm/subdev/mc/nv04.o
-nvkm-y += nvkm/subdev/mc/nv40.o
 nvkm-y += nvkm/subdev/mc/nv44.o
-nvkm-y += nvkm/subdev/mc/nv4c.o
 nvkm-y += nvkm/subdev/mc/nv50.o
-nvkm-y += nvkm/subdev/mc/g94.o
 nvkm-y += nvkm/subdev/mc/g98.o
 nvkm-y += nvkm/subdev/mc/gf100.o
-nvkm-y += nvkm/subdev/mc/gf106.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 5b051a2..954fbbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -23,147 +23,101 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 
-static inline void
-nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data)
+void
+nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
-	const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc);
-	if (impl->unk260)
-		impl->unk260(pmc, data);
-}
-
-static inline u32
-nvkm_mc_intr_mask(struct nvkm_mc *pmc)
-{
-	u32 intr = nv_rd32(pmc, 0x000100);
-	if (intr == 0xffffffff) /* likely fallen off the bus */
-		intr = 0x00000000;
-	return intr;
-}
-
-static irqreturn_t
-nvkm_mc_intr(int irq, void *arg)
-{
-	struct nvkm_mc *pmc = arg;
-	const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
-	const struct nvkm_mc_intr *map = oclass->intr;
-	struct nvkm_subdev *unit;
-	u32 intr;
-
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	nv_rd32(pmc, 0x000140);
-	intr = nvkm_mc_intr_mask(pmc);
-	if (pmc->use_msi)
-		oclass->msi_rearm(pmc);
-
-	if (intr) {
-		u32 stat = intr = nvkm_mc_intr_mask(pmc);
-		while (map->stat) {
-			if (intr & map->stat) {
-				unit = nvkm_subdev(pmc, map->unit);
-				if (unit && unit->intr)
-					unit->intr(unit);
-				stat &= ~map->stat;
-			}
-			map++;
-		}
-
-		if (stat)
-			nv_error(pmc, "unknown intr 0x%08x\n", stat);
-	}
-
-	nv_wr32(pmc, 0x000140, 0x00000001);
-	return intr ? IRQ_HANDLED : IRQ_NONE;
-}
-
-int
-_nvkm_mc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_mc *pmc = (void *)object;
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	return nvkm_subdev_fini(&pmc->base, suspend);
-}
-
-int
-_nvkm_mc_init(struct nvkm_object *object)
-{
-	struct nvkm_mc *pmc = (void *)object;
-	int ret = nvkm_subdev_init(&pmc->base);
-	if (ret)
-		return ret;
-	nv_wr32(pmc, 0x000140, 0x00000001);
-	return 0;
+	if (mc->func->unk260)
+		mc->func->unk260(mc, data);
 }
 
 void
-_nvkm_mc_dtor(struct nvkm_object *object)
+nvkm_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_mc *pmc = (void *)object;
-	free_irq(pmc->irq, pmc);
-	if (pmc->use_msi)
-		pci_disable_msi(device->pdev);
-	nvkm_subdev_destroy(&pmc->base);
+	return mc->func->intr_unarm(mc);
 }
 
-int
-nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *bclass, int length, void **pobject)
+void
+nvkm_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	const struct nvkm_mc_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_mc *pmc;
-	int ret;
+	return mc->func->intr_rearm(mc);
+}
 
-	ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
-				  "master", length, pobject);
-	pmc = *pobject;
-	if (ret)
-		return ret;
+static u32
+nvkm_mc_intr_mask(struct nvkm_mc *mc)
+{
+	u32 intr = mc->func->intr_mask(mc);
+	if (WARN_ON_ONCE(intr == 0xffffffff))
+		intr = 0; /* likely fallen off the bus */
+	return intr;
+}
 
-	pmc->unk260 = nvkm_mc_unk260;
+void
+nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	struct nvkm_subdev *subdev;
+	const struct nvkm_mc_intr *map = mc->func->intr;
+	u32 stat, intr;
 
-	if (nv_device_is_pci(device)) {
-		switch (device->pdev->device & 0x0ff0) {
-		case 0x00f0:
-		case 0x02e0:
-			/* BR02? NFI how these would be handled yet exactly */
-			break;
-		default:
-			switch (device->chipset) {
-			case 0xaa:
-				/* reported broken, nv also disable it */
-				break;
-			default:
-				pmc->use_msi = true;
-				break;
-			}
+	stat = intr = nvkm_mc_intr_mask(mc);
+	while (map->stat) {
+		if (intr & map->stat) {
+			subdev = nvkm_device_subdev(device, map->unit);
+			if (subdev)
+				nvkm_subdev_intr(subdev);
+			stat &= ~map->stat;
 		}
-
-		pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
-					    pmc->use_msi);
-
-		if (pmc->use_msi && oclass->msi_rearm) {
-			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
-			if (pmc->use_msi) {
-				nv_info(pmc, "MSI interrupts enabled\n");
-				oclass->msi_rearm(pmc);
-			}
-		} else {
-			pmc->use_msi = false;
-		}
+		map++;
 	}
 
-	ret = nv_device_get_irq(device, true);
-	if (ret < 0)
-		return ret;
-	pmc->irq = ret;
+	if (stat)
+		nvkm_error(&mc->subdev, "intr %08x\n", stat);
+	*handled = intr != 0;
+}
 
-	ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc);
-	if (ret < 0)
-		return ret;
+static int
+nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	nvkm_mc_intr_unarm(mc);
+	return 0;
+}
 
+static int
+nvkm_mc_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	if (mc->func->init)
+		mc->func->init(mc);
+	nvkm_mc_intr_rearm(mc);
+	return 0;
+}
+
+static void *
+nvkm_mc_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_mc(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_mc = {
+	.dtor = nvkm_mc_dtor,
+	.init = nvkm_mc_init,
+	.fini = nvkm_mc_fini,
+};
+
+int
+nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_mc **pmc)
+{
+	struct nvkm_mc *mc;
+
+	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+	mc->func = func;
 	return 0;
 }
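
Note what disappeared from mc/base.c: the request_irq()/MSI management that used to live in nvkm_mc_create_() is gone, and MC now only exposes the unarm/intr/rearm hooks; the interrupt line itself is owned further up the stack. A sketch of how a top-level handler would drive the new hooks (the function and field names here are assumptions; the actual owner in this series is the device/PCI layer):

static irqreturn_t
device_intr_sketch(int irq, void *arg)
{
	struct nvkm_device *device = arg;
	bool handled = false;

	nvkm_mc_intr_unarm(device->mc);     /* mask delivery to the host */
	nvkm_mc_intr(device->mc, &handled); /* dispatch to subdev handlers */
	nvkm_mc_intr_rearm(device->mc);     /* unmask again */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
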
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
deleted file mode 100644
index f042e7d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-g94_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 8ab7f12..7344ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,38 +21,40 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static const struct nvkm_mc_intr
 g98_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_SEC },	/* NV84:NVA3 */
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00040000, NVDEV_SUBDEV_PMU },	/* NVA3:NVC0 */
-	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x00400000, NVDEV_ENGINE_CE0 },	/* NVA3-     */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0042d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_SEC },	/* NV84:NVA3 */
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00040000, NVKM_SUBDEV_PMU },	/* NVA3:NVC0 */
+	{ 0x00080000, NVKM_SUBDEV_THERM },	/* NVA3:NVC0 */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
+	{ 0x00400000, NVKM_ENGINE_CE0 },	/* NVA3-     */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0042d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-struct nvkm_oclass *
-g98_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+g98_mc = {
+	.init = nv50_mc_init,
 	.intr = g98_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&g98_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 2425984..122fe69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,56 +21,77 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 gf100_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000020, NVDEV_ENGINE_CE0 },
-	{ 0x00000040, NVDEV_ENGINE_CE1 },
-	{ 0x00000080, NVDEV_ENGINE_CE2 },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00002000, NVDEV_SUBDEV_FB },
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00040000, NVDEV_SUBDEV_THERM },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C },	/* PMGR->I2C/AUX */
-	{ 0x01000000, NVDEV_SUBDEV_PMU },
-	{ 0x02000000, NVDEV_SUBDEV_LTC },
-	{ 0x08000000, NVDEV_SUBDEV_FB },
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x40000000, NVDEV_SUBDEV_IBUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000020, NVKM_ENGINE_CE0 },
+	{ 0x00000040, NVKM_ENGINE_CE1 },
+	{ 0x00000080, NVKM_ENGINE_CE2 },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00002000, NVKM_SUBDEV_FB },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{},
 };
 
-static void
-gf100_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+gf100_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr32(priv, 0x088704, 0x00000000);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_wr32(device, 0x000144, 0x00000000);
+	nvkm_rd32(device, 0x000140);
 }
 
 void
-gf100_mc_unk260(struct nvkm_mc *pmc, u32 data)
+gf100_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	nv_wr32(pmc, 0x000260, data);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
+	nvkm_wr32(device, 0x000144, 0x00000001);
 }
 
-struct nvkm_oclass *
-gf100_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+u32
+gf100_mc_intr_mask(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x000100);
+	u32 intr1 = nvkm_rd32(device, 0x000104);
+	return intr0 | intr1;
+}
+
+void
+gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
+{
+	nvkm_wr32(mc->subdev.device, 0x000260, data);
+}
+
+static const struct nvkm_mc_func
+gf100_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = gf100_mc_msi_rearm,
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
 	.unk260 = gf100_mc_unk260,
-}.base;
+};
+
+int
+gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
deleted file mode 100644
index 8d2a8f4..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-gf106_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-	.unk260 = gf100_mc_unk260,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 43b2774..d92efb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,17 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-struct nvkm_oclass *
-gk20a_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+gk20a_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
+};
+
+int
+gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 3271382..d282ec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,58 +21,63 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv04_mc_intr[] = {
-	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00010000, NVDEV_ENGINE_DISP },
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
-	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x00000001, NVKM_ENGINE_MPEG },	/* NV17- MPEG/ME */
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00010000, NVKM_ENGINE_DISP },
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV40- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x01000000, NVKM_ENGINE_DISP },	/* NV04- PCRTC0 */
+	{ 0x02000000, NVKM_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{}
 };
 
-int
-nv04_mc_init(struct nvkm_object *object)
+void
+nv04_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
-	nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
-
-	return nvkm_mc_init(&priv->base);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_rd32(device, 0x000140);
 }
 
-int
-nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+void
+nv04_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv;
-	int ret;
-
-	ret = nvkm_mc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv04_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+u32
+nv04_mc_intr_mask(struct nvkm_mc *mc)
+{
+	return nvkm_rd32(mc->subdev.device, 0x000100);
+}
+
+void
+nv04_mc_init(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
+	nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
+}
+
+static const struct nvkm_mc_func
+nv04_mc = {
+	.init = nv04_mc_init,
 	.intr = nv04_mc_intr,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+}
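
One detail worth noting in nv04_mc_intr_unarm(): the nvkm_rd32() straight after the write is a posted-write flush, ensuring the disable has actually reached the chip before the caller proceeds; gf100_mc_intr_unarm() above does the same after clearing both enable registers.
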
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
deleted file mode 100644
index 411de3d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NVKM_MC_NV04_H__
-#define __NVKM_MC_NV04_H__
-#include "priv.h"
-
-struct nv04_mc_priv {
-	struct nvkm_mc base;
-};
-
-int  nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-extern const struct nvkm_mc_intr nv04_mc_intr[];
-int  nv04_mc_init(struct nvkm_object *);
-void nv40_mc_msi_rearm(struct nvkm_mc *);
-int  nv44_mc_init(struct nvkm_object *object);
-int  nv50_mc_init(struct nvkm_object *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
-extern const struct nvkm_mc_intr gf100_mc_intr[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
deleted file mode 100644
index b761305..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-void
-nv40_mc_msi_rearm(struct nvkm_mc *pmc)
-{
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr08(priv, 0x088068, 0xff);
-}
-
-struct nvkm_oclass *
-nv40_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 2c7f7c7..9a3ac99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,33 +21,33 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-int
-nv44_mc_init(struct nvkm_object *object)
+void
+nv44_mc_init(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	u32 tmp = nv_rd32(priv, 0x10020c);
+	struct nvkm_device *device = mc->subdev.device;
+	u32 tmp = nvkm_rd32(device, 0x10020c);
 
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
 
-	nv_wr32(priv, 0x001700, tmp);
-	nv_wr32(priv, 0x001704, 0);
-	nv_wr32(priv, 0x001708, 0);
-	nv_wr32(priv, 0x00170c, tmp);
-
-	return nvkm_mc_init(&priv->base);
+	nvkm_wr32(device, 0x001700, tmp);
+	nvkm_wr32(device, 0x001704, 0);
+	nvkm_wr32(device, 0x001708, 0);
+	nvkm_wr32(device, 0x00170c, tmp);
 }
 
-struct nvkm_oclass *
-nv44_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x44),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+nv44_mc = {
+	.init = nv44_mc_init,
 	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
deleted file mode 100644
index c0aac7e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2014 Ilia Mirkin
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ilia Mirkin
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x4c),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 40e3019..5f27d7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,52 +21,44 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv50_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
-	{ 0x00000001, NVDEV_ENGINE_MPEG },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_CIPHER },	/* NV84- */
-	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV84- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0002d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_CIPHER },	/* NV84- */
+	{ 0x00008000, NVKM_ENGINE_BSP },	/* NV84- */
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV84- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0002d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-static void
-nv50_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+nv50_mc_init(struct nvkm_mc *mc)
 {
-	struct nvkm_device *device = nv_device(pmc);
-	pci_write_config_byte(device->pdev, 0x68, 0xff);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
 }
 
+static const struct nvkm_mc_func
+nv50_mc = {
+	.init = nv50_mc_init,
+	.intr = nv50_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
 int
-nv50_mc_init(struct nvkm_object *object)
+nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
-	return nvkm_mc_init(&priv->base);
+	return nvkm_mc_new_(&nv50_mc, device, index, pmc);
 }
-
-struct nvkm_oclass *
-nv50_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv50_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d2cad07..307f6c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,36 +1,42 @@
 #ifndef __NVKM_MC_PRIV_H__
 #define __NVKM_MC_PRIV_H__
+#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
-#define nvkm_mc_create(p,e,o,d)                                             \
-	nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mc_destroy(p) ({                                               \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc));        \
-})
-#define nvkm_mc_init(p) ({                                                  \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc));        \
-})
-#define nvkm_mc_fini(p,s) ({                                                \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s));   \
-})
-
-int  nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void _nvkm_mc_dtor(struct nvkm_object *);
-int  _nvkm_mc_init(struct nvkm_object *);
-int  _nvkm_mc_fini(struct nvkm_object *, bool);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
+		 int index, struct nvkm_mc **);
 
 struct nvkm_mc_intr {
 	u32 stat;
 	u32 unit;
 };
 
-struct nvkm_mc_oclass {
-	struct nvkm_oclass base;
+struct nvkm_mc_func {
+	void (*init)(struct nvkm_mc *);
 	const struct nvkm_mc_intr *intr;
-	void (*msi_rearm)(struct nvkm_mc *);
+	/* disable reporting of interrupts to host */
+	void (*intr_unarm)(struct nvkm_mc *);
+	/* enable reporting of interrupts to host */
+	void (*intr_rearm)(struct nvkm_mc *);
+	/* retrieve pending interrupt mask (NV_PMC_INTR) */
+	u32 (*intr_mask)(struct nvkm_mc *);
 	void (*unk260)(struct nvkm_mc *, u32);
 };
 
+void nv04_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv04_mc_intr[];
+void nv04_mc_intr_unarm(struct nvkm_mc *);
+void nv04_mc_intr_rearm(struct nvkm_mc *);
+u32 nv04_mc_intr_mask(struct nvkm_mc *);
+
+void nv44_mc_init(struct nvkm_mc *);
+
+void nv50_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv50_mc_intr[];
+
+extern const struct nvkm_mc_intr gf100_mc_intr[];
+void gf100_mc_intr_unarm(struct nvkm_mc *);
+void gf100_mc_intr_rearm(struct nvkm_mc *);
+u32 gf100_mc_intr_mask(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec..e04a229 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/fb.h>
+#include "priv.h"
 
 #include <core/gpuobj.h>
+#include <subdev/fb.h>
 
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
@@ -32,12 +32,12 @@
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_mm_node *r;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	delta = 0;
@@ -46,14 +46,14 @@
 		u32 num  = r->length >> bits;
 
 		while (num) {
-			struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
 				end = max;
 			len = end - pte;
 
-			mmu->map(vma, pgt, node, pte, len, phys, delta);
+			mmu->func->map(vma, pgt, node, pte, len, phys, delta);
 
 			num -= len;
 			pte += len;
@@ -67,7 +67,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -76,20 +76,20 @@
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	unsigned m, sglen;
 	u32 end, len;
 	int i;
 	struct scatterlist *sg;
 
 	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
 
 		end = pte + sglen;
@@ -100,7 +100,7 @@
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 
@@ -115,7 +115,7 @@
 			for (; m < sglen; m++) {
 				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-				mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 				num--;
 				pte++;
 				if (num == 0)
@@ -125,7 +125,7 @@
 
 	}
 finish:
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -135,24 +135,24 @@
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->map_sg(vma, pgt, mem, pte, len, list);
+		mmu->func->map_sg(vma, pgt, mem, pte, len, list);
 
 		num  -= len;
 		pte  += len;
@@ -163,7 +163,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -183,24 +183,24 @@
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->unmap(pgt, pte, len);
+		mmu->func->unmap(vma, pgt, pte, len);
 
 		num -= len;
 		pte += len;
@@ -210,7 +210,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -225,7 +225,7 @@
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd;
 	struct nvkm_vm_pgt *vpgt;
-	struct nvkm_gpuobj *pgt;
+	struct nvkm_memory *pgt;
 	u32 pde;
 
 	for (pde = fpde; pde <= lpde; pde++) {
@@ -233,16 +233,14 @@
 		if (--vpgt->refcount[big])
 			continue;
 
-		pgt = vpgt->obj[big];
-		vpgt->obj[big] = NULL;
+		pgt = vpgt->mem[big];
+		vpgt->mem[big] = NULL;
 
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
+		nvkm_memory_del(&pgt);
 	}
 }
 
@@ -252,34 +250,23 @@
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nvkm_vm_pgd *vpgd;
-	struct nvkm_gpuobj *pgt;
-	int big = (type != mmu->spg_shift);
+	int big = (type != mmu->func->spg_shift);
 	u32 pgt_size;
 	int ret;
 
-	pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
+	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-	ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      pgt_size, 0x1000, true, &vpgt->mem[big]);
 	if (unlikely(ret))
 		return ret;
 
-	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
-		return 0;
-	}
-
-	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 	}
 
+	vpgt->refcount[big]++;
 	return 0;
 }
 
@@ -293,20 +280,20 @@
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
 			   &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
+		mutex_unlock(&vm->mutex);
 		return ret;
 	}
 
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
 
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != mmu->spg_shift);
+		int big = (vma->node->type != mmu->func->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
 			vpgt->refcount[big]++;
@@ -318,11 +305,11 @@
 			if (pde != fpde)
 				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nvkm_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&nv_subdev(mmu)->mutex);
+			mutex_unlock(&vm->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	vma->vm = NULL;
 	nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -334,27 +321,49 @@
 void
 nvkm_vm_put(struct nvkm_vma *vma)
 {
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_mmu *mmu;
+	struct nvkm_vm *vm;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
 		return;
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	vm = vma->vm;
+	mmu = vm->mmu;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
-	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+
+	mutex_lock(&vm->mutex);
+	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
 	nvkm_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
 
 int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       u32 block, struct nvkm_vm **pvm)
+nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
 {
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_memory *pgt;
+	int ret;
+
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
+	if (ret == 0) {
+		vm->pgt[0].refcount[0] = 1;
+		vm->pgt[0].mem[0] = pgt;
+		nvkm_memory_boot(pgt, vm);
+	}
+
+	return ret;
+}
+
+int
+nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+{
+	static struct lock_class_key _key;
 	struct nvkm_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
@@ -363,11 +372,12 @@
 	if (!vm)
 		return -ENOMEM;
 
+	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
 	INIT_LIST_HEAD(&vm->pgd_list);
 	vm->mmu = mmu;
 	kref_init(&vm->refcount);
-	vm->fpde = offset >> (mmu->pgt_bits + 12);
-	vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);
+	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
 
 	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
 	if (!vm->pgt) {
@@ -390,10 +400,12 @@
 
 int
 nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-	    struct nvkm_vm **pvm)
+	    struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	struct nvkm_mmu *mmu = nvkm_mmu(device);
-	return mmu->create(mmu, offset, length, mm_offset, pvm);
+	struct nvkm_mmu *mmu = device->mmu;
+	if (!mmu->func->create)
+		return -EINVAL;
+	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
 }
 
 static int
@@ -410,38 +422,33 @@
 	if (!vpgd)
 		return -ENOMEM;
 
-	nvkm_gpuobj_ref(pgd, &vpgd->obj);
+	vpgd->obj = pgd;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
-		mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
 static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd, *tmp;
-	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
-			pgd = vpgd->obj;
 			list_del(&vpgd->head);
 			kfree(vpgd);
 			break;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-
-	nvkm_gpuobj_ref(NULL, &pgd);
+	mutex_unlock(&vm->mutex);
 }
 
 static void
@@ -478,3 +485,58 @@
 	*ptr = ref;
 	return 0;
 }
+
+static int
+nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->oneinit)
+		return mmu->func->oneinit(mmu);
+	return 0;
+}
+
+static int
+nvkm_mmu_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->init)
+		mmu->func->init(mmu);
+	return 0;
+}
+
+static void *
+nvkm_mmu_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->dtor)
+		return mmu->func->dtor(mmu);
+	return mmu;
+}
+
+static const struct nvkm_subdev_func
+nvkm_mmu = {
+	.dtor = nvkm_mmu_dtor,
+	.oneinit = nvkm_mmu_oneinit,
+	.init = nvkm_mmu_init,
+};
+
+void
+nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu *mmu)
+{
+	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+	mmu->func = func;
+	mmu->limit = func->limit;
+	mmu->dma_bits = func->dma_bits;
+	mmu->lpg_shift = func->lpg_shift;
+}
+
+int
+nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_mmu_ctor(func, device, index, *pmmu);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda3..7ac507c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,19 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
 #include <subdev/fb.h>
 #include <subdev/ltc.h>
 #include <subdev/timer.h>
 
 #include <core/gpuobj.h>
 
-struct gf100_mmu_priv {
-	struct nvkm_mmu base;
-};
-
-
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
@@ -75,17 +70,19 @@
 
 
 static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
 {
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
-		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+		pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
 	if (pgt[1])
-		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+		pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
 
-	nv_wo32(pgd, (index * 8) + 0, pde[0]);
-	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
+	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -103,7 +100,7 @@
 }
 
 static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
 	u64 next = 1 << (vma->node->type - 8);
@@ -112,126 +109,113 @@
 	pte <<= 3;
 
 	if (mem->tag) {
-		struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
+		struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
 		u32 tag = mem->tag->offset + (delta >> 17);
 		phys |= (u64)tag << (32 + 12);
 		next |= (u64)1   << (32 + 12);
-		ltc->tags_clear(ltc, tag, cnt);
+		nvkm_ltc_tags_clear(ltc, tag, cnt);
 	}
 
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		phys += next;
 		pte  += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
 	/* compressed storage types are invalid for system memory */
 	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
 
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
 		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
-	struct gf100_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_device *device = mmu->subdev.device;
 	struct nvkm_vm_pgd *vpgd;
 	u32 type;
 
-	bar->flush(bar);
-
 	type = 0x00000001; /* PAGE_ALL */
-	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
+	if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&nv_subdev(priv)->mutex);
+	mutex_lock(&mmu->subdev.mutex);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc the more it decreases
 		 */
-		if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
-			nv_error(priv, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+				break;
+		);
 
-		nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
-		nv_wr32(priv, 0x100cbc, 0x80000000 | type);
+		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 
 		/* wait for flush to be queued? */
-		if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
-			nv_error(priv, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+				break;
+		);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&mmu->subdev.mutex);
 }
 
 static int
 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-		struct nvkm_vm **pvm)
+		struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
 }
 
-static int
-gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 27 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 17;
-	priv->base.create = gf100_vm_create;
-	priv->base.map_pgt = gf100_vm_map_pgt;
-	priv->base.map = gf100_vm_map;
-	priv->base.map_sg = gf100_vm_map_sg;
-	priv->base.unmap = gf100_vm_unmap;
-	priv->base.flush = gf100_vm_flush;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+gf100_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 27 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 17,
+	.create = gf100_vm_create,
+	.map_pgt = gf100_vm_map_pgt,
+	.map = gf100_vm_map,
+	.map_sg = gf100_vm_map_sg,
+	.unmap = gf100_vm_unmap,
+	.flush = gf100_vm_flush,
 };
+
+int
+gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
+}
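
For reference, the PDE packing gf100_vm_map_pgt() now performs through nvkm_wo32() is two words per PDE, each holding (present | table address >> 8). A stand-alone model with a made-up table address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t spgt  = 0x0000000020001000ULL; /* 4KiB-page table, or 0  */
	uint64_t lpgt  = 0;                     /* large-page table, or 0 */
	uint32_t index = 5;                     /* PDE index              */
	uint32_t pde[2] = { 0, 0 };

	if (spgt)
		pde[1] = 0x00000001 | (uint32_t)(spgt >> 8);
	if (lpgt)
		pde[0] = 0x00000001 | (uint32_t)(lpgt >> 8);

	printf("wo32(pgd + %#x) = %08x\n", index * 8 + 0, pde[0]);
	printf("wo32(pgd + %#x) = %08x\n", index * 8 + 4, pde[1]);
	return 0;
}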
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index fe93ea2..37927c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 
 #define NV04_PDMA_SIZE (128 * 1024 * 1024)
@@ -34,30 +33,34 @@
  ******************************************************************************/
 
 static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
 		u32 phys = (u32)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, phys | 3);
+			nvkm_wo32(pgt, pte, phys | 3);
 			phys += NV04_PDMA_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -66,86 +69,81 @@
 }
 
 /*******************************************************************************
- * VM object
- ******************************************************************************/
-
-int
-nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
-	       struct nvkm_vm **pvm)
-{
-	return -EINVAL;
-}
-
-/*******************************************************************************
  * MMU subdev
  ******************************************************************************/
 
 static int
-nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv;
-	struct nvkm_gpuobj *dma;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma;
 	int ret;
 
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
-			      "pcigart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV04_PDMA_SIZE;
-	priv->base.dma_bits = 32;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv04_vm_map_sg;
-	priv->base.unmap = nv04_vm_unmap;
-	priv->base.flush = nv04_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
-			      16, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	dma = priv->vm->pgt[0].obj[0];
-	priv->vm->pgt[0].refcount[0] = 1;
+			      16, true, &dma);
+	mmu->vm->pgt[0].mem[0] = dma;
+	mmu->vm->pgt[0].refcount[0] = 1;
 	if (ret)
 		return ret;
 
-	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
-	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_kmap(dma);
+	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_done(dma);
 	return 0;
 }
 
-void
-nv04_mmu_dtor(struct nvkm_object *object)
+void *
+nv04_mmu_dtor(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	if (priv->vm) {
-		nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->vm, NULL);
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	if (mmu->vm) {
+		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &mmu->vm, NULL);
 	}
-	if (priv->nullp) {
-		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
-				    priv->nullp, priv->null);
+	if (mmu->nullp) {
+		dma_free_coherent(device->dev, 16 * 1024,
+				  mmu->nullp, mmu->null);
 	}
-	nvkm_mmu_destroy(&priv->base);
+	return mmu;
 }
 
-struct nvkm_oclass
-nv04_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+int
+nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	struct nv04_mmu *mmu;
+	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmmu = &mmu->base;
+	nvkm_mmu_ctor(func, device, index, &mmu->base);
+	return 0;
+}
+
+const struct nvkm_mmu_func
+nv04_mmu = {
+	.oneinit = nv04_mmu_oneinit,
+	.dtor = nv04_mmu_dtor,
+	.limit = NV04_PDMA_SIZE,
+	.dma_bits = 32,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv04_vm_map_sg,
+	.unmap = nv04_vm_unmap,
+	.flush = nv04_vm_flush,
 };
+
+int
+nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+}
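
The PCIGART table touched above is flat: 4-byte PTEs holding (phys | 3), starting 8 bytes in, after the two DMA-object header words written in nv04_mmu_oneinit(). A minimal model (PAGE_SIZE assumed equal to NV04_PDMA_PAGE, so one PTE per page):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t phys = 0x12345000; /* page address from the sg list */
	uint32_t pte  = 10;         /* PTE index                     */
	uint32_t off  = 0x00008 + pte * 4;

	printf("wo32(%#x) = %08x\n", off, phys | 3);
	return 0;
}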
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
index 7bf6f4b..363e33b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -1,19 +1,18 @@
 #ifndef __NV04_MMU_PRIV__
 #define __NV04_MMU_PRIV__
+#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
+#include "priv.h"
 
-#include <subdev/mmu.h>
-
-struct nv04_mmu_priv {
+struct nv04_mmu {
 	struct nvkm_mmu base;
 	struct nvkm_vm *vm;
 	dma_addr_t null;
 	void *nullp;
 };
 
-static inline struct nv04_mmu_priv *
-nv04_mmu(void *obj)
-{
-	return (void *)nvkm_mmu(obj);
-}
+int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+void *nv04_mmu_dtor(struct nvkm_mmu *);
 
+extern const struct nvkm_mmu_func nv04_mmu;
 #endif
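
The nv04_mmu() macro above is the usual embedded-base downcast. A self-contained model of the idiom, using a simplified container_of and stand-in types rather than the kernel ones:

#include <stdio.h>
#include <stddef.h>

/* Simplified version of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int dummy; };
struct derived { struct base base; int extra; };

int main(void)
{
	struct derived d = { .extra = 42 };
	struct base *b = &d.base;  /* what the core code holds */
	struct derived *back = container_of(b, struct derived, base);

	printf("extra = %d\n", back->extra);
	return 0;
}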
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab..c6a26f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,45 +35,50 @@
  ******************************************************************************/
 
 static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV41_GART_PAGE;
 		u64 phys = (u64)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
 			phys += NV41_GART_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
 
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv_wr32(priv, 0x100810, 0x00000022);
-	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
-		nv_warn(priv, "flush timeout, 0x%08x\n",
-			nv_rd32(priv, 0x100810));
-	}
-	nv_wr32(priv, 0x100810, 0x00000000);
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_lock(&mmu->base.subdev.mutex);
+	nvkm_wr32(device, 0x100810, 0x00000022);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100810) & 0x00000020)
+			break;
+	);
+	nvkm_wr32(device, 0x100810, 0x00000000);
+	mutex_unlock(&mmu->base.subdev.mutex);
 }
 
 /*******************************************************************************
@@ -82,76 +86,56 @@
  ******************************************************************************/
 
 static int
-nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv41_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
-	}
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV41_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv41_vm_map_sg;
-	priv->base.unmap = nv41_vm_unmap;
-	priv->base.flush = nv41_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	return 0;
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv41_mmu_init(struct nvkm_object *object)
+static void
+nv41_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
-	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	return 0;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100820, 0x00000000);
 }
 
-struct nvkm_oclass
-nv41_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x41),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv41_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv41_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv41_mmu_oneinit,
+	.init = nv41_mmu_init,
+	.limit = NV41_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv41_vm_map_sg,
+	.unmap = nv41_vm_unmap,
+	.flush = nv41_vm_flush,
 };
+
+int
+nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+}
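
nv41's GART entries differ from nv04's only in encoding: bit 0 is the present bit, and the page address is stored in 128-byte units. A one-line model of the PTE write:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys = 0x12345000ULL; /* up to 39 bits on nv41 */
	uint32_t pte  = 7;

	printf("wo32(%#x) = %08x\n", pte * 4,
	       (uint32_t)((phys >> 7) | 1));
	return 0;
}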
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index b90ded1..a648c23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,16 +35,16 @@
  ******************************************************************************/
 
 static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
 	     dma_addr_t *list, u32 pte, u32 cnt)
 {
 	u32 base = (pte << 2) & ~0x0000000f;
 	u32 tmp[4];
 
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
+	tmp[0] = nvkm_ro32(pgt, base + 0x0);
+	tmp[1] = nvkm_ro32(pgt, base + 0x4);
+	tmp[2] = nvkm_ro32(pgt, base + 0x8);
+	tmp[3] = nvkm_ro32(pgt, base + 0xc);
 
 	while (cnt--) {
 		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -75,24 +74,25 @@
 		}
 	}
 
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+	nvkm_wo32(pgt, base + 0x0, tmp[0]);
+	nvkm_wo32(pgt, base + 0x4, tmp[1]);
+	nvkm_wo32(pgt, base + 0x8, tmp[2]);
+	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
 }
 
 static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-	struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 	u32 tmp[4];
 	int i;
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, list, pte, part);
+		nv44_vm_fill(pgt, mmu->null, list, pte, part);
 		pte  += part;
 		list += part;
 		cnt  -= part;
@@ -101,51 +101,57 @@
 	while (cnt >= 4) {
 		for (i = 0; i < 4; i++)
 			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
-		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
-		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
-	struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
 		pte  += part;
 		cnt  -= part;
 	}
 
 	while (cnt >= 4) {
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
-	nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
-	nv_wr32(priv, 0x100808, 0x00000020);
-	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
-		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
-	nv_wr32(priv, 0x100808, 0x00000000);
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+	nvkm_wr32(device, 0x100808, 0x00000020);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100808) & 0x00000001)
+			break;
+	);
+	nvkm_wr32(device, 0x100808, 0x00000000);
 }
 
 /*******************************************************************************
@@ -153,95 +159,78 @@
  ******************************************************************************/
 
 static int
-nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv44_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
+	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+					&mmu->null, GFP_KERNEL);
+	if (!mmu->nullp) {
+		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
+		mmu->null = 0;
 	}
 
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV44_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv44_vm_map_sg;
-	priv->base.unmap = nv44_vm_unmap;
-	priv->base.flush = nv44_vm_flush;
-
-	priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
-	if (!priv->nullp) {
-		nv_error(priv, "unable to allocate dummy pages\n");
-		return -ENOMEM;
-	}
-
-	ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-			      512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	return 0;
+			      512 * 1024, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv44_mmu_init(struct nvkm_object *object)
+static void
+nv44_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
 	u32 addr;
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
 
	/* calculate vram address of this PRAMIN block; the object must
	 * be allocated with 512KiB alignment, and must not exceed a
	 * total size of 512KiB, for this to work correctly
 	 */
-	addr  = nv_rd32(priv, 0x10020c);
-	addr -= ((gart->addr >> 19) + 1) << 19;
+	addr  = nvkm_rd32(device, 0x10020c);
+	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
 
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100818, priv->null);
-	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
-	nv_wr32(priv, 0x100850, 0x00008000);
-	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	nv_wr32(priv, 0x10082c, 0x00000001);
-	nv_wr32(priv, 0x100800, addr | 0x00000010);
-	return 0;
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100818, mmu->null);
+	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+	nvkm_wr32(device, 0x100850, 0x00008000);
+	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+	nvkm_wr32(device, 0x100820, 0x00000000);
+	nvkm_wr32(device, 0x10082c, 0x00000001);
+	nvkm_wr32(device, 0x100800, addr | 0x00000010);
 }
 
-struct nvkm_oclass
-nv44_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv44_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv44_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv44_mmu_oneinit,
+	.init = nv44_mmu_init,
+	.limit = NV44_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv44_vm_map_sg,
+	.unmap = nv44_vm_unmap,
+	.flush = nv44_vm_flush,
 };
+
+int
+nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+}
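
The nv44 PTE group is the subtle one: four 27-bit page-frame numbers (39-bit DMA address >> 12) packed across four 32-bit words, with 0x40000000 set in the last word. The pack shifts below are copied from nv44_vm_map_sg(); the unpack half is an added round-trip check, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tmp[4] = { 0x1234567, 0x0abcdef, 0x7654321, 0x0fedcba };
	uint32_t w[4];

	w[0] = tmp[0] >>  0 | tmp[1] << 27;
	w[1] = tmp[1] >>  5 | tmp[2] << 22;
	w[2] = tmp[2] >> 10 | tmp[3] << 17;
	w[3] = tmp[3] >> 15 | 0x40000000;

	/* unpack to show the round trip */
	uint32_t u0 =  w[0] & 0x7ffffff;
	uint32_t u1 = (w[0] >> 27 | w[1] << 5)  & 0x7ffffff;
	uint32_t u2 = (w[1] >> 22 | w[2] << 10) & 0x7ffffff;
	uint32_t u3 = (w[2] >> 17 | w[3] << 15) & 0x7ffffff;

	printf("%07x %07x %07x %07x\n", u0, u1, u2, u3);
	return 0;
}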
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index b83550f..a1f8d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,31 +21,28 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
-
-#include <core/engine.h>
-#include <core/gpuobj.h>
-
-struct nv50_mmu_priv {
-	struct nvkm_mmu base;
-};
+#include <engine/gr.h>
 
 static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
 {
 	u64 phys = 0xdeadcafe00000000ULL;
 	u32 coverage = 0;
 
 	if (pgt[0]) {
-		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
-		coverage = (pgt[0]->size >> 3) << 12;
+		/* present, 4KiB pages */
+		phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+		coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
 	} else
 	if (pgt[1]) {
-		phys = 0x00000001 | pgt[1]->addr; /* present */
-		coverage = (pgt[1]->size >> 3) << 16;
+		/* present, 64KiB pages */
+		phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+		coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
 	}
 
 	if (phys & 1) {
@@ -57,8 +54,10 @@
 			phys |= 0x20;
 	}
 
-	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -75,17 +74,18 @@
 }
 
 static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	    struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
+	struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
 	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block, target;
 	int i;
 
 	/* IGPs don't have real VRAM, re-target to stolen system memory */
 	target = 0;
-	if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
-		phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
+	if (ram->stolen) {
+		phys += ram->stolen;
 		target = 3;
 	}
 
@@ -93,6 +93,7 @@
 	pte <<= 3;
 	cnt <<= 3;
 
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 offset_h = upper_32_bits(phys);
 		u32 offset_l = lower_32_bits(phys);
@@ -113,129 +114,118 @@
 		}
 
 		while (block) {
-			nv_wo32(pgt, pte + 0, offset_l);
-			nv_wo32(pgt, pte + 4, offset_h);
+			nvkm_wo32(pgt, pte + 0, offset_l);
+			nvkm_wo32(pgt, pte + 4, offset_h);
 			pte += 8;
 			block -= 8;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
 		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv50_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_engine *engine;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_subdev *subdev = &mmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, vme;
 
-	bar->flush(bar);
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+	mutex_lock(&subdev->mutex);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vm->engref[i]))
 			continue;
 
 		/* unfortunate hw bug workaround... */
-		engine = nvkm_engine(priv, i);
-		if (engine && engine->tlb_flush) {
-			engine->tlb_flush(engine);
-			continue;
+		if (i == NVKM_ENGINE_GR && device->gr) {
+			int ret = nvkm_gr_tlb_flush(device->gr);
+			if (ret != -ENODEV)
+				continue;
 		}
 
 		switch (i) {
-		case NVDEV_ENGINE_GR    : vme = 0x00; break;
-		case NVDEV_ENGINE_VP    :
-		case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
-		case NVDEV_SUBDEV_BAR   : vme = 0x06; break;
-		case NVDEV_ENGINE_MSPPP :
-		case NVDEV_ENGINE_MPEG  : vme = 0x08; break;
-		case NVDEV_ENGINE_BSP   :
-		case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
-		case NVDEV_ENGINE_CIPHER:
-		case NVDEV_ENGINE_SEC   : vme = 0x0a; break;
-		case NVDEV_ENGINE_CE0   : vme = 0x0d; break;
+		case NVKM_ENGINE_GR    : vme = 0x00; break;
+		case NVKM_ENGINE_VP    :
+		case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
+		case NVKM_SUBDEV_BAR   : vme = 0x06; break;
+		case NVKM_ENGINE_MSPPP :
+		case NVKM_ENGINE_MPEG  : vme = 0x08; break;
+		case NVKM_ENGINE_BSP   :
+		case NVKM_ENGINE_MSVLD : vme = 0x09; break;
+		case NVKM_ENGINE_CIPHER:
+		case NVKM_ENGINE_SEC   : vme = 0x0a; break;
+		case NVKM_ENGINE_CE0   : vme = 0x0d; break;
 		default:
 			continue;
 		}
 
-		nv_wr32(priv, 0x100c80, (vme << 16) | 1);
-		if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-			nv_error(priv, "vm flush timeout: engine %d\n", vme);
+		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+				break;
+		) < 0)
+			nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&subdev->mutex);
 }
 
 static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
-	       u64 mm_offset, struct nvkm_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	u32 block = (1 << (mmu->pgt_bits + 12));
+	u32 block = (1 << (mmu->func->pgt_bits + 12));
 	if (block > length)
 		block = length;
 
-	return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
 }
 
-static int
-nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv50_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 29 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 16;
-	priv->base.create = nv50_vm_create;
-	priv->base.map_pgt = nv50_vm_map_pgt;
-	priv->base.map = nv50_vm_map;
-	priv->base.map_sg = nv50_vm_map_sg;
-	priv->base.unmap = nv50_vm_unmap;
-	priv->base.flush = nv50_vm_flush;
-	return 0;
-}
-
-struct nvkm_oclass
-nv50_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv50_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 29 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 16,
+	.create = nv50_vm_create,
+	.map_pgt = nv50_vm_map_pgt,
+	.map = nv50_vm_map,
+	.map_sg = nv50_vm_map_sg,
+	.unmap = nv50_vm_unmap,
+	.flush = nv50_vm_flush,
 };
+
+int
+nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
+}
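
nv50's PDE encodes both the table address and how much VA the table covers: 4KiB tables carry present|3, 64KiB tables present|1, with coverage derived from the table size as in nv50_vm_map_pgt(). A sketch of just those visible bits; the coverage-dependent flag bits (the phys |= 0x20 path and friends) are elided.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x20002000ULL; /* page table address  */
	uint64_t size = 0x8000;        /* table size in bytes */
	int      big  = 0;             /* 0: 4KiB, 1: 64KiB   */

	uint64_t phys;
	uint32_t coverage;

	if (!big) {
		phys = 0x00000003 | addr;                  /* present, 4KiB  */
		coverage = (uint32_t)((size >> 3) << 12);
	} else {
		phys = 0x00000001 | addr;                  /* present, 64KiB */
		coverage = (uint32_t)((size >> 3) << 16);
	}

	printf("pde = %08x%08x, covers %u MiB\n",
	       (uint32_t)(phys >> 32), (uint32_t)phys, coverage >> 20);
	return 0;
}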
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 0000000..27cedc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_MMU_PRIV_H__
+#define __NVKM_MMU_PRIV_H__
+#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
+#include <subdev/mmu.h>
+
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
+		   int index, struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+
+struct nvkm_mmu_func {
+	void *(*dtor)(struct nvkm_mmu *);
+	int (*oneinit)(struct nvkm_mmu *);
+	void (*init)(struct nvkm_mmu *);
+
+	u64 limit;
+	u8  dma_bits;
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
+		       struct lock_class_key *, struct nvkm_vm **);
+
+	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+			struct nvkm_memory *pgt[2]);
+	void (*map)(struct nvkm_vma *, struct nvkm_memory *,
+		    struct nvkm_mem *, u32 pte, u32 cnt,
+		    u64 phys, u64 delta);
+	void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
+		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+	void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+		      u32 pte, u32 cnt);
+	void (*flush)(struct nvkm_vm *);
+};
+
+int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
+		   struct lock_class_key *, struct nvkm_vm **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 0ca9dca..9700a76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -23,14 +23,13 @@
  */
 #include "mxms.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/mxm.h>
 #include <subdev/i2c.h>
 
 static bool
-mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
+mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
 		     u8 offset, u8 size, u8 *data)
 {
 	struct i2c_msg msgs[] = {
@@ -38,27 +37,28 @@
 		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
 	};
 
-	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+	return i2c_transfer(&bus->i2c, msgs, 2) == 2;
 }
 
 static bool
 mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
-	struct nvkm_i2c *i2c = nvkm_i2c(mxm);
-	struct nvkm_i2c_port *port = NULL;
+	struct nvkm_device *device = mxm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus = NULL;
 	u8 i2cidx, mxms[6], addr, size;
 
 	i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
 	if (i2cidx < 0x0f)
-		port = i2c->find(i2c, i2cidx);
-	if (!port)
+		bus = nvkm_i2c_bus_find(i2c, i2cidx);
+	if (!bus)
 		return false;
 
 	addr = 0x54;
-	if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+	if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
 		addr = 0x56;
-		if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+		if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
 			return false;
 	}
 
@@ -67,7 +67,7 @@
 	mxm->mxms = kmalloc(size, GFP_KERNEL);
 
 	if (mxm->mxms &&
-	    mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+	    mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
 		return true;
 
 	kfree(mxm->mxms);
@@ -79,7 +79,8 @@
 static bool
 mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_device *device = nv_device(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static char muid[] = {
 		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
 		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -94,7 +95,7 @@
 	acpi_handle handle;
 	int rev;
 
-	handle = ACPI_HANDLE(nv_device_base(device));
+	handle = ACPI_HANDLE(device->dev);
 	if (!handle)
 		return false;
 
@@ -106,7 +107,7 @@
 	rev = (version & 0xf0) << 4 | (version & 0x0f);
 	obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
 	if (!obj) {
-		nv_debug(mxm, "DSM MXMS failed\n");
+		nvkm_debug(subdev, "DSM MXMS failed\n");
 		return false;
 	}
 
@@ -114,7 +115,8 @@
 		mxm->mxms = kmemdup(obj->buffer.pointer,
 					 obj->buffer.length, GFP_KERNEL);
 	} else if (obj->type == ACPI_TYPE_INTEGER) {
-		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+		nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
+			   obj->integer.value);
 	}
 
 	ACPI_FREE(obj);
@@ -129,6 +131,7 @@
 static u8
 wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -137,18 +140,18 @@
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
 		return 0x00;
 	}
 
 	obj = retn.pointer;
 	if (obj->type == ACPI_TYPE_INTEGER) {
 		version = obj->integer.value;
-		nv_debug(mxm, "WMMX MXMI version %d.%d\n",
-			     (version >> 4), version & 0x0f);
+		nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
+			   (version >> 4), version & 0x0f);
 	} else {
 		version = 0;
-		nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+		nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
 	}
 
 	kfree(obj);
@@ -158,6 +161,7 @@
 static bool
 mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -165,7 +169,7 @@
 	acpi_status status;
 
 	if (!wmi_has_guid(WMI_WMMX_GUID)) {
-		nv_debug(mxm, "WMMX GUID not found\n");
+		nvkm_debug(subdev, "WMMX GUID not found\n");
 		return false;
 	}
 
@@ -177,7 +181,7 @@
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
 		return false;
 	}
 
@@ -211,7 +215,7 @@
 {
 	struct mxm_shadow_h *shadow = _mxm_shadow;
 	do {
-		nv_debug(mxm, "checking %s\n", shadow->name);
+		nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
 		if (shadow->exec(mxm, version)) {
 			if (mxms_valid(mxm))
 				return 0;
@@ -222,33 +226,33 @@
 	return -ENOENT;
 }
 
+static const struct nvkm_subdev_func
+nvkm_mxm = {
+};
+
 int
-nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bios *bios = nvkm_bios(device);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mxm *mxm;
 	u8  ver, len;
 	u16 data;
-	int ret;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
-				  length, pobject);
-	mxm = *pobject;
-	if (ret)
-		return ret;
+	if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
 
 	data = mxm_table(bios, &ver, &len);
-	if (!data || !(ver = nv_ro08(bios, data))) {
-		nv_debug(mxm, "no VBIOS data, nothing to do\n");
+	if (!data || !(ver = nvbios_rd08(bios, data))) {
+		nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
 		return 0;
 	}
 
-	nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+	nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
 
 	if (mxm_shadow(mxm, ver)) {
-		nv_info(mxm, "failed to locate valid SIS\n");
+		nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
 #if 0
 		/* we should, perhaps, fall back to some kind of limited
 		 * mode here if the x86 vbios hasn't already done the
@@ -261,8 +265,8 @@
 #endif
 	}
 
-	nv_info(mxm, "MXMS Version %d.%d\n",
-		mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+	nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
+		   mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
 	mxms_foreach(mxm, 0, NULL, NULL);
 
 	if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
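
For context on the surrounding flow: mxm_shadow() walks a NULL-terminated table of fetch methods (I2C ROM, ACPI DSM, WMI) until one produces a blob that validates. A stand-alone model of that dispatch, with stub methods standing in for the real handlers:

#include <stdio.h>
#include <stdbool.h>

struct shadow { const char *name; bool (*exec)(int ver); };

static bool try_rom(int v) { return false; }
static bool try_dsm(int v) { return false; }
static bool try_wmi(int v) { return true;  }

static const struct shadow shadows[] = {
	{ "ROM", try_rom },
	{ "DSM", try_dsm },
	{ "WMI", try_wmi },
	{ NULL, NULL },
};

int main(void)
{
	const struct shadow *s = shadows;
	int version = 0x30;

	while (s->name) {
		printf("checking %s\n", s->name);
		if (s->exec(version)) {
			printf("shadowed via %s\n", s->name);
			return 0;
		}
		s++;
	}
	return 1; /* the driver returns -ENOENT here */
}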
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index a9b1d63..45a2f8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -47,7 +47,7 @@
 		break;
 	}
 
-	nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+	nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
 	return 0x0000;
 }
 
@@ -71,7 +71,7 @@
 	while (size--)
 		sum += *mxms++;
 	if (sum) {
-		nv_debug(mxm, "checksum invalid\n");
+		nvkm_debug(&mxm->subdev, "checksum invalid\n");
 		return false;
 	}
 	return true;
@@ -82,7 +82,7 @@
 {
 	u8 *mxms = mxms_data(mxm);
 	if (*(u32 *)mxms != 0x5f4d584d) {
-		nv_debug(mxm, "signature invalid\n");
+		nvkm_debug(&mxm->subdev, "signature invalid\n");
 		return false;
 	}
 
@@ -96,6 +96,7 @@
 mxms_foreach(struct nvkm_mxm *mxm, u8 types,
 	     bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u8 *mxms = mxms_data(mxm);
 	u8 *desc = mxms + mxms_headerlen(mxm);
 	u8 *fini = desc + mxms_structlen(mxm) - 1;
@@ -140,29 +141,28 @@
 			entries   = desc[1] & 0x07;
 			break;
 		default:
-			nv_debug(mxm, "unknown descriptor type %d\n", type);
+			nvkm_debug(subdev, "unknown descriptor type %d\n", type);
 			return false;
 		}
 
-		if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
-			static const char * mxms_desc_name[] = {
+		if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
+			static const char * mxms_desc[] = {
 				"ODS", "SCCS", "TS", "IPS",
 				"GSD", "VSS", "BCS", "FCS",
 			};
 			u8 *dump = desc;
+			char data[32], *ptr;
 			int i, j;
 
-			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
-			for (j = headerlen - 1; j >= 0; j--)
-				pr_cont("%02x", dump[j]);
-			pr_cont("\n");
+			for (j = headerlen - 1, ptr = data; j >= 0; j--)
+				ptr += sprintf(ptr, "%02x", dump[j]);
 			dump += headerlen;
 
+			nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
 			for (i = 0; i < entries; i++, dump += recordlen) {
-				nv_debug(mxm, "      ");
-				for (j = recordlen - 1; j >= 0; j--)
-					pr_cont("%02x", dump[j]);
-				pr_cont("\n");
+				for (j = recordlen - 1, ptr = data; j >= 0; j--)
+					ptr += sprintf(ptr, "%02x", dump[j]);
+				nvkm_debug(subdev, "      %s\n", data);
 			}
 		}
 
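
A note on the mxms_foreach() change above: the old loop emitted each byte
with a separate pr_cont(), which can interleave with other console output;
the new code formats a whole row into a 32-byte stack buffer and prints it
with a single nvkm_debug() call, keeping the subdev prefix on every line.
This assumes headerlen/recordlen never exceed 15 bytes, so the hex string
fits the buffer.  A stand-alone sketch of the row formatter (hexdump_rev()
and the sizes here are illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Format n bytes of src as hex, most-significant byte first, into dst.
 * The caller must provide room for 2*n + 1 characters.
 */
static void hexdump_rev(char *dst, const uint8_t *src, int n)
{
	char *ptr = dst;
	int j;

	for (j = n - 1; j >= 0; j--)
		ptr += sprintf(ptr, "%02x", src[j]);
}

int main(void)
{
	const uint8_t desc[4] = { 0x10, 0x20, 0x30, 0x40 };
	char line[32];

	hexdump_rev(line, desc, 4);
	printf("ODS : %s\n", line);	/* prints "ODS : 40302010" */
	return 0;
}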
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 4ef8040..333e0c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,6 +1,6 @@
 #ifndef __NVMXM_MXMS_H__
 #define __NVMXM_MXMS_H__
-#include <subdev/mxm.h>
+#include "priv.h"
 
 struct mxms_odev {
 	u8 outp_type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index f20e4ca..db14fad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -28,10 +28,6 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/mxm.h>
 
-struct nv50_mxm_priv {
-	struct nvkm_mxm base;
-};
-
 struct context {
 	u32 *outp;
 	struct mxms_odev desc;
@@ -53,7 +49,7 @@
 static bool
 mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_bios *bios = mxm->subdev.device->bios;
 	struct context *ctx = info;
 	u64 desc = *(u64 *)data;
 
@@ -107,8 +103,8 @@
 	 * if one isn't found, disable it.
 	 */
 	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
-		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
-			idx, ctx.outp[0], ctx.outp[1]);
+		nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
+			   idx, ctx.outp[0], ctx.outp[1]);
 		ctx.outp[0] |= 0x0000000f;
 		return 0;
 	}
@@ -180,20 +176,22 @@
 static bool
 mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u64 desc = *(u64 *)data;
 	if ((desc & 0xf0) != 0xf0)
-		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+		nvkm_info(subdev, "unmatched output device %016llx\n", desc);
 	return true;
 }
 
 static void
 mxm_dcb_sanitise(struct nvkm_mxm *mxm)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8  ver, hdr, cnt, len;
 	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
 	if (dcb == 0x0000 || ver != 0x40) {
-		nv_debug(mxm, "unsupported DCB version\n");
+		nvkm_debug(subdev, "unsupported DCB version\n");
 		return;
 	}
 
@@ -201,31 +199,20 @@
 	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
 }
 
-static int
-nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
 {
-	struct nv50_mxm_priv *priv;
+	struct nvkm_mxm *mxm;
 	int ret;
 
-	ret = nvkm_mxm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_mxm_new_(device, index, &mxm);
+	if (mxm)
+		*pmxm = &mxm->subdev;
 	if (ret)
 		return ret;
 
-	if (priv->base.action & MXM_SANITISE_DCB)
-		mxm_dcb_sanitise(&priv->base);
+	if (mxm->action & MXM_SANITISE_DCB)
+		mxm_dcb_sanitise(mxm);
+
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_mxm_oclass = {
-	.handle = NV_SUBDEV(MXM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mxm_ctor,
-		.dtor = _nvkm_mxm_dtor,
-		.init = _nvkm_mxm_init,
-		.fini = _nvkm_mxm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
new file mode 100644
index 0000000..7d97015
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_MXM_PRIV_H__
+#define __NVKM_MXM_PRIV_H__
+#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
+#include <subdev/mxm.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nvkm_mxm {
+	struct nvkm_subdev subdev;
+	u32 action;
+	u8 *mxms;
+};
+
+int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+#endif
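
The nvkm_mxm() macro above is the usual container_of() downcast: given a
pointer to the embedded nvkm_subdev, it recovers the enclosing nvkm_mxm.
A stand-alone illustration (local container_of() definition included; the
struct names are simplified stand-ins for the driver's types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };
struct mxm { struct subdev subdev; unsigned int action; };

int main(void)
{
	struct mxm m = { .subdev = { .index = 7 }, .action = 1 };
	struct subdev *sd = &m.subdev;	/* what subdev callbacks receive */
	struct mxm *back = container_of(sd, struct mxm, subdev);

	printf("action=%u\n", back->action);	/* prints action=1 */
	return back != &m;	/* exits 0: the round trip succeeded */
}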
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
new file mode 100644
index 0000000..99672c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -0,0 +1,7 @@
+nvkm-y += nvkm/subdev/pci/agp.o
+nvkm-y += nvkm/subdev/pci/base.o
+nvkm-y += nvkm/subdev/pci/nv04.o
+nvkm-y += nvkm/subdev/pci/nv40.o
+nvkm-y += nvkm/subdev/pci/nv4c.o
+nvkm-y += nvkm/subdev/pci/nv50.o
+nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
new file mode 100644
index 0000000..814cb51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "agp.h"
+#ifdef __NVKM_PCI_AGP_H__
+#include <core/option.h>
+
+struct nvkm_device_agp_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static const struct nvkm_device_agp_quirk
+nvkm_device_agp_quirks[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+	{},
+};
+
+void
+nvkm_agp_fini(struct nvkm_pci *pci)
+{
+	if (pci->agp.acquired) {
+		agp_backend_release(pci->agp.bridge);
+		pci->agp.acquired = false;
+	}
+}
+
+/* Ensure AGP controller is in a consistent state in case we need to
+ * execute the VBIOS DEVINIT scripts.
+ */
+void
+nvkm_agp_preinit(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	u32 mode = nvkm_pci_rd32(pci, 0x004c);
+	u32 save[2];
+
+	/* Disable fast writes first: if they're still enabled in the AGP
+	 * bridge when we disable the card's AGP controller, we risk
+	 * locking ourselves out of it.
+	 */
+	if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
+		mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
+		agp_enable(pci->agp.bridge, mode);
+	}
+
+	/* clear busmaster bit, and disable AGP */
+	save[0] = nvkm_pci_rd32(pci, 0x0004);
+	nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
+	nvkm_pci_wr32(pci, 0x004c, 0x00000000);
+
+	/* reset PGRAPH, PFIFO and PTIMER */
+	save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00011100, save[1]);
+
+	/* and restore busmaster bit (gives effect of resetting AGP) */
+	nvkm_pci_wr32(pci, 0x0004, save[0]);
+}
+
+int
+nvkm_agp_init(struct nvkm_pci *pci)
+{
+	if (!agp_backend_acquire(pci->pdev)) {
+		nvkm_error(&pci->subdev, "failed to acquire agp\n");
+		return -ENODEV;
+	}
+
+	agp_enable(pci->agp.bridge, pci->agp.mode);
+	pci->agp.acquired = true;
+	return 0;
+}
+
+void
+nvkm_agp_dtor(struct nvkm_pci *pci)
+{
+	arch_phys_wc_del(pci->agp.mtrr);
+}
+
+void
+nvkm_agp_ctor(struct nvkm_pci *pci)
+{
+	const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
+	struct nvkm_subdev *subdev = &pci->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct agp_kern_info info;
+	int mode = -1;
+
+#ifdef __powerpc__
+	/* Disable AGP by default on all PowerPC machines for now -- at
+	 * least some UniNorth-2 AGP bridges are known to be broken:
+	 * DMA from the host to the card works just fine, but writeback
+	 * from the card to the host goes straight to memory
+	 * untranslated, somehow bypassing the GATT, making them quite
+	 * painful to deal with...
+	 */
+	mode = 0;
+#endif
+	mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
+
+	/* acquire bridge temporarily, so that we can copy its info */
+	if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
+		nvkm_warn(subdev, "failed to acquire agp\n");
+		return;
+	}
+	agp_copy_info(pci->agp.bridge, &info);
+	agp_backend_release(pci->agp.bridge);
+
+	pci->agp.mode = info.mode;
+	pci->agp.base = info.aper_base;
+	pci->agp.size = info.aper_size * 1024 * 1024;
+	pci->agp.cma  = info.cant_use_aperture;
+	pci->agp.mtrr = -1;
+
+	/* determine if bridge + chipset combination needs a workaround */
+	while (quirk->hostbridge_vendor) {
+		if (info.device->vendor == quirk->hostbridge_vendor &&
+		    info.device->device == quirk->hostbridge_device &&
+		    pci->pdev->vendor == quirk->chip_vendor &&
+		    pci->pdev->device == quirk->chip_device) {
+			nvkm_info(subdev, "forcing default agp mode to %dX, "
+					  "use NvAGP=<mode> to override\n",
+				  quirk->mode);
+			mode = quirk->mode;
+			break;
+		}
+		quirk++;
+	}
+
+	/* apply quirk / user-specified mode */
+	if (mode >= 1) {
+		if (pci->agp.mode & 0x00000008)
+			mode /= 4; /* AGPv3 */
+		pci->agp.mode &= ~0x00000007;
+		pci->agp.mode |= (mode & 0x7);
+	} else
+	if (mode == 0) {
+		pci->agp.bridge = NULL;
+		return;
+	}
+
+	/* fast writes appear to be broken on nv18, they make the card
+	 * lock up randomly.
+	 */
+	if (device->chipset == 0x18)
+		pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
+
+	pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
+}
+#endif
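
nvkm_agp_ctor() above walks a sentinel-terminated quirk table: the
trailing zeroed entry ends the scan, so the table needs no explicit
length.  A minimal stand-alone sketch of the same pattern (the IDs and
names are made up, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

struct quirk { uint16_t vendor, device; int mode; };

static const struct quirk quirks[] = {
	{ 0x1106, 0x0691, 2 },	/* hypothetical bridge forced to 2x */
	{ 0 },			/* zeroed sentinel terminates the table */
};

static int quirk_mode(uint16_t vendor, uint16_t device)
{
	const struct quirk *q = quirks;

	while (q->vendor) {
		if (q->vendor == vendor && q->device == device)
			return q->mode;
		q++;
	}
	return -1;	/* no quirk: leave the mode alone */
}

int main(void)
{
	printf("%d\n", quirk_mode(0x1106, 0x0691));	/* prints 2 */
	return 0;
}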
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
new file mode 100644
index 0000000..df2dd08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -0,0 +1,18 @@
+#include "priv.h"
+#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
+#ifndef __NVKM_PCI_AGP_H__
+#define __NVKM_PCI_AGP_H__
+
+void nvkm_agp_ctor(struct nvkm_pci *);
+void nvkm_agp_dtor(struct nvkm_pci *);
+void nvkm_agp_preinit(struct nvkm_pci *);
+int nvkm_agp_init(struct nvkm_pci *);
+void nvkm_agp_fini(struct nvkm_pci *);
+#endif
+#else
+static inline void nvkm_agp_ctor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_dtor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_preinit(struct nvkm_pci *pci) {}
+static inline int nvkm_agp_init(struct nvkm_pci *pci) { return -ENOSYS; }
+static inline void nvkm_agp_fini(struct nvkm_pci *pci) {}
+#endif
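
Note how agp.h makes its include guard do double duty: __NVKM_PCI_AGP_H__
is only defined when AGP support is configured in, so agp.c can wrap its
entire body in #ifdef __NVKM_PCI_AGP_H__ and compile to nothing otherwise,
while callers transparently pick up the static inline stubs.  The same
pattern in miniature (HAVE_FEATURE and the names are illustrative):

/* feature.h */
#ifdef HAVE_FEATURE
#ifndef FEATURE_H
#define FEATURE_H
void feature_init(void);
#endif
#else
static inline void feature_init(void) {}
#endif

/* feature.c -- the body vanishes when the feature is configured out */
#include "feature.h"
#ifdef FEATURE_H
#include <stdio.h>

void feature_init(void)
{
	puts("feature available");
}
#endif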
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
new file mode 100644
index 0000000..d1c148e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "agp.h"
+
+#include <core/option.h>
+#include <core/pci.h>
+#include <subdev/mc.h>
+
+u32
+nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	return pci->func->rd32(pci, addr);
+}
+
+void
+nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	pci->func->wr08(pci, addr, data);
+}
+
+void
+nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	pci->func->wr32(pci, addr, data);
+}
+
+void
+nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
+{
+	u32 data = nvkm_pci_rd32(pci, 0x0050);
+	if (shadow)
+		data |=  0x00000001;
+	else
+		data &= ~0x00000001;
+	nvkm_pci_wr32(pci, 0x0050, data);
+}
+
+static irqreturn_t
+nvkm_pci_intr(int irq, void *arg)
+{
+	struct nvkm_pci *pci = arg;
+	struct nvkm_mc *mc = pci->subdev.device->mc;
+	bool handled = false;
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		if (pci->msi)
+			pci->func->msi_rearm(pci);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+
+	if (pci->irq >= 0) {
+		free_irq(pci->irq, pci);
+		pci->irq = -1;
+	}
+
+	if (pci->agp.bridge)
+		nvkm_agp_fini(pci);
+
+	return 0;
+}
+
+static int
+nvkm_pci_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	if (pci->agp.bridge)
+		nvkm_agp_preinit(pci);
+	return 0;
+}
+
+static int
+nvkm_pci_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+
+	if (pci->agp.bridge) {
+		ret = nvkm_agp_init(pci);
+		if (ret)
+			return ret;
+	}
+
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+
+	pci->irq = pdev->irq;
+	return ret;
+}
+
+static void *
+nvkm_pci_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	nvkm_agp_dtor(pci);
+	if (pci->msi)
+		pci_disable_msi(pci->pdev);
+	return nvkm_pci(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pci_func = {
+	.dtor = nvkm_pci_dtor,
+	.preinit = nvkm_pci_preinit,
+	.init = nvkm_pci_init,
+	.fini = nvkm_pci_fini,
+};
+
+int
+nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pci **ppci)
+{
+	struct nvkm_pci *pci;
+
+	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+	pci->func = func;
+	pci->pdev = device->func->pci(device)->pdev;
+	pci->irq = -1;
+
+	if (device->type == NVKM_DEVICE_AGP)
+		nvkm_agp_ctor(pci);
+
+	switch (pci->pdev->device & 0x0ff0) {
+	case 0x00f0:
+	case 0x02e0:
+		/* BR02? No idea yet exactly how these should be handled */
+		break;
+	default:
+		switch (device->chipset) {
+		case 0xaa:
+			/* reported broken, nv also disables it */
+			break;
+		default:
+			pci->msi = true;
+			break;
+		}
+	}
+
+	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+	if (pci->msi && func->msi_rearm) {
+		pci->msi = pci_enable_msi(pci->pdev) == 0;
+		if (pci->msi)
+			nvkm_debug(&pci->subdev, "MSI enabled\n");
+	} else {
+		pci->msi = false;
+	}
+
+	return 0;
+}
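
One subtlety in nvkm_pci_intr() above: MSIs behave like edges, not
levels, so the handler re-arms the MSI before servicing the interrupt
sources; an event that fires mid-handler then generates a fresh message
instead of being silently lost.  A userspace sketch of that ordering
(every name below is a stand-in for the nvkm_mc_*/pci->func calls, not
real kernel API):

#include <stdbool.h>
#include <stdio.h>

struct example_dev { bool msi; };

static void dev_unarm(struct example_dev *d)     { puts("mask master irq"); }
static void dev_msi_rearm(struct example_dev *d) { puts("re-arm MSI"); }
static void dev_dispatch(struct example_dev *d, bool *handled)
{
	puts("dispatch to engines");
	*handled = true;
}
static void dev_rearm(struct example_dev *d)     { puts("unmask master irq"); }

static bool example_isr(struct example_dev *dev)
{
	bool handled = false;

	dev_unarm(dev);			/* quiesce the master IRQ output */
	if (dev->msi)
		dev_msi_rearm(dev);	/* re-arm *before* handling */
	dev_dispatch(dev, &handled);
	dev_rearm(dev);			/* unmask; pending intrs re-fire */
	return handled;
}

int main(void)
{
	struct example_dev dev = { .msi = true };

	return example_isr(&dev) ? 0 : 1;
}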
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
new file mode 100644
index 0000000..86f8226
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gf100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0704, 0xff);
+}
+
+static const struct nvkm_pci_func
+gf100_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = gf100_pci_msi_rearm,
+};
+
+int
+gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
new file mode 100644
index 0000000..5b1ed42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static u32
+nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x001800 + addr);
+}
+
+static void
+nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x001800 + addr, data);
+}
+
+static void
+nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x001800 + addr, data);
+}
+
+static const struct nvkm_pci_func
+nv04_pci_func = {
+	.rd32 = nv04_pci_rd32,
+	.wr08 = nv04_pci_wr08,
+	.wr32 = nv04_pci_wr32,
+};
+
+int
+nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
new file mode 100644
index 0000000..090a187
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+u32
+nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x088000 + addr);
+}
+
+void
+nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x088000 + addr, data);
+}
+
+void
+nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x088000 + addr, data);
+}
+
+static void
+nv40_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0068, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv40_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv40_pci_msi_rearm,
+};
+
+int
+nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
new file mode 100644
index 0000000..1f1b26b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pci_func
+nv4c_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+};
+
+int
+nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
new file mode 100644
index 0000000..3e167d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+
+/* MSI re-arm through the PRI appears to be broken on the original G80,
+ * so we access it via alternate PCI config space mechanisms.
+ */
+static void
+nv50_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	struct pci_dev *pdev = device->func->pci(device)->pdev;
+	pci_write_config_byte(pdev, 0x68, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv50_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv50_pci_msi_rearm,
+};
+
+int
+nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv50_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
new file mode 100644
index 0000000..d22c2c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -0,0 +1,19 @@
+#ifndef __NVKM_PCI_PRIV_H__
+#define __NVKM_PCI_PRIV_H__
+#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
+#include <subdev/pci.h>
+
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
+		  int index, struct nvkm_pci **);
+
+struct nvkm_pci_func {
+	u32 (*rd32)(struct nvkm_pci *, u16 addr);
+	void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
+	void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
+	void (*msi_rearm)(struct nvkm_pci *);
+};
+
+u32 nv40_pci_rd32(struct nvkm_pci *, u16);
+void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
+void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 7081d6a..88b643b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -2,8 +2,9 @@
 nvkm-y += nvkm/subdev/pmu/memx.o
 nvkm-y += nvkm/subdev/pmu/gt215.o
 nvkm-y += nvkm/subdev/pmu/gf100.o
-nvkm-y += nvkm/subdev/pmu/gf110.o
+nvkm-y += nvkm/subdev/pmu/gf119.o
 nvkm-y += nvkm/subdev/pmu/gk104.o
 nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
+nvkm-y += nvkm/subdev/pmu/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 054b2d2..27a79c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,21 +28,25 @@
 void
 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
-	if (impl->pgob)
-		impl->pgob(pmu, enable);
+	if (pmu->func->pgob)
+		pmu->func->pgob(pmu, enable);
 }
 
-static int
+int
 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 	      u32 process, u32 message, u32 data0, u32 data1)
 {
-	struct nvkm_subdev *subdev = nv_subdev(pmu);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
 	/* wait for a free slot in the fifo */
-	addr  = nv_rd32(pmu, 0x10a4a0);
-	if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
+	addr  = nvkm_rd32(device, 0x10a4a0);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x10a4b0);
+		if (tmp != (addr ^ 8))
+			break;
+	) < 0)
 		return -EBUSY;
 
 	/* we currently only support a single process at a time waiting
@@ -57,20 +61,20 @@
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000001);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+		nvkm_wr32(device, 0x10a580, 0x00000001);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
 
 	/* write the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
 				pmu->send.base));
-	nv_wr32(pmu, 0x10a1c4, process);
-	nv_wr32(pmu, 0x10a1c4, message);
-	nv_wr32(pmu, 0x10a1c4, data0);
-	nv_wr32(pmu, 0x10a1c4, data1);
-	nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+	nvkm_wr32(device, 0x10a1c4, process);
+	nvkm_wr32(device, 0x10a1c4, message);
+	nvkm_wr32(device, 0x10a1c4, data0);
+	nvkm_wr32(device, 0x10a1c4, data1);
+	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wait for reply, if requested */
 	if (reply) {
@@ -87,29 +91,31 @@
 nvkm_pmu_recv(struct work_struct *work)
 {
 	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 process, message, data0, data1;
 
 	/* nothing to do if GET == PUT */
-	u32 addr =  nv_rd32(pmu, 0x10a4cc);
-	if (addr == nv_rd32(pmu, 0x10a4c8))
+	u32 addr =  nvkm_rd32(device, 0x10a4cc);
+	if (addr == nvkm_rd32(device, 0x10a4c8))
 		return;
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000002);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+		nvkm_wr32(device, 0x10a580, 0x00000002);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
 				pmu->recv.base));
-	process = nv_rd32(pmu, 0x10a1c4);
-	message = nv_rd32(pmu, 0x10a1c4);
-	data0   = nv_rd32(pmu, 0x10a1c4);
-	data1   = nv_rd32(pmu, 0x10a1c4);
-	nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+	process = nvkm_rd32(device, 0x10a1c4);
+	message = nvkm_rd32(device, 0x10a1c4);
+	data0   = nvkm_rd32(device, 0x10a1c4);
+	data1   = nvkm_rd32(device, 0x10a1c4);
+	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wake process if it's waiting on a synchronous reply */
 	if (pmu->recv.process) {
@@ -126,143 +132,149 @@
 	/* right now there are no other expected responses from the engine,
 	 * so assume that any unexpected message is an error.
 	 */
-	nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		(char)((process & 0x000000ff) >>  0),
-		(char)((process & 0x0000ff00) >>  8),
-		(char)((process & 0x00ff0000) >> 16),
-		(char)((process & 0xff000000) >> 24),
-		process, message, data0, data1);
+	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+		  (char)((process & 0x000000ff) >>  0),
+		  (char)((process & 0x0000ff00) >>  8),
+		  (char)((process & 0x00ff0000) >> 16),
+		  (char)((process & 0xff000000) >> 24),
+		  process, message, data0, data1);
 }
 
 static void
 nvkm_pmu_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)subdev;
-	u32 disp = nv_rd32(pmu, 0x10a01c);
-	u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	u32 disp = nvkm_rd32(device, 0x10a01c);
+	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
 
 	if (intr & 0x00000020) {
-		u32 stat = nv_rd32(pmu, 0x10a16c);
+		u32 stat = nvkm_rd32(device, 0x10a16c);
 		if (stat & 0x80000000) {
-			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-				 stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
-			nv_wr32(pmu, 0x10a16c, 0x00000000);
+			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+				   stat & 0x00ffffff,
+				   nvkm_rd32(device, 0x10a168));
+			nvkm_wr32(device, 0x10a16c, 0x00000000);
 			intr &= ~0x00000020;
 		}
 	}
 
 	if (intr & 0x00000040) {
 		schedule_work(&pmu->recv.work);
-		nv_wr32(pmu, 0x10a004, 0x00000040);
+		nvkm_wr32(device, 0x10a004, 0x00000040);
 		intr &= ~0x00000040;
 	}
 
 	if (intr & 0x00000080) {
-		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
-						     nv_rd32(pmu, 0x10a7a4));
-		nv_wr32(pmu, 0x10a004, 0x00000080);
+		nvkm_info(subdev, "wr32 %06x %08x\n",
+			  nvkm_rd32(device, 0x10a7a0),
+			  nvkm_rd32(device, 0x10a7a4));
+		nvkm_wr32(device, 0x10a004, 0x00000080);
 		intr &= ~0x00000080;
 	}
 
 	if (intr) {
-		nv_error(pmu, "intr 0x%08x\n", intr);
-		nv_wr32(pmu, 0x10a004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, 0x10a004, intr);
 	}
 }
 
-int
-_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
 
-	nv_wr32(pmu, 0x10a014, 0x00000060);
+	nvkm_wr32(device, 0x10a014, 0x00000060);
 	flush_work(&pmu->recv.work);
-
-	return nvkm_subdev_fini(&pmu->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_pmu_init(struct nvkm_object *object)
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
-	struct nvkm_pmu *pmu = (void *)object;
-	int ret, i;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	nv_subdev(pmu)->intr = nvkm_pmu_intr;
-	pmu->message = nvkm_pmu_send;
-	pmu->pgob = nvkm_pmu_pgob;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	int i;
 
 	/* prevent previous ucode from running, wait for idle, reset */
-	nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
-	nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
-	nv_rd32(pmu, 0x000200);
-	nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
+	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x10a04c))
+			break;
+	);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+			break;
+	);
 
 	/* upload data segment */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000);
-	for (i = 0; i < impl->data.size / 4; i++)
-		nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000);
+	for (i = 0; i < pmu->func->data.size / 4; i++)
+		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
 
 	/* upload code segment */
-	nv_wr32(pmu, 0x10a180, 0x01000000);
-	for (i = 0; i < impl->code.size / 4; i++) {
+	nvkm_wr32(device, 0x10a180, 0x01000000);
+	for (i = 0; i < pmu->func->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(pmu, 0x10a188, i >> 6);
-		nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+			nvkm_wr32(device, 0x10a188, i >> 6);
+		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
 	}
 
 	/* start it running */
-	nv_wr32(pmu, 0x10a10c, 0x00000000);
-	nv_wr32(pmu, 0x10a104, 0x00000000);
-	nv_wr32(pmu, 0x10a100, 0x00000002);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a104, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 
 	/* wait for valid host->pmu ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4d0))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
-	pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
 
 	/* wait for valid pmu->host ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4dc))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
-	pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
 
-	nv_wr32(pmu, 0x10a010, 0x000000e0);
+	nvkm_wr32(device, 0x10a010, 0x000000e0);
 	return 0;
 }
 
+static void *
+nvkm_pmu_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_pmu(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pmu = {
+	.dtor = nvkm_pmu_dtor,
+	.init = nvkm_pmu_init,
+	.fini = nvkm_pmu_fini,
+	.intr = nvkm_pmu_intr,
+};
+
 int
-nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
-				  "pmu", length, pobject);
-	pmu = *pobject;
-	if (ret)
-		return ret;
-
+	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+	pmu->func = func;
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
 	return 0;
 }
-
-int
-_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_pmu *pmu;
-	int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
-	*pobject = nv_object(pmu);
-	return ret;
-}
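
The nv_wait()/nv_wait_ne() calls above become nvkm_msec(device, 2000, ...)
loops: the statement body is evaluated repeatedly until it breaks out or
the two-second budget expires, and a negative result signals the timeout
(mapped to -EBUSY here).  A userspace analogue of the poll-with-deadline
shape (poll_msec() is a hypothetical stand-in, not the kernel macro):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll cond(arg) until it holds or timeout_ms elapses.  Returns the time
 * left on success, -1 on timeout -- mirroring how the driver treats a
 * negative nvkm_msec() result as -EBUSY.
 */
static long poll_msec(long timeout_ms, bool (*cond)(void *), void *arg)
{
	long deadline = now_ms() + timeout_ms;
	long left;

	while ((left = deadline - now_ms()) >= 0) {
		if (cond(arg))
			return left;
	}
	return -1;
}

static bool ring_configured(void *arg)
{
	/* stands in for "nvkm_rd32(device, 0x10a4d0) != 0" */
	return ++*(int *)arg >= 3;
}

int main(void)
{
	int polls = 0;

	if (poll_msec(2000, ring_configured, &polls) < 0)
		return 1;	/* would be -EBUSY in the driver */
	printf("ready after %d polls\n", polls);
	return 0;
}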
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
deleted file mode 100644
index ae9c3f1..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#define NVKM_PPWR_CHIPSET GF119
-#define HW_TICKS_PER_US 324
-
-//#define NVKM_FALCON_PC24
-#define NVKM_FALCON_UNSHIFTED_IO
-//#define NVKM_FALCON_MMIO_UAS
-//#define NVKM_FALCON_MMIO_TRAP
-
-#include "macros.fuc"
-
-.section #gf110_pmu_data
-#define INCLUDE_PROC
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_PROC
-
-#define INCLUDE_DATA
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_DATA
-.align 256
-
-.section #gf110_pmu_code
-#define INCLUDE_CODE
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_CODE
-.align 256
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
deleted file mode 100644
index a0c499e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
+++ /dev/null
@@ -1,1795 +0,0 @@
-uint32_t gf110_pmu_data[] = {
-/* 0x0000: proc_kern */
-	0x52544e49,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0058: proc_list_head */
-	0x54534f48,
-	0x0000049d,
-	0x00000446,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x584d454d,
-	0x0000068b,
-	0x0000067d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x46524550,
-	0x0000068f,
-	0x0000068d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x5f433249,
-	0x00000aaa,
-	0x0000094d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x54534554,
-	0x00000acd,
-	0x00000aac,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x454c4449,
-	0x00000ad9,
-	0x00000ad7,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0268: proc_list_tail */
-/* 0x0268: time_prev */
-	0x00000000,
-/* 0x026c: time_next */
-	0x00000000,
-/* 0x0270: fifo_queue */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x02f0: rfifo_queue */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0370: memx_func_head */
-	0x00000001,
-	0x00000000,
-	0x000004d3,
-/* 0x037c: memx_func_next */
-	0x00000002,
-	0x00000000,
-	0x00000554,
-	0x00000003,
-	0x00000002,
-	0x000005d8,
-	0x00040004,
-	0x00000000,
-	0x000005f4,
-	0x00010005,
-	0x00000000,
-	0x0000060e,
-	0x00010006,
-	0x00000000,
-	0x000005d3,
-	0x00000007,
-	0x00000000,
-	0x00000619,
-/* 0x03c4: memx_func_tail */
-/* 0x03c4: memx_ts_start */
-	0x00000000,
-/* 0x03c8: memx_ts_end */
-	0x00000000,
-/* 0x03cc: memx_data_head */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0bcc: memx_data_tail */
-/* 0x0bcc: memx_train_head */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0ccc: memx_train_tail */
-/* 0x0ccc: i2c_scl_map */
-	0x00000400,
-	0x00000800,
-	0x00001000,
-	0x00002000,
-	0x00004000,
-	0x00008000,
-	0x00010000,
-	0x00020000,
-	0x00040000,
-	0x00080000,
-/* 0x0cf4: i2c_sda_map */
-	0x00100000,
-	0x00200000,
-	0x00400000,
-	0x00800000,
-	0x01000000,
-	0x02000000,
-	0x04000000,
-	0x08000000,
-	0x10000000,
-	0x20000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-};
-
-uint32_t gf110_pmu_code[] = {
-	0x034d0ef5,
-/* 0x0004: rd32 */
-	0x07a007f1,
-	0xbd000ed0,
-	0x01d7f004,
-	0xf101d3f0,
-	0xd007ac07,
-	0x04bd000d,
-/* 0x001c: rd32_wait */
-	0x07acd7f1,
-	0xf100ddcf,
-	0xf47000d4,
-	0xd7f1f51b,
-	0xddcf07a4,
-/* 0x0033: wr32 */
-	0xf100f800,
-	0xd007a007,
-	0x04bd000e,
-	0x07a407f1,
-	0xbd000dd0,
-	0x02d7f004,
-	0xf0f0d5f0,
-	0x07f101d3,
-	0x0dd007ac,
-/* 0x0057: wr32_wait */
-	0xf104bd00,
-	0xcf07acd7,
-	0xd4f100dd,
-	0x1bf47000,
-/* 0x0067: nsec */
-	0xf900f8f5,
-	0xf080f990,
-	0x88cf2c87,
-/* 0x0071: nsec_loop */
-	0x2c97f000,
-	0xbb0099cf,
-	0x9eb80298,
-	0xf41ef406,
-	0x90fc80fc,
-/* 0x0086: wait */
-	0x90f900f8,
-	0x87f080f9,
-	0x0088cf2c,
-/* 0x0090: wait_loop */
-	0xf402eeb9,
-	0xdab90421,
-	0x04adfd02,
-	0xf406acb8,
-	0x97f0120b,
-	0x0099cf2c,
-	0xb80298bb,
-	0x1ef4069b,
-/* 0x00b1: wait_done */
-	0xfc80fce2,
-/* 0x00b7: intr_watchdog */
-	0x9800f890,
-	0x96b003e9,
-	0x2a0bf400,
-	0xbb9a0a98,
-	0x1cf4029a,
-	0x01d7f00f,
-	0x028c21f5,
-	0x0ef494bd,
-/* 0x00d5: intr_watchdog_next_time */
-	0x9b0a9815,
-	0xf400a6b0,
-	0x9ab8090b,
-	0x061cf406,
-/* 0x00e4: intr_watchdog_next_time_set */
-/* 0x00e7: intr_watchdog_next_proc */
-	0x809b0980,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc61bf402,
-/* 0x00f6: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0xf7f0f0f9,
-	0x0188fe00,
-	0x87f180f9,
-	0x88cf05d0,
-	0x0180b600,
-	0x05d007f1,
-	0xbd0008d0,
-	0x0887f004,
-	0xc40088cf,
-	0x0bf40289,
-	0x9b008020,
-	0xf458e7f0,
-	0x0998b721,
-	0x0096b09b,
-	0xf00e0bf4,
-	0x09d03407,
-	0x8004bd00,
-/* 0x014e: intr_skip_watchdog */
-	0x89e49a09,
-	0x0bf40800,
-	0x8897f13c,
-	0x0099cf06,
-	0xf4029ac4,
-	0xc7f1260b,
-	0xcccf04c0,
-	0xf1c0f900,
-	0xf14f48e7,
-	0xf05453e3,
-	0x21f500d7,
-	0xc0fc02f1,
-	0x04c007f1,
-	0xbd000cd0,
-/* 0x0185: intr_subintr_skip_fifo */
-	0x8807f104,
-	0x0009d006,
-/* 0x018e: intr_skip_subintr */
-	0x89c404bd,
-	0x070bf420,
-	0xffbfa4f1,
-/* 0x0198: intr_skip_pause */
-	0xf44089c4,
-	0xa4f1070b,
-/* 0x01a2: intr_skip_user0 */
-	0x07f0ffbf,
-	0x0008d004,
-	0x80fc04bd,
-	0xfc0088fe,
-	0xfce0fcf0,
-	0xfcc0fcd0,
-	0xfca0fcb0,
-	0xfc80fc90,
-	0x0032f400,
-/* 0x01c6: ticks_from_ns */
-	0xc0f901f8,
-	0xd7f1b0f9,
-	0xd3f00144,
-	0xb321f500,
-	0xe8ccec03,
-	0x00b4b003,
-	0xec120bf4,
-	0xf103e8ee,
-	0xf00144d7,
-	0x21f500d3,
-/* 0x01ee: ticks_from_ns_quit */
-	0xceb903b3,
-	0xfcb0fc02,
-/* 0x01f7: ticks_from_us */
-	0xf900f8c0,
-	0xf1b0f9c0,
-	0xf00144d7,
-	0x21f500d3,
-	0xceb903b3,
-	0x00b4b002,
-	0xbd050bf4,
-/* 0x0211: ticks_from_us_quit */
-	0xfcb0fce4,
-/* 0x0217: ticks_to_us */
-	0xf100f8c0,
-	0xf00144d7,
-	0xedff00d3,
-/* 0x0223: timer */
-	0xf900f8ec,
-	0xf480f990,
-	0xf8981032,
-	0x0086b003,
-	0xbd531cf4,
-	0x3807f084,
-	0xbd0008d0,
-	0x3487f004,
-	0x980088cf,
-	0x98bb9a09,
-	0x00e9bb02,
-	0xf003fe80,
-	0x88cf0887,
-	0x0284f000,
-	0xf0201bf4,
-	0x88cf3487,
-	0x06e0b800,
-	0xb8090bf4,
-	0x1cf406e8,
-/* 0x026d: timer_reset */
-	0x3407f00e,
-	0xbd000ed0,
-	0x9a0e8004,
-/* 0x0278: timer_enable */
-	0xf00187f0,
-	0x08d03807,
-/* 0x0283: timer_done */
-	0xf404bd00,
-	0x80fc1031,
-	0x00f890fc,
-/* 0x028c: send_proc */
-	0x90f980f9,
-	0x9805e898,
-	0x86f004e9,
-	0x0689b804,
-	0xc42a0bf4,
-	0x88940398,
-	0x1880b604,
-	0x98008ebb,
-	0x8a8000fa,
-	0x018d8000,
-	0x80028c80,
-	0x90b6038b,
-	0x0794f001,
-	0xf404e980,
-/* 0x02c6: send_done */
-	0x90fc0231,
-	0x00f880fc,
-/* 0x02cc: find */
-	0x87f080f9,
-	0x0131f458,
-/* 0x02d4: find_loop */
-	0xb8008a98,
-	0x0bf406ae,
-	0x5880b610,
-	0x026886b1,
-	0xf4f01bf4,
-/* 0x02ea: find_done */
-	0x8eb90132,
-	0xf880fc02,
-/* 0x02f1: send */
-	0xcc21f500,
-	0x9701f402,
-/* 0x02fa: recv */
-	0x90f900f8,
-	0xe89880f9,
-	0x04e99805,
-	0xb80132f4,
-	0x0bf40689,
-	0x0389c43d,
-	0xf00180b6,
-	0xe8800784,
-	0x02ea9805,
-	0x8ffef0f9,
-	0xb9f0f901,
-	0x999402ef,
-	0x00e9bb04,
-	0x9818e0b6,
-	0xec9803eb,
-	0x01ed9802,
-	0xf900ee98,
-	0xfef0fca5,
-	0x31f400f8,
-/* 0x0347: recv_done */
-	0xfcf0fc01,
-	0xf890fc80,
-/* 0x034d: init */
-	0x0817f100,
-	0x0011cf01,
-	0x010911e7,
-	0xfe0814b6,
-	0x17f10014,
-	0x13f000e0,
-	0x1c07f000,
-	0xbd0001d0,
-	0xff17f004,
-	0xd01407f0,
-	0x04bd0001,
-	0xf10217f0,
-	0xf0080015,
-	0x01d01007,
-	0xf104bd00,
-	0xf000f617,
-	0x10fe0013,
-	0x1031f400,
-	0xf00117f0,
-	0x01d03807,
-	0xf004bd00,
-/* 0x03a2: init_proc */
-	0xf19858f7,
-	0x0016b001,
-	0xf9fa0bf4,
-	0x58f0b615,
-/* 0x03b3: mulu32_32_64 */
-	0xf9f20ef4,
-	0xf920f910,
-	0x9540f930,
-	0xd29510e1,
-	0xbdc4bd10,
-	0xc0edffb4,
-	0xb9301dff,
-	0x34f10234,
-	0x34b6ffff,
-	0x1045b610,
-	0xbb00c3bb,
-	0xe2ff01b4,
-	0x0234b930,
-	0xffff34f1,
-	0xb61034b6,
-	0xc3bb1045,
-	0x01b4bb00,
-	0xbb3012ff,
-	0x40fc00b3,
-	0x20fc30fc,
-	0x00f810fc,
-/* 0x0404: host_send */
-	0x04b017f1,
-	0xf10011cf,
-	0xcf04a027,
-	0x12b80022,
-	0x2f0bf406,
-	0x94071ec4,
-	0xe0b704ee,
-	0xeb980270,
-	0x02ec9803,
-	0x9801ed98,
-	0x21f500ee,
-	0x10b602f1,
-	0x0f1ec401,
-	0x04b007f1,
-	0xbd000ed0,
-	0xc30ef404,
-/* 0x0444: host_send_done */
-/* 0x0446: host_recv */
-	0x17f100f8,
-	0x13f14e49,
-	0xe1b85254,
-	0xb30bf406,
-/* 0x0454: host_recv_wait */
-	0x04cc17f1,
-	0xf10011cf,
-	0xcf04c827,
-	0x16f00022,
-	0x0612b808,
-	0xc4ec0bf4,
-	0x34b60723,
-	0xf030b704,
-	0x033b8002,
-	0x80023c80,
-	0x3e80013d,
-	0x0120b600,
-	0xf10f24f0,
-	0xd004c807,
-	0x04bd0002,
-	0xf04027f0,
-	0x02d00007,
-	0xf804bd00,
-/* 0x049d: host_init */
-	0x8017f100,
-	0x1014b600,
-	0x027015f1,
-	0x04d007f1,
-	0xbd0001d0,
-	0x8017f104,
-	0x1014b600,
-	0x02f015f1,
-	0x04dc07f1,
-	0xbd0001d0,
-	0x0117f004,
-	0x04c407f1,
-	0xbd0001d0,
-/* 0x04d3: memx_func_enter */
-	0xf100f804,
-	0xf1162067,
-	0xf1f55d77,
-	0xb9ffff73,
-	0x21f4026e,
-	0x02d8b904,
-	0xf90487fd,
-	0xfc80f960,
-	0xf4e0fcd0,
-	0x77f13321,
-	0x73f1fffe,
-	0x6eb9ffff,
-	0x0421f402,
-	0xfd02d8b9,
-	0x60f90487,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0xf067f133,
-	0x026eb926,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f904,
-	0xfcd0fc80,
-	0x3321f4e0,
-	0xf10467f0,
-	0xd007e007,
-	0x04bd0006,
-/* 0x053c: memx_func_enter_wait */
-	0x07c067f1,
-	0xf00066cf,
-	0x0bf40464,
-	0x2c67f0f6,
-	0x800066cf,
-	0x00f8f106,
-/* 0x0554: memx_func_leave */
-	0xcf2c67f0,
-	0x06800066,
-	0x0467f0f2,
-	0x07e407f1,
-	0xbd0006d0,
-/* 0x0569: memx_func_leave_wait */
-	0xc067f104,
-	0x0066cf07,
-	0xf40464f0,
-	0x67f1f61b,
-	0x77f126f0,
-	0x73f00001,
-	0x026eb900,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f905,
-	0xfcd0fc80,
-	0x3321f4e0,
-	0x162067f1,
-	0xf4026eb9,
-	0xd8b90421,
-	0x0587fd02,
-	0x80f960f9,
-	0xe0fcd0fc,
-	0xf13321f4,
-	0xf00aa277,
-	0x6eb90073,
-	0x0421f402,
-	0xfd02d8b9,
-	0x60f90587,
-	0xd0fc80f9,
-	0x21f4e0fc,
-/* 0x05d3: memx_func_wait_vblank */
-	0xb600f833,
-	0x00f80410,
-/* 0x05d8: memx_func_wr32 */
-	0x98001698,
-	0x10b60115,
-	0xf960f908,
-	0xfcd0fc50,
-	0x3321f4e0,
-	0xf40242b6,
-	0x00f8e91b,
-/* 0x05f4: memx_func_wait */
-	0xcf2c87f0,
-	0x1e980088,
-	0x011d9800,
-	0x98021c98,
-	0x10b6031b,
-	0x8621f410,
-/* 0x060e: memx_func_delay */
-	0x1e9800f8,
-	0x0410b600,
-	0xf86721f4,
-/* 0x0619: memx_func_train */
-/* 0x061b: memx_exec */
-	0xf900f800,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x0625: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x00bbcf07,
-	0xe0fcd0fc,
-	0x02f121f5,
-/* 0x065e: memx_info */
-	0xc67000f8,
-	0x0e0bf401,
-/* 0x0664: memx_info_data */
-	0x03ccc7f1,
-	0x0800b7f1,
-/* 0x066f: memx_info_train */
-	0xf10b0ef4,
-	0xf10bccc7,
-/* 0x0677: memx_info_send */
-	0xf50100b7,
-	0xf802f121,
-/* 0x067d: memx_recv */
-	0x01d6b000,
-	0xb09b0bf4,
-	0x0bf400d6,
-/* 0x068b: memx_init */
-	0xf800f8d8,
-/* 0x068d: perf_recv */
-/* 0x068f: perf_init */
-	0xf800f800,
-/* 0x0691: i2c_drive_scl */
-	0x0036b000,
-	0xf10e0bf4,
-	0xd007e007,
-	0x04bd0001,
-/* 0x06a2: i2c_drive_scl_lo */
-	0x07f100f8,
-	0x01d007e4,
-	0xf804bd00,
-/* 0x06ad: i2c_drive_sda */
-	0x0036b000,
-	0xf10e0bf4,
-	0xd007e007,
-	0x04bd0002,
-/* 0x06be: i2c_drive_sda_lo */
-	0x07f100f8,
-	0x02d007e4,
-	0xf804bd00,
-/* 0x06c9: i2c_sense_scl */
-	0x0132f400,
-	0x07c437f1,
-	0xfd0033cf,
-	0x0bf40431,
-	0x0131f406,
-/* 0x06dc: i2c_sense_scl_done */
-/* 0x06de: i2c_sense_sda */
-	0x32f400f8,
-	0xc437f101,
-	0x0033cf07,
-	0xf40432fd,
-	0x31f4060b,
-/* 0x06f1: i2c_sense_sda_done */
-/* 0x06f3: i2c_raise_scl */
-	0xf900f801,
-	0x9847f140,
-	0x0137f008,
-	0x069121f5,
-/* 0x0700: i2c_raise_scl_wait */
-	0x03e8e7f1,
-	0xf56721f4,
-	0xf406c921,
-	0x42b60901,
-	0xef1bf401,
-/* 0x0714: i2c_raise_scl_done */
-	0x00f840fc,
-/* 0x0718: i2c_start */
-	0x06c921f5,
-	0xf50d11f4,
-	0xf406de21,
-	0x0ef40611,
-/* 0x0729: i2c_start_rep */
-	0x0037f030,
-	0x069121f5,
-	0xf50137f0,
-	0xbb06ad21,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x06f321f5,
-	0xf40464b6,
-/* 0x0756: i2c_start_send */
-	0x37f01f11,
-	0xad21f500,
-	0x88e7f106,
-	0x6721f413,
-	0xf50037f0,
-	0xf1069121,
-	0xf41388e7,
-/* 0x0772: i2c_start_out */
-	0x00f86721,
-/* 0x0774: i2c_stop */
-	0xf50037f0,
-	0xf0069121,
-	0x21f50037,
-	0xe7f106ad,
-	0x21f403e8,
-	0x0137f067,
-	0x069121f5,
-	0x1388e7f1,
-	0xf06721f4,
-	0x21f50137,
-	0xe7f106ad,
-	0x21f41388,
-/* 0x07a7: i2c_bitw */
-	0xf500f867,
-	0xf106ad21,
-	0xf403e8e7,
-	0x76bb6721,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb606f321,
-	0x11f40464,
-	0x88e7f118,
-	0x6721f413,
-	0xf50037f0,
-	0xf1069121,
-	0xf41388e7,
-/* 0x07e6: i2c_bitw_out */
-	0x00f86721,
-/* 0x07e8: i2c_bitr */
-	0xf50137f0,
-	0xf106ad21,
-	0xf403e8e7,
-	0x76bb6721,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb606f321,
-	0x11f40464,
-	0xde21f51b,
-	0x0037f006,
-	0x069121f5,
-	0x1388e7f1,
-	0xf06721f4,
-	0x31f4013c,
-/* 0x082d: i2c_bitr_done */
-/* 0x082f: i2c_get_byte */
-	0xf000f801,
-	0x47f00057,
-/* 0x0835: i2c_get_byte_next */
-	0x0154b608,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xe821f550,
-	0x0464b607,
-	0xfd2b11f4,
-	0x42b60553,
-	0xd81bf401,
-	0xbb0137f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07a721f5,
-/* 0x087f: i2c_get_byte_done */
-	0xf80464b6,
-/* 0x0881: i2c_put_byte */
-	0x0847f000,
-/* 0x0884: i2c_put_byte_next */
-	0xff0142b6,
-	0x76bb3854,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb607a721,
-	0x11f40464,
-	0x0046b034,
-	0xbbd81bf4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07e821f5,
-	0xf40464b6,
-	0x76bb0f11,
-	0x0136b000,
-	0xf4061bf4,
-/* 0x08da: i2c_put_byte_done */
-	0x00f80132,
-/* 0x08dc: i2c_addr */
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x1821f550,
-	0x0464b607,
-	0xe72911f4,
-	0xb6012ec3,
-	0x53fd0134,
-	0x0076bb05,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b60881,
-/* 0x0921: i2c_addr_done */
-/* 0x0923: i2c_acquire_addr */
-	0xc700f804,
-	0xe4b6f8ce,
-	0x14e0b705,
-/* 0x092f: i2c_acquire */
-	0xf500f8d0,
-	0xf4092321,
-	0xd9f00421,
-	0x3321f403,
-/* 0x093e: i2c_release */
-	0x21f500f8,
-	0x21f40923,
-	0x03daf004,
-	0xf83321f4,
-/* 0x094d: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x092f21f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08dc21f5,
-	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6088121,
-	0x11f50464,
-	0x57f000ad,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b608dc,
-	0x8a11f504,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b6082f,
-	0x6a11f404,
-	0xbbe05bcb,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x077421f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0a53: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x08dc21f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f40881,
-	0x0057f029,
-	0x08dc21f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f40881,
-	0x7421f515,
-	0xc774bd07,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0a93: i2c_recv_not_wr08 */
-/* 0x0a93: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc093e,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x02f121f5,
-/* 0x0aa8: i2c_recv_exit */
-/* 0x0aaa: i2c_init */
-	0x00f800f8,
-/* 0x0aac: test_recv */
-	0x05d817f1,
-	0xb60011cf,
-	0x07f10110,
-	0x01d005d8,
-	0xf104bd00,
-	0xf1d900e7,
-	0xf5134fe3,
-	0xf8022321,
-/* 0x0acd: test_init */
-	0x00e7f100,
-	0x2321f508,
-/* 0x0ad7: idle_recv */
-	0xf800f802,
-/* 0x0ad9: idle */
-	0x0031f400,
-	0x05d417f1,
-	0xb60011cf,
-	0x07f10110,
-	0x01d005d4,
-/* 0x0aef: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0af5: idle_proc */
-/* 0x0af5: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc02fa,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0b09: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00c10ef4,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
new file mode 100644
index 0000000..2f28c7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF119
+#define HW_TICKS_PER_US 324
+
+//#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #gf119_pmu_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #gf119_pmu_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
new file mode 100644
index 0000000..31552af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -0,0 +1,1795 @@
+uint32_t gf119_pmu_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x0000049d,
+	0x00000446,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000068b,
+	0x0000067d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x0000068f,
+	0x0000068d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x5f433249,
+	0x00000aaa,
+	0x0000094d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x00000acd,
+	0x00000aac,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000ad9,
+	0x00000ad7,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
+	0x00000000,
+/* 0x026c: time_next */
+	0x00000000,
+/* 0x0270: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x02f0: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0370: memx_func_head */
+	0x00000001,
+	0x00000000,
+	0x000004d3,
+/* 0x037c: memx_func_next */
+	0x00000002,
+	0x00000000,
+	0x00000554,
+	0x00000003,
+	0x00000002,
+	0x000005d8,
+	0x00040004,
+	0x00000000,
+	0x000005f4,
+	0x00010005,
+	0x00000000,
+	0x0000060e,
+	0x00010006,
+	0x00000000,
+	0x000005d3,
+	0x00000007,
+	0x00000000,
+	0x00000619,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
+	0x00000000,
+/* 0x03c8: memx_ts_end */
+	0x00000000,
+/* 0x03cc: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
+	0x00000400,
+	0x00000800,
+	0x00001000,
+	0x00002000,
+	0x00004000,
+	0x00008000,
+	0x00010000,
+	0x00020000,
+	0x00040000,
+	0x00080000,
+/* 0x0cf4: i2c_sda_map */
+	0x00100000,
+	0x00200000,
+	0x00400000,
+	0x00800000,
+	0x01000000,
+	0x02000000,
+	0x04000000,
+	0x08000000,
+	0x10000000,
+	0x20000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t gf119_pmu_code[] = {
+	0x034d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xbd000ed0,
+	0x01d7f004,
+	0xf101d3f0,
+	0xd007ac07,
+	0x04bd000d,
+/* 0x001c: rd32_wait */
+	0x07acd7f1,
+	0xf100ddcf,
+	0xf47000d4,
+	0xd7f1f51b,
+	0xddcf07a4,
+/* 0x0033: wr32 */
+	0xf100f800,
+	0xd007a007,
+	0x04bd000e,
+	0x07a407f1,
+	0xbd000dd0,
+	0x02d7f004,
+	0xf0f0d5f0,
+	0x07f101d3,
+	0x0dd007ac,
+/* 0x0057: wr32_wait */
+	0xf104bd00,
+	0xcf07acd7,
+	0xd4f100dd,
+	0x1bf47000,
+/* 0x0067: nsec */
+	0xf900f8f5,
+	0xf080f990,
+	0x88cf2c87,
+/* 0x0071: nsec_loop */
+	0x2c97f000,
+	0xbb0099cf,
+	0x9eb80298,
+	0xf41ef406,
+	0x90fc80fc,
+/* 0x0086: wait */
+	0x90f900f8,
+	0x87f080f9,
+	0x0088cf2c,
+/* 0x0090: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0120b,
+	0x0099cf2c,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00b1: wait_done */
+	0xfc80fce2,
+/* 0x00b7: intr_watchdog */
+	0x9800f890,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb9a0a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x028c21f5,
+	0x0ef494bd,
+/* 0x00d5: intr_watchdog_next_time */
+	0x9b0a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00e4: intr_watchdog_next_time_set */
+/* 0x00e7: intr_watchdog_next_proc */
+	0x809b0980,
+	0xe0b603e9,
+	0x68e6b158,
+	0xc61bf402,
+/* 0x00f6: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x88cf05d0,
+	0x0180b600,
+	0x05d007f1,
+	0xbd0008d0,
+	0x0887f004,
+	0xc40088cf,
+	0x0bf40289,
+	0x9b008020,
+	0xf458e7f0,
+	0x0998b721,
+	0x0096b09b,
+	0xf00e0bf4,
+	0x09d03407,
+	0x8004bd00,
+/* 0x014e: intr_skip_watchdog */
+	0x89e49a09,
+	0x0bf40800,
+	0x8897f13c,
+	0x0099cf06,
+	0xf4029ac4,
+	0xc7f1260b,
+	0xcccf04c0,
+	0xf1c0f900,
+	0xf14f48e7,
+	0xf05453e3,
+	0x21f500d7,
+	0xc0fc02f1,
+	0x04c007f1,
+	0xbd000cd0,
+/* 0x0185: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0009d006,
+/* 0x018e: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x0198: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01a2: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0008d004,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x01c6: ticks_from_ns */
+	0xc0f901f8,
+	0xd7f1b0f9,
+	0xd3f00144,
+	0xb321f500,
+	0xe8ccec03,
+	0x00b4b003,
+	0xec120bf4,
+	0xf103e8ee,
+	0xf00144d7,
+	0x21f500d3,
+/* 0x01ee: ticks_from_ns_quit */
+	0xceb903b3,
+	0xfcb0fc02,
+/* 0x01f7: ticks_from_us */
+	0xf900f8c0,
+	0xf1b0f9c0,
+	0xf00144d7,
+	0x21f500d3,
+	0xceb903b3,
+	0x00b4b002,
+	0xbd050bf4,
+/* 0x0211: ticks_from_us_quit */
+	0xfcb0fce4,
+/* 0x0217: ticks_to_us */
+	0xf100f8c0,
+	0xf00144d7,
+	0xedff00d3,
+/* 0x0223: timer */
+	0xf900f8ec,
+	0xf480f990,
+	0xf8981032,
+	0x0086b003,
+	0xbd531cf4,
+	0x3807f084,
+	0xbd0008d0,
+	0x3487f004,
+	0x980088cf,
+	0x98bb9a09,
+	0x00e9bb02,
+	0xf003fe80,
+	0x88cf0887,
+	0x0284f000,
+	0xf0201bf4,
+	0x88cf3487,
+	0x06e0b800,
+	0xb8090bf4,
+	0x1cf406e8,
+/* 0x026d: timer_reset */
+	0x3407f00e,
+	0xbd000ed0,
+	0x9a0e8004,
+/* 0x0278: timer_enable */
+	0xf00187f0,
+	0x08d03807,
+/* 0x0283: timer_done */
+	0xf404bd00,
+	0x80fc1031,
+	0x00f890fc,
+/* 0x028c: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x02c6: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x02cc: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x02d4: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x026886b1,
+	0xf4f01bf4,
+/* 0x02ea: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02f1: send */
+	0xcc21f500,
+	0x9701f402,
+/* 0x02fa: recv */
+	0x90f900f8,
+	0xe89880f9,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x0347: recv_done */
+	0xfcf0fc01,
+	0xf890fc80,
+/* 0x034d: init */
+	0x0817f100,
+	0x0011cf01,
+	0x010911e7,
+	0xfe0814b6,
+	0x17f10014,
+	0x13f000e0,
+	0x1c07f000,
+	0xbd0001d0,
+	0xff17f004,
+	0xd01407f0,
+	0x04bd0001,
+	0xf10217f0,
+	0xf0080015,
+	0x01d01007,
+	0xf104bd00,
+	0xf000f617,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x01d03807,
+	0xf004bd00,
+/* 0x03a2: init_proc */
+	0xf19858f7,
+	0x0016b001,
+	0xf9fa0bf4,
+	0x58f0b615,
+/* 0x03b3: mulu32_32_64 */
+	0xf9f20ef4,
+	0xf920f910,
+	0x9540f930,
+	0xd29510e1,
+	0xbdc4bd10,
+	0xc0edffb4,
+	0xb9301dff,
+	0x34f10234,
+	0x34b6ffff,
+	0x1045b610,
+	0xbb00c3bb,
+	0xe2ff01b4,
+	0x0234b930,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xbb3012ff,
+	0x40fc00b3,
+	0x20fc30fc,
+	0x00f810fc,
+/* 0x0404: host_send */
+	0x04b017f1,
+	0xf10011cf,
+	0xcf04a027,
+	0x12b80022,
+	0x2f0bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980270,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602f1,
+	0x0f1ec401,
+	0x04b007f1,
+	0xbd000ed0,
+	0xc30ef404,
+/* 0x0444: host_send_done */
+/* 0x0446: host_recv */
+	0x17f100f8,
+	0x13f14e49,
+	0xe1b85254,
+	0xb30bf406,
+/* 0x0454: host_recv_wait */
+	0x04cc17f1,
+	0xf10011cf,
+	0xcf04c827,
+	0x16f00022,
+	0x0612b808,
+	0xc4ec0bf4,
+	0x34b60723,
+	0xf030b704,
+	0x033b8002,
+	0x80023c80,
+	0x3e80013d,
+	0x0120b600,
+	0xf10f24f0,
+	0xd004c807,
+	0x04bd0002,
+	0xf04027f0,
+	0x02d00007,
+	0xf804bd00,
+/* 0x049d: host_init */
+	0x8017f100,
+	0x1014b600,
+	0x027015f1,
+	0x04d007f1,
+	0xbd0001d0,
+	0x8017f104,
+	0x1014b600,
+	0x02f015f1,
+	0x04dc07f1,
+	0xbd0001d0,
+	0x0117f004,
+	0x04c407f1,
+	0xbd0001d0,
+/* 0x04d3: memx_func_enter */
+	0xf100f804,
+	0xf1162067,
+	0xf1f55d77,
+	0xb9ffff73,
+	0x21f4026e,
+	0x02d8b904,
+	0xf90487fd,
+	0xfc80f960,
+	0xf4e0fcd0,
+	0x77f13321,
+	0x73f1fffe,
+	0x6eb9ffff,
+	0x0421f402,
+	0xfd02d8b9,
+	0x60f90487,
+	0xd0fc80f9,
+	0x21f4e0fc,
+	0xf067f133,
+	0x026eb926,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f904,
+	0xfcd0fc80,
+	0x3321f4e0,
+	0xf10467f0,
+	0xd007e007,
+	0x04bd0006,
+/* 0x053c: memx_func_enter_wait */
+	0x07c067f1,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f6,
+	0x800066cf,
+	0x00f8f106,
+/* 0x0554: memx_func_leave */
+	0xcf2c67f0,
+	0x06800066,
+	0x0467f0f2,
+	0x07e407f1,
+	0xbd0006d0,
+/* 0x0569: memx_func_leave_wait */
+	0xc067f104,
+	0x0066cf07,
+	0xf40464f0,
+	0x67f1f61b,
+	0x77f126f0,
+	0x73f00001,
+	0x026eb900,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f905,
+	0xfcd0fc80,
+	0x3321f4e0,
+	0x162067f1,
+	0xf4026eb9,
+	0xd8b90421,
+	0x0587fd02,
+	0x80f960f9,
+	0xe0fcd0fc,
+	0xf13321f4,
+	0xf00aa277,
+	0x6eb90073,
+	0x0421f402,
+	0xfd02d8b9,
+	0x60f90587,
+	0xd0fc80f9,
+	0x21f4e0fc,
+/* 0x05d3: memx_func_wait_vblank */
+	0xb600f833,
+	0x00f80410,
+/* 0x05d8: memx_func_wr32 */
+	0x98001698,
+	0x10b60115,
+	0xf960f908,
+	0xfcd0fc50,
+	0x3321f4e0,
+	0xf40242b6,
+	0x00f8e91b,
+/* 0x05f4: memx_func_wait */
+	0xcf2c87f0,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0x8621f410,
+/* 0x060e: memx_func_delay */
+	0x1e9800f8,
+	0x0410b600,
+	0xf86721f4,
+/* 0x0619: memx_func_train */
+/* 0x061b: memx_exec */
+	0xf900f800,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0625: memx_exec_next */
+	0x00139802,
+	0xe70410b6,
+	0xe701f034,
+	0xb601e033,
+	0x30f00132,
+	0xde35980c,
+	0x12b855f9,
+	0xe41ef406,
+	0x98f10b98,
+	0xcbbbf20c,
+	0xc4b7f102,
+	0x00bbcf07,
+	0xe0fcd0fc,
+	0x02f121f5,
+/* 0x065e: memx_info */
+	0xc67000f8,
+	0x0e0bf401,
+/* 0x0664: memx_info_data */
+	0x03ccc7f1,
+	0x0800b7f1,
+/* 0x066f: memx_info_train */
+	0xf10b0ef4,
+	0xf10bccc7,
+/* 0x0677: memx_info_send */
+	0xf50100b7,
+	0xf802f121,
+/* 0x067d: memx_recv */
+	0x01d6b000,
+	0xb09b0bf4,
+	0x0bf400d6,
+/* 0x068b: memx_init */
+	0xf800f8d8,
+/* 0x068d: perf_recv */
+/* 0x068f: perf_init */
+	0xf800f800,
+/* 0x0691: i2c_drive_scl */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
+	0x04bd0001,
+/* 0x06a2: i2c_drive_scl_lo */
+	0x07f100f8,
+	0x01d007e4,
+	0xf804bd00,
+/* 0x06ad: i2c_drive_sda */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
+	0x04bd0002,
+/* 0x06be: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x02d007e4,
+	0xf804bd00,
+/* 0x06c9: i2c_sense_scl */
+	0x0132f400,
+	0x07c437f1,
+	0xfd0033cf,
+	0x0bf40431,
+	0x0131f406,
+/* 0x06dc: i2c_sense_scl_done */
+/* 0x06de: i2c_sense_sda */
+	0x32f400f8,
+	0xc437f101,
+	0x0033cf07,
+	0xf40432fd,
+	0x31f4060b,
+/* 0x06f1: i2c_sense_sda_done */
+/* 0x06f3: i2c_raise_scl */
+	0xf900f801,
+	0x9847f140,
+	0x0137f008,
+	0x069121f5,
+/* 0x0700: i2c_raise_scl_wait */
+	0x03e8e7f1,
+	0xf56721f4,
+	0xf406c921,
+	0x42b60901,
+	0xef1bf401,
+/* 0x0714: i2c_raise_scl_done */
+	0x00f840fc,
+/* 0x0718: i2c_start */
+	0x06c921f5,
+	0xf50d11f4,
+	0xf406de21,
+	0x0ef40611,
+/* 0x0729: i2c_start_rep */
+	0x0037f030,
+	0x069121f5,
+	0xf50137f0,
+	0xbb06ad21,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x06f321f5,
+	0xf40464b6,
+/* 0x0756: i2c_start_send */
+	0x37f01f11,
+	0xad21f500,
+	0x88e7f106,
+	0x6721f413,
+	0xf50037f0,
+	0xf1069121,
+	0xf41388e7,
+/* 0x0772: i2c_start_out */
+	0x00f86721,
+/* 0x0774: i2c_stop */
+	0xf50037f0,
+	0xf0069121,
+	0x21f50037,
+	0xe7f106ad,
+	0x21f403e8,
+	0x0137f067,
+	0x069121f5,
+	0x1388e7f1,
+	0xf06721f4,
+	0x21f50137,
+	0xe7f106ad,
+	0x21f41388,
+/* 0x07a7: i2c_bitw */
+	0xf500f867,
+	0xf106ad21,
+	0xf403e8e7,
+	0x76bb6721,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606f321,
+	0x11f40464,
+	0x88e7f118,
+	0x6721f413,
+	0xf50037f0,
+	0xf1069121,
+	0xf41388e7,
+/* 0x07e6: i2c_bitw_out */
+	0x00f86721,
+/* 0x07e8: i2c_bitr */
+	0xf50137f0,
+	0xf106ad21,
+	0xf403e8e7,
+	0x76bb6721,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606f321,
+	0x11f40464,
+	0xde21f51b,
+	0x0037f006,
+	0x069121f5,
+	0x1388e7f1,
+	0xf06721f4,
+	0x31f4013c,
+/* 0x082d: i2c_bitr_done */
+/* 0x082f: i2c_get_byte */
+	0xf000f801,
+	0x47f00057,
+/* 0x0835: i2c_get_byte_next */
+	0x0154b608,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xe821f550,
+	0x0464b607,
+	0xfd2b11f4,
+	0x42b60553,
+	0xd81bf401,
+	0xbb0137f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07a721f5,
+/* 0x087f: i2c_get_byte_done */
+	0xf80464b6,
+/* 0x0881: i2c_put_byte */
+	0x0847f000,
+/* 0x0884: i2c_put_byte_next */
+	0xff0142b6,
+	0x76bb3854,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb607a721,
+	0x11f40464,
+	0x0046b034,
+	0xbbd81bf4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07e821f5,
+	0xf40464b6,
+	0x76bb0f11,
+	0x0136b000,
+	0xf4061bf4,
+/* 0x08da: i2c_put_byte_done */
+	0x00f80132,
+/* 0x08dc: i2c_addr */
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x1821f550,
+	0x0464b607,
+	0xe72911f4,
+	0xb6012ec3,
+	0x53fd0134,
+	0x0076bb05,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60881,
+/* 0x0921: i2c_addr_done */
+/* 0x0923: i2c_acquire_addr */
+	0xc700f804,
+	0xe4b6f8ce,
+	0x14e0b705,
+/* 0x092f: i2c_acquire */
+	0xf500f8d0,
+	0xf4092321,
+	0xd9f00421,
+	0x3321f403,
+/* 0x093e: i2c_release */
+	0x21f500f8,
+	0x21f40923,
+	0x03daf004,
+	0xf83321f4,
+/* 0x094d: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xf413a001,
+	0x0032980c,
+	0x0ccc13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x092f21f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x08dc21f5,
+	0xf50464b6,
+	0xc700d011,
+	0x76bbe0c5,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6088121,
+	0x11f50464,
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b608dc,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b6082f,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x077421f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x0a53: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x08dc21f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40881,
+	0x0057f029,
+	0x08dc21f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40881,
+	0x7421f515,
+	0xc774bd07,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x0a93: i2c_recv_not_wr08 */
+/* 0x0a93: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc093e,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x02f121f5,
+/* 0x0aa8: i2c_recv_exit */
+/* 0x0aaa: i2c_init */
+	0x00f800f8,
+/* 0x0aac: test_recv */
+	0x05d817f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d8,
+	0xf104bd00,
+	0xf1d900e7,
+	0xf5134fe3,
+	0xf8022321,
+/* 0x0acd: test_init */
+	0x00e7f100,
+	0x2321f508,
+/* 0x0ad7: idle_recv */
+	0xf800f802,
+/* 0x0ad9: idle */
+	0x0031f400,
+	0x05d417f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d4,
+/* 0x0aef: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x0af5: idle_proc */
+/* 0x0af5: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02fa,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x0b09: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00c10ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 78a4ea0..aeb8ccd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,17 +24,16 @@
 #include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct nvkm_oclass *
-gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gf100_pmu = {
 	.code.data = gf100_pmu_code,
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
 	.data.size = sizeof(gf100_pmu_data),
-}.base;
+};
+
+int
+gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
deleted file mode 100644
index 6b3a238..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-#include "fuc/gf110.fuc4.h"
-
-struct nvkm_oclass *
-gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
-	.code.data = gf110_pmu_code,
-	.code.size = sizeof(gf110_pmu_code),
-	.data.data = gf110_pmu_data,
-	.data.size = sizeof(gf110_pmu_data),
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
new file mode 100644
index 0000000..fbc88d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "fuc/gf119.fuc4.h"
+
+static const struct nvkm_pmu_func
+gf119_pmu = {
+	.code.data = gf119_pmu_code,
+	.code.size = sizeof(gf119_pmu_code),
+	.data.data = gf119_pmu_data,
+	.data.size = sizeof(gf119_pmu_data),
+};
+
+int
+gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 28fdb8e..e33f5c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,47 +21,97 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk104_pmu_code
-#define gf110_pmu_data gk104_pmu_data
+#define gf119_pmu_code gk104_pmu_code
+#define gf119_pmu_data gk104_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
+
+#include <core/option.h>
+#include <subdev/timer.h>
+
+static void
+magic_(struct nvkm_device *device, u32 ctrl, int size)
+{
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+	nvkm_wr32(device, 0x00c808, 0x00000000);
+	nvkm_wr32(device, 0x00c800, ctrl);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
+			while (size--)
+				nvkm_wr32(device, 0x00c804, 0x00000000);
+			break;
+		}
+	);
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+}
+
+static void
+magic(struct nvkm_device *device, u32 ctrl)
+{
+	magic_(device, 0x8000a41f | ctrl, 6);
+	magic_(device, 0x80000421 | ctrl, 1);
+}
 
 static void
 gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	struct nvkm_device *device = pmu->subdev.device;
+
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+	nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
+
+	if (nvkm_boolopt(device->cfgopt, "War00C800_0",
+	    device->quirk ? device->quirk->War00C800_0 : false)) {
+		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+		switch (device->chipset) {
+		case 0xe4:
+			magic(device, 0x04000000);
+			magic(device, 0x06000000);
+			magic(device, 0x0c000000);
+			magic(device, 0x0e000000);
+			break;
+		case 0xe6:
+			magic(device, 0x02000000);
+			magic(device, 0x04000000);
+			magic(device, 0x0a000000);
+			break;
+		case 0xe7:
+			magic(device, 0x02000000);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
-struct nvkm_oclass *
-gk104_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk104_pmu = {
 	.code.data = gk104_pmu_code,
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
 	.data.size = sizeof(gk104_pmu_data),
 	.pgob = gk104_pmu_pgob,
-}.base;
+};
+
+int
+gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 89bb94b..ae25524 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -21,16 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk110_pmu_code
-#define gf110_pmu_data gk110_pmu_data
+#define gf119_pmu_code gk110_pmu_code
+#define gf119_pmu_data gk110_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
 
 #include <subdev/timer.h>
 
 void
 gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	static const struct {
 		u32 addr;
 		u32 data;
@@ -54,42 +55,44 @@
 	};
 	int i;
 
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+	nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
 	for (i = 0; i < ARRAY_SIZE(magic); i++) {
-		nv_wr32(pmu, magic[i].addr, magic[i].data);
-		nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+		nvkm_wr32(device, magic[i].addr, magic[i].data);
+		nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
+				break;
+		);
 	}
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 }
 
-struct nvkm_oclass *
-gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk110_pmu = {
 	.code.data = gk110_pmu_code,
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
 	.data.size = sizeof(gk110_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b14134e..3b49176 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -24,18 +24,17 @@
 #include "priv.h"
 #include "fuc/gk208.fuc5.h"
 
-struct nvkm_oclass *
-gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0x00),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk208_pmu = {
 	.code.data = gk208_pmu_code,
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
 	.data.size = sizeof(gk208_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 594f746..6689d02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,6 +19,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
+#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
 #include "priv.h"
 
 #include <subdev/clk.h>
@@ -35,7 +36,7 @@
 	unsigned int avg_load;
 };
 
-struct gk20a_pmu_priv {
+struct gk20a_pmu {
 	struct nvkm_pmu base;
 	struct nvkm_alarm alarm;
 	struct gk20a_pmu_dvfs_data *data;
@@ -48,28 +49,28 @@
 };
 
 static int
-gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	return nvkm_clk_astate(clk, *state, 0, false);
 }
 
 static int
-gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	*state = clk->pstate;
 	return 0;
 }
 
 static int
-gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
 				int *state, int load)
 {
-	struct gk20a_pmu_dvfs_data *data = priv->data;
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 	int cur_level, level;
 
 	/* For GK20A, the performance level is directly mapped to pstate */
@@ -84,7 +85,8 @@
 		level = min(clk->state_nr - 1, level);
 	}
 
-	nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);
+	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+		   cur_level, level);
 
 	*state = level;
 
@@ -95,30 +97,35 @@
 }
 
 static int
-gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
-	status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
+	struct nvkm_device *device = pmu->base.subdev.device;
+	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
+	status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
 	return 0;
 }
 
 static void
-gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
+gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
-	nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+	struct nvkm_device *device = pmu->base.subdev.device;
+	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
 }
 
 static void
 gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 {
-	struct gk20a_pmu_priv *priv =
-		container_of(alarm, struct gk20a_pmu_priv, alarm);
-	struct gk20a_pmu_dvfs_data *data = priv->data;
+	struct gk20a_pmu *pmu =
+		container_of(alarm, struct gk20a_pmu, alarm);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
 	struct gk20a_pmu_dvfs_dev_status status;
-	struct nvkm_clk *clk = nvkm_clk(priv);
-	struct nvkm_volt *volt = nvkm_volt(priv);
+	struct nvkm_subdev *subdev = &pmu->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_volt *volt = device->volt;
 	u32 utilization = 0;
 	int state, ret;
 
@@ -129,9 +136,9 @@
 	if (!clk || !volt)
 		goto resched;
 
-	ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
+	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
 	if (ret) {
-		nv_warn(priv, "failed to get device status\n");
+		nvkm_warn(subdev, "failed to get device status\n");
 		goto resched;
 	}
 
@@ -140,56 +147,52 @@
 
 	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
 	data->avg_load /= data->p_smooth + 1;
-	nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
-			utilization, data->avg_load);
+	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+		   utilization, data->avg_load);
 
-	ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
+	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
 	if (ret) {
-		nv_warn(priv, "failed to get current state\n");
+		nvkm_warn(subdev, "failed to get current state\n");
 		goto resched;
 	}
 
-	if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
-		nv_trace(priv, "set new state to %d\n", state);
-		gk20a_pmu_dvfs_target(priv, &state);
+	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
+		nvkm_trace(subdev, "set new state to %d\n", state);
+		gk20a_pmu_dvfs_target(pmu, &state);
 	}
 
 resched:
-	gk20a_pmu_dvfs_reset_dev_status(priv);
-	nvkm_timer_alarm(priv, 100000000, alarm);
+	gk20a_pmu_dvfs_reset_dev_status(pmu);
+	nvkm_timer_alarm(tmr, 100000000, alarm);
 }
 
 static int
-gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
+gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
+	return 0;
+}
 
-	nvkm_timer_alarm_cancel(priv, &priv->alarm);
-
-	return nvkm_subdev_fini(&pmu->base, suspend);
+static void *
+gk20a_pmu_dtor(struct nvkm_subdev *subdev)
+{
+	return gk20a_pmu(subdev);
 }
 
 static int
-gk20a_pmu_init(struct nvkm_object *object)
+gk20a_pmu_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
-	int ret;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	pmu->pgob = nvkm_pmu_pgob;
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	struct nvkm_device *device = pmu->base.subdev.device;
 
 	/* init pwr perf counter */
-	nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
-	nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
-	nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
 
-	nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);
-	return ret;
+	nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
+	return 0;
 }
 
 static struct gk20a_pmu_dvfs_data
@@ -199,32 +202,26 @@
 	.p_smooth = 1,
 };
 
-static int
-gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_subdev_func
+gk20a_pmu = {
+	.init = gk20a_pmu_init,
+	.fini = gk20a_pmu_fini,
+	.dtor = gk20a_pmu_dtor,
+};
+
+int
+gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	struct gk20a_pmu_priv *priv;
-	int ret;
+	static const struct nvkm_pmu_func func = {};
+	struct gk20a_pmu *pmu;
 
-	ret = nvkm_pmu_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	pmu->base.func = &func;
+	*ppmu = &pmu->base;
 
-	priv->data = &gk20a_dvfs_data;
-
-	nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
+	nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+	pmu->data = &gk20a_dvfs_data;
+	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gk20a_pmu_init,
-		.fini = gk20a_pmu_fini,
-	},
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
new file mode 100644
index 0000000..31b8692
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#define gk208_pmu_code gm107_pmu_code
+#define gk208_pmu_data gm107_pmu_data
+#include "fuc/gk208.fuc5.h"
+
+static const struct nvkm_pmu_func
+gm107_pmu = {
+	.code.data = gm107_pmu_code,
+	.code.size = sizeof(gm107_pmu_code),
+	.data.data = gm107_pmu_data,
+	.data.size = sizeof(gm107_pmu_data),
+};
+
+int
+gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 30aaeb2..8ba7fa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,26 +24,25 @@
 #include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
-static int
-gt215_pmu_init(struct nvkm_object *object)
+static void
+gt215_pmu_reset(struct nvkm_pmu *pmu)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
-	return nvkm_pmu_init(pmu);
+	struct nvkm_device *device = pmu->subdev.device;
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
 }
 
-struct nvkm_oclass *
-gt215_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gt215_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gt215_pmu = {
+	.reset = gt215_pmu_reset,
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
 	.data.size = sizeof(gt215_pmu_data),
-}.base;
+};
+
+int
+gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index b75c5b8..e6f7416 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -2,8 +2,6 @@
 #define __NVKM_PMU_MEMX_H__
 #include "priv.h"
 
-#include <core/device.h>
-
 struct nvkm_memx {
 	struct nvkm_pmu *pmu;
 	u32 base;
@@ -18,13 +16,13 @@
 static void
 memx_out(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_device *device = memx->pmu->subdev.device;
 	int i;
 
 	if (memx->c.mthd) {
-		nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
 		for (i = 0; i < memx->c.size; i++)
-			nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
 		memx->c.mthd = 0;
 		memx->c.size = 0;
 	}
@@ -44,12 +42,13 @@
 int
 nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	struct nvkm_memx *memx;
 	u32 reply[2];
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_DATA, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_DATA, 0);
 	if (ret)
 		return ret;
 
@@ -62,9 +61,9 @@
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000003);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000003);
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+		nvkm_wr32(device, 0x10a580, 0x00000003);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
 	return 0;
 }
 
@@ -73,23 +72,25 @@
 {
 	struct nvkm_memx *memx = *pmemx;
 	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 finish, reply[2];
 
 	/* flush the cache... */
 	memx_out(memx);
 
 	/* release data segment access */
-	finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* call MEMX process to execute the script, and wait for reply */
 	if (exec) {
-		pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
-			     memx->base, finish);
+		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+			      memx->base, finish);
 	}
 
-	nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
-		 reply[0], reply[1]);
+	nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+		   reply[0], reply[1]);
 	kfree(memx);
 	return 0;
 }
@@ -97,7 +98,7 @@
 void
 nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
 {
-	nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
 	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
 }
 
@@ -105,8 +106,8 @@
 nvkm_memx_wait(struct nvkm_memx *memx,
 		  u32 addr, u32 mask, u32 data, u32 nsec)
 {
-	nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
-				addr, mask, data, nsec);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+		   addr, mask, data, nsec);
 	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -114,7 +115,7 @@
 void
 nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 {
-	nv_debug(memx->pmu, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
 	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -122,16 +123,17 @@
 void
 nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &memx->pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 heads, x, y, px = 0;
 	int i, head_sync;
 
-	if (nv_device(pmu)->chipset < 0xd0) {
-		heads = nv_rd32(pmu, 0x610050);
+	if (device->chipset < 0xd0) {
+		heads = nvkm_rd32(device, 0x610050);
 		for (i = 0; i < 2; i++) {
 			/* Heuristic: sync to head with biggest resolution */
 			if (heads & (2 << (i << 3))) {
-				x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
 				y = (x & 0xffff0000) >> 16;
 				x &= 0x0000ffff;
 				if ((x * y) > px) {
@@ -143,11 +145,11 @@
 	}
 
 	if (px == 0) {
-		nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
 		return;
 	}
 
-	nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
 	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -155,18 +157,19 @@
 void
 nvkm_memx_train(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   MEM TRAIN\n");
+	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
 	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
 }
 
 int
 nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	u32 reply[2], base, size, i;
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_TRAIN, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_TRAIN, 0);
 	if (ret)
 		return ret;
 
@@ -176,10 +179,10 @@
 		return -ENOMEM;
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
 
 	for (i = 0; i < size; i++)
-		res[i] = nv_rd32(pmu, 0x10a1c4);
+		res[i] = nvkm_rd32(device, 0x10a1c4);
 
 	return 0;
 }
@@ -187,14 +190,14 @@
 void
 nvkm_memx_block(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST BLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
 	memx_cmd(memx, MEMX_ENTER, 0, NULL);
 }
 
 void
 nvkm_memx_unblock(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST UNBLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
 	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
 }
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 799e7c8..f38c88f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,38 +1,20 @@
 #ifndef __NVKM_PMU_PRIV_H__
 #define __NVKM_PMU_PRIV_H__
+#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
 #include <subdev/pmu.h>
 #include <subdev/pmu/fuc/os.h>
 
-#define nvkm_pmu_create(p, e, o, d)                                         \
-	nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pmu_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_pmu_init(p) ({                                                 \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_init(nv_object(_pmu));                                   \
-})
-#define nvkm_pmu_fini(p,s) ({                                               \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_fini(nv_object(_pmu), (s));                              \
-})
+int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_pmu **);
 
-int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
+struct nvkm_pmu_func {
+	void (*reset)(struct nvkm_pmu *);
 
-int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_pmu_dtor _nvkm_subdev_dtor
-int _nvkm_pmu_init(struct nvkm_object *);
-int _nvkm_pmu_fini(struct nvkm_object *, bool);
-void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
-
-struct nvkm_pmu_impl {
-	struct nvkm_oclass base;
 	struct {
 		u32 *data;
 		u32  size;
 	} code;
+
 	struct {
 		u32 *data;
 		u32  size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 5837cf1..135758b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,5 +9,5 @@
 nvkm-y += nvkm/subdev/therm/nv50.o
 nvkm-y += nvkm/subdev/therm/g84.o
 nvkm-y += nvkm/subdev/therm/gt215.o
-nvkm-y += nvkm/subdev/therm/gf110.o
+nvkm-y += nvkm/subdev/therm/gf119.o
 nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index ec327cb..949dc61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -23,21 +23,26 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+int
+nvkm_therm_temp_get(struct nvkm_therm *therm)
+{
+	if (therm->func->temp_get)
+		return therm->func->temp_get(therm);
+	return -ENODEV;
+}
 
 static int
 nvkm_therm_update_trip(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
+	struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
 				       *cur_trip = NULL,
-				       *last_trip = priv->last_trip;
-	u8  temp = therm->temp_get(therm);
+				       *last_trip = therm->last_trip;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty, i;
 
 	/* look for the trip point corresponding to the current temperature */
 	cur_trip = NULL;
-	for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+	for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
 		if (temp >= trip[i].temp)
 			cur_trip = &trip[i];
 	}
@@ -49,10 +54,10 @@
 
 	if (cur_trip) {
 		duty = cur_trip->fan_duty;
-		priv->last_trip = cur_trip;
+		therm->last_trip = cur_trip;
 	} else {
 		duty = 0;
-		priv->last_trip = NULL;
+		therm->last_trip = NULL;
 	}
 
 	return duty;
@@ -61,51 +66,50 @@
 static int
 nvkm_therm_update_linear(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	u8  linear_min_temp = priv->fan->bios.linear_min_temp;
-	u8  linear_max_temp = priv->fan->bios.linear_max_temp;
-	u8  temp = therm->temp_get(therm);
+	u8  linear_min_temp = therm->fan->bios.linear_min_temp;
+	u8  linear_max_temp = therm->fan->bios.linear_max_temp;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty;
 
 	/* handle the non-linear part first */
 	if (temp < linear_min_temp)
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	else if (temp > linear_max_temp)
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 
 	/* we are in the linear zone */
 	duty  = (temp - linear_min_temp);
-	duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+	duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
 	duty /= (linear_max_temp - linear_min_temp);
-	duty += priv->fan->bios.min_duty;
+	duty += therm->fan->bios.min_duty;
 	return duty;
 }
 
 static void
 nvkm_therm_update(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	bool immd = true;
 	bool poll = true;
 	int duty = -1;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&therm->lock, flags);
 	if (mode < 0)
-		mode = priv->mode;
-	priv->mode = mode;
+		mode = therm->mode;
+	therm->mode = mode;
 
 	switch (mode) {
 	case NVKM_THERM_CTRL_MANUAL:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		duty = nvkm_therm_fan_get(therm);
 		if (duty < 0)
 			duty = 100;
 		poll = false;
 		break;
 	case NVKM_THERM_CTRL_AUTO:
-		switch(priv->fan->bios.fan_mode) {
+		switch(therm->fan->bios.fan_mode) {
 		case NVBIOS_THERM_FAN_TRIP:
 			duty = nvkm_therm_update_trip(therm);
 			break;
@@ -113,8 +117,8 @@
 			duty = nvkm_therm_update_linear(therm);
 			break;
 		case NVBIOS_THERM_FAN_OTHER:
-			if (priv->cstate)
-				duty = priv->cstate;
+			if (therm->cstate)
+				duty = therm->cstate;
 			poll = false;
 			break;
 		}
@@ -122,29 +126,29 @@
 		break;
 	case NVKM_THERM_CTRL_NONE:
 	default:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		poll = false;
 	}
 
-	if (list_empty(&priv->alarm.head) && poll)
-		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (list_empty(&therm->alarm.head) && poll)
+		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
+	spin_unlock_irqrestore(&therm->lock, flags);
 
 	if (duty >= 0) {
-		nv_debug(therm, "FAN target request: %d%%\n", duty);
+		nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
 		nvkm_therm_fan_set(therm, immd, duty);
 	}
 }
 
 int
-nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
+nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
 {
-	struct nvkm_therm_priv *priv = (void *)ptherm;
-	if (!dir || (dir < 0 && fan < priv->cstate) ||
-		    (dir > 0 && fan > priv->cstate)) {
-		nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
-		priv->cstate = fan;
-		nvkm_therm_update(ptherm, -1);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	if (!dir || (dir < 0 && fan < therm->cstate) ||
+		    (dir > 0 && fan > therm->cstate)) {
+		nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
+		therm->cstate = fan;
+		nvkm_therm_update(therm, -1);
 	}
 	return 0;
 }
@@ -152,16 +156,16 @@
 static void
 nvkm_therm_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	       container_of(alarm, struct nvkm_therm_priv, alarm);
-	nvkm_therm_update(&priv->base, -1);
+	struct nvkm_therm *therm =
+	       container_of(alarm, struct nvkm_therm, alarm);
+	nvkm_therm_update(therm, -1);
 }
 
 int
 nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_device *device = nv_device(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const char *name[] = {
 		"disabled",
 		"manual",
@@ -171,51 +175,49 @@
 	/* The default PPWR ucode on fermi interferes with fan management */
 	if ((mode >= ARRAY_SIZE(name)) ||
 	    (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
-	     !nvkm_subdev(device, NVDEV_SUBDEV_PMU)))
+	     !device->pmu))
 		return -EINVAL;
 
 	/* do not allow automatic fan management if the thermal sensor is
 	 * not available */
-	if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+	if (mode == NVKM_THERM_CTRL_AUTO &&
+	    therm->func->temp_get(therm) < 0)
 		return -EINVAL;
 
-	if (priv->mode == mode)
+	if (therm->mode == mode)
 		return 0;
 
-	nv_info(therm, "fan management: %s\n", name[mode]);
+	nvkm_debug(subdev, "fan management: %s\n", name[mode]);
 	nvkm_therm_update(therm, mode);
 	return 0;
 }
 
 int
-nvkm_therm_attr_get(struct nvkm_therm *therm,
-		       enum nvkm_therm_attr_type type)
+nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 	case NVKM_THERM_ATTR_FAN_MODE:
-		return priv->mode;
+		return therm->mode;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		return priv->bios_sensor.thrs_fan_boost.temp;
+		return therm->bios_sensor.thrs_fan_boost.temp;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		return priv->bios_sensor.thrs_fan_boost.hysteresis;
+		return therm->bios_sensor.thrs_fan_boost.hysteresis;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		return priv->bios_sensor.thrs_down_clock.temp;
+		return therm->bios_sensor.thrs_down_clock.temp;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		return priv->bios_sensor.thrs_down_clock.hysteresis;
+		return therm->bios_sensor.thrs_down_clock.hysteresis;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		return priv->bios_sensor.thrs_critical.temp;
+		return therm->bios_sensor.thrs_critical.temp;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		return priv->bios_sensor.thrs_critical.hysteresis;
+		return therm->bios_sensor.thrs_critical.hysteresis;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		return priv->bios_sensor.thrs_shutdown.temp;
+		return therm->bios_sensor.thrs_shutdown.temp;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		return priv->bios_sensor.thrs_shutdown.hysteresis;
+		return therm->bios_sensor.thrs_shutdown.hysteresis;
 	}
 
 	return -EINVAL;
@@ -225,143 +227,156 @@
 nvkm_therm_attr_set(struct nvkm_therm *therm,
 		    enum nvkm_therm_attr_type type, int value)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value > priv->fan->bios.max_duty)
-			value = priv->fan->bios.max_duty;
-		priv->fan->bios.min_duty = value;
+		if (value > therm->fan->bios.max_duty)
+			value = therm->fan->bios.max_duty;
+		therm->fan->bios.min_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value < priv->fan->bios.min_duty)
-			value = priv->fan->bios.min_duty;
-		priv->fan->bios.max_duty = value;
+		if (value < therm->fan->bios.min_duty)
+			value = therm->fan->bios.min_duty;
+		therm->fan->bios.max_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MODE:
 		return nvkm_therm_fan_mode(therm, value);
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		priv->bios_sensor.thrs_fan_boost.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		priv->bios_sensor.thrs_down_clock.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		priv->bios_sensor.thrs_down_clock.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		priv->bios_sensor.thrs_critical.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		priv->bios_sensor.thrs_critical.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		priv->bios_sensor.thrs_shutdown.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		priv->bios_sensor.thrs_shutdown.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	}
 
 	return -EINVAL;
 }
 
-int
-_nvkm_therm_init(struct nvkm_object *object)
+static void
+nvkm_therm_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	int ret;
-
-	ret = nvkm_subdev_init(&therm->base);
-	if (ret)
-		return ret;
-
-	if (priv->suspend >= 0) {
-		/* restore the pwm value only when on manual or auto mode */
-		if (priv->suspend > 0)
-			nvkm_therm_fan_set(therm, true, priv->fan->percent);
-
-		nvkm_therm_fan_mode(therm, priv->suspend);
-	}
-	nvkm_therm_sensor_init(therm);
-	nvkm_therm_fan_init(therm);
-	return 0;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	if (therm->func->intr)
+		therm->func->intr(therm);
 }
 
-int
-_nvkm_therm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	if (therm->func->fini)
+		therm->func->fini(therm);
 
 	nvkm_therm_fan_fini(therm, suspend);
 	nvkm_therm_sensor_fini(therm, suspend);
+
 	if (suspend) {
-		priv->suspend = priv->mode;
-		priv->mode = NVKM_THERM_CTRL_NONE;
+		therm->suspend = therm->mode;
+		therm->mode = NVKM_THERM_CTRL_NONE;
 	}
 
-	return nvkm_subdev_fini(&therm->base, suspend);
-}
-
-int
-nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
-				  "therm", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->sensor.alarm_program_lock);
-
-	priv->base.fan_get = nvkm_therm_fan_user_get;
-	priv->base.fan_set = nvkm_therm_fan_user_set;
-	priv->base.fan_sense = nvkm_therm_fan_sense;
-	priv->base.attr_get = nvkm_therm_attr_get;
-	priv->base.attr_set = nvkm_therm_attr_set;
-	priv->mode = priv->suspend = -1; /* undefined */
 	return 0;
 }
 
-int
-nvkm_therm_preinit(struct nvkm_therm *therm)
+static int
+nvkm_therm_oneinit(struct nvkm_subdev *subdev)
 {
+	struct nvkm_therm *therm = nvkm_therm(subdev);
 	nvkm_therm_sensor_ctor(therm);
 	nvkm_therm_ic_ctor(therm);
 	nvkm_therm_fan_ctor(therm);
-
 	nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
 	nvkm_therm_sensor_preinit(therm);
 	return 0;
 }
 
-void
-_nvkm_therm_dtor(struct nvkm_object *object)
+static int
+nvkm_therm_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_therm_priv *priv = (void *)object;
-	kfree(priv->fan);
-	nvkm_subdev_destroy(&priv->base.base);
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	therm->func->init(therm);
+
+	if (therm->suspend >= 0) {
+		/* restore the pwm value only when on manual or auto mode */
+		if (therm->suspend > 0)
+			nvkm_therm_fan_set(therm, true, therm->fan->percent);
+
+		nvkm_therm_fan_mode(therm, therm->suspend);
+	}
+
+	nvkm_therm_sensor_init(therm);
+	nvkm_therm_fan_init(therm);
+	return 0;
+}
+
+static void *
+nvkm_therm_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	kfree(therm->fan);
+	return therm;
+}
+
+static const struct nvkm_subdev_func
+nvkm_therm = {
+	.dtor = nvkm_therm_dtor,
+	.oneinit = nvkm_therm_oneinit,
+	.init = nvkm_therm_init,
+	.fini = nvkm_therm_fini,
+	.intr = nvkm_therm_intr,
+};
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+		int index, struct nvkm_therm **ptherm)
+{
+	struct nvkm_therm *therm;
+
+	if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+	therm->func = func;
+
+	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
+	spin_lock_init(&therm->lock);
+	spin_lock_init(&therm->sensor.alarm_program_lock);
+
+	therm->fan_get = nvkm_therm_fan_user_get;
+	therm->fan_set = nvkm_therm_fan_user_set;
+	therm->attr_get = nvkm_therm_attr_get;
+	therm->attr_set = nvkm_therm_attr_set;
+	therm->mode = therm->suspend = -1; /* undefined */
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 434fa74..91198d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -32,8 +32,8 @@
 nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 {
 	struct nvkm_therm *therm = fan->parent;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	int ret = 0;
 	int duty;
@@ -45,7 +45,7 @@
 	target = max_t(u8, target, fan->bios.min_duty);
 	target = min_t(u8, target, fan->bios.max_duty);
 	if (fan->percent != target) {
-		nv_debug(therm, "FAN target: %d\n", target);
+		nvkm_debug(subdev, "FAN target: %d\n", target);
 		fan->percent = target;
 	}
 
@@ -70,7 +70,7 @@
 		duty = target;
 	}
 
-	nv_debug(therm, "FAN update: %d\n", duty);
+	nvkm_debug(subdev, "FAN update: %d\n", duty);
 	ret = fan->set(therm, duty);
 	if (ret) {
 		spin_unlock_irqrestore(&fan->lock, flags);
@@ -95,7 +95,7 @@
 		else
 			delay = bump_period;
 
-		ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+		nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
 	}
 
 	return ret;
@@ -111,48 +111,51 @@
 int
 nvkm_therm_fan_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->fan->get(therm);
+	return therm->fan->get(therm);
 }
 
 int
 nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return nvkm_fan_update(priv->fan, immediate, percent);
+	return nvkm_fan_update(therm->fan, immediate, percent);
 }
 
 int
 nvkm_therm_fan_sense(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	u32 cycles, cur, prev;
 	u64 start, end, tach;
 
-	if (priv->fan->tach.func == DCB_GPIO_UNUSED)
+	if (therm->func->fan_sense)
+		return therm->func->fan_sense(therm);
+
+	if (therm->fan->tach.func == DCB_GPIO_UNUSED)
 		return -ENODEV;
 
 	/* Time a complete rotation and extrapolate to RPM:
 	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
 	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
 	 */
-	start = ptimer->read(ptimer);
-	prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+	start = nvkm_timer_read(tmr);
+	prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+				      therm->fan->tach.line);
 	cycles = 0;
 	do {
 		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
 
-		cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+		cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+					     therm->fan->tach.line);
 		if (prev != cur) {
 			if (!start)
-				start = ptimer->read(ptimer);
+				start = nvkm_timer_read(tmr);
 			cycles++;
 			prev = cur;
 		}
-	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
-	end = ptimer->read(ptimer);
+	} while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
+	end = nvkm_timer_read(tmr);
 
 	if (cycles == 5) {
 		tach = (u64)60000000000ULL;
@@ -171,9 +174,7 @@
 int
 nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	if (priv->mode != NVKM_THERM_CTRL_MANUAL)
+	if (therm->mode != NVKM_THERM_CTRL_MANUAL)
 		return -EINVAL;
 
 	return nvkm_therm_fan_set(therm, true, percent);
@@ -182,29 +183,25 @@
 static void
 nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	priv->fan->bios.pwm_freq = 0;
-	priv->fan->bios.min_duty = 0;
-	priv->fan->bios.max_duty = 100;
-	priv->fan->bios.bump_period = 500;
-	priv->fan->bios.slow_down_period = 2000;
-	priv->fan->bios.linear_min_temp = 40;
-	priv->fan->bios.linear_max_temp = 85;
+	therm->fan->bios.pwm_freq = 0;
+	therm->fan->bios.min_duty = 0;
+	therm->fan->bios.max_duty = 100;
+	therm->fan->bios.bump_period = 500;
+	therm->fan->bios.slow_down_period = 2000;
+	therm->fan->bios.linear_min_temp = 40;
+	therm->fan->bios.linear_max_temp = 85;
 }
 
 static void
 nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	if (therm->fan->bios.min_duty > 100)
+		therm->fan->bios.min_duty = 100;
+	if (therm->fan->bios.max_duty > 100)
+		therm->fan->bios.max_duty = 100;
 
-	if (priv->fan->bios.min_duty > 100)
-		priv->fan->bios.min_duty = 100;
-	if (priv->fan->bios.max_duty > 100)
-		priv->fan->bios.max_duty = 100;
-
-	if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
-		priv->fan->bios.min_duty = priv->fan->bios.max_duty;
+	if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
+		therm->fan->bios.min_duty = therm->fan->bios.max_duty;
 }
 
 int
@@ -216,29 +213,28 @@
 int
 nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->fan->alarm);
 	return 0;
 }
 
 int
 nvkm_therm_fan_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpio *gpio = device->gpio;
+	struct nvkm_bios *bios = device->bios;
 	struct dcb_gpio_func func;
 	int ret;
 
 	/* attempt to locate a drivable fan, and determine control method */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
 	if (ret == 0) {
 		/* FIXME: is this really the place to perform such checks ? */
 		if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
-			nv_debug(therm, "GPIO_FAN is in input mode\n");
+			nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
 			ret = -EINVAL;
 		} else {
 			ret = nvkm_fanpwm_create(therm, &func);
@@ -254,28 +250,29 @@
 			return ret;
 	}
 
-	nv_info(therm, "FAN control: %s\n", priv->fan->type);
+	nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);
 
 	/* read the current speed, it is useful when resuming */
-	priv->fan->percent = nvkm_therm_fan_get(therm);
+	therm->fan->percent = nvkm_therm_fan_get(therm);
 
 	/* attempt to detect a tachometer connection */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
+			     &therm->fan->tach);
 	if (ret)
-		priv->fan->tach.func = DCB_GPIO_UNUSED;
+		therm->fan->tach.func = DCB_GPIO_UNUSED;
 
 	/* initialise fan bump/slow update handling */
-	priv->fan->parent = therm;
-	nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm);
-	spin_lock_init(&priv->fan->lock);
+	therm->fan->parent = therm;
+	nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
+	spin_lock_init(&therm->fan->lock);
 
 	/* other random init... */
 	nvkm_therm_fan_set_defaults(therm);
-	nvbios_perf_fan_parse(bios, &priv->fan->perf);
-	if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
-		nv_debug(therm, "parsing the fan table failed\n");
-		if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
-			nv_error(therm, "parsing both fan tables failed\n");
+	nvbios_perf_fan_parse(bios, &therm->fan->perf);
+	if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
+		nvkm_debug(subdev, "parsing the fan table failed\n");
+		if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
+			nvkm_error(subdev, "parsing both fan tables failed\n");
 	}
 	nvkm_therm_fan_safety_checks(therm);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index 534e597..8ae300f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -38,11 +38,10 @@
 int
 nvkm_fannil_create(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
 	struct nvkm_fan *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = priv;
+	therm->fan = priv;
 	if (!priv)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index bde5cea..340f37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -24,13 +24,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/fan.h>
 #include <subdev/gpio.h>
 
-struct nvkm_fanpwm_priv {
+struct nvkm_fanpwm {
 	struct nvkm_fan base;
 	struct dcb_gpio_func func;
 };
@@ -38,76 +37,74 @@
 static int
 nvkm_fanpwm_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_gpio *gpio = device->gpio;
+	int card_type = device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+	ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
 	if (ret == 0 && divs) {
 		divs = max(divs, duty);
-		if (card_type <= NV_40 || (priv->func.log[0] & 1))
+		if (card_type <= NV_40 || (fan->func.log[0] & 1))
 			duty = divs - duty;
 		return (duty * 100) / divs;
 	}
 
-	return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+	return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
 }
 
 static int
 nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	int card_type = therm->subdev.device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	divs = priv->base.perf.pwm_divisor;
-	if (priv->base.bios.pwm_freq) {
+	divs = fan->base.perf.pwm_divisor;
+	if (fan->base.bios.pwm_freq) {
 		divs = 1;
-		if (therm->pwm_clock)
-			divs = therm->pwm_clock(therm, priv->func.line);
-		divs /= priv->base.bios.pwm_freq;
+		if (therm->func->pwm_clock)
+			divs = therm->func->pwm_clock(therm, fan->func.line);
+		divs /= fan->base.bios.pwm_freq;
 	}
 
 	duty = ((divs * percent) + 99) / 100;
-	if (card_type <= NV_40 || (priv->func.log[0] & 1))
+	if (card_type <= NV_40 || (fan->func.log[0] & 1))
 		duty = divs - duty;
 
-	ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+	ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
 	if (ret == 0)
-		ret = therm->pwm_ctrl(therm, priv->func.line, true);
+		ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
 	return ret;
 }
 
 int
 nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_device *device = nv_device(therm);
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_fanpwm_priv *priv;
-	struct nvbios_therm_fan fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_fanpwm *fan;
+	struct nvbios_therm_fan info = {};
 	u32 divs, duty;
 
-	nvbios_fan_parse(bios, &fan);
+	nvbios_fan_parse(bios, &info);
 
 	if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
-	    !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
-	     therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+	    !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
+	     therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
 		return -ENODEV;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "PWM";
-	priv->base.get = nvkm_fanpwm_get;
-	priv->base.set = nvkm_fanpwm_set;
-	priv->func = *func;
+	fan->base.type = "PWM";
+	fan->base.get = nvkm_fanpwm_get;
+	fan->base.set = nvkm_fanpwm_set;
+	fan->func = *func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 4ce041e..59701b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -26,7 +26,7 @@
 #include <subdev/gpio.h>
 #include <subdev/timer.h>
 
-struct nvkm_fantog_priv {
+struct nvkm_fantog {
 	struct nvkm_fan base;
 	struct nvkm_alarm alarm;
 	spinlock_t lock;
@@ -36,83 +36,81 @@
 };
 
 static void
-nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent)
+nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)priv->base.parent;
-	struct nvkm_timer *ptimer = nvkm_timer(tpriv);
-	struct nvkm_gpio *gpio = nvkm_gpio(tpriv);
+	struct nvkm_therm *therm = fan->base.parent;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	unsigned long flags;
 	int duty;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&fan->lock, flags);
 	if (percent < 0)
-		percent = priv->percent;
-	priv->percent = percent;
+		percent = fan->percent;
+	fan->percent = percent;
 
-	duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
-	gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
-		u64 next_change = (percent * priv->period_us) / 100;
+	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
-			next_change = priv->period_us - next_change;
-		ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+			next_change = fan->period_us - next_change;
+		nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&fan->lock, flags);
 }
 
 static void
 nvkm_fantog_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_fantog_priv *priv =
-	       container_of(alarm, struct nvkm_fantog_priv, alarm);
-	nvkm_fantog_update(priv, -1);
+	struct nvkm_fantog *fan =
+	       container_of(alarm, struct nvkm_fantog, alarm);
+	nvkm_fantog_update(fan, -1);
 }
 
 static int
 nvkm_fantog_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	return priv->percent;
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	return fan->percent;
 }
 
 static int
 nvkm_fantog_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	if (therm->pwm_ctrl)
-		therm->pwm_ctrl(therm, priv->func.line, false);
-	nvkm_fantog_update(priv, percent);
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	if (therm->func->pwm_ctrl)
+		therm->func->pwm_ctrl(therm, fan->func.line, false);
+	nvkm_fantog_update(fan, percent);
 	return 0;
 }
 
 int
 nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv;
+	struct nvkm_fantog *fan;
 	int ret;
 
-	if (therm->pwm_ctrl) {
-		ret = therm->pwm_ctrl(therm, func->line, false);
+	if (therm->func->pwm_ctrl) {
+		ret = therm->func->pwm_ctrl(therm, func->line, false);
 		if (ret)
 			return ret;
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "toggle";
-	priv->base.get = nvkm_fantog_get;
-	priv->base.set = nvkm_fantog_set;
-	nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm);
-	priv->period_us = 100000; /* 10Hz */
-	priv->percent = 100;
-	priv->func = *func;
-	spin_lock_init(&priv->lock);
+	fan->base.type = "toggle";
+	fan->base.get = nvkm_fantog_get;
+	fan->base.set = nvkm_fantog_set;
+	nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
+	fan->period_us = 100000; /* 10Hz */
+	fan->percent = 100;
+	fan->func = *func;
+	spin_lock_init(&fan->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 85b5d0c..86e8193 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -26,17 +26,13 @@
 
 #include <subdev/fuse.h>
 
-struct g84_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 g84_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
-	if (nv_ro32(fuse, 0x1a8) == 1)
-		return nv_rd32(therm, 0x20400);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1)
+		return nvkm_rd32(device, 0x20400);
 	else
 		return -ENODEV;
 }
@@ -44,12 +40,12 @@
 void
 g84_sensor_setup(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
 	/* enable temperature reading for cards with insane defaults */
-	if (nv_ro32(fuse, 0x1a8) == 1) {
-		nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
-		nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
+		nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
+		nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
 		mdelay(20); /* wait for the temperature to stabilize */
 	}
 }
@@ -57,36 +53,40 @@
 static void
 g84_therm_program_alarms(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
-	nv_wr32(therm, 0x20000, 0x000003ff);
+	nvkm_wr32(device, 0x20000, 0x000003ff);
 
 	/* shutdown: The computer should be shutdown when reached */
-	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
-	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+	nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
+	nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);
 
 	/* THRS_1 : fan boost*/
-	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+	nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);
 
 	/* THRS_2 : critical */
-	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+	nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);
 
 	/* THRS_4 : down clock */
-	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
-	nv_debug(therm,
-		 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+	nvkm_debug(subdev,
+		   "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
 
 }
 
@@ -97,24 +97,25 @@
 				   const struct nvbios_therm_threshold *thrs,
 				   enum nvkm_therm_thrs thrs_name)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
 	int temp, cur;
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
-	temp = nv_rd32(therm, thrs_reg);
+	temp = nvkm_rd32(device, thrs_reg);
 
 	/* program the next threshold */
 	if (temp == thrs->temp) {
-		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+		nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
 		new_state = NVKM_THERM_THRS_HIGHER;
 	} else {
-		nv_wr32(therm, thrs_reg, thrs->temp);
+		nvkm_wr32(device, thrs_reg, thrs->temp);
 		new_state = NVKM_THERM_THRS_LOWER;
 	}
 
 	/* fix the state (in case someone reprogrammed the alarms) */
-	cur = therm->temp_get(therm);
+	cur = therm->func->temp_get(therm);
 	if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
 		new_state = NVKM_THERM_THRS_HIGHER;
 	else if (new_state == NVKM_THERM_THRS_HIGHER &&
@@ -135,17 +136,17 @@
 }
 
 static void
-g84_therm_intr(struct nvkm_subdev *subdev)
+g84_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	unsigned long flags;
 	uint32_t intr;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
-	intr = nv_rd32(therm, 0x20100) & 0x3ff;
+	intr = nvkm_rd32(device, 0x20100) & 0x3ff;
 
 	/* THRS_4: downclock */
 	if (intr & 0x002) {
@@ -180,87 +181,66 @@
 	}
 
 	if (intr)
-		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
 
 	/* ACK everything */
-	nv_wr32(therm, 0x20100, 0xffffffff);
-	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 }
 
-static int
-g84_therm_init(struct nvkm_object *object)
+void
+g84_therm_fini(struct nvkm_therm *therm)
 {
-	struct g84_therm_priv *priv = (void *)object;
-	int ret;
+	struct nvkm_device *device = therm->subdev.device;
 
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
-	return 0;
-}
-
-static int
-g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g84_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.sensor.program_alarms = g84_therm_program_alarms;
-	nv_subdev(priv)->intr = g84_therm_intr;
-
-	/* init the thresholds */
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_SHUTDOWN,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_FANBOOST,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_CRITICAL,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_DOWNCLOCK,
-					      NVKM_THERM_THRS_LOWER);
-
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-int
-g84_therm_fini(struct nvkm_object *object, bool suspend)
-{
 	/* Disable PTherm IRQs */
-	nv_wr32(object, 0x20000, 0x00000000);
+	nvkm_wr32(device, 0x20000, 0x00000000);
 
 	/* ACK all PTherm IRQs */
-	nv_wr32(object, 0x20100, 0xffffffff);
-	nv_wr32(object, 0x1100, 0x10000); /* PBUS */
-
-	return _nvkm_therm_fini(object, suspend);
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 }
 
-struct nvkm_oclass
-g84_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = g84_therm_init,
-		.fini = g84_therm_fini,
-	},
+static void
+g84_therm_init(struct nvkm_therm *therm)
+{
+	g84_sensor_setup(therm);
+}
+
+static const struct nvkm_therm_func
+g84_therm = {
+	.init = g84_therm_init,
+	.fini = g84_therm_fini,
+	.intr = g84_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.program_alarms = g84_therm_program_alarms,
 };
+
+int
+g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
+{
+	struct nvkm_therm *therm;
+	int ret;
+
+	ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+	*ptherm = therm;
+	if (ret)
+		return ret;
+
+	/* init the thresholds */
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
+						     NVKM_THERM_THRS_LOWER);
+	return 0;
+}
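
Note: the g84 changes above are the template for the rest of this series:
the nvkm_oclass boilerplate (ctor/dtor/init/fini, priv wrapper structs) is
replaced by a const function table handed to a common constructor.  A
minimal, self-contained sketch of the pattern -- widget/widget_func are
illustrative names, not nouveau API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct widget;

    struct widget_func {                  /* per-chipset ops, const data */
        void (*init)(struct widget *);
    };

    struct widget {
        const struct widget_func *func;   /* set once by the constructor */
    };

    static void g84_widget_init(struct widget *w) { (void)w; puts("g84 init"); }

    static const struct widget_func g84_widget = {
        .init = g84_widget_init,
    };

    /* common constructor, analogous to nvkm_therm_new_() */
    static int widget_new_(const struct widget_func *func, struct widget **pw)
    {
        if (!(*pw = calloc(1, sizeof(**pw))))
            return -ENOMEM;
        (*pw)->func = func;
        return 0;
    }

    int main(void)
    {
        struct widget *w;
        if (widget_new_(&g84_widget, &w))
            return 1;
        w->func->init(w);                 /* dispatch through the table */
        free(w);
        return 0;
    }
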
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
deleted file mode 100644
index 46b7e65..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gf110_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
-static int
-pwm_info(struct nvkm_therm *therm, int line)
-{
-	u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
-
-	switch (gpio & 0x000000c0) {
-	case 0x00000000: /* normal mode, possibly pwm forced off by us */
-	case 0x00000040: /* nvio special */
-		switch (gpio & 0x0000001f) {
-		case 0x00: return 2;
-		case 0x19: return 1;
-		case 0x1c: return 0;
-		case 0x1e: return 2;
-		default:
-			break;
-		}
-	default:
-		break;
-	}
-
-	nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
-	return -ENODEV;
-}
-
-static int
-gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
-{
-	u32 data = enable ? 0x00000040 : 0x00000000;
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2)
-		nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
-	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
-	return 0;
-}
-
-static int
-gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
-			*divs = nv_rd32(therm, 0x00e114 + (indx * 8));
-			*duty = nv_rd32(therm, 0x00e118 + (indx * 8));
-			return 0;
-		}
-	} else if (indx == 2) {
-		*divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
-		*duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int
-gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		nv_wr32(therm, 0x00e114 + (indx * 8), divs);
-		nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
-	} else if (indx == 2) {
-		nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
-		nv_wr32(therm, 0x0200dc, duty | 0x40000000);
-	}
-	return 0;
-}
-
-static int
-gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return 0;
-	else if (indx < 2)
-		return (nv_device(therm)->crystal * 1000) / 20;
-	else
-		return nv_device(therm)->crystal * 1000 / 10;
-}
-
-int
-gf110_therm_init(struct nvkm_object *object)
-{
-	struct gf110_therm_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
-	if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
-		nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
-	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
-}
-
-static int
-gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf110_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
-
-	priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gf110_fan_pwm_get;
-	priv->base.base.pwm_set = gf110_fan_pwm_set;
-	priv->base.base.pwm_clock = gf110_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gf110_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xd0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
new file mode 100644
index 0000000..06dcfd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static int
+pwm_info(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));
+
+	switch (gpio & 0x000000c0) {
+	case 0x00000000: /* normal mode, possibly pwm forced off by us */
+	case 0x00000040: /* nvio special */
+		switch (gpio & 0x0000001f) {
+		case 0x00: return 2;
+		case 0x19: return 1;
+		case 0x1c: return 0;
+		case 0x1e: return 2;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
+	return -ENODEV;
+}
+
+static int
+gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	u32 data = enable ? 0x00000040 : 0x00000000;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2)
+		nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
+	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
+	return 0;
+}
+
+static int
+gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
+			*divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
+			*duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
+			return 0;
+		}
+	} else if (indx == 2) {
+		*divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
+		*duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
+		nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
+	} else if (indx == 2) {
+		nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
+		nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
+	}
+	return 0;
+}
+
+static int
+gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return 0;
+	else if (indx < 2)
+		return (device->crystal * 1000) / 20;
+	else
+		return device->crystal * 1000 / 10;
+}
+
+void
+gf119_therm_init(struct nvkm_therm *therm)
+{
+	struct nvkm_device *device = therm->subdev.device;
+
+	g84_sensor_setup(therm);
+
+	/* enable fan tach, count revolutions per-second */
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
+	if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
+		nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
+	}
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
+}
+
+static const struct nvkm_therm_func
+gf119_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gf119_fan_pwm_ctrl,
+	.pwm_get = gf119_fan_pwm_get,
+	.pwm_set = gf119_fan_pwm_set,
+	.pwm_clock = gf119_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gf119_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+}
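
Note: pwm_info() above decodes the per-line GPIO control register in two
steps: bits 7:6 select the mode (0x00 normal, 0x40 "nvio special") and
bits 4:0 select the function, which maps to a PWM controller index.
Worked example (register value illustrative): gpio = 0x0000005c gives
mode = 0x5c & 0xc0 = 0x40 and func = 0x5c & 0x1f = 0x1c, so the function
returns PWM controller 0.
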
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 2fd110f..86848ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -23,12 +23,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct gm107_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
@@ -39,55 +33,43 @@
 static int
 gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
-	*divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
-	*duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
+	struct nvkm_device *device = therm->subdev.device;
+	*divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
+	*duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
 	return 0;
 }
 
 static int
 gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
-	nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
-	nv_wr32(therm, 0x10eb14, duty | 0x80000000);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
+	nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
 	return 0;
 }
 
 static int
 gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	return nv_device(therm)->crystal * 1000;
+	return therm->subdev.device->crystal * 1000;
 }
 
-static int
-gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gm107_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gm107_fan_pwm_get;
-	priv->base.base.pwm_set = gm107_fan_pwm_set;
-	priv->base.base.pwm_clock = gm107_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gm107_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
+static const struct nvkm_therm_func
+gm107_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gm107_fan_pwm_ctrl,
+	.pwm_get = gm107_fan_pwm_get,
+	.pwm_set = gm107_fan_pwm_set,
+	.pwm_clock = gm107_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+gm107_therm_new(struct nvkm_device *device, int index,
+		struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index e99be20..c08097f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -23,78 +23,53 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <subdev/gpio.h>
 
-struct gt215_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 gt215_therm_fan_sense(struct nvkm_therm *therm)
 {
-	u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
-	u32 ctrl = nv_rd32(therm, 0x00e720);
+	struct nvkm_device *device = therm->subdev.device;
+	u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
+	u32 ctrl = nvkm_rd32(device, 0x00e720);
 	if (ctrl & 0x00000001)
 		return tach * 60 / 2;
 	return -ENODEV;
 }
 
-static int
-gt215_therm_init(struct nvkm_object *object)
+static void
+gt215_therm_init(struct nvkm_therm *therm)
 {
-	struct gt215_therm_priv *priv = (void *)object;
-	struct dcb_gpio_func *tach = &priv->base.fan->tach;
-	int ret;
+	struct nvkm_device *device = therm->subdev.device;
+	struct dcb_gpio_func *tach = &therm->fan->tach;
 
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
+	g84_sensor_setup(therm);
 
 	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
 	if (tach->func != DCB_GPIO_UNUSED) {
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
 	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
 }
 
-static int
-gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gt215_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gt215_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gt215_therm_init,
-		.fini = g84_therm_fini,
-	},
+static const struct nvkm_therm_func
+gt215_therm = {
+	.init = gt215_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+gt215_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
+}
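
Note: gt215_therm_fan_sense() above converts a tachometer count
accumulated over a one-second window (0x00e724 is programmed with
device->crystal * 1000 ticks, i.e. one second) into RPM; the divide by
two assumes the usual two tach pulses per fan revolution.  For example,
tach = 40 pulses/s gives 40 * 60 / 2 = 1200 RPM.
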
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 09fc460..6e0ddc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -27,16 +27,16 @@
 #include <subdev/i2c.h>
 
 static bool
-probe_monitoring_device(struct nvkm_i2c_port *i2c,
+probe_monitoring_device(struct nvkm_i2c_bus *bus,
 			struct i2c_board_info *info, void *data)
 {
-	struct nvkm_therm_priv *priv = data;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_therm *therm = data;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	struct i2c_client *client;
 
 	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-	client = i2c_new_device(&i2c->adapter, info);
+	client = i2c_new_device(&bus->i2c, info);
 	if (!client)
 		return false;
 
@@ -46,15 +46,15 @@
 		return false;
 	}
 
-	nv_info(priv,
-		"Found an %s at address 0x%x (controlled by lm_sensors, "
-		"temp offset %+i C)\n",
-		info->type, info->addr, sensor->offset_constant);
-	priv->ic = client;
+	nvkm_debug(&therm->subdev,
+		   "Found an %s at address 0x%x (controlled by lm_sensors, "
+		   "temp offset %+i C)\n",
+		   info->type, info->addr, sensor->offset_constant);
+	therm->ic = client;
 	return true;
 }
 
-static struct nvkm_i2c_board_info
+static struct nvkm_i2c_bus_probe
 nv_board_infos[] = {
 	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
 	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0  },
@@ -82,38 +82,43 @@
 void
 nvkm_therm_ic_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_i2c *i2c = nvkm_i2c(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus;
 	struct nvbios_extdev_func extdev_entry;
 
+	bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	if (!bus)
+		return;
+
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	/* The vbios doesn't provide the address of an existing monitoring
 	   device. Let's try our static list.
 	 */
-	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-		      nv_board_infos, probe_monitoring_device, therm);
+	nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
+			   probe_monitoring_device, therm);
 }
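
Note: the probe tables passed to nvkm_i2c_bus_probe() above are
zero-terminated arrays pairing an i2c_board_info with a per-entry flag
(the adt7473 entry uses 20).  A sketch of assembling one -- the 0x2e
address is illustrative, not taken from this patch:

    static struct nvkm_i2c_bus_probe board[] = {
        { { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
        { }   /* zeroed entry terminates the list */
    };

    nvkm_i2c_bus_probe(bus, "monitoring device", board,
                       probe_monitoring_device, therm);
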
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 8496fff..6326fdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -24,26 +24,17 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv40_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
 
 static enum nv40_sensor_style
 nv40_sensor_style(struct nvkm_therm *therm)
 {
-	struct nvkm_device *device = nv_device(therm);
-
-	switch (device->chipset) {
+	switch (therm->subdev.device->chipset) {
 	case 0x43:
 	case 0x44:
 	case 0x4a:
 	case 0x47:
 		return OLD_STYLE;
-
 	case 0x46:
 	case 0x49:
 	case 0x4b:
@@ -61,18 +52,19 @@
 static int
 nv40_sensor_setup(struct nvkm_therm *therm)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 
 	/* enable ADC readout and disable the ALARM threshold */
 	if (style == NEW_STYLE) {
-		nv_mask(therm, 0x15b8, 0x80000000, 0);
-		nv_wr32(therm, 0x15b0, 0x80003fff);
+		nvkm_mask(device, 0x15b8, 0x80000000, 0);
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0x3fff;
+		return nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
+		nvkm_wr32(device, 0x15b0, 0xff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0xff;
+		return nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 }
@@ -80,17 +72,17 @@
 static int
 nv40_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 	int core_temp;
 
 	if (style == NEW_STYLE) {
-		nv_wr32(therm, 0x15b0, 0x80003fff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+		nvkm_wr32(device, 0x15b0, 0xff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 
@@ -113,11 +105,13 @@
 static int
 nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = enable ? 0x80000000 : 0x0000000;
-	if      (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
-	else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+	if      (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
+	else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
 	else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 	return 0;
@@ -126,8 +120,10 @@
 static int
 nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		u32 reg = nv_rd32(therm, 0x0010f0);
+		u32 reg = nvkm_rd32(device, 0x0010f0);
 		if (reg & 0x80000000) {
 			*duty = (reg & 0x7fff0000) >> 16;
 			*divs = (reg & 0x00007fff);
@@ -135,14 +131,14 @@
 		}
 	} else
 	if (line == 9) {
-		u32 reg = nv_rd32(therm, 0x0015f4);
+		u32 reg = nvkm_rd32(device, 0x0015f4);
 		if (reg & 0x80000000) {
-			*divs = nv_rd32(therm, 0x0015f8);
+			*divs = nvkm_rd32(device, 0x0015f8);
 			*duty = (reg & 0x7fffffff);
 			return 0;
 		}
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -152,14 +148,16 @@
 static int
 nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
+		nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
 	} else
 	if (line == 9) {
-		nv_wr32(therm, 0x0015f8, divs);
-		nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
+		nvkm_wr32(device, 0x0015f8, divs);
+		nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -167,59 +165,40 @@
 }
 
 void
-nv40_therm_intr(struct nvkm_subdev *subdev)
+nv40_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	uint32_t stat = nv_rd32(therm, 0x1100);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	uint32_t stat = nvkm_rd32(device, 0x1100);
 
 	/* processing */
 
 	/* ack all IRQs */
-	nv_wr32(therm, 0x1100, 0x70000);
+	nvkm_wr32(device, 0x1100, 0x70000);
 
-	nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+	nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
 }
 
-static int
-nv40_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static void
+nv40_therm_init(struct nvkm_therm *therm)
 {
-	struct nv40_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv40_fan_pwm_get;
-	priv->base.base.pwm_set = nv40_fan_pwm_set;
-	priv->base.base.temp_get = nv40_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv40_therm_init(struct nvkm_object *object)
-{
-	struct nvkm_therm *therm = (void *)object;
-
 	nv40_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv40_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv40_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv40_therm = {
+	.init = nv40_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv40_fan_pwm_ctrl,
+	.pwm_get = nv40_fan_pwm_get,
+	.pwm_set = nv40_fan_pwm_set,
+	.temp_get = nv40_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv40_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+}
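
Note: on nv40 the readout width depends on the sensor generation picked by
chipset in nv40_sensor_style(): NEW_STYLE parts expose a 14-bit ADC value
at 0x15b4 (masked with 0x3fff), OLD_STYLE parts an 8-bit one (masked with
0xff), and both need the mdelay(20) for the reading to settle before it
can be trusted.
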
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 1ef59e8..9b57b43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -24,15 +24,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv50_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+
 	if (*line == 0x04) {
 		*ctrl = 0x00e100;
 		*line = 4;
@@ -48,7 +44,7 @@
 		*line = 0;
 		*indx = 0;
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
 		return -ENODEV;
 	}
 
@@ -58,23 +54,25 @@
 int
 nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	u32 data = enable ? 0x00000001 : 0x00000000;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret == 0)
-		nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+		nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
 	return ret;
 }
 
 int
 nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	if (nv_rd32(therm, ctrl) & (1 << line)) {
-		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
-		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
+	if (nvkm_rd32(device, ctrl) & (1 << line)) {
+		*divs = nvkm_rd32(device, 0x00e114 + (id * 8));
+		*duty = nvkm_rd32(device, 0x00e118 + (id * 8));
 		return 0;
 	}
 
@@ -84,36 +82,36 @@
 int
 nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	nv_wr32(therm, 0x00e114 + (id * 8), divs);
-	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+	nvkm_wr32(device, 0x00e114 + (id * 8), divs);
+	nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
 	return 0;
 }
 
 int
 nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	int chipset = nv_device(therm)->chipset;
-	int crystal = nv_device(therm)->crystal;
+	struct nvkm_device *device = therm->subdev.device;
 	int pwm_clock;
 
 	/* determine the PWM source clock */
-	if (chipset > 0x50 && chipset < 0x94) {
-		u8 pwm_div = nv_rd32(therm, 0x410c);
-		if (nv_rd32(therm, 0xc040) & 0x800000) {
+	if (device->chipset > 0x50 && device->chipset < 0x94) {
+		u8 pwm_div = nvkm_rd32(device, 0x410c);
+		if (nvkm_rd32(device, 0xc040) & 0x800000) {
 			/* Use the HOST clock (100 MHz)
 			 * Where does this constant (2.4) come from? */
 			pwm_clock = (100000000 >> pwm_div) * 10 / 24;
 		} else {
 			/* Where does this constant (20) come from? */
-			pwm_clock = (crystal * 1000) >> pwm_div;
+			pwm_clock = (device->crystal * 1000) >> pwm_div;
 			pwm_clock /= 20;
 		}
 	} else {
-		pwm_clock = (crystal * 1000) / 20;
+		pwm_clock = (device->crystal * 1000) / 20;
 	}
 
 	return pwm_clock;
@@ -122,18 +120,19 @@
 static void
 nv50_sensor_setup(struct nvkm_therm *therm)
 {
-	nv_mask(therm, 0x20010, 0x40000000, 0x0);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x20010, 0x40000000, 0x0);
 	mdelay(20); /* wait for the temperature to stabilize */
 }
 
 static int
 nv50_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	int core_temp;
 
-	core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
+	core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;
 
 	/* if the slope or the offset is unset, do not use the sensor */
 	if (!sensor->slope_div || !sensor->slope_mult ||
@@ -151,48 +150,27 @@
 	return core_temp;
 }
 
-static int
-nv50_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static void
+nv50_therm_init(struct nvkm_therm *therm)
 {
-	struct nv50_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = nv50_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv50_therm_init(struct nvkm_object *object)
-{
-	struct nvkm_therm *therm = (void *)object;
-
 	nv50_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv50_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv50_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv50_therm = {
+	.init = nv50_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = nv50_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv50_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+}
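
Note: nv50_fan_pwm_clock() above derives the PWM source clock from the
crystal on most chipsets; assuming a 27000 kHz crystal and the default
path, pwm_clock = 27000 * 1000 / 20 = 1,350,000 Hz.  On the 0x51..0x93
chipsets running from the 100 MHz HOST clock, the "* 10 / 24" is an
integer approximation of dividing by the unexplained 2.4 constant.
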
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 916a149..235a5d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVTHERM_PRIV_H__
 #define __NVTHERM_PRIV_H__
+#define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
 /*
  * Copyright 2012 The Nouveau community
  *
@@ -28,8 +29,9 @@
 #include <subdev/bios/extdev.h>
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/perf.h>
-#include <subdev/bios/therm.h>
-#include <subdev/timer.h>
+
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
+		    int index, struct nvkm_therm **);
 
 struct nvkm_fan {
 	struct nvkm_therm *parent;
@@ -48,59 +50,6 @@
 	struct dcb_gpio_func tach;
 };
 
-enum nvkm_therm_thrs_direction {
-	NVKM_THERM_THRS_FALLING = 0,
-	NVKM_THERM_THRS_RISING = 1
-};
-
-enum nvkm_therm_thrs_state {
-	NVKM_THERM_THRS_LOWER = 0,
-	NVKM_THERM_THRS_HIGHER = 1
-};
-
-enum nvkm_therm_thrs {
-	NVKM_THERM_THRS_FANBOOST = 0,
-	NVKM_THERM_THRS_DOWNCLOCK = 1,
-	NVKM_THERM_THRS_CRITICAL = 2,
-	NVKM_THERM_THRS_SHUTDOWN = 3,
-	NVKM_THERM_THRS_NR
-};
-
-struct nvkm_therm_priv {
-	struct nvkm_therm base;
-
-	/* automatic thermal management */
-	struct nvkm_alarm alarm;
-	spinlock_t lock;
-	struct nvbios_therm_trip_point *last_trip;
-	int mode;
-	int cstate;
-	int suspend;
-
-	/* bios */
-	struct nvbios_therm_sensor bios_sensor;
-
-	/* fan priv */
-	struct nvkm_fan *fan;
-
-	/* alarms priv */
-	struct {
-		spinlock_t alarm_program_lock;
-		struct nvkm_alarm therm_poll_alarm;
-		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
-		void (*program_alarms)(struct nvkm_therm *);
-	} sensor;
-
-	/* what should be done if the card overheats */
-	struct {
-		void (*downclock)(struct nvkm_therm *, bool active);
-		void (*pause)(struct nvkm_therm *, bool active);
-	} emergency;
-
-	/* ic */
-	struct i2c_client *ic;
-};
-
 int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
 int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
 int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
@@ -117,8 +66,6 @@
 int nvkm_therm_fan_user_get(struct nvkm_therm *);
 int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
 
-int nvkm_therm_fan_sense(struct nvkm_therm *);
-
 int nvkm_therm_preinit(struct nvkm_therm *);
 
 int  nvkm_therm_sensor_init(struct nvkm_therm *);
@@ -134,18 +81,37 @@
 			     enum nvkm_therm_thrs_direction);
 void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
 
-void nv40_therm_intr(struct nvkm_subdev *);
+struct nvkm_therm_func {
+	void (*init)(struct nvkm_therm *);
+	void (*fini)(struct nvkm_therm *);
+	void (*intr)(struct nvkm_therm *);
+
+	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
+	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
+	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
+	int (*pwm_clock)(struct nvkm_therm *, int line);
+
+	int (*temp_get)(struct nvkm_therm *);
+
+	int (*fan_sense)(struct nvkm_therm *);
+
+	void (*program_alarms)(struct nvkm_therm *);
+};
+
+void nv40_therm_intr(struct nvkm_therm *);
+
 int  nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
 int  nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
 int  nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
 int  nv50_fan_pwm_clock(struct nvkm_therm *, int);
+
 int  g84_temp_get(struct nvkm_therm *);
 void g84_sensor_setup(struct nvkm_therm *);
-int  g84_therm_fini(struct nvkm_object *, bool suspend);
+void g84_therm_fini(struct nvkm_therm *);
 
 int gt215_therm_fan_sense(struct nvkm_therm *);
 
-int gf110_therm_init(struct nvkm_object *);
+void gf119_therm_init(struct nvkm_therm *);
 
 int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
 int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
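
Note: the new nvkm_therm(p) macro at the top of priv.h recovers the
containing nvkm_therm from an embedded nvkm_subdev pointer, which is what
lets the old nvkm_therm_priv wrapper above be deleted.  A minimal sketch
of the idiom with stand-in types:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct subdev { int index; };
    struct therm  { int temp; struct subdev subdev; };

    /* given the embedded subdev, step back to the enclosing therm */
    static struct therm *to_therm(struct subdev *s)
    {
        return container_of(s, struct therm, subdev);
    }
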
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index aa13744..b9703c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -26,29 +26,25 @@
 static void
 nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	therm->bios_sensor.offset_constant = 0;
 
-	priv->bios_sensor.offset_constant = 0;
+	therm->bios_sensor.thrs_fan_boost.temp = 90;
+	therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
 
-	priv->bios_sensor.thrs_fan_boost.temp = 90;
-	priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+	therm->bios_sensor.thrs_down_clock.temp = 95;
+	therm->bios_sensor.thrs_down_clock.hysteresis = 3;
 
-	priv->bios_sensor.thrs_down_clock.temp = 95;
-	priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+	therm->bios_sensor.thrs_critical.temp = 105;
+	therm->bios_sensor.thrs_critical.hysteresis = 5;
 
-	priv->bios_sensor.thrs_critical.temp = 105;
-	priv->bios_sensor.thrs_critical.hysteresis = 5;
-
-	priv->bios_sensor.thrs_shutdown.temp = 135;
-	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
+	therm->bios_sensor.thrs_shutdown.temp = 135;
+	therm->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
 }
 
-
 static void
 nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *s = &priv->bios_sensor;
+	struct nvbios_therm_sensor *s = &therm->bios_sensor;
 
 	/* enforce a minimum hysteresis on thresholds */
 	s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
@@ -63,8 +59,7 @@
 				      enum nvkm_therm_thrs thrs,
 				      enum nvkm_therm_thrs_state st)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.alarm_state[thrs] = st;
+	therm->sensor.alarm_state[thrs] = st;
 }
 
 /* must be called with alarm_program_lock taken! */
@@ -72,8 +67,7 @@
 nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
 				      enum nvkm_therm_thrs thrs)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->sensor.alarm_state[thrs];
+	return therm->sensor.alarm_state[thrs];
 }
 
 static void
@@ -87,22 +81,23 @@
 nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
 			enum nvkm_therm_thrs_direction dir)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
 	bool active;
 	const char *thresholds[] = {
 		"fanboost", "downclock", "critical", "shutdown"
 	};
-	int temperature = therm->temp_get(therm);
+	int temperature = therm->func->temp_get(therm);
 
 	if (thrs < 0 || thrs > 3)
 		return;
 
 	if (dir == NVKM_THERM_THRS_FALLING)
-		nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
-			temperature, thresholds[thrs]);
+		nvkm_info(subdev,
+			  "temperature (%i C) went below the '%s' threshold\n",
+			  temperature, thresholds[thrs]);
 	else
-		nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
-			temperature, thresholds[thrs]);
+		nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
+			  temperature, thresholds[thrs]);
 
 	active = (dir == NVKM_THERM_THRS_RISING);
 	switch (thrs) {
@@ -113,12 +108,12 @@
 		}
 		break;
 	case NVKM_THERM_THRS_DOWNCLOCK:
-		if (priv->emergency.downclock)
-			priv->emergency.downclock(therm, active);
+		if (therm->emergency.downclock)
+			therm->emergency.downclock(therm, active);
 		break;
 	case NVKM_THERM_THRS_CRITICAL:
-		if (priv->emergency.pause)
-			priv->emergency.pause(therm, active);
+		if (therm->emergency.pause)
+			therm->emergency.pause(therm, active);
 		break;
 	case NVKM_THERM_THRS_SHUTDOWN:
 		if (active) {
@@ -145,7 +140,7 @@
 {
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
-	int temp = therm->temp_get(therm);
+	int temp = therm->func->temp_get(therm);
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
 
@@ -166,19 +161,19 @@
 static void
 alarm_timer_callback(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm);
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
-	struct nvkm_therm *therm = &priv->base;
+	struct nvkm_therm *therm =
+		container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
 					  NVKM_THERM_THRS_FANBOOST);
 
-	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+	nvkm_therm_threshold_hyst_polling(therm,
+					  &sensor->thrs_down_clock,
 					  NVKM_THERM_THRS_DOWNCLOCK);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
@@ -187,46 +182,45 @@
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
 					  NVKM_THERM_THRS_SHUTDOWN);
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-		ptimer->alarm(ptimer, 1000000000ULL, alarm);
+	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
 void
 nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 
-	nv_debug(therm,
-		 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+	nvkm_debug(&therm->subdev,
+		   "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
 
-	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
+	alarm_timer_callback(&therm->sensor.therm_poll_alarm);
 }
 
 int
 nvkm_therm_sensor_init(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.program_alarms(therm);
+	therm->func->program_alarms(therm);
 	return 0;
 }
 
 int
 nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->sensor.therm_poll_alarm);
 	return 0;
 }
 
@@ -235,24 +229,24 @@
 {
 	const char *sensor_avail = "yes";
 
-	if (therm->temp_get(therm) < 0)
+	if (therm->func->temp_get(therm) < 0)
 		sensor_avail = "no";
 
-	nv_info(therm, "internal sensor: %s\n", sensor_avail);
+	nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
 }
 
 int
 nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 
-	nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+	nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
 
 	nvkm_therm_temp_set_defaults(therm);
 	if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
-				      &priv->bios_sensor))
-		nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+				      &therm->bios_sensor))
+		nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
 	nvkm_therm_temp_safety_checks(therm);
 
 	return 0;
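
Note: alarm_timer_callback() above gives 1 Hz polling by re-arming itself
for one second (1000000000ULL ns) whenever temp_get() still returns a
valid reading and the alarm is not already queued.  Stripped to its shape
(check_thresholds() is a hypothetical stand-in for the four hysteresis
checks):

    static void poll_cb(struct nvkm_alarm *alarm)
    {
        struct nvkm_therm *therm =
            container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
        struct nvkm_timer *tmr = therm->subdev.device->timer;

        check_thresholds(therm);        /* hypothetical helper */

        /* re-queue in one second while the sensor reads back sane */
        if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
            nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
    }
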
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index d1d38b4..e436f0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,3 +1,5 @@
 nvkm-y += nvkm/subdev/timer/base.o
 nvkm-y += nvkm/subdev/timer/nv04.o
+nvkm-y += nvkm/subdev/timer/nv40.o
+nvkm-y += nvkm/subdev/timer/nv41.o
 nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d894061..d4dae1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,73 +21,131 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/timer.h>
+#include "priv.h"
 
-bool
-nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+u64
+nvkm_timer_read(struct nvkm_timer *tmr)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) == data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) == data)
-				return true;
-		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
-}
-
-bool
-nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) != data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) != data)
-				return true;
-		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
-}
-
-bool
-nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (func(data) == true)
-			return true;
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
+	return tmr->func->read(tmr);
 }
 
 void
-nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm)
+nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm(ptimer, nsec, alarm);
+	struct nvkm_alarm *alarm, *atemp;
+	unsigned long flags;
+	LIST_HEAD(exec);
+
+	/* move any due alarms off the pending list */
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
+		if (alarm->timestamp <= nvkm_timer_read(tmr))
+			list_move_tail(&alarm->head, &exec);
+	}
+
+	/* reschedule interrupt for next alarm time */
+	if (!list_empty(&tmr->alarms)) {
+		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
+		tmr->func->alarm_init(tmr, alarm->timestamp);
+	} else {
+		tmr->func->alarm_fini(tmr);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
+
+	/* execute any pending alarm handlers */
+	list_for_each_entry_safe(alarm, atemp, &exec, head) {
+		list_del_init(&alarm->head);
+		alarm->func(alarm);
+	}
 }
 
 void
-nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm)
+nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm_cancel(ptimer, alarm);
+	struct nvkm_alarm *list;
+	unsigned long flags;
+
+	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
+
+	/* append new alarm to list, in soonest-alarm-first order */
+	spin_lock_irqsave(&tmr->lock, flags);
+	if (!nsec) {
+		if (!list_empty(&alarm->head))
+			list_del(&alarm->head);
+	} else {
+		list_for_each_entry(list, &tmr->alarms, head) {
+			if (list->timestamp > alarm->timestamp)
+				break;
+		}
+		list_add_tail(&alarm->head, &list->head);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
+
+	/* process pending alarms */
+	nvkm_timer_alarm_trigger(tmr);
+}
+
+void
+nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_del_init(&alarm->head);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+}
+
+static void
+nvkm_timer_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->intr(tmr);
+}
+
+static int
+nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->alarm_fini(tmr);
+	return 0;
+}
+
+static int
+nvkm_timer_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	if (tmr->func->init)
+		tmr->func->init(tmr);
+	tmr->func->time(tmr, ktime_to_ns(ktime_get()));
+	nvkm_timer_alarm_trigger(tmr);
+	return 0;
+}
+
+static void *
+nvkm_timer_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_timer(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_timer = {
+	.dtor = nvkm_timer_dtor,
+	.init = nvkm_timer_init,
+	.fini = nvkm_timer_fini,
+	.intr = nvkm_timer_intr,
+};
+
+int
+nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
+		int index, struct nvkm_timer **ptmr)
+{
+	struct nvkm_timer *tmr;
+
+	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+	tmr->func = func;
+	INIT_LIST_HEAD(&tmr->alarms);
+	spin_lock_init(&tmr->lock);
+	return 0;
 }
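
Note: nvkm_timer_alarm() above keeps tmr->alarms sorted soonest-first so
the interrupt path only ever has to program the head entry.  The insert
leans on a classic list.h property:

    /* soonest-first insert: stop at the first entry that fires later */
    list_for_each_entry(list, &tmr->alarms, head) {
        if (list->timestamp > alarm->timestamp)
            break;
    }
    /* on break, this inserts before `list`; if the loop ran to the end,
     * &list->head aliases the list head itself, so this appends */
    list_add_tail(&alarm->head, &list->head);

Passing nsec == 0 simply unlinks the alarm, the same effect as
nvkm_timer_alarm_cancel().
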
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 80e3806..9ed5f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,36 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-static int
-gk20a_timer_init(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 hi = upper_32_bits(priv->suspend_time);
-	u32 lo = lower_32_bits(priv->suspend_time);
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
-
-	/* restore the time before suspend */
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
-}
-
-struct nvkm_oclass
-gk20a_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0xff),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = gk20a_timer_init,
-		.fini = nv04_timer_fini,
-	}
+static const struct nvkm_timer_func
+gk20a_timer = {
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
 };
+
+int
+gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
+}
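
Note: gk20a reuses the nv04 timer ops wholesale minus .init; the old
suspend_time save/restore is gone because nvkm_timer_init() in base.c
above now re-seeds the counter on every init/resume:

    /* from nvkm_timer_init() above: stamp the counter from the kernel's
     * monotonic clock instead of a privately saved copy */
    tmr->func->time(tmr, ktime_to_ns(ktime_get()));
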
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 6b7facb..7b9ce87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,165 +21,92 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "regsnv04.h"
 
-#include <core/device.h>
-
-static u64
-nv04_timer_read(struct nvkm_timer *ptimer)
+void
+nv04_timer_time(struct nvkm_timer *tmr, u64 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 hi = upper_32_bits(time);
+	u32 lo = lower_32_bits(time);
+
+	nvkm_debug(subdev, "time low        : %08x\n", lo);
+	nvkm_debug(subdev, "time high       : %08x\n", hi);
+
+	nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
+	nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
+}
+
+u64
+nv04_timer_read(struct nvkm_timer *tmr)
+{
+	struct nvkm_device *device = tmr->subdev.device;
 	u32 hi, lo;
 
 	do {
-		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
-		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
-	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+		hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
+		lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
+	} while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
 
 	return ((u64)hi << 32 | lo);
 }
 
-static void
-nv04_timer_alarm_trigger(struct nvkm_timer *ptimer)
+void
+nv04_timer_alarm_fini(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *alarm, *atemp;
-	unsigned long flags;
-	LIST_HEAD(exec);
-
-	/* move any due alarms off the pending list */
-	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
-		if (alarm->timestamp <= ptimer->read(ptimer))
-			list_move_tail(&alarm->head, &exec);
-	}
-
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&priv->alarms)) {
-		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
-		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
-	} else {
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* execute any pending alarm handlers */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
-		alarm->func(alarm);
-	}
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
 }
 
-static void
-nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *list;
-	unsigned long flags;
-
-	alarm->timestamp = ptimer->read(ptimer) + time;
-
-	/* append new alarm to list, in soonest-alarm-first order */
-	spin_lock_irqsave(&priv->lock, flags);
-	if (!time) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
-		list_for_each_entry(list, &priv->alarms, head) {
-			if (list->timestamp > alarm->timestamp)
-				break;
-		}
-		list_add_tail(&alarm->head, &list->head);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* process pending alarms */
-	nv04_timer_alarm_trigger(ptimer);
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
 }
 
-static void
-nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm)
+void
+nv04_timer_intr(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	list_del_init(&alarm->head);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv04_timer_intr(struct nvkm_subdev *subdev)
-{
-	struct nv04_timer_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nv04_timer_alarm_trigger(&priv->base);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
 	}
 }
 
-int
-nv04_timer_fini(struct nvkm_object *object, bool suspend)
+static void
+nv04_timer_init(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)object;
-	if (suspend)
-		priv->suspend_time = nv04_timer_read(&priv->base);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	return nvkm_timer_fini(&priv->base, suspend);
-}
-
-static int
-nv04_timer_init(struct nvkm_object *object)
-{
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 m = 1, f, n, d, lo, hi;
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: nvclk */
+	u32 n, d;
 
 	/* aim for 31.25MHz, which gives us nanosecond timestamps */
 	d = 1000000 / 32;
+	n = f;
 
-	/* determine base clock for timer source */
-#if 0 /*XXX*/
-	if (device->chipset < 0x40) {
-		n = nvkm_hw_get_clock(device, PLL_CORE);
-	} else
-#endif
-	if (device->chipset <= 0x40) {
-		/*XXX: figure this out */
-		f = -1;
-		n = 0;
-	} else {
-		f = device->crystal;
-		n = f;
-		while (n < (d * 2)) {
-			n += (n / m);
-			m++;
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
 		}
-
-		nv_wr32(priv, 0x009220, m - 1);
-	}
-
-	if (!n) {
-		nv_warn(priv, "unknown input clock freq\n");
-		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
-		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
-			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
-			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
-		}
-		return 0;
+		nvkm_warn(subdev, "unknown input clock freq\n");
 	}
 
 	/* reduce ratio to acceptable values */
@@ -198,65 +125,27 @@
 		d >>= 1;
 	}
 
-	/* restore the time before suspend */
-	lo = priv->suspend_time;
-	hi = (priv->suspend_time >> 32);
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
 
-	nv_debug(priv, "input frequency : %dHz\n", f);
-	nv_debug(priv, "input multiplier: %d\n", m);
-	nv_debug(priv, "numerator       : 0x%08x\n", n);
-	nv_debug(priv, "denominator     : 0x%08x\n", d);
-	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
-
-	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
-	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
-	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
 }
 
-void
-nv04_timer_dtor(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	return nvkm_timer_destroy(&priv->base);
-}
+static const struct nvkm_timer_func
+nv04_timer = {
+	.init = nv04_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
 
 int
-nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
 {
-	struct nv04_timer_priv *priv;
-	int ret;
-
-	ret = nvkm_timer_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.intr = nv04_timer_intr;
-	priv->base.read = nv04_timer_read;
-	priv->base.alarm = nv04_timer_alarm;
-	priv->base.alarm_cancel = nv04_timer_alarm_cancel;
-	priv->suspend_time = 0;
-
-	INIT_LIST_HEAD(&priv->alarms);
-	spin_lock_init(&priv->lock);
-	return 0;
+	return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
 }
-
-struct nvkm_oclass
-nv04_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = nv04_timer_init,
-		.fini = nv04_timer_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
deleted file mode 100644
index 89996a9..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __NVKM_TIMER_NV04_H__
-#define __NVKM_TIMER_NV04_H__
-#include "priv.h"
-
-#define NV04_PTIMER_INTR_0      0x009100
-#define NV04_PTIMER_INTR_EN_0   0x009140
-#define NV04_PTIMER_NUMERATOR   0x009200
-#define NV04_PTIMER_DENOMINATOR 0x009210
-#define NV04_PTIMER_TIME_0      0x009400
-#define NV04_PTIMER_TIME_1      0x009410
-#define NV04_PTIMER_ALARM_0     0x009420
-
-struct nv04_timer_priv {
-	struct nvkm_timer base;
-	struct list_head alarms;
-	spinlock_t lock;
-	u64 suspend_time;
-};
-
-int  nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-void nv04_timer_dtor(struct nvkm_object *);
-int  nv04_timer_fini(struct nvkm_object *, bool);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
new file mode 100644
index 0000000..bb99a15
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv40_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: figure this out */
+	u32 n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
+		}
+		nvkm_warn(subdev, "unknown input clock freq\n");
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
+
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv40_timer = {
+	.init = nv40_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
new file mode 100644
index 0000000..3cf9ec1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv41_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = device->crystal;
+	u32 m = 1, n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	while (n < (d * 2)) {
+		n += (n / m);
+		m++;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "input multiplier: %d\n", m);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
+
+	nvkm_wr32(device, 0x009220, m - 1);
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv41_timer = {
+	.init = nv41_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 08e29a3..f820ca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,26 @@
 #ifndef __NVKM_TIMER_PRIV_H__
 #define __NVKM_TIMER_PRIV_H__
+#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
 #include <subdev/timer.h>
+
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
+		    int index, struct nvkm_timer **);
+
+struct nvkm_timer_func {
+	void (*init)(struct nvkm_timer *);
+	void (*intr)(struct nvkm_timer *);
+	u64 (*read)(struct nvkm_timer *);
+	void (*time)(struct nvkm_timer *, u64 time);
+	void (*alarm_init)(struct nvkm_timer *, u32 time);
+	void (*alarm_fini)(struct nvkm_timer *);
+};
+
+void nvkm_timer_alarm_trigger(struct nvkm_timer *);
+
+void nv04_timer_fini(struct nvkm_timer *);
+void nv04_timer_intr(struct nvkm_timer *);
+void nv04_timer_time(struct nvkm_timer *, u64);
+u64 nv04_timer_read(struct nvkm_timer *);
+void nv04_timer_alarm_init(struct nvkm_timer *, u32);
+void nv04_timer_alarm_fini(struct nvkm_timer *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
new file mode 100644
index 0000000..10bef85
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -0,0 +1,7 @@
+#define NV04_PTIMER_INTR_0      0x009100
+#define NV04_PTIMER_INTR_EN_0   0x009140
+#define NV04_PTIMER_NUMERATOR   0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0      0x009400
+#define NV04_PTIMER_TIME_1      0x009410
+#define NV04_PTIMER_ALARM_0     0x009420
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 39f1580..4752dbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,49 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/vmap.h>
 #include <subdev/bios/volt.h>
 
-static int
+int
 nvkm_volt_get(struct nvkm_volt *volt)
 {
-	if (volt->vid_get) {
-		int ret = volt->vid_get(volt), i;
-		if (ret >= 0) {
-			for (i = 0; i < volt->vid_nr; i++) {
-				if (volt->vid[i].vid == ret)
-					return volt->vid[i].uv;
-			}
-			ret = -EINVAL;
+	int ret = volt->func->vid_get(volt), i;
+	if (ret >= 0) {
+		for (i = 0; i < volt->vid_nr; i++) {
+			if (volt->vid[i].vid == ret)
+				return volt->vid[i].uv;
 		}
-		return ret;
+		ret = -EINVAL;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
 {
-	if (volt->vid_set) {
-		int i, ret = -EINVAL;
-		for (i = 0; i < volt->vid_nr; i++) {
-			if (volt->vid[i].uv == uv) {
-				ret = volt->vid_set(volt, volt->vid[i].vid);
-				nv_debug(volt, "set %duv: %d\n", uv, ret);
-				break;
-			}
+	struct nvkm_subdev *subdev = &volt->subdev;
+	int i, ret = -EINVAL;
+	for (i = 0; i < volt->vid_nr; i++) {
+		if (volt->vid[i].uv == uv) {
+			ret = volt->func->vid_set(volt, volt->vid[i].vid);
+			nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+			break;
 		}
-		return ret;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 {
-	struct nvkm_bios *bios = nvkm_bios(volt);
+	struct nvkm_bios *bios = volt->subdev.device->bios;
 	struct nvbios_vmap_entry info;
 	u8  ver, len;
 	u16 vmap;
@@ -82,10 +78,15 @@
 	return id ? id * 10000 : -ENODEV;
 }
 
-static int
+int
 nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
 {
-	int ret = nvkm_volt_map(volt, id);
+	int ret;
+
+	if (volt->func->set_id)
+		return volt->func->set_id(volt, id, condition);
+
+	ret = nvkm_volt_map(volt, id);
 	if (ret >= 0) {
 		int prev = nvkm_volt_get(volt);
 		if (!condition || prev < 0 ||
@@ -134,51 +135,41 @@
 	}
 }
 
-int
-_nvkm_volt_init(struct nvkm_object *object)
+static int
+nvkm_volt_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	int ret;
-
-	ret = nvkm_subdev_init(&volt->base);
-	if (ret)
-		return ret;
-
-	ret = volt->get(volt);
+	struct nvkm_volt *volt = nvkm_volt(subdev);
+	int ret = nvkm_volt_get(volt);
 	if (ret < 0) {
 		if (ret != -ENODEV)
-			nv_debug(volt, "current voltage unknown\n");
+			nvkm_debug(subdev, "current voltage unknown\n");
 		return 0;
 	}
-
-	nv_info(volt, "GPU voltage: %duv\n", ret);
+	nvkm_debug(subdev, "current voltage: %duv\n", ret);
 	return 0;
 }
 
-void
-_nvkm_volt_dtor(struct nvkm_object *object)
+static void *
+nvkm_volt_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	nvkm_subdev_destroy(&volt->base);
+	return nvkm_volt(subdev);
 }
 
-int
-nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
+static const struct nvkm_subdev_func
+nvkm_volt = {
+	.dtor = nvkm_volt_dtor,
+	.init = nvkm_volt_init,
+};
+
+void
+nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt *volt)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_volt *volt;
-	int ret, i;
+	struct nvkm_bios *bios = device->bios;
+	int i;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT",
-				  "voltage", length, pobject);
-	volt = *pobject;
-	if (ret)
-		return ret;
-
-	volt->get = nvkm_volt_get;
-	volt->set = nvkm_volt_set;
-	volt->set_id = nvkm_volt_set_id;
+	nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+	volt->func = func;
 
 	/* Assuming the non-bios device should build the voltage table later */
 	if (bios)
@@ -186,19 +177,18 @@
 
 	if (volt->vid_nr) {
 		for (i = 0; i < volt->vid_nr; i++) {
-			nv_debug(volt, "VID %02x: %duv\n",
-				 volt->vid[i].vid, volt->vid[i].uv);
-		}
-
-		/*XXX: this is an assumption.. there probably exists boards
-		 * out there with i2c-connected voltage controllers too..
-		 */
-		ret = nvkm_voltgpio_init(volt);
-		if (ret == 0) {
-			volt->vid_get = nvkm_voltgpio_get;
-			volt->vid_set = nvkm_voltgpio_set;
+			nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
+				   volt->vid[i].vid, volt->vid[i].uv);
 		}
 	}
+}
 
-	return ret;
+int
+nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt **pvolt)
+{
+	if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_volt_ctor(func, device, index, *pvolt);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 871fd51..fd56c64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,10 +19,10 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/volt.h>
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
+#include "priv.h"
+
+#include <core/tegra.h>
 
 struct cvb_coef {
 	int c0;
@@ -33,7 +33,7 @@
 	int c5;
 };
 
-struct gk20a_volt_priv {
+struct gk20a_volt {
 	struct nvkm_volt base;
 	struct regulator *vdd;
 };
@@ -101,43 +101,45 @@
 }
 
 static int
-gk20a_volt_vid_get(struct nvkm_volt *volt)
+gk20a_volt_vid_get(struct nvkm_volt *base)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
 	int i, uv;
 
-	uv = regulator_get_voltage(priv->vdd);
+	uv = regulator_get_voltage(volt->vdd);
 
-	for (i = 0; i < volt->vid_nr; i++)
-		if (volt->vid[i].uv >= uv)
+	for (i = 0; i < volt->base.vid_nr; i++)
+		if (volt->base.vid[i].uv >= uv)
 			return i;
 
 	return -EINVAL;
 }
 
 static int
-gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid)
+gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
 
-	nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
-	return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+	nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
+	return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
 static int
-gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
-	int prev_uv = regulator_get_voltage(priv->vdd);
-	int target_uv = volt->vid[id].uv;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
+	int prev_uv = regulator_get_voltage(volt->vdd);
+	int target_uv = volt->base.vid[id].uv;
 	int ret;
 
-	nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
-			prev_uv, target_uv, condition);
+	nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
+		   prev_uv, target_uv, condition);
 	if (!condition ||
 		(condition < 0 && target_uv < prev_uv) ||
 		(condition > 0 && target_uv > prev_uv)) {
-		ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+		ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
 	} else {
 		ret = 0;
 	}
@@ -145,53 +147,42 @@
 	return ret;
 }
 
-static int
-gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_volt_func
+gk20a_volt = {
+	.vid_get = gk20a_volt_vid_get,
+	.vid_set = gk20a_volt_vid_set,
+	.set_id = gk20a_volt_set_id,
+};
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct gk20a_volt_priv *priv;
-	struct nvkm_volt *volt;
-	struct nouveau_platform_device *plat;
-	int i, ret, uv;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_volt *volt;
+	int i, uv;
 
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
+		return -ENOMEM;
 
-	volt = &priv->base;
+	nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+	*pvolt = &volt->base;
 
-	plat = nv_device_to_platform(nv_device(parent));
+	uv = regulator_get_voltage(tdev->vdd);
+	nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
 
-	uv = regulator_get_voltage(plat->gpu->vdd);
-	nv_info(priv, "The default voltage is %duV\n", uv);
+	volt->vdd = tdev->vdd;
 
-	priv->vdd = plat->gpu->vdd;
-	priv->base.vid_get = gk20a_volt_vid_get;
-	priv->base.vid_set = gk20a_volt_vid_set;
-	priv->base.set_id = gk20a_volt_set_id;
-
-	volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
-	nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
-	for (i = 0; i < volt->vid_nr; i++) {
-		volt->vid[i].vid = i;
-		volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
-					plat->gpu_speedo);
-		nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
-					volt->vid[i].uv);
+	volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+	nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
+		   volt->base.vid_nr);
+	for (i = 0; i < volt->base.vid_nr; i++) {
+		volt->base.vid[i].vid = i;
+		volt->base.vid[i].uv =
+			gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+						tdev->gpu_speedo);
+		nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
+			   volt->base.vid[i].vid, volt->base.vid[i].uv);
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass
-gk20a_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index b778deb..d2bac1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -34,13 +34,13 @@
 int
 nvkm_voltgpio_get(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	u8 vid = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->get(gpio, 0, tags[i], 0xff);
+			int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
 			if (ret < 0)
 				return ret;
 			vid |= ret << i;
@@ -53,12 +53,12 @@
 int
 nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+			int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
 			if (ret < 0)
 				return ret;
 		}
@@ -70,7 +70,8 @@
 int
 nvkm_voltgpio_init(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_subdev *subdev = &volt->subdev;
+	struct nvkm_gpio *gpio = subdev->device->gpio;
 	struct dcb_gpio_func func;
 	int i;
 
@@ -82,11 +83,11 @@
 	 */
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+			int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
 			if (ret) {
 				if (ret != -ENOENT)
 					return ret;
-				nv_debug(volt, "VID bit %d has no GPIO\n", i);
+				nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
 				volt->vid_mask &= ~(1 << i);
 			}
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 0ac5a3f..23409387 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,35 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
 
-struct nv40_volt_priv {
-	struct nvkm_volt base;
+static const struct nvkm_volt_func
+nv40_volt = {
+	.vid_get = nvkm_voltgpio_get,
+	.vid_set = nvkm_voltgpio_set,
 };
 
-static int
-nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct nv40_volt_priv *priv;
+	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+	*pvolt = volt;
 	if (ret)
 		return ret;
 
-	return 0;
+	return nvkm_voltgpio_init(volt);
 }
-
-struct nvkm_oclass
-nv40_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
new file mode 100644
index 0000000..394f37c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_VOLT_PRIV_H__
+#define __NVKM_VOLT_PRIV_H__
+#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
+#include <subdev/volt.h>
+
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
+		    int index, struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
+		   int index, struct nvkm_volt **);
+
+struct nvkm_volt_func {
+	int (*vid_get)(struct nvkm_volt *);
+	int (*vid_set)(struct nvkm_volt *, u8 vid);
+	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+};
+
+int nvkm_voltgpio_init(struct nvkm_volt *);
+int nvkm_voltgpio_get(struct nvkm_volt *);
+int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c92..9a4ba4f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@
 	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
 }
 
-static void omap_crtc_atomic_begin(struct drm_crtc *crtc)
+static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 }
 
-static void omap_crtc_atomic_flush(struct drm_crtc *crtc)
+static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 720d16b..b8e4cde 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
@@ -179,10 +179,10 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -190,7 +190,6 @@
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@
 
 	strcpy(fbi->fix.id, MODULE_NAME);
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -236,8 +229,9 @@
 fail:
 
 	if (ret) {
-		if (fbi)
-			framebuffer_release(fbi);
+
+		drm_fb_helper_release_fbi(helper);
+
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@
 	struct omap_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct omap_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
 
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6d64c7b..7d4704b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,13 +18,21 @@
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
-config DRM_PANEL_LD9040
-	tristate "LD9040 RGB/SPI panel"
+config DRM_PANEL_SAMSUNG_LD9040
+	tristate "Samsung LD9040 RGB/SPI panel"
 	depends on OF && SPI
 	select VIDEOMODE_HELPERS
 
-config DRM_PANEL_S6E8AA0
-	tristate "S6E8AA0 DSI video mode panel"
+config DRM_PANEL_LG_LG4573
+	tristate "LG4573 RGB/SPI panel"
+	depends on OF && SPI
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable support for the LG4573 RGB panel.
+	  To compile this driver as a module, choose M here.
+
+config DRM_PANEL_SAMSUNG_S6E8AA0
+	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
 	select DRM_MIPI_DSI
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4b2a043..d0f016d 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
-obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
-obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
deleted file mode 100644
index 9c27bde..0000000
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * ld9040 AMOLED LCD drm_panel driver.
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd
- * Derived from drivers/video/backlight/ld9040.c
- *
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
-
-#include <video/mipi_display.h>
-#include <video/of_videomode.h>
-#include <video/videomode.h>
-
-/* Manufacturer Command Set */
-#define MCS_MANPWR		0xb0
-#define MCS_ELVSS_ON		0xb1
-#define MCS_USER_SETTING	0xf0
-#define MCS_DISPCTL		0xf2
-#define MCS_POWER_CTRL		0xf4
-#define MCS_GTCON		0xf7
-#define MCS_PANEL_CONDITION	0xf8
-#define MCS_GAMMA_SET1		0xf9
-#define MCS_GAMMA_CTRL		0xfb
-
-/* array of gamma tables for gamma value 2.2 */
-static u8 const ld9040_gammas[25][22] = {
-	{ 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
-	  0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
-	{ 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
-	  0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
-	{ 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
-	  0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
-	{ 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
-	  0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
-	{ 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
-	  0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
-	{ 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
-	  0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
-	{ 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
-	  0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
-	{ 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
-	  0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
-	{ 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
-	  0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
-	{ 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
-	  0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
-	{ 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
-	  0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
-	{ 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
-	  0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
-	{ 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
-	  0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
-	{ 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
-	  0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
-	{ 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
-	  0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
-	{ 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
-	  0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
-	{ 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
-	  0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
-	{ 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
-	  0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
-	{ 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
-	  0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
-	{ 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
-	  0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
-	{ 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
-	  0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
-	{ 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
-	  0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
-	{ 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
-	  0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
-	{ 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
-	  0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
-	{ 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
-	  0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
-};
-
-struct ld9040 {
-	struct device *dev;
-	struct drm_panel panel;
-
-	struct regulator_bulk_data supplies[2];
-	struct gpio_desc *reset_gpio;
-	u32 power_on_delay;
-	u32 reset_delay;
-	struct videomode vm;
-	u32 width_mm;
-	u32 height_mm;
-
-	int brightness;
-
-	/* This field is tested by functions directly accessing bus before
-	 * transfer, transfer is skipped if it is set. In case of transfer
-	 * failure or unexpected response the field is set to error value.
-	 * Such construct allows to eliminate many checks in higher level
-	 * functions.
-	 */
-	int error;
-};
-
-static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
-{
-	return container_of(panel, struct ld9040, panel);
-}
-
-static int ld9040_clear_error(struct ld9040 *ctx)
-{
-	int ret = ctx->error;
-
-	ctx->error = 0;
-	return ret;
-}
-
-static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
-{
-	struct spi_device *spi = to_spi_device(ctx->dev);
-	struct spi_transfer xfer = {
-		.len		= 2,
-		.tx_buf		= &data,
-	};
-	struct spi_message msg;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	return spi_sync(spi, &msg);
-}
-
-static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
-{
-	int ret = 0;
-
-	if (ctx->error < 0 || len == 0)
-		return;
-
-	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
-	ret = ld9040_spi_write_word(ctx, *data);
-
-	while (!ret && --len) {
-		++data;
-		ret = ld9040_spi_write_word(ctx, *data | 0x100);
-	}
-
-	if (ret) {
-		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
-			(int)len, data);
-		ctx->error = ret;
-	}
-
-	usleep_range(300, 310);
-}
-
-#define ld9040_dcs_write_seq_static(ctx, seq...) \
-({\
-	static const u8 d[] = { seq };\
-	ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-static void ld9040_brightness_set(struct ld9040 *ctx)
-{
-	ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
-			 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
-
-	ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
-}
-
-static void ld9040_init(struct ld9040 *ctx)
-{
-	ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
-	ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
-		0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
-		0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
-		0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
-	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
-		0x02, 0x08, 0x08, 0x10, 0x10);
-	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
-	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
-		0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
-	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
-	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
-	ld9040_brightness_set(ctx);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
-}
-
-static int ld9040_power_on(struct ld9040 *ctx)
-{
-	int ret;
-
-	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	msleep(ctx->power_on_delay);
-	gpiod_set_value(ctx->reset_gpio, 0);
-	msleep(ctx->reset_delay);
-	gpiod_set_value(ctx->reset_gpio, 1);
-	msleep(ctx->reset_delay);
-
-	return 0;
-}
-
-static int ld9040_power_off(struct ld9040 *ctx)
-{
-	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static int ld9040_disable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int ld9040_unprepare(struct drm_panel *panel)
-{
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-
-	msleep(120);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
-	msleep(40);
-
-	ld9040_clear_error(ctx);
-
-	return ld9040_power_off(ctx);
-}
-
-static int ld9040_prepare(struct drm_panel *panel)
-{
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-	int ret;
-
-	ret = ld9040_power_on(ctx);
-	if (ret < 0)
-		return ret;
-
-	ld9040_init(ctx);
-
-	ret = ld9040_clear_error(ctx);
-
-	if (ret < 0)
-		ld9040_unprepare(panel);
-
-	return ret;
-}
-
-static int ld9040_enable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int ld9040_get_modes(struct drm_panel *panel)
-{
-	struct drm_connector *connector = panel->connector;
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode) {
-		DRM_ERROR("failed to create a new display mode\n");
-		return 0;
-	}
-
-	drm_display_mode_from_videomode(&ctx->vm, mode);
-	mode->width_mm = ctx->width_mm;
-	mode->height_mm = ctx->height_mm;
-	connector->display_info.width_mm = mode->width_mm;
-	connector->display_info.height_mm = mode->height_mm;
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-
-static const struct drm_panel_funcs ld9040_drm_funcs = {
-	.disable = ld9040_disable,
-	.unprepare = ld9040_unprepare,
-	.prepare = ld9040_prepare,
-	.enable = ld9040_enable,
-	.get_modes = ld9040_get_modes,
-};
-
-static int ld9040_parse_dt(struct ld9040 *ctx)
-{
-	struct device *dev = ctx->dev;
-	struct device_node *np = dev->of_node;
-	int ret;
-
-	ret = of_get_videomode(np, &ctx->vm, 0);
-	if (ret < 0)
-		return ret;
-
-	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
-	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
-	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
-	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
-
-	return 0;
-}
-
-static int ld9040_probe(struct spi_device *spi)
-{
-	struct device *dev = &spi->dev;
-	struct ld9040 *ctx;
-	int ret;
-
-	ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	spi_set_drvdata(spi, ctx);
-
-	ctx->dev = dev;
-	ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
-
-	ret = ld9040_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
-
-	ctx->supplies[0].supply = "vdd3";
-	ctx->supplies[1].supply = "vci";
-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
-				      ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(ctx->reset_gpio)) {
-		dev_err(dev, "cannot get reset-gpios %ld\n",
-			PTR_ERR(ctx->reset_gpio));
-		return PTR_ERR(ctx->reset_gpio);
-	}
-
-	spi->bits_per_word = 9;
-	ret = spi_setup(spi);
-	if (ret < 0) {
-		dev_err(dev, "spi setup failed.\n");
-		return ret;
-	}
-
-	drm_panel_init(&ctx->panel);
-	ctx->panel.dev = dev;
-	ctx->panel.funcs = &ld9040_drm_funcs;
-
-	return drm_panel_add(&ctx->panel);
-}
-
-static int ld9040_remove(struct spi_device *spi)
-{
-	struct ld9040 *ctx = spi_get_drvdata(spi);
-
-	ld9040_power_off(ctx);
-	drm_panel_remove(&ctx->panel);
-
-	return 0;
-}
-
-static const struct of_device_id ld9040_of_match[] = {
-	{ .compatible = "samsung,ld9040" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ld9040_of_match);
-
-static struct spi_driver ld9040_driver = {
-	.probe = ld9040_probe,
-	.remove = ld9040_remove,
-	.driver = {
-		.name = "ld9040",
-		.owner = THIS_MODULE,
-		.of_match_table = ld9040_of_match,
-	},
-};
-module_spi_driver(ld9040_driver);
-
-MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
-MODULE_DESCRIPTION("ld9040 LCD Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
new file mode 100644
index 0000000..a7b4939
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
+ *
+ * from:
+ * drivers/gpu/drm/panel/panel-ld9040.c
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+struct lg4573 {
+	struct drm_panel panel;
+	struct spi_device *spi;
+	struct videomode vm;
+};
+
+static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
+{
+	return container_of(panel, struct lg4573, panel);
+}
+
+static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
+{
+	struct spi_transfer xfer = {
+		.len = 2,
+	};
+	u16 temp = cpu_to_be16(data);
+	struct spi_message msg;
+
+	dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
+	xfer.tx_buf = &temp;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(ctx->spi, &msg);
+}
+
+static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
+				      unsigned int count)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < count; i++) {
+		ret = lg4573_spi_write_u16(ctx, buffer[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
+{
+	return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs));
+}
+
+static int lg4573_display_on(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	if (ret)
+		return ret;
+
+	msleep(5);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int lg4573_display_off(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	if (ret)
+		return ret;
+
+	msleep(120);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+}
+
+static int lg4573_display_mode_settings(struct lg4573 *ctx)
+{
+	static const u16 display_mode_settings[] = {
+		0x703A, 0x7270, 0x70B1, 0x7208,
+		0x723B, 0x720F, 0x70B2, 0x7200,
+		0x72C8, 0x70B3, 0x7200, 0x70B4,
+		0x7200, 0x70B5, 0x7242, 0x7210,
+		0x7210, 0x7200, 0x7220, 0x70B6,
+		0x720B, 0x720F, 0x723C, 0x7213,
+		0x7213, 0x72E8, 0x70B7, 0x7246,
+		0x7206, 0x720C, 0x7200, 0x7200,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
+	return lg4573_spi_write_u16_array(ctx, display_mode_settings,
+					  ARRAY_SIZE(display_mode_settings));
+}
+
+static int lg4573_power_settings(struct lg4573 *ctx)
+{
+	static const u16 power_settings[] = {
+		0x70C0, 0x7201, 0x7211, 0x70C3,
+		0x7207, 0x7203, 0x7204, 0x7204,
+		0x7204, 0x70C4, 0x7212, 0x7224,
+		0x7218, 0x7218, 0x7202, 0x7249,
+		0x70C5, 0x726F, 0x70C6, 0x7241,
+		0x7263,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer power settings\n");
+	return lg4573_spi_write_u16_array(ctx, power_settings,
+					  ARRAY_SIZE(power_settings));
+}
+
+static int lg4573_gamma_settings(struct lg4573 *ctx)
+{
+	static const u16 gamma_settings[] = {
+		0x70D0, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D1, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D2, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D3, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D4, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D5, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
+	return lg4573_spi_write_u16_array(ctx, gamma_settings,
+					  ARRAY_SIZE(gamma_settings));
+}
+
+static int lg4573_init(struct lg4573 *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->panel.dev, "initializing LCD\n");
+
+	ret = lg4573_display_mode_settings(ctx);
+	if (ret)
+		return ret;
+
+	ret = lg4573_power_settings(ctx);
+	if (ret)
+		return ret;
+
+	return lg4573_gamma_settings(ctx);
+}
+
+static int lg4573_power_on(struct lg4573 *ctx)
+{
+	return lg4573_display_on(ctx);
+}
+
+static int lg4573_disable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+
+	return lg4573_display_off(ctx);
+}
+
+static int lg4573_enable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+
+	lg4573_init(ctx);
+
+	return lg4573_power_on(ctx);
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 27000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 10,
+	.hsync_end = 480 + 10 + 59,
+	.htotal = 480 + 10 + 59 + 10,
+	.vdisplay = 800,
+	.vsync_start = 800 + 15,
+	.vsync_end = 800 + 15 + 15,
+	.vtotal = 800 + 15 + 15 + 15,
+	.vrefresh = 60,
+};
+
+static int lg4573_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	panel->connector->display_info.width_mm = 61;
+	panel->connector->display_info.height_mm = 103;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs lg4573_drm_funcs = {
+	.disable = lg4573_disable,
+	.enable = lg4573_enable,
+	.get_modes = lg4573_get_modes,
+};
+
+static int lg4573_probe(struct spi_device *spi)
+{
+	struct lg4573 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->spi = spi;
+
+	spi_set_drvdata(spi, ctx);
+	spi->bits_per_word = 8;
+
+	ret = spi_setup(spi);
+	if (ret < 0) {
+		dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
+		return ret;
+	}
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = &spi->dev;
+	ctx->panel.funcs = &lg4573_drm_funcs;
+
+	return drm_panel_add(&ctx->panel);
+}
+
+static int lg4573_remove(struct spi_device *spi)
+{
+	struct lg4573 *ctx = spi_get_drvdata(spi);
+
+	lg4573_display_off(ctx);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id lg4573_of_match[] = {
+	{ .compatible = "lg,lg4573" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lg4573_of_match);
+
+static struct spi_driver lg4573_driver = {
+	.probe = lg4573_probe,
+	.remove = lg4573_remove,
+	.driver = {
+		.name = "lg4573",
+		.owner = THIS_MODULE,
+		.of_match_table = lg4573_of_match,
+	},
+};
+module_spi_driver(lg4573_driver);
+
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_DESCRIPTION("lg4573 LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
deleted file mode 100644
index 3005110..0000000
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ /dev/null
@@ -1,1067 +0,0 @@
-/*
- * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd
- *
- * Inki Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- * Joongmock Shin <jmock.shin@samsung.com>
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Tomasz Figa <t.figa@samsung.com>
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/mipi_display.h>
-#include <video/of_videomode.h>
-#include <video/videomode.h>
-
-#define LDI_MTP_LENGTH			24
-#define GAMMA_LEVEL_NUM			25
-#define GAMMA_TABLE_LEN			26
-
-#define PANELCTL_SS_MASK		(1 << 5)
-#define PANELCTL_SS_1_800		(0 << 5)
-#define PANELCTL_SS_800_1		(1 << 5)
-#define PANELCTL_GTCON_MASK		(7 << 2)
-#define PANELCTL_GTCON_110		(6 << 2)
-#define PANELCTL_GTCON_111		(7 << 2)
-
-#define PANELCTL_CLK1_CON_MASK		(7 << 3)
-#define PANELCTL_CLK1_000		(0 << 3)
-#define PANELCTL_CLK1_001		(1 << 3)
-#define PANELCTL_CLK2_CON_MASK		(7 << 0)
-#define PANELCTL_CLK2_000		(0 << 0)
-#define PANELCTL_CLK2_001		(1 << 0)
-
-#define PANELCTL_INT1_CON_MASK		(7 << 3)
-#define PANELCTL_INT1_000		(0 << 3)
-#define PANELCTL_INT1_001		(1 << 3)
-#define PANELCTL_INT2_CON_MASK		(7 << 0)
-#define PANELCTL_INT2_000		(0 << 0)
-#define PANELCTL_INT2_001		(1 << 0)
-
-#define PANELCTL_BICTL_CON_MASK		(7 << 3)
-#define PANELCTL_BICTL_000		(0 << 3)
-#define PANELCTL_BICTL_001		(1 << 3)
-#define PANELCTL_BICTLB_CON_MASK	(7 << 0)
-#define PANELCTL_BICTLB_000		(0 << 0)
-#define PANELCTL_BICTLB_001		(1 << 0)
-
-#define PANELCTL_EM_CLK1_CON_MASK	(7 << 3)
-#define PANELCTL_EM_CLK1_110		(6 << 3)
-#define PANELCTL_EM_CLK1_111		(7 << 3)
-#define PANELCTL_EM_CLK1B_CON_MASK	(7 << 0)
-#define PANELCTL_EM_CLK1B_110		(6 << 0)
-#define PANELCTL_EM_CLK1B_111		(7 << 0)
-
-#define PANELCTL_EM_CLK2_CON_MASK	(7 << 3)
-#define PANELCTL_EM_CLK2_110		(6 << 3)
-#define PANELCTL_EM_CLK2_111		(7 << 3)
-#define PANELCTL_EM_CLK2B_CON_MASK	(7 << 0)
-#define PANELCTL_EM_CLK2B_110		(6 << 0)
-#define PANELCTL_EM_CLK2B_111		(7 << 0)
-
-#define PANELCTL_EM_INT1_CON_MASK	(7 << 3)
-#define PANELCTL_EM_INT1_000		(0 << 3)
-#define PANELCTL_EM_INT1_001		(1 << 3)
-#define PANELCTL_EM_INT2_CON_MASK	(7 << 0)
-#define PANELCTL_EM_INT2_000		(0 << 0)
-#define PANELCTL_EM_INT2_001		(1 << 0)
-
-#define AID_DISABLE			(0x4)
-#define AID_1				(0x5)
-#define AID_2				(0x6)
-#define AID_3				(0x7)
-
-typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
-
-struct s6e8aa0_variant {
-	u8 version;
-	const s6e8aa0_gamma_table *gamma_tables;
-};
-
-struct s6e8aa0 {
-	struct device *dev;
-	struct drm_panel panel;
-
-	struct regulator_bulk_data supplies[2];
-	struct gpio_desc *reset_gpio;
-	u32 power_on_delay;
-	u32 reset_delay;
-	u32 init_delay;
-	bool flip_horizontal;
-	bool flip_vertical;
-	struct videomode vm;
-	u32 width_mm;
-	u32 height_mm;
-
-	u8 version;
-	u8 id;
-	const struct s6e8aa0_variant *variant;
-	int brightness;
-
-	/* This field is tested by functions directly accessing DSI bus before
-	 * transfer, transfer is skipped if it is set. In case of transfer
-	 * failure or unexpected response the field is set to error value.
-	 * Such construct allows to eliminate many checks in higher level
-	 * functions.
-	 */
-	int error;
-};
-
-static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
-{
-	return container_of(panel, struct s6e8aa0, panel);
-}
-
-static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
-{
-	int ret = ctx->error;
-
-	ctx->error = 0;
-	return ret;
-}
-
-static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	ssize_t ret;
-
-	if (ctx->error < 0)
-		return;
-
-	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
-	if (ret < 0) {
-		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
-			(int)len, data);
-		ctx->error = ret;
-	}
-}
-
-static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	int ret;
-
-	if (ctx->error < 0)
-		return ctx->error;
-
-	ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
-	if (ret < 0) {
-		dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
-		ctx->error = ret;
-	}
-
-	return ret;
-}
-
-#define s6e8aa0_dcs_write_seq(ctx, seq...) \
-({\
-	const u8 d[] = { seq };\
-	BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
-	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
-({\
-	static const u8 d[] = { seq };\
-	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
-}
-
-static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
-{
-	static const u8 aids[] = {
-		0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
-	};
-	u8 aid = aids[ctx->id >> 5];
-	u8 cfg = 0x3d;
-	u8 clk_con = 0xc8;
-	u8 int_con = 0x08;
-	u8 bictl_con = 0x48;
-	u8 em_clk1_con = 0xff;
-	u8 em_clk2_con = 0xff;
-	u8 em_int_con = 0xc8;
-
-	if (ctx->flip_vertical) {
-		/* GTCON */
-		cfg &= ~(PANELCTL_GTCON_MASK);
-		cfg |= (PANELCTL_GTCON_110);
-	}
-
-	if (ctx->flip_horizontal) {
-		/* SS */
-		cfg &= ~(PANELCTL_SS_MASK);
-		cfg |= (PANELCTL_SS_1_800);
-	}
-
-	if (ctx->flip_horizontal || ctx->flip_vertical) {
-		/* CLK1,2_CON */
-		clk_con &= ~(PANELCTL_CLK1_CON_MASK |
-			PANELCTL_CLK2_CON_MASK);
-		clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
-
-		/* INT1,2_CON */
-		int_con &= ~(PANELCTL_INT1_CON_MASK |
-			PANELCTL_INT2_CON_MASK);
-		int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
-
-		/* BICTL,B_CON */
-		bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
-			PANELCTL_BICTLB_CON_MASK);
-		bictl_con |= (PANELCTL_BICTL_000 |
-			PANELCTL_BICTLB_001);
-
-		/* EM_CLK1,1B_CON */
-		em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
-			PANELCTL_EM_CLK1B_CON_MASK);
-		em_clk1_con |= (PANELCTL_EM_CLK1_110 |
-			PANELCTL_EM_CLK1B_110);
-
-		/* EM_CLK2,2B_CON */
-		em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
-			PANELCTL_EM_CLK2B_CON_MASK);
-		em_clk2_con |= (PANELCTL_EM_CLK2_110 |
-			PANELCTL_EM_CLK2B_110);
-
-		/* EM_INT1,2_CON */
-		em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
-			PANELCTL_EM_INT2_CON_MASK);
-		em_int_con |= (PANELCTL_EM_INT1_000 |
-			PANELCTL_EM_INT2_001);
-	}
-
-	s6e8aa0_dcs_write_seq(ctx,
-		0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
-		0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
-		0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
-		0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
-		bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
-		em_int_con);
-}
-
-static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
-{
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write_seq_static(ctx,
-			0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
-			0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
-			0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
-			0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
-			0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
-		);
-	else
-		s6e8aa0_panel_cond_set_v142(ctx);
-}
-
-static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
-}
-
-static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
-}
-
-static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
-{
-	static const u8 pent32[] = {
-		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
-	};
-
-	static const u8 pent142[] = {
-		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
-	};
-
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
-	else
-		s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
-}
-
-static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
-{
-	static const u8 pwr142[] = {
-		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
-	};
-
-	static const u8 pwr32[] = {
-		0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
-	};
-
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
-	else
-		s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
-}
-
-static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
-{
-	u8 id = ctx->id ? 0 : 0x95;
-
-	s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
-}
-
-static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
-{
-	u8 br;
-
-	switch (ctx->brightness) {
-	case 0 ... 6: /* 30cd ~ 100cd */
-		br = 0xdf;
-		break;
-	case 7 ... 11: /* 120cd ~ 150cd */
-		br = 0xdd;
-		break;
-	case 12 ... 15: /* 180cd ~ 210cd */
-	default:
-		br = 0xd9;
-		break;
-	case 16 ... 24: /* 240cd ~ 300cd */
-		br = 0xd0;
-		break;
-	}
-
-	s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
-		0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
-}
-
-static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
-{
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write_seq_static(ctx,
-			0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
-			0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
-	else
-		s6e8aa0_elvss_nvm_set_v142(ctx);
-}
-
-static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
-}
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
-		0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
-		0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
-		0x00, 0x70,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
-		0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
-		0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
-		0x00, 0x7d,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
-		0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
-		0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
-		0x00, 0x8f,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
-		0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
-		0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
-		0x00, 0x9e,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
-		0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
-		0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
-		0x00, 0xa4,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
-		0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
-		0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
-		0x00, 0xaa,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
-		0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
-		0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
-		0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
-		0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
-		0x00, 0xb9,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
-		0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
-		0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
-		0x00, 0xbf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
-		0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
-		0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
-		0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
-		0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
-		0x00, 0xc8,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
-		0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
-		0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
-		0x00, 0xcc,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
-		0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
-		0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
-		0x00, 0xcf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
-		0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
-		0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
-		0x00, 0xd4,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
-		0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
-		0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
-		0x00, 0xd8,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
-		0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
-		0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
-		0x00, 0xdc,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
-		0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
-		0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
-		0x00, 0xdf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
-		0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
-		0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
-		0x00, 0xe2,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
-		0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
-		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
-		0x00, 0xe6,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
-		0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
-		0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
-		0x00, 0xe9,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
-		0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
-		0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
-		0x00, 0xec,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
-		0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
-		0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
-		0x00, 0xf0,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
-		0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
-		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
-		0x00, 0xf3,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
-		0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
-		0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
-		0x00, 0xf6,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
-		0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
-		0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
-		0x00, 0xfc,
-	},
-};
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
-		0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
-		0x00, 0x5f,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
-		0xc1, 0xd2, 0xd1, 0xce,	0x00, 0x53, 0x00, 0x46,
-		0x00, 0x67,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
-		0xbd, 0xd2, 0xd2, 0xce,	0x00, 0x59, 0x00, 0x4b,
-		0x00, 0x6e,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
-		0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
-		0x00, 0x75,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
-		0xb9, 0xd0, 0xd1, 0xcd,	0x00, 0x63, 0x00, 0x54,
-		0x00, 0x7a,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
-		0xb9, 0xce, 0xce, 0xc9,	0x00, 0x68, 0x00, 0x59,
-		0x00, 0x81,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
-		0xb8, 0xcc, 0xcd, 0xc7,	0x00, 0x6c, 0x00, 0x5c,
-		0x00, 0x86,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
-		0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
-		0xb5, 0xca, 0xcc, 0xc5,	0x00, 0x74, 0x00, 0x63,
-		0x00, 0x90,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
-		0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
-		0xb4, 0xca, 0xcb, 0xc5,	0x00, 0x77, 0x00, 0x66,
-		0x00, 0x94,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
-		0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
-		0xb3, 0xc9, 0xca, 0xc3,	0x00, 0x7b, 0x00, 0x69,
-		0x00, 0x99,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
-		0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
-		0xb2, 0xca, 0xcb, 0xc4,	0x00, 0x7e, 0x00, 0x6c,
-		0x00, 0x9d,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
-		0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
-		0xb0, 0xc7, 0xc9, 0xc1,	0x00, 0x84, 0x00, 0x71,
-		0x00, 0xa5,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
-		0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
-		0xaf, 0xc7, 0xc9, 0xc1,	0x00, 0x87, 0x00, 0x73,
-		0x00, 0xa8,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
-		0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
-		0xaf, 0xc5, 0xc7, 0xbf,	0x00, 0x8a, 0x00, 0x76,
-		0x00, 0xac,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
-		0xce, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
-		0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8c, 0x00, 0x78,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
-		0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
-		0xae, 0xc5, 0xc6, 0xbe,	0x00, 0x91, 0x00, 0x7d,
-		0x00, 0xb6,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
-		0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
-		0xad, 0xc3, 0xc4, 0xbb,	0x00, 0x94, 0x00, 0x7f,
-		0x00, 0xba,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
-		0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
-		0xac, 0xc3, 0xc5, 0xbc,	0x00, 0x96, 0x00, 0x81,
-		0x00, 0xbd,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
-		0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
-		0xab, 0xc2, 0xc4, 0xbb,	0x00, 0x99, 0x00, 0x83,
-		0x00, 0xc0,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
-		0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
-		0xab, 0xc1, 0xc4, 0xba,	0x00, 0x9b, 0x00, 0x85,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
-		0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
-		0xab, 0xc1, 0xc2, 0xb9, 0x00, 0x9d, 0x00, 0x87,
-		0x00, 0xc6,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
-		0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
-		0xab, 0xbe, 0xc0, 0xb7,	0x00, 0xa1, 0x00, 0x8a,
-		0x00, 0xca,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
-		0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
-		0xa9, 0xbe, 0xc1, 0xb7,	0x00, 0xa3, 0x00, 0x8b,
-		0x00, 0xce,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
-		0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
-		0xa9, 0xbd, 0xc0, 0xb6,	0x00, 0xa5, 0x00, 0x8d,
-		0x00, 0xd0,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
-		0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
-		0xa8, 0xbe, 0xc0, 0xb7,	0x00, 0xa8, 0x00, 0x90,
-		0x00, 0xd3,
-	}
-};
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
-		0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
-		0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
-		0x00, 0x58,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
-		0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
-		0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
-		0x00, 0x64,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
-		0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
-		0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
-		0x00, 0x74,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
-		0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
-		0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
-		0x00, 0x80,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
-		0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
-		0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
-		0x00, 0x85,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
-		0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
-		0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
-		0x00, 0x8a,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
-		0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
-		0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
-		0x00, 0x8e,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
-		0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
-		0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
-		0x00, 0x96,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
-		0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
-		0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
-		0x00, 0x9b,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
-		0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
-		0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
-		0x00, 0x9e,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
-		0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
-		0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
-		0x00, 0xa2,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
-		0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
-		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
-		0x00, 0xa5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
-		0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
-		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
-		0x00, 0xa8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
-		0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
-		0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
-		0x00, 0xac,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
-		0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
-		0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
-		0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
-		0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
-		0x00, 0xb2,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
-		0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
-		0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
-		0x00, 0xb5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
-		0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
-		0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
-		0x00, 0xb8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
-		0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
-		0x00, 0xbb,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
-		0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
-		0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
-		0x00, 0xbd,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
-		0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
-		0x00, 0xc0,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
-		0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
-		0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
-		0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
-		0x00, 0xc5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
-		0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
-		0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
-		0x00, 0xc8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
-		0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
-		0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
-		0x00, 0xcc,
-	},
-};
-
-static const struct s6e8aa0_variant s6e8aa0_variants[] = {
-	{
-		.version = 32,
-		.gamma_tables = s6e8aa0_gamma_tables_v32,
-	}, {
-		.version = 96,
-		.gamma_tables = s6e8aa0_gamma_tables_v96,
-	}, {
-		.version = 142,
-		.gamma_tables = s6e8aa0_gamma_tables_v142,
-	}, {
-		.version = 210,
-		.gamma_tables = s6e8aa0_gamma_tables_v142,
-	}
-};
-
-static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
-{
-	const u8 *gamma;
-
-	if (ctx->error)
-		return;
-
-	gamma = ctx->variant->gamma_tables[ctx->brightness];
-
-	if (ctx->version >= 142)
-		s6e8aa0_elvss_nvm_set(ctx);
-
-	s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
-
-	/* update gamma table. */
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
-}
-
-static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_apply_level_1_key(ctx);
-	s6e8aa0_apply_level_2_key(ctx);
-	msleep(20);
-
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
-	msleep(40);
-
-	s6e8aa0_panel_cond_set(ctx);
-	s6e8aa0_display_condition_set(ctx);
-	s6e8aa0_brightness_set(ctx);
-	s6e8aa0_etc_source_control(ctx);
-	s6e8aa0_etc_pentile_control(ctx);
-	s6e8aa0_elvss_nvm_set(ctx);
-	s6e8aa0_etc_power_control(ctx);
-	s6e8aa0_etc_elvss_control(ctx);
-	msleep(ctx->init_delay);
-}
-
-static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
-						   u16 size)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	int ret;
-
-	if (ctx->error < 0)
-		return;
-
-	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
-	if (ret < 0) {
-		dev_err(ctx->dev,
-			"error %d setting maximum return packet size to %d\n",
-			ret, size);
-		ctx->error = ret;
-	}
-}
-
-static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
-{
-	u8 id[3];
-	int ret, i;
-
-	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
-	if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
-		dev_err(ctx->dev, "read id failed\n");
-		ctx->error = -EIO;
-		return;
-	}
-
-	dev_info(ctx->dev, "ID: 0x%02x, 0x%02x, 0x%02x\n", id[0], id[1], id[2]);
-
-	for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
-		if (id[1] == s6e8aa0_variants[i].version)
-			break;
-	}
-	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
-		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
-		ctx->error = -EINVAL;
-		return;
-	}
-
-	ctx->variant = &s6e8aa0_variants[i];
-	ctx->version = id[1];
-	ctx->id = id[2];
-}
-
-static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_set_maximum_return_packet_size(ctx, 3);
-	s6e8aa0_read_mtp_id(ctx);
-	s6e8aa0_panel_init(ctx);
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
-}
-
-static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
-{
-	int ret;
-
-	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	msleep(ctx->power_on_delay);
-
-	gpiod_set_value(ctx->reset_gpio, 0);
-	usleep_range(10000, 11000);
-	gpiod_set_value(ctx->reset_gpio, 1);
-
-	msleep(ctx->reset_delay);
-
-	return 0;
-}
-
-static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
-{
-	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static int s6e8aa0_disable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int s6e8aa0_unprepare(struct drm_panel *panel)
-{
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
-	msleep(40);
-
-	s6e8aa0_clear_error(ctx);
-
-	return s6e8aa0_power_off(ctx);
-}
-
-static int s6e8aa0_prepare(struct drm_panel *panel)
-{
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-	int ret;
-
-	ret = s6e8aa0_power_on(ctx);
-	if (ret < 0)
-		return ret;
-
-	s6e8aa0_set_sequence(ctx);
-	ret = ctx->error;
-
-	if (ret < 0)
-		s6e8aa0_unprepare(panel);
-
-	return ret;
-}
-
-static int s6e8aa0_enable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int s6e8aa0_get_modes(struct drm_panel *panel)
-{
-	struct drm_connector *connector = panel->connector;
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode) {
-		DRM_ERROR("failed to create a new display mode\n");
-		return 0;
-	}
-
-	drm_display_mode_from_videomode(&ctx->vm, mode);
-	mode->width_mm = ctx->width_mm;
-	mode->height_mm = ctx->height_mm;
-	connector->display_info.width_mm = mode->width_mm;
-	connector->display_info.height_mm = mode->height_mm;
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-
-static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
-	.disable = s6e8aa0_disable,
-	.unprepare = s6e8aa0_unprepare,
-	.prepare = s6e8aa0_prepare,
-	.enable = s6e8aa0_enable,
-	.get_modes = s6e8aa0_get_modes,
-};
-
-static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
-{
-	struct device *dev = ctx->dev;
-	struct device_node *np = dev->of_node;
-	int ret;
-
-	ret = of_get_videomode(np, &ctx->vm, 0);
-	if (ret < 0)
-		return ret;
-
-	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
-	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
-	of_property_read_u32(np, "init-delay", &ctx->init_delay);
-	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
-	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
-
-	ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
-	ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
-
-	return 0;
-}
-
-static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
-{
-	struct device *dev = &dsi->dev;
-	struct s6e8aa0 *ctx;
-	int ret;
-
-	ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	mipi_dsi_set_drvdata(dsi, ctx);
-
-	ctx->dev = dev;
-
-	dsi->lanes = 4;
-	dsi->format = MIPI_DSI_FMT_RGB888;
-	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
-		| MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
-		| MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
-		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
-
-	ret = s6e8aa0_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
-
-	ctx->supplies[0].supply = "vdd3";
-	ctx->supplies[1].supply = "vci";
-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
-				      ctx->supplies);
-	if (ret < 0) {
-		dev_err(dev, "failed to get regulators: %d\n", ret);
-		return ret;
-	}
-
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(ctx->reset_gpio)) {
-		dev_err(dev, "cannot get reset-gpios %ld\n",
-			PTR_ERR(ctx->reset_gpio));
-		return PTR_ERR(ctx->reset_gpio);
-	}
-
-	ctx->brightness = GAMMA_LEVEL_NUM - 1;
-
-	drm_panel_init(&ctx->panel);
-	ctx->panel.dev = dev;
-	ctx->panel.funcs = &s6e8aa0_drm_funcs;
-
-	ret = drm_panel_add(&ctx->panel);
-	if (ret < 0)
-		return ret;
-
-	ret = mipi_dsi_attach(dsi);
-	if (ret < 0)
-		drm_panel_remove(&ctx->panel);
-
-	return ret;
-}
-
-static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
-{
-	struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
-
-	mipi_dsi_detach(dsi);
-	drm_panel_remove(&ctx->panel);
-
-	return 0;
-}
-
-static const struct of_device_id s6e8aa0_of_match[] = {
-	{ .compatible = "samsung,s6e8aa0" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
-
-static struct mipi_dsi_driver s6e8aa0_driver = {
-	.probe = s6e8aa0_probe,
-	.remove = s6e8aa0_remove,
-	.driver = {
-		.name = "panel_s6e8aa0",
-		.of_match_table = s6e8aa0_of_match,
-	},
-};
-module_mipi_dsi_driver(s6e8aa0_driver);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
-MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
-MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
-MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
-MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
-MODULE_LICENSE("GPL v2");
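
Both panel drivers in this series latch bus errors in the context's error field (documented in the struct comment above): once a transfer fails, every later write becomes a no-op, and a whole init sequence is checked once at the end via the clear-error helper. Below is a minimal, stand-alone sketch of that idiom; all names are illustrative, not part of the driver:

/*
 * Error-latching sketch: writes become no-ops once ctx->error is set,
 * so long command sequences can run unchecked and the first failure
 * is reported at the end. Illustrative names only.
 */
#include <stdio.h>

struct demo_ctx {
	int error;	/* 0 = ok, first negative errno is latched here */
};

static void demo_write(struct demo_ctx *ctx, int simulated_ret)
{
	if (ctx->error < 0)
		return;			/* earlier failure: skip transfer */
	if (simulated_ret < 0)
		ctx->error = simulated_ret;	/* latch first error */
}

static int demo_clear_error(struct demo_ctx *ctx)
{
	int ret = ctx->error;

	ctx->error = 0;
	return ret;
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	demo_write(&ctx, 0);	/* succeeds */
	demo_write(&ctx, -5);	/* fails, error latched */
	demo_write(&ctx, 0);	/* skipped, latched error preserved */
	printf("sequence result: %d\n", demo_clear_error(&ctx));
	return 0;
}
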
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
new file mode 100644
index 0000000..b202377
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -0,0 +1,389 @@
+/*
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/* Manufacturer Command Set */
+#define MCS_MANPWR		0xb0
+#define MCS_ELVSS_ON		0xb1
+#define MCS_USER_SETTING	0xf0
+#define MCS_DISPCTL		0xf2
+#define MCS_POWER_CTRL		0xf4
+#define MCS_GTCON		0xf7
+#define MCS_PANEL_CONDITION	0xf8
+#define MCS_GAMMA_SET1		0xf9
+#define MCS_GAMMA_CTRL		0xfb
+
+/* array of gamma tables for gamma value 2.2 */
+static u8 const ld9040_gammas[25][22] = {
+	{ 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
+	  0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
+	{ 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
+	  0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
+	{ 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
+	  0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
+	{ 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
+	  0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
+	{ 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
+	  0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
+	{ 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
+	  0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
+	{ 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
+	  0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
+	{ 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
+	  0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
+	{ 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
+	  0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
+	{ 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
+	  0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
+	{ 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
+	  0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
+	{ 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
+	  0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
+	{ 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
+	  0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
+	{ 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
+	  0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
+	{ 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
+	  0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
+	{ 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
+	  0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
+	{ 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
+	  0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
+	{ 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
+	  0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
+	{ 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
+	  0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
+	{ 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
+	  0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
+	{ 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
+	  0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
+	{ 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
+	  0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
+	{ 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
+	  0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
+	{ 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
+	  0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
+	{ 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
+	  0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
+};
+
+struct ld9040 {
+	struct device *dev;
+	struct drm_panel panel;
+
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *reset_gpio;
+	u32 power_on_delay;
+	u32 reset_delay;
+	struct videomode vm;
+	u32 width_mm;
+	u32 height_mm;
+
+	int brightness;
+
+	/* This field is tested by functions that access the bus directly,
+	 * before a transfer; the transfer is skipped if the field is set. On
+	 * transfer failure or an unexpected response the field is set to an
+	 * error value. This construct eliminates many checks in higher-level
+	 * functions.
+	 */
+	int error;
+};
+
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+	return container_of(panel, struct ld9040, panel);
+}
+
+static int ld9040_clear_error(struct ld9040 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
+static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
+{
+	struct spi_device *spi = to_spi_device(ctx->dev);
+	struct spi_transfer xfer = {
+		.len		= 2,
+		.tx_buf		= &data,
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(spi, &msg);
+}
+
+static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
+{
+	int ret = 0;
+
+	if (ctx->error < 0 || len == 0)
+		return;
+
+	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
+	ret = ld9040_spi_write_word(ctx, *data);
+
+	while (!ret && --len) {
+		++data;
+		ret = ld9040_spi_write_word(ctx, *data | 0x100);
+	}
+
+	if (ret) {
+		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+			(int)len, data);
+		ctx->error = ret;
+	}
+
+	usleep_range(300, 310);
+}
+
+#define ld9040_dcs_write_seq_static(ctx, seq...) \
+({\
+	static const u8 d[] = { seq };\
+	ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+static void ld9040_brightness_set(struct ld9040 *ctx)
+{
+	ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
+			 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
+
+	ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
+}
+
+static void ld9040_init(struct ld9040 *ctx)
+{
+	ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
+	ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
+		0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
+		0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
+		0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
+	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
+		0x02, 0x08, 0x08, 0x10, 0x10);
+	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
+	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
+		0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
+	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
+	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
+	ld9040_brightness_set(ctx);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int ld9040_power_on(struct ld9040 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	msleep(ctx->power_on_delay);
+	gpiod_set_value(ctx->reset_gpio, 0);
+	msleep(ctx->reset_delay);
+	gpiod_set_value(ctx->reset_gpio, 1);
+	msleep(ctx->reset_delay);
+
+	return 0;
+}
+
+static int ld9040_power_off(struct ld9040 *ctx)
+{
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int ld9040_disable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+
+	msleep(120);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+	msleep(40);
+
+	ld9040_clear_error(ctx);
+
+	return ld9040_power_off(ctx);
+}
+
+static int ld9040_prepare(struct drm_panel *panel)
+{
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+	int ret;
+
+	ret = ld9040_power_on(ctx);
+	if (ret < 0)
+		return ret;
+
+	ld9040_init(ctx);
+
+	ret = ld9040_clear_error(ctx);
+
+	if (ret < 0)
+		ld9040_unprepare(panel);
+
+	return ret;
+}
+
+static int ld9040_enable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int ld9040_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_ERROR("failed to create a new display mode\n");
+		return 0;
+	}
+
+	drm_display_mode_from_videomode(&ctx->vm, mode);
+	mode->width_mm = ctx->width_mm;
+	mode->height_mm = ctx->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs ld9040_drm_funcs = {
+	.disable = ld9040_disable,
+	.unprepare = ld9040_unprepare,
+	.prepare = ld9040_prepare,
+	.enable = ld9040_enable,
+	.get_modes = ld9040_get_modes,
+};
+
+static int ld9040_parse_dt(struct ld9040 *ctx)
+{
+	struct device *dev = ctx->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_get_videomode(np, &ctx->vm, 0);
+	if (ret < 0)
+		return ret;
+
+	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+	return 0;
+}
+
+static int ld9040_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct ld9040 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, ctx);
+
+	ctx->dev = dev;
+	ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
+
+	ret = ld9040_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->supplies[0].supply = "vdd3";
+	ctx->supplies[1].supply = "vci";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(ctx->reset_gpio));
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	spi->bits_per_word = 9;
+	ret = spi_setup(spi);
+	if (ret < 0) {
+		dev_err(dev, "spi setup failed.\n");
+		return ret;
+	}
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = dev;
+	ctx->panel.funcs = &ld9040_drm_funcs;
+
+	return drm_panel_add(&ctx->panel);
+}
+
+static int ld9040_remove(struct spi_device *spi)
+{
+	struct ld9040 *ctx = spi_get_drvdata(spi);
+
+	ld9040_power_off(ctx);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id ld9040_of_match[] = {
+	{ .compatible = "samsung,ld9040" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ld9040_of_match);
+
+static struct spi_driver ld9040_driver = {
+	.probe = ld9040_probe,
+	.remove = ld9040_remove,
+	.driver = {
+		.name = "panel-samsung-ld9040",
+		.owner = THIS_MODULE,
+		.of_match_table = ld9040_of_match,
+	},
+};
+module_spi_driver(ld9040_driver);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("ld9040 LCD Driver");
+MODULE_LICENSE("GPL v2");
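
ld9040_dcs_write() above frames each DCS sequence for the panel's 3-wire serial interface: every byte travels as a 9-bit word whose extra high bit distinguishes a command byte (0) from parameter bytes (1), which is also why probe sets spi->bits_per_word = 9. A user-space sketch of that framing, with hypothetical names:

/*
 * 9-bit DCS framing sketch: the leading command byte keeps the D/C
 * bit (bit 8) clear, every parameter byte gets it set, mirroring the
 * "*data | 0x100" loop in ld9040_dcs_write(). Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void frame_dcs(const uint8_t *seq, size_t len, uint16_t *out)
{
	size_t i;

	out[0] = seq[0];		/* command: D/C bit = 0 */
	for (i = 1; i < len; i++)
		out[i] = seq[i] | 0x100;	/* parameter: D/C bit = 1 */
}

int main(void)
{
	const uint8_t seq[] = { 0xf0, 0x5a, 0x5a };	/* MCS_USER_SETTING */
	uint16_t words[3];
	size_t i;

	frame_dcs(seq, 3, words);
	for (i = 0; i < 3; i++)
		printf("word %zu: 0x%03x\n", i, (unsigned)words[i]);
	return 0;
}
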
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
new file mode 100644
index 0000000..a188a39
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -0,0 +1,1067 @@
+/*
+ * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *
+ * Inki Dae, <inki.dae@samsung.com>
+ * Donghwa Lee, <dh09.lee@samsung.com>
+ * Joongmock Shin <jmock.shin@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#define LDI_MTP_LENGTH			24
+#define GAMMA_LEVEL_NUM			25
+#define GAMMA_TABLE_LEN			26
+
+#define PANELCTL_SS_MASK		(1 << 5)
+#define PANELCTL_SS_1_800		(0 << 5)
+#define PANELCTL_SS_800_1		(1 << 5)
+#define PANELCTL_GTCON_MASK		(7 << 2)
+#define PANELCTL_GTCON_110		(6 << 2)
+#define PANELCTL_GTCON_111		(7 << 2)
+
+#define PANELCTL_CLK1_CON_MASK		(7 << 3)
+#define PANELCTL_CLK1_000		(0 << 3)
+#define PANELCTL_CLK1_001		(1 << 3)
+#define PANELCTL_CLK2_CON_MASK		(7 << 0)
+#define PANELCTL_CLK2_000		(0 << 0)
+#define PANELCTL_CLK2_001		(1 << 0)
+
+#define PANELCTL_INT1_CON_MASK		(7 << 3)
+#define PANELCTL_INT1_000		(0 << 3)
+#define PANELCTL_INT1_001		(1 << 3)
+#define PANELCTL_INT2_CON_MASK		(7 << 0)
+#define PANELCTL_INT2_000		(0 << 0)
+#define PANELCTL_INT2_001		(1 << 0)
+
+#define PANELCTL_BICTL_CON_MASK		(7 << 3)
+#define PANELCTL_BICTL_000		(0 << 3)
+#define PANELCTL_BICTL_001		(1 << 3)
+#define PANELCTL_BICTLB_CON_MASK	(7 << 0)
+#define PANELCTL_BICTLB_000		(0 << 0)
+#define PANELCTL_BICTLB_001		(1 << 0)
+
+#define PANELCTL_EM_CLK1_CON_MASK	(7 << 3)
+#define PANELCTL_EM_CLK1_110		(6 << 3)
+#define PANELCTL_EM_CLK1_111		(7 << 3)
+#define PANELCTL_EM_CLK1B_CON_MASK	(7 << 0)
+#define PANELCTL_EM_CLK1B_110		(6 << 0)
+#define PANELCTL_EM_CLK1B_111		(7 << 0)
+
+#define PANELCTL_EM_CLK2_CON_MASK	(7 << 3)
+#define PANELCTL_EM_CLK2_110		(6 << 3)
+#define PANELCTL_EM_CLK2_111		(7 << 3)
+#define PANELCTL_EM_CLK2B_CON_MASK	(7 << 0)
+#define PANELCTL_EM_CLK2B_110		(6 << 0)
+#define PANELCTL_EM_CLK2B_111		(7 << 0)
+
+#define PANELCTL_EM_INT1_CON_MASK	(7 << 3)
+#define PANELCTL_EM_INT1_000		(0 << 3)
+#define PANELCTL_EM_INT1_001		(1 << 3)
+#define PANELCTL_EM_INT2_CON_MASK	(7 << 0)
+#define PANELCTL_EM_INT2_000		(0 << 0)
+#define PANELCTL_EM_INT2_001		(1 << 0)
+
+#define AID_DISABLE			(0x4)
+#define AID_1				(0x5)
+#define AID_2				(0x6)
+#define AID_3				(0x7)
+
+typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
+
+struct s6e8aa0_variant {
+	u8 version;
+	const s6e8aa0_gamma_table *gamma_tables;
+};
+
+struct s6e8aa0 {
+	struct device *dev;
+	struct drm_panel panel;
+
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *reset_gpio;
+	u32 power_on_delay;
+	u32 reset_delay;
+	u32 init_delay;
+	bool flip_horizontal;
+	bool flip_vertical;
+	struct videomode vm;
+	u32 width_mm;
+	u32 height_mm;
+
+	u8 version;
+	u8 id;
+	const struct s6e8aa0_variant *variant;
+	int brightness;
+
+	/* This field is tested by functions that access the DSI bus directly,
+	 * before a transfer; the transfer is skipped if the field is set. On
+	 * transfer failure or an unexpected response the field is set to an
+	 * error value. This construct eliminates many checks in higher-level
+	 * functions.
+	 */
+	int error;
+};
+
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+	return container_of(panel, struct s6e8aa0, panel);
+}
+
+static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
+static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	ssize_t ret;
+
+	if (ctx->error < 0)
+		return;
+
+	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
+	if (ret < 0) {
+		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+			(int)len, data);
+		ctx->error = ret;
+	}
+}
+
+static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->error < 0)
+		return ctx->error;
+
+	ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
+	if (ret < 0) {
+		dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
+		ctx->error = ret;
+	}
+
+	return ret;
+}
+
+#define s6e8aa0_dcs_write_seq(ctx, seq...) \
+({\
+	const u8 d[] = { seq };\
+	BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
+	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
+({\
+	static const u8 d[] = { seq };\
+	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
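
A note on the two macros just added: the _static variant keeps its byte array in read-only storage and therefore accepts only constants, while the plain variant builds the array on the stack so runtime-computed bytes (aid, clk_con, brightness, and so on) can be spliced in, with BUILD_BUG_ON_MSG capping the on-stack copy at 64 bytes. A user-space analogue, relying on the same GCC statement-expression and named-variadic-macro extensions the driver uses; all names are illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void dcs_write(const unsigned char *d, size_t len)
{
	printf("sending %zu byte(s), cmd 0x%02x\n", len, d[0]);
}

#define dcs_write_seq(seq...) \
({ \
	const unsigned char d[] = { seq }; /* stack: may hold variables */ \
	dcs_write(d, ARRAY_SIZE(d)); \
})

#define dcs_write_seq_static(seq...) \
({ \
	static const unsigned char d[] = { seq }; /* rodata: constants only */ \
	dcs_write(d, ARRAY_SIZE(d)); \
})

int main(void)
{
	unsigned char brightness = 0xd9;	/* runtime value */

	dcs_write_seq_static(0xf0, 0x5a, 0x5a);	/* constant unlock key */
	dcs_write_seq(0xb1, 0x04, brightness);	/* runtime byte allowed */
	return 0;
}
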
+
+static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
+}
+
+static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
+{
+	static const u8 aids[] = {
+		0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
+	};
+	u8 aid = aids[ctx->id >> 5];
+	u8 cfg = 0x3d;
+	u8 clk_con = 0xc8;
+	u8 int_con = 0x08;
+	u8 bictl_con = 0x48;
+	u8 em_clk1_con = 0xff;
+	u8 em_clk2_con = 0xff;
+	u8 em_int_con = 0xc8;
+
+	if (ctx->flip_vertical) {
+		/* GTCON */
+		cfg &= ~(PANELCTL_GTCON_MASK);
+		cfg |= (PANELCTL_GTCON_110);
+	}
+
+	if (ctx->flip_horizontal) {
+		/* SS */
+		cfg &= ~(PANELCTL_SS_MASK);
+		cfg |= (PANELCTL_SS_1_800);
+	}
+
+	if (ctx->flip_horizontal || ctx->flip_vertical) {
+		/* CLK1,2_CON */
+		clk_con &= ~(PANELCTL_CLK1_CON_MASK |
+			PANELCTL_CLK2_CON_MASK);
+		clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
+
+		/* INT1,2_CON */
+		int_con &= ~(PANELCTL_INT1_CON_MASK |
+			PANELCTL_INT2_CON_MASK);
+		int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
+
+		/* BICTL,B_CON */
+		bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
+			PANELCTL_BICTLB_CON_MASK);
+		bictl_con |= (PANELCTL_BICTL_000 |
+			PANELCTL_BICTLB_001);
+
+		/* EM_CLK1,1B_CON */
+		em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
+			PANELCTL_EM_CLK1B_CON_MASK);
+		em_clk1_con |= (PANELCTL_EM_CLK1_110 |
+			PANELCTL_EM_CLK1B_110);
+
+		/* EM_CLK2,2B_CON */
+		em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
+			PANELCTL_EM_CLK2B_CON_MASK);
+		em_clk2_con |= (PANELCTL_EM_CLK2_110 |
+			PANELCTL_EM_CLK2B_110);
+
+		/* EM_INT1,2_CON */
+		em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
+			PANELCTL_EM_INT2_CON_MASK);
+		em_int_con |= (PANELCTL_EM_INT1_000 |
+			PANELCTL_EM_INT2_001);
+	}
+
+	s6e8aa0_dcs_write_seq(ctx,
+		0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
+		0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
+		0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
+		0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
+		bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
+		em_int_con);
+}
+
+static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
+{
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write_seq_static(ctx,
+			0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
+			0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
+			0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
+			0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
+			0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
+		);
+	else
+		s6e8aa0_panel_cond_set_v142(ctx);
+}
+
+static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
+}
+
+static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
+}
+
+static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
+{
+	static const u8 pent32[] = {
+		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
+	};
+
+	static const u8 pent142[] = {
+		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
+	};
+
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
+	else
+		s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
+}
+
+static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
+{
+	static const u8 pwr142[] = {
+		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
+	};
+
+	static const u8 pwr32[] = {
+		0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
+	};
+
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
+	else
+		s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
+}
+
+static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
+{
+	u8 id = ctx->id ? 0 : 0x95;
+
+	s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
+}
+
+static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
+{
+	u8 br;
+
+	switch (ctx->brightness) {
+	case 0 ... 6: /* 30cd ~ 100cd */
+		br = 0xdf;
+		break;
+	case 7 ... 11: /* 120cd ~ 150cd */
+		br = 0xdd;
+		break;
+	case 12 ... 15: /* 180cd ~ 210cd */
+	default:
+		br = 0xd9;
+		break;
+	case 16 ... 24: /* 240cd ~ 300cd */
+		br = 0xd0;
+		break;
+	}
+
+	s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
+		0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
+}
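
The switch above uses GCC's case-range extension ("case low ... high:"), with the 180cd~210cd band doubling as the default for out-of-range levels. A stand-alone illustration of the construct, with thresholds mirroring the driver's brightness bands and a hypothetical helper name:

#include <stdio.h>

static unsigned char elvss_for_brightness(int brightness)
{
	switch (brightness) {
	case 0 ... 6:		/* 30cd ~ 100cd */
		return 0xdf;
	case 7 ... 11:		/* 120cd ~ 150cd */
		return 0xdd;
	case 16 ... 24:		/* 240cd ~ 300cd */
		return 0xd0;
	default:		/* 180cd ~ 210cd and out-of-range levels */
		return 0xd9;
	}
}

int main(void)
{
	printf("level 10 -> 0x%02x\n", elvss_for_brightness(10));
	printf("level 20 -> 0x%02x\n", elvss_for_brightness(20));
	return 0;
}
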
+
+static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
+{
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write_seq_static(ctx,
+			0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
+			0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
+	else
+		s6e8aa0_elvss_nvm_set_v142(ctx);
+}
+
+static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
+}
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
+		0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
+		0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
+		0x00, 0x70,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
+		0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
+		0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
+		0x00, 0x7d,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
+		0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
+		0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
+		0x00, 0x8f,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
+		0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
+		0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
+		0x00, 0x9e,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
+		0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
+		0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
+		0x00, 0xa4,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
+		0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
+		0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
+		0x00, 0xaa,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
+		0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
+		0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
+		0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
+		0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
+		0x00, 0xb9,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
+		0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
+		0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
+		0x00, 0xbf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
+		0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
+		0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
+		0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
+		0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
+		0x00, 0xc8,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
+		0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
+		0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
+		0x00, 0xcc,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
+		0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
+		0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
+		0x00, 0xcf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
+		0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
+		0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
+		0x00, 0xd4,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
+		0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
+		0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
+		0x00, 0xd8,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
+		0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
+		0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
+		0x00, 0xdc,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
+		0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
+		0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
+		0x00, 0xdf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+		0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
+		0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
+		0x00, 0xe2,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+		0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
+		0x00, 0xe6,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
+		0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
+		0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
+		0x00, 0xe9,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+		0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+		0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
+		0x00, 0xec,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+		0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
+		0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
+		0x00, 0xf0,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
+		0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
+		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
+		0x00, 0xf3,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
+		0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+		0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
+		0x00, 0xf6,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
+		0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+		0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
+		0x00, 0xfc,
+	},
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
+		0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
+		0x00, 0x5f,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
+		0xc1, 0xd2, 0xd1, 0xce,	0x00, 0x53, 0x00, 0x46,
+		0x00, 0x67,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
+		0xbd, 0xd2, 0xd2, 0xce,	0x00, 0x59, 0x00, 0x4b,
+		0x00, 0x6e,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
+		0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
+		0x00, 0x75,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
+		0xb9, 0xd0, 0xd1, 0xcd,	0x00, 0x63, 0x00, 0x54,
+		0x00, 0x7a,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
+		0xb9, 0xce, 0xce, 0xc9,	0x00, 0x68, 0x00, 0x59,
+		0x00, 0x81,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
+		0xb8, 0xcc, 0xcd, 0xc7,	0x00, 0x6c, 0x00, 0x5c,
+		0x00, 0x86,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
+		0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
+		0xb5, 0xca, 0xcc, 0xc5,	0x00, 0x74, 0x00, 0x63,
+		0x00, 0x90,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
+		0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
+		0xb4, 0xca, 0xcb, 0xc5,	0x00, 0x77, 0x00, 0x66,
+		0x00, 0x94,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
+		0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
+		0xb3, 0xc9, 0xca, 0xc3,	0x00, 0x7b, 0x00, 0x69,
+		0x00, 0x99,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
+		0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
+		0xb2, 0xca, 0xcb, 0xc4,	0x00, 0x7e, 0x00, 0x6c,
+		0x00, 0x9d,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
+		0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
+		0xb0, 0xc7, 0xc9, 0xc1,	0x00, 0x84, 0x00, 0x71,
+		0x00, 0xa5,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
+		0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
+		0xaf, 0xc7, 0xc9, 0xc1,	0x00, 0x87, 0x00, 0x73,
+		0x00, 0xa8,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
+		0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
+		0xaf, 0xc5, 0xc7, 0xbf,	0x00, 0x8a, 0x00, 0x76,
+		0x00, 0xac,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
+		0xce, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
+		0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8c, 0x00, 0x78,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
+		0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
+		0xae, 0xc5, 0xc6, 0xbe,	0x00, 0x91, 0x00, 0x7d,
+		0x00, 0xb6,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
+		0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
+		0xad, 0xc3, 0xc4, 0xbb,	0x00, 0x94, 0x00, 0x7f,
+		0x00, 0xba,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
+		0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
+		0xac, 0xc3, 0xc5, 0xbc,	0x00, 0x96, 0x00, 0x81,
+		0x00, 0xbd,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
+		0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
+		0xab, 0xc2, 0xc4, 0xbb,	0x00, 0x99, 0x00, 0x83,
+		0x00, 0xc0,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
+		0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
+		0xab, 0xc1, 0xc4, 0xba,	0x00, 0x9b, 0x00, 0x85,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
+		0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
+		0xab, 0xc1, 0xc2, 0xb9, 0x00, 0x9d, 0x00, 0x87,
+		0x00, 0xc6,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
+		0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
+		0xab, 0xbe, 0xc0, 0xb7,	0x00, 0xa1, 0x00, 0x8a,
+		0x00, 0xca,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
+		0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
+		0xa9, 0xbe, 0xc1, 0xb7,	0x00, 0xa3, 0x00, 0x8b,
+		0x00, 0xce,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
+		0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
+		0xa9, 0xbd, 0xc0, 0xb6,	0x00, 0xa5, 0x00, 0x8d,
+		0x00, 0xd0,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
+		0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
+		0xa8, 0xbe, 0xc0, 0xb7,	0x00, 0xa8, 0x00, 0x90,
+		0x00, 0xd3,
+	}
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
+		0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
+		0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
+		0x00, 0x58,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
+		0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
+		0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
+		0x00, 0x64,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
+		0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
+		0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
+		0x00, 0x74,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
+		0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
+		0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
+		0x00, 0x80,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
+		0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
+		0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
+		0x00, 0x85,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
+		0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
+		0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
+		0x00, 0x8a,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
+		0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
+		0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
+		0x00, 0x8e,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
+		0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
+		0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
+		0x00, 0x96,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
+		0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
+		0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
+		0x00, 0x9b,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
+		0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
+		0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
+		0x00, 0x9e,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
+		0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
+		0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
+		0x00, 0xa2,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
+		0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
+		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
+		0x00, 0xa5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
+		0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
+		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
+		0x00, 0xa8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
+		0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
+		0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
+		0x00, 0xac,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
+		0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
+		0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
+		0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
+		0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
+		0x00, 0xb2,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
+		0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
+		0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
+		0x00, 0xb5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+		0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
+		0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
+		0x00, 0xb8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+		0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
+		0x00, 0xbb,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
+		0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
+		0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
+		0x00, 0xbd,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+		0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
+		0x00, 0xc0,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+		0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
+		0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
+		0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
+		0x00, 0xc5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
+		0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
+		0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
+		0x00, 0xc8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
+		0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+		0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
+		0x00, 0xcc,
+	},
+};
+
+static const struct s6e8aa0_variant s6e8aa0_variants[] = {
+	{
+		.version = 32,
+		.gamma_tables = s6e8aa0_gamma_tables_v32,
+	}, {
+		.version = 96,
+		.gamma_tables = s6e8aa0_gamma_tables_v96,
+	}, {
+		.version = 142,
+		.gamma_tables = s6e8aa0_gamma_tables_v142,
+	}, {
+		.version = 210,
+		.gamma_tables = s6e8aa0_gamma_tables_v142,
+	}
+};
+
+static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
+{
+	const u8 *gamma;
+
+	if (ctx->error)
+		return;
+
+	gamma = ctx->variant->gamma_tables[ctx->brightness];
+
+	if (ctx->version >= 142)
+		s6e8aa0_elvss_nvm_set(ctx);
+
+	s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
+
+	/* update gamma table. */
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
+}
+
+static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_apply_level_1_key(ctx);
+	s6e8aa0_apply_level_2_key(ctx);
+	msleep(20);
+
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	msleep(40);
+
+	s6e8aa0_panel_cond_set(ctx);
+	s6e8aa0_display_condition_set(ctx);
+	s6e8aa0_brightness_set(ctx);
+	s6e8aa0_etc_source_control(ctx);
+	s6e8aa0_etc_pentile_control(ctx);
+	s6e8aa0_elvss_nvm_set(ctx);
+	s6e8aa0_etc_power_control(ctx);
+	s6e8aa0_etc_elvss_control(ctx);
+	msleep(ctx->init_delay);
+}
+
+static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
+						   u16 size)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->error < 0)
+		return;
+
+	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
+	if (ret < 0) {
+		dev_err(ctx->dev,
+			"error %d setting maximum return packet size to %d\n",
+			ret, size);
+		ctx->error = ret;
+	}
+}
+
+static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
+{
+	u8 id[3];
+	int ret, i;
+
+	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
+	if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+		dev_err(ctx->dev, "read id failed\n");
+		ctx->error = -EIO;
+		return;
+	}
+
+	dev_info(ctx->dev, "ID: 0x%02x, 0x%02x, 0x%02x\n", id[0], id[1], id[2]);
+
+	for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
+		if (id[1] == s6e8aa0_variants[i].version)
+			break;
+	}
+	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
+		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
+		ctx->error = -EINVAL;
+		return;
+	}
+
+	ctx->variant = &s6e8aa0_variants[i];
+	ctx->version = id[1];
+	ctx->id = id[2];
+}
+
+static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_set_maximum_return_packet_size(ctx, 3);
+	s6e8aa0_read_mtp_id(ctx);
+	s6e8aa0_panel_init(ctx);
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	msleep(ctx->power_on_delay);
+
+	gpiod_set_value(ctx->reset_gpio, 0);
+	usleep_range(10000, 11000);
+	gpiod_set_value(ctx->reset_gpio, 1);
+
+	msleep(ctx->reset_delay);
+
+	return 0;
+}
+
+static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
+{
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int s6e8aa0_disable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	msleep(40);
+
+	s6e8aa0_clear_error(ctx);
+
+	return s6e8aa0_power_off(ctx);
+}
+
+static int s6e8aa0_prepare(struct drm_panel *panel)
+{
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+	int ret;
+
+	ret = s6e8aa0_power_on(ctx);
+	if (ret < 0)
+		return ret;
+
+	s6e8aa0_set_sequence(ctx);
+	ret = ctx->error;
+
+	if (ret < 0)
+		s6e8aa0_unprepare(panel);
+
+	return ret;
+}
+
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int s6e8aa0_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_ERROR("failed to create a new display mode\n");
+		return 0;
+	}
+
+	drm_display_mode_from_videomode(&ctx->vm, mode);
+	mode->width_mm = ctx->width_mm;
+	mode->height_mm = ctx->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
+	.disable = s6e8aa0_disable,
+	.unprepare = s6e8aa0_unprepare,
+	.prepare = s6e8aa0_prepare,
+	.enable = s6e8aa0_enable,
+	.get_modes = s6e8aa0_get_modes,
+};
+
+static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
+{
+	struct device *dev = ctx->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_get_videomode(np, &ctx->vm, 0);
+	if (ret < 0)
+		return ret;
+
+	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+	of_property_read_u32(np, "init-delay", &ctx->init_delay);
+	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+	ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
+	ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
+
+	return 0;
+}
+
+static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct s6e8aa0 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+		| MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
+		| MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
+		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+
+	ret = s6e8aa0_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->supplies[0].supply = "vdd3";
+	ctx->supplies[1].supply = "vci";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0) {
+		dev_err(dev, "failed to get regulators: %d\n", ret);
+		return ret;
+	}
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(ctx->reset_gpio));
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->brightness = GAMMA_LEVEL_NUM - 1;
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = dev;
+	ctx->panel.funcs = &s6e8aa0_drm_funcs;
+
+	ret = drm_panel_add(&ctx->panel);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0)
+		drm_panel_remove(&ctx->panel);
+
+	return ret;
+}
+
+static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
+{
+	struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id s6e8aa0_of_match[] = {
+	{ .compatible = "samsung,s6e8aa0" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
+
+static struct mipi_dsi_driver s6e8aa0_driver = {
+	.probe = s6e8aa0_probe,
+	.remove = s6e8aa0_remove,
+	.driver = {
+		.name = "panel-samsung-s6e8aa0",
+		.of_match_table = s6e8aa0_of_match,
+	},
+};
+module_mipi_dsi_driver(s6e8aa0_driver);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
+MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
+MODULE_LICENSE("GPL v2");
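
The panel setup above hinges on the MTP ID read: byte 1 of the 0xd1 response selects an entry in s6e8aa0_variants (v210 panels reuse the v142 gamma set), and the brightness level then picks one 26-byte gamma blob out of that variant's table set. A minimal userspace sketch of just that lookup, with the table payloads stubbed out:

/*
 * Minimal sketch of the variant/gamma lookup above; the table payloads
 * are placeholders, only the indexing logic mirrors the driver.
 */
#include <stdio.h>

#define GAMMA_LEVEL_NUM	25
#define GAMMA_TABLE_LEN	26

typedef unsigned char s6e8aa0_gamma_table[GAMMA_TABLE_LEN];

struct s6e8aa0_variant {
	unsigned char version;			/* MTP ID byte 1 */
	const s6e8aa0_gamma_table *gamma_tables;
};

static const s6e8aa0_gamma_table tables_v32[GAMMA_LEVEL_NUM]; /* stub data */

static const struct s6e8aa0_variant variants[] = {
	{ .version = 32, .gamma_tables = tables_v32 },
	/* v96/v142 omitted here; v210 reuses the v142 tables */
};

int main(void)
{
	unsigned char id1 = 32;	/* would come from the 0xd1 DCS read */
	unsigned int brightness = GAMMA_LEVEL_NUM - 1; /* probe default */
	unsigned int i;

	for (i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
		if (variants[i].version == id1)
			break;
	if (i == sizeof(variants) / sizeof(variants[0]))
		return 1;	/* unsupported display version */

	/* the 26-byte blob that s6e8aa0_brightness_set() sends */
	const unsigned char *gamma = variants[i].gamma_tables[brightness];

	printf("first gamma byte: 0x%02x\n", gamma[0]);
	return 0;
}
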
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f94201b..f97b73e 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -713,7 +713,12 @@
 	.hactive = { 1280, 1280, 1280 },
 	.hfront_porch = { 1, 1, 10 },
 	.hback_porch = { 1, 1, 10 },
-	.hsync_len = { 52, 158, 661 },
+	/*
+	 * According to the data sheet, the minimum horizontal blanking interval
+	 * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
+	 * minimum working horizontal blanking interval to be 60 clocks.
+	 */
+	.hsync_len = { 58, 158, 661 },
 	.vactive = { 800, 800, 800 },
 	.vfront_porch = { 1, 1, 10 },
 	.vback_porch = { 1, 1, 10 },
@@ -729,6 +734,7 @@
 		.width = 151,
 		.height = 94,
 	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
 };
 
 static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -943,6 +949,60 @@
 	},
 };
 
+static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
+	.clock = 10870,
+	.hdisplay = 480,
+	.hsync_start = 480 + 2,
+	.hsync_end = 480 + 2 + 41,
+	.htotal = 480 + 2 + 41 + 2,
+	.vdisplay = 272,
+	.vsync_start = 272 + 2,
+	.vsync_end = 272 + 2 + 4,
+	.vtotal = 272 + 2 + 4 + 2,
+	.vrefresh = 74,
+};
+
+static const struct panel_desc nec_nl4827hc19_05b = {
+	.modes = &nec_nl4827hc19_05b_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 95,
+		.height = 54,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static const struct display_timing okaya_rs800480t_7x0gp_timing = {
+	.pixelclock = { 30000000, 30000000, 40000000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 40, 40, 40 },
+	.hback_porch = { 40, 40, 40 },
+	.hsync_len = { 1, 48, 48 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 13, 13, 13 },
+	.vback_porch = { 29, 29, 29 },
+	.vsync_len = { 3, 3, 3 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc okaya_rs800480t_7x0gp = {
+	.timings = &okaya_rs800480t_7x0gp_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 154,
+		.height = 87,
+	},
+	.delay = {
+		.prepare = 41,
+		.enable = 50,
+		.unprepare = 41,
+		.disable = 50,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
 	.clock = 25000,
 	.hdisplay = 480,
@@ -1113,6 +1173,12 @@
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
+		.compatible = "nec,nl4827hc19-05b",
+		.data = &nec_nl4827hc19_05b,
+	}, {
+		.compatible = "okaya,rs800480t-7x0gp",
+		.data = &okaya_rs800480t_7x0gp,
+	}, {
 		.compatible = "ortustech,com43h4m85ulc",
 		.data = &ortustech_com43h4m85ulc,
 	}, {
@@ -1169,6 +1235,34 @@
 	unsigned int lanes;
 };
 
+static const struct drm_display_mode auo_b080uan01_mode = {
+	.clock = 154500,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 62,
+	.hsync_end = 1200 + 62 + 4,
+	.htotal = 1200 + 62 + 4 + 62,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 9,
+	.vsync_end = 1920 + 9 + 2,
+	.vtotal = 1920 + 9 + 2 + 8,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b080uan01 = {
+	.desc = {
+		.modes = &auo_b080uan01_mode,
+		.num_modes = 1,
+		.bpc = 8,
+		.size = {
+			.width = 108,
+			.height = 272,
+		},
+	},
+	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+};
+
 static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
 	.clock = 71000,
 	.hdisplay = 800,
@@ -1256,6 +1350,9 @@
 
 static const struct of_device_id dsi_of_match[] = {
 	{
+		.compatible = "auo,b080uan01",
+		.data = &auo_b080uan01
+	}, {
 		.compatible = "lg,ld070wx3-sl01",
 		.data = &lg_ld070wx3_sl01
 	}, {
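
As a quick consistency check on fixed modes like the ones added above, the .vrefresh field should match clock / (htotal * vtotal). A sketch of that arithmetic against the nec_nl4827hc19_05b numbers:

/* Cross-check a fixed mode's refresh rate from its totals; numbers
 * taken from nec_nl4827hc19_05b_mode above (clock is in kHz). */
#include <stdio.h>

int main(void)
{
	long clock_khz = 10870;
	long htotal = 480 + 2 + 41 + 2;		/* 525 */
	long vtotal = 272 + 2 + 4 + 2;		/* 280 */

	/* rounded to the nearest Hz */
	long refresh = (clock_khz * 1000 + htotal * vtotal / 2) /
		       (htotal * vtotal);

	printf("computed %ld Hz, struct says .vrefresh = 74\n", refresh);
	return 0;
}
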
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e..41c422f 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -207,7 +207,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -217,7 +217,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -345,7 +345,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct qxl_bo *qbo = NULL;
-	struct device *device = &qdev->pdev->dev;
 	int ret;
 	int size;
 	int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@
 		 shadow);
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -388,7 +387,7 @@
 
 	/* setup helper with fb data */
 	qfbdev->helper.fb = fb;
-	qfbdev->helper.fbdev = info;
+
 	qfbdev->shadow = shadow;
 	strcpy(info->fix.id, "qxldrmfb");
 
@@ -410,11 +409,6 @@
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = qdev->vram_size;
 
@@ -423,13 +417,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@
 	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 out_unref:
 	if (qbo) {
 		ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@
 
 static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
 {
-	struct fb_info *info;
 	struct qxl_framebuffer *qfb = &qfbdev->qfb;
 
-	if (qfbdev->helper.fbdev) {
-		info = qfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&qfbdev->helper);
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (qfb->obj) {
 		qxlfb_destroy_pinned_object(qfb->obj);
 		qfb->obj = NULL;
@@ -557,7 +543,7 @@
 
 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
 {
-	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
 }
 
 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
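
The qxl conversion above is the same pattern the radeon and rockchip fbdev code receives later in this series: fb_info, cmap and aperture allocation collapse into drm_fb_helper_alloc_fbi(), teardown into the drm_fb_helper_unregister_fbi()/drm_fb_helper_release_fbi() pair, and the sys_*/cfb_* blit ops become their drm_fb_helper_* wrappers. A schematic of the resulting probe shape (in-kernel only; example_setup_fb() is a hypothetical stand-in for the driver-specific steps):

#include <linux/err.h>
#include <drm/drm_fb_helper.h>

/* hypothetical driver-specific setup, elided */
static int example_setup_fb(struct fb_info *info)
{
	return 0;
}

static int example_fbdev_probe(struct drm_fb_helper *helper)
{
	struct fb_info *info;
	int ret;

	/* replaces framebuffer_alloc() + fb_alloc_cmap() +
	 * alloc_apertures(); failure comes back as an ERR_PTR */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	ret = example_setup_fb(info);
	if (ret) {
		drm_fb_helper_release_fbi(helper); /* single teardown call */
		return ret;
	}

	return 0;
}
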
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33d..b28370e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@
 		return;
 	dev_err(qdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
-		mutex_lock(&qdev->ddev->struct_mutex);
 		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&qdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&qdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index f81e0d7..9cd49c5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -171,8 +171,9 @@
 		return -E2BIG;
 
 	tx_buf[0] = msg->address & 0xff;
-	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
+	tx_buf[1] = (msg->address >> 8) & 0xff;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
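
This hunk (and the matching radeon_dp_auxch.c fix further down) packs the full 20-bit DP AUX address: the low 16 bits fill the first two header bytes, and bits 19:16 share the third byte with the 4-bit request code. The layout, checkable in userspace:

/* DP AUX header packing sketch, mirroring the hunk above. */
#include <stdio.h>

int main(void)
{
	unsigned int address = 0x45678;	/* arbitrary 20-bit AUX address */
	unsigned int request = 0x9;	/* 4-bit request code */
	unsigned char tx_buf[3];

	tx_buf[0] = address & 0xff;		/* address bits 7:0 */
	tx_buf[1] = (address >> 8) & 0xff;	/* address bits 15:8 */
	tx_buf[2] = (request << 4) |
		    ((address >> 16) & 0xf);	/* request | bits 19:16 */

	printf("%02x %02x %02x\n", tx_buf[0], tx_buf[1], tx_buf[2]);
	/* prints: 78 56 94 */
	return 0;
}
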
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 44480c1..7520727 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -76,16 +76,35 @@
 
 struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
 {
-	int i;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	struct r600_audio_pin *pin = NULL;
+	int i, pin_count;
 
 	dce6_afmt_get_connected_pins(rdev);
 
 	for (i = 0; i < rdev->audio.num_pins; i++) {
-		if (rdev->audio.pin[i].connected)
-			return &rdev->audio.pin[i];
+		if (rdev->audio.pin[i].connected) {
+			pin = &rdev->audio.pin[i];
+			pin_count = 0;
+
+			list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+				if (radeon_encoder_is_digital(encoder)) {
+					radeon_encoder = to_radeon_encoder(encoder);
+					dig = radeon_encoder->enc_priv;
+					if (dig->pin == pin)
+						pin_count++;
+				}
+			}
+
+			if (pin_count == 0)
+				return pin;
+		}
 	}
-	DRM_ERROR("No connected audio pins found!\n");
-	return NULL;
+	if (!pin)
+		DRM_ERROR("No connected audio pins found!\n");
+	return pin;
 }
 
 void dce6_afmt_select_pin(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fbc8d88..2c02e99 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -522,13 +522,15 @@
 		return err;
 	}
 
-	if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-		if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
-		else
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-	} else {
-		frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+	if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+		if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+			if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+			else
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+		} else {
+			frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+		}
 	}
 
 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
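
With the new outer check, the quantization-range selection reduces to three cases. A compact restatement of the logic (the enum values stand in for the HDMI_QUANTIZATION_RANGE_* constants; in the real hunk the bypass case simply skips the assignment and leaves the infoframe's default in place):

/* Decision sketch for the quantization-range hunk above. */
enum range { RANGE_DEFAULT, RANGE_LIMITED, RANGE_FULL };

static enum range pick_range(int csc_bypass, int rgb_range_selectable,
			     int csc_tvrgb)
{
	if (csc_bypass)
		return RANGE_DEFAULT;	/* infoframe left untouched */
	if (!rgb_range_selectable)
		return RANGE_DEFAULT;
	return csc_tvrgb ? RANGE_LIMITED : RANGE_FULL;
}
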
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c097d3a..a9b01bc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3387,6 +3387,14 @@
 	    rdev->pdev->subsystem_device == 0x30ae)
 		return;
 
+	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 94b21ae..5a2cafb 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -95,6 +95,11 @@
 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* Don't try to start link training before we
+				 * have the dpcd */
+				if (!radeon_dp_getdpcd(radeon_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index fcbd60b..3b0c229 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -116,8 +116,8 @@
 	       AUX_SW_WR_BYTES(bytes));
 
 	/* write the data header into the registers */
-	/* request, addres, msg size */
-	byte = (msg->request << 4);
+	/* request, address, msg size */
+	byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
 	WREG32(AUX_SW_DATA + aux_offset[instance],
 	       AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
 
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be..5e09c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -246,9 +246,10 @@
 	kfree(radeon_connector);
 }
 
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
 {
 	DRM_DEBUG_KMS("\n");
+	return 0;
 }
 
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@
 
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	radeon_fb_add_connector(rdev, connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 
 	drm_connector_register(connector);
 	return connector;
@@ -303,14 +303,12 @@
 
 	drm_connector_unregister(connector);
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	/* dpms off */
 	radeon_fb_remove_connector(rdev, connector);
 
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-	drm_reinit_primary_mode_group(dev);
-
+	drm_modeset_unlock_all(dev);
 
 	kfree(connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index aeb6767..7214858 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = radeon_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct radeon_bo *rbo = NULL;
-	struct device *device = &rdev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -250,9 +249,9 @@
 	rbo = gem_to_radeon_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -262,14 +261,13 @@
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
@@ -289,11 +287,6 @@
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = rdev->mc.aper_size;
 
@@ -301,13 +294,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -319,6 +306,8 @@
 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -339,17 +328,10 @@
 
 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct radeon_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index e476c33..9a4d69e 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -845,7 +845,8 @@
 		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
 		break;
 
-	case KGD_ENGINE_SDMA:
+	case KGD_ENGINE_SDMA1:
+	case KGD_ENGINE_SDMA2:
 		hdr = (const union radeon_firmware_header *)
 							rdev->sdma_fw->data;
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6763627..d302488 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -419,7 +419,6 @@
 	}
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		mutex_lock(&rdev->ddev->struct_mutex);
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -427,8 +426,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a..05751f3 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -253,7 +253,6 @@
 	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 		return;
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -268,7 +267,6 @@
 			/* needs a GPU reset dont reset here */
 			mutex_unlock(&rdev->ring_lock);
 			up_write(&rdev->pm.mclk_lock);
-			mutex_unlock(&rdev->ddev->struct_mutex);
 			return;
 		}
 	}
@@ -304,7 +302,6 @@
 
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -1062,7 +1059,6 @@
 		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -1113,7 +1109,6 @@
 done:
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 65d6ba6..48cb199 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -496,7 +496,8 @@
 	return true;
 }
 
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_pending_vblank_event *event = crtc->state->event;
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@
 	}
 }
 
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
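The prototype change here tracks a core helper update: atomic_begin and atomic_flush now receive the old CRTC state. A driver stub wired up with the new signatures would look like:

#include <drm/drm_crtc_helper.h>

static void example_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	/* arm pending vblank events before the plane updates land */
}

static void example_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	/* commit the accumulated plane/CRTC updates to hardware */
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.atomic_begin = example_crtc_atomic_begin,
	.atomic_flush = example_crtc_atomic_flush,
	/* remaining callbacks elided */
};
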
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f..f261512 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@
 static struct fb_ops rockchip_drm_fbdev_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap	= rockchip_fbdev_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -77,10 +77,10 @@
 
 	private->fbdev_bo = &rk_obj->base;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		dev_err(dev->dev, "Failed to create framebuffer info.\n");
+		ret = PTR_ERR(fbi);
 		goto err_rockchip_gem_free_object;
 	}
 
@@ -89,21 +89,13 @@
 	if (IS_ERR(helper->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
-		goto err_framebuffer_release;
+		goto err_release_fbi;
 	}
 
-	helper->fbdev = fbi;
-
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &rockchip_drm_fbdev_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_framebuffer_unref;
-	}
-
 	fb = helper->fb;
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@
 
 	return 0;
 
-err_drm_framebuffer_unref:
-	drm_framebuffer_unreference(helper->fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
 err_rockchip_gem_free_object:
 	rockchip_gem_free_object(&rk_obj->base);
 	return ret;
@@ -190,21 +180,8 @@
 
 	helper = &private->fbdev_helper;
 
-	if (helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
-				      ret);
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	if (helper->fb)
 		drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eba5f8a..a6d9104 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -200,13 +200,10 @@
 	struct drm_gem_object *obj;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	ret = drm_gem_create_mmap_offset(obj);
@@ -217,10 +214,9 @@
 	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
 
 out:
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 34b78e7..5d8ae5e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -50,6 +50,8 @@
 
 #define VOP_WIN_SET(x, win, name, v) \
 		REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_SCL_SET(x, win, name, v) \
+		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
 #define VOP_CTRL_SET(x, name, v) \
 		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
 
@@ -164,7 +166,37 @@
 	struct vop_reg vpost_st_end;
 };
 
+struct vop_scl_regs {
+	struct vop_reg cbcr_vsd_mode;
+	struct vop_reg cbcr_vsu_mode;
+	struct vop_reg cbcr_hsd_mode;
+	struct vop_reg cbcr_ver_scl_mode;
+	struct vop_reg cbcr_hor_scl_mode;
+	struct vop_reg yrgb_vsd_mode;
+	struct vop_reg yrgb_vsu_mode;
+	struct vop_reg yrgb_hsd_mode;
+	struct vop_reg yrgb_ver_scl_mode;
+	struct vop_reg yrgb_hor_scl_mode;
+	struct vop_reg line_load_mode;
+	struct vop_reg cbcr_axi_gather_num;
+	struct vop_reg yrgb_axi_gather_num;
+	struct vop_reg vsd_cbcr_gt2;
+	struct vop_reg vsd_cbcr_gt4;
+	struct vop_reg vsd_yrgb_gt2;
+	struct vop_reg vsd_yrgb_gt4;
+	struct vop_reg bic_coe_sel;
+	struct vop_reg cbcr_axi_gather_en;
+	struct vop_reg yrgb_axi_gather_en;
+
+	struct vop_reg lb_mode;
+	struct vop_reg scale_yrgb_x;
+	struct vop_reg scale_yrgb_y;
+	struct vop_reg scale_cbcr_x;
+	struct vop_reg scale_cbcr_y;
+};
+
 struct vop_win_phy {
+	const struct vop_scl_regs *scl;
 	const uint32_t *data_formats;
 	uint32_t nformats;
 
@@ -222,7 +254,36 @@
 	DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs win_full_scl = {
+	.cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
+	.cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
+	.cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
+	.cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
+	.cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
+	.yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
+	.yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
+	.yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
+	.yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
+	.yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
+	.line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
+	.cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
+	.yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
+	.vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
+	.vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
+	.vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
+	.vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
+	.bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
+	.cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
+	.yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
+	.lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
+	.scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+	.scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+	.scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+	.scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
 static const struct vop_win_phy win01_data = {
+	.scl = &win_full_scl,
 	.data_formats = formats_01,
 	.nformats = ARRAY_SIZE(formats_01),
 	.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
@@ -279,6 +340,12 @@
 	{DSP_CTRL0, 0x00000000},
 	{WIN0_CTRL0, 0x00000080},
 	{WIN1_CTRL0, 0x00000080},
+	/* TODO: Win2/3 support the multi-area function, but we haven't found
+	 * a suitable way to use it yet, so just treat them like the other
+	 * windows, with only area 0 enabled.
+	 */
+	{WIN2_CTRL0, 0x00000010},
+	{WIN3_CTRL0, 0x00000010},
 };
 
 /*
@@ -393,6 +460,18 @@
 	}
 }
 
+static bool is_yuv_support(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static bool is_alpha_support(uint32_t format)
 {
 	switch (format) {
@@ -404,6 +483,126 @@
 	}
 }
 
+static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
+				  uint32_t dst, bool is_horizontal,
+				  int vsu_mode, int *vskiplines)
+{
+	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+
+	if (is_horizontal) {
+		if (mode == SCALE_UP)
+			val = GET_SCL_FT_BIC(src, dst);
+		else if (mode == SCALE_DOWN)
+			val = GET_SCL_FT_BILI_DN(src, dst);
+	} else {
+		if (mode == SCALE_UP) {
+			if (vsu_mode == SCALE_UP_BIL)
+				val = GET_SCL_FT_BILI_UP(src, dst);
+			else
+				val = GET_SCL_FT_BIC(src, dst);
+		} else if (mode == SCALE_DOWN) {
+			if (vskiplines) {
+				*vskiplines = scl_get_vskiplines(src, dst);
+				val = scl_get_bili_dn_vskip(src, dst,
+							    *vskiplines);
+			} else {
+				val = GET_SCL_FT_BILI_DN(src, dst);
+			}
+		}
+	}
+
+	return val;
+}
+
+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+			     uint32_t src_w, uint32_t src_h, uint32_t dst_w,
+			     uint32_t dst_h, uint32_t pixel_format)
+{
+	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
+	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
+	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
+	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
+	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
+	bool is_yuv = is_yuv_support(pixel_format);
+	uint16_t cbcr_src_w = src_w / hsub;
+	uint16_t cbcr_src_h = src_h / vsub;
+	uint16_t vsu_mode;
+	uint16_t lb_mode;
+	uint32_t val;
+	int vskiplines = 0;
+
+	if (dst_w > 3840) {
+		DRM_ERROR("Maximum destination width (3840) exceeded\n");
+		return;
+	}
+
+	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+
+	if (is_yuv) {
+		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+		if (cbcr_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
+		else
+			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
+	} else {
+		if (yrgb_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
+		else
+			lb_mode = scl_vop_cal_lb_mode(src_w, false);
+	}
+
+	VOP_SCL_SET(vop, win, lb_mode, lb_mode);
+	if (lb_mode == LB_RGB_3840X2) {
+		if (yrgb_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("yrgb vertical scaling is not allowed\n");
+			return;
+		}
+		if (cbcr_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("cbcr vertical scaling is not allowed\n");
+			return;
+		}
+		vsu_mode = SCALE_UP_BIL;
+	} else if (lb_mode == LB_RGB_2560X4) {
+		vsu_mode = SCALE_UP_BIL;
+	} else {
+		vsu_mode = SCALE_UP_BIC;
+	}
+
+	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
+				true, 0, NULL);
+	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
+	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
+				false, vsu_mode, &vskiplines);
+	VOP_SCL_SET(vop, win, scale_yrgb_y, val);
+
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4);
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2);
+
+	VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode);
+	if (is_yuv) {
+		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
+					dst_w, true, 0, NULL);
+		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
+		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
+					dst_h, false, vsu_mode, &vskiplines);
+		VOP_SCL_SET(vop, win, scale_cbcr_y, val);
+
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4);
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2);
+		VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode);
+	}
+}
+
 static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
 {
 	unsigned long flags;
@@ -478,6 +677,7 @@
 		goto err_disable_aclk;
 	}
 
+	memcpy(vop->regs, vop->regsbak, vop->len);
 	/*
 	 * At this point, the vop clock and iommu are enabled, so R/W of vop regs is safe.
 	 */
@@ -598,17 +798,22 @@
 	struct vop *vop = to_vop(crtc);
 	struct drm_gem_object *obj;
 	struct rockchip_gem_object *rk_obj;
+	struct drm_gem_object *uv_obj;
+	struct rockchip_gem_object *rk_uv_obj;
 	unsigned long offset;
 	unsigned int actual_w;
 	unsigned int actual_h;
 	unsigned int dsp_stx;
 	unsigned int dsp_sty;
 	unsigned int y_vir_stride;
+	unsigned int uv_vir_stride = 0;
 	dma_addr_t yrgb_mst;
+	dma_addr_t uv_mst = 0;
 	enum vop_data_format format;
 	uint32_t val;
 	bool is_alpha;
 	bool rb_swap;
+	bool is_yuv;
 	bool visible;
 	int ret;
 	struct drm_rect dest = {
@@ -629,11 +834,15 @@
 		.y2 = crtc->mode.vdisplay,
 	};
 	bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+					DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+					DRM_PLANE_HELPER_NO_SCALING;
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb,
 					    &src, &dest, &clip,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
+					    min_scale,
+					    max_scale,
 					    can_position, false, &visible);
 	if (ret)
 		return ret;
@@ -643,6 +852,8 @@
 
 	is_alpha = is_alpha_support(fb->pixel_format);
 	rb_swap = has_rb_swapped(fb->pixel_format);
+	is_yuv = is_yuv_support(fb->pixel_format);
+
 	format = vop_convert_format(fb->pixel_format);
 	if (format < 0)
 		return format;
@@ -655,19 +866,46 @@
 
 	rk_obj = to_rockchip_obj(obj);
 
+	if (is_yuv) {
+		/*
+		 * Src.x1 can be odd when do clip, but yuv plane start point
+		 * need align with 2 pixel.
+		 */
+		val = (src.x1 >> 16) % 2;
+		src.x1 += val << 16;
+		src.x2 += val << 16;
+	}
+
 	actual_w = (src.x2 - src.x1) >> 16;
 	actual_h = (src.y2 - src.y1) >> 16;
-	crtc_x = max(0, crtc_x);
-	crtc_y = max(0, crtc_y);
 
-	dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
-	dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+	dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
+	dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
 
-	offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+	offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
 	offset += (src.y1 >> 16) * fb->pitches[0];
-	yrgb_mst = rk_obj->dma_addr + offset;
 
-	y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+	yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+	y_vir_stride = fb->pitches[0] >> 2;
+
+	if (is_yuv) {
+		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+
+		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+		if (!uv_obj) {
+			DRM_ERROR("fail to get uv object from framebuffer\n");
+			return -EINVAL;
+		}
+		rk_uv_obj = to_rockchip_obj(uv_obj);
+		uv_vir_stride = fb->pitches[1] >> 2;
+
+		offset = (src.x1 >> 16) * bpp / hsub;
+		offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
+
+		uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
+	}
 
 	/*
 	 * If this plane update changes the plane's framebuffer, (or more
@@ -704,9 +942,22 @@
 	VOP_WIN_SET(vop, win, format, format);
 	VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
 	VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+	if (is_yuv) {
+		VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
+		VOP_WIN_SET(vop, win, uv_mst, uv_mst);
+	}
+
+	if (win->phy->scl)
+		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
+				    dest.x2 - dest.x1, dest.y2 - dest.y1,
+				    fb->pixel_format);
+
 	val = (actual_h - 1) << 16;
 	val |= (actual_w - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, act_info, val);
+
+	val = (dest.y2 - dest.y1 - 1) << 16;
+	val |= (dest.x2 - dest.x1 - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_info, val);
 	val = (dsp_sty - 1) << 16;
 	val |= (dsp_stx - 1) & 0xffff;
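
For the semi-planar formats enabled above, the chroma base address follows the clipped source origin: NV12 subsamples 2x2 and stores interleaved CbCr at 2 bytes per sample, so the x term is divided by hsub and the y term walks the UV pitch divided by vsub. A userspace sketch of that offset arithmetic, with made-up clip and pitch values:

/* NV12 chroma offset math from the plane-update hunk above; the clip
 * origin, pitch and plane offset are hypothetical. */
#include <stdio.h>

int main(void)
{
	int hsub = 2, vsub = 2, bpp = 2;	/* NV12: 2x2, CbCr pairs */

	long src_x1 = 130;		/* clipped origin (kept even) */
	long src_y1 = 64;
	long uv_pitch = 1280;		/* fb->pitches[1] in bytes */
	long uv_plane_off = 1280 * 720;	/* fb->offsets[1], after the Y plane */

	long offset = src_x1 * bpp / hsub	/* 130 bytes into the row */
		    + src_y1 * uv_pitch / vsub;	/* 32 UV rows down */

	printf("uv_mst = dma_addr + %ld + %ld\n", uv_plane_off, offset);
	return 0;
}
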
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 63e9b3a..a2d4ddb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -198,4 +198,92 @@
 	ALPHA_SRC_GLOBAL,
 };
 
+enum scale_mode {
+	SCALE_NONE = 0x0,
+	SCALE_UP   = 0x1,
+	SCALE_DOWN = 0x2
+};
+
+enum lb_mode {
+	LB_YUV_3840X5 = 0x0,
+	LB_YUV_2560X8 = 0x1,
+	LB_RGB_3840X2 = 0x2,
+	LB_RGB_2560X4 = 0x3,
+	LB_RGB_1920X5 = 0x4,
+	LB_RGB_1280X8 = 0x5
+};
+
+enum scale_up_mode {
+	SCALE_UP_BIL = 0x0,
+	SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+	SCALE_DOWN_BIL = 0x0,
+	SCALE_DOWN_AVG = 0x1
+};
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+#define SCL_FT_DEFAULT_FIXPOINT_SHIFT	12
+#define SCL_MAX_VSKIPLINES		4
+#define MIN_SCL_FT_AFTER_VSKIP		1
+
+static inline uint16_t scl_cal_scale(int src, int dst, int shift)
+{
+	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
+}
+
+#define GET_SCL_FT_BILI_DN(src, dst)	scl_cal_scale(src, dst, 12)
+#define GET_SCL_FT_BILI_UP(src, dst)	scl_cal_scale(src, dst, 16)
+#define GET_SCL_FT_BIC(src, dst)	scl_cal_scale(src, dst, 16)
+
+static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
+					     int vskiplines)
+{
+	int act_height;
+
+	act_height = (src_h + vskiplines - 1) / vskiplines;
+
+	return GET_SCL_FT_BILI_DN(act_height, dst_h);
+}
+
+static inline enum scale_mode scl_get_scl_mode(int src, int dst)
+{
+	if (src < dst)
+		return SCALE_UP;
+	else if (src > dst)
+		return SCALE_DOWN;
+
+	return SCALE_NONE;
+}
+
+static inline int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
+{
+	uint32_t vskiplines;
+
+	for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
+		if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
+			break;
+
+	return vskiplines;
+}
+
+static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
+{
+	int lb_mode;
+
+	if (width > 2560)
+		lb_mode = LB_RGB_3840X2;
+	else if (width > 1920)
+		lb_mode = LB_RGB_2560X4;
+	else if (!is_yuv)
+		lb_mode = LB_RGB_1920X5;
+	else if (width > 1280)
+		lb_mode = LB_YUV_3840X5;
+	else
+		lb_mode = LB_YUV_2560X8;
+
+	return lb_mode;
+}
+
 #endif /* _ROCKCHIP_DRM_VOP_H */
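
All of the scale factors above are fixed point: FRAC_16_16 builds the 16.16 ratios used for the plane-helper scaling limits, scl_cal_scale produces the hardware coefficient (4.12 for bilinear down-scaling, 0.16 for the up-scaling filters), and vskiplines pre-drops lines when the vertical ratio exceeds what the filter can do. A standalone check of the arithmetic, with the formulas copied from the header:

/* Standalone check of the VOP scale-factor math; the formulas are
 * copied from the header above. */
#include <stdio.h>
#include <stdint.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))
#define SCL_MAX_VSKIPLINES	4
#define MIN_SCL_FT_AFTER_VSKIP	1

static uint16_t scl_cal_scale(int src, int dst, int shift)
{
	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
}

static int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
{
	uint32_t vskiplines;

	for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
		if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
			break;
	return vskiplines;
}

int main(void)
{
	/* plane-helper limits: 1/8x .. 8x in 16.16 fixed point */
	printf("min 0x%x, max 0x%x\n", FRAC_16_16(1, 8), FRAC_16_16(8, 1));

	/* 1920 -> 960 bilinear down-scale in 4.12 fixed point:
	 * ((3840 - 3) << 11) / 959 = 8194, just over 2.0 (0x2000) */
	printf("bili_dn(1920, 960) = %u\n", scl_cal_scale(1920, 960, 12));

	/* 1920 -> 480: only every 4th line is fetched before filtering,
	 * so the residual factor lands just under 1.0 (4096) */
	int skip = scl_get_vskiplines(1920, 480);
	int act_h = (1920 + skip - 1) / skip;
	printf("vskiplines = %d, factor = %u\n",
	       skip, scl_cal_scale(act_h, 480, 12));
	return 0;
}
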
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 859ccb6..e9272b0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -248,7 +248,7 @@
 	lcdc_write(sdev, LDDDSR, value);
 
 	/* Setup planes. */
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		if (plane->crtc == crtc)
 			shmob_drm_plane_setup(plane);
 	}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4e..e27490b 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
 sticompositor-y := \
-	sti_layer.o \
 	sti_mixer.o \
 	sti_gdp.o \
 	sti_vid.o \
 	sti_cursor.o \
 	sti_compositor.o \
-	sti_drm_crtc.o \
-	sti_drm_plane.o
+	sti_crtc.o \
+	sti_plane.o
 
 stihdmi-y := sti_hdmi.o \
 	sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@
 	sticompositor.o \
 	sti_hqvdp.o \
 	stidvo.o \
-	sti_drm_drv.o
+	sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3..c652627 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
 #include <drm/drmP.h>
 
 #include "sti_compositor.h"
-#include "sti_drm_crtc.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
+#include "sti_crtc.h"
+#include "sti_cursor.h"
+#include "sti_drv.h"
 #include "sti_gdp.h"
+#include "sti_plane.h"
+#include "sti_vid.h"
 #include "sti_vtg.h"
 
 /*
@@ -31,7 +33,7 @@
 			{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
 			{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
 			{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
-			{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+			{STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
 			{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
 			{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
 	},
@@ -53,14 +55,29 @@
 	},
 };
 
-static int sti_compositor_init_subdev(struct sti_compositor *compo,
-		struct sti_compositor_subdev_descriptor *desc,
-		unsigned int array_size)
+static int sti_compositor_bind(struct device *dev,
+			       struct device *master,
+			       void *data)
 {
-	unsigned int i, mixer_id = 0, layer_id = 0;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
+	struct sti_private *dev_priv = drm_dev->dev_private;
+	struct drm_plane *cursor = NULL;
+	struct drm_plane *primary = NULL;
+	struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
+	unsigned int array_size = compo->data.nb_subdev;
 
+	dev_priv->compo = compo;
+
+	/* Register mixer subdev and video subdev first */
 	for (i = 0; i < array_size; i++) {
 		switch (desc[i].type) {
+		case STI_VID_SUBDEV:
+			compo->vid[vid_id++] =
+			    sti_vid_create(compo->dev, desc[i].id,
+					   compo->regs + desc[i].offset);
+			break;
 		case STI_MIXER_MAIN_SUBDEV:
 		case STI_MIXER_AUX_SUBDEV:
 			compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@
 					     compo->regs + desc[i].offset);
 			break;
 		case STI_GPD_SUBDEV:
-		case STI_VID_SUBDEV:
 		case STI_CURSOR_SUBDEV:
-			compo->layer[layer_id++] =
-			    sti_layer_create(compo->dev, desc[i].id,
-					     compo->regs + desc[i].offset);
+			/* Nothing to do, wait for the second round */
 			break;
 		default:
 			DRM_ERROR("Unknown subdev component type\n");
 			return 1;
 		}
-
 	}
-	compo->nb_mixers = mixer_id;
-	compo->nb_layers = layer_id;
 
-	return 0;
-}
+	/* Register the other subdevs, create crtc and planes */
+	for (i = 0; i < array_size; i++) {
+		enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-static int sti_compositor_bind(struct device *dev, struct device *master,
-	void *data)
-{
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	unsigned int i, crtc = 0, plane = 0;
-	struct sti_drm_private *dev_priv = drm_dev->dev_private;
-	struct drm_plane *cursor = NULL;
-	struct drm_plane *primary = NULL;
+		if (crtc_id < mixer_id)
+			plane_type = DRM_PLANE_TYPE_PRIMARY;
 
-	dev_priv->compo = compo;
-
-	for (i = 0; i < compo->nb_layers; i++) {
-		if (compo->layer[i]) {
-			enum sti_layer_desc desc = compo->layer[i]->desc;
-			enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
-			enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
-
-			if (crtc < compo->nb_mixers)
-				plane_type = DRM_PLANE_TYPE_PRIMARY;
-
-			switch (type) {
-			case STI_CUR:
-				cursor = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						1, DRM_PLANE_TYPE_CURSOR);
-				break;
-			case STI_GDP:
-			case STI_VID:
-				primary = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						(1 << compo->nb_mixers) - 1,
-						plane_type);
-				plane++;
-				break;
-			case STI_BCK:
-			case STI_VDP:
+		switch (desc[i].type) {
+		case STI_MIXER_MAIN_SUBDEV:
+		case STI_MIXER_AUX_SUBDEV:
+		case STI_VID_SUBDEV:
+			/* Nothing to do, already done at the first round */
+			break;
+		case STI_CURSOR_SUBDEV:
+			cursor = sti_cursor_create(drm_dev, compo->dev,
+						   desc[i].id,
+						   compo->regs + desc[i].offset,
+						   1);
+			if (!cursor) {
+				DRM_ERROR("Can't create CURSOR plane\n");
 				break;
 			}
-
-			/* The first planes are reserved for primary planes*/
-			if (crtc < compo->nb_mixers && primary) {
-				sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
-						primary, cursor);
-				crtc++;
-				cursor = NULL;
-				primary = NULL;
+			break;
+		case STI_GPD_SUBDEV:
+			primary = sti_gdp_create(drm_dev, compo->dev,
+						 desc[i].id,
+						 compo->regs + desc[i].offset,
+						 (1 << mixer_id) - 1,
+						 plane_type);
+			if (!primary) {
+				DRM_ERROR("Can't create GDP plane\n");
+				break;
 			}
+			break;
+		default:
+			DRM_ERROR("Unknown subdev component type\n");
+			return 1;
+		}
+
+		/* The first planes are reserved for primary planes */
+		if (crtc_id < mixer_id && primary) {
+			sti_crtc_init(drm_dev, compo->mixer[crtc_id],
+				      primary, cursor);
+			crtc_id++;
+			cursor = NULL;
+			primary = NULL;
 		}
 	}
 
-	drm_vblank_init(drm_dev, crtc);
+	drm_vblank_init(drm_dev, crtc_id);
 	/* Allow usage of vblank without having to call drm_irq_install */
 	drm_dev->irq_enabled = 1;
 
-	DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
-			 crtc, plane);
-	DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
-
 	return 0;
 }
 
@@ -179,7 +181,6 @@
 	struct device_node *vtg_np;
 	struct sti_compositor *compo;
 	struct resource *res;
-	int err;
 
 	compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
 	if (!compo) {
@@ -187,7 +188,7 @@
 		return -ENOMEM;
 	}
 	compo->dev = dev;
-	compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+	compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
 
 	/* populate data structure depending on compatibility */
 	BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
 
-	/* Initialize compositor subdevices */
-	err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
-					 compo->data.nb_subdev);
-	if (err)
-		return err;
-
 	platform_set_drvdata(pdev, compo);
 
 	return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44..1a4a73d 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
 #include <linux/clk.h>
 #include <linux/kernel.h>
 
-#include "sti_layer.h"
 #include "sti_mixer.h"
+#include "sti_plane.h"
 
 #define WAIT_NEXT_VSYNC_MS      50 /*ms*/
 
-#define STI_MAX_LAYER 8
 #define STI_MAX_MIXER 2
+#define STI_MAX_VID   1
 
 enum sti_compositor_subdev_type {
 	STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@
  * @rst_main: reset control of the main path
  * @rst_aux: reset control of the aux path
  * @mixer: array of mixers
+ * @vid: array of vids
  * @vtg_main: vtg for main data path
  * @vtg_aux: vtg for auxiliary data path
- * @layer: array of layers
- * @nb_mixers: number of mixers for this compositor
- * @nb_layers: number of layers (GDP,VID,...) for this compositor
  * @vtg_vblank_nb: callback for VTG VSYNC notification
  */
 struct sti_compositor {
@@ -77,11 +75,9 @@
 	struct reset_control *rst_main;
 	struct reset_control *rst_aux;
 	struct sti_mixer *mixer[STI_MAX_MIXER];
+	struct sti_vid *vid[STI_MAX_VID];
 	struct sti_vtg *vtg_main;
 	struct sti_vtg *vtg_aux;
-	struct sti_layer *layer[STI_MAX_LAYER];
-	int nb_mixers;
-	int nb_layers;
 	struct notifier_block vtg_vblank_nb;
 };
 
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
new file mode 100644
index 0000000..018ffc9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *          Fabien Dessenne <fabien.dessenne@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
+#include "sti_vid.h"
+#include "sti_vtg.h"
+
+static void sti_crtc_enable(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	mixer->status = STI_MIXER_READY;
+
+	/* Prepare and enable the compo IP clock */
+	if (mixer->id == STI_MIXER_MAIN) {
+		if (clk_prepare_enable(compo->clk_compo_main))
+			DRM_INFO("Failed to prepare/enable compo_main clk\n");
+	} else {
+		if (clk_prepare_enable(compo->clk_compo_aux))
+			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
+	}
+
+	drm_crtc_vblank_on(crtc);
+}
+
+static void sti_crtc_disabling(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	mixer->status = STI_MIXER_DISABLING;
+}
+
+static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/* accept the provided drm_display_mode, do not fix it up */
+	return true;
+}
+
+static int
+sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+	struct clk *clk;
+	int rate = mode->clock * 1000;
+	int res;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      mode->base.id, mode->name);
+
+	DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+		      mode->vrefresh, mode->clock,
+		      mode->hdisplay,
+		      mode->hsync_start, mode->hsync_end,
+		      mode->htotal,
+		      mode->vdisplay,
+		      mode->vsync_start, mode->vsync_end,
+		      mode->vtotal, mode->type, mode->flags);
+
+	/* Set rate and prepare/enable pixel clock */
+	if (mixer->id == STI_MIXER_MAIN)
+		clk = compo->clk_pix_main;
+	else
+		clk = compo->clk_pix_aux;
+
+	res = clk_set_rate(clk, rate);
+	if (res < 0) {
+		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
+		return -EINVAL;
+	}
+	if (clk_prepare_enable(clk)) {
+		DRM_ERROR("Failed to prepare/enable pix clk\n");
+		return -EINVAL;
+	}
+
+	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux, &crtc->mode);
+
+	res = sti_mixer_active_video_area(mixer, &crtc->mode);
+	if (res) {
+		DRM_ERROR("Can't set active video area\n");
+		return -EINVAL;
+	}
+
+	return res;
+}
+
+static void sti_crtc_disable(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
+
+	/* Disable Background */
+	sti_mixer_set_background_status(mixer, false);
+
+	drm_crtc_vblank_off(crtc);
+
+	/* Disable pixel clock and compo IP clocks */
+	if (mixer->id == STI_MIXER_MAIN) {
+		clk_disable_unprepare(compo->clk_pix_main);
+		clk_disable_unprepare(compo->clk_compo_main);
+	} else {
+		clk_disable_unprepare(compo->clk_pix_aux);
+		clk_disable_unprepare(compo->clk_compo_aux);
+	}
+
+	mixer->status = STI_MIXER_DISABLED;
+}
+
+static void
+sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	sti_crtc_enable(crtc);
+	sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+}
+
+static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	if (crtc->state->event) {
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		mixer->pending_event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+}
+
+static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_device *drm_dev = crtc->dev;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+	struct drm_plane *p;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* perform plane actions */
+	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		switch (plane->status) {
+		case STI_PLANE_UPDATED:
+			/* update planes tagged as updated */
+			DRM_DEBUG_DRIVER("update plane %s\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_depth(mixer, plane)) {
+				DRM_ERROR("Cannot set plane %s depth\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			if (sti_mixer_set_plane_status(mixer, plane, true)) {
+				DRM_ERROR("Cannot enable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			/* if plane is HQVDP_0 then commit the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_commit(compo->vid[0], p->state);
+
+			plane->status = STI_PLANE_READY;
+
+			break;
+		case STI_PLANE_DISABLING:
+			/* disabling sequence for planes tagged as disabling */
+			DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_status(mixer, plane, false)) {
+				DRM_ERROR("Cannot disable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				continue;
+			}
+
+			if (plane->desc == STI_CURSOR)
+				/* tag the plane status as disabled */
+				plane->status = STI_PLANE_DISABLED;
+			else
+				/* tag the plane status as flushing */
+				plane->status = STI_PLANE_FLUSHING;
+
+			/* if plane is HQVDP_0 then disable the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_disable(compo->vid[0]);
+
+			break;
+		default:
+			/* Other status cases are not handled */
+			break;
+		}
+	}
+}
+
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+	.enable = sti_crtc_enable,
+	.disable = sti_crtc_disabling,
+	.mode_fixup = sti_crtc_mode_fixup,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_nofb = sti_crtc_mode_set_nofb,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
+	.atomic_begin = sti_crtc_atomic_begin,
+	.atomic_flush = sti_crtc_atomic_flush,
+};
+
+static void sti_crtc_destroy(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("\n");
+	drm_crtc_cleanup(crtc);
+}
+
+static int sti_crtc_set_property(struct drm_crtc *crtc,
+				 struct drm_property *property,
+				 uint64_t val)
+{
+	DRM_DEBUG_KMS("\n");
+	return 0;
+}
+
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data)
+{
+	struct drm_device *drm_dev;
+	struct sti_compositor *compo =
+		container_of(nb, struct sti_compositor, vtg_vblank_nb);
+	int *crtc = data;
+	unsigned long flags;
+	struct sti_private *priv;
+
+	drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
+	priv = drm_dev->dev_private;
+
+	if ((event != VTG_TOP_FIELD_EVENT) &&
+	    (event != VTG_BOTTOM_FIELD_EVENT)) {
+		DRM_ERROR("unknown event: %lu\n", event);
+		return -EINVAL;
+	}
+
+	drm_handle_vblank(drm_dev, *crtc);
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	if (compo->mixer[*crtc]->pending_event) {
+		drm_send_vblank_event(drm_dev, -1,
+				      compo->mixer[*crtc]->pending_event);
+		drm_vblank_put(drm_dev, *crtc);
+		compo->mixer[*crtc]->pending_event = NULL;
+	}
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+	if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
+		struct drm_plane *p;
+
+		/* Disable mixer only if all overlay planes (GDP and VDP)
+		 * are disabled */
+		list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+			struct sti_plane *plane = to_sti_plane(p);
+
+			if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
+				if (plane->status != STI_PLANE_DISABLED)
+					return 0;
+		}
+		sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
+	}
+
+	return 0;
+}
+
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct sti_private *dev_priv = dev->dev_private;
+	struct sti_compositor *compo = dev_priv->compo;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux,
+			vtg_vblank_nb, crtc)) {
+		DRM_ERROR("Cannot register VTG notifier\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sti_crtc_enable_vblank);
+
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
+{
+	struct sti_private *priv = drm_dev->dev_private;
+	struct sti_compositor *compo = priv->compo;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+	/* free the resources of the pending requests */
+	if (compo->mixer[crtc]->pending_event) {
+		drm_vblank_put(drm_dev, crtc);
+		compo->mixer[crtc]->pending_event = NULL;
+	}
+}
+EXPORT_SYMBOL(sti_crtc_disable_vblank);
+
+static struct drm_crtc_funcs sti_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.destroy = sti_crtc_destroy,
+	.set_property = sti_crtc_set_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+bool sti_crtc_is_main(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	if (mixer->id == STI_MIXER_MAIN)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(sti_crtc_is_main);
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor)
+{
+	struct drm_crtc *crtc = &mixer->drm_crtc;
+	int res;
+
+	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+					&sti_crtc_funcs);
+	if (res) {
+		DRM_ERROR("Can't initialze CRTC\n");
+		return -EINVAL;
+	}
+
+	drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
+
+	DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
+			 crtc->base.id, sti_mixer_to_str(mixer));
+
+	return 0;
+}
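Unlike most DRM drivers, vblank here is not a classic IRQ: sti_crtc_vblank_cb() hangs off the VTG's notifier chain and is invoked on every top/bottom field event, where it completes any pending page-flip and finishes a deferred mixer disable. A minimal sketch of a client of that chain, assuming only the generic kernel notifier API plus the sti_vtg_register_client() helper used above (example_* names are hypothetical):

	static int example_vblank_cb(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		int crtc = *(int *)data;	/* mixer/CRTC index */

		if (event != VTG_TOP_FIELD_EVENT &&
		    event != VTG_BOTTOM_FIELD_EVENT)
			return -EINVAL;

		/* handle VSYNC for this crtc */
		return 0;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_vblank_cb,
	};
	/* registration: sti_vtg_register_client(compo->vtg_main, &example_nb, crtc); */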
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 0000000..51963e6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CRTC_H_
+#define _STI_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor);
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data);
+bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee..dd10321 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
  */
 #include <drm/drmP.h>
 
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
 #include "sti_cursor.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Registers */
@@ -42,15 +48,19 @@
 /**
  * STI Cursor structure
  *
- * @layer:      layer structure
- * @width:      cursor width
- * @height:     cursor height
- * @clut:       color look up table
- * @clut_paddr: color look up table physical address
- * @pixmap:     pixmap dma buffer (clut8-format cursor)
+ * @plane:        sti_plane structure
+ * @dev:          driver device
+ * @regs:         cursor registers
+ * @width:        cursor width
+ * @height:       cursor height
+ * @clut:         color look-up table
+ * @clut_paddr:   color look-up table physical address
+ * @pixmap:       pixmap dma buffer (clut8-format cursor)
  */
 struct sti_cursor {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	unsigned int width;
 	unsigned int height;
 	unsigned short *clut;
@@ -62,22 +72,10 @@
 	DRM_FORMAT_ARGB8888,
 };
 
-#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
 
-static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
+static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
 {
-	return cursor_supported_formats;
-}
-
-static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(cursor_supported_formats);
-}
-
-static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	u32 *src = layer->vaddr;
 	u8  *dst = cursor->pixmap.base;
 	unsigned int i, j;
 	u32 a, r, g, b;
@@ -96,101 +94,8 @@
 	}
 }
 
-static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_cursor_init(struct sti_cursor *cursor)
 {
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
-	u32 y, x;
-	u32 val;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	if (layer->src_w < STI_CURS_MIN_SIZE ||
-	    layer->src_h < STI_CURS_MIN_SIZE ||
-	    layer->src_w > STI_CURS_MAX_SIZE ||
-	    layer->src_h > STI_CURS_MAX_SIZE) {
-		DRM_ERROR("Invalid cursor size (%dx%d)\n",
-				layer->src_w, layer->src_h);
-		return -EINVAL;
-	}
-
-	/* If the cursor size has changed, re-allocated the pixmap */
-	if (!cursor->pixmap.base ||
-	    (cursor->width != layer->src_w) ||
-	    (cursor->height != layer->src_h)) {
-		cursor->width = layer->src_w;
-		cursor->height = layer->src_h;
-
-		if (cursor->pixmap.base)
-			dma_free_writecombine(layer->dev,
-					      cursor->pixmap.size,
-					      cursor->pixmap.base,
-					      cursor->pixmap.paddr);
-
-		cursor->pixmap.size = cursor->width * cursor->height;
-
-		cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
-							cursor->pixmap.size,
-							&cursor->pixmap.paddr,
-							GFP_KERNEL | GFP_DMA);
-		if (!cursor->pixmap.base) {
-			DRM_ERROR("Failed to allocate memory for pixmap\n");
-			return -ENOMEM;
-		}
-	}
-
-	/* Convert ARGB8888 to CLUT8 */
-	sti_cursor_argb8888_to_clut8(layer);
-
-	/* AWS and AWE depend on the mode */
-	y = sti_vtg_get_line_number(*mode, 0);
-	x = sti_vtg_get_pixel_number(*mode, 0);
-	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWS);
-	y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
-	x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
-	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWE);
-
-	if (first_prepare) {
-		/* Set and fetch CLUT */
-		writel(cursor->clut_paddr, layer->regs + CUR_CML);
-		writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
-	}
-
-	return 0;
-}
-
-static int sti_cursor_commit_layer(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
-	u32 ydo, xdo;
-
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	/* Set memory location, size, and position */
-	writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
-	writel(cursor->width, layer->regs + CUR_PMP);
-	writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
-
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
-	writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
-
-	return 0;
-}
-
-static int sti_cursor_disable_layer(struct sti_layer *layer)
-{
-	return 0;
-}
-
-static void sti_cursor_init(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
 	unsigned short *base = cursor->clut;
 	unsigned int a, r, g, b;
 
@@ -205,18 +110,139 @@
 						  (b * 5);
 }
 
-static const struct sti_layer_funcs cursor_ops = {
-	.get_formats = sti_cursor_get_formats,
-	.get_nb_formats = sti_cursor_get_nb_formats,
-	.init = sti_cursor_init,
-	.prepare = sti_cursor_prepare_layer,
-	.commit = sti_cursor_commit_layer,
-	.disable = sti_cursor_disable_layer,
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src coordinates are in 16.16 fixed-point format */
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED;
+	struct drm_gem_cma_object *cma_obj;
+	u32 y, x;
+	u32 val;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+
+	dev_dbg(cursor->dev, "%s %s\n", __func__,
+		sti_plane_to_str(plane));
+
+	if (src_w < STI_CURS_MIN_SIZE ||
+	    src_h < STI_CURS_MIN_SIZE ||
+	    src_w > STI_CURS_MAX_SIZE ||
+	    src_h > STI_CURS_MAX_SIZE) {
+		DRM_ERROR("Invalid cursor size (%dx%d)\n",
+				src_w, src_h);
+		return;
+	}
+
+	/* If the cursor size has changed, re-allocate the pixmap */
+	if (!cursor->pixmap.base ||
+	    (cursor->width != src_w) ||
+	    (cursor->height != src_h)) {
+		cursor->width = src_w;
+		cursor->height = src_h;
+
+		if (cursor->pixmap.base)
+			dma_free_writecombine(cursor->dev,
+					      cursor->pixmap.size,
+					      cursor->pixmap.base,
+					      cursor->pixmap.paddr);
+
+		cursor->pixmap.size = cursor->width * cursor->height;
+
+		cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
+							cursor->pixmap.size,
+							&cursor->pixmap.paddr,
+							GFP_KERNEL | GFP_DMA);
+		if (!cursor->pixmap.base) {
+			DRM_ERROR("Failed to allocate memory for pixmap\n");
+			return;
+		}
+	}
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	/* Convert ARGB8888 to CLUT8 */
+	sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
+
+	/* AWS and AWE depend on the mode */
+	y = sti_vtg_get_line_number(*mode, 0);
+	x = sti_vtg_get_pixel_number(*mode, 0);
+	val = y << 16 | x;
+	writel(val, cursor->regs + CUR_AWS);
+	y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+	x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+	val = y << 16 | x;
+	writel(val, cursor->regs + CUR_AWE);
+
+	if (first_prepare) {
+		/* Set and fetch CLUT */
+		writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+		writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+	}
+
+	/* Set memory location, size, and position */
+	writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
+	writel(cursor->width, cursor->regs + CUR_PMP);
+	writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
+
+	y = sti_vtg_get_line_number(*mode, dst_y);
+	x = sti_vtg_get_pixel_number(*mode, dst_x);
+	writel((y << 16) | x, cursor->regs + CUR_VPO);
+
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
+				      struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+	.atomic_update = sti_cursor_atomic_update,
+	.atomic_disable = sti_cursor_atomic_disable,
 };
 
-struct sti_layer *sti_cursor_create(struct device *dev)
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs)
 {
 	struct sti_cursor *cursor;
+	size_t size;
+	int res;
 
 	cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
 	if (!cursor) {
@@ -225,18 +251,43 @@
 	}
 
 	/* Allocate clut buffer */
-	cursor->clut = dma_alloc_writecombine(dev,
-			0x100 * sizeof(unsigned short),
-			&cursor->clut_paddr,
-			GFP_KERNEL | GFP_DMA);
+	size = 0x100 * sizeof(unsigned short);
+	cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
+					      GFP_KERNEL | GFP_DMA);
 
 	if (!cursor->clut) {
 		DRM_ERROR("Failed to allocate memory for cursor clut\n");
-		devm_kfree(dev, cursor);
-		return NULL;
+		goto err_clut;
 	}
 
-	cursor->layer.ops = &cursor_ops;
+	cursor->dev = dev;
+	cursor->regs = baseaddr;
+	cursor->plane.desc = desc;
+	cursor->plane.status = STI_PLANE_DISABLED;
 
-	return (struct sti_layer *)cursor;
+	sti_cursor_init(cursor);
+
+	res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       cursor_supported_formats,
+				       ARRAY_SIZE(cursor_supported_formats),
+				       DRM_PLANE_TYPE_CURSOR);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err_plane;
+	}
+
+	drm_plane_helper_add(&cursor->plane.drm_plane,
+			     &sti_cursor_helpers_funcs);
+
+	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+
+	return &cursor->plane.drm_plane;
+
+err_plane:
+	dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+err_clut:
+	devm_kfree(dev, cursor);
+	return NULL;
 }
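The cursor hardware consumes a CLUT8 pixmap, so every atomic update re-encodes the ARGB8888 framebuffer through sti_cursor_argb8888_to_clut8(). A sketch of the per-pixel reduction, assuming the driver's convention of keeping the two most significant bits of each component (the exact shift values are an assumption based on that convention; the real loop lives in sti_cursor_argb8888_to_clut8()):

	/* Reduce one ARGB8888 pixel to an 8-bit palette index (2 bits/channel). */
	static u8 example_argb8888_to_clut8(u32 px)
	{
		u8 a = (px >> 30) & 3;
		u8 r = (px >> 22) & 3;
		u8 g = (px >> 14) & 3;
		u8 b = (px >> 6) & 3;

		return a << 6 | r << 4 | g << 2 | b;
	}

The 256-entry palette filled by sti_cursor_init() maps each index back to an expanded color, which is why only the index needs to be written into the pixmap buffer.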
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c98274..2ee5c10 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
 #ifndef _STI_CURSOR_H_
 #define _STI_CURSOR_H_
 
-struct sti_layer *sti_cursor_create(struct device *dev);
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs);
 
 #endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
deleted file mode 100644
index 6b641c5..0000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <linux/clk.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
-#include "sti_vtg.h"
-
-static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
-static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-
-	mixer->enabled = true;
-
-	/* Prepare and enable the compo IP clock */
-	if (mixer->id == STI_MIXER_MAIN) {
-		if (clk_prepare_enable(compo->clk_compo_main))
-			DRM_INFO("Failed to prepare/enable compo_main clk\n");
-	} else {
-		if (clk_prepare_enable(compo->clk_compo_aux))
-			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-	}
-
-	sti_mixer_clear_all_layers(mixer);
-}
-
-static void sti_drm_crtc_commit(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct sti_layer *layer;
-
-	if ((!mixer || !compo)) {
-		DRM_ERROR("Can not find mixer or compositor)\n");
-		return;
-	}
-
-	/* get GDP which is reserved to the CRTC FB */
-	layer = to_sti_layer(crtc->primary);
-	if (layer)
-		sti_layer_commit(layer);
-	else
-		DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
-
-	/* Enable layer on mixer */
-	if (sti_mixer_set_layer_status(mixer, layer, true))
-		DRM_ERROR("Can not enable layer at mixer\n");
-
-	drm_crtc_vblank_on(crtc);
-}
-
-static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	/* accept the provided drm_display_mode, do not fix it up */
-	return true;
-}
-
-static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct clk *clk;
-	int rate = mode->clock * 1000;
-	int res;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      mode->base.id, mode->name);
-
-	DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-		      mode->vrefresh, mode->clock,
-		      mode->hdisplay,
-		      mode->hsync_start, mode->hsync_end,
-		      mode->htotal,
-		      mode->vdisplay,
-		      mode->vsync_start, mode->vsync_end,
-		      mode->vtotal, mode->type, mode->flags);
-
-	/* Set rate and prepare/enable pixel clock */
-	if (mixer->id == STI_MIXER_MAIN)
-		clk = compo->clk_pix_main;
-	else
-		clk = compo->clk_pix_aux;
-
-	res = clk_set_rate(clk, rate);
-	if (res < 0) {
-		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-		return -EINVAL;
-	}
-	if (clk_prepare_enable(clk)) {
-		DRM_ERROR("Failed to prepare/enable pix clk\n");
-		return -EINVAL;
-	}
-
-	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, &crtc->mode);
-
-	res = sti_mixer_active_video_area(mixer, &crtc->mode);
-	if (res) {
-		DRM_ERROR("Can not set active video area\n");
-		return -EINVAL;
-	}
-
-	return res;
-}
-
-static void sti_drm_crtc_disable(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-
-	if (!mixer->enabled)
-		return;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
-
-	/* Disable Background */
-	sti_mixer_set_background_status(mixer, false);
-
-	drm_crtc_vblank_off(crtc);
-
-	/* Disable pixel clock and compo IP clocks */
-	if (mixer->id == STI_MIXER_MAIN) {
-		clk_disable_unprepare(compo->clk_pix_main);
-		clk_disable_unprepare(compo->clk_compo_main);
-	} else {
-		clk_disable_unprepare(compo->clk_pix_aux);
-		clk_disable_unprepare(compo->clk_compo_aux);
-	}
-
-	mixer->enabled = false;
-}
-
-static void
-sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
-	sti_drm_crtc_prepare(crtc);
-	sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
-}
-
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-
-	if (crtc->state->event) {
-		crtc->state->event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		mixer->pending_event = crtc->state->event;
-		crtc->state->event = NULL;
-	}
-}
-
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
-{
-}
-
-static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
-	.dpms = sti_drm_crtc_dpms,
-	.prepare = sti_drm_crtc_prepare,
-	.commit = sti_drm_crtc_commit,
-	.mode_fixup = sti_drm_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
-	.mode_set_nofb = sti_drm_crtc_mode_set_nofb,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
-	.disable = sti_drm_crtc_disable,
-	.atomic_begin = sti_drm_atomic_begin,
-	.atomic_flush = sti_drm_atomic_flush,
-};
-
-static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
-{
-	DRM_DEBUG_KMS("\n");
-	drm_crtc_cleanup(crtc);
-}
-
-static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
-				     struct drm_property *property,
-				     uint64_t val)
-{
-	DRM_DEBUG_KMS("\n");
-	return 0;
-}
-
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-			   unsigned long event, void *data)
-{
-	struct drm_device *drm_dev;
-	struct sti_compositor *compo =
-		container_of(nb, struct sti_compositor, vtg_vblank_nb);
-	int *crtc = data;
-	unsigned long flags;
-	struct sti_drm_private *priv;
-
-	drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
-	priv = drm_dev->dev_private;
-
-	if ((event != VTG_TOP_FIELD_EVENT) &&
-	    (event != VTG_BOTTOM_FIELD_EVENT)) {
-		DRM_ERROR("unknown event: %lu\n", event);
-		return -EINVAL;
-	}
-
-	drm_handle_vblank(drm_dev, *crtc);
-
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	if (compo->mixer[*crtc]->pending_event) {
-		drm_send_vblank_event(drm_dev, -1,
-				compo->mixer[*crtc]->pending_event);
-		drm_vblank_put(drm_dev, *crtc);
-		compo->mixer[*crtc]->pending_event = NULL;
-	}
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
-	return 0;
-}
-
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
-{
-	struct sti_drm_private *dev_priv = dev->dev_private;
-	struct sti_compositor *compo = dev_priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
-
-	if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux,
-			vtg_vblank_nb, crtc)) {
-		DRM_ERROR("Cannot register VTG notifier\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
-
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
-{
-	struct sti_drm_private *priv = dev->dev_private;
-	struct sti_compositor *compo = priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
-		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
-
-	/* free the resources of the pending requests */
-	if (compo->mixer[crtc]->pending_event) {
-		drm_vblank_put(dev, crtc);
-		compo->mixer[crtc]->pending_event = NULL;
-	}
-}
-EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
-
-static struct drm_crtc_funcs sti_crtc_funcs = {
-	.set_config = drm_atomic_helper_set_config,
-	.page_flip = drm_atomic_helper_page_flip,
-	.destroy = sti_drm_crtc_destroy,
-	.set_property = sti_drm_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-
-	if (mixer->id == STI_MIXER_MAIN)
-		return true;
-
-	return false;
-}
-EXPORT_SYMBOL(sti_drm_crtc_is_main);
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor)
-{
-	struct drm_crtc *crtc = &mixer->drm_crtc;
-	int res;
-
-	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
-			&sti_crtc_funcs);
-	if (res) {
-		DRM_ERROR("Can not initialze CRTC\n");
-		return -EINVAL;
-	}
-
-	drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
-
-	DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
-			 crtc->base.id, sti_mixer_to_str(mixer));
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b1..0000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_CRTC_H_
-#define _STI_DRM_CRTC_H_
-
-#include <drm/drmP.h>
-
-struct sti_mixer;
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor);
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-		unsigned long event, void *data);
-bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
deleted file mode 100644
index 59d558b..0000000
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-
-#include <linux/component.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
-
-#define DRIVER_NAME	"sti"
-#define DRIVER_DESC	"STMicroelectronics SoC DRM"
-#define DRIVER_DATE	"20140601"
-#define DRIVER_MAJOR	1
-#define DRIVER_MINOR	0
-
-#define STI_MAX_FB_HEIGHT	4096
-#define STI_MAX_FB_WIDTH	4096
-
-static void sti_drm_atomic_schedule(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
-{
-	private->commit.state = state;
-	schedule_work(&private->commit.work);
-}
-
-static void sti_drm_atomic_complete(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
-{
-	struct drm_device *drm = private->drm_dev;
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one condition: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state);
-	drm_atomic_helper_commit_modeset_enables(drm, state);
-
-	drm_atomic_helper_wait_for_vblanks(drm, state);
-
-	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_free(state);
-}
-
-static void sti_drm_atomic_work(struct work_struct *work)
-{
-	struct sti_drm_private *private = container_of(work,
-			struct sti_drm_private, commit.work);
-
-	sti_drm_atomic_complete(private, private->commit.state);
-}
-
-static int sti_drm_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool async)
-{
-	struct sti_drm_private *private = drm->dev_private;
-	int err;
-
-	err = drm_atomic_helper_prepare_planes(drm, state);
-	if (err)
-		return err;
-
-	/* serialize outstanding asynchronous commits */
-	mutex_lock(&private->commit.lock);
-	flush_work(&private->commit.work);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	drm_atomic_helper_swap_state(drm, state);
-
-	if (async)
-		sti_drm_atomic_schedule(private, state);
-	else
-		sti_drm_atomic_complete(private, state);
-
-	mutex_unlock(&private->commit.lock);
-	return 0;
-}
-
-static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
-	.fb_create = drm_fb_cma_create,
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = sti_drm_atomic_commit,
-};
-
-static void sti_drm_mode_config_init(struct drm_device *dev)
-{
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-
-	/*
-	 * set max width and height as default value.
-	 * this value would be used to check framebuffer size limitation
-	 * at drm_mode_addfb().
-	 */
-	dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
-	dev->mode_config.max_height = STI_MAX_FB_WIDTH;
-
-	dev->mode_config.funcs = &sti_drm_mode_config_funcs;
-}
-
-static int sti_drm_load(struct drm_device *dev, unsigned long flags)
-{
-	struct sti_drm_private *private;
-	int ret;
-
-	private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("Failed to allocate private\n");
-		return -ENOMEM;
-	}
-	dev->dev_private = (void *)private;
-	private->drm_dev = dev;
-
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, sti_drm_atomic_work);
-
-	drm_mode_config_init(dev);
-	drm_kms_helper_poll_init(dev);
-
-	sti_drm_mode_config_init(dev);
-
-	ret = component_bind_all(dev->dev, dev);
-	if (ret) {
-		drm_kms_helper_poll_fini(dev);
-		drm_mode_config_cleanup(dev);
-		kfree(private);
-		return ret;
-	}
-
-	drm_mode_config_reset(dev);
-
-#ifdef CONFIG_DRM_STI_FBDEV
-	drm_fbdev_cma_init(dev, 32,
-		   dev->mode_config.num_crtc,
-		   dev->mode_config.num_connector);
-#endif
-	return 0;
-}
-
-static const struct file_operations sti_drm_driver_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.mmap = drm_gem_cma_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-#endif
-	.release = drm_release,
-};
-
-static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
-						struct drm_gem_object *obj,
-						int flags)
-{
-	/* we want to be able to write in mmapped buffer */
-	flags |= O_RDWR;
-	return drm_gem_prime_export(dev, obj, flags);
-}
-
-static struct drm_driver sti_drm_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
-	    DRIVER_GEM | DRIVER_PRIME,
-	.load = sti_drm_load,
-	.gem_free_object = drm_gem_cma_free_object,
-	.gem_vm_ops = &drm_gem_cma_vm_ops,
-	.dumb_create = drm_gem_cma_dumb_create,
-	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
-	.dumb_destroy = drm_gem_dumb_destroy,
-	.fops = &sti_drm_driver_fops,
-
-	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = sti_drm_crtc_enable_vblank,
-	.disable_vblank = sti_drm_crtc_disable_vblank,
-
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = sti_drm_gem_prime_export,
-	.gem_prime_import = drm_gem_prime_import,
-	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
-	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
-	.gem_prime_vmap = drm_gem_cma_prime_vmap,
-	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
-	.gem_prime_mmap = drm_gem_cma_prime_mmap,
-
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-};
-
-static int compare_of(struct device *dev, void *data)
-{
-	return dev->of_node == data;
-}
-
-static int sti_drm_bind(struct device *dev)
-{
-	return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
-}
-
-static void sti_drm_unbind(struct device *dev)
-{
-	drm_put_dev(dev_get_drvdata(dev));
-}
-
-static const struct component_master_ops sti_drm_ops = {
-	.bind = sti_drm_bind,
-	.unbind = sti_drm_unbind,
-};
-
-static int sti_drm_master_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->parent->of_node;
-	struct device_node *child_np;
-	struct component_match *match = NULL;
-
-	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
-
-	child_np = of_get_next_available_child(node, NULL);
-
-	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
-		child_np = of_get_next_available_child(node, child_np);
-	}
-
-	return component_master_add_with_match(dev, &sti_drm_ops, match);
-}
-
-static int sti_drm_master_remove(struct platform_device *pdev)
-{
-	component_master_del(&pdev->dev, &sti_drm_ops);
-	return 0;
-}
-
-static struct platform_driver sti_drm_master_driver = {
-	.probe = sti_drm_master_probe,
-	.remove = sti_drm_master_remove,
-	.driver = {
-		.name = DRIVER_NAME "__master",
-	},
-};
-
-static int sti_drm_platform_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
-	struct platform_device *master;
-
-	of_platform_populate(node, NULL, NULL, dev);
-
-	platform_driver_register(&sti_drm_master_driver);
-	master = platform_device_register_resndata(dev,
-			DRIVER_NAME "__master", -1,
-			NULL, 0, NULL, 0);
-	if (IS_ERR(master))
-               return PTR_ERR(master);
-
-	platform_set_drvdata(pdev, master);
-	return 0;
-}
-
-static int sti_drm_platform_remove(struct platform_device *pdev)
-{
-	struct platform_device *master = platform_get_drvdata(pdev);
-
-	of_platform_depopulate(&pdev->dev);
-	platform_device_unregister(master);
-	platform_driver_unregister(&sti_drm_master_driver);
-	return 0;
-}
-
-static const struct of_device_id sti_drm_dt_ids[] = {
-	{ .compatible = "st,sti-display-subsystem", },
-	{ /* end node */ },
-};
-MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
-
-static struct platform_driver sti_drm_platform_driver = {
-	.probe = sti_drm_platform_probe,
-	.remove = sti_drm_platform_remove,
-	.driver = {
-		.name = DRIVER_NAME,
-		.of_match_table = sti_drm_dt_ids,
-	},
-};
-
-module_platform_driver(sti_drm_platform_driver);
-
-MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
deleted file mode 100644
index c413aa3..0000000
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_DRV_H_
-#define _STI_DRM_DRV_H_
-
-#include <drm/drmP.h>
-
-struct sti_compositor;
-struct sti_tvout;
-
-/**
- * STI drm private structure
- * This structure is stored as private in the drm_device
- *
- * @compo:                 compositor
- * @plane_zorder_property: z-order property for CRTC planes
- * @drm_dev:               drm device
- */
-struct sti_drm_private {
-	struct sti_compositor *compo;
-	struct drm_property *plane_zorder_property;
-	struct drm_device *drm_dev;
-
-	struct {
-		struct drm_atomic_state *state;
-		struct work_struct work;
-		struct mutex lock;
-	} commit;
-};
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed4..0000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
-#include "sti_vtg.h"
-
-enum sti_layer_desc sti_layer_default_zorder[] = {
-	STI_GDP_0,
-	STI_VID_0,
-	STI_GDP_1,
-	STI_VID_1,
-	STI_GDP_2,
-	STI_GDP_3,
-};
-
-/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
-
-static int
-sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		     unsigned int crtc_w, unsigned int crtc_h,
-		     uint32_t src_x, uint32_t src_y,
-		     uint32_t src_w, uint32_t src_h)
-{
-	struct sti_layer *layer = to_sti_layer(plane);
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	int res;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      plane->base.id, sti_layer_to_str(layer));
-	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
-
-	res = sti_mixer_set_layer_depth(mixer, layer);
-	if (res) {
-		DRM_ERROR("Can not set layer depth\n");
-		return res;
-	}
-
-	/* src_x are in 16.16 format. */
-	res = sti_layer_prepare(layer, crtc, fb,
-			&crtc->mode, mixer->id,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x >> 16, src_y >> 16,
-			src_w >> 16, src_h >> 16);
-	if (res) {
-		DRM_ERROR("Layer prepare failed\n");
-		return res;
-	}
-
-	res = sti_layer_commit(layer);
-	if (res) {
-		DRM_ERROR("Layer commit failed\n");
-		return res;
-	}
-
-	res = sti_mixer_set_layer_status(mixer, layer, true);
-	if (res) {
-		DRM_ERROR("Can not enable layer at mixer\n");
-		return res;
-	}
-
-	return 0;
-}
-
-static int sti_drm_disable_plane(struct drm_plane *plane)
-{
-	struct sti_layer *layer;
-	struct sti_mixer *mixer;
-	int lay_res, mix_res;
-
-	if (!plane->crtc) {
-		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
-		return 0;
-	}
-	layer = to_sti_layer(plane);
-	mixer = to_sti_mixer(plane->crtc);
-
-	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			plane->crtc->base.id, sti_mixer_to_str(mixer),
-			plane->base.id, sti_layer_to_str(layer));
-
-	/* Disable layer at mixer level */
-	mix_res = sti_mixer_set_layer_status(mixer, layer, false);
-	if (mix_res)
-		DRM_ERROR("Can not disable layer at mixer\n");
-
-	/* Wait a while to be sure that a Vsync event is received */
-	msleep(WAIT_NEXT_VSYNC_MS);
-
-	/* Then disable layer itself */
-	lay_res = sti_layer_disable(layer);
-	if (lay_res)
-		DRM_ERROR("Layer disable failed\n");
-
-	if (lay_res || mix_res)
-		return -EINVAL;
-
-	return 0;
-}
-
-static void sti_drm_plane_destroy(struct drm_plane *plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_helper_disable(plane);
-	drm_plane_cleanup(plane);
-}
-
-static int sti_drm_plane_set_property(struct drm_plane *plane,
-				      struct drm_property *property,
-				      uint64_t val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (property == private->plane_zorder_property) {
-		layer->zorder = val;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static struct drm_plane_funcs sti_drm_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_drm_plane_destroy,
-	.set_property = sti_drm_plane_set_property,
-	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
-				  struct drm_framebuffer *fb,
-				  const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
-				   struct drm_framebuffer *fb,
-				   const struct drm_plane_state *old_fb)
-{
-}
-
-static int sti_drm_plane_atomic_check(struct drm_plane *plane,
-				      struct drm_plane_state *state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_atomic_update(struct drm_plane *plane,
-					struct drm_plane_state *oldstate)
-{
-	struct drm_plane_state *state = plane->state;
-
-	sti_drm_update_plane(plane, state->crtc, state->fb,
-			    state->crtc_x, state->crtc_y,
-			    state->crtc_w, state->crtc_h,
-			    state->src_x, state->src_y,
-			    state->src_w, state->src_h);
-}
-
-static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
-					 struct drm_plane_state *oldstate)
-{
-	sti_drm_disable_plane(plane);
-}
-
-static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
-	.prepare_fb = sti_drm_plane_prepare_fb,
-	.cleanup_fb = sti_drm_plane_cleanup_fb,
-	.atomic_check = sti_drm_plane_atomic_check,
-	.atomic_update = sti_drm_plane_atomic_update,
-	.atomic_disable = sti_drm_plane_atomic_disable,
-};
-
-static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
-						 uint64_t default_val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct drm_property *prop;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	prop = private->plane_zorder_property;
-	if (!prop) {
-		prop = drm_property_create_range(dev, 0, "zpos", 0,
-						 GAM_MIXER_NB_DEPTH_LEVEL - 1);
-		if (!prop)
-			return;
-
-		private->plane_zorder_property = prop;
-	}
-
-	drm_object_attach_property(&plane->base, prop, default_val);
-	layer->zorder = default_val;
-}
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-				     struct sti_layer *layer,
-				     unsigned int possible_crtcs,
-				     enum drm_plane_type type)
-{
-	int err, i;
-	uint64_t default_zorder = 0;
-
-	err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
-			     &sti_drm_plane_funcs,
-			     sti_layer_get_formats(layer),
-			     sti_layer_get_nb_formats(layer), type);
-	if (err) {
-		DRM_ERROR("Failed to initialize plane\n");
-		return NULL;
-	}
-
-	drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
-
-	for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
-		if (sti_layer_default_zorder[i] == layer->desc)
-			break;
-
-	default_zorder = i + 1;
-
-	if (type == DRM_PLANE_TYPE_OVERLAY)
-		sti_drm_plane_attach_zorder_property(&layer->plane,
-				default_zorder);
-
-	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
-			 layer->plane.base.id,
-			 sti_layer_to_str(layer), default_zorder);
-
-	return &layer->plane;
-}
-EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f19183..0000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_PLANE_H_
-#define _STI_DRM_PLANE_H_
-
-#include <drm/drmP.h>
-
-struct sti_layer;
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-		struct sti_layer *layer,
-		unsigned int possible_crtcs,
-		enum drm_plane_type type);
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
new file mode 100644
index 0000000..6f4af6a
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_crtc.h"
+#include "sti_drv.h"
+
+#define DRIVER_NAME	"sti"
+#define DRIVER_DESC	"STMicroelectronics SoC DRM"
+#define DRIVER_DATE	"20140601"
+#define DRIVER_MAJOR	1
+#define DRIVER_MINOR	0
+
+#define STI_MAX_FB_HEIGHT	4096
+#define STI_MAX_FB_WIDTH	4096
+
+static void sti_atomic_schedule(struct sti_private *private,
+				struct drm_atomic_state *state)
+{
+	private->commit.state = state;
+	schedule_work(&private->commit.work);
+}
+
+static void sti_atomic_complete(struct sti_private *private,
+				struct drm_atomic_state *state)
+{
+	struct drm_device *drm = private->drm_dev;
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 */
+
+	drm_atomic_helper_commit_modeset_disables(drm, state);
+	drm_atomic_helper_commit_planes(drm, state);
+	drm_atomic_helper_commit_modeset_enables(drm, state);
+
+	drm_atomic_helper_wait_for_vblanks(drm, state);
+
+	drm_atomic_helper_cleanup_planes(drm, state);
+	drm_atomic_state_free(state);
+}
+
+static void sti_atomic_work(struct work_struct *work)
+{
+	struct sti_private *private = container_of(work,
+			struct sti_private, commit.work);
+
+	sti_atomic_complete(private, private->commit.state);
+}
+
+static int sti_atomic_commit(struct drm_device *drm,
+			     struct drm_atomic_state *state, bool async)
+{
+	struct sti_private *private = drm->dev_private;
+	int err;
+
+	err = drm_atomic_helper_prepare_planes(drm, state);
+	if (err)
+		return err;
+
+	/* serialize outstanding asynchronous commits */
+	mutex_lock(&private->commit.lock);
+	flush_work(&private->commit.work);
+
+	/*
+	 * This is the point of no return - everything below never fails except
+	 * when the hw goes bonghits. Which means we can commit the new state on
+	 * the software side now.
+	 */
+
+	drm_atomic_helper_swap_state(drm, state);
+
+	if (async)
+		sti_atomic_schedule(private, state);
+	else
+		sti_atomic_complete(private, state);
+
+	mutex_unlock(&private->commit.lock);
+	return 0;
+}
+
+static struct drm_mode_config_funcs sti_mode_config_funcs = {
+	.fb_create = drm_fb_cma_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = sti_atomic_commit,
+};
+
+static void sti_mode_config_init(struct drm_device *dev)
+{
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * Set the max width and height as default values.
+	 * These values are used to check the framebuffer size limits
+	 * in drm_mode_addfb().
+	 */
+	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
+	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
+
+	dev->mode_config.funcs = &sti_mode_config_funcs;
+}
+
+static int sti_load(struct drm_device *dev, unsigned long flags)
+{
+	struct sti_private *private;
+	int ret;
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private) {
+		DRM_ERROR("Failed to allocate private\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)private;
+	private->drm_dev = dev;
+
+	mutex_init(&private->commit.lock);
+	INIT_WORK(&private->commit.work, sti_atomic_work);
+
+	drm_mode_config_init(dev);
+	drm_kms_helper_poll_init(dev);
+
+	sti_mode_config_init(dev);
+
+	ret = component_bind_all(dev->dev, dev);
+	if (ret) {
+		drm_kms_helper_poll_fini(dev);
+		drm_mode_config_cleanup(dev);
+		kfree(private);
+		return ret;
+	}
+
+	drm_mode_config_reset(dev);
+
+#ifdef CONFIG_DRM_STI_FBDEV
+	drm_fbdev_cma_init(dev, 32,
+			   dev->mode_config.num_crtc,
+			   dev->mode_config.num_connector);
+#endif
+	return 0;
+}
+
+static const struct file_operations sti_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.mmap = drm_gem_cma_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.release = drm_release,
+};
+
+static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
+					    struct drm_gem_object *obj,
+					    int flags)
+{
+	/* we want to be able to write to the mmapped buffer */
+	flags |= O_RDWR;
+	return drm_gem_prime_export(dev, obj, flags);
+}
+
+static struct drm_driver sti_driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+	    DRIVER_GEM | DRIVER_PRIME,
+	.load = sti_load,
+	.gem_free_object = drm_gem_cma_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.dumb_create = drm_gem_cma_dumb_create,
+	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.fops = &sti_driver_fops,
+
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = sti_crtc_enable_vblank,
+	.disable_vblank = sti_crtc_disable_vblank,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = sti_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int sti_bind(struct device *dev)
+{
+	return drm_platform_init(&sti_driver, to_platform_device(dev));
+}
+
+static void sti_unbind(struct device *dev)
+{
+	drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops sti_ops = {
+	.bind = sti_bind,
+	.unbind = sti_unbind,
+};
+
+static int sti_platform_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *child_np;
+	struct component_match *match = NULL;
+
+	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+	of_platform_populate(node, NULL, NULL, dev);
+
+	child_np = of_get_next_available_child(node, NULL);
+
+	while (child_np) {
+		component_match_add(dev, &match, compare_of, child_np);
+		of_node_put(child_np);
+		child_np = of_get_next_available_child(node, child_np);
+	}
+
+	return component_master_add_with_match(dev, &sti_ops, match);
+}
+
+static int sti_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &sti_ops);
+	of_platform_depopulate(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id sti_dt_ids[] = {
+	{ .compatible = "st,sti-display-subsystem", },
+	{ /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, sti_dt_ids);
+
+static struct platform_driver sti_platform_driver = {
+	.probe = sti_platform_probe,
+	.remove = sti_platform_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = sti_dt_ids,
+	},
+};
+
+module_platform_driver(sti_platform_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
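Note on the commit flow above: sti_atomic_schedule() is used by
sti_atomic_commit() but defined earlier in sti_drv.c, outside this hunk.
Judging from how commit.state and commit.work are used here, it presumably
just stashes the state and queues the worker; a hedged sketch, not the
verbatim driver code:

static void sti_atomic_schedule(struct sti_private *private,
				struct drm_atomic_state *state)
{
	/* Stash the state for sti_atomic_work(), then queue the worker;
	 * flush_work() in sti_atomic_commit() serializes against it. */
	private->commit.state = state;
	schedule_work(&private->commit.work);
}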
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
new file mode 100644
index 0000000..9372f69
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRV_H_
+#define _STI_DRV_H_
+
+#include <drm/drmP.h>
+
+struct sti_compositor;
+struct sti_tvout;
+
+/**
+ * STI drm private structure
+ * This structure is stored as private in the drm_device
+ *
+ * @compo:                 compositor
+ * @plane_zorder_property: z-order property for CRTC planes
+ * @drm_dev:               drm device
+ */
+struct sti_private {
+	struct sti_compositor *compo;
+	struct drm_property *plane_zorder_property;
+	struct drm_device *drm_dev;
+
+	struct {
+		struct drm_atomic_state *state;
+		struct work_struct work;
+		struct mutex lock;
+	} commit;
+};
+
+#endif
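sti_atomic_work() above recovers its sti_private from the embedded
work_struct via container_of(). A minimal standalone illustration of the
idiom, with made-up struct names (compiles as plain C):

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct private_data {
	int id;
	struct work commit_work;	/* embedded, like commit.work */
};

static void work_fn(struct work *w)
{
	/* Recover the enclosing structure from the embedded member. */
	struct private_data *priv =
		container_of(w, struct private_data, commit_work);

	printf("work for private %d\n", priv->id);
}

int main(void)
{
	struct private_data priv = { .id = 42 };

	work_fn(&priv.commit_work);	/* prints "work for private 42" */
	return 0;
}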
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906f..9365670 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
 #include "sti_compositor.h"
 #include "sti_gdp.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 #define ALPHASWITCH     BIT(6)
@@ -26,7 +29,7 @@
 #define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB8565    0x04
 #define GDP_ARGB8888    0x05
-#define GDP_ABGR8888	(GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
+#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB1555    0x06
 #define GDP_ARGB4444    0x07
 #define GDP_CLUT8       0x0B
@@ -53,8 +56,8 @@
 #define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
 #define GAM_GDP_SIZE_MAX        0x7FF
 
-#define GDP_NODE_NB_BANK	2
-#define GDP_NODE_PER_FIELD	2
+#define GDP_NODE_NB_BANK        2
+#define GDP_NODE_PER_FIELD      2
 
 struct sti_gdp_node {
 	u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@
 /**
  * STI GDP structure
  *
- * @layer:		layer structure
+ * @plane:              sti_plane structure
+ * @dev:                driver device
+ * @regs:               gdp registers
  * @clk_pix:            pixel clock for the current gdp
  * @clk_main_parent:    gdp parent clock if main path used
  * @clk_aux_parent:     gdp parent clock if aux path used
  * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
  * @is_curr_top:        true if the current node processed is the top field
- * @node_list:		array of node list
+ * @node_list:          array of node list
  */
 struct sti_gdp {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	struct clk *clk_pix;
 	struct clk *clk_main_parent;
 	struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@
 	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
 };
 
-#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
 
 static const uint32_t gdp_supported_formats[] = {
 	DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@
 	DRM_FORMAT_C8,
 };
 
-static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
-{
-	return gdp_supported_formats;
-}
-
-static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(gdp_supported_formats);
-}
-
 static int sti_gdp_fourcc2format(int fourcc)
 {
 	switch (fourcc) {
@@ -175,20 +172,19 @@
 
 /**
  * sti_gdp_get_free_nodes
- * @layer: gdp layer
+ * @gdp: gdp pointer
  *
  * Look for a GDP node list that is not currently read by the HW.
  *
  * RETURNS:
  * Pointer to the free GDP node list
  */
-static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -199,7 +195,7 @@
 
 	/* in hazardious cases restart with the first node */
 	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
-			sti_layer_to_str(layer), hw_nvn);
+			sti_plane_to_str(&gdp->plane), hw_nvn);
 
 end:
 	return &gdp->node_list[0];
@@ -207,7 +203,7 @@
 
 /**
  * sti_gdp_get_current_nodes
- * @layer: GDP layer
+ * @gdp: gdp pointer
  *
  * Look for GDP nodes that are currently read by the HW.
  *
@@ -215,13 +211,12 @@
  * Pointer to the current GDP node list
  */
 static
-struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -232,205 +227,25 @@
 
 end:
 	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
-				hw_nvn, sti_layer_to_str(layer));
+				hw_nvn, sti_plane_to_str(&gdp->plane));
 
 	return NULL;
 }
 
 /**
- * sti_gdp_prepare_layer
- * @lay: gdp layer
- * @first_prepare: true if it is the first time this function is called
- *
- * Update the free GDP node list according to the layer properties.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
-	struct sti_gdp_node_list *list;
-	struct sti_gdp_node *top_field, *btm_field;
-	struct drm_display_mode *mode = layer->mode;
-	struct device *dev = layer->dev;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	int format;
-	unsigned int depth, bpp;
-	int rate = mode->clock * 1000;
-	int res;
-	u32 ydo, xdo, yds, xds;
-
-	list = sti_gdp_get_free_nodes(layer);
-	top_field = list->top_field;
-	btm_field = list->btm_field;
-
-	dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
-			sti_layer_to_str(layer), top_field, btm_field);
-
-	/* Build the top field from layer params */
-	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
-	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
-	format = sti_gdp_fourcc2format(layer->format);
-	if (format == -1) {
-		DRM_ERROR("Format not supported by GDP %.4s\n",
-			  (char *)&layer->format);
-		return 1;
-	}
-	top_field->gam_gdp_ctl |= format;
-	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
-	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
-
-	/* pixel memory location */
-	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
-	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
-	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
-	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
-
-	/* input parameters */
-	top_field->gam_gdp_pmp = layer->pitches[0];
-	top_field->gam_gdp_size =
-	    clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
-	    clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
-
-	/* output parameters */
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
-	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
-	top_field->gam_gdp_vps = (yds << 16) | xds;
-
-	/* Same content and chained together */
-	memcpy(btm_field, top_field, sizeof(*btm_field));
-	top_field->gam_gdp_nvn = list->btm_field_paddr;
-	btm_field->gam_gdp_nvn = list->top_field_paddr;
-
-	/* Interlaced mode */
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
-		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
-		    layer->pitches[0];
-
-	if (first_prepare) {
-		/* Register gdp callback */
-		if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
-				compo->vtg_main : compo->vtg_aux,
-				&gdp->vtg_field_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return 1;
-		}
-
-		/* Set and enable gdp clock */
-		if (gdp->clk_pix) {
-			struct clk *clkp;
-			/* According to the mixer used, the gdp pixel clock
-			 * should have a different parent clock. */
-			if (layer->mixer_id == STI_MIXER_MAIN)
-				clkp = gdp->clk_main_parent;
-			else
-				clkp = gdp->clk_aux_parent;
-
-			if (clkp)
-				clk_set_parent(gdp->clk_pix, clkp);
-
-			res = clk_set_rate(gdp->clk_pix, rate);
-			if (res < 0) {
-				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
-						rate);
-				return 1;
-			}
-
-			if (clk_prepare_enable(gdp->clk_pix)) {
-				DRM_ERROR("Failed to prepare/enable gdp\n");
-				return 1;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_commit_layer
- * @lay: gdp layer
- *
- * Update the NVN field of the 'right' field of the current GDP node (being
- * used by the HW) with the address of the updated ('free') top field GDP node.
- * - In interlaced mode the 'right' field is the bottom field as we update
- *   frames starting from their top field
- * - In progressive mode, we update both bottom and top fields which are
- *   equal nodes.
- * At the next VSYNC, the updated node list will be used by the HW.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_commit_layer(struct sti_layer *layer)
-{
-	struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
-	struct sti_gdp_node *updated_top_node = updated_list->top_field;
-	struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	u32 dma_updated_top = updated_list->top_field_paddr;
-	u32 dma_updated_btm = updated_list->btm_field_paddr;
-	struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
-
-	dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
-			sti_layer_to_str(layer),
-			updated_top_node, updated_btm_node);
-	dev_dbg(layer->dev, "Current NVN:0x%X\n",
-		readl(layer->regs + GAM_GDP_NVN_OFFSET));
-	dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
-		(unsigned long)layer->paddr,
-		readl(layer->regs + GAM_GDP_PML_OFFSET));
-
-	if (curr_list == NULL) {
-		/* First update or invalid node should directly write in the
-		 * hw register */
-		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
-				sti_layer_to_str(layer));
-
-		writel(gdp->is_curr_top == true ?
-				dma_updated_btm : dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		return 0;
-	}
-
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		if (gdp->is_curr_top == true) {
-			/* Do not update in the middle of the frame, but
-			 * postpone the update after the bottom field has
-			 * been displayed */
-			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
-		} else {
-			/* Direct update to avoid one frame delay */
-			writel(dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		}
-	} else {
-		/* Direct update for progressive to avoid one frame delay */
-		writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_disable_layer
- * @lay: gdp layer
+ * sti_gdp_disable
+ * @gdp: gdp pointer
  *
  * Disable a GDP.
- *
- * RETURNS:
- * 0 on success.
  */
-static int sti_gdp_disable_layer(struct sti_layer *layer)
+static void sti_gdp_disable(struct sti_gdp *gdp)
 {
+	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	unsigned int i;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(layer->dev);
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
 
 	/* Set the nodes as 'to be ignored on mixer' */
 	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@
 		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
 	}
 
-	if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+	if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
 			compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	if (gdp->clk_pix)
 		clk_disable_unprepare(gdp->clk_pix);
 
-	return 0;
+	gdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -464,6 +279,14 @@
 {
 	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
 
+	if (gdp->plane.status == STI_PLANE_FLUSHING) {
+		/* disabling must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&gdp->plane));
+
+		sti_gdp_disable(gdp);
+	}
+
 	switch (event) {
 	case VTG_TOP_FIELD_EVENT:
 		gdp->is_curr_top = true;
@@ -479,10 +302,9 @@
 	return 0;
 }
 
-static void sti_gdp_init(struct sti_layer *layer)
+static void sti_gdp_init(struct sti_gdp *gdp)
 {
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct device_node *np = layer->dev->of_node;
+	struct device_node *np = gdp->dev->of_node;
 	dma_addr_t dma_addr;
 	void *base;
 	unsigned int i, size;
@@ -490,8 +312,8 @@
 	/* Allocate all the nodes within a single memory page */
 	size = sizeof(struct sti_gdp_node) *
 	    GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-	base = dma_alloc_writecombine(layer->dev,
-			size, &dma_addr, GFP_KERNEL | GFP_DMA);
+	base = dma_alloc_writecombine(gdp->dev,
+				      size, &dma_addr, GFP_KERNEL | GFP_DMA);
 
 	if (!base) {
 		DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@
 		/* GDP of STiH407 chip have its own pixel clock */
 		char *clk_name;
 
-		switch (layer->desc) {
+		switch (gdp->plane.desc) {
 		case STI_GDP_0:
 			clk_name = "pix_gdp1";
 			break;
@@ -544,32 +366,249 @@
 			return;
 		}
 
-		gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
 		if (IS_ERR(gdp->clk_pix))
 			DRM_ERROR("Cannot get %s clock\n", clk_name);
 
-		gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
 		if (IS_ERR(gdp->clk_main_parent))
 			DRM_ERROR("Cannot get main_parent clock\n");
 
-		gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
 		if (IS_ERR(gdp->clk_aux_parent))
 			DRM_ERROR("Cannot get aux_parent clock\n");
 	}
 }
 
-static const struct sti_layer_funcs gdp_ops = {
-	.get_formats = sti_gdp_get_formats,
-	.get_nb_formats = sti_gdp_get_nb_formats,
-	.init = sti_gdp_init,
-	.prepare = sti_gdp_prepare_layer,
-	.commit = sti_gdp_commit_layer,
-	.disable = sti_gdp_disable_layer,
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+				  struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+	struct drm_framebuffer *fb =  state->fb;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct sti_mixer *mixer;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_gdp_node_list *list;
+	struct sti_gdp_node_list *curr_list;
+	struct sti_gdp_node *top_field, *btm_field;
+	u32 dma_updated_top;
+	u32 dma_updated_btm;
+	int format;
+	unsigned int depth, bpp;
+	u32 ydo, xdo, yds, xds;
+	int res;
+
+	/* Handle the case where the crtc is NULL (plane disabled) */
+	if (!crtc)
+		return;
+
+	mixer = to_sti_mixer(crtc);
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_* coordinates are in 16.16 fixed-point format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	list = sti_gdp_get_free_nodes(gdp);
+	top_field = list->top_field;
+	btm_field = list->btm_field;
+
+	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+		sti_plane_to_str(plane), top_field, btm_field);
+
+	/* build the top field */
+	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+	format = sti_gdp_fourcc2format(fb->pixel_format);
+	if (format == -1) {
+		DRM_ERROR("Format not supported by GDP %.4s\n",
+			  (char *)&fb->pixel_format);
+		return;
+	}
+	top_field->gam_gdp_ctl |= format;
+	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* pixel memory location */
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+	top_field->gam_gdp_pml += src_x * (bpp >> 3);
+	top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+	/* input parameters */
+	top_field->gam_gdp_pmp = fb->pitches[0];
+	top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+				  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
+
+	/* output parameters */
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+	top_field->gam_gdp_vps = (yds << 16) | xds;
+
+	/* Same content and chained together */
+	memcpy(btm_field, top_field, sizeof(*btm_field));
+	top_field->gam_gdp_nvn = list->btm_field_paddr;
+	btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+	/* Interlaced mode */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+					 fb->pitches[0];
+
+	if (first_prepare) {
+		/* Register gdp callback */
+		if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
+				compo->vtg_main : compo->vtg_aux,
+				&gdp->vtg_field_nb, mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+
+		/* Set and enable gdp clock */
+		if (gdp->clk_pix) {
+			struct clk *clkp;
+			int rate = mode->clock * 1000;
+
+			/* The gdp pixel clock parent depends on
+			 * which mixer is in use. */
+			if (mixer->id == STI_MIXER_MAIN)
+				clkp = gdp->clk_main_parent;
+			else
+				clkp = gdp->clk_aux_parent;
+
+			if (clkp)
+				clk_set_parent(gdp->clk_pix, clkp);
+
+			res = clk_set_rate(gdp->clk_pix, rate);
+			if (res < 0) {
+				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+					  rate);
+				return;
+			}
+
+			if (clk_prepare_enable(gdp->clk_pix)) {
+				DRM_ERROR("Failed to prepare/enable gdp\n");
+				return;
+			}
+		}
+	}
+
+	/* Update the NVN field of the 'right' field of the current GDP node
+	 * (being used by the HW) with the address of the updated ('free') top
+	 * field GDP node.
+	 * - In interlaced mode the 'right' field is the bottom field as we
+	 *   update frames starting from their top field
+	 * - In progressive mode, we update both bottom and top fields which
+	 *   are equal nodes.
+	 * At the next VSYNC, the updated node list will be used by the HW.
+	 */
+	curr_list = sti_gdp_get_current_nodes(gdp);
+	dma_updated_top = list->top_field_paddr;
+	dma_updated_btm = list->btm_field_paddr;
+
+	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
+		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
+		(unsigned long)cma_obj->paddr,
+		readl(gdp->regs + GAM_GDP_PML_OFFSET));
+
+	if (!curr_list) {
+		/* On the first update, or for an invalid node, write
+		 * directly to the hw register */
+		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+				 sti_plane_to_str(plane));
+
+		writel(gdp->is_curr_top ?
+				dma_updated_btm : dma_updated_top,
+				gdp->regs + GAM_GDP_NVN_OFFSET);
+		goto end;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (gdp->is_curr_top) {
+			/* Do not update in the middle of the frame, but
+			 * postpone the update until the bottom field has
+			 * been displayed */
+			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+		} else {
+			/* Direct update to avoid one frame delay */
+			writel(dma_updated_top,
+			       gdp->regs + GAM_GDP_NVN_OFFSET);
+		}
+	} else {
+		/* Direct update for progressive to avoid one frame delay */
+		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
+	}
+
+end:
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
+				   struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+	.atomic_update = sti_gdp_atomic_update,
+	.atomic_disable = sti_gdp_atomic_disable,
 };
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id)
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type)
 {
 	struct sti_gdp *gdp;
+	int res;
 
 	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
 	if (!gdp) {
@@ -577,8 +616,33 @@
 		return NULL;
 	}
 
-	gdp->layer.ops = &gdp_ops;
+	gdp->dev = dev;
+	gdp->regs = baseaddr;
+	gdp->plane.desc = desc;
+	gdp->plane.status = STI_PLANE_DISABLED;
+
 	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
 
-	return (struct sti_layer *)gdp;
+	sti_gdp_init(gdp);
+
+	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       gdp_supported_formats,
+				       ARRAY_SIZE(gdp_supported_formats),
+				       type);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err;
+	}
+
+	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
+
+	sti_plane_init_property(&gdp->plane, type);
+
+	return &gdp->plane.drm_plane;
+
+err:
+	devm_kfree(dev, gdp);
+	return NULL;
 }
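Context for the node-bank handling above: the GDP hardware publishes, in
its NVN register, the physical address of the node it will fetch next, and
the driver keeps GDP_NODE_NB_BANK (2) banks so it can always build into the
bank the hardware is not reading. A standalone sketch of the selection done
by sti_gdp_get_free_nodes(), with illustrative addresses:

#include <stdint.h>
#include <stdio.h>

#define NB_BANK 2

struct node_list {
	uint32_t top_field_paddr;	/* physical address of top-field node */
	uint32_t btm_field_paddr;	/* physical address of btm-field node */
};

/* Return the bank the HW is *not* about to read, as told by hw_nvn. */
static const struct node_list *get_free_nodes(const struct node_list *banks,
					      uint32_t hw_nvn)
{
	unsigned int i;

	for (i = 0; i < NB_BANK; i++)
		if (hw_nvn != banks[i].btm_field_paddr &&
		    hw_nvn != banks[i].top_field_paddr)
			return &banks[i];

	return &banks[0];	/* inconsistent NVN: restart with bank 0 */
}

int main(void)
{
	struct node_list banks[NB_BANK] = {
		{ 0x1000, 0x1100 },
		{ 0x2000, 0x2100 },
	};

	/* HW is reading bank 0's top field, so bank 1 is the free one. */
	printf("free bank paddr: 0x%x\n",
	       get_free_nodes(banks, 0x1000)->top_field_paddr);
	return 0;
}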
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab682..73947a4 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
 
 #include <linux/types.h>
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id);
-
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type);
 #endif
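Both sti_gdp_atomic_update() above and sti_hqvdp_atomic_update() below
shift the plane state's src_* values right by 16 because DRM stores plane
source coordinates as 16.16 fixed point. A tiny standalone example:

#include <stdint.h>
#include <stdio.h>

/* The top 16 bits hold the integer pixel value; this mirrors the
 * '>> 16' used in the driver (the helper name is illustrative). */
static int fixed16_to_int(uint32_t v)
{
	return v >> 16;
}

int main(void)
{
	uint32_t src_x = (64u << 16) | 0x8000;	/* 64.5 in 16.16 */

	printf("src_x = %d\n", fixed16_to_int(src_x));	/* prints 64 */
	return 0;
}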
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d5..09e29e4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@
 	return count;
 
 fail:
-	DRM_ERROR("Can not read HDMI EDID\n");
+	DRM_ERROR("Can't read HDMI EDID\n");
 	return 0;
 }
 
@@ -693,21 +693,8 @@
 	struct sti_hdmi_connector *connector;
 	struct drm_connector *drm_connector;
 	struct drm_bridge *bridge;
-	struct device_node *ddc;
 	int err;
 
-	ddc = of_parse_phandle(dev->of_node, "ddc", 0);
-	if (ddc) {
-		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
-		if (!hdmi->ddc_adapt) {
-			err = -EPROBE_DEFER;
-			of_node_put(ddc);
-			return err;
-		}
-
-		of_node_put(ddc);
-	}
-
 	/* Set the drm device handle */
 	hdmi->drm_dev = drm_dev;
 
@@ -796,6 +783,7 @@
 	struct sti_hdmi *hdmi;
 	struct device_node *np = dev->of_node;
 	struct resource *res;
+	struct device_node *ddc;
 	int ret;
 
 	DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@
 	if (!hdmi)
 		return -ENOMEM;
 
+	ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
+	if (ddc) {
+		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+		if (!hdmi->ddc_adapt) {
+			of_node_put(ddc);
+			return -EPROBE_DEFER;
+		}
+
+		of_node_put(ddc);
+	}
+
 	hdmi->dev = pdev->dev;
 
 	/* Get resources */
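The sti_hdmi.c change moves the DDC adapter lookup from bind time to probe
time, so an I2C adapter that is not registered yet defers the whole probe
instead of failing the component bind. A condensed sketch of the pattern
(the helper name is made up; the real code is open-coded in
sti_hdmi_probe()):

static int sti_hdmi_get_ddc(struct platform_device *pdev,
			    struct sti_hdmi *hdmi)
{
	struct device_node *ddc;

	ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
	if (!ddc)
		return 0;		/* the "ddc" phandle is optional */

	hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
	of_node_put(ddc);
	if (!hdmi->ddc_adapt)
		return -EPROBE_DEFER;	/* adapter not bound yet: retry */

	return 0;
}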
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62d..7c8f9b8 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
 #include <linux/reset.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 
-#include "sti_drm_plane.h"
-#include "sti_hqvdp.h"
+#include "sti_compositor.h"
 #include "sti_hqvdp_lut.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Firmware name */
@@ -322,8 +323,7 @@
  * @dev:               driver device
  * @drm_dev:           the drm device
  * @regs:              registers
- * @layer:             layer structure for hqvdp it self
- * @vid_plane:         VID plug used as link with compositor IP
+ * @plane:             plane structure for the hqvdp itself
  * @clk:               IP clock
  * @clk_pix_main:      pix main clock
  * @reset:             reset control
@@ -334,13 +334,13 @@
  * @hqvdp_cmd:         buffer of commands
  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
  * @vtg:               vtg for main data path
+ * @xp70_initialized:  true if xp70 is already initialized
  */
 struct sti_hqvdp {
 	struct device *dev;
 	struct drm_device *drm_dev;
 	void __iomem *regs;
-	struct sti_layer layer;
-	struct drm_plane *vid_plane;
+	struct sti_plane plane;
 	struct clk *clk;
 	struct clk *clk_pix_main;
 	struct reset_control *reset;
@@ -351,24 +351,15 @@
 	void *hqvdp_cmd;
 	dma_addr_t hqvdp_cmd_paddr;
 	struct sti_vtg *vtg;
+	bool xp70_initialized;
 };
 
-#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
 
 static const uint32_t hqvdp_supported_formats[] = {
 	DRM_FORMAT_NV12,
 };
 
-static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
-{
-	return hqvdp_supported_formats;
-}
-
-static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(hqvdp_supported_formats);
-}
-
 /**
  * sti_hqvdp_get_free_cmd
  * @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@
 
 /**
  * sti_hqvdp_check_hw_scaling
- * @layer: hqvdp layer
+ * @hqvdp: hqvdp pointer
+ * @mode: display mode with timing constraints
+ * @src_w: source width
+ * @src_h: source height
+ * @dst_w: destination width
+ * @dst_h: destination height
  *
  * Check if the HW is able to perform the scaling request
  * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@
  * RETURNS:
  * True if the HW can scale.
  */
-static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
+				       struct drm_display_mode *mode,
+				       int src_w, int src_h,
+				       int dst_w, int dst_h)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	unsigned long lfw;
 	unsigned int inv_zy;
 
-	lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
-	lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+	lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+	lfw /= max(src_w, dst_w) * mode->clock / 1000;
 
-	inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+	inv_zy = DIV_ROUND_UP(src_h, dst_h);
 
 	return (inv_zy <= lfw) ? true : false;
 }
 
 /**
- * sti_hqvdp_prepare_layer
- * @layer: hqvdp layer
- * @first_prepare: true if it is the first time this function is called
+ * sti_hqvdp_disable
+ * @hqvdp: hqvdp pointer
  *
- * Prepares a command for the firmware
- *
- * RETURNS:
- * 0 on success.
+ * Disables the HQVDP plane
  */
-static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	struct sti_hqvdp_cmd *cmd;
-	int scale_h, scale_v;
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	/* prepare and commit VID plane */
-	hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
-					layer->crtc, layer->fb,
-					layer->dst_x, layer->dst_y,
-					layer->dst_w, layer->dst_h,
-					layer->src_x, layer->src_y,
-					layer->src_w, layer->src_h);
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-	cmd = hqvdp->hqvdp_cmd + cmd_offset;
-
-	if (!sti_hqvdp_check_hw_scaling(layer)) {
-		DRM_ERROR("Scaling beyond HW capabilities\n");
-		return -EINVAL;
-	}
-
-	/* Static parameters, defaulting to progressive mode */
-	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
-	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
-	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
-	cmd->csdi.config = CSDI_CONFIG_PROG;
-
-	/* VC1RE, FMD bypassed : keep everything set to 0
-	 * IQI/P2I bypassed */
-	cmd->iqi.config = IQI_CONFIG_DFLT;
-	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
-	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
-	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
-
-	/* Buffer planes address */
-	cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
-	cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
-
-	/* Pitches */
-	cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
-			layer->pitches[0];
-	cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
-			layer->pitches[1];
-
-	/* Input / output size
-	 * Align to upper even value */
-	layer->dst_w = ALIGN(layer->dst_w, 2);
-	layer->dst_h = ALIGN(layer->dst_h, 2);
-
-	if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
-	    (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
-	    (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
-	    (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
-		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
-				layer->src_w, layer->src_h,
-				layer->dst_w, layer->dst_h);
-		return -EINVAL;
-	}
-	cmd->top.input_viewport_size = cmd->top.input_frame_size =
-			layer->src_h << 16 | layer->src_w;
-	cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
-	cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
-
-	/* Handle interlaced */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
-		/* Top field to display */
-		cmd->top.config = TOP_CONFIG_INTER_TOP;
-
-		/* Update pitches and vert size */
-		cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
-					     layer->src_w;
-		cmd->top.luma_processed_pitch *= 2;
-		cmd->top.luma_src_pitch *= 2;
-		cmd->top.chroma_processed_pitch *= 2;
-		cmd->top.chroma_src_pitch *= 2;
-
-		/* Enable directional deinterlacing processing */
-		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
-		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
-		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
-	}
-
-	/* Update hvsrc lut coef */
-	scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
-	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
-
-	scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
-	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
-
-	if (first_prepare) {
-		/* Prevent VTG shutdown */
-		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
-			DRM_ERROR("Failed to prepare/enable pix main clk\n");
-			return -ENXIO;
-		}
-
-		/* Register VTG Vsync callback to handle bottom fields */
-		if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-				sti_vtg_register_client(hqvdp->vtg,
-					&hqvdp->vtg_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return -ENXIO;
-		}
-	}
-
-	return 0;
-}
-
-static int sti_hqvdp_commit_layer(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-
-	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
-			hqvdp->regs + HQVDP_MBX_NEXT_CMD);
-
-	hqvdp->curr_field_count++;
-
-	/* Interlaced : get ready to display the bottom field at next Vsync */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
-		hqvdp->btm_field_pending = true;
-
-	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
-			__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
-
-	return 0;
-}
-
-static int sti_hqvdp_disable_layer(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int i;
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
 
 	/* Unregister VTG Vsync callback */
-	if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-		sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+	if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	/* Set next cmd to NULL */
@@ -691,15 +539,10 @@
 	/* VTG can stop now */
 	clk_disable_unprepare(hqvdp->clk_pix_main);
 
-	if (i == POLL_MAX_ATTEMPT) {
+	if (i == POLL_MAX_ATTEMPT)
 		DRM_ERROR("XP70 could not revert to idle\n");
-		return -ENXIO;
-	}
 
-	/* disable VID plane */
-	hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
-
-	return 0;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -724,6 +567,14 @@
 		return 0;
 	}
 
+	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
+		/* disabling must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&hqvdp->plane));
+
+		sti_hqvdp_disable(hqvdp);
+	}
+
 	if (hqvdp->btm_field_pending) {
 		/* Create the btm field command from the current one */
 		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@
 	return 0;
 }
 
-static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
 {
-	struct drm_plane *plane;
-
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct sti_layer *layer = to_sti_layer(plane);
-
-		if (layer->desc == id)
-			return plane;
-	}
-
-	return NULL;
-}
-
-static void sti_hqvd_init(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int size;
 
-	/* find the plane macthing with vid 0 */
-	hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
-	if (!hqvdp->vid_plane) {
-		DRM_ERROR("Cannot find Main video layer\n");
-		return;
-	}
-
 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
 
 	/* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@
 	memset(hqvdp->hqvdp_cmd, 0, size);
 }
 
-static const struct sti_layer_funcs hqvdp_ops = {
-	.get_formats = sti_hqvdp_get_formats,
-	.get_nb_formats = sti_hqvdp_get_nb_formats,
-	.init = sti_hqvd_init,
-	.prepare = sti_hqvdp_prepare_layer,
-	.commit = sti_hqvdp_commit_layer,
-	.disable = sti_hqvdp_disable_layer,
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+				    struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_* coordinates are in 16.16 fixed-point format */
+	int src_x = state->src_x >> 16;
+	int src_y = state->src_y >> 16;
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_hqvdp_cmd *cmd;
+	int scale_h, scale_v;
+	int cmd_offset;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+	if (cmd_offset == -1) {
+		DRM_ERROR("No available hqvdp_cmd now\n");
+		return;
+	}
+	cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+					src_w, src_h,
+					dst_w, dst_h)) {
+		DRM_ERROR("Scaling beyond HW capabilities\n");
+		return;
+	}
+
+	/* Static parameters, defaulting to progressive mode */
+	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+	cmd->csdi.config = CSDI_CONFIG_PROG;
+
+	/* VC1RE, FMD bypassed: keep everything set to 0
+	 * IQI/P2I bypassed */
+	cmd->iqi.config = IQI_CONFIG_DFLT;
+	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* Buffer planes address */
+	cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
+	cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+
+	/* Pitches */
+	cmd->top.luma_processed_pitch = fb->pitches[0];
+	cmd->top.luma_src_pitch = fb->pitches[0];
+	cmd->top.chroma_processed_pitch = fb->pitches[1];
+	cmd->top.chroma_src_pitch = fb->pitches[1];
+
+	/* Input / output size
+	 * Round up to an even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
+
+	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+			  src_w, src_h,
+			  dst_w, dst_h);
+		return;
+	}
+
+	cmd->top.input_viewport_size = src_h << 16 | src_w;
+	cmd->top.input_frame_size = src_h << 16 | src_w;
+	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
+	cmd->top.input_viewport_ori = src_y << 16 | src_x;
+
+	/* Handle interlaced */
+	if (fb->flags & DRM_MODE_FB_INTERLACED) {
+		/* Top field to display */
+		cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+		/* Update pitches and vert size */
+		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
+		cmd->top.luma_processed_pitch *= 2;
+		cmd->top.luma_src_pitch *= 2;
+		cmd->top.chroma_processed_pitch *= 2;
+		cmd->top.chroma_src_pitch *= 2;
+
+		/* Enable directional deinterlacing processing */
+		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+	}
+
+	/* Update hvsrc lut coef */
+	scale_h = SCALE_FACTOR * dst_w / src_w;
+	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+	scale_v = SCALE_FACTOR * dst_h / src_h;
+	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+	if (first_prepare) {
+		/* Prevent VTG shutdown */
+		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+			DRM_ERROR("Failed to prepare/enable pix main clk\n");
+			return;
+		}
+
+		/* Register VTG Vsync callback to handle bottom fields */
+		if (sti_vtg_register_client(hqvdp->vtg,
+					    &hqvdp->vtg_nb,
+					    mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+	}
+
+	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+	hqvdp->curr_field_count++;
+
+	/* Interlaced: get ready to display the bottom field at the next vsync */
+	if (fb->flags & DRM_MODE_FB_INTERLACED)
+		hqvdp->btm_field_pending = true;
+
+	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+	.atomic_update = sti_hqvdp_atomic_update,
+	.atomic_disable = sti_hqvdp_atomic_disable,
 };
 
-struct sti_layer *sti_hqvdp_create(struct device *dev)
+static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
+					  struct device *dev, int desc)
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+	int res;
 
-	hqvdp->layer.ops = &hqvdp_ops;
+	hqvdp->plane.desc = desc;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 
-	return &hqvdp->layer;
+	sti_hqvdp_init(hqvdp);
+
+	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
+				       &sti_plane_helpers_funcs,
+				       hqvdp_supported_formats,
+				       ARRAY_SIZE(hqvdp_supported_formats),
+				       DRM_PLANE_TYPE_OVERLAY);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		return NULL;
+	}
+
+	drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
+
+	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+
+	return &hqvdp->plane.drm_plane;
 }
-EXPORT_SYMBOL(sti_hqvdp_create);
 
 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
 {
@@ -859,6 +877,12 @@
 	} *header;
 
 	DRM_DEBUG_DRIVER("\n");
+
+	if (hqvdp->xp70_initialized) {
+		DRM_INFO("HQVDP XP70 already initialized\n");
+		return;
+	}
+
 	/* Check firmware parts */
 	if (!firmware) {
 		DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@
 	/* Launch Vsync */
 	writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
 
-	DRM_INFO("HQVDP XP70 started\n");
+	DRM_INFO("HQVDP XP70 initialized\n");
+
+	hqvdp->xp70_initialized = true;
+
 out:
 	release_firmware(firmware);
 }
@@ -955,7 +982,7 @@
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
-	struct sti_layer *layer;
+	struct drm_plane *plane;
 	int err;
 
 	DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@
 		return err;
 	}
 
-	layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
-	if (!layer) {
+	/* Create HQVDP plane once xp70 is initialized */
+	plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
+	if (!plane)
 		DRM_ERROR("Can't create HQVDP plane\n");
-		return -ENOMEM;
-	}
-
-	sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
 
 	return 0;
 }
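The firmware constraint checked by sti_hqvdp_check_hw_scaling() is
CEIL(1/Zy) <= FLOOR(LFW). The same integer arithmetic as a standalone
program, with illustrative clock numbers (the real IP clock rate comes
from clk_get_rate()):

#include <stdio.h>

/* Mirror of the arithmetic in sti_hqvdp_check_hw_scaling(). */
static int check_hw_scaling(unsigned long ip_clk_hz, int htotal,
			    int pix_clk_khz,
			    int src_w, int src_h, int dst_w, int dst_h)
{
	unsigned long lfw;	/* FLOOR(LFW): fw lines per output line */
	unsigned int inv_zy;	/* CEIL(1/Zy): inverse vertical zoom */

	lfw = htotal * (ip_clk_hz / 1000000);
	lfw /= (src_w > dst_w ? src_w : dst_w) * pix_clk_khz / 1000;

	inv_zy = (src_h + dst_h - 1) / dst_h;	/* DIV_ROUND_UP */

	return inv_zy <= lfw;
}

int main(void)
{
	/* 400 MHz IP clock, 2200-pixel htotal, 148.5 MHz pixel clock:
	 * a 1920x1080 -> 1920x540 downscale gives inv_zy = 2. */
	printf("scaling %s\n",
	       check_hw_scaling(400000000UL, 2200, 148500,
				1920, 1080, 1920, 540) ? "ok" : "rejected");
	return 0;
}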
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0..0000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HQVDP_H_
-#define _STI_HQVDP_H_
-
-struct sti_layer *sti_hqvdp_create(struct device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f..0000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_cursor.h"
-#include "sti_gdp.h"
-#include "sti_hqvdp.h"
-#include "sti_layer.h"
-#include "sti_vid.h"
-
-const char *sti_layer_to_str(struct sti_layer *layer)
-{
-	switch (layer->desc) {
-	case STI_GDP_0:
-		return "GDP0";
-	case STI_GDP_1:
-		return "GDP1";
-	case STI_GDP_2:
-		return "GDP2";
-	case STI_GDP_3:
-		return "GDP3";
-	case STI_VID_0:
-		return "VID0";
-	case STI_VID_1:
-		return "VID1";
-	case STI_CURSOR:
-		return "CURSOR";
-	case STI_HQVDP_0:
-		return "HQVDP0";
-	default:
-		return "<UNKNOWN LAYER>";
-	}
-}
-EXPORT_SYMBOL(sti_layer_to_str);
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-				   void __iomem *baseaddr)
-{
-
-	struct sti_layer *layer = NULL;
-
-	switch (desc & STI_LAYER_TYPE_MASK) {
-	case STI_GDP:
-		layer = sti_gdp_create(dev, desc);
-		break;
-	case STI_VID:
-		layer = sti_vid_create(dev);
-		break;
-	case STI_CUR:
-		layer = sti_cursor_create(dev);
-		break;
-	case STI_VDP:
-		layer = sti_hqvdp_create(dev);
-		break;
-	}
-
-	if (!layer) {
-		DRM_ERROR("Failed to create layer\n");
-		return NULL;
-	}
-
-	layer->desc = desc;
-	layer->dev = dev;
-	layer->regs = baseaddr;
-
-	layer->ops->init(layer);
-
-	DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
-
-	return layer;
-}
-EXPORT_SYMBOL(sti_layer_create);
-
-int sti_layer_prepare(struct sti_layer *layer,
-		      struct drm_crtc *crtc,
-		      struct drm_framebuffer *fb,
-		      struct drm_display_mode *mode, int mixer_id,
-		      int dest_x, int dest_y, int dest_w, int dest_h,
-		      int src_x, int src_y, int src_w, int src_h)
-{
-	int ret;
-	unsigned int i;
-	struct drm_gem_cma_object *cma_obj;
-
-	if (!layer || !fb || !mode) {
-		DRM_ERROR("Null fb, layer or mode\n");
-		return 1;
-	}
-
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return 1;
-	}
-
-	layer->crtc = crtc;
-	layer->fb = fb;
-	layer->mode = mode;
-	layer->mixer_id = mixer_id;
-	layer->dst_x = dest_x;
-	layer->dst_y = dest_y;
-	layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
-	layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
-	layer->src_x = src_x;
-	layer->src_y = src_y;
-	layer->src_w = src_w;
-	layer->src_h = src_h;
-	layer->format = fb->pixel_format;
-	layer->vaddr = cma_obj->vaddr;
-	layer->paddr = cma_obj->paddr;
-	for (i = 0; i < 4; i++) {
-		layer->pitches[i] = fb->pitches[i];
-		layer->offsets[i] = fb->offsets[i];
-	}
-
-	DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
-			 sti_layer_to_str(layer),
-			 layer->mixer_id);
-	DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-			 sti_layer_to_str(layer),
-			 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
-			 layer->src_w, layer->src_h, layer->src_x,
-			 layer->src_y);
-
-	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
-			 (char *)&layer->format, (unsigned long)layer->paddr);
-
-	if (!layer->ops->prepare)
-		goto err_no_prepare;
-
-	ret = layer->ops->prepare(layer, !layer->enabled);
-	if (!ret)
-		layer->enabled = true;
-
-	return ret;
-
-err_no_prepare:
-	DRM_ERROR("Cannot prepare\n");
-	return 1;
-}
-
-int sti_layer_commit(struct sti_layer *layer)
-{
-	if (!layer)
-		return 1;
-
-	if (!layer->ops->commit)
-		goto err_no_commit;
-
-	return layer->ops->commit(layer);
-
-err_no_commit:
-	DRM_ERROR("Cannot commit\n");
-	return 1;
-}
-
-int sti_layer_disable(struct sti_layer *layer)
-{
-	int ret;
-
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
-	if (!layer)
-		return 1;
-
-	if (!layer->enabled)
-		return 0;
-
-	if (!layer->ops->disable)
-		goto err_no_disable;
-
-	ret = layer->ops->disable(layer);
-	if (!ret)
-		layer->enabled = false;
-	else
-		DRM_ERROR("Disable failed\n");
-
-	return ret;
-
-err_no_disable:
-	DRM_ERROR("Cannot disable\n");
-	return 1;
-}
-
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return NULL;
-
-	if (!layer->ops->get_formats)
-		return NULL;
-
-	return layer->ops->get_formats(layer);
-}
-
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return 0;
-
-	if (!layer->ops->get_nb_formats)
-		return 0;
-
-	return layer->ops->get_nb_formats(layer);
-}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497..0000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_LAYER_H_
-#define _STI_LAYER_H_
-
-#include <drm/drmP.h>
-
-#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
-
-#define STI_LAYER_TYPE_SHIFT 8
-#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
-
-struct sti_layer;
-
-enum sti_layer_type {
-	STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
-	STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
-	STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
-	STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
-	STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
-};
-
-enum sti_layer_id_of_type {
-	STI_ID_0 = 0,
-	STI_ID_1 = 1,
-	STI_ID_2 = 2,
-	STI_ID_3 = 3
-};
-
-enum sti_layer_desc {
-	STI_GDP_0       = STI_GDP | STI_ID_0,
-	STI_GDP_1       = STI_GDP | STI_ID_1,
-	STI_GDP_2       = STI_GDP | STI_ID_2,
-	STI_GDP_3       = STI_GDP | STI_ID_3,
-	STI_VID_0       = STI_VID | STI_ID_0,
-	STI_VID_1       = STI_VID | STI_ID_1,
-	STI_HQVDP_0     = STI_VDP | STI_ID_0,
-	STI_CURSOR      = STI_CUR,
-	STI_BACK        = STI_BCK
-};
-
-/**
- * STI layer functions structure
- *
- * @get_formats:	get layer supported formats
- * @get_nb_formats:	get number of format supported
- * @init:               initialize the layer
- * @prepare:		prepare layer before rendering
- * @commit:		set layer for rendering
- * @disable:		disable layer
- */
-struct sti_layer_funcs {
-	const uint32_t* (*get_formats)(struct sti_layer *layer);
-	unsigned int (*get_nb_formats)(struct sti_layer *layer);
-	void (*init)(struct sti_layer *layer);
-	int (*prepare)(struct sti_layer *layer, bool first_prepare);
-	int (*commit)(struct sti_layer *layer);
-	int (*disable)(struct sti_layer *layer);
-};
-
-/**
- * STI layer structure
- *
- * @plane:              drm plane it is bound to (if any)
- * @fb:                 drm fb it is bound to
- * @crtc:               crtc it is bound to
- * @mode:               display mode
- * @desc:               layer type & id
- * @device:		driver device
- * @regs:		layer registers
- * @ops:                layer functions
- * @zorder:             layer z-order
- * @mixer_id:           id of the mixer used to display the layer
- * @enabled:            to know if the layer is active or not
- * @src_x src_y:        coordinates of the input (fb) area
- * @src_w src_h:        size of the input (fb) area
- * @dst_x dst_y:        coordinates of the output (crtc) area
- * @dst_w dst_h:        size of the output (crtc) area
- * @format:             format
- * @pitches:            pitch of 'planes' (eg: Y, U, V)
- * @offsets:            offset of 'planes'
- * @vaddr:              virtual address of the input buffer
- * @paddr:              physical address of the input buffer
- */
-struct sti_layer {
-	struct drm_plane plane;
-	struct drm_framebuffer *fb;
-	struct drm_crtc *crtc;
-	struct drm_display_mode *mode;
-	enum sti_layer_desc desc;
-	struct device *dev;
-	void __iomem *regs;
-	const struct sti_layer_funcs *ops;
-	int zorder;
-	int mixer_id;
-	bool enabled;
-	int src_x, src_y;
-	int src_w, src_h;
-	int dst_x, dst_y;
-	int dst_w, dst_h;
-	uint32_t format;
-	unsigned int pitches[4];
-	unsigned int offsets[4];
-	void *vaddr;
-	dma_addr_t paddr;
-};
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-			void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer,
-			struct drm_crtc *crtc,
-			struct drm_framebuffer *fb,
-			struct drm_display_mode *mode,
-			int mixer_id,
-			int dest_x, int dest_y,
-			int dest_w, int dest_h,
-			int src_x, int src_y,
-			int src_w, int src_h);
-int sti_layer_commit(struct sti_layer *layer);
-int sti_layer_disable(struct sti_layer *layer);
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
-const char *sti_layer_to_str(struct sti_layer *layer);
-
-#endif
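Although sti_layer.h is deleted, the desc encoding it defined - type in
the bits above STI_LAYER_TYPE_SHIFT, instance id in the low bits -
presumably lives on in sti_plane.h, since plane.desc is still switched on
above (STI_GDP_0, STI_HQVDP_0, ...). A standalone decoder using the same
constants:

#include <stdio.h>

#define TYPE_SHIFT	8
#define TYPE_MASK	(~((1 << TYPE_SHIFT) - 1))

enum type {
	GDP = 1 << TYPE_SHIFT,	/* STI_GDP */
	VID = 2 << TYPE_SHIFT,	/* STI_VID */
	CUR = 3 << TYPE_SHIFT,	/* STI_CUR */
	VDP = 5 << TYPE_SHIFT,	/* STI_VDP */
};

int main(void)
{
	int desc = GDP | 3;	/* STI_GDP_3 */

	printf("type=%d id=%d\n",
	       (desc & TYPE_MASK) >> TYPE_SHIFT,	/* 1, i.e. GDP */
	       desc & ~TYPE_MASK);			/* 3 */
	return 0;
}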
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84..0182e93 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@
 		return "<UNKNOWN MIXER>";
 	}
 }
+EXPORT_SYMBOL(sti_mixer_to_str);
 
 static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
 {
@@ -101,52 +102,57 @@
 	sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
 }
 
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
 {
-	int layer_id = 0, depth = layer->zorder;
+	int plane_id, depth = plane->zorder;
+	unsigned int i;
 	u32 mask, val;
 
-	if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+	if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
 		return 1;
 
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_GDP_0:
-		layer_id = GAM_DEPTH_GDP0_ID;
+		plane_id = GAM_DEPTH_GDP0_ID;
 		break;
 	case STI_GDP_1:
-		layer_id = GAM_DEPTH_GDP1_ID;
+		plane_id = GAM_DEPTH_GDP1_ID;
 		break;
 	case STI_GDP_2:
-		layer_id = GAM_DEPTH_GDP2_ID;
+		plane_id = GAM_DEPTH_GDP2_ID;
 		break;
 	case STI_GDP_3:
-		layer_id = GAM_DEPTH_GDP3_ID;
+		plane_id = GAM_DEPTH_GDP3_ID;
 		break;
-	case STI_VID_0:
 	case STI_HQVDP_0:
-		layer_id = GAM_DEPTH_VID0_ID;
-		break;
-	case STI_VID_1:
-		layer_id = GAM_DEPTH_VID1_ID;
+		plane_id = GAM_DEPTH_VID0_ID;
 		break;
 	case STI_CURSOR:
 		/* no need to set depth for cursor */
 		return 0;
 	default:
-		DRM_ERROR("Unknown layer %d\n", layer->desc);
+		DRM_ERROR("Unknown plane %d\n", plane->desc);
 		return 1;
 	}
-	mask = GAM_DEPTH_MASK_ID << (3 * depth);
-	layer_id = layer_id << (3 * depth);
+
+	/* Check whether a depth slot was already assigned to this plane */
+	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+	for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+		mask = GAM_DEPTH_MASK_ID << (3 * i);
+		if ((val & mask) == plane_id << (3 * i))
+			break;
+	}
+
+	mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
+	plane_id = plane_id << (3 * (depth - 1));
 
 	DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
-			 sti_layer_to_str(layer), depth);
+			 sti_plane_to_str(plane), depth);
 	dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
-		layer_id, mask);
+		plane_id, mask);
 
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
 	val &= ~mask;
-	val |= layer_id;
+	val |= plane_id;
 	sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
 
 	dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@
 	return 0;
 }
 
-static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
 {
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_BACK:
 		return GAM_CTL_BACK_MASK;
 	case STI_GDP_0:
@@ -189,11 +195,8 @@
 		return GAM_CTL_GDP2_MASK;
 	case STI_GDP_3:
 		return GAM_CTL_GDP3_MASK;
-	case STI_VID_0:
 	case STI_HQVDP_0:
 		return GAM_CTL_VID0_MASK;
-	case STI_VID_1:
-		return GAM_CTL_VID1_MASK;
 	case STI_CURSOR:
 		return GAM_CTL_CURSOR_MASK;
 	default:
@@ -201,17 +204,17 @@
 	}
 }
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-			       struct sti_layer *layer, bool status)
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status)
 {
 	u32 mask, val;
 
 	DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
-			 sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+			 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
 
-	mask = sti_mixer_get_layer_mask(layer);
+	mask = sti_mixer_get_plane_mask(plane);
 	if (!mask) {
-		DRM_ERROR("Can not find layer mask\n");
+		DRM_ERROR("Can't find layer mask\n");
 		return -EINVAL;
 	}
 
@@ -223,15 +226,6 @@
 	return 0;
 }
 
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
-{
-	u32 val;
-
-	DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
-	sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
-}
-
 void sti_mixer_set_matrix(struct sti_mixer *mixer)
 {
 	unsigned int i;
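
A note on the crossbar programming above, for reviewers less familiar with the GAM mixer: GAM_MIXER_CRB packs one 3-bit plane id per depth level, so changing a plane's depth means clearing the slot that currently holds the plane and writing its id into the new slot. The standalone sketch below mirrors that logic; GAM_DEPTH_GDP1_ID is an assumed value used for illustration only.

    /*
     * Standalone sketch (not kernel code) of the GAM_MIXER_CRB packing
     * used by sti_mixer_set_plane_depth(): one 3-bit plane id per depth
     * level, depth 1 in the lowest slot.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define GAM_DEPTH_MASK_ID        0x7
    #define GAM_MIXER_NB_DEPTH_LEVEL 6
    #define GAM_DEPTH_GDP1_ID        0x2	/* hypothetical id */

    static uint32_t crb_set_depth(uint32_t crb, uint32_t plane_id, int depth)
    {
    	unsigned int i;

    	/* clear any slot that already holds this plane id */
    	for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++)
    		if (((crb >> (3 * i)) & GAM_DEPTH_MASK_ID) == plane_id)
    			crb &= ~((uint32_t)GAM_DEPTH_MASK_ID << (3 * i));

    	/* depth is 1-based: depth 1 lives in bits [2:0] */
    	crb &= ~((uint32_t)GAM_DEPTH_MASK_ID << (3 * (depth - 1)));
    	crb |= plane_id << (3 * (depth - 1));

    	return crb;
    }

    int main(void)
    {
    	printf("CRB = 0x%08x\n", crb_set_depth(0, GAM_DEPTH_GDP1_ID, 3));
    	/* prints CRB = 0x00000080: 0x2 shifted into the depth-3 slot */
    	return 0;
    }
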
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b972821..efb1a9a 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 
 #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
 
+enum sti_mixer_status {
+	STI_MIXER_READY,
+	STI_MIXER_DISABLING,
+	STI_MIXER_DISABLED,
+};
+
 /**
  * STI Mixer subdevice structure
  *
@@ -23,33 +29,32 @@
  * @id: id of the mixer
  * @drm_crtc: crtc object link to the mixer
  * @pending_event: set if a flip event is pending on crtc
- * @enabled: to know if the mixer is active or not
+ * @status: current status of the mixer
  */
 struct sti_mixer {
 	struct device *dev;
 	void __iomem *regs;
 	int id;
-	struct drm_crtc	drm_crtc;
+	struct drm_crtc drm_crtc;
 	struct drm_pending_vblank_event *pending_event;
-	bool enabled;
+	enum sti_mixer_status status;
 };
 
 const char *sti_mixer_to_str(struct sti_mixer *mixer);
 
 struct sti_mixer *sti_mixer_create(struct device *dev, int id,
-		void __iomem *baseaddr);
+				   void __iomem *baseaddr);
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-		struct sti_layer *layer, bool status);
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status);
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
 int sti_mixer_active_video_area(struct sti_mixer *mixer,
-		struct drm_display_mode *mode);
+				struct drm_display_mode *mode);
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
 /* depth in Cross-bar control = z order */
-#define GAM_MIXER_NB_DEPTH_LEVEL 7
+#define GAM_MIXER_NB_DEPTH_LEVEL 6
 
 #define STI_MIXER_MAIN 0
 #define STI_MIXER_AUX  1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 0000000..d5c5e91
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *          Fabien Dessenne <fabien.dessenne@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drv.h"
+#include "sti_plane.h"
+
+/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
+enum sti_plane_desc sti_plane_default_zorder[] = {
+	STI_GDP_0,
+	STI_GDP_1,
+	STI_HQVDP_0,
+	STI_GDP_2,
+	STI_GDP_3,
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane)
+{
+	switch (plane->desc) {
+	case STI_GDP_0:
+		return "GDP0";
+	case STI_GDP_1:
+		return "GDP1";
+	case STI_GDP_2:
+		return "GDP2";
+	case STI_GDP_3:
+		return "GDP3";
+	case STI_HQVDP_0:
+		return "HQVDP0";
+	case STI_CURSOR:
+		return "CURSOR";
+	default:
+		return "<UNKNOWN PLANE>";
+	}
+}
+EXPORT_SYMBOL(sti_plane_to_str);
+
+static void sti_plane_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_plane_set_property(struct drm_plane *drm_plane,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (property == private->plane_zorder_property) {
+		plane->zorder = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct drm_property *prop;
+
+	prop = private->plane_zorder_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "zpos", 1,
+						 GAM_MIXER_NB_DEPTH_LEVEL);
+		if (!prop)
+			return;
+
+		private->plane_zorder_property = prop;
+	}
+
+	drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
+}
+
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
+		if (sti_plane_default_zorder[i] == plane->desc)
+			break;
+
+	plane->zorder = i + 1;
+
+	if (type == DRM_PLANE_TYPE_OVERLAY)
+		sti_plane_attach_zorder_property(&plane->drm_plane);
+
+	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
+			 plane->drm_plane.base.id,
+			 sti_plane_to_str(plane), plane->zorder);
+}
+EXPORT_SYMBOL(sti_plane_init_property);
+
+struct drm_plane_funcs sti_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_plane_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+EXPORT_SYMBOL(sti_plane_helpers_funcs);
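
Since sti_plane_attach_zorder_property() exposes "zpos" as a standard DRM range property, userspace can reorder overlay planes without any driver-specific ioctl. A hedged libdrm sketch follows; the driver name, plane id and property id are hypothetical, and real code would discover the ids via drmModeGetPlaneResources()/drmModeObjectGetProperties().

    /*
     * Userspace sketch: set the "zpos" range property on an overlay
     * plane through the generic DRM property API.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int main(void)
    {
    	uint32_t plane_id = 31;		/* hypothetical */
    	uint32_t zpos_prop_id = 17;	/* hypothetical */
    	int fd = drmOpen("sti", NULL);	/* assumed driver name */

    	if (fd < 0) {
    		fprintf(stderr, "drmOpen failed\n");
    		return 1;
    	}

    	/* move the plane to depth 3; the patch allows 1..6 */
    	if (drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
    				     zpos_prop_id, 3))
    		fprintf(stderr, "failed to set zpos\n");

    	drmClose(fd);
    	return 0;
    }
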
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 0000000..86f1e6f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_PLANE_H_
+#define _STI_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+extern struct drm_plane_funcs sti_plane_helpers_funcs;
+
+#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
+
+#define STI_PLANE_TYPE_SHIFT 8
+#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
+
+enum sti_plane_type {
+	STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
+	STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
+	STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
+	STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
+};
+
+enum sti_plane_id_of_type {
+	STI_ID_0 = 0,
+	STI_ID_1 = 1,
+	STI_ID_2 = 2,
+	STI_ID_3 = 3
+};
+
+enum sti_plane_desc {
+	STI_GDP_0       = STI_GDP | STI_ID_0,
+	STI_GDP_1       = STI_GDP | STI_ID_1,
+	STI_GDP_2       = STI_GDP | STI_ID_2,
+	STI_GDP_3       = STI_GDP | STI_ID_3,
+	STI_HQVDP_0     = STI_VDP | STI_ID_0,
+	STI_CURSOR      = STI_CUR,
+	STI_BACK        = STI_BCK
+};
+
+enum sti_plane_status {
+	STI_PLANE_READY,
+	STI_PLANE_UPDATED,
+	STI_PLANE_DISABLING,
+	STI_PLANE_FLUSHING,
+	STI_PLANE_DISABLED,
+};
+
+/**
+ * STI plane structure
+ *
+ * @drm_plane:          drm plane it is bound to (if any)
+ * @desc:               plane type & id
+ * @status:             current status of the plane
+ * @zorder:             plane z-order
+ */
+struct sti_plane {
+	struct drm_plane drm_plane;
+	enum sti_plane_desc desc;
+	enum sti_plane_status status;
+	int zorder;
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type);
+#endif
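
The sti_plane_desc values above encode the plane type in the bits at STI_PLANE_TYPE_SHIFT and above, with the per-type id in the bits below, so a desc can be decomposed with STI_PLANE_TYPE_MASK. A minimal standalone illustration, mirroring the definitions in sti_plane.h:

    /* Decompose an sti_plane_desc value into its type and id parts. */
    #include <stdio.h>

    #define STI_PLANE_TYPE_SHIFT 8
    #define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))

    #define STI_GDP   (1 << STI_PLANE_TYPE_SHIFT)
    #define STI_GDP_2 (STI_GDP | 2)

    int main(void)
    {
    	int desc = STI_GDP_2;

    	printf("type=0x%x id=%d\n",
    	       desc & STI_PLANE_TYPE_MASK,	/* 0x100 -> GDP */
    	       desc & ~STI_PLANE_TYPE_MASK);	/* 2 */
    	return 0;
    }
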
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc5311..c1aac8e 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
 
 /* glue registers */
 #define TVO_CSC_MAIN_M0                  0x000
@@ -473,7 +473,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	unsigned int i;
-	int ret;
 
 	tvout->drm_dev = drm_dev;
 
@@ -658,17 +657,15 @@
 
 	sti_tvout_create_encoders(drm_dev, tvout);
 
-	ret = component_bind_all(dev, drm_dev);
-	if (ret)
-		sti_tvout_destroy_encoders(tvout);
-
-	return ret;
+	return 0;
 }
 
 static void sti_tvout_unbind(struct device *dev, struct device *master,
 	void *data)
 {
-	/* do nothing */
+	struct sti_tvout *tvout = dev_get_drvdata(dev);
+
+	sti_tvout_destroy_encoders(tvout);
 }
 
 static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@
 	.unbind	= sti_tvout_unbind,
 };
 
-static int compare_of(struct device *dev, void *data)
-{
-	return dev->of_node == data;
-}
-
-static int sti_tvout_master_bind(struct device *dev)
-{
-	return 0;
-}
-
-static void sti_tvout_master_unbind(struct device *dev)
-{
-	/* do nothing */
-}
-
-static const struct component_master_ops sti_tvout_master_ops = {
-	.bind = sti_tvout_master_bind,
-	.unbind = sti_tvout_master_unbind,
-};
-
 static int sti_tvout_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct sti_tvout *tvout;
 	struct resource *res;
-	struct device_node *child_np;
-	struct component_match *match = NULL;
 
 	DRM_INFO("%s\n", __func__);
 
@@ -734,24 +709,11 @@
 
 	platform_set_drvdata(pdev, tvout);
 
-	of_platform_populate(node, NULL, NULL, dev);
-
-	child_np = of_get_next_available_child(node, NULL);
-
-	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
-		child_np = of_get_next_available_child(node, child_np);
-	}
-
-	component_master_add_with_match(dev, &sti_tvout_master_ops, match);
-
 	return component_add(dev, &sti_tvout_ops);
 }
 
 static int sti_tvout_remove(struct platform_device *pdev)
 {
-	component_master_del(&pdev->dev, &sti_tvout_master_ops);
 	component_del(&pdev->dev, &sti_tvout_ops);
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a..a8254cc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vid.h"
 #include "sti_vtg.h"
 
@@ -43,35 +43,37 @@
 #define VID_MPR2_BT709          0x07150545
 #define VID_MPR3_BT709          0x00000AE8
 
-static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state)
 {
-	u32 val;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	u32 val, ydo, xdo, yds, xds;
+
+	/* Input / output size
+	 * Align to upper even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
 
 	/* Unmask */
 	val = readl(vid->regs + VID_CTL);
 	val &= ~VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
 
-	return 0;
-}
-
-static int sti_vid_commit_layer(struct sti_layer *vid)
-{
-	struct drm_display_mode *mode = vid->mode;
-	u32 ydo, xdo, yds, xds;
-
-	ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
-	yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
 
 	writel((ydo << 16) | xdo, vid->regs + VID_VPO);
 	writel((yds << 16) | xds, vid->regs + VID_VPS);
-
-	return 0;
 }
 
-static int sti_vid_disable_layer(struct sti_layer *vid)
+void sti_vid_disable(struct sti_vid *vid)
 {
 	u32 val;
 
@@ -79,21 +81,9 @@
 	val = readl(vid->regs + VID_CTL);
 	val |= VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
-
-	return 0;
 }
 
-static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
-{
-	return NULL;
-}
-
-static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
-{
-	return 0;
-}
-
-static void sti_vid_init(struct sti_layer *vid)
+static void sti_vid_init(struct sti_vid *vid)
 {
 	/* Enable PSI, Mask layer */
 	writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@
 	writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
 }
 
-static const struct sti_layer_funcs vid_ops = {
-	.get_formats = sti_vid_get_formats,
-	.get_nb_formats = sti_vid_get_nb_formats,
-	.init = sti_vid_init,
-	.prepare = sti_vid_prepare_layer,
-	.commit = sti_vid_commit_layer,
-	.disable = sti_vid_disable_layer,
-};
-
-struct sti_layer *sti_vid_create(struct device *dev)
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr)
 {
-	struct sti_layer *vid;
+	struct sti_vid *vid;
 
 	vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
 	if (!vid) {
@@ -132,7 +114,11 @@
 		return NULL;
 	}
 
-	vid->ops = &vid_ops;
+	vid->dev = dev;
+	vid->regs = baseaddr;
+	vid->id = id;
+
+	sti_vid_init(vid);
 
 	return vid;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd..5dea479 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
 #ifndef _STI_VID_H_
 #define _STI_VID_H_
 
-struct sti_layer *sti_vid_create(struct device *dev);
+/**
+ * STI VID structure
+ *
+ * @dev:   driver device
+ * @regs:  vid registers
+ * @id:    id of the vid
+ */
+struct sti_vid {
+	struct device *dev;
+	void __iomem *regs;
+	int id;
+};
+
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state);
+void sti_vid_disable(struct sti_vid *vid);
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr);
 
 #endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a287e4f..ddefb85 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@
 	return NULL;
 }
 
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+	stats->frames = 0;
+	stats->vblank = 0;
+	stats->underflow = 0;
+	stats->overflow = 0;
+}
+
 /*
  * Reads the active copy of a register. This takes the dc->lock spinlock to
  * prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@
 	/* position the cursor */
 	value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
 	tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
 }
 
 static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@
 		return ERR_PTR(-ENOMEM);
 
 	/*
-	 * We'll treat the cursor as an overlay plane with index 6 here so
-	 * that the update and activation request bits in DC_CMD_STATE_CONTROL
-	 * match up.
+	 * This index is kind of fake. The cursor isn't a regular plane, but
+	 * its update and activation request bits in DC_CMD_STATE_CONTROL do
+	 * use the same programming. Setting this fake index here allows the
+	 * code in tegra_add_plane_state() to do the right thing without the
+	 * need to special-case the cursor plane.
 	 */
 	plane->index = 6;
 
@@ -1015,6 +1024,8 @@
 		crtc->state = &state->base;
 		crtc->state->crtc = crtc;
 	}
+
+	drm_crtc_vblank_reset(crtc);
 }
 
 static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@
 	.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
 };
 
-static void tegra_dc_stop(struct tegra_dc *dc)
-{
-	u32 value;
-
-	/* stop the display controller */
-	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
-	value &= ~DISP_CTRL_MODE_MASK;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
-	tegra_dc_commit(dc);
-}
-
-static bool tegra_dc_idle(struct tegra_dc *dc)
-{
-	u32 value;
-
-	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
-
-	return (value & DISP_CTRL_MODE_MASK) == 0;
-}
-
-static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
-{
-	timeout = jiffies + msecs_to_jiffies(timeout);
-
-	while (time_before(jiffies, timeout)) {
-		if (tegra_dc_idle(dc))
-			return 0;
-
-		usleep_range(1000, 2000);
-	}
-
-	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
-	return -ETIMEDOUT;
-}
-
-static void tegra_crtc_disable(struct drm_crtc *crtc)
-{
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	u32 value;
-
-	if (!tegra_dc_idle(dc)) {
-		tegra_dc_stop(dc);
-
-		/*
-		 * Ignore the return value, there isn't anything useful to do
-		 * in case this fails.
-		 */
-		tegra_dc_wait_idle(dc, 100);
-	}
-
-	/*
-	 * This should really be part of the RGB encoder driver, but clearing
-	 * these bits has the side-effect of stopping the display controller.
-	 * When that happens no VBLANK interrupts will be raised. At the same
-	 * time the encoder is disabled before the display controller, so the
-	 * above code is always going to timeout waiting for the controller
-	 * to go idle.
-	 *
-	 * Given the close coupling between the RGB encoder and the display
-	 * controller doing it here is still kind of okay. None of the other
-	 * encoder drivers require these bits to be cleared.
-	 *
-	 * XXX: Perhaps given that the display controller is switched off at
-	 * this point anyway maybe clearing these bits isn't even useful for
-	 * the RGB encoder?
-	 */
-	if (dc->rgb) {
-		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
-		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
-		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-	}
-
-	drm_crtc_vblank_off(crtc);
-}
-
-static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted)
-{
-	return true;
-}
-
 static int tegra_dc_set_timings(struct tegra_dc *dc,
 				struct drm_display_mode *mode)
 {
@@ -1229,7 +1156,85 @@
 	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
 }
 
-static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+	u32 value;
+
+	/* stop the display controller */
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+	u32 value;
+
+	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+	return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		if (tegra_dc_idle(dc))
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+	return -ETIMEDOUT;
+}
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	u32 value;
+
+	if (!tegra_dc_idle(dc)) {
+		tegra_dc_stop(dc);
+
+		/*
+		 * Ignore the return value, there isn't anything useful to do
+		 * in case this fails.
+		 */
+		tegra_dc_wait_idle(dc, 100);
+	}
+
+	/*
+	 * This should really be part of the RGB encoder driver, but clearing
+	 * these bits has the side-effect of stopping the display controller.
+	 * When that happens no VBLANK interrupts will be raised. At the same
+	 * time the encoder is disabled before the display controller, so the
+	 * above code is always going to time out waiting for the controller
+	 * to go idle.
+	 *
+	 * Given the close coupling between the RGB encoder and the display
+	 * controller doing it here is still kind of okay. None of the other
+	 * encoder drivers require these bits to be cleared.
+	 *
+	 * XXX: Perhaps given that the display controller is switched off at
+	 * this point anyway maybe clearing these bits isn't even useful for
+	 * the RGB encoder?
+	 */
+	if (dc->rgb) {
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+	}
+
+	tegra_dc_stats_reset(&dc->stats);
+	drm_crtc_vblank_off(crtc);
+}
+
+static void tegra_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
 
 	tegra_dc_commit(dc);
-}
 
-static void tegra_crtc_prepare(struct drm_crtc *crtc)
-{
-	drm_crtc_vblank_off(crtc);
-}
-
-static void tegra_crtc_commit(struct drm_crtc *crtc)
-{
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -1277,7 +1274,8 @@
 	return 0;
 }
 
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 
@@ -1291,7 +1289,8 @@
 	}
 }
 
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1302,10 +1301,7 @@
 
 static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
 	.disable = tegra_crtc_disable,
-	.mode_fixup = tegra_crtc_mode_fixup,
-	.mode_set_nofb = tegra_crtc_mode_set_nofb,
-	.prepare = tegra_crtc_prepare,
-	.commit = tegra_crtc_commit,
+	.enable = tegra_crtc_enable,
 	.atomic_check = tegra_crtc_atomic_check,
 	.atomic_begin = tegra_crtc_atomic_begin,
 	.atomic_flush = tegra_crtc_atomic_flush,
@@ -1323,6 +1319,7 @@
 		/*
 		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
 		*/
+		dc->stats.frames++;
 	}
 
 	if (status & VBLANK_INT) {
@@ -1331,12 +1328,21 @@
 		*/
 		drm_crtc_handle_vblank(&dc->base);
 		tegra_dc_finish_page_flip(dc);
+		dc->stats.vblank++;
 	}
 
 	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
 		/*
 		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
 		*/
+		dc->stats.underflow++;
+	}
+
+	if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+		/*
+		dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+		*/
+		dc->stats.overflow++;
 	}
 
 	return IRQ_HANDLED;
@@ -1346,6 +1352,14 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-40s %#05x %08x\n", #name, name,	\
@@ -1566,11 +1580,59 @@
 
 #undef DUMP_REG
 
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+	u32 value;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+	tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+	tegra_dc_commit(dc);
+
+	drm_crtc_wait_one_vblank(&dc->base);
+	drm_crtc_wait_one_vblank(&dc->base);
+
+	value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+	seq_printf(s, "%08x\n", value);
+
+	tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+
+	seq_printf(s, "frames: %lu\n", dc->stats.frames);
+	seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+	seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+	seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
 	return 0;
 }
 
 static struct drm_info_list debugfs_files[] = {
 	{ "regs", tegra_dc_show_regs, 0, NULL },
+	{ "crc", tegra_dc_show_crc, 0, NULL },
+	{ "stats", tegra_dc_show_stats, 0, NULL },
 };
 
 static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1716,7 +1778,8 @@
 		tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
 	}
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
 
 	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1732,15 +1795,19 @@
 		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
 	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
 
-	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 
 	if (dc->soc->supports_border_color)
 		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
 
+	tegra_dc_stats_reset(&dc->stats);
+
 	return 0;
 
 cleanup:
@@ -1826,8 +1893,20 @@
 	.has_powergate = true,
 };
 
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+	.supports_border_color = false,
+	.supports_interlacing = true,
+	.supports_cursor = true,
+	.supports_block_linear = true,
+	.pitch_align = 64,
+	.has_powergate = true,
+};
+
 static const struct of_device_id tegra_dc_of_match[] = {
 	{
+		.compatible = "nvidia,tegra210-dc",
+		.data = &tegra210_dc_soc_info,
+	}, {
 		.compatible = "nvidia,tegra124-dc",
 		.data = &tegra124_dc_soc_info,
 	}, {
@@ -1957,6 +2036,10 @@
 		return -ENXIO;
 	}
 
+	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+	if (!dc->syncpt)
+		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
 	INIT_LIST_HEAD(&dc->client.list);
 	dc->client.ops = &dc_client_ops;
 	dc->client.dev = &pdev->dev;
@@ -1974,10 +2057,6 @@
 		return err;
 	}
 
-	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
-	if (!dc->syncpt)
-		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
-
 	platform_set_drvdata(pdev, dc);
 
 	return 0;
@@ -2016,7 +2095,6 @@
 struct platform_driver tegra_dc_driver = {
 	.driver = {
 		.name = "tegra-dc",
-		.owner = THIS_MODULE,
 		.of_match_table = tegra_dc_of_match,
 	},
 	.probe = tegra_dc_probe,
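
For context on the new "crc" debugfs file: the capture sequence arms DC_COM_CRC_CONTROL over the active data region, waits two VBLANKs so a full frame's checksum can latch, reads DC_COM_CRC_CHECKSUM, then disarms. A user-space-compilable sketch with stubbed accessors; the dc_*() helpers and wait_one_vblank() stand in for tegra_dc_writel()/tegra_dc_readl() and drm_crtc_wait_one_vblank().

    /* Sketch of the debugfs CRC capture sequence, with register stubs. */
    #include <stdio.h>
    #include <stdint.h>

    #define DC_COM_CRC_CONTROL             0x300
    #define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
    #define DC_COM_CRC_CONTROL_ENABLE      (1 << 0)
    #define DC_COM_CRC_CHECKSUM            0x301

    static uint32_t regs[0x400];

    static void dc_writel(uint32_t value, unsigned int offset) { regs[offset] = value; }
    static uint32_t dc_readl(unsigned int offset) { return regs[offset]; }
    /* stub: pretend the engine latched a checksum after a frame */
    static void wait_one_vblank(void) { regs[DC_COM_CRC_CHECKSUM] = 0x1234abcd; }

    int main(void)
    {
    	/* arm the CRC engine over the active data region */
    	dc_writel(DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE,
    		  DC_COM_CRC_CONTROL);

    	/* a full frame must scan out before the checksum is valid */
    	wait_one_vblank();
    	wait_one_vblank();

    	printf("%08x\n", dc_readl(DC_COM_CRC_CHECKSUM));

    	dc_writel(0, DC_COM_CRC_CONTROL);	/* disarm */
    	return 0;
    }
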
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792da..4a26863 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
 #define DC_CMD_REG_ACT_CONTROL			0x043
 
 #define DC_COM_CRC_CONTROL			0x300
+#define  DC_COM_CRC_CONTROL_ALWAYS      (1 << 3)
+#define  DC_COM_CRC_CONTROL_FULL_FRAME  (0 << 2)
+#define  DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define  DC_COM_CRC_CONTROL_WAIT        (1 << 1)
+#define  DC_COM_CRC_CONTROL_ENABLE      (1 << 0)
 #define DC_COM_CRC_CHECKSUM			0x301
 #define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
 #define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
 #define DC_COM_CRC_CHECKSUM_LATCHED		0x329
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
-#define H_PULSE_0_ENABLE (1 <<  8)
-#define H_PULSE_1_ENABLE (1 << 10)
-#define H_PULSE_2_ENABLE (1 << 12)
+#define H_PULSE0_ENABLE (1 <<  8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
 
 #define DC_DISP_DISP_WIN_OPTIONS		0x402
 #define HDMI_ENABLE	(1 << 30)
 #define DSI_ENABLE	(1 << 29)
+#define SOR1_TIMING_CYA	(1 << 27)
+#define SOR1_ENABLE	(1 << 26)
 #define SOR_ENABLE	(1 << 25)
 #define CURSOR_ENABLE	(1 << 16)
 
@@ -242,9 +249,20 @@
 #define BASE_COLOR_SIZE565     (6 << 0)
 #define BASE_COLOR_SIZE332     (7 << 0)
 #define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_MASK    (3 << 8)
 #define DITHER_CONTROL_DISABLE (0 << 8)
 #define DITHER_CONTROL_ORDERED (2 << 8)
 #define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK   (0xf << 0)
+#define BASE_COLOR_SIZE_666    (0 << 0)
+#define BASE_COLOR_SIZE_111    (1 << 0)
+#define BASE_COLOR_SIZE_222    (2 << 0)
+#define BASE_COLOR_SIZE_333    (3 << 0)
+#define BASE_COLOR_SIZE_444    (4 << 0)
+#define BASE_COLOR_SIZE_555    (5 << 0)
+#define BASE_COLOR_SIZE_565    (6 << 0)
+#define BASE_COLOR_SIZE_332    (7 << 0)
+#define BASE_COLOR_SIZE_888    (8 << 0)
 
 #define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
 #define  SC1_H_QUALIFIER_NONE	(1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b2697..224a7dc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@
 	}
 
 	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-	if (IS_ERR(dpaux->rst))
+	if (IS_ERR(dpaux->rst)) {
+		dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+			PTR_ERR(dpaux->rst));
 		return PTR_ERR(dpaux->rst);
+	}
 
 	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dpaux->clk))
+	if (IS_ERR(dpaux->clk)) {
+		dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+			PTR_ERR(dpaux->clk));
 		return PTR_ERR(dpaux->clk);
+	}
 
 	err = clk_prepare_enable(dpaux->clk);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+			err);
 		return err;
+	}
 
 	reset_control_deassert(dpaux->rst);
 
 	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(dpaux->clk_parent))
+	if (IS_ERR(dpaux->clk_parent)) {
+		dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+			PTR_ERR(dpaux->clk_parent));
 		return PTR_ERR(dpaux->clk_parent);
+	}
 
 	err = clk_prepare_enable(dpaux->clk_parent);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+			err);
 		return err;
+	}
 
 	err = clk_set_rate(dpaux->clk_parent, 270000000);
 	if (err < 0) {
@@ -323,8 +338,11 @@
 	}
 
 	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(dpaux->vdd))
+	if (IS_ERR(dpaux->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+			PTR_ERR(dpaux->vdd));
 		return PTR_ERR(dpaux->vdd);
+	}
 
 	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
 			       dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@
 		return err;
 	}
 
+	disable_irq(dpaux->irq);
+
 	dpaux->aux.transfer = tegra_dpaux_transfer;
 	dpaux->aux.dev = &pdev->dev;
 
@@ -341,6 +361,24 @@
 	if (err < 0)
 		return err;
 
+	/*
+	 * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+	 * so power them up and configure them in I2C mode.
+	 *
+	 * The DPAUX code paths reconfigure the pads in AUX mode, but there
+	 * is no way to perform the I2C mode configuration in the
+	 * HDMI path.
+	 */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+	value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_MODE_I2C;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
 	/* enable and clear all interrupts */
 	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
 		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
 	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+	u32 value;
+
+	/* make sure pads are powered down when not in use */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
 	drm_dp_aux_unregister(&dpaux->aux);
 
@@ -376,6 +420,7 @@
 }
 
 static const struct of_device_id tegra_dpaux_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dpaux", },
 	{ .compatible = "nvidia,tegra124-dpaux", },
 	{ },
 };
@@ -425,8 +470,10 @@
 		enum drm_connector_status status;
 
 		status = tegra_dpaux_detect(dpaux);
-		if (status == connector_status_connected)
+		if (status == connector_status_connected) {
+			enable_irq(dpaux->irq);
 			return 0;
+		}
 
 		usleep_range(1000, 2000);
 	}
@@ -439,6 +486,8 @@
 	unsigned long timeout;
 	int err;
 
+	disable_irq(dpaux->irq);
+
 	err = regulator_disable(dpaux->vdd);
 	if (err < 0)
 		return err;
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245..20783d9 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
 #define DPAUX_DP_AUX_CONFIG 0x45
 
 #define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
 #define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c..6d88cf1 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@
 	if (err < 0)
 		goto fbdev;
 
-	drm_mode_config_reset(drm);
-
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
 	 * core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@
 
 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;
+	drm->vblank_disable_allowed = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
 		goto device;
 
+	drm_mode_config_reset(drm);
+
 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
 		goto vblank;
@@ -1037,9 +1038,8 @@
 }
 #endif
 
-static const struct dev_pm_ops host1x_drm_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+			 host1x_drm_resume);
 
 static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
 	{ .compatible = "nvidia,tegra124-hdmi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra210-dc", },
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra210-sor", },
+	{ .compatible = "nvidia,tegra210-sor1", },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fc..ec49275 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
 
 #include <uapi/drm/tegra_drm.h>
 #include <linux/host1x.h>
+#include <linux/of_gpio.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@
 struct tegra_dc_soc_info;
 struct tegra_output;
 
+struct tegra_dc_stats {
+	unsigned long frames;
+	unsigned long vblank;
+	unsigned long underflow;
+	unsigned long overflow;
+};
+
 struct tegra_dc {
 	struct host1x_client client;
 	struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@
 
 	struct tegra_output *rgb;
 
+	struct tegra_dc_stats stats;
 	struct list_head list;
 
 	struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@
 	const struct edid *edid;
 	unsigned int hpd_irq;
 	int hpd_gpio;
+	enum of_gpio_flags hpd_gpio_flags;
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index ed970f6..f0a138e 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dsi *dsi = node->info_ent->data;
+	struct drm_crtc *crtc = dsi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-32s %#05x %08x\n", #name, name,	\
@@ -208,7 +218,9 @@
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@
 
 		/* horizontal sync width */
 		hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-		hsw -= 10;
 
 		/* horizontal back porch */
 		hbp = (mode->htotal - mode->hsync_end) * mul / div;
-		hbp -= 14;
+
+		if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+			hbp += hsw;
 
 		/* horizontal front porch */
 		hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+		/* subtract packet overhead */
+		hsw -= 10;
+		hbp -= 14;
 		hfp -= 8;
 
 		tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
@@ -726,10 +743,6 @@
 		tegra_dsi_soft_reset(dsi->slave);
 }
 
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
 	struct tegra_dsi_state *state;
@@ -756,7 +769,7 @@
 }
 
 static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
-	.dpms = tegra_dsi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = tegra_dsi_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -782,59 +795,6 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_dsi *dsi = to_dsi(output);
-	struct tegra_dsi_state *state;
-	u32 value;
-
-	state = tegra_dsi_get_state(dsi);
-
-	tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
-
-	/*
-	 * The D-PHY timing fields are expressed in byte-clock cycles, so
-	 * multiply the period by 8.
-	 */
-	tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
-
-	if (output->panel)
-		drm_panel_prepare(output->panel);
-
-	tegra_dsi_configure(dsi, dc->pipe, mode);
-
-	/* enable display controller */
-	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-	value |= DSI_ENABLE;
-	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-	tegra_dc_commit(dc);
-
-	/* enable DSI controller */
-	tegra_dsi_enable(dsi);
-
-	if (output->panel)
-		drm_panel_enable(output->panel);
-
-	return;
-}
-
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
@@ -874,6 +834,46 @@
 	return;
 }
 
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_dsi *dsi = to_dsi(output);
+	struct tegra_dsi_state *state;
+	u32 value;
+
+	state = tegra_dsi_get_state(dsi);
+
+	tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
+
+	/*
+	 * The D-PHY timing fields are expressed in byte-clock cycles, so
+	 * multiply the period by 8.
+	 */
+	tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
+
+	if (output->panel)
+		drm_panel_prepare(output->panel);
+
+	tegra_dsi_configure(dsi, dc->pipe, mode);
+
+	/* enable display controller */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= DSI_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	/* enable DSI controller */
+	tegra_dsi_enable(dsi);
+
+	if (output->panel)
+		drm_panel_enable(output->panel);
+
+	return;
+}
+
 static int
 tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -956,11 +956,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
-	.dpms = tegra_dsi_encoder_dpms,
-	.prepare = tegra_dsi_encoder_prepare,
-	.commit = tegra_dsi_encoder_commit,
-	.mode_set = tegra_dsi_encoder_mode_set,
 	.disable = tegra_dsi_encoder_disable,
+	.enable = tegra_dsi_encoder_enable,
 	.atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
@@ -992,6 +989,10 @@
 		DSI_PAD_OUT_CLK(0x0);
 	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
 
+	value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+		DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
 	return tegra_mipi_calibrate(dsi->mipi);
 }
 
@@ -1621,6 +1622,9 @@
 }
 
 static const struct of_device_id tegra_dsi_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
 	{ .compatible = "nvidia,tegra114-dsi", },
 	{ },
 };
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006..2192636 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
 #define DSI_PAD_SLEW_DN(x)		(((x) & 0x7) << 12)
 #define DSI_PAD_SLEW_UP(x)		(((x) & 0x7) << 16)
 #define DSI_PAD_CONTROL_3		0x51
+#define  DSI_PAD_PREEMP_PD_CLK(x)	(((x) & 0x3) << 12)
+#define  DSI_PAD_PREEMP_PU_CLK(x)	(((x) & 0x3) << 8)
+#define  DSI_PAD_PREEMP_PD(x)		(((x) & 0x3) << 4)
+#define  DSI_PAD_PREEMP_PU(x)		(((x) & 0x3) << 0)
 #define DSI_PAD_CONTROL_4		0x52
 #define DSI_GANGED_MODE_CONTROL		0x53
 #define DSI_GANGED_MODE_CONTROL_ENABLE	(1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34..07c844b 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@
 #ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	info = framebuffer_alloc(0, drm->dev);
-	if (!info) {
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
 		dev_err(drm->dev, "failed to allocate framebuffer info\n");
 		drm_gem_object_unreference_unlocked(&bo->gem);
-		return -ENOMEM;
+		return PTR_ERR(info);
 	}
 
 	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &tegra_fb_ops;
 
-	err = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (err < 0) {
-		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
-		goto destroy;
-	}
-
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
 
@@ -282,7 +276,7 @@
 	drm_framebuffer_unregister_private(fb);
 	tegra_fb_destroy(fb);
 release:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 	return err;
 }
 
@@ -347,20 +341,9 @@
 
 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
-	struct fb_info *info = fbdev->base.fbdev;
 
-	if (info) {
-		int err;
-
-		err = unregister_framebuffer(info);
-		if (err < 0)
-			DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->base);
+	drm_fb_helper_release_fbi(&fbdev->base);
 
 	if (fbdev->fb) {
 		drm_framebuffer_unregister_private(&fbdev->fb->base);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 06ab178..52b32cb 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,13 +772,8 @@
 	return drm_detect_hdmi_monitor(edid);
 }
 
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
-				      int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
-	.dpms = tegra_hdmi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -818,22 +813,27 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	u32 value;
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~HDMI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
 }
 
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
-}
-
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted)
-{
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -872,13 +872,13 @@
 
 	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
 			DC_DISP_DISP_TIMING_OPTIONS);
-	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
 			DC_DISP_DISP_COLOR_CONTROL);
 
 	/* video_preamble uses h_pulse2 */
 	pulse_start = 1 + h_sync_width + h_back_porch - 10;
 
-	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
 
 	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
 		PULSE_LAST_END_A;
@@ -1035,24 +1035,6 @@
 	/* TODO: add HDCP support */
 }
 
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	u32 value;
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~HDMI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-}
-
 static int
 tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 				struct drm_crtc_state *crtc_state,
@@ -1075,11 +1057,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
-	.dpms = tegra_hdmi_encoder_dpms,
-	.prepare = tegra_hdmi_encoder_prepare,
-	.commit = tegra_hdmi_encoder_commit,
-	.mode_set = tegra_hdmi_encoder_mode_set,
 	.disable = tegra_hdmi_encoder_disable,
+	.enable = tegra_hdmi_encoder_enable,
 	.atomic_check = tegra_hdmi_encoder_atomic_check,
 };
 
@@ -1087,11 +1066,16 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_hdmi *hdmi = node->info_ent->data;
-	int err;
+	struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 
-	err = clk_prepare_enable(hdmi->clk);
-	if (err)
-		return err;
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-56s %#05x %08x\n", #name, name,	\
@@ -1258,9 +1242,9 @@
 
 #undef DUMP_REG
 
-	clk_disable_unprepare(hdmi->clk);
-
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db479..46664b6 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/of_gpio.h>
-
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
 #include "drm.h"
@@ -59,10 +57,17 @@
 	enum drm_connector_status status = connector_status_unknown;
 
 	if (gpio_is_valid(output->hpd_gpio)) {
-		if (gpio_get_value(output->hpd_gpio) == 0)
-			status = connector_status_disconnected;
-		else
-			status = connector_status_connected;
+		if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+			if (gpio_get_value(output->hpd_gpio) != 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		} else {
+			if (gpio_get_value(output->hpd_gpio) == 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		}
 	} else {
 		if (!output->panel)
 			status = connector_status_disconnected;
@@ -97,7 +102,6 @@
 int tegra_output_probe(struct tegra_output *output)
 {
 	struct device_node *ddc, *panel;
-	enum of_gpio_flags flags;
 	int err, size;
 
 	if (!output->of_node)
@@ -128,7 +132,7 @@
 
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
-						   &flags);
+						   &output->hpd_gpio_flags);
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 7cd833f..bc9735b 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
 struct tegra_rgb {
 	struct tegra_output output;
 	struct tegra_dc *dc;
-	bool enabled;
 
 	struct clk *clk_parent;
 	struct clk *clk;
@@ -88,13 +87,8 @@
 		tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
-				     int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
-	.dpms = tegra_rgb_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -125,21 +119,22 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_rgb *rgb = to_rgb(output);
+
+	if (output->panel)
+		drm_panel_disable(output->panel);
+
+	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+	tegra_dc_commit(rgb->dc);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
 }
 
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_rgb *rgb = to_rgb(output);
@@ -174,21 +169,6 @@
 		drm_panel_enable(output->panel);
 }
 
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_rgb *rgb = to_rgb(output);
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
-	tegra_dc_commit(rgb->dc);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-}
-
 static int
 tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -231,11 +211,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
-	.dpms = tegra_rgb_encoder_dpms,
-	.prepare = tegra_rgb_encoder_prepare,
-	.commit = tegra_rgb_encoder_commit,
-	.mode_set = tegra_rgb_encoder_mode_set,
 	.disable = tegra_rgb_encoder_disable,
+	.enable = tegra_rgb_encoder_enable,
 	.atomic_check = tegra_rgb_encoder_atomic_check,
 };
 
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7591d89..da1715e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
 #include "drm.h"
 #include "sor.h"
 
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+	unsigned long frequency;
+
+	u8 vcocap;
+	u8 ichpmp;
+	u8 loadadj;
+	u8 termadj;
+	u8 tx_pu;
+	u8 bg_vref;
+
+	u8 drive_current[4];
+	u8 preemphasis[4];
+};
+
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 54000000,
+		.vcocap = 0x0,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x10,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xa,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	},
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x29, 0x29, 0x29, 0x29 },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x6,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xf,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0xa,
+		.loadadj = 0x3,
+		.termadj = 0xb,
+		.tx_pu = 0x66,
+		.bg_vref = 0xe,
+		.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+		.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+	},
+};
+#endif
+
+struct tegra_sor_soc {
+	bool supports_edp;
+	bool supports_lvds;
+	bool supports_hdmi;
+	bool supports_dp;
+
+	const struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+	const char *name;
+	int (*probe)(struct tegra_sor *sor);
+	int (*remove)(struct tegra_sor *sor);
+};
+
 struct tegra_sor {
 	struct host1x_client client;
 	struct tegra_output output;
 	struct device *dev;
 
+	const struct tegra_sor_soc *soc;
 	void __iomem *regs;
 
 	struct reset_control *rst;
@@ -38,12 +175,19 @@
 
 	struct tegra_dpaux *dpaux;
 
-	struct mutex lock;
-	bool enabled;
-
 	struct drm_info_list *debugfs_files;
 	struct drm_minor *minor;
 	struct dentry *debugfs;
+
+	const struct tegra_sor_ops *ops;
+
+	/* for HDMI 2.0 */
+	struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+
+	struct regulator *avdd_io_supply;
+	struct regulator *vdd_pll_supply;
+	struct regulator *hdmi_supply;
 };
 
 struct tegra_sor_config {
@@ -94,40 +238,40 @@
 		SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
-	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
 
 	value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE0(0x0f);
-	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
 
-	value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
-		SOR_LANE_POST_CURSOR_LANE2(0x00) |
-		SOR_LANE_POST_CURSOR_LANE1(0x00) |
-		SOR_LANE_POST_CURSOR_LANE0(0x00);
-	tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+	value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+		SOR_LANE_POSTCURSOR_LANE2(0x00) |
+		SOR_LANE_POSTCURSOR_LANE1(0x00) |
+		SOR_LANE_POSTCURSOR_LANE0(0x00);
+	tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
 
 	/* disable LVDS mode */
 	tegra_sor_writel(sor, 0, SOR_LVDS);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
 	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
 	value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	usleep_range(10, 100);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		   SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
 	if (err < 0)
@@ -148,11 +292,11 @@
 	if (err < 0)
 		return err;
 
-	value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
 	value |= SOR_DP_SPARE_SEQ_ENABLE;
 	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
 	value |= SOR_DP_SPARE_MACRO_SOR_CLK;
-	tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
 
 	for (i = 0, value = 0; i < link->num_lanes; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@
 	return 0;
 }
 
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+	u32 mask = 0x08, adj = 0, value;
+
+	/* enable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value |= SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
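+	/*
+	 * Successive approximation over the 4-bit TMDS_TERMADJ field: try
+	 * each bit from MSB to LSB and keep it set only if the termination
+	 * comparator output reads back low.
+	 */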
+	while (mask) {
+		adj |= mask;
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+		value |= SOR_PLL1_TMDS_TERMADJ(adj);
+		tegra_sor_writel(sor, value, SOR_PLL1);
+
+		usleep_range(100, 200);
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		if (value & SOR_PLL1_TERM_COMPOUT)
+			adj &= ~mask;
+
+		mask >>= 1;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+	value |= SOR_PLL1_TMDS_TERMADJ(adj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	/* disable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
 static void tegra_sor_super_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
 }
 
 static void tegra_sor_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
+	tegra_sor_writel(sor, 1, SOR_STATE0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
 }
 
 static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
@@ -235,16 +420,16 @@
 	unsigned long value, timeout;
 
 	/* wake up in normal mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
 	value |= SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* attach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@
 }
 
 static int tegra_sor_calc_config(struct tegra_sor *sor,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct tegra_sor_config *config,
 				 struct drm_dp_link *link)
 {
@@ -481,9 +666,9 @@
 	unsigned long value, timeout;
 
 	/* switch to safe mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@
 		return -ETIMEDOUT;
 
 	/* go to sleep */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* detach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@
 	if (err < 0)
 		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
 		   SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* stop lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@
 	if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
 		return -ETIMEDOUT;
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_POWER_OFF;
-	value |= SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	return 0;
 }
 
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
 static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 {
 	u32 value;
@@ -615,8 +787,8 @@
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
 	while (time_before(jiffies, timeout)) {
-		value = tegra_sor_readl(sor, SOR_CRC_A);
-		if (value & SOR_CRC_A_VALID)
+		value = tegra_sor_readl(sor, SOR_CRCA);
+		if (value & SOR_CRCA_VALID)
 			return 0;
 
 		usleep_range(100, 200);
@@ -625,24 +797,25 @@
 	return -ETIMEDOUT;
 }
 
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
-				  size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
 {
-	struct tegra_sor *sor = file->private_data;
-	ssize_t num, err;
-	char buf[10];
+	struct drm_info_node *node = s->private;
+	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
+	drm_modeset_lock_all(drm);
 
-	if (!sor->enabled) {
-		err = -EAGAIN;
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
 		goto unlock;
 	}
 
-	value = tegra_sor_readl(sor, SOR_STATE_1);
+	value = tegra_sor_readl(sor, SOR_STATE1);
 	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
 	value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@
 	if (err < 0)
 		goto unlock;
 
-	tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
-	value = tegra_sor_readl(sor, SOR_CRC_B);
+	tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+	value = tegra_sor_readl(sor, SOR_CRCB);
 
-	num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
-	err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+	seq_printf(s, "%08x\n", value);
 
 unlock:
-	mutex_unlock(&sor->lock);
+	drm_modeset_unlock_all(drm);
 	return err;
 }
 
-static const struct file_operations tegra_sor_crc_fops = {
-	.owner = THIS_MODULE,
-	.open = tegra_sor_crc_open,
-	.read = tegra_sor_crc_read,
-	.release = tegra_sor_crc_release,
-};
-
 static int tegra_sor_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-38s %#05x %08x\n", #name, name,	\
 		   tegra_sor_readl(sor, name))
 
 	DUMP_REG(SOR_CTXSW);
-	DUMP_REG(SOR_SUPER_STATE_0);
-	DUMP_REG(SOR_SUPER_STATE_1);
-	DUMP_REG(SOR_STATE_0);
-	DUMP_REG(SOR_STATE_1);
-	DUMP_REG(SOR_HEAD_STATE_0(0));
-	DUMP_REG(SOR_HEAD_STATE_0(1));
-	DUMP_REG(SOR_HEAD_STATE_1(0));
-	DUMP_REG(SOR_HEAD_STATE_1(1));
-	DUMP_REG(SOR_HEAD_STATE_2(0));
-	DUMP_REG(SOR_HEAD_STATE_2(1));
-	DUMP_REG(SOR_HEAD_STATE_3(0));
-	DUMP_REG(SOR_HEAD_STATE_3(1));
-	DUMP_REG(SOR_HEAD_STATE_4(0));
-	DUMP_REG(SOR_HEAD_STATE_4(1));
-	DUMP_REG(SOR_HEAD_STATE_5(0));
-	DUMP_REG(SOR_HEAD_STATE_5(1));
+	DUMP_REG(SOR_SUPER_STATE0);
+	DUMP_REG(SOR_SUPER_STATE1);
+	DUMP_REG(SOR_STATE0);
+	DUMP_REG(SOR_STATE1);
+	DUMP_REG(SOR_HEAD_STATE0(0));
+	DUMP_REG(SOR_HEAD_STATE0(1));
+	DUMP_REG(SOR_HEAD_STATE1(0));
+	DUMP_REG(SOR_HEAD_STATE1(1));
+	DUMP_REG(SOR_HEAD_STATE2(0));
+	DUMP_REG(SOR_HEAD_STATE2(1));
+	DUMP_REG(SOR_HEAD_STATE3(0));
+	DUMP_REG(SOR_HEAD_STATE3(1));
+	DUMP_REG(SOR_HEAD_STATE4(0));
+	DUMP_REG(SOR_HEAD_STATE4(1));
+	DUMP_REG(SOR_HEAD_STATE5(0));
+	DUMP_REG(SOR_HEAD_STATE5(1));
 	DUMP_REG(SOR_CRC_CNTRL);
 	DUMP_REG(SOR_DP_DEBUG_MVID);
 	DUMP_REG(SOR_CLK_CNTRL);
 	DUMP_REG(SOR_CAP);
 	DUMP_REG(SOR_PWR);
 	DUMP_REG(SOR_TEST);
-	DUMP_REG(SOR_PLL_0);
-	DUMP_REG(SOR_PLL_1);
-	DUMP_REG(SOR_PLL_2);
-	DUMP_REG(SOR_PLL_3);
+	DUMP_REG(SOR_PLL0);
+	DUMP_REG(SOR_PLL1);
+	DUMP_REG(SOR_PLL2);
+	DUMP_REG(SOR_PLL3);
 	DUMP_REG(SOR_CSTM);
 	DUMP_REG(SOR_LVDS);
-	DUMP_REG(SOR_CRC_A);
-	DUMP_REG(SOR_CRC_B);
+	DUMP_REG(SOR_CRCA);
+	DUMP_REG(SOR_CRCB);
 	DUMP_REG(SOR_BLANK);
 	DUMP_REG(SOR_SEQ_CTL);
 	DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@
 	DUMP_REG(SOR_SEQ_INST(15));
 	DUMP_REG(SOR_PWM_DIV);
 	DUMP_REG(SOR_PWM_CTL);
-	DUMP_REG(SOR_VCRC_A_0);
-	DUMP_REG(SOR_VCRC_A_1);
-	DUMP_REG(SOR_VCRC_B_0);
-	DUMP_REG(SOR_VCRC_B_1);
-	DUMP_REG(SOR_CCRC_A_0);
-	DUMP_REG(SOR_CCRC_A_1);
-	DUMP_REG(SOR_CCRC_B_0);
-	DUMP_REG(SOR_CCRC_B_1);
-	DUMP_REG(SOR_EDATA_A_0);
-	DUMP_REG(SOR_EDATA_A_1);
-	DUMP_REG(SOR_EDATA_B_0);
-	DUMP_REG(SOR_EDATA_B_1);
-	DUMP_REG(SOR_COUNT_A_0);
-	DUMP_REG(SOR_COUNT_A_1);
-	DUMP_REG(SOR_COUNT_B_0);
-	DUMP_REG(SOR_COUNT_B_1);
-	DUMP_REG(SOR_DEBUG_A_0);
-	DUMP_REG(SOR_DEBUG_A_1);
-	DUMP_REG(SOR_DEBUG_B_0);
-	DUMP_REG(SOR_DEBUG_B_1);
+	DUMP_REG(SOR_VCRC_A0);
+	DUMP_REG(SOR_VCRC_A1);
+	DUMP_REG(SOR_VCRC_B0);
+	DUMP_REG(SOR_VCRC_B1);
+	DUMP_REG(SOR_CCRC_A0);
+	DUMP_REG(SOR_CCRC_A1);
+	DUMP_REG(SOR_CCRC_B0);
+	DUMP_REG(SOR_CCRC_B1);
+	DUMP_REG(SOR_EDATA_A0);
+	DUMP_REG(SOR_EDATA_A1);
+	DUMP_REG(SOR_EDATA_B0);
+	DUMP_REG(SOR_EDATA_B1);
+	DUMP_REG(SOR_COUNT_A0);
+	DUMP_REG(SOR_COUNT_A1);
+	DUMP_REG(SOR_COUNT_B0);
+	DUMP_REG(SOR_COUNT_B1);
+	DUMP_REG(SOR_DEBUG_A0);
+	DUMP_REG(SOR_DEBUG_A1);
+	DUMP_REG(SOR_DEBUG_B0);
+	DUMP_REG(SOR_DEBUG_B1);
 	DUMP_REG(SOR_TRIG);
 	DUMP_REG(SOR_MSCHECK);
 	DUMP_REG(SOR_XBAR_CTRL);
 	DUMP_REG(SOR_XBAR_POL);
-	DUMP_REG(SOR_DP_LINKCTL_0);
-	DUMP_REG(SOR_DP_LINKCTL_1);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE_POST_CURSOR_0);
-	DUMP_REG(SOR_LANE_POST_CURSOR_1);
-	DUMP_REG(SOR_DP_CONFIG_0);
-	DUMP_REG(SOR_DP_CONFIG_1);
-	DUMP_REG(SOR_DP_MN_0);
-	DUMP_REG(SOR_DP_MN_1);
-	DUMP_REG(SOR_DP_PADCTL_0);
-	DUMP_REG(SOR_DP_PADCTL_1);
-	DUMP_REG(SOR_DP_DEBUG_0);
-	DUMP_REG(SOR_DP_DEBUG_1);
-	DUMP_REG(SOR_DP_SPARE_0);
-	DUMP_REG(SOR_DP_SPARE_1);
+	DUMP_REG(SOR_DP_LINKCTL0);
+	DUMP_REG(SOR_DP_LINKCTL1);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE_POSTCURSOR0);
+	DUMP_REG(SOR_LANE_POSTCURSOR1);
+	DUMP_REG(SOR_DP_CONFIG0);
+	DUMP_REG(SOR_DP_CONFIG1);
+	DUMP_REG(SOR_DP_MN0);
+	DUMP_REG(SOR_DP_MN1);
+	DUMP_REG(SOR_DP_PADCTL0);
+	DUMP_REG(SOR_DP_PADCTL1);
+	DUMP_REG(SOR_DP_DEBUG0);
+	DUMP_REG(SOR_DP_DEBUG1);
+	DUMP_REG(SOR_DP_SPARE0);
+	DUMP_REG(SOR_DP_SPARE1);
 	DUMP_REG(SOR_DP_AUDIO_CTRL);
 	DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
 	DUMP_REG(SOR_DP_TPG);
 	DUMP_REG(SOR_DP_TPG_CONFIG);
-	DUMP_REG(SOR_DP_LQ_CSTM_0);
-	DUMP_REG(SOR_DP_LQ_CSTM_1);
-	DUMP_REG(SOR_DP_LQ_CSTM_2);
+	DUMP_REG(SOR_DP_LQ_CSTM0);
+	DUMP_REG(SOR_DP_LQ_CSTM1);
+	DUMP_REG(SOR_DP_LQ_CSTM2);
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static const struct drm_info_list debugfs_files[] = {
+	{ "crc", tegra_sor_show_crc, 0, NULL },
 	{ "regs", tegra_sor_show_regs, 0, NULL },
 };
 
 static int tegra_sor_debugfs_init(struct tegra_sor *sor,
 				  struct drm_minor *minor)
 {
-	struct dentry *entry;
+	const char *name = sor->soc->supports_dp ? "sor1" : "sor";
 	unsigned int i;
-	int err = 0;
+	int err;
 
-	sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+	sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
 	if (!sor->debugfs)
 		return -ENOMEM;
 
@@ -835,14 +1012,9 @@
 	if (err < 0)
 		goto free;
 
-	entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
-				    &tegra_sor_crc_fops);
-	if (!entry) {
-		err = -ENOMEM;
-		goto free;
-	}
+	sor->minor = minor;
 
-	return err;
+	return 0;
 
 free:
 	kfree(sor->debugfs_files);
@@ -860,14 +1032,10 @@
 	sor->minor = NULL;
 
 	kfree(sor->debugfs_files);
-	sor->debugfs = NULL;
+	sor->debugfs_files = NULL;
 
 	debugfs_remove_recursive(sor->debugfs);
-	sor->debugfs_files = NULL;
-}
-
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
-{
+	sor->debugfs = NULL;
 }
 
 static enum drm_connector_status
@@ -879,11 +1047,11 @@
 	if (sor->dpaux)
 		return tegra_dpaux_detect(sor->dpaux);
 
-	return connector_status_unknown;
+	return tegra_output_connector_detect(connector, force);
 }
 
 static const struct drm_connector_funcs tegra_sor_connector_funcs = {
-	.dpms = tegra_sor_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_sor_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -926,22 +1094,102 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_sor_edp_disable(struct drm_encoder *encoder)
 {
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_sor *sor = to_sor(output);
+	u32 value;
+	int err;
+
+	if (output->panel)
+		drm_panel_disable(output->panel);
+
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~SOR_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	if (sor->dpaux) {
+		err = tegra_dpaux_disable(sor->dpaux);
+		if (err < 0)
+			dev_err(sor->dev, "failed to disable DP: %d\n", err);
+	}
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
+
+	reset_control_assert(sor->rst);
+	clk_disable_unprepare(sor->clk);
 }
 
-static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
+#if 0
+static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
+			      unsigned int *value)
 {
-}
+	unsigned int hfp, hsw, hbp, a = 0, b;
 
-static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
-{
-}
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
 
-static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+	pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
+
+	b = hfp - 1;
+
+	pr_info("a: %u, b: %u\n", a, b);
+	pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
+
+	if (a + hsw + hbp <= 11) {
+		a = 1 + 11 - hsw - hbp;
+		pr_info("a: %u\n", a);
+	}
+
+	if (a > b)
+		return -EINVAL;
+
+	if (hsw < 1)
+		return -EINVAL;
+
+	if (mode->hdisplay < 16)
+		return -EINVAL;
+
+	if (value) {
+		if (b > a && a % 2)
+			*value = a + 1;
+		else
+			*value = a;
+	}
+
+	return 0;
+}
+#endif
+
+static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -952,14 +1200,9 @@
 	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
-
-	if (sor->enabled)
-		goto unlock;
-
 	err = clk_prepare_enable(sor->clk);
 	if (err < 0)
-		goto unlock;
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
 
 	reset_control_deassert(sor->rst);
 
@@ -978,7 +1221,7 @@
 		if (err < 0) {
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
+			return;
 		}
 	}
 
@@ -999,40 +1242,40 @@
 	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_3);
-	value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
-	tegra_sor_writel(sor, value, SOR_PLL_3);
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
 
-	value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
-		SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
+		SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	value |= SOR_PLL_2_LVDS_ENABLE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	value |= SOR_PLL2_LVDS_ENABLE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
-	tegra_sor_writel(sor, value, SOR_PLL_1);
+	value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
 
 	while (true) {
-		value = tegra_sor_readl(sor, SOR_PLL_2);
-		if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+		value = tegra_sor_readl(sor, SOR_PLL2);
+		if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
 			break;
 
 		usleep_range(250, 1000);
 	}
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/*
 	 * power up
@@ -1045,51 +1288,49 @@
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* step 1 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
-		 SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
+		 SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* step 2 */
 	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
-		goto unlock;
-	}
 
 	usleep_range(5, 100);
 
 	/* step 3 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	/* step 4 */
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value &= ~SOR_PLL_0_POWER_OFF;
-	value &= ~SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(200, 1000);
 
 	/* step 5 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/* switch to DP clock */
 	err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1097,7 +1338,7 @@
 		dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
 
 	/* power DP lanes */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 
 	if (link.num_lanes <= 2)
 		value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1114,12 +1355,12 @@
 	else
 		value |= SOR_DP_PADCTL_PD_TXD_0;
 
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 	value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	/* start lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1141,14 +1382,14 @@
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* set linkctl */
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value |= SOR_DP_LINKCTL_ENABLE;
 
 	value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
 	value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
 
 	value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	for (i = 0, value = 0; i < 4; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1159,7 +1400,7 @@
 
 	tegra_sor_writel(sor, value, SOR_DP_TPG);
 
-	value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+	value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
 	value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
 	value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
 
@@ -1176,7 +1417,7 @@
 
 	value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
 	value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
-	tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+	tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
 
 	value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1189,33 +1430,27 @@
 	tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
 
 	/* enable pad calibration logic */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	if (sor->dpaux) {
 		u8 rate, lanes;
 
 		err = drm_dp_link_probe(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_power_up(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to power up eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_configure(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to configure eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		rate = drm_dp_link_rate_to_bw_code(link.rate);
 		lanes = link.num_lanes;
@@ -1225,14 +1460,14 @@
 		value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
 		tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-		value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+		value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 		value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 		value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
 
 		if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
 			value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
 
-		tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+		tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 		/* disable training pattern generator */
 
@@ -1249,17 +1484,14 @@
 		if (err < 0) {
 			dev_err(sor->dev, "DP fast link training failed: %d\n",
 				err);
-			goto unlock;
 		}
 
 		dev_dbg(sor->dev, "fast link training succeeded\n");
 	}
 
 	err = tegra_sor_power_up(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-		goto unlock;
-	}
 
 	/*
 	 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1295,7 +1527,7 @@
 		break;
 	}
 
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	/*
 	 * TODO: The video timing programming below doesn't seem to match the
@@ -1303,25 +1535,27 @@
 	 */
 
 	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
 
 	vse = mode->vsync_end - mode->vsync_start - 1;
 	hse = mode->hsync_end - mode->hsync_start - 1;
 
 	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
 
 	vbe = vse + (mode->vsync_start - mode->vdisplay);
 	hbe = hse + (mode->hsync_start - mode->hdisplay);
 
 	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
 
 	vbs = vbe + mode->vdisplay;
 	hbs = hbe + mode->hdisplay;
 
 	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
 
 	/* CSTM (LVDS, link A/B, upper) */
 	value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1330,10 +1564,8 @@
 
 	/* PWM setup */
 	err = tegra_sor_setup_pwm(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-		goto unlock;
-	}
 
 	tegra_sor_update(sor);
 
@@ -1344,93 +1576,15 @@
 	tegra_dc_commit(dc);
 
 	err = tegra_sor_attach(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-		goto unlock;
-	}
 
 	err = tegra_sor_wakeup(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to enable DC: %d\n", err);
-		goto unlock;
-	}
 
 	if (output->panel)
 		drm_panel_enable(output->panel);
-
-	sor->enabled = true;
-
-unlock:
-	mutex_unlock(&sor->lock);
-}
-
-static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_sor *sor = to_sor(output);
-	u32 value;
-	int err;
-
-	mutex_lock(&sor->lock);
-
-	if (!sor->enabled)
-		goto unlock;
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	err = tegra_sor_detach(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-		goto unlock;
-	}
-
-	tegra_sor_writel(sor, 0, SOR_STATE_1);
-	tegra_sor_update(sor);
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~SOR_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-
-	err = tegra_sor_power_down(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-		goto unlock;
-	}
-
-	if (sor->dpaux) {
-		err = tegra_dpaux_disable(sor->dpaux);
-		if (err < 0) {
-			dev_err(sor->dev, "failed to disable DP: %d\n", err);
-			goto unlock;
-		}
-	}
-
-	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
-		goto unlock;
-	}
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-
-	clk_disable_unprepare(sor->clk);
-	reset_control_assert(sor->rst);
-
-	sor->enabled = false;
-
-unlock:
-	mutex_unlock(&sor->lock);
 }
 
 static int
@@ -1454,37 +1608,578 @@
 	return 0;
 }
 
-static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
-	.dpms = tegra_sor_encoder_dpms,
-	.prepare = tegra_sor_encoder_prepare,
-	.commit = tegra_sor_encoder_commit,
-	.mode_set = tegra_sor_encoder_mode_set,
-	.disable = tegra_sor_encoder_disable,
+static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
+	.disable = tegra_sor_edp_disable,
+	.enable = tegra_sor_edp_enable,
+	.atomic_check = tegra_sor_encoder_atomic_check,
+};
+
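+/*
+ * Assemble up to four infoframe bytes into a little-endian 32-bit word,
+ * as expected by the SOR infoframe subpack registers.
+ */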
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+	u32 value = 0;
+	size_t i;
+
+	for (i = size; i > 0; i--)
+		value = (value << 8) | ptr[i - 1];
+
+	return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+					  const void *data, size_t size)
+{
+	const u8 *ptr = data;
+	unsigned long offset;
+	size_t i, j;
+	u32 value;
+
+	switch (ptr[0]) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+		break;
+
+	default:
+		dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+			ptr[0]);
+		return;
+	}
+
+	value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+		INFOFRAME_HEADER_VERSION(ptr[1]) |
+		INFOFRAME_HEADER_LEN(ptr[2]);
+	tegra_sor_writel(sor, value, offset);
+	offset++;
+
+	/*
+	 * Each subpack contains 7 bytes, divided into:
+	 * - subpack_low: bytes 0 - 3
+	 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
+	for (i = 3, j = 0; i < size; i += 7, j += 8) {
+		size_t rem = size - i, num = min_t(size_t, rem, 4);
+
+		value = tegra_sor_hdmi_subpack(&ptr[i], num);
+		tegra_sor_writel(sor, value, offset++);
+
+		num = min_t(size_t, rem - num, 3);
+
+		value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+		tegra_sor_writel(sor, value, offset++);
+	}
+}
+
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+				   const struct drm_display_mode *mode)
+{
+	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+	struct hdmi_avi_infoframe frame;
+	u32 value;
+	int err;
+
+	/* disable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_SINGLE;
+	value &= ~INFOFRAME_CTRL_OTHER;
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+	/* enable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+	value |= INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+	return 0;
+}
+
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+	u32 value;
+
+	value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+	unsigned int i;
+
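+	/* the tables are sorted by frequency; use the first entry that fits */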
+	for (i = 0; i < sor->num_settings; i++)
+		if (frequency <= sor->settings[i].frequency)
+			return &sor->settings[i];
+
+	return NULL;
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_sor *sor = to_sor(output);
+	u32 value;
+	int err;
+
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/* disable display to SOR clock */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value &= ~SOR1_TIMING_CYA;
+	value &= ~SOR1_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+
+	reset_control_assert(sor->rst);
+	usleep_range(1000, 2000);
+	clk_disable_unprepare(sor->clk);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
+	struct tegra_sor_hdmi_settings *settings;
+	struct tegra_sor *sor = to_sor(output);
+	struct drm_display_mode *mode;
+	struct drm_display_info *info;
+	u32 value;
+	int err;
+
+	mode = &encoder->crtc->state->adjusted_mode;
+	info = &output->connector.display_info;
+
+	err = clk_prepare_enable(sor->clk);
+	if (err < 0)
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
+
+	usleep_range(1000, 2000);
+
+	reset_control_deassert(sor->rst);
+
+	err = clk_set_parent(sor->clk, sor->clk_safe);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
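+	/* reference clock divider, in quarter-MHz units of the safe clock */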
+	div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(200, 400);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+		 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
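+	/* wait for the lane sequencer to go idle before triggering power-up */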
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+			break;
+
+		usleep_range(250, 1000);
+	}
+
+	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+		SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+	tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+			break;
+
+		usleep_range(250, 1000);
+	}
+
+	value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+	value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+	value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
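+	/* modes above the 340 MHz HDMI 1.4 TMDS limit need the faster rate */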
+	if (mode->clock < 340000)
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+	else
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+
+	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+	value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+	value |= SOR_DP_SPARE_SEQ_ENABLE;
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+	value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+		SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+	tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+	value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+	/* program the reference clock */
+	value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+	tegra_sor_writel(sor, value, SOR_REFCLK);
+
+	/* XXX don't hardcode */
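+	/* 1:1 lane mapping on link 1; lanes 0 and 2 swapped on link 0 */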
+	value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
+		SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
+	tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+	tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+
+	err = clk_set_parent(sor->clk, sor->clk_parent);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+
+	value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+	/* XXX is this the proper check? */
+	if (mode->clock < 75000)
+		value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+	tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+
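+	/*
+	 * Maximum number of 32-pixel-clock data island packets that fit in
+	 * the horizontal blanking interval, after the rekey window and
+	 * guard-band overhead.
+	 */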
+	max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+	value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+		SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+	tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+	/* H_PULSE2 setup */
+	pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
+		      (mode->htotal - mode->hsync_end) - 10;
+
+	value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+		PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+	value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	value |= H_PULSE2_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+	/* infoframe setup */
+	err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+	if (err < 0)
+		dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+
+	/* XXX HDMI audio support not implemented yet */
+	tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+	/* use single TMDS protocol */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+	value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	/* power up pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* production settings */
+	settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+	if (!settings) {
+		dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+			mode->clock * 1000);
+		return;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_ICHPMP_MASK;
+	value &= ~SOR_PLL0_VCOCAP_MASK;
+	value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+	value |= SOR_PLL0_VCOCAP(settings->vcocap);
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	tegra_sor_dp_term_calibrate(sor);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_LOADADJ_MASK;
+	value |= SOR_PLL1_LOADADJ(settings->loadadj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+	value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = settings->drive_current[0] << 24 |
+		settings->drive_current[1] << 16 |
+		settings->drive_current[2] <<  8 |
+		settings->drive_current[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+	value = settings->preemphasis[0] << 24 |
+		settings->preemphasis[1] << 16 |
+		settings->preemphasis[2] <<  8 |
+		settings->preemphasis[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+	value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* power down pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* miscellaneous display controller settings */
+	value = VSYNC_H_POSITION(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+	value &= ~DITHER_CONTROL_MASK;
+	value &= ~BASE_COLOR_SIZE_MASK;
+
+	switch (info->bpc) {
+	case 6:
+		value |= BASE_COLOR_SIZE_666;
+		break;
+
+	case 8:
+		value |= BASE_COLOR_SIZE_888;
+		break;
+
+	default:
+		WARN(1, "%u bits-per-color not supported\n", info->bpc);
+		break;
+	}
+
+	tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+	err = tegra_sor_power_up(sor, 250);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+	/* configure mode */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+	value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+	value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+		 SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL;
+
+	switch (info->bpc) {
+	case 8:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+		break;
+
+	case 6:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+	value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+	value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	/*
+	 * TODO: The video timing programming below doesn't seem to match the
+	 * register definitions.
+	 */
+
+	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+	/* sync end = sync width - 1 */
+	vse = mode->vsync_end - mode->vsync_start - 1;
+	hse = mode->hsync_end - mode->hsync_start - 1;
+
+	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+	/* blank end = sync end + back porch */
+	vbe = vse + (mode->vtotal - mode->vsync_end);
+	hbe = hse + (mode->htotal - mode->hsync_end);
+
+	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+	/* blank start = blank end + active */
+	vbs = vbe + mode->vdisplay;
+	hbs = hbe + mode->hdisplay;
+
+	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+
+	tegra_sor_update(sor);
+
+	err = tegra_sor_attach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+	/* enable display to SOR clock and generate HDMI preamble */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_wakeup(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+	.disable = tegra_sor_hdmi_disable,
+	.enable = tegra_sor_hdmi_enable,
 	.atomic_check = tegra_sor_encoder_atomic_check,
 };
 
 static int tegra_sor_init(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
+	const struct drm_encoder_helper_funcs *helpers = NULL;
 	struct tegra_sor *sor = host1x_client_to_sor(client);
+	int connector = DRM_MODE_CONNECTOR_Unknown;
+	int encoder = DRM_MODE_ENCODER_NONE;
 	int err;
 
-	if (!sor->dpaux)
-		return -ENODEV;
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			connector = DRM_MODE_CONNECTOR_HDMIA;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_hdmi_helpers;
+		} else if (sor->soc->supports_lvds) {
+			connector = DRM_MODE_CONNECTOR_LVDS;
+			encoder = DRM_MODE_ENCODER_LVDS;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			connector = DRM_MODE_CONNECTOR_eDP;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_edp_helpers;
+		} else if (sor->soc->supports_dp) {
+			connector = DRM_MODE_CONNECTOR_DisplayPort;
+			encoder = DRM_MODE_ENCODER_TMDS;
+		}
+	}
 
 	sor->output.dev = sor->dev;
 
 	drm_connector_init(drm, &sor->output.connector,
 			   &tegra_sor_connector_funcs,
-			   DRM_MODE_CONNECTOR_eDP);
+			   connector);
 	drm_connector_helper_add(&sor->output.connector,
 				 &tegra_sor_connector_helper_funcs);
 	sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
 	drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&sor->output.encoder,
-			       &tegra_sor_encoder_helper_funcs);
+			 encoder);
+	drm_encoder_helper_add(&sor->output.encoder, helpers);
 
 	drm_mode_connector_attach_encoder(&sor->output.connector,
 					  &sor->output.encoder);
@@ -1577,18 +2272,130 @@
 	.exit = tegra_sor_exit,
 };
 
+static const struct tegra_sor_ops tegra_sor_edp_ops = {
+	.name = "eDP",
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+	int err;
+
+	sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+	if (IS_ERR(sor->avdd_io_supply)) {
+		dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+			PTR_ERR(sor->avdd_io_supply));
+		return PTR_ERR(sor->avdd_io_supply);
+	}
+
+	err = regulator_enable(sor->avdd_io_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+	if (IS_ERR(sor->vdd_pll_supply)) {
+		dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+			PTR_ERR(sor->vdd_pll_supply));
+		return PTR_ERR(sor->vdd_pll_supply);
+	}
+
+	err = regulator_enable(sor->vdd_pll_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+	if (IS_ERR(sor->hdmi_supply)) {
+		dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+			PTR_ERR(sor->hdmi_supply));
+		return PTR_ERR(sor->hdmi_supply);
+	}
+
+	err = regulator_enable(sor->hdmi_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+	regulator_disable(sor->hdmi_supply);
+	regulator_disable(sor->vdd_pll_supply);
+	regulator_disable(sor->avdd_io_supply);
+
+	return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+	.name = "HDMI",
+	.probe = tegra_sor_hdmi_probe,
+	.remove = tegra_sor_hdmi_remove,
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+	.supports_edp = true,
+	.supports_lvds = true,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+	.supports_edp = true,
+	.supports_lvds = false,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+	.supports_edp = false,
+	.supports_lvds = false,
+	.supports_hdmi = true,
+	.supports_dp = true,
+
+	.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+	.settings = tegra210_sor_hdmi_defaults,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+	{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+	{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+	{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
 static int tegra_sor_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct device_node *np;
 	struct tegra_sor *sor;
 	struct resource *regs;
 	int err;
 
+	match = of_match_device(tegra_sor_of_match, &pdev->dev);
+
 	sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
 	if (!sor)
 		return -ENOMEM;
 
 	sor->output.dev = sor->dev = &pdev->dev;
+	sor->soc = match->data;
+
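+	/* make a writable per-device copy of the SoC default settings */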
+	sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+				     sor->soc->num_settings *
+					sizeof(*sor->settings),
+				     GFP_KERNEL);
+	if (!sor->settings)
+		return -ENOMEM;
+
+	sor->num_settings = sor->soc->num_settings;
 
 	np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
 	if (np) {
@@ -1599,51 +2406,106 @@
 			return -EPROBE_DEFER;
 	}
 
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			sor->ops = &tegra_sor_hdmi_ops;
+		} else if (sor->soc->supports_lvds) {
+			dev_err(&pdev->dev, "LVDS not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unknown (non-DP) support\n");
+			return -ENODEV;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			sor->ops = &tegra_sor_edp_ops;
+		} else if (sor->soc->supports_dp) {
+			dev_err(&pdev->dev, "DisplayPort not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unknown (DP) support\n");
+			return -ENODEV;
+		}
+	}
+
 	err = tegra_output_probe(&sor->output);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to probe output: %d\n", err);
 		return err;
+	}
+
+	if (sor->ops && sor->ops->probe) {
+		err = sor->ops->probe(sor);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to probe %s: %d\n",
+				sor->ops->name, err);
+			goto output;
+		}
+	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sor->regs = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(sor->regs))
-		return PTR_ERR(sor->regs);
+	if (IS_ERR(sor->regs)) {
+		err = PTR_ERR(sor->regs);
+		goto remove;
+	}
 
 	sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-	if (IS_ERR(sor->rst))
-		return PTR_ERR(sor->rst);
+	if (IS_ERR(sor->rst)) {
+		err = PTR_ERR(sor->rst);
+		dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(sor->clk))
-		return PTR_ERR(sor->clk);
+	if (IS_ERR(sor->clk)) {
+		err = PTR_ERR(sor->clk);
+		dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(sor->clk_parent))
-		return PTR_ERR(sor->clk_parent);
+	if (IS_ERR(sor->clk_parent)) {
+		err = PTR_ERR(sor->clk_parent);
+		dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
-	if (IS_ERR(sor->clk_safe))
-		return PTR_ERR(sor->clk_safe);
+	if (IS_ERR(sor->clk_safe)) {
+		err = PTR_ERR(sor->clk_safe);
+		dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
-	if (IS_ERR(sor->clk_dp))
-		return PTR_ERR(sor->clk_dp);
+	if (IS_ERR(sor->clk_dp)) {
+		err = PTR_ERR(sor->clk_dp);
+		dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+		goto remove;
+	}
 
 	INIT_LIST_HEAD(&sor->client.list);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
 
-	mutex_init(&sor->lock);
-
 	err = host1x_client_register(&sor->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		return err;
+		goto remove;
 	}
 
 	platform_set_drvdata(pdev, sor);
 
 	return 0;
+
+remove:
+	if (sor->ops && sor->ops->remove)
+		sor->ops->remove(sor);
+output:
+	tegra_output_remove(&sor->output);
+	return err;
 }
 
 static int tegra_sor_remove(struct platform_device *pdev)
@@ -1658,17 +2520,17 @@
 		return err;
 	}
 
+	if (sor->ops && sor->ops->remove) {
+		err = sor->ops->remove(sor);
+		if (err < 0)
+			dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
+	}
+
 	tegra_output_remove(&sor->output);
 
 	return 0;
 }
 
-static const struct of_device_id tegra_sor_of_match[] = {
-	{ .compatible = "nvidia,tegra124-sor", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
-
 struct platform_driver tegra_sor_driver = {
 	.driver = {
 		.name = "tegra-sor",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853..2d31d02 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
 
 #define SOR_CTXSW 0x00
 
-#define SOR_SUPER_STATE_0 0x01
+#define SOR_SUPER_STATE0 0x01
 
-#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE1 0x02
 #define  SOR_SUPER_STATE_ATTACHED		(1 << 3)
 #define  SOR_SUPER_STATE_MODE_NORMAL		(1 << 2)
 #define  SOR_SUPER_STATE_HEAD_MODE_MASK		(3 << 0)
@@ -21,9 +21,9 @@
 #define  SOR_SUPER_STATE_HEAD_MODE_SNOOZE	(1 << 0)
 #define  SOR_SUPER_STATE_HEAD_MODE_SLEEP	(0 << 0)
 
-#define SOR_STATE_0 0x03
+#define SOR_STATE0 0x03
 
-#define SOR_STATE_1 0x04
+#define SOR_STATE1 0x04
 #define  SOR_STATE_ASY_PIXELDEPTH_MASK		(0xf << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_18_444	(0x2 << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_24_444	(0x5 << 17)
@@ -33,19 +33,27 @@
 #define  SOR_STATE_ASY_PROTOCOL_CUSTOM		(0xf << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_A		(0x8 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_B		(0x9 << 8)
+#define  SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A	(0x1 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_LVDS		(0x0 << 8)
 #define  SOR_STATE_ASY_CRC_MODE_MASK		(0x3 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_NON_ACTIVE	(0x2 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_COMPLETE	(0x1 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_ACTIVE		(0x0 << 6)
+#define  SOR_STATE_ASY_OWNER_MASK		0xf
 #define  SOR_STATE_ASY_OWNER(x)			(((x) & 0xf) << 0)
 
-#define SOR_HEAD_STATE_0(x) (0x05 + (x))
-#define SOR_HEAD_STATE_1(x) (0x07 + (x))
-#define SOR_HEAD_STATE_2(x) (0x09 + (x))
-#define SOR_HEAD_STATE_3(x) (0x0b + (x))
-#define SOR_HEAD_STATE_4(x) (0x0d + (x))
-#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define  SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define  SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define  SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define  SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
 #define SOR_CRC_CNTRL 0x11
 #define  SOR_CRC_CNTRL_ENABLE			(1 << 0)
 #define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
 #define  SOR_TEST_HEAD_MODE_MASK		(3 << 8)
 #define  SOR_TEST_HEAD_MODE_AWAKE		(2 << 8)
 
-#define SOR_PLL_0 0x17
-#define  SOR_PLL_0_ICHPMP_MASK			(0xf << 24)
-#define  SOR_PLL_0_ICHPMP(x)			(((x) & 0xf) << 24)
-#define  SOR_PLL_0_VCOCAP_MASK			(0xf << 8)
-#define  SOR_PLL_0_VCOCAP(x)			(((x) & 0xf) << 8)
-#define  SOR_PLL_0_VCOCAP_RST			SOR_PLL_0_VCOCAP(3)
-#define  SOR_PLL_0_PLLREG_MASK			(0x3 << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL_V25		SOR_PLL_0_PLLREG_LEVEL(0)
-#define  SOR_PLL_0_PLLREG_LEVEL_V15		SOR_PLL_0_PLLREG_LEVEL(1)
-#define  SOR_PLL_0_PLLREG_LEVEL_V35		SOR_PLL_0_PLLREG_LEVEL(2)
-#define  SOR_PLL_0_PLLREG_LEVEL_V45		SOR_PLL_0_PLLREG_LEVEL(3)
-#define  SOR_PLL_0_PULLDOWN			(1 << 5)
-#define  SOR_PLL_0_RESISTOR_EXT			(1 << 4)
-#define  SOR_PLL_0_VCOPD			(1 << 2)
-#define  SOR_PLL_0_POWER_OFF			(1 << 0)
+#define SOR_PLL0 0x17
+#define  SOR_PLL0_ICHPMP_MASK			(0xf << 24)
+#define  SOR_PLL0_ICHPMP(x)			(((x) & 0xf) << 24)
+#define  SOR_PLL0_VCOCAP_MASK			(0xf << 8)
+#define  SOR_PLL0_VCOCAP(x)			(((x) & 0xf) << 8)
+#define  SOR_PLL0_VCOCAP_RST			SOR_PLL0_VCOCAP(3)
+#define  SOR_PLL0_PLLREG_MASK			(0x3 << 6)
+#define  SOR_PLL0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
+#define  SOR_PLL0_PLLREG_LEVEL_V25		SOR_PLL0_PLLREG_LEVEL(0)
+#define  SOR_PLL0_PLLREG_LEVEL_V15		SOR_PLL0_PLLREG_LEVEL(1)
+#define  SOR_PLL0_PLLREG_LEVEL_V35		SOR_PLL0_PLLREG_LEVEL(2)
+#define  SOR_PLL0_PLLREG_LEVEL_V45		SOR_PLL0_PLLREG_LEVEL(3)
+#define  SOR_PLL0_PULLDOWN			(1 << 5)
+#define  SOR_PLL0_RESISTOR_EXT			(1 << 4)
+#define  SOR_PLL0_VCOPD				(1 << 2)
+#define  SOR_PLL0_PWR				(1 << 0)
 
-#define SOR_PLL_1 0x18
+#define SOR_PLL1 0x18
 /* XXX: read-only bit? */
-#define  SOR_PLL_1_TERM_COMPOUT			(1 << 15)
-#define  SOR_PLL_1_TMDS_TERM			(1 << 8)
+#define  SOR_PLL1_LOADADJ_MASK			(0xf << 20)
+#define  SOR_PLL1_LOADADJ(x)			(((x) & 0xf) << 20)
+#define  SOR_PLL1_TERM_COMPOUT			(1 << 15)
+#define  SOR_PLL1_TMDS_TERMADJ_MASK		(0xf << 9)
+#define  SOR_PLL1_TMDS_TERMADJ(x)		(((x) & 0xf) << 9)
+#define  SOR_PLL1_TMDS_TERM			(1 << 8)
 
-#define SOR_PLL_2 0x19
-#define  SOR_PLL_2_LVDS_ENABLE			(1 << 25)
-#define  SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
-#define  SOR_PLL_2_PORT_POWERDOWN		(1 << 23)
-#define  SOR_PLL_2_BANDGAP_POWERDOWN		(1 << 22)
-#define  SOR_PLL_2_POWERDOWN_OVERRIDE		(1 << 18)
-#define  SOR_PLL_2_SEQ_PLLCAPPD			(1 << 17)
+#define SOR_PLL2 0x19
+#define  SOR_PLL2_LVDS_ENABLE			(1 << 25)
+#define  SOR_PLL2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
+#define  SOR_PLL2_PORT_POWERDOWN		(1 << 23)
+#define  SOR_PLL2_BANDGAP_POWERDOWN		(1 << 22)
+#define  SOR_PLL2_POWERDOWN_OVERRIDE		(1 << 18)
+#define  SOR_PLL2_SEQ_PLLCAPPD			(1 << 17)
+#define  SOR_PLL2_SEQ_PLL_PULLDOWN		(1 << 16)
 
-#define SOR_PLL_3 0x1a
-#define  SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
-#define  SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+#define SOR_PLL3 0x1a
+#define  SOR_PLL3_BG_VREF_LEVEL_MASK		(0xf << 24)
+#define  SOR_PLL3_BG_VREF_LEVEL(x)		(((x) & 0xf) << 24)
+#define  SOR_PLL3_PLL_VDD_MODE_1V8		(0 << 13)
+#define  SOR_PLL3_PLL_VDD_MODE_3V3		(1 << 13)
 
 #define SOR_CSTM 0x1b
+#define  SOR_CSTM_ROTCLK_MASK			(0xf << 24)
+#define  SOR_CSTM_ROTCLK(x)			(((x) & 0xf) << 24)
 #define  SOR_CSTM_LVDS				(1 << 16)
 #define  SOR_CSTM_LINK_ACT_B			(1 << 15)
 #define  SOR_CSTM_LINK_ACT_A			(1 << 14)
 #define  SOR_CSTM_UPPER				(1 << 11)
 
 #define SOR_LVDS 0x1c
-#define SOR_CRC_A 0x1d
-#define  SOR_CRC_A_VALID			(1 << 0)
-#define  SOR_CRC_A_RESET			(1 << 0)
-#define SOR_CRC_B 0x1e
+#define SOR_CRCA 0x1d
+#define  SOR_CRCA_VALID			(1 << 0)
+#define  SOR_CRCA_RESET			(1 << 0)
+#define SOR_CRCB 0x1e
 #define SOR_BLANK 0x1f
 #define SOR_SEQ_CTL 0x20
+#define  SOR_SEQ_CTL_PD_PC_ALT(x)	(((x) & 0xf) << 12)
+#define  SOR_SEQ_CTL_PD_PC(x)		(((x) & 0xf) <<  8)
+#define  SOR_SEQ_CTL_PU_PC_ALT(x)	(((x) & 0xf) <<  4)
+#define  SOR_SEQ_CTL_PU_PC(x)		(((x) & 0xf) <<  0)
 
 #define SOR_LANE_SEQ_CTL 0x21
 #define  SOR_LANE_SEQ_CTL_TRIGGER		(1 << 31)
+#define  SOR_LANE_SEQ_CTL_STATE_BUSY		(1 << 28)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_UP		(0 << 20)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_DOWN		(1 << 20)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_UP	(0 << 16)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_DOWN	(1 << 16)
+#define  SOR_LANE_SEQ_CTL_DELAY(x)		(((x) & 0xf) << 12)
 
 #define SOR_SEQ_INST(x) (0x22 + (x))
+#define  SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define  SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define  SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define  SOR_SEQ_INST_BLANK_V (1 << 28)
+#define  SOR_SEQ_INST_BLANK_H (1 << 27)
+#define  SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define  SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define  SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define  SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define  SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define  SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define  SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define  SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define  SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define  SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define  SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define  SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define  SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define  SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define  SOR_SEQ_INST_HALT (1 << 15)
+#define  SOR_SEQ_INST_WAIT_US (0 << 12)
+#define  SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define  SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define  SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
 
 #define SOR_PWM_DIV 0x32
 #define  SOR_PWM_DIV_MASK			0xffffff
@@ -140,32 +187,36 @@
 #define  SOR_PWM_CTL_CLK_SEL			(1 << 30)
 #define  SOR_PWM_CTL_DUTY_CYCLE_MASK		0xffffff
 
-#define SOR_VCRC_A_0 0x34
-#define SOR_VCRC_A_1 0x35
-#define SOR_VCRC_B_0 0x36
-#define SOR_VCRC_B_1 0x37
-#define SOR_CCRC_A_0 0x38
-#define SOR_CCRC_A_1 0x39
-#define SOR_CCRC_B_0 0x3a
-#define SOR_CCRC_B_1 0x3b
-#define SOR_EDATA_A_0 0x3c
-#define SOR_EDATA_A_1 0x3d
-#define SOR_EDATA_B_0 0x3e
-#define SOR_EDATA_B_1 0x3f
-#define SOR_COUNT_A_0 0x40
-#define SOR_COUNT_A_1 0x41
-#define SOR_COUNT_B_0 0x42
-#define SOR_COUNT_B_1 0x43
-#define SOR_DEBUG_A_0 0x44
-#define SOR_DEBUG_A_1 0x45
-#define SOR_DEBUG_B_0 0x46
-#define SOR_DEBUG_B_1 0x47
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
 #define SOR_TRIG 0x48
 #define SOR_MSCHECK 0x49
 #define SOR_XBAR_CTRL 0x4a
+#define  SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define  SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) <<  2)
+#define  SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define  SOR_XBAR_CTRL_BYPASS (1 << 0)
 #define SOR_XBAR_POL 0x4b
 
-#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL0 0x4c
 #define  SOR_DP_LINKCTL_LANE_COUNT_MASK		(0x1f << 16)
 #define  SOR_DP_LINKCTL_LANE_COUNT(x)		(((1 << (x)) - 1) << 16)
 #define  SOR_DP_LINKCTL_ENHANCED_FRAME		(1 << 14)
@@ -173,34 +224,34 @@
 #define  SOR_DP_LINKCTL_TU_SIZE(x)		(((x) & 0x7f) << 2)
 #define  SOR_DP_LINKCTL_ENABLE			(1 << 0)
 
-#define SOR_DP_LINKCTL_1 0x4d
+#define SOR_DP_LINKCTL1 0x4d
 
-#define SOR_LANE_DRIVE_CURRENT_0 0x4e
-#define SOR_LANE_DRIVE_CURRENT_1 0x4f
-#define SOR_LANE4_DRIVE_CURRENT_0 0x50
-#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
 #define  SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_PREEMPHASIS_0 0x52
-#define SOR_LANE_PREEMPHASIS_1 0x53
-#define SOR_LANE4_PREEMPHASIS_0 0x54
-#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
 #define  SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_POST_CURSOR_0 0x56
-#define SOR_LANE_POST_CURSOR_1 0x57
-#define  SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
-#define  SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
-#define  SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
-#define  SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define  SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define  SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define  SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define  SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG0 0x58
 #define SOR_DP_CONFIG_DISPARITY_NEGATIVE	(1 << 31)
 #define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE		(1 << 26)
 #define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY	(1 << 24)
@@ -211,11 +262,11 @@
 #define SOR_DP_CONFIG_WATERMARK_MASK	(0x3f << 0)
 #define SOR_DP_CONFIG_WATERMARK(x)	(((x) & 0x3f) << 0)
 
-#define SOR_DP_CONFIG_1 0x59
-#define SOR_DP_MN_0 0x5a
-#define SOR_DP_MN_1 0x5b
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
 
-#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL0 0x5c
 #define  SOR_DP_PADCTL_PAD_CAL_PD	(1 << 23)
 #define  SOR_DP_PADCTL_TX_PU_ENABLE	(1 << 22)
 #define  SOR_DP_PADCTL_TX_PU_MASK	(0xff << 8)
@@ -229,17 +280,18 @@
 #define  SOR_DP_PADCTL_PD_TXD_1		(1 << 1)
 #define  SOR_DP_PADCTL_PD_TXD_2		(1 << 0)
 
-#define SOR_DP_PADCTL_1 0x5d
+#define SOR_DP_PADCTL1 0x5d
 
-#define SOR_DP_DEBUG_0 0x5e
-#define SOR_DP_DEBUG_1 0x5f
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
 
-#define SOR_DP_SPARE_0 0x60
-#define  SOR_DP_SPARE_MACRO_SOR_CLK	(1 << 2)
-#define  SOR_DP_SPARE_PANEL_INTERNAL	(1 << 1)
-#define  SOR_DP_SPARE_SEQ_ENABLE	(1 << 0)
+#define SOR_DP_SPARE0 0x60
+#define  SOR_DP_SPARE_DISP_VIDEO_PREAMBLE	(1 << 3)
+#define  SOR_DP_SPARE_MACRO_SOR_CLK		(1 << 2)
+#define  SOR_DP_SPARE_PANEL_INTERNAL		(1 << 1)
+#define  SOR_DP_SPARE_SEQ_ENABLE		(1 << 0)
 
-#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_SPARE1 0x61
 #define SOR_DP_AUDIO_CTRL 0x62
 
 #define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
 #define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
 
 #define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
 
 #define SOR_DP_TPG 0x6d
 #define  SOR_DP_TPG_CHANNEL_CODING	(1 << 6)
@@ -275,8 +327,44 @@
 #define  SOR_DP_TPG_PATTERN_NONE	(0x0 << 0)
 
 #define SOR_DP_TPG_CONFIG 0x6e
-#define SOR_DP_LQ_CSTM_0 0x6f
-#define SOR_DP_LQ_CSTM_1 0x70
-#define SOR_DP_LQ_CSTM_2 0x71
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define  INFOFRAME_CTRL_CHECKSUM_ENABLE	(1 << 9)
+#define  INFOFRAME_CTRL_SINGLE		(1 << 8)
+#define  INFOFRAME_CTRL_OTHER		(1 << 4)
+#define  INFOFRAME_CTRL_ENABLE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define  INFOFRAME_STATUS_DONE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define  INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define  INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define  INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_CTRL 0xc0
+#define  SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define  SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define  SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define  SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_REFCLK 0xe6
+#define  SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define  SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define SOR_INPUT_CONTROL 0xe8
+#define  SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define  SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
 
 #endif
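The SOR_REFCLK_DIV_INT()/SOR_REFCLK_DIV_FRAC() helpers above appear to take a divider premultiplied by four: the integer part ((x) >> 2) lands in bits 15:8 and the quarter-step remainder ((x) & 3) in bits 7:6. A minimal sketch under that assumption, encoding a divider of 4.25:

	/* Sketch only: assuming x carries the divider in quarter steps, 4.25 -> 17 */
	unsigned int x = 17;
	u32 value = SOR_REFCLK_DIV_INT(x) |	/* 17 >> 2 = 4, into bits 15:8 */
		    SOR_REFCLK_DIV_FRAC(x);	/* 17 & 3 = 1, into bits 7:6 */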
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 7a03158..0af8bed 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -375,25 +375,17 @@
 		dev_info(&pdev->dev, "found backlight\n");
 	}
 
-	panel_mod->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+	panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+							 GPIOD_OUT_LOW);
 	if (IS_ERR(panel_mod->enable_gpio)) {
 		ret = PTR_ERR(panel_mod->enable_gpio);
-		if (ret != -ENOENT) {
-			dev_err(&pdev->dev, "failed to request enable GPIO\n");
-			goto fail_backlight;
-		}
-
-		/* Optional GPIO is not here, continue silently. */
-		panel_mod->enable_gpio = NULL;
-	} else {
-		ret = gpiod_direction_output(panel_mod->enable_gpio, 0);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to setup GPIO\n");
-			goto fail_backlight;
-		}
-		dev_info(&pdev->dev, "found enable GPIO\n");
+		dev_err(&pdev->dev, "failed to request enable GPIO\n");
+		goto fail_backlight;
 	}
 
+	if (panel_mod->enable_gpio)
+		dev_info(&pdev->dev, "found enable GPIO\n");
+
 	mod = &panel_mod->base;
 	pdev->dev.platform_data = mod;
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080ab..4e19d0f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@
 		swap_storage = shmem_file_setup("ttm swap",
 						ttm->num_pages << PAGE_SHIFT,
 						0);
-		if (unlikely(IS_ERR(swap_storage))) {
+		if (IS_ERR(swap_storage)) {
 			pr_err("Failed allocating swap storage\n");
 			return PTR_ERR(swap_storage);
 		}
@@ -354,7 +354,7 @@
 		if (unlikely(from_page == NULL))
 			continue;
 		to_page = shmem_read_mapping_page(swap_space, i);
-		if (unlikely(IS_ERR(to_page))) {
+		if (IS_ERR(to_page)) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
 		}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16ce..62c7b1d 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 
 	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
 			  rect->height);
@@ -298,7 +298,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_copyarea(info, region);
+	drm_fb_helper_sys_copyarea(info, region);
 
 	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
 			  region->height);
@@ -308,7 +308,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 
 	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
 			  image->height);
@@ -476,7 +476,6 @@
 		container_of(helper, struct udl_fbdev, helper);
 	struct drm_device *dev = ufbdev->helper.dev;
 	struct fb_info *info;
-	struct device *device = dev->dev;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct udl_gem_object *obj;
@@ -506,21 +505,20 @@
 		goto out_gfree;
 	}
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_gfree;
 	}
 	info->par = ufbdev;
 
 	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
 	if (ret)
-		goto out_gfree;
+		goto out_destroy_fbi;
 
 	fb = &ufbdev->ufb.base;
 
 	ufbdev->helper.fb = fb;
-	ufbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "udldrmfb");
 
@@ -533,18 +531,13 @@
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_gfree;
-	}
-
-
 	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
 		      fb->width, fb->height,
 		      ufbdev->ufb.obj->vmapping);
 
 	return ret;
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_gfree:
 	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
 out:
@@ -558,14 +551,8 @@
 static void udl_fbdev_destroy(struct drm_device *dev,
 			      struct udl_fbdev *ufbdev)
 {
-	struct fb_info *info;
-	if (ufbdev->helper.fbdev) {
-		info = ufbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ufbdev->helper);
+	drm_fb_helper_release_fbi(&ufbdev->helper);
 	drm_fb_helper_fini(&ufbdev->helper);
 	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
 	drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@
 		return;
 
 	ufbdev = udl->fbdev;
-	if (ufbdev->helper.fbdev) {
-		struct fb_info *info;
-		info = ufbdev->helper.fbdev;
-		unlink_framebuffer(info);
-	}
+	drm_fb_helper_unlink_fbi(&ufbdev->helper);
 }
 
 struct drm_framebuffer *
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9..6a81e08 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@
 				   const struct fb_fillrect *rect)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
 			     rect->width, rect->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@
 				   const struct fb_copyarea *area)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
 			   area->width, area->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@
 				    const struct fb_image *image)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
 			     image->width, image->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct virtio_gpu_object *obj;
-	struct device *device = vgdev->dev;
 	uint32_t resid, format, size;
 	int ret;
 
@@ -317,18 +316,12 @@
 	if (ret)
 		goto err_obj_attach;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fb_alloc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fb_alloc_cmap;
-	}
-
 	info->par = helper;
 
 	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@
 	fb = &vfbdev->vgfb.base;
 
 	vfbdev->helper.fb = fb;
-	vfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "virtiodrmfb");
 	info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@
 	return 0;
 
 err_fb_init:
-	fb_dealloc_cmap(&info->cmap);
-err_fb_alloc_cmap:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 err_fb_alloc:
 	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
 err_obj_attach:
@@ -371,15 +361,11 @@
 static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 				    struct virtio_gpu_fbdev *vgfbdev)
 {
-	struct fb_info *info;
 	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
 
-	if (vgfbdev->helper.fbdev) {
-		info = vgfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+	drm_fb_helper_release_fbi(&vgfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (vgfb->obj)
 		vgfb->obj = NULL;
 	drm_fb_helper_fini(&vgfbdev->helper);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab95..d281575 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
-	    vmwgfx_cmdbuf_res.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 0000000..8cce7f1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 0000000..9ce2466
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ *       Definitions for SVGA3D hardware capabilities.  Capabilities
+ *       are used to query for optional rendering features during
+ *       driver initialization. The capability data is stored as a very
+ *       basic key/value dictionary within the "FIFO register" memory
+ *       area at the beginning of BAR2.
+ *
+ *       Note that these definitions are only for 3D capabilities.
+ *       The SVGA device also has "device capabilities" and "FIFO
+ *       capabilities", which are non-3D-specific and are stored as
+ *       bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE   (SVGA_FIFO_3D_CAPS_LAST - \
+                                  SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ *    Record types that can be found in the caps block.
+ *    Related record types are grouped together numerically so that
+ *    SVGA3dCaps_FindRecord() can be applied on a range of record
+ *    types.
+ */
+
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ *    Header field leading each caps block record. Contains the offset (in
+ *    register words, NOT bytes) to the next caps block record (or to the end
+ *    of the caps block records, marked by a zero word) and the record type
+ *    as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ *    Caps block record; "data" is a placeholder for the actual data structure
+ *    contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
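Per the record-header comment above, the caps block is walked by advancing header.length register words at a time until a zero word terminates the list. A minimal sketch of that traversal (find_record is a hypothetical helper; the SVGA3dCaps_FindRecord mentioned above is not defined in this header):

	static const SVGA3dCapsRecord *
	find_record(const uint32 *caps, SVGA3dCapsRecordType type)
	{
		const SVGA3dCapsRecord *record = (const SVGA3dCapsRecord *)caps;

		while (record->header.length != 0) {
			if (record->header.type == type)
				return record;
			/* header.length is in 32-bit register words, not bytes */
			record = (const SVGA3dCapsRecord *)
				 ((const uint32 *)record + record->header.length);
		}

		return NULL;	/* zero word: end of the caps block */
	}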
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 0000000..2dfd57c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ *       SVGA 3d hardware cmd definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+typedef enum {
+   SVGA_3D_CMD_LEGACY_BASE                                = 1000,
+   SVGA_3D_CMD_BASE                                       = 1040,
+
+   SVGA_3D_CMD_SURFACE_DEFINE                             = 1040,
+   SVGA_3D_CMD_SURFACE_DESTROY                            = 1041,
+   SVGA_3D_CMD_SURFACE_COPY                               = 1042,
+   SVGA_3D_CMD_SURFACE_STRETCHBLT                         = 1043,
+   SVGA_3D_CMD_SURFACE_DMA                                = 1044,
+   SVGA_3D_CMD_CONTEXT_DEFINE                             = 1045,
+   SVGA_3D_CMD_CONTEXT_DESTROY                            = 1046,
+   SVGA_3D_CMD_SETTRANSFORM                               = 1047,
+   SVGA_3D_CMD_SETZRANGE                                  = 1048,
+   SVGA_3D_CMD_SETRENDERSTATE                             = 1049,
+   SVGA_3D_CMD_SETRENDERTARGET                            = 1050,
+   SVGA_3D_CMD_SETTEXTURESTATE                            = 1051,
+   SVGA_3D_CMD_SETMATERIAL                                = 1052,
+   SVGA_3D_CMD_SETLIGHTDATA                               = 1053,
+   SVGA_3D_CMD_SETLIGHTENABLED                            = 1054,
+   SVGA_3D_CMD_SETVIEWPORT                                = 1055,
+   SVGA_3D_CMD_SETCLIPPLANE                               = 1056,
+   SVGA_3D_CMD_CLEAR                                      = 1057,
+   SVGA_3D_CMD_PRESENT                                    = 1058,
+   SVGA_3D_CMD_SHADER_DEFINE                              = 1059,
+   SVGA_3D_CMD_SHADER_DESTROY                             = 1060,
+   SVGA_3D_CMD_SET_SHADER                                 = 1061,
+   SVGA_3D_CMD_SET_SHADER_CONST                           = 1062,
+   SVGA_3D_CMD_DRAW_PRIMITIVES                            = 1063,
+   SVGA_3D_CMD_SETSCISSORRECT                             = 1064,
+   SVGA_3D_CMD_BEGIN_QUERY                                = 1065,
+   SVGA_3D_CMD_END_QUERY                                  = 1066,
+   SVGA_3D_CMD_WAIT_FOR_QUERY                             = 1067,
+   SVGA_3D_CMD_PRESENT_READBACK                           = 1068,
+   SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN                     = 1069,
+   SVGA_3D_CMD_SURFACE_DEFINE_V2                          = 1070,
+   SVGA_3D_CMD_GENERATE_MIPMAPS                           = 1071,
+   SVGA_3D_CMD_VIDEO_CREATE_DECODER                       = 1072,
+   SVGA_3D_CMD_VIDEO_DESTROY_DECODER                      = 1073,
+   SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR                     = 1074,
+   SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR                    = 1075,
+   SVGA_3D_CMD_VIDEO_DECODE_START_FRAME                   = 1076,
+   SVGA_3D_CMD_VIDEO_DECODE_RENDER                        = 1077,
+   SVGA_3D_CMD_VIDEO_DECODE_END_FRAME                     = 1078,
+   SVGA_3D_CMD_VIDEO_PROCESS_FRAME                        = 1079,
+   SVGA_3D_CMD_ACTIVATE_SURFACE                           = 1080,
+   SVGA_3D_CMD_DEACTIVATE_SURFACE                         = 1081,
+   SVGA_3D_CMD_SCREEN_DMA                                 = 1082,
+   SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE                   = 1083,
+   SVGA_3D_CMD_OPEN_CONTEXT_SURFACE                       = 1084,
+
+   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
+   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
+   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1087,
+   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1088,
+   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1089,
+   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1090,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE                            = 1091,
+   SVGA_3D_CMD_READBACK_OTABLE                            = 1092,
+
+   SVGA_3D_CMD_DEFINE_GB_MOB                              = 1093,
+   SVGA_3D_CMD_DESTROY_GB_MOB                             = 1094,
+   SVGA_3D_CMD_DEAD3                                      = 1095,
+   SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING                      = 1096,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE                          = 1097,
+   SVGA_3D_CMD_DESTROY_GB_SURFACE                         = 1098,
+   SVGA_3D_CMD_BIND_GB_SURFACE                            = 1099,
+   SVGA_3D_CMD_COND_BIND_GB_SURFACE                       = 1100,
+   SVGA_3D_CMD_UPDATE_GB_IMAGE                            = 1101,
+   SVGA_3D_CMD_UPDATE_GB_SURFACE                          = 1102,
+   SVGA_3D_CMD_READBACK_GB_IMAGE                          = 1103,
+   SVGA_3D_CMD_READBACK_GB_SURFACE                        = 1104,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE                        = 1105,
+   SVGA_3D_CMD_INVALIDATE_GB_SURFACE                      = 1106,
+
+   SVGA_3D_CMD_DEFINE_GB_CONTEXT                          = 1107,
+   SVGA_3D_CMD_DESTROY_GB_CONTEXT                         = 1108,
+   SVGA_3D_CMD_BIND_GB_CONTEXT                            = 1109,
+   SVGA_3D_CMD_READBACK_GB_CONTEXT                        = 1110,
+   SVGA_3D_CMD_INVALIDATE_GB_CONTEXT                      = 1111,
+
+   SVGA_3D_CMD_DEFINE_GB_SHADER                           = 1112,
+   SVGA_3D_CMD_DESTROY_GB_SHADER                          = 1113,
+   SVGA_3D_CMD_BIND_GB_SHADER                             = 1114,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE64                          = 1115,
+
+   SVGA_3D_CMD_BEGIN_GB_QUERY                             = 1116,
+   SVGA_3D_CMD_END_GB_QUERY                               = 1117,
+   SVGA_3D_CMD_WAIT_FOR_GB_QUERY                          = 1118,
+
+   SVGA_3D_CMD_NOP                                        = 1119,
+
+   SVGA_3D_CMD_ENABLE_GART                                = 1120,
+   SVGA_3D_CMD_DISABLE_GART                               = 1121,
+   SVGA_3D_CMD_MAP_MOB_INTO_GART                          = 1122,
+   SVGA_3D_CMD_UNMAP_GART_RANGE                           = 1123,
+
+   SVGA_3D_CMD_DEFINE_GB_SCREENTARGET                     = 1124,
+   SVGA_3D_CMD_DESTROY_GB_SCREENTARGET                    = 1125,
+   SVGA_3D_CMD_BIND_GB_SCREENTARGET                       = 1126,
+   SVGA_3D_CMD_UPDATE_GB_SCREENTARGET                     = 1127,
+
+   SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL                  = 1128,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL                = 1129,
+
+   SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE                 = 1130,
+
+   SVGA_3D_CMD_GB_SCREEN_DMA                              = 1131,
+   SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH                 = 1132,
+   SVGA_3D_CMD_GB_MOB_FENCE                               = 1133,
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V2                       = 1134,
+   SVGA_3D_CMD_DEFINE_GB_MOB64                            = 1135,
+   SVGA_3D_CMD_REDEFINE_GB_MOB64                          = 1136,
+   SVGA_3D_CMD_NOP_ERROR                                  = 1137,
+
+   SVGA_3D_CMD_SET_VERTEX_STREAMS                         = 1138,
+   SVGA_3D_CMD_SET_VERTEX_DECLS                           = 1139,
+   SVGA_3D_CMD_SET_VERTEX_DIVISORS                        = 1140,
+   SVGA_3D_CMD_DRAW                                       = 1141,
+   SVGA_3D_CMD_DRAW_INDEXED                               = 1142,
+
+   /*
+    * DX10 Commands
+    */
+   SVGA_3D_CMD_DX_MIN                                     = 1143,
+   SVGA_3D_CMD_DX_DEFINE_CONTEXT                          = 1143,
+   SVGA_3D_CMD_DX_DESTROY_CONTEXT                         = 1144,
+   SVGA_3D_CMD_DX_BIND_CONTEXT                            = 1145,
+   SVGA_3D_CMD_DX_READBACK_CONTEXT                        = 1146,
+   SVGA_3D_CMD_DX_INVALIDATE_CONTEXT                      = 1147,
+   SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER              = 1148,
+   SVGA_3D_CMD_DX_SET_SHADER_RESOURCES                    = 1149,
+   SVGA_3D_CMD_DX_SET_SHADER                              = 1150,
+   SVGA_3D_CMD_DX_SET_SAMPLERS                            = 1151,
+   SVGA_3D_CMD_DX_DRAW                                    = 1152,
+   SVGA_3D_CMD_DX_DRAW_INDEXED                            = 1153,
+   SVGA_3D_CMD_DX_DRAW_INSTANCED                          = 1154,
+   SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED                  = 1155,
+   SVGA_3D_CMD_DX_DRAW_AUTO                               = 1156,
+   SVGA_3D_CMD_DX_SET_INPUT_LAYOUT                        = 1157,
+   SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS                      = 1158,
+   SVGA_3D_CMD_DX_SET_INDEX_BUFFER                        = 1159,
+   SVGA_3D_CMD_DX_SET_TOPOLOGY                            = 1160,
+   SVGA_3D_CMD_DX_SET_RENDERTARGETS                       = 1161,
+   SVGA_3D_CMD_DX_SET_BLEND_STATE                         = 1162,
+   SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE                  = 1163,
+   SVGA_3D_CMD_DX_SET_RASTERIZER_STATE                    = 1164,
+   SVGA_3D_CMD_DX_DEFINE_QUERY                            = 1165,
+   SVGA_3D_CMD_DX_DESTROY_QUERY                           = 1166,
+   SVGA_3D_CMD_DX_BIND_QUERY                              = 1167,
+   SVGA_3D_CMD_DX_SET_QUERY_OFFSET                        = 1168,
+   SVGA_3D_CMD_DX_BEGIN_QUERY                             = 1169,
+   SVGA_3D_CMD_DX_END_QUERY                               = 1170,
+   SVGA_3D_CMD_DX_READBACK_QUERY                          = 1171,
+   SVGA_3D_CMD_DX_SET_PREDICATION                         = 1172,
+   SVGA_3D_CMD_DX_SET_SOTARGETS                           = 1173,
+   SVGA_3D_CMD_DX_SET_VIEWPORTS                           = 1174,
+   SVGA_3D_CMD_DX_SET_SCISSORRECTS                        = 1175,
+   SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW                 = 1176,
+   SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW                 = 1177,
+   SVGA_3D_CMD_DX_PRED_COPY_REGION                        = 1178,
+   SVGA_3D_CMD_DX_PRED_COPY                               = 1179,
+   SVGA_3D_CMD_DX_STRETCHBLT                              = 1180,
+   SVGA_3D_CMD_DX_GENMIPS                                 = 1181,
+   SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE                      = 1182,
+   SVGA_3D_CMD_DX_READBACK_SUBRESOURCE                    = 1183,
+   SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE                  = 1184,
+   SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW              = 1185,
+   SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW             = 1186,
+   SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW                = 1187,
+   SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW               = 1188,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW                = 1189,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW               = 1190,
+   SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT                    = 1191,
+   SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT                   = 1192,
+   SVGA_3D_CMD_DX_DEFINE_BLEND_STATE                      = 1193,
+   SVGA_3D_CMD_DX_DESTROY_BLEND_STATE                     = 1194,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE               = 1195,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE              = 1196,
+   SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE                 = 1197,
+   SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE                = 1198,
+   SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE                    = 1199,
+   SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE                   = 1200,
+   SVGA_3D_CMD_DX_DEFINE_SHADER                           = 1201,
+   SVGA_3D_CMD_DX_DESTROY_SHADER                          = 1202,
+   SVGA_3D_CMD_DX_BIND_SHADER                             = 1203,
+   SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT                     = 1204,
+   SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT                    = 1205,
+   SVGA_3D_CMD_DX_SET_STREAMOUTPUT                        = 1206,
+   SVGA_3D_CMD_DX_SET_COTABLE                             = 1207,
+   SVGA_3D_CMD_DX_READBACK_COTABLE                        = 1208,
+   SVGA_3D_CMD_DX_BUFFER_COPY                             = 1209,
+   SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER                    = 1210,
+   SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK               = 1211,
+   SVGA_3D_CMD_DX_MOVE_QUERY                              = 1212,
+   SVGA_3D_CMD_DX_BIND_ALL_QUERY                          = 1213,
+   SVGA_3D_CMD_DX_READBACK_ALL_QUERY                      = 1214,
+   SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER               = 1215,
+   SVGA_3D_CMD_DX_MOB_FENCE_64                            = 1216,
+   SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT                  = 1217,
+   SVGA_3D_CMD_DX_HINT                                    = 1218,
+   SVGA_3D_CMD_DX_BUFFER_UPDATE                           = 1219,
+   SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
+   SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET           = 1221,
+   SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
+
+   /*
+    * Reserve some IDs to be used for the DX11 shader types.
+    */
+   SVGA_3D_CMD_DX_RESERVED1                               = 1223,
+   SVGA_3D_CMD_DX_RESERVED2                               = 1224,
+   SVGA_3D_CMD_DX_RESERVED3                               = 1225,
+
+   SVGA_3D_CMD_DX_MAX                                     = 1226,
+   SVGA_3D_CMD_MAX                                        = 1226,
+   SVGA_3D_CMD_FUTURE_MAX                                 = 3000
+} SVGAFifo3dCmdId;
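The ID ranges spelled out above make membership checks on incoming FIFO words trivial. A minimal sketch (is_svga3d_cmd is a hypothetical helper):

	static inline bool is_svga3d_cmd(uint32 id)
	{
		/* [SVGA_3D_CMD_BASE, SVGA_3D_CMD_MAX) is the current 3D range */
		return id >= SVGA_3D_CMD_BASE && id < SVGA_3D_CMD_MAX;
	}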
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The id and data-size header that precedes the payload of every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               id;
+   uint32               size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
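Every 3D command in the FIFO is therefore this two-word header immediately followed by 'size' bytes of payload. A minimal sketch of laying one out (SVGA3dCmdDefineContext is defined further down in this header; emit_fifo() and context_id are hypothetical):

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} cmd;

	cmd.header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd.header.size = sizeof(cmd.body);	/* payload only, header excluded */
	cmd.body.cid = context_id;
	emit_fifo(&cmd, sizeof(cmd));		/* copy header + body into the FIFO */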
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value in the numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value in the numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dClearFlag      clearFlag;
+   uint32               color;
+   float                depth;
+   uint32               stencil;
+   /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dLightType      type;
+   SVGA3dBool           inWorldSpace;
+   float                diffuse[4];
+   float                specular[4];
+   float                ambient[4];
+   float                position[4];
+   float                direction[4];
+   float                range;
+   float                falloff;
+   float                attenuation0;
+   float                attenuation1;
+   float                attenuation2;
+   float                theta;
+   float                phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRenderStateName   state;
+   union {
+      uint32               uintValue;
+      float                floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                 cid;
+   SVGA3dRenderTargetType type;
+   SVGA3dSurfaceImageId   target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   SVGA3dBox             boxSrc;
+   SVGA3dBox             boxDest;
+   SVGA3dStretchBltMode  mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * If the discard flag is present in a surface DMA operation, the host may
+    * discard the contents of the current mipmap level and face of the target
+    * surface before applying the surface DMA contents.
+    */
+   uint32 discard : 1;
+
+   /*
+    * If the unsynchronized flag is present, the host may perform this upload
+    * without syncing to pending reads on this surface.
+    */
+   uint32 unsynchronized : 1;
+
+   /*
+    * Guests *MUST* set the reserved bits to 0 before submitting the command
+    * suffix as future flags may occupy these bits.
+    */
+   uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestImage guest;
+   SVGA3dSurfaceImageId host;
+   SVGA3dTransferType transfer;
+   /*
+    * Followed by variable number of SVGA3dCopyBox structures. For consistency
+    * in all clipping logic and coordinate translation, we define the
+    * "source" in each copyBox as the guest image and the
+    * "destination" as the host image, regardless of transfer
+    * direction.
+    *
+    * For efficiency, the SVGA3D device is free to copy more data than
+    * specified. For example, it may round copy boxes outwards such
+    * that they lie on particular alignment boundaries.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ *    This is a command suffix that will appear after a SurfaceDMA command in
+ *    the FIFO.  It contains some extra information that hosts may use to
+ *    optimize performance or protect the guest.  This suffix exists to preserve
+ *    backwards compatibility while also allowing for new functionality to be
+ *    implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 suffixSize;
+
+   /*
+    * The maximum offset is used to determine the maximum offset from the
+    * guestPtr base address that will be accessed or written to during this
+    * surfaceDMA.  If the suffix is supported, the host will respect this
+    * boundary while performing surface DMAs.
+    *
+    * Defaults to MAX_UINT32
+    */
+   uint32 maximumOffset;
+
+   /*
+    * A set of flags that describes optimizations that the host may perform
+    * while performing this surface DMA operation.  For correctness, the guest
+    * must never rely on behaviour that differs when these flags are set.
+    *
+    * Defaults to 0
+    */
+   SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
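Per the comments above, a guest that wants no special treatment fills the suffix with its documented defaults. A minimal sketch:

	SVGA3dCmdSurfaceDMASuffix suffix;

	memset(&suffix, 0, sizeof(suffix));	/* flags = 0, reserved bits clear */
	suffix.suffixSize = sizeof(suffix);
	suffix.maximumOffset = 0xffffffffu;	/* MAX_UINT32: no extra access bound */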
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ *   This command is the SVGA3D device's generic drawing entry point.
+ *   It can draw multiple ranges of primitives, optionally using an
+ *   index buffer, using an arbitrary collection of vertex buffers.
+ *
+ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ *   during this draw call. The declarations specify which surface
+ *   the vertex data lives in, what that vertex data is used for,
+ *   and how to interpret it.
+ *
+ *   Each SVGA3dPrimitiveRange defines a collection of primitives
+ *   to render using the same vertex arrays. An index buffer is
+ *   optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * A range hint is an optional specification for the range of indices
+    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+    * that the entire array will be used.
+    *
+    * These are only hints. The SVGA3D device may use them for
+    * performance optimization if possible, but it's also allowed to
+    * ignore these values.
+    */
+   uint32               first;
+   uint32               last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Define the origin and shape of a vertex or index array. Both
+    * 'offset' and 'stride' are in bytes. The provided surface will be
+    * reinterpreted as a flat array of bytes in the same format used
+    * by surface DMA operations. To avoid unnecessary conversions, the
+    * surface should be created with the SVGA3D_BUFFER format.
+    *
+    * Index 0 in the array starts 'offset' bytes into the surface.
+    * Index 1 begins at byte 'offset + stride', etc. Array indices may
+    * not be negative.
+    */
+   uint32               surfaceId;
+   uint32               offset;
+   uint32               stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
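Following the addressing rule above (index i starts at 'offset + i * stride' bytes into the surface), a one-line helper makes the layout explicit; svga3d_array_byte_offset is a hypothetical name:

	static inline uint32 svga3d_array_byte_offset(const SVGA3dArray *a, uint32 i)
	{
		return a->offset + i * a->stride;	/* index 0 at 'offset' */
	}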
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   SVGA3dDeclType       type;
+   SVGA3dDeclMethod     method;
+   SVGA3dDeclUsage      usage;
+   uint32               usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+   SVGA3dVertexArrayIdentity  identity;
+   SVGA3dArray                array;
+   SVGA3dArrayRangeHint       rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+   /*
+    * Define a group of primitives to render, from sequential indices.
+    *
+    * The values of 'primType' and 'primitiveCount' imply the
+    * total number of vertices that will be rendered.
+    */
+   SVGA3dPrimitiveType  primType;
+   uint32               primitiveCount;
+
+   /*
+    * Optional index buffer. If indexArray.surfaceId is
+    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+    * without an index buffer is identical to rendering with an index
+    * buffer containing the sequence [0, 1, 2, 3, ...].
+    *
+    * If an index buffer is in use, indexWidth specifies the width in
+    * bytes of each index value. It must be less than or equal to
+    * indexArray.stride.
+    *
+    * (Currently, the SVGA3D device requires index buffers to be tightly
+    * packed. In other words, indexWidth == indexArray.stride)
+    */
+   SVGA3dArray          indexArray;
+   uint32               indexWidth;
+
+   /*
+    * Optional index bias. This number is added to all indices from
+    * indexArray before they are used as vertex array indices. This
+    * can be used in multiple ways:
+    *
+    *  - When not using an indexArray, this bias can be used to
+    *    specify where in the vertex arrays to begin rendering.
+    *
+    *  - A positive number here is equivalent to increasing the
+    *    offset in each vertex array.
+    *
+    *  - A negative number can be used to render using a small
+    *    vertex array and an index buffer that contains large
+    *    values. This may be used by some applications that
+    *    crop a vertex buffer without modifying their index
+    *    buffer.
+    *
+    * Note that rendering with a negative bias value may be slower and
+    * use more memory than rendering with a positive or zero bias.
+    */
+   int32                indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               numVertexDecls;
+   uint32               numRanges;
+
+   /*
+    * There are up to three variable size arrays after the
+    * SVGA3dCmdDrawPrimitives structure. In order,
+    * they are:
+    *
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+    *    the frequency divisor for the corresponding vertex decl).
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
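+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: a minimal non-indexed DRAW_PRIMITIVES body with one
+ * vertex decl and one range, laid out as it appears in the command
+ * stream.  The decl type/method/usage values are from the companion
+ * svga3d_types.h header; cid, vertexSid and numTriangles are
+ * placeholders.
+ *
+ *    struct {
+ *       SVGA3dCmdDrawPrimitives body;
+ *       SVGA3dVertexDecl        decl;    // quantity numVertexDecls
+ *       SVGA3dPrimitiveRange    range;   // quantity numRanges
+ *    } cmd;
+ *
+ *    cmd.body.cid                 = cid;
+ *    cmd.body.numVertexDecls      = 1;
+ *    cmd.body.numRanges           = 1;
+ *    cmd.decl.identity.type       = SVGA3D_DECLTYPE_FLOAT3;
+ *    cmd.decl.identity.method     = SVGA3D_DECLMETHOD_DEFAULT;
+ *    cmd.decl.identity.usage      = SVGA3D_DECLUSAGE_POSITION;
+ *    cmd.decl.identity.usageIndex = 0;
+ *    cmd.decl.array.surfaceId     = vertexSid;
+ *    cmd.decl.array.offset        = 0;
+ *    cmd.decl.array.stride        = 3 * sizeof(float);
+ *    cmd.decl.rangeHint.first     = 0;
+ *    cmd.decl.rangeHint.last      = 0;    // 0 == whole array
+ *    cmd.range.primType           = SVGA3D_PRIMITIVE_TRIANGLELIST;
+ *    cmd.range.primitiveCount     = numTriangles;
+ *    cmd.range.indexArray.surfaceId = SVGA3D_INVALID_ID;  // non-indexed
+ *    cmd.range.indexWidth         = 0;
+ *    cmd.range.indexBias          = 0;
+ */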
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 primitiveCount;        /* How many primitives to render */
+   uint32 startVertexLocation;   /* Which vertex do we start rendering at. */
+
+   uint8 primitiveType;          /* SVGA3dPrimitiveType */
+   uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint8 primitiveType;       /* SVGA3dPrimitiveType */
+
+   uint32 indexBufferSid;     /* Valid index buffer sid. */
+   uint32 indexBufferOffset;  /* Byte offset into the index buffer, almost */
+                              /* always 0 for DX9 guests, non-zero for OpenGL */
+                              /* guests.  We can't represent non-multiple-of- */
+                              /* stride offsets in D3D9Renderer... */
+   uint8 indexBufferStride;   /* Allowable values = 1, 2, or 4 */
+
+   int32 baseVertexLocation;  /* Bias applied to the index when selecting a */
+                              /* vertex from the streams, may be negative */
+
+   uint32 primitiveCount;     /* How many primitives to render */
+   uint32 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;          /* SVGA3dDeclType */
+   uint8 method;        /* SVGA3dDeclMethod */
+   uint8 usage;         /* SVGA3dDeclUsage */
+   uint8 usageIndex;
+   uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numElements;
+
+   /*
+    * Followed by numElements SVGA3dVertexElement structures.
+    *
+    * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numStreams;
+   /*
+    * Followed by numStreams SVGA3dVertexStream structures.
+    *
+    * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   stage;
+   SVGA3dTextureStateName   name;
+   union {
+      uint32                value;
+      float                 floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by a variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   cid;
+   SVGA3dTransformType      type;
+   float                    matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                min;
+   float                max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dZRange         zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                diffuse[4];
+   float                ambient[4];
+   float                specular[4];
+   float                emissive[4];
+   float                shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dFace           face;
+   SVGA3dMaterial       material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   SVGA3dLightData      data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   uint32               enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   float                plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+   /* Followed by a variable number of DWORDs for shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  reg;     /* register number */
+   SVGA3dShaderType        type;
+   SVGA3dShaderConstType   ctype;
+   uint32                  values[4];
+
+   /*
+    * Followed by a variable number of additional values.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dShaderType     type;
+   uint32               shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader;       /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;   /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA3D_CMD_WAIT_FOR_QUERY --
+ *
+ *    Reads the SVGA3dQueryResult structure pointed to by guestResult,
+ *    and if the state member is set to anything other than
+ *    SVGA3D_QUERYSTATE_PENDING, this command is a no-op.
+ *
+ *    Otherwise, in addition to the query explicitly waited for,
+ *    all queries of the same type issued with the same cid, for which
+ *    an SVGA_3D_CMD_END_QUERY command has previously been sent, will
+ *    be finished after execution of this command.
+ *
+ *    A query will be identified by the gmrId and offset of the guestResult
+ *    member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
+ *    been sent previously with an identical gmrId and offset, it will
+ *    effectively end all queries with an identical type issued with the
+ *    same cid, and the SVGA3dQueryResult structure pointed to by
+ *    guestResult will not be written to. This property can be used to
+ *    implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;        /* Same parameters passed to END_QUERY */
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               totalSize;    /* Set by guest before query is ended. */
+   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
+   union {                            /* Set by host on exit from PENDING state */
+      uint32            result32;
+      uint32            queryCookie; /* May be used to identify which QueryGetData this
+                                        result corresponds to. */
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
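+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: a guest typically primes the result structure before
+ * ending the query, then waits and polls the state member.
+ *
+ *    volatile SVGA3dQueryResult *qr;    // maps the GMR at guestResult
+ *
+ *    qr->totalSize = sizeof *qr;
+ *    qr->state     = SVGA3D_QUERYSTATE_PENDING;
+ *    // ...submit SVGA_3D_CMD_END_QUERY, then SVGA_3D_CMD_WAIT_FOR_QUERY...
+ *    while (qr->state == SVGA3D_QUERYSTATE_PENDING)
+ *       ;                               // or sleep on a fence/IRQ instead
+ */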
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ *    This is a blit from an SVGA3D surface to a Screen Object.
+ *    This blit must be directed at a specific screen.
+ *
+ *    The blit copies from a rectangular region of an SVGA3D surface
+ *    image to a rectangular region of a screen.
+ *
+ *    This command takes an optional variable-length list of clipping
+ *    rectangles after the body of the command. If no rectangles are
+ *    specified, there is no clipping region. The entire destRect is
+ *    drawn to. If one or more rectangles are included, they describe
+ *    a clipping region. The clip rectangle coordinates are measured
+ *    relative to the top-left corner of destRect.
+ *
+ *    The srcImage must be from mip=0 face=0.
+ *
+ *    This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId srcImage;
+   SVGASignedRect       srcRect;
+   uint32               destScreenId; /* Screen Object ID */
+   SVGASignedRect       destRect;
+   /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
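+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: a blit with two clip rectangles is the fixed body
+ * followed immediately by the rectangles, and the submitted command
+ * size must cover both.  fifo_submit() is a hypothetical helper.
+ *
+ *    struct {
+ *       SVGA3dCmdBlitSurfaceToScreen body;
+ *       SVGASignedRect               clip[2];
+ *    } cmd;
+ *
+ *    // clip coordinates are relative to the top-left of destRect
+ *    fifo_submit(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, &cmd, sizeof cmd);
+ */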
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface;               /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface;             /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32-bit BGRX; refBuffer and destBuffer may
+ *   be different, but the guest must ensure that refBuffer holds exactly
+ *   the contents that were in place when the host received the previous
+ *   screen DMA command.
+ *
+ * - changeMap is generated by lib/blit, and it contains at least the
+ *   changes since the last screen DMA received by the host.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+   uint32 screenId;
+   SVGAGuestImage refBuffer;
+   SVGAGuestImage destBuffer;
+   SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA;        /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Set Unity Surface Cookie
+ *
+ * Associates the supplied cookie with the surface id for use with
+ * Unity.  This cookie is a hint from guest to host; there is no way
+ * for the guest to read back the cookie, and the host is free to drop
+ * the cookie association at will.  The default value for the cookie
+ * on all surfaces is 0.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdSetUnitySurfaceCookie {
+   uint32 sid;
+   uint64 cookie;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetUnitySurfaceCookie;   /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
+
+/*
+ * Open a context-specific surface in a non-context-specific manner.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdOpenContextSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdOpenContextSurface;   /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
+
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA     (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX      (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY      (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA  (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dLogicOp logicOp;
+   /* Followed by a variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt;   /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt;   /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint16 mode;
+   uint16 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt;   /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   SVGA3dLogicOp logicOp;
+   /* Followed by a variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill;   /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 alphaVal;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend;   /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH  512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId tmp;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dSurfaceImageId gammaSurf;
+   SVGA3dSurfaceImageId alphaSurf;
+   uint32 gamma;
+   uint32 color;
+   uint32 color2;
+   int32 alphaOffsetX;
+   int32 alphaOffsetY;
+   /* Followed by a variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend;   /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed objects definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobFormat ptDepth;
+   uint32 sizeInBytes;
+   PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceFormat format;
+   SVGA3dSurfaceFlags surfaceFlags;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   SVGAMobId mobid;
+   uint32 arraySize;
+   uint32 mobPitch;
+   uint32 pad[5];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId image;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+   uint32 dpi;
+   uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+	(sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;
+   uint8 methodUsage;
+   uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint16 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRect viewport;
+   SVGA3dRect scissorRect;
+   SVGA3dZRange zRange;
+
+   SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+   SVGAGBVertexElement decl1[4];
+
+   uint32 renderStates[SVGA3D_RS_MAX];
+   SVGAGBVertexElement decl2[18];
+   uint32 pad0[2];
+
+   struct {
+      SVGA3dFace face;
+      SVGA3dMaterial material;
+   } material;
+
+   float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+   float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+   SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+   SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+   /*
+    * Shaders currently bound
+    */
+   uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+   SVGAGBVertexElement decl3[10];
+   uint32 pad1[3];
+
+   uint32 occQueryActive;
+   uint32 occQueryValue;
+
+   /*
+    * Int/Bool Shader constants
+    */
+   SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   uint16 pShaderBValues;
+   uint16 vShaderBValues;
+
+
+   SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+   SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+   uint32 numVertexDecls;
+   uint32 numVertexStreams;
+   uint32 numVertexDivisors;
+   uint32 pad2[30];
+
+   /*
+    * Texture Stages
+    *
+    * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+    * textureStages array.
+    * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+    */
+   uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+   uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+   uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+   /*
+    * Float Shader constants.
+    */
+   SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+   SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
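+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: defining the MOB object table from a flat, physically
+ * contiguous allocation with no entries valid yet.  SVGA_OTABLE_MOB
+ * and SVGA3D_MOBFMT_RANGE are assumed from the companion type headers.
+ *
+ *    SVGA3dCmdSetOTableBase64 cmd;
+ *
+ *    cmd.type             = SVGA_OTABLE_MOB;
+ *    cmd.baseAddress      = otableBasePfn;   // a page number, not bytes
+ *    cmd.sizeInBytes      = numEntries * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+ *    cmd.validSizeInBytes = 0;
+ *    cmd.ptDepth          = SVGA3D_MOBFMT_RANGE;
+ */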
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+   uint32 sid;
+   SVGAMobId mobid;
+   uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob.  Optionally issue a
+ * readback/update on the surface while it is still bound to the old
+ * mobid if the mobid is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE   (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
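+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: swapping a surface onto a new mob only if the old
+ * binding is still in place, flushing dirty data through the old
+ * mob first.
+ *
+ *    SVGA3dCmdCondBindGBSurface cmd;
+ *
+ *    cmd.sid       = sid;
+ *    cmd.testMobid = oldMobid;   // rebind only if this is still bound
+ *    cmd.mobid     = newMobid;
+ *    cmd.flags     = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK;
+ */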
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back onto the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
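+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition:
+ *
+ *    SVGA3dCmdBindGBContext cmd;
+ *
+ *    cmd.cid           = cid;
+ *    cmd.mobid         = mobid;
+ *    cmd.validContents = resuming ? 1 : 0;   // 1 only when paging an
+ *                                            // old context back in
+ */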
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4 dwords in length, while
+    * BOOL constants are 1 dword in length.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline;   /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
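+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: the size of the trailing payload for n consecutive
+ * registers depends on constType (the names here are assumed from
+ * SVGA3dShaderConstType).
+ *
+ *    payloadBytes = (constType == SVGA3D_CONST_TYPE_BOOL)
+ *                      ? n * sizeof(uint32)         // 1 dword each
+ *                      : n * 4 * sizeof(uint32);    // 4 dwords each
+ */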
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to the
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 mustBeZero;
+   uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+
+   /*
+    * The physical DPI that the guest expects this screen to be displayed at.
+    *
+    * Guests which are not DPI-aware should set this to zero.
+    */
+   uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
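+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: the usual screen-target bring-up sequence is define,
+ * bind a backing image, then flush the initial contents.
+ *
+ *    DEFINE_GB_SCREENTARGET  (stid, width, height, xRoot, yRoot, ...);
+ *    BIND_GB_SCREENTARGET    (stid, image);
+ *    UPDATE_GB_SCREENTARGET  (stid, {0, 0, width, height});
+ */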
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+   uint32 screenId;
+   uint32 dead;
+   SVGAMobId destMobID;
+   uint32 destPitch;
+   SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA;        /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 0000000..c18b663
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ *       SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.  It is set by the host and read by the guest.  This lets
+ *   us make new guest drivers that are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
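+
+/*
+ * Editor's note -- an illustrative sketch, not part of the device
+ * definition: a guest driver gating its 3D path on the host version.
+ *
+ *    uint32 hwversion = fifo[SVGA_FIFO_3D_HWVERSION];   // hypothetical read
+ *
+ *    if (SVGA3D_MAJOR_HWVERSION(hwversion) <
+ *        SVGA3D_MAJOR_HWVERSION(SVGA3D_HWVERSION_WS8_B1)) {
+ *       // fall back to 2D-only operation
+ *    }
+ */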
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_INVALID                           = ((uint32)-1),
+   SVGA3D_DEVCAP_3D                                = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+
+   /*
+    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+    * fixed-function texture units available. Each of these units
+    * works in both FFP and Shader modes, and they support texture
+    * transforms and texture coordinates. The host may have additional
+    * texture image units that are only usable with shaders.
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12,
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13,
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14,
+   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+
+   /*
+    * There is a hole in our devcap definitions for
+    * historical reasons.
+    *
+    * Define a constant just for completeness.
+    */
+   SVGA3D_DEVCAP_MISSING62                         = 62,
+
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets.  This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_ATI1                   = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_ATI2                   = 83,
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD1                             = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ORed together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ORed together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+    */
+   SVGA3D_DEVCAP_TS_COLOR_KEY                      = 93, /* boolean */
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD2                             = 94,
+
+   /*
+    * Does the device support the DX commands?
+    */
+   SVGA3D_DEVCAP_DX                                = 95,
+
+   /*
+    * What is the maximum size of a texture array?
+    *
+    * (Even if this cap is zero, cubemaps are still allowed.)
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
+
+   /*
+    * What is the maximum number of vertex buffers that can
+    * be used in the DXContext inputAssembly?
+    */
+   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
+
+   /*
+    * What is the maximum number of constant buffers
+    * that can be expected to work correctly with a
+    * DX context?
+    */
+   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
+
+   /*
+    * Does the device support provoking vertex control?
+    * If zero, the first vertex will always be the provoking vertex.
+    */
+   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
+
+   SVGA3D_DEVCAP_DXFMT_X8R8G8B8                    = 100,
+   SVGA3D_DEVCAP_DXFMT_A8R8G8B8                    = 101,
+   SVGA3D_DEVCAP_DXFMT_R5G6B5                      = 102,
+   SVGA3D_DEVCAP_DXFMT_X1R5G5B5                    = 103,
+   SVGA3D_DEVCAP_DXFMT_A1R5G5B5                    = 104,
+   SVGA3D_DEVCAP_DXFMT_A4R4G4B4                    = 105,
+   SVGA3D_DEVCAP_DXFMT_Z_D32                       = 106,
+   SVGA3D_DEVCAP_DXFMT_Z_D16                       = 107,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8                     = 108,
+   SVGA3D_DEVCAP_DXFMT_Z_D15S1                     = 109,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8                  = 110,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4           = 111,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE16                 = 112,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8           = 113,
+   SVGA3D_DEVCAP_DXFMT_DXT1                        = 114,
+   SVGA3D_DEVCAP_DXFMT_DXT2                        = 115,
+   SVGA3D_DEVCAP_DXFMT_DXT3                        = 116,
+   SVGA3D_DEVCAP_DXFMT_DXT4                        = 117,
+   SVGA3D_DEVCAP_DXFMT_DXT5                        = 118,
+   SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
+   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
+   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
+   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8                  = 122,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
+   SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
+   SVGA3D_DEVCAP_DXFMT_V8U8                        = 126,
+   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                    = 127,
+   SVGA3D_DEVCAP_DXFMT_CxV8U8                      = 128,
+   SVGA3D_DEVCAP_DXFMT_X8L8V8U8                    = 129,
+   SVGA3D_DEVCAP_DXFMT_A2W10V10U10                 = 130,
+   SVGA3D_DEVCAP_DXFMT_ALPHA8                      = 131,
+   SVGA3D_DEVCAP_DXFMT_R_S10E5                     = 132,
+   SVGA3D_DEVCAP_DXFMT_R_S23E8                     = 133,
+   SVGA3D_DEVCAP_DXFMT_RG_S10E5                    = 134,
+   SVGA3D_DEVCAP_DXFMT_RG_S23E8                    = 135,
+   SVGA3D_DEVCAP_DXFMT_BUFFER                      = 136,
+   SVGA3D_DEVCAP_DXFMT_Z_D24X8                     = 137,
+   SVGA3D_DEVCAP_DXFMT_V16U16                      = 138,
+   SVGA3D_DEVCAP_DXFMT_G16R16                      = 139,
+   SVGA3D_DEVCAP_DXFMT_A16B16G16R16                = 140,
+   SVGA3D_DEVCAP_DXFMT_UYVY                        = 141,
+   SVGA3D_DEVCAP_DXFMT_YUY2                        = 142,
+   SVGA3D_DEVCAP_DXFMT_NV12                        = 143,
+   SVGA3D_DEVCAP_DXFMT_AYUV                        = 144,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS       = 145,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT           = 146,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT           = 147,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS          = 148,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT             = 149,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT              = 150,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT              = 151,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS       = 152,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT           = 153,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM          = 154,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT           = 155,
+   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS             = 156,
+   SVGA3D_DEVCAP_DXFMT_R32G32_UINT                 = 157,
+   SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
+   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS    = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT     = 162,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
+   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS           = 166,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM              = 167,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB         = 168,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT               = 169,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT               = 170,
+   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS             = 171,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UINT                 = 172,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SINT                 = 173,
+   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                = 174,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT                   = 175,
+   SVGA3D_DEVCAP_DXFMT_R32_UINT                    = 176,
+   SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
+   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
+   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS       = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT        = 181,
+   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SINT                   = 185,
+   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                = 186,
+   SVGA3D_DEVCAP_DXFMT_R16_UNORM                   = 187,
+   SVGA3D_DEVCAP_DXFMT_R16_UINT                    = 188,
+   SVGA3D_DEVCAP_DXFMT_R16_SNORM                   = 189,
+   SVGA3D_DEVCAP_DXFMT_R16_SINT                    = 190,
+   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                 = 191,
+   SVGA3D_DEVCAP_DXFMT_R8_UNORM                    = 192,
+   SVGA3D_DEVCAP_DXFMT_R8_UINT                     = 193,
+   SVGA3D_DEVCAP_DXFMT_R8_SNORM                    = 194,
+   SVGA3D_DEVCAP_DXFMT_R8_SINT                     = 195,
+   SVGA3D_DEVCAP_DXFMT_P8                          = 196,
+   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP          = 197,
+   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM             = 198,
+   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM             = 199,
+   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                = 200,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB              = 201,
+   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                = 202,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB              = 203,
+   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                = 204,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB              = 205,
+   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                = 206,
+   SVGA3D_DEVCAP_DXFMT_ATI1                        = 207,
+   SVGA3D_DEVCAP_DXFMT_BC4_SNORM                   = 208,
+   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                = 209,
+   SVGA3D_DEVCAP_DXFMT_ATI2                        = 210,
+   SVGA3D_DEVCAP_DXFMT_BC5_SNORM                   = 211,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM  = 212,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS           = 213,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB         = 214,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS           = 215,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB         = 216,
+   SVGA3D_DEVCAP_DXFMT_Z_DF16                      = 217,
+   SVGA3D_DEVCAP_DXFMT_Z_DF24                      = 218,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                 = 219,
+   SVGA3D_DEVCAP_DXFMT_YV12                        = 220,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT          = 221,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT          = 222,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM          = 223,
+   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                = 224,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM           = 225,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM              = 226,
+   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                = 227,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                = 228,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                = 229,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT                   = 230,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                  = 231,
+   SVGA3D_DEVCAP_DXFMT_R16_FLOAT                   = 232,
+   SVGA3D_DEVCAP_DXFMT_D16_UNORM                   = 233,
+   SVGA3D_DEVCAP_DXFMT_A8_UNORM                    = 234,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM                   = 235,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM                   = 236,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM                   = 237,
+   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                = 238,
+   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM              = 239,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM              = 240,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM              = 241,
+   SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
+   SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
+
+   SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support volume (3D) textures?
+ * DX_VERTEX_BUFFER: Can the format be used as a DX vertex buffer?
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED                (1 <<  0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE            (1 <<  1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET       (1 <<  2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET       (1 <<  3)
+#define SVGA3D_DXFMT_BLENDABLE                (1 <<  4)
+#define SVGA3D_DXFMT_MIPS                     (1 <<  5)
+#define SVGA3D_DXFMT_ARRAY                    (1 <<  6)
+#define SVGA3D_DXFMT_VOLUME                   (1 <<  7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER         (1 <<  8)
+#define SVGADX_DXFMT_MULTISAMPLE_2            (1 <<  9)
+#define SVGADX_DXFMT_MULTISAMPLE_4            (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8            (1 << 11)
+#define SVGADX_DXFMT_MAX                      (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+           SVGADX_DXFMT_MULTISAMPLE_2 | \
+           SVGADX_DXFMT_MULTISAMPLE_4 | \
+           SVGADX_DXFMT_MULTISAMPLE_8 )
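+
+/*
+ * Illustrative sketch, not part of the device headers: after querying
+ * an SVGA3D_DEVCAP_DXFMT_* cap, a driver might test the bits above as
+ * follows.  The helper name is hypothetical.
+ */
+static inline Bool
+SVGA3dDXFmtIsMultisampleTarget(uint32 dxfmtCaps)
+{
+   return (dxfmtCaps & SVGA3D_DXFMT_COLOR_RENDERTARGET) != 0 &&
+          (dxfmtCaps & SVGA3D_DXFMT_MULTISAMPLE) != 0;
+}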
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 0000000..8c5ae60
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ *       SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN               0
+#define SVGA3D_INPUT_PER_VERTEX_DATA   0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX               2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN      1
+#define SVGA3D_RESOURCE_BUFFER        1
+#define SVGA3D_RESOURCE_TEXTURE1D     2
+#define SVGA3D_RESOURCE_TEXTURE2D     3
+#define SVGA3D_RESOURCE_TEXTURE3D     4
+#define SVGA3D_RESOURCE_TEXTURECUBE   5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX      6
+#define SVGA3D_RESOURCE_TYPE_MAX      7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO   0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL    1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR  (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR  (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR  (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE     (1 << 7)
+typedef uint32 SVGA3dFilter;
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN     1
+#define SVGA3D_CULL_NONE    1
+#define SVGA3D_CULL_FRONT   2
+#define SVGA3D_CULL_BACK    3
+#define SVGA3D_CULL_MAX     4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID         0
+#define SVGA3D_COMPARISON_MIN             1
+#define SVGA3D_COMPARISON_NEVER           1
+#define SVGA3D_COMPARISON_LESS            2
+#define SVGA3D_COMPARISON_EQUAL           3
+#define SVGA3D_COMPARISON_LESS_EQUAL      4
+#define SVGA3D_COMPARISON_GREATER         5
+#define SVGA3D_COMPARISON_NOT_EQUAL       6
+#define SVGA3D_COMPARISON_GREATER_EQUAL   7
+#define SVGA3D_COMPARISON_ALWAYS          8
+#define SVGA3D_COMPARISON_MAX             9
+typedef uint8 SVGA3dComparisonFunc;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+/* Id limits */
+static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
+static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+
+typedef uint32 SVGA3dSurfaceId;
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+   struct {
+      float r;
+      float g;
+      float b;
+      float a;
+   };
+
+   float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext;   /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext;   /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context that is getting paged
+ * back onto the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext;   /* SVGA_3D_CMD_DX_BIND_CONTEXT */
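+
+/*
+ * Illustrative sketch, not part of the device protocol: filling in the
+ * bind command per the validContents rule above.  The helper name is
+ * hypothetical.
+ */
+static inline void
+SVGA3dDXFillBindContext(SVGA3dCmdDXBindContext *cmd, uint32 cid,
+                        SVGAMobId mobid, uint32 isPagedBack)
+{
+   cmd->cid = cid;
+   cmd->mobid = mobid;
+   cmd->validContents = isPagedBack ? 1 : 0;   /* 0 for a new context */
+}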
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext;   /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dReplyFormatData {
+   uint32 formatSupport;
+   uint32 msaa2xQualityLevels:5;
+   uint32 msaa4xQualityLevels:5;
+   uint32 msaa8xQualityLevels:5;
+   uint32 msaa16xQualityLevels:5;
+   uint32 msaa32xQualityLevels:5;
+   uint32 pad:7;
+}
+#include "vmware_pack_end.h"
+SVGA3dReplyFormatData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+   uint32 slot;
+   SVGA3dShaderType type;
+   SVGA3dSurfaceId sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+   uint32 startView;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dShaderResourceViewId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
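+
+/*
+ * Illustrative sketch: commands like the one above are followed by a
+ * variable-length array, so the total body size is the fixed struct
+ * plus the trailing ids.  The helper name is hypothetical.
+ */
+static inline uint32
+SVGA3dDXSetShaderResourcesSize(uint32 numViews)
+{
+   return (uint32)(sizeof(SVGA3dCmdDXSetShaderResources) +
+                   numViews * sizeof(SVGA3dShaderResourceViewId));
+}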
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+   uint32 startSampler;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dSamplerId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+   uint32 vertexCount;
+   uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+   uint32 indexCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+   uint32 vertexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+   uint32 indexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+   uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+   SVGA3dSurfaceId sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+   uint32 startBuffer;
+   /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+   SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+   SVGA3dBlendStateId blendId;
+   float blendFactor[4];
+   uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+   uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState values are used by the device to track
+ * query state transitions; they are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID   ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN       0
+#define SVGADX_QDSTATE_IDLE      0   /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE    1   /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING   2   /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED  3   /* Query has completed */
+#define SVGADX_QDSTATE_MAX       4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dQueryTypeUint8 type;
+   uint16 pad0;
+   SVGADXQueryDeviceState state;
+   SVGA3dDXQueryFlags flags;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+   SVGA3dQueryId queryId;
+   SVGA3dQueryType type;
+   SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+   SVGA3dQueryId queryId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+   SVGA3dQueryId queryId;
+   uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSOState {
+   uint32 offset;       /* Starting offset */
+   uint32 intOffset;    /* Internal offset */
+   uint32 vertexCount;  /* vertices written */
+   uint32 sizeInBytes;  /* max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+   SVGA3dSurfaceId sid;
+   uint32 offset;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
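+
+/*
+ * Illustrative sketch: binding an SO target so that stream output
+ * appends after data already in the buffer, using the
+ * SVGA3D_DX_SO_OFFSET_APPEND sentinel above.  Helper name hypothetical.
+ */
+static inline void
+SVGA3dDXFillAppendSoTarget(SVGA3dSoTarget *target, SVGA3dSurfaceId sid,
+                           uint32 sizeInBytes)
+{
+   target->sid = sid;
+   target->offset = SVGA3D_DX_SO_OFFSET_APPEND;
+   target->sizeInBytes = sizeInBytes;
+}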
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dSoTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport
+{
+   float x;
+   float y;
+   float width;
+   float height;
+   float minDepth;
+   float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+   uint32 pad0;
+   /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+   SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+   uint16 flags;
+   uint16 stencil;
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+   SVGA3dSurfaceId dest;
+   SVGA3dSurfaceId src;
+   uint32 destX;
+   uint32 srcX;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+typedef uint32 SVGA3dDXStretchBltMode;
+#define SVGADX_STRETCHBLT_LINEAR         (1 << 0)
+#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXStretchBlt {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceId dstSid;
+   uint32 destSubResource;
+   SVGA3dBox boxSrc;
+   SVGA3dBox boxDest;
+   SVGA3dDXStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Defines a resource/DX surface.  Resources share the surfaceId namespace.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource;   /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
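+
+/*
+ * Illustrative sketch: marking an entire width x height 2D subresource
+ * dirty by covering it with the update box.  The helper name is
+ * hypothetical.
+ */
+static inline void
+SVGA3dDXFillUpdateSubResource(SVGA3dCmdDXUpdateSubResource *cmd,
+                              SVGA3dSurfaceId sid, uint32 subResource,
+                              uint32 width, uint32 height)
+{
+   cmd->sid = sid;
+   cmd->subResource = subResource;
+   cmd->box.x = 0;
+   cmd->box.y = 0;
+   cmd->box.z = 0;
+   cmd->box.w = width;
+   cmd->box.h = height;
+   cmd->box.d = 1;
+}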
+
+/*
+ * Readback a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource;   /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface,
+ * filling the requested box.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface,
+ * filling the requested box.  Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
+ * The context is implied from the command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+   SVGA3dSurfaceId srcSid;
+   SVGA3dSurfaceId destSid;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 pad0;
+         uint32 pad1;
+      } buffer;
+      struct {
+         uint32 mostDetailedMip;
+         uint32 firstArraySlice;
+         uint32 mipLevels;
+         uint32 arraySize;
+      } tex;
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 flags;
+         uint32 pad0;
+      } bufferex;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dShaderResourceViewDesc desc;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+      } buffer;
+      struct {
+         uint32 mipSlice;
+         uint32 firstArraySlice;
+         uint32 arraySize;
+      } tex;                    /* 1d, 2d, cube */
+      struct {
+         uint32 mipSlice;
+         uint32 firstW;
+         uint32 wSize;
+      } tex3D;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dRenderTargetViewDesc desc;
+   uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH   0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK         0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+   uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+   uint32 inputSlot;
+   uint32 alignedByteOffset;
+   SVGA3dSurfaceFormat format;
+   SVGA3dInputClassification inputSlotClass;
+   uint32 instanceDataStepRate;
+   uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * XXX: How many of these can there be?
+    */
+   uint32 elid;
+   uint32 numDescs;
+   SVGA3dInputElementDesc desc[32];
+   uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+   /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+      uint8 blendEnable;
+      uint8 srcBlend;
+      uint8 destBlend;
+      uint8 blendOp;
+      uint8 srcBlendAlpha;
+      uint8 destBlendAlpha;
+      uint8 blendOpAlpha;
+      uint8 renderTargetWriteMask;
+      uint8 logicOpEnable;
+      uint8 logicOp;
+      uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+   uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+   SVGA3dBlendStateId blendId;
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+   SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+   uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+   uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+   SVGA3dSamplerId samplerId;
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+   SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSignatureEntry {
+   uint8 systemValue;
+   uint8 reg;                 /* register is a reserved word */
+   uint16 mask;
+   uint8 registerComponentType;
+   uint8 minPrecision;
+   uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignatureEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+   uint32 numInputSignatureEntries;
+   uint32 numOutputSignatureEntries;
+
+   uint32 numPatchConstantSignatureEntries;
+
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+   SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+   uint32 cid;
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+/*
+ * The maximum number of streamout declarations in each streamout entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+   uint32 outputSlot;
+   uint32 registerIndex;
+   uint8  registerMask;
+   uint8  pad0;
+   uint16 pad1;
+   uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXStreamOutputEntry {
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+   uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+   SVGA3dStreamOutputId soid;
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64;  /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
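+
+/*
+ * Illustrative sketch: filling the command that binds a mob as one of a
+ * context's object tables.  validSizeInBytes would be 0 for a fresh,
+ * empty table.  The helper name is hypothetical.
+ */
+static inline void
+SVGA3dDXFillSetCOTable(SVGA3dCmdDXSetCOTable *cmd, uint32 cid,
+                       uint32 mobid, SVGACOTableType type,
+                       uint32 validSizeInBytes)
+{
+   cmd->cid = cid;
+   cmd->mobid = mobid;
+   cmd->type = type;
+   cmd->validSizeInBytes = validSizeInBytes;
+}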
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+   uint32 cid;
+   SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+   uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+   uint32 bufferId;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+   uint32 sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+   uint32 layoutId;
+   SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+   uint32 indexBufferSid;
+   uint32 pad;
+   uint32 indexBufferOffset;
+   uint32 indexBufferFormat;
+   uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+   SVGADXInputAssemblyMobFormat inputAssembly;
+
+   struct {
+      uint32 blendStateId;
+      uint32 blendFactor[4];
+      uint32 sampleMask;
+      uint32 depthStencilStateId;
+      uint32 stencilRef;
+      uint32 rasterizerStateId;
+      uint32 depthStencilViewId;
+      uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+      uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+   } renderState;
+
+   struct {
+      uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+      uint32 soid;
+   } streamOut;
+   uint32 pad0[11];
+
+   uint8 numViewports;
+   uint8 numScissorRects;
+   uint16 pad1[1];
+
+   uint32 pad2[3];
+
+   SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+   uint32 pad3[32];
+
+   SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+   uint32 pad4[64];
+
+   struct {
+      uint32 queryID;
+      uint32 value;
+   } predication;
+   uint32 pad5[2];
+
+   struct {
+      uint32 shaderId;
+      SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+      uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+      uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+   } shaderState[SVGA3D_NUM_SHADERTYPE];
+   uint32 pad6[26];
+
+   SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+   SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+   uint32 pad7[381];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+   uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 0000000..a1c3687
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ *       SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_RENDER_TARGETS               8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS                      8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS                    5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS         20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB, i.e. 8 * 1024 * 1024 / 4 = 2097152 dwords.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY  (8 * 1024 * 1024 / sizeof(uint32))
+
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 0000000..b44ce64
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *       SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 0000000..58704f0
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Blue and bump U are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Red, bump W, luminance and depth are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use per-channel bit masks.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Data is stored in separate
+						    planes (blocks). */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12. */
+
+	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_LUMINANCE,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_W,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V |
+	SVGA3DBLOCKDESC_W |
+	SVGA3DBLOCKDESC_Q,
+	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_IEEE_FP,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+	SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+	SVGA3DBLOCKDESC_Y,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+	SVGA3DBLOCKDESC_Y |
+	SVGA3DBLOCKDESC_U_VIDEO |
+	SVGA3DBLOCKDESC_V_VIDEO,
+	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_EXP,
+	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_2PLANAR_YUV,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * struct svga3d_surface_desc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+struct svga3d_channel_def {
+	union {
+		u8 blue;
+		u8 u;
+		u8 uv_video;
+		u8 u_video;
+	};
+	union {
+		u8 green;
+		u8 v;
+		u8 stencil;
+		u8 v_video;
+	};
+	union {
+		u8 red;
+		u8 w;
+		u8 luminance;
+		u8 y;
+		u8 depth;
+		u8 data;
+	};
+	union {
+		u8 alpha;
+		u8 q;
+		u8 exp;
+	};
+};
+
+struct svga3d_surface_desc {
+	SVGA3dSurfaceFormat format;
+	enum svga3d_block_desc block_desc;
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	u32 total_bit_depth;
+	struct svga3d_channel_def bit_depth;
+	struct svga3d_channel_def bit_offset;
+};
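+
+/*
+ * Illustrative sketch: deriving the pitch of a mip level from a
+ * descriptor.  For example, SVGA3D_DXT1 uses 4x4 blocks of 8 bytes, so
+ * a 256-pixel-wide level spans DIV_ROUND_UP(256, 4) * 8 = 512 bytes per
+ * block row.  The helper name is hypothetical.
+ */
+static inline u32
+svga3d_desc_pitch(const struct svga3d_surface_desc *desc, u32 width)
+{
+	u32 blocks_wide = DIV_ROUND_UP(width, desc->block_size.width);
+
+	return blocks_wide * desc->pitch_bytes_per_block;
+}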
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+   {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+      {1, 1, 1},  0, 0,
+      0, {{0}, {0}, {0}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      15, {{5}, {5}, {5}, {0}},
+      {{0}, {5}, {10}, {0}}},
+
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{4}, {4}, {4}, {4}},
+      {{0}, {4}, {8}, {12}}},
+
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {1}, {15}, {0}},
+      {{0}, {15}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {4}, {4}},
+      {{0}, {0}, {0}, {4}}},
+
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {6}, {0}},
+      {{11}, {6}, {0}, {0}}},
+
+   {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  3, 3,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {0}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {11}, {11}, {0}},
+      {{0}, {10}, {21}, {0}}},
+
+   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {0}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+      {1, 1, 1},  4, 4,
+      32, {{9}, {9}, {9}, {5}},
+      {{18}, {9}, {0}, {27}}},
+
+   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+};
+
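+/*
+ * clamped_umul32 --
+ *
+ *      Multiply two 32-bit values, saturating to MAX_UINT32 ((u32) -1)
+ *      instead of wrapping on overflow.
+ */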
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a*b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	return size;
+}
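+
+/*
+ * For example, a 256x128x1 base level at mip_level 3 yields
+ * max(256 >> 3, 1) x max(128 >> 3, 1) x max(1 >> 3, 1) = 32x16x1.
+ */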
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = DIV_ROUND_UP(pixel_size->width,
+					 desc->block_size.width);
+	block_size->height = DIV_ROUND_UP(pixel_size->height,
+					  desc->block_size.height);
+	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+					 desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
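+
+/*
+ * For example, with SVGA3D_DXT1 (4x4 pixel blocks, 8 pitch bytes per
+ * block), a 64-pixel-wide image spans DIV_ROUND_UP(64, 4) = 16 blocks,
+ * giving a pitch of 16 * 8 = 128 bytes.
+ */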
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe: if the result would have
+ *      overflowed, MAX_UINT32 is returned instead.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
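+
+/*
+ * Continuing the SVGA3D_DXT1 example above: a 64x64x1 image is 16x16x1
+ * blocks, so with a tightly packed pitch of 128 bytes the slice size is
+ * 16 * 128 = 2048 bytes, which at depth 1 is also the total size.
+ */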
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  u32 num_layers)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	return total_size * num_layers;
+}
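+
+/*
+ * For example, an 8x8 SVGA3D_A8R8G8B8 surface (4 bytes per pixel) with
+ * four mip levels and one layer serializes to
+ * (8*8 + 4*4 + 2*2 + 1*1) * 4 = 340 bytes.
+ */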
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: The pixel x coordinate.
+ * @y: The pixel y coordinate.
+ * @z: The pixel z coordinate (slice index for volumes).
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
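+
+/*
+ * For example, in a 64x64 SVGA3D_DXT1 image (4x4 blocks, 8 bytes per
+ * block, rowstride 16 * 8 = 128 bytes), the pixel at (10, 5, 0) maps to
+ * offset 5/4 * 128 + 10/4 * 8 = 144 bytes.
+ */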
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
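+
+/*
+ * For example, each face's full mip chain is laid out contiguously, so
+ * for the 8x8 SVGA3D_A8R8G8B8 surface above (340 bytes per mip chain,
+ * 256 bytes for mip 0), the image at face 2, mip 1 starts at
+ * 340 * 2 + 256 = 936 bytes.
+ */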
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with just the GBObjects cap-bit
+ *                                             set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_X8R8G8B8 ||
+		format == SVGA3D_A8R8G8B8 ||
+		format == SVGA3D_R5G6B5   ||
+		format == SVGA3D_X1R5G5B5 ||
+		format == SVGA3D_A1R5G5B5 ||
+		format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_R8G8B8A8_UNORM ||
+		format == SVGA3D_B8G8R8A8_UNORM ||
+		format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ *                                         ScreenTarget?
+ *                                         (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	if (svga3dsurface_is_gb_screen_target_format(format))
+		return true;
+	return svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 0000000..27b33ba
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ *       SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID         ((uint32)-1)
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+   uint32               srcx;
+   uint32               srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+   uint32               srcx;
+   uint32               srcy;
+   uint32               srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_INVALID               = 0,
+
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_FORMAT_MIN                   = 1,
+
+   SVGA3D_A8R8G8B8                     = 2,
+
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
+
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
+
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
+
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
+
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_BUMPL8V8U8                   = 23,
+
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
+
+   SVGA3D_A2R10G10B10                  = 26,
+
+   /* signed formats */
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
+
+   /* mixed formats */
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
+
+   SVGA3D_ALPHA8                       = 32,
+
+   /* Single- and dual-component floating point formats */
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
+
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
+
+   SVGA3D_V16U16                       = 39,
+
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
+
+   /* Packed Video formats */
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
+   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_P8                           = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
+   SVGA3D_ATI1                         = 108,   /* DX9-specific BC4_UNORM */
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
+   SVGA3D_ATI2                         = 111,   /* DX9-specific BC5_UNORM */
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
+
+   /* Advanced depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
+
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   SVGA3D_R32G32B32A32_FLOAT           = 122,
+   SVGA3D_R16G16B16A16_FLOAT           = 123,
+   SVGA3D_R16G16B16A16_UNORM           = 124,
+   SVGA3D_R32G32_FLOAT                 = 125,
+   SVGA3D_R10G10B10A2_UNORM            = 126,
+   SVGA3D_R8G8B8A8_SNORM               = 127,
+   SVGA3D_R16G16_FLOAT                 = 128,
+   SVGA3D_R16G16_UNORM                 = 129,
+   SVGA3D_R16G16_SNORM                 = 130,
+   SVGA3D_R32_FLOAT                    = 131,
+   SVGA3D_R8G8_SNORM                   = 132,
+   SVGA3D_R16_FLOAT                    = 133,
+   SVGA3D_D16_UNORM                    = 134,
+   SVGA3D_A8_UNORM                     = 135,
+   SVGA3D_BC1_UNORM                    = 136,
+   SVGA3D_BC2_UNORM                    = 137,
+   SVGA3D_BC3_UNORM                    = 138,
+   SVGA3D_B5G6R5_UNORM                 = 139,
+   SVGA3D_B5G5R5A1_UNORM               = 140,
+   SVGA3D_B8G8R8A8_UNORM               = 141,
+   SVGA3D_B8G8R8X8_UNORM               = 142,
+   SVGA3D_BC4_UNORM                    = 143,
+   SVGA3D_BC5_UNORM                    = 144,
+
+   SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+   SVGA3D_SURFACE_CUBEMAP               = (1 << 0),
+
+   /*
+    * HINT flags are not enforced by the device but are useful for
+    * performance.
+    */
+   SVGA3D_SURFACE_HINT_STATIC           = (1 << 1),
+   SVGA3D_SURFACE_HINT_DYNAMIC          = (1 << 2),
+   SVGA3D_SURFACE_HINT_INDEXBUFFER      = (1 << 3),
+   SVGA3D_SURFACE_HINT_VERTEXBUFFER     = (1 << 4),
+   SVGA3D_SURFACE_HINT_TEXTURE          = (1 << 5),
+   SVGA3D_SURFACE_HINT_RENDERTARGET     = (1 << 6),
+   SVGA3D_SURFACE_HINT_DEPTHSTENCIL     = (1 << 7),
+   SVGA3D_SURFACE_HINT_WRITEONLY        = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS    = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS        = (1 << 10),
+   SVGA3D_SURFACE_DECODE_RENDERTARGET   = (1 << 11),
+
+   /*
+    * Is this surface using a base-level pitch for its mob backing?
+    *
+    * This flag is not intended to be set by guest-drivers, but is instead
+    * set by the device when the surface is bound to a mob with a specified
+    * pitch.
+    */
+   SVGA3D_SURFACE_MOB_PITCH             = (1 << 12),
+
+   SVGA3D_SURFACE_INACTIVE              = (1 << 13),
+   SVGA3D_SURFACE_HINT_RT_LOCKABLE      = (1 << 14),
+   SVGA3D_SURFACE_VOLUME                = (1 << 15),
+
+   /*
+    * Required to be set on a surface to bind it to a screen target.
+    */
+   SVGA3D_SURFACE_SCREENTARGET          = (1 << 16),
+
+   /*
+    * Align images in the guest-backing mob to 16-bytes.
+    */
+   SVGA3D_SURFACE_ALIGN16               = (1 << 17),
+
+   SVGA3D_SURFACE_1D                    = (1 << 18),
+   SVGA3D_SURFACE_ARRAY                 = (1 << 19),
+
+   /*
+    * Bind flags.
+    * These are enforced for any surface defined with DefineGBSurface_v2.
+    */
+   SVGA3D_SURFACE_BIND_VERTEX_BUFFER    = (1 << 20),
+   SVGA3D_SURFACE_BIND_INDEX_BUFFER     = (1 << 21),
+   SVGA3D_SURFACE_BIND_CONSTANT_BUFFER  = (1 << 22),
+   SVGA3D_SURFACE_BIND_SHADER_RESOURCE  = (1 << 23),
+   SVGA3D_SURFACE_BIND_RENDER_TARGET    = (1 << 24),
+   SVGA3D_SURFACE_BIND_DEPTH_STENCIL    = (1 << 25),
+   SVGA3D_SURFACE_BIND_STREAM_OUTPUT    = (1 << 26),
+
+   /*
+    * A note on staging flags:
+    *
+    * The STAGING flags note that the surface will not be used directly by the
+    * drawing pipeline, i.e. that it will not be bound to any bind point.
+    * Staging surfaces may be used by copy operations to move data in and out
+    * of other surfaces.
+    *
+    * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+    * updates indirectly, i.e. the surface will not be updated directly, but
+    * will receive copies from staging surfaces.
+    */
+   SVGA3D_SURFACE_STAGING_UPLOAD        = (1 << 27),
+   SVGA3D_SURFACE_STAGING_DOWNLOAD      = (1 << 28),
+   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE  = (1 << 29),
+
+   /*
+    * Setting this flag allows this surface to be used with the
+    * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+    * buffer surfaces, and no bind flags are allowed to be set on surfaces
+    * with this flag.
+    */
+   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  = (1 << 30),
+
+   /*
+    * Marker for the last defined bit.
+    */
+   SVGA3D_SURFACE_FLAG_MAX              = (1 << 31),
+} SVGA3dSurfaceFlags;
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
+        (  SVGA3D_SURFACE_MOB_PITCH    |         \
+           SVGA3D_SURFACE_SCREENTARGET |         \
+           SVGA3D_SURFACE_ALIGN16 |              \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |   \
+           SVGA3D_SURFACE_STAGING_UPLOAD |       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   \
+        )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_ARRAY |                   \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_INACTIVE |                \
+           SVGA3D_SURFACE_STAGING_UPLOAD |          \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK             \
+        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+        )
+
+#define SVGA3D_SURFACE_STAGING_MASK             \
+        (  SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD      \
+        )
+
+#define SVGA3D_SURFACE_BIND_MASK                  \
+        (  SVGA3D_SURFACE_BIND_VERTEX_BUFFER   |  \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER    |  \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |  \
+           SVGA3D_SURFACE_BIND_SHADER_RESOURCE |  \
+           SVGA3D_SURFACE_BIND_RENDER_TARGET   |  \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL   |  \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT      \
+        )
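+
+/*
+ * A minimal validation sketch (hypothetical helper, not part of the
+ * device interface): for surfaces defined with DefineGBSurface_v2, the
+ * enforced bind bits can be isolated from the other surface flags:
+ *
+ *    static inline uint32 svga3d_surface_bind_flags(uint32 flags)
+ *    {
+ *       return flags & SVGA3D_SURFACE_BIND_MASK;
+ *    }
+ */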
+
+typedef enum {
+   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * has the same depth when the alpha channel is ignored. e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format,
+ * implying that the driver can create a Context in this mode (for some
+ * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will DE-linearize data on output to the format)
+ */
+   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes nor mipmapping.
+ */
+   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE                             \
+   (SVGA3DFORMAT_OP_TEXTURE                              | \
+    SVGA3DFORMAT_OP_VOLUMETEXTURE                        | \
+    SVGA3DFORMAT_OP_CUBETEXTURE                          | \
+    SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET               | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET             | \
+    SVGA3DFORMAT_OP_ZSTENCIL                             | \
+    SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH  | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+    SVGA3DFORMAT_OP_DISPLAYMODE                          | \
+    SVGA3DFORMAT_OP_3DACCELERATION                       | \
+    SVGA3DFORMAT_OP_PIXELSIZE                            | \
+    SVGA3DFORMAT_OP_CONVERT_TO_ARGB                      | \
+    SVGA3DFORMAT_OP_OFFSCREENPLAIN                       | \
+    SVGA3DFORMAT_OP_SRGBREAD                             | \
+    SVGA3DFORMAT_OP_BUMPMAP                              | \
+    SVGA3DFORMAT_OP_DMAP                                 | \
+    SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                   | \
+    SVGA3DFORMAT_OP_SRGBWRITE                            | \
+    SVGA3DFORMAT_OP_AUTOGENMIPMAP                        | \
+    SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE               \
+   (SVGA3DFORMAT_OP_NOFILTER               | \
+    SVGA3DFORMAT_OP_NOALPHABLEND           | \
+    SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a bitfield conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same bit positions.
+ */
+typedef union {
+   uint32 value;
+   struct {
+      uint32 texture : 1;
+      uint32 volumeTexture : 1;
+      uint32 cubeTexture : 1;
+      uint32 offscreenRenderTarget : 1;
+      uint32 sameFormatRenderTarget : 1;
+      uint32 unknown1 : 1;
+      uint32 zStencil : 1;
+      uint32 zStencilArbitraryDepth : 1;
+      uint32 sameFormatUpToAlpha : 1;
+      uint32 unknown2 : 1;
+      uint32 displayMode : 1;
+      uint32 acceleration3d : 1;
+      uint32 pixelSize : 1;
+      uint32 convertToARGB : 1;
+      uint32 offscreenPlain : 1;
+      uint32 sRGBRead : 1;
+      uint32 bumpMap : 1;
+      uint32 dmap : 1;
+      uint32 noFilter : 1;
+      uint32 memberOfGroupARGB : 1;
+      uint32 sRGBWrite : 1;
+      uint32 noAlphaBlend : 1;
+      uint32 autoGenMipMap : 1;
+      uint32 vertexTexture : 1;
+      uint32 noTexCoordWrapNorMip : 1;
+   };
+} SVGA3dSurfaceFormatCaps;
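+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of the device
+ * interface): because the bitfield layout mirrors the SVGA3DFORMAT_OP_*
+ * bit positions, either view of the union can be used:
+ *
+ *    SVGA3dSurfaceFormatCaps caps;
+ *    caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE;
+ *    if (caps.texture && caps.cubeTexture)
+ *       ...   (format usable as both a 2D and a cube texture)
+ */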
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_RS_INVALID                   = 0,
+   SVGA3D_RS_MIN                       = 1,
+   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
+   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
+   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
+   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
+   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
+   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
+   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
+   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
+   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
+   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
+   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
+   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
+   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
+   SVGA3D_RS_FOGSTART                  = 16,    /* float */
+   SVGA3D_RS_FOGEND                    = 17,    /* float */
+   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
+   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
+   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
+   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
+   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
+   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
+   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
+   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
+   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
+   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
+   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
+   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
+   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
+   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
+   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
+   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
+   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
+   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+   SVGA3D_RS_ZBIAS                     = 45,    /* float */
+   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+   /*
+    * Output Gamma Level
+    *
+    * Output gamma affects the gamma curve of colors that are output from the
+    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
+    * value is <= 0.0, gamma correction is ignored and linear color space is
+    * used.
+    */
+
+   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEWIDTH                 = 98,    /* float */
+   SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
+   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
+   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
+   SVGA3D_VERTEXMATERIAL_MAX      = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+   SVGA3D_FILLMODE_INVALID = 0,
+   SVGA3D_FILLMODE_MIN     = 1,
+   SVGA3D_FILLMODE_POINT   = 1,
+   SVGA3D_FILLMODE_LINE    = 2,
+   SVGA3D_FILLMODE_FILL    = 3,
+   SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16   mode;       /* SVGA3dFillModeType */
+      uint16   face;       /* SVGA3dFace */
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
+
+typedef enum {
+   SVGA3D_SHADEMODE_INVALID = 0,
+   SVGA3D_SHADEMODE_FLAT    = 1,
+   SVGA3D_SHADEMODE_SMOOTH  = 2,
+   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
+   SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16 repeat;
+      uint16 pattern;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+   SVGA3D_BLENDOP_INVALID             = 0,
+   SVGA3D_BLENDOP_MIN                 = 1,
+   SVGA3D_BLENDOP_ZERO                = 1,
+   SVGA3D_BLENDOP_ONE                 = 2,
+   SVGA3D_BLENDOP_SRCCOLOR            = 3,
+   SVGA3D_BLENDOP_INVSRCCOLOR         = 4,
+   SVGA3D_BLENDOP_SRCALPHA            = 5,
+   SVGA3D_BLENDOP_INVSRCALPHA         = 6,
+   SVGA3D_BLENDOP_DESTALPHA           = 7,
+   SVGA3D_BLENDOP_INVDESTALPHA        = 8,
+   SVGA3D_BLENDOP_DESTCOLOR           = 9,
+   SVGA3D_BLENDOP_INVDESTCOLOR        = 10,
+   SVGA3D_BLENDOP_SRCALPHASAT         = 11,
+   SVGA3D_BLENDOP_BLENDFACTOR         = 12,
+   SVGA3D_BLENDOP_INVBLENDFACTOR      = 13,
+   SVGA3D_BLENDOP_SRC1COLOR           = 14,
+   SVGA3D_BLENDOP_INVSRC1COLOR        = 15,
+   SVGA3D_BLENDOP_SRC1ALPHA           = 16,
+   SVGA3D_BLENDOP_INVSRC1ALPHA        = 17,
+   SVGA3D_BLENDOP_BLENDFACTORALPHA    = 18,
+   SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+   SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+   SVGA3D_BLENDEQ_INVALID            = 0,
+   SVGA3D_BLENDEQ_MIN                = 1,
+   SVGA3D_BLENDEQ_ADD                = 1,
+   SVGA3D_BLENDEQ_SUBTRACT           = 2,
+   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
+   SVGA3D_BLENDEQ_MINIMUM            = 4,
+   SVGA3D_BLENDEQ_MAXIMUM            = 5,
+   SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+   SVGA3D_DX11_LOGICOP_MIN           = 0,
+   SVGA3D_DX11_LOGICOP_CLEAR         = 0,
+   SVGA3D_DX11_LOGICOP_SET           = 1,
+   SVGA3D_DX11_LOGICOP_COPY          = 2,
+   SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+   SVGA3D_DX11_LOGICOP_NOOP          = 4,
+   SVGA3D_DX11_LOGICOP_INVERT        = 5,
+   SVGA3D_DX11_LOGICOP_AND           = 6,
+   SVGA3D_DX11_LOGICOP_NAND          = 7,
+   SVGA3D_DX11_LOGICOP_OR            = 8,
+   SVGA3D_DX11_LOGICOP_NOR           = 9,
+   SVGA3D_DX11_LOGICOP_XOR           = 10,
+   SVGA3D_DX11_LOGICOP_EQUIV         = 11,
+   SVGA3D_DX11_LOGICOP_AND_REVERSE   = 12,
+   SVGA3D_DX11_LOGICOP_AND_INVERTED  = 13,
+   SVGA3D_DX11_LOGICOP_OR_REVERSE    = 14,
+   SVGA3D_DX11_LOGICOP_OR_INVERTED   = 15,
+   SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+   SVGA3D_FRONTWINDING_INVALID = 0,
+   SVGA3D_FRONTWINDING_CW      = 1,
+   SVGA3D_FRONTWINDING_CCW     = 2,
+   SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+   SVGA3D_FACE_INVALID  = 0,
+   SVGA3D_FACE_NONE     = 1,
+   SVGA3D_FACE_MIN      = 1,
+   SVGA3D_FACE_FRONT    = 2,
+   SVGA3D_FACE_BACK     = 3,
+   SVGA3D_FACE_FRONT_BACK = 4,
+   SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+   SVGA3D_CMP_INVALID              = 0,
+   SVGA3D_CMP_NEVER                = 1,
+   SVGA3D_CMP_LESS                 = 2,
+   SVGA3D_CMP_EQUAL                = 3,
+   SVGA3D_CMP_LESSEQUAL            = 4,
+   SVGA3D_CMP_GREATER              = 5,
+   SVGA3D_CMP_NOTEQUAL             = 6,
+   SVGA3D_CMP_GREATEREQUAL         = 7,
+   SVGA3D_CMP_ALWAYS               = 8,
+   SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+   SVGA3D_FOGFUNC_INVALID          = 0,
+   SVGA3D_FOGFUNC_EXP              = 1,
+   SVGA3D_FOGFUNC_EXP2             = 2,
+   SVGA3D_FOGFUNC_LINEAR           = 3,
+   SVGA3D_FOGFUNC_PER_VERTEX       = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+   SVGA3D_FOGTYPE_INVALID          = 0,
+   SVGA3D_FOGTYPE_VERTEX           = 1,
+   SVGA3D_FOGTYPE_PIXEL            = 2,
+   SVGA3D_FOGTYPE_MAX              = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+   SVGA3D_FOGBASE_INVALID          = 0,
+   SVGA3D_FOGBASE_DEPTHBASED       = 1,
+   SVGA3D_FOGBASE_RANGEBASED       = 2,
+   SVGA3D_FOGBASE_MAX              = 3
+} SVGA3dFogBase;
+
+typedef enum {
+   SVGA3D_STENCILOP_INVALID        = 0,
+   SVGA3D_STENCILOP_MIN            = 1,
+   SVGA3D_STENCILOP_KEEP           = 1,
+   SVGA3D_STENCILOP_ZERO           = 2,
+   SVGA3D_STENCILOP_REPLACE        = 3,
+   SVGA3D_STENCILOP_INCRSAT        = 4,
+   SVGA3D_STENCILOP_DECRSAT        = 5,
+   SVGA3D_STENCILOP_INVERT         = 6,
+   SVGA3D_STENCILOP_INCR           = 7,
+   SVGA3D_STENCILOP_DECR           = 8,
+   SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+   SVGA3D_CLIPPLANE_0              = (1 << 0),
+   SVGA3D_CLIPPLANE_1              = (1 << 1),
+   SVGA3D_CLIPPLANE_2              = (1 << 2),
+   SVGA3D_CLIPPLANE_3              = (1 << 3),
+   SVGA3D_CLIPPLANE_4              = (1 << 4),
+   SVGA3D_CLIPPLANE_5              = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+   SVGA3D_CLEAR_COLOR              = 0x1,
+   SVGA3D_CLEAR_DEPTH              = 0x2,
+   SVGA3D_CLEAR_STENCIL            = 0x4,
+
+   /*
+    * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If the
+    * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+    * bit will be ignored.
+    */
+   SVGA3D_CLEAR_COLORFILL          = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+   SVGA3D_RT_DEPTH                 = 0,
+   SVGA3D_RT_MIN                   = 0,
+   SVGA3D_RT_STENCIL               = 1,
+   SVGA3D_RT_COLOR0                = 2,
+   SVGA3D_RT_COLOR1                = 3,
+   SVGA3D_RT_COLOR2                = 4,
+   SVGA3D_RT_COLOR3                = 5,
+   SVGA3D_RT_COLOR4                = 6,
+   SVGA3D_RT_COLOR5                = 7,
+   SVGA3D_RT_COLOR6                = 8,
+   SVGA3D_RT_COLOR7                = 9,
+   SVGA3D_RT_MAX,
+   SVGA3D_RT_INVALID               = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint32  red   : 1;
+      uint32  green : 1;
+      uint32  blue  : 1;
+      uint32  alpha : 1;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+   SVGA3D_VBLEND_DISABLE            = 0,
+   SVGA3D_VBLEND_1WEIGHT            = 1,
+   SVGA3D_VBLEND_2WEIGHT            = 2,
+   SVGA3D_VBLEND_3WEIGHT            = 3,
+   SVGA3D_VBLEND_MAX                = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+   SVGA3D_WRAPCOORD_0   = 1 << 0,
+   SVGA3D_WRAPCOORD_1   = 1 << 1,
+   SVGA3D_WRAPCOORD_2   = 1 << 2,
+   SVGA3D_WRAPCOORD_3   = 1 << 3,
+   SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_TS_INVALID                    = 0,
+   SVGA3D_TS_MIN                        = 1,
+   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
+   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
+   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
+   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
+   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
+   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
+   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
+   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
+   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
+   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
+   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
+   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
+   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
+
+
+   /*
+    * Sampler Gamma Level
+    *
+    * Sampler gamma affects the color of samples taken from the sampler.  A
+    * value of 1.0 will produce linear samples.  If the value is <= 0.0, the
+    * gamma value is ignored and a linear space is used.
+    */
+
+   SVGA3D_TS_GAMMA                      = 25,   /* float */
+   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
+   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
+   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_PREGB_MAX                  = 30,   /* Max value before GBObjects */
+   SVGA3D_TS_CONSTANT                   = 30,   /* SVGA3dColor */
+   SVGA3D_TS_COLOR_KEY_ENABLE           = 31,   /* SVGA3dBool */
+   SVGA3D_TS_COLOR_KEY                  = 32,   /* SVGA3dColor */
+   SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+   SVGA3D_TC_INVALID                   = 0,
+   SVGA3D_TC_DISABLE                   = 1,
+   SVGA3D_TC_SELECTARG1                = 2,
+   SVGA3D_TC_SELECTARG2                = 3,
+   SVGA3D_TC_MODULATE                  = 4,
+   SVGA3D_TC_ADD                       = 5,
+   SVGA3D_TC_ADDSIGNED                 = 6,
+   SVGA3D_TC_SUBTRACT                  = 7,
+   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
+   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
+   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
+   SVGA3D_TC_BLENDFACTORALPHA          = 11,
+   SVGA3D_TC_MODULATE2X                = 12,
+   SVGA3D_TC_MODULATE4X                = 13,
+   SVGA3D_TC_DSDT                      = 14,
+   SVGA3D_TC_DOTPRODUCT3               = 15,
+   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
+   SVGA3D_TC_ADDSIGNED2X               = 17,
+   SVGA3D_TC_ADDSMOOTH                 = 18,
+   SVGA3D_TC_PREMODULATE               = 19,
+   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
+   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
+   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
+   SVGA3D_TC_MULTIPLYADD               = 25,
+   SVGA3D_TC_LERP                      = 26,
+   SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
+typedef enum {
+   SVGA3D_TEX_ADDRESS_INVALID    = 0,
+   SVGA3D_TEX_ADDRESS_MIN        = 1,
+   SVGA3D_TEX_ADDRESS_WRAP       = 1,
+   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
+   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
+   SVGA3D_TEX_ADDRESS_BORDER     = 4,
+   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+   SVGA3D_TEX_ADDRESS_EDGE       = 6,
+   SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+   SVGA3D_TEX_FILTER_NONE           = 0,
+   SVGA3D_TEX_FILTER_MIN            = 0,
+   SVGA3D_TEX_FILTER_NEAREST        = 1,
+   SVGA3D_TEX_FILTER_LINEAR         = 2,
+   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+   SVGA3D_TEX_TRANSFORM_OFF    = 0,
+   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
+   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
+   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
+   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
+   SVGA3D_TEX_PROJECTED        = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+   SVGA3D_TEXCOORD_GEN_OFF              = 0,
+   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
+   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
+   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
+   SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+   SVGA3D_TA_INVALID    = 0,
+   SVGA3D_TA_TFACTOR    = 1,
+   SVGA3D_TA_PREVIOUS   = 2,
+   SVGA3D_TA_DIFFUSE    = 3,
+   SVGA3D_TA_TEXTURE    = 4,
+   SVGA3D_TA_SPECULAR   = 5,
+   SVGA3D_TA_CONSTANT   = 6,
+   SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+   SVGA3D_TM_NONE       = 0,
+   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
+   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+   SVGA3D_DECLUSAGE_POSITION     = 0,
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,
+   SVGA3D_DECLUSAGE_BLENDINDICES,
+   SVGA3D_DECLUSAGE_NORMAL,
+   SVGA3D_DECLUSAGE_PSIZE,
+   SVGA3D_DECLUSAGE_TEXCOORD,
+   SVGA3D_DECLUSAGE_TANGENT,
+   SVGA3D_DECLUSAGE_BINORMAL,
+   SVGA3D_DECLUSAGE_TESSFACTOR,
+   SVGA3D_DECLUSAGE_POSITIONT,
+   SVGA3D_DECLUSAGE_COLOR,
+   SVGA3D_DECLUSAGE_FOG,
+   SVGA3D_DECLUSAGE_DEPTH,
+   SVGA3D_DECLUSAGE_SAMPLE,
+   SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+   SVGA3D_DECLMETHOD_DEFAULT     = 0,
+   SVGA3D_DECLMETHOD_PARTIALU,
+   SVGA3D_DECLMETHOD_PARTIALV,
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
+   SVGA3D_DECLMETHOD_UV,
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+                                       /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+   SVGA3D_DECLTYPE_FLOAT1        =  0,
+   SVGA3D_DECLTYPE_FLOAT2        =  1,
+   SVGA3D_DECLTYPE_FLOAT3        =  2,
+   SVGA3D_DECLTYPE_FLOAT4        =  3,
+   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
+   SVGA3D_DECLTYPE_UBYTE4        =  5,
+   SVGA3D_DECLTYPE_SHORT2        =  6,
+   SVGA3D_DECLTYPE_SHORT4        =  7,
+   SVGA3D_DECLTYPE_UBYTE4N       =  8,
+   SVGA3D_DECLTYPE_SHORT2N       =  9,
+   SVGA3D_DECLTYPE_SHORT4N       = 10,
+   SVGA3D_DECLTYPE_USHORT2N      = 11,
+   SVGA3D_DECLTYPE_USHORT4N      = 12,
+   SVGA3D_DECLTYPE_UDEC3         = 13,
+   SVGA3D_DECLTYPE_DEC3N         = 14,
+   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
+   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
+   SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+   struct {
+      /*
+       * For index data, this number represents the number of instances to draw.
+       * For instance data, this number represents the number of
+       * instances/vertex in this stream.
+       */
+      uint32 count : 30;
+
+      /*
+       * This is 1 if this is supposed to be the data that is repeated for
+       * every instance.
+       */
+      uint32 indexedData : 1;
+
+      /*
+       * This is 1 if this is supposed to be the per-instance data.
+       */
+      uint32 instanceData : 1;
+   };
+
+   uint32 value;
+} SVGA3dVertexDivisor;
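+
+/*
+ * Editor's illustrative sketch (not part of the device protocol):
+ * encoding a divisor for one instanced vertex stream. The helper name
+ * is hypothetical; the encoding mirrors the Direct3D
+ * SetStreamSourceFreq semantics noted above.
+ */
+static inline uint32
+svga3dExampleInstanceDivisor(uint32 instancesPerVertex)
+{
+   SVGA3dVertexDivisor div;
+
+   div.value = 0;                   /* clear all bits first */
+   div.count = instancesPerVertex;  /* instances per vertex in this stream */
+   div.instanceData = 1;            /* this stream carries per-instance data */
+   return div.value;
+}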
+
+typedef enum {
+   /*
+    * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
+    *
+    * List MIN second so debuggers will think INVALID is
+    * the correct name.
+    */
+   SVGA3D_PRIMITIVE_INVALID                     = 0,
+   SVGA3D_PRIMITIVE_MIN                         = 0,
+   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
+   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
+   SVGA3D_PRIMITIVE_LINELIST                    = 3,
+   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
+   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
+   SVGA3D_PRIMITIVE_LINELIST_ADJ                = 7,
+   SVGA3D_PRIMITIVE_PREDX_MAX                   = 7,
+   SVGA3D_PRIMITIVE_LINESTRIP_ADJ               = 8,
+   SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ            = 9,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ           = 10,
+   SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+   SVGA3D_COORDINATE_INVALID                   = 0,
+   SVGA3D_COORDINATE_LEFTHANDED                = 1,
+   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
+   SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+   SVGA3D_TRANSFORM_INVALID                     = 0,
+   SVGA3D_TRANSFORM_WORLD                       = 1,
+   SVGA3D_TRANSFORM_MIN                         = 1,
+   SVGA3D_TRANSFORM_VIEW                        = 2,
+   SVGA3D_TRANSFORM_PROJECTION                  = 3,
+   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
+   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
+   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
+   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
+   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
+   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
+   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
+   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
+   SVGA3D_TRANSFORM_WORLD1                      = 12,
+   SVGA3D_TRANSFORM_WORLD2                      = 13,
+   SVGA3D_TRANSFORM_WORLD3                      = 14,
+   SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+   SVGA3D_LIGHTTYPE_INVALID                     = 0,
+   SVGA3D_LIGHTTYPE_MIN                         = 1,
+   SVGA3D_LIGHTTYPE_POINT                       = 1,
+   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
+   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
+   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
+   SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+   SVGA3D_CUBEFACE_POSX                         = 0,
+   SVGA3D_CUBEFACE_NEGX                         = 1,
+   SVGA3D_CUBEFACE_POSY                         = 2,
+   SVGA3D_CUBEFACE_NEGY                         = 3,
+   SVGA3D_CUBEFACE_POSZ                         = 4,
+   SVGA3D_CUBEFACE_NEGZ                         = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
+   SVGA3D_SHADERTYPE_VS                         = 1,
+   SVGA3D_SHADERTYPE_PS                         = 2,
+   SVGA3D_SHADERTYPE_PREDX_MAX                  = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
+   SVGA3D_SHADERTYPE_DX10_MAX                   = 4,
+   SVGA3D_SHADERTYPE_HS                         = 4,
+   SVGA3D_SHADERTYPE_DS                         = 5,
+   SVGA3D_SHADERTYPE_CS                         = 6,
+   SVGA3D_SHADERTYPE_MAX                        = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+   (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+   (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+   (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
+typedef enum {
+   SVGA3D_CONST_TYPE_MIN                        = 0,
+   SVGA3D_CONST_TYPE_FLOAT                      = 0,
+   SVGA3D_CONST_TYPE_INT                        = 1,
+   SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX                        = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX            256
+#define SVGA3D_CONSTINTREG_MAX         16
+#define SVGA3D_CONSTBOOLREG_MAX        16
+
+typedef enum {
+   SVGA3D_STRETCH_BLT_POINT                     = 0,
+   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
+   SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+   SVGA3D_QUERYTYPE_INVALID                     = ((uint8)-1),
+   SVGA3D_QUERYTYPE_MIN                         = 0,
+   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
+   SVGA3D_QUERYTYPE_TIMESTAMP                   = 1,
+   SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT           = 2,
+   SVGA3D_QUERYTYPE_PIPELINESTATS               = 3,
+   SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE          = 4,
+   SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS           = 5,
+   SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE     = 6,
+   SVGA3D_QUERYTYPE_OCCLUSION64                 = 7,
+   SVGA3D_QUERYTYPE_EVENT                       = 8,
+   SVGA3D_QUERYTYPE_DX10_MAX                    = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 10,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 11,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 12,
+   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 13,
+   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 14,
+   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 15,
+   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 16,
+   SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE  (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 realFrequency;
+   uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 inputAssemblyVertices;
+   uint64 inputAssemblyPrimitives;
+   uint64 vertexShaderInvocations;
+   uint64 geometryShaderInvocations;
+   uint64 geometryShaderPrimitives;
+   uint64 clipperInvocations;
+   uint64 clipperPrimitives;
+   uint64 pixelShaderInvocations;
+   uint64 hullShaderInvocations;
+   uint64 domainShaderInvocations;
+   uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 numPrimitivesWritten;
+   uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+   SVGADXOcclusionQueryResult occ;
+   SVGADXEventQueryResult event;
+   SVGADXTimestampQueryResult ts;
+   SVGADXTimestampDisjointQueryResult tsDisjoint;
+   SVGADXPipelineStatisticsQueryResult pipelineStats;
+   SVGADXOcclusionPredicateQueryResult occPred;
+   SVGADXStreamOutStatisticsQueryResult soStats;
+   SVGADXStreamOutPredicateQueryResult soPred;
+   SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
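+
+/*
+ * Editor's illustrative sketch (not part of the device protocol):
+ * using the union to decode a raw query result generically. The helper
+ * name is hypothetical; only the occlusion variants are shown.
+ */
+static inline uint64
+svgaExampleQuerySamples(const SVGADXQueryResultUnion *result,
+                        SVGA3dQueryType type)
+{
+   switch (type) {
+   case SVGA3D_QUERYTYPE_OCCLUSION:
+      return result->occ.samplesRendered;
+   case SVGA3D_QUERYTYPE_OCCLUSION64:
+      return result->occ64.samplesRendered;
+   default:
+      return 0;   /* other query types carry different payloads */
+   }
+}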
+
+
+typedef enum {
+   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Query is not finished yet */
+   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully */
+   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully */
+   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+   SVGA3D_WRITE_HOST_VRAM        = 1,
+   SVGA3D_READ_HOST_VRAM         = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+   SVGA3D_LOGICOP_INVALID   = 0,
+   SVGA3D_LOGICOP_MIN       = 1,
+   SVGA3D_LOGICOP_COPY      = 1,
+   SVGA3D_LOGICOP_NOT       = 2,
+   SVGA3D_LOGICOP_AND       = 3,
+   SVGA3D_LOGICOP_OR        = 4,
+   SVGA3D_LOGICOP_XOR       = 5,
+   SVGA3D_LOGICOP_NXOR      = 6,
+   SVGA3D_LOGICOP_ROP3MIN   = 30,   /* 7-29 are reserved for future logic ops. */
+   SVGA3D_LOGICOP_ROP3MAX   = (SVGA3D_LOGICOP_ROP3MIN + 255),
+   SVGA3D_LOGICOP_MAX       = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
+      };
+      uint32     uintValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+   uint32               sid;
+   uint32               face;
+   uint32               mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               width;
+   uint32               height;
+   uint32               depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+   SVGA_OTABLE_MOB             = 0,
+   SVGA_OTABLE_MIN             = 0,
+   SVGA_OTABLE_SURFACE         = 1,
+   SVGA_OTABLE_CONTEXT         = 2,
+   SVGA_OTABLE_SHADER          = 3,
+   SVGA_OTABLE_SCREENTARGET    = 4,
+
+   SVGA_OTABLE_DX9_MAX         = 5,
+
+   SVGA_OTABLE_DXCONTEXT       = 5,
+   SVGA_OTABLE_MAX             = 6
+} SVGAOTableType;
+
+/*
+ * Deprecated.
+ */
+#define SVGA_OTABLE_COUNT 4
+
+typedef enum {
+   SVGA_COTABLE_MIN             = 0,
+   SVGA_COTABLE_RTVIEW          = 0,
+   SVGA_COTABLE_DSVIEW          = 1,
+   SVGA_COTABLE_SRVIEW          = 2,
+   SVGA_COTABLE_ELEMENTLAYOUT   = 3,
+   SVGA_COTABLE_BLENDSTATE      = 4,
+   SVGA_COTABLE_DEPTHSTENCIL    = 5,
+   SVGA_COTABLE_RASTERIZERSTATE = 6,
+   SVGA_COTABLE_SAMPLER         = 7,
+   SVGA_COTABLE_STREAMOUTPUT    = 8,
+   SVGA_COTABLE_DXQUERY         = 9,
+   SVGA_COTABLE_DXSHADER        = 10,
+   SVGA_COTABLE_DX10_MAX        = 11,
+   SVGA_COTABLE_UAVIEW          = 11,
+   SVGA_COTABLE_MAX
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID     = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0   = 0,
+   SVGA3D_MOBFMT_MIN         = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1   = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2   = 2,
+   SVGA3D_MOBFMT_RANGE       = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_PREDX_MAX   = 7,
+   SVGA3D_MOBFMT_EMPTY       = 7,
+   SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
new file mode 100644
index 0000000..884b1d1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ *    Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ *   0000: Reserved
+ *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
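+
+/*
+ * Editor's illustrative sketch (not part of the device protocol):
+ * recovering the major command number from the first DWORD of escape
+ * data, per the high-word/low-word convention above. The helper name
+ * is hypothetical.
+ */
+static inline uint32
+svgaExampleEscapeMajor(uint32 command)
+{
+   return (command & SVGA_ESCAPE_VMWARE_MAJOR_MASK) >> 16;
+}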
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ *     that use the SVGA Screen Object extension. Instead of sending
+ *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ *     Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
+
+typedef
+struct {
+   uint32 command;
+   uint32 fullscreen;
+   struct {
+      int32 x, y;
+   } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
new file mode 100644
index 0000000..faf6d9b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -0,0 +1,199 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ *    Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
+
+typedef enum {
+   SVGA_OVERLAY_FORMAT_INVALID = 0,
+   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
+        /* FIFO escape layout:
+         * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
+        /* FIFO escape layout:
+         * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+   struct {
+      uint32 cmdType;
+      uint32 streamId;
+   } header;
+
+   /* May include zero or more items. */
+   struct {
+      uint32 registerId;
+      uint32 value;
+   } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+   uint32 cmdType;
+   uint32 streamId;
+} SVGAEscapeVideoFlush;
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+   uint32 command;
+   uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ *      Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ *      TRUE on success, FALSE on failure.
+ *
+ * Side effects:
+ *      Pitches and offsets for the given YUV frame are put in 'pitches'
+ *      and 'offsets' respectively. They are both optional though.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
+{
+    int tmp;
+
+    *width = (*width + 1) & ~1;
+
+    if (offsets) {
+        offsets[0] = 0;
+    }
+
+    switch (format) {
+    case VMWARE_FOURCC_YV12:
+       *height = (*height + 1) & ~1;
+       *size = (*width) * (*height);
+
+       if (pitches) {
+          pitches[0] = *width;
+       }
+
+       if (offsets) {
+          offsets[1] = *size;
+       }
+
+       tmp = *width >> 1;
+
+       if (pitches) {
+          pitches[1] = pitches[2] = tmp;
+       }
+
+       tmp *= (*height >> 1);
+       *size += tmp;
+
+       if (offsets) {
+          offsets[2] = *size;
+       }
+
+       *size += tmp;
+       break;
+
+    case VMWARE_FOURCC_YUY2:
+    case VMWARE_FOURCC_UYVY:
+       *size = *width * 2;
+
+       if (pitches) {
+          pitches[0] = *size;
+       }
+
+       *size *= *height;
+       break;
+
+    default:
+       return false;
+    }
+
+    return true;
+}
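+
+/*
+ * Editor's illustrative usage sketch (not part of the device protocol):
+ * computing the layout of a 640x480 YV12 frame with the helper above.
+ * The wrapper name is hypothetical.
+ */
+static inline uint32
+svgaExampleYV12FrameSize(void)
+{
+   uint32 width = 640, height = 480, size = 0;
+   uint32 pitches[3], offsets[3];
+
+   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &width,
+                                 &height, &size, pitches, offsets)) {
+      return 0;
+   }
+
+   /* For 4:2:0 YV12 this yields 640*480*3/2 == 460800 bytes. */
+   return size;
+}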
+
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
new file mode 100644
index 0000000..6e0ccb7
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -0,0 +1,1936 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ *    Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
+
+/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+typedef enum {
+   SVGA_REG_ENABLE_DISABLE = 0,
+   SVGA_REG_ENABLE_ENABLE = (1 << 0),
+   SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
+
+/*
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
+ */
+#define SVGA_MAX_WIDTH                  2560
+#define SVGA_MAX_HEIGHT                 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL         32
+#define SVGA_MAX_DEPTH                  24
+#define SVGA_MAX_DISPLAYS               10
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced for guests unless the
+ * SVGA_CAP_GBOBJECTS capability is set in SVGA_REG_CAPABILITIES.  In that
+ * case the full framebuffer can be traced independent of this limit.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
+#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC         0x900000UL
+#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2     2
+#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+   PALETTE_BASE has moved */
+#define SVGA_VERSION_1     1
+#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0     0
+#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID    0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT         0x0
+#define SVGA_VALUE_PORT         0x1
+#define SVGA_BIOS_PORT          0x2
+#define SVGA_IRQSTATUS_PORT     0x8
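+
+/*
+ * Editor's illustrative sketch (not part of the device headers): a
+ * register read through the index/value port pair above. Assumes x86
+ * port I/O (outl()/inl() from <asm/io.h>) and an ioBase taken from
+ * BAR0; real drivers wrap this in their own accessors.
+ */
+static inline uint32
+svgaExampleReadReg(unsigned long ioBase, uint32 index)
+{
+   outl(index, ioBase + SVGA_INDEX_PORT);   /* select the register... */
+   return inl(ioBase + SVGA_VALUE_PORT);    /* ...then read its value */
+}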
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
+
+/*
+ * Registers
+ */
+
+enum {
+   SVGA_REG_ID = 0,
+   SVGA_REG_ENABLE = 1,
+   SVGA_REG_WIDTH = 2,
+   SVGA_REG_HEIGHT = 3,
+   SVGA_REG_MAX_WIDTH = 4,
+   SVGA_REG_MAX_HEIGHT = 5,
+   SVGA_REG_DEPTH = 6,
+   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
+   SVGA_REG_PSEUDOCOLOR = 8,
+   SVGA_REG_RED_MASK = 9,
+   SVGA_REG_GREEN_MASK = 10,
+   SVGA_REG_BLUE_MASK = 11,
+   SVGA_REG_BYTES_PER_LINE = 12,
+   SVGA_REG_FB_START = 13,            /* (Deprecated) */
+   SVGA_REG_FB_OFFSET = 14,
+   SVGA_REG_VRAM_SIZE = 15,
+   SVGA_REG_FB_SIZE = 16,
+
+   /* ID 0 implementation only had the above registers, then the palette */
+   SVGA_REG_ID_0_TOP = 17,
+
+   SVGA_REG_CAPABILITIES = 17,
+   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
+   SVGA_REG_MEM_SIZE = 19,
+   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
+   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
+   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
+   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
+   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
+   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
+   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
+   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
+   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
+   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
+   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
+
+   /* Legacy multi-monitor support */
+   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
+   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
+   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
+
+   /* See "Guest memory regions" below. */
+   SVGA_REG_GMR_ID = 41,
+   SVGA_REG_GMR_DESCRIPTOR = 42,
+   SVGA_REG_GMR_MAX_IDS = 43,
+   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
+   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
+   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
+   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits of command buffer PA; write submits */
+   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_CMD_PREPEND_LOW = 53,
+   SVGA_REG_CMD_PREPEND_HIGH = 54,
+   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+   SVGA_REG_MOB_MAX_SIZE = 57,
+   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
+
+   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
+   /* Next 768 (== 256*3) registers exist for colormap */
+   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+                                    /* Base of scratch registers */
+   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+      First 4 are reserved for VESA BIOS Extension; any remaining are for
+      the use of the current SVGA driver. */
+};
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ *   SVGA_REG_GMR_ID --
+ *
+ *     Read/write.
+ *     This register holds the 32-bit ID (a small positive integer)
+ *     of a GMR to create, delete, or redefine. Writing this register
+ *     has no side-effects.
+ *
+ *   SVGA_REG_GMR_DESCRIPTOR --
+ *
+ *     Write-only.
+ *     Writing this register will create, delete, or redefine the GMR
+ *     specified by the above ID register. If this register is zero,
+ *     the GMR is deleted. Any pointers into this GMR (including those
+ *     currently being processed by FIFO commands) will be
+ *     synchronously invalidated.
+ *
+ *     If this register is nonzero, it must be the physical page
+ *     number (PPN) of a data structure which describes the physical
+ *     layout of the memory region this GMR should describe. The
+ *     descriptor structure will be read synchronously by the SVGA
+ *     device when this register is written. The descriptor need not
+ *     remain allocated for the lifetime of the GMR.
+ *
+ *     The guest driver should write SVGA_REG_GMR_ID first, then
+ *     SVGA_REG_GMR_DESCRIPTOR.
+ *
+ *   SVGA_REG_GMR_MAX_IDS --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to support a maximum number of
+ *     user-defined GMR IDs. This register holds the number of supported
+ *     IDs. (The maximum supported ID plus 1)
+ *
+ *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to put a limit on the total number
+ *     of SVGAGuestMemDescriptor structures it will read when defining
+ *     a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ *   - Terminate the GMR descriptor list.
+ *     (ppn==0, numPages==0)
+ *
+ *   - Add a PPN or range of PPNs to the GMR's virtual address space.
+ *     (ppn != 0, numPages != 0)
+ *
+ *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ *     support multi-page GMR descriptor tables without forcing the
+ *     driver to allocate physically contiguous memory.
+ *     (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page.  The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well.  In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL         ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestMemDescriptor {
+   uint32 ppn;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestPtr {
+   uint32 gmrId;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
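+
+/*
+ * Editor's illustrative sketch (not part of the device headers):
+ * defining a one-page GMR following the register protocol described
+ * above. svgaExampleWriteReg() is a hypothetical register-write helper;
+ * 'desc' must reside in the physical page numbered descPPN.
+ */
+extern void svgaExampleWriteReg(uint32 index, uint32 value);
+
+static inline void
+svgaExampleDefineGmr(uint32 gmrId, uint32 dataPPN,
+                     SVGAGuestMemDescriptor *desc, uint32 descPPN)
+{
+   desc[0].ppn = dataPPN;    /* map one page of guest memory */
+   desc[0].numPages = 1;
+   desc[1].ppn = 0;          /* terminate the descriptor list */
+   desc[1].numPages = 0;
+
+   svgaExampleWriteReg(SVGA_REG_GMR_ID, gmrId);           /* ID first */
+   svgaExampleWriteReg(SVGA_REG_GMR_DESCRIPTOR, descPPN); /* then the PPN */
+}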
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register.  The
+ * FIFO must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted by the guest writing the 64-byte
+ * aligned physical address into the SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH registers.  SVGA_REG_COMMAND_HIGH contains the
+ * upper 32 bits of the physical address.  SVGA_REG_COMMAND_LOW contains
+ * the lower 32 bits of the physical address; since the command buffer
+ * headers are required to be 64-byte aligned, the lower 6 bits are
+ * used for the SVGACBContext value.  Writing to SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution.  The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued
+ * per context and if that limit is reached the device will write the
+ * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command
+ * buffer header synchronously and not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported.  If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is
+ * enough to fit a 64x64 color-cursor definition.  If the command is
+ * too large the device is allowed to process the command or return an
+ * error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register-like accesses with the flexibility of
+ * commands.  There is a different command set defined by
+ * SVGADeviceContextCmdId.  The commands in each command buffer are not
+ * allowed to straddle physical pages.
+ *
+ * The offset field which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit can be set by the guest to bias the
+ * start of command processing into the buffer.  If an error is
+ * encountered the errorOffset will still be relative to the specific
+ * PA, not biased by the offset.  When the command buffer is finished,
+ * the guest should not read the offset field, as there is no guarantee
+ * what it will be set to.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024)  /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+   SVGA_CB_CONTEXT_DEVICE = 0x3f,
+   SVGA_CB_CONTEXT_0      = 0x0,
+   SVGA_CB_CONTEXT_MAX    = 0x1,
+} SVGACBContext;
+
+
+typedef enum {
+   /*
+    * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+    * field before submitting the command buffer header, the host will
+    * change the value when it is done with the command buffer.
+    */
+   SVGA_CB_STATUS_NONE             = 0,
+
+   /*
+    * Written by the host when a command buffer completes successfully.
+    * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+    * the SVGA_CB_FLAG_NO_IRQ flag is set.
+    */
+   SVGA_CB_STATUS_COMPLETED        = 1,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate the command buffer was not submitted.  No
+    * IRQ is raised.
+    */
+   SVGA_CB_STATUS_QUEUE_FULL       = 2,
+
+   /*
+    * Written by the host when an error was detected parsing a command
+    * in the command buffer, errorOffset is written to contain the
+    * offset to the first byte of the failing command.  The device
+    * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+    * SVGA_IRQFLAG_COMMAND_BUFFER.  Some of the commands may have been
+    * processed.
+    */
+   SVGA_CB_STATUS_COMMAND_ERROR    = 3,
+
+   /*
+    * Written by the host if there is an error parsing the command
+    * buffer header.  The device raises the IRQ with both
+    * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER.  The device
+    * did not process any of the command buffer.
+    */
+   SVGA_CB_STATUS_CB_HEADER_ERROR  = 4,
+
+   /*
+    * Written by the host if the guest requested the host to preempt
+    * the command buffer.  The device will not raise any IRQs and the
+    * command buffer was not processed.
+    */
+   SVGA_CB_STATUS_PREEMPTED        = 5,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate that the command buffer was not submitted
+    * due to an error.  No IRQ is raised.
+    */
+   SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+} SVGACBStatus;
+
+typedef enum {
+   SVGA_CB_FLAG_NONE       = 0,
+   SVGA_CB_FLAG_NO_IRQ     = 1 << 0,
+   SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+   SVGA_CB_FLAG_MOB        = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   volatile SVGACBStatus status;
+   volatile uint32 errorOffset;
+   uint64 id;
+   SVGACBFlags flags;
+   uint32 length;
+   union {
+      PA pa;
+      struct {
+         SVGAMobId mobid;
+         uint32 mobOffset;
+      } mob;
+   } ptr;
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+   uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+   uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
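+
+/*
+ * Editor's illustrative sketch (not part of the device headers):
+ * submitting one command buffer on context 0 per the protocol above.
+ * svgaExampleWriteReg() is a hypothetical register-write helper;
+ * headerPA is the 64-byte-aligned physical address of 'header' and
+ * dataPA the physical address of 'length' bytes of commands.
+ */
+extern void svgaExampleWriteReg(uint32 index, uint32 value);
+
+static inline void
+svgaExampleSubmitCB(SVGACBHeader *header, uint64 headerPA,
+                    uint64 dataPA, uint32 length)
+{
+   header->status = SVGA_CB_STATUS_NONE;  /* host updates this when done */
+   header->flags = SVGA_CB_FLAG_NONE;
+   header->length = length;
+   header->ptr.pa = dataPA;
+
+   svgaExampleWriteReg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPA >> 32));
+   /* The header PA is 64-byte aligned, so its low 6 bits are free to
+    * carry the SVGACBContext value. This write queues the buffer. */
+   svgaExampleWriteReg(SVGA_REG_COMMAND_LOW,
+                       (uint32)headerPA | SVGA_CB_CONTEXT_0);
+}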
+
+typedef enum {
+   SVGA_DC_CMD_NOP                   = 0,
+   SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
+   SVGA_DC_CMD_PREEMPT               = 2,
+   SVGA_DC_CMD_MAX                   = 3,
+   SVGA_DC_CMD_FORCE_UINT            = MAX_UINT32,
+} SVGADeviceContextCmdId;
+
+typedef struct {
+   uint32 enable;
+   SVGACBContext context;
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers
+ * on the specified context that can be preempted are preempted.  After
+ * execution of this command all command buffers that were preempted
+ * will already have SVGA_CB_STATUS_PREEMPTED written into the status
+ * field.  The device might still be processing a command buffer,
+ * assuming execution of it started before the preemption request was
+ * received.  Setting the ignoreIDZero flag to TRUE will cause the
+ * device to not preempt command buffers with the id field in the
+ * command buffer header set to zero.
+ */
+
+typedef struct {
+   SVGACBContext context;
+   uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
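+
+/*
+ * Editor's illustrative sketch (not part of the device headers): the
+ * payload for preempting everything queued on context 0, sent after
+ * SVGA_DC_CMD_PREEMPT in a device-context command buffer. The helper
+ * name is hypothetical.
+ */
+static inline SVGADCCmdPreempt
+svgaExamplePreemptAll(void)
+{
+   SVGADCCmdPreempt cmd;
+
+   cmd.context = SVGA_CB_CONTEXT_0;
+   cmd.ignoreIDZero = 0;   /* also preempt buffers whose id is zero */
+   return cmd;
+}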
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ *    This is a packed representation of the source 2D image format
+ *    for a GMR-to-screen blit. Currently it is defined as an encoding
+ *    of the screen's color depth and bits-per-pixel, however, 16 bits
+ *    are reserved for future use to identify other encodings (such as
+ *    RGBA or higher-precision images).
+ *
+ *    Currently supported formats:
+ *
+ *       bpp depth  Format Name
+ *       --- -----  -----------
+ *        32    24  32-bit BGRX
+ *        24    24  24-bit BGR
+ *        16    16  RGB 5-6-5
+ *        16    15  RGB 5-5-5
+ *
+ */
+
+typedef struct SVGAGMRImageFormat {
+   union {
+      struct {
+         uint32 bitsPerPixel : 8;
+         uint32 colorDepth   : 8;
+         uint32 reserved     : 16;  /* Must be zero */
+      };
+
+      uint32 value;
+   };
+} SVGAGMRImageFormat;
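+
+/*
+ * Editor's illustration: encoding the 32-bit BGRX entry from the table
+ * above.  Writing through .value avoids relying on C bitfield layout;
+ * on a little-endian guest this places bitsPerPixel in the low byte.
+ */
+static inline SVGAGMRImageFormat svga_format_bgrx32(void)
+{
+   SVGAGMRImageFormat fmt;
+
+   fmt.value = (24 << 8) | 32;  /* colorDepth = 24, bitsPerPixel = 32 */
+   return fmt;
+}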
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
+
+/*
+ * SVGAColorBGRX --
+ *
+ *    A 24-bit color format (BGRX), which does not depend on the
+ *    format of the legacy guest framebuffer (GFB) or the current
+ *    GMRFB state.
+ */
+
+typedef struct SVGAColorBGRX {
+   union {
+      struct {
+         uint32 b : 8;
+         uint32 g : 8;
+         uint32 r : 8;
+         uint32 x : 8;  /* Unused */
+      };
+
+      uint32 value;
+   };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ *    Signed rectangle and point primitives. These are used by the new
+ *    2D primitives for drawing to Screen Objects, which can occupy a
+ *    signed virtual coordinate space.
+ *
+ *    SVGASignedRect specifies a half-open interval: the (left, top)
+ *    pixel is part of the rectangle, but the (right, bottom) pixel is
+ *    not.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  left;
+   int32  top;
+   int32  right;
+   int32  bottom;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  x;
+   int32  y;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
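+
+/*
+ * Editor's illustration of the half-open convention described above:
+ * this rectangle covers pixels (0,0) through (639,479) inclusive.
+ */
+static const SVGASignedRect svgaExampleRect = { 0, 0, 640, 480 };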
+
+
+/*
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_CAP_IRQMASK --
+ *    Provides device interrupts.  Adds device register SVGA_REG_IRQMASK
+ *    to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ *    set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ *    Provides synchronous mapping of guest memory regions (GMR).
+ *    Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ *    SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ *    Allows framebuffer trace-based updates even when FIFO is enabled.
+ *    Adds device register SVGA_REG_TRACES.
+ *
+ * SVGA_CAP_GMR2 --
+ *    Provides asynchronous commands to define and remap guest memory
+ *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ *    SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ *    Allow screen object support, and require backing stores from the
+ *    guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ *    Enable register based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ *    This cap was incorrectly used by old drivers and should not be
+ *    reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ *    Enable support for the prepend command buffer submission
+ *    registers, SVGA_REG_CMD_PREPEND_LOW and
+ *    SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ *    Enable guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_CMD_BUFFERS_3 --
+ *    Enable support for command buffers in a mob.
+ */
+
+#define SVGA_CAP_NONE               0x00000000
+#define SVGA_CAP_RECT_COPY          0x00000002
+#define SVGA_CAP_CURSOR             0x00000020
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080
+#define SVGA_CAP_8BIT_EMULATION     0x00000100
+#define SVGA_CAP_ALPHA_CURSOR       0x00000200
+#define SVGA_CAP_3D                 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO      0x00008000
+#define SVGA_CAP_MULTIMON           0x00010000
+#define SVGA_CAP_PITCHLOCK          0x00020000
+#define SVGA_CAP_IRQMASK            0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000
+#define SVGA_CAP_GMR                0x00100000
+#define SVGA_CAP_TRACES             0x00200000
+#define SVGA_CAP_GMR2               0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
+#define SVGA_CAP_DX                 0x10000000
+
+#define SVGA_CAP_CMD_RESERVED       0x80000000
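+
+/*
+ * A sketch of a capability test.  This is an editor's illustration:
+ * reg_read() is a hypothetical accessor for the IOSpace registers, and
+ * SVGA_REG_CAPABILITIES is defined earlier in this file.
+ */
+extern uint32 reg_read(uint32 index);  /* hypothetical */
+
+static inline bool svga_has_cap(uint32 cap)
+{
+   return (reg_read(SVGA_REG_CAPABILITIES) & cap) != 0;
+}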
+
+
+/*
+ * The Guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized.  The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX).  On success,
+ * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
+ * the requested capability.  If the command is not supported, EBX
+ * will be left unchanged and EAX will be set to -1.  Because -1 may
+ * itself be a valid capability value, the correct way to check for
+ * success is to initialize EBX to something other than BDOOR_MAGIC
+ * before the call and then check whether it was changed to
+ * BDOOR_MAGIC.
+ */
+
+typedef enum {
+   SVGABackdoorCapDeviceCaps = 0,
+   SVGABackdoorCapFifoCaps = 1,
+   SVGABackdoorCap3dHWVersion = 2,
+   SVGABackdoorCapMax = 3,
+} SVGABackdoorCapType;
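+
+/*
+ * A sketch of the success check described above.  Editor's
+ * illustration: backdoor_call() is hypothetical and stands in for
+ * loading the registers and executing the backdoor I/O instruction,
+ * and the BDOOR_* constants come from the VMware backdoor interface,
+ * not from this file.
+ */
+extern void backdoor_call(uint32 ecx, uint32 *eax, uint32 *ebx); /* hypothetical */
+
+static inline bool svga_backdoor_get_device_caps(uint32 *caps)
+{
+   uint32 eax, ebx = 0;  /* anything other than BDOOR_MAGIC */
+
+   backdoor_call(BDOOR_CMD_GET_SVGA_CAPABILITIES |
+                 (SVGABackdoorCapDeviceCaps << 16), &eax, &ebx);
+   if (ebx != BDOOR_MAGIC)
+      return false;      /* unsupported; EAX may legitimately be -1 */
+   *caps = eax;
+   return true;
+}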
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem.  It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ *   following data area is and how much data it contains; there may be
+ *   more registers following these, depending on the FIFO protocol
+ *   version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+   /*
+    * Block 1 (basic registers): The originally defined FIFO registers.
+    * These exist and are valid for all versions of the FIFO protocol.
+    */
+
+   SVGA_FIFO_MIN = 0,
+   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
+   SVGA_FIFO_NEXT_CMD,
+   SVGA_FIFO_STOP,
+
+   /*
+    * Block 2 (extended registers): Mandatory registers for the extended
+    * FIFO.  These exist if the SVGA caps register includes
+    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+    * associated capability bit is enabled.
+    *
+    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+    * This means that the guest has to test individually (in most cases
+    * using FIFO caps) for the presence of registers after this; the VMX
+    * can define "extended FIFO" to mean whatever it wants, and currently
+    * won't enable it unless there's room for that set and much more.
+    */
+
+   SVGA_FIFO_CAPABILITIES = 4,
+   SVGA_FIFO_FLAGS,
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
+   SVGA_FIFO_FENCE,
+
+   /*
+    * Block 3a (optional extended registers): Additional registers for the
+    * extended FIFO, whose presence isn't actually implied by
+    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+    * leave room for them.
+    *
+    * The VMX currently considers the registers in block 3a mandatory
+    * for the extended FIFO.
+    */
+
+   /* Valid if exists (i.e. if extended FIFO enabled): */
+   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
+   SVGA_FIFO_PITCHLOCK,
+
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
+   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
+   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
+   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
+   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
+   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
+   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
+
+   /*
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
+    *
+    * By default this is SVGA_ID_INVALID, to indicate that the cursor
+    * coordinates are specified relative to the virtual root. If this
+    * is set to a specific screen ID, cursor position is reinterpreted
+    * as a signed offset relative to that screen's origin.
+    */
+   SVGA_FIFO_CURSOR_SCREEN_ID,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host; drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
+   /*
+    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+    * registers, but this must be done carefully and with judicious use of
+    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+    * enough to tell you whether the register exists: we've shipped drivers
+    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+    * the earlier ones.  The actual order of introduction was:
+    * - PITCHLOCK
+    * - 3D_CAPS
+    * - CURSOR_* (cursor bypass 3)
+    * - RESERVED
+    * So, code that wants to know whether it can use any of the
+    * aforementioned registers, or anything else added after PITCHLOCK and
+    * before 3D_CAPS, needs to reason about something other than
+    * SVGA_FIFO_MIN.
+    */
+
+   /*
+    * 3D caps block space; valid with 3D hardware version >=
+    * SVGA3D_HWVERSION_WS6_B1.
+    */
+   SVGA_FIFO_3D_CAPS      = 32,
+   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+   /*
+    * End of VMX's current definition of "extended-FIFO registers".
+    * Registers before here are always enabled/disabled as a block; either
+    * the extended FIFO is enabled and includes all preceding registers, or
+    * it's disabled entirely.
+    *
+    * Block 3b (truly optional extended registers): Additional registers for
+    * the extended FIFO, which the VMX already knows how to enable and
+    * disable with correct granularity.
+    *
+    * Registers after here exist if and only if the guest SVGA driver
+    * sets SVGA_FIFO_MIN high enough to leave room for them.
+    */
+
+   /* Valid if register exists: */
+   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
+
+   /*
+    * Always keep this last.  This defines the maximum number of
+    * registers we know about.  At power-on, this value is placed in
+    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+    * to allocate this much space in FIFO memory for registers.
+    */
+    SVGA_FIFO_NUM_REGS
+};
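+
+/*
+ * A sketch of the presence checks described above, modeled on the
+ * VMware reference driver.  Editor's illustration: fifoMem is assumed
+ * to be the FIFO mapped as 32-bit words.
+ */
+static inline bool svgaFifoHasCap(const volatile uint32 *fifoMem, uint32 cap)
+{
+   return (fifoMem[SVGA_FIFO_CAPABILITIES] & cap) != 0;
+}
+
+static inline bool svgaFifoRegisterExists(const volatile uint32 *fifoMem,
+                                          uint32 reg)
+{
+   /*
+    * SVGA_FIFO_MIN holds a byte offset; each register is one 32-bit
+    * word.  Per the caveat above, this test alone is not sufficient
+    * for registers between PITCHLOCK and 3D_CAPS; check the matching
+    * FIFO capability bit as well.
+    */
+   return fifoMem[SVGA_FIFO_MIN] > (reg << 2);
+}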
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data.  It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; it should be kept that way.
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ *  This explains the relationship between the various FIFO
+ *  sync-related registers in IOSpace and in FIFO space.
+ *
+ *  SVGA_REG_SYNC --
+ *
+ *       The SYNC register can be used in two different ways by the guest:
+ *
+ *         1. If the guest wishes to fully sync (drain) the FIFO,
+ *            it will write once to SYNC then poll on the BUSY
+ *            register. The FIFO is sync'ed once BUSY is zero.
+ *
+ *         2. If the guest wants to asynchronously wake up the host,
+ *            it will write once to SYNC without polling on BUSY.
+ *            Ideally it will do this after some new commands have
+ *            been placed in the FIFO, and after reading a zero
+ *            from SVGA_FIFO_BUSY.
+ *
+ *       (1) is the original behaviour that SYNC was designed to
+ *       support.  Originally, a write to SYNC would implicitly
+ *       trigger a read from BUSY. This causes us to synchronously
+ *       process the FIFO.
+ *
+ *       This behaviour has since been changed so that writing SYNC
+ *       will *not* implicitly cause a read from BUSY. Instead, it
+ *       makes a channel call which asynchronously wakes up the MKS
+ *       thread.
+ *
+ *       New guests can use this new behaviour to implement (2)
+ *       efficiently. This lets guests get the host's attention
+ *       without waiting for the MKS to poll, which gives us much
+ *       better CPU utilization on SMP hosts and on UP hosts while
+ *       we're blocked on the host GPU.
+ *
+ *       Old guests shouldn't notice the behaviour change. SYNC was
+ *       never guaranteed to process the entire FIFO, since it was
+ *       bounded to a particular number of CPU cycles. Old guests will
+ *       still loop on the BUSY register until the FIFO is empty.
+ *
+ *       Writing to SYNC currently has the following side-effects:
+ *
+ *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ *         - Asynchronously wakes up the MKS thread for FIFO processing
+ *         - The value written to SYNC is recorded as a "reason", for
+ *           stats purposes.
+ *
+ *       If SVGA_FIFO_BUSY is available, drivers are advised to only
+ *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ *       eventually set SVGA_FIFO_BUSY on its own, but this approach
+ *       lets the driver avoid sending multiple asynchronous wakeup
+ *       messages to the MKS thread.
+ *
+ *  SVGA_REG_BUSY --
+ *
+ *       This register is set to TRUE when SVGA_REG_SYNC is written,
+ *       and it reads as FALSE when the FIFO has been completely
+ *       drained.
+ *
+ *       Every read from this register causes us to synchronously
+ *       process FIFO commands. There is no guarantee as to how many
+ *       commands each read will process.
+ *
+ *       CPU time spent processing FIFO commands will be billed to
+ *       the guest.
+ *
+ *       New drivers should avoid using this register unless they
+ *       need to guarantee that the FIFO is completely drained. It
+ *       is overkill for performing a sync-to-fence. Older drivers
+ *       will use this register for any type of synchronization.
+ *
+ *  SVGA_FIFO_BUSY --
+ *
+ *       This register is a fast way for the guest driver to check
+ *       whether the FIFO is already being processed. It reads and
+ *       writes at normal RAM speeds, with no monitor intervention.
+ *
+ *       If this register reads as TRUE, the host is guaranteeing that
+ *       any new commands written into the FIFO will be noticed before
+ *       the MKS goes back to sleep.
+ *
+ *       If this register reads as FALSE, no such guarantee can be
+ *       made.
+ *
+ *       The guest should use this register to quickly determine
+ *       whether or not it needs to wake up the host. If the guest
+ *       just wrote a command or group of commands that it would like
+ *       the host to begin processing, it should:
+ *
+ *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ *            action is necessary.
+ *
+ *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ *            code that we've already sent a SYNC to the host and we
+ *            don't need to send a duplicate.
+ *
+ *         3. Write a reason to SVGA_REG_SYNC. This will send an
+ *            asynchronous wakeup to the MKS thread.
+ */
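+
+/*
+ * A sketch of the two usages described above.  Editor's illustration:
+ * reg_read()/reg_write() are hypothetical IOSpace accessors, and
+ * fifoMem is the FIFO mapped as 32-bit words.
+ */
+extern void reg_write(uint32 index, uint32 value);  /* hypothetical */
+
+static inline void svga_wake_host(volatile uint32 *fifoMem)
+{
+   if (!fifoMem[SVGA_FIFO_BUSY]) {      /* 1. host already awake? */
+      fifoMem[SVGA_FIFO_BUSY] = true;   /* 2. note that a SYNC was sent */
+      reg_write(SVGA_REG_SYNC, 1);      /* 3. async wakeup; 1 = reason */
+   }
+}
+
+static inline void svga_drain_fifo(void)
+{
+   reg_write(SVGA_REG_SYNC, 1);
+   while (reg_read(SVGA_REG_BUSY))
+      ;  /* each read synchronously processes FIFO commands */
+}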
+
+
+/*
+ * FIFO Capabilities
+ *
+ *      Fence -- Fence register and command are supported
+ *      Accel Front -- Front buffer only commands are supported
+ *      Pitch Lock -- Pitch lock register is supported
+ *      Video -- SVGA Video overlay units are supported
+ *      Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ *    Provides dynamic multi-screen rendering, for improved Unity and
+ *    multi-monitor modes. With Screen Object, the guest can
+ *    dynamically create and destroy 'screens', which can represent
+ *    Unity windows or virtual monitors. Screen Object also provides
+ *    strong guarantees that DMA operations happen only when
+ *    guest-initiated. Screen Object deprecates the BAR1 guest
+ *    framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ *    New registers:
+ *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ *    New 2D commands:
+ *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ *    New 3D commands:
+ *       BLIT_SURFACE_TO_SCREEN
+ *
+ *    New guarantees:
+ *
+ *       - The host will not read or write guest memory, including the GFB,
+ *         except when explicitly initiated by a DMA command.
+ *
+ *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ *         is guaranteed to complete before any subsequent FENCEs.
+ *
+ *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ *         PRESENT_READBACK) as well as new Screen blit commands will
+ *         all behave consistently as blits, and memory will be read
+ *         or written in FIFO order.
+ *
+ *         For example, if you PRESENT from one SVGA3D surface to multiple
+ *         places on the screen, the data copied will always be from the
+ *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ *         This was not necessarily true on devices without Screen Object.
+ *
+ *         This means that on devices that support Screen Object, the
+ *         PRESENT_READBACK command should not be necessary unless you
+ *         actually want to read back the results of 3D rendering into
+ *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ *         command provides a strict superset of functionality.)
+ *
+ *       - When a screen is resized, either using Screen Object commands or
+ *         legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits.  This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest-provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store.  This capability requires the use of a backing store when
+ *    creating screen objects.  However if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit.  This cap bit cannot be
+ *    reused since some hosts already expose it.
+ */
+
+#define SVGA_FIFO_CAP_NONE                  0
+#define SVGA_FIFO_CAP_FENCE             (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
+#define SVGA_FIFO_CAP_VIDEO             (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
+#define SVGA_FIFO_CAP_RESERVE           (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_GMR2              (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
+
+
+/*
+ * FIFO Flags
+ *
+ *      Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE                 0
+#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+   SVGA_VIDEO_ENABLED = 0,
+   SVGA_VIDEO_FLAGS,
+   SVGA_VIDEO_DATA_OFFSET,
+   SVGA_VIDEO_FORMAT,
+   SVGA_VIDEO_COLORKEY,
+   SVGA_VIDEO_SIZE,          /* Deprecated */
+   SVGA_VIDEO_WIDTH,
+   SVGA_VIDEO_HEIGHT,
+   SVGA_VIDEO_SRC_X,
+   SVGA_VIDEO_SRC_Y,
+   SVGA_VIDEO_SRC_WIDTH,
+   SVGA_VIDEO_SRC_HEIGHT,
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
+   SVGA_VIDEO_DST_WIDTH,
+   SVGA_VIDEO_DST_HEIGHT,
+   SVGA_VIDEO_PITCH_1,
+   SVGA_VIDEO_PITCH_2,
+   SVGA_VIDEO_PITCH_3,
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+                             /* (SVGA_ID_INVALID) */
+   SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ *      width and height relate to the entire source video frame.
+ *      srcX, srcY, srcWidth and srcHeight represent subset of the source
+ *      video frame to be displayed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
+   uint32 enabled;
+   uint32 flags;
+   uint32 dataOffset;
+   uint32 format;
+   uint32 colorKey;
+   uint32 size;
+   uint32 width;
+   uint32 height;
+   uint32 srcX;
+   uint32 srcY;
+   uint32 srcWidth;
+   uint32 srcHeight;
+   int32  dstX;
+   int32  dstY;
+   uint32 dstWidth;
+   uint32 dstHeight;
+   uint32 pitches[3];
+   uint32 dataGMRId;
+   uint32 dstScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+   uint16 displayId;
+   uint16 isPrimary;
+   uint32 width;
+   uint32 height;
+   uint32 positionX;
+   uint32 positionY;
+} SVGADisplayTopology;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ *    This is a new way to represent a guest's multi-monitor screen or
+ *    Unity window. Screen objects are only supported if the
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ *    If Screen Objects are supported, they can be used to fully
+ *    replace the functionality provided by the framebuffer registers
+ *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ *    The screen object is a struct with guaranteed binary
+ *    compatibility. New flags can be added, and the struct may grow,
+ *    but existing fields must retain their meaning.
+ *
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT_2 adds a required SVGAGuestPtr field
+ *    that is used to back the screen contents.  This memory must come
+ *    from the GFB.  The guest is not allowed to access the memory,
+ *    and doing so will have undefined results.  The backing store is
+ *    required to be page aligned, and its size is padded to the next
+ *    page boundary.  The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ *    The pitch in the backingStore is required to be at least large
+ *    enough to hold a 32bpp scanline.  It is recommended that the
+ *    driver pad bytesPerLine for a potential performance win.
+ *
+ *    The cloneCount field is treated as a hint from the guest that
+ *    the user wants this display to be cloned, cloneCount times.  A
+ *    value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0)
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
+ * deactivated, the base layer is defined to lose all contents and
+ * become black.  When a screen is deactivated, the backing store is
+ * optional.  When this flag is set, backingPtr and bytesPerLine are
+ * ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set,
+ * the screen contents will be output as all black to the user,
+ * although the base layer contents are preserved.  The screen base
+ * layer can still be read and written as normal, though no visible
+ * effect will be seen by the user.  When the flag is changed, the
+ * screen will be blanked or redrawn to the current contents as needed
+ * without any extra commands from the driver.  This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
+   uint32 id;
+   uint32 flags;
+   struct {
+      uint32 width;
+      uint32 height;
+   } size;
+   struct {
+      int32 x;
+      int32 y;
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+
+   /*
+    * The cloneCount field is treated as a hint from the guest that
+    * the user wants this display to be cloned, cloneCount times.
+    *
+    * A value of zero means no cloning should happen.
+    */
+   uint32 cloneCount;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
+
+
+/*
+ *  Commands in the command FIFO:
+ *
+ *  Command IDs defined below are used for the traditional 2D FIFO
+ *  communication (not all commands are available for all versions of the
+ *  SVGA FIFO protocol).
+ *
+ *  Note the holes in the command ID numbers: These commands have been
+ *  deprecated, and the old IDs must not be reused.
+ *
+ *  Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
+ *  protocol.
+ *
+ *  Each command's parameters are described by the comments and
+ *  structs below.
+ */
+
+typedef enum {
+   SVGA_CMD_INVALID_CMD           = 0,
+   SVGA_CMD_UPDATE                = 1,
+   SVGA_CMD_RECT_COPY             = 3,
+   SVGA_CMD_RECT_ROP_COPY         = 14,
+   SVGA_CMD_DEFINE_CURSOR         = 19,
+   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
+   SVGA_CMD_UPDATE_VERBOSE        = 25,
+   SVGA_CMD_FRONT_ROP_FILL        = 29,
+   SVGA_CMD_FENCE                 = 30,
+   SVGA_CMD_ESCAPE                = 33,
+   SVGA_CMD_DEFINE_SCREEN         = 34,
+   SVGA_CMD_DESTROY_SCREEN        = 35,
+   SVGA_CMD_DEFINE_GMRFB          = 36,
+   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
+   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
+   SVGA_CMD_ANNOTATION_FILL       = 39,
+   SVGA_CMD_ANNOTATION_COPY       = 40,
+   SVGA_CMD_DEFINE_GMR2           = 41,
+   SVGA_CMD_REMAP_GMR2            = 42,
+   SVGA_CMD_DEAD                  = 43,
+   SVGA_CMD_DEAD_2                = 44,
+   SVGA_CMD_NOP                   = 45,
+   SVGA_CMD_NOP_ERROR             = 46,
+   SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_DATASIZE       (256 * 1024)
+#define SVGA_CMD_MAX_ARGS           64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ *    This is a DMA transfer which copies from the Guest Framebuffer
+ *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ *    intersect with the provided virtual rectangle.
+ *
+ *    This command does not support using arbitrary guest memory as a
+ *    data source; it only works with the pre-defined GFB memory.
+ *    This command also does not support signed virtual coordinates.
+ *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ *    negative root x/y coordinates, the negative portion of those
+ *    screens will not be reachable by this command.
+ *
+ *    This command is not necessary when using framebuffer
+ *    traces. Traces are automatically enabled if the SVGA FIFO is
+ *    disabled, and you may explicitly enable/disable traces using
+ *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ *    pseudocolor screen updates. The newer Screen Object commands
+ *    only support true color formats.
+ *
+ * Availability:
+ *    Always available.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
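+
+/*
+ * A sketch of emitting this command.  Editor's illustration:
+ * fifo_reserve()/fifo_commit() are hypothetical helpers that manage
+ * space in the FIFO data area.
+ */
+extern void *fifo_reserve(uint32 bytes);   /* hypothetical */
+extern void fifo_commit(uint32 bytes);     /* hypothetical */
+
+static inline void svga_update(uint32 x, uint32 y, uint32 w, uint32 h)
+{
+   const uint32 bytes = sizeof(uint32) + sizeof(SVGAFifoCmdUpdate);
+   uint32 *cmd = fifo_reserve(bytes);
+   SVGAFifoCmdUpdate *body = (SVGAFifoCmdUpdate *)&cmd[1];
+
+   cmd[0] = SVGA_CMD_UPDATE;
+   body->x = x;
+   body->y = y;
+   body->width = w;
+   body->height = h;
+   fifo_commit(bytes);
+}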
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *    The value of ROP may only be SVGA_ROP_COPY, and this command is
+ *    only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+   uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ *    Provide a new cursor image, as an AND/XOR mask.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   /*
+    * Followed by scanline data for AND mask, then XOR mask.
+    * Each scanline is padded to a 32-bit boundary.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ *    Provide a new cursor image, in 32-bit BGRA format.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ *    'reason' value, an opaque cookie which is used by internal
+ *    debugging tools. Third party drivers should not use this
+ *    command.
+ *
+ * Availability:
+ *    SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 reason;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ *    This is a hint which tells the SVGA device that the driver has
+ *    just filled a rectangular region of the GFB with a solid
+ *    color. Instead of reading these pixels from the GFB, the device
+ *    can assume that they all equal 'color'. This is primarily used
+ *    for remote desktop protocols.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define  SVGA_ROP_COPY                    0x03
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 color;     /* In the same format as the GFB */
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ *    Insert a synchronization fence.  When the SVGA device reaches
+ *    this command, it will copy the 'fence' value into the
+ *    SVGA_FIFO_FENCE register. It will also compare the fence against
+ *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ *    raise this interrupt.
+ *
+ * Availability:
+ *    SVGA_FIFO_FENCE for this command,
+ *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 fence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
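+
+/*
+ * A sketch of inserting a fence and syncing to it, reusing the
+ * hypothetical helpers from the SVGA_CMD_UPDATE example above.
+ * Editor's illustration only.
+ */
+static inline void svga_insert_fence(uint32 fence)
+{
+   const uint32 bytes = sizeof(uint32) + sizeof(SVGAFifoCmdFence);
+   uint32 *cmd = fifo_reserve(bytes);
+
+   cmd[0] = SVGA_CMD_FENCE;
+   ((SVGAFifoCmdFence *)&cmd[1])->fence = fence;
+   fifo_commit(bytes);
+}
+
+static inline void svga_sync_to_fence(volatile uint32 *fifoMem, uint32 fence)
+{
+   /* The signed difference tolerates 32-bit fence wrap-around. */
+   while ((int32)(fifoMem[SVGA_FIFO_FENCE] - fence) < 0)
+      svga_wake_host(fifoMem);  /* see the SYNC/BUSY notes above */
+}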
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ *    Send an extended or vendor-specific variable length command.
+ *    This is used for video overlay, third party plugins, and
+ *    internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 nsid;
+   uint32 size;
+   /* followed by 'size' bytes of data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ *    Define or redefine an SVGAScreenObject. See the description of
+ *    SVGAScreenObject above.  The video driver is responsible for
+ *    generating new screen IDs. They should be small non-negative
+ *    integers. The virtual device will have an implementation
+ *    specific upper limit on the number of screen IDs
+ *    supported. Drivers are responsible for recycling IDs. The first
+ *    valid ID is zero.
+ *
+ *    - Interaction with other registers:
+ *
+ *    For backwards compatibility, when the GFB mode registers (WIDTH,
+ *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ *    deletes all screens other than screen #0, and redefines screen
+ *    #0 according to the specified mode. Drivers that use
+ *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ *    If you use screen objects, do not use the legacy multi-mon
+ *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAScreenObject screen;   /* Variable-length according to version */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ *    Destroy an SVGAScreenObject. Its ID is immediately available for
+ *    re-use.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 screenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ *    This command sets a piece of SVGA device state called the
+ *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ *    piece of light-weight state which identifies the location and
+ *    format of an image in guest memory or in BAR1. The GMRFB has
+ *    an arbitrary size, and it doesn't need to match the geometry
+ *    of the GFB or any screen object.
+ *
+ *    The GMRFB can be redefined as often as you like. You could
+ *    always use the same GMRFB, you could redefine it before
+ *    rendering from a different guest screen, or you could even
+ *    redefine it before every blit.
+ *
+ *    There are multiple ways to use this command. The simplest way is
+ *    to use it to move the framebuffer either to elsewhere in the GFB
+ *    (BAR1) memory region, or to a user-defined GMR. This lets a
+ *    driver use a framebuffer allocated entirely out of normal system
+ *    memory, which we encourage.
+ *
+ *    Another way to use this command is to set up a ring buffer of
+ *    updates in GFB memory. If a driver wants to ensure that no
+ *    frames are skipped by the SVGA device, it is important that the
+ *    driver not modify the source data for a blit until the device is
+ *    done processing the command. One efficient way to accomplish
+ *    this is to use a ring of small DMA buffers. Each buffer is used
+ *    for one blit, then we move on to the next buffer in the
+ *    ring. The FENCE mechanism is used to protect each buffer from
+ *    re-use until the device is finished with that buffer's
+ *    corresponding blit.
+ *
+ *    This command does not affect the meaning of SVGA_CMD_UPDATE.
+ *    UPDATEs always occur from the legacy GFB memory area. This
+ *    command has no support for pseudocolor GMRFBs. Currently only
+ *    true-color 15, 16, and 24-bit depths are supported. Future
+ *    devices may expose capabilities for additional framebuffer
+ *    formats.
+ *
+ *    The default GMRFB value is undefined. Drivers must always send
+ *    this command at least once before performing any blit from the
+ *    GMRFB.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestPtr        ptr;
+   uint32              bytesPerLine;
+   SVGAGMRImageFormat  format;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
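+
+/*
+ * A sketch of pointing the GMRFB at a 32-bit BGRX image, reusing the
+ * hypothetical fifo_reserve()/fifo_commit() helpers from the
+ * SVGA_CMD_UPDATE example above.  Editor's illustration only.
+ */
+static inline void svga_define_gmrfb(SVGAGuestPtr ptr, uint32 bytesPerLine)
+{
+   const uint32 bytes = sizeof(uint32) + sizeof(SVGAFifoCmdDefineGMRFB);
+   uint32 *cmd = fifo_reserve(bytes);
+   SVGAFifoCmdDefineGMRFB *body = (SVGAFifoCmdDefineGMRFB *)&cmd[1];
+
+   cmd[0] = SVGA_CMD_DEFINE_GMRFB;
+   body->ptr = ptr;
+   body->bytesPerLine = bytesPerLine;
+   body->format.value = (24 << 8) | 32;  /* 32-bit BGRX, as above */
+   fifo_commit(bytes);
+}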
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ *    This is a guest-to-host blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from the current GMRFB to
+ *    a ScreenObject.
+ *
+ *    The destination coordinate may be specified relative to a
+ *    screen's origin.  The provided screen ID must be valid.
+ *
+ *    The SVGA device is guaranteed to finish reading from the GMRFB
+ *    by the time any subsequent FENCE commands are reached.
+ *
+ *    This command consumes an annotation. See the
+ *    SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   SVGASignedRect   destRect;
+   uint32           destScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ *    This is a host-to-guest blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from a single ScreenObject
+ *    back to the current GMRFB.
+ *
+ *    The source coordinate is specified relative to a screen's
+ *    origin.  The provided screen ID must be valid. If any parameters
+ *    are invalid, the resulting pixel values are undefined.
+ *
+ *    The SVGA device is guaranteed to finish writing to the GMRFB by
+ *    the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  destOrigin;
+   SVGASignedRect   srcRect;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ *    The annotation commands have been deprecated and should not be
+ *    used by new drivers.  They used to provide performance hints to
+ *    the SVGA device about the content of screen updates, but newer
+ *    SVGA devices ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAColorBGRX  color;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ *    The annotation commands have been deprecated and should not be
+ *    used by new drivers.  They used to provide performance hints to
+ *    the SVGA device about the content of screen updates, but newer
+ *    SVGA devices ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ *    Define guest memory region v2.  See the description of GMRs above.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ *    Remap guest memory region v2.  See the description of GMRs above.
+ *
+ *    This command allows the guest to modify a portion of an existing GMR by
+ *    invalidating it or reassigning it to different guest physical pages.
+ *    The pages are identified by physical page number (PPN).  The pages
+ *    are assumed to be pinned and valid for DMA operations.
+ *
+ *    Description of command flags:
+ *
+ *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ *       The PPN list must not overlap with the remap region (this can be
+ *       handled trivially by referencing a separate GMR).  If the flag is
+ *       disabled, the PPN list is appended to the SVGAFifoCmdRemapGMR2
+ *       command body.
+ *
+ *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ *       it is in PPN32 format.
+ *
+ *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ *       A single PPN can be used to invalidate a portion of a GMR or
+ *       map it to a single guest scratch page.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef enum {
+   SVGA_REMAP_GMR2_PPN32         = 0,
+   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
+   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
+   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   SVGARemapGMR2Flags flags;
+   uint32 offsetPages; /* offset in pages to begin remap */
+   uint32 numPages; /* number of pages to remap */
+   /*
+    * Followed by additional data depending on SVGARemapGMR2Flags.
+    *
+    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+    * Otherwise an array of page descriptors in PPN32 or PPN64 format
+    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
+    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
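+
+/*
+ * Editor's illustration of the variable-length layout described above:
+ * computing the total command size for a given flag combination.
+ */
+static inline uint32 svga_remap_gmr2_size(SVGARemapGMR2Flags flags,
+                                          uint32 numPages)
+{
+   uint32 bytes = sizeof(uint32) + sizeof(SVGAFifoCmdRemapGMR2);
+   uint32 entry = (flags & SVGA_REMAP_GMR2_PPN64) ?
+                  sizeof(PPN64) : sizeof(PPN);
+
+   if (flags & SVGA_REMAP_GMR2_VIA_GMR)
+      bytes += sizeof(SVGAGuestPtr);      /* one pointer to a PPN list */
+   else if (flags & SVGA_REMAP_GMR2_SINGLE_PPN)
+      bytes += entry;                     /* a single inline entry */
+   else
+      bytes += numPages * entry;          /* full inline PPN array */
+   return bytes;
+}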
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE             (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D       (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE         (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX      (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX           (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN       (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX       (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT   (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
+
+/*
+ * To simplify autoDetect display configuration, support a minimum of
+ * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
+ *   numDisplays = 2
+ *   maxWidth = numDisplays * 1920 = 3840
+ *   maxHeight = rotated width of single monitor = 1920
+ *   vramSize = maxWidth * maxHeight * 4 = 29491200
+ */
+#define SVGA_VRAM_SIZE_AUTODETECT   (32 * 1024 * 1024)
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE                    (256 * 1024)
+#define SVGA_FIFO_SIZE_3D                 (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT   (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                  FALSE
+#else
+#define SVGA_VRAM_SIZE              (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE               (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D               SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT   (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                   TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS          (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS     (4 * 1024 * 1024)
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 0000000..2e8ba4d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 0000000..120eab8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 0000000..7e7b0ce
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 0000000..e2e440e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7d..0000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga3d_reg.h --
- *
- *       SVGA 3D hardware definitions
- */
-
-#ifndef _SVGA3D_REG_H_
-#define _SVGA3D_REG_H_
-
-#include "svga_reg.h"
-
-typedef uint32 PPN;
-typedef __le64 PPN64;
-
-/*
- * 3D Hardware Version
- *
- *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
- *   register.   Is set by the host and read by the guest.  This lets
- *   us make new guest drivers which are backwards-compatible with old
- *   SVGA hardware revisions.  It does not let us support old guest
- *   drivers.  Good enough for now.
- *
- */
-
-#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
-#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
-#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
-
-typedef enum {
-   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
-   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
-   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
-   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
-   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
-   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
-   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
-   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
-} SVGA3dHardwareVersion;
-
-/*
- * Generic Types
- */
-
-typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
-#define SVGA3D_NUM_CLIPPLANES                   6
-#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
-#define SVGA3D_MAX_CONTEXT_IDS                  256
-#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
-
-#define SVGA3D_NUM_TEXTURE_UNITS                32
-#define SVGA3D_NUM_LIGHTS                       8
-
-/*
- * Surface formats.
- *
- * If you modify this list, be sure to keep GLUtil.c in sync. It
- * includes the internal format definition of each surface in
- * GLUtil_ConvertSurfaceFormat, and it contains a table of
- * human-readable names in GLUtil_GetFormatName.
- */
-
-typedef enum SVGA3dSurfaceFormat {
-   SVGA3D_FORMAT_MIN                   = 0,
-   SVGA3D_FORMAT_INVALID               = 0,
-
-   SVGA3D_X8R8G8B8                     = 1,
-   SVGA3D_A8R8G8B8                     = 2,
-
-   SVGA3D_R5G6B5                       = 3,
-   SVGA3D_X1R5G5B5                     = 4,
-   SVGA3D_A1R5G5B5                     = 5,
-   SVGA3D_A4R4G4B4                     = 6,
-
-   SVGA3D_Z_D32                        = 7,
-   SVGA3D_Z_D16                        = 8,
-   SVGA3D_Z_D24S8                      = 9,
-   SVGA3D_Z_D15S1                      = 10,
-
-   SVGA3D_LUMINANCE8                   = 11,
-   SVGA3D_LUMINANCE4_ALPHA4            = 12,
-   SVGA3D_LUMINANCE16                  = 13,
-   SVGA3D_LUMINANCE8_ALPHA8            = 14,
-
-   SVGA3D_DXT1                         = 15,
-   SVGA3D_DXT2                         = 16,
-   SVGA3D_DXT3                         = 17,
-   SVGA3D_DXT4                         = 18,
-   SVGA3D_DXT5                         = 19,
-
-   SVGA3D_BUMPU8V8                     = 20,
-   SVGA3D_BUMPL6V5U5                   = 21,
-   SVGA3D_BUMPX8L8V8U8                 = 22,
-   SVGA3D_BUMPL8V8U8                   = 23,
-
-   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
-   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
-
-   SVGA3D_A2R10G10B10                  = 26,
-
-   /* signed formats */
-   SVGA3D_V8U8                         = 27,
-   SVGA3D_Q8W8V8U8                     = 28,
-   SVGA3D_CxV8U8                       = 29,
-
-   /* mixed formats */
-   SVGA3D_X8L8V8U8                     = 30,
-   SVGA3D_A2W10V10U10                  = 31,
-
-   SVGA3D_ALPHA8                       = 32,
-
-   /* Single- and dual-component floating point formats */
-   SVGA3D_R_S10E5                      = 33,
-   SVGA3D_R_S23E8                      = 34,
-   SVGA3D_RG_S10E5                     = 35,
-   SVGA3D_RG_S23E8                     = 36,
-
-   SVGA3D_BUFFER                       = 37,
-
-   SVGA3D_Z_D24X8                      = 38,
-
-   SVGA3D_V16U16                       = 39,
-
-   SVGA3D_G16R16                       = 40,
-   SVGA3D_A16B16G16R16                 = 41,
-
-   /* Packed Video formats */
-   SVGA3D_UYVY                         = 42,
-   SVGA3D_YUY2                         = 43,
-
-   /* Planar video formats */
-   SVGA3D_NV12                         = 44,
-
-   /* Video format with alpha */
-   SVGA3D_AYUV                         = 45,
-
-   SVGA3D_R32G32B32A32_TYPELESS        = 46,
-   SVGA3D_R32G32B32A32_FLOAT           = 25,
-   SVGA3D_R32G32B32A32_UINT            = 47,
-   SVGA3D_R32G32B32A32_SINT            = 48,
-   SVGA3D_R32G32B32_TYPELESS           = 49,
-   SVGA3D_R32G32B32_FLOAT              = 50,
-   SVGA3D_R32G32B32_UINT               = 51,
-   SVGA3D_R32G32B32_SINT               = 52,
-   SVGA3D_R16G16B16A16_TYPELESS        = 53,
-   SVGA3D_R16G16B16A16_FLOAT           = 24,
-   SVGA3D_R16G16B16A16_UNORM           = 41,
-   SVGA3D_R16G16B16A16_UINT            = 54,
-   SVGA3D_R16G16B16A16_SNORM           = 55,
-   SVGA3D_R16G16B16A16_SINT            = 56,
-   SVGA3D_R32G32_TYPELESS              = 57,
-   SVGA3D_R32G32_FLOAT                 = 36,
-   SVGA3D_R32G32_UINT                  = 58,
-   SVGA3D_R32G32_SINT                  = 59,
-   SVGA3D_R32G8X24_TYPELESS            = 60,
-   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
-   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
-   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
-   SVGA3D_R10G10B10A2_TYPELESS         = 64,
-   SVGA3D_R10G10B10A2_UNORM            = 26,
-   SVGA3D_R10G10B10A2_UINT             = 65,
-   SVGA3D_R11G11B10_FLOAT              = 66,
-   SVGA3D_R8G8B8A8_TYPELESS            = 67,
-   SVGA3D_R8G8B8A8_UNORM               = 68,
-   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
-   SVGA3D_R8G8B8A8_UINT                = 70,
-   SVGA3D_R8G8B8A8_SNORM               = 28,
-   SVGA3D_R8G8B8A8_SINT                = 71,
-   SVGA3D_R16G16_TYPELESS              = 72,
-   SVGA3D_R16G16_FLOAT                 = 35,
-   SVGA3D_R16G16_UNORM                 = 40,
-   SVGA3D_R16G16_UINT                  = 73,
-   SVGA3D_R16G16_SNORM                 = 39,
-   SVGA3D_R16G16_SINT                  = 74,
-   SVGA3D_R32_TYPELESS                 = 75,
-   SVGA3D_D32_FLOAT                    = 76,
-   SVGA3D_R32_FLOAT                    = 34,
-   SVGA3D_R32_UINT                     = 77,
-   SVGA3D_R32_SINT                     = 78,
-   SVGA3D_R24G8_TYPELESS               = 79,
-   SVGA3D_D24_UNORM_S8_UINT            = 80,
-   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
-   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
-   SVGA3D_R8G8_TYPELESS                = 83,
-   SVGA3D_R8G8_UNORM                   = 84,
-   SVGA3D_R8G8_UINT                    = 85,
-   SVGA3D_R8G8_SNORM                   = 27,
-   SVGA3D_R8G8_SINT                    = 86,
-   SVGA3D_R16_TYPELESS                 = 87,
-   SVGA3D_R16_FLOAT                    = 33,
-   SVGA3D_D16_UNORM                    = 8,
-   SVGA3D_R16_UNORM                    = 88,
-   SVGA3D_R16_UINT                     = 89,
-   SVGA3D_R16_SNORM                    = 90,
-   SVGA3D_R16_SINT                     = 91,
-   SVGA3D_R8_TYPELESS                  = 92,
-   SVGA3D_R8_UNORM                     = 93,
-   SVGA3D_R8_UINT                      = 94,
-   SVGA3D_R8_SNORM                     = 95,
-   SVGA3D_R8_SINT                      = 96,
-   SVGA3D_A8_UNORM                     = 32,
-   SVGA3D_R1_UNORM                     = 97,
-   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
-   SVGA3D_R8G8_B8G8_UNORM              = 99,
-   SVGA3D_G8R8_G8B8_UNORM              = 100,
-   SVGA3D_BC1_TYPELESS                 = 101,
-   SVGA3D_BC1_UNORM                    = 15,
-   SVGA3D_BC1_UNORM_SRGB               = 102,
-   SVGA3D_BC2_TYPELESS                 = 103,
-   SVGA3D_BC2_UNORM                    = 17,
-   SVGA3D_BC2_UNORM_SRGB               = 104,
-   SVGA3D_BC3_TYPELESS                 = 105,
-   SVGA3D_BC3_UNORM                    = 19,
-   SVGA3D_BC3_UNORM_SRGB               = 106,
-   SVGA3D_BC4_TYPELESS                 = 107,
-   SVGA3D_BC4_UNORM                    = 108,
-   SVGA3D_BC4_SNORM                    = 109,
-   SVGA3D_BC5_TYPELESS                 = 110,
-   SVGA3D_BC5_UNORM                    = 111,
-   SVGA3D_BC5_SNORM                    = 112,
-   SVGA3D_B5G6R5_UNORM                 = 3,
-   SVGA3D_B5G5R5A1_UNORM               = 5,
-   SVGA3D_B8G8R8A8_UNORM               = 2,
-   SVGA3D_B8G8R8X8_UNORM               = 1,
-   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
-   SVGA3D_B8G8R8A8_TYPELESS            = 114,
-   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
-   SVGA3D_B8G8R8X8_TYPELESS            = 116,
-   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
-
-   /* Advanced D3D9 depth formats. */
-   SVGA3D_Z_DF16                       = 118,
-   SVGA3D_Z_DF24                       = 119,
-   SVGA3D_Z_D24S8_INT                  = 120,
-
-   /* Planar video formats. */
-   SVGA3D_YV12                         = 121,
-
-   SVGA3D_FORMAT_MAX                   = 122,
-} SVGA3dSurfaceFormat;
-
-typedef uint32 SVGA3dColor; /* a, r, g, b */
-
-/*
- * These match the D3DFORMAT_OP definitions used by Direct3D. We need
- * them so that we can query the host for the supported surface
- * operations (when we're using the D3D backend, in particular),
- * and so we can send those operations to the guest.
- */
-typedef enum {
-   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
-   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
-   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
-   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
-   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
-   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
-   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
-
-/*
- * This format can be used as a render target if the current display mode
- * has the same depth, provided the alpha channel is ignored.  For example,
- * if the device can render to A8R8G8B8 when the display mode is X8R8G8B8,
- * then the format op list entry for A8R8G8B8 should have this cap.
- */
-   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
-
-/*
- * This format contains DirectDraw support (including Flip).  This flag
- * should not be set on alpha formats.
- */
-   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
-
-/*
- * The rasterizer can provide some level of Direct3D support for this format,
- * which implies that the driver can create a context in this mode (for some
- * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
- * flag must also be set.
- */
-   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
-
-/*
- * This is set for a private format when the driver has put the bpp in
- * the structure.
- */
-   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
-
-/*
- * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
- */
-   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
-
-/*
- * Indicates that this format can be used to create offscreen plain surfaces.
- */
-   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
-
-/*
- * Indicates that this format can be read as an SRGB texture (meaning that
- * the sampler will linearize the looked-up data).
- */
-   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
-
-/*
- * Indicates that this format can be used in the bumpmap instructions
- */
-   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
-
-/*
- * Indicates that this format can be sampled by the displacement map sampler
- */
-   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
-
-/*
- * Indicates that this format cannot be used with texture filtering
- */
-   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
-
-/*
- * Indicates that format conversions are supported to this RGB format if
- * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
- */
-   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
-
-/*
- * Indicates that this format can be written as an SRGB target (meaning that
- * the pixel pipe will de-linearize data on output to this format).
- */
-   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
-
-/*
- * Indicates that this format cannot be used with alpha blending
- */
-   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
-
-/*
- * Indicates that the device can auto-generate sublevels for resources
- * of this format.
- */
-   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
-
-/*
- * Indicates that this format can be used by vertex texture sampler
- */
-   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
-
-/*
- * Indicates that this format supports neither texture coordinate wrap
- * modes, nor mipmapping
- */
-   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
-} SVGA3dFormatOp;
-
-/*
- * This structure is a bitfield view of SVGA3DFORMAT_OP_*.
- * Entries must be located at the same bit positions.
- */
-typedef union {
-   uint32 value;
-   struct {
-      uint32 texture : 1;
-      uint32 volumeTexture : 1;
-      uint32 cubeTexture : 1;
-      uint32 offscreenRenderTarget : 1;
-      uint32 sameFormatRenderTarget : 1;
-      uint32 unknown1 : 1;
-      uint32 zStencil : 1;
-      uint32 zStencilArbitraryDepth : 1;
-      uint32 sameFormatUpToAlpha : 1;
-      uint32 unknown2 : 1;
-      uint32 displayMode : 1;
-      uint32 acceleration3d : 1;
-      uint32 pixelSize : 1;
-      uint32 convertToARGB : 1;
-      uint32 offscreenPlain : 1;
-      uint32 sRGBRead : 1;
-      uint32 bumpMap : 1;
-      uint32 dmap : 1;
-      uint32 noFilter : 1;
-      uint32 memberOfGroupARGB : 1;
-      uint32 sRGBWrite : 1;
-      uint32 noAlphaBlend : 1;
-      uint32 autoGenMipMap : 1;
-      uint32 vertexTexture : 1;
-      uint32 noTexCoordWrapNorMip : 1;
-   };
-} SVGA3dSurfaceFormatCaps;
-
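Since the bitfield must line up with the SVGA3DFORMAT_OP_* bits, a trimmed, standalone sketch can illustrate the intended correspondence (this assumes an LSB-first bitfield layout, which is what the header itself relies on):

   #include <stdio.h>

   typedef unsigned int uint32;

   #define SVGA3DFORMAT_OP_TEXTURE      0x00000001
   #define SVGA3DFORMAT_OP_CUBETEXTURE  0x00000004

   /* Trimmed copy of SVGA3dSurfaceFormatCaps. */
   typedef union {
      uint32 value;
      struct {
         uint32 texture : 1;
         uint32 volumeTexture : 1;
         uint32 cubeTexture : 1;
         uint32 rest : 29;
      };
   } Caps;

   int main(void)
   {
      Caps caps;

      /* Load a capability word as the host would advertise it... */
      caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE;

      /* ...and read it back through the named bitfields. */
      printf("texture=%u cubeTexture=%u\n",
             (unsigned)caps.texture, (unsigned)caps.cubeTexture);
      return 0;
   }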
-/*
- * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_RS_INVALID                   = 0,
-   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
-   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
-   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
-   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
-   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
-   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
-   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
-   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
-   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
-   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
-   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
-   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
-   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
-   SVGA3D_RS_FOGSTART                  = 16,    /* float */
-   SVGA3D_RS_FOGEND                    = 17,    /* float */
-   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
-   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
-   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
-   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
-   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
-   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
-   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
-   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
-   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
-   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
-   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
-   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
-   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
-   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
-   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
-   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
-   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
-   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
-   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
-   SVGA3D_RS_ZBIAS                     = 45,    /* float */
-   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
-   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
-   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
-   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
-   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
-   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
-   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
-   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
-   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
-   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
-   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
-
-
-   /*
-    * Output Gamma Level
-    *
-    * Output gamma affects the gamma curve of colors that are output from the
-    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
-    * value is <= 0.0, gamma correction is ignored and linear color space is
-    * used.
-    */
-
-   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
-   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
-   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
-   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
-   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
-   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
-   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
-   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
-   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
-   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
-   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
-   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
-   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
-   SVGA3D_RS_MAX
-} SVGA3dRenderStateName;
-
-typedef enum {
-   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
-   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
-   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
-   SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
-   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
-   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
-   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
-} SVGA3dVertexMaterial;
-
-typedef enum {
-   SVGA3D_FILLMODE_INVALID = 0,
-   SVGA3D_FILLMODE_POINT   = 1,
-   SVGA3D_FILLMODE_LINE    = 2,
-   SVGA3D_FILLMODE_FILL    = 3,
-   SVGA3D_FILLMODE_MAX
-} SVGA3dFillModeType;
-
-
-typedef
-union {
-   struct {
-      uint16   mode;       /* SVGA3dFillModeType */
-      uint16   face;       /* SVGA3dFace */
-   };
-   uint32 uintValue;
-} SVGA3dFillMode;
-
-typedef enum {
-   SVGA3D_SHADEMODE_INVALID = 0,
-   SVGA3D_SHADEMODE_FLAT    = 1,
-   SVGA3D_SHADEMODE_SMOOTH  = 2,
-   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
-   SVGA3D_SHADEMODE_MAX
-} SVGA3dShadeMode;
-
-typedef
-union {
-   struct {
-      uint16 repeat;
-      uint16 pattern;
-   };
-   uint32 uintValue;
-} SVGA3dLinePattern;
-
-typedef enum {
-   SVGA3D_BLENDOP_INVALID            = 0,
-   SVGA3D_BLENDOP_ZERO               = 1,
-   SVGA3D_BLENDOP_ONE                = 2,
-   SVGA3D_BLENDOP_SRCCOLOR           = 3,
-   SVGA3D_BLENDOP_INVSRCCOLOR        = 4,
-   SVGA3D_BLENDOP_SRCALPHA           = 5,
-   SVGA3D_BLENDOP_INVSRCALPHA        = 6,
-   SVGA3D_BLENDOP_DESTALPHA          = 7,
-   SVGA3D_BLENDOP_INVDESTALPHA       = 8,
-   SVGA3D_BLENDOP_DESTCOLOR          = 9,
-   SVGA3D_BLENDOP_INVDESTCOLOR       = 10,
-   SVGA3D_BLENDOP_SRCALPHASAT        = 11,
-   SVGA3D_BLENDOP_BLENDFACTOR        = 12,
-   SVGA3D_BLENDOP_INVBLENDFACTOR     = 13,
-   SVGA3D_BLENDOP_MAX
-} SVGA3dBlendOp;
-
-typedef enum {
-   SVGA3D_BLENDEQ_INVALID            = 0,
-   SVGA3D_BLENDEQ_ADD                = 1,
-   SVGA3D_BLENDEQ_SUBTRACT           = 2,
-   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
-   SVGA3D_BLENDEQ_MINIMUM            = 4,
-   SVGA3D_BLENDEQ_MAXIMUM            = 5,
-   SVGA3D_BLENDEQ_MAX
-} SVGA3dBlendEquation;
-
-typedef enum {
-   SVGA3D_FRONTWINDING_INVALID = 0,
-   SVGA3D_FRONTWINDING_CW      = 1,
-   SVGA3D_FRONTWINDING_CCW     = 2,
-   SVGA3D_FRONTWINDING_MAX
-} SVGA3dFrontWinding;
-
-typedef enum {
-   SVGA3D_FACE_INVALID  = 0,
-   SVGA3D_FACE_NONE     = 1,
-   SVGA3D_FACE_FRONT    = 2,
-   SVGA3D_FACE_BACK     = 3,
-   SVGA3D_FACE_FRONT_BACK = 4,
-   SVGA3D_FACE_MAX
-} SVGA3dFace;
-
-/*
- * The order and the values should not be changed
- */
-
-typedef enum {
-   SVGA3D_CMP_INVALID              = 0,
-   SVGA3D_CMP_NEVER                = 1,
-   SVGA3D_CMP_LESS                 = 2,
-   SVGA3D_CMP_EQUAL                = 3,
-   SVGA3D_CMP_LESSEQUAL            = 4,
-   SVGA3D_CMP_GREATER              = 5,
-   SVGA3D_CMP_NOTEQUAL             = 6,
-   SVGA3D_CMP_GREATEREQUAL         = 7,
-   SVGA3D_CMP_ALWAYS               = 8,
-   SVGA3D_CMP_MAX
-} SVGA3dCmpFunc;
-
-/*
- * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
- * the fog factor to be specified in the alpha component of the specular
- * (a.k.a. secondary) vertex color.
- */
-typedef enum {
-   SVGA3D_FOGFUNC_INVALID          = 0,
-   SVGA3D_FOGFUNC_EXP              = 1,
-   SVGA3D_FOGFUNC_EXP2             = 2,
-   SVGA3D_FOGFUNC_LINEAR           = 3,
-   SVGA3D_FOGFUNC_PER_VERTEX       = 4
-} SVGA3dFogFunction;
-
-/*
- * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
- * or per-pixel basis.
- */
-typedef enum {
-   SVGA3D_FOGTYPE_INVALID          = 0,
-   SVGA3D_FOGTYPE_VERTEX           = 1,
-   SVGA3D_FOGTYPE_PIXEL            = 2,
-   SVGA3D_FOGTYPE_MAX              = 3
-} SVGA3dFogType;
-
-/*
- * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
- * computed using the eye Z value of each pixel (or vertex), whereas range-
- * based fog is computed using the actual distance (range) to the eye.
- */
-typedef enum {
-   SVGA3D_FOGBASE_INVALID          = 0,
-   SVGA3D_FOGBASE_DEPTHBASED       = 1,
-   SVGA3D_FOGBASE_RANGEBASED       = 2,
-   SVGA3D_FOGBASE_MAX              = 3
-} SVGA3dFogBase;
-
-typedef enum {
-   SVGA3D_STENCILOP_INVALID        = 0,
-   SVGA3D_STENCILOP_KEEP           = 1,
-   SVGA3D_STENCILOP_ZERO           = 2,
-   SVGA3D_STENCILOP_REPLACE        = 3,
-   SVGA3D_STENCILOP_INCRSAT        = 4,
-   SVGA3D_STENCILOP_DECRSAT        = 5,
-   SVGA3D_STENCILOP_INVERT         = 6,
-   SVGA3D_STENCILOP_INCR           = 7,
-   SVGA3D_STENCILOP_DECR           = 8,
-   SVGA3D_STENCILOP_MAX
-} SVGA3dStencilOp;
-
-typedef enum {
-   SVGA3D_CLIPPLANE_0              = (1 << 0),
-   SVGA3D_CLIPPLANE_1              = (1 << 1),
-   SVGA3D_CLIPPLANE_2              = (1 << 2),
-   SVGA3D_CLIPPLANE_3              = (1 << 3),
-   SVGA3D_CLIPPLANE_4              = (1 << 4),
-   SVGA3D_CLIPPLANE_5              = (1 << 5),
-} SVGA3dClipPlanes;
-
-typedef enum {
-   SVGA3D_CLEAR_COLOR              = 0x1,
-   SVGA3D_CLEAR_DEPTH              = 0x2,
-   SVGA3D_CLEAR_STENCIL            = 0x4
-} SVGA3dClearFlag;
-
-typedef enum {
-   SVGA3D_RT_DEPTH                 = 0,
-   SVGA3D_RT_STENCIL               = 1,
-   SVGA3D_RT_COLOR0                = 2,
-   SVGA3D_RT_COLOR1                = 3,
-   SVGA3D_RT_COLOR2                = 4,
-   SVGA3D_RT_COLOR3                = 5,
-   SVGA3D_RT_COLOR4                = 6,
-   SVGA3D_RT_COLOR5                = 7,
-   SVGA3D_RT_COLOR6                = 8,
-   SVGA3D_RT_COLOR7                = 9,
-   SVGA3D_RT_MAX,
-   SVGA3D_RT_INVALID               = ((uint32)-1),
-} SVGA3dRenderTargetType;
-
-#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
-
-typedef
-union {
-   struct {
-      uint32  red   : 1;
-      uint32  green : 1;
-      uint32  blue  : 1;
-      uint32  alpha : 1;
-   };
-   uint32 uintValue;
-} SVGA3dColorMask;
-
-typedef enum {
-   SVGA3D_VBLEND_DISABLE            = 0,
-   SVGA3D_VBLEND_1WEIGHT            = 1,
-   SVGA3D_VBLEND_2WEIGHT            = 2,
-   SVGA3D_VBLEND_3WEIGHT            = 3,
-} SVGA3dVertexBlendFlags;
-
-typedef enum {
-   SVGA3D_WRAPCOORD_0   = 1 << 0,
-   SVGA3D_WRAPCOORD_1   = 1 << 1,
-   SVGA3D_WRAPCOORD_2   = 1 << 2,
-   SVGA3D_WRAPCOORD_3   = 1 << 3,
-   SVGA3D_WRAPCOORD_ALL = 0xF,
-} SVGA3dWrapFlags;
-
-/*
- * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_TS_INVALID                    = 0,
-   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
-   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
-   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
-   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
-   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
-   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
-   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
-   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
-   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
-   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
-   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
-   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
-   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
-
-
-   /*
-    * Sampler Gamma Level
-    *
-    * Sampler gamma affects the color of samples taken from the sampler.  A
-    * value of 1.0 will produce linear samples.  If the value is <= 0.0 the
-    * gamma value is ignored and a linear space is used.
-    */
-
-   SVGA3D_TS_GAMMA                      = 25,   /* float */
-   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
-   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
-   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_MAX
-} SVGA3dTextureStateName;
-
-typedef enum {
-   SVGA3D_TC_INVALID                   = 0,
-   SVGA3D_TC_DISABLE                   = 1,
-   SVGA3D_TC_SELECTARG1                = 2,
-   SVGA3D_TC_SELECTARG2                = 3,
-   SVGA3D_TC_MODULATE                  = 4,
-   SVGA3D_TC_ADD                       = 5,
-   SVGA3D_TC_ADDSIGNED                 = 6,
-   SVGA3D_TC_SUBTRACT                  = 7,
-   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
-   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
-   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
-   SVGA3D_TC_BLENDFACTORALPHA          = 11,
-   SVGA3D_TC_MODULATE2X                = 12,
-   SVGA3D_TC_MODULATE4X                = 13,
-   SVGA3D_TC_DSDT                      = 14,
-   SVGA3D_TC_DOTPRODUCT3               = 15,
-   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
-   SVGA3D_TC_ADDSIGNED2X               = 17,
-   SVGA3D_TC_ADDSMOOTH                 = 18,
-   SVGA3D_TC_PREMODULATE               = 19,
-   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
-   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
-   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
-   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
-   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
-   SVGA3D_TC_MULTIPLYADD               = 25,
-   SVGA3D_TC_LERP                      = 26,
-   SVGA3D_TC_MAX
-} SVGA3dTextureCombiner;
-
-#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
-
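The SVGA3D_TC_CAP_BIT macro maps a combiner opcode to a capability bit: opcode n occupies bit n-1, and SVGA3D_TC_INVALID (0) maps to no bit at all. A small sketch, using a hypothetical capability word:

   #include <stdio.h>

   #define SVGA3D_TC_INVALID  0
   #define SVGA3D_TC_MODULATE 4

   #define SVGA3D_TC_CAP_BIT(op) ((op) ? (1 << ((op) - 1)) : 0)

   int main(void)
   {
      /* Hypothetical capability word advertised by the host. */
      unsigned caps = SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE);

      if (caps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE))
         printf("MODULATE supported\n");          /* bit 3 is set */
      if (!(caps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_INVALID)))
         printf("INVALID maps to no bit\n");      /* macro yields 0 */
      return 0;
   }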
-typedef enum {
-   SVGA3D_TEX_ADDRESS_INVALID    = 0,
-   SVGA3D_TEX_ADDRESS_WRAP       = 1,
-   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
-   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
-   SVGA3D_TEX_ADDRESS_BORDER     = 4,
-   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
-   SVGA3D_TEX_ADDRESS_EDGE       = 6,
-   SVGA3D_TEX_ADDRESS_MAX
-} SVGA3dTextureAddress;
-
-/*
- * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
- * disabled, and the rasterizer should use the magnification filter instead.
- */
-typedef enum {
-   SVGA3D_TEX_FILTER_NONE           = 0,
-   SVGA3D_TEX_FILTER_NEAREST        = 1,
-   SVGA3D_TEX_FILTER_LINEAR         = 2,
-   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
-   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_MAX
-} SVGA3dTextureFilter;
-
-typedef enum {
-   SVGA3D_TEX_TRANSFORM_OFF    = 0,
-   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
-   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
-   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
-   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
-   SVGA3D_TEX_PROJECTED        = (1 << 15),
-} SVGA3dTexTransformFlags;
-
-typedef enum {
-   SVGA3D_TEXCOORD_GEN_OFF              = 0,
-   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
-   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
-   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
-   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
-   SVGA3D_TEXCOORD_GEN_MAX
-} SVGA3dTextureCoordGen;
-
-/*
- * Texture argument constants for texture combiner
- */
-typedef enum {
-   SVGA3D_TA_INVALID    = 0,
-   SVGA3D_TA_CONSTANT   = 1,
-   SVGA3D_TA_PREVIOUS   = 2,
-   SVGA3D_TA_DIFFUSE    = 3,
-   SVGA3D_TA_TEXTURE    = 4,
-   SVGA3D_TA_SPECULAR   = 5,
-   SVGA3D_TA_MAX
-} SVGA3dTextureArgData;
-
-#define SVGA3D_TM_MASK_LEN 4
-
-/* Modifiers for texture argument constants defined above. */
-typedef enum {
-   SVGA3D_TM_NONE       = 0,
-   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
-   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
-} SVGA3dTextureArgModifier;
-
-#define SVGA3D_INVALID_ID         ((uint32)-1)
-#define SVGA3D_MAX_CLIP_PLANES    6
-
-/*
- * This is the limit to the number of fixed-function texture
- * transforms and texture coordinates we can support. It does *not*
- * correspond to the number of texture image units (samplers) we
- * support!
- */
-#define SVGA3D_MAX_TEXTURE_COORDS 8
-
-/*
- * Vertex declarations
- *
- * Notes:
- *
- * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
- * draw with any POSITIONT vertex arrays, the programmable vertex
- * pipeline will be implicitly disabled. Drawing will take place as if
- * no vertex shader was bound.
- */
-
-typedef enum {
-   SVGA3D_DECLUSAGE_POSITION     = 0,
-   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
-   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
-   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
-   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
-   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
-   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
-   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
-   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
-   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
-   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
-   SVGA3D_DECLUSAGE_FOG,               /* 11 */
-   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
-   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
-   SVGA3D_DECLUSAGE_MAX
-} SVGA3dDeclUsage;
-
-typedef enum {
-   SVGA3D_DECLMETHOD_DEFAULT     = 0,
-   SVGA3D_DECLMETHOD_PARTIALU,
-   SVGA3D_DECLMETHOD_PARTIALV,
-   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
-   SVGA3D_DECLMETHOD_UV,
-   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
-   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
-} SVGA3dDeclMethod;
-
-typedef enum {
-   SVGA3D_DECLTYPE_FLOAT1        =  0,
-   SVGA3D_DECLTYPE_FLOAT2        =  1,
-   SVGA3D_DECLTYPE_FLOAT3        =  2,
-   SVGA3D_DECLTYPE_FLOAT4        =  3,
-   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
-   SVGA3D_DECLTYPE_UBYTE4        =  5,
-   SVGA3D_DECLTYPE_SHORT2        =  6,
-   SVGA3D_DECLTYPE_SHORT4        =  7,
-   SVGA3D_DECLTYPE_UBYTE4N       =  8,
-   SVGA3D_DECLTYPE_SHORT2N       =  9,
-   SVGA3D_DECLTYPE_SHORT4N       = 10,
-   SVGA3D_DECLTYPE_USHORT2N      = 11,
-   SVGA3D_DECLTYPE_USHORT4N      = 12,
-   SVGA3D_DECLTYPE_UDEC3         = 13,
-   SVGA3D_DECLTYPE_DEC3N         = 14,
-   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
-   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
-   SVGA3D_DECLTYPE_MAX,
-} SVGA3dDeclType;
-
-/*
- * This structure is used for the divisor for geometry instancing;
- * it's a direct translation of the Direct3D equivalent.
- */
-typedef union {
-   struct {
-      /*
-       * For index data, this number represents the number of instances to draw.
-       * For instance data, this number represents the number of
-       * instances/vertex in this stream.
-       */
-      uint32 count : 30;
-
-      /*
-       * This is 1 if this is supposed to be the data that is repeated for
-       * every instance.
-       */
-      uint32 indexedData : 1;
-
-      /*
-       * This is 1 if this is supposed to be the per-instance data.
-       */
-      uint32 instanceData : 1;
-   };
-
-   uint32 value;
-} SVGA3dVertexDivisor;
-
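A sketch of how these bits might be filled in for classic two-stream instancing. The convention shown here (indexedData plus the instance count on the geometry stream, instanceData with count 1 on the per-instance stream) follows the Direct3D usage this union is described as a translation of; it is an assumption, not something this header spells out:

   #include <stdio.h>

   typedef unsigned int uint32;

   typedef union {
      struct {
         uint32 count : 30;
         uint32 indexedData : 1;
         uint32 instanceData : 1;
      };
      uint32 value;
   } SVGA3dVertexDivisor;

   int main(void)
   {
      SVGA3dVertexDivisor geom = { .value = 0 }, perInst = { .value = 0 };

      /* Stream 0: geometry repeated for each of 16 instances. */
      geom.indexedData = 1;
      geom.count = 16;

      /* Stream 1: one element consumed per instance. */
      perInst.instanceData = 1;
      perInst.count = 1;

      printf("geom=0x%08x perInst=0x%08x\n", geom.value, perInst.value);
      return 0;
   }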
-typedef enum {
-   SVGA3D_PRIMITIVE_INVALID                     = 0,
-   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
-   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
-   SVGA3D_PRIMITIVE_LINELIST                    = 3,
-   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
-   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
-   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
-   SVGA3D_PRIMITIVE_MAX
-} SVGA3dPrimitiveType;
-
-typedef enum {
-   SVGA3D_COORDINATE_INVALID                   = 0,
-   SVGA3D_COORDINATE_LEFTHANDED                = 1,
-   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
-   SVGA3D_COORDINATE_MAX
-} SVGA3dCoordinateType;
-
-typedef enum {
-   SVGA3D_TRANSFORM_INVALID                     = 0,
-   SVGA3D_TRANSFORM_WORLD                       = 1,
-   SVGA3D_TRANSFORM_VIEW                        = 2,
-   SVGA3D_TRANSFORM_PROJECTION                  = 3,
-   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
-   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
-   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
-   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
-   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
-   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
-   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
-   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
-   SVGA3D_TRANSFORM_WORLD1                      = 12,
-   SVGA3D_TRANSFORM_WORLD2                      = 13,
-   SVGA3D_TRANSFORM_WORLD3                      = 14,
-   SVGA3D_TRANSFORM_MAX
-} SVGA3dTransformType;
-
-typedef enum {
-   SVGA3D_LIGHTTYPE_INVALID                     = 0,
-   SVGA3D_LIGHTTYPE_POINT                       = 1,
-   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
-   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
-   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
-   SVGA3D_LIGHTTYPE_MAX
-} SVGA3dLightType;
-
-typedef enum {
-   SVGA3D_CUBEFACE_POSX                         = 0,
-   SVGA3D_CUBEFACE_NEGX                         = 1,
-   SVGA3D_CUBEFACE_POSY                         = 2,
-   SVGA3D_CUBEFACE_NEGY                         = 3,
-   SVGA3D_CUBEFACE_POSZ                         = 4,
-   SVGA3D_CUBEFACE_NEGZ                         = 5,
-} SVGA3dCubeFace;
-
-typedef enum {
-   SVGA3D_SHADERTYPE_INVALID                    = 0,
-   SVGA3D_SHADERTYPE_MIN                        = 1,
-   SVGA3D_SHADERTYPE_VS                         = 1,
-   SVGA3D_SHADERTYPE_PS                         = 2,
-   SVGA3D_SHADERTYPE_MAX                        = 3,
-   SVGA3D_SHADERTYPE_GS                         = 3,
-} SVGA3dShaderType;
-
-#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
-
-typedef enum {
-   SVGA3D_CONST_TYPE_FLOAT                      = 0,
-   SVGA3D_CONST_TYPE_INT                        = 1,
-   SVGA3D_CONST_TYPE_BOOL                       = 2,
-   SVGA3D_CONST_TYPE_MAX
-} SVGA3dShaderConstType;
-
-#define SVGA3D_MAX_SURFACE_FACES                6
-
-typedef enum {
-   SVGA3D_STRETCH_BLT_POINT                     = 0,
-   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
-   SVGA3D_STRETCH_BLT_MAX
-} SVGA3dStretchBltMode;
-
-typedef enum {
-   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
-   SVGA3D_QUERYTYPE_MAX
-} SVGA3dQueryType;
-
-typedef enum {
-   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Waiting on the host (set by guest) */
-   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully (set by host) */
-   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully (set by host) */
-   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (For guest use only) */
-} SVGA3dQueryState;
-
-typedef enum {
-   SVGA3D_WRITE_HOST_VRAM        = 1,
-   SVGA3D_READ_HOST_VRAM         = 2,
-} SVGA3dTransferType;
-
-/*
- * The maximum number of vertex arrays we're guaranteed to support in
- * SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_VERTEX_ARRAYS   32
-
-/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
- * Identifiers for commands in the command FIFO.
- *
- * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
- * the SVGA3D protocol and remain reserved; they should not be used in the
- * future.
- *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
- * current SVGA3D protocol.
- *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
- * and up.
- */
-
-#define SVGA_3D_CMD_LEGACY_BASE            1000
-#define SVGA_3D_CMD_BASE                   1040
-
-#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
-#define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
-#define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
-#define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
-#define SVGA_3D_CMD_SURFACE_DMA            SVGA_3D_CMD_BASE + 4
-#define SVGA_3D_CMD_CONTEXT_DEFINE         SVGA_3D_CMD_BASE + 5
-#define SVGA_3D_CMD_CONTEXT_DESTROY        SVGA_3D_CMD_BASE + 6
-#define SVGA_3D_CMD_SETTRANSFORM           SVGA_3D_CMD_BASE + 7
-#define SVGA_3D_CMD_SETZRANGE              SVGA_3D_CMD_BASE + 8
-#define SVGA_3D_CMD_SETRENDERSTATE         SVGA_3D_CMD_BASE + 9
-#define SVGA_3D_CMD_SETRENDERTARGET        SVGA_3D_CMD_BASE + 10
-#define SVGA_3D_CMD_SETTEXTURESTATE        SVGA_3D_CMD_BASE + 11
-#define SVGA_3D_CMD_SETMATERIAL            SVGA_3D_CMD_BASE + 12
-#define SVGA_3D_CMD_SETLIGHTDATA           SVGA_3D_CMD_BASE + 13
-#define SVGA_3D_CMD_SETLIGHTENABLED        SVGA_3D_CMD_BASE + 14
-#define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
-#define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
-#define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
-#define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
-#define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
-#define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
-#define SVGA_3D_CMD_SET_SHADER_CONST       SVGA_3D_CMD_BASE + 22
-#define SVGA_3D_CMD_DRAW_PRIMITIVES        SVGA_3D_CMD_BASE + 23
-#define SVGA_3D_CMD_SETSCISSORRECT         SVGA_3D_CMD_BASE + 24
-#define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
-#define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
-#define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
-#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_SCREEN_DMA               1082
-#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
-#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE     1084
-
-#define SVGA_3D_CMD_LOGICOPS_BITBLT          1085
-#define SVGA_3D_CMD_LOGICOPS_TRANSBLT        1086
-#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT      1087
-#define SVGA_3D_CMD_LOGICOPS_COLORFILL       1088
-#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND      1089
-#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND  1090
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE          1091
-#define SVGA_3D_CMD_READBACK_OTABLE          1092
-
-#define SVGA_3D_CMD_DEFINE_GB_MOB            1093
-#define SVGA_3D_CMD_DESTROY_GB_MOB           1094
-#define SVGA_3D_CMD_REDEFINE_GB_MOB          1095
-#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING    1096
-
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE        1097
-#define SVGA_3D_CMD_DESTROY_GB_SURFACE       1098
-#define SVGA_3D_CMD_BIND_GB_SURFACE          1099
-#define SVGA_3D_CMD_COND_BIND_GB_SURFACE     1100
-#define SVGA_3D_CMD_UPDATE_GB_IMAGE          1101
-#define SVGA_3D_CMD_UPDATE_GB_SURFACE        1102
-#define SVGA_3D_CMD_READBACK_GB_IMAGE        1103
-#define SVGA_3D_CMD_READBACK_GB_SURFACE      1104
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE      1105
-#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE    1106
-
-#define SVGA_3D_CMD_DEFINE_GB_CONTEXT        1107
-#define SVGA_3D_CMD_DESTROY_GB_CONTEXT       1108
-#define SVGA_3D_CMD_BIND_GB_CONTEXT          1109
-#define SVGA_3D_CMD_READBACK_GB_CONTEXT      1110
-#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT    1111
-
-#define SVGA_3D_CMD_DEFINE_GB_SHADER         1112
-#define SVGA_3D_CMD_DESTROY_GB_SHADER        1113
-#define SVGA_3D_CMD_BIND_GB_SHADER           1114
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE64        1115
-
-#define SVGA_3D_CMD_BEGIN_GB_QUERY           1116
-#define SVGA_3D_CMD_END_GB_QUERY             1117
-#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY        1118
-
-#define SVGA_3D_CMD_NOP                      1119
-
-#define SVGA_3D_CMD_ENABLE_GART              1120
-#define SVGA_3D_CMD_DISABLE_GART             1121
-#define SVGA_3D_CMD_MAP_MOB_INTO_GART        1122
-#define SVGA_3D_CMD_UNMAP_GART_RANGE         1123
-
-#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET   1124
-#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET  1125
-#define SVGA_3D_CMD_BIND_GB_SCREENTARGET     1126
-#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET   1127
-
-#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL   1128
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
-
-#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE  1130
-#define SVGA_3D_CMD_GB_SCREEN_DMA               1131
-#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH  1132
-#define SVGA_3D_CMD_GB_MOB_FENCE                1133
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2        1134
-#define SVGA_3D_CMD_DEFINE_GB_MOB64          1135
-#define SVGA_3D_CMD_REDEFINE_GB_MOB64        1136
-#define SVGA_3D_CMD_NOP_ERROR                1137
-
-#define SVGA_3D_CMD_RESERVED1                1138
-#define SVGA_3D_CMD_RESERVED2                1139
-#define SVGA_3D_CMD_RESERVED3                1140
-#define SVGA_3D_CMD_RESERVED4                1141
-#define SVGA_3D_CMD_RESERVED5                1142
-
-#define SVGA_3D_CMD_MAX                      1142
-#define SVGA_3D_CMD_FUTURE_MAX               3000
-
-/*
- * Common substructures used in multiple FIFO commands:
- */
-
-typedef struct {
-   union {
-      struct {
-         uint16  function;       /* SVGA3dFogFunction */
-         uint8   type;           /* SVGA3dFogType */
-         uint8   base;           /* SVGA3dFogBase */
-      };
-      uint32     uintValue;
-   };
-} SVGA3dFogMode;
-
-/*
- * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
- * is a surface ID as well as face/mipmap indices.
- */
-
-typedef
-struct SVGA3dSurfaceImageId {
-   uint32               sid;
-   uint32               face;
-   uint32               mipmap;
-} SVGA3dSurfaceImageId;
-
-typedef
-struct SVGA3dGuestImage {
-   SVGAGuestPtr         ptr;
-
-   /*
-    * A note on interpretation of pitch: This value of pitch is the
-    * number of bytes between vertically adjacent image
-    * blocks. Normally this is the number of bytes between the first
-    * pixel of two adjacent scanlines. With compressed textures,
-    * however, this may represent the number of bytes between
-    * compression blocks rather than between rows of pixels.
-    *
-    * XXX: Compressed textures currently must be tightly packed in guest memory.
-    *
-    * If the image is 1-dimensional, pitch is ignored.
-    *
-    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
-    * assuming each row of blocks is tightly packed.
-    */
-   uint32 pitch;
-} SVGA3dGuestImage;
-
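Two small helpers can illustrate the pitch rule above: for uncompressed formats, pitch is the byte distance between scanlines, while for 4x4 block-compressed formats such as DXT1 it is the byte distance between rows of blocks. The byte sizes used below are the conventional ones for these formats, not values defined in this header:

   #include <stdio.h>

   typedef unsigned int uint32;

   /* Tightly packed pitch for an uncompressed format (bytes per pixel). */
   static uint32 pitch_uncompressed(uint32 width, uint32 bpp)
   {
      return width * bpp;
   }

   /* Tightly packed pitch for a 4x4-block-compressed format, where pitch
    * is the distance between rows of blocks, not rows of pixels. */
   static uint32 pitch_dxt(uint32 width, uint32 bytes_per_block)
   {
      return ((width + 3) / 4) * bytes_per_block;
   }

   int main(void)
   {
      printf("X8R8G8B8, 640 wide: %u bytes\n", pitch_uncompressed(640, 4));
      printf("DXT1, 640 wide:     %u bytes\n", pitch_dxt(640, 8));
      return 0;
   }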
-
-/*
- * FIFO command format definitions:
- */
-
-/*
- * The data size header following cmdNum for every 3d command
- */
-typedef
-struct {
-   uint32               id;
-   uint32               size;
-} SVGA3dCmdHeader;
-
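A sketch of the resulting wire layout: a header followed immediately by the command body, with 'size' counting the body bytes (the header itself is not included, which matches how the vmwgfx driver fills these in). The local 'fifo' buffer stands in for reserved FIFO space:

   #include <stdio.h>
   #include <string.h>

   typedef unsigned int uint32;

   typedef struct {
      uint32 id;
      uint32 size;
   } SVGA3dCmdHeader;

   typedef struct {
      uint32 cid;
   } SVGA3dCmdDefineContext;

   #define SVGA_3D_CMD_BASE           1040
   #define SVGA_3D_CMD_CONTEXT_DEFINE (SVGA_3D_CMD_BASE + 5)

   int main(void)
   {
      unsigned char fifo[64];           /* stand-in for reserved FIFO space */
      SVGA3dCmdHeader hdr;
      SVGA3dCmdDefineContext body;

      hdr.id = SVGA_3D_CMD_CONTEXT_DEFINE;
      hdr.size = sizeof(body);          /* body bytes only, header excluded */
      body.cid = 0;

      memcpy(fifo, &hdr, sizeof(hdr));
      memcpy(fifo + sizeof(hdr), &body, sizeof(body));

      printf("emitted %zu bytes\n", sizeof(hdr) + hdr.size);
      return 0;
   }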
-/*
- * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
- * optional mipmaps and cube faces.
- */
-
-typedef
-struct {
-   uint32               width;
-   uint32               height;
-   uint32               depth;
-} SVGA3dSize;
-
-typedef enum {
-   SVGA3D_SURFACE_CUBEMAP              = (1 << 0),
-   SVGA3D_SURFACE_HINT_STATIC          = (1 << 1),
-   SVGA3D_SURFACE_HINT_DYNAMIC         = (1 << 2),
-   SVGA3D_SURFACE_HINT_INDEXBUFFER     = (1 << 3),
-   SVGA3D_SURFACE_HINT_VERTEXBUFFER    = (1 << 4),
-   SVGA3D_SURFACE_HINT_TEXTURE         = (1 << 5),
-   SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
-   SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
-   SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
-   SVGA3D_SURFACE_MASKABLE_ANTIALIAS   = (1 << 9),
-   SVGA3D_SURFACE_AUTOGENMIPMAPS       = (1 << 10),
-} SVGA3dSurfaceFlags;
-
-typedef
-struct {
-   uint32               numMipLevels;
-} SVGA3dSurfaceFace;
-
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
-    * structures must have the same value in the numMipLevels field.
-    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
-    * numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
-
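Per the comment above, the number of trailing SVGA3dSize entries is the sum of numMipLevels over all faces; a small sketch of that arithmetic, with hypothetical mip counts:

   #include <stdio.h>

   typedef unsigned int uint32;

   #define SVGA3D_MAX_SURFACE_FACES 6

   /* Number of SVGA3dSize entries that follow the command: one per mip
    * level in each face whose numMipLevels is non-zero. */
   static uint32 num_mip_sizes(const uint32 numMipLevels[SVGA3D_MAX_SURFACE_FACES])
   {
      uint32 i, total = 0;

      for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
         total += numMipLevels[i];
      return total;
   }

   int main(void)
   {
      uint32 cube[SVGA3D_MAX_SURFACE_FACES]  = { 5, 5, 5, 5, 5, 5 }; /* cubemap */
      uint32 plain[SVGA3D_MAX_SURFACE_FACES] = { 5, 0, 0, 0, 0, 0 }; /* 2D */

      printf("cubemap: %u sizes, 2D: %u sizes\n",
             num_mip_sizes(cube), num_mip_sizes(plain));
      return 0;
   }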
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
-    * structures must have the same value in the numMipLevels field.
-    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
-    * numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   uint32                      multisampleCount;
-   SVGA3dTextureFilter         autogenFilter;
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
-   uint32               sid;
-} SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dClearFlag      clearFlag;
-   uint32               color;
-   float                depth;
-   uint32               stencil;
-   /* Followed by variable number of SVGA3dRect structures */
-} SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
-
-typedef
-struct SVGA3dCopyRect {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-   uint32               srcx;
-   uint32               srcy;
-} SVGA3dCopyRect;
-
-typedef
-struct SVGA3dCopyBox {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-   uint32               srcx;
-   uint32               srcy;
-   uint32               srcz;
-} SVGA3dCopyBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-} SVGA3dRect;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-} SVGA3dBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-} SVGA3dPoint;
-
-typedef
-struct {
-   SVGA3dLightType      type;
-   SVGA3dBool           inWorldSpace;
-   float                diffuse[4];
-   float                specular[4];
-   float                ambient[4];
-   float                position[4];
-   float                direction[4];
-   float                range;
-   float                falloff;
-   float                attenuation0;
-   float                attenuation1;
-   float                attenuation2;
-   float                theta;
-   float                phi;
-} SVGA3dLightData;
-
-typedef
-struct {
-   uint32               sid;
-   /* Followed by variable number of SVGA3dCopyRect structures */
-} SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
-
-typedef
-struct {
-   SVGA3dRenderStateName   state;
-   union {
-      uint32               uintValue;
-      float                floatValue;
-   };
-} SVGA3dRenderState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dRenderState structures */
-} SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
-
-typedef
-struct {
-   uint32                 cid;
-   SVGA3dRenderTargetType type;
-   SVGA3dSurfaceImageId   target;
-} SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   /* Followed by variable number of SVGA3dCopyBox structures */
-} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   SVGA3dBox             boxSrc;
-   SVGA3dBox             boxDest;
-   SVGA3dStretchBltMode  mode;
-} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
-
-typedef
-struct {
-   /*
-    * If the discard flag is present in a surface DMA operation, the host may
-    * discard the contents of the current mipmap level and face of the target
-    * surface before applying the surface DMA contents.
-    */
-   uint32 discard : 1;
-
-   /*
-    * If the unsynchronized flag is present, the host may perform this upload
-    * without syncing to pending reads on this surface.
-    */
-   uint32 unsynchronized : 1;
-
-   /*
-    * Guests *MUST* set the reserved bits to 0 before submitting the command
-    * suffix, as future flags may occupy these bits.
-    */
-   uint32 reserved : 30;
-} SVGA3dSurfaceDMAFlags;
-
-typedef
-struct {
-   SVGA3dGuestImage      guest;
-   SVGA3dSurfaceImageId  host;
-   SVGA3dTransferType    transfer;
-   /*
-    * Followed by variable number of SVGA3dCopyBox structures. For consistency
-    * in all clipping logic and coordinate translation, we define the
-    * "source" in each copyBox as the guest image and the
-    * "destination" as the host image, regardless of transfer
-    * direction.
-    *
-    * For efficiency, the SVGA3D device is free to copy more data than
-    * specified. For example, it may round copy boxes outwards such
-    * that they lie on particular alignment boundaries.
-    */
-} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
-
-/*
- * SVGA3dCmdSurfaceDMASuffix --
- *
- *    This is a command suffix that will appear after a SurfaceDMA command in
- *    the FIFO.  It contains some extra information that hosts may use to
- *    optimize performance or protect the guest.  This suffix exists to preserve
- *    backwards compatibility while also allowing for new functionality to be
- *    implemented.
- */
-
-typedef
-struct {
-   uint32 suffixSize;
-
-   /*
-    * The maximum offset is used to determine the maximum offset from the
-    * guestPtr base address that will be accessed or written to during this
-    * surfaceDMA.  If the suffix is supported, the host will respect this
-    * boundary while performing surface DMAs.
-    *
-    * Defaults to MAX_UINT32
-    */
-   uint32 maximumOffset;
-
-   /*
-    * A set of flags that describes optimizations that the host may perform
-    * while carrying out this surface DMA operation.  For correctness, the
-    * guest must never rely on behaviour that changes when these flags are set.
-    *
-    * Defaults to 0
-    */
-   SVGA3dSurfaceDMAFlags flags;
-} SVGA3dCmdSurfaceDMASuffix;
-
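A conservative way to fill in the suffix is to request no optimizations and leave the offset bound at its default; a sketch with trimmed local copies of the structures:

   #include <stdio.h>

   typedef unsigned int uint32;

   typedef struct {
      uint32 discard : 1;
      uint32 unsynchronized : 1;
      uint32 reserved : 30;            /* must be zero */
   } SVGA3dSurfaceDMAFlags;

   typedef struct {
      uint32 suffixSize;
      uint32 maximumOffset;
      SVGA3dSurfaceDMAFlags flags;
   } SVGA3dCmdSurfaceDMASuffix;

   int main(void)
   {
      SVGA3dCmdSurfaceDMASuffix suffix = {0};

      suffix.suffixSize = sizeof(suffix);
      suffix.maximumOffset = 0xffffffffu;  /* default: no extra bound */
      /* flags left zeroed: no optimizations requested, reserved bits clear */

      printf("suffixSize=%u\n", suffix.suffixSize);
      return 0;
   }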
-/*
- * SVGA_3D_CMD_DRAW_PRIMITIVES --
- *
- *   This command is the SVGA3D device's generic drawing entry point.
- *   It can draw multiple ranges of primitives, optionally using an
- *   index buffer, using an arbitrary collection of vertex buffers.
- *
- *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
- *   during this draw call. The declarations specify which surface
- *   the vertex data lives in, what that vertex data is used for,
- *   and how to interpret it.
- *
- *   Each SVGA3dPrimitiveRange defines a collection of primitives
- *   to render using the same vertex arrays. An index buffer is
- *   optional.
- */
-
-typedef
-struct {
-   /*
-    * A range hint is an optional specification for the range of indices
-    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
-    * that the entire array will be used.
-    *
-    * These are only hints. The SVGA3D device may use them for
-    * performance optimization if possible, but it's also allowed to
-    * ignore these values.
-    */
-   uint32               first;
-   uint32               last;
-} SVGA3dArrayRangeHint;
-
-typedef
-struct {
-   /*
-    * Define the origin and shape of a vertex or index array. Both
-    * 'offset' and 'stride' are in bytes. The provided surface will be
-    * reinterpreted as a flat array of bytes in the same format used
-    * by surface DMA operations. To avoid unnecessary conversions, the
-    * surface should be created with the SVGA3D_BUFFER format.
-    *
-    * Index 0 in the array starts 'offset' bytes into the surface.
-    * Index 1 begins at byte 'offset + stride', etc. Array indices may
-    * not be negative.
-    */
-   uint32               surfaceId;
-   uint32               offset;
-   uint32               stride;
-} SVGA3dArray;
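
To make the indexing rule above concrete, the byte position of element i is offset + i * stride; a hypothetical helper:

/* Sketch: byte offset of element 'index' within the surface backing an
 * SVGA3dArray, per the offset/stride rule described above. */
static inline uint32 sketch_array_elem_offset(const SVGA3dArray *array,
                                              uint32 index)
{
   return array->offset + index * array->stride;
}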
-
-typedef
-struct {
-   /*
-    * Describe a vertex array's data type, and define how it is to be
-    * used by the fixed function pipeline or the vertex shader. It
-    * isn't useful to have two VertexDecls with the same
-    * VertexArrayIdentity in one draw call.
-    */
-   SVGA3dDeclType       type;
-   SVGA3dDeclMethod     method;
-   SVGA3dDeclUsage      usage;
-   uint32               usageIndex;
-} SVGA3dVertexArrayIdentity;
-
-typedef
-struct {
-   SVGA3dVertexArrayIdentity  identity;
-   SVGA3dArray                array;
-   SVGA3dArrayRangeHint       rangeHint;
-} SVGA3dVertexDecl;
-
-typedef
-struct {
-   /*
-    * Define a group of primitives to render, from sequential indices.
-    *
-    * The values of 'primType' and 'primitiveCount' imply the
-    * total number of vertices that will be rendered.
-    */
-   SVGA3dPrimitiveType  primType;
-   uint32               primitiveCount;
-
-   /*
-    * Optional index buffer. If indexArray.surfaceId is
-    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
-    * without an index buffer is identical to rendering with an index
-    * buffer containing the sequence [0, 1, 2, 3, ...].
-    *
-    * If an index buffer is in use, indexWidth specifies the width in
-    * bytes of each index value. It must be less than or equal to
-    * indexArray.stride.
-    *
-    * (Currently, the SVGA3D device requires index buffers to be tightly
-    * packed. In other words, indexWidth == indexArray.stride)
-    */
-   SVGA3dArray          indexArray;
-   uint32               indexWidth;
-
-   /*
-    * Optional index bias. This number is added to all indices from
-    * indexArray before they are used as vertex array indices. This
-    * can be used in multiple ways:
-    *
-    *  - When not using an indexArray, this bias can be used to
-    *    specify where in the vertex arrays to begin rendering.
-    *
-    *  - A positive number here is equivalent to increasing the
-    *    offset in each vertex array.
-    *
-    *  - A negative number can be used to render using a small
-    *    vertex array and an index buffer that contains large
-    *    values. This may be used by some applications that
-    *    crop a vertex buffer without modifying their index
-    *    buffer.
-    *
-    * Note that rendering with a negative bias value may be slower and
-    * use more memory than rendering with a positive or zero bias.
-    */
-   int32                indexBias;
-} SVGA3dPrimitiveRange;
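
A minimal sketch of the bias rule (hypothetical helper; 'fetched' is one value read from indexArray, or from the implicit sequence 0, 1, 2, ... when no index buffer is bound):

/* Sketch: effective vertex-array index after applying indexBias. */
static inline int32 sketch_effective_vertex_index(uint32 fetched,
                                                  int32 indexBias)
{
   return (int32)fetched + indexBias;
}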
-
-typedef
-struct {
-   uint32               cid;
-   uint32               numVertexDecls;
-   uint32               numRanges;
-
-   /*
-    * There are two required variable size arrays after the
-    * SVGA3dCmdDrawPrimitives structure, plus an optional third
-    * (see the size sketch after this structure). In order, they are:
-    *
-    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
-    *    SVGA3D_MAX_VERTEX_ARRAYS;
-    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
-    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
-    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
-    *    the frequency divisor for the corresponding vertex decl).
-    */
-} SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
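
A sketch of the command-body size implied by the layout above, assuming SVGA3dVertexDivisor is the structure defined elsewhere in this header:

/* Hypothetical helper: total size in bytes of a DRAW_PRIMITIVES body. */
static inline uint32 sketch_draw_primitives_size(uint32 numVertexDecls,
                                                 uint32 numRanges,
                                                 uint32 withDivisors)
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange) +
          (withDivisors ? numVertexDecls * sizeof(SVGA3dVertexDivisor) : 0);
}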
-
-typedef
-struct {
-   uint32                   stage;
-   SVGA3dTextureStateName   name;
-   union {
-      uint32                value;
-      float                 floatValue;
-   };
-} SVGA3dTextureState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dTextureState structures */
-} SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
-
-typedef
-struct {
-   uint32                   cid;
-   SVGA3dTransformType      type;
-   float                    matrix[16];
-} SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
-
-typedef
-struct {
-   float                min;
-   float                max;
-} SVGA3dZRange;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dZRange         zRange;
-} SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
-
-typedef
-struct {
-   float                diffuse[4];
-   float                ambient[4];
-   float                specular[4];
-   float                emissive[4];
-   float                shininess;
-} SVGA3dMaterial;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dFace           face;
-   SVGA3dMaterial       material;
-} SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   SVGA3dLightData      data;
-} SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   uint32               enabled;
-} SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   float                plane[4];
-} SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-   /* Followed by variable number of DWORDs for shader bytecode */
-} SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-} SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  reg;     /* register number */
-   SVGA3dShaderType        type;
-   SVGA3dShaderConstType   ctype;
-   uint32                  values[4];
-} SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dShaderType     type;
-   uint32               shid;
-} SVGA3dCmdSetShader;             /* SVGA_3D_CMD_SET_SHADER */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;  /* Points to an SVGA3dQueryResult structure */
-} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
-
-typedef
-struct {
-   uint32               cid;          /* Same parameters passed to END_QUERY */
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;
-} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
-
-typedef
-struct {
-   uint32               totalSize;    /* Set by guest before query is ended. */
-   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
-   union {                            /* Set by host on exit from PENDING state */
-      uint32            result32;
-   };
-} SVGA3dQueryResult;
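
The begin/end/wait triple implies a simple guest-side flow: set totalSize and a pending state, issue END_QUERY, then poll. A sketch, assuming SVGA3D_QUERYSTATE_PENDING is the pending enumerator defined elsewhere in this header:

/* Sketch: has the host finished the query?  The guest is expected to
 * have written the pending state before ending the query. */
static inline uint32 sketch_query_done(const volatile SVGA3dQueryResult *qr)
{
   return qr->state != SVGA3D_QUERYSTATE_PENDING;
}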
-
-/*
- * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
- *
- *    This is a blit from an SVGA3D surface to a Screen Object. Just
- *    like GMR-to-screen blits, this blit may be directed at a
- *    specific screen or to the virtual coordinate space.
- *
- *    The blit copies from a rectangular region of an SVGA3D surface
- *    image to a rectangular region of a screen or screens.
- *
- *    This command takes an optional variable-length list of clipping
- *    rectangles after the body of the command. If no rectangles are
- *    specified, there is no clipping region. The entire destRect is
- *    drawn to. If one or more rectangles are included, they describe
- *    a clipping region. The clip rectangle coordinates are measured
- *    relative to the top-left corner of destRect.
- *
- *    This clipping region serves multiple purposes:
- *
- *      - It can be used to perform an irregularly shaped blit more
- *        efficiently than by issuing many separate blit commands.
- *
- *      - It is equivalent to allowing blits with non-integer
- *        source coordinates. You could blit just one half-pixel
- *        of a source, for example, by specifying a larger
- *        destination rectangle than you need, then removing
- *        part of it using a clip rectangle.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
- *
- * Limitations:
- *
- *    - Currently, no backend supports blits from a mipmap or face
- *      other than the first one.
- */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId srcImage;
-   SVGASignedRect       srcRect;
-   uint32               destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
-   SVGASignedRect       destRect;     /* Supports scaling if src/dest differ in size */
-   /* Clipping: zero or more SVGASignedRects follow */
-} SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
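
Because the clip list is variable length, the command size depends on the rectangle count; a hypothetical helper:

/* Sketch: size in bytes of a BLIT_SURFACE_TO_SCREEN body followed by
 * 'numClipRects' clip rectangles. */
static inline uint32 sketch_blit_to_screen_size(uint32 numClipRects)
{
   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
          numClipRects * sizeof(SVGASignedRect);
}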
-
-typedef
-struct {
-   uint32               sid;
-   SVGA3dTextureFilter  filter;
-} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
-/*
- * Guest-backed surface definitions.
- */
-
-typedef uint32 SVGAMobId;
-
-typedef enum SVGAMobFormat {
-   SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
-   SVGA3D_MOBFMT_PTDEPTH_0 = 0,
-   SVGA3D_MOBFMT_PTDEPTH_1 = 1,
-   SVGA3D_MOBFMT_PTDEPTH_2 = 2,
-   SVGA3D_MOBFMT_RANGE     = 3,
-   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
-   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
-   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
-   SVGA3D_MOBFMT_MAX,
-} SVGAMobFormat;
-
-/*
- * Sizes of opaque types.
- */
-
-#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
-#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
-#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
-#define SVGA3D_CONTEXT_DATA_SIZE 16384
-
-/*
- * SVGA3dCmdSetOTableBase --
- *
- * This command allows the guest to specify the base PPN of the
- * specified object table.
- */
-
-typedef enum {
-   SVGA_OTABLE_MOB           = 0,
-   SVGA_OTABLE_MIN           = 0,
-   SVGA_OTABLE_SURFACE       = 1,
-   SVGA_OTABLE_CONTEXT       = 2,
-   SVGA_OTABLE_SHADER        = 3,
-   SVGA_OTABLE_SCREEN_TARGET = 4,
-   SVGA_OTABLE_DX9_MAX       = 5,
-   SVGA_OTABLE_MAX           = 8
-} SVGAOTableType;
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN64 baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
-
-typedef
-struct {
-   SVGAOTableType type;
-} __packed
-SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
-
-/*
- * Define a memory object (Mob) in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
-
-
-/*
- * Destroys an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBMob {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
-/*
- * Redefine an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob;   /* SVGA_3D_CMD_REDEFINE_GB_MOB */
-
-/*
- * Define a memory object (Mob) in the OTable with a PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
-
-/*
- * Redefine an object in the OTable with PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
-
-/*
- * Notification that the page tables have been modified.
- */
-
-typedef
-struct SVGA3dCmdUpdateGBMobMapping {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
-
-/*
- * Define a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDefineGBSurface {
-   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
-   SVGA3dSurfaceFormat format;
-   uint32 numMipLevels;
-   uint32 multisampleCount;
-   SVGA3dTextureFilter autogenFilter;
-   SVGA3dSize size;
-} __packed
-SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
-
-/*
- * Destroy a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
-
-/*
- * Bind a guest-backed surface to an object.
- */
-
-typedef
-struct SVGA3dCmdBindGBSurface {
-   uint32 sid;
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
-
-/*
- * Conditionally bind a mob to a guest-backed surface if testMobid
- * matches the currently bound mob.  Optionally issue a readback on
- * the surface while it is still bound to the old mobid if the mobid
- * is changed by this command.
- */
-
-#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
-
-typedef
-struct{
-   uint32 sid;
-   SVGAMobId testMobid;
-   SVGAMobId mobid;
-   uint32 flags;
-} __packed
-SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
-
-/*
- * Update an image in a guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBImage {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
-
-/*
- * Update an entire guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
-
-/*
- * Readback an image in a guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
-
-/*
- * Readback an entire guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
-
-/*
- * Readback a sub rect of an image in a guest-backed surface.  After
- * issuing this command the driver is required to issue an update call
- * of the same region before issuing any other commands that reference
- * this surface, or rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
-
-/*
- * Invalidate an image in a guest-backed surface.
- * (Notify the device that the contents can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
-
-/*
- * Invalidate an entire guest-backed surface.
- * (Notify the device that the contents of all images can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
-
-/*
- * Invalidate a sub rect of an image in a guest-backed surface.  After
- * issuing this command the driver is required to issue an update call
- * of the same region before issuing any other commands that reference
- * this surface, or rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
-
-/*
- * Define a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDefineGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
-
-/*
- * Destroy a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
-
-/*
- * Bind a guest-backed context.
- *
- * validContents should be set to 0 for new contexts,
- * and 1 if this is an old context that is getting paged
- * back onto the device.
- *
- * For new contexts, it is recommended that the driver
- * issue commands to initialize all interesting state
- * prior to rendering.
- */
-
-typedef
-struct SVGA3dCmdBindGBContext {
-   uint32 cid;
-   SVGAMobId mobid;
-   uint32 validContents;
-} __packed
-SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
-
-/*
- * Readback a guest-backed context.
- * (Request that the device flush the contents back into guest memory.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
-
-/*
- * Invalidate a guest-backed context.
- */
-typedef
-struct SVGA3dCmdInvalidateGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
-
-/*
- * Define a guest-backed shader.
- */
-
-typedef
-struct SVGA3dCmdDefineGBShader {
-   uint32 shid;
-   SVGA3dShaderType type;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
-
-/*
- * Bind a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdBindGBShader {
-   uint32 shid;
-   SVGAMobId mobid;
-   uint32 offsetInBytes;
-} __packed
-SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
-
-/*
- * Destroy a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdDestroyGBShader {
-   uint32 shid;
-} __packed
-SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  regStart;
-   SVGA3dShaderType        shaderType;
-   SVGA3dShaderConstType   constType;
-
-   /*
-    * Followed by a variable number of shader constants.
-    *
-    * Note that FLOAT and INT constants are 4-dwords in length, while
-    * BOOL constants are 1-dword in length.
-    */
-} __packed
-SVGA3dCmdSetGBShaderConstInline;
-/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} __packed
-SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
-
-
-/*
- * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
- *
- *    The semantics of this command are identical to the
- *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
- *    to a Mob instead of a GMR.
- */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 fbOffset;
-   uint32 initalized;
-} __packed
-SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 gartOffset;
-} __packed
-SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
-
-
-typedef
-struct {
-   uint32 gartOffset;
-   uint32 numPages;
-} __packed
-SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
-
-
-/*
- * Screen Targets
- */
-#define SVGA_STFLAG_PRIMARY (1 << 0)
-
-typedef
-struct {
-   uint32 stid;
-   uint32 width;
-   uint32 height;
-   int32 xRoot;
-   int32 yRoot;
-   uint32 flags;
-} __packed
-SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-} __packed
-SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
-
-/*
- * Capability query index.
- *
- * Notes:
- *
- *   1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- *      fixed-function texture units available. Each of these units
- *      works in both FFP and Shader modes, and they support texture
- *      transforms and texture coordinates. The host may have additional
- *      texture image units that are only usable with shaders.
- *
- *   2. The BUFFER_FORMAT capabilities are deprecated, and they always
- *      return TRUE. Even on physical hardware that does not support
- *      these formats natively, the SVGA3D device will provide an emulation
- *      which should be invisible to the guest OS.
- *
- *      In general, the SVGA3D device should support any operation on
- *      any surface format; it just may perform some of these
- *      operations in software depending on the capabilities of the
- *      available physical hardware.
- *
- *      XXX: In the future, we will add capabilities that describe in
- *      detail what formats are supported in hardware for what kinds
- *      of operations.
- */
-
-typedef enum {
-   SVGA3D_DEVCAP_3D                                = 0,
-   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
-   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,  /* See note (1) */
-   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
-   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
-   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
-   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
-   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
-   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
-   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
-   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12, /* See note (2) */
-   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13, /* See note (2) */
-   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14, /* See note (2) */
-   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
-   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
-   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
-   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
-   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
-   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
-   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
-   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
-   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
-   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
-   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
-   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
-   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
-   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
-   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
-   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
-   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
-   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
-   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
-   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
-   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
-   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
-
-   /*
-    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
- *    render targets.  This does not include the depth or stencil targets.
-    */
-   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
-
-   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
-   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
-   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
-   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
-   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
-   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
-   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
-   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
-   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
-   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
-   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
-   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
-
-   /*
-    * This is the maximum number of SVGA context IDs that the guest
-    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
-    */
-   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
-
-   /*
-    * This is the maximum number of SVGA surface IDs that the guest
-    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
-    */
-   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
-
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
-
-   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
-   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
-
-   /*
-    * Deprecated.
-    */
-   SVGA3D_DEVCAP_VGPU10                            = 84,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
-    * OR'ed together, one for every type of video decoding supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
-    * OR'ed together, one for every type of video processing supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
-
-   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
-   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
-   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
-   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
-
-   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
-
-   /*
-    * Does the host support the SVGA logic ops commands?
-    */
-   SVGA3D_DEVCAP_LOGICOPS                          = 92,
-
-   /*
-    * What support does the host have for screen targets?
-    *
-    * See the SVGA3D_SCREENTARGET_CAP bits below.
-    */
-   SVGA3D_DEVCAP_SCREENTARGETS                     = 93,
-
-   SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
-} SVGA3dDevCapIndex;
-
-typedef union {
-   Bool   b;
-   uint32 u;
-   int32  i;
-   float  f;
-} SVGA3dDevCapResult;
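
The union carries no type tag, so the caller must know which member a given index uses; the LINE_* entries above are annotated boolean/float. Two hypothetical accessors for illustration:

/* Sketch: SVGA3D_DEVCAP_LINE_AA is documented above as a boolean,
 * SVGA3D_DEVCAP_MAX_LINE_WIDTH as a float. */
static inline Bool sketch_cap_line_aa(const SVGA3dDevCapResult *r)
{
   return r->b;
}

static inline float sketch_cap_max_line_width(const SVGA3dDevCapResult *r)
{
   return r->f;
}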
-
-typedef enum {
-   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
-   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
-} SVGA3dCapsRecordType;
-
-typedef
-struct SVGA3dCapsRecordHeader {
-   uint32 length;
-   SVGA3dCapsRecordType type;
-}
-SVGA3dCapsRecordHeader;
-
-typedef
-struct SVGA3dCapsRecord {
-   SVGA3dCapsRecordHeader header;
-   uint32 data[1];
-}
-SVGA3dCapsRecord;
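
This header does not say how a sequence of these records is terminated. A common convention (assumed here, not stated above) is that 'length' counts 32-bit words including the header and that a zero length ends the list; a hedged walker under those assumptions:

/* Sketch of walking a caps block.  ASSUMPTIONS, not stated in this
 * header: 'length' is in 32-bit words including the header, and a
 * record with length == 0 terminates the list. */
static inline const SVGA3dCapsRecord *
sketch_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
{
   const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)caps;

   while (rec->header.length != 0) {
      if (rec->header.type == type)
         return rec;
      rec = (const SVGA3dCapsRecord *)((const uint32 *)rec +
                                       rec->header.length);
   }
   return 0;   /* not found */
}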
-
-
-typedef uint32 SVGA3dCapPair[2];
-
-#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef33850..0000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
-#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y)  ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at most four active channels in a block:
- *    1. Blue and bump U are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Red, bump W, luminance and depth are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represents the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
-	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
-	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
-						    data */
-	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
-						    data */
-	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
-						    U and V */
-	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
-						    data */
-	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
-						    data */
-	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
-						    channel */
-	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
-						    data */
-	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
-						    data */
-	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
-						    data */
-	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
-						    data */
-	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
-	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
-						    channel */
-	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
-						    data */
-	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
-						    data */
-	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
-						    data depending on the
-						    compression method used */
-	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
-						    floating point
-						    representation in
-						    all channels */
-	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Data is stored in separate
-						    planes. */
-	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
-	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
-	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
-	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
-	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
-						    e.g., NV12. */
-	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
-						    Y, U, V, e.g., YV12. */
-
-	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
-				     SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
-				     SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
-				     SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
-				     SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
-				     SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
-				     SVGA3DBLOCKDESC_V,
-	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
-				     SVGA3DBLOCKDESC_LUMINANCE,
-	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
-				     SVGA3DBLOCKDESC_W,
-	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
-				     SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
-				     SVGA3DBLOCKDESC_V |
-				     SVGA3DBLOCKDESC_W |
-				     SVGA3DBLOCKDESC_Q,
-	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
-				     SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
-				     SVGA3DBLOCKDESC_IEEE_FP,
-	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
-				     SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
-				     SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
-				     SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
-				     SVGA3DBLOCKDESC_STENCIL,
-	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
-				     SVGA3DBLOCKDESC_Y,
-	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
-				     SVGA3DBLOCKDESC_Y |
-				     SVGA3DBLOCKDESC_U_VIDEO |
-				     SVGA3DBLOCKDESC_V_VIDEO,
-	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
-				     SVGA3DBLOCKDESC_EXP,
-	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-					  SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-				     SVGA3DBLOCKDESC_2PLANAR_YUV,
-	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-				     SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type)		\
-	struct {				\
-		union {				\
-			type blue;              \
-			type u;                 \
-			type uv_video;          \
-			type u_video;           \
-		};				\
-		union {				\
-			type green;             \
-			type v;                 \
-			type stencil;           \
-			type v_video;           \
-		};				\
-		union {				\
-			type red;               \
-			type w;                 \
-			type luminance;         \
-			type y;                 \
-			type depth;             \
-			type data;              \
-		};				\
-		union {				\
-			type alpha;             \
-			type q;                 \
-			type exp;               \
-		};				\
-	}
-
-struct svga3d_surface_desc {
-	enum svga3d_block_desc block_desc;
-	surf_size_struct block_size;
-	u32 bytes_per_block;
-	u32 pitch_bytes_per_block;
-
-	struct {
-		u32 total;
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_depth;
-
-	struct {
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_offset;
-};
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
-	{SVGA3DBLOCKDESC_NONE,
-	 {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
-	 {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
-	 {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
-	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
-	 {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
-	 {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
-	 {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
-	 {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
-
-	{SVGA3DBLOCKDESC_ALPHA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
-
-	{SVGA3DBLOCKDESC_BUFFER,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
-	 {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
-
-	{SVGA3DBLOCKDESC_NV12,
-	 {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
-
-	{SVGA3DBLOCKDESC_AYUV,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
-
-	{SVGA3DBLOCKDESC_UVW,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
-	 {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBE,
-	 {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
-	 {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_SRGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
-};
-
-static inline u32 clamped_umul32(u32 a, u32 b)
-{
-	u64 tmp = (u64) a*b;
-	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
-}
-
-static inline const struct svga3d_surface_desc *
-svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
-{
-	if (format < ARRAY_SIZE(svga3d_surface_descs))
-		return &svga3d_surface_descs[format];
-
-	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- *      Given a base level size and the mip level, compute the size of
- *      the mip level.
- *
- * Results:
- *      See above.
- *
- * Side effects:
- *      None.
- *
- *----------------------------------------------------------------------
- */
-
-static inline surf_size_struct
-svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
-{
-	surf_size_struct size;
-
-	size.width = max_t(u32, base_level.width >> mip_level, 1);
-	size.height = max_t(u32, base_level.height >> mip_level, 1);
-	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
-	return size;
-}
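
For example, a 256x64x1 base level yields 128x32x1 at mip 1, 64x16x1 at mip 2, and so on, each dimension clamping at 1 (so mip 8 is 1x1x1). An illustrative caller:

/* Illustrative use of svga3dsurface_get_mip_size(): visit every level
 * of a 9-level chain built on a 256x64x1 base. */
static inline void sketch_visit_mip_chain(void)
{
	surf_size_struct base = { .width = 256, .height = 64, .depth = 1 };
	u32 mip;

	for (mip = 0; mip < 9; mip++) {
		surf_size_struct s = svga3dsurface_get_mip_size(base, mip);
		(void)s;	/* mip 0: 256x64x1, mip 1: 128x32x1, ... */
	}
}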
-
-static inline void
-svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
-				 const surf_size_struct *pixel_size,
-				 surf_size_struct *block_size)
-{
-	block_size->width = DIV_ROUND_UP(pixel_size->width,
-					 desc->block_size.width);
-	block_size->height = DIV_ROUND_UP(pixel_size->height,
-					  desc->block_size.height);
-	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
-					 desc->block_size.depth);
-}
-
-static inline bool
-svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
-{
-	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
-}
-
-static inline u32
-svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
-			      const surf_size_struct *size)
-{
-	u32 pitch;
-	surf_size_struct blocks;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
-
-	pitch = blocks.width * desc->pitch_bytes_per_block;
-
-	return pitch;
-}
-
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- *      Return the number of bytes of buffer space required to store
- *      one image of a surface, optionally using the specified pitch.
- *
- *      If pitch is zero, it is assumed that rows are tightly packed.
- *
- *      This function is overflow-safe. If the result would have
- *      overflowed, U32_MAX is returned instead.
- *
- * Results:
- *      Byte count.
- *
- * Side effects:
- *      None.
- *
- *-----------------------------------------------------------------------------
- */
-
-static inline u32
-svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
-				    const surf_size_struct *size,
-				    u32 pitch)
-{
-	surf_size_struct image_blocks;
-	u32 slice_size, total_size;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
-
-	if (svga3dsurface_is_planar_surface(desc)) {
-		total_size = clamped_umul32(image_blocks.width,
-					    image_blocks.height);
-		total_size = clamped_umul32(total_size, image_blocks.depth);
-		total_size = clamped_umul32(total_size, desc->bytes_per_block);
-		return total_size;
-	}
-
-	if (pitch == 0)
-		pitch = svga3dsurface_calculate_pitch(desc, size);
-
-	slice_size = clamped_umul32(image_blocks.height, pitch);
-	total_size = clamped_umul32(slice_size, image_blocks.depth);
-
-	return total_size;
-}
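/*
 * A worked example (format and size assumed): SVGA3D_BC5_UNORM uses
 * 4x4 blocks of 16 bytes, so a 64x64 image is 16x16 blocks, the packed
 * pitch is 16 * 16 = 256 bytes, and the whole image needs
 * 16 * 256 = 4096 bytes.
 */
static inline u32 image_buffer_size_example(void)
{
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_BC5_UNORM);
	surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

	return svga3dsurface_get_image_buffer_size(desc, &size, 0); /* 4096 */
}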
-
-static inline u32
-svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
-				  surf_size_struct base_level_size,
-				  u32 num_mip_levels,
-				  bool cubemap)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	u64 total_size = 0;
-	u32 mip;
-
-	for (mip = 0; mip < num_mip_levels; mip++) {
-		surf_size_struct size =
-			svga3dsurface_get_mip_size(base_level_size, mip);
-		total_size += svga3dsurface_get_image_buffer_size(desc,
-								  &size, 0);
-	}
-
-	if (cubemap)
-		total_size *= SVGA3D_MAX_SURFACE_FACES;
-
-	return (u32) min_t(u64, total_size, (u64) U32_MAX);
-}
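/*
 * A minimal sketch (parameters assumed): each cube face stores the
 * whole mip chain, so the per-face sum is multiplied by
 * SVGA3D_MAX_SURFACE_FACES, and the u64 accumulator is clamped to
 * U32_MAX on return.
 */
static inline u32 serialized_size_example(void)
{
	surf_size_struct base = { .width = 128, .height = 128, .depth = 1 };

	/* Full mip chain of a 128x128 cubemap: 8 levels, 6 faces. */
	return svga3dsurface_get_serialized_size(SVGA3D_BC5_UNORM,
						 base, 8, true);
}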
-
-
-/**
- * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
- * in an image (or volume).
- *
- * @format: The surface format.
- * @width: The image width in pixels.
- * @height: The image height in pixels.
- * @x: The pixel x coordinate.
- * @y: The pixel y coordinate.
- * @z: The pixel z coordinate (depth slice).
- */
-static inline u32
-svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
-			       u32 width, u32 height,
-			       u32 x, u32 y, u32 z)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
-	const u32 bd = desc->block_size.depth;
-	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
-	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
-	const u32 offset = (z / bd * imgstride +
-			    y / bh * rowstride +
-			    x / bw * desc->bytes_per_block);
-	return offset;
-}
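/*
 * A worked example (values assumed), again for SVGA3D_BC5_UNORM
 * (4x4 blocks, 16 bytes/block) in a 64x64 image:
 *   rowstride = DIV_ROUND_UP(64, 4) * 16 = 256
 *   offset(x=9, y=5, z=0) = (5/4) * 256 + (9/4) * 16 = 288
 * i.e. the result addresses the compression block containing the
 * pixel, not the pixel itself.
 */
static inline u32 pixel_offset_example(void)
{
	return svga3dsurface_get_pixel_offset(SVGA3D_BC5_UNORM,
					      64, 64, 9, 5, 0); /* 288 */
}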
-
-
-static inline u32
-svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
-			       surf_size_struct baseLevelSize,
-			       u32 numMipLevels,
-			       u32 face,
-			       u32 mip)
-
-{
-	u32 offset;
-	u32 mipChainBytes;
-	u32 mipChainBytesToLevel;
-	u32 i;
-	const struct svga3d_surface_desc *desc;
-	surf_size_struct mipSize;
-	u32 bytes;
-
-	desc = svga3dsurface_get_desc(format);
-
-	mipChainBytes = 0;
-	mipChainBytesToLevel = 0;
-	for (i = 0; i < numMipLevels; i++) {
-		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
-		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
-		mipChainBytes += bytes;
-		if (i < mip)
-			mipChainBytesToLevel += bytes;
-	}
-
-	offset = mipChainBytes * face + mipChainBytesToLevel;
-
-	return offset;
-}
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
deleted file mode 100644
index 8e8d968..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_escape.h --
- *
- *    Definitions for our own (vendor-specific) SVGA Escape commands.
- */
-
-#ifndef _SVGA_ESCAPE_H_
-#define _SVGA_ESCAPE_H_
-
-
-/*
- * Namespace IDs for the escape command
- */
-
-#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
-#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
-
-
-/*
- * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
- * the first DWORD of escape data (after the nsID and size). As a
- * guideline we're using the high word and low word as a major and
- * minor command number, respectively.
- *
- * Major command number allocation:
- *
- *   0000: Reserved
- *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
- *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
- *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
- */
-
-#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
-
-
-/*
- * SVGA Hint commands.
- *
- * These escapes let the SVGA driver provide optional information to
- * the host about the state of the guest or guest applications. The
- * host can use these hints to make user interface or performance
- * decisions.
- *
- * Notes:
- *
- *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
- *     that use the SVGA Screen Object extension. Instead of sending
- *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
- *     Screen Object.
- */
-
-#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
-
-typedef
-struct {
-   uint32 command;
-   uint32 fullscreen;
-   struct {
-      int32 x, y;
-   } monitorPosition;
-} SVGAEscapeHintFullscreen;
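/*
 * A minimal sketch of filling in the hint payload. The first dword
 * ('command') is what the host uses to demultiplex the escape within
 * SVGA_ESCAPE_NSID_VMWARE; delivery wrapped in an SVGAFifoCmdEscape
 * (see svga_reg.h) is not shown here.
 */
static inline void fullscreen_hint_example(SVGAEscapeHintFullscreen *hint)
{
   hint->command = SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN; /* deprecated */
   hint->fullscreen = 1;
   hint->monitorPosition.x = 0;
   hint->monitorPosition.y = 0;
}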
-
-#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
deleted file mode 100644
index f38416f..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_overlay.h --
- *
- *    Definitions for video-overlay support.
- */
-
-#ifndef _SVGA_OVERLAY_H_
-#define _SVGA_OVERLAY_H_
-
-#include "svga_reg.h"
-
-/*
- * Video formats we support
- */
-
-#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
-#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
-#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
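/*
 * A hypothetical helper (not part of this header) showing how the
 * constants above pack four ASCII characters little-endian:
 * VMWARE_FOURCC('Y', 'V', '1', '2') == 0x32315659.
 */
#define VMWARE_FOURCC(a, b, c, d)                       \
   ((uint32)(a) | ((uint32)(b) << 8) |                  \
    ((uint32)(c) << 16) | ((uint32)(d) << 24))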
-
-typedef enum {
-   SVGA_OVERLAY_FORMAT_INVALID = 0,
-   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
-   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
-   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
-} SVGAOverlayFormat;
-
-#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
-
-#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
-
-#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
-        /* FIFO escape layout:
-         * Type, Stream Id, (Register Id, Value) pairs */
-
-#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
-        /* FIFO escape layout:
-         * Type, Stream Id */
-
-typedef
-struct SVGAEscapeVideoSetRegs {
-   struct {
-      uint32 cmdType;
-      uint32 streamId;
-   } header;
-
-   /* May include zero or more items. */
-   struct {
-      uint32 registerId;
-      uint32 value;
-   } items[1];
-} SVGAEscapeVideoSetRegs;
-
-typedef
-struct SVGAEscapeVideoFlush {
-   uint32 cmdType;
-   uint32 streamId;
-} SVGAEscapeVideoFlush;
-
-
-/*
- * Struct definitions for the video overlay commands built on
- * SVGAFifoCmdEscape.
- */
-typedef
-struct {
-   uint32 command;
-   uint32 overlay;
-} SVGAFifoEscapeCmdVideoBase;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-} SVGAFifoEscapeCmdVideoFlush;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-   struct {
-      uint32 regId;
-      uint32 value;
-   } items[1];
-} SVGAFifoEscapeCmdVideoSetRegs;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-   struct {
-      uint32 regId;
-      uint32 value;
-   } items[SVGA_VIDEO_NUM_REGS];
-} SVGAFifoEscapeCmdVideoSetAllRegs;
-
-
-/*
- *----------------------------------------------------------------------
- *
- * VMwareVideoGetAttributes --
- *
- *      Computes the size, pitches and offsets for YUV frames.
- *
- * Results:
- *      TRUE on success; FALSE on failure.
- *
- * Side effects:
- *      Pitches and offsets for the given YUV frame are put in 'pitches'
- *      and 'offsets' respectively. They are both optional though.
- *
- *----------------------------------------------------------------------
- */
-
-static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
-                         uint32 *width,                     /* IN / OUT */
-                         uint32 *height,                    /* IN / OUT */
-                         uint32 *size,                      /* OUT */
-                         uint32 *pitches,                   /* OUT (optional) */
-                         uint32 *offsets)                   /* OUT (optional) */
-{
-    int tmp;
-
-    *width = (*width + 1) & ~1;
-
-    if (offsets) {
-        offsets[0] = 0;
-    }
-
-    switch (format) {
-    case VMWARE_FOURCC_YV12:
-       *height = (*height + 1) & ~1;
-       *size = (*width + 3) & ~3;
-
-       if (pitches) {
-          pitches[0] = *size;
-       }
-
-       *size *= *height;
-
-       if (offsets) {
-          offsets[1] = *size;
-       }
-
-       tmp = ((*width >> 1) + 3) & ~3;
-
-       if (pitches) {
-          pitches[1] = pitches[2] = tmp;
-       }
-
-       tmp *= (*height >> 1);
-       *size += tmp;
-
-       if (offsets) {
-          offsets[2] = *size;
-       }
-
-       *size += tmp;
-       break;
-
-    case VMWARE_FOURCC_YUY2:
-    case VMWARE_FOURCC_UYVY:
-       *size = *width * 2;
-
-       if (pitches) {
-          pitches[0] = *size;
-       }
-
-       *size *= *height;
-       break;
-
-    default:
-       return false;
-    }
-
-    return true;
-}
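/*
 * A worked example (frame size assumed): for a 320x240 YV12 frame the
 * function yields luma pitch 320 and luma size 320*240 = 76800, chroma
 * pitches 160 with two planes of 160*120 = 19200 bytes each, offsets
 * {0, 76800, 96000}, and a total size of 115200 bytes.
 */
static inline bool video_attributes_example(void)
{
    uint32 w = 320, h = 240, size;
    uint32 pitches[3], offsets[3];

    return VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                    &w, &h, &size, pitches, offsets);
}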
-
-#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
deleted file mode 100644
index e4259c2..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ /dev/null
@@ -1,1564 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_reg.h --
- *
- *    Virtual hardware definitions for the VMware SVGA II device.
- */
-
-#ifndef _SVGA_REG_H_
-#define _SVGA_REG_H_
-
-/*
- * PCI device IDs.
- */
-#define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
-
-/*
- * SVGA_REG_ENABLE bit definitions.
- */
-#define SVGA_REG_ENABLE_DISABLE     0
-#define SVGA_REG_ENABLE_ENABLE      1
-#define SVGA_REG_ENABLE_HIDE        2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
-				     SVGA_REG_ENABLE_HIDE)
-
-/*
- * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
- * cursor bypass mode. This is still supported, but no new guest
- * drivers should use it.
- */
-#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
-
-/*
- * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- *     See bug 335072.
- */
-#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
-
-#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
-#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
-#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
-
-#define SVGA_MAGIC         0x900000UL
-#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
-
-/* Version 2 let the address of the frame buffer be unsigned on Win32 */
-#define SVGA_VERSION_2     2
-#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
-
-/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
-   PALETTE_BASE has moved */
-#define SVGA_VERSION_1     1
-#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
-
-/* Version 0 is the initial version */
-#define SVGA_VERSION_0     0
-#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
-
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
-#define SVGA_ID_INVALID    0xFFFFFFFF
-
-/* Port offsets, relative to BAR0 */
-#define SVGA_INDEX_PORT         0x0
-#define SVGA_VALUE_PORT         0x1
-#define SVGA_BIOS_PORT          0x2
-#define SVGA_IRQSTATUS_PORT     0x8
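/*
 * A minimal sketch of indirect register access (helper names and x86
 * port I/O via outl()/inl() are assumptions): write the register index
 * to the index port, then access the value port, both as 32-bit
 * accesses relative to BAR0.
 */
static inline uint32 svga_read_reg(uint32 io_base, uint32 index)
{
   outl(index, io_base + SVGA_INDEX_PORT);
   return inl(io_base + SVGA_VALUE_PORT);
}

static inline void svga_write_reg(uint32 io_base, uint32 index, uint32 value)
{
   outl(index, io_base + SVGA_INDEX_PORT);
   outl(value, io_base + SVGA_VALUE_PORT);
}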
-
-/*
- * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
- *
- * Interrupts are only supported when the
- * SVGA_CAP_IRQMASK capability is present.
- */
-#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
-#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
-#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
-
-/*
- * Registers
- */
-
-enum {
-   SVGA_REG_ID = 0,
-   SVGA_REG_ENABLE = 1,
-   SVGA_REG_WIDTH = 2,
-   SVGA_REG_HEIGHT = 3,
-   SVGA_REG_MAX_WIDTH = 4,
-   SVGA_REG_MAX_HEIGHT = 5,
-   SVGA_REG_DEPTH = 6,
-   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
-   SVGA_REG_PSEUDOCOLOR = 8,
-   SVGA_REG_RED_MASK = 9,
-   SVGA_REG_GREEN_MASK = 10,
-   SVGA_REG_BLUE_MASK = 11,
-   SVGA_REG_BYTES_PER_LINE = 12,
-   SVGA_REG_FB_START = 13,            /* (Deprecated) */
-   SVGA_REG_FB_OFFSET = 14,
-   SVGA_REG_VRAM_SIZE = 15,
-   SVGA_REG_FB_SIZE = 16,
-
-   /* ID 0 implementation only had the above registers, then the palette */
-
-   SVGA_REG_CAPABILITIES = 17,
-   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
-   SVGA_REG_MEM_SIZE = 19,
-   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
-   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
-   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
-   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
-   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
-   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
-   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
-   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
-   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
-   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
-   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
-   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
-
-   /* Legacy multi-monitor support */
-   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
-   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
-   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
-   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
-   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
-   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
-   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
-
-   /* See "Guest memory regions" below. */
-   SVGA_REG_GMR_ID = 41,
-   SVGA_REG_GMR_DESCRIPTOR = 42,
-   SVGA_REG_GMR_MAX_IDS = 43,
-   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
-
-   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
-   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
-   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits of command buffer PA; write submits */
-   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
-   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
-   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
-   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
-   SVGA_REG_CMD_PREPEND_LOW = 53,
-   SVGA_REG_CMD_PREPEND_HIGH = 54,
-   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
-   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
-   SVGA_REG_MOB_MAX_SIZE = 57,
-   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
-
-   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
-   /* Next 768 (== 256*3) registers exist for colormap */
-
-   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
-                                    /* Base of scratch registers */
-   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
-      First 4 are reserved for VESA BIOS Extension; any remaining are for
-      the use of the current SVGA driver. */
-};
-
-
-/*
- * Guest memory regions (GMRs):
- *
- * This is a new memory mapping feature available in SVGA devices
- * which have the SVGA_CAP_GMR bit set. Previously, there were two
- * fixed memory regions available with which to share data between the
- * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
- * are our name for an extensible way of providing arbitrary DMA
- * buffers for use between the driver and the SVGA device. They are a
- * new alternative to framebuffer memory, usable for both 2D and 3D
- * graphics operations.
- *
- * Since GMR mapping must be done synchronously with guest CPU
- * execution, we use a new pair of SVGA registers:
- *
- *   SVGA_REG_GMR_ID --
- *
- *     Read/write.
- *     This register holds the 32-bit ID (a small positive integer)
- *     of a GMR to create, delete, or redefine. Writing this register
- *     has no side-effects.
- *
- *   SVGA_REG_GMR_DESCRIPTOR --
- *
- *     Write-only.
- *     Writing this register will create, delete, or redefine the GMR
- *     specified by the above ID register. If this register is zero,
- *     the GMR is deleted. Any pointers into this GMR (including those
- *     currently being processed by FIFO commands) will be
- *     synchronously invalidated.
- *
- *     If this register is nonzero, it must be the physical page
- *     number (PPN) of a data structure which describes the physical
- *     layout of the memory region this GMR should describe. The
- *     descriptor structure will be read synchronously by the SVGA
- *     device when this register is written. The descriptor need not
- *     remain allocated for the lifetime of the GMR.
- *
- *     The guest driver should write SVGA_REG_GMR_ID first, then
- *     SVGA_REG_GMR_DESCRIPTOR.
- *
- *   SVGA_REG_GMR_MAX_IDS --
- *
- *     Read-only.
- *     The SVGA device may choose to support a maximum number of
- *     user-defined GMR IDs. This register holds the number of supported
- *     IDs. (The maximum supported ID plus 1)
- *
- *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
- *
- *     Read-only.
- *     The SVGA device may choose to put a limit on the total number
- *     of SVGAGuestMemDescriptor structures it will read when defining
- *     a single GMR.
- *
- * The descriptor structure is an array of SVGAGuestMemDescriptor
- * structures. Each structure may do one of three things:
- *
- *   - Terminate the GMR descriptor list.
- *     (ppn==0, numPages==0)
- *
- *   - Add a PPN or range of PPNs to the GMR's virtual address space.
- *     (ppn != 0, numPages != 0)
- *
- *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
- *     support multi-page GMR descriptor tables without forcing the
- *     driver to allocate physically contiguous memory.
- *     (ppn != 0, numPages == 0)
- *
- * Note that each physical page of SVGAGuestMemDescriptor structures
- * can describe at least 2MB of guest memory. If the driver needs to
- * use more than one page of descriptor structures, it must use one of
- * its SVGAGuestMemDescriptors to point to an additional page.  The
- * device will never automatically cross a page boundary.
- *
- * Once the driver has described a GMR, it is immediately available
- * for use via any FIFO command that uses an SVGAGuestPtr structure.
- * These pointers include a GMR identifier plus an offset into that
- * GMR.
- *
- * The driver must check the SVGA_CAP_GMR bit before using the GMR
- * registers.
- */
-
-/*
- * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
- * memory as well.  In the future, these IDs could even be used to
- * allow legacy memory regions to be redefined by the guest as GMRs.
- *
- * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
- * is being phased out. Please try to use user-defined GMRs whenever
- * possible.
- */
-#define SVGA_GMR_NULL         ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
-
-typedef
-struct SVGAGuestMemDescriptor {
-   uint32 ppn;
-   uint32 numPages;
-} SVGAGuestMemDescriptor;
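/*
 * A minimal sketch (PPN values assumed): a single descriptor page
 * mapping two physical ranges, terminated by (ppn == 0, numPages == 0).
 * The PPN of 'descs' itself is what gets written to
 * SVGA_REG_GMR_DESCRIPTOR, after selecting the GMR in SVGA_REG_GMR_ID.
 */
static const SVGAGuestMemDescriptor descs[] = {
   { .ppn = 0x1000, .numPages = 16 },   /* 16 pages at PPN 0x1000 */
   { .ppn = 0x2000, .numPages = 8 },    /* 8 pages at PPN 0x2000 */
   { .ppn = 0,      .numPages = 0 },    /* terminator */
};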
-
-typedef
-struct SVGAGuestPtr {
-   uint32 gmrId;
-   uint32 offset;
-} SVGAGuestPtr;
-
-
-/*
- * SVGAGMRImageFormat --
- *
- *    This is a packed representation of the source 2D image format
- *    for a GMR-to-screen blit. Currently it is defined as an encoding
- *    of the screen's color depth and bits-per-pixel, however, 16 bits
- *    are reserved for future use to identify other encodings (such as
- *    RGBA or higher-precision images).
- *
- *    Currently supported formats:
- *
- *       bpp depth  Format Name
- *       --- -----  -----------
- *        32    24  32-bit BGRX
- *        24    24  24-bit BGR
- *        16    16  RGB 5-6-5
- *        16    15  RGB 5-5-5
- *
- */
-
-typedef
-struct SVGAGMRImageFormat {
-   union {
-      struct {
-         uint32 bitsPerPixel : 8;
-         uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  /* Must be zero */
-      };
-
-      uint32 value;
-   };
-} SVGAGMRImageFormat;
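/*
 * A minimal sketch of the packing, assuming the little-endian bitfield
 * layout this device targets: 32-bit BGRX (bpp 32, depth 24) occupies
 * the low two bytes, giving fmt.value == 0x00001820.
 */
static inline uint32 gmr_image_format_example(void)
{
   SVGAGMRImageFormat fmt;

   fmt.value = 0;
   fmt.bitsPerPixel = 32;   /* bits 0..7  */
   fmt.colorDepth = 24;     /* bits 8..15 */
   return fmt.value;        /* 0x00001820 */
}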
-
-typedef
-struct SVGAGuestImage {
-   SVGAGuestPtr         ptr;
-
-   /*
-    * A note on interpretation of pitch: This value of pitch is the
-    * number of bytes between vertically adjacent image
-    * blocks. Normally this is the number of bytes between the first
-    * pixel of two adjacent scanlines. With compressed textures,
-    * however, this may represent the number of bytes between
-    * compression blocks rather than between rows of pixels.
-    *
-    * XXX: Compressed textures currently must be tightly packed in guest memory.
-    *
-    * If the image is 1-dimensional, pitch is ignored.
-    *
-    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
-    * assuming each row of blocks is tightly packed.
-    */
-   uint32 pitch;
-} SVGAGuestImage;
-
-/*
- * SVGAColorBGRX --
- *
- *    A 24-bit color format (BGRX), which does not depend on the
- *    format of the legacy guest framebuffer (GFB) or the current
- *    GMRFB state.
- */
-
-typedef
-struct SVGAColorBGRX {
-   union {
-      struct {
-         uint32 b : 8;
-         uint32 g : 8;
-         uint32 r : 8;
-         uint32 x : 8;  /* Unused */
-      };
-
-      uint32 value;
-   };
-} SVGAColorBGRX;
-
-
-/*
- * SVGASignedRect --
- * SVGASignedPoint --
- *
- *    Signed rectangle and point primitives. These are used by the new
- *    2D primitives for drawing to Screen Objects, which can occupy a
- *    signed virtual coordinate space.
- *
- *    SVGASignedRect specifies a half-open interval: the (left, top)
- *    pixel is part of the rectangle, but the (right, bottom) pixel is
- *    not.
- */
-
-typedef
-struct SVGASignedRect {
-   int32  left;
-   int32  top;
-   int32  right;
-   int32  bottom;
-} SVGASignedRect;
-
-typedef
-struct SVGASignedPoint {
-   int32  x;
-   int32  y;
-} SVGASignedPoint;
-
-
-/*
- *  Capabilities
- *
- *  Note the holes in the bitfield. Missing bits have been deprecated,
- *  and must not be reused. Those capabilities will never be reported
- *  by new versions of the SVGA device.
- *
- * SVGA_CAP_GMR2 --
- *    Provides asynchronous commands to define and remap guest memory
- *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
- *    SVGA_REG_MEMORY_SIZE.
- *
- * SVGA_CAP_SCREEN_OBJECT_2 --
- *    Allow screen object support, and require backing stores from the
- *    guest for each screen object.
- */
-
-#define SVGA_CAP_NONE               0x00000000
-#define SVGA_CAP_RECT_COPY          0x00000002
-#define SVGA_CAP_CURSOR             0x00000020
-#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_8BIT_EMULATION     0x00000100
-#define SVGA_CAP_ALPHA_CURSOR       0x00000200
-#define SVGA_CAP_3D                 0x00004000
-#define SVGA_CAP_EXTENDED_FIFO      0x00008000
-#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
-#define SVGA_CAP_PITCHLOCK          0x00020000
-#define SVGA_CAP_IRQMASK            0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
-#define SVGA_CAP_GMR                0x00100000
-#define SVGA_CAP_TRACES             0x00200000
-#define SVGA_CAP_GMR2               0x00400000
-#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
-#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
-#define SVGA_CAP_DEAD1              0x02000000
-#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
-#define SVGA_CAP_GBOBJECTS          0x08000000
-
-/*
- * FIFO register indices.
- *
- * The FIFO is a chunk of device memory mapped into guest physmem.  It
- * is always treated as 32-bit words.
- *
- * The guest driver gets to decide how to partition it between
- * - FIFO registers (there are always at least 4, specifying where the
- *   following data area is and how much data it contains; there may be
- *   more registers following these, depending on the FIFO protocol
- *   version in use)
- * - FIFO data, written by the guest and slurped out by the VMX.
- * These indices are 32-bit word offsets into the FIFO.
- */
-
-enum {
-   /*
-    * Block 1 (basic registers): The originally defined FIFO registers.
-    * These exist and are valid for all versions of the FIFO protocol.
-    */
-
-   SVGA_FIFO_MIN = 0,
-   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
-   SVGA_FIFO_NEXT_CMD,
-   SVGA_FIFO_STOP,
-
-   /*
-    * Block 2 (extended registers): Mandatory registers for the extended
-    * FIFO.  These exist if the SVGA caps register includes
-    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
-    * associated capability bit is enabled.
-    *
-    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
-    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
-    * This means that the guest has to test individually (in most cases
-    * using FIFO caps) for the presence of registers after this; the VMX
-    * can define "extended FIFO" to mean whatever it wants, and currently
-    * won't enable it unless there's room for that set and much more.
-    */
-
-   SVGA_FIFO_CAPABILITIES = 4,
-   SVGA_FIFO_FLAGS,
-   /* Valid with SVGA_FIFO_CAP_FENCE: */
-   SVGA_FIFO_FENCE,
-
-   /*
-    * Block 3a (optional extended registers): Additional registers for the
-    * extended FIFO, whose presence isn't actually implied by
-    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
-    * leave room for them.
-    *
-    * The VMX currently considers these block 3a registers mandatory for
-    * the extended FIFO.
-    */
-
-   /* Valid if exists (i.e. if extended FIFO enabled): */
-   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
-   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
-   SVGA_FIFO_PITCHLOCK,
-
-   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
-   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
-   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
-   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
-   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
-   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
-
-   /* Valid with SVGA_FIFO_CAP_RESERVE: */
-   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
-
-   /*
-    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
-    *
-    * By default this is SVGA_ID_INVALID, to indicate that the cursor
-    * coordinates are specified relative to the virtual root. If this
-    * is set to a specific screen ID, cursor position is reinterpreted
-    * as a signed offset relative to that screen's origin.
-    */
-   SVGA_FIFO_CURSOR_SCREEN_ID,
-
-   /*
-    * Valid with SVGA_FIFO_CAP_DEAD
-    *
-    * An arbitrary value written by the host; drivers should not use it.
-    */
-   SVGA_FIFO_DEAD,
-
-   /*
-    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
-    *
-    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
-    * on platforms that can enforce graphics resource limits.
-    */
-   SVGA_FIFO_3D_HWVERSION_REVISED,
-
-   /*
-    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
-    * registers, but this must be done carefully and with judicious use of
-    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
-    * enough to tell you whether the register exists: we've shipped drivers
-    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
-    * the earlier ones.  The actual order of introduction was:
-    * - PITCHLOCK
-    * - 3D_CAPS
-    * - CURSOR_* (cursor bypass 3)
-    * - RESERVED
-    * So, code that wants to know whether it can use any of the
-    * aforementioned registers, or anything else added after PITCHLOCK and
-    * before 3D_CAPS, needs to reason about something other than
-    * SVGA_FIFO_MIN.
-    */
-
-   /*
-    * 3D caps block space; valid with 3D hardware version >=
-    * SVGA3D_HWVERSION_WS6_B1.
-    */
-   SVGA_FIFO_3D_CAPS      = 32,
-   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
-
-   /*
-    * End of VMX's current definition of "extended-FIFO registers".
-    * Registers before here are always enabled/disabled as a block; either
-    * the extended FIFO is enabled and includes all preceding registers, or
-    * it's disabled entirely.
-    *
-    * Block 3b (truly optional extended registers): Additional registers for
-    * the extended FIFO, which the VMX already knows how to enable and
-    * disable with correct granularity.
-    *
-    * Registers after here exist if and only if the guest SVGA driver
-    * sets SVGA_FIFO_MIN high enough to leave room for them.
-    */
-
-   /* Valid if register exists: */
-   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
-   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
-   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
-
-   /*
-    * Always keep this last.  This defines the maximum number of
-    * registers we know about.  At power-on, this value is placed in
-    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
-    * to allocate this much space in FIFO memory for registers.
-    */
-    SVGA_FIFO_NUM_REGS
-};
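/*
 * A minimal sketch of the partitioning described above (helper name
 * assumed): 'fifo' is the mapped FIFO memory viewed as 32-bit words,
 * 'fifo_size' comes from SVGA_REG_MEM_SIZE, and the four basic
 * registers hold byte offsets. Reserving SVGA_FIFO_NUM_REGS words
 * leaves room for every register defined here.
 */
static inline void svga_fifo_init_example(volatile uint32 *fifo,
                                          uint32 fifo_size)
{
   fifo[SVGA_FIFO_MIN] = SVGA_FIFO_NUM_REGS * sizeof(uint32);
   fifo[SVGA_FIFO_MAX] = fifo_size;
   fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
   fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];
   /* then write 1 to SVGA_REG_CONFIG_DONE via the register ports */
}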
-
-
-/*
- * Definition of registers included in extended FIFO support.
- *
- * The guest SVGA driver gets to allocate the FIFO between registers
- * and data.  It must always allocate at least 4 registers, but old
- * drivers stopped there.
- *
- * The VMX will enable extended FIFO support if and only if the guest
- * left enough room for all registers defined as part of the mandatory
- * set for the extended FIFO.
- *
- * Note that the guest drivers typically allocate the FIFO only at
- * initialization time, not at mode switches, so it's likely that the
- * number of FIFO registers won't change without a reboot.
- *
- * All registers less than this value are guaranteed to be present if
- * svgaUser->fifo.extended is set. Any later registers must be tested
- * individually for compatibility at each use (in the VMX).
- *
- * This value is used only by the VMX, so it can change without
- * affecting driver compatibility; keep it that way?
- */
-#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
-
-
-/*
- * FIFO Synchronization Registers
- *
- *  This explains the relationship between the various FIFO
- *  sync-related registers in IOSpace and in FIFO space.
- *
- *  SVGA_REG_SYNC --
- *
- *       The SYNC register can be used in two different ways by the guest:
- *
- *         1. If the guest wishes to fully sync (drain) the FIFO,
- *            it will write once to SYNC then poll on the BUSY
- *            register. The FIFO is sync'ed once BUSY is zero.
- *
- *         2. If the guest wants to asynchronously wake up the host,
- *            it will write once to SYNC without polling on BUSY.
- *            Ideally it will do this after some new commands have
- *            been placed in the FIFO, and after reading a zero
- *            from SVGA_FIFO_BUSY.
- *
- *       (1) is the original behaviour that SYNC was designed to
- *       support.  Originally, a write to SYNC would implicitly
- *       trigger a read from BUSY. This causes us to synchronously
- *       process the FIFO.
- *
- *       This behaviour has since been changed so that writing SYNC
- *       will *not* implicitly cause a read from BUSY. Instead, it
- *       makes a channel call which asynchronously wakes up the MKS
- *       thread.
- *
- *       New guests can use this new behaviour to implement (2)
- *       efficiently. This lets guests get the host's attention
- *       without waiting for the MKS to poll, which gives us much
- *       better CPU utilization on SMP hosts and on UP hosts while
- *       we're blocked on the host GPU.
- *
- *       Old guests shouldn't notice the behaviour change. SYNC was
- *       never guaranteed to process the entire FIFO, since it was
- *       bounded to a particular number of CPU cycles. Old guests will
- *       still loop on the BUSY register until the FIFO is empty.
- *
- *       Writing to SYNC currently has the following side-effects:
- *
- *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
- *         - Asynchronously wakes up the MKS thread for FIFO processing
- *         - The value written to SYNC is recorded as a "reason", for
- *           stats purposes.
- *
- *       If SVGA_FIFO_BUSY is available, drivers are advised to only
- *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
- *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
- *       eventually set SVGA_FIFO_BUSY on its own, but this approach
- *       lets the driver avoid sending multiple asynchronous wakeup
- *       messages to the MKS thread.
- *
- *  SVGA_REG_BUSY --
- *
- *       This register is set to TRUE when SVGA_REG_SYNC is written,
- *       and it reads as FALSE when the FIFO has been completely
- *       drained.
- *
- *       Every read from this register causes us to synchronously
- *       process FIFO commands. There is no guarantee as to how many
- *       commands each read will process.
- *
- *       CPU time spent processing FIFO commands will be billed to
- *       the guest.
- *
- *       New drivers should avoid using this register unless they
- *       need to guarantee that the FIFO is completely drained. It
- *       is overkill for performing a sync-to-fence. Older drivers
- *       will use this register for any type of synchronization.
- *
- *  SVGA_FIFO_BUSY --
- *
- *       This register is a fast way for the guest driver to check
- *       whether the FIFO is already being processed. It reads and
- *       writes at normal RAM speeds, with no monitor intervention.
- *
- *       If this register reads as TRUE, the host is guaranteeing that
- *       any new commands written into the FIFO will be noticed before
- *       the MKS goes back to sleep.
- *
- *       If this register reads as FALSE, no such guarantee can be
- *       made.
- *
- *       The guest should use this register to quickly determine
- *       whether or not it needs to wake up the host. If the guest
- *       just wrote a command or group of commands that it would like
- *       the host to begin processing, it should:
- *
- *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
- *            action is necessary.
- *
- *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
- *            code that we've already sent a SYNC to the host and we
- *            don't need to send a duplicate.
- *
- *         3. Write a reason to SVGA_REG_SYNC. This will send an
- *            asynchronous wakeup to the MKS thread.
- */
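/*
 * A minimal sketch of the three-step wakeup above (assumes an extended
 * FIFO with SVGA_FIFO_BUSY present, and the svga_write_reg() helper
 * sketched earlier).
 */
static inline void svga_ring_doorbell_example(volatile uint32 *fifo,
                                              uint32 io_base)
{
   if (fifo[SVGA_FIFO_BUSY])
      return;                               /* 1. host already awake */
   fifo[SVGA_FIFO_BUSY] = 1;                /* 2. record the pending SYNC */
   svga_write_reg(io_base, SVGA_REG_SYNC, 1 /* reason */); /* 3. wake MKS */
}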
-
-
-/*
- * FIFO Capabilities
- *
- *      Fence -- Fence register and command are supported
- *      Accel Front -- Front buffer only commands are supported
- *      Pitch Lock -- Pitch lock register is supported
- *      Video -- SVGA Video overlay units are supported
- *      Escape -- Escape command is supported
- *
- * XXX: Add longer descriptions for each capability, including a list
- *      of the new features that each capability provides.
- *
- * SVGA_FIFO_CAP_SCREEN_OBJECT --
- *
- *    Provides dynamic multi-screen rendering, for improved Unity and
- *    multi-monitor modes. With Screen Object, the guest can
- *    dynamically create and destroy 'screens', which can represent
- *    Unity windows or virtual monitors. Screen Object also provides
- *    strong guarantees that DMA operations happen only when
- *    guest-initiated. Screen Object deprecates the BAR1 guest
- *    framebuffer (GFB) and all commands that work only with the GFB.
- *
- *    New registers:
- *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
- *
- *    New 2D commands:
- *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
- *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
- *
- *    New 3D commands:
- *       BLIT_SURFACE_TO_SCREEN
- *
- *    New guarantees:
- *
- *       - The host will not read or write guest memory, including the GFB,
- *         except when explicitly initiated by a DMA command.
- *
- *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
- *         is guaranteed to complete before any subsequent FENCEs.
- *
- *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
- *         PRESENT_READBACK) as well as new Screen blit commands will
- *         all behave consistently as blits, and memory will be read
- *         or written in FIFO order.
- *
- *         For example, if you PRESENT from one SVGA3D surface to multiple
- *         places on the screen, the data copied will always be from the
- *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
- *         This was not necessarily true on devices without Screen Object.
- *
- *         This means that on devices that support Screen Object, the
- *         PRESENT_READBACK command should not be necessary unless you
- *         actually want to read back the results of 3D rendering into
- *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
- *         command provides a strict superset of functionality.)
- *
- *       - When a screen is resized, either using Screen Object commands or
- *         legacy multimon registers, its contents are preserved.
- *
- * SVGA_FIFO_CAP_GMR2 --
- *
- *    Provides new commands to define and remap guest memory regions (GMR).
- *
- *    New 2D commands:
- *       DEFINE_GMR2, REMAP_GMR2.
- *
- * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
- *
- *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
- *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
- *    that enforce graphics resource limits.  This allows the platform
- *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
- *    drivers that do not limit their resources.
- *
- *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
- *    are codependent (and thus we use a single capability bit).
- *
- * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
- *
- *    Modifies the DEFINE_SCREEN command to include a guest provided
- *    backing store in GMR memory and the bytesPerLine for the backing
- *    store.  This capability requires the use of a backing store when
- *    creating screen objects.  However, if SVGA_FIFO_CAP_SCREEN_OBJECT
- *    is present, backing stores are optional.
- *
- * SVGA_FIFO_CAP_DEAD --
- *
- *    Drivers should not use this cap bit.  This cap bit cannot be
- *    reused since some hosts already expose it.
- */
-
-#define SVGA_FIFO_CAP_NONE                  0
-#define SVGA_FIFO_CAP_FENCE             (1<<0)
-#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
-#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
-#define SVGA_FIFO_CAP_VIDEO             (1<<3)
-#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
-#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
-#define SVGA_FIFO_CAP_RESERVE           (1<<6)
-#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
-#define SVGA_FIFO_CAP_GMR2              (1<<8)
-#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
-#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
-#define SVGA_FIFO_CAP_DEAD              (1<<10)
-
-
-/*
- * FIFO Flags
- *
- *      Accel Front -- Driver should use front buffer only commands
- */
-
-#define SVGA_FIFO_FLAG_NONE                 0
-#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
-
-/*
- * FIFO reservation sentinel value
- */
-
-#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
-
-
-/*
- * Video overlay support
- */
-
-#define SVGA_NUM_OVERLAY_UNITS 32
-
-
-/*
- * Video capabilities that the guest is currently using
- */
-
-#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
-
-
-/*
- * Offsets for the video overlay registers
- */
-
-enum {
-   SVGA_VIDEO_ENABLED = 0,
-   SVGA_VIDEO_FLAGS,
-   SVGA_VIDEO_DATA_OFFSET,
-   SVGA_VIDEO_FORMAT,
-   SVGA_VIDEO_COLORKEY,
-   SVGA_VIDEO_SIZE,          /* Deprecated */
-   SVGA_VIDEO_WIDTH,
-   SVGA_VIDEO_HEIGHT,
-   SVGA_VIDEO_SRC_X,
-   SVGA_VIDEO_SRC_Y,
-   SVGA_VIDEO_SRC_WIDTH,
-   SVGA_VIDEO_SRC_HEIGHT,
-   SVGA_VIDEO_DST_X,         /* Signed int32 */
-   SVGA_VIDEO_DST_Y,         /* Signed int32 */
-   SVGA_VIDEO_DST_WIDTH,
-   SVGA_VIDEO_DST_HEIGHT,
-   SVGA_VIDEO_PITCH_1,
-   SVGA_VIDEO_PITCH_2,
-   SVGA_VIDEO_PITCH_3,
-   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
-   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
-   SVGA_VIDEO_NUM_REGS
-};
-
-
-/*
- * SVGA Overlay Units
- *
- *      width and height relate to the entire source video frame.
- *      srcX, srcY, srcWidth and srcHeight represent subset of the source
- *      video frame to be displayed.
- */
-
-typedef struct SVGAOverlayUnit {
-   uint32 enabled;
-   uint32 flags;
-   uint32 dataOffset;
-   uint32 format;
-   uint32 colorKey;
-   uint32 size;
-   uint32 width;
-   uint32 height;
-   uint32 srcX;
-   uint32 srcY;
-   uint32 srcWidth;
-   uint32 srcHeight;
-   int32  dstX;
-   int32  dstY;
-   uint32 dstWidth;
-   uint32 dstHeight;
-   uint32 pitches[3];
-   uint32 dataGMRId;
-   uint32 dstScreenId;
-} SVGAOverlayUnit;
-
-
-/*
- * SVGAScreenObject --
- *
- *    This is a new way to represent a guest's multi-monitor screen or
- *    Unity window. Screen objects are only supported if the
- *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
- *
- *    If Screen Objects are supported, they can be used to fully
- *    replace the functionality provided by the framebuffer registers
- *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
- *
- *    The screen object is a struct with guaranteed binary
- *    compatibility. New flags can be added, and the struct may grow,
- *    but existing fields must retain their meaning.
- *
- *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
- *    a SVGAGuestPtr that is used to back the screen contents.  This
- *    memory must come from the GFB.  The guest is not allowed to
- *    access the memory and doing so will have undefined results.  The
- *    backing store is required to be page aligned and the size is
- *    padded to the next page boundary.  The number of pages is:
- *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
- *
- *    The pitch in the backingStore is required to be at least large
- *    enough to hold a 32bpp scanline.  It is recommended that the
- *    driver pad bytesPerLine for a potential performance win.
- *
- *    The cloneCount field is treated as a hint from the guest that
- *    the user wants this display to be cloned, cloneCount times.  A
- *    value of zero means no cloning should happen.
- */
-
-#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
-#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
-
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
- * deactivated the base layer is defined to lose all contents and
- * become black.  When a screen is deactivated its backing store is
- * optional.  When this flag is set, backingPtr and bytesPerLine are
- * ignored.
- */
-#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
-
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set,
- * the screen contents are shown to the user as all black, though the
- * base layer contents are preserved.  The screen base layer can still
- * be read and written as normal, but no visible effect is seen by the
- * user.  When the flag changes, the screen is blanked or redrawn to
- * the current contents as needed, without any extra commands from the
- * driver.  This flag only has an effect when the screen is not
- * deactivated.
- */
-#define SVGA_SCREEN_BLANKING (1 << 4)
-
-typedef
-struct SVGAScreenObject {
-   uint32 structSize;   /* sizeof(SVGAScreenObject) */
-   uint32 id;
-   uint32 flags;
-   struct {
-      uint32 width;
-      uint32 height;
-   } size;
-   struct {
-      int32 x;
-      int32 y;
-   } root;
-
-   /*
-    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
-    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
-    */
-   SVGAGuestImage backingStore;
-   uint32 cloneCount;
-} SVGAScreenObject;
-
-
-/*
- *  Commands in the command FIFO:
- *
- *  Command IDs defined below are used for the traditional 2D FIFO
- *  communication (not all commands are available for all versions of the
- *  SVGA FIFO protocol).
- *
- *  Note the holes in the command ID numbers: These commands have been
- *  deprecated, and the old IDs must not be reused.
- *
- *  Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
- *  protocol.
- *
- *  Each command's parameters are described by the comments and
- *  structs below.
- */
-
-typedef enum {
-   SVGA_CMD_INVALID_CMD           = 0,
-   SVGA_CMD_UPDATE                = 1,
-   SVGA_CMD_RECT_COPY             = 3,
-   SVGA_CMD_DEFINE_CURSOR         = 19,
-   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
-   SVGA_CMD_UPDATE_VERBOSE        = 25,
-   SVGA_CMD_FRONT_ROP_FILL        = 29,
-   SVGA_CMD_FENCE                 = 30,
-   SVGA_CMD_ESCAPE                = 33,
-   SVGA_CMD_DEFINE_SCREEN         = 34,
-   SVGA_CMD_DESTROY_SCREEN        = 35,
-   SVGA_CMD_DEFINE_GMRFB          = 36,
-   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
-   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
-   SVGA_CMD_ANNOTATION_FILL       = 39,
-   SVGA_CMD_ANNOTATION_COPY       = 40,
-   SVGA_CMD_DEFINE_GMR2           = 41,
-   SVGA_CMD_REMAP_GMR2            = 42,
-   SVGA_CMD_MAX
-} SVGAFifoCmdId;
-
-#define SVGA_CMD_MAX_ARGS           64
-
-
-/*
- * SVGA_CMD_UPDATE --
- *
- *    This is a DMA transfer which copies from the Guest Framebuffer
- *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
- *    intersect with the provided virtual rectangle.
- *
- *    This command does not support using arbitrary guest memory as a
- *    data source; it only works with the pre-defined GFB memory.
- *    This command also does not support signed virtual coordinates.
- *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
- *    negative root x/y coordinates, the negative portion of those
- *    screens will not be reachable by this command.
- *
- *    This command is not necessary when using framebuffer
- *    traces. Traces are automatically enabled if the SVGA FIFO is
- *    disabled, and you may explicitly enable/disable traces using
- *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
- *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
- *
- *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
- *    pseudocolor screen updates. The newer Screen Object commands
- *    only support true color formats.
- *
- * Availability:
- *    Always available.
- */
-
-typedef
-struct SVGAFifoCmdUpdate {
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-} SVGAFifoCmdUpdate;
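/*
 * A deliberately naive sketch of queueing an UPDATE (helper name
 * assumed): it ignores FIFO wrap-around, SVGA_FIFO_RESERVED, and
 * waking the host, all of which a real driver must handle.
 * SVGA_FIFO_NEXT_CMD holds a byte offset into the FIFO.
 */
static inline void svga_update_example(volatile uint32 *fifo, uint32 x,
                                       uint32 y, uint32 w, uint32 h)
{
   uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);

   fifo[next++] = SVGA_CMD_UPDATE;
   fifo[next++] = x;
   fifo[next++] = y;
   fifo[next++] = w;
   fifo[next++] = h;
   fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);
}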
-
-
-/*
- * SVGA_CMD_RECT_COPY --
- *
- *    Perform a rectangular DMA transfer from one area of the GFB to
- *    another, and copy the result to any screens which intersect it.
- *
- * Availability:
- *    SVGA_CAP_RECT_COPY
- */
-
-typedef
-struct SVGAFifoCmdRectCopy {
-   uint32 srcX;
-   uint32 srcY;
-   uint32 destX;
-   uint32 destY;
-   uint32 width;
-   uint32 height;
-} SVGAFifoCmdRectCopy;
-
-
-/*
- * SVGA_CMD_DEFINE_CURSOR --
- *
- *    Provide a new cursor image, as an AND/XOR mask.
- *
- *    The recommended way to position the cursor overlay is by using
- *    the SVGA_FIFO_CURSOR_* registers, supported by the
- *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
- *
- * Availability:
- *    SVGA_CAP_CURSOR
- */
-
-typedef
-struct SVGAFifoCmdDefineCursor {
-   uint32 id;             /* Reserved, must be zero. */
-   uint32 hotspotX;
-   uint32 hotspotY;
-   uint32 width;
-   uint32 height;
-   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
-   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
-   /*
-    * Followed by scanline data for AND mask, then XOR mask.
-    * Each scanline is padded to a 32-bit boundary.
-    */
-} SVGAFifoCmdDefineCursor;
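/*
 * A minimal sketch (helper name assumed) of the per-mask size implied
 * by the padding rule above: each scanline is rounded up to a whole
 * number of 32-bit words.
 */
static inline uint32 cursor_mask_bytes(uint32 width, uint32 height,
                                       uint32 depth)
{
   return ((width * depth + 31) / 32) * 4 * height;
}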
-
-
-/*
- * SVGA_CMD_DEFINE_ALPHA_CURSOR --
- *
- *    Provide a new cursor image, in 32-bit BGRA format.
- *
- *    The recommended way to position the cursor overlay is by using
- *    the SVGA_FIFO_CURSOR_* registers, supported by the
- *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
- *
- * Availability:
- *    SVGA_CAP_ALPHA_CURSOR
- */
-
-typedef
-struct SVGAFifoCmdDefineAlphaCursor {
-   uint32 id;             /* Reserved, must be zero. */
-   uint32 hotspotX;
-   uint32 hotspotY;
-   uint32 width;
-   uint32 height;
-   /* Followed by scanline data */
-} SVGAFifoCmdDefineAlphaCursor;
-
-
-/*
- * SVGA_CMD_UPDATE_VERBOSE --
- *
- *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
- *    'reason' value, an opaque cookie which is used by internal
- *    debugging tools. Third party drivers should not use this
- *    command.
- *
- * Availability:
- *    SVGA_CAP_EXTENDED_FIFO
- */
-
-typedef
-struct SVGAFifoCmdUpdateVerbose {
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-   uint32 reason;
-} SVGAFifoCmdUpdateVerbose;
-
-
-/*
- * SVGA_CMD_FRONT_ROP_FILL --
- *
- *    This is a hint which tells the SVGA device that the driver has
- *    just filled a rectangular region of the GFB with a solid
- *    color. Instead of reading these pixels from the GFB, the device
- *    can assume that they all equal 'color'. This is primarily used
- *    for remote desktop protocols.
- *
- * Availability:
- *    SVGA_FIFO_CAP_ACCELFRONT
- */
-
-#define  SVGA_ROP_COPY                    0x03
-
-typedef
-struct SVGAFifoCmdFrontRopFill {
-   uint32 color;     /* In the same format as the GFB */
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-   uint32 rop;       /* Must be SVGA_ROP_COPY */
-} SVGAFifoCmdFrontRopFill;
-
-
-/*
- * SVGA_CMD_FENCE --
- *
- *    Insert a synchronization fence.  When the SVGA device reaches
- *    this command, it will copy the 'fence' value into the
- *    SVGA_FIFO_FENCE register. It will also compare the fence against
- *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
- *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
- *    raise this interrupt.
- *
- * Availability:
- *    SVGA_FIFO_FENCE for this command,
- *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
- */
-
-typedef
-struct {
-   uint32 fence;
-} SVGAFifoCmdFence;
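/*
 * A minimal sketch of a sync-to-fence (helper name assumed): insert a
 * fence, then poll SVGA_FIFO_FENCE. It reuses the naive FIFO write
 * pattern above and ignores IRQs and fence-counter wrap.
 */
static inline void svga_sync_to_fence_example(volatile uint32 *fifo,
                                              uint32 fence)
{
   uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);

   fifo[next++] = SVGA_CMD_FENCE;
   fifo[next++] = fence;
   fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);

   while (fifo[SVGA_FIFO_FENCE] != fence)
      ;  /* a real driver would ring the doorbell and sleep instead */
}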
-
-
-/*
- * SVGA_CMD_ESCAPE --
- *
- *    Send an extended or vendor-specific variable length command.
- *    This is used for video overlay, third party plugins, and
- *    internal debugging tools. See svga_escape.h
- *
- * Availability:
- *    SVGA_FIFO_CAP_ESCAPE
- */
-
-typedef
-struct SVGAFifoCmdEscape {
-   uint32 nsid;
-   uint32 size;
-   /* followed by 'size' bytes of data */
-} SVGAFifoCmdEscape;
-
-
-/*
- * SVGA_CMD_DEFINE_SCREEN --
- *
- *    Define or redefine an SVGAScreenObject. See the description of
- *    SVGAScreenObject above.  The video driver is responsible for
- *    generating new screen IDs. They should be small positive
- *    integers. The virtual device will have an implementation
- *    specific upper limit on the number of screen IDs
- *    supported. Drivers are responsible for recycling IDs. The first
- *    valid ID is zero.
- *
- *    - Interaction with other registers:
- *
- *    For backwards compatibility, when the GFB mode registers (WIDTH,
- *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
- *    deletes all screens other than screen #0, and redefines screen
- *    #0 according to the specified mode. Drivers that use
- *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
- *
- *    If you use screen objects, do not use the legacy multi-mon
- *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAScreenObject screen;   /* Variable-length according to version */
-} SVGAFifoCmdDefineScreen;
-
-
-/*
- * SVGA_CMD_DESTROY_SCREEN --
- *
- *    Destroy an SVGAScreenObject. Its ID is immediately available for
- *    re-use.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   uint32 screenId;
-} SVGAFifoCmdDestroyScreen;
-
-
-/*
- * SVGA_CMD_DEFINE_GMRFB --
- *
- *    This command sets a piece of SVGA device state called the
- *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
- *    piece of light-weight state which identifies the location and
- *    format of an image in guest memory or in BAR1. The GMRFB has
- *    an arbitrary size, and it doesn't need to match the geometry
- *    of the GFB or any screen object.
- *
- *    The GMRFB can be redefined as often as you like. You could
- *    always use the same GMRFB, you could redefine it before
- *    rendering from a different guest screen, or you could even
- *    redefine it before every blit.
- *
- *    There are multiple ways to use this command. The simplest way is
- *    to use it to move the framebuffer either to elsewhere in the GFB
- *    (BAR1) memory region, or to a user-defined GMR. This lets a
- *    driver use a framebuffer allocated entirely out of normal system
- *    memory, which we encourage.
- *
- *    Another way to use this command is to set up a ring buffer of
- *    updates in GFB memory. If a driver wants to ensure that no
- *    frames are skipped by the SVGA device, it is important that the
- *    driver not modify the source data for a blit until the device is
- *    done processing the command. One efficient way to accomplish
- *    this is to use a ring of small DMA buffers. Each buffer is used
- *    for one blit, then we move on to the next buffer in the
- *    ring. The FENCE mechanism is used to protect each buffer from
- *    re-use until the device is finished with that buffer's
- *    corresponding blit.
- *
- *    This command does not affect the meaning of SVGA_CMD_UPDATE.
- *    UPDATEs always occur from the legacy GFB memory area. This
- *    command has no support for pseudocolor GMRFBs. Currently only
- *    true-color 15, 16, and 24-bit depths are supported. Future
- *    devices may expose capabilities for additional framebuffer
- *    formats.
- *
- *    The default GMRFB value is undefined. Drivers must always send
- *    this command at least once before performing any blit from the
- *    GMRFB.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAGuestPtr        ptr;
-   uint32              bytesPerLine;
-   SVGAGMRImageFormat  format;
-} SVGAFifoCmdDefineGMRFB;
-
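The ring-of-buffers pattern described above, sketched with hypothetical
helpers: each slot remembers the fence emitted after its last blit, and is
only handed out again once that fence has passed. Before blitting from a
slot, the driver issues SVGA_CMD_DEFINE_GMRFB pointing at the slot's buffer,
emits the blit, then a fence that it records in the slot.

struct gmrfb_ring_slot {
	SVGAGuestPtr ptr;	/* DMA buffer backing this slot */
	u32 fence;		/* fence protecting the last blit from it */
};

static struct gmrfb_ring_slot *
gmrfb_ring_next(struct gmrfb_ring_slot *ring, u32 nslots, u32 *head)
{
	struct gmrfb_ring_slot *slot = &ring[*head % nslots];

	/* hypothetical: wait as in the SVGA_CMD_FENCE sketch above */
	svga_fence_wait(slot->fence);
	(*head)++;
	return slot;
}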
-
-/*
- * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
- *
- *    This is a guest-to-host blit. It performs a DMA operation to
- *    copy a rectangular region of pixels from the current GMRFB to
- *    one or more Screen Objects.
- *
- *    The destination coordinate may be specified relative to a
- *    screen's origin (if a screen ID is specified) or relative to the
- *    virtual coordinate system's origin (if the screen ID is
- *    SVGA_ID_INVALID). The actual destination may span zero or more
- *    screens, in the case of a virtual destination rect or a rect
- *    which extends off the edge of the specified screen.
- *
- *    This command writes to the screen's "base layer": the underlying
- *    framebuffer which exists below any cursor or video overlays. No
- *    action is necessary to explicitly hide or update any overlays
- *    which exist on top of the updated region.
- *
- *    The SVGA device is guaranteed to finish reading from the GMRFB
- *    by the time any subsequent FENCE commands are reached.
- *
- *    This command consumes an annotation. See the
- *    SVGA_CMD_ANNOTATION_* commands for details.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  srcOrigin;
-   SVGASignedRect   destRect;
-   uint32           destScreenId;
-} SVGAFifoCmdBlitGMRFBToScreen;
-
-
-/*
- * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
- *
- *    This is a host-to-guest blit. It performs a DMA operation to
- *    copy a rectangular region of pixels from a single Screen Object
- *    back to the current GMRFB.
- *
- *    Usage note: This command should be used rarely. It will
- *    typically be inefficient, but it is necessary for some types of
- *    synchronization between 3D (GPU) and 2D (CPU) rendering into
- *    overlapping areas of a screen.
- *
- *    The source coordinate is specified relative to a screen's
- *    origin. The provided screen ID must be valid. If any parameters
- *    are invalid, the resulting pixel values are undefined.
- *
- *    This command reads the screen's "base layer". Overlays like
- *    video and cursor are not included, but any data which was sent
- *    using a blit-to-screen primitive will be available, no matter
- *    whether the data's original source was the GMRFB or the 3D
- *    acceleration hardware.
- *
- *    Note that our guest-to-host blits and host-to-guest blits aren't
- *    symmetric in their current implementation. While the parameters
- *    are identical, host-to-guest blits are a lot less featureful.
- *    They do not support clipping: If the source parameters don't
- *    fully fit within a screen, the blit fails. They must originate
- *    from exactly one screen. Virtual coordinates are not directly
- *    supported.
- *
- *    Host-to-guest blits do support the same set of GMRFB formats
- *    offered by guest-to-host blits.
- *
- *    The SVGA device is guaranteed to finish writing to the GMRFB by
- *    the time any subsequent FENCE commands are reached.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  destOrigin;
-   SVGASignedRect   srcRect;
-   uint32           srcScreenId;
-} SVGAFifoCmdBlitScreenToGMRFB;
-
-
-/*
- * SVGA_CMD_ANNOTATION_FILL --
- *
- *    This is a blit annotation. This command stores a small piece of
- *    device state which is consumed by the next blit-to-screen
- *    command. The state is only cleared by commands which are
- *    specifically documented as consuming an annotation. Other
- *    commands (such as ESCAPEs for debugging) may intervene between
- *    the annotation and its associated blit.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value, specified here as a color in
- *    SVGAColorBGRX format.
- *
- *    The SVGA device can still render the blit correctly even if it
- *    ignores this annotation, but the annotation may allow it to
- *    perform the blit more efficiently, for example by ignoring the
- *    source data and performing a fill in hardware.
- *
- *    This annotation is most important for performance when the
- *    user's display is being remoted over a network connection.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAColorBGRX  color;
-} SVGAFifoCmdAnnotationFill;
-
-
-/*
- * SVGA_CMD_ANNOTATION_COPY --
- *
- *    This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
- *    information about annotations.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value as those which already exist at an
- *    identically-sized region on the same or a different screen.
- *
- *    Note that the source pixels for the COPY in this annotation are
- *    sampled before applying the annotation's associated blit. They
- *    are allowed to overlap with the blit's destination pixels.
- *
- *    The copy source rectangle is specified the same way as the blit
- *    destination: it can be a rectangle which spans zero or more
- *    screens, specified relative to either a screen or to the virtual
- *    coordinate system's origin. If the source rectangle includes
- *    pixels which are not from exactly one screen, the results are
- *    undefined.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  srcOrigin;
-   uint32           srcScreenId;
-} SVGAFifoCmdAnnotationCopy;
-
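A hedged sketch of how an annotation pairs with its blit, assuming a
hypothetical push_cmd() helper that writes a command id followed by its body
into the FIFO, and the usual x/y and left/top/right/bottom member layout of
SVGASignedPoint and SVGASignedRect:

SVGAFifoCmdAnnotationFill fill;
SVGAFifoCmdBlitGMRFBToScreen blit;

fill.color = color;	/* the promised value of every blitted pixel */
blit.srcOrigin.x = 0;
blit.srcOrigin.y = 0;
blit.destRect.left = 0;
blit.destRect.top = 0;
blit.destRect.right = width;
blit.destRect.bottom = height;
blit.destScreenId = screen_id;

/* The fill annotation is consumed by the immediately following blit. */
push_cmd(SVGA_CMD_ANNOTATION_FILL, &fill, sizeof(fill));
push_cmd(SVGA_CMD_BLIT_GMRFB_TO_SCREEN, &blit, sizeof(blit));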
-
-/*
- * SVGA_CMD_DEFINE_GMR2 --
- *
- *    Define guest memory region v2.  See the description of GMRs above.
- *
- * Availability:
- *    SVGA_CAP_GMR2
- */
-
-typedef
-struct {
-   uint32 gmrId;
-   uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
-
-
-/*
- * SVGA_CMD_REMAP_GMR2 --
- *
- *    Remap guest memory region v2.  See the description of GMRs above.
- *
- *    This command allows guest to modify a portion of an existing GMR by
- *    invalidating it or reassigning it to different guest physical pages.
- *    The pages are identified by physical page number (PPN).  The pages
- *    are assumed to be pinned and valid for DMA operations.
- *
- *    Description of command flags:
- *
- *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
- *       The PPN list must not overlap with the remap region (this can be
- *       handled trivially by referencing a separate GMR).  If flag is
- *       disabled, PPN list is appended to SVGARemapGMR command.
- *
- *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
- *       it is in PPN32 format.
- *
- *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
- *       A single PPN can be used to invalidate a portion of a GMR or
- *       map it to a single guest scratch page.
- *
- * Availability:
- *    SVGA_CAP_GMR2
- */
-
-typedef enum {
-   SVGA_REMAP_GMR2_PPN32         = 0,
-   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
-   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
-   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
-} SVGARemapGMR2Flags;
-
-typedef
-struct {
-   uint32 gmrId;
-   SVGARemapGMR2Flags flags;
-   uint32 offsetPages; /* offset in pages to begin remap */
-   uint32 numPages; /* number of pages to remap */
-   /*
-    * Followed by additional data depending on SVGARemapGMR2Flags.
-    *
-    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
-    * Otherwise an array of page descriptors in PPN32 or PPN64 format
-    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
-    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
-    */
-} SVGAFifoCmdRemapGMR2;
-
-#endif
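For illustration, a hedged sketch of building the simplest REMAP_GMR2
variant (inline PPN32 list, i.e. neither VIA_GMR, PPN64 nor SINGLE_PPN set);
the caller is assumed to have reserved FIFO space and written the
SVGA_CMD_REMAP_GMR2 command id in front of this payload:

static void *build_remap_gmr2_ppn32(void *out, u32 gmr_id, u32 offset_pages,
				    const u32 *ppns, u32 num_pages)
{
	SVGAFifoCmdRemapGMR2 *cmd = out;

	cmd->gmrId = gmr_id;
	cmd->flags = SVGA_REMAP_GMR2_PPN32;
	cmd->offsetPages = offset_pages;
	cmd->numPages = num_pages;
	/* PPN32 page descriptors are appended directly after the body. */
	memcpy(cmd + 1, ppns, num_pages * sizeof(u32));

	return (u32 *)(cmd + 1) + num_pages;	/* end of the payload */
}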
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836de..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Silly typedefs for the svga headers. Currently the headers are shared
- * between all components that talk to svga. And as such the headers
- * are in a completely different style and use weird defines.
- *
- * This file lets all the ugly be prefixed with svga*.
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 0000000..9c42e96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that vmware guest-backed
+ * contexts can be swapped out to their backing mobs by the device at any
+ * time, and swapped in again at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (that can happen at any time),
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context we can't swap it in again to kill bindings because
+ * of backing mob reservation lockdep violations, so as part of
+ * context swapout, also kill all bindings of a context, so that they are
+ * already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts.
+ * Therefore we can't easily protect this data with a per-context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT     0
+#define VMW_BINDING_PS_BIT     1
+#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_VB_BIT     3
+#define VMW_BINDING_NUM_BITS   4
+
+#define VMW_BINDING_PS_SR_BIT  0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct vmw_private *dev_priv;
+	struct list_head list;
+	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_bindinfo_view ds_view;
+	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+	struct vmw_ctx_bindinfo_ib index_buffer;
+	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+	unsigned long dirty;
+	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+	u32 bind_cmd_count;
+	u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+				       bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the binding struct derived from struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+	size_t size;
+	const size_t *offsets;
+	vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+	[vmw_ctx_binding_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_shader},
+	[vmw_ctx_binding_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_render_target},
+	[vmw_ctx_binding_tex] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_tex),
+		.offsets = vmw_binding_tex_offsets,
+		.scrub_func = vmw_binding_scrub_texture},
+	[vmw_ctx_binding_cb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_cb),
+		.offsets = vmw_binding_cb_offsets,
+		.scrub_func = vmw_binding_scrub_cb},
+	[vmw_ctx_binding_dx_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_dx_shader},
+	[vmw_ctx_binding_dx_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_sr] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_sr_offsets,
+		.scrub_func = vmw_binding_scrub_sr},
+	[vmw_ctx_binding_ds] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_dx_ds_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_so] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so),
+		.offsets = vmw_binding_so_offsets,
+		.scrub_func = vmw_binding_scrub_so},
+	[vmw_ctx_binding_vb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_vb),
+		.offsets = vmw_binding_vb_offsets,
+		.scrub_func = vmw_binding_scrub_vb},
+	[vmw_ctx_binding_ib] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_ib),
+		.offsets = vmw_binding_ib_offsets,
+		.scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+	if (list_empty(&cbs->list))
+		return NULL;
+
+	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+				ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state holding the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+	size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
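Worked example: for bt == vmw_ctx_binding_cb, shader_slot == 1 and slot == 2,
the offset resolves to offsetof(struct vmw_ctx_binding_state,
per_shader[1].const_buffers) + 2 * sizeof(struct vmw_ctx_bindinfo_cb), i.e.
the third constant-buffer slot of the second shader type, with no switch
statement on the binding type.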
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+	list_del(&bi->ctx_list);
+	if (!list_empty(&bi->res_list))
+		list_del(&bi->res_list);
+	bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding, if applicable. Otherwise 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+		    const struct vmw_ctx_bindinfo *bi,
+		    u32 shader_slot, u32 slot)
+{
+	struct vmw_ctx_bindinfo *loc =
+		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+	if (loc->ctx != NULL)
+		vmw_binding_drop(loc);
+
+	memcpy(loc, bi, b->size);
+	loc->scrubbed = false;
+	list_add(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+}
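A hedged usage sketch: start tracking a render-target binding at slot 0,
where ctx and res stand for already-validated resources. Passing &binding.bi
works because bi is the first member and vmw_binding_add() copies the full
derived struct (b->size bytes):

struct vmw_ctx_bindinfo_view binding = {
	.bi = {
		.ctx = ctx,			/* owning context resource */
		.res = res,			/* bound surface/view */
		.bt  = vmw_ctx_binding_rt,
	},
	.slot = 0,
};

vmw_binding_add(cbs, &binding.bi, 0, 0);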
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged binding state the entry is transferred from.
+ * @bi: Information about the binding to track.
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_binding_state *from,
+				 const struct vmw_ctx_bindinfo *bi)
+{
+	size_t offset = (unsigned long)bi - (unsigned long)from;
+	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+		((unsigned long) cbs + offset);
+
+	if (loc->ctx != NULL) {
+		WARN_ON(bi->scrubbed);
+
+		vmw_binding_drop(loc);
+	}
+
+	if (bi->res != NULL) {
+		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+		list_add_tail(&loc->ctx_list, &cbs->list);
+		list_add_tail(&loc->res_list, &loc->res->binding_head);
+	}
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_state_scrub(cbs);
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	(void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_res_list_scrub(head);
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	list_for_each_entry(entry, head, res_list) {
+		struct vmw_ctx_binding_state *cbs =
+			vmw_context_binding_state(entry->ctx);
+
+		(void) vmw_binding_emit_dirty(cbs);
+	}
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent binding state to commit the staged info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			      struct vmw_ctx_binding_state *from)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+		vmw_binding_transfer(to, from, entry);
+		vmw_binding_drop(entry);
+	}
+}
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->scrubbed))
+			continue;
+
+		if (entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)
+			continue;
+
+		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->scrubbed = false;
+	}
+
+	return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->slot;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
+{
+	struct vmw_ctx_bindinfo_tex *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = binding->texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_cb *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.slot = binding->slot;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	if (rebind) {
+		cmd->body.offsetInBytes = binding->offset;
+		cmd->body.sizeInBytes = binding->size;
+		cmd->body.sid = bi->res->id;
+	} else {
+		cmd->body.offsetInBytes = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.sid = SVGA3D_INVALID_ID;
+	}
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_bindinfo *bi,
+				 u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biv) {
+		if (!biv->bi.ctx)
+			break;
+
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+	}
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+				       const struct vmw_ctx_bindinfo *bi,
+				       unsigned long *dirty,
+				       u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i, next_bit;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv) {
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((!biv->bi.ctx || biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
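Worked example: with dirty bits 1 and 3 set, the scan starts at slot 1
(bind_first_slot == 1) and emits one contiguous run covering slots 1, 2
and 3; the clean slot 2 is simply re-emitted with its current view id
rather than splitting the command in two.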
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot whose dirty shader resources are emitted.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+			   int shader_slot)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->per_shader[shader_slot].shader_res[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_view_ids(cbs, loc,
+				   cbs->per_shader[shader_slot].dirty_sr,
+				   SVGA3D_DX_MAX_SRVIEWS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader"
+			  " resource binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.startView = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX render-target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+	else
+		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *bi,
+				   u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_so *biso =
+		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+	unsigned long i;
+	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+		    ++cbs->bind_cmd_count) {
+		if (!biso->bi.ctx)
+			break;
+
+		if (!biso->bi.scrubbed) {
+			so_buffer->sid = biso->bi.res->id;
+			so_buffer->offset = biso->offset;
+			so_buffer->sizeInBytes = biso->size;
+		} else {
+			so_buffer->sid = SVGA3D_INVALID_ID;
+			so_buffer->offset = 0;
+			so_buffer->sizeInBytes = 0;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+	} *cmd;
+	size_t cmd_size, so_target_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+	cmd_size = sizeof(*cmd) + so_target_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX SO target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+	cmd->header.size = sizeof(cmd->body) + so_target_size;
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+	u32 i;
+	int ret = 0;
+
+	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+			continue;
+
+		ret = vmw_emit_set_sr(cbs, i);
+		if (ret)
+			break;
+
+		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+				  const struct vmw_ctx_bindinfo *bi,
+				  unsigned long *dirty,
+				  u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_vb *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	unsigned long i, next_bit;
+	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv, ++vbs) {
+		if (!biv->bi.ctx || biv->bi.scrubbed) {
+			vbs->sid = SVGA3D_INVALID_ID;
+			vbs->stride = 0;
+			vbs->offset = 0;
+		} else {
+			vbs->sid = biv->bi.res->id;
+			vbs->stride = biv->stride;
+			vbs->offset = biv->offset;
+		}
+		cbs->bind_cmd_count++;
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->vertex_buffers[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+	} *cmd;
+	size_t cmd_size, set_vb_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+			     SVGA3D_DX_MAX_VERTEXBUFFERS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+	cmd_size = sizeof(*cmd) + set_vb_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+	cmd->header.size = sizeof(cmd->body) + set_vb_size;
+	cmd->body.startBuffer = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->dirty_vb,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+	int ret = 0;
+	unsigned long hit = 0;
+
+	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+	      < VMW_BINDING_NUM_BITS) {
+
+		switch (hit) {
+		case VMW_BINDING_RT_BIT:
+			ret = vmw_emit_set_rt(cbs);
+			break;
+		case VMW_BINDING_PS_BIT:
+			ret = vmw_binding_emit_dirty_ps(cbs);
+			break;
+		case VMW_BINDING_SO_BIT:
+			ret = vmw_emit_set_so(cbs);
+			break;
+		case VMW_BINDING_VB_BIT:
+			ret = vmw_emit_set_vb(cbs);
+			break;
+		default:
+			BUG();
+		}
+		if (ret)
+			return ret;
+
+		__clear_bit(hit, &cbs->dirty);
+		hit++;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader-resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+	__set_bit(VMW_BINDING_PS_SR_BIT,
+		  &cbs->per_shader[biv->shader_slot].dirty);
+	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_vb *bivb =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(bivb->slot, cbs->dirty_vb);
+	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_ib *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	if (rebind) {
+		cmd->body.sid = bi->res->id;
+		cmd->body.format = binding->format;
+		cmd->body.offset = binding->offset;
+	} else {
+		cmd->body.sid = SVGA3D_INVALID_ID;
+		cmd->body.format = 0;
+		cmd->body.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_ctx_binding_state *cbs;
+	int ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+				   false, false);
+	if (ret)
+		return ERR_PTR(ret);
+
+	cbs = vzalloc(sizeof(*cbs));
+	if (!cbs) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cbs->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&cbs->list);
+
+	return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_private *dev_priv = cbs->dev_priv;
+
+	vfree(cbs);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
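Taken together, a hedged lifecycle sketch of a staged binding state as an
execbuf path might use it (error handling elided; vmw_context_binding_state()
returns the context's persistent tracker):

struct vmw_ctx_binding_state *staged = vmw_binding_state_alloc(dev_priv);

if (IS_ERR(staged))
	return PTR_ERR(staged);

/* ... vmw_binding_add(staged, ...) while validating the command stream ... */

vmw_binding_state_commit(vmw_context_binding_state(ctx), staged);
vmw_binding_state_free(staged);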
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+	return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+	 * view id arrays.
+	 */
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+	 */
+	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 0000000..bf2e77a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_cb,
+	vmw_ctx_binding_dx_shader,
+	vmw_ctx_binding_dx_rt,
+	vmw_ctx_binding_sr,
+	vmw_ctx_binding_ds,
+	vmw_ctx_binding_so,
+	vmw_ctx_binding_vb,
+	vmw_ctx_binding_ib,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+	struct vmw_ctx_bindinfo bi;
+	uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 stride;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource binding changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+	struct vmw_ctx_bindinfo_shader shader;
+	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+	unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *ci,
+			    u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			 struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9..3329f62 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
+static struct ttm_place mob_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
 struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@
 	.busy_placement = &mob_placement_flags
 };
 
+struct ttm_placement vmw_mob_ne_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_ne_placement_flags,
+	.busy_placement = &mob_ne_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  * (currently only resources).
@@ -815,13 +828,14 @@
 			    struct ttm_mem_reg *mem)
 {
 	vmw_resource_move_notify(bo, mem);
+	vmw_query_move_notify(bo, mem);
 }
 
 
 /**
  * vmw_swap_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to be swapped out.
+ * @bo: The TTM buffer object about to be swapped out.
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 0000000..5ae8f92
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that a page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
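+/*
+ * With the above, a header plus its inline payload add up to 1024 bytes,
+ * so the dheader DMA pool allocation size divides PAGE_SIZE evenly.
+ */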
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware
+ */
+struct vmw_cmdbuf_context {
+	struct list_head submitted;
+	struct list_head hw_submitted;
+	struct list_head preempted;
+	unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data is protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @headers: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+	struct mutex cur_mutex;
+	struct mutex space_mutex;
+	struct work_struct work;
+	struct vmw_private *dev_priv;
+	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+	struct list_head error;
+	struct drm_mm mm;
+	struct ttm_buffer_object *cmd_space;
+	struct ttm_bo_kmap_obj map_obj;
+	u8 *map;
+	struct vmw_cmdbuf_header *cur;
+	size_t cur_pos;
+	size_t default_size;
+	unsigned max_hw_submitted;
+	spinlock_t lock;
+	struct dma_pool *headers;
+	struct dma_pool *dheaders;
+	struct tasklet_struct tasklet;
+	wait_queue_head_t alloc_queue;
+	wait_queue_head_t idle_queue;
+	bool irq_on;
+	bool using_mob;
+	bool has_pool;
+	dma_addr_t handle;
+	size_t size;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+	struct vmw_cmdbuf_man *man;
+	SVGACBHeader *cb_header;
+	SVGACBContext cb_context;
+	struct list_head list;
+	struct drm_mm_node node;
+	dma_addr_t handle;
+	u8 *cmd;
+	size_t size;
+	size_t reserved;
+	bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+	SVGACBHeader cb_header;
+	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+	size_t page_size;
+	struct drm_mm_node *node;
+	bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+	     ++(_i), ++(_ctx))
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The range manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->cur_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->cur_mutex);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The range manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+	mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+
+	if (WARN_ON_ONCE(!header->inline_space))
+		return;
+
+	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+			       cb_header);
+	dma_pool_free(header->man->dheaders, dheader, header->handle);
+	kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	BUG_ON(!spin_is_locked(&man->lock));
+
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+
+	drm_mm_remove_node(&header->node);
+	wake_up_all(&man->alloc_queue);
+	if (header->cb_header)
+		dma_pool_free(man->headers, header->cb_header,
+			      header->handle);
+	kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	/* Avoid locking if inline_space */
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+	spin_lock_bh(&man->lock);
+	__vmw_cmdbuf_header_free(header);
+	spin_unlock_bh(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+	u32 val;
+
+	if (sizeof(header->handle) > 4)
+		val = (header->handle >> 32);
+	else
+		val = 0;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
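+	/* The low register write hands the buffer off to the device. */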
+	val = (header->handle & 0xFFFFFFFFULL);
+	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+	return header->cb_header->status;
+}
+
+/**
+ * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+	INIT_LIST_HEAD(&ctx->hw_submitted);
+	INIT_LIST_HEAD(&ctx->submitted);
+	INIT_LIST_HEAD(&ctx->preempted);
+	ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+				  struct vmw_cmdbuf_context *ctx)
+{
+	while (ctx->num_hw_submitted < man->max_hw_submitted &&
+	      !list_empty(&ctx->submitted)) {
+		struct vmw_cmdbuf_header *entry;
+		SVGACBStatus status;
+
+		entry = list_first_entry(&ctx->submitted,
+					 struct vmw_cmdbuf_header,
+					 list);
+
+		status = vmw_cmdbuf_header_submit(entry);
+
+		/* This should never happen */
+		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			break;
+		}
+
+		list_del(&entry->list);
+		list_add_tail(&entry->list, &ctx->hw_submitted);
+		ctx->num_hw_submitted++;
+	}
+
+}
+
+/**
+ * vmw_cmdbuf_ctx_process - Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Incremented if the context still has command buffers
+ * queued after processing.
+ *
+ * Submits command buffers to hardware if possible, and processes finished
+ * buffers: typically freeing them, but taking appropriate action on
+ * preemption or error. Wakes up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_context *ctx,
+				   int *notempty)
+{
+	struct vmw_cmdbuf_header *entry, *next;
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+
+	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+		SVGACBStatus status = entry->cb_header->status;
+
+		if (status == SVGA_CB_STATUS_NONE)
+			break;
+
+		list_del(&entry->list);
+		wake_up_all(&man->idle_queue);
+		ctx->num_hw_submitted--;
+		switch (status) {
+		case SVGA_CB_STATUS_COMPLETED:
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		case SVGA_CB_STATUS_COMMAND_ERROR:
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			list_add_tail(&entry->list, &man->error);
+			schedule_work(&man->work);
+			break;
+		case SVGA_CB_STATUS_PREEMPTED:
+			list_add(&entry->list, &ctx->preempted);
+			break;
+		default:
+			WARN_ONCE(true, "Undefined command buffer status.\n");
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		}
+	}
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+	if (!list_empty(&ctx->submitted))
+		(*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate it should be rerun due to
+ * possibly missed IRQs if IRQs have just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+	int notempty = 0;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+	if (man->irq_on && !notempty) {
+		vmw_generic_waiter_remove(man->dev_priv,
+					  SVGA_IRQFLAG_COMMAND_BUFFER,
+					  &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = false;
+	} else if (!man->irq_on && notempty) {
+		vmw_generic_waiter_add(man->dev_priv,
+				       SVGA_IRQFLAG_COMMAND_BUFFER,
+				       &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = true;
+
+		/* Rerun in case we just missed an irq. */
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+			       struct vmw_cmdbuf_header *header,
+			       SVGACBContext cb_context)
+{
+	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+		header->cb_header->dxContext = 0;
+	header->cb_context = cb_context;
+	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
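+	/*
+	 * vmw_cmdbuf_man_process() returns -EAGAIN if it just turned irqs
+	 * on; rerun it once to catch a possibly missed irq.
+	 */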
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+	spin_lock(&man->lock);
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		(void) vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+	struct vmw_cmdbuf_man *man =
+		container_of(work, struct vmw_cmdbuf_man, work);
+	struct vmw_cmdbuf_header *entry, *next;
+	bool restart = false;
+
+	spin_lock_bh(&man->lock);
+	list_for_each_entry_safe(entry, next, &man->error, list) {
+		restart = true;
+		DRM_ERROR("Command buffer error.\n");
+
+		list_del(&entry->list);
+		__vmw_cmdbuf_header_free(entry);
+		wake_up_all(&man->idle_queue);
+	}
+	spin_unlock_bh(&man->lock);
+
+	if (restart && vmw_cmdbuf_startstop(man, true))
+		DRM_ERROR("Failed restarting command buffer context 0.\n");
+
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Check also the preempted queue for pending command buffers.
+ *
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+				bool check_preempted)
+{
+	struct vmw_cmdbuf_context *ctx;
+	bool idle = false;
+	int i;
+
+	spin_lock_bh(&man->lock);
+	vmw_cmdbuf_man_process(man);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (!list_empty(&ctx->submitted) ||
+		    !list_empty(&ctx->hw_submitted) ||
+		    (check_preempted && !list_empty(&ctx->preempted)))
+			goto out_unlock;
+	}
+
+	idle = list_empty(&man->error);
+
+out_unlock:
+	spin_unlock_bh(&man->lock);
+
+	return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	if (!cur)
+		return;
+
+	spin_lock_bh(&man->lock);
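+	/* Nothing was written to @cur; free it rather than submit it. */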
+	if (man->cur_pos == 0) {
+		__vmw_cmdbuf_header_free(cur);
+		goto out_unlock;
+	}
+
+	man->cur->cb_header->length = man->cur_pos;
+	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+	spin_unlock_bh(&man->lock);
+	man->cur = NULL;
+	man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptible when sleeping.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+			 bool interruptible)
+{
+	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+	if (ret)
+		return ret;
+
+	__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptible while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+		    unsigned long timeout)
+{
+	int ret;
+
+	ret = vmw_cmdbuf_cur_flush(man, interruptible);
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		ret = wait_event_interruptible_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	} else {
+		ret = wait_event_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+	if (ret == 0) {
+		if (!vmw_cmdbuf_man_idle(man, true))
+			ret = -EBUSY;
+		else
+			ret = 0;
+	}
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Will hold the size on entry and allocated mm node
+ * on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true and sets
+ * @info->done if the allocation succeeded; returns false otherwise.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_alloc_info *info)
+{
+	int ret;
+
+	if (info->done)
+		return true;
+
+	memset(info->node, 0, sizeof(*info->node));
+	spin_lock_bh(&man->lock);
+	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+					 0, 0,
+					 DRM_MM_SEARCH_DEFAULT,
+					 DRM_MM_CREATE_DEFAULT);
+	spin_unlock_bh(&man->lock);
+	info->done = !ret;
+
+	return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space currently available, it turns on IRQ handling and sleeps waiting
+ * for space to become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
+{
+	struct vmw_cmdbuf_alloc_info info;
+
+	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.node = node;
+	info.done = false;
+
+	/*
+	 * To prevent starvation of large requests, only one allocating call
+	 * at a time waiting for space.
+	 */
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->space_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->space_mutex);
+	}
+
+	/* Try to allocate space without waiting. */
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
+
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		int ret;
+
+		ret = wait_event_interruptible
+			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		if (ret) {
+			vmw_generic_waiter_remove
+				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+				 &man->dev_priv->cmdbuf_waiters);
+			mutex_unlock(&man->space_mutex);
+			return ret;
+		}
+	} else {
+		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+	mutex_unlock(&man->space_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_header *header,
+				 size_t size,
+				 bool interruptible)
+{
+	SVGACBHeader *cb_hdr;
+	size_t offset;
+	int ret;
+
+	if (!man->has_pool)
+		return -ENOMEM;
+
+	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+
+	if (ret)
+		return ret;
+
+	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+					   &header->handle);
+	if (!header->cb_header) {
+		ret = -ENOMEM;
+		goto out_no_cb_header;
+	}
+
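+	/* The range manager node is in pages; convert to bytes. */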
+	header->size = header->node.size << PAGE_SHIFT;
+	cb_hdr = header->cb_header;
+	offset = header->node.start << PAGE_SHIFT;
+	header->cmd = man->map + offset;
+	memset(cb_hdr, 0, sizeof(*cb_hdr));
+	if (man->using_mob) {
+		cb_hdr->flags = SVGA_CB_FLAG_MOB;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobOffset = offset;
+	} else {
+		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+	}
+
+	return 0;
+
+out_no_cb_header:
+	spin_lock_bh(&man->lock);
+	drm_mm_remove_node(&header->node);
+	spin_unlock_bh(&man->lock);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_header *header,
+				   int size)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+	SVGACBHeader *cb_hdr;
+
+	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+		return -ENOMEM;
+
+	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+				 &header->handle);
+	if (!dheader)
+		return -ENOMEM;
+
+	header->inline_space = true;
+	header->size = VMW_CMDBUF_INLINE_SIZE;
+	cb_hdr = &dheader->cb_header;
+	header->cb_header = cb_hdr;
+	header->cmd = dheader->cmd;
+	memset(dheader, 0, sizeof(*dheader));
+	cb_hdr->status = SVGA_CB_STATUS_NONE;
+	cb_hdr->flags = SVGA_CB_FLAG_NONE;
+	cb_hdr->ptr.pa = (u64)header->handle +
+		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @p_header: Points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+		       size_t size, bool interruptible,
+		       struct vmw_cmdbuf_header **p_header)
+{
+	struct vmw_cmdbuf_header *header;
+	int ret = 0;
+
+	*p_header = NULL;
+
+	header = kzalloc(sizeof(*header), GFP_KERNEL);
+	if (!header)
+		return ERR_PTR(-ENOMEM);
+
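+	/* Small requests fit in an inline dheader; larger ones use the main pool. */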
+	if (size <= VMW_CMDBUF_INLINE_SIZE)
+		ret = vmw_cmdbuf_space_inline(man, header, size);
+	else
+		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+	if (ret) {
+		kfree(header);
+		return ERR_PTR(ret);
+	}
+
+	header->man = man;
+	INIT_LIST_HEAD(&header->list);
+	header->cb_header->status = SVGA_CB_STATUS_NONE;
+	*p_header = header;
+
+	return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+				    size_t size,
+				    int ctx_id,
+				    bool interruptible)
+{
+	struct vmw_cmdbuf_header *cur;
+	void *ret;
+
+	if (vmw_cmdbuf_cur_lock(man, interruptible))
+		return ERR_PTR(-ERESTARTSYS);
+
+	cur = man->cur;
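+	/*
+	 * Flush the current buffer if the new commands won't fit, or if
+	 * they target a different DX context than the one already set.
+	 */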
+	if (cur && (size + man->cur_pos > cur->size ||
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		     ctx_id != cur->cb_header->dxContext)))
+		__vmw_cmdbuf_cur_flush(man);
+
+	if (!man->cur) {
+		ret = vmw_cmdbuf_alloc(man,
+				       max_t(size_t, size, man->default_size),
+				       interruptible, &man->cur);
+		if (IS_ERR(ret)) {
+			vmw_cmdbuf_cur_unlock(man);
+			return ret;
+		}
+
+		cur = man->cur;
+	}
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		cur->cb_header->dxContext = ctx_id;
+	}
+
+	cur->reserved = size;
+
+	return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+				  size_t size, bool flush)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	WARN_ON(size > cur->reserved);
+	man->cur_pos += size;
+	if (!size)
+		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+			 int ctx_id, bool interruptible,
+			 struct vmw_cmdbuf_header *header)
+{
+	if (!header)
+		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+	if (size > header->size)
+		return ERR_PTR(-EINVAL);
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		header->cb_header->dxContext = ctx_id;
+	}
+
+	header->reserved = size;
+	return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+		       struct vmw_cmdbuf_header *header, bool flush)
+{
+	if (!header) {
+		vmw_cmdbuf_commit_cur(man, size, flush);
+		return;
+	}
+
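+	/* Flush any existing current buffer and adopt @header as the new one. */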
+	(void) vmw_cmdbuf_cur_lock(man, false);
+	__vmw_cmdbuf_cur_flush(man);
+	WARN_ON(size > header->reserved);
+	man->cur = header;
+	man->cur_pos = size;
+	if (!size)
+		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
+ *
+ * @man: The command buffer manager.
+ */
+void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
+{
+	if (!man)
+		return;
+
+	tasklet_schedule(&man->tasklet);
+}
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+					  const void *command,
+					  size_t size)
+{
+	struct vmw_cmdbuf_header *header;
+	int status;
+	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+
+	memcpy(cmd, command, size);
+	header->cb_header->length = size;
+	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+	spin_lock_bh(&man->lock);
+	status = vmw_cmdbuf_header_submit(header);
+	spin_unlock_bh(&man->lock);
+	vmw_cmdbuf_header_free(header);
+
+	if (status != SVGA_CB_STATUS_COMPLETED) {
+		DRM_ERROR("Device context command failed with status %d\n",
+			  status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+				bool enable)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdStartStop body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+	cmd.body.enable = (enable) ? 1 : 0;
+	cmd.body.context = SVGA_CB_CONTEXT_0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Set the size and allocate the main command buffer space pool,
+ * as well as the default size of the command buffer for
+ * small kernel submissions. If successful, this enables large command
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+			     size_t size, size_t default_size)
+{
+	struct vmw_private *dev_priv = man->dev_priv;
+	bool dummy;
+	int ret;
+
+	if (man->has_pool)
+		return -EINVAL;
+
+	/* First, try to allocate a huge chunk of DMA memory */
+	size = PAGE_ALIGN(size);
+	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+				      &man->handle, GFP_KERNEL);
+	if (man->map) {
+		man->using_mob = false;
+	} else {
+		/*
+		 * DMA memory failed. If we can have command buffers in a
+		 * MOB, try to use that instead. Note that this will
+		 * actually call into the already enabled manager, when
+		 * binding the MOB.
+		 */
+		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+			return -ENOMEM;
+
+		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+				    &vmw_mob_ne_placement, 0, false, NULL,
+				    &man->cmd_space);
+		if (ret)
+			return ret;
+
+		man->using_mob = true;
+		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+				  &man->map_obj);
+		if (ret)
+			goto out_no_map;
+
+		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+	}
+
+	man->size = size;
+	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+	man->has_pool = true;
+	man->default_size = default_size;
+	DRM_INFO("Using command buffers with %s pool.\n",
+		 (man->using_mob) ? "MOB" : "DMA");
+
+	return 0;
+
+out_no_map:
+	if (man->using_mob)
+		ttm_bo_unref(&man->cmd_space);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success or an error pointer
+ * on failure. The command buffer manager will be enabled for submissions of
+ * size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_man *man;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+	int ret;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+		return ERR_PTR(-ENOSYS);
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return ERR_PTR(-ENOMEM);
+
+	man->headers = dma_pool_create("vmwgfx cmdbuf",
+				       &dev_priv->dev->pdev->dev,
+				       sizeof(SVGACBHeader),
+				       64, PAGE_SIZE);
+	if (!man->headers) {
+		ret = -ENOMEM;
+		goto out_no_pool;
+	}
+
+	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+					&dev_priv->dev->pdev->dev,
+					sizeof(struct vmw_cmdbuf_dheader),
+					64, PAGE_SIZE);
+	if (!man->dheaders) {
+		ret = -ENOMEM;
+		goto out_no_dpool;
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_init(ctx);
+
+	INIT_LIST_HEAD(&man->error);
+	spin_lock_init(&man->lock);
+	mutex_init(&man->cur_mutex);
+	mutex_init(&man->space_mutex);
+	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+		     (unsigned long) man);
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	init_waitqueue_head(&man->alloc_queue);
+	init_waitqueue_head(&man->idle_queue);
+	man->dev_priv = dev_priv;
+	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+			       &dev_priv->error_waiters);
+	ret = vmw_cmdbuf_startstop(man, true);
+	if (ret) {
+		DRM_ERROR("Failed starting command buffer context 0.\n");
+		vmw_cmdbuf_man_destroy(man);
+		return ERR_PTR(ret);
+	}
+
+	return man;
+
+out_no_dpool:
+	dma_pool_destroy(man->headers);
+out_no_pool:
+	kfree(man);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+	if (!man->has_pool)
+		return;
+
+	man->has_pool = false;
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (man->using_mob) {
+		(void) ttm_bo_kunmap(&man->map_obj);
+		ttm_bo_unref(&man->cmd_space);
+	} else {
+		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+				  man->size, man->map, man->handle);
+	}
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+	WARN_ON_ONCE(man->has_pool);
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (vmw_cmdbuf_startstop(man, false))
+		DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+				  &man->dev_priv->error_waiters);
+	tasklet_kill(&man->tasklet);
+	(void) cancel_work_sync(&man->work);
+	dma_pool_destroy(man->dheaders);
+	dma_pool_destroy(man->headers);
+	mutex_destroy(&man->cur_mutex);
+	mutex_destroy(&man->space_mutex);
+	kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f..13db8a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non-refcounted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac9287..443d1ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_dma_buffer *dx_query_mob;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
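+	/* Clear each entry under the lock so a concurrent scrub sees a consistent slot. */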
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
 	struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}
 
@@ -135,43 +162,67 @@
 		return;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	if (dev_priv->has_mob) {
 		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
-		if (unlikely(IS_ERR(uctx->man))) {
+		if (IS_ERR(uctx->man)) {
 			ret = PTR_ERR(uctx->man);
 			uctx->man = NULL;
 			goto out_err;
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
+
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -182,7 +233,8 @@
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;
 
@@ -192,7 +244,7 @@
 	} *cmd;
 
 	if (dev_priv->has_mob)
-		return vmw_gb_context_init(dev_priv, res, res_free);
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@
 		return -ENOMEM;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
@@ -232,19 +284,10 @@
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
 
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
-
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -281,7 +324,7 @@
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -309,7 +352,6 @@
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
@@ -346,7 +388,7 @@
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -414,7 +456,231 @@
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so all bindings are scrubbed first
+ * so that this doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+	    readback) {
+		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+		if (vmw_query_readback_all(uctx->dx_query_mob))
+			DRM_ERROR("Failed to read back query states\n");
+	}
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
@@ -435,6 +701,11 @@
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
+
+	(void) vmw_context_bind_dx_query(res, NULL);
+
 	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
@@ -465,8 +736,8 @@
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 
@@ -535,371 +810,29 @@
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
 }
 
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
+	return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
 
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
-{
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
-}
-
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
+	switch (arg->req) {
+	case drm_vmw_context_legacy:
+		return vmw_context_define(dev, rep, file_priv, false);
+	case drm_vmw_context_dx:
+		return vmw_context_define(dev, rep, file_priv, true);
 	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
 		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
 	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	}
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_state_transfer - Commit staged binding info
- *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
- *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
- */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
-{
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
-
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
-
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
-
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
-
-		entry->bi.scrubbed = false;
-	}
-
-	return 0;
+	return -EINVAL;
 }
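
A userspace view of the dispatch above, as a minimal hypothetical sketch: the
union layout (req/rep) and the drm_vmw_context_dx value are taken from this
patch, while the libdrm call and ioctl plumbing around it are assumptions.

	/* Hypothetical: request a DX context through the new ioctl. */
	union drm_vmw_extended_context_arg arg = {
		.req = drm_vmw_context_dx,	/* or drm_vmw_context_legacy */
	};
	int err = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
				      &arg, sizeof(arg));
	/* On success, arg.rep.cid holds the id of the new context. */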
 
 /**
@@ -912,10 +845,93 @@
  */
 struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
-	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
 }
 
 struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
 	return container_of(ctx, struct vmw_user_context, res)->man;
 }
+
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
+}
+
+/**
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}
+
+/**
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context.  If @mob is NULL, then this function will
+ * remove the association between the MOB and the context.  This function
+ * assumes the binding_mutex is held.
+ *
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
+ *
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter.  0 otherwise.
+ */
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+			      struct vmw_dma_buffer *mob)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	if (mob == NULL) {
+		if (uctx->dx_query_mob) {
+			uctx->dx_query_mob->dx_query_ctx = NULL;
+			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			uctx->dx_query_mob = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Can only have one MOB per context for queries */
+	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+		return -EINVAL;
+
+	mob->dx_query_ctx = ctx_res;
+
+	if (!uctx->dx_query_mob)
+		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+
+	return 0;
+}
+
+/**
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
+ *
+ * @ctx_res: The context resource
+ */
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	return uctx->dx_query_mob;
+}
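
The vmw_context_bind_dx_query() contract above (at most one query MOB per
context, NULL drops the association) implies a fixed pattern for callers that
swap MOBs. A minimal sketch with a hypothetical helper, assuming
binding_mutex is held as the comment requires:

	/* Hypothetical caller: replace the context's query MOB. */
	static int example_swap_query_mob(struct vmw_resource *ctx_res,
					  struct vmw_dma_buffer *new_mob)
	{
		int ret = vmw_context_bind_dx_query(ctx_res, new_mob);

		if (ret == -EINVAL) {
			/* A different MOB is bound; drop it, then rebind. */
			vmw_context_bind_dx_query(ctx_res, NULL);
			ret = vmw_context_bind_dx_query(ctx_res, new_mob);
		}
		return ret;
	}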
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 0000000..ce659a1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, which is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	size_t size_read_back;
+	int seen_entries;
+	u32 type;
+	bool scrubbed;
+	struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
+ * @unbind_func: Optional callback to scrub or destroy the resources tracked
+ * by a cotable of this type before the cotable itself is unbound.
+ */
+struct vmw_cotable_info {
+	u32 min_initial_entries;
+	u32 size;
+	void (*unbind_func)(struct vmw_private *, struct list_head *,
+			    bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context when we remove
+ * bindings before scrubbing a cotable.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+	SVGA_COTABLE_RTVIEW,
+	SVGA_COTABLE_DSVIEW,
+	SVGA_COTABLE_SRVIEW,
+	SVGA_COTABLE_DXSHADER,
+	SVGA_COTABLE_ELEMENTLAYOUT,
+	SVGA_COTABLE_BLENDSTATE,
+	SVGA_COTABLE_DEPTHSTENCIL,
+	SVGA_COTABLE_RASTERIZERSTATE,
+	SVGA_COTABLE_SAMPLER,
+	SVGA_COTABLE_STREAMOUTPUT,
+	SVGA_COTABLE_DXQUERY,
+};
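
The ordering requirement above is easy to break silently when a new cotable
type is added. One way to keep the table in sync with the type count is a
compile-time guard; a hypothetical sketch, not part of this patch:

	/* Hypothetical guard: the scrub-order table must list every
	 * DX10 cotable type exactly once. */
	static inline void vmw_cotable_scrub_order_check(void)
	{
		BUILD_BUG_ON(ARRAY_SIZE(vmw_cotable_scrub_order) !=
			     SVGA_COTABLE_DX10_MAX);
	}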
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+	.res_type = vmw_res_cotable,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "context guest backed object tables",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_cotable_create,
+	.destroy = vmw_cotable_destroy,
+	.bind = vmw_cotable_bind,
+	.unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+	res->id = -1;
+	return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = &res->backup->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = vcotbl->ctx->id;
+	cmd->body.type = vcotbl->type;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vcotbl->scrubbed = false;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf)
+{
+	/*
+	 * The create() callback may have changed @res->backup without
+	 * the caller noticing, and with val_buf->bo still pointing to
+	 * the old backup buffer. Although hackish, and not used currently,
+	 * take the opportunity to correct the value here so that it's not
+	 * misused in the future.
+	 */
+	val_buf->bo = &res->backup->base;
+
+	return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the backup
+ * buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t submit_size;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd0;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd1;
+
+	if (vcotbl->scrubbed)
+		return 0;
+
+	if (co_info[vcotbl->type].unbind_func)
+		co_info[vcotbl->type].unbind_func(dev_priv,
+						  &vcotbl->resource_list,
+						  readback);
+	submit_size = sizeof(*cmd1);
+	if (readback)
+		submit_size += sizeof(*cmd0);
+
+	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+	if (!cmd1) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	vcotbl->size_read_back = 0;
+	if (readback) {
+		cmd0 = (void *) cmd1;
+		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd0->header.size = sizeof(cmd0->body);
+		cmd0->body.cid = vcotbl->ctx->id;
+		cmd0->body.type = vcotbl->type;
+		cmd1 = (void *) &cmd0[1];
+		vcotbl->size_read_back = res->backup_size;
+	}
+	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.cid = vcotbl->ctx->id;
+	cmd1->body.type = vcotbl->type;
+	cmd1->body.mobid = SVGA3D_INVALID_ID;
+	cmd1->body.validSizeInBytes = 0;
+	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vcotbl->scrubbed = true;
+
+	/* Trigger a create() on next validate. */
+	res->id = -1;
+
+	return 0;
+}
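
The two restrictions above pair every scrub with either an unscrub (before
GPU reuse) or an unbind (before CPU access). A hedged sketch of a swap-out
style caller, with a hypothetical helper name:

	/* Hypothetical: make the device forget the cotable, then rebind
	 * it before the GPU touches it again (restriction 1). The backup
	 * buffer must be validated and reserved before the unscrub, per
	 * vmw_cotable_unscrub(). */
	static int example_cotable_swapout(struct vmw_resource *res)
	{
		int ret = vmw_cotable_scrub(res, true); /* read back contents */

		if (ret)
			return ret;
		/* ... the device no longer references the MOB here ... */
		return vmw_cotable_unscrub(res);
	}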
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	if (list_empty(&res->mob_head))
+		return 0;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!vcotbl->scrubbed)
+		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+	mutex_unlock(&dev_priv->binding_mutex);
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(bo, fence);
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	if (!vcotbl->scrubbed) {
+		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+					  SVGA3D_INVALID_ID);
+		if (!cmd) {
+			DRM_ERROR("Failed reserving FIFO space for cotable "
+				  "readback.\n");
+			return -ENOMEM;
+		}
+		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.cid = vcotbl->ctx->id;
+		cmd->body.type = vcotbl->type;
+		vcotbl->size_read_back = res->backup_size;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+	size_t old_size = res->backup_size;
+	size_t old_size_read_back = vcotbl->size_read_back;
+	size_t cur_size_read_back;
+	struct ttm_bo_kmap_obj old_map, new_map;
+	int ret;
+	size_t i;
+
+	ret = vmw_cotable_readback(res);
+	if (ret)
+		return ret;
+
+	cur_size_read_back = vcotbl->size_read_back;
+	vcotbl->size_read_back = old_size_read_back;
+
+	/*
+	 * While the device is processing, allocate and reserve a buffer object
+	 * for the new COTable. Initially pin the buffer object to make sure
+	 * we can use tryreserve without failure.
+	 */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (ret) {
+		DRM_ERROR("Failed initializing new cotable MOB.\n");
+		return ret;
+	}
+
+	bo = &buf->base;
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+	ret = ttm_bo_wait(old_bo, false, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed waiting for cotable unbind.\n");
+		goto out_wait;
+	}
+
+	/*
+	 * Do a page-by-page copy of COTables. This eliminates slow vmap()s.
+	 * This should really be a TTM utility.
+	 */
+	for (i = 0; i < old_bo->num_pages; ++i) {
+		bool dummy;
+
+		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping old COTable on resize.\n");
+			goto out_wait;
+		}
+		ret = ttm_bo_kmap(bo, i, 1, &new_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping new COTable on resize.\n");
+			goto out_map_new;
+		}
+		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+		       ttm_kmap_obj_virtual(&old_map, &dummy),
+		       PAGE_SIZE);
+		ttm_bo_kunmap(&new_map);
+		ttm_bo_kunmap(&old_map);
+	}
+
+	/* Unpin new buffer, and switch backup buffers. */
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed validating new COTable backup buffer.\n");
+		goto out_wait;
+	}
+
+	res->backup = buf;
+	res->backup_size = new_size;
+	vcotbl->size_read_back = cur_size_read_back;
+
+	/*
+	 * Now tell the device to switch. If this fails, then we need to
+	 * revert the full resize.
+	 */
+	ret = vmw_cotable_unscrub(res);
+	if (ret) {
+		DRM_ERROR("Failed switching COTable backup buffer.\n");
+		res->backup = old_buf;
+		res->backup_size = old_size;
+		vcotbl->size_read_back = old_size_read_back;
+		goto out_wait;
+	}
+
+	/* Let go of the old mob. */
+	list_del(&res->mob_head);
+	list_add_tail(&res->mob_head, &buf->res_list);
+	vmw_dmabuf_unreference(&old_buf);
+	res->id = vcotbl->type;
+
+	return 0;
+
+out_map_new:
+	ttm_bo_kunmap(&old_map);
+out_wait:
+	ttm_bo_unreserve(bo);
+	vmw_dmabuf_unreference(&buf);
+
+	return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence is instead used for two
+ * things.
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	size_t new_size = res->backup_size;
+	size_t needed_size;
+	int ret;
+
+	/* Check whether we need to resize the cotable */
+	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+	while (needed_size > new_size)
+		new_size *= 2;
+
+	if (likely(new_size <= res->backup_size)) {
+		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+			ret = vmw_cotable_unscrub(res);
+			if (ret)
+				return ret;
+		}
+		res->id = vcotbl->type;
+		return 0;
+	}
+
+	return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+	(void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(res);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource.
+ * The cotable resource will not take a refcount on the context.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+				       struct vmw_resource *ctx,
+				       u32 type)
+{
+	struct vmw_cotable *vcotbl;
+	int ret;
+	u32 num_entries;
+
+	if (unlikely(cotable_acc_size == 0))
+		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   cotable_acc_size, false, true);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+	if (unlikely(vcotbl == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_alloc;
+	}
+
+	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+				vmw_cotable_free, &vmw_cotable_func);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	INIT_LIST_HEAD(&vcotbl->resource_list);
+	vcotbl->res.id = type;
+	vcotbl->res.backup_size = PAGE_SIZE;
+	num_entries = PAGE_SIZE / co_info[type].size;
+	if (num_entries < co_info[type].min_initial_entries) {
+		vcotbl->res.backup_size = co_info[type].min_initial_entries *
+			co_info[type].size;
+		vcotbl->res.backup_size =
+			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	vcotbl->scrubbed = true;
+	vcotbl->seen_entries = -1;
+	vcotbl->type = type;
+	vcotbl->ctx = ctx;
+
+	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+	return &vcotbl->res;
+
+out_no_init:
+	kfree(vcotbl);
+out_no_alloc:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+			  (unsigned) vcotbl->type, id);
+		return -EINVAL;
+	}
+
+	if (vcotbl->seen_entries < id) {
+		/* Trigger a call to create() on next validate */
+		res->id = -1;
+		vcotbl->seen_entries = id;
+	}
+
+	return 0;
+}
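
A worked example of how the notify path interacts with vmw_cotable_create()
above, using hypothetical numbers:

	/* Hypothetical: 64-byte entries, one-page (4096-byte) backup,
	 * so 64 entries fit. Seeing id 200 invalidates the resource. */
	vmw_cotable_notify(res, 200);	/* seen_entries = 200, res->id = -1 */
	/*
	 * The next validation calls vmw_cotable_create():
	 * needed_size = (200 + 1) * 64 = 12864 bytes, so new_size doubles
	 * 4096 -> 8192 -> 16384, and vmw_cotable_resize() switches the MOB.
	 */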
+
+/**
+ * vmw_cotable_add_resource - add a resource to the cotable's list of active
+ * resources.
+ *
+ * @res: pointer to the struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+	struct vmw_cotable *vcotbl =
+		container_of(res, struct vmw_cotable, res);
+
+	list_add_tail(head, &vcotbl->resource_list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375..299925a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
 
 
 /**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement and pin it.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer if true.
+ * @placement:  The placement to pin it in.
  * @interruptible:  Use interruptible wait.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
-			    struct vmw_dma_buffer *buf,
-			    struct ttm_placement *placement,
-			    bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				struct ttm_placement *placement,
+				bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -66,6 +61,8 @@
 		goto err;
 
 	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 
@@ -75,12 +72,10 @@
 }
 
 /**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -90,55 +85,34 @@
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement *placement;
 	int ret;
 
 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
-	/**
-	 * Put BO in VRAM if there is space, otherwise as a GMR.
-	 * If there is no space in VRAM and GMR ids are all used up,
-	 * start evicting GMRs to make room. If the DMA buffer can't be
-	 * used as a GMR, this will return -ENOMEM.
-	 */
-
-	if (pin)
-		placement = &vmw_vram_gmr_ne_placement;
-	else
-		placement = &vmw_vram_gmr_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
-		goto err_unreserve;
+		goto out_unreserve;
 
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 
-	/**
-	 * If that failed, try VRAM again, this time evicting
-	 * previous contents.
-	 */
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
-
-err_unreserve:
 	ttm_bo_unreserve(bo);
 err:
 	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@
 }
 
 /**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-		       struct vmw_dma_buffer *buf,
-		       bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+			   struct vmw_dma_buffer *buf,
+			   bool interruptible)
 {
-	struct ttm_placement *placement;
-
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       placement,
-				       interruptible);
+	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+					   interruptible);
 }
 
 /**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
+ * @buf:  DMA buffer to pin.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				    struct vmw_dma_buffer *buf,
+				    bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
 	int ret = 0;
 
-	if (pin)
-		place = vmw_vram_ne_placement.placement[0];
-	else
-		place = vmw_vram_placement.placement[0];
+	place = vmw_vram_placement.placement[0];
 	place.lpfn = bo->num_pages;
-
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	/* Is this buffer already in vram but not at the start of it? */
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
@@ -230,8 +190,10 @@
 
 	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
-	/* For some reason we didn't up at the start of vram */
+	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err_unlock:
@@ -240,13 +202,10 @@
 	return ret;
 }
 
-
 /**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to unpin.
@@ -259,16 +218,25 @@
 		     struct vmw_dma_buffer *buf,
 		     bool interruptible)
 {
-	/*
-	 * We could in theory early out if the buffer is
-	 * unpinned but we need to lock and reserve the buffer
-	 * anyways so we don't gain much by that.
-	 */
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       &vmw_evictable_placement,
-				       interruptible);
-}
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
 
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
 
 /**
  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@
 
 
 /**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
 	struct ttm_place pl;
 	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
 	pl.fpfn = 0;
 	pl.lpfn = 0;
 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
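
The pin_count bookkeeping added to vmw_bo_pin_reserved() above makes pinning
nest, so only the outermost pin and the final unpin touch the placement. A
minimal sketch with a hypothetical caller; the buffer must be reserved, per
the function's contract:

	/* Hypothetical: balanced nested pins. */
	static void example_nested_pin(struct vmw_dma_buffer *vbo)
	{
		vmw_bo_pin_reserved(vbo, true);	 /* 0 -> 1: placement pinned */
		vmw_bo_pin_reserved(vbo, true);	 /* 1 -> 2: early return */
		vmw_bo_pin_reserved(vbo, false); /* 2 -> 1: still pinned */
		vmw_bo_pin_reserved(vbo, false); /* 1 -> 0: pin released */
	}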
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5c..e13b20b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
 #define DRM_IOCTL_VMW_SYNCCPU					\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 		 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
+		struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+		      vmw_extended_context_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@
 		DRM_INFO("  Command Buffers 2.\n");
 	if (capabilities & SVGA_CAP_GBOBJECTS)
 		DRM_INFO("  Guest Backed Resources.\n");
+	if (capabilities & SVGA_CAP_DX)
+		DRM_INFO("  DX Features.\n");
 }
 
 /**
@@ -296,30 +305,31 @@
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct ttm_buffer_object *bo;
+	struct vmw_dma_buffer *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
 
 	/*
-	 * Create the bo as pinned, so that a tryreserve will
+	 * Create the vbo as pinned, so that a tryreserve will
 	 * immediately succeed. This is because we're the only
 	 * user of the bo currently.
 	 */
-	ret = ttm_bo_create(&dev_priv->bdev,
-			    PAGE_SIZE,
-			    ttm_bo_type_device,
-			    &vmw_sys_ne_placement,
-			    0, false, NULL,
-			    &bo);
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
 
+	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+			      &vmw_sys_ne_placement, false,
+			      &vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
 	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);
 
-	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 	if (likely(ret == 0)) {
 		result = ttm_kmap_obj_virtual(&map, &dummy);
 		result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@
 		result->result32 = 0xff;
 		ttm_bo_kunmap(&map);
 	}
-	vmw_bo_pin(bo, false);
-	ttm_bo_unreserve(bo);
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		ttm_bo_unref(&bo);
+		vmw_dmabuf_unreference(&vbo);
 	} else
-		dev_priv->dummy_query_bo = bo;
+		dev_priv->dummy_query_bo = vbo;
 
 	return ret;
 }
 
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because this function reverts vmw_release_device_early and is intended
+ * for use by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->cman) {
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+					       256*4096, 2*4096);
+		if (ret) {
+			struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+			dev_priv->cman = NULL;
+			vmw_cmdbuf_man_destroy(man);
+		}
+	}
+
+	return 0;
+}
+
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
 	int ret;
@@ -349,14 +396,16 @@
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
-	if (dev_priv->has_mob) {
-		ret = vmw_otables_setup(dev_priv);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to initialize "
-				  "guest Memory OBjects.\n");
-			goto out_no_mob;
-		}
+	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+	if (IS_ERR(dev_priv->cman)) {
+		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
 	}
+
+	ret = vmw_request_device_late(dev_priv);
+	if (ret)
+		goto out_no_mob;
+
 	ret = vmw_dummy_query_bo_create(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_query_bo;
@@ -364,15 +413,29 @@
 	return 0;
 
 out_no_query_bo:
-	if (dev_priv->has_mob)
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+	if (dev_priv->has_mob) {
+		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 		vmw_otables_takedown(dev_priv);
+	}
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 out_no_mob:
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 	return ret;
 }
 
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
 {
 	/*
 	 * Previous destructions should've released
@@ -381,65 +444,31 @@
 
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
-	ttm_bo_unref(&dev_priv->dummy_query_bo);
-	if (dev_priv->has_mob)
+	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+
+	if (dev_priv->has_mob) {
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 		vmw_otables_takedown(dev_priv);
-	vmw_fence_fifo_down(dev_priv->fman);
-	vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
-
-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
-			bool unhide_svga)
-{
-	int ret = 0;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			--dev_priv->num_3d_resources;
-	} else if (unhide_svga) {
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
-			  ~SVGA_REG_ENABLE_HIDE);
 	}
-
-	mutex_unlock(&dev_priv->release_mutex);
-	return ret;
 }
 
 /**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
  */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
-			 bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
 {
-	int32_t n3d;
+	vmw_fence_fifo_down(dev_priv->fman);
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(--dev_priv->num_3d_resources == 0))
-		vmw_release_device(dev_priv);
-	else if (hide_svga)
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
-			  SVGA_REG_ENABLE_HIDE);
-
-	n3d = (int32_t) dev_priv->num_3d_resources;
-	mutex_unlock(&dev_priv->release_mutex);
-
-	BUG_ON(n3d < 0);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
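
The early/late split exists so that TTM buffer management can be torn down between the two halves; the unload path later in this patch uses exactly this ordering:

    /* Takedown ordering from vmw_driver_unload() below. */
    vmw_release_device_early(dev_priv);            /* stop cmd submission */
    if (dev_priv->has_mob)
            (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    (void) ttm_bo_device_release(&dev_priv->bdev); /* buffer management */
    vmw_release_device_late(dev_priv);             /* may wait on fences */
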
 
 /**
@@ -603,6 +632,7 @@
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);
+	spin_lock_init(&dev_priv->svga_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@
 				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 		dev_priv->max_mob_size =
 			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
-	} else
+		dev_priv->stdu_max_width =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+		dev_priv->stdu_max_height =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+		dev_priv->texture_max_width = vmw_read(dev_priv,
+						       SVGA_REG_DEV_CAP);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+		dev_priv->texture_max_height = vmw_read(dev_priv,
+							SVGA_REG_DEV_CAP);
+	} else {
+		dev_priv->texture_max_width = 8192;
+		dev_priv->texture_max_height = 8192;
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
+	}
+
+	vmw_print_capabilities(dev_priv->capabilities);
 
 	ret = vmw_dma_masks(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	/*
-	 * Limit back buffer size to VRAM size.  Remove this once
-	 * screen targets are implemented.
-	 */
-	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
-		dev_priv->prim_bb_mem = dev_priv->vram_size;
-
-	vmw_print_capabilities(dev_priv->capabilities);
-
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
 
-	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
-				 &vmw_bo_driver,
-				 dev->anon_inode->i_mapping,
-				 VMWGFX_FILE_PAGE_OFFSET,
-				 false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
-		goto out_err1;
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
@@ -787,13 +815,28 @@
 		goto out_no_fman;
 	}
 
+	ret = ttm_bo_device_init(&dev_priv->bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &vmw_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 VMWGFX_FILE_PAGE_OFFSET,
+				 false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+		goto out_no_bdev;
+	}
 
+	/*
+	 * Enable VRAM, but initially don't use it until SVGA is enabled and
+	 * unhidden.
+	 */
 	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 			     (dev_priv->vram_size >> PAGE_SHIFT));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 		goto out_no_vram;
 	}
+	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
 	dev_priv->has_gmr = true;
 	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@
 		}
 	}
 
-	vmw_kms_save_vga(dev_priv);
+	if (dev_priv->has_mob) {
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+	}
 
-	/* Start kms and overlay systems, needs fifo. */
+
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
 
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		goto out_no_fifo;
+
+	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
 	if (dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			goto out_no_fifo;
+		vmw_fifo_resource_inc(dev_priv);
+		vmw_svga_enable(dev_priv);
 		vmw_fb_init(dev_priv);
 	}
 
@@ -838,13 +891,14 @@
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
-	vmw_kms_restore_vga(dev_priv);
 	if (dev_priv->has_mob)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_no_vram:
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	kfree(dev_priv);
 	return ret;
 }
@@ -882,19 +936,24 @@
 		drm_ht_remove(&dev_priv->ctx.res_ht);
 	vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
+		vmw_fb_off(dev_priv);
 		vmw_fb_close(dev_priv);
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, false);
+		vmw_fifo_resource_dec(dev_priv);
+		vmw_svga_disable(dev_priv);
 	}
+
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
 
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 
+	vmw_release_device_early(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	(void) ttm_bo_device_release(&dev_priv->bdev);
+	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -906,7 +965,8 @@
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,10 +1052,15 @@
 	}
 
 	/*
-	 * Check if we were previously master, but now dropped.
+	 * Check if we were previously master, but now dropped. In that
+	 * case, allow at least render node functionality.
 	 */
 	if (vmw_fp->locked_master) {
 		mutex_unlock(&dev->master_mutex);
+
+		if (flags & DRM_RENDER_ALLOW)
+			return NULL;
+
 		DRM_ERROR("Dropped master trying to access ioctl that "
 			  "requires authentication.\n");
 		return ERR_PTR(-EACCES);
@@ -1044,17 +1109,27 @@
 		const struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-		if (unlikely(ioctl->cmd != cmd)) {
-			DRM_ERROR("Invalid command format, ioctl %d\n",
-				  nr - DRM_COMMAND_BASE);
-			return -EINVAL;
+		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+			if (unlikely(ret != 0))
+				return ret;
+
+			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+				goto out_io_encoding;
+
+			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+							_IOC_SIZE(cmd));
 		}
+
+		if (unlikely(ioctl->cmd != cmd))
+			goto out_io_encoding;
+
 		flags = ioctl->flags;
 	} else if (!drm_ioctl_flags(nr, &flags))
 		return -EINVAL;
 
 	vmaster = vmw_master_check(dev, file_priv, flags);
-	if (unlikely(IS_ERR(vmaster))) {
+	if (IS_ERR(vmaster)) {
 		ret = PTR_ERR(vmaster);
 
 		if (ret != -ERESTARTSYS)
@@ -1068,6 +1143,12 @@
 		ttm_read_unlock(&vmaster->lock);
 
 	return ret;
+
+out_io_encoding:
+	DRM_ERROR("Invalid command format, ioctl %d\n",
+		  nr - DRM_COMMAND_BASE);
+
+	return -EINVAL;
 }
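
The DRM_VMW_EXECBUF special case above exists because the execbuf argument struct is versioned and may grow: instead of requiring an exact ioctl command match, the user-declared argument size is extracted from the ioctl number and forwarded. A hedged sketch of what the size-aware handler is expected to do (illustrative only, not the body of vmw_execbuf_ioctl() from this patch):

    /* The caller passes _IOC_SIZE(cmd) as @size; copy no more than
     * the kernel's view of the struct, then dispatch on the version.
     */
    struct drm_vmw_execbuf_arg arg;
    size_t copy_size = min(size, sizeof(arg));

    memset(&arg, 0, sizeof(arg));
    if (copy_from_user(&arg, (void __user *)data, copy_size))
            return -EFAULT;
    /* ... validate arg.version, then run vmw_execbuf_process() ... */
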
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1167,11 @@
 
 static void vmw_lastclose(struct drm_device *dev)
 {
-	struct drm_crtc *crtc;
-	struct drm_mode_set set;
-	int ret;
-
-	set.x = 0;
-	set.y = 0;
-	set.fb = NULL;
-	set.mode = NULL;
-	set.connectors = NULL;
-	set.num_connectors = 0;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		set.crtc = crtc;
-		ret = drm_mode_set_config_internal(&set);
-		WARN_ON(ret != 0);
-	}
-
 }
 
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
-	INIT_LIST_HEAD(&vmaster->fb_surf);
-	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1199,6 @@
 	kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
 			  struct drm_file *file_priv,
 			  bool from_open)
@@ -1148,27 +1209,13 @@
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	if (!dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			return ret;
-		vmw_kms_save_vga(dev_priv);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-	}
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
 		if (unlikely(ret != 0))
-			goto out_no_active_lock;
+			return ret;
 
 		ttm_lock_set_kill(&active->lock, true, SIGTERM);
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to clean VRAM on "
-				  "master drop.\n");
-		}
-
 		dev_priv->active_master = NULL;
 	}
 
@@ -1182,14 +1229,6 @@
 	dev_priv->active_master = vmaster;
 
 	return 0;
-
-out_no_active_lock:
-	if (!dev_priv->enable_fb) {
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
-	return ret;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1253,9 @@
 	}
 
 	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	if (!dev_priv->enable_fb) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
+	if (!dev_priv->enable_fb)
+		vmw_svga_disable(dev_priv);
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1265,76 @@
 		vmw_fb_on(dev_priv);
 }
 
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	__vmw_svga_enable(dev_priv);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	ttm_write_lock(&dev_priv->reservation_sem, false);
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		spin_unlock(&dev_priv->svga_lock);
+		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+			DRM_ERROR("Failed evicting VRAM buffers.\n");
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	} else
+		spin_unlock(&dev_priv->svga_lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+}
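
Note the locking shape here: ttm_bo_evict_mm() may sleep, so vmw_svga_disable() clears use_type under the svga_lock spinlock and drops the lock before evicting, while __vmw_svga_disable() leaves VRAM eviction to a caller that already holds the reservation sem. The two call sites in this patch illustrate the split:

    /* vmw_pm_freeze() below: the PM notifier is assumed to have
     * suspend-locked the reservation sem and emptied VRAM already,
     * so the __ variant is used.
     */
    if (dev_priv->enable_fb)
            __vmw_svga_disable(dev_priv);

    /* vmw_master_drop() above: no lock held; the wrapper takes the
     * sem exclusively and evicts VRAM itself.
     */
    if (!dev_priv->enable_fb)
            vmw_svga_disable(dev_priv);
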
 
 static void vmw_remove(struct pci_dev *pdev)
 {
@@ -1250,23 +1352,26 @@
 
 	switch (val) {
 	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
+		if (dev_priv->enable_fb)
+			vmw_fb_off(dev_priv);
 		ttm_suspend_lock(&dev_priv->reservation_sem);
 
-		/**
+		/*
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents is moved to swappable memory.
 		 */
 		vmw_execbuf_release_pinned_bo(dev_priv);
 		vmw_resource_evict_all(dev_priv);
+		vmw_release_device_early(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
-
+		vmw_fence_fifo_down(dev_priv->fman);
 		break;
 	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
 	case PM_POST_RESTORE:
+		vmw_fence_fifo_up(dev_priv->fman);
 		ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+		if (dev_priv->enable_fb)
+			vmw_fb_on(dev_priv);
 		break;
 	case PM_RESTORE_PREPARE:
 		break;
@@ -1276,20 +1381,13 @@
 	return 0;
 }
 
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	if (dev_priv->num_3d_resources != 0) {
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
+	if (dev_priv->refuse_hibernation)
 		return -EBUSY;
-	}
 
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
@@ -1321,56 +1419,62 @@
 	return vmw_pci_resume(pdev);
 }
 
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	/**
-	 * Release 3d reference held by fbdev and potentially
-	 * stop fifo.
-	 */
 	dev_priv->suspended = true;
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_dec(dev_priv, true);
+		vmw_fifo_resource_dec(dev_priv);
 
-	if (dev_priv->num_3d_resources != 0) {
-
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
-
+	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
 		if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, true);
+			vmw_fifo_resource_inc(dev_priv);
+		WARN_ON(vmw_request_device_late(dev_priv));
 		dev_priv->suspended = false;
 		return -EBUSY;
 	}
 
+	if (dev_priv->enable_fb)
+		__vmw_svga_disable(dev_priv);
+
+	vmw_release_device_late(dev_priv);
+
 	return 0;
 }
 
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
 
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
 
-	/**
-	 * Reclaim 3d reference held by fbdev and potentially
-	 * start fifo.
-	 */
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
+
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		return ret;
+
+	if (dev_priv->enable_fb)
+		__vmw_svga_enable(dev_priv);
 
 	dev_priv->suspended = false;
+
+	return 0;
 }
 
 static const struct dev_pm_ops vmw_pm_ops = {
-	.prepare = vmw_pm_prepare,
-	.complete = vmw_pm_complete,
+	.freeze = vmw_pm_freeze,
+	.thaw = vmw_pm_restore,
+	.restore = vmw_pm_restore,
 	.suspend = vmw_pm_suspend,
 	.resume = vmw_pm_resume,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 0336d49..6d02de6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 /*
  * Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
 			VMWGFX_NUM_GB_SHADER +\
 			VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
+	s32 pin_count;
+	/* Not ref-counted.  Protected by binding_mutex */
+	struct vmw_resource *dx_query_ctx;
 };
 
 /**
@@ -113,6 +118,7 @@
 	bool backup_dirty; /* Protected by backup buffer reserved */
 	struct vmw_dma_buffer *backup;
 	unsigned long backup_offset;
+	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@
 	vmw_res_surface,
 	vmw_res_stream,
 	vmw_res_shader,
+	vmw_res_dx_context,
+	vmw_res_cotable,
+	vmw_res_view,
 	vmw_res_max
 };
 
@@ -137,7 +146,8 @@
  * Resources that are managed using command streams.
  */
 enum vmw_cmdbuf_res_type {
-	vmw_cmdbuf_res_compat_shader
+	vmw_cmdbuf_res_shader,
+	vmw_cmdbuf_res_view
 };
 
 struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
+	uint32_t array_size;
 	/* TODO so far just an extra pointer */
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
+	struct list_head view_list;
 };
 
 struct vmw_marker_queue {
@@ -176,14 +188,15 @@
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
+	bool dx;
 };
 
 struct vmw_relocation {
@@ -264,70 +277,15 @@
 };
 
 /*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
  */
-enum vmw_ctx_binding_type {
-	vmw_ctx_binding_shader,
-	vmw_ctx_binding_rt,
-	vmw_ctx_binding_tex,
-	vmw_ctx_binding_max
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
 };
 
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-	struct vmw_resource *ctx;
-	struct vmw_resource *res;
-	enum vmw_ctx_binding_type bt;
-	bool scrubbed;
-	union {
-		SVGA3dShaderType shader_type;
-		SVGA3dRenderTargetType rt_type;
-		uint32 texture_stage;
-	} i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- *                        - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-	struct list_head ctx_list;
-	struct list_head res_list;
-	struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-	struct list_head list;
-	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	struct ttm_buffer_object *cur_query_bo;
+	struct list_head ctx_resource_list; /* For contexts and cotables */
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
-	struct vmw_ctx_binding_state staged_bindings;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
+	struct vmw_resource_val_node *dx_ctx_node;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
@@ -358,8 +322,6 @@
 
 struct vmw_master {
 	struct ttm_lock lock;
-	struct mutex fb_surf_mutex;
-	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@
 	uint32_t pos_y;
 };
 
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
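+ * @enabled:        Whether the otable is enabled.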
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
+	u32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
+	bool has_dx;
 
 	/*
 	 * VGA registers.
@@ -420,6 +407,7 @@
 	 */
 
 	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by irq_lock */
+	int error_waiters; /* Protected by irq_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -484,6 +474,7 @@
 
 	bool stealth;
 	bool enable_fb;
+	spinlock_t svga_lock;
 
 	/**
 	 * Master management.
@@ -493,9 +484,10 @@
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
 	bool suspended;
+	bool refuse_hibernation;
 
 	struct mutex release_mutex;
-	uint32_t num_3d_resources;
+	atomic_t num_fifo_resources;
 
 	/*
 	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct ttm_buffer_object *dummy_query_bo;
-	struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@
 	/*
 	 * Guest Backed stuff
 	 */
-	struct ttm_buffer_object *otable_bo;
-	struct vmw_otable *otables;
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@
 	return val;
 }
 
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
 
 /**
  * GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool switch_backup,
 				   struct vmw_dma_buffer *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 				     struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
 			    struct vmw_dma_buffer *bo,
 			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+			     struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
 			       void *kernel_commands,
 			       uint32_t command_size,
 			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+
 
 /**
 * IRQs and waiting - vmwgfx_irq.c
@@ -833,8 +844,8 @@
 
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			     uint32_t seqno, bool interruptible,
-			     unsigned long timeout);
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@
 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
 extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			  uint32_t seqno);
+			   uint32_t seqno);
 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			  uint32_t signaled_seqno);
+			   uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
 			struct vmw_marker_queue *queue, uint32_t us);
 
@@ -908,12 +923,6 @@
 		    uint32_t sid, int32_t destX, int32_t destY,
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
@@ -927,6 +936,10 @@
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
 /**
  * Overlay control - vmwgfx_overlay.c
  */
@@ -982,27 +995,33 @@
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     int id,
 			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-				   const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-				   struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1025,6 +1044,16 @@
 			     uint32_t handle, int *id);
 extern int vmw_surface_validate(struct vmw_private *dev_priv,
 				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@
 				 SVGA3dShaderType shader_type,
 				 size_t size,
 				 struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-				    u32 user_key, SVGA3dShaderType shader_type,
-				    struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
 extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
 
 /*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@
 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 				 enum vmw_cmdbuf_res_type res_type,
 				 u32 user_key,
-				 struct list_head *list);
+				 struct list_head *list,
+				 struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
 
 
 /**
@@ -1116,4 +1195,14 @@
 {
 	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
 }
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
 #endif
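
These helpers replace the mutex-protected num_3d_resources counter with a plain atomic; callers now pair the reference count with an explicit SVGA enable/disable, as in the load path earlier in this patch:

    /* From vmw_driver_load(): fbdev holds one fifo resource reference. */
    if (dev_priv->enable_fb) {
            vmw_fifo_resource_inc(dev_priv);
            vmw_svga_enable(dev_priv);
            vmw_fb_init(dev_priv);
    }
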
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bc..b565654 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
@@ -59,8 +61,11 @@
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference, so resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
 	struct list_head head;
@@ -69,8 +74,9 @@
 	struct vmw_dma_buffer *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
-	bool first_usage;
-	bool no_buffer_needed;
+	u32 first_usage : 1;
+	u32 switching_backup : 1;
+	u32 no_buffer_needed : 1;
 };
 
 /**
@@ -92,22 +98,40 @@
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 				       (_gb_disable), (_gb_enable)}
 
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct vmw_dma_buffer *vbo,
+				   bool validate_as_mob,
+				   uint32_t *p_val_node);
+
+
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
-					bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+				    bool backoff)
 {
 	struct vmw_resource_val_node *val;
+	struct list_head *list = &sw_context->resource_list;
+
+	if (sw_context->dx_query_mob && !backoff)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 
 	list_for_each_entry(val, list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *new_backup =
-			backoff ? NULL : val->new_backup;
+		bool switch_backup =
+			(backoff) ? false : val->switching_backup;
 
 		/*
 		 * Transfer staged context bindings to the
@@ -115,18 +139,71 @@
 		 */
 		if (unlikely(val->staged_bindings)) {
 			if (!backoff) {
-				vmw_context_binding_state_transfer
-					(val->res, val->staged_bindings);
+				vmw_binding_state_commit
+					(vmw_context_binding_state(val->res),
+					 val->staged_bindings);
 			}
-			kfree(val->staged_bindings);
+
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
 			val->staged_bindings = NULL;
 		}
-		vmw_resource_unreserve(res, new_backup,
-			val->new_backup_offset);
+		vmw_resource_unreserve(res, switch_backup, val->new_backup,
+				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
 	}
 }
 
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource_val_node *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings =
+			vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(node->staged_bindings);
+			node->staged_bindings = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged_bindings = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
+
+	return 0;
+out_err:
+	return ret;
+}
 
 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@
 				struct vmw_resource *res,
 				struct vmw_resource_val_node **p_node)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_resource_val_node *node;
 	struct drm_hash_item *hash;
 	int ret;
@@ -169,14 +247,90 @@
 		kfree(node);
 		return ret;
 	}
-	list_add_tail(&node->head, &sw_context->resource_list);
 	node->res = vmw_resource_reference(res);
 	node->first_usage = true;
-
 	if (unlikely(p_node != NULL))
 		*p_node = node;
 
-	return 0;
+	if (!dev_priv->has_mob) {
+		list_add_tail(&node->head, &sw_context->resource_list);
+		return 0;
+	}
+
+	switch (vmw_res_type(res)) {
+	case vmw_res_context:
+	case vmw_res_dx_context:
+		list_add(&node->head, &sw_context->ctx_resource_list);
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+		break;
+	case vmw_res_cotable:
+		list_add_tail(&node->head, &sw_context->ctx_resource_list);
+		break;
+	default:
+		list_add_tail(&node->head, &sw_context->resource_list);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view, and the surface it points to, to the
+ * validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise
+	 * it may be swapped out when the view is validated.
+	 */
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	if (ret)
+		return ret;
+
+	return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it
+ * points to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise it returns 0 on success or a negative
+ * error code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+			       enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return PTR_ERR(view);
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	vmw_resource_unreference(&view);
+
+	return ret;
 }
 
 /**
@@ -195,24 +349,56 @@
 					struct vmw_resource *ctx)
 {
 	struct list_head *binding_list;
-	struct vmw_ctx_binding *entry;
+	struct vmw_ctx_bindinfo *entry;
 	int ret = 0;
 	struct vmw_resource *res;
+	u32 i;
 
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
+
+			ret = vmw_resource_val_add(sw_context, res, NULL);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+
+	/* Add all resources bound to the context to the validation list */
 	mutex_lock(&dev_priv->binding_mutex);
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		/* entry->res is not refcounted */
+		res = vmw_resource_reference_unless_doomed(entry->res);
 		if (unlikely(res == NULL))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_resource_val_add(sw_context, entry->res,
+						   NULL);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			break;
 	}
 
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_dma_buffer *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_bo_to_validate_list(sw_context,
+						      dx_query_mob,
+						      true, NULL);
+	}
+
 	mutex_unlock(&dev_priv->binding_mutex);
 	return ret;
 }
@@ -308,7 +494,7 @@
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -318,7 +504,7 @@
 	struct drm_hash_item *hash;
 	int ret;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
@@ -336,7 +522,7 @@
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->bo = ttm_bo_reference(&vbo->base);
 		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
 	struct vmw_resource_val_node *val;
-	int ret;
+	int ret = 0;
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
 
-		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
 				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
 		}
 	}
-	return 0;
+
+	if (sw_context->dx_query_mob) {
+		struct vmw_dma_buffer *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
 }
 
 /**
@@ -409,6 +607,7 @@
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@
 				DRM_ERROR("Failed to validate resource.\n");
 			return ret;
 		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_dma_buffer *vbo = res->backup;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, vbo,
+				 vmw_resource_needs_backup(res), NULL);
+			if (ret) {
+				ttm_bo_unreserve(&vbo->base);
+				return ret;
+			}
+		}
 	}
 	return 0;
 }
 
-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
-				 enum vmw_res_type res_type,
 				 uint32_t *id_loc,
 				 struct vmw_resource *res,
 				 struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (res_type == vmw_res_context && dev_priv->has_mob &&
-	    node->first_usage) {
-
-		/*
-		 * Put contexts first on the list to be able to exit
-		 * list traversal for contexts early.
-		 */
-		list_del(&node->head);
-		list_add(&node->head, &sw_context->resource_list);
-
-		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-		if (unlikely(ret != 0))
-			return ret;
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&node->staged_bindings->list);
-	}
-
 	if (p_val)
 		*p_val = node;
 
@@ -554,7 +740,7 @@
 	rcache->res = res;
 	rcache->handle = *id_loc;
 
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 				    res, &node);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
@@ -573,6 +759,46 @@
 }
 
 /**
+ * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindAllQuery body;
+	} *cmd;
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
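+	/* Nothing to rebind if there is no query MOB, or if it is
+	 * still bound to a context.
+	 */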
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+	if (cmd == NULL) {
+		DRM_ERROR("Failed to rebind queries.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
+
+/**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
@@ -589,12 +815,80 @@
 		if (unlikely(!val->staged_bindings))
 			break;
 
-		ret = vmw_context_rebind_all(val->res);
+		ret = vmw_binding_rebind_all
+			(vmw_context_binding_state(val->res));
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
+
+		ret = vmw_rebind_all_dx_query(val->res);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+				 enum vmw_view_type view_type,
+				 enum vmw_ctx_binding_type binding_type,
+				 uint32 shader_slot,
+				 uint32 view_ids[], u32 num_views,
+				 u32 first_slot)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_cmdbuf_res_manager *man;
+	u32 i;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	man = sw_context->man;
+	for (i = 0; i < num_views; ++i) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_resource *view = NULL;
+
+		if (view_ids[i] != SVGA3D_INVALID_ID) {
+			view = vmw_view_lookup(man, view_type, view_ids[i]);
+			if (IS_ERR(view)) {
+				DRM_ERROR("View not found.\n");
+				return PTR_ERR(view);
+			}
+
+			ret = vmw_view_res_val_add(sw_context, view);
+			if (ret) {
+				DRM_ERROR("Could not add view to "
+					  "validation list.\n");
+				vmw_resource_unreference(&view);
+				return ret;
+			}
+		}
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = view;
+		binding.bi.bt = binding_type;
+		binding.shader_slot = shader_slot;
+		binding.slot = first_slot + i;
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				shader_slot, binding.slot);
+		if (view)
+			vmw_resource_unreference(&view);
 	}
 
 	return 0;
@@ -638,6 +932,12 @@
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
+	if (cmd->body.type >= SVGA3D_RT_MAX) {
+		DRM_ERROR("Illegal render target type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -651,13 +951,14 @@
 		return ret;
 
 	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
+		struct vmw_ctx_bindinfo_view binding;
 
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_rt;
-		bi.i1.rt_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = res_node ? res_node->res : NULL;
+		binding.bi.bt = vmw_ctx_binding_rt;
+		binding.slot = cmd->body.type;
+		vmw_binding_add(ctx_node->staged_bindings,
+				&binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -674,16 +975,62 @@
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.src.sid, NULL);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.dest.sid, NULL);
 }
 
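+/**
+ * vmw_cmd_buffer_copy_check - Validate an SVGA_3D_CMD_DX_BUFFER_COPY
+ * command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */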
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBufferCopy body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest, NULL);
+}
+
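+/**
+ * vmw_cmd_pred_copy_check - Validate an SVGA_3D_CMD_DX_PRED_COPY_REGION
+ * command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */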
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXPredCopyRegion body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dstSid, NULL);
+}
+
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_dma_buffer *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->num_pages > 4)) {
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
 			DRM_ERROR("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -833,12 +1180,12 @@
 
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
-			vmw_bo_pin(dev_priv->pinned_bo, false);
-			ttm_bo_unref(&dev_priv->pinned_bo);
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
-			vmw_bo_pin(sw_context->cur_query_bo, true);
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
 			/*
 			 * We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@
 			 * dummy queries in context destroy paths.
 			 */
 
-			vmw_bo_pin(dev_priv->dummy_query_bo, true);
-			dev_priv->dummy_query_bo_pinned = true;
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
 
 			BUG_ON(sw_context->last_query_ctx == NULL);
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				ttm_bo_reference(sw_context->cur_query_bo);
+				vmw_dmabuf_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -882,7 +1232,6 @@
 				 struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -893,7 +1242,6 @@
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@
 	reloc->mob_loc = id;
 	reloc->location = NULL;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -944,7 +1292,6 @@
 				   struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -955,7 +1302,6 @@
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -980,6 +1326,98 @@
 	return ret;
 }
 
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query to the DX query cotable.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_define_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineQuery q;
+	} *cmd;
+
+	int    ret;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *cotable_res;
+
+	if (ctx_node == NULL) {
+		DRM_ERROR("DX Context not set for query.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
+	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+		return -EINVAL;
+
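+	/* Make sure the DX query cotable has room for this query id. */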
+	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+	vmw_resource_unreference(&cotable_res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB.  In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_bind_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindQuery q;
+	} *cmd;
+
+	struct vmw_dma_buffer *vmw_bo;
+	int    ret;
+
+	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+	/*
+	 * Look up the buffer pointed to by q.mobid, put it on the relocation
+	 * list so its kernel mode MOB ID can be filled in later
+	 */
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+				    &vmw_bo);
+
+	if (ret != 0)
+		return ret;
+
+	sw_context->dx_query_mob = vmw_bo;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
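+	/*
+	 * The validate list set up by vmw_translate_mob_ptr() holds a
+	 * reference to the MOB, so the local reference can be dropped.
+	 */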
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
+
 /**
  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
  *
@@ -1074,7 +1512,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1128,7 +1566,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1363,6 +1801,12 @@
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 			continue;
 
+		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) cur_state->stage);
+			return -EINVAL;
+		}
+
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
 					&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@
 			return ret;
 
 		if (dev_priv->has_mob) {
-			struct vmw_ctx_bindinfo bi;
+			struct vmw_ctx_bindinfo_tex binding;
 
-			bi.ctx = ctx_node->res;
-			bi.res = res_node ? res_node->res : NULL;
-			bi.bt = vmw_ctx_binding_tex;
-			bi.i1.texture_stage = cur_state->stage;
-			vmw_context_binding_add(ctx_node->staged_bindings,
-						&bi);
+			binding.bi.ctx = ctx_node->res;
+			binding.bi.res = res_node ? res_node->res : NULL;
+			binding.bi.bt = vmw_ctx_binding_tex;
+			binding.texture_stage = cur_state->stage;
+			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+					0, binding.texture_stage);
 		}
 	}
 
@@ -1407,6 +1851,47 @@
 	return ret;
 }
 
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     struct vmw_resource_val_node *val_node,
+				     uint32_t *buf_id,
+				     unsigned long backup_offset)
+{
+	struct vmw_dma_buffer *dma_buf;
+	int ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (ret)
+		return ret;
+
+	val_node->switching_backup = true;
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
+
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
@@ -1420,7 +1905,8 @@
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	int ret;
-	struct vmw_dma_buffer *dma_buf;
 	struct vmw_resource_val_node *val_node;
+	int ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
 				converter, res_id, &val_node);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
 
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_dmabuf_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
-
-	return 0;
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+					 buf_id, backup_offset);
 }
 
 /**
@@ -1703,10 +2178,10 @@
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
-				       cmd->body.shid,
-				       cmd->body.type,
-				       &sw_context->staged_cmd_res);
+	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+				cmd->body.shid,
+				cmd->body.type,
+				&sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1734,13 +2209,19 @@
 		SVGA3dCmdSetShader body;
 	} *cmd;
 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
-	struct vmw_ctx_bindinfo bi;
+	struct vmw_ctx_bindinfo_shader binding;
 	struct vmw_resource *res = NULL;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -1751,14 +2232,12 @@
 		return 0;
 
 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
-		res = vmw_compat_shader_lookup
-			(vmw_context_res_man(ctx_node->res),
-			 cmd->body.shid,
-			 cmd->body.type);
+		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+					cmd->body.shid,
+					cmd->body.type);
 
 		if (!IS_ERR(res)) {
 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-						    vmw_res_shader,
 						    &cmd->body.shid, res,
 						    &res_node);
 			vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@
 			return ret;
 	}
 
-	bi.ctx = ctx_node->res;
-	bi.res = res_node ? res_node->res : NULL;
-	bi.bt = vmw_ctx_binding_shader;
-	bi.i1.shader_type = cmd->body.type;
-	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+	return 0;
 }
 
 /**
@@ -1842,6 +2323,690 @@
 				     cmd->body.offsetInBytes);
 }
 
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+	struct vmw_resource_val_node *res_node = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_cb binding;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_cb;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	binding.offset = cmd->body.offsetInBytes;
+	binding.size = cmd->body.sizeInBytes;
+	binding.slot = cmd->body.slot;
+
+	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+			  (unsigned) cmd->body.type,
+			  (unsigned) binding.slot);
+		return -EINVAL;
+	}
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, binding.slot);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dShaderResourceViewId);
+
+	if ((u64) cmd->body.startView + (u64) num_sr_view >
+	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
+	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Invalid shader binding.\n");
+		return -EINVAL;
+	}
+
+	return vmw_view_bindings_add(sw_context, vmw_view_sr,
+				     vmw_ctx_binding_sr,
+				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+				     (void *) &cmd[1], num_sr_view,
+				     cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+	struct vmw_resource *res = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_shader binding;
+	int ret = 0;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
+	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+		if (IS_ERR(res)) {
+			DRM_ERROR("Could not find shader for binding.\n");
+			return PTR_ERR(res);
+		}
+
+		ret = vmw_resource_val_add(sw_context, res, NULL);
+		if (ret)
+			goto out_unref;
+	}
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_dx_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+out_unref:
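+	/* Drop the reference obtained from vmw_shader_lookup(). */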
+	if (res)
+		vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validates an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_vb binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+		SVGA3dVertexBuffer buf[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dVertexBuffer);
+	if ((u64)num + (u64)cmd->body.startBuffer >
+	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+		DRM_ERROR("Invalid number of vertex buffers.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->buf[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.bt = vmw_ctx_binding_vb;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.offset = cmd->buf[i].offset;
+		binding.stride = cmd->buf[i].stride;
+		binding.slot = i + cmd->body.startBuffer;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_ib binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = ((res_node) ? res_node->res : NULL);
+	binding.bi.bt = vmw_ctx_binding_ib;
+	binding.offset = cmd->body.offset;
+	binding.format = cmd->body.format;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dRenderTargetViewId);
+
+	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+		DRM_ERROR("Invalid DX Rendertarget binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+				    vmw_ctx_binding_ds, 0,
+				    &cmd->body.depthStencilViewId, 1, 0);
+	if (ret)
+		return ret;
+
+	return vmw_view_bindings_add(sw_context, vmw_view_rt,
+				     vmw_ctx_binding_dx_rt, 0,
+				     (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearRenderTargetView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_rt,
+				   cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearDepthStencilView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_ds,
+				   cmd->body.depthStencilViewId);
+}
+
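+/**
+ * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_*_VIEW
+ * command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */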
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource_val_node *srf_node;
+	struct vmw_resource *res;
+	enum vmw_view_type view_type;
+	int ret;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+		uint32 sid;
+	} *cmd;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view_type = vmw_view_cmd_to_type(header->id);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->sid, &srf_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_view_add(sw_context->man,
+			    ctx_node->res,
+			    srf_node->res,
+			    view_type,
+			    cmd->defined_id,
+			    header,
+			    header->size + sizeof(*header),
+			    &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+		SVGA3dSoTarget targets[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dSoTarget);
+
+	if (num > SVGA3D_DX_MAX_SOTARGETS) {
+		DRM_ERROR("Invalid DX SO binding.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->targets[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.bi.bt = vmw_ctx_binding_so;
+		binding.offset = cmd->targets[i].offset;
+		binding.size = cmd->targets[i].sizeInBytes;
+		binding.slot = i;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
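+/**
+ * vmw_cmd_dx_so_define - Validate a DX state-object define command by
+ * notifying the matching cotable of the new id.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */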
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+	} *cmd;
+	enum vmw_so_type so_type;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
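+		/*
+		 * A view id of SVGA3D_INVALID_ID leaves @view NULL,
+		 * which unbinds the slot.
+		 */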
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	so_type = vmw_so_cmd_to_type(header->id);
+	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		union {
+			SVGA3dCmdDXReadbackSubResource r_body;
+			SVGA3dCmdDXInvalidateSubResource i_body;
+			SVGA3dCmdDXUpdateSubResource u_body;
+			SVGA3dSurfaceId sid;
+		};
+	} *cmd;
+
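+	/*
+	 * The union read through &cmd->sid below is only valid if all
+	 * three command bodies keep the surface id at the same offset.
+	 */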
+	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->sid, NULL);
+}
+
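+/**
+ * vmw_cmd_dx_cid_check - Validate a DX command that requires only that
+ * a DX context is currently set.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */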
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_remove(sw_context->man,
+			      cmd->body.view_id, view_type,
+			      &sw_context->staged_cmd_res,
+			      &view);
+	if (ret || !view)
+		return ret;
+
+	/*
+	 * Add view to the validate list iff it was not created using this
+	 * command batch.
+	 */
+	return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+	vmw_resource_unreference(&res);
+	if (ret)
+		return ret;
+
+	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+				 cmd->body.shaderId, cmd->body.type,
+				 &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+				&sw_context->staged_cmd_res);
+	if (ret)
+		DRM_ERROR("Could not find shader to remove.\n");
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (cmd->body.cid != SVGA3D_INVALID_ID) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+					user_context_converter,
+					&cmd->body.cid, &ctx_node);
+		if (ret)
+			return ret;
+	} else {
+		ctx_node = sw_context->dx_ctx_node;
+		if (!ctx_node) {
+			DRM_ERROR("DX Context not set.\n");
+			return -EINVAL;
+		}
+	}
+
+	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+				cmd->body.shid, 0);
+	if (IS_ERR(res)) {
+		DRM_ERROR("Could not find shader to bind.\n");
+		return PTR_ERR(res);
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &res_node);
+	if (ret) {
+		DRM_ERROR("Error creating resource validation node.\n");
+		goto out_unref;
+	}
+
+	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+					&cmd->body.mobid,
+					cmd->body.offsetInBytes);
+out_unref:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@
 	uint32_t size_remaining = *size;
 	uint32_t cmd_id;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	switch (cmd_id) {
 	case SVGA_CMD_UPDATE:
 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
 		    false, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
 		    false, false, true),
@@ -2051,7 +3216,147 @@
 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
-		    true, false, true)
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+		    false, false, true),
+
+	/*
+	 * DX commands
+	 */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+		    &vmw_cmd_dx_set_shader_res, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+		    &vmw_cmd_dx_set_index_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+		    &vmw_cmd_dx_set_rendertargets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+		    &vmw_cmd_ok, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+		    &vmw_cmd_dx_define_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+		    &vmw_cmd_dx_destroy_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+		    &vmw_cmd_dx_bind_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+		    &vmw_cmd_dx_set_so_targets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+		    &vmw_cmd_buffer_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+		    &vmw_cmd_pred_copy_check, true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@
 	const struct vmw_cmd_entry *entry;
 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	/* Handle any non-3D commands */
 	if (unlikely(cmd_id < SVGA_CMD_MAX))
 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
-	cmd_id = le32_to_cpu(header->id);
-	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+	cmd_id = header->id;
+	*size = header->size + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@
  *
  * @list: The resource list.
  */
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+					  struct list_head *list)
 {
 	struct vmw_resource_val_node *val, *val_next;
 
@@ -2195,8 +3501,15 @@
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
-		if (unlikely(val->staged_bindings))
-			kfree(val->staged_bindings);
+
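+		/*
+		 * The sw_context-wide binding state is reused across
+		 * submissions; free only binding state allocated per node.
+		 */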
+		if (val->staged_bindings) {
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
+			val->staged_bindings = NULL;
+		}
+
 		kfree(val);
 	}
 }
@@ -2222,24 +3535,21 @@
 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo,
-				      bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+			       struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool validate_as_mob)
 {
+	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+						  base);
 	int ret;
 
-
-	/*
-	 * Don't validate pinned buffers.
-	 */
-
-	if (bo == dev_priv->pinned_bo ||
-	    (bo == dev_priv->dummy_query_bo &&
-	     dev_priv->dummy_query_bo_pinned))
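+	/* Don't validate pinned buffers. */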
+	if (vbo->pin_count > 0)
 		return 0;
 
 	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+				       false);
 
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@
 	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -2257,8 +3568,7 @@
 	 * previous contents.
 	 */
 
-	DRM_INFO("Falling through to VRAM.\n");
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 	return ret;
 }
 
@@ -2270,6 +3580,7 @@
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 true,
 						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
@@ -2417,7 +3728,164 @@
 	}
 }
 
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+				   void *kernel_commands,
+				   u32 command_size,
+				   struct vmw_sw_context *sw_context)
+{
+	void *cmd;
 
+	if (sw_context->dx_ctx_node)
+		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+					  sw_context->dx_ctx_node->res->id);
+	else
+		cmd = vmw_fifo_reserve(dev_priv, command_size);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		return -ENOMEM;
+	}
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_fifo_commit(dev_priv, command_size);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+				     struct vmw_cmdbuf_header *header,
+				     u32 command_size,
+				     struct vmw_sw_context *sw_context)
+{
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+		  SVGA3D_INVALID_ID);
+	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+				       id, false, header);
+
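+	/*
+	 * The command batch was already copied into the buffer backing
+	 * @header by vmw_execbuf_cmdbuf(), so no memcpy is needed here.
+	 */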
+	vmw_apply_relocations(sw_context);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to a kernel-space command batch, or NULL.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands it was called with. That value may be NULL. In that case,
+ * *@header will be set to NULL.
+ * If an error is encountered, the function will return a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it will return
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+				void __user *user_commands,
+				void *kernel_commands,
+				u32 command_size,
+				struct vmw_cmdbuf_header **header)
+{
+	size_t cmdbuf_size;
+	int ret;
+
+	*header = NULL;
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
+	if (command_size > SVGA_CB_MAX_SIZE) {
+		DRM_ERROR("Command buffer is too large.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* If possible, add a little space for fencing. */
+	cmdbuf_size = command_size + 512;
+	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+					   true, header);
+	if (IS_ERR(kernel_commands))
+		return kernel_commands;
+
+	ret = copy_from_user(kernel_commands, user_commands,
+			     command_size);
+	if (ret) {
+		DRM_ERROR("Failed copying commands.\n");
+		vmw_cmdbuf_header_free(*header);
+		*header = NULL;
+		return ERR_PTR(-EFAULT);
+	}
+
+	return kernel_commands;
+}
+
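+/**
+ * vmw_execbuf_tie_context - Set up the current DX context of a software
+ * context for a command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Structure holding the relocation lists.
+ * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
+ *
+ * Looks up the DX context identified by @handle, adds it to the resource
+ * validation list and makes it the current DX context of @sw_context.
+ */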
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource *res;
+	int ret;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+					      handle, user_context_converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+			  (unsigned) handle);
+		return ret;
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	sw_context->dx_ctx_node = ctx_node;
+	sw_context->man = vmw_context_res_man(res);
+out_err:
+	vmw_resource_unreference(&res);
+	return ret;
+}
 
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
+			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
 			struct vmw_fence_obj **out_fence)
 {
@@ -2432,18 +3901,33 @@
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_resource *error_resource;
 	struct list_head resource_list;
+	struct vmw_cmdbuf_header *header;
 	struct ww_acquire_ctx ticket;
 	uint32_t handle;
-	void *cmd;
 	int ret;
 
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (ret)
+			return ret;
+	}
+
+	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+					     kernel_commands, command_size,
+					     &header);
+	if (IS_ERR(kernel_commands))
+		return PTR_ERR(kernel_commands);
+
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
+	if (ret) {
+		ret = -ERESTARTSYS;
+		goto out_free_header;
+	}
 
+	sw_context->kernel = false;
 	if (kernel_commands == NULL) {
-		sw_context->kernel = false;
-
 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
 		if (unlikely(ret != 0))
 			goto out_unlock;
@@ -2458,19 +3942,26 @@
 			goto out_unlock;
 		}
 		kernel_commands = sw_context->cmd_bounce;
-	} else
+	} else if (!header)
 		sw_context->kernel = true;
 
 	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
+	sw_context->dx_query_mob = NULL;
+	sw_context->dx_query_ctx = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
 	if (!sw_context->res_ht_initialized) {
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@
 		sw_context->res_ht_initialized = true;
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
 	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0)) {
+		list_splice_init(&sw_context->ctx_resource_list,
+				 &sw_context->resource_list);
+		goto out_err_nores;
+	}
+
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
+	/*
+	 * Merge the resource lists before checking the return status
+	 * from vmw_cmd_check_all so that all the open hashtabs will
+	 * be handled properly even if vmw_cmd_check_all fails.
+	 */
+	list_splice_init(&sw_context->ctx_resource_list,
+			 &sw_context->resource_list);
+
 	if (unlikely(ret != 0))
 		goto out_err_nores;
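
The comment in the hunk above documents a splice-before-check rule: partial context resources are merged into the main resource list before the return status is tested, so a single cleanup pass covers them whether or not vmw_cmd_check_all failed. A standalone model of the splice step (singly linked, order not preserved, illustrative only):

struct node { struct node *next; };

/* Move every node of *src onto *dst, leaving *src empty. */
static void splice(struct node **dst, struct node **src)
{
	while (*src) {
		struct node *n = *src;

		*src = n->next;
		n->next = *dst;
		*dst = n;
	}
}
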
 
@@ -2502,14 +4007,6 @@
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	if (throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   throttle_us);
-
-		if (unlikely(ret != 0))
-			goto out_err;
-	}
-
 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
 	if (unlikely(ret != 0)) {
 		ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@
 			goto out_unlock_binding;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
-		goto out_unlock_binding;
+	if (!header) {
+		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+					      command_size, sw_context);
+	} else {
+		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+						sw_context);
+		header = NULL;
 	}
-
-	vmw_apply_relocations(sw_context);
-	memcpy(cmd, kernel_commands, command_size);
-
-	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-
-	vmw_fifo_commit(dev_priv, command_size);
 	mutex_unlock(&dev_priv->binding_mutex);
+	if (ret)
+		goto out_err;
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	vmw_resources_unreserve(sw_context, false);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -2580,7 +4073,7 @@
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 
 	return 0;
 
@@ -2589,7 +4082,7 @@
 out_err:
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resources_unreserve(sw_context, true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 	if (unlikely(error_resource != NULL))
 		vmw_resource_unreference(&error_resource);
+out_free_header:
+	if (header)
+		vmw_cmdbuf_header_free(header);
 
 	return ret;
 }
@@ -2628,9 +4124,11 @@
 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 }
 
 
@@ -2672,11 +4170,11 @@
 
 	INIT_LIST_HEAD(&validate_list);
 
-	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
 	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
-	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
@@ -2697,10 +4195,11 @@
 		dev_priv->query_cid_valid = false;
 	}
 
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
-
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 	if (fence == NULL) {
 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
 						  NULL);
@@ -2712,7 +4211,9 @@
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	DRM_INFO("Dummy query bo pin count: %d\n",
+		 dev_priv->dummy_query_bo->pin_count);
 
 out_unlock:
 	return;
@@ -2722,7 +4223,7 @@
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 }
 
 /**
@@ -2751,36 +4252,68 @@
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+		      struct drm_file *file_priv, size_t size)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct drm_vmw_execbuf_arg arg;
 	int ret;
+	static const size_t copy_offset[] = {
+		offsetof(struct drm_vmw_execbuf_arg, context_handle),
+		sizeof(struct drm_vmw_execbuf_arg)};
+
+	if (unlikely(size < copy_offset[0])) {
+		DRM_ERROR("Invalid command size, ioctl %d\n",
+			  DRM_VMW_EXECBUF);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+		return -EFAULT;
 
 	/*
-	 * This will allow us to extend the ioctl argument while
+	 * Extend the ioctl argument while
 	 * maintaining backwards compatibility:
 	 * We take different code paths depending on the value of
-	 * arg->version.
+	 * arg.version.
 	 */
 
-	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+		     arg.version == 0)) {
 		DRM_ERROR("Incorrect execbuf version.\n");
-		DRM_ERROR("You're running outdated experimental "
-			  "vmwgfx user-space drivers.");
 		return -EINVAL;
 	}
 
+	if (arg.version > 1 &&
+	    copy_from_user(&arg.context_handle,
+			   (void __user *) (data + copy_offset[0]),
+			   copy_offset[arg.version - 1] -
+			   copy_offset[0]) != 0)
+		return -EFAULT;
+
+	switch (arg.version) {
+	case 1:
+		arg.context_handle = (uint32_t) -1;
+		break;
+	case 2:
+		if (arg.pad64 != 0) {
+			DRM_ERROR("Unused IOCTL data not set to zero.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg->commands,
-				  NULL, arg->command_size, arg->throttle_us,
-				  (void __user *)(unsigned long)arg->fence_rep,
+				  (void __user *)(unsigned long)arg.commands,
+				  NULL, arg.command_size, arg.throttle_us,
+				  arg.context_handle,
+				  (void __user *)(unsigned long)arg.fence_rep,
 				  NULL);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
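
The reworked ioctl above copies a versioned argument in two stages: first the version-1 prefix, then, if the caller's version says it is present, the tail that later versions appended. A userspace model of that scheme, with memcpy standing in for copy_from_user and an illustrative struct layout rather than the real drm_vmw_execbuf_arg:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>

struct arg_v2 {
	uint32_t version;
	uint64_t commands;	/* v1 fields end here */
	uint32_t context_handle;/* added in v2 */
	uint32_t pad64;
};

static const size_t copy_offset[] = {
	offsetof(struct arg_v2, context_handle),	/* size of v1 */
	sizeof(struct arg_v2),				/* size of v2 */
};

static int copy_versioned(struct arg_v2 *dst, const void *user, size_t size)
{
	if (size < copy_offset[0])
		return -EINVAL;
	memcpy(dst, user, copy_offset[0]);	/* common v1 prefix */
	if (dst->version < 1 || dst->version > 2)
		return -EINVAL;
	if (dst->version > 1)	/* copy the tail only if the caller has it */
		memcpy((char *)dst + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[dst->version - 1] - copy_offset[0]);
	else
		dst->context_handle = (uint32_t)-1;	/* v1 default */
	return 0;
}
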
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f3..3b1faf7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
 /**************************************************************************
  *
  * Copyright © 2007 David Airlie
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
 
 #include <drm/ttm/ttm_placement.h>
 
@@ -40,21 +41,22 @@
 
 	void *vmalloc;
 
+	struct mutex bo_mutex;
 	struct vmw_dma_buffer *vmw_bo;
 	struct ttm_bo_kmap_obj map;
+	void *bo_ptr;
+	unsigned bo_size;
+	struct drm_framebuffer *set_fb;
+	struct drm_display_mode *set_mode;
+	u32 fb_x;
+	u32 fb_y;
+	bool bo_iowrite;
 
 	u32 pseudo_palette[17];
 
-	unsigned depth;
-	unsigned bpp;
-
 	unsigned max_width;
 	unsigned max_height;
 
-	void *bo_ptr;
-	unsigned bo_size;
-	bool bo_iowrite;
-
 	struct {
 		spinlock_t lock;
 		bool active;
@@ -63,6 +65,10 @@
 		unsigned x2;
 		unsigned y2;
 	} dirty;
+
+	struct drm_crtc *crtc;
+	struct drm_connector *con;
+	struct delayed_work local_work;
 };
 
 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +83,7 @@
 		return 1;
 	}
 
-	switch (par->depth) {
+	switch (par->set_fb->depth) {
 	case 24:
 	case 32:
 		pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +91,8 @@
 			     ((blue  & 0xff00) >> 8);
 		break;
 	default:
-		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+		DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
+			  par->set_fb->bits_per_pixel);
 		return 1;
 	}
 
@@ -134,12 +141,6 @@
 		return -EINVAL;
 	}
 
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
-	    (var->xoffset != 0 || var->yoffset != 0)) {
-		DRM_ERROR("Can not handle panning without display topology\n");
-		return -EINVAL;
-	}
-
 	if ((var->xoffset + var->xres) > par->max_width ||
 	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +157,6 @@
 	return 0;
 }
 
-static int vmw_fb_set_par(struct fb_info *info)
-{
-	struct vmw_fb_par *par = info->par;
-	struct vmw_private *vmw_priv = par->vmw_priv;
-	int ret;
-
-	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
-
-	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
-				 info->fix.line_length,
-				 par->bpp, par->depth);
-	if (ret)
-		return ret;
-
-	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
-		/* TODO check if pitch and offset changes */
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
-	/* This is really helpful since if this fails the user
-	 * can probably not see anything on the screen.
-	 */
-	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
-
-	return 0;
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
-			      struct fb_info *info)
-{
-	return 0;
-}
-
 static int vmw_fb_blank(int blank, struct fb_info *info)
 {
 	return 0;
@@ -205,65 +166,89 @@
  * Dirty code
  */
 
-static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+static void vmw_fb_dirty_flush(struct work_struct *work)
 {
+	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+					      local_work.work);
 	struct vmw_private *vmw_priv = par->vmw_priv;
 	struct fb_info *info = vmw_priv->fb_info;
-	int stride = (info->fix.line_length / 4);
-	int *src = (int *)info->screen_base;
-	__le32 __iomem *vram_mem = par->bo_ptr;
-	unsigned long flags;
-	unsigned x, y, w, h;
-	int i, k;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
+	unsigned long irq_flags;
+	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+	u32 cpp, max_x, max_y;
+	struct drm_clip_rect clip;
+	struct drm_framebuffer *cur_fb;
+	u8 *src_ptr, *dst_ptr;
 
 	if (vmw_priv->suspended)
 		return;
 
-	spin_lock_irqsave(&par->dirty.lock, flags);
+	mutex_lock(&par->bo_mutex);
+	cur_fb = par->set_fb;
+	if (!cur_fb)
+		goto out_unlock;
+
+	spin_lock_irqsave(&par->dirty.lock, irq_flags);
 	if (!par->dirty.active) {
-		spin_unlock_irqrestore(&par->dirty.lock, flags);
-		return;
+		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+		goto out_unlock;
 	}
-	x = par->dirty.x1;
-	y = par->dirty.y1;
-	w = min(par->dirty.x2, info->var.xres) - x;
-	h = min(par->dirty.y2, info->var.yres) - y;
+
+	/*
+	 * Handle panning when copying from vmalloc to framebuffer.
+	 * Clip dirty area to framebuffer.
+	 */
+	cpp = (cur_fb->bits_per_pixel + 7) / 8;
+	max_x = par->fb_x + cur_fb->width;
+	max_y = par->fb_y + cur_fb->height;
+
+	dst_x1 = par->dirty.x1 - par->fb_x;
+	dst_y1 = par->dirty.y1 - par->fb_y;
+	dst_x1 = max_t(s32, dst_x1, 0);
+	dst_y1 = max_t(s32, dst_y1, 0);
+
+	dst_x2 = par->dirty.x2 - par->fb_x;
+	dst_y2 = par->dirty.y2 - par->fb_y;
+	dst_x2 = min_t(s32, dst_x2, max_x);
+	dst_y2 = min_t(s32, dst_y2, max_y);
+	w = dst_x2 - dst_x1;
+	h = dst_y2 - dst_y1;
+	w = max_t(s32, 0, w);
+	h = max_t(s32, 0, h);
+
 	par->dirty.x1 = par->dirty.x2 = 0;
 	par->dirty.y1 = par->dirty.y2 = 0;
-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
 
-	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
-		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
-			iowrite32(src[k], vram_mem + k);
+	if (w && h) {
+		dst_ptr = (u8 *)par->bo_ptr  +
+			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+		src_ptr = (u8 *)par->vmalloc +
+			((dst_y1 + par->fb_y) * info->fix.line_length +
+			 (dst_x1 + par->fb_x) * cpp);
+
+		while (h-- > 0) {
+			memcpy(dst_ptr, src_ptr, w*cpp);
+			dst_ptr += par->set_fb->pitches[0];
+			src_ptr += info->fix.line_length;
+		}
+
+		clip.x1 = dst_x1;
+		clip.x2 = dst_x2;
+		clip.y1 = dst_y1;
+		clip.y2 = dst_y2;
+
+		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+						       &clip, 1));
+		vmw_fifo_flush(vmw_priv, false);
 	}
-
-#if 0
-	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
-#endif
-
-	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return;
-	}
-
-	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
-	cmd->body.x = cpu_to_le32(x);
-	cmd->body.y = cpu_to_le32(y);
-	cmd->body.width = cpu_to_le32(w);
-	cmd->body.height = cpu_to_le32(h);
-	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+out_unlock:
+	mutex_unlock(&par->bo_mutex);
 }
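
The flush above translates the accumulated dirty rectangle by the pan offset and clamps it to the current framebuffer; a degenerate result simply means there is nothing to copy. The clip arithmetic in isolation (standalone sketch):

#include <stdint.h>

struct rect { int32_t x1, y1, x2, y2; };

static struct rect clip_dirty(struct rect dirty, int32_t pan_x, int32_t pan_y,
			      int32_t fb_w, int32_t fb_h)
{
	struct rect r;

	/* Translate from virtual-screen to framebuffer coordinates. */
	r.x1 = dirty.x1 - pan_x;
	r.y1 = dirty.y1 - pan_y;
	r.x2 = dirty.x2 - pan_x;
	r.y2 = dirty.y2 - pan_y;

	/* Clamp to the framebuffer bounds. */
	if (r.x1 < 0) r.x1 = 0;
	if (r.y1 < 0) r.y1 = 0;
	if (r.x2 > fb_w) r.x2 = fb_w;
	if (r.y2 > fb_h) r.y2 = fb_h;

	/* Collapse an empty intersection to zero width/height. */
	if (r.x2 < r.x1) r.x2 = r.x1;
	if (r.y2 < r.y1) r.y2 = r.y1;
	return r;
}
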
 
 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
 			      unsigned x1, unsigned y1,
 			      unsigned width, unsigned height)
 {
-	struct fb_info *info = par->vmw_priv->fb_info;
 	unsigned long flags;
 	unsigned x2 = x1 + width;
 	unsigned y2 = y1 + height;
@@ -277,7 +262,8 @@
 		/* if we are active start the dirty work
 		 * we share the work with the defio system */
 		if (par->dirty.active)
-			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+			schedule_delayed_work(&par->local_work,
+					      VMW_DIRTY_DELAY);
 	} else {
 		if (x1 < par->dirty.x1)
 			par->dirty.x1 = x1;
@@ -291,6 +277,28 @@
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 }
 
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+
+	if ((var->xoffset + var->xres) > var->xres_virtual ||
+	    (var->yoffset + var->yres) > var->yres_virtual) {
+		DRM_ERROR("Requested panning can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+	if (par->set_fb)
+		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+				  par->set_fb->height);
+	mutex_unlock(&par->bo_mutex);
+
+	return 0;
+}
+
 static void vmw_deferred_io(struct fb_info *info,
 			    struct list_head *pagelist)
 {
@@ -319,12 +327,17 @@
 		par->dirty.x2 = info->var.xres;
 		par->dirty.y2 = y2;
 		spin_unlock_irqrestore(&par->dirty.lock, flags);
-	}
 
-	vmw_fb_dirty_flush(par);
+		/*
+		 * Since we've already waited on this work once, try to
+		 * execute asap.
+		 */
+		cancel_delayed_work(&par->local_work);
+		schedule_delayed_work(&par->local_work, 0);
+	}
 };
 
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
 	.delay		= VMW_DIRTY_DELAY,
 	.deferred_io	= vmw_deferred_io,
 };
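
vmw_deferred_io above expedites an already-queued flush by cancelling the pending delayed work and rescheduling it with a zero delay. The same two calls in a minimal kernel-style fragment (a sketch, assuming a module-owned work item):

#include <linux/workqueue.h>

static struct delayed_work flush_work;

static void expedite_flush(void)
{
	/*
	 * cancel_delayed_work() removes a pending (not yet running)
	 * item; rescheduling with a zero delay then runs it at the
	 * next opportunity instead of after the original timeout.
	 */
	cancel_delayed_work(&flush_work);
	schedule_delayed_work(&flush_work, 0);
}
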
@@ -358,6 +371,256 @@
  * Bring up code
  */
 
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+			    size_t size, struct vmw_dma_buffer **out)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
+
+	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+	if (!vmw_bo) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+			      &vmw_sys_placement,
+			      false,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto err_unlock; /* init frees the buffer on failure */
+
+	*out = vmw_bo;
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+
+	return 0;
+
+err_unlock:
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+	return ret;
+}
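
The error path above relies on an ownership rule worth spelling out: vmw_dmabuf_init takes a destructor and frees the buffer itself on failure, so the caller must not free it again. A standalone model of that convention (hypothetical names):

#include <stdlib.h>

struct buf { void (*free_fn)(struct buf *); };

static void buf_free(struct buf *b) { free(b); }

static int buf_init(struct buf *b, int fail, void (*free_fn)(struct buf *))
{
	b->free_fn = free_fn;
	if (fail) {
		b->free_fn(b);	/* init consumes the object on failure */
		return -1;
	}
	return 0;
}

static struct buf *buf_create(int fail)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	if (buf_init(b, fail, buf_free))
		return NULL;	/* no free here: init already did it */
	return b;
}
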
+
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+				int *depth)
+{
+	switch (var->bits_per_pixel) {
+	case 32:
+		*depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+			     bool detach_bo,
+			     bool unref_bo)
+{
+	struct drm_framebuffer *cur_fb = par->set_fb;
+	int ret;
+
+	/* Detach the KMS framebuffer from crtcs */
+	if (par->set_mode) {
+		struct drm_mode_set set;
+
+		set.crtc = par->crtc;
+		set.x = 0;
+		set.y = 0;
+		set.mode = NULL;
+		set.fb = NULL;
+		set.num_connectors = 1;
+		set.connectors = &par->con;
+		ret = drm_mode_set_config_internal(&set);
+		if (ret) {
+			DRM_ERROR("Could not unset a mode.\n");
+			return ret;
+		}
+		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+		par->set_mode = NULL;
+	}
+
+	if (cur_fb) {
+		drm_framebuffer_unreference(cur_fb);
+		par->set_fb = NULL;
+	}
+
+	if (par->vmw_bo && detach_bo) {
+		if (par->bo_ptr) {
+			ttm_bo_kunmap(&par->map);
+			par->bo_ptr = NULL;
+		}
+		if (unref_bo)
+			vmw_dmabuf_unreference(&par->vmw_bo);
+		else
+			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+	struct drm_mode_fb_cmd mode_cmd;
+	struct vmw_fb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_framebuffer *cur_fb;
+	struct vmw_framebuffer *vfb;
+	int ret = 0;
+	size_t new_bo_size;
+
+	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+	if (ret)
+		return ret;
+
+	mode_cmd.width = var->xres;
+	mode_cmd.height = var->yres;
+	mode_cmd.bpp = var->bits_per_pixel;
+	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+
+	cur_fb = par->set_fb;
+	if (cur_fb && cur_fb->width == mode_cmd.width &&
+	    cur_fb->height == mode_cmd.height &&
+	    cur_fb->bits_per_pixel == mode_cmd.bpp &&
+	    cur_fb->depth == mode_cmd.depth &&
+	    cur_fb->pitches[0] == mode_cmd.pitch)
+		return 0;
+
+	/* Need new buffer object ? */
+	new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+	ret = vmw_fb_kms_detach(par,
+				par->bo_size < new_bo_size ||
+				par->bo_size > 2*new_bo_size,
+				true);
+	if (ret)
+		return ret;
+
+	if (!par->vmw_bo) {
+		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+				       &par->vmw_bo);
+		if (ret) {
+			DRM_ERROR("Failed creating a buffer object for "
+				  "fbdev.\n");
+			return ret;
+		}
+		par->bo_size = new_bo_size;
+	}
+
+	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+				      true, &mode_cmd);
+	if (IS_ERR(vfb))
+		return PTR_ERR(vfb);
+
+	par->set_fb = &vfb->base;
+
+	if (!par->bo_ptr) {
+		/*
+		 * Pin before mapping. Since we don't know in what placement
+		 * to pin, call into KMS to do it for us.
+		 */
+		ret = vfb->pin(vfb);
+		if (ret) {
+			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+				  par->vmw_bo->base.num_pages, &par->map);
+		if (ret) {
+			vfb->unpin(vfb);
+			DRM_ERROR("Could not map the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+	}
+
+	return 0;
+}
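
The reallocation test above keeps the existing buffer object only while the requested size stays within [new_bo_size, 2 * new_bo_size]. This hysteresis avoids reallocating on every small mode change while still shrinking grossly oversized buffers. The predicate in isolation:

#include <stddef.h>
#include <stdbool.h>

static bool bo_needs_realloc(size_t cur_size, size_t new_size)
{
	/* Too small to hold the new mode, or more than twice too big. */
	return cur_size < new_size || cur_size > 2 * new_size;
}
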
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct drm_mode_set set;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+		DRM_MODE_TYPE_DRIVER,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	struct drm_display_mode *old_mode;
+	struct drm_display_mode *mode;
+	int ret;
+
+	old_mode = par->set_mode;
+	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	if (!mode) {
+		DRM_ERROR("Could not create new fb mode.\n");
+		return -ENOMEM;
+	}
+
+	mode->hdisplay = var->xres;
+	mode->vdisplay = var->yres;
+	vmw_guess_mode_timing(mode);
+
+	if (old_mode && drm_mode_equal(old_mode, mode)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		mode = old_mode;
+		old_mode = NULL;
+	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+					       mode->hdisplay *
+					       (var->bits_per_pixel + 7) / 8,
+					       mode->vdisplay)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_fb_kms_framebuffer(info);
+	if (ret)
+		goto out_unlock;
+
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+
+	set.crtc = par->crtc;
+	set.x = 0;
+	set.y = 0;
+	set.mode = mode;
+	set.fb = par->set_fb;
+	set.num_connectors = 1;
+	set.connectors = &par->con;
+
+	ret = drm_mode_set_config_internal(&set);
+	if (ret)
+		goto out_unlock;
+
+	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+			  par->set_fb->width, par->set_fb->height);
+
+	/* If something was already dirty we won't
+	 * schedule new work, so let's do it now */
+
+	schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+	if (old_mode)
+		drm_mode_destroy(vmw_priv->dev, old_mode);
+	par->set_mode = mode;
+
+	drm_modeset_unlock_all(vmw_priv->dev);
+	mutex_unlock(&par->bo_mutex);
+
+	return ret;
+}
+
+
 static struct fb_ops vmw_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = vmw_fb_check_var,
@@ -370,55 +633,14 @@
 	.fb_blank = vmw_fb_blank,
 };
 
-static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-			    size_t size, struct vmw_dma_buffer **out)
-{
-	struct vmw_dma_buffer *vmw_bo;
-	struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
-	struct ttm_placement ne_placement;
-	int ret;
-
-	ne_placement.num_placement = 1;
-	ne_placement.placement = &ne_place;
-	ne_placement.num_busy_placement = 1;
-	ne_placement.busy_placement = &ne_place;
-
-	ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
-
-	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
-	if (!vmw_bo) {
-		ret = -ENOMEM;
-		goto err_unlock;
-	}
-
-	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
-			      &ne_placement,
-			      false,
-			      &vmw_dmabuf_bo_free);
-	if (unlikely(ret != 0))
-		goto err_unlock; /* init frees the buffer on failure */
-
-	*out = vmw_bo;
-
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
-
-	return 0;
-
-err_unlock:
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
-	return ret;
-}
-
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
 	struct device *device = &vmw_priv->dev->pdev->dev;
 	struct vmw_fb_par *par;
 	struct fb_info *info;
-	unsigned initial_width, initial_height;
 	unsigned fb_width, fb_height;
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+	struct drm_display_mode *init_mode;
 	int ret;
 
 	fb_bpp = 32;
@@ -428,9 +650,6 @@
 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
-	initial_width = min(vmw_priv->initial_width, fb_width);
-	initial_height = min(vmw_priv->initial_height, fb_height);
-
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +663,35 @@
 	 */
 	vmw_priv->fb_info = info;
 	par = info->par;
+	memset(par, 0, sizeof(*par));
+	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
 	par->vmw_priv = vmw_priv;
-	par->depth = fb_depth;
-	par->bpp = fb_bpp;
 	par->vmalloc = NULL;
 	par->max_width = fb_width;
 	par->max_height = fb_height;
 
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+				      par->max_height, &par->con,
+				      &par->crtc, &init_mode);
+	if (ret) {
+		drm_modeset_unlock_all(vmw_priv->dev);
+		goto err_kms;
+	}
+
+	info->var.xres = init_mode->hdisplay;
+	info->var.yres = init_mode->vdisplay;
+	drm_modeset_unlock_all(vmw_priv->dev);
+
 	/*
 	 * Create buffers and alloc memory
 	 */
-	par->vmalloc = vmalloc(fb_size);
+	par->vmalloc = vzalloc(fb_size);
 	if (unlikely(par->vmalloc == NULL)) {
 		ret = -ENOMEM;
 		goto err_free;
 	}
 
-	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
-	if (unlikely(ret != 0))
-		goto err_free;
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	if (unlikely(ret != 0))
-		goto err_unref;
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-	par->bo_size = fb_size;
-
 	/*
 	 * Fixed and var
 	 */
@@ -490,7 +709,7 @@
 	info->fix.smem_len = fb_size;
 
 	info->pseudo_palette = par->pseudo_palette;
-	info->screen_base = par->vmalloc;
+	info->screen_base = (char __iomem *)par->vmalloc;
 	info->screen_size = fb_size;
 
 	info->flags = FBINFO_DEFAULT;
@@ -508,18 +727,14 @@
 
 	info->var.xres_virtual = fb_width;
 	info->var.yres_virtual = fb_height;
-	info->var.bits_per_pixel = par->bpp;
+	info->var.bits_per_pixel = fb_bpp;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 	info->var.height = -1;
 	info->var.width = -1;
 
-	info->var.xres = initial_width;
-	info->var.yres = initial_height;
-
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
 		ret = -ENOMEM;
@@ -535,6 +750,7 @@
 	par->dirty.y1 = par->dirty.y2 = 0;
 	par->dirty.active = true;
 	spin_lock_init(&par->dirty.lock);
+	mutex_init(&par->bo_mutex);
 	info->fbdefio = &vmw_defio;
 	fb_deferred_io_init(info);
 
@@ -542,16 +758,16 @@
 	if (unlikely(ret != 0))
 		goto err_defio;
 
+	vmw_fb_set_par(info);
+
 	return 0;
 
 err_defio:
 	fb_deferred_io_cleanup(info);
 err_aper:
-	ttm_bo_kunmap(&par->map);
-err_unref:
-	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
 err_free:
 	vfree(par->vmalloc);
+err_kms:
 	framebuffer_release(info);
 	vmw_priv->fb_info = NULL;
 
@@ -562,22 +778,19 @@
 {
 	struct fb_info *info;
 	struct vmw_fb_par *par;
-	struct ttm_buffer_object *bo;
 
 	if (!vmw_priv->fb_info)
 		return 0;
 
 	info = vmw_priv->fb_info;
 	par = info->par;
-	bo = &par->vmw_bo->base;
-	par->vmw_bo = NULL;
 
 	/* ??? order */
 	fb_deferred_io_cleanup(info);
+	cancel_delayed_work_sync(&par->local_work);
 	unregister_framebuffer(info);
 
-	ttm_bo_kunmap(&par->map);
-	ttm_bo_unref(&bo);
+	(void) vmw_fb_kms_detach(par, true, true);
 
 	vfree(par->vmalloc);
 	framebuffer_release(info);
@@ -602,11 +815,11 @@
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 
 	flush_delayed_work(&info->deferred_work);
+	flush_delayed_work(&par->local_work);
 
-	par->bo_ptr = NULL;
-	ttm_bo_kunmap(&par->map);
-
-	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+	mutex_lock(&par->bo_mutex);
+	(void) vmw_fb_kms_detach(par, true, false);
+	mutex_unlock(&par->bo_mutex);
 
 	return 0;
 }
@@ -616,8 +829,6 @@
 	struct fb_info *info;
 	struct vmw_fb_par *par;
 	unsigned long flags;
-	bool dummy;
-	int ret;
 
 	if (!vmw_priv->fb_info)
 		return -EINVAL;
@@ -625,38 +836,10 @@
 	info = vmw_priv->fb_info;
 	par = info->par;
 
-	/* we are already active */
-	if (par->bo_ptr != NULL)
-		return 0;
-
-	/* Make sure that all overlays are stoped when we take over */
-	vmw_overlay_stop_all(vmw_priv);
-
-	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("could not move buffer to start of VRAM\n");
-		goto err_no_buffer;
-	}
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	BUG_ON(ret != 0);
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
-
+	vmw_fb_set_par(info);
 	spin_lock_irqsave(&par->dirty.lock, flags);
 	par->dirty.active = true;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-err_no_buffer:
-	vmw_fb_set_par(info);
-
-	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
-
-	/* If there already was stuff dirty we wont
-	 * schedule a new work, so lets do it now */
-	schedule_delayed_work(&info->deferred_work, 0);
-
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0..567dded 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
 
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
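
The comparison above is wrap-safe: with unsigned 32-bit arithmetic, (current - fence) is small whenever current is at or ahead of fence, even across a counter wrap, provided the two values stay within VMW_FENCE_WRAP of each other. A standalone illustration (the window value here is illustrative):

#include <stdint.h>
#include <stdbool.h>

#define FENCE_WRAP (1u << 31)	/* half the counter range */

static bool seqno_passed(uint32_t current, uint32_t fence)
{
	/* Unsigned subtraction is well-defined modulo 2^32. */
	return (current - fence) < FENCE_WRAP;
}
/* seqno_passed(0x00000002, 0xfffffffe) is true: wrapped, but passed. */
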
@@ -386,7 +386,7 @@
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 
 	if (fence_is_signaled_locked(&fence->base))
 		return false;
@@ -453,7 +453,7 @@
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
 	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add..8be6c29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03..80c40c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_placement.h>
 
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -71,8 +76,8 @@
 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
 		return false;
 
-	/* Non-Screen Object path does not support surfaces */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support surfaces */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return false;
 
 	return true;
@@ -80,7 +85,7 @@
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
-	uint32_t dummy;
 
+	fifo->dx = false;
 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
 	if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@
 	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
-	/*
-	 * Allow mapping the first page read-only to user-space.
-	 */
-
 	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+		  SVGA_REG_ENABLE_HIDE);
+	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
 	min = 4;
 	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@
 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
 	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);
-	return vmw_fifo_send_fence(dev_priv, &dummy);
+
+	return 0;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	static DEFINE_SPINLOCK(ping_lock);
 	unsigned long irq_flags;
 
@@ -178,7 +183,7 @@
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@
  * Returns:
  *   Pointer to the fifo, or null on error (possible hardware hang).
  */
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -372,7 +378,8 @@
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
@@ -391,11 +398,36 @@
 out_err:
 	fifo_state->reserved_size = 0;
 	mutex_unlock(&fifo_state->fifo_mutex);
+
 	return NULL;
 }
 
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
+{
+	void *ret;
+
+	if (dev_priv->cman)
+		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
+		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN_ON("Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
+	if (IS_ERR_OR_NULL(ret)) {
+		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+			  (unsigned) bytes);
+		dump_stack();
+		return NULL;
+	}
+
+	return ret;
+}
+
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 __iomem *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -417,7 +449,7 @@
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 __iomem *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,15 +468,19 @@
 	}
 }
 
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
 	BUG_ON((bytes & 3) != 0);
 	BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -482,13 +518,53 @@
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+	might_sleep();
+
+	if (dev_priv->cman)
+		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+	else
+		return 0;
+}
+
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-	    ((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
-	vmw_fifo_commit(dev_priv, bytes);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
+	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);
 
@@ -545,7 +619,7 @@
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@
 
 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
 }
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
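
The reserve/commit pair above hands callers a mapped span of FIFO or command-buffer space to fill in place. A condensed caller sketch modeled on vmw_fifo_send_fence (the packed command struct is illustrative; error handling trimmed):

#include "vmwgfx_drv.h"

struct fence_cmd {
	u32 id;		/* SVGA_CMD_FENCE */
	u32 fence;
};

static int emit_fence(struct vmw_private *dev_priv, u32 seqno)
{
	struct fence_cmd *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (!cmd)
		return -ENOMEM;	/* reserve already logged the failure */
	cmd->id = SVGA_CMD_FENCE;
	cmd->fence = seqno;
	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
	return 0;
}
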
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d80..66ffa1d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23..0a970af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
 
 struct svga_3d_compat_cap {
 	SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@
 	case DRM_VMW_PARAM_MAX_MOB_SIZE:
 		param->value = dev_priv->max_mob_size;
 		break;
+	case DRM_VMW_PARAM_SCREEN_TARGET:
+		param->value =
+			(dev_priv->active_display_unit == vmw_du_screen_target);
+		break;
+	case DRM_VMW_PARAM_DX:
+		param->value = dev_priv->has_dx;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
@@ -154,7 +162,7 @@
 		(struct drm_vmw_get_3d_cap_arg *) data;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t size;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
@@ -235,7 +243,7 @@
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -318,7 +326,7 @@
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827..9498a5e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@
 	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
+	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+			     SVGA_IRQFLAG_ERROR))
+		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
 
 	return IRQ_HANDLED;
 }
@@ -69,7 +72,7 @@
 void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@
 	 * Block command submission while waiting for idle.
 	 */
 
-	if (fifo_idle)
+	if (fifo_idle) {
 		down_read(&fifo_state->rwsem);
+		if (dev_priv->cman) {
+			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+					      10*HZ);
+			if (ret)
+				goto out_err;
+		}
+	}
+
 	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
@@ -167,10 +178,11 @@
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
 	if (ret == 0 && fifo_idle) {
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
 	wake_up_all(&dev_priv->fence_queue);
+out_err:
 	if (fifo_idle)
 		up_read(&fifo_state->rwsem);
 
@@ -315,3 +327,30 @@
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
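
vmw_generic_waiter_add/remove above implement a counted enable: the first waiter unmasks the interrupt source, the last one masks it again, and the spinlock keeps count and mask coherent. A userspace model of the counting, with a pthread mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irq_mask;
static int waiter_count;

static void waiter_add(uint32_t flag)
{
	pthread_mutex_lock(&lock);
	if (waiter_count++ == 0)
		irq_mask |= flag;	/* first waiter: unmask the source */
	pthread_mutex_unlock(&lock);
}

static void waiter_remove(uint32_t flag)
{
	pthread_mutex_lock(&lock);
	if (--waiter_count == 0)
		irq_mask &= ~flag;	/* last waiter: mask it again */
	pthread_mutex_unlock(&lock);
}
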
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8c..61fb7f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
-
-struct vmw_clip_rect {
-	int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-static void vmw_clip_cliprects(struct drm_clip_rect *rects,
-			int num_rects,
-			struct vmw_clip_rect clip,
-			SVGASignedRect *out_rects,
-			int *out_num)
-{
-	int i, k;
-
-	for (i = 0, k = 0; i < num_rects; i++) {
-		int x1 = max_t(int, clip.x1, rects[i].x1);
-		int y1 = max_t(int, clip.y1, rects[i].y1);
-		int x2 = min_t(int, clip.x2, rects[i].x2);
-		int y2 = min_t(int, clip.y2, rects[i].y2);
-
-		if (x1 >= x2)
-			continue;
-		if (y1 >= y2)
-			continue;
-
-		out_rects[k].left   = x1;
-		out_rects[k].top    = y1;
-		out_rects[k].right  = x2;
-		out_rects[k].bottom = y2;
-		k++;
-	}
-
-	*out_num = k;
-}
-
-void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+void vmw_du_cleanup(struct vmw_display_unit *du)
 {
 	if (du->cursor_surface)
 		vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@
 
 	memcpy(&cmd[1], image, image_size);
 
-	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
-	cmd->cursor.id = cpu_to_le32(0);
-	cmd->cursor.width = cpu_to_le32(width);
-	cmd->cursor.height = cpu_to_le32(height);
-	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
-	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+	cmd->cursor.id = 0;
+	cmd->cursor.width = width;
+	cmd->cursor.height = height;
+	cmd->cursor.hotspotX = hotspotX;
+	cmd->cursor.hotspotY = hotspotY;
 
 	vmw_fifo_commit(dev_priv, cmd_size);
 
@@ -161,7 +123,7 @@
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;
 
 	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@
 
 	srf->snooper.age++;
 
-	/* we can't call this function from this function since execbuf has
-	 * reserved fifo space.
-	 *
-	 * if (srf->snooper.crtc)
-	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
-	 *					 srf->snooper.image, 64, 64,
-	 *					 du->hotspot_x, du->hotspot_y);
-	 */
-
 	ttm_bo_kunmap(&map);
 err_unreserve:
 	ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@
  * Surface framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbs(x) \
-	container_of(x, struct vmw_framebuffer_surface, base.base)
-
-struct vmw_framebuffer_surface {
-	struct vmw_framebuffer base;
-	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
-	struct list_head head;
-	struct drm_master *master;
-};
-
 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
-	struct vmw_master *vmaster = vmw_master(vfbs->master);
 
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_del(&vfbs->head);
-	mutex_unlock(&vmaster->fb_surf_mutex);
-
-	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_surface_unreference(&vfbs->surface);
-	ttm_base_object_unref(&vfbs->base.user_obj);
+	if (vfbs->base.user_obj)
+		ttm_base_object_unref(&vfbs->base.user_obj);
 
 	kfree(vfbs);
 }
 
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
-				struct drm_file *file_priv,
-				struct vmw_framebuffer *framebuffer,
-				unsigned flags, unsigned color,
-				struct drm_clip_rect *clips,
-				unsigned num_clips, int inc,
-				struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
-			    head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(!clips || !num_clips);
-
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kzalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Temporary fifo memory alloc failed.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	/* setup blits pointer */
-	blits = (SVGASignedRect *)&cmd[1];
-
-	/* initial clip region */
-	left = clips->x1;
-	right = clips->x2;
-	top = clips->y1;
-	bottom = clips->y2;
-
-	/* skip the first clip rect */
-	for (i = 1, clips_ptr = clips + inc;
-	     i < num_clips; i++, clips_ptr += inc) {
-		left = min_t(int, left, (int)clips_ptr->x1);
-		right = max_t(int, right, (int)clips_ptr->x2);
-		top = min_t(int, top, (int)clips_ptr->y1);
-		bottom = max_t(int, bottom, (int)clips_ptr->y2);
-	}
-
-	/* only need to do this once */
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	clips_ptr = clips;
-	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-		tmp[i].x1 = clips_ptr->x1 - left;
-		tmp[i].x2 = clips_ptr->x2 - left;
-		tmp[i].y1 = clips_ptr->y1 - top;
-		tmp[i].y2 = clips_ptr->y2 - top;
-	}
-
-	/* do per unit writing, reuse fifo for each */
-	for (i = 0; i < num_units; i++) {
-		struct vmw_display_unit *unit = units[i];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left - unit->crtc.x;
-		clip.y1 = top - unit->crtc.y;
-		clip.x2 = right - unit->crtc.x;
-		clip.y2 = bottom - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
 static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
@@ -601,11 +390,8 @@
 	struct drm_clip_rect norect;
 	int ret, inc = 1;
 
-	if (unlikely(vfbs->master != file_priv->master))
-		return -EINVAL;
-
-	/* Require ScreenObject support for 3D */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support 3D */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -EINVAL;
 
 	drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@
 		inc = 2; /* skip source rects */
 	}
 
-	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
-				   flags, color,
-				   clips, num_clips, inc, NULL);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+						   clips, NULL, NULL, 0, 0,
+						   num_clips, inc, NULL);
+	else
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+						 clips, NULL, NULL, 0, 0,
+						 num_clips, inc, NULL);
 
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@
 	return 0;
 }
 
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips)
+{
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_object:
+		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+					    user_fence_rep, vclips, num_clips);
+	case vmw_du_screen_target:
+		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+					user_fence_rep, NULL, vclips, num_clips,
+					1, false, true);
+	default:
+		WARN_ONCE(true,
+			  "Readback called with invalid display system.\n");
+	}
+
+	return -ENOSYS;
+}
+
+
 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.destroy = vmw_framebuffer_surface_destroy,
 	.dirty = vmw_framebuffer_surface_dirty,
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
-					   struct drm_file *file_priv,
 					   struct vmw_surface *surface,
 					   struct vmw_framebuffer **out,
 					   const struct drm_mode_fb_cmd
-					   *mode_cmd)
+					   *mode_cmd,
+					   bool is_dmabuf_proxy)
 
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	/* 3D is only supported on HWv8 hosts which supports screen objects */
-	if (!dev_priv->sou_priv)
+	/* 3D is only supported on HWv8 and newer hosts */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -ENOSYS;
 
 	/*
@@ -692,15 +523,16 @@
 	case 15:
 		format = SVGA3D_A1R5G5B5;
 		break;
-	case 8:
-		format = SVGA3D_LUMINANCE8;
-		break;
 	default:
 		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 		return -EINVAL;
 	}
 
-	if (unlikely(format != surface->format)) {
+	/*
+	 * For DX, surface format validation is done when surface->scanout
+	 * is set.
+	 */
+	if (!dev_priv->has_dx && format != surface->format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
@@ -711,38 +543,27 @@
 		goto out_err1;
 	}
 
-	if (!vmw_surface_reference(surface)) {
-		DRM_ERROR("failed to reference surface %p\n", surface);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	/* XXX get the first 3 from the surface info */
 	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbs->base.base.pitches[0] = mode_cmd->pitch;
 	vfbs->base.base.depth = mode_cmd->depth;
 	vfbs->base.base.width = mode_cmd->width;
 	vfbs->base.base.height = mode_cmd->height;
-	vfbs->surface = surface;
+	vfbs->surface = vmw_surface_reference(surface);
 	vfbs->base.user_handle = mode_cmd->handle;
-	vfbs->master = drm_master_get(file_priv->master);
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_add_tail(&vfbs->head, &vmaster->fb_surf);
-	mutex_unlock(&vmaster->fb_surf_mutex);
+	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
 
 	*out = &vfbs->base;
 
 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 				   &vmw_framebuffer_surface_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_surface_unreference(&surface);
 out_err2:
+	vmw_surface_unreference(&surface);
 	kfree(vfbs);
 out_err1:
 	return ret;
@@ -752,14 +573,6 @@
  * Dmabuf framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
-struct vmw_framebuffer_dmabuf {
-	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
-};
-
 static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@
 
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_dmabuf_unreference(&vfbd->buffer);
-	ttm_base_object_unref(&vfbd->base.user_obj);
+	if (vfbd->base.user_obj)
+		ttm_base_object_unref(&vfbd->base.user_obj);
 
 	kfree(vfbd);
 }
 
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment)
-{
-	size_t fifo_size;
-	int i;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
-
-	fifo_size = sizeof(*cmd) * num_clips;
-	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	for (i = 0; i < num_clips; i++, clips += increment) {
-		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-		cmd[i].body.x = cpu_to_le32(clips->x1);
-		cmd[i].body.y = cpu_to_le32(clips->y1);
-		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
-		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
-	}
-
-	vmw_fifo_commit(dev_priv, fifo_size);
-	return 0;
-}
-
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
-				  struct vmw_private *dev_priv,
-				  struct vmw_framebuffer *framebuffer)
-{
-	int depth = framebuffer->base.depth;
-	size_t fifo_size;
-	int ret;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-
-	/* Emulate RGBA support, contrary to svga_reg.h this is not
-	 * supported by hosts. This is only a problem if we are reading
-	 * this value later and expecting what we uploaded back.
-	 */
-	if (depth == 32)
-		depth = 24;
-
-	fifo_size = sizeof(*cmd);
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
-	cmd->body.format.colorDepth = depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
-	cmd->body.ptr.gmrId = framebuffer->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-				  fifo_size, 0, NULL, NULL);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
-			       struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment,
-			       struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	int i, k, num_units, ret;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitGMRFBToScreen body;
-	} *blits;
-
-	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
-	if (unlikely(ret != 0))
-		return ret; /* define_gmrfb prints warnings */
-
-	fifo_size = sizeof(*blits) * num_clips;
-	blits = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(blits == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		int hit_num = 0;
-
-		clips_ptr = clips;
-		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
-			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
-			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
-			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
-			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
-			int move_x, move_y;
-
-			/* skip any crtcs that misses the clip region */
-			if (clip_x1 >= unit->crtc.mode.hdisplay ||
-			    clip_y1 >= unit->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			/* clip size to crtc size */
-			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
-			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
-			/* translate both src and dest to bring clip into screen */
-			move_x = min_t(int, clip_x1, 0);
-			move_y = min_t(int, clip_y1, 0);
-
-			/* actual translate done here */
-			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
-			blits[hit_num].body.destScreenId = unit->unit;
-			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
-			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
-			blits[hit_num].body.destRect.left = clip_x1 - move_x;
-			blits[hit_num].body.destRect.top = clip_y1 - move_y;
-			blits[hit_num].body.destRect.right = clip_x2;
-			blits[hit_num].body.destRect.bottom = clip_y2;
-			hit_num++;
-		}
-
-		/* no clips hit the crtc */
-		if (hit_num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		fifo_size = sizeof(*blits) * hit_num;
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(blits);
-
-	return ret;
-}
-
 static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
@@ -977,16 +617,29 @@
 		increment = 2;
 	}
 
-	if (dev_priv->ldu_priv) {
-		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment);
-	} else {
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment, NULL);
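+	/* Hand the dirty clips to the active display unit implementation. */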
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+				       clips, NULL, num_clips, increment,
+				       true, true);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+						  clips, num_clips, increment,
+						  true,
+						  NULL);
+		break;
+	case vmw_du_legacy:
+		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+						  clips, num_clips, increment);
+		break;
+	default:
+		ret = -EINVAL;
+		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+		break;
 	}
 
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,40 +655,132 @@
 /**
  * Pin the dmabuffer to the start of vram.
  */
-static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *buf;
 	int ret;
 
-	/* This code should not be used with screen objects */
-	BUG_ON(dev_priv->sou_priv);
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
-	vmw_overlay_pause_all(dev_priv);
+	if (!buf)
+		return 0;
 
-	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
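+	/*
+	 * Placement depends on the display unit: legacy scanout needs the
+	 * start of VRAM, while screen objects / targets pin dma-bufs in VRAM
+	 * or a GMR and surface backup buffers in a MOB.
+	 */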
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_legacy:
+		vmw_overlay_pause_all(dev_priv);
+		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+		vmw_overlay_resume_all(dev_priv);
+		break;
+	case vmw_du_screen_object:
+	case vmw_du_screen_target:
+		if (vfb->dmabuf)
+			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+							     false);
 
-	vmw_overlay_resume_all(dev_priv);
+		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+						   &vmw_mob_placement, false);
+	default:
+		return -EINVAL;
+	}
 
-	WARN_ON(ret != 0);
+	return ret;
+}
+
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_dma_buffer *buf;
+
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+	if (WARN_ON(!buf))
+		return 0;
+
+	return vmw_dmabuf_unpin(dev_priv, buf, false);
+}
+
+/**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer.  This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+				   const struct drm_mode_fb_cmd *mode_cmd,
+				   struct vmw_dma_buffer *dmabuf_mob,
+				   struct vmw_surface **srf_out)
+{
+	uint32_t format;
+	struct drm_vmw_size content_base_size;
+	struct vmw_resource *res;
+	int ret;
+
+	switch (mode_cmd->depth) {
+	case 32:
+	case 24:
+		format = SVGA3D_X8R8G8B8;
+		break;
+
+	case 16:
+	case 15:
+		format = SVGA3D_R5G6B5;
+		break;
+
+	case 8:
+		format = SVGA3D_P8;
+		break;
+
+	default:
+		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+		return -EINVAL;
+	}
+
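+	/* The proxy surface mirrors the framebuffer dimensions. */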
+	content_base_size.width  = mode_cmd->width;
+	content_base_size.height = mode_cmd->height;
+	content_base_size.depth  = 1;
+
+	ret = vmw_surface_gb_priv_define(dev,
+			0, /* kernel visible only */
+			0, /* flags */
+			format,
+			true, /* can be a scanout buffer */
+			1, /* num of mip levels */
+			0,
+			0,
+			content_base_size,
+			srf_out);
+	if (ret) {
+		DRM_ERROR("Failed to allocate proxy content buffer\n");
+		return ret;
+	}
+
+	res = &(*srf_out)->res;
+
+	/* Reserve and switch the backing mob. */
+	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+	(void) vmw_resource_reserve(res, false, true);
+	vmw_dmabuf_unreference(&res->backup);
+	res->backup = vmw_dmabuf_reference(dmabuf_mob);
+	res->backup_offset = 0;
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 
 	return 0;
 }
 
-static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
-{
-	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
 
-	if (!vfbd->buffer) {
-		WARN_ON(!vfbd->buffer);
-		return 0;
-	}
-
-	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
-}
 
 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 					  struct vmw_dma_buffer *dmabuf,
@@ -1057,7 +802,7 @@
 	}
 
 	/* Limited framebuffer color depth support for screen objects */
-	if (dev_priv->sou_priv) {
+	if (dev_priv->active_display_unit == vmw_du_screen_object) {
 		switch (mode_cmd->depth) {
 		case 32:
 		case 24:
@@ -1089,41 +834,96 @@
 		goto out_err1;
 	}
 
-	if (!vmw_dmabuf_reference(dmabuf)) {
-		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbd->base.base.pitches[0] = mode_cmd->pitch;
 	vfbd->base.base.depth = mode_cmd->depth;
 	vfbd->base.base.width = mode_cmd->width;
 	vfbd->base.base.height = mode_cmd->height;
-	if (!dev_priv->sou_priv) {
-		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
-		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
-	}
 	vfbd->base.dmabuf = true;
-	vfbd->buffer = dmabuf;
+	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
 	vfbd->base.user_handle = mode_cmd->handle;
 	*out = &vfbd->base;
 
 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 				   &vmw_framebuffer_dmabuf_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_dmabuf_unreference(&dmabuf);
 out_err2:
+	vmw_dmabuf_unreference(&dmabuf);
 	kfree(vfbd);
 out_err1:
 	return ret;
 }
 
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma-buffer-based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct vmw_framebuffer *vfb = NULL;
+	bool is_dmabuf_proxy = false;
+	int ret;
+
+	/*
+	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
+	 * therefore, wrap the DMA buf in a surface so we can use the
+	 * SurfaceCopy command.
+	 */
+	if (dmabuf && only_2d &&
+	    dev_priv->active_display_unit == vmw_du_screen_target) {
+		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+					      dmabuf, &surface);
+		if (ret)
+			return ERR_PTR(ret);
+
+		is_dmabuf_proxy = true;
+	}
+
+	/* Create the new framebuffer depending on what we have */
+	if (surface) {
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+						      mode_cmd,
+						      is_dmabuf_proxy);
+
+		/*
+		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
+		 * needed
+		 */
+		if (is_dmabuf_proxy)
+			vmw_surface_unreference(&surface);
+	} else if (dmabuf) {
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+						     mode_cmd);
+	} else {
+		BUG();
+	}
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	vfb->pin = vmw_framebuffer_pin;
+	vfb->unpin = vmw_framebuffer_unpin;
+
+	return vfb;
+}
+
 /*
  * Generic Kernel modesetting functions
  */
@@ -1157,7 +957,7 @@
 	if (!vmw_kms_validate_mode_vram(dev_priv,
 					mode_cmd.pitch,
 					mode_cmd.height)) {
-		DRM_ERROR("VRAM size is too small for requested mode.\n");
+		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1187,15 +987,13 @@
 	if (ret)
 		goto err_out;
 
-	/* Create the new framebuffer depending one what we got back */
-	if (bo)
-		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-						     &mode_cmd);
-	else if (surface)
-		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-						      surface, &vfb, &mode_cmd);
-	else
-		BUG();
+	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+				      !(dev_priv->capabilities & SVGA_CAP_3D),
+				      &mode_cmd);
+	if (IS_ERR(vfb)) {
+		ret = PTR_ERR(vfb);
+		goto err_out;
+ 	}
 
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@
 	.fb_create = vmw_kms_fb_create,
 };
 
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+				   struct drm_file *file_priv,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_surface *surface,
+				   uint32_t sid,
+				   int32_t destX, int32_t destY,
+				   struct drm_vmw_rect *clips,
+				   uint32_t num_clips)
+{
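+	/*
+	 * For screen objects, a present is a surface dirty at an explicit
+	 * destination offset.
+	 */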
+	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+					    &surface->res, destX, destY,
+					    num_clips, 1, NULL);
+}
+
+
 int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_file *file_priv,
 		    struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips)
 {
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
+	int ret;
 
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+						 &surface->res, destX, destY,
+						 num_clips, 1, NULL);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+					      sid, destX, destY, clips,
+					      num_clips);
+		break;
+	default:
+		WARN_ONCE(true,
+			  "Present called with invalid display system.\n");
+		ret = -ENOSYS;
+		break;
 	}
+	if (ret)
+		return ret;
 
-	BUG_ON(surface == NULL);
-	BUG_ON(!clips || !num_clips);
+	vmw_fifo_flush(dev_priv, false);
 
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	left = clips->x;
-	right = clips->x + clips->w;
-	top = clips->y;
-	bottom = clips->y + clips->h;
-
-	for (i = 1; i < num_clips; i++) {
-		left = min_t(int, left, (int)clips[i].x);
-		right = max_t(int, right, (int)clips[i].x + clips[i].w);
-		top = min_t(int, top, (int)clips[i].y);
-		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
-	}
-
-	/* only need to do this once */
-	memset(cmd, 0, fifo_size);
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
-	blits = (SVGASignedRect *)&cmd[1];
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	for (i = 0; i < num_clips; i++) {
-		tmp[i].x1 = clips[i].x - left;
-		tmp[i].x2 = clips[i].x + clips[i].w - left;
-		tmp[i].y1 = clips[i].y - top;
-		tmp[i].y2 = clips[i].y + clips[i].h - top;
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left + destX - unit->crtc.x;
-		clip.y1 = top + destY - unit->crtc.y;
-		clip.x2 = right + destX - unit->crtc.x;
-		clip.y2 = bottom + destY - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = sid;
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, NULL);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips)
-{
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
-	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, ret, num_units, blits_pos;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitScreenToGMRFB body;
-	} *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(dmabuf == NULL);
-	BUG_ON(!clips || !num_clips);
-
-	/* take a safe guess at fifo size */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
-	cmd->body.format.colorDepth = vfb->base.depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = vfb->base.pitches[0];
-	cmd->body.ptr.gmrId = vfb->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	blits = (void *)&cmd[1];
-	blits_pos = 0;
-	for (i = 0; i < num_units; i++) {
-		struct drm_vmw_rect *c = clips;
-		for (k = 0; k < num_clips; k++, c++) {
-			/* transform clip coords to crtc origin based coords */
-			int clip_x1 = c->x - units[i]->crtc.x;
-			int clip_x2 = c->x - units[i]->crtc.x + c->w;
-			int clip_y1 = c->y - units[i]->crtc.y;
-			int clip_y2 = c->y - units[i]->crtc.y + c->h;
-			int dest_x = c->x;
-			int dest_y = c->y;
-
-			/* compensate for clipping, we negate
-			 * a negative number and add that.
-			 */
-			if (clip_x1 < 0)
-				dest_x += -clip_x1;
-			if (clip_y1 < 0)
-				dest_y += -clip_y1;
-
-			/* clip */
-			clip_x1 = max(clip_x1, 0);
-			clip_y1 = max(clip_y1, 0);
-			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
-			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
-			/* and cull any rects that misses the crtc */
-			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
-			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
-			blits[blits_pos].body.srcScreenId = units[i]->unit;
-			blits[blits_pos].body.destOrigin.x = dest_x;
-			blits[blits_pos].body.destOrigin.y = dest_y;
-
-			blits[blits_pos].body.srcRect.left = clip_x1;
-			blits[blits_pos].body.srcRect.top = clip_y1;
-			blits[blits_pos].body.srcRect.right = clip_x2;
-			blits[blits_pos].body.srcRect.bottom = clip_y2;
-			blits_pos++;
-		}
-	}
-	/* reset size here and use calculated exact size from loops */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
-				  0, user_fence_rep, NULL);
-
-	kfree(cmd);
-
-	return ret;
+	return 0;
 }
 
 int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	/* assumed largest fb size */
-	dev->mode_config.max_width = 8192;
-	dev->mode_config.max_height = 8192;
+	dev->mode_config.max_width = dev_priv->texture_max_width;
+	dev->mode_config.max_height = dev_priv->texture_max_height;
 
-	ret = vmw_kms_init_screen_object_display(dev_priv);
-	if (ret) /* Fallback */
-		(void)vmw_kms_init_legacy_display_system(dev_priv);
+	ret = vmw_kms_stdu_init_display(dev_priv);
+	if (ret) {
+		ret = vmw_kms_sou_init_display(dev_priv);
+		if (ret) /* Fallback */
+			ret = vmw_kms_ldu_init_display(dev_priv);
+	}
 
-	return 0;
+	return ret;
 }
 
 int vmw_kms_close(struct vmw_private *dev_priv)
 {
+	int ret;
+
 	/*
 	 * Docs says we should take the lock before calling this function
 	 * but since it destroys encoders and our destructor calls
 	 * drm_encoder_cleanup which takes the lock we deadlock.
 	 */
 	drm_mode_config_cleanup(dev_priv->dev);
-	if (dev_priv->sou_priv)
-		vmw_kms_close_screen_object_display(dev_priv);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_close_display(dev_priv);
+	else if (dev_priv->active_display_unit == vmw_du_screen_target)
+		ret = vmw_kms_stdu_close_display(dev_priv);
 	else
-		vmw_kms_close_legacy_display_system(dev_priv);
-	return 0;
+		ret = vmw_kms_ldu_close_display(dev_priv);
+
+	return ret;
 }
 
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@
 		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
 		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
-						       SVGA_FIFO_PITCHLOCK);
+						   SVGA_FIFO_PITCHLOCK);
 
 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 		return 0;
@@ -1641,7 +1254,9 @@
 				uint32_t pitch,
 				uint32_t height)
 {
-	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+	return ((u64) pitch * (u64) height) < (u64)
+		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+		 dev_priv->prim_bb_mem : dev_priv->vram_size);
 }
 
 
@@ -1715,75 +1330,6 @@
 	return 0;
 }
 
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-	struct drm_file *file_priv ;
-	struct vmw_fence_obj *fence = NULL;
-	struct drm_clip_rect clips;
-	int ret;
-
-	if (event == NULL)
-		return -EINVAL;
-
-	/* require ScreenObject support for page flipping */
-	if (!dev_priv->sou_priv)
-		return -ENOSYS;
-
-	file_priv = event->base.file_priv;
-	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
-		return -EINVAL;
-
-	crtc->primary->fb = fb;
-
-	/* do a full screen dirty update */
-	clips.x1 = clips.y1 = 0;
-	clips.x2 = fb->width;
-	clips.y2 = fb->height;
-
-	if (vfb->dmabuf)
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
-					  0, 0, &clips, 1, 1, &fence);
-	else
-		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
-					   0, 0, &clips, 1, 1, &fence);
-
-
-	if (ret != 0)
-		goto out_no_fence;
-	if (!fence) {
-		ret = -EINVAL;
-		goto out_no_fence;
-	}
-
-	ret = vmw_event_fence_action_queue(file_priv, fence,
-					   &event->base,
-					   &event->event.tv_sec,
-					   &event->event.tv_usec,
-					   true);
-
-	/*
-	 * No need to hold on to this now. The only cleanup
-	 * we need to do if we fail is unref the fence.
-	 */
-	vmw_fence_obj_unreference(&fence);
-
-	if (vmw_crtc_to_du(crtc)->is_implicit)
-		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
-	return ret;
-
-out_no_fence:
-	crtc->primary->fb = old_fb;
-	return ret;
-}
-
-
 void vmw_du_crtc_save(struct drm_crtc *crtc)
 {
 }
@@ -1808,8 +1354,9 @@
 	}
 }
 
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
 {
+	return 0;
 }
 
 void vmw_du_connector_save(struct drm_connector *connector)
@@ -1919,7 +1466,7 @@
  * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
  * members filled in.
  */
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
 {
 	mode->hsync_start = mode->hdisplay + 50;
 	mode->hsync_end = mode->hsync_start + 50;
@@ -1954,36 +1501,39 @@
 	 * If using screen objects, then assume 32-bpp because that's what the
 	 * SVGA device is assuming
 	 */
-	if (dev_priv->sou_priv)
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
 		assumed_bpp = 4;
 
-	/* Add preferred mode */
-	{
-		mode = drm_mode_duplicate(dev, &prefmode);
-		if (!mode)
-			return 0;
-		mode->hdisplay = du->pref_width;
-		mode->vdisplay = du->pref_height;
-		vmw_guess_mode_timing(mode);
-
-		if (vmw_kms_validate_mode_vram(dev_priv,
-						mode->hdisplay * assumed_bpp,
-						mode->vdisplay)) {
-			drm_mode_probed_add(connector, mode);
-		} else {
-			drm_mode_destroy(dev, mode);
-			mode = NULL;
-		}
-
-		if (du->pref_mode) {
-			list_del_init(&du->pref_mode->head);
-			drm_mode_destroy(dev, du->pref_mode);
-		}
-
-		/* mode might be null here, this is intended */
-		du->pref_mode = mode;
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		max_width  = min(max_width,  dev_priv->stdu_max_width);
+		max_height = min(max_height, dev_priv->stdu_max_height);
 	}
 
+	/* Add preferred mode */
+	mode = drm_mode_duplicate(dev, &prefmode);
+	if (!mode)
+		return 0;
+	mode->hdisplay = du->pref_width;
+	mode->vdisplay = du->pref_height;
+	vmw_guess_mode_timing(mode);
+
+	if (vmw_kms_validate_mode_vram(dev_priv,
+					mode->hdisplay * assumed_bpp,
+					mode->vdisplay)) {
+		drm_mode_probed_add(connector, mode);
+	} else {
+		drm_mode_destroy(dev, mode);
+		mode = NULL;
+	}
+
+	if (du->pref_mode) {
+		list_del_init(&du->pref_mode->head);
+		drm_mode_destroy(dev, du->pref_mode);
+	}
+
+	/* mode might be NULL here; this is intended */
+	du->pref_mode = mode;
+
 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
 		bmode = &vmw_kms_connector_builtin[i];
 		if (bmode->hdisplay > max_width ||
@@ -2003,11 +1553,9 @@
 		drm_mode_probed_add(connector, mode);
 	}
 
-	/* Move the prefered mode first, help apps pick the right mode. */
-	if (du->pref_mode)
-		list_move(&du->pref_mode->head, &connector->probed_modes);
-
 	drm_mode_connector_list_update(connector, true);
+	/* Move the preferred mode first, help apps pick the right mode. */
+	drm_mode_sort(&connector->modes);
 
 	return 1;
 }
@@ -2031,7 +1579,9 @@
 	unsigned rects_size;
 	int ret;
 	int i;
+	u64 total_pixels = 0;
 	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_vmw_rect bounding_box = {0};
 
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2062,6 +1612,40 @@
 			ret = -EINVAL;
 			goto out_free;
 		}
+
+		/*
+		 * bounding_box.w and bounding_box.h are used as
+		 * lower-right coordinates
+		 */
+		if (rects[i].x + rects[i].w > bounding_box.w)
+			bounding_box.w = rects[i].x + rects[i].w;
+
+		if (rects[i].y + rects[i].h > bounding_box.h)
+			bounding_box.h = rects[i].y + rects[i].h;
+
+		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+	}
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		/*
+		 * For Screen Targets, the limits for a topology are:
+		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+		 */
+		u64 bb_mem    = bounding_box.w * bounding_box.h * 4;
+		u64 pixel_mem = total_pixels * 4;
+
+		if (bb_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Topology is beyond supported limits.\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		if (pixel_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Combined output size too large\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
 	}
 
 	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2070,3 +1654,419 @@
 	kfree(rects);
 	return ret;
 }
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	u32 num_units = 0;
+	u32 i, k;
+	int ret;
+
+	dirty->dev_priv = dev_priv;
+
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb != &framebuffer->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
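+	/* Walk the clip list once for each unit scanning out @framebuffer. */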
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		s32 crtc_x = unit->crtc.x;
+		s32 crtc_y = unit->crtc.y;
+		s32 crtc_width = unit->crtc.mode.hdisplay;
+		s32 crtc_height = unit->crtc.mode.vdisplay;
+		const struct drm_clip_rect *clips_ptr = clips;
+		const struct drm_vmw_rect *vclips_ptr = vclips;
+
+		dirty->unit = unit;
+		if (dirty->fifo_reserve_size > 0) {
+			dirty->cmd = vmw_fifo_reserve(dev_priv,
+						      dirty->fifo_reserve_size);
+			if (!dirty->cmd) {
+				DRM_ERROR("Couldn't reserve fifo space "
+					  "for dirty blits.\n");
+				return -ENOMEM;
+			}
+			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+		}
+		dirty->num_hits = 0;
+		for (i = 0; i < num_clips; i++, clips_ptr += increment,
+		       vclips_ptr += increment) {
+			s32 clip_left;
+			s32 clip_top;
+
+			/*
+			 * Select clip array type. Note that integer type
+			 * in @clips is unsigned short, whereas in @vclips
+			 * it's 32-bit.
+			 */
+			if (clips) {
+				dirty->fb_x = (s32) clips_ptr->x1;
+				dirty->fb_y = (s32) clips_ptr->y1;
+				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+					crtc_x;
+				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+					crtc_y;
+			} else {
+				dirty->fb_x = vclips_ptr->x;
+				dirty->fb_y = vclips_ptr->y;
+				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+					dest_x - crtc_x;
+				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+					dest_y - crtc_y;
+			}
+
+			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+			/* Skip this clip if it's outside the crtc region */
+			if (dirty->unit_x1 >= crtc_width ||
+			    dirty->unit_y1 >= crtc_height ||
+			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+				continue;
+
+			/* Clip right and bottom to crtc limits */
+			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+					       crtc_width);
+			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+					       crtc_height);
+
+			/* Clip left and top to crtc limits */
+			clip_left = min_t(s32, dirty->unit_x1, 0);
+			clip_top = min_t(s32, dirty->unit_y1, 0);
+			dirty->unit_x1 -= clip_left;
+			dirty->unit_y1 -= clip_top;
+			dirty->fb_x -= clip_left;
+			dirty->fb_y -= clip_top;
+
+			dirty->clip(dirty);
+		}
+
+		dirty->fifo_commit(dirty);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob)
+{
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ttm_bo_reserve(bo, false, false, interruptible, NULL);
+	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+					 validate_as_mob);
+	if (ret)
+		ttm_bo_unreserve(bo);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @res: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+	if (buf)
+		ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep)
+{
+	struct vmw_fence_obj *fence;
+	uint32_t handle;
+	int ret;
+
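+	/*
+	 * Always create a fence; attach it to the buffer and, if requested,
+	 * copy fence data to user-space.
+	 */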
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+					 file_priv ? &handle : NULL);
+	if (buf)
+		vmw_fence_single_bo(&buf->base, fence);
+	if (file_priv)
+		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+					    ret, user_fence_rep, fence,
+					    handle);
+	if (out_fence)
+		*out_fence = fence;
+	else
+		vmw_fence_obj_unreference(&fence);
+
+	vmw_kms_helper_buffer_revert(buf);
+}
+
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Reserves and validates also the backup buffer if a guest-backed resource.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+	else
+		mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_unlock;
+
+	if (res->backup) {
+		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+						    interruptible,
+						    res->dev_priv->has_mob);
+		if (ret)
+			goto out_unreserve;
+	}
+	ret = vmw_resource_validate(res);
+	if (ret)
+		goto out_revert;
+	return 0;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+	vmw_resource_unreserve(res, false, NULL, 0);
+out_unlock:
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+			     struct vmw_fence_obj **out_fence)
+{
+	if (res->backup || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+					     out_fence, NULL);
+
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB needs to be reserved and validated on call.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+	SVGA3dBox *box;
+	size_t copy_size = 0;
+	int i;
+
+	if (!clips)
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+	if (!cmd) {
+		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+			  "update.\n");
+		return -ENOMEM;
+	}
+
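+	/* Build one UPDATE_GB_IMAGE command per clip rect. */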
+	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+		box = &cmd->body.box;
+
+		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.image.sid = res->id;
+		cmd->body.image.face = 0;
+		cmd->body.image.mipmap = 0;
+
+		if (clips->x1 > size->width || clips->x2 > size->width ||
+		    clips->y1 > size->height || clips->y2 > size->height) {
+			DRM_ERROR("Invalid clips outside of framebuffer.\n");
+			return -EINVAL;
+		}
+
+		box->x = clips->x1;
+		box->y = clips->y1;
+		box->z = 0;
+		box->w = clips->x2 - clips->x1;
+		box->h = clips->y2 - clips->y1;
+		box->d = 1;
+
+		copy_size += sizeof(*cmd);
+	}
+
+	vmw_fifo_commit(dev_priv, copy_size);
+
+	return 0;
+}
+
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode)
+{
+	struct drm_connector *con;
+	struct vmw_display_unit *du;
+	struct drm_display_mode *mode;
+	int i = 0;
+
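+	/* Find the connector for the requested unit by list position. */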
+	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+			    head) {
+		if (i == unit)
+			break;
+
+		++i;
+	}
+
+	if (i != unit) {
+		DRM_ERROR("Could not find initial display unit.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&con->modes))
+		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+	if (list_empty(&con->modes)) {
+		DRM_ERROR("Could not find initial display mode.\n");
+		return -EINVAL;
+	}
+
+	du = vmw_connector_to_du(con);
+	*p_con = con;
+	*p_crtc = &du->crtc;
+
+	list_for_each_entry(mode, &con->modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			break;
+	}
+
+	if (mode->type & DRM_MODE_TYPE_PREFERRED)
+		*p_mode = mode;
+	else {
+		WARN_ONCE(true, "Could not find initial preferred mode.\n");
+		*p_mode = list_first_entry(&con->modes,
+					   struct drm_display_mode,
+					   head);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c3..782df7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
 #include <drm/drm_crtc_helper.h>
 #include "vmwgfx_drv.h"
 
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be derived from if more info needs to be
+ * passed between the helper caller and the helper callbacks.
+ */
+struct vmw_kms_dirty {
+	void (*fifo_commit)(struct vmw_kms_dirty *);
+	void (*clip)(struct vmw_kms_dirty *);
+	size_t fifo_reserve_size;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *unit;
+	void *cmd;
+	u32 num_hits;
+	s32 fb_x;
+	s32 fb_y;
+	s32 unit_x1;
+	s32 unit_y1;
+	s32 unit_x2;
+	s32 unit_y2;
+};
+
 #define VMWGFX_NUM_DISPLAY_UNITS 8
 
 
 #define vmw_framebuffer_to_vfb(x) \
 	container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+	container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
 /**
  * Base class for framebuffers
@@ -53,9 +102,27 @@
 	uint32_t user_handle;
 };
 
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
 
-#define vmw_crtc_to_du(x) \
-	container_of(x, struct vmw_display_unit, crtc)
+struct vmw_framebuffer_surface {
+	struct vmw_framebuffer base;
+	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
+	struct list_head head;
+	bool is_dmabuf_proxy;  /* true if this is a proxy surface for a DMA buf */
+};
+
+
+struct vmw_framebuffer_dmabuf {
+	struct vmw_framebuffer base;
+	struct vmw_dma_buffer *buffer;
+};
+
 
 /*
  * Basic cursor manipulation
@@ -120,11 +187,7 @@
 /*
  * Shared display unit functions - vmwgfx_kms.c
  */
-void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags);
+void vmw_du_cleanup(struct vmw_display_unit *du);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -133,7 +196,7 @@
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 			   uint32_t handle, uint32_t width, uint32_t height);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
 void vmw_du_connector_save(struct drm_connector *connector);
 void vmw_du_connector_restore(struct drm_connector *connector);
 enum drm_connector_status
@@ -143,25 +206,118 @@
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty);
 
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    struct vmw_fence_obj **out_fence);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
  */
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment);
 
 /*
  * Screen Objects display functions - vmwgfx_scrn.c
  */
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
-			      struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc);
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible);
 
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f7..bb63e4d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
 {
 	list_del_init(&ldu->active);
-	vmw_display_unit_cleanup(&ldu->base);
+	vmw_du_cleanup(&ldu->base);
 	kfree(ldu);
 }
 
@@ -279,7 +279,7 @@
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	crtc->primary->fb = fb;
 	encoder->crtc = crtc;
@@ -385,7 +385,7 @@
 	return 0;
 }
 
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -422,6 +422,10 @@
 	else
 		vmw_ldu_init(dev_priv, 0);
 
+	dev_priv->active_display_unit = vmw_du_legacy;
+
+	DRM_INFO("Legacy Display Unit initialized\n");
+
 	return 0;
 
 err_vblank_cleanup:
@@ -432,7 +436,7 @@
 	return ret;
 }
 
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -447,3 +451,38 @@
 
 	return 0;
 }
+
+
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment)
+{
+	size_t fifo_size;
+	int i;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
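+	/* Emit one 2D update command per clip rect. */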
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		cmd[i].header = SVGA_CMD_UPDATE;
+		cmd[i].body.x = clips->x1;
+		cmd[i].body.y = clips->y1;
+		cmd[i].body.width = clips->x2 - clips->x1;
+		cmd[i].body.height = clips->y2 - clips->y1;
+	}
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+	return 0;
+}
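
A usage sketch for the helper above (editorial illustration, not part of the
patch; the caller context, dev_priv and framebuffer, is assumed): since the
legacy path ignores flags and color, a full-framebuffer refresh reduces to a
single clip with increment 1.

	struct drm_clip_rect clip = {
		.x1 = 0,
		.y1 = 0,
		.x2 = framebuffer->base.width,
		.y2 = framebuffer->base.height,
	};
	int ret;

	/* One clip, increment 1: emits exactly one SVGA_CMD_UPDATE
	 * covering the whole framebuffer.
	 */
	ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, framebuffer,
					  0, 0, &clip, 1, 1);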
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8..23db160 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
  * If we set up the screen target otable, screen objects stop working.
  */
 
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
 
 #ifdef CONFIG_64BIT
 #define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@
  * @size:           Size of the table (page-aligned).
  * @page_table:     Pointer to a struct vmw_mob holding the page table.
  */
-struct vmw_otable {
-	unsigned long size;
-	struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
 
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@
  */
 static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 				 SVGAOTableType type,
+				 struct ttm_buffer_object *otable_bo,
 				 unsigned long offset,
 				 struct vmw_otable *otable)
 {
@@ -106,7 +121,7 @@
 
 	BUG_ON(otable->page_table != NULL);
 
-	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vsgt = vmw_bo_sg_table(otable_bo);
 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
 	WARN_ON(!vmw_piter_next(&iter));
 
@@ -142,7 +157,7 @@
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable "
 			  "takedown.\n");
-	} else {
-		memset(cmd, 0, sizeof(*cmd));
-		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
-		cmd->header.size = sizeof(cmd->body);
-		cmd->body.type = type;
-		cmd->body.baseAddress = 0;
-		cmd->body.sizeInBytes = 0;
-		cmd->body.validSizeInBytes = 0;
-		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		return;
 	}
 
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = 0;
+	cmd->body.sizeInBytes = 0;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
 	if (bo) {
 		int ret;
 
@@ -217,47 +233,21 @@
 	otable->page_table = NULL;
 }
 
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv:       Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+				  struct vmw_otable_batch *batch)
 {
 	unsigned long offset;
 	unsigned long bo_size;
-	struct vmw_otable *otables;
+	struct vmw_otable *otables = batch->otables;
 	SVGAOTableType i;
 	int ret;
 
-	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
-			  GFP_KERNEL);
-	if (unlikely(otables == NULL)) {
-		DRM_ERROR("Failed to allocate space for otable "
-			  "metadata.\n");
-		return -ENOMEM;
-	}
-
-	otables[SVGA_OTABLE_MOB].size =
-		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SURFACE].size =
-		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
-	otables[SVGA_OTABLE_CONTEXT].size =
-		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SHADER].size =
-		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SCREEN_TARGET].size =
-		VMWGFX_NUM_GB_SCREEN_TARGET *
-		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
 	bo_size = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!otables[i].enabled)
+			continue;
+
 		otables[i].size =
 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
 		bo_size += otables[i].size;
@@ -267,46 +257,114 @@
 			    ttm_bo_type_device,
 			    &vmw_sys_ne_placement,
 			    0, false, NULL,
-			    &dev_priv->otable_bo);
+			    &batch->otable_bo);
 
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
-	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	ret = vmw_bo_map_dma(batch->otable_bo);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 
 	offset = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
-		ret = vmw_setup_otable_base(dev_priv, i, offset,
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!batch->otables[i].enabled)
+			continue;
+
+		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+					    offset,
 					    &otables[i]);
 		if (unlikely(ret != 0))
 			goto out_no_setup;
 		offset += otables[i].size;
 	}
 
-	dev_priv->otables = otables;
 	return 0;
 
 out_unreserve:
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 out_no_setup:
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+	}
 
-	ttm_bo_unref(&dev_priv->otable_bo);
+	ttm_bo_unref(&batch->otable_bo);
 out_no_bo:
-	kfree(otables);
 	return ret;
 }
 
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+	int ret;
+
+	if (dev_priv->has_dx) {
+		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, dx_tables, sizeof(dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+	} else {
+		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+	}
+
+	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+	if (unlikely(ret != 0))
+		goto out_setup;
+
+	return 0;
+
+out_setup:
+	kfree(*otables);
+	return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+			       struct vmw_otable_batch *batch)
+{
+	SVGAOTableType i;
+	struct ttm_buffer_object *bo = batch->otable_bo;
+	int ret;
+
+	for (i = 0; i < batch->num_otables; ++i)
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+
+	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	BUG_ON(ret != 0);
+
+	vmw_fence_single_bo(bo, NULL);
+	ttm_bo_unreserve(bo);
+
+	ttm_bo_unref(&batch->otable_bo);
+}
 
 /*
  * vmw_otables_takedown - Take down guest backed memory object tables
@@ -317,26 +375,10 @@
  */
 void vmw_otables_takedown(struct vmw_private *dev_priv)
 {
-	SVGAOTableType i;
-	struct ttm_buffer_object *bo = dev_priv->otable_bo;
-	int ret;
-
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i,
-					 &dev_priv->otables[i]);
-
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
-	BUG_ON(ret != 0);
-
-	vmw_fence_single_bo(bo, NULL);
-	ttm_bo_unreserve(bo);
-
-	ttm_bo_unref(&dev_priv->otable_bo);
-	kfree(dev_priv->otables);
-	dev_priv->otables = NULL;
+	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+	kfree(dev_priv->otable_batch.otables);
 }
 
-
 /*
  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
  * needed for a guest backed memory object.
@@ -409,7 +451,7 @@
 		goto out_unreserve;
 
 	ttm_bo_unreserve(mob->pt_bo);
-	
+
 	return 0;
 
 out_unreserve:
@@ -429,15 +471,15 @@
  * *@addr according to the page table entry size.
  */
 #if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+	*((u64 *) *addr) = val >> PAGE_SHIFT;
 	*addr += 2;
 }
 #else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+	*(*addr)++ = val >> PAGE_SHIFT;
 }
 #endif
 
@@ -459,7 +501,7 @@
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
 	unsigned long pt_page;
-	__le32 *addr, *save_addr;
+	u32 *addr, *save_addr;
 	unsigned long i;
 	struct page *page;
 
@@ -574,7 +616,7 @@
 		vmw_fence_single_bo(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /*
@@ -627,7 +669,7 @@
 		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
 	}
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@
 	return 0;
 
 out_no_cmd_space:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	if (pt_set_up)
 		ttm_bo_unref(&mob->pt_bo);
 
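
A side note on vmw_otables_setup() above: both branches duplicate a constant
table byte for byte with kmalloc() plus memcpy(), which kmemdup() expresses
more compactly. An equivalent sketch (same semantics, untested):

	const struct vmw_otable *src = dev_priv->has_dx ?
		dx_tables : pre_dx_tables;
	size_t n = dev_priv->has_dx ?
		ARRAY_SIZE(dx_tables) : ARRAY_SIZE(pre_dx_tables);

	/* Duplicate the constant table; freed again in the error path
	 * and in vmw_otables_takedown().
	 */
	*otables = kmemdup(src, n * sizeof(*src), GFP_KERNEL);
	if (*otables == NULL)
		return -ENOMEM;
	dev_priv->otable_batch.num_otables = n;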
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f6..76069f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
 
 #include <drm/ttm/ttm_placement.h>
 
-#include "svga_overlay.h"
-#include "svga_escape.h"
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
 
 #define VMW_MAX_NUM_STREAMS 1
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@
 {
 	struct vmw_escape_video_flush *flush;
 	size_t fifo_size;
-	bool have_so = dev_priv->sou_priv ? true : false;
+	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
 	int i, num_items;
 	SVGAGuestPtr ptr;
 
@@ -231,10 +231,10 @@
 	if (!pin)
 		return vmw_dmabuf_unpin(dev_priv, buf, inter);
 
-	if (!dev_priv->sou_priv)
-		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
 
-	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
@@ -453,7 +453,7 @@
 
 static bool vmw_overlay_available(const struct vmw_private *dev_priv)
 {
-	return (dev_priv->overlay_priv != NULL && 
+	return (dev_priv->overlay_priv != NULL &&
 		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
 		 VMW_OVERLAY_CAP_MASK));
 }
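
The overlay pinning change above keys off the new active_display_unit enum
instead of probing sou_priv for NULL. Restated as a sketch, with comments
that are an interpretation rather than taken from the patch:

	/* Legacy display units can only scan out of VRAM. */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);

	/* Screen objects (and newer unit types) may also use GMR memory. */
	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);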
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a..dce7980 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
 #define VMWGFX_IRQSTATUS_PORT 0x8
 
 struct svga_guest_mem_descriptor {
-	__le32 ppn;
-	__le32 num_pages;
+	u32 ppn;
+	u32 num_pages;
 };
 
 struct svga_fifo_cmd_fence {
-	__le32 fence;
+	u32 fence;
 };
 
 #define SVGA_SYNC_GENERIC         1
 #define SVGA_SYNC_FIFOFULL        2
 
-#include "svga_types.h"
-
-#include "svga3d_reg.h"
+#include "device_include/svga3d_reg.h"
 
 #endif
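
The __le32 -> u32 changes here and in the sibling files drop explicit
endianness annotations; presumably (the patch does not say so itself) because
the SVGA device interface is little-endian and only exercised on little-endian
hosts, where the conversions are identity operations. The before/after
pattern, as seen in vmwgfx_mob.c above:

	/* before */
	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	/* after */
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;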
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15..c1912f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -121,6 +122,7 @@
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+	write_lock(&dev_priv->resource_lock);
 	res->avail = false;
 	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
-		res->hw_destroy(res);
 		mutex_lock(&dev_priv->binding_mutex);
-		vmw_context_binding_res_list_kill(&res->binding_head);
+		vmw_binding_res_list_kill(&res->binding_head);
 		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
 	}
 
 	id = res->id;
@@ -156,20 +158,17 @@
 		kfree(res);
 
 	write_lock(&dev_priv->resource_lock);
-
 	if (id != -1)
 		idr_remove(idr, id);
+	write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
 	struct vmw_resource *res = *p_res;
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	*p_res = NULL;
-	write_lock(&dev_priv->resource_lock);
 	kref_put(&res->kref, vmw_resource_release);
-	write_unlock(&dev_priv->resource_lock);
 }
 
 
@@ -260,17 +259,16 @@
 	write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
-					 struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+						struct idr *idr, int id)
 {
 	struct vmw_resource *res;
 
 	read_lock(&dev_priv->resource_lock);
 	res = idr_find(idr, id);
-	if (res && res->avail)
-		kref_get(&res->kref);
-	else
+	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 		res = NULL;
+
 	read_unlock(&dev_priv->resource_lock);
 
 	if (unlikely(res == NULL))
@@ -900,20 +898,21 @@
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_stream_size,
 				   false, true);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Out of graphics memory for stream"
 				  " creation.\n");
-		goto out_unlock;
-	}
 
+		goto out_ret;
+	}
 
 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 	if (unlikely(stream == NULL)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_stream_size);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_ret;
 	}
 
 	res = &stream->stream.res;
@@ -926,7 +925,7 @@
 
 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out_ret;
 
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@
 	arg->stream_id = res->id;
 out_err:
 	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
 	return ret;
 }
 
@@ -1152,14 +1150,16 @@
  * command submission.
  *
  * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup:     Backup buffer has been switched.
  * @new_backup:        Pointer to new backup buffer if command submission
- *                     switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *                     switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool switch_backup,
 			    struct vmw_dma_buffer *new_backup,
 			    unsigned long new_backup_offset)
 {
@@ -1168,22 +1168,25 @@
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (new_backup && new_backup != res->backup) {
-
+	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
-		res->backup = vmw_dmabuf_reference(new_backup);
-		lockdep_assert_held(&new_backup->base.resv->lock.base);
-		list_add_tail(&res->mob_head, &new_backup->res_list);
+		if (new_backup) {
+			res->backup = vmw_dmabuf_reference(new_backup);
+			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			list_add_tail(&res->mob_head, &new_backup->res_list);
+		} else {
+			res->backup = NULL;
+		}
 	}
-	if (new_backup)
+	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict || res->id == -1)
+	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
@@ -1259,7 +1262,8 @@
  * the buffer may not be bound to the resource at this point.
  *
  */
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+			 bool no_backup)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -1270,9 +1274,13 @@
 
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {
-		ret = vmw_resource_buf_alloc(res, true);
-		if (unlikely(ret != 0))
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu. bytes\n",
+				  (unsigned long) res->backup_size);
 			return ret;
+		}
 	}
 
 	return 0;
@@ -1305,7 +1313,7 @@
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@
 	struct ttm_validate_buffer val_buf;
 	unsigned err_count = 0;
 
-	if (likely(!res->func->may_evict))
+	if (!res->func->create)
 		return 0;
 
 	val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@
 /**
  * vmw_resource_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@
 	}
 }
 
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist.  This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_private *dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackAllQuery body;
+	} *cmd;
+
+
+	/* No query bound, so do nothing */
+	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+		return 0;
+
+	dx_query_ctx = dx_query_mob->dx_query_ctx;
+	dev_priv     = dx_query_ctx->dev_priv;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for "
+			  "query MOB read back.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid    = dx_query_ctx->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Triggers a rebind the next time the affected context is bound */
+	dx_query_mob->dx_query_ctx = NULL;
+
+	return 0;
+}
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct vmw_dma_buffer *dx_query_mob;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct vmw_private *dev_priv;
+
+
+	dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mutex_lock(&dev_priv->binding_mutex);
+
+	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return;
+	}
+
+	/* If BO is being moved from MOB to system memory */
+	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+		struct vmw_fence_obj *fence;
+
+		(void) vmw_query_readback_all(dx_query_mob);
+		mutex_unlock(&dev_priv->binding_mutex);
+
+		/* Create a fence and attach the BO to it */
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		vmw_fence_single_bo(bo, fence);
+
+		if (fence != NULL)
+			vmw_fence_obj_unreference(&fence);
+
+		(void) ttm_bo_wait(bo, false, false, false);
+	} else {
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
 /**
  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  *
@@ -1573,3 +1676,107 @@
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (res->pin_count == 0) {
+		struct vmw_dma_buffer *vbo = NULL;
+
+		if (res->backup) {
+			vbo = res->backup;
+
+			ttm_bo_reserve(&vbo->base, interruptible, false, false,
+				       NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 interruptible, false);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
+			}
+
+			/* Do we really need to pin the MOB as well? */
+			vmw_bo_pin_reserved(vbo, true);
+		}
+		ret = vmw_resource_validate(res);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
+		if (ret)
+			goto out_no_validate;
+	}
+	res->pin_count++;
+
+out_no_validate:
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	ret = vmw_resource_reserve(res, false, true);
+	WARN_ON(ret);
+
+	WARN_ON(res->pin_count == 0);
+	if (--res->pin_count == 0 && res->backup) {
+		struct vmw_dma_buffer *vbo = res->backup;
+
+		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
+	}
+
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
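
A usage sketch for the pin API added above (hypothetical caller; res is a
validated struct vmw_resource pointer): a pin reference keeps the resource id
and its backing buffer stable until the matching unpin.

	int ret;

	ret = vmw_resource_pin(res, true);	/* interruptible wait */
	if (ret)
		return ret;

	/* ... the resource cannot be evicted in this window ... */

	vmw_resource_unpin(res);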
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed..5994ef6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
 
 #include "vmwgfx_drv.h"
 
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
 /**
  * struct vmw_user_resource_conv - Identify a derived user-exported resource
  * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@
  * @bind:              Bind a hardware resource to persistent buffer storage.
  * @unbind:            Unbind a hardware resource from persistent
  *                     buffer storage.
+ * @commit_notify:     If the resource is a command buffer managed resource,
+ *                     callback to notify that a define or remove command
+ *                     has been committed to the device.
  */
-
 struct vmw_res_func {
 	enum vmw_res_type res_type;
 	bool needs_backup;
@@ -71,6 +79,8 @@
 	int (*unbind) (struct vmw_resource *res,
 		       bool readback,
 		       struct ttm_validate_buffer *val_buf);
+	void (*commit_notify)(struct vmw_resource *res,
+			      enum vmw_cmdbuf_res_state state);
 };
 
 int vmw_resource_alloc_id(struct vmw_resource *res);
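
The new commit_notify hook is optional: a command-buffer managed resource
type wires it into its vmw_res_func table and reacts when the define
(VMW_CMDBUF_RES_ADD) or remove (VMW_CMDBUF_RES_DEL) command actually reaches
the device. A minimal wiring sketch with a hypothetical resource type; the
real user is vmw_dx_shader_func in vmwgfx_shader.c below:

	static void example_commit_notify(struct vmw_resource *res,
					  enum vmw_cmdbuf_res_state state)
	{
		if (state == VMW_CMDBUF_RES_DEL)
			res->id = -1;	/* remove command reached the device */
		/* VMW_CMDBUF_RES_ADD would make the resource visible again. */
	}

	static const struct vmw_res_func example_func = {
		.res_type = vmw_res_shader,
		.type_name = "example",
		.commit_notify = example_commit_notify,
	};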
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d..b96d1ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
 #define vmw_connector_to_sou(x) \
 	container_of(x, struct vmw_screen_object_unit, base.connector)
 
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: X difference between source clip rects and framebuffer coordinates.
+ * @dst_y: Y difference between source clip rects and framebuffer coordinates.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+	struct vmw_kms_dirty base;
+	s32 left, right, top, bottom;
+	s32 dst_x, dst_y;
+	u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+	uint32 header;
+	SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_dmabuf_blit {
+	uint32 header;
+	SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+
+/*
+ * Other structs.
+ */
+
 struct vmw_screen_object_display {
 	unsigned num_implicit;
 
 	struct vmw_framebuffer *implicit_fb;
+	SVGAFifoCmdDefineGMRFB cur;
+	struct vmw_dma_buffer *pinned_gmrfb;
 };
 
 /**
@@ -57,7 +102,7 @@
 
 static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
 {
-	vmw_display_unit_cleanup(&sou->base);
+	vmw_du_cleanup(&sou->base);
 	kfree(sou);
 }
 
@@ -72,7 +117,7 @@
 }
 
 static void vmw_sou_del_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou)
+			       struct vmw_screen_object_unit *sou)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -84,8 +129,8 @@
 }
 
 static void vmw_sou_add_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou,
-			      struct vmw_framebuffer *vfb)
+			       struct vmw_screen_object_unit *sou,
+			       struct vmw_framebuffer *vfb)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -202,14 +247,7 @@
 static void vmw_sou_backing_free(struct vmw_private *dev_priv,
 				 struct vmw_screen_object_unit *sou)
 {
-	struct ttm_buffer_object *bo;
-
-	if (unlikely(sou->buffer == NULL))
-		return;
-
-	bo = &sou->buffer->base;
-	ttm_bo_unref(&bo);
-	sou->buffer = NULL;
+	vmw_dmabuf_unreference(&sou->buffer);
 	sou->buffer_size = 0;
 }
 
@@ -274,13 +312,13 @@
 	dev_priv = vmw_priv(crtc->dev);
 
 	if (set->num_connectors > 1) {
-		DRM_ERROR("to many connectors\n");
+		DRM_ERROR("Too many connectors\n");
 		return -EINVAL;
 	}
 
 	if (set->num_connectors == 1 &&
 	    set->connectors[0] != &sou->base.connector) {
-		DRM_ERROR("connector doesn't match %p %p\n",
+		DRM_ERROR("Connector doesn't match %p %p\n",
 			set->connectors[0], &sou->base.connector);
 		return -EINVAL;
 	}
@@ -331,7 +369,7 @@
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	if (mode->hdisplay != crtc->mode.hdisplay ||
 	    mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@
 	return 0;
 }
 
+/**
+ * Returns whether this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
+					    struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	if (!sou->base.is_implicit)
+		return true;
+
+	if (dev_priv->sou_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+				       struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	BUG_ON(!sou->base.is_implicit);
+
+	dev_priv->sou_priv->implicit_fb =
+		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+}
+
+static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_pending_vblank_event *event,
+				  uint32_t flags)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+	struct vmw_fence_obj *fence = NULL;
+	struct drm_clip_rect clips;
+	int ret;
+
+	/* require ScreenObject support for page flipping */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+		return -EINVAL;
+
+	crtc->primary->fb = fb;
+
+	/* do a full screen dirty update */
+	clips.x1 = clips.y1 = 0;
+	clips.x2 = fb->width;
+	clips.y2 = fb->height;
+
+	if (vfb->dmabuf)
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
+						  &clips, 1, 1,
+						  true, &fence);
+	else
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
+						   &clips, NULL, NULL,
+						   0, 0, 1, 1, &fence);
+
+
+	if (ret != 0)
+		goto out_no_fence;
+	if (!fence) {
+		ret = -EINVAL;
+		goto out_no_fence;
+	}
+
+	if (event) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+	}
+
+	/*
+	 * No need to hold on to this now. The only cleanup
+	 * we need to do if we fail is unref the fence.
+	 */
+	vmw_fence_obj_unreference(&fence);
+
+	if (vmw_crtc_to_du(crtc)->is_implicit)
+		vmw_sou_update_implicit_fb(dev_priv, crtc);
+
+	return ret;
+
+out_no_fence:
+	crtc->primary->fb = old_fb;
+	return ret;
+}
+
 static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
 	.set_config = vmw_sou_crtc_set_config,
-	.page_flip = vmw_du_page_flip,
+	.page_flip = vmw_sou_crtc_page_flip,
 };
 
 /*
@@ -423,7 +563,7 @@
 	vmw_sou_destroy(vmw_connector_to_sou(connector));
 }
 
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.save = vmw_du_connector_save,
 	.restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@
 	sou->base.pref_mode = NULL;
 	sou->base.is_implicit = true;
 
-	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	connector->status = vmw_du_connector_detect(connector, true);
 
@@ -481,7 +621,7 @@
 	return 0;
 }
 
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -516,7 +656,9 @@
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 		vmw_sou_init(dev_priv, i);
 
-	DRM_INFO("Screen objects system initialized\n");
+	dev_priv->active_display_unit = vmw_du_screen_object;
+
+	DRM_INFO("Screen Objects Display Unit initialized\n");
 
 	return 0;
 
@@ -529,7 +671,7 @@
 	return ret;
 }
 
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -543,35 +685,369 @@
 	return 0;
 }
 
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc)
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	int depth = framebuffer->base.depth;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
 
-	if (!sou->base.is_implicit)
-		return true;
+	/* Emulate RGBA support: contrary to svga_reg.h, this is not
+	 * supported by hosts. This is only a problem if we read the
+	 * value back later and expect what we uploaded.
+	 */
+	if (depth == 32)
+		depth = 24;
 
-	if (dev_priv->sou_priv->num_implicit != 1)
-		return false;
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (!cmd) {
+		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+		return -ENOMEM;
+	}
 
-	return true;
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	/* Buffer is reserved in vram or GMR */
+	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
 }
 
 /**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * to match the encoded destination bounding box.
  */
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+	int i;
 
-	BUG_ON(!sou->base.is_implicit);
+	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	cmd->header.size = sizeof(cmd->body) + region_size;
 
-	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+	/*
+	 * Use the destination bounding box to specify the destination and
+	 * source bounding regions.
+	 */
+	cmd->body.destRect.left = sdirty->left;
+	cmd->body.destRect.right = sdirty->right;
+	cmd->body.destRect.top = sdirty->top;
+	cmd->body.destRect.bottom = sdirty->bottom;
+
+	cmd->body.srcRect.left = sdirty->left + trans_x;
+	cmd->body.srcRect.right = sdirty->right + trans_x;
+	cmd->body.srcRect.top = sdirty->top + trans_y;
+	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+	cmd->body.srcImage.sid = sdirty->sid;
+	cmd->body.destScreenId = dirty->unit->unit;
+
+	/* Blits are relative to the destination rect. Translate. */
+	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+		blit->left -= sdirty->left;
+		blit->right -= sdirty->left;
+		blit->top -= sdirty->top;
+		blit->bottom -= sdirty->top;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+	/* Destination rect. */
+	blit += dirty->num_hits;
+	blit->left = dirty->unit_x1;
+	blit->top = dirty->unit_y1;
+	blit->right = dirty->unit_x2;
+	blit->bottom = dirty->unit_y2;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_kms_sou_surface_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+	sdirty.base.clip = vmw_sou_surface_clip;
+	sdirty.base.dev_priv = dev_priv;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+	  sizeof(SVGASignedRect) * num_clips;
+
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+	sdirty.dst_x = dest_x;
+	sdirty.dst_y = dest_y;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dirty clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of dirty clips.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_dmabuf_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a dirty cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = dirty->unit->unit;
+	blit->body.srcOrigin.x = dirty->fb_x;
+	blit->body.srcOrigin.y = dirty->fb_y;
+	blit->body.destRect.left = dirty->unit_x1;
+	blit->body.destRect.top = dirty->unit_y1;
+	blit->body.destRect.right = dirty->unit_x2;
+	blit->body.destRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+	dirty.clip = vmw_sou_dmabuf_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+				   0, 0, num_clips, increment, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
+}
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_readback_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+	blit->body.srcScreenId = dirty->unit->unit;
+	blit->body.destOrigin.x = dirty->fb_x;
+	blit->body.destOrigin.y = dirty->fb_y;
+	blit->body.srcRect.left = dirty->unit_x1;
+	blit->body.srcRect.top = dirty->unit_y1;
+	blit->body.srcRect.right = dirty->unit_x2;
+	blit->body.srcRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+	dirty.clip = vmw_sou_readback_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+				   0, 0, num_clips, 1, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
 }
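
A kernel-internal usage sketch for vmw_kms_sou_readback() above (hypothetical
caller; per the kernel-doc, file_priv and user_fence_rep are passed as a NULL
pair when no user-space fence is wanted): reading the whole screen back into
the dma-buffer backed framebuffer takes one full-size clip.

	struct drm_vmw_rect vclip = {
		.x = 0,
		.y = 0,
		.w = vfb->base.width,
		.h = vfb->base.height,
	};
	int ret;

	ret = vmw_kms_sou_readback(dev_priv, NULL, vfb, NULL, &vclip, 1);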
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a..bba1ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
 	uint32_t size;
+	uint8_t num_input_sig;
+	uint8_t num_output_sig;
 };
 
 struct vmw_user_shader {
@@ -40,8 +43,18 @@
 	struct vmw_shader shader;
 };
 
+struct vmw_dx_shader {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	u32 id;
+	bool committed;
+	struct list_head cotable_head;
+};
+
 static uint64_t vmw_user_shader_size;
 static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -55,6 +68,18 @@
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
 	.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@
 	.unbind = vmw_gb_shader_unbind
 };
 
+static const struct vmw_res_func vmw_dx_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = false,
+	.type_name = "dx shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_shader_create,
+	/*
+	 * The destroy callback is only called with a committed resource on
+	 * context destroy, in which case we destroy the cotable anyway,
+	 * so there's no need to destroy DX shaders separately.
+	 */
+	.destroy = NULL,
+	.bind = vmw_dx_shader_bind,
+	.unbind = vmw_dx_shader_unbind,
+	.commit_notify = vmw_dx_shader_commit_notify,
+};
+
 /**
  * Shader management:
  */
@@ -87,25 +130,42 @@
 	return container_of(res, struct vmw_shader, res);
 }
 
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_shader, res);
+}
+
 static void vmw_hw_shader_destroy(struct vmw_resource *res)
 {
-	(void) vmw_gb_shader_destroy(res);
+	if (likely(res->func->destroy))
+		(void) res->func->destroy(res);
+	else
+		res->id = -1;
 }
 
 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 			      struct vmw_resource *res,
 			      uint32_t size,
 			      uint64_t offset,
 			      SVGA3dShaderType type,
+			      uint8_t num_input_sig,
+			      uint8_t num_output_sig,
 			      struct vmw_dma_buffer *byte_code,
 			      void (*res_free) (struct vmw_resource *res))
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_shader_func);
-
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_gb_shader_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free)
@@ -122,11 +182,17 @@
 	}
 	shader->size = size;
 	shader->type = type;
+	shader->num_input_sig = num_input_sig;
+	shader->num_output_sig = num_output_sig;
 
 	vmw_resource_activate(res, vmw_hw_shader_destroy);
 	return 0;
 }
 
+/*
+ * GB shader code:
+ */
+
 static int vmw_gb_shader_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -259,7 +325,7 @@
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -275,11 +341,326 @@
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(shader->cotable,
+					 &shader->cotable_head);
+		shader->committed = true;
+		res->id = shader->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&shader->cotable_head);
+		shader->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (!list_empty(&shader->cotable_head) || !shader->committed)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+				  shader->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = shader->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!shader->committed);
+
+	if (!list_empty(&res->mob_head)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_shader_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = shader->id;
+	return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_shader_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (list_empty(&shader->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!shader->committed);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_shader_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+				      struct list_head *list,
+				      bool readback)
+{
+	struct vmw_dx_shader *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_shader_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+	vmw_resource_unreference(&shader->cotable);
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+		      struct vmw_resource *ctx,
+		      u32 user_key,
+		      SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_dx_shader *shader;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	int ret;
+
+	if (!vmw_shader_dx_size)
+		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+				   false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		return ret;
+	}
+
+	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+	if (!shader) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+		return -ENOMEM;
+	}
+
+	res = &shader->res;
+	shader->ctx = ctx;
+	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+	shader->id = user_key;
+	shader->committed = false;
+	INIT_LIST_HEAD(&shader->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_shader_res_free, &vmw_dx_shader_func);
+	if (ret)
+		goto out_resource_init;
+
+	/*
+	 * The user_key namespace is not per shader type for DX shaders,
+	 * so use a single zero shader type when hashing.
+	 */
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, 0),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = shader->id;
+	vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
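+
+/*
+ * Note: like other command buffer managed resources, the shader added
+ * above is only staged on the caller's list; it takes effect once the
+ * list is committed and vmw_dx_shader_commit_notify() is invoked with
+ * VMW_CMDBUF_RES_ADD.
+ */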
+
 /**
  * User-space shader management:
  */
@@ -341,6 +722,8 @@
 				 size_t shader_size,
 				 size_t offset,
 				 SVGA3dShaderType shader_type,
+				 uint8_t num_input_sig,
+				 uint8_t num_output_sig,
 				 struct ttm_object_file *tfile,
 				 u32 *handle)
 {
@@ -383,7 +766,8 @@
 	 */
 
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, num_input_sig,
+				 num_output_sig, buffer,
 				 vmw_user_shader_free);
 	if (unlikely(ret != 0))
 		goto out;
@@ -407,11 +791,11 @@
 }
 
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-				      struct vmw_dma_buffer *buffer,
-				      size_t shader_size,
-				      size_t offset,
-				      SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+					     struct vmw_dma_buffer *buffer,
+					     size_t shader_size,
+					     size_t offset,
+					     SVGA3dShaderType shader_type)
 {
 	struct vmw_shader *shader;
 	struct vmw_resource *res;
@@ -449,7 +833,7 @@
 	 * From here on, the destructor takes over resource freeing.
 	 */
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, 0, 0, buffer,
 				 vmw_shader_free);
 
 out_err:
@@ -457,19 +841,20 @@
 }
 
 
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+			     enum drm_vmw_shader_type shader_type_drm,
+			     u32 buffer_handle, size_t size, size_t offset,
+			     uint8_t num_input_sig, uint8_t num_output_sig,
+			     uint32_t *shader_handle)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_shader_create_arg *arg =
-		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_dma_buffer *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
 
-	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
-		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+	if (buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
 					     &buffer);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@
 		}
 
 		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)arg->size + (u64)arg->offset) {
+		    (u64)size + (u64)offset) {
 			DRM_ERROR("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
 		}
 	}
 
-	switch (arg->shader_type) {
+	switch (shader_type_drm) {
 	case drm_vmw_shader_type_vs:
 		shader_type = SVGA3D_SHADERTYPE_VS;
 		break;
 	case drm_vmw_shader_type_ps:
 		shader_type = SVGA3D_SHADERTYPE_PS;
 		break;
-	case drm_vmw_shader_type_gs:
-		shader_type = SVGA3D_SHADERTYPE_GS;
-		break;
 	default:
 		DRM_ERROR("Illegal shader type.\n");
 		ret = -EINVAL;
@@ -505,8 +887,9 @@
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
-	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-				    shader_type, tfile, &arg->shader_handle);
+	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+				    shader_type, num_input_sig,
+				    num_output_sig, tfile, shader_handle);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -515,7 +898,7 @@
 }
 
 /**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a shader user key and
  * shader type are within valid bounds.
  *
  * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@
  *
  * Returns true if valid, false if not.
  */
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a shader.
  *
  * @user_key: User space id of the shader.
  * @shader_type: Shader type.
@@ -537,13 +920,13 @@
  * Returns a hash key suitable for a command buffer managed resource
  * manager hash table.
  */
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key | (shader_type << 20);
 }
 
 /**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a shader for removal.
  *
  * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@
  * @shader_type: Shader type.
  * @list: Caller's list of staged command buffer resource actions.
  */
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-			     u32 user_key, SVGA3dShaderType shader_type,
-			     struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+		      u32 user_key, SVGA3dShaderType shader_type,
+		      struct list_head *list)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	struct vmw_resource *dummy;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type),
-				     list);
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type),
+				     list, &dummy);
 }
 
 /**
@@ -591,7 +975,7 @@
 	int ret;
 	struct vmw_resource *res;
 
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
 	/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@
 	if (unlikely(ret != 0))
 		goto no_reserve;
 
-	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
-				 vmw_compat_shader_key(user_key, shader_type),
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, shader_type),
 				 res, list);
 	vmw_resource_unreference(&res);
 no_reserve:
@@ -639,7 +1023,7 @@
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a shader
  *
  * @man: Pointer to the command buffer managed resource manager identifying
  * the shader namespace.
@@ -650,14 +1034,26 @@
  * found. An error pointer otherwise.
  */
 struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key,
-			 SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key,
+		  SVGA3dShaderType shader_type)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return ERR_PTR(-EINVAL);
 
-	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type));
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+
+	return vmw_shader_define(dev, file_priv, arg->shader_type,
+				 arg->buffer_handle,
+				 arg->size, arg->offset,
+				 0, 0,
+				 &arg->shader_handle);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 0000000..5a73eeb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be destroyed,
+ * otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views
+ * pointing to it, and all context bindings of those views. Similarly we must
+ * restore the view bindings, views and surfaces pointed to by the views when
+ * a context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
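+ * @rcu: RCU callback head used to free the view with kfree_rcu().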
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to-view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+	struct rcu_head rcu;
+	struct vmw_resource res;
+	struct vmw_resource *ctx;      /* Immutable */
+	struct vmw_resource *srf;      /* Immutable */
+	struct vmw_resource *cotable;  /* Immutable */
+	struct list_head srf_head;     /* Protected by binding_mutex */
+	struct list_head cotable_head; /* Protected by binding_mutex */
+	unsigned view_type;            /* Immutable */
+	unsigned view_id;              /* Immutable */
+	u32 cmd_size;                  /* Immutable */
+	bool committed;                /* Protected by binding_mutex */
+	u32 cmd[1];                    /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+	.res_type = vmw_res_view,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "DX view",
+	.backup_placement = NULL,
+	.create = vmw_view_create,
+	.commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+	uint32 view_id;
+	uint32 sid;
+};
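+
+/*
+ * Note: vmw_so_build_asserts() verifies at build time that @sid sits at
+ * the same offset in all supported view define command bodies, which is
+ * what makes this generic stub safe for patching saved commands.
+ */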
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (state == VMW_CMDBUF_RES_ADD) {
+		struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+		list_add_tail(&view->srf_head, &srf->view_list);
+		vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+		view->committed = true;
+		res->id = view->view_id;
+
+	} else {
+		list_del_init(&view->cotable_head);
+		list_del_init(&view->srf_head);
+		view->committed = false;
+		res->id = -1;
+	}
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct vmw_view_define body;
+	} *cmd;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!view->committed) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return 0;
+	}
+
+	cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+				  view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+	memcpy(cmd, &view->cmd, view->cmd_size);
+	WARN_ON(cmd->body.view_id != view->view_id);
+	/* Sid may have changed due to surface eviction. */
+	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+	cmd->body.sid = view->srf->id;
+	vmw_fifo_commit(res->dev_priv, view->cmd_size);
+	res->id = view->view_id;
+	list_add_tail(&view->srf_head, &srf->view_list);
+	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_view *view = vmw_view(res);
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	if (!view->committed || res->id == -1)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.view_id = view->view_id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&view->cotable_head);
+	list_del_init(&view->srf_head);
+
+	return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	WARN_ON(vmw_view_destroy(res));
+	res->id = -1;
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a view key suitable for the command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+	return user_key | (view_type << 20);
+}
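+
+/*
+ * Key packing example (values purely illustrative): user_key 5 for a
+ * render target view (view_type 1) packs to 0x00100005; the low 20 bits
+ * hold the user key and the bits above hold the view type.
+ */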
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) is
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+	return (user_key < SVGA_COTABLE_MAX_IDS &&
+		view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	vmw_resource_unreference(&view->cotable);
+	vmw_resource_unreference(&view->srf);
+	kfree_rcu(view, rcu);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the command buffer resource manager identifying the
+ * view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+		 struct vmw_resource *ctx,
+		 struct vmw_resource *srf,
+		 enum vmw_view_type view_type,
+		 u32 user_key,
+		 const void *cmd,
+		 size_t cmd_size,
+		 struct list_head *list)
+{
+	static const size_t vmw_view_define_sizes[] = {
+		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+	};
+
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct vmw_resource *res;
+	struct vmw_view *view;
+	size_t size;
+	int ret;
+
+	if (cmd_size != vmw_view_define_sizes[view_type] +
+	    sizeof(SVGA3dCmdHeader)) {
+		DRM_ERROR("Illegal view create command size.\n");
+		return -EINVAL;
+	}
+
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view add view id.\n");
+		return -EINVAL;
+	}
+
+	size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for view"
+				  " creation.\n");
+		return ret;
+	}
+
+	view = kmalloc(size, GFP_KERNEL);
+	if (!view) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+		return -ENOMEM;
+	}
+
+	res = &view->res;
+	view->ctx = ctx;
+	view->srf = vmw_resource_reference(srf);
+	view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+	view->view_type = view_type;
+	view->view_id = user_key;
+	view->cmd_size = cmd_size;
+	view->committed = false;
+	INIT_LIST_HEAD(&view->srf_head);
+	INIT_LIST_HEAD(&view->cotable_head);
+	memcpy(&view->cmd, cmd, cmd_size);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_view_res_free, &vmw_view_func);
+	if (ret)
+		goto out_resource_init;
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+				 vmw_view_key(user_key, view_type),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = view->view_id;
+	vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the shader namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+		    u32 user_key, enum vmw_view_type view_type,
+		    struct list_head *list,
+		    struct vmw_resource **res_p)
+{
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view remove view id.\n");
+		return -EINVAL;
+	}
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type),
+				     list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list,
+				   bool readback)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, srf_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself is holding a reference, so as long as the
+ * view resource is alive, the surface resource will be too.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+	return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * Returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+				     enum vmw_view_type view_type,
+				     u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type));
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+	/* Assert that our vmw_view_cmd_to_type() function is correct. */
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+	/* Assert that our "one body fits all" assumption is valid */
+	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+	/* Assert that the view key space can hold all view ids. */
+	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+	/*
+	 * Assert that the offset of sid in all view define commands
+	 * is what we assume it to be.
+	 */
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 0000000..2687383
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+	vmw_view_sr,
+	vmw_view_rt,
+	vmw_view_ds,
+	vmw_view_max,
+};
+
+enum vmw_so_type {
+	vmw_so_el,
+	vmw_so_bs,
+	vmw_so_ds,
+	vmw_so_rs,
+	vmw_so_ss,
+	vmw_so_so,
+	vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case, the size of
+ * this union will not equal the size of a u32, the assumption is invalid,
+ * and we detect that at compile time in the vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+	struct SVGA3dCmdDXDestroyShaderResourceView srv;
+	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	u32 view_id;
+};
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+	if (tmp > (u32)vmw_view_max)
+		return vmw_view_max;
+
+	return (enum vmw_view_type) tmp;
+}
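+
+/*
+ * For example, given the command id ordering verified in
+ * vmw_so_build_asserts(), SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW
+ * (base + 2) and SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW (base + 3)
+ * both yield tmp == 1, that is, vmw_view_rt.
+ */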
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a similar strategy as vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+	switch (id) {
+	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+		return vmw_so_el;
+	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+		return vmw_so_bs;
+	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+		return vmw_so_ds;
+	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+		return vmw_so_rs;
+	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+		return vmw_so_ss;
+	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+		return vmw_so_so;
+	default:
+		break;
+	}
+	return vmw_so_max;
+}
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+			struct vmw_resource *ctx,
+			struct vmw_resource *srf,
+			enum vmw_view_type view_type,
+			u32 user_key,
+			const void *cmd,
+			size_t cmd_size,
+			struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key, enum vmw_view_type view_type,
+			   struct list_head *list,
+			   struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *list,
+					  bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+					    enum vmw_view_type view_type,
+					    u32 user_key);
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 0000000..c22e2df
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
+/******************************************************************************
+ *
+ * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+	SAME_AS_DISPLAY = 0,
+	SEPARATE_SURFACE,
+	SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
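+ * @pitch: Pitch of the DMA buffer, used as the guest pitch of the transfer.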
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+	struct vmw_kms_dirty base;
+	SVGA3dTransferType  transfer;
+	s32 left, right, top, bottom;
+	u32 pitch;
+	union {
+		struct vmw_dma_buffer *buf;
+		u32 sid;
+	};
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+	SVGA3dCmdHeader     header;
+	SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+	SVGA3dCmdHeader      header;
+	SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed.  The dimensions of this will always
+ *               match the display mode.  If the display mode matches
+ *               content_vfbs dimensions, then this is a pointer into the
+ *               corresponding field in content_vfbs.  If not, then this
+ *               is a separate buffer to which content_vfbs will blit.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type: content_fb type
+ * @defined:  true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+	struct vmw_display_unit base;
+
+	struct vmw_surface     *display_srf;
+	struct drm_framebuffer *content_fb;
+
+	enum stdu_content_type content_fb_type;
+
+	bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to avoid
+ * pinning the same resource twice.
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed.  As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	if (stdu->display_srf) {
+		struct vmw_resource *res = &stdu->display_srf->res;
+
+		vmw_resource_unpin(res);
+
+		if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+			vmw_resource_unreference(&res);
+			stdu->content_fb_type = SAME_AS_DISPLAY;
+		}
+
+		stdu->display_srf = NULL;
+	}
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a Screen Target that we can use later.  This function is called
+ * whenever the framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBScreenTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space defining Screen Target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.width  = stdu->display_srf->base_size.width;
+	cmd->body.height = stdu->display_srf->base_size.height;
+	cmd->body.flags  = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+	cmd->body.dpi    = 0;
+	cmd->body.xRoot  = stdu->base.crtc.x;
+	cmd->body.yRoot  = stdu->base.crtc.y;
+
+	if (!stdu->base.is_implicit) {
+		cmd->body.xRoot  = stdu->base.gui_x;
+		cmd->body.yRoot  = stdu->base.gui_y;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	stdu->defined = true;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target.  Set to NULL to blank screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping.
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+			    struct vmw_screen_target_display_unit *stdu,
+			    struct vmw_resource *res)
+{
+	SVGA3dSurfaceImageId image;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBScreenTarget body;
+	} *cmd;
+
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined\n");
+		return -EINVAL;
+	}
+
+	/* Set up the image using the resource backing the screen target */
+	memset(&image, 0, sizeof(image));
+	image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space binding a screen target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.image  = image;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+				     s32 left, s32 right, s32 top, s32 bottom)
+{
+	struct vmw_stdu_update *update = cmd;
+
+	update->header.id   = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+	update->header.size = sizeof(update->body);
+
+	update->body.stid   = unit;
+	update->body.rect.x = left;
+	update->body.rect.y = top;
+	update->body.rect.w = right - left;
+	update->body.rect.h = bottom - top;
+}
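+
+/*
+ * For example (values purely illustrative), a bounding box with
+ * left = 16, top = 32, right = 48, bottom = 64 is encoded as the rect
+ * {x = 16, y = 32, w = 32, h = 32}.
+ */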
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely, typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct vmw_stdu_update *cmd;
+	struct drm_crtc *crtc = &stdu->base.crtc;
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined\n");
+		return -EINVAL;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+		return -ENOMEM;
+	}
+
+	vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+				 0, crtc->mode.vdisplay);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+			       struct vmw_screen_target_display_unit *stdu)
+{
+	int    ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBScreenTarget body;
+	} *cmd;
+
+
+	/* Nothing to do if not successfully defined */
+	if (unlikely(!stdu->defined))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW\n");
+
+	stdu->defined = false;
+
+	return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set:  mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer *vfb;
+	struct vmw_framebuffer_surface *new_vfbs;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer  *new_fb;
+	struct drm_crtc      *crtc;
+	struct drm_encoder   *encoder;
+	struct drm_connector *connector;
+	int    ret;
+
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	crtc     = set->crtc;
+	crtc->x  = set->x;
+	crtc->y  = set->y;
+	stdu     = vmw_crtc_to_stdu(crtc);
+	mode     = set->mode;
+	new_fb   = set->fb;
+	dev_priv = vmw_priv(crtc->dev);
+
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("Too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &stdu->base.connector) {
+		DRM_ERROR("Connectors don't match %p %p\n",
+			set->connectors[0], &stdu->base.connector);
+		return -EINVAL;
+	}
+
+
+	/* Since they always map one to one these are safe */
+	connector = &stdu->base.connector;
+	encoder   = &stdu->base.encoder;
+
+
+	/*
+	 * After this point the CRTC will be considered off unless a new fb
+	 * is bound
+	 */
+	if (stdu->defined) {
+		/* Unbind current surface by binding an invalid one */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+
+		/* Update Screen Target, display will now be blank */
+		if (crtc->primary->fb) {
+			ret = vmw_stdu_update_st(dev_priv, stdu);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+
+		crtc->primary->fb  = NULL;
+		crtc->enabled      = false;
+		encoder->crtc      = NULL;
+		connector->encoder = NULL;
+
+		vmw_stdu_unpin_display(stdu);
+		stdu->content_fb      = NULL;
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		/* The hardware is hung, give up */
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+
+	/* Any of these conditions means the caller wants CRTC off */
+	if (set->num_connectors == 0 || !mode || !new_fb)
+		return 0;
+
+
+	if (set->x + mode->hdisplay > new_fb->width ||
+	    set->y + mode->vdisplay > new_fb->height) {
+		DRM_ERROR("Set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	stdu->content_fb = new_fb;
+	vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+	if (vfb->dmabuf)
+		stdu->content_fb_type = SEPARATE_DMA;
+
+	/*
+	 * If the requested mode is different from the width and height
+	 * of the FB or if the content buffer is a DMA buf, then allocate
+	 * a display FB that matches the dimensions of the mode
+	 */
+	if (mode->hdisplay != new_fb->width  ||
+	    mode->vdisplay != new_fb->height ||
+	    stdu->content_fb_type != SAME_AS_DISPLAY) {
+		struct vmw_surface content_srf;
+		struct drm_vmw_size display_base_size = {0};
+		struct vmw_surface *display_srf;
+
+
+		display_base_size.width  = mode->hdisplay;
+		display_base_size.height = mode->vdisplay;
+		display_base_size.depth  = 1;
+
+		/*
+		 * If content buffer is a DMA buf, then we have to construct
+		 * surface info
+		 */
+		if (stdu->content_fb_type == SEPARATE_DMA) {
+
+			switch (new_fb->bits_per_pixel) {
+			case 32:
+				content_srf.format = SVGA3D_X8R8G8B8;
+				break;
+
+			case 16:
+				content_srf.format = SVGA3D_R5G6B5;
+				break;
+
+			case 8:
+				content_srf.format = SVGA3D_P8;
+				break;
+
+			default:
+				DRM_ERROR("Invalid format\n");
+				ret = -EINVAL;
+				goto err_unref_content;
+			}
+
+			content_srf.flags             = 0;
+			content_srf.mip_levels[0]     = 1;
+			content_srf.multisample_count = 0;
+		} else {
+
+			stdu->content_fb_type = SEPARATE_SURFACE;
+
+			new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+			content_srf = *new_vfbs->surface;
+		}
+
+
+		ret = vmw_surface_gb_priv_define(crtc->dev,
+				0, /* because kernel visible only */
+				content_srf.flags,
+				content_srf.format,
+				true, /* a scanout buffer */
+				content_srf.mip_levels[0],
+				content_srf.multisample_count,
+				0,
+				display_base_size,
+				&display_srf);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Cannot allocate a display FB.\n");
+			goto err_unref_content;
+		}
+
+		stdu->display_srf = display_srf;
+	} else {
+		new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+		stdu->display_srf = new_vfbs->surface;
+	}
+
+
+	ret = vmw_stdu_pin_display(stdu);
+	if (unlikely(ret != 0)) {
+		stdu->display_srf = NULL;
+		goto err_unref_content;
+	}
+
+	vmw_svga_enable(dev_priv);
+
+	/*
+	 * Steps to displaying a surface, assume surface is already
+	 * bound:
+	 *   1.  define a screen target
+	 *   2.  bind a fb to the screen target
+	 *   3.  update that screen target (this is done later by
+	 *       vmw_kms_stdu_do_surface_dirty_or_present)
+	 */
+	ret = vmw_stdu_define_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		goto err_unpin_display_and_content;
+
+	ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+	if (unlikely(ret != 0))
+		goto err_unpin_destroy_st;
+
+
+	connector->encoder = encoder;
+	encoder->crtc      = crtc;
+
+	crtc->mode    = *mode;
+	crtc->primary->fb = new_fb;
+	crtc->enabled = true;
+
+	return ret;
+
+err_unpin_destroy_st:
+	vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_unref_content:
+	stdu->content_fb = NULL;
+	return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should've been allocated
+ *         using k[mz]alloc, and should've been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+				   struct drm_framebuffer *new_fb,
+				   struct drm_pending_vblank_event *event,
+				   uint32_t flags)
+
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	int ret;
+
+	if (crtc == NULL)
+		return -EINVAL;
+
+	dev_priv          = vmw_priv(crtc->dev);
+	stdu              = vmw_crtc_to_stdu(crtc);
+	crtc->primary->fb = new_fb;
+	stdu->content_fb  = new_fb;
+
+	if (stdu->display_srf) {
+		/*
+		 * If the display surface is the same as the content surface
+		 * then remove the reference
+		 */
+		if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+			if (stdu->defined) {
+				/* Unbind the current surface */
+				ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+				if (unlikely(ret != 0))
+					goto err_out;
+			}
+			vmw_stdu_unpin_display(stdu);
+			stdu->display_srf = NULL;
+		}
+	}
+
+
+	if (!new_fb) {
+		/* Blanks the display */
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+
+		return 0;
+	}
+
+
+	if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+		stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+		ret = vmw_stdu_pin_display(stdu);
+		if (ret) {
+			stdu->display_srf = NULL;
+			goto err_out;
+		}
+
+		/* Bind display surface */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+		if (unlikely(ret != 0))
+			goto err_unpin_display_and_content;
+	}
+
+	/* Update display surface: after this point everything is bound */
+	ret = vmw_stdu_update_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (event) {
+		struct vmw_fence_obj *fence = NULL;
+		struct drm_file *file_priv = event->base.file_priv;
+
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		if (!fence)
+			return -ENOMEM;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+		vmw_fence_obj_unreference(&fence);
+	}
+
+	return ret;
+
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_out:
+	crtc->primary->fb = NULL;
+	stdu->content_fb = NULL;
+	return ret;
+}
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+	blit += dirty->num_hits;
+	blit->srcx = dirty->fb_x;
+	blit->srcy = dirty->fb_y;
+	blit->x = dirty->unit_x1;
+	blit->y = dirty->unit_y1;
+	blit->d = 1;
+	blit->w = dirty->unit_x2 - dirty->unit_x1;
+	blit->h = dirty->unit_y2 - dirty->unit_y1;
+	dirty->num_hits++;
+
+	if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+		return;
+
+	/* Destination bounding box */
+	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
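
vmw_stdu_dmabuf_clip grows a destination bounding box across all clip rects by seeding left/top with S32_MAX and right/bottom with S32_MIN, so the first rect always wins the min/max comparisons. A small runnable C sketch of that fold, with a made-up struct bbox standing in for the fields of struct vmw_stdu_dirty:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct vmw_stdu_dirty's bounding box. */
	struct bbox {
		int32_t left, top, right, bottom;
	};

	static void bbox_reset(struct bbox *b)
	{
		/* Seed so the first clip rect always wins the comparisons. */
		b->left = b->top = INT32_MAX;
		b->right = b->bottom = INT32_MIN;
	}

	static void bbox_add_clip(struct bbox *b, int32_t x1, int32_t y1,
				  int32_t x2, int32_t y2)
	{
		b->left   = x1 < b->left   ? x1 : b->left;
		b->top    = y1 < b->top    ? y1 : b->top;
		b->right  = x2 > b->right  ? x2 : b->right;
		b->bottom = y2 > b->bottom ? y2 : b->bottom;
	}

	int main(void)
	{
		struct bbox b;

		bbox_reset(&b);
		bbox_add_clip(&b, 10, 10, 20, 20);
		bbox_add_clip(&b, 5, 15, 30, 25);
		printf("%d %d %d %d\n", b.left, b.top, b.right, b.bottom);
		return 0;	/* prints: 5 10 30 25 */
	}
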
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+	SVGA3dCmdSurfaceDMASuffix *suffix =
+		(SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+	size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd->header.size = sizeof(cmd->body) + blit_size;
+	vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+	cmd->body.guest.pitch = ddirty->pitch;
+	cmd->body.host.sid = stdu->display_srf->res.id;
+	cmd->body.host.face = 0;
+	cmd->body.host.mipmap = 0;
+	cmd->body.transfer = ddirty->transfer;
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+		blit_size += sizeof(struct vmw_stdu_update);
+
+		vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+					 ddirty->left, ddirty->right,
+					 ddirty->top, ddirty->bottom);
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->right = ddirty->bottom = S32_MIN;
+}
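
The commit above writes a single FIFO packet laid out as header/body, then one copy box per clip rect, then a trailing suffix, which is why blit_size is sizeof(*blit) * num_hits + sizeof(*suffix). A sketch of that size arithmetic with illustrative structs (not the real SVGA wire format):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative packet pieces: fixed command, variable-length blit
	 * array, then a suffix appended after the last box. */
	struct dma_cmd { uint32_t id, size; uint32_t body[4]; };
	struct copy_box { uint32_t x, y, w, h; };
	struct dma_suffix { uint32_t suffix_size, max_offset; };

	int main(void)
	{
		uint32_t num_hits = 3;	/* one box per clip rect */
		size_t blit_size = sizeof(struct copy_box) * num_hits +
				   sizeof(struct dma_suffix);

		printf("commit %zu bytes\n",
		       sizeof(struct dma_cmd) + blit_size);
		return 0;
	}
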
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * May be set to NULL if @file_priv is set to NULL.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_stdu_dirty ddirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+		SVGA3D_READ_HOST_VRAM;
+	ddirty.left = ddirty.top = S32_MAX;
+	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.pitch = vfb->base.pitches[0];
+	ddirty.buf = buf;
+	ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+	ddirty.base.clip = vmw_stdu_dmabuf_clip;
+	ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+		num_clips * sizeof(SVGA3dCopyBox) +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	if (to_surface)
+		ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+				   0, 0, num_clips, increment, &ddirty.base);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		blit += dirty->num_hits;
+		blit->srcx = dirty->fb_x;
+		blit->srcy = dirty->fb_y;
+		blit->x = dirty->unit_x1;
+		blit->y = dirty->unit_y1;
+		blit->d = 1;
+		blit->w = dirty->unit_x2 - dirty->unit_x1;
+		blit->h = dirty->unit_y2 - dirty->unit_y1;
+	}
+
+	dirty->num_hits++;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_stdu_update *update;
+	size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+	size_t commit_size;
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+		cmd->header.size = sizeof(cmd->body) + blit_size;
+		cmd->body.src.sid = sdirty->sid;
+		cmd->body.dest.sid = stdu->display_srf->res.id;
+		update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+		commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+	} else {
+		update = dirty->cmd;
+		commit_size = sizeof(*update);
+	}
+
+	vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+				 sdirty->right, sdirty->top, sdirty->bottom);
+
+	vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @inc: Increment to use when looping over @clips or @vclips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_stdu_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	if (vfbs->is_dmabuf_proxy) {
+		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+		if (ret)
+			goto out_finish;
+	}
+
+	sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+	sdirty.base.clip = vmw_kms_stdu_surface_clip;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+		sizeof(SVGA3dCopyBox) * num_clips +
+		sizeof(struct vmw_stdu_update);
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+out_finish:
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+
+/*
+ *  Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_stdu_crtc_destroy,
+	.set_config = vmw_stdu_crtc_set_config,
+	.page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+	.destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, ranging from 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC.  Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+
+	stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+	if (!stdu)
+		return -ENOMEM;
+
+	stdu->base.unit = unit;
+	crtc = &stdu->base.crtc;
+	encoder = &stdu->base.encoder;
+	connector = &stdu->base.connector;
+
+	stdu->base.pref_active = (unit == 0);
+	stdu->base.pref_width  = dev_priv->initial_width;
+	stdu->base.pref_height = dev_priv->initial_height;
+	stdu->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->status = vmw_du_connector_detect(connector, false);
+
+	drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	(void) drm_connector_register(connector);
+
+	drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.dirty_info_property,
+				   1);
+
+	return 0;
+}
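
Each encoder above is restricted to exactly one CRTC via a one-hot mask, encoder->possible_crtcs = 1 << unit. A trivial runnable sketch of that unit-to-mask mapping:

	#include <stdio.h>

	int main(void)
	{
		/* One STDU per CRTC: unit i's encoder may only drive CRTC i. */
		for (unsigned int unit = 0; unit < 4; unit++)
			printf("unit %u -> possible_crtcs 0x%x\n",
			       unit, 1u << unit);
		return 0;
	}
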
+
+
+
+/**
+ *  vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ *  @stdu:  Screen Target Display Unit to be destroyed
+ *
+ *  Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+	vmw_stdu_unpin_display(stdu);
+
+	vmw_du_cleanup(&stdu->base);
+	kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device.  It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a.
+ * Display Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+
+	/* Do nothing if Screen Target support is turned off */
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+		return -ENOSYS;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+		return -ENOSYS;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = drm_mode_create_dirty_info_property(dev);
+	if (unlikely(ret != 0))
+		goto err_vblank_cleanup;
+
+	dev_priv->active_display_unit = vmw_du_screen_target;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+		ret = vmw_stdu_init(dev_priv, i);
+
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize STDU %d\n", i);
+			goto err_vblank_cleanup;
+		}
+	}
+
+	DRM_INFO("Screen Target Display device initialized\n");
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+
+
+
+/**
+ * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * Frees up any resources allocated by vmw_kms_stdu_init_display
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	drm_vblank_cleanup(dev);
+
+	return 0;
+}
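
vmw_kms_stdu_init_display gates the whole feature on a device capability bit: screen targets require guest-backed objects. A sketch of the same test; the flag value here is a hypothetical stand-in, not the real SVGA_CAP_GBOBJECTS encoding:

	#include <stdint.h>
	#include <stdio.h>

	#define CAP_GBOBJECTS	(1u << 5)	/* hypothetical bit position */

	static int init_display(uint32_t capabilities)
	{
		/* Mirror the gate above; the kernel returns -ENOSYS here. */
		if (!(capabilities & CAP_GBOBJECTS))
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("with cap: %d\n", init_display(CAP_GBOBJECTS));
		printf("without:  %d\n", init_display(0));
		return 0;
	}
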
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3..3361769 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
+
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
  * @base:           The TTM base object handling user-space visibility.
  * @srf:            The surface metadata.
  * @size:           TTM accounting size for the surface.
- * @master:         master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
@@ -220,7 +223,7 @@
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
@@ -340,7 +343,7 @@
 		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /**
@@ -576,14 +579,14 @@
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-		(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-			vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
 		return ret;
 	}
@@ -593,6 +596,7 @@
 	 * surface validate.
 	 */
 
+	INIT_LIST_HEAD(&srf->view_list);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
@@ -723,6 +727,7 @@
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
 		return -EINVAL;
 	}
 
@@ -906,6 +911,12 @@
 				  "surface reference.\n");
 			return -EACCES;
 		}
+		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+			DRM_ERROR("Locked master refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+
 		handle = u_handle;
 	}
 
@@ -1018,17 +1029,21 @@
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint32_t cmd_len, submit_len;
+	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface body;
 	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
 
 	if (likely(res->id != -1))
 		return 0;
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1055,19 @@
 		goto out_no_fifo;
 	}
 
-	cmd_len = sizeof(cmd->body);
-	submit_len = sizeof(*cmd);
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
@@ -1050,17 +1075,33 @@
 		goto out_no_fifo;
 	}
 
-	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	cmd->body.numMipLevels = srf->mip_levels[0];
-	cmd->body.multisampleCount = srf->multisample_count;
-	cmd->body.autogenFilter = srf->autogen_filter;
-	cmd->body.size.width = srf->base_size.width;
-	cmd->body.size.height = srf->base_size.height;
-	cmd->body.size.depth = srf->base_size.depth;
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
 	vmw_fifo_commit(dev_priv, submit_len);
 
 	return 0;
@@ -1068,7 +1109,7 @@
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	return ret;
 }
 
@@ -1188,6 +1229,7 @@
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1239,8 @@
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -1213,11 +1256,12 @@
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  *                               the user surface define functionality.
@@ -1241,77 +1285,51 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
 
 	size = vmw_user_surface_size + 128;
 
-	desc = svga3dsurface_get_desc(req->format);
-	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-		DRM_ERROR("Invalid surface format for surface creation.\n");
-		return -EINVAL;
-	}
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+			size,
+			req->svga3d_flags,
+			req->format,
+			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+			req->mip_levels,
+			req->multisample_count,
+			req->array_size,
+			req->base_size,
+			&srf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
+	res = &user_srf->srf.res;
 
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->svga3d_flags;
-	srf->format = req->format;
-	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
-	srf->mip_levels[0] = req->mip_levels;
-	srf->num_sizes = 1;
-	srf->sizes = NULL;
-	srf->offsets = NULL;
-	user_srf->size = size;
-	srf->base_size = req->base_size;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = req->multisample_count;
-	res->backup_size = svga3dsurface_get_serialized_size
-	  (srf->format, srf->base_size, srf->mip_levels[0],
-	   srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile = NULL;
-	if (drm_is_primary_client(file_priv))
-		user_srf->master = drm_master_get(file_priv->master);
-
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup);
-	} else if (req->drm_surface_flags &
-		   drm_vmw_surface_flag_create_buffer)
+		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+		    res->backup_size) {
+			DRM_ERROR("Surface backup buffer is too small.\n");
+			vmw_dmabuf_unreference(&res->backup);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
 					    res->backup_size,
 					    req->drm_surface_flags &
@@ -1324,7 +1342,7 @@
 		goto out_unlock;
 	}
 
-	tmp = vmw_resource_reference(&srf->res);
+	tmp = vmw_resource_reference(res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->drm_surface_flags &
 				    drm_vmw_surface_flag_shareable,
@@ -1337,7 +1355,7 @@
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle      = user_srf->prime.base.hash.key;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1352,10 +1370,6 @@
 
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return 0;
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
@@ -1415,6 +1429,7 @@
 	rep->creq.drm_surface_flags = 0;
 	rep->creq.multisample_count = srf->multisample_count;
 	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.array_size = srf->array_size;
 	rep->creq.buffer_handle = backup_handle;
 	rep->creq.base_size = srf->base_size;
 	rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1444,137 @@
 
 	return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev:  Pointer to a struct drm_device
+ * @user_accounting_size:  Used to track user-space memory usage, set
+ *                         to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for a scanout buffer
+ * @num_mip_levels:  number of MIP levels
+ * @multisample_count: number of samples per surface pixel
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated user_srf.  Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.\n");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for non-GL3 hosts. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out  = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile     = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags             = svga3d_flags;
+	srf->format            = format;
+	srf->scanout           = for_scanout;
+	srf->mip_levels[0]     = num_mip_levels;
+	srf->num_sizes         = 1;
+	srf->sizes             = NULL;
+	srf->offsets           = NULL;
+	srf->base_size         = size;
+	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size        = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size   =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
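
The backup-size computation above first picks a layer count: array surfaces store array_size layers, cube maps one layer per face, and everything else a single layer. A runnable sketch of that selection, with illustrative stand-ins for SVGA3D_SURFACE_CUBEMAP and SVGA3D_MAX_SURFACE_FACES:

	#include <stdint.h>
	#include <stdio.h>

	#define SURFACE_CUBEMAP		(1u << 0)	/* hypothetical bit */
	#define MAX_SURFACE_FACES	6

	static uint32_t layer_count(uint32_t array_size, uint32_t flags)
	{
		if (array_size)
			return array_size;		/* array surface  */
		if (flags & SURFACE_CUBEMAP)
			return MAX_SURFACE_FACES;	/* one per face   */
		return 1;				/* plain surface  */
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       layer_count(8, 0),		/* array: 8 */
		       layer_count(0, SURFACE_CUBEMAP),	/* cube: 6  */
		       layer_count(0, 0));		/* plain: 1 */
		return 0;
	}
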
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb..e771091 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6..52a6fd2 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
 #include "dev.h"
 
 #define MIPI_CAL_CTRL			0x00
+#define MIPI_CAL_CTRL_NOISE_FILTER(x)	(((x) & 0xf) << 26)
+#define MIPI_CAL_CTRL_PRESCALE(x)	(((x) & 0x3) << 24)
+#define MIPI_CAL_CTRL_CLKEN_OVR		(1 << 4)
 #define MIPI_CAL_CTRL_START		(1 << 0)
 
 #define MIPI_CAL_AUTOCAL_CTRL		0x01
@@ -44,15 +47,18 @@
 #define MIPI_CAL_CONFIG_CSIC		0x07
 #define MIPI_CAL_CONFIG_CSID		0x08
 #define MIPI_CAL_CONFIG_CSIE		0x09
+#define MIPI_CAL_CONFIG_CSIF		0x0a
 #define MIPI_CAL_CONFIG_DSIA		0x0e
 #define MIPI_CAL_CONFIG_DSIB		0x0f
 #define MIPI_CAL_CONFIG_DSIC		0x10
 #define MIPI_CAL_CONFIG_DSID		0x11
 
-#define MIPI_CAL_CONFIG_DSIAB_CLK	0x19
-#define MIPI_CAL_CONFIG_DSICD_CLK	0x1a
+#define MIPI_CAL_CONFIG_DSIA_CLK	0x19
+#define MIPI_CAL_CONFIG_DSIB_CLK	0x1a
 #define MIPI_CAL_CONFIG_CSIAB_CLK	0x1b
+#define MIPI_CAL_CONFIG_DSIC_CLK	0x1c
 #define MIPI_CAL_CONFIG_CSICD_CLK	0x1c
+#define MIPI_CAL_CONFIG_DSID_CLK	0x1d
 #define MIPI_CAL_CONFIG_CSIE_CLK	0x1d
 
 /* for data and clock lanes */
@@ -73,8 +79,11 @@
 
 #define MIPI_CAL_BIAS_PAD_CFG1		0x17
 #define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
 
 #define MIPI_CAL_BIAS_PAD_CFG2		0x18
+#define MIPI_CAL_BIAS_PAD_VCLAMP(x)	(((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_VAUXP(x)	(((x) & 0x7) << 4)
 #define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)
 
 struct tegra_mipi_pad {
@@ -86,13 +95,35 @@
 	bool has_clk_lane;
 	const struct tegra_mipi_pad *pads;
 	unsigned int num_pads;
+
+	bool clock_enable_override;
+	bool needs_vclamp_ref;
+
+	/* bias pad configuration settings */
+	u8 pad_drive_down_ref;
+	u8 pad_drive_up_ref;
+
+	u8 pad_vclamp_level;
+	u8 pad_vauxp_level;
+
+	/* calibration settings for data lanes */
+	u8 hspdos;
+	u8 hspuos;
+	u8 termos;
+
+	/* calibration settings for clock lanes */
+	u8 hsclkpdos;
+	u8 hsclkpuos;
 };
 
 struct tegra_mipi {
 	const struct tegra_mipi_soc *soc;
+	struct device *dev;
 	void __iomem *regs;
 	struct mutex lock;
 	struct clk *clk;
+
+	unsigned long usage_count;
 };
 
 struct tegra_mipi_device {
@@ -114,6 +145,67 @@
 	writel(value, mipi->regs + (offset << 2));
 }
 
+static int tegra_mipi_power_up(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+
+	if (mipi->soc->needs_vclamp_ref)
+		value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	clk_disable(mipi->clk);
+
+	return 0;
+}
+
+static int tegra_mipi_power_down(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	/*
+	 * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
+	 * supplies the DSI pads. This must be kept enabled until none of the
+	 * DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value |= MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	/*
+	 * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
+	 * control a regulator that supplies current to the pre-driver logic.
+	 * Powering down this regulator causes DSI to fail, so it must remain
+	 * powered on until none of the DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+
+	if (mipi->soc->needs_vclamp_ref)
+		value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	return 0;
+}
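
The power-up/down helpers above follow the usual read-modify-write pattern for multi-field registers: clear the field's mask first, then OR in the new value, leaving neighbouring bits untouched. A userspace sketch using the same shift/width as MIPI_CAL_CTRL_NOISE_FILTER(x):

	#include <stdint.h>
	#include <stdio.h>

	#define NOISE_FILTER(x)	(((x) & 0xfu) << 26)

	static uint32_t set_noise_filter(uint32_t reg, uint32_t val)
	{
		reg &= ~NOISE_FILTER(0xf);	/* clear the 4-bit field */
		reg |= NOISE_FILTER(val);	/* install the new value */
		return reg;
	}

	int main(void)
	{
		/* Neighbouring bits survive the update. */
		printf("0x%08x\n", set_noise_filter(0xffffffffu, 0xa));
		return 0;
	}
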
+
 struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 {
 	struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@
 	dev->pads = args.args[0];
 	dev->device = device;
 
+	mutex_lock(&dev->mipi->lock);
+
+	if (dev->mipi->usage_count++ == 0) {
+		err = tegra_mipi_power_up(dev->mipi);
+		if (err < 0) {
+			dev_err(dev->mipi->dev,
+				"failed to power up MIPI bricks: %d\n",
+				err);
+			/* Drop the lock before bailing out. */
+			mutex_unlock(&dev->mipi->lock);
+			return ERR_PTR(err);
+		}
+	}
+
+	mutex_unlock(&dev->mipi->lock);
+
 	return dev;
 
 put:
@@ -164,6 +270,25 @@
 
 void tegra_mipi_free(struct tegra_mipi_device *device)
 {
+	int err;
+
+	mutex_lock(&device->mipi->lock);
+
+	if (--device->mipi->usage_count == 0) {
+		err = tegra_mipi_power_down(device->mipi);
+		if (err < 0) {
+			/*
+			 * Not much that can be done here, so an error message
+			 * will have to do.
+			 */
+			dev_err(device->mipi->dev,
+				"failed to power down MIPI bricks: %d\n",
+				err);
+		}
+	}
+
+	mutex_unlock(&device->mipi->lock);
+
 	platform_device_put(device->pdev);
 	kfree(device);
 }
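
tegra_mipi_request() and tegra_mipi_free() bracket a shared usage_count: the first requester powers the bricks up, the last release powers them down, and everyone in between only bumps the count. A minimal sketch of that refcounted power gating (locking elided for brevity; the driver holds mipi->lock around both transitions):

	#include <stdio.h>

	static unsigned long usage_count;

	static void request(void)
	{
		if (usage_count++ == 0)
			puts("power up");	/* first user only */
	}

	static void release(void)
	{
		if (--usage_count == 0)
			puts("power down");	/* last user only */
	}

	int main(void)
	{
		request();	/* power up   */
		request();	/* count only */
		release();	/* count only */
		release();	/* power down */
		return 0;
	}
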
@@ -199,16 +324,15 @@
 
 	mutex_lock(&device->mipi->lock);
 
-	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
-	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
-	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
-
-	tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
-			  MIPI_CAL_BIAS_PAD_CFG1);
+	value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
+		MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
+	value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
+	value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
+	value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
 
 	for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@
 
 		if (device->pads & BIT(i)) {
 			data = MIPI_CAL_CONFIG_SELECT |
-			       MIPI_CAL_CONFIG_HSPDOS(0) |
-			       MIPI_CAL_CONFIG_HSPUOS(4) |
-			       MIPI_CAL_CONFIG_TERMOS(5);
+			       MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
+			       MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
+			       MIPI_CAL_CONFIG_TERMOS(soc->termos);
 			clk = MIPI_CAL_CONFIG_SELECT |
-			      MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
-			      MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+			      MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
+			      MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
 		}
 
 		tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
 
-		if (soc->has_clk_lane)
+		if (soc->has_clk_lane && soc->pads[i].clk != 0)
 			tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
 	}
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+	value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
+	value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
+	value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
+	value |= MIPI_CAL_CTRL_PRESCALE(0x2);
+
+	if (!soc->clock_enable_override)
+		value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
+	else
+		value |= MIPI_CAL_CTRL_CLKEN_OVR;
+
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
+
+	/* clear any pending status bits */
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
+
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
 	value |= MIPI_CAL_CTRL_START;
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
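+
+	/*
+	 * Editor's sketch of the "clear any pending status bits" idiom
+	 * above, assuming MIPI_CAL_STATUS is write-1-to-clear (W1C):
+	 * reading the register and writing the same value back acks
+	 * exactly the bits that were pending, e.g.
+	 *
+	 *	value = readl(status);	// value = 0x05, two bits pending
+	 *	writel(value, status);	// W1C: status &= ~value -> 0x00
+	 *
+	 * so stale completion bits cannot be mistaken for the result of
+	 * the calibration started just below.
+	 */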
 
@@ -259,6 +400,17 @@
 	.has_clk_lane = false,
 	.pads = tegra114_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x4,
+	.termos = 0x5,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x4,
 };
 
 static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@
 	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
-	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK  },
 };
 
 static const struct tegra_mipi_soc tegra124_mipi_soc = {
 	.has_clk_lane = true,
 	.pads = tegra124_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x1,
+	.hsclkpuos = 0x2,
 };
 
-static struct of_device_id tegra_mipi_of_match[] = {
+static const struct tegra_mipi_soc tegra132_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra124_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = false,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x3,
+	.hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
+	{ .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
+};
+
+static const struct tegra_mipi_soc tegra210_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra210_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra210_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x1,
+	.pad_vauxp_level = 0x1,
+	.hspdos = 0x0,
+	.hspuos = 0x2,
+	.termos = 0x0,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x2,
+};
+
+static const struct of_device_id tegra_mipi_of_match[] = {
 	{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
 	{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+	{ .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
+	{ .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
 	{ },
 };
 
@@ -299,6 +511,7 @@
 		return -ENOMEM;
 
 	mipi->soc = match->data;
+	mipi->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 00f2058..243f99a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -915,8 +915,8 @@
 static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
@@ -928,8 +928,8 @@
 static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 4, 5, 8, 9};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5..2106066 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
  * Licensed under GPLv2
  *
  * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
-
- Switcher interface - methods require for ATPX and DCM
- - switchto - this throws the output MUX switch
- - discrete_set_power - sets the power state for the discrete card
-
- GPU driver interface
- - set_gpu_state - this should do the equiv of s/r for the card
-		  - this should *not* set the discrete power state
- - switch_check  - check if the device is in a position to switch now
+ *
+ * Switcher interface - methods required for ATPX and DCM
+ * - switchto - this throws the output MUX switch
+ * - discrete_set_power - sets the power state for the discrete card
+ *
+ * GPU driver interface
+ * - set_gpu_state - this should do the equivalent of suspend/resume for the card
+ *                 - this should *not* set the discrete power state
+ * - switch_check  - check if the device is in a position to switch now
  */
 
+#define pr_fmt(fmt) "vga_switcheroo: " fmt
+
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
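
The pr_fmt() definition added above is what lets the printk-to-pr_info conversions in the rest of this patch drop the repeated "vga_switcheroo: " prefix: the kernel's pr_*() macros expand their format string through pr_fmt(). A userspace approximation of the mechanism:

	#include <stdio.h>

	/* Define the prefix once; every call site picks it up. */
	#define pr_fmt(fmt) "vga_switcheroo: " fmt
	#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		pr_info("enabled\n");	/* -> "vga_switcheroo: enabled" */
		pr_info("client %x refused switch\n", 0x100);
		return 0;
	}
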
@@ -111,7 +113,7 @@
 
 	vgasr_priv.handler = handler;
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@
 	mutex_lock(&vgasr_mutex);
 	vgasr_priv.handler = NULL;
 	if (vgasr_priv.active) {
-		pr_info("vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -155,7 +157,7 @@
 		vgasr_priv.registered_clients++;
 
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@
 				   bool driver_power_control)
 {
 	return register_client(pdev, ops, -1,
-			       pdev == vga_default_device(), driver_power_control);
+			       pdev == vga_default_device(),
+			       driver_power_control);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
@@ -183,6 +186,7 @@
 find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->pdev == pdev)
 			return client;
@@ -193,6 +197,7 @@
 find_client_from_id(struct list_head *head, int client_id)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->id == client_id)
 			return client;
@@ -203,6 +208,7 @@
 find_active_client(struct list_head *head)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->active && client_is_vga(client))
 			return client;
@@ -235,7 +241,7 @@
 		kfree(client);
 	}
 	if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
-		printk(KERN_INFO "vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -260,10 +266,12 @@
 {
 	struct vga_switcheroo_client *client;
 	int i = 0;
+
 	mutex_lock(&vgasr_mutex);
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
-			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
+								     "IGD",
 			   client_is_vga(client) ? "" : "-Audio",
 			   client->active ? '+' : ' ',
 			   client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@
 
 	if (new_client->fb_info) {
 		struct fb_event event;
+
 		console_lock();
 		event.info = new_client->fb_info;
 		fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@
 
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (!client->ops->can_switch(client->pdev)) {
-			printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+			pr_err("client %x refused switch\n", client->id);
 			return false;
 		}
 	}
@@ -484,20 +493,20 @@
 	if (can_switch) {
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+			pr_err("switching failed stage 1 %d\n", ret);
 
 		ret = vga_switchto_stage2(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+			pr_err("switching failed stage 2 %d\n", ret);
 
 	} else {
-		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+		pr_info("setting delayed switch to client %d\n", client->id);
 		vgasr_priv.delayed_switch_active = true;
 		vgasr_priv.delayed_client_id = client_id;
 
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+			pr_err("delayed switching stage 1 failed %d\n", ret);
 	}
 
 out:
@@ -516,32 +525,32 @@
 
 static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
 {
-	if (priv->switch_file) {
-		debugfs_remove(priv->switch_file);
-		priv->switch_file = NULL;
-	}
-	if (priv->debugfs_root) {
-		debugfs_remove(priv->debugfs_root);
-		priv->debugfs_root = NULL;
-	}
+	debugfs_remove(priv->switch_file);
+	priv->switch_file = NULL;
+
+	debugfs_remove(priv->debugfs_root);
+	priv->debugfs_root = NULL;
 }
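+
+/*
+ * Editor's note on the simplification above: debugfs_remove() accepts
+ * NULL, so the removed if-guards were redundant, exactly like free(NULL)
+ * in userspace:
+ *
+ *	free(p->switch_file);	// free(NULL) is a defined no-op
+ *	p->switch_file = NULL;
+ *	free(p->debugfs_root);
+ *	p->debugfs_root = NULL;
+ *
+ * which keeps the cleanup path safe even when init never ran.
+ */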
 
 static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
 {
+	static const char mp[] = "/sys/kernel/debug";
+
 	/* already initialised */
 	if (priv->debugfs_root)
 		return 0;
 	priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
 
 	if (!priv->debugfs_root) {
-		printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+		pr_err("Cannot create %s/vgaswitcheroo\n", mp);
 		goto fail;
 	}
 
 	priv->switch_file = debugfs_create_file("switch", 0644,
-						priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+						priv->debugfs_root, NULL,
+						&vga_switcheroo_debugfs_fops);
 	if (!priv->switch_file) {
-		printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+		pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
 		goto fail;
 	}
 	return 0;
@@ -560,7 +569,8 @@
 	if (!vgasr_priv.delayed_switch_active)
 		goto err;
 
-	printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+	pr_info("processing delayed switch to %d\n",
+		vgasr_priv.delayed_client_id);
 
 	client = find_client_from_id(&vgasr_priv.clients,
 				     vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@
 
 	ret = vga_switchto_stage2(client);
 	if (ret)
-		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+		pr_err("delayed switching failed stage 2 %d\n", ret);
 
 	vgasr_priv.delayed_switch_active = false;
 	err = 0;
@@ -579,7 +589,8 @@
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
 
-static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void vga_switcheroo_power_switch(struct pci_dev *pdev,
+					enum vga_switcheroo_state state)
 {
 	struct vga_switcheroo_client *client;
 
@@ -598,7 +609,8 @@
 
 /* force a PCI device to a certain state - mainly to turn off audio clients */
 
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
+				       enum vga_switcheroo_state dynamic)
 {
 	struct vga_switcheroo_client *client;
 
@@ -644,7 +656,8 @@
 
 /* this version is for the case where the power switch is separate
    from the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+				      struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@
 	/* we need to check if we have to switch back on the video
 	   device so the audio device can come back */
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
-		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
+		    client_is_vga(client)) {
 			found = client;
 			ret = pm_runtime_get_sync(&client->pdev->dev);
 			if (ret) {
@@ -695,12 +709,15 @@
 	return ret;
 }
 
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int
+vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+						 struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
 		domain->ops = *dev->bus->pm;
-		domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+		domain->ops.runtime_resume =
+			vga_switcheroo_runtime_resume_hdmi_audio;
 
 		dev->pm_domain = domain;
 		return 0;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf86..a0b4334 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) "vgaarb: " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -134,7 +136,6 @@
 {
 	return vga_default;
 }
-
 EXPORT_SYMBOL_GPL(vga_default_device);
 
 void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@
 
 	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
 
-	if (!vgadev->bridge_has_one_vga) {
+	if (!vgadev->bridge_has_one_vga)
 		vga_irq_set_state(vgadev, true);
-	}
+
 	vgadev->owns |= wants;
 lock_them:
 	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@
 }
 EXPORT_SYMBOL(vga_put);
 
-/* Rules for using a bridge to control a VGA descendant decoding:
-   if a bridge has only one VGA descendant then it can be used
-   to control the VGA routing for that device.
-   It should always use the bridge closest to the device to control it.
-   If a bridge has a direct VGA descendant, but also have a sub-bridge
-   VGA descendant then we cannot use that bridge to control the direct VGA descendant.
-   So for every device we register, we need to iterate all its parent bridges
-   so we can invalidate any devices using them properly.
-*/
+/*
+ * Rules for using a bridge to control a VGA descendant decoding: if a bridge
+ * has only one VGA descendant then it can be used to control the VGA routing
+ * for that device. It should always use the bridge closest to the device to
+ * control it. If a bridge has a direct VGA descendant, but also has a sub-
+ * bridge VGA descendant, then we cannot use that bridge to control the direct
+ * VGA descendant. So for every device we register, we need to iterate all
+ * its parent bridges so we can invalidate any devices using them properly.
+ */
 static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
 {
 	struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@
 
 			/* see if they share a bridge with this device */
 			if (new_bridge == bridge) {
-				/* if their direct parent bridge is the same
-				   as any bridge of this device then it can't be used
-				   for that device */
+				/*
+				 * If their direct parent bridge is the same
+				 * as any bridge of this device then it can't
+				 * be used for that device.
+				 */
 				same_bridge_vgadev->bridge_has_one_vga = false;
 			}
 
-			/* now iterate the previous devices bridge hierarchy */
-			/* if the new devices parent bridge is in the other devices
-			   hierarchy then we can't use it to control this device */
+			/*
+			 * Now iterate the previous device's bridge hierarchy.
+			 * If the new device's parent bridge is in the other
+			 * device's hierarchy then we can't use it to control
+			 * this device.
+			 */
 			while (bus) {
 				bridge = bus->self;
-				if (bridge) {
-					if (bridge == vgadev->pdev->bus->self)
-						vgadev->bridge_has_one_vga = false;
-				}
+
+				if (bridge && bridge == vgadev->pdev->bus->self)
+					vgadev->bridge_has_one_vga = false;
+
 				bus = bus->parent;
 			}
 		}
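
The loop above flattens the old nested if into one guarded test but still visits every ancestor bus via bus->parent. A compact userspace model of that parent-bridge walk, with simplified stand-in structs (all names here are hypothetical, not the PCI core's types):

#include <stdbool.h>
#include <stdio.h>

struct bus;
struct dev { struct bus *bus; };	/* a device sits on a bus */
struct bus {
	struct dev *self;		/* upstream bridge device */
	struct bus *parent;
};

/* Walk from d's bus to the root; flag a conflict if any ancestor
 * bridge is also the direct parent bridge of 'other'. */
static bool shares_parent_bridge(struct dev *d, struct dev *other)
{
	struct bus *bus = d->bus;

	while (bus) {
		struct dev *bridge = bus->self;

		if (bridge && bridge == other->bus->self)
			return true;
		bus = bus->parent;
	}
	return false;
}

int main(void)
{
	struct bus root = { NULL, NULL };
	struct dev bridge_dev = { &root };
	struct bus child = { &bridge_dev, &root };
	struct dev a = { &child }, b = { &child };

	printf("%d\n", shares_parent_bridge(&a, &b)); /* 1: shared bridge */
	return 0;
}
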
@@ -527,10 +533,10 @@
 	/* Allocate structure */
 	vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
 	if (vgadev == NULL) {
-		pr_err("vgaarb: failed to allocate pci device\n");
-		/* What to do on allocation failure ? For now, let's
-		 * just do nothing, I'm not sure there is anything saner
-		 * to be done
+		pr_err("failed to allocate pci device\n");
+		/*
+		 * What to do on allocation failure? For now, let's just do
+		 * nothing; I'm not sure there is anything saner to be done.
 		 */
 		return false;
 	}
@@ -566,8 +572,8 @@
 		bridge = bus->self;
 		if (bridge) {
 			u16 l;
-			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
-					     &l);
+
+			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
 			if (!(l & PCI_BRIDGE_CTL_VGA)) {
 				vgadev->owns = 0;
 				break;
@@ -581,8 +587,7 @@
 	 */
 	if (vga_default == NULL &&
 	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
-		pr_info("vgaarb: setting as boot device: PCI:%s\n",
-			pci_name(pdev));
+		pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
 		vga_set_default_device(pdev);
 	}
 
@@ -591,7 +596,7 @@
 	/* Add to the list */
 	list_add(&vgadev->list, &vga_list);
 	vga_count++;
-	pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+	pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
 		pci_name(pdev),
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@
 	decodes_unlocked = vgadev->locks & decodes_removed;
 	vgadev->decodes = new_decodes;
 
-	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+	pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
 		pci_name(vgadev->pdev),
 		vga_iostate_to_str(old_decodes),
 		vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@
 	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
 	    new_decodes & VGA_RSRC_LEGACY_MASK)
 		vga_decode_count++;
-	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+	pr_debug("decoding count now is: %d\n", vga_decode_count);
 }
 
-static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev,
+				      unsigned int decodes,
+				      bool userspace)
 {
 	struct vga_device *vgadev;
 	unsigned long flags;
@@ -712,7 +719,8 @@
 /* call with NULL to unregister */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
-			unsigned int (*set_vga_decode)(void *cookie, bool decode))
+			unsigned int (*set_vga_decode)(void *cookie,
+						       bool decode))
 {
 	int ret = -ENODEV;
 	struct vga_device *vgadev;
@@ -832,7 +840,7 @@
 	return 1;
 }
 
-static ssize_t vga_arb_read(struct file *file, char __user * buf,
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@
  * TODO: To avoid parsing inside the kernel and to improve speed we may
  * consider using an ioctl here
  */
-static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
 			     size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@
 				ret_val = -EPROTO;
 				goto done;
 			}
-			pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+			pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
 				domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 			pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
-			pr_debug("vgaarb: pdev %p\n", pdev);
+			pr_debug("pdev %p\n", pdev);
 			if (!pdev) {
-				pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+				pr_err("invalid PCI address %x:%x:%x\n",
 					domain, bus, devfn);
 				ret_val = -ENODEV;
 				goto done;
@@ -1089,10 +1097,13 @@
 		}
 
 		vgadev = vgadev_find(pdev);
-		pr_debug("vgaarb: vgadev %p\n", vgadev);
+		pr_debug("vgadev %p\n", vgadev);
 		if (vgadev == NULL) {
-			pr_err("vgaarb: this pci device is not a vga device\n");
-			pci_dev_put(pdev);
+			if (pdev) {
+				pr_err("this pci device is not a vga device\n");
+				pci_dev_put(pdev);
+			}
+
 			ret_val = -ENODEV;
 			goto done;
 		}
@@ -1109,7 +1120,7 @@
 			}
 		}
 		if (i == MAX_USER_CARDS) {
-			pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+			pr_err("maximum user cards (%d) number reached!\n",
 				MAX_USER_CARDS);
 			pci_dev_put(pdev);
 			/* XXX: which value to return? */
@@ -1125,7 +1136,7 @@
 	} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
 		curr_pos += 8;
 		remaining -= 8;
-		pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+		pr_debug("client 0x%p called 'decodes'\n", priv);
 
 		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
 			ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@
 	return ret_val;
 }
 
-static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
 {
 	struct vga_arb_private *priv = file->private_data;
 
@@ -1246,7 +1257,8 @@
 		else
 			new_state = true;
 		if (vgadev->set_vga_decode) {
-			new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+			new_decodes = vgadev->set_vga_decode(vgadev->cookie,
+							     new_state);
 			vga_update_device_decodes(vgadev, new_decodes);
 		}
 	}
@@ -1300,7 +1312,7 @@
 
 	rc = misc_register(&vga_arb_device);
 	if (rc < 0)
-		pr_err("vgaarb: error %d registering device\n", rc);
+		pr_err("error %d registering device\n", rc);
 
 	bus_register_notifier(&pci_bus_type, &pci_notifier);
 
@@ -1312,21 +1324,29 @@
 			       PCI_ANY_ID, pdev)) != NULL)
 		vga_arbiter_add_pci_device(pdev);
 
-	pr_info("vgaarb: loaded\n");
+	pr_info("loaded\n");
 
 	list_for_each_entry(vgadev, &vga_list, list) {
 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
-		/* Override I/O based detection done by vga_arbiter_add_pci_device()
-		 * as it may take the wrong device (e.g. on Apple system under EFI).
+		/*
+		 * Override vga_arbiter_add_pci_device()'s I/O based detection
+		 * as it may take the wrong device (e.g. on Apple system under
+		 * EFI).
 		 *
-		 * Select the device owning the boot framebuffer if there is one.
+		 * Select the device owning the boot framebuffer if there is
+		 * one.
 		 */
-		resource_size_t start, end;
+		resource_size_t start, end, limit;
+		unsigned long flags;
 		int i;
 
+		limit = screen_info.lfb_base + screen_info.lfb_size;
+
 		/* Does firmware framebuffer belong to us? */
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-			if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+			flags = pci_resource_flags(vgadev->pdev, i);
+
+			if ((flags & IORESOURCE_MEM) == 0)
 				continue;
 
 			start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@
 			if (!start || !end)
 				continue;
 
-			if (screen_info.lfb_base < start ||
-			    (screen_info.lfb_base + screen_info.lfb_size) >= end)
+			if (screen_info.lfb_base < start || limit >= end)
 				continue;
+
 			if (!vga_default_device())
-				pr_info("vgaarb: setting as boot device: PCI:%s\n",
+				pr_info("setting as boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			else if (vgadev->pdev != vga_default_device())
-				pr_info("vgaarb: overriding boot device: PCI:%s\n",
+				pr_info("overriding boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			vga_set_default_device(vgadev->pdev);
 		}
 #endif
 		if (vgadev->bridge_has_one_vga)
-			pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 		else
-			pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("no bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 	}
 	return rc;
 }
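
The hoisted limit variable makes the containment test above read as one line: the firmware framebuffer [lfb_base, lfb_base + lfb_size) must sit inside the BAR [start, end], where end is the inclusive last address of the resource. The same check, inverted from the driver's continue form, as a standalone sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* Does [fb_base, fb_base + fb_size) fall inside the BAR [start, end]?
 * Mirrors the rejection test above with the comparison inverted. */
static int fb_inside_bar(uint64_t fb_base, uint64_t fb_size,
			 uint64_t start, uint64_t end)
{
	uint64_t limit = fb_base + fb_size;

	return fb_base >= start && limit < end;
}

int main(void)
{
	/* 16 MiB framebuffer inside a 256 MiB BAR; addresses hypothetical */
	printf("%d\n", fb_inside_bar(0xc1000000, 16 << 20,
				     0xc0000000, 0xcfffffff));
	return 0;
}
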
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bcd914a..70a11ac 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2293,6 +2293,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 577d58d..08b8617 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -526,6 +526,13 @@
 	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
 	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
+config I2C_EMEV2
+	tristate "EMMA Mobile series I2C adapter"
+	depends on HAVE_CLK
+	help
+	  If you say yes to this option, support will be included for the
+	  I2C interface on the Renesas Electronics EM/EV family of processors.
+
 config I2C_EXYNOS5
 	tristate "Exynos5 high-speed I2C driver"
 	depends on ARCH_EXYNOS && OF
@@ -612,6 +619,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-kempld.
 
+config I2C_LPC2K
+	tristate "I2C bus support for NXP LPC2K/LPC178x/18xx/43xx"
+	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+	help
+	  This driver supports the I2C interface found on several NXP
+	  devices, including LPC2xxx, LPC178x/7x and LPC18xx/43xx.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-lpc2k.
+
 config I2C_MESON
 	tristate "Amlogic Meson I2C controller"
 	depends on ARCH_MESON
@@ -1123,7 +1140,7 @@
 
 config I2C_CROS_EC_TUNNEL
 	tristate "ChromeOS EC tunnel I2C bus"
-	depends on CROS_EC_PROTO
+	depends on MFD_CROS_EC
 	help
 	  If you say yes here you get an I2C bus that will tunnel i2c commands
 	  through to the other side of the ChromeOS EC to the i2c bus
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e5f537c..6df3b30 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_I2C_DIGICOLOR)	+= i2c-digicolor.o
 obj-$(CONFIG_I2C_EFM32)		+= i2c-efm32.o
 obj-$(CONFIG_I2C_EG20T)		+= i2c-eg20t.o
+obj-$(CONFIG_I2C_EMEV2)		+= i2c-emev2.o
 obj-$(CONFIG_I2C_EXYNOS5)	+= i2c-exynos5.o
 obj-$(CONFIG_I2C_GPIO)		+= i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)	+= i2c-highlander.o
@@ -58,6 +59,7 @@
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
 obj-$(CONFIG_I2C_JZ4780)	+= i2c-jz4780.o
 obj-$(CONFIG_I2C_KEMPLD)	+= i2c-kempld.o
+obj-$(CONFIG_I2C_LPC2K)		+= i2c-lpc2k.o
 obj-$(CONFIG_I2C_MESON)		+= i2c-meson.o
 obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
 obj-$(CONFIG_I2C_MT65XX)	+= i2c-mt65xx.o
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 2ee78e0..84deed6 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 
 /* Register offsets for the I2C device. */
 #define CDNS_I2C_CR_OFFSET		0x00 /* Control Register, RW */
@@ -113,6 +114,8 @@
 
 #define CDNS_I2C_TIMEOUT_MAX	0xFF
 
+#define CDNS_I2C_BROKEN_HOLD_BIT	BIT(0)
+
 #define cdns_i2c_readreg(offset)       readl_relaxed(id->membase + offset)
 #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
 
@@ -135,6 +138,7 @@
  * @bus_hold_flag:	Flag used in repeated start for clearing HOLD bit
  * @clk:		Pointer to struct clk
  * @clk_rate_change_nb:	Notifier block for clock rate changes
+ * @quirks:		flag for broken hold bit usage in r1p10
  */
 struct cdns_i2c {
 	void __iomem *membase;
@@ -154,6 +158,11 @@
 	unsigned int bus_hold_flag;
 	struct clk *clk;
 	struct notifier_block clk_rate_change_nb;
+	u32 quirks;
+};
+
+struct cdns_platform_data {
+	u32 quirks;
 };
 
 #define to_cdns_i2c(_nb)	container_of(_nb, struct cdns_i2c, \
@@ -172,6 +181,12 @@
 		cdns_i2c_writereg(reg & ~CDNS_I2C_CR_HOLD, CDNS_I2C_CR_OFFSET);
 }
 
+static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround)
+{
+	return (hold_wrkaround &&
+		(id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1));
+}
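+
+/*
+ * cdns_is_holdquirk() exists so the two call sites below stay in sync:
+ * the ISR computes hold_quirk once and the predicate folds in the
+ * FIFO-boundary test.
+ */

A standalone reduction of that refactor, with the struct and constant as stand-ins rather than the driver's real definitions:

#include <stdbool.h>
#include <stdio.h>

#define FIFO_DEPTH 16

struct xfer { unsigned int curr_recv_count; };

/* One predicate instead of two copies of the same compound condition */
static bool is_hold_quirk(const struct xfer *x, bool quirk_active)
{
	return quirk_active && x->curr_recv_count == FIFO_DEPTH + 1;
}

int main(void)
{
	struct xfer x = { .curr_recv_count = FIFO_DEPTH + 1 };

	printf("%d %d\n", is_hold_quirk(&x, true), is_hold_quirk(&x, false));
	return 0;
}
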
+
 /**
  * cdns_i2c_isr - Interrupt handler for the I2C device
  * @irq:	irq number for the I2C device
@@ -186,6 +201,7 @@
 {
 	unsigned int isr_status, avail_bytes, updatetx;
 	unsigned int bytes_to_send;
+	bool hold_quirk;
 	struct cdns_i2c *id = ptr;
 	/* Signal completion only after everything is updated */
 	int done_flag = 0;
@@ -208,6 +224,8 @@
 	if (id->recv_count > id->curr_recv_count)
 		updatetx = 1;
 
+	hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+
 	/* When receiving, handle data interrupt and completion interrupt */
 	if (id->p_recv_buf &&
 	    ((isr_status & CDNS_I2C_IXR_COMP) ||
@@ -229,8 +247,7 @@
 			id->recv_count--;
 			id->curr_recv_count--;
 
-			if (updatetx &&
-			    (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+			if (cdns_is_holdquirk(id, hold_quirk))
 				break;
 		}
 
@@ -241,8 +258,7 @@
 		 * maintain transfer size non-zero while performing a large
 		 * receive operation.
 		 */
-		if (updatetx &&
-		    (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+		if (cdns_is_holdquirk(id, hold_quirk)) {
 			/* wait while fifo is full */
 			while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
 			       (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -264,6 +280,22 @@
 						  CDNS_I2C_XFER_SIZE_OFFSET);
 				id->curr_recv_count = id->recv_count;
 			}
+		} else if (id->recv_count && !hold_quirk &&
+						!id->curr_recv_count) {
+
+			/* Set the slave address in the address register */
+			cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
+						CDNS_I2C_ADDR_OFFSET);
+
+			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
+				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+						CDNS_I2C_XFER_SIZE_OFFSET);
+				id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+			} else {
+				cdns_i2c_writereg(id->recv_count,
+						CDNS_I2C_XFER_SIZE_OFFSET);
+				id->curr_recv_count = id->recv_count;
+			}
 		}
 
 		/* Clear hold (if not repeated start) and signal completion */
@@ -535,11 +567,13 @@
 	int ret, count;
 	u32 reg;
 	struct cdns_i2c *id = adap->algo_data;
+	bool hold_quirk;
 
 	/* Check if the bus is free */
 	if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA)
 		return -EAGAIN;
 
+	hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
 	/*
 	 * Set the flag to one when multiple messages are to be
 	 * processed with a repeated start.
@@ -552,7 +586,7 @@
 		 * followed by any other message, an error is returned
 		 * indicating that this sequence is not supported.
 		 */
-		for (count = 0; count < num - 1; count++) {
+		for (count = 0; (count < num - 1 && hold_quirk); count++) {
 			if (msgs[count].flags & I2C_M_RD) {
 				dev_warn(adap->dev.parent,
 					 "Can't do repeated start after a receive message\n");
@@ -815,6 +849,17 @@
 static SIMPLE_DEV_PM_OPS(cdns_i2c_dev_pm_ops, cdns_i2c_suspend,
 			 cdns_i2c_resume);
 
+static const struct cdns_platform_data r1p10_i2c_def = {
+	.quirks = CDNS_I2C_BROKEN_HOLD_BIT,
+};
+
+static const struct of_device_id cdns_i2c_of_match[] = {
+	{ .compatible = "cdns,i2c-r1p10", .data = &r1p10_i2c_def },
+	{ .compatible = "cdns,i2c-r1p14",},
+	{ /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
+
 /**
  * cdns_i2c_probe - Platform registration call
  * @pdev:	Handle to the platform device structure
@@ -830,6 +875,7 @@
 	struct resource *r_mem;
 	struct cdns_i2c *id;
 	int ret;
+	const struct of_device_id *match;
 
 	id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
 	if (!id)
@@ -837,6 +883,12 @@
 
 	platform_set_drvdata(pdev, id);
 
+	match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node);
+	if (match && match->data) {
+		const struct cdns_platform_data *data = match->data;
+		id->quirks = data->quirks;
+	}
+
 	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
 	if (IS_ERR(id->membase))
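
The probe change above is the usual pattern for per-compatible quirks: hang a data pointer off of_device_id and fetch it with of_match_node(). A userspace reduction of the lookup, with the OF structures simplified to plain structs (illustration only):

#include <stdio.h>
#include <string.h>

struct platform_data { unsigned int quirks; };

struct of_id {
	const char *compatible;
	const struct platform_data *data;
};

static const struct platform_data r1p10_def = { .quirks = 0x1 };

static const struct of_id ids[] = {
	{ "cdns,i2c-r1p10", &r1p10_def },
	{ "cdns,i2c-r1p14", NULL },
	{ NULL, NULL },
};

static const struct of_id *match_node(const char *compat)
{
	const struct of_id *id;

	for (id = ids; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id;
	return NULL;
}

int main(void)
{
	const struct of_id *m = match_node("cdns,i2c-r1p10");
	unsigned int quirks = 0;

	if (m && m->data)
		quirks = m->data->quirks;	/* quirk data, if any */
	printf("quirks=%#x\n", quirks);
	return 0;
}
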
@@ -844,6 +896,7 @@
 
 	id->irq = platform_get_irq(pdev, 0);
 
+	id->adap.owner = THIS_MODULE;
 	id->adap.dev.of_node = pdev->dev.of_node;
 	id->adap.algo = &cdns_i2c_algo;
 	id->adap.timeout = CDNS_I2C_TIMEOUT;
@@ -935,12 +988,6 @@
 	return 0;
 }
 
-static const struct of_device_id cdns_i2c_of_match[] = {
-	{ .compatible = "cdns,i2c-r1p10", },
-	{ /* end of table */ }
-};
-MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
-
 static struct platform_driver cdns_i2c_drv = {
 	.driver = {
 		.name  = DRIVER_NAME,
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6f19a33..7441cdc 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -777,8 +777,7 @@
 
 	enabled = dw_readl(dev, DW_IC_ENABLE);
 	stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
-	dev_dbg(dev->dev, "%s:  %s enabled= 0x%x stat=0x%x\n", __func__,
-		dev->adapter.name, enabled, stat);
+	dev_dbg(dev->dev, "%s: enabled=%#x stat=%#x\n", __func__, enabled, stat);
 	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
 		return IRQ_NONE;
 
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 6643d2d..df23e8c 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -260,8 +260,8 @@
 
 	snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci");
 
-	r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
-			adap->name, dev);
+	r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr,
+			IRQF_SHARED | IRQF_COND_SUSPEND, adap->name, dev);
 	if (r) {
 		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
 		return r;
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
new file mode 100644
index 0000000..192ef6b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -0,0 +1,332 @@
+/*
+ * I2C driver for the Renesas EMEV2 SoC
+ *
+ * Copyright (C) 2015 Wolfram Sang <wsa@sang-engineering.com>
+ * Copyright 2013 Codethink Ltd.
+ * Copyright 2010-2015 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+/* I2C Registers */
+#define I2C_OFS_IICACT0		0x00	/* start */
+#define I2C_OFS_IIC0		0x04	/* shift */
+#define I2C_OFS_IICC0		0x08	/* control */
+#define I2C_OFS_SVA0		0x0c	/* slave address */
+#define I2C_OFS_IICCL0		0x10	/* clock select */
+#define I2C_OFS_IICX0		0x14	/* extension */
+#define I2C_OFS_IICS0		0x18	/* status */
+#define I2C_OFS_IICSE0		0x1c	/* status for emulation */
+#define I2C_OFS_IICF0		0x20	/* IIC flag */
+
+/* I2C IICACT0 Masks */
+#define I2C_BIT_IICE0		0x0001
+
+/* I2C IICC0 Masks */
+#define I2C_BIT_LREL0		0x0040
+#define I2C_BIT_WREL0		0x0020
+#define I2C_BIT_SPIE0		0x0010
+#define I2C_BIT_WTIM0		0x0008
+#define I2C_BIT_ACKE0		0x0004
+#define I2C_BIT_STT0		0x0002
+#define I2C_BIT_SPT0		0x0001
+
+/* I2C IICCL0 Masks */
+#define I2C_BIT_SMC0		0x0008
+#define I2C_BIT_DFC0		0x0004
+
+/* I2C IICSE0 Masks */
+#define I2C_BIT_MSTS0		0x0080
+#define I2C_BIT_ALD0		0x0040
+#define I2C_BIT_EXC0		0x0020
+#define I2C_BIT_COI0		0x0010
+#define I2C_BIT_TRC0		0x0008
+#define I2C_BIT_ACKD0		0x0004
+#define I2C_BIT_STD0		0x0002
+#define I2C_BIT_SPD0		0x0001
+
+/* I2C IICF0 Masks */
+#define I2C_BIT_STCF		0x0080
+#define I2C_BIT_IICBSY		0x0040
+#define I2C_BIT_STCEN		0x0002
+#define I2C_BIT_IICRSV		0x0001
+
+struct em_i2c_device {
+	void __iomem *base;
+	struct i2c_adapter adap;
+	struct completion msg_done;
+	struct clk *sclk;
+};
+
+static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
+{
+	writeb((readb(priv->base + reg) & ~clear) | set, priv->base + reg);
+}
+
+static int em_i2c_wait_for_event(struct em_i2c_device *priv)
+{
+	unsigned long time_left;
+	int status;
+
+	reinit_completion(&priv->msg_done);
+
+	time_left = wait_for_completion_timeout(&priv->msg_done, priv->adap.timeout);
+
+	if (!time_left)
+		return -ETIMEDOUT;
+
+	status = readb(priv->base + I2C_OFS_IICSE0);
+	return status & I2C_BIT_ALD0 ? -EAGAIN : status;
+}
+
+static void em_i2c_stop(struct em_i2c_device *priv)
+{
+	/* Send Stop condition */
+	em_clear_set_bit(priv, 0, I2C_BIT_SPT0 | I2C_BIT_SPIE0, I2C_OFS_IICC0);
+
+	/* Wait for stop condition */
+	em_i2c_wait_for_event(priv);
+}
+
+static void em_i2c_reset(struct i2c_adapter *adap)
+{
+	struct em_i2c_device *priv = i2c_get_adapdata(adap);
+	int retr;
+
+	/* If I2C active */
+	if (readb(priv->base + I2C_OFS_IICACT0) & I2C_BIT_IICE0) {
+		/* Disable I2C operation */
+		writeb(0, priv->base + I2C_OFS_IICACT0);
+
+		retr = 1000;
+		while (readb(priv->base + I2C_OFS_IICACT0) == 1 && retr)
+			retr--;
+		WARN_ON(retr == 0);
+	}
+
+	/* Transfer mode set */
+	writeb(I2C_BIT_DFC0, priv->base + I2C_OFS_IICCL0);
+
+	/* Can issue start without detecting a stop; reservation disabled. */
+	writeb(I2C_BIT_STCEN | I2C_BIT_IICRSV, priv->base + I2C_OFS_IICF0);
+
+	/* I2C enable, 9 bit interrupt mode */
+	writeb(I2C_BIT_WTIM0, priv->base + I2C_OFS_IICC0);
+
+	/* Enable I2C operation */
+	writeb(I2C_BIT_IICE0, priv->base + I2C_OFS_IICACT0);
+
+	retr = 1000;
+	while (readb(priv->base + I2C_OFS_IICACT0) == 0 && retr)
+		retr--;
+	WARN_ON(retr == 0);
+}
+
+static int __em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+				int stop)
+{
+	struct em_i2c_device *priv = i2c_get_adapdata(adap);
+	int count, status, read = !!(msg->flags & I2C_M_RD);
+
+	/* Send start condition */
+	em_clear_set_bit(priv, 0, I2C_BIT_ACKE0 | I2C_BIT_WTIM0, I2C_OFS_IICC0);
+	em_clear_set_bit(priv, 0, I2C_BIT_STT0, I2C_OFS_IICC0);
+
+	/* Send slave address and R/W type */
+	writeb((msg->addr << 1) | read, priv->base + I2C_OFS_IIC0);
+
+	/* Wait for transaction */
+	status = em_i2c_wait_for_event(priv);
+	if (status < 0)
+		goto out_reset;
+
+	/* Received NACK (result of setting slave address and R/W) */
+	if (!(status & I2C_BIT_ACKD0)) {
+		em_i2c_stop(priv);
+		goto out;
+	}
+
+	/* Extra setup for read transactions */
+	if (read) {
+		/* 8 bit interrupt mode */
+		em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_ACKE0, I2C_OFS_IICC0);
+		em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_WREL0, I2C_OFS_IICC0);
+
+		/* Wait for transaction */
+		status = em_i2c_wait_for_event(priv);
+		if (status < 0)
+			goto out_reset;
+	}
+
+	/* Send / receive data */
+	for (count = 0; count < msg->len; count++) {
+		if (read) { /* Read transaction */
+			msg->buf[count] = readb(priv->base + I2C_OFS_IIC0);
+			em_clear_set_bit(priv, 0, I2C_BIT_WREL0, I2C_OFS_IICC0);
+
+		} else { /* Write transaction */
+			/* Received NACK */
+			if (!(status & I2C_BIT_ACKD0)) {
+				em_i2c_stop(priv);
+				goto out;
+			}
+
+			/* Write data */
+			writeb(msg->buf[count], priv->base + I2C_OFS_IIC0);
+		}
+
+		/* Wait for R/W transaction */
+		status = em_i2c_wait_for_event(priv);
+		if (status < 0)
+			goto out_reset;
+	}
+
+	if (stop)
+		em_i2c_stop(priv);
+
+	return count;
+
+out_reset:
+	em_i2c_reset(adap);
+out:
+	return status < 0 ? status : -ENXIO;
+}
+
+static int em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+	int num)
+{
+	struct em_i2c_device *priv = i2c_get_adapdata(adap);
+	int ret, i;
+
+	if (readb(priv->base + I2C_OFS_IICF0) & I2C_BIT_IICBSY)
+		return -EAGAIN;
+
+	for (i = 0; i < num; i++) {
+		ret = __em_i2c_xfer(adap, &msgs[i], (i == (num - 1)));
+		if (ret < 0)
+			return ret;
+	}
+
+	/* I2C transfer completed */
+	return num;
+}
+
+static irqreturn_t em_i2c_irq_handler(int this_irq, void *dev_id)
+{
+	struct em_i2c_device *priv = dev_id;
+
+	complete(&priv->msg_done);
+	return IRQ_HANDLED;
+}
+
+static u32 em_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm em_i2c_algo = {
+	.master_xfer = em_i2c_xfer,
+	.functionality = em_i2c_func,
+};
+
+static int em_i2c_probe(struct platform_device *pdev)
+{
+	struct em_i2c_device *priv;
+	struct resource *r;
+	int irq, ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+
+	priv->sclk = devm_clk_get(&pdev->dev, "sclk");
+	if (IS_ERR(priv->sclk))
+		return PTR_ERR(priv->sclk);
+
+	clk_prepare_enable(priv->sclk);
+
+	priv->adap.timeout = msecs_to_jiffies(100);
+	priv->adap.retries = 5;
+	priv->adap.dev.parent = &pdev->dev;
+	priv->adap.algo = &em_i2c_algo;
+	priv->adap.owner = THIS_MODULE;
+	priv->adap.dev.of_node = pdev->dev.of_node;
+
+	init_completion(&priv->msg_done);
+
+	platform_set_drvdata(pdev, priv);
+	i2c_set_adapdata(&priv->adap, priv);
+
+	em_i2c_reset(&priv->adap);
+
+	irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+				"em_i2c", priv);
+	if (ret)
+		goto err_clk;
+
+	ret = i2c_add_adapter(&priv->adap);
+
+	if (ret)
+		goto err_clk;
+
+	dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(priv->sclk);
+	return ret;
+}
+
+static int em_i2c_remove(struct platform_device *dev)
+{
+	struct em_i2c_device *priv = platform_get_drvdata(dev);
+
+	i2c_del_adapter(&priv->adap);
+	clk_disable_unprepare(priv->sclk);
+
+	return 0;
+}
+
+static const struct of_device_id em_i2c_ids[] = {
+	{ .compatible = "renesas,iic-emev2", },
+	{ }
+};
+
+static struct platform_driver em_i2c_driver = {
+	.probe = em_i2c_probe,
+	.remove = em_i2c_remove,
+	.driver = {
+		.name = "em-i2c",
+		.of_match_table = em_i2c_ids,
+	}
+};
+module_platform_driver(em_i2c_driver);
+
+MODULE_DESCRIPTION("EMEV2 I2C bus driver");
+MODULE_AUTHOR("Ian Molton and Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, em_i2c_ids);
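
All of this driver's synchronization goes through em_i2c_wait_for_event(): re-arm the completion, let em_i2c_irq_handler()'s complete() wake the sleeper, or time out and reset the controller. A pthread-based sketch of that handshake — the shape of the pattern, not the kernel completion API:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

/* IRQ-handler stand-in: "hardware" finishes, then complete() */
static void *irq_thread(void *arg)
{
	(void)arg;
	usleep(10000);
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int wait_for_event(int timeout_ms)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	done = 0;			/* reinit_completion() analogue */
	while (!done && !err)
		err = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return done ? 0 : -1;		/* -1 stands in for -ETIMEDOUT */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_thread, NULL);
	printf("%d\n", wait_for_event(100));	/* 0: completed in time */
	pthread_join(&t, NULL);
	return 0;
}
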
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5ecbb3f..eaef9bc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -88,12 +88,13 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
 		defined CONFIG_DMI
 #include <linux/gpio.h>
 #include <linux/i2c-mux-gpio.h>
-#include <linux/platform_device.h>
 #endif
 
 /* I801 SMBus address offsets */
@@ -113,6 +114,16 @@
 #define SMBPCICTL	0x004
 #define SMBPCISTS	0x006
 #define SMBHSTCFG	0x040
+#define TCOBASE		0x050
+#define TCOCTL		0x054
+
+#define ACPIBASE		0x040
+#define ACPIBASE_SMI_OFF	0x030
+#define ACPICTRL		0x044
+#define ACPICTRL_EN		0x080
+
+#define SBREG_BAR		0x10
+#define SBREG_SMBCTRL		0xc6000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		0x08
@@ -125,6 +136,9 @@
 #define SMBHSTCFG_SMB_SMI_EN	2
 #define SMBHSTCFG_I2C_EN	4
 
+/* TCO configuration bits for TCOCTL */
+#define TCOCTL_EN		0x0100
+
 /* Auxiliary control register bits, ICH4+ only */
 #define SMBAUXCTL_CRC		1
 #define SMBAUXCTL_E32B		2
@@ -221,6 +235,7 @@
 	const struct i801_mux_config *mux_drvdata;
 	struct platform_device *mux_pdev;
 #endif
+	struct platform_device *tco_pdev;
 };
 
 #define FEATURE_SMBUS_PEC	(1 << 0)
@@ -230,6 +245,7 @@
 #define FEATURE_IRQ		(1 << 4)
 /* Not really a feature, but it's convenient to handle it as such */
 #define FEATURE_IDF		(1 << 15)
+#define FEATURE_TCO		(1 << 16)
 
 static const char *i801_feature_names[] = {
 	"SMBus PEC",
@@ -1132,6 +1148,95 @@
 }
 #endif
 
+static const struct itco_wdt_platform_data tco_platform_data = {
+	.name = "Intel PCH",
+	.version = 4,
+};
+
+static DEFINE_SPINLOCK(p2sb_spinlock);
+
+static void i801_add_tco(struct i801_priv *priv)
+{
+	struct pci_dev *pci_dev = priv->pci_dev;
+	struct resource tco_res[3], *res;
+	struct platform_device *pdev;
+	unsigned int devfn;
+	u32 tco_base, tco_ctl;
+	u32 base_addr, ctrl_val;
+	u64 base64_addr;
+
+	if (!(priv->features & FEATURE_TCO))
+		return;
+
+	pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
+	pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
+	if (!(tco_ctl & TCOCTL_EN))
+		return;
+
+	memset(tco_res, 0, sizeof(tco_res));
+
+	res = &tco_res[ICH_RES_IO_TCO];
+	res->start = tco_base & ~1;
+	res->end = res->start + 32 - 1;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Power Management registers.
+	 */
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
+
+	res = &tco_res[ICH_RES_IO_SMI];
+	res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Enable the ACPI I/O space.
+	 */
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
+	ctrl_val |= ACPICTRL_EN;
+	pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
+
+	/*
+	 * We must access the NO_REBOOT bit over the Primary to Sideband
+	 * bridge (P2SB). The BIOS prevents the P2SB device from being
+	 * enumerated by the PCI subsystem, so we need to unhide/hide it
+	 * to lookup the P2SB BAR.
+	 */
+	spin_lock(&p2sb_spinlock);
+
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
+
+	/* Unhide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
+	base64_addr = base_addr & 0xfffffff0;
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
+	base64_addr |= (u64)base_addr << 32;
+
+	/* Hide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x1);
+	spin_unlock(&p2sb_spinlock);
+
+	res = &tco_res[ICH_RES_MEM_OFF];
+	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_MEM;
+
+	pdev = platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+						 tco_res, 3, &tco_platform_data,
+						 sizeof(tco_platform_data));
+	if (IS_ERR(pdev)) {
+		dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+		return;
+	}
+
+	priv->tco_pdev = pdev;
+}
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	unsigned char temp;
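
Within i801_add_tco() above, the P2SB BAR is read as two 32-bit config dwords: the low dword has its flag bits masked off and the high dword is shifted up. The arithmetic in isolation, with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

/* Combine SBREG_BAR and SBREG_BAR + 4 into one 64-bit base address */
static uint64_t combine_bar(uint32_t lo, uint32_t hi)
{
	uint64_t base = lo & 0xfffffff0;	/* strip BAR flag bits */

	base |= (uint64_t)hi << 32;
	return base;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)combine_bar(0xfd000004, 0x1));
	return 0;				/* prints 0x1fd000000 */
}
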
@@ -1149,6 +1254,15 @@
 
 	priv->pci_dev = dev;
 	switch (dev->device) {
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+		priv->features |= FEATURE_I2C_BLOCK_READ;
+		priv->features |= FEATURE_IRQ;
+		priv->features |= FEATURE_SMBUS_PEC;
+		priv->features |= FEATURE_BLOCK_BUFFER;
+		priv->features |= FEATURE_TCO;
+		break;
+
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
@@ -1265,6 +1379,8 @@
 	dev_info(&dev->dev, "SMBus using %s\n",
 		 priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
 
+	i801_add_tco(priv);
+
 	/* set up the sysfs linkage to our parent device */
 	priv->adapter.dev.parent = &dev->dev;
 
@@ -1296,6 +1412,8 @@
 	i2c_del_adapter(&priv->adapter);
 	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
+	platform_device_unregister(priv->tco_pdev);
+
 	/*
 	 * do not call pci_disable_device(dev) since it can cause hard hangs on
 	 * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
new file mode 100644
index 0000000..8560a13
--- /dev/null
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2011 NXP Semiconductors
+ *
+ * Code portions referenced from the i2c-pxa and i2c-pnx drivers
+ *
+ * Make SMBus byte and word transactions work on LPC178x/7x
+ * Copyright (c) 2012
+ * Alexander Potashev, Emcraft Systems, aspotashev@emcraft.com
+ * Anton Protopopov, Emcraft Systems, antonp@emcraft.com
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+
+/* LPC24xx register offsets and bits */
+#define LPC24XX_I2CONSET	0x00
+#define LPC24XX_I2STAT		0x04
+#define LPC24XX_I2DAT		0x08
+#define LPC24XX_I2ADDR		0x0c
+#define LPC24XX_I2SCLH		0x10
+#define LPC24XX_I2SCLL		0x14
+#define LPC24XX_I2CONCLR	0x18
+
+#define LPC24XX_AA		BIT(2)
+#define LPC24XX_SI		BIT(3)
+#define LPC24XX_STO		BIT(4)
+#define LPC24XX_STA		BIT(5)
+#define LPC24XX_I2EN		BIT(6)
+
+#define LPC24XX_STO_AA		(LPC24XX_STO | LPC24XX_AA)
+#define LPC24XX_CLEAR_ALL	(LPC24XX_AA | LPC24XX_SI | LPC24XX_STO | \
+				 LPC24XX_STA | LPC24XX_I2EN)
+
+/* I2C SCL clock has different duty cycle depending on mode */
+#define I2C_STD_MODE_DUTY		46
+#define I2C_FAST_MODE_DUTY		36
+#define I2C_FAST_MODE_PLUS_DUTY		38
+
+/*
+ * There are 26 possible I2C status codes, but only those applicable
+ * to master mode are listed here and used in this driver.
+ */
+enum {
+	M_BUS_ERROR		= 0x00,
+	M_START			= 0x08,
+	M_REPSTART		= 0x10,
+	MX_ADDR_W_ACK		= 0x18,
+	MX_ADDR_W_NACK		= 0x20,
+	MX_DATA_W_ACK		= 0x28,
+	MX_DATA_W_NACK		= 0x30,
+	M_DATA_ARB_LOST		= 0x38,
+	MR_ADDR_R_ACK		= 0x40,
+	MR_ADDR_R_NACK		= 0x48,
+	MR_DATA_R_ACK		= 0x50,
+	MR_DATA_R_NACK		= 0x58,
+	M_I2C_IDLE		= 0xf8,
+};
+
+struct lpc2k_i2c {
+	void __iomem		*base;
+	struct clk		*clk;
+	int			irq;
+	wait_queue_head_t	wait;
+	struct i2c_adapter	adap;
+	struct i2c_msg		*msg;
+	int			msg_idx;
+	int			msg_status;
+	int			is_last;
+};
+
+static void i2c_lpc2k_reset(struct lpc2k_i2c *i2c)
+{
+	/* Will force clear all statuses */
+	writel(LPC24XX_CLEAR_ALL, i2c->base + LPC24XX_I2CONCLR);
+	writel(0, i2c->base + LPC24XX_I2ADDR);
+	writel(LPC24XX_I2EN, i2c->base + LPC24XX_I2CONSET);
+}
+
+static int i2c_lpc2k_clear_arb(struct lpc2k_i2c *i2c)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+	/*
+	 * If the transfer needs to be aborted for some reason, we'll try
+	 * to force a stop condition to clear any pending bus conditions.
+	 */
+	writel(LPC24XX_STO, i2c->base + LPC24XX_I2CONSET);
+
+	/* Wait for status change */
+	while (readl(i2c->base + LPC24XX_I2STAT) != M_I2C_IDLE) {
+		if (time_after(jiffies, timeout)) {
+			/* Bus was not idle, try to reset adapter */
+			i2c_lpc2k_reset(i2c);
+			return -EBUSY;
+		}
+
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static void i2c_lpc2k_pump_msg(struct lpc2k_i2c *i2c)
+{
+	unsigned char data;
+	u32 status;
+
+	/*
+	 * I2C in the LPC2xxx series is basically a state machine.
+	 * Just run through the steps based on the current status.
+	 */
+	status = readl(i2c->base + LPC24XX_I2STAT);
+
+	switch (status) {
+	case M_START:
+	case M_REPSTART:
+		/* Start bit was just sent out, send out addr and dir */
+		data = i2c->msg->addr << 1;
+		if (i2c->msg->flags & I2C_M_RD)
+			data |= 1;
+
+		writel(data, i2c->base + LPC24XX_I2DAT);
+		writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+		break;
+
+	case MX_ADDR_W_ACK:
+	case MX_DATA_W_ACK:
+		/*
+		 * Address or data was sent out with an ACK. If there is more
+		 * data to send, send it now
+		 */
+		if (i2c->msg_idx < i2c->msg->len) {
+			writel(i2c->msg->buf[i2c->msg_idx],
+			       i2c->base + LPC24XX_I2DAT);
+		} else if (i2c->is_last) {
+			/* Last message, send stop */
+			writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+			writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+			i2c->msg_status = 0;
+			disable_irq_nosync(i2c->irq);
+		} else {
+			i2c->msg_status = 0;
+			disable_irq_nosync(i2c->irq);
+		}
+
+		i2c->msg_idx++;
+		break;
+
+	case MR_ADDR_R_ACK:
+		/* Receive first byte from slave */
+		if (i2c->msg->len == 1) {
+			/* Last byte, return NACK */
+			writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR);
+		} else {
+			/* Not last byte, return ACK */
+			writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET);
+		}
+
+		writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+		break;
+
+	case MR_DATA_R_NACK:
+		/*
+		 * The I2C shows NACK status on reads, so we need to accept
+		 * the NACK as an ACK here. This should be ok, as the real
+		 * the NACK as an ACK here. This should be OK, as a real
+		 * NACK would have been caught on the address write.
+	case MR_DATA_R_ACK:
+		/* Data was received */
+		if (i2c->msg_idx < i2c->msg->len) {
+			i2c->msg->buf[i2c->msg_idx] =
+					readl(i2c->base + LPC24XX_I2DAT);
+		}
+
+		/* If transfer is done, send STOP */
+		if (i2c->msg_idx >= i2c->msg->len - 1 && i2c->is_last) {
+			writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+			writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+			i2c->msg_status = 0;
+		}
+
+		/* Message is done */
+		if (i2c->msg_idx >= i2c->msg->len - 1) {
+			i2c->msg_status = 0;
+			disable_irq_nosync(i2c->irq);
+		}
+
+		/*
+		 * On the second-to-last data byte, send NACK to tell the
+		 * slave that the next byte will be the last one transferred.
+		 */
+		if (i2c->msg_idx >= i2c->msg->len - 2) {
+			/* One byte left to receive - NACK */
+			writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR);
+		} else {
+			/* More than one byte left to receive - ACK */
+			writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET);
+		}
+
+		writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+		i2c->msg_idx++;
+		break;
+
+	case MX_ADDR_W_NACK:
+	case MX_DATA_W_NACK:
+	case MR_ADDR_R_NACK:
+		/* NACK processing is done */
+		writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+		i2c->msg_status = -ENXIO;
+		disable_irq_nosync(i2c->irq);
+		break;
+
+	case M_DATA_ARB_LOST:
+		/* Arbitration lost */
+		i2c->msg_status = -EAGAIN;
+
+		/* Release the I2C bus */
+		writel(LPC24XX_STA | LPC24XX_STO, i2c->base + LPC24XX_I2CONCLR);
+		disable_irq_nosync(i2c->irq);
+		break;
+
+	default:
+		/* Unexpected statuses */
+		i2c->msg_status = -EIO;
+		disable_irq_nosync(i2c->irq);
+		break;
+	}
+
+	/* Exit on failure or all bytes transferred */
+	if (i2c->msg_status != -EBUSY)
+		wake_up(&i2c->wait);
+
+	/*
+	 * If msg_status is zero, then lpc2k_process_msg()
+	 * is responsible for clearing the SI flag.
+	 */
+	if (i2c->msg_status != 0)
+		writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+}
+
+static int lpc2k_process_msg(struct lpc2k_i2c *i2c, int msgidx)
+{
+	/* A new transfer is kicked off by initiating a start condition */
+	if (!msgidx) {
+		writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET);
+	} else {
+		/*
+		 * A multi-message I2C transfer continues where the
+		 * previous I2C transfer left off and uses the
+		 * current condition of the I2C adapter.
+		 */
+		if (unlikely(i2c->msg->flags & I2C_M_NOSTART)) {
+			WARN_ON(i2c->msg->len == 0);
+
+			if (!(i2c->msg->flags & I2C_M_RD)) {
+				/* Start transmit of data */
+				writel(i2c->msg->buf[0],
+				       i2c->base + LPC24XX_I2DAT);
+				i2c->msg_idx++;
+			}
+		} else {
+			/* Start or repeated start */
+			writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET);
+		}
+
+		writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+	}
+
+	enable_irq(i2c->irq);
+
+	/* Wait for transfer completion */
+	if (wait_event_timeout(i2c->wait, i2c->msg_status != -EBUSY,
+			       msecs_to_jiffies(1000)) == 0) {
+		disable_irq_nosync(i2c->irq);
+
+		return -ETIMEDOUT;
+	}
+
+	return i2c->msg_status;
+}
+
+static int i2c_lpc2k_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			  int msg_num)
+{
+	struct lpc2k_i2c *i2c = i2c_get_adapdata(adap);
+	int ret, i;
+	u32 stat;
+
+	/* Check for bus idle condition */
+	stat = readl(i2c->base + LPC24XX_I2STAT);
+	if (stat != M_I2C_IDLE) {
+		/* Something is holding the bus, try to clear it */
+		return i2c_lpc2k_clear_arb(i2c);
+	}
+
+	/* Process a single message at a time */
+	for (i = 0; i < msg_num; i++) {
+		/* Save message pointer and current message data index */
+		i2c->msg = &msgs[i];
+		i2c->msg_idx = 0;
+		i2c->msg_status = -EBUSY;
+		i2c->is_last = (i == (msg_num - 1));
+
+		ret = lpc2k_process_msg(i2c, i);
+		if (ret)
+			return ret;
+	}
+
+	return msg_num;
+}
+
+static irqreturn_t i2c_lpc2k_handler(int irq, void *dev_id)
+{
+	struct lpc2k_i2c *i2c = dev_id;
+
+	if (readl(i2c->base + LPC24XX_I2CONSET) & LPC24XX_SI) {
+		i2c_lpc2k_pump_msg(i2c);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static u32 i2c_lpc2k_functionality(struct i2c_adapter *adap)
+{
+	/* Only emulated SMBus for now */
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm i2c_lpc2k_algorithm = {
+	.master_xfer	= i2c_lpc2k_xfer,
+	.functionality	= i2c_lpc2k_functionality,
+};
+
+static int i2c_lpc2k_probe(struct platform_device *pdev)
+{
+	struct lpc2k_i2c *i2c;
+	struct resource *res;
+	u32 bus_clk_rate;
+	u32 scl_high;
+	u32 clkrate;
+	int ret;
+
+	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+	if (!i2c)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	i2c->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(i2c->base))
+		return PTR_ERR(i2c->base);
+
+	i2c->irq = platform_get_irq(pdev, 0);
+	if (i2c->irq < 0) {
+		dev_err(&pdev->dev, "can't get interrupt resource\n");
+		return i2c->irq;
+	}
+
+	init_waitqueue_head(&i2c->wait);
+
+	i2c->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(i2c->clk)) {
+		dev_err(&pdev->dev, "error getting clock\n");
+		return PTR_ERR(i2c->clk);
+	}
+
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable clock.\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, i2c->irq, i2c_lpc2k_handler, 0,
+			       dev_name(&pdev->dev), i2c);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't request interrupt.\n");
+		goto fail_clk;
+	}
+
+	disable_irq_nosync(i2c->irq);
+
+	/* Place controller in a known state */
+	i2c_lpc2k_reset(i2c);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+				   &bus_clk_rate);
+	if (ret)
+		bus_clk_rate = 100000; /* 100 kHz default clock rate */
+
+	clkrate = clk_get_rate(i2c->clk);
+	if (clkrate == 0) {
+		dev_err(&pdev->dev, "can't get I2C base clock\n");
+		ret = -EINVAL;
+		goto fail_clk;
+	}
+
+	/* Setup I2C dividers to generate clock with proper duty cycle */
+	clkrate = clkrate / bus_clk_rate;
+	if (bus_clk_rate <= 100000)
+		scl_high = (clkrate * I2C_STD_MODE_DUTY) / 100;
+	else if (bus_clk_rate <= 400000)
+		scl_high = (clkrate * I2C_FAST_MODE_DUTY) / 100;
+	else
+		scl_high = (clkrate * I2C_FAST_MODE_PLUS_DUTY) / 100;
+
+	writel(scl_high, i2c->base + LPC24XX_I2SCLH);
+	writel(clkrate - scl_high, i2c->base + LPC24XX_I2SCLL);
+
+	platform_set_drvdata(pdev, i2c);
+
+	i2c_set_adapdata(&i2c->adap, i2c);
+	i2c->adap.owner = THIS_MODULE;
+	strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+	i2c->adap.algo = &i2c_lpc2k_algorithm;
+	i2c->adap.dev.parent = &pdev->dev;
+	i2c->adap.dev.of_node = pdev->dev.of_node;
+
+	ret = i2c_add_adapter(&i2c->adap);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to add adapter!\n");
+		goto fail_clk;
+	}
+
+	dev_info(&pdev->dev, "LPC2K I2C adapter\n");
+
+	return 0;
+
+fail_clk:
+	clk_disable_unprepare(i2c->clk);
+	return ret;
+}
+
+static int i2c_lpc2k_remove(struct platform_device *dev)
+{
+	struct lpc2k_i2c *i2c = platform_get_drvdata(dev);
+
+	i2c_del_adapter(&i2c->adap);
+	clk_disable_unprepare(i2c->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int i2c_lpc2k_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct lpc2k_i2c *i2c = platform_get_drvdata(pdev);
+
+	clk_disable(i2c->clk);
+
+	return 0;
+}
+
+static int i2c_lpc2k_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct lpc2k_i2c *i2c = platform_get_drvdata(pdev);
+
+	clk_enable(i2c->clk);
+	i2c_lpc2k_reset(i2c);
+
+	return 0;
+}
+
+static const struct dev_pm_ops i2c_lpc2k_dev_pm_ops = {
+	.suspend_noirq = i2c_lpc2k_suspend,
+	.resume_noirq = i2c_lpc2k_resume,
+};
+
+#define I2C_LPC2K_DEV_PM_OPS (&i2c_lpc2k_dev_pm_ops)
+#else
+#define I2C_LPC2K_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id lpc2k_i2c_match[] = {
+	{ .compatible = "nxp,lpc1788-i2c" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, lpc2k_i2c_match);
+
+static struct platform_driver i2c_lpc2k_driver = {
+	.probe	= i2c_lpc2k_probe,
+	.remove	= i2c_lpc2k_remove,
+	.driver	= {
+		.name		= "lpc2k-i2c",
+		.pm		= I2C_LPC2K_DEV_PM_OPS,
+		.of_match_table	= lpc2k_i2c_match,
+	},
+};
+module_platform_driver(i2c_lpc2k_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_DESCRIPTION("I2C driver for LPC2xxx devices");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lpc2k-i2c");
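
The divider setup in i2c_lpc2k_probe() splits the PCLK/bus-rate ratio so SCL's high phase gets the duty cycle its speed class calls for, and the low phase takes the remainder. The same computation as a standalone helper (duty constants copied from the driver; the 60 MHz PCLK in main() is a made-up example):

#include <stdint.h>
#include <stdio.h>

#define I2C_STD_MODE_DUTY		46
#define I2C_FAST_MODE_DUTY		36
#define I2C_FAST_MODE_PLUS_DUTY		38

static void lpc2k_dividers(uint32_t pclk, uint32_t bus_rate,
			   uint32_t *sclh, uint32_t *scll)
{
	uint32_t clkrate = pclk / bus_rate;
	uint32_t duty;

	if (bus_rate <= 100000)
		duty = I2C_STD_MODE_DUTY;
	else if (bus_rate <= 400000)
		duty = I2C_FAST_MODE_DUTY;
	else
		duty = I2C_FAST_MODE_PLUS_DUTY;

	*sclh = clkrate * duty / 100;	/* high-phase count */
	*scll = clkrate - *sclh;	/* low phase gets the rest */
}

int main(void)
{
	uint32_t sclh, scll;

	lpc2k_dividers(60000000, 400000, &sclh, &scll);
	printf("SCLH=%u SCLL=%u\n", sclh, scll);	/* 54 / 96 */
	return 0;
}
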
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 9920eef..c02e6c0 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -59,6 +59,7 @@
 #define I2C_DMA_START_EN		0x0001
 #define I2C_DMA_INT_FLAG_NONE		0x0000
 #define I2C_DMA_CLR_FLAG		0x0000
+#define I2C_DMA_HARD_RST		0x0002
 
 #define I2C_DEFAULT_SPEED		100000	/* hz */
 #define MAX_FS_MODE_SPEED		400000
@@ -81,6 +82,7 @@
 	OFFSET_INT_FLAG = 0x0,
 	OFFSET_INT_EN = 0x04,
 	OFFSET_EN = 0x08,
+	OFFSET_RST = 0x0c,
 	OFFSET_CON = 0x18,
 	OFFSET_TX_MEM_ADDR = 0x1c,
 	OFFSET_RX_MEM_ADDR = 0x20,
@@ -262,6 +264,10 @@
 		      I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN;
 	writew(control_reg, i2c->base + OFFSET_CONTROL);
 	writew(I2C_DELAY_LEN, i2c->base + OFFSET_DELAY_LEN);
+
+	writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+	udelay(50);
+	writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
 }
 
 /*
@@ -551,15 +557,22 @@
 {
 	struct mtk_i2c *i2c = dev_id;
 	u16 restart_flag = 0;
+	u16 intr_stat;
 
 	if (i2c->dev_comp->auto_restart)
 		restart_flag = I2C_RS_TRANSFER;
 
-	i2c->irq_stat = readw(i2c->base + OFFSET_INTR_STAT);
-	writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR
-		| I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_STAT);
+	intr_stat = readw(i2c->base + OFFSET_INTR_STAT);
+	writew(intr_stat, i2c->base + OFFSET_INTR_STAT);
 
-	complete(&i2c->msg_complete);
+	/*
+	 * When an ACK error occurs, the I2C controller generates two
+	 * interrupts: first the ACK error interrupt, then the complete
+	 * interrupt. i2c->irq_stat needs to hold both interrupt values.
+	 */
+	i2c->irq_stat |= intr_stat;
+	if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
+		complete(&i2c->msg_complete);
 
 	return IRQ_HANDLED;
 }
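
The reworked handler acknowledges whatever status it read, but ORs it into i2c->irq_stat and only completes the waiter once the accumulated bits include the completion (or restart) flag, so the ACK-error interrupt that arrives first is not lost. The accumulate-then-test shape on its own (bit values hypothetical):

#include <stdio.h>

#define INT_ACKERR	0x2
#define INT_COMP	0x1

static unsigned int irq_stat;

/* Returns 1 once the transfer should be completed */
static int handle_irq(unsigned int hw_stat)
{
	irq_stat |= hw_stat;		/* keep bits across interrupts */
	return !!(irq_stat & INT_COMP);
}

int main(void)
{
	printf("%d\n", handle_irq(INT_ACKERR));	/* 0: error seen, wait */
	printf("%d\n", handle_irq(INT_COMP));	/* 1: now complete */
	printf("ackerr preserved: %d\n", !!(irq_stat & INT_ACKERR));
	return 0;
}
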
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fc9bf7f..08d26ba 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -270,35 +270,35 @@
 	[OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
 };
 
-static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
+static inline void omap_i2c_write_reg(struct omap_i2c_dev *omap,
 				      int reg, u16 val)
 {
-	writew_relaxed(val, i2c_dev->base +
-			(i2c_dev->regs[reg] << i2c_dev->reg_shift));
+	writew_relaxed(val, omap->base +
+			(omap->regs[reg] << omap->reg_shift));
 }
 
-static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
+static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *omap, int reg)
 {
-	return readw_relaxed(i2c_dev->base +
-				(i2c_dev->regs[reg] << i2c_dev->reg_shift));
+	return readw_relaxed(omap->base +
+				(omap->regs[reg] << omap->reg_shift));
 }
 
-static void __omap_i2c_init(struct omap_i2c_dev *dev)
+static void __omap_i2c_init(struct omap_i2c_dev *omap)
 {
 
-	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
 
 	/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
-	omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
+	omap_i2c_write_reg(omap, OMAP_I2C_PSC_REG, omap->pscstate);
 
 	/* SCL low and high time values */
-	omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
-	omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
-	if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
-		omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+	omap_i2c_write_reg(omap, OMAP_I2C_SCLL_REG, omap->scllstate);
+	omap_i2c_write_reg(omap, OMAP_I2C_SCLH_REG, omap->sclhstate);
+	if (omap->rev >= OMAP_I2C_REV_ON_3430_3530)
+		omap_i2c_write_reg(omap, OMAP_I2C_WE_REG, omap->westate);
 
 	/* Take the I2C module out of reset: */
-	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
 
 	/*
 	 * NOTE: right after setting CON_EN, STAT_BB could be 0 while the
@@ -310,32 +310,32 @@
 	 * Don't write to this register if the IE state is 0 as it can
 	 * cause deadlock.
 	 */
-	if (dev->iestate)
-		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+	if (omap->iestate)
+		omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, omap->iestate);
 }
 
-static int omap_i2c_reset(struct omap_i2c_dev *dev)
+static int omap_i2c_reset(struct omap_i2c_dev *omap)
 {
 	unsigned long timeout;
 	u16 sysc;
 
-	if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
-		sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
+	if (omap->rev >= OMAP_I2C_OMAP1_REV_2) {
+		sysc = omap_i2c_read_reg(omap, OMAP_I2C_SYSC_REG);
 
 		/* Disable I2C controller before soft reset */
-		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
-			omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
+		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG,
+			omap_i2c_read_reg(omap, OMAP_I2C_CON_REG) &
 				~(OMAP_I2C_CON_EN));
 
-		omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
+		omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
 		/* For some reason we need to set the EN bit before the
 		 * reset done bit gets set. */
 		timeout = jiffies + OMAP_I2C_TIMEOUT;
-		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
-		while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) &
+		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+		while (!(omap_i2c_read_reg(omap, OMAP_I2C_SYSS_REG) &
 			 SYSS_RESETDONE_MASK)) {
 			if (time_after(jiffies, timeout)) {
-				dev_warn(dev->dev, "timeout waiting "
+				dev_warn(omap->dev, "timeout waiting "
 						"for controller reset\n");
 				return -ETIMEDOUT;
 			}
@@ -343,18 +343,18 @@
 		}
 
 		/* SYSC register is cleared by the reset; rewrite it */
-		omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+		omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, sysc);
 
-		if (dev->rev > OMAP_I2C_REV_ON_3430_3530) {
+		if (omap->rev > OMAP_I2C_REV_ON_3430_3530) {
 			/* Schedule I2C-bus monitoring on the next transfer */
-			dev->bb_valid = 0;
+			omap->bb_valid = 0;
 		}
 	}
 
 	return 0;
 }
 
-static int omap_i2c_init(struct omap_i2c_dev *dev)
+static int omap_i2c_init(struct omap_i2c_dev *omap)
 {
 	u16 psc = 0, scll = 0, sclh = 0;
 	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
@@ -362,23 +362,23 @@
 	unsigned long internal_clk = 0;
 	struct clk *fclk;
 
-	if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
+	if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
 		/*
 		 * Enabling all wakeup sources to stop I2C freezing on
 		 * WFI instruction.
 		 * REVISIT: Some wakeup sources might not be needed.
 		 */
-		dev->westate = OMAP_I2C_WE_ALL;
+		omap->westate = OMAP_I2C_WE_ALL;
 	}
 
-	if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+	if (omap->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
 		/*
 		 * The I2C functional clock is the armxor_ck, so there's
 		 * no need to get "armxor_ck" separately.  Now, if OMAP2420
 		 * always returns 12MHz for the functional clock, we can
 		 * do this bit unconditionally.
 		 */
-		fclk = clk_get(dev->dev, "fck");
+		fclk = clk_get(omap->dev, "fck");
 		fclk_rate = clk_get_rate(fclk);
 		clk_put(fclk);
 
@@ -395,7 +395,7 @@
 			psc = fclk_rate / 12000000;
 	}
 
-	if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+	if (!(omap->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
 
 		/*
 		 * HSI2C controller internal clk rate should be 19.2 MHz for
@@ -403,14 +403,14 @@
 		 * to get longer filter period for better noise suppression.
 		 * The filter is iclk (fclk for HS) period.
 		 */
-		if (dev->speed > 400 ||
-			       dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+		if (omap->speed > 400 ||
+			       omap->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
 			internal_clk = 19200;
-		else if (dev->speed > 100)
+		else if (omap->speed > 100)
 			internal_clk = 9600;
 		else
 			internal_clk = 4000;
-		fclk = clk_get(dev->dev, "fck");
+		fclk = clk_get(omap->dev, "fck");
 		fclk_rate = clk_get_rate(fclk) / 1000;
 		clk_put(fclk);
 
@@ -419,7 +419,7 @@
 		psc = psc - 1;
 
 		/* If configured for High Speed */
-		if (dev->speed > 400) {
+		if (omap->speed > 400) {
 			unsigned long scl;
 
 			/* For first phase of HS mode */
@@ -428,20 +428,20 @@
 			fssclh = (scl / 3) - 5;
 
 			/* For second phase of HS mode */
-			scl = fclk_rate / dev->speed;
+			scl = fclk_rate / omap->speed;
 			hsscll = scl - (scl / 3) - 7;
 			hssclh = (scl / 3) - 5;
-		} else if (dev->speed > 100) {
+		} else if (omap->speed > 100) {
 			unsigned long scl;
 
 			/* Fast mode */
-			scl = internal_clk / dev->speed;
+			scl = internal_clk / omap->speed;
 			fsscll = scl - (scl / 3) - 7;
 			fssclh = (scl / 3) - 5;
 		} else {
 			/* Standard mode */
-			fsscll = internal_clk / (dev->speed * 2) - 7;
-			fssclh = internal_clk / (dev->speed * 2) - 5;
+			fsscll = internal_clk / (omap->speed * 2) - 7;
+			fssclh = internal_clk / (omap->speed * 2) - 5;
 		}
 		scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
 		sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
@@ -450,25 +450,25 @@
 		fclk_rate /= (psc + 1) * 1000;
 		if (psc > 2)
 			psc = 2;
-		scll = fclk_rate / (dev->speed * 2) - 7 + psc;
-		sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
+		scll = fclk_rate / (omap->speed * 2) - 7 + psc;
+		sclh = fclk_rate / (omap->speed * 2) - 7 + psc;
 	}
 
-	dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
+	omap->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
 			OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
-			OMAP_I2C_IE_AL)  | ((dev->fifo_size) ?
+			OMAP_I2C_IE_AL)  | ((omap->fifo_size) ?
 				(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
 
-	dev->pscstate = psc;
-	dev->scllstate = scll;
-	dev->sclhstate = sclh;
+	omap->pscstate = psc;
+	omap->scllstate = scll;
+	omap->sclhstate = sclh;
 
-	if (dev->rev <= OMAP_I2C_REV_ON_3430_3530) {
+	if (omap->rev <= OMAP_I2C_REV_ON_3430_3530) {
 		/* Not implemented */
-		dev->bb_valid = 1;
+		omap->bb_valid = 1;
 	}
 
-	__omap_i2c_init(dev);
+	__omap_i2c_init(omap);
 
 	return 0;
 }
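
The accessors renamed at the top of this file compute register addresses as base + (regs[reg] << reg_shift), so one offset table serves IP revisions with different register strides. Reduced to a demo (offsets and base address hypothetical):

#include <stdint.h>
#include <stdio.h>

enum { REG_CON, REG_PSC, NUM_REGS };

static const uint8_t regs[NUM_REGS] = {
	[REG_CON] = 0x09,	/* logical offsets, scaled below */
	[REG_PSC] = 0x0c,
};

static uint32_t reg_addr(uint32_t base, int reg, int reg_shift)
{
	return base + (regs[reg] << reg_shift);
}

int main(void)
{
	/* same table, 1-byte vs 4-byte register stride */
	printf("%#x\n", reg_addr(0x48070000, REG_CON, 0));
	printf("%#x\n", reg_addr(0x48070000, REG_CON, 2));
	return 0;
}
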
@@ -476,14 +476,14 @@
 /*
  * Waiting on Bus Busy
  */
-static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
+static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
 {
 	unsigned long timeout;
 
 	timeout = jiffies + OMAP_I2C_TIMEOUT;
-	while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
+	while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
 		if (time_after(jiffies, timeout))
-			return i2c_recover_bus(&dev->adapter);
+			return i2c_recover_bus(&omap->adapter);
 		msleep(1);
 	}
 
@@ -518,19 +518,19 @@
  * 3. Any transfer started in the middle of another master's transfer
  *    results in unpredictable results and data corruption
  */
-static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *dev)
+static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *omap)
 {
 	unsigned long bus_free_timeout = 0;
 	unsigned long timeout;
 	int bus_free = 0;
 	u16 stat, systest;
 
-	if (dev->bb_valid)
+	if (omap->bb_valid)
 		return 0;
 
 	timeout = jiffies + OMAP_I2C_TIMEOUT;
 	while (1) {
-		stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+		stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
 		/*
 		 * We will see BB or BF event in a case IP had detected any
 		 * activity on the I2C bus. Now IP correctly tracks the bus
@@ -543,7 +543,7 @@
 		 * Otherwise, we must look signals on the bus to make
 		 * the right decision.
 		 */
-		systest = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+		systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG);
 		if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
 		    (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC)) {
 			if (!bus_free) {
@@ -564,22 +564,22 @@
 		}
 
 		if (time_after(jiffies, timeout)) {
-			dev_warn(dev->dev, "timeout waiting for bus ready\n");
+			dev_warn(omap->dev, "timeout waiting for bus ready\n");
 			return -ETIMEDOUT;
 		}
 
 		msleep(1);
 	}
 
-	dev->bb_valid = 1;
+	omap->bb_valid = 1;
 	return 0;
 }
 
-static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
+static void omap_i2c_resize_fifo(struct omap_i2c_dev *omap, u8 size, bool is_rx)
 {
 	u16		buf;
 
-	if (dev->flags & OMAP_I2C_FLAG_NO_FIFO)
+	if (omap->flags & OMAP_I2C_FLAG_NO_FIFO)
 		return;
 
 	/*
@@ -589,29 +589,29 @@
 	 * then we might use draining feature to transfer the remaining bytes.
 	 */
 
-	dev->threshold = clamp(size, (u8) 1, dev->fifo_size);
+	omap->threshold = clamp(size, (u8) 1, omap->fifo_size);
 
-	buf = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
+	buf = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
 
 	if (is_rx) {
 		/* Clear RX Threshold */
 		buf &= ~(0x3f << 8);
-		buf |= ((dev->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR;
+		buf |= ((omap->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR;
 	} else {
 		/* Clear TX Threshold */
 		buf &= ~0x3f;
-		buf |= (dev->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR;
+		buf |= (omap->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR;
 	}
 
-	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
+	omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, buf);
 
-	if (dev->rev < OMAP_I2C_REV_ON_3630)
-		dev->b_hw = 1; /* Enable hardware fixes */
+	if (omap->rev < OMAP_I2C_REV_ON_3630)
+		omap->b_hw = 1; /* Enable hardware fixes */
 
 	/* calculate wakeup latency constraint for MPU */
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->latency = (1000000 * dev->threshold) /
-			(1000 * dev->speed / 8);
+	if (omap->set_mpu_wkup_lat != NULL)
+		omap->latency = (1000000 * omap->threshold) /
+			(1000 * omap->speed / 8);
 }
 
 /*
@@ -620,42 +620,42 @@
 static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
 			     struct i2c_msg *msg, int stop)
 {
-	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
+	struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
 	unsigned long timeout;
 	u16 w;
 
-	dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+	dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
 		msg->addr, msg->len, msg->flags, stop);
 
 	if (msg->len == 0)
 		return -EINVAL;
 
-	dev->receiver = !!(msg->flags & I2C_M_RD);
-	omap_i2c_resize_fifo(dev, msg->len, dev->receiver);
+	omap->receiver = !!(msg->flags & I2C_M_RD);
+	omap_i2c_resize_fifo(omap, msg->len, omap->receiver);
 
-	omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);
+	omap_i2c_write_reg(omap, OMAP_I2C_SA_REG, msg->addr);
 
 	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
-	dev->buf = msg->buf;
-	dev->buf_len = msg->len;
+	omap->buf = msg->buf;
+	omap->buf_len = msg->len;
 
-	/* make sure writes to dev->buf_len are ordered */
+	/* make sure writes to omap->buf_len are ordered */
 	barrier();
 
-	omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
+	omap_i2c_write_reg(omap, OMAP_I2C_CNT_REG, omap->buf_len);
 
 	/* Clear the FIFO Buffers */
-	w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
+	w = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
 	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
-	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
+	omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);
 
-	reinit_completion(&dev->cmd_complete);
-	dev->cmd_err = 0;
+	reinit_completion(&omap->cmd_complete);
+	omap->cmd_err = 0;
 
 	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
 
 	/* High speed configuration */
-	if (dev->speed > 400)
+	if (omap->speed > 400)
 		w |= OMAP_I2C_CON_OPMODE_HS;
 
 	if (msg->flags & I2C_M_STOP)
@@ -665,27 +665,27 @@
 	if (!(msg->flags & I2C_M_RD))
 		w |= OMAP_I2C_CON_TRX;
 
-	if (!dev->b_hw && stop)
+	if (!omap->b_hw && stop)
 		w |= OMAP_I2C_CON_STP;
 	/*
 	 * NOTE: the STAT_BB bit could become 1 here if another master
 	 * occupies the bus. The IP successfully completes the transfer
 	 * once the bus is free again (BB reset to 0).
 	 */
-	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
 
 	/*
 	 * Don't write stt and stp together on some hardware.
 	 */
-	if (dev->b_hw && stop) {
+	if (omap->b_hw && stop) {
 		unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
-		u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+		u16 con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
 		while (con & OMAP_I2C_CON_STT) {
-			con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+			con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
 
 			/* Let the user know if i2c is in a bad state */
 			if (time_after(jiffies, delay)) {
-				dev_err(dev->dev, "controller timed out "
+				dev_err(omap->dev, "controller timed out "
 				"waiting for start condition to finish\n");
 				return -ETIMEDOUT;
 			}
@@ -694,42 +694,42 @@
 
 		w |= OMAP_I2C_CON_STP;
 		w &= ~OMAP_I2C_CON_STT;
-		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
 	}
 
 	/*
 	 * REVISIT: We should abort the transfer on signals, but the bus goes
 	 * into arbitration and we're currently unable to recover from it.
 	 */
-	timeout = wait_for_completion_timeout(&dev->cmd_complete,
+	timeout = wait_for_completion_timeout(&omap->cmd_complete,
 						OMAP_I2C_TIMEOUT);
 	if (timeout == 0) {
-		dev_err(dev->dev, "controller timed out\n");
-		omap_i2c_reset(dev);
-		__omap_i2c_init(dev);
+		dev_err(omap->dev, "controller timed out\n");
+		omap_i2c_reset(omap);
+		__omap_i2c_init(omap);
 		return -ETIMEDOUT;
 	}
 
-	if (likely(!dev->cmd_err))
+	if (likely(!omap->cmd_err))
 		return 0;
 
 	/* We have an error */
-	if (dev->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
-		omap_i2c_reset(dev);
-		__omap_i2c_init(dev);
+	if (omap->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
+		omap_i2c_reset(omap);
+		__omap_i2c_init(omap);
 		return -EIO;
 	}
 
-	if (dev->cmd_err & OMAP_I2C_STAT_AL)
+	if (omap->cmd_err & OMAP_I2C_STAT_AL)
 		return -EAGAIN;
 
-	if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
+	if (omap->cmd_err & OMAP_I2C_STAT_NACK) {
 		if (msg->flags & I2C_M_IGNORE_NAK)
 			return 0;
 
-		w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+		w = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
 		w |= OMAP_I2C_CON_STP;
-		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
 		return -EREMOTEIO;
 	}
 	return -EIO;
@@ -743,24 +743,24 @@
 static int
 omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
-	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
+	struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
 	int i;
 	int r;
 
-	r = pm_runtime_get_sync(dev->dev);
+	r = pm_runtime_get_sync(omap->dev);
 	if (r < 0)
 		goto out;
 
-	r = omap_i2c_wait_for_bb_valid(dev);
+	r = omap_i2c_wait_for_bb_valid(omap);
 	if (r < 0)
 		goto out;
 
-	r = omap_i2c_wait_for_bb(dev);
+	r = omap_i2c_wait_for_bb(omap);
 	if (r < 0)
 		goto out;
 
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+	if (omap->set_mpu_wkup_lat != NULL)
+		omap->set_mpu_wkup_lat(omap->dev, omap->latency);
 
 	for (i = 0; i < num; i++) {
 		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -771,14 +771,14 @@
 	if (r == 0)
 		r = num;
 
-	omap_i2c_wait_for_bb(dev);
+	omap_i2c_wait_for_bb(omap);
 
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, -1);
+	if (omap->set_mpu_wkup_lat != NULL)
+		omap->set_mpu_wkup_lat(omap->dev, -1);
 
 out:
-	pm_runtime_mark_last_busy(dev->dev);
-	pm_runtime_put_autosuspend(dev->dev);
+	pm_runtime_mark_last_busy(omap->dev);
+	pm_runtime_put_autosuspend(omap->dev);
 	return r;
 }
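
The get/put pairing in omap_i2c_xfer() is the standard runtime-PM autosuspend
idiom. A minimal sketch (foo_xfer and its device pointer are hypothetical
names):

	#include <linux/pm_runtime.h>

	static int foo_xfer(struct device *dev)		/* hypothetical */
	{
		int ret;

		ret = pm_runtime_get_sync(dev);		/* resume, synchronously */
		if (ret < 0)
			goto out;	/* usage count was still incremented */

		/* ... touch the hardware ... */
		ret = 0;
	out:
		/* re-arm the inactivity timer instead of suspending at once */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

pm_runtime_get_sync() bumps the usage count even when it fails, which is why
the error path above still goes through the put, just as this driver does.
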
 
@@ -790,19 +790,19 @@
 }
 
 static inline void
-omap_i2c_complete_cmd(struct omap_i2c_dev *dev, u16 err)
+omap_i2c_complete_cmd(struct omap_i2c_dev *omap, u16 err)
 {
-	dev->cmd_err |= err;
-	complete(&dev->cmd_complete);
+	omap->cmd_err |= err;
+	complete(&omap->cmd_complete);
 }
 
 static inline void
-omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
+omap_i2c_ack_stat(struct omap_i2c_dev *omap, u16 stat)
 {
-	omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
+	omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, stat);
 }
 
-static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
+static inline void i2c_omap_errata_i207(struct omap_i2c_dev *omap, u16 stat)
 {
 	/*
 	 * I2C Errata (Errata Nos. OMAP2: 1.67, OMAP3: 1.8)
@@ -813,17 +813,17 @@
 	 */
 	if (stat & OMAP_I2C_STAT_RDR) {
 		/* Step 1: If RDR is set, clear it */
-		omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+		omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
 
 		/* Step 2: */
-		if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+		if (!(omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG)
 						& OMAP_I2C_STAT_BB)) {
 
 			/* Step 3: */
-			if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+			if (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG)
 						& OMAP_I2C_STAT_RDR) {
-				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
-				dev_dbg(dev->dev, "RDR when bus is busy.\n");
+				omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
+				dev_dbg(omap->dev, "RDR when bus is busy.\n");
 			}
 
 		}
@@ -836,50 +836,50 @@
 static irqreturn_t
 omap_i2c_omap1_isr(int this_irq, void *dev_id)
 {
-	struct omap_i2c_dev *dev = dev_id;
+	struct omap_i2c_dev *omap = dev_id;
 	u16 iv, w;
 
-	if (pm_runtime_suspended(dev->dev))
+	if (pm_runtime_suspended(omap->dev))
 		return IRQ_NONE;
 
-	iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG);
+	iv = omap_i2c_read_reg(omap, OMAP_I2C_IV_REG);
 	switch (iv) {
 	case 0x00:	/* None */
 		break;
 	case 0x01:	/* Arbitration lost */
-		dev_err(dev->dev, "Arbitration lost\n");
-		omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL);
+		dev_err(omap->dev, "Arbitration lost\n");
+		omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_AL);
 		break;
 	case 0x02:	/* No acknowledgement */
-		omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK);
-		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP);
+		omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_NACK);
+		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP);
 		break;
 	case 0x03:	/* Register access ready */
-		omap_i2c_complete_cmd(dev, 0);
+		omap_i2c_complete_cmd(omap, 0);
 		break;
 	case 0x04:	/* Receive data ready */
-		if (dev->buf_len) {
-			w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
-			*dev->buf++ = w;
-			dev->buf_len--;
-			if (dev->buf_len) {
-				*dev->buf++ = w >> 8;
-				dev->buf_len--;
+		if (omap->buf_len) {
+			w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG);
+			*omap->buf++ = w;
+			omap->buf_len--;
+			if (omap->buf_len) {
+				*omap->buf++ = w >> 8;
+				omap->buf_len--;
 			}
 		} else
-			dev_err(dev->dev, "RRDY IRQ while no data requested\n");
+			dev_err(omap->dev, "RRDY IRQ while no data requested\n");
 		break;
 	case 0x05:	/* Transmit data ready */
-		if (dev->buf_len) {
-			w = *dev->buf++;
-			dev->buf_len--;
-			if (dev->buf_len) {
-				w |= *dev->buf++ << 8;
-				dev->buf_len--;
+		if (omap->buf_len) {
+			w = *omap->buf++;
+			omap->buf_len--;
+			if (omap->buf_len) {
+				w |= *omap->buf++ << 8;
+				omap->buf_len--;
 			}
-			omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
+			omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w);
 		} else
-			dev_err(dev->dev, "XRDY IRQ while no data to send\n");
+			dev_err(omap->dev, "XRDY IRQ while no data to send\n");
 		break;
 	default:
 		return IRQ_NONE;
@@ -896,28 +896,28 @@
  * data to DATA_REG. Otherwise some data bytes can be lost while transferring
  * them from the memory to the I2C interface.
  */
-static int errata_omap3_i462(struct omap_i2c_dev *dev)
+static int errata_omap3_i462(struct omap_i2c_dev *omap)
 {
 	unsigned long timeout = 10000;
 	u16 stat;
 
 	do {
-		stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+		stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
 		if (stat & OMAP_I2C_STAT_XUDF)
 			break;
 
 		if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
-			omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_XRDY |
+			omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_XRDY |
 							OMAP_I2C_STAT_XDR));
 			if (stat & OMAP_I2C_STAT_NACK) {
-				dev->cmd_err |= OMAP_I2C_STAT_NACK;
-				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+				omap->cmd_err |= OMAP_I2C_STAT_NACK;
+				omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
 			}
 
 			if (stat & OMAP_I2C_STAT_AL) {
-				dev_err(dev->dev, "Arbitration lost\n");
-				dev->cmd_err |= OMAP_I2C_STAT_AL;
-				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
+				dev_err(omap->dev, "Arbitration lost\n");
+				omap->cmd_err |= OMAP_I2C_STAT_AL;
+				omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL);
 			}
 
 			return -EIO;
@@ -927,61 +927,61 @@
 	} while (--timeout);
 
 	if (!timeout) {
-		dev_err(dev->dev, "timeout waiting on XUDF bit\n");
+		dev_err(omap->dev, "timeout waiting on XUDF bit\n");
 		return 0;
 	}
 
 	return 0;
 }
 
-static void omap_i2c_receive_data(struct omap_i2c_dev *dev, u8 num_bytes,
+static void omap_i2c_receive_data(struct omap_i2c_dev *omap, u8 num_bytes,
 		bool is_rdr)
 {
 	u16		w;
 
 	while (num_bytes--) {
-		w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
-		*dev->buf++ = w;
-		dev->buf_len--;
+		w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG);
+		*omap->buf++ = w;
+		omap->buf_len--;
 
 		/*
 		 * Data reg in 2430, omap3 and
 		 * omap4 is 8 bit wide
 		 */
-		if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
-			*dev->buf++ = w >> 8;
-			dev->buf_len--;
+		if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
+			*omap->buf++ = w >> 8;
+			omap->buf_len--;
 		}
 	}
 }
 
-static int omap_i2c_transmit_data(struct omap_i2c_dev *dev, u8 num_bytes,
+static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
 		bool is_xdr)
 {
 	u16		w;
 
 	while (num_bytes--) {
-		w = *dev->buf++;
-		dev->buf_len--;
+		w = *omap->buf++;
+		omap->buf_len--;
 
 		/*
 		 * Data reg in 2430, omap3 and
 		 * omap4 is 8 bit wide
 		 */
-		if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
-			w |= *dev->buf++ << 8;
-			dev->buf_len--;
+		if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
+			w |= *omap->buf++ << 8;
+			omap->buf_len--;
 		}
 
-		if (dev->errata & I2C_OMAP_ERRATA_I462) {
+		if (omap->errata & I2C_OMAP_ERRATA_I462) {
 			int ret;
 
-			ret = errata_omap3_i462(dev);
+			ret = errata_omap3_i462(omap);
 			if (ret < 0)
 				return ret;
 		}
 
-		omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
+		omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w);
 	}
 
 	return 0;
@@ -990,19 +990,19 @@
 static irqreturn_t
 omap_i2c_isr(int irq, void *dev_id)
 {
-	struct omap_i2c_dev *dev = dev_id;
+	struct omap_i2c_dev *omap = dev_id;
 	irqreturn_t ret = IRQ_HANDLED;
 	u16 mask;
 	u16 stat;
 
-	spin_lock(&dev->lock);
-	mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-	stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+	spin_lock(&omap->lock);
+	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
 
 	if (stat & mask)
 		ret = IRQ_WAKE_THREAD;
 
-	spin_unlock(&dev->lock);
+	spin_unlock(&omap->lock);
 
 	return ret;
 }
@@ -1010,20 +1010,20 @@
 static irqreturn_t
 omap_i2c_isr_thread(int this_irq, void *dev_id)
 {
-	struct omap_i2c_dev *dev = dev_id;
+	struct omap_i2c_dev *omap = dev_id;
 	unsigned long flags;
 	u16 bits;
 	u16 stat;
 	int err = 0, count = 0;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	spin_lock_irqsave(&omap->lock, flags);
 	do {
-		bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-		stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+		bits = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+		stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
 		stat &= bits;
 
 		/* If we're in receiver mode, ignore XDR/XRDY */
-		if (dev->receiver)
+		if (omap->receiver)
 			stat &= ~(OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_XRDY);
 		else
 			stat &= ~(OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_RRDY);
@@ -1033,32 +1033,32 @@
 			goto out;
 		}
 
-		dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat);
+		dev_dbg(omap->dev, "IRQ (ISR = 0x%04x)\n", stat);
 		if (count++ == 100) {
-			dev_warn(dev->dev, "Too much work in one IRQ\n");
+			dev_warn(omap->dev, "Too much work in one IRQ\n");
 			break;
 		}
 
 		if (stat & OMAP_I2C_STAT_NACK) {
 			err |= OMAP_I2C_STAT_NACK;
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
 		}
 
 		if (stat & OMAP_I2C_STAT_AL) {
-			dev_err(dev->dev, "Arbitration lost\n");
+			dev_err(omap->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL);
 		}
 
 		/*
 		 * ProDB0017052: Clear ARDY bit twice
 		 */
 		if (stat & OMAP_I2C_STAT_ARDY)
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ARDY);
 
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
-			omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
+			omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_RRDY |
 						OMAP_I2C_STAT_RDR |
 						OMAP_I2C_STAT_XRDY |
 						OMAP_I2C_STAT_XDR |
@@ -1069,28 +1069,28 @@
 		if (stat & OMAP_I2C_STAT_RDR) {
 			u8 num_bytes = 1;
 
-			if (dev->fifo_size)
-				num_bytes = dev->buf_len;
+			if (omap->fifo_size)
+				num_bytes = omap->buf_len;
 
-			if (dev->errata & I2C_OMAP_ERRATA_I207) {
-				i2c_omap_errata_i207(dev, stat);
-				num_bytes = (omap_i2c_read_reg(dev,
+			if (omap->errata & I2C_OMAP_ERRATA_I207) {
+				i2c_omap_errata_i207(omap, stat);
+				num_bytes = (omap_i2c_read_reg(omap,
 					OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
 			}
 
-			omap_i2c_receive_data(dev, num_bytes, true);
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+			omap_i2c_receive_data(omap, num_bytes, true);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
 			continue;
 		}
 
 		if (stat & OMAP_I2C_STAT_RRDY) {
 			u8 num_bytes = 1;
 
-			if (dev->threshold)
-				num_bytes = dev->threshold;
+			if (omap->threshold)
+				num_bytes = omap->threshold;
 
-			omap_i2c_receive_data(dev, num_bytes, false);
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RRDY);
+			omap_i2c_receive_data(omap, num_bytes, false);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RRDY);
 			continue;
 		}
 
@@ -1098,14 +1098,14 @@
 			u8 num_bytes = 1;
 			int ret;
 
-			if (dev->fifo_size)
-				num_bytes = dev->buf_len;
+			if (omap->fifo_size)
+				num_bytes = omap->buf_len;
 
-			ret = omap_i2c_transmit_data(dev, num_bytes, true);
+			ret = omap_i2c_transmit_data(omap, num_bytes, true);
 			if (ret < 0)
 				break;
 
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XDR);
 			continue;
 		}
 
@@ -1113,36 +1113,36 @@
 			u8 num_bytes = 1;
 			int ret;
 
-			if (dev->threshold)
-				num_bytes = dev->threshold;
+			if (omap->threshold)
+				num_bytes = omap->threshold;
 
-			ret = omap_i2c_transmit_data(dev, num_bytes, false);
+			ret = omap_i2c_transmit_data(omap, num_bytes, false);
 			if (ret < 0)
 				break;
 
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XRDY);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XRDY);
 			continue;
 		}
 
 		if (stat & OMAP_I2C_STAT_ROVR) {
-			dev_err(dev->dev, "Receive overrun\n");
+			dev_err(omap->dev, "Receive overrun\n");
 			err |= OMAP_I2C_STAT_ROVR;
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ROVR);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ROVR);
 			break;
 		}
 
 		if (stat & OMAP_I2C_STAT_XUDF) {
-			dev_err(dev->dev, "Transmit underflow\n");
+			dev_err(omap->dev, "Transmit underflow\n");
 			err |= OMAP_I2C_STAT_XUDF;
-			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XUDF);
+			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XUDF);
 			break;
 		}
 	} while (stat);
 
-	omap_i2c_complete_cmd(dev, err);
+	omap_i2c_complete_cmd(omap, err);
 
 out:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	spin_unlock_irqrestore(&omap->lock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -1284,7 +1284,7 @@
 static int
 omap_i2c_probe(struct platform_device *pdev)
 {
-	struct omap_i2c_dev	*dev;
+	struct omap_i2c_dev	*omap;
 	struct i2c_adapter	*adap;
 	struct resource		*mem;
 	const struct omap_i2c_bus_platform_data *pdata =
@@ -1302,46 +1302,46 @@
 		return irq;
 	}
 
-	dev = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL);
-	if (!dev)
+	omap = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL);
+	if (!omap)
 		return -ENOMEM;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dev->base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(dev->base))
-		return PTR_ERR(dev->base);
+	omap->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(omap->base))
+		return PTR_ERR(omap->base);
 
 	match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
 	if (match) {
 		u32 freq = 100000; /* default to 100000 Hz */
 
 		pdata = match->data;
-		dev->flags = pdata->flags;
+		omap->flags = pdata->flags;
 
 		of_property_read_u32(node, "clock-frequency", &freq);
 		/* convert DT freq value in Hz into kHz for speed */
-		dev->speed = freq / 1000;
+		omap->speed = freq / 1000;
 	} else if (pdata != NULL) {
-		dev->speed = pdata->clkrate;
-		dev->flags = pdata->flags;
-		dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
+		omap->speed = pdata->clkrate;
+		omap->flags = pdata->flags;
+		omap->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
 	}
 
-	dev->dev = &pdev->dev;
-	dev->irq = irq;
+	omap->dev = &pdev->dev;
+	omap->irq = irq;
 
-	spin_lock_init(&dev->lock);
+	spin_lock_init(&omap->lock);
 
-	platform_set_drvdata(pdev, dev);
-	init_completion(&dev->cmd_complete);
+	platform_set_drvdata(pdev, omap);
+	init_completion(&omap->cmd_complete);
 
-	dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+	omap->reg_shift = (omap->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
 
-	pm_runtime_enable(dev->dev);
-	pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
-	pm_runtime_use_autosuspend(dev->dev);
+	pm_runtime_enable(omap->dev);
+	pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
+	pm_runtime_use_autosuspend(omap->dev);
 
-	r = pm_runtime_get_sync(dev->dev);
+	r = pm_runtime_get_sync(omap->dev);
 	if (r < 0)
 		goto err_free_mem;
 
@@ -1351,42 +1351,42 @@
 	 * Also since the omap_i2c_read_reg uses reg_map_ip_* a
 	 * readw_relaxed is done.
 	 */
-	rev = readw_relaxed(dev->base + 0x04);
+	rev = readw_relaxed(omap->base + 0x04);
 
-	dev->scheme = OMAP_I2C_SCHEME(rev);
-	switch (dev->scheme) {
+	omap->scheme = OMAP_I2C_SCHEME(rev);
+	switch (omap->scheme) {
 	case OMAP_I2C_SCHEME_0:
-		dev->regs = (u8 *)reg_map_ip_v1;
-		dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
-		minor = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
-		major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
+		omap->regs = (u8 *)reg_map_ip_v1;
+		omap->rev = omap_i2c_read_reg(omap, OMAP_I2C_REV_REG);
+		minor = OMAP_I2C_REV_SCHEME_0_MINOR(omap->rev);
+		major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev);
 		break;
 	case OMAP_I2C_SCHEME_1:
 		/* FALLTHROUGH */
 	default:
-		dev->regs = (u8 *)reg_map_ip_v2;
+		omap->regs = (u8 *)reg_map_ip_v2;
 		rev = (rev << 16) |
-			omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
+			omap_i2c_read_reg(omap, OMAP_I2C_IP_V2_REVNB_LO);
 		minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
 		major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
-		dev->rev = rev;
+		omap->rev = rev;
 	}
 
-	dev->errata = 0;
+	omap->errata = 0;
 
-	if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
-			dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
-		dev->errata |= I2C_OMAP_ERRATA_I207;
+	if (omap->rev >= OMAP_I2C_REV_ON_2430 &&
+			omap->rev < OMAP_I2C_REV_ON_4430_PLUS)
+		omap->errata |= I2C_OMAP_ERRATA_I207;
 
-	if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
-		dev->errata |= I2C_OMAP_ERRATA_I462;
+	if (omap->rev <= OMAP_I2C_REV_ON_3430_3530)
+		omap->errata |= I2C_OMAP_ERRATA_I462;
 
-	if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+	if (!(omap->flags & OMAP_I2C_FLAG_NO_FIFO)) {
 		u16 s;
 
 		/* Set up the fifo size - Get total size */
-		s = (omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3;
-		dev->fifo_size = 0x8 << s;
+		s = (omap_i2c_read_reg(omap, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3;
+		omap->fifo_size = 0x8 << s;
 
 		/*
 		 * Set up notification threshold as half the total available
@@ -1394,36 +1394,36 @@
 		 * call back latencies.
 		 */
 
-		dev->fifo_size = (dev->fifo_size / 2);
+		omap->fifo_size = (omap->fifo_size / 2);
 
-		if (dev->rev < OMAP_I2C_REV_ON_3630)
-			dev->b_hw = 1; /* Enable hardware fixes */
+		if (omap->rev < OMAP_I2C_REV_ON_3630)
+			omap->b_hw = 1; /* Enable hardware fixes */
 
 		/* calculate wakeup latency constraint for MPU */
-		if (dev->set_mpu_wkup_lat != NULL)
-			dev->latency = (1000000 * dev->fifo_size) /
-				       (1000 * dev->speed / 8);
+		if (omap->set_mpu_wkup_lat != NULL)
+			omap->latency = (1000000 * omap->fifo_size) /
+				       (1000 * omap->speed / 8);
 	}
 
 	/* reset ASAP, clearing any IRQs */
-	omap_i2c_init(dev);
+	omap_i2c_init(omap);
 
-	if (dev->rev < OMAP_I2C_OMAP1_REV_2)
-		r = devm_request_irq(&pdev->dev, dev->irq, omap_i2c_omap1_isr,
-				IRQF_NO_SUSPEND, pdev->name, dev);
+	if (omap->rev < OMAP_I2C_OMAP1_REV_2)
+		r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
+				IRQF_NO_SUSPEND, pdev->name, omap);
 	else
-		r = devm_request_threaded_irq(&pdev->dev, dev->irq,
+		r = devm_request_threaded_irq(&pdev->dev, omap->irq,
 				omap_i2c_isr, omap_i2c_isr_thread,
 				IRQF_NO_SUSPEND | IRQF_ONESHOT,
-				pdev->name, dev);
+				pdev->name, omap);
 
 	if (r) {
-		dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
+		dev_err(omap->dev, "failure requesting irq %i\n", omap->irq);
 		goto err_unuse_clocks;
 	}
 
-	adap = &dev->adapter;
-	i2c_set_adapdata(adap, dev);
+	adap = &omap->adapter;
+	i2c_set_adapdata(adap, omap);
 	adap->owner = THIS_MODULE;
 	adap->class = I2C_CLASS_DEPRECATED;
 	strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
@@ -1436,21 +1436,21 @@
 	adap->nr = pdev->id;
 	r = i2c_add_numbered_adapter(adap);
 	if (r) {
-		dev_err(dev->dev, "failure adding adapter\n");
+		dev_err(omap->dev, "failure adding adapter\n");
 		goto err_unuse_clocks;
 	}
 
-	dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
-		 major, minor, dev->speed);
+	dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
+		 major, minor, omap->speed);
 
-	pm_runtime_mark_last_busy(dev->dev);
-	pm_runtime_put_autosuspend(dev->dev);
+	pm_runtime_mark_last_busy(omap->dev);
+	pm_runtime_put_autosuspend(omap->dev);
 
 	return 0;
 
 err_unuse_clocks:
-	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
-	pm_runtime_put(dev->dev);
+	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+	pm_runtime_put(omap->dev);
 	pm_runtime_disable(&pdev->dev);
 err_free_mem:
 
@@ -1459,16 +1459,16 @@
 
 static int omap_i2c_remove(struct platform_device *pdev)
 {
-	struct omap_i2c_dev	*dev = platform_get_drvdata(pdev);
+	struct omap_i2c_dev	*omap = platform_get_drvdata(pdev);
 	int ret;
 
-	i2c_del_adapter(&dev->adapter);
+	i2c_del_adapter(&omap->adapter);
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0)
 		return ret;
 
-	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
-	pm_runtime_put(&pdev->dev);
+	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
@@ -1476,24 +1476,23 @@
 #ifdef CONFIG_PM
 static int omap_i2c_runtime_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
-	_dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
+	omap->iestate = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
 
-	if (_dev->scheme == OMAP_I2C_SCHEME_0)
-		omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
+	if (omap->scheme == OMAP_I2C_SCHEME_0)
+		omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, 0);
 	else
-		omap_i2c_write_reg(_dev, OMAP_I2C_IP_V2_IRQENABLE_CLR,
+		omap_i2c_write_reg(omap, OMAP_I2C_IP_V2_IRQENABLE_CLR,
 				   OMAP_I2C_IP_V2_INTERRUPTS_MASK);
 
-	if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
-		omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+	if (omap->rev < OMAP_I2C_OMAP1_REV_2) {
+		omap_i2c_read_reg(omap, OMAP_I2C_IV_REG); /* Read clears */
 	} else {
-		omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
+		omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, omap->iestate);
 
 		/* Flush posted write */
-		omap_i2c_read_reg(_dev, OMAP_I2C_STAT_REG);
+		omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
 	}
 
 	pinctrl_pm_select_sleep_state(dev);
@@ -1503,15 +1502,14 @@
 
 static int omap_i2c_runtime_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
 	pinctrl_pm_select_default_state(dev);
 
-	if (!_dev->regs)
+	if (!omap->regs)
 		return 0;
 
-	__omap_i2c_init(_dev);
+	__omap_i2c_init(omap);
 
 	return 0;
 }
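
The two runtime-PM callbacks above can take the shortcut because probe stores
the pointer via the embedded struct device. In sketch form:

	/* probe stored the pointer with:
	 *     platform_set_drvdata(pdev, omap);
	 * which is just dev_set_drvdata(&pdev->dev, omap), so
	 */
	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
	/* is equivalent to the longer round-trip it replaces:
	 *     platform_get_drvdata(to_platform_device(dev));
	 */
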
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 9b94c3d..a8e54df 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -20,6 +20,8 @@
    GNU General Public License for more details.
  * ------------------------------------------------------------------------ */
 
+#define pr_fmt(fmt) "i2c-parport: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
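
Placing the pr_fmt define before the includes matters: printk.h only installs
its default prefix-less pr_fmt if none is defined yet. With the define in
place, every pr_*() call in the file picks up the prefix at compile time:

	/* include/linux/printk.h effectively expands
	 *     pr_err(fmt, ...) -> printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	 * so, with the define above,
	 */
	pr_err("Unable to register with parport\n");
	/* emits "i2c-parport: Unable to register with parport\n" */
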
@@ -176,26 +178,24 @@
 			break;
 	}
 	if (i == MAX_DEVICE) {
-		pr_debug("i2c-parport: Not using parport%d.\n", port->number);
+		pr_debug("Not using parport%d.\n", port->number);
 		return;
 	}
 
 	adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
-	if (adapter == NULL) {
-		printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
+	if (!adapter)
 		return;
-	}
 	memset(&i2c_parport_cb, 0, sizeof(i2c_parport_cb));
 	i2c_parport_cb.flags = PARPORT_FLAG_EXCL;
 	i2c_parport_cb.irq_func = i2c_parport_irq;
 	i2c_parport_cb.private = adapter;
 
-	pr_debug("i2c-parport: attaching to %s\n", port->name);
+	pr_debug("attaching to %s\n", port->name);
 	parport_disable_irq(port);
 	adapter->pdev = parport_register_dev_model(port, "i2c-parport",
 						   &i2c_parport_cb, i);
 	if (!adapter->pdev) {
-		printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
+		pr_err("Unable to register with parport\n");
 		goto err_free;
 	}
 
@@ -215,7 +215,8 @@
 	adapter->adapter.dev.parent = port->physport->dev;
 
 	if (parport_claim_or_block(adapter->pdev) < 0) {
-		printk(KERN_ERR "i2c-parport: Could not claim parallel port\n");
+		dev_err(&adapter->pdev->dev,
+			"Could not claim parallel port\n");
 		goto err_unregister;
 	}
 
@@ -230,7 +231,7 @@
 	}
 
 	if (i2c_bit_add_bus(&adapter->adapter) < 0) {
-		printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
+		dev_err(&adapter->pdev->dev, "Unable to register with I2C\n");
 		goto err_unregister;
 	}
 
@@ -242,8 +243,8 @@
 		if (adapter->ara)
 			parport_enable_irq(port);
 		else
-			printk(KERN_WARNING "i2c-parport: Failed to register "
-			       "ARA client\n");
+			dev_warn(&adapter->pdev->dev,
+				 "Failed to register ARA client\n");
 	}
 
 	/* Add the new adapter to the list */
@@ -298,12 +299,12 @@
 static int __init i2c_parport_init(void)
 {
 	if (type < 0) {
-		printk(KERN_WARNING "i2c-parport: adapter type unspecified\n");
+		pr_warn("adapter type unspecified\n");
 		return -ENODEV;
 	}
 
 	if (type >= ARRAY_SIZE(adapter_parm)) {
-		printk(KERN_WARNING "i2c-parport: invalid type (%d)\n", type);
+		pr_warn("invalid type (%d)\n", type);
 		return -ENODEV;
 	}
 
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index 4e12945..84a6616 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -89,6 +89,13 @@
 		.getsda	= { 0x80, PORT_STAT, 1 },
 		.init	= { 0x04, PORT_DATA, 1 },
 	},
+	/* type 8: VCT-jig */
+	{
+		.setsda	= { 0x04, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x40, PORT_STAT, 0 },
+		.getscl	= { 0x80, PORT_STAT, 1 },
+	},
 };
 
 static int type = -1;
@@ -103,4 +110,5 @@
 	" 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
 	" 6 = Barco LPT->DVI (K5800236) adapter\n"
 	" 7 = One For All JP1 parallel port adapter\n"
+	" 8 = VCT-jig\n"
 );
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index d9c0d6a..645e4b7 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -132,6 +132,7 @@
 	unsigned int		msg_idx;
 	unsigned int		msg_ptr;
 	unsigned int		slave_addr;
+	unsigned int		req_slave_addr;
 
 	struct i2c_adapter	adap;
 	struct clk		*clk;
@@ -253,15 +254,20 @@
 static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
 {
 	unsigned int i;
-	printk(KERN_ERR "i2c: error: %s\n", why);
-	printk(KERN_ERR "i2c: msg_num: %d msg_idx: %d msg_ptr: %d\n",
+	struct device *dev = &i2c->adap.dev;
+
+	dev_err(dev, "slave_0x%x error: %s\n",
+		i2c->req_slave_addr >> 1, why);
+	dev_err(dev, "msg_num: %d msg_idx: %d msg_ptr: %d\n",
 		i2c->msg_num, i2c->msg_idx, i2c->msg_ptr);
-	printk(KERN_ERR "i2c: ICR: %08x ISR: %08x\n",
-	       readl(_ICR(i2c)), readl(_ISR(i2c)));
-	printk(KERN_DEBUG "i2c: log: ");
+	dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n",
+		readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)),
+		readl(_ISR(i2c)));
+	dev_dbg(dev, "log: ");
 	for (i = 0; i < i2c->irqlogidx; i++)
-		printk("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]);
-	printk("\n");
+		pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]);
+
+	pr_debug("\n");
 }
 
 #else /* ifdef DEBUG */
@@ -459,7 +465,7 @@
 	writel(I2C_ISR_INIT, _ISR(i2c));
 	writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
 
-	if (i2c->reg_isar)
+	if (i2c->reg_isar && IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
 		writel(i2c->slave_addr, _ISAR(i2c));
 
 	/* set control register values */
@@ -638,6 +644,7 @@
 	 * Step 1: target slave address into IDBR
 	 */
 	writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c));
+	i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg);
 
 	/*
 	 * Step 2: initiate the write.
@@ -745,8 +752,10 @@
 	ret = i2c->msg_idx;
 
 out:
-	if (timeout == 0)
+	if (timeout == 0) {
 		i2c_pxa_scream_blue_murder(i2c, "timeout");
+		ret = I2C_RETRY;
+	}
 
 	return ret;
 }
@@ -949,6 +958,7 @@
 		 * Write the next address.
 		 */
 		writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c));
+		i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg);
 
 		/*
 		 * And trigger a repeated start, and send the byte.
@@ -1114,7 +1124,9 @@
 		i2c->use_pio = 1;
 	if (of_get_property(np, "mrvl,i2c-fast-mode", NULL))
 		i2c->fast_mode = 1;
-	*i2c_types = (u32)(of_id->data);
+
+	*i2c_types = (enum pxa_i2c_types)(of_id->data);
+
 	return 0;
 }
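
The (enum pxa_i2c_types) cast works because of_device_id.data is a
const void * that the match table loads with a small integer. A sketch of the
round trip -- the table entry follows the driver's existing naming, which is an
assumption here, and the uintptr_t hop is the warning-free spelling of the
cast:

	static const struct of_device_id i2c_pxa_dt_ids[] = {
		/* assumed to mirror the driver's real table */
		{ .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX },
		{ }
	};

	/* later, in probe: */
	const struct of_device_id *of_id =
		of_match_device(i2c_pxa_dt_ids, &pdev->dev);
	enum pxa_i2c_types type = (enum pxa_i2c_types)(uintptr_t)of_id->data;
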
 
@@ -1146,10 +1158,19 @@
 	struct resource *res = NULL;
 	int ret, irq;
 
-	i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
-	if (!i2c) {
-		ret = -ENOMEM;
-		goto emalloc;
+	i2c = devm_kzalloc(&dev->dev, sizeof(struct pxa_i2c), GFP_KERNEL);
+	if (!i2c)
+		return -ENOMEM;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	i2c->reg_base = devm_ioremap_resource(&dev->dev, res);
+	if (IS_ERR(i2c->reg_base))
+		return PTR_ERR(i2c->reg_base);
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		dev_err(&dev->dev, "no irq resource: %d\n", irq);
+		return irq;
 	}
 
 	/* Default adapter num to device id; i2c_pxa_probe_dt can override. */
@@ -1159,19 +1180,7 @@
 	if (ret > 0)
 		ret = i2c_pxa_probe_pdata(dev, i2c, &i2c_type);
 	if (ret < 0)
-		goto eclk;
-
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(dev, 0);
-	if (res == NULL || irq < 0) {
-		ret = -ENODEV;
-		goto eclk;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res), res->name)) {
-		ret = -ENOMEM;
-		goto eclk;
-	}
+		return ret;
 
 	i2c->adap.owner   = THIS_MODULE;
 	i2c->adap.retries = 5;
@@ -1181,16 +1190,10 @@
 
 	strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
 
-	i2c->clk = clk_get(&dev->dev, NULL);
+	i2c->clk = devm_clk_get(&dev->dev, NULL);
 	if (IS_ERR(i2c->clk)) {
-		ret = PTR_ERR(i2c->clk);
-		goto eclk;
-	}
-
-	i2c->reg_base = ioremap(res->start, resource_size(res));
-	if (!i2c->reg_base) {
-		ret = -EIO;
-		goto eremap;
+		dev_err(&dev->dev, "failed to get the clk: %ld\n", PTR_ERR(i2c->clk));
+		return PTR_ERR(i2c->clk);
 	}
 
 	i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
@@ -1232,10 +1235,13 @@
 		i2c->adap.algo = &i2c_pxa_pio_algorithm;
 	} else {
 		i2c->adap.algo = &i2c_pxa_algorithm;
-		ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
-				  dev_name(&dev->dev), i2c);
-		if (ret)
+		ret = devm_request_irq(&dev->dev, irq, i2c_pxa_handler,
+				IRQF_SHARED | IRQF_NO_SUSPEND,
+				dev_name(&dev->dev), i2c);
+		if (ret) {
+			dev_err(&dev->dev, "failed to request irq: %d\n", ret);
 			goto ereqirq;
+		}
 	}
 
 	i2c_pxa_reset(i2c);
@@ -1248,33 +1254,22 @@
 
 	ret = i2c_add_numbered_adapter(&i2c->adap);
 	if (ret < 0) {
-		printk(KERN_INFO "I2C: Failed to add bus\n");
-		goto eadapt;
+		dev_err(&dev->dev, "failed to add bus: %d\n", ret);
+		goto ereqirq;
 	}
 
 	platform_set_drvdata(dev, i2c);
 
 #ifdef CONFIG_I2C_PXA_SLAVE
-	printk(KERN_INFO "I2C: %s: PXA I2C adapter, slave address %d\n",
-	       dev_name(&i2c->adap.dev), i2c->slave_addr);
+	dev_info(&i2c->adap.dev, " PXA I2C adapter, slave address %d\n",
+		i2c->slave_addr);
 #else
-	printk(KERN_INFO "I2C: %s: PXA I2C adapter\n",
-	       dev_name(&i2c->adap.dev));
+	dev_info(&i2c->adap.dev, " PXA I2C adapter\n");
 #endif
 	return 0;
 
-eadapt:
-	if (!i2c->use_pio)
-		free_irq(irq, i2c);
 ereqirq:
 	clk_disable_unprepare(i2c->clk);
-	iounmap(i2c->reg_base);
-eremap:
-	clk_put(i2c->clk);
-eclk:
-	kfree(i2c);
-emalloc:
-	release_mem_region(res->start, resource_size(res));
 	return ret;
 }
 
@@ -1283,15 +1278,8 @@
 	struct pxa_i2c *i2c = platform_get_drvdata(dev);
 
 	i2c_del_adapter(&i2c->adap);
-	if (!i2c->use_pio)
-		free_irq(i2c->irq, i2c);
 
 	clk_disable_unprepare(i2c->clk);
-	clk_put(i2c->clk);
-
-	iounmap(i2c->reg_base);
-	release_mem_region(i2c->iobase, i2c->iosize);
-	kfree(i2c);
 
 	return 0;
 }
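
Both shortened error paths above fall out of the devm_* conversion: every
managed allocation is torn down automatically, in reverse order, when probe
fails or the device is unbound. A minimal sketch of the pattern (the foo_*
names are hypothetical):

	#include <linux/clk.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct foo {				/* hypothetical driver state */
		void __iomem *base;
		struct clk *clk;
	};

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		struct foo *priv;
		int irq;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;			/* nothing to unwind */

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		priv->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);	/* devres frees priv */

		priv->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(priv->clk))
			return PTR_ERR(priv->clk);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* released on driver detach too -- no free_irq() in remove() */
		return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
					dev_name(&pdev->dev), priv);
	}
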
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 78a3668..b7e1a36 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -100,6 +100,12 @@
 #define I2C_HEADER_CONTINUE_XFER		(1<<15)
 #define I2C_HEADER_MASTER_ADDR_SHIFT		12
 #define I2C_HEADER_SLAVE_ADDR_SHIFT		1
+
+#define I2C_CONFIG_LOAD				0x08C
+#define I2C_MSTR_CONFIG_LOAD			(1 << 0)
+#define I2C_SLV_CONFIG_LOAD			(1 << 1)
+#define I2C_TIMEOUT_CONFIG_LOAD			(1 << 2)
+
 /*
  * msg_end_type: The bus control which needs to be sent at the end of a transfer.
  * @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -121,6 +127,8 @@
  * @has_single_clk_source: The i2c controller has a single clock source. Tegra30
  *		and earlier SoCs have two clock sources, i.e. div-clk and
  *		fast-clk.
+ * @has_config_load_reg: Has the config load register to load the new
+ *		configuration.
  * @clk_divisor_hs_mode: Clock divisor in HS mode.
  * @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is
  *		applicable if there is no fast clock source i.e. single clock
@@ -131,8 +139,10 @@
 	bool has_continue_xfer_support;
 	bool has_per_pkt_xfer_complete_irq;
 	bool has_single_clk_source;
+	bool has_config_load_reg;
 	int clk_divisor_hs_mode;
 	int clk_divisor_std_fast_mode;
+	u16 clk_divisor_fast_plus_mode;
 };
 
 /**
@@ -172,6 +182,7 @@
 	size_t msg_buf_remaining;
 	int msg_read;
 	u32 bus_clk_rate;
+	u16 clk_divisor_non_hs_mode;
 	bool is_suspended;
 };
 
@@ -410,6 +421,7 @@
 	u32 val;
 	int err = 0;
 	u32 clk_divisor;
+	unsigned long timeout = jiffies + HZ;
 
 	err = tegra_i2c_clock_enable(i2c_dev);
 	if (err < 0) {
@@ -431,7 +443,7 @@
 
 	/* Make sure clock divisor programmed correctly */
 	clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
-	clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
+	clk_divisor |= i2c_dev->clk_divisor_non_hs_mode <<
 					I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
 	i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
 
@@ -451,6 +463,18 @@
 	if (tegra_i2c_flush_fifos(i2c_dev))
 		err = -ETIMEDOUT;
 
+	if (i2c_dev->hw->has_config_load_reg) {
+		i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
+		while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
+			if (time_after(jiffies, timeout)) {
+				dev_warn(i2c_dev->dev,
+					"timeout waiting for config load\n");
+				return -ETIMEDOUT;
+			}
+			msleep(1);
+		}
+	}
+
 	tegra_i2c_clock_disable(i2c_dev);
 
 	if (i2c_dev->irq_disabled) {
@@ -681,6 +705,8 @@
 	.has_single_clk_source = false,
 	.clk_divisor_hs_mode = 3,
 	.clk_divisor_std_fast_mode = 0,
+	.clk_divisor_fast_plus_mode = 0,
+	.has_config_load_reg = false,
 };
 
 static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -689,6 +715,8 @@
 	.has_single_clk_source = false,
 	.clk_divisor_hs_mode = 3,
 	.clk_divisor_std_fast_mode = 0,
+	.clk_divisor_fast_plus_mode = 0,
+	.has_config_load_reg = false,
 };
 
 static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -697,10 +725,23 @@
 	.has_single_clk_source = true,
 	.clk_divisor_hs_mode = 1,
 	.clk_divisor_std_fast_mode = 0x19,
+	.clk_divisor_fast_plus_mode = 0x10,
+	.has_config_load_reg = false,
+};
+
+static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
+	.has_continue_xfer_support = true,
+	.has_per_pkt_xfer_complete_irq = true,
+	.has_single_clk_source = true,
+	.clk_divisor_hs_mode = 1,
+	.clk_divisor_std_fast_mode = 0x19,
+	.clk_divisor_fast_plus_mode = 0x10,
+	.has_config_load_reg = true,
 };
 
 /* Match table for of_platform binding */
 static const struct of_device_id tegra_i2c_of_match[] = {
+	{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
 	{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
 	{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
 	{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
@@ -793,7 +834,14 @@
 		}
 	}
 
-	clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+	i2c_dev->clk_divisor_non_hs_mode =
+			i2c_dev->hw->clk_divisor_std_fast_mode;
+	if (i2c_dev->hw->clk_divisor_fast_plus_mode &&
+		(i2c_dev->bus_clk_rate == 1000000))
+		i2c_dev->clk_divisor_non_hs_mode =
+			i2c_dev->hw->clk_divisor_fast_plus_mode;
+
+	clk_multiplier *= (i2c_dev->clk_divisor_non_hs_mode + 1);
 	ret = clk_set_rate(i2c_dev->div_clk,
 			   i2c_dev->bus_clk_rate * clk_multiplier);
 	if (ret) {
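
Worked numbers for the divisor selection above; the starting clk_multiplier,
I2C_CLK_MULTIPLIER_STD_FAST_MODE, is defined outside this hunk, so the value 8
is an assumption here:

	/*
	 *   Fast-mode Plus:  bus_clk_rate = 1000000, divisor = 0x10 = 16
	 *       clk_multiplier = 8 * (16 + 1) = 136
	 *       clk_set_rate(div_clk, 1000000 * 136)  -> 136 MHz
	 *
	 *   Standard mode:   bus_clk_rate = 100000, divisor = 0x19 = 25
	 *       clk_multiplier = 8 * (25 + 1) = 208
	 *       clk_set_rate(div_clk, 100000 * 208)   -> 20.8 MHz
	 */
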
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 47e88ad..543456a 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -391,11 +391,11 @@
 			VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
 			0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
 			VPRBRD_USB_TIMEOUT_MS);
-	    if (ret != 1) {
-		dev_err(&pdev->dev,
-			"failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
-		return -EIO;
-	    }
+		if (ret != 1) {
+			dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
+				i2c_bus_freq);
+			return -EIO;
+		}
 	} else {
 		dev_err(&pdev->dev,
 			"invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 1c9cb65..4233f56 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -198,10 +198,10 @@
 	int rc;
 
 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, readlen, DMA_FROM_DEVICE);
-	rc = dma_mapping_error(ctx->dev, paddr);
-	if (rc) {
+	if (dma_mapping_error(ctx->dev, paddr)) {
 		dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
 			ctx->dma_buffer);
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -241,10 +241,10 @@
 	memcpy(ctx->dma_buffer, data, writelen);
 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
 			       DMA_TO_DEVICE);
-	rc = dma_mapping_error(ctx->dev, paddr);
-	if (rc) {
+	if (dma_mapping_error(ctx->dev, paddr)) {
 		dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
 			ctx->dma_buffer);
+		rc = -ENOMEM;
 		goto err;
 	}
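
The reason for the rewrite above: dma_map_single() returns a dma_addr_t, not
an errno, so the only valid failure test is dma_mapping_error() and the caller
has to pick its own error code. The idiom:

	dma_addr_t paddr;

	paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr))
		return -ENOMEM;		/* errno chosen by the caller */

	/* ... hand paddr to the hardware, wait for completion ... */

	dma_unmap_single(dev, paddr, len, DMA_TO_DEVICE);
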
 
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4dda23f..e23a7b0 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -283,7 +283,7 @@
 	/* Enable interrupts */
 	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
 
-	xiic_irq_clr_en(i2c, XIIC_INTR_AAS_MASK | XIIC_INTR_ARB_LOST_MASK);
+	xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
 }
 
 static void xiic_deinit(struct xiic_i2c *i2c)
@@ -358,8 +358,9 @@
 	wake_up(&i2c->wait);
 }
 
-static void xiic_process(struct xiic_i2c *i2c)
+static irqreturn_t xiic_process(int irq, void *dev_id)
 {
+	struct xiic_i2c *i2c = dev_id;
 	u32 pend, isr, ier;
 	u32 clr = 0;
 
@@ -368,6 +369,7 @@
 	 * To find which interrupts are pending; AND interrupts pending with
 	 * interrupts masked.
 	 */
+	spin_lock(&i2c->lock);
 	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
 	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
 	pend = isr & ier;
@@ -378,11 +380,6 @@
 		__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
 		i2c->tx_msg, i2c->nmsgs);
 
-	/* Do not processes a devices interrupts if the device has no
-	 * interrupts pending
-	 */
-	if (!pend)
-		return;
 
 	/* Service requesting interrupt */
 	if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
@@ -402,13 +399,15 @@
 		 */
 		xiic_reinit(i2c);
 
+		if (i2c->rx_msg)
+			xiic_wakeup(i2c, STATE_ERROR);
 		if (i2c->tx_msg)
 			xiic_wakeup(i2c, STATE_ERROR);
-
-	} else if (pend & XIIC_INTR_RX_FULL_MASK) {
+	}
+	if (pend & XIIC_INTR_RX_FULL_MASK) {
 		/* Receive register/FIFO is full */
 
-		clr = XIIC_INTR_RX_FULL_MASK;
+		clr |= XIIC_INTR_RX_FULL_MASK;
 		if (!i2c->rx_msg) {
 			dev_dbg(i2c->adap.dev.parent,
 				"%s unexpexted RX IRQ\n", __func__);
@@ -441,9 +440,10 @@
 				__xiic_start_xfer(i2c);
 			}
 		}
-	} else if (pend & XIIC_INTR_BNB_MASK) {
+	}
+	if (pend & XIIC_INTR_BNB_MASK) {
 		/* IIC bus has transitioned to not busy */
-		clr = XIIC_INTR_BNB_MASK;
+		clr |= XIIC_INTR_BNB_MASK;
 
 		/* The bus is not busy, disable BusNotBusy interrupt */
 		xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
@@ -456,12 +456,12 @@
 			xiic_wakeup(i2c, STATE_DONE);
 		else
 			xiic_wakeup(i2c, STATE_ERROR);
-
-	} else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
+	}
+	if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
 		/* Transmit register/FIFO is empty or ½ empty */
 
-		clr = pend &
-			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
+		clr |= (pend &
+			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));
 
 		if (!i2c->tx_msg) {
 			dev_dbg(i2c->adap.dev.parent,
@@ -492,16 +492,13 @@
 			 * make sure to disable tx half
 			 */
 			xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
-	} else {
-		/* got IRQ which is not acked */
-		dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
-			__func__);
-		clr = pend;
 	}
 out:
 	dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
 
 	xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
+	spin_unlock(&i2c->lock);
+	return IRQ_HANDLED;
 }
 
 static int xiic_bus_busy(struct xiic_i2c *i2c)
@@ -525,7 +522,7 @@
 	 */
 	err = xiic_bus_busy(i2c);
 	while (err && tries--) {
-		mdelay(1);
+		msleep(1);
 		err = xiic_bus_busy(i2c);
 	}
 
@@ -602,19 +599,21 @@
 static irqreturn_t xiic_isr(int irq, void *dev_id)
 {
 	struct xiic_i2c *i2c = dev_id;
-
-	spin_lock(&i2c->lock);
-	/* disable interrupts globally */
-	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
+	u32 pend, isr, ier;
+	irqreturn_t ret = IRQ_NONE;
+	/* Do not process a device's interrupts if the device has no
+	 * interrupts pending
+	 */
 
 	dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
 
-	xiic_process(i2c);
+	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+	pend = isr & ier;
+	if (pend)
+		ret = IRQ_WAKE_THREAD;
 
-	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
-	spin_unlock(&i2c->lock);
-
-	return IRQ_HANDLED;
+	return ret;
 }
 
 static void __xiic_start_xfer(struct xiic_i2c *i2c)
@@ -663,16 +662,8 @@
 
 static void xiic_start_xfer(struct xiic_i2c *i2c)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&i2c->lock, flags);
-	xiic_reinit(i2c);
-	/* disable interrupts globally */
-	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
-	spin_unlock_irqrestore(&i2c->lock, flags);
 
 	__xiic_start_xfer(i2c);
-	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -755,7 +746,10 @@
 	spin_lock_init(&i2c->lock);
 	init_waitqueue_head(&i2c->wait);
 
-	ret = devm_request_irq(&pdev->dev, irq, xiic_isr, 0, pdev->name, i2c);
+	ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
+					xiic_process, IRQF_ONESHOT,
+					pdev->name, i2c);
+
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
 		return ret;
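
The xiic conversion follows the usual threaded-IRQ split: a hard handler that
only decides whether the device interrupted, and a thread that does the
register-heavy servicing. In sketch form (the foo_* names are hypothetical):

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo *i2c = dev_id;

		if (!foo_irq_pending(i2c))	/* hypothetical cheap check */
			return IRQ_NONE;	/* not ours */

		return IRQ_WAKE_THREAD;		/* defer the real work */
	}

	static irqreturn_t foo_thread(int irq, void *dev_id)
	{
		/* process context: may sleep, take mutexes, etc. */
		foo_service(dev_id);		/* hypothetical */
		return IRQ_HANDLED;
	}

	/* in probe: */
	ret = devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq,
					foo_thread, IRQF_ONESHOT,
					pdev->name, i2c);

IRQF_ONESHOT keeps the line masked until the thread finishes, which is why the
patch can drop the manual DGIER enable/disable dance around xiic_process().
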
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c83e4d1..5f89f1e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -27,6 +27,7 @@
    I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com>
  */
 
+#include <dt-bindings/i2c/i2c.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -47,6 +48,7 @@
 #include <linux/rwsem.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/acpi.h>
 #include <linux/jump_label.h>
 #include <asm/uaccess.h>
@@ -57,6 +59,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/i2c.h>
 
+#define I2C_ADDR_OFFSET_TEN_BIT	0xa000
+#define I2C_ADDR_OFFSET_SLAVE	0x1000
+
 /* core_lock protects i2c_adapter_idr, and guarantees
    that device detection, deletion of detected devices, and attach_adapter
    calls are serialized */
@@ -641,11 +646,13 @@
 	if (!client->irq) {
 		int irq = -ENOENT;
 
-		if (dev->of_node)
-			irq = of_irq_get(dev->of_node, 0);
-		else if (ACPI_COMPANION(dev))
+		if (dev->of_node) {
+			irq = of_irq_get_byname(dev->of_node, "irq");
+			if (irq == -EINVAL || irq == -ENODATA)
+				irq = of_irq_get(dev->of_node, 0);
+		} else if (ACPI_COMPANION(dev)) {
 			irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
-
+		}
 		if (irq == -EPROBE_DEFER)
 			return irq;
 		if (irq < 0)
@@ -658,23 +665,49 @@
 	if (!driver->probe || !driver->id_table)
 		return -ENODEV;
 
-	if (!device_can_wakeup(&client->dev))
-		device_init_wakeup(&client->dev,
-					client->flags & I2C_CLIENT_WAKE);
+	if (client->flags & I2C_CLIENT_WAKE) {
+		int wakeirq = -ENOENT;
+
+		if (dev->of_node) {
+			wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+			if (wakeirq == -EPROBE_DEFER)
+				return wakeirq;
+		}
+
+		device_init_wakeup(&client->dev, true);
+
+		if (wakeirq > 0 && wakeirq != client->irq)
+			status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
+		else if (client->irq > 0)
+			status = dev_pm_set_wake_irq(dev, wakeirq);
+		else
+			status = 0;
+
+		if (status)
+			dev_warn(&client->dev, "failed to set up wakeup irq\n");
+	}
+
 	dev_dbg(dev, "probe\n");
 
 	status = of_clk_set_defaults(dev->of_node, false);
 	if (status < 0)
-		return status;
+		goto err_clear_wakeup_irq;
 
 	status = dev_pm_domain_attach(&client->dev, true);
 	if (status != -EPROBE_DEFER) {
 		status = driver->probe(client, i2c_match_id(driver->id_table,
 					client));
 		if (status)
-			dev_pm_domain_detach(&client->dev, true);
+			goto err_detach_pm_domain;
 	}
 
+	return 0;
+
+err_detach_pm_domain:
+	dev_pm_domain_detach(&client->dev, true);
+err_clear_wakeup_irq:
+	dev_pm_clear_wake_irq(&client->dev);
+	device_init_wakeup(&client->dev, false);
 	return status;
 }
 
@@ -694,6 +727,10 @@
 	}
 
 	dev_pm_domain_detach(&client->dev, true);
+
+	dev_pm_clear_wake_irq(&client->dev);
+	device_init_wakeup(&client->dev, false);
+
 	return status;
 }
 
@@ -778,17 +815,32 @@
 EXPORT_SYMBOL(i2c_verify_client);
 
 
+/* Return a unique address which takes the flags of the client into account */
+static unsigned short i2c_encode_flags_to_addr(struct i2c_client *client)
+{
+	unsigned short addr = client->addr;
+
+	/* For some client flags, add an arbitrary offset to avoid collisions */
+	if (client->flags & I2C_CLIENT_TEN)
+		addr |= I2C_ADDR_OFFSET_TEN_BIT;
+
+	if (client->flags & I2C_CLIENT_SLAVE)
+		addr |= I2C_ADDR_OFFSET_SLAVE;
+
+	return addr;
+}
+
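Concrete encodings produced by the helper above (the offsets are arbitrary,
chosen only to keep the values unique):

	/*
	 *   7-bit client at 0x50                      -> 0x0050
	 *   10-bit client at 0x050 (I2C_CLIENT_TEN)   -> 0xa050
	 *   slave backend at 0x64 (I2C_CLIENT_SLAVE)  -> 0x1064
	 *
	 * The encoded value feeds both the "%d-%04x" device name and the
	 * bus-busy check, so a 7-bit 0x50 and a 10-bit 0x050 no longer
	 * collide.
	 */
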
 /* This is a permissive address validity check, I2C address map constraints
  * are purposely not enforced, except for the general call address. */
-static int i2c_check_client_addr_validity(const struct i2c_client *client)
+static int i2c_check_addr_validity(unsigned addr, unsigned short flags)
 {
-	if (client->flags & I2C_CLIENT_TEN) {
+	if (flags & I2C_CLIENT_TEN) {
 		/* 10-bit address, all values are valid */
-		if (client->addr > 0x3ff)
+		if (addr > 0x3ff)
 			return -EINVAL;
 	} else {
 		/* 7-bit address, reject the general call address */
-		if (client->addr == 0x00 || client->addr > 0x7f)
+		if (addr == 0x00 || addr > 0x7f)
 			return -EINVAL;
 	}
 	return 0;
@@ -798,7 +850,7 @@
  * device uses a reserved address, then it shouldn't be probed. 7-bit
  * addressing is assumed, 10-bit address devices are rare and should be
  * explicitly enumerated. */
-static int i2c_check_addr_validity(unsigned short addr)
+static int i2c_check_7bit_addr_validity_strict(unsigned short addr)
 {
 	/*
 	 * Reserved addresses per I2C specification:
@@ -820,7 +872,7 @@
 	struct i2c_client	*client = i2c_verify_client(dev);
 	int			addr = *(int *)addrp;
 
-	if (client && client->addr == addr)
+	if (client && i2c_encode_flags_to_addr(client) == addr)
 		return -EBUSY;
 	return 0;
 }
@@ -923,10 +975,8 @@
 		return;
 	}
 
-	/* For 10-bit clients, add an arbitrary offset to avoid collisions */
 	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
-		     client->addr | ((client->flags & I2C_CLIENT_TEN)
-				     ? 0xa000 : 0));
+		     i2c_encode_flags_to_addr(client));
 }
 
 /**
@@ -968,8 +1018,7 @@
 
 	strlcpy(client->name, info->type, sizeof(client->name));
 
-	/* Check for address validity */
-	status = i2c_check_client_addr_validity(client);
+	status = i2c_check_addr_validity(client->addr, client->flags);
 	if (status) {
 		dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n",
 			client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr);
@@ -977,7 +1026,7 @@
 	}
 
 	/* Check for address business */
-	status = i2c_check_addr_busy(adap, client->addr);
+	status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
 	if (status)
 		goto out_err;
 
@@ -1142,6 +1191,16 @@
 		return -EINVAL;
 	}
 
+	if ((info.addr & I2C_ADDR_OFFSET_TEN_BIT) == I2C_ADDR_OFFSET_TEN_BIT) {
+		info.addr &= ~I2C_ADDR_OFFSET_TEN_BIT;
+		info.flags |= I2C_CLIENT_TEN;
+	}
+
+	if (info.addr & I2C_ADDR_OFFSET_SLAVE) {
+		info.addr &= ~I2C_ADDR_OFFSET_SLAVE;
+		info.flags |= I2C_CLIENT_SLAVE;
+	}
+
 	client = i2c_new_device(adap, &info);
 	if (!client)
 		return -EINVAL;
@@ -1193,7 +1252,7 @@
 			  i2c_adapter_depth(adap));
 	list_for_each_entry_safe(client, next, &adap->userspace_clients,
 				 detected) {
-		if (client->addr == addr) {
+		if (i2c_encode_flags_to_addr(client) == addr) {
 			dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
 				 "delete_device", client->name, client->addr);
 
@@ -1273,7 +1332,8 @@
 	struct i2c_client *result;
 	struct i2c_board_info info = {};
 	struct dev_archdata dev_ad = {};
-	const __be32 *addr;
+	const __be32 *addr_be;
+	u32 addr;
 	int len;
 
 	dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
@@ -1284,20 +1344,31 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	addr = of_get_property(node, "reg", &len);
-	if (!addr || (len < sizeof(*addr))) {
+	addr_be = of_get_property(node, "reg", &len);
+	if (!addr_be || (len < sizeof(*addr_be))) {
 		dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
 			node->full_name);
 		return ERR_PTR(-EINVAL);
 	}
 
-	info.addr = be32_to_cpup(addr);
-	if (info.addr > (1 << 10) - 1) {
+	addr = be32_to_cpup(addr_be);
+	if (addr & I2C_TEN_BIT_ADDRESS) {
+		addr &= ~I2C_TEN_BIT_ADDRESS;
+		info.flags |= I2C_CLIENT_TEN;
+	}
+
+	if (addr & I2C_OWN_SLAVE_ADDRESS) {
+		addr &= ~I2C_OWN_SLAVE_ADDRESS;
+		info.flags |= I2C_CLIENT_SLAVE;
+	}
+
+	if (i2c_check_addr_validity(addr, info.flags)) {
 		dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
 			info.addr, node->full_name);
 		return ERR_PTR(-EINVAL);
 	}
 
+	info.addr = addr;
 	info.of_node = of_node_get(node);
 	info.archdata = &dev_ad;
 
@@ -1371,6 +1442,24 @@
 	return adapter;
 }
 EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+	struct i2c_adapter *adapter;
+
+	adapter = of_find_i2c_adapter_by_node(node);
+	if (!adapter)
+		return NULL;
+
+	if (!try_module_get(adapter->owner)) {
+		put_device(&adapter->dev);
+		adapter = NULL;
+	}
+
+	return adapter;
+}
+EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
 #else
 static void of_i2c_register_devices(struct i2c_adapter *adap) { }
 #endif /* CONFIG_OF */
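
A minimal caller sketch for the new getter (names hypothetical): unlike the plain of_find variant, of_get_i2c_adapter_by_node() leaves the caller holding both a device reference and a module reference, and the reworked i2c_put_adapter() below drops both:

	struct i2c_adapter *parent;

	parent = of_get_i2c_adapter_by_node(parent_np);
	if (!parent)
		return -EPROBE_DEFER;

	/* ... talk to devices behind the adapter ... */

	i2c_put_adapter(parent);	/* put_device() + module_put() */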
@@ -2262,14 +2351,14 @@
 	int err;
 
 	/* Make sure the address is valid */
-	err = i2c_check_addr_validity(addr);
+	err = i2c_check_7bit_addr_validity_strict(addr);
 	if (err) {
 		dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n",
 			 addr);
 		return err;
 	}
 
-	/* Skip if already in use */
+	/* Skip if already in use (7 bit, no need to encode flags) */
 	if (i2c_check_addr_busy(adapter, addr))
 		return 0;
 
@@ -2379,13 +2468,13 @@
 
 	for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
 		/* Check address validity */
-		if (i2c_check_addr_validity(addr_list[i]) < 0) {
+		if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) {
 			dev_warn(&adap->dev, "Invalid 7-bit address "
 				 "0x%02x\n", addr_list[i]);
 			continue;
 		}
 
-		/* Check address availability */
+		/* Check address availability (7 bit, no need to encode flags) */
 		if (i2c_check_addr_busy(adap, addr_list[i])) {
 			dev_dbg(&adap->dev, "Address 0x%02x already in "
 				"use, not probing\n", addr_list[i]);
@@ -2413,9 +2502,15 @@
 
 	mutex_lock(&core_lock);
 	adapter = idr_find(&i2c_adapter_idr, nr);
-	if (adapter && !try_module_get(adapter->owner))
+	if (!adapter)
+		goto exit;
+
+	if (try_module_get(adapter->owner))
+		get_device(&adapter->dev);
+	else
 		adapter = NULL;
 
+ exit:
 	mutex_unlock(&core_lock);
 	return adapter;
 }
@@ -2423,8 +2518,11 @@
 
 void i2c_put_adapter(struct i2c_adapter *adap)
 {
-	if (adap)
-		module_put(adap->owner);
+	if (!adap)
+		return;
+
+	put_device(&adap->dev);
+	module_put(adap->owner);
 }
 EXPORT_SYMBOL(i2c_put_adapter);
 
@@ -2942,6 +3040,63 @@
 }
 EXPORT_SYMBOL(i2c_smbus_xfer);
 
+/**
+ * i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
+ * @client: Handle to slave device
+ * @command: Byte interpreted by slave
+ * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes
+ * @values: Byte array into which data will be read; big enough to hold
+ *	the data returned by the slave.  SMBus allows at most
+ *	I2C_SMBUS_BLOCK_MAX bytes.
+ *
+ * This executes the SMBus "block read" protocol if supported by the adapter.
+ * If block read is not supported, it emulates it using either word or byte
+ * read protocols depending on availability.
+ *
+ * The addresses of the I2C slave device that are accessed with this function
+ * must be mapped to a linear region, so that a block read will have the same
+ * effect as a series of byte reads. Before using this function you must
+ * double-check that the I2C slave actually supports exchanging a block
+ * transfer for a series of byte transfers.
+ */
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+					      u8 command, u8 length, u8 *values)
+{
+	u8 i = 0;
+	int status;
+
+	if (length > I2C_SMBUS_BLOCK_MAX)
+		length = I2C_SMBUS_BLOCK_MAX;
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+		return i2c_smbus_read_i2c_block_data(client, command, length, values);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA))
+		return -EOPNOTSUPP;
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+		while ((i + 2) <= length) {
+			status = i2c_smbus_read_word_data(client, command + i);
+			if (status < 0)
+				return status;
+			values[i] = status & 0xff;
+			values[i + 1] = status >> 8;
+			i += 2;
+		}
+	}
+
+	while (i < length) {
+		status = i2c_smbus_read_byte_data(client, command + i);
+		if (status < 0)
+			return status;
+		values[i] = status;
+		i++;
+	}
+
+	return i;
+}
+EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
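
A usage sketch under assumed conditions (hypothetical client with six consecutive, linearly mapped data registers starting at command 0x02):

	u8 buf[6];
	s32 ret;

	ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x02,
							sizeof(buf), buf);
	if (ret < 0)
		return ret;
	/* on success, ret is the number of bytes read into buf */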
+
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
@@ -2952,9 +3107,13 @@
 		return -EINVAL;
 	}
 
+	if (!(client->flags & I2C_CLIENT_SLAVE))
+		dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
+			 __func__);
+
 	if (!(client->flags & I2C_CLIENT_TEN)) {
 		/* Enforce stricter address checking */
-		ret = i2c_check_addr_validity(client->addr);
+		ret = i2c_check_7bit_addr_validity_strict(client->addr);
 		if (ret) {
 			dev_err(&client->dev, "%s: invalid address\n", __func__);
 			return ret;
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 1da4496..b2039f9 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -157,7 +157,6 @@
 static struct i2c_driver i2c_slave_eeprom_driver = {
 	.driver = {
 		.name = "i2c-slave-eeprom",
-		.owner = THIS_MODULE,
 	},
 	.probe = i2c_slave_eeprom_probe,
 	.remove = i2c_slave_eeprom_remove,
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index fdd0769..f06b0e2 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -61,4 +61,15 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called pinctrl-i2cmux.
 
+config I2C_MUX_REG
+	tristate "Register-based I2C multiplexer"
+	help
+	  If you say yes to this option, support will be included for a
+	  register-based I2C multiplexer. This driver provides access to
+	  I2C busses connected through a MUX, which is controlled
+	  by a single register.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-mux-reg.
+
 endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 465778b..e89799b 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -7,5 +7,6 @@
 obj-$(CONFIG_I2C_MUX_PCA9541)	+= i2c-mux-pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)	+= i2c-mux-pca954x.o
 obj-$(CONFIG_I2C_MUX_PINCTRL)	+= i2c-mux-pinctrl.o
+obj-$(CONFIG_I2C_MUX_REG)	+= i2c-mux-reg.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 5cf1b60..402e3a6 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -196,7 +196,8 @@
 		dev_err(dev, "Cannot parse i2c-parent\n");
 		return -EINVAL;
 	}
-	arb->parent = of_find_i2c_adapter_by_node(parent_np);
+	arb->parent = of_get_i2c_adapter_by_node(parent_np);
+	of_node_put(parent_np);
 	if (!arb->parent) {
 		dev_err(dev, "Cannot find parent bus\n");
 		return -EPROBE_DEFER;
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 70db992..b8e11c1 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -76,6 +76,7 @@
 		return -ENODEV;
 	}
 	adapter = of_find_i2c_adapter_by_node(adapter_np);
+	of_node_put(adapter_np);
 	if (!adapter)
 		return -EPROBE_DEFER;
 
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 0c8d4d2..d0ba424 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -386,7 +386,6 @@
 static struct i2c_driver pca9541_driver = {
 	.driver = {
 		   .name = "pca9541",
-		   .owner = THIS_MODULE,
 		   },
 	.probe = pca9541_probe,
 	.remove = pca9541_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index ea4aa9d..acfcef3 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -300,7 +300,6 @@
 	.driver		= {
 		.name	= "pca954x",
 		.pm	= &pca954x_pm,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= pca954x_probe,
 	.remove		= pca954x_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index b48378c..b5a982b 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -111,6 +111,7 @@
 		return -ENODEV;
 	}
 	adapter = of_find_i2c_adapter_by_node(adapter_np);
+	of_node_put(adapter_np);
 	if (!adapter) {
 		dev_err(mux->dev, "Cannot find parent bus\n");
 		return -EPROBE_DEFER;
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
new file mode 100644
index 0000000..5fbd5bd
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -0,0 +1,290 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun  <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_data/i2c-mux-reg.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct regmux {
+	struct i2c_adapter *parent;
+	struct i2c_adapter **adap; /* child busses */
+	struct i2c_mux_reg_platform_data data;
+};
+
+static int i2c_mux_reg_set(const struct regmux *mux, unsigned int chan_id)
+{
+	if (!mux->data.reg)
+		return -EINVAL;
+
+	/*
+	 * Write to the register, followed by a read back to ensure the
+	 * write has completed on a "posted" bus (for example PCI) and has
+	 * drained any write buffers. The endianness of the read doesn't
+	 * matter and the returned data is not used.
+	 */
+	switch (mux->data.reg_size) {
+	case 4:
+		if (mux->data.little_endian)
+			iowrite32(chan_id, mux->data.reg);
+		else
+			iowrite32be(chan_id, mux->data.reg);
+		if (!mux->data.write_only)
+			ioread32(mux->data.reg);
+		break;
+	case 2:
+		if (mux->data.little_endian)
+			iowrite16(chan_id, mux->data.reg);
+		else
+			iowrite16be(chan_id, mux->data.reg);
+		if (!mux->data.write_only)
+			ioread16(mux->data.reg);
+		break;
+	case 1:
+		iowrite8(chan_id, mux->data.reg);
+		if (!mux->data.write_only)
+			ioread8(mux->data.reg);
+		break;
+	}
+
+	return 0;
+}
+
+static int i2c_mux_reg_select(struct i2c_adapter *adap, void *data,
+			      unsigned int chan)
+{
+	struct regmux *mux = data;
+
+	return i2c_mux_reg_set(mux, chan);
+}
+
+static int i2c_mux_reg_deselect(struct i2c_adapter *adap, void *data,
+				unsigned int chan)
+{
+	struct regmux *mux = data;
+
+	if (mux->data.idle_in_use)
+		return i2c_mux_reg_set(mux, mux->data.idle);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_reg_probe_dt(struct regmux *mux,
+					struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *adapter_np, *child;
+	struct i2c_adapter *adapter;
+	struct resource res;
+	unsigned *values;
+	int i = 0;
+
+	if (!np)
+		return -ENODEV;
+
+	adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+	if (!adapter_np) {
+		dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+		return -ENODEV;
+	}
+	adapter = of_find_i2c_adapter_by_node(adapter_np);
+	of_node_put(adapter_np);
+	if (!adapter)
+		return -EPROBE_DEFER;
+
+	mux->parent = adapter;
+	mux->data.parent = i2c_adapter_id(adapter);
+	put_device(&adapter->dev);
+
+	mux->data.n_values = of_get_child_count(np);
+	if (of_find_property(np, "little-endian", NULL)) {
+		mux->data.little_endian = true;
+	} else if (of_find_property(np, "big-endian", NULL)) {
+		mux->data.little_endian = false;
+	} else {
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : \
+	defined(__LITTLE_ENDIAN)
+		mux->data.little_endian = true;
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : \
+	defined(__BIG_ENDIAN)
+		mux->data.little_endian = false;
+#else
+#error Endianness not defined?
+#endif
+	}
+	if (of_find_property(np, "write-only", NULL))
+		mux->data.write_only = true;
+	else
+		mux->data.write_only = false;
+
+	values = devm_kzalloc(&pdev->dev,
+			      sizeof(*mux->data.values) * mux->data.n_values,
+			      GFP_KERNEL);
+	if (!values) {
+		dev_err(&pdev->dev, "Cannot allocate values array");
+		return -ENOMEM;
+	}
+
+	for_each_child_of_node(np, child) {
+		of_property_read_u32(child, "reg", values + i);
+		i++;
+	}
+	mux->data.values = values;
+
+	if (!of_property_read_u32(np, "idle-state", &mux->data.idle))
+		mux->data.idle_in_use = true;
+
+	/* map the address from the "reg" property, if present */
+	if (!of_address_to_resource(np, 0, &res)) {
+		mux->data.reg_size = resource_size(&res);
+		mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
+		if (IS_ERR(mux->data.reg))
+			return PTR_ERR(mux->data.reg);
+	}
+
+	return 0;
+}
+#else
+static int i2c_mux_reg_probe_dt(struct regmux *mux,
+					struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+static int i2c_mux_reg_probe(struct platform_device *pdev)
+{
+	struct regmux *mux;
+	struct i2c_adapter *parent;
+	struct resource *res;
+	int (*deselect)(struct i2c_adapter *, void *, u32);
+	unsigned int class;
+	int i, ret, nr;
+
+	mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, mux);
+
+	if (dev_get_platdata(&pdev->dev)) {
+		memcpy(&mux->data, dev_get_platdata(&pdev->dev),
+			sizeof(mux->data));
+
+		parent = i2c_get_adapter(mux->data.parent);
+		if (!parent)
+			return -EPROBE_DEFER;
+
+		mux->parent = parent;
+	} else {
+		ret = i2c_mux_reg_probe_dt(mux, pdev);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Error parsing device tree");
+			return ret;
+		}
+	}
+
+	if (!mux->data.reg) {
+		dev_info(&pdev->dev,
+			"Register not set, using platform resource\n");
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -ENODEV;
+		mux->data.reg_size = resource_size(res);
+		mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(mux->data.reg))
+			return PTR_ERR(mux->data.reg);
+	}
+
+	if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
+	    mux->data.reg_size != 1) {
+		dev_err(&pdev->dev, "Invalid register size\n");
+		return -EINVAL;
+	}
+
+	mux->adap = devm_kzalloc(&pdev->dev,
+				 sizeof(*mux->adap) * mux->data.n_values,
+				 GFP_KERNEL);
+	if (!mux->adap) {
+		dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
+		return -ENOMEM;
+	}
+
+	if (mux->data.idle_in_use)
+		deselect = i2c_mux_reg_deselect;
+	else
+		deselect = NULL;
+
+	for (i = 0; i < mux->data.n_values; i++) {
+		nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
+		class = mux->data.classes ? mux->data.classes[i] : 0;
+
+		mux->adap[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, mux,
+						   nr, mux->data.values[i],
+						   class, i2c_mux_reg_select,
+						   deselect);
+		if (!mux->adap[i]) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+			goto add_adapter_failed;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
+		 mux->data.n_values, mux->parent->name);
+
+	return 0;
+
+add_adapter_failed:
+	for (; i > 0; i--)
+		i2c_del_mux_adapter(mux->adap[i - 1]);
+
+	return ret;
+}
+
+static int i2c_mux_reg_remove(struct platform_device *pdev)
+{
+	struct regmux *mux = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < mux->data.n_values; i++)
+		i2c_del_mux_adapter(mux->adap[i]);
+
+	i2c_put_adapter(mux->parent);
+
+	return 0;
+}
+
+static const struct of_device_id i2c_mux_reg_of_match[] = {
+	{ .compatible = "i2c-mux-reg", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match);
+
+static struct platform_driver i2c_mux_reg_driver = {
+	.probe	= i2c_mux_reg_probe,
+	.remove	= i2c_mux_reg_remove,
+	.driver	= {
+		.name	= "i2c-mux-reg",
+	},
+};
+
+module_platform_driver(i2c_mux_reg_driver);
+
+MODULE_DESCRIPTION("Register-based I2C multiplexer driver");
+MODULE_AUTHOR("York Sun <yorksun@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c-mux-reg");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index f101bb5..993eb20 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -469,16 +469,12 @@
 	dev = &client->dev;
 
 	/* gpio interrupt pin */
-	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0);
+	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "acpi gpio get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
 
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index e330205..d8e614c 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -845,16 +845,12 @@
 	dev = &client->dev;
 
 	/* data ready GPIO interrupt pin */
-	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0);
+	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "ACPI GPIO get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index bac3fb4..30eb245 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1144,73 +1144,6 @@
 }
 EXPORT_SYMBOL(ib_get_dma_mr);
 
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
-			     struct ib_phys_buf *phys_buf_array,
-			     int num_phys_buf,
-			     int mr_access_flags,
-			     u64 *iova_start)
-{
-	struct ib_mr *mr;
-	int err;
-
-	err = ib_check_mr_access(mr_access_flags);
-	if (err)
-		return ERR_PTR(err);
-
-	if (!pd->device->reg_phys_mr)
-		return ERR_PTR(-ENOSYS);
-
-	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
-				     mr_access_flags, iova_start);
-
-	if (!IS_ERR(mr)) {
-		mr->device  = pd->device;
-		mr->pd      = pd;
-		mr->uobject = NULL;
-		atomic_inc(&pd->usecnt);
-		atomic_set(&mr->usecnt, 0);
-	}
-
-	return mr;
-}
-EXPORT_SYMBOL(ib_reg_phys_mr);
-
-int ib_rereg_phys_mr(struct ib_mr *mr,
-		     int mr_rereg_mask,
-		     struct ib_pd *pd,
-		     struct ib_phys_buf *phys_buf_array,
-		     int num_phys_buf,
-		     int mr_access_flags,
-		     u64 *iova_start)
-{
-	struct ib_pd *old_pd;
-	int ret;
-
-	ret = ib_check_mr_access(mr_access_flags);
-	if (ret)
-		return ret;
-
-	if (!mr->device->rereg_phys_mr)
-		return -ENOSYS;
-
-	if (atomic_read(&mr->usecnt))
-		return -EBUSY;
-
-	old_pd = mr->pd;
-
-	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
-					phys_buf_array, num_phys_buf,
-					mr_access_flags, iova_start);
-
-	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
-		atomic_dec(&old_pd->usecnt);
-		atomic_inc(&pd->usecnt);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(ib_rereg_phys_mr);
-
 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
 {
 	return mr->device->query_mr ?
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f4a30fc..c642082 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -343,9 +343,8 @@
 	__set_bit(EV_FF, dev->evbit);
 
 	/* Copy "true" bits into ff device bitmap */
-	for (i = 0; i <= FF_MAX; i++)
-		if (test_bit(i, dev->ffbit))
-			__set_bit(i, ff->ffbit);
+	for_each_set_bit(i, dev->ffbit, FF_CNT)
+		__set_bit(i, ff->ffbit);
 
 	/* we can emulate RUMBLE with periodic effects */
 	if (test_bit(FF_PERIODIC, ff->ffbit))
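
for_each_set_bit(bit, addr, size) from <linux/bitops.h> visits only the set bits via find_first_bit()/find_next_bit(), so this conversion (and the joydev and uinput ones below) keeps the old behaviour while skipping the clear bits. The open-coded equivalent, with bitmap and NBITS as placeholders:

	for (i = find_first_bit(bitmap, NBITS);
	     i < NBITS;
	     i = find_next_bit(bitmap, NBITS, i + 1)) {
		/* body runs once per set bit */
	}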
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 78d2499..5391abd 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -674,13 +674,19 @@
  */
 static void input_dev_release_keys(struct input_dev *dev)
 {
+	bool need_sync = false;
 	int code;
 
 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
-		for_each_set_bit(code, dev->key, KEY_CNT)
+		for_each_set_bit(code, dev->key, KEY_CNT) {
 			input_pass_event(dev, EV_KEY, code, 0);
+			need_sync = true;
+		}
+
+		if (need_sync)
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+
 		memset(dev->key, 0, sizeof(dev->key));
-		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 1d247bc..6cb5a3e 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -859,12 +859,11 @@
 	joydev->handle.handler = handler;
 	joydev->handle.private = joydev;
 
-	for (i = 0; i < ABS_CNT; i++)
-		if (test_bit(i, dev->absbit)) {
-			joydev->absmap[i] = joydev->nabs;
-			joydev->abspam[joydev->nabs] = i;
-			joydev->nabs++;
-		}
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
+		joydev->absmap[i] = joydev->nabs;
+		joydev->abspam[joydev->nabs] = i;
+		joydev->nabs++;
+	}
 
 	for (i = BTN_JOYSTICK - BTN_MISC; i < KEY_MAX - BTN_MISC + 1; i++)
 		if (test_bit(i + BTN_MISC, dev->keybit)) {
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index 30af2e8..4a8258b 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -47,6 +47,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/bitrev.h>
 #include <linux/input.h>
 #include <linux/serio.h>
 
@@ -72,16 +73,6 @@
 	char phys[32];
 };
 
-
-/* bits in all incoming bytes needs to be "reversed" */
-static int zhenhua_bitreverse(int x)
-{
-	x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
-	x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
-	x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
-	return x;
-}
-
 /*
  * zhenhua_process_packet() decodes packets the driver receives from the
  * RC transmitter. It updates the data accordingly.
@@ -120,7 +111,7 @@
 		return IRQ_HANDLED;	/* wrong MSB -- ignore this byte */
 
 	if (zhenhua->idx < ZHENHUA_MAX_LENGTH)
-		zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data);
+		zhenhua->data[zhenhua->idx++] = bitrev8(data);
 
 	if (zhenhua->idx == ZHENHUA_MAX_LENGTH) {
 		zhenhua_process_packet(zhenhua);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 82a8fb5..2e80107 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -187,7 +187,7 @@
 
 config KEYBOARD_GPIO
 	tristate "GPIO Buttons"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This driver implements support for buttons connected
 	  to GPIO pins of various CPUs (and some other chips).
@@ -253,7 +253,7 @@
 
 config KEYBOARD_MATRIX
 	tristate "GPIO driven matrix keypad support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_MATRIXKMAP
 	help
 	  Enable support for GPIO driven matrix keypad.
@@ -689,7 +689,7 @@
 config KEYBOARD_CROS_EC
 	tristate "ChromeOS EC keyboard"
 	select INPUT_MATRIXKMAP
-	depends on CROS_EC_PROTO
+	depends on MFD_CROS_EC
 	help
 	  Say Y here to enable the matrix keyboard used by ChromeOS devices
 	  and implemented on the ChromeOS EC. You must enable one bus option
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 6ed83cf..4d446d5 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1097,7 +1097,6 @@
 static struct i2c_driver adp5589_driver = {
 	.driver = {
 		.name = KBUILD_MODNAME,
-		.owner = THIS_MODULE,
 		.pm = &adp5589_dev_pm_ops,
 	},
 	.probe = adp5589_probe,
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index f07461a..378db10 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/of_irq.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
@@ -47,6 +48,20 @@
 #define CAP11XX_REG_CONFIG2		0x44
 #define CAP11XX_REG_CONFIG2_ALT_POL	BIT(6)
 #define CAP11XX_REG_SENSOR_BASE_CNT(X)	(0x50 + (X))
+#define CAP11XX_REG_LED_POLARITY	0x73
+#define CAP11XX_REG_LED_OUTPUT_CONTROL	0x74
+
+#define CAP11XX_REG_LED_DUTY_CYCLE_1	0x90
+#define CAP11XX_REG_LED_DUTY_CYCLE_2	0x91
+#define CAP11XX_REG_LED_DUTY_CYCLE_3	0x92
+#define CAP11XX_REG_LED_DUTY_CYCLE_4	0x93
+
+#define CAP11XX_REG_LED_DUTY_MIN_MASK	(0x0f)
+#define CAP11XX_REG_LED_DUTY_MIN_MASK_SHIFT	(0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK	(0xf0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT	(4)
+#define CAP11XX_REG_LED_DUTY_MAX_VALUE	(15)
+
 #define CAP11XX_REG_SENSOR_CALIB	(0xb1 + (X))
 #define CAP11XX_REG_SENSOR_CALIB_LSB1	0xb9
 #define CAP11XX_REG_SENSOR_CALIB_LSB2	0xba
@@ -56,10 +71,23 @@
 
 #define CAP11XX_MANUFACTURER_ID	0x5d
 
+#ifdef CONFIG_LEDS_CLASS
+struct cap11xx_led {
+	struct cap11xx_priv *priv;
+	struct led_classdev cdev;
+	struct work_struct work;
+	u32 reg;
+	enum led_brightness new_brightness;
+};
+#endif
+
 struct cap11xx_priv {
 	struct regmap *regmap;
 	struct input_dev *idev;
 
+	struct cap11xx_led *leds;
+	int num_leds;
+
 	/* config */
 	u32 keycodes[];
 };
@@ -67,6 +95,7 @@
 struct cap11xx_hw_model {
 	u8 product_id;
 	unsigned int num_channels;
+	unsigned int num_leds;
 };
 
 enum {
@@ -76,9 +105,9 @@
 };
 
 static const struct cap11xx_hw_model cap11xx_devices[] = {
-	[CAP1106] = { .product_id = 0x55, .num_channels = 6 },
-	[CAP1126] = { .product_id = 0x53, .num_channels = 6 },
-	[CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+	[CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 },
+	[CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 },
+	[CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 },
 };
 
 static const struct reg_default cap11xx_reg_defaults[] = {
@@ -111,6 +140,7 @@
 	{ CAP11XX_REG_STANDBY_SENSITIVITY,	0x02 },
 	{ CAP11XX_REG_STANDBY_THRESH,		0x40 },
 	{ CAP11XX_REG_CONFIG2,			0x40 },
+	{ CAP11XX_REG_LED_POLARITY,		0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB1,	0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB2,	0x00 },
 };
@@ -177,6 +207,12 @@
 
 static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
 {
+	/*
+	 * DLSEEP mode would turn off all LEDs, so prevent entering it
+	 */
+	if (IS_ENABLED(CONFIG_LEDS_CLASS) && priv->num_leds)
+		return 0;
+
 	return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
 				  CAP11XX_REG_MAIN_CONTROL_DLSEEP,
 				  sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
@@ -196,6 +232,104 @@
 	cap11xx_set_sleep(priv, true);
 }
 
+#ifdef CONFIG_LEDS_CLASS
+static void cap11xx_led_work(struct work_struct *work)
+{
+	struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+	struct cap11xx_priv *priv = led->priv;
+	int value = led->new_brightness;
+
+	/*
+	 * All LEDs share the same duty cycle as this is a HW limitation.
+	 * Brightness levels per LED are either 0 (OFF) or 1 (ON).
+	 */
+	regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
+				BIT(led->reg), value ? BIT(led->reg) : 0);
+}
+
+static void cap11xx_led_set(struct led_classdev *cdev,
+			   enum led_brightness value)
+{
+	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
+
+	if (led->new_brightness == value)
+		return;
+
+	led->new_brightness = value;
+	schedule_work(&led->work);
+}
+
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	struct device_node *node = dev->of_node, *child;
+	struct cap11xx_led *led;
+	int cnt = of_get_child_count(node);
+	int error;
+
+	if (!num_leds || !cnt)
+		return 0;
+
+	if (cnt > num_leds)
+		return -EINVAL;
+
+	led = devm_kcalloc(dev, cnt, sizeof(struct cap11xx_led), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	priv->leds = led;
+
+	error = regmap_update_bits(priv->regmap,
+				CAP11XX_REG_LED_OUTPUT_CONTROL, 0xff, 0);
+	if (error)
+		return error;
+
+	error = regmap_update_bits(priv->regmap, CAP11XX_REG_LED_DUTY_CYCLE_4,
+				CAP11XX_REG_LED_DUTY_MAX_MASK,
+				CAP11XX_REG_LED_DUTY_MAX_VALUE <<
+				CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT);
+	if (error)
+		return error;
+
+	for_each_child_of_node(node, child) {
+		u32 reg;
+
+		led->cdev.name =
+			of_get_property(child, "label", NULL) ? : child->name;
+		led->cdev.default_trigger =
+			of_get_property(child, "linux,default-trigger", NULL);
+		led->cdev.flags = 0;
+		led->cdev.brightness_set = cap11xx_led_set;
+		led->cdev.max_brightness = 1;
+		led->cdev.brightness = LED_OFF;
+
+		error = of_property_read_u32(child, "reg", &reg);
+		if (error != 0 || reg >= num_leds)
+			return -EINVAL;
+
+		led->reg = reg;
+		led->priv = priv;
+
+		INIT_WORK(&led->work, cap11xx_led_work);
+
+		error = devm_led_classdev_register(dev, &led->cdev);
+		if (error)
+			return error;
+
+		priv->num_leds++;
+		led++;
+	}
+
+	return 0;
+}
+#else
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	return 0;
+}
+#endif
+
 static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
 			     const struct i2c_device_id *id)
 {
@@ -316,6 +450,10 @@
 	priv->idev->open = cap11xx_input_open;
 	priv->idev->close = cap11xx_input_close;
 
+	error = cap11xx_init_leds(dev, priv, cap->num_leds);
+	if (error)
+		return error;
+
 	input_set_drvdata(priv->idev, priv);
 
 	/*
@@ -361,7 +499,6 @@
 static struct i2c_driver cap11xx_i2c_driver = {
 	.driver = {
 		.name	= "cap11xx",
-		.owner	= THIS_MODULE,
 		.of_match_table = cap11xx_dt_ids,
 	},
 	.id_table	= cap11xx_i2c_ids,
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ddf4045..9d517ca 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -239,6 +239,11 @@
 		}
 	}
 
+	if (i == ddata->pdata->nbuttons) {
+		error = -EINVAL;
+		goto out;
+	}
+
 	mutex_lock(&ddata->disable_lock);
 
 	for (i = 0; i < ddata->pdata->nbuttons; i++) {
@@ -655,7 +660,9 @@
 		if (of_property_read_u32(pp, "linux,input-type", &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+		button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
+				 /* legacy name */
+				 of_property_read_bool(pp, "gpio-key,wakeup");
 
 		button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
 
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c6dc644..870cfa6 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -152,7 +152,10 @@
 					     &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = fwnode_property_present(child, "gpio-key,wakeup");
+		button->wakeup =
+			fwnode_property_read_bool(child, "wakeup-source") ||
+			/* legacy name */
+			fwnode_property_read_bool(child, "gpio-key,wakeup");
 
 		if (fwnode_property_read_u32(child, "debounce-interval",
 					     &button->debounce_interval))
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 0ad422b..c717e8f 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -223,7 +223,6 @@
 static struct i2c_driver lm8333_driver = {
 	.driver = {
 		.name		= "lm8333",
-		.owner		= THIS_MODULE,
 	},
 	.probe		= lm8333_probe,
 	.remove		= lm8333_remove,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b370a59..7f12b65 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -425,8 +425,10 @@
 
 	if (of_get_property(np, "linux,no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
-	if (of_get_property(np, "linux,wakeup", NULL))
-		pdata->wakeup = true;
+
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			of_property_read_bool(np, "linux,wakeup"); /* legacy */
+
 	if (of_get_property(np, "gpio-activelow", NULL))
 		pdata->active_low = true;
 
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 375b05c..31090d7 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -265,7 +265,6 @@
 static struct i2c_driver mcs_touchkey_driver = {
 	.driver = {
 		.name	= "mcs_touchkey",
-		.owner	= THIS_MODULE,
 		.pm	= &mcs_touchkey_pm_ops,
 	},
 	.probe		= mcs_touchkey_probe,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 3aa2ec4..0fd612d 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -305,7 +305,6 @@
 static struct i2c_driver mpr_touchkey_driver = {
 	.driver = {
 		.name	= "mpr121",
-		.owner	= THIS_MODULE,
 		.pm	= &mpr121_touchkey_pm_ops,
 	},
 	.id_table	= mpr121_id,
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 32580af..5c68e3f 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -507,6 +507,7 @@
  */
 static int pmic8xxx_kp_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	unsigned int rows, cols;
 	bool repeat;
 	bool wakeup;
@@ -524,10 +525,11 @@
 		return -EINVAL;
 	}
 
-	repeat = !of_property_read_bool(pdev->dev.of_node,
-					"linux,input-no-autorepeat");
-	wakeup = of_property_read_bool(pdev->dev.of_node,
-					"linux,keypad-wakeup");
+	repeat = !of_property_read_bool(np, "linux,input-no-autorepeat");
+
+	wakeup = of_property_read_bool(np, "wakeup-source") ||
+		 /* legacy name */
+		 of_property_read_bool(np, "linux,keypad-wakeup");
 
 	kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
 	if (!kp)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 52cd6e8..5a57787 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -277,7 +277,6 @@
 static struct i2c_driver qt1070_driver = {
 	.driver	= {
 		.name	= "qt1070",
-		.owner	= THIS_MODULE,
 		.pm	= &qt1070_pm_ops,
 	},
 	.id_table	= qt1070_id,
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 819b228..43b8648 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -497,7 +497,6 @@
 static struct i2c_driver qt2160_driver = {
 	.driver = {
 		.name	= "qt2160",
-		.owner  = THIS_MODULE,
 	},
 
 	.id_table	= qt2160_idtable,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 43e48dac..4e319eb 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -299,8 +299,10 @@
 	if (of_get_property(np, "linux,input-no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
 
-	if (of_get_property(np, "linux,input-wakeup", NULL))
-		pdata->wakeup = true;
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			/* legacy name */
+			of_property_read_bool(np, "linux,input-wakeup");
 
 	return pdata;
 }
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 31c606a..e92dfd8 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -17,6 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/mfd/tc3589x.h>
+#include <linux/device.h>
 
 /* Maximum supported keypad matrix row/columns size */
 #define TC3589x_MAX_KPROW               8
@@ -352,7 +353,10 @@
 	}
 
 	plat->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
-	plat->enable_wakeup = of_property_read_bool(np, "linux,wakeup");
+
+	plat->enable_wakeup = of_property_read_bool(np, "wakeup-source") ||
+			      /* legacy name */
+			      of_property_read_bool(np, "linux,wakeup");
 
 	/* The custom delay format is ms/16 */
 	of_property_read_u32(np, "debounce-delay-ms", &debounce_ms);
@@ -386,12 +390,15 @@
 	if (irq < 0)
 		return irq;
 
-	keypad = kzalloc(sizeof(struct tc_keypad), GFP_KERNEL);
-	input = input_allocate_device();
-	if (!keypad || !input) {
-		dev_err(&pdev->dev, "failed to allocate keypad memory\n");
-		error = -ENOMEM;
-		goto err_free_mem;
+	keypad = devm_kzalloc(&pdev->dev, sizeof(struct tc_keypad),
+			      GFP_KERNEL);
+	if (!keypad)
+		return -ENOMEM;
+
+	input = devm_input_allocate_device(&pdev->dev);
+	if (!input) {
+		dev_err(&pdev->dev, "failed to allocate input device\n");
+		return -ENOMEM;
 	}
 
 	keypad->board = plat;
@@ -410,7 +417,7 @@
 					   NULL, input);
 	if (error) {
 		dev_err(&pdev->dev, "Failed to build keymap\n");
-		goto err_free_mem;
+		return error;
 	}
 
 	keypad->keymap = input->keycode;
@@ -421,20 +428,23 @@
 
 	input_set_drvdata(input, keypad);
 
-	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
-				     plat->irqtype | IRQF_ONESHOT,
-				     "tc3589x-keypad", keypad);
-	if (error < 0) {
+	tc3589x_keypad_disable(keypad);
+
+	error = devm_request_threaded_irq(&pdev->dev, irq,
+					  NULL, tc3589x_keypad_irq,
+					  plat->irqtype | IRQF_ONESHOT,
+					  "tc3589x-keypad", keypad);
+	if (error) {
 		dev_err(&pdev->dev,
 				"Could not allocate irq %d,error %d\n",
 				irq, error);
-		goto err_free_mem;
+		return error;
 	}
 
 	error = input_register_device(input);
 	if (error) {
 		dev_err(&pdev->dev, "Could not register input device\n");
-		goto err_free_irq;
+		return error;
 	}
 
 	/* let platform decide if keypad is a wakeup source or not */
@@ -444,30 +454,6 @@
 	platform_set_drvdata(pdev, keypad);
 
 	return 0;
-
-err_free_irq:
-	free_irq(irq, keypad);
-err_free_mem:
-	input_free_device(input);
-	kfree(keypad);
-	return error;
-}
-
-static int tc3589x_keypad_remove(struct platform_device *pdev)
-{
-	struct tc_keypad *keypad = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	if (!keypad->keypad_stopped)
-		tc3589x_keypad_disable(keypad);
-
-	free_irq(irq, keypad);
-
-	input_unregister_device(keypad->input);
-
-	kfree(keypad);
-
-	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -518,7 +504,6 @@
 		.pm	= &tc3589x_keypad_dev_pm_ops,
 	},
 	.probe	= tc3589x_keypad_probe,
-	.remove	= tc3589x_keypad_remove,
 };
 module_platform_driver(tc3589x_keypad_driver);
 
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 4e491c1..9002298 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -404,7 +404,6 @@
 static struct i2c_driver tca8418_keypad_driver = {
 	.driver = {
 		.name	= TCA8418_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tca8418_dt_ids),
 	},
 	.probe		= tca8418_keypad_probe,
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c41dec8..906dd1b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -247,7 +247,7 @@
 config INPUT_GP2A
 	tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
 	  hooked to an I2C bus.
@@ -257,7 +257,7 @@
 
 config INPUT_GPIO_BEEPER
 	tristate "Generic GPIO Beeper support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a beeper connected to a GPIO pin.
 
@@ -266,7 +266,7 @@
 
 config INPUT_GPIO_TILT_POLLED
 	tristate "Polled GPIO tilt switch"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver implements support for tilt switches connected
@@ -557,7 +557,7 @@
 
 config INPUT_GPIO_ROTARY_ENCODER
 	tristate "Rotary encoders connected to GPIO pins"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here to add support for rotary encoders connected to GPIO lines.
 	  Check file:Documentation/input/rotary-encoder.txt for more
@@ -764,7 +764,8 @@
 
 config INPUT_DRV260X_HAPTICS
 	tristate "TI DRV260X haptics support"
-	depends on INPUT && I2C && GPIOLIB
+	depends on INPUT && I2C
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_FF_MEMLESS
 	select REGMAP_I2C
 	help
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index bdb5d03..a8b0a2e 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -158,7 +158,6 @@
 static struct i2c_driver adxl34x_driver = {
 	.driver = {
 		.name = "adxl34x",
-		.owner = THIS_MODULE,
 		.pm = &adxl34x_i2c_pm,
 		.of_match_table = of_match_ptr(adxl34x_of_id),
 	},
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4dbbed7..4bf6785 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -170,8 +170,8 @@
 
 	INIT_WORK(&haptics->work, arizona_haptics_work);
 
-	haptics->input_dev = input_allocate_device();
-	if (haptics->input_dev == NULL) {
+	haptics->input_dev = devm_input_allocate_device(&pdev->dev);
+	if (!haptics->input_dev) {
 		dev_err(arizona->dev, "Failed to allocate input device\n");
 		return -ENOMEM;
 	}
@@ -188,41 +188,23 @@
 	if (ret < 0) {
 		dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
 			ret);
-		goto err_ialloc;
+		return ret;
 	}
 
 	ret = input_register_device(haptics->input_dev);
 	if (ret < 0) {
 		dev_err(arizona->dev, "couldn't register input device: %d\n",
 			ret);
-		goto err_iff;
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, haptics);
 
 	return 0;
-
-err_iff:
-	if (haptics->input_dev)
-		input_ff_destroy(haptics->input_dev);
-err_ialloc:
-	input_free_device(haptics->input_dev);
-
-	return ret;
-}
-
-static int arizona_haptics_remove(struct platform_device *pdev)
-{
-	struct arizona_haptics *haptics = platform_get_drvdata(pdev);
-
-	input_unregister_device(haptics->input_dev);
-
-	return 0;
 }
 
 static struct platform_driver arizona_haptics_driver = {
 	.probe		= arizona_haptics_probe,
-	.remove		= arizona_haptics_remove,
 	.driver		= {
 		.name	= "arizona-haptics",
 	},
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b36831c..1d0e61d 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -333,10 +333,9 @@
 	y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
 	z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);
 
-	/* sign extension */
-	x = (s16) (x << 6) >> 6;
-	y = (s16) (y << 6) >> 6;
-	z = (s16) (z << 6) >> 6;
+	x = sign_extend32(x, 9);
+	y = sign_extend32(y, 9);
+	z = sign_extend32(z, 9);
 
 	input_report_abs(bma150->input, ABS_X, x);
 	input_report_abs(bma150->input, ABS_Y, y);
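
sign_extend32(value, index) replicates bit 'index' into all higher bits, so for these 10-bit samples bit 9 becomes the sign bit, exactly what the old shift-up/shift-down pair did. A few spot values as a sanity check:

	sign_extend32(0x1ff, 9);	/* bit 9 clear -> 511  */
	sign_extend32(0x200, 9);	/* bit 9 set   -> -512 */
	sign_extend32(0x3ff, 9);	/* all ten set -> -1   */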
@@ -654,7 +653,6 @@
 
 static struct i2c_driver bma150_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= BMA150_DRIVER,
 		.pm	= &bma150_pm,
 	},
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 4fdef98..c702191 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -118,7 +118,6 @@
 	.id_table	= cma3000_i2c_id,
 	.driver = {
 		.name	= "cma3000_i2c_accl",
-		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &cma3000_i2c_pm_ops,
 #endif
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index e5d60ec..2adfd86c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -204,7 +204,7 @@
 	int overdrive_voltage;
 };
 
-static struct reg_default drv260x_reg_defs[] = {
+static const struct reg_default drv260x_reg_defs[] = {
 	{ DRV260X_STATUS, 0xe0 },
 	{ DRV260X_MODE, 0x40 },
 	{ DRV260X_RT_PB_IN, 0x00 },
@@ -313,14 +313,14 @@
 	gpiod_set_value(haptics->enable_gpio, 0);
 }
 
-static const struct reg_default drv260x_lra_cal_regs[] = {
+static const struct reg_sequence drv260x_lra_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
 	{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
 		DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
 };
 
-static const struct reg_default drv260x_lra_init_regs[] = {
+static const struct reg_sequence drv260x_lra_init_regs[] = {
 	{ DRV260X_MODE, DRV260X_RT_PLAYBACK },
 	{ DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
 		DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
@@ -337,7 +337,7 @@
 	{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
 };
 
-static const struct reg_default drv260x_erm_cal_regs[] = {
+static const struct reg_sequence drv260x_erm_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
 	{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
@@ -720,7 +720,6 @@
 	.probe		= drv260x_probe,
 	.driver		= {
 		.name	= "drv260x-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv260x_of_match),
 		.pm	= &drv260x_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index 0afaa33..ef9bc12 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -74,7 +74,7 @@
 	0x9b, 0x9f, 0xa5, 0xad, 0xb8, 0xc4, 0xd2, 0xe0, 0xf0, 0x00,
 };
 
-static struct reg_default drv2665_reg_defs[] = {
+static const struct reg_default drv2665_reg_defs[] = {
 	{ DRV2665_STATUS, 0x02 },
 	{ DRV2665_CTRL_1, 0x28 },
 	{ DRV2665_CTRL_2, 0x40 },
@@ -132,7 +132,7 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2665_init_regs[] = {
+static const struct reg_sequence drv2665_init_regs[] = {
 	{ DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
 	{ DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
 };
@@ -309,7 +309,6 @@
 	.probe		= drv2665_probe,
 	.driver		= {
 		.name	= "drv2665-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2665_of_match),
 		.pm	= &drv2665_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index fc0fddf..d5ba748 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -116,7 +116,7 @@
 	u32 frequency;
 };
 
-static struct reg_default drv2667_reg_defs[] = {
+static const struct reg_default drv2667_reg_defs[] = {
 	{ DRV2667_STATUS, 0x02 },
 	{ DRV2667_CTRL_1, 0x28 },
 	{ DRV2667_CTRL_2, 0x40 },
@@ -262,14 +262,14 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2667_init_regs[] = {
+static const struct reg_sequence drv2667_init_regs[] = {
 	{ DRV2667_CTRL_2, 0 },
 	{ DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
 	{ DRV2667_WV_SEQ_0, 1 },
 	{ DRV2667_WV_SEQ_1, 0 }
 };
 
-static const struct reg_default drv2667_page1_init[] = {
+static const struct reg_sequence drv2667_page1_init[] = {
 	{ DRV2667_RAM_HDR_SZ, 0x05 },
 	{ DRV2667_RAM_START_HI, 0x80 },
 	{ DRV2667_RAM_START_LO, 0x06 },
@@ -484,7 +484,6 @@
 	.probe		= drv2667_probe,
 	.driver		= {
 		.name	= "drv2667-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2667_of_match),
 		.pm	= &drv2667_pm_ops,
 	},
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index 0ac176d..3bfdfcc 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -267,11 +267,11 @@
 	{ GP2A_I2C_NAME, 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
 
 static struct i2c_driver gp2a_i2c_driver = {
 	.driver = {
 		.name	= GP2A_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &gp2a_pm,
 	},
 	.probe		= gp2a_probe,
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index 6e29349..e058d71 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -658,7 +658,6 @@
 static struct i2c_driver kxtj9_driver = {
 	.driver = {
 		.name	= NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &kxtj9_pm_ops,
 	},
 	.probe		= kxtj9_probe,
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index d0f6872..a806ba3 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -394,7 +394,7 @@
 	{ "max8997-haptic", 0 },
 	{ },
 };
-MODULE_DEVICE_TABLE(i2c, max8997_haptic_id);
+MODULE_DEVICE_TABLE(platform, max8997_haptic_id);
 
 static struct platform_driver max8997_haptic_driver = {
 	.driver	= {
@@ -407,7 +407,6 @@
 };
 module_platform_driver(max8997_haptic_driver);
 
-MODULE_ALIAS("platform:max8997-haptic");
 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
 MODULE_DESCRIPTION("max8997_haptic driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5e50513..f088db3 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -466,7 +466,6 @@
 static struct i2c_driver mpu3050_i2c_driver = {
 	.driver	= {
 		.name	= "mpu3050",
-		.owner	= THIS_MODULE,
 		.pm	= &mpu3050_pm,
 		.of_match_table = mpu3050_of_match,
 	},
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 97f711a..4abdf1e 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -208,7 +208,6 @@
 static struct i2c_driver pcf8574_kp_driver = {
 	.driver = {
 		.name  = DRV_NAME,
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &pcf8574_kp_pm_ops,
 #endif
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index c4ca20e..3f02e0e 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -20,17 +20,72 @@
 #include <linux/regmap.h>
 #include <linux/log2.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #define PON_CNTL_1 0x1C
 #define PON_CNTL_PULL_UP BIT(7)
 #define PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define PON_CNTL_1_PULL_UP_EN			0xe0
+#define PON_CNTL_1_USB_PWR_EN			0x10
+#define PON_CNTL_1_WD_EN_RESET			0x08
+
+#define PM8058_SLEEP_CTRL			0x02b
+#define PM8921_SLEEP_CTRL			0x10a
+
+#define SLEEP_CTRL_SMPL_EN_RESET		0x04
+
+/* Regulator master enable addresses */
+#define REG_PM8058_VREG_EN_MSM			0x018
+#define REG_PM8058_VREG_EN_GRP_5_4		0x1c8
+
+/* Regulator control registers for shutdown/reset */
+#define PM8058_S0_CTRL				0x004
+#define PM8058_S1_CTRL				0x005
+#define PM8058_S3_CTRL				0x111
+#define PM8058_L21_CTRL				0x120
+#define PM8058_L22_CTRL				0x121
+
+#define PM8058_REGULATOR_ENABLE_MASK		0x80
+#define PM8058_REGULATOR_ENABLE			0x80
+#define PM8058_REGULATOR_DISABLE		0x00
+#define PM8058_REGULATOR_PULL_DOWN_MASK		0x40
+#define PM8058_REGULATOR_PULL_DOWN_EN		0x40
+
+/* Buck CTRL register */
+#define PM8058_SMPS_LEGACY_VREF_SEL		0x20
+#define PM8058_SMPS_LEGACY_VPROG_MASK		0x1f
+#define PM8058_SMPS_ADVANCED_BAND_MASK		0xC0
+#define PM8058_SMPS_ADVANCED_BAND_SHIFT		6
+#define PM8058_SMPS_ADVANCED_VPROG_MASK		0x3f
+
+/* Buck TEST2 registers for shutdown/reset */
+#define PM8058_S0_TEST2				0x084
+#define PM8058_S1_TEST2				0x085
+#define PM8058_S3_TEST2				0x11a
+
+#define PM8058_REGULATOR_BANK_WRITE		0x80
+#define PM8058_REGULATOR_BANK_MASK		0x70
+#define PM8058_REGULATOR_BANK_SHIFT		4
+#define PM8058_REGULATOR_BANK_SEL(n)	((n) << PM8058_REGULATOR_BANK_SHIFT)
+
+/* Buck TEST2 register bank 1 */
+#define PM8058_SMPS_LEGACY_VLOW_SEL		0x01
+
+/* Buck TEST2 register bank 7 */
+#define PM8058_SMPS_ADVANCED_MODE_MASK		0x02
+#define PM8058_SMPS_ADVANCED_MODE		0x02
+#define PM8058_SMPS_LEGACY_MODE			0x00
 
 /**
  * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
  * @key_press_irq: key press irq number
+ * @regmap: device regmap
+ * @shutdown_fn: shutdown configuration function
  */
 struct pmic8xxx_pwrkey {
 	int key_press_irq;
+	struct regmap *regmap;
+	int (*shutdown_fn)(struct pmic8xxx_pwrkey *, bool);
 };
 
 static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
@@ -76,6 +131,212 @@
 static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
 		pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
 
+static void pmic8xxx_pwrkey_shutdown(struct platform_device *pdev)
+{
+	struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
+	int error;
+	u8 mask, val;
+	bool reset = system_state == SYSTEM_RESTART;
+
+	if (pwrkey->shutdown_fn) {
+		error = pwrkey->shutdown_fn(pwrkey, reset);
+		if (error)
+			return;
+	}
+
+	/*
+	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+	 * USB charging is enabled.
+	 */
+	mask = PON_CNTL_1_PULL_UP_EN | PON_CNTL_1_USB_PWR_EN;
+	mask |= PON_CNTL_1_WD_EN_RESET;
+	val = mask;
+	if (!reset)
+		val &= ~PON_CNTL_1_WD_EN_RESET;
+
+	regmap_update_bits(pwrkey->regmap, PON_CNTL_1, mask, val);
+}
+
+/*
+ * Set an SMPS regulator to be disabled in its CTRL register, but enabled
+ * in the master enable register.  Also set its pull-down enable bit.
+ * Take care that the output voltage doesn't change when switching
+ * from advanced mode to legacy mode.
+ */
+static int pm8058_disable_smps_locally_set_pull_down(struct regmap *regmap,
+	u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
+	u8 master_enable_bit)
+{
+	int error;
+	u8 vref_sel, vlow_sel, band, vprog, bank;
+	unsigned int reg;
+
+	bank = PM8058_REGULATOR_BANK_SEL(7);
+	error = regmap_write(regmap, test2_addr, bank);
+	if (error)
+		return error;
+
+	error = regmap_read(regmap, test2_addr, &reg);
+	if (error)
+		return error;
+
+	reg &= PM8058_SMPS_ADVANCED_MODE_MASK;
+	/* Check if in advanced mode. */
+	if (reg == PM8058_SMPS_ADVANCED_MODE) {
+		/* Determine current output voltage. */
+		error = regmap_read(regmap, ctrl_addr, &reg);
+		if (error)
+			return error;
+
+		band = reg & PM8058_SMPS_ADVANCED_BAND_MASK;
+		band >>= PM8058_SMPS_ADVANCED_BAND_SHIFT;
+		switch (band) {
+		case 3:
+			vref_sel = 0;
+			vlow_sel = 0;
+			break;
+		case 2:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = 0;
+			break;
+		case 1:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
+			break;
+		default:
+			pr_err("%s: regulator already disabled\n", __func__);
+			return -EPERM;
+		}
+		vprog = reg & PM8058_SMPS_ADVANCED_VPROG_MASK;
+		/* Round up if fine step is in use. */
+		vprog = (vprog + 1) >> 1;
+		if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
+			vprog = PM8058_SMPS_LEGACY_VPROG_MASK;
+
+		/* Set VLOW_SEL bit. */
+		bank = PM8058_REGULATOR_BANK_SEL(1);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+			PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
+				| PM8058_SMPS_LEGACY_VLOW_SEL,
+			PM8058_REGULATOR_BANK_WRITE |
+			PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
+		if (error)
+			return error;
+
+		/* Switch to legacy mode */
+		bank = PM8058_REGULATOR_BANK_SEL(7);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_MASK |
+				PM8058_SMPS_ADVANCED_MODE_MASK,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_SEL(7) |
+				PM8058_SMPS_LEGACY_MODE);
+		if (error)
+			return error;
+
+		/* Enable locally, enable pull down, keep voltage the same. */
+		error = regmap_update_bits(regmap, ctrl_addr,
+			PM8058_REGULATOR_ENABLE_MASK |
+			PM8058_REGULATOR_PULL_DOWN_MASK |
+			PM8058_SMPS_LEGACY_VREF_SEL |
+			PM8058_SMPS_LEGACY_VPROG_MASK,
+			PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
+				| vref_sel | vprog);
+		if (error)
+			return error;
+	}
+
+	/* Enable in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable locally and enable pull down. */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_disable_ldo_locally_set_pull_down(struct regmap *regmap,
+		u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
+{
+	int error;
+
+	/* Enable LDO in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable LDO in CTRL register and set pull down */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	int error;
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask, val;
+
+	/* When shutting down, enable active pulldowns on important rails. */
+	if (!reset) {
+		/* Disable SMPSes 0, 1 and 3 locally and set pull-down enable bits. */
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S0_CTRL, PM8058_S0_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(7));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S1_CTRL, PM8058_S1_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(6));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S3_CTRL, PM8058_S3_TEST2,
+			REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
+		/* Disable LDO 21 locally and set pulldown enable bit. */
+		pm8058_disable_ldo_locally_set_pull_down(regmap,
+			PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
+			BIT(1));
+	}
+
+	/*
+	 * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
+	 * pull-down state intact. This ensures a safe shutdown.
+	 */
+	error = regmap_update_bits(regmap, PM8058_L22_CTRL, 0xbf, 0x93);
+	if (error)
+		return error;
+
+	/* Enable SMPL if resetting is desired */
+	mask = SLEEP_CTRL_SMPL_EN_RESET;
+	val = 0;
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8058_SLEEP_CTRL, mask, val);
+}
+
+static int pm8921_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask = SLEEP_CTRL_SMPL_EN_RESET;
+	u8 val = 0;
+
+	/* Enable SMPL if resetting is desired */
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8921_SLEEP_CTRL, mask, val);
+}
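
Both shutdown paths above lean on the regmap_update_bits() read-modify-write idiom: pass the full mask of bits being managed and a value that sets or clears them in a single call. A minimal sketch of the idiom, with a made-up register and bit purely for illustration:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Hypothetical register and bit, for illustration only. */
#define EXAMPLE_CTRL_REG	0x04a
#define EXAMPLE_EN_BIT		BIT(7)

static int example_set_enable(struct regmap *regmap, bool enable)
{
	/*
	 * regmap_update_bits() reads the register, clears the bits in
	 * the mask, ORs in (val & mask), and writes the result back
	 * only if it actually changed.
	 */
	return regmap_update_bits(regmap, EXAMPLE_CTRL_REG,
				  EXAMPLE_EN_BIT,
				  enable ? EXAMPLE_EN_BIT : 0);
}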
+
 static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 {
 	struct input_dev *pwr;
@@ -109,6 +370,8 @@
 	if (!pwrkey)
 		return -ENOMEM;
 
+	pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
+	pwrkey->regmap = regmap;
 	pwrkey->key_press_irq = key_press_irq;
 
 	pwr = devm_input_allocate_device(&pdev->dev);
@@ -182,8 +445,8 @@
 }
 
 static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
-	{ .compatible = "qcom,pm8058-pwrkey" },
-	{ .compatible = "qcom,pm8921-pwrkey" },
+	{ .compatible = "qcom,pm8058-pwrkey", .data = &pm8058_pwrkey_shutdown },
+	{ .compatible = "qcom,pm8921-pwrkey", .data = &pm8921_pwrkey_shutdown },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
@@ -191,6 +454,7 @@
 static struct platform_driver pmic8xxx_pwrkey_driver = {
 	.probe		= pmic8xxx_pwrkey_probe,
 	.remove		= pmic8xxx_pwrkey_remove,
+	.shutdown	= pmic8xxx_pwrkey_shutdown,
 	.driver		= {
 		.name	= "pm8xxx-pwrkey",
 		.pm	= &pm8xxx_pwr_key_pm_ops,
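
The of_device_id .data pointers wired up above are what let one platform driver serve both PMIC variants: probe() stashes the matched entry's callback via of_device_get_match_data(), and the driver-level .shutdown hook simply calls through it. A reduced sketch of the pattern (all names here are illustrative, not the driver's):

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_pwrkey {
	int (*shutdown_fn)(struct example_pwrkey *pwrkey, bool reset);
};

static int example_probe(struct platform_device *pdev)
{
	struct example_pwrkey *pwrkey;

	pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
	if (!pwrkey)
		return -ENOMEM;

	/* Returns the .data of whichever compatible string matched. */
	pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
	platform_set_drvdata(pdev, pwrkey);
	return 0;
}

static void example_shutdown(struct platform_device *pdev)
{
	struct example_pwrkey *pwrkey = platform_get_drvdata(pdev);

	/* A variant may legitimately provide no shutdown hook. */
	if (pwrkey->shutdown_fn)
		pwrkey->shutdown_fn(pwrkey, false);
}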
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e956e81..62c5814 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -7,6 +7,7 @@
 #include <linux/input-polldev.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 
 #include <asm/mach-rc32434/gpio.h>
 #include <asm/mach-rc32434/rb.h>
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 421e29e..345df9b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -320,10 +320,8 @@
 	 * Check if absmin/absmax/absfuzz/absflat are sane.
 	 */
 
-	for (cnt = 0; cnt < ABS_CNT; cnt++) {
+	for_each_set_bit(cnt, dev->absbit, ABS_CNT) {
 		int min, max;
-		if (!test_bit(cnt, dev->absbit))
-			continue;
 
 		min = input_abs_get_min(dev, cnt);
 		max = input_abs_get_max(dev, cnt);
@@ -416,7 +414,7 @@
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	for (i = 0; i < ABS_CNT; i++) {
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
 		input_abs_set_max(dev, i, user_dev->absmax[i]);
 		input_abs_set_min(dev, i, user_dev->absmin[i]);
 		input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
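
The uinput conversion above is the standard for_each_set_bit() cleanup: instead of scanning every possible index and skipping clear bits with test_bit(), the iterator visits only the set bits. A small hedged sketch of the same idiom (the helper is invented for illustration):

#include <linux/bitops.h>
#include <linux/input.h>

/* Illustrative helper: log the range of each advertised ABS axis. */
static void example_dump_abs_axes(struct input_dev *dev)
{
	unsigned int code;

	/* Equivalent to a 0..ABS_CNT loop guarded by test_bit(). */
	for_each_set_bit(code, dev->absbit, ABS_CNT)
		pr_info("axis %u: [%d, %d]\n", code,
			input_abs_get_min(dev, code),
			input_abs_get_max(dev, code));
}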
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index d7820d1..17f97e5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -341,7 +341,7 @@
 
 config MOUSE_GPIO
 	tristate "GPIO mouse"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 793300b..ee6a6e9 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -24,7 +24,7 @@
 obj-$(CONFIG_MOUSE_SYNAPTICS_USB)	+= synaptics_usb.o
 obj-$(CONFIG_MOUSE_VSXXXAA)		+= vsxxxaa.o
 
-cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o
+cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
 psmouse-objs := psmouse-base.o synaptics.o focaltech.o
 
 psmouse-$(CONFIG_MOUSE_PS2_ALPS)	+= alps.o
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index efe1484..eb76b61 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -21,10 +21,12 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 #include "cyapa.h"
 
 
@@ -39,11 +41,33 @@
 
 static int cyapa_reinitialize(struct cyapa *cyapa);
 
-static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa)
 {
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_BL)
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_BL)
 		return true;
 
+	return false;
+}
+
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa)
+{
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_APP)
+		return true;
+
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+		return true;
+
+	return false;
+}
+
+static bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+{
+	if (cyapa_is_pip_bl_mode(cyapa))
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN3 &&
 		cyapa->state >= CYAPA_STATE_BL_BUSY &&
 		cyapa->state <= CYAPA_STATE_BL_ACTIVE)
@@ -54,7 +78,7 @@
 
 static inline bool cyapa_is_operational_mode(struct cyapa *cyapa)
 {
-	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+	if (cyapa_is_pip_app_mode(cyapa))
 		return true;
 
 	if (cyapa->gen == CYAPA_GEN3 && cyapa->state == CYAPA_STATE_OP)
@@ -188,6 +212,15 @@
 			if (!error)
 				goto out_detected;
 		}
+		if (cyapa->gen == CYAPA_GEN_UNKNOWN ||
+				cyapa->gen == CYAPA_GEN6 ||
+				cyapa->gen == CYAPA_GEN5) {
+			error = cyapa_pip_state_parse(cyapa,
+					status, BL_STATUS_SIZE);
+			if (!error)
+				goto out_detected;
+		}
+		/* For detecting old Gen5 trackpads. */
 		if ((cyapa->gen == CYAPA_GEN_UNKNOWN ||
 				cyapa->gen == CYAPA_GEN5) &&
 			!smbus && even_addr) {
@@ -284,6 +317,9 @@
 		return error;
 
 	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		cyapa->ops = &cyapa_gen6_ops;
+		break;
 	case CYAPA_GEN5:
 		cyapa->ops = &cyapa_gen5_ops;
 		break;
@@ -306,7 +342,7 @@
 
 /*
  * Returns 0 on device detected, negative errno on no device detected.
- * And when the device is detected and opertaional, it will be reset to
+ * And when the device is detected and operational, it will be reset to
  * full power active mode automatically.
  */
 static int cyapa_detect(struct cyapa *cyapa)
@@ -333,6 +369,7 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &client->dev;
 	int error;
 
 	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
@@ -346,10 +383,9 @@
 		 * when in operational mode.
 		 */
 		error = cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error) {
-			dev_warn(&client->dev,
-				"set active power failed: %d\n", error);
+			dev_warn(dev, "set active power failed: %d\n", error);
 			goto out;
 		}
 	} else {
@@ -361,10 +397,14 @@
 	}
 
 	enable_irq(client->irq);
-	if (!pm_runtime_enabled(&client->dev)) {
-		pm_runtime_set_active(&client->dev);
-		pm_runtime_enable(&client->dev);
+	if (!pm_runtime_enabled(dev)) {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
 	}
+
+	pm_runtime_get_sync(dev);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_sync_autosuspend(dev);
 out:
 	mutex_unlock(&cyapa->state_sync_lock);
 	return error;
@@ -374,16 +414,17 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &cyapa->client->dev;
 
 	mutex_lock(&cyapa->state_sync_lock);
 
 	disable_irq(client->irq);
-	if (pm_runtime_enabled(&client->dev))
-		pm_runtime_disable(&client->dev);
-	pm_runtime_set_suspended(&client->dev);
+	if (pm_runtime_enabled(dev))
+		pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
 
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	mutex_unlock(&cyapa->state_sync_lock);
 }
@@ -443,6 +484,7 @@
 	if (cyapa->gen >= CYAPA_GEN5) {
 		input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
 		input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 255, 0, 0);
+		input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 	}
 
 	input_abs_set_res(input, ABS_MT_POSITION_X,
@@ -492,7 +534,7 @@
 		 */
 		if (!input || cyapa->operational)
 			cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		/* Gen3 always uses polling mode for commands. */
 		if (cyapa->gen >= CYAPA_GEN5)
 			enable_irq(cyapa->client->irq);
@@ -507,7 +549,8 @@
 		if (cyapa->gen >= CYAPA_GEN5)
 			disable_irq(cyapa->client->irq);
 		if (!input || cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	}
 }
 
@@ -563,6 +606,8 @@
 	error = cyapa_gen3_ops.initialize(cyapa);
 	if (!error)
 		error = cyapa_gen5_ops.initialize(cyapa);
+	if (!error)
+		error = cyapa_gen6_ops.initialize(cyapa);
 	if (error)
 		return error;
 
@@ -572,7 +617,7 @@
 
 	/* Power down the device until we need it. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	return 0;
 }
@@ -588,7 +633,8 @@
 
 	/* Avoid command failures when TP was in OFF state. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+		cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 
 	error = cyapa_detect(cyapa);
 	if (error)
@@ -607,7 +653,8 @@
 	if (!input || !input->users) {
 		/* Reset to power OFF state to save power when no user has it open. */
 		if (cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	} else if (!error && cyapa->operational) {
 		/*
 		 * Make sure to enable runtime PM only when the device is
@@ -615,6 +662,10 @@
 		 */
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 	}
 
 	return error;
@@ -624,27 +675,44 @@
 {
 	struct cyapa *cyapa = dev_id;
 	struct device *dev = &cyapa->client->dev;
+	int error;
 
-	pm_runtime_get_sync(dev);
 	if (device_may_wakeup(dev))
 		pm_wakeup_event(dev, 0);
 
-	/* Interrupt event maybe cuased by host command to trackpad device. */
+	/* Interrupt event can be caused by host command to trackpad device. */
 	if (cyapa->ops->irq_cmd_handler(cyapa)) {
 		/*
 		 * Interrupt event may be from trackpad device input reporting.
 		 */
 		if (!cyapa->input) {
 			/*
-			 * Still in probling or in firware image
-			 * udpating or reading.
+			 * Still in probing or in firmware image
+			 * updating or reading.
 			 */
 			cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
 			goto out;
 		}
 
-		if (!cyapa->operational || cyapa->ops->irq_handler(cyapa)) {
+		if (cyapa->operational) {
+			error = cyapa->ops->irq_handler(cyapa);
+
+			/*
+			 * Apply runtime power management to touch report
+			 * events, except for events caused by command
+			 * responses.
+			 * Note: this adds roughly 20~40 ms of extra delay
+			 * before the first valid touch report is received;
+			 * that time is spent running the device's runtime
+			 * resume process.
+			 */
+			pm_runtime_get_sync(dev);
+			pm_runtime_mark_last_busy(dev);
+			pm_runtime_put_sync_autosuspend(dev);
+		}
+
+		if (!cyapa->operational || error) {
 			if (!mutex_trylock(&cyapa->state_sync_lock)) {
 				cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
@@ -656,8 +724,6 @@
 	}
 
 out:
-	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_sync_autosuspend(dev);
 	return IRQ_HANDLED;
 }
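
The get/mark/put triple used in the interrupt path above is the usual way to kick a runtime-PM autosuspend timer: resume the device if needed, record activity, then drop the reference so the device may suspend again once the autosuspend delay expires. A hedged sketch of the idiom in isolation:

#include <linux/pm_runtime.h>

/* Illustrative helper: note device activity under runtime PM. */
static void example_note_activity(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume if suspended */
	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_sync_autosuspend(dev);	/* drop ref; suspend later */
}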
 
@@ -1051,12 +1117,12 @@
 		dev_dbg(dev, "firmware update successfully done.\n");
 
 	/*
-	 * Redetect trackpad device states because firmware update process
+	 * Re-detect trackpad device states because firmware update process
 	 * will reset trackpad device into bootloader mode.
 	 */
 	ret = cyapa_reinitialize(cyapa);
 	if (ret) {
-		dev_err(dev, "failed to redetect after updated: %d\n", ret);
+		dev_err(dev, "failed to re-detect after update: %d\n", ret);
 		error = error ? error : ret;
 	}
 
@@ -1120,9 +1186,11 @@
 	case CYAPA_STATE_BL_ACTIVE:
 		return "bootloader active";
 	case CYAPA_STATE_GEN5_BL:
+	case CYAPA_STATE_GEN6_BL:
 		return "bootloader";
 	case CYAPA_STATE_OP:
 	case CYAPA_STATE_GEN5_APP:
+	case CYAPA_STATE_GEN6_APP:
 		return "operational";  /* Normal valid state. */
 	default:
 		return "invalid mode";
@@ -1175,6 +1243,13 @@
 	sysfs_remove_group(&cyapa->client->dev.kobj, &cyapa_sysfs_group);
 }
 
+static void cyapa_disable_regulator(void *data)
+{
+	struct cyapa *cyapa = data;
+
+	regulator_disable(cyapa->vcc);
+}
+
 static int cyapa_probe(struct i2c_client *client,
 		       const struct i2c_device_id *dev_id)
 {
@@ -1208,6 +1283,27 @@
 	sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
 		client->addr);
 
+	cyapa->vcc = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(cyapa->vcc)) {
+		error = PTR_ERR(cyapa->vcc);
+		dev_err(dev, "failed to get vcc regulator: %d\n", error);
+		return error;
+	}
+
+	error = regulator_enable(cyapa->vcc);
+	if (error) {
+		dev_err(dev, "failed to enable regulator: %d\n", error);
+		return error;
+	}
+
+	error = devm_add_action(dev, cyapa_disable_regulator, cyapa);
+	if (error) {
+		cyapa_disable_regulator(cyapa);
+		dev_err(dev, "failed to add disable regulator action: %d\n",
+			error);
+		return error;
+	}
+
 	error = cyapa_initialize(cyapa);
 	if (error) {
 		dev_err(dev, "failed to detect and initialize tp device.\n");
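
The devm_add_action() call added in probe above is worth noting: it keeps the regulator enable/disable balanced on every exit path, since the release action runs automatically on unbind, and the single manual call covers the one case where registering the action itself fails. A sketch of that shape, with invented names:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_disable_vcc(void *data)
{
	regulator_disable(data);	/* data is a struct regulator * */
}

static int example_power_on(struct device *dev, struct regulator *vcc)
{
	int error;

	error = regulator_enable(vcc);
	if (error)
		return error;

	/* Runs automatically on unbind; undo by hand only on failure. */
	error = devm_add_action(dev, example_disable_vcc, vcc);
	if (error)
		example_disable_vcc(vcc);

	return error;
}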
@@ -1296,12 +1392,19 @@
 		power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
 						    : PWR_MODE_OFF;
 		error = cyapa->ops->set_power_mode(cyapa, power_mode,
-				cyapa->suspend_sleep_time);
+				cyapa->suspend_sleep_time, true);
 		if (error)
 			dev_err(dev, "suspend set power mode failed: %d\n",
 					error);
 	}
 
+	/*
+	 * Disable the proximity interrupt when the system is idle; only a
+	 * true touch should wake the system.
+	 */
+	if (cyapa->dev_pwr_mode != PWR_MODE_OFF)
+		cyapa->ops->set_proximity(cyapa, false);
+
 	if (device_may_wakeup(dev))
 		cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
 
@@ -1322,7 +1425,10 @@
 		cyapa->irq_wake = false;
 	}
 
-	/* Update device states and runtime PM states. */
+	/*
+	 * Update device states and runtime PM states.
+	 * Re-enable the proximity interrupt after entering operational mode.
+	 */
 	error = cyapa_reinitialize(cyapa);
 	if (error)
 		dev_warn(dev, "failed to reinitialize TP device: %d\n", error);
@@ -1340,7 +1446,8 @@
 
 	error = cyapa->ops->set_power_mode(cyapa,
 			cyapa->runtime_suspend_power_mode,
-			cyapa->runtime_suspend_sleep_time);
+			cyapa->runtime_suspend_sleep_time,
+			false);
 	if (error)
 		dev_warn(dev, "runtime suspend failed: %d\n", error);
 
@@ -1352,7 +1459,8 @@
 	struct cyapa *cyapa = dev_get_drvdata(dev);
 	int error;
 
-	error = cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+	error = cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 	if (error)
 		dev_warn(dev, "runtime resume failed: %d\n", error);
 
@@ -1374,17 +1482,26 @@
 static const struct acpi_device_id cyapa_acpi_id[] = {
 	{ "CYAP0000", 0 },  /* Gen3 trackpad with 0x67 I2C address. */
 	{ "CYAP0001", 0 },  /* Gen5 trackpad with 0x24 I2C address. */
+	{ "CYAP0002", 0 },  /* Gen6 trackpad with 0x24 I2C address. */
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, cyapa_acpi_id);
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id cyapa_of_match[] = {
+	{ .compatible = "cypress,cyapa" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyapa_of_match);
+#endif
+
 static struct i2c_driver cyapa_driver = {
 	.driver = {
 		.name = "cyapa",
-		.owner = THIS_MODULE,
 		.pm = &cyapa_pm_ops,
 		.acpi_match_table = ACPI_PTR(cyapa_acpi_id),
+		.of_match_table = of_match_ptr(cyapa_of_match),
 	},
 
 	.probe = cyapa_probe,
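
Carrying both match tables, as the hunk above now does, lets a single I2C driver bind on ACPI-described systems and device-tree systems alike; ACPI_PTR() and of_match_ptr() collapse to NULL when the corresponding subsystem is configured out, so neither table costs anything on the other platform. A stripped-down sketch (IDs are invented):

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

#ifdef CONFIG_ACPI
static const struct acpi_device_id example_acpi_id[] = {
	{ "XMPL0000", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_id);
#endif

#ifdef CONFIG_OF
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);
#endif

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
		/* Each macro evaluates to NULL when its option is off. */
		.acpi_match_table = ACPI_PTR(example_acpi_id),
		.of_match_table = of_match_ptr(example_of_match),
	},
};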
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index adc9ed5..b812bba 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,13 +19,14 @@
 #define CYAPA_GEN_UNKNOWN   0x00   /* unknown protocol. */
 #define CYAPA_GEN3   0x03   /* support MT-protocol B with tracking ID. */
 #define CYAPA_GEN5   0x05   /* support TrueTouch GEN5 trackpad device. */
+#define CYAPA_GEN6   0x06   /* support TrueTouch GEN6 trackpad device. */
 
 #define CYAPA_NAME   "Cypress APA Trackpad (cyapa)"
 
 /*
  * Macros for SMBus communication
  */
-#define SMBUS_READ   0x01
+#define SMBUS_READ  0x01
 #define SMBUS_WRITE 0x00
 #define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
 #define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
@@ -159,12 +160,89 @@
 
 #define AUTOSUSPEND_DELAY   2000 /* unit : ms */
 
-#define UNINIT_SLEEP_TIME 0xFFFF
-#define UNINIT_PWR_MODE   0xFF
-
 #define BTN_ONLY_MODE_NAME   "buttononly"
 #define OFF_MODE_NAME        "off"
 
+/* Common macros for PIP interface. */
+#define PIP_HID_DESCRIPTOR_ADDR		0x0001
+#define PIP_REPORT_DESCRIPTOR_ADDR	0x0002
+#define PIP_INPUT_REPORT_ADDR		0x0003
+#define PIP_OUTPUT_REPORT_ADDR		0x0004
+#define PIP_CMD_DATA_ADDR		0x0006
+
+#define PIP_RETRIEVE_DATA_STRUCTURE	0x24
+#define PIP_CMD_CALIBRATE		0x28
+#define PIP_BL_CMD_VERIFY_APP_INTEGRITY	0x31
+#define PIP_BL_CMD_GET_BL_INFO		0x38
+#define PIP_BL_CMD_PROGRAM_VERIFY_ROW	0x39
+#define PIP_BL_CMD_LAUNCH_APP		0x3b
+#define PIP_BL_CMD_INITIATE_BL		0x48
+#define PIP_INVALID_CMD			0xff
+
+#define PIP_HID_DESCRIPTOR_SIZE		32
+#define PIP_HID_APP_REPORT_ID		0xf7
+#define PIP_HID_BL_REPORT_ID		0xff
+
+#define PIP_BL_CMD_REPORT_ID		0x40
+#define PIP_BL_RESP_REPORT_ID		0x30
+#define PIP_APP_CMD_REPORT_ID		0x2f
+#define PIP_APP_RESP_REPORT_ID		0x1f
+
+#define PIP_READ_SYS_INFO_CMD_LENGTH	7
+#define PIP_BL_READ_APP_INFO_CMD_LENGTH	13
+#define PIP_MIN_BL_CMD_LENGTH		13
+#define PIP_MIN_BL_RESP_LENGTH		11
+#define PIP_MIN_APP_CMD_LENGTH		7
+#define PIP_MIN_APP_RESP_LENGTH		5
+#define PIP_UNSUPPORTED_CMD_RESP_LENGTH	6
+#define PIP_READ_SYS_INFO_RESP_LENGTH	71
+#define PIP_BL_APP_INFO_RESP_LENGTH	30
+#define PIP_BL_GET_INFO_RESP_LENGTH	19
+
+#define PIP_BL_PLATFORM_VER_SHIFT	4
+#define PIP_BL_PLATFORM_VER_MASK	0x0f
+
+#define PIP_PRODUCT_FAMILY_MASK		0xf000
+#define PIP_PRODUCT_FAMILY_TRACKPAD	0x1000
+
+#define PIP_DEEP_SLEEP_STATE_ON		0x00
+#define PIP_DEEP_SLEEP_STATE_OFF	0x01
+#define PIP_DEEP_SLEEP_STATE_MASK	0x03
+#define PIP_APP_DEEP_SLEEP_REPORT_ID	0xf0
+#define PIP_DEEP_SLEEP_RESP_LENGTH	5
+#define PIP_DEEP_SLEEP_OPCODE		0x08
+#define PIP_DEEP_SLEEP_OPCODE_MASK	0x0f
+
+#define PIP_RESP_LENGTH_OFFSET		0
+#define	    PIP_RESP_LENGTH_SIZE	2
+#define PIP_RESP_REPORT_ID_OFFSET	2
+#define PIP_RESP_RSVD_OFFSET		3
+#define     PIP_RESP_RSVD_KEY		0x00
+#define PIP_RESP_BL_SOP_OFFSET		4
+#define     PIP_SOP_KEY			0x01  /* Start of Packet */
+#define     PIP_EOP_KEY			0x17  /* End of Packet */
+#define PIP_RESP_APP_CMD_OFFSET		4
+#define     GET_PIP_CMD_CODE(reg)	((reg) & 0x7f)
+#define PIP_RESP_STATUS_OFFSET		5
+
+#define VALID_CMD_RESP_HEADER(resp, cmd)				  \
+	(((resp)[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID) && \
+	((resp)[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) &&		  \
+	(GET_PIP_CMD_CODE((resp)[PIP_RESP_APP_CMD_OFFSET]) == (cmd)))
+
+#define PIP_CMD_COMPLETE_SUCCESS(resp_data) \
+	((resp_data)[PIP_RESP_STATUS_OFFSET] == 0x00)
+
+/* Variables to record latest gen5 trackpad power states. */
+#define UNINIT_SLEEP_TIME	0xffff
+#define UNINIT_PWR_MODE		0xff
+#define PIP_DEV_SET_PWR_STATE(cyapa, s)		((cyapa)->dev_pwr_mode = (s))
+#define PIP_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
+#define PIP_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
+#define PIP_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
+#define PIP_DEV_UNINIT_SLEEP_TIME(cyapa)	\
+		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+
 /* The touch.id is used as the MT slot id, thus max MT slot is 15 */
 #define CYAPA_MAX_MT_SLOTS  15
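
The macros above encode the fixed PIP response layout: report ID at offset 2, a reserved key at offset 3, the echoed command code at offset 4, and a status byte at offset 5. A hedged sketch of how a caller might validate a freshly read response with them (the helper itself is invented; it assumes the cyapa.h definitions above):

static int example_check_pip_resp(const u8 *resp, int len, u8 cmd)
{
	/* Need at least the status byte at PIP_RESP_STATUS_OFFSET. */
	if (len <= PIP_RESP_STATUS_OFFSET)
		return -EINVAL;

	if (!VALID_CMD_RESP_HEADER(resp, cmd))
		return -EPROTO;		/* not the reply to our command */

	if (!PIP_CMD_COMPLETE_SUCCESS(resp))
		return -EIO;		/* device reported failure status */

	return 0;
}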
 
@@ -195,10 +273,12 @@
 	int (*sort_empty_output_data)(struct cyapa *,
 			u8 *, int *, cb_sort);
 
-	int (*set_power_mode)(struct cyapa *, u8, u16);
+	int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+
+	int (*set_proximity)(struct cyapa *, bool);
 };
 
-struct cyapa_gen5_cmd_states {
+struct cyapa_pip_cmd_states {
 	struct mutex cmd_lock;
 	struct completion cmd_ready;
 	atomic_t cmd_issued;
@@ -214,7 +294,7 @@
 };
 
 union cyapa_cmd_states {
-	struct cyapa_gen5_cmd_states gen5;
+	struct cyapa_pip_cmd_states pip;
 };
 
 enum cyapa_state {
@@ -225,6 +305,14 @@
 	CYAPA_STATE_OP,
 	CYAPA_STATE_GEN5_BL,
 	CYAPA_STATE_GEN5_APP,
+	CYAPA_STATE_GEN6_BL,
+	CYAPA_STATE_GEN6_APP,
+};
+
+struct gen6_interval_setting {
+	u16 active_interval;
+	u16 lp1_interval;
+	u16 lp2_interval;
 };
 
 /* The main device structure */
@@ -233,6 +321,7 @@
 	u8 status[BL_STATUS_SIZE];
 	bool operational; /* true: ready for data reporting; false: not. */
 
+	struct regulator *vcc;
 	struct i2c_client *client;
 	struct input_dev *input;
 	char phys[32];	/* Device physical location */
@@ -246,9 +335,11 @@
 	u16 runtime_suspend_sleep_time;
 	u8 dev_pwr_mode;
 	u16 dev_sleep_time;
+	struct gen6_interval_setting gen6_interval_setting;
 
 	/* Read from query data region. */
 	char product_id[16];
+	u8 platform_ver;  /* Platform version. */
 	u8 fw_maj_ver;  /* Firmware major version. */
 	u8 fw_min_ver;  /* Firmware minor version. */
 	u8 btn_capability;
@@ -259,7 +350,7 @@
 	int physical_size_y;
 
 	/* Used in ttsp and truetouch based trackpad devices. */
-	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = rigth side. */
+	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = right side. */
 	u8 y_origin;  /* Y Axis Origin: 0 = top; 1 = bottom. */
 	int electrodes_x;  /* Number of electrodes on the X Axis*/
 	int electrodes_y;  /* Number of electrodes on the Y Axis*/
@@ -282,9 +373,9 @@
 
 
 ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
-				u8 *values);
+				 u8 *values);
 ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
-				u8 *values);
+			       u8 *values);
 
 ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values);
 
@@ -293,9 +384,51 @@
 u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time);
 u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode);
 
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size);
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size);
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+				u8 *buf, int *len, cb_sort func);
+int cyapa_i2c_pip_cmd_irq_sync(struct cyapa *cyapa,
+			       u8 *cmd, int cmd_len,
+			       u8 *resp_data, int *resp_len,
+			       unsigned long timeout,
+			       cb_sort func,
+			       bool irq_mode);
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len);
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa, u8 *buf, int len);
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state);
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_bl_exit(struct cyapa *cyapa);
+int cyapa_pip_bl_enter(struct cyapa *cyapa);
 
+
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa);
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa);
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa);
+
+int cyapa_pip_resume_scanning(struct cyapa *cyapa);
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa);
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_do_fw_update(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_activate(struct cyapa *cyapa);
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa);
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count);
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable);
+
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa);
+int cyapa_pip_irq_handler(struct cyapa *cyapa);
+
+
+extern u8 pip_read_sys_info[];
+extern u8 pip_bl_read_app_info[];
 extern const char product_id[];
 extern const struct cyapa_dev_ops cyapa_gen3_ops;
 extern const struct cyapa_dev_ops cyapa_gen5_ops;
+extern const struct cyapa_dev_ops cyapa_gen6_ops;
 
 #endif
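
The three exported ops tables at the end of the header form a per-generation vtable: detection selects gen3/gen5/gen6 once, and the core then dispatches through cyapa->ops without knowing which silicon it is driving. A reduced sketch of that dispatch shape (signatures simplified from the real cyapa_dev_ops):

#include <linux/types.h>

/* Simplified vtable; the real cyapa_dev_ops carries many more hooks. */
struct example_ops {
	int (*set_power_mode)(void *priv, u8 mode, u16 sleep_time,
			      bool is_suspend);
	int (*set_proximity)(void *priv, bool enable);
};

struct example_dev {
	const struct example_ops *ops;	/* chosen once, at detect time */
	void *priv;
};

static int example_power_off(struct example_dev *dev)
{
	/* The same call site serves every generation's implementation. */
	return dev->ops->set_power_mode(dev->priv, 0 /* e.g. "off" */,
					0, false);
}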
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 3faf01c..1a9d12a 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -950,7 +950,7 @@
  * Device power mode can only be set when device is in operational mode.
  */
 static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
-				     u16 always_unused)
+		u16 always_unused, bool is_suspend_unused)
 {
 	int ret;
 	u8 power;
@@ -999,6 +999,11 @@
 	return ret;
 }
 
+static int cyapa_gen3_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
 static int cyapa_gen3_get_query_data(struct cyapa *cyapa)
 {
 	u8 query_data[QUERY_DATA_SIZE];
@@ -1107,7 +1112,7 @@
 		 * may cause problems, so we set the power mode first here.
 		 */
 		error = cyapa_gen3_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_err(dev, "%s: set full power mode failed: %d\n",
 				__func__, error);
@@ -1156,7 +1161,7 @@
 	 * so stop cyapa_gen3_irq_handler from continuing to process, to
 	 * avoid unwanted error detection and handling.
 	 *
-	 * And also, avoid the periodicly accerted interrupts to be processed
+	 * Also, avoid processing the periodically asserted interrupts
 	 * as touch inputs when gen3 failed to launch into application mode,
 	 * which would leave gen3 stuck in bootloader mode.
 	 */
@@ -1243,4 +1248,6 @@
 	.irq_cmd_handler = cyapa_gen3_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_gen3_empty_output_data,
 	.set_power_mode = cyapa_gen3_set_power_mode,
+
+	.set_proximity = cyapa_gen3_set_proximity,
 };
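
Gen3 hardware has no proximity sensing, so its hook is a stub returning -EOPNOTSUPP rather than a NULL pointer: callers can invoke set_proximity unconditionally and treat that error as "feature absent". A sketch of the convention, reusing the example_dev shape from the header sketch above:

#include <linux/errno.h>
#include <linux/printk.h>

static int example_gen3_set_proximity(void *priv, bool enable)
{
	return -EOPNOTSUPP;	/* the hardware simply cannot do it */
}

static void example_enable_proximity(struct example_dev *dev)
{
	int error = dev->ops->set_proximity(dev->priv, true);

	/* -EOPNOTSUPP means "unsupported", not "broken". */
	if (error && error != -EOPNOTSUPP)
		pr_warn("enabling proximity failed: %d\n", error);
}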
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index afc39e7..118ba97 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,15 +19,11 @@
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
+#include <linux/pm_runtime.h>
 #include "cyapa.h"
 
 
-/* Macro of Gen5 */
-#define RECORD_EVENT_NONE        0
-#define RECORD_EVENT_TOUCHDOWN	 1
-#define RECORD_EVENT_DISPLACE    2
-#define RECORD_EVENT_LIFTOFF     3
-
+/* Macro of TSG firmware image */
 #define CYAPA_TSG_FLASH_MAP_BLOCK_SIZE      0x80
 #define CYAPA_TSG_IMG_FW_HDR_SIZE           13
 #define CYAPA_TSG_FW_ROW_SIZE               (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE)
@@ -44,32 +40,65 @@
 
 #define CYAPA_TSG_MAX_CMD_SIZE              256
 
-#define GEN5_BL_CMD_VERIFY_APP_INTEGRITY    0x31
-#define GEN5_BL_CMD_GET_BL_INFO		    0x38
-#define GEN5_BL_CMD_PROGRAM_VERIFY_ROW      0x39
-#define GEN5_BL_CMD_LAUNCH_APP		    0x3b
-#define GEN5_BL_CMD_INITIATE_BL		    0x48
+/* Macro of PIP interface */
+#define PIP_BL_INITIATE_RESP_LEN            11
+#define PIP_BL_FAIL_EXIT_RESP_LEN           11
+#define PIP_BL_FAIL_EXIT_STATUS_CODE        0x0c
+#define PIP_BL_VERIFY_INTEGRITY_RESP_LEN    12
+#define PIP_BL_INTEGRITY_CHEKC_PASS         0x00
+#define PIP_BL_BLOCK_WRITE_RESP_LEN         11
 
-#define GEN5_HID_DESCRIPTOR_ADDR	0x0001
-#define GEN5_REPORT_DESCRIPTOR_ADDR	0x0002
-#define GEN5_INPUT_REPORT_ADDR		0x0003
-#define GEN5_OUTPUT_REPORT_ADDR		0x0004
-#define GEN5_CMD_DATA_ADDR		0x0006
+#define PIP_TOUCH_REPORT_ID         0x01
+#define PIP_BTN_REPORT_ID           0x03
+#define PIP_WAKEUP_EVENT_REPORT_ID  0x04
+#define PIP_PUSH_BTN_REPORT_ID      0x06
+#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05  /* Special for old Gen5 TP. */
+#define PIP_PROXIMITY_REPORT_ID     0x07
 
-#define GEN5_TOUCH_REPORT_HEAD_SIZE     7
-#define GEN5_TOUCH_REPORT_MAX_SIZE      127
-#define GEN5_BTN_REPORT_HEAD_SIZE       6
-#define GEN5_BTN_REPORT_MAX_SIZE        14
-#define GEN5_WAKEUP_EVENT_SIZE          4
-#define GEN5_RAW_DATA_HEAD_SIZE         24
+#define PIP_PROXIMITY_REPORT_SIZE	6
+#define PIP_PROXIMITY_DISTANCE_OFFSET	0x05
+#define PIP_PROXIMITY_DISTANCE_MASK	0x01
 
-#define GEN5_BL_CMD_REPORT_ID           0x40
-#define GEN5_BL_RESP_REPORT_ID          0x30
-#define GEN5_APP_CMD_REPORT_ID          0x2f
-#define GEN5_APP_RESP_REPORT_ID         0x1f
+#define PIP_TOUCH_REPORT_HEAD_SIZE     7
+#define PIP_TOUCH_REPORT_MAX_SIZE      127
+#define PIP_BTN_REPORT_HEAD_SIZE       6
+#define PIP_BTN_REPORT_MAX_SIZE        14
+#define PIP_WAKEUP_EVENT_SIZE          4
 
-#define GEN5_APP_DEEP_SLEEP_REPORT_ID   0xf0
-#define GEN5_DEEP_SLEEP_RESP_LENGTH     5
+#define PIP_NUMBER_OF_TOUCH_OFFSET  5
+#define PIP_NUMBER_OF_TOUCH_MASK    0x1f
+#define PIP_BUTTONS_OFFSET          5
+#define PIP_BUTTONS_MASK            0x0f
+#define PIP_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
+#define PIP_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
+#define PIP_TOUCH_TYPE_FINGER	    0x00
+#define PIP_TOUCH_TYPE_PROXIMITY    0x01
+#define PIP_TOUCH_TYPE_HOVER	    0x02
+#define PIP_GET_TOUCH_TYPE(reg)     ((reg) & 0x07)
+
+#define RECORD_EVENT_NONE        0
+#define RECORD_EVENT_TOUCHDOWN	 1
+#define RECORD_EVENT_DISPLACE    2
+#define RECORD_EVENT_LIFTOFF     3
+
+#define PIP_SENSING_MODE_MUTUAL_CAP_FINE   0x00
+#define PIP_SENSING_MODE_SELF_CAP          0x02
+
+#define PIP_SET_PROXIMITY	0x49
+
+/* Macro of Gen5 */
+#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
+#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
+
+#define GEN5_POWER_STATE_ACTIVE              0x01
+#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
+#define GEN5_POWER_STATE_READY               0x03
+#define GEN5_POWER_STATE_IDLE                0x04
+#define GEN5_POWER_STATE_BTN_ONLY            0x05
+#define GEN5_POWER_STATE_OFF                 0x06
+
+#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
+#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
 
 #define GEN5_CMD_GET_PARAMETER		     0x05
 #define GEN5_CMD_SET_PARAMETER		     0x06
@@ -82,80 +111,12 @@
 
 #define GEN5_PARAMETER_DISABLE_PIP_REPORT    0x08
 
-#define GEN5_POWER_STATE_ACTIVE              0x01
-#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
-#define GEN5_POWER_STATE_READY               0x03
-#define GEN5_POWER_STATE_IDLE                0x04
-#define GEN5_POWER_STATE_BTN_ONLY            0x05
-#define GEN5_POWER_STATE_OFF                 0x06
-
-#define GEN5_DEEP_SLEEP_STATE_MASK  0x03
-#define GEN5_DEEP_SLEEP_STATE_ON    0x00
-#define GEN5_DEEP_SLEEP_STATE_OFF   0x01
-
-#define GEN5_DEEP_SLEEP_OPCODE      0x08
-#define GEN5_DEEP_SLEEP_OPCODE_MASK 0x0f
-
-#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
-#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
-
-#define GEN5_CMD_REPORT_ID_OFFSET       4
-
-#define GEN5_RESP_REPORT_ID_OFFSET      2
-#define GEN5_RESP_RSVD_OFFSET           3
-#define     GEN5_RESP_RSVD_KEY          0x00
-#define GEN5_RESP_BL_SOP_OFFSET         4
-#define     GEN5_SOP_KEY                0x01  /* Start of Packet */
-#define     GEN5_EOP_KEY                0x17  /* End of Packet */
-#define GEN5_RESP_APP_CMD_OFFSET        4
-#define     GET_GEN5_CMD_CODE(reg)      ((reg) & 0x7f)
-
-#define VALID_CMD_RESP_HEADER(resp, cmd)				    \
-	(((resp)[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID) && \
-	((resp)[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) &&	    \
-	(GET_GEN5_CMD_CODE((resp)[GEN5_RESP_APP_CMD_OFFSET]) == (cmd)))
-
-#define GEN5_MIN_BL_CMD_LENGTH           13
-#define GEN5_MIN_BL_RESP_LENGTH          11
-#define GEN5_MIN_APP_CMD_LENGTH          7
-#define GEN5_MIN_APP_RESP_LENGTH         5
-#define GEN5_UNSUPPORTED_CMD_RESP_LENGTH 6
-
-#define GEN5_RESP_LENGTH_OFFSET  0x00
-#define GEN5_RESP_LENGTH_SIZE    2
-
-#define GEN5_HID_DESCRIPTOR_SIZE      32
-#define GEN5_BL_HID_REPORT_ID         0xff
-#define GEN5_APP_HID_REPORT_ID        0xf7
-#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
-#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
-
 #define GEN5_BL_REPORT_DESCRIPTOR_SIZE            0x1d
 #define GEN5_BL_REPORT_DESCRIPTOR_ID              0xfe
 #define GEN5_APP_REPORT_DESCRIPTOR_SIZE           0xee
 #define GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE  0xfa
 #define GEN5_APP_REPORT_DESCRIPTOR_ID             0xf6
 
-#define GEN5_TOUCH_REPORT_ID         0x01
-#define GEN5_BTN_REPORT_ID           0x03
-#define GEN5_WAKEUP_EVENT_REPORT_ID  0x04
-#define GEN5_OLD_PUSH_BTN_REPORT_ID  0x05
-#define GEN5_PUSH_BTN_REPORT_ID      0x06
-
-#define GEN5_CMD_COMPLETE_SUCCESS(status) ((status) == 0x00)
-
-#define GEN5_BL_INITIATE_RESP_LEN            11
-#define GEN5_BL_FAIL_EXIT_RESP_LEN           11
-#define GEN5_BL_FAIL_EXIT_STATUS_CODE        0x0c
-#define GEN5_BL_VERIFY_INTEGRITY_RESP_LEN    12
-#define GEN5_BL_INTEGRITY_CHEKC_PASS         0x00
-#define GEN5_BL_BLOCK_WRITE_RESP_LEN         11
-#define GEN5_BL_READ_APP_INFO_RESP_LEN       31
-#define GEN5_CMD_CALIBRATE                   0x28
-#define CYAPA_SENSING_MODE_MUTUAL_CAP_FINE   0x00
-#define CYAPA_SENSING_MODE_SELF_CAP          0x02
-
-#define GEN5_CMD_RETRIEVE_DATA_STRUCTURE     0x24
 #define GEN5_RETRIEVE_MUTUAL_PWC_DATA        0x00
 #define GEN5_RETRIEVE_SELF_CAP_PWC_DATA      0x01
 
@@ -170,28 +131,19 @@
 #define GEN5_PANEL_SCAN_SELF_BASELINE        0x04
 #define GEN5_PANEL_SCAN_SELF_DIFFCOUNT       0x05
 
-/* The offset only valid for reterive PWC and panel scan commands */
+/* The offset only valid for retrieve PWC and panel scan commands */
 #define GEN5_RESP_DATA_STRUCTURE_OFFSET      10
 #define GEN5_PWC_DATA_ELEMENT_SIZE_MASK      0x07
 
-#define	GEN5_NUMBER_OF_TOUCH_OFFSET  5
-#define GEN5_NUMBER_OF_TOUCH_MASK    0x1f
-#define GEN5_BUTTONS_OFFSET          5
-#define GEN5_BUTTONS_MASK            0x0f
-#define GEN5_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
-#define GEN5_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
 
-#define GEN5_PRODUCT_FAMILY_MASK        0xf000
-#define GEN5_PRODUCT_FAMILY_TRACKPAD    0x1000
-
-#define TSG_INVALID_CMD   0xff
-
-struct cyapa_gen5_touch_record {
+struct cyapa_pip_touch_record {
 	/*
 	 * Bit 7 - 3: reserved
 	 * Bit 2 - 0: touch type;
 	 *            0 : standard finger;
-	 *            1 - 15 : reserved.
+	 *            1 : proximity (supported starting with Gen5 TP);
+	 *            2 : finger hover (defined, but not used yet);
+	 *            3 - 15 : reserved.
 	 */
 	u8 touch_type;
 
@@ -221,7 +173,14 @@
 	/* Bit 15 - 8 of Y-axis coordinate of the touch in pixel. */
 	u8 y_hi;
 
-	/* Touch intensity in counts, pressure value. */
+	/*
+	 * The meaning of this value depends on touch_type.
+	 * For the standard finger type:
+	 *	touch intensity in counts (a pressure value).
+	 * For the proximity type (supported starting with Gen5 TP):
+	 *	the distance, in surface units, between the contact and
+	 *	the surface.
+	 */
 	u8 z;
 
 	/*
@@ -260,9 +219,9 @@
 	u8 orientation;
 } __packed;
 
-struct cyapa_gen5_report_data {
-	u8 report_head[GEN5_TOUCH_REPORT_HEAD_SIZE];
-	struct cyapa_gen5_touch_record touch_records[10];
+struct cyapa_pip_report_data {
+	u8 report_head[PIP_TOUCH_REPORT_HEAD_SIZE];
+	struct cyapa_pip_touch_record touch_records[10];
 } __packed;
 
 struct cyapa_tsg_bin_image_head {
@@ -272,6 +231,12 @@
 	u8 fw_major_version;
 	u8 fw_minor_version;
 	u8 fw_revision_control_number[8];
+	u8 silicon_id_hi;
+	u8 silicon_id_lo;
+	u8 chip_revision;
+	u8 family_id;
+	u8 bl_ver_maj;
+	u8 bl_ver_min;
 } __packed;
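
Because cyapa_tsg_bin_image_head is __packed, the driver can lay it straight over the raw firmware bytes with no padding surprises; the image begins with a one-byte head size, as the head_size = fw->data[0] + 1 computation near the end of this diff shows. A hedged sanity-check sketch built on that layout (the check itself is invented):

#include <linux/firmware.h>

/* Illustrative only: reject images too short to hold their header. */
static bool example_fw_head_plausible(const struct firmware *fw)
{
	size_t head_size;

	if (fw->size < 1)
		return false;

	/* Byte 0 encodes the header size minus one. */
	head_size = fw->data[0] + 1;
	return fw->size >= head_size;
}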
 
 struct cyapa_tsg_bin_image_data_record {
@@ -288,36 +253,36 @@
 	struct cyapa_tsg_bin_image_data_record records[0];
 } __packed;
 
-struct gen5_bl_packet_start {
+struct pip_bl_packet_start {
 	u8 sop;  /* Start of packet, must be 01h */
 	u8 cmd_code;
 	__le16 data_length;  /* Size of data parameter start from data[0] */
 } __packed;
 
-struct gen5_bl_packet_end {
+struct pip_bl_packet_end {
 	__le16 crc;
 	u8 eop;  /* End of packet, must be 17h */
 } __packed;
 
-struct gen5_bl_cmd_head {
+struct pip_bl_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
 	u8 report_id;  /* Bootloader output report id, must be 40h */
 	u8 rsvd;  /* Reserved, must be 0 */
-	struct gen5_bl_packet_start packet_start;
+	struct pip_bl_packet_start packet_start;
 	u8 data[0];  /* Command data variable based on commands */
 } __packed;
 
 /* Initiate bootload command data structure. */
-struct gen5_bl_initiate_cmd_data {
+struct pip_bl_initiate_cmd_data {
 	/* Key must be "A5h 01h 02h 03h FFh FEh FDh 5Ah" */
 	u8 key[CYAPA_TSG_BL_KEY_SIZE];
 	u8 metadata_raw_parameter[CYAPA_TSG_FLASH_MAP_METADATA_SIZE];
 	__le16 metadata_crc;
 } __packed;
 
-struct gen5_bl_metadata_row_params {
+struct tsg_bl_metadata_row_params {
 	__le16 size;
 	__le16 maximum_size;
 	__le32 app_start;
@@ -332,13 +297,13 @@
 } __packed;
 
 /* Bootload program and verify row command data structure */
-struct gen5_bl_flash_row_head {
+struct tsg_bl_flash_row_head {
 	u8 flash_array_id;
 	__le16 flash_row_id;
 	u8 flash_data[0];
 } __packed;
 
-struct gen5_app_cmd_head {
+struct pip_app_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
@@ -369,30 +334,26 @@
 	u8 data_id;
 } __packed;
 
-/* Variables to record latest gen5 trackpad power states. */
-#define GEN5_DEV_SET_PWR_STATE(cyapa, s)	((cyapa)->dev_pwr_mode = (s))
-#define GEN5_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
-#define GEN5_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
-#define GEN5_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
-#define GEN5_DEV_UNINIT_SLEEP_TIME(cyapa)	\
-		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+u8 pip_read_sys_info[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02 };
+u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
+		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
+	};
 
-
-static u8 cyapa_gen5_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
+static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
 	0xff, 0xfe, 0xfd, 0x5a };
 
-static int cyapa_gen5_initialize(struct cyapa *cyapa)
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 
-	init_completion(&gen5_pip->cmd_ready);
-	atomic_set(&gen5_pip->cmd_issued, 0);
-	mutex_init(&gen5_pip->cmd_lock);
+	init_completion(&pip->cmd_ready);
+	atomic_set(&pip->cmd_issued, 0);
+	mutex_init(&pip->cmd_lock);
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
+	pip->resp_sort_func = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
 
 	cyapa->dev_pwr_mode = UNINIT_PWR_MODE;
 	cyapa->dev_sleep_time = UNINIT_SLEEP_TIME;
@@ -401,7 +362,7 @@
 }
 
 /* Return negative errno, or else the number of bytes read. */
-static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -415,14 +376,13 @@
 
 	if (ret != size)
 		return (ret < 0) ? ret : -EIO;
-
 	return size;
 }
 
 /**
  * Return a negative errno code, or zero on success.
  */
-static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -441,10 +401,10 @@
  * This function is aimed at dumping all unread data in the Gen5 trackpad
  * before sending any command; otherwise, the interrupt line will be blocked.
  */
-static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
 		u8 *buf, int *len, cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int report_count;
 	int empty_count;
@@ -476,13 +436,13 @@
 		if (empty_count > 5)
 			return 0;
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf,
-				GEN5_RESP_LENGTH_SIZE);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf,
+				PIP_RESP_LENGTH_SIZE);
 		if (error < 0)
 			return error;
 
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length == GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length == PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 			continue;
 		} else if (length > CYAPA_REG_MAP_SIZE) {
@@ -490,11 +450,11 @@
 			return -EINVAL;
 		} else if (length == 0) {
 			/* Application or bootloader launch data polled out. */
-			length = GEN5_RESP_LENGTH_SIZE;
+			length = PIP_RESP_LENGTH_SIZE;
 			if (buf && buf_len && func &&
-				func(cyapa, gen5_pip->empty_buf, length)) {
+				func(cyapa, pip->empty_buf, length)) {
 				length = min(buf_len, length);
-				memcpy(buf, gen5_pip->empty_buf, length);
+				memcpy(buf, pip->empty_buf, length);
 				*len = length;
 				/* Response found, success. */
 				return 0;
@@ -502,19 +462,19 @@
 			continue;
 		}
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 		if (error < 0)
 			return error;
 
 		report_count--;
 		empty_count = 0;
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length <= GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length <= PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 		} else if (buf && buf_len && func &&
-			func(cyapa, gen5_pip->empty_buf, length)) {
+			func(cyapa, pip->empty_buf, length)) {
 			length = min(buf_len, length);
-			memcpy(buf, gen5_pip->empty_buf, length);
+			memcpy(buf, pip->empty_buf, length);
 			*len = length;
 			/* Response found, success. */
 			return 0;
@@ -531,24 +491,24 @@
 		u8 *cmd, size_t cmd_len,
 		unsigned long timeout)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	/* Wait for interrupt to set ready completion */
-	init_completion(&gen5_pip->cmd_ready);
+	init_completion(&pip->cmd_ready);
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return (error < 0) ? error : -EIO;
 	}
 
 	/* Wait for interrupt to indicate command is completed. */
-	timeout = wait_for_completion_timeout(&gen5_pip->cmd_ready,
+	timeout = wait_for_completion_timeout(&pip->cmd_ready,
 				msecs_to_jiffies(timeout));
 	if (timeout == 0) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return -ETIMEDOUT;
 	}
 
@@ -562,15 +522,15 @@
 		unsigned long timeout,
 		cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int tries;
 	int length;
 	int error;
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return error < 0 ? error : -EIO;
 	}
 
@@ -591,11 +551,11 @@
 			error = error ? error : -ETIMEDOUT;
 	}
 
-	atomic_dec(&gen5_pip->cmd_issued);
+	atomic_dec(&pip->cmd_issued);
 	return error;
 }
 
-static int cyapa_i2c_pip_cmd_irq_sync(
+int cyapa_i2c_pip_cmd_irq_sync(
 		struct cyapa *cyapa,
 		u8 *cmd, int cmd_len,
 		u8 *resp_data, int *resp_len,
@@ -603,34 +563,34 @@
 		cb_sort func,
 		bool irq_mode)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	if (!cmd || !cmd_len)
 		return -EINVAL;
 
 	/* Commands must be serialized. */
-	error = mutex_lock_interruptible(&gen5_pip->cmd_lock);
+	error = mutex_lock_interruptible(&pip->cmd_lock);
 	if (error)
 		return error;
 
-	gen5_pip->resp_sort_func = func;
-	gen5_pip->resp_data = resp_data;
-	gen5_pip->resp_len = resp_len;
+	pip->resp_sort_func = func;
+	pip->resp_data = resp_data;
+	pip->resp_len = resp_len;
 
-	if (cmd_len >= GEN5_MIN_APP_CMD_LENGTH &&
-			cmd[4] == GEN5_APP_CMD_REPORT_ID) {
+	if (cmd_len >= PIP_MIN_APP_CMD_LENGTH &&
+			cmd[4] == PIP_APP_CMD_REPORT_ID) {
 		/* Application command */
-		gen5_pip->in_progress_cmd = cmd[6] & 0x7f;
-	} else if (cmd_len >= GEN5_MIN_BL_CMD_LENGTH &&
-			cmd[4] == GEN5_BL_CMD_REPORT_ID) {
+		pip->in_progress_cmd = cmd[6] & 0x7f;
+	} else if (cmd_len >= PIP_MIN_BL_CMD_LENGTH &&
+			cmd[4] == PIP_BL_CMD_REPORT_ID) {
 		/* Bootloader command */
-		gen5_pip->in_progress_cmd = cmd[7];
+		pip->in_progress_cmd = cmd[7];
 	}
 
 	/* Send command data, wait and read output response data's length. */
 	if (irq_mode) {
-		gen5_pip->is_irq_mode = true;
+		pip->is_irq_mode = true;
 		error = cyapa_do_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 							timeout);
 		if (error == -ETIMEDOUT && resp_data &&
@@ -646,54 +606,54 @@
 				error = error ? error : -ETIMEDOUT;
 		}
 	} else {
-		gen5_pip->is_irq_mode = false;
+		pip->is_irq_mode = false;
 		error = cyapa_do_i2c_pip_cmd_polling(cyapa, cmd, cmd_len,
 				resp_data, resp_len, timeout, func);
 	}
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+	pip->resp_sort_func = NULL;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
 
-	mutex_unlock(&gen5_pip->cmd_lock);
+	mutex_unlock(&pip->cmd_lock);
 	return error;
 }
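
cyapa_i2c_pip_cmd_irq_sync() is built on two primitives: a mutex so only one command is ever in flight, and a completion that the interrupt handler signals once it has matched the response to the in-progress command code. A condensed sketch of that rendezvous (I2C transfer elided, names invented):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_cmd_states {
	struct mutex cmd_lock;		/* one command in flight */
	struct completion cmd_ready;	/* signalled from the IRQ path */
	u8 in_progress_cmd;
};

static int example_send_cmd_sync(struct example_cmd_states *st,
				 u8 cmd, unsigned long timeout_ms)
{
	int error = mutex_lock_interruptible(&st->cmd_lock);

	if (error)
		return error;

	st->in_progress_cmd = cmd;
	reinit_completion(&st->cmd_ready);

	/* ... write the command over I2C here ... */

	/* The IRQ handler sorts the response and completes cmd_ready. */
	if (!wait_for_completion_timeout(&st->cmd_ready,
					 msecs_to_jiffies(timeout_ms)))
		error = -ETIMEDOUT;

	st->in_progress_cmd = 0xff;	/* i.e. PIP_INVALID_CMD */
	mutex_unlock(&st->cmd_lock);
	return error;
}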
 
-static bool cyapa_gen5_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	if (!data || len < GEN5_MIN_BL_RESP_LENGTH)
+	if (!data || len < PIP_MIN_BL_RESP_LENGTH)
 		return false;
 
 	/* Bootloader input report id 30h */
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			data[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY)
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_BL_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			data[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY)
 		return true;
 
 	return false;
 }
 
-static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int resp_len;
 
-	if (!data || len < GEN5_MIN_APP_RESP_LENGTH)
+	if (!data || len < PIP_MIN_APP_RESP_LENGTH)
 		return false;
 
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) {
-		resp_len = get_unaligned_le16(&data[GEN5_RESP_LENGTH_OFFSET]);
-		if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) == 0x00 &&
-			resp_len == GEN5_UNSUPPORTED_CMD_RESP_LENGTH &&
-			data[5] == gen5_pip->in_progress_cmd) {
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) {
+		resp_len = get_unaligned_le16(&data[PIP_RESP_LENGTH_OFFSET]);
+		if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) == 0x00 &&
+			resp_len == PIP_UNSUPPORTED_CMD_RESP_LENGTH &&
+			data[5] == pip->in_progress_cmd) {
 			/* Unsupported command code */
 			return false;
-		} else if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) ==
-				gen5_pip->in_progress_cmd) {
+		} else if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) ==
+				pip->in_progress_cmd) {
 			/* Correct command response received */
 			return true;
 		}
@@ -702,10 +662,10 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_application_launch_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	/*
@@ -718,25 +678,25 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
+static bool cyapa_sort_gen5_hid_descriptor_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	int resp_len;
 	int max_output_len;
 
 	/* Check hid descriptor. */
-	if (len != GEN5_HID_DESCRIPTOR_SIZE)
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
 		return false;
 
-	resp_len = get_unaligned_le16(&buf[GEN5_RESP_LENGTH_OFFSET]);
+	resp_len = get_unaligned_le16(&buf[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&buf[16]);
-	if (resp_len == GEN5_HID_DESCRIPTOR_SIZE) {
-		if (buf[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_HID_REPORT_ID &&
+	if (resp_len == PIP_HID_DESCRIPTOR_SIZE) {
+		if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID &&
 				max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Descriptor */
 			return true;
-		} else if ((buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 				max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Descriptor */
 			return true;
@@ -746,21 +706,21 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_deep_sleep_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_deep_sleep_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (len == GEN5_DEEP_SLEEP_RESP_LENGTH &&
-		buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-			GEN5_APP_DEEP_SLEEP_REPORT_ID &&
-		(buf[4] & GEN5_DEEP_SLEEP_OPCODE_MASK) ==
-			GEN5_DEEP_SLEEP_OPCODE)
+	if (len == PIP_DEEP_SLEEP_RESP_LENGTH &&
+		buf[PIP_RESP_REPORT_ID_OFFSET] ==
+			PIP_APP_DEEP_SLEEP_REPORT_ID &&
+		(buf[4] & PIP_DEEP_SLEEP_OPCODE_MASK) ==
+			PIP_DEEP_SLEEP_OPCODE)
 		return true;
 	return false;
 }
 
 static int gen5_idle_state_parse(struct cyapa *cyapa)
 {
-	u8 resp_data[GEN5_HID_DESCRIPTOR_SIZE];
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
 	int max_output_len;
 	int length;
 	u8 cmd[2];
@@ -778,9 +738,9 @@
 	if (ret != 3)
 		return ret < 0 ? ret : -EIO;
 
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		/* Normal state of Gen5 with no data to respose */
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		/* Normal state of Gen5 with no response data pending */
 		cyapa->gen = CYAPA_GEN5;
 
 		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -788,30 +748,30 @@
 		/* Read description from trackpad device */
 		cmd[0] = 0x01;
 		cmd[1] = 0x00;
-		length = GEN5_HID_DESCRIPTOR_SIZE;
+		length = PIP_HID_DESCRIPTOR_SIZE;
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-				cmd, GEN5_RESP_LENGTH_SIZE,
+				cmd, PIP_RESP_LENGTH_SIZE,
 				resp_data, &length,
 				300,
-				cyapa_gen5_sort_hid_descriptor_data,
+				cyapa_sort_gen5_hid_descriptor_data,
 				false);
 		if (error)
 			return error;
 
 		length = get_unaligned_le16(
-				&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+				&resp_data[PIP_RESP_LENGTH_OFFSET]);
 		max_output_len = get_unaligned_le16(&resp_data[16]);
-		if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) &&
+		if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_BL;
-		} else if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -839,14 +799,14 @@
 	 * or report any touch or button data.
 	 */
 	ret = cyapa_i2c_pip_read(cyapa, resp_data,
-			GEN5_HID_DESCRIPTOR_SIZE);
-	if (ret != GEN5_HID_DESCRIPTOR_SIZE)
+			PIP_HID_DESCRIPTOR_SIZE);
+	if (ret != PIP_HID_DESCRIPTOR_SIZE)
 		return ret < 0 ? ret : -EIO;
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&resp_data[16]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) {
 			/*
 			 * BL mode HID Description has been previously
 			 * read out.
@@ -861,15 +821,15 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			resp_data[2] == GEN5_BL_HID_REPORT_ID &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			resp_data[2] == PIP_HID_BL_REPORT_ID &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 		/* BL mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 		/* APP mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
@@ -886,22 +846,22 @@
 {
 	int length;
 
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	switch (reg_data[GEN5_RESP_REPORT_ID_OFFSET]) {
-	case GEN5_TOUCH_REPORT_ID:
-		if (length < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			length > GEN5_TOUCH_REPORT_MAX_SIZE)
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	switch (reg_data[PIP_RESP_REPORT_ID_OFFSET]) {
+	case PIP_TOUCH_REPORT_ID:
+		if (length < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			length > PIP_TOUCH_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_BTN_REPORT_ID:
+	case PIP_BTN_REPORT_ID:
 	case GEN5_OLD_PUSH_BTN_REPORT_ID:
-	case GEN5_PUSH_BTN_REPORT_ID:
-		if (length < GEN5_BTN_REPORT_HEAD_SIZE ||
-			length > GEN5_BTN_REPORT_MAX_SIZE)
+	case PIP_PUSH_BTN_REPORT_ID:
+		if (length < PIP_BTN_REPORT_HEAD_SIZE ||
+			length > PIP_BTN_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_WAKEUP_EVENT_REPORT_ID:
-		if (length != GEN5_WAKEUP_EVENT_SIZE)
+	case PIP_WAKEUP_EVENT_REPORT_ID:
+		if (length != PIP_WAKEUP_EVENT_SIZE)
 			return -EINVAL;
 		break;
 	default:
@@ -915,7 +875,7 @@
 
 static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int ret;
 
@@ -924,15 +884,15 @@
 	 * otherwise Gen5 trackpad cannot response next command
 	 * or report any touch or button data.
 	 */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	ret = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	ret = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 	if (ret != length)
 		return ret < 0 ? ret : -EIO;
 
-	if (length == GEN5_RESP_LENGTH_SIZE) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
 		/* Previous command has read the data through out. */
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) {
 			/* Gen5 BL command response data detected */
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_BL;
@@ -941,21 +901,21 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if ((gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) &&
-			(gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) &&
-			(gen5_pip->empty_buf[GEN5_RESP_BL_SOP_OFFSET] ==
-				GEN5_SOP_KEY) &&
-			(gen5_pip->empty_buf[length - 1] ==
-				GEN5_EOP_KEY)) {
+	} else if ((pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) &&
+			(pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) &&
+			(pip->empty_buf[PIP_RESP_BL_SOP_OFFSET] ==
+				PIP_SOP_KEY) &&
+			(pip->empty_buf[length - 1] ==
+				PIP_EOP_KEY)) {
 		/* Gen5 BL command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_RESP_REPORT_ID &&
-			gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) {
+	} else if (pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_APP_RESP_REPORT_ID &&
+			pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) {
 		/* Gen5 APP command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_APP;
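
The dispatch above keys off a small fixed response header: a 16-bit
little-endian length in bytes 0-1, a report ID in byte 2 and, for
bootloader responses, reserved/SOP/EOP framing bytes. A minimal userspace
sketch of the same classification follows; the offsets mirror the driver's
PIP_RESP_* usage, but the report-ID and key values are illustrative
assumptions, not the real PIP_* definitions:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Offsets follow the driver's PIP_RESP_* usage; the report-ID and
 * framing key values below are assumptions for illustration only.
 */
#define RESP_LENGTH_OFFSET	0	/* 16-bit LE packet length */
#define RESP_REPORT_ID_OFFSET	2
#define RESP_RSVD_OFFSET	3
#define RESP_BL_SOP_OFFSET	4
#define BL_RESP_REPORT_ID	0x30	/* assumed */
#define APP_RESP_REPORT_ID	0x1f	/* assumed */
#define RESP_RSVD_KEY		0x00	/* assumed */
#define SOP_KEY			0x01	/* assumed */
#define EOP_KEY			0x17	/* assumed */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static const char *classify_pip_resp(const uint8_t *buf, size_t buflen)
{
	uint16_t len;

	if (buflen < 3)
		return "runt";
	len = get_le16(&buf[RESP_LENGTH_OFFSET]);
	if (len < 5 || len > buflen)
		return "idle or truncated";
	if (buf[RESP_REPORT_ID_OFFSET] == BL_RESP_REPORT_ID &&
	    buf[RESP_RSVD_OFFSET] == RESP_RSVD_KEY &&
	    buf[RESP_BL_SOP_OFFSET] == SOP_KEY &&
	    buf[len - 1] == EOP_KEY)
		return "bootloader response";
	if (buf[RESP_REPORT_ID_OFFSET] == APP_RESP_REPORT_ID &&
	    buf[RESP_RSVD_OFFSET] == RESP_RSVD_KEY)
		return "application response";
	return "unknown";
}

int main(void)
{
	/* Fake 11-byte BL response: LE length, id, rsvd, SOP, ..., EOP. */
	uint8_t bl[] = { 0x0b, 0x00, 0x30, 0x00, 0x01,
			 0x38, 0x00, 0x00, 0x00, 0x00, 0x17 };

	printf("%s\n", classify_pip_resp(bl, sizeof(bl)));
	return 0;
}
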
@@ -977,12 +937,12 @@
 	cyapa->state = CYAPA_STATE_NO_DEVICE;
 
 	/* Parse based on Gen5 characteristic registers and bits */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == 0 || length == GEN5_RESP_LENGTH_SIZE) {
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == 0 || length == PIP_RESP_LENGTH_SIZE) {
 		gen5_idle_state_parse(cyapa);
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(reg_data[2] == GEN5_BL_HID_REPORT_ID ||
-				reg_data[2] == GEN5_APP_HID_REPORT_ID)) {
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(reg_data[2] == PIP_HID_BL_REPORT_ID ||
+				reg_data[2] == PIP_HID_APP_REPORT_ID)) {
 		gen5_hid_description_header_parse(cyapa, reg_data);
 	} else if ((length == GEN5_APP_REPORT_DESCRIPTOR_SIZE ||
 			length == GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE) &&
@@ -992,17 +952,17 @@
 		cyapa->state = CYAPA_STATE_GEN5_APP;
 	} else if (length == GEN5_BL_REPORT_DESCRIPTOR_SIZE &&
 			reg_data[2] == GEN5_BL_REPORT_DESCRIPTOR_ID) {
-		/* 0x1D 0x00 0xFE is Gen5 BL report descriptior header. */
+		/* 0x1D 0x00 0xFE is Gen5 BL report descriptor header. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (reg_data[2] == GEN5_TOUCH_REPORT_ID ||
-			reg_data[2] == GEN5_BTN_REPORT_ID ||
+	} else if (reg_data[2] == PIP_TOUCH_REPORT_ID ||
+			reg_data[2] == PIP_BTN_REPORT_ID ||
 			reg_data[2] == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_WAKEUP_EVENT_REPORT_ID) {
+			reg_data[2] == PIP_PUSH_BTN_REPORT_ID ||
+			reg_data[2] == PIP_WAKEUP_EVENT_REPORT_ID) {
 		gen5_report_data_header_parse(cyapa, reg_data);
-	} else if (reg_data[2] == GEN5_BL_RESP_REPORT_ID ||
-			reg_data[2] == GEN5_APP_RESP_REPORT_ID) {
+	} else if (reg_data[2] == PIP_BL_RESP_REPORT_ID ||
+			reg_data[2] == PIP_APP_RESP_REPORT_ID) {
 		gen5_cmd_resp_header_parse(cyapa, reg_data);
 	}
 
@@ -1023,14 +983,25 @@
 	return -EAGAIN;
 }
 
-static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
-		const struct firmware *fw)
+static struct cyapa_tsg_bin_image_data_record *
+cyapa_get_image_record_data_num(const struct firmware *fw,
+		int *record_num)
 {
-	struct cyapa_tsg_bin_image *image;
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_initiate_cmd_data *cmd_data;
-	struct gen5_bl_packet_end *bl_packet_end;
+	int head_size;
+
+	head_size = fw->data[0] + 1;
+	*record_num = (fw->size - head_size) /
+			sizeof(struct cyapa_tsg_bin_image_data_record);
+	return (struct cyapa_tsg_bin_image_data_record *)&fw->data[head_size];
+}
+
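
cyapa_get_image_record_data_num() relies on the TSG image layout: the
binary starts with a variable-length head whose first byte holds the head
size minus one, followed by a packed array of fixed-size flash row
records. A standalone sketch of the same arithmetic; the record field
widths are assumptions modelled on how the records are used in this file:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct cyapa_tsg_bin_image_data_record as used by the driver:
 * 1 byte flash array id, 2 bytes row number, 2 bytes record length,
 * then 128 bytes of row data (field widths are assumptions).
 */
struct tsg_record {
	uint8_t  flash_array_id;
	uint8_t  row_number[2];		/* big-endian in the image */
	uint8_t  record_len[2];
	uint8_t  record_data[128];
} __attribute__((packed));

static const struct tsg_record *
image_records(const uint8_t *fw, size_t fw_size, int *nrecords)
{
	size_t head_size = fw[0] + 1;	/* first byte = head size - 1 */

	*nrecords = (fw_size - head_size) / sizeof(struct tsg_record);
	return (const struct tsg_record *)&fw[head_size];
}

int main(void)
{
	/* A 12-byte head (first byte 0x0b) plus two empty records. */
	static uint8_t fw[12 + 2 * sizeof(struct tsg_record)] = { 0x0b };
	int n;

	image_records(fw, sizeof(fw), &n);
	printf("records: %d\n", n);	/* prints 2 */
	return 0;
}
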
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw)
+{
+	struct cyapa_tsg_bin_image_data_record *image_records;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct pip_bl_initiate_cmd_data *cmd_data;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
 	u16 cmd_data_len;
@@ -1046,30 +1017,28 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	cmd_data_len = CYAPA_TSG_BL_KEY_SIZE + CYAPA_TSG_FLASH_MAP_BLOCK_SIZE;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) + cmd_data_len +
-		  sizeof(struct gen5_bl_packet_end);
+	cmd_len = sizeof(struct pip_bl_cmd_head) + cmd_data_len +
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
 
 	bl_packet_start = &bl_cmd_head->packet_start;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_INITIATE_BL;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_INITIATE_BL;
 	/* 8 key bytes and 128 bytes block size */
 	put_unaligned_le16(cmd_data_len, &bl_packet_start->data_length);
 
-	cmd_data = (struct gen5_bl_initiate_cmd_data *)bl_cmd_head->data;
-	memcpy(cmd_data->key, cyapa_gen5_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+	cmd_data = (struct pip_bl_initiate_cmd_data *)bl_cmd_head->data;
+	memcpy(cmd_data->key, cyapa_pip_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
 
-	/* Copy 60 bytes Meta Data Row Parameters */
-	image = (struct cyapa_tsg_bin_image *)fw->data;
-	records_num = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-				sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records = cyapa_get_image_record_data_num(fw, &records_num);
+
 	/* APP_INTEGRITY row is always the last row block */
-	data = image->records[records_num - 1].record_data;
+	data = image_records[records_num - 1].record_data;
 	memcpy(cmd_data->metadata_raw_parameter, data,
 		CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 
@@ -1077,47 +1046,47 @@
 				CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 	put_unaligned_le16(meta_data_crc, &cmd_data->metadata_crc);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 				cmd_data_len);
 	cmd_crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + cmd_data_len);
+		sizeof(struct pip_bl_packet_start) + cmd_data_len);
 	put_unaligned_le16(cmd_crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, cmd_len,
 			resp_data, &resp_len, 12000,
-			cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_INITIATE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_INITIATE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
 }
 
-static bool cyapa_gen5_sort_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
+static bool cyapa_sort_pip_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	if (buf[0] == 0 && buf[1] == 0)
 		return true;
 
 	/* Exit bootloader failed for some reason. */
-	if (len == GEN5_BL_FAIL_EXIT_RESP_LEN &&
-			buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID &&
-			buf[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			buf[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY &&
-			buf[10] == GEN5_EOP_KEY)
+	if (len == PIP_BL_FAIL_EXIT_RESP_LEN &&
+			buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID &&
+			buf[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			buf[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY &&
+			buf[10] == PIP_EOP_KEY)
 		return true;
 
 	return false;
 }
 
-static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
+int cyapa_pip_bl_exit(struct cyapa *cyapa)
 {
 
 	u8 bl_gen5_bl_exit[] = { 0x04, 0x00,
@@ -1132,13 +1101,13 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			bl_gen5_bl_exit, sizeof(bl_gen5_bl_exit),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_bl_exit_data, false);
+			5000, cyapa_sort_pip_bl_exit_data, false);
 	if (error)
 		return error;
 
-	if (resp_len == GEN5_BL_FAIL_EXIT_RESP_LEN ||
-			resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID)
+	if (resp_len == PIP_BL_FAIL_EXIT_RESP_LEN ||
+			resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID)
 		return -EAGAIN;
 
 	if (resp_data[0] == 0x00 && resp_data[1] == 0x00)
@@ -1147,7 +1116,7 @@
 	return -ENODEV;
 }
 
-static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
+int cyapa_pip_bl_enter(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x01 };
 	u8 resp_data[2];
@@ -1157,15 +1126,12 @@
 	error = cyapa_poll_state(cyapa, 500);
 	if (error < 0)
 		return error;
-	if (cyapa->gen != CYAPA_GEN5)
-		return -EINVAL;
 
-	/* Already in Gen5 BL. Skipping exit. */
-	if (cyapa->state == CYAPA_STATE_GEN5_BL)
+	/* Already in bootloader mode; skipping exit. */
+	if (cyapa_is_pip_bl_mode(cyapa))
 		return 0;
-
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
-		return -EAGAIN;
+	else if (!cyapa_is_pip_app_mode(cyapa))
+		return -EINVAL;
 
 	/* Try to dump all buffered report data before any send command. */
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -1179,39 +1145,79 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_application_launch_data,
+			5000, cyapa_sort_pip_application_launch_data,
 			true);
 	if (error || resp_data[0] != 0x00 || resp_data[1] != 0x00)
 		return error < 0 ? error : -EAGAIN;
 
 	cyapa->operational = false;
-	cyapa->state = CYAPA_STATE_GEN5_BL;
+	if (cyapa->gen == CYAPA_GEN5)
+		cyapa->state = CYAPA_STATE_GEN5_BL;
+	else if (cyapa->gen == CYAPA_GEN6)
+		cyapa->state = CYAPA_STATE_GEN6_BL;
 	return 0;
 }
 
-static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+static int cyapa_pip_fw_head_check(struct cyapa *cyapa,
+		struct cyapa_tsg_bin_image_head *image_head)
+{
+	if (image_head->head_size != 0x0C && image_head->head_size != 0x12)
+		return -EINVAL;
+
+	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		if (image_head->family_id != 0x9B ||
+		    image_head->silicon_id_hi != 0x0B)
+			return -EINVAL;
+		break;
+	case CYAPA_GEN5:
+		/* Gen5 without proximity support. */
+		if (cyapa->platform_ver < 2) {
+			if (image_head->head_size == 0x0C)
+				break;
+			return -EINVAL;
+		}
+
+		if (image_head->family_id != 0x91 ||
+		    image_head->silicon_id_hi != 0x02)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	const struct cyapa_tsg_bin_image *image = (const void *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	const struct cyapa_tsg_bin_image_data_record *app_integrity;
-	const struct gen5_bl_metadata_row_params *metadata;
-	size_t flash_records_count;
+	const struct tsg_bl_metadata_row_params *metadata;
+	int flash_records_count;
 	u32 fw_app_start, fw_upgrade_start;
 	u16 fw_app_len, fw_upgrade_len;
 	u16 app_crc;
 	u16 app_integrity_crc;
-	int record_index;
 	int i;
 
-	flash_records_count = (fw->size -
-			sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	/* Verify the firmware image is not misapplied between Gen5 and Gen6. */
+	if (cyapa_pip_fw_head_check(cyapa,
+		(struct cyapa_tsg_bin_image_head *)fw->data)) {
+		dev_err(dev, "%s: firmware image does not match TP device.\n",
+			     __func__);
+		return -EINVAL;
+	}
+
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
 
 	/*
 	 * APP_INTEGRITY row is always the last row block,
 	 * and the row id must be 0x01ff.
 	 */
-	app_integrity = &image->records[flash_records_count - 1];
+	app_integrity = &image_records[flash_records_count - 1];
 
 	if (app_integrity->flash_array_id != 0x00 ||
 	    get_unaligned_be16(&app_integrity->row_number) != 0x01ff) {
@@ -1242,14 +1248,11 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Verify application image CRC
-	 */
-	record_index = fw_app_start / CYAPA_TSG_FW_ROW_SIZE -
-				CYAPA_TSG_IMG_START_ROW_NUM;
+	/* Verify application image CRC. */
 	app_crc = 0xffffU;
 	for (i = 0; i < fw_app_len / CYAPA_TSG_FW_ROW_SIZE; i++) {
-		const u8 *data = image->records[record_index + i].record_data;
+		const u8 *data = image_records[i].record_data;
+
 		app_crc = crc_itu_t(app_crc, data, CYAPA_TSG_FW_ROW_SIZE);
 	}
 
@@ -1261,13 +1264,13 @@
 	return 0;
 }
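
Both the image check above and the bootloader packets below use
crc_itu_t(), the kernel's CRC16-CCITT (polynomial 0x1021, no bit
reflection), seeded here with 0xffff. For checking an image outside the
kernel, a self-contained bitwise equivalent:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC16-CCITT, matching the semantics of the kernel's
 * crc_itu_t(): polynomial x^16 + x^12 + x^5 + 1 (0x1021),
 * caller-supplied seed, no input/output reflection.
 */
static uint16_t crc_itu_t(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021
					     : (crc << 1);
	}
	return crc;
}

int main(void)
{
	/* CRC over two 4-byte "rows", seeded with 0xffff as the driver does. */
	uint8_t rows[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
	uint16_t crc = 0xffff;

	for (int i = 0; i < 2; i++)
		crc = crc_itu_t(crc, rows[i], sizeof(rows[i]));
	printf("crc = 0x%04x\n", crc);
	return 0;
}
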
 
-static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
+static int cyapa_pip_write_fw_block(struct cyapa *cyapa,
 		struct cyapa_tsg_bin_image_data_record *flash_record)
 {
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_flash_row_head *flash_row_head;
-	struct gen5_bl_packet_end *bl_packet_end;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct tsg_bl_flash_row_head *flash_row_head;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	u16 cmd_len;
 	u8 flash_array_id;
@@ -1286,71 +1289,68 @@
 	record_data = flash_record->record_data;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	bl_packet_start = &bl_cmd_head->packet_start;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) +
-		  sizeof(struct gen5_bl_flash_row_head) +
+	cmd_len = sizeof(struct pip_bl_cmd_head) +
+		  sizeof(struct tsg_bl_flash_row_head) +
 		  CYAPA_TSG_FLASH_MAP_BLOCK_SIZE +
-		  sizeof(struct gen5_bl_packet_end);
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	/* Don't include 2 bytes register address */
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_PROGRAM_VERIFY_ROW;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_PROGRAM_VERIFY_ROW;
 
 	/* 1 (Flash Array ID) + 2 (Flash Row ID) + 128 (flash data) */
-	data_len = sizeof(struct gen5_bl_flash_row_head) + record_len;
+	data_len = sizeof(struct tsg_bl_flash_row_head) + record_len;
 	put_unaligned_le16(data_len, &bl_packet_start->data_length);
 
-	flash_row_head = (struct gen5_bl_flash_row_head *)bl_cmd_head->data;
+	flash_row_head = (struct tsg_bl_flash_row_head *)bl_cmd_head->data;
 	flash_row_head->flash_array_id = flash_array_id;
 	put_unaligned_le16(flash_row_id, &flash_row_head->flash_row_id);
 	memcpy(flash_row_head->flash_data, record_data, record_len);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 						      data_len);
 	crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + data_len);
+		sizeof(struct pip_bl_packet_start) + data_len);
 	put_unaligned_le16(crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_BLOCK_WRITE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_BLOCK_WRITE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
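
Every bootloader command built in this file shares the same envelope: a
command head (register address, length, report ID), a packet start (SOP,
command code, data length), the payload, and a packet end carrying a CRC
over packet start plus payload, terminated by EOP. A userspace sketch of
that framing; the struct shapes follow the driver's pip_bl_* structs, the
report-ID and key values are assumptions, and crc_itu_t() is the bitwise
sketch shown earlier:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SOP_KEY			0x01	/* assumed framing keys */
#define EOP_KEY			0x17
#define BL_CMD_REPORT_ID	0x40	/* assumed report ID */
#define OUTPUT_REPORT_ADDR	0x0004

struct bl_packet_start {		/* mirrors struct pip_bl_packet_start */
	uint8_t sop;
	uint8_t cmd_code;
	uint8_t data_length[2];		/* little-endian */
} __attribute__((packed));

struct bl_cmd_head {			/* mirrors struct pip_bl_cmd_head */
	uint8_t addr[2];		/* I2C register address, LE */
	uint8_t length[2];		/* packet length minus addr bytes, LE */
	uint8_t report_id;
	uint8_t rsvd;			/* assumed reserved byte */
	struct bl_packet_start packet_start;
} __attribute__((packed));

/* CRC16-CCITT, e.g. the crc_itu_t() sketch shown earlier. */
uint16_t crc_itu_t(uint16_t crc, const uint8_t *buf, size_t len);

static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* Frame one BL command into cmd[]; returns the total command length. */
static size_t frame_bl_cmd(uint8_t *cmd, uint8_t cmd_code,
			   const uint8_t *data, uint16_t data_len)
{
	struct bl_cmd_head *head = (struct bl_cmd_head *)cmd;
	size_t total = sizeof(*head) + data_len + 3;	/* 2 CRC + 1 EOP */
	uint16_t crc;

	put_le16(OUTPUT_REPORT_ADDR, head->addr);
	put_le16(total - 2, head->length);	/* addr bytes not counted */
	head->report_id = BL_CMD_REPORT_ID;
	head->rsvd = 0;
	head->packet_start.sop = SOP_KEY;
	head->packet_start.cmd_code = cmd_code;
	put_le16(data_len, head->packet_start.data_length);
	memcpy(cmd + sizeof(*head), data, data_len);

	/* The CRC covers the packet start through the end of the payload. */
	crc = crc_itu_t(0xffff, (uint8_t *)&head->packet_start,
			sizeof(head->packet_start) + data_len);
	put_le16(crc, cmd + sizeof(*head) + data_len);
	cmd[total - 1] = EOP_KEY;
	return total;
}
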
 
-static int cyapa_gen5_do_fw_update(struct cyapa *cyapa,
+int cyapa_pip_do_fw_update(struct cyapa *cyapa,
 		const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_tsg_bin_image_data_record *flash_record;
-	struct cyapa_tsg_bin_image *image =
-		(struct cyapa_tsg_bin_image *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	int flash_records_count;
 	int i;
 	int error;
 
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
-	flash_records_count =
-		(fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
+
 	/*
 	 * The last flash row 0x01ff has been written through bl_initiate
 	 * command, so DO NOT write flash 0x01ff to trackpad device.
 	 */
 	for (i = 0; i < (flash_records_count - 1); i++) {
-		flash_record = &image->records[i];
-		error = cyapa_gen5_write_fw_block(cyapa, flash_record);
+		error = cyapa_pip_write_fw_block(cyapa, &image_records[i]);
 		if (error) {
 			dev_err(dev, "%s: Gen5 FW update aborted: %d\n",
 				__func__, error);
@@ -1372,9 +1372,9 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x08) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EINVAL;
 
 	return 0;
@@ -1383,7 +1383,7 @@
 static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_set_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1393,10 +1393,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_set_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_set_parameter_data);
 
 	switch (parameter_id) {
@@ -1413,14 +1413,14 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/*
 	 * Don't include unused parameter value bytes and
 	 * 2 bytes register address.
 	 */
 	put_unaligned_le16(cmd_len - (4 - parameter_size) - 2,
 			   &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 	parameter_data->parameter_size = parameter_size;
@@ -1428,7 +1428,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id ||
 		resp_data[6] != parameter_size ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER))
@@ -1440,7 +1440,7 @@
 static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 *interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_get_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1451,10 +1451,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_get_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_get_parameter_data);
 
 	*interval_time = 0;
@@ -1472,17 +1472,17 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/* Don't include 2 bytes register address */
 	put_unaligned_le16(cmd_len - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_GET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id || resp_data[6] == 0 ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_GET_PARAMETER))
 		return error < 0 ? error : -EINVAL;
@@ -1497,18 +1497,18 @@
 
 static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[10];
 	u8 resp_data[7];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	app_cmd_head->parameter_data[0] = GEN5_PARAMETER_DISABLE_PIP_REPORT;
 	app_cmd_head->parameter_data[1] = 0x01;
@@ -1516,7 +1516,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != GEN5_PARAMETER_DISABLE_PIP_REPORT ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER) ||
 		resp_data[6] != 0x01)
@@ -1525,26 +1525,48 @@
 	return 0;
 }
 
-static int cyapa_gen5_deep_sleep(struct cyapa *cyapa, u8 state)
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, PIP_SET_PROXIMITY,
+		     (u8)!!enable
+	};
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_SET_PROXIMITY) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data)) {
+		error = (error == -ETIMEDOUT) ? -EOPNOTSUPP : error;
+		return error < 0 ? error : -EINVAL;
+	}
+
+	return 0;
+}
+
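
For reference, the fixed blob in cyapa_pip_set_proximity() above
decomposes according to the pip_app_cmd_head layout used throughout these
drivers. The byte meanings are inferred from the surrounding code, so
treat the annotation as a sketch:

u8 cmd[] = {
	0x04, 0x00,		/* output report register address, LE (0x0004) */
	0x06, 0x00,		/* packet length, LE: total minus 2 addr bytes */
	0x2f,			/* PIP application command report ID */
	0x00,			/* reserved, must be 0 */
	PIP_SET_PROXIMITY,	/* command code */
	(u8)!!enable		/* parameter: 1 = enable, 0 = disable */
};
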
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
 {
 	u8 cmd[] = { 0x05, 0x00, 0x00, 0x08};
 	u8 resp_data[5];
 	int resp_len;
 	int error;
 
-	cmd[2] = state & GEN5_DEEP_SLEEP_STATE_MASK;
+	cmd[2] = state & PIP_DEEP_SLEEP_STATE_MASK;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_deep_sleep_data, false);
-	if (error || ((resp_data[3] & GEN5_DEEP_SLEEP_STATE_MASK) != state))
+			500, cyapa_sort_pip_deep_sleep_data, false);
+	if (error || ((resp_data[3] & PIP_DEEP_SLEEP_STATE_MASK) != state))
 		return -EINVAL;
 
 	return 0;
 }
 
 static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
-		u8 power_mode, u16 sleep_time)
+		u8 power_mode, u16 sleep_time, bool is_suspend)
 {
 	struct device *dev = &cyapa->client->dev;
 	u8 power_state;
@@ -1553,43 +1575,40 @@
 	if (cyapa->state != CYAPA_STATE_GEN5_APP)
 		return 0;
 
-	/* Dump all the report data before do power mode commmands. */
-	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
 		/*
 		 * Assume TP in deep sleep mode when driver is loaded,
 		 * avoid driver unload and reload command IO issue caused by TP
 		 * has been set into deep sleep mode when unloading.
 		 */
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 	}
 
-	if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) &&
-			GEN5_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+			PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
 		if (cyapa_gen5_get_interval_time(cyapa,
 				GEN5_PARAMETER_LP_INTRVL_ID,
 				&cyapa->dev_sleep_time) != 0)
-			GEN5_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+			PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
 
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
 		if (power_mode == PWR_MODE_OFF ||
 			power_mode == PWR_MODE_FULL_ACTIVE ||
 			power_mode == PWR_MODE_BTN_ONLY ||
-			GEN5_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
 			/* Already in the correct power mode state; return early. */
 			return 0;
 		}
 	}
 
 	if (power_mode == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_OFF);
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
 		if (error) {
 			dev_err(dev, "enter deep sleep fail: %d\n", error);
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 		return 0;
 	}
 
@@ -1598,8 +1617,8 @@
 	 * state directly; it must be woken up from sleep first, then
 	 * continue to the next power state change.
 	 */
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_ON);
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
 		if (error) {
 			dev_err(dev, "deep sleep wake fail: %d\n", error);
 			return error;
@@ -1614,7 +1633,7 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
 	} else if (power_mode == PWR_MODE_BTN_ONLY) {
 		error = cyapa_gen5_change_power_state(cyapa,
 				GEN5_POWER_STATE_BTN_ONLY);
@@ -1623,19 +1642,19 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
 	} else {
 		/*
 		 * Continue to change the power mode even if setting the
 		 * interval time failed; it won't affect the power mode
 		 * change, except that the sleep interval time may not be
 		 * correct.
 		 */
-		if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) ||
-				sleep_time != GEN5_DEV_GET_SLEEP_TIME(cyapa))
+		if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) ||
+				sleep_time != PIP_DEV_GET_SLEEP_TIME(cyapa))
 			if (cyapa_gen5_set_interval_time(cyapa,
 					GEN5_PARAMETER_LP_INTRVL_ID,
 					sleep_time) == 0)
-				GEN5_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+				PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
 
 		if (sleep_time <= GEN5_POWER_READY_MAX_INTRVL_TIME)
 			power_state = GEN5_POWER_STATE_READY;
@@ -1658,17 +1677,17 @@
 		 * is suspending which may cause interrupt line unable to be
 		 * asserted again.
 		 */
-		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-		cyapa_gen5_disable_pip_report(cyapa);
+		if (is_suspend)
+			cyapa_gen5_disable_pip_report(cyapa);
 
-		GEN5_DEV_SET_PWR_STATE(cyapa,
+		PIP_DEV_SET_PWR_STATE(cyapa,
 			cyapa_sleep_time_to_pwr_cmd(sleep_time));
 	}
 
 	return 0;
 }
 
-static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
+int cyapa_pip_resume_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x04 };
 	u8 resp_data[6];
@@ -1682,7 +1701,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x04))
 		return -EINVAL;
 
@@ -1692,7 +1711,7 @@
 	return 0;
 }
 
-static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x03 };
 	u8 resp_data[6];
@@ -1706,7 +1725,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x03))
 		return -EINVAL;
 
@@ -1716,10 +1735,10 @@
 	return 0;
 }
 
-static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
+static int cyapa_pip_calibrate_pwcs(struct cyapa *cyapa,
 		u8 calibrate_sensing_mode_type)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[8];
 	u8 resp_data[6];
 	int resp_len;
@@ -1729,25 +1748,25 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
-	app_cmd_head->cmd_code = GEN5_CMD_CALIBRATE;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
+	app_cmd_head->cmd_code = PIP_CMD_CALIBRATE;
 	app_cmd_head->parameter_data[0] = calibrate_sensing_mode_type;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
-	if (error || !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_CALIBRATE) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			5000, cyapa_sort_tsg_pip_app_resp_data, true);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_CMD_CALIBRATE) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
 
-static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
@@ -1755,25 +1774,25 @@
 	int error, calibrate_error;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
 	/* 2. Do mutual capacitance fine calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_MUTUAL_CAP_FINE);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_MUTUAL_CAP_FINE);
 	if (calibrate_error)
 		goto resume_scanning;
 
 	/* 3. Do self capacitance calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_SELF_CAP);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_SELF_CAP);
 	if (calibrate_error)
 		goto resume_scanning;
 
 resume_scanning:
 	/* 4. Resume Scanning*/
-	error = cyapa_gen5_resume_scanning(cyapa);
+	error = cyapa_pip_resume_scanning(cyapa);
 	if (error || calibrate_error)
 		return error ? error : calibrate_error;
 
@@ -1856,7 +1875,7 @@
  * If the input value of @data_size is not 0, that means read the mutual or
  * self local PWC data. The @idac_max, @idac_min and @idac_ave are used to
  * return the max, min and average value of the mutual or self local PWC data.
- * Note, in order to raed mutual local PWC data, must read invoke this function
+ * Note, in order to read mutual local PWC data, you must invoke this function
  * to read the mutual global idac data first to set the correct Rx number
  * value, otherwise the read mutual idac and PWC data may not be correct.
  */
@@ -1864,7 +1883,7 @@
 		u8 cmd_code, u8 idac_data_type, int *data_size,
 		int *idac_max, int *idac_min, int *idac_ave)
 {
-	struct gen5_app_cmd_head *cmd_head;
+	struct pip_app_cmd_head *cmd_head;
 	u8 cmd[12];
 	u8 resp_data[256];
 	int resp_len;
@@ -1879,7 +1898,7 @@
 	int i;
 	int error;
 
-	if (cmd_code != GEN5_CMD_RETRIEVE_DATA_STRUCTURE ||
+	if (cmd_code != PIP_RETRIEVE_DATA_STRUCTURE ||
 		(idac_data_type != GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
 		idac_data_type != GEN5_RETRIEVE_SELF_CAP_PWC_DATA) ||
 		!data_size || !idac_max || !idac_min || !idac_ave)
@@ -1935,10 +1954,10 @@
 	}
 
 	memset(cmd, 0, sizeof(cmd));
-	cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &cmd_head->addr);
+	cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &cmd_head->length);
-	cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	cmd_head->cmd_code = cmd_code;
 	do {
 		read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) /
@@ -1953,11 +1972,11 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 				cmd, sizeof(cmd),
 				resp_data, &resp_len,
-				500, cyapa_gen5_sort_tsg_pip_app_resp_data,
+				500, cyapa_sort_tsg_pip_app_resp_data,
 				true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != idac_data_type)
 			return (error < 0) ? error : -EAGAIN;
 		read_len = get_unaligned_le16(&resp_data[7]);
@@ -1997,7 +2016,7 @@
 				tmp_count < cyapa->aligned_electrodes_rx &&
 				read_global_idac) {
 				/*
-				 * The value gap betwen global and local mutual
+				 * The value gap between global and local mutual
 				 * idac data must be bigger than 50%.
 				 * Normally, the global value is bigger than
 				 * 50, and local values are less than 10.
@@ -2061,7 +2080,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave);
@@ -2069,7 +2088,7 @@
 		return error;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave);
@@ -2088,7 +2107,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2098,7 +2117,7 @@
 	*gidac_self_tx = *lidac_self_min;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2107,27 +2126,27 @@
 
 static ssize_t cyapa_gen5_execute_panel_scan(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[7];
 	u8 resp_data[6];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_EXECUTE_PANEL_SCAN;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || resp_len != sizeof(resp_data) ||
 			!VALID_CMD_RESP_HEADER(resp_data,
 				GEN5_CMD_EXECUTE_PANEL_SCAN) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
@@ -2138,7 +2157,7 @@
 		int *raw_data_max, int *raw_data_min, int *raw_data_ave,
 		u8 *buffer)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_retrieve_panel_scan_data *panel_sacn_data;
 	u8 cmd[12];
 	u8 resp_data[256];  /* Max bytes can transfer one time. */
@@ -2166,10 +2185,10 @@
 	/* Assume max element size is 4 currently. */
 	read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) / 4;
 	read_len = read_elements * 4;
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = cmd_code;
 	panel_sacn_data = (struct gen5_retrieve_panel_scan_data *)
 			app_cmd_head->parameter_data;
@@ -2183,10 +2202,10 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != raw_data_type)
 			return error ? error : -EAGAIN;
 
@@ -2245,11 +2264,11 @@
 	int error, resume_error;
 	int size;
 
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
+	if (!cyapa_is_pip_app_mode(cyapa))
 		return -EBUSY;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
@@ -2270,7 +2289,7 @@
 	if (error)
 		goto resume_scanning;
 
-	/* 4. Execuate panel scan. It must be executed before read data. */
+	/* 4. Execute panel scan. It must be executed before read data. */
 	error = cyapa_gen5_execute_panel_scan(cyapa);
 	if (error)
 		goto resume_scanning;
@@ -2343,7 +2362,7 @@
 
 resume_scanning:
 	/* 11. Resume Scanning*/
-	resume_error = cyapa_gen5_resume_scanning(cyapa);
+	resume_error = cyapa_pip_resume_scanning(cyapa);
 	if (resume_error || error)
 		return resume_error ? resume_error : error;
 
@@ -2364,7 +2383,7 @@
 	return size;
 }
 
-static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	/* Check the report id and command code */
@@ -2376,20 +2395,17 @@
 
 static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
 {
-	u8 bl_query_data_cmd[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
-		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
-	};
-	u8 resp_data[GEN5_BL_READ_APP_INFO_RESP_LEN];
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
 	int resp_len;
 	int error;
 
-	resp_len = GEN5_BL_READ_APP_INFO_RESP_LEN;
+	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			bl_query_data_cmd, sizeof(bl_query_data_cmd),
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, false);
-	if (error || resp_len != GEN5_BL_READ_APP_INFO_RESP_LEN ||
-		!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EIO;
 
 	memcpy(&cyapa->product_id[0], &resp_data[8], 5);
@@ -2402,34 +2418,42 @@
 	cyapa->fw_maj_ver = resp_data[22];
 	cyapa->fw_min_ver = resp_data[23];
 
+	cyapa->platform_ver = (resp_data[26] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
 	return 0;
 }
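
The new platform_ver field is carved out of byte 26 of the bootloader
app-info response with a shift and mask, and the rest of the series only
compares it against 2 (proximity-capable firmware). The shift and mask
values below are hypothetical stand-ins, since the real
PIP_BL_PLATFORM_VER_* definitions live in cyapa.h outside this hunk:

/* Hypothetical values for illustration; see PIP_BL_PLATFORM_VER_*
 * in cyapa.h for the real definitions.
 */
#define PIP_BL_PLATFORM_VER_SHIFT	4
#define PIP_BL_PLATFORM_VER_MASK	0x0f

u8 byte26 = 0x25;	/* sample response byte */
u8 platform_ver = (byte26 >> PIP_BL_PLATFORM_VER_SHIFT) &
		  PIP_BL_PLATFORM_VER_MASK;	/* 0x02 -> proximity capable */
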
 
 static int cyapa_gen5_get_query_data(struct cyapa *cyapa)
 {
-	u8 get_system_information[] = {
-		0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02
-	};
-	u8 resp_data[71];
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
 	int resp_len;
 	u16 product_family;
 	int error;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			get_system_information, sizeof(get_system_information),
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			2000, cyapa_gen5_sort_system_info_data, false);
+			2000, cyapa_pip_sort_system_info_data, false);
 	if (error || resp_len < sizeof(resp_data))
 		return error ? error : -EIO;
 
 	product_family = get_unaligned_le16(&resp_data[7]);
-	if ((product_family & GEN5_PRODUCT_FAMILY_MASK) !=
-		GEN5_PRODUCT_FAMILY_TRACKPAD)
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
 		return -EINVAL;
 
-	cyapa->fw_maj_ver = resp_data[15];
-	cyapa->fw_min_ver = resp_data[16];
+	cyapa->platform_ver = (resp_data[49] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->platform_ver < 2) {
+		/* Gen5 firmware that does not support proximity. */
+		cyapa->fw_maj_ver = resp_data[15];
+		cyapa->fw_min_ver = resp_data[16];
+	} else {
+		cyapa->fw_maj_ver = resp_data[9];
+		cyapa->fw_min_ver = resp_data[10];
+	}
 
 	cyapa->electrodes_x = resp_data[52];
 	cyapa->electrodes_y = resp_data[53];
@@ -2472,9 +2496,9 @@
 
 	switch (cyapa->state) {
 	case CYAPA_STATE_GEN5_BL:
-		error = cyapa_gen5_bl_exit(cyapa);
+		error = cyapa_pip_bl_exit(cyapa);
 		if (error) {
-			/* Rry to update trackpad product information. */
+			/* Try to update trackpad product information. */
 			cyapa_gen5_bl_query_data(cyapa);
 			goto out;
 		}
@@ -2486,14 +2510,23 @@
 		 * If trackpad device in deep sleep mode,
 		 * the app command will fail.
 		 * So always try to reset trackpad device to full active when
-		 * the device state is requeried.
+		 * the device state is required.
 		 */
 		error = cyapa_gen5_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_warn(dev, "%s: failed to set power active mode.\n",
 				__func__);
 
+		/* By default, the trackpad proximity function is enabled. */
+		if (cyapa->platform_ver >= 2) {
+			error = cyapa_pip_set_proximity(cyapa, true);
+			if (error)
+				dev_warn(dev,
+					"%s: failed to enable proximity.\n",
+					__func__);
+		}
+
 		/* Get trackpad product information. */
 		error = cyapa_gen5_get_query_data(cyapa);
 		if (error)
@@ -2518,14 +2551,14 @@
  * Return false, do not continue processing.
  * Return true, continue processing.
  */
-static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 
-	if (atomic_read(&gen5_pip->cmd_issued)) {
+	if (atomic_read(&pip->cmd_issued)) {
 		/* Polling command response data. */
-		if (gen5_pip->is_irq_mode == false)
+		if (pip->is_irq_mode == false)
 			return false;
 
 		/*
@@ -2533,59 +2566,64 @@
 		 * these output data may be caused by the user touching the
 		 * trackpad while the host is waiting for the command response.
 		 */
-		cyapa_i2c_pip_read(cyapa, gen5_pip->irq_cmd_buf,
-			GEN5_RESP_LENGTH_SIZE);
-		length = get_unaligned_le16(gen5_pip->irq_cmd_buf);
-		length = (length <= GEN5_RESP_LENGTH_SIZE) ?
-				GEN5_RESP_LENGTH_SIZE : length;
-		if (length > GEN5_RESP_LENGTH_SIZE)
+		cyapa_i2c_pip_read(cyapa, pip->irq_cmd_buf,
+			PIP_RESP_LENGTH_SIZE);
+		length = get_unaligned_le16(pip->irq_cmd_buf);
+		length = (length <= PIP_RESP_LENGTH_SIZE) ?
+				PIP_RESP_LENGTH_SIZE : length;
+		if (length > PIP_RESP_LENGTH_SIZE)
 			cyapa_i2c_pip_read(cyapa,
-				gen5_pip->irq_cmd_buf, length);
-
-		if (!(gen5_pip->resp_sort_func &&
-			gen5_pip->resp_sort_func(cyapa,
-				gen5_pip->irq_cmd_buf, length))) {
+				pip->irq_cmd_buf, length);
+		if (!(pip->resp_sort_func &&
+			pip->resp_sort_func(cyapa,
+				pip->irq_cmd_buf, length))) {
 			/*
-			 * Work around the Gen5 V1 firmware
-			 * that does not assert interrupt signalling
-			 * that command response is ready if user
-			 * keeps touching the trackpad while command
-			 * is sent to the device.
+			 * Work around a Gen5 V1 firmware issue:
+			 * no interrupt is asserted from the trackpad
+			 * device to the host when the command response
+			 * is ready. While a finger is touching the
+			 * trackpad, the firmware output queue is never
+			 * empty (it always holds touch report data), so
+			 * the interrupt signal is not asserted again
+			 * until the output queue has been emptied.
+			 * This can happen when the user keeps fingers on
+			 * the trackpad during system booting/rebooting.
 			 */
 			length = 0;
-			if (gen5_pip->resp_len)
-				length = *gen5_pip->resp_len;
+			if (pip->resp_len)
+				length = *pip->resp_len;
 			cyapa_empty_pip_output_data(cyapa,
-					gen5_pip->resp_data,
+					pip->resp_data,
 					&length,
-					gen5_pip->resp_sort_func);
-			if (gen5_pip->resp_len && length != 0) {
-				*gen5_pip->resp_len = length;
-				atomic_dec(&gen5_pip->cmd_issued);
-				complete(&gen5_pip->cmd_ready);
+					pip->resp_sort_func);
+			if (pip->resp_len && length != 0) {
+				*pip->resp_len = length;
+				atomic_dec(&pip->cmd_issued);
+				complete(&pip->cmd_ready);
 			}
 			return false;
 		}
 
-		if (gen5_pip->resp_data && gen5_pip->resp_len) {
-			*gen5_pip->resp_len = (*gen5_pip->resp_len < length) ?
-				*gen5_pip->resp_len : length;
-			memcpy(gen5_pip->resp_data, gen5_pip->irq_cmd_buf,
-				*gen5_pip->resp_len);
+		if (pip->resp_data && pip->resp_len) {
+			*pip->resp_len = (*pip->resp_len < length) ?
+				*pip->resp_len : length;
+			memcpy(pip->resp_data, pip->irq_cmd_buf,
+				*pip->resp_len);
 		}
-		atomic_dec(&gen5_pip->cmd_issued);
-		complete(&gen5_pip->cmd_ready);
+		atomic_dec(&pip->cmd_issued);
+		complete(&pip->cmd_ready);
 		return false;
 	}
 
 	return true;
 }
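
cyapa_pip_irq_cmd_handler() is the completer half of a classic
issue/complete handshake: the submitter raises cmd_issued and sleeps on
the cmd_ready completion, and the IRQ path copies the matched response,
decrements the counter and calls complete(). A condensed sketch of the
submitter side it pairs with; this is not the driver's actual
cyapa_i2c_pip_cmd_irq_sync(), and error handling is trimmed:

/* Submitter-side sketch of the handshake completed by
 * cyapa_pip_irq_cmd_handler(); not the driver's real helper.
 */
static int pip_cmd_wait_response(struct cyapa *cyapa, u8 *resp,
				 int *resp_len, unsigned long timeout_ms)
{
	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;

	pip->resp_data = resp;
	pip->resp_len = resp_len;
	reinit_completion(&pip->cmd_ready);
	atomic_inc(&pip->cmd_issued);	/* the IRQ path now owns the reply */

	/* ... write the command bytes to the device here ... */

	if (!wait_for_completion_timeout(&pip->cmd_ready,
					 msecs_to_jiffies(timeout_ms))) {
		atomic_dec(&pip->cmd_issued);	/* reclaim on timeout */
		return -ETIMEDOUT;
	}
	return 0;
}
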
 
-static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_buttons(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 buttons = report_data->report_head[GEN5_BUTTONS_OFFSET];
+	u8 buttons = report_data->report_head[PIP_BUTTONS_OFFSET];
 
 	buttons = (buttons << CAPABILITY_BTN_SHIFT) & CAPABILITY_BTN_MASK;
 
@@ -2605,12 +2643,23 @@
 	input_sync(input);
 }
 
-static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
-		const struct cyapa_gen5_touch_record *touch)
+static void cyapa_pip_report_proximity(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 event_id = GEN5_GET_EVENT_ID(touch->touch_tip_event_id);
-	int slot = GEN5_GET_TOUCH_ID(touch->touch_tip_event_id);
+	u8 distance = report_data->report_head[PIP_PROXIMITY_DISTANCE_OFFSET] &
+			PIP_PROXIMITY_DISTANCE_MASK;
+
+	input_report_abs(input, ABS_DISTANCE, distance);
+	input_sync(input);
+}
+
+static void cyapa_pip_report_slot_data(struct cyapa *cyapa,
+		const struct cyapa_pip_touch_record *touch)
+{
+	struct input_dev *input = cyapa->input;
+	u8 event_id = PIP_GET_EVENT_ID(touch->touch_tip_event_id);
+	int slot = PIP_GET_TOUCH_ID(touch->touch_tip_event_id);
 	int x, y;
 
 	if (event_id == RECORD_EVENT_LIFTOFF)
@@ -2621,11 +2670,12 @@
 	x = (touch->x_hi << 8) | touch->x_lo;
 	if (cyapa->x_origin)
 		x = cyapa->max_abs_x - x;
-	input_report_abs(input, ABS_MT_POSITION_X, x);
 	y = (touch->y_hi << 8) | touch->y_lo;
 	if (cyapa->y_origin)
 		y = cyapa->max_abs_y - y;
+	input_report_abs(input, ABS_MT_POSITION_X, x);
 	input_report_abs(input, ABS_MT_POSITION_Y, y);
+	input_report_abs(input, ABS_DISTANCE, 0);
 	input_report_abs(input, ABS_MT_PRESSURE,
 		touch->z);
 	input_report_abs(input, ABS_MT_TOUCH_MAJOR,
@@ -2642,50 +2692,49 @@
 		touch->orientation);
 }
 
-static void cyapa_gen5_report_touches(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_touches(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
 	unsigned int touch_num;
 	int i;
 
-	touch_num = report_data->report_head[GEN5_NUMBER_OF_TOUCH_OFFSET] &
-			GEN5_NUMBER_OF_TOUCH_MASK;
+	touch_num = report_data->report_head[PIP_NUMBER_OF_TOUCH_OFFSET] &
+			PIP_NUMBER_OF_TOUCH_MASK;
 
 	for (i = 0; i < touch_num; i++)
-		cyapa_gen5_report_slot_data(cyapa,
+		cyapa_pip_report_slot_data(cyapa,
 			&report_data->touch_records[i]);
 
 	input_mt_sync_frame(input);
 	input_sync(input);
 }
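
The two reporting helpers above follow the standard multi-touch protocol
type B flow: select a slot, mark the tool active, report the per-contact
axes, then close the frame. Stripped to its skeleton (a sketch, not the
driver's exact code):

/* Skeleton of MT protocol type B reporting as used above. */
static void report_one_contact(struct input_dev *input, int slot,
			       int x, int y, int pressure)
{
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
	input_report_abs(input, ABS_MT_POSITION_X, x);
	input_report_abs(input, ABS_MT_POSITION_Y, y);
	input_report_abs(input, ABS_MT_PRESSURE, pressure);
}

static void report_frame_done(struct input_dev *input)
{
	/* Handles inactive slots and pointer emulation, then syncs. */
	input_mt_sync_frame(input);
	input_sync(input);
}
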
 
-static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
+int cyapa_pip_irq_handler(struct cyapa *cyapa)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_gen5_report_data report_data;
-	int ret;
-	u8 report_id;
+	struct cyapa_pip_report_data report_data;
 	unsigned int report_len;
+	u8 report_id;
+	int ret;
 
-	if (cyapa->gen != CYAPA_GEN5 ||
-		cyapa->state != CYAPA_STATE_GEN5_APP) {
+	if (!cyapa_is_pip_app_mode(cyapa)) {
 		dev_err(dev, "invalid device state, gen=%d, state=0x%02x\n",
 			cyapa->gen, cyapa->state);
 		return -EINVAL;
 	}
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data,
-			GEN5_RESP_LENGTH_SIZE);
-	if (ret != GEN5_RESP_LENGTH_SIZE) {
+			PIP_RESP_LENGTH_SIZE);
+	if (ret != PIP_RESP_LENGTH_SIZE) {
 		dev_err(dev, "failed to read length bytes, (%d)\n", ret);
 		return -EINVAL;
 	}
 
 	report_len = get_unaligned_le16(
-			&report_data.report_head[GEN5_RESP_LENGTH_OFFSET]);
-	if (report_len < GEN5_RESP_LENGTH_SIZE) {
-		/* Invliad length or internal reset happened. */
+			&report_data.report_head[PIP_RESP_LENGTH_OFFSET]);
+	if (report_len < PIP_RESP_LENGTH_SIZE) {
+		/* Invalid length or internal reset happened. */
 		dev_err(dev, "invalid report_len=%d. bytes: %02x %02x\n",
 			report_len, report_data.report_head[0],
 			report_data.report_head[1]);
@@ -2693,7 +2742,7 @@
 	}
 
 	/* Idle, no data for report. */
-	if (report_len == GEN5_RESP_LENGTH_SIZE)
+	if (report_len == PIP_RESP_LENGTH_SIZE)
 		return 0;
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data, report_len);
@@ -2703,70 +2752,92 @@
 		return -EINVAL;
 	}
 
-	report_id = report_data.report_head[GEN5_RESP_REPORT_ID_OFFSET];
-	if (report_id == GEN5_WAKEUP_EVENT_REPORT_ID &&
-			report_len == GEN5_WAKEUP_EVENT_SIZE) {
+	report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+	if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
+			report_len == PIP_WAKEUP_EVENT_SIZE) {
 		/*
 		 * Device wake event from deep sleep mode for touch.
 		 * This interrupt event is used to wake system up.
+		 *
+		 * Note:
+		 * It introduces about 20~40 ms of additional delay
+		 * before the first valid touch report data is received;
+		 * this time is spent executing the device runtime
+		 * resume process.
 		 */
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 		return 0;
-	} else if (report_id != GEN5_TOUCH_REPORT_ID &&
-			report_id != GEN5_BTN_REPORT_ID &&
+	} else if (report_id != PIP_TOUCH_REPORT_ID &&
+			report_id != PIP_BTN_REPORT_ID &&
 			report_id != GEN5_OLD_PUSH_BTN_REPORT_ID &&
-			report_id != GEN5_PUSH_BTN_REPORT_ID) {
+			report_id != PIP_PUSH_BTN_REPORT_ID &&
+			report_id != PIP_PROXIMITY_REPORT_ID) {
 		/* Running in BL mode or unknown response data read. */
 		dev_err(dev, "invalid report_id=0x%02x\n", report_id);
 		return -EINVAL;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID &&
-		(report_len < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			report_len > GEN5_TOUCH_REPORT_MAX_SIZE)) {
+	if (report_id == PIP_TOUCH_REPORT_ID &&
+		(report_len < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			report_len > PIP_TOUCH_REPORT_MAX_SIZE)) {
 		/* Invalid report data length for finger packet. */
 		dev_err(dev, "invalid touch packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if ((report_id == GEN5_BTN_REPORT_ID ||
+	if ((report_id == PIP_BTN_REPORT_ID ||
 			report_id == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			report_id == GEN5_PUSH_BTN_REPORT_ID) &&
-		(report_len < GEN5_BTN_REPORT_HEAD_SIZE ||
-			report_len > GEN5_BTN_REPORT_MAX_SIZE)) {
+			report_id == PIP_PUSH_BTN_REPORT_ID) &&
+		(report_len < PIP_BTN_REPORT_HEAD_SIZE ||
+			report_len > PIP_BTN_REPORT_MAX_SIZE)) {
 		/* Invalid report data length of button packet. */
 		dev_err(dev, "invalid button packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID)
-		cyapa_gen5_report_touches(cyapa, &report_data);
+	if (report_id == PIP_PROXIMITY_REPORT_ID &&
+			report_len != PIP_PROXIMITY_REPORT_SIZE) {
+		/* Invalid report data length of proximity packet. */
+		dev_err(dev, "invalid proximity data, length=%d\n", report_len);
+		return 0;
+	}
+
+	if (report_id == PIP_TOUCH_REPORT_ID)
+		cyapa_pip_report_touches(cyapa, &report_data);
+	else if (report_id == PIP_PROXIMITY_REPORT_ID)
+		cyapa_pip_report_proximity(cyapa, &report_data);
 	else
-		cyapa_gen5_report_buttons(cyapa, &report_data);
+		cyapa_pip_report_buttons(cyapa, &report_data);
 
 	return 0;
 }
 
-static int cyapa_gen5_bl_activate(struct cyapa *cyapa) { return 0; }
-static int cyapa_gen5_bl_deactivate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_activate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa) { return 0; }
+
 
 const struct cyapa_dev_ops cyapa_gen5_ops = {
-	.check_fw = cyapa_gen5_check_fw,
-	.bl_enter = cyapa_gen5_bl_enter,
-	.bl_initiate = cyapa_gen5_bl_initiate,
-	.update_fw = cyapa_gen5_do_fw_update,
-	.bl_activate = cyapa_gen5_bl_activate,
-	.bl_deactivate = cyapa_gen5_bl_deactivate,
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
 
 	.show_baseline = cyapa_gen5_show_baseline,
-	.calibrate_store = cyapa_gen5_do_calibrate,
+	.calibrate_store = cyapa_pip_do_calibrate,
 
-	.initialize = cyapa_gen5_initialize,
+	.initialize = cyapa_pip_cmd_state_initialize,
 
 	.state_parse = cyapa_gen5_state_parse,
 	.operational_check = cyapa_gen5_do_operational_check,
 
-	.irq_handler = cyapa_gen5_irq_handler,
-	.irq_cmd_handler = cyapa_gen5_irq_cmd_handler,
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_empty_pip_output_data,
 	.set_power_mode = cyapa_gen5_set_power_mode,
+
+	.set_proximity = cyapa_pip_set_proximity,
 };
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
new file mode 100644
index 0000000..5f19107
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -0,0 +1,749 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2015 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/crc-itu-t.h>
+#include "cyapa.h"
+
+
+#define GEN6_ENABLE_CMD_IRQ	0x41
+#define GEN6_DISABLE_CMD_IRQ	0x42
+#define GEN6_ENABLE_DEV_IRQ	0x43
+#define GEN6_DISABLE_DEV_IRQ	0x44
+
+#define GEN6_POWER_MODE_ACTIVE		0x01
+#define GEN6_POWER_MODE_LP_MODE1	0x02
+#define GEN6_POWER_MODE_LP_MODE2	0x03
+#define GEN6_POWER_MODE_BTN_ONLY	0x04
+
+#define GEN6_SET_POWER_MODE_INTERVAL	0x47
+#define GEN6_GET_POWER_MODE_INTERVAL	0x48
+
+#define GEN6_MAX_RX_NUM 14
+#define GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC	0x00
+#define GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM		0x12
+
+
+struct pip_app_cmd_head {
+	__le16 addr;
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: resv, set to 0; bit6~0: command code.*/
+} __packed;
+
+struct pip_app_resp_head {
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: TGL; bit6~0: command code.*/
+	/*
+	 * The value of data_status can be the first byte of data, the
+	 * command status, or the unsupported command code, depending on
+	 * the requested command code.
+	 */
+	u8 data_status;
+} __packed;
+
+struct pip_fixed_info {
+	u8 silicon_id_high;
+	u8 silicon_id_low;
+	u8 family_id;
+};
+
+static u8 pip_get_bl_info[] = {
+	0x04, 0x00, 0x0B, 0x00, 0x40, 0x00, 0x01, 0x38,
+	0x00, 0x00, 0x70, 0x9E, 0x17
+};
+
+static bool cyapa_sort_pip_hid_descriptor_data(struct cyapa *cyapa,
+		u8 *buf, int len)
+{
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
+		return false;
+
+	if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID ||
+		buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		return true;
+
+	return false;
+}
+
+static int cyapa_get_pip_fixed_info(struct cyapa *cyapa,
+		struct pip_fixed_info *pip_info, bool is_bootloader)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	int error;
+
+	if (is_bootloader) {
+		/* Read Bootloader Information to determine Gen5 or Gen6. */
+		resp_len = sizeof(resp_data);
+		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				pip_get_bl_info, sizeof(pip_get_bl_info),
+				resp_data, &resp_len,
+				2000, cyapa_sort_tsg_pip_bl_resp_data,
+				false);
+		if (error || resp_len < PIP_BL_GET_INFO_RESP_LENGTH)
+			return error ? error : -EIO;
+
+		pip_info->family_id = resp_data[8];
+		pip_info->silicon_id_low = resp_data[10];
+		pip_info->silicon_id_high = resp_data[11];
+
+		return 0;
+	}
+
+	/* Get App System Information to determine Gen5 or Gen6. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < PIP_READ_SYS_INFO_RESP_LENGTH)
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	pip_info->family_id = resp_data[19];
+	pip_info->silicon_id_low = resp_data[21];
+	pip_info->silicon_id_high = resp_data[22];
+
+	return 0;
+}
+
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
+	u8 cmd[] = { 0x01, 0x00};
+	struct pip_fixed_info pip_info;
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
+	int resp_len;
+	bool is_bootloader;
+	int error;
+
+	cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+	/* Try to wake the device from deep sleep if it is in that state. */
+	cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+
+	/* Empty the buffer queue to get fresh data with later commands. */
+	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+	/*
+	 * Read the HID descriptor from the trackpad to determine whether
+	 * it is running in APP mode or Bootloader mode.
+	 */
+	resp_len = PIP_HID_DESCRIPTOR_SIZE;
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			300,
+			cyapa_sort_pip_hid_descriptor_data,
+			false);
+	if (error)
+		return error;
+
+	if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		is_bootloader = true;
+	else if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID)
+		is_bootloader = false;
+	else
+		return -EAGAIN;
+
+	/* Get PIP fixed information to determine Gen5 or Gen6. */
+	memset(&pip_info, 0, sizeof(struct pip_fixed_info));
+	error = cyapa_get_pip_fixed_info(cyapa, &pip_info, is_bootloader);
+	if (error)
+		return error;
+
+	if (pip_info.family_id == 0x9B && pip_info.silicon_id_high == 0x0B) {
+		cyapa->gen = CYAPA_GEN6;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN6_BL
+					     : CYAPA_STATE_GEN6_APP;
+	} else if (pip_info.family_id == 0x91 &&
+		   pip_info.silicon_id_high == 0x02) {
+		cyapa->gen = CYAPA_GEN5;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN5_BL
+					     : CYAPA_STATE_GEN5_APP;
+	}
+
+	return 0;
+}
+
+static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	u8 rotat_align;
+	int error;
+
+	/* Get the App System Information for the trackpad parameters. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < sizeof(resp_data))
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	cyapa->platform_ver = (resp_data[67] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	cyapa->fw_maj_ver = resp_data[9];
+	cyapa->fw_min_ver = resp_data[10];
+
+	cyapa->electrodes_x = resp_data[33];
+	cyapa->electrodes_y = resp_data[34];
+
+	cyapa->physical_size_x =  get_unaligned_le16(&resp_data[35]) / 100;
+	cyapa->physical_size_y = get_unaligned_le16(&resp_data[37]) / 100;
+
+	cyapa->max_abs_x = get_unaligned_le16(&resp_data[39]);
+	cyapa->max_abs_y = get_unaligned_le16(&resp_data[41]);
+
+	cyapa->max_z = get_unaligned_le16(&resp_data[43]);
+
+	cyapa->x_origin = resp_data[45] & 0x01;
+	cyapa->y_origin = resp_data[46] & 0x01;
+
+	cyapa->btn_capability = (resp_data[70] << 3) & CAPABILITY_BTN_MASK;
+
+	memcpy(&cyapa->product_id[0], &resp_data[51], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[56], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[62], 2);
+	cyapa->product_id[15] = '\0';
+
+	rotat_align = resp_data[68];
+	if (rotat_align)
+		cyapa->electrodes_rx = cyapa->electrodes_y;
+	else
+		cyapa->electrodes_rx = cyapa->electrodes_x;
+	cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
+
+	if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
+		!cyapa->physical_size_x || !cyapa->physical_size_y ||
+		!cyapa->max_abs_x || !cyapa->max_abs_y || !cyapa->max_z)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_bl_read_app_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error ? error : -EIO;
+
+	cyapa->fw_maj_ver = resp_data[8];
+	cyapa->fw_min_ver = resp_data[9];
+
+	cyapa->platform_ver = (resp_data[12] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
+	memcpy(&cyapa->product_id[0], &resp_data[13], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[18], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[24], 2);
+	cyapa->product_id[15] = '\0';
+
+	return 0;
+}
+
+static int cyapa_gen6_config_dev_irq(struct cyapa *cyapa, u8 cmd_code)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, cmd_code };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error < 0 ? error : -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	int error;
+
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+	error = cyapa_pip_set_proximity(cyapa, enable);
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+
+	return error;
+}
+
+static int cyapa_gen6_change_power_state(struct cyapa *cyapa, u8 power_mode)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, 0x46, power_mode };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x46))
+		return error < 0 ? error : -EINVAL;
+
+	/* The power state reported by the device does not match the request. */
+	if (resp_data[5] != power_mode)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	struct gen6_set_interval_cmd {
+		__le16 addr;
+		__le16 length;
+		u8 report_id;
+		u8 rsvd;  /* Reserved, must be 0 */
+		u8 cmd_code;
+		__le16 active_interval;
+		__le16 lp1_interval;
+		__le16 lp2_interval;
+	} __packed set_interval_cmd;
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	memset(&set_interval_cmd, 0, sizeof(set_interval_cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &set_interval_cmd.addr);
+	put_unaligned_le16(sizeof(set_interval_cmd) - 2,
+			   &set_interval_cmd.length);
+	set_interval_cmd.report_id = PIP_APP_CMD_REPORT_ID;
+	set_interval_cmd.cmd_code = GEN6_SET_POWER_MODE_INTERVAL;
+	put_unaligned_le16(interval_setting->active_interval,
+			   &set_interval_cmd.active_interval);
+	put_unaligned_le16(interval_setting->lp1_interval,
+			   &set_interval_cmd.lp1_interval);
+	put_unaligned_le16(interval_setting->lp2_interval,
+			   &set_interval_cmd.lp2_interval);
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			(u8 *)&set_interval_cmd, sizeof(set_interval_cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_SET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	/* Get the real set intervals from response. */
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_get_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00,
+		     GEN6_GET_POWER_MODE_INTERVAL };
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_GET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
+{
+	u8 ping[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x00 };
+
+	if (state == PIP_DEEP_SLEEP_STATE_ON)
+		/*
+		 * Send a ping command so the device prepares to wake up
+		 * if it is in deep sleep mode. At this point the device
+		 * responds with nothing but an I2C NAK.
+		 */
+		cyapa_i2c_pip_write(cyapa, ping, sizeof(ping));
+
+	return cyapa_pip_deep_sleep(cyapa, state);
+}
+
+static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
+		u8 power_mode, u16 sleep_time, bool is_suspend)
+{
+	struct device *dev = &cyapa->client->dev;
+	struct gen6_interval_setting *interval_setting =
+			&cyapa->gen6_interval_setting;
+	u8 lp_mode;
+	int error;
+
+	if (cyapa->state != CYAPA_STATE_GEN6_APP)
+		return 0;
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+		/*
+		 * Assume the trackpad is in deep sleep when the driver is
+		 * loaded; this avoids command I/O failures across an
+		 * unload/reload cycle, since the trackpad is put into deep
+		 * sleep mode on unload.
+		 */
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+	}
+
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+		PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+		PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+		if (power_mode == PWR_MODE_OFF ||
+			power_mode == PWR_MODE_FULL_ACTIVE ||
+			power_mode == PWR_MODE_BTN_ONLY ||
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			/* Already in the requested power mode; return early. */
+			return 0;
+		}
+	}
+
+	if (power_mode == PWR_MODE_OFF) {
+		cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
+		if (error) {
+			dev_err(dev, "enter deep sleep fail: %d\n", error);
+			return error;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		return 0;
+	}
+
+	/*
+	 * When the trackpad is powered off it cannot switch to another
+	 * power state directly; it must first be woken from deep sleep
+	 * before the next power state change can proceed.
+	 */
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+		if (error) {
+			dev_err(dev, "deep sleep wake fail: %d\n", error);
+			return error;
+		}
+	}
+
+	/*
+	 * Disable device assert interrupts for command response to avoid
+	 * disturbing system suspending or hibernating process.
+	 */
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+	if (power_mode == PWR_MODE_FULL_ACTIVE) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_ACTIVE);
+		if (error) {
+			dev_err(dev, "change to active fail: %d\n", error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+
+		/* Sync the interval setting from device. */
+		cyapa_gen6_get_interval_setting(cyapa, interval_setting);
+
+	} else if (power_mode == PWR_MODE_BTN_ONLY) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_BTN_ONLY);
+		if (error) {
+			dev_err(dev, "fail to button only mode: %d\n", error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+	} else {
+		/*
+		 * Gen6 devices support two preset low-power scan intervals,
+		 * which allows quick power mode switches, e.g. between
+		 * runtime suspend and system suspend.
+		 */
+		if (interval_setting->lp1_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE1;
+		} else if (interval_setting->lp2_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE2;
+		} else {
+			if (interval_setting->lp1_interval == 0) {
+				interval_setting->lp1_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE1;
+			} else {
+				interval_setting->lp2_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE2;
+			}
+			cyapa_gen6_set_interval_setting(cyapa,
+							interval_setting);
+		}
+
+		error = cyapa_gen6_change_power_state(cyapa, lp_mode);
+		if (error) {
+			dev_err(dev, "set power state to 0x%02x failed: %d\n",
+				lp_mode, error);
+			goto out;
+		}
+
+		PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+		PIP_DEV_SET_PWR_STATE(cyapa,
+			cyapa_sleep_time_to_pwr_cmd(sleep_time));
+	}
+
+out:
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+	return error;
+}
+
+static int cyapa_gen6_initialize(struct cyapa *cyapa)
+{
+	return 0;
+}
+
+static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
+		u16 read_offset, u16 read_len, u8 data_id,
+		u8 *data, int *data_buf_lens)
+{
+	struct retrieve_data_struct_cmd {
+		struct pip_app_cmd_head head;
+		__le16 read_offset;
+		__le16 read_length;
+		u8 data_id;
+	} __packed cmd;
+	u8 resp_data[GEN6_MAX_RX_NUM + 10];
+	int resp_len;
+	int error;
+
+	memset(&cmd, 0, sizeof(cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
+	put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
+	cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
+	cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
+	put_unaligned_le16(read_offset, &cmd.read_offset);
+	put_unaligned_le16(read_len, &cmd.read_length);
+	cmd.data_id = data_id;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				(u8 *)&cmd, sizeof(cmd),
+				resp_data, &resp_len,
+				500, cyapa_sort_tsg_pip_app_resp_data,
+				true);
+	if (error || !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
+		resp_data[6] != data_id ||
+		!VALID_CMD_RESP_HEADER(resp_data, PIP_RETRIEVE_DATA_STRUCTURE))
+		return (error < 0) ? error : -EAGAIN;
+
+	read_len = get_unaligned_le16(&resp_data[7]);
+	if (*data_buf_lens < read_len) {
+		*data_buf_lens = read_len;
+		return -ENOBUFS;
+	}
+
+	memcpy(data, &resp_data[10], read_len);
+	*data_buf_lens = read_len;
+	return 0;
+}
+
+static ssize_t cyapa_gen6_show_baseline(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyapa *cyapa = dev_get_drvdata(dev);
+	u8 data[GEN6_MAX_RX_NUM];
+	int data_len;
+	int size = 0;
+	int i;
+	int error;
+	int resume_error;
+
+	if (!cyapa_is_pip_app_mode(cyapa))
+		return -EBUSY;
+
+	/* 1. Suspend scanning. */
+	error = cyapa_pip_suspend_scanning(cyapa);
+	if (error)
+		return error;
+
+	/* 2. IDAC and RX Attenuator Calibration Data (Center Frequency). */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
+			data[0],  /* RX Attenuator Mutual */
+			data[1],  /* IDAC Mutual */
+			data[2],  /* RX Attenuator Self RX */
+			data[3],  /* IDAC Self RX */
+			data[4],  /* RX Attenuator Self TX */
+			data[5]	  /* IDAC Self TX */
+			);
+
+	/* 3. Read Attenuator Trim. */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	/* Append the attenuator trim values. */
+	for (i = 0; i < data_len; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,	"%d ", data[i]);
+	size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+resume_scanning:
+	/* 4. Resume scanning. */
+	resume_error = cyapa_pip_resume_scanning(cyapa);
+	if (resume_error || error) {
+		memset(buf, 0, PAGE_SIZE);
+		return resume_error ? resume_error : error;
+	}
+
+	return size;
+}
+
+static int cyapa_gen6_operational_check(struct cyapa *cyapa)
+{
+	struct device *dev = &cyapa->client->dev;
+	int error;
+
+	if (cyapa->gen != CYAPA_GEN6)
+		return -ENODEV;
+
+	switch (cyapa->state) {
+	case CYAPA_STATE_GEN6_BL:
+		error = cyapa_pip_bl_exit(cyapa);
+		if (error) {
+			/* Try to update trackpad product information. */
+			cyapa_gen6_bl_read_app_info(cyapa);
+			goto out;
+		}
+
+		cyapa->state = CYAPA_STATE_GEN6_APP;
+
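+		/* Fall through to the app-mode checks. */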
+	case CYAPA_STATE_GEN6_APP:
+		/*
+		 * If the trackpad is in deep sleep mode, application
+		 * commands will fail, so always try to bring the device
+		 * back to the fully active state first.
+		 */
+		error = cyapa_gen6_set_power_mode(cyapa,
+				PWR_MODE_FULL_ACTIVE, 0, false);
+		if (error)
+			dev_warn(dev, "%s: failed to set power active mode.\n",
+				__func__);
+
+		/* By default, the trackpad proximity function is enabled. */
+		error = cyapa_pip_set_proximity(cyapa, true);
+		if (error)
+			dev_warn(dev, "%s: failed to enable proximity.\n",
+				__func__);
+
+		/* Get trackpad product information. */
+		error = cyapa_gen6_read_sys_info(cyapa);
+		if (error)
+			goto out;
+		/* Only support product ID starting with CYTRA */
+		if (memcmp(cyapa->product_id, product_id,
+				strlen(product_id)) != 0) {
+			dev_err(dev, "%s: unknown product ID (%s)\n",
+				__func__, cyapa->product_id);
+			error = -EINVAL;
+		}
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+out:
+	return error;
+}
+
+const struct cyapa_dev_ops cyapa_gen6_ops = {
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
+
+	.show_baseline = cyapa_gen6_show_baseline,
+	.calibrate_store = cyapa_pip_do_calibrate,
+
+	.initialize = cyapa_gen6_initialize,
+
+	.state_parse = cyapa_pip_state_parse,
+	.operational_check = cyapa_gen6_operational_check,
+
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
+	.sort_empty_output_data = cyapa_empty_pip_output_data,
+	.set_power_mode = cyapa_gen6_set_power_mode,
+
+	.set_proximity = cyapa_gen6_set_proximity,
+};
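The Gen6 commands above are all built as raw byte arrays such as { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, cmd_code }. Here is a hedged sketch of how those bytes line up with struct pip_app_cmd_head; the DEMO_* constants are assumptions inferred from the arrays in this file, not definitions taken from cyapa.h:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	#define DEMO_OUTPUT_REPORT_ADDR	0x0004	/* assumed PIP_OUTPUT_REPORT_ADDR */
	#define DEMO_APP_CMD_REPORT_ID	0x2f	/* assumed PIP_APP_CMD_REPORT_ID */

	/* Build the 7-byte "no payload" application command used above. */
	static void demo_build_cmd(u8 buf[7], u8 cmd_code)
	{
		put_unaligned_le16(DEMO_OUTPUT_REPORT_ADDR, &buf[0]);
		/* Length counts everything after the 2-byte register address. */
		put_unaligned_le16(7 - 2, &buf[2]);
		buf[4] = DEMO_APP_CMD_REPORT_ID;
		buf[5] = 0;			/* reserved, must be 0 */
		buf[6] = cmd_code;		/* e.g. GEN6_ENABLE_CMD_IRQ */
	}

The same layout explains the sizeof(cmd) - 2 length written into the retrieve-data-structure command earlier in the file.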
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5b5f403..e2b7420 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
  * Copyright (c) 2013 ELAN Microelectronics Corp.
  *
  * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.9
+ * Version: 1.6.0
  *
  * Based on cyapa driver:
  * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME		"elan_i2c"
-#define ELAN_DRIVER_VERSION	"1.5.9"
+#define ELAN_DRIVER_VERSION	"1.6.0"
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -84,7 +84,7 @@
 	int			pressure_adjustment;
 	u8			mode;
 	u8			ic_type;
-	u16			fw_vaildpage_count;
+	u16			fw_validpage_count;
 	u16			fw_signature_address;
 
 	bool			irq_wake;
@@ -94,25 +94,28 @@
 	bool			baseline_ready;
 };
 
-static int elan_get_fwinfo(u8 ic_type, u16 *vaildpage_count,
+static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
 			   u16 *signature_address)
 {
-	switch(ic_type) {
+	switch (iap_version) {
+	case 0x08:
+		*validpage_count = 512;
+		break;
 	case 0x09:
-		*vaildpage_count = 768;
+		*validpage_count = 768;
 		break;
 	case 0x0D:
-		*vaildpage_count = 896;
+		*validpage_count = 896;
 		break;
 	default:
 		/* unknown ic type clear value */
-		*vaildpage_count = 0;
+		*validpage_count = 0;
 		*signature_address = 0;
 		return -ENXIO;
 	}
 
 	*signature_address =
-		(*vaildpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+		(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
 
 	return 0;
 }
@@ -261,11 +264,11 @@
 	if (error)
 		return error;
 
-	error = elan_get_fwinfo(data->ic_type, &data->fw_vaildpage_count,
+	error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
 				&data->fw_signature_address);
 	if (error) {
 		dev_err(&data->client->dev,
-			"unknown ic type %d\n", data->ic_type);
+			"unknown iap version %d\n", data->iap_version);
 		return error;
 	}
 
@@ -353,7 +356,7 @@
 	iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
 
 	boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
-	for (i = boot_page_count; i < data->fw_vaildpage_count; i++) {
+	for (i = boot_page_count; i < data->fw_validpage_count; i++) {
 		u16 checksum = 0;
 		const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
 
@@ -1165,6 +1168,8 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
+	{ "ELAN0100", 0 },
+	{ "ELAN0600", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
@@ -1181,10 +1186,10 @@
 static struct i2c_driver elan_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &elan_pm_ops,
 		.acpi_match_table = ACPI_PTR(elan_acpi_id),
 		.of_match_table = of_match_ptr(elan_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 	.probe		= elan_probe,
 	.id_table	= elan_id,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ec347703..ad18dab 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1540,6 +1540,10 @@
 	if (error)
 		goto err_clear_drvdata;
 
+	/* give PT device some time to settle down before probing */
+	if (serio->id.type == SERIO_PS_PSTHRU)
+		usleep_range(10000, 15000);
+
 	if (psmouse_probe(psmouse) < 0) {
 		error = -ENODEV;
 		goto err_close_serio;
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index cc7e0d4..11c32ac 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -432,7 +432,7 @@
 static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
 				   const char *buf, size_t count)
 {
-	int reg, val;
+	unsigned int reg, val;
 	char *rest;
 	ssize_t retval;
 
@@ -440,7 +440,7 @@
 	if (rest == buf || *rest != ' ' || reg > 0xff)
 		return -EINVAL;
 
-	retval = kstrtoint(rest + 1, 16, &val);
+	retval = kstrtouint(rest + 1, 16, &val);
 	if (retval)
 		return retval;
 
@@ -476,9 +476,10 @@
 					const char *buf, size_t count)
 {
 	struct fsp_data *pad = psmouse->private;
-	int reg, val, err;
+	unsigned int reg, val;
+	int err;
 
-	err = kstrtoint(buf, 16, &reg);
+	err = kstrtouint(buf, 16, &reg);
 	if (err)
 		return err;
 
@@ -511,9 +512,10 @@
 static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
 					const char *buf, size_t count)
 {
-	int val, err;
+	unsigned int val;
+	int err;
 
-	err = kstrtoint(buf, 16, &val);
+	err = kstrtouint(buf, 16, &val);
 	if (err)
 		return err;
 
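The kstrtoint to kstrtouint switch matters because these sysfs values are parsed as hex: a string like "ffffffff" overflows a signed int but is a perfectly valid unsigned register value. A small sketch of the pattern, with a hypothetical demo_ helper and the same 8-bit range check the driver uses:

	#include <linux/kernel.h>
	#include <linux/errno.h>

	static int demo_parse_hex_reg(const char *buf, unsigned int *reg)
	{
		int err;

		err = kstrtouint(buf, 16, reg);	/* base 16: sysfs writes hex */
		if (err)
			return err;

		if (*reg > 0xff)		/* FSP registers are 8 bits wide */
			return -EINVAL;

		return 0;
	}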
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb4..994ae78 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,14 +519,18 @@
 	struct synaptics_data *priv = psmouse->private;
 
 	priv->mode = 0;
-	if (priv->absolute_mode)
+
+	if (priv->absolute_mode) {
 		priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-	if (priv->disable_gesture)
+		if (SYN_CAP_EXTENDED(priv->capabilities))
+			priv->mode |= SYN_BIT_W_MODE;
+	}
+
+	if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
 		priv->mode |= SYN_BIT_DISABLE_GESTURE;
+
 	if (psmouse->rate >= 80)
 		priv->mode |= SYN_BIT_HIGH_RATE;
-	if (SYN_CAP_EXTENDED(priv->capabilities))
-		priv->mode |= SYN_BIT_W_MODE;
 
 	if (synaptics_mode_cmd(psmouse, priv->mode))
 		return -1;
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ffceedc..aa7c5da 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -655,7 +655,6 @@
 static struct i2c_driver synaptics_i2c_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &synaptics_i2c_pm,
 	},
 
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8b748d9..c6606ca 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -175,9 +175,9 @@
 	return 0;
 }
 
-static int amba_kmi_resume(struct amba_device *dev)
+static int __maybe_unused amba_kmi_resume(struct device *dev)
 {
-	struct amba_kmi_port *kmi = amba_get_drvdata(dev);
+	struct amba_kmi_port *kmi = dev_get_drvdata(dev);
 
 	/* kick the serio layer to rescan this port */
 	serio_reconnect(kmi->io);
@@ -185,6 +185,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume);
+
 static struct amba_id amba_kmi_idtable[] = {
 	{
 		.id	= 0x00041050,
@@ -199,11 +201,11 @@
 	.drv		= {
 		.name	= "kmi-pl050",
 		.owner	= THIS_MODULE,
+		.pm	= &amba_kmi_dev_pm_ops,
 	},
 	.id_table	= amba_kmi_idtable,
 	.probe		= amba_kmi_probe,
 	.remove		= amba_kmi_remove,
-	.resume		= amba_kmi_resume,
 };
 
 module_amba_driver(ambakmi_driver);
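This converts the driver from the legacy bus-specific .resume callback to generic dev_pm_ops. The general shape of that conversion, as a hedged sketch with placeholder names:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int __maybe_unused demo_resume(struct device *dev)
	{
		/* Fetch driver data with dev_get_drvdata() and rescan the port. */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(demo_pm_ops, NULL, demo_resume);

	/*
	 * The bus driver struct then points at the ops table:
	 *	.drv = { .name = "demo", .pm = &demo_pm_ops },
	 * and the bus-specific .resume member is dropped.
	 */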
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cb5ece7..c9c98f0a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -88,6 +88,10 @@
 static bool i8042_debug;
 module_param_named(debug, i8042_debug, bool, 0600);
 MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
+
+static bool i8042_unmask_kbd_data;
+module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+MODULE_PARM_DESC(unmask_kbd_data, "Unconditionally log keyboard data traffic that is normally sanitized from the debug log (may reveal sensitive data; requires i8042.debug=1)");
 #endif
 
 static bool i8042_bypass_aux_irq_test;
@@ -116,6 +120,7 @@
 	struct serio *serio;
 	int irq;
 	bool exists;
+	bool driver_bound;
 	signed char mux;
 };
 
@@ -133,6 +138,7 @@
 static bool i8042_aux_irq_registered;
 static unsigned char i8042_suppress_kbd_ack;
 static struct platform_device *i8042_platform_device;
+static struct notifier_block i8042_kbd_bind_notifier_block;
 
 static irqreturn_t i8042_interrupt(int irq, void *dev_id);
 static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
@@ -528,10 +534,10 @@
 	port = &i8042_ports[port_no];
 	serio = port->exists ? port->serio : NULL;
 
-	dbg("%02x <- i8042 (interrupt, %d, %d%s%s)\n",
-	    data, port_no, irq,
-	    dfl & SERIO_PARITY ? ", bad parity" : "",
-	    dfl & SERIO_TIMEOUT ? ", timeout" : "");
+	filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
+		   port_no, irq,
+		   dfl & SERIO_PARITY ? ", bad parity" : "",
+		   dfl & SERIO_TIMEOUT ? ", timeout" : "");
 
 	filtered = i8042_filter(data, str, serio);
 
@@ -1438,6 +1444,29 @@
 	return error;
 }
 
+static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct serio *serio = to_serio_port(dev);
+	struct i8042_port *port = serio->port_data;
+
+	if (serio != i8042_ports[I8042_KBD_PORT_NO].serio)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_BOUND_DRIVER:
+		port->driver_bound = true;
+		break;
+
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		port->driver_bound = false;
+		break;
+	}
+
+	return 0;
+}
+
 static int __init i8042_probe(struct platform_device *dev)
 {
 	int error;
@@ -1507,6 +1536,10 @@
 	.shutdown	= i8042_shutdown,
 };
 
+static struct notifier_block i8042_kbd_bind_notifier_block = {
+	.notifier_call = i8042_kbd_bind_notifier,
+};
+
 static int __init i8042_init(void)
 {
 	struct platform_device *pdev;
@@ -1528,6 +1561,7 @@
 		goto err_platform_exit;
 	}
 
+	bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = i8042_panic_blink;
 
 	return 0;
@@ -1543,6 +1577,7 @@
 	platform_driver_unregister(&i8042_driver);
 	i8042_platform_exit();
 
+	bus_unregister_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = NULL;
 }
 
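The new bind notifier is what tells the debug code whether a real keyboard driver currently owns the KBD port. A minimal sketch of the bus-notifier pattern it uses, with placeholder names and error handling elided:

	#include <linux/device.h>
	#include <linux/notifier.h>

	static int demo_bind_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		struct device *dev = data;

		switch (action) {
		case BUS_NOTIFY_BOUND_DRIVER:	/* a driver attached to dev */
			dev_info(dev, "driver bound\n");
			break;
		case BUS_NOTIFY_UNBIND_DRIVER:	/* driver is detaching */
			dev_info(dev, "driver unbinding\n");
			break;
		}

		return 0;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_bind_notifier,
	};

	/* Paired with bus_register_notifier()/bus_unregister_notifier(). */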
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index fc080be..1db0a40 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -73,6 +73,17 @@
 			printk(KERN_DEBUG KBUILD_MODNAME ": [%d] " format,	\
 			       (int) (jiffies - i8042_start_time), ##arg);	\
 	} while (0)
+
+#define filter_dbg(filter, data, format, args...)		\
+	do {							\
+		if (!i8042_debug)				\
+			break;					\
+								\
+		if (!filter || i8042_unmask_kbd_data)		\
+			dbg("%02x " format, data, ##args);	\
+		else						\
+			dbg("** " format, ##args);		\
+	} while (0)
 #else
 #define dbg_init() do { } while (0)
 #define dbg(format, arg...)							\
@@ -80,6 +91,8 @@
 		if (0)								\
 			printk(KERN_DEBUG pr_fmt(format), ##arg);		\
 	} while (0)
+
+#define filter_dbg(filter, data, format, args...) do { } while (0)
 #endif
 
 #endif /* _I8042_H */
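In effect the macro masks the payload byte whenever a keyboard driver is bound, unless unmask_kbd_data overrides it. A tiny user-space model of that decision, for illustration only (not the kernel macro itself):

	#include <stdbool.h>
	#include <stdio.h>

	static bool i8042_debug = true;		/* i8042.debug=1 */
	static bool unmask_kbd_data;		/* i8042.unmask_kbd_data */

	static void demo_filter_dbg(bool kbd_driver_bound, unsigned char data)
	{
		if (!i8042_debug)
			return;

		if (!kbd_driver_bound || unmask_kbd_data)
			printf("%02x <- i8042 (interrupt)\n", data);
		else
			printf("** <- i8042 (interrupt)\n");	/* byte sanitized */
	}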
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a05a517..8f82897 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -49,8 +49,6 @@
 
 static LIST_HEAD(serio_list);
 
-static struct bus_type serio_bus;
-
 static void serio_add_port(struct serio *serio);
 static int serio_reconnect_port(struct serio *serio);
 static void serio_disconnect_port(struct serio *serio);
@@ -1017,7 +1015,7 @@
 }
 EXPORT_SYMBOL(serio_interrupt);
 
-static struct bus_type serio_bus = {
+struct bus_type serio_bus = {
 	.name		= "serio",
 	.drv_groups	= serio_driver_groups,
 	.match		= serio_bus_match,
@@ -1029,6 +1027,7 @@
 	.pm		= &serio_pm_ops,
 #endif
 };
+EXPORT_SYMBOL(serio_bus);
 
 static int __init serio_init(void)
 {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a854c6e..059edeb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,9 +11,9 @@
 
 if INPUT_TOUCHSCREEN
 
-config OF_TOUCHSCREEN
+config TOUCHSCREEN_PROPERTIES
 	def_tristate INPUT
-	depends on INPUT && OF
+	depends on INPUT
 
 config TOUCHSCREEN_88PM860X
 	tristate "Marvell 88PM860x touchscreen"
@@ -118,7 +118,7 @@
 config TOUCHSCREEN_AUO_PIXCIR
 	tristate "AUO in-cell touchscreen using Pixcir ICs"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a AUO display with in-cell touchscreen
 	  using Pixcir ICs.
@@ -142,7 +142,7 @@
 
 config TOUCHSCREEN_CHIPONE_ICN8318
 	tristate "chipone icn8318 touchscreen controller"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	depends on I2C
 	depends on OF
 	help
@@ -156,7 +156,7 @@
 config TOUCHSCREEN_CY8CTMG110
 	tristate "cy8ctmg110 touchscreen"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a cy8ctmg110 capacitive touchscreen on
 	  an AAVA device.
@@ -915,10 +915,11 @@
 	  module will be called tsc40.
 
 config TOUCHSCREEN_TSC2005
-        tristate "TSC2005 based touchscreens"
-        depends on SPI_MASTER
-        help
-          Say Y here if you have a TSC2005 based touchscreen.
+	tristate "TSC2005 based touchscreens"
+	depends on SPI_MASTER
+	select REGMAP_SPI
+	help
+	  Say Y here if you have a TSC2005 based touchscreen.
 
 	  If unsure, say N.
 
@@ -1029,7 +1030,7 @@
 config TOUCHSCREEN_ZFORCE
 	tristate "Neonode zForce infrared touchscreens"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a touchscreen using the zforce
 	  infrared technology from Neonode.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fa3d33b..c85aae2 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,7 +6,7 @@
 
 wm97xx-ts-y := wm97xx-core.o
 
-obj-$(CONFIG_OF_TOUCHSCREEN)		+= of_touchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PROPERTIES)	+= of_touchscreen.o
 obj-$(CONFIG_TOUCHSCREEN_88PM860X)	+= 88pm860x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_AD7877)	+= ad7877.o
 obj-$(CONFIG_TOUCHSCREEN_AD7879)	+= ad7879.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index dcf3907..d66962c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -94,7 +94,6 @@
 static struct i2c_driver ad7879_i2c_driver = {
 	.driver = {
 		.name	= "ad7879",
-		.owner	= THIS_MODULE,
 		.pm	= &ad7879_pm_ops,
 	},
 	.probe		= ad7879_i2c_probe,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e4eb8a6..0f5f968 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1234,7 +1234,8 @@
 	of_property_read_u32(node, "ti,pendown-gpio-debounce",
 			     &pdata->gpio_pendown_debounce);
 
-	pdata->wakeup = of_property_read_bool(node, "linux,wakeup");
+	pdata->wakeup = of_property_read_bool(node, "wakeup-source") ||
+			of_property_read_bool(node, "linux,wakeup");
 
 	pdata->gpio_pendown = of_get_named_gpio(dev->of_node, "pendown-gpio", 0);
 
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index f0b954d..71b5a63 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -166,7 +166,6 @@
 static struct i2c_driver ar1021_i2c_driver = {
 	.driver	= {
 		.name	= "ar1021_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &ar1021_i2c_pm,
 		.of_match_table = ar1021_i2c_of_match,
 	},
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index dfc7309..c562205 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -22,34 +22,20 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input/mt.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
-/* Version */
-#define MXT_VER_20		20
-#define MXT_VER_21		21
-#define MXT_VER_22		22
-
 /* Firmware files */
 #define MXT_FW_NAME		"maxtouch.fw"
 #define MXT_CFG_NAME		"maxtouch.cfg"
 #define MXT_CFG_MAGIC		"OBP_RAW V1"
 
 /* Registers */
-#define MXT_INFO		0x00
-#define MXT_FAMILY_ID		0x00
-#define MXT_VARIANT_ID		0x01
-#define MXT_VERSION		0x02
-#define MXT_BUILD		0x03
-#define MXT_MATRIX_X_SIZE	0x04
-#define MXT_MATRIX_Y_SIZE	0x05
-#define MXT_OBJECT_NUM		0x06
 #define MXT_OBJECT_START	0x07
-
 #define MXT_OBJECT_SIZE		6
 #define MXT_INFO_CHECKSUM_SIZE	3
 #define MXT_MAX_BLOCK_WRITE	256
@@ -103,21 +89,16 @@
 #define MXT_T6_STATUS_COMSERR	(1 << 2)
 
 /* MXT_GEN_POWER_T7 field */
-#define MXT_POWER_IDLEACQINT	0
-#define MXT_POWER_ACTVACQINT	1
-#define MXT_POWER_ACTV2IDLETO	2
+struct t7_config {
+	u8 idle;
+	u8 active;
+} __packed;
 
-/* MXT_GEN_ACQUIRE_T8 field */
-#define MXT_ACQUIRE_CHRGTIME	0
-#define MXT_ACQUIRE_TCHDRIFT	2
-#define MXT_ACQUIRE_DRIFTST	3
-#define MXT_ACQUIRE_TCHAUTOCAL	4
-#define MXT_ACQUIRE_SYNC	5
-#define MXT_ACQUIRE_ATCHCALST	6
-#define MXT_ACQUIRE_ATCHCALSTHR	7
+#define MXT_POWER_CFG_RUN		0
+#define MXT_POWER_CFG_DEEPSLEEP		1
 
 /* MXT_TOUCH_MULTI_T9 field */
-#define MXT_TOUCH_CTRL		0
+#define MXT_T9_CTRL		0
 #define MXT_T9_ORIENT		9
 #define MXT_T9_RANGE		18
 
@@ -139,51 +120,10 @@
 /* MXT_TOUCH_MULTI_T9 orient */
 #define MXT_T9_ORIENT_SWITCH	(1 << 0)
 
-/* MXT_PROCI_GRIPFACE_T20 field */
-#define MXT_GRIPFACE_CTRL	0
-#define MXT_GRIPFACE_XLOGRIP	1
-#define MXT_GRIPFACE_XHIGRIP	2
-#define MXT_GRIPFACE_YLOGRIP	3
-#define MXT_GRIPFACE_YHIGRIP	4
-#define MXT_GRIPFACE_MAXTCHS	5
-#define MXT_GRIPFACE_SZTHR1	7
-#define MXT_GRIPFACE_SZTHR2	8
-#define MXT_GRIPFACE_SHPTHR1	9
-#define MXT_GRIPFACE_SHPTHR2	10
-#define MXT_GRIPFACE_SUPEXTTO	11
-
-/* MXT_PROCI_NOISE field */
-#define MXT_NOISE_CTRL		0
-#define MXT_NOISE_OUTFLEN	1
-#define MXT_NOISE_GCAFUL_LSB	3
-#define MXT_NOISE_GCAFUL_MSB	4
-#define MXT_NOISE_GCAFLL_LSB	5
-#define MXT_NOISE_GCAFLL_MSB	6
-#define MXT_NOISE_ACTVGCAFVALID	7
-#define MXT_NOISE_NOISETHR	8
-#define MXT_NOISE_FREQHOPSCALE	10
-#define MXT_NOISE_FREQ0		11
-#define MXT_NOISE_FREQ1		12
-#define MXT_NOISE_FREQ2		13
-#define MXT_NOISE_FREQ3		14
-#define MXT_NOISE_FREQ4		15
-#define MXT_NOISE_IDLEGCAFVALID	16
-
 /* MXT_SPT_COMMSCONFIG_T18 */
 #define MXT_COMMS_CTRL		0
 #define MXT_COMMS_CMD		1
 
-/* MXT_SPT_CTECONFIG_T28 field */
-#define MXT_CTE_CTRL		0
-#define MXT_CTE_CMD		1
-#define MXT_CTE_MODE		2
-#define MXT_CTE_IDLEGCAFDEPTH	3
-#define MXT_CTE_ACTVGCAFDEPTH	4
-#define MXT_CTE_VOLTAGE		5
-
-#define MXT_VOLTAGE_DEFAULT	2700000
-#define MXT_VOLTAGE_STEP	10000
-
 /* Define for MXT_GEN_COMMAND_T6 */
 #define MXT_BOOT_VALUE		0xa5
 #define MXT_RESET_VALUE		0x01
@@ -291,6 +231,7 @@
 	u8 last_message_count;
 	u8 num_touchids;
 	u8 multitouch;
+	struct t7_config t7_cfg;
 
 	/* Cached parameters from object table */
 	u16 T5_address;
@@ -997,16 +938,15 @@
 
 	count = data->msg_buf[0];
 
-	if (count == 0) {
-		/*
-		 * This condition is caused by the CHG line being configured
-		 * in Mode 0. It results in unnecessary I2C operations but it
-		 * is benign.
-		 */
-		dev_dbg(dev, "Interrupt triggered but zero messages\n");
+	/*
+	 * This condition may be caused by the CHG line being configured in
+	 * Mode 0. It results in unnecessary I2C operations but it is benign.
+	 */
+	if (count == 0)
 		return IRQ_NONE;
-	} else if (count > data->max_reportid) {
-		dev_err(dev, "T44 count %d exceeded max report id\n", count);
+
+	if (count > data->max_reportid) {
+		dev_warn(dev, "T44 count %d exceeded max report id\n", count);
 		count = data->max_reportid;
 	}
 
@@ -1157,7 +1097,9 @@
 	struct device *dev = &data->client->dev;
 	int ret = 0;
 
-	dev_info(dev, "Resetting chip\n");
+	dev_info(dev, "Resetting device\n");
+
+	disable_irq(data->irq);
 
 	reinit_completion(&data->reset_completion);
 
@@ -1165,6 +1107,11 @@
 	if (ret)
 		return ret;
 
+	/* Ignore CHG line for 100ms after reset */
+	msleep(100);
+
+	enable_irq(data->irq);
+
 	ret = mxt_wait_for_completion(data, &data->reset_completion,
 				      MXT_RESET_TIMEOUT);
 	if (ret)
@@ -1361,6 +1308,8 @@
 	return 0;
 }
 
+static int mxt_init_t7_power_cfg(struct mxt_data *data);
+
 /*
  * mxt_update_cfg - download configuration to chip
  *
@@ -1508,6 +1457,9 @@
 
 	dev_info(dev, "Config successfully updated\n");
 
+	/* T7 config may have changed */
+	mxt_init_t7_power_cfg(data);
+
 release_mem:
 	kfree(config_mem);
 	return ret;
@@ -1533,7 +1485,7 @@
 	int error;
 
 	/* Read 7-byte info block starting at address 0 */
-	error = __mxt_read_reg(client, MXT_INFO, sizeof(*info), info);
+	error = __mxt_read_reg(client, 0, sizeof(*info), info);
 	if (error)
 		return error;
 
@@ -1905,6 +1857,8 @@
 	if (pdata->t19_num_keys) {
 		mxt_set_up_as_touchpad(input_dev, data);
 		mt_flags |= INPUT_MT_POINTER;
+	} else {
+		mt_flags |= INPUT_MT_DIRECT;
 	}
 
 	/* For multi touch */
@@ -2051,6 +2005,60 @@
 	return error;
 }
 
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	struct t7_config *new_config;
+	struct t7_config deepsleep = { .active = 0, .idle = 0 };
+
+	if (sleep == MXT_POWER_CFG_DEEPSLEEP)
+		new_config = &deepsleep;
+	else
+		new_config = &data->t7_cfg;
+
+	error = __mxt_write_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), new_config);
+	if (error)
+		return error;
+
+	dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
+		new_config->active, new_config->idle);
+
+	return 0;
+}
+
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	bool retry = false;
+
+recheck:
+	error = __mxt_read_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), &data->t7_cfg);
+	if (error)
+		return error;
+
+	if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
+		if (!retry) {
+			dev_dbg(dev, "T7 cfg zero, resetting\n");
+			mxt_soft_reset(data);
+			retry = true;
+			goto recheck;
+		} else {
+			dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+			data->t7_cfg.active = 20;
+			data->t7_cfg.idle = 100;
+			return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+		}
+	}
+
+	dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
+		data->t7_cfg.active, data->t7_cfg.idle);
+	return 0;
+}
+
 static int mxt_configure_objects(struct mxt_data *data,
 				 const struct firmware *cfg)
 {
@@ -2058,6 +2066,12 @@
 	struct mxt_info *info = &data->info;
 	int error;
 
+	error = mxt_init_t7_power_cfg(data);
+	if (error) {
+		dev_err(dev, "Failed to initialize power cfg\n");
+		return error;
+	}
+
 	if (cfg) {
 		error = mxt_update_cfg(data, cfg);
 		if (error)
@@ -2346,14 +2360,41 @@
 
 static void mxt_start(struct mxt_data *data)
 {
-	/* Touch enable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0x83);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		mxt_soft_reset(data);
+
+		/* Touch enable */
+		/* 0x83 = SCANEN | RPTEN | ENABLE */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0x83);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+		/* Recalibrate since chip has been in deep sleep */
+		mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+		break;
+	}
 }
 
 static void mxt_stop(struct mxt_data *data)
 {
-	/* Touch disable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		/* Touch disable */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+		break;
+	}
 }
 
 static int mxt_input_open(struct input_dev *dev)
@@ -2376,19 +2417,18 @@
 static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
 {
 	struct mxt_platform_data *pdata;
+	struct device_node *np = client->dev.of_node;
 	u32 *keymap;
-	u32 keycode;
-	int proplen, i, ret;
+	int proplen, ret;
 
-	if (!client->dev.of_node)
+	if (!np)
 		return ERR_PTR(-ENOENT);
 
 	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	if (of_find_property(client->dev.of_node, "linux,gpio-keymap",
-			     &proplen)) {
+	if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
 		pdata->t19_num_keys = proplen / sizeof(u32);
 
 		keymap = devm_kzalloc(&client->dev,
@@ -2397,18 +2437,17 @@
 		if (!keymap)
 			return ERR_PTR(-ENOMEM);
 
-		for (i = 0; i < pdata->t19_num_keys; i++) {
-			ret = of_property_read_u32_index(client->dev.of_node,
-					"linux,gpio-keymap", i, &keycode);
-			if (ret)
-				keycode = KEY_RESERVED;
-
-			keymap[i] = keycode;
-		}
+		ret = of_property_read_u32_array(np, "linux,gpio-keymap",
+						 keymap, pdata->t19_num_keys);
+		if (ret)
+			dev_warn(&client->dev,
+				 "Couldn't read linux,gpio-keymap: %d\n", ret);
 
 		pdata->t19_keymap = keymap;
 	}
 
+	pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
+
 	return pdata;
 }
 #else
@@ -2609,6 +2648,9 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
+	if (!input_dev)
+		return 0;
+
 	mutex_lock(&input_dev->mutex);
 
 	if (input_dev->users)
@@ -2625,7 +2667,8 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
-	mxt_soft_reset(data);
+	if (!input_dev)
+		return 0;
 
 	mutex_lock(&input_dev->mutex);
 
@@ -2666,7 +2709,6 @@
 static struct i2c_driver mxt_driver = {
 	.driver = {
 		.name	= "atmel_mxt_ts",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(mxt_of_match),
 		.acpi_match_table = ACPI_PTR(mxt_acpi_id),
 		.pm	= &mxt_pm_ops,
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 40e02dd..38c06f7 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -686,7 +686,6 @@
 
 static struct i2c_driver auo_pixcir_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "auo_pixcir_ts",
 		.pm	= &auo_pixcir_pm_ops,
 		.of_match_table	= of_match_ptr(auo_pixcir_ts_dt_idtable),
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index b9b5dda..931417e 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -716,7 +716,6 @@
 static struct i2c_driver bu21013_driver = {
 	.driver	= {
 		.name	=	DRIVER_TP,
-		.owner	=	THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	=	&bu21013_dev_pm_ops,
 #endif
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 32e9db0..22a6fea 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -300,7 +300,6 @@
 
 static struct i2c_driver icn8318_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "chipone_icn8318",
 		.pm	= &icn8318_pm_ops,
 		.of_match_table = icn8318_of_match,
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index f2119ee..cc1d135 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -347,7 +347,6 @@
 
 static struct i2c_driver cy8ctmg110_driver = {
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
 		.pm	= &cy8ctmg110_pm,
 	},
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 8e2012c..9a323dd 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp4_i2c_driver = {
 	.driver = {
 		.name	= CYTTSP4_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp4_pm_ops,
 	},
 	.probe		= cyttsp4_i2c_probe,
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 63104a8..519e2de 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp_i2c_driver = {
 	.driver = {
 		.name	= CY_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp_pm_ops,
 	},
 	.probe		= cyttsp_i2c_probe,
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 394b1de..48de1e8 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1041,7 +1041,7 @@
 			     0, tsdata->num_y * 64 - 1, 0, 0);
 
 	if (!pdata)
-		touchscreen_parse_of_params(input, true);
+		touchscreen_parse_properties(input, true);
 
 	error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
 	if (error) {
@@ -1134,7 +1134,6 @@
 
 static struct i2c_driver edt_ft5x06_ts_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "edt_ft5x06",
 		.of_match_table = of_match_ptr(edt_ft5x06_of_match),
 		.pm = &edt_ft5x06_ts_pm_ops,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 4c56299..1afc08b 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -264,11 +264,11 @@
 	{ .compatible = "eeti,egalax_ts" },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, egalax_ts_dt_ids);
 
 static struct i2c_driver egalax_ts_driver = {
 	.driver = {
 		.name	= "egalax_ts",
-		.owner	= THIS_MODULE,
 		.pm	= &egalax_ts_pm_ops,
 		.of_match_table	= egalax_ts_dt_ids,
 	},
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0efd766..ddac134 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -38,6 +38,8 @@
 #include <linux/input/mt.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
 #include <asm/unaligned.h>
 
 /* Device, Driver information */
@@ -102,6 +104,9 @@
 /* calibration timeout definition */
 #define ELAN_CALI_TIMEOUT_MSEC	10000
 
+#define ELAN_POWERON_DELAY_USEC	500
+#define ELAN_RESET_DELAY_MSEC	20
+
 enum elants_state {
 	ELAN_STATE_NORMAL,
 	ELAN_WAIT_QUEUE_HEADER,
@@ -118,6 +123,10 @@
 	struct i2c_client *client;
 	struct input_dev *input;
 
+	struct regulator *vcc33;
+	struct regulator *vccio;
+	struct gpio_desc *reset_gpio;
+
 	u16 fw_version;
 	u8 test_version;
 	u8 solution_version;
@@ -141,6 +150,7 @@
 	u8 buf[MAX_PACKET_SIZE];
 
 	bool wake_irq_enabled;
+	bool keep_power_in_suspend;
 };
 
 static int elants_i2c_send(struct i2c_client *client,
@@ -605,6 +615,7 @@
 	const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
 	const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
 	const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+	const u8 close_idle[] = { 0x54, 0x2c, 0x01, 0x01 };
 	u8 buf[HEADER_SIZE];
 	u16 send_id;
 	int page, n_fw_pages;
@@ -617,8 +628,13 @@
 	} else {
 		/* Start IAP Procedure */
 		dev_dbg(&client->dev, "Normal IAP procedure\n");
+		/* Close idle mode */
+		error = elants_i2c_send(client, close_idle, sizeof(close_idle));
+		if (error)
+			dev_err(&client->dev, "Failed close idle: %d\n", error);
+		msleep(60);
 		elants_i2c_sw_reset(client);
-
+		msleep(20);
 		error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
 	}
 
@@ -1052,6 +1068,67 @@
 	sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
 }
 
+static int elants_i2c_power_on(struct elants_data *ts)
+{
+	int error;
+
+	/*
+	 * If we do not have a reset GPIO, assume that the platform
+	 * firmware controls the regulators and has already powered
+	 * them on for us.
+	 */
+	if (IS_ERR_OR_NULL(ts->reset_gpio))
+		return 0;
+
+	gpiod_set_value_cansleep(ts->reset_gpio, 1);
+
+	error = regulator_enable(ts->vcc33);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vcc33 regulator: %d\n",
+			error);
+		goto release_reset_gpio;
+	}
+
+	error = regulator_enable(ts->vccio);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vccio regulator: %d\n",
+			error);
+		regulator_disable(ts->vcc33);
+		goto release_reset_gpio;
+	}
+
+	/*
+	 * We need to wait a bit after powering on the controller
+	 * before we are allowed to release the reset GPIO.
+	 */
+	udelay(ELAN_POWERON_DELAY_USEC);
+
+release_reset_gpio:
+	gpiod_set_value_cansleep(ts->reset_gpio, 0);
+	if (error)
+		return error;
+
+	msleep(ELAN_RESET_DELAY_MSEC);
+
+	return 0;
+}
+
+static void elants_i2c_power_off(void *_data)
+{
+	struct elants_data *ts = _data;
+
+	if (!IS_ERR_OR_NULL(ts->reset_gpio)) {
+		/*
+		 * Activate reset gpio to prevent leakage through the
+		 * pin once we shut off power to the controller.
+		 */
+		gpiod_set_value_cansleep(ts->reset_gpio, 1);
+		regulator_disable(ts->vccio);
+		regulator_disable(ts->vcc33);
+	}
+}
+
 static int elants_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
@@ -1066,13 +1143,6 @@
 		return -ENXIO;
 	}
 
-	/* Make sure there is something at this address */
-	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
-			I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
-		dev_err(&client->dev, "nothing at this address\n");
-		return -ENXIO;
-	}
-
 	ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
 	if (!ts)
 		return -ENOMEM;
@@ -1083,6 +1153,62 @@
 	ts->client = client;
 	i2c_set_clientdata(client, ts);
 
+	ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+	if (IS_ERR(ts->vcc33)) {
+		error = PTR_ERR(ts->vcc33);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vcc33' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->vccio = devm_regulator_get(&client->dev, "vccio");
+	if (IS_ERR(ts->vccio)) {
+		error = PTR_ERR(ts->vccio);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vccio' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+
+		if (error == -EPROBE_DEFER)
+			return error;
+
+		if (error != -ENOENT && error != -ENOSYS) {
+			dev_err(&client->dev,
+				"failed to get reset gpio: %d\n",
+				error);
+			return error;
+		}
+
+		ts->keep_power_in_suspend = true;
+	}
+
+	error = elants_i2c_power_on(ts);
+	if (error)
+		return error;
+
+	error = devm_add_action(&client->dev, elants_i2c_power_off, ts);
+	if (error) {
+		dev_err(&client->dev,
+			"failed to install power off action: %d\n", error);
+		elants_i2c_power_off(ts);
+		return error;
+	}
+
+	/* Make sure there is something at this address */
+	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+			   I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+		dev_err(&client->dev, "nothing at this address\n");
+		return -ENXIO;
+	}
+
 	error = elants_i2c_initialize(ts);
 	if (error) {
 		dev_err(&client->dev, "failed to initialize: %d\n", error);
@@ -1190,18 +1316,24 @@
 
 	disable_irq(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_sleep_cmd,
-					sizeof(set_sleep_cmd));
-		if (!error)
-			break;
+	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_sleep_cmd,
+						sizeof(set_sleep_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "suspend command failed: %d\n", error);
+			dev_err(&client->dev,
+				"suspend command failed: %d\n", error);
+		}
+
+		if (device_may_wakeup(dev))
+			ts->wake_irq_enabled =
+					(enable_irq_wake(client->irq) == 0);
+	} else {
+		elants_i2c_power_off(ts);
 	}
 
-	if (device_may_wakeup(dev))
-		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
-
 	return 0;
 }
 
@@ -1216,13 +1348,19 @@
 	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
 		disable_irq_wake(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_active_cmd,
-					sizeof(set_active_cmd));
-		if (!error)
-			break;
+	if (ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_active_cmd,
+						sizeof(set_active_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "resume command failed: %d\n", error);
+			dev_err(&client->dev,
+				"resume command failed: %d\n", error);
+		}
+	} else {
+		elants_i2c_power_on(ts);
+		elants_i2c_initialize(ts);
 	}
 
 	ts->state = ELAN_STATE_NORMAL;
@@ -1261,10 +1399,10 @@
 	.id_table = elants_i2c_id,
 	.driver = {
 		.name = DEVICE_NAME,
-		.owner = THIS_MODULE,
 		.pm = &elants_i2c_pm_ops,
 		.acpi_match_table = ACPI_PTR(elants_acpi_id),
 		.of_match_table = of_match_ptr(elants_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
 module_i2c_driver(elants_i2c_driver);
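The probe path now registers elants_i2c_power_off() with devm_add_action() so power is sequenced down automatically on unbind and on any later probe failure. A sketch of that devm teardown pattern, with placeholder names; note the manual cleanup when registration itself fails:

	#include <linux/device.h>

	static void demo_power_off(void *data)
	{
		/* Assert reset first, then disable the supplies. */
	}

	static int demo_register_poweroff(struct device *dev, void *ctx)
	{
		int error;

		error = devm_add_action(dev, demo_power_off, ctx);
		if (error) {
			/* devm will not run the action, so undo power-on by hand. */
			demo_power_off(ctx);
			return error;
		}

		return 0;	/* demo_power_off() now runs automatically on unbind */
	}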
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index e36162b..4d113c9 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -420,6 +420,7 @@
 	{ "GDIX1001:00", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id goodix_acpi_match[] = {
@@ -448,7 +449,6 @@
 	.id_table = goodix_ts_id,
 	.driver = {
 		.name = "Goodix-TS",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(goodix_acpi_match),
 		.of_match_table = of_match_ptr(goodix_of_match),
 	},
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 586bee4..ddf694b 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -343,7 +343,6 @@
 static struct i2c_driver ili210x_ts_driver = {
 	.driver = {
 		.name = "ili210x_i2c",
-		.owner = THIS_MODULE,
 		.pm = &ili210x_i2c_pm,
 	},
 	.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index a68ec14..82079cd 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -229,7 +229,6 @@
 static struct i2c_driver max11801_ts_driver = {
 	.driver = {
 		.name	= "max11801_ts",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= max11801_ts_id,
 	.probe		= max11801_ts_probe,
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 67c0d31..7cce876 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -572,12 +572,12 @@
 	{ .compatible = "melfas,mms114" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, mms114_dt_match);
 #endif
 
 static struct i2c_driver mms114_driver = {
 	.driver = {
 		.name	= "mms114",
-		.owner	= THIS_MODULE,
 		.pm	= &mms114_pm_ops,
 		.of_match_table = of_match_ptr(mms114_dt_match),
 	},
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 806cd0a..bb6f2fe 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -9,12 +9,12 @@
  *
  */
 
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
 
-static bool touchscreen_get_prop_u32(struct device_node *np,
+static bool touchscreen_get_prop_u32(struct device *dev,
 				     const char *property,
 				     unsigned int default_value,
 				     unsigned int *value)
@@ -22,7 +22,7 @@
 	u32 val;
 	int error;
 
-	error = of_property_read_u32(np, property, &val);
+	error = device_property_read_u32(dev, property, &val);
 	if (error) {
 		*value = default_value;
 		return false;
@@ -39,13 +39,9 @@
 	struct input_absinfo *absinfo;
 
 	if (!test_bit(axis, dev->absbit)) {
-		/*
-		 * Emit a warning only if the axis is not a multitouch
-		 * axis, which might not be set by the driver.
-		 */
-		if (!input_is_mt_axis(axis))
-			dev_warn(&dev->dev,
-				 "DT specifies parameters but the axis is not set up\n");
+		dev_warn(&dev->dev,
+			 "DT specifies parameters but the axis %lu is not set up\n",
+			 axis);
 		return;
 	}
 
@@ -55,52 +51,58 @@
 }
 
 /**
- * touchscreen_parse_of_params - parse common touchscreen DT properties
- * @dev: device that should be parsed
+ * touchscreen_parse_properties - parse common touchscreen DT properties
+ * @input: input device that should be parsed
+ * @multitouch: specifies whether parsed properties should be applied to
+ *	single-touch or multi-touch axes
  *
 * This function parses common DT properties for touchscreens and sets up the
- * input device accordingly. The function keeps previously setuped default
+ * input device accordingly. The function keeps previously set up default
  * values if no value is specified via DT.
  */
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
 {
-	struct device_node *np = dev->dev.parent->of_node;
+	struct device *dev = input->dev.parent;
 	unsigned int axis;
 	unsigned int maximum, fuzz;
 	bool data_present;
 
-	input_alloc_absinfo(dev);
-	if (!dev->absinfo)
+	input_alloc_absinfo(input);
+	if (!input->absinfo)
 		return;
 
 	axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev,
+						"touchscreen-max-pressure",
+						input_abs_get_max(input, axis),
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev,
+						"touchscreen-fuzz-pressure",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum, fuzz);
 }
-EXPORT_SYMBOL(touchscreen_parse_of_params);
+EXPORT_SYMBOL(touchscreen_parse_properties);
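
One subtlety in the property conversion above: "touchscreen-size-x"/"-y"
count addressable positions, while the input core stores the highest
reportable coordinate, hence the "+ 1" in the default and the
"maximum - 1" in the store. A standalone model of that convention, where
parse_size() is a made-up stand-in for device_property_read_u32() plus
defaulting:

#include <stdbool.h>
#include <stdio.h>

static bool parse_size(bool present, unsigned int dt_value,
		       unsigned int default_value, unsigned int *value)
{
	*value = present ? dt_value : default_value;
	return present;
}

int main(void)
{
	unsigned int cur_max = 4095;	/* e.g. a 12-bit firmware default */
	unsigned int maximum;

	/* DT says touchscreen-size-x = 1024: axis maximum becomes 1023 */
	if (parse_size(true, 1024, cur_max + 1, &maximum))
		printf("new max: %u\n", maximum - 1);

	/* property absent: the default round-trips to the old maximum */
	parse_size(false, 0, cur_max + 1, &maximum);
	printf("kept max: %u\n", maximum - 1);
	return 0;
}
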
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 8f3e243..9162172 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -24,20 +24,23 @@
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
-#include <linux/input/pixcir_ts.h>
+#include <linux/input/touchscreen.h>
 #include <linux/gpio.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_device.h>
+#include <linux/platform_data/pixcir_i2c_ts.h>
 
 #define PIXCIR_MAX_SLOTS       5 /* Max fingers supported by driver */
 
 struct pixcir_i2c_ts_data {
 	struct i2c_client *client;
 	struct input_dev *input;
-	const struct pixcir_ts_platform_data *pdata;
-	bool running;
+	struct gpio_desc *gpio_attb;
+	struct gpio_desc *gpio_reset;
+	const struct pixcir_i2c_chip_data *chip;
 	int max_fingers;	/* Max fingers supported in this instance */
+	bool running;
 };
 
 struct pixcir_touch {
@@ -60,7 +63,7 @@
 	u8 touch;
 	int ret, i;
 	int readsize;
-	const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = tsdata->chip;
 
 	memset(report, 0, sizeof(struct pixcir_report_data));
 
@@ -113,13 +116,13 @@
 	struct pixcir_touch *touch;
 	int n, i, slot;
 	struct device *dev = &ts->client->dev;
-	const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = ts->chip;
 
 	n = report->num_touches;
 	if (n > PIXCIR_MAX_SLOTS)
 		n = PIXCIR_MAX_SLOTS;
 
-	if (!chip->has_hw_ids) {
+	if (!ts->chip->has_hw_ids) {
 		for (i = 0; i < n; i++) {
 			touch = &report->touches[i];
 			pos[i].x = touch->x;
@@ -161,7 +164,6 @@
 static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
 {
 	struct pixcir_i2c_ts_data *tsdata = dev_id;
-	const struct pixcir_ts_platform_data *pdata = tsdata->pdata;
 	struct pixcir_report_data report;
 
 	while (tsdata->running) {
@@ -171,7 +173,7 @@
 		/* report it */
 		pixcir_ts_report(tsdata, &report);
 
-		if (gpio_get_value(pdata->gpio_attb)) {
+		if (gpiod_get_value_cansleep(tsdata->gpio_attb)) {
 			if (report.num_touches) {
 				/*
 				 * Last report with no finger up?
@@ -189,6 +191,17 @@
 	return IRQ_HANDLED;
 }
 
+static void pixcir_reset(struct pixcir_i2c_ts_data *tsdata)
+{
+	if (!IS_ERR_OR_NULL(tsdata->gpio_reset)) {
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 1);
+		ndelay(100);	/* datasheet section 1.2.3 says 80ns min. */
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 0);
+		/* Wait for the controller to become ready; 100 ms is a guess. */
+		msleep(100);
+	}
+}
+
 static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
 				 enum pixcir_power_mode mode)
 {
@@ -411,85 +424,59 @@
 #ifdef CONFIG_OF
 static const struct of_device_id pixcir_of_match[];
 
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	struct pixcir_ts_platform_data *pdata;
-	struct device_node *np = dev->of_node;
 	const struct of_device_id *match;
 
 	match = of_match_device(of_match_ptr(pixcir_of_match), dev);
 	if (!match)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return ERR_PTR(-ENOMEM);
+	tsdata->chip = (const struct pixcir_i2c_chip_data *)match->data;
+	if (!tsdata->chip)
+		return -EINVAL;
 
-	pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data;
-
-	pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0);
-	/* gpio_attb validity is checked in probe */
-
-	if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-x property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->x_max -= 1;
-
-	if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-y property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->y_max -= 1;
-
-	dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__,
-		pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb);
-
-	return pdata;
+	return 0;
 }
 #else
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 #endif
 
 static int pixcir_i2c_ts_probe(struct i2c_client *client,
-					 const struct i2c_device_id *id)
+			       const struct i2c_device_id *id)
 {
 	const struct pixcir_ts_platform_data *pdata =
 			dev_get_platdata(&client->dev);
 	struct device *dev = &client->dev;
-	struct device_node *np = dev->of_node;
 	struct pixcir_i2c_ts_data *tsdata;
 	struct input_dev *input;
 	int error;
 
-	if (np && !pdata) {
-		pdata = pixcir_parse_dt(dev);
-		if (IS_ERR(pdata))
-			return PTR_ERR(pdata);
-	}
+	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
+	if (!tsdata)
+		return -ENOMEM;
 
-	if (!pdata) {
+	if (pdata) {
+		tsdata->chip = &pdata->chip;
+	} else if (dev->of_node) {
+		error = pixcir_parse_dt(dev, tsdata);
+		if (error)
+			return error;
+	} else {
 		dev_err(&client->dev, "platform data not defined\n");
 		return -EINVAL;
 	}
 
-	if (!gpio_is_valid(pdata->gpio_attb)) {
-		dev_err(dev, "Invalid gpio_attb in pdata\n");
+	if (!tsdata->chip->max_fingers) {
+		dev_err(dev, "Invalid max_fingers in chip data\n");
 		return -EINVAL;
 	}
 
-	if (!pdata->chip.max_fingers) {
-		dev_err(dev, "Invalid max_fingers in pdata\n");
-		return -EINVAL;
-	}
-
-	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
-	if (!tsdata)
-		return -ENOMEM;
-
 	input = devm_input_allocate_device(dev);
 	if (!input) {
 		dev_err(dev, "Failed to allocate input device\n");
@@ -498,7 +485,6 @@
 
 	tsdata->client = client;
 	tsdata->input = input;
-	tsdata->pdata = pdata;
 
 	input->name = client->name;
 	input->id.bustype = BUS_I2C;
@@ -506,15 +492,21 @@
 	input->close = pixcir_input_close;
 	input->dev.parent = &client->dev;
 
-	__set_bit(EV_KEY, input->evbit);
-	__set_bit(EV_ABS, input->evbit);
-	__set_bit(BTN_TOUCH, input->keybit);
-	input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	if (pdata) {
+		input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	} else {
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+		touchscreen_parse_properties(input, true);
+		if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+		    !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+			dev_err(dev, "Touchscreen size is not specified\n");
+			return -EINVAL;
+		}
+	}
 
-	tsdata->max_fingers = tsdata->pdata->chip.max_fingers;
+	tsdata->max_fingers = tsdata->chip->max_fingers;
 	if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
 		tsdata->max_fingers = PIXCIR_MAX_SLOTS;
 		dev_info(dev, "Limiting maximum fingers to %d\n",
@@ -530,10 +522,18 @@
 
 	input_set_drvdata(input, tsdata);
 
-	error = devm_gpio_request_one(dev, pdata->gpio_attb,
-				      GPIOF_DIR_IN, "pixcir_i2c_attb");
-	if (error) {
-		dev_err(dev, "Failed to request ATTB gpio\n");
+	tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
+	if (IS_ERR(tsdata->gpio_attb)) {
+		error = PTR_ERR(tsdata->gpio_attb);
+		dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+		return error;
+	}
+
+	tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(tsdata->gpio_reset)) {
+		error = PTR_ERR(tsdata->gpio_reset);
+		dev_err(dev, "Failed to request RESET gpio: %d\n", error);
 		return error;
 	}
 
@@ -545,6 +545,8 @@
 		return error;
 	}
 
+	pixcir_reset(tsdata);
+
 	/* Always be in IDLE mode to save power, device supports auto wake */
 	error = pixcir_set_power_mode(tsdata, PIXCIR_POWER_IDLE);
 	if (error) {
@@ -602,7 +604,6 @@
 
 static struct i2c_driver pixcir_i2c_ts_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "pixcir_ts",
 		.pm	= &pixcir_dev_pm_ops,
 		.of_match_table = of_match_ptr(pixcir_of_match),
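
The pixcir conversion leans on the gpiod "optional" convention:
devm_gpiod_get_optional() returns NULL when the line simply isn't wired
up (and gpiod_set_value_cansleep() treats a NULL descriptor as a no-op,
which is why pixcir_reset() only needs its IS_ERR_OR_NULL() guard), but
an ERR_PTR for real failures such as probe deferral. A userspace toy
mimicking that split; the error-pointer encoding below only imitates the
kernel's scheme for the demonstration:

#include <stdio.h>

#define EPROBE_DEFER 517

static void *err_ptr(long err) { return (void *)(-err); }
static long ptr_err(const void *p) { return -(long)p; }
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095L;
}

static void *gpiod_get_optional(int wired, int defer)
{
	if (defer)
		return err_ptr(EPROBE_DEFER);	/* real failure: propagate */
	if (!wired)
		return NULL;		/* optional line absent: not an error */
	return (void *)1;		/* stand-in for a valid gpio_desc */
}

int main(void)
{
	void *g = gpiod_get_optional(0, 0);

	printf("absent -> %p, is_err=%d\n", g, is_err(g));

	g = gpiod_get_optional(1, 1);
	if (is_err(g))
		printf("deferred -> error %ld\n", ptr_err(g));
	return 0;
}
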
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 697e26e..e943678 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -296,7 +296,6 @@
 	.id_table	= st1232_ts_id,
 	.driver = {
 		.name	= ST1232_TS_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(st1232_ts_dt_ids),
 		.pm	= &st1232_ts_pm_ops,
 	},
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8be7b9b..3f11763 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -581,6 +581,7 @@
 	sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
 	if (IS_ERR(sur40->alloc_ctx)) {
 		dev_err(sur40->dev, "Can't allocate buffer context");
+		error = PTR_ERR(sur40->alloc_ctx);
 		goto err_unreg_v4l2;
 	}
 
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index d8c025b..0f65d02 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -30,10 +30,11 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/tsc2005.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
 
 /*
  * The touchscreen interface operates as follows:
@@ -61,16 +62,24 @@
 #define TSC2005_CMD_12BIT		0x04
 
 /* control byte 0 */
-#define TSC2005_REG_READ		0x0001
-#define TSC2005_REG_PND0		0x0002
-#define TSC2005_REG_X			0x0000
-#define TSC2005_REG_Y			0x0008
-#define TSC2005_REG_Z1			0x0010
-#define TSC2005_REG_Z2			0x0018
-#define TSC2005_REG_TEMP_HIGH		0x0050
-#define TSC2005_REG_CFR0		0x0060
-#define TSC2005_REG_CFR1		0x0068
-#define TSC2005_REG_CFR2		0x0070
+#define TSC2005_REG_READ		0x01 /* R/W access */
+#define TSC2005_REG_PND0		0x02 /* Power Not Down Control */
+#define TSC2005_REG_X			(0x0 << 3)
+#define TSC2005_REG_Y			(0x1 << 3)
+#define TSC2005_REG_Z1			(0x2 << 3)
+#define TSC2005_REG_Z2			(0x3 << 3)
+#define TSC2005_REG_AUX			(0x4 << 3)
+#define TSC2005_REG_TEMP1		(0x5 << 3)
+#define TSC2005_REG_TEMP2		(0x6 << 3)
+#define TSC2005_REG_STATUS		(0x7 << 3)
+#define TSC2005_REG_AUX_HIGH		(0x8 << 3)
+#define TSC2005_REG_AUX_LOW		(0x9 << 3)
+#define TSC2005_REG_TEMP_HIGH		(0xA << 3)
+#define TSC2005_REG_TEMP_LOW		(0xB << 3)
+#define TSC2005_REG_CFR0		(0xC << 3)
+#define TSC2005_REG_CFR1		(0xD << 3)
+#define TSC2005_REG_CFR2		(0xE << 3)
+#define TSC2005_REG_CONV_FUNC		(0xF << 3)
 
 /* configuration register 0 */
 #define TSC2005_CFR0_PRECHARGE_276US	0x0040
@@ -112,20 +121,37 @@
 #define TSC2005_SPI_MAX_SPEED_HZ	10000000
 #define TSC2005_PENUP_TIME_MS		40
 
-struct tsc2005_spi_rd {
-	struct spi_transfer	spi_xfer;
-	u32			spi_tx;
-	u32			spi_rx;
+static const struct regmap_range tsc2005_writable_ranges[] = {
+	regmap_reg_range(TSC2005_REG_AUX_HIGH, TSC2005_REG_CFR2),
 };
 
+static const struct regmap_access_table tsc2005_writable_table = {
+	.yes_ranges = tsc2005_writable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(tsc2005_writable_ranges),
+};
+
+static struct regmap_config tsc2005_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.reg_stride = 0x08,
+	.max_register = 0x78,
+	.read_flag_mask = TSC2005_REG_READ,
+	.write_flag_mask = TSC2005_REG_PND0,
+	.wr_table = &tsc2005_writable_table,
+	.use_single_rw = true,
+};
+
+struct tsc2005_data {
+	u16 x;
+	u16 y;
+	u16 z1;
+	u16 z2;
+} __packed;
+#define TSC2005_DATA_REGS 4
+
 struct tsc2005 {
 	struct spi_device	*spi;
-
-	struct spi_message      spi_read_msg;
-	struct tsc2005_spi_rd	spi_x;
-	struct tsc2005_spi_rd	spi_y;
-	struct tsc2005_spi_rd	spi_z1;
-	struct tsc2005_spi_rd	spi_z2;
+	struct regmap		*regmap;
 
 	struct input_dev	*idev;
 	char			phys[32];
@@ -154,7 +180,7 @@
 
 	struct regulator	*vio;
 
-	int			reset_gpio;
+	struct gpio_desc	*reset_gpio;
 	void			(*set_reset)(bool enable);
 };
 
@@ -182,62 +208,6 @@
 	return 0;
 }
 
-static int tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
-{
-	u32 tx = ((reg | TSC2005_REG_PND0) << 16) | value;
-	struct spi_transfer xfer = {
-		.tx_buf		= &tx,
-		.len		= 4,
-		.bits_per_word	= 24,
-	};
-	struct spi_message msg;
-	int error;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error) {
-		dev_err(&ts->spi->dev,
-			"%s: failed, register: %x, value: %x, error: %d\n",
-			__func__, reg, value, error);
-		return error;
-	}
-
-	return 0;
-}
-
-static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
-{
-	memset(rd, 0, sizeof(*rd));
-
-	rd->spi_tx		   = (reg | TSC2005_REG_READ) << 16;
-	rd->spi_xfer.tx_buf	   = &rd->spi_tx;
-	rd->spi_xfer.rx_buf	   = &rd->spi_rx;
-	rd->spi_xfer.len	   = 4;
-	rd->spi_xfer.bits_per_word = 24;
-	rd->spi_xfer.cs_change	   = !last;
-}
-
-static int tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
-{
-	struct tsc2005_spi_rd spi_rd;
-	struct spi_message msg;
-	int error;
-
-	tsc2005_setup_read(&spi_rd, reg, true);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&spi_rd.spi_xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error)
-		return error;
-
-	*value = spi_rd.spi_rx;
-	return 0;
-}
-
 static void tsc2005_update_pen_state(struct tsc2005 *ts,
 				     int x, int y, int pressure)
 {
@@ -266,26 +236,23 @@
 	struct tsc2005 *ts = _ts;
 	unsigned long flags;
 	unsigned int pressure;
-	u32 x, y;
-	u32 z1, z2;
+	struct tsc2005_data tsdata;
 	int error;
 
 	/* read the coordinates */
-	error = spi_sync(ts->spi, &ts->spi_read_msg);
+	error = regmap_bulk_read(ts->regmap, TSC2005_REG_X, &tsdata,
+				 TSC2005_DATA_REGS);
 	if (unlikely(error))
 		goto out;
 
-	x = ts->spi_x.spi_rx;
-	y = ts->spi_y.spi_rx;
-	z1 = ts->spi_z1.spi_rx;
-	z2 = ts->spi_z2.spi_rx;
-
 	/* validate position */
-	if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
+	if (unlikely(tsdata.x > MAX_12BIT || tsdata.y > MAX_12BIT))
 		goto out;
 
 	/* Skip reading if the pressure components are out of range */
-	if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
+	if (unlikely(tsdata.z1 == 0 || tsdata.z2 > MAX_12BIT))
+		goto out;
+	if (unlikely(tsdata.z1 >= tsdata.z2))
 		goto out;
 
        /*
@@ -293,8 +260,8 @@
 	* the value before pen-up - that implies SPI fed us stale data
 	*/
 	if (!ts->pen_down &&
-	    ts->in_x == x && ts->in_y == y &&
-	    ts->in_z1 == z1 && ts->in_z2 == z2) {
+	    ts->in_x == tsdata.x && ts->in_y == tsdata.y &&
+	    ts->in_z1 == tsdata.z1 && ts->in_z2 == tsdata.z2) {
 		goto out;
 	}
 
@@ -302,20 +269,20 @@
 	 * At this point we are happy we have a valid and useful reading.
 	 * Remember it for later comparisons. We may now begin downsampling.
 	 */
-	ts->in_x = x;
-	ts->in_y = y;
-	ts->in_z1 = z1;
-	ts->in_z2 = z2;
+	ts->in_x = tsdata.x;
+	ts->in_y = tsdata.y;
+	ts->in_z1 = tsdata.z1;
+	ts->in_z2 = tsdata.z2;
 
 	/* Compute touch pressure resistance using equation #1 */
-	pressure = x * (z2 - z1) / z1;
+	pressure = tsdata.x * (tsdata.z2 - tsdata.z1) / tsdata.z1;
 	pressure = pressure * ts->x_plate_ohm / 4096;
 	if (unlikely(pressure > MAX_12BIT))
 		goto out;
 
 	spin_lock_irqsave(&ts->lock, flags);
 
-	tsc2005_update_pen_state(ts, x, y, pressure);
+	tsc2005_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
 	mod_timer(&ts->penup_timer,
 		  jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
 
@@ -338,9 +305,9 @@
 
 static void tsc2005_start_scan(struct tsc2005 *ts)
 {
-	tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
 	tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
 }
 
@@ -351,8 +318,8 @@
 
 static void tsc2005_set_reset(struct tsc2005 *ts, bool enable)
 {
-	if (ts->reset_gpio >= 0)
-		gpio_set_value(ts->reset_gpio, enable);
+	if (ts->reset_gpio)
+		gpiod_set_value_cansleep(ts->reset_gpio, enable);
 	else if (ts->set_reset)
 		ts->set_reset(enable);
 }
@@ -388,11 +355,10 @@
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
-	u16 temp_high;
-	u16 temp_high_orig;
-	u16 temp_high_test;
+	struct tsc2005 *ts = dev_get_drvdata(dev);
+	unsigned int temp_high;
+	unsigned int temp_high_orig;
+	unsigned int temp_high_test;
 	bool success = true;
 	int error;
 
@@ -403,7 +369,7 @@
 	 */
 	__tsc2005_disable(ts);
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d\n", error);
 		success = false;
@@ -412,14 +378,14 @@
 
 	temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
 
-	error = tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
+	error = regmap_write(ts->regmap, TSC2005_REG_TEMP_HIGH, temp_high_test);
 	if (error) {
 		dev_warn(dev, "selftest failed: write error %d\n", error);
 		success = false;
 		goto out;
 	}
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after write\n",
 			 error);
@@ -442,7 +408,7 @@
 		goto out;
 
 	/* test that the reset really happened */
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after reset\n",
 			 error);
@@ -474,8 +440,7 @@
 				      struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 	umode_t mode = attr->mode;
 
 	if (attr == &dev_attr_selftest.attr) {
@@ -495,7 +460,7 @@
 {
 	struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work.work);
 	int error;
-	u16 r;
+	unsigned int r;
 
 	if (!mutex_trylock(&ts->mutex)) {
 		/*
@@ -511,7 +476,7 @@
 		goto out;
 
 	/* We should be able to read register without disabling interrupts. */
-	error = tsc2005_read(ts, TSC2005_REG_CFR0, &r);
+	error = regmap_read(ts->regmap, TSC2005_REG_CFR0, &r);
 	if (!error &&
 	    !((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK)) {
 		goto out;
@@ -575,20 +540,6 @@
 	mutex_unlock(&ts->mutex);
 }
 
-static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
-{
-	tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
-	tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
-	tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, false);
-	tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, true);
-
-	spi_message_init(&ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
-}
-
 static int tsc2005_probe(struct spi_device *spi)
 {
 	const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -653,38 +604,31 @@
 	ts->spi = spi;
 	ts->idev = input_dev;
 
+	ts->regmap = devm_regmap_init_spi(spi, &tsc2005_regmap_config);
+	if (IS_ERR(ts->regmap))
+		return PTR_ERR(ts->regmap);
+
 	ts->x_plate_ohm = x_plate_ohm;
 	ts->esd_timeout = esd_timeout;
 
-	if (np) {
-		ts->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
-		if (ts->reset_gpio == -EPROBE_DEFER)
-			return ts->reset_gpio;
-		if (ts->reset_gpio < 0) {
-			dev_err(&spi->dev, "error acquiring reset gpio: %d\n",
-				ts->reset_gpio);
-			return ts->reset_gpio;
-		}
-
-		error = devm_gpio_request_one(&spi->dev, ts->reset_gpio, 0,
-					      "reset-gpios");
-		if (error) {
-			dev_err(&spi->dev, "error requesting reset gpio: %d\n",
-				error);
-			return error;
-		}
-
-		ts->vio = devm_regulator_get(&spi->dev, "vio");
-		if (IS_ERR(ts->vio)) {
-			error = PTR_ERR(ts->vio);
-			dev_err(&spi->dev, "vio regulator missing (%d)", error);
-			return error;
-		}
-	} else {
-		ts->reset_gpio = -1;
-		ts->set_reset = pdata->set_reset;
+	ts->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+						 GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+		dev_err(&spi->dev, "error acquiring reset gpio: %d\n", error);
+		return error;
 	}
 
+	ts->vio = devm_regulator_get_optional(&spi->dev, "vio");
+	if (IS_ERR(ts->vio)) {
+		error = PTR_ERR(ts->vio);
+		dev_err(&spi->dev, "vio regulator missing (%d)", error);
+		return error;
+	}
+
+	if (!ts->reset_gpio && pdata)
+		ts->set_reset = pdata->set_reset;
+
 	mutex_init(&ts->mutex);
 
 	spin_lock_init(&ts->lock);
@@ -692,8 +636,6 @@
 
 	INIT_DELAYED_WORK(&ts->esd_work, tsc2005_esd_work);
 
-	tsc2005_setup_spi_xfer(ts);
-
 	snprintf(ts->phys, sizeof(ts->phys),
 		 "%s/input-ts", dev_name(&spi->dev));
 
@@ -709,7 +651,7 @@
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
 
 	if (np)
-		touchscreen_parse_of_params(input_dev, false);
+		touchscreen_parse_properties(input_dev, false);
 
 	input_dev->open = tsc2005_open;
 	input_dev->close = tsc2005_close;
@@ -735,7 +677,7 @@
 			return error;
 	}
 
-	spi_set_drvdata(spi, ts);
+	dev_set_drvdata(&spi->dev, ts);
 	error = sysfs_create_group(&spi->dev.kobj, &tsc2005_attr_group);
 	if (error) {
 		dev_err(&spi->dev,
@@ -763,7 +705,7 @@
 
 static int tsc2005_remove(struct spi_device *spi)
 {
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
 
 	sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
 
@@ -775,8 +717,7 @@
 
 static int __maybe_unused tsc2005_suspend(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
@@ -792,8 +733,7 @@
 
 static int __maybe_unused tsc2005_resume(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index ccc8aa6..5d0cd51 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -482,7 +482,6 @@
 
 static struct i2c_driver tsc2007_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tsc2007",
 		.of_match_table = of_match_ptr(tsc2007_of_match),
 	},
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 32f8ac0..8d7a285 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -271,7 +271,6 @@
 static struct i2c_driver wacom_i2c_driver = {
 	.driver	= {
 		.name	= "wacom_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &wacom_i2c_pm,
 	},
 
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index fb92ae1..515c20a 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -23,7 +23,7 @@
 #include <asm/unaligned.h>
 
 #define WDT87XX_NAME		"wdt87xx_i2c"
-#define WDT87XX_DRV_VER		"0.9.6"
+#define WDT87XX_DRV_VER		"0.9.7"
 #define WDT87XX_FW_NAME		"wdt87xx_fw.bin"
 #define WDT87XX_CFG_NAME	"wdt87xx_cfg.bin"
 
@@ -85,6 +85,11 @@
 #define CTL_PARAM_OFFSET_PHY_H		24
 #define CTL_PARAM_OFFSET_FACTOR		32
 
+/* Device descriptor definitions */
+#define WDT_GD_DEVICE			1
+#define DEV_DESC_OFFSET_VID		8
+#define DEV_DESC_OFFSET_PID		10
+
 /* Communication commands */
 #define PACKET_SIZE			56
 #define VND_REQ_READ			0x06
@@ -152,6 +157,7 @@
 /* Controller requires minimum 300us between commands */
 #define WDT_COMMAND_DELAY_MS		2
 #define WDT_FLASH_WRITE_DELAY_MS	4
+#define WDT_FW_RESET_TIME		2500
 
 struct wdt87xx_sys_param {
 	u16	fw_id;
@@ -165,6 +171,8 @@
 	u16	scaling_factor;
 	u32	max_x;
 	u32	max_y;
+	u16	vendor_id;
+	u16	product_id;
 };
 
 struct wdt87xx_data {
@@ -208,6 +216,32 @@
 	return 0;
 }
 
+static int wdt87xx_get_desc(struct i2c_client *client, u8 desc_idx,
+			    u8 *buf, size_t len)
+{
+	u8 tx_buf[] = { 0x22, 0x00, 0x10, 0x0E, 0x23, 0x00 };
+	int error;
+
+	tx_buf[2] |= desc_idx & 0xF;
+
+	error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+				 buf, len);
+	if (error) {
+		dev_err(&client->dev, "get desc failed: %d\n", error);
+		return error;
+	}
+
+	if (buf[0] != len) {
+		dev_err(&client->dev, "unexpected response to get desc: %d\n",
+			buf[0]);
+		return -EINVAL;
+	}
+
+	mdelay(WDT_COMMAND_DELAY_MS);
+
+	return 0;
+}
+
 static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
 			      u8 *buf, size_t len)
 {
@@ -373,7 +407,7 @@
 	}
 
 	/* Wait for the device to be ready */
-	msleep(200);
+	msleep(WDT_FW_RESET_TIME);
 
 	return 0;
 }
@@ -403,6 +437,15 @@
 	u8 buf[PKT_READ_SIZE];
 	int error;
 
+	error = wdt87xx_get_desc(client, WDT_GD_DEVICE, buf, 18);
+	if (error) {
+		dev_err(&client->dev, "failed to get device desc\n");
+		return error;
+	}
+
+	param->vendor_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_VID);
+	param->product_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_PID);
+
 	error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
 	if (error) {
 		dev_err(&client->dev, "failed to get parameters\n");
@@ -994,6 +1037,8 @@
 
 	input->name = "WDT87xx Touchscreen";
 	input->id.bustype = BUS_I2C;
+	input->id.vendor = wdt->param.vendor_id;
+	input->id.product = wdt->param.product_id;
 	input->phys = wdt->phys;
 
 	input_set_abs_params(input, ABS_MT_POSITION_X, 0,
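
The new wdt87xx_get_desc() path above fetches what looks like a USB-style
device descriptor and pulls the vendor/product IDs out as little-endian
16-bit values at byte offsets 8 and 10, which is what the driver's
get_unaligned_le16() calls do. A tiny standalone equivalent; the
descriptor bytes are invented for the demonstration:

#include <stdint.h>
#include <stdio.h>

static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t desc[18] = { 0 };

	desc[0] = 18;		/* length byte, checked against the request */
	desc[8] = 0x6f;		/* VID low byte (example value) */
	desc[9] = 0x2d;		/* VID high byte */
	desc[10] = 0x01;	/* PID low byte (example value) */
	desc[11] = 0x00;	/* PID high byte */

	printf("vid 0x%04x pid 0x%04x\n", le16(desc + 8), le16(desc + 10));
	return 0;
}
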
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b1ae779..1534e9b 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -732,8 +732,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int wm97xx_suspend(struct device *dev, pm_message_t state)
+static int __maybe_unused wm97xx_suspend(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 	u16 reg;
@@ -765,7 +764,7 @@
 	return 0;
 }
 
-static int wm97xx_resume(struct device *dev)
+static int __maybe_unused wm97xx_resume(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 
@@ -799,10 +798,7 @@
 	return 0;
 }
 
-#else
-#define wm97xx_suspend		NULL
-#define wm97xx_resume		NULL
-#endif
+static SIMPLE_DEV_PM_OPS(wm97xx_pm_ops, wm97xx_suspend, wm97xx_resume);
 
 /*
  * Machine specific operations
@@ -836,8 +832,7 @@
 	.owner =	THIS_MODULE,
 	.probe =	wm97xx_probe,
 	.remove =	wm97xx_remove,
-	.suspend =	wm97xx_suspend,
-	.resume =	wm97xx_resume,
+	.pm =		&wm97xx_pm_ops,
 };
 
 static int __init wm97xx_init(void)
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index f58a196..781d0f8 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -24,14 +24,13 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/input/mt.h>
 #include <linux/platform_data/zforce_ts.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 
 #define WAIT_TIMEOUT		msecs_to_jiffies(1000)
 
@@ -120,6 +119,9 @@
 
 	struct regulator	*reg_vdd;
 
+	struct gpio_desc	*gpio_int;
+	struct gpio_desc	*gpio_rst;
+
 	bool			suspending;
 	bool			suspended;
 	bool			boot_complete;
@@ -161,6 +163,16 @@
 	return 0;
 }
 
+static void zforce_reset_assert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 1);
+}
+
+static void zforce_reset_deassert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 0);
+}
+
 static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
 {
 	struct i2c_client *client = ts->client;
@@ -479,7 +491,6 @@
 {
 	struct zforce_ts *ts = dev_id;
 	struct i2c_client *client = ts->client;
-	const struct zforce_ts_platdata *pdata = ts->pdata;
 	int ret;
 	u8 payload_buffer[FRAME_MAXSIZE];
 	u8 *payload;
@@ -499,7 +510,16 @@
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_stay_awake(&client->dev);
 
-	while (!gpio_get_value(pdata->gpio_int)) {
+	/*
+	 * Run the loop at least once, and exit it when either
+	 * - the optional interrupt GPIO isn't specified
+	 *   (then only one packet is read per ISR invocation)
+	 * or
+	 * - the GPIO is no longer active
+	 *   (packets are read until the level-triggered GPIO indicates
+	 *    that no interrupt is pending any more)
+	 */
+	do {
 		ret = zforce_read_packet(ts, payload_buffer);
 		if (ret < 0) {
 			dev_err(&client->dev,
@@ -566,7 +586,7 @@
 				payload[RESPONSE_ID]);
 			break;
 		}
-	}
+	} while (gpiod_get_value_cansleep(ts->gpio_int));
 
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_relax(&client->dev);
@@ -690,7 +710,7 @@
 {
 	struct zforce_ts *ts = data;
 
-	gpio_set_value(ts->pdata->gpio_rst, 0);
+	zforce_reset_assert(ts);
 
 	udelay(10);
 
@@ -712,18 +732,6 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	pdata->gpio_int = of_get_gpio(np, 0);
-	if (!gpio_is_valid(pdata->gpio_int)) {
-		dev_err(dev, "failed to get interrupt gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	pdata->gpio_rst = of_get_gpio(np, 1);
-	if (!gpio_is_valid(pdata->gpio_rst)) {
-		dev_err(dev, "failed to get reset gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
 		dev_err(dev, "failed to get x-size property\n");
 		return ERR_PTR(-EINVAL);
@@ -755,20 +763,49 @@
 	if (!ts)
 		return -ENOMEM;
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
-				    "zforce_ts_int");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_int, ret);
+	ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
+					       GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->gpio_rst)) {
+		ret = PTR_ERR(ts->gpio_rst);
+		dev_err(&client->dev,
+			"failed to request reset GPIO: %d\n", ret);
 		return ret;
 	}
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
-				    GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_rst, ret);
-		return ret;
+	if (ts->gpio_rst) {
+		ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
+						       GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+	} else {
+		/*
+		 * Deprecated GPIO handling for compatibility
+		 * with legacy binding.
+		 */
+
+		/* INT GPIO */
+		ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
+						    GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+
+		/* RST GPIO */
+		ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+					    GPIOD_OUT_HIGH);
+		if (IS_ERR(ts->gpio_rst)) {
+			ret = PTR_ERR(ts->gpio_rst);
+			dev_err(&client->dev,
+				"failed to request reset GPIO: %d\n", ret);
+			return ret;
+		}
 	}
 
 	ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
@@ -863,7 +900,7 @@
 	i2c_set_clientdata(client, ts);
 
 	/* let the controller boot */
-	gpio_set_value(pdata->gpio_rst, 1);
+	zforce_reset_deassert(ts);
 
 	ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
 	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
@@ -917,7 +954,6 @@
 
 static struct i2c_driver zforce_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "zforce-ts",
 		.pm	= &zforce_pm_ops,
 		.of_match_table	= of_match_ptr(zforce_dt_idtable),
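
The while() to do/while() rewrite above is the crux of making the
interrupt GPIO optional: one packet is always read, and further packets
are drained only while a level-triggered line stays asserted. A userspace
model of the two behaviours (the queued packet count is invented):

#include <stdio.h>

static int pending;			/* packets queued in the device */

static int gpio_level(int have_gpio)
{
	return have_gpio && pending > 0;
}

static void read_packet(void)
{
	if (pending > 0)
		pending--;
}

static int isr(int have_gpio)
{
	int reads = 0;

	do {
		read_packet();
		reads++;
	} while (gpio_level(have_gpio));

	return reads;
}

int main(void)
{
	pending = 3;
	printf("with gpio: %d reads\n", isr(1));	/* drains all 3 */
	pending = 3;
	printf("without gpio: %d reads\n", isr(0));	/* exactly 1 */
	return 0;
}
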
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f491aec..4664c2a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,7 +23,8 @@
 config IOMMU_IO_PGTABLE_LPAE
 	bool "ARMv7/v8 Long Descriptor Format"
 	select IOMMU_IO_PGTABLE
-	depends on ARM || ARM64 || COMPILE_TEST
+	# SWIOTLB guarantees a dma_to_phys() implementation
+	depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
 	help
 	  Enable support for the ARM long descriptor pagetable format.
 	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 658ee39..f82060e7 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1835,8 +1835,8 @@
 		free_gcr3_tbl_level2(domain->gcr3_tbl);
 	else if (domain->glx == 1)
 		free_gcr3_tbl_level1(domain->gcr3_tbl);
-	else if (domain->glx != 0)
-		BUG();
+	else
+		BUG_ON(domain->glx != 0);
 
 	free_page((unsigned long)domain->gcr3_tbl);
 }
@@ -3947,11 +3947,6 @@
 	if (ret < 0)
 		return ret;
 
-	ret = -ENOMEM;
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		goto out_free_parent;
-
 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
 		if (get_irq_table(devid, true))
 			index = info->ioapic_pin;
@@ -3962,7 +3957,6 @@
 	}
 	if (index < 0) {
 		pr_warn("Failed to allocate IRTE\n");
-		kfree(data);
 		goto out_free_parent;
 	}
 
@@ -3974,17 +3968,18 @@
 			goto out_free_data;
 		}
 
-		if (i > 0) {
-			data = kzalloc(sizeof(*data), GFP_KERNEL);
-			if (!data)
-				goto out_free_data;
-		}
+		ret = -ENOMEM;
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			goto out_free_data;
+
 		irq_data->hwirq = (devid << 16) + i;
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
 		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
 	}
+
 	return 0;
 
 out_free_data:
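
The restructuring above makes each interrupt's chip_data come from its
own kzalloc() inside the loop, so the out_free_data path only unwinds
iterations that completed; the old code allocated the first buffer before
the loop and leaked it when IRTE allocation failed. A minimal userspace
model of that allocate-in-loop / unwind-on-failure shape (all names are
invented):

#include <stdio.h>
#include <stdlib.h>

struct chip_data { int index; };

static int alloc_irqs(struct chip_data **slots, int nr_irqs, int fail_at)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		if (i == fail_at)	/* simulate a setup failure */
			goto out_free;

		slots[i] = malloc(sizeof(*slots[i]));
		if (!slots[i])
			goto out_free;
		slots[i]->index = i;
	}
	return 0;

out_free:
	while (--i >= 0) {		/* free only what was allocated */
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;
}

int main(void)
{
	struct chip_data *slots[4] = { NULL };

	/* failure at i == 2 unwinds slots 0 and 1, leaving none behind */
	printf("ret=%d slot0=%p\n", alloc_irqs(slots, 4, 2),
	       (void *)slots[0]);
	return 0;
}
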
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index a24495e..5ef347a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -154,7 +154,7 @@
 u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index f7b875b..1131664 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -356,8 +356,8 @@
 		free_pasid_states_level2(dev_state->states);
 	else if (dev_state->pasid_levels == 1)
 		free_pasid_states_level1(dev_state->states);
-	else if (dev_state->pasid_levels != 0)
-		BUG();
+	else
+		BUG_ON(dev_state->pasid_levels != 0);
 
 	free_page((unsigned long)dev_state->states);
 }
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index da902ba..dafaf59 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -118,6 +118,7 @@
 
 #define ARM_SMMU_IRQ_CTRL		0x50
 #define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
 #define IRQ_CTRL_GERROR_IRQEN		(1 << 0)
 
 #define ARM_SMMU_IRQ_CTRLACK		0x54
@@ -173,14 +174,14 @@
 #define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
 
 /* Common MSI config fields */
-#define MSI_CFG0_SH_SHIFT		60
-#define MSI_CFG0_SH_NSH			(0UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_OSH			(2UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_ISH			(3UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_MEMATTR_SHIFT		56
-#define MSI_CFG0_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG0_MEMATTR_SHIFT)
 #define MSI_CFG0_ADDR_SHIFT		2
 #define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
+#define MSI_CFG2_SH_SHIFT		4
+#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_MEMATTR_SHIFT		0
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
 
 #define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
 #define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
@@ -1330,33 +1331,10 @@
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-	if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
-		dsb(ishst);
-	} else {
-		dma_addr_t dma_addr;
-		struct device *dev = smmu->dev;
-
-		dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
-					DMA_TO_DEVICE);
-
-		if (dma_mapping_error(dev, dma_addr))
-			dev_err(dev, "failed to flush pgtable at %p\n", addr);
-		else
-			dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
-	}
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
-	.flush_pgtable	= arm_smmu_flush_pgtable,
 };
 
 /* IOMMU API */
@@ -1531,6 +1509,7 @@
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
+		.iommu_dev	= smmu->dev,
 	};
 
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2053,9 +2032,17 @@
 	int ret;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-	/* Calculate the L1 size, capped to the SIDSIZE */
-	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+	/*
+	 * If we can resolve everything with a single L2 table, then we
+	 * just need a single L1 descriptor. Otherwise, calculate the L1
+	 * size, capped to the SIDSIZE.
+	 */
+	if (smmu->sid_bits < STRTAB_SPLIT) {
+		size = 0;
+	} else {
+		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+	}
 	cfg->num_l1_ents = 1 << size;
 
 	size += STRTAB_SPLIT;
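
To see what the new sizing logic above computes, here is the same
arithmetic standalone. STRTAB_L1_SZ_SHIFT = 20, STRTAB_L1_DESC_DWORDS = 1
and STRTAB_SPLIT = 8 match the driver's other definitions, which this
hunk does not show, so treat them as assumptions:

#include <stdio.h>

#define STRTAB_L1_SZ_SHIFT	20
#define STRTAB_L1_DESC_DWORDS	1
#define STRTAB_SPLIT		8

static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int num_l1_ents(unsigned int sid_bits)
{
	unsigned int size;

	if (sid_bits < STRTAB_SPLIT)
		return 1;	/* one L2 table resolves every StreamID */

	size = STRTAB_L1_SZ_SHIFT - (ilog2u(STRTAB_L1_DESC_DWORDS) + 3);
	if (size > sid_bits - STRTAB_SPLIT)
		size = sid_bits - STRTAB_SPLIT;
	return 1u << size;
}

int main(void)
{
	/* 6 SID bits fit inside one 256-entry L2 table: single descriptor */
	printf("sid_bits=6  -> %u L1 entries\n", num_l1_ents(6));
	/* 16 SID bits: 1 << (16 - 8) = 256 L1 descriptors */
	printf("sid_bits=16 -> %u L1 entries\n", num_l1_ents(16));
	return 0;
}
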
@@ -2198,6 +2185,7 @@
 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 {
 	int ret, irq;
+	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 
 	/* Disable IRQs first */
 	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2252,13 +2240,13 @@
 			if (IS_ERR_VALUE(ret))
 				dev_warn(smmu->dev,
 					 "failed to enable priq irq\n");
+			else
+				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
 		}
 	}
 
 	/* Enable interrupt generation on the SMMU */
-	ret = arm_smmu_write_reg_sync(smmu,
-				      IRQ_CTRL_EVTQ_IRQEN |
-				      IRQ_CTRL_GERROR_IRQEN,
+	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
 				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
 	if (ret)
 		dev_warn(smmu->dev, "failed to enable irqs\n");
@@ -2540,12 +2528,12 @@
 	case IDR5_OAS_44_BIT:
 		smmu->oas = 44;
 		break;
+	default:
+		dev_info(smmu->dev,
+			"unknown output address size. Truncating to 48-bit\n");
+		/* Fallthrough */
 	case IDR5_OAS_48_BIT:
 		smmu->oas = 48;
-		break;
-	default:
-		dev_err(smmu->dev, "unknown output address size!\n");
-		return -ENXIO;
 	}
 
 	/* Set the DMA mask for our table walker */
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4cd0c29..48a39df 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -37,6 +37,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -607,34 +608,10 @@
 	}
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb(ishst);
-	} else {
-		/*
-		 * If the SMMU can't walk tables in the CPU caches, treat them
-		 * like non-coherent DMA since we need to flush the new entries
-		 * all the way out to memory. There's no possibility of
-		 * recursion here as the SMMU table walker will not be wired
-		 * through another SMMU.
-		 */
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-			     DMA_TO_DEVICE);
-	}
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
-	.flush_pgtable	= arm_smmu_flush_pgtable,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -898,6 +875,7 @@
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
+		.iommu_dev	= smmu->dev,
 	};
 
 	smmu_domain->smmu = smmu;
@@ -1532,6 +1510,7 @@
 	unsigned long size;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	u32 id;
+	bool cttw_dt, cttw_reg;
 
 	dev_notice(smmu->dev, "probing hardware configuration...\n");
 	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@
 		dev_notice(smmu->dev, "\taddress translation ops\n");
 	}
 
-	if (id & ID0_CTTW) {
+	/*
+	 * In order for DMA API calls to work properly, we must defer to what
+	 * the DT says about coherency, regardless of what the hardware claims.
+	 * Fortunately, this also opens up a workaround for systems where the
+	 * ID register value has ended up configured incorrectly.
+	 */
+	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+	cttw_reg = !!(id & ID0_CTTW);
+	if (cttw_dt)
 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-		dev_notice(smmu->dev, "\tcoherent table walk\n");
-	}
+	if (cttw_dt || cttw_reg)
+		dev_notice(smmu->dev, "\t%scoherent table walk\n",
+			   cttw_dt ? "" : "non-");
+	if (cttw_dt != cttw_reg)
+		dev_notice(smmu->dev,
+			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
 	if (id & ID0_SMS) {
 		u32 smr, sid, mask;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c9db04d..8757f8d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1068,7 +1068,7 @@
 	if (intel_iommu_enabled)
 		iommu->iommu_dev = iommu_device_create(NULL, iommu,
 						       intel_iommu_groups,
-						       iommu->name);
+						       "%s", iommu->name);
 
 	return 0;
 
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index abeedc9..2570f2a2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -41,7 +41,6 @@
 
 static struct paace *ppaact;
 static struct paace *spaact;
-static struct ome *omt __initdata;
 
 /*
  * Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@
  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
  * string would be used.
  */
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
 	{ .compatible = "fsl,qoriq-device-config-1.0", },
 	{ .compatible = "fsl,qoriq-device-config-2.0", },
 	{}
@@ -599,7 +598,7 @@
  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
  * clear the PAACE entry coherency attribute for them.
  */
-static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
 {
 	switch (paace_type) {
 	case QMAN_PAACE:
@@ -629,7 +628,7 @@
  * this table to translate device transaction to appropriate corenet
  * transaction.
  */
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
 {
 	struct ome *ome;
 
@@ -666,7 +665,7 @@
  * Get the maximum number of PAACT table entries
  * and subwindows supported by PAMU
  */
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
 {
 	u32 pc_val;
 
@@ -676,9 +675,9 @@
 }
 
 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
-				 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
-				 phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+			  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+			  phys_addr_t omt_phys)
 {
 	u32 *pc;
 	struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@
 }
 
 /* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
 {
 	int i, len;
 	struct paace *ppaace;
@@ -846,7 +845,7 @@
 /*
  * Create a coherence subdomain for a given memory block.
  */
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 {
 	struct device_node *np;
 	const __be32 *iprop;
@@ -988,7 +987,7 @@
 static const struct {
 	u32 svr;
 	u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
 	{(SVR_P2040 << 8) | 0x10, 0xFF000000},	/* P2040 1.0 */
 	{(SVR_P2040 << 8) | 0x11, 0xFF000000},	/* P2040 1.1 */
 	{(SVR_P2041 << 8) | 0x10, 0xFF000000},	/* P2041 1.0 */
@@ -1006,7 +1005,7 @@
 
 #define SVR_SECURITY	0x80000	/* The Security (E) bit */
 
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@
 	int irq;
 	phys_addr_t ppaact_phys;
 	phys_addr_t spaact_phys;
+	struct ome *omt;
 	phys_addr_t omt_phys;
 	size_t mem_size = 0;
 	unsigned int order = 0;
@@ -1200,7 +1200,7 @@
 	return ret;
 }
 
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
 	.driver = {
 		.name = "fsl-of-pamu",
 	},
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c82ebee..2d7349a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -364,7 +364,8 @@
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/* domain represents a virtual machine, more than one devices
+/*
+ * Domain represents a virtual machine; more than one device
  * across iommus may be owned in one domain, e.g. kvm guest.
  */
 #define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)
@@ -372,11 +373,21 @@
 /* si_domain contains multiple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
 
+#define for_each_domain_iommu(idx, domain)			\
+	for (idx = 0; idx < g_num_of_iommus; idx++)		\
+		if (domain->iommu_refcnt[idx])
+
 struct dmar_domain {
-	int	id;			/* domain id */
 	int	nid;			/* node id */
-	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
-					/* bitmap of iommus this domain uses*/
+
+	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
+					/* Refcount of devices per iommu */
+
+	u16		iommu_did[DMAR_UNITS_SUPPORTED];
+					/* Domain ids per IOMMU. Use u16 since
+					 * domain ids are 16 bits wide according
+					 * to VT-d spec, section 9.3 */
 
 	struct list_head devices;	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
@@ -395,7 +406,6 @@
 	int		iommu_superpage;/* Level of superpages supported:
 					   0 == 4KiB (no superpages), 1 == 2MiB,
 					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-	spinlock_t	iommu_lock;	/* protect iommu set in domain */
 	u64		max_addr;	/* maximum mapped address */
 
 	struct iommu_domain domain;	/* generic domain data structure for
@@ -465,10 +475,11 @@
 
 static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-				       struct device *dev);
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-					   struct device *dev);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+				     struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 
@@ -568,6 +579,36 @@
 static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 
+static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
+{
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	domains = iommu->domains[idx];
+	if (!domains)
+		return NULL;
+
+	return domains[did & 0xff];
+}
+
+static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
+			     struct dmar_domain *domain)
+{
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	if (!iommu->domains[idx]) {
+		size_t size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+	}
+
+	domains = iommu->domains[idx];
+	if (WARN_ON(!domains))
+		return;
+
+	domains[did & 0xff] = domain;
+}
+
 static inline void *alloc_pgtable_page(int node)
 {
 	struct page *page;
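
get_iommu_domain()/set_iommu_domain() above replace a flat, eagerly
allocated domain array with a two-level table: the high byte of the
16-bit domain ID selects a lazily allocated page of 256 pointers and the
low byte indexes within it. The same structure in plain C, sized here for
a full 16-bit ID space whereas the driver sizes the first level from
cap_ndoms():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct domain { int id; };

static struct domain **tables[256];	/* first level: one slot per page */

static struct domain *get_domain(uint16_t did)
{
	struct domain **page = tables[did >> 8];

	return page ? page[did & 0xff] : NULL;
}

static int set_domain(uint16_t did, struct domain *d)
{
	if (!tables[did >> 8]) {
		tables[did >> 8] = calloc(256, sizeof(struct domain *));
		if (!tables[did >> 8])
			return -1;
	}
	tables[did >> 8][did & 0xff] = d;
	return 0;
}

int main(void)
{
	struct domain d = { .id = 42 };

	set_domain(0x1234, &d);
	printf("hit=%d miss=%d\n",
	       get_domain(0x1234) == &d, get_domain(0x1235) == NULL);
	return 0;
}
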
@@ -609,6 +650,11 @@
 	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
 }
 
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
 {
 	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -663,7 +709,9 @@
 
 	/* si_domain and vm domain should not get here. */
 	BUG_ON(domain_type_is_vm_or_si(domain));
-	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+	for_each_domain_iommu(iommu_id, domain)
+		break;
+
 	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
 		return NULL;
 
@@ -679,7 +727,7 @@
 
 	domain->iommu_coherency = 1;
 
-	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+	for_each_domain_iommu(i, domain) {
 		found = true;
 		if (!ecap_coherent(g_iommus[i]->ecap)) {
 			domain->iommu_coherency = 0;
@@ -759,6 +807,7 @@
 	struct context_entry *context;
 	u64 *entry;
 
+	entry = &root->lo;
 	if (ecs_enabled(iommu)) {
 		if (devfn >= 0x80) {
 			devfn -= 0x80;
@@ -766,7 +815,6 @@
 		}
 		devfn *= 2;
 	}
-	entry = &root->lo;
 	if (*entry & 1)
 		context = phys_to_virt(*entry & VTD_PAGE_MASK);
 	else {
@@ -1166,9 +1214,9 @@
 /* We can't just free the pages because the IOMMU may still be walking
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
-struct page *domain_unmap(struct dmar_domain *domain,
-			  unsigned long start_pfn,
-			  unsigned long last_pfn)
+static struct page *domain_unmap(struct dmar_domain *domain,
+				 unsigned long start_pfn,
+				 unsigned long last_pfn)
 {
 	struct page *freelist = NULL;
 
@@ -1192,7 +1240,7 @@
 	return freelist;
 }
 
-void dma_free_pagelist(struct page *freelist)
+static void dma_free_pagelist(struct page *freelist)
 {
 	struct page *pg;
 
@@ -1360,24 +1408,23 @@
 			 u8 bus, u8 devfn)
 {
 	bool found = false;
-	unsigned long flags;
 	struct device_domain_info *info;
 	struct pci_dev *pdev;
 
+	assert_spin_locked(&device_domain_lock);
+
 	if (!ecap_dev_iotlb_support(iommu->ecap))
 		return NULL;
 
 	if (!iommu->qi)
 		return NULL;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link)
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
 			found = true;
 			break;
 		}
-	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	if (!found || !info->dev || !dev_is_pci(info->dev))
 		return NULL;
@@ -1436,11 +1483,14 @@
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+				  struct dmar_domain *domain,
+				  unsigned long pfn, unsigned int pages,
+				  int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+	u16 did = domain->iommu_did[iommu->seq_id];
 
 	BUG_ON(pages == 0);
 
@@ -1464,7 +1514,8 @@
 	 * flush. However, device IOTLB doesn't need to be flushed in this case.
 	 */
 	if (!cap_caching_mode(iommu->cap) || !map)
-		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
+				      addr, mask);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
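With iommu_flush_iotlb_psi() now taking the domain itself, the hardware domain id is looked up per unit via domain->iommu_did[iommu->seq_id]: the same dmar_domain may carry a different id on every IOMMU. A toy model of that table (the iommu_did field name mirrors the patch; everything else is a userspace stand-in):

	#include <stdio.h>

	#define MAX_IOMMUS 4

	struct toy_domain {
		unsigned short iommu_did[MAX_IOMMUS];	/* per-unit hardware ids */
	};

	int main(void)
	{
		/* one domain: id 3 on unit 0, id 7 on unit 2 */
		struct toy_domain d = { .iommu_did = { 3, 0, 7, 0 } };
		int seq_id = 2;				/* the unit doing the flush */

		printf("did used for the PSI flush: %d\n", d.iommu_did[seq_id]);
		return 0;
	}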
@@ -1519,65 +1570,78 @@
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-	unsigned long ndomains;
-	unsigned long nlongs;
+	u32 ndomains, nlongs;
+	size_t size;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%ld>\n",
+	pr_debug("%s: Number of Domains supported <%d>\n",
 		 iommu->name, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
 
-	/* TBD: there might be 64K domains,
-	 * consider other allocation for future chip
-	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
 		pr_err("%s: Allocating domain id array failed\n",
 		       iommu->name);
 		return -ENOMEM;
 	}
-	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-			GFP_KERNEL);
-	if (!iommu->domains) {
+
+	size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+	iommu->domains = kzalloc(size, GFP_KERNEL);
+
+	if (iommu->domains) {
+		size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+	}
+
+	if (!iommu->domains || !iommu->domains[0]) {
 		pr_err("%s: Allocating domain array failed\n",
 		       iommu->name);
 		kfree(iommu->domain_ids);
+		kfree(iommu->domains);
 		iommu->domain_ids = NULL;
+		iommu->domains    = NULL;
 		return -ENOMEM;
 	}
 
 	/*
-	 * if Caching mode is set, then invalid translations are tagged
-	 * with domainid 0. Hence we need to pre-allocate it.
+	 * If Caching mode is set, then invalid translations are tagged
+	 * with domain-id 0, hence we need to pre-allocate it. We also
+	 * use domain-id 0 as a marker for non-allocated domain-id, so
+	 * make sure it is not used for a real domain.
 	 */
-	if (cap_caching_mode(iommu->cap))
-		set_bit(0, iommu->domain_ids);
+	set_bit(0, iommu->domain_ids);
+
 	return 0;
 }
 
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
-	struct dmar_domain *domain;
-	int i;
+	struct device_domain_info *info, *tmp;
+	unsigned long flags;
 
-	if ((iommu->domains) && (iommu->domain_ids)) {
-		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
-			/*
-			 * Domain id 0 is reserved for invalid translation
-			 * if hardware supports caching mode.
-			 */
-			if (cap_caching_mode(iommu->cap) && i == 0)
-				continue;
+	if (!iommu->domains || !iommu->domain_ids)
+		return;
 
-			domain = iommu->domains[i];
-			clear_bit(i, iommu->domain_ids);
-			if (domain_detach_iommu(domain, iommu) == 0 &&
-			    !domain_type_is_vm(domain))
-				domain_exit(domain);
-		}
+	spin_lock_irqsave(&device_domain_lock, flags);
+	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+		struct dmar_domain *domain;
+
+		if (info->iommu != iommu)
+			continue;
+
+		if (!info->dev || !info->domain)
+			continue;
+
+		domain = info->domain;
+
+		dmar_remove_one_dev_info(domain, info->dev);
+
+		if (!domain_type_is_vm_or_si(domain))
+			domain_exit(domain);
 	}
+	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
@@ -1586,6 +1652,11 @@
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	if ((iommu->domains) && (iommu->domain_ids)) {
+		int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+		int i;
+
+		for (i = 0; i < elems; i++)
+			kfree(iommu->domains[i]);
 		kfree(iommu->domains);
 		kfree(iommu->domain_ids);
 		iommu->domains = NULL;
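The free loop mirrors the sizing used in iommu_init_domains(): (cap_ndoms() >> 8) + 1 first-level slots, each holding up to 256 domain pointers. The +1 simply rounds up, so an exact multiple of 256 leaves one spare slot. A small sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ndoms[] = { 16, 256, 65536 };	/* plausible cap_ndoms() values */
		int i;

		for (i = 0; i < 3; i++)
			printf("ndoms=%5u -> %u first-level slot(s)\n",
			       ndoms[i], (ndoms[i] >> 8) + 1);
		return 0;
	}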
@@ -1600,8 +1671,6 @@
 
 static struct dmar_domain *alloc_domain(int flags)
 {
-	/* domain id for virtual machine, it won't be set in context */
-	static atomic_t vm_domid = ATOMIC_INIT(0);
 	struct dmar_domain *domain;
 
 	domain = alloc_domain_mem();
@@ -1611,111 +1680,64 @@
 	memset(domain, 0, sizeof(*domain));
 	domain->nid = -1;
 	domain->flags = flags;
-	spin_lock_init(&domain->iommu_lock);
 	INIT_LIST_HEAD(&domain->devices);
-	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-		domain->id = atomic_inc_return(&vm_domid);
 
 	return domain;
 }
 
-static int __iommu_attach_domain(struct dmar_domain *domain,
-				 struct intel_iommu *iommu)
-{
-	int num;
-	unsigned long ndomains;
-
-	ndomains = cap_ndoms(iommu->cap);
-	num = find_first_zero_bit(iommu->domain_ids, ndomains);
-	if (num < ndomains) {
-		set_bit(num, iommu->domain_ids);
-		iommu->domains[num] = domain;
-	} else {
-		num = -ENOSPC;
-	}
-
-	return num;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
+/* Must be called with iommu->lock */
+static int domain_attach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu)
 {
-	int num;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	num = __iommu_attach_domain(domain, iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-	if (num < 0)
-		pr_err("%s: No free domain ids\n", iommu->name);
-
-	return num;
-}
-
-static int iommu_attach_vm_domain(struct dmar_domain *domain,
-				  struct intel_iommu *iommu)
-{
-	int num;
 	unsigned long ndomains;
+	int num;
 
-	ndomains = cap_ndoms(iommu->cap);
-	for_each_set_bit(num, iommu->domain_ids, ndomains)
-		if (iommu->domains[num] == domain)
-			return num;
+	assert_spin_locked(&device_domain_lock);
+	assert_spin_locked(&iommu->lock);
 
-	return __iommu_attach_domain(domain, iommu);
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
-				struct intel_iommu *iommu)
-{
-	unsigned long flags;
-	int num, ndomains;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	if (domain_type_is_vm_or_si(domain)) {
+	domain->iommu_refcnt[iommu->seq_id] += 1;
+	domain->iommu_count += 1;
+	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
 		ndomains = cap_ndoms(iommu->cap);
-		for_each_set_bit(num, iommu->domain_ids, ndomains) {
-			if (iommu->domains[num] == domain) {
-				clear_bit(num, iommu->domain_ids);
-				iommu->domains[num] = NULL;
-				break;
-			}
+		num      = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+		if (num >= ndomains) {
+			pr_err("%s: No free domain ids\n", iommu->name);
+			domain->iommu_refcnt[iommu->seq_id] -= 1;
+			domain->iommu_count -= 1;
+			return -ENOSPC;
 		}
-	} else {
-		clear_bit(domain->id, iommu->domain_ids);
-		iommu->domains[domain->id] = NULL;
-	}
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
 
-static void domain_attach_iommu(struct dmar_domain *domain,
-			       struct intel_iommu *iommu)
-{
-	unsigned long flags;
+		set_bit(num, iommu->domain_ids);
+		set_iommu_domain(iommu, num, domain);
 
-	spin_lock_irqsave(&domain->iommu_lock, flags);
-	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
-		domain->iommu_count++;
-		if (domain->iommu_count == 1)
-			domain->nid = iommu->node;
+		domain->iommu_did[iommu->seq_id] = num;
+		domain->nid			 = iommu->node;
+
 		domain_update_iommu_cap(domain);
 	}
-	spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+	return 0;
 }
 
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu)
 {
-	unsigned long flags;
-	int count = INT_MAX;
+	int num, count = INT_MAX;
 
-	spin_lock_irqsave(&domain->iommu_lock, flags);
-	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
-		count = --domain->iommu_count;
+	assert_spin_locked(&device_domain_lock);
+	assert_spin_locked(&iommu->lock);
+
+	domain->iommu_refcnt[iommu->seq_id] -= 1;
+	count = --domain->iommu_count;
+	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+		num = domain->iommu_did[iommu->seq_id];
+		clear_bit(num, iommu->domain_ids);
+		set_iommu_domain(iommu, num, NULL);
+
 		domain_update_iommu_cap(domain);
+		domain->iommu_did[iommu->seq_id] = 0;
 	}
-	spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
 	return count;
 }
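domain_attach_iommu()/domain_detach_iommu() now keep a per-unit reference count: a hardware domain id is allocated only on the 0 -> 1 transition and released on 1 -> 0, so additional devices behind the same unit only bump a counter. A toy lifecycle, with the kernel structures replaced by a single counter:

	#include <stdio.h>

	static int refcnt;	/* stands in for domain->iommu_refcnt[seq_id] */

	static void attach(void)
	{
		if (++refcnt == 1)
			printf("0 -> 1: allocate a hardware domain id\n");
	}

	static void detach(void)
	{
		if (--refcnt == 0)
			printf("1 -> 0: release the hardware domain id\n");
	}

	int main(void)
	{
		attach();	/* first device behind this unit: id allocated */
		attach();	/* second device: refcount only */
		detach();
		detach();	/* last device gone: id released */
		return 0;
	}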
@@ -1782,9 +1804,9 @@
 	return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+		       int guest_width)
 {
-	struct intel_iommu *iommu;
 	int adjust_width, agaw;
 	unsigned long sagaw;
 
@@ -1793,7 +1815,6 @@
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
-	iommu = domain_get_iommu(domain);
 	if (guest_width > cap_mgaw(iommu->cap))
 		guest_width = cap_mgaw(iommu->cap);
 	domain->gaw = guest_width;
@@ -1836,8 +1857,6 @@
 
 static void domain_exit(struct dmar_domain *domain)
 {
-	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
 	struct page *freelist = NULL;
 
 	/* Domain 0 is reserved, so dont process it */
@@ -1848,22 +1867,16 @@
 	if (!intel_iommu_strict)
 		flush_unmaps_timeout(0);
 
-	/* remove associated devices */
+	/* Remove associated devices and clear attached or cached domains */
+	rcu_read_lock();
 	domain_remove_dev_info(domain);
+	rcu_read_unlock();
 
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 
 	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
-	/* clear attached or cached domains */
-	rcu_read_lock();
-	for_each_active_iommu(iommu, drhd)
-		if (domain_type_is_vm(domain) ||
-		    test_bit(iommu->seq_id, domain->iommu_bmp))
-			iommu_detach_domain(domain, iommu);
-	rcu_read_unlock();
-
 	dma_free_pagelist(freelist);
 
 	free_domain_mem(domain);
@@ -1871,79 +1884,68 @@
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct intel_iommu *iommu,
-				      u8 bus, u8 devfn, int translation)
+				      u8 bus, u8 devfn)
 {
+	u16 did = domain->iommu_did[iommu->seq_id];
+	int translation = CONTEXT_TT_MULTI_LEVEL;
+	struct device_domain_info *info = NULL;
 	struct context_entry *context;
 	unsigned long flags;
 	struct dma_pte *pgd;
-	int id;
-	int agaw;
-	struct device_domain_info *info = NULL;
+	int ret, agaw;
+
+	WARN_ON(did == 0);
+
+	if (hw_pass_through && domain_type_is_si(domain))
+		translation = CONTEXT_TT_PASS_THROUGH;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
 		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
-	       translation != CONTEXT_TT_MULTI_LEVEL);
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&iommu->lock);
+
+	ret = -ENOMEM;
 	context = iommu_context_addr(iommu, bus, devfn, 1);
-	spin_unlock_irqrestore(&iommu->lock, flags);
 	if (!context)
-		return -ENOMEM;
-	spin_lock_irqsave(&iommu->lock, flags);
-	if (context_present(context)) {
-		spin_unlock_irqrestore(&iommu->lock, flags);
-		return 0;
-	}
+		goto out_unlock;
 
-	context_clear_entry(context);
+	ret = 0;
+	if (context_present(context))
+		goto out_unlock;
 
-	id = domain->id;
 	pgd = domain->pgd;
 
-	if (domain_type_is_vm_or_si(domain)) {
-		if (domain_type_is_vm(domain)) {
-			id = iommu_attach_vm_domain(domain, iommu);
-			if (id < 0) {
-				spin_unlock_irqrestore(&iommu->lock, flags);
-				pr_err("%s: No free domain ids\n", iommu->name);
-				return -EFAULT;
-			}
-		}
+	context_clear_entry(context);
+	context_set_domain_id(context, did);
 
-		/* Skip top levels of page tables for
-		 * iommu which has less agaw than default.
-		 * Unnecessary for PT mode.
-		 */
-		if (translation != CONTEXT_TT_PASS_THROUGH) {
-			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-				pgd = phys_to_virt(dma_pte_addr(pgd));
-				if (!dma_pte_present(pgd)) {
-					spin_unlock_irqrestore(&iommu->lock, flags);
-					return -ENOMEM;
-				}
-			}
-		}
-	}
-
-	context_set_domain_id(context, id);
-
+	/*
+	 * Skip top levels of page tables for iommu which has less agaw
+	 * than default.  Unnecessary for PT mode.
+	 */
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
+		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+			ret = -ENOMEM;
+			pgd = phys_to_virt(dma_pte_addr(pgd));
+			if (!dma_pte_present(pgd))
+				goto out_unlock;
+		}
+
 		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		translation = info ? CONTEXT_TT_DEV_IOTLB :
 				     CONTEXT_TT_MULTI_LEVEL;
-	}
-	/*
-	 * In pass through mode, AW must be programmed to indicate the largest
-	 * AGAW value supported by hardware. And ASR is ignored by hardware.
-	 */
-	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
-		context_set_address_width(context, iommu->msagaw);
-	else {
+
 		context_set_address_root(context, virt_to_phys(pgd));
 		context_set_address_width(context, iommu->agaw);
+	} else {
+		/*
+		 * In pass through mode, AW must be programmed to
+		 * indicate the largest AGAW value supported by
+		 * hardware. And ASR is ignored by hardware.
+		 */
+		context_set_address_width(context, iommu->msagaw);
 	}
 
 	context_set_translation_type(context, translation);
@@ -1962,14 +1964,17 @@
 					   (((u16)bus) << 8) | devfn,
 					   DMA_CCMD_MASK_NOBIT,
 					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 	} else {
 		iommu_flush_write_buffer(iommu);
 	}
 	iommu_enable_dev_iotlb(info);
-	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	domain_attach_iommu(domain, iommu);
+	ret = 0;
+
+out_unlock:
+	spin_unlock(&iommu->lock);
+	spin_unlock_irqrestore(&device_domain_lock, flags);
 
-	return 0;
+	return ret;
 }
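Note the locking discipline the rewrite establishes: device_domain_lock is always taken before iommu->lock, and the two are released in reverse order, which is what lets the helpers above assert_spin_locked() on both. A minimal pthread analogue of that nesting (mutexes standing in for the spinlocks):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* device_domain_lock */
	static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* iommu->lock */

	int main(void)
	{
		pthread_mutex_lock(&outer);	/* outer lock first ... */
		pthread_mutex_lock(&inner);	/* ... then the inner one */

		printf("context entry would be programmed here\n");

		pthread_mutex_unlock(&inner);	/* release in reverse order */
		pthread_mutex_unlock(&outer);
		return 0;
	}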
@@ -1977,7 +1982,6 @@
 struct domain_context_mapping_data {
 	struct dmar_domain *domain;
 	struct intel_iommu *iommu;
-	int translation;
 };
 
 static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -1986,13 +1990,11 @@
 	struct domain_context_mapping_data *data = opaque;
 
 	return domain_context_mapping_one(data->domain, data->iommu,
-					  PCI_BUS_NUM(alias), alias & 0xff,
-					  data->translation);
+					  PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
-		       int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2003,12 +2005,10 @@
 		return -ENODEV;
 
 	if (!dev_is_pci(dev))
-		return domain_context_mapping_one(domain, iommu, bus, devfn,
-						  translation);
+		return domain_context_mapping_one(domain, iommu, bus, devfn);
 
 	data.domain = domain;
 	data.iommu = iommu;
-	data.translation = translation;
 
 	return pci_for_each_dma_alias(to_pci_dev(dev),
 				      &domain_context_mapping_cb, &data);
@@ -2194,7 +2194,7 @@
 	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	if (!iommu)
 		return;
@@ -2220,21 +2220,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-		unlink_domain_info(info);
-		spin_unlock_irqrestore(&device_domain_lock, flags);
-
-		iommu_disable_dev_iotlb(info);
-		iommu_detach_dev(info->iommu, info->bus, info->devfn);
-
-		if (domain_type_is_vm(domain)) {
-			iommu_detach_dependent_devices(info->iommu, info->dev);
-			domain_detach_iommu(domain, info->iommu);
-		}
-
-		free_devinfo_mem(info);
-		spin_lock_irqsave(&device_domain_lock, flags);
-	}
+	list_for_each_entry_safe(info, tmp, &domain->devices, link)
+		__dmar_remove_one_dev_info(info);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -2266,14 +2253,15 @@
 	return NULL;
 }
 
-static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
-						int bus, int devfn,
-						struct device *dev,
-						struct dmar_domain *domain)
+static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
+						    int bus, int devfn,
+						    struct device *dev,
+						    struct dmar_domain *domain)
 {
 	struct dmar_domain *found = NULL;
 	struct device_domain_info *info;
 	unsigned long flags;
+	int ret;
 
 	info = alloc_devinfo_mem();
 	if (!info)
@@ -2290,12 +2278,16 @@
 	spin_lock_irqsave(&device_domain_lock, flags);
 	if (dev)
 		found = find_domain(dev);
-	else {
+
+	if (!found) {
 		struct device_domain_info *info2;
 		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
-		if (info2)
-			found = info2->domain;
+		if (info2) {
+			found      = info2->domain;
+			info2->dev = dev;
+		}
 	}
+
 	if (found) {
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 		free_devinfo_mem(info);
@@ -2303,12 +2295,27 @@
 		return found;
 	}
 
+	spin_lock(&iommu->lock);
+	ret = domain_attach_iommu(domain, iommu);
+	spin_unlock(&iommu->lock);
+
+	if (ret) {
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+		return NULL;
+	}
+
 	list_add(&info->link, &domain->devices);
 	list_add(&info->global, &device_domain_list);
 	if (dev)
 		dev->archdata.iommu = info;
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
+	if (dev && domain_context_mapping(domain, dev)) {
+		pr_err("Domain context map for %s failed\n", dev_name(dev));
+		dmar_remove_one_dev_info(domain, dev);
+		return NULL;
+	}
+
 	return domain;
 }
 
@@ -2321,10 +2328,10 @@
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
+	struct device_domain_info *info = NULL;
 	struct dmar_domain *domain, *tmp;
 	struct intel_iommu *iommu;
-	struct device_domain_info *info;
-	u16 dma_alias;
+	u16 req_id, dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
 
@@ -2336,6 +2343,8 @@
 	if (!iommu)
 		return NULL;
 
+	req_id = ((u16)bus << 8) | devfn;
+
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2360,21 +2369,15 @@
 	domain = alloc_domain(0);
 	if (!domain)
 		return NULL;
-	domain->id = iommu_attach_domain(domain, iommu);
-	if (domain->id < 0) {
-		free_domain_mem(domain);
-		return NULL;
-	}
-	domain_attach_iommu(domain, iommu);
-	if (domain_init(domain, gaw)) {
+	if (domain_init(domain, iommu, gaw)) {
 		domain_exit(domain);
 		return NULL;
 	}
 
 	/* register PCI DMA alias device */
-	if (dev_is_pci(dev)) {
-		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-					   dma_alias & 0xff, NULL, domain);
+	if (req_id != dma_alias && dev_is_pci(dev)) {
+		tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					       dma_alias & 0xff, NULL, domain);
 
 		if (!tmp || tmp != domain) {
 			domain_exit(domain);
@@ -2386,7 +2389,7 @@
 	}
 
 found_domain:
-	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+	tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
 
 	if (!tmp || tmp != domain) {
 		domain_exit(domain);
@@ -2414,8 +2417,7 @@
 		return -ENOMEM;
 	}
 
-	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
-		 start, end, domain->id);
+	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
 	/*
 	 * RMRR range might have overlap with physical memory range,
 	 * clear it first
@@ -2476,11 +2478,6 @@
 	if (ret)
 		goto error;
 
-	/* context entry init */
-	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-	if (ret)
-		goto error;
-
 	return 0;
 
  error:
@@ -2526,37 +2523,18 @@
 
 static int __init si_domain_init(int hw)
 {
-	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
 	int nid, ret = 0;
-	bool first = true;
 
 	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
 	if (!si_domain)
 		return -EFAULT;
 
-	for_each_active_iommu(iommu, drhd) {
-		ret = iommu_attach_domain(si_domain, iommu);
-		if (ret < 0) {
-			domain_exit(si_domain);
-			return -EFAULT;
-		} else if (first) {
-			si_domain->id = ret;
-			first = false;
-		} else if (si_domain->id != ret) {
-			domain_exit(si_domain);
-			return -EFAULT;
-		}
-		domain_attach_iommu(si_domain, iommu);
-	}
-
 	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		domain_exit(si_domain);
 		return -EFAULT;
 	}
 
-	pr_debug("Identity mapping domain is domain %d\n",
-		 si_domain->id);
+	pr_debug("Identity mapping domain allocated\n");
 
 	if (hw)
 		return 0;
@@ -2590,28 +2568,20 @@
 	return 0;
 }
 
-static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
-	int ret;
 
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+	ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
 	if (ndomain != domain)
 		return -EBUSY;
 
-	ret = domain_context_mapping(domain, dev, translation);
-	if (ret) {
-		domain_remove_one_dev_info(domain, dev);
-		return ret;
-	}
-
 	return 0;
 }
 
@@ -2751,9 +2721,7 @@
 	if (!iommu_should_identity_map(dev, 1))
 		return 0;
 
-	ret = domain_add_dev_info(si_domain, dev,
-				  hw ? CONTEXT_TT_PASS_THROUGH :
-				       CONTEXT_TT_MULTI_LEVEL);
+	ret = domain_add_dev_info(si_domain, dev);
 	if (!ret)
 		pr_info("%s identity mapping for device %s\n",
 			hw ? "Hardware" : "Software", dev_name(dev));
@@ -2839,15 +2807,18 @@
 }
 
 static int copy_context_table(struct intel_iommu *iommu,
-			      struct root_entry *old_re,
+			      struct root_entry __iomem *old_re,
 			      struct context_entry **tbl,
 			      int bus, bool ext)
 {
-	struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
 	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+	struct context_entry __iomem *old_ce = NULL;
+	struct context_entry *new_ce = NULL, ce;
+	struct root_entry re;
 	phys_addr_t old_ce_phys;
 
 	tbl_idx = ext ? bus * 2 : bus;
+	memcpy_fromio(&re, old_re, sizeof(re));
 
 	for (devfn = 0; devfn < 256; devfn++) {
 		/* First calculate the correct index */
@@ -2867,9 +2838,9 @@
 
 			ret = 0;
 			if (devfn < 0x80)
-				old_ce_phys = root_entry_lctp(old_re);
+				old_ce_phys = root_entry_lctp(&re);
 			else
-				old_ce_phys = root_entry_uctp(old_re);
+				old_ce_phys = root_entry_uctp(&re);
 
 			if (!old_ce_phys) {
 				if (ext && devfn == 0) {
@@ -2894,7 +2865,7 @@
 		}
 
 		/* Now copy the context entry */
-		ce = old_ce[idx];
+		memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
 
 		if (!__context_present(&ce))
 			continue;
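The memcpy_fromio() conversions follow from the new __iomem annotation on old_re/old_ce: the previous kernel's tables are reached through an ioremap mapping and must not be dereferenced as ordinary memory. The pattern is snapshot-then-parse; a userspace analogue, with plain memcpy standing in for memcpy_fromio:

	#include <stdio.h>
	#include <string.h>

	struct entry { unsigned long long lo, hi; };

	static void parse(const volatile struct entry *src)
	{
		struct entry e;

		/* snapshot into ordinary memory, then parse the stable copy */
		memcpy(&e, (const void *)src, sizeof(e));
		printf("present bit: %d\n", (int)(e.lo & 1));
	}

	int main(void)
	{
		struct entry ce = { .lo = 0x1000 | 1 };

		parse(&ce);
		return 0;
	}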
@@ -2938,8 +2909,8 @@
 
 static int copy_translation_tables(struct intel_iommu *iommu)
 {
+	struct root_entry __iomem *old_rt;
 	struct context_entry **ctxt_tbls;
-	struct root_entry *old_rt;
 	phys_addr_t old_rt_phys;
 	int ctxt_table_entries;
 	unsigned long flags;
@@ -3269,7 +3240,6 @@
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
 	struct dmar_domain *domain;
-	int ret;
 
 	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain) {
@@ -3278,16 +3248,6 @@
 		return NULL;
 	}
 
-	/* make sure context mapping is ok */
-	if (unlikely(!domain_context_mapped(dev))) {
-		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-		if (ret) {
-			pr_err("Domain context map for %s failed\n",
-			       dev_name(dev));
-			return NULL;
-		}
-	}
-
 	return domain;
 }
 
@@ -3323,7 +3283,7 @@
 			 * 32 bit DMA is removed from si_domain and fall back
 			 * to non-identity mapping.
 			 */
-			domain_remove_one_dev_info(si_domain, dev);
+			dmar_remove_one_dev_info(si_domain, dev);
 			pr_info("32bit %s uses non-identity mapping\n",
 				dev_name(dev));
 			return 0;
@@ -3335,10 +3295,7 @@
 		 */
 		if (iommu_should_identity_map(dev, 0)) {
 			int ret;
-			ret = domain_add_dev_info(si_domain, dev,
-						  hw_pass_through ?
-						  CONTEXT_TT_PASS_THROUGH :
-						  CONTEXT_TT_MULTI_LEVEL);
+			ret = domain_add_dev_info(si_domain, dev);
 			if (!ret) {
 				pr_info("64bit %s uses identity mapping\n",
 					dev_name(dev));
@@ -3399,7 +3356,9 @@
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain,
+				      mm_to_dma_pfn(iova->pfn_lo),
+				      size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -3450,7 +3409,7 @@
 
 			/* On real hardware multiple invalidations are expensive */
 			if (cap_caching_mode(iommu->cap))
-				iommu_flush_iotlb_psi(iommu, domain->id,
+				iommu_flush_iotlb_psi(iommu, domain,
 					iova->pfn_lo, iova_size(iova),
 					!deferred_flush[i].freelist[j], 0);
 			else {
@@ -3534,7 +3493,7 @@
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      last_pfn - start_pfn + 1, !freelist, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -3692,7 +3651,7 @@
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -4169,13 +4128,6 @@
 	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 	iommu_enable_translation(iommu);
 
-	if (si_domain) {
-		ret = iommu_attach_domain(si_domain, iommu);
-		if (ret < 0 || si_domain->id != ret)
-			goto disable_iommu;
-		domain_attach_iommu(si_domain, iommu);
-	}
-
 	iommu_disable_protect_mem_regions(iommu);
 	return 0;
 
@@ -4337,11 +4289,9 @@
 	if (!domain)
 		return 0;
 
-	down_read(&dmar_global_lock);
-	domain_remove_one_dev_info(domain, dev);
+	dmar_remove_one_dev_info(domain, dev);
 	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
 		domain_exit(domain);
-	up_read(&dmar_global_lock);
 
 	return 0;
 }
@@ -4398,7 +4348,7 @@
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain->id,
+				iommu_flush_iotlb_psi(iommu, si_domain,
 					iova->pfn_lo, iova_size(iova),
 					!freelist, 0);
 			rcu_read_unlock();
@@ -4457,11 +4407,32 @@
 }
 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
 
+static ssize_t intel_iommu_show_ndoms(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+}
+static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+
+static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
+						  cap_ndoms(iommu->cap)));
+}
+static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+
 static struct attribute *intel_iommu_attrs[] = {
 	&dev_attr_version.attr,
 	&dev_attr_address.attr,
 	&dev_attr_cap.attr,
 	&dev_attr_ecap.attr,
+	&dev_attr_domains_supported.attr,
+	&dev_attr_domains_used.attr,
 	NULL,
 };
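The two new attributes expose domain-id pressure per unit. A minimal reader, assuming the device shows up under /sys/class/iommu/ with the usual dmar<N> naming (the exact path is a guess and may differ between systems):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/domains_used", "r");

		if (!f) {
			perror("open");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("domain ids in use: %s", buf);
		fclose(f);
		return 0;
	}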
 
@@ -4541,7 +4512,7 @@
 	for_each_active_iommu(iommu, drhd)
 		iommu->iommu_dev = iommu_device_create(NULL, iommu,
 						       intel_iommu_groups,
-						       iommu->name);
+						       "%s", iommu->name);
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	bus_register_notifier(&pci_bus_type, &device_nb);
@@ -4561,11 +4532,11 @@
 	return ret;
 }
 
-static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 {
 	struct intel_iommu *iommu = opaque;
 
-	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
 	return 0;
 }
 
@@ -4575,63 +4546,50 @@
  * devices, unbinding the driver from any one of them will possibly leave
  * the others unable to operate.
  */
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-					   struct device *dev)
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
 {
 	if (!iommu || !dev || !dev_is_pci(dev))
 		return;
 
-	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
+	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
 }
 
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-				       struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
-	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
-	bool found = false;
-	u8 bus, devfn;
 
-	iommu = device_to_iommu(dev, &bus, &devfn);
-	if (!iommu)
+	assert_spin_locked(&device_domain_lock);
+
+	if (WARN_ON(!info))
 		return;
 
+	iommu = info->iommu;
+
+	if (info->dev) {
+		iommu_disable_dev_iotlb(info);
+		domain_context_clear(iommu, info->dev);
+	}
+
+	unlink_domain_info(info);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	domain_detach_iommu(info->domain, iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	free_devinfo_mem(info);
+}
+
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+				     struct device *dev)
+{
+	struct device_domain_info *info;
+	unsigned long flags;
+
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-		if (info->iommu == iommu && info->bus == bus &&
-		    info->devfn == devfn) {
-			unlink_domain_info(info);
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-
-			iommu_disable_dev_iotlb(info);
-			iommu_detach_dev(iommu, info->bus, info->devfn);
-			iommu_detach_dependent_devices(iommu, dev);
-			free_devinfo_mem(info);
-
-			spin_lock_irqsave(&device_domain_lock, flags);
-
-			if (found)
-				break;
-			else
-				continue;
-		}
-
-		/* if there is no other devices under the same iommu
-		 * owned by this domain, clear this iommu in iommu_bmp
-		 * update iommu count and coherency
-		 */
-		if (info->iommu == iommu)
-			found = true;
-	}
-
+	info = dev->archdata.iommu;
+	__dmar_remove_one_dev_info(info);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	if (found == 0) {
-		domain_detach_iommu(domain, iommu);
-		if (!domain_type_is_vm_or_si(domain))
-			iommu_detach_domain(domain, iommu);
-	}
 }
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4712,10 +4670,9 @@
 
 		old_domain = find_domain(dev);
 		if (old_domain) {
-			if (domain_type_is_vm_or_si(dmar_domain))
-				domain_remove_one_dev_info(old_domain, dev);
-			else
-				domain_remove_dev_info(old_domain);
+			rcu_read_lock();
+			dmar_remove_one_dev_info(old_domain, dev);
+			rcu_read_unlock();
 
 			if (!domain_type_is_vm_or_si(old_domain) &&
 			     list_empty(&old_domain->devices))
@@ -4755,13 +4712,13 @@
 		dmar_domain->agaw--;
 	}
 
-	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+	return domain_add_dev_info(dmar_domain, dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
 				      struct device *dev)
 {
-	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
+	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
@@ -4810,12 +4767,11 @@
 	struct intel_iommu *iommu;
 	unsigned long start_pfn, last_pfn;
 	unsigned int npages;
-	int iommu_id, num, ndomains, level = 0;
+	int iommu_id, level = 0;
 
 	/* Cope with horrid API which requires us to unmap more than the
 	   size argument if it happens to be a large-page mapping. */
-	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
-		BUG();
+	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
 
 	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
 		size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4827,19 +4783,11 @@
 
 	npages = last_pfn - start_pfn + 1;
 
-	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
-               iommu = g_iommus[iommu_id];
+	for_each_domain_iommu(iommu_id, dmar_domain) {
+		iommu = g_iommus[iommu_id];
 
-               /*
-                * find bit position of dmar_domain
-                */
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == dmar_domain)
-                               iommu_flush_iotlb_psi(iommu, num, start_pfn,
-						     npages, !freelist, 0);
-	       }
-
+		iommu_flush_iotlb_psi(iommu, dmar_domain,
+				      start_pfn, npages, !freelist, 0);
 	}
 
 	dma_free_pagelist(freelist);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f15692a..9ec4e0d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -384,7 +384,7 @@
 
 static int iommu_load_old_irte(struct intel_iommu *iommu)
 {
-	struct irte *old_ir_table;
+	struct irte __iomem *old_ir_table;
 	phys_addr_t irt_phys;
 	unsigned int i;
 	size_t size;
@@ -413,7 +413,7 @@
 		return -ENOMEM;
 
 	/* Copy data over */
-	memcpy(iommu->ir_table->base, old_ir_table, size);
+	memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
 
 	__iommu_flush_cache(iommu, iommu->ir_table->base, size);
 
@@ -426,6 +426,8 @@
 			bitmap_set(iommu->ir_table->bitmap, i, 1);
 	}
 
+	iounmap(old_ir_table);
+
 	return 0;
 }
 
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e46021..73c0748 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include <asm/barrier.h>
+
 #include "io-pgtable.h"
 
 #define ARM_LPAE_MAX_ADDR_BITS		48
@@ -200,20 +202,97 @@
 
 static bool selftest_running = false;
 
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+	return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+				    struct io_pgtable_cfg *cfg)
+{
+	struct device *dev = cfg->iommu_dev;
+	dma_addr_t dma;
+	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+	if (!pages)
+		return NULL;
+
+	if (!selftest_running) {
+		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma))
+			goto out_free;
+		/*
+		 * We depend on the IOMMU being able to work with any physical
+		 * address directly, so if the DMA layer suggests it can't by
+		 * giving us back some translation, that bodes very badly...
+		 */
+		if (dma != __arm_lpae_dma_addr(dev, pages))
+			goto out_unmap;
+	}
+
+	return pages;
+
+out_unmap:
+	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+	free_pages_exact(pages, size);
+	return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+				  struct io_pgtable_cfg *cfg)
+{
+	struct device *dev = cfg->iommu_dev;
+
+	if (!selftest_running)
+		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+				 size, DMA_TO_DEVICE);
+	free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+			       struct io_pgtable_cfg *cfg)
+{
+	struct device *dev = cfg->iommu_dev;
+
+	*ptep = pte;
+
+	if (!selftest_running)
+		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+					   sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+			    unsigned long iova, size_t size, int lvl,
+			    arm_lpae_iopte *ptep);
+
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 			     unsigned long iova, phys_addr_t paddr,
 			     arm_lpae_iopte prot, int lvl,
 			     arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte pte = prot;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-	/* We require an unmap first */
 	if (iopte_leaf(*ptep, lvl)) {
+		/* We require an unmap first */
 		WARN_ON(!selftest_running);
 		return -EEXIST;
+	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+		/*
+		 * We need to unmap and free the old table before
+		 * overwriting it with a block entry.
+		 */
+		arm_lpae_iopte *tblp;
+		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+			return -EINVAL;
 	}
 
-	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 		pte |= ARM_LPAE_PTE_NS;
 
 	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@
 	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
 	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
-	*ptep = pte;
-	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+	__arm_lpae_set_pte(ptep, pte, cfg);
 	return 0;
 }
 
@@ -234,14 +312,14 @@
 			  int lvl, arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte *cptep, pte;
-	void *cookie = data->iop.cookie;
 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
 	/* Find our entry at the current level */
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
 	/* If we can install a leaf entry at this level, then do so */
-	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+	if (size == block_size && (size & cfg->pgsize_bitmap))
 		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
 	/* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@
 	/* Grab a pointer to the next level */
 	pte = *ptep;
 	if (!pte) {
-		cptep = alloc_pages_exact(1UL << data->pg_shift,
-					 GFP_ATOMIC | __GFP_ZERO);
+		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+					       GFP_ATOMIC, cfg);
 		if (!cptep)
 			return -ENOMEM;
 
-		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
-						 cookie);
 		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
-		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 			pte |= ARM_LPAE_PTE_NSTABLE;
-		*ptep = pte;
-		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+		__arm_lpae_set_pte(ptep, pte, cfg);
 	} else {
 		cptep = iopte_deref(pte, data);
 	}
@@ -309,7 +384,7 @@
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte *ptep = data->pgd;
-	int lvl = ARM_LPAE_START_LVL(data);
+	int ret, lvl = ARM_LPAE_START_LVL(data);
 	arm_lpae_iopte prot;
 
 	/* If no access, then nothing to do */
@@ -317,7 +392,14 @@
 		return 0;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
-	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+	/*
+	 * Synchronise all PTE updates for the new mapping before there's
+	 * a chance for anything to kick off a table walk for the new iova.
+	 */
+	wmb();
+
+	return ret;
 }
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
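The wmb() added to arm_lpae_map() orders all PTE stores before anything that could start a table walk for the new iova observes the mapping as live. A userspace analogue, with a C11 release store playing the barrier's role:

	#include <stdatomic.h>
	#include <stdio.h>

	static int pte;			/* stands in for the page-table update(s) */
	static atomic_int published;	/* "the iova may now be used" flag */

	int main(void)
	{
		pte = 42;		/* all PTE writes happen first ... */
		atomic_store_explicit(&published, 1, memory_order_release);
		/* ... only then does the mapping become visible (the wmb() role) */
		printf("mapping published\n");
		return 0;
	}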
@@ -347,7 +429,7 @@
 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 	}
 
-	free_pages_exact(start, table_size);
+	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@
 	unsigned long blk_start, blk_end;
 	phys_addr_t blk_paddr;
 	arm_lpae_iopte table = 0;
-	void *cookie = data->iop.cookie;
-	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
 	blk_start = iova & ~(blk_size - 1);
 	blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@
 		}
 	}
 
-	*ptep = table;
-	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+	__arm_lpae_set_pte(ptep, table, cfg);
 	iova &= ~(blk_size - 1);
-	tlb->tlb_add_flush(iova, blk_size, true, cookie);
+	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
 	return size;
 }
 
@@ -418,13 +498,12 @@
 
 	/* If the size matches this level, we're in the right place */
 	if (size == blk_size) {
-		*ptep = 0;
-		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
 			tlb->tlb_add_flush(iova, size, false, cookie);
-			tlb->tlb_sync(data->iop.cookie);
+			tlb->tlb_sync(cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else {
@@ -640,11 +719,12 @@
 	cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
 	/* Looking good; allocate a pgd */
-	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
 	if (!data->pgd)
 		goto out_free_data;
 
-	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+	/* Ensure the empty pgd is visible before any actual TTBR write */
+	wmb();
 
 	/* TTBRs */
 	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@
 	cfg->arm_lpae_s2_cfg.vtcr = reg;
 
 	/* Allocate pgd pages */
-	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
 	if (!data->pgd)
 		goto out_free_data;
 
-	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+	/* Ensure the empty pgd is visible before any actual TTBR write */
+	wmb();
 
 	/* VTTBR */
 	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-	WARN_ON(cookie != cfg_cookie);
-}
-
 static struct iommu_gather_ops dummy_tlb_ops __initdata = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
 	.tlb_add_flush	= dummy_tlb_add_flush,
 	.tlb_sync	= dummy_tlb_sync,
-	.flush_pgtable	= dummy_flush_pgtable,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 6436fe2..6f2e319 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -24,11 +24,6 @@
 
 #include "io-pgtable.h"
 
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
-
 static const struct io_pgtable_init_fns *
 io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 {
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 10e32f6..ac9e234 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -17,8 +17,9 @@
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queue TLB invalidation has taken effect.
- * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
+ *                 any corresponding page table updates are visible to the
+ *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -28,7 +29,6 @@
 	void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
 			      void *cookie);
 	void (*tlb_sync)(void *cookie);
-	void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
 };
 
 /**
@@ -41,6 +41,8 @@
  * @ias:           Input address (iova) size, in bits.
  * @oas:           Output address (paddr) size, in bits.
  * @tlb:           TLB management callbacks for this set of tables.
+ * @iommu_dev:     The device representing the DMA configuration for the
+ *                 page table walker.
  */
 struct io_pgtable_cfg {
 	#define IO_PGTABLE_QUIRK_ARM_NS	(1 << 0)	/* Set NS bit in PTEs */
@@ -49,6 +51,7 @@
 	unsigned int			ias;
 	unsigned int			oas;
 	const struct iommu_gather_ops	*tlb;
+	struct device			*iommu_dev;
 
 	/* Low-level data specific to the table format */
 	union {
@@ -140,4 +143,9 @@
 	void (*free)(struct io_pgtable *iop);
 };
 
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
 #endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1a67c53..8cf605f 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -283,24 +283,10 @@
 	/* The hardware doesn't support selective TLB flush. */
 }
 
-static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
-	struct ipmmu_vmsa_domain *domain = cookie;
-
-	/*
-	 * TODO: Add support for coherent walk through CCI with DVM and remove
-	 * cache handling.
-	 */
-	dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
-		     DMA_TO_DEVICE);
-}
-
 static struct iommu_gather_ops ipmmu_gather_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
 	.tlb_add_flush = ipmmu_tlb_add_flush,
 	.tlb_sync = ipmmu_tlb_flush_all,
-	.flush_pgtable = ipmmu_flush_pgtable,
 };
 
 /* -----------------------------------------------------------------------------
@@ -327,6 +313,11 @@
 	domain->cfg.ias = 32;
 	domain->cfg.oas = 40;
 	domain->cfg.tlb = &ipmmu_gather_ops;
+	/*
+	 * TODO: Add support for coherent walk through CCI with DVM and remove
+	 * cache handling. For now, delegate it to the io-pgtable code.
+	 */
+	domain->cfg.iommu_dev = domain->mmu->dev;
 
 	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
 					   domain);
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 2d99930..913455a 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -84,7 +84,7 @@
 bool irq_remapping_cap(enum irq_remap_cap cap)
 {
 	if (!remap_ops || disable_irq_post)
-		return 0;
+		return false;
 
 	return (remap_ops->capability & (1 << cap));
 }
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 15a2063..e321fa5 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -106,8 +106,8 @@
 #endif
 
 	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
-			BUG();
+
+		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
 
 		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
 		BUG_ON(!iommu_drvdata);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 43429ab..60ba238 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -141,10 +141,12 @@
 	struct iommu_ops *ops = NULL;
 	int idx = 0;
 
-	if (dev_is_pci(dev)) {
-		dev_err(dev, "IOMMU is currently not supported for PCI\n");
+	/*
+	 * We can't do much for PCI devices without knowing how
+	 * device IDs are wired up from the PCI bus to the IOMMU.
+	 */
+	if (dev_is_pci(dev))
 		return NULL;
-	}
 
 	/*
 	 * We don't currently walk up the tree looking for a parent IOMMU.
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f3d20a2..0717aa9 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 #include <linux/debugfs.h>
 #include <linux/platform_data/iommu-omap.h>
 
@@ -29,6 +30,59 @@
 	return !obj->domain;
 }
 
+#define pr_reg(name)							\
+	do {								\
+		ssize_t bytes;						\
+		const char *str = "%20s: %08x\n";			\
+		const int maxcol = 32;					\
+		bytes = snprintf(p, maxcol, str, __stringify(name),	\
+				 iommu_read_reg(obj, MMU_##name));	\
+		p += bytes;						\
+		len -= bytes;						\
+		if (len < maxcol)					\
+			goto out;					\
+	} while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+	char *p = buf;
+
+	pr_reg(REVISION);
+	pr_reg(IRQSTATUS);
+	pr_reg(IRQENABLE);
+	pr_reg(WALKING_ST);
+	pr_reg(CNTL);
+	pr_reg(FAULT_AD);
+	pr_reg(TTB);
+	pr_reg(LOCK);
+	pr_reg(LD_TLB);
+	pr_reg(CAM);
+	pr_reg(RAM);
+	pr_reg(GFLUSH);
+	pr_reg(FLUSH_ENTRY);
+	pr_reg(READ_CAM);
+	pr_reg(READ_RAM);
+	pr_reg(EMU_FAULT_AD);
+out:
+	return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+				   ssize_t bytes)
+{
+	if (!obj || !buf)
+		return -EINVAL;
+
+	pm_runtime_get_sync(obj->dev);
+
+	bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+
+	pm_runtime_put_sync(obj->dev);
+
+	return bytes;
+}
+
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
@@ -55,34 +109,71 @@
 	return bytes;
 }
 
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
-			      size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 {
-	struct omap_iommu *obj = file->private_data;
-	char *p, *buf;
-	ssize_t bytes, rest;
+	int i;
+	struct iotlb_lock saved;
+	struct cr_regs tmp;
+	struct cr_regs *p = crs;
+
+	pm_runtime_get_sync(obj->dev);
+	iotlb_lock_get(obj, &saved);
+
+	for_each_iotlb_cr(obj, num, i, tmp) {
+		if (!iotlb_cr_valid(&tmp))
+			continue;
+		*p++ = tmp;
+	}
+
+	iotlb_lock_set(obj, &saved);
+	pm_runtime_put_sync(obj->dev);
+
+	return p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+			     struct seq_file *s)
+{
+	return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+			  (cr->cam & MMU_CAM_P) ? 1 : 0);
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+	int i, num;
+	struct cr_regs *cr;
+
+	num = obj->nr_tlb_entries;
+
+	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+	if (!cr)
+		return 0;
+
+	num = __dump_tlb_entries(obj, cr, num);
+	for (i = 0; i < num; i++)
+		iotlb_dump_cr(obj, cr + i, s);
+	kfree(cr);
+
+	return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+	struct omap_iommu *obj = s->private;
 
 	if (is_omap_iommu_detached(obj))
 		return -EPERM;
 
-	buf = kmalloc(count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
 	mutex_lock(&iommu_debug_lock);
 
-	p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
-	p += sprintf(p, "-----------------------------------------\n");
-	rest = count - (p - buf);
-	p += omap_dump_tlb_entries(obj, p, rest);
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+	seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+	seq_puts(s, "-----------------------------------------\n");
+	omap_dump_tlb_entries(obj, s);
 
 	mutex_unlock(&iommu_debug_lock);
-	kfree(buf);
 
-	return bytes;
+	return 0;
 }
 
 static void dump_ioptable(struct seq_file *s)
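For reference, the conversion follows the standard single-open seq_file shape of this era: the show callback just prints, and seq_file handles buffer sizing and the copy to userspace. A sketch (the foo_* names are hypothetical, and this fragment only builds inside the kernel):

	static int foo_show(struct seq_file *s, void *data)
	{
		seq_puts(s, "hello\n");
		return 0;
	}

	static int foo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_show, inode->i_private);
	}

	static const struct file_operations foo_fops = {
		.open		= foo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};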
@@ -154,10 +245,10 @@
 		.open = simple_open,					\
 		.read = debug_read_##name,				\
 		.llseek = generic_file_llseek,				\
-	};
+	}
 
 DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
 DEBUG_SEQ_FOPS_RO(pagetable);
 
 #define __DEBUG_ADD_FILE(attr, mode)					\
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index a22c33d..36d0033 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -38,11 +37,6 @@
 #define to_iommu(dev)							\
 	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
 
-#define for_each_iotlb_cr(obj, n, __i, cr)				\
-	for (__i = 0;							\
-	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
-	     __i++)
-
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
@@ -72,11 +66,6 @@
 #define MMU_LOCK_VICT(x)	\
 	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
 
-struct iotlb_lock {
-	short base;
-	short vict;
-};
-
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
@@ -213,14 +202,6 @@
 /*
  *	TLB operations
  */
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
-	if (!cr)
-		return -EINVAL;
-
-	return cr->cam & MMU_CAM_V;
-}
-
 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 {
 	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@
 	return status;
 }
 
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 {
 	u32 val;
 
@@ -268,10 +249,9 @@
 
 	l->base = MMU_LOCK_BASE(val);
 	l->vict = MMU_LOCK_VICT(val);
-
 }
 
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 {
 	u32 val;
 
@@ -297,7 +277,7 @@
 }
 
 /* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
 	struct cr_regs cr;
 	struct iotlb_lock l;
@@ -468,129 +448,6 @@
 	pm_runtime_put_sync(obj->dev);
 }
 
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name)							\
-	do {								\
-		ssize_t bytes;						\
-		const char *str = "%20s: %08x\n";			\
-		const int maxcol = 32;					\
-		bytes = snprintf(p, maxcol, str, __stringify(name),	\
-				 iommu_read_reg(obj, MMU_##name));	\
-		p += bytes;						\
-		len -= bytes;						\
-		if (len < maxcol)					\
-			goto out;					\
-	} while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
-	char *p = buf;
-
-	pr_reg(REVISION);
-	pr_reg(IRQSTATUS);
-	pr_reg(IRQENABLE);
-	pr_reg(WALKING_ST);
-	pr_reg(CNTL);
-	pr_reg(FAULT_AD);
-	pr_reg(TTB);
-	pr_reg(LOCK);
-	pr_reg(LD_TLB);
-	pr_reg(CAM);
-	pr_reg(RAM);
-	pr_reg(GFLUSH);
-	pr_reg(FLUSH_ENTRY);
-	pr_reg(READ_CAM);
-	pr_reg(READ_RAM);
-	pr_reg(EMU_FAULT_AD);
-out:
-	return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-	if (!obj || !buf)
-		return -EINVAL;
-
-	pm_runtime_get_sync(obj->dev);
-
-	bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-
-	pm_runtime_put_sync(obj->dev);
-
-	return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
-	int i;
-	struct iotlb_lock saved;
-	struct cr_regs tmp;
-	struct cr_regs *p = crs;
-
-	pm_runtime_get_sync(obj->dev);
-	iotlb_lock_get(obj, &saved);
-
-	for_each_iotlb_cr(obj, num, i, tmp) {
-		if (!iotlb_cr_valid(&tmp))
-			continue;
-		*p++ = tmp;
-	}
-
-	iotlb_lock_set(obj, &saved);
-	pm_runtime_put_sync(obj->dev);
-
-	return  p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj:	target iommu
- * @cr:		contents of cam and ram register
- * @buf:	output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
-			     char *buf)
-{
-	char *p = buf;
-
-	/* FIXME: Need more detail analysis of cam/ram */
-	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
-					(cr->cam & MMU_CAM_P) ? 1 : 0);
-
-	return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj:	target iommu
- * @buf:	output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-	int i, num;
-	struct cr_regs *cr;
-	char *p = buf;
-
-	num = bytes / sizeof(*cr);
-	num = min(obj->nr_tlb_entries, num);
-
-	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
-	if (!cr)
-		return 0;
-
-	num = __dump_tlb_entries(obj, cr, num);
-	for (i = 0; i < num; i++)
-		p += iotlb_dump_cr(obj, cr + i, p);
-	kfree(cr);
-
-	return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
 /*
  *	H/W pagetable operations
  */
@@ -930,14 +787,14 @@
 
 	if (!iopgd_is_table(*iopgd)) {
 		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
-				obj->name, errs, da, iopgd, *iopgd);
+			obj->name, errs, da, iopgd, *iopgd);
 		return IRQ_NONE;
 	}
 
 	iopte = iopte_offset(iopgd, da);
 
 	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
-			obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
+		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
 	return IRQ_NONE;
 }
@@ -963,9 +820,8 @@
 	struct device *dev;
 	struct omap_iommu *obj;
 
-	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
-				(void *)name,
-				device_match_by_alias);
+	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+				 device_match_by_alias);
 	if (!dev)
 		return ERR_PTR(-ENODEV);
 
@@ -1089,7 +945,6 @@
 	{ .compatible = "ti,dra7-iommu"	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
 
 static struct platform_driver omap_iommu_driver = {
 	.probe	= omap_iommu_probe,
@@ -1121,7 +976,7 @@
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, size_t bytes, int prot)
+			  phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    size_t size)
+			       size_t size)
 {
 	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@
 }
 
 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
-			struct device *dev)
+				   struct device *dev)
 {
 	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@
 }
 
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
-				 struct device *dev)
+				  struct device *dev)
 {
 	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
@@ -1237,16 +1092,12 @@
 		return NULL;
 
 	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
-	if (!omap_domain) {
-		pr_err("kzalloc failed\n");
+	if (!omap_domain)
 		goto out;
-	}
 
 	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
-	if (!omap_domain->pgtable) {
-		pr_err("kzalloc failed\n");
+	if (!omap_domain->pgtable)
 		goto fail_nomem;
-	}
 
 	/*
 	 * should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@
 }
 
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
-					  dma_addr_t da)
+					   dma_addr_t da)
 {
 	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@
 			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
 		else
 			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
-							(unsigned long long)da);
+				(unsigned long long)da);
 	} else {
 		if (iopgd_is_section(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@
 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
 		else
 			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
-							(unsigned long long)da);
+				(unsigned long long)da);
 	}
 
 	return ret;
@@ -1405,20 +1256,5 @@
 
 	return platform_driver_register(&omap_iommu_driver);
 }
-/* must be ready before omap3isp is probed */
 subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
-	kmem_cache_destroy(iopte_cachep);
-
-	platform_driver_unregister(&omap_iommu_driver);
-
-	omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index d736630..a656df2 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -13,16 +13,18 @@
 #ifndef _OMAP_IOMMU_H
 #define _OMAP_IOMMU_H
 
+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr)				\
+	for (__i = 0;							\
+	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
+	     __i++)
+
 struct iotlb_entry {
 	u32 da;
 	u32 pa;
 	u32 pgsz, prsvd, valid;
-	union {
-		u16 ap;
-		struct {
-			u32 endian, elsz, mixed;
-		};
-	};
+	u32 endian, elsz, mixed;
 };
 
 struct omap_iommu {
@@ -49,20 +51,13 @@
 };
 
 struct cr_regs {
-	union {
-		struct {
-			u16 cam_l;
-			u16 cam_h;
-		};
-		u32 cam;
-	};
-	union {
-		struct {
-			u16 ram_l;
-			u16 ram_h;
-		};
-		u32 ram;
-	};
+	u32 cam;
+	u32 ram;
+};
+
+struct iotlb_lock {
+	short base;
+	short vict;
 };
 
 /**
@@ -103,11 +98,11 @@
  * MMU Register bit definitions
  */
 /* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT	(1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
-#define MMU_IRQ_EMUMISS		(1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT	(1 << 1)
-#define MMU_IRQ_TLBMISS		(1 << 0)
+#define MMU_IRQ_MULTIHITFAULT	BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT	BIT(3)
+#define MMU_IRQ_EMUMISS		BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT	BIT(1)
+#define MMU_IRQ_TLBMISS		BIT(0)
 
 #define __MMU_IRQ_FAULT		\
 	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
@@ -119,16 +114,16 @@
 /* MMU_CNTL */
 #define MMU_CNTL_SHIFT		1
 #define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB	(1 << 3)
-#define MMU_CNTL_TWL_EN		(1 << 2)
-#define MMU_CNTL_MMU_EN		(1 << 1)
+#define MMU_CNTL_EML_TLB	BIT(3)
+#define MMU_CNTL_TWL_EN		BIT(2)
+#define MMU_CNTL_MMU_EN		BIT(1)
 
 /* CAM */
 #define MMU_CAM_VATAG_SHIFT	12
 #define MMU_CAM_VATAG_MASK \
 	((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P		(1 << 3)
-#define MMU_CAM_V		(1 << 2)
+#define MMU_CAM_P		BIT(3)
+#define MMU_CAM_V		BIT(2)
 #define MMU_CAM_PGSZ_MASK	3
 #define MMU_CAM_PGSZ_1M		(0 << 0)
 #define MMU_CAM_PGSZ_64K	(1 << 0)
@@ -141,9 +136,9 @@
 	((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
 
 #define MMU_RAM_ENDIAN_SHIFT	9
-#define MMU_RAM_ENDIAN_MASK	(1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK	BIT(MMU_RAM_ENDIAN_SHIFT)
 #define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG	(1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG	BIT(MMU_RAM_ENDIAN_SHIFT)
 
 #define MMU_RAM_ELSZ_SHIFT	7
 #define MMU_RAM_ELSZ_MASK	(3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@
 #define MMU_RAM_ELSZ_32		(2 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_ELSZ_NONE	(3 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_MIXED_SHIFT	6
-#define MMU_RAM_MIXED_MASK	(1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK	BIT(MMU_RAM_MIXED_SHIFT)
 #define MMU_RAM_MIXED		MMU_RAM_MIXED_MASK
 
 #define MMU_GP_REG_BUS_ERR_BACK_EN	0x1
@@ -190,12 +185,12 @@
 /*
  * global functions
  */
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
 
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
+
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
 void omap_iommu_debugfs_init(void);
 void omap_iommu_debugfs_exit(void);
 
@@ -222,4 +217,12 @@
 	__raw_writel(val, obj->regbase + offs);
 }
 
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+	if (!cr)
+		return -EINVAL;
+
+	return cr->cam & MMU_CAM_V;
+}
+
 #endif /* _OMAP_IOMMU_H */
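
For reference, with __iotlb_read_cr(), the iotlb_lock helpers and
iotlb_cr_valid() now exposed by this header, out-of-file consumers (such as
the debugfs code) can walk the TLB directly. A minimal sketch of that
pattern, assuming only the declarations above (the function name is
hypothetical):

static int example_count_valid_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock saved;
	struct cr_regs cr;
	int i, valid = 0;

	iotlb_lock_get(obj, &saved);		/* preserve base/victim state */

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr)
		if (iotlb_cr_valid(&cr) > 0)	/* MMU_CAM_V set */
			valid++;

	iotlb_lock_set(obj, &saved);		/* restore lock registers */

	return valid;
}
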
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f891683..01a3152 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -10,25 +10,30 @@
  * published by the Free Software Foundation.
  */
 
+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
 /*
  * "L2 table" address mask and size definitions.
  */
 #define IOPGD_SHIFT		20
-#define IOPGD_SIZE		(1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE		BIT(IOPGD_SHIFT)
 #define IOPGD_MASK		(~(IOPGD_SIZE - 1))
 
 /*
  * "section" address mask and size definitions.
  */
 #define IOSECTION_SHIFT		20
-#define IOSECTION_SIZE		(1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE		BIT(IOSECTION_SHIFT)
 #define IOSECTION_MASK		(~(IOSECTION_SIZE - 1))
 
 /*
  * "supersection" address mask and size definitions.
  */
 #define IOSUPER_SHIFT		24
-#define IOSUPER_SIZE		(1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE		BIT(IOSUPER_SHIFT)
 #define IOSUPER_MASK		(~(IOSUPER_SIZE - 1))
 
 #define PTRS_PER_IOPGD		(1UL << (32 - IOPGD_SHIFT))
@@ -38,14 +43,14 @@
  * "small page" address mask and size definitions.
  */
 #define IOPTE_SHIFT		12
-#define IOPTE_SIZE		(1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE		BIT(IOPTE_SHIFT)
 #define IOPTE_MASK		(~(IOPTE_SIZE - 1))
 
 /*
  * "large page" address mask and size definitions.
  */
 #define IOLARGE_SHIFT		16
-#define IOLARGE_SIZE		(1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE		BIT(IOLARGE_SHIFT)
 #define IOLARGE_MASK		(~(IOLARGE_SIZE - 1))
 
 #define PTRS_PER_IOPTE		(1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@
 /*
  * some descriptor attributes.
  */
-#define IOPGD_TABLE		(1 << 0)
-#define IOPGD_SECTION		(2 << 0)
-#define IOPGD_SUPER		(1 << 18 | 2 << 0)
+#define IOPGD_TABLE		(1)
+#define IOPGD_SECTION		(2)
+#define IOPGD_SUPER		(BIT(18) | IOPGD_SECTION)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
 #define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
 #define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
-#define IOPTE_SMALL		(2 << 0)
-#define IOPTE_LARGE		(1 << 0)
+#define IOPTE_SMALL		(2)
+#define IOPTE_LARGE		(1)
 
 #define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
 #define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@
 /* to find an entry in the second-level page table. */
 #define iopte_index(da)		(((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
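
Each SHIFT/SIZE/MASK triple above follows the same pattern: SIZE is a single
BIT(), and MASK clears the offset bits below it, so a device address splits
into an aligned base and an offset by masking alone. A small illustration
for the 1MB section case (helper names are hypothetical):

static inline u32 example_section_base(u32 da)
{
	/* e.g. 0x12345678 & 0xfff00000 == 0x12300000 */
	return da & IOSECTION_MASK;
}

static inline u32 example_section_offset(u32 da)
{
	/* e.g. 0x12345678 & 0x000fffff == 0x00045678 */
	return da & (IOSECTION_SIZE - 1);
}
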
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c1f2e52..9305964 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -27,6 +27,7 @@
 	const struct tegra_smmu_soc *soc;
 
 	unsigned long pfn_mask;
+	unsigned long tlb_mask;
 
 	unsigned long *asids;
 	struct mutex lock;
@@ -40,8 +41,10 @@
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
-	struct page *count;
+	u32 *count;
+	struct page **pts;
 	struct page *pd;
+	dma_addr_t pd_dma;
 	unsigned id;
 	u32 attr;
 };
@@ -68,7 +71,8 @@
 #define SMMU_TLB_CONFIG 0x14
 #define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
 #define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
 
 #define SMMU_PTC_CONFIG 0x18
 #define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@
 #define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
 
 #define SMMU_PTB_DATA 0x020
-#define  SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
 
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
 
 #define SMMU_TLB_FLUSH 0x030
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
@@ -134,29 +138,49 @@
 #define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
 				 SMMU_PTE_NONSECURE)
 
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+	addr >>= 12;
+	return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+	return pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
 				  unsigned long offset)
 {
-	phys_addr_t phys = page ? page_to_phys(page) : 0;
 	u32 value;
 
-	if (page) {
-		offset &= ~(smmu->mc->soc->atom_size - 1);
+	offset &= ~(smmu->mc->soc->atom_size - 1);
 
-		if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+	if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
 #else
-			value = 0;
+		value = 0;
 #endif
-			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
-		}
-
-		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
-	} else {
-		value = SMMU_PTC_FLUSH_TYPE_ALL;
+		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
 	}
 
+	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
 	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
 }
 
@@ -236,8 +260,6 @@
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
 	struct tegra_smmu_as *as;
-	unsigned int i;
-	uint32_t *pd;
 
 	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
@@ -248,32 +270,26 @@
 
 	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
 	if (!as->pd) {
 		kfree(as);
 		return NULL;
 	}
 
-	as->count = alloc_page(GFP_KERNEL);
+	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
 		__free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
 
-	/* clear PDEs */
-	pd = page_address(as->pd);
-	SetPageReserved(as->pd);
-
-	for (i = 0; i < SMMU_NUM_PDE; i++)
-		pd[i] = 0;
-
-	/* clear PDE usage counters */
-	pd = page_address(as->count);
-	SetPageReserved(as->count);
-
-	for (i = 0; i < SMMU_NUM_PDE; i++)
-		pd[i] = 0;
+	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+	if (!as->pts) {
+		kfree(as->count);
+		__free_page(as->pd);
+		kfree(as);
+		return NULL;
+	}
 
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 
 	/* TODO: free page directory and page tables */
-	ClearPageReserved(as->pd);
 
 	kfree(as);
 }
@@ -376,16 +391,26 @@
 		return 0;
 	}
 
+	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(smmu->dev, as->pd_dma))
+		return -ENOMEM;
+
+	/* We can't handle 64-bit DMA addresses */
+	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+		err = -ENOMEM;
+		goto err_unmap;
+	}
+
 	err = tegra_smmu_alloc_asid(smmu, &as->id);
 	if (err < 0)
-		return err;
+		goto err_unmap;
 
-	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
-	smmu_flush_ptc(smmu, as->pd, 0);
+	smmu_flush_ptc(smmu, as->pd_dma, 0);
 	smmu_flush_tlb_asid(smmu, as->id);
 
 	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
-	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
 	smmu_writel(smmu, value, SMMU_PTB_DATA);
 	smmu_flush(smmu);
 
@@ -393,6 +418,10 @@
 	as->use_count++;
 
 	return 0;
+
+err_unmap:
+	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@
 		return;
 
 	tegra_smmu_free_asid(smmu, as->id);
+
+	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
 	as->smmu = NULL;
 }
 
@@ -465,96 +497,155 @@
 	}
 }
 
-static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-		       struct page **pagep)
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+			       u32 value)
 {
-	u32 *pd = page_address(as->pd), *pt, *count;
-	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	struct page *page;
-	unsigned int i;
+	u32 *pd = page_address(as->pd);
+	unsigned long offset = pd_index * sizeof(*pd);
 
-	if (pd[pde] == 0) {
-		page = alloc_page(GFP_KERNEL | __GFP_DMA);
+	/* Set the page directory entry first */
+	pd[pd_index] = value;
+
+	/* Then flush the page directory entry from caches */
+	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+					 sizeof(*pd), DMA_TO_DEVICE);
+
+	/* And flush the iommu */
+	smmu_flush_ptc(smmu, as->pd_dma, offset);
+	smmu_flush_tlb_section(smmu, as->id, iova);
+	smmu_flush(smmu);
+}
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+	u32 *pt = page_address(pt_page);
+
+	return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+				  dma_addr_t *dmap)
+{
+	unsigned int pd_index = iova_pd_index(iova);
+	struct page *pt_page;
+	u32 *pd;
+
+	pt_page = as->pts[pd_index];
+	if (!pt_page)
+		return NULL;
+
+	pd = page_address(as->pd);
+	*dmap = smmu_pde_to_dma(pd[pd_index]);
+
+	return tegra_smmu_pte_offset(pt_page, iova);
+}
+
+static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
+		       dma_addr_t *dmap)
+{
+	unsigned int pde = iova_pd_index(iova);
+	struct tegra_smmu *smmu = as->smmu;
+
+	if (!as->pts[pde]) {
+		struct page *page;
+		dma_addr_t dma;
+
+		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
 		if (!page)
 			return NULL;
 
-		pt = page_address(page);
-		SetPageReserved(page);
+		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+				   DMA_TO_DEVICE);
+		if (dma_mapping_error(smmu->dev, dma)) {
+			__free_page(page);
+			return NULL;
+		}
 
-		for (i = 0; i < SMMU_NUM_PTE; i++)
-			pt[i] = 0;
+		if (!smmu_dma_addr_valid(smmu, dma)) {
+			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+				       DMA_TO_DEVICE);
+			__free_page(page);
+			return NULL;
+		}
 
-		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+		as->pts[pde] = page;
 
-		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+							      SMMU_PDE_NEXT));
 
-		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
-		smmu_flush_ptc(smmu, as->pd, pde << 2);
-		smmu_flush_tlb_section(smmu, as->id, iova);
-		smmu_flush(smmu);
+		*dmap = dma;
 	} else {
-		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
-		pt = page_address(page);
+		u32 *pd = page_address(as->pd);
+
+		*dmap = smmu_pde_to_dma(pd[pde]);
 	}
 
-	*pagep = page;
-
-	/* Keep track of entries in this page table. */
-	count = page_address(as->count);
-	if (pt[pte] == 0)
-		count[pde]++;
-
-	return &pt[pte];
+	return tegra_smmu_pte_offset(as->pts[pde], iova);
 }
 
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
 {
-	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
-	u32 *count = page_address(as->count);
-	u32 *pd = page_address(as->pd), *pt;
-	struct page *page;
+	unsigned int pd_index = iova_pd_index(iova);
 
-	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
-	pt = page_address(page);
+	as->count[pd_index]++;
+}
+
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+	unsigned int pde = iova_pd_index(iova);
+	struct page *page = as->pts[pde];
 
 	/*
 	 * When no entries in this page table are used anymore, return the
 	 * memory page to the system.
 	 */
-	if (pt[pte] != 0) {
-		if (--count[pde] == 0) {
-			ClearPageReserved(page);
-			__free_page(page);
-			pd[pde] = 0;
-		}
+	if (--as->count[pde] == 0) {
+		struct tegra_smmu *smmu = as->smmu;
+		u32 *pd = page_address(as->pd);
+		dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
 
-		pt[pte] = 0;
+		tegra_smmu_set_pde(as, iova, 0);
+
+		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+		__free_page(page);
+		as->pts[pde] = NULL;
 	}
 }
 
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+			       u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+	struct tegra_smmu *smmu = as->smmu;
+	unsigned long offset = offset_in_page(pte);
+
+	*pte = val;
+
+	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+					 4, DMA_TO_DEVICE);
+	smmu_flush_ptc(smmu, pte_dma, offset);
+	smmu_flush_tlb_group(smmu, as->id, iova);
+	smmu_flush(smmu);
+}
+
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			  phys_addr_t paddr, size_t size, int prot)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
-	struct tegra_smmu *smmu = as->smmu;
-	unsigned long offset;
-	struct page *page;
+	dma_addr_t pte_dma;
 	u32 *pte;
 
-	pte = as_get_pte(as, iova, &page);
+	pte = as_get_pte(as, iova, &pte_dma);
 	if (!pte)
 		return -ENOMEM;
 
-	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
-	offset = offset_in_page(pte);
+	/* If we aren't overwriting a pre-existing entry, increment use */
+	if (*pte == 0)
+		tegra_smmu_pte_get_use(as, iova);
 
-	smmu->soc->ops->flush_dcache(page, offset, 4);
-	smmu_flush_ptc(smmu, page, offset);
-	smmu_flush_tlb_group(smmu, as->id, iova);
-	smmu_flush(smmu);
+	tegra_smmu_set_pte(as, iova, pte, pte_dma,
+			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
 
 	return 0;
 }
@@ -563,22 +654,15 @@
 			       size_t size)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
-	struct tegra_smmu *smmu = as->smmu;
-	unsigned long offset;
-	struct page *page;
+	dma_addr_t pte_dma;
 	u32 *pte;
 
-	pte = as_get_pte(as, iova, &page);
-	if (!pte)
+	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+	if (!pte || !*pte)
 		return 0;
 
-	offset = offset_in_page(pte);
-	as_put_pte(as, iova);
-
-	smmu->soc->ops->flush_dcache(page, offset, 4);
-	smmu_flush_ptc(smmu, page, offset);
-	smmu_flush_tlb_group(smmu, as->id, iova);
-	smmu_flush(smmu);
+	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+	tegra_smmu_pte_put_use(as, iova);
 
 	return size;
 }
@@ -587,11 +671,14 @@
 					   dma_addr_t iova)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
-	struct page *page;
 	unsigned long pfn;
+	dma_addr_t pte_dma;
 	u32 *pte;
 
-	pte = as_get_pte(as, iova, &page);
+	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+	if (!pte || !*pte)
+		return 0;
+
 	pfn = *pte & as->smmu->pfn_mask;
 
 	return PFN_PHYS(pfn);
@@ -816,6 +903,9 @@
 	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
 	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
 		mc->soc->num_address_bits, smmu->pfn_mask);
+	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+		smmu->tlb_mask);
 
 	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
 
@@ -825,14 +915,14 @@
 	smmu_writel(smmu, value, SMMU_PTC_CONFIG);
 
 	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
-		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
 
 	if (soc->supports_round_robin_arbitration)
 		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
 
 	smmu_writel(smmu, value, SMMU_TLB_CONFIG);
 
-	smmu_flush_ptc(smmu, NULL, 0);
+	smmu_flush_ptc_all(smmu);
 	smmu_flush_tlb(smmu);
 	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
 	smmu_flush(smmu);
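
The tegra-smmu conversion above replaces the SoC-specific cache-flush hooks
with the streaming DMA API: each page-table page is mapped DMA_TO_DEVICE
once, and every CPU-side update is pushed out with a narrow sync before the
IOMMU's own caches are flushed. A minimal sketch of that ownership pattern,
with hypothetical names and no Tegra specifics:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_publish_table(struct device *dev, struct page *pt,
				 dma_addr_t *dmap)
{
	/* Hand the page to the device; the CPU may only modify it
	 * between explicit sync calls from here on. */
	dma_addr_t dma = dma_map_page(dev, pt, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*dmap = dma;
	return 0;
}

static void example_update_entry(struct device *dev, dma_addr_t dma,
				 u32 *entry, u32 val)
{
	unsigned long offset = offset_in_page(entry);

	*entry = val;

	/* Write back just the modified word so the device sees it. */
	dma_sync_single_range_for_device(dev, dma, offset, sizeof(*entry),
					 DMA_TO_DEVICE);
}
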
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index e406bc5..7deed6e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -31,6 +31,7 @@
 #include <asm/cputype.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 
 #include "irq-gic-common.h"
 
@@ -50,6 +51,7 @@
 };
 
 static struct gic_chip_data gic_data __read_mostly;
+static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
 
 #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
@@ -68,6 +70,11 @@
 	return gic_irq(d) < 32;
 }
 
+static inline bool forwarded_irq(struct irq_data *d)
+{
+	return d->handler_data != NULL;
+}
+
 static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
 	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
@@ -231,6 +238,21 @@
 	gic_poke_irq(d, GICD_ICENABLER);
 }
 
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * no one to deactivate it (the guest is being terminated).
+	 */
+	if (forwarded_irq(d))
+		gic_poke_irq(d, GICD_ICACTIVER);
+}
+
 static void gic_unmask_irq(struct irq_data *d)
 {
 	gic_poke_irq(d, GICD_ISENABLER);
@@ -296,6 +318,17 @@
 	gic_write_eoir(gic_irq(d));
 }
 
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+	/*
+	 * No need to deactivate an LPI, or an interrupt that
+	 * is getting forwarded to a vcpu.
+	 */
+	if (gic_irq(d) >= 8192 || forwarded_irq(d))
+		return;
+	gic_write_dir(gic_irq(d));
+}
+
 static int gic_set_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int irq = gic_irq(d);
@@ -322,6 +355,12 @@
 	return gic_configure_irq(irq, type, base, rwp_wait);
 }
 
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	d->handler_data = vcpu;
+	return 0;
+}
+
 static u64 gic_mpidr_to_affinity(u64 mpidr)
 {
 	u64 aff;
@@ -343,15 +382,26 @@
 
 		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
 			int err;
+
+			if (static_key_true(&supports_deactivate))
+				gic_write_eoir(irqnr);
+
 			err = handle_domain_irq(gic_data.domain, irqnr, regs);
 			if (err) {
 				WARN_ONCE(true, "Unexpected interrupt received!\n");
-				gic_write_eoir(irqnr);
+				if (static_key_true(&supports_deactivate)) {
+					if (irqnr < 8192)
+						gic_write_dir(irqnr);
+				} else {
+					gic_write_eoir(irqnr);
+				}
 			}
 			continue;
 		}
 		if (irqnr < 16) {
 			gic_write_eoir(irqnr);
+			if (static_key_true(&supports_deactivate))
+				gic_write_dir(irqnr);
 #ifdef CONFIG_SMP
 			handle_IPI(irqnr, regs);
 #else
@@ -451,8 +501,13 @@
 	/* Set priority mask register */
 	gic_write_pmr(DEFAULT_PMR_VALUE);
 
-	/* EOI deactivates interrupt too (mode 0) */
-	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+	if (static_key_true(&supports_deactivate)) {
+		/* EOI drops priority only (mode 1) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+	} else {
+		/* EOI deactivates interrupt too (mode 0) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+	}
 
 	/* ... and let's hit the road... */
 	gic_write_grpen1(1);
@@ -661,11 +716,29 @@
 	.flags			= IRQCHIP_SET_TYPE_MASKED,
 };
 
+static struct irq_chip gic_eoimode1_chip = {
+	.name			= "GICv3",
+	.irq_mask		= gic_eoimode1_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoimode1_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SET_TYPE_MASKED,
+};
+
 #define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 			      irq_hw_number_t hw)
 {
+	struct irq_chip *chip = &gic_chip;
+
+	if (static_key_true(&supports_deactivate))
+		chip = &gic_eoimode1_chip;
+
 	/* SGIs are private to the core kernel */
 	if (hw < 16)
 		return -EPERM;
@@ -679,13 +752,13 @@
 	/* PPIs */
 	if (hw < 32) {
 		irq_set_percpu_devid(irq);
-		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_percpu_devid_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
 	}
 	/* SPIs */
 	if (hw >= 32 && hw < gic_data.irq_nr) {
-		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
@@ -693,7 +766,7 @@
 	if (hw >= 8192 && hw < GIC_ID_NR) {
 		if (!gic_dist_supports_lpis())
 			return -EPERM;
-		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID);
 	}
@@ -820,6 +893,12 @@
 	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
 		redist_stride = 0;
 
+	if (!is_hyp_mode_available())
+		static_key_slow_dec(&supports_deactivate);
+
+	if (static_key_true(&supports_deactivate))
+		pr_info("GIC: Using split EOI/Deactivate mode\n");
+
 	gic_data.dist_base = dist_base;
 	gic_data.redist_regions = rdist_regs;
 	gic_data.nr_redist_regions = nr_redist_regions;
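
The supports_deactivate logic above is built on a static key that defaults
to true and is lowered once, early, when the feature turns out to be
unusable (no HYP mode, CPU interface too small); afterwards the hot path
costs a patched branch rather than a load and a test. The general shape,
with hypothetical names:

#include <linux/jump_label.h>

static struct static_key example_feature = STATIC_KEY_INIT_TRUE;

static void __init example_detect(bool usable)
{
	/* Decide once at init time, before any hot path runs. */
	if (!usable)
		static_key_slow_dec(&example_feature);
}

static void example_hot_path(void)
{
	if (static_key_true(&example_feature)) {
		/* feature-enabled fast path, e.g. split EOI/deactivate */
	} else {
		/* conservative fallback */
	}
}
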
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index aa3e7b8..e6b7ed5 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -47,6 +47,7 @@
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 
 #include "irq-gic-common.h"
 
@@ -82,6 +83,8 @@
 #define NR_GIC_CPU_IF 8
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 
+static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
 #endif
@@ -137,6 +140,36 @@
 	return d->hwirq;
 }
 
+static inline bool cascading_gic_irq(struct irq_data *d)
+{
+	void *data = irq_data_get_irq_handler_data(d);
+
+	/*
+	 * If handler_data is pointing to one of the secondary GICs, then
+	 * this is a cascading interrupt, and it cannot possibly be
+	 * forwarded.
+	 */
+	if (data >= (void *)(gic_data + 1) &&
+	    data <  (void *)(gic_data + MAX_GIC_NR))
+		return true;
+
+	return false;
+}
+
+static inline bool forwarded_irq(struct irq_data *d)
+{
+	/*
+	 * A forwarded interrupt:
+	 * - is on the primary GIC
+	 * - has its handler_data set to a value
+	 * - that isn't a secondary GIC
+	 */
+	if (d->handler_data && !cascading_gic_irq(d))
+		return true;
+
+	return false;
+}
+
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
@@ -157,6 +190,21 @@
 	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
 }
 
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * no one to deactivate it (the guest is being terminated).
+	 */
+	if (forwarded_irq(d))
+		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
+}
+
 static void gic_unmask_irq(struct irq_data *d)
 {
 	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
@@ -167,6 +215,15 @@
 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+	/* Do not deactivate an IRQ forwarded to a vcpu. */
+	if (forwarded_irq(d))
+		return;
+
+	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+}
+
 static int gic_irq_set_irqchip_state(struct irq_data *d,
 				     enum irqchip_irq_state which, bool val)
 {
@@ -233,6 +290,16 @@
 	return gic_configure_irq(gicirq, type, base, NULL);
 }
 
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
+	if (cascading_gic_irq(d))
+		return -EINVAL;
+
+	d->handler_data = vcpu;
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
@@ -272,11 +339,15 @@
 		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
+			if (static_key_true(&supports_deactivate))
+				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
 			handle_domain_irq(gic->domain, irqnr, regs);
 			continue;
 		}
 		if (irqnr < 16) {
 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+			if (static_key_true(&supports_deactivate))
+				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
 #ifdef CONFIG_SMP
 			handle_IPI(irqnr, regs);
 #endif
@@ -329,6 +400,23 @@
 				  IRQCHIP_MASK_ON_SUSPEND,
 };
 
+static struct irq_chip gic_eoimode1_chip = {
+	.name			= "GICv2",
+	.irq_mask		= gic_eoimode1_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoimode1_eoi_irq,
+	.irq_set_type		= gic_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= gic_set_affinity,
+#endif
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
 	if (gic_nr >= MAX_GIC_NR)
@@ -360,6 +448,10 @@
 {
 	void __iomem *cpu_base = gic_data_cpu_base(gic);
 	u32 bypass = 0;
+	u32 mode = 0;
+
+	if (static_key_true(&supports_deactivate))
+		mode = GIC_CPU_CTRL_EOImodeNS;
 
 	/*
 	* Preserve bypass disable bits to be written back later
@@ -367,7 +459,7 @@
 	bypass = readl(cpu_base + GIC_CPU_CTRL);
 	bypass &= GICC_DIS_BYPASS_MASK;
 
-	writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
+	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
 }
 
 
@@ -803,13 +895,20 @@
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 				irq_hw_number_t hw)
 {
+	struct irq_chip *chip = &gic_chip;
+
+	if (static_key_true(&supports_deactivate)) {
+		if (d->host_data == (void *)&gic_data[0])
+			chip = &gic_eoimode1_chip;
+	}
+
 	if (hw < 32) {
 		irq_set_percpu_devid(irq);
-		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_percpu_devid_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
 	} else {
-		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
@@ -894,7 +993,7 @@
 	.xlate = gic_irq_domain_xlate,
 };
 
-void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
 			   u32 percpu_offset, struct device_node *node)
 {
@@ -995,6 +1094,8 @@
 		register_cpu_notifier(&gic_cpu_notifier);
 #endif
 		set_handle_irq(gic_handle_irq);
+		if (static_key_true(&supports_deactivate))
+			pr_info("GIC: Using split EOI/Deactivate mode\n");
 	}
 
 	gic_dist_init(gic);
@@ -1002,6 +1103,19 @@
 	gic_pm_init(gic);
 }
 
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+			   void __iomem *dist_base, void __iomem *cpu_base,
+			   u32 percpu_offset, struct device_node *node)
+{
+	/*
+	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
+	 * bother with these...
+	 */
+	static_key_slow_dec(&supports_deactivate);
+	__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
+			 percpu_offset, node);
+}
+
 #ifdef CONFIG_OF
 static int gic_cnt __initdata;
 
@@ -1010,6 +1124,7 @@
 {
 	void __iomem *cpu_base;
 	void __iomem *dist_base;
+	struct resource cpu_res;
 	u32 percpu_offset;
 	int irq;
 
@@ -1022,10 +1137,20 @@
 	cpu_base = of_iomap(node, 1);
 	WARN(!cpu_base, "unable to map gic cpu registers\n");
 
+	of_address_to_resource(node, 1, &cpu_res);
+
+	/*
+	 * Disable split EOI/Deactivate if either HYP is not available
+	 * or the CPU interface is too small.
+	 */
+	if (gic_cnt == 0 && (!is_hyp_mode_available() ||
+			     resource_size(&cpu_res) < SZ_8K))
+		static_key_slow_dec(&supports_deactivate);
+
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
 		percpu_offset = 0;
 
-	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
 	if (!gic_cnt)
 		gic_init_physaddr(node);
 
@@ -1141,11 +1266,19 @@
 	}
 
 	/*
+	 * Disable split EOI/Deactivate if HYP is not available. ACPI
+	 * guarantees that we'll always have a GICv2, so the CPU
+	 * interface will always be the right size.
+	 */
+	if (!is_hyp_mode_available())
+		static_key_slow_dec(&supports_deactivate);
+
+	/*
 	 * Initialize zero GIC instance (no multi-GIC support). Also, set GIC
 	 * as default IRQ domain to allow for GSI registration and GSI to IRQ
 	 * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
 	 */
-	gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
+	__gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
 	irq_set_default_host(gic_data[0].domain);
 
 	acpi_irq_model = ACPI_IRQ_MODEL_GIC;
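
Both GIC drivers now implement the same split: in EOImode 1 the EOI write
only drops the running priority, and a separate deactivate write retires
the interrupt, which can be omitted for an IRQ forwarded to a vcpu (the
guest deactivates it instead). A sketch of the resulting GICv2 flow, not
new driver code:

static void example_eoimode1_flow(struct gic_chip_data *gic,
				  void __iomem *cpu_base,
				  struct pt_regs *regs)
{
	u32 irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
	u32 irqnr = irqstat & GICC_IAR_INT_ID_MASK;

	/* Priority drop only; the interrupt stays active. */
	writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);

	handle_domain_irq(gic->domain, irqnr, regs);

	/* Retire it; skipped in the real code when forwarded_irq(). */
	writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
}
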
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index dae5914..1764bcf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -41,20 +41,46 @@
 
 static void __gic_irq_dispatch(void);
 
-static inline unsigned int gic_read(unsigned int reg)
+static inline u32 gic_read32(unsigned int reg)
 {
 	return __raw_readl(gic_base + reg);
 }
 
-static inline void gic_write(unsigned int reg, unsigned int val)
+static inline u64 gic_read64(unsigned int reg)
 {
-	__raw_writel(val, gic_base + reg);
+	return __raw_readq(gic_base + reg);
 }
 
-static inline void gic_update_bits(unsigned int reg, unsigned int mask,
-				   unsigned int val)
+static inline unsigned long gic_read(unsigned int reg)
 {
-	unsigned int regval;
+	if (!mips_cm_is64)
+		return gic_read32(reg);
+	else
+		return gic_read64(reg);
+}
+
+static inline void gic_write32(unsigned int reg, u32 val)
+{
+	return __raw_writel(val, gic_base + reg);
+}
+
+static inline void gic_write64(unsigned int reg, u64 val)
+{
+	return __raw_writeq(val, gic_base + reg);
+}
+
+static inline void gic_write(unsigned int reg, unsigned long val)
+{
+	if (!mips_cm_is64)
+		return gic_write32(reg, (u32)val);
+	else
+		return gic_write64(reg, (u64)val);
+}
+
+static inline void gic_update_bits(unsigned int reg, unsigned long mask,
+				   unsigned long val)
+{
+	unsigned long regval;
 
 	regval = gic_read(reg);
 	regval &= ~mask;
@@ -65,40 +91,40 @@
 static inline void gic_reset_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			pol << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)pol << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			trig << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)trig << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
-			1 << GIC_INTR_BIT(intr),
-			dual << GIC_INTR_BIT(intr));
+			1ul << GIC_INTR_BIT(intr),
+			(unsigned long)dual << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
 {
-	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
-		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
+	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
+		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
 }
 
 static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
@@ -113,10 +139,13 @@
 {
 	unsigned int hi, hi2, lo;
 
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+
 	do {
-		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
-		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
-		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
+		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
 	} while (hi2 != hi);
 
 	return (((cycle_t) hi) << 32) + lo;
@@ -135,10 +164,14 @@
 
 void gic_write_compare(cycle_t cnt)
 {
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 }
 
 void gic_write_cpu_compare(cycle_t cnt, int cpu)
@@ -148,10 +181,15 @@
 	local_irq_save(flags);
 
 	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 
 	local_irq_restore(flags);
 }
@@ -160,8 +198,11 @@
 {
 	unsigned int hi, lo;
 
-	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
-	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+
+	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
 
 	return (((cycle_t) hi) << 32) + lo;
 }
@@ -196,7 +237,7 @@
 	if (cpu_has_veic)
 		return true;
 
-	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
 	switch (intr) {
 	case GIC_LOCAL_INT_TIMER:
 		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
@@ -262,7 +303,7 @@
 
 static void gic_handle_shared_int(bool chained)
 {
-	unsigned int i, intr, virq;
+	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
 	unsigned long *pcpu_mask;
 	unsigned long pending_reg, intrmask_reg;
 	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -277,8 +318,8 @@
 	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
 		pending[i] = gic_read(pending_reg);
 		intrmask[i] = gic_read(intrmask_reg);
-		pending_reg += 0x4;
-		intrmask_reg += 0x4;
+		pending_reg += gic_reg_step;
+		intrmask_reg += gic_reg_step;
 	}
 
 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -425,8 +466,8 @@
 	unsigned long pending, masked;
 	unsigned int intr, virq;
 
-	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
+	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
 
 	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
 
@@ -449,14 +490,14 @@
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
 }
 
 static void gic_unmask_local_irq(struct irq_data *d)
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
 }
 
 static struct irq_chip gic_local_irq_controller = {
@@ -474,7 +515,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -488,7 +529,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -608,7 +649,7 @@
 		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
 			if (!gic_local_irq_is_routable(j))
 				continue;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
 		}
 	}
 }
@@ -653,27 +694,32 @@
 
 		switch (intr) {
 		case GIC_LOCAL_INT_WD:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
 			break;
 		case GIC_LOCAL_INT_COMPARE:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_TIMER:
 			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
 			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_PERFCTR:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT0:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT1:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_FDC:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
 			break;
 		default:
 			pr_err("Invalid local IRQ %d\n", intr);
@@ -778,7 +824,7 @@
 		 */
 		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
 		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
-			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
 							 GIC_VPE_TIMER_MAP)) &
 					GIC_MAP_MSK;
 			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
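
One subtlety in the 32-bit paths above: a 64-bit counter read as two 32-bit
halves can tear if the low word carries into the high word between the two
reads, so the high word is sampled before and after and the read is retried
if it changed. The pattern in isolation (the accessor passed in is
hypothetical):

static u64 example_read_split_counter(u32 (*rd)(unsigned int reg),
				      unsigned int hi_reg,
				      unsigned int lo_reg)
{
	u32 hi, hi2, lo;

	do {
		hi  = rd(hi_reg);	/* sample high word */
		lo  = rd(lo_reg);	/* sample low word */
		hi2 = rd(hi_reg);	/* re-sample to detect a carry */
	} while (hi2 != hi);

	return ((u64)hi << 32) | lo;
}
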
diff --git a/drivers/isdn/icn/icn.h b/drivers/isdn/icn/icn.h
index b713466..f8f2e76 100644
--- a/drivers/isdn/icn/icn.h
+++ b/drivers/isdn/icn/icn.h
@@ -38,7 +38,7 @@
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23408bd..70f4255 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -565,6 +565,17 @@
 	  This option enables support for the BlinkM RGB LED connected
 	  through I2C. Say Y to enable support for the BlinkM LED.
 
+config LEDS_POWERNV
+	tristate "LED support for PowerNV Platform"
+	depends on LEDS_CLASS
+	depends on PPC_POWERNV
+	depends on OF
+	help
+	  This option enables support for the system LEDs present on
+	  PowerNV platforms. Say 'y' to enable this support in the kernel.
+	  To compile this driver as a module, choose 'm' here: the module
+	  will be called leds-powernv.
+
 config LEDS_SYSCON
 	bool "LED support for LEDs on system controllers"
 	depends on LEDS_CLASS=y
@@ -583,14 +594,6 @@
 	  This option enables support for the LEDs on the ARM Versatile
 	  and RealView boards. Say Y to enable these.
 
-config LEDS_PM8941_WLED
-	tristate "LED support for the Qualcomm PM8941 WLED block"
-	depends on LEDS_CLASS
-	select REGMAP
-	help
-	  This option enables support for the 'White' LED block
-	  on Qualcomm PM8941 PMICs.
-
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8d6a24a..b503f92 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -63,8 +63,8 @@
 obj-$(CONFIG_LEDS_SYSCON)		+= leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)		+= leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)		+= leds-menf21bmc.o
-obj-$(CONFIG_LEDS_PM8941_WLED)		+= leds-pm8941-wled.o
 obj-$(CONFIG_LEDS_KTD2692)		+= leds-ktd2692.o
+obj-$(CONFIG_LEDS_POWERNV)		+= leds-powernv.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-pm8941-wled.c b/drivers/leds/leds-pm8941-wled.c
deleted file mode 100644
index bf64a59..0000000
--- a/drivers/leds/leds-pm8941-wled.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/* Copyright (c) 2015, Sony Mobile Communications, AB.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define PM8941_WLED_REG_VAL_BASE		0x40
-#define  PM8941_WLED_REG_VAL_MAX		0xFFF
-
-#define PM8941_WLED_REG_MOD_EN			0x46
-#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
-#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
-
-#define PM8941_WLED_REG_SYNC			0x47
-#define  PM8941_WLED_REG_SYNC_MASK		0x07
-#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
-#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
-#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
-#define  PM8941_WLED_REG_SYNC_ALL		0x07
-#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
-
-#define PM8941_WLED_REG_FREQ			0x4c
-#define  PM8941_WLED_REG_FREQ_MASK		0x0f
-
-#define PM8941_WLED_REG_OVP			0x4d
-#define  PM8941_WLED_REG_OVP_MASK		0x03
-
-#define PM8941_WLED_REG_BOOST			0x4e
-#define  PM8941_WLED_REG_BOOST_MASK		0x07
-
-#define PM8941_WLED_REG_SINK			0x4f
-#define  PM8941_WLED_REG_SINK_MASK		0xe0
-#define  PM8941_WLED_REG_SINK_SHFT		0x05
-
-/* Per-'string' registers below */
-#define PM8941_WLED_REG_STR_OFFSET		0x10
-
-#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
-#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
-
-#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
-#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
-
-#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
-#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
-#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
-#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
-
-#define PM8941_WLED_REG_STR_CABC_BASE		0x66
-#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
-
-struct pm8941_wled_config {
-	u32 i_boost_limit;
-	u32 ovp;
-	u32 switch_freq;
-	u32 num_strings;
-	u32 i_limit;
-	bool cs_out_en;
-	bool ext_gen;
-	bool cabc_en;
-};
-
-struct pm8941_wled {
-	struct regmap *regmap;
-	u16 addr;
-
-	struct led_classdev cdev;
-
-	struct pm8941_wled_config cfg;
-};
-
-static int pm8941_wled_set(struct led_classdev *cdev,
-			   enum led_brightness value)
-{
-	struct pm8941_wled *wled;
-	u8 ctrl = 0;
-	u16 val;
-	int rc;
-	int i;
-
-	wled = container_of(cdev, struct pm8941_wled, cdev);
-
-	if (value != 0)
-		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
-
-	val = value * PM8941_WLED_REG_VAL_MAX / LED_FULL;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_MOD_EN,
-			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
-
-		rc = regmap_bulk_write(wled->regmap,
-				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
-				v, 2);
-		if (rc)
-			return rc;
-	}
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
-	return rc;
-}
-
-static void pm8941_wled_set_brightness(struct led_classdev *cdev,
-				       enum led_brightness value)
-{
-	if (pm8941_wled_set(cdev, value)) {
-		dev_err(cdev->dev, "Unable to set brightness\n");
-		return;
-	}
-	cdev->brightness = value;
-}
-
-static int pm8941_wled_setup(struct pm8941_wled *wled)
-{
-	int rc;
-	int i;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_OVP,
-			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_BOOST,
-			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_FREQ,
-			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
-	if (rc)
-		return rc;
-
-	if (wled->cfg.cs_out_en) {
-		u8 all = (BIT(wled->cfg.num_strings) - 1)
-				<< PM8941_WLED_REG_SINK_SHFT;
-
-		rc = regmap_update_bits(wled->regmap,
-				wled->addr + PM8941_WLED_REG_SINK,
-				PM8941_WLED_REG_SINK_MASK, all);
-		if (rc)
-			return rc;
-	}
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
-				PM8941_WLED_REG_STR_MOD_MASK,
-				PM8941_WLED_REG_STR_MOD_EN);
-		if (rc)
-			return rc;
-
-		if (wled->cfg.ext_gen) {
-			rc = regmap_update_bits(wled->regmap,
-					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
-					PM8941_WLED_REG_STR_MOD_SRC_MASK,
-					PM8941_WLED_REG_STR_MOD_SRC_EXT);
-			if (rc)
-				return rc;
-		}
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_SCALE_BASE,
-				PM8941_WLED_REG_STR_SCALE_MASK,
-				wled->cfg.i_limit);
-		if (rc)
-			return rc;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_CABC_BASE,
-				PM8941_WLED_REG_STR_CABC_MASK,
-				wled->cfg.cabc_en ?
-					PM8941_WLED_REG_STR_CABC_EN : 0);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static const struct pm8941_wled_config pm8941_wled_config_defaults = {
-	.i_boost_limit = 3,
-	.i_limit = 20,
-	.ovp = 2,
-	.switch_freq = 5,
-	.num_strings = 0,
-	.cs_out_en = false,
-	.ext_gen = false,
-	.cabc_en = false,
-};
-
-struct pm8941_wled_var_cfg {
-	const u32 *values;
-	u32 (*fn)(u32);
-	int size;
-};
-
-static const u32 pm8941_wled_i_boost_limit_values[] = {
-	105, 385, 525, 805, 980, 1260, 1400, 1680,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
-	.values = pm8941_wled_i_boost_limit_values,
-	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
-};
-
-static const u32 pm8941_wled_ovp_values[] = {
-	35, 32, 29, 27,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
-	.values = pm8941_wled_ovp_values,
-	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
-};
-
-static u32 pm8941_wled_num_strings_values_fn(u32 idx)
-{
-	return idx + 1;
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
-	.fn = pm8941_wled_num_strings_values_fn,
-	.size = 3,
-};
-
-static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
-{
-	return 19200 / (2 * (1 + idx));
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
-	.fn = pm8941_wled_switch_freq_values_fn,
-	.size = 16,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
-	.size = 26,
-};
-
-static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
-{
-	if (idx >= cfg->size)
-		return UINT_MAX;
-	if (cfg->fn)
-		return cfg->fn(idx);
-	if (cfg->values)
-		return cfg->values[idx];
-	return idx;
-}
-
-static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
-{
-	struct pm8941_wled_config *cfg = &wled->cfg;
-	u32 val;
-	int rc;
-	u32 c;
-	int i;
-	int j;
-
-	const struct {
-		const char *name;
-		u32 *val_ptr;
-		const struct pm8941_wled_var_cfg *cfg;
-	} u32_opts[] = {
-		{
-			"qcom,current-boost-limit",
-			&cfg->i_boost_limit,
-			.cfg = &pm8941_wled_i_boost_limit_cfg,
-		},
-		{
-			"qcom,current-limit",
-			&cfg->i_limit,
-			.cfg = &pm8941_wled_i_limit_cfg,
-		},
-		{
-			"qcom,ovp",
-			&cfg->ovp,
-			.cfg = &pm8941_wled_ovp_cfg,
-		},
-		{
-			"qcom,switching-freq",
-			&cfg->switch_freq,
-			.cfg = &pm8941_wled_switch_freq_cfg,
-		},
-		{
-			"qcom,num-strings",
-			&cfg->num_strings,
-			.cfg = &pm8941_wled_num_strings_cfg,
-		},
-	};
-	const struct {
-		const char *name;
-		bool *val_ptr;
-	} bool_opts[] = {
-		{ "qcom,cs-out", &cfg->cs_out_en, },
-		{ "qcom,ext-gen", &cfg->ext_gen, },
-		{ "qcom,cabc", &cfg->cabc_en, },
-	};
-
-	rc = of_property_read_u32(dev->of_node, "reg", &val);
-	if (rc || val > 0xffff) {
-		dev_err(dev, "invalid IO resources\n");
-		return rc ? rc : -EINVAL;
-	}
-	wled->addr = val;
-
-	rc = of_property_read_string(dev->of_node, "label", &wled->cdev.name);
-	if (rc)
-		wled->cdev.name = dev->of_node->name;
-
-	wled->cdev.default_trigger = of_get_property(dev->of_node,
-			"linux,default-trigger", NULL);
-
-	*cfg = pm8941_wled_config_defaults;
-	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
-		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
-		if (rc == -EINVAL) {
-			continue;
-		} else if (rc) {
-			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
-			return rc;
-		}
-
-		c = UINT_MAX;
-		for (j = 0; c != val; j++) {
-			c = pm8941_wled_values(u32_opts[i].cfg, j);
-			if (c == UINT_MAX) {
-				dev_err(dev, "invalid value for '%s'\n",
-					u32_opts[i].name);
-				return -EINVAL;
-			}
-		}
-
-		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
-		*u32_opts[i].val_ptr = j;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
-		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
-			*bool_opts[i].val_ptr = true;
-	}
-
-	cfg->num_strings = cfg->num_strings + 1;
-
-	return 0;
-}
-
-static int pm8941_wled_probe(struct platform_device *pdev)
-{
-	struct pm8941_wled *wled;
-	struct regmap *regmap;
-	int rc;
-
-	regmap = dev_get_regmap(pdev->dev.parent, NULL);
-	if (!regmap) {
-		dev_err(&pdev->dev, "Unable to get regmap\n");
-		return -EINVAL;
-	}
-
-	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
-	if (!wled)
-		return -ENOMEM;
-
-	wled->regmap = regmap;
-
-	rc = pm8941_wled_configure(wled, &pdev->dev);
-	if (rc)
-		return rc;
-
-	rc = pm8941_wled_setup(wled);
-	if (rc)
-		return rc;
-
-	wled->cdev.brightness_set = pm8941_wled_set_brightness;
-
-	rc = devm_led_classdev_register(&pdev->dev, &wled->cdev);
-	if (rc)
-		return rc;
-
-	platform_set_drvdata(pdev, wled);
-
-	return 0;
-};
-
-static const struct of_device_id pm8941_wled_match_table[] = {
-	{ .compatible = "qcom,pm8941-wled" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
-
-static struct platform_driver pm8941_wled_driver = {
-	.probe = pm8941_wled_probe,
-	.driver	= {
-		.name = "pm8941-wled",
-		.of_match_table	= pm8941_wled_match_table,
-	},
-};
-
-module_platform_driver(pm8941_wled_driver);
-
-MODULE_DESCRIPTION("pm8941 wled driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pm8941-wled");
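The removed pm8941_wled_configure() above maps each devicetree property
value to a register index by walking the candidate values for that field
until it matches, giving up with -EINVAL once the candidate generator
returns the UINT_MAX sentinel. A standalone sketch of that search, using
illustrative values rather than the PMIC's real tables:

/*
 * Sketch of the value-to-index search in the removed code above:
 * scan candidate values until the requested one is found, or fail
 * once the generator runs off the end (UINT_MAX sentinel). These
 * values are illustrative, not the real register table.
 */
#include <limits.h>
#include <stdio.h>

static const unsigned int ovp_values[] = { 35, 32, 29, 27 };

static unsigned int value_at(unsigned int idx)
{
	if (idx >= sizeof(ovp_values) / sizeof(ovp_values[0]))
		return UINT_MAX;	/* out of range: no such setting */
	return ovp_values[idx];
}

static int value_to_index(unsigned int want)
{
	unsigned int idx, cur;

	for (idx = 0; (cur = value_at(idx)) != UINT_MAX; idx++)
		if (cur == want)
			return (int)idx;	/* the register field value */
	return -1;			/* property value not supported */
}

int main(void)
{
	printf("ovp=29 -> index %d\n", value_to_index(29));	/* 2 */
	printf("ovp=42 -> index %d\n", value_to_index(42));	/* -1 */
	return 0;
}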
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
new file mode 100644
index 0000000..2c5c5b1
--- /dev/null
+++ b/drivers/leds/leds-powernv.c
@@ -0,0 +1,345 @@
+/*
+ * PowerNV LED Driver
+ *
+ * Copyright IBM Corp. 2015
+ *
+ * Author: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
+ * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/opal.h>
+
+/* Map LED type to description. */
+struct led_type_map {
+	const int	type;
+	const char	*desc;
+};
+static const struct led_type_map led_type_map[] = {
+	{OPAL_SLOT_LED_TYPE_ID,		"identify"},
+	{OPAL_SLOT_LED_TYPE_FAULT,	"fault"},
+	{OPAL_SLOT_LED_TYPE_ATTN,	"attention"},
+	{-1,				NULL},
+};
+
+struct powernv_led_common {
+	/*
+	 * By default the unload path resets all the LEDs. But on the
+	 * PowerNV platform we want to retain LED state across reboot,
+	 * as these are controlled by firmware. Also the service
+	 * processor can modify the LEDs independently of the OS. Hence
+	 * avoid resetting LEDs in the unload path.
+	 */
+	bool		led_disabled;
+
+	/* Max supported LED type */
+	__be64		max_led_type;
+
+	/* global lock */
+	struct mutex	lock;
+};
+
+/* PowerNV LED data */
+struct powernv_led_data {
+	struct led_classdev	cdev;
+	char			*loc_code;	/* LED location code */
+	int			led_type;	/* OPAL_SLOT_LED_TYPE_* */
+
+	struct powernv_led_common *common;
+};
+
+
+/* Returns OPAL_SLOT_LED_TYPE_* for a given LED type string */
+static int powernv_get_led_type(const char *led_type_desc)
+{
+	int i;
+
+	for (i = 0; led_type_map[i].desc; i++)
+		if (!strcmp(led_type_map[i].desc, led_type_desc))
+			return led_type_map[i].type;
+
+	return -1;
+}
+
+/*
+ * This commits the state change of the requested LED through an OPAL call.
+ * This function is called from workqueue task context whenever it gets
+ * scheduled. It can sleep at the opal_async_wait_response() call.
+ */
+static void powernv_led_set(struct powernv_led_data *powernv_led,
+			    enum led_brightness value)
+{
+	int rc, token;
+	u64 led_mask, led_value = 0;
+	__be64 max_type;
+	struct opal_msg msg;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Prepare for the OPAL call */
+	max_type = powernv_led_common->max_led_type;
+	led_mask = OPAL_SLOT_LED_STATE_ON << powernv_led->led_type;
+	if (value)
+		led_value = led_mask;
+
+	/* OPAL async call */
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			dev_err(dev, "%s: Couldn't get OPAL async token\n",
+				__func__);
+		return;
+	}
+
+	rc = opal_leds_set_ind(token, powernv_led->loc_code,
+			       led_mask, led_value, &max_type);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		dev_err(dev, "%s: OPAL set LED call failed for %s [rc=%d]\n",
+			__func__, powernv_led->loc_code, rc);
+		goto out_token;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		dev_err(dev,
+			"%s: Failed to wait for the async response [rc=%d]\n",
+			__func__, rc);
+		goto out_token;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS)
+		dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n",
+			__func__, rc);
+
+out_token:
+	opal_async_release_token(token);
+}
+
+/*
+ * This function fetches the LED state for a given LED type of the
+ * given LED classdev structure.
+ */
+static enum led_brightness powernv_led_get(struct powernv_led_data *powernv_led)
+{
+	int rc;
+	__be64 mask, value, max_type;
+	u64 led_mask, led_value;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Fetch all LED status */
+	mask = cpu_to_be64(0);
+	value = cpu_to_be64(0);
+	max_type = powernv_led_common->max_led_type;
+
+	rc = opal_leds_get_ind(powernv_led->loc_code,
+			       &mask, &value, &max_type);
+	if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
+		dev_err(dev, "%s: OPAL get led call failed [rc=%d]\n",
+			__func__, rc);
+		return LED_OFF;
+	}
+
+	led_mask = be64_to_cpu(mask);
+	led_value = be64_to_cpu(value);
+
+	/* Is the LED status available? */
+	if (!((led_mask >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)) {
+		dev_err(dev, "%s: LED status not available for %s\n",
+			__func__, powernv_led->cdev.name);
+		return LED_OFF;
+	}
+
+	/* LED status value */
+	if ((led_value >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)
+		return LED_FULL;
+
+	return LED_OFF;
+}
+
+/*
+ * LED classdev 'brightness_set' function. This updates the LED
+ * state under the common mutex.
+ */
+static void powernv_brightness_set(struct led_classdev *led_cdev,
+				   enum led_brightness value)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Do not modify LED in unload path */
+	if (powernv_led_common->led_disabled)
+		return;
+
+	mutex_lock(&powernv_led_common->lock);
+	powernv_led_set(powernv_led, value);
+	mutex_unlock(&powernv_led_common->lock);
+}
+
+/* LED classdev 'brightness_get' function */
+static enum led_brightness powernv_brightness_get(struct led_classdev *led_cdev)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+
+	return powernv_led_get(powernv_led);
+}
+
+/*
+ * This function registers a classdev structure for any given type of LED
+ * on a given child LED device node.
+ */
+static int powernv_led_create(struct device *dev,
+			      struct powernv_led_data *powernv_led,
+			      const char *led_type_desc)
+{
+	int rc;
+
+	/* Make sure LED type is supported */
+	powernv_led->led_type = powernv_get_led_type(led_type_desc);
+	if (powernv_led->led_type == -1) {
+		dev_warn(dev, "%s: No support for led type : %s\n",
+			 __func__, led_type_desc);
+		return -EINVAL;
+	}
+
+	/* Create the name for classdev */
+	powernv_led->cdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+						powernv_led->loc_code,
+						led_type_desc);
+	if (!powernv_led->cdev.name) {
+		dev_err(dev,
+			"%s: Memory allocation failed for classdev name\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	powernv_led->cdev.brightness_set = powernv_brightness_set;
+	powernv_led->cdev.brightness_get = powernv_brightness_get;
+	powernv_led->cdev.brightness = LED_OFF;
+	powernv_led->cdev.max_brightness = LED_FULL;
+
+	/* Register the classdev */
+	rc = devm_led_classdev_register(dev, &powernv_led->cdev);
+	if (rc) {
+		dev_err(dev, "%s: Classdev registration failed for %s\n",
+			__func__, powernv_led->cdev.name);
+	}
+
+	return rc;
+}
+
+/* Go through the LED device tree node and register LED classdev structures */
+static int powernv_led_classdev(struct platform_device *pdev,
+				struct device_node *led_node,
+				struct powernv_led_common *powernv_led_common)
+{
+	const char *cur = NULL;
+	int rc = -1;
+	struct property *p;
+	struct device_node *np;
+	struct powernv_led_data *powernv_led;
+	struct device *dev = &pdev->dev;
+
+	for_each_child_of_node(led_node, np) {
+		p = of_find_property(np, "led-types", NULL);
+		if (!p)
+			continue;
+
+		while ((cur = of_prop_next_string(p, cur)) != NULL) {
+			powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
+						   GFP_KERNEL);
+			if (!powernv_led)
+				return -ENOMEM;
+
+			powernv_led->common = powernv_led_common;
+			powernv_led->loc_code = (char *)np->name;
+
+			rc = powernv_led_create(dev, powernv_led, cur);
+			if (rc)
+				return rc;
+		} /* while end */
+	}
+
+	return rc;
+}
+
+/* Platform driver probe */
+static int powernv_led_probe(struct platform_device *pdev)
+{
+	struct device_node *led_node;
+	struct powernv_led_common *powernv_led_common;
+	struct device *dev = &pdev->dev;
+
+	led_node = of_find_node_by_path("/ibm,opal/leds");
+	if (!led_node) {
+		dev_err(dev, "%s: LED parent device node not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
+					  GFP_KERNEL);
+	if (!powernv_led_common)
+		return -ENOMEM;
+
+	mutex_init(&powernv_led_common->lock);
+	powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
+
+	platform_set_drvdata(pdev, powernv_led_common);
+
+	return powernv_led_classdev(pdev, led_node, powernv_led_common);
+}
+
+/* Platform driver remove */
+static int powernv_led_remove(struct platform_device *pdev)
+{
+	struct powernv_led_common *powernv_led_common;
+
+	/* Disable LED operation */
+	powernv_led_common = platform_get_drvdata(pdev);
+	powernv_led_common->led_disabled = true;
+
+	/* Destroy lock */
+	mutex_destroy(&powernv_led_common->lock);
+
+	dev_info(&pdev->dev, "PowerNV led module unregistered\n");
+	return 0;
+}
+
+/* Platform driver property match */
+static const struct of_device_id powernv_led_match[] = {
+	{
+		.compatible	= "ibm,opal-v3-led",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, powernv_led_match);
+
+static struct platform_driver powernv_led_driver = {
+	.probe	= powernv_led_probe,
+	.remove = powernv_led_remove,
+	.driver = {
+		.name = "powernv-led-driver",
+		.of_match_table = powernv_led_match,
+	},
+};
+
+module_platform_driver(powernv_led_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV LED driver");
+MODULE_AUTHOR("Vasant Hegde <hegdevasant@linux.vnet.ibm.com>");
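powernv_get_led_type() above resolves a led-types string from the device
tree against a sentinel-terminated map table. A user-space sketch of the
same lookup shape; the type numbers here are made up:

/*
 * Sketch of a sentinel-terminated lookup table: walk entries until
 * the NULL sentinel, returning the matching type or -1.
 */
#include <stdio.h>
#include <string.h>

struct type_map {
	int type;
	const char *desc;
};

static const struct type_map map[] = {
	{ 0, "identify" },
	{ 1, "fault" },
	{ 2, "attention" },
	{ -1, NULL },			/* sentinel terminates the walk */
};

static int get_type(const char *desc)
{
	int i;

	for (i = 0; map[i].desc; i++)	/* stop at the sentinel */
		if (!strcmp(map[i].desc, desc))
			return map[i].type;
	return -1;
}

int main(void)
{
	printf("fault -> %d\n", get_type("fault"));	/* 1 */
	printf("bogus -> %d\n", get_type("bogus"));	/* -1 */
	return 0;
}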
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 109dcaa..68dcbcb 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -408,6 +408,7 @@
 	{ "therm_adm1030", adm1030 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
 
 static int
 do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
@@ -459,6 +460,7 @@
 	.compatible	= "adm1030"
     }, {}
 };
+MODULE_DEVICE_TABLE(of, therm_of_match);
 
 static struct platform_driver therm_of_driver = {
 	.driver = {
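The two therm_windtunnel hunks above only add MODULE_DEVICE_TABLE() for ID
tables that already existed; the macro publishes the IDs in the module's
alias section so modprobe can autoload the driver when a matching device
appears. A minimal, hedged sketch of the pattern; the driver name and ID
are hypothetical:

/* Minimal sketch of the device-table autoload pattern. */
#include <linux/module.h>
#include <linux/i2c.h>

static const struct i2c_device_id demo_id[] = {
	{ "demo-sensor", 0 },
	{ }				/* empty entry terminates the table */
};
/* Emits aliases like i2c:demo-sensor for modprobe to match on. */
MODULE_DEVICE_TABLE(i2c, demo_id);

MODULE_LICENSE("GPL");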
diff --git a/drivers/macintosh/windfarm.h b/drivers/macintosh/windfarm.h
index 028cdac..901c42f 100644
--- a/drivers/macintosh/windfarm.h
+++ b/drivers/macintosh/windfarm.h
@@ -53,11 +53,9 @@
  * the kref and wf_unregister_control will decrement it, thus the
  * object creating/disposing a given control shouldn't assume it
  * still exists after wf_unregister_control has been called.
- * wf_find_control will inc the refcount for you
  */
 extern int wf_register_control(struct wf_control *ct);
 extern void wf_unregister_control(struct wf_control *ct);
-extern struct wf_control * wf_find_control(const char *name);
 extern int wf_get_control(struct wf_control *ct);
 extern void wf_put_control(struct wf_control *ct);
 
@@ -117,7 +115,6 @@
 /* Same lifetime rules as controls */
 extern int wf_register_sensor(struct wf_sensor *sr);
 extern void wf_unregister_sensor(struct wf_sensor *sr);
-extern struct wf_sensor * wf_find_sensor(const char *name);
 extern int wf_get_sensor(struct wf_sensor *sr);
 extern void wf_put_sensor(struct wf_sensor *sr);
 
@@ -144,7 +141,6 @@
 /* Overtemp conditions. Those are refcounted */
 extern void wf_set_overtemp(void);
 extern void wf_clear_overtemp(void);
-extern int wf_is_overtemp(void);
 
 #define WF_EVENT_NEW_CONTROL	0 /* param is wf_control * */
 #define WF_EVENT_NEW_SENSOR	1 /* param is wf_sensor * */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 3ee198b6..465d770 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -72,7 +72,7 @@
 	blocking_notifier_call_chain(&wf_client_list, event, param);
 }
 
-int wf_critical_overtemp(void)
+static int wf_critical_overtemp(void)
 {
 	static char * critical_overtemp_path = "/sbin/critical_overtemp";
 	char *argv[] = { critical_overtemp_path, NULL };
@@ -84,7 +84,6 @@
 	return call_usermodehelper(critical_overtemp_path,
 				   argv, envp, UMH_WAIT_EXEC);
 }
-EXPORT_SYMBOL_GPL(wf_critical_overtemp);
 
 static int wf_thread_func(void *data)
 {
@@ -255,24 +254,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_control);
 
-struct wf_control * wf_find_control(const char *name)
-{
-	struct wf_control *ct;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(ct, &wf_controls, link) {
-		if (!strcmp(ct->name, name)) {
-			if (wf_get_control(ct))
-				ct = NULL;
-			mutex_unlock(&wf_lock);
-			return ct;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_control);
-
 int wf_get_control(struct wf_control *ct)
 {
 	if (!try_module_get(ct->ops->owner))
@@ -368,24 +349,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_sensor);
 
-struct wf_sensor * wf_find_sensor(const char *name)
-{
-	struct wf_sensor *sr;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(sr, &wf_sensors, link) {
-		if (!strcmp(sr->name, name)) {
-			if (wf_get_sensor(sr))
-				sr = NULL;
-			mutex_unlock(&wf_lock);
-			return sr;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_sensor);
-
 int wf_get_sensor(struct wf_sensor *sr)
 {
 	if (!try_module_get(sr->ops->owner))
@@ -435,7 +398,7 @@
 {
 	mutex_lock(&wf_lock);
 	blocking_notifier_chain_unregister(&wf_client_list, nb);
-	wf_client_count++;
+	wf_client_count--;
 	if (wf_client_count == 0)
 		wf_stop_thread();
 	mutex_unlock(&wf_lock);
@@ -474,12 +437,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_clear_overtemp);
 
-int wf_is_overtemp(void)
-{
-	return (wf_overtemp != 0);
-}
-EXPORT_SYMBOL_GPL(wf_is_overtemp);
-
 static int __init windfarm_core_init(void)
 {
 	DBG("wf: core loaded\n");
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index d0d0b03..99befa7 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -148,7 +148,7 @@
 	mhu->mbox.ops = &mhu_ops;
 	mhu->mbox.txdone_irq = false;
 	mhu->mbox.txdone_poll = true;
-	mhu->mbox.txpoll_period = 10;
+	mhu->mbox.txpoll_period = 1;
 
 	amba_set_drvdata(adev, mhu);
 
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 0b47dd4..cfb4b44 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -204,7 +204,6 @@
 static struct platform_driver bcm2835_mbox_driver = {
 	.driver = {
 		.name = "bcm2835-mbox",
-		.owner = THIS_MODULE,
 		.of_match_table = bcm2835_mbox_of_match,
 	},
 	.probe		= bcm2835_mbox_probe,
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c7fdb57..6a4811f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -26,8 +26,6 @@
 static LIST_HEAD(mbox_cons);
 static DEFINE_MUTEX(con_mutex);
 
-static void poll_txdone(unsigned long data);
-
 static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
 {
 	int idx;
@@ -88,7 +86,9 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
-		poll_txdone((unsigned long)chan->mbox);
+		/* kick start the timer immediately to avoid delays */
+		hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
+			      HRTIMER_MODE_REL);
 }
 
 static void tx_tick(struct mbox_chan *chan, int r)
@@ -112,9 +112,10 @@
 		complete(&chan->tx_complete);
 }
 
-static void poll_txdone(unsigned long data)
+static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
 {
-	struct mbox_controller *mbox = (struct mbox_controller *)data;
+	struct mbox_controller *mbox =
+		container_of(hrtimer, struct mbox_controller, poll_hrt);
 	bool txdone, resched = false;
 	int i;
 
@@ -130,9 +131,11 @@
 		}
 	}
 
-	if (resched)
-		mod_timer(&mbox->poll, jiffies +
-				msecs_to_jiffies(mbox->txpoll_period));
+	if (resched) {
+		hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+		return HRTIMER_RESTART;
+	}
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -451,9 +454,9 @@
 		txdone = TXDONE_BY_ACK;
 
 	if (txdone == TXDONE_BY_POLL) {
-		mbox->poll.function = &poll_txdone;
-		mbox->poll.data = (unsigned long)mbox;
-		init_timer(&mbox->poll);
+		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		mbox->poll_hrt.function = txdone_hrtimer;
 	}
 
 	for (i = 0; i < mbox->num_chans; i++) {
@@ -495,7 +498,7 @@
 		mbox_free_channel(&mbox->chans[i]);
 
 	if (mbox->txdone_poll)
-		del_timer_sync(&mbox->poll);
+		hrtimer_cancel(&mbox->poll_hrt);
 
 	mutex_unlock(&con_mutex);
 }
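The mailbox core now drives TX-completion polling from an hrtimer instead
of a jiffies timer, which is why arm_mhu can meaningfully drop
txpoll_period to 1 ms, and the timer is kicked immediately on submit
rather than calling the poll routine inline. A minimal module sketch of
the self-rearming hrtimer pattern, not the mailbox code itself:

/* Minimal sketch of a self-rearming hrtimer poll loop. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer poll_hrt;

static enum hrtimer_restart poll_fn(struct hrtimer *t)
{
	if (0 /* work still pending? */) {
		hrtimer_forward_now(t, ms_to_ktime(1));	/* re-arm in 1 ms */
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;	/* done: timer goes idle */
}

static int __init demo_init(void)
{
	hrtimer_init(&poll_hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_hrt.function = poll_fn;
	/* kick immediately, as the reworked msg_submit() above does */
	hrtimer_start(&poll_hrt, ktime_set(0, 0), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&poll_hrt);	/* must cancel before unload */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");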
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0072190..11e3bc9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -45,6 +45,7 @@
 /* md_cluster_info flags */
 #define		MD_CLUSTER_WAITING_FOR_NEWDISK		1
 #define		MD_CLUSTER_SUSPEND_READ_BALANCING	2
+#define		MD_CLUSTER_BEGIN_JOIN_CLUSTER		3
 
 
 struct md_cluster_info {
@@ -52,7 +53,6 @@
 	dlm_lockspace_t *lockspace;
 	int slot_number;
 	struct completion completion;
-	struct dlm_lock_resource *sb_lock;
 	struct mutex sb_mutex;
 	struct dlm_lock_resource *bitmap_lockres;
 	struct list_head suspend_list;
@@ -75,6 +75,7 @@
 	NEWDISK,
 	REMOVE,
 	RE_ADD,
+	BITMAP_NEEDS_SYNC,
 };
 
 struct cluster_msg {
@@ -99,7 +100,6 @@
 {
 	int ret = 0;
 
-	init_completion(&res->completion);
 	ret = dlm_lock(res->ls, mode, &res->lksb,
 			res->flags, res->name, strlen(res->name),
 			0, sync_ast, res, res->bast);
@@ -124,6 +124,7 @@
 	res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
 	if (!res)
 		return NULL;
+	init_completion(&res->completion);
 	res->ls = cinfo->lockspace;
 	res->mddev = mddev;
 	namelen = strlen(name);
@@ -165,11 +166,24 @@
 
 static void lockres_free(struct dlm_lock_resource *res)
 {
+	int ret;
+
 	if (!res)
 		return;
 
-	init_completion(&res->completion);
-	dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+	/* cancel a lock request or a conversion request that is blocked */
+	res->flags |= DLM_LKF_CANCEL;
+retry:
+	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+	if (unlikely(ret != 0)) {
+		pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);
+
+		/* if a lock conversion is cancelled, the lock is put
+		 * back on the grant queue; ensure it is unlocked */
+		if (ret == -DLM_ECANCEL)
+			goto retry;
+	}
+	res->flags &= ~DLM_LKF_CANCEL;
 	wait_for_completion(&res->completion);
 
 	kfree(res->name);
@@ -177,18 +191,6 @@
 	kfree(res);
 }
 
-static char *pretty_uuid(char *dest, char *src)
-{
-	int i, len = 0;
-
-	for (i = 0; i < 16; i++) {
-		if (i == 4 || i == 6 || i == 8 || i == 10)
-			len += sprintf(dest + len, "-");
-		len += sprintf(dest + len, "%02x", (__u8)src[i]);
-	}
-	return dest;
-}
-
 static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
 		sector_t lo, sector_t hi)
 {
@@ -281,16 +283,11 @@
 	set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
-static void recover_slot(void *arg, struct dlm_slot *slot)
+static void __recover_slot(struct mddev *mddev, int slot)
 {
-	struct mddev *mddev = arg;
 	struct md_cluster_info *cinfo = mddev->cluster_info;
 
-	pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
-			mddev->bitmap_info.cluster_name,
-			slot->nodeid, slot->slot,
-			cinfo->slot_number);
-	set_bit(slot->slot - 1, &cinfo->recovery_map);
+	set_bit(slot, &cinfo->recovery_map);
 	if (!cinfo->recovery_thread) {
 		cinfo->recovery_thread = md_register_thread(recover_bitmaps,
 				mddev, "recover");
@@ -302,6 +299,20 @@
 	md_wakeup_thread(cinfo->recovery_thread);
 }
 
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+	struct mddev *mddev = arg;
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+
+	pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+			mddev->bitmap_info.cluster_name,
+			slot->nodeid, slot->slot,
+			cinfo->slot_number);
+	/* subtract one since DLM slot numbers start from one while
+	 * cluster-md slot numbers begin at 0 */
+	__recover_slot(mddev, slot->slot - 1);
+}
+
 static void recover_done(void *arg, struct dlm_slot *slots,
 		int num_slots, int our_slot,
 		uint32_t generation)
@@ -310,10 +321,17 @@
 	struct md_cluster_info *cinfo = mddev->cluster_info;
 
 	cinfo->slot_number = our_slot;
-	complete(&cinfo->completion);
+	/* the completion only needs to be signalled when a node joins the
+	 * cluster; it doesn't need to run during another node's failure */
+	if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
+		complete(&cinfo->completion);
+		clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
+	}
 	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
+/* these ops are called when a node joins the cluster, and handle lock
+ * recovery if a node failure occurs */
 static const struct dlm_lockspace_ops md_ls_ops = {
 	.recover_prep = recover_prep,
 	.recover_slot = recover_slot,
@@ -388,7 +406,7 @@
 	int len;
 
 	len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
-	pretty_uuid(disk_uuid + len, cmsg->uuid);
+	sprintf(disk_uuid + len, "%pU", cmsg->uuid);
 	snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
 	pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
 	init_completion(&cinfo->newdisk_completion);
@@ -457,6 +475,11 @@
 			__func__, __LINE__, msg->slot);
 		process_readd_disk(mddev, msg);
 		break;
+	case BITMAP_NEEDS_SYNC:
+		pr_info("%s: %d Received BITMAP_NEEDS_SYNC from %d\n",
+			__func__, __LINE__, msg->slot);
+		__recover_slot(mddev, msg->slot);
+		break;
 	default:
 		pr_warn("%s:%d Received unknown message from %d\n",
 			__func__, __LINE__, msg->slot);
@@ -472,6 +495,7 @@
 	struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
 	struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
 	struct cluster_msg msg;
+	int ret;
 
 	/*get CR on Message*/
 	if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
@@ -484,13 +508,21 @@
 	process_recvd_msg(thread->mddev, &msg);
 
 	/*release CR on ack_lockres*/
-	dlm_unlock_sync(ack_lockres);
-	/*up-convert to EX on message_lockres*/
-	dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+	ret = dlm_unlock_sync(ack_lockres);
+	if (unlikely(ret != 0))
+		pr_info("unlock ack failed return %d\n", ret);
+	/*up-convert to PR on message_lockres*/
+	ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
+	if (unlikely(ret != 0))
+		pr_info("lock PR on msg failed return %d\n", ret);
 	/*get CR on ack_lockres again*/
-	dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+	ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+	if (unlikely(ret != 0))
+		pr_info("lock CR on ack failed return %d\n", ret);
 	/*release CR on message_lockres*/
-	dlm_unlock_sync(message_lockres);
+	ret = dlm_unlock_sync(message_lockres);
+	if (unlikely(ret != 0))
+		pr_info("unlock msg failed return %d\n", ret);
 }
 
 /* lock_comm()
@@ -519,7 +551,7 @@
  * The function:
  * 1. Grabs the message lockresource in EX mode
  * 2. Copies the message to the message LVB
- * 3. Downconverts message lockresource to CR
+ * 3. Downconverts message lockresource to CW
  * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
  *    and the other nodes read the message. The thread will wait here until all other
  *    nodes have released ack lock resource.
@@ -540,12 +572,12 @@
 
 	memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
 			sizeof(struct cluster_msg));
-	/*down-convert EX to CR on Message*/
-	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+	/*down-convert EX to CW on Message*/
+	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
 	if (error) {
-		pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+		pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
 				error);
-		goto failed_message;
+		goto failed_ack;
 	}
 
 	/*up-convert CR to EX on Ack*/
@@ -565,7 +597,13 @@
 	}
 
 failed_ack:
-	dlm_unlock_sync(cinfo->message_lockres);
+	error = dlm_unlock_sync(cinfo->message_lockres);
+	if (unlikely(error != 0)) {
+		pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
+			error);
+		/* retry in case the message lock can't be released for some reason */
+		goto failed_ack;
+	}
 failed_message:
 	return error;
 }
@@ -587,6 +625,7 @@
 	struct dlm_lock_resource *bm_lockres;
 	struct suspend_info *s;
 	char str[64];
+	sector_t lo, hi;
 
 
 	for (i = 0; i < total_slots; i++) {
@@ -617,9 +656,24 @@
 			lockres_free(bm_lockres);
 			continue;
 		}
-		if (ret)
+		if (ret) {
+			lockres_free(bm_lockres);
 			goto out;
-		/* TODO: Read the disk bitmap sb and check if it needs recovery */
+		}
+
+		/* Read the disk bitmap sb and check if it needs recovery */
+		ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+		if (ret) {
+			pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
+			lockres_free(bm_lockres);
+			continue;
+		}
+		if ((hi > 0) && (lo < mddev->recovery_cp)) {
+			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			mddev->recovery_cp = lo;
+			md_check_recovery(mddev);
+		}
+
 		dlm_unlock_sync(bm_lockres);
 		lockres_free(bm_lockres);
 	}
@@ -633,20 +687,20 @@
 	int ret, ops_rv;
 	char str[64];
 
-	if (!try_module_get(THIS_MODULE))
-		return -ENOENT;
-
 	cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
 	if (!cinfo)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&cinfo->suspend_list);
+	spin_lock_init(&cinfo->suspend_lock);
 	init_completion(&cinfo->completion);
+	set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
 
 	mutex_init(&cinfo->sb_mutex);
 	mddev->cluster_info = cinfo;
 
 	memset(str, 0, 64);
-	pretty_uuid(str, mddev->uuid);
+	sprintf(str, "%pU", mddev->uuid);
 	ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
 				DLM_LSFL_FS, LVB_SIZE,
 				&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
@@ -659,12 +713,6 @@
 		ret = -ERANGE;
 		goto err;
 	}
-	cinfo->sb_lock = lockres_init(mddev, "cmd-super",
-					NULL, 0);
-	if (!cinfo->sb_lock) {
-		ret = -ENOMEM;
-		goto err;
-	}
 	/* Initiate the communication resources */
 	ret = -ENOMEM;
 	cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
@@ -705,9 +753,6 @@
 		goto err;
 	}
 
-	INIT_LIST_HEAD(&cinfo->suspend_list);
-	spin_lock_init(&cinfo->suspend_lock);
-
 	ret = gather_all_resync_info(mddev, nodes);
 	if (ret)
 		goto err;
@@ -719,12 +764,10 @@
 	lockres_free(cinfo->ack_lockres);
 	lockres_free(cinfo->no_new_dev_lockres);
 	lockres_free(cinfo->bitmap_lockres);
-	lockres_free(cinfo->sb_lock);
 	if (cinfo->lockspace)
 		dlm_release_lockspace(cinfo->lockspace, 2);
 	mddev->cluster_info = NULL;
 	kfree(cinfo);
-	module_put(THIS_MODULE);
 	return ret;
 }
 
@@ -740,7 +783,6 @@
 	lockres_free(cinfo->token_lockres);
 	lockres_free(cinfo->ack_lockres);
 	lockres_free(cinfo->no_new_dev_lockres);
-	lockres_free(cinfo->sb_lock);
 	lockres_free(cinfo->bitmap_lockres);
 	dlm_release_lockspace(cinfo->lockspace, 2);
 	return 0;
@@ -817,8 +859,17 @@
 
 static void resync_finish(struct mddev *mddev)
 {
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+	struct cluster_msg cmsg;
+	int slot = cinfo->slot_number - 1;
+
 	pr_info("%s:%d\n", __func__, __LINE__);
 	resync_send(mddev, RESYNCING, 0, 0);
+	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+		cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
+		cmsg.slot = cpu_to_le32(slot);
+		sendmsg(cinfo, &cmsg);
+	}
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
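Among the md-cluster changes above, the open-coded pretty_uuid() helper is
dropped in favor of the kernel's %pU printk extension, which renders 16 raw
bytes in the canonical 8-4-4-4-12 form. A user-space sketch of the
equivalent formatting:

/*
 * Sketch of what the kernel's %pU specifier produces: 16 raw
 * bytes rendered as 8-4-4-4-12 hex groups.
 */
#include <stdio.h>

static void format_uuid(char *dst, const unsigned char *u)
{
	int i, len = 0;

	for (i = 0; i < 16; i++) {
		if (i == 4 || i == 6 || i == 8 || i == 10)
			len += sprintf(dst + len, "-");
		len += sprintf(dst + len, "%02x", u[i]);
	}
}

int main(void)
{
	const unsigned char uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	};
	char buf[37];		/* 32 hex digits + 4 dashes + NUL */

	format_uuid(buf, uuid);
	printf("%s\n", buf);	/* 12345678-9abc-def0-1234-56789abcdef0 */
	return 0;
}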
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4033262..4f5ecbe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -483,6 +483,8 @@
 		bioset_free(bs);
 }
 
+static void md_safemode_timeout(unsigned long data);
+
 void mddev_init(struct mddev *mddev)
 {
 	mutex_init(&mddev->open_mutex);
@@ -490,7 +492,8 @@
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
 	INIT_LIST_HEAD(&mddev->all_mddevs);
-	init_timer(&mddev->safemode_timer);
+	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
+		    (unsigned long) mddev);
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
@@ -3255,8 +3258,6 @@
 	return 0;
 }
 
-static void md_safemode_timeout(unsigned long data);
-
 static ssize_t
 safe_delay_show(struct mddev *mddev, char *page)
 {
@@ -4189,6 +4190,8 @@
 				type = "repair";
 		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
 			type = "recover";
+		else if (mddev->reshape_position != MaxSector)
+			type = "reshape";
 	}
 	return sprintf(page, "%s\n", type);
 }
@@ -5180,8 +5183,6 @@
 	atomic_set(&mddev->max_corr_read_errors,
 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
 	mddev->safemode = 0;
-	mddev->safemode_timer.function = md_safemode_timeout;
-	mddev->safemode_timer.data = (unsigned long) mddev;
 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
 	smp_wmb();
@@ -5194,6 +5195,11 @@
 			if (sysfs_link_rdev(mddev, rdev))
 				/* failure here is OK */;
 
+	if (mddev->degraded && !mddev->ro)
+		/* This ensures that recovering status is reported immediately
+		 * via sysfs - until a lack of spares is confirmed.
+		 */
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 
 	if (mddev->flags & MD_UPDATE_SB_FLAGS)
@@ -5741,16 +5747,16 @@
 
 	err = 0;
 	spin_lock(&mddev->lock);
-	/* bitmap disabled, zero the first byte and copy out */
-	if (!mddev->bitmap_info.file)
-		file->pathname[0] = '\0';
-	else if ((ptr = file_path(mddev->bitmap_info.file,
-			       file->pathname, sizeof(file->pathname))),
-		 IS_ERR(ptr))
-		err = PTR_ERR(ptr);
-	else
-		memmove(file->pathname, ptr,
-			sizeof(file->pathname)-(ptr-file->pathname));
+	/* bitmap enabled */
+	if (mddev->bitmap_info.file) {
+		ptr = file_path(mddev->bitmap_info.file, file->pathname,
+				sizeof(file->pathname));
+		if (IS_ERR(ptr))
+			err = PTR_ERR(ptr);
+		else
+			memmove(file->pathname, ptr,
+				sizeof(file->pathname)-(ptr-file->pathname));
+	}
 	spin_unlock(&mddev->lock);
 
 	if (err == 0 &&
@@ -7069,7 +7075,7 @@
 	seq_printf(seq, "\n");
 }
 
-static void status_resync(struct seq_file *seq, struct mddev *mddev)
+static int status_resync(struct seq_file *seq, struct mddev *mddev)
 {
 	sector_t max_sectors, resync, res;
 	unsigned long dt, db;
@@ -7077,18 +7083,32 @@
 	int scale;
 	unsigned int per_milli;
 
-	if (mddev->curr_resync <= 3)
-		resync = 0;
-	else
-		resync = mddev->curr_resync
-			- atomic_read(&mddev->recovery_active);
-
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 		max_sectors = mddev->resync_max_sectors;
 	else
 		max_sectors = mddev->dev_sectors;
 
+	resync = mddev->curr_resync;
+	if (resync <= 3) {
+		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+			/* Still cleaning up */
+			resync = max_sectors;
+	} else
+		resync -= atomic_read(&mddev->recovery_active);
+
+	if (resync == 0) {
+		if (mddev->recovery_cp < MaxSector) {
+			seq_printf(seq, "\tresync=PENDING");
+			return 1;
+		}
+		return 0;
+	}
+	if (resync < 3) {
+		seq_printf(seq, "\tresync=DELAYED");
+		return 1;
+	}
+
 	WARN_ON(max_sectors == 0);
 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
 	 * in a sector_t, and (max_sectors>>scale) will fit in a
@@ -7153,6 +7173,7 @@
 		   ((unsigned long)rt % 60)/6);
 
 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
+	return 1;
 }
 
 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
@@ -7298,13 +7319,8 @@
 			mddev->pers->status(seq, mddev);
 			seq_printf(seq, "\n      ");
 			if (mddev->pers->sync_request) {
-				if (mddev->curr_resync > 2) {
-					status_resync(seq, mddev);
+				if (status_resync(seq, mddev))
 					seq_printf(seq, "\n      ");
-				} else if (mddev->curr_resync >= 1)
-					seq_printf(seq, "\tresync=DELAYED\n      ");
-				else if (mddev->recovery_cp < MaxSector)
-					seq_printf(seq, "\tresync=PENDING\n      ");
 			}
 		} else
 			seq_printf(seq, "\n       ");
@@ -7387,15 +7403,19 @@
 }
 EXPORT_SYMBOL(unregister_md_personality);
 
-int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+int register_md_cluster_operations(struct md_cluster_operations *ops,
+				   struct module *module)
 {
-	if (md_cluster_ops != NULL)
-		return -EALREADY;
+	int ret = 0;
 	spin_lock(&pers_lock);
-	md_cluster_ops = ops;
-	md_cluster_mod = module;
+	if (md_cluster_ops != NULL)
+		ret = -EALREADY;
+	else {
+		md_cluster_ops = ops;
+		md_cluster_mod = module;
+	}
 	spin_unlock(&pers_lock);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(register_md_cluster_operations);
 
@@ -7793,7 +7813,8 @@
 		      > (max_sectors >> 4)) ||
 		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
 		     (j - mddev->curr_resync_completed)*2
-		     >= mddev->resync_max - mddev->curr_resync_completed
+		     >= mddev->resync_max - mddev->curr_resync_completed ||
+		     mddev->curr_resync_completed > mddev->resync_max
 			    )) {
 			/* time to update curr_resync_completed */
 			wait_event(mddev->recovery_wait,
@@ -7838,6 +7859,9 @@
 			break;
 
 		j += sectors;
+		if (j > max_sectors)
+			/* when skipping, extra large numbers can be returned. */
+			j = max_sectors;
 		if (j > 2)
 			mddev->curr_resync = j;
 		if (mddev_is_clustered(mddev))
@@ -7906,12 +7930,15 @@
 	blk_finish_plug(&plug);
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
+	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+	    mddev->curr_resync > 2) {
+		mddev->curr_resync_completed = mddev->curr_resync;
+		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+	}
 	/* tell personality that we are finished */
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
-	if (mddev_is_clustered(mddev))
-		md_cluster_ops->resync_finish(mddev);
-
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > 2) {
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -7945,6 +7972,9 @@
 		}
 	}
  skip:
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->resync_finish(mddev);
+
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
 	spin_lock(&mddev->lock);
@@ -7955,11 +7985,11 @@
 		mddev->resync_max = MaxSector;
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
+	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	mddev->curr_resync = 0;
 	spin_unlock(&mddev->lock);
 
 	wake_up(&resync_wait);
-	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	return;
 }
@@ -8128,6 +8158,7 @@
 			 */
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			md_reap_sync_thread(mddev);
+			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
@@ -8574,6 +8605,7 @@
 		/* Make sure they get written out promptly */
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
 		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+		set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
 		md_wakeup_thread(rdev->mddev->thread);
 	}
 	return rv;
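The register_md_cluster_operations() rework above moves the
already-registered test inside pers_lock; the old code tested
md_cluster_ops before taking the lock, so two concurrent registrations
could both pass the check. A user-space sketch of the corrected
check-and-set-under-lock shape:

/*
 * Sketch of check-and-set under the lock: the test and the
 * assignment form one critical section, so only one caller wins.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const void *registered_ops;

static int register_ops(const void *ops)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (registered_ops)
		ret = -EALREADY;	/* someone beat us to it */
	else
		registered_ops = ops;
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	static const int a, b;

	printf("first:  %d\n", register_ops(&a));	/* 0 */
	printf("second: %d\n", register_ops(&b));	/* -EALREADY */
	return 0;
}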
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 59cda50..63e619b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -83,7 +83,7 @@
 	char b[BDEVNAME_SIZE];
 	char b2[BDEVNAME_SIZE];
 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-	bool discard_supported = false;
+	unsigned short blksize = 512;
 
 	if (!conf)
 		return -ENOMEM;
@@ -98,6 +98,9 @@
 		sector_div(sectors, mddev->chunk_sectors);
 		rdev1->sectors = sectors * mddev->chunk_sectors;
 
+		blksize = max(blksize, queue_logical_block_size(
+				      rdev1->bdev->bd_disk->queue));
+
 		rdev_for_each(rdev2, mddev) {
 			pr_debug("md/raid0:%s:   comparing %s(%llu)"
 				 " with %s(%llu)\n",
@@ -134,6 +137,18 @@
 	}
 	pr_debug("md/raid0:%s: FINAL %d zones\n",
 		 mdname(mddev), conf->nr_strip_zones);
+	/*
+	 * now since we have the hard sector sizes, we can make sure
+	 * chunk size is a multiple of that sector size
+	 */
+	if ((mddev->chunk_sectors << 9) % blksize) {
+		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+		       mdname(mddev),
+		       mddev->chunk_sectors << 9, blksize);
+		err = -EINVAL;
+		goto abort;
+	}
+
 	err = -ENOMEM;
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
@@ -188,16 +203,9 @@
 		}
 		dev[j] = rdev1;
 
-		if (mddev->queue)
-			disk_stack_limits(mddev->gendisk, rdev1->bdev,
-					  rdev1->data_offset << 9);
-
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
-
-		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
-			discard_supported = true;
 	}
 	if (cnt != mddev->raid_disks) {
 		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -258,28 +266,6 @@
 			 (unsigned long long)smallest->sectors);
 	}
 
-	/*
-	 * now since we have the hard sector sizes, we can make sure
-	 * chunk size is a multiple of that sector size
-	 */
-	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
-		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
-		       mdname(mddev),
-		       mddev->chunk_sectors << 9);
-		goto abort;
-	}
-
-	if (mddev->queue) {
-		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-		blk_queue_io_opt(mddev->queue,
-				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
-		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-	}
-
 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
 
@@ -378,12 +364,6 @@
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 
-	if (mddev->queue) {
-		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
-	}
-
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
 		ret = create_strip_zones(mddev, &conf);
@@ -392,6 +372,29 @@
 		mddev->private = conf;
 	}
 	conf = mddev->private;
+	if (mddev->queue) {
+		struct md_rdev *rdev;
+		bool discard_supported = false;
+
+		rdev_for_each(rdev, mddev) {
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+				discard_supported = true;
+		}
+		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		blk_queue_io_opt(mddev->queue,
+				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+		if (!discard_supported)
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		else
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	}
 
 	/* calculate array device size */
 	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
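The raid0 change above computes the largest logical block size across the
member devices while create_strip_zones() walks them, then rejects a chunk
size that is not a multiple of it; the old check consulted mddev->queue,
which can be absent at that point. A standalone sketch of the validation,
with made-up device block sizes:

/*
 * Sketch of the raid0 chunk-size validation: the chunk (in bytes)
 * must be a multiple of the largest member device's logical block
 * size. Device sizes here are illustrative.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int dev_blksize[] = { 512, 4096, 512 };
	unsigned int chunk_sectors = 128;	/* 64 KiB chunk */
	unsigned int blksize = 512, i;

	for (i = 0; i < sizeof(dev_blksize) / sizeof(dev_blksize[0]); i++)
		if (dev_blksize[i] > blksize)
			blksize = dev_blksize[i];

	if ((chunk_sectors << 9) % blksize)
		printf("chunk of %u bytes not a multiple of %u\n",
		       chunk_sectors << 9, blksize);
	else
		printf("chunk of %u bytes OK for block size %u\n",
		       chunk_sectors << 9, blksize);
	return 0;
}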
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f39d69f..4517f06 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1474,6 +1474,7 @@
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_CHANGE_PENDING, &mddev->flags);
 	printk(KERN_ALERT
 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid1:%s: Operation continuing on %d devices.\n",
@@ -2235,6 +2236,7 @@
 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 {
 	int m;
+	bool fail = false;
 	for (m = 0; m < conf->raid_disks * 2 ; m++)
 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
 			struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2247,6 +2249,7 @@
 			 * narrow down and record precise write
 			 * errors.
 			 */
+			fail = true;
 			if (!narrow_write_error(r1_bio, m)) {
 				md_error(conf->mddev,
 					 conf->mirrors[m].rdev);
@@ -2258,7 +2261,13 @@
 		}
 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 		close_write(r1_bio);
-	raid_end_bio_io(r1_bio);
+	if (fail) {
+		spin_lock_irq(&conf->device_lock);
+		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(conf->mddev->thread);
+	} else
+		raid_end_bio_io(r1_bio);
 }
 
 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2364,6 +2373,23 @@
 
 	md_check_recovery(mddev);
 
+	if (!list_empty_careful(&conf->bio_end_io_list) &&
+	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		LIST_HEAD(tmp);
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+			list_add(&tmp, &conf->bio_end_io_list);
+			list_del_init(&conf->bio_end_io_list);
+		}
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+		while (!list_empty(&tmp)) {
+			r1_bio = list_first_entry(&tmp,
+						  struct r1bio, retry_list);
+			list_del(&r1_bio->retry_list);
+			raid_end_bio_io(r1_bio);
+		}
+	}
+
 	blk_start_plug(&plug);
 	for (;;) {
 
@@ -2763,6 +2789,7 @@
 	conf->raid_disks = mddev->raid_disks;
 	conf->mddev = mddev;
 	INIT_LIST_HEAD(&conf->retry_list);
+	INIT_LIST_HEAD(&conf->bio_end_io_list);
 
 	spin_lock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);
@@ -3057,6 +3084,7 @@
 
 	unfreeze_array(conf);
 
+	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14ebb28..c52d713 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -61,6 +61,11 @@
 	 * block, or anything else.
 	 */
 	struct list_head	retry_list;
+	/* A separate list of r1bio which just need raid_end_bio_io called.
+	 * For writes which had any errors this must not happen until
+	 * the superblock recording the failure has been written.
+	 */
+	struct list_head	bio_end_io_list;
 
 	/* queue pending writes to be submitted on unplug */
 	struct bio_list		pending_bio_list;
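The raid1 hunks above defer raid_end_bio_io() for writes that saw an
error: the r1_bio is parked on bio_end_io_list, and raid1d drains that
list only once MD_CHANGE_PENDING shows the superblock recording the
failure has been written out. The drain splices the list to a private one
under the lock and completes entries without holding it; a user-space
sketch of that shape:

/*
 * Sketch of the deferred-completion drain added to raid1d above:
 * splice the pending list to a private one under the lock, then
 * complete entries lock-free. "sb_written" stands in for
 * !MD_CHANGE_PENDING.
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;
static int sb_written = 1;	/* stand-in for !MD_CHANGE_PENDING */

static void drain(void)
{
	struct item *tmp = NULL, *it;

	if (!pending || !sb_written)
		return;
	pthread_mutex_lock(&lock);
	if (sb_written) {		/* re-check under the lock */
		tmp = pending;
		pending = NULL;
	}
	pthread_mutex_unlock(&lock);
	while ((it = tmp) != NULL) {	/* complete off the private list */
		tmp = it->next;
		printf("completing bio %d\n", it->id);
	}
}

int main(void)
{
	static struct item b = { NULL, 2 }, a = { &b, 1 };

	pending = &a;
	drain();	/* completes 1 then 2 */
	return 0;
}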
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b0fce2e..0fc33eb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1589,6 +1589,7 @@
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_CHANGE_PENDING, &mddev->flags);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	printk(KERN_ALERT
 	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
@@ -2623,6 +2624,7 @@
 		}
 		put_buf(r10_bio);
 	} else {
+		bool fail = false;
 		for (m = 0; m < conf->copies; m++) {
 			int dev = r10_bio->devs[m].devnum;
 			struct bio *bio = r10_bio->devs[m].bio;
@@ -2634,6 +2636,7 @@
 					r10_bio->sectors, 0);
 				rdev_dec_pending(rdev, conf->mddev);
 			} else if (bio != NULL && bio->bi_error) {
+				fail = true;
 				if (!narrow_write_error(r10_bio, m)) {
 					md_error(conf->mddev, rdev);
 					set_bit(R10BIO_Degraded,
@@ -2654,7 +2657,13 @@
 		if (test_bit(R10BIO_WriteError,
 			     &r10_bio->state))
 			close_write(r10_bio);
-		raid_end_bio_io(r10_bio);
+		if (fail) {
+			spin_lock_irq(&conf->device_lock);
+			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+			spin_unlock_irq(&conf->device_lock);
+			md_wakeup_thread(conf->mddev->thread);
+		} else
+			raid_end_bio_io(r10_bio);
 	}
 }
 
@@ -2669,6 +2678,23 @@
 
 	md_check_recovery(mddev);
 
+	if (!list_empty_careful(&conf->bio_end_io_list) &&
+	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		LIST_HEAD(tmp);
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+			list_add(&tmp, &conf->bio_end_io_list);
+			list_del_init(&conf->bio_end_io_list);
+		}
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+		while (!list_empty(&tmp)) {
+			r10_bio = list_first_entry(&tmp,
+						  struct r10bio, retry_list);
+			list_del(&r10_bio->retry_list);
+			raid_end_bio_io(r10_bio);
+		}
+	}
+
 	blk_start_plug(&plug);
 	for (;;) {
 
@@ -3443,6 +3469,7 @@
 	conf->reshape_safe = conf->reshape_progress;
 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
+	INIT_LIST_HEAD(&conf->bio_end_io_list);
 
 	spin_lock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);
@@ -4097,7 +4124,7 @@
 	 * at a time, possibly less if that exceeds RESYNC_PAGES,
 	 * or we hit a bad block or something.
 	 * This might mean we pause for normal IO in the middle of
-	 * a chunk, but that is not a problem was mddev->reshape_position
+	 * a chunk, but that is not a problem as mddev->reshape_position
 	 * can record any location.
 	 *
 	 * If we will want to write to a location that isn't
@@ -4121,7 +4148,7 @@
 	 *
 	 * In all this the minimum difference in data offsets
 	 * (conf->offset_diff - always positive) allows a bit of slack,
-	 * so next can be after 'safe', but not by more than offset_disk
+	 * so next can be after 'safe', but not by more than offset_diff
 	 *
 	 * We need to prepare all the bios here before we start any IO
 	 * to ensure the size we choose is acceptable to all devices.
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 5ee6473..6fc2c75 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -53,6 +53,12 @@
 	sector_t		offset_diff;
 
 	struct list_head	retry_list;
+	/* A separate list of r10bio which just need raid_end_bio_io called.
+	 * For writes which had any errors this must not happen until
+	 * the superblock recording the failure has been written.
+	 */
+	struct list_head	bio_end_io_list;
+
 	/* queue pending writes and submit them on unplug */
 	struct bio_list		pending_bio_list;
 	int			pending_count;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b29e89c..15ef2c6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -223,18 +223,14 @@
 	return slot;
 }
 
-static void return_io(struct bio *return_bi)
+static void return_io(struct bio_list *return_bi)
 {
-	struct bio *bi = return_bi;
-	while (bi) {
-
-		return_bi = bi->bi_next;
-		bi->bi_next = NULL;
+	struct bio *bi;
+	while ((bi = bio_list_pop(return_bi)) != NULL) {
 		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi);
-		bi = return_bi;
 	}
 }
 
@@ -1177,7 +1173,7 @@
 static void ops_complete_biofill(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
-	struct bio *return_bi = NULL;
+	struct bio_list return_bi = BIO_EMPTY_LIST;
 	int i;
 
 	pr_debug("%s: stripe %llu\n", __func__,
@@ -1201,17 +1197,15 @@
 			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
-				if (!raid5_dec_bi_active_stripes(rbi)) {
-					rbi->bi_next = return_bi;
-					return_bi = rbi;
-				}
+				if (!raid5_dec_bi_active_stripes(rbi))
+					bio_list_add(&return_bi, rbi);
 				rbi = rbi2;
 			}
 		}
 	}
 	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
-	return_io(return_bi);
+	return_io(&return_bi);
 
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
@@ -2517,6 +2511,7 @@
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_CHANGE_PENDING, &mddev->flags);
 	printk(KERN_ALERT
 	       "md/raid:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid:%s: Operation continuing on %d devices.\n",
@@ -3069,7 +3064,7 @@
 static void
 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				struct stripe_head_state *s, int disks,
-				struct bio **return_bi)
+				struct bio_list *return_bi)
 {
 	int i;
 	BUG_ON(sh->batch_head);
@@ -3114,8 +3109,7 @@
 			bi->bi_error = -EIO;
 			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
-				bi->bi_next = *return_bi;
-				*return_bi = bi;
+				bio_list_add(return_bi, bi);
 			}
 			bi = nextbi;
 		}
@@ -3139,8 +3133,7 @@
 			bi->bi_error = -EIO;
 			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
-				bi->bi_next = *return_bi;
-				*return_bi = bi;
+				bio_list_add(return_bi, bi);
 			}
 			bi = bi2;
 		}
@@ -3163,10 +3156,8 @@
 					r5_next_bio(bi, sh->dev[i].sector);
 
 				bi->bi_error = -EIO;
-				if (!raid5_dec_bi_active_stripes(bi)) {
-					bi->bi_next = *return_bi;
-					*return_bi = bi;
-				}
+				if (!raid5_dec_bi_active_stripes(bi))
+					bio_list_add(return_bi, bi);
 				bi = nextbi;
 			}
 		}
@@ -3445,7 +3436,7 @@
  * never LOCKED, so we don't need to test 'failed' directly.
  */
 static void handle_stripe_clean_event(struct r5conf *conf,
-	struct stripe_head *sh, int disks, struct bio **return_bi)
+	struct stripe_head *sh, int disks, struct bio_list *return_bi)
 {
 	int i;
 	struct r5dev *dev;
@@ -3479,8 +3470,7 @@
 					wbi2 = r5_next_bio(wbi, dev->sector);
 					if (!raid5_dec_bi_active_stripes(wbi)) {
 						md_write_end(conf->mddev);
-						wbi->bi_next = *return_bi;
-						*return_bi = wbi;
+						bio_list_add(return_bi, wbi);
 					}
 					wbi = wbi2;
 				}
@@ -4613,7 +4603,15 @@
 			md_wakeup_thread(conf->mddev->thread);
 	}
 
-	return_io(s.return_bi);
+	if (!bio_list_empty(&s.return_bi)) {
+		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+			spin_lock_irq(&conf->device_lock);
+			bio_list_merge(&conf->return_bi, &s.return_bi);
+			spin_unlock_irq(&conf->device_lock);
+			md_wakeup_thread(conf->mddev->thread);
+		} else
+			return_io(&s.return_bi);
+	}
 
 	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
 }
@@ -4672,12 +4670,12 @@
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
+	struct r5conf *conf = mddev->private;
 	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
-	unsigned int chunk_sectors = mddev->chunk_sectors;
+	unsigned int chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
-	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
-		chunk_sectors = mddev->new_chunk_sectors;
+	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
 	return  chunk_sectors >=
 		((sector & (chunk_sectors - 1)) + bio_sectors);
 }
@@ -5325,6 +5323,7 @@
 	sector_t stripe_addr;
 	int reshape_sectors;
 	struct list_head stripes;
+	sector_t retn;
 
 	if (sector_nr == 0) {
 		/* If restarting in the middle, skip the initial sectors */
@@ -5332,6 +5331,10 @@
 		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
 			sector_nr = raid5_size(mddev, 0, 0)
 				- conf->reshape_progress;
+		} else if (mddev->reshape_backwards &&
+			   conf->reshape_progress == MaxSector) {
+			/* shouldn't happen, but just in case, finish up. */
+			sector_nr = MaxSector;
 		} else if (!mddev->reshape_backwards &&
 			   conf->reshape_progress > 0)
 			sector_nr = conf->reshape_progress;
@@ -5340,7 +5343,8 @@
 			mddev->curr_resync_completed = sector_nr;
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 			*skipped = 1;
-			return sector_nr;
+			retn = sector_nr;
+			goto finish;
 		}
 	}
 
@@ -5348,10 +5352,8 @@
 	 * If old and new chunk sizes differ, we need to process the
 	 * largest of these
 	 */
-	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
-		reshape_sectors = mddev->new_chunk_sectors;
-	else
-		reshape_sectors = mddev->chunk_sectors;
+
+	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
 
 	/* We update the metadata at least every 10 seconds, or when
 	 * the data about to be copied would over-write the source of
@@ -5366,11 +5368,16 @@
 	safepos = conf->reshape_safe;
 	sector_div(safepos, data_disks);
 	if (mddev->reshape_backwards) {
-		writepos -= min_t(sector_t, reshape_sectors, writepos);
+		BUG_ON(writepos < reshape_sectors);
+		writepos -= reshape_sectors;
 		readpos += reshape_sectors;
 		safepos += reshape_sectors;
 	} else {
 		writepos += reshape_sectors;
+		/* readpos and safepos are worst-case calculations.
+		 * A negative number is overly pessimistic, and causes
+		 * obvious problems for unsigned storage.  So clip to 0.
+		 */
 		readpos -= min_t(sector_t, reshape_sectors, readpos);
 		safepos -= min_t(sector_t, reshape_sectors, safepos);
 	}
@@ -5513,7 +5520,10 @@
 	 * then we need to write out the superblock.
 	 */
 	sector_nr += reshape_sectors;
-	if ((sector_nr - mddev->curr_resync_completed) * 2
+	retn = reshape_sectors;
+finish:
+	if (mddev->curr_resync_completed > mddev->resync_max ||
+	    (sector_nr - mddev->curr_resync_completed) * 2
 	    >= mddev->resync_max - mddev->curr_resync_completed) {
 		/* Cannot proceed until we've updated the superblock... */
 		wait_event(conf->wait_for_overlap,
@@ -5538,7 +5548,7 @@
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	}
 ret:
-	return reshape_sectors;
+	return retn;
 }
 
 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
@@ -5794,6 +5804,18 @@
 
 	md_check_recovery(mddev);
 
+	if (!bio_list_empty(&conf->return_bi) &&
+	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		struct bio_list tmp = BIO_EMPTY_LIST;
+		spin_lock_irq(&conf->device_lock);
+		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+			bio_list_merge(&tmp, &conf->return_bi);
+			bio_list_init(&conf->return_bi);
+		}
+		spin_unlock_irq(&conf->device_lock);
+		return_io(&tmp);
+	}
+
 	blk_start_plug(&plug);
 	handled = 0;
 	spin_lock_irq(&conf->device_lock);
@@ -6234,8 +6256,8 @@
 		/* size is defined by the smallest of previous and new size */
 		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
-	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
-	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
+	sectors &= ~((sector_t)conf->chunk_sectors - 1);
+	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
@@ -6453,6 +6475,7 @@
 	INIT_LIST_HEAD(&conf->hold_list);
 	INIT_LIST_HEAD(&conf->delayed_list);
 	INIT_LIST_HEAD(&conf->bitmap_list);
+	bio_list_init(&conf->return_bi);
 	init_llist_head(&conf->released_stripes);
 	atomic_set(&conf->active_stripes, 0);
 	atomic_set(&conf->preread_active_stripes, 0);
@@ -6542,6 +6565,9 @@
 	if (conf->reshape_progress != MaxSector) {
 		conf->prev_chunk_sectors = mddev->chunk_sectors;
 		conf->prev_algo = mddev->layout;
+	} else {
+		conf->prev_chunk_sectors = conf->chunk_sectors;
+		conf->prev_algo = conf->algorithm;
 	}
 
 	conf->min_nr_stripes = NR_STRIPES;
@@ -6661,6 +6687,8 @@
 		sector_t here_new, here_old;
 		int old_disks;
 		int max_degraded = (mddev->level == 6 ? 2 : 1);
+		int chunk_sectors;
+		int new_data_disks;
 
 		if (mddev->new_level != mddev->level) {
 			printk(KERN_ERR "md/raid:%s: unsupported reshape "
@@ -6672,28 +6700,25 @@
 		/* reshape_position must be on a new-stripe boundary, and one
 		 * further up in new geometry must map after here in old
 		 * geometry.
+		 * If the chunk sizes are different, then as we perform reshape
+		 * in units of the largest of the two, reshape_position needs to
+		 * be a multiple of the largest chunk size times new data disks.
 		 */
 		here_new = mddev->reshape_position;
-		if (sector_div(here_new, mddev->new_chunk_sectors *
-			       (mddev->raid_disks - max_degraded))) {
+		chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
+		new_data_disks = mddev->raid_disks - max_degraded;
+		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
 			printk(KERN_ERR "md/raid:%s: reshape_position not "
 			       "on a stripe boundary\n", mdname(mddev));
 			return -EINVAL;
 		}
-		reshape_offset = here_new * mddev->new_chunk_sectors;
+		reshape_offset = here_new * chunk_sectors;
 		/* here_new is the stripe we will write to */
 		here_old = mddev->reshape_position;
-		sector_div(here_old, mddev->chunk_sectors *
-			   (old_disks-max_degraded));
+		sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
 		/* here_old is the first stripe that we might need to read
 		 * from */
 		if (mddev->delta_disks == 0) {
-			if ((here_new * mddev->new_chunk_sectors !=
-			     here_old * mddev->chunk_sectors)) {
-				printk(KERN_ERR "md/raid:%s: reshape position is"
-				       " confused - aborting\n", mdname(mddev));
-				return -EINVAL;
-			}
 			/* We cannot be sure it is safe to start an in-place
 			 * reshape.  It is only safe if user-space is monitoring
 			 * and taking constant backups.
@@ -6712,10 +6737,10 @@
 				return -EINVAL;
 			}
 		} else if (mddev->reshape_backwards
-		    ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
-		       here_old * mddev->chunk_sectors)
-		    : (here_new * mddev->new_chunk_sectors >=
-		       here_old * mddev->chunk_sectors + (-min_offset_diff))) {
+		    ? (here_new * chunk_sectors + min_offset_diff <=
+		       here_old * chunk_sectors)
+		    : (here_new * chunk_sectors >=
+		       here_old * chunk_sectors + (-min_offset_diff))) {
 			/* Reading from the same stripe as writing to - bad */
 			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
 			       "auto-recovery - aborting.\n",
@@ -6967,7 +6992,7 @@
 	int i;
 
 	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
-		mddev->chunk_sectors / 2, mddev->layout);
+		conf->chunk_sectors / 2, mddev->layout);
 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
 	for (i = 0; i < conf->raid_disks; i++)
 		seq_printf (seq, "%s",
@@ -7173,7 +7198,9 @@
 	 * worth it.
 	 */
 	sector_t newsize;
-	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+	struct r5conf *conf = mddev->private;
+
+	sectors &= ~((sector_t)conf->chunk_sectors - 1);
 	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
 	if (mddev->external_size &&
 	    mddev->array_sectors > newsize)
@@ -7412,6 +7439,7 @@
 			rdev->data_offset = rdev->new_data_offset;
 		smp_wmb();
 		conf->reshape_progress = MaxSector;
+		conf->mddev->reshape_position = MaxSector;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
 
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d051442..828c292 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -265,7 +265,7 @@
 	int dec_preread_active;
 	unsigned long ops_request;
 
-	struct bio *return_bi;
+	struct bio_list return_bi;
 	struct md_rdev *blocked_rdev;
 	int handle_bad_blocks;
 };
@@ -476,6 +476,9 @@
 	int			skip_copy; /* Don't copy data from bio to stripe cache */
 	struct list_head	*last_hold; /* detect hold_list promotions */
 
+	/* bios to have bi_end_io called after metadata is synced */
+	struct bio_list		return_bi;
+
 	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
 	/* unfortunately we need two cache names as we temporarily have
 	 * two caches.
diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/media/common/saa7146/saa7146_hlp.c
index be746d1..3dc6a83 100644
--- a/drivers/media/common/saa7146/saa7146_hlp.c
+++ b/drivers/media/common/saa7146/saa7146_hlp.c
@@ -307,7 +307,7 @@
 /* simple bubble-sort algorithm with duplicate elimination */
 static int sort_and_eliminate(u32* values, int* count)
 {
-	int low = 0, high = 0, top = 0, temp = 0;
+	int low = 0, high = 0, top = 0;
 	int cur = 0, next = 0;
 
 	/* sanity checks */
@@ -318,11 +318,8 @@
 	/* bubble sort the first @count items of the array @values */
 	for( top = *count; top > 0; top--) {
 		for( low = 0, high = 1; high < top; low++, high++) {
-			if( values[low] > values[high] ) {
-				temp = values[low];
-				values[low] = values[high];
-				values[high] = temp;
-			}
+			if( values[low] > values[high] )
+				swap(values[low], values[high]);
 		}
 	}
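
swap() from <linux/kernel.h> replaces the open-coded three-statement exchange without changing behaviour, which is also why the temp variable could be dropped. For reference, a hedged illustration of what sort_and_eliminate() is documented to do; the input values are made up:

    /* Sort ascending, drop duplicates, shrink the count in place. */
    u32 values[] = { 3, 1, 3, 2 };
    int count = ARRAY_SIZE(values);

    sort_and_eliminate(values, &count);
    /* values[] now begins { 1, 2, 3 } and count == 3 */
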
 
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 7293775..fb66184 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -169,10 +169,10 @@
 /**
  * Safely find needle in haystack.
  *
- * @param haystack Buffer to look in.
- * @param hlen Number of bytes in haystack.
- * @param needle Buffer to find.
- * @param nlen Number of bytes in needle.
+ * @haystack: Buffer to look in.
+ * @hlen: Number of bytes in haystack.
+ * @needle: Buffer to find.
+ * @nlen: Number of bytes in needle.
  * @return Pointer into haystack where needle was found, or NULL if not found.
  */
 static char *findstr(char * haystack, int hlen, char * needle, int nlen)
@@ -197,7 +197,7 @@
 
 
 /**
- * Check CAM status.
+ * dvb_ca_en50221_check_camstatus - Check CAM status.
  */
 static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
 {
@@ -240,13 +240,13 @@
 
 
 /**
- * Wait for flags to become set on the STATUS register on a CAM interface,
- * checking for errors and timeout.
+ * dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS
+ *	 register on a CAM interface, checking for errors and timeout.
  *
- * @param ca CA instance.
- * @param slot Slot on interface.
- * @param waitfor Flags to wait for.
- * @param timeout_ms Timeout in milliseconds.
+ * @ca: CA instance.
+ * @slot: Slot on interface.
+ * @waitfor: Flags to wait for.
+ * @timeout_ms: Timeout in milliseconds.
  *
  * @return 0 on success, nonzero on error.
  */
@@ -290,10 +290,10 @@
 
 
 /**
- * Initialise the link layer connection to a CAM.
+ * dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM.
  *
- * @param ca CA instance.
- * @param slot Slot id.
+ * @ca: CA instance.
+ * @slot: Slot id.
  *
  * @return 0 on success, nonzero on failure.
  */
@@ -346,14 +346,14 @@
 }
 
 /**
- * Read a tuple from attribute memory.
+ * dvb_ca_en50221_read_tuple - Read a tuple from attribute memory.
  *
- * @param ca CA instance.
- * @param slot Slot id.
- * @param address Address to read from. Updated.
- * @param tupleType Tuple id byte. Updated.
- * @param tupleLength Tuple length. Updated.
- * @param tuple Dest buffer for tuple (must be 256 bytes). Updated.
+ * @ca: CA instance.
+ * @slot: Slot id.
+ * @address: Address to read from. Updated.
+ * @tupleType: Tuple id byte. Updated.
+ * @tupleLength: Tuple length. Updated.
+ * @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
  *
  * @return 0 on success, nonzero on error.
  */
@@ -399,11 +399,11 @@
 
 
 /**
- * Parse attribute memory of a CAM module, extracting Config register, and checking
- * it is a DVB CAM module.
+ * dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module,
+ *	extracting Config register, and checking it is a DVB CAM module.
  *
- * @param ca CA instance.
- * @param slot Slot id.
+ * @ca: CA instance.
+ * @slot: Slot id.
  *
  * @return 0 on success, <0 on failure.
  */
@@ -546,10 +546,10 @@
 
 
 /**
- * Set CAM's configoption correctly.
+ * dvb_ca_en50221_set_configoption - Set CAM's configoption correctly.
  *
- * @param ca CA instance.
- * @param slot Slot containing the CAM.
+ * @ca: CA instance.
+ * @slot: Slot containing the CAM.
  */
 static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
 {
@@ -574,15 +574,16 @@
 
 
 /**
- * This function talks to an EN50221 CAM control interface. It reads a buffer of
- * data from the CAM. The data can either be stored in a supplied buffer, or
- * automatically be added to the slot's rx_buffer.
+ * dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control
+ *	interface. It reads a buffer of data from the CAM. The data can either
+ *	be stored in a supplied buffer, or automatically be added to the slot's
+ *	rx_buffer.
  *
- * @param ca CA instance.
- * @param slot Slot to read from.
- * @param ebuf If non-NULL, the data will be written to this buffer. If NULL,
+ * @ca: CA instance.
+ * @slot: Slot to read from.
+ * @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
  * the data will be added into the buffering system as a normal fragment.
- * @param ecount Size of ebuf. Ignored if ebuf is NULL.
+ * @ecount: Size of ebuf. Ignored if ebuf is NULL.
  *
  * @return Number of bytes read, or < 0 on error
  */
@@ -698,14 +699,14 @@
 
 
 /**
- * This function talks to an EN50221 CAM control interface. It writes a buffer of data
- * to a CAM.
+ * dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control
+ *				interface. It writes a buffer of data to a CAM.
  *
- * @param ca CA instance.
- * @param slot Slot to write to.
- * @param ebuf The data in this buffer is treated as a complete link-level packet to
+ * @ca: CA instance.
+ * @slot: Slot to write to.
+ * @ebuf: The data in this buffer is treated as a complete link-level packet to
  * be written.
- * @param count Size of ebuf.
+ * @count: Size of ebuf.
  *
  * @return Number of bytes written, or < 0 on error.
  */
@@ -790,10 +791,10 @@
 
 
 /**
- * A CAM has been removed => shut it down.
+ * dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down.
  *
- * @param ca CA instance.
- * @param slot Slot to shut down.
+ * @ca: CA instance.
+ * @slot: Slot to shut down.
  */
 static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
 {
@@ -815,11 +816,11 @@
 
 
 /**
- * A CAMCHANGE IRQ has occurred.
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
- * @param change_type One of the DVB_CA_CAMCHANGE_* values.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_CAMCHANGE_* values.
  */
 void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int change_type)
 {
@@ -844,10 +845,10 @@
 
 
 /**
- * A CAMREADY IRQ has occurred.
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
  */
 void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
 {
@@ -865,8 +866,8 @@
 /**
  * An FR or DA IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
  */
 void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
 {
@@ -899,7 +900,7 @@
 /**
  * Wake up the DVB CA thread
  *
- * @param ca CA instance.
+ * @ca: CA instance.
  */
 static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
 {
@@ -914,7 +915,7 @@
 /**
  * Update the delay used by the thread.
  *
- * @param ca CA instance.
+ * @ca: CA instance.
  */
 static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
 {
@@ -1177,10 +1178,10 @@
  * Real ioctl implementation.
  * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
  *
- * @param inode Inode concerned.
- * @param file File concerned.
- * @param cmd IOCTL command.
- * @param arg Associated argument.
+ * @inode: Inode concerned.
+ * @file: File concerned.
+ * @cmd: IOCTL command.
+ * @arg: Associated argument.
  *
  * @return 0 on success, <0 on error.
  */
@@ -1258,10 +1259,10 @@
 /**
  * Wrapper for ioctl implementation.
  *
- * @param inode Inode concerned.
- * @param file File concerned.
- * @param cmd IOCTL command.
- * @param arg Associated argument.
+ * @inode: Inode concerned.
+ * @file: File concerned.
+ * @cmd: IOCTL command.
+ * @arg: Associated argument.
  *
  * @return 0 on success, <0 on error.
  */
@@ -1275,10 +1276,10 @@
 /**
  * Implementation of write() syscall.
  *
- * @param file File structure.
- * @param buf Source buffer.
- * @param count Size of source buffer.
- * @param ppos Position in file (ignored).
+ * @file: File structure.
+ * @buf: Source buffer.
+ * @count: Size of source buffer.
+ * @ppos: Position in file (ignored).
  *
  * @return Number of bytes written, or <0 on error.
  */
@@ -1416,10 +1417,10 @@
 /**
  * Implementation of read() syscall.
  *
- * @param file File structure.
- * @param buf Destination buffer.
- * @param count Size of destination buffer.
- * @param ppos Position in file (ignored).
+ * @file: File structure.
+ * @buf: Destination buffer.
+ * @count: Size of destination buffer.
+ * @ppos: Position in file (ignored).
  *
  * @return Number of bytes read, or <0 on error.
  */
@@ -1519,8 +1520,8 @@
 /**
  * Implementation of file open syscall.
  *
- * @param inode Inode concerned.
- * @param file File concerned.
+ * @inode: Inode concerned.
+ * @file: File concerned.
  *
  * @return 0 on success, <0 on failure.
  */
@@ -1564,8 +1565,8 @@
 /**
  * Implementation of file close syscall.
  *
- * @param inode Inode concerned.
- * @param file File concerned.
+ * @inode: Inode concerned.
+ * @file: File concerned.
  *
  * @return 0 on success, <0 on failure.
  */
@@ -1592,8 +1593,8 @@
 /**
  * Implementation of poll() syscall.
  *
- * @param file File concerned.
- * @param wait poll wait table.
+ * @file: File concerned.
+ * @wait: poll wait table.
  *
  * @return Standard poll mask.
  */
@@ -1656,10 +1657,10 @@
 /**
  * Initialise a new DVB CA EN50221 interface device.
  *
- * @param dvb_adapter DVB adapter to attach the new CA device to.
- * @param ca The dvb_ca instance.
- * @param flags Flags describing the CA device (DVB_CA_FLAG_*).
- * @param slot_count Number of slots supported.
+ * @dvb_adapter: DVB adapter to attach the new CA device to.
+ * @ca: The dvb_ca instance.
+ * @flags: Flags describing the CA device (DVB_CA_FLAG_*).
+ * @slot_count: Number of slots supported.
  *
  * @return 0 on success, nonzero on failure
  */
@@ -1743,8 +1744,8 @@
 /**
  * Release a DVB CA EN50221 interface device.
  *
- * @param ca_dev The dvb_device_t instance for the CA device.
- * @param ca The associated dvb_ca instance.
+ * @ca_dev: The dvb_device_t instance for the CA device.
+ * @ca: The associated dvb_ca instance.
  */
 void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 {
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index 7df2e14..aba3b4f 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -83,27 +83,27 @@
 /* Functions for reporting IRQ events */
 
 /**
- * A CAMCHANGE IRQ has occurred.
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
- * @param change_type One of the DVB_CA_CAMCHANGE_* values
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_CAMCHANGE_* values
  */
 void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221* pubca, int slot, int change_type);
 
 /**
- * A CAMREADY IRQ has occurred.
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
  */
 void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221* pubca, int slot);
 
 /**
- * An FR or a DA IRQ has occurred.
+ * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
  *
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
  */
 void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221* ca, int slot);
 
@@ -113,21 +113,21 @@
 /* Initialisation/shutdown functions */
 
 /**
- * Initialise a new DVB CA device.
+ * dvb_ca_en50221_init - Initialise a new DVB CA device.
  *
- * @param dvb_adapter DVB adapter to attach the new CA device to.
- * @param ca The dvb_ca instance.
- * @param flags Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
- * @param slot_count Number of slots supported.
+ * @dvb_adapter: DVB adapter to attach the new CA device to.
+ * @ca: The dvb_ca instance.
+ * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
+ * @slot_count: Number of slots supported.
  *
  * @return 0 on success, nonzero on failure
  */
 extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, struct dvb_ca_en50221* ca, int flags, int slot_count);
 
 /**
- * Release a DVB CA device.
+ * dvb_ca_en50221_release - Release a DVB CA device.
  *
- * @param ca The associated dvb_ca instance.
+ * @ca: The associated dvb_ca instance.
  */
 extern void dvb_ca_en50221_release(struct dvb_ca_en50221* ca);
 
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 842b9c8..c38ef1a 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -81,7 +81,6 @@
 #define FESTATE_SEARCHING_SLOW (FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_SLOW)
 #define FESTATE_LOSTLOCK (FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW)
 
-#define FE_ALGO_HW		1
 /*
  * FESTATE_IDLE. No tuning parameters have been supplied and the loop is idling.
  * FESTATE_RETUNE. Parameters have been supplied, but we have not yet performed the first tune.
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 4816947..97661b2 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -48,6 +48,15 @@
  */
 #define MAX_DELSYS	8
 
+/**
+ * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
+ *
+ * @min_delay_ms:	minimum delay for tuning, in ms
+ * @step_size:		step size between two consecutive frequencies
+ * @max_drift:		maximum drift
+ *
+ * NOTE: step_size is in Hz for terrestrial/cable, or in kHz for satellite
+ */
 struct dvb_frontend_tune_settings {
 	int min_delay_ms;
 	int step_size;
@@ -56,6 +65,20 @@
 
 struct dvb_frontend;
 
+/**
+ * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
+ *
+ * @name:		name of the Frontend
+ * @frequency_min:	minimal frequency supported
+ * @frequency_max:	maximum frequency supported
+ * @frequency_step:	frequency step
+ * @bandwidth_min:	minimal frontend bandwidth supported
+ * @bandwidth_max:	maximum frontend bandwidth supported
+ * @bandwidth_step:	frontend bandwidth step
+ *
+ * NOTE: frequency parameters are in Hz for terrestrial/cable, or in kHz for
+ * satellite.
+ */
 struct dvb_tuner_info {
 	char name[128];
 
@@ -68,6 +91,20 @@
 	u32 bandwidth_step;
 };
 
+/**
+ * struct analog_parameters - Parameters to tune into an analog/radio channel
+ *
+ * @frequency:	Frequency used by analog TV tuner (either in 62.5 kHz step,
+ * 		for TV, or 62.5 Hz for radio)
+ * @mode:	Tuner mode, as defined on enum v4l2_tuner_type
+ * @audmode:	Audio mode as defined for the rxsubchans field at videodev2.h,
+ * 		e. g. V4L2_TUNER_MODE_*
+ * @std:	TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_*
+ *
+ * Hybrid tuners should be supported by both V4L2 and DVB APIs. This
+ * struct contains the data that are used by the V4L2 side. To avoid
+ * dependencies from V4L2 headers, all enums here are declared as integers.
+ */
 struct analog_parameters {
 	unsigned int frequency;
 	unsigned int mode;
@@ -75,42 +112,6 @@
 	u64 std;
 };
 
-enum dvbfe_modcod {
-	DVBFE_MODCOD_DUMMY_PLFRAME	= 0,
-	DVBFE_MODCOD_QPSK_1_4,
-	DVBFE_MODCOD_QPSK_1_3,
-	DVBFE_MODCOD_QPSK_2_5,
-	DVBFE_MODCOD_QPSK_1_2,
-	DVBFE_MODCOD_QPSK_3_5,
-	DVBFE_MODCOD_QPSK_2_3,
-	DVBFE_MODCOD_QPSK_3_4,
-	DVBFE_MODCOD_QPSK_4_5,
-	DVBFE_MODCOD_QPSK_5_6,
-	DVBFE_MODCOD_QPSK_8_9,
-	DVBFE_MODCOD_QPSK_9_10,
-	DVBFE_MODCOD_8PSK_3_5,
-	DVBFE_MODCOD_8PSK_2_3,
-	DVBFE_MODCOD_8PSK_3_4,
-	DVBFE_MODCOD_8PSK_5_6,
-	DVBFE_MODCOD_8PSK_8_9,
-	DVBFE_MODCOD_8PSK_9_10,
-	DVBFE_MODCOD_16APSK_2_3,
-	DVBFE_MODCOD_16APSK_3_4,
-	DVBFE_MODCOD_16APSK_4_5,
-	DVBFE_MODCOD_16APSK_5_6,
-	DVBFE_MODCOD_16APSK_8_9,
-	DVBFE_MODCOD_16APSK_9_10,
-	DVBFE_MODCOD_32APSK_3_4,
-	DVBFE_MODCOD_32APSK_4_5,
-	DVBFE_MODCOD_32APSK_5_6,
-	DVBFE_MODCOD_32APSK_8_9,
-	DVBFE_MODCOD_32APSK_9_10,
-	DVBFE_MODCOD_RESERVED_1,
-	DVBFE_MODCOD_BPSK_1_3,
-	DVBFE_MODCOD_BPSK_1_4,
-	DVBFE_MODCOD_RESERVED_2
-};
-
 enum tuner_param {
 	DVBFE_TUNER_FREQUENCY		= (1 <<  0),
 	DVBFE_TUNER_TUNERSTEP		= (1 <<  1),
@@ -121,30 +122,28 @@
 	DVBFE_TUNER_DUMMY		= (1 << 31)
 };
 
-/*
- * ALGO_HW: (Hardware Algorithm)
- * ----------------------------------------------------------------
- * Devices that support this algorithm do everything in hardware
- * and no software support is needed to handle them.
- * Requesting these devices to LOCK is the only thing required,
- * device is supposed to do everything in the hardware.
+/**
+ * enum dvbfe_algo - defines the algorithm used to tune into a channel
  *
- * ALGO_SW: (Software Algorithm)
- * ----------------------------------------------------------------
+ * @DVBFE_ALGO_HW: Hardware Algorithm -
+ *	Devices that support this algorithm do everything in hardware
+ *	and no software support is needed to handle them.
+ *	Requesting these devices to LOCK is the only thing required,
+ *	device is supposed to do everything in the hardware.
+ *
+ * @DVBFE_ALGO_SW: Software Algorithm -
  * These are dumb devices, that require software to do everything
  *
- * ALGO_CUSTOM: (Customizable Agorithm)
- * ----------------------------------------------------------------
- * Devices having this algorithm can be customized to have specific
- * algorithms in the frontend driver, rather than simply doing a
- * software zig-zag. In this case the zigzag maybe hardware assisted
- * or it maybe completely done in hardware. In all cases, usage of
- * this algorithm, in conjunction with the search and track
- * callbacks, utilizes the driver specific algorithm.
+ * @DVBFE_ALGO_CUSTOM: Customizable Algorithm -
+ *	Devices having this algorithm can be customized to have specific
+ *	algorithms in the frontend driver, rather than simply doing a
+ *	software zig-zag. In this case the zigzag may be hardware assisted
+ *	or it may be completely done in hardware. In all cases, usage of
+ *	this algorithm, in conjunction with the search and track
+ *	callbacks, utilizes the driver specific algorithm.
  *
- * ALGO_RECOVERY: (Recovery Algorithm)
- * ----------------------------------------------------------------
- * These devices have AUTO recovery capabilities from LOCK failure
+ * @DVBFE_ALGO_RECOVERY: Recovery Algorithm -
+ *	These devices have AUTO recovery capabilities from LOCK failure
  */
 enum dvbfe_algo {
 	DVBFE_ALGO_HW			= (1 <<  0),
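
A driver advertises its choice through the get_frontend_algo op documented further down in this header. A minimal hardware-algorithm sketch, with my_fe_ as a hypothetical driver prefix:

    /* Hedged sketch: a demod whose hardware performs the whole zig-zag. */
    static enum dvbfe_algo my_fe_get_frontend_algo(struct dvb_frontend *fe)
    {
            return DVBFE_ALGO_HW;   /* the core only needs to request LOCK */
    }
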
@@ -162,27 +161,27 @@
 	u32 refclock;
 };
 
-/*
- * search callback possible return status
+/**
+ * enum dvbfe_search - search callback possible return status
  *
- * DVBFE_ALGO_SEARCH_SUCCESS
- * The frontend search algorithm completed and returned successfully
+ * @DVBFE_ALGO_SEARCH_SUCCESS:
+ *	The frontend search algorithm completed and returned successfully
  *
- * DVBFE_ALGO_SEARCH_ASLEEP
- * The frontend search algorithm is sleeping
+ * @DVBFE_ALGO_SEARCH_ASLEEP:
+ *	The frontend search algorithm is sleeping
  *
- * DVBFE_ALGO_SEARCH_FAILED
- * The frontend search for a signal failed
+ * @DVBFE_ALGO_SEARCH_FAILED:
+ *	The frontend search for a signal failed
  *
- * DVBFE_ALGO_SEARCH_INVALID
- * The frontend search algorith was probably supplied with invalid
- * parameters and the search is an invalid one
+ * @DVBFE_ALGO_SEARCH_INVALID:
+ *	The frontend search algorithm was probably supplied with invalid
+ *	parameters and the search is an invalid one
  *
- * DVBFE_ALGO_SEARCH_ERROR
- * The frontend search algorithm failed due to some error
+ * @DVBFE_ALGO_SEARCH_ERROR:
+ *	The frontend search algorithm failed due to some error
  *
- * DVBFE_ALGO_SEARCH_AGAIN
- * The frontend search algorithm was requested to search again
+ * @DVBFE_ALGO_SEARCH_AGAIN:
+ *	The frontend search algorithm was requested to search again
  */
 enum dvbfe_search {
 	DVBFE_ALGO_SEARCH_SUCCESS	= (1 <<  0),
@@ -193,7 +192,56 @@
 	DVBFE_ALGO_SEARCH_ERROR		= (1 << 31),
 };
 
-
+/**
+ * struct dvb_tuner_ops - Tuner information and callbacks
+ *
+ * @info:		embedded struct dvb_tuner_info with tuner properties
+ * @release:		callback function called when frontend is detached.
+ *			drivers should free any allocated memory.
+ * @init:		callback function used to initialize the tuner device.
+ * @sleep:		callback function used to put the tuner to sleep.
+ * @suspend:		callback function used to inform that the Kernel will
+ *			suspend.
+ * @resume:		callback function used to inform that the Kernel is
+ *			resuming from suspend.
+ * @set_params:		callback function used to inform the tuner to tune
+ *			into a digital TV channel. The properties to be used
+ *			are stored at @dvb_frontend.dtv_property_cache;. The
+ *			tuner demod can change the parameters to reflect the
+ *			changes needed for the channel to be tuned, and
+ *			update statistics.
+ * @set_analog_params:	callback function used to tune into an analog TV
+ *			channel on hybrid tuners. It passes @analog_parameters;
+ *			to the driver.
+ * @calc_regs:		callback function used to pass register data settings
+ *			for simple tuners.
+ * @set_config:		callback function used to send some tuner-specific
+ *			parameters.
+ * @get_frequency:	get the actual tuned frequency
+ * @get_bandwidth:	get the bandwidth used by the low-pass filters
+ * @get_if_frequency:	get the Intermediate Frequency, in Hz. For baseband,
+ * 			should return 0.
+ * @get_status:		returns the frontend lock status
+ * @get_rf_strength:	returns the RF signal strength. Used mostly to support
+ *			analog TV and radio. Digital TV should report, instead,
+ *			via DVBv5 API (@dvb_frontend.dtv_property_cache;).
+ * @get_afc:		Used only by analog TV core. Reports the frequency
+ *			drift due to AFC.
+ * @set_frequency:	Set a new frequency. Please notice that using
+ *			set_params is preferred.
+ * @set_bandwidth:	Set a new bandwidth. Please notice that using
+ *			set_params is preferred.
+ * @set_state:		callback function used on some legacy drivers that
+ * 			don't implement set_params in order to set properties.
+ * 			Shouldn't be used on new drivers.
+ * @get_state:		callback function used to get properties by some
+ * 			legacy drivers that don't implement set_params.
+ * 			Shouldn't be used on new drivers.
+ *
+ * NOTE: frequencies used on get_frequency and set_frequency are in Hz for
+ * terrestrial/cable or kHz for satellite.
+ *
+ */
 struct dvb_tuner_ops {
 
 	struct dvb_tuner_info info;
@@ -237,10 +285,37 @@
 	int (*get_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state);
 };
 
+/**
+ * struct analog_demod_info - Information struct for analog TV part of the demod
+ *
+ * @name:	Name of the analog TV demodulator
+ */
 struct analog_demod_info {
 	char *name;
 };
 
+/**
+ * struct analog_demod_ops  - Demodulation information and callbacks for
+ *			      analog TV and radio
+ *
+ * @info:		pointer to struct analog_demod_info
+ * @set_params:		callback function used to inform the demod to set the
+ *			demodulator parameters needed to decode an analog or
+ *			radio channel. The properties are passed via
+ *			struct @analog_params;.
+ * @has_signal:		returns 0xffff if it has a signal, or 0 if it doesn't.
+ * @get_afc:		Used only by analog TV core. Reports the frequency
+ *			drift due to AFC.
+ * @tuner_status:	callback function that returns tuner status bits, e. g.
+ *			TUNER_STATUS_LOCKED and TUNER_STATUS_STEREO.
+ * @standby:		set the tuner to standby mode.
+ * @release:		callback function called when frontend is detached.
+ *			drivers should free any allocated memory.
+ * @i2c_gate_ctrl:	controls the I2C gate. Newer drivers should use I2C
+ *			mux support instead.
+ * @set_config:		callback function used to send some tuner-specific
+ *			parameters.
+ */
 struct analog_demod_ops {
 
 	struct analog_demod_info info;
@@ -260,6 +335,87 @@
 
 struct dtv_frontend_properties;
 
+
+/**
+ * struct dvb_frontend_ops - Demodulation information and callbacks for
+ *			      digital TV
+ *
+ * @info:		embedded struct dvb_frontend_info with frontend properties
+ * @delsys:		Delivery systems supported by the frontend
+ * @release:		callback function called when frontend is detached.
+ *			drivers should free any allocated memory.
+ * @release_sec:	callback function requesting that the Satellite Equipment
+ *			Control (SEC) driver release and free any memory
+ *			allocated by the driver.
+ * @init:		callback function used to initialize the tuner device.
+ * @sleep:		callback function used to put the tuner to sleep.
+ * @write:		callback function used by some demod legacy drivers to
+ *			allow other drivers to write data into their registers.
+ *			Should not be used on new drivers.
+ * @tune:		callback function used by demod drivers that use
+ *			@DVBFE_ALGO_HW; to tune into a frequency.
+ * @get_frontend_algo:	returns the desired hardware algorithm.
+ * @set_frontend:	callback function used to inform the demod to set the
+ *			parameters for demodulating a digital TV channel.
+ *			The properties to be used are stored at
+ *			@dvb_frontend.dtv_property_cache;. The demod can change
+ *			the parameters to reflect the changes needed for the
+ *			channel to be decoded, and update statistics.
+ * @get_tune_settings:	callback function used to get the tuning settings
+ *			(struct dvb_frontend_tune_settings).
+ * @get_frontend:	callback function used to report the parameters
+ *			actually in use. The properties to be used are stored
+ *			at @dvb_frontend.dtv_property_cache;, where the
+ *			statistics should also be updated. Please notice that
+ *			it should not return an error code if the statistics
+ *			are not available because the demod is not locked.
+ * @read_status:	returns the locking status of the frontend.
+ * @read_ber:		legacy callback function to return the bit error rate.
+ *			Newer drivers should provide such info via DVBv5 API,
+ *			e. g. @set_frontend;/@get_frontend;, implementing this
+ *			callback only if DVBv3 API compatibility is wanted.
+ * @read_signal_strength: legacy callback function to return the signal
+ *			strength. Newer drivers should provide such info via
+ *			DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ *			implementing this callback only if DVBv3 API
+ *			compatibility is wanted.
+ * @read_snr:		legacy callback function to return the Signal/Noise
+ * 			rate. Newer drivers should provide such info via
+ *			DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ *			implementing this callback only if DVBv3 API
+ *			compatibility is wanted.
+ * @read_ucblocks:	legacy callback function to return the Uncorrected Error
+ *			Blocks. Newer drivers should provide such info via
+ *			DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ *			implementing this callback only if DVBv3 API
+ *			compatibility is wanted.
+ * @diseqc_reset_overload: callback function to implement the
+ *			FE_DISEQC_RESET_OVERLOAD ioctl (only Satellite)
+ * @diseqc_send_master_cmd: callback function to implement the
+ *			FE_DISEQC_SEND_MASTER_CMD ioctl (only Satellite).
+ * @diseqc_recv_slave_reply: callback function to implement the
+ *			FE_DISEQC_RECV_SLAVE_REPLY ioctl (only Satellite)
+ * @diseqc_send_burst:	callback function to implement the
+ *			FE_DISEQC_SEND_BURST ioctl (only Satellite).
+ * @set_tone:		callback function to implement the
+ *			FE_SET_TONE ioctl (only Satellite).
+ * @set_voltage:	callback function to implement the
+ *			FE_SET_VOLTAGE ioctl (only Satellite).
+ * @enable_high_lnb_voltage: callback function to implement the
+ *			FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite).
+ * @dishnetwork_send_legacy_command: callback function to implement the
+ *			FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * @i2c_gate_ctrl:	controls the I2C gate. Newer drivers should use I2C
+ *			mux support instead.
+ * @ts_bus_ctrl:	callback function used to take control of the TS bus.
+ * @set_lna:		callback function to power on/off/auto the LNA.
+ * @search:		callback function used by custom search algorithms.
+ * @tuner_ops:		pointer to struct dvb_tuner_ops
+ * @analog_ops:		pointer to struct analog_demod_ops
+ * @set_property:	callback function to allow the frontend to validate
+ *			incoming properties. Should not be used on new drivers.
+ * @get_property:	callback function to allow the frontend to override
+ *			outgoing properties. Should not be used on new drivers.
+ */
 struct dvb_frontend_ops {
 
 	struct dvb_frontend_info info;
@@ -280,6 +436,7 @@
 		    unsigned int mode_flags,
 		    unsigned int *delay,
 		    enum fe_status *status);
+
 	/* get frontend tuning algorithm from the module */
 	enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
 
@@ -324,6 +481,7 @@
 #ifdef __DVB_CORE__
 #define MAX_EVENT 8
 
+/* Used only internally at dvb_frontend.c */
 struct dvb_fe_events {
 	struct dvb_frontend_event events[MAX_EVENT];
 	int			  eventw;
@@ -334,13 +492,83 @@
 };
 #endif
 
+/**
+ * struct dtv_frontend_properties - contains a list of properties that are
+ *				    specific to a digital TV standard.
+ *
+ * @frequency:		frequency in Hz for terrestrial/cable or in kHz for
+ *			Satellite
+ * @modulation:		Frontend modulation type
+ * @voltage:		SEC voltage (only Satellite)
+ * @sectone:		SEC tone mode (only Satellite)
+ * @inversion:		Spectral inversion
+ * @fec_inner:		Forward error correction inner Code Rate
+ * @transmission_mode:	Transmission Mode
+ * @bandwidth_hz:	Bandwidth, in Hz. A zero value means that userspace
+ * 			wants to autodetect.
+ * @guard_interval:	Guard Interval
+ * @hierarchy:		Hierarchy
+ * @symbol_rate:	Symbol Rate
+ * @code_rate_HP:	high priority stream code rate
+ * @code_rate_LP:	low priority stream code rate
+ * @pilot:		Enable/disable/autodetect pilot tones
+ * @rolloff:		Rolloff factor (alpha)
+ * @delivery_system:	FE delivery system (e. g. digital TV standard)
+ * @interleaving:	interleaving
+ * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard)
+ * @isdbt_sb_mode:	ISDB-T Sound Broadcast (SB) mode (only ISDB standard)
+ * @isdbt_sb_subchannel:	ISDB-T SB subchannel (only ISDB standard)
+ * @isdbt_sb_segment_idx:	ISDB-T SB segment index (only ISDB standard)
+ * @isdbt_sb_segment_count:	ISDB-T SB segment count (only ISDB standard)
+ * @isdbt_layer_enabled:	ISDB Layer enabled (only ISDB standard)
+ * @layer:		ISDB per-layer data (only ISDB standard)
+ * @layer.segment_count: Segment Count;
+ * @layer.fec:		per layer code rate;
+ * @layer.modulation:	per layer modulation;
+ * @layer.interleaving:	 per layer interleaving.
+ * @stream_id:		If different than zero, enable substream filtering, if
+ *			hardware supports (DVB-S2 and DVB-T2).
+ * @atscmh_fic_ver:	Version number of the FIC (Fast Information Channel)
+ *			signaling data (only ATSC-M/H)
+ * @atscmh_parade_id:	Parade identification number (only ATSC-M/H)
+ * @atscmh_nog:		Number of MH groups per MH subframe for a designated
+ *			parade (only ATSC-M/H)
+ * @atscmh_tnog:	Total number of MH groups including all MH groups
+ *			belonging to all MH parades in one MH subframe
+ *			(only ATSC-M/H)
+ * @atscmh_sgn:		Start group number (only ATSC-M/H)
+ * @atscmh_prc:		Parade repetition cycle (only ATSC-M/H)
+ * @atscmh_rs_frame_mode:	Reed Solomon (RS) frame mode (only ATSC-M/H)
+ * @atscmh_rs_frame_ensemble:	RS frame ensemble (only ATSC-M/H)
+ * @atscmh_rs_code_mode_pri:	RS code mode pri (only ATSC-M/H)
+ * @atscmh_rs_code_mode_sec:	RS code mode sec (only ATSC-M/H)
+ * @atscmh_sccc_block_mode:	Series Concatenated Convolutional Code (SCCC)
+ *				Block Mode (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_a:	SCCC code mode A (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_b:	SCCC code mode B (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_c:	SCCC code mode C (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_d:	SCCC code mode D (only ATSC-M/H)
+ * @lna:		Power ON/OFF/AUTO the Low-Noise Amplifier (LNA)
+ * @strength:		DVBv5 API statistics: Signal Strength
+ * @cnr:		DVBv5 API statistics: Signal to Noise ratio of the
+ * 			(main) carrier
+ * @pre_bit_error:	DVBv5 API statistics: pre-Viterbi bit error count
+ * @pre_bit_count:	DVBv5 API statistics: pre-Viterbi bit count
+ * @post_bit_error:	DVBv5 API statistics: post-Viterbi bit error count
+ * @post_bit_count:	DVBv5 API statistics: post-Viterbi bit count
+ * @block_error:	DVBv5 API statistics: block error count
+ * @block_count:	DVBv5 API statistics: block count
+ *
+ * NOTE: derived statistics like Uncorrected Error blocks (UCE) are
+ * calculated in userspace.
+ *
+ * Only a subset of the properties are needed for a given delivery system.
+ * For more info, consult the media_api.html with the documentation of the
+ * Userspace API.
+ */
 struct dtv_frontend_properties {
-
-	/* Cache State */
-	u32			state;
-
 	u32			frequency;
-	enum fe_modulation		modulation;
+	enum fe_modulation	modulation;
 
 	enum fe_sec_voltage	voltage;
 	enum fe_sec_tone_mode	sectone;
@@ -407,6 +635,11 @@
 	struct dtv_fe_stats	post_bit_count;
 	struct dtv_fe_stats	block_error;
 	struct dtv_fe_stats	block_count;
+
+	/* private: */
+	/* Cache State */
+	u32			state;
+
 };
 
 #define DVB_FE_NO_EXIT  0
@@ -414,6 +647,25 @@
 #define DVB_FE_DEVICE_REMOVED   2
 #define DVB_FE_DEVICE_RESUME    3
 
+/**
+ * struct dvb_frontend - Frontend structure to be used by drivers.
+ *
+ * @ops:		embedded struct dvb_frontend_ops
+ * @dvb:		pointer to struct dvb_adapter
+ * @demodulator_priv:	demod private data
+ * @tuner_priv:		tuner private data
+ * @frontend_priv:	frontend private data
+ * @sec_priv:		SEC private data
+ * @analog_demod_priv:	Analog demod private data
+ * @dtv_property_cache:	embedded struct dtv_frontend_properties
+ * @callback:		callback function used on some drivers to call
+ *			either the tuner or the demodulator.
+ * @id:			Frontend ID
+ * @exit:		Used to inform the DVB core that the frontend
+ *			thread should exit (usually, this means that the
+ *			hardware got disconnected).
+ */
+
 struct dvb_frontend {
 	struct dvb_frontend_ops ops;
 	struct dvb_adapter *dvb;
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index aecc867..34dc1df 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -25,33 +25,38 @@
 #include <linux/types.h>
 
 /**
- * computes log2 of a value; the result is shifted left by 24 bits
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
  *
  * to use rational values you can use the following method:
  *   intlog2(value) = intlog2(value * 2^x) - x * 2^24
  *
- * example: intlog2(8) will give 3 << 24 = 3 * 2^24
- * example: intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
- * example: intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
+ * Some use-case examples:
+ *	intlog2(8) will give 3 << 24 = 3 * 2^24
+ *	intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
+ *	intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
  *
- * @param value The value (must be != 0)
- * @return log2(value) * 2^24
+ *
+ * return: log2(value) * 2^24
  */
 extern unsigned int intlog2(u32 value);
 
 /**
- * computes log10 of a value; the result is shifted left by 24 bits
+ * intlog10 - computes log10 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
  *
  * to use rational values you can use the following method:
  *   intlog10(value) = intlog10(value * 10^x) - x * 2^24
  *
- * example: intlog10(1000) will give 3 << 24 = 3 * 2^24
+ * A use-case example:
+ *	intlog10(1000) will give 3 << 24 = 3 * 2^24
  *   due to the implementation, intlog10(1000) might not be exactly 3 * 2^24
  *
  * look at intlog2 for similar examples
  *
- * @param value The value (must be != 0)
- * @return log10(value) * 2^24
+ * return: log10(value) * 2^24
  */
 extern unsigned int intlog10(u32 value);
 
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index a694fb1..b81e026 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -709,7 +709,7 @@
 					if (!priv->ule_dbit) {
 						 /* dest_addr buffer is only valid if priv->ule_dbit == 0 */
 						memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
-						memset(ethh->h_source, 0, ETH_ALEN);
+						eth_zero_addr(ethh->h_source);
 					}
 					else /* zeroize source and dest */
 						memset( ethh, 0, ETH_ALEN*2 );
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 9e1e11b..3ebc2d3 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -45,33 +45,33 @@
 
 
 /*
-** Notes:
-** ------
-** (1) For performance reasons read and write routines don't check buffer sizes
-**     and/or number of bytes free/available. This has to be done before these
-**     routines are called. For example:
-**
-**     *** write <buflen> bytes ***
-**     free = dvb_ringbuffer_free(rbuf);
-**     if (free >= buflen)
-**         count = dvb_ringbuffer_write(rbuf, buffer, buflen);
-**     else
-**         ...
-**
-**     *** read min. 1000, max. <bufsize> bytes ***
-**     avail = dvb_ringbuffer_avail(rbuf);
-**     if (avail >= 1000)
-**         count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
-**     else
-**         ...
-**
-** (2) If there is exactly one reader and one writer, there is no need
-**     to lock read or write operations.
-**     Two or more readers must be locked against each other.
-**     Flushing the buffer counts as a read operation.
-**     Resetting the buffer counts as a read and write operation.
-**     Two or more writers must be locked against each other.
-*/
+ * Notes:
+ * ------
+ * (1) For performance reasons read and write routines don't check buffer sizes
+ *     and/or number of bytes free/available. This has to be done before these
+ *     routines are called. For example:
+ *
+ *     *** write @buflen: bytes ***
+ *     free = dvb_ringbuffer_free(rbuf);
+ *     if (free >= buflen)
+ *         count = dvb_ringbuffer_write(rbuf, buffer, buflen);
+ *     else
+ *         ...
+ *
+ *     *** read min. 1000, max. @bufsize: bytes ***
+ *     avail = dvb_ringbuffer_avail(rbuf);
+ *     if (avail >= 1000)
+ *         count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
+ *     else
+ *         ...
+ *
+ * (2) If there is exactly one reader and one writer, there is no need
+ *     to lock read or write operations.
+ *     Two or more readers must be locked against each other.
+ *     Flushing the buffer counts as a read operation.
+ *     Resetting the buffer counts as a read and write operation.
+ *     Two or more writers must be locked against each other.
+ */
 
 /* initialize ring buffer, lock and queue */
 extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len);
@@ -87,9 +87,9 @@
 
 
 /*
-** Reset the read and write pointers to zero and flush the buffer
-** This counts as a read and write operation
-*/
+ * Reset the read and write pointers to zero and flush the buffer
+ * This counts as a read and write operation
+ */
 extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
 
 
@@ -101,19 +101,19 @@
 /* flush buffer protected by spinlock and wake-up waiting task(s) */
 extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
 
-/* peek at byte <offs> in the buffer */
+/* peek at byte @offs: in the buffer */
 #define DVB_RINGBUFFER_PEEK(rbuf,offs)	\
 			(rbuf)->data[((rbuf)->pread+(offs))%(rbuf)->size]
 
-/* advance read ptr by <num> bytes */
+/* advance read ptr by @num: bytes */
 #define DVB_RINGBUFFER_SKIP(rbuf,num)	\
 			(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
 
 /*
-** read <len> bytes from ring buffer into <buf>
-** <usermem> specifies whether <buf> resides in user space
-** returns number of bytes transferred or -EFAULT
-*/
+ * read @len: bytes from ring buffer into @buf:
+ * @usermem: specifies whether @buf: resides in user space
+ * returns number of bytes transferred or -EFAULT
+ */
 extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
 				   u8 __user *buf, size_t len);
 extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
@@ -127,9 +127,9 @@
 			{ (rbuf)->data[(rbuf)->pwrite]=(byte); \
 			(rbuf)->pwrite=((rbuf)->pwrite+1)%(rbuf)->size; }
 /*
-** write <len> bytes to ring buffer
-** <usermem> specifies whether <buf> resides in user space
-** returns number of bytes transferred or -EFAULT
+ * write @len: bytes to ring buffer
+ * @usermem: specifies whether @buf: resides in user space
+ * returns number of bytes transferred or -EFAULT
 */
 extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
 				    size_t len);
@@ -138,48 +138,63 @@
 
 
 /**
- * Write a packet into the ringbuffer.
+ * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
  *
- * <rbuf> Ringbuffer to write to.
- * <buf> Buffer to write.
- * <len> Length of buffer (currently limited to 65535 bytes max).
+ * @rbuf: Ringbuffer to write to.
+ * @buf: Buffer to write.
+ * @len: Length of buffer (currently limited to 65535 bytes max).
  * returns Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
  */
 extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
 					size_t len);
 
 /**
- * Read from a packet in the ringbuffer. Note: unlike dvb_ringbuffer_read(), this
- * does NOT update the read pointer in the ringbuffer. You must use
- * dvb_ringbuffer_pkt_dispose() to mark a packet as no longer required.
+ * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
+ * Note: unlike dvb_ringbuffer_read(), this does NOT update the read pointer
+ * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
+ * packet as no longer required.
  *
- * <rbuf> Ringbuffer concerned.
- * <idx> Packet index as returned by dvb_ringbuffer_pkt_next().
- * <offset> Offset into packet to read from.
- * <buf> Destination buffer for data.
- * <len> Size of destination buffer.
- * <usermem> Set to 1 if <buf> is in userspace.
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
  * returns Number of bytes read, or -EFAULT.
  */
 extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
 				       int offset, u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
+ * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer
+ * in the ringbuffer.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
+ * returns Number of bytes read, or -EFAULT.
+ */
 extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
 				       int offset, u8 *buf, size_t len);
 
 /**
- * Dispose of a packet in the ring buffer.
+ * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer.
  *
- * <rbuf> Ring buffer concerned.
- * <idx> Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @rbuf: Ring buffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
  */
 extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
 
 /**
- * Get the index of the next packet in a ringbuffer.
+ * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer.
  *
- * <rbuf> Ringbuffer concerned.
- * <idx> Previous packet index, or -1 to return the first packet index.
- * <pktlen> On success, will be updated to contain the length of the packet in bytes.
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Previous packet index, or -1 to return the first packet index.
+ * @pktlen: On success, will be updated to contain the length of the packet in bytes.
  * returns Packet index (if >=0), or -1 if no packets available.
  */
 extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen);
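
Taken together, the packet calls support a simple consume loop. A hedged usage sketch built only from the prototypes above; buffer setup is omitted, and process_packet() and PKT_MAX are hypothetical:

    /* Drain every complete packet currently in rbuf. */
    size_t pktlen;
    ssize_t idx = dvb_ringbuffer_pkt_next(rbuf, -1, &pktlen);

    while (idx >= 0) {
            u8 pkt[PKT_MAX];
            ssize_t len = dvb_ringbuffer_pkt_read(rbuf, idx, 0, pkt,
                                                  min(pktlen, sizeof(pkt)));
            if (len > 0)
                    process_packet(pkt, len);       /* hypothetical consumer */
            dvb_ringbuffer_pkt_dispose(rbuf, idx);
            idx = dvb_ringbuffer_pkt_next(rbuf, -1, &pktlen);
    }
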
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 12629b8..c61a4f0 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -57,6 +57,25 @@
 
 struct dvb_frontend;
 
+/**
+ * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API
+ *
+ * @num:		Number of the adapter
+ * @list_head:		List with the DVB adapters
+ * @device_list:	List with the DVB devices
+ * @name:		Name of the adapter
+ * @proposed_mac:	proposed MAC address for the adapter
+ * @priv:		private data
+ * @device:		pointer to struct device
+ * @module:		pointer to struct module
+ * @mfe_shared:		mfe shared: indicates mutually exclusive frontends
+ *			The usage of this flag is currently deprecated.
+ * @mfe_dvbdev:		Frontend device in use, in the case of MFE
+ * @mfe_lock:		Lock to prevent using the other frontends when MFE is
+ *			used.
+ * @mdev:		pointer to struct media_device, used when the media
+ *			controller is used.
+ */
 struct dvb_adapter {
 	int num;
 	struct list_head list_head;
@@ -78,7 +97,34 @@
 #endif
 };
 
-
+/**
+ * struct dvb_device - represents a DVB device node
+ *
+ * @list_head:	List head with all DVB devices
+ * @fops:	pointer to struct file_operations
+ * @adapter:	pointer to the adapter that holds this device node
+ * @type:	type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
+ *		DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ * @minor:	devnode minor number. Major number is always DVB_MAJOR.
+ * @id:		device ID number, inside the adapter
+ * @readers:	Initialized by the caller. Each call to open() in Read Only mode
+ *		decreases this counter by one.
+ * @writers:	Initialized by the caller. Each call to open() in Read/Write
+ *		mode decreases this counter by one.
+ * @users:	Initialized by the caller. Each call to open() in any mode
+ *		decreases this counter by one.
+ * @wait_queue:	wait queue, used to wait for certain events inside one of
+ *		the DVB API callers
+ * @kernel_ioctl: callback function used to handle ioctl calls from userspace.
+ * @name:	Name to be used for the device at the Media Controller
+ * @entity:	pointer to struct media_entity associated with the device node
+ * @pads:	pointer to struct media_pad associated with @entity;
+ * @priv:	private data
+ *
+ * This structure is used by the DVB core (frontend, CA, net, demux) in
+ * order to create the device nodes. Usually, drivers should not initialize
+ * this struct directly.
+ */
 struct dvb_device {
 	struct list_head list_head;
 	const struct file_operations *fops;
@@ -109,19 +155,55 @@
 	void *priv;
 };
 
+/**
+ * dvb_register_adapter - Registers a new DVB adapter
+ *
+ * @adap:	pointer to struct dvb_adapter
+ * @name:	Adapter's name
+ * @module:	initialized with THIS_MODULE at the caller
+ * @device:	pointer to struct device that corresponds to the device driver
+ * @adapter_nums: Array with a list of adapter numbers for
+ * 		dvb_register_adapter() to select among. Typically, initialized with:
+ *		DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums)
+ */
+int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
+			 struct module *module, struct device *device,
+			 short *adapter_nums);
 
-extern int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
-				struct module *module, struct device *device,
-				short *adapter_nums);
-extern int dvb_unregister_adapter (struct dvb_adapter *adap);
+/**
+ * dvb_unregister_adapter - Unregisters a DVB adapter
+ *
+ * @adap:	pointer to struct dvb_adapter
+ */
+int dvb_unregister_adapter(struct dvb_adapter *adap);
 
-extern int dvb_register_device (struct dvb_adapter *adap,
-				struct dvb_device **pdvbdev,
-				const struct dvb_device *template,
-				void *priv,
-				int type);
+/**
+ * dvb_register_device - Registers a new DVB device
+ *
+ * @adap:	pointer to struct dvb_adapter
+ * @pdvbdev:	pointer to the place where the new struct dvb_device will be
+ *		stored
+ * @template:	Template used to create &pdvbdev;
+ * @priv:	private data
+ * @type:	type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
+ *		DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ */
+int dvb_register_device(struct dvb_adapter *adap,
+			struct dvb_device **pdvbdev,
+			const struct dvb_device *template,
+			void *priv,
+			int type);
 
-extern void dvb_unregister_device (struct dvb_device *dvbdev);
+/**
+ * dvb_unregister_device - Unregisters a DVB device
+ *
+ * @dvbdev:	pointer to struct dvb_device
+ */
+void dvb_unregister_device(struct dvb_device *dvbdev);
 
 #ifdef CONFIG_MEDIA_CONTROLLER_DVB
 void dvb_create_media_graph(struct dvb_adapter *adap);
@@ -136,17 +218,17 @@
 #define dvb_register_media_controller(a, b) {}
 #endif
 
-extern int dvb_generic_open (struct inode *inode, struct file *file);
-extern int dvb_generic_release (struct inode *inode, struct file *file);
-extern long dvb_generic_ioctl (struct file *file,
+int dvb_generic_open (struct inode *inode, struct file *file);
+int dvb_generic_release (struct inode *inode, struct file *file);
+long dvb_generic_ioctl (struct file *file,
 			      unsigned int cmd, unsigned long arg);
 
 /* we don't mess with video_usercopy() any more,
 we simply define our own dvb_usercopy(), which will hopefully become
 generic_usercopy()  someday... */
 
-extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
-			    int (*func)(struct file *file, unsigned int cmd, void *arg));
+int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+		 int (*func)(struct file *file, unsigned int cmd, void *arg));
 
 /** generic DVB attach function. */
 #ifdef CONFIG_MEDIA_ATTACH
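
With the prototypes now documented, a minimal registration sequence reads roughly as below; everything prefixed my_ is a hypothetical driver-local name, pdev is an assumed platform device, and error handling is trimmed:

    DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums);

    static struct dvb_adapter my_adap;
    static struct dvb_device *my_dvbdev;

    ret = dvb_register_adapter(&my_adap, "my-card", THIS_MODULE,
                               &pdev->dev, adapter_nums);
    if (ret < 0)
            return ret;

    ret = dvb_register_device(&my_adap, &my_dvbdev, &my_template,
                              my_priv, DVB_DEVICE_FRONTEND);
    if (ret < 0)
            dvb_unregister_adapter(&my_adap);
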
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 5ab90f3..292c947 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -1,5 +1,5 @@
 menu "Customise DVB Frontends"
-	visible if !MEDIA_SUBDRV_AUTOSELECT
+	visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
 
 comment "Multistandard (satellite) frontends"
 	depends on DVB_CORE
@@ -264,6 +264,7 @@
 config DVB_TDA10071
 	tristate "NXP TDA10071"
 	depends on DVB_CORE && I2C
+	select REGMAP
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  Say Y when you want to support this frontend.
@@ -450,6 +451,13 @@
 	help
 	  Say Y when you want to support this frontend.
 
+config DVB_CXD2841ER
+	tristate "Sony CXD2841ER"
+	depends on DVB_CORE && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Say Y when you want to support this frontend.
+
 config DVB_RTL2830
 	tristate "Realtek RTL2830 DVB-T"
 	depends on DVB_CORE && I2C && I2C_MUX
@@ -712,6 +720,14 @@
 
 source "drivers/media/dvb-frontends/drx39xyj/Kconfig"
 
+config DVB_LNBH25
+	tristate "LNBH25 SEC controller"
+	depends on DVB_CORE && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  An SEC control chip.
+	  Say Y when you want to support this chip.
+
 config DVB_LNBP21
 	tristate "LNBP21/LNBH24 SEC controllers"
 	depends on DVB_CORE && I2C
@@ -815,6 +831,20 @@
 	depends on DVB_CORE && I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 
+config DVB_HORUS3A
+	tristate "Sony Horus3A tuner"
+	depends on DVB_CORE && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Say Y when you want to support this frontend.
+
+config DVB_ASCOT2E
+	tristate "Sony Ascot2E tuner"
+	depends on DVB_CORE && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Say Y when you want to support this frontend.
+
 comment "Tools to develop new frontends"
 
 config DVB_DUMMY_FE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index ebab1b8..37ef17b 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
 obj-$(CONFIG_DVB_LG2160) += lg2160.o
 obj-$(CONFIG_DVB_CX24123) += cx24123.o
+obj-$(CONFIG_DVB_LNBH25) += lnbh25.o
 obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
 obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
 obj-$(CONFIG_DVB_ISL6405) += isl6405.o
@@ -105,6 +106,7 @@
 obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
 obj-$(CONFIG_DVB_STV0367) += stv0367.o
 obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
+obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o
 obj-$(CONFIG_DVB_DRXK) += drxk.o
 obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
 obj-$(CONFIG_DVB_SI2165) += si2165.o
@@ -118,3 +120,5 @@
 obj-$(CONFIG_DVB_AF9033) += af9033.o
 obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
 obj-$(CONFIG_DVB_TC90522) += tc90522.o
+obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
+obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 97ecbe0..e1e9bdd 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -12,163 +12,69 @@
  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License along
- *    with this program; if not, write to the Free Software Foundation, Inc.,
- *    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
-#include "dvb_frontend.h"
 #include "a8293.h"
 
-struct a8293_priv {
-	u8 i2c_addr;
-	struct i2c_adapter *i2c;
+struct a8293_dev {
 	struct i2c_client *client;
 	u8 reg[2];
 };
 
-static int a8293_i2c(struct a8293_priv *priv, u8 *val, int len, bool rd)
-{
-	int ret;
-	struct i2c_msg msg[1] = {
-		{
-			.addr = priv->i2c_addr,
-			.len = len,
-			.buf = val,
-		}
-	};
-
-	if (rd)
-		msg[0].flags = I2C_M_RD;
-	else
-		msg[0].flags = 0;
-
-	ret = i2c_transfer(priv->i2c, msg, 1);
-	if (ret == 1) {
-		ret = 0;
-	} else {
-		dev_warn(&priv->i2c->dev, "%s: i2c failed=%d rd=%d\n",
-				KBUILD_MODNAME, ret, rd);
-		ret = -EREMOTEIO;
-	}
-
-	return ret;
-}
-
-static int a8293_wr(struct a8293_priv *priv, u8 *val, int len)
-{
-	return a8293_i2c(priv, val, len, 0);
-}
-
-static int a8293_rd(struct a8293_priv *priv, u8 *val, int len)
-{
-	return a8293_i2c(priv, val, len, 1);
-}
-
 static int a8293_set_voltage(struct dvb_frontend *fe,
-	enum fe_sec_voltage fe_sec_voltage)
+			     enum fe_sec_voltage fe_sec_voltage)
 {
-	struct a8293_priv *priv = fe->sec_priv;
+	struct a8293_dev *dev = fe->sec_priv;
+	struct i2c_client *client = dev->client;
 	int ret;
+	u8 reg0, reg1;
 
-	dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__,
-			fe_sec_voltage);
+	dev_dbg(&client->dev, "fe_sec_voltage=%d\n", fe_sec_voltage);
 
 	switch (fe_sec_voltage) {
 	case SEC_VOLTAGE_OFF:
 		/* ENB=0 */
-		priv->reg[0] = 0x10;
+		reg0 = 0x10;
 		break;
 	case SEC_VOLTAGE_13:
 		/* VSEL0=1, VSEL1=0, VSEL2=0, VSEL3=0, ENB=1*/
-		priv->reg[0] = 0x31;
+		reg0 = 0x31;
 		break;
 	case SEC_VOLTAGE_18:
 		/* VSEL0=0, VSEL1=0, VSEL2=0, VSEL3=1, ENB=1*/
-		priv->reg[0] = 0x38;
+		reg0 = 0x38;
 		break;
 	default:
 		ret = -EINVAL;
 		goto err;
 	}
-
-	ret = a8293_wr(priv, &priv->reg[0], 1);
-	if (ret)
-		goto err;
-
-	usleep_range(1500, 50000);
-
-	return ret;
-err:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-static void a8293_release_sec(struct dvb_frontend *fe)
-{
-	a8293_set_voltage(fe, SEC_VOLTAGE_OFF);
-
-	kfree(fe->sec_priv);
-	fe->sec_priv = NULL;
-}
-
-struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
-	struct i2c_adapter *i2c, const struct a8293_config *cfg)
-{
-	int ret;
-	struct a8293_priv *priv = NULL;
-	u8 buf[2];
-
-	/* allocate memory for the internal priv */
-	priv = kzalloc(sizeof(struct a8293_priv), GFP_KERNEL);
-	if (priv == NULL) {
-		ret = -ENOMEM;
-		goto err;
+	if (reg0 != dev->reg[0]) {
+		ret = i2c_master_send(client, &reg0, 1);
+		if (ret < 0)
+			goto err;
+		dev->reg[0] = reg0;
 	}
 
-	/* setup the priv */
-	priv->i2c = i2c;
-	priv->i2c_addr = cfg->i2c_addr;
-	fe->sec_priv = priv;
-
-	/* check if the SEC is there */
-	ret = a8293_rd(priv, buf, 2);
-	if (ret)
-		goto err;
-
-	/* ENB=0 */
-	priv->reg[0] = 0x10;
-	ret = a8293_wr(priv, &priv->reg[0], 1);
-	if (ret)
-		goto err;
-
 	/* TMODE=0, TGATE=1 */
-	priv->reg[1] = 0x82;
-	ret = a8293_wr(priv, &priv->reg[1], 1);
-	if (ret)
-		goto err;
+	reg1 = 0x82;
+	if (reg1 != dev->reg[1]) {
+		ret = i2c_master_send(client, &reg1, 1);
+		if (ret < 0)
+			goto err;
+		dev->reg[1] = reg1;
+	}
 
-	fe->ops.release_sec = a8293_release_sec;
-
-	/* override frontend ops */
-	fe->ops.set_voltage = a8293_set_voltage;
-
-	dev_info(&priv->i2c->dev, "%s: Allegro A8293 SEC attached\n",
-			KBUILD_MODNAME);
-
-	return fe;
+	usleep_range(1500, 50000);
+	return 0;
 err:
-	dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
-	kfree(priv);
-	return NULL;
+	dev_dbg(&client->dev, "failed=%d\n", ret);
+	return ret;
 }
-EXPORT_SYMBOL(a8293_attach);
 
 static int a8293_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
+		       const struct i2c_device_id *id)
 {
-	struct a8293_priv *dev;
+	struct a8293_dev *dev;
 	struct a8293_platform_data *pdata = client->dev.platform_data;
 	struct dvb_frontend *fe = pdata->dvb_frontend;
 	int ret;
@@ -181,29 +87,14 @@
 	}
 
 	dev->client = client;
-	dev->i2c = client->adapter;
-	dev->i2c_addr = client->addr;
 
 	/* check if the SEC is there */
-	ret = a8293_rd(dev, buf, 2);
-	if (ret)
-		goto err_kfree;
-
-	/* ENB=0 */
-	dev->reg[0] = 0x10;
-	ret = a8293_wr(dev, &dev->reg[0], 1);
-	if (ret)
-		goto err_kfree;
-
-	/* TMODE=0, TGATE=1 */
-	dev->reg[1] = 0x82;
-	ret = a8293_wr(dev, &dev->reg[1], 1);
-	if (ret)
+	ret = i2c_master_recv(client, buf, 2);
+	if (ret < 0)
 		goto err_kfree;
 
 	/* override frontend ops */
 	fe->ops.set_voltage = a8293_set_voltage;
-
 	fe->sec_priv = dev;
 	i2c_set_clientdata(client, dev);
 
@@ -234,7 +125,6 @@
 
 static struct i2c_driver a8293_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "a8293",
 		.suppress_bind_attrs = true,
 	},
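The rewrite above replaces the custom a8293_i2c() helpers with plain
i2c_master_send()/i2c_master_recv() and introduces a shadow-register cache:
dev->reg[] remembers the last value written, so set_voltage() only generates
bus traffic when a register actually changes. The pattern in isolation, as a
sketch (the helper name is illustrative):

static int a8293_wr_cached(struct i2c_client *client, u8 *shadow, u8 val)
{
	int ret;

	if (val == *shadow)
		return 0;		/* unchanged, skip the bus access */
	ret = i2c_master_send(client, &val, 1);
	if (ret < 0)
		return ret;		/* keep the cache intact on error */
	*shadow = val;
	return 0;
}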
diff --git a/drivers/media/dvb-frontends/a8293.h b/drivers/media/dvb-frontends/a8293.h
index aff3653..7b90a03 100644
--- a/drivers/media/dvb-frontends/a8293.h
+++ b/drivers/media/dvb-frontends/a8293.h
@@ -12,17 +12,12 @@
  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License along
- *    with this program; if not, write to the Free Software Foundation, Inc.,
- *    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef A8293_H
 #define A8293_H
 
 #include "dvb_frontend.h"
-#include <linux/kconfig.h>
 
 /*
  * I2C address
@@ -37,21 +32,4 @@
 	struct dvb_frontend *dvb_frontend;
 };
 
-
-struct a8293_config {
-	u8 i2c_addr;
-};
-
-#if IS_REACHABLE(CONFIG_DVB_A8293)
-extern struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
-	struct i2c_adapter *i2c, const struct a8293_config *cfg);
-#else
-static inline struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
-	struct i2c_adapter *i2c, const struct a8293_config *cfg)
-{
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
-	return NULL;
-}
-#endif
-
 #endif /* A8293_H */
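With a8293_config and the attach stub removed, host drivers bind the chip as a
regular I2C client and hand the frontend over via a8293_platform_data. A
hedged sketch of that instantiation, in the style bridge drivers of this era
use (the 0x0b address and variable names are illustrative):

	struct a8293_platform_data pdata = {
		.dvb_frontend = fe,
	};
	struct i2c_board_info info = {
		.type = "a8293",
		.addr = 0x0b,
		.platform_data = &pdata,
	};
	struct i2c_client *client;

	request_module("a8293");
	client = i2c_new_device(adapter, &info);
	if (client == NULL || client->dev.driver == NULL)
		return -ENODEV;	/* the a8293 probe did not bind */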
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 59018af..bc35206 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1387,7 +1387,6 @@
 
 static struct i2c_driver af9033_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "af9033",
 	},
 	.probe		= af9033_probe,
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
new file mode 100644
index 0000000..f770f6a
--- /dev/null
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -0,0 +1,548 @@
+/*
+ * ascot2e.c
+ *
+ * Sony Ascot2E DVB-T/T2/C/C2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/types.h>
+#include "ascot2e.h"
+#include "dvb_frontend.h"
+
+#define MAX_WRITE_REGSIZE 10
+
+enum ascot2e_state {
+	STATE_UNKNOWN,
+	STATE_SLEEP,
+	STATE_ACTIVE
+};
+
+struct ascot2e_priv {
+	u32			frequency;
+	u8			i2c_address;
+	struct i2c_adapter	*i2c;
+	enum ascot2e_state	state;
+	void			*set_tuner_data;
+	int			(*set_tuner)(void *, int);
+};
+
+enum ascot2e_tv_system_t {
+	ASCOT2E_DTV_DVBT_5,
+	ASCOT2E_DTV_DVBT_6,
+	ASCOT2E_DTV_DVBT_7,
+	ASCOT2E_DTV_DVBT_8,
+	ASCOT2E_DTV_DVBT2_1_7,
+	ASCOT2E_DTV_DVBT2_5,
+	ASCOT2E_DTV_DVBT2_6,
+	ASCOT2E_DTV_DVBT2_7,
+	ASCOT2E_DTV_DVBT2_8,
+	ASCOT2E_DTV_DVBC_6,
+	ASCOT2E_DTV_DVBC_8,
+	ASCOT2E_DTV_DVBC2_6,
+	ASCOT2E_DTV_DVBC2_8,
+	ASCOT2E_DTV_UNKNOWN
+};
+
+struct ascot2e_band_sett {
+	u8	if_out_sel;
+	u8	agc_sel;
+	u8	mix_oll;
+	u8	rf_gain;
+	u8	if_bpf_gc;
+	u8	fif_offset;
+	u8	bw_offset;
+	u8	bw;
+	u8	rf_oldet;
+	u8	if_bpf_f0;
+};
+
+#define ASCOT2E_AUTO		0xff
+#define ASCOT2E_OFFSET(ofs)	((u8)(ofs) & 0x1F)
+#define ASCOT2E_BW_6		0x00
+#define ASCOT2E_BW_7		0x01
+#define ASCOT2E_BW_8		0x02
+#define ASCOT2E_BW_1_7		0x03
+
+static struct ascot2e_band_sett ascot2e_sett[] = {
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_7,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-4), ASCOT2E_OFFSET(-2), ASCOT2E_BW_8,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	ASCOT2E_OFFSET(-10), ASCOT2E_OFFSET(-16), ASCOT2E_BW_1_7, 0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_7,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+	  ASCOT2E_OFFSET(-4), ASCOT2E_OFFSET(-2), ASCOT2E_BW_8,  0x0B, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x02, ASCOT2E_AUTO, 0x03,
+	  ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-8), ASCOT2E_BW_6,  0x09, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x02, ASCOT2E_AUTO, 0x03,
+	  ASCOT2E_OFFSET(-2), ASCOT2E_OFFSET(-1), ASCOT2E_BW_8,  0x09, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x01,
+	  ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_6,  0x09, 0x00 },
+	{ ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x01,
+	  ASCOT2E_OFFSET(-2), ASCOT2E_OFFSET(2),  ASCOT2E_BW_8,  0x09, 0x00 }
+};
+
+static void ascot2e_i2c_debug(struct ascot2e_priv *priv,
+			      u8 reg, u8 write, const u8 *data, u32 len)
+{
+	dev_dbg(&priv->i2c->dev, "ascot2e: I2C %s reg 0x%02x size %d\n",
+		(write == 0 ? "read" : "write"), reg, len);
+	print_hex_dump_bytes("ascot2e: I2C data: ",
+		DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int ascot2e_write_regs(struct ascot2e_priv *priv,
+			      u8 reg, const u8 *data, u32 len)
+{
+	int ret;
+	u8 buf[MAX_WRITE_REGSIZE + 1];
+	struct i2c_msg msg[1] = {
+		{
+			.addr = priv->i2c_address,
+			.flags = 0,
+			.len = len + 1,
+			.buf = buf,
+		}
+	};
+
+	if (len + 1 >= sizeof(buf)) {
+		dev_warn(&priv->i2c->dev, "wr reg=%04x: len=%d is too big!\n",
+			 reg, len + 1);
+		return -E2BIG;
+	}
+
+	ascot2e_i2c_debug(priv, reg, 1, data, len);
+	buf[0] = reg;
+	memcpy(&buf[1], data, len);
+	ret = i2c_transfer(priv->i2c, msg, 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EREMOTEIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c wr failed=%d reg=%02x len=%d\n",
+			KBUILD_MODNAME, ret, reg, len);
+		return ret;
+	}
+	return 0;
+}
+
+static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val)
+{
+	return ascot2e_write_regs(priv, reg, &val, 1);
+}
+
+static int ascot2e_read_regs(struct ascot2e_priv *priv,
+			     u8 reg, u8 *val, u32 len)
+{
+	int ret;
+	struct i2c_msg msg[2] = {
+		{
+			.addr = priv->i2c_address,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		}, {
+			.addr = priv->i2c_address,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = val,
+		}
+	};
+
+	ret = i2c_transfer(priv->i2c, &msg[0], 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EREMOTEIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: I2C rw failed=%d addr=%02x reg=%02x\n",
+			KBUILD_MODNAME, ret, priv->i2c_address, reg);
+		return ret;
+	}
+	ret = i2c_transfer(priv->i2c, &msg[1], 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EREMOTEIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c rd failed=%d addr=%02x reg=%02x\n",
+			KBUILD_MODNAME, ret, priv->i2c_address, reg);
+		return ret;
+	}
+	ascot2e_i2c_debug(priv, reg, 0, val, len);
+	return 0;
+}
+
+static int ascot2e_read_reg(struct ascot2e_priv *priv, u8 reg, u8 *val)
+{
+	return ascot2e_read_regs(priv, reg, val, 1);
+}
+
+static int ascot2e_set_reg_bits(struct ascot2e_priv *priv,
+				u8 reg, u8 data, u8 mask)
+{
+	int res;
+	u8 rdata;
+
+	if (mask != 0xff) {
+		res = ascot2e_read_reg(priv, reg, &rdata);
+		if (res != 0)
+			return res;
+		data = ((data & mask) | (rdata & (mask ^ 0xFF)));
+	}
+	return ascot2e_write_reg(priv, reg, data);
+}
+
+static int ascot2e_enter_power_save(struct ascot2e_priv *priv)
+{
+	u8 data[2];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_SLEEP)
+		return 0;
+	data[0] = 0x00;
+	data[1] = 0x04;
+	ascot2e_write_regs(priv, 0x14, data, 2);
+	ascot2e_write_reg(priv, 0x50, 0x01);
+	priv->state = STATE_SLEEP;
+	return 0;
+}
+
+static int ascot2e_leave_power_save(struct ascot2e_priv *priv)
+{
+	u8 data[2] = { 0xFB, 0x0F };
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_ACTIVE)
+		return 0;
+	ascot2e_write_regs(priv, 0x14, data, 2);
+	ascot2e_write_reg(priv, 0x50, 0x00);
+	priv->state = STATE_ACTIVE;
+	return 0;
+}
+
+static int ascot2e_init(struct dvb_frontend *fe)
+{
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	return ascot2e_leave_power_save(priv);
+}
+
+static int ascot2e_release(struct dvb_frontend *fe)
+{
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	kfree(fe->tuner_priv);
+	fe->tuner_priv = NULL;
+	return 0;
+}
+
+static int ascot2e_sleep(struct dvb_frontend *fe)
+{
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	ascot2e_enter_power_save(priv);
+	return 0;
+}
+
+static enum ascot2e_tv_system_t ascot2e_get_tv_system(struct dvb_frontend *fe)
+{
+	enum ascot2e_tv_system_t system = ASCOT2E_DTV_UNKNOWN;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	if (p->delivery_system == SYS_DVBT) {
+		if (p->bandwidth_hz <= 5000000)
+			system = ASCOT2E_DTV_DVBT_5;
+		else if (p->bandwidth_hz <= 6000000)
+			system = ASCOT2E_DTV_DVBT_6;
+		else if (p->bandwidth_hz <= 7000000)
+			system = ASCOT2E_DTV_DVBT_7;
+		else if (p->bandwidth_hz <= 8000000)
+			system = ASCOT2E_DTV_DVBT_8;
+		else {
+			system = ASCOT2E_DTV_DVBT_8;
+			p->bandwidth_hz = 8000000;
+		}
+	} else if (p->delivery_system == SYS_DVBT2) {
+		if (p->bandwidth_hz <= 5000000)
+			system = ASCOT2E_DTV_DVBT2_5;
+		else if (p->bandwidth_hz <= 6000000)
+			system = ASCOT2E_DTV_DVBT2_6;
+		else if (p->bandwidth_hz <= 7000000)
+			system = ASCOT2E_DTV_DVBT2_7;
+		else if (p->bandwidth_hz <= 8000000)
+			system = ASCOT2E_DTV_DVBT2_8;
+		else {
+			system = ASCOT2E_DTV_DVBT2_8;
+			p->bandwidth_hz = 8000000;
+		}
+	} else if (p->delivery_system == SYS_DVBC_ANNEX_A) {
+		if (p->bandwidth_hz <= 6000000)
+			system = ASCOT2E_DTV_DVBC_6;
+		else if (p->bandwidth_hz <= 8000000)
+			system = ASCOT2E_DTV_DVBC_8;
+	}
+	dev_dbg(&priv->i2c->dev,
+		"%s(): ASCOT2E DTV system %d (delsys %d, bandwidth %d)\n",
+		__func__, (int)system, p->delivery_system, p->bandwidth_hz);
+	return system;
+}
+
+static int ascot2e_set_params(struct dvb_frontend *fe)
+{
+	u8 data[10];
+	u32 frequency;
+	enum ascot2e_tv_system_t tv_system;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s(): tune frequency %dkHz\n",
+		__func__, p->frequency / 1000);
+	tv_system = ascot2e_get_tv_system(fe);
+
+	if (tv_system == ASCOT2E_DTV_UNKNOWN) {
+		dev_dbg(&priv->i2c->dev, "%s(): unknown DTV system\n",
+			__func__);
+		return -EINVAL;
+	}
+	if (priv->set_tuner)
+		priv->set_tuner(priv->set_tuner_data, 1);
+	frequency = roundup(p->frequency / 1000, 25);
+	if (priv->state == STATE_SLEEP)
+		ascot2e_leave_power_save(priv);
+
+	/* IF_OUT_SEL / AGC_SEL setting */
+	data[0] = 0x00;
+	if (ascot2e_sett[tv_system].agc_sel != ASCOT2E_AUTO) {
+		/* AGC pin setting from parameter table */
+		data[0] |= (u8)(
+			(ascot2e_sett[tv_system].agc_sel & 0x03) << 3);
+	}
+	if (ascot2e_sett[tv_system].if_out_sel != ASCOT2E_AUTO) {
+		/* IFOUT pin setting from parameter table */
+		data[0] |= (u8)(
+			(ascot2e_sett[tv_system].if_out_sel & 0x01) << 2);
+	}
+	/* Set bit[4:2] only */
+	ascot2e_set_reg_bits(priv, 0x05, data[0], 0x1c);
+	/* 0x06 - 0x0F */
+	/* REF_R setting (0x06) */
+	if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+			tv_system == ASCOT2E_DTV_DVBC_8) {
+		/* xtal, xtal*2 */
+		data[0] = (frequency > 500000) ? 16 : 32;
+	} else {
+		/* xtal/8, xtal/4 */
+		data[0] = (frequency > 500000) ? 2 : 4;
+	}
+	/* XOSC_SEL=100uA */
+	data[1] = 0x04;
+	/* KBW setting (0x08), KC0 setting (0x09), KC1 setting (0x0A) */
+	if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+			tv_system == ASCOT2E_DTV_DVBC_8) {
+		data[2] = 18;
+		data[3] = 120;
+		data[4] = 20;
+	} else {
+		data[2] = 48;
+		data[3] = 10;
+		data[4] = 30;
+	}
+	/* ORDER/R2_RANGE/R2_BANK/C2_BANK setting (0x0B) */
+	if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+			tv_system == ASCOT2E_DTV_DVBC_8)
+		data[5] = (frequency > 500000) ? 0x08 : 0x0c;
+	else
+		data[5] = (frequency > 500000) ? 0x30 : 0x38;
+	/* Set MIX_OLL (0x0C) value from parameter table */
+	data[6] = ascot2e_sett[tv_system].mix_oll;
+	/* Set RF_GAIN (0x0D) setting from parameter table */
+	if (ascot2e_sett[tv_system].rf_gain == ASCOT2E_AUTO) {
+		/* RF_GAIN auto control enable */
+		ascot2e_write_reg(priv, 0x4E, 0x01);
+		/* RF_GAIN Default value */
+		data[7] = 0x00;
+	} else {
+		/* RF_GAIN auto control disable */
+		ascot2e_write_reg(priv, 0x4E, 0x00);
+		data[7] = ascot2e_sett[tv_system].rf_gain;
+	}
+	/* Set IF_BPF_GC/FIF_OFFSET (0x0E) value from parameter table */
+	data[8] = (u8)((ascot2e_sett[tv_system].fif_offset << 3) |
+		(ascot2e_sett[tv_system].if_bpf_gc & 0x07));
+	/* Set BW_OFFSET (0x0F) value from parameter table */
+	data[9] = ascot2e_sett[tv_system].bw_offset;
+	ascot2e_write_regs(priv, 0x06, data, 10);
+	/*
+	 * 0x45 - 0x47
+	 * LNA optimization setting
+	 * RF_LNA_DIST1-5, RF_LNA_CM
+	 */
+	if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+			tv_system == ASCOT2E_DTV_DVBC_8) {
+		data[0] = 0x0F;
+		data[1] = 0x00;
+		data[2] = 0x01;
+	} else {
+		data[0] = 0x0F;
+		data[1] = 0x00;
+		data[2] = 0x03;
+	}
+	ascot2e_write_regs(priv, 0x45, data, 3);
+	/*
+	 * 0x49 - 0x4A
+	 * Set RF_OLDET_ENX/RF_OLDET_OLL value from parameter table
+	 */
+	data[0] = ascot2e_sett[tv_system].rf_oldet;
+	/* Set IF_BPF_F0 value from parameter table */
+	data[1] = ascot2e_sett[tv_system].if_bpf_f0;
+	ascot2e_write_regs(priv, 0x49, data, 2);
+	/*
+	 * Tune now
+	 * RFAGC fast mode / RFAGC auto control enable
+	 * (set bit[7], bit[5:4] only)
+	 * vco_cal = 1, set MIX_OL_CPU_EN
+	 */
+	ascot2e_set_reg_bits(priv, 0x0c, 0x90, 0xb0);
+	/* Logic wake up, CPU wake up */
+	data[0] = 0xc4;
+	data[1] = 0x40;
+	ascot2e_write_regs(priv, 0x03, data, 2);
+	/* 0x10 - 0x14 */
+	data[0] = (u8)(frequency & 0xFF);         /* 0x10: FRF_L */
+	data[1] = (u8)((frequency >> 8) & 0xFF);  /* 0x11: FRF_M */
+	data[2] = (u8)((frequency >> 16) & 0x0F); /* 0x12: FRF_H (bit[3:0]) */
+	/* 0x12: BW (bit[5:4]) */
+	data[2] |= (u8)(ascot2e_sett[tv_system].bw << 4);
+	data[3] = 0xFF; /* 0x13: VCO calibration enable */
+	data[4] = 0xFF; /* 0x14: Analog block enable */
+	/* Tune (Burst write) */
+	ascot2e_write_regs(priv, 0x10, data, 5);
+	msleep(50);
+	/* CPU deep sleep */
+	ascot2e_write_reg(priv, 0x04, 0x00);
+	/* Logic sleep */
+	ascot2e_write_reg(priv, 0x03, 0xC0);
+	/* RFAGC normal mode (set bit[5:4] only) */
+	ascot2e_set_reg_bits(priv, 0x0C, 0x00, 0x30);
+	priv->frequency = frequency;
+	return 0;
+}
+
+static int ascot2e_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct ascot2e_priv *priv = fe->tuner_priv;
+
+	*frequency = priv->frequency * 1000;
+	return 0;
+}
+
+static struct dvb_tuner_ops ascot2e_tuner_ops = {
+	.info = {
+		.name = "Sony ASCOT2E",
+		.frequency_min = 1000000,
+		.frequency_max = 1200000000,
+		.frequency_step = 25000,
+	},
+	.init = ascot2e_init,
+	.release = ascot2e_release,
+	.sleep = ascot2e_sleep,
+	.set_params = ascot2e_set_params,
+	.get_frequency = ascot2e_get_frequency,
+};
+
+struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+				    const struct ascot2e_config *config,
+				    struct i2c_adapter *i2c)
+{
+	u8 data[4];
+	struct ascot2e_priv *priv = NULL;
+
+	priv = kzalloc(sizeof(struct ascot2e_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return NULL;
+	priv->i2c_address = (config->i2c_address >> 1);
+	priv->i2c = i2c;
+	priv->set_tuner_data = config->set_tuner_priv;
+	priv->set_tuner = config->set_tuner_callback;
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+
+	/* 16 MHz xTal frequency */
+	data[0] = 16;
+	/* VCO current setting */
+	data[1] = 0x06;
+	/* Logic wake up, CPU boot */
+	data[2] = 0xC4;
+	data[3] = 0x40;
+	ascot2e_write_regs(priv, 0x01, data, 4);
+	/* RFVGA optimization setting (RF_DIST0 - RF_DIST2) */
+	data[0] = 0x10;
+	data[1] = 0x3F;
+	data[2] = 0x25;
+	ascot2e_write_regs(priv, 0x22, data, 3);
+	/* PLL mode setting */
+	ascot2e_write_reg(priv, 0x28, 0x1e);
+	/* RSSI setting */
+	ascot2e_write_reg(priv, 0x59, 0x04);
+	/* TODO check CPU HW error state here */
+	msleep(80);
+	/* Xtal oscillator current control setting */
+	ascot2e_write_reg(priv, 0x4c, 0x01);
+	/* XOSC_SEL=100uA */
+	ascot2e_write_reg(priv, 0x07, 0x04);
+	/* CPU deep sleep */
+	ascot2e_write_reg(priv, 0x04, 0x00);
+	/* Logic sleep */
+	ascot2e_write_reg(priv, 0x03, 0xc0);
+	/* Power save setting */
+	data[0] = 0x00;
+	data[1] = 0x04;
+	ascot2e_write_regs(priv, 0x14, data, 2);
+	ascot2e_write_reg(priv, 0x50, 0x01);
+	priv->state = STATE_SLEEP;
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+
+	memcpy(&fe->ops.tuner_ops, &ascot2e_tuner_ops,
+				sizeof(struct dvb_tuner_ops));
+	fe->tuner_priv = priv;
+	dev_info(&priv->i2c->dev,
+		"Sony ASCOT2E attached on addr=%x at I2C adapter %p\n",
+		priv->i2c_address, priv->i2c);
+	return fe;
+}
+EXPORT_SYMBOL(ascot2e_attach);
+
+MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_LICENSE("GPL");
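The tuning sequence in ascot2e_set_params() rounds the target to the 25 kHz
step (roundup(p->frequency / 1000, 25)) and then packs it into the burst
written at register 0x10. The packing in isolation, as a sketch:

/* frequency in kHz, already rounded to 25 kHz; bw is an ASCOT2E_BW_* code */
static void ascot2e_pack_tune(u32 frequency, u8 bw, u8 data[5])
{
	data[0] = frequency & 0xff;		/* 0x10: FRF_L */
	data[1] = (frequency >> 8) & 0xff;	/* 0x11: FRF_M */
	data[2] = (frequency >> 16) & 0x0f;	/* 0x12: FRF_H, bit[3:0] */
	data[2] |= (bw & 0x03) << 4;		/* 0x12: BW,    bit[5:4] */
	data[3] = 0xff;				/* 0x13: VCO calibration enable */
	data[4] = 0xff;				/* 0x14: analog block enable */
}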
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
new file mode 100644
index 0000000..6da4ae6
--- /dev/null
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -0,0 +1,58 @@
+/*
+ * ascot2e.h
+ *
+ * Sony Ascot2E DVB-T/T2/C/C2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DVB_ASCOT2E_H__
+#define __DVB_ASCOT2E_H__
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+
+/**
+ * struct ascot2e_config - the configuration of Ascot2E tuner driver
+ * @i2c_address:	I2C address of the tuner
+ * @xtal_freq_mhz:	Oscillator frequency, MHz
+ * @set_tuner_priv:	Callback function private context
+ * @set_tuner_callback:	Callback function that notifies the parent driver
+ *			which tuner is active now
+ */
+struct ascot2e_config {
+	u8	i2c_address;
+	u8	xtal_freq_mhz;
+	void	*set_tuner_priv;
+	int	(*set_tuner_callback)(void *, int);
+};
+
+#if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
+extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+					const struct ascot2e_config *config,
+					struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+					const struct ascot2e_config *config,
+					struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
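A hedged usage sketch for the attach interface above, as a card driver might
call it. The 0xc2 address is an assumption, check the board schematic; note
the driver stores config->i2c_address >> 1 internally, so the 8-bit form is
expected here:

	static const struct ascot2e_config card_ascot2e_config = {
		.i2c_address	= 0xc2,
		.xtal_freq_mhz	= 16,
	};

	if (!ascot2e_attach(fe, &card_ascot2e_config, i2c))
		dev_err(&i2c->dev, "failed to attach ascot2e tuner\n");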
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 33aa941..28d7dc2 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -820,7 +820,6 @@
 
 static struct i2c_driver au8522_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "au8522",
 	},
 	.probe		= au8522_probe,
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index e18cf9e..0fe7fb1 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1011,7 +1011,7 @@
 
 static int cx24123_get_algo(struct dvb_frontend *fe)
 {
-	return 1; /* FE_ALGO_HW */
+	return DVBFE_ALGO_HW;
 }
 
 static void cx24123_release(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
new file mode 100644
index 0000000..fdffb2f
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -0,0 +1,2727 @@
+/*
+ * cxd2841er.c
+ *
+ * Sony CXD2841ER digital demodulator driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/math64.h>
+#include <linux/log2.h>
+#include <linux/dynamic_debug.h>
+
+#include "dvb_math.h"
+#include "dvb_frontend.h"
+#include "cxd2841er.h"
+#include "cxd2841er_priv.h"
+
+#define MAX_WRITE_REGSIZE	16
+
+enum cxd2841er_state {
+	STATE_SHUTDOWN = 0,
+	STATE_SLEEP_S,
+	STATE_ACTIVE_S,
+	STATE_SLEEP_TC,
+	STATE_ACTIVE_TC
+};
+
+struct cxd2841er_priv {
+	struct dvb_frontend		frontend;
+	struct i2c_adapter		*i2c;
+	u8				i2c_addr_slvx;
+	u8				i2c_addr_slvt;
+	const struct cxd2841er_config	*config;
+	enum cxd2841er_state		state;
+	u8				system;
+};
+
+static const struct cxd2841er_cnr_data s_cn_data[] = {
+	{ 0x033e, 0 }, { 0x0339, 100 }, { 0x0333, 200 },
+	{ 0x032e, 300 }, { 0x0329, 400 }, { 0x0324, 500 },
+	{ 0x031e, 600 }, { 0x0319, 700 }, { 0x0314, 800 },
+	{ 0x030f, 900 }, { 0x030a, 1000 }, { 0x02ff, 1100 },
+	{ 0x02f4, 1200 }, { 0x02e9, 1300 }, { 0x02de, 1400 },
+	{ 0x02d4, 1500 }, { 0x02c9, 1600 }, { 0x02bf, 1700 },
+	{ 0x02b5, 1800 }, { 0x02ab, 1900 }, { 0x02a1, 2000 },
+	{ 0x029b, 2100 }, { 0x0295, 2200 }, { 0x0290, 2300 },
+	{ 0x028a, 2400 }, { 0x0284, 2500 }, { 0x027f, 2600 },
+	{ 0x0279, 2700 }, { 0x0274, 2800 }, { 0x026e, 2900 },
+	{ 0x0269, 3000 }, { 0x0262, 3100 }, { 0x025c, 3200 },
+	{ 0x0255, 3300 }, { 0x024f, 3400 }, { 0x0249, 3500 },
+	{ 0x0242, 3600 }, { 0x023c, 3700 }, { 0x0236, 3800 },
+	{ 0x0230, 3900 }, { 0x022a, 4000 }, { 0x0223, 4100 },
+	{ 0x021c, 4200 }, { 0x0215, 4300 }, { 0x020e, 4400 },
+	{ 0x0207, 4500 }, { 0x0201, 4600 }, { 0x01fa, 4700 },
+	{ 0x01f4, 4800 }, { 0x01ed, 4900 }, { 0x01e7, 5000 },
+	{ 0x01e0, 5100 }, { 0x01d9, 5200 }, { 0x01d2, 5300 },
+	{ 0x01cb, 5400 }, { 0x01c4, 5500 }, { 0x01be, 5600 },
+	{ 0x01b7, 5700 }, { 0x01b1, 5800 }, { 0x01aa, 5900 },
+	{ 0x01a4, 6000 }, { 0x019d, 6100 }, { 0x0196, 6200 },
+	{ 0x018f, 6300 }, { 0x0189, 6400 }, { 0x0182, 6500 },
+	{ 0x017c, 6600 }, { 0x0175, 6700 }, { 0x016f, 6800 },
+	{ 0x0169, 6900 }, { 0x0163, 7000 }, { 0x015c, 7100 },
+	{ 0x0156, 7200 }, { 0x0150, 7300 }, { 0x014a, 7400 },
+	{ 0x0144, 7500 }, { 0x013e, 7600 }, { 0x0138, 7700 },
+	{ 0x0132, 7800 }, { 0x012d, 7900 }, { 0x0127, 8000 },
+	{ 0x0121, 8100 }, { 0x011c, 8200 }, { 0x0116, 8300 },
+	{ 0x0111, 8400 }, { 0x010b, 8500 }, { 0x0106, 8600 },
+	{ 0x0101, 8700 }, { 0x00fc, 8800 }, { 0x00f7, 8900 },
+	{ 0x00f2, 9000 }, { 0x00ee, 9100 }, { 0x00ea, 9200 },
+	{ 0x00e6, 9300 }, { 0x00e2, 9400 }, { 0x00de, 9500 },
+	{ 0x00da, 9600 }, { 0x00d7, 9700 }, { 0x00d3, 9800 },
+	{ 0x00d0, 9900 }, { 0x00cc, 10000 }, { 0x00c7, 10100 },
+	{ 0x00c3, 10200 }, { 0x00bf, 10300 }, { 0x00ba, 10400 },
+	{ 0x00b6, 10500 }, { 0x00b2, 10600 }, { 0x00ae, 10700 },
+	{ 0x00aa, 10800 }, { 0x00a7, 10900 }, { 0x00a3, 11000 },
+	{ 0x009f, 11100 }, { 0x009c, 11200 }, { 0x0098, 11300 },
+	{ 0x0094, 11400 }, { 0x0091, 11500 }, { 0x008e, 11600 },
+	{ 0x008a, 11700 }, { 0x0087, 11800 }, { 0x0084, 11900 },
+	{ 0x0081, 12000 }, { 0x007e, 12100 }, { 0x007b, 12200 },
+	{ 0x0079, 12300 }, { 0x0076, 12400 }, { 0x0073, 12500 },
+	{ 0x0071, 12600 }, { 0x006e, 12700 }, { 0x006c, 12800 },
+	{ 0x0069, 12900 }, { 0x0067, 13000 }, { 0x0065, 13100 },
+	{ 0x0062, 13200 }, { 0x0060, 13300 }, { 0x005e, 13400 },
+	{ 0x005c, 13500 }, { 0x005a, 13600 }, { 0x0058, 13700 },
+	{ 0x0056, 13800 }, { 0x0054, 13900 }, { 0x0052, 14000 },
+	{ 0x0050, 14100 }, { 0x004e, 14200 }, { 0x004c, 14300 },
+	{ 0x004b, 14400 }, { 0x0049, 14500 }, { 0x0047, 14600 },
+	{ 0x0046, 14700 }, { 0x0044, 14800 }, { 0x0043, 14900 },
+	{ 0x0041, 15000 }, { 0x003f, 15100 }, { 0x003e, 15200 },
+	{ 0x003c, 15300 }, { 0x003b, 15400 }, { 0x003a, 15500 },
+	{ 0x0037, 15700 }, { 0x0036, 15800 }, { 0x0034, 15900 },
+	{ 0x0033, 16000 }, { 0x0032, 16100 }, { 0x0031, 16200 },
+	{ 0x0030, 16300 }, { 0x002f, 16400 }, { 0x002e, 16500 },
+	{ 0x002d, 16600 }, { 0x002c, 16700 }, { 0x002b, 16800 },
+	{ 0x002a, 16900 }, { 0x0029, 17000 }, { 0x0028, 17100 },
+	{ 0x0027, 17200 }, { 0x0026, 17300 }, { 0x0025, 17400 },
+	{ 0x0024, 17500 }, { 0x0023, 17600 }, { 0x0022, 17800 },
+	{ 0x0021, 17900 }, { 0x0020, 18000 }, { 0x001f, 18200 },
+	{ 0x001e, 18300 }, { 0x001d, 18500 }, { 0x001c, 18700 },
+	{ 0x001b, 18900 }, { 0x001a, 19000 }, { 0x0019, 19200 },
+	{ 0x0018, 19300 }, { 0x0017, 19500 }, { 0x0016, 19700 },
+	{ 0x0015, 19900 }, { 0x0014, 20000 },
+};
+
+static const struct cxd2841er_cnr_data s2_cn_data[] = {
+	{ 0x05af, 0 }, { 0x0597, 100 }, { 0x057e, 200 },
+	{ 0x0567, 300 }, { 0x0550, 400 }, { 0x0539, 500 },
+	{ 0x0522, 600 }, { 0x050c, 700 }, { 0x04f6, 800 },
+	{ 0x04e1, 900 }, { 0x04cc, 1000 }, { 0x04b6, 1100 },
+	{ 0x04a1, 1200 }, { 0x048c, 1300 }, { 0x0477, 1400 },
+	{ 0x0463, 1500 }, { 0x044f, 1600 }, { 0x043c, 1700 },
+	{ 0x0428, 1800 }, { 0x0416, 1900 }, { 0x0403, 2000 },
+	{ 0x03ef, 2100 }, { 0x03dc, 2200 }, { 0x03c9, 2300 },
+	{ 0x03b6, 2400 }, { 0x03a4, 2500 }, { 0x0392, 2600 },
+	{ 0x0381, 2700 }, { 0x036f, 2800 }, { 0x035f, 2900 },
+	{ 0x034e, 3000 }, { 0x033d, 3100 }, { 0x032d, 3200 },
+	{ 0x031d, 3300 }, { 0x030d, 3400 }, { 0x02fd, 3500 },
+	{ 0x02ee, 3600 }, { 0x02df, 3700 }, { 0x02d0, 3800 },
+	{ 0x02c2, 3900 }, { 0x02b4, 4000 }, { 0x02a6, 4100 },
+	{ 0x0299, 4200 }, { 0x028c, 4300 }, { 0x027f, 4400 },
+	{ 0x0272, 4500 }, { 0x0265, 4600 }, { 0x0259, 4700 },
+	{ 0x024d, 4800 }, { 0x0241, 4900 }, { 0x0236, 5000 },
+	{ 0x022b, 5100 }, { 0x0220, 5200 }, { 0x0215, 5300 },
+	{ 0x020a, 5400 }, { 0x0200, 5500 }, { 0x01f6, 5600 },
+	{ 0x01ec, 5700 }, { 0x01e2, 5800 }, { 0x01d8, 5900 },
+	{ 0x01cf, 6000 }, { 0x01c6, 6100 }, { 0x01bc, 6200 },
+	{ 0x01b3, 6300 }, { 0x01aa, 6400 }, { 0x01a2, 6500 },
+	{ 0x0199, 6600 }, { 0x0191, 6700 }, { 0x0189, 6800 },
+	{ 0x0181, 6900 }, { 0x0179, 7000 }, { 0x0171, 7100 },
+	{ 0x0169, 7200 }, { 0x0161, 7300 }, { 0x015a, 7400 },
+	{ 0x0153, 7500 }, { 0x014b, 7600 }, { 0x0144, 7700 },
+	{ 0x013d, 7800 }, { 0x0137, 7900 }, { 0x0130, 8000 },
+	{ 0x012a, 8100 }, { 0x0124, 8200 }, { 0x011e, 8300 },
+	{ 0x0118, 8400 }, { 0x0112, 8500 }, { 0x010c, 8600 },
+	{ 0x0107, 8700 }, { 0x0101, 8800 }, { 0x00fc, 8900 },
+	{ 0x00f7, 9000 }, { 0x00f2, 9100 }, { 0x00ec, 9200 },
+	{ 0x00e7, 9300 }, { 0x00e2, 9400 }, { 0x00dd, 9500 },
+	{ 0x00d8, 9600 }, { 0x00d4, 9700 }, { 0x00cf, 9800 },
+	{ 0x00ca, 9900 }, { 0x00c6, 10000 }, { 0x00c2, 10100 },
+	{ 0x00be, 10200 }, { 0x00b9, 10300 }, { 0x00b5, 10400 },
+	{ 0x00b1, 10500 }, { 0x00ae, 10600 }, { 0x00aa, 10700 },
+	{ 0x00a6, 10800 }, { 0x00a3, 10900 }, { 0x009f, 11000 },
+	{ 0x009b, 11100 }, { 0x0098, 11200 }, { 0x0095, 11300 },
+	{ 0x0091, 11400 }, { 0x008e, 11500 }, { 0x008b, 11600 },
+	{ 0x0088, 11700 }, { 0x0085, 11800 }, { 0x0082, 11900 },
+	{ 0x007f, 12000 }, { 0x007c, 12100 }, { 0x007a, 12200 },
+	{ 0x0077, 12300 }, { 0x0074, 12400 }, { 0x0072, 12500 },
+	{ 0x006f, 12600 }, { 0x006d, 12700 }, { 0x006b, 12800 },
+	{ 0x0068, 12900 }, { 0x0066, 13000 }, { 0x0064, 13100 },
+	{ 0x0061, 13200 }, { 0x005f, 13300 }, { 0x005d, 13400 },
+	{ 0x005b, 13500 }, { 0x0059, 13600 }, { 0x0057, 13700 },
+	{ 0x0055, 13800 }, { 0x0053, 13900 }, { 0x0051, 14000 },
+	{ 0x004f, 14100 }, { 0x004e, 14200 }, { 0x004c, 14300 },
+	{ 0x004a, 14400 }, { 0x0049, 14500 }, { 0x0047, 14600 },
+	{ 0x0045, 14700 }, { 0x0044, 14800 }, { 0x0042, 14900 },
+	{ 0x0041, 15000 }, { 0x003f, 15100 }, { 0x003e, 15200 },
+	{ 0x003c, 15300 }, { 0x003b, 15400 }, { 0x003a, 15500 },
+	{ 0x0038, 15600 }, { 0x0037, 15700 }, { 0x0036, 15800 },
+	{ 0x0034, 15900 }, { 0x0033, 16000 }, { 0x0032, 16100 },
+	{ 0x0031, 16200 }, { 0x0030, 16300 }, { 0x002f, 16400 },
+	{ 0x002e, 16500 }, { 0x002d, 16600 }, { 0x002c, 16700 },
+	{ 0x002b, 16800 }, { 0x002a, 16900 }, { 0x0029, 17000 },
+	{ 0x0028, 17100 }, { 0x0027, 17200 }, { 0x0026, 17300 },
+	{ 0x0025, 17400 }, { 0x0024, 17500 }, { 0x0023, 17600 },
+	{ 0x0022, 17800 }, { 0x0021, 17900 }, { 0x0020, 18000 },
+	{ 0x001f, 18200 }, { 0x001e, 18300 }, { 0x001d, 18500 },
+	{ 0x001c, 18700 }, { 0x001b, 18900 }, { 0x001a, 19000 },
+	{ 0x0019, 19200 }, { 0x0018, 19300 }, { 0x0017, 19500 },
+	{ 0x0016, 19700 }, { 0x0015, 19900 }, { 0x0014, 20000 },
+};
+
+#define MAKE_IFFREQ_CONFIG(iffreq) ((u32)(((iffreq)/41.0)*16777216.0 + 0.5))
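
A note on this macro: the floating-point expression is safe in kernel code
only because the macro is invoked with compile-time constants, so the compiler
folds it to an integer; 41.0 is presumably the 41 MHz internal clock (twice
the 20.5 MHz X'tal configured below). For example:

	MAKE_IFFREQ_CONFIG(4.80) = (u32)(4.80 / 41.0 * 16777216.0 + 0.5)
	                         = 1964162

i.e. the IF frequency expressed as a 2^24-scaled fraction of that clock,
rounded to nearest.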
+
+static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
+				u8 addr, u8 reg, u8 write,
+				const u8 *data, u32 len)
+{
+	dev_dbg(&priv->i2c->dev,
+		"cxd2841er: I2C %s addr %02x reg 0x%02x size %d\n",
+		(write == 0 ? "read" : "write"), addr, reg, len);
+	print_hex_dump_bytes("cxd2841er: I2C data: ",
+		DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
+				u8 addr, u8 reg, const u8 *data, u32 len)
+{
+	int ret;
+	u8 buf[MAX_WRITE_REGSIZE + 1];
+	u8 i2c_addr = (addr == I2C_SLVX ?
+		priv->i2c_addr_slvx : priv->i2c_addr_slvt);
+	struct i2c_msg msg[1] = {
+		{
+			.addr = i2c_addr,
+			.flags = 0,
+			.len = len + 1,
+			.buf = buf,
+		}
+	};
+
+	if (len + 1 >= sizeof(buf)) {
+		dev_warn(&priv->i2c->dev, "wr reg=%04x: len=%d is too big!\n",
+			 reg, len + 1);
+		return -E2BIG;
+	}
+
+	cxd2841er_i2c_debug(priv, i2c_addr, reg, 1, data, len);
+	buf[0] = reg;
+	memcpy(&buf[1], data, len);
+
+	ret = i2c_transfer(priv->i2c, msg, 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c wr failed=%d addr=%02x reg=%02x len=%d\n",
+			KBUILD_MODNAME, ret, i2c_addr, reg, len);
+		return ret;
+	}
+	return 0;
+}
+
+static int cxd2841er_write_reg(struct cxd2841er_priv *priv,
+			       u8 addr, u8 reg, u8 val)
+{
+	return cxd2841er_write_regs(priv, addr, reg, &val, 1);
+}
+
+static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
+			       u8 addr, u8 reg, u8 *val, u32 len)
+{
+	int ret;
+	u8 i2c_addr = (addr == I2C_SLVX ?
+		priv->i2c_addr_slvx : priv->i2c_addr_slvt);
+	struct i2c_msg msg[2] = {
+		{
+			.addr = i2c_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		}, {
+			.addr = i2c_addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = val,
+		}
+	};
+
+	ret = i2c_transfer(priv->i2c, &msg[0], 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c rw failed=%d addr=%02x reg=%02x\n",
+			KBUILD_MODNAME, ret, i2c_addr, reg);
+		return ret;
+	}
+	ret = i2c_transfer(priv->i2c, &msg[1], 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c rd failed=%d addr=%02x reg=%02x\n",
+			KBUILD_MODNAME, ret, i2c_addr, reg);
+		return ret;
+	}
+	return 0;
+}
+
+static int cxd2841er_read_reg(struct cxd2841er_priv *priv,
+			      u8 addr, u8 reg, u8 *val)
+{
+	return cxd2841er_read_regs(priv, addr, reg, val, 1);
+}
+
+static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
+				  u8 addr, u8 reg, u8 data, u8 mask)
+{
+	int res;
+	u8 rdata;
+
+	if (mask != 0xff) {
+		res = cxd2841er_read_reg(priv, addr, reg, &rdata);
+		if (res)
+			return res;
+		data = ((data & mask) | (rdata & (mask ^ 0xFF)));
+	}
+	return cxd2841er_write_reg(priv, addr, reg, data);
+}
+
+static int cxd2841er_dvbs2_set_symbol_rate(struct cxd2841er_priv *priv,
+					   u32 symbol_rate)
+{
+	u32 reg_value = 0;
+	u8 data[3] = {0, 0, 0};
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	/*
+	 * regValue = (symbolRateKSps * 2^14 / 1000) + 0.5
+	 *          = ((symbolRateKSps * 2^14) + 500) / 1000
+	 *          = ((symbolRateKSps * 16384) + 500) / 1000
+	 */
+	reg_value = DIV_ROUND_CLOSEST(symbol_rate * 16384, 1000);
+	if ((reg_value == 0) || (reg_value > 0xFFFFF)) {
+		dev_err(&priv->i2c->dev,
+			"%s(): reg_value is out of range\n", __func__);
+		return -EINVAL;
+	}
+	data[0] = (u8)((reg_value >> 16) & 0x0F);
+	data[1] = (u8)((reg_value >>  8) & 0xFF);
+	data[2] = (u8)(reg_value & 0xFF);
+	/* Set SLV-T Bank : 0xAE */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x20, data, 3);
+	return 0;
+}
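A worked example for the conversion above: a common 27500 kSps DVB-S
transponder gives

	reg_value = DIV_ROUND_CLOSEST(27500 * 16384, 1000) = 450560 = 0x6e000

written big-endian into the three registers starting at 0x20; the 20-bit
ceiling 0xFFFFF enforced above corresponds to just under 64000 kSps.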
+
+static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
+					u8 system);
+
+static int cxd2841er_sleep_s_to_active_s(struct cxd2841er_priv *priv,
+					 u8 system, u32 symbol_rate)
+{
+	int ret;
+	u8 data[4] = { 0, 0, 0, 0 };
+
+	if (priv->state != STATE_SLEEP_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, (int)priv->state);
+		return -EINVAL;
+	}
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_set_ts_clock_mode(priv, SYS_DVBS);
+	/* Set demod mode */
+	if (system == SYS_DVBS) {
+		data[0] = 0x0A;
+	} else if (system == SYS_DVBS2) {
+		data[0] = 0x0B;
+	} else {
+		dev_err(&priv->i2c->dev, "%s(): invalid delsys %d\n",
+			__func__, system);
+		return -EINVAL;
+	}
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, data[0]);
+	/* DVB-S/S2 */
+	data[0] = 0x00;
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Enable S/S2 auto detection 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2d, data[0]);
+	/* Set SLV-T Bank : 0xAE */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+	/* Enable S/S2 auto detection 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, data[0]);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Enable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+	/* Enable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x31, 0x01);
+	/* Enable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+	/* Enable ADC 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x3f);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Enable ADC 3 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+	/* Set SLV-T Bank : 0xA3 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa3);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xac, 0x00);
+	data[0] = 0x07;
+	data[1] = 0x3B;
+	data[2] = 0x08;
+	data[3] = 0xC5;
+	/* Set SLV-T Bank : 0xAB */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xab);
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x98, data, 4);
+	data[0] = 0x05;
+	data[1] = 0x80;
+	data[2] = 0x0A;
+	data[3] = 0x80;
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xa8, data, 4);
+	data[0] = 0x0C;
+	data[1] = 0xCC;
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xc3, data, 2);
+	/* Set demod parameter */
+	ret = cxd2841er_dvbs2_set_symbol_rate(priv, symbol_rate);
+	if (ret != 0)
+		return ret;
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable Hi-Z setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x10);
+	/* disable Hi-Z setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+	priv->state = STATE_ACTIVE_S;
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t_band(struct cxd2841er_priv *priv,
+					       u32 bandwidth);
+
+static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
+						u32 bandwidth);
+
+static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
+					       u32 bandwidth);
+
+static int cxd2841er_retune_active(struct cxd2841er_priv *priv,
+				   struct dtv_frontend_properties *p)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_S &&
+			priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+	if (priv->state == STATE_ACTIVE_S)
+		return cxd2841er_dvbs2_set_symbol_rate(
+				priv, p->symbol_rate / 1000);
+	else if (priv->state == STATE_ACTIVE_TC) {
+		switch (priv->system) {
+		case SYS_DVBT:
+			return cxd2841er_sleep_tc_to_active_t_band(
+					priv, p->bandwidth_hz);
+		case SYS_DVBT2:
+			return cxd2841er_sleep_tc_to_active_t2_band(
+					priv, p->bandwidth_hz);
+		case SYS_DVBC_ANNEX_A:
+			return cxd2841er_sleep_tc_to_active_c_band(
+					priv, 8000000);
+		}
+	}
+	dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+		__func__, priv->system);
+	return -EINVAL;
+}
+
+static int cxd2841er_active_s_to_sleep_s(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+	/* enable Hi-Z setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x1f);
+	/* enable Hi-Z setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* disable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x31, 0x00);
+	/* disable ADC 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+	/* disable ADC 3 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+	/* SADC Bias ON */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+	/* disable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+	/* Set SLV-T Bank : 0xAE */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+	/* disable S/S2 auto detection 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable S/S2 auto detection 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2d, 0x00);
+	priv->state = STATE_SLEEP_S;
+	return 0;
+}
+
+static int cxd2841er_sleep_s_to_shutdown(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_SLEEP_S) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable DSQOUT */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+	/* Disable DSQIN */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x9c, 0x00);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Disable oscillator */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x15, 0x01);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+	priv->state = STATE_SHUTDOWN;
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_shutdown(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_SLEEP_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Disable oscillator */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x15, 0x01);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+	priv->state = STATE_SHUTDOWN;
+	return 0;
+}
+
+static int cxd2841er_active_t_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+	/* enable Hi-Z setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+	/* enable Hi-Z setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* disable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable ADC 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+	/* Disable ADC 3 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+	/* Disable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Disable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+	priv->state = STATE_SLEEP_TC;
+	return 0;
+}
+
+static int cxd2841er_active_t2_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+	/* enable Hi-Z setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+	/* enable Hi-Z setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+	/* Cancel DVB-T2 setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x83, 0x40);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x86, 0x21);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x9e, 0x09, 0x0f);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x9f, 0xfb);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2a);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x38, 0x00, 0x0f);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x11, 0x00, 0x3f);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* disable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable ADC 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+	/* Disable ADC 3 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+	/* Disable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Disable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+	priv->state = STATE_SLEEP_TC;
+	return 0;
+}
+
+static int cxd2841er_active_c_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* disable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+	/* enable Hi-Z setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+	/* enable Hi-Z setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+	/* Cancel DVB-C setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa3, 0x00, 0x1f);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* disable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable ADC 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+	/* Disable ADC 3 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+	/* Disable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Disable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+	priv->state = STATE_SLEEP_TC;
+	return 0;
+}
+
+static int cxd2841er_shutdown_to_sleep_s(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_SHUTDOWN) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Clear all demodulator registers */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x02, 0x00);
+	usleep_range(3000, 5000);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Set demod SW reset */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
+	/* Set X'tal clock to 20.5 MHz */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x0a);
+	/* Clear demod SW reset */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x00);
+	usleep_range(1000, 2000);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* enable DSQOUT */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x1F);
+	/* enable DSQIN */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x9C, 0x40);
+	/* TADC Bias On */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+	/* SADC Bias On */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+	priv->state = STATE_SLEEP_S;
+	return 0;
+}
+
+static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_SHUTDOWN) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Clear all demodulator registers */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x02, 0x00);
+	usleep_range(3000, 5000);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Set demod SW reset */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
+	/* Set X'tal clock to 20.5 MHz */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x13, 0x00);
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+	/* Clear demod SW reset */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x00);
+	usleep_range(1000, 2000);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* TADC Bias On */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+	/* SADC Bias On */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+	priv->state = STATE_SLEEP_TC;
+	return 0;
+}
+
+static int cxd2841er_tune_done(struct cxd2841er_priv *priv)
+{
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0, 0);
+	/* SW Reset */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xfe, 0x01);
+	/* Enable TS output */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x00);
+	return 0;
+}
+
+/* Set TS parallel mode */
+static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
+					u8 system)
+{
+	u8 serial_ts, ts_rate_ctrl_off, ts_in_off;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0xc4, &serial_ts);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0xd3, &ts_rate_ctrl_off);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0xde, &ts_in_off);
+	dev_dbg(&priv->i2c->dev, "%s(): ser_ts=0x%02x rate_ctrl_off=0x%02x in_off=0x%02x\n",
+		__func__, serial_ts, ts_rate_ctrl_off, ts_in_off);
+
+	/*
+	 * slave    Bank    Addr    Bit    default    Name
+	 * <SLV-T>  00h     D9h     [7:0]  8'h08      OTSCKPERIOD
+	 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xd9, 0x08);
+	/*
+	 * Disable TS IF Clock
+	 * slave    Bank    Addr    Bit    default    Name
+	 * <SLV-T>  00h     32h     [0]    1'b1       OREG_CK_TSIF_EN
+	 */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x32, 0x00, 0x01);
+	/*
+	 * slave    Bank    Addr    Bit    default    Name
+	 * <SLV-T>  00h     33h     [1:0]  2'b01      OREG_CKSEL_TSIF
+	 */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x33, 0x00, 0x03);
+	/*
+	 * Enable TS IF Clock
+	 * slave    Bank    Addr    Bit    default    Name
+	 * <SLV-T>  00h     32h     [0]    1'b1       OREG_CK_TSIF_EN
+	 */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x32, 0x01, 0x01);
+
+	if (system == SYS_DVBT) {
+		/* Enable parity period for DVB-T */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+		cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x66, 0x01, 0x01);
+	} else if (system == SYS_DVBC_ANNEX_A) {
+		/* Enable parity period for DVB-C */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+		cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x66, 0x01, 0x01);
+	}
+}
+
+static u8 cxd2841er_chip_id(struct cxd2841er_priv *priv)
+{
+	u8 chip_id;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0, 0);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0xfd, &chip_id);
+	return chip_id;
+}
+
+static int cxd2841er_read_status_s(struct dvb_frontend *fe,
+				   enum fe_status *status)
+{
+	u8 reg = 0;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	*status = 0;
+	if (priv->state != STATE_ACTIVE_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0xA0 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+	/*
+	 *  slave     Bank      Addr      Bit      Signal name
+	 * <SLV-T>    A0h       11h       [2]      ITSLOCK
+	 */
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x11, &reg);
+	if (reg & 0x04) {
+		*status = FE_HAS_SIGNAL
+			| FE_HAS_CARRIER
+			| FE_HAS_VITERBI
+			| FE_HAS_SYNC
+			| FE_HAS_LOCK;
+	}
+	dev_dbg(&priv->i2c->dev, "%s(): result 0x%x\n", __func__, *status);
+	return 0;
+}
+
+static int cxd2841er_read_status_t_t2(struct cxd2841er_priv *priv,
+				      u8 *sync, u8 *tslock, u8 *unlock)
+{
+	u8 data = 0;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC)
+		return -EINVAL;
+	if (priv->system == SYS_DVBT) {
+		/* Set SLV-T Bank : 0x10 */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	} else {
+		/* Set SLV-T Bank : 0x20 */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	}
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data);
+	if ((data & 0x07) == 0x07) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid hardware state detected\n", __func__);
+		*sync = 0;
+		*tslock = 0;
+		*unlock = 0;
+	} else {
+		*sync = ((data & 0x07) == 0x6 ? 1 : 0);
+		*tslock = ((data & 0x20) ? 1 : 0);
+		*unlock = ((data & 0x10) ? 1 : 0);
+	}
+	return 0;
+}
+
+static int cxd2841er_read_status_c(struct cxd2841er_priv *priv, u8 *tslock)
+{
+	u8 data;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC)
+		return -EINVAL;
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x88, &data);
+	if ((data & 0x01) == 0) {
+		*tslock = 0;
+	} else {
+		cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data);
+		*tslock = ((data & 0x20) ? 1 : 0);
+	}
+	return 0;
+}
+
+static int cxd2841er_read_status_tc(struct dvb_frontend *fe,
+				    enum fe_status *status)
+{
+	int ret = 0;
+	u8 sync = 0;
+	u8 tslock = 0;
+	u8 unlock = 0;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	*status = 0;
+	if (priv->state == STATE_ACTIVE_TC) {
+		if (priv->system == SYS_DVBT || priv->system == SYS_DVBT2) {
+			ret = cxd2841er_read_status_t_t2(
+				priv, &sync, &tslock, &unlock);
+			if (ret)
+				goto done;
+			if (unlock)
+				goto done;
+			if (sync)
+				*status = FE_HAS_SIGNAL |
+					FE_HAS_CARRIER |
+					FE_HAS_VITERBI |
+					FE_HAS_SYNC;
+			if (tslock)
+				*status |= FE_HAS_LOCK;
+		} else if (priv->system == SYS_DVBC_ANNEX_A) {
+			ret = cxd2841er_read_status_c(priv, &tslock);
+			if (ret)
+				goto done;
+			if (tslock)
+				*status = FE_HAS_SIGNAL |
+					FE_HAS_CARRIER |
+					FE_HAS_VITERBI |
+					FE_HAS_SYNC |
+					FE_HAS_LOCK;
+		}
+	}
+done:
+	dev_dbg(&priv->i2c->dev, "%s(): status 0x%x\n", __func__, *status);
+	return ret;
+}
+
+static int cxd2841er_get_carrier_offset_s_s2(struct cxd2841er_priv *priv,
+					     int *offset)
+{
+	u8 data[3];
+	u8 is_hs_mode;
+	s32 cfrl_ctrlval;
+	s32 temp_div, temp_q, temp_r;
+
+	if (priv->state != STATE_ACTIVE_S) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	/*
+	 * Get High Sampling Rate mode
+	 *  slave     Bank      Addr      Bit      Signal name
+	 * <SLV-T>    A0h       10h       [0]      ITRL_LOCK
+	 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data[0]);
+	if (data[0] & 0x01) {
+		/*
+		 *  slave     Bank      Addr      Bit      Signal name
+		 * <SLV-T>    A0h       50h       [4]      IHSMODE
+		 */
+		cxd2841er_read_reg(priv, I2C_SLVT, 0x50, &data[0]);
+		is_hs_mode = (data[0] & 0x10 ? 1 : 0);
+	} else {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): unable to detect sampling rate mode\n",
+			__func__);
+		return -EINVAL;
+	}
+	/*
+	 *  slave     Bank      Addr      Bit      Signal name
+	 * <SLV-T>    A0h       45h       [4:0]    ICFRL_CTRLVAL[20:16]
+	 * <SLV-T>    A0h       46h       [7:0]    ICFRL_CTRLVAL[15:8]
+	 * <SLV-T>    A0h       47h       [7:0]    ICFRL_CTRLVAL[7:0]
+	 */
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x45, data, 3);
+	cfrl_ctrlval = sign_extend32((((u32)data[0] & 0x1F) << 16) |
+				(((u32)data[1] & 0xFF) <<  8) |
+				((u32)data[2] & 0xFF), 20);
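+	/*
+	 * offset = -round(cfrl_ctrlval * 97375 / 2^20)        (HS mode)
+	 * offset = -round(cfrl_ctrlval * 97375 / (3 * 2^19))  (non-HS mode)
+	 * The division runs on the absolute value so div_s64_rem()
+	 * returns a non-negative remainder; the sign is restored last.
+	 */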
+	temp_div = (is_hs_mode ? 1048576 : 1572864);
+	if (cfrl_ctrlval > 0) {
+		temp_q = div_s64_rem(97375LL * cfrl_ctrlval,
+			temp_div, &temp_r);
+	} else {
+		temp_q = div_s64_rem(-97375LL * cfrl_ctrlval,
+			temp_div, &temp_r);
+	}
+	if (temp_r >= temp_div / 2)
+		temp_q++;
+	if (cfrl_ctrlval > 0)
+		temp_q *= -1;
+	*offset = temp_q;
+	return 0;
+}
+
+static int cxd2841er_get_carrier_offset_t2(struct cxd2841er_priv *priv,
+					   u32 bandwidth, int *offset)
+{
+	u8 data[4];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	if (priv->system != SYS_DVBT2) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+			__func__, priv->system);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x4c, data, sizeof(data));
+	*offset = -1 * sign_extend32(
+		((u32)(data[0] & 0x0F) << 24) | ((u32)data[1] << 16) |
+		((u32)data[2] << 8) | (u32)data[3], 27);
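+	/*
+	 * Convert the 28-bit raw value to Hz: multiply by the bandwidth
+	 * in MHz and divide by 940; 1.712 MHz uses its own divisor.
+	 */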
+	switch (bandwidth) {
+	case 1712000:
+		*offset /= 582;
+		break;
+	case 5000000:
+	case 6000000:
+	case 7000000:
+	case 8000000:
+		*offset *= (bandwidth / 1000000);
+		*offset /= 940;
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
+			__func__, bandwidth);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int cxd2841er_get_carrier_offset_c(struct cxd2841er_priv *priv,
+					  int *offset)
+{
+	u8 data[2];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	if (priv->system != SYS_DVBC_ANNEX_A) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+			__func__, priv->system);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x15, data, sizeof(data));
+	*offset = div_s64(41000LL * sign_extend32((((u32)data[0] & 0x3f) << 8)
+						| (u32)data[1], 13), 16384);
+	return 0;
+}
+
+static int cxd2841er_read_packet_errors_t(
+		struct cxd2841er_priv *priv, u32 *penum)
+{
+	u8 data[3];
+
+	*penum = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0xea, data, sizeof(data));
+	if (data[2] & 0x01)
+		*penum = ((u32)data[0] << 8) | (u32)data[1];
+	return 0;
+}
+
+static int cxd2841er_read_packet_errors_t2(
+		struct cxd2841er_priv *priv, u32 *penum)
+{
+	u8 data[3];
+
+	*penum = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x24);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0xfd, data, sizeof(data));
+	if (data[0] & 0x01)
+		*penum = ((u32)data[1] << 8) | (u32)data[2];
+	return 0;
+}
+
+static u32 cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv)
+{
+	u8 data[11];
+	u32 bit_error, bit_count;
+	u32 temp_q, temp_r;
+
+	/* Set SLV-T Bank : 0xA0 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+	/*
+	 *  slave     Bank      Addr      Bit      Signal name
+	 * <SLV-T>    A0h       35h       [0]      IFVBER_VALID
+	 * <SLV-T>    A0h       36h       [5:0]    IFVBER_BITERR[21:16]
+	 * <SLV-T>    A0h       37h       [7:0]    IFVBER_BITERR[15:8]
+	 * <SLV-T>    A0h       38h       [7:0]    IFVBER_BITERR[7:0]
+	 * <SLV-T>    A0h       3Dh       [5:0]    IFVBER_BITNUM[21:16]
+	 * <SLV-T>    A0h       3Eh       [7:0]    IFVBER_BITNUM[15:8]
+	 * <SLV-T>    A0h       3Fh       [7:0]    IFVBER_BITNUM[7:0]
+	 */
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x35, data, 11);
+	if (data[0] & 0x01) {
+		bit_error = ((u32)(data[1]  & 0x3F) << 16) |
+			((u32)(data[2]  & 0xFF) <<  8) |
+			(u32)(data[3]  & 0xFF);
+		bit_count = ((u32)(data[8]  & 0x3F) << 16) |
+			((u32)(data[9]  & 0xFF) <<  8) |
+			(u32)(data[10] & 0xFF);
+		/*
+		 * BER, scaled by 10^7:
+		 *	BER = (bitError * 10^7) / bitCount
+		 *	    = (bitError * 625 * 125 * 128) / bitCount
+		 */
+		if ((bit_count == 0) || (bit_error > bit_count)) {
+			dev_dbg(&priv->i2c->dev,
+				"%s(): invalid bit_error %d, bit_count %d\n",
+				__func__, bit_error, bit_count);
+			return 0;
+		}
+		temp_q = div_u64_rem(10000000ULL * bit_error,
+						bit_count, &temp_r);
+		if (bit_count != 1 && temp_r >= bit_count / 2)
+			temp_q++;
+		return temp_q;
+	}
+	dev_dbg(&priv->i2c->dev, "%s(): no data available\n", __func__);
+	return 0;
+}
+
+static u32 cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv)
+{
+	u8 data[5];
+	u32 bit_error, period;
+	u32 temp_q, temp_r;
+	u32 result = 0;
+
+	/* Set SLV-T Bank : 0xB2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xb2);
+	/*
+	 *  slave     Bank      Addr      Bit      Signal name
+	 * <SLV-T>    B2h       30h       [0]      IFLBER_VALID
+	 * <SLV-T>    B2h       31h       [3:0]    IFLBER_BITERR[27:24]
+	 * <SLV-T>    B2h       32h       [7:0]    IFLBER_BITERR[23:16]
+	 * <SLV-T>    B2h       33h       [7:0]    IFLBER_BITERR[15:8]
+	 * <SLV-T>    B2h       34h       [7:0]    IFLBER_BITERR[7:0]
+	 */
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x30, data, 5);
+	if (data[0] & 0x01) {
+		/* Bit error count */
+		bit_error = ((u32)(data[1] & 0x0F) << 24) |
+			((u32)(data[2] & 0xFF) << 16) |
+			((u32)(data[3] & 0xFF) <<  8) |
+			(u32)(data[4] & 0xFF);
+
+		/* Set SLV-T Bank : 0xA0 */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+		cxd2841er_read_reg(priv, I2C_SLVT, 0x7a, data);
+		/* Measurement period */
+		period = (u32)(1 << (data[0] & 0x0F));
+		if (period == 0) {
+			dev_dbg(&priv->i2c->dev,
+				"%s(): period is 0\n", __func__);
+			return 0;
+		}
+		if (bit_error > (period * 64800)) {
+			dev_dbg(&priv->i2c->dev,
+				"%s(): invalid bit_err 0x%x period 0x%x\n",
+				__func__, bit_error, period);
+			return 0;
+		}
+		/*
+		 * BER, scaled by 10^7:
+		 *	BER = (bitError * 10^7) / (period * 64800)
+		 *	    = (bitError * 10^5) / (period * 648)
+		 *	    = (bitError * 12500) / (period * 81)
+		 */
+		temp_q = div_u64_rem(12500ULL * bit_error,
+					period * 81, &temp_r);
+		if (temp_r >= period * 40)
+			temp_q++;
+		result = temp_q;
+	} else {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): no data available\n", __func__);
+	}
+	return result;
+}
+
+static int cxd2841er_read_ber_t2(struct cxd2841er_priv *priv, u32 *ber)
+{
+	u8 data[4];
+	u32 div, q, r;
+	u32 bit_err, period_exp, n_ldpc;
+
+	*ber = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid state %d\n", __func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x39, data, sizeof(data));
+	if (!(data[0] & 0x10)) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): no valid BER data\n", __func__);
+		return 0;
+	}
+	bit_err = ((u32)(data[0] & 0x0f) << 24) |
+		((u32)data[1] << 16) |
+		((u32)data[2] << 8) |
+		(u32)data[3];
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
+	period_exp = data[0] & 0x0f;
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x22);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x5e, data);
+	n_ldpc = ((data[0] & 0x03) == 0 ? 16200 : 64800);
+	if (bit_err > ((1U << period_exp) * n_ldpc)) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid BER value\n", __func__);
+		return -EINVAL;
+	}
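+	/*
+	 * Scale to BER * 10^7:
+	 *	ber = bit_err * 10^7 / (2^period_exp * n_ldpc)
+	 *	    = bit_err * 50000 / (2^period_exp * (n_ldpc / 200))
+	 *	    = bit_err * 3125 / (2^(period_exp - 4) * (n_ldpc / 200))
+	 * The branch on period_exp >= 4 avoids a negative shift count.
+	 */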
+	if (period_exp >= 4) {
+		div = (1U << (period_exp - 4)) * (n_ldpc / 200);
+		q = div_u64_rem(3125ULL * bit_err, div, &r);
+	} else {
+		div = (1U << period_exp) * (n_ldpc / 200);
+		q = div_u64_rem(50000ULL * bit_err, div, &r);
+	}
+	*ber = (r >= div / 2) ? q + 1 : q;
+	return 0;
+}
+
+static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv, u32 *ber)
+{
+	u8 data[2];
+	u32 div, q, r;
+	u32 bit_err, period;
+
+	*ber = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid state %d\n", __func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x39, data);
+	if (!(data[0] & 0x01)) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): no valid BER data\n", __func__);
+		return 0;
+	}
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x22, data, sizeof(data));
+	bit_err = ((u32)data[0] << 8) | (u32)data[1];
+	cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
+	period = ((data[0] & 0x07) == 0) ? 256 : (4096 << (data[0] & 0x07));
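+	/* 78125 * 128 = 10^7, so ber = round(bit_err * 10^7 / period) */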
+	div = period / 128;
+	q = div_u64_rem(78125ULL * bit_err, div, &r);
+	*ber = (r >= div / 2) ? q + 1 : q;
+	return 0;
+}
+
+static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv, u8 delsys)
+{
+	u8 data[3];
+	u32 res = 0, value;
+	int min_index, max_index, index;
+	const struct cxd2841er_cnr_data *cn_data;
+
+	/* Set SLV-T Bank : 0xA1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa1);
+	/*
+	 *  slave     Bank      Addr      Bit     Signal name
+	 * <SLV-T>    A1h       10h       [0]     ICPM_QUICKRDY
+	 * <SLV-T>    A1h       11h       [4:0]   ICPM_QUICKCNDT[12:8]
+	 * <SLV-T>    A1h       12h       [7:0]   ICPM_QUICKCNDT[7:0]
+	 */
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x10, data, 3);
+	if (data[0] & 0x01) {
+		value = ((u32)(data[1] & 0x1F) << 8) | (u32)(data[2] & 0xFF);
+		min_index = 0;
+		if (delsys == SYS_DVBS) {
+			cn_data = s_cn_data;
+			max_index = ARRAY_SIZE(s_cn_data) - 1;
+		} else {
+			cn_data = s2_cn_data;
+			max_index = ARRAY_SIZE(s2_cn_data) - 1;
+		}
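+		/*
+		 * The C/N tables are ordered by decreasing register value:
+		 * clamp to the table ends, then binary-search for the
+		 * closest entry and report its C/N in 0.001 dB units.
+		 */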
+		if (value >= cn_data[min_index].value) {
+			res = cn_data[min_index].cnr_x1000;
+			goto done;
+		}
+		if (value <= cn_data[max_index].value) {
+			res = cn_data[max_index].cnr_x1000;
+			goto done;
+		}
+		while ((max_index - min_index) > 1) {
+			index = (max_index + min_index) / 2;
+			if (value == cn_data[index].value) {
+				res = cn_data[index].cnr_x1000;
+				goto done;
+			} else if (value > cn_data[index].value)
+				max_index = index;
+			else
+				min_index = index;
+			if ((max_index - min_index) <= 1) {
+				if (value == cn_data[max_index].value) {
+					res = cn_data[max_index].cnr_x1000;
+					goto done;
+				} else {
+					res = cn_data[min_index].cnr_x1000;
+					goto done;
+				}
+			}
+		}
+	} else {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): no data available\n", __func__);
+	}
+done:
+	return res;
+}
+
+static int cxd2841er_read_snr_t(struct cxd2841er_priv *priv, u32 *snr)
+{
+	u32 reg;
+	u8 data[2];
+
+	*snr = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid state %d\n", __func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+	reg = ((u32)data[0] << 8) | (u32)data[1];
+	if (reg == 0) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): reg value out of range\n", __func__);
+		return 0;
+	}
+	if (reg > 4996)
+		reg = 4996;
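+	/* snr ~= 10000 * log10(reg / (5350 - reg)) + 28500, in 0.001 dB */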
+	*snr = 10000 * ((intlog10(reg) - intlog10(5350 - reg)) >> 24) + 28500;
+	return 0;
+}
+
+static int cxd2841er_read_snr_t2(struct cxd2841er_priv *priv, u32 *snr)
+{
+	u32 reg;
+	u8 data[2];
+
+	*snr = 0;
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid state %d\n", __func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+	reg = ((u32)data[0] << 8) | (u32)data[1];
+	if (reg == 0) {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): reg value out of range\n", __func__);
+		return 0;
+	}
+	if (reg > 10876)
+		reg = 10876;
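+	/* snr ~= 10000 * log10(reg / (12600 - reg)) + 32000, in 0.001 dB */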
+	*snr = 10000 * ((intlog10(reg) -
+		intlog10(12600 - reg)) >> 24) + 32000;
+	return 0;
+}
+
+static u16 cxd2841er_read_agc_gain_t_t2(struct cxd2841er_priv *priv,
+					u8 delsys)
+{
+	u8 data[2];
+
+	cxd2841er_write_reg(
+		priv, I2C_SLVT, 0x00, (delsys == SYS_DVBT ? 0x10 : 0x20));
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x26, data, 2);
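+	/* 12-bit IFAGC value, shifted up to the full 16-bit scale */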
+	return ((((u16)data[0] & 0x0F) << 8) | (u16)(data[1] & 0xFF)) << 4;
+}
+
+static u16 cxd2841er_read_agc_gain_s(struct cxd2841er_priv *priv)
+{
+	u8 data[2];
+
+	/* Set SLV-T Bank : 0xA0 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+	/*
+	 *  slave     Bank      Addr      Bit       Signal name
+	 * <SLV-T>    A0h       1Fh       [4:0]     IRFAGC_GAIN[12:8]
+	 * <SLV-T>    A0h       20h       [7:0]     IRFAGC_GAIN[7:0]
+	 */
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x1f, data, 2);
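+	/* 13-bit RFAGC value, shifted up to the full 16-bit scale */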
+	return ((((u16)data[0] & 0x1F) << 8) | (u16)(data[1] & 0xFF)) << 3;
+}
+
+static int cxd2841er_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	*ber = 0;
+	switch (p->delivery_system) {
+	case SYS_DVBS:
+		*ber = cxd2841er_mon_read_ber_s(priv);
+		break;
+	case SYS_DVBS2:
+		*ber = cxd2841er_mon_read_ber_s2(priv);
+		break;
+	case SYS_DVBT:
+		return cxd2841er_read_ber_t(priv, ber);
+	case SYS_DVBT2:
+		return cxd2841er_read_ber_t2(priv, ber);
+	default:
+		*ber = 0;
+		break;
+	}
+	return 0;
+}
+
+static int cxd2841er_read_signal_strength(struct dvb_frontend *fe,
+					  u16 *strength)
+{
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	switch (p->delivery_system) {
+	case SYS_DVBT:
+	case SYS_DVBT2:
+		*strength = 65535 - cxd2841er_read_agc_gain_t_t2(
+			priv, p->delivery_system);
+		break;
+	case SYS_DVBS:
+	case SYS_DVBS2:
+		*strength = 65535 - cxd2841er_read_agc_gain_s(priv);
+		break;
+	default:
+		*strength = 0;
+		break;
+	}
+	return 0;
+}
+
+static int cxd2841er_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+	u32 tmp = 0;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	switch (p->delivery_system) {
+	case SYS_DVBT:
+		cxd2841er_read_snr_t(priv, &tmp);
+		break;
+	case SYS_DVBT2:
+		cxd2841er_read_snr_t2(priv, &tmp);
+		break;
+	case SYS_DVBS:
+	case SYS_DVBS2:
+		tmp = cxd2841er_dvbs_read_snr(priv, p->delivery_system);
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s(): unknown delivery system %d\n",
+			__func__, p->delivery_system);
+		break;
+	}
+	*snr = tmp & 0xffff;
+	return 0;
+}
+
+static int cxd2841er_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	switch (p->delivery_system) {
+	case SYS_DVBT:
+		cxd2841er_read_packet_errors_t(priv, ucblocks);
+		break;
+	case SYS_DVBT2:
+		cxd2841er_read_packet_errors_t2(priv, ucblocks);
+		break;
+	default:
+		*ucblocks = 0;
+		break;
+	}
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	return 0;
+}
+
+static int cxd2841er_dvbt2_set_profile(
+	struct cxd2841er_priv *priv, enum cxd2841er_dvbt2_profile_t profile)
+{
+	u8 tune_mode;
+	u8 seq_not2d_time;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	switch (profile) {
+	case DVBT2_PROFILE_BASE:
+		tune_mode = 0x01;
+		seq_not2d_time = 12;
+		break;
+	case DVBT2_PROFILE_LITE:
+		tune_mode = 0x05;
+		seq_not2d_time = 40;
+		break;
+	case DVBT2_PROFILE_ANY:
+		tune_mode = 0x00;
+		seq_not2d_time = 40;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x2E */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2e);
+	/* Set profile and tune mode */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x10, tune_mode, 0x07);
+	/* Set SLV-T Bank : 0x2B */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+	/* Set early unlock detection time */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x9d, seq_not2d_time);
+	return 0;
+}
+
+static int cxd2841er_dvbt2_set_plp_config(struct cxd2841er_priv *priv,
+					  u8 is_auto, u8 plp_id)
+{
+	if (is_auto) {
+		dev_dbg(&priv->i2c->dev,
+			"%s() using auto PLP selection\n", __func__);
+	} else {
+		dev_dbg(&priv->i2c->dev,
+			"%s() using manual PLP selection, ID %d\n",
+			__func__, plp_id);
+	}
+	/* Set SLV-T Bank : 0x23 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x23);
+	if (!is_auto) {
+		/* Manual PLP selection mode. Set the data PLP Id. */
+		cxd2841er_write_reg(priv, I2C_SLVT, 0xaf, plp_id);
+	}
+	/* Register 0xad: 0x00 = auto PLP select (scanning), 0x01 = manual */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xad, (is_auto ? 0x00 : 0x01));
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
+						u32 bandwidth)
+{
+	u32 iffreq;
+	u8 b20_9f[5];
+	u8 b10_a6[14];
+	u8 b10_b6[3];
+	u8 b10_d7;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	switch (bandwidth) {
+	case 8000000:
+		/* bank 0x20, reg 0x9f */
+		b20_9f[0] = 0x11;
+		b20_9f[1] = 0xf0;
+		b20_9f[2] = 0x00;
+		b20_9f[3] = 0x00;
+		b20_9f[4] = 0x00;
+		/* bank 0x10, reg 0xa6 */
+		b10_a6[0] = 0x26;
+		b10_a6[1] = 0xaf;
+		b10_a6[2] = 0x06;
+		b10_a6[3] = 0xcd;
+		b10_a6[4] = 0x13;
+		b10_a6[5] = 0xbb;
+		b10_a6[6] = 0x28;
+		b10_a6[7] = 0xba;
+		b10_a6[8] = 0x23;
+		b10_a6[9] = 0xa9;
+		b10_a6[10] = 0x1f;
+		b10_a6[11] = 0xa8;
+		b10_a6[12] = 0x2c;
+		b10_a6[13] = 0xc8;
+		iffreq = MAKE_IFFREQ_CONFIG(4.80);
+		b10_d7 = 0x00;
+		break;
+	case 7000000:
+		/* bank 0x20, reg 0x9f */
+		b20_9f[0] = 0x14;
+		b20_9f[1] = 0x80;
+		b20_9f[2] = 0x00;
+		b20_9f[3] = 0x00;
+		b20_9f[4] = 0x00;
+		/* bank 0x10, reg 0xa6 */
+		b10_a6[0] = 0x2C;
+		b10_a6[1] = 0xBD;
+		b10_a6[2] = 0x02;
+		b10_a6[3] = 0xCF;
+		b10_a6[4] = 0x04;
+		b10_a6[5] = 0xF8;
+		b10_a6[6] = 0x23;
+		b10_a6[7] = 0xA6;
+		b10_a6[8] = 0x29;
+		b10_a6[9] = 0xB0;
+		b10_a6[10] = 0x26;
+		b10_a6[11] = 0xA9;
+		b10_a6[12] = 0x21;
+		b10_a6[13] = 0xA5;
+		iffreq = MAKE_IFFREQ_CONFIG(4.2);
+		b10_d7 = 0x02;
+		break;
+	case 6000000:
+		/* bank 0x20, reg 0x9f */
+		b20_9f[0] = 0x17;
+		b20_9f[1] = 0xEA;
+		b20_9f[2] = 0xAA;
+		b20_9f[3] = 0xAA;
+		b20_9f[4] = 0xAA;
+		/* bank 0x10, reg 0xa6 */
+		b10_a6[0] = 0x27;
+		b10_a6[1] = 0xA7;
+		b10_a6[2] = 0x28;
+		b10_a6[3] = 0xB3;
+		b10_a6[4] = 0x02;
+		b10_a6[5] = 0xF0;
+		b10_a6[6] = 0x01;
+		b10_a6[7] = 0xE8;
+		b10_a6[8] = 0x00;
+		b10_a6[9] = 0xCF;
+		b10_a6[10] = 0x00;
+		b10_a6[11] = 0xE6;
+		b10_a6[12] = 0x23;
+		b10_a6[13] = 0xA4;
+		iffreq = MAKE_IFFREQ_CONFIG(3.6);
+		b10_d7 = 0x04;
+		break;
+	case 5000000:
+		/* bank 0x20, reg 0x9f */
+		b20_9f[0] = 0x1C;
+		b20_9f[1] = 0xB3;
+		b20_9f[2] = 0x33;
+		b20_9f[3] = 0x33;
+		b20_9f[4] = 0x33;
+		/* bank 0x10, reg 0xa6 */
+		b10_a6[0] = 0x27;
+		b10_a6[1] = 0xA7;
+		b10_a6[2] = 0x28;
+		b10_a6[3] = 0xB3;
+		b10_a6[4] = 0x02;
+		b10_a6[5] = 0xF0;
+		b10_a6[6] = 0x01;
+		b10_a6[7] = 0xE8;
+		b10_a6[8] = 0x00;
+		b10_a6[9] = 0xCF;
+		b10_a6[10] = 0x00;
+		b10_a6[11] = 0xE6;
+		b10_a6[12] = 0x23;
+		b10_a6[13] = 0xA4;
+		iffreq = MAKE_IFFREQ_CONFIG(3.6);
+		b10_d7 = 0x06;
+		break;
+	case 1712000:
+		/* bank 0x20, reg 0x9f */
+		b20_9f[0] = 0x58;
+		b20_9f[1] = 0xE2;
+		b20_9f[2] = 0xAF;
+		b20_9f[3] = 0xE0;
+		b20_9f[4] = 0xBC;
+		/* bank 0x10, reg 0xa6 */
+		b10_a6[0] = 0x25;
+		b10_a6[1] = 0xA0;
+		b10_a6[2] = 0x36;
+		b10_a6[3] = 0x8D;
+		b10_a6[4] = 0x2E;
+		b10_a6[5] = 0x94;
+		b10_a6[6] = 0x28;
+		b10_a6[7] = 0x9B;
+		b10_a6[8] = 0x32;
+		b10_a6[9] = 0x90;
+		b10_a6[10] = 0x2C;
+		b10_a6[11] = 0x9D;
+		b10_a6[12] = 0x29;
+		b10_a6[13] = 0x99;
+		iffreq = MAKE_IFFREQ_CONFIG(3.5);
+		b10_d7 = 0x03;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Set SLV-T Bank : 0x20 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x9f, b20_9f, sizeof(b20_9f));
+	/* Set SLV-T Bank : 0x27 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+	cxd2841er_set_reg_bits(
+		priv, I2C_SLVT, 0x7a,
+		(bandwidth == 1712000 ? 0x03 : 0x00), 0x0f);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* Group delay equaliser setting for ASCOT2E */
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xa6, b10_a6, sizeof(b10_a6));
+	/* <IF freq setting> */
+	b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+	b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+	b10_b6[2] = (u8)(iffreq & 0xff);
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+	/* System bandwidth setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, b10_d7, 0x07);
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t_band(
+		struct cxd2841er_priv *priv, u32 bandwidth)
+{
+	u8 b13_9c[2] = { 0x01, 0x14 };
+	u8 bw8mhz_b10_9f[] = { 0x11, 0xF0, 0x00, 0x00, 0x00 };
+	u8 bw8mhz_b10_a6[] = { 0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB,
+			0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8 };
+	u8 bw8mhz_b10_d9[] = { 0x01, 0xE0 };
+	u8 bw8mhz_b17_38[] = { 0x01, 0x02 };
+	u8 bw7mhz_b10_9f[] = { 0x14, 0x80, 0x00, 0x00, 0x00 };
+	u8 bw7mhz_b10_a6[] = { 0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8,
+			0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5 };
+	u8 bw7mhz_b10_d9[] = { 0x12, 0xF8 };
+	u8 bw7mhz_b17_38[] = { 0x00, 0x03 };
+	u8 bw6mhz_b10_9f[] = { 0x17, 0xEA, 0xAA, 0xAA, 0xAA };
+	u8 bw6mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
+			0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+	u8 bw6mhz_b10_d9[] = { 0x1F, 0xDC };
+	u8 bw6mhz_b17_38[] = { 0x00, 0x03 };
+	u8 bw5mhz_b10_9f[] = { 0x1C, 0xB3, 0x33, 0x33, 0x33 };
+	u8 bw5mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
+			0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+	u8 bw5mhz_b10_d9[] = { 0x26, 0x3C };
+	u8 bw5mhz_b17_38[] = { 0x00, 0x03 };
+	u8 b10_b6[3];
+	u8 d7val;
+	u32 iffreq;
+	u8 *b10_9f;
+	u8 *b10_a6;
+	u8 *b10_d9;
+	u8 *b17_38;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+	/* Echo performance optimization setting */
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x9c, b13_9c, sizeof(b13_9c));
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+	switch (bandwidth) {
+	case 8000000:
+		b10_9f = bw8mhz_b10_9f;
+		b10_a6 = bw8mhz_b10_a6;
+		b10_d9 = bw8mhz_b10_d9;
+		b17_38 = bw8mhz_b17_38;
+		d7val = 0;
+		iffreq = MAKE_IFFREQ_CONFIG(4.80);
+		break;
+	case 7000000:
+		b10_9f = bw7mhz_b10_9f;
+		b10_a6 = bw7mhz_b10_a6;
+		b10_d9 = bw7mhz_b10_d9;
+		b17_38 = bw7mhz_b17_38;
+		d7val = 2;
+		iffreq = MAKE_IFFREQ_CONFIG(4.20);
+		break;
+	case 6000000:
+		b10_9f = bw6mhz_b10_9f;
+		b10_a6 = bw6mhz_b10_a6;
+		b10_d9 = bw6mhz_b10_d9;
+		b17_38 = bw6mhz_b17_38;
+		d7val = 4;
+		iffreq = MAKE_IFFREQ_CONFIG(3.60);
+		break;
+	case 5000000:
+		b10_9f = bw5mhz_b10_9f;
+		b10_a6 = bw5mhz_b10_a6;
+		b10_d9 = bw5mhz_b10_d9;
+		b17_38 = bw5mhz_b17_38;
+		d7val = 6;
+		iffreq = MAKE_IFFREQ_CONFIG(3.60);
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
+			__func__, bandwidth);
+		return -EINVAL;
+	}
+	/* <IF freq setting> */
+	b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+	b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+	b10_b6[2] = (u8)(iffreq & 0xff);
+	cxd2841er_write_regs(
+		priv, I2C_SLVT, 0x9f, b10_9f, sizeof(bw8mhz_b10_9f));
+	cxd2841er_write_regs(
+		priv, I2C_SLVT, 0xa6, b10_a6, sizeof(bw8mhz_b10_a6));
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, d7val, 0x7);
+	cxd2841er_write_regs(
+		priv, I2C_SLVT, 0xd9, b10_d9, sizeof(bw8mhz_b10_d9));
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+	cxd2841er_write_regs(
+		priv, I2C_SLVT, 0x38, b17_38, sizeof(bw8mhz_b17_38));
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
+					       u32 bandwidth)
+{
+	u8 bw7_8mhz_b10_a6[] = {
+		0x2D, 0xC7, 0x04, 0xF4, 0x07, 0xC5, 0x2A, 0xB8,
+		0x27, 0x9E, 0x27, 0xA4, 0x29, 0xAB };
+	u8 bw6mhz_b10_a6[] = {
+		0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+		0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+	u8 b10_b6[3];
+	u32 iffreq;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	switch (bandwidth) {
+	case 8000000:
+	case 7000000:
+		cxd2841er_write_regs(
+			priv, I2C_SLVT, 0xa6,
+			bw7_8mhz_b10_a6, sizeof(bw7_8mhz_b10_a6));
+		iffreq = MAKE_IFFREQ_CONFIG(4.9);
+		break;
+	case 6000000:
+		cxd2841er_write_regs(
+			priv, I2C_SLVT, 0xa6,
+			bw6mhz_b10_a6, sizeof(bw6mhz_b10_a6));
+		iffreq = MAKE_IFFREQ_CONFIG(3.7);
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s(): unsupported bandwidth %d\n",
+			__func__, bandwidth);
+		return -EINVAL;
+	}
+	/* <IF freq setting> */
+	b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+	b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+	b10_b6[2] = (u8)(iffreq & 0xff);
+	cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+	/* Set SLV-T Bank : 0x11 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+	switch (bandwidth) {
+	case 8000000:
+	case 7000000:
+		cxd2841er_set_reg_bits(
+			priv, I2C_SLVT, 0xa3, 0x00, 0x1f);
+		break;
+	case 6000000:
+		cxd2841er_set_reg_bits(
+			priv, I2C_SLVT, 0xa3, 0x14, 0x1f);
+		break;
+	}
+	/* Set SLV-T Bank : 0x40 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+	switch (bandwidth) {
+	case 8000000:
+		cxd2841er_set_reg_bits(
+			priv, I2C_SLVT, 0x26, 0x0b, 0x0f);
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0x3e);
+		break;
+	case 7000000:
+		cxd2841er_set_reg_bits(
+			priv, I2C_SLVT, 0x26, 0x09, 0x0f);
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0xd6);
+		break;
+	case 6000000:
+		cxd2841er_set_reg_bits(
+			priv, I2C_SLVT, 0x26, 0x08, 0x0f);
+		cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0x6e);
+		break;
+	}
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
+					  u32 bandwidth)
+{
+	u8 data[2] = { 0x09, 0x54 };
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_set_ts_clock_mode(priv, SYS_DVBT);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Enable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Enable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Enable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+	/* xtal freq 20.5MHz */
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+	/* Enable ADC 4 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* IFAGC gain settings */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x0c, 0x1f);
+	/* Set SLV-T Bank : 0x11 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+	/* BBAGC TARGET level setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* ASCOT setting ON */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+	/* Set SLV-T Bank : 0x18 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18);
+	/* Pre-RS BER monitor setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x36, 0x40, 0x07);
+	/* FEC Auto Recovery setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x31, 0x01, 0x01);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* TSIF setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+	cxd2841er_sleep_tc_to_active_t_band(priv, bandwidth);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable HiZ Setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+	/* Disable HiZ Setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+	priv->state = STATE_ACTIVE_TC;
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
+					   u32 bandwidth)
+{
+	u8 data[2] = { 0x09, 0x54 };
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_set_ts_clock_mode(priv, SYS_DVBT2);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x02);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Enable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Enable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Enable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+	/* xtal freq 20.5MHz */
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+	/* Enable ADC 4 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* IFAGC gain settings */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x0c, 0x1f);
+	/* Set SLV-T Bank : 0x11 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+	/* BBAGC TARGET level setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* ASCOT setting ON */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+	/* Set SLV-T Bank : 0x20 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+	/* Acquisition optimization setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x8b, 0x3c);
+	/* Set SLV-T Bank : 0x2b */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x76, 0x20, 0x70);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* TSIF setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+	/* DVB-T2 initial setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x83, 0x10);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x86, 0x34);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x9e, 0x09, 0x0f);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x9f, 0xd8);
+	/* Set SLV-T Bank : 0x2a */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2a);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x38, 0x04, 0x0f);
+	/* Set SLV-T Bank : 0x2b */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x11, 0x20, 0x3f);
+
+	cxd2841er_sleep_tc_to_active_t2_band(priv, bandwidth);
+
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable HiZ Setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+	/* Disable HiZ Setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+	priv->state = STATE_ACTIVE_TC;
+	return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
+					  u32 bandwidth)
+{
+	u8 data[2] = { 0x09, 0x54 };
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_set_ts_clock_mode(priv, SYS_DVBC_ANNEX_A);
+	/* Set SLV-X Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+	/* Set demod mode */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x04);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Enable demod clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+	/* Enable ADC clock */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+	/* Enable ADC 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+	/* xtal freq 20.5MHz */
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+	/* Enable ADC 4 */
+	cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* IFAGC gain settings */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x09, 0x1f);
+	/* Set SLV-T Bank : 0x11 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+	/* BBAGC TARGET level setting */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x48);
+	/* Set SLV-T Bank : 0x10 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	/* ASCOT setting ON */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+	/* Set SLV-T Bank : 0x40 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+	/* Demod setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc3, 0x00, 0x04);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* TSIF setting */
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+
+	cxd2841er_sleep_tc_to_active_c_band(priv, bandwidth);
+	/* Set SLV-T Bank : 0x00 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	/* Disable HiZ Setting 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+	/* Disable HiZ Setting 2 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+	priv->state = STATE_ACTIVE_TC;
+	return 0;
+}
+
+static int cxd2841er_get_frontend(struct dvb_frontend *fe)
+{
+	enum fe_status status = 0;
+	u16 strength = 0, snr = 0;
+	u32 errors = 0, ber = 0;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_ACTIVE_S)
+		cxd2841er_read_status_s(fe, &status);
+	else if (priv->state == STATE_ACTIVE_TC)
+		cxd2841er_read_status_tc(fe, &status);
+
+	if (status & FE_HAS_LOCK) {
+		cxd2841er_read_signal_strength(fe, &strength);
+		p->strength.len = 1;
+		p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+		p->strength.stat[0].uvalue = strength;
+		cxd2841er_read_snr(fe, &snr);
+		p->cnr.len = 1;
+		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+		p->cnr.stat[0].svalue = snr;
+		cxd2841er_read_ucblocks(fe, &errors);
+		p->block_error.len = 1;
+		p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+		p->block_error.stat[0].uvalue = errors;
+		cxd2841er_read_ber(fe, &ber);
+		p->post_bit_error.len = 1;
+		p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		p->post_bit_error.stat[0].uvalue = ber;
+	} else {
+		p->strength.len = 1;
+		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		p->cnr.len = 1;
+		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		p->block_error.len = 1;
+		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		p->post_bit_error.len = 1;
+		p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+	return 0;
+}
+
+static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
+{
+	int ret = 0, i, timeout, carr_offset;
+	enum fe_status status;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	u32 symbol_rate = p->symbol_rate / 1000;
+
+	dev_dbg(&priv->i2c->dev, "%s(): %s frequency=%d symbol_rate=%d\n",
+		__func__,
+		(p->delivery_system == SYS_DVBS ? "DVB-S" : "DVB-S2"),
+		 p->frequency, symbol_rate);
+	switch (priv->state) {
+	case STATE_SLEEP_S:
+		ret = cxd2841er_sleep_s_to_active_s(
+			priv, p->delivery_system, symbol_rate);
+		break;
+	case STATE_ACTIVE_S:
+		ret = cxd2841er_retune_active(priv, p);
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		ret = -EINVAL;
+		goto done;
+	}
+	if (ret) {
+		dev_dbg(&priv->i2c->dev, "%s(): tune failed\n", __func__);
+		goto done;
+	}
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+	if (fe->ops.tuner_ops.set_params)
+		fe->ops.tuner_ops.set_params(fe);
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+	cxd2841er_tune_done(priv);
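+	/* wait up to DIV_ROUND_UP(3000000, symbol_rate) + 150 ms for lock */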
+	timeout = ((3000000 + (symbol_rate - 1)) / symbol_rate) + 150;
+	for (i = 0; i < timeout / CXD2841ER_DVBS_POLLING_INVL; i++) {
+		usleep_range(CXD2841ER_DVBS_POLLING_INVL * 1000,
+			(CXD2841ER_DVBS_POLLING_INVL + 2) * 1000);
+		cxd2841er_read_status_s(fe, &status);
+		if (status & FE_HAS_LOCK)
+			break;
+	}
+	if (status & FE_HAS_LOCK) {
+		if (cxd2841er_get_carrier_offset_s_s2(
+				priv, &carr_offset)) {
+			ret = -EINVAL;
+			goto done;
+		}
+		dev_dbg(&priv->i2c->dev, "%s(): carrier_offset=%d\n",
+			__func__, carr_offset);
+	}
+done:
+	return ret;
+}
+
+static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
+{
+	int ret = 0, timeout;
+	enum fe_status status;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (p->delivery_system == SYS_DVBT) {
+		priv->system = SYS_DVBT;
+		switch (priv->state) {
+		case STATE_SLEEP_TC:
+			ret = cxd2841er_sleep_tc_to_active_t(
+				priv, p->bandwidth_hz);
+			break;
+		case STATE_ACTIVE_TC:
+			ret = cxd2841er_retune_active(priv, p);
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+				__func__, priv->state);
+			ret = -EINVAL;
+		}
+	} else if (p->delivery_system == SYS_DVBT2) {
+		priv->system = SYS_DVBT2;
+		cxd2841er_dvbt2_set_plp_config(priv,
+			(int)(p->stream_id > 255), p->stream_id);
+		cxd2841er_dvbt2_set_profile(priv, DVBT2_PROFILE_BASE);
+		switch (priv->state) {
+		case STATE_SLEEP_TC:
+			ret = cxd2841er_sleep_tc_to_active_t2(priv,
+				p->bandwidth_hz);
+			break;
+		case STATE_ACTIVE_TC:
+			ret = cxd2841er_retune_active(priv, p);
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+				__func__, priv->state);
+			ret = -EINVAL;
+		}
+	} else if (p->delivery_system == SYS_DVBC_ANNEX_A ||
+			p->delivery_system == SYS_DVBC_ANNEX_C) {
+		priv->system = SYS_DVBC_ANNEX_A;
+		switch (priv->state) {
+		case STATE_SLEEP_TC:
+			ret = cxd2841er_sleep_tc_to_active_c(
+				priv, p->bandwidth_hz);
+			break;
+		case STATE_ACTIVE_TC:
+			ret = cxd2841er_retune_active(priv, p);
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+				__func__, priv->state);
+			ret = -EINVAL;
+		}
+	} else {
+		dev_dbg(&priv->i2c->dev,
+			"%s(): invalid delivery system %d\n",
+			__func__, p->delivery_system);
+		ret = -EINVAL;
+	}
+	if (ret)
+		goto done;
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+	if (fe->ops.tuner_ops.set_params)
+		fe->ops.tuner_ops.set_params(fe);
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+	cxd2841er_tune_done(priv);
+	timeout = 2500;
+	while (timeout > 0) {
+		ret = cxd2841er_read_status_tc(fe, &status);
+		if (ret)
+			goto done;
+		if (status & FE_HAS_LOCK)
+			break;
+		msleep(20);
+		timeout -= 20;
+	}
+	if (timeout <= 0)
+		dev_dbg(&priv->i2c->dev,
+			"%s(): LOCK wait timeout\n", __func__);
+done:
+	return ret;
+}
+
+static int cxd2841er_tune_s(struct dvb_frontend *fe,
+			    bool re_tune,
+			    unsigned int mode_flags,
+			    unsigned int *delay,
+			    enum fe_status *status)
+{
+	int ret, carrier_offset;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+	dev_dbg(&priv->i2c->dev, "%s() re_tune=%d\n", __func__, re_tune);
+	if (re_tune) {
+		ret = cxd2841er_set_frontend_s(fe);
+		if (ret)
+			return ret;
+		cxd2841er_read_status_s(fe, status);
+		if (*status & FE_HAS_LOCK) {
+			if (cxd2841er_get_carrier_offset_s_s2(
+					priv, &carrier_offset))
+				return -EINVAL;
+			p->frequency += carrier_offset;
+			ret = cxd2841er_set_frontend_s(fe);
+			if (ret)
+				return ret;
+		}
+	}
+	*delay = HZ / 5;
+	return cxd2841er_read_status_s(fe, status);
+}
+
+static int cxd2841er_tune_tc(struct dvb_frontend *fe,
+			     bool re_tune,
+			     unsigned int mode_flags,
+			     unsigned int *delay,
+			     enum fe_status *status)
+{
+	int ret, carrier_offset;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+	dev_dbg(&priv->i2c->dev, "%s(): re_tune %d\n", __func__, re_tune);
+	if (re_tune) {
+		ret = cxd2841er_set_frontend_tc(fe);
+		if (ret)
+			return ret;
+		cxd2841er_read_status_tc(fe, status);
+		if (*status & FE_HAS_LOCK) {
+			switch (priv->system) {
+			case SYS_DVBT:
+			case SYS_DVBT2:
+				ret = cxd2841er_get_carrier_offset_t2(
+					priv, p->bandwidth_hz,
+					&carrier_offset);
+				break;
+			case SYS_DVBC_ANNEX_A:
+				ret = cxd2841er_get_carrier_offset_c(
+					priv, &carrier_offset);
+				break;
+			default:
+				dev_dbg(&priv->i2c->dev,
+					"%s(): invalid delivery system %d\n",
+					__func__, priv->system);
+				return -EINVAL;
+			}
+			if (ret)
+				return ret;
+			dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n",
+				__func__, carrier_offset);
+			p->frequency += carrier_offset;
+			ret = cxd2841er_set_frontend_tc(fe);
+			if (ret)
+				return ret;
+		}
+	}
+	*delay = HZ / 5;
+	return cxd2841er_read_status_tc(fe, status);
+}
+
+static int cxd2841er_sleep_s(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_active_s_to_sleep_s(priv);
+	cxd2841er_sleep_s_to_shutdown(priv);
+	return 0;
+}
+
+static int cxd2841er_sleep_tc(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_ACTIVE_TC) {
+		switch (priv->system) {
+		case SYS_DVBT:
+			cxd2841er_active_t_to_sleep_tc(priv);
+			break;
+		case SYS_DVBT2:
+			cxd2841er_active_t2_to_sleep_tc(priv);
+			break;
+		case SYS_DVBC_ANNEX_A:
+			cxd2841er_active_c_to_sleep_tc(priv);
+			break;
+		default:
+			dev_warn(&priv->i2c->dev,
+				"%s(): unknown delivery system %d\n",
+				__func__, priv->system);
+		}
+	}
+	if (priv->state != STATE_SLEEP_TC) {
+		dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	cxd2841er_sleep_tc_to_shutdown(priv);
+	return 0;
+}
+
+static int cxd2841er_send_burst(struct dvb_frontend *fe,
+				enum fe_sec_mini_cmd burst)
+{
+	u8 data;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s(): burst mode %s\n", __func__,
+		(burst == SEC_MINI_A ? "A" : "B"));
+	if (priv->state != STATE_SLEEP_S &&
+			priv->state != STATE_ACTIVE_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	data = (burst == SEC_MINI_A ? 0 : 1);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x34, 0x01);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x35, data);
+	return 0;
+}
+
+static int cxd2841er_set_tone(struct dvb_frontend *fe,
+			      enum fe_sec_tone_mode tone)
+{
+	u8 data;
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s(): tone %s\n", __func__,
+		(tone == SEC_TONE_ON ? "On" : "Off"));
+	if (priv->state != STATE_SLEEP_S &&
+			priv->state != STATE_ACTIVE_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	data = (tone == SEC_TONE_ON ? 1 : 0);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x36, data);
+	return 0;
+}
+
+static int cxd2841er_send_diseqc_msg(struct dvb_frontend *fe,
+				     struct dvb_diseqc_master_cmd *cmd)
+{
+	int i;
+	u8 data[12];
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	if (priv->state != STATE_SLEEP_S &&
+			priv->state != STATE_ACTIVE_S) {
+		dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+			__func__, priv->state);
+		return -EINVAL;
+	}
+	dev_dbg(&priv->i2c->dev,
+		"%s(): cmd->len %d\n", __func__, cmd->msg_len);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+	/* DiSEqC enable */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x33, 0x01);
+	/* cmd1 length & data */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x3d, cmd->msg_len);
+	memset(data, 0, sizeof(data));
+	for (i = 0; i < cmd->msg_len && i < sizeof(data); i++)
+		data[i] = cmd->msg[i];
+	cxd2841er_write_regs(priv, I2C_SLVT, 0x3e, data, sizeof(data));
+	/* repeat count for cmd1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x37, 1);
+	/* repeat count for cmd2: always 0 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x38, 0);
+	/* start transmit */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x32, 0x01);
+	/* wait for 1 sec timeout */
+	for (i = 0; i < 50; i++) {
+		cxd2841er_read_reg(priv, I2C_SLVT, 0x10, data);
+		if (!data[0]) {
+			dev_dbg(&priv->i2c->dev,
+				"%s(): DiSEqC cmd has been sent\n", __func__);
+			return 0;
+		}
+		msleep(20);
+	}
+	dev_dbg(&priv->i2c->dev,
+		"%s(): DiSEqC cmd transmit timeout\n", __func__);
+	return -ETIMEDOUT;
+}
+
+static void cxd2841er_release(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	kfree(priv);
+}
+
+static int cxd2841er_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s(): enable=%d\n", __func__, enable);
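+	/* SLV-X register 0x08, bit 0 opens/closes the tuner I2C gate */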
+	cxd2841er_set_reg_bits(
+		priv, I2C_SLVX, 0x8, (enable ? 0x01 : 0x00), 0x01);
+	return 0;
+}
+
+static enum dvbfe_algo cxd2841er_get_algo(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	return DVBFE_ALGO_HW;
+}
+
+static int cxd2841er_init_s(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_shutdown_to_sleep_s(priv);
+	/* SONY_DEMOD_CONFIG_SAT_IFAGCNEG set to 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xb9, 0x01, 0x01);
+	return 0;
+}
+
+static int cxd2841er_init_tc(struct dvb_frontend *fe)
+{
+	struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	cxd2841er_shutdown_to_sleep_tc(priv);
+	/* SONY_DEMOD_CONFIG_IFAGCNEG = 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcb, 0x40, 0x40);
+	/* SONY_DEMOD_CONFIG_IFAGC_ADC_FS = 0 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0xcd, 0x50);
+	/* SONY_DEMOD_CONFIG_PARALLEL_SEL = 1 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+	cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4, 0x00, 0x80);
+	return 0;
+}
+
+static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
+static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops;
+static struct dvb_frontend_ops cxd2841er_dvbc_ops;
+
+static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
+					     struct i2c_adapter *i2c,
+					     u8 system)
+{
+	u8 chip_id = 0;
+	const char *type;
+	struct cxd2841er_priv *priv = NULL;
+
+	/* allocate memory for the internal state */
+	priv = kzalloc(sizeof(struct cxd2841er_priv), GFP_KERNEL);
+	if (!priv)
+		return NULL;
+	priv->i2c = i2c;
+	priv->config = cfg;
+	priv->i2c_addr_slvx = (cfg->i2c_addr + 4) >> 1;
+	priv->i2c_addr_slvt = (cfg->i2c_addr) >> 1;
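+	/* cfg->i2c_addr is 8-bit; derive the 7-bit SLVT/SLVX (SLVT + 2) pair */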
+	/* create dvb_frontend */
+	switch (system) {
+	case SYS_DVBS:
+		memcpy(&priv->frontend.ops,
+			&cxd2841er_dvbs_s2_ops,
+			sizeof(struct dvb_frontend_ops));
+		type = "S/S2";
+		break;
+	case SYS_DVBT:
+		memcpy(&priv->frontend.ops,
+			&cxd2841er_dvbt_t2_ops,
+			sizeof(struct dvb_frontend_ops));
+		type = "T/T2";
+		break;
+	case SYS_DVBC_ANNEX_A:
+		memcpy(&priv->frontend.ops,
+			&cxd2841er_dvbc_ops,
+			sizeof(struct dvb_frontend_ops));
+		type = "C/C2";
+		break;
+	default:
+		kfree(priv);
+		return NULL;
+	}
+	priv->frontend.demodulator_priv = priv;
+	dev_info(&priv->i2c->dev,
+		"%s(): attaching CXD2841ER DVB-%s frontend\n",
+		__func__, type);
+	dev_info(&priv->i2c->dev,
+		"%s(): I2C adapter %p SLVX addr %x SLVT addr %x\n",
+		__func__, priv->i2c,
+		priv->i2c_addr_slvx, priv->i2c_addr_slvt);
+	chip_id = cxd2841er_chip_id(priv);
+	if (chip_id != CXD2841ER_CHIP_ID) {
+		dev_err(&priv->i2c->dev, "%s(): invalid chip ID 0x%02x\n",
+			__func__, chip_id);
+		priv->frontend.demodulator_priv = NULL;
+		kfree(priv);
+		return NULL;
+	}
+	dev_info(&priv->i2c->dev, "%s(): chip ID 0x%02x OK.\n",
+		__func__, chip_id);
+	return &priv->frontend;
+}
+
+struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+					struct i2c_adapter *i2c)
+{
+	return cxd2841er_attach(cfg, i2c, SYS_DVBS);
+}
+EXPORT_SYMBOL(cxd2841er_attach_s);
+
+struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
+					struct i2c_adapter *i2c)
+{
+	return cxd2841er_attach(cfg, i2c, SYS_DVBT);
+}
+EXPORT_SYMBOL(cxd2841er_attach_t);
+
+struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
+					struct i2c_adapter *i2c)
+{
+	return cxd2841er_attach(cfg, i2c, SYS_DVBC_ANNEX_A);
+}
+EXPORT_SYMBOL(cxd2841er_attach_c);
+
+static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+	.delsys = { SYS_DVBS, SYS_DVBS2 },
+	.info = {
+		.name		= "Sony CXD2841ER DVB-S/S2 demodulator",
+		.frequency_min	= 500000,
+		.frequency_max	= 2500000,
+		.frequency_stepsize	= 0,
+		.symbol_rate_min = 1000000,
+		.symbol_rate_max = 45000000,
+		.symbol_rate_tolerance = 500,
+		.caps = FE_CAN_INVERSION_AUTO |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_QPSK,
+	},
+	.init = cxd2841er_init_s,
+	.sleep = cxd2841er_sleep_s,
+	.release = cxd2841er_release,
+	.set_frontend = cxd2841er_set_frontend_s,
+	.get_frontend = cxd2841er_get_frontend,
+	.read_status = cxd2841er_read_status_s,
+	.i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+	.get_frontend_algo = cxd2841er_get_algo,
+	.set_tone = cxd2841er_set_tone,
+	.diseqc_send_burst = cxd2841er_send_burst,
+	.diseqc_send_master_cmd = cxd2841er_send_diseqc_msg,
+	.tune = cxd2841er_tune_s
+};
+
+static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = {
+	.delsys = { SYS_DVBT, SYS_DVBT2 },
+	.info = {
+		.name	= "Sony CXD2841ER DVB-T/T2 demodulator",
+		.caps = FE_CAN_FEC_1_2 |
+			FE_CAN_FEC_2_3 |
+			FE_CAN_FEC_3_4 |
+			FE_CAN_FEC_5_6 |
+			FE_CAN_FEC_7_8 |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_QPSK |
+			FE_CAN_QAM_16 |
+			FE_CAN_QAM_32 |
+			FE_CAN_QAM_64 |
+			FE_CAN_QAM_128 |
+			FE_CAN_QAM_256 |
+			FE_CAN_QAM_AUTO |
+			FE_CAN_TRANSMISSION_MODE_AUTO |
+			FE_CAN_GUARD_INTERVAL_AUTO |
+			FE_CAN_HIERARCHY_AUTO |
+			FE_CAN_MUTE_TS |
+			FE_CAN_2G_MODULATION,
+		.frequency_min = 42000000,
+		.frequency_max = 1002000000
+	},
+	.init = cxd2841er_init_tc,
+	.sleep = cxd2841er_sleep_tc,
+	.release = cxd2841er_release,
+	.set_frontend = cxd2841er_set_frontend_tc,
+	.get_frontend = cxd2841er_get_frontend,
+	.read_status = cxd2841er_read_status_tc,
+	.tune = cxd2841er_tune_tc,
+	.i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+	.get_frontend_algo = cxd2841er_get_algo
+};
+
+static struct dvb_frontend_ops cxd2841er_dvbc_ops = {
+	.delsys = { SYS_DVBC_ANNEX_A },
+	.info = {
+		.name	= "Sony CXD2841ER DVB-C demodulator",
+		.caps = FE_CAN_FEC_1_2 |
+			FE_CAN_FEC_2_3 |
+			FE_CAN_FEC_3_4 |
+			FE_CAN_FEC_5_6 |
+			FE_CAN_FEC_7_8 |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_QAM_16 |
+			FE_CAN_QAM_32 |
+			FE_CAN_QAM_64 |
+			FE_CAN_QAM_128 |
+			FE_CAN_QAM_256 |
+			FE_CAN_QAM_AUTO |
+			FE_CAN_INVERSION_AUTO,
+		.frequency_min = 42000000,
+		.frequency_max = 1002000000
+	},
+	.init = cxd2841er_init_tc,
+	.sleep = cxd2841er_sleep_tc,
+	.release = cxd2841er_release,
+	.set_frontend = cxd2841er_set_frontend_tc,
+	.get_frontend = cxd2841er_get_frontend,
+	.read_status = cxd2841er_read_status_tc,
+	.tune = cxd2841er_tune_tc,
+	.i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+	.get_frontend_algo = cxd2841er_get_algo,
+};
+
+MODULE_DESCRIPTION("Sony CXD2841ER DVB-C/C2/T/T2/S/S2 demodulator driver");
+MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
new file mode 100644
index 0000000..3472bdd
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -0,0 +1,65 @@
+/*
+ * cxd2841er.h
+ *
+ * Sony CXD2841ER digital demodulator driver public definitions
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CXD2841ER_H
+#define CXD2841ER_H
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+struct cxd2841er_config {
+	u8	i2c_addr;
+};
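+
+/*
+ * Typical bridge-driver usage (a sketch: the 0x6c address and the
+ * i2c_adap pointer are illustrative, not mandated by this header):
+ *
+ *	static struct cxd2841er_config demod_config = {
+ *		.i2c_addr = 0x6c,
+ *	};
+ *
+ *	fe = cxd2841er_attach_t(&demod_config, i2c_adap);
+ *	if (!fe)
+ *		return -ENODEV;
+ */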
+
+#if IS_REACHABLE(CONFIG_DVB_CXD2841ER)
+extern struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+					       struct i2c_adapter *i2c);
+
+extern struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
+					       struct i2c_adapter *i2c);
+
+extern struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
+					       struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *cxd2841er_attach_s(
+					struct cxd2841er_config *cfg,
+					struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+
+static inline struct dvb_frontend *cxd2841er_attach_t(
+		struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+
+static inline struct dvb_frontend *cxd2841er_attach_c(
+		struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/cxd2841er_priv.h b/drivers/media/dvb-frontends/cxd2841er_priv.h
new file mode 100644
index 0000000..33e2f49
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er_priv.h
@@ -0,0 +1,43 @@
+/*
+ * cxd2841er_priv.h
+ *
+ * Sony CXD2841ER digital demodulator driver internal definitions
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CXD2841ER_PRIV_H
+#define CXD2841ER_PRIV_H
+
+#define I2C_SLVX			0
+#define I2C_SLVT			1
+
+#define CXD2841ER_CHIP_ID		0xa7
+
+#define CXD2841ER_DVBS_POLLING_INVL	10
+
+struct cxd2841er_cnr_data {
+	u32 value;
+	int cnr_x1000;
+};
+
+enum cxd2841er_dvbt2_profile_t {
+	DVBT2_PROFILE_ANY = 0,
+	DVBT2_PROFILE_BASE = 1,
+	DVBT2_PROFILE_LITE = 2
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 6d8fe88..53089e1 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -34,7 +34,7 @@
 	struct i2c_adapter *i2c;
 
 	/* the PLL descriptor */
-	struct dvb_pll_desc *pll_desc;
+	const struct dvb_pll_desc *pll_desc;
 
 	/* cached frequency/bandwidth */
 	u32 frequency;
@@ -57,7 +57,7 @@
 /* ----------------------------------------------------------- */
 
 struct dvb_pll_desc {
-	char *name;
+	const char *name;
 	u32  min;
 	u32  max;
 	u32  iffreq;
@@ -71,13 +71,13 @@
 		u32 stepsize;
 		u8  config;
 		u8  cb;
-	} entries[12];
+	} entries[];
 };
 
 /* ----------------------------------------------------------- */
 /* descriptions                                                */
 
-static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
 	.name  = "Thomson dtt7579",
 	.min   = 177000000,
 	.max   = 858000000,
@@ -99,7 +99,7 @@
 		buf[3] |= 0x10;
 }
 
-static struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
 	.name  = "Thomson dtt759x",
 	.min   = 177000000,
 	.max   = 896000000,
@@ -123,7 +123,7 @@
 		buf[3] ^= 0x10;
 }
 
-static struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
 	.name  = "Thomson dtt7520x",
 	.min   = 185000000,
 	.max   = 900000000,
@@ -141,7 +141,7 @@
 	},
 };
 
-static struct dvb_pll_desc dvb_pll_lg_z201 = {
+static const struct dvb_pll_desc dvb_pll_lg_z201 = {
 	.name  = "LG z201",
 	.min   = 174000000,
 	.max   = 862000000,
@@ -157,7 +157,7 @@
 	},
 };
 
-static struct dvb_pll_desc dvb_pll_unknown_1 = {
+static const struct dvb_pll_desc dvb_pll_unknown_1 = {
 	.name  = "unknown 1", /* used by dntv live dvb-t */
 	.min   = 174000000,
 	.max   = 862000000,
@@ -179,7 +179,7 @@
 /* Infineon TUA6010XS
  * used in Thomson Cable Tuner
  */
-static struct dvb_pll_desc dvb_pll_tua6010xs = {
+static const struct dvb_pll_desc dvb_pll_tua6010xs = {
 	.name  = "Infineon TUA6010XS",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -193,7 +193,7 @@
 };
 
 /* Panasonic env57h1xd5 (some Philips PLL ?) */
-static struct dvb_pll_desc dvb_pll_env57h1xd5 = {
+static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
 	.name  = "Panasonic ENV57H1XD5",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -217,7 +217,7 @@
 		buf[3] |= 0x08;
 }
 
-static struct dvb_pll_desc dvb_pll_tda665x = {
+static const struct dvb_pll_desc dvb_pll_tda665x = {
 	.name  = "Philips TDA6650/TDA6651",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -251,7 +251,7 @@
 		buf[3] |= 0x08;
 }
 
-static struct dvb_pll_desc dvb_pll_tua6034 = {
+static const struct dvb_pll_desc dvb_pll_tua6034 = {
 	.name  = "Infineon TUA6034",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -275,7 +275,7 @@
 		buf[3] |= 0x04;
 }
 
-static struct dvb_pll_desc dvb_pll_tded4 = {
+static const struct dvb_pll_desc dvb_pll_tded4 = {
 	.name = "ALPS TDED4",
 	.min = 47000000,
 	.max = 863000000,
@@ -293,7 +293,7 @@
 /* ALPS TDHU2
  * used in AverTVHD MCE A180
  */
-static struct dvb_pll_desc dvb_pll_tdhu2 = {
+static const struct dvb_pll_desc dvb_pll_tdhu2 = {
 	.name = "ALPS TDHU2",
 	.min = 54000000,
 	.max = 864000000,
@@ -310,7 +310,7 @@
 /* Samsung TBMV30111IN / TBMV30712IN1
  * used in Air2PC ATSC - 2nd generation (nxt2002)
  */
-static struct dvb_pll_desc dvb_pll_samsung_tbmv = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
 	.name = "Samsung TBMV30111IN / TBMV30712IN1",
 	.min = 54000000,
 	.max = 860000000,
@@ -329,7 +329,7 @@
 /*
  * Philips SD1878 Tuner.
  */
-static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
+static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
 	.name  = "Philips SD1878",
 	.min   =  950000,
 	.max   = 2150000,
@@ -395,7 +395,7 @@
 	return;
 }
 
-static struct dvb_pll_desc dvb_pll_opera1 = {
+static const struct dvb_pll_desc dvb_pll_opera1 = {
 	.name  = "Opera Tuner",
 	.min   =  900000,
 	.max   = 2250000,
@@ -442,7 +442,7 @@
 }
 
 /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
-static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
+static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
 	.name   = "Samsung DTOS403IH102A",
 	.min    =  44250000,
 	.max    = 858000000,
@@ -462,7 +462,7 @@
 };
 
 /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
-static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
 	.name	= "Samsung TDTC9251DH0",
 	.min	=  48000000,
 	.max	= 863000000,
@@ -476,7 +476,7 @@
 };
 
 /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
-static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
 	.name = "Samsung TBDU18132",
 	.min	=  950000,
 	.max	= 2150000, /* guesses */
@@ -497,7 +497,7 @@
 };
 
 /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
-static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
 	.name = "Samsung TBMU24112",
 	.min	=  950000,
 	.max	= 2150000, /* guesses */
@@ -518,7 +518,7 @@
  * 153 - 430   0  *  0   0   0   0   1   0   0x02
  * 430 - 822   0  *  0   0   1   0   0   0   0x08
  * 822 - 862   1  *  0   0   1   0   0   0   0x88 */
-static struct dvb_pll_desc dvb_pll_alps_tdee4 = {
+static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
 	.name = "ALPS TDEE4",
 	.min	=  47000000,
 	.max	= 862000000,
@@ -534,7 +534,7 @@
 
 /* ----------------------------------------------------------- */
 
-static struct dvb_pll_desc *pll_list[] = {
+static const struct dvb_pll_desc *pll_list[] = {
 	[DVB_PLL_UNDEFINED]              = NULL,
 	[DVB_PLL_THOMSON_DTT7579]        = &dvb_pll_thomson_dtt7579,
 	[DVB_PLL_THOMSON_DTT759X]        = &dvb_pll_thomson_dtt759x,
@@ -564,7 +564,7 @@
 			     const u32 frequency)
 {
 	struct dvb_pll_priv *priv = fe->tuner_priv;
-	struct dvb_pll_desc *desc = priv->pll_desc;
+	const struct dvb_pll_desc *desc = priv->pll_desc;
 	u32 div;
 	int i;
 
@@ -758,7 +758,7 @@
 			       .buf = b1, .len = 1 };
 	struct dvb_pll_priv *priv = NULL;
 	int ret;
-	struct dvb_pll_desc *desc;
+	const struct dvb_pll_desc *desc;
 
 	if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
 	    (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
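
Two things happen throughout the dvb-pll.c hunks above: every descriptor and the pll_list[] table become const, and the fixed entries[12] array turns into a C99 flexible array member so each descriptor occupies only the band entries it actually initializes. Statically initializing a flexible array member is a GCC extension the kernel relies on; a reduced, self-contained sketch of the resulting shape (fields trimmed, values illustrative):

    #include <stdint.h>

    struct pll_desc {
    	const char *name;
    	uint32_t min, max, iffreq;
    	struct {
    		uint32_t limit;
    		uint8_t config, cb;
    	} entries[];			/* was entries[12] */
    };

    /* GCC extension: static initializer for a flexible array member */
    static const struct pll_desc example_pll = {
    	.name = "example",
    	.min  = 174000000,
    	.max  = 862000000,
    	.entries = {
    		{ 443250000, 0x86, 0x01 },
    		{ 862000000, 0x86, 0x02 },
    	},
    };
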
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
new file mode 100644
index 0000000..000606a
--- /dev/null
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -0,0 +1,430 @@
+/*
+ * horus3a.c
+ *
+ * Sony Horus3A DVB-S/S2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/types.h>
+#include "horus3a.h"
+#include "dvb_frontend.h"
+
+#define MAX_WRITE_REGSIZE      5
+
+enum horus3a_state {
+	STATE_UNKNOWN,
+	STATE_SLEEP,
+	STATE_ACTIVE
+};
+
+struct horus3a_priv {
+	u32			frequency;
+	u8			i2c_address;
+	struct i2c_adapter	*i2c;
+	enum horus3a_state	state;
+	void			*set_tuner_data;
+	int			(*set_tuner)(void *, int);
+};
+
+static void horus3a_i2c_debug(struct horus3a_priv *priv,
+			      u8 reg, u8 write, const u8 *data, u32 len)
+{
+	dev_dbg(&priv->i2c->dev, "horus3a: I2C %s reg 0x%02x size %d\n",
+		(write == 0 ? "read" : "write"), reg, len);
+	print_hex_dump_bytes("horus3a: I2C data: ",
+		DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int horus3a_write_regs(struct horus3a_priv *priv,
+			      u8 reg, const u8 *data, u32 len)
+{
+	int ret;
+	u8 buf[MAX_WRITE_REGSIZE + 1];
+	struct i2c_msg msg[1] = {
+		{
+			.addr = priv->i2c_address,
+			.flags = 0,
+			.len = len + 1,
+			.buf = buf,
+		}
+	};
+
+	if (len + 1 >= sizeof(buf)) {
+		dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
+			 reg, len + 1);
+		return -E2BIG;
+	}
+
+	horus3a_i2c_debug(priv, reg, 1, data, len);
+	buf[0] = reg;
+	memcpy(&buf[1], data, len);
+	ret = i2c_transfer(priv->i2c, msg, 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EREMOTEIO;
+	if (ret < 0) {
+		dev_warn(&priv->i2c->dev,
+			"%s: i2c wr failed=%d reg=%02x len=%d\n",
+			KBUILD_MODNAME, ret, reg, len);
+		return ret;
+	}
+	return 0;
+}
+
+static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val)
+{
+	return horus3a_write_regs(priv, reg, &val, 1);
+}
+
+static int horus3a_enter_power_save(struct horus3a_priv *priv)
+{
+	u8 data[2];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_SLEEP)
+		return 0;
+	/* IQ Generator disable */
+	horus3a_write_reg(priv, 0x2a, 0x79);
+	/* MDIV_EN = 0 */
+	horus3a_write_reg(priv, 0x29, 0x70);
+	/* VCO disable preparation */
+	horus3a_write_reg(priv, 0x28, 0x3e);
+	/* VCO buffer disable */
+	horus3a_write_reg(priv, 0x2a, 0x19);
+	/* VCO calibration disable */
+	horus3a_write_reg(priv, 0x1c, 0x00);
+	/* Power save setting (xtal is not stopped) */
+	data[0] = 0xC0;
+	/* LNA is Disabled */
+	data[1] = 0xA7;
+	/* 0x11 - 0x12 */
+	horus3a_write_regs(priv, 0x11, data, sizeof(data));
+	priv->state = STATE_SLEEP;
+	return 0;
+}
+
+static int horus3a_leave_power_save(struct horus3a_priv *priv)
+{
+	u8 data[2];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state == STATE_ACTIVE)
+		return 0;
+	/* Leave power save */
+	data[0] = 0x00;
+	/* LNA is Disabled */
+	data[1] = 0xa7;
+	/* 0x11 - 0x12 */
+	horus3a_write_regs(priv, 0x11, data, sizeof(data));
+	/* VCO buffer enable */
+	horus3a_write_reg(priv, 0x2a, 0x79);
+	/* VCO calibration enable */
+	horus3a_write_reg(priv, 0x1c, 0xc0);
+	/* MDIV_EN = 1 */
+	horus3a_write_reg(priv, 0x29, 0x71);
+	usleep_range(5000, 7000);
+	priv->state = STATE_ACTIVE;
+	return 0;
+}
+
+static int horus3a_init(struct dvb_frontend *fe)
+{
+	struct horus3a_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	return 0;
+}
+
+static int horus3a_release(struct dvb_frontend *fe)
+{
+	struct horus3a_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	kfree(fe->tuner_priv);
+	fe->tuner_priv = NULL;
+	return 0;
+}
+
+static int horus3a_sleep(struct dvb_frontend *fe)
+{
+	struct horus3a_priv *priv = fe->tuner_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	horus3a_enter_power_save(priv);
+	return 0;
+}
+
+static int horus3a_set_params(struct dvb_frontend *fe)
+{
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct horus3a_priv *priv = fe->tuner_priv;
+	u32 frequency = p->frequency;
+	u32 symbol_rate = p->symbol_rate/1000;
+	u8 mixdiv = 0;
+	u8 mdiv = 0;
+	u32 ms = 0;
+	u8 f_ctl = 0;
+	u8 g_ctl = 0;
+	u8 fc_lpf = 0;
+	u8 data[5];
+
+	dev_dbg(&priv->i2c->dev, "%s(): frequency %dkHz symbol_rate %dksps\n",
+		__func__, frequency, symbol_rate);
+	if (priv->set_tuner)
+		priv->set_tuner(priv->set_tuner_data, 0);
+	if (priv->state == STATE_SLEEP)
+		horus3a_leave_power_save(priv);
+
+	/* frequency should be X MHz (X : integer) */
+	frequency = DIV_ROUND_CLOSEST(frequency, 1000) * 1000;
+	if (frequency <= 1155000) {
+		mixdiv = 4;
+		mdiv = 1;
+	} else {
+		mixdiv = 2;
+		mdiv = 0;
+	}
+	/* Assumed that fREF == 1MHz (1000kHz) */
+	ms = DIV_ROUND_CLOSEST((frequency * mixdiv) / 2, 1000);
+	if (ms > 0x7FFF) { /* 15 bit */
+		dev_err(&priv->i2c->dev, "horus3a: invalid frequency %d\n",
+			frequency);
+		return -EINVAL;
+	}
+	if (frequency < 975000) {
+		/* F_CTL=11100 G_CTL=001 */
+		f_ctl = 0x1C;
+		g_ctl = 0x01;
+	} else if (frequency < 1050000) {
+		/* F_CTL=11000 G_CTL=010 */
+		f_ctl = 0x18;
+		g_ctl = 0x02;
+	} else if (frequency < 1150000) {
+		/* F_CTL=10100 G_CTL=010 */
+		f_ctl = 0x14;
+		g_ctl = 0x02;
+	} else if (frequency < 1250000) {
+		/* F_CTL=10000 G_CTL=011 */
+		f_ctl = 0x10;
+		g_ctl = 0x03;
+	} else if (frequency < 1350000) {
+		/* F_CTL=01100 G_CTL=100 */
+		f_ctl = 0x0C;
+		g_ctl = 0x04;
+	} else if (frequency < 1450000) {
+		/* F_CTL=01010 G_CTL=100 */
+		f_ctl = 0x0A;
+		g_ctl = 0x04;
+	} else if (frequency < 1600000) {
+		/* F_CTL=00111 G_CTL=101 */
+		f_ctl = 0x07;
+		g_ctl = 0x05;
+	} else if (frequency < 1800000) {
+		/* F_CTL=00100 G_CTL=010 */
+		f_ctl = 0x04;
+		g_ctl = 0x02;
+	} else if (frequency < 2000000) {
+		/* F_CTL=00010 G_CTL=001 */
+		f_ctl = 0x02;
+		g_ctl = 0x01;
+	} else {
+		/* F_CTL=00000 G_CTL=000 */
+		f_ctl = 0x00;
+		g_ctl = 0x00;
+	}
+	/* LPF cutoff frequency setting */
+	if (p->delivery_system == SYS_DVBS) {
+		/*
+		 * rolloff = 0.35
+		 * SR <= 4.3
+		 * fc_lpf = 5
+		 * 4.3 < SR <= 10
+		 * fc_lpf = SR * (1 + rolloff) / 2 + SR / 2 =
+		 *	SR * 1.175 = SR * (47/40)
+		 * 10 < SR
+		 * fc_lpf = SR * (1 + rolloff) / 2 + 5 =
+		 *	SR * 0.675 + 5 = SR * (27/40) + 5
+		 * NOTE: The result should be round up.
+		 */
+		if (symbol_rate <= 4300)
+			fc_lpf = 5;
+		else if (symbol_rate <= 10000)
+			fc_lpf = (u8)DIV_ROUND_UP(symbol_rate * 47, 40000);
+		else
+			fc_lpf = (u8)DIV_ROUND_UP(symbol_rate * 27, 40000) + 5;
+		/* 5 <= fc_lpf <= 36 */
+		if (fc_lpf > 36)
+			fc_lpf = 36;
+	} else if (p->delivery_system == SYS_DVBS2) {
+		int rolloff;
+
+		switch (p->rolloff) {
+		case ROLLOFF_35:
+			rolloff = 35;
+			break;
+		case ROLLOFF_25:
+			rolloff = 25;
+			break;
+		case ROLLOFF_20:
+			rolloff = 20;
+			break;
+		case ROLLOFF_AUTO:
+		default:
+			dev_err(&priv->i2c->dev,
+				"horus3a: auto roll-off is not supported\n");
+			return -EINVAL;
+		}
+		/*
+		 * SR <= 4.5:
+		 * fc_lpf = 5
+		 * 4.5 < SR <= 10:
+		 * fc_lpf = SR * (1 + rolloff) / 2 + SR / 2
+		 * 10 < SR:
+		 * fc_lpf = SR * (1 + rolloff) / 2 + 5
+		 * NOTE: The result should be round up.
+		 */
+		if (symbol_rate <= 4500)
+			fc_lpf = 5;
+		else if (symbol_rate <= 10000)
+			fc_lpf = (u8)DIV_ROUND_UP(
+				symbol_rate * (200 + rolloff), 200000);
+		else
+			fc_lpf = (u8)DIV_ROUND_UP(
+				symbol_rate * (100 + rolloff), 200000) + 5;
+		/* 5 <= fc_lpf <= 36 is valid */
+		if (fc_lpf > 36)
+			fc_lpf = 36;
+	} else {
+		dev_err(&priv->i2c->dev,
+			"horus3a: invalid delivery system %d\n",
+			p->delivery_system);
+		return -EINVAL;
+	}
+	/* 0x00 - 0x04 */
+	data[0] = (u8)((ms >> 7) & 0xFF);
+	data[1] = (u8)((ms << 1) & 0xFF);
+	data[2] = 0x00;
+	data[3] = 0x00;
+	data[4] = (u8)(mdiv << 7);
+	horus3a_write_regs(priv, 0x00, data, sizeof(data));
+	/* Write G_CTL, F_CTL */
+	horus3a_write_reg(priv, 0x09, (u8)((g_ctl << 5) | f_ctl));
+	/* Write LPF cutoff frequency */
+	horus3a_write_reg(priv, 0x37, (u8)(0x80 | (fc_lpf << 1)));
+	/* Start Calibration */
+	horus3a_write_reg(priv, 0x05, 0x80);
+	/* IQ Generator enable */
+	horus3a_write_reg(priv, 0x2a, 0x7b);
+	/* tuner stabilization time */
+	msleep(60);
+	/* Store tuned frequency to the struct */
+	priv->frequency = ms * 2 * 1000 / mixdiv;
+	return 0;
+}
+
+static int horus3a_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct horus3a_priv *priv = fe->tuner_priv;
+
+	*frequency = priv->frequency;
+	return 0;
+}
+
+static struct dvb_tuner_ops horus3a_tuner_ops = {
+	.info = {
+		.name = "Sony Horus3a",
+		.frequency_min = 950000,
+		.frequency_max = 2150000,
+		.frequency_step = 1000,
+	},
+	.init = horus3a_init,
+	.release = horus3a_release,
+	.sleep = horus3a_sleep,
+	.set_params = horus3a_set_params,
+	.get_frequency = horus3a_get_frequency,
+};
+
+struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+				    const struct horus3a_config *config,
+				    struct i2c_adapter *i2c)
+{
+	u8 buf[3], val;
+	struct horus3a_priv *priv = NULL;
+
+	priv = kzalloc(sizeof(struct horus3a_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return NULL;
+	priv->i2c_address = (config->i2c_address >> 1);
+	priv->i2c = i2c;
+	priv->set_tuner_data = config->set_tuner_priv;
+	priv->set_tuner = config->set_tuner_callback;
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+
+	/* wait 4ms after power on */
+	usleep_range(4000, 6000);
+	/* IQ Generator disable */
+	horus3a_write_reg(priv, 0x2a, 0x79);
+	/* REF_R = Xtal Frequency */
+	buf[0] = config->xtal_freq_mhz;
+	buf[1] = config->xtal_freq_mhz;
+	buf[2] = 0;
+	/* 0x6 - 0x8 */
+	horus3a_write_regs(priv, 0x6, buf, 3);
+	/* IQ Out = Single Ended */
+	horus3a_write_reg(priv, 0x0a, 0x40);
+	switch (config->xtal_freq_mhz) {
+	case 27:
+		val = 0x1f;
+		break;
+	case 24:
+		val = 0x10;
+		break;
+	case 16:
+		val = 0xc;
+		break;
+	default:
+		val = 0;
+		dev_warn(&priv->i2c->dev,
+			"horus3a: invalid xtal frequency %dMHz\n",
+			config->xtal_freq_mhz);
+		break;
+	}
+	val <<= 2;
+	horus3a_write_reg(priv, 0x0e, val);
+	horus3a_enter_power_save(priv);
+	usleep_range(3000, 5000);
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+
+	memcpy(&fe->ops.tuner_ops, &horus3a_tuner_ops,
+				sizeof(struct dvb_tuner_ops));
+	fe->tuner_priv = priv;
+	dev_info(&priv->i2c->dev,
+		"Sony HORUS3A attached on addr=%x at I2C adapter %p\n",
+		priv->i2c_address, priv->i2c);
+	return fe;
+}
+EXPORT_SYMBOL(horus3a_attach);
+
+MODULE_DESCRIPTION("Sony HORUS3A sattelite tuner driver");
+MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
new file mode 100644
index 0000000..b055319
--- /dev/null
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -0,0 +1,58 @@
+/*
+ * horus3a.h
+ *
+ * Sony Horus3A DVB-S/S2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DVB_HORUS3A_H__
+#define __DVB_HORUS3A_H__
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+
+/**
+ * struct horus3a_config - the configuration of Horus3A tuner driver
+ * @i2c_address:    I2C address of the tuner
+ * @xtal_freq_mhz:  Oscillator frequency, MHz
+ * @set_tuner_priv: Callback function private context
+ * @set_tuner_callback: Callback function that notifies the parent driver
+ *          which tuner is active now
+ */
+struct horus3a_config {
+	u8	i2c_address;
+	u8	xtal_freq_mhz;
+	void	*set_tuner_priv;
+	int	(*set_tuner_callback)(void *, int);
+};
+
+#if IS_REACHABLE(CONFIG_DVB_HORUS3A)
+extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+					const struct horus3a_config *config,
+					struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+					const struct horus3a_config *config,
+					struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
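
A hedged attach sketch for this header (the address and crystal values are assumptions for illustration; note the driver halves i2c_address internally, so an 8-bit address is expected):

    static struct horus3a_config tuner_config = {
    	.i2c_address   = 0xc0,	/* assumed 8-bit address */
    	.xtal_freq_mhz = 24,	/* driver supports 16, 24 and 27 MHz */
    };

    if (!horus3a_attach(fe, &tuner_config, i2c_adap))
    	return -ENODEV;
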
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
new file mode 100644
index 0000000..ef3021e
--- /dev/null
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -0,0 +1,189 @@
+/*
+ * lnbh25.c
+ *
+ * Driver for LNB supply and control IC LNBH25
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include "dvb_frontend.h"
+#include "lnbh25.h"
+
+/**
+ * struct lnbh25_priv - LNBH25 driver private data
+ * @i2c:		pointer to the I2C adapter structure
+ * @i2c_address:	I2C address of LNBH25 SEC chip
+ * @config:		Registers configuration:
+ *			offset 0: 1st register address, always 0x02 (DATA1)
+ *			offset 1: DATA1 register value
+ *			offset 2: DATA2 register value
+ */
+struct lnbh25_priv {
+	struct i2c_adapter	*i2c;
+	u8			i2c_address;
+	u8			config[3];
+};
+
+#define LNBH25_STATUS_OFL	0x1
+#define LNBH25_STATUS_VMON	0x4
+#define LNBH25_VSEL_13		0x03
+#define LNBH25_VSEL_18		0x0a
+
+static int lnbh25_read_vmon(struct lnbh25_priv *priv)
+{
+	int i, ret;
+	u8 addr = 0x00;
+	u8 status[6];
+	struct i2c_msg msg[2] = {
+		{
+			.addr = priv->i2c_address,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr
+		}, {
+			.addr = priv->i2c_address,
+			.flags = I2C_M_RD,
+			.len = sizeof(status),
+			.buf = status
+		}
+	};
+
+	for (i = 0; i < 2; i++) {
+		ret = i2c_transfer(priv->i2c, &msg[i], 1);
+		if (ret >= 0 && ret != 1)
+			ret = -EIO;
+		if (ret < 0) {
+			dev_dbg(&priv->i2c->dev,
+				"%s(): I2C transfer %d failed (%d)\n",
+				__func__, i, ret);
+			return ret;
+		}
+	}
+	print_hex_dump_bytes("lnbh25_read_vmon: ",
+		DUMP_PREFIX_OFFSET, status, sizeof(status));
+	if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) {
+		dev_err(&priv->i2c->dev,
+			"%s(): voltage in failure state, status reg 0x%x\n",
+			__func__, status[0]);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int lnbh25_set_voltage(struct dvb_frontend *fe,
+			      enum fe_sec_voltage voltage)
+{
+	int ret;
+	u8 data1_reg;
+	const char *vsel;
+	struct lnbh25_priv *priv = fe->sec_priv;
+	struct i2c_msg msg = {
+		.addr = priv->i2c_address,
+		.flags = 0,
+		.len = sizeof(priv->config),
+		.buf = priv->config
+	};
+
+	switch (voltage) {
+	case SEC_VOLTAGE_OFF:
+		data1_reg = 0x00;
+		vsel = "Off";
+		break;
+	case SEC_VOLTAGE_13:
+		data1_reg = LNBH25_VSEL_13;
+		vsel = "13V";
+		break;
+	case SEC_VOLTAGE_18:
+		data1_reg = LNBH25_VSEL_18;
+		vsel = "18V";
+		break;
+	default:
+		return -EINVAL;
+	}
+	priv->config[1] = data1_reg;
+	dev_dbg(&priv->i2c->dev,
+		"%s(): %s, I2C 0x%x write [ %02x %02x %02x ]\n",
+		__func__, vsel, priv->i2c_address,
+		priv->config[0], priv->config[1], priv->config[2]);
+	ret = i2c_transfer(priv->i2c, &msg, 1);
+	if (ret >= 0 && ret != 1)
+		ret = -EIO;
+	if (ret < 0) {
+		dev_err(&priv->i2c->dev, "%s(): I2C transfer error (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+	if (voltage != SEC_VOLTAGE_OFF) {
+		msleep(120);
+		ret = lnbh25_read_vmon(priv);
+	} else {
+		msleep(20);
+		ret = 0;
+	}
+	return ret;
+}
+
+static void lnbh25_release(struct dvb_frontend *fe)
+{
+	struct lnbh25_priv *priv = fe->sec_priv;
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	lnbh25_set_voltage(fe, SEC_VOLTAGE_OFF);
+	kfree(fe->sec_priv);
+	fe->sec_priv = NULL;
+}
+
+struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
+				   struct lnbh25_config *cfg,
+				   struct i2c_adapter *i2c)
+{
+	struct lnbh25_priv *priv;
+
+	dev_dbg(&i2c->dev, "%s()\n", __func__);
+	priv = kzalloc(sizeof(struct lnbh25_priv), GFP_KERNEL);
+	if (!priv)
+		return NULL;
+	priv->i2c_address = (cfg->i2c_address >> 1);
+	priv->i2c = i2c;
+	priv->config[0] = 0x02;
+	priv->config[1] = 0x00;
+	priv->config[2] = cfg->data2_config;
+	fe->sec_priv = priv;
+	if (lnbh25_set_voltage(fe, SEC_VOLTAGE_OFF)) {
+		dev_err(&i2c->dev,
+			"%s(): no LNBH25 found at I2C addr 0x%02x\n",
+			__func__, priv->i2c_address);
+		kfree(priv);
+		fe->sec_priv = NULL;
+		return NULL;
+	}
+
+	fe->ops.release_sec = lnbh25_release;
+	fe->ops.set_voltage = lnbh25_set_voltage;
+
+	dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
+		__func__, priv->i2c_address);
+	return fe;
+}
+EXPORT_SYMBOL(lnbh25_attach);
+
+MODULE_DESCRIPTION("ST LNBH25 driver");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
new file mode 100644
index 0000000..69f30e2
--- /dev/null
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -0,0 +1,56 @@
+/*
+ * lnbh25.h
+ *
+ * Driver for LNB supply and control IC LNBH25
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef LNBH25_H
+#define LNBH25_H
+
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+/* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
+#define LNBH25_TEN	0x01
+/* Low power mode activated (used only with 22 kHz tone output disabled) */
+#define LNBH25_LPM	0x02
+/* DSQIN input pin is set to receive external 22 kHz TTL signal source */
+#define LNBH25_EXTM	0x04
+
+struct lnbh25_config {
+	u8	i2c_address;
+	u8	data2_config;
+};
+
+#if IS_REACHABLE(CONFIG_DVB_LNBH25)
+struct dvb_frontend *lnbh25_attach(
+	struct dvb_frontend *fe,
+	struct lnbh25_config *cfg,
+	struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *lnbh25_attach(
+	struct dvb_frontend *fe,
+	struct lnbh25_config *cfg,
+	struct i2c_adapter *i2c)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
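
Unlike a demodulator attach, lnbh25_attach() hooks itself into the frontend's own ops (set_voltage and release_sec) and returns the same fe pointer, or NULL if the probe write fails. A minimal sketch under assumed board values:

    static struct lnbh25_config lnb_config = {
    	.i2c_address  = 0x10,		/* assumed 8-bit address, shifted >> 1 internally */
    	.data2_config = LNBH25_TEN,	/* 22 kHz tone controlled by DSQIN */
    };

    if (!lnbh25_attach(fe, &lnb_config, i2c_adap))
    	return -ENODEV;
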
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e9b2d2b..ff31e7a 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1495,7 +1495,6 @@
 
 static struct i2c_driver m88ds3103_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "m88ds3103",
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 3d01f4f..b792f30 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -915,7 +915,6 @@
 
 static struct i2c_driver rtl2830_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "rtl2830",
 	},
 	.probe		= rtl2830_probe,
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 822ea4b..78b87b2 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1319,7 +1319,6 @@
 
 static struct i2c_driver rtl2832_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "rtl2832",
 	},
 	.probe		= rtl2832_probe,
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 7edb885..d5b994f 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1538,7 +1538,6 @@
 static struct platform_driver rtl2832_sdr_driver = {
 	.driver = {
 		.name   = "rtl2832_sdr",
-		.owner  = THIS_MODULE,
 	},
 	.probe          = rtl2832_sdr_probe,
 	.remove         = rtl2832_sdr_remove,
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index b2d9fe1..d6a8fa6 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -466,7 +466,7 @@
 
 static int s921_get_algo(struct dvb_frontend *fe)
 {
-	return 1; /* FE_ALGO_HW */
+	return DVBFE_ALGO_HW;
 }
 
 static void s921_release(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 25e238c..81788c5 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -757,7 +757,6 @@
 
 static struct i2c_driver si2168_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "si2168",
 	},
 	.probe		= si2168_probe,
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 8fd4276..43d47df 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -426,7 +426,6 @@
 
 static struct i2c_driver sp2_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "sp2",
 	},
 	.probe		= sp2_probe,
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index ec3e18e..44cb73f 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -791,11 +791,13 @@
 	memcpy(buf + 2, data, len);
 
 	if (i2cdebug)
-		printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, buf[2]);
+		printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+			state->config->demod_address, reg, buf[2]);
 
 	ret = i2c_transfer(state->i2c, &msg, 1);
 	if (ret != 1)
-		printk(KERN_ERR "%s: i2c write error!\n", __func__);
+		printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
+			__func__, state->config->demod_address, reg, buf[2]);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -829,10 +831,12 @@
 
 	ret = i2c_transfer(state->i2c, msg, 2);
 	if (ret != 2)
-		printk(KERN_ERR "%s: i2c read error\n", __func__);
+		printk(KERN_ERR "%s: i2c read error ([%02x] %02x: %02x)\n",
+			__func__, state->config->demod_address, reg, b1[0]);
 
 	if (i2cdebug)
-		printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, b1[0]);
+		printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+			state->config->demod_address, reg, b1[0]);
 
 	return b1[0];
 }
@@ -1550,6 +1554,11 @@
 
 	switch (state->config->xtal) {
 		/*set internal freq to 53.125MHz */
+	case 16000000:
+		stv0367_writereg(state, R367TER_PLLMDIV, 0x2);
+		stv0367_writereg(state, R367TER_PLLNDIV, 0x1b);
+		stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
+		break;
 	case 25000000:
 		stv0367_writereg(state, R367TER_PLLMDIV, 0xa);
 		stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index f6dc630..119d475 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,102 +20,15 @@
 
 #include "tda10071_priv.h"
 
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  64
-
 static struct dvb_frontend_ops tda10071_ops;
 
-/* write multiple registers */
-static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
-	int len)
-{
-	int ret;
-	u8 buf[MAX_XFER_SIZE];
-	struct i2c_msg msg[1] = {
-		{
-			.addr = priv->cfg.demod_i2c_addr,
-			.flags = 0,
-			.len = 1 + len,
-			.buf = buf,
-		}
-	};
-
-	if (1 + len > sizeof(buf)) {
-		dev_warn(&priv->i2c->dev,
-				"%s: i2c wr reg=%04x: len=%d is too big!\n",
-				KBUILD_MODNAME, reg, len);
-		return -EINVAL;
-	}
-
-	buf[0] = reg;
-	memcpy(&buf[1], val, len);
-
-	ret = i2c_transfer(priv->i2c, msg, 1);
-	if (ret == 1) {
-		ret = 0;
-	} else {
-		dev_warn(&priv->i2c->dev,
-				"%s: i2c wr failed=%d reg=%02x len=%d\n",
-				KBUILD_MODNAME, ret, reg, len);
-		ret = -EREMOTEIO;
-	}
-	return ret;
-}
-
-/* read multiple registers */
-static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
-	int len)
-{
-	int ret;
-	u8 buf[MAX_XFER_SIZE];
-	struct i2c_msg msg[2] = {
-		{
-			.addr = priv->cfg.demod_i2c_addr,
-			.flags = 0,
-			.len = 1,
-			.buf = &reg,
-		}, {
-			.addr = priv->cfg.demod_i2c_addr,
-			.flags = I2C_M_RD,
-			.len = len,
-			.buf = buf,
-		}
-	};
-
-	if (len > sizeof(buf)) {
-		dev_warn(&priv->i2c->dev,
-				"%s: i2c wr reg=%04x: len=%d is too big!\n",
-				KBUILD_MODNAME, reg, len);
-		return -EINVAL;
-	}
-
-	ret = i2c_transfer(priv->i2c, msg, 2);
-	if (ret == 2) {
-		memcpy(val, buf, len);
-		ret = 0;
-	} else {
-		dev_warn(&priv->i2c->dev,
-				"%s: i2c rd failed=%d reg=%02x len=%d\n",
-				KBUILD_MODNAME, ret, reg, len);
-		ret = -EREMOTEIO;
-	}
-	return ret;
-}
-
-/* write single register */
-static int tda10071_wr_reg(struct tda10071_priv *priv, u8 reg, u8 val)
-{
-	return tda10071_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
-{
-	return tda10071_rd_regs(priv, reg, val, 1);
-}
-
+/*
+ * XXX: regmap_update_bits() does not fit our needs as it does not support
+ * partially volatile registers. Also it performs a register read even when
+ * the mask is as wide as the register value.
+ */
 /* write single register with mask */
-static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
+static int tda10071_wr_reg_mask(struct tda10071_dev *dev,
 				u8 reg, u8 val, u8 mask)
 {
 	int ret;
@@ -123,7 +36,7 @@
 
 	/* no need for read if whole reg is written */
 	if (mask != 0xff) {
-		ret = tda10071_rd_regs(priv, reg, &tmp, 1);
+		ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
 		if (ret)
 			return ret;
 
@@ -132,64 +45,45 @@
 		val |= tmp;
 	}
 
-	return tda10071_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register with mask */
-static int tda10071_rd_reg_mask(struct tda10071_priv *priv,
-				u8 reg, u8 *val, u8 mask)
-{
-	int ret, i;
-	u8 tmp;
-
-	ret = tda10071_rd_regs(priv, reg, &tmp, 1);
-	if (ret)
-		return ret;
-
-	tmp &= mask;
-
-	/* find position of the first bit */
-	for (i = 0; i < 8; i++) {
-		if ((mask >> i) & 0x01)
-			break;
-	}
-	*val = tmp >> i;
-
-	return 0;
+	return regmap_bulk_write(dev->regmap, reg, &val, 1);
 }
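
The comment above spells out why the driver keeps a hand-rolled masked write: regmap_update_bits() always reads the register back first, which is wrong for partially volatile registers and wasteful when the mask covers the whole value. For contrast, a simplified sketch of the generic helper's semantics (not the kernel implementation; assumes <linux/regmap.h>):

    static int generic_update_bits(struct regmap *map, unsigned int reg,
    			       unsigned int mask, unsigned int val)
    {
    	unsigned int tmp;
    	int ret;

    	ret = regmap_read(map, reg, &tmp);	/* reads even when mask is 0xff */
    	if (ret)
    		return ret;
    	tmp = (tmp & ~mask) | (val & mask);
    	return regmap_write(map, reg, tmp);
    }
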
 
 /* execute firmware command */
-static int tda10071_cmd_execute(struct tda10071_priv *priv,
+static int tda10071_cmd_execute(struct tda10071_dev *dev,
 	struct tda10071_cmd *cmd)
 {
+	struct i2c_client *client = dev->client;
 	int ret, i;
-	u8 tmp;
+	unsigned int uitmp;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
+	mutex_lock(&dev->cmd_execute_mutex);
+
 	/* write cmd and args for firmware */
-	ret = tda10071_wr_regs(priv, 0x00, cmd->args, cmd->len);
+	ret = regmap_bulk_write(dev->regmap, 0x00, cmd->args, cmd->len);
 	if (ret)
-		goto error;
+		goto error_mutex_unlock;
 
 	/* start cmd execution */
-	ret = tda10071_wr_reg(priv, 0x1f, 1);
+	ret = regmap_write(dev->regmap, 0x1f, 1);
 	if (ret)
-		goto error;
+		goto error_mutex_unlock;
 
 	/* wait cmd execution terminate */
-	for (i = 1000, tmp = 1; i && tmp; i--) {
-		ret = tda10071_rd_reg(priv, 0x1f, &tmp);
+	for (i = 1000, uitmp = 1; i && uitmp; i--) {
+		ret = regmap_read(dev->regmap, 0x1f, &uitmp);
 		if (ret)
-			goto error;
+			goto error_mutex_unlock;
 
 		usleep_range(200, 5000);
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+	mutex_unlock(&dev->cmd_execute_mutex);
+	dev_dbg(&client->dev, "loop=%d\n", i);
 
 	if (i == 0) {
 		ret = -ETIMEDOUT;
@@ -197,26 +91,28 @@
 	}
 
 	return ret;
+error_mutex_unlock:
+	mutex_unlock(&dev->cmd_execute_mutex);
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_set_tone(struct dvb_frontend *fe,
 	enum fe_sec_tone_mode fe_sec_tone_mode)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret;
 	u8 tone;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: tone_mode=%d\n", __func__,
-			fe_sec_tone_mode);
+	dev_dbg(&client->dev, "tone_mode=%d\n", fe_sec_tone_mode);
 
 	switch (fe_sec_tone_mode) {
 	case SEC_TONE_ON:
@@ -226,8 +122,7 @@
 		tone = 0;
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
-				__func__);
+		dev_dbg(&client->dev, "invalid fe_sec_tone_mode\n");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -238,30 +133,31 @@
 	cmd.args[3] = 0x00;
 	cmd.args[4] = tone;
 	cmd.len = 5;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_set_voltage(struct dvb_frontend *fe,
 	enum fe_sec_voltage fe_sec_voltage)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret;
 	u8 voltage;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: voltage=%d\n", __func__, fe_sec_voltage);
+	dev_dbg(&client->dev, "voltage=%d\n", fe_sec_voltage);
 
 	switch (fe_sec_voltage) {
 	case SEC_VOLTAGE_13:
@@ -274,8 +170,7 @@
 		voltage = 0;
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_voltage\n",
-				__func__);
+		dev_dbg(&client->dev, "invalid fe_sec_voltage\n");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -284,31 +179,31 @@
 	cmd.args[1] = 0;
 	cmd.args[2] = voltage;
 	cmd.len = 3;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
 	struct dvb_diseqc_master_cmd *diseqc_cmd)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret, i;
-	u8 tmp;
+	unsigned int uitmp;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: msg_len=%d\n", __func__,
-			diseqc_cmd->msg_len);
+	dev_dbg(&client->dev, "msg_len=%d\n", diseqc_cmd->msg_len);
 
 	if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
 		ret = -EINVAL;
@@ -316,22 +211,22 @@
 	}
 
 	/* wait LNB TX */
-	for (i = 500, tmp = 0; i && !tmp; i--) {
-		ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+	for (i = 500, uitmp = 0; i && !uitmp; i--) {
+		ret = regmap_read(dev->regmap, 0x47, &uitmp);
 		if (ret)
 			goto error;
-
+		uitmp = (uitmp >> 0) & 1;
 		usleep_range(10000, 20000);
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+	dev_dbg(&client->dev, "loop=%d\n", i);
 
 	if (i == 0) {
 		ret = -ETIMEDOUT;
 		goto error;
 	}
 
-	ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+	ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
 	if (ret)
 		goto error;
 
@@ -344,41 +239,42 @@
 	cmd.args[6] = diseqc_cmd->msg_len;
 	memcpy(&cmd.args[7], diseqc_cmd->msg, diseqc_cmd->msg_len);
 	cmd.len = 7 + diseqc_cmd->msg_len;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
 	struct dvb_diseqc_slave_reply *reply)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret, i;
-	u8 tmp;
+	unsigned int uitmp;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+	dev_dbg(&client->dev, "\n");
 
 	/* wait LNB RX */
-	for (i = 500, tmp = 0; i && !tmp; i--) {
-		ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x02);
+	for (i = 500, uitmp = 0; i && !uitmp; i--) {
+		ret = regmap_read(dev->regmap, 0x47, &uitmp);
 		if (ret)
 			goto error;
-
+		uitmp = (uitmp >> 1) & 1;
 		usleep_range(10000, 20000);
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+	dev_dbg(&client->dev, "loop=%d\n", i);
 
 	if (i == 0) {
 		ret = -ETIMEDOUT;
@@ -386,11 +282,11 @@
 	}
 
 	/* reply len */
-	ret = tda10071_rd_reg(priv, 0x46, &tmp);
+	ret = regmap_read(dev->regmap, 0x46, &uitmp);
 	if (ret)
 		goto error;
 
-	reply->msg_len = tmp & 0x1f; /* [4:0] */
+	reply->msg_len = uitmp & 0x1f; /* [4:0] */
 	if (reply->msg_len > sizeof(reply->msg))
 		reply->msg_len = sizeof(reply->msg); /* truncate API max */
 
@@ -398,35 +294,37 @@
 	cmd.args[0] = CMD_LNB_UPDATE_REPLY;
 	cmd.args[1] = 0;
 	cmd.len = 2;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
-	ret = tda10071_rd_regs(priv, cmd.len, reply->msg, reply->msg_len);
+	ret = regmap_bulk_read(dev->regmap, cmd.len, reply->msg,
+			       reply->msg_len);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
 	enum fe_sec_mini_cmd fe_sec_mini_cmd)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret, i;
-	u8 tmp, burst;
+	unsigned int uitmp;
+	u8 burst;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
-			fe_sec_mini_cmd);
+	dev_dbg(&client->dev, "fe_sec_mini_cmd=%d\n", fe_sec_mini_cmd);
 
 	switch (fe_sec_mini_cmd) {
 	case SEC_MINI_A:
@@ -436,29 +334,28 @@
 		burst = 1;
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
-				__func__);
+		dev_dbg(&client->dev, "invalid fe_sec_mini_cmd\n");
 		ret = -EINVAL;
 		goto error;
 	}
 
 	/* wait LNB TX */
-	for (i = 500, tmp = 0; i && !tmp; i--) {
-		ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+	for (i = 500, uitmp = 0; i && !uitmp; i--) {
+		ret = regmap_read(dev->regmap, 0x47, &uitmp);
 		if (ret)
 			goto error;
-
+		uitmp = (uitmp >> 0) & 1;
 		usleep_range(10000, 20000);
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+	dev_dbg(&client->dev, "loop=%d\n", i);
 
 	if (i == 0) {
 		ret = -ETIMEDOUT;
 		goto error;
 	}
 
-	ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+	ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
 	if (ret)
 		goto error;
 
@@ -466,219 +363,217 @@
 	cmd.args[1] = 0;
 	cmd.args[2] = burst;
 	cmd.len = 3;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	struct tda10071_cmd cmd;
 	int ret;
-	u8 tmp;
+	unsigned int uitmp;
+	u8 buf[8];
 
 	*status = 0;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = 0;
 		goto error;
 	}
 
-	ret = tda10071_rd_reg(priv, 0x39, &tmp);
+	ret = regmap_read(dev->regmap, 0x39, &uitmp);
 	if (ret)
 		goto error;
 
 	/* 0x39[0] tuner PLL */
-	if (tmp & 0x02) /* demod PLL */
+	if (uitmp & 0x02) /* demod PLL */
 		*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
-	if (tmp & 0x04) /* viterbi or LDPC*/
+	if (uitmp & 0x04) /* viterbi or LDPC*/
 		*status |= FE_HAS_VITERBI;
-	if (tmp & 0x08) /* RS or BCH */
+	if (uitmp & 0x08) /* RS or BCH */
 		*status |= FE_HAS_SYNC | FE_HAS_LOCK;
 
-	priv->fe_status = *status;
+	dev->fe_status = *status;
+
+	/* signal strength */
+	if (dev->fe_status & FE_HAS_SIGNAL) {
+		cmd.args[0] = CMD_GET_AGCACC;
+		cmd.args[1] = 0;
+		cmd.len = 2;
+		ret = tda10071_cmd_execute(dev, &cmd);
+		if (ret)
+			goto error;
+
+		/* input power estimate dBm */
+		ret = regmap_read(dev->regmap, 0x50, &uitmp);
+		if (ret)
+			goto error;
+
+		c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+		c->strength.stat[0].svalue = (int) (uitmp - 256) * 1000;
+	} else {
+		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* CNR */
+	if (dev->fe_status & FE_HAS_VITERBI) {
+		/* Es/No */
+		ret = regmap_bulk_read(dev->regmap, 0x3a, buf, 2);
+		if (ret)
+			goto error;
+
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+		c->cnr.stat[0].svalue = (buf[0] << 8 | buf[1] << 0) * 100;
+	} else {
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* UCB/PER/BER */
+	if (dev->fe_status & FE_HAS_LOCK) {
+		/* TODO: report total bits/packets */
+		u8 delivery_system, reg, len;
+
+		switch (dev->delivery_system) {
+		case SYS_DVBS:
+			reg = 0x4c;
+			len = 8;
+			delivery_system = 1;
+			break;
+		case SYS_DVBS2:
+			reg = 0x4d;
+			len = 4;
+			delivery_system = 0;
+			break;
+		default:
+			ret = -EINVAL;
+			goto error;
+		}
+
+		ret = regmap_read(dev->regmap, reg, &uitmp);
+		if (ret)
+			goto error;
+
+		if (dev->meas_count == uitmp) {
+			dev_dbg(&client->dev, "meas not ready=%02x\n", uitmp);
+			ret = 0;
+			goto error;
+		} else {
+			dev->meas_count = uitmp;
+		}
+
+		cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
+		cmd.args[1] = 0;
+		cmd.args[2] = delivery_system;
+		cmd.len = 3;
+		ret = tda10071_cmd_execute(dev, &cmd);
+		if (ret)
+			goto error;
+
+		ret = regmap_bulk_read(dev->regmap, cmd.len, buf, len);
+		if (ret)
+			goto error;
+
+		if (dev->delivery_system == SYS_DVBS) {
+			dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
+					 buf[2] << 8 | buf[3] << 0;
+			dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
+					       buf[2] << 8 | buf[3] << 0;
+			c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+			c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+			dev->block_error += buf[4] << 8 | buf[5] << 0;
+			c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+			c->block_error.stat[0].uvalue = dev->block_error;
+		} else {
+			dev->dvbv3_ber = buf[0] << 8 | buf[1] << 0;
+			dev->post_bit_error += buf[0] << 8 | buf[1] << 0;
+			c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+			c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+			c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		}
+	} else {
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+	if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+		*snr = div_s64(c->cnr.stat[0].svalue, 100);
+	else
 		*snr = 0;
-		ret = 0;
-		goto error;
-	}
-
-	ret = tda10071_rd_regs(priv, 0x3a, buf, 2);
-	if (ret)
-		goto error;
-
-	/* Es/No dBx10 */
-	*snr = buf[0] << 8 | buf[1];
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
+	return 0;
 }
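
This is the DVBv5 statistics pattern: read_status() above gathers everything once per poll into the dtv_property_cache (cnr.svalue in units of 0.001 dB), and the legacy ioctl handlers shrink to conversions of the cached values. For example, a raw Es/No word of 210 is cached as svalue = 21000 (21.000 dB) and read back here as *snr = 210, matching the old dB x 10 convention.
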
 
 static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
-	struct tda10071_cmd cmd;
-	int ret;
-	u8 tmp;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	unsigned int uitmp;
 
-	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+	if (c->strength.stat[0].scale == FE_SCALE_DECIBEL) {
+		uitmp = div_s64(c->strength.stat[0].svalue, 1000) + 256;
+		uitmp = clamp(uitmp, 181U, 236U); /* -75dBm - -20dBm */
+		/* scale value to 0x0000-0xffff */
+		*strength = (uitmp-181) * 0xffff / (236-181);
+	} else {
 		*strength = 0;
-		ret = 0;
-		goto error;
 	}
-
-	cmd.args[0] = CMD_GET_AGCACC;
-	cmd.args[1] = 0;
-	cmd.len = 2;
-	ret = tda10071_cmd_execute(priv, &cmd);
-	if (ret)
-		goto error;
-
-	/* input power estimate dBm */
-	ret = tda10071_rd_reg(priv, 0x50, &tmp);
-	if (ret)
-		goto error;
-
-	if (tmp < 181)
-		tmp = 181; /* -75 dBm */
-	else if (tmp > 236)
-		tmp = 236; /* -20 dBm */
-
-	/* scale value to 0x0000-0xffff */
-	*strength = (tmp-181) * 0xffff / (236-181);
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
+	return 0;
 }
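
A worked pass through the strength conversion: a cached reading of -40 dBm (svalue = -40000) gives uitmp = -40 + 256 = 216, which sits inside the 181..236 clamp (-75 to -20 dBm), so *strength = (216 - 181) * 0xffff / 55 = 41704 on the 0x0000-0xffff relative scale.
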
 
 static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
-	struct tda10071_cmd cmd;
-	int ret, i, len;
-	u8 tmp, reg, buf[8];
+	struct tda10071_dev *dev = fe->demodulator_priv;
 
-	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
-		*ber = priv->ber = 0;
-		ret = 0;
-		goto error;
-	}
-
-	switch (priv->delivery_system) {
-	case SYS_DVBS:
-		reg = 0x4c;
-		len = 8;
-		i = 1;
-		break;
-	case SYS_DVBS2:
-		reg = 0x4d;
-		len = 4;
-		i = 0;
-		break;
-	default:
-		*ber = priv->ber = 0;
-		return 0;
-	}
-
-	ret = tda10071_rd_reg(priv, reg, &tmp);
-	if (ret)
-		goto error;
-
-	if (priv->meas_count[i] == tmp) {
-		dev_dbg(&priv->i2c->dev, "%s: meas not ready=%02x\n", __func__,
-				tmp);
-		*ber = priv->ber;
-		return 0;
-	} else {
-		priv->meas_count[i] = tmp;
-	}
-
-	cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
-	cmd.args[1] = 0;
-	cmd.args[2] = i;
-	cmd.len = 3;
-	ret = tda10071_cmd_execute(priv, &cmd);
-	if (ret)
-		goto error;
-
-	ret = tda10071_rd_regs(priv, cmd.len, buf, len);
-	if (ret)
-		goto error;
-
-	if (priv->delivery_system == SYS_DVBS) {
-		*ber = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
-		priv->ucb += (buf[4] << 8) | buf[5];
-	} else {
-		*ber = (buf[0] << 8) | buf[1];
-	}
-	priv->ber = *ber;
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
+	*ber = dev->dvbv3_ber;
+	return 0;
 }
 
 static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
-	int ret = 0;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+	if (c->block_error.stat[0].scale == FE_SCALE_COUNTER)
+		*ucblocks = c->block_error.stat[0].uvalue;
+	else
 		*ucblocks = 0;
-		goto error;
-	}
-
-	/* UCB is updated when BER is read. Assume BER is read anyway. */
-
-	*ucblocks = priv->ucb;
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
+	return 0;
 }
 
 static int tda10071_set_frontend(struct dvb_frontend *fe)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, i;
 	u8 mode, rolloff, pilot, inversion, div;
 	enum fe_modulation modulation;
 
-	dev_dbg(&priv->i2c->dev,
-			"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
-			__func__, c->delivery_system, c->modulation,
-			c->frequency, c->symbol_rate, c->inversion, c->pilot,
-			c->rolloff);
+	dev_dbg(&client->dev,
+		"delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+		c->delivery_system, c->modulation, c->frequency, c->symbol_rate,
+		c->inversion, c->pilot, c->rolloff);
 
-	priv->delivery_system = SYS_UNDEFINED;
+	dev->delivery_system = SYS_UNDEFINED;
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
@@ -696,7 +591,7 @@
 		inversion = 3;
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n", __func__);
+		dev_dbg(&client->dev, "invalid inversion\n");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -722,8 +617,7 @@
 			break;
 		case ROLLOFF_AUTO:
 		default:
-			dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
-					__func__);
+			dev_dbg(&client->dev, "invalid rolloff\n");
 			ret = -EINVAL;
 			goto error;
 		}
@@ -739,15 +633,13 @@
 			pilot = 2;
 			break;
 		default:
-			dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
-					__func__);
+			dev_dbg(&client->dev, "invalid pilot\n");
 			ret = -EINVAL;
 			goto error;
 		}
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
-				__func__);
+		dev_dbg(&client->dev, "invalid delivery_system\n");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -757,15 +649,13 @@
 			modulation == TDA10071_MODCOD[i].modulation &&
 			c->fec_inner == TDA10071_MODCOD[i].fec) {
 			mode = TDA10071_MODCOD[i].val;
-			dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
-					__func__, mode);
+			dev_dbg(&client->dev, "mode found=%02x\n", mode);
 			break;
 		}
 	}
 
 	if (mode == 0xff) {
-		dev_dbg(&priv->i2c->dev, "%s: invalid parameter combination\n",
-				__func__);
+		dev_dbg(&client->dev, "invalid parameter combination\n");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -775,11 +665,11 @@
 	else
 		div = 4;
 
-	ret = tda10071_wr_reg(priv, 0x81, div);
+	ret = regmap_write(dev->regmap, 0x81, div);
 	if (ret)
 		goto error;
 
-	ret = tda10071_wr_reg(priv, 0xe3, div);
+	ret = regmap_write(dev->regmap, 0xe3, div);
 	if (ret)
 		goto error;
 
@@ -799,31 +689,32 @@
 	cmd.args[13] = 0x00;
 	cmd.args[14] = 0x00;
 	cmd.len = 15;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
-	priv->delivery_system = c->delivery_system;
+	dev->delivery_system = c->delivery_system;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_get_frontend(struct dvb_frontend *fe)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, i;
 	u8 buf[5], tmp;
 
-	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
-		ret = -EFAULT;
+	if (!dev->warm || !(dev->fe_status & FE_HAS_LOCK)) {
+		ret = 0;
 		goto error;
 	}
 
-	ret = tda10071_rd_regs(priv, 0x30, buf, 5);
+	ret = regmap_bulk_read(dev->regmap, 0x30, buf, 5);
 	if (ret)
 		goto error;
 
@@ -856,7 +747,7 @@
 
 	c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0);
 
-	ret = tda10071_rd_regs(priv, 0x52, buf, 3);
+	ret = regmap_bulk_read(dev->regmap, 0x52, buf, 3);
 	if (ret)
 		goto error;
 
@@ -864,15 +755,18 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_init(struct dvb_frontend *fe)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	struct tda10071_cmd cmd;
 	int ret, i, len, remaining, fw_size;
+	unsigned int uitmp;
 	const struct firmware *fw;
 	u8 *fw_file = TDA10071_FIRMWARE;
 	u8 tmp, buf[4];
@@ -890,7 +784,7 @@
 	};
 	struct tda10071_reg_val_mask tab2[] = {
 		{ 0xf1, 0x70, 0xff },
-		{ 0x88, priv->cfg.pll_multiplier, 0x3f },
+		{ 0x88, dev->pll_multiplier, 0x3f },
 		{ 0x89, 0x00, 0x10 },
 		{ 0x89, 0x10, 0x10 },
 		{ 0xc0, 0x01, 0x01 },
@@ -934,11 +828,11 @@
 		{ 0xd5, 0x03, 0x03 },
 	};
 
-	if (priv->warm) {
+	if (dev->warm) {
 		/* warm state - wake up device from sleep */
 
 		for (i = 0; i < ARRAY_SIZE(tab); i++) {
-			ret = tda10071_wr_reg_mask(priv, tab[i].reg,
+			ret = tda10071_wr_reg_mask(dev, tab[i].reg,
 				tab[i].val, tab[i].mask);
 			if (ret)
 				goto error;
@@ -948,78 +842,76 @@
 		cmd.args[1] = 0;
 		cmd.args[2] = 0;
 		cmd.len = 3;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 	} else {
 		/* cold state - try to download firmware */
 
 		/* request the firmware, this will block and timeout */
-		ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+		ret = request_firmware(&fw, fw_file, &client->dev);
 		if (ret) {
-			dev_err(&priv->i2c->dev,
-					"%s: did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
-					KBUILD_MODNAME, fw_file, ret);
+			dev_err(&client->dev,
+				"did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
+				fw_file, ret);
 			goto error;
 		}
 
 		/* init */
 		for (i = 0; i < ARRAY_SIZE(tab2); i++) {
-			ret = tda10071_wr_reg_mask(priv, tab2[i].reg,
+			ret = tda10071_wr_reg_mask(dev, tab2[i].reg,
 				tab2[i].val, tab2[i].mask);
 			if (ret)
 				goto error_release_firmware;
 		}
 
 		/*  download firmware */
-		ret = tda10071_wr_reg(priv, 0xe0, 0x7f);
+		ret = regmap_write(dev->regmap, 0xe0, 0x7f);
 		if (ret)
 			goto error_release_firmware;
 
-		ret = tda10071_wr_reg(priv, 0xf7, 0x81);
+		ret = regmap_write(dev->regmap, 0xf7, 0x81);
 		if (ret)
 			goto error_release_firmware;
 
-		ret = tda10071_wr_reg(priv, 0xf8, 0x00);
+		ret = regmap_write(dev->regmap, 0xf8, 0x00);
 		if (ret)
 			goto error_release_firmware;
 
-		ret = tda10071_wr_reg(priv, 0xf9, 0x00);
+		ret = regmap_write(dev->regmap, 0xf9, 0x00);
 		if (ret)
 			goto error_release_firmware;
 
-		dev_info(&priv->i2c->dev,
-				"%s: found a '%s' in cold state, will try to load a firmware\n",
-				KBUILD_MODNAME, tda10071_ops.info.name);
-		dev_info(&priv->i2c->dev,
-				"%s: downloading firmware from file '%s'\n",
-				KBUILD_MODNAME, fw_file);
+		dev_info(&client->dev,
+			 "found a '%s' in cold state, will try to load a firmware\n",
+			 tda10071_ops.info.name);
+		dev_info(&client->dev, "downloading firmware from file '%s'\n",
+			 fw_file);
 
 		/* do not download last byte */
 		fw_size = fw->size - 1;
 
 		for (remaining = fw_size; remaining > 0;
-			remaining -= (priv->cfg.i2c_wr_max - 1)) {
+			remaining -= (dev->i2c_wr_max - 1)) {
 			len = remaining;
-			if (len > (priv->cfg.i2c_wr_max - 1))
-				len = (priv->cfg.i2c_wr_max - 1);
+			if (len > (dev->i2c_wr_max - 1))
+				len = (dev->i2c_wr_max - 1);
 
-			ret = tda10071_wr_regs(priv, 0xfa,
+			ret = regmap_bulk_write(dev->regmap, 0xfa,
 				(u8 *) &fw->data[fw_size - remaining], len);
 			if (ret) {
-				dev_err(&priv->i2c->dev,
-						"%s: firmware download failed=%d\n",
-						KBUILD_MODNAME, ret);
+				dev_err(&client->dev,
+					"firmware download failed=%d\n", ret);
 				goto error_release_firmware;
 			}
 		}
 		release_firmware(fw);
 
-		ret = tda10071_wr_reg(priv, 0xf7, 0x0c);
+		ret = regmap_write(dev->regmap, 0xf7, 0x0c);
 		if (ret)
 			goto error;
 
-		ret = tda10071_wr_reg(priv, 0xe0, 0x00);
+		ret = regmap_write(dev->regmap, 0xe0, 0x00);
 		if (ret)
 			goto error;
 
@@ -1027,53 +919,52 @@
 		msleep(250);
 
 		/* firmware status */
-		ret = tda10071_rd_reg(priv, 0x51, &tmp);
+		ret = regmap_read(dev->regmap, 0x51, &uitmp);
 		if (ret)
 			goto error;
 
-		if (tmp) {
-			dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
-					KBUILD_MODNAME);
+		if (uitmp) {
+			dev_info(&client->dev, "firmware did not run\n");
 			ret = -EFAULT;
 			goto error;
 		} else {
-			priv->warm = true;
+			dev->warm = true;
 		}
 
 		cmd.args[0] = CMD_GET_FW_VERSION;
 		cmd.len = 1;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 
-		ret = tda10071_rd_regs(priv, cmd.len, buf, 4);
+		ret = regmap_bulk_read(dev->regmap, cmd.len, buf, 4);
 		if (ret)
 			goto error;
 
-		dev_info(&priv->i2c->dev, "%s: firmware version %d.%d.%d.%d\n",
-				KBUILD_MODNAME, buf[0], buf[1], buf[2], buf[3]);
-		dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
-				KBUILD_MODNAME, tda10071_ops.info.name);
+		dev_info(&client->dev, "firmware version %d.%d.%d.%d\n",
+			 buf[0], buf[1], buf[2], buf[3]);
+		dev_info(&client->dev, "found a '%s' in warm state\n",
+			 tda10071_ops.info.name);
 
-		ret = tda10071_rd_regs(priv, 0x81, buf, 2);
+		ret = regmap_bulk_read(dev->regmap, 0x81, buf, 2);
 		if (ret)
 			goto error;
 
 		cmd.args[0] = CMD_DEMOD_INIT;
-		cmd.args[1] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
-		cmd.args[2] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
+		cmd.args[1] = ((dev->clk / 1000) >> 8) & 0xff;
+		cmd.args[2] = ((dev->clk / 1000) >> 0) & 0xff;
 		cmd.args[3] = buf[0];
 		cmd.args[4] = buf[1];
-		cmd.args[5] = priv->cfg.pll_multiplier;
-		cmd.args[6] = priv->cfg.spec_inv;
+		cmd.args[5] = dev->pll_multiplier;
+		cmd.args[6] = dev->spec_inv;
 		cmd.args[7] = 0x00;
 		cmd.len = 8;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 
-		if (priv->cfg.tuner_i2c_addr)
-			tmp = priv->cfg.tuner_i2c_addr;
+		if (dev->tuner_i2c_addr)
+			tmp = dev->tuner_i2c_addr;
 		else
 			tmp = 0x14;
 
@@ -1093,22 +984,22 @@
 		cmd.args[13] = 0x00;
 		cmd.args[14] = 0x00;
 		cmd.len = 15;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 
 		cmd.args[0] = CMD_MPEG_CONFIG;
 		cmd.args[1] = 0;
-		cmd.args[2] = priv->cfg.ts_mode;
+		cmd.args[2] = dev->ts_mode;
 		cmd.args[3] = 0x00;
 		cmd.args[4] = 0x04;
 		cmd.args[5] = 0x00;
 		cmd.len = 6;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 
-		ret = tda10071_wr_reg_mask(priv, 0xf0, 0x01, 0x01);
+		ret = regmap_update_bits(dev->regmap, 0xf0, 0x01, 0x01);
 		if (ret)
 			goto error;
 
@@ -1124,7 +1015,7 @@
 		cmd.args[9] = 30;
 		cmd.args[10] = 30;
 		cmd.len = 11;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 
@@ -1133,22 +1024,33 @@
 		cmd.args[2] = 14;
 		cmd.args[3] = 14;
 		cmd.len = 4;
-		ret = tda10071_cmd_execute(priv, &cmd);
+		ret = tda10071_cmd_execute(dev, &cmd);
 		if (ret)
 			goto error;
 	}
 
+	/* init stats here in order to signal the app which stats are supported */
+	c->strength.len = 1;
+	c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->cnr.len = 1;
+	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_error.len = 1;
+	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_error.len = 1;
+	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
 	return ret;
 error_release_firmware:
 	release_firmware(fw);
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int tda10071_sleep(struct dvb_frontend *fe)
 {
-	struct tda10071_priv *priv = fe->demodulator_priv;
+	struct tda10071_dev *dev = fe->demodulator_priv;
+	struct i2c_client *client = dev->client;
 	struct tda10071_cmd cmd;
 	int ret, i;
 	struct tda10071_reg_val_mask tab[] = {
@@ -1164,7 +1066,7 @@
 		{ 0xce, 0x10, 0x10 },
 	};
 
-	if (!priv->warm) {
+	if (!dev->warm) {
 		ret = -EFAULT;
 		goto error;
 	}
@@ -1173,12 +1075,12 @@
 	cmd.args[1] = 0;
 	cmd.args[2] = 1;
 	cmd.len = 3;
-	ret = tda10071_cmd_execute(priv, &cmd);
+	ret = tda10071_cmd_execute(dev, &cmd);
 	if (ret)
 		goto error;
 
 	for (i = 0; i < ARRAY_SIZE(tab); i++) {
-		ret = tda10071_wr_reg_mask(priv, tab[i].reg, tab[i].val,
+		ret = tda10071_wr_reg_mask(dev, tab[i].reg, tab[i].val,
 			tab[i].mask);
 		if (ret)
 			goto error;
@@ -1186,7 +1088,7 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
@@ -1200,71 +1102,6 @@
 	return 0;
 }
 
-static void tda10071_release(struct dvb_frontend *fe)
-{
-	struct tda10071_priv *priv = fe->demodulator_priv;
-	kfree(priv);
-}
-
-struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
-	struct i2c_adapter *i2c)
-{
-	int ret;
-	struct tda10071_priv *priv = NULL;
-	u8 tmp;
-
-	/* allocate memory for the internal priv */
-	priv = kzalloc(sizeof(struct tda10071_priv), GFP_KERNEL);
-	if (priv == NULL) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* make sure demod i2c address is specified */
-	if (!config->demod_i2c_addr) {
-		dev_dbg(&i2c->dev, "%s: invalid demod i2c address\n", __func__);
-		ret = -EINVAL;
-		goto error;
-	}
-
-	/* make sure tuner i2c address is specified */
-	if (!config->tuner_i2c_addr) {
-		dev_dbg(&i2c->dev, "%s: invalid tuner i2c address\n", __func__);
-		ret = -EINVAL;
-		goto error;
-	}
-
-	/* setup the priv */
-	priv->i2c = i2c;
-	memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
-
-	/* chip ID */
-	ret = tda10071_rd_reg(priv, 0xff, &tmp);
-	if (ret || tmp != 0x0f)
-		goto error;
-
-	/* chip type */
-	ret = tda10071_rd_reg(priv, 0xdd, &tmp);
-	if (ret || tmp != 0x00)
-		goto error;
-
-	/* chip version */
-	ret = tda10071_rd_reg(priv, 0xfe, &tmp);
-	if (ret || tmp != 0x01)
-		goto error;
-
-	/* create dvb_frontend */
-	memcpy(&priv->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
-	priv->fe.demodulator_priv = priv;
-
-	return &priv->fe;
-error:
-	dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
-	kfree(priv);
-	return NULL;
-}
-EXPORT_SYMBOL(tda10071_attach);
-
 static struct dvb_frontend_ops tda10071_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
@@ -1289,8 +1126,6 @@
 			FE_CAN_2G_MODULATION
 	},
 
-	.release = tda10071_release,
-
 	.get_tune_settings = tda10071_get_tune_settings,
 
 	.init = tda10071_init,
@@ -1315,7 +1150,7 @@
 
 static struct dvb_frontend *tda10071_get_dvb_frontend(struct i2c_client *client)
 {
-	struct tda10071_priv *dev = i2c_get_clientdata(client);
+	struct tda10071_dev *dev = i2c_get_clientdata(client);
 
 	dev_dbg(&client->dev, "\n");
 
@@ -1325,10 +1160,14 @@
 static int tda10071_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
-	struct tda10071_priv *dev;
+	struct tda10071_dev *dev;
 	struct tda10071_platform_data *pdata = client->dev.platform_data;
 	int ret;
-	u8 u8tmp;
+	unsigned int uitmp;
+	static const struct regmap_config regmap_config = {
+		.reg_bits = 8,
+		.val_bits = 8,
+	};
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev) {
@@ -1337,45 +1176,48 @@
 	}
 
 	dev->client = client;
-	dev->i2c = client->adapter;
-	dev->cfg.demod_i2c_addr = client->addr;
-	dev->cfg.i2c_wr_max = pdata->i2c_wr_max;
-	dev->cfg.ts_mode = pdata->ts_mode;
-	dev->cfg.spec_inv = pdata->spec_inv;
-	dev->cfg.xtal = pdata->clk;
-	dev->cfg.pll_multiplier = pdata->pll_multiplier;
-	dev->cfg.tuner_i2c_addr = pdata->tuner_i2c_addr;
+	mutex_init(&dev->cmd_execute_mutex);
+	dev->clk = pdata->clk;
+	dev->i2c_wr_max = pdata->i2c_wr_max;
+	dev->ts_mode = pdata->ts_mode;
+	dev->spec_inv = pdata->spec_inv;
+	dev->pll_multiplier = pdata->pll_multiplier;
+	dev->tuner_i2c_addr = pdata->tuner_i2c_addr;
+	dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
+	if (IS_ERR(dev->regmap)) {
+		ret = PTR_ERR(dev->regmap);
+		goto err_kfree;
+	}
 
 	/* chip ID */
-	ret = tda10071_rd_reg(dev, 0xff, &u8tmp);
+	ret = regmap_read(dev->regmap, 0xff, &uitmp);
 	if (ret)
 		goto err_kfree;
-	if (u8tmp != 0x0f) {
+	if (uitmp != 0x0f) {
 		ret = -ENODEV;
 		goto err_kfree;
 	}
 
 	/* chip type */
-	ret = tda10071_rd_reg(dev, 0xdd, &u8tmp);
+	ret = regmap_read(dev->regmap, 0xdd, &uitmp);
 	if (ret)
 		goto err_kfree;
-	if (u8tmp != 0x00) {
+	if (uitmp != 0x00) {
 		ret = -ENODEV;
 		goto err_kfree;
 	}
 
 	/* chip version */
-	ret = tda10071_rd_reg(dev, 0xfe, &u8tmp);
+	ret = regmap_read(dev->regmap, 0xfe, &uitmp);
 	if (ret)
 		goto err_kfree;
-	if (u8tmp != 0x01) {
+	if (uitmp != 0x01) {
 		ret = -ENODEV;
 		goto err_kfree;
 	}
 
 	/* create dvb_frontend */
 	memcpy(&dev->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
-	dev->fe.ops.release = NULL;
 	dev->fe.demodulator_priv = dev;
 	i2c_set_clientdata(client, dev);
 
@@ -1409,7 +1251,6 @@
 
 static struct i2c_driver tda10071_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tda10071",
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 0ffbfa5..8f18402 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -21,12 +21,11 @@
 #ifndef TDA10071_H
 #define TDA10071_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /*
  * I2C address
- * 0x55,
+ * 0x05, 0x55,
  */
 
 /**
@@ -53,64 +52,4 @@
 	struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
 };
 
-struct tda10071_config {
-	/* Demodulator I2C address.
-	 * Default: none, must set
-	 * Values: 0x55,
-	 */
-	u8 demod_i2c_addr;
-
-	/* Tuner I2C address.
-	 * Default: none, must set
-	 * Values: 0x14, 0x54, ...
-	 */
-	u8 tuner_i2c_addr;
-
-	/* Max bytes I2C provider can write at once.
-	 * Note: Buffer is taken from the stack currently!
-	 * Default: none, must set
-	 * Values:
-	 */
-	u16 i2c_wr_max;
-
-	/* TS output mode.
-	 * Default: TDA10071_TS_SERIAL
-	 * Values:
-	 */
-#define TDA10071_TS_SERIAL        0
-#define TDA10071_TS_PARALLEL      1
-	u8 ts_mode;
-
-	/* Input spectrum inversion.
-	 * Default: 0
-	 * Values: 0, 1
-	 */
-	bool spec_inv;
-
-	/* Xtal frequency Hz
-	 * Default: none, must set
-	 * Values:
-	 */
-	u32 xtal;
-
-	/* PLL multiplier.
-	 * Default: none, must set
-	 * Values:
-	 */
-	u8 pll_multiplier;
-};
-
-
-#if IS_REACHABLE(CONFIG_DVB_TDA10071)
-extern struct dvb_frontend *tda10071_attach(
-	const struct tda10071_config *config, struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend *tda10071_attach(
-	const struct tda10071_config *config, struct i2c_adapter *i2c)
-{
-	dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
-	return NULL;
-}
-#endif
-
 #endif /* TDA10071_H */
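With tda10071_attach() and struct tda10071_config gone, the demod binds like
any other I2C driver: a bridge instantiates an i2c_client that carries struct
tda10071_platform_data, then retrieves the frontend through the
get_dvb_frontend() callback that the demod's probe fills in. A hedged sketch
of that flow; the id string, I2C address and parameter values below are
illustrative assumptions, not taken from this patch:

#include <linux/i2c.h>
#include "tda10071.h"

static struct dvb_frontend *bridge_attach_demod(struct i2c_adapter *adap)
{
	struct tda10071_platform_data pdata = {
		.clk = 40444000,		/* assumed xtal, in Hz */
		.i2c_wr_max = 16,
		.ts_mode = TDA10071_TS_SERIAL,
		.spec_inv = 0,
		.pll_multiplier = 20,
		.tuner_i2c_addr = 0x14,
	};
	struct i2c_board_info bi = {
		.type = "tda10071",		/* assumed id-table name */
		.addr = 0x05,
		.platform_data = &pdata,
	};
	struct i2c_client *client;

	/* request_module() and error unwinding omitted for brevity */
	client = i2c_new_device(adap, &bi);
	if (!client || !client->dev.driver)
		return NULL;

	/* probe stored its accessor in the platform data we passed */
	return pdata.get_dvb_frontend(client);
}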
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 54d7c71..b9c3601 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -24,19 +24,27 @@
 #include "dvb_frontend.h"
 #include "tda10071.h"
 #include <linux/firmware.h>
+#include <linux/regmap.h>
 
-struct tda10071_priv {
-	struct i2c_adapter *i2c;
+struct tda10071_dev {
 	struct dvb_frontend fe;
 	struct i2c_client *client;
-	struct tda10071_config cfg;
+	struct regmap *regmap;
+	struct mutex cmd_execute_mutex;
+	u32 clk;
+	u16 i2c_wr_max;
+	u8 ts_mode;
+	bool spec_inv;
+	u8 pll_multiplier;
+	u8 tuner_i2c_addr;
 
-	u8 meas_count[2];
-	u32 ber;
-	u32 ucb;
+	u8 meas_count;
+	u32 dvbv3_ber;
 	enum fe_status fe_status;
 	enum fe_delivery_system delivery_system;
 	bool warm; /* FW running */
+	u64 post_bit_error;
+	u64 block_error;
 };
 
 static struct tda10071_modcod {
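The private header also gains cmd_execute_mutex, initialized in
tda10071_probe(); the intent is presumably that tda10071_cmd_execute() (not
visible in this hunk) takes it so firmware commands issued from concurrent
frontend calls cannot interleave. A sketch of that serialization pattern,
using a hypothetical helper and an assumed 0x00 command-buffer register:

#include <linux/mutex.h>
#include <linux/regmap.h>
#include "tda10071_priv.h"

static int cmd_execute_sketch(struct tda10071_dev *dev,
			      struct tda10071_cmd *cmd)
{
	int ret;

	/* one command at a time: write args, then poll for completion */
	mutex_lock(&dev->cmd_execute_mutex);

	ret = regmap_bulk_write(dev->regmap, 0x00, cmd->args, cmd->len);
	if (ret)
		goto unlock;

	/* completion polling omitted; on timeout ret would be -ETIMEDOUT */

unlock:
	mutex_unlock(&dev->cmd_execute_mutex);
	return ret;
}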
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index f61b143..7979e5d 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -726,7 +726,6 @@
 
 static struct i2c_driver ts2020_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ts2020",
 	},
 	.probe		= ts2020_probe,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 71ee8f5..521bbf1 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -22,7 +22,7 @@
 #
 
 menu "Encoders, decoders, sensors and other helper chips"
-	visible if !MEDIA_SUBDRV_AUTOSELECT
+	visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
 
 comment "Audio decoders, processors and mixers"
 
@@ -196,7 +196,8 @@
 
 config VIDEO_ADV7604
 	tristate "Analog Devices ADV7604 decoder"
-	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && GPIOLIB
+	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+	depends on GPIOLIB || COMPILE_TEST
 	select HDMI
 	---help---
 	  Support for the Analog Devices ADV7604 video decoder.
@@ -286,6 +287,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called saa7115.
 
+config VIDEO_TC358743
+	tristate "Toshiba TC358743 decoder"
+	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+	select HDMI
+	---help---
+	  Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tc358743.
+
 config VIDEO_TVP514X
 	tristate "Texas Instruments TVP514x video decoder"
 	depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f165fae..07db257 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -78,3 +78,4 @@
 obj-$(CONFIG_VIDEO_IR_I2C)  += ir-kbd-i2c.o
 obj-$(CONFIG_VIDEO_ML86V7667)	+= ml86v7667.o
 obj-$(CONFIG_VIDEO_OV2659)	+= ov2659.o
+obj-$(CONFIG_VIDEO_TC358743)	+= tc358743.o
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index c70abab..5dd3977 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -465,7 +465,7 @@
 
 	of_node_put(child);
 
-	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
 	if (!pd->enable_gpio) {
 		dev_err(&client->dev, "Error getting GPIO\n");
 		return -EINVAL;
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index f0d3f5a..05f1dc6 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -401,7 +401,6 @@
 
 static struct i2c_driver adv7170_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "adv7170",
 	},
 	.probe		= adv7170_probe,
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 321834b..f554809 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -455,7 +455,6 @@
 
 static struct i2c_driver adv7175_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "adv7175",
 	},
 	.probe		= adv7175_probe,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index a493c0b..f82c8aa 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
@@ -1324,11 +1325,20 @@
 #define ADV7180_PM_OPS NULL
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id adv7180_of_id[] = {
+	{ .compatible = "adi,adv7180", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, adv7180_of_id);
+#endif
+
 static struct i2c_driver adv7180_driver = {
 	.driver = {
-		   .owner = THIS_MODULE,
 		   .name = KBUILD_MODNAME,
 		   .pm = ADV7180_PM_OPS,
+		   .of_match_table = of_match_ptr(adv7180_of_id),
 		   },
 	.probe = adv7180_probe,
 	.remove = adv7180_remove,
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 7c50833..f89d0af 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -319,13 +319,6 @@
 
 static const struct v4l2_subdev_core_ops adv7343_core_ops = {
 	.log_status = adv7343_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
@@ -529,7 +522,6 @@
 static struct i2c_driver adv7343_driver = {
 	.driver = {
 		.of_match_table = of_match_ptr(adv7343_of_match),
-		.owner	= THIS_MODULE,
 		.name	= "adv7343",
 	},
 	.probe		= adv7343_probe,
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 558f191..0215f95 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -306,13 +306,6 @@
 
 static const struct v4l2_subdev_core_ops adv7393_core_ops = {
 	.log_status = adv7393_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static int adv7393_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 95bcd40..e4900df 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -40,7 +40,7 @@
 
 MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
 MODULE_AUTHOR("Hans Verkuil");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 
 #define MASK_ADV7511_EDID_RDY_INT   0x04
 #define MASK_ADV7511_MSEN_INT       0x40
@@ -1576,7 +1576,6 @@
 
 static struct i2c_driver adv7511_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "adv7511",
 	},
 	.probe = adv7511_probe,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 808360f..5631ec0 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -37,10 +37,12 @@
 #include <linux/v4l2-dv-timings.h>
 #include <linux/videodev2.h>
 #include <linux/workqueue.h>
+#include <linux/regmap.h>
 
 #include <media/adv7604.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
 #include <media/v4l2-dv-timings.h>
 #include <media/v4l2-of.h>
 
@@ -81,6 +83,7 @@
 enum adv76xx_type {
 	ADV7604,
 	ADV7611,
+	ADV7612,
 };
 
 struct adv76xx_reg_seq {
@@ -188,6 +191,9 @@
 	/* i2c clients */
 	struct i2c_client *i2c_clients[ADV76XX_PAGE_MAX];
 
+	/* Regmaps */
+	struct regmap *regmap[ADV76XX_PAGE_MAX];
+
 	/* controls */
 	struct v4l2_ctrl *detect_tx_5v_ctrl;
 	struct v4l2_ctrl *analog_sampling_phase_ctrl;
@@ -373,66 +379,39 @@
 
 /* ----------------------------------------------------------------------- */
 
-static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
-		u8 command, bool check)
+static int adv76xx_read_check(struct adv76xx_state *state,
+			     int client_page, u8 reg)
 {
-	union i2c_smbus_data data;
-
-	if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
-			I2C_SMBUS_READ, command,
-			I2C_SMBUS_BYTE_DATA, &data))
-		return data.byte;
-	if (check)
-		v4l_err(client, "error reading %02x, %02x\n",
-				client->addr, command);
-	return -EIO;
-}
-
-static s32 adv_smbus_read_byte_data(struct adv76xx_state *state,
-				    enum adv76xx_page page, u8 command)
-{
-	return adv_smbus_read_byte_data_check(state->i2c_clients[page],
-					      command, true);
-}
-
-static s32 adv_smbus_write_byte_data(struct adv76xx_state *state,
-				     enum adv76xx_page page, u8 command,
-				     u8 value)
-{
-	struct i2c_client *client = state->i2c_clients[page];
-	union i2c_smbus_data data;
+	struct i2c_client *client = state->i2c_clients[client_page];
 	int err;
-	int i;
+	unsigned int val;
 
-	data.byte = value;
-	for (i = 0; i < 3; i++) {
-		err = i2c_smbus_xfer(client->adapter, client->addr,
-				client->flags,
-				I2C_SMBUS_WRITE, command,
-				I2C_SMBUS_BYTE_DATA, &data);
-		if (!err)
-			break;
+	err = regmap_read(state->regmap[client_page], reg, &val);
+
+	if (err) {
+		v4l_err(client, "error reading %02x, %02x\n",
+				client->addr, reg);
+		return err;
 	}
-	if (err < 0)
-		v4l_err(client, "error writing %02x, %02x, %02x\n",
-				client->addr, command, value);
-	return err;
+	return val;
 }
 
-static s32 adv_smbus_write_i2c_block_data(struct adv76xx_state *state,
-					  enum adv76xx_page page, u8 command,
-					  unsigned length, const u8 *values)
+/* adv76xx_write_block(): Write a block of raw data, at most I2C_SMBUS_BLOCK_MAX
+ * bytes, to one or more registers.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+static int adv76xx_write_block(struct adv76xx_state *state, int client_page,
+			      unsigned int init_reg, const void *val,
+			      size_t val_len)
 {
-	struct i2c_client *client = state->i2c_clients[page];
-	union i2c_smbus_data data;
+	struct regmap *regmap = state->regmap[client_page];
 
-	if (length > I2C_SMBUS_BLOCK_MAX)
-		length = I2C_SMBUS_BLOCK_MAX;
-	data.block[0] = length;
-	memcpy(data.block + 1, values, length);
-	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
-			      I2C_SMBUS_WRITE, command,
-			      I2C_SMBUS_I2C_BLOCK_DATA, &data);
+	if (val_len > I2C_SMBUS_BLOCK_MAX)
+		val_len = I2C_SMBUS_BLOCK_MAX;
+
+	return regmap_raw_write(regmap, init_reg, val, val_len);
 }
 
 /* ----------------------------------------------------------------------- */
@@ -441,14 +420,14 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_IO, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_IO, reg);
 }
 
 static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_IO, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_IO], reg, val);
 }
 
 static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -460,71 +439,70 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV7604_PAGE_AVLINK, reg);
+	return adv76xx_read_check(state, ADV7604_PAGE_AVLINK, reg);
 }
 
 static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV7604_PAGE_AVLINK, reg, val);
+	return regmap_write(state->regmap[ADV7604_PAGE_AVLINK], reg, val);
 }
 
 static inline int cec_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CEC, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_CEC, reg);
 }
 
 static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CEC, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_CEC], reg, val);
 }
 
 static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_INFOFRAME, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_INFOFRAME, reg);
 }
 
 static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_INFOFRAME,
-					 reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_INFOFRAME], reg, val);
 }
 
 static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_AFE, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_AFE, reg);
 }
 
 static inline int afe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_AFE, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_AFE], reg, val);
 }
 
 static inline int rep_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_REP, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_REP, reg);
 }
 
 static inline int rep_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_REP, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_REP], reg, val);
 }
 
 static inline int rep_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -536,28 +514,37 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_EDID, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_EDID, reg);
 }
 
 static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_EDID, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_EDID], reg, val);
 }
 
 static inline int edid_write_block(struct v4l2_subdev *sd,
-					unsigned len, const u8 *val)
+					unsigned int total_len, const u8 *val)
 {
 	struct adv76xx_state *state = to_state(sd);
 	int err = 0;
-	int i;
+	int i = 0;
+	int len = 0;
 
-	v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
+	v4l2_dbg(2, debug, sd, "%s: write EDID block (%d bytes)\n",
+				__func__, total_len);
 
-	for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
-		err = adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_EDID,
-				i, I2C_SMBUS_BLOCK_MAX, val + i);
+	while (!err && i < total_len) {
+		len = (total_len - i) > I2C_SMBUS_BLOCK_MAX ?
+				I2C_SMBUS_BLOCK_MAX :
+				(total_len - i);
+
+		err = adv76xx_write_block(state, ADV76XX_PAGE_EDID,
+				i, val + i, len);
+		i += len;
+	}
+
 	return err;
 }
 
@@ -587,7 +574,7 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_HDMI, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_HDMI, reg);
 }
 
 static u16 hdmi_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -599,7 +586,7 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_HDMI, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_HDMI], reg, val);
 }
 
 static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -611,14 +598,14 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_TEST, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_TEST], reg, val);
 }
 
 static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CP, reg);
+	return adv76xx_read_check(state, ADV76XX_PAGE_CP, reg);
 }
 
 static u16 cp_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -630,7 +617,7 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CP, reg, val);
+	return regmap_write(state->regmap[ADV76XX_PAGE_CP], reg, val);
 }
 
 static inline int cp_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -642,14 +629,14 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_read_byte_data(state, ADV7604_PAGE_VDP, reg);
+	return adv76xx_read_check(state, ADV7604_PAGE_VDP, reg);
 }
 
 static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
 {
 	struct adv76xx_state *state = to_state(sd);
 
-	return adv_smbus_write_byte_data(state, ADV7604_PAGE_VDP, reg, val);
+	return regmap_write(state->regmap[ADV7604_PAGE_VDP], reg, val);
 }
 
 #define ADV76XX_REG(page, offset)	(((page) << 8) | (offset))
@@ -660,13 +647,16 @@
 {
 	struct adv76xx_state *state = to_state(sd);
 	unsigned int page = reg >> 8;
+	unsigned int val;
+	int err;
 
 	if (!(BIT(page) & state->info->page_mask))
 		return -EINVAL;
 
 	reg &= 0xff;
+	err = regmap_read(state->regmap[page], reg, &val);
 
-	return adv_smbus_read_byte_data(state, page, reg);
+	return err ? err : val;
 }
 #endif
 
@@ -680,7 +670,7 @@
 
 	reg &= 0xff;
 
-	return adv_smbus_write_byte_data(state, page, reg, val);
+	return regmap_write(state->regmap[page], reg, val);
 }
 
 static void adv76xx_write_reg_seq(struct v4l2_subdev *sd,
@@ -766,6 +756,23 @@
 	  ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
 };
 
+static const struct adv76xx_format_info adv7612_formats[] = {
+	{ MEDIA_BUS_FMT_RGB888_1X24, ADV76XX_OP_CH_SEL_RGB, true, false,
+	  ADV76XX_OP_MODE_SEL_SDR_444 | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_YUYV8_2X8, ADV76XX_OP_CH_SEL_RGB, false, false,
+	  ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_YVYU8_2X8, ADV76XX_OP_CH_SEL_RGB, false, true,
+	  ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_UYVY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, false,
+	  ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_VYUY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, true,
+	  ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_YUYV8_1X16, ADV76XX_OP_CH_SEL_RGB, false, false,
+	  ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+	{ MEDIA_BUS_FMT_YVYU8_1X16, ADV76XX_OP_CH_SEL_RGB, false, true,
+	  ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+};
+
 static const struct adv76xx_format_info *
 adv76xx_format_info(struct adv76xx_state *state, u32 code)
 {
@@ -870,6 +877,16 @@
 	return value & 1;
 }
 
+static unsigned int adv7612_read_cable_det(struct v4l2_subdev *sd)
+{
+	/* Reads CABLE_DET_A_RAW. For input B support, we would need to
+	 * account for bit 7 [MSB] of 0x6a (i.e. CABLE_DET_B_RAW).
+	 */
+	u8 value = io_read(sd, 0x6f);
+
+	return value & 1;
+}
+
 static int adv76xx_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
 {
 	struct adv76xx_state *state = to_state(sd);
@@ -976,8 +993,8 @@
 		/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
 		/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
 		/* IO-map reg. 0x16 and 0x17 should be written in sequence */
-		if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_IO,
-						   0x16, 2, pll))
+		if (regmap_raw_write(state->regmap[ADV76XX_PAGE_IO],
+					0x16, pll, 2))
 			v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
 
 		/* active video - horizontal timing */
@@ -1028,8 +1045,8 @@
 	offset_buf[3] = offset_c & 0x0ff;
 
 	/* Registers must be written in this order with no i2c access in between */
-	if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
-					   0x77, 4, offset_buf))
+	if (regmap_raw_write(state->regmap[ADV76XX_PAGE_CP],
+			0x77, offset_buf, 4))
 		v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
 }
 
@@ -1058,8 +1075,8 @@
 	gain_buf[3] = ((gain_c & 0x0ff));
 
 	/* Registers must be written in this order with no i2c access in between */
-	if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
-					   0x73, 4, gain_buf))
+	if (regmap_raw_write(state->regmap[ADV76XX_PAGE_CP],
+			     0x73, gain_buf, 4))
 		v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
 }
 
@@ -1328,7 +1345,7 @@
 		}
 	}
 
-	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
+	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
 			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
 			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
 			false, timings))
@@ -1760,8 +1777,8 @@
 	select_input(sd);
 	enable_input(sd);
 
-	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
-			   (void *)&adv76xx_ev_fmt);
+	v4l2_subdev_notify_event(sd, &adv76xx_ev_fmt);
+
 	return 0;
 }
 
@@ -1928,8 +1945,7 @@
 			"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
 			__func__, fmt_change, fmt_change_digital);
 
-		v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
-				   (void *)&adv76xx_ev_fmt);
+		v4l2_subdev_notify_event(sd, &adv76xx_ev_fmt);
 
 		if (handled)
 			*handled = true;
@@ -2347,6 +2363,20 @@
 	return 0;
 }
 
+static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
+				   struct v4l2_fh *fh,
+				   struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
 /* ----------------------------------------------------------------------- */
 
 static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
@@ -2356,6 +2386,8 @@
 static const struct v4l2_subdev_core_ops adv76xx_core_ops = {
 	.log_status = adv76xx_log_status,
 	.interrupt_service_routine = adv76xx_isr,
+	.subscribe_event = adv76xx_subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = adv76xx_g_register,
 	.s_register = adv76xx_s_register,
@@ -2510,6 +2542,11 @@
 	io_write(sd, 0x41, 0xd0); /* STDI irq for any change, disable INT2 */
 }
 
+static void adv7612_setup_irqs(struct v4l2_subdev *sd)
+{
+	io_write(sd, 0x41, 0xd0); /* disable INT2 */
+}
+
 static void adv76xx_unregister_clients(struct adv76xx_state *state)
 {
 	unsigned int i;
@@ -2597,6 +2634,19 @@
 	{ ADV76XX_REG_SEQ_TERM, 0 },
 };
 
+static const struct adv76xx_reg_seq adv7612_recommended_settings_hdmi[] = {
+	{ ADV76XX_REG(ADV76XX_PAGE_CP, 0x6c), 0x00 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x9b), 0x03 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x6f), 0x08 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x85), 0x1f },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x87), 0x70 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x57), 0xda },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x58), 0x01 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x03), 0x98 },
+	{ ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x4c), 0x44 },
+	{ ADV76XX_REG_SEQ_TERM, 0 },
+};
+
 static const struct adv76xx_chip_info adv76xx_chip_info[] = {
 	[ADV7604] = {
 		.type = ADV7604,
@@ -2685,17 +2735,60 @@
 		.field1_vsync_mask = 0x3fff,
 		.field1_vbackporch_mask = 0x3fff,
 	},
+	[ADV7612] = {
+		.type = ADV7612,
+		.has_afe = false,
+		.max_port = ADV76XX_PAD_HDMI_PORT_A,	/* B not supported */
+		.num_dv_ports = 1,			/* normally 2 */
+		.edid_enable_reg = 0x74,
+		.edid_status_reg = 0x76,
+		.lcf_reg = 0xa3,
+		.tdms_lock_mask = 0x43,
+		.cable_det_mask = 0x01,
+		.fmt_change_digital_mask = 0x03,
+		.cp_csc = 0xf4,
+		.formats = adv7612_formats,
+		.nformats = ARRAY_SIZE(adv7612_formats),
+		.set_termination = adv7611_set_termination,
+		.setup_irqs = adv7612_setup_irqs,
+		.read_hdmi_pixelclock = adv7611_read_hdmi_pixelclock,
+		.read_cable_det = adv7612_read_cable_det,
+		.recommended_settings = {
+		    [1] = adv7612_recommended_settings_hdmi,
+		},
+		.num_recommended_settings = {
+		    [1] = ARRAY_SIZE(adv7612_recommended_settings_hdmi),
+		},
+		.page_mask = BIT(ADV76XX_PAGE_IO) | BIT(ADV76XX_PAGE_CEC) |
+			BIT(ADV76XX_PAGE_INFOFRAME) | BIT(ADV76XX_PAGE_AFE) |
+			BIT(ADV76XX_PAGE_REP) | BIT(ADV76XX_PAGE_EDID) |
+			BIT(ADV76XX_PAGE_HDMI) | BIT(ADV76XX_PAGE_CP),
+		.linewidth_mask = 0x1fff,
+		.field0_height_mask = 0x1fff,
+		.field1_height_mask = 0x1fff,
+		.hfrontporch_mask = 0x1fff,
+		.hsync_mask = 0x1fff,
+		.hbackporch_mask = 0x1fff,
+		.field0_vfrontporch_mask = 0x3fff,
+		.field0_vsync_mask = 0x3fff,
+		.field0_vbackporch_mask = 0x3fff,
+		.field1_vfrontporch_mask = 0x3fff,
+		.field1_vsync_mask = 0x3fff,
+		.field1_vbackporch_mask = 0x3fff,
+	},
 };
 
 static const struct i2c_device_id adv76xx_i2c_id[] = {
 	{ "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
 	{ "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
+	{ "adv7612", (kernel_ulong_t)&adv76xx_chip_info[ADV7612] },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
 
 static const struct of_device_id adv76xx_of_id[] __maybe_unused = {
 	{ .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
+	{ .compatible = "adi,adv7612", .data = &adv76xx_chip_info[ADV7612] },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, adv76xx_of_id);
@@ -2706,6 +2799,7 @@
 	struct device_node *endpoint;
 	struct device_node *np;
 	unsigned int flags;
+	u32 v;
 
 	np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
 
@@ -2715,6 +2809,12 @@
 		return -EINVAL;
 
 	v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+
+	if (!of_property_read_u32(endpoint, "default-input", &v))
+		state->pdata.default_input = v;
+	else
+		state->pdata.default_input = -1;
+
 	of_node_put(endpoint);
 
 	flags = bus_cfg.bus.parallel.flags;
@@ -2753,7 +2853,6 @@
 	/* Hardcode the remaining platform data fields. */
 	state->pdata.disable_pwrdnb = 0;
 	state->pdata.disable_cable_det_rst = 0;
-	state->pdata.default_input = -1;
 	state->pdata.blank_data = 1;
 	state->pdata.alt_data_sat = 1;
 	state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
@@ -2762,6 +2861,148 @@
 	return 0;
 }
 
+static const struct regmap_config adv76xx_regmap_cnf[] = {
+	{
+		.name			= "io",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "avlink",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "cec",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "infoframe",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "esdp",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "epp",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "afe",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "rep",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "edid",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+
+	{
+		.name			= "hdmi",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "test",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "cp",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+	{
+		.name			= "vdp",
+		.reg_bits		= 8,
+		.val_bits		= 8,
+
+		.max_register		= 0xff,
+		.cache_type		= REGCACHE_NONE,
+	},
+};
+
+static int configure_regmap(struct adv76xx_state *state, int region)
+{
+	int err;
+
+	if (!state->i2c_clients[region])
+		return -ENODEV;
+
+	state->regmap[region] =
+		devm_regmap_init_i2c(state->i2c_clients[region],
+				     &adv76xx_regmap_cnf[region]);
+
+	if (IS_ERR(state->regmap[region])) {
+		err = PTR_ERR(state->regmap[region]);
+		v4l_err(state->i2c_clients[region],
+			"Error initializing regmap %d with error %d\n",
+			region, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int configure_regmaps(struct adv76xx_state *state)
+{
+	int i, err;
+
+	for (i = ADV7604_PAGE_AVLINK; i < ADV76XX_PAGE_MAX; i++) {
+		err = configure_regmap(state, i);
+		if (err && (err != -ENODEV))
+			return err;
+	}
+	return 0;
+}
+
 static int adv76xx_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
@@ -2771,7 +3012,7 @@
 	struct v4l2_ctrl_handler *hdl;
 	struct v4l2_subdev *sd;
 	unsigned int i;
-	u16 val;
+	unsigned int val, val2;
 	int err;
 
 	/* Check if the adapter supports the needed features */
@@ -2833,28 +3074,59 @@
 	snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
 		id->name, i2c_adapter_id(client->adapter),
 		client->addr);
-	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+	/* Configure IO Regmap region */
+	err = configure_regmap(state, ADV76XX_PAGE_IO);
+
+	if (err) {
+		v4l2_err(sd, "Error configuring IO regmap region\n");
+		return -ENODEV;
+	}
 
 	/*
 	 * Verify that the chip is present. On ADV7604 the RD_INFO register only
 	 * identifies the revision, while on ADV7611 it identifies the model as
 	 * well. Use the HDMI slave address on ADV7604 and RD_INFO on ADV7611.
 	 */
-	if (state->info->type == ADV7604) {
-		val = adv_smbus_read_byte_data_check(client, 0xfb, false);
+	switch (state->info->type) {
+	case ADV7604:
+		err = regmap_read(state->regmap[ADV76XX_PAGE_IO], 0xfb, &val);
+		if (err) {
+			v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+			return -ENODEV;
+		}
 		if (val != 0x68) {
-			v4l2_info(sd, "not an adv7604 on address 0x%x\n",
+			v4l2_err(sd, "not an adv7604 on address 0x%x\n",
 					client->addr << 1);
 			return -ENODEV;
 		}
-	} else {
-		val = (adv_smbus_read_byte_data_check(client, 0xea, false) << 8)
-		    | (adv_smbus_read_byte_data_check(client, 0xeb, false) << 0);
-		if (val != 0x2051) {
-			v4l2_info(sd, "not an adv7611 on address 0x%x\n",
+		break;
+	case ADV7611:
+	case ADV7612:
+		err = regmap_read(state->regmap[ADV76XX_PAGE_IO],
+				0xea,
+				&val);
+		if (err) {
+			v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+			return -ENODEV;
+		}
+		val2 = val << 8;
+		err = regmap_read(state->regmap[ADV76XX_PAGE_IO],
+			    0xeb,
+			    &val);
+		if (err) {
+			v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+			return -ENODEV;
+		}
+		val |= val2;
+		if ((state->info->type == ADV7611 && val != 0x2051) ||
+			(state->info->type == ADV7612 && val != 0x2041)) {
+			v4l2_err(sd, "not an adv761x on address 0x%x\n",
 					client->addr << 1);
 			return -ENODEV;
 		}
+		break;
 	}
 
 	/* control handlers */
@@ -2941,6 +3213,11 @@
 	if (err)
 		goto err_work_queues;
 
+	/* Configure regmaps */
+	err = configure_regmaps(state);
+	if (err)
+		goto err_entity;
+
 	err = adv76xx_core_init(sd);
 	if (err)
 		goto err_entity;
@@ -2985,7 +3262,6 @@
 
 static struct i2c_driver adv76xx_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "adv7604",
 		.of_match_table = of_match_ptr(adv76xx_of_id),
 	},
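The adv7604 changes are the same regmap conversion writ large: the chip's
register map is split across multiple I2C pages, each page already has its
own (dummy) i2c_client in state->i2c_clients[], and configure_regmaps() now
wraps every one of them in its own regmap from the adv76xx_regmap_cnf[]
table. A stripped-down sketch of that one-regmap-per-page setup; NUM_PAGES
and page_addrs[] are placeholders rather than the chip's real page layout:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

#define NUM_PAGES 3

static int map_pages(struct i2c_client *client, struct regmap *maps[NUM_PAGES])
{
	static const struct regmap_config cfg = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0xff,
	};
	static const unsigned short page_addrs[NUM_PAGES] = { 0x00, 0x32, 0x34 };
	struct i2c_client *page;
	unsigned int i;

	for (i = 0; i < NUM_PAGES; i++) {
		/* page 0 is the main client; the rest are dummy clients */
		page = i ? i2c_new_dummy(client->adapter, page_addrs[i])
			 : client;
		if (!page)
			return -ENOMEM;	/* dummy-client cleanup omitted */

		maps[i] = devm_regmap_init_i2c(page, &cfg);
		if (IS_ERR(maps[i]))
			return PTR_ERR(maps[i]);
	}

	return 0;
}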
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 4cf79b2..b7269b8 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -40,6 +40,7 @@
 #include <linux/v4l2-dv-timings.h>
 #include <linux/hdmi.h>
 #include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-dv-timings.h>
 #include <media/adv7842.h>
@@ -1442,7 +1443,7 @@
 		}
 	}
 
-	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
+	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
 			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
 			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
 			false, timings))
@@ -1980,8 +1981,7 @@
 	select_input(sd, state->vid_std_select);
 	enable_input(sd);
 
-	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
-			   (void *)&adv7842_ev_fmt);
+	v4l2_subdev_notify_event(sd, &adv7842_ev_fmt);
 
 	return 0;
 }
@@ -2214,8 +2214,7 @@
 			 "%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
 			 __func__, fmt_change_cp, fmt_change_digital,
 			 fmt_change_sdp);
-		v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
-				   (void *)&adv7842_ev_fmt);
+		v4l2_subdev_notify_event(sd, &adv7842_ev_fmt);
 		if (handled)
 			*handled = true;
 	}
@@ -3005,6 +3004,20 @@
 	return -ENOTTY;
 }
 
+static int adv7842_subscribe_event(struct v4l2_subdev *sd,
+				   struct v4l2_fh *fh,
+				   struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
 /* ----------------------------------------------------------------------- */
 
 static const struct v4l2_ctrl_ops adv7842_ctrl_ops = {
@@ -3015,6 +3028,8 @@
 	.log_status = adv7842_log_status,
 	.ioctl = adv7842_ioctl,
 	.interrupt_service_routine = adv7842_isr,
+	.subscribe_event = adv7842_subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = adv7842_g_register,
 	.s_register = adv7842_s_register,
@@ -3210,7 +3225,7 @@
 
 	sd = &state->sd;
 	v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
-	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
 	state->mode = pdata->mode;
 
 	state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
@@ -3348,7 +3363,6 @@
 
 static struct i2c_driver adv7842_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "adv7842",
 	},
 	.probe = adv7842_probe,
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 2984624..d3b965e 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -156,12 +156,12 @@
 	} else if (std == V4L2_STD_PAL_60) {
 		vp1 = 7;
 		ak881x->lines = 480;
-	} else if (std && !(std & ~V4L2_STD_PAL)) {
-		vp1 = 0xf;
-		ak881x->lines = 576;
-	} else if (std && !(std & ~V4L2_STD_NTSC)) {
+	} else if (std & V4L2_STD_NTSC) {
 		vp1 = 0;
 		ak881x->lines = 480;
+	} else if (std & V4L2_STD_PAL) {
+		vp1 = 0xf;
+		ak881x->lines = 576;
 	} else {
 		/* No SECAM or PAL_N/Nc supported */
 		return -EINVAL;
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 76b334a..e00e310 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -379,16 +379,6 @@
 	.s_ctrl = bt819_s_ctrl,
 };
 
-static const struct v4l2_subdev_core_ops bt819_core_ops = {
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
-};
-
 static const struct v4l2_subdev_video_ops bt819_video_ops = {
 	.s_std = bt819_s_std,
 	.s_routing = bt819_s_routing,
@@ -398,7 +388,6 @@
 };
 
 static const struct v4l2_subdev_ops bt819_ops = {
-	.core = &bt819_core_ops,
 	.video = &bt819_video_ops,
 };
 
@@ -492,7 +481,6 @@
 
 static struct i2c_driver bt819_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "bt819",
 	},
 	.probe		= bt819_probe,
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 7fc163d..4817659 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -252,7 +252,6 @@
 
 static struct i2c_driver bt856_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "bt856",
 	},
 	.probe		= bt856_probe,
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index a8bf10f..bbec70c 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -218,7 +218,6 @@
 
 static struct i2c_driver bt866_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "bt866",
 	},
 	.probe		= bt866_probe,
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index 34b76a9..c7de979 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -132,13 +132,6 @@
 
 static const struct v4l2_subdev_core_ops cs5345_core_ops = {
 	.log_status = cs5345_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = cs5345_g_register,
 	.s_register = cs5345_s_register,
@@ -218,7 +211,6 @@
 
 static struct i2c_driver cs5345_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "cs5345",
 	},
 	.probe		= cs5345_probe,
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 27400c1..b7e87e3 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -228,7 +228,6 @@
 
 static struct i2c_driver cs53l32a_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "cs53l32a",
 	},
 	.probe		= cs53l32a_probe,
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e15a789..fe6eb78 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -5348,7 +5348,6 @@
 
 static struct i2c_driver cx25840_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "cx25840",
 	},
 	.probe		= cx25840_probe,
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 175a761..728d2cc 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -478,6 +478,7 @@
 	{ "ir_rx_z8f0811_hdpvr", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
 
 static struct i2c_driver ir_kbd_driver = {
 	.driver = {
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 25b81bc..77551ba 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -708,7 +708,6 @@
 
 static struct i2c_driver ks0127_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ks0127",
 	},
 	.probe		= ks0127_probe,
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index bf47635..77eb07e 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -185,7 +185,6 @@
 
 static struct i2c_driver m52790_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "m52790",
 	},
 	.probe		= m52790_probe,
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index dcc68ec..bdb9400 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -894,7 +894,6 @@
 
 static struct i2c_driver msp_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "msp3400",
 		.pm	= &msp3400_pm_ops,
 	},
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 57132cd..a4a5c39 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -583,7 +583,6 @@
 
 static struct i2c_driver mt9v011_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "mt9v011",
 	},
 	.probe		= mt9v011_probe,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 977f400..a68ce94 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -882,7 +882,7 @@
 static struct mt9v032_platform_data *
 mt9v032_get_pdata(struct i2c_client *client)
 {
-	struct mt9v032_platform_data *pdata;
+	struct mt9v032_platform_data *pdata = NULL;
 	struct v4l2_of_endpoint endpoint;
 	struct device_node *np;
 	struct property *prop;
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 6edffc7..49109f4 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -909,7 +909,6 @@
 	u8 ctrl1_reg = 0, ctrl2_reg = 0, ctrl3_reg = 0;
 	struct i2c_client *client = ov2659->client;
 	unsigned int desired = pdata->link_frequency;
-	u32 s_prediv = 1, s_postdiv = 1, s_mult = 1;
 	u32 prediv, postdiv, mult;
 	u32 bestdelta = -1;
 	u32 delta, actual;
@@ -929,9 +928,6 @@
 
 				if ((delta < bestdelta) || (bestdelta == -1)) {
 					bestdelta = delta;
-					s_mult    = mult;
-					s_prediv  = prediv;
-					s_postdiv = postdiv;
 					ctrl1_reg = ctrl1[i].reg;
 					ctrl2_reg = mult;
 					ctrl3_reg = ctrl3[j].reg;
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index faa64ba..b8961df 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -94,7 +94,6 @@
 
 static struct i2c_driver ov7640_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ov7640",
 	},
 	.probe = ov7640_probe,
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 2d1e25f..e1b5dc8 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1674,7 +1674,6 @@
 
 static struct i2c_driver ov7670_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ov7670",
 	},
 	.probe		= ov7670_probe,
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 2bc4733..e691bba 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1436,7 +1436,7 @@
 	int ret;
 
 	mutex_lock(&ov965x->lock);
-	 __ov965x_set_power(ov965x, 1);
+	__ov965x_set_power(ov965x, 1);
 	usleep_range(25000, 26000);
 
 	/* Check sensor revision */
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index 63eb190..fa4a5eb 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -149,7 +149,6 @@
 	spidrv->remove = s5c73m3_spi_remove;
 	spidrv->probe = s5c73m3_spi_probe;
 	spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
-	spidrv->driver.bus = &spi_bus_type;
 	spidrv->driver.owner = THIS_MODULE;
 	spidrv->driver.of_match_table = s5c73m3_spi_ids;
 
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index bc389d5..b1b1574 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -363,6 +363,7 @@
 static const struct i2c_device_id s5k6a3_ids[] = {
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, s5k6a3_ids);
 
 #ifdef CONFIG_OF
 static const struct of_device_id s5k6a3_of_match[] = {
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 2960b5a..37e65f6 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -301,9 +301,7 @@
 	   first and the last of the 3 bytes block.
 	 */
 
-	tmp = tmpbuf[2];
-	tmpbuf[2] = tmpbuf[0];
-	tmpbuf[0] = tmp;
+	swap(tmpbuf[2], tmpbuf[0]);
 
 	/* Map 'Invalid block E' to 'Invalid Block' */
 	if (blocknum == 6)
@@ -520,7 +518,6 @@
 
 static struct i2c_driver saa6588_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa6588",
 	},
 	.probe		= saa6588_probe,
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index ba3c415..7202d3a 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -793,7 +793,6 @@
 
 static struct i2c_driver saa6752hs_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa6752hs",
 	},
 	.probe		= saa6752hs_probe,
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 99689ee..6f49886 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -357,16 +357,6 @@
 	.s_ctrl = saa7110_s_ctrl,
 };
 
-static const struct v4l2_subdev_core_ops saa7110_core_ops = {
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
-};
-
 static const struct v4l2_subdev_video_ops saa7110_video_ops = {
 	.s_std = saa7110_s_std,
 	.s_routing = saa7110_s_routing,
@@ -376,7 +366,6 @@
 };
 
 static const struct v4l2_subdev_ops saa7110_ops = {
-	.core = &saa7110_core_ops,
 	.video = &saa7110_video_ops,
 };
 
@@ -472,7 +461,6 @@
 
 static struct i2c_driver saa7110_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa7110",
 	},
 	.probe		= saa7110_probe,
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 0eae5f4..91e7522 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1929,7 +1929,6 @@
 
 static struct i2c_driver saa711x_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa7115",
 	},
 	.probe		= saa711x_probe,
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 264b755..a43d96d 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -822,7 +822,6 @@
 
 static struct i2c_driver saa7127_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa7127",
 	},
 	.probe		= saa7127_probe,
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 7d51736..1baca37 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1204,13 +1204,6 @@
 	.g_register = saa717x_g_register,
 	.s_register = saa717x_s_register,
 #endif
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 	.log_status = saa717x_log_status,
 };
 
@@ -1363,7 +1356,6 @@
 
 static struct i2c_driver saa717x_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa717x",
 	},
 	.probe		= saa717x_probe,
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index f56c1c8..eecad2d 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -356,7 +356,6 @@
 
 static struct i2c_driver saa7185_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "saa7185",
 	},
 	.probe		= saa7185_probe,
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index de10a76..2f35d31 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -104,22 +104,22 @@
 static const struct mt9t112_format mt9t112_cfmts[] = {
 	{
 		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
-		.colorspace	= V4L2_COLORSPACE_JPEG,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
 		.fmt		= 1,
 		.order		= 0,
 	}, {
 		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
-		.colorspace	= V4L2_COLORSPACE_JPEG,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
 		.fmt		= 1,
 		.order		= 1,
 	}, {
 		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
-		.colorspace	= V4L2_COLORSPACE_JPEG,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
 		.fmt		= 1,
 		.order		= 2,
 	}, {
 		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
-		.colorspace	= V4L2_COLORSPACE_JPEG,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
 		.fmt		= 1,
 		.order		= 3,
 	}, {
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 42bec9b..e939c24 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -510,13 +510,39 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct tw9910_priv *priv = to_tw9910(client);
+	const unsigned hact = 720;
+	const unsigned hdelay = 15;
+	unsigned vact;
+	unsigned vdelay;
+	int ret;
 
 	if (!(norm & (V4L2_STD_NTSC | V4L2_STD_PAL)))
 		return -EINVAL;
 
 	priv->norm = norm;
+	if (norm & V4L2_STD_525_60) {
+		vact = 240;
+		vdelay = 18;
+		ret = tw9910_mask_set(client, VVBI, 0x10, 0x10);
+	} else {
+		vact = 288;
+		vdelay = 24;
+		ret = tw9910_mask_set(client, VVBI, 0x10, 0x00);
+	}
+	if (!ret)
+		ret = i2c_smbus_write_byte_data(client, CROP_HI,
+			((vdelay >> 2) & 0xc0) |
+			((vact >> 4) & 0x30) |
+			((hdelay >> 6) & 0x0c) |
+			((hact >> 8) & 0x03));
+	if (!ret)
+		ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
+			vdelay & 0xff);
+	if (!ret)
+		ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
+			vact & 0xff);
 
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -711,7 +737,7 @@
 	mf->width	= priv->scale->width;
 	mf->height	= priv->scale->height;
 	mf->code	= MEDIA_BUS_FMT_UYVY8_2X8;
-	mf->colorspace	= V4L2_COLORSPACE_JPEG;
+	mf->colorspace	= V4L2_COLORSPACE_SMPTE170M;
 	mf->field	= V4L2_FIELD_INTERLACED_BT;
 
 	return 0;
@@ -732,7 +758,7 @@
 	if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8)
 		return -EINVAL;
 
-	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
 	ret = tw9910_set_frame(sd, &width, &height);
 	if (!ret) {
@@ -762,7 +788,7 @@
 	}
 
 	mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
-	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
 	/*
 	 * select suitable norm
@@ -820,6 +846,7 @@
 		 "tw9910 Product ID %0x:%0x\n", id, priv->revision);
 
 	priv->norm = V4L2_STD_NTSC;
+	priv->scale = &tw9910_ntsc_scales[0];
 
 done:
 	tw9910_s_power(&priv->subdev, 0);
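The CROP_HI write added to tw9910_s_std() above packs the two high bits of
each of the four geometry values into a single byte. Here is a minimal
standalone sketch of that packing, assuming the NTSC/PAL numbers used in the
patch (the helper name is illustrative, not part of the driver):

	#include <stdio.h>

	/* Pack the high bits of the crop geometry, as in tw9910_s_std(). */
	static unsigned char pack_crop_hi(unsigned hact, unsigned hdelay,
					  unsigned vact, unsigned vdelay)
	{
		return ((vdelay >> 2) & 0xc0) |	/* VDELAY[9:8]  -> bits 7:6 */
		       ((vact >> 4) & 0x30) |	/* VACTIVE[9:8] -> bits 5:4 */
		       ((hdelay >> 6) & 0x0c) |	/* HDELAY[9:8]  -> bits 3:2 */
		       ((hact >> 8) & 0x03);	/* HACTIVE[9:8] -> bits 1:0 */
	}

	int main(void)
	{
		/* NTSC (vact 240, vdelay 18) -> 0x02; PAL (vact 288, vdelay 24) -> 0x12 */
		printf("NTSC CROP_HI = 0x%02x\n", pack_crop_hi(720, 15, 240, 18));
		printf("PAL  CROP_HI = 0x%02x\n", pack_crop_hi(720, 15, 288, 24));
		return 0;
	}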
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index 1da8004..6b1a04f 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -388,7 +388,6 @@
 
 static struct i2c_driver sony_btf_mpx_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "sony-btf-mpx",
 	},
 	.probe = sony_btf_mpx_probe,
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index b62b6dd..b04c09d 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -489,18 +489,14 @@
 {
 	struct v4l2_mbus_framefmt *mf;
 	struct sr030pc30_info *info = to_sr030pc30(sd);
-	int ret;
 
 	if (!format || format->pad)
 		return -EINVAL;
 
 	mf = &format->format;
 
-	if (!info->curr_win || !info->curr_fmt) {
-		ret = sr030pc30_set_params(sd);
-		if (ret)
-			return ret;
-	}
+	if (!info->curr_win || !info->curr_fmt)
+		return -EINVAL;
 
 	mf->width	= info->curr_win->width;
 	mf->height	= info->curr_win->height;
@@ -636,13 +632,6 @@
 
 static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
 	.s_power	= sr030pc30_s_power,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_pad_ops sr030pc30_pad_ops = {
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
new file mode 100644
index 0000000..9ef5baa
--- /dev/null
+++ b/drivers/media/i2c/tc358743.c
@@ -0,0 +1,1979 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/hdmi.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-of.h>
+#include <media/tc358743.h>
+
+#include "tc358743_regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-3)");
+
+MODULE_DESCRIPTION("Toshiba TC358743 HDMI to CSI-2 bridge driver");
+MODULE_AUTHOR("Ramakrishnan Muthukrishnan <ram@rkrishnan.org>");
+MODULE_AUTHOR("Mikhail Khelik <mkhelik@cisco.com>");
+MODULE_AUTHOR("Mats Randgaard <matrandg@cisco.com>");
+MODULE_LICENSE("GPL");
+
+#define EDID_NUM_BLOCKS_MAX 8
+#define EDID_BLOCK_SIZE 128
+
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE  (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2)
+
+static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
+	.type = V4L2_DV_BT_656_1120,
+	/* keep this initialization for compatibility with GCC < 4.4.6 */
+	.reserved = { 0 },
+	/* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
+	V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
+			V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+			V4L2_DV_BT_CAP_PROGRESSIVE |
+			V4L2_DV_BT_CAP_REDUCED_BLANKING |
+			V4L2_DV_BT_CAP_CUSTOM)
+};
+
+struct tc358743_state {
+	struct tc358743_platform_data pdata;
+	struct v4l2_of_bus_mipi_csi2 bus;
+	struct v4l2_subdev sd;
+	struct media_pad pad;
+	struct v4l2_ctrl_handler hdl;
+	struct i2c_client *i2c_client;
+	/* CONFCTL is modified in ops and tc358743_hdmi_sys_int_handler */
+	struct mutex confctl_mutex;
+
+	/* controls */
+	struct v4l2_ctrl *detect_tx_5v_ctrl;
+	struct v4l2_ctrl *audio_sampling_rate_ctrl;
+	struct v4l2_ctrl *audio_present_ctrl;
+
+	/* work queues */
+	struct workqueue_struct *work_queues;
+	struct delayed_work delayed_work_enable_hotplug;
+
+	/* edid  */
+	u8 edid_blocks_written;
+
+	/* used by i2c_wr() */
+	u8 wr_data[MAX_XFER_SIZE];
+
+	struct v4l2_dv_timings timings;
+	u32 mbus_fmt_code;
+
+	struct gpio_desc *reset_gpio;
+};
+
+static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
+		bool cable_connected);
+static int tc358743_s_ctrl_detect_tx_5v(struct v4l2_subdev *sd);
+
+static inline struct tc358743_state *to_state(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct tc358743_state, sd);
+}
+
+/* --------------- I2C --------------- */
+
+static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct i2c_client *client = state->i2c_client;
+	int err;
+	u8 buf[2] = { reg >> 8, reg & 0xff };
+	struct i2c_msg msgs[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 2,
+			.buf = buf,
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = n,
+			.buf = values,
+		},
+	};
+
+	err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+	if (err != ARRAY_SIZE(msgs)) {
+		v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed\n",
+				__func__, reg, client->addr);
+	}
+}
+
+static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct i2c_client *client = state->i2c_client;
+	u8 *data = state->wr_data;
+	int err, i;
+	struct i2c_msg msg;
+
+	if ((2 + n) > sizeof(state->wr_data))
+		v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big!\n",
+			  reg, 2 + n);
+
+	msg.addr = client->addr;
+	msg.buf = data;
+	msg.len = 2 + n;
+	msg.flags = 0;
+
+	data[0] = reg >> 8;
+	data[1] = reg & 0xff;
+
+	for (i = 0; i < n; i++)
+		data[2 + i] = values[i];
+
+	err = i2c_transfer(client->adapter, &msg, 1);
+	if (err != 1) {
+		v4l2_err(sd, "%s: writing register 0x%x from 0x%x failed\n",
+				__func__, reg, client->addr);
+		return;
+	}
+
+	if (debug < 3)
+		return;
+
+	switch (n) {
+	case 1:
+		v4l2_info(sd, "I2C write 0x%04x = 0x%02x",
+				reg, data[2]);
+		break;
+	case 2:
+		v4l2_info(sd, "I2C write 0x%04x = 0x%02x%02x",
+				reg, data[3], data[2]);
+		break;
+	case 4:
+		v4l2_info(sd, "I2C write 0x%04x = 0x%02x%02x%02x%02x",
+				reg, data[5], data[4], data[3], data[2]);
+		break;
+	default:
+		v4l2_info(sd, "I2C write %d bytes from address 0x%04x\n",
+				n, reg);
+	}
+}
+
+static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+{
+	u8 val;
+
+	i2c_rd(sd, reg, &val, 1);
+
+	return val;
+}
+
+static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
+	i2c_wr(sd, reg, &val, 1);
+}
+
+static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
+		u8 mask, u8 val)
+{
+	i2c_wr8(sd, reg, (i2c_rd8(sd, reg) & mask) | val);
+}
+
+static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
+{
+	u16 val;
+
+	i2c_rd(sd, reg, (u8 *)&val, 2);
+
+	return val;
+}
+
+static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+	i2c_wr(sd, reg, (u8 *)&val, 2);
+}
+
+static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
+{
+	i2c_wr16(sd, reg, (i2c_rd16(sd, reg) & mask) | val);
+}
+
+static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
+{
+	u32 val;
+
+	i2c_rd(sd, reg, (u8 *)&val, 4);
+
+	return val;
+}
+
+static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val)
+{
+	i2c_wr(sd, reg, (u8 *)&val, 4);
+}
+
+/* --------------- STATUS --------------- */
+
+static inline bool is_hdmi(struct v4l2_subdev *sd)
+{
+	return i2c_rd8(sd, SYS_STATUS) & MASK_S_HDMI;
+}
+
+static inline bool tx_5v_power_present(struct v4l2_subdev *sd)
+{
+	return i2c_rd8(sd, SYS_STATUS) & MASK_S_DDC5V;
+}
+
+static inline bool no_signal(struct v4l2_subdev *sd)
+{
+	return !(i2c_rd8(sd, SYS_STATUS) & MASK_S_TMDS);
+}
+
+static inline bool no_sync(struct v4l2_subdev *sd)
+{
+	return !(i2c_rd8(sd, SYS_STATUS) & MASK_S_SYNC);
+}
+
+static inline bool audio_present(struct v4l2_subdev *sd)
+{
+	return i2c_rd8(sd, AU_STATUS0) & MASK_S_A_SAMPLE;
+}
+
+static int get_audio_sampling_rate(struct v4l2_subdev *sd)
+{
+	static const int code_to_rate[] = {
+		44100, 0, 48000, 32000, 22050, 384000, 24000, 352800,
+		88200, 768000, 96000, 705600, 176400, 0, 192000, 0
+	};
+
+	/* Register FS_SET is not cleared when the cable is disconnected */
+	if (no_signal(sd))
+		return 0;
+
+	return code_to_rate[i2c_rd8(sd, FS_SET) & MASK_FS];
+}
+
+static unsigned tc358743_num_csi_lanes_in_use(struct v4l2_subdev *sd)
+{
+	return ((i2c_rd32(sd, CSI_CONTROL) & MASK_NOL) >> 1) + 1;
+}
+
+/* --------------- TIMINGS --------------- */
+
+static inline unsigned fps(const struct v4l2_bt_timings *t)
+{
+	if (!V4L2_DV_BT_FRAME_HEIGHT(t) || !V4L2_DV_BT_FRAME_WIDTH(t))
+		return 0;
+
+	return DIV_ROUND_CLOSEST((unsigned)t->pixelclock,
+			V4L2_DV_BT_FRAME_HEIGHT(t) * V4L2_DV_BT_FRAME_WIDTH(t));
+}
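+
+/*
+ * Illustrative example (not from REF_01): for CEA 1080p60 the total
+ * frame is 2200 x 1125 pixels at a 148.5 MHz pixel clock, so fps()
+ * returns DIV_ROUND_CLOSEST(148500000, 1125 * 2200) = 60.
+ */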
+
+static int tc358743_get_detected_timings(struct v4l2_subdev *sd,
+				     struct v4l2_dv_timings *timings)
+{
+	struct v4l2_bt_timings *bt = &timings->bt;
+	unsigned width, height, frame_width, frame_height, frame_interval, fps;
+
+	memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+	if (no_signal(sd)) {
+		v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
+		return -ENOLINK;
+	}
+	if (no_sync(sd)) {
+		v4l2_dbg(1, debug, sd, "%s: no sync on signal\n", __func__);
+		return -ENOLCK;
+	}
+
+	timings->type = V4L2_DV_BT_656_1120;
+	bt->interlaced = i2c_rd8(sd, VI_STATUS1) & MASK_S_V_INTERLACE ?
+		V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+
+	width = ((i2c_rd8(sd, DE_WIDTH_H_HI) & 0x1f) << 8) +
+		i2c_rd8(sd, DE_WIDTH_H_LO);
+	height = ((i2c_rd8(sd, DE_WIDTH_V_HI) & 0x1f) << 8) +
+		i2c_rd8(sd, DE_WIDTH_V_LO);
+	frame_width = ((i2c_rd8(sd, H_SIZE_HI) & 0x1f) << 8) +
+		i2c_rd8(sd, H_SIZE_LO);
+	frame_height = (((i2c_rd8(sd, V_SIZE_HI) & 0x3f) << 8) +
+		i2c_rd8(sd, V_SIZE_LO)) / 2;
+	/* Frame interval in units of 0.1 ms (i.e. milliseconds * 10).
+	 * Requires SYS_FREQ0 and SYS_FREQ1 to be precisely set. */
+	frame_interval = ((i2c_rd8(sd, FV_CNT_HI) & 0x3) << 8) +
+		i2c_rd8(sd, FV_CNT_LO);
+	fps = (frame_interval > 0) ?
+		DIV_ROUND_CLOSEST(10000, frame_interval) : 0;
+
+	bt->width = width;
+	bt->height = height;
+	bt->vsync = frame_height - height;
+	bt->hsync = frame_width - width;
+	bt->pixelclock = frame_width * frame_height * fps;
+	if (bt->interlaced == V4L2_DV_INTERLACED) {
+		bt->height *= 2;
+		bt->il_vsync = bt->vsync + 1;
+		bt->pixelclock /= 2;
+	}
+
+	return 0;
+}
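+
+/*
+ * Illustrative example: the FV_CNT frame counter above is in units of
+ * 0.1 ms, so a 60 Hz source reports roughly 167 counts and
+ * fps = DIV_ROUND_CLOSEST(10000, 167) = 60.
+ */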
+
+/* --------------- HOTPLUG / HDCP / EDID --------------- */
+
+static void tc358743_delayed_work_enable_hotplug(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tc358743_state *state = container_of(dwork,
+			struct tc358743_state, delayed_work_enable_hotplug);
+	struct v4l2_subdev *sd = &state->sd;
+
+	v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+	i2c_wr8_and_or(sd, HPD_CTL, ~MASK_HPD_OUT0, MASK_HPD_OUT0);
+}
+
+static void tc358743_set_hdmi_hdcp(struct v4l2_subdev *sd, bool enable)
+{
+	v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ?
+				"enable" : "disable");
+
+	i2c_wr8_and_or(sd, HDCP_REG1,
+			~(MASK_AUTH_UNAUTH_SEL | MASK_AUTH_UNAUTH),
+			MASK_AUTH_UNAUTH_SEL_16_FRAMES | MASK_AUTH_UNAUTH_AUTO);
+
+	i2c_wr8_and_or(sd, HDCP_REG2, ~MASK_AUTO_P3_RESET,
+			SET_AUTO_P3_RESET_FRAMES(0x0f));
+
+	/* HDCP is disabled by configuring the receiver as HDCP repeater. The
+	 * repeater mode requires software support to work, so HDCP
+	 * authentication will fail.
+	 */
+	i2c_wr8_and_or(sd, HDCP_REG3, ~KEY_RD_CMD, enable ? KEY_RD_CMD : 0);
+	i2c_wr8_and_or(sd, HDCP_MODE, ~(MASK_AUTO_CLR | MASK_MODE_RST_TN),
+			enable ? (MASK_AUTO_CLR | MASK_MODE_RST_TN) : 0);
+
+	/* Apple MacBook Pro gen. 8 has a bug that makes it freeze every five
+	 * seconds when HDCP is disabled, but the MAX_EXCED bit is handled
+	 * correctly and HDCP is disabled on the HDMI output.
+	 */
+	i2c_wr8_and_or(sd, BSTATUS1, ~MASK_MAX_EXCED,
+			enable ? 0 : MASK_MAX_EXCED);
+	i2c_wr8_and_or(sd, BCAPS, ~(MASK_REPEATER | MASK_READY),
+			enable ? 0 : MASK_REPEATER | MASK_READY);
+}
+
+static void tc358743_disable_edid(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+
+	/* DDC access to EDID is also disabled when hotplug is disabled. See
+	 * register DDC_CTL */
+	i2c_wr8_and_or(sd, HPD_CTL, ~MASK_HPD_OUT0, 0x0);
+}
+
+static void tc358743_enable_edid(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	if (state->edid_blocks_written == 0) {
+		v4l2_dbg(2, debug, sd, "%s: no EDID -> no hotplug\n", __func__);
+		return;
+	}
+
+	v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+	/* Enable hotplug after 100 ms. DDC access to EDID is also enabled when
+	 * hotplug is enabled. See register DDC_CTL */
+	queue_delayed_work(state->work_queues,
+			   &state->delayed_work_enable_hotplug, HZ / 10);
+
+	tc358743_enable_interrupts(sd, true);
+	tc358743_s_ctrl_detect_tx_5v(sd);
+}
+
+static void tc358743_erase_bksv(struct v4l2_subdev *sd)
+{
+	int i;
+
+	for (i = 0; i < 5; i++)
+		i2c_wr8(sd, BKSV + i, 0);
+}
+
+/* --------------- AVI infoframe --------------- */
+
+static void print_avi_infoframe(struct v4l2_subdev *sd)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct device *dev = &client->dev;
+	union hdmi_infoframe frame;
+	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+
+	if (!is_hdmi(sd)) {
+		v4l2_info(sd, "DVI-D signal - AVI infoframe not supported\n");
+		return;
+	}
+
+	i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
+
+	if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+		v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
+		return;
+	}
+
+	hdmi_infoframe_log(KERN_INFO, dev, &frame);
+}
+
+/* --------------- CTRLS --------------- */
+
+static int tc358743_s_ctrl_detect_tx_5v(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
+			tx_5v_power_present(sd));
+}
+
+static int tc358743_s_ctrl_audio_sampling_rate(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	return v4l2_ctrl_s_ctrl(state->audio_sampling_rate_ctrl,
+			get_audio_sampling_rate(sd));
+}
+
+static int tc358743_s_ctrl_audio_present(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	return v4l2_ctrl_s_ctrl(state->audio_present_ctrl,
+			audio_present(sd));
+}
+
+static int tc358743_update_controls(struct v4l2_subdev *sd)
+{
+	int ret = 0;
+
+	ret |= tc358743_s_ctrl_detect_tx_5v(sd);
+	ret |= tc358743_s_ctrl_audio_sampling_rate(sd);
+	ret |= tc358743_s_ctrl_audio_present(sd);
+
+	return ret;
+}
+
+/* --------------- INIT --------------- */
+
+static void tc358743_reset_phy(struct v4l2_subdev *sd)
+{
+	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+	i2c_wr8_and_or(sd, PHY_RST, ~MASK_RESET_CTRL, 0);
+	i2c_wr8_and_or(sd, PHY_RST, ~MASK_RESET_CTRL, MASK_RESET_CTRL);
+}
+
+static void tc358743_reset(struct v4l2_subdev *sd, uint16_t mask)
+{
+	u16 sysctl = i2c_rd16(sd, SYSCTL);
+
+	i2c_wr16(sd, SYSCTL, sysctl | mask);
+	i2c_wr16(sd, SYSCTL, sysctl & ~mask);
+}
+
+static inline void tc358743_sleep_mode(struct v4l2_subdev *sd, bool enable)
+{
+	i2c_wr16_and_or(sd, SYSCTL, ~MASK_SLEEP,
+			enable ? MASK_SLEEP : 0);
+}
+
+static inline void enable_stream(struct v4l2_subdev *sd, bool enable)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	v4l2_dbg(3, debug, sd, "%s: %sable\n",
+			__func__, enable ? "en" : "dis");
+
+	if (enable) {
+		/* It is critical for the CSI receiver to see the lane
+		 * transition LP11->HS. Set to non-continuous mode to enable
+		 * the clock lane LP11 state. */
+		i2c_wr32(sd, TXOPTIONCNTRL, 0);
+		/* Set to continuous mode to trigger LP11->HS transition */
+		i2c_wr32(sd, TXOPTIONCNTRL, MASK_CONTCLKMODE);
+		/* Unmute video */
+		i2c_wr8(sd, VI_MUTE, MASK_AUTO_MUTE);
+	} else {
+		/* Mute video so that all data lanes go to the LP11 state.
+		 * No data is output to the CSI Tx block. */
+		i2c_wr8(sd, VI_MUTE, MASK_AUTO_MUTE | MASK_VI_MUTE);
+	}
+
+	mutex_lock(&state->confctl_mutex);
+	i2c_wr16_and_or(sd, CONFCTL, ~(MASK_VBUFEN | MASK_ABUFEN),
+			enable ? (MASK_VBUFEN | MASK_ABUFEN) : 0x0);
+	mutex_unlock(&state->confctl_mutex);
+}
+
+static void tc358743_set_pll(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct tc358743_platform_data *pdata = &state->pdata;
+	u16 pllctl0 = i2c_rd16(sd, PLLCTL0);
+	u16 pllctl1 = i2c_rd16(sd, PLLCTL1);
+	u16 pllctl0_new = SET_PLL_PRD(pdata->pll_prd) |
+		SET_PLL_FBD(pdata->pll_fbd);
+	u32 hsck = (pdata->refclk_hz / pdata->pll_prd) * pdata->pll_fbd;
+
+	v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+	/* Only rewrite when needed (new value or disabled), since rewriting
+	 * triggers another format change event. */
+	if ((pllctl0 != pllctl0_new) || ((pllctl1 & MASK_PLL_EN) == 0)) {
+		u16 pll_frs;
+
+		if (hsck > 500000000)
+			pll_frs = 0x0;
+		else if (hsck > 250000000)
+			pll_frs = 0x1;
+		else if (hsck > 125000000)
+			pll_frs = 0x2;
+		else
+			pll_frs = 0x3;
+
+		v4l2_dbg(1, debug, sd, "%s: updating PLL clock\n", __func__);
+		tc358743_sleep_mode(sd, true);
+		i2c_wr16(sd, PLLCTL0, pllctl0_new);
+		i2c_wr16_and_or(sd, PLLCTL1,
+				~(MASK_PLL_FRS | MASK_RESETB | MASK_PLL_EN),
+				(SET_PLL_FRS(pll_frs) | MASK_RESETB |
+				 MASK_PLL_EN));
+		udelay(10); /* REF_02, Sheet "Source HDMI" */
+		i2c_wr16_and_or(sd, PLLCTL1, ~MASK_CKEN, MASK_CKEN);
+		tc358743_sleep_mode(sd, false);
+	}
+}
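+
+/*
+ * Illustrative example: with refclk = 27 MHz, pll_prd = 4 and
+ * pll_fbd = 88, hsck = 27 MHz / 4 * 88 = 594 MHz, which is above
+ * 500 MHz, so pll_frs = 0x0 and each CSI lane runs at 594 Mbps.
+ */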
+
+static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct tc358743_platform_data *pdata = &state->pdata;
+	u32 sys_freq;
+	u32 lockdet_ref;
+	u16 fh_min;
+	u16 fh_max;
+
+	BUG_ON(!(pdata->refclk_hz == 26000000 ||
+		 pdata->refclk_hz == 27000000 ||
+		 pdata->refclk_hz == 42000000));
+
+	sys_freq = pdata->refclk_hz / 10000;
+	i2c_wr8(sd, SYS_FREQ0, sys_freq & 0x00ff);
+	i2c_wr8(sd, SYS_FREQ1, (sys_freq & 0xff00) >> 8);
+
+	i2c_wr8_and_or(sd, PHY_CTL0, ~MASK_PHY_SYSCLK_IND,
+			(pdata->refclk_hz == 42000000) ?
+			MASK_PHY_SYSCLK_IND : 0x0);
+
+	fh_min = pdata->refclk_hz / 100000;
+	i2c_wr8(sd, FH_MIN0, fh_min & 0x00ff);
+	i2c_wr8(sd, FH_MIN1, (fh_min & 0xff00) >> 8);
+
+	fh_max = (fh_min * 66) / 10;
+	i2c_wr8(sd, FH_MAX0, fh_max & 0x00ff);
+	i2c_wr8(sd, FH_MAX1, (fh_max & 0xff00) >> 8);
+
+	lockdet_ref = pdata->refclk_hz / 100;
+	i2c_wr8(sd, LOCKDET_REF0, lockdet_ref & 0x0000ff);
+	i2c_wr8(sd, LOCKDET_REF1, (lockdet_ref & 0x00ff00) >> 8);
+	i2c_wr8(sd, LOCKDET_REF2, (lockdet_ref & 0x0f0000) >> 16);
+
+	i2c_wr8_and_or(sd, NCO_F0_MOD, ~MASK_NCO_F0_MOD,
+			(pdata->refclk_hz == 27000000) ?
+			MASK_NCO_F0_MOD_27MHZ : 0x0);
+}
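+
+/*
+ * Illustrative example: refclk = 27 MHz gives sys_freq = 2700,
+ * fh_min = 270 and fh_max = 270 * 66 / 10 = 1782, matching the
+ * divisions above.
+ */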
+
+static void tc358743_set_csi_color_space(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	switch (state->mbus_fmt_code) {
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		v4l2_dbg(2, debug, sd, "%s: YCbCr 422 16-bit\n", __func__);
+		i2c_wr8_and_or(sd, VOUT_SET2,
+				~(MASK_SEL422 | MASK_VOUT_422FIL_100) & 0xff,
+				MASK_SEL422 | MASK_VOUT_422FIL_100);
+		i2c_wr8_and_or(sd, VI_REP, ~MASK_VOUT_COLOR_SEL & 0xff,
+				MASK_VOUT_COLOR_601_YCBCR_LIMITED);
+		mutex_lock(&state->confctl_mutex);
+		i2c_wr16_and_or(sd, CONFCTL, ~MASK_YCBCRFMT,
+				MASK_YCBCRFMT_422_8_BIT);
+		mutex_unlock(&state->confctl_mutex);
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		v4l2_dbg(2, debug, sd, "%s: RGB 888 24-bit\n", __func__);
+		i2c_wr8_and_or(sd, VOUT_SET2,
+				~(MASK_SEL422 | MASK_VOUT_422FIL_100) & 0xff,
+				0x00);
+		i2c_wr8_and_or(sd, VI_REP, ~MASK_VOUT_COLOR_SEL & 0xff,
+				MASK_VOUT_COLOR_RGB_FULL);
+		mutex_lock(&state->confctl_mutex);
+		i2c_wr16_and_or(sd, CONFCTL, ~MASK_YCBCRFMT, 0);
+		mutex_unlock(&state->confctl_mutex);
+		break;
+	default:
+		v4l2_dbg(2, debug, sd, "%s: Unsupported format code 0x%x\n",
+				__func__, state->mbus_fmt_code);
+	}
+}
+
+static unsigned tc358743_num_csi_lanes_needed(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct v4l2_bt_timings *bt = &state->timings.bt;
+	struct tc358743_platform_data *pdata = &state->pdata;
+	u32 bits_pr_pixel =
+		(state->mbus_fmt_code == MEDIA_BUS_FMT_UYVY8_1X16) ? 16 : 24;
+	u32 bps = bt->width * bt->height * fps(bt) * bits_pr_pixel;
+	u32 bps_pr_lane = (pdata->refclk_hz / pdata->pll_prd) * pdata->pll_fbd;
+
+	return DIV_ROUND_UP(bps, bps_pr_lane);
+}
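+
+/*
+ * Illustrative example: 1080p60 at 16 bits per pixel needs
+ * 1920 * 1080 * 60 * 16 = 1990656000 bps; at 594 Mbps per lane
+ * DIV_ROUND_UP() yields 4 lanes.
+ */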
+
+static void tc358743_set_csi(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct tc358743_platform_data *pdata = &state->pdata;
+	unsigned lanes = tc358743_num_csi_lanes_needed(sd);
+
+	v4l2_dbg(3, debug, sd, "%s:\n", __func__);
+
+	tc358743_reset(sd, MASK_CTXRST);
+
+	if (lanes < 1)
+		i2c_wr32(sd, CLW_CNTRL, MASK_CLW_LANEDISABLE);
+	if (lanes < 1)
+		i2c_wr32(sd, D0W_CNTRL, MASK_D0W_LANEDISABLE);
+	if (lanes < 2)
+		i2c_wr32(sd, D1W_CNTRL, MASK_D1W_LANEDISABLE);
+	if (lanes < 3)
+		i2c_wr32(sd, D2W_CNTRL, MASK_D2W_LANEDISABLE);
+	if (lanes < 4)
+		i2c_wr32(sd, D3W_CNTRL, MASK_D3W_LANEDISABLE);
+
+	i2c_wr32(sd, LINEINITCNT, pdata->lineinitcnt);
+	i2c_wr32(sd, LPTXTIMECNT, pdata->lptxtimecnt);
+	i2c_wr32(sd, TCLK_HEADERCNT, pdata->tclk_headercnt);
+	i2c_wr32(sd, TCLK_TRAILCNT, pdata->tclk_trailcnt);
+	i2c_wr32(sd, THS_HEADERCNT, pdata->ths_headercnt);
+	i2c_wr32(sd, TWAKEUP, pdata->twakeup);
+	i2c_wr32(sd, TCLK_POSTCNT, pdata->tclk_postcnt);
+	i2c_wr32(sd, THS_TRAILCNT, pdata->ths_trailcnt);
+	i2c_wr32(sd, HSTXVREGCNT, pdata->hstxvregcnt);
+
+	i2c_wr32(sd, HSTXVREGEN,
+			((lanes > 0) ? MASK_CLM_HSTXVREGEN : 0x0) |
+			((lanes > 0) ? MASK_D0M_HSTXVREGEN : 0x0) |
+			((lanes > 1) ? MASK_D1M_HSTXVREGEN : 0x0) |
+			((lanes > 2) ? MASK_D2M_HSTXVREGEN : 0x0) |
+			((lanes > 3) ? MASK_D3M_HSTXVREGEN : 0x0));
+
+	i2c_wr32(sd, TXOPTIONCNTRL, (state->bus.flags &
+		 V4L2_MBUS_CSI2_CONTINUOUS_CLOCK) ? MASK_CONTCLKMODE : 0);
+	i2c_wr32(sd, STARTCNTRL, MASK_START);
+	i2c_wr32(sd, CSI_START, MASK_STRT);
+
+	i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+			MASK_ADDRESS_CSI_CONTROL |
+			MASK_CSI_MODE |
+			MASK_TXHSMD |
+			((lanes == 4) ? MASK_NOL_4 :
+			 (lanes == 3) ? MASK_NOL_3 :
+			 (lanes == 2) ? MASK_NOL_2 : MASK_NOL_1));
+
+	i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+			MASK_ADDRESS_CSI_ERR_INTENA | MASK_TXBRK | MASK_QUNK |
+			MASK_WCER | MASK_INER);
+
+	i2c_wr32(sd, CSI_CONFW, MASK_MODE_CLEAR |
+			MASK_ADDRESS_CSI_ERR_HALT | MASK_TXBRK | MASK_QUNK);
+
+	i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+			MASK_ADDRESS_CSI_INT_ENA | MASK_INTER);
+}
+
+static void tc358743_set_hdmi_phy(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct tc358743_platform_data *pdata = &state->pdata;
+
+	/* Default settings from REF_02, sheet "Source HDMI"
+	 * and custom settings as platform data */
+	i2c_wr8_and_or(sd, PHY_EN, ~MASK_ENABLE_PHY, 0x0);
+	i2c_wr8(sd, PHY_CTL1, SET_PHY_AUTO_RST1_US(1600) |
+			SET_FREQ_RANGE_MODE_CYCLES(1));
+	i2c_wr8_and_or(sd, PHY_CTL2, ~MASK_PHY_AUTO_RSTn,
+			(pdata->hdmi_phy_auto_reset_tmds_detected ?
+			 MASK_PHY_AUTO_RST2 : 0) |
+			(pdata->hdmi_phy_auto_reset_tmds_in_range ?
+			 MASK_PHY_AUTO_RST3 : 0) |
+			(pdata->hdmi_phy_auto_reset_tmds_valid ?
+			 MASK_PHY_AUTO_RST4 : 0));
+	i2c_wr8(sd, PHY_BIAS, 0x40);
+	i2c_wr8(sd, PHY_CSQ, SET_CSQ_CNT_LEVEL(0x0a));
+	i2c_wr8(sd, AVM_CTL, 45);
+	i2c_wr8_and_or(sd, HDMI_DET, ~MASK_HDMI_DET_V,
+			pdata->hdmi_detection_delay << 4);
+	i2c_wr8_and_or(sd, HV_RST, ~(MASK_H_PI_RST | MASK_V_PI_RST),
+			(pdata->hdmi_phy_auto_reset_hsync_out_of_range ?
+			 MASK_H_PI_RST : 0) |
+			(pdata->hdmi_phy_auto_reset_vsync_out_of_range ?
+			 MASK_V_PI_RST : 0));
+	i2c_wr8_and_or(sd, PHY_EN, ~MASK_ENABLE_PHY, MASK_ENABLE_PHY);
+}
+
+static void tc358743_set_hdmi_audio(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	/* Default settings from REF_02, sheet "Source HDMI" */
+	i2c_wr8(sd, FORCE_MUTE, 0x00);
+	i2c_wr8(sd, AUTO_CMD0, MASK_AUTO_MUTE7 | MASK_AUTO_MUTE6 |
+			MASK_AUTO_MUTE5 | MASK_AUTO_MUTE4 |
+			MASK_AUTO_MUTE1 | MASK_AUTO_MUTE0);
+	i2c_wr8(sd, AUTO_CMD1, MASK_AUTO_MUTE9);
+	i2c_wr8(sd, AUTO_CMD2, MASK_AUTO_PLAY3 | MASK_AUTO_PLAY2);
+	i2c_wr8(sd, BUFINIT_START, SET_BUFINIT_START_MS(500));
+	i2c_wr8(sd, FS_MUTE, 0x00);
+	i2c_wr8(sd, FS_IMODE, MASK_NLPCM_SMODE | MASK_FS_SMODE);
+	i2c_wr8(sd, ACR_MODE, MASK_CTS_MODE);
+	i2c_wr8(sd, ACR_MDF0, MASK_ACR_L2MDF_1976_PPM | MASK_ACR_L1MDF_976_PPM);
+	i2c_wr8(sd, ACR_MDF1, MASK_ACR_L3MDF_3906_PPM);
+	i2c_wr8(sd, SDO_MODE1, MASK_SDO_FMT_I2S);
+	i2c_wr8(sd, DIV_MODE, SET_DIV_DLY_MS(100));
+
+	mutex_lock(&state->confctl_mutex);
+	i2c_wr16_and_or(sd, CONFCTL, 0xffff, MASK_AUDCHNUM_2 |
+			MASK_AUDOUTSEL_I2S | MASK_AUTOINDEX);
+	mutex_unlock(&state->confctl_mutex);
+}
+
+static void tc358743_set_hdmi_info_frame_mode(struct v4l2_subdev *sd)
+{
+	/* Default settings from REF_02, sheet "Source HDMI" */
+	i2c_wr8(sd, PK_INT_MODE, MASK_ISRC2_INT_MODE | MASK_ISRC_INT_MODE |
+			MASK_ACP_INT_MODE | MASK_VS_INT_MODE |
+			MASK_SPD_INT_MODE | MASK_MS_INT_MODE |
+			MASK_AUD_INT_MODE | MASK_AVI_INT_MODE);
+	i2c_wr8(sd, NO_PKT_LIMIT, 0x2c);
+	i2c_wr8(sd, NO_PKT_CLR, 0x53);
+	i2c_wr8(sd, ERR_PK_LIMIT, 0x01);
+	i2c_wr8(sd, NO_PKT_LIMIT2, 0x30);
+	i2c_wr8(sd, NO_GDB_LIMIT, 0x10);
+}
+
+static void tc358743_initial_setup(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct tc358743_platform_data *pdata = &state->pdata;
+
+	/* CEC and IR are not supported by this driver */
+	i2c_wr16_and_or(sd, SYSCTL, ~(MASK_CECRST | MASK_IRRST),
+			(MASK_CECRST | MASK_IRRST));
+
+	tc358743_reset(sd, MASK_CTXRST | MASK_HDMIRST);
+	tc358743_sleep_mode(sd, false);
+
+	i2c_wr16(sd, FIFOCTL, pdata->fifo_level);
+
+	tc358743_set_ref_clk(sd);
+
+	i2c_wr8_and_or(sd, DDC_CTL, ~MASK_DDC5V_MODE,
+			pdata->ddc5v_delay & MASK_DDC5V_MODE);
+	i2c_wr8_and_or(sd, EDID_MODE, ~MASK_EDID_MODE, MASK_EDID_MODE_E_DDC);
+
+	tc358743_set_hdmi_phy(sd);
+	tc358743_set_hdmi_hdcp(sd, pdata->enable_hdcp);
+	tc358743_set_hdmi_audio(sd);
+	tc358743_set_hdmi_info_frame_mode(sd);
+
+	/* All CE and IT formats are detected as RGB full range in DVI mode */
+	i2c_wr8_and_or(sd, VI_MODE, ~MASK_RGB_DVI, 0);
+
+	i2c_wr8_and_or(sd, VOUT_SET2, ~MASK_VOUTCOLORMODE,
+			MASK_VOUTCOLORMODE_AUTO);
+	i2c_wr8(sd, VOUT_SET3, MASK_VOUT_EXTCNT);
+}
+
+/* --------------- IRQ --------------- */
+
+static void tc358743_format_change(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct v4l2_dv_timings timings;
+	const struct v4l2_event tc358743_ev_fmt = {
+		.type = V4L2_EVENT_SOURCE_CHANGE,
+		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+	};
+
+	if (tc358743_get_detected_timings(sd, &timings)) {
+		enable_stream(sd, false);
+
+		v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n",
+				__func__);
+	} else {
+		if (!v4l2_match_dv_timings(&state->timings, &timings, 0))
+			enable_stream(sd, false);
+
+		v4l2_print_dv_timings(sd->name,
+				"tc358743_format_change: Format changed. New format: ",
+				&timings, false);
+	}
+
+	if (sd->devnode)
+		v4l2_subdev_notify_event(sd, &tc358743_ev_fmt);
+}
+
+static void tc358743_init_interrupts(struct v4l2_subdev *sd)
+{
+	u16 i;
+
+	/* clear interrupt status registers */
+	for (i = SYS_INT; i <= KEY_INT; i++)
+		i2c_wr8(sd, i, 0xff);
+
+	i2c_wr16(sd, INTSTATUS, 0xffff);
+}
+
+static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
+		bool cable_connected)
+{
+	v4l2_dbg(2, debug, sd, "%s: cable connected = %d\n", __func__,
+			cable_connected);
+
+	if (cable_connected) {
+		i2c_wr8(sd, SYS_INTM, ~(MASK_M_DDC | MASK_M_DVI_DET |
+					MASK_M_HDMI_DET) & 0xff);
+		i2c_wr8(sd, CLK_INTM, ~MASK_M_IN_DE_CHG);
+		i2c_wr8(sd, CBIT_INTM, ~(MASK_M_CBIT_FS | MASK_M_AF_LOCK |
+					MASK_M_AF_UNLOCK) & 0xff);
+		i2c_wr8(sd, AUDIO_INTM, ~MASK_M_BUFINIT_END);
+		i2c_wr8(sd, MISC_INTM, ~MASK_M_SYNC_CHG);
+	} else {
+		i2c_wr8(sd, SYS_INTM, ~MASK_M_DDC & 0xff);
+		i2c_wr8(sd, CLK_INTM, 0xff);
+		i2c_wr8(sd, CBIT_INTM, 0xff);
+		i2c_wr8(sd, AUDIO_INTM, 0xff);
+		i2c_wr8(sd, MISC_INTM, 0xff);
+	}
+}
+
+static void tc358743_hdmi_audio_int_handler(struct v4l2_subdev *sd,
+		bool *handled)
+{
+	u8 audio_int_mask = i2c_rd8(sd, AUDIO_INTM);
+	u8 audio_int = i2c_rd8(sd, AUDIO_INT) & ~audio_int_mask;
+
+	i2c_wr8(sd, AUDIO_INT, audio_int);
+
+	v4l2_dbg(3, debug, sd, "%s: AUDIO_INT = 0x%02x\n", __func__, audio_int);
+
+	tc358743_s_ctrl_audio_sampling_rate(sd);
+	tc358743_s_ctrl_audio_present(sd);
+}
+
+static void tc358743_csi_err_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+	v4l2_err(sd, "%s: CSI_ERR = 0x%x\n", __func__, i2c_rd32(sd, CSI_ERR));
+
+	i2c_wr32(sd, CSI_INT_CLR, MASK_ICRER);
+}
+
+static void tc358743_hdmi_misc_int_handler(struct v4l2_subdev *sd,
+		bool *handled)
+{
+	u8 misc_int_mask = i2c_rd8(sd, MISC_INTM);
+	u8 misc_int = i2c_rd8(sd, MISC_INT) & ~misc_int_mask;
+
+	i2c_wr8(sd, MISC_INT, misc_int);
+
+	v4l2_dbg(3, debug, sd, "%s: MISC_INT = 0x%02x\n", __func__, misc_int);
+
+	if (misc_int & MASK_I_SYNC_CHG) {
+		/* Reset the HDMI PHY to try to trigger proper lock on the
+		 * incoming video format. Erase BKSV to prevent old keys from
+		 * being used when a new source is connected. */
+		if (no_sync(sd) || no_signal(sd)) {
+			tc358743_reset_phy(sd);
+			tc358743_erase_bksv(sd);
+		}
+
+		tc358743_format_change(sd);
+
+		misc_int &= ~MASK_I_SYNC_CHG;
+		if (handled)
+			*handled = true;
+	}
+
+	if (misc_int) {
+		v4l2_err(sd, "%s: Unhandled MISC_INT interrupts: 0x%02x\n",
+				__func__, misc_int);
+	}
+}
+
+static void tc358743_hdmi_cbit_int_handler(struct v4l2_subdev *sd,
+		bool *handled)
+{
+	u8 cbit_int_mask = i2c_rd8(sd, CBIT_INTM);
+	u8 cbit_int = i2c_rd8(sd, CBIT_INT) & ~cbit_int_mask;
+
+	i2c_wr8(sd, CBIT_INT, cbit_int);
+
+	v4l2_dbg(3, debug, sd, "%s: CBIT_INT = 0x%02x\n", __func__, cbit_int);
+
+	if (cbit_int & MASK_I_CBIT_FS) {
+
+		v4l2_dbg(1, debug, sd, "%s: Audio sample rate changed\n",
+				__func__);
+		tc358743_s_ctrl_audio_sampling_rate(sd);
+
+		cbit_int &= ~MASK_I_CBIT_FS;
+		if (handled)
+			*handled = true;
+	}
+
+	if (cbit_int & (MASK_I_AF_LOCK | MASK_I_AF_UNLOCK)) {
+
+		v4l2_dbg(1, debug, sd, "%s: Audio present changed\n",
+				__func__);
+		tc358743_s_ctrl_audio_present(sd);
+
+		cbit_int &= ~(MASK_I_AF_LOCK | MASK_I_AF_UNLOCK);
+		if (handled)
+			*handled = true;
+	}
+
+	if (cbit_int) {
+		v4l2_err(sd, "%s: Unhandled CBIT_INT interrupts: 0x%02x\n",
+				__func__, cbit_int);
+	}
+}
+
+static void tc358743_hdmi_clk_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+	u8 clk_int_mask = i2c_rd8(sd, CLK_INTM);
+	u8 clk_int = i2c_rd8(sd, CLK_INT) & ~clk_int_mask;
+
+	/* Bit 7 and bit 6 are set even when they are masked */
+	i2c_wr8(sd, CLK_INT, clk_int | 0x80 | MASK_I_OUT_H_CHG);
+
+	v4l2_dbg(3, debug, sd, "%s: CLK_INT = 0x%02x\n", __func__, clk_int);
+
+	if (clk_int & (MASK_I_IN_DE_CHG)) {
+
+		v4l2_dbg(1, debug, sd, "%s: DE size or position has changed\n",
+				__func__);
+
+		/* If the source switches to a new resolution with the same
+		 * pixel frequency as the existing one (e.g. 1080p25 ->
+		 * 720p50), the
+		 * I_SYNC_CHG interrupt is not always triggered, while the
+		 * I_IN_DE_CHG interrupt seems to work fine. Format change
+		 * notifications are only sent when the signal is stable to
+		 * reduce the number of notifications. */
+		if (!no_signal(sd) && !no_sync(sd))
+			tc358743_format_change(sd);
+
+		clk_int &= ~(MASK_I_IN_DE_CHG);
+		if (handled)
+			*handled = true;
+	}
+
+	if (clk_int) {
+		v4l2_err(sd, "%s: Unhandled CLK_INT interrupts: 0x%02x\n",
+				__func__, clk_int);
+	}
+}
+
+static void tc358743_hdmi_sys_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+	struct tc358743_state *state = to_state(sd);
+	u8 sys_int_mask = i2c_rd8(sd, SYS_INTM);
+	u8 sys_int = i2c_rd8(sd, SYS_INT) & ~sys_int_mask;
+
+	i2c_wr8(sd, SYS_INT, sys_int);
+
+	v4l2_dbg(3, debug, sd, "%s: SYS_INT = 0x%02x\n", __func__, sys_int);
+
+	if (sys_int & MASK_I_DDC) {
+		bool tx_5v = tx_5v_power_present(sd);
+
+		v4l2_dbg(1, debug, sd, "%s: Tx 5V power present: %s\n",
+				__func__, tx_5v ? "yes" : "no");
+
+		if (tx_5v) {
+			tc358743_enable_edid(sd);
+		} else {
+			tc358743_enable_interrupts(sd, false);
+			tc358743_disable_edid(sd);
+			memset(&state->timings, 0, sizeof(state->timings));
+			tc358743_erase_bksv(sd);
+			tc358743_update_controls(sd);
+		}
+
+		sys_int &= ~MASK_I_DDC;
+		if (handled)
+			*handled = true;
+	}
+
+	if (sys_int & MASK_I_DVI) {
+		v4l2_dbg(1, debug, sd, "%s: HDMI->DVI change detected\n",
+				__func__);
+
+		/* Reset the HDMI PHY to try to trigger proper lock on the
+		 * incoming video format. Erase BKSV to prevent old keys from
+		 * being used when a new source is connected. */
+		if (no_sync(sd) || no_signal(sd)) {
+			tc358743_reset_phy(sd);
+			tc358743_erase_bksv(sd);
+		}
+
+		sys_int &= ~MASK_I_DVI;
+		if (handled)
+			*handled = true;
+	}
+
+	if (sys_int & MASK_I_HDMI) {
+		v4l2_dbg(1, debug, sd, "%s: DVI->HDMI change detected\n",
+				__func__);
+
+		/* Register is reset in DVI mode (REF_01, c. 6.6.41) */
+		i2c_wr8(sd, ANA_CTL, MASK_APPL_PCSX_NORMAL | MASK_ANALOG_ON);
+
+		sys_int &= ~MASK_I_HDMI;
+		if (handled)
+			*handled = true;
+	}
+
+	if (sys_int) {
+		v4l2_err(sd, "%s: Unhandled SYS_INT interrupts: 0x%02x\n",
+				__func__, sys_int);
+	}
+}
+
+/* --------------- CORE OPS --------------- */
+
+static int tc358743_log_status(struct v4l2_subdev *sd)
+{
+	struct tc358743_state *state = to_state(sd);
+	struct v4l2_dv_timings timings;
+	u8 hdmi_sys_status = i2c_rd8(sd, SYS_STATUS);
+	u16 sysctl = i2c_rd16(sd, SYSCTL);
+	u8 vi_status3 = i2c_rd8(sd, VI_STATUS3);
+	const int deep_color_mode[4] = { 8, 10, 12, 16 };
+	static const char * const input_color_space[] = {
+		"RGB", "YCbCr 601", "Adobe RGB", "YCbCr 709", "NA (4)",
+		"xvYCC 601", "NA(6)", "xvYCC 709", "NA(8)", "sYCC601",
+		"NA(10)", "NA(11)", "NA(12)", "Adobe YCC 601"};
+
+	v4l2_info(sd, "-----Chip status-----\n");
+	v4l2_info(sd, "Chip ID: 0x%02x\n",
+			(i2c_rd16(sd, CHIPID) & MASK_CHIPID) >> 8);
+	v4l2_info(sd, "Chip revision: 0x%02x\n",
+			i2c_rd16(sd, CHIPID) & MASK_REVID);
+	v4l2_info(sd, "Reset: IR: %d, CEC: %d, CSI TX: %d, HDMI: %d\n",
+			!!(sysctl & MASK_IRRST),
+			!!(sysctl & MASK_CECRST),
+			!!(sysctl & MASK_CTXRST),
+			!!(sysctl & MASK_HDMIRST));
+	v4l2_info(sd, "Sleep mode: %s\n", sysctl & MASK_SLEEP ? "on" : "off");
+	v4l2_info(sd, "Cable detected (+5V power): %s\n",
+			hdmi_sys_status & MASK_S_DDC5V ? "yes" : "no");
+	v4l2_info(sd, "DDC lines enabled: %s\n",
+			(i2c_rd8(sd, EDID_MODE) & MASK_EDID_MODE_E_DDC) ?
+			"yes" : "no");
+	v4l2_info(sd, "Hotplug enabled: %s\n",
+			(i2c_rd8(sd, HPD_CTL) & MASK_HPD_OUT0) ?
+			"yes" : "no");
+	v4l2_info(sd, "CEC enabled: %s\n",
+			(i2c_rd16(sd, CECEN) & MASK_CECEN) ? "yes" : "no");
+	v4l2_info(sd, "-----Signal status-----\n");
+	v4l2_info(sd, "TMDS signal detected: %s\n",
+			hdmi_sys_status & MASK_S_TMDS ? "yes" : "no");
+	v4l2_info(sd, "Stable sync signal: %s\n",
+			hdmi_sys_status & MASK_S_SYNC ? "yes" : "no");
+	v4l2_info(sd, "PHY PLL locked: %s\n",
+			hdmi_sys_status & MASK_S_PHY_PLL ? "yes" : "no");
+	v4l2_info(sd, "PHY DE detected: %s\n",
+			hdmi_sys_status & MASK_S_PHY_SCDT ? "yes" : "no");
+
+	if (tc358743_get_detected_timings(sd, &timings)) {
+		v4l2_info(sd, "No video detected\n");
+	} else {
+		v4l2_print_dv_timings(sd->name, "Detected format: ", &timings,
+				true);
+	}
+	v4l2_print_dv_timings(sd->name, "Configured format: ", &state->timings,
+			true);
+
+	v4l2_info(sd, "-----CSI-TX status-----\n");
+	v4l2_info(sd, "Lanes needed: %d\n",
+			tc358743_num_csi_lanes_needed(sd));
+	v4l2_info(sd, "Lanes in use: %d\n",
+			tc358743_num_csi_lanes_in_use(sd));
+	v4l2_info(sd, "Waiting for particular sync signal: %s\n",
+			(i2c_rd16(sd, CSI_STATUS) & MASK_S_WSYNC) ?
+			"yes" : "no");
+	v4l2_info(sd, "Transmit mode: %s\n",
+			(i2c_rd16(sd, CSI_STATUS) & MASK_S_TXACT) ?
+			"yes" : "no");
+	v4l2_info(sd, "Receive mode: %s\n",
+			(i2c_rd16(sd, CSI_STATUS) & MASK_S_RXACT) ?
+			"yes" : "no");
+	v4l2_info(sd, "Stopped: %s\n",
+			(i2c_rd16(sd, CSI_STATUS) & MASK_S_HLT) ?
+			"yes" : "no");
+	v4l2_info(sd, "Color space: %s\n",
+			state->mbus_fmt_code == MEDIA_BUS_FMT_UYVY8_1X16 ?
+			"YCbCr 422 16-bit" :
+			state->mbus_fmt_code == MEDIA_BUS_FMT_RGB888_1X24 ?
+			"RGB 888 24-bit" : "Unsupported");
+
+	v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
+	v4l2_info(sd, "HDCP encrypted content: %s\n",
+			hdmi_sys_status & MASK_S_HDCP ? "yes" : "no");
+	v4l2_info(sd, "Input color space: %s %s range\n",
+			input_color_space[(vi_status3 & MASK_S_V_COLOR) >> 1],
+			(vi_status3 & MASK_LIMITED) ? "limited" : "full");
+	if (!is_hdmi(sd))
+		return 0;
+	v4l2_info(sd, "AV Mute: %s\n", hdmi_sys_status & MASK_S_AVMUTE ? "on" :
+			"off");
+	v4l2_info(sd, "Deep color mode: %d-bits per channel\n",
+			deep_color_mode[(i2c_rd8(sd, VI_STATUS1) &
+				MASK_S_DEEPCOLOR) >> 2]);
+	print_avi_infoframe(sd);
+
+	return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static void tc358743_print_register_map(struct v4l2_subdev *sd)
+{
+	v4l2_info(sd, "0x0000–0x00FF: Global Control Register\n");
+	v4l2_info(sd, "0x0100–0x01FF: CSI2-TX PHY Register\n");
+	v4l2_info(sd, "0x0200–0x03FF: CSI2-TX PPI Register\n");
+	v4l2_info(sd, "0x0400–0x05FF: Reserved\n");
+	v4l2_info(sd, "0x0600–0x06FF: CEC Register\n");
+	v4l2_info(sd, "0x0700–0x84FF: Reserved\n");
+	v4l2_info(sd, "0x8500–0x85FF: HDMIRX System Control Register\n");
+	v4l2_info(sd, "0x8600–0x86FF: HDMIRX Audio Control Register\n");
+	v4l2_info(sd, "0x8700–0x87FF: HDMIRX InfoFrame packet data Register\n");
+	v4l2_info(sd, "0x8800–0x88FF: HDMIRX HDCP Port Register\n");
+	v4l2_info(sd, "0x8900–0x89FF: HDMIRX Video Output Port & 3D Register\n");
+	v4l2_info(sd, "0x8A00–0x8BFF: Reserved\n");
+	v4l2_info(sd, "0x8C00–0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
+	v4l2_info(sd, "0x9000–0x90FF: HDMIRX GBD Extraction Control\n");
+	v4l2_info(sd, "0x9100–0x92FF: HDMIRX GBD RAM read\n");
+	v4l2_info(sd, "0x9300-      : Reserved\n");
+}
+
+static int tc358743_get_reg_size(u16 address)
+{
+	/* REF_01 p. 66-72 */
+	if (address <= 0x00ff)
+		return 2;
+	else if ((address >= 0x0100) && (address <= 0x06FF))
+		return 4;
+	else if ((address >= 0x0700) && (address <= 0x84ff))
+		return 2;
+	else
+		return 1;
+}
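+
+/*
+ * Illustrative example: the global control registers (<= 0x00ff) such as
+ * SYSCTL are 16-bit, the CSI registers (0x0100-0x06ff) are 32-bit, and
+ * the HDMI RX registers (>= 0x8500) are 8-bit, which is why i2c_rd16(),
+ * i2c_rd32() and i2c_rd8() respectively are used for them elsewhere in
+ * this driver.
+ */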
+
+static int tc358743_g_register(struct v4l2_subdev *sd,
+			       struct v4l2_dbg_register *reg)
+{
+	if (reg->reg > 0xffff) {
+		tc358743_print_register_map(sd);
+		return -EINVAL;
+	}
+
+	reg->size = tc358743_get_reg_size(reg->reg);
+
+	i2c_rd(sd, reg->reg, (u8 *)&reg->val, reg->size);
+
+	return 0;
+}
+
+static int tc358743_s_register(struct v4l2_subdev *sd,
+			       const struct v4l2_dbg_register *reg)
+{
+	if (reg->reg > 0xffff) {
+		tc358743_print_register_map(sd);
+		return -EINVAL;
+	}
+
+	/* It should not be possible for the user to enable HDCP with a simple
+	 * v4l2-dbg command.
+	 *
+	 * DO NOT REMOVE THIS unless all other issues with HDCP have been
+	 * resolved.
+	 */
+	if (reg->reg == HDCP_MODE ||
+	    reg->reg == HDCP_REG1 ||
+	    reg->reg == HDCP_REG2 ||
+	    reg->reg == HDCP_REG3 ||
+	    reg->reg == BCAPS)
+		return 0;
+
+	i2c_wr(sd, (u16)reg->reg, (u8 *)&reg->val,
+			tc358743_get_reg_size(reg->reg));
+
+	return 0;
+}
+#endif
+
+static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+	u16 intstatus = i2c_rd16(sd, INTSTATUS);
+
+	v4l2_dbg(1, debug, sd, "%s: IntStatus = 0x%04x\n", __func__, intstatus);
+
+	if (intstatus & MASK_HDMI_INT) {
+		u8 hdmi_int0 = i2c_rd8(sd, HDMI_INT0);
+		u8 hdmi_int1 = i2c_rd8(sd, HDMI_INT1);
+
+		if (hdmi_int0 & MASK_I_MISC)
+			tc358743_hdmi_misc_int_handler(sd, handled);
+		if (hdmi_int1 & MASK_I_CBIT)
+			tc358743_hdmi_cbit_int_handler(sd, handled);
+		if (hdmi_int1 & MASK_I_CLK)
+			tc358743_hdmi_clk_int_handler(sd, handled);
+		if (hdmi_int1 & MASK_I_SYS)
+			tc358743_hdmi_sys_int_handler(sd, handled);
+		if (hdmi_int1 & MASK_I_AUD)
+			tc358743_hdmi_audio_int_handler(sd, handled);
+
+		i2c_wr16(sd, INTSTATUS, MASK_HDMI_INT);
+		intstatus &= ~MASK_HDMI_INT;
+	}
+
+	if (intstatus & MASK_CSI_INT) {
+		u32 csi_int = i2c_rd32(sd, CSI_INT);
+
+		if (csi_int & MASK_INTER)
+			tc358743_csi_err_int_handler(sd, handled);
+
+		i2c_wr16(sd, INTSTATUS, MASK_CSI_INT);
+		intstatus &= ~MASK_CSI_INT;
+	}
+
+	intstatus = i2c_rd16(sd, INTSTATUS);
+	if (intstatus) {
+		v4l2_dbg(1, debug, sd,
+				"%s: Unhandled IntStatus interrupts: 0x%02x\n",
+				__func__, intstatus);
+	}
+
+	return 0;
+}
+
+static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
+{
+	struct tc358743_state *state = dev_id;
+	bool handled;
+
+	tc358743_isr(&state->sd, 0, &handled);
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tc358743_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				    struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* --------------- VIDEO OPS --------------- */
+
+static int tc358743_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+	*status = 0;
+	*status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
+	*status |= no_sync(sd) ? V4L2_IN_ST_NO_SYNC : 0;
+
+	v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
+
+	return 0;
+}
+
+static int tc358743_s_dv_timings(struct v4l2_subdev *sd,
+				 struct v4l2_dv_timings *timings)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	if (!timings)
+		return -EINVAL;
+
+	if (debug)
+		v4l2_print_dv_timings(sd->name, "tc358743_s_dv_timings: ",
+				timings, false);
+
+	if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+		v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+		return 0;
+	}
+
+	if (!v4l2_valid_dv_timings(timings,
+				&tc358743_timings_cap, NULL, NULL)) {
+		v4l2_dbg(1, debug, sd, "%s: timings out of range\n", __func__);
+		return -ERANGE;
+	}
+
+	state->timings = *timings;
+
+	enable_stream(sd, false);
+	tc358743_set_pll(sd);
+	tc358743_set_csi(sd);
+
+	return 0;
+}
+
+static int tc358743_g_dv_timings(struct v4l2_subdev *sd,
+				 struct v4l2_dv_timings *timings)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	*timings = state->timings;
+
+	return 0;
+}
+
+static int tc358743_enum_dv_timings(struct v4l2_subdev *sd,
+				    struct v4l2_enum_dv_timings *timings)
+{
+	if (timings->pad != 0)
+		return -EINVAL;
+
+	return v4l2_enum_dv_timings_cap(timings,
+			&tc358743_timings_cap, NULL, NULL);
+}
+
+static int tc358743_query_dv_timings(struct v4l2_subdev *sd,
+		struct v4l2_dv_timings *timings)
+{
+	int ret;
+
+	ret = tc358743_get_detected_timings(sd, timings);
+	if (ret)
+		return ret;
+
+	if (debug)
+		v4l2_print_dv_timings(sd->name, "tc358743_query_dv_timings: ",
+				timings, false);
+
+	if (!v4l2_valid_dv_timings(timings,
+				&tc358743_timings_cap, NULL, NULL)) {
+		v4l2_dbg(1, debug, sd, "%s: timings out of range\n", __func__);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static int tc358743_dv_timings_cap(struct v4l2_subdev *sd,
+		struct v4l2_dv_timings_cap *cap)
+{
+	if (cap->pad != 0)
+		return -EINVAL;
+
+	*cap = tc358743_timings_cap;
+
+	return 0;
+}
+
+static int tc358743_g_mbus_config(struct v4l2_subdev *sd,
+			     struct v4l2_mbus_config *cfg)
+{
+	cfg->type = V4L2_MBUS_CSI2;
+
+	/* Support for non-continuous CSI-2 clock is missing in the driver */
+	cfg->flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+	switch (tc358743_num_csi_lanes_in_use(sd)) {
+	case 1:
+		cfg->flags |= V4L2_MBUS_CSI2_1_LANE;
+		break;
+	case 2:
+		cfg->flags |= V4L2_MBUS_CSI2_2_LANE;
+		break;
+	case 3:
+		cfg->flags |= V4L2_MBUS_CSI2_3_LANE;
+		break;
+	case 4:
+		cfg->flags |= V4L2_MBUS_CSI2_4_LANE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	enable_stream(sd, enable);
+
+	return 0;
+}
+
+/* --------------- PAD OPS --------------- */
+
+static int tc358743_get_fmt(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_format *format)
+{
+	struct tc358743_state *state = to_state(sd);
+	u8 vi_rep = i2c_rd8(sd, VI_REP);
+
+	if (format->pad != 0)
+		return -EINVAL;
+
+	format->format.code = state->mbus_fmt_code;
+	format->format.width = state->timings.bt.width;
+	format->format.height = state->timings.bt.height;
+	format->format.field = V4L2_FIELD_NONE;
+
+	switch (vi_rep & MASK_VOUT_COLOR_SEL) {
+	case MASK_VOUT_COLOR_RGB_FULL:
+	case MASK_VOUT_COLOR_RGB_LIMITED:
+		format->format.colorspace = V4L2_COLORSPACE_SRGB;
+		break;
+	case MASK_VOUT_COLOR_601_YCBCR_LIMITED:
+	case MASK_VOUT_COLOR_601_YCBCR_FULL:
+		format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+		break;
+	case MASK_VOUT_COLOR_709_YCBCR_FULL:
+	case MASK_VOUT_COLOR_709_YCBCR_LIMITED:
+		format->format.colorspace = V4L2_COLORSPACE_REC709;
+		break;
+	default:
+		format->format.colorspace = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int tc358743_set_fmt(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_format *format)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	u32 code = format->format.code; /* is overwritten by get_fmt */
+	int ret = tc358743_get_fmt(sd, cfg, format);
+
+	format->format.code = code;
+
+	if (ret)
+		return ret;
+
+	switch (code) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+		return 0;
+
+	state->mbus_fmt_code = format->format.code;
+
+	enable_stream(sd, false);
+	tc358743_set_pll(sd);
+	tc358743_set_csi(sd);
+	tc358743_set_csi_color_space(sd);
+
+	return 0;
+}
+
+static int tc358743_g_edid(struct v4l2_subdev *sd,
+		struct v4l2_subdev_edid *edid)
+{
+	struct tc358743_state *state = to_state(sd);
+
+	if (edid->pad != 0)
+		return -EINVAL;
+
+	if (edid->start_block == 0 && edid->blocks == 0) {
+		edid->blocks = state->edid_blocks_written;
+		return 0;
+	}
+
+	if (state->edid_blocks_written == 0)
+		return -ENODATA;
+
+	if (edid->start_block >= state->edid_blocks_written ||
+			edid->blocks == 0)
+		return -EINVAL;
+
+	if (edid->start_block + edid->blocks > state->edid_blocks_written)
+		edid->blocks = state->edid_blocks_written - edid->start_block;
+
+	i2c_rd(sd, EDID_RAM + (edid->start_block * EDID_BLOCK_SIZE), edid->edid,
+			edid->blocks * EDID_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int tc358743_s_edid(struct v4l2_subdev *sd,
+				struct v4l2_subdev_edid *edid)
+{
+	struct tc358743_state *state = to_state(sd);
+	u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+
+	v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
+		 __func__, edid->pad, edid->start_block, edid->blocks);
+
+	if (edid->pad != 0)
+		return -EINVAL;
+
+	if (edid->start_block != 0)
+		return -EINVAL;
+
+	if (edid->blocks > EDID_NUM_BLOCKS_MAX) {
+		edid->blocks = EDID_NUM_BLOCKS_MAX;
+		return -E2BIG;
+	}
+
+	tc358743_disable_edid(sd);
+
+	i2c_wr8(sd, EDID_LEN1, edid_len & 0xff);
+	i2c_wr8(sd, EDID_LEN2, edid_len >> 8);
+
+	if (edid->blocks == 0) {
+		state->edid_blocks_written = 0;
+		return 0;
+	}
+
+	i2c_wr(sd, EDID_RAM, edid->edid, edid_len);
+
+	state->edid_blocks_written = edid->blocks;
+
+	if (tx_5v_power_present(sd))
+		tc358743_enable_edid(sd);
+
+	return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static const struct v4l2_subdev_core_ops tc358743_core_ops = {
+	.log_status = tc358743_log_status,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register = tc358743_g_register,
+	.s_register = tc358743_s_register,
+#endif
+	.interrupt_service_routine = tc358743_isr,
+	.subscribe_event = tc358743_subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops tc358743_video_ops = {
+	.g_input_status = tc358743_g_input_status,
+	.s_dv_timings = tc358743_s_dv_timings,
+	.g_dv_timings = tc358743_g_dv_timings,
+	.query_dv_timings = tc358743_query_dv_timings,
+	.g_mbus_config = tc358743_g_mbus_config,
+	.s_stream = tc358743_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops tc358743_pad_ops = {
+	.set_fmt = tc358743_set_fmt,
+	.get_fmt = tc358743_get_fmt,
+	.get_edid = tc358743_g_edid,
+	.set_edid = tc358743_s_edid,
+	.enum_dv_timings = tc358743_enum_dv_timings,
+	.dv_timings_cap = tc358743_dv_timings_cap,
+};
+
+static const struct v4l2_subdev_ops tc358743_ops = {
+	.core = &tc358743_core_ops,
+	.video = &tc358743_video_ops,
+	.pad = &tc358743_pad_ops,
+};
+
+/* --------------- CUSTOM CTRLS --------------- */
+
+static const struct v4l2_ctrl_config tc358743_ctrl_audio_sampling_rate = {
+	.id = TC358743_CID_AUDIO_SAMPLING_RATE,
+	.name = "Audio sampling rate",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0,
+	.max = 768000,
+	.step = 1,
+	.def = 0,
+	.flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+static const struct v4l2_ctrl_config tc358743_ctrl_audio_present = {
+	.id = TC358743_CID_AUDIO_PRESENT,
+	.name = "Audio present",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+	.def = 0,
+	.flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+/* --------------- PROBE / REMOVE --------------- */
+
+#ifdef CONFIG_OF
+static void tc358743_gpio_reset(struct tc358743_state *state)
+{
+	usleep_range(5000, 10000);
+	gpiod_set_value(state->reset_gpio, 1);
+	usleep_range(1000, 2000);
+	gpiod_set_value(state->reset_gpio, 0);
+	msleep(20);
+}
+
+static int tc358743_probe_of(struct tc358743_state *state)
+{
+	struct device *dev = &state->i2c_client->dev;
+	struct v4l2_of_endpoint *endpoint;
+	struct device_node *ep;
+	struct clk *refclk;
+	u32 bps_pr_lane;
+	int ret = -EINVAL;
+
+	refclk = devm_clk_get(dev, "refclk");
+	if (IS_ERR(refclk)) {
+		if (PTR_ERR(refclk) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get refclk: %ld\n",
+				PTR_ERR(refclk));
+		return PTR_ERR(refclk);
+	}
+
+	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (!ep) {
+		dev_err(dev, "missing endpoint node\n");
+		return -EINVAL;
+	}
+
+	endpoint = v4l2_of_alloc_parse_endpoint(ep);
+	if (IS_ERR(endpoint)) {
+		dev_err(dev, "failed to parse endpoint\n");
+		return PTR_ERR(endpoint);
+	}
+
+	if (endpoint->bus_type != V4L2_MBUS_CSI2 ||
+	    endpoint->bus.mipi_csi2.num_data_lanes == 0 ||
+	    endpoint->nr_of_link_frequencies == 0) {
+		dev_err(dev, "missing CSI-2 properties in endpoint\n");
+		goto free_endpoint;
+	}
+
+	state->bus = endpoint->bus.mipi_csi2;
+
+	clk_prepare_enable(refclk);
+
+	state->pdata.refclk_hz = clk_get_rate(refclk);
+	state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS;
+	state->pdata.enable_hdcp = false;
+	/* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 Mbps. */
+	state->pdata.fifo_level = 16;
+	/*
+	 * The PLL input clock is obtained by dividing refclk by pll_prd.
+	 * It must be between 6 MHz and 40 MHz; a lower frequency is better.
+	 */
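+	/* E.g. 27 MHz refclk: pll_prd = 27 / 6 = 4, PLL input = 6.75 MHz. */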
+	switch (state->pdata.refclk_hz) {
+	case 26000000:
+	case 27000000:
+	case 42000000:
+		state->pdata.pll_prd = state->pdata.refclk_hz / 6000000;
+		break;
+	default:
+		dev_err(dev, "unsupported refclk rate: %u Hz\n",
+			state->pdata.refclk_hz);
+		goto disable_clk;
+	}
+
+	/*
+	 * The CSI bps per lane must be between 62.5 Mbps and 1 Gbps.
+	 * The default is 594 Mbps for 4-lane 1080p60 or 2-lane 720p60.
+	 */
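+	/* The D-PHY is double data rate: bps per lane = 2 * link frequency. */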
+	bps_pr_lane = 2 * endpoint->link_frequencies[0];
+	if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
+		dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
+		goto disable_clk;
+	}
+
+	/* The CSI speed per lane is refclk / pll_prd * pll_fbd */
+	state->pdata.pll_fbd = bps_pr_lane /
+			       state->pdata.refclk_hz * state->pdata.pll_prd;
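+	/*
+	 * E.g. for 594 Mbps per lane with a 27 MHz refclk and pll_prd = 4:
+	 * pll_fbd = 594000000 / 27000000 * 4 = 88, and 27 MHz / 4 * 88
+	 * gives back 594 Mbps.
+	 */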
+
+	/*
+	 * FIXME: These timings are from REF_02 for 594 Mbps per lane (297 MHz
+	 * link frequency). In principle it should be possible to calculate
+	 * them based on link frequency and resolution.
+	 */
+	if (bps_pr_lane != 594000000U)
+		dev_warn(dev, "untested bps per lane: %u bps\n", bps_pr_lane);
+	state->pdata.lineinitcnt = 0xe80;
+	state->pdata.lptxtimecnt = 0x003;
+	/* tclk-preparecnt: 3, tclk-zerocnt: 20 */
+	state->pdata.tclk_headercnt = 0x1403;
+	state->pdata.tclk_trailcnt = 0x00;
+	/* ths-preparecnt: 3, ths-zerocnt: 1 */
+	state->pdata.ths_headercnt = 0x0103;
+	state->pdata.twakeup = 0x4882;
+	state->pdata.tclk_postcnt = 0x008;
+	state->pdata.ths_trailcnt = 0x2;
+	state->pdata.hstxvregcnt = 0;
+
+	state->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(state->reset_gpio)) {
+		dev_err(dev, "failed to get reset gpio\n");
+		ret = PTR_ERR(state->reset_gpio);
+		goto disable_clk;
+	}
+
+	if (state->reset_gpio)
+		tc358743_gpio_reset(state);
+
+	ret = 0;
+	goto free_endpoint;
+
+disable_clk:
+	clk_disable_unprepare(refclk);
+free_endpoint:
+	v4l2_of_free_endpoint(endpoint);
+	return ret;
+}
+#else
+static inline int tc358743_probe_of(struct tc358743_state *state)
+{
+	return -ENODEV;
+}
+#endif
+
+static int tc358743_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	static struct v4l2_dv_timings default_timing =
+		V4L2_DV_BT_CEA_640X480P59_94;
+	struct tc358743_state *state;
+	struct tc358743_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	int err;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -EIO;
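+	/*
+	 * V4L drivers conventionally log the I2C address shifted left by one,
+	 * i.e. as the 8-bit write address.
+	 */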
+	v4l_dbg(1, debug, client, "chip found @ 0x%x (%s)\n",
+		client->addr << 1, client->adapter->name);
+
+	state = devm_kzalloc(&client->dev, sizeof(struct tc358743_state),
+			GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->i2c_client = client;
+
+	/* platform data */
+	if (pdata) {
+		state->pdata = *pdata;
+		state->bus.flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+	} else {
+		err = tc358743_probe_of(state);
+		if (err == -ENODEV)
+			v4l_err(client, "No platform data!\n");
+		if (err)
+			return err;
+	}
+
+	sd = &state->sd;
+	v4l2_i2c_subdev_init(sd, client, &tc358743_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+	/* i2c access */
+	if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) {
+		v4l2_info(sd, "not a TC358743 on address 0x%x\n",
+			  client->addr << 1);
+		return -ENODEV;
+	}
+
+	/* control handlers */
+	v4l2_ctrl_handler_init(&state->hdl, 3);
+
+	/* private controls */
+	state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(&state->hdl, NULL,
+			V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+
+	/* custom controls */
+	state->audio_sampling_rate_ctrl = v4l2_ctrl_new_custom(&state->hdl,
+			&tc358743_ctrl_audio_sampling_rate, NULL);
+
+	state->audio_present_ctrl = v4l2_ctrl_new_custom(&state->hdl,
+			&tc358743_ctrl_audio_present, NULL);
+
+	sd->ctrl_handler = &state->hdl;
+	if (state->hdl.error) {
+		err = state->hdl.error;
+		goto err_hdl;
+	}
+
+	if (tc358743_update_controls(sd)) {
+		err = -ENODEV;
+		goto err_hdl;
+	}
+
+	/* work queues */
+	state->work_queues = create_singlethread_workqueue(client->name);
+	if (!state->work_queues) {
+		v4l2_err(sd, "Could not create work queue\n");
+		err = -ENOMEM;
+		goto err_hdl;
+	}
+
+	state->pad.flags = MEDIA_PAD_FL_SOURCE;
+	err = media_entity_init(&sd->entity, 1, &state->pad, 0);
+	if (err < 0)
+		goto err_hdl;
+
+	sd->dev = &client->dev;
+	err = v4l2_async_register_subdev(sd);
+	if (err < 0)
+		goto err_hdl;
+
+	mutex_init(&state->confctl_mutex);
+
+	INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
+			tc358743_delayed_work_enable_hotplug);
+
+	tc358743_initial_setup(sd);
+
+	tc358743_s_dv_timings(sd, &default_timing);
+
+	state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+	tc358743_set_csi_color_space(sd);
+
+	tc358743_init_interrupts(sd);
+
+	if (state->i2c_client->irq) {
+		err = devm_request_threaded_irq(&client->dev,
+						state->i2c_client->irq,
+						NULL, tc358743_irq_handler,
+						IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+						"tc358743", state);
+		if (err)
+			goto err_work_queues;
+	}
+
+	tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
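+	/*
+	 * Mask all interrupt sources except HDMI and CSI; a set bit in
+	 * INTMASK disables the corresponding source.
+	 */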
+	i2c_wr16(sd, INTMASK, ~(MASK_HDMI_MSK | MASK_CSI_MSK) & 0xffff);
+
+	err = v4l2_ctrl_handler_setup(sd->ctrl_handler);
+	if (err)
+		goto err_work_queues;
+
+	v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+		  client->addr << 1, client->adapter->name);
+
+	return 0;
+
+err_work_queues:
+	cancel_delayed_work(&state->delayed_work_enable_hotplug);
+	destroy_workqueue(state->work_queues);
+	mutex_destroy(&state->confctl_mutex);
+err_hdl:
+	media_entity_cleanup(&sd->entity);
+	v4l2_ctrl_handler_free(&state->hdl);
+	return err;
+}
+
+static int tc358743_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tc358743_state *state = to_state(sd);
+
+	cancel_delayed_work(&state->delayed_work_enable_hotplug);
+	destroy_workqueue(state->work_queues);
+	v4l2_async_unregister_subdev(sd);
+	v4l2_device_unregister_subdev(sd);
+	mutex_destroy(&state->confctl_mutex);
+	media_entity_cleanup(&sd->entity);
+	v4l2_ctrl_handler_free(&state->hdl);
+
+	return 0;
+}
+
+static struct i2c_device_id tc358743_id[] = {
+	{"tc358743", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc358743_id);
+
+static struct i2c_driver tc358743_driver = {
+	.driver = {
+		.name = "tc358743",
+	},
+	.probe = tc358743_probe,
+	.remove = tc358743_remove,
+	.id_table = tc358743_id,
+};
+
+module_i2c_driver(tc358743_driver);
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
new file mode 100644
index 0000000..81f1db5
--- /dev/null
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -0,0 +1,681 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge - register names and bit masks
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ */
+
+/*
+ * Bit masks have the prefix 'MASK_', with option values after a further '_'
+ * (e.g. MASK_YCBCRFMT_422_8_BIT is the 4:2:2 8-bit option of MASK_YCBCRFMT).
+ */
+
+#ifndef __TC358743_REGS_H
+#define __TC358743_REGS_H
+
+#define CHIPID                                0x0000
+#define MASK_CHIPID                           0xff00
+#define MASK_REVID                            0x00ff
+
+#define SYSCTL                                0x0002
+#define MASK_IRRST                            0x0800
+#define MASK_CECRST                           0x0400
+#define MASK_CTXRST                           0x0200
+#define MASK_HDMIRST                          0x0100
+#define MASK_SLEEP                            0x0001
+
+#define CONFCTL                               0x0004
+#define MASK_PWRISO                           0x8000
+#define MASK_ACLKOPT                          0x1000
+#define MASK_AUDCHNUM                         0x0c00
+#define MASK_AUDCHNUM_8                       0x0000
+#define MASK_AUDCHNUM_6                       0x0400
+#define MASK_AUDCHNUM_4                       0x0800
+#define MASK_AUDCHNUM_2                       0x0c00
+#define MASK_AUDCHSEL                         0x0200
+#define MASK_I2SDLYOPT                        0x0100
+#define MASK_YCBCRFMT                         0x00c0
+#define MASK_YCBCRFMT_444                     0x0000
+#define MASK_YCBCRFMT_422_12_BIT              0x0040
+#define MASK_YCBCRFMT_COLORBAR                0x0080
+#define MASK_YCBCRFMT_422_8_BIT               0x00c0
+#define MASK_INFRMEN                          0x0020
+#define MASK_AUDOUTSEL                        0x0018
+#define MASK_AUDOUTSEL_CSI                    0x0000
+#define MASK_AUDOUTSEL_I2S                    0x0010
+#define MASK_AUDOUTSEL_TDM                    0x0018
+#define MASK_AUTOINDEX                        0x0004
+#define MASK_ABUFEN                           0x0002
+#define MASK_VBUFEN                           0x0001
+
+#define FIFOCTL                               0x0006
+
+#define INTSTATUS                             0x0014
+#define MASK_AMUTE_INT                        0x0400
+#define MASK_HDMI_INT                         0x0200
+#define MASK_CSI_INT                          0x0100
+#define MASK_SYS_INT                          0x0020
+#define MASK_CEC_EINT                         0x0010
+#define MASK_CEC_TINT                         0x0008
+#define MASK_CEC_RINT                         0x0004
+#define MASK_IR_EINT                          0x0002
+#define MASK_IR_DINT                          0x0001
+
+#define INTMASK                               0x0016
+#define MASK_AMUTE_MSK                        0x0400
+#define MASK_HDMI_MSK                         0x0200
+#define MASK_CSI_MSK                          0x0100
+#define MASK_SYS_MSK                          0x0020
+#define MASK_CEC_EMSK                         0x0010
+#define MASK_CEC_TMSK                         0x0008
+#define MASK_CEC_RMSK                         0x0004
+#define MASK_IR_EMSK                          0x0002
+#define MASK_IR_DMSK                          0x0001
+
+#define INTFLAG                               0x0018
+#define INTSYSSTATUS                          0x001A
+
+#define PLLCTL0                               0x0020
+#define MASK_PLL_PRD                          0xf000
+#define SET_PLL_PRD(prd)                      ((((prd) - 1) << 12) &\
+						MASK_PLL_PRD)
+#define MASK_PLL_FBD                          0x01ff
+#define SET_PLL_FBD(fbd)                      (((fbd) - 1) & MASK_PLL_FBD)
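+/*
+ * E.g. SET_PLL_PRD(4) = 0x3000 and SET_PLL_FBD(88) = 0x0057, since both
+ * macros store the value minus one.
+ */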
+
+#define PLLCTL1                               0x0022
+#define MASK_PLL_FRS                          0x0c00
+#define SET_PLL_FRS(frs)                      (((frs) << 10) & MASK_PLL_FRS)
+#define MASK_PLL_LBWS                         0x0300
+#define MASK_LFBREN                           0x0040
+#define MASK_BYPCKEN                          0x0020
+#define MASK_CKEN                             0x0010
+#define MASK_RESETB                           0x0002
+#define MASK_PLL_EN                           0x0001
+
+#define CLW_CNTRL                             0x0140
+#define MASK_CLW_LANEDISABLE                  0x0001
+
+#define D0W_CNTRL                             0x0144
+#define MASK_D0W_LANEDISABLE                  0x0001
+
+#define D1W_CNTRL                             0x0148
+#define MASK_D1W_LANEDISABLE                  0x0001
+
+#define D2W_CNTRL                             0x014C
+#define MASK_D2W_LANEDISABLE                  0x0001
+
+#define D3W_CNTRL                             0x0150
+#define MASK_D3W_LANEDISABLE                  0x0001
+
+#define STARTCNTRL                            0x0204
+#define MASK_START                            0x00000001
+
+#define LINEINITCNT                           0x0210
+#define LPTXTIMECNT                           0x0214
+#define TCLK_HEADERCNT                        0x0218
+#define TCLK_TRAILCNT                         0x021C
+#define THS_HEADERCNT                         0x0220
+#define TWAKEUP                               0x0224
+#define TCLK_POSTCNT                          0x0228
+#define THS_TRAILCNT                          0x022C
+#define HSTXVREGCNT                           0x0230
+
+#define HSTXVREGEN                            0x0234
+#define MASK_D3M_HSTXVREGEN                   0x0010
+#define MASK_D2M_HSTXVREGEN                   0x0008
+#define MASK_D1M_HSTXVREGEN                   0x0004
+#define MASK_D0M_HSTXVREGEN                   0x0002
+#define MASK_CLM_HSTXVREGEN                   0x0001
+
+#define TXOPTIONCNTRL                         0x0238
+#define MASK_CONTCLKMODE                      0x00000001
+
+#define CSI_CONTROL                           0x040C
+#define MASK_CSI_MODE                         0x8000
+#define MASK_HTXTOEN                          0x0400
+#define MASK_TXHSMD                           0x0080
+#define MASK_HSCKMD                           0x0020
+#define MASK_NOL                              0x0006
+#define MASK_NOL_1                            0x0000
+#define MASK_NOL_2                            0x0002
+#define MASK_NOL_3                            0x0004
+#define MASK_NOL_4                            0x0006
+#define MASK_EOTDIS                           0x0001
+
+#define CSI_INT                               0x0414
+#define MASK_INTHLT                           0x00000008
+#define MASK_INTER                            0x00000004
+
+#define CSI_INT_ENA                           0x0418
+#define MASK_IENHLT                           0x00000008
+#define MASK_IENER                            0x00000004
+
+#define CSI_ERR                               0x044C
+#define MASK_INER                             0x00000200
+#define MASK_WCER                             0x00000100
+#define MASK_QUNK                             0x00000010
+#define MASK_TXBRK                            0x00000002
+
+#define CSI_ERR_INTENA                        0x0450
+#define CSI_ERR_HALT                          0x0454
+
+#define CSI_CONFW                             0x0500
+#define MASK_MODE                             0xe0000000
+#define MASK_MODE_SET                         0xa0000000
+#define MASK_MODE_CLEAR                       0xc0000000
+#define MASK_ADDRESS                          0x1f000000
+#define MASK_ADDRESS_CSI_CONTROL              0x03000000
+#define MASK_ADDRESS_CSI_INT_ENA              0x06000000
+#define MASK_ADDRESS_CSI_ERR_INTENA           0x14000000
+#define MASK_ADDRESS_CSI_ERR_HALT             0x15000000
+#define MASK_DATA                             0x0000ffff
+
+#define CSI_INT_CLR                           0x050C
+#define MASK_ICRER                            0x00000004
+
+#define CSI_START                             0x0518
+#define MASK_STRT                             0x00000001
+
+#define CECEN                                 0x0600
+#define MASK_CECEN                            0x0001
+
+#define HDMI_INT0                             0x8500
+#define MASK_I_KEY                            0x80
+#define MASK_I_MISC                           0x02
+#define MASK_I_PHYERR                         0x01
+
+#define HDMI_INT1                             0x8501
+#define MASK_I_GBD                            0x80
+#define MASK_I_HDCP                           0x40
+#define MASK_I_ERR                            0x20
+#define MASK_I_AUD                            0x10
+#define MASK_I_CBIT                           0x08
+#define MASK_I_PACKET                         0x04
+#define MASK_I_CLK                            0x02
+#define MASK_I_SYS                            0x01
+
+#define SYS_INT                               0x8502
+#define MASK_I_ACR_CTS                        0x80
+#define MASK_I_ACRN                           0x40
+#define MASK_I_DVI                            0x20
+#define MASK_I_HDMI                           0x10
+#define MASK_I_NOPMBDET                       0x08
+#define MASK_I_DPMBDET                        0x04
+#define MASK_I_TMDS                           0x02
+#define MASK_I_DDC                            0x01
+
+#define CLK_INT                               0x8503
+#define MASK_I_OUT_H_CHG                      0x40
+#define MASK_I_IN_DE_CHG                      0x20
+#define MASK_I_IN_HV_CHG                      0x10
+#define MASK_I_DC_CHG                         0x08
+#define MASK_I_PXCLK_CHG                      0x04
+#define MASK_I_PHYCLK_CHG                     0x02
+#define MASK_I_TMDSCLK_CHG                    0x01
+
+#define CBIT_INT                              0x8505
+#define MASK_I_AF_LOCK                        0x80
+#define MASK_I_AF_UNLOCK                      0x40
+#define MASK_I_CBIT_FS                        0x02
+
+#define AUDIO_INT                             0x8506
+
+#define ERR_INT                               0x8507
+#define MASK_I_EESS_ERR                       0x80
+
+#define HDCP_INT                              0x8508
+#define MASK_I_AVM_SET                        0x80
+#define MASK_I_AVM_CLR                        0x40
+#define MASK_I_LINKERR                        0x20
+#define MASK_I_SHA_END                        0x10
+#define MASK_I_R0_END                         0x08
+#define MASK_I_KM_END                         0x04
+#define MASK_I_AKSV_END                       0x02
+#define MASK_I_AN_END                         0x01
+
+#define MISC_INT                              0x850B
+#define MASK_I_AS_LAYOUT                      0x10
+#define MASK_I_NO_SPD                         0x08
+#define MASK_I_NO_VS                          0x03
+#define MASK_I_SYNC_CHG                       0x02
+#define MASK_I_AUDIO_MUTE                     0x01
+
+#define KEY_INT                               0x850F
+
+#define SYS_INTM                              0x8512
+#define MASK_M_ACR_CTS                        0x80
+#define MASK_M_ACR_N                          0x40
+#define MASK_M_DVI_DET                        0x20
+#define MASK_M_HDMI_DET                       0x10
+#define MASK_M_NOPMBDET                       0x08
+#define MASK_M_BPMBDET                        0x04
+#define MASK_M_TMDS                           0x02
+#define MASK_M_DDC                            0x01
+
+#define CLK_INTM                              0x8513
+#define MASK_M_OUT_H_CHG                      0x40
+#define MASK_M_IN_DE_CHG                      0x20
+#define MASK_M_IN_HV_CHG                      0x10
+#define MASK_M_DC_CHG                         0x08
+#define MASK_M_PXCLK_CHG                      0x04
+#define MASK_M_PHYCLK_CHG                     0x02
+#define MASK_M_TMDS_CHG                       0x01
+
+#define PACKET_INTM                           0x8514
+
+#define CBIT_INTM                             0x8515
+#define MASK_M_AF_LOCK                        0x80
+#define MASK_M_AF_UNLOCK                      0x40
+#define MASK_M_CBIT_FS                        0x02
+
+#define AUDIO_INTM                            0x8516
+#define MASK_M_BUFINIT_END                    0x01
+
+#define ERR_INTM                              0x8517
+#define MASK_M_EESS_ERR                       0x80
+
+#define HDCP_INTM                             0x8518
+#define MASK_M_AVM_SET                        0x80
+#define MASK_M_AVM_CLR                        0x40
+#define MASK_M_LINKERR                        0x20
+#define MASK_M_SHA_END                        0x10
+#define MASK_M_R0_END                         0x08
+#define MASK_M_KM_END                         0x04
+#define MASK_M_AKSV_END                       0x02
+#define MASK_M_AN_END                         0x01
+
+#define MISC_INTM                             0x851B
+#define MASK_M_AS_LAYOUT                      0x10
+#define MASK_M_NO_SPD                         0x08
+#define MASK_M_NO_VS                          0x03
+#define MASK_M_SYNC_CHG                       0x02
+#define MASK_M_AUDIO_MUTE                     0x01
+
+#define KEY_INTM                              0x851F
+
+#define SYS_STATUS                            0x8520
+#define MASK_S_SYNC                           0x80
+#define MASK_S_AVMUTE                         0x40
+#define MASK_S_HDCP                           0x20
+#define MASK_S_HDMI                           0x10
+#define MASK_S_PHY_SCDT                       0x08
+#define MASK_S_PHY_PLL                        0x04
+#define MASK_S_TMDS                           0x02
+#define MASK_S_DDC5V                          0x01
+
+#define CSI_STATUS                            0x0410
+#define MASK_S_WSYNC                          0x0400
+#define MASK_S_TXACT                          0x0200
+#define MASK_S_RXACT                          0x0100
+#define MASK_S_HLT                            0x0001
+
+#define VI_STATUS1                            0x8522
+#define MASK_S_V_GBD                          0x08
+#define MASK_S_DEEPCOLOR                      0x0c
+#define MASK_S_V_422                          0x02
+#define MASK_S_V_INTERLACE                    0x01
+
+#define AU_STATUS0                            0x8523
+#define MASK_S_A_SAMPLE                       0x01
+
+#define VI_STATUS3                            0x8528
+#define MASK_S_V_COLOR                        0x1e
+#define MASK_LIMITED                          0x01
+
+#define PHY_CTL0                              0x8531
+#define MASK_PHY_SYSCLK_IND                   0x02
+#define MASK_PHY_CTL                          0x01
+
+#define PHY_CTL1                              0x8532 /* Not in REF_01 */
+#define MASK_PHY_AUTO_RST1                    0xf0
+#define MASK_PHY_AUTO_RST1_OFF                0x00
+#define SET_PHY_AUTO_RST1_US(us)             ((((us) / 200) << 4) & \
+						MASK_PHY_AUTO_RST1)
+#define MASK_FREQ_RANGE_MODE                  0x0f
+#define SET_FREQ_RANGE_MODE_CYCLES(cycles)   (((cycles) - 1) & \
+						MASK_FREQ_RANGE_MODE)
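+/*
+ * E.g. SET_PHY_AUTO_RST1_US(800) = 0x40 (200 us units) and
+ * SET_FREQ_RANGE_MODE_CYCLES(2) = 0x01 (stores cycles - 1).
+ */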
+
+#define PHY_CTL2                              0x8533 /* Not in REF_01 */
+#define MASK_PHY_AUTO_RST4                    0x04
+#define MASK_PHY_AUTO_RST3                    0x02
+#define MASK_PHY_AUTO_RST2                    0x01
+#define MASK_PHY_AUTO_RSTn                    (MASK_PHY_AUTO_RST4 | \
+						MASK_PHY_AUTO_RST3 | \
+						MASK_PHY_AUTO_RST2)
+
+#define PHY_EN                                0x8534
+#define MASK_ENABLE_PHY                       0x01
+
+#define PHY_RST                               0x8535
+#define MASK_RESET_CTRL                       0x01   /* Reset active low */
+
+#define PHY_BIAS                              0x8536 /* Not in REF_01 */
+
+#define PHY_CSQ                               0x853F /* Not in REF_01 */
+#define MASK_CSQ_CNT                          0x0f
+#define SET_CSQ_CNT_LEVEL(n)                 ((n) & MASK_CSQ_CNT)
+
+#define SYS_FREQ0                             0x8540
+#define SYS_FREQ1                             0x8541
+
+#define SYS_CLK                               0x8542 /* Not in REF_01 */
+#define MASK_CLK_DIFF                         0x0C
+#define MASK_CLK_DIV                          0x03
+
+#define DDC_CTL                               0x8543
+#define MASK_DDC_ACK_POL                      0x08
+#define MASK_DDC_ACTION                       0x04
+#define MASK_DDC5V_MODE                       0x03
+#define MASK_DDC5V_MODE_0MS                   0x00
+#define MASK_DDC5V_MODE_50MS                  0x01
+#define MASK_DDC5V_MODE_100MS                 0x02
+#define MASK_DDC5V_MODE_200MS                 0x03
+
+#define HPD_CTL                               0x8544
+#define MASK_HPD_CTL0                         0x10
+#define MASK_HPD_OUT0                         0x01
+
+#define ANA_CTL                               0x8545
+#define MASK_APPL_PCSX                        0x30
+#define MASK_APPL_PCSX_HIZ                    0x00
+#define MASK_APPL_PCSX_L_FIX                  0x10
+#define MASK_APPL_PCSX_H_FIX                  0x20
+#define MASK_APPL_PCSX_NORMAL                 0x30
+#define MASK_ANALOG_ON                        0x01
+
+#define AVM_CTL                               0x8546
+
+#define INIT_END                              0x854A
+#define MASK_INIT_END                         0x01
+
+#define HDMI_DET                              0x8552 /* Not in REF_01 */
+#define MASK_HDMI_DET_MOD1                    0x80
+#define MASK_HDMI_DET_MOD0                    0x40
+#define MASK_HDMI_DET_V                       0x30
+#define MASK_HDMI_DET_V_SYNC                  0x00
+#define MASK_HDMI_DET_V_ASYNC_25MS            0x10
+#define MASK_HDMI_DET_V_ASYNC_50MS            0x20
+#define MASK_HDMI_DET_V_ASYNC_100MS           0x30
+#define MASK_HDMI_DET_NUM                     0x0f
+
+#define HDCP_MODE                             0x8560
+#define MASK_MODE_RST_TN                      0x20
+#define MASK_LINE_REKEY                       0x10
+#define MASK_AUTO_CLR                         0x04
+
+#define HDCP_REG1                             0x8563 /* Not in REF_01 */
+#define MASK_AUTH_UNAUTH_SEL                  0x70
+#define MASK_AUTH_UNAUTH_SEL_12_FRAMES        0x70
+#define MASK_AUTH_UNAUTH_SEL_8_FRAMES         0x60
+#define MASK_AUTH_UNAUTH_SEL_4_FRAMES         0x50
+#define MASK_AUTH_UNAUTH_SEL_2_FRAMES         0x40
+#define MASK_AUTH_UNAUTH_SEL_64_FRAMES        0x30
+#define MASK_AUTH_UNAUTH_SEL_32_FRAMES        0x20
+#define MASK_AUTH_UNAUTH_SEL_16_FRAMES        0x10
+#define MASK_AUTH_UNAUTH_SEL_ONCE             0x00
+#define MASK_AUTH_UNAUTH                      0x01
+#define MASK_AUTH_UNAUTH_AUTO                 0x01
+
+#define HDCP_REG2                             0x8564 /* Not in REF_01 */
+#define MASK_AUTO_P3_RESET                    0x0F
+#define SET_AUTO_P3_RESET_FRAMES(n)          ((n) & MASK_AUTO_P3_RESET)
+#define MASK_AUTO_P3_RESET_OFF                0x00
+
+#define VI_MODE                               0x8570
+#define MASK_RGB_DVI                          0x08 /* Not in REF_01 */
+
+#define VOUT_SET2                             0x8573
+#define MASK_SEL422                           0x80
+#define MASK_VOUT_422FIL_100                  0x40
+#define MASK_VOUTCOLORMODE                    0x03
+#define MASK_VOUTCOLORMODE_THROUGH            0x00
+#define MASK_VOUTCOLORMODE_AUTO               0x01
+#define MASK_VOUTCOLORMODE_MANUAL             0x03
+
+#define VOUT_SET3                             0x8574
+#define MASK_VOUT_EXTCNT                      0x08
+
+#define VI_REP                                0x8576
+#define MASK_VOUT_COLOR_SEL                   0xe0
+#define MASK_VOUT_COLOR_RGB_FULL              0x00
+#define MASK_VOUT_COLOR_RGB_LIMITED           0x20
+#define MASK_VOUT_COLOR_601_YCBCR_FULL        0x40
+#define MASK_VOUT_COLOR_601_YCBCR_LIMITED     0x60
+#define MASK_VOUT_COLOR_709_YCBCR_FULL        0x80
+#define MASK_VOUT_COLOR_709_YCBCR_LIMITED     0xa0
+#define MASK_VOUT_COLOR_FULL_TO_LIMITED       0xc0
+#define MASK_VOUT_COLOR_LIMITED_TO_FULL       0xe0
+#define MASK_IN_REP_HEN                       0x10
+#define MASK_IN_REP                           0x0f
+
+#define VI_MUTE                               0x857F
+#define MASK_AUTO_MUTE                        0xc0
+#define MASK_VI_MUTE                          0x10
+
+#define DE_WIDTH_H_LO                         0x8582 /* Not in REF_01 */
+#define DE_WIDTH_H_HI                         0x8583 /* Not in REF_01 */
+#define DE_WIDTH_V_LO                         0x8588 /* Not in REF_01 */
+#define DE_WIDTH_V_HI                         0x8589 /* Not in REF_01 */
+#define H_SIZE_LO                             0x858A /* Not in REF_01 */
+#define H_SIZE_HI                             0x858B /* Not in REF_01 */
+#define V_SIZE_LO                             0x858C /* Not in REF_01 */
+#define V_SIZE_HI                             0x858D /* Not in REF_01 */
+#define FV_CNT_LO                             0x85A1 /* Not in REF_01 */
+#define FV_CNT_HI                             0x85A2 /* Not in REF_01 */
+
+#define FH_MIN0                               0x85AA /* Not in REF_01 */
+#define FH_MIN1                               0x85AB /* Not in REF_01 */
+#define FH_MAX0                               0x85AC /* Not in REF_01 */
+#define FH_MAX1                               0x85AD /* Not in REF_01 */
+
+#define HV_RST                                0x85AF /* Not in REF_01 */
+#define MASK_H_PI_RST                         0x20
+#define MASK_V_PI_RST                         0x10
+
+#define EDID_MODE                             0x85C7
+#define MASK_EDID_SPEED                       0x40
+#define MASK_EDID_MODE                        0x03
+#define MASK_EDID_MODE_DISABLE                0x00
+#define MASK_EDID_MODE_DDC2B                  0x01
+#define MASK_EDID_MODE_E_DDC                  0x02
+
+#define EDID_LEN1                             0x85CA
+#define EDID_LEN2                             0x85CB
+
+#define HDCP_REG3                             0x85D1 /* Not in REF_01 */
+#define KEY_RD_CMD                            0x01
+
+#define FORCE_MUTE                            0x8600
+#define MASK_FORCE_AMUTE                      0x10
+#define MASK_FORCE_DMUTE                      0x01
+
+#define CMD_AUD                               0x8601
+#define MASK_CMD_BUFINIT                      0x04
+#define MASK_CMD_LOCKDET                      0x02
+#define MASK_CMD_MUTE                         0x01
+
+#define AUTO_CMD0                             0x8602
+#define MASK_AUTO_MUTE7                       0x80
+#define MASK_AUTO_MUTE6                       0x40
+#define MASK_AUTO_MUTE5                       0x20
+#define MASK_AUTO_MUTE4                       0x10
+#define MASK_AUTO_MUTE3                       0x08
+#define MASK_AUTO_MUTE2                       0x04
+#define MASK_AUTO_MUTE1                       0x02
+#define MASK_AUTO_MUTE0                       0x01
+
+#define AUTO_CMD1                             0x8603
+#define MASK_AUTO_MUTE10                      0x04
+#define MASK_AUTO_MUTE9                       0x02
+#define MASK_AUTO_MUTE8                       0x01
+
+#define AUTO_CMD2                             0x8604
+#define MASK_AUTO_PLAY3                       0x08
+#define MASK_AUTO_PLAY2                       0x04
+
+#define BUFINIT_START                         0x8606
+#define SET_BUFINIT_START_MS(milliseconds)   ((milliseconds) / 100)
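+/* E.g. SET_BUFINIT_START_MS(500) = 5 (100 ms units). */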
+
+#define FS_MUTE                               0x8607
+#define MASK_FS_ELSE_MUTE                     0x80
+#define MASK_FS22_MUTE                        0x40
+#define MASK_FS24_MUTE                        0x20
+#define MASK_FS88_MUTE                        0x10
+#define MASK_FS96_MUTE                        0x08
+#define MASK_FS176_MUTE                       0x04
+#define MASK_FS192_MUTE                       0x02
+#define MASK_FS_NO_MUTE                       0x01
+
+#define FS_IMODE                              0x8620
+#define MASK_NLPCM_HMODE                      0x40
+#define MASK_NLPCM_SMODE                      0x20
+#define MASK_NLPCM_IMODE                      0x10
+#define MASK_FS_HMODE                         0x08
+#define MASK_FS_AMODE                         0x04
+#define MASK_FS_SMODE                         0x02
+#define MASK_FS_IMODE                         0x01
+
+#define FS_SET                                0x8621
+#define MASK_FS                               0x0f
+
+#define LOCKDET_REF0                          0x8630
+#define LOCKDET_REF1                          0x8631
+#define LOCKDET_REF2                          0x8632
+
+#define ACR_MODE                              0x8640
+#define MASK_ACR_LOAD                         0x10
+#define MASK_N_MODE                           0x04
+#define MASK_CTS_MODE                         0x01
+
+#define ACR_MDF0                              0x8641
+#define MASK_ACR_L2MDF                        0x70
+#define MASK_ACR_L2MDF_0_PPM                  0x00
+#define MASK_ACR_L2MDF_61_PPM                 0x10
+#define MASK_ACR_L2MDF_122_PPM                0x20
+#define MASK_ACR_L2MDF_244_PPM                0x30
+#define MASK_ACR_L2MDF_488_PPM                0x40
+#define MASK_ACR_L2MDF_976_PPM                0x50
+#define MASK_ACR_L2MDF_1976_PPM               0x60
+#define MASK_ACR_L2MDF_3906_PPM               0x70
+#define MASK_ACR_L1MDF                        0x07
+#define MASK_ACR_L1MDF_0_PPM                  0x00
+#define MASK_ACR_L1MDF_61_PPM                 0x01
+#define MASK_ACR_L1MDF_122_PPM                0x02
+#define MASK_ACR_L1MDF_244_PPM                0x03
+#define MASK_ACR_L1MDF_488_PPM                0x04
+#define MASK_ACR_L1MDF_976_PPM                0x05
+#define MASK_ACR_L1MDF_1976_PPM               0x06
+#define MASK_ACR_L1MDF_3906_PPM               0x07
+
+#define ACR_MDF1                              0x8642
+#define MASK_ACR_L3MDF                        0x07
+#define MASK_ACR_L3MDF_0_PPM                  0x00
+#define MASK_ACR_L3MDF_61_PPM                 0x01
+#define MASK_ACR_L3MDF_122_PPM                0x02
+#define MASK_ACR_L3MDF_244_PPM                0x03
+#define MASK_ACR_L3MDF_488_PPM                0x04
+#define MASK_ACR_L3MDF_976_PPM                0x05
+#define MASK_ACR_L3MDF_1976_PPM               0x06
+#define MASK_ACR_L3MDF_3906_PPM               0x07
+
+#define SDO_MODE1                             0x8652
+#define MASK_SDO_BIT_LENG                     0x70
+#define MASK_SDO_FMT                          0x03
+#define MASK_SDO_FMT_RIGHT                    0x00
+#define MASK_SDO_FMT_LEFT                     0x01
+#define MASK_SDO_FMT_I2S                      0x02
+
+#define DIV_MODE                              0x8665 /* Not in REF_01 */
+#define MASK_DIV_DLY                          0xf0
+#define SET_DIV_DLY_MS(milliseconds)         ((((milliseconds) / 100) << 4) & \
+						MASK_DIV_DLY)
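+/* E.g. SET_DIV_DLY_MS(400) = 0x40 (100 ms units in bits 7:4). */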
+#define MASK_DIV_MODE                         0x01
+
+#define NCO_F0_MOD                            0x8670
+#define MASK_NCO_F0_MOD                       0x03
+#define MASK_NCO_F0_MOD_42MHZ                 0x00
+#define MASK_NCO_F0_MOD_27MHZ                 0x01
+
+#define PK_INT_MODE                           0x8709
+#define MASK_ISRC2_INT_MODE                   0x80
+#define MASK_ISRC_INT_MODE                    0x40
+#define MASK_ACP_INT_MODE                     0x20
+#define MASK_VS_INT_MODE                      0x10
+#define MASK_SPD_INT_MODE                     0x08
+#define MASK_MS_INT_MODE                      0x04
+#define MASK_AUD_INT_MODE                     0x02
+#define MASK_AVI_INT_MODE                     0x01
+
+#define NO_PKT_LIMIT                          0x870B
+#define MASK_NO_ACP_LIMIT                     0xf0
+#define SET_NO_ACP_LIMIT_MS(milliseconds)    ((((milliseconds) / 80) << 4) & \
+						MASK_NO_ACP_LIMIT)
+#define MASK_NO_AVI_LIMIT                     0x0f
+#define SET_NO_AVI_LIMIT_MS(milliseconds)    (((milliseconds) / 80) & \
+						MASK_NO_AVI_LIMIT)
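+/*
+ * E.g. SET_NO_ACP_LIMIT_MS(320) = 0x40 and SET_NO_AVI_LIMIT_MS(320) = 0x04;
+ * both limits are programmed in 80 ms units.
+ */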
+
+#define NO_PKT_CLR                            0x870C
+#define MASK_NO_VS_CLR                        0x40
+#define MASK_NO_SPD_CLR                       0x20
+#define MASK_NO_ACP_CLR                       0x10
+#define MASK_NO_AVI_CLR1                      0x02
+#define MASK_NO_AVI_CLR0                      0x01
+
+#define ERR_PK_LIMIT                          0x870D
+#define NO_PKT_LIMIT2                         0x870E
+#define PK_AVI_0HEAD                          0x8710
+#define PK_AVI_1HEAD                          0x8711
+#define PK_AVI_2HEAD                          0x8712
+#define PK_AVI_0BYTE                          0x8713
+#define PK_AVI_1BYTE                          0x8714
+#define PK_AVI_2BYTE                          0x8715
+#define PK_AVI_3BYTE                          0x8716
+#define PK_AVI_4BYTE                          0x8717
+#define PK_AVI_5BYTE                          0x8718
+#define PK_AVI_6BYTE                          0x8719
+#define PK_AVI_7BYTE                          0x871A
+#define PK_AVI_8BYTE                          0x871B
+#define PK_AVI_9BYTE                          0x871C
+#define PK_AVI_10BYTE                         0x871D
+#define PK_AVI_11BYTE                         0x871E
+#define PK_AVI_12BYTE                         0x871F
+#define PK_AVI_13BYTE                         0x8720
+#define PK_AVI_14BYTE                         0x8721
+#define PK_AVI_15BYTE                         0x8722
+#define PK_AVI_16BYTE                         0x8723
+
+#define BKSV                                  0x8800
+
+#define BCAPS                                 0x8840
+#define MASK_HDMI_RSVD                        0x80
+#define MASK_REPEATER                         0x40
+#define MASK_READY                            0x20
+#define MASK_FASTI2C                          0x10
+#define MASK_1_1_FEA                          0x02
+#define MASK_FAST_REAU                        0x01
+
+#define BSTATUS1                              0x8842
+#define MASK_MAX_EXCED                        0x08
+
+#define EDID_RAM                              0x8C00
+#define NO_GDB_LIMIT                          0x9007
+
+#endif
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index cf93021..d87168a 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -331,13 +331,6 @@
 
 static const struct v4l2_subdev_core_ops tda7432_core_ops = {
 	.log_status = tda7432_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_ops tda7432_ops = {
@@ -416,7 +409,6 @@
 
 static struct i2c_driver tda7432_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tda7432",
 	},
 	.probe		= tda7432_probe,
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index fbdff8b..f31e659 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -199,7 +199,6 @@
 
 static struct i2c_driver tda9840_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tda9840",
 	},
 	.probe		= tda9840_probe,
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index bbe1a99..084bd75 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -162,7 +162,6 @@
 
 static struct i2c_driver tea6415c_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tea6415c",
 	},
 	.probe		= tea6415c_probe,
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 30a8d75..b7f4e58 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -144,7 +144,6 @@
 
 static struct i2c_driver tea6420_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tea6420",
 	},
 	.probe		= tea6420_probe,
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 9f7fdb6..bda3a65 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -377,7 +377,6 @@
 
 static struct i2c_driver ths7303_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ths73x3",
 	},
 	.probe		= ths7303_probe,
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index ef87f7b..0370dd8 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -122,13 +122,6 @@
 
 static const struct v4l2_subdev_core_ops tlv320aic23b_core_ops = {
 	.log_status = tlv320aic23b_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_audio_ops tlv320aic23b_audio_ops = {
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 0c50e52..2a8114a 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2051,7 +2051,6 @@
 
 static struct i2c_driver tvaudio_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tvaudio",
 	},
 	.probe		= tvaudio_probe,
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 24e4727..a93985a 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -957,16 +957,6 @@
 	return 0;
 }
 
-static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
-};
-
 static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
 	.s_std = tvp514x_s_std,
 	.s_routing = tvp514x_s_routing,
@@ -983,7 +973,6 @@
 };
 
 static const struct v4l2_subdev_ops tvp514x_ops = {
-	.core = &tvp514x_core_ops,
 	.video = &tvp514x_video_ops,
 	.pad = &tvp514x_pad_ops,
 };
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index e4fa074..522a865 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1215,7 +1215,6 @@
 
 static struct i2c_driver tvp5150_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tvp5150",
 	},
 	.probe		= tvp5150_probe,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 05077cf..f617d8b 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -861,13 +861,6 @@
 /* V4L2 core operation handlers */
 static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
 	.log_status = tvp7002_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = tvp7002_g_register,
 	.s_register = tvp7002_s_register,
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index 12c7d21..bef79cf 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -266,7 +266,6 @@
 
 static struct i2c_driver tw9903_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tw9903",
 	},
 	.probe = tw9903_probe,
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 2672d89..316a311 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -234,7 +234,6 @@
 
 static struct i2c_driver tw9906_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tw9906",
 	},
 	.probe = tw9906_probe,
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index d248e6a..2c0f955 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -241,7 +241,6 @@
 
 static struct i2c_driver upd64031a_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "upd64031a",
 	},
 	.probe		= upd64031a_probe,
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 3a152ce..f2057a4 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -213,7 +213,6 @@
 
 static struct i2c_driver upd64083_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "upd64083",
 	},
 	.probe		= upd64083_probe,
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 819ab6d..d6c23bd 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -194,7 +194,6 @@
 
 static struct i2c_driver vp27smpx_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "vp27smpx",
 	},
 	.probe		= vp27smpx_probe,
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 016e766..4b564f1 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -450,13 +450,6 @@
 
 static const struct v4l2_subdev_core_ops vpx3220_core_ops = {
 	.init = vpx3220_init,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_video_ops vpx3220_video_ops = {
@@ -567,7 +560,6 @@
 
 static struct i2c_driver vpx3220_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "vpx3220",
 	},
 	.probe		= vpx3220_probe,
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index 3be73f6..f086e5e 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -176,13 +176,6 @@
 
 static const struct v4l2_subdev_core_ops wm8739_core_ops = {
 	.log_status = wm8739_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_audio_ops wm8739_audio_ops = {
@@ -272,7 +265,6 @@
 
 static struct i2c_driver wm8739_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "wm8739",
 	},
 	.probe		= wm8739_probe,
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index bee7946..d33d2cd6 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -318,7 +318,6 @@
 
 static struct i2c_driver wm8775_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "wm8775",
 	},
 	.probe		= wm8775_probe,
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 4d8e01c..153a464 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -282,9 +282,9 @@
 			if (ret < 0 && ret != -ENOIOCTLCMD) {
 				dev_dbg(entity->parent->dev,
 					"link validation failed for \"%s\":%u -> \"%s\":%u, error %d\n",
-					entity->name, link->source->index,
-					link->sink->entity->name,
-					link->sink->index, ret);
+					link->source->entity->name,
+					link->source->index,
+					entity->name, link->sink->index, ret);
 				goto error;
 			}
 		}
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index f318ae9..48a611b 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -11,16 +11,16 @@
 if MEDIA_CAMERA_SUPPORT
 	comment "Media capture support"
 source "drivers/media/pci/meye/Kconfig"
+source "drivers/media/pci/solo6x10/Kconfig"
 source "drivers/media/pci/sta2x11/Kconfig"
+source "drivers/media/pci/tw68/Kconfig"
+source "drivers/media/pci/zoran/Kconfig"
 endif
 
 if MEDIA_ANALOG_TV_SUPPORT
 	comment "Media capture/analog TV support"
 source "drivers/media/pci/ivtv/Kconfig"
-source "drivers/media/pci/zoran/Kconfig"
 source "drivers/media/pci/saa7146/Kconfig"
-source "drivers/media/pci/solo6x10/Kconfig"
-source "drivers/media/pci/tw68/Kconfig"
 source "drivers/media/pci/dt3155/Kconfig"
 endif
 
@@ -49,6 +49,7 @@
 source "drivers/media/pci/ngene/Kconfig"
 source "drivers/media/pci/ddbridge/Kconfig"
 source "drivers/media/pci/smipcie/Kconfig"
+source "drivers/media/pci/netup_unidvb/Kconfig"
 endif
 
 endif #MEDIA_PCI_SUPPORT
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 23ce53b..5f8aacb 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -12,7 +12,8 @@
 		ngene/		\
 		ddbridge/	\
 		saa7146/	\
-		smipcie/
+		smipcie/	\
+		netup_unidvb/
 
 obj-$(CONFIG_VIDEO_IVTV) += ivtv/
 obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 00f0880..57c7f58 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -160,7 +160,6 @@
 void
 btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips)
 {
-	struct v4l2_clip swap;
 	int i,j,n;
 
 	if (nclips < 2)
@@ -168,9 +167,7 @@
 	for (i = nclips-2; i >= 0; i--) {
 		for (n = 0, j = 0; j <= i; j++) {
 			if (clips[j].c.left > clips[j+1].c.left) {
-				swap = clips[j];
-				clips[j] = clips[j+1];
-				clips[j+1] = swap;
+				swap(clips[j], clips[j + 1]);
 				n++;
 			}
 		}
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 67c8d6b..a75c53d 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -194,21 +194,18 @@
 static void bttv_rc5_timer_end(unsigned long data)
 {
 	struct bttv_ir *ir = (struct bttv_ir *)data;
-	struct timeval tv;
+	ktime_t tv;
 	u32 gap, rc5, scancode;
 	u8 toggle, command, system;
 
 	/* get time */
-	do_gettimeofday(&tv);
+	tv = ktime_get();
 
+	gap = ktime_to_us(ktime_sub(tv, ir->base_time));
 	/* avoid overflow with gap >1s */
-	if (tv.tv_sec - ir->base_time.tv_sec > 1) {
+	if (gap > USEC_PER_SEC) {
 		gap = 200000;
-	} else {
-		gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) +
-		    tv.tv_usec - ir->base_time.tv_usec;
 	}
-
 	/* signal we're ready to start a new code */
 	ir->active = false;
 
@@ -249,7 +246,7 @@
 static int bttv_rc5_irq(struct bttv *btv)
 {
 	struct bttv_ir *ir = btv->remote;
-	struct timeval tv;
+	ktime_t tv;
 	u32 gpio;
 	u32 gap;
 	unsigned long current_jiffies;
@@ -259,14 +256,12 @@
 
 	/* get time of bit */
 	current_jiffies = jiffies;
-	do_gettimeofday(&tv);
+	tv = ktime_get();
 
+	gap = ktime_to_us(ktime_sub(tv, ir->base_time));
 	/* avoid overflow with gap >1s */
-	if (tv.tv_sec - ir->base_time.tv_sec > 1) {
+	if (gap > USEC_PER_SEC) {
 		gap = 200000;
-	} else {
-		gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) +
-		    tv.tv_usec - ir->base_time.tv_usec;
 	}
 
 	dprintk("RC5 IRQ: gap %d us for %s\n",
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index a444cfb..31bf79d 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -140,7 +140,7 @@
 	bool			rc5_gpio;   /* Is RC5 legacy GPIO enabled? */
 	u32                     last_bit;   /* last raw bit seen */
 	u32                     code;       /* raw code under construction */
-	struct timeval          base_time;  /* time of last seen code */
+	ktime_t                 base_time;  /* time of last seen code */
 	bool                    active;     /* building raw code */
 };
 
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 6a1c008..1f88ccc 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -1,7 +1,8 @@
 config VIDEO_COBALT
 	tristate "Cisco Cobalt support"
 	depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
-	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
+	depends on GPIOLIB || COMPILE_TEST
 	depends on SND
 	select I2C_ALGOBIT
 	select VIDEO_ADV7604
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index b994b8e..8fed61e 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -339,15 +339,16 @@
 	}
 
 	if (pcie_link_get_lanes(cobalt) != 8) {
-		cobalt_err("PCI Express link width is not 8 lanes (%d)\n",
+		cobalt_warn("PCI Express link width is %d lanes.\n",
 				pcie_link_get_lanes(cobalt));
 		if (pcie_bus_link_get_lanes(cobalt) < 8)
-			cobalt_err("The current slot only supports %d lanes, at least 8 are needed\n",
+			cobalt_warn("The current slot only supports %d lanes, for best performance 8 are needed\n",
 					pcie_bus_link_get_lanes(cobalt));
-		else
+		if (pcie_link_get_lanes(cobalt) != pcie_bus_link_get_lanes(cobalt)) {
 			cobalt_err("The card is most likely not seated correctly in the PCIe slot\n");
-		ret = -EIO;
-		goto err_disable;
+			ret = -EIO;
+			goto err_disable;
+		}
 	}
 
 	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index b40c2d1..9756fd3 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -28,6 +28,7 @@
 
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
 #include <media/adv7604.h>
 #include <media/adv7842.h>
 
@@ -641,13 +642,17 @@
 	struct cobalt_stream *s = video_drvdata(file);
 	int err;
 
-	if (vb2_is_busy(&s->q))
-		return -EBUSY;
-
 	if (s->input == 1) {
 		*timings = cea1080p60;
 		return 0;
 	}
+
+	if (v4l2_match_dv_timings(timings, &s->timings, 0))
+		return 0;
+
+	if (vb2_is_busy(&s->q))
+		return -EBUSY;
+
 	err = v4l2_subdev_call(s->sd,
 			video, s_dv_timings, timings);
 	if (!err) {
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index af52def..f752f39 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -313,13 +313,6 @@
 
 static const struct v4l2_subdev_core_ops subdev_core_ops = {
 	.log_status = subdev_log_status,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 };
 
 static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 87990ec..2ce310b 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -140,12 +140,10 @@
 		/* Stop RISC Engine */
 		mmwrite(0, MANTIS_DMA_CTL);
 
-		goto err;
+		return err;
 	}
 
 	return 0;
-err:
-	return err;
 }
 EXPORT_SYMBOL_GPL(mantis_dma_init);
 
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
new file mode 100644
index 0000000..f277b0b
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -0,0 +1,12 @@
+config DVB_NETUP_UNIDVB
+	tristate "NetUP Universal DVB card support"
+	depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER
+	select VIDEOBUF2_DVB
+	select VIDEOBUF2_VMALLOC
+	select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
+	---help---
+	  Support for the NetUP PCI Express Universal DVB card.
+
diff --git a/drivers/media/pci/netup_unidvb/Makefile b/drivers/media/pci/netup_unidvb/Makefile
new file mode 100644
index 0000000..ee6ae05
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/Makefile
@@ -0,0 +1,9 @@
+netup-unidvb-objs += netup_unidvb_core.o
+netup-unidvb-objs += netup_unidvb_i2c.o
+netup-unidvb-objs += netup_unidvb_ci.o
+netup-unidvb-objs += netup_unidvb_spi.o
+
+obj-$(CONFIG_DVB_NETUP_UNIDVB) += netup-unidvb.o
+
+ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb.h b/drivers/media/pci/netup_unidvb/netup_unidvb.h
new file mode 100644
index 0000000..fa95110
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb.h
@@ -0,0 +1,133 @@
+/*
+ * netup_unidvb.h
+ *
+ * Data type definitions for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dvb.h>
+#include <dvb_ca_en50221.h>
+
+#define NETUP_UNIDVB_NAME	"netup_unidvb"
+#define NETUP_UNIDVB_VERSION	"0.0.1"
+#define NETUP_VENDOR_ID		0x1b55
+#define NETUP_PCI_DEV_REVISION  0x2
+
+/* IRQ-related registers */
+#define REG_ISR			0x4890
+#define REG_ISR_MASKED		0x4892
+#define REG_IMASK_SET		0x4894
+#define REG_IMASK_CLEAR		0x4896
+/* REG_ISR register bits */
+#define NETUP_UNIDVB_IRQ_SPI	(1 << 0)
+#define NETUP_UNIDVB_IRQ_I2C0	(1 << 1)
+#define NETUP_UNIDVB_IRQ_I2C1	(1 << 2)
+#define NETUP_UNIDVB_IRQ_FRA0	(1 << 4)
+#define NETUP_UNIDVB_IRQ_FRA1	(1 << 5)
+#define NETUP_UNIDVB_IRQ_FRB0	(1 << 6)
+#define NETUP_UNIDVB_IRQ_FRB1	(1 << 7)
+#define NETUP_UNIDVB_IRQ_DMA1	(1 << 8)
+#define NETUP_UNIDVB_IRQ_DMA2	(1 << 9)
+#define NETUP_UNIDVB_IRQ_CI	(1 << 10)
+#define NETUP_UNIDVB_IRQ_CAM0	(1 << 11)
+#define NETUP_UNIDVB_IRQ_CAM1	(1 << 12)
+
+struct netup_dma {
+	u8			num;
+	spinlock_t		lock;
+	struct netup_unidvb_dev	*ndev;
+	struct netup_dma_regs	*regs;
+	u32			ring_buffer_size;
+	u8			*addr_virt;
+	dma_addr_t		addr_phys;
+	u64			addr_last;
+	u32			high_addr;
+	u32			data_offset;
+	u32			data_size;
+	struct list_head	free_buffers;
+	struct work_struct	work;
+	struct timer_list	timeout;
+};
+
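+/*
+ * I2C transfer state machine: netup_i2c_interrupt() moves the state from
+ * STATE_WAIT to STATE_WANT_READ/STATE_WANT_WRITE (a FIFO needs service),
+ * STATE_DONE or STATE_ERROR; netup_i2c_xfer() sleeps on the wait queue
+ * until the state leaves STATE_WAIT.
+ */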
+enum netup_i2c_state {
+	STATE_DONE,
+	STATE_WAIT,
+	STATE_WANT_READ,
+	STATE_WANT_WRITE,
+	STATE_ERROR
+};
+
+struct netup_i2c_regs;
+
+struct netup_i2c {
+	spinlock_t			lock;
+	wait_queue_head_t		wq;
+	struct i2c_adapter		adap;
+	struct netup_unidvb_dev		*dev;
+	struct netup_i2c_regs		*regs;
+	struct i2c_msg			*msg;
+	enum netup_i2c_state		state;
+	u32				xmit_size;
+};
+
+struct netup_ci_state {
+	struct dvb_ca_en50221		ca;
+	u8 __iomem			*membase8_config;
+	u8 __iomem			*membase8_io;
+	struct netup_unidvb_dev		*dev;
+	int status;
+	int nr;
+};
+
+struct netup_spi;
+
+struct netup_unidvb_dev {
+	struct pci_dev			*pci_dev;
+	int				pci_bus;
+	int				pci_slot;
+	int				pci_func;
+	int				board_num;
+	int				old_fw;
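+	/* BAR0: control registers; BAR1: CI CAM memory windows */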
+	u32 __iomem			*lmmio0;
+	u8 __iomem			*bmmio0;
+	u32 __iomem			*lmmio1;
+	u8 __iomem			*bmmio1;
+	u8				*dma_virt;
+	dma_addr_t			dma_phys;
+	u32				dma_size;
+	struct vb2_dvb_frontends	frontends[2];
+	struct netup_i2c		i2c[2];
+	struct workqueue_struct		*wq;
+	struct netup_dma		dma[2];
+	struct netup_ci_state		ci[2];
+	struct netup_spi		*spi;
+};
+
+int netup_i2c_register(struct netup_unidvb_dev *ndev);
+void netup_i2c_unregister(struct netup_unidvb_dev *ndev);
+irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev);
+irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c);
+irqreturn_t netup_spi_interrupt(struct netup_spi *spi);
+int netup_unidvb_ci_register(struct netup_unidvb_dev *dev,
+			     int num, struct pci_dev *pci_dev);
+void netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num);
+int netup_spi_init(struct netup_unidvb_dev *ndev);
+void netup_spi_release(struct netup_unidvb_dev *ndev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
new file mode 100644
index 0000000..751b51b
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
@@ -0,0 +1,248 @@
+/*
+ * netup_unidvb_ci.c
+ *
+ * DVB CAM support for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kmod.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "netup_unidvb.h"
+
+/* CI slot 0 base address */
+#define CAM0_CONFIG		0x0
+#define CAM0_IO			0x8000
+#define CAM0_MEM		0x10000
+#define CAM0_SZ			32
+/* CI slot 1 base address */
+#define CAM1_CONFIG		0x20000
+#define CAM1_IO			0x28000
+#define CAM1_MEM		0x30000
+#define CAM1_SZ			32
+/* ctrlstat registers */
+#define CAM_CTRLSTAT_READ_SET	0x4980
+#define CAM_CTRLSTAT_CLR	0x4982
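+/*
+ * CAM_CTRLSTAT is a set/clear register pair: writing a mask to READ_SET
+ * sets those control bits (reads return the current status), writing to
+ * CLR clears them; slot 1 uses the same bits shifted up by CAM1_SHIFT.
+ */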
+/* register bits */
+#define BIT_CAM_STCHG		(1<<0)
+#define BIT_CAM_PRESENT		(1<<1)
+#define BIT_CAM_RESET		(1<<2)
+#define BIT_CAM_BYPASS		(1<<3)
+#define BIT_CAM_READY		(1<<4)
+#define BIT_CAM_ERROR		(1<<5)
+#define BIT_CAM_OVERCURR	(1<<6)
+/* BIT_CAM_BYPASS bit shift for SLOT 1 */
+#define CAM1_SHIFT 8
+
+irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev)
+{
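+	/* Clear the CAM status-change bits for both slots: bit 0 for
+	 * slot 0 and bit 8 (BIT_CAM_STCHG << CAM1_SHIFT) for slot 1. */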
+	writew(0x101, ndev->bmmio0 + CAM_CTRLSTAT_CLR);
+	return IRQ_HANDLED;
+}
+
+static int netup_unidvb_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221,
+				       int slot)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+	u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+
+	dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x\n",
+		__func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+	if (slot != 0)
+		return -EINVAL;
+	/* pass data to CAM module */
+	writew(BIT_CAM_BYPASS << shift, dev->bmmio0 + CAM_CTRLSTAT_CLR);
+	dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x done\n",
+		__func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+	return 0;
+}
+
+static int netup_unidvb_ci_slot_shutdown(struct dvb_ca_en50221 *en50221,
+					 int slot)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+
+	dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__);
+	return 0;
+}
+
+static int netup_unidvb_ci_slot_reset(struct dvb_ca_en50221 *en50221,
+				      int slot)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+	unsigned long timeout = 0;
+	u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+	u16 ci_stat = 0;
+	int reset_counter = 3;
+
+	dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n",
+		__func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+reset:
+	timeout = jiffies + msecs_to_jiffies(5000);
+	/* start reset */
+	writew(BIT_CAM_RESET << shift, dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+	dev_dbg(&dev->pci_dev->dev, "%s(): waiting for reset\n", __func__);
+	/* wait until reset done */
+	while (time_before(jiffies, timeout)) {
+		ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+		if (ci_stat & (BIT_CAM_READY << shift))
+			break;
+		usleep_range(1000, 1500);
+	}
+	if (!(ci_stat & (BIT_CAM_READY << shift)) && reset_counter > 0) {
+		dev_dbg(&dev->pci_dev->dev,
+			"%s(): CAMP reset timeout! Will try again..\n",
+			 __func__);
+		reset_counter--;
+		goto reset;
+	}
+	return 0;
+}
+
+static int netup_unidvb_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+					    int slot, int open)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+	u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+	u16 ci_stat = 0;
+
+	dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n",
+		__func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+	ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+	if (ci_stat & (BIT_CAM_READY << shift)) {
+		state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
+			DVB_CA_EN50221_POLL_CAM_READY;
+	} else if (ci_stat & (BIT_CAM_PRESENT << shift)) {
+		state->status = DVB_CA_EN50221_POLL_CAM_PRESENT;
+	} else {
+		state->status = 0;
+	}
+	return state->status;
+}
+
+static int netup_unidvb_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+					      int slot, int addr)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+	u8 val = state->membase8_config[addr];
+
+	dev_dbg(&dev->pci_dev->dev,
+		"%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
+	return val;
+}
+
+static int netup_unidvb_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+					       int slot, int addr, u8 data)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+
+	dev_dbg(&dev->pci_dev->dev,
+		"%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
+	state->membase8_config[addr] = data;
+	return 0;
+}
+
+static int netup_unidvb_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
+					int slot, u8 addr)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+	u8 val = state->membase8_io[addr];
+
+	dev_dbg(&dev->pci_dev->dev,
+		"%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
+	return val;
+}
+
+static int netup_unidvb_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221,
+					 int slot, u8 addr, u8 data)
+{
+	struct netup_ci_state *state = en50221->data;
+	struct netup_unidvb_dev *dev = state->dev;
+
+	dev_dbg(&dev->pci_dev->dev,
+		"%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
+	state->membase8_io[addr] = data;
+	return 0;
+}
+
+int netup_unidvb_ci_register(struct netup_unidvb_dev *dev,
+			     int num, struct pci_dev *pci_dev)
+{
+	int result;
+	struct netup_ci_state *state;
+
+	if (num < 0 || num > 1) {
+		dev_err(&pci_dev->dev, "%s(): invalid CI adapter %d\n",
+			__func__, num);
+		return -EINVAL;
+	}
+	state = &dev->ci[num];
+	state->nr = num;
+	state->membase8_config = dev->bmmio1 +
+		((num == 0) ? CAM0_CONFIG : CAM1_CONFIG);
+	state->membase8_io = dev->bmmio1 +
+		((num == 0) ? CAM0_IO : CAM1_IO);
+	state->dev = dev;
+	state->ca.owner = THIS_MODULE;
+	state->ca.read_attribute_mem = netup_unidvb_ci_read_attribute_mem;
+	state->ca.write_attribute_mem = netup_unidvb_ci_write_attribute_mem;
+	state->ca.read_cam_control = netup_unidvb_ci_read_cam_ctl;
+	state->ca.write_cam_control = netup_unidvb_ci_write_cam_ctl;
+	state->ca.slot_reset = netup_unidvb_ci_slot_reset;
+	state->ca.slot_shutdown = netup_unidvb_ci_slot_shutdown;
+	state->ca.slot_ts_enable = netup_unidvb_ci_slot_ts_ctl;
+	state->ca.poll_slot_status = netup_unidvb_poll_ci_slot_status;
+	state->ca.data = state;
+	result = dvb_ca_en50221_init(&dev->frontends[num].adapter,
+		&state->ca, 0, 1);
+	if (result < 0) {
+		dev_err(&pci_dev->dev,
+			"%s(): dvb_ca_en50221_init result %d\n",
+			__func__, result);
+		return result;
+	}
+	writew(NETUP_UNIDVB_IRQ_CI, dev->bmmio0 + REG_IMASK_SET);
+	dev_info(&pci_dev->dev,
+		"%s(): CI adapter %d init done\n", __func__, num);
+	return 0;
+}
+
+void netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num)
+{
+	struct netup_ci_state *state;
+
+	dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__);
+	if (num < 0 || num > 1) {
+		dev_err(&dev->pci_dev->dev, "%s(): invalid CI adapter %d\n",
+				__func__, num);
+		return;
+	}
+	state = &dev->ci[num];
+	dvb_ca_en50221_release(&state->ca);
+}
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
new file mode 100644
index 0000000..6d8bf627
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -0,0 +1,1001 @@
+/*
+ * netup_unidvb_core.c
+ *
+ * Main module for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kmod.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "netup_unidvb.h"
+#include "cxd2841er.h"
+#include "horus3a.h"
+#include "ascot2e.h"
+#include "lnbh25.h"
+
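+/*
+ * Set spi_enable=1 to register the on-board SPI flash controller; it is
+ * forced on when the card reports an unexpected PCI revision, see
+ * netup_unidvb_initdev().
+ */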
+static int spi_enable;
+module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_VERSION(NETUP_UNIDVB_VERSION);
+MODULE_LICENSE("GPL");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* Avalon-MM PCI-E registers */
+#define	AVL_PCIE_IENR		0x50
+#define AVL_PCIE_ISR		0x40
+#define AVL_IRQ_ENABLE		0x80
+#define AVL_IRQ_ASSERTED	0x80
+/* GPIO registers */
+#define GPIO_REG_IO		0x4880
+#define GPIO_REG_IO_TOGGLE	0x4882
+#define GPIO_REG_IO_SET		0x4884
+#define GPIO_REG_IO_CLEAR	0x4886
+/* GPIO bits */
+#define GPIO_FEA_RESET		(1 << 0)
+#define GPIO_FEB_RESET		(1 << 1)
+#define GPIO_RFA_CTL		(1 << 2)
+#define GPIO_RFB_CTL		(1 << 3)
+#define GPIO_FEA_TU_RESET	(1 << 4)
+#define GPIO_FEB_TU_RESET	(1 << 5)
+/* DMA base address */
+#define NETUP_DMA0_ADDR		0x4900
+#define NETUP_DMA1_ADDR		0x4940
+/* 8 DMA blocks * 128 packets * 188 bytes */
+#define NETUP_DMA_BLOCKS_COUNT	8
+#define NETUP_DMA_PACKETS_COUNT	128
+/* DMA status bits */
+#define BIT_DMA_RUN		1
+#define BIT_DMA_ERROR		2
+#define BIT_DMA_IRQ		0x200
+
+/**
+ * struct netup_dma_regs - the map of DMA module registers
+ * @ctrlstat_set:	Control register, write to set control bits
+ * @ctrlstat_clear:	Control register, write to clear control bits
+ * @start_addr_lo:	DMA ring buffer start address, lower part
+ * @start_addr_hi:	DMA ring buffer start address, higher part
+ * @size:		DMA ring buffer size register
+ *			Bits [0-7]:	DMA packet size, 188 bytes
+ *			Bits [8-15]:	packets count in block, 128 packets
+ *			Bits [24-31]:	blocks count, 8 blocks
+ * @timeout:		DMA timeout in units of 8 ns
+ *			For example, a value of 375000000 equals 3 sec
+ * @curr_addr_lo:	Current ring buffer head address, lower part
+ * @curr_addr_hi:	Current ring buffer head address, higher part
+ * @stat_pkt_received:	Statistic register, not tested
+ * @stat_pkt_accepted:	Statistic register, not tested
+ * @stat_pkt_overruns:	Statistic register, not tested
+ * @stat_pkt_underruns:	Statistic register, not tested
+ * @stat_fifo_overruns:	Statistic register, not tested
+ */
+struct netup_dma_regs {
+	__le32	ctrlstat_set;
+	__le32	ctrlstat_clear;
+	__le32	start_addr_lo;
+	__le32	start_addr_hi;
+	__le32	size;
+	__le32	timeout;
+	__le32	curr_addr_lo;
+	__le32	curr_addr_hi;
+	__le32	stat_pkt_received;
+	__le32	stat_pkt_accepted;
+	__le32	stat_pkt_overruns;
+	__le32	stat_pkt_underruns;
+	__le32	stat_fifo_overruns;
+} __packed __aligned(1);
+
+struct netup_unidvb_buffer {
+	struct vb2_buffer	vb;
+	struct list_head	list;
+	u32			size;
+};
+
+static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
+static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
+
+static struct cxd2841er_config demod_config = {
+	.i2c_addr = 0xc8
+};
+
+static struct horus3a_config horus3a_conf = {
+	.i2c_address = 0xc0,
+	.xtal_freq_mhz = 16,
+	.set_tuner_callback = netup_unidvb_tuner_ctrl
+};
+
+static struct ascot2e_config ascot2e_conf = {
+	.i2c_address = 0xc2,
+	.set_tuner_callback = netup_unidvb_tuner_ctrl
+};
+
+static struct lnbh25_config lnbh25_conf = {
+	.i2c_address = 0x10,
+	.data2_config = LNBH25_TEN | LNBH25_EXTM
+};
+
+static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
+{
+	u8 reg, mask;
+	struct netup_dma *dma = priv;
+	struct netup_unidvb_dev *ndev;
+
+	if (!priv)
+		return -EINVAL;
+	ndev = dma->ndev;
+	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
+		__func__, dma->num, is_dvb_tc);
+	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
+	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
+	if (!is_dvb_tc)
+		reg |= mask;
+	else
+		reg &= ~mask;
+	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
+	return 0;
+}
+
+static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
+{
+	u16 gpio_reg;
+
+	/* enable PCI-E interrupts */
+	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
+	/* unreset frontends bits[0:1] */
+	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
+	msleep(100);
+	gpio_reg =
+		GPIO_FEA_RESET | GPIO_FEB_RESET |
+		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
+		GPIO_RFA_CTL | GPIO_RFB_CTL;
+	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
+	dev_dbg(&ndev->pci_dev->dev,
+		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
+		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
+		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
+}
+
+static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
+{
+	u32 irq_mask = (dma->num == 0 ?
+		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);
+
+	dev_dbg(&dma->ndev->pci_dev->dev,
+		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
+	if (enable) {
+		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
+		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
+	} else {
+		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
+		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
+	}
+}
+
+static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
+{
+	u64 addr_curr;
+	u32 size;
+	unsigned long flags;
+	struct device *dev = &dma->ndev->pci_dev->dev;
+
+	spin_lock_irqsave(&dma->lock, flags);
+	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
+		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
+	/* clear IRQ */
+	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
+	/* sanity check */
+	if (addr_curr < dma->addr_phys ||
+			addr_curr > dma->addr_phys +  dma->ring_buffer_size) {
+		if (addr_curr != 0) {
+			dev_err(dev,
+				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
+				__func__, addr_curr, (u64)dma->addr_phys,
+				(u64)(dma->addr_phys + dma->ring_buffer_size));
+		}
+		goto irq_handled;
+	}
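+	/* Bytes produced since the previous interrupt; the second branch
+	 * accounts for the ring buffer wrapping around. */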
+	size = (addr_curr >= dma->addr_last) ?
+		(u32)(addr_curr - dma->addr_last) :
+		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
+	if (dma->data_size != 0) {
+		dev_warn_ratelimited(dev,
+			"%s(): lost interrupt, data size %d\n",
+			__func__, dma->data_size);
+		dma->data_size += size;
+	}
+	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
+		dma->data_size = size;
+		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
+	}
+	dma->addr_last = addr_curr;
+	queue_work(dma->ndev->wq, &dma->work);
+irq_handled:
+	spin_unlock_irqrestore(&dma->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
+{
+	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
+	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
+	u32 reg40, reg_isr;
+	irqreturn_t iret = IRQ_NONE;
+
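+	/* The PCI IRQ line is shared: mask the bridge interrupt output,
+	 * check AVL_IRQ_ASSERTED to see whether this device raised the
+	 * line, dispatch to the matching sub-handler, then unmask. */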
+	/* disable interrupts */
+	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
+	/* check IRQ source */
+	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
+	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
+		/* IRQ is being signaled */
+		reg_isr = readw(ndev->bmmio0 + REG_ISR);
+		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
+			iret = netup_i2c_interrupt(&ndev->i2c[0]);
+		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
+			iret = netup_i2c_interrupt(&ndev->i2c[1]);
+		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
+			iret = netup_spi_interrupt(ndev->spi);
+		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
+			iret = netup_dma_interrupt(&ndev->dma[0]);
+		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
+			iret = netup_dma_interrupt(&ndev->dma[1]);
+		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
+			iret = netup_ci_interrupt(ndev);
+		} else {
+			dev_err(&pci_dev->dev,
+				"%s(): unknown interrupt 0x%x\n",
+				__func__, reg_isr);
+		}
+	}
+	/* re-enable interrupts */
+	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
+	return iret;
+}
+
+static int netup_unidvb_queue_setup(struct vb2_queue *vq,
+				    const struct v4l2_format *fmt,
+				    unsigned int *nbuffers,
+				    unsigned int *nplanes,
+				    unsigned int sizes[],
+				    void *alloc_ctxs[])
+{
+	struct netup_dma *dma = vb2_get_drv_priv(vq);
+
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+
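+	/* Single-plane buffers, each sized to hold one DMA block of
+	 * NETUP_DMA_PACKETS_COUNT (128) TS packets of 188 bytes. */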
+	*nplanes = 1;
+	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
+		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
+	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
+		__func__, *nbuffers, sizes[0]);
+	return 0;
+}
+
+static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
+{
+	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+	struct netup_unidvb_buffer *buf = container_of(vb,
+				struct netup_unidvb_buffer, vb);
+
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
+	buf->size = 0;
+	return 0;
+}
+
+static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
+{
+	unsigned long flags;
+	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+	struct netup_unidvb_buffer *buf = container_of(vb,
+				struct netup_unidvb_buffer, vb);
+
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
+	spin_lock_irqsave(&dma->lock, flags);
+	list_add_tail(&buf->list, &dma->free_buffers);
+	spin_unlock_irqrestore(&dma->lock, flags);
+	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
+}
+
+static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct netup_dma *dma = vb2_get_drv_priv(q);
+
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+	netup_unidvb_dma_enable(dma, 1);
+	return 0;
+}
+
+static void netup_unidvb_stop_streaming(struct vb2_queue *q)
+{
+	struct netup_dma *dma = vb2_get_drv_priv(q);
+
+	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+	netup_unidvb_dma_enable(dma, 0);
+	netup_unidvb_queue_cleanup(dma);
+}
+
+static struct vb2_ops dvb_qops = {
+	.queue_setup		= netup_unidvb_queue_setup,
+	.buf_prepare		= netup_unidvb_buf_prepare,
+	.buf_queue		= netup_unidvb_buf_queue,
+	.start_streaming	= netup_unidvb_start_streaming,
+	.stop_streaming		= netup_unidvb_stop_streaming,
+};
+
+static int netup_unidvb_queue_init(struct netup_dma *dma,
+				   struct vb2_queue *vb_queue)
+{
+	int res;
+
+	/* Init videobuf2 queue structure */
+	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+	vb_queue->drv_priv = dma;
+	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
+	vb_queue->ops = &dvb_qops;
+	vb_queue->mem_ops = &vb2_vmalloc_memops;
+	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	res = vb2_queue_init(vb_queue);
+	if (res != 0) {
+		dev_err(&dma->ndev->pci_dev->dev,
+			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
+	}
+	return res;
+}
+
+static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
+				 int num)
+{
+	struct vb2_dvb_frontend *fe0, *fe1, *fe2;
+
+	if (num < 0 || num > 1) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to init DVB bus %d\n", __func__, num);
+		return -ENODEV;
+	}
+	mutex_init(&ndev->frontends[num].lock);
+	INIT_LIST_HEAD(&ndev->frontends[num].felist);
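+	/* Each DVB adapter carries three frontends that share one CXD2841ER
+	 * demod: id 0 is DVB-S/S2 (Horus3A tuner plus LNBH25 SEC), id 1 is
+	 * DVB-T/T2 and id 2 is DVB-C, both behind the Ascot2E tuner. */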
+	if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
+		vb2_dvb_alloc_frontend(
+			&ndev->frontends[num], 2) == NULL ||
+		vb2_dvb_alloc_frontend(
+			&ndev->frontends[num], 3) == NULL) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to to alllocate vb2_dvb_frontend\n",
+			__func__);
+		return -ENOMEM;
+	}
+	fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
+	fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
+	fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
+	if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): frontends has not been allocated\n", __func__);
+		return -EINVAL;
+	}
+	netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
+	netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
+	netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
+	fe0->dvb.name = "netup_fe0";
+	fe1->dvb.name = "netup_fe1";
+	fe2->dvb.name = "netup_fe2";
+	fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
+		&demod_config, &ndev->i2c[num].adap);
+	if (fe0->dvb.frontend == NULL) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-S/S2 frontend\n",
+			__func__);
+		goto frontend_detach;
+	}
+	horus3a_conf.set_tuner_priv = &ndev->dma[num];
+	if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
+			&horus3a_conf, &ndev->i2c[num].adap)) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-S/S2 tuner frontend\n",
+			__func__);
+		goto frontend_detach;
+	}
+	if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
+			&lnbh25_conf, &ndev->i2c[num].adap)) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach SEC frontend\n", __func__);
+		goto frontend_detach;
+	}
+	/* DVB-T/T2 frontend */
+	fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
+		&demod_config, &ndev->i2c[num].adap);
+	if (fe1->dvb.frontend == NULL) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-T frontend\n", __func__);
+		goto frontend_detach;
+	}
+	fe1->dvb.frontend->id = 1;
+	ascot2e_conf.set_tuner_priv = &ndev->dma[num];
+	if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
+			&ascot2e_conf, &ndev->i2c[num].adap)) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-T tuner frontend\n",
+			__func__);
+		goto frontend_detach;
+	}
+	/* DVB-C/C2 frontend */
+	fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
+				&demod_config, &ndev->i2c[num].adap);
+	if (fe2->dvb.frontend == NULL) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-C frontend\n", __func__);
+		goto frontend_detach;
+	}
+	fe2->dvb.frontend->id = 2;
+	if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
+			&ascot2e_conf, &ndev->i2c[num].adap)) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to attach DVB-T/C tuner frontend\n",
+			__func__);
+		goto frontend_detach;
+	}
+
+	if (vb2_dvb_register_bus(&ndev->frontends[num],
+			THIS_MODULE, NULL,
+			&ndev->pci_dev->dev, adapter_nr, 1)) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): unable to register DVB bus %d\n",
+			__func__, num);
+		goto frontend_detach;
+	}
+	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
+	return 0;
+frontend_detach:
+	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
+	return -EINVAL;
+}
+
+static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
+{
+	if (num < 0 || num > 1) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): unable to unregister DVB bus %d\n",
+			__func__, num);
+		return;
+	}
+	vb2_dvb_unregister_bus(&ndev->frontends[num]);
+	dev_info(&ndev->pci_dev->dev,
+		"%s(): DVB bus %d unregistered\n", __func__, num);
+}
+
+static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
+{
+	int res;
+
+	res = netup_unidvb_dvb_init(ndev, 0);
+	if (res)
+		return res;
+	res = netup_unidvb_dvb_init(ndev, 1);
+	if (res) {
+		netup_unidvb_dvb_fini(ndev, 0);
+		return res;
+	}
+	return 0;
+}
+
+static int netup_unidvb_ring_copy(struct netup_dma *dma,
+				  struct netup_unidvb_buffer *buf)
+{
+	u32 copy_bytes, ring_bytes;
+	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
+	u8 *p = vb2_plane_vaddr(&buf->vb, 0);
+	struct netup_unidvb_dev *ndev = dma->ndev;
+
+	if (p == NULL) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): buffer is NULL\n", __func__);
+		return -EINVAL;
+	}
+	p += buf->size;
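+	/* Copy in up to two steps: first the tail of the ring up to
+	 * ring_buffer_size, then (if the vb2 buffer still has room) the
+	 * wrapped part from the start of the ring. */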
+	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
+		ring_bytes = dma->ring_buffer_size - dma->data_offset;
+		copy_bytes = (ring_bytes > buff_bytes) ?
+			buff_bytes : ring_bytes;
+		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+		p += copy_bytes;
+		buf->size += copy_bytes;
+		buff_bytes -= copy_bytes;
+		dma->data_size -= copy_bytes;
+		dma->data_offset += copy_bytes;
+		if (dma->data_offset == dma->ring_buffer_size)
+			dma->data_offset = 0;
+	}
+	if (buff_bytes > 0) {
+		ring_bytes = dma->data_size;
+		copy_bytes = (ring_bytes > buff_bytes) ?
+				buff_bytes : ring_bytes;
+		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+		buf->size += copy_bytes;
+		dma->data_size -= copy_bytes;
+		dma->data_offset += copy_bytes;
+		if (dma->data_offset == dma->ring_buffer_size)
+			dma->data_offset = 0;
+	}
+	return 0;
+}
+
+static void netup_unidvb_dma_worker(struct work_struct *work)
+{
+	struct netup_dma *dma = container_of(work, struct netup_dma, work);
+	struct netup_unidvb_dev *ndev = dma->ndev;
+	struct netup_unidvb_buffer *buf;
+	unsigned long flags;
+
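+	/* Drain the DMA ring into queued vb2 buffers; a buffer completes
+	 * once it holds a full DMA block (128 TS packets). */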
+	spin_lock_irqsave(&dma->lock, flags);
+	if (dma->data_size == 0) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): data_size == 0\n", __func__);
+		goto work_done;
+	}
+	while (dma->data_size > 0) {
+		if (list_empty(&dma->free_buffers)) {
+			dev_dbg(&ndev->pci_dev->dev,
+				"%s(): no free buffers\n", __func__);
+			goto work_done;
+		}
+		buf = list_first_entry(&dma->free_buffers,
+			struct netup_unidvb_buffer, list);
+		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
+			dev_dbg(&ndev->pci_dev->dev,
+				"%s(): buffer overflow, size %d\n",
+				__func__, buf->size);
+			goto work_done;
+		}
+		if (netup_unidvb_ring_copy(dma, buf))
+			goto work_done;
+		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
+			list_del(&buf->list);
+			dev_dbg(&ndev->pci_dev->dev,
+				"%s(): buffer %p done, size %d\n",
+				__func__, buf, buf->size);
+			v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+			vb2_set_plane_payload(&buf->vb, 0, buf->size);
+			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+		}
+	}
+work_done:
+	dma->data_size = 0;
+	spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
+{
+	struct netup_unidvb_buffer *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dma->lock, flags);
+	while (!list_empty(&dma->free_buffers)) {
+		buf = list_first_entry(&dma->free_buffers,
+			struct netup_unidvb_buffer, list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+	spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static void netup_unidvb_dma_timeout(unsigned long data)
+{
+	struct netup_dma *dma = (struct netup_dma *)data;
+	struct netup_unidvb_dev *ndev = dma->ndev;
+
+	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
+	netup_unidvb_queue_cleanup(dma);
+}
+
+static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
+{
+	struct netup_dma *dma;
+	struct device *dev = &ndev->pci_dev->dev;
+
+	if (num < 0 || num > 1) {
+		dev_err(dev, "%s(): unable to register DMA%d\n",
+			__func__, num);
+		return -ENODEV;
+	}
+	dma = &ndev->dma[num];
+	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
+	dma->num = num;
+	dma->ndev = ndev;
+	spin_lock_init(&dma->lock);
+	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
+	INIT_LIST_HEAD(&dma->free_buffers);
+	setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
+		(unsigned long)dma);
+	dma->ring_buffer_size = ndev->dma_size / 2;
+	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
+	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
+		dma->ring_buffer_size * num);
+	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
+		__func__, num, dma->addr_virt,
+		(unsigned long long)dma->addr_phys,
+		dma->ring_buffer_size);
+	memset_io(dma->addr_virt, 0, dma->ring_buffer_size);
+	dma->addr_last = dma->addr_phys;
+	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
+	dma->regs = (struct netup_dma_regs *)(num == 0 ?
+		ndev->bmmio0 + NETUP_DMA0_ADDR :
+		ndev->bmmio0 + NETUP_DMA1_ADDR);
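+	/* Encode the ring geometry: blocks << 24 | packets << 8 | packet
+	 * size, i.e. (8 << 24) | (128 << 8) | 188 == 0x080080bc here. */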
+	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
+		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
+	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
+	writel(0, &dma->regs->start_addr_hi);
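+	/* Only the low 30 bits of the bus address go to the DMA engine;
+	 * the high bits (addr_phys & 0xC0000000) are programmed into the
+	 * bridge register at 0x1000, presumably the Avalon-MM address
+	 * translation table, and OR-ed back in by netup_dma_interrupt(). */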
+	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
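+	/* 375000000 ticks * 8 ns = 3 s DMA timeout (see the kernel-doc
+	 * for struct netup_dma_regs above) */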
+	writel(375000000, &dma->regs->timeout);
+	msleep(1000);
+	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
+	return 0;
+}
+
+static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
+{
+	struct netup_dma *dma;
+
+	if (num < 0 || num > 1)
+		return;
+	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
+	dma = &ndev->dma[num];
+	netup_unidvb_dma_enable(dma, 0);
+	msleep(50);
+	cancel_work_sync(&dma->work);
+	del_timer(&dma->timeout);
+}
+
+static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
+{
+	int res;
+
+	res = netup_unidvb_dma_init(ndev, 0);
+	if (res)
+		return res;
+	res = netup_unidvb_dma_init(ndev, 1);
+	if (res) {
+		netup_unidvb_dma_fini(ndev, 0);
+		return res;
+	}
+	netup_unidvb_dma_enable(&ndev->dma[0], 0);
+	netup_unidvb_dma_enable(&ndev->dma[1], 0);
+	return 0;
+}
+
+static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
+				 struct pci_dev *pci_dev)
+{
+	int res;
+
+	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
+	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
+	if (res)
+		return res;
+	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
+	if (res)
+		netup_unidvb_ci_unregister(ndev, 0);
+	return res;
+}
+
+static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
+{
+	if (!request_mem_region(pci_resource_start(pci_dev, 0),
+			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
+			__func__,
+			(unsigned long long)pci_resource_start(pci_dev, 0));
+		return -EBUSY;
+	}
+	if (!request_mem_region(pci_resource_start(pci_dev, 1),
+			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
+			__func__,
+			(unsigned long long)pci_resource_start(pci_dev, 1));
+		release_mem_region(pci_resource_start(pci_dev, 0),
+			pci_resource_len(pci_dev, 0));
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int netup_unidvb_request_modules(struct device *dev)
+{
+	static const char * const modules[] = {
+		"lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
+	};
+	const char * const *curr_mod = modules;
+	int err;
+
+	while (*curr_mod != NULL) {
+		err = request_module(*curr_mod);
+		if (err) {
+			dev_warn(dev, "request_module(%s) failed: %d\n",
+				*curr_mod, err);
+		}
+		++curr_mod;
+	}
+	return 0;
+}
+
+static int netup_unidvb_initdev(struct pci_dev *pci_dev,
+				const struct pci_device_id *pci_id)
+{
+	u8 board_revision;
+	u16 board_vendor;
+	struct netup_unidvb_dev *ndev;
+	int old_firmware = 0;
+
+	netup_unidvb_request_modules(&pci_dev->dev);
+
+	/* Check card revision */
+	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
+		dev_err(&pci_dev->dev,
+			"netup_unidvb: expected card revision %d, got %d\n",
+			NETUP_PCI_DEV_REVISION, pci_dev->revision);
+		dev_err(&pci_dev->dev,
+			"Please upgrade firmware!\n");
+		dev_err(&pci_dev->dev,
+			"Instructions on http://www.netup.tv\n");
+		old_firmware = 1;
+		spi_enable = 1;
+	}
+
+	/* allocate device context */
+	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+	if (!ndev)
+		goto dev_alloc_err;
+	ndev->old_fw = old_firmware;
+	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
+	if (!ndev->wq) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to create workqueue\n", __func__);
+		goto wq_create_err;
+	}
+	ndev->pci_dev = pci_dev;
+	ndev->pci_bus = pci_dev->bus->number;
+	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
+	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
+	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
+	pci_set_drvdata(pci_dev, ndev);
+	/* PCI init */
+	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
+		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);
+
+	if (pci_enable_device(pci_dev)) {
+		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
+			__func__);
+		goto pci_enable_err;
+	}
+	/* read PCI info */
+	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
+	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
+	if (board_vendor != NETUP_VENDOR_ID) {
+		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
+			__func__, board_vendor);
+		goto pci_detect_err;
+	}
+	dev_info(&pci_dev->dev,
+		"%s(): board vendor 0x%x, revision 0x%x\n",
+		__func__, board_vendor, board_revision);
+	pci_set_master(pci_dev);
+	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
+		dev_err(&pci_dev->dev,
+			"%s(): 32bit PCI DMA is not supported\n", __func__);
+		goto pci_detect_err;
+	}
+	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
+	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
+		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+	/* Adjust PCIe completion timeout. */
+	pcie_capability_clear_and_set_word(pci_dev,
+		PCI_EXP_DEVCTL2, 0xf, 0x2);
+
+	if (netup_unidvb_request_mmio(pci_dev)) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to request MMIO regions\n", __func__);
+		goto pci_detect_err;
+	}
+	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
+		pci_resource_len(pci_dev, 0));
+	if (!ndev->lmmio0) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to remap MMIO bar 0\n", __func__);
+		goto pci_bar0_error;
+	}
+	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
+		pci_resource_len(pci_dev, 1));
+	if (!ndev->lmmio1) {
+		dev_err(&pci_dev->dev,
+			"%s(): unable to remap MMIO bar 1\n", __func__);
+		goto pci_bar1_error;
+	}
+	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
+	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
+	dev_info(&pci_dev->dev,
+		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
+		__func__,
+		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
+		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
+		pci_dev->irq);
+	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+			"netup_unidvb", pci_dev) < 0) {
+		dev_err(&pci_dev->dev,
+			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+		goto irq_request_err;
+	}
+	ndev->dma_size = 2 * 188 *
+		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
+	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
+		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
+	if (!ndev->dma_virt) {
+		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
+			__func__);
+		goto dma_alloc_err;
+	}
+	netup_unidvb_dev_enable(ndev);
+	if (spi_enable && netup_spi_init(ndev)) {
+		dev_warn(&pci_dev->dev,
+			"netup_unidvb: SPI flash setup failed\n");
+		goto spi_setup_err;
+	}
+	if (old_firmware) {
+		dev_err(&pci_dev->dev,
+			"netup_unidvb: card initialization was incomplete\n");
+		return 0;
+	}
+	if (netup_i2c_register(ndev)) {
+		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
+		goto i2c_setup_err;
+	}
+	/* enable I2C IRQs */
+	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
+		ndev->bmmio0 + REG_IMASK_SET);
+	usleep_range(5000, 10000);
+	if (netup_unidvb_dvb_setup(ndev)) {
+		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
+		goto dvb_setup_err;
+	}
+	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
+		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
+		goto ci_setup_err;
+	}
+	if (netup_unidvb_dma_setup(ndev)) {
+		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
+		goto dma_setup_err;
+	}
+	dev_info(&pci_dev->dev,
+		"netup_unidvb: device has been initialized\n");
+	return 0;
+dma_setup_err:
+	netup_unidvb_ci_unregister(ndev, 0);
+	netup_unidvb_ci_unregister(ndev, 1);
+ci_setup_err:
+	netup_unidvb_dvb_fini(ndev, 0);
+	netup_unidvb_dvb_fini(ndev, 1);
+dvb_setup_err:
+	netup_i2c_unregister(ndev);
+i2c_setup_err:
+	if (ndev->spi)
+		netup_spi_release(ndev);
+spi_setup_err:
+	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
+			ndev->dma_virt, ndev->dma_phys);
+dma_alloc_err:
+	free_irq(pci_dev->irq, pci_dev);
+irq_request_err:
+	iounmap(ndev->lmmio1);
+pci_bar1_error:
+	iounmap(ndev->lmmio0);
+pci_bar0_error:
+	release_mem_region(pci_resource_start(pci_dev, 0),
+		pci_resource_len(pci_dev, 0));
+	release_mem_region(pci_resource_start(pci_dev, 1),
+		pci_resource_len(pci_dev, 1));
+pci_detect_err:
+	pci_disable_device(pci_dev);
+pci_enable_err:
+	pci_set_drvdata(pci_dev, NULL);
+	destroy_workqueue(ndev->wq);
+wq_create_err:
+	kfree(ndev);
+dev_alloc_err:
+	dev_err(&pci_dev->dev,
+		"%s(): failed to initizalize device\n", __func__);
+	return -EIO;
+}
+
+static void netup_unidvb_finidev(struct pci_dev *pci_dev)
+{
+	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
+
+	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
+	if (!ndev->old_fw) {
+		netup_unidvb_dma_fini(ndev, 0);
+		netup_unidvb_dma_fini(ndev, 1);
+		netup_unidvb_ci_unregister(ndev, 0);
+		netup_unidvb_ci_unregister(ndev, 1);
+		netup_unidvb_dvb_fini(ndev, 0);
+		netup_unidvb_dvb_fini(ndev, 1);
+		netup_i2c_unregister(ndev);
+	}
+	if (ndev->spi)
+		netup_spi_release(ndev);
+	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
+	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
+			ndev->dma_virt, ndev->dma_phys);
+	free_irq(pci_dev->irq, pci_dev);
+	iounmap(ndev->lmmio0);
+	iounmap(ndev->lmmio1);
+	release_mem_region(pci_resource_start(pci_dev, 0),
+		pci_resource_len(pci_dev, 0));
+	release_mem_region(pci_resource_start(pci_dev, 1),
+		pci_resource_len(pci_dev, 1));
+	pci_disable_device(pci_dev);
+	pci_set_drvdata(pci_dev, NULL);
+	destroy_workqueue(ndev->wq);
+	kfree(ndev);
+	dev_info(&pci_dev->dev,
+		"%s(): device has been successfully stopped\n", __func__);
+}
+
+static struct pci_device_id netup_unidvb_pci_tbl[] = {
+	{ PCI_DEVICE(0x1b55, 0x18f6) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);
+
+static struct pci_driver netup_unidvb_pci_driver = {
+	.name     = "netup_unidvb",
+	.id_table = netup_unidvb_pci_tbl,
+	.probe    = netup_unidvb_initdev,
+	.remove   = netup_unidvb_finidev,
+	.suspend  = NULL,
+	.resume   = NULL,
+};
+
+static int __init netup_unidvb_init(void)
+{
+	return pci_register_driver(&netup_unidvb_pci_driver);
+}
+
+static void __exit netup_unidvb_fini(void)
+{
+	pci_unregister_driver(&netup_unidvb_pci_driver);
+}
+
+module_init(netup_unidvb_init);
+module_exit(netup_unidvb_fini);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
new file mode 100644
index 0000000..eaaa2d0
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -0,0 +1,381 @@
+/*
+ * netup_unidvb_i2c.c
+ *
+ * Internal I2C bus driver for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "netup_unidvb.h"
+
+#define NETUP_I2C_BUS0_ADDR		0x4800
+#define NETUP_I2C_BUS1_ADDR		0x4840
+#define NETUP_I2C_TIMEOUT		1000
+
+/* twi_ctrl0_stat reg bits */
+#define TWI_IRQEN_COMPL	0x1
+#define TWI_IRQEN_ANACK 0x2
+#define TWI_IRQEN_DNACK 0x4
+#define TWI_IRQ_COMPL	(TWI_IRQEN_COMPL << 8)
+#define TWI_IRQ_ANACK	(TWI_IRQEN_ANACK << 8)
+#define TWI_IRQ_DNACK	(TWI_IRQEN_DNACK << 8)
+#define TWI_IRQ_TX	0x800
+#define TWI_IRQ_RX	0x1000
+#define TWI_IRQEN	(TWI_IRQEN_COMPL | TWI_IRQEN_ANACK | TWI_IRQEN_DNACK)
+/* twi_addr_ctrl1 reg bits */
+#define TWI_TRANSFER	0x100
+#define TWI_NOSTOP	0x200
+#define TWI_SOFT_RESET	0x2000
+/* twi_clkdiv reg value */
+#define TWI_CLKDIV	156
+/* fifo_stat_ctrl reg bits */
+#define FIFO_IRQEN	0x8000
+#define FIFO_RESET	0x4000
+/* FIFO size */
+#define FIFO_SIZE	16
+
+struct netup_i2c_fifo_regs {
+	union {
+		__u8	data8;
+		__le16	data16;
+		__le32	data32;
+	};
+	__u8		padding[4];
+	__le16		stat_ctrl;
+} __packed __aligned(1);
+
+struct netup_i2c_regs {
+	__le16				clkdiv;
+	__le16				twi_ctrl0_stat;
+	__le16				twi_addr_ctrl1;
+	__le16				length;
+	__u8				padding1[8];
+	struct netup_i2c_fifo_regs	tx_fifo;
+	__u8				padding2[6];
+	struct netup_i2c_fifo_regs	rx_fifo;
+} __packed __aligned(1);
+
+irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c)
+{
+	u16 reg, tmp;
+	unsigned long flags;
+	irqreturn_t iret = IRQ_HANDLED;
+
+	spin_lock_irqsave(&i2c->lock, flags);
+	reg = readw(&i2c->regs->twi_ctrl0_stat);
+	writew(reg & ~TWI_IRQEN, &i2c->regs->twi_ctrl0_stat);
+	dev_dbg(i2c->adap.dev.parent,
+		"%s(): twi_ctrl0_state 0x%x\n", __func__, reg);
+	if ((reg & TWI_IRQEN_COMPL) != 0 && (reg & TWI_IRQ_COMPL)) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): TWI_IRQEN_COMPL\n", __func__);
+		i2c->state = STATE_DONE;
+		goto irq_ok;
+	}
+	if ((reg & TWI_IRQEN_ANACK) != 0 && (reg & TWI_IRQ_ANACK)) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): TWI_IRQEN_ANACK\n", __func__);
+		i2c->state = STATE_ERROR;
+		goto irq_ok;
+	}
+	if ((reg & TWI_IRQEN_DNACK) != 0 && (reg & TWI_IRQ_DNACK)) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): TWI_IRQEN_DNACK\n", __func__);
+		i2c->state = STATE_ERROR;
+		goto irq_ok;
+	}
+	if ((reg & TWI_IRQ_RX) != 0) {
+		tmp = readw(&i2c->regs->rx_fifo.stat_ctrl);
+		writew(tmp & ~FIFO_IRQEN, &i2c->regs->rx_fifo.stat_ctrl);
+		i2c->state = STATE_WANT_READ;
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): want read\n", __func__);
+		goto irq_ok;
+	}
+	if ((reg & TWI_IRQ_TX) != 0) {
+		tmp = readw(&i2c->regs->tx_fifo.stat_ctrl);
+		writew(tmp & ~FIFO_IRQEN, &i2c->regs->tx_fifo.stat_ctrl);
+		i2c->state = STATE_WANT_WRITE;
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): want write\n", __func__);
+		goto irq_ok;
+	}
+	dev_warn(&i2c->adap.dev, "%s(): not mine interrupt\n", __func__);
+	iret = IRQ_NONE;
+irq_ok:
+	spin_unlock_irqrestore(&i2c->lock, flags);
+	if (iret == IRQ_HANDLED)
+		wake_up(&i2c->wq);
+	return iret;
+}
+
+static void netup_i2c_reset(struct netup_i2c *i2c)
+{
+	dev_dbg(i2c->adap.dev.parent, "%s()\n", __func__);
+	i2c->state = STATE_DONE;
+	writew(TWI_SOFT_RESET, &i2c->regs->twi_addr_ctrl1);
+	writew(TWI_CLKDIV, &i2c->regs->clkdiv);
+	writew(FIFO_RESET, &i2c->regs->tx_fifo.stat_ctrl);
+	writew(FIFO_RESET, &i2c->regs->rx_fifo.stat_ctrl);
+	writew(0x800, &i2c->regs->tx_fifo.stat_ctrl);
+	writew(0x800, &i2c->regs->rx_fifo.stat_ctrl);
+}
+
+static void netup_i2c_fifo_tx(struct netup_i2c *i2c)
+{
+	u8 data;
+	u32 fifo_space = FIFO_SIZE -
+		(readw(&i2c->regs->tx_fifo.stat_ctrl) & 0x3f);
+	u32 msg_length = i2c->msg->len - i2c->xmit_size;
+
+	msg_length = (msg_length < fifo_space ? msg_length : fifo_space);
+	while (msg_length--) {
+		data = i2c->msg->buf[i2c->xmit_size++];
+		writeb(data, &i2c->regs->tx_fifo.data8);
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): write 0x%02x\n", __func__, data);
+	}
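+	/* Message larger than the FIFO: re-enable the TX FIFO interrupt
+	 * so the ISR schedules another fill for the remaining bytes. */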
+	if (i2c->xmit_size < i2c->msg->len) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): TX IRQ enabled\n", __func__);
+		writew(readw(&i2c->regs->tx_fifo.stat_ctrl) | FIFO_IRQEN,
+			&i2c->regs->tx_fifo.stat_ctrl);
+	}
+}
+
+static void netup_i2c_fifo_rx(struct netup_i2c *i2c)
+{
+	u8 data;
+	u32 fifo_size = readw(&i2c->regs->rx_fifo.stat_ctrl) & 0x3f;
+
+	dev_dbg(i2c->adap.dev.parent,
+		"%s(): RX fifo size %d\n", __func__, fifo_size);
+	while (fifo_size--) {
+		data = readb(&i2c->regs->rx_fifo.data8);
+		if ((i2c->msg->flags & I2C_M_RD) != 0 &&
+					i2c->xmit_size < i2c->msg->len) {
+			i2c->msg->buf[i2c->xmit_size++] = data;
+			dev_dbg(i2c->adap.dev.parent,
+				"%s(): read 0x%02x\n", __func__, data);
+		}
+	}
+	if (i2c->xmit_size < i2c->msg->len) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): RX IRQ enabled\n", __func__);
+		writew(readw(&i2c->regs->rx_fifo.stat_ctrl) | FIFO_IRQEN,
+			&i2c->regs->rx_fifo.stat_ctrl);
+	}
+}
+
+static void netup_i2c_start_xfer(struct netup_i2c *i2c)
+{
+	u16 rdflag = ((i2c->msg->flags & I2C_M_RD) ? 1 : 0);
+	u16 reg = readw(&i2c->regs->twi_ctrl0_stat);
+
+	writew(TWI_IRQEN | reg, &i2c->regs->twi_ctrl0_stat);
+	writew(i2c->msg->len, &i2c->regs->length);
+	writew(TWI_TRANSFER | (i2c->msg->addr << 1) | rdflag,
+		&i2c->regs->twi_addr_ctrl1);
+	dev_dbg(i2c->adap.dev.parent,
+		"%s(): length %d twi_addr_ctrl1 0x%x twi_ctrl0_stat 0x%x\n",
+		__func__, readw(&i2c->regs->length),
+		readw(&i2c->regs->twi_addr_ctrl1),
+		readw(&i2c->regs->twi_ctrl0_stat));
+	i2c->state = STATE_WAIT;
+	i2c->xmit_size = 0;
+	if (!rdflag)
+		netup_i2c_fifo_tx(i2c);
+	else
+		writew(FIFO_IRQEN | readw(&i2c->regs->rx_fifo.stat_ctrl),
+			&i2c->regs->rx_fifo.stat_ctrl);
+}
+
+static int netup_i2c_xfer(struct i2c_adapter *adap,
+			  struct i2c_msg *msgs, int num)
+{
+	unsigned long flags;
+	int i, trans_done, res = num;
+	struct netup_i2c *i2c = i2c_get_adapdata(adap);
+	u16 reg;
+
+	if (num <= 0) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): num == %d\n", __func__, num);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&i2c->lock, flags);
+	if (i2c->state != STATE_DONE) {
+		dev_dbg(i2c->adap.dev.parent,
+			"%s(): i2c->state == %d, resetting I2C\n",
+			__func__, i2c->state);
+		netup_i2c_reset(i2c);
+	}
+	dev_dbg(i2c->adap.dev.parent, "%s() num %d\n", __func__, num);
+	for (i = 0; i < num; i++) {
+		i2c->msg = &msgs[i];
+		netup_i2c_start_xfer(i2c);
+		trans_done = 0;
+		while (!trans_done) {
+			spin_unlock_irqrestore(&i2c->lock, flags);
+			if (wait_event_timeout(i2c->wq,
+					i2c->state != STATE_WAIT,
+					msecs_to_jiffies(NETUP_I2C_TIMEOUT))) {
+				spin_lock_irqsave(&i2c->lock, flags);
+				switch (i2c->state) {
+				case STATE_WANT_READ:
+					netup_i2c_fifo_rx(i2c);
+					break;
+				case STATE_WANT_WRITE:
+					netup_i2c_fifo_tx(i2c);
+					break;
+				case STATE_DONE:
+					if ((i2c->msg->flags & I2C_M_RD) != 0 &&
+						i2c->xmit_size != i2c->msg->len)
+						netup_i2c_fifo_rx(i2c);
+					dev_dbg(i2c->adap.dev.parent,
+						"%s(): msg %d OK\n",
+						__func__, i);
+					trans_done = 1;
+					break;
+				case STATE_ERROR:
+					res = -EIO;
+					dev_dbg(i2c->adap.dev.parent,
+						"%s(): error state\n",
+						__func__);
+					goto done;
+				default:
+					dev_dbg(i2c->adap.dev.parent,
+						"%s(): invalid state %d\n",
+						__func__, i2c->state);
+					res = -EINVAL;
+					goto done;
+				}
+				if (!trans_done) {
+					i2c->state = STATE_WAIT;
+					reg = readw(
+						&i2c->regs->twi_ctrl0_stat);
+					writew(TWI_IRQEN | reg,
+						&i2c->regs->twi_ctrl0_stat);
+				}
+				spin_unlock_irqrestore(&i2c->lock, flags);
+			} else {
+				spin_lock_irqsave(&i2c->lock, flags);
+				dev_dbg(i2c->adap.dev.parent,
+					"%s(): wait timeout\n", __func__);
+				res = -ETIMEDOUT;
+				goto done;
+			}
+			spin_lock_irqsave(&i2c->lock, flags);
+		}
+	}
+done:
+	spin_unlock_irqrestore(&i2c->lock, flags);
+	dev_dbg(i2c->adap.dev.parent, "%s(): result %d\n", __func__, res);
+	return res;
+}
+
+static u32 netup_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm netup_i2c_algorithm = {
+	.master_xfer	= netup_i2c_xfer,
+	.functionality	= netup_i2c_func,
+};
+
+static struct i2c_adapter netup_i2c_adapter = {
+	.owner		= THIS_MODULE,
+	.name		= NETUP_UNIDVB_NAME,
+	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
+	.algo		= &netup_i2c_algorithm,
+};
+
+static int netup_i2c_init(struct netup_unidvb_dev *ndev, int bus_num)
+{
+	int ret;
+	struct netup_i2c *i2c;
+
+	if (bus_num < 0 || bus_num > 1) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): invalid bus_num %d\n", __func__, bus_num);
+		return -EINVAL;
+	}
+	i2c = &ndev->i2c[bus_num];
+	spin_lock_init(&i2c->lock);
+	init_waitqueue_head(&i2c->wq);
+	i2c->regs = (struct netup_i2c_regs *)(ndev->bmmio0 +
+		(bus_num == 0 ? NETUP_I2C_BUS0_ADDR : NETUP_I2C_BUS1_ADDR));
+	netup_i2c_reset(i2c);
+	i2c->adap = netup_i2c_adapter;
+	i2c->adap.dev.parent = &ndev->pci_dev->dev;
+	i2c_set_adapdata(&i2c->adap, i2c);
+	ret = i2c_add_adapter(&i2c->adap);
+	if (ret) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): failed to add I2C adapter\n", __func__);
+		return ret;
+	}
+	dev_info(&ndev->pci_dev->dev,
+		"%s(): registered I2C bus %d at 0x%x\n",
+		__func__,
+		bus_num, (bus_num == 0 ?
+			NETUP_I2C_BUS0_ADDR :
+			NETUP_I2C_BUS1_ADDR));
+	return 0;
+}
+
+static void netup_i2c_remove(struct netup_unidvb_dev *ndev, int bus_num)
+{
+	struct netup_i2c *i2c;
+
+	if (bus_num < 0 || bus_num > 1) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): invalid bus number %d\n", __func__, bus_num);
+		return;
+	}
+	i2c = &ndev->i2c[bus_num];
+	netup_i2c_reset(i2c);
+	/* remove adapter */
+	i2c_del_adapter(&i2c->adap);
+	dev_info(&ndev->pci_dev->dev,
+		"netup_i2c_remove: unregistered I2C bus %d\n", bus_num);
+}
+
+int netup_i2c_register(struct netup_unidvb_dev *ndev)
+{
+	int ret;
+
+	ret = netup_i2c_init(ndev, 0);
+	if (ret)
+		return ret;
+	ret = netup_i2c_init(ndev, 1);
+	if (ret) {
+		netup_i2c_remove(ndev, 0);
+		return ret;
+	}
+	return 0;
+}
+
+void netup_i2c_unregister(struct netup_unidvb_dev *ndev)
+{
+	netup_i2c_remove(ndev, 0);
+	netup_i2c_remove(ndev, 1);
+}
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
new file mode 100644
index 0000000..f55b327
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -0,0 +1,252 @@
+/*
+ * netup_unidvb_spi.c
+ *
+ * Internal SPI driver for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "netup_unidvb.h"
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/partitions.h>
+#include <mtd/mtd-abi.h>
+
+#define NETUP_SPI_CTRL_IRQ	0x1000
+#define NETUP_SPI_CTRL_IMASK	0x2000
+#define NETUP_SPI_CTRL_START	0x8000
+#define NETUP_SPI_CTRL_LAST_CS	0x4000
+
+#define NETUP_SPI_TIMEOUT	6000
+
+enum netup_spi_state {
+	SPI_STATE_START,
+	SPI_STATE_DONE,
+};
+
+struct netup_spi_regs {
+	__u8	data[1024];
+	__le16	control_stat;
+	__le16	clock_divider;
+} __packed __aligned(1);
+
+struct netup_spi {
+	struct device			*dev;
+	struct spi_master		*master;
+	struct netup_spi_regs		*regs;
+	u8 __iomem			*mmio;
+	spinlock_t			lock;
+	wait_queue_head_t		waitq;
+	enum netup_spi_state		state;
+};
+
+static char netup_spi_name[64] = "fpga";
+
+static struct mtd_partition netup_spi_flash_partitions = {
+	.name = netup_spi_name,
+	.size = 0x1000000, /* 16MB */
+	.offset = 0,
+	.mask_flags = MTD_CAP_ROM
+};
+
+static struct flash_platform_data spi_flash_data = {
+	.name = "netup0_m25p128",
+	.parts = &netup_spi_flash_partitions,
+	.nr_parts = 1,
+};
+
+static struct spi_board_info netup_spi_board = {
+	.modalias = "m25p128",
+	.max_speed_hz = 11000000,
+	.chip_select = 0,
+	.mode = SPI_MODE_0,
+	.platform_data = &spi_flash_data,
+};
+
+irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
+{
+	u16 reg;
+	unsigned long flags;
+
+	if (!spi) {
+		pr_debug("%s(): SPI not initialized\n", __func__);
+		return IRQ_NONE;
+	}
+	spin_lock_irqsave(&spi->lock, flags);
+	reg = readw(&spi->regs->control_stat);
+	if (!(reg & NETUP_SPI_CTRL_IRQ)) {
+		spin_unlock_irqrestore(&spi->lock, flags);
+		dev_dbg(&spi->master->dev,
+			"%s(): not mine interrupt\n", __func__);
+		return IRQ_NONE;
+	}
+	writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
+	reg = readw(&spi->regs->control_stat);
+	writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
+	spi->state = SPI_STATE_DONE;
+	wake_up(&spi->waitq);
+	spin_unlock_irqrestore(&spi->lock, flags);
+	dev_dbg(&spi->master->dev,
+		"%s(): SPI interrupt handled\n", __func__);
+	return IRQ_HANDLED;
+}
+
+static int netup_spi_transfer(struct spi_master *master,
+			      struct spi_message *msg)
+{
+	struct netup_spi *spi = spi_master_get_devdata(master);
+	struct spi_transfer *t;
+	int result = 0;
+	u32 tr_size;
+
+	/* reset CS */
+	writew(NETUP_SPI_CTRL_LAST_CS, &spi->regs->control_stat);
+	writew(0, &spi->regs->control_stat);
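+	/*
+	 * The controller exposes only a 1 KiB data window, so each transfer
+	 * is pushed through in fragments of at most sizeof(spi->regs->data)
+	 * bytes. LAST_CS is asserted only on the final fragment of the last
+	 * transfer, releasing chip select at the end of the message.
+	 */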
+	list_for_each_entry(t, &msg->transfers, transfer_list) {
+		tr_size = t->len;
+		while (tr_size) {
+			u32 frag_offset = t->len - tr_size;
+			u32 frag_size = (tr_size > sizeof(spi->regs->data)) ?
+					sizeof(spi->regs->data) : tr_size;
+			int frag_last = 0;
+
+			if (list_is_last(&t->transfer_list,
+					&msg->transfers) &&
+					frag_offset + frag_size == t->len) {
+				frag_last = 1;
+			}
+			if (t->tx_buf) {
+				memcpy_toio(spi->regs->data,
+					t->tx_buf + frag_offset,
+					frag_size);
+			} else {
+				memset_io(spi->regs->data,
+					0, frag_size);
+			}
+			spi->state = SPI_STATE_START;
+			writew((frag_size & 0x3ff) |
+				NETUP_SPI_CTRL_IMASK |
+				NETUP_SPI_CTRL_START |
+				(frag_last ? NETUP_SPI_CTRL_LAST_CS : 0),
+				&spi->regs->control_stat);
+			dev_dbg(&spi->master->dev,
+				"%s(): control_stat 0x%04x\n",
+				__func__, readw(&spi->regs->control_stat));
+			wait_event_timeout(spi->waitq,
+				spi->state != SPI_STATE_START,
+				msecs_to_jiffies(NETUP_SPI_TIMEOUT));
+			if (spi->state == SPI_STATE_DONE) {
+				if (t->rx_buf) {
+					memcpy_fromio(t->rx_buf + frag_offset,
+						spi->regs->data, frag_size);
+				}
+			} else {
+				if (spi->state == SPI_STATE_START) {
+					dev_dbg(&spi->master->dev,
+						"%s(): transfer timeout\n",
+						__func__);
+				} else {
+					dev_dbg(&spi->master->dev,
+						"%s(): invalid state %d\n",
+						__func__, spi->state);
+				}
+				result = -EIO;
+				goto done;
+			}
+			tr_size -= frag_size;
+			msg->actual_length += frag_size;
+		}
+	}
+done:
+	msg->status = result;
+	spi_finalize_current_message(master);
+	return result;
+}
+
+static int netup_spi_setup(struct spi_device *spi)
+{
+	return 0;
+}
+
+int netup_spi_init(struct netup_unidvb_dev *ndev)
+{
+	struct spi_master *master;
+	struct netup_spi *nspi;
+
+	master = spi_alloc_master(&ndev->pci_dev->dev,
+		sizeof(struct netup_spi));
+	if (!master) {
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): unable to alloc SPI master\n", __func__);
+		return -ENOMEM;
+	}
+	nspi = spi_master_get_devdata(master);
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+	master->bus_num = -1;
+	master->num_chipselect = 1;
+	master->transfer_one_message = netup_spi_transfer;
+	master->setup = netup_spi_setup;
+	spin_lock_init(&nspi->lock);
+	init_waitqueue_head(&nspi->waitq);
+	nspi->master = master;
+	nspi->regs = (struct netup_spi_regs *)(ndev->bmmio0 + 0x4000);
+	writew(2, &nspi->regs->clock_divider);
+	writew(NETUP_UNIDVB_IRQ_SPI, ndev->bmmio0 + REG_IMASK_SET);
+	ndev->spi = nspi;
+	if (spi_register_master(master)) {
+		ndev->spi = NULL;
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): unable to register SPI bus\n", __func__);
+		spi_master_put(master);
+		return -EINVAL;
+	}
+	snprintf(netup_spi_name,
+		sizeof(netup_spi_name),
+		"fpga_%02x:%02x.%01x",
+		ndev->pci_bus,
+		ndev->pci_slot,
+		ndev->pci_func);
+	if (!spi_new_device(master, &netup_spi_board)) {
+		ndev->spi = NULL;
+		dev_err(&ndev->pci_dev->dev,
+			"%s(): unable to create SPI device\n", __func__);
+		spi_unregister_master(master);
+		return -EINVAL;
+	}
+	dev_dbg(&ndev->pci_dev->dev, "%s(): SPI init OK\n", __func__);
+	return 0;
+}
+
+void netup_spi_release(struct netup_unidvb_dev *ndev)
+{
+	u16 reg;
+	unsigned long flags;
+	struct netup_spi *spi = ndev->spi;
+
+	if (!spi) {
+		dev_dbg(&ndev->pci_dev->dev,
+			"%s(): SPI not initialized\n", __func__);
+		return;
+	}
+	spin_lock_irqsave(&spi->lock, flags);
+	reg = readw(&spi->regs->control_stat);
+	writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
+	reg = readw(&spi->regs->control_stat);
+	writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
+	spin_unlock_irqrestore(&spi->lock, flags);
+	spi_unregister_master(spi->master);
+	ndev->spi = NULL;
+}
diff --git a/drivers/media/pci/smipcie/Kconfig b/drivers/media/pci/smipcie/Kconfig
index 21a1583..c11c772 100644
--- a/drivers/media/pci/smipcie/Kconfig
+++ b/drivers/media/pci/smipcie/Kconfig
@@ -7,6 +7,7 @@
 	select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+	depends on RC_CORE
 	help
 	  Support for cards with SMI PCIe bridge:
 	  - DVBSky S950 V3
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
index be55481..013bc3f 100644
--- a/drivers/media/pci/smipcie/Makefile
+++ b/drivers/media/pci/smipcie/Makefile
@@ -1,3 +1,6 @@
+
+smipcie-objs	:= smipcie-main.o smipcie-ir.o
+
 obj-$(CONFIG_DVB_SMIPCIE) += smipcie.o
 
 ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
new file mode 100644
index 0000000..d018673
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -0,0 +1,232 @@
+/*
+ * SMI PCIe driver for DVBSky cards.
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#include "smipcie.h"
+
+static void smi_ir_enableInterrupt(struct smi_rc *ir)
+{
+	struct smi_dev *dev = ir->dev;
+
+	smi_write(MSI_INT_ENA_SET, IR_X_INT);
+}
+
+static void smi_ir_disableInterrupt(struct smi_rc *ir)
+{
+	struct smi_dev *dev = ir->dev;
+
+	smi_write(MSI_INT_ENA_CLR, IR_X_INT);
+}
+
+static void smi_ir_clearInterrupt(struct smi_rc *ir)
+{
+	struct smi_dev *dev = ir->dev;
+
+	smi_write(MSI_INT_STATUS_CLR, IR_X_INT);
+}
+
+static void smi_ir_stop(struct smi_rc *ir)
+{
+	struct smi_dev *dev = ir->dev;
+
+	smi_ir_disableInterrupt(ir);
+	smi_clear(IR_Init_Reg, 0x80);
+}
+
+#define BITS_PER_COMMAND 14
+#define GROUPS_PER_BIT 2
+#define IR_RC5_MIN_BIT 36
+#define IR_RC5_MAX_BIT 52
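+/*
+ * Decode a bi-phase RC5 pulse stream. Each sample byte carries the line
+ * level in bit 7 and its duration in bits 6:0; a one half-bit run maps to
+ * one group entry, a two half-bit run to two. A 1-0 group pair then
+ * decodes to a logical 1 and a 0-1 pair to a logical 0.
+ */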
+static u32 smi_decode_rc5(u8 *pData, u8 size)
+{
+	u8 index, current_bit, bit_count;
+	u8 group_array[BITS_PER_COMMAND * GROUPS_PER_BIT + 4];
+	u8 group_index = 0;
+	u32 command = 0xFFFFFFFF;
+
+	group_array[group_index++] = 1;
+
+	for (index = 0; index < size; index++) {
+
+		current_bit = (pData[index] & 0x80) ? 1 : 0;
+		bit_count = pData[index] & 0x7f;
+
+		if ((current_bit == 1) && (bit_count >= 2*IR_RC5_MAX_BIT + 1)) {
+			goto process_code;
+		} else if ((bit_count >= IR_RC5_MIN_BIT) &&
+			   (bit_count <= IR_RC5_MAX_BIT)) {
+				group_array[group_index++] = current_bit;
+		} else if ((bit_count > IR_RC5_MAX_BIT) &&
+			   (bit_count <= 2*IR_RC5_MAX_BIT)) {
+				group_array[group_index++] = current_bit;
+				group_array[group_index++] = current_bit;
+		} else {
+			goto invalid_timing;
+		}
+		if (group_index >= BITS_PER_COMMAND*GROUPS_PER_BIT)
+			goto process_code;
+
+		if ((group_index == BITS_PER_COMMAND*GROUPS_PER_BIT - 1)
+		    && (group_array[group_index-1] == 0)) {
+			group_array[group_index++] = 1;
+			goto process_code;
+		}
+	}
+
+process_code:
+	if (group_index == (BITS_PER_COMMAND*GROUPS_PER_BIT-1))
+		group_array[group_index++] = 1;
+
+	if (group_index == BITS_PER_COMMAND*GROUPS_PER_BIT) {
+		command = 0;
+		for (index = 0; index < (BITS_PER_COMMAND*GROUPS_PER_BIT);
+		     index = index + 2) {
+			if ((group_array[index] == 1) &&
+			    (group_array[index+1] == 0)) {
+				command |= (1 << (BITS_PER_COMMAND -
+						   (index/2) - 1));
+			} else if ((group_array[index] == 0) &&
+				   (group_array[index+1] == 1)) {
+				/* decoded bit is 0; nothing to set */
+			} else {
+				command = 0xFFFFFFFF;
+				goto invalid_timing;
+			}
+		}
+	}
+
+invalid_timing:
+	return command;
+}
+
+static void smi_ir_decode(struct work_struct *work)
+{
+	struct smi_rc *ir = container_of(work, struct smi_rc, work);
+	struct smi_dev *dev = ir->dev;
+	struct rc_dev *rc_dev = ir->rc_dev;
+	u32 dwIRControl, dwIRData, dwIRCode, scancode;
+	u8 index, ucIRCount, readLoop, rc5_command, rc5_system, toggle;
+
+	dwIRControl = smi_read(IR_Init_Reg);
+	if (dwIRControl & rbIRVld) {
+		ucIRCount = (u8) smi_read(IR_Data_Cnt);
+
+		if (ucIRCount < 4)
+			goto end_ir_decode;
+
+		readLoop = ucIRCount/4;
+		if (ucIRCount % 4)
+			readLoop += 1;
+		for (index = 0; index < readLoop; index++) {
+			dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index*4));
+
+			ir->irData[index*4 + 0] = (u8)(dwIRData);
+			ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+			ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+			ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
+		}
+		dwIRCode = smi_decode_rc5(ir->irData, ucIRCount);
+
+		if (dwIRCode != 0xFFFFFFFF) {
+			rc5_command = dwIRCode & 0x3F;
+			rc5_system = (dwIRCode & 0x7C0) >> 6;
+			toggle = (dwIRCode & 0x800) ? 1 : 0;
+			scancode = rc5_system << 8 | rc5_command;
+			rc_keydown(rc_dev, RC_TYPE_RC5, scancode, toggle);
+		}
+	}
+end_ir_decode:
+	smi_set(IR_Init_Reg, 0x04);
+	smi_ir_enableInterrupt(ir);
+}
+
+/* IR functions called by the main driver. */
+int smi_ir_irq(struct smi_rc *ir, u32 int_status)
+{
+	int handled = 0;
+
+	if (int_status & IR_X_INT) {
+		smi_ir_disableInterrupt(ir);
+		smi_ir_clearInterrupt(ir);
+		schedule_work(&ir->work);
+		handled = 1;
+	}
+	return handled;
+}
+
+void smi_ir_start(struct smi_rc *ir)
+{
+	struct smi_dev *dev = ir->dev;
+
+	smi_write(IR_Idle_Cnt_Low, 0x00140070);
+	msleep(20);
+	smi_set(IR_Init_Reg, 0x90);
+
+	smi_ir_enableInterrupt(ir);
+}
+
+int smi_ir_init(struct smi_dev *dev)
+{
+	int ret;
+	struct rc_dev *rc_dev;
+	struct smi_rc *ir = &dev->ir;
+
+	rc_dev = rc_allocate_device();
+	if (!rc_dev)
+		return -ENOMEM;
+
+	/* init input device */
+	snprintf(ir->input_name, sizeof(ir->input_name), "IR (%s)",
+		 dev->info->name);
+	snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0",
+		 pci_name(dev->pci_dev));
+
+	rc_dev->driver_name = "SMI_PCIe";
+	rc_dev->input_phys = ir->input_phys;
+	rc_dev->input_name = ir->input_name;
+	rc_dev->input_id.bustype = BUS_PCI;
+	rc_dev->input_id.version = 1;
+	rc_dev->input_id.vendor = dev->pci_dev->subsystem_vendor;
+	rc_dev->input_id.product = dev->pci_dev->subsystem_device;
+	rc_dev->dev.parent = &dev->pci_dev->dev;
+
+	rc_dev->driver_type = RC_DRIVER_SCANCODE;
+	rc_dev->map_name = RC_MAP_DVBSKY;
+
+	ir->rc_dev = rc_dev;
+	ir->dev = dev;
+
+	INIT_WORK(&ir->work, smi_ir_decode);
+	smi_ir_disableInterrupt(ir);
+
+	ret = rc_register_device(rc_dev);
+	if (ret)
+		goto ir_err;
+
+	return 0;
+ir_err:
+	rc_free_device(rc_dev);
+	return ret;
+}
+
+void smi_ir_exit(struct smi_dev *dev)
+{
+	struct smi_rc *ir = &dev->ir;
+	struct rc_dev *rc_dev = ir->rc_dev;
+
+	smi_ir_stop(ir);
+	rc_unregister_device(rc_dev);
+	ir->rc_dev = NULL;
+}
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
new file mode 100644
index 0000000..b039a22
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -0,0 +1,1114 @@
+/*
+ * SMI PCIe driver for DVBSky cards.
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#include "smipcie.h"
+#include "m88ds3103.h"
+#include "ts2020.h"
+#include "m88rs6000t.h"
+#include "si2168.h"
+#include "si2157.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+static int smi_hw_init(struct smi_dev *dev)
+{
+	u32 port_mux, port_ctrl, int_stat;
+
+	/* set port mux.*/
+	port_mux = smi_read(MUX_MODE_CTRL);
+	port_mux &= ~(rbPaMSMask);
+	port_mux |= rbPaMSDtvNoGpio;
+	port_mux &= ~(rbPbMSMask);
+	port_mux |= rbPbMSDtvNoGpio;
+	port_mux &= ~(0x0f0000);
+	port_mux |= 0x50000;
+	smi_write(MUX_MODE_CTRL, port_mux);
+
+	/* set DTV register.*/
+	/* Port A */
+	port_ctrl = smi_read(VIDEO_CTRL_STATUS_A);
+	port_ctrl &= ~0x01;
+	smi_write(VIDEO_CTRL_STATUS_A, port_ctrl);
+	port_ctrl = smi_read(MPEG2_CTRL_A);
+	port_ctrl &= ~0x40;
+	port_ctrl |= 0x80;
+	smi_write(MPEG2_CTRL_A, port_ctrl);
+	/* Port B */
+	port_ctrl = smi_read(VIDEO_CTRL_STATUS_B);
+	port_ctrl &= ~0x01;
+	smi_write(VIDEO_CTRL_STATUS_B, port_ctrl);
+	port_ctrl = smi_read(MPEG2_CTRL_B);
+	port_ctrl &= ~0x40;
+	port_ctrl |= 0x80;
+	smi_write(MPEG2_CTRL_B, port_ctrl);
+
+	/* disable and clear interrupt.*/
+	smi_write(MSI_INT_ENA_CLR, ALL_INT);
+	int_stat = smi_read(MSI_INT_STATUS);
+	smi_write(MSI_INT_STATUS_CLR, int_stat);
+
+	/* reset demod.*/
+	smi_clear(PERIPHERAL_CTRL, 0x0303);
+	msleep(50);
+	smi_set(PERIPHERAL_CTRL, 0x0101);
+	return 0;
+}
+
+/* i2c bit bus.*/
+static void smi_i2c_cfg(struct smi_dev *dev, u32 sw_ctl)
+{
+	u32 dwCtrl;
+
+	dwCtrl = smi_read(sw_ctl);
+	dwCtrl &= ~0x18; /* disable output.*/
+	dwCtrl |= 0x21; /* reset and software mode.*/
+	dwCtrl &= ~0xff00;
+	dwCtrl |= 0x6400;
+	smi_write(sw_ctl, dwCtrl);
+	msleep(20);
+	dwCtrl = smi_read(sw_ctl);
+	dwCtrl &= ~0x20;
+	smi_write(sw_ctl, dwCtrl);
+}
+
+static void smi_i2c_setsda(struct smi_dev *dev, int state, u32 sw_ctl)
+{
+	if (state) {
+		/* set as input.*/
+		smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
+	} else {
+		smi_clear(sw_ctl, SW_I2C_MSK_DAT_OUT);
+		/* set as output.*/
+		smi_set(sw_ctl, SW_I2C_MSK_DAT_EN);
+	}
+}
+
+static void smi_i2c_setscl(void *data, int state, u32 sw_ctl)
+{
+	struct smi_dev *dev = data;
+
+	if (state) {
+		/* set as input.*/
+		smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
+	} else {
+		smi_clear(sw_ctl, SW_I2C_MSK_CLK_OUT);
+		/* set as output.*/
+		smi_set(sw_ctl, SW_I2C_MSK_CLK_EN);
+	}
+}
+
+static int smi_i2c_getsda(void *data, u32 sw_ctl)
+{
+	struct smi_dev *dev = data;
+	/* set as input.*/
+	smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
+	udelay(1);
+	return (smi_read(sw_ctl) & SW_I2C_MSK_DAT_IN) ? 1 : 0;
+}
+
+static int smi_i2c_getscl(void *data, u32 sw_ctl)
+{
+	struct smi_dev *dev = data;
+	/* set as input.*/
+	smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
+	udelay(1);
+	return (smi_read(sw_ctl) & SW_I2C_MSK_CLK_IN) ? 1 : 0;
+}
+/* i2c 0.*/
+static void smi_i2c0_setsda(void *data, int state)
+{
+	struct smi_dev *dev = data;
+
+	smi_i2c_setsda(dev, state, I2C_A_SW_CTL);
+}
+
+static void smi_i2c0_setscl(void *data, int state)
+{
+	struct smi_dev *dev = data;
+
+	smi_i2c_setscl(dev, state, I2C_A_SW_CTL);
+}
+
+static int smi_i2c0_getsda(void *data)
+{
+	struct smi_dev *dev = data;
+
+	return	smi_i2c_getsda(dev, I2C_A_SW_CTL);
+}
+
+static int smi_i2c0_getscl(void *data)
+{
+	struct smi_dev *dev = data;
+
+	return	smi_i2c_getscl(dev, I2C_A_SW_CTL);
+}
+/* i2c 1.*/
+static void smi_i2c1_setsda(void *data, int state)
+{
+	struct smi_dev *dev = data;
+
+	smi_i2c_setsda(dev, state, I2C_B_SW_CTL);
+}
+
+static void smi_i2c1_setscl(void *data, int state)
+{
+	struct smi_dev *dev = data;
+
+	smi_i2c_setscl(dev, state, I2C_B_SW_CTL);
+}
+
+static int smi_i2c1_getsda(void *data)
+{
+	struct smi_dev *dev = data;
+
+	return	smi_i2c_getsda(dev, I2C_B_SW_CTL);
+}
+
+static int smi_i2c1_getscl(void *data)
+{
+	struct smi_dev *dev = data;
+
+	return	smi_i2c_getscl(dev, I2C_B_SW_CTL);
+}
+
+static int smi_i2c_init(struct smi_dev *dev)
+{
+	int ret;
+
+	/* i2c bus 0 */
+	smi_i2c_cfg(dev, I2C_A_SW_CTL);
+	i2c_set_adapdata(&dev->i2c_bus[0], dev);
+	strcpy(dev->i2c_bus[0].name, "SMI-I2C0");
+	dev->i2c_bus[0].owner = THIS_MODULE;
+	dev->i2c_bus[0].dev.parent = &dev->pci_dev->dev;
+	dev->i2c_bus[0].algo_data = &dev->i2c_bit[0];
+	dev->i2c_bit[0].data = dev;
+	dev->i2c_bit[0].setsda = smi_i2c0_setsda;
+	dev->i2c_bit[0].setscl = smi_i2c0_setscl;
+	dev->i2c_bit[0].getsda = smi_i2c0_getsda;
+	dev->i2c_bit[0].getscl = smi_i2c0_getscl;
+	dev->i2c_bit[0].udelay = 12;
+	dev->i2c_bit[0].timeout = 10;
+	/* Raise SCL and SDA */
+	smi_i2c0_setsda(dev, 1);
+	smi_i2c0_setscl(dev, 1);
+
+	ret = i2c_bit_add_bus(&dev->i2c_bus[0]);
+	if (ret < 0)
+		return ret;
+
+	/* i2c bus 1 */
+	smi_i2c_cfg(dev, I2C_B_SW_CTL);
+	i2c_set_adapdata(&dev->i2c_bus[1], dev);
+	strcpy(dev->i2c_bus[1].name, "SMI-I2C1");
+	dev->i2c_bus[1].owner = THIS_MODULE;
+	dev->i2c_bus[1].dev.parent = &dev->pci_dev->dev;
+	dev->i2c_bus[1].algo_data = &dev->i2c_bit[1];
+	dev->i2c_bit[1].data = dev;
+	dev->i2c_bit[1].setsda = smi_i2c1_setsda;
+	dev->i2c_bit[1].setscl = smi_i2c1_setscl;
+	dev->i2c_bit[1].getsda = smi_i2c1_getsda;
+	dev->i2c_bit[1].getscl = smi_i2c1_getscl;
+	dev->i2c_bit[1].udelay = 12;
+	dev->i2c_bit[1].timeout = 10;
+	/* Raise SCL and SDA */
+	smi_i2c1_setsda(dev, 1);
+	smi_i2c1_setscl(dev, 1);
+
+	ret = i2c_bit_add_bus(&dev->i2c_bus[1]);
+	if (ret < 0)
+		i2c_del_adapter(&dev->i2c_bus[0]);
+
+	return ret;
+}
+
+static void smi_i2c_exit(struct smi_dev *dev)
+{
+	i2c_del_adapter(&dev->i2c_bus[0]);
+	i2c_del_adapter(&dev->i2c_bus[1]);
+}
+
+static int smi_read_eeprom(struct i2c_adapter *i2c, u16 reg, u8 *data, u16 size)
+{
+	int ret;
+	u8 b0[2] = { (reg >> 8) & 0xff, reg & 0xff };
+
+	struct i2c_msg msg[] = {
+		{ .addr = 0x50, .flags = 0,
+			.buf = b0, .len = 2 },
+		{ .addr = 0x50, .flags = I2C_M_RD,
+			.buf = data, .len = size }
+	};
+
+	ret = i2c_transfer(i2c, msg, 2);
+
+	if (ret != 2) {
+		dev_err(&i2c->dev, "%s: reg=0x%x (error=%d)\n",
+			__func__, reg, ret);
+		return ret < 0 ? ret : -EIO;
+	}
+	return 0;
+}
+
+/* ts port interrupt operations */
+static void smi_port_disableInterrupt(struct smi_port *port)
+{
+	struct smi_dev *dev = port->dev;
+
+	smi_write(MSI_INT_ENA_CLR,
+		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+static void smi_port_enableInterrupt(struct smi_port *port)
+{
+	struct smi_dev *dev = port->dev;
+
+	smi_write(MSI_INT_ENA_SET,
+		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+static void smi_port_clearInterrupt(struct smi_port *port)
+{
+	struct smi_dev *dev = port->dev;
+
+	smi_write(MSI_INT_STATUS_CLR,
+		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+/* tasklet handler: DMA data to dmx.*/
+static void smi_dma_xfer(unsigned long data)
+{
+	struct smi_port *port = (struct smi_port *) data;
+	struct smi_dev *dev = port->dev;
+	u32 intr_status, finishedData, dmaManagement;
+	u8 dmaChan0State, dmaChan1State;
+
+	intr_status = port->_int_status;
+	dmaManagement = smi_read(port->DMA_MANAGEMENT);
+	dmaChan0State = (u8)((dmaManagement & 0x00000030) >> 4);
+	dmaChan1State = (u8)((dmaManagement & 0x00300000) >> 20);
+
+	/* CH-0 DMA interrupt.*/
+	if ((intr_status & port->_dmaInterruptCH0) && (dmaChan0State == 0x01)) {
+		dev_dbg(&dev->pci_dev->dev,
+			"Port[%d]-DMA CH0 engine complete successful !\n",
+			port->idx);
+		finishedData = smi_read(port->DMA_CHAN0_TRANS_STATE);
+		finishedData &= 0x003FFFFF;
+		/*
+		 * Bits [21:0] of the DMA_PORT0_CHAN0_TRANS_STATE register
+		 * hold the total DMA transfer length; zero means the full
+		 * 0x400000 bytes (4MB) were transferred.
+		 */
+		if (finishedData == 0)
+			finishedData = 0x00400000;
+		if (finishedData != SMI_TS_DMA_BUF_SIZE) {
+			dev_dbg(&dev->pci_dev->dev,
+				"DMA CH0 engine complete length mismatched, finish data=%d !\n",
+				finishedData);
+		}
+		dvb_dmx_swfilter_packets(&port->demux,
+			port->cpu_addr[0], (finishedData / 188));
+		/*dvb_dmx_swfilter(&port->demux,
+			port->cpu_addr[0], finishedData);*/
+	}
+	/* CH-1 DMA interrupt.*/
+	if ((intr_status & port->_dmaInterruptCH1) && (dmaChan1State == 0x01)) {
+		dev_dbg(&dev->pci_dev->dev,
+			"Port[%d]-DMA CH1 engine complete successful !\n",
+			port->idx);
+		finishedData = smi_read(port->DMA_CHAN1_TRANS_STATE);
+		finishedData &= 0x003FFFFF;
+		/*
+		 * Bits [21:0] of the channel's TRANS_STATE register hold the
+		 * total DMA transfer length; zero means the full 0x400000
+		 * bytes (4MB) were transferred.
+		 */
+		if (finishedData == 0)
+			finishedData = 0x00400000;
+		if (finishedData != SMI_TS_DMA_BUF_SIZE) {
+			dev_dbg(&dev->pci_dev->dev,
+				"DMA CH1 engine complete length mismatched, finish data=%d !\n",
+				finishedData);
+		}
+		dvb_dmx_swfilter_packets(&port->demux,
+			port->cpu_addr[1], (finishedData / 188));
+		/*dvb_dmx_swfilter(&port->demux,
+			port->cpu_addr[1], finishedData);*/
+	}
+	/* restart DMA.*/
+	if (intr_status & port->_dmaInterruptCH0)
+		dmaManagement |= 0x00000002;
+	if (intr_status & port->_dmaInterruptCH1)
+		dmaManagement |= 0x00020000;
+	smi_write(port->DMA_MANAGEMENT, dmaManagement);
+	/* Re-enable interrupts */
+	smi_port_enableInterrupt(port);
+}
+
+static void smi_port_dma_free(struct smi_port *port)
+{
+	if (port->cpu_addr[0]) {
+		pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
+				    port->cpu_addr[0], port->dma_addr[0]);
+		port->cpu_addr[0] = NULL;
+	}
+	if (port->cpu_addr[1]) {
+		pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
+				    port->cpu_addr[1], port->dma_addr[1]);
+		port->cpu_addr[1] = NULL;
+	}
+}
+
+static int smi_port_init(struct smi_port *port, int dmaChanUsed)
+{
+	dev_dbg(&port->dev->pci_dev->dev,
+		"%s, port %d, dmaused %d\n", __func__, port->idx, dmaChanUsed);
+	port->enable = 0;
+	if (port->idx == 0) {
+		/* Port A */
+		port->_dmaInterruptCH0 = dmaChanUsed & 0x01;
+		port->_dmaInterruptCH1 = dmaChanUsed & 0x02;
+
+		port->DMA_CHAN0_ADDR_LOW	= DMA_PORTA_CHAN0_ADDR_LOW;
+		port->DMA_CHAN0_ADDR_HI		= DMA_PORTA_CHAN0_ADDR_HI;
+		port->DMA_CHAN0_TRANS_STATE	= DMA_PORTA_CHAN0_TRANS_STATE;
+		port->DMA_CHAN0_CONTROL		= DMA_PORTA_CHAN0_CONTROL;
+		port->DMA_CHAN1_ADDR_LOW	= DMA_PORTA_CHAN1_ADDR_LOW;
+		port->DMA_CHAN1_ADDR_HI		= DMA_PORTA_CHAN1_ADDR_HI;
+		port->DMA_CHAN1_TRANS_STATE	= DMA_PORTA_CHAN1_TRANS_STATE;
+		port->DMA_CHAN1_CONTROL		= DMA_PORTA_CHAN1_CONTROL;
+		port->DMA_MANAGEMENT		= DMA_PORTA_MANAGEMENT;
+	} else {
+		/* Port B */
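+		/* port B interrupt bits sit two positions above port A's */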
+		port->_dmaInterruptCH0 = (dmaChanUsed << 2) & 0x04;
+		port->_dmaInterruptCH1 = (dmaChanUsed << 2) & 0x08;
+
+		port->DMA_CHAN0_ADDR_LOW	= DMA_PORTB_CHAN0_ADDR_LOW;
+		port->DMA_CHAN0_ADDR_HI		= DMA_PORTB_CHAN0_ADDR_HI;
+		port->DMA_CHAN0_TRANS_STATE	= DMA_PORTB_CHAN0_TRANS_STATE;
+		port->DMA_CHAN0_CONTROL		= DMA_PORTB_CHAN0_CONTROL;
+		port->DMA_CHAN1_ADDR_LOW	= DMA_PORTB_CHAN1_ADDR_LOW;
+		port->DMA_CHAN1_ADDR_HI		= DMA_PORTB_CHAN1_ADDR_HI;
+		port->DMA_CHAN1_TRANS_STATE	= DMA_PORTB_CHAN1_TRANS_STATE;
+		port->DMA_CHAN1_CONTROL		= DMA_PORTB_CHAN1_CONTROL;
+		port->DMA_MANAGEMENT		= DMA_PORTB_MANAGEMENT;
+	}
+
+	if (port->_dmaInterruptCH0) {
+		port->cpu_addr[0] = pci_alloc_consistent(port->dev->pci_dev,
+					SMI_TS_DMA_BUF_SIZE,
+					&port->dma_addr[0]);
+		if (!port->cpu_addr[0]) {
+			dev_err(&port->dev->pci_dev->dev,
+				"Port[%d] DMA CH0 memory allocation failed!\n",
+				port->idx);
+			goto err;
+		}
+	}
+
+	if (port->_dmaInterruptCH1) {
+		port->cpu_addr[1] = pci_alloc_consistent(port->dev->pci_dev,
+					SMI_TS_DMA_BUF_SIZE,
+					&port->dma_addr[1]);
+		if (!port->cpu_addr[1]) {
+			dev_err(&port->dev->pci_dev->dev,
+				"Port[%d] DMA CH1 memory allocation failed!\n",
+				port->idx);
+			goto err;
+		}
+	}
+
+	smi_port_disableInterrupt(port);
+	tasklet_init(&port->tasklet, smi_dma_xfer, (unsigned long)port);
+	tasklet_disable(&port->tasklet);
+	port->enable = 1;
+	return 0;
+err:
+	smi_port_dma_free(port);
+	return -ENOMEM;
+}
+
+static void smi_port_exit(struct smi_port *port)
+{
+	smi_port_disableInterrupt(port);
+	tasklet_kill(&port->tasklet);
+	smi_port_dma_free(port);
+	port->enable = 0;
+}
+
+static int smi_port_irq(struct smi_port *port, u32 int_status)
+{
+	u32 port_req_irq = port->_dmaInterruptCH0 | port->_dmaInterruptCH1;
+	int handled = 0;
+
+	if (int_status & port_req_irq) {
+		smi_port_disableInterrupt(port);
+		port->_int_status = int_status;
+		smi_port_clearInterrupt(port);
+		tasklet_schedule(&port->tasklet);
+		handled = 1;
+	}
+	return handled;
+}
+
+static irqreturn_t smi_irq_handler(int irq, void *dev_id)
+{
+	struct smi_dev *dev = dev_id;
+	struct smi_port *port0 = &dev->ts_port[0];
+	struct smi_port *port1 = &dev->ts_port[1];
+	struct smi_rc *ir = &dev->ir;
+	int handled = 0;
+
+	u32 intr_status = smi_read(MSI_INT_STATUS);
+
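+	/* One interrupt line is shared by both TS ports and the IR block;
+	 * dispatch on the latched status bits.
+	 */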
+	/* ts0 interrupt.*/
+	if (dev->info->ts_0)
+		handled += smi_port_irq(port0, intr_status);
+
+	/* ts1 interrupt.*/
+	if (dev->info->ts_1)
+		handled += smi_port_irq(port1, intr_status);
+
+	/* ir interrupt.*/
+	handled += smi_ir_irq(ir, intr_status);
+
+	return IRQ_RETVAL(handled);
+}
+
+static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
+			struct i2c_board_info *info)
+{
+	struct i2c_client *client;
+
+	request_module(info->type);
+	client = i2c_new_device(adapter, info);
+	if (client == NULL || client->dev.driver == NULL)
+		goto err_add_i2c_client;
+
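+	/* pin the client's driver module so it cannot unload underneath us */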
+	if (!try_module_get(client->dev.driver->owner)) {
+		i2c_unregister_device(client);
+		goto err_add_i2c_client;
+	}
+	return client;
+
+err_add_i2c_client:
+	client = NULL;
+	return client;
+}
+
+static void smi_del_i2c_client(struct i2c_client *client)
+{
+	module_put(client->dev.driver->owner);
+	i2c_unregister_device(client);
+}
+
+static const struct m88ds3103_config smi_dvbsky_m88ds3103_cfg = {
+	.i2c_addr = 0x68,
+	.clock = 27000000,
+	.i2c_wr_max = 33,
+	.clock_out = 0,
+	.ts_mode = M88DS3103_TS_PARALLEL,
+	.ts_clk = 16000,
+	.ts_clk_pol = 1,
+	.agc = 0x99,
+	.lnb_hv_pol = 0,
+	.lnb_en_pol = 1,
+};
+
+static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
+{
+	int ret = 0;
+	struct smi_dev *dev = port->dev;
+	struct i2c_adapter *i2c;
+	/* tuner I2C module */
+	struct i2c_adapter *tuner_i2c_adapter;
+	struct i2c_client *tuner_client;
+	struct i2c_board_info tuner_info;
+	struct ts2020_config ts2020_config = {};
+	memset(&tuner_info, 0, sizeof(struct i2c_board_info));
+	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+	/* attach demod */
+	port->fe = dvb_attach(m88ds3103_attach,
+			&smi_dvbsky_m88ds3103_cfg, i2c, &tuner_i2c_adapter);
+	if (!port->fe) {
+		ret = -ENODEV;
+		return ret;
+	}
+	/* attach tuner */
+	ts2020_config.fe = port->fe;
+	strlcpy(tuner_info.type, "ts2020", I2C_NAME_SIZE);
+	tuner_info.addr = 0x60;
+	tuner_info.platform_data = &ts2020_config;
+	tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
+	if (!tuner_client) {
+		ret = -ENODEV;
+		goto err_tuner_i2c_device;
+	}
+
+	/* delegate signal strength measurement to tuner */
+	port->fe->ops.read_signal_strength =
+			port->fe->ops.tuner_ops.get_rf_strength;
+
+	port->i2c_client_tuner = tuner_client;
+	return ret;
+
+err_tuner_i2c_device:
+	dvb_frontend_detach(port->fe);
+	return ret;
+}
+
+static const struct m88ds3103_config smi_dvbsky_m88rs6000_cfg = {
+	.i2c_addr = 0x69,
+	.clock = 27000000,
+	.i2c_wr_max = 33,
+	.ts_mode = M88DS3103_TS_PARALLEL,
+	.ts_clk = 16000,
+	.ts_clk_pol = 1,
+	.agc = 0x99,
+	.lnb_hv_pol = 0,
+	.lnb_en_pol = 1,
+};
+
+static int smi_dvbsky_m88rs6000_fe_attach(struct smi_port *port)
+{
+	int ret = 0;
+	struct smi_dev *dev = port->dev;
+	struct i2c_adapter *i2c;
+	/* tuner I2C module */
+	struct i2c_adapter *tuner_i2c_adapter;
+	struct i2c_client *tuner_client;
+	struct i2c_board_info tuner_info;
+	struct m88rs6000t_config m88rs6000t_config;
+
+	memset(&tuner_info, 0, sizeof(struct i2c_board_info));
+	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+	/* attach demod */
+	port->fe = dvb_attach(m88ds3103_attach,
+			&smi_dvbsky_m88rs6000_cfg, i2c, &tuner_i2c_adapter);
+	if (!port->fe) {
+		ret = -ENODEV;
+		return ret;
+	}
+	/* attach tuner */
+	m88rs6000t_config.fe = port->fe;
+	strlcpy(tuner_info.type, "m88rs6000t", I2C_NAME_SIZE);
+	tuner_info.addr = 0x21;
+	tuner_info.platform_data = &m88rs6000t_config;
+	tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
+	if (!tuner_client) {
+		ret = -ENODEV;
+		goto err_tuner_i2c_device;
+	}
+
+	/* delegate signal strength measurement to tuner */
+	port->fe->ops.read_signal_strength =
+			port->fe->ops.tuner_ops.get_rf_strength;
+
+	port->i2c_client_tuner = tuner_client;
+	return ret;
+
+err_tuner_i2c_device:
+	dvb_frontend_detach(port->fe);
+	return ret;
+}
+
+static int smi_dvbsky_sit2_fe_attach(struct smi_port *port)
+{
+	int ret = 0;
+	struct smi_dev *dev = port->dev;
+	struct i2c_adapter *i2c;
+	struct i2c_adapter *tuner_i2c_adapter;
+	struct i2c_client *client_tuner, *client_demod;
+	struct i2c_board_info client_info;
+	struct si2168_config si2168_config;
+	struct si2157_config si2157_config;
+
+	/* select i2c bus */
+	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+	/* attach demod */
+	memset(&si2168_config, 0, sizeof(si2168_config));
+	si2168_config.i2c_adapter = &tuner_i2c_adapter;
+	si2168_config.fe = &port->fe;
+	si2168_config.ts_mode = SI2168_TS_PARALLEL;
+
+	memset(&client_info, 0, sizeof(struct i2c_board_info));
+	strlcpy(client_info.type, "si2168", I2C_NAME_SIZE);
+	client_info.addr = 0x64;
+	client_info.platform_data = &si2168_config;
+
+	client_demod = smi_add_i2c_client(i2c, &client_info);
+	if (!client_demod) {
+		ret = -ENODEV;
+		return ret;
+	}
+	port->i2c_client_demod = client_demod;
+
+	/* attach tuner */
+	memset(&si2157_config, 0, sizeof(si2157_config));
+	si2157_config.fe = port->fe;
+	si2157_config.if_port = 1;
+
+	memset(&client_info, 0, sizeof(struct i2c_board_info));
+	strlcpy(client_info.type, "si2157", I2C_NAME_SIZE);
+	client_info.addr = 0x60;
+	client_info.platform_data = &si2157_config;
+
+	client_tuner = smi_add_i2c_client(tuner_i2c_adapter, &client_info);
+	if (!client_tuner) {
+		smi_del_i2c_client(port->i2c_client_demod);
+		port->i2c_client_demod = NULL;
+		ret = -ENODEV;
+		return ret;
+	}
+	port->i2c_client_tuner = client_tuner;
+	return ret;
+}
+
+static int smi_fe_init(struct smi_port *port)
+{
+	int ret = 0;
+	struct smi_dev *dev = port->dev;
+	struct dvb_adapter *adap = &port->dvb_adapter;
+	u8 mac_ee[16];
+
+	dev_dbg(&port->dev->pci_dev->dev,
+		"%s: port %d, fe_type = %d\n",
+		__func__, port->idx, port->fe_type);
+	switch (port->fe_type) {
+	case DVBSKY_FE_M88DS3103:
+		ret = smi_dvbsky_m88ds3103_fe_attach(port);
+		break;
+	case DVBSKY_FE_M88RS6000:
+		ret = smi_dvbsky_m88rs6000_fe_attach(port);
+		break;
+	case DVBSKY_FE_SIT2:
+		ret = smi_dvbsky_sit2_fe_attach(port);
+		break;
+	}
+	if (ret < 0)
+		return ret;
+
+	/* register dvb frontend */
+	ret = dvb_register_frontend(adap, port->fe);
+	if (ret < 0) {
+		if (port->i2c_client_tuner)
+			smi_del_i2c_client(port->i2c_client_tuner);
+		if (port->i2c_client_demod)
+			smi_del_i2c_client(port->i2c_client_demod);
+		dvb_frontend_detach(port->fe);
+		return ret;
+	}
+	/* read the MAC address from EEPROM (best effort) */
+	ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
+	if (ret)
+		dev_warn(&port->dev->pci_dev->dev,
+			"EEPROM MAC address read failed\n");
+	dev_info(&port->dev->pci_dev->dev,
+		"DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
+	memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
+	return 0;
+}
+
+static void smi_fe_exit(struct smi_port *port)
+{
+	dvb_unregister_frontend(port->fe);
+	/* remove I2C demod and tuner */
+	if (port->i2c_client_tuner)
+		smi_del_i2c_client(port->i2c_client_tuner);
+	if (port->i2c_client_demod)
+		smi_del_i2c_client(port->i2c_client_demod);
+	dvb_frontend_detach(port->fe);
+}
+
+static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+			    int (*start_feed)(struct dvb_demux_feed *),
+			    int (*stop_feed)(struct dvb_demux_feed *),
+			    void *priv)
+{
+	dvbdemux->priv = priv;
+
+	dvbdemux->filternum = 256;
+	dvbdemux->feednum = 256;
+	dvbdemux->start_feed = start_feed;
+	dvbdemux->stop_feed = stop_feed;
+	dvbdemux->write_to_decoder = NULL;
+	dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+				      DMX_SECTION_FILTERING |
+				      DMX_MEMORY_BASED_FILTERING);
+	return dvb_dmx_init(dvbdemux);
+}
+
+static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+			       struct dvb_demux *dvbdemux,
+			       struct dmx_frontend *hw_frontend,
+			       struct dmx_frontend *mem_frontend,
+			       struct dvb_adapter *dvb_adapter)
+{
+	int ret;
+
+	dmxdev->filternum = 256;
+	dmxdev->demux = &dvbdemux->dmx;
+	dmxdev->capabilities = 0;
+	ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+	if (ret < 0)
+		return ret;
+
+	hw_frontend->source = DMX_FRONTEND_0;
+	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+	mem_frontend->source = DMX_MEMORY_FE;
+	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+	return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
+
+static u32 smi_config_DMA(struct smi_port *port)
+{
+	struct smi_dev *dev = port->dev;
+	u32 totalLength = 0, dmaMemPtrLow, dmaMemPtrHi, dmaCtlReg;
+	u8 chanLatencyTimer = 0, dmaChanEnable = 1, dmaTransStart = 1;
+	u32 dmaManagement = 0, tlpTransUnit = DMA_TRANS_UNIT_188;
+	u8 tlpTc = 0, tlpTd = 1, tlpEp = 0, tlpAttr = 0;
+	u64 mem;
+
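+	/*
+	 * Per-channel control word: bits [21:0] total transfer length,
+	 * [24:22] TLP transfer unit, [27:25] traffic class, bit 28 TD,
+	 * bit 29 EP, [31:30] attributes.
+	 */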
+	dmaManagement = smi_read(port->DMA_MANAGEMENT);
+	/* Setup Channel-0 */
+	if (port->_dmaInterruptCH0) {
+		totalLength = SMI_TS_DMA_BUF_SIZE;
+		mem = port->dma_addr[0];
+		dmaMemPtrLow = mem & 0xffffffff;
+		dmaMemPtrHi = mem >> 32;
+		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
+			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
+		dmaManagement |= dmaChanEnable | (dmaTransStart << 1)
+			| (chanLatencyTimer << 8);
+		/* write DMA register, start DMA engine */
+		smi_write(port->DMA_CHAN0_ADDR_LOW, dmaMemPtrLow);
+		smi_write(port->DMA_CHAN0_ADDR_HI, dmaMemPtrHi);
+		smi_write(port->DMA_CHAN0_CONTROL, dmaCtlReg);
+	}
+	/* Setup Channel-1 */
+	if (port->_dmaInterruptCH1) {
+		totalLength = SMI_TS_DMA_BUF_SIZE;
+		mem = port->dma_addr[1];
+		dmaMemPtrLow = mem & 0xffffffff;
+		dmaMemPtrHi = mem >> 32;
+		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
+			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
+		dmaManagement |= (dmaChanEnable << 16) | (dmaTransStart << 17)
+			| (chanLatencyTimer << 24);
+		/* write DMA register, start DMA engine */
+		smi_write(port->DMA_CHAN1_ADDR_LOW, dmaMemPtrLow);
+		smi_write(port->DMA_CHAN1_ADDR_HI, dmaMemPtrHi);
+		smi_write(port->DMA_CHAN1_CONTROL, dmaCtlReg);
+	}
+	return dmaManagement;
+}
+
+static int smi_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+	struct smi_port *port = dvbdmx->priv;
+	struct smi_dev *dev = port->dev;
+	u32 dmaManagement;
+
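+	/* the first feed arms DMA and the tasklet; later feeds take a ref */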
+	if (port->users++ == 0) {
+		dmaManagement = smi_config_DMA(port);
+		smi_port_clearInterrupt(port);
+		smi_port_enableInterrupt(port);
+		smi_write(port->DMA_MANAGEMENT, dmaManagement);
+		tasklet_enable(&port->tasklet);
+	}
+	return port->users;
+}
+
+static int smi_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+	struct smi_port *port = dvbdmx->priv;
+	struct smi_dev *dev = port->dev;
+
+	if (--port->users)
+		return port->users;
+
+	tasklet_disable(&port->tasklet);
+	smi_port_disableInterrupt(port);
+	smi_clear(port->DMA_MANAGEMENT, 0x30003);
+	return 0;
+}
+
+static int smi_dvb_init(struct smi_port *port)
+{
+	int ret;
+	struct dvb_adapter *adap = &port->dvb_adapter;
+	struct dvb_demux *dvbdemux = &port->demux;
+
+	dev_dbg(&port->dev->pci_dev->dev,
+		"%s, port %d\n", __func__, port->idx);
+
+	ret = dvb_register_adapter(adap, "SMI_DVB", THIS_MODULE,
+				   &port->dev->pci_dev->dev,
+				   adapter_nr);
+	if (ret < 0) {
+		dev_err(&port->dev->pci_dev->dev, "Fail to register DVB adapter.\n");
+		return ret;
+	}
+	ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
+				      smi_start_feed,
+				      smi_stop_feed, port);
+	if (ret < 0)
+		goto err_del_dvb_register_adapter;
+
+	ret = my_dvb_dmxdev_ts_card_init(&port->dmxdev, &port->demux,
+					 &port->hw_frontend,
+					 &port->mem_frontend, adap);
+	if (ret < 0)
+		goto err_del_dvb_dmx;
+
+	ret = dvb_net_init(adap, &port->dvbnet, port->dmxdev.demux);
+	if (ret < 0)
+		goto err_del_dvb_dmxdev;
+	return 0;
+err_del_dvb_dmxdev:
+	dvbdemux->dmx.close(&dvbdemux->dmx);
+	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
+	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
+	dvb_dmxdev_release(&port->dmxdev);
+err_del_dvb_dmx:
+	dvb_dmx_release(&port->demux);
+err_del_dvb_register_adapter:
+	dvb_unregister_adapter(&port->dvb_adapter);
+	return ret;
+}
+
+static void smi_dvb_exit(struct smi_port *port)
+{
+	struct dvb_demux *dvbdemux = &port->demux;
+
+	dvb_net_release(&port->dvbnet);
+
+	dvbdemux->dmx.close(&dvbdemux->dmx);
+	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
+	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
+	dvb_dmxdev_release(&port->dmxdev);
+	dvb_dmx_release(&port->demux);
+
+	dvb_unregister_adapter(&port->dvb_adapter);
+}
+
+static int smi_port_attach(struct smi_dev *dev,
+		struct smi_port *port, int index)
+{
+	int ret, dmachs;
+
+	port->dev = dev;
+	port->idx = index;
+	port->fe_type = (index == 0) ? dev->info->fe_0 : dev->info->fe_1;
+	dmachs = (index == 0) ? dev->info->ts_0 : dev->info->ts_1;
+	/* port init.*/
+	ret = smi_port_init(port, dmachs);
+	if (ret < 0)
+		return ret;
+	/* dvb init.*/
+	ret = smi_dvb_init(port);
+	if (ret < 0)
+		goto err_del_port_init;
+	/* fe init.*/
+	ret = smi_fe_init(port);
+	if (ret < 0)
+		goto err_del_dvb_init;
+	return 0;
+err_del_dvb_init:
+	smi_dvb_exit(port);
+err_del_port_init:
+	smi_port_exit(port);
+	return ret;
+}
+
+static void smi_port_detach(struct smi_port *port)
+{
+	smi_fe_exit(port);
+	smi_dvb_exit(port);
+	smi_port_exit(port);
+}
+
+static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct smi_dev *dev;
+	int ret = -ENOMEM;
+
+	if (pci_enable_device(pdev) < 0)
+		return -ENODEV;
+
+	dev = kzalloc(sizeof(struct smi_dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto err_pci_disable_device;
+	}
+
+	dev->pci_dev = pdev;
+	pci_set_drvdata(pdev, dev);
+	dev->info = (struct smi_cfg_info *) id->driver_data;
+	dev_info(&dev->pci_dev->dev,
+		"card detected: %s\n", dev->info->name);
+
+	dev->nr = dev->info->type;
+	dev->lmmio = ioremap(pci_resource_start(dev->pci_dev, 0),
+			    pci_resource_len(dev->pci_dev, 0));
+	if (!dev->lmmio) {
+		ret = -ENOMEM;
+		goto err_kfree;
+	}
+
+	/* TODO: check whether a 64-bit DMA mask can be used; 32-bit for now */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret < 0)
+		goto err_pci_iounmap;
+
+	pci_set_master(pdev);
+
+	ret = smi_hw_init(dev);
+	if (ret < 0)
+		goto err_pci_iounmap;
+
+	ret = smi_i2c_init(dev);
+	if (ret < 0)
+		goto err_pci_iounmap;
+
+	if (dev->info->ts_0) {
+		ret = smi_port_attach(dev, &dev->ts_port[0], 0);
+		if (ret < 0)
+			goto err_del_i2c_adaptor;
+	}
+
+	if (dev->info->ts_1) {
+		ret = smi_port_attach(dev, &dev->ts_port[1], 1);
+		if (ret < 0)
+			goto err_del_port0_attach;
+	}
+
+	ret = smi_ir_init(dev);
+	if (ret < 0)
+		goto err_del_port1_attach;
+
+#ifdef CONFIG_PCI_MSI /* TODO: complete MSI interrupt handling */
+	if (pci_msi_enabled())
+		ret = pci_enable_msi(dev->pci_dev);
+	if (ret)
+		dev_info(&dev->pci_dev->dev, "MSI not available.\n");
+#endif
+
+	ret = request_irq(dev->pci_dev->irq, smi_irq_handler,
+			   IRQF_SHARED, "SMI_PCIE", dev);
+	if (ret < 0)
+		goto err_del_ir;
+
+	smi_ir_start(&dev->ir);
+	return 0;
+
+err_del_ir:
+	smi_ir_exit(dev);
+err_del_port1_attach:
+	if (dev->info->ts_1)
+		smi_port_detach(&dev->ts_port[1]);
+err_del_port0_attach:
+	if (dev->info->ts_0)
+		smi_port_detach(&dev->ts_port[0]);
+err_del_i2c_adaptor:
+	smi_i2c_exit(dev);
+err_pci_iounmap:
+	iounmap(dev->lmmio);
+err_kfree:
+	pci_set_drvdata(pdev, NULL);
+	kfree(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void smi_remove(struct pci_dev *pdev)
+{
+	struct smi_dev *dev = pci_get_drvdata(pdev);
+
+	smi_write(MSI_INT_ENA_CLR, ALL_INT);
+	free_irq(dev->pci_dev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+	pci_disable_msi(dev->pci_dev);
+#endif
+	if (dev->info->ts_1)
+		smi_port_detach(&dev->ts_port[1]);
+	if (dev->info->ts_0)
+		smi_port_detach(&dev->ts_port[0]);
+
+	smi_ir_exit(dev);
+	smi_i2c_exit(dev);
+	iounmap(dev->lmmio);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	kfree(dev);
+}
+
+/* DVBSky cards */
+static struct smi_cfg_info dvbsky_s950_cfg = {
+	.type = SMI_DVBSKY_S950,
+	.name = "DVBSky S950 V3",
+	.ts_0 = SMI_TS_NULL,
+	.ts_1 = SMI_TS_DMA_BOTH,
+	.fe_0 = DVBSKY_FE_NULL,
+	.fe_1 = DVBSKY_FE_M88DS3103,
+};
+
+static struct smi_cfg_info dvbsky_s952_cfg = {
+	.type = SMI_DVBSKY_S952,
+	.name = "DVBSky S952 V3",
+	.ts_0 = SMI_TS_DMA_BOTH,
+	.ts_1 = SMI_TS_DMA_BOTH,
+	.fe_0 = DVBSKY_FE_M88RS6000,
+	.fe_1 = DVBSKY_FE_M88RS6000,
+};
+
+static struct smi_cfg_info dvbsky_t9580_cfg = {
+	.type = SMI_DVBSKY_T9580,
+	.name = "DVBSky T9580 V3",
+	.ts_0 = SMI_TS_DMA_BOTH,
+	.ts_1 = SMI_TS_DMA_BOTH,
+	.fe_0 = DVBSKY_FE_SIT2,
+	.fe_1 = DVBSKY_FE_M88DS3103,
+};
+
+/* PCI IDs */
+#define SMI_ID(_subvend, _subdev, _driverdata) {	\
+	.vendor      = SMI_VID,    .device    = SMI_PID, \
+	.subvendor   = _subvend, .subdevice = _subdev, \
+	.driver_data = (unsigned long)&_driverdata }
+
+static const struct pci_device_id smi_id_table[] = {
+	SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
+	SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
+	SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, smi_id_table);
+
+static struct pci_driver smipcie_driver = {
+	.name = "SMI PCIe driver",
+	.id_table = smi_id_table,
+	.probe = smi_probe,
+	.remove = smi_remove,
+};
+
+module_pci_driver(smipcie_driver);
+
+MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("SMI PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie.c
deleted file mode 100644
index 143fd78..0000000
--- a/drivers/media/pci/smipcie/smipcie.c
+++ /dev/null
@@ -1,1102 +0,0 @@
-/*
- * SMI PCIe driver for DVBSky cards.
- *
- * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- */
-
-#include "smipcie.h"
-#include "m88ds3103.h"
-#include "ts2020.h"
-#include "m88rs6000t.h"
-#include "si2168.h"
-#include "si2157.h"
-
-DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-
-static int smi_hw_init(struct smi_dev *dev)
-{
-	u32 port_mux, port_ctrl, int_stat;
-
-	/* set port mux.*/
-	port_mux = smi_read(MUX_MODE_CTRL);
-	port_mux &= ~(rbPaMSMask);
-	port_mux |= rbPaMSDtvNoGpio;
-	port_mux &= ~(rbPbMSMask);
-	port_mux |= rbPbMSDtvNoGpio;
-	port_mux &= ~(0x0f0000);
-	port_mux |= 0x50000;
-	smi_write(MUX_MODE_CTRL, port_mux);
-
-	/* set DTV register.*/
-	/* Port A */
-	port_ctrl = smi_read(VIDEO_CTRL_STATUS_A);
-	port_ctrl &= ~0x01;
-	smi_write(VIDEO_CTRL_STATUS_A, port_ctrl);
-	port_ctrl = smi_read(MPEG2_CTRL_A);
-	port_ctrl &= ~0x40;
-	port_ctrl |= 0x80;
-	smi_write(MPEG2_CTRL_A, port_ctrl);
-	/* Port B */
-	port_ctrl = smi_read(VIDEO_CTRL_STATUS_B);
-	port_ctrl &= ~0x01;
-	smi_write(VIDEO_CTRL_STATUS_B, port_ctrl);
-	port_ctrl = smi_read(MPEG2_CTRL_B);
-	port_ctrl &= ~0x40;
-	port_ctrl |= 0x80;
-	smi_write(MPEG2_CTRL_B, port_ctrl);
-
-	/* disable and clear interrupt.*/
-	smi_write(MSI_INT_ENA_CLR, ALL_INT);
-	int_stat = smi_read(MSI_INT_STATUS);
-	smi_write(MSI_INT_STATUS_CLR, int_stat);
-
-	/* reset demod.*/
-	smi_clear(PERIPHERAL_CTRL, 0x0303);
-	msleep(50);
-	smi_set(PERIPHERAL_CTRL, 0x0101);
-	return 0;
-}
-
-/* i2c bit bus.*/
-static void smi_i2c_cfg(struct smi_dev *dev, u32 sw_ctl)
-{
-	u32 dwCtrl;
-
-	dwCtrl = smi_read(sw_ctl);
-	dwCtrl &= ~0x18; /* disable output.*/
-	dwCtrl |= 0x21; /* reset and software mode.*/
-	dwCtrl &= ~0xff00;
-	dwCtrl |= 0x6400;
-	smi_write(sw_ctl, dwCtrl);
-	msleep(20);
-	dwCtrl = smi_read(sw_ctl);
-	dwCtrl &= ~0x20;
-	smi_write(sw_ctl, dwCtrl);
-}
-
-static void smi_i2c_setsda(struct smi_dev *dev, int state, u32 sw_ctl)
-{
-	if (state) {
-		/* set as input.*/
-		smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
-	} else {
-		smi_clear(sw_ctl, SW_I2C_MSK_DAT_OUT);
-		/* set as output.*/
-		smi_set(sw_ctl, SW_I2C_MSK_DAT_EN);
-	}
-}
-
-static void smi_i2c_setscl(void *data, int state, u32 sw_ctl)
-{
-	struct smi_dev *dev = data;
-
-	if (state) {
-		/* set as input.*/
-		smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
-	} else {
-		smi_clear(sw_ctl, SW_I2C_MSK_CLK_OUT);
-		/* set as output.*/
-		smi_set(sw_ctl, SW_I2C_MSK_CLK_EN);
-	}
-}
-
-static int smi_i2c_getsda(void *data, u32 sw_ctl)
-{
-	struct smi_dev *dev = data;
-	/* set as input.*/
-	smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
-	udelay(1);
-	return (smi_read(sw_ctl) & SW_I2C_MSK_DAT_IN) ? 1 : 0;
-}
-
-static int smi_i2c_getscl(void *data, u32 sw_ctl)
-{
-	struct smi_dev *dev = data;
-	/* set as input.*/
-	smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
-	udelay(1);
-	return (smi_read(sw_ctl) & SW_I2C_MSK_CLK_IN) ? 1 : 0;
-}
-/* i2c 0.*/
-static void smi_i2c0_setsda(void *data, int state)
-{
-	struct smi_dev *dev = data;
-
-	smi_i2c_setsda(dev, state, I2C_A_SW_CTL);
-}
-
-static void smi_i2c0_setscl(void *data, int state)
-{
-	struct smi_dev *dev = data;
-
-	smi_i2c_setscl(dev, state, I2C_A_SW_CTL);
-}
-
-static int smi_i2c0_getsda(void *data)
-{
-	struct smi_dev *dev = data;
-
-	return	smi_i2c_getsda(dev, I2C_A_SW_CTL);
-}
-
-static int smi_i2c0_getscl(void *data)
-{
-	struct smi_dev *dev = data;
-
-	return	smi_i2c_getscl(dev, I2C_A_SW_CTL);
-}
-/* i2c 1.*/
-static void smi_i2c1_setsda(void *data, int state)
-{
-	struct smi_dev *dev = data;
-
-	smi_i2c_setsda(dev, state, I2C_B_SW_CTL);
-}
-
-static void smi_i2c1_setscl(void *data, int state)
-{
-	struct smi_dev *dev = data;
-
-	smi_i2c_setscl(dev, state, I2C_B_SW_CTL);
-}
-
-static int smi_i2c1_getsda(void *data)
-{
-	struct smi_dev *dev = data;
-
-	return	smi_i2c_getsda(dev, I2C_B_SW_CTL);
-}
-
-static int smi_i2c1_getscl(void *data)
-{
-	struct smi_dev *dev = data;
-
-	return	smi_i2c_getscl(dev, I2C_B_SW_CTL);
-}
-
-static int smi_i2c_init(struct smi_dev *dev)
-{
-	int ret;
-
-	/* i2c bus 0 */
-	smi_i2c_cfg(dev, I2C_A_SW_CTL);
-	i2c_set_adapdata(&dev->i2c_bus[0], dev);
-	strcpy(dev->i2c_bus[0].name, "SMI-I2C0");
-	dev->i2c_bus[0].owner = THIS_MODULE;
-	dev->i2c_bus[0].dev.parent = &dev->pci_dev->dev;
-	dev->i2c_bus[0].algo_data = &dev->i2c_bit[0];
-	dev->i2c_bit[0].data = dev;
-	dev->i2c_bit[0].setsda = smi_i2c0_setsda;
-	dev->i2c_bit[0].setscl = smi_i2c0_setscl;
-	dev->i2c_bit[0].getsda = smi_i2c0_getsda;
-	dev->i2c_bit[0].getscl = smi_i2c0_getscl;
-	dev->i2c_bit[0].udelay = 12;
-	dev->i2c_bit[0].timeout = 10;
-	/* Raise SCL and SDA */
-	smi_i2c0_setsda(dev, 1);
-	smi_i2c0_setscl(dev, 1);
-
-	ret = i2c_bit_add_bus(&dev->i2c_bus[0]);
-	if (ret < 0)
-		return ret;
-
-	/* i2c bus 1 */
-	smi_i2c_cfg(dev, I2C_B_SW_CTL);
-	i2c_set_adapdata(&dev->i2c_bus[1], dev);
-	strcpy(dev->i2c_bus[1].name, "SMI-I2C1");
-	dev->i2c_bus[1].owner = THIS_MODULE;
-	dev->i2c_bus[1].dev.parent = &dev->pci_dev->dev;
-	dev->i2c_bus[1].algo_data = &dev->i2c_bit[1];
-	dev->i2c_bit[1].data = dev;
-	dev->i2c_bit[1].setsda = smi_i2c1_setsda;
-	dev->i2c_bit[1].setscl = smi_i2c1_setscl;
-	dev->i2c_bit[1].getsda = smi_i2c1_getsda;
-	dev->i2c_bit[1].getscl = smi_i2c1_getscl;
-	dev->i2c_bit[1].udelay = 12;
-	dev->i2c_bit[1].timeout = 10;
-	/* Raise SCL and SDA */
-	smi_i2c1_setsda(dev, 1);
-	smi_i2c1_setscl(dev, 1);
-
-	ret = i2c_bit_add_bus(&dev->i2c_bus[1]);
-	if (ret < 0)
-		i2c_del_adapter(&dev->i2c_bus[0]);
-
-	return ret;
-}
-
-static void smi_i2c_exit(struct smi_dev *dev)
-{
-	i2c_del_adapter(&dev->i2c_bus[0]);
-	i2c_del_adapter(&dev->i2c_bus[1]);
-}
-
-static int smi_read_eeprom(struct i2c_adapter *i2c, u16 reg, u8 *data, u16 size)
-{
-	int ret;
-	u8 b0[2] = { (reg >> 8) & 0xff, reg & 0xff };
-
-	struct i2c_msg msg[] = {
-		{ .addr = 0x50, .flags = 0,
-			.buf = b0, .len = 2 },
-		{ .addr = 0x50, .flags = I2C_M_RD,
-			.buf = data, .len = size }
-	};
-
-	ret = i2c_transfer(i2c, msg, 2);
-
-	if (ret != 2) {
-		dev_err(&i2c->dev, "%s: reg=0x%x (error=%d)\n",
-			__func__, reg, ret);
-		return ret;
-	}
-	return ret;
-}
-
-/* ts port interrupt operations */
-static void smi_port_disableInterrupt(struct smi_port *port)
-{
-	struct smi_dev *dev = port->dev;
-
-	smi_write(MSI_INT_ENA_CLR,
-		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
-}
-
-static void smi_port_enableInterrupt(struct smi_port *port)
-{
-	struct smi_dev *dev = port->dev;
-
-	smi_write(MSI_INT_ENA_SET,
-		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
-}
-
-static void smi_port_clearInterrupt(struct smi_port *port)
-{
-	struct smi_dev *dev = port->dev;
-
-	smi_write(MSI_INT_STATUS_CLR,
-		(port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
-}
-
-/* tasklet handler: DMA data to dmx.*/
-static void smi_dma_xfer(unsigned long data)
-{
-	struct smi_port *port = (struct smi_port *) data;
-	struct smi_dev *dev = port->dev;
-	u32 intr_status, finishedData, dmaManagement;
-	u8 dmaChan0State, dmaChan1State;
-
-	intr_status = port->_int_status;
-	dmaManagement = smi_read(port->DMA_MANAGEMENT);
-	dmaChan0State = (u8)((dmaManagement & 0x00000030) >> 4);
-	dmaChan1State = (u8)((dmaManagement & 0x00300000) >> 20);
-
-	/* CH-0 DMA interrupt.*/
-	if ((intr_status & port->_dmaInterruptCH0) && (dmaChan0State == 0x01)) {
-		dev_dbg(&dev->pci_dev->dev,
-			"Port[%d]-DMA CH0 engine complete successful !\n",
-			port->idx);
-		finishedData = smi_read(port->DMA_CHAN0_TRANS_STATE);
-		finishedData &= 0x003FFFFF;
-		/* value of DMA_PORT0_CHAN0_TRANS_STATE register [21:0]
-		 * indicate dma total transfer length and
-		 * zero of [21:0] indicate dma total transfer length
-		 * equal to 0x400000 (4MB)*/
-		if (finishedData == 0)
-			finishedData = 0x00400000;
-		if (finishedData != SMI_TS_DMA_BUF_SIZE) {
-			dev_dbg(&dev->pci_dev->dev,
-				"DMA CH0 engine complete length mismatched, finish data=%d !\n",
-				finishedData);
-		}
-		dvb_dmx_swfilter_packets(&port->demux,
-			port->cpu_addr[0], (finishedData / 188));
-		/*dvb_dmx_swfilter(&port->demux,
-			port->cpu_addr[0], finishedData);*/
-	}
-	/* CH-1 DMA interrupt.*/
-	if ((intr_status & port->_dmaInterruptCH1) && (dmaChan1State == 0x01)) {
-		dev_dbg(&dev->pci_dev->dev,
-			"Port[%d]-DMA CH1 engine complete successful !\n",
-			port->idx);
-		finishedData = smi_read(port->DMA_CHAN1_TRANS_STATE);
-		finishedData &= 0x003FFFFF;
-		/* value of DMA_PORT0_CHAN0_TRANS_STATE register [21:0]
-		 * indicate dma total transfer length and
-		 * zero of [21:0] indicate dma total transfer length
-		 * equal to 0x400000 (4MB)*/
-		if (finishedData == 0)
-			finishedData = 0x00400000;
-		if (finishedData != SMI_TS_DMA_BUF_SIZE) {
-			dev_dbg(&dev->pci_dev->dev,
-				"DMA CH1 engine complete length mismatched, finish data=%d !\n",
-				finishedData);
-		}
-		dvb_dmx_swfilter_packets(&port->demux,
-			port->cpu_addr[1], (finishedData / 188));
-		/*dvb_dmx_swfilter(&port->demux,
-			port->cpu_addr[1], finishedData);*/
-	}
-	/* restart DMA.*/
-	if (intr_status & port->_dmaInterruptCH0)
-		dmaManagement |= 0x00000002;
-	if (intr_status & port->_dmaInterruptCH1)
-		dmaManagement |= 0x00020000;
-	smi_write(port->DMA_MANAGEMENT, dmaManagement);
-	/* Re-enable interrupts */
-	smi_port_enableInterrupt(port);
-}
-
-static void smi_port_dma_free(struct smi_port *port)
-{
-	if (port->cpu_addr[0]) {
-		pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
-				    port->cpu_addr[0], port->dma_addr[0]);
-		port->cpu_addr[0] = NULL;
-	}
-	if (port->cpu_addr[1]) {
-		pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
-				    port->cpu_addr[1], port->dma_addr[1]);
-		port->cpu_addr[1] = NULL;
-	}
-}
-
-static int smi_port_init(struct smi_port *port, int dmaChanUsed)
-{
-	dev_dbg(&port->dev->pci_dev->dev,
-		"%s, port %d, dmaused %d\n", __func__, port->idx, dmaChanUsed);
-	port->enable = 0;
-	if (port->idx == 0) {
-		/* Port A */
-		port->_dmaInterruptCH0 = dmaChanUsed & 0x01;
-		port->_dmaInterruptCH1 = dmaChanUsed & 0x02;
-
-		port->DMA_CHAN0_ADDR_LOW	= DMA_PORTA_CHAN0_ADDR_LOW;
-		port->DMA_CHAN0_ADDR_HI		= DMA_PORTA_CHAN0_ADDR_HI;
-		port->DMA_CHAN0_TRANS_STATE	= DMA_PORTA_CHAN0_TRANS_STATE;
-		port->DMA_CHAN0_CONTROL		= DMA_PORTA_CHAN0_CONTROL;
-		port->DMA_CHAN1_ADDR_LOW	= DMA_PORTA_CHAN1_ADDR_LOW;
-		port->DMA_CHAN1_ADDR_HI		= DMA_PORTA_CHAN1_ADDR_HI;
-		port->DMA_CHAN1_TRANS_STATE	= DMA_PORTA_CHAN1_TRANS_STATE;
-		port->DMA_CHAN1_CONTROL		= DMA_PORTA_CHAN1_CONTROL;
-		port->DMA_MANAGEMENT		= DMA_PORTA_MANAGEMENT;
-	} else {
-		/* Port B */
-		port->_dmaInterruptCH0 = (dmaChanUsed << 2) & 0x04;
-		port->_dmaInterruptCH1 = (dmaChanUsed << 2) & 0x08;
-
-		port->DMA_CHAN0_ADDR_LOW	= DMA_PORTB_CHAN0_ADDR_LOW;
-		port->DMA_CHAN0_ADDR_HI		= DMA_PORTB_CHAN0_ADDR_HI;
-		port->DMA_CHAN0_TRANS_STATE	= DMA_PORTB_CHAN0_TRANS_STATE;
-		port->DMA_CHAN0_CONTROL		= DMA_PORTB_CHAN0_CONTROL;
-		port->DMA_CHAN1_ADDR_LOW	= DMA_PORTB_CHAN1_ADDR_LOW;
-		port->DMA_CHAN1_ADDR_HI		= DMA_PORTB_CHAN1_ADDR_HI;
-		port->DMA_CHAN1_TRANS_STATE	= DMA_PORTB_CHAN1_TRANS_STATE;
-		port->DMA_CHAN1_CONTROL		= DMA_PORTB_CHAN1_CONTROL;
-		port->DMA_MANAGEMENT		= DMA_PORTB_MANAGEMENT;
-	}
-
-	if (port->_dmaInterruptCH0) {
-		port->cpu_addr[0] = pci_alloc_consistent(port->dev->pci_dev,
-					SMI_TS_DMA_BUF_SIZE,
-					&port->dma_addr[0]);
-		if (!port->cpu_addr[0]) {
-			dev_err(&port->dev->pci_dev->dev,
-				"Port[%d] DMA CH0 memory allocation failed!\n",
-				port->idx);
-			goto err;
-		}
-	}
-
-	if (port->_dmaInterruptCH1) {
-		port->cpu_addr[1] = pci_alloc_consistent(port->dev->pci_dev,
-					SMI_TS_DMA_BUF_SIZE,
-					&port->dma_addr[1]);
-		if (!port->cpu_addr[1]) {
-			dev_err(&port->dev->pci_dev->dev,
-				"Port[%d] DMA CH1 memory allocation failed!\n",
-				port->idx);
-			goto err;
-		}
-	}
-
-	smi_port_disableInterrupt(port);
-	tasklet_init(&port->tasklet, smi_dma_xfer, (unsigned long)port);
-	tasklet_disable(&port->tasklet);
-	port->enable = 1;
-	return 0;
-err:
-	smi_port_dma_free(port);
-	return -ENOMEM;
-}
-
-static void smi_port_exit(struct smi_port *port)
-{
-	smi_port_disableInterrupt(port);
-	tasklet_kill(&port->tasklet);
-	smi_port_dma_free(port);
-	port->enable = 0;
-}
-
-static int smi_port_irq(struct smi_port *port, u32 int_status)
-{
-	u32 port_req_irq = port->_dmaInterruptCH0 | port->_dmaInterruptCH1;
-	int handled = 0;
-
-	if (int_status & port_req_irq) {
-		smi_port_disableInterrupt(port);
-		port->_int_status = int_status;
-		smi_port_clearInterrupt(port);
-		tasklet_schedule(&port->tasklet);
-		handled = 1;
-	}
-	return handled;
-}
-
-static irqreturn_t smi_irq_handler(int irq, void *dev_id)
-{
-	struct smi_dev *dev = dev_id;
-	struct smi_port *port0 = &dev->ts_port[0];
-	struct smi_port *port1 = &dev->ts_port[1];
-	int handled = 0;
-
-	u32 intr_status = smi_read(MSI_INT_STATUS);
-
-	/* ts0 interrupt.*/
-	if (dev->info->ts_0)
-		handled += smi_port_irq(port0, intr_status);
-
-	/* ts1 interrupt.*/
-	if (dev->info->ts_1)
-		handled += smi_port_irq(port1, intr_status);
-
-	return IRQ_RETVAL(handled);
-}
-
-static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
-			struct i2c_board_info *info)
-{
-	struct i2c_client *client;
-
-	request_module(info->type);
-	client = i2c_new_device(adapter, info);
-	if (client == NULL || client->dev.driver == NULL)
-		goto err_add_i2c_client;
-
-	if (!try_module_get(client->dev.driver->owner)) {
-		i2c_unregister_device(client);
-		goto err_add_i2c_client;
-	}
-	return client;
-
-err_add_i2c_client:
-	client = NULL;
-	return client;
-}
-
-static void smi_del_i2c_client(struct i2c_client *client)
-{
-	module_put(client->dev.driver->owner);
-	i2c_unregister_device(client);
-}
-
-static const struct m88ds3103_config smi_dvbsky_m88ds3103_cfg = {
-	.i2c_addr = 0x68,
-	.clock = 27000000,
-	.i2c_wr_max = 33,
-	.clock_out = 0,
-	.ts_mode = M88DS3103_TS_PARALLEL,
-	.ts_clk = 16000,
-	.ts_clk_pol = 1,
-	.agc = 0x99,
-	.lnb_hv_pol = 0,
-	.lnb_en_pol = 1,
-};
-
-static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
-{
-	int ret = 0;
-	struct smi_dev *dev = port->dev;
-	struct i2c_adapter *i2c;
-	/* tuner I2C module */
-	struct i2c_adapter *tuner_i2c_adapter;
-	struct i2c_client *tuner_client;
-	struct i2c_board_info tuner_info;
-	struct ts2020_config ts2020_config = {};
-	memset(&tuner_info, 0, sizeof(struct i2c_board_info));
-	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
-
-	/* attach demod */
-	port->fe = dvb_attach(m88ds3103_attach,
-			&smi_dvbsky_m88ds3103_cfg, i2c, &tuner_i2c_adapter);
-	if (!port->fe) {
-		ret = -ENODEV;
-		return ret;
-	}
-	/* attach tuner */
-	ts2020_config.fe = port->fe;
-	strlcpy(tuner_info.type, "ts2020", I2C_NAME_SIZE);
-	tuner_info.addr = 0x60;
-	tuner_info.platform_data = &ts2020_config;
-	tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
-	if (!tuner_client) {
-		ret = -ENODEV;
-		goto err_tuner_i2c_device;
-	}
-
-	/* delegate signal strength measurement to tuner */
-	port->fe->ops.read_signal_strength =
-			port->fe->ops.tuner_ops.get_rf_strength;
-
-	port->i2c_client_tuner = tuner_client;
-	return ret;
-
-err_tuner_i2c_device:
-	dvb_frontend_detach(port->fe);
-	return ret;
-}
-
-static const struct m88ds3103_config smi_dvbsky_m88rs6000_cfg = {
-	.i2c_addr = 0x69,
-	.clock = 27000000,
-	.i2c_wr_max = 33,
-	.ts_mode = M88DS3103_TS_PARALLEL,
-	.ts_clk = 16000,
-	.ts_clk_pol = 1,
-	.agc = 0x99,
-	.lnb_hv_pol = 0,
-	.lnb_en_pol = 1,
-};
-
-static int smi_dvbsky_m88rs6000_fe_attach(struct smi_port *port)
-{
-	int ret = 0;
-	struct smi_dev *dev = port->dev;
-	struct i2c_adapter *i2c;
-	/* tuner I2C module */
-	struct i2c_adapter *tuner_i2c_adapter;
-	struct i2c_client *tuner_client;
-	struct i2c_board_info tuner_info;
-	struct m88rs6000t_config m88rs6000t_config;
-
-	memset(&tuner_info, 0, sizeof(struct i2c_board_info));
-	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
-
-	/* attach demod */
-	port->fe = dvb_attach(m88ds3103_attach,
-			&smi_dvbsky_m88rs6000_cfg, i2c, &tuner_i2c_adapter);
-	if (!port->fe) {
-		ret = -ENODEV;
-		return ret;
-	}
-	/* attach tuner */
-	m88rs6000t_config.fe = port->fe;
-	strlcpy(tuner_info.type, "m88rs6000t", I2C_NAME_SIZE);
-	tuner_info.addr = 0x21;
-	tuner_info.platform_data = &m88rs6000t_config;
-	tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
-	if (!tuner_client) {
-		ret = -ENODEV;
-		goto err_tuner_i2c_device;
-	}
-
-	/* delegate signal strength measurement to tuner */
-	port->fe->ops.read_signal_strength =
-			port->fe->ops.tuner_ops.get_rf_strength;
-
-	port->i2c_client_tuner = tuner_client;
-	return ret;
-
-err_tuner_i2c_device:
-	dvb_frontend_detach(port->fe);
-	return ret;
-}
-
-static int smi_dvbsky_sit2_fe_attach(struct smi_port *port)
-{
-	int ret = 0;
-	struct smi_dev *dev = port->dev;
-	struct i2c_adapter *i2c;
-	struct i2c_adapter *tuner_i2c_adapter;
-	struct i2c_client *client_tuner, *client_demod;
-	struct i2c_board_info client_info;
-	struct si2168_config si2168_config;
-	struct si2157_config si2157_config;
-
-	/* select i2c bus */
-	i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
-
-	/* attach demod */
-	memset(&si2168_config, 0, sizeof(si2168_config));
-	si2168_config.i2c_adapter = &tuner_i2c_adapter;
-	si2168_config.fe = &port->fe;
-	si2168_config.ts_mode = SI2168_TS_PARALLEL;
-
-	memset(&client_info, 0, sizeof(struct i2c_board_info));
-	strlcpy(client_info.type, "si2168", I2C_NAME_SIZE);
-	client_info.addr = 0x64;
-	client_info.platform_data = &si2168_config;
-
-	client_demod = smi_add_i2c_client(i2c, &client_info);
-	if (!client_demod) {
-		ret = -ENODEV;
-		return ret;
-	}
-	port->i2c_client_demod = client_demod;
-
-	/* attach tuner */
-	memset(&si2157_config, 0, sizeof(si2157_config));
-	si2157_config.fe = port->fe;
-	si2157_config.if_port = 1;
-
-	memset(&client_info, 0, sizeof(struct i2c_board_info));
-	strlcpy(client_info.type, "si2157", I2C_NAME_SIZE);
-	client_info.addr = 0x60;
-	client_info.platform_data = &si2157_config;
-
-	client_tuner = smi_add_i2c_client(tuner_i2c_adapter, &client_info);
-	if (!client_tuner) {
-		smi_del_i2c_client(port->i2c_client_demod);
-		port->i2c_client_demod = NULL;
-		ret = -ENODEV;
-		return ret;
-	}
-	port->i2c_client_tuner = client_tuner;
-	return ret;
-}
-
-static int smi_fe_init(struct smi_port *port)
-{
-	int ret = 0;
-	struct smi_dev *dev = port->dev;
-	struct dvb_adapter *adap = &port->dvb_adapter;
-	u8 mac_ee[16];
-
-	dev_dbg(&port->dev->pci_dev->dev,
-		"%s: port %d, fe_type = %d\n",
-		__func__, port->idx, port->fe_type);
-	switch (port->fe_type) {
-	case DVBSKY_FE_M88DS3103:
-		ret = smi_dvbsky_m88ds3103_fe_attach(port);
-		break;
-	case DVBSKY_FE_M88RS6000:
-		ret = smi_dvbsky_m88rs6000_fe_attach(port);
-		break;
-	case DVBSKY_FE_SIT2:
-		ret = smi_dvbsky_sit2_fe_attach(port);
-		break;
-	}
-	if (ret < 0)
-		return ret;
-
-	/* register dvb frontend */
-	ret = dvb_register_frontend(adap, port->fe);
-	if (ret < 0) {
-		if (port->i2c_client_tuner)
-			smi_del_i2c_client(port->i2c_client_tuner);
-		if (port->i2c_client_demod)
-			smi_del_i2c_client(port->i2c_client_demod);
-		dvb_frontend_detach(port->fe);
-		return ret;
-	}
-	/* init MAC.*/
-	ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
-	dev_info(&port->dev->pci_dev->dev,
-		"DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
-	memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
-	return ret;
-}
-
-static void smi_fe_exit(struct smi_port *port)
-{
-	dvb_unregister_frontend(port->fe);
-	/* remove I2C demod and tuner */
-	if (port->i2c_client_tuner)
-		smi_del_i2c_client(port->i2c_client_tuner);
-	if (port->i2c_client_demod)
-		smi_del_i2c_client(port->i2c_client_demod);
-	dvb_frontend_detach(port->fe);
-}
-
-static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
-			    int (*start_feed)(struct dvb_demux_feed *),
-			    int (*stop_feed)(struct dvb_demux_feed *),
-			    void *priv)
-{
-	dvbdemux->priv = priv;
-
-	dvbdemux->filternum = 256;
-	dvbdemux->feednum = 256;
-	dvbdemux->start_feed = start_feed;
-	dvbdemux->stop_feed = stop_feed;
-	dvbdemux->write_to_decoder = NULL;
-	dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
-				      DMX_SECTION_FILTERING |
-				      DMX_MEMORY_BASED_FILTERING);
-	return dvb_dmx_init(dvbdemux);
-}
-
-static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
-			       struct dvb_demux *dvbdemux,
-			       struct dmx_frontend *hw_frontend,
-			       struct dmx_frontend *mem_frontend,
-			       struct dvb_adapter *dvb_adapter)
-{
-	int ret;
-
-	dmxdev->filternum = 256;
-	dmxdev->demux = &dvbdemux->dmx;
-	dmxdev->capabilities = 0;
-	ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
-	if (ret < 0)
-		return ret;
-
-	hw_frontend->source = DMX_FRONTEND_0;
-	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
-	mem_frontend->source = DMX_MEMORY_FE;
-	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
-	return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
-}
-
-static u32 smi_config_DMA(struct smi_port *port)
-{
-	struct smi_dev *dev = port->dev;
-	u32 totalLength = 0, dmaMemPtrLow, dmaMemPtrHi, dmaCtlReg;
-	u8 chanLatencyTimer = 0, dmaChanEnable = 1, dmaTransStart = 1;
-	u32 dmaManagement = 0, tlpTransUnit = DMA_TRANS_UNIT_188;
-	u8 tlpTc = 0, tlpTd = 1, tlpEp = 0, tlpAttr = 0;
-	u64 mem;
-
-	dmaManagement = smi_read(port->DMA_MANAGEMENT);
-	/* Setup Channel-0 */
-	if (port->_dmaInterruptCH0) {
-		totalLength = SMI_TS_DMA_BUF_SIZE;
-		mem = port->dma_addr[0];
-		dmaMemPtrLow = mem & 0xffffffff;
-		dmaMemPtrHi = mem >> 32;
-		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
-			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
-		dmaManagement |= dmaChanEnable | (dmaTransStart << 1)
-			| (chanLatencyTimer << 8);
-		/* write DMA register, start DMA engine */
-		smi_write(port->DMA_CHAN0_ADDR_LOW, dmaMemPtrLow);
-		smi_write(port->DMA_CHAN0_ADDR_HI, dmaMemPtrHi);
-		smi_write(port->DMA_CHAN0_CONTROL, dmaCtlReg);
-	}
-	/* Setup Channel-1 */
-	if (port->_dmaInterruptCH1) {
-		totalLength = SMI_TS_DMA_BUF_SIZE;
-		mem = port->dma_addr[1];
-		dmaMemPtrLow = mem & 0xffffffff;
-		dmaMemPtrHi = mem >> 32;
-		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
-			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
-		dmaManagement |= (dmaChanEnable << 16) | (dmaTransStart << 17)
-			| (chanLatencyTimer << 24);
-		/* write DMA register, start DMA engine */
-		smi_write(port->DMA_CHAN1_ADDR_LOW, dmaMemPtrLow);
-		smi_write(port->DMA_CHAN1_ADDR_HI, dmaMemPtrHi);
-		smi_write(port->DMA_CHAN1_CONTROL, dmaCtlReg);
-	}
-	return dmaManagement;
-}
-
-static int smi_start_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
-	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
-	struct smi_port *port = dvbdmx->priv;
-	struct smi_dev *dev = port->dev;
-	u32 dmaManagement;
-
-	if (port->users++ == 0) {
-		dmaManagement = smi_config_DMA(port);
-		smi_port_clearInterrupt(port);
-		smi_port_enableInterrupt(port);
-		smi_write(port->DMA_MANAGEMENT, dmaManagement);
-		tasklet_enable(&port->tasklet);
-	}
-	return port->users;
-}
-
-static int smi_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
-	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
-	struct smi_port *port = dvbdmx->priv;
-	struct smi_dev *dev = port->dev;
-
-	if (--port->users)
-		return port->users;
-
-	tasklet_disable(&port->tasklet);
-	smi_port_disableInterrupt(port);
-	smi_clear(port->DMA_MANAGEMENT, 0x30003);
-	return 0;
-}
-
-static int smi_dvb_init(struct smi_port *port)
-{
-	int ret;
-	struct dvb_adapter *adap = &port->dvb_adapter;
-	struct dvb_demux *dvbdemux = &port->demux;
-
-	dev_dbg(&port->dev->pci_dev->dev,
-		"%s, port %d\n", __func__, port->idx);
-
-	ret = dvb_register_adapter(adap, "SMI_DVB", THIS_MODULE,
-				   &port->dev->pci_dev->dev,
-				   adapter_nr);
-	if (ret < 0) {
-		dev_err(&port->dev->pci_dev->dev, "Fail to register DVB adapter.\n");
-		return ret;
-	}
-	ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
-				      smi_start_feed,
-				      smi_stop_feed, port);
-	if (ret < 0)
-		goto err_del_dvb_register_adapter;
-
-	ret = my_dvb_dmxdev_ts_card_init(&port->dmxdev, &port->demux,
-					 &port->hw_frontend,
-					 &port->mem_frontend, adap);
-	if (ret < 0)
-		goto err_del_dvb_dmx;
-
-	ret = dvb_net_init(adap, &port->dvbnet, port->dmxdev.demux);
-	if (ret < 0)
-		goto err_del_dvb_dmxdev;
-	return 0;
-err_del_dvb_dmxdev:
-	dvbdemux->dmx.close(&dvbdemux->dmx);
-	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
-	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
-	dvb_dmxdev_release(&port->dmxdev);
-err_del_dvb_dmx:
-	dvb_dmx_release(&port->demux);
-err_del_dvb_register_adapter:
-	dvb_unregister_adapter(&port->dvb_adapter);
-	return ret;
-}
-
-static void smi_dvb_exit(struct smi_port *port)
-{
-	struct dvb_demux *dvbdemux = &port->demux;
-
-	dvb_net_release(&port->dvbnet);
-
-	dvbdemux->dmx.close(&dvbdemux->dmx);
-	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
-	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
-	dvb_dmxdev_release(&port->dmxdev);
-	dvb_dmx_release(&port->demux);
-
-	dvb_unregister_adapter(&port->dvb_adapter);
-}
-
-static int smi_port_attach(struct smi_dev *dev,
-		struct smi_port *port, int index)
-{
-	int ret, dmachs;
-
-	port->dev = dev;
-	port->idx = index;
-	port->fe_type = (index == 0) ? dev->info->fe_0 : dev->info->fe_1;
-	dmachs = (index == 0) ? dev->info->ts_0 : dev->info->ts_1;
-	/* port init.*/
-	ret = smi_port_init(port, dmachs);
-	if (ret < 0)
-		return ret;
-	/* dvb init.*/
-	ret = smi_dvb_init(port);
-	if (ret < 0)
-		goto err_del_port_init;
-	/* fe init.*/
-	ret = smi_fe_init(port);
-	if (ret < 0)
-		goto err_del_dvb_init;
-	return 0;
-err_del_dvb_init:
-	smi_dvb_exit(port);
-err_del_port_init:
-	smi_port_exit(port);
-	return ret;
-}
-
-static void smi_port_detach(struct smi_port *port)
-{
-	smi_fe_exit(port);
-	smi_dvb_exit(port);
-	smi_port_exit(port);
-}
-
-static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	struct smi_dev *dev;
-	int ret = -ENOMEM;
-
-	if (pci_enable_device(pdev) < 0)
-		return -ENODEV;
-
-	dev = kzalloc(sizeof(struct smi_dev), GFP_KERNEL);
-	if (!dev) {
-		ret = -ENOMEM;
-		goto err_pci_disable_device;
-	}
-
-	dev->pci_dev = pdev;
-	pci_set_drvdata(pdev, dev);
-	dev->info = (struct smi_cfg_info *) id->driver_data;
-	dev_info(&dev->pci_dev->dev,
-		"card detected: %s\n", dev->info->name);
-
-	dev->nr = dev->info->type;
-	dev->lmmio = ioremap(pci_resource_start(dev->pci_dev, 0),
-			    pci_resource_len(dev->pci_dev, 0));
-	if (!dev->lmmio) {
-		ret = -ENOMEM;
-		goto err_kfree;
-	}
-
-	/* should we set to 32bit DMA? */
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret < 0)
-		goto err_pci_iounmap;
-
-	pci_set_master(pdev);
-
-	ret = smi_hw_init(dev);
-	if (ret < 0)
-		goto err_pci_iounmap;
-
-	ret = smi_i2c_init(dev);
-	if (ret < 0)
-		goto err_pci_iounmap;
-
-	if (dev->info->ts_0) {
-		ret = smi_port_attach(dev, &dev->ts_port[0], 0);
-		if (ret < 0)
-			goto err_del_i2c_adaptor;
-	}
-
-	if (dev->info->ts_1) {
-		ret = smi_port_attach(dev, &dev->ts_port[1], 1);
-		if (ret < 0)
-			goto err_del_port0_attach;
-	}
-
-#ifdef CONFIG_PCI_MSI /* to do msi interrupt.???*/
-	if (pci_msi_enabled())
-		ret = pci_enable_msi(dev->pci_dev);
-	if (ret)
-		dev_info(&dev->pci_dev->dev, "MSI not available.\n");
-#endif
-
-	ret = request_irq(dev->pci_dev->irq, smi_irq_handler,
-			   IRQF_SHARED, "SMI_PCIE", dev);
-	if (ret < 0)
-		goto err_del_port1_attach;
-
-	return 0;
-
-err_del_port1_attach:
-	if (dev->info->ts_1)
-		smi_port_detach(&dev->ts_port[1]);
-err_del_port0_attach:
-	if (dev->info->ts_0)
-		smi_port_detach(&dev->ts_port[0]);
-err_del_i2c_adaptor:
-	smi_i2c_exit(dev);
-err_pci_iounmap:
-	iounmap(dev->lmmio);
-err_kfree:
-	pci_set_drvdata(pdev, NULL);
-	kfree(dev);
-err_pci_disable_device:
-	pci_disable_device(pdev);
-	return ret;
-}
-
-static void smi_remove(struct pci_dev *pdev)
-{
-	struct smi_dev *dev = pci_get_drvdata(pdev);
-
-	smi_write(MSI_INT_ENA_CLR, ALL_INT);
-	free_irq(dev->pci_dev->irq, dev);
-#ifdef CONFIG_PCI_MSI
-	pci_disable_msi(dev->pci_dev);
-#endif
-	if (dev->info->ts_1)
-		smi_port_detach(&dev->ts_port[1]);
-	if (dev->info->ts_0)
-		smi_port_detach(&dev->ts_port[0]);
-
-	smi_i2c_exit(dev);
-	iounmap(dev->lmmio);
-	pci_set_drvdata(pdev, NULL);
-	pci_disable_device(pdev);
-	kfree(dev);
-}
-
-/* DVBSky cards */
-static struct smi_cfg_info dvbsky_s950_cfg = {
-	.type = SMI_DVBSKY_S950,
-	.name = "DVBSky S950 V3",
-	.ts_0 = SMI_TS_NULL,
-	.ts_1 = SMI_TS_DMA_BOTH,
-	.fe_0 = DVBSKY_FE_NULL,
-	.fe_1 = DVBSKY_FE_M88DS3103,
-};
-
-static struct smi_cfg_info dvbsky_s952_cfg = {
-	.type = SMI_DVBSKY_S952,
-	.name = "DVBSky S952 V3",
-	.ts_0 = SMI_TS_DMA_BOTH,
-	.ts_1 = SMI_TS_DMA_BOTH,
-	.fe_0 = DVBSKY_FE_M88RS6000,
-	.fe_1 = DVBSKY_FE_M88RS6000,
-};
-
-static struct smi_cfg_info dvbsky_t9580_cfg = {
-	.type = SMI_DVBSKY_T9580,
-	.name = "DVBSky T9580 V3",
-	.ts_0 = SMI_TS_DMA_BOTH,
-	.ts_1 = SMI_TS_DMA_BOTH,
-	.fe_0 = DVBSKY_FE_SIT2,
-	.fe_1 = DVBSKY_FE_M88DS3103,
-};
-
-/* PCI IDs */
-#define SMI_ID(_subvend, _subdev, _driverdata) {	\
-	.vendor      = SMI_VID,    .device    = SMI_PID, \
-	.subvendor   = _subvend, .subdevice = _subdev, \
-	.driver_data = (unsigned long)&_driverdata }
-
-static const struct pci_device_id smi_id_table[] = {
-	SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
-	SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
-	SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
-	{0}
-};
-MODULE_DEVICE_TABLE(pci, smi_id_table);
-
-static struct pci_driver smipcie_driver = {
-	.name = "SMI PCIe driver",
-	.id_table = smi_id_table,
-	.probe = smi_probe,
-	.remove = smi_remove,
-};
-
-module_pci_driver(smipcie_driver);
-
-MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
-MODULE_DESCRIPTION("SMI PCIe driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 10cdf20..68cdda2 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -234,6 +234,17 @@
 	int fe_1;
 };
 
+struct smi_rc {
+	struct smi_dev *dev;
+	struct rc_dev *rc_dev;
+	char input_phys[64];
+	char input_name[64];
+	struct work_struct work;
+	u8 irData[256];
+
+	int users;
+};
+
 struct smi_port {
 	struct smi_dev *dev;
 	int idx;
@@ -284,6 +295,9 @@
 	/* i2c */
 	struct i2c_adapter i2c_bus[2];
 	struct i2c_algo_bit_data i2c_bit[2];
+
+	/* ir */
+	struct smi_rc ir;
 };
 
 #define smi_read(reg)             readl(dev->lmmio + ((reg)>>2))
@@ -296,4 +310,9 @@
 #define smi_set(reg, bit)          smi_andor((reg), (bit), (bit))
 #define smi_clear(reg, bit)        smi_andor((reg), (bit), 0)
 
+int smi_ir_irq(struct smi_rc *ir, u32 int_status);
+void smi_ir_start(struct smi_rc *ir);
+void smi_ir_exit(struct smi_dev *dev);
+int smi_ir_init(struct smi_dev *dev);
+
 #endif /* #ifndef _SMI_PCIE_H_ */
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 570d119..f50d072 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -134,23 +134,11 @@
 
 static void free_solo_dev(struct solo_dev *solo_dev)
 {
-	struct pci_dev *pdev;
-
-	if (!solo_dev)
-		return;
+	struct pci_dev *pdev = solo_dev->pdev;
 
 	if (solo_dev->dev.parent)
 		device_unregister(&solo_dev->dev);
 
-	pdev = solo_dev->pdev;
-
-	/* If we never initialized the PCI device, then nothing else
-	 * below here needs cleanup */
-	if (!pdev) {
-		kfree(solo_dev);
-		return;
-	}
-
 	if (solo_dev->reg_base) {
 		/* Bring down the sub-devices first */
 		solo_g723_exit(solo_dev);
@@ -164,9 +152,8 @@
 
 		/* Now cleanup the PCI device */
 		solo_irq_off(solo_dev, ~0);
+		free_irq(pdev->irq, solo_dev);
 		pci_iounmap(pdev, solo_dev->reg_base);
-		if (pdev->irq)
-			free_irq(pdev->irq, solo_dev);
 	}
 
 	pci_release_regions(pdev);
@@ -483,7 +470,6 @@
 
 	solo_dev->type = id->driver_data;
 	solo_dev->pdev = pdev;
-	spin_lock_init(&solo_dev->reg_io_lock);
 	ret = v4l2_device_register(&pdev->dev, &solo_dev->v4l2_dev);
 	if (ret)
 		goto fail_probe;
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 7ddc767..4a37a1c 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -48,10 +48,8 @@
 /* The solo writes to 1k byte pages, 32 pages, in the dma. Each 1k page
  * is broken down to 20 * 48 byte regions (one for each channel possible)
  * with the rest of the page being dummy data. */
-#define G723_MAX_BUFFER		(G723_PERIOD_BYTES * PERIODS_MAX)
+#define PERIODS			G723_FDMA_PAGES
 #define G723_INTR_ORDER		4 /* 0 - 4 */
-#define PERIODS_MIN		(1 << G723_INTR_ORDER)
-#define PERIODS_MAX		G723_FDMA_PAGES
 
 struct solo_snd_pcm {
 	int				on;
@@ -130,11 +128,11 @@
 	.rate_max		= SAMPLERATE,
 	.channels_min		= 1,
 	.channels_max		= 1,
-	.buffer_bytes_max	= G723_MAX_BUFFER,
+	.buffer_bytes_max	= G723_PERIOD_BYTES * PERIODS,
 	.period_bytes_min	= G723_PERIOD_BYTES,
 	.period_bytes_max	= G723_PERIOD_BYTES,
-	.periods_min		= PERIODS_MIN,
-	.periods_max		= PERIODS_MAX,
+	.periods_min		= PERIODS,
+	.periods_max		= PERIODS,
 };
 
 static int snd_solo_pcm_open(struct snd_pcm_substream *ss)
@@ -340,7 +338,8 @@
 	ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
 					SNDRV_DMA_TYPE_CONTINUOUS,
 					snd_dma_continuous_data(GFP_KERNEL),
-					G723_MAX_BUFFER, G723_MAX_BUFFER);
+					G723_PERIOD_BYTES * PERIODS,
+					G723_PERIOD_BYTES * PERIODS);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 1ca54b0..27423d7 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -199,7 +199,6 @@
 	int			nr_ext;
 	u32			irq_mask;
 	u32			motion_mask;
-	spinlock_t		reg_io_lock;
 	struct v4l2_device	v4l2_dev;
 
 	/* tw28xx accounting */
@@ -281,36 +280,13 @@
 
 static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
 {
-	unsigned long flags;
-	u32 ret;
-	u16 val;
-
-	spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
-
-	ret = readl(solo_dev->reg_base + reg);
-	rmb();
-	pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
-	rmb();
-
-	spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
-
-	return ret;
+	return readl(solo_dev->reg_base + reg);
 }
 
 static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
 				  u32 data)
 {
-	unsigned long flags;
-	u16 val;
-
-	spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
-
 	writel(data, solo_dev->reg_base + reg);
-	wmb();
-	pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
-	rmb();
-
-	spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
 }
 
 static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 54c9910..3e469d4 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1508,7 +1508,7 @@
 	if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) {
 		pr_err("KNC1-%d: Could not read MAC from KNC1 card\n",
 		       budget_av->budget.dvb_adapter.num);
-		memset(mac, 0, 6);
+		eth_zero_addr(mac);
 	} else {
 		pr_info("KNC1-%d: MAC addr = %pM\n",
 			budget_av->budget.dvb_adapter.num, mac);
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/pci/ttpci/ttpci-eeprom.c
index 32d4315..079ee09 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/pci/ttpci/ttpci-eeprom.c
@@ -36,6 +36,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/i2c.h>
+#include <linux/etherdevice.h>
 
 #include "ttpci-eeprom.h"
 
@@ -145,7 +146,7 @@
 
 	if (ret != 0) {		/* Will only be -ENODEV */
 		dprintk("Couldn't read from EEPROM: not there?\n");
-		memset(proposed_mac, 0, 6);
+		eth_zero_addr(proposed_mac);
 		return ret;
 	}
 
@@ -157,14 +158,12 @@
 			dprintk( "%.2x:", encodedMAC[i]);
 		}
 		dprintk("%.2x\n", encodedMAC[19]);
-		memset(proposed_mac, 0, 6);
+		eth_zero_addr(proposed_mac);
 		return ret;
 	}
 
 	memcpy(proposed_mac, decodedMAC, 6);
-	dprintk("adapter has MAC addr = %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
-		decodedMAC[0], decodedMAC[1], decodedMAC[2],
-		decodedMAC[3], decodedMAC[4], decodedMAC[5]);
+	dprintk("adapter has MAC addr = %pM\n", decodedMAC);
 	return 0;
 }
 
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index c135165..04706cc 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -37,6 +37,7 @@
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/dma-mapping.h>
+#include <linux/pci_ids.h>
 #include <linux/pm.h>
 
 #include <media/v4l2-dev.h>
@@ -70,13 +71,13 @@
  * added under vendor 0x1797 (Techwell Inc.) as subsystem IDs.
  */
 static const struct pci_device_id tw68_pci_tbl[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6800)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6801)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6804)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_3)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6800)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6801)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6804)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_4)},
 	{0,}
 };
 
@@ -263,15 +264,15 @@
 	}
 
 	switch (pci_id->device) {
-	case PCI_DEVICE_ID_6800:	/* TW6800 */
+	case PCI_DEVICE_ID_TECHWELL_6800:	/* TW6800 */
 		dev->vdecoder = TW6800;
 		dev->board_virqmask = TW68_VID_INTS;
 		break;
-	case PCI_DEVICE_ID_6801:	/* Video decoder for TW6802 */
+	case PCI_DEVICE_ID_TECHWELL_6801:	/* Video decoder for TW6802 */
 		dev->vdecoder = TW6801;
 		dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
 		break;
-	case PCI_DEVICE_ID_6804:	/* Video decoder for TW6804 */
+	case PCI_DEVICE_ID_TECHWELL_6804:	/* Video decoder for TW6804 */
 		dev->vdecoder = TW6804;
 		dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
 		break;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 93f2335..ef51e4d 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -42,22 +42,6 @@
 
 #define	UNSET	(-1U)
 
-/* system vendor and device ID's */
-#define	PCI_VENDOR_ID_TECHWELL	0x1797
-#define	PCI_DEVICE_ID_6800	0x6800
-#define	PCI_DEVICE_ID_6801	0x6801
-#define	PCI_DEVICE_ID_AUDIO2	0x6802
-#define	PCI_DEVICE_ID_TS3	0x6803
-#define	PCI_DEVICE_ID_6804	0x6804
-#define	PCI_DEVICE_ID_AUDIO5	0x6805
-#define	PCI_DEVICE_ID_TS6	0x6806
-
-/* tw6816 based cards */
-#define	PCI_DEVICE_ID_6816_1   0x6810
-#define	PCI_DEVICE_ID_6816_2   0x6811
-#define	PCI_DEVICE_ID_6816_3   0x6812
-#define	PCI_DEVICE_ID_6816_4   0x6813
-
 #define TW68_NORMS ( \
 	V4L2_STD_NTSC    | V4L2_STD_PAL       | V4L2_STD_SECAM    | \
 	V4L2_STD_PAL_M   | V4L2_STD_PAL_Nc    | V4L2_STD_PAL_60)
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 5e04008..4e7db89 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -32,6 +32,8 @@
 #define _BUZ_H_
 
 #include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
 
 struct zoran_sync {
 	unsigned long frame;	/* number of buffer that has been free'd */
@@ -216,6 +218,7 @@
 
 /* zoran_fh contains per-open() settings */
 struct zoran_fh {
+	struct v4l2_fh fh;
 	struct zoran *zr;
 
 	enum zoran_map_mode map_mode;		/* Flag which bufferset will map by next mmap() */
@@ -268,6 +271,7 @@
 
 struct zoran {
 	struct v4l2_device v4l2_dev;
+	struct v4l2_ctrl_handler hdl;
 	struct video_device *video_dev;
 
 	struct i2c_adapter i2c_adapter;	/* */
@@ -280,8 +284,7 @@
 	struct videocodec *codec;	/* video codec */
 	struct videocodec *vfe;	/* video front end */
 
-	struct mutex resource_lock;	/* prevent evil stuff */
-	struct mutex other_lock;	/* please merge with above */
+	struct mutex lock;	/* serializes file operations */
 
 	u8 initialized;		/* flag if zoran has been correctly initialized */
 	int user;		/* number of current users */
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index cec5b75..1136d92 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1049,8 +1049,9 @@
 	/*
 	 *   Now add the template and register the device unit.
 	 */
-	memcpy(zr->video_dev, &zoran_template, sizeof(zoran_template));
+	*zr->video_dev = zoran_template;
 	zr->video_dev->v4l2_dev = &zr->v4l2_dev;
+	zr->video_dev->lock = &zr->lock;
 	strcpy(zr->video_dev->name, ZR_DEVNAME(zr));
 	/* It's not a mem2mem device, but you can both capture and output from
 	   one and the same device. This should really be split up into two
@@ -1116,6 +1117,7 @@
 	pci_disable_device(zr->pci_dev);
 	video_unregister_device(zr->video_dev);
 exit_free:
+	v4l2_ctrl_handler_free(&zr->hdl);
 	v4l2_device_unregister(&zr->v4l2_dev);
 	kfree(zr);
 }
@@ -1219,9 +1221,11 @@
 	zr->pci_dev = pdev;
 	zr->id = nr;
 	snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
+	if (v4l2_ctrl_handler_init(&zr->hdl, 10))
+		goto zr_unreg;
+	zr->v4l2_dev.ctrl_handler = &zr->hdl;
 	spin_lock_init(&zr->spinlock);
-	mutex_init(&zr->resource_lock);
-	mutex_init(&zr->other_lock);
+	mutex_init(&zr->lock);
 	if (pci_enable_device(pdev))
 		goto zr_unreg;
 	zr->revision = zr->pci_dev->revision;
@@ -1443,6 +1447,7 @@
 zr_unmap:
 	iounmap(zr->zr36057_mem);
 zr_unreg:
+	v4l2_ctrl_handler_free(&zr->hdl);
 	v4l2_device_unregister(&zr->v4l2_dev);
 zr_free_mem:
 	kfree(zr);
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 40119b3..4d47dda 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/ktime.h>
 
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
@@ -181,20 +182,11 @@
 	}
 }
 
-static inline unsigned long
-get_time (void)
-{
-	struct timeval tv;
-
-	do_gettimeofday(&tv);
-	return (1000000 * tv.tv_sec + tv.tv_usec);
-}
-
 void
 detect_guest_activity (struct zoran *zr)
 {
 	int timeout, i, j, res, guest[8], guest0[8], change[8][3];
-	unsigned long t0, t1;
+	ktime_t t0, t1;
 
 	dump_guests(zr);
 	printk(KERN_INFO "%s: Detecting guests activity, please wait...\n",
@@ -205,15 +197,15 @@
 
 	timeout = 0;
 	j = 0;
-	t0 = get_time();
+	t0 = ktime_get();
 	while (timeout < 10000) {
 		udelay(10);
 		timeout++;
 		for (i = 1; (i < 8) && (j < 8); i++) {
 			res = post_office_read(zr, i, 0);
 			if (res != guest[i]) {
-				t1 = get_time();
-				change[j][0] = (t1 - t0);
+				t1 = ktime_get();
+				change[j][0] = ktime_to_us(ktime_sub(t1, t0));
 				t0 = t1;
 				change[j][1] = i;
 				change[j][2] = res;
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 2b25d31..80caa70 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -61,6 +61,7 @@
 #include <linux/videodev2.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
 #include "videocodec.h"
 
 #include <asm/byteorder.h>
@@ -592,10 +593,14 @@
 		return -EPROTO;
 	}
 
+	mutex_unlock(&zr->lock);
 	/* wait on this buffer to get ready */
 	if (!wait_event_interruptible_timeout(zr->v4l_capq,
-		(zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ))
+		(zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) {
+		mutex_lock(&zr->lock);
 		return -ETIME;
+	}
+	mutex_lock(&zr->lock);
 	if (signal_pending(current))
 		return -ERESTARTSYS;
 
@@ -783,6 +788,7 @@
 			ZR_DEVNAME(zr), __func__);
 		return -EINVAL;
 	}
+	mutex_unlock(&zr->lock);
 	if (!wait_event_interruptible_timeout(zr->jpg_capq,
 			(zr->jpg_que_tail != zr->jpg_dma_tail ||
 			 zr->jpg_dma_tail == zr->jpg_dma_head),
@@ -793,6 +799,7 @@
 		udelay(1);
 		zr->codec->control(zr->codec, CODEC_G_STATUS,
 					   sizeof(isr), &isr);
+		mutex_lock(&zr->lock);
 		dprintk(1,
 			KERN_ERR
 			"%s: %s - timeout: codec isr=0x%02x\n",
@@ -801,6 +808,7 @@
 		return -ETIME;
 
 	}
+	mutex_lock(&zr->lock);
 	if (signal_pending(current))
 		return -ERESTARTSYS;
 
@@ -911,7 +919,7 @@
 	dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
 		ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
 
-	mutex_lock(&zr->other_lock);
+	mutex_lock(&zr->lock);
 
 	if (zr->user >= 2048) {
 		dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
@@ -930,6 +938,8 @@
 		res = -ENOMEM;
 		goto fail_unlock;
 	}
+	v4l2_fh_init(&fh->fh, video_devdata(file));
+
 	/* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
 	 * on norm-change! */
 	fh->overlay_mask =
@@ -946,8 +956,6 @@
 	if (zr->user++ == 0)
 		first_open = 1;
 
-	/*mutex_unlock(&zr->resource_lock);*/
-
 	/* default setup - TODO: look at flags */
 	if (first_open) {	/* First device open */
 		zr36057_restart(zr);
@@ -961,14 +969,15 @@
 	file->private_data = fh;
 	fh->zr = zr;
 	zoran_open_init_session(fh);
-	mutex_unlock(&zr->other_lock);
+	v4l2_fh_add(&fh->fh);
+	mutex_unlock(&zr->lock);
 
 	return 0;
 
 fail_fh:
 	kfree(fh);
 fail_unlock:
-	mutex_unlock(&zr->other_lock);
+	mutex_unlock(&zr->lock);
 
 	dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
 		ZR_DEVNAME(zr), res, zr->user);
@@ -987,7 +996,7 @@
 
 	/* kernel locks (fs/device.c), so don't do that ourselves
 	 * (prevents deadlocks) */
-	mutex_lock(&zr->other_lock);
+	mutex_lock(&zr->lock);
 
 	zoran_close_end_session(fh);
 
@@ -1021,9 +1030,10 @@
 			encoder_call(zr, video, s_routing, 2, 0, 0);
 		}
 	}
-	mutex_unlock(&zr->other_lock);
+	mutex_unlock(&zr->lock);
 
-	file->private_data = NULL;
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
 	kfree(fh->overlay_mask);
 	kfree(fh);
 
@@ -1032,29 +1042,6 @@
 	return 0;
 }
 
-
-static ssize_t
-zoran_read (struct file *file,
-	    char        __user *data,
-	    size_t       count,
-	    loff_t      *ppos)
-{
-	/* we simply don't support read() (yet)... */
-
-	return -EINVAL;
-}
-
-static ssize_t
-zoran_write (struct file *file,
-	     const char  __user *data,
-	     size_t       count,
-	     loff_t      *ppos)
-{
-	/* ...and the same goes for write() */
-
-	return -EINVAL;
-}
-
 static int setup_fbuffer(struct zoran_fh *fh,
 	       void                      *base,
 	       const struct zoran_format *fmt,
@@ -1523,7 +1510,6 @@
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
 
-	memset(cap, 0, sizeof(*cap));
 	strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1);
 	strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
 	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
@@ -1583,9 +1569,6 @@
 					struct v4l2_format *fmt)
 {
 	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
-
-	mutex_lock(&zr->resource_lock);
 
 	fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm;
 	fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 /
@@ -1601,7 +1584,6 @@
 	fmt->fmt.pix.bytesperline = 0;
 	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -1614,7 +1596,6 @@
 	if (fh->map_mode != ZORAN_MAP_MODE_RAW)
 		return zoran_g_fmt_vid_out(file, fh, fmt);
 
-	mutex_lock(&zr->resource_lock);
 	fmt->fmt.pix.width = fh->v4l_settings.width;
 	fmt->fmt.pix.height = fh->v4l_settings.height;
 	fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline *
@@ -1626,7 +1607,6 @@
 		fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
 	else
 		fmt->fmt.pix.field = V4L2_FIELD_TOP;
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -1636,8 +1616,6 @@
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
 
-	mutex_lock(&zr->resource_lock);
-
 	fmt->fmt.win.w.left = fh->overlay_settings.x;
 	fmt->fmt.win.w.top = fh->overlay_settings.y;
 	fmt->fmt.win.w.width = fh->overlay_settings.width;
@@ -1647,7 +1625,6 @@
 	else
 		fmt->fmt.win.field = V4L2_FIELD_TOP;
 
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -1657,8 +1634,6 @@
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH)
 		fmt->fmt.win.w.width = BUZ_MAX_WIDTH;
 	if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH)
@@ -1668,7 +1643,6 @@
 	if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT)
 		fmt->fmt.win.w.height = BUZ_MIN_HEIGHT;
 
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -1683,7 +1657,6 @@
 	if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
 		return -EINVAL;
 
-	mutex_lock(&zr->resource_lock);
 	settings = fh->jpg_settings;
 
 	/* we actually need to set 'real' parameters now */
@@ -1718,7 +1691,7 @@
 	/* check */
 	res = zoran_check_jpg_settings(zr, &settings, 1);
 	if (res)
-		goto tryfmt_unlock_and_return;
+		return res;
 
 	/* tell the user what we actually did */
 	fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
@@ -1734,8 +1707,6 @@
 	fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
 	fmt->fmt.pix.bytesperline = 0;
 	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-tryfmt_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -1750,23 +1721,17 @@
 	if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
 		return zoran_try_fmt_vid_out(file, fh, fmt);
 
-	mutex_lock(&zr->resource_lock);
-
 	for (i = 0; i < NUM_FORMATS; i++)
 		if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
 			break;
 
-	if (i == NUM_FORMATS) {
-		mutex_unlock(&zr->resource_lock);
+	if (i == NUM_FORMATS)
 		return -EINVAL;
-	}
 
 	bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
 	v4l_bound_align_image(
 		&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
 		&fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
-	mutex_unlock(&zr->resource_lock);
-
 	return 0;
 }
 
@@ -1774,7 +1739,6 @@
 					struct v4l2_format *fmt)
 {
 	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
 	int res;
 
 	dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n",
@@ -1783,12 +1747,10 @@
 			fmt->fmt.win.w.height,
 			fmt->fmt.win.clipcount,
 			fmt->fmt.win.bitmap);
-	mutex_lock(&zr->resource_lock);
 	res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top,
 			   fmt->fmt.win.w.width, fmt->fmt.win.w.height,
 			   (struct v4l2_clip __user *)fmt->fmt.win.clips,
 			   fmt->fmt.win.clipcount, fmt->fmt.win.bitmap);
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -1808,13 +1770,11 @@
 	if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
 		return -EINVAL;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (fh->buffers.allocated) {
 		dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
 			ZR_DEVNAME(zr));
 		res = -EBUSY;
-		goto sfmtjpg_unlock_and_return;
+		return res;
 	}
 
 	settings = fh->jpg_settings;
@@ -1851,7 +1811,7 @@
 	/* check */
 	res = zoran_check_jpg_settings(zr, &settings, 0);
 	if (res)
-		goto sfmtjpg_unlock_and_return;
+		return res;
 
 	/* it's ok, so set them */
 	fh->jpg_settings = settings;
@@ -1872,9 +1832,6 @@
 	fmt->fmt.pix.bytesperline = 0;
 	fmt->fmt.pix.sizeimage = fh->buffers.buffer_size;
 	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
-sfmtjpg_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -1898,14 +1855,12 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&zr->resource_lock);
-
 	if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) ||
 	    fh->buffers.active != ZORAN_FREE) {
 		dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
 				ZR_DEVNAME(zr));
 		res = -EBUSY;
-		goto sfmtv4l_unlock_and_return;
+		return res;
 	}
 	if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
 		fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
@@ -1917,7 +1872,7 @@
 	res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height,
 				   &zoran_formats[i]);
 	if (res)
-		goto sfmtv4l_unlock_and_return;
+		return res;
 
 	/* tell the user the results/missing stuff */
 	fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
@@ -1927,9 +1882,6 @@
 		fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
 	else
 		fmt->fmt.pix.field = V4L2_FIELD_TOP;
-
-sfmtv4l_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -1940,14 +1892,12 @@
 	struct zoran *zr = fh->zr;
 
 	memset(fb, 0, sizeof(*fb));
-	mutex_lock(&zr->resource_lock);
 	fb->base = zr->vbuf_base;
 	fb->fmt.width = zr->vbuf_width;
 	fb->fmt.height = zr->vbuf_height;
 	if (zr->overlay_settings.format)
 		fb->fmt.pixelformat = fh->overlay_settings.format->fourcc;
 	fb->fmt.bytesperline = zr->vbuf_bytesperline;
-	mutex_unlock(&zr->resource_lock);
 	fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
 	fb->fmt.field = V4L2_FIELD_INTERLACED;
 	fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -1973,10 +1923,8 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&zr->resource_lock);
 	res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width,
 			    fb->fmt.height, fb->fmt.bytesperline);
-	mutex_unlock(&zr->resource_lock);
 
 	return res;
 }
@@ -1984,12 +1932,9 @@
 static int zoran_overlay(struct file *file, void *__fh, unsigned int on)
 {
 	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
 	int res;
 
-	mutex_lock(&zr->resource_lock);
 	res = setup_overlay(fh, on);
-	mutex_unlock(&zr->resource_lock);
 
 	return res;
 }
@@ -2013,14 +1958,13 @@
 	if (req->count == 0)
 		return zoran_streamoff(file, fh, req->type);
 
-	mutex_lock(&zr->resource_lock);
 	if (fh->buffers.allocated) {
 		dprintk(2,
 				KERN_ERR
 				"%s: VIDIOC_REQBUFS - buffers already allocated\n",
 				ZR_DEVNAME(zr));
 		res = -EBUSY;
-		goto v4l2reqbuf_unlock_and_return;
+		return res;
 	}
 
 	if (fh->map_mode == ZORAN_MAP_MODE_RAW &&
@@ -2037,7 +1981,7 @@
 
 		if (v4l_fbuffer_alloc(fh)) {
 			res = -ENOMEM;
-			goto v4l2reqbuf_unlock_and_return;
+			return res;
 		}
 	} else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC ||
 		   fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
@@ -2054,7 +1998,7 @@
 
 		if (jpg_fbuffer_alloc(fh)) {
 			res = -ENOMEM;
-			goto v4l2reqbuf_unlock_and_return;
+			return res;
 		}
 	} else {
 		dprintk(1,
@@ -2062,23 +2006,17 @@
 				"%s: VIDIOC_REQBUFS - unknown type %d\n",
 				ZR_DEVNAME(zr), req->type);
 		res = -EINVAL;
-		goto v4l2reqbuf_unlock_and_return;
+		return res;
 	}
-v4l2reqbuf_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
 static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
 {
 	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
 	int res;
 
-	mutex_lock(&zr->resource_lock);
 	res = zoran_v4l2_buffer_status(fh, buf, buf->index);
-	mutex_unlock(&zr->resource_lock);
 
 	return res;
 }
@@ -2089,8 +2027,6 @@
 	struct zoran *zr = fh->zr;
 	int res = 0, codec_mode, buf_type;
 
-	mutex_lock(&zr->resource_lock);
-
 	switch (fh->map_mode) {
 	case ZORAN_MAP_MODE_RAW:
 		if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -2098,12 +2034,12 @@
 				"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
 				ZR_DEVNAME(zr), buf->type, fh->map_mode);
 			res = -EINVAL;
-			goto qbuf_unlock_and_return;
+			return res;
 		}
 
 		res = zoran_v4l_queue_frame(fh, buf->index);
 		if (res)
-			goto qbuf_unlock_and_return;
+			return res;
 		if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED)
 			zr36057_set_memgrab(zr, 1);
 		break;
@@ -2123,12 +2059,12 @@
 				"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
 				ZR_DEVNAME(zr), buf->type, fh->map_mode);
 			res = -EINVAL;
-			goto qbuf_unlock_and_return;
+			return res;
 		}
 
 		res = zoran_jpg_queue_frame(fh, buf->index, codec_mode);
 		if (res != 0)
-			goto qbuf_unlock_and_return;
+			return res;
 		if (zr->codec_mode == BUZ_MODE_IDLE &&
 		    fh->buffers.active == ZORAN_LOCKED)
 			zr36057_enable_jpg(zr, codec_mode);
@@ -2142,9 +2078,6 @@
 		res = -EINVAL;
 		break;
 	}
-qbuf_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2154,8 +2087,6 @@
 	struct zoran *zr = fh->zr;
 	int res = 0, buf_type, num = -1;	/* compiler borks here (?) */
 
-	mutex_lock(&zr->resource_lock);
-
 	switch (fh->map_mode) {
 	case ZORAN_MAP_MODE_RAW:
 		if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -2163,18 +2094,18 @@
 				"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
 				ZR_DEVNAME(zr), buf->type, fh->map_mode);
 			res = -EINVAL;
-			goto dqbuf_unlock_and_return;
+			return res;
 		}
 
 		num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
 		if (file->f_flags & O_NONBLOCK &&
 		    zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) {
 			res = -EAGAIN;
-			goto dqbuf_unlock_and_return;
+			return res;
 		}
 		res = v4l_sync(fh, num);
 		if (res)
-			goto dqbuf_unlock_and_return;
+			return res;
 		zr->v4l_sync_tail++;
 		res = zoran_v4l2_buffer_status(fh, buf, num);
 		break;
@@ -2194,7 +2125,7 @@
 				"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
 				ZR_DEVNAME(zr), buf->type, fh->map_mode);
 			res = -EINVAL;
-			goto dqbuf_unlock_and_return;
+			return res;
 		}
 
 		num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
@@ -2202,12 +2133,12 @@
 		if (file->f_flags & O_NONBLOCK &&
 		    zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) {
 			res = -EAGAIN;
-			goto dqbuf_unlock_and_return;
+			return res;
 		}
 		bs.frame = 0; /* suppress compiler warning */
 		res = jpg_sync(fh, &bs);
 		if (res)
-			goto dqbuf_unlock_and_return;
+			return res;
 		res = zoran_v4l2_buffer_status(fh, buf, bs.frame);
 		break;
 	}
@@ -2219,9 +2150,6 @@
 		res = -EINVAL;
 		break;
 	}
-dqbuf_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2231,14 +2159,12 @@
 	struct zoran *zr = fh->zr;
 	int res = 0;
 
-	mutex_lock(&zr->resource_lock);
-
 	switch (fh->map_mode) {
 	case ZORAN_MAP_MODE_RAW:	/* raw capture */
 		if (zr->v4l_buffers.active != ZORAN_ACTIVE ||
 		    fh->buffers.active != ZORAN_ACTIVE) {
 			res = -EBUSY;
-			goto strmon_unlock_and_return;
+			return res;
 		}
 
 		zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED;
@@ -2257,7 +2183,7 @@
 		if (zr->jpg_buffers.active != ZORAN_ACTIVE ||
 		    fh->buffers.active != ZORAN_ACTIVE) {
 			res = -EBUSY;
-			goto strmon_unlock_and_return;
+			return res;
 		}
 
 		zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED;
@@ -2276,9 +2202,6 @@
 		res = -EINVAL;
 		break;
 	}
-strmon_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2289,17 +2212,15 @@
 	int i, res = 0;
 	unsigned long flags;
 
-	mutex_lock(&zr->resource_lock);
-
 	switch (fh->map_mode) {
 	case ZORAN_MAP_MODE_RAW:	/* raw capture */
 		if (fh->buffers.active == ZORAN_FREE &&
 		    zr->v4l_buffers.active != ZORAN_FREE) {
 			res = -EPERM;	/* stay off other's settings! */
-			goto strmoff_unlock_and_return;
+			return res;
 		}
 		if (zr->v4l_buffers.active == ZORAN_FREE)
-			goto strmoff_unlock_and_return;
+			return res;
 
 		spin_lock_irqsave(&zr->spinlock, flags);
 		/* unload capture */
@@ -2327,17 +2248,17 @@
 		if (fh->buffers.active == ZORAN_FREE &&
 		    zr->jpg_buffers.active != ZORAN_FREE) {
 			res = -EPERM;	/* stay off other's settings! */
-			goto strmoff_unlock_and_return;
+			return res;
 		}
 		if (zr->jpg_buffers.active == ZORAN_FREE)
-			goto strmoff_unlock_and_return;
+			return res;
 
 		res = jpg_qbuf(fh, -1,
 			     (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
 			     BUZ_MODE_MOTION_COMPRESS :
 			     BUZ_MODE_MOTION_DECOMPRESS);
 		if (res)
-			goto strmoff_unlock_and_return;
+			return res;
 		break;
 	default:
 		dprintk(1, KERN_ERR
@@ -2346,70 +2267,14 @@
 		res = -EINVAL;
 		break;
 	}
-strmoff_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
-
-static int zoran_queryctrl(struct file *file, void *__fh,
-					struct v4l2_queryctrl *ctrl)
-{
-	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
-
-	/* we only support hue/saturation/contrast/brightness */
-	if (ctrl->id < V4L2_CID_BRIGHTNESS ||
-	    ctrl->id > V4L2_CID_HUE)
-		return -EINVAL;
-
-	decoder_call(zr, core, queryctrl, ctrl);
-
-	return 0;
-}
-
-static int zoran_g_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
-{
-	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
-
-	/* we only support hue/saturation/contrast/brightness */
-	if (ctrl->id < V4L2_CID_BRIGHTNESS ||
-	    ctrl->id > V4L2_CID_HUE)
-		return -EINVAL;
-
-	mutex_lock(&zr->resource_lock);
-	decoder_call(zr, core, g_ctrl, ctrl);
-	mutex_unlock(&zr->resource_lock);
-
-	return 0;
-}
-
-static int zoran_s_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
-{
-	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
-
-	/* we only support hue/saturation/contrast/brightness */
-	if (ctrl->id < V4L2_CID_BRIGHTNESS ||
-	    ctrl->id > V4L2_CID_HUE)
-		return -EINVAL;
-
-	mutex_lock(&zr->resource_lock);
-	decoder_call(zr, core, s_ctrl, ctrl);
-	mutex_unlock(&zr->resource_lock);
-
-	return 0;
-}
-
 static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
 {
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
 
-	mutex_lock(&zr->resource_lock);
 	*std = zr->norm;
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -2419,14 +2284,11 @@
 	struct zoran *zr = fh->zr;
 	int res = 0;
 
-	mutex_lock(&zr->resource_lock);
 	res = zoran_set_norm(zr, std);
 	if (res)
-		goto sstd_unlock_and_return;
+		return res;
 
 	res = wait_grab_pending(zr);
-sstd_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -2445,9 +2307,7 @@
 	inp->std = V4L2_STD_ALL;
 
 	/* Get status of video decoder */
-	mutex_lock(&zr->resource_lock);
 	decoder_call(zr, video, g_input_status, &inp->status);
-	mutex_unlock(&zr->resource_lock);
 	return 0;
 }
 
@@ -2456,9 +2316,7 @@
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
 
-	mutex_lock(&zr->resource_lock);
 	*input = zr->input;
-	mutex_unlock(&zr->resource_lock);
 
 	return 0;
 }
@@ -2469,15 +2327,12 @@
 	struct zoran *zr = fh->zr;
 	int res;
 
-	mutex_lock(&zr->resource_lock);
 	res = zoran_set_input(zr, input);
 	if (res)
-		goto sinput_unlock_and_return;
+		return res;
 
 	/* Make sure the changes come into effect */
 	res = wait_grab_pending(zr);
-sinput_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -2520,8 +2375,6 @@
 	memset(cropcap, 0, sizeof(*cropcap));
 	cropcap->type = type;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
 	    (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
 	     fh->map_mode == ZORAN_MAP_MODE_RAW)) {
@@ -2529,7 +2382,7 @@
 			"%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n",
 			ZR_DEVNAME(zr));
 		res = -EINVAL;
-		goto cropcap_unlock_and_return;
+		return res;
 	}
 
 	cropcap->bounds.top = cropcap->bounds.left = 0;
@@ -2538,8 +2391,6 @@
 	cropcap->defrect.top = cropcap->defrect.left = 0;
 	cropcap->defrect.width = BUZ_MIN_WIDTH;
 	cropcap->defrect.height = BUZ_MIN_HEIGHT;
-cropcap_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -2552,8 +2403,6 @@
 	memset(crop, 0, sizeof(*crop));
 	crop->type = type;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
 	    (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
 	     fh->map_mode == ZORAN_MAP_MODE_RAW)) {
@@ -2562,17 +2411,13 @@
 			"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
 			ZR_DEVNAME(zr));
 		res = -EINVAL;
-		goto gcrop_unlock_and_return;
+		return res;
 	}
 
 	crop->c.top = fh->jpg_settings.img_y;
 	crop->c.left = fh->jpg_settings.img_x;
 	crop->c.width = fh->jpg_settings.img_width;
 	crop->c.height = fh->jpg_settings.img_height;
-
-gcrop_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2585,14 +2430,12 @@
 
 	settings = fh->jpg_settings;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (fh->buffers.allocated) {
 		dprintk(1, KERN_ERR
 			"%s: VIDIOC_S_CROP - cannot change settings while active\n",
 			ZR_DEVNAME(zr));
 		res = -EBUSY;
-		goto scrop_unlock_and_return;
+		return res;
 	}
 
 	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
@@ -2602,7 +2445,7 @@
 			"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
 			ZR_DEVNAME(zr));
 		res = -EINVAL;
-		goto scrop_unlock_and_return;
+		return res;
 	}
 
 	/* move into a form that we understand */
@@ -2614,13 +2457,10 @@
 	/* check validity */
 	res = zoran_check_jpg_settings(zr, &settings, 0);
 	if (res)
-		goto scrop_unlock_and_return;
+		return res;
 
 	/* accept */
 	fh->jpg_settings = settings;
-
-scrop_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
 	return res;
 }
 
@@ -2628,11 +2468,8 @@
 					struct v4l2_jpegcompression *params)
 {
 	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
 	memset(params, 0, sizeof(*params));
 
-	mutex_lock(&zr->resource_lock);
-
 	params->quality = fh->jpg_settings.jpg_comp.quality;
 	params->APPn = fh->jpg_settings.jpg_comp.APPn;
 	memcpy(params->APP_data,
@@ -2646,8 +2483,6 @@
 	params->jpeg_markers =
 	    fh->jpg_settings.jpg_comp.jpeg_markers;
 
-	mutex_unlock(&zr->resource_lock);
-
 	return 0;
 }
 
@@ -2663,26 +2498,21 @@
 
 	settings.jpg_comp = *params;
 
-	mutex_lock(&zr->resource_lock);
-
 	if (fh->buffers.active != ZORAN_FREE) {
 		dprintk(1, KERN_WARNING
 			"%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n",
 			ZR_DEVNAME(zr));
 		res = -EBUSY;
-		goto sjpegc_unlock_and_return;
+		return res;
 	}
 
 	res = zoran_check_jpg_settings(zr, &settings, 0);
 	if (res)
-		goto sjpegc_unlock_and_return;
+		return res;
 	if (!fh->buffers.allocated)
 		fh->buffers.buffer_size =
 			zoran_v4l2_calc_bufsize(&fh->jpg_settings);
 	fh->jpg_settings.jpg_comp = settings.jpg_comp;
-sjpegc_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2692,7 +2522,8 @@
 {
 	struct zoran_fh *fh = file->private_data;
 	struct zoran *zr = fh->zr;
-	int res = 0, frame;
+	int res = v4l2_ctrl_poll(file, wait);
+	int frame;
 	unsigned long flags;
 
 	/* we should check whether buffers are ready to be synced on
@@ -2703,8 +2534,6 @@
 	 * if no buffers queued or so, return POLLNVAL
 	 */
 
-	mutex_lock(&zr->resource_lock);
-
 	switch (fh->map_mode) {
 	case ZORAN_MAP_MODE_RAW:
 		poll_wait(file, &zr->v4l_capq, wait);
@@ -2722,7 +2551,7 @@
 		if (fh->buffers.active != ZORAN_FREE &&
 		    /* Buffer ready to DQBUF? */
 		    zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE)
-			res = POLLIN | POLLRDNORM;
+			res |= POLLIN | POLLRDNORM;
 		spin_unlock_irqrestore(&zr->spinlock, flags);
 
 		break;
@@ -2743,9 +2572,9 @@
 		if (fh->buffers.active != ZORAN_FREE &&
 		    zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) {
 			if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC)
-				res = POLLIN | POLLRDNORM;
+				res |= POLLIN | POLLRDNORM;
 			else
-				res = POLLOUT | POLLWRNORM;
+				res |= POLLOUT | POLLWRNORM;
 		}
 		spin_unlock_irqrestore(&zr->spinlock, flags);
 
@@ -2756,11 +2585,9 @@
 			KERN_ERR
 			"%s: %s - internal error, unknown map_mode=%d\n",
 			ZR_DEVNAME(zr), __func__, fh->map_mode);
-		res = POLLNVAL;
+		res |= POLLERR;
 	}
 
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -2792,9 +2619,6 @@
 	struct zoran *zr = fh->zr;
 	int i;
 
-	if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
-		return;
-
 	dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
 		__func__, mode_name(fh->map_mode));
 
@@ -2807,7 +2631,6 @@
 	/* Any buffers still mapped? */
 	for (i = 0; i < fh->buffers.num_buffers; i++) {
 		if (fh->buffers.buffer[i].map) {
-			mutex_unlock(&zr->resource_lock);
 			return;
 		}
 	}
@@ -2815,7 +2638,6 @@
 	dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
 		__func__, mode_name(fh->map_mode));
 
-
 	if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
 		if (fh->buffers.active != ZORAN_FREE) {
 			unsigned long flags;
@@ -2835,8 +2657,6 @@
 		}
 		jpg_fbuffer_free(fh);
 	}
-
-	mutex_unlock(&zr->resource_lock);
 }
 
 static const struct vm_operations_struct zoran_vm_ops = {
@@ -2872,15 +2692,13 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&zr->resource_lock);
-
 	if (!fh->buffers.allocated) {
 		dprintk(1,
 			KERN_ERR
 			"%s: %s(%s) - buffers not yet allocated\n",
 			ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode));
 		res = -ENOMEM;
-		goto mmap_unlock_and_return;
+		return res;
 	}
 
 	first = offset / fh->buffers.buffer_size;
@@ -2896,7 +2714,7 @@
 			fh->buffers.buffer_size,
 			fh->buffers.num_buffers);
 		res = -EINVAL;
-		goto mmap_unlock_and_return;
+		return res;
 	}
 
 	/* Check if any buffers are already mapped */
@@ -2907,7 +2725,7 @@
 				"%s: %s(%s) - buffer %d already mapped\n",
 				ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i);
 			res = -EBUSY;
-			goto mmap_unlock_and_return;
+			return res;
 		}
 	}
 
@@ -2915,7 +2733,7 @@
 	map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL);
 	if (!map) {
 		res = -ENOMEM;
-		goto mmap_unlock_and_return;
+		return res;
 	}
 	map->fh = fh;
 	atomic_set(&map->count, 1);
@@ -2937,7 +2755,7 @@
 					"%s: %s(V4L) - remap_pfn_range failed\n",
 					ZR_DEVNAME(zr), __func__);
 				res = -EAGAIN;
-				goto mmap_unlock_and_return;
+				return res;
 			}
 			size -= todo;
 			start += todo;
@@ -2969,7 +2787,7 @@
 						"%s: %s(V4L) - remap_pfn_range failed\n",
 						ZR_DEVNAME(zr), __func__);
 					res = -EAGAIN;
-					goto mmap_unlock_and_return;
+					return res;
 				}
 				size -= todo;
 				start += todo;
@@ -2985,10 +2803,6 @@
 
 		}
 	}
-
-mmap_unlock_and_return:
-	mutex_unlock(&zr->resource_lock);
-
 	return res;
 }
 
@@ -3028,33 +2842,15 @@
 	.vidioc_try_fmt_vid_cap  	    = zoran_try_fmt_vid_cap,
 	.vidioc_try_fmt_vid_out 	    = zoran_try_fmt_vid_out,
 	.vidioc_try_fmt_vid_overlay 	    = zoran_try_fmt_vid_overlay,
-	.vidioc_queryctrl 		    = zoran_queryctrl,
-	.vidioc_s_ctrl       		    = zoran_s_ctrl,
-	.vidioc_g_ctrl       		    = zoran_g_ctrl,
+	.vidioc_subscribe_event             = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event           = v4l2_event_unsubscribe,
 };
 
-/* please use zr->resource_lock consistently and kill this wrapper */
-static long zoran_ioctl(struct file *file, unsigned int cmd,
-			unsigned long arg)
-{
-	struct zoran_fh *fh = file->private_data;
-	struct zoran *zr = fh->zr;
-	int ret;
-
-	mutex_lock(&zr->other_lock);
-	ret = video_ioctl2(file, cmd, arg);
-	mutex_unlock(&zr->other_lock);
-
-	return ret;
-}
-
 static const struct v4l2_file_operations zoran_fops = {
 	.owner = THIS_MODULE,
 	.open = zoran_open,
 	.release = zoran_close,
-	.unlocked_ioctl = zoran_ioctl,
-	.read = zoran_read,
-	.write = zoran_write,
+	.unlocked_ioctl = video_ioctl2,
 	.mmap = zoran_mmap,
 	.poll = zoran_poll,
 };
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f6bed19..dc75694 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -1,6 +1,6 @@
 #
 # Platform drivers
-#	All drivers here are currently for webcam support
+#	Most drivers here are currently for webcam support
 
 menuconfig V4L_PLATFORM_DRIVERS
 	bool "V4L platform devices"
@@ -86,7 +86,7 @@
 config VIDEO_OMAP3
 	tristate "OMAP 3 Camera support"
 	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
-	depends on HAS_DMA
+	depends on HAS_DMA && OF
 	depends on OMAP_IOMMU
 	select ARM_DMA_USE_IOMMU
 	select VIDEOBUF2_DMA_CONTIG
@@ -231,6 +231,18 @@
 	    Support for the Video Engine Unit (VEU) on SuperH and
 	    SH-Mobile SoCs.
 
+config VIDEO_RENESAS_JPU
+	tristate "Renesas JPEG Processing Unit"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	---help---
+	  This is a V4L2 driver for the Renesas JPEG Processing Unit.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called rcar_jpu.
+
 config VIDEO_RENESAS_VSP1
 	tristate "Renesas VSP1 Video Processing Engine"
 	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
@@ -280,3 +292,14 @@
 	  This is a virtual test device for the memory-to-memory driver
 	  framework.
 endif #V4L_TEST_DRIVERS
+
+menuconfig DVB_PLATFORM_DRIVERS
+	bool "DVB platform devices"
+	depends on MEDIA_DIGITAL_TV_SUPPORT
+	default n
+	---help---
+	  Say Y here to enable support for platform-specific Digital TV drivers.
+
+if DVB_PLATFORM_DRIVERS
+source "drivers/media/platform/sti/c8sectpfe/Kconfig"
+endif #DVB_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 114f9ab..efa0295 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC)	+= exynos-gsc/
 
 obj-$(CONFIG_VIDEO_STI_BDISP)		+= sti/bdisp/
+obj-$(CONFIG_DVB_C8SECTPFE)		+= sti/c8sectpfe/
 
 obj-$(CONFIG_BLACKFIN)                  += blackfin/
 
@@ -44,6 +45,7 @@
 
 obj-$(CONFIG_SOC_CAMERA)		+= soc_camera/
 
+obj-$(CONFIG_VIDEO_RENESAS_JPU) 	+= rcar_jpu.o
 obj-$(CONFIG_VIDEO_RENESAS_VSP1)	+= vsp1/
 
 obj-y	+= omap/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 1fba339..c8447fa 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1186,14 +1186,24 @@
 static int vpfe_release(struct file *file)
 {
 	struct vpfe_device *vpfe = video_drvdata(file);
+	bool fh_singular;
 	int ret;
 
 	mutex_lock(&vpfe->lock);
 
-	if (v4l2_fh_is_singular_file(file))
-		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+	/* Save the singular status before we call the clean-up helper */
+	fh_singular = v4l2_fh_is_singular_file(file);
+
+	/* the release helper will clean up any ongoing streaming */
 	ret = _vb2_fop_release(file, NULL);
 
+	/*
+	 * If this was the last open file, then
+	 * de-initialize the hardware module.
+	 */
+	if (fh_singular)
+		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+
 	mutex_unlock(&vpfe->lock);
 
 	return ret;
@@ -1565,7 +1575,7 @@
 		return -EBUSY;
 	}
 
-	ret = vpfe_try_fmt(file, priv, fmt);
+	ret = vpfe_try_fmt(file, priv, &format);
 	if (ret)
 		return ret;
 
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 834e504..9342ac5 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,5 +1,5 @@
 ccflags-y += -I$(src)
 
-coda-objs := coda-common.o coda-bit.o coda-h264.o coda-jpeg.o
+coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
 
 obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 109797b..fd7819d 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -226,8 +226,12 @@
 {
 	struct vb2_buffer *src_buf;
 	struct coda_buffer_meta *meta;
+	unsigned long flags;
 	u32 start;
 
+	if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
+		return;
+
 	while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
 		/*
 		 * Only queue a single JPEG into the bitstream buffer, except
@@ -252,6 +256,13 @@
 			continue;
 		}
 
+		/* Dump empty buffers */
+		if (!vb2_get_plane_payload(src_buf, 0)) {
+			src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+			continue;
+		}
+
 		/* Buffer start position */
 		start = ctx->bitstream_fifo.kfifo.in &
 			ctx->bitstream_fifo.kfifo.mask;
@@ -271,8 +282,13 @@
 				meta->start = start;
 				meta->end = ctx->bitstream_fifo.kfifo.in &
 					    ctx->bitstream_fifo.kfifo.mask;
+				spin_lock_irqsave(&ctx->buffer_meta_lock,
+						  flags);
 				list_add_tail(&meta->list,
 					      &ctx->buffer_meta_list);
+				ctx->num_metas++;
+				spin_unlock_irqrestore(&ctx->buffer_meta_lock,
+						       flags);
 
 				trace_coda_bit_queue(ctx, src_buf, meta);
 			}
@@ -331,7 +347,6 @@
 {
 	struct coda_dev *dev = ctx->dev;
 	int width, height;
-	dma_addr_t paddr;
 	int ysize;
 	int ret;
 	int i;
@@ -351,7 +366,10 @@
 		size_t size;
 		char *name;
 
-		size = ysize + ysize / 2;
+		if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+			size = round_up(ysize, 4096) + ysize / 2;
+		else
+			size = ysize + ysize / 2;
 		if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
 		    dev->devtype->product != CODA_DX6)
 			size += ysize / 4;
@@ -367,11 +385,23 @@
 
 	/* Register frame buffers in the parameter buffer */
 	for (i = 0; i < ctx->num_internal_frames; i++) {
-		paddr = ctx->internal_frames[i].paddr;
+		u32 y, cb, cr;
+
 		/* Start addresses of Y, Cb, Cr planes */
-		coda_parabuf_write(ctx, i * 3 + 0, paddr);
-		coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize);
-		coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize / 4);
+		y = ctx->internal_frames[i].paddr;
+		cb = y + ysize;
+		cr = y + ysize + ysize/4;
+		if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
+			cb = round_up(cb, 4096);
+			cr = 0;
+			/* Packed 20-bit MSB of base addresses */
+			/* YYYYYCCC, CCyyyyyc, cccc.... */
+			y = (y & 0xfffff000) | cb >> 20;
+			cb = (cb & 0x000ff000) << 12;
+		}
+		coda_parabuf_write(ctx, i * 3 + 0, y);
+		coda_parabuf_write(ctx, i * 3 + 1, cb);
+		coda_parabuf_write(ctx, i * 3 + 2, cr);
 
 		/* mvcol buffer for h.264 */
 		if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
@@ -384,7 +414,7 @@
 	/* mvcol buffer for mpeg4 */
 	if ((dev->devtype->product != CODA_DX6) &&
 	    (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
-		coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+		coda_parabuf_write(ctx, 97, ctx->internal_frames[0].paddr +
 					    ysize + ysize/4 + ysize/4);
 
 	return 0;
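
The tiled-map branch above packs two frame-buffer base addresses into the
parameter words: the Y word keeps Y[31:12] in its upper 20 bits and carries
Cb[31:20] in its low 12 bits, while the second word holds Cb[19:12] in its
top byte. A standalone sketch of that pack/unpack arithmetic, using
hypothetical 4 KiB-aligned addresses rather than real driver state:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t y = 0x8a3f2000, cb = 0x8a4f3000;  /* both 4 KiB aligned */
            uint32_t w0 = (y & 0xfffff000) | cb >> 20; /* Y[31:12] | Cb[31:20] */
            uint32_t w1 = (cb & 0x000ff000) << 12;     /* Cb[19:12] in bits 31:24 */

            /* unpacking recovers both page-aligned addresses exactly */
            assert((w0 & 0xfffff000) == y);
            assert((((w0 & 0xfff) << 20) | ((w1 >> 24) << 12)) == cb);
            printf("w0=%08x w1=%08x\n", (unsigned)w0, (unsigned)w1);
            return 0;
    }
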
@@ -712,6 +742,32 @@
 	return ret;
 }
 
+static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
+{
+	u32 cache_size, cache_config;
+
+	if (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) {
+		/* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
+		cache_size = 0x20262024;
+		cache_config = 2 << CODA9_CACHE_PAGEMERGE_OFFSET;
+	} else {
+		/* Luma 0x2 page, 4x4 cache, chroma 0x2 page, 4x3 cache size */
+		cache_size = 0x02440243;
+		cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
+	}
+	coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
+	if (fourcc == V4L2_PIX_FMT_NV12) {
+		cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+				16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+				0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+	} else {
+		cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+				8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+				8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+	}
+	coda_write(ctx->dev, cache_config, CODA9_CMD_SET_FRAME_CACHE_CONFIG);
+}
+
 /*
  * Encoder context operations
  */
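
The cache_size words written by coda9_set_frame_cache() encode four WxH
pairs, one per byte, matching the comments above (0x20262024 = luma page
2x0, luma cache 2x6, chroma page 2x0, chroma cache 2x4). A small decode
sketch, assuming only that nibble layout:

    #include <stdio.h>

    int main(void)
    {
            unsigned int v = 0x20262024; /* linear-map cache_size */
            static const char *name[] = { "luma page", "luma cache",
                                          "chroma page", "chroma cache" };
            int i;

            for (i = 0; i < 4; i++) {
                    unsigned int byte = (v >> (24 - 8 * i)) & 0xff;
                    printf("%-12s %ux%u\n", name[i], byte >> 4, byte & 0xf);
            }
            return 0;
    }
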
@@ -789,9 +845,12 @@
 		break;
 	}
 
-	ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+	ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+				 CODA9_FRAME_TILED2LINEAR);
 	if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
 		ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+	if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+		ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
 	coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
 
 	if (dev->devtype->product == CODA_DX6) {
@@ -913,6 +972,9 @@
 		value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
 			<< CODA_RATECONTROL_BITRATE_OFFSET;
 		value |=  1 & CODA_RATECONTROL_ENABLE_MASK;
+		value |= (ctx->params.vbv_delay &
+			  CODA_RATECONTROL_INITIALDELAY_MASK)
+			 << CODA_RATECONTROL_INITIALDELAY_OFFSET;
 		if (dev->devtype->product == CODA_960)
 			value |= BIT(31); /* disable autoskip */
 	} else {
@@ -920,7 +982,7 @@
 	}
 	coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
 
-	coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+	coda_write(dev, ctx->params.vbv_size, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
 	coda_write(dev, ctx->params.intra_refresh,
 		   CODA_CMD_ENC_SEQ_INTRA_REFRESH);
 
@@ -996,6 +1058,7 @@
 		ret = -EFAULT;
 		goto out;
 	}
+	ctx->initialized = 1;
 
 	if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
 		if (dev->devtype->product == CODA_960)
@@ -1036,6 +1099,8 @@
 			coda_write(dev, ctx->iram_info.buf_btp_use,
 					CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
 
+			coda9_set_frame_cache(ctx, q_data_src->fourcc);
+
 			/* FIXME */
 			coda_write(dev, ctx->internal_frames[2].paddr,
 				   CODA9_CMD_SET_FRAME_SUBSAMP_A);
@@ -1326,6 +1391,9 @@
 	mutex_lock(&ctx->buffer_mutex);
 	mutex_lock(&dev->coda_mutex);
 
+	if (ctx->initialized == 0)
+		goto out;
+
 	v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
 		 "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx,
 		 __func__);
@@ -1334,11 +1402,22 @@
 			 "CODA_COMMAND_SEQ_END failed\n");
 	}
 
+	/*
+	 * FIXME: Sometimes h.264 encoding fails with 8-byte sequences missing
+	 * from the output stream after the h.264 decoder has run. Resetting the
+	 * hardware after the decoder has finished seems to help.
+	 */
+	if (dev->devtype->product == CODA_960)
+		coda_hw_reset(ctx);
+
 	kfifo_init(&ctx->bitstream_fifo,
 		ctx->bitstream.vaddr, ctx->bitstream.size);
 
 	coda_free_framebuffers(ctx);
 
+	ctx->initialized = 0;
+
+out:
 	mutex_unlock(&dev->coda_mutex);
 	mutex_unlock(&ctx->buffer_mutex);
 }
@@ -1448,9 +1527,12 @@
 	/* Update coda bitstream read and write pointers from kfifo */
 	coda_kfifo_sync_to_device_full(ctx);
 
-	ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+	ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+				 CODA9_FRAME_TILED2LINEAR);
 	if (dst_fourcc == V4L2_PIX_FMT_NV12)
 		ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+	if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+		ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
 	coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
 
 	ctx->display_idx = -1;
@@ -1496,6 +1578,7 @@
 		coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
 		return -ETIMEDOUT;
 	}
+	ctx->initialized = 1;
 
 	/* Update kfifo out pointer from coda bitstream read pointer */
 	coda_kfifo_sync_from_device(ctx);
@@ -1578,30 +1661,13 @@
 				CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
 		coda_write(dev, ctx->iram_info.buf_ovl_use,
 				CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
-		if (dev->devtype->product == CODA_960)
+		if (dev->devtype->product == CODA_960) {
 			coda_write(dev, ctx->iram_info.buf_btp_use,
 					CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
-	}
 
-	if (dev->devtype->product == CODA_960) {
-		int cbb_size, crb_size;
-
-		coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
-		/* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
-		coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
-
-		if (dst_fourcc == V4L2_PIX_FMT_NV12) {
-			cbb_size = 0;
-			crb_size = 16;
-		} else {
-			cbb_size = 8;
-			crb_size = 8;
+			coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+			coda9_set_frame_cache(ctx, dst_fourcc);
 		}
-		coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
-				32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
-				cbb_size << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
-				crb_size << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
-				CODA9_CMD_SET_FRAME_CACHE_CONFIG);
 	}
 
 	if (src_fourcc == V4L2_PIX_FMT_H264) {
@@ -1654,6 +1720,7 @@
 	struct coda_dev *dev = ctx->dev;
 	struct coda_q_data *q_data_dst;
 	struct coda_buffer_meta *meta;
+	unsigned long flags;
 	u32 reg_addr, reg_stride;
 
 	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1732,6 +1799,7 @@
 		coda_write(dev, ctx->iram_info.axi_sram_use,
 				CODA7_REG_BIT_AXI_SRAM_USE);
 
+	spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
 	meta = list_first_entry_or_null(&ctx->buffer_meta_list,
 					struct coda_buffer_meta, list);
 
@@ -1751,6 +1819,7 @@
 			kfifo_in(&ctx->bitstream_fifo, buf, pad);
 		}
 	}
+	spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
 
 	coda_kfifo_sync_to_device_full(ctx);
 
@@ -1772,6 +1841,7 @@
 	struct vb2_buffer *dst_buf;
 	struct coda_buffer_meta *meta;
 	unsigned long payload;
+	unsigned long flags;
 	int width, height;
 	int decoded_idx;
 	int display_idx;
@@ -1897,12 +1967,21 @@
 	} else {
 		val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
 		val -= ctx->sequence_offset;
-		mutex_lock(&ctx->bitstream_mutex);
+		spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
 		if (!list_empty(&ctx->buffer_meta_list)) {
 			meta = list_first_entry(&ctx->buffer_meta_list,
 					      struct coda_buffer_meta, list);
 			list_del(&meta->list);
-			if (val != (meta->sequence & 0xffff)) {
+			ctx->num_metas--;
+			spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+			/*
+			 * Clamp counters to 16 bits for comparison, as the HW
+			 * counter rolls over at this point for h.264. This
+			 * may be different for other formats, but using 16 bits
+			 * should be enough to detect most errors and saves us
+			 * from doing different things based on the format.
+			 */
+			if ((val & 0xffff) != (meta->sequence & 0xffff)) {
 				v4l2_err(&dev->v4l2_dev,
 					 "sequence number mismatch (%d(%d) != %d)\n",
 					 val, ctx->sequence_offset,
@@ -1911,13 +1990,13 @@
 			ctx->frame_metas[decoded_idx] = *meta;
 			kfree(meta);
 		} else {
+			spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
 			v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
 			memset(&ctx->frame_metas[decoded_idx], 0,
 			       sizeof(struct coda_buffer_meta));
 			ctx->frame_metas[decoded_idx].sequence = val;
 			ctx->sequence_offset++;
 		}
-		mutex_unlock(&ctx->bitstream_mutex);
 
 		trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]);
 
@@ -1960,7 +2039,7 @@
 		dst_buf->v4l2_buf.timecode = meta->timecode;
 		dst_buf->v4l2_buf.timestamp = meta->timestamp;
 
-		trace_coda_dec_rot_done(ctx, meta, dst_buf);
+		trace_coda_dec_rot_done(ctx, dst_buf, meta);
 
 		switch (q_data_dst->fourcc) {
 		case V4L2_PIX_FMT_YUV420:
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 58f6548..a4654e0 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/firmware.h>
+#include <linux/gcd.h>
 #include <linux/genalloc.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -61,10 +62,9 @@
 module_param(coda_debug, int, 0644);
 MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
 
-struct coda_fmt {
-	char *name;
-	u32 fourcc;
-};
+static int disable_tiling;
+module_param(disable_tiling, int, 0644);
+MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers");
 
 void coda_write(struct coda_dev *dev, u32 data, u32 reg)
 {
@@ -90,17 +90,17 @@
 	u32 base_cb, base_cr;
 
 	switch (q_data->fourcc) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_YUV420:
+	default:
+		base_cb = base_y + q_data->bytesperline * q_data->height;
+		base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
+		break;
 	case V4L2_PIX_FMT_YVU420:
 		/* Switch Cb and Cr for YVU420 format */
 		base_cr = base_y + q_data->bytesperline * q_data->height;
 		base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
 		break;
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_NV12:
-	default:
-		base_cb = base_y + q_data->bytesperline * q_data->height;
-		base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
-		break;
 	case V4L2_PIX_FMT_YUV422P:
 		base_cb = base_y + q_data->bytesperline * q_data->height;
 		base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
@@ -111,40 +111,6 @@
 	coda_write(ctx->dev, base_cr, reg_y + 8);
 }
 
-/*
- * Array of all formats supported by any version of Coda:
- */
-static const struct coda_fmt coda_formats[] = {
-	{
-		.name = "YUV 4:2:0 Planar, YCbCr",
-		.fourcc = V4L2_PIX_FMT_YUV420,
-	},
-	{
-		.name = "YUV 4:2:0 Planar, YCrCb",
-		.fourcc = V4L2_PIX_FMT_YVU420,
-	},
-	{
-		.name = "YUV 4:2:0 Partial interleaved Y/CbCr",
-		.fourcc = V4L2_PIX_FMT_NV12,
-	},
-	{
-		.name = "YUV 4:2:2 Planar, YCbCr",
-		.fourcc = V4L2_PIX_FMT_YUV422P,
-	},
-	{
-		.name = "H264 Encoded Stream",
-		.fourcc = V4L2_PIX_FMT_H264,
-	},
-	{
-		.name = "MPEG4 Encoded Stream",
-		.fourcc = V4L2_PIX_FMT_MPEG4,
-	},
-	{
-		.name = "JPEG Encoded Images",
-		.fourcc = V4L2_PIX_FMT_JPEG,
-	},
-};
-
 #define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
 	{ mode, src_fourcc, dst_fourcc, max_w, max_h }
 
@@ -190,9 +156,9 @@
 	.type = CODA_INST_ENCODER,
 	.ops = &coda_bit_encode_ops,
 	.src_formats = {
+		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV420,
 		V4L2_PIX_FMT_YVU420,
-		V4L2_PIX_FMT_NV12,
 	},
 	.dst_formats = {
 		V4L2_PIX_FMT_H264,
@@ -205,9 +171,9 @@
 	.type = CODA_INST_ENCODER,
 	.ops = &coda_bit_encode_ops,
 	.src_formats = {
+		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV420,
 		V4L2_PIX_FMT_YVU420,
-		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV422P,
 	},
 	.dst_formats = {
@@ -224,9 +190,9 @@
 		V4L2_PIX_FMT_MPEG4,
 	},
 	.dst_formats = {
+		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV420,
 		V4L2_PIX_FMT_YVU420,
-		V4L2_PIX_FMT_NV12,
 	},
 };
 
@@ -238,9 +204,9 @@
 		V4L2_PIX_FMT_JPEG,
 	},
 	.dst_formats = {
+		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV420,
 		V4L2_PIX_FMT_YVU420,
-		V4L2_PIX_FMT_NV12,
 		V4L2_PIX_FMT_YUV422P,
 	},
 };
@@ -261,38 +227,21 @@
 	&coda_bit_decoder,
 };
 
-static bool coda_format_is_yuv(u32 fourcc)
-{
-	switch (fourcc) {
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_YUV422P:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static const char *coda_format_name(u32 fourcc)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(coda_formats); i++) {
-		if (coda_formats[i].fourcc == fourcc)
-			return coda_formats[i].name;
-	}
-
-	return NULL;
-}
-
 /*
  * Normalize all supported YUV 4:2:0 formats to the value used in the codec
  * tables.
  */
 static u32 coda_format_normalize_yuv(u32 fourcc)
 {
-	return coda_format_is_yuv(fourcc) ? V4L2_PIX_FMT_YUV420 : fourcc;
+	switch (fourcc) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YUV422P:
+		return V4L2_PIX_FMT_YUV420;
+	default:
+		return fourcc;
+	}
 }
 
 static const struct coda_codec *coda_find_codec(struct coda_dev *dev,
@@ -396,7 +345,6 @@
 	struct video_device *vdev = video_devdata(file);
 	const struct coda_video_device *cvd = to_coda_video_device(vdev);
 	const u32 *formats;
-	const char *name;
 
 	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
 		formats = cvd->src_formats;
@@ -408,11 +356,7 @@
 	if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
 		return -EINVAL;
 
-	name = coda_format_name(formats[f->index]);
-	strlcpy(f->description, name, sizeof(f->description));
 	f->pixelformat = formats[f->index];
-	if (!coda_format_is_yuv(formats[f->index]))
-		f->flags |= V4L2_FMT_FLAG_COMPRESSED;
 
 	return 0;
 }
@@ -504,9 +448,9 @@
 			      S_ALIGN);
 
 	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_YUV420:
 	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_NV12:
 		/*
 		 * Frame stride must be at least multiple of 8,
 		 * but multiple of 16 for h.264 or JPEG 4:2:x
@@ -645,6 +589,22 @@
 	q_data->rect.width = f->fmt.pix.width;
 	q_data->rect.height = f->fmt.pix.height;
 
+	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_NV12:
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+			ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+			if (!disable_tiling)
+				break;
+		}
+		/* else fall through */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
+		break;
+	default:
+		break;
+	}
+
 	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 		"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
 		f->type, q_data->width, q_data->height, q_data->fourcc);
@@ -831,6 +791,104 @@
 	return 0;
 }
 
+static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct coda_ctx *ctx = fh_to_ctx(fh);
+	struct v4l2_fract *tpf;
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+	tpf = &a->parm.output.timeperframe;
+	tpf->denominator = ctx->params.framerate & CODA_FRATE_RES_MASK;
+	tpf->numerator = 1 + (ctx->params.framerate >>
+			      CODA_FRATE_DIV_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Approximate timeperframe v4l2_fract with values that can be written
+ * into the 16-bit CODA_FRATE_DIV and CODA_FRATE_RES fields.
+ */
+static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe)
+{
+	struct v4l2_fract s = *timeperframe;
+	struct v4l2_fract f0;
+	struct v4l2_fract f1 = { 1, 0 };
+	struct v4l2_fract f2 = { 0, 1 };
+	unsigned int i, div, s_denominator;
+
+	/* Lower bound is 1/65535 */
+	if (s.numerator == 0 || s.denominator / s.numerator > 65535) {
+		timeperframe->numerator = 1;
+		timeperframe->denominator = 65535;
+		return;
+	}
+
+	/* Upper bound is 65536/1, map everything above to infinity */
+	if (s.denominator == 0 || s.numerator / s.denominator > 65536) {
+		timeperframe->numerator = 1;
+		timeperframe->denominator = 0;
+		return;
+	}
+
+	/* Reduce fraction to lowest terms */
+	div = gcd(s.numerator, s.denominator);
+	if (div > 1) {
+		s.numerator /= div;
+		s.denominator /= div;
+	}
+
+	if (s.numerator <= 65536 && s.denominator < 65536) {
+		*timeperframe = s;
+		return;
+	}
+
+	/* Find successive convergents from continued fraction expansion */
+	while (f2.numerator <= 65536 && f2.denominator < 65536) {
+		f0 = f1;
+		f1 = f2;
+
+		/* Stop when f2 exactly equals timeperframe */
+		if (s.numerator == 0)
+			break;
+
+		i = s.denominator / s.numerator;
+
+		f2.numerator = f0.numerator + i * f1.numerator;
+		f2.denominator = f0.denominator + i * f1.denominator;
+
+		s_denominator = s.numerator;
+		s.numerator = s.denominator % s.numerator;
+		s.denominator = s_denominator;
+	}
+
+	*timeperframe = f1;
+}
+
+static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
+{
+	return ((timeperframe->numerator - 1) << CODA_FRATE_DIV_OFFSET) |
+		timeperframe->denominator;
+}
+
+static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct coda_ctx *ctx = fh_to_ctx(fh);
+	struct v4l2_fract *tpf;
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	tpf = &a->parm.output.timeperframe;
+	coda_approximate_timeperframe(tpf);
+	ctx->params.framerate = coda_timeperframe_to_frate(tpf);
+
+	return 0;
+}
+
 static int coda_subscribe_event(struct v4l2_fh *fh,
 				const struct v4l2_event_subscription *sub)
 {
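
coda_approximate_timeperframe() above is a textbook continued-fraction
walk: it generates successive convergents of the requested fraction and
returns the last one whose numerator and denominator still fit the 16-bit
CODA_FRATE fields. A self-contained user-space sketch of the same loop
(without the driver's clamping and gcd pre-reduction), with a worked
29.97 fps input:

    #include <assert.h>
    #include <stdio.h>

    struct fract { unsigned int num, den; };

    static struct fract approximate(struct fract s)
    {
            struct fract f0, f1 = { 1, 0 }, f2 = { 0, 1 };
            unsigned int i, tmp;

            while (f2.num <= 65536 && f2.den < 65536) {
                    f0 = f1;
                    f1 = f2;
                    if (s.num == 0)         /* f2 hit the fraction exactly */
                            break;
                    i = s.den / s.num;
                    /* convergent recurrence: c(k) = a(k)*c(k-1) + c(k-2) */
                    f2.num = f0.num + i * f1.num;
                    f2.den = f0.den + i * f1.den;
                    tmp = s.num;            /* Euclidean step on the input */
                    s.num = s.den % s.num;
                    s.den = tmp;
            }
            return f1;
    }

    int main(void)
    {
            /* 333667/10000000 s/frame (29.97 fps) overflows 16 bits;
             * the last in-range convergent is exactly 100/2997. */
            struct fract tpf = approximate((struct fract){ 333667, 10000000 });

            assert(tpf.num == 100 && tpf.den == 2997);
            /* CODA_FRATE encoding: (numerator - 1) << 16 | denominator */
            printf("frate = 0x%08x\n", (tpf.num - 1) << 16 | tpf.den);
            return 0;
    }
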
@@ -871,31 +929,13 @@
 	.vidioc_try_decoder_cmd	= coda_try_decoder_cmd,
 	.vidioc_decoder_cmd	= coda_decoder_cmd,
 
+	.vidioc_g_parm		= coda_g_parm,
+	.vidioc_s_parm		= coda_s_parm,
+
 	.vidioc_subscribe_event = coda_subscribe_event,
 	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
 
-void coda_set_gdi_regs(struct coda_ctx *ctx)
-{
-	struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
-	struct coda_dev *dev = ctx->dev;
-	int i;
-
-	for (i = 0; i < 16; i++)
-		coda_write(dev, tiled_map->xy2ca_map[i],
-				CODA9_GDI_XY2_CAS_0 + 4 * i);
-	for (i = 0; i < 4; i++)
-		coda_write(dev, tiled_map->xy2ba_map[i],
-				CODA9_GDI_XY2_BA_0 + 4 * i);
-	for (i = 0; i < 16; i++)
-		coda_write(dev, tiled_map->xy2ra_map[i],
-				CODA9_GDI_XY2_RAS_0 + 4 * i);
-	coda_write(dev, tiled_map->xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
-	for (i = 0; i < 32; i++)
-		coda_write(dev, tiled_map->rbc2axi_map[i],
-				CODA9_GDI_RBC2_AXI_0 + 4 * i);
-}
-
 /*
  * Mem-to-mem operations.
  */
@@ -949,14 +989,14 @@
 static int coda_job_ready(void *m2m_priv)
 {
 	struct coda_ctx *ctx = m2m_priv;
+	int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
 
 	/*
 	 * For both 'P' and 'key' frame cases 1 picture
 	 * and 1 frame are needed. In the decoder case,
 	 * the compressed frame can be in the bitstream.
 	 */
-	if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
-	    ctx->inst_type != CODA_INST_DECODER) {
+	if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) {
 		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 			 "not ready: not enough video buffers.\n");
 		return 0;
@@ -969,27 +1009,17 @@
 	}
 
 	if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
-		struct list_head *meta;
-		bool stream_end;
-		int num_metas;
-		int src_bufs;
+		bool stream_end = ctx->bit_stream_param &
+				  CODA_BIT_STREAM_END_FLAG;
+		int num_metas = ctx->num_metas;
 
-		if (ctx->hold && !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+		if (ctx->hold && !src_bufs) {
 			v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 				 "%d: not ready: on hold for more buffers.\n",
 				 ctx->idx);
 			return 0;
 		}
 
-		stream_end = ctx->bit_stream_param &
-			     CODA_BIT_STREAM_END_FLAG;
-
-		num_metas = 0;
-		list_for_each(meta, &ctx->buffer_meta_list)
-			num_metas++;
-
-		src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
-
 		if (!stream_end && (num_metas + src_bufs) < 2) {
 			v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 				 "%d: not ready: need 2 buffers available (%d, %d)\n",
@@ -998,8 +1028,8 @@
 		}
 
 
-		if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
-		    !stream_end && (coda_get_bitstream_payload(ctx) < 512)) {
+		if (!src_bufs && !stream_end &&
+		    (coda_get_bitstream_payload(ctx) < 512)) {
 			v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 				 "%d: not ready: not enough bitstream data (%d).\n",
 				 ctx->idx, coda_get_bitstream_payload(ctx));
@@ -1015,6 +1045,7 @@
 
 	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 			"job ready\n");
+
 	return 1;
 }
 
@@ -1052,32 +1083,6 @@
 	.unlock		= coda_unlock,
 };
 
-static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
-{
-	struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
-	int luma_map, chro_map, i;
-
-	memset(tiled_map, 0, sizeof(*tiled_map));
-
-	luma_map = 64;
-	chro_map = 64;
-	tiled_map->map_type = tiled_map_type;
-	for (i = 0; i < 16; i++)
-		tiled_map->xy2ca_map[i] = luma_map << 8 | chro_map;
-	for (i = 0; i < 4; i++)
-		tiled_map->xy2ba_map[i] = luma_map << 8 | chro_map;
-	for (i = 0; i < 16; i++)
-		tiled_map->xy2ra_map[i] = luma_map << 8 | chro_map;
-
-	if (tiled_map_type == GDI_LINEAR_FRAME_MAP) {
-		tiled_map->xy2rbc_config = 0;
-	} else {
-		dev_err(&ctx->dev->plat_dev->dev, "invalid map type: %d\n",
-			tiled_map_type);
-		return;
-	}
-}
-
 static void set_default_params(struct coda_ctx *ctx)
 {
 	unsigned int max_w, max_h, usize, csize;
@@ -1094,8 +1099,8 @@
 	ctx->params.framerate = 30;
 
 	/* Default formats for output and input queues */
-	ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
-	ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc;
+	ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->cvd->src_formats[0];
+	ctx->q_data[V4L2_M2M_DST].fourcc = ctx->cvd->dst_formats[0];
 	ctx->q_data[V4L2_M2M_SRC].width = max_w;
 	ctx->q_data[V4L2_M2M_SRC].height = max_h;
 	ctx->q_data[V4L2_M2M_DST].width = max_w;
@@ -1116,8 +1121,11 @@
 	ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
 	ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
 
-	if (ctx->dev->devtype->product == CODA_960)
-		coda_set_tiled_map_type(ctx, GDI_LINEAR_FRAME_MAP);
+	/*
+	 * Since the RBC2AXI logic only supports a single chroma plane,
+	 * macroblock tiling only works with the NV12 pixel format.
+	 */
+	ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
 }
 
 /*
@@ -1244,9 +1252,7 @@
 
 	q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		if (q_data_src->fourcc == V4L2_PIX_FMT_H264 ||
-		    (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
-		     ctx->dev->devtype->product == CODA_7541)) {
+		if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
 			/* copy the buffers that were queued before streamon */
 			mutex_lock(&ctx->bitstream_mutex);
 			coda_fill_bitstream(ctx, false);
@@ -1315,7 +1321,6 @@
 			goto err;
 	}
 
-	ctx->initialized = 1;
 	return ret;
 
 err:
@@ -1334,6 +1339,7 @@
 	struct coda_ctx *ctx = vb2_get_drv_priv(q);
 	struct coda_dev *dev = ctx->dev;
 	struct vb2_buffer *buf;
+	unsigned long flags;
 	bool stop;
 
 	stop = ctx->streamon_out && ctx->streamon_cap;
@@ -1368,20 +1374,23 @@
 			queue_work(dev->workqueue, &ctx->seq_end_work);
 			flush_work(&ctx->seq_end_work);
 		}
-		mutex_lock(&ctx->bitstream_mutex);
+		spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
 		while (!list_empty(&ctx->buffer_meta_list)) {
 			meta = list_first_entry(&ctx->buffer_meta_list,
 						struct coda_buffer_meta, list);
 			list_del(&meta->list);
 			kfree(meta);
 		}
-		mutex_unlock(&ctx->bitstream_mutex);
+		ctx->num_metas = 0;
+		spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
 		kfifo_init(&ctx->bitstream_fifo,
 			ctx->bitstream.vaddr, ctx->bitstream.size);
-		ctx->initialized = 0;
 		ctx->runcounter = 0;
 		ctx->aborting = 0;
 	}
+
+	if (!ctx->streamon_out && !ctx->streamon_cap)
+		ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
 }
 
 static const struct vb2_ops coda_qops = {
@@ -1469,6 +1478,12 @@
 	case V4L2_CID_JPEG_RESTART_INTERVAL:
 		ctx->params.jpeg_restart_interval = ctrl->val;
 		break;
+	case V4L2_CID_MPEG_VIDEO_VBV_DELAY:
+		ctx->params.vbv_delay = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+		ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff);
+		break;
 	default:
 		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
 			"Invalid control, id=%d, val=%d\n",
@@ -1528,6 +1543,14 @@
 	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
 		V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
 		1920 * 1088 / 256, 1, 0);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_VBV_DELAY, 0, 0x7fff, 1, 0);
+	/*
+	 * The maximum VBV size value is 0x7fffffff bits,
+	 * one bit less than 262144 KiB
+	 */
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_VBV_SIZE, 0, 262144, 1, 0);
 }
 
 static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
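
The VBV size control registered above is specified in KiB while
CODA_CMD_ENC_SEQ_RC_BUF_SIZE holds bits, hence the val * 8192 conversion
and the 0x7fffffff clamp: at the control maximum the product lands exactly
one bit past the clamp. A quick check of that arithmetic (64-bit math to
sidestep 32-bit overflow at the extreme):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t bits = 262144LL * 8192; /* control max (KiB) in bits */

            assert(bits == 1LL << 31);      /* one more than 0x7fffffff */
            assert((bits > 0x7fffffff ? 0x7fffffff : bits) == 0x7fffffff);
            return 0;
    }
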
@@ -1726,6 +1749,7 @@
 	mutex_init(&ctx->bitstream_mutex);
 	mutex_init(&ctx->buffer_mutex);
 	INIT_LIST_HEAD(&ctx->buffer_meta_list);
+	spin_lock_init(&ctx->buffer_meta_lock);
 
 	coda_lock(ctx);
 	list_add(&ctx->list, &dev->instances);
@@ -1769,7 +1793,7 @@
 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 
 	/* In case the instance was not running, we still need to call SEQ_END */
-	if (ctx->initialized && ctx->ops->seq_end_work) {
+	if (ctx->ops->seq_end_work) {
 		queue_work(dev->workqueue, &ctx->seq_end_work);
 		flush_work(&ctx->seq_end_work);
 	}
@@ -2157,7 +2181,7 @@
 	/* Get IRAM pool from device tree or platform data */
 	pool = of_gen_pool_get(np, "iram", 0);
 	if (!pool && pdata)
-		pool = gen_pool_get(pdata->iram_dev);
+		pool = gen_pool_get(pdata->iram_dev, NULL);
 	if (!pool) {
 		dev_err(&pdev->dev, "iram pool not available\n");
 		return -ENOMEM;
diff --git a/drivers/media/platform/coda/coda-gdi.c b/drivers/media/platform/coda/coda-gdi.c
new file mode 100644
index 0000000..aaa7afc
--- /dev/null
+++ b/drivers/media/platform/coda/coda-gdi.c
@@ -0,0 +1,150 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include "coda.h"
+
+#define XY2_INVERT	BIT(7)
+#define XY2_ZERO	BIT(6)
+#define XY2_TB_XOR	BIT(5)
+#define XY2_XYSEL	BIT(4)
+#define XY2_Y		(1 << 4)
+#define XY2_X		(0 << 4)
+
+#define XY2(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+	(((XY2_##luma_sel) | (luma_bit)) << 8 | \
+	 (XY2_##chroma_sel) | (chroma_bit))
+
+static const u16 xy2ca_zero_map[16] = {
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+};
+
+static const u16 xy2ca_tiled_map[16] = {
+	XY2(Y,    0, Y,    0),
+	XY2(Y,    1, Y,    1),
+	XY2(Y,    2, Y,    2),
+	XY2(Y,    3, X,    3),
+	XY2(X,    3, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+	XY2(ZERO, 0, ZERO, 0),
+};
+
+/*
+ * RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
+ * start offset (macroblock size is 16x16 for luma, 16x8 for chroma).
+ * Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
+ */
+
+#define RBC_CA		(0 << 4)
+#define RBC_BA		(1 << 4)
+#define RBC_RA		(2 << 4)
+#define RBC_ZERO	(3 << 4)
+
+#define RBC(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+	(((RBC_##luma_sel) | (luma_bit)) << 6 | \
+	 (RBC_##chroma_sel) | (chroma_bit))
+
+static const u16 rbc2axi_tiled_map[32] = {
+	RBC(ZERO, 0, ZERO, 0),
+	RBC(ZERO, 0, ZERO, 0),
+	RBC(ZERO, 0, ZERO, 0),
+	RBC(CA,   0, CA,   0),
+	RBC(CA,   1, CA,   1),
+	RBC(CA,   2, CA,   2),
+	RBC(CA,   3, CA,   3),
+	RBC(CA,   4, CA,   8),
+	RBC(CA,   8, CA,   9),
+	RBC(CA,   9, CA,  10),
+	RBC(CA,  10, CA,  11),
+	RBC(CA,  11, CA,  12),
+	RBC(CA,  12, CA,  13),
+	RBC(CA,  13, CA,  14),
+	RBC(CA,  14, CA,  15),
+	RBC(CA,  15, RA,   0),
+	RBC(RA,   0, RA,   1),
+	RBC(RA,   1, RA,   2),
+	RBC(RA,   2, RA,   3),
+	RBC(RA,   3, RA,   4),
+	RBC(RA,   4, RA,   5),
+	RBC(RA,   5, RA,   6),
+	RBC(RA,   6, RA,   7),
+	RBC(RA,   7, RA,   8),
+	RBC(RA,   8, RA,   9),
+	RBC(RA,   9, RA,  10),
+	RBC(RA,  10, RA,  11),
+	RBC(RA,  11, RA,  12),
+	RBC(RA,  12, RA,  13),
+	RBC(RA,  13, RA,  14),
+	RBC(RA,  14, RA,  15),
+	RBC(RA,  15, ZERO, 0),
+};
+
+void coda_set_gdi_regs(struct coda_ctx *ctx)
+{
+	struct coda_dev *dev = ctx->dev;
+	const u16 *xy2ca_map;
+	u32 xy2rbc_config;
+	int i;
+
+	switch (ctx->tiled_map_type) {
+	case GDI_LINEAR_FRAME_MAP:
+	default:
+		xy2ca_map = xy2ca_zero_map;
+		xy2rbc_config = 0;
+		break;
+	case GDI_TILED_FRAME_MB_RASTER_MAP:
+		xy2ca_map = xy2ca_tiled_map;
+		xy2rbc_config = CODA9_XY2RBC_TILED_MAP |
+				CODA9_XY2RBC_CA_INC_HOR |
+				(16 - 1) << 12 | (8 - 1) << 4;
+		break;
+	}
+
+	for (i = 0; i < 16; i++)
+		coda_write(dev, xy2ca_map[i],
+				CODA9_GDI_XY2_CAS_0 + 4 * i);
+	for (i = 0; i < 4; i++)
+		coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+				CODA9_GDI_XY2_BA_0 + 4 * i);
+	for (i = 0; i < 16; i++)
+		coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+				CODA9_GDI_XY2_RAS_0 + 4 * i);
+	coda_write(dev, xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
+	if (xy2rbc_config) {
+		for (i = 0; i < 32; i++)
+			coda_write(dev, rbc2axi_tiled_map[i],
+					CODA9_GDI_RBC2_AXI_0 + 4 * i);
+	}
+}
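
The XY2() and RBC() helpers above just pack a (selector, bit index) pair
for luma and chroma into one register word. A few hand-expanded values
from the tiled tables, verified in a tiny standalone program with the
macro definitions copied from the file:

    #include <assert.h>

    #define BIT(n)          (1u << (n))
    #define XY2_ZERO        BIT(6)
    #define XY2_Y           (1 << 4)
    #define XY2_X           (0 << 4)
    #define XY2(ls, lb, cs, cb) \
            ((((XY2_##ls) | (lb)) << 8) | (XY2_##cs) | (cb))

    int main(void)
    {
            assert(XY2(Y,    0, Y,    0) == 0x1010); /* CA bit <- Y bit 0 */
            assert(XY2(Y,    3, X,    3) == 0x1303); /* luma Y[3], chroma X[3] */
            assert(XY2(X,    3, ZERO, 0) == 0x0340); /* chroma bit forced to 0 */
            return 0;
    }
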
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 8e0af22..59b2af9 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -128,6 +128,8 @@
 	enum v4l2_mpeg_video_multi_slice_mode slice_mode;
 	u32			framerate;
 	u16			bitrate;
+	u16			vbv_delay;
+	u32			vbv_size;
 	u32			slice_max_bits;
 	u32			slice_max_mb;
 };
@@ -165,15 +167,8 @@
 	phys_addr_t	next_paddr;
 };
 
-struct gdi_tiled_map {
-	int xy2ca_map[16];
-	int xy2ba_map[16];
-	int xy2ra_map[16];
-	int rbc2axi_map[32];
-	int xy2rbc_config;
-	int map_type;
 #define GDI_LINEAR_FRAME_MAP 0
-};
+#define GDI_TILED_FRAME_MB_RASTER_MAP 1
 
 struct coda_ctx;
 
@@ -227,12 +222,14 @@
 	struct coda_buffer_meta		frame_metas[CODA_MAX_FRAMEBUFFERS];
 	u32				frame_errors[CODA_MAX_FRAMEBUFFERS];
 	struct list_head		buffer_meta_list;
+	spinlock_t			buffer_meta_lock;
+	int				num_metas;
 	struct coda_aux_buf		workbuf;
 	int				num_internal_frames;
 	int				idx;
 	int				reg_idx;
 	struct coda_iram_info		iram_info;
-	struct gdi_tiled_map		tiled_map;
+	int				tiled_map_type;
 	u32				bit_stream_param;
 	u32				frm_dis_flg;
 	u32				frame_mem_ctrl;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 7d02624..3490602 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -51,6 +51,7 @@
 #define		CODA7_STREAM_SEL_64BITS_ENDIAN	(1 << 1)
 #define		CODA_STREAM_ENDIAN_SELECT	(1 << 0)
 #define CODA_REG_BIT_FRAME_MEM_CTRL		0x110
+#define		CODA9_FRAME_TILED2LINEAR	(1 << 11)
 #define		CODA_FRAME_CHROMA_INTERLEAVE	(1 << 2)
 #define		CODA_IMAGE_ENDIAN_SELECT	(1 << 0)
 #define CODA_REG_BIT_BIT_STREAM_PARAM		0x114
@@ -263,6 +264,10 @@
 #define		CODADX6_PICHEIGHT_MASK				0x3ff
 #define		CODA7_PICHEIGHT_MASK				0xffff
 #define CODA_CMD_ENC_SEQ_SRC_F_RATE				0x194
+#define		CODA_FRATE_RES_OFFSET				0
+#define		CODA_FRATE_RES_MASK				0xffff
+#define		CODA_FRATE_DIV_OFFSET				16
+#define		CODA_FRATE_DIV_MASK				0xffff
 #define CODA_CMD_ENC_SEQ_MP4_PARA				0x198
 #define		CODA_MP4PARAM_VERID_OFFSET			6
 #define		CODA_MP4PARAM_VERID_MASK			0x01
@@ -448,7 +453,12 @@
 #define CODA9_GDI_XY2_RAS_F			(CODA9_GDMA_BASE + 0x88c)
 
 #define CODA9_GDI_XY2_RBC_CONFIG		(CODA9_GDMA_BASE + 0x890)
+#define		CODA9_XY2RBC_SEPARATE_MAP		BIT(19)
+#define		CODA9_XY2RBC_TOP_BOT_SPLIT		BIT(18)
+#define		CODA9_XY2RBC_TILED_MAP			BIT(17)
+#define		CODA9_XY2RBC_CA_INC_HOR			BIT(16)
 #define CODA9_GDI_RBC2_AXI_0			(CODA9_GDMA_BASE + 0x8a0)
 #define CODA9_GDI_RBC2_AXI_1F			(CODA9_GDMA_BASE + 0x91c)
+#define	CODA9_GDI_TILEDBUF_BASE			(CODA9_GDMA_BASE + 0x920)
 
 #endif
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index 781bf72..d9099a0 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -48,7 +48,7 @@
 	TP_printk("minor = %d, ctx = %d", __entry->minor, __entry->ctx)
 );
 
-TRACE_EVENT(coda_enc_pic_run,
+DECLARE_EVENT_CLASS(coda_buf_class,
 	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
 
 	TP_ARGS(ctx, buf),
@@ -69,28 +69,17 @@
 		  __entry->minor, __entry->index, __entry->ctx)
 );
 
-TRACE_EVENT(coda_enc_pic_done,
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
 	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
-
-	TP_ARGS(ctx, buf),
-
-	TP_STRUCT__entry(
-		__field(int, minor)
-		__field(int, index)
-		__field(int, ctx)
-	),
-
-	TP_fast_assign(
-		__entry->minor = ctx->fh.vdev->minor;
-		__entry->index = buf->v4l2_buf.index;
-		__entry->ctx = ctx->idx;
-	),
-
-	TP_printk("minor = %d, index = %d, ctx = %d",
-		  __entry->minor, __entry->index, __entry->ctx)
+	TP_ARGS(ctx, buf)
 );
 
-TRACE_EVENT(coda_bit_queue,
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
+	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+	TP_ARGS(ctx, buf)
+);
+
+DECLARE_EVENT_CLASS(coda_buf_meta_class,
 	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
 		 struct coda_buffer_meta *meta),
 
@@ -117,7 +106,13 @@
 		  __entry->ctx)
 );
 
-TRACE_EVENT(coda_dec_pic_run,
+DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
+	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+		 struct coda_buffer_meta *meta),
+	TP_ARGS(ctx, buf, meta)
+);
+
+DECLARE_EVENT_CLASS(coda_meta_class,
 	TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
 
 	TP_ARGS(ctx, meta),
@@ -140,54 +135,20 @@
 		  __entry->minor, __entry->start, __entry->end, __entry->ctx)
 );
 
-TRACE_EVENT(coda_dec_pic_done,
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_run,
 	TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
-
-	TP_ARGS(ctx, meta),
-
-	TP_STRUCT__entry(
-		__field(int, minor)
-		__field(int, start)
-		__field(int, end)
-		__field(int, ctx)
-	),
-
-	TP_fast_assign(
-		__entry->minor = ctx->fh.vdev->minor;
-		__entry->start = meta->start;
-		__entry->end = meta->end;
-		__entry->ctx = ctx->idx;
-	),
-
-	TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
-		  __entry->minor, __entry->start, __entry->end, __entry->ctx)
+	TP_ARGS(ctx, meta)
 );
 
-TRACE_EVENT(coda_dec_rot_done,
-	TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta,
-		 struct vb2_buffer *buf),
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_done,
+	TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+	TP_ARGS(ctx, meta)
+);
 
-	TP_ARGS(ctx, meta, buf),
-
-	TP_STRUCT__entry(
-		__field(int, minor)
-		__field(int, start)
-		__field(int, end)
-		__field(int, index)
-		__field(int, ctx)
-	),
-
-	TP_fast_assign(
-		__entry->minor = ctx->fh.vdev->minor;
-		__entry->start = meta->start;
-		__entry->end = meta->end;
-		__entry->index = buf->v4l2_buf.index;
-		__entry->ctx = ctx->idx;
-	),
-
-	TP_printk("minor = %d, start = 0x%x, end = 0x%x, index = %d, ctx = %d",
-		  __entry->minor, __entry->start, __entry->end, __entry->index,
-		  __entry->ctx)
+DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
+	TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+		 struct coda_buffer_meta *meta),
+	TP_ARGS(ctx, buf, meta)
 );
 
 #endif /* __CODA_TRACE_H__ */
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 5b76e3d..ae8c6b3 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -28,6 +28,9 @@
 #include <media/v4l2-common.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
 #include <media/videobuf-dma-contig.h>
 
 #define DRV_NAME		"fsl_viu"
@@ -40,49 +43,6 @@
 /* I2C address of video decoder chip is 0x4A */
 #define VIU_VIDEO_DECODER_ADDR	0x25
 
-/* supported controls */
-static struct v4l2_queryctrl viu_qctrl[] = {
-	{
-		.id            = V4L2_CID_BRIGHTNESS,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-		.name          = "Brightness",
-		.minimum       = 0,
-		.maximum       = 255,
-		.step          = 1,
-		.default_value = 127,
-		.flags         = 0,
-	}, {
-		.id            = V4L2_CID_CONTRAST,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-		.name          = "Contrast",
-		.minimum       = 0,
-		.maximum       = 255,
-		.step          = 0x1,
-		.default_value = 0x10,
-		.flags         = 0,
-	}, {
-		.id            = V4L2_CID_SATURATION,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-		.name          = "Saturation",
-		.minimum       = 0,
-		.maximum       = 255,
-		.step          = 0x1,
-		.default_value = 127,
-		.flags         = 0,
-	}, {
-		.id            = V4L2_CID_HUE,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-		.name          = "Hue",
-		.minimum       = -128,
-		.maximum       = 127,
-		.step          = 0x1,
-		.default_value = 0,
-		.flags         = 0,
-	}
-};
-
-static int qctl_regs[ARRAY_SIZE(viu_qctrl)];
-
 static int info_level;
 
 #define dprintk(level, fmt, arg...)					\
@@ -95,7 +55,6 @@
  * Basic structures
  */
 struct viu_fmt {
-	char  name[32];
 	u32   fourcc;		/* v4l2 format id */
 	u32   pixelformat;
 	int   depth;
@@ -103,12 +62,10 @@
 
 static struct viu_fmt formats[] = {
 	{
-		.name		= "RGB-16 (5/B-6/G-5/R)",
 		.fourcc		= V4L2_PIX_FMT_RGB565,
 		.pixelformat	= V4L2_PIX_FMT_RGB565,
 		.depth		= 16,
 	}, {
-		.name		= "RGB-32 (A-R-G-B)",
 		.fourcc		= V4L2_PIX_FMT_RGB32,
 		.pixelformat	= V4L2_PIX_FMT_RGB32,
 		.depth		= 32,
@@ -156,6 +113,7 @@
 
 struct viu_dev {
 	struct v4l2_device	v4l2_dev;
+	struct v4l2_ctrl_handler hdl;
 	struct mutex		lock;
 	spinlock_t		slock;
 	int			users;
@@ -195,6 +153,8 @@
 };
 
 struct viu_fh {
+	/* must remain the first field of this struct */
+	struct v4l2_fh		fh;
 	struct viu_dev		*dev;
 
 	/* video capture */
@@ -604,6 +564,7 @@
 {
 	strcpy(cap->driver, "viu");
 	strcpy(cap->card, "viu");
+	strcpy(cap->bus_info, "platform:viu");
 	cap->device_caps =	V4L2_CAP_VIDEO_CAPTURE |
 				V4L2_CAP_STREAMING     |
 				V4L2_CAP_VIDEO_OVERLAY |
@@ -617,10 +578,9 @@
 {
 	int index = f->index;
 
-	if (f->index > NUM_FORMATS)
+	if (f->index >= NUM_FORMATS)
 		return -EINVAL;
 
-	strlcpy(f->description, formats[index].name, sizeof(f->description));
 	f->pixelformat = formats[index].fourcc;
 	return 0;
 }
@@ -637,6 +597,7 @@
 	f->fmt.pix.bytesperline =
 			(f->fmt.pix.width * fh->fmt->depth) >> 3;
 	f->fmt.pix.sizeimage	= fh->sizeimage;
+	f->fmt.pix.colorspace	= V4L2_COLORSPACE_SMPTE170M;
 	return 0;
 }
 
@@ -644,7 +605,6 @@
 					struct v4l2_format *f)
 {
 	struct viu_fmt *fmt;
-	enum v4l2_field field;
 	unsigned int maxw, maxh;
 
 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -654,19 +614,10 @@
 		return -EINVAL;
 	}
 
-	field = f->fmt.pix.field;
-
-	if (field == V4L2_FIELD_ANY) {
-		field = V4L2_FIELD_INTERLACED;
-	} else if (field != V4L2_FIELD_INTERLACED) {
-		dprintk(1, "Field type invalid.\n");
-		return -EINVAL;
-	}
-
 	maxw  = norm_maxw();
 	maxh  = norm_maxh();
 
-	f->fmt.pix.field = field;
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
 	if (f->fmt.pix.height < 32)
 		f->fmt.pix.height = 32;
 	if (f->fmt.pix.height > maxh)
@@ -678,6 +629,8 @@
 	f->fmt.pix.width &= ~0x03;
 	f->fmt.pix.bytesperline =
 		(f->fmt.pix.width * fmt->depth) >> 3;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
 
 	return 0;
 }
@@ -698,7 +651,6 @@
 	fh->sizeimage     = f->fmt.pix.sizeimage;
 	fh->vb_vidq.field = f->fmt.pix.field;
 	fh->type          = f->type;
-	dprintk(1, "set to pixelformat '%4.6s'\n", (char *)&fh->fmt->name);
 	return 0;
 }
 
@@ -764,8 +716,8 @@
 {
 	int bpp;
 
-	dprintk(1, "%s %dx%d %s\n", __func__,
-		fh->win.w.width, fh->win.w.height, dev->ovfmt->name);
+	dprintk(1, "%s %dx%d\n", __func__,
+		fh->win.w.width, fh->win.w.height);
 
 	reg_val.status_cfg = 0;
 
@@ -1002,58 +954,13 @@
 {
 	struct viu_fh *fh = priv;
 
-	if (i > 1)
+	if (i)
 		return -EINVAL;
 
 	decoder_call(fh->dev, video, s_routing, i, 0, 0);
 	return 0;
 }
 
-/* Controls */
-static int vidioc_queryctrl(struct file *file, void *priv,
-				struct v4l2_queryctrl *qc)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
-		if (qc->id && qc->id == viu_qctrl[i].id) {
-			memcpy(qc, &(viu_qctrl[i]), sizeof(*qc));
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
-				struct v4l2_control *ctrl)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
-		if (ctrl->id == viu_qctrl[i].id) {
-			ctrl->value = qctl_regs[i];
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-static int vidioc_s_ctrl(struct file *file, void *priv,
-				struct v4l2_control *ctrl)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
-		if (ctrl->id == viu_qctrl[i].id) {
-			if (ctrl->value < viu_qctrl[i].minimum
-				|| ctrl->value > viu_qctrl[i].maximum)
-					return -ERANGE;
-			qctl_regs[i] = ctrl->value;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
 inline void viu_activate_next_buf(struct viu_dev *dev,
 				struct viu_dmaqueue *viuq)
 {
@@ -1265,7 +1172,6 @@
 	struct viu_reg *vr;
 	int minor = vdev->minor;
 	u32 status_cfg;
-	int i;
 
 	dprintk(1, "viu: open (minor=%d)\n", minor);
 
@@ -1293,6 +1199,7 @@
 		return -ENOMEM;
 	}
 
+	v4l2_fh_init(&fh->fh, vdev);
 	file->private_data = fh;
 	fh->dev = dev;
 
@@ -1303,10 +1210,6 @@
 	dev->crop_current.width  = fh->width;
 	dev->crop_current.height = fh->height;
 
-	/* Put all controls at a sane state */
-	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++)
-		qctl_regs[i] = viu_qctrl[i].default_value;
-
 	dprintk(1, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
 		(unsigned long)fh, (unsigned long)dev,
 		(unsigned long)&dev->vidq);
@@ -1332,6 +1235,7 @@
 				       fh->type, V4L2_FIELD_INTERLACED,
 				       sizeof(struct viu_buf), fh,
 				       &fh->dev->lock);
+	v4l2_fh_add(&fh->fh);
 	mutex_unlock(&dev->lock);
 	return 0;
 }
@@ -1364,13 +1268,17 @@
 	struct viu_fh *fh = file->private_data;
 	struct videobuf_queue *q = &fh->vb_vidq;
 	struct viu_dev *dev = fh->dev;
-	unsigned int res;
+	unsigned long req_events = poll_requested_events(wait);
+	unsigned int res = v4l2_ctrl_poll(file, wait);
 
 	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
 		return POLLERR;
 
+	if (!(req_events & (POLLIN | POLLRDNORM)))
+		return res;
+
 	mutex_lock(&dev->lock);
-	res = videobuf_poll_stream(file, q, wait);
+	res |= videobuf_poll_stream(file, q, wait);
 	mutex_unlock(&dev->lock);
 	return res;
 }
@@ -1385,6 +1293,8 @@
 	viu_stop_dma(dev);
 	videobuf_stop(&fh->vb_vidq);
 	videobuf_mmap_free(&fh->vb_vidq);
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
 	mutex_unlock(&dev->lock);
 
 	kfree(fh);
@@ -1463,11 +1373,11 @@
 	.vidioc_enum_input    = vidioc_enum_input,
 	.vidioc_g_input       = vidioc_g_input,
 	.vidioc_s_input       = vidioc_s_input,
-	.vidioc_queryctrl     = vidioc_queryctrl,
-	.vidioc_g_ctrl        = vidioc_g_ctrl,
-	.vidioc_s_ctrl        = vidioc_s_ctrl,
 	.vidioc_streamon      = vidioc_streamon,
 	.vidioc_streamoff     = vidioc_streamoff,
+	.vidioc_log_status    = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
 
 static struct video_device viu_template = {
@@ -1543,6 +1453,16 @@
 	}
 
 	ad = i2c_get_adapter(0);
+
+	v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
+	if (viu_dev->hdl.error) {
+		ret = viu_dev->hdl.error;
+		dev_err(&op->dev, "couldn't register control\n");
+		goto err_vdev;
+	}
+	/* This control handler will inherit the control(s) from the
+	   sub-device(s). */
+	viu_dev->v4l2_dev.ctrl_handler = &viu_dev->hdl;
 	viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
 			"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
 
@@ -1559,7 +1479,7 @@
 		goto err_vdev;
 	}
 
-	memcpy(vdev, &viu_template, sizeof(viu_template));
+	*vdev = viu_template;
 
 	vdev->v4l2_dev = &viu_dev->v4l2_dev;
 
@@ -1614,6 +1534,7 @@
 err_clk:
 	video_unregister_device(viu_dev->vdev);
 err_vdev:
+	v4l2_ctrl_handler_free(&viu_dev->hdl);
 	mutex_unlock(&viu_dev->lock);
 	i2c_put_adapter(ad);
 	v4l2_device_unregister(&viu_dev->v4l2_dev);
@@ -1635,6 +1556,7 @@
 
 	clk_disable_unprepare(dev->clk);
 
+	v4l2_ctrl_handler_free(&dev->hdl);
 	video_unregister_device(dev->vdev);
 	i2c_put_adapter(client->adapter);
 	v4l2_device_unregister(&dev->v4l2_dev);
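
The fsl-viu conversion depends on registration order:
v4l2_device_register_subdev() merges a sub-device's controls into
v4l2_dev->ctrl_handler only if that handler is already installed, which is
why the driver sets it before creating the saa7113 sub-device. Condensed
from the probe path above (error handling omitted):

    v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
    /* install the bridge handler first ... */
    viu_dev->v4l2_dev.ctrl_handler = &viu_dev->hdl;
    /* ... so the decoder's controls are inherited at registration time */
    viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
                    "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
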
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 18d0a87..56e683b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -101,7 +101,6 @@
 			0x0000, /* csi2a, len 0x0170 */
 			0x0170, /* csiphy2, len 0x000c */
 		},
-		.syscon_offset = 0xdc,
 		.phy_type = ISP_PHY_TYPE_3430,
 	},
 	{
@@ -124,7 +123,6 @@
 			0x0570, /* csiphy1, len 0x000c */
 			0x05c0, /* csi2c, len 0x0040 (2nd area) */
 		},
-		.syscon_offset = 0x2f0,
 		.phy_type = ISP_PHY_TYPE_3630,
 	},
 };
@@ -829,14 +827,14 @@
 	int ret;
 
 	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
-	    !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+	    !(flags & MEDIA_LNK_FL_ENABLED)) {
 		/* Powering off entities is assumed to never fail. */
 		isp_pipeline_pm_power(source, -sink_use);
 		isp_pipeline_pm_power(sink, -source_use);
 		return 0;
 	}
 
-	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+	if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
 		(flags & MEDIA_LNK_FL_ENABLED)) {
 
 		ret = isp_pipeline_pm_power(source, sink_use);
@@ -1796,47 +1794,6 @@
 	media_device_unregister(&isp->media_dev);
 }
 
-/*
- * isp_register_subdev - Register a sub-device
- * @isp: OMAP3 ISP device
- * @isp_subdev: platform data related to a sub-device
- *
- * Register an I2C sub-device which has not been registered by other
- * means (such as the Device Tree).
- *
- * Return a pointer to the sub-device if it has been successfully
- * registered, or NULL otherwise.
- */
-static struct v4l2_subdev *
-isp_register_subdev(struct isp_device *isp,
-		    struct isp_platform_subdev *isp_subdev)
-{
-	struct i2c_adapter *adapter;
-	struct v4l2_subdev *sd;
-
-	if (isp_subdev->board_info == NULL)
-		return NULL;
-
-	adapter = i2c_get_adapter(isp_subdev->i2c_adapter_id);
-	if (adapter == NULL) {
-		dev_err(isp->dev,
-			"%s: Unable to get I2C adapter %d for device %s\n",
-			__func__, isp_subdev->i2c_adapter_id,
-			isp_subdev->board_info->type);
-		return NULL;
-	}
-
-	sd = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
-				       isp_subdev->board_info, NULL);
-	if (sd == NULL) {
-		dev_err(isp->dev, "%s: Unable to register subdev %s\n",
-			__func__, isp_subdev->board_info->type);
-		return NULL;
-	}
-
-	return sd;
-}
-
 static int isp_link_entity(
 	struct isp_device *isp, struct media_entity *entity,
 	enum isp_interface_type interface)
@@ -1910,8 +1867,6 @@
 
 static int isp_register_entities(struct isp_device *isp)
 {
-	struct isp_platform_data *pdata = isp->pdata;
-	struct isp_platform_subdev *isp_subdev;
 	int ret;
 
 	isp->media_dev.dev = isp->dev;
@@ -1968,42 +1923,9 @@
 	if (ret < 0)
 		goto done;
 
-	/*
-	 * Device Tree --- the external sub-devices will be registered
-	 * later. The same goes for the sub-device node registration.
-	 */
-	if (isp->dev->of_node)
-		return 0;
-
-	/* Register external entities */
-	for (isp_subdev = pdata ? pdata->subdevs : NULL;
-	     isp_subdev && isp_subdev->board_info; isp_subdev++) {
-		struct v4l2_subdev *sd;
-
-		sd = isp_register_subdev(isp, isp_subdev);
-
-		/*
-		 * No bus information --- this is either a flash or a
-		 * lens subdev.
-		 */
-		if (!sd || !isp_subdev->bus)
-			continue;
-
-		sd->host_priv = isp_subdev->bus;
-
-		ret = isp_link_entity(isp, &sd->entity,
-				      isp_subdev->bus->interface);
-		if (ret < 0)
-			goto done;
-	}
-
-	ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
-
 done:
-	if (ret < 0) {
+	if (ret < 0)
 		isp_unregister_entities(isp);
-		v4l2_async_notifier_unregister(&isp->notifier);
-	}
 
 	return ret;
 }
@@ -2404,37 +2326,24 @@
 		return -ENOMEM;
 	}
 
-	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
-		ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
-					   &isp->phy_type);
-		if (ret)
-			return ret;
+	ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
+				   &isp->phy_type);
+	if (ret)
+		return ret;
 
-		isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-							      "syscon");
-		if (IS_ERR(isp->syscon))
-			return PTR_ERR(isp->syscon);
+	isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "syscon");
+	if (IS_ERR(isp->syscon))
+		return PTR_ERR(isp->syscon);
 
-		ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
-						 &isp->syscon_offset);
-		if (ret)
-			return ret;
+	ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
+					 &isp->syscon_offset);
+	if (ret)
+		return ret;
 
-		ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
-		if (ret < 0)
-			return ret;
-		ret = v4l2_async_notifier_register(&isp->v4l2_dev,
-						   &isp->notifier);
-		if (ret)
-			return ret;
-	} else {
-		isp->pdata = pdev->dev.platform_data;
-		isp->syscon = syscon_regmap_lookup_by_pdevname("syscon.0");
-		if (IS_ERR(isp->syscon))
-			return PTR_ERR(isp->syscon);
-		dev_warn(&pdev->dev,
-			 "Platform data support is deprecated! Please move to DT now!\n");
-	}
+	ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
+	if (ret < 0)
+		return ret;
 
 	isp->autoidle = autoidle;
 
@@ -2513,11 +2422,6 @@
 		goto error_isp;
 	}
 
-	if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) {
-		isp->syscon_offset = isp_res_maps[m].syscon_offset;
-		isp->phy_type = isp_res_maps[m].phy_type;
-	}
-
 	for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
 		isp->mmio_base[i] =
 			isp->mmio_base[0] + isp_res_maps[m].offset[i];
@@ -2557,18 +2461,24 @@
 	if (ret < 0)
 		goto error_iommu;
 
-	isp->notifier.bound = isp_subdev_notifier_bound;
-	isp->notifier.complete = isp_subdev_notifier_complete;
-
 	ret = isp_register_entities(isp);
 	if (ret < 0)
 		goto error_modules;
 
+	isp->notifier.bound = isp_subdev_notifier_bound;
+	isp->notifier.complete = isp_subdev_notifier_complete;
+
+	ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+	if (ret)
+		goto error_register_entities;
+
 	isp_core_init(isp, 1);
 	omap3isp_put(isp);
 
 	return 0;
 
+error_register_entities:
+	isp_unregister_entities(isp);
 error_modules:
 	isp_cleanup_modules(isp);
 error_iommu:
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index e579943..5acc2e6 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -17,7 +17,6 @@
 #ifndef OMAP3_ISP_CORE_H
 #define OMAP3_ISP_CORE_H
 
-#include <media/omap3isp.h>
 #include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <linux/clk-provider.h>
@@ -27,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/wait.h>
 
+#include "omap3isp.h"
 #include "ispstat.h"
 #include "ispccdc.h"
 #include "ispreg.h"
@@ -101,15 +101,11 @@
  * struct isp_res_mapping - Map ISP io resources to ISP revision.
  * @isp_rev: ISP_REVISION_x_x
  * @offset: register offsets of various ISP sub-blocks
- * @syscon_offset: offset of the syscon register for 343x / 3630
- *	    (CONTROL_CSIRXFE / CONTROL_CAMERA_PHY_CTRL, respectively)
- *	    from the syscon base address
  * @phy_type: ISP_PHY_TYPE_{3430,3630}
  */
 struct isp_res_mapping {
 	u32 isp_rev;
 	u32 offset[OMAP3_ISP_IOMEM_LAST];
-	u32 syscon_offset;
 	u32 phy_type;
 };
 
@@ -184,7 +180,6 @@
 	u32 revision;
 
 	/* platform HW resources */
-	struct isp_platform_data *pdata;
 	unsigned int irq_num;
 
 	void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index e17c88b..28b63b2 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -17,7 +17,7 @@
 #ifndef OMAP3_ISP_CSI_PHY_H
 #define OMAP3_ISP_CSI_PHY_H
 
-#include <media/omap3isp.h>
+#include "omap3isp.h"
 
 struct isp_csi2_device;
 struct regulator;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index d285af1..41bb8df 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1018,8 +1018,7 @@
 
 	pipe->entities = 0;
 
-	if (video->isp->pdata && video->isp->pdata->set_constraints)
-		video->isp->pdata->set_constraints(video->isp, true);
+	/* TODO: Implement PM QoS */
 	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
 	pipe->max_rate = pipe->l3_ick;
 
@@ -1100,8 +1099,7 @@
 err_check_format:
 	media_entity_pipeline_stop(&video->video.entity);
 err_pipeline_start:
-	if (video->isp->pdata && video->isp->pdata->set_constraints)
-		video->isp->pdata->set_constraints(video->isp, false);
+	/* TODO: Implement PM QoS */
 	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
 	 * will get triggered the next time the CCDC is powered up will try to
 	 * access buffers that might have been freed but still present in the
@@ -1161,8 +1159,7 @@
 	video->queue = NULL;
 	video->error = false;
 
-	if (video->isp->pdata && video->isp->pdata->set_constraints)
-		video->isp->pdata->set_constraints(video->isp, false);
+	/* TODO: Implement PM QoS */
 	media_entity_pipeline_stop(&video->video.entity);
 
 done:
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
new file mode 100644
index 0000000..190e259
--- /dev/null
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -0,0 +1,132 @@
+/*
+ * omap3isp.h
+ *
+ * TI OMAP3 ISP - Bus Configuration
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __OMAP3ISP_H__
+#define __OMAP3ISP_H__
+
+enum isp_interface_type {
+	ISP_INTERFACE_PARALLEL,
+	ISP_INTERFACE_CSI2A_PHY2,
+	ISP_INTERFACE_CCP2B_PHY1,
+	ISP_INTERFACE_CCP2B_PHY2,
+	ISP_INTERFACE_CSI2C_PHY1,
+};
+
+/**
+ * struct isp_parallel_cfg - Parallel interface configuration
+ * @data_lane_shift: Data lane shifter
+ *		0 - CAMEXT[13:0] -> CAM[13:0]
+ *		1 - CAMEXT[13:2] -> CAM[11:0]
+ *		2 - CAMEXT[13:4] -> CAM[9:0]
+ *		3 - CAMEXT[13:6] -> CAM[7:0]
+ * @clk_pol: Pixel clock polarity
+ *		0 - Sample on rising edge, 1 - Sample on falling edge
+ * @hs_pol: Horizontal synchronization polarity
+ *		0 - Active high, 1 - Active low
+ * @vs_pol: Vertical synchronization polarity
+ *		0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ *		0 - Positive, 1 - Negative
+ * @data_pol: Data polarity
+ *		0 - Normal, 1 - One's complement
+ */
+struct isp_parallel_cfg {
+	unsigned int data_lane_shift:2;
+	unsigned int clk_pol:1;
+	unsigned int hs_pol:1;
+	unsigned int vs_pol:1;
+	unsigned int fld_pol:1;
+	unsigned int data_pol:1;
+};
+
+enum {
+	ISP_CCP2_PHY_DATA_CLOCK = 0,
+	ISP_CCP2_PHY_DATA_STROBE = 1,
+};
+
+enum {
+	ISP_CCP2_MODE_MIPI = 0,
+	ISP_CCP2_MODE_CCP2 = 1,
+};
+
+/**
+ * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
+ * @pos: position of the lane
+ * @pol: polarity of the lane
+ */
+struct isp_csiphy_lane {
+	u8 pos;
+	u8 pol;
+};
+
+#define ISP_CSIPHY1_NUM_DATA_LANES	1
+#define ISP_CSIPHY2_NUM_DATA_LANES	2
+
+/**
+ * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
+ * @data: Configuration of one or two data lanes
+ * @clk: Clock lane configuration
+ */
+struct isp_csiphy_lanes_cfg {
+	struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
+	struct isp_csiphy_lane clk;
+};
+
+/**
+ * struct isp_ccp2_cfg - CCP2 interface configuration
+ * @strobe_clk_pol: Strobe/clock polarity
+ *		0 - Non Inverted, 1 - Inverted
+ * @crc: Enable the cyclic redundancy check
+ * @ccp2_mode: Enable CCP2 compatibility mode
+ *		ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
+ *		ISP_CCP2_MODE_CCP2 - CCP2 mode
+ * @phy_layer: Physical layer selection
+ *		ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
+ *		ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
+ * @vpclk_div: Video port output clock control
+ */
+struct isp_ccp2_cfg {
+	unsigned int strobe_clk_pol:1;
+	unsigned int crc:1;
+	unsigned int ccp2_mode:1;
+	unsigned int phy_layer:1;
+	unsigned int vpclk_div:2;
+	struct isp_csiphy_lanes_cfg lanecfg;
+};
+
+/**
+ * struct isp_csi2_cfg - CSI2 interface configuration
+ * @crc: Enable the cyclic redundancy check
+ */
+struct isp_csi2_cfg {
+	unsigned int crc:1;
+	struct isp_csiphy_lanes_cfg lanecfg;
+};
+
+struct isp_bus_cfg {
+	enum isp_interface_type interface;
+	union {
+		struct isp_parallel_cfg parallel;
+		struct isp_ccp2_cfg ccp2;
+		struct isp_csi2_cfg csi2;
+	} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
+};
+
+#endif	/* __OMAP3ISP_H__ */
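
Since the union inside struct isp_bus_cfg is named ("bus", to keep pre-4.6.0
gcc happy with initializers), configuration tables built on top of it have to
spell the member out. A hypothetical example, not taken from this patch:

	static const struct isp_bus_cfg sensor_bus_cfg = {
		.interface = ISP_INTERFACE_CSI2A_PHY2,
		.bus = {
			.csi2 = {
				.crc = 1,
				.lanecfg = {
					.clk = { .pos = 1, .pol = 0 },
					.data = { { .pos = 2, .pol = 0 } },
				},
			},
		},
	};
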
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
new file mode 100644
index 0000000..2973f07
--- /dev/null
+++ b/drivers/media/platform/rcar_jpu.c
@@ -0,0 +1,1794 @@
+/*
+ * Author: Mikhail Ulyanov
+ * Copyright (C) 2014-2015 Cogent Embedded, Inc.  <source@cogentembedded.com>
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This is based on the drivers/media/platform/s5p-jpeg driver by
+ * Andrzej Pietrasiewicz and Jacek Anaszewski.
+ * Some portions of code inspired by VSP1 driver by Laurent Pinchart.
+ *
+ * TODO in order of priority:
+ *      1) Rotation
+ *      2) Cropping
+ *      3) V4L2_CID_JPEG_ACTIVE_MARKER
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+
+#define DRV_NAME "rcar_jpu"
+
+/*
+ * Align the JPEG header end to a cache line to make sure we will not have
+ * any cache issues, in addition to the hardware requirement
+ * (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_SIZE		(ALIGN(0x258, L1_CACHE_BYTES))
+#define JPU_JPEG_MAX_BYTES_PER_PIXEL	2	/* 16 bit precision format */
+#define JPU_JPEG_MIN_SIZE		25	/* SOI + SOF + EOI */
+#define JPU_JPEG_QTBL_SIZE		0x40
+#define JPU_JPEG_HDCTBL_SIZE		0x1c
+#define JPU_JPEG_HACTBL_SIZE		0xb2
+#define JPU_JPEG_HEIGHT_OFFSET		0x91
+#define JPU_JPEG_WIDTH_OFFSET		0x93
+#define JPU_JPEG_SUBS_OFFSET		0x97
+#define JPU_JPEG_QTBL_LUM_OFFSET	0x07
+#define JPU_JPEG_QTBL_CHR_OFFSET	0x4c
+#define JPU_JPEG_HDCTBL_LUM_OFFSET	0xa4
+#define JPU_JPEG_HACTBL_LUM_OFFSET	0xc5
+#define JPU_JPEG_HDCTBL_CHR_OFFSET	0x17c
+#define JPU_JPEG_HACTBL_CHR_OFFSET	0x19d
+#define JPU_JPEG_PADDING_OFFSET		0x24f
+#define JPU_JPEG_LUM 0x00
+#define JPU_JPEG_CHR 0x01
+#define JPU_JPEG_DC  0x00
+#define JPU_JPEG_AC  0x10
+
+#define JPU_JPEG_422 0x21
+#define JPU_JPEG_420 0x22
+
+#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
+#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M
+
+/* JPEG markers */
+#define TEM	0x01
+#define SOF0	0xc0
+#define RST	0xd0
+#define SOI	0xd8
+#define EOI	0xd9
+#define DHP	0xde
+#define DHT	0xc4
+#define COM	0xfe
+#define DQT	0xdb
+#define DRI	0xdd
+#define APP0	0xe0
+
+#define JPU_RESET_TIMEOUT	100 /* ms */
+#define JPU_JOB_TIMEOUT		300 /* ms */
+#define JPU_MAX_QUALITY		4
+#define JPU_WIDTH_MIN		16
+#define JPU_HEIGHT_MIN		16
+#define JPU_WIDTH_MAX		4096
+#define JPU_HEIGHT_MAX		4096
+#define JPU_MEMALIGN		8
+
+/* Flags that indicate a format can be used for capture/output */
+#define JPU_FMT_TYPE_OUTPUT	0
+#define JPU_FMT_TYPE_CAPTURE	1
+#define JPU_ENC_CAPTURE		(1 << 0)
+#define JPU_ENC_OUTPUT		(1 << 1)
+#define JPU_DEC_CAPTURE		(1 << 2)
+#define JPU_DEC_OUTPUT		(1 << 3)
+
+/*
+ * JPEG registers and bits
+ */
+
+/* JPEG code mode register */
+#define JCMOD	0x00
+#define JCMOD_PCTR		(1 << 7)
+#define JCMOD_MSKIP_ENABLE	(1 << 5)
+#define JCMOD_DSP_ENC		(0 << 3)
+#define JCMOD_DSP_DEC		(1 << 3)
+#define JCMOD_REDU		(7 << 0)
+#define JCMOD_REDU_422		(1 << 0)
+#define JCMOD_REDU_420		(2 << 0)
+
+/* JPEG code command register */
+#define JCCMD	0x04
+#define JCCMD_SRST	(1 << 12)
+#define JCCMD_JEND	(1 << 2)
+#define JCCMD_JSRT	(1 << 0)
+
+/* JPEG code quantization table number register */
+#define JCQTN	0x0c
+#define JCQTN_SHIFT(t)		(((t) - 1) << 1)
+
+/* JPEG code Huffman table number register */
+#define JCHTN	0x10
+#define JCHTN_AC_SHIFT(t)	(((t) << 1) - 1)
+#define JCHTN_DC_SHIFT(t)	(((t) - 1) << 1)
+
+#define JCVSZU	0x1c /* JPEG code vertical size upper register */
+#define JCVSZD	0x20 /* JPEG code vertical size lower register */
+#define JCHSZU	0x24 /* JPEG code horizontal size upper register */
+#define JCHSZD	0x28 /* JPEG code horizontal size lower register */
+#define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte */
+
+#define JCDTCU	0x2c /* JPEG code data count upper register */
+#define JCDTCM	0x30 /* JPEG code data count middle register */
+#define JCDTCD	0x34 /* JPEG code data count lower register */
+
+/* JPEG interrupt enable register */
+#define JINTE	0x38
+#define JINTE_ERR		(7 << 5) /* INT5 + INT6 + INT7 */
+#define JINTE_TRANSF_COMPL	(1 << 10)
+
+/* JPEG interrupt status register */
+#define JINTS	0x3c
+#define JINTS_MASK	0x7c68
+#define JINTS_ERR		(1 << 5)
+#define JINTS_PROCESS_COMPL	(1 << 6)
+#define JINTS_TRANSF_COMPL	(1 << 10)
+
+#define JCDERR	0x40 /* JPEG code decode error register */
+#define JCDERR_MASK	0xf /* JPEG code decode error register mask */
+
+/* JPEG interface encoding */
+#define JIFECNT	0x70
+#define JIFECNT_INFT_422	0
+#define JIFECNT_INFT_420	1
+#define JIFECNT_SWAP_WB		(3 << 4) /* to JPU */
+
+#define JIFESYA1	0x74	/* encode source Y address register 1 */
+#define JIFESCA1	0x78	/* encode source C address register 1 */
+#define JIFESYA2	0x7c	/* encode source Y address register 2 */
+#define JIFESCA2	0x80	/* encode source C address register 2 */
+#define JIFESMW		0x84	/* encode source memory width register */
+#define JIFESVSZ	0x88	/* encode source vertical size register */
+#define JIFESHSZ	0x8c	/* encode source horizontal size register */
+#define JIFEDA1		0x90	/* encode destination address register 1 */
+#define JIFEDA2		0x94	/* encode destination address register 2 */
+
+/* JPEG decoding control register */
+#define JIFDCNT	0xa0
+#define JIFDCNT_SWAP_WB		(3 << 1) /* from JPU */
+
+#define JIFDSA1		0xa4	/* decode source address register 1 */
+#define JIFDDMW		0xb0	/* decode destination memory width register */
+#define JIFDDVSZ	0xb4	/* decode destination vert. size register */
+#define JIFDDHSZ	0xb8	/* decode destination horiz. size register */
+#define JIFDDYA1	0xbc	/* decode destination Y address register 1 */
+#define JIFDDCA1	0xc0	/* decode destination C address register 1 */
+
+#define JCQTBL(n)	(0x10000 + (n) * 0x40)	/* quantization tables regs */
+#define JCHTBD(n)	(0x10100 + (n) * 0x100)	/* Huffman table DC regs */
+#define JCHTBA(n)	(0x10120 + (n) * 0x100)	/* Huffman table AC regs */
+
+/**
+ * struct jpu - JPEG IP abstraction
+ * @mutex: the mutex protecting this structure
+ * @lock: spinlock protecting the device contexts
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @curr: pointer to current context
+ * @irq_queue:	interrupt handler waitqueue
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clk: JPEG IP clock
+ * @dev: JPEG IP struct device
+ * @alloc_ctx: videobuf2 memory allocator's context
+ * @ref_count: reference counter
+ */
+struct jpu {
+	struct mutex	mutex;
+	spinlock_t	lock;
+	struct v4l2_device	v4l2_dev;
+	struct video_device	vfd_encoder;
+	struct video_device	vfd_decoder;
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct jpu_ctx		*curr;
+	wait_queue_head_t	irq_queue;
+
+	void __iomem		*regs;
+	unsigned int		irq;
+	struct clk		*clk;
+	struct device		*dev;
+	void			*alloc_ctx;
+	int			ref_count;
+};
+
+/**
+ * struct jpu_buffer - driver's specific video buffer
+ * @buf: m2m buffer
+ * @compr_quality: destination image quality in compression mode
+ * @subsampling: source image subsampling in decompression mode
+ */
+struct jpu_buffer {
+	struct v4l2_m2m_buffer buf;
+	unsigned short	compr_quality;
+	unsigned char	subsampling;
+};
+
+/**
+ * struct jpu_fmt - driver's internal format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @colorspace: the colorspace specifier
+ * @bpp: number of bits per pixel per plane
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @subsampling: (horizontal:4 | vertical:4) subsampling factor
+ * @num_planes: number of planes
+ * @types: types of queue this format is applicable to
+ */
+struct jpu_fmt {
+	u32 fourcc;
+	u32 colorspace;
+	u8 bpp[2];
+	u8 h_align;
+	u8 v_align;
+	u8 subsampling;
+	u8 num_planes;
+	u16 types;
+};
+
+/**
+ * jpu_q_data - parameters of one queue
+ * @fmtinfo: driver-specific format of this queue
+ * @format: multiplanar format of this queue
+ * @sequence: sequence number
+ */
+struct jpu_q_data {
+	struct jpu_fmt *fmtinfo;
+	struct v4l2_pix_format_mplane format;
+	unsigned int sequence;
+};
+
+/**
+ * jpu_ctx - the device context data
+ * @jpu: JPEG IP device for this context
+ * @encoder: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: file handler
+ * @ctrl_handler: controls handler
+ */
+struct jpu_ctx {
+	struct jpu		*jpu;
+	bool			encoder;
+	unsigned short		compr_quality;
+	struct jpu_q_data	out_q;
+	struct jpu_q_data	cap_q;
+	struct v4l2_fh		fh;
+	struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * struct jpeg_buffer - description of memory containing input JPEG data
+ * @end: end position in the buffer
+ * @curr: current position in the buffer
+ */
+struct jpeg_buffer {
+	void *end;
+	void *curr;
+};
+
+static struct jpu_fmt jpu_formats[] = {
+	{ V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG,
+	  {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT },
+	{ V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB,
+	  {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+	{ V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB,
+	  {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+	{ V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB,
+	  {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+	{ V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB,
+	  {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+};
+
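+/*
+ * The quantization tables below are stored as u32 words whose textual byte
+ * order mirrors the hardware register layout, so (on a little-endian CPU)
+ * byte n of a table sits at memory offset n ^ 3. zigzag[] therefore encodes
+ * the standard JPEG zigzag scan combined with that XOR-3 swizzle: entry i is
+ * the natural (row-major) index of zigzag position i, XORed with 3 - which
+ * is why the table starts at 0x03 instead of 0x00.
+ */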
+static const u8 zigzag[] = {
+	0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09,
+	0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06,
+	0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21,
+	0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f,
+	0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27,
+	0x1e, 0x15, 0x0c, 0x14, 0x1d, 0x26, 0x2f, 0x30,
+	0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2d,
+	0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c
+};
+
+#define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \
+			  sizeof(unsigned int)) / sizeof(unsigned int))
+#define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \
+			  sizeof(unsigned int)) / sizeof(unsigned int))
+#define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \
+			  sizeof(unsigned int)) / sizeof(unsigned int))
+/*
+ * Start of image; Quantization tables
+ * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width,
+ * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr;
+ * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_BLOB {                                                    \
+	0xff, SOI, 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM,    \
+	[JPU_JPEG_QTBL_LUM_OFFSET ...                                          \
+		JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00,     \
+	0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR,               \
+	[JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET +               \
+		JPU_JPEG_QTBL_SIZE - 1] = 0x00, 0xff, SOF0, 0x00, 0x11, 0x08,  \
+	[JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00,        \
+	[JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00,          \
+	0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM,               \
+	0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR,                    \
+	0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_DC, \
+	[JPU_JPEG_HDCTBL_LUM_OFFSET ...                                        \
+		JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+	0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_AC, \
+	[JPU_JPEG_HACTBL_LUM_OFFSET ...                                        \
+		JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+	0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_DC, \
+	[JPU_JPEG_HDCTBL_CHR_OFFSET ...                                        \
+		JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+	0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_AC, \
+	[JPU_JPEG_HACTBL_CHR_OFFSET ...                                        \
+		JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+	[JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff             \
+}
+
+static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = {
+	[0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB
+};
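+
+/*
+ * jpeg_hdrs[] starts out as one header template per quality level; at probe
+ * time jpu_generate_hdr() fills in the zeroed quantization and Huffman table
+ * regions, and jpu_buf_finish() later copies the finished header into each
+ * capture buffer and patches the per-frame height, width and subsampling.
+ */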
+
+static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = {
+	{
+		0x14101927, 0x322e3e44, 0x10121726, 0x26354144,
+		0x19171f26, 0x35414444, 0x27262635, 0x41444444,
+		0x32263541, 0x44444444, 0x2e354144, 0x44444444,
+		0x3e414444, 0x44444444, 0x44444444, 0x44444444
+	},
+	{
+		0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e,
+		0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40,
+		0x1714171a, 0x27334040, 0x1b171a25, 0x33404040,
+		0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040
+	},
+	{
+		0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217,
+		0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435,
+		0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b,
+		0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b
+	},
+	{
+		0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f,
+		0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823,
+		0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727,
+		0x100c1318, 0x22272727, 0x110f1823, 0x27272727
+	}
+};
+
+static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = {
+	{
+		0x15192026, 0x36444444, 0x191c1826, 0x36444444,
+		0x2018202b, 0x42444444, 0x26262b35, 0x44444444,
+		0x36424444, 0x44444444, 0x44444444, 0x44444444,
+		0x44444444, 0x44444444, 0x44444444, 0x44444444
+	},
+	{
+		0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b,
+		0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540,
+		0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040,
+		0x26232e35, 0x40404040, 0x302b3540, 0x40404040
+	},
+	{
+		0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20,
+		0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832,
+		0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b,
+		0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b
+	},
+	{
+		0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116,
+		0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21,
+		0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727,
+		0x1311171a, 0x21272727, 0x18161a21, 0x27272727
+	}
+};
+
+static const unsigned int hdctbl_lum[HDCTBL_SIZE] = {
+	0x00010501, 0x01010101, 0x01000000, 0x00000000,
+	0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hdctbl_chr[HDCTBL_SIZE] = {
+	0x00010501, 0x01010101, 0x01000000, 0x00000000,
+	0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hactbl_lum[HACTBL_SIZE] = {
+	0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+	0x21314106, 0x13516107,	0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+	0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+	0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+	0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+	0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+	0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+	0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const unsigned int hactbl_chr[HACTBL_SIZE] = {
+	0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+	0x21314106, 0x13516107,	0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+	0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+	0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+	0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+	0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+	0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+	0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const char *error_to_text[16] = {
+	"Normal",
+	"SOI not detected",
+	"SOF1 to SOFF detected",
+	"Subsampling not detected",
+	"SOF accuracy error",
+	"DQT accuracy error",
+	"Component error 1",
+	"Component error 2",
+	"SOF0, DQT, and DHT not detected when SOS detected",
+	"SOS not detected",
+	"EOI not detected",
+	"Restart interval data number error detected",
+	"Image size error",
+	"Last MCU data number error",
+	"Block data number error",
+	"Unknown"
+};
+
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_buffer *vb)
+{
+	struct v4l2_m2m_buffer *b =
+		container_of(vb, struct v4l2_m2m_buffer, vb);
+
+	return container_of(b, struct jpu_buffer, buf);
+}
+
+static u32 jpu_read(struct jpu *jpu, unsigned int reg)
+{
+	return ioread32(jpu->regs + reg);
+}
+
+static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
+{
+	iowrite32(val, jpu->regs + reg);
+}
+
+static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+	return container_of(c->handler, struct jpu_ctx, ctrl_handler);
+}
+
+static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+	return container_of(fh, struct jpu_ctx, fh);
+}
+
+static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
+			unsigned int len)
+{
+	unsigned int i;
+
+	for (i = 0; i < len; i++)
+		jpu_write(jpu, tbl[i], reg + (i << 2));
+}
+
+static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
+{
+	jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
+	jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
+}
+
+static void jpu_set_htbl(struct jpu *jpu)
+{
+	jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
+	jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
+	jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
+	jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
+}
+
+static int jpu_wait_reset(struct jpu *jpu)
+{
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);
+
+	while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(jpu->dev, "timed out in reset\n");
+			return -ETIMEDOUT;
+		}
+		schedule();
+	}
+
+	return 0;
+}
+
+static int jpu_reset(struct jpu *jpu)
+{
+	jpu_write(jpu, JCCMD_SRST, JCCMD);
+	return jpu_wait_reset(jpu);
+}
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+static void put_qtbl(u8 *p, const u8 *qtbl)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(zigzag); i++)
+		p[i] = *(qtbl + zigzag[i]);
+}
+
+static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < len; i += 4)
+		for (j = 0; j < 4 && (i + j) < len; ++j)
+			p[i + j] = htbl[i + 3 - j];
+}
+
+static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
+{
+	put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
+	put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);
+
+	put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
+		 JPU_JPEG_HDCTBL_SIZE);
+	put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
+		 JPU_JPEG_HACTBL_SIZE);
+
+	put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
+		 JPU_JPEG_HDCTBL_SIZE);
+	put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
+		 JPU_JPEG_HACTBL_SIZE);
+}
+
+static int get_byte(struct jpeg_buffer *buf)
+{
+	if (buf->curr >= buf->end)
+		return -1;
+
+	return *(u8 *)buf->curr++;
+}
+
+static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
+{
+	if (buf->end - buf->curr < 2)
+		return -1;
+
+	*word = get_unaligned_be16(buf->curr);
+	buf->curr += 2;
+
+	return 0;
+}
+
+static void skip(struct jpeg_buffer *buf, unsigned long len)
+{
+	buf->curr += min((unsigned long)(buf->end - buf->curr), len);
+}
+
+static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
+			  unsigned int *height)
+{
+	struct jpeg_buffer jpeg_buffer;
+	unsigned int word;
+	bool soi = false;
+
+	jpeg_buffer.end = buffer + size;
+	jpeg_buffer.curr = buffer;
+
+	/*
+	 * Basic size and EOI check - we must not let the JPU read past the
+	 * buffer bounds in any case; we rely on the stream ending with EOI.
+	 */
+	if (size < JPU_JPEG_MIN_SIZE || *(u8 *)(buffer + size - 1) != EOI)
+		return 0;
+
+	for (;;) {
+		int c;
+
+		/* skip preceding filler bytes */
+		do
+			c = get_byte(&jpeg_buffer);
+		while (c == 0xff || c == 0);
+
+		if (!soi && c == SOI) {
+			soi = true;
+			continue;
+		} else if (soi != (c != SOI))
+			return 0;
+
+		switch (c) {
+		case SOF0: /* SOF0: baseline JPEG */
+			skip(&jpeg_buffer, 3); /* segment length and bpp */
+			if (get_word_be(&jpeg_buffer, height) ||
+			    get_word_be(&jpeg_buffer, width) ||
+			    get_byte(&jpeg_buffer) != 3) /* YCbCr only */
+				return 0;
+
+			skip(&jpeg_buffer, 1);
+			return get_byte(&jpeg_buffer);
+		case DHT:
+		case DQT:
+		case COM:
+		case DRI:
+		case APP0 ... APP0 + 0x0f:
+			if (get_word_be(&jpeg_buffer, &word))
+				return 0;
+			skip(&jpeg_buffer, (long)word - 2);
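+			/* fall through */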
+		case 0:
+			break;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+static int jpu_querycap(struct file *file, void *priv,
+			struct v4l2_capability *cap)
+{
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->encoder)
+		strlcpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
+	else
+		strlcpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));
+
+	strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		 dev_name(ctx->jpu->dev));
+	cap->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+	cap->capabilities = V4L2_CAP_DEVICE_CAPS | cap->device_caps;
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+
+	return 0;
+}
+
+static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
+				       unsigned int fmt_type)
+{
+	unsigned int i, fmt_flag;
+
+	if (encoder)
+		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_ENC_OUTPUT :
+							     JPU_ENC_CAPTURE;
+	else
+		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_DEC_OUTPUT :
+							     JPU_DEC_CAPTURE;
+
+	for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
+		struct jpu_fmt *fmt = &jpu_formats[i];
+
+		if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+	unsigned int i, num = 0;
+
+	for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
+		if (jpu_formats[i].types & type) {
+			if (num == f->index)
+				break;
+			++num;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(jpu_formats))
+		return -EINVAL;
+
+	f->pixelformat = jpu_formats[i].fourcc;
+
+	return 0;
+}
+
+static int jpu_enum_fmt_cap(struct file *file, void *priv,
+			    struct v4l2_fmtdesc *f)
+{
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
+			    JPU_DEC_CAPTURE);
+}
+
+static int jpu_enum_fmt_out(struct file *file, void *priv,
+			    struct v4l2_fmtdesc *f)
+{
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
+}
+
+static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return &ctx->out_q;
+	else
+		return &ctx->cap_q;
+}
+
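+/*
+ * Like v4l_bound_align_image(), but prefer rounding up: if alignment rounded
+ * the requested width or height down, bump it back up by one alignment step
+ * (bounds permitting) so the resulting image is never smaller than requested.
+ */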
+static void jpu_bound_align_image(u32 *w, unsigned int w_min,
+				  unsigned int w_max, unsigned int w_align,
+				  u32 *h, unsigned int h_min,
+				  unsigned int h_max, unsigned int h_align)
+{
+	unsigned int width, height, w_step, h_step;
+
+	width = *w;
+	height = *h;
+
+	w_step = 1U << w_align;
+	h_step = 1U << h_align;
+	v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
+			      h_align, 3);
+
+	if (*w < width && *w + w_step < w_max)
+		*w += w_step;
+	if (*h < height && *h + h_step < h_max)
+		*h += h_step;
+}
+
+static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
+			 struct v4l2_pix_format_mplane *pix,
+			 enum v4l2_buf_type type)
+{
+	struct jpu_fmt *fmt;
+	unsigned int f_type, w, h;
+
+	f_type = V4L2_TYPE_IS_OUTPUT(type) ? JPU_FMT_TYPE_OUTPUT :
+						JPU_FMT_TYPE_CAPTURE;
+
+	fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
+	if (!fmt) {
+		unsigned int pixelformat;
+
+		dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
+		if (ctx->encoder)
+			pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
+				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+		else
+			pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
+				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+		fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
+	}
+
+	pix->pixelformat = fmt->fourcc;
+	pix->colorspace = fmt->colorspace;
+	pix->field = V4L2_FIELD_NONE;
+	pix->num_planes = fmt->num_planes;
+	memset(pix->reserved, 0, sizeof(pix->reserved));
+
+	jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
+			      fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
+			      JPU_HEIGHT_MAX, fmt->v_align);
+
+	w = pix->width;
+	h = pix->height;
+
+	if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+		/* ignore userspace's sizeimage for encoding */
+		if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
+			pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
+				(JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
+		pix->plane_fmt[0].bytesperline = 0;
+		memset(pix->plane_fmt[0].reserved, 0,
+		       sizeof(pix->plane_fmt[0].reserved));
+	} else {
+		unsigned int i, bpl = 0;
+
+		for (i = 0; i < pix->num_planes; ++i)
+			bpl = max(bpl, pix->plane_fmt[i].bytesperline);
+
+		bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
+		bpl = round_up(bpl, JPU_MEMALIGN);
+
+		for (i = 0; i < pix->num_planes; ++i) {
+			pix->plane_fmt[i].bytesperline = bpl;
+			pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
+			memset(pix->plane_fmt[i].reserved, 0,
+			       sizeof(pix->plane_fmt[i].reserved));
+		}
+	}
+
+	if (fmtinfo)
+		*fmtinfo = fmt;
+
+	return 0;
+}
+
+static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+		return -EINVAL;
+
+	return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
+}
+
+static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+	struct jpu_fmt *fmtinfo;
+	struct jpu_q_data *q_data;
+	int ret;
+
+	vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type);
+	if (ret < 0)
+		return ret;
+
+	q_data = jpu_get_q_data(ctx, f->type);
+
+	q_data->format = f->fmt.pix_mp;
+	q_data->fmtinfo = fmtinfo;
+
+	return 0;
+}
+
+static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct jpu_q_data *q_data;
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+		return -EINVAL;
+
+	q_data = jpu_get_q_data(ctx, f->type);
+	f->fmt.pix_mp = q_data->format;
+
+	return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+static int jpu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct jpu_ctx *ctx = ctrl_to_ctx(ctrl);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->jpu->lock, flags);
+	if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY)
+		ctx->compr_quality = ctrl->val;
+	spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
+	.s_ctrl		= jpu_s_ctrl,
+};
+
+static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+	struct jpu_ctx *ctx = fh_to_ctx(priv);
+	struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+	enum v4l2_buf_type adj_type;
+
+	src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+	if (ctx->encoder) {
+		adj = *src_q_data;
+		orig = src_q_data;
+		ref = dst_q_data;
+		adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	} else {
+		adj = *dst_q_data;
+		orig = dst_q_data;
+		ref = src_q_data;
+		adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	}
+
+	adj.format.width = ref->format.width;
+	adj.format.height = ref->format.height;
+
+	__jpu_try_fmt(ctx, NULL, &adj.format, adj_type);
+
+	if (adj.format.width != orig->format.width ||
+	    adj.format.height != orig->format.height) {
+		dev_err(ctx->jpu->dev, "src and dst formats do not match.\n");
+		/* maybe we can return -EPIPE here? */
+		return -EINVAL;
+	}
+
+	return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
+	.vidioc_querycap		= jpu_querycap,
+
+	.vidioc_enum_fmt_vid_cap_mplane = jpu_enum_fmt_cap,
+	.vidioc_enum_fmt_vid_out_mplane = jpu_enum_fmt_out,
+	.vidioc_g_fmt_vid_cap_mplane	= jpu_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= jpu_g_fmt,
+	.vidioc_try_fmt_vid_cap_mplane	= jpu_try_fmt,
+	.vidioc_try_fmt_vid_out_mplane	= jpu_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane	= jpu_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane	= jpu_s_fmt,
+
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_create_bufs             = v4l2_m2m_ioctl_create_bufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+
+	.vidioc_streamon		= jpu_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe
+};
+
+static int jpu_controls_create(struct jpu_ctx *ctx)
+{
+	struct v4l2_ctrl *ctrl;
+	int ret;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+
+	ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops,
+				 V4L2_CID_JPEG_COMPRESSION_QUALITY,
+				 0, JPU_MAX_QUALITY - 1, 1, 0);
+
+	if (ctx->ctrl_handler.error) {
+		ret = ctx->ctrl_handler.error;
+		goto error_free;
+	}
+
+	if (!ctx->encoder)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+				V4L2_CTRL_FLAG_READ_ONLY;
+
+	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	if (ret < 0)
+		goto error_free;
+
+	return 0;
+
+error_free:
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	return ret;
+}
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+static int jpu_queue_setup(struct vb2_queue *vq,
+			   const struct v4l2_format *fmt,
+			   unsigned int *nbuffers, unsigned int *nplanes,
+			   unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+	struct jpu_q_data *q_data;
+	unsigned int i;
+
+	q_data = jpu_get_q_data(ctx, vq->type);
+
+	*nplanes = q_data->format.num_planes;
+
+	for (i = 0; i < *nplanes; i++) {
+		unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;
+		unsigned int f_size = fmt ?
+			fmt->fmt.pix_mp.plane_fmt[i].sizeimage : 0;
+
+		if (fmt && f_size < q_size)
+			return -EINVAL;
+
+		sizes[i] = fmt ? f_size : q_size;
+		alloc_ctxs[i] = ctx->jpu->alloc_ctx;
+	}
+
+	return 0;
+}
+
+static int jpu_buf_prepare(struct vb2_buffer *vb)
+{
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct jpu_q_data *q_data;
+	unsigned int i;
+
+	q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
+
+	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
+			vb->v4l2_buf.field = V4L2_FIELD_NONE;
+		if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+			dev_err(ctx->jpu->dev, "%s field isn't supported\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < q_data->format.num_planes; i++) {
+		unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+		if (vb2_plane_size(vb, i) < size) {
+			dev_err(ctx->jpu->dev,
+				"%s: data will not fit into plane (%lu < %lu)\n",
+			       __func__, vb2_plane_size(vb, i), size);
+			return -EINVAL;
+		}
+
+		/* decoder capture queue */
+		if (!ctx->encoder && !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+			vb2_set_plane_payload(vb, i, size);
+	}
+
+	return 0;
+}
+
+static void jpu_buf_queue(struct vb2_buffer *vb)
+{
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+		struct jpu_q_data *q_data, adjust;
+		void *buffer = vb2_plane_vaddr(vb, 0);
+		unsigned long buf_size = vb2_get_plane_payload(vb, 0);
+		unsigned int width, height;
+
+		u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
+						 &height);
+
+		/* check if JPEG data basic parsing was successful */
+		if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
+			goto format_error;
+
+		q_data = &ctx->out_q;
+
+		adjust = *q_data;
+		adjust.format.width = width;
+		adjust.format.height = height;
+
+		__jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
+			      V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+		if (adjust.format.width != q_data->format.width ||
+		    adjust.format.height != q_data->format.height)
+			goto format_error;
+
+		/*
+		 * keep subsampling in buffer to check it
+		 * for compatibility in device_run
+		 */
+		jpu_buf->subsampling = subsampling;
+	}
+
+	if (ctx->fh.m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+
+	return;
+
+format_error:
+	dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
+	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void jpu_buf_finish(struct vb2_buffer *vb)
+{
+	struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct jpu_q_data *q_data = &ctx->out_q;
+	enum v4l2_buf_type type = vb->vb2_queue->type;
+	u8 *buffer;
+
+	if (vb->state == VB2_BUF_STATE_DONE)
+		vb->v4l2_buf.sequence = jpu_get_q_data(ctx, type)->sequence++;
+
+	if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
+	    V4L2_TYPE_IS_OUTPUT(type))
+		return;
+
+	buffer = vb2_plane_vaddr(vb, 0);
+
+	memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
+	*(u16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
+					cpu_to_be16(q_data->format.height);
+	*(u16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
+					cpu_to_be16(q_data->format.width);
+	*(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
+}
+
+static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+	struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);
+
+	q_data->sequence = 0;
+	return 0;
+}
+
+static void jpu_stop_streaming(struct vb2_queue *vq)
+{
+	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+	struct vb2_buffer *vb;
+	unsigned long flags;
+
+	for (;;) {
+		if (V4L2_TYPE_IS_OUTPUT(vq->type))
+			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+		else
+			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+		if (vb == NULL)
+			return;
+		spin_lock_irqsave(&ctx->jpu->lock, flags);
+		v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+		spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+	}
+}
+
+static struct vb2_ops jpu_qops = {
+	.queue_setup		= jpu_queue_setup,
+	.buf_prepare		= jpu_buf_prepare,
+	.buf_queue		= jpu_buf_queue,
+	.buf_finish		= jpu_buf_finish,
+	.start_streaming	= jpu_start_streaming,
+	.stop_streaming		= jpu_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
+			  struct vb2_queue *dst_vq)
+{
+	struct jpu_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct jpu_buffer);
+	src_vq->ops = &jpu_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->jpu->mutex;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct jpu_buffer);
+	dst_vq->ops = &jpu_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->jpu->mutex;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+static int jpu_open(struct file *file)
+{
+	struct jpu *jpu = video_drvdata(file);
+	struct video_device *vfd = video_devdata(file);
+	struct jpu_ctx *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	v4l2_fh_init(&ctx->fh, vfd);
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+
+	ctx->jpu = jpu;
+	ctx->encoder = vfd == &jpu->vfd_encoder;
+
+	__jpu_try_fmt(ctx, &ctx->out_q.fmtinfo, &ctx->out_q.format,
+		      V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	__jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format,
+		      V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		goto v4l_prepare_rollback;
+	}
+
+	ret = jpu_controls_create(ctx);
+	if (ret < 0)
+		goto v4l_prepare_rollback;
+
+	if (mutex_lock_interruptible(&jpu->mutex)) {
+		ret = -ERESTARTSYS;
+		goto v4l_prepare_rollback;
+	}
+
+	if (jpu->ref_count == 0) {
+		ret = clk_prepare_enable(jpu->clk);
+		if (ret < 0)
+			goto device_prepare_rollback;
+		/* ...issue software reset */
+		ret = jpu_reset(jpu);
+		if (ret)
+			goto device_prepare_rollback;
+	}
+
+	jpu->ref_count++;
+
+	mutex_unlock(&jpu->mutex);
+	return 0;
+
+device_prepare_rollback:
+	mutex_unlock(&jpu->mutex);
+v4l_prepare_rollback:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	return ret;
+}
+
+static int jpu_release(struct file *file)
+{
+	struct jpu *jpu = video_drvdata(file);
+	struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
+
+	mutex_lock(&jpu->mutex);
+	if (--jpu->ref_count == 0)
+		clk_disable_unprepare(jpu->clk);
+	mutex_unlock(&jpu->mutex);
+
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+
+	return 0;
+}
+
+static const struct v4l2_file_operations jpu_fops = {
+	.owner		= THIS_MODULE,
+	.open		= jpu_open,
+	.release	= jpu_release,
+	.unlocked_ioctl	= video_ioctl2,
+	.poll		= v4l2_m2m_fop_poll,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
+{
+	/* remove current buffers and finish job */
+	struct vb2_buffer *src_buf, *dst_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+
+	/* ...and give it a chance on next run */
+	if (reset)
+		jpu_write(ctx->jpu, JCCMD_SRST, JCCMD);
+
+	spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+	v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void jpu_device_run(void *priv)
+{
+	struct jpu_ctx *ctx = priv;
+	struct jpu *jpu = ctx->jpu;
+	struct jpu_buffer *jpu_buf;
+	struct jpu_q_data *q_data;
+	struct vb2_buffer *src_buf, *dst_buf;
+	unsigned int w, h, bpl;
+	unsigned char num_planes, subsampling;
+	unsigned long flags;
+
+	/* ...wait until module reset completes; we have mutex locked here */
+	if (jpu_wait_reset(jpu)) {
+		jpu_cleanup(ctx, true);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+	jpu->curr = ctx;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+	if (ctx->encoder) {
+		jpu_buf = vb2_to_jpu_buffer(dst_buf);
+		q_data = &ctx->out_q;
+	} else {
+		jpu_buf = vb2_to_jpu_buffer(src_buf);
+		q_data = &ctx->cap_q;
+	}
+
+	w = q_data->format.width;
+	h = q_data->format.height;
+	bpl = q_data->format.plane_fmt[0].bytesperline;
+	num_planes = q_data->fmtinfo->num_planes;
+	subsampling = q_data->fmtinfo->subsampling;
+
+	if (ctx->encoder) {
+		unsigned long src_1_addr, src_2_addr, dst_addr;
+		unsigned int redu, inft;
+
+		dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		src_1_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+		if (num_planes > 1)
+			src_2_addr = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+		else
+			src_2_addr = src_1_addr + w * h;
+
+		jpu_buf->compr_quality = ctx->compr_quality;
+
+		if (subsampling == JPU_JPEG_420) {
+			redu = JCMOD_REDU_420;
+			inft = JIFECNT_INFT_420;
+		} else {
+			redu = JCMOD_REDU_422;
+			inft = JIFECNT_INFT_422;
+		}
+
+		/* only no marker mode works for encoding */
+		jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu |
+			  JCMOD_MSKIP_ENABLE, JCMOD);
+
+		jpu_write(jpu, JIFECNT_SWAP_WB | inft, JIFECNT);
+		jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+		jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE);
+
+		/* Y and C components source addresses */
+		jpu_write(jpu, src_1_addr, JIFESYA1);
+		jpu_write(jpu, src_2_addr, JIFESCA1);
+
+		/* memory width */
+		jpu_write(jpu, bpl, JIFESMW);
+
+		jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU);
+		jpu_write(jpu, w & JCSZ_MASK, JCHSZD);
+
+		jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU);
+		jpu_write(jpu, h & JCSZ_MASK, JCVSZD);
+
+		jpu_write(jpu, w, JIFESHSZ);
+		jpu_write(jpu, h, JIFESVSZ);
+
+		jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1);
+
+		jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) |
+			  1 << JCQTN_SHIFT(3), JCQTN);
+
+		jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) |
+			  1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) |
+			  1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3),
+			  JCHTN);
+
+		jpu_set_qtbl(jpu, ctx->compr_quality);
+		jpu_set_htbl(jpu);
+	} else {
+		unsigned long src_addr, dst_1_addr, dst_2_addr;
+
+		if (jpu_buf->subsampling != subsampling) {
+			dev_err(ctx->jpu->dev,
+				"src and dst formats do not match.\n");
+			spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+			jpu_cleanup(ctx, false);
+			return;
+		}
+
+		src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+		dst_1_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		if (q_data->fmtinfo->num_planes > 1)
+			dst_2_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+		else
+			dst_2_addr = dst_1_addr + w * h;
+
+		/* ...set up decoder operation */
+		jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD);
+		jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT);
+		jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+
+		/* ...enable interrupts on transfer completion and decode error */
+		jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE);
+
+		/* ...set source/destination addresses of encoded data */
+		jpu_write(jpu, src_addr, JIFDSA1);
+		jpu_write(jpu, dst_1_addr, JIFDDYA1);
+		jpu_write(jpu, dst_2_addr, JIFDDCA1);
+
+		jpu_write(jpu, bpl, JIFDDMW);
+	}
+
+	/* ...start encoder/decoder operation */
+	jpu_write(jpu, JCCMD_JSRT, JCCMD);
+
+	spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+}
+
+static int jpu_job_ready(void *priv)
+{
+	return 1;
+}
+
+static void jpu_job_abort(void *priv)
+{
+	struct jpu_ctx *ctx = priv;
+
+	if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr,
+				msecs_to_jiffies(JPU_JOB_TIMEOUT)))
+		jpu_cleanup(ctx, true);
+}
+
+static struct v4l2_m2m_ops jpu_m2m_ops = {
+	.device_run	= jpu_device_run,
+	.job_ready	= jpu_job_ready,
+	.job_abort	= jpu_job_abort,
+};
+
+/*
+ * ============================================================================
+ * IRQ handler
+ * ============================================================================
+ */
+static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
+{
+	struct jpu *jpu = dev_id;
+	struct jpu_ctx *curr_ctx;
+	struct vb2_buffer *src_buf, *dst_buf;
+	unsigned int int_status;
+
+	int_status = jpu_read(jpu, JINTS);
+
+	/* ...spurious interrupt */
+	if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) &
+	    int_status))
+		return IRQ_NONE;
+
+	/* ...clear interrupts */
+	jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS);
+	if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL))
+		jpu_write(jpu, JCCMD_JEND, JCCMD);
+
+	spin_lock(&jpu->lock);
+
+	if ((int_status & JINTS_PROCESS_COMPL) &&
+	   !(int_status & JINTS_TRANSF_COMPL))
+		goto handled;
+
+	curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev);
+	if (!curr_ctx) {
+		/* ...instance is not running */
+		dev_err(jpu->dev, "no active context for m2m\n");
+		goto handled;
+	}
+
+	src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+	if (int_status & JINTS_TRANSF_COMPL) {
+		if (curr_ctx->encoder) {
+			unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
+						   | jpu_read(jpu, JCDTCM) << 8
+						   | jpu_read(jpu, JCDTCD);
+			vb2_set_plane_payload(dst_buf, 0,
+				payload_size + JPU_JPEG_HDR_SIZE);
+		}
+
+		dst_buf->v4l2_buf.field = src_buf->v4l2_buf.field;
+		dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+		if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
+			dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+		dst_buf->v4l2_buf.flags = src_buf->v4l2_buf.flags &
+			(V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
+			 V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
+			 V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+	} else if (int_status & JINTS_ERR) {
+		unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK;
+
+		dev_dbg(jpu->dev, "processing error: %#X: %s\n", error,
+			error_to_text[error]);
+
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+	}
+
+	jpu->curr = NULL;
+
+	/* ...reset JPU after completion */
+	jpu_write(jpu, JCCMD_SRST, JCCMD);
+	spin_unlock(&jpu->lock);
+
+	v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
+
+	/* ...wakeup abort routine if needed */
+	wake_up(&jpu->irq_queue);
+
+	return IRQ_HANDLED;
+
+handled:
+	spin_unlock(&jpu->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+static const struct of_device_id jpu_dt_ids[] = {
+	{ .compatible = "renesas,jpu-r8a7790" }, /* H2 */
+	{ .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
+	{ .compatible = "renesas,jpu-r8a7792" }, /* V2H */
+	{ .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+	{ },
+};
+MODULE_DEVICE_TABLE(of, jpu_dt_ids);
+
+static int jpu_probe(struct platform_device *pdev)
+{
+	struct jpu *jpu;
+	struct resource *res;
+	int ret;
+	unsigned int i;
+
+	jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL);
+	if (!jpu)
+		return -ENOMEM;
+
+	init_waitqueue_head(&jpu->irq_queue);
+	mutex_init(&jpu->mutex);
+	spin_lock_init(&jpu->lock);
+	jpu->dev = &pdev->dev;
+
+	/* memory-mapped registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(jpu->regs))
+		return PTR_ERR(jpu->regs);
+
+	/* interrupt service routine registration */
+	jpu->irq = ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
+			       dev_name(&pdev->dev), jpu);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq);
+		return ret;
+	}
+
+	/* clocks */
+	jpu->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(jpu->clk)) {
+		dev_err(&pdev->dev, "cannot get clock\n");
+		return PTR_ERR(jpu->clk);
+	}
+
+	/* v4l2 device */
+	ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+		return ret;
+	}
+
+	/* mem2mem device */
+	jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops);
+	if (IS_ERR(jpu->m2m_dev)) {
+		v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(jpu->m2m_dev);
+		goto device_register_rollback;
+	}
+
+	jpu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(jpu->alloc_ctx)) {
+		v4l2_err(&jpu->v4l2_dev, "Failed to init memory allocator\n");
+		ret = PTR_ERR(jpu->alloc_ctx);
+		goto m2m_init_rollback;
+	}
+
+	/* fill in quantization and Huffman tables for encoder */
+	for (i = 0; i < JPU_MAX_QUALITY; i++)
+		jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
+
+	strlcpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name));
+	jpu->vfd_encoder.fops		= &jpu_fops;
+	jpu->vfd_encoder.ioctl_ops	= &jpu_ioctl_ops;
+	jpu->vfd_encoder.minor		= -1;
+	jpu->vfd_encoder.release	= video_device_release_empty;
+	jpu->vfd_encoder.lock		= &jpu->mutex;
+	jpu->vfd_encoder.v4l2_dev	= &jpu->v4l2_dev;
+	jpu->vfd_encoder.vfl_dir	= VFL_DIR_M2M;
+
+	ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+		goto vb2_allocator_rollback;
+	}
+
+	video_set_drvdata(&jpu->vfd_encoder, jpu);
+
+	strlcpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name));
+	jpu->vfd_decoder.fops		= &jpu_fops;
+	jpu->vfd_decoder.ioctl_ops	= &jpu_ioctl_ops;
+	jpu->vfd_decoder.minor		= -1;
+	jpu->vfd_decoder.release	= video_device_release_empty;
+	jpu->vfd_decoder.lock		= &jpu->mutex;
+	jpu->vfd_decoder.v4l2_dev	= &jpu->v4l2_dev;
+	jpu->vfd_decoder.vfl_dir	= VFL_DIR_M2M;
+
+	ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+		goto enc_vdev_register_rollback;
+	}
+
+	video_set_drvdata(&jpu->vfd_decoder, jpu);
+	platform_set_drvdata(pdev, jpu);
+
+	v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n",
+		  jpu->vfd_encoder.num);
+	v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n",
+		  jpu->vfd_decoder.num);
+
+	return 0;
+
+enc_vdev_register_rollback:
+	video_unregister_device(&jpu->vfd_encoder);
+
+vb2_allocator_rollback:
+	vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
+
+m2m_init_rollback:
+	v4l2_m2m_release(jpu->m2m_dev);
+
+device_register_rollback:
+	v4l2_device_unregister(&jpu->v4l2_dev);
+
+	return ret;
+}
+
+static int jpu_remove(struct platform_device *pdev)
+{
+	struct jpu *jpu = platform_get_drvdata(pdev);
+
+	video_unregister_device(&jpu->vfd_decoder);
+	video_unregister_device(&jpu->vfd_encoder);
+	vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
+	v4l2_m2m_release(jpu->m2m_dev);
+	v4l2_device_unregister(&jpu->v4l2_dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int jpu_suspend(struct device *dev)
+{
+	struct jpu *jpu = dev_get_drvdata(dev);
+
+	if (jpu->ref_count == 0)
+		return 0;
+
+	clk_disable_unprepare(jpu->clk);
+
+	return 0;
+}
+
+static int jpu_resume(struct device *dev)
+{
+	struct jpu *jpu = dev_get_drvdata(dev);
+
+	if (jpu->ref_count == 0)
+		return 0;
+
+	clk_prepare_enable(jpu->clk);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops jpu_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
+};
+
+static struct platform_driver jpu_driver = {
+	.probe = jpu_probe,
+	.remove = jpu_remove,
+	.driver = {
+		.of_match_table = jpu_dt_ids,
+		.name = DRV_NAME,
+		.pm = &jpu_pm_ops,
+	},
+};
+
+module_platform_driver(jpu_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index bfbf157..9690f9d 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2544,7 +2544,8 @@
 	ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
 	if (ret) {
 		v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
-		goto enc_vdev_alloc_rollback;
+		video_device_release(jpeg->vfd_encoder);
+		goto vb2_allocator_rollback;
 	}
 
 	video_set_drvdata(jpeg->vfd_encoder, jpeg);
@@ -2572,7 +2573,8 @@
 	ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
 	if (ret) {
 		v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
-		goto dec_vdev_alloc_rollback;
+		video_device_release(jpeg->vfd_decoder);
+		goto enc_vdev_register_rollback;
 	}
 
 	video_set_drvdata(jpeg->vfd_decoder, jpeg);
@@ -2589,15 +2591,9 @@
 
 	return 0;
 
-dec_vdev_alloc_rollback:
-	video_device_release(jpeg->vfd_decoder);
-
 enc_vdev_register_rollback:
 	video_unregister_device(jpeg->vfd_encoder);
 
-enc_vdev_alloc_rollback:
-	video_device_release(jpeg->vfd_encoder);
-
 vb2_allocator_rollback:
 	vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
 
@@ -2622,9 +2618,7 @@
 	pm_runtime_disable(jpeg->dev);
 
 	video_unregister_device(jpeg->vfd_decoder);
-	video_device_release(jpeg->vfd_decoder);
 	video_unregister_device(jpeg->vfd_encoder);
-	video_device_release(jpeg->vfd_encoder);
 	vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
 	v4l2_m2m_release(jpeg->m2m_dev);
 	v4l2_device_unregister(&jpeg->v4l2_dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index f176096..b1b1491 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -37,8 +37,12 @@
 {
 	struct s5p_mfc_cmd_args h2r_args;
 	struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+	int ret;
 
-	s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
+	ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
+	if (ret)
+		return ret;
+
 	mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
 	mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
 	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index e65993f..2e57e9f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1819,11 +1819,12 @@
 	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
 	struct s5p_mfc_dev *dev = ctx->dev;
 
-	if (ctx->state != MFCINST_GOT_INST) {
-		mfc_err("inavlid state: %d\n", ctx->state);
-		return -EINVAL;
-	}
 	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (ctx->state != MFCINST_GOT_INST) {
+			mfc_err("inavlid state: %d\n", ctx->state);
+			return -EINVAL;
+		}
+
 		if (ctx->dst_fmt)
 			*plane_count = ctx->dst_fmt->num_planes;
 		else
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 00a1d8b..1e72502 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -37,10 +37,9 @@
 		dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
 }
 
-int s5p_mfc_alloc_priv_buf(struct device *dev,
+int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
 					struct s5p_mfc_priv_buf *b)
 {
-
 	mfc_debug(3, "Allocating priv: %zu\n", b->size);
 
 	b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
@@ -50,6 +49,14 @@
 		return -ENOMEM;
 	}
 
+	if (b->dma < base) {
+		mfc_err("Invaling memory configuration!\n");
+		mfc_err("Allocated buffer (%pad) is lower than memory base address (%pad)\n",
+			&b->dma, &base);
+		dma_free_coherent(dev, b->size, b->virt, b->dma);
+		return -ENOMEM;
+	}
+
 	mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
 	return 0;
 }
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 22dfb3e..77a08b1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -334,7 +334,7 @@
 
 void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
 void s5p_mfc_init_regs(struct s5p_mfc_dev *dev);
-int s5p_mfc_alloc_priv_buf(struct device *dev,
+int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
 					struct s5p_mfc_priv_buf *b);
 void s5p_mfc_release_priv_buf(struct device *dev,
 					struct s5p_mfc_priv_buf *b);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 9a923b1..6402f76 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -41,7 +41,7 @@
 	int ret;
 
 	ctx->dsc.size = buf_size->dsc;
-	ret =  s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc);
+	ret =  s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->dsc);
 	if (ret) {
 		mfc_err("Failed to allocate temporary buffer\n");
 		return ret;
@@ -172,7 +172,8 @@
 	/* Allocate only if memory from bank 1 is necessary */
 	if (ctx->bank1.size > 0) {
 
-		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+					     &ctx->bank1);
 		if (ret) {
 			mfc_err("Failed to allocate Bank1 temporary buffer\n");
 			return ret;
@@ -181,7 +182,8 @@
 	}
 	/* Allocate only if memory from bank 2 is necessary */
 	if (ctx->bank2.size > 0) {
-		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2);
+		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, dev->bank2,
+					     &ctx->bank2);
 		if (ret) {
 			mfc_err("Failed to allocate Bank2 temporary buffer\n");
 			s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
@@ -212,7 +214,7 @@
 	else
 		ctx->ctx.size = buf_size->non_h264_ctx;
 
-	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
 	if (ret) {
 		mfc_err("Failed to allocate instance buffer\n");
 		return ret;
@@ -225,7 +227,7 @@
 
 	/* Initialize shared memory */
 	ctx->shm.size = buf_size->shm;
-	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
+	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->shm);
 	if (ret) {
 		mfc_err("Failed to allocate shared memory buffer\n");
 		s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 906c83c..e5cb30e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -239,7 +239,8 @@
 
 	/* Allocate only if memory from bank 1 is necessary */
 	if (ctx->bank1.size > 0) {
-		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+		ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+					     &ctx->bank1);
 		if (ret) {
 			mfc_err("Failed to allocate Bank1 memory\n");
 			return ret;
@@ -291,7 +292,7 @@
 		break;
 	}
 
-	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
 	if (ret) {
 		mfc_err("Failed to allocate instance buffer\n");
 		return ret;
@@ -320,7 +321,8 @@
 	mfc_debug_enter();
 
 	dev->ctx_buf.size = buf_size->dev_ctx;
-	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
+	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+				     &dev->ctx_buf);
 	if (ret) {
 		mfc_err("Failed to allocate device context buffer\n");
 		return ret;
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index c2f2e35..aae6523 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -315,7 +315,6 @@
 static struct i2c_driver hdmiphy_driver = {
 	.driver = {
 		.name	= "s5p-hdmiphy",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= hdmiphy_probe,
 	.remove		= hdmiphy_remove,
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index b713403..5127acb 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -357,17 +357,15 @@
 
 int mxr_reg_wait4vsync(struct mxr_device *mdev)
 {
-	int ret;
+	long time_left;
 
 	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
 	/* TODO: consider adding interruptible */
-	ret = wait_event_timeout(mdev->event_queue,
-		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
-		msecs_to_jiffies(1000));
-	if (ret > 0)
+	time_left = wait_event_timeout(mdev->event_queue,
+			test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
+				 msecs_to_jiffies(1000));
+	if (time_left > 0)
 		return 0;
-	if (ret < 0)
-		return ret;
 	mxr_warn(mdev, "no vsync detected - timeout\n");
 	return -ETIME;
 }
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index db8c17b..8d17131 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -397,7 +397,6 @@
 static struct i2c_driver sii9234_driver = {
 	.driver = {
 		.name	= "sii9234",
-		.owner	= THIS_MODULE,
 		.pm = &sii9234_pm_ops,
 	},
 	.probe		= sii9234_probe,
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 2554f37..f5e3eb3a 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -211,7 +211,7 @@
 	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_NV16:
 	case V4L2_PIX_FMT_NV24:
-		return V4L2_COLORSPACE_JPEG;
+		return V4L2_COLORSPACE_SMPTE170M;
 	case V4L2_PIX_FMT_RGB332:
 	case V4L2_PIX_FMT_RGB444:
 	case V4L2_PIX_FMT_RGB565:
@@ -958,6 +958,7 @@
 	src_vq->ops = &sh_veu_qops;
 	src_vq->mem_ops = &vb2_dma_contig_memops;
 	src_vq->lock = &veu->fop_lock;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 
 	ret = vb2_queue_init(src_vq);
 	if (ret < 0)
@@ -971,6 +972,7 @@
 	dst_vq->ops = &sh_veu_qops;
 	dst_vq->mem_ops = &vb2_dma_contig_memops;
 	dst_vq->lock = &veu->fop_lock;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 
 	return vb2_queue_init(dst_vq);
 }
@@ -1103,6 +1105,12 @@
 	if (!src || !dst)
 		return IRQ_NONE;
 
+	dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
+	dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+	dst->v4l2_buf.flags |=
+		src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+	dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
+
 	spin_lock(&veu->lock);
 	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 8b799ba..fe5c8ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -27,7 +27,7 @@
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mediabus.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
 
 /* Mirror addresses are not available for all registers */
 #define VOUER	0
@@ -57,31 +57,40 @@
 	SH_VOU_RUNNING,
 };
 
+#define VOU_MIN_IMAGE_WIDTH	16
 #define VOU_MAX_IMAGE_WIDTH	720
-#define VOU_MAX_IMAGE_HEIGHT	576
+#define VOU_MIN_IMAGE_HEIGHT	16
+
+struct sh_vou_buffer {
+	struct vb2_buffer vb;
+	struct list_head list;
+};
+
+static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_buffer *vb2)
+{
+	return container_of(vb2, struct sh_vou_buffer, vb);
+}
 
 struct sh_vou_device {
 	struct v4l2_device v4l2_dev;
 	struct video_device vdev;
-	atomic_t use_count;
 	struct sh_vou_pdata *pdata;
 	spinlock_t lock;
 	void __iomem *base;
 	/* State information */
 	struct v4l2_pix_format pix;
 	struct v4l2_rect rect;
-	struct list_head queue;
+	struct list_head buf_list;
 	v4l2_std_id std;
 	int pix_idx;
-	struct videobuf_buffer *active;
+	struct vb2_queue queue;
+	struct vb2_alloc_ctx *alloc_ctx;
+	struct sh_vou_buffer *active;
 	enum sh_vou_status status;
+	unsigned sequence;
 	struct mutex fop_lock;
 };
 
-struct sh_vou_file {
-	struct videobuf_queue vbq;
-};
-
 /* Register access routines for sides A, B and mirror addresses */
 static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
 			       u32 value)
@@ -133,6 +142,7 @@
 	u32		pfmt;
 	char		*desc;
 	unsigned char	bpp;
+	unsigned char	bpl;
 	unsigned char	rgb;
 	unsigned char	yf;
 	unsigned char	pkf;
@@ -143,6 +153,7 @@
 	{
 		.pfmt	= V4L2_PIX_FMT_NV12,
 		.bpp	= 12,
+		.bpl	= 1,
 		.desc	= "YVU420 planar",
 		.yf	= 0,
 		.rgb	= 0,
@@ -150,6 +161,7 @@
 	{
 		.pfmt	= V4L2_PIX_FMT_NV16,
 		.bpp	= 16,
+		.bpl	= 1,
 		.desc	= "YVYU planar",
 		.yf	= 1,
 		.rgb	= 0,
@@ -157,6 +169,7 @@
 	{
 		.pfmt	= V4L2_PIX_FMT_RGB24,
 		.bpp	= 24,
+		.bpl	= 3,
 		.desc	= "RGB24",
 		.pkf	= 2,
 		.rgb	= 1,
@@ -164,6 +177,7 @@
 	{
 		.pfmt	= V4L2_PIX_FMT_RGB565,
 		.bpp	= 16,
+		.bpl	= 2,
 		.desc	= "RGB565",
 		.pkf	= 3,
 		.rgb	= 1,
@@ -171,6 +185,7 @@
 	{
 		.pfmt	= V4L2_PIX_FMT_RGB565X,
 		.bpp	= 16,
+		.bpl	= 2,
 		.desc	= "RGB565 byteswapped",
 		.pkf	= 3,
 		.rgb	= 1,
@@ -178,11 +193,11 @@
 };
 
 static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
-				 struct videobuf_buffer *vb)
+				 struct vb2_buffer *vb)
 {
 	dma_addr_t addr1, addr2;
 
-	addr1 = videobuf_to_dma_contig(vb);
+	addr1 = vb2_dma_contig_plane_dma_addr(vb, 0);
 	switch (vou_dev->pix.pixelformat) {
 	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_NV16:
@@ -196,8 +211,7 @@
 	sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
 }
 
-static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
-				struct videobuf_buffer *vb)
+static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
 {
 	unsigned int row_coeff;
 #ifdef __LITTLE_ENDIAN
@@ -224,167 +238,136 @@
 
 	sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
 	sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
-	sh_vou_schedule_next(vou_dev, vb);
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
-	BUG_ON(in_interrupt());
-
-	/* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
-	videobuf_waiton(vq, vb, 0, 0);
-	videobuf_dma_contig_free(vq, vb);
-	vb->state = VIDEOBUF_NEEDS_INIT;
 }
 
 /* Locking: caller holds fop_lock mutex */
-static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
-			    unsigned int *size)
+static int sh_vou_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+		       unsigned int *nbuffers, unsigned int *nplanes,
+		       unsigned int sizes[], void *alloc_ctxs[])
 {
-	struct video_device *vdev = vq->priv_data;
-	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
-
-	*size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
-		vou_dev->pix.height / 8;
-
-	if (*count < 2)
-		*count = 2;
-
-	/* Taking into account maximum frame size, *count will stay >= 2 */
-	if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
-		*count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): count=%d, size=%d\n", __func__,
-		*count, *size);
-
-	return 0;
-}
-
-/* Locking: caller holds fop_lock mutex */
-static int sh_vou_buf_prepare(struct videobuf_queue *vq,
-			      struct videobuf_buffer *vb,
-			      enum v4l2_field field)
-{
-	struct video_device *vdev = vq->priv_data;
-	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
 	struct v4l2_pix_format *pix = &vou_dev->pix;
 	int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
-	int ret;
 
 	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
 
-	if (vb->width	!= pix->width ||
-	    vb->height	!= pix->height ||
-	    vb->field	!= pix->field) {
-		vb->width	= pix->width;
-		vb->height	= pix->height;
-		vb->field	= field;
-		if (vb->state != VIDEOBUF_NEEDS_INIT)
-			free_buffer(vq, vb);
-	}
+	if (fmt && fmt->fmt.pix.sizeimage < pix->height * bytes_per_line)
+		return -EINVAL;
+	*nplanes = 1;
+	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : pix->height * bytes_per_line;
+	alloc_ctxs[0] = vou_dev->alloc_ctx;
+	return 0;
+}
 
-	vb->size = vb->height * bytes_per_line;
-	if (vb->baddr && vb->bsize < vb->size) {
+static int sh_vou_buf_prepare(struct vb2_buffer *vb)
+{
+	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct v4l2_pix_format *pix = &vou_dev->pix;
+	unsigned bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+	unsigned size = pix->height * bytes_per_line;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+	if (vb2_plane_size(vb, 0) < size) {
 		/* User buffer too small */
-		dev_warn(vq->dev, "User buffer too small: [%zu] @ %lx\n",
-			 vb->bsize, vb->baddr);
+		dev_warn(vou_dev->v4l2_dev.dev, "buffer too small (%lu < %u)\n",
+			 vb2_plane_size(vb, 0), size);
 		return -EINVAL;
 	}
 
-	if (vb->state == VIDEOBUF_NEEDS_INIT) {
-		ret = videobuf_iolock(vq, vb, NULL);
-		if (ret < 0) {
-			dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
-				 vb->memory, ret);
-			return ret;
-		}
-		vb->state = VIDEOBUF_PREPARED;
-	}
-
-	dev_dbg(vou_dev->v4l2_dev.dev,
-		"%s(): fmt #%d, %u bytes per line, phys %pad, type %d, state %d\n",
-		__func__, vou_dev->pix_idx, bytes_per_line,
-		({ dma_addr_t addr = videobuf_to_dma_contig(vb); &addr; }),
-		vb->memory, vb->state);
-
+	vb2_set_plane_payload(vb, 0, size);
 	return 0;
 }
 
 /* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
-static void sh_vou_buf_queue(struct videobuf_queue *vq,
-			     struct videobuf_buffer *vb)
+static void sh_vou_buf_queue(struct vb2_buffer *vb)
 {
-	struct video_device *vdev = vq->priv_data;
-	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	vb->state = VIDEOBUF_QUEUED;
-	list_add_tail(&vb->queue, &vou_dev->queue);
-
-	if (vou_dev->status == SH_VOU_RUNNING) {
-		return;
-	} else if (!vou_dev->active) {
-		vou_dev->active = vb;
-		/* Start from side A: we use mirror addresses, so, set B */
-		sh_vou_reg_a_write(vou_dev, VOURPR, 1);
-		dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
-			__func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
-		sh_vou_schedule_next(vou_dev, vb);
-		/* Only activate VOU after the second buffer */
-	} else if (vou_dev->active->queue.next == &vb->queue) {
-		/* Second buffer - initialise register side B */
-		sh_vou_reg_a_write(vou_dev, VOURPR, 0);
-		sh_vou_stream_start(vou_dev, vb);
-
-		/* Register side switching with frame VSYNC */
-		sh_vou_reg_a_write(vou_dev, VOURCR, 5);
-		dev_dbg(vou_dev->v4l2_dev.dev, "%s: second buffer status 0x%x\n",
-			__func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
-
-		/* Enable End-of-Frame (VSYNC) interrupts */
-		sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
-		/* Two buffers on the queue - activate the hardware */
-
-		vou_dev->status = SH_VOU_RUNNING;
-		sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
-	}
-}
-
-static void sh_vou_buf_release(struct videobuf_queue *vq,
-			       struct videobuf_buffer *vb)
-{
-	struct video_device *vdev = vq->priv_data;
-	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vb);
 	unsigned long flags;
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
 	spin_lock_irqsave(&vou_dev->lock, flags);
-
-	if (vou_dev->active == vb) {
-		/* disable output */
-		sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
-		/* ...but the current frame will complete */
-		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
-		vou_dev->active = NULL;
-	}
-
-	if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
-		vb->state = VIDEOBUF_ERROR;
-		list_del(&vb->queue);
-	}
-
+	list_add_tail(&shbuf->list, &vou_dev->buf_list);
 	spin_unlock_irqrestore(&vou_dev->lock, flags);
-
-	free_buffer(vq, vb);
 }
 
-static struct videobuf_queue_ops sh_vou_video_qops = {
-	.buf_setup	= sh_vou_buf_setup,
-	.buf_prepare	= sh_vou_buf_prepare,
-	.buf_queue	= sh_vou_buf_queue,
-	.buf_release	= sh_vou_buf_release,
+static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+	struct sh_vou_buffer *buf, *node;
+	int ret;
+
+	vou_dev->sequence = 0;
+	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+					 video, s_stream, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+			list_del(&buf->list);
+		}
+		vou_dev->active = NULL;
+		return ret;
+	}
+
+	buf = list_entry(vou_dev->buf_list.next, struct sh_vou_buffer, list);
+
+	vou_dev->active = buf;
+
+	/* Start from side A: we use mirror addresses, so, set B */
+	sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+		__func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
+	sh_vou_schedule_next(vou_dev, &buf->vb);
+
+	buf = list_entry(buf->list.next, struct sh_vou_buffer, list);
+
+	/* Second buffer - initialise register side B */
+	sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+	sh_vou_schedule_next(vou_dev, &buf->vb);
+
+	/* Register side switching with frame VSYNC */
+	sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+
+	sh_vou_stream_config(vou_dev);
+	/* Enable End-of-Frame (VSYNC) interrupts */
+	sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+
+	/* Two buffers on the queue - activate the hardware */
+	vou_dev->status = SH_VOU_RUNNING;
+	sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+	return 0;
+}
+
+static void sh_vou_stop_streaming(struct vb2_queue *vq)
+{
+	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+	struct sh_vou_buffer *buf, *node;
+	unsigned long flags;
+
+	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+					 video, s_stream, 0);
+	/* disable output */
+	sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+	/* ...but the current frame will complete */
+	sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+	msleep(50);
+	spin_lock_irqsave(&vou_dev->lock, flags);
+	list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+		list_del(&buf->list);
+	}
+	vou_dev->active = NULL;
+	spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static struct vb2_ops sh_vou_qops = {
+	.queue_setup		= sh_vou_queue_setup,
+	.buf_prepare		= sh_vou_buf_prepare,
+	.buf_queue		= sh_vou_buf_queue,
+	.start_streaming	= sh_vou_start_streaming,
+	.stop_streaming		= sh_vou_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
 };
 
 /* Video IOCTLs */
@@ -396,7 +379,10 @@
 	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
 
 	strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
-	cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, "sh-vou", sizeof(cap->driver));
+	strlcpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
+	cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
+			   V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
@@ -540,8 +526,10 @@
 		img_height_max = 576;
 
 	/* Image width must be a multiple of 4 */
-	v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
-			      &geo->in_height, 0, img_height_max, 1, 0);
+	v4l_bound_align_image(&geo->in_width,
+			      VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+			      &geo->in_height,
+			      VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
 
 	/* Select scales to come as close as possible to the output image */
 	for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
@@ -671,34 +659,19 @@
 		 vou_scale_v_num[idx_v], vou_scale_v_den[idx_v], best);
 }
 
-static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
-				struct v4l2_format *fmt)
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *fmt)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
 	struct v4l2_pix_format *pix = &fmt->fmt.pix;
 	unsigned int img_height_max;
 	int pix_idx;
-	struct sh_vou_geometry geo;
-	struct v4l2_subdev_format format = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		/* Revisit: is this the correct code? */
-		.format.code = MEDIA_BUS_FMT_YUYV8_2X8,
-		.format.field = V4L2_FIELD_INTERLACED,
-		.format.colorspace = V4L2_COLORSPACE_SMPTE170M,
-	};
-	struct v4l2_mbus_framefmt *mbfmt = &format.format;
-	int ret;
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
-		vou_dev->rect.width, vou_dev->rect.height,
-		pix->width, pix->height);
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
 
-	if (pix->field == V4L2_FIELD_ANY)
-		pix->field = V4L2_FIELD_NONE;
-
-	if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
-	    pix->field != V4L2_FIELD_NONE)
-		return -EINVAL;
+	pix->field = V4L2_FIELD_INTERLACED;
+	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	pix->ycbcr_enc = pix->quantization = 0;
 
 	for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
 		if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
@@ -712,9 +685,38 @@
 	else
 		img_height_max = 576;
 
-	/* Image width must be a multiple of 4 */
-	v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
-			      &pix->height, 0, img_height_max, 1, 0);
+	v4l_bound_align_image(&pix->width,
+			      VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+			      &pix->height,
+			      VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+	pix->bytesperline = pix->width * vou_fmt[pix_idx].bpl;
+	pix->sizeimage = pix->height * ((pix->width * vou_fmt[pix_idx].bpp) >> 3);
+
+	return 0;
+}
+
+static int sh_vou_set_fmt_vid_out(struct sh_vou_device *vou_dev,
+				struct v4l2_pix_format *pix)
+{
+	unsigned int img_height_max;
+	struct sh_vou_geometry geo;
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		/* Revisit: is this the correct code? */
+		.format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+		.format.field = V4L2_FIELD_INTERLACED,
+		.format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+	};
+	struct v4l2_mbus_framefmt *mbfmt = &format.format;
+	int pix_idx;
+	int ret;
+
+	if (vb2_is_busy(&vou_dev->queue))
+		return -EBUSY;
+
+	for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+		if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+			break;
 
 	geo.in_width = pix->width;
 	geo.in_height = pix->height;
@@ -733,6 +735,11 @@
 	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
 		geo.output.width, geo.output.height, mbfmt->width, mbfmt->height);
 
+	if (vou_dev->std & V4L2_STD_525_60)
+		img_height_max = 480;
+	else
+		img_height_max = 576;
+
 	/* Sanity checks */
 	if ((unsigned)mbfmt->width > VOU_MAX_IMAGE_WIDTH ||
 	    (unsigned)mbfmt->height > img_height_max ||
@@ -765,111 +772,41 @@
 	return 0;
 }
 
-static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
-				  struct v4l2_format *fmt)
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct v4l2_pix_format *pix = &fmt->fmt.pix;
-	int i;
+	int ret = sh_vou_try_fmt_vid_out(file, priv, fmt);
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	pix->field = V4L2_FIELD_NONE;
-
-	v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
-			      &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
-
-	for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
-		if (vou_fmt[i].pfmt == pix->pixelformat)
-			return 0;
-
-	pix->pixelformat = vou_fmt[0].pfmt;
-
-	return 0;
-}
-
-static int sh_vou_reqbufs(struct file *file, void *priv,
-			  struct v4l2_requestbuffers *req)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		return -EINVAL;
-
-	return videobuf_reqbufs(&vou_file->vbq, req);
-}
-
-static int sh_vou_querybuf(struct file *file, void *priv,
-			   struct v4l2_buffer *b)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	return videobuf_querybuf(&vou_file->vbq, b);
-}
-
-static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	return videobuf_qbuf(&vou_file->vbq, b);
-}
-
-static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
-}
-
-static int sh_vou_streamon(struct file *file, void *priv,
-			   enum v4l2_buf_type buftype)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
-	int ret;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
-					 video, s_stream, 1);
-	if (ret < 0 && ret != -ENOIOCTLCMD)
+	if (ret)
 		return ret;
-
-	/* This calls our .buf_queue() (== sh_vou_buf_queue) */
-	return videobuf_streamon(&vou_file->vbq);
+	return sh_vou_set_fmt_vid_out(vou_dev, &fmt->fmt.pix);
 }
 
-static int sh_vou_streamoff(struct file *file, void *priv,
-			    enum v4l2_buf_type buftype)
+static int sh_vou_enum_output(struct file *file, void *fh,
+			      struct v4l2_output *a)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = priv;
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	/*
-	 * This calls buf_release from host driver's videobuf_queue_ops for all
-	 * remaining buffers. When the last buffer is freed, stop streaming
-	 */
-	videobuf_streamoff(&vou_file->vbq);
-	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
-
+	if (a->index)
+		return -EINVAL;
+	strlcpy(a->name, "Video Out", sizeof(a->name));
+	a->type = V4L2_OUTPUT_TYPE_ANALOG;
+	a->std = vou_dev->vdev.tvnorms;
 	return 0;
 }
 
+static int sh_vou_g_output(struct file *file, void *fh, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+static int sh_vou_s_output(struct file *file, void *fh, unsigned int i)
+{
+	return i ? -EINVAL : 0;
+}
+
 static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
 {
 	switch (bus_fmt) {
@@ -892,8 +829,11 @@
 
 	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);
 
-	if (std_id & ~vou_dev->vdev.tvnorms)
-		return -EINVAL;
+	if (std_id == vou_dev->std)
+		return 0;
+
+	if (vb2_is_busy(&vou_dev->queue))
+		return -EBUSY;
 
 	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
 					 s_std_output, std_id);
@@ -901,13 +841,25 @@
 	if (ret < 0 && ret != -ENOIOCTLCMD)
 		return ret;
 
-	if (std_id & V4L2_STD_525_60)
+	vou_dev->rect.top = vou_dev->rect.left = 0;
+	vou_dev->rect.width = VOU_MAX_IMAGE_WIDTH;
+	if (std_id & V4L2_STD_525_60) {
 		sh_vou_reg_ab_set(vou_dev, VOUCR,
 			sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
-	else
+		vou_dev->rect.height = 480;
+	} else {
 		sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+		vou_dev->rect.height = 576;
+	}
 
+	vou_dev->pix.width = vou_dev->rect.width;
+	vou_dev->pix.height = vou_dev->rect.height;
+	vou_dev->pix.bytesperline =
+		vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpl;
+	vou_dev->pix.sizeimage = vou_dev->pix.height *
+		((vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpp) >> 3);
 	vou_dev->std = std_id;
+	sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
 
 	return 0;
 }
@@ -923,24 +875,66 @@
 	return 0;
 }
 
-static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+static int sh_vou_log_status(struct file *file, void *priv)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+	pr_info("VOUER:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUER));
+	pr_info("VOUCR:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUCR));
+	pr_info("VOUSTR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSTR));
+	pr_info("VOUVCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVCR));
+	pr_info("VOUISR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUISR));
+	pr_info("VOUBCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUBCR));
+	pr_info("VOUDPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDPR));
+	pr_info("VOUDSR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDSR));
+	pr_info("VOUVPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVPR));
+	pr_info("VOUIR:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUIR));
+	pr_info("VOUSRR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSRR));
+	pr_info("VOUMSR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUMSR));
+	pr_info("VOUHIR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUHIR));
+	pr_info("VOUDFR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDFR));
+	pr_info("VOUAD1R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD1R));
+	pr_info("VOUAD2R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD2R));
+	pr_info("VOUAIR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAIR));
+	pr_info("VOUSWR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSWR));
+	pr_info("VOURCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURCR));
+	pr_info("VOURPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURPR));
+	return 0;
+}
 
-	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	a->c = vou_dev->rect;
+static int sh_vou_g_selection(struct file *file, void *fh,
+			      struct v4l2_selection *sel)
+{
+	struct sh_vou_device *vou_dev = video_drvdata(file);
 
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_COMPOSE:
+		sel->r = vou_dev->rect;
+		break;
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = VOU_MAX_IMAGE_WIDTH;
+		if (vou_dev->std & V4L2_STD_525_60)
+			sel->r.height = 480;
+		else
+			sel->r.height = 576;
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
 /* Assume a dull encoder, do all the work ourselves. */
-static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
+static int sh_vou_s_selection(struct file *file, void *fh,
+			      struct v4l2_selection *sel)
 {
-	struct v4l2_crop a_writable = *a;
+	struct v4l2_rect *rect = &sel->r;
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct v4l2_rect *rect = &a_writable.c;
 	struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
 	struct v4l2_pix_format *pix = &vou_dev->pix;
 	struct sh_vou_geometry geo;
@@ -954,19 +948,22 @@
 	unsigned int img_height_max;
 	int ret;
 
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
-		rect->width, rect->height, rect->left, rect->top);
-
-	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    sel->target != V4L2_SEL_TGT_COMPOSE)
 		return -EINVAL;
 
+	if (vb2_is_busy(&vou_dev->queue))
+		return -EBUSY;
+
 	if (vou_dev->std & V4L2_STD_525_60)
 		img_height_max = 480;
 	else
 		img_height_max = 576;
 
-	v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
-			      &rect->height, 0, img_height_max, 1, 0);
+	v4l_bound_align_image(&rect->width,
+			      VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 1,
+			      &rect->height,
+			      VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
 
 	if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
 		rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
@@ -1021,41 +1018,11 @@
 	return 0;
 }
 
-/*
- * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313, default rectangle
- * is the initial register values, height takes the interlaced format into
- * account. The actual image can only go up to 720 x 2 * 240, So, VOUVPR can
- * actually only meaningfully contain values <= 720 and <= 240 respectively, and
- * not <= 864 and <= 312.
- */
-static int sh_vou_cropcap(struct file *file, void *priv,
-			  struct v4l2_cropcap *a)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	a->type				= V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= VOU_MAX_IMAGE_WIDTH;
-	a->bounds.height		= VOU_MAX_IMAGE_HEIGHT;
-	/* Default = max, set VOUDPR = 0, which is not hardware default */
-	a->defrect.left			= 0;
-	a->defrect.top			= 0;
-	a->defrect.width		= VOU_MAX_IMAGE_WIDTH;
-	a->defrect.height		= VOU_MAX_IMAGE_HEIGHT;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
-}
-
 static irqreturn_t sh_vou_isr(int irq, void *dev_id)
 {
 	struct sh_vou_device *vou_dev = dev_id;
 	static unsigned long j;
-	struct videobuf_buffer *vb;
+	struct sh_vou_buffer *vb;
 	static int cnt;
 	u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
 	u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
@@ -1068,7 +1035,7 @@
 	}
 
 	spin_lock(&vou_dev->lock);
-	if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+	if (!vou_dev->active || list_empty(&vou_dev->buf_list)) {
 		if (printk_timed_ratelimit(&j, 500))
 			dev_warn(vou_dev->v4l2_dev.dev,
 				 "IRQ without active buffer: %x!\n", irq_status);
@@ -1090,33 +1057,30 @@
 	sh_vou_reg_a_write(vou_dev, VOUIR, masked);
 
 	vb = vou_dev->active;
-	list_del(&vb->queue);
-
-	vb->state = VIDEOBUF_DONE;
-	v4l2_get_timestamp(&vb->ts);
-	vb->field_count++;
-	wake_up(&vb->done);
-
-	if (list_empty(&vou_dev->queue)) {
-		/* Stop VOU */
-		dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
-			__func__, cnt);
-		sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
-		vou_dev->active = NULL;
-		vou_dev->status = SH_VOU_INITIALISING;
-		/* Disable End-of-Frame (VSYNC) interrupts */
-		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+	if (list_is_singular(&vb->list)) {
+		/* Keep cycling while no next buffer is available */
+		sh_vou_schedule_next(vou_dev, &vb->vb);
 		spin_unlock(&vou_dev->lock);
 		return IRQ_HANDLED;
 	}
 
-	vou_dev->active = list_entry(vou_dev->queue.next,
-				     struct videobuf_buffer, queue);
+	list_del(&vb->list);
 
-	if (vou_dev->active->queue.next != &vou_dev->queue) {
-		struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
-						struct videobuf_buffer, queue);
-		sh_vou_schedule_next(vou_dev, new);
+	v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
+	vb->vb.v4l2_buf.sequence = vou_dev->sequence++;
+	vb->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+	vb2_buffer_done(&vb->vb, VB2_BUF_STATE_DONE);
+
+	vou_dev->active = list_entry(vou_dev->buf_list.next,
+				     struct sh_vou_buffer, list);
+
+	if (list_is_singular(&vou_dev->buf_list)) {
+		/* Keep cycling while no next buffer is available */
+		sh_vou_schedule_next(vou_dev, &vou_dev->active->vb);
+	} else {
+		struct sh_vou_buffer *new = list_entry(vou_dev->active->list.next,
+						struct sh_vou_buffer, list);
+		sh_vou_schedule_next(vou_dev, &new->vb);
 	}
 
 	spin_unlock(&vou_dev->lock);
@@ -1156,6 +1120,8 @@
 	/* Default - fixed HSYNC length, can be made configurable if required */
 	sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
 
+	sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
 	return 0;
 }
 
@@ -1163,96 +1129,47 @@
 static int sh_vou_open(struct file *file)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
-					       GFP_KERNEL);
+	int err;
 
-	if (!vou_file)
-		return -ENOMEM;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	if (mutex_lock_interruptible(&vou_dev->fop_lock)) {
-		kfree(vou_file);
+	if (mutex_lock_interruptible(&vou_dev->fop_lock))
 		return -ERESTARTSYS;
-	}
-	if (atomic_inc_return(&vou_dev->use_count) == 1) {
-		int ret;
+
+	err = v4l2_fh_open(file);
+	if (err)
+		goto done_open;
+	if (v4l2_fh_is_singular_file(file) &&
+	    vou_dev->status == SH_VOU_INITIALISING) {
 		/* First open */
-		vou_dev->status = SH_VOU_INITIALISING;
 		pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
-		ret = sh_vou_hw_init(vou_dev);
-		if (ret < 0) {
-			atomic_dec(&vou_dev->use_count);
+		err = sh_vou_hw_init(vou_dev);
+		if (err < 0) {
 			pm_runtime_put(vou_dev->v4l2_dev.dev);
+			v4l2_fh_release(file);
+		} else {
 			vou_dev->status = SH_VOU_IDLE;
-			mutex_unlock(&vou_dev->fop_lock);
-			kfree(vou_file);
-			return ret;
 		}
 	}
-
-	videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
-				       vou_dev->v4l2_dev.dev, &vou_dev->lock,
-				       V4L2_BUF_TYPE_VIDEO_OUTPUT,
-				       V4L2_FIELD_NONE,
-				       sizeof(struct videobuf_buffer),
-				       &vou_dev->vdev, &vou_dev->fop_lock);
+done_open:
 	mutex_unlock(&vou_dev->fop_lock);
-
-	file->private_data = vou_file;
-
-	return 0;
+	return err;
 }
 
 static int sh_vou_release(struct file *file)
 {
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = file->private_data;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	if (!atomic_dec_return(&vou_dev->use_count)) {
-		mutex_lock(&vou_dev->fop_lock);
-		/* Last close */
-		vou_dev->status = SH_VOU_IDLE;
-		sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
-		pm_runtime_put(vou_dev->v4l2_dev.dev);
-		mutex_unlock(&vou_dev->fop_lock);
-	}
-
-	file->private_data = NULL;
-	kfree(vou_file);
-
-	return 0;
-}
-
-static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = file->private_data;
-	int ret;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
-	if (mutex_lock_interruptible(&vou_dev->fop_lock))
-		return -ERESTARTSYS;
-	ret = videobuf_mmap_mapper(&vou_file->vbq, vma);
-	mutex_unlock(&vou_dev->fop_lock);
-	return ret;
-}
-
-static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
-{
-	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct sh_vou_file *vou_file = file->private_data;
-	unsigned int res;
-
-	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+	bool is_last;
 
 	mutex_lock(&vou_dev->fop_lock);
-	res = videobuf_poll_stream(file, &vou_file->vbq, wait);
+	is_last = v4l2_fh_is_singular_file(file);
+	_vb2_fop_release(file, NULL);
+	if (is_last) {
+		/* Last close */
+		vou_dev->status = SH_VOU_INITIALISING;
+		sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+		pm_runtime_put(vou_dev->v4l2_dev.dev);
+	}
 	mutex_unlock(&vou_dev->fop_lock);
-	return res;
+	return 0;
 }
 
 /* sh_vou display ioctl operations */
@@ -1262,17 +1179,23 @@
 	.vidioc_g_fmt_vid_out		= sh_vou_g_fmt_vid_out,
 	.vidioc_s_fmt_vid_out		= sh_vou_s_fmt_vid_out,
 	.vidioc_try_fmt_vid_out		= sh_vou_try_fmt_vid_out,
-	.vidioc_reqbufs			= sh_vou_reqbufs,
-	.vidioc_querybuf		= sh_vou_querybuf,
-	.vidioc_qbuf			= sh_vou_qbuf,
-	.vidioc_dqbuf			= sh_vou_dqbuf,
-	.vidioc_streamon		= sh_vou_streamon,
-	.vidioc_streamoff		= sh_vou_streamoff,
+	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
+	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
+	.vidioc_querybuf		= vb2_ioctl_querybuf,
+	.vidioc_qbuf			= vb2_ioctl_qbuf,
+	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
+	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
+	.vidioc_streamon		= vb2_ioctl_streamon,
+	.vidioc_streamoff		= vb2_ioctl_streamoff,
+	.vidioc_expbuf			= vb2_ioctl_expbuf,
+	.vidioc_g_output		= sh_vou_g_output,
+	.vidioc_s_output		= sh_vou_s_output,
+	.vidioc_enum_output		= sh_vou_enum_output,
 	.vidioc_s_std			= sh_vou_s_std,
 	.vidioc_g_std			= sh_vou_g_std,
-	.vidioc_cropcap			= sh_vou_cropcap,
-	.vidioc_g_crop			= sh_vou_g_crop,
-	.vidioc_s_crop			= sh_vou_s_crop,
+	.vidioc_g_selection		= sh_vou_g_selection,
+	.vidioc_s_selection		= sh_vou_s_selection,
+	.vidioc_log_status		= sh_vou_log_status,
 };
 
 static const struct v4l2_file_operations sh_vou_fops = {
@@ -1280,8 +1203,9 @@
 	.open		= sh_vou_open,
 	.release	= sh_vou_release,
 	.unlocked_ioctl	= video_ioctl2,
-	.mmap		= sh_vou_mmap,
-	.poll		= sh_vou_poll,
+	.mmap		= vb2_fop_mmap,
+	.poll		= vb2_fop_poll,
+	.write		= vb2_fop_write,
 };
 
 static const struct video_device sh_vou_video_template = {
@@ -1300,8 +1224,9 @@
 	struct i2c_adapter *i2c_adap;
 	struct video_device *vdev;
 	struct sh_vou_device *vou_dev;
-	struct resource *reg_res, *region;
+	struct resource *reg_res;
 	struct v4l2_subdev *subdev;
+	struct vb2_queue *q;
 	int irq, ret;
 
 	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1312,16 +1237,16 @@
 		return -ENODEV;
 	}
 
-	vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+	vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL);
 	if (!vou_dev)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&vou_dev->queue);
+	INIT_LIST_HEAD(&vou_dev->buf_list);
 	spin_lock_init(&vou_dev->lock);
 	mutex_init(&vou_dev->fop_lock);
-	atomic_set(&vou_dev->use_count, 0);
 	vou_dev->pdata = vou_pdata;
-	vou_dev->status = SH_VOU_IDLE;
+	vou_dev->status = SH_VOU_INITIALISING;
+	vou_dev->pix_idx = 1;
 
 	rect = &vou_dev->rect;
 	pix = &vou_dev->pix;
@@ -1334,34 +1259,24 @@
 	rect->height		= 480;
 	pix->width		= VOU_MAX_IMAGE_WIDTH;
 	pix->height		= 480;
-	pix->pixelformat	= V4L2_PIX_FMT_YVYU;
-	pix->field		= V4L2_FIELD_NONE;
-	pix->bytesperline	= VOU_MAX_IMAGE_WIDTH * 2;
+	pix->pixelformat	= V4L2_PIX_FMT_NV16;
+	pix->field		= V4L2_FIELD_INTERLACED;
+	pix->bytesperline	= VOU_MAX_IMAGE_WIDTH;
 	pix->sizeimage		= VOU_MAX_IMAGE_WIDTH * 2 * 480;
 	pix->colorspace		= V4L2_COLORSPACE_SMPTE170M;
 
-	region = request_mem_region(reg_res->start, resource_size(reg_res),
-				    pdev->name);
-	if (!region) {
-		dev_err(&pdev->dev, "VOU region already claimed\n");
-		ret = -EBUSY;
-		goto ereqmemreg;
-	}
+	vou_dev->base = devm_ioremap_resource(&pdev->dev, reg_res);
+	if (IS_ERR(vou_dev->base))
+		return PTR_ERR(vou_dev->base);
 
-	vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
-	if (!vou_dev->base) {
-		ret = -ENOMEM;
-		goto emap;
-	}
-
-	ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+	ret = devm_request_irq(&pdev->dev, irq, sh_vou_isr, 0, "vou", vou_dev);
 	if (ret < 0)
-		goto ereqirq;
+		return ret;
 
 	ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Error registering v4l2 device\n");
-		goto ev4l2devreg;
+		return ret;
 	}
 
 	vdev = &vou_dev->vdev;
@@ -1374,6 +1289,30 @@
 
 	video_set_drvdata(vdev, vou_dev);
 
+	/* Initialize the vb2 queue */
+	q = &vou_dev->queue;
+	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+	q->drv_priv = vou_dev;
+	q->buf_struct_size = sizeof(struct sh_vou_buffer);
+	q->ops = &sh_vou_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->min_buffers_needed = 2;
+	q->lock = &vou_dev->fop_lock;
+	ret = vb2_queue_init(q);
+	if (ret)
+		goto einitctx;
+
+	vou_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(vou_dev->alloc_ctx)) {
+		dev_err(&pdev->dev, "Can't allocate buffer context");
+		ret = PTR_ERR(vou_dev->alloc_ctx);
+		goto einitctx;
+	}
+	vdev->queue = q;
+	INIT_LIST_HEAD(&vou_dev->buf_list);
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
 
@@ -1405,41 +1344,27 @@
 ereset:
 	i2c_put_adapter(i2c_adap);
 ei2cgadap:
+	vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
+einitctx:
 	pm_runtime_disable(&pdev->dev);
 	v4l2_device_unregister(&vou_dev->v4l2_dev);
-ev4l2devreg:
-	free_irq(irq, vou_dev);
-ereqirq:
-	iounmap(vou_dev->base);
-emap:
-	release_mem_region(reg_res->start, resource_size(reg_res));
-ereqmemreg:
-	kfree(vou_dev);
 	return ret;
 }
 
 static int sh_vou_remove(struct platform_device *pdev)
 {
-	int irq = platform_get_irq(pdev, 0);
 	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
 	struct sh_vou_device *vou_dev = container_of(v4l2_dev,
 						struct sh_vou_device, v4l2_dev);
 	struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
 					    struct v4l2_subdev, list);
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct resource *reg_res;
 
-	if (irq > 0)
-		free_irq(irq, vou_dev);
 	pm_runtime_disable(&pdev->dev);
 	video_unregister_device(&vou_dev->vdev);
 	i2c_put_adapter(client->adapter);
+	vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
 	v4l2_device_unregister(&vou_dev->v4l2_dev);
-	iounmap(vou_dev->base);
-	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (reg_res)
-		release_mem_region(reg_res->start, resource_size(reg_res));
-	kfree(vou_dev);
 	return 0;
 }
 
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 2879026..9070172 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include <media/atmel-isi.h>
@@ -34,7 +35,6 @@
 #define VID_LIMIT_BYTES			(16 * 1024 * 1024)
 #define MIN_FRAME_RATE			15
 #define FRAME_INTERVAL_MILLI_SEC	(1000 / MIN_FRAME_RATE)
-#define ISI_DEFAULT_MCLK_FREQ		25000000
 
 /* Frame buffer descriptor */
 struct fbd {
@@ -82,8 +82,6 @@
 	struct completion		complete;
 	/* ISI peripherial clock */
 	struct clk			*pclk;
-	/* ISI_MCK, feed to camera sensor to generate pixel clock */
-	struct clk			*mck;
 	unsigned int			irq;
 
 	struct isi_platform_data	pdata;
@@ -386,10 +384,13 @@
 	struct atmel_isi *isi = ici->priv;
 	int ret;
 
+	pm_runtime_get_sync(ici->v4l2_dev.dev);
+
 	/* Reset ISI */
 	ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
 	if (ret < 0) {
 		dev_err(icd->parent, "Reset ISI timed out\n");
+		pm_runtime_put(ici->v4l2_dev.dev);
 		return ret;
 	}
 	/* Disable all interrupts */
@@ -431,11 +432,9 @@
 			time_before(jiffies, timeout))
 		msleep(1);
 
-	if (time_after(jiffies, timeout)) {
+	if (time_after(jiffies, timeout))
 		dev_err(icd->parent,
 			"Timeout waiting for finishing codec request\n");
-		return;
-	}
 
 	/* Disable interrupts */
 	isi_writel(isi, ISI_INTDIS,
@@ -445,6 +444,8 @@
 	ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
 	if (ret < 0)
 		dev_err(icd->parent, "Disable ISI timed out\n");
+
+	pm_runtime_put(ici->v4l2_dev.dev);
 }
 
 static struct vb2_ops isi_video_qops = {
@@ -516,7 +517,13 @@
 	if (mf->code != xlate->code)
 		return -EINVAL;
 
+	/* Enable PM and peripheral clock before operating ISI registers */
+	pm_runtime_get_sync(ici->v4l2_dev.dev);
+
 	ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+
+	pm_runtime_put(ici->v4l2_dev.dev);
+
 	if (ret < 0)
 		return ret;
 
@@ -730,37 +737,6 @@
 		 icd->devnum);
 }
 
-/* Called with .host_lock held */
-static int isi_camera_clock_start(struct soc_camera_host *ici)
-{
-	struct atmel_isi *isi = ici->priv;
-	int ret;
-
-	ret = clk_prepare_enable(isi->pclk);
-	if (ret)
-		return ret;
-
-	if (!IS_ERR(isi->mck)) {
-		ret = clk_prepare_enable(isi->mck);
-		if (ret) {
-			clk_disable_unprepare(isi->pclk);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-/* Called with .host_lock held */
-static void isi_camera_clock_stop(struct soc_camera_host *ici)
-{
-	struct atmel_isi *isi = ici->priv;
-
-	if (!IS_ERR(isi->mck))
-		clk_disable_unprepare(isi->mck);
-	clk_disable_unprepare(isi->pclk);
-}
-
 static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
 {
 	struct soc_camera_device *icd = file->private_data;
@@ -855,9 +831,14 @@
 
 	cfg1 |= ISI_CFG1_THMASK_BEATS_16;
 
+	/* Enable PM and peripheral clock before operating ISI registers */
+	pm_runtime_get_sync(ici->v4l2_dev.dev);
+
 	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
 	isi_writel(isi, ISI_CFG1, cfg1);
 
+	pm_runtime_put(ici->v4l2_dev.dev);
+
 	return 0;
 }
 
@@ -865,8 +846,6 @@
 	.owner		= THIS_MODULE,
 	.add		= isi_camera_add_device,
 	.remove		= isi_camera_remove_device,
-	.clock_start	= isi_camera_clock_start,
-	.clock_stop	= isi_camera_clock_stop,
 	.set_fmt	= isi_camera_set_fmt,
 	.try_fmt	= isi_camera_try_fmt,
 	.get_formats	= isi_camera_get_formats,
@@ -889,6 +868,7 @@
 			sizeof(struct fbd) * MAX_BUFFER_NUM,
 			isi->p_fb_descriptors,
 			isi->fb_descriptors_phys);
+	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
@@ -902,7 +882,6 @@
 
 	/* Default settings for ISI */
 	isi->pdata.full_mode = 1;
-	isi->pdata.mck_hz = ISI_DEFAULT_MCLK_FREQ;
 	isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
 
 	np = of_graph_get_next_endpoint(np, NULL);
@@ -978,21 +957,6 @@
 	INIT_LIST_HEAD(&isi->video_buffer_list);
 	INIT_LIST_HEAD(&isi->dma_desc_head);
 
-	/* ISI_MCK is the sensor master clock. It should be handled by the
-	 * sensor driver directly, as the ISI has no use for that clock. Make
-	 * the clock optional here while platforms transition to the correct
-	 * model.
-	 */
-	isi->mck = devm_clk_get(dev, "isi_mck");
-	if (!IS_ERR(isi->mck)) {
-		/* Set ISI_MCK's frequency, it should be faster than pixel
-		 * clock.
-		 */
-		ret = clk_set_rate(isi->mck, isi->pdata.mck_hz);
-		if (ret < 0)
-			return ret;
-	}
-
 	isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
 				sizeof(struct fbd) * MAX_BUFFER_NUM,
 				&isi->fb_descriptors_phys,
@@ -1027,8 +991,6 @@
 	if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10)
 		isi->width_flags |= 1 << 9;
 
-	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
-
 	irq = platform_get_irq(pdev, 0);
 	if (IS_ERR_VALUE(irq)) {
 		ret = irq;
@@ -1049,6 +1011,9 @@
 	soc_host->v4l2_dev.dev	= &pdev->dev;
 	soc_host->nr		= pdev->id;
 
+	pm_suspend_ignore_children(&pdev->dev, true);
+	pm_runtime_enable(&pdev->dev);
+
 	if (isi->pdata.asd_sizes) {
 		soc_host->asd = isi->pdata.asd;
 		soc_host->asd_sizes = isi->pdata.asd_sizes;
@@ -1062,6 +1027,7 @@
 	return 0;
 
 err_register_soc_camera_host:
+	pm_runtime_disable(&pdev->dev);
 err_req_irq:
 err_ioremap:
 	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
@@ -1074,6 +1040,30 @@
 	return ret;
 }
 
+static int atmel_isi_runtime_suspend(struct device *dev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(dev);
+	struct atmel_isi *isi = container_of(soc_host,
+					struct atmel_isi, soc_host);
+
+	clk_disable_unprepare(isi->pclk);
+
+	return 0;
+}
+static int atmel_isi_runtime_resume(struct device *dev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(dev);
+	struct atmel_isi *isi = container_of(soc_host,
+					struct atmel_isi, soc_host);
+
+	return clk_prepare_enable(isi->pclk);
+}
+
+static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
+				atmel_isi_runtime_resume, NULL)
+};
+
 static const struct of_device_id atmel_isi_of_match[] = {
 	{ .compatible = "atmel,at91sam9g45-isi" },
 	{ }
@@ -1085,6 +1075,7 @@
 	.driver		= {
 		.name = "atmel_isi",
 		.of_match_table = of_match_ptr(atmel_isi_of_match),
+		.pm	= &atmel_isi_dev_pm_ops,
 	},
 };
 
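[Editor note] The atmel_isi conversion above is the standard runtime-PM shape: the peripheral clock is enabled only from the runtime-resume callback, and register accesses outside an active capture are bracketed by pm_runtime_get_sync()/pm_runtime_put(). A minimal sketch of that pattern, using hypothetical foo_* names rather than anything from the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

struct foo_priv {
	struct clk *pclk;
	void __iomem *regs;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->pclk);	/* clock off while idle */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->pclk);	/* clock on when in use */
}

static void foo_touch_hw(struct device *dev, struct foo_priv *priv)
{
	pm_runtime_get_sync(dev);	/* resumes, enabling the clock */
	writel(0, priv->regs);		/* safe: clock is running here */
	pm_runtime_put(dev);		/* drop reference, allow suspend */
}
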
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index db7700b..71dd71c 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -98,6 +98,7 @@
 #define VNMC_INF_YUV10_BT656	(2 << 16)
 #define VNMC_INF_YUV10_BT601	(3 << 16)
 #define VNMC_INF_YUV16		(5 << 16)
+#define VNMC_INF_RGB888		(6 << 16)
 #define VNMC_VUP		(1 << 10)
 #define VNMC_IM_ODD		(0 << 3)
 #define VNMC_IM_ODD_EVEN	(1 << 3)
@@ -540,6 +541,9 @@
 		unsigned int bytes_per_line;
 		int ret;
 
+		if (fmt->fmt.pix.sizeimage < icd->sizeimage)
+			return -EINVAL;
+
 		xlate = soc_camera_xlate_by_fourcc(icd,
 						   fmt->fmt.pix.pixelformat);
 		if (!xlate)
@@ -589,7 +593,7 @@
 	struct soc_camera_device *icd = priv->ici.icd;
 	struct rcar_vin_cam *cam = icd->host_priv;
 	u32 vnmc, dmr, interrupts;
-	bool progressive = false, output_is_yuv = false;
+	bool progressive = false, output_is_yuv = false, input_is_yuv = false;
 
 	switch (priv->field) {
 	case V4L2_FIELD_TOP:
@@ -623,16 +627,22 @@
 	case MEDIA_BUS_FMT_YUYV8_1X16:
 		/* BT.601/BT.1358 16bit YCbCr422 */
 		vnmc |= VNMC_INF_YUV16;
+		input_is_yuv = true;
 		break;
 	case MEDIA_BUS_FMT_YUYV8_2X8:
 		/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
 		vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
 			VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+		input_is_yuv = true;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		vnmc |= VNMC_INF_RGB888;
 		break;
 	case MEDIA_BUS_FMT_YUYV10_2X10:
 		/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
 		vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
 			VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+		input_is_yuv = true;
 		break;
 	default:
 		break;
@@ -676,7 +686,7 @@
 	vnmc |= VNMC_VUP;
 
 	/* If input and output use the same colorspace, use bypass mode */
-	if (output_is_yuv)
+	if (input_is_yuv == output_is_yuv)
 		vnmc |= VNMC_BPS;
 
 	/* progressive or interlaced mode */
@@ -1423,6 +1433,7 @@
 	case MEDIA_BUS_FMT_YUYV8_1X16:
 	case MEDIA_BUS_FMT_YUYV8_2X8:
 	case MEDIA_BUS_FMT_YUYV10_2X10:
+	case MEDIA_BUS_FMT_RGB888_1X24:
 		if (cam->extra_fmt)
 			break;
 
@@ -1783,6 +1794,7 @@
 	strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
 	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d",
+		 DRV_NAME, ici->nr);
 
 	return 0;
 }
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index c5c6c4e..efdeea4 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1665,6 +1665,8 @@
 				  struct v4l2_capability *cap)
 {
 	strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
+	strlcpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
+	strlcpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
 	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
@@ -1773,6 +1775,7 @@
 		pcdev->max_height = pcdev->pdata->max_height;
 		pcdev->flags = pcdev->pdata->flags;
 	}
+	pcdev->field = V4L2_FIELD_NONE;
 
 	if (!pcdev->max_width) {
 		unsigned int v;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index d708df4..9087fed 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -309,11 +309,14 @@
 static int soc_camera_enum_input(struct file *file, void *priv,
 				 struct v4l2_input *inp)
 {
+	struct soc_camera_device *icd = file->private_data;
+
 	if (inp->index != 0)
 		return -EINVAL;
 
 	/* default is camera */
 	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	inp->std = icd->vdev->tvnorms;
 	strcpy(inp->name, "Camera");
 
 	return 0;
@@ -381,9 +384,8 @@
 		ret = vb2_reqbufs(&icd->vb2_vidq, p);
 	}
 
-	if (!ret && !icd->streamer)
-		icd->streamer = file;
-
+	if (!ret)
+		icd->streamer = p->count ? file : NULL;
 	return ret;
 }
 
@@ -440,12 +442,19 @@
 {
 	struct soc_camera_device *icd = file->private_data;
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	int ret;
 
 	/* videobuf2 only */
 	if (ici->ops->init_videobuf)
-		return -EINVAL;
-	else
-		return vb2_create_bufs(&icd->vb2_vidq, create);
+		return -ENOTTY;
+
+	if (icd->streamer && icd->streamer != file)
+		return -EBUSY;
+
+	ret = vb2_create_bufs(&icd->vb2_vidq, create);
+	if (!ret)
+		icd->streamer = file;
+	return ret;
 }
 
 static int soc_camera_prepare_buf(struct file *file, void *priv,
@@ -467,14 +476,13 @@
 	struct soc_camera_device *icd = file->private_data;
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 
-	if (icd->streamer != file)
-		return -EBUSY;
-
 	/* videobuf2 only */
 	if (ici->ops->init_videobuf)
-		return -EINVAL;
-	else
-		return vb2_expbuf(&icd->vb2_vidq, p);
+		return -ENOTTY;
+
+	if (icd->streamer && icd->streamer != file)
+		return -EBUSY;
+	return vb2_expbuf(&icd->vb2_vidq, p);
 }
 
 /* Always entered with .host_lock held */
@@ -780,20 +788,21 @@
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 
 	mutex_lock(&ici->host_lock);
+	if (icd->streamer == file) {
+		if (ici->ops->init_videobuf2)
+			vb2_queue_release(&icd->vb2_vidq);
+		icd->streamer = NULL;
+	}
 	icd->use_count--;
 	if (!icd->use_count) {
 		pm_runtime_suspend(&icd->vdev->dev);
 		pm_runtime_disable(&icd->vdev->dev);
 
-		if (ici->ops->init_videobuf2)
-			vb2_queue_release(&icd->vb2_vidq);
 		__soc_camera_power_off(icd);
 
 		soc_camera_remove_device(icd);
 	}
 
-	if (icd->streamer == file)
-		icd->streamer = NULL;
 	mutex_unlock(&ici->host_lock);
 
 	module_put(ici->ops->owner);
@@ -992,6 +1001,7 @@
 	struct soc_camera_device *icd = file->private_data;
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	int ret;
 
 	WARN_ON(priv != file->private_data);
 
@@ -1006,13 +1016,13 @@
 	 * remaining buffers. When the last buffer is freed, stop capture
 	 */
 	if (ici->ops->init_videobuf)
-		videobuf_streamoff(&icd->vb_vidq);
+		ret = videobuf_streamoff(&icd->vb_vidq);
 	else
-		vb2_streamoff(&icd->vb2_vidq, i);
+		ret = vb2_streamoff(&icd->vb2_vidq, i);
 
 	v4l2_subdev_call(sd, video, s_stream, 0);
 
-	return 0;
+	return ret;
 }
 
 static int soc_camera_cropcap(struct file *file, void *fh,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 18282a0..79c5635 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -116,6 +116,9 @@
 	case BDISP_RGB565:
 		seq_puts(s, "RGB565 - ");
 		break;
+	case BDISP_RGB888:
+		seq_puts(s, "RGB888 - ");
+		break;
 	case BDISP_XRGB8888:
 		seq_puts(s, "xRGB888 - ");
 		break;
@@ -185,6 +188,9 @@
 	case BDISP_RGB565:
 		seq_puts(s, "RGB565 - ");
 		break;
+	case BDISP_RGB888:
+		seq_puts(s, "RGB888 - ");
+		break;
 	case BDISP_XRGB8888:
 		seq_puts(s, "xRGB888 - ");
 		break;
@@ -420,6 +426,8 @@
 			return "NV12";
 	case V4L2_PIX_FMT_RGB565:
 		return "RGB16";
+	case V4L2_PIX_FMT_RGB24:
+		return "RGB24";
 	case V4L2_PIX_FMT_XBGR32:
 		return "XRGB";
 	case V4L2_PIX_FMT_ABGR32:
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 465828e..052c932 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -336,8 +336,8 @@
 
 	src_w = ctx->src.crop.width;
 	src_h = ctx->src.crop.height;
-	dst_w = ctx->dst.width;
-	dst_h = ctx->dst.height;
+	dst_w = ctx->dst.crop.width;
+	dst_h = ctx->dst.crop.height;
 
 	if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
 	    bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
@@ -483,9 +483,9 @@
 	src_rect.width -= src_x_offset;
 	src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);
 
-	dst_x_offset = (src_x_offset * dst->width) / ctx->src.crop.width;
+	dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
 	dst_rect.left += dst_x_offset;
-	dst_rect.width = (src_rect.width * dst->width) / ctx->src.crop.width;
+	dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;
 
 	/* General */
 	src_fmt = src->fmt->pixelformat;
@@ -768,12 +768,12 @@
 		/* Allocate memory if not done yet */
 		if (!copy_node[i]) {
 			copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
-						    sizeof(*copy_node),
+						    sizeof(*copy_node[i]),
 						    GFP_KERNEL);
 			if (!copy_node[i])
 				return;
 		}
-		copy_node[i] = node[i];
+		*copy_node[i] = *node[i];
 	}
 }
 
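[Editor note] The bdisp-hw.c hunk above fixes two related slips in the node-copy path: sizeof(*copy_node) measured a pointer rather than the node it points to, and copy_node[i] = node[i] aliased the source instead of copying its contents. A self-contained userspace illustration of the difference (hypothetical struct, not from the driver):

#include <stdio.h>
#include <stdlib.h>

struct node { int cfg; };

int main(void)
{
	struct node src = { .cfg = 42 };
	struct node *alias, *copy;

	/* bug 1 in the original: malloc(sizeof(copy)) would allocate
	 * only pointer-sized storage; sizeof(*copy) is the struct size */
	copy = malloc(sizeof(*copy));
	if (!copy)
		return 1;

	/* bug 2: "copy = &src" would alias the source (and leak the
	 * allocation); "*copy = src" copies the contents instead */
	alias = &src;
	*copy = src;

	src.cfg = 0;	/* mutating src changes the alias, not the copy */
	printf("alias=%d copy=%d\n", alias->cfg, copy->cfg);	/* 0 42 */
	free(copy);
	return 0;
}
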
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 9e782eb..df61355 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -851,33 +851,56 @@
 	struct bdisp_frame *frame;
 	struct bdisp_ctx *ctx = fh_to_ctx(fh);
 
-	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		/* Composing  / capture is not supported */
-		dev_dbg(ctx->bdisp_dev->dev, "Not supported for capture\n");
-		return -EINVAL;
-	}
-
 	frame = ctx_get_frame(ctx, s->type);
 	if (IS_ERR(frame)) {
 		dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
 		return PTR_ERR(frame);
 	}
 
-	switch (s->target) {
-	case V4L2_SEL_TGT_CROP:
-		/* cropped frame */
-		s->r = frame->crop;
+	switch (s->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		switch (s->target) {
+		case V4L2_SEL_TGT_CROP:
+			/* cropped frame */
+			s->r = frame->crop;
+			break;
+		case V4L2_SEL_TGT_CROP_DEFAULT:
+		case V4L2_SEL_TGT_CROP_BOUNDS:
+			/* complete frame */
+			s->r.left = 0;
+			s->r.top = 0;
+			s->r.width = frame->width;
+			s->r.height = frame->height;
+			break;
+		default:
+			dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+			return -EINVAL;
+		}
 		break;
-	case V4L2_SEL_TGT_CROP_DEFAULT:
-	case V4L2_SEL_TGT_CROP_BOUNDS:
-		/* complete frame */
-		s->r.left = 0;
-		s->r.top = 0;
-		s->r.width = frame->width;
-		s->r.height = frame->height;
+
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		switch (s->target) {
+		case V4L2_SEL_TGT_COMPOSE:
+		case V4L2_SEL_TGT_COMPOSE_PADDED:
+			/* composed (cropped) frame */
+			s->r = frame->crop;
+			break;
+		case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+			/* complete frame */
+			s->r.left = 0;
+			s->r.top = 0;
+			s->r.width = frame->width;
+			s->r.height = frame->height;
+			break;
+		default:
+			dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+			return -EINVAL;
+		}
 		break;
+
 	default:
-		dev_dbg(ctx->bdisp_dev->dev, "Invalid target\n");
+		dev_err(ctx->bdisp_dev->dev, "Invalid type\n");
 		return -EINVAL;
 	}
 
@@ -906,15 +929,18 @@
 	struct bdisp_frame *frame;
 	struct bdisp_ctx *ctx = fh_to_ctx(fh);
 	struct v4l2_rect *in, out;
+	bool valid = false;
 
-	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		/* Composing  / capture is not supported */
-		dev_dbg(ctx->bdisp_dev->dev, "Not supported for capture\n");
-		return -EINVAL;
-	}
+	if ((s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+	    (s->target == V4L2_SEL_TGT_CROP))
+		valid = true;
 
-	if (s->target != V4L2_SEL_TGT_CROP) {
-		dev_dbg(ctx->bdisp_dev->dev, "Invalid target\n");
+	if ((s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+	    (s->target == V4L2_SEL_TGT_COMPOSE))
+		valid = true;
+
+	if (!valid) {
+		dev_err(ctx->bdisp_dev->dev, "Invalid type / target\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
new file mode 100644
index 0000000..641ad8f
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
@@ -0,0 +1,28 @@
+config DVB_C8SECTPFE
+	tristate "STMicroelectronics C8SECTPFE DVB support"
+	depends on PINCTRL && DVB_CORE && I2C
+	depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
+	select FW_LOADER
+	select FW_LOADER_USER_HELPER_FALLBACK
+	select DEBUG_FS
+	select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
+
+	---help---
+	  This adds support for DVB front-end cards connected
+	  to the TS inputs of the STiH407/410 SoC.
+
+	  The driver currently supports C8SECTPFE's TS input block,
+	  memdma engine, and HW PID filtering.
+
+	  Supported DVB front-end cards are:
+	  - STMicroelectronics DVB-T B2100A (STV0367 + TDA18212)
+	  - STMicroelectronics DVB-S/S2 STV0903 + STV6110 + LNBP24 board
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called c8sectpfe.
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/sti/c8sectpfe/Makefile
new file mode 100644
index 0000000..b578c7c
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Makefile
@@ -0,0 +1,9 @@
+c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
+		c8sectpfe-debugfs.o
+
+obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
+
+ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
+ccflags-y += -Idrivers/media/dvb-core/ -Idrivers/media/dvb-frontends/ \
+		-Idrivers/media/tuners/
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
new file mode 100644
index 0000000..95223ab
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
@@ -0,0 +1,265 @@
+/*
+ * c8sectpfe-common.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *   Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License as
+ *      published by the Free Software Foundation; either version 2 of
+ *      the License, or (at your option) any later version.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dvb/dmx.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+static int register_dvb(struct stdemux *demux, struct dvb_adapter *adap,
+				void *start_feed, void *stop_feed,
+				struct c8sectpfei *fei)
+{
+	int result;
+
+	demux->dvb_demux.dmx.capabilities = DMX_TS_FILTERING |
+					DMX_SECTION_FILTERING |
+					DMX_MEMORY_BASED_FILTERING;
+
+	demux->dvb_demux.priv = demux;
+	demux->dvb_demux.filternum = C8SECTPFE_MAXCHANNEL;
+	demux->dvb_demux.feednum = C8SECTPFE_MAXCHANNEL;
+
+	demux->dvb_demux.start_feed = start_feed;
+	demux->dvb_demux.stop_feed = stop_feed;
+	demux->dvb_demux.write_to_decoder = NULL;
+
+	result = dvb_dmx_init(&demux->dvb_demux);
+	if (result < 0) {
+		dev_err(fei->dev, "dvb_dmx_init failed (errno = %d)\n",
+			result);
+		goto err_dmx;
+	}
+
+	demux->dmxdev.filternum = demux->dvb_demux.filternum;
+	demux->dmxdev.demux = &demux->dvb_demux.dmx;
+	demux->dmxdev.capabilities = 0;
+
+	result = dvb_dmxdev_init(&demux->dmxdev, adap);
+	if (result < 0) {
+		dev_err(fei->dev, "dvb_dmxdev_init failed (errno = %d)\n",
+			result);
+
+		goto err_dmxdev;
+	}
+
+	demux->hw_frontend.source = DMX_FRONTEND_0 + demux->tsin_index;
+
+	result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+						&demux->hw_frontend);
+	if (result < 0) {
+		dev_err(fei->dev, "add_frontend failed (errno = %d)\n", result);
+		goto err_fe_hw;
+	}
+
+	demux->mem_frontend.source = DMX_MEMORY_FE;
+	result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+						&demux->mem_frontend);
+	if (result < 0) {
+		dev_err(fei->dev, "add_frontend failed (%d)\n", result);
+		goto err_fe_mem;
+	}
+
+	result = demux->dvb_demux.dmx.connect_frontend(&demux->dvb_demux.dmx,
+							&demux->hw_frontend);
+	if (result < 0) {
+		dev_err(fei->dev, "connect_frontend (%d)\n", result);
+		goto err_fe_con;
+	}
+
+	return 0;
+
+err_fe_con:
+	demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+						     &demux->mem_frontend);
+err_fe_mem:
+	demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+						     &demux->hw_frontend);
+err_fe_hw:
+	dvb_dmxdev_release(&demux->dmxdev);
+err_dmxdev:
+	dvb_dmx_release(&demux->dvb_demux);
+err_dmx:
+	return result;
+
+}
+
+static void unregister_dvb(struct stdemux *demux)
+{
+
+	demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+						     &demux->mem_frontend);
+
+	demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+						     &demux->hw_frontend);
+
+	dvb_dmxdev_release(&demux->dmxdev);
+
+	dvb_dmx_release(&demux->dvb_demux);
+}
+
+static struct c8sectpfe *c8sectpfe_create(struct c8sectpfei *fei,
+				void *start_feed,
+				void *stop_feed)
+{
+	struct c8sectpfe *c8sectpfe;
+	int result;
+	int i, j;
+
+	short int ids[] = { -1 };
+
+	c8sectpfe = kzalloc(sizeof(struct c8sectpfe), GFP_KERNEL);
+	if (!c8sectpfe)
+		goto err1;
+
+	mutex_init(&c8sectpfe->lock);
+
+	c8sectpfe->device = fei->dev;
+
+	result = dvb_register_adapter(&c8sectpfe->adapter, "STi c8sectpfe",
+					THIS_MODULE, fei->dev, ids);
+	if (result < 0) {
+		dev_err(fei->dev, "dvb_register_adapter failed (errno = %d)\n",
+			result);
+		goto err2;
+	}
+
+	c8sectpfe->adapter.priv = fei;
+
+	for (i = 0; i < fei->tsin_count; i++) {
+
+		c8sectpfe->demux[i].tsin_index = i;
+		c8sectpfe->demux[i].c8sectpfei = fei;
+
+		result = register_dvb(&c8sectpfe->demux[i], &c8sectpfe->adapter,
+				start_feed, stop_feed, fei);
+		if (result < 0) {
+			dev_err(fei->dev,
+				"register_dvb feed=%d failed (errno = %d)\n",
+				i, result);
+
+			/* we take an all-or-nothing approach */
+			for (j = 0; j < i; j++)
+				unregister_dvb(&c8sectpfe->demux[j]);
+			goto err3;
+		}
+	}
+
+	c8sectpfe->num_feeds = fei->tsin_count;
+
+	return c8sectpfe;
+err3:
+	dvb_unregister_adapter(&c8sectpfe->adapter);
+err2:
+	kfree(c8sectpfe);
+err1:
+	return NULL;
+}
+
+static void c8sectpfe_delete(struct c8sectpfe *c8sectpfe)
+{
+	int i;
+
+	if (!c8sectpfe)
+		return;
+
+	for (i = 0; i < c8sectpfe->num_feeds; i++)
+		unregister_dvb(&c8sectpfe->demux[i]);
+
+	dvb_unregister_adapter(&c8sectpfe->adapter);
+
+	kfree(c8sectpfe);
+}
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+					struct c8sectpfei *fei)
+{
+	int n;
+	struct channel_info *tsin;
+
+	for (n = 0; n < fei->tsin_count; n++) {
+
+		tsin = fei->channel_data[n];
+
+		if (tsin && tsin->frontend) {
+			dvb_unregister_frontend(tsin->frontend);
+			dvb_frontend_detach(tsin->frontend);
+		}
+
+		if (tsin && tsin->i2c_adapter)
+			i2c_put_adapter(tsin->i2c_adapter);
+
+		if (tsin && tsin->i2c_client) {
+			if (tsin->i2c_client->dev.driver->owner)
+				module_put(tsin->i2c_client->dev.driver->owner);
+			i2c_unregister_device(tsin->i2c_client);
+		}
+	}
+
+	c8sectpfe_delete(c8sectpfe);
+}
+
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+				struct c8sectpfei *fei,
+				void *start_feed,
+				void *stop_feed)
+{
+	struct channel_info *tsin;
+	struct dvb_frontend *frontend;
+	int n, res;
+
+	*c8sectpfe = c8sectpfe_create(fei, start_feed, stop_feed);
+	if (!*c8sectpfe)
+		return -ENOMEM;
+
+	for (n = 0; n < fei->tsin_count; n++) {
+		tsin = fei->channel_data[n];
+
+		res = c8sectpfe_frontend_attach(&frontend, *c8sectpfe, tsin, n);
+		if (res)
+			goto err;
+
+		res = dvb_register_frontend(&(*c8sectpfe)->adapter, frontend);
+		if (res < 0) {
+			dev_err(fei->dev, "dvb_register_frontend failed (%d)\n",
+				res);
+			goto err;
+		}
+
+		tsin->frontend = frontend;
+	}
+
+	return 0;
+
+err:
+	c8sectpfe_tuner_unregister_frontend(*c8sectpfe, fei);
+	return res;
+}
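
[Editor note] c8sectpfe_create() above registers one demux per TS input and, on any failure, unwinds the demuxes already registered before bailing out. The idiom, reduced to its shape with hypothetical register_one()/unregister_one() helpers:

int register_one(int idx);
void unregister_one(int idx);

/* register n units; on any failure, tear down the 0..i-1 already done */
static int register_all(int n)
{
	int i, j, ret;

	for (i = 0; i < n; i++) {
		ret = register_one(i);
		if (ret < 0) {
			for (j = 0; j < i; j++)
				unregister_one(j);
			return ret;
		}
	}
	return 0;
}
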
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
new file mode 100644
index 0000000..da21c0a
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
@@ -0,0 +1,64 @@
+/*
+ * c8sectpfe-common.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *   Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License as
+ *      published by the Free Software Foundation; either version 2 of
+ *      the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_COMMON_H_
+#define _C8SECTPFE_COMMON_H_
+
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/gpio.h>
+#include <linux/version.h>
+
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+/* Maximum number of channels */
+#define C8SECTPFE_MAXADAPTER (4)
+#define C8SECTPFE_MAXCHANNEL 64
+#define STPTI_MAXCHANNEL 64
+
+#define MAX_INPUTBLOCKS 7
+
+struct c8sectpfe;
+struct stdemux;
+
+struct stdemux {
+	struct dvb_demux	dvb_demux;
+	struct dmxdev		dmxdev;
+	struct dmx_frontend	hw_frontend;
+	struct dmx_frontend	mem_frontend;
+	int			tsin_index;
+	int			running_feed_count;
+	struct			c8sectpfei *c8sectpfei;
+};
+
+struct c8sectpfe {
+	struct stdemux demux[MAX_INPUTBLOCKS];
+	struct mutex lock;
+	struct dvb_adapter adapter;
+	struct device *device;
+	int mapping;
+	int num_feeds;
+};
+
+/* Channel registration */
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+					struct c8sectpfei *fei,
+					void *start_feed,
+					void *stop_feed);
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+						struct c8sectpfei *fei);
+
+#endif
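
[Editor note] c8sectpfe-core.c, added next, drains the memdma back-buffer from a tasklet: it reads the hardware write pointer (wp) and its own read pointer (rp) from the pointer-record block, and when the writer has wrapped it only consumes up to the end of the buffer on that pass. A minimal model of the arithmetic (hypothetical helper, not in the patch):

#include <stdint.h>

/* bytes ready to consume in a circular buffer [base, base + len) */
static uint32_t bytes_ready(uint32_t base, uint32_t len,
			    uint32_t wp, uint32_t rp)
{
	if (wp < rp)		/* writer wrapped: drain rp..end first */
		wp = base + len;
	return wp - rp;
}
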
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
new file mode 100644
index 0000000..486aef5
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -0,0 +1,1236 @@
+/*
+ * c8sectpfe-core.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *   Author:Peter Bennett <peter.bennett@st.com>
+ *	    Peter Griffin <peter.griffin@linaro.org>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License as
+ *	published by the Free Software Foundation; either version 2 of
+ *	the License, or (at your option) any later version.
+ */
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-debugfs.h"
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
+MODULE_FIRMWARE(FIRMWARE_MEMDMA);
+
+#define PID_TABLE_SIZE 1024
+#define POLL_MSECS 50
+
+static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
+
+#define TS_PKT_SIZE 188
+#define HEADER_SIZE (4)
+#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
+
+#define FEI_ALIGNMENT (32)
+/* hw requires a minimum of 8*PACKET_SIZE, padded to an 8-byte boundary */
+#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
+
+#define FIFO_LEN 1024
+
+static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+{
+	struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+	struct channel_info *channel;
+	int chan_num;
+
+	/* iterate through input block channels */
+	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
+		channel = fei->channel_data[chan_num];
+
+		/* is this descriptor initialised and TP enabled */
+		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
+			tasklet_schedule(&channel->tsklet);
+	}
+
+	fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+	add_timer(&fei->timer);
+}
+
+static void channel_swdemux_tsklet(unsigned long data)
+{
+	struct channel_info *channel = (struct channel_info *)data;
+	struct c8sectpfei *fei;
+	unsigned long wp, rp;
+	int pos, num_packets, n, size;
+	u8 *buf;
+
+	if (unlikely(!channel || !channel->irec))
+		return;
+
+	fei = channel->fei;
+
+	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
+	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
+
+	pos = rp - channel->back_buffer_busaddr;
+
+	/* has it wrapped */
+	if (wp < rp)
+		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
+
+	size = wp - rp;
+	num_packets = size / PACKET_SIZE;
+
+	/* manage cache so data is visible to CPU */
+	dma_sync_single_for_cpu(fei->dev,
+				rp,
+				size,
+				DMA_FROM_DEVICE);
+
+	buf = (u8 *) channel->back_buffer_aligned;
+
+	dev_dbg(fei->dev,
+		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
+		"rp=0x%lx, wp=0x%lx\n",
+		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
+
+	for (n = 0; n < num_packets; n++) {
+		dvb_dmx_swfilter_packets(
+			&fei->c8sectpfe[0]->
+				demux[channel->demux_mapping].dvb_demux,
+			&buf[pos], 1);
+
+		pos += PACKET_SIZE;
+	}
+
+	/* advance the read pointer */
+	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
+		writel(channel->back_buffer_busaddr, channel->irec +
+			DMA_PRDS_BUSRP_TP(0));
+	else
+		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
+}
+
+static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+	struct dvb_demux *demux = dvbdmxfeed->demux;
+	struct stdemux *stdemux = (struct stdemux *)demux->priv;
+	struct c8sectpfei *fei = stdemux->c8sectpfei;
+	struct channel_info *channel;
+	u32 tmp;
+	unsigned long *bitmap;
+
+	switch (dvbdmxfeed->type) {
+	case DMX_TYPE_TS:
+		break;
+	case DMX_TYPE_SEC:
+		break;
+	default:
+		dev_err(fei->dev, "%s:%d Error bailing\n"
+			, __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (dvbdmxfeed->type == DMX_TYPE_TS) {
+		switch (dvbdmxfeed->pes_type) {
+		case DMX_PES_VIDEO:
+		case DMX_PES_AUDIO:
+		case DMX_PES_TELETEXT:
+		case DMX_PES_PCR:
+		case DMX_PES_OTHER:
+			break;
+		default:
+			dev_err(fei->dev, "%s:%d Error bailing\n"
+				, __func__, __LINE__);
+			return -EINVAL;
+		}
+	}
+
+	if (!atomic_read(&fei->fw_loaded)) {
+		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&fei->lock);
+
+	channel = fei->channel_data[stdemux->tsin_index];
+
+	bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+	/* 8192 is a special PID */
+	if (dvbdmxfeed->pid == 8192) {
+		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+		tmp &= ~C8SECTPFE_PID_ENABLE;
+		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+
+	} else {
+		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
+	}
+
+	/* manage cache so PID bitmap is visible to HW */
+	dma_sync_single_for_device(fei->dev,
+					channel->pid_buffer_busaddr,
+					PID_TABLE_SIZE,
+					DMA_TO_DEVICE);
+
+	channel->active = 1;
+
+	if (fei->global_feed_count == 0) {
+		fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+
+		add_timer(&fei->timer);
+	}
+
+	if (stdemux->running_feed_count == 0) {
+
+		dev_dbg(fei->dev, "Starting channel=%p\n", channel);
+
+		tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
+			     (unsigned long) channel);
+
+		/* Reset the internal inputblock sram pointers */
+		writel(channel->fifo,
+			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
+		writel(channel->fifo + FIFO_LEN - 1,
+			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
+
+		writel(channel->fifo,
+			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
+		writel(channel->fifo,
+			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
+
+
+		/* reset read / write memdma ptrs for this channel */
+		writel(channel->back_buffer_busaddr, channel->irec +
+			DMA_PRDS_BUSBASE_TP(0));
+
+		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+		writel(channel->back_buffer_busaddr, channel->irec +
+			DMA_PRDS_BUSWP_TP(0));
+
+		/* Issue a reset and enable InputBlock */
+		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
+			, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+		/* and enable the tp */
+		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
+
+		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
+			, __func__, __LINE__, stdemux);
+	}
+
+	stdemux->running_feed_count++;
+	fei->global_feed_count++;
+
+	mutex_unlock(&fei->lock);
+
+	return 0;
+}
+
+static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+
+	struct dvb_demux *demux = dvbdmxfeed->demux;
+	struct stdemux *stdemux = (struct stdemux *)demux->priv;
+	struct c8sectpfei *fei = stdemux->c8sectpfei;
+	struct channel_info *channel;
+	int idlereq;
+	u32 tmp;
+	int ret;
+	unsigned long *bitmap;
+
+	if (!atomic_read(&fei->fw_loaded)) {
+		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&fei->lock);
+
+	channel = fei->channel_data[stdemux->tsin_index];
+
+	bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+	if (dvbdmxfeed->pid == 8192) {
+		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+		tmp |= C8SECTPFE_PID_ENABLE;
+		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+	} else {
+		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
+	}
+
+	/* manage cache so data is visible to HW */
+	dma_sync_single_for_device(fei->dev,
+					channel->pid_buffer_busaddr,
+					PID_TABLE_SIZE,
+					DMA_TO_DEVICE);
+
+	if (--stdemux->running_feed_count == 0) {
+
+		channel = fei->channel_data[stdemux->tsin_index];
+
+		/* TP re-configuration on page 168 of functional spec */
+
+		/* disable IB (prevents more TS data going to memdma) */
+		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+		/* disable this channels descriptor */
+		writel(0,  channel->irec + DMA_PRDS_TPENABLE);
+
+		tasklet_disable(&channel->tsklet);
+
+		/* now request memdma channel goes idle */
+		idlereq = (1 << channel->tsin_id) | IDLEREQ;
+		writel(idlereq, fei->io + DMA_IDLE_REQ);
+
+		/* wait for idle irq handler to signal completion */
+		ret = wait_for_completion_timeout(&channel->idle_completion,
+						msecs_to_jiffies(100));
+
+		if (ret == 0)
+			dev_warn(fei->dev,
+				"Timeout waiting for idle irq on tsin%d\n",
+				channel->tsin_id);
+
+		reinit_completion(&channel->idle_completion);
+
+		/* reset read / write ptrs for this channel */
+
+		writel(channel->back_buffer_busaddr,
+			channel->irec + DMA_PRDS_BUSBASE_TP(0));
+
+		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+		writel(channel->back_buffer_busaddr,
+			channel->irec + DMA_PRDS_BUSWP_TP(0));
+
+		dev_dbg(fei->dev,
+			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
+			__func__, __LINE__, stdemux, channel->tsin_id);
+
+		/* turn off all PIDS in the bitmap */
+		memset((void *)channel->pid_buffer_aligned
+			, 0x00, PID_TABLE_SIZE);
+
+		/* manage cache so data is visible to HW */
+		dma_sync_single_for_device(fei->dev,
+					channel->pid_buffer_busaddr,
+					PID_TABLE_SIZE,
+					DMA_TO_DEVICE);
+
+		channel->active = 0;
+	}
+
+	if (--fei->global_feed_count == 0) {
+		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
+			, __func__, __LINE__, fei->global_feed_count);
+
+		del_timer(&fei->timer);
+	}
+
+	mutex_unlock(&fei->lock);
+
+	return 0;
+}
+
+static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
+{
+	int i;
+
+	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
+		if (!fei->channel_data[i])
+			continue;
+
+		if (fei->channel_data[i]->tsin_id == tsin_num)
+			return fei->channel_data[i];
+	}
+
+	return NULL;
+}
+
+static void c8sectpfe_getconfig(struct c8sectpfei *fei)
+{
+	struct c8sectpfe_hw *hw = &fei->hw_stats;
+
+	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
+	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
+	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
+	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
+	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
+	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
+	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
+
+	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
+	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
+	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
+	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
+				, hw->num_swts);
+	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
+	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
+	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
+	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
+			, hw->num_tp);
+}
+
+static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
+{
+	struct c8sectpfei *fei = priv;
+	struct channel_info *chan;
+	int bit;
+	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
+
+	/* page 168 of functional spec: Clear the idle request
+	   by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
+
+	/* signal idle completion */
+	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
+
+		chan = find_channel(fei, bit);
+
+		if (chan)
+			complete(&chan->idle_completion);
+	}
+
+	writel(0, fei->io + DMA_IDLE_REQ);
+
+	return IRQ_HANDLED;
+}
+
+
+static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
+{
+	if (!fei || !tsin)
+		return;
+
+	if (tsin->back_buffer_busaddr)
+		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
+			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
+				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
+
+	kfree(tsin->back_buffer_start);
+
+	if (tsin->pid_buffer_busaddr)
+		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
+			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
+				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
+
+	kfree(tsin->pid_buffer_start);
+}
+
+#define MAX_NAME 20
+
+static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
+				struct channel_info *tsin)
+{
+	int ret;
+	u32 tmp;
+	char tsin_pin_name[MAX_NAME];
+
+	if (!fei || !tsin)
+		return -EINVAL;
+
+	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
+		, __func__, __LINE__, tsin, tsin->tsin_id);
+
+	init_completion(&tsin->idle_completion);
+
+	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
+					FEI_ALIGNMENT, GFP_KERNEL);
+
+	if (!tsin->back_buffer_start) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	/* Ensure backbuffer is 32byte aligned */
+	tsin->back_buffer_aligned = tsin->back_buffer_start
+		+ FEI_ALIGNMENT;
+
+	tsin->back_buffer_aligned = (void *)
+		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
+
+	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
+					(void *)tsin->back_buffer_aligned,
+					FEI_BUFFER_SIZE,
+					DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
+		dev_err(fei->dev, "failed to map back_buffer\n");
+		ret = -EFAULT;
+		goto err_unmap;
+	}
+
+	/*
+	 * The pid buffer can be configured (in hw) for byte or bit
+	 * per pid. By powers of deduction we conclude stih407 family
+	 * is configured (at SoC design stage) for bit per pid.
+	 */
+	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
+
+	if (!tsin->pid_buffer_start) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	/*
+	 * PID buffer needs to be aligned to size of the pid table
+	 * which at bit per pid is 1024 bytes (8192 pids / 8).
+	 * PIDF_BASE register enforces this alignment when writing
+	 * the register.
+	 */
+
+	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
+		PID_TABLE_SIZE;
+
+	tsin->pid_buffer_aligned = (void *)
+		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
+
+	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
+						tsin->pid_buffer_aligned,
+						PID_TABLE_SIZE,
+						DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
+		dev_err(fei->dev, "failed to map pid_bitmap\n");
+		ret = -EFAULT;
+		goto err_unmap;
+	}
+
+	/* manage cache so pid bitmap is visible to HW */
+	dma_sync_single_for_device(fei->dev,
+				tsin->pid_buffer_busaddr,
+				PID_TABLE_SIZE,
+				DMA_TO_DEVICE);
+
+	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
+		(tsin->serial_not_parallel ? "serial" : "parallel"));
+
+	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
+	if (IS_ERR(tsin->pstate)) {
+		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
+			, __func__, tsin_pin_name);
+		ret = PTR_ERR(tsin->pstate);
+		goto err_unmap;
+	}
+
+	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
+
+	if (ret) {
+		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
+			, __func__);
+		goto err_unmap;
+	}
+
+	/* Enable this input block */
+	tmp = readl(fei->io + SYS_INPUT_CLKEN);
+	tmp |= BIT(tsin->tsin_id);
+	writel(tmp, fei->io + SYS_INPUT_CLKEN);
+
+	if (tsin->serial_not_parallel)
+		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
+
+	if (tsin->invert_ts_clk)
+		tmp |= C8SECTPFE_INVERT_TSCLK;
+
+	if (tsin->async_not_sync)
+		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
+
+	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
+
+	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
+
+	writel(C8SECTPFE_SYNC(0x9) |
+		C8SECTPFE_DROP(0x9) |
+		C8SECTPFE_TOKEN(0x47),
+		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
+
+	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
+
+	/* Place the FIFO's at the end of the irec descriptors */
+
+	tsin->fifo = (tsin->tsin_id * FIFO_LEN);
+
+	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
+	writel(tsin->fifo + FIFO_LEN - 1,
+		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
+
+	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
+	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
+
+	writel(tsin->pid_buffer_busaddr,
+		fei->io + PIDF_BASE(tsin->tsin_id));
+
+	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
+		&tsin->pid_buffer_busaddr);
+
+	/* Configure and enable HW PID filtering */
+
+	/*
+	 * The PID value is created by assembling the first 8 bytes of
+	 * the TS packet into a 64-bit word in big-endian format. A
+	 * slice of that 64-bit word is taken from
+	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
+	 */
+	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
+		| C8SECTPFE_PID_OFFSET(40));
+
+	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
+
+	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
+		tsin->tsin_id,
+		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
+		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
+		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
+		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
+
+	/* Get base address of pointer record block from DMEM */
+	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
+			readl(fei->io + DMA_PTRREC_BASE);
+
+	/* fill out pointer record data structure */
+
+	/* advance pointer record block to our channel */
+	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
+
+	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
+
+	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
+
+	writel((TS_PKT_SIZE + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);
+
+	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
+
+	/* read/write pointers with physical bus address */
+
+	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
+
+	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
+
+	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
+	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
+
+	/* initialize tasklet */
+	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
+		(unsigned long) tsin);
+
+	return 0;
+
+err_unmap:
+	free_input_block(fei, tsin);
+	return ret;
+}
+
+static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
+{
+	struct c8sectpfei *fei = priv;
+
+	dev_err(fei->dev, "%s: error handling not yet implemented\n"
+		, __func__);
+
+	/*
+	 * TODO FIXME we should detect some error conditions here
+	 * and ideally do something about them!
+	 */
+
+	return IRQ_HANDLED;
+}
+
+static int c8sectpfe_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *child, *np = dev->of_node;
+	struct c8sectpfei *fei;
+	struct resource *res;
+	int ret, index = 0;
+	struct channel_info *tsin;
+
+	/* Allocate the c8sectpfei structure */
+	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
+	if (!fei)
+		return -ENOMEM;
+
+	fei->dev = dev;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
+	fei->io = devm_ioremap_resource(dev, res);
+	if (IS_ERR(fei->io))
+		return PTR_ERR(fei->io);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					"c8sectpfe-ram");
+	fei->sram = devm_ioremap_resource(dev, res);
+	if (IS_ERR(fei->sram))
+		return PTR_ERR(fei->sram);
+
+	fei->sram_size = resource_size(res);
+
+	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
+	if (fei->idle_irq < 0) {
+		dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
+		return fei->idle_irq;
+	}
+
+	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
+	if (fei->error_irq < 0) {
+		dev_err(dev, "Can't get c8sectpfe-error-irq\n");
+		return fei->error_irq;
+	}
+
+	platform_set_drvdata(pdev, fei);
+
+	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+	if (IS_ERR(fei->c8sectpfeclk)) {
+		dev_err(dev, "c8sectpfe clk not found\n");
+		return PTR_ERR(fei->c8sectpfeclk);
+	}
+
+	ret = clk_prepare_enable(fei->c8sectpfeclk);
+	if (ret) {
+		dev_err(dev, "Failed to enable c8sectpfe clock\n");
+		return ret;
+	}
+
+	/* to save power disable all IP's (on by default) */
+	writel(0, fei->io + SYS_INPUT_CLKEN);
+
+	/* Enable memdma clock */
+	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
+
+	/* clear internal sram */
+	memset_io(fei->sram, 0x0, fei->sram_size);
+
+	c8sectpfe_getconfig(fei);
+
+	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
+			0, "c8sectpfe-idle-irq", fei);
+	if (ret) {
+		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
+		goto err_clk_disable;
+	}
+
+	ret = devm_request_irq(dev, fei->error_irq,
+				c8sectpfe_error_irq_handler, 0,
+				"c8sectpfe-error-irq", fei);
+	if (ret) {
+		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
+		goto err_clk_disable;
+	}
+
+	fei->tsin_count = of_get_child_count(np);
+
+	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
+		fei->tsin_count > fei->hw_stats.num_ib) {
+
+		dev_err(dev, "More tsin declared than exist on SoC!\n");
+		ret = -EINVAL;
+		goto err_clk_disable;
+	}
+
+	fei->pinctrl = devm_pinctrl_get(dev);
+
+	if (IS_ERR(fei->pinctrl)) {
+		dev_err(dev, "Error getting tsin pins\n");
+		ret = PTR_ERR(fei->pinctrl);
+		goto err_clk_disable;
+	}
+
+	for_each_child_of_node(np, child) {
+		struct device_node *i2c_bus;
+
+		fei->channel_data[index] = devm_kzalloc(dev,
+						sizeof(struct channel_info),
+						GFP_KERNEL);
+
+		if (!fei->channel_data[index]) {
+			ret = -ENOMEM;
+			goto err_clk_disable;
+		}
+
+		tsin = fei->channel_data[index];
+
+		tsin->fei = fei;
+
+		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
+		if (ret) {
+			dev_err(&pdev->dev, "No tsin_num found\n");
+			goto err_clk_disable;
+		}
+
+		/* sanity check value */
+		if (tsin->tsin_id >= fei->hw_stats.num_ib) {
+			dev_err(&pdev->dev,
+				"tsin-num %d exceeds number of input blocks in SoC (%d)\n",
+				tsin->tsin_id, fei->hw_stats.num_ib);
+			ret = -EINVAL;
+			goto err_clk_disable;
+		}
+
+		tsin->invert_ts_clk = of_property_read_bool(child,
+							"invert-ts-clk");
+
+		tsin->serial_not_parallel = of_property_read_bool(child,
+							"serial-not-parallel");
+
+		tsin->async_not_sync = of_property_read_bool(child,
+							"async-not-sync");
+
+		ret = of_property_read_u32(child, "dvb-card",
+					&tsin->dvb_card);
+		if (ret) {
+			dev_err(&pdev->dev, "No dvb-card found\n");
+			goto err_clk_disable;
+		}
+
+		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
+		if (!i2c_bus) {
+			dev_err(&pdev->dev, "No i2c-bus found\n");
+			ret = -ENODEV;
+			goto err_clk_disable;
+		tsin->i2c_adapter =
+			of_find_i2c_adapter_by_node(i2c_bus);
+		if (!tsin->i2c_adapter) {
+			dev_err(&pdev->dev, "No i2c adapter found\n");
+			of_node_put(i2c_bus);
+			ret = -ENODEV;
+			goto err_clk_disable;
+		}
+		of_node_put(i2c_bus);
+
+		tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);
+
+		if (!gpio_is_valid(tsin->rst_gpio)) {
+			dev_err(dev,
+				"reset gpio for tsin%d not valid (gpio=%d)\n",
+				tsin->tsin_id, tsin->rst_gpio);
+			ret = -EINVAL;
+			goto err_clk_disable;
+		}
+
+		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
+					GPIOF_OUT_INIT_LOW, "NIM reset");
+		if (ret && ret != -EBUSY) {
+			dev_err(dev, "Can't request tsin%d reset gpio\n"
+				, fei->channel_data[index]->tsin_id);
+			goto err_clk_disable;
+		}
+
+		if (!ret) {
+			/* toggle reset lines */
+			gpio_direction_output(tsin->rst_gpio, 0);
+			usleep_range(3500, 5000);
+			gpio_direction_output(tsin->rst_gpio, 1);
+			usleep_range(3000, 5000);
+		}
+
+		tsin->demux_mapping = index;
+
+		dev_dbg(fei->dev,
+			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
+			"serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
+			fei->channel_data[index], index,
+			tsin->tsin_id, tsin->invert_ts_clk,
+			tsin->serial_not_parallel, tsin->async_not_sync,
+			tsin->dvb_card);
+
+		index++;
+	}
+
+	/* Setup timer interrupt */
+	init_timer(&fei->timer);
+	fei->timer.function = c8sectpfe_timer_interrupt;
+	fei->timer.data = (unsigned long)fei;
+
+	mutex_init(&fei->lock);
+
+	/* Get the configuration information about the tuners */
+	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
+					(void *)fei,
+					c8sectpfe_start_feed,
+					c8sectpfe_stop_feed);
+	if (ret) {
+		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
+			ret);
+		goto err_clk_disable;
+	}
+
+	/* ensure all other init has been done before requesting firmware */
+	ret = load_c8sectpfe_fw_step1(fei);
+	if (ret) {
+		dev_err(dev, "Couldn't load slim core firmware\n");
+		goto err_clk_disable;
+	}
+
+	c8sectpfe_debugfs_init(fei);
+
+	return 0;
+
+err_clk_disable:
+	/* TODO uncomment when upstream has taken a reference on this clk */
+	/*clk_disable_unprepare(fei->c8sectpfeclk);*/
+	return ret;
+}
+
+static int c8sectpfe_remove(struct platform_device *pdev)
+{
+	struct c8sectpfei *fei = platform_get_drvdata(pdev);
+	struct channel_info *channel;
+	int i;
+
+	wait_for_completion(&fei->fw_ack);
+
+	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
+
+	/*
+	 * Now loop through and un-configure each of the InputBlock resources
+	 */
+	for (i = 0; i < fei->tsin_count; i++) {
+		channel = fei->channel_data[i];
+		free_input_block(fei, channel);
+	}
+
+	c8sectpfe_debugfs_exit(fei);
+
+	dev_info(fei->dev, "Stopping memdma SLIM core\n");
+	if (readl(fei->io + DMA_CPU_RUN))
+		writel(0x0,  fei->io + DMA_CPU_RUN);
+
+	/* unclock all internal IP's */
+	if (readl(fei->io + SYS_INPUT_CLKEN))
+		writel(0, fei->io + SYS_INPUT_CLKEN);
+
+	if (readl(fei->io + SYS_OTHER_CLKEN))
+		writel(0, fei->io + SYS_OTHER_CLKEN);
+
+	/* TODO uncomment when upstream has taken a reference on this clk */
+	/*
+	if (fei->c8sectpfeclk)
+		clk_disable_unprepare(fei->c8sectpfeclk);
+	*/
+
+	return 0;
+}
+
+
+static int configure_channels(struct c8sectpfei *fei)
+{
+	int index = 0, ret;
+	struct channel_info *tsin;
+	struct device_node *child, *np = fei->dev->of_node;
+
+	/* iterate round each tsin and configure memdma descriptor and IB hw */
+	for_each_child_of_node(np, child) {
+
+		tsin = fei->channel_data[index];
+
+		ret = configure_memdma_and_inputblock(fei,
+						fei->channel_data[index]);
+
+		if (ret) {
+			dev_err(fei->dev,
+				"configure_memdma_and_inputblock failed\n");
+			goto err_unmap;
+		}
+		index++;
+	}
+
+	return 0;
+
+err_unmap:
+	for (index = 0; index < fei->tsin_count; index++) {
+		tsin = fei->channel_data[index];
+		free_input_block(fei, tsin);
+	}
+	return ret;
+}
+
+static int
+c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
+{
+	struct elf32_hdr *ehdr;
+	char class;
+
+	if (!fw) {
+		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
+		return -EINVAL;
+	}
+
+	if (fw->size < sizeof(struct elf32_hdr)) {
+		dev_err(fei->dev, "Image is too small\n");
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *)fw->data;
+
+	/* We only support ELF32 at this point */
+	class = ehdr->e_ident[EI_CLASS];
+	if (class != ELFCLASS32) {
+		dev_err(fei->dev, "Unsupported class: %d\n", class);
+		return -EINVAL;
+	}
+
+	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+		dev_err(fei->dev, "Unsupported firmware endianness\n");
+		return -EINVAL;
+	}
+
+	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+		dev_err(fei->dev, "Image is too small\n");
+		return -EINVAL;
+	}
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
+		return -EINVAL;
+	}
+
+	if (ehdr->e_type != ET_EXEC) {
+		dev_err(fei->dev, "Unsupported ELF header type\n");
+		return -EINVAL;
+	}
+
+	if (ehdr->e_phoff > fw->size) {
+		dev_err(fei->dev, "Firmware size is too small\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+			const struct firmware *fw, u8 __iomem *dest,
+			int seg_num)
+{
+	const u8 *imem_src = fw->data + phdr->p_offset;
+	int i;
+
+	/*
+	 * For IMEM segments, the segment contains 24-bit
+	 * instructions which must be padded to 32-bit
+	 * instructions before being written. The written
+	 * segment is padded with NOP instructions.
+	 */
+
+	dev_dbg(fei->dev,
+		"Loading IMEM segment %d 0x%08x\n\t"
+		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
+		phdr->p_paddr, phdr->p_filesz,
+		dest, phdr->p_memsz + phdr->p_memsz / 3);
+
+	for (i = 0; i < phdr->p_filesz; i++) {
+
+		writeb(*imem_src, dest);
+
+		/* Every 3 bytes, add an additional
+		 * padding zero in destination */
+		if (i % 3 == 2) {
+			dest++;
+			writeb(0x00, dest);
+		}
+
+		dest++;
+		imem_src++;
+	}
+}
+
+static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+			const struct firmware *fw, u8 __iomem *dst, int seg_num)
+{
+	/*
+	 * For DMEM segments copy the segment data from the ELF
+	 * file and pad segment with zeroes
+	 */
+
+	dev_dbg(fei->dev,
+		"Loading DMEM segment %d 0x%08x\n\t"
+		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+		seg_num, phdr->p_paddr, phdr->p_filesz,
+		dst, phdr->p_memsz);
+
+	memcpy_toio(dst, fw->data + phdr->p_offset, phdr->p_filesz);
+
+	memset_io(dst + phdr->p_filesz, 0,
+		phdr->p_memsz - phdr->p_filesz);
+}
+
+static int load_slim_core_fw(const struct firmware *fw, void *context)
+{
+	struct c8sectpfei *fei = context;
+	Elf32_Ehdr *ehdr;
+	Elf32_Phdr *phdr;
+	u8 __iomem *dst;
+	int err = 0, i;
+
+	if (!fw || !context)
+		return -EINVAL;
+
+	ehdr = (Elf32_Ehdr *)fw->data;
+	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
+
+	/* go through the available ELF segments */
+	for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) {
+
+		/* Only consider LOAD segments */
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		/*
+		 * Check segment is contained within the fw->data buffer
+		 */
+		if (phdr->p_offset + phdr->p_filesz > fw->size) {
+			dev_err(fei->dev,
+				"Segment %d is outside of firmware file\n", i);
+			err = -EINVAL;
+			break;
+		}
+
+		/*
+		 * MEMDMA IMEM has executable flag set, otherwise load
+		 * this segment into DMEM.
+		 *
+		 */
+
+		if (phdr->p_flags & PF_X) {
+			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
+			/*
+			 * The Slim ELF file uses 32-bit word addressing for
+			 * load offsets.
+			 */
+			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+			load_imem_segment(fei, phdr, fw, dst, i);
+		} else {
+			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
+			/*
+			 * The Slim ELF file uses 32-bit word addressing for
+			 * load offsets.
+			 */
+			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+			load_dmem_segment(fei, phdr, fw, dst, i);
+		}
+	}
+
+	release_firmware(fw);
+	return err;
+}
+
+static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
+{
+	struct c8sectpfei *fei = context;
+	int err;
+
+	err = c8sectpfe_elf_sanity_check(fei, fw);
+	if (err) {
+		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
+			, err);
+		goto err;
+	}
+
+	err = load_slim_core_fw(fw, context);
+	if (err) {
+		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
+		goto err;
+	}
+
+	/* now the firmware is loaded configure the input blocks */
+	err = configure_channels(fei);
+	if (err) {
+		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
+		goto err;
+	}
+
+	/*
+	 * STBus target port can access IMEM and DMEM ports
+	 * without waiting for CPU
+	 */
+	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
+
+	dev_info(fei->dev, "Boot the memdma SLIM core\n");
+	writel(0x1,  fei->io + DMA_CPU_RUN);
+
+	atomic_set(&fei->fw_loaded, 1);
+err:
+	complete_all(&fei->fw_ack);
+}
+
+static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
+{
+	int err;
+
+	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+	init_completion(&fei->fw_ack);
+	atomic_set(&fei->fw_loaded, 0);
+
+	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+				FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
+				load_c8sectpfe_fw_cb);
+
+	if (err) {
+		dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
+		complete_all(&fei->fw_ack);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id c8sectpfe_match[] = {
+	{ .compatible = "st,stih407-c8sectpfe" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, c8sectpfe_match);
+
+static struct platform_driver c8sectpfe_driver = {
+	.driver = {
+		.name = "c8sectpfe",
+		.of_match_table = of_match_ptr(c8sectpfe_match),
+	},
+	.probe	= c8sectpfe_probe,
+	.remove	= c8sectpfe_remove,
+};
+
+module_platform_driver(c8sectpfe_driver);
+
+MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
+MODULE_LICENSE("GPL");
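
[Editor note] load_imem_segment() above widens the SLIM core's 24-bit IMEM instructions to 32-bit slots by inserting a zero pad byte after every third source byte, so p_filesz source bytes occupy p_filesz + p_filesz/3 destination bytes. A userspace model of that expansion (hypothetical helper, for illustration only):

#include <stddef.h>
#include <stdint.h>

/* widen a stream of 3-byte instructions into 4-byte slots */
static size_t pad_imem(const uint8_t *src, size_t filesz, uint8_t *dst)
{
	size_t i, out = 0;

	for (i = 0; i < filesz; i++) {
		dst[out++] = src[i];
		if (i % 3 == 2)			/* after every 3rd byte... */
			dst[out++] = 0x00;	/* ...insert the pad byte */
	}
	return out;	/* == filesz + filesz / 3 for whole instructions */
}
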
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
new file mode 100644
index 0000000..39e7a22
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
@@ -0,0 +1,288 @@
+/*
+ * c8sectpfe-core.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *   Author:Peter Bennett <peter.bennett@st.com>
+ *	    Peter Griffin <peter.griffin@linaro.org>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License as
+ *	published by the Free Software Foundation; either version 2 of
+ *	the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_CORE_H_
+#define _C8SECTPFE_CORE_H_
+
+#define C8SECTPFEI_MAXCHANNEL 16
+#define C8SECTPFEI_MAXADAPTER 3
+
+#define C8SECTPFE_MAX_TSIN_CHAN 8
+
+struct channel_info {
+
+	int tsin_id;
+	bool invert_ts_clk;
+	bool serial_not_parallel;
+	bool async_not_sync;
+	int i2c;
+	int dvb_card;
+
+	int rst_gpio;
+
+	struct i2c_adapter  *i2c_adapter;
+	struct i2c_adapter  *tuner_i2c;
+	struct i2c_adapter  *lnb_i2c;
+	struct i2c_client   *i2c_client;
+	struct dvb_frontend *frontend;
+
+	struct pinctrl_state *pstate;
+
+	int demux_mapping;
+	int active;
+
+	void *back_buffer_start;
+	void *back_buffer_aligned;
+	dma_addr_t back_buffer_busaddr;
+
+	void *pid_buffer_start;
+	void *pid_buffer_aligned;
+	dma_addr_t pid_buffer_busaddr;
+
+	unsigned long  fifo;
+
+	struct completion idle_completion;
+	struct tasklet_struct tsklet;
+
+	struct c8sectpfei *fei;
+	void __iomem *irec;
+
+};
+
+struct c8sectpfe_hw {
+	int num_ib;
+	int num_mib;
+	int num_swts;
+	int num_tsout;
+	int num_ccsc;
+	int num_ram;
+	int num_tp;
+};
+
+struct c8sectpfei {
+
+	struct device *dev;
+	struct pinctrl *pinctrl;
+
+	struct dentry *root;
+	struct debugfs_regset32	*regset;
+	struct completion fw_ack;
+	atomic_t fw_loaded;
+
+	int tsin_count;
+
+	struct c8sectpfe_hw hw_stats;
+
+	struct c8sectpfe *c8sectpfe[C8SECTPFEI_MAXADAPTER];
+
+	int mapping[C8SECTPFEI_MAXCHANNEL];
+
+	struct mutex lock;
+
+	struct timer_list timer;	/* timer interrupts for outputs */
+
+	void __iomem *io;
+	void __iomem *sram;
+
+	unsigned long sram_size;
+
+	struct channel_info *channel_data[C8SECTPFE_MAX_TSIN_CHAN];
+
+	struct clk *c8sectpfeclk;
+	int nima_rst_gpio;
+	int nimb_rst_gpio;
+
+	int idle_irq;
+	int error_irq;
+
+	int global_feed_count;
+};
+
+/* C8SECTPFE SYS Regs list */
+
+#define SYS_INPUT_ERR_STATUS	0x0
+#define SYS_OTHER_ERR_STATUS	0x8
+#define SYS_INPUT_ERR_MASK	0x10
+#define SYS_OTHER_ERR_MASK	0x18
+#define SYS_DMA_ROUTE		0x20
+#define SYS_INPUT_CLKEN		0x30
+#define IBENABLE_MASK			0x7F
+
+#define SYS_OTHER_CLKEN		0x38
+#define TSDMAENABLE			BIT(1)
+#define MEMDMAENABLE			BIT(0)
+
+#define SYS_CFG_NUM_IB		0x200
+#define SYS_CFG_NUM_MIB		0x204
+#define SYS_CFG_NUM_SWTS	0x208
+#define SYS_CFG_NUM_TSOUT	0x20C
+#define SYS_CFG_NUM_CCSC	0x210
+#define SYS_CFG_NUM_RAM		0x214
+#define SYS_CFG_NUM_TP		0x218
+
+/* Input Block Regs */
+
+#define C8SECTPFE_INPUTBLK_OFFSET	0x1000
+#define C8SECTPFE_CHANNEL_OFFSET(x)	((x*0x40) + C8SECTPFE_INPUTBLK_OFFSET)
+
+#define C8SECTPFE_IB_IP_FMT_CFG(x)      (C8SECTPFE_CHANNEL_OFFSET(x) + 0x00)
+#define C8SECTPFE_IGNORE_ERR_AT_SOP     BIT(7)
+#define C8SECTPFE_IGNORE_ERR_IN_PKT     BIT(6)
+#define C8SECTPFE_IGNORE_ERR_IN_BYTE    BIT(5)
+#define C8SECTPFE_INVERT_TSCLK          BIT(4)
+#define C8SECTPFE_ALIGN_BYTE_SOP        BIT(3)
+#define C8SECTPFE_ASYNC_NOT_SYNC        BIT(2)
+#define C8SECTPFE_BYTE_ENDIANNESS_MSB    BIT(1)
+#define C8SECTPFE_SERIAL_NOT_PARALLEL   BIT(0)
+
+#define C8SECTPFE_IB_SYNCLCKDRP_CFG(x)   (C8SECTPFE_CHANNEL_OFFSET(x) + 0x04)
+#define C8SECTPFE_SYNC(x)                (x & 0xf)
+#define C8SECTPFE_DROP(x)                ((x<<4) & 0xf0)
+#define C8SECTPFE_TOKEN(x)               ((x<<8) & 0xff00)
+#define C8SECTPFE_SLDENDIANNESS          BIT(16)
+
+#define C8SECTPFE_IB_TAGBYTES_CFG(x)     (C8SECTPFE_CHANNEL_OFFSET(x) + 0x08)
+#define C8SECTPFE_TAG_HEADER(x)          (x << 16)
+#define C8SECTPFE_TAG_COUNTER(x)         ((x<<1) & 0x7fff)
+#define C8SECTPFE_TAG_ENABLE             BIT(0)
+
+#define C8SECTPFE_IB_PID_SET(x)          (C8SECTPFE_CHANNEL_OFFSET(x) + 0x0C)
+#define C8SECTPFE_PID_OFFSET(x)          (x & 0x3f)
+#define C8SECTPFE_PID_NUMBITS(x)         ((x << 6) & 0xfff)
+#define C8SECTPFE_PID_ENABLE             BIT(31)
+
+#define C8SECTPFE_IB_PKT_LEN(x)          (C8SECTPFE_CHANNEL_OFFSET(x) + 0x10)
+
+#define C8SECTPFE_IB_BUFF_STRT(x)        (C8SECTPFE_CHANNEL_OFFSET(x) + 0x14)
+#define C8SECTPFE_IB_BUFF_END(x)         (C8SECTPFE_CHANNEL_OFFSET(x) + 0x18)
+#define C8SECTPFE_IB_READ_PNT(x)         (C8SECTPFE_CHANNEL_OFFSET(x) + 0x1C)
+#define C8SECTPFE_IB_WRT_PNT(x)          (C8SECTPFE_CHANNEL_OFFSET(x) + 0x20)
+
+#define C8SECTPFE_IB_PRI_THRLD(x)        (C8SECTPFE_CHANNEL_OFFSET(x) + 0x24)
+#define C8SECTPFE_PRI_VALUE(x)           (x & 0x7fffff)
+#define C8SECTPFE_PRI_LOWPRI(x)          ((x & 0xf) << 24)
+#define C8SECTPFE_PRI_HIGHPRI(x)         ((x & 0xf) << 28)
+
+#define C8SECTPFE_IB_STAT(x)             (C8SECTPFE_CHANNEL_OFFSET(x) + 0x28)
+#define C8SECTPFE_STAT_FIFO_OVERFLOW(x)  (x & 0x1)
+#define C8SECTPFE_STAT_BUFFER_OVERFLOW(x) (x & 0x2)
+#define C8SECTPFE_STAT_OUTOFORDERRP(x)   (x & 0x4)
+#define C8SECTPFE_STAT_PID_OVERFLOW(x)   (x & 0x8)
+#define C8SECTPFE_STAT_PKT_OVERFLOW(x)   (x & 0x10)
+#define C8SECTPFE_STAT_ERROR_PACKETS(x)  ((x >> 8) & 0xf)
+#define C8SECTPFE_STAT_SHORT_PACKETS(x)  ((x >> 12) & 0xf)
+
+#define C8SECTPFE_IB_MASK(x)             (C8SECTPFE_CHANNEL_OFFSET(x) + 0x2C)
+#define C8SECTPFE_MASK_FIFO_OVERFLOW     BIT(0)
+#define C8SECTPFE_MASK_BUFFER_OVERFLOW   BIT(1)
+#define C8SECTPFE_MASK_OUTOFORDERRP(x)   BIT(2)
+#define C8SECTPFE_MASK_PID_OVERFLOW(x)   BIT(3)
+#define C8SECTPFE_MASK_PKT_OVERFLOW(x)   BIT(4)
+#define C8SECTPFE_MASK_ERROR_PACKETS(x)  ((x & 0xf) << 8)
+#define C8SECTPFE_MASK_SHORT_PACKETS(x)  ((x & 0xf) << 12)
+
+#define C8SECTPFE_IB_SYS(x)              (C8SECTPFE_CHANNEL_OFFSET(x) + 0x30)
+#define C8SECTPFE_SYS_RESET              BIT(1)
+#define C8SECTPFE_SYS_ENABLE             BIT(0)
+
+/*
+ * Pointer record data structure required for each input block;
+ * see Table 82 on page 167 of the functional specification.
+ */
+
+#define DMA_PRDS_MEMBASE	0x0 /* Internal sram base address */
+#define DMA_PRDS_MEMTOP		0x4 /* Internal sram top address */
+
+/*
+ * TS packet size, including tag bytes added by input block,
+ * rounded up to the next multiple of 8 bytes. The packet size,
+ * including any tagging bytes and rounded up to the nearest
+ * multiple of 8 bytes must be less than 255 bytes.
+ */
+#define DMA_PRDS_PKTSIZE	0x8
+#define DMA_PRDS_TPENABLE	0xc
+
+#define TP0_OFFSET		0x10
+#define DMA_PRDS_BUSBASE_TP(x)	((0x10*x) + TP0_OFFSET)
+#define DMA_PRDS_BUSTOP_TP(x)	((0x10*x) + TP0_OFFSET + 0x4)
+#define DMA_PRDS_BUSWP_TP(x)	((0x10*x) + TP0_OFFSET + 0x8)
+#define DMA_PRDS_BUSRP_TP(x)	((0x10*x) + TP0_OFFSET + 0xc)
+
+#define DMA_PRDS_SIZE		(0x20)
+
+#define DMA_MEMDMA_OFFSET	0x4000
+#define DMA_IMEM_OFFSET		0x0
+#define DMA_DMEM_OFFSET		0x4000
+#define DMA_CPU			0x8000
+#define DMA_PER_OFFSET		0xb000
+
+#define DMA_MEMDMA_DMEM (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET)
+#define DMA_MEMDMA_IMEM (DMA_MEMDMA_OFFSET + DMA_IMEM_OFFSET)
+
+/* XP70 Slim core regs */
+#define DMA_CPU_ID	(DMA_MEMDMA_OFFSET + DMA_CPU + 0x0)
+#define DMA_CPU_VCR	(DMA_MEMDMA_OFFSET + DMA_CPU + 0x4)
+#define DMA_CPU_RUN	(DMA_MEMDMA_OFFSET + DMA_CPU + 0x8)
+#define DMA_CPU_CLOCKGATE	(DMA_MEMDMA_OFFSET + DMA_CPU + 0xc)
+#define DMA_CPU_PC	(DMA_MEMDMA_OFFSET + DMA_CPU + 0x20)
+
+/* Enable Interrupt for a IB */
+#define DMA_PER_TPn_DREQ_MASK	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd00)
+/* Ack interrupt by setting corresponding bit */
+#define DMA_PER_TPn_DACK_SET	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd80)
+#define DMA_PER_TPn_DREQ	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe00)
+#define DMA_PER_TPn_DACK	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe80)
+#define DMA_PER_DREQ_MODE	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf80)
+#define DMA_PER_STBUS_SYNC	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf88)
+#define DMA_PER_STBUS_ACCESS	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf8c)
+#define DMA_PER_STBUS_ADDRESS	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf90)
+#define DMA_PER_IDLE_INT	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfa8)
+#define DMA_PER_PRIORITY	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfac)
+#define DMA_PER_MAX_OPCODE	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb0)
+#define DMA_PER_MAX_CHUNK	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb4)
+#define DMA_PER_PAGE_SIZE	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfbc)
+#define DMA_PER_MBOX_STATUS	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc0)
+#define DMA_PER_MBOX_SET	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc8)
+#define DMA_PER_MBOX_CLEAR	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd0)
+#define DMA_PER_MBOX_MASK	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd8)
+#define DMA_PER_INJECT_PKT_SRC	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe0)
+#define DMA_PER_INJECT_PKT_DEST	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe4)
+#define DMA_PER_INJECT_PKT_ADDR	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe8)
+#define DMA_PER_INJECT_PKT	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfec)
+#define DMA_PER_PAT_PTR_INIT	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff0)
+#define DMA_PER_PAT_PTR		(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff4)
+#define DMA_PER_SLEEP_MASK	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff8)
+#define DMA_PER_SLEEP_COUNTER	(DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xffc)
+/* #define DMA_RF_CPUREGn	DMA_RFBASEADDR (n = 0 to 15) slim regs */
+
+/* The following are from DMA_DMEM_BaseAddress */
+#define DMA_FIRMWARE_VERSION	(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x0)
+#define DMA_PTRREC_BASE		(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x4)
+#define DMA_PTRREC_INPUT_OFFSET	(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x8)
+#define DMA_ERRREC_BASE		(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0xc)
+#define DMA_ERROR_RECORD(n)	((n*4) + DMA_ERRREC_BASE + 0x4)
+#define DMA_IDLE_REQ		(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x10)
+#define IDLEREQ			BIT(31)
+
+#define DMA_FIRMWARE_CONFIG	(DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x14)
+
+/* Regs for PID Filter */
+
+#define PIDF_OFFSET		0x2800
+#define PIDF_BASE(n)		((n*4) + PIDF_OFFSET)
+#define PIDF_LEAK_ENABLE	(PIDF_OFFSET + 0x100)
+#define PIDF_LEAK_STATUS	(PIDF_OFFSET + 0x108)
+#define PIDF_LEAK_COUNT_RESET	(PIDF_OFFSET + 0x110)
+#define PIDF_LEAK_COUNTER	(PIDF_OFFSET + 0x114)
+
+#endif /* _C8SECTPFE_CORE_H_ */
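
The DMA_PRDS_PKTSIZE rule documented above (packet length plus tag bytes,
rounded up to a multiple of 8, strictly below 255) maps directly onto the
kernel's ALIGN() helper. A small illustrative sketch (the helper name is
hypothetical, not part of this driver):

	#include <linux/kernel.h>

	static u32 prds_pkt_size(u32 ts_pkt_len, u32 tag_bytes)
	{
		/* e.g. 188-byte TS packet + 8 tag bytes = 196 -> 200 */
		u32 size = ALIGN(ts_pkt_len + tag_bytes, 8);

		WARN_ON(size >= 255);	/* hardware limit per the spec */
		return size;
	}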
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
new file mode 100644
index 0000000..e9ba13d
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -0,0 +1,271 @@
+/*
+ * c8sectpfe-debugfs.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "c8sectpfe-debugfs.h"
+
+#define dump_register(nm ...)			\
+{						\
+	.name	= #nm,				\
+	.offset	= nm,				\
+}
+
+static const struct debugfs_reg32 fei_sys_regs[] = {
+	dump_register(SYS_INPUT_ERR_STATUS),
+	dump_register(SYS_OTHER_ERR_STATUS),
+	dump_register(SYS_INPUT_ERR_MASK),
+	dump_register(SYS_DMA_ROUTE),
+	dump_register(SYS_INPUT_CLKEN),
+	dump_register(IBENABLE_MASK),
+	dump_register(SYS_OTHER_CLKEN),
+	dump_register(SYS_CFG_NUM_IB),
+	dump_register(SYS_CFG_NUM_MIB),
+	dump_register(SYS_CFG_NUM_SWTS),
+	dump_register(SYS_CFG_NUM_TSOUT),
+	dump_register(SYS_CFG_NUM_CCSC),
+	dump_register(SYS_CFG_NUM_RAM),
+	dump_register(SYS_CFG_NUM_TP),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(0)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(0)),
+	dump_register(C8SECTPFE_IB_PID_SET(0)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(0)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(0)),
+	dump_register(C8SECTPFE_IB_BUFF_END(0)),
+	dump_register(C8SECTPFE_IB_READ_PNT(0)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(0)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(0)),
+	dump_register(C8SECTPFE_IB_STAT(0)),
+	dump_register(C8SECTPFE_IB_MASK(0)),
+	dump_register(C8SECTPFE_IB_SYS(0)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(1)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(1)),
+	dump_register(C8SECTPFE_IB_PID_SET(1)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(1)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(1)),
+	dump_register(C8SECTPFE_IB_BUFF_END(1)),
+	dump_register(C8SECTPFE_IB_READ_PNT(1)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(1)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(1)),
+	dump_register(C8SECTPFE_IB_STAT(1)),
+	dump_register(C8SECTPFE_IB_MASK(1)),
+	dump_register(C8SECTPFE_IB_SYS(1)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(2)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(2)),
+	dump_register(C8SECTPFE_IB_PID_SET(2)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(2)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(2)),
+	dump_register(C8SECTPFE_IB_BUFF_END(2)),
+	dump_register(C8SECTPFE_IB_READ_PNT(2)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(2)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(2)),
+	dump_register(C8SECTPFE_IB_STAT(2)),
+	dump_register(C8SECTPFE_IB_MASK(2)),
+	dump_register(C8SECTPFE_IB_SYS(2)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(3)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(3)),
+	dump_register(C8SECTPFE_IB_PID_SET(3)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(3)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(3)),
+	dump_register(C8SECTPFE_IB_BUFF_END(3)),
+	dump_register(C8SECTPFE_IB_READ_PNT(3)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(3)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(3)),
+	dump_register(C8SECTPFE_IB_STAT(3)),
+	dump_register(C8SECTPFE_IB_MASK(3)),
+	dump_register(C8SECTPFE_IB_SYS(3)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(4)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(4)),
+	dump_register(C8SECTPFE_IB_PID_SET(4)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(4)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(4)),
+	dump_register(C8SECTPFE_IB_BUFF_END(4)),
+	dump_register(C8SECTPFE_IB_READ_PNT(4)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(4)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(4)),
+	dump_register(C8SECTPFE_IB_STAT(4)),
+	dump_register(C8SECTPFE_IB_MASK(4)),
+	dump_register(C8SECTPFE_IB_SYS(4)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(5)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(5)),
+	dump_register(C8SECTPFE_IB_PID_SET(5)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(5)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(5)),
+	dump_register(C8SECTPFE_IB_BUFF_END(5)),
+	dump_register(C8SECTPFE_IB_READ_PNT(5)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(5)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(5)),
+	dump_register(C8SECTPFE_IB_STAT(5)),
+	dump_register(C8SECTPFE_IB_MASK(5)),
+	dump_register(C8SECTPFE_IB_SYS(5)),
+
+	dump_register(C8SECTPFE_IB_IP_FMT_CFG(6)),
+	dump_register(C8SECTPFE_IB_TAGBYTES_CFG(6)),
+	dump_register(C8SECTPFE_IB_PID_SET(6)),
+	dump_register(C8SECTPFE_IB_PKT_LEN(6)),
+	dump_register(C8SECTPFE_IB_BUFF_STRT(6)),
+	dump_register(C8SECTPFE_IB_BUFF_END(6)),
+	dump_register(C8SECTPFE_IB_READ_PNT(6)),
+	dump_register(C8SECTPFE_IB_WRT_PNT(6)),
+	dump_register(C8SECTPFE_IB_PRI_THRLD(6)),
+	dump_register(C8SECTPFE_IB_STAT(6)),
+	dump_register(C8SECTPFE_IB_MASK(6)),
+	dump_register(C8SECTPFE_IB_SYS(6)),
+
+	dump_register(DMA_CPU_ID),
+	dump_register(DMA_CPU_VCR),
+	dump_register(DMA_CPU_RUN),
+	dump_register(DMA_CPU_PC),
+
+	dump_register(DMA_PER_TPn_DREQ_MASK),
+	dump_register(DMA_PER_TPn_DACK_SET),
+	dump_register(DMA_PER_TPn_DREQ),
+	dump_register(DMA_PER_TPn_DACK),
+	dump_register(DMA_PER_DREQ_MODE),
+	dump_register(DMA_PER_STBUS_SYNC),
+	dump_register(DMA_PER_STBUS_ACCESS),
+	dump_register(DMA_PER_STBUS_ADDRESS),
+	dump_register(DMA_PER_IDLE_INT),
+	dump_register(DMA_PER_PRIORITY),
+	dump_register(DMA_PER_MAX_OPCODE),
+	dump_register(DMA_PER_MAX_CHUNK),
+	dump_register(DMA_PER_PAGE_SIZE),
+	dump_register(DMA_PER_MBOX_STATUS),
+	dump_register(DMA_PER_MBOX_SET),
+	dump_register(DMA_PER_MBOX_CLEAR),
+	dump_register(DMA_PER_MBOX_MASK),
+	dump_register(DMA_PER_INJECT_PKT_SRC),
+	dump_register(DMA_PER_INJECT_PKT_DEST),
+	dump_register(DMA_PER_INJECT_PKT_ADDR),
+	dump_register(DMA_PER_INJECT_PKT),
+	dump_register(DMA_PER_PAT_PTR_INIT),
+	dump_register(DMA_PER_PAT_PTR),
+	dump_register(DMA_PER_SLEEP_MASK),
+	dump_register(DMA_PER_SLEEP_COUNTER),
+
+	dump_register(DMA_FIRMWARE_VERSION),
+	dump_register(DMA_PTRREC_BASE),
+	dump_register(DMA_PTRREC_INPUT_OFFSET),
+	dump_register(DMA_ERRREC_BASE),
+
+	dump_register(DMA_ERROR_RECORD(0)),
+	dump_register(DMA_ERROR_RECORD(1)),
+	dump_register(DMA_ERROR_RECORD(2)),
+	dump_register(DMA_ERROR_RECORD(3)),
+	dump_register(DMA_ERROR_RECORD(4)),
+	dump_register(DMA_ERROR_RECORD(5)),
+	dump_register(DMA_ERROR_RECORD(6)),
+	dump_register(DMA_ERROR_RECORD(7)),
+	dump_register(DMA_ERROR_RECORD(8)),
+	dump_register(DMA_ERROR_RECORD(9)),
+	dump_register(DMA_ERROR_RECORD(10)),
+	dump_register(DMA_ERROR_RECORD(11)),
+	dump_register(DMA_ERROR_RECORD(12)),
+	dump_register(DMA_ERROR_RECORD(13)),
+	dump_register(DMA_ERROR_RECORD(14)),
+	dump_register(DMA_ERROR_RECORD(15)),
+	dump_register(DMA_ERROR_RECORD(16)),
+	dump_register(DMA_ERROR_RECORD(17)),
+	dump_register(DMA_ERROR_RECORD(18)),
+	dump_register(DMA_ERROR_RECORD(19)),
+	dump_register(DMA_ERROR_RECORD(20)),
+	dump_register(DMA_ERROR_RECORD(21)),
+	dump_register(DMA_ERROR_RECORD(22)),
+
+	dump_register(DMA_IDLE_REQ),
+	dump_register(DMA_FIRMWARE_CONFIG),
+
+	dump_register(PIDF_BASE(0)),
+	dump_register(PIDF_BASE(1)),
+	dump_register(PIDF_BASE(2)),
+	dump_register(PIDF_BASE(3)),
+	dump_register(PIDF_BASE(4)),
+	dump_register(PIDF_BASE(5)),
+	dump_register(PIDF_BASE(6)),
+	dump_register(PIDF_BASE(7)),
+	dump_register(PIDF_BASE(8)),
+	dump_register(PIDF_BASE(9)),
+	dump_register(PIDF_BASE(10)),
+	dump_register(PIDF_BASE(11)),
+	dump_register(PIDF_BASE(12)),
+	dump_register(PIDF_BASE(13)),
+	dump_register(PIDF_BASE(14)),
+	dump_register(PIDF_BASE(15)),
+	dump_register(PIDF_BASE(16)),
+	dump_register(PIDF_BASE(17)),
+	dump_register(PIDF_BASE(18)),
+	dump_register(PIDF_BASE(19)),
+	dump_register(PIDF_BASE(20)),
+	dump_register(PIDF_BASE(21)),
+	dump_register(PIDF_BASE(22)),
+	dump_register(PIDF_LEAK_ENABLE),
+	dump_register(PIDF_LEAK_STATUS),
+	dump_register(PIDF_LEAK_COUNT_RESET),
+	dump_register(PIDF_LEAK_COUNTER),
+};
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
+{
+	struct dentry		*root;
+	struct dentry		*file;
+
+	root = debugfs_create_dir("c8sectpfe", NULL);
+	if (!root)
+		goto err;
+
+	fei->root = root;
+
+	fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
+	if (!fei->regset)
+		goto err;
+
+	fei->regset->regs = fei_sys_regs;
+	fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
+	fei->regset->base = fei->io;
+
+	file = debugfs_create_regset32("registers", S_IRUGO, root,
+				fei->regset);
+	if (!file) {
+		dev_err(fei->dev,
+			"%s not able to create 'registers' debugfs\n"
+			, __func__);
+		goto err;
+	}
+
+	return;
+
+err:
+	debugfs_remove_recursive(root);
+}
+
+void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
+{
+	debugfs_remove_recursive(fei->root);
+	fei->root = NULL;
+}
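
The register dump above leans entirely on the generic debugfs regset support:
fill a debugfs_regset32 with a name/offset table and an ioremap'ed base, and
debugfs_create_regset32() produces a read-only file that prints each register
with its current value. A minimal sketch with placeholder names:

	#include <linux/debugfs.h>

	static const struct debugfs_reg32 my_regs[] = {
		{ .name = "CTRL",   .offset = 0x00 },
		{ .name = "STATUS", .offset = 0x04 },
	};

	static struct debugfs_regset32 my_regset;

	static void my_debugfs_init(void __iomem *base)
	{
		struct dentry *root = debugfs_create_dir("mydev", NULL);

		my_regset.regs = my_regs;
		my_regset.nregs = ARRAY_SIZE(my_regs);
		my_regset.base = base;

		debugfs_create_regset32("registers", S_IRUGO, root,
					&my_regset);
	}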
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
new file mode 100644
index 0000000..8af1ac1
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
@@ -0,0 +1,26 @@
+/*
+ * c8sectpfe-debugfs.h - C8SECTPFE STi DVB driver debugfs header
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Authors: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __C8SECTPFE_DEBUG_H
+#define __C8SECTPFE_DEBUG_H
+
+#include "c8sectpfe-core.h"
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *);
+void c8sectpfe_debugfs_exit(struct c8sectpfei *);
+
+#endif /* __C8SECTPFE_DEBUG_H */
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
new file mode 100644
index 0000000..69d7fe4
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -0,0 +1,244 @@
+/*
+ *  c8sectpfe-dvb.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *  Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+
+#include <dt-bindings/media/c8sectpfe.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+#include "dvb-pll.h"
+#include "lnbh24.h"
+#include "stv0367.h"
+#include "stv0367_priv.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "tda18212.h"
+
+static inline const char *dvb_card_str(unsigned int c)
+{
+	switch (c) {
+	case STV0367_TDA18212_NIMA_1:	return "STV0367_TDA18212_NIMA_1";
+	case STV0367_TDA18212_NIMA_2:	return "STV0367_TDA18212_NIMA_2";
+	case STV0367_TDA18212_NIMB_1:	return "STV0367_TDA18212_NIMB_1";
+	case STV0367_TDA18212_NIMB_2:	return "STV0367_TDA18212_NIMB_2";
+	case STV0903_6110_LNB24_NIMA:	return "STV0903_6110_LNB24_NIMA";
+	case STV0903_6110_LNB24_NIMB:	return "STV0903_6110_LNB24_NIMB";
+	default:			return "unknown dvb frontend card";
+	}
+}
+
+static struct stv090x_config stv090x_config = {
+	.device                 = STV0903,
+	.demod_mode             = STV090x_SINGLE,
+	.clk_mode               = STV090x_CLK_EXT,
+	.xtal                   = 16000000,
+	.address                = 0x69,
+
+	.ts1_mode               = STV090x_TSMODE_SERIAL_CONTINUOUS,
+	.ts2_mode               = STV090x_TSMODE_SERIAL_CONTINUOUS,
+
+	.repeater_level         = STV090x_RPTLEVEL_64,
+
+	.tuner_init             = NULL,
+	.tuner_set_mode         = NULL,
+	.tuner_set_frequency    = NULL,
+	.tuner_get_frequency    = NULL,
+	.tuner_set_bandwidth    = NULL,
+	.tuner_get_bandwidth    = NULL,
+	.tuner_set_bbgain       = NULL,
+	.tuner_get_bbgain       = NULL,
+	.tuner_set_refclk       = NULL,
+	.tuner_get_status       = NULL,
+};
+
+static struct stv6110x_config stv6110x_config = {
+	.addr                   = 0x60,
+	.refclk                 = 16000000,
+};
+
+#define NIMA 0
+#define NIMB 1
+
+static struct stv0367_config stv0367_tda18212_config[] = {
+	{
+		.demod_address = 0x1c,
+		.xtal = 16000000,
+		.if_khz = 4500,
+		.if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+		.ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+		.clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+	}, {
+		.demod_address = 0x1d,
+		.xtal = 16000000,
+		.if_khz = 4500,
+		.if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+		.ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+		.clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+	}, {
+		.demod_address = 0x1e,
+		.xtal = 16000000,
+		.if_khz = 4500,
+		.if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+		.ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+		.clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+	},
+};
+
+static struct tda18212_config tda18212_conf = {
+	.if_dvbt_6 = 4150,
+	.if_dvbt_7 = 4150,
+	.if_dvbt_8 = 4500,
+	.if_dvbc = 5000,
+};
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+		struct c8sectpfe *c8sectpfe,
+		struct channel_info *tsin, int chan_num)
+{
+	struct tda18212_config *tda18212;
+	struct stv6110x_devctl *fe2;
+	struct i2c_client *client;
+	struct i2c_board_info tda18212_info = {
+		.type = "tda18212",
+		.addr = 0x60,
+	};
+
+	if (!tsin)
+		return -EINVAL;
+
+	switch (tsin->dvb_card) {
+
+	case STV0367_TDA18212_NIMA_1:
+	case STV0367_TDA18212_NIMA_2:
+	case STV0367_TDA18212_NIMB_1:
+	case STV0367_TDA18212_NIMB_2:
+		if (tsin->dvb_card == STV0367_TDA18212_NIMA_1)
+			*fe = dvb_attach(stv0367ter_attach,
+				 &stv0367_tda18212_config[0],
+					tsin->i2c_adapter);
+		else if (tsin->dvb_card == STV0367_TDA18212_NIMB_1)
+			*fe = dvb_attach(stv0367ter_attach,
+				 &stv0367_tda18212_config[1],
+					tsin->i2c_adapter);
+		else
+			*fe = dvb_attach(stv0367ter_attach,
+				 &stv0367_tda18212_config[2],
+					tsin->i2c_adapter);
+
+		if (!*fe) {
+			dev_err(c8sectpfe->device,
+				"%s: stv0367ter_attach failed for NIM card %s\n"
+				, __func__, dvb_card_str(tsin->dvb_card));
+			return -ENODEV;
+		}
+
+		/*
+		 * init the demod so that i2c gate_ctrl
+		 * to the tuner works correctly
+		 */
+		(*fe)->ops.init(*fe);
+
+		/* Allocate the tda18212 structure */
+		tda18212 = devm_kzalloc(c8sectpfe->device,
+					sizeof(struct tda18212_config),
+					GFP_KERNEL);
+		if (!tda18212) {
+			dev_err(c8sectpfe->device,
+				"%s: devm_kzalloc failed\n", __func__);
+			return -ENOMEM;
+		}
+
+		memcpy(tda18212, &tda18212_conf,
+			sizeof(struct tda18212_config));
+
+		tda18212->fe = (*fe);
+
+		tda18212_info.platform_data = tda18212;
+
+		/* attach tuner */
+		request_module("tda18212");
+		client = i2c_new_device(tsin->i2c_adapter, &tda18212_info);
+		if (!client || !client->dev.driver) {
+			dvb_frontend_detach(*fe);
+			return -ENODEV;
+		}
+
+		if (!try_module_get(client->dev.driver->owner)) {
+			i2c_unregister_device(client);
+			dvb_frontend_detach(*fe);
+			return -ENODEV;
+		}
+
+		tsin->i2c_client = client;
+
+		break;
+
+	case STV0903_6110_LNB24_NIMA:
+		*fe = dvb_attach(stv090x_attach,	&stv090x_config,
+				tsin->i2c_adapter, STV090x_DEMODULATOR_0);
+		if (!*fe) {
+			dev_err(c8sectpfe->device, "%s: stv090x_attach failed\n"
+				"\tfor NIM card %s\n",
+				__func__, dvb_card_str(tsin->dvb_card));
+			return -ENODEV;
+		}
+
+		fe2 = dvb_attach(stv6110x_attach, *fe,
+					&stv6110x_config, tsin->i2c_adapter);
+		if (!fe2) {
+			dev_err(c8sectpfe->device,
+				"%s: stv6110x_attach failed for NIM card %s\n"
+				, __func__, dvb_card_str(tsin->dvb_card));
+			return -ENODEV;
+		}
+
+		stv090x_config.tuner_init = fe2->tuner_init;
+		stv090x_config.tuner_set_mode = fe2->tuner_set_mode;
+		stv090x_config.tuner_set_frequency = fe2->tuner_set_frequency;
+		stv090x_config.tuner_get_frequency = fe2->tuner_get_frequency;
+		stv090x_config.tuner_set_bandwidth = fe2->tuner_set_bandwidth;
+		stv090x_config.tuner_get_bandwidth = fe2->tuner_get_bandwidth;
+		stv090x_config.tuner_set_bbgain = fe2->tuner_set_bbgain;
+		stv090x_config.tuner_get_bbgain = fe2->tuner_get_bbgain;
+		stv090x_config.tuner_set_refclk = fe2->tuner_set_refclk;
+		stv090x_config.tuner_get_status = fe2->tuner_get_status;
+
+		dvb_attach(lnbh24_attach, *fe, tsin->i2c_adapter, 0, 0, 0x9);
+		break;
+
+	default:
+		dev_err(c8sectpfe->device,
+			"%s: DVB frontend card %s not yet supported\n",
+			__func__, dvb_card_str(tsin->dvb_card));
+		return -ENODEV;
+	}
+
+	(*fe)->id = chan_num;
+
+	dev_info(c8sectpfe->device,
+			"DVB frontend card %s successfully attached",
+			dvb_card_str(tsin->dvb_card));
+	return 0;
+}
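
The tuner hookup in the STV0367 path follows a common three-step binding
sequence: request the tuner module, instantiate the i2c client with its
platform data, then pin the client's driver so it cannot be unloaded while
the frontend still calls into it. A condensed sketch (function name and
error handling are placeholders):

	#include <linux/i2c.h>
	#include <linux/module.h>

	static struct i2c_client *bind_tuner(struct i2c_adapter *adap,
					     void *pdata)
	{
		struct i2c_board_info info = {
			.type = "tda18212",
			.addr = 0x60,
			.platform_data = pdata,
		};
		struct i2c_client *client;

		request_module(info.type);
		client = i2c_new_device(adap, &info);
		if (!client || !client->dev.driver)
			return NULL;	/* no driver bound */

		if (!try_module_get(client->dev.driver->owner)) {
			i2c_unregister_device(client);
			return NULL;
		}
		return client;	/* caller stores it for teardown */
	}

Teardown mirrors this: module_put() on the driver owner, then
i2c_unregister_device().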
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
new file mode 100644
index 0000000..bd366db
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
@@ -0,0 +1,20 @@
+/*
+ * c8sectpfe-common.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ *   Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License as
+ *      published by the Free Software Foundation; either version 2 of
+ *      the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_DVB_H_
+#define _C8SECTPFE_DVB_H_
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+			struct c8sectpfe *c8sectpfe, struct channel_info *tsin,
+			int chan_num);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index c4268d1..ed0b878 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -1627,7 +1627,7 @@
 	h_freq = (u32)bt->pixelclock / total_h_pixel;
 
 	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
-		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync,
+		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
 				    bt->polarities, bt->interlaced, timings))
 			return true;
 	}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 0862c1f..c404e27 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -1124,15 +1124,26 @@
 	return 0;
 }
 
+static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+{
+	struct v4l2_bt_timings *bt = &timings->bt;
+
+	if ((bt->standards & (V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF)) &&
+	    v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, NULL, NULL))
+		return true;
+
+	return false;
+}
+
 int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
 				    struct v4l2_dv_timings *timings)
 {
 	struct vivid_dev *dev = video_drvdata(file);
-
 	if (!vivid_is_hdmi_out(dev))
 		return -ENODATA;
 	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
-				0, NULL, NULL))
+				0, NULL, NULL) &&
+	    !valid_cvt_gtf_timings(timings))
 		return -EINVAL;
 	if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0))
 		return 0;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 913485a..4e61886 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -1,7 +1,7 @@
 /*
  * vsp1_drv.c  --  R-Car VSP1 Driver
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -403,7 +403,10 @@
 	if (vsp1->ref_count == 0)
 		return 0;
 
+	vsp1_pipelines_suspend(vsp1);
+
 	clk_disable_unprepare(vsp1->clock);
+
 	return 0;
 }
 
@@ -413,10 +416,14 @@
 
 	WARN_ON(mutex_is_locked(&vsp1->lock));
 
-	if (vsp1->ref_count)
+	if (vsp1->ref_count == 0)
 		return 0;
 
-	return clk_prepare_enable(vsp1->clock);
+	clk_prepare_enable(vsp1->clock);
+
+	vsp1_pipelines_resume(vsp1);
+
+	return 0;
 }
 #endif
 
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index a453bb4..fd95a75 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -24,22 +24,24 @@
 
 bool vsp1_entity_is_streaming(struct vsp1_entity *entity)
 {
+	unsigned long flags;
 	bool streaming;
 
-	mutex_lock(&entity->lock);
+	spin_lock_irqsave(&entity->lock, flags);
 	streaming = entity->streaming;
-	mutex_unlock(&entity->lock);
+	spin_unlock_irqrestore(&entity->lock, flags);
 
 	return streaming;
 }
 
 int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
 {
+	unsigned long flags;
 	int ret;
 
-	mutex_lock(&entity->lock);
+	spin_lock_irqsave(&entity->lock, flags);
 	entity->streaming = streaming;
-	mutex_unlock(&entity->lock);
+	spin_unlock_irqrestore(&entity->lock, flags);
 
 	if (!streaming)
 		return 0;
@@ -49,9 +51,9 @@
 
 	ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler);
 	if (ret < 0) {
-		mutex_lock(&entity->lock);
+		spin_lock_irqsave(&entity->lock, flags);
 		entity->streaming = false;
-		mutex_unlock(&entity->lock);
+		spin_unlock_irqrestore(&entity->lock, flags);
 	}
 
 	return ret;
@@ -193,7 +195,7 @@
 	if (i == ARRAY_SIZE(vsp1_routes))
 		return -EINVAL;
 
-	mutex_init(&entity->lock);
+	spin_lock_init(&entity->lock);
 
 	entity->vsp1 = vsp1;
 	entity->source_pad = num_pads - 1;
@@ -228,6 +230,4 @@
 	if (entity->subdev.ctrl_handler)
 		v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
 	media_entity_cleanup(&entity->subdev.entity);
-
-	mutex_destroy(&entity->lock);
 }
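
The mutex-to-spinlock switch matters because the streaming flag is now read
from contexts that cannot sleep; spin_lock_irqsave() makes the accessors safe
from both process and interrupt context. A sketch of the resulting pattern
(struct and function names are illustrative only):

	#include <linux/spinlock.h>

	struct flagged {
		spinlock_t lock;	/* protects streaming */
		bool streaming;
	};

	static bool flagged_is_streaming(struct flagged *f)
	{
		unsigned long flags;
		bool v;

		spin_lock_irqsave(&f->lock, flags);
		v = f->streaming;
		spin_unlock_irqrestore(&f->lock, flags);
		return v;
	}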
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 62c768d..8867a57 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -14,7 +14,7 @@
 #define __VSP1_ENTITY_H__
 
 #include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 #include <media/v4l2-subdev.h>
 
@@ -73,7 +73,7 @@
 
 	struct vsp1_video *video;
 
-	struct mutex lock;		/* Protects the streaming field */
+	spinlock_t lock;		/* Protects the streaming field */
 	bool streaming;
 };
 
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index da3c573..25b4873 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -238,7 +238,7 @@
 #define VI6_WPF_SZCLIP_EN		(1 << 28)
 #define VI6_WPF_SZCLIP_OFST_MASK	(0xff << 16)
 #define VI6_WPF_SZCLIP_OFST_SHIFT	16
-#define VI6_WPF_SZCLIP_SIZE_MASK	(0x1fff << 0)
+#define VI6_WPF_SZCLIP_SIZE_MASK	(0xfff << 0)
 #define VI6_WPF_SZCLIP_SIZE_SHIFT	0
 
 #define VI6_WPF_OUTFMT			0x100c
@@ -304,9 +304,9 @@
 #define VI6_DPR_HST_ROUTE		0x2044
 #define VI6_DPR_HSI_ROUTE		0x2048
 #define VI6_DPR_BRU_ROUTE		0x204c
-#define VI6_DPR_ROUTE_FXA_MASK		(0xff << 8)
+#define VI6_DPR_ROUTE_FXA_MASK		(0xff << 16)
 #define VI6_DPR_ROUTE_FXA_SHIFT		16
-#define VI6_DPR_ROUTE_FP_MASK		(0xff << 8)
+#define VI6_DPR_ROUTE_FP_MASK		(0x3f << 8)
 #define VI6_DPR_ROUTE_FP_SHIFT		8
 #define VI6_DPR_ROUTE_RT_MASK		(0x3f << 0)
 #define VI6_DPR_ROUTE_RT_SHIFT		0
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index fa71f46..9688c21 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -197,6 +197,17 @@
 	 */
 	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SINK,
 					    sel->which);
+
+	/* Restrict the crop rectangle coordinates to multiples of 2 to avoid
+	 * shifting the color plane.
+	 */
+	if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+		sel->r.left = ALIGN(sel->r.left, 2);
+		sel->r.top = ALIGN(sel->r.top, 2);
+		sel->r.width = round_down(sel->r.width, 2);
+		sel->r.height = round_down(sel->r.height, 2);
+	}
+
 	sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
 	sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
 	if (rwpf->entity.type == VSP1_ENTITY_WPF) {
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d91f19a..3c124c1 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1,7 +1,7 @@
 /*
  * vsp1_video.c  --  R-Car VSP1 Video Node
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -245,7 +245,7 @@
 	 * the datasheet, strides not aligned to a multiple of 128 bytes result
 	 * in image corruption.
 	 */
-	for (i = 0; i < max(info->planes, 2U); ++i) {
+	for (i = 0; i < min(info->planes, 2U); ++i) {
 		unsigned int hsub = i > 0 ? info->hsub : 1;
 		unsigned int vsub = i > 0 ? info->vsub : 1;
 		unsigned int align = 128;
@@ -514,6 +514,18 @@
 	pipe->buffers_ready = 0;
 }
 
+static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
+{
+	unsigned long flags;
+	bool stopped;
+
+	spin_lock_irqsave(&pipe->irqlock, flags);
+	stopped = pipe->state == VSP1_PIPELINE_STOPPED;
+	spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+	return stopped;
+}
+
 static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
 {
 	struct vsp1_entity *entity;
@@ -525,7 +537,7 @@
 		pipe->state = VSP1_PIPELINE_STOPPING;
 	spin_unlock_irqrestore(&pipe->irqlock, flags);
 
-	ret = wait_event_timeout(pipe->wq, pipe->state == VSP1_PIPELINE_STOPPED,
+	ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
 				 msecs_to_jiffies(500));
 	ret = ret == 0 ? -ETIMEDOUT : 0;
 
@@ -703,6 +715,73 @@
 	}
 }
 
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
+{
+	unsigned long flags;
+	unsigned int i;
+	int ret;
+
+	/* To avoid increasing the system suspend time needlessly, loop over the
+	 * pipelines twice, first to set them all to the stopping state, and then
+	 * to wait for the stop to complete.
+	 */
+	for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+		struct vsp1_rwpf *wpf = vsp1->wpf[i];
+		struct vsp1_pipeline *pipe;
+
+		if (wpf == NULL)
+			continue;
+
+		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		if (pipe == NULL)
+			continue;
+
+		spin_lock_irqsave(&pipe->irqlock, flags);
+		if (pipe->state == VSP1_PIPELINE_RUNNING)
+			pipe->state = VSP1_PIPELINE_STOPPING;
+		spin_unlock_irqrestore(&pipe->irqlock, flags);
+	}
+
+	for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+		struct vsp1_rwpf *wpf = vsp1->wpf[i];
+		struct vsp1_pipeline *pipe;
+
+		if (wpf == NULL)
+			continue;
+
+		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		if (pipe == NULL)
+			continue;
+
+		ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+					 msecs_to_jiffies(500));
+		if (ret == 0)
+			dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
+				 wpf->entity.index);
+	}
+}
+
+void vsp1_pipelines_resume(struct vsp1_device *vsp1)
+{
+	unsigned int i;
+
+	/* Resume all running pipelines. */
+	for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+		struct vsp1_rwpf *wpf = vsp1->wpf[i];
+		struct vsp1_pipeline *pipe;
+
+		if (wpf == NULL)
+			continue;
+
+		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		if (pipe == NULL)
+			continue;
+
+		if (vsp1_pipeline_ready(pipe))
+			vsp1_pipeline_run(pipe);
+	}
+}
+
 /* -----------------------------------------------------------------------------
  * videobuf2 Queue Operations
  */
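
vsp1_pipeline_stopped() above is the locked-predicate idiom for
wait_event_timeout(): the condition is re-evaluated inside the same spinlock
that writers hold, so the waiter never reads pipe->state unsynchronized. In
general form (all names hypothetical):

	static bool state_reached(spinlock_t *lock, int *state, int target)
	{
		unsigned long flags;
		bool done;

		spin_lock_irqsave(lock, flags);
		done = (*state == target);
		spin_unlock_irqrestore(lock, flags);
		return done;
	}

	/*
	 * ret = wait_event_timeout(wq,
	 *			    state_reached(&lock, &st, STOPPED),
	 *			    msecs_to_jiffies(500));
	 */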
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index fd2851a..0887a4d 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -1,7 +1,7 @@
 /*
  * vsp1_video.h  --  R-Car VSP1 Video Node
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -149,4 +149,7 @@
 				   struct vsp1_entity *input,
 				   unsigned int alpha);
 
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
+void vsp1_pipelines_resume(struct vsp1_device *vsp1);
+
 #endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 98e50e4..e779c93 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -699,8 +699,10 @@
 
 	/* ... and the buffers queue... */
 	dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
-	if (IS_ERR(dma->alloc_ctx))
+	if (IS_ERR(dma->alloc_ctx)) {
+		ret = PTR_ERR(dma->alloc_ctx);
 		goto error;
+	}
 
 	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
 	 * V4L2 APIs would be inefficient. Testing on the command line with a
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index cc39901..a1930b3 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -526,7 +526,6 @@
 static struct i2c_driver tea5764_i2c_driver = {
 	.driver = {
 		.name = "radio-tea5764",
-		.owner = THIS_MODULE,
 	},
 	.probe = tea5764_i2c_probe,
 	.remove = tea5764_i2c_remove,
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index ec805b0..ba8e357 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -336,19 +336,7 @@
 	.s_ctrl = saa7706h_s_ctrl,
 };
 
-static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
-};
-
-static const struct v4l2_subdev_ops saa7706h_ops = {
-	.core = &saa7706h_core_ops,
-};
+static const struct v4l2_subdev_ops empty_ops = {};
 
 /*
  * Generic i2c probe
@@ -373,7 +361,7 @@
 	if (state == NULL)
 		return -ENOMEM;
 	sd = &state->sd;
-	v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
+	v4l2_i2c_subdev_init(sd, client, &empty_ops);
 
 	v4l2_ctrl_handler_init(&state->hdl, 4);
 	v4l2_ctrl_new_std(&state->hdl, &saa7706h_ctrl_ops,
@@ -429,7 +417,6 @@
 
 static struct i2c_driver saa7706h_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= DRIVER_NAME,
 	},
 	.probe		= saa7706h_probe,
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index a9319a2..9f879f0 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -195,7 +195,6 @@
 
 static struct i2c_driver tef6862_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= DRIVER_NAME,
 	},
 	.probe		= tef6862_probe,
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 704397f..ebc73b0 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -689,7 +689,6 @@
 static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
 		struct fm_rdsdata_format *rds_format)
 {
-	u8 byte1;
 	u8 index = 0;
 	u8 *rds_buff;
 
@@ -701,9 +700,7 @@
 	if (fmdev->asci_id != 0x6350) {
 		rds_buff = &rds_format->data.groupdatabuff.buff[0];
 		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
-			byte1 = rds_buff[index];
-			rds_buff[index] = rds_buff[index + 1];
-			rds_buff[index + 1] = byte1;
+			swap(rds_buff[index], rds_buff[index + 1]);
 			index += 2;
 		}
 	}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index ddfab25..b6e1311 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -371,21 +371,21 @@
 	tristate "ST remote control receiver"
 	depends on RC_CORE
 	depends on ARCH_STI || COMPILE_TEST
-	help
-	 Say Y here if you want support for ST remote control driver
-	 which allows both IR and UHF RX.
-	 The driver passes raw pulse and space information to the LIRC decoder.
+	---help---
+	   Say Y here if you want support for ST remote control driver
+	   which allows both IR and UHF RX.
+	   The driver passes raw pulse and space information to the LIRC decoder.
 
-	 If you're not sure, select N here.
+	   If you're not sure, select N here.
 
 config IR_SUNXI
-    tristate "SUNXI IR remote control"
-    depends on RC_CORE
-    depends on ARCH_SUNXI || COMPILE_TEST
-    ---help---
-      Say Y if you want to use sunXi internal IR Controller
+	tristate "SUNXI IR remote control"
+	depends on RC_CORE
+	depends on ARCH_SUNXI || COMPILE_TEST
+	---help---
+	   Say Y if you want to use sunXi internal IR Controller
 
-      To compile this driver as a module, choose M here: the module will
-      be called sunxi-ir.
+	   To compile this driver as a module, choose M here: the module will
+	   be called sunxi-ir.
 
 endif #RC_DEVICES
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 98893a8..a32659f 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,9 +35,6 @@
 	struct lirc_codec *lirc = &dev->raw->lirc;
 	int sample;
 
-	if (!(dev->enabled_protocols & RC_BIT_LIRC))
-		return 0;
-
 	if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
 		return -EINVAL;
 
@@ -424,7 +421,7 @@
 }
 
 static struct ir_raw_handler lirc_handler = {
-	.protocols	= RC_BIT_LIRC,
+	.protocols	= 0,
 	.decode		= ir_lirc_decode,
 	.raw_register	= ir_lirc_register,
 	.raw_unregister	= ir_lirc_unregister,
diff --git a/drivers/media/rc/keymaps/rc-lirc.c b/drivers/media/rc/keymaps/rc-lirc.c
index fbf08fa..e172f5d 100644
--- a/drivers/media/rc/keymaps/rc-lirc.c
+++ b/drivers/media/rc/keymaps/rc-lirc.c
@@ -20,7 +20,7 @@
 	.map = {
 		.scan    = lirc,
 		.size    = ARRAY_SIZE(lirc),
-		.rc_type = RC_TYPE_LIRC,
+		.rc_type = RC_TYPE_OTHER,
 		.name    = RC_MAP_LIRC,
 	}
 };
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index 51f18bb..2b0027c 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -15,74 +15,74 @@
 
 static struct rc_map_table lme2510_rc[] = {
 	/* Type 1 - 26 buttons */
-	{ 0x10ed45, KEY_0 },
-	{ 0x10ed5f, KEY_1 },
-	{ 0x10ed50, KEY_2 },
-	{ 0x10ed5d, KEY_3 },
-	{ 0x10ed41, KEY_4 },
-	{ 0x10ed0a, KEY_5 },
-	{ 0x10ed42, KEY_6 },
-	{ 0x10ed47, KEY_7 },
-	{ 0x10ed49, KEY_8 },
-	{ 0x10ed05, KEY_9 },
-	{ 0x10ed43, KEY_POWER },
-	{ 0x10ed46, KEY_SUBTITLE },
-	{ 0x10ed06, KEY_PAUSE },
-	{ 0x10ed03, KEY_MEDIA_REPEAT},
-	{ 0x10ed02, KEY_PAUSE },
-	{ 0x10ed5e, KEY_VOLUMEUP },
-	{ 0x10ed5c, KEY_VOLUMEDOWN },
-	{ 0x10ed09, KEY_CHANNELUP },
-	{ 0x10ed1a, KEY_CHANNELDOWN },
-	{ 0x10ed1e, KEY_PLAY },
-	{ 0x10ed1b, KEY_ZOOM },
-	{ 0x10ed59, KEY_MUTE },
-	{ 0x10ed5a, KEY_TV },
-	{ 0x10ed18, KEY_RECORD },
-	{ 0x10ed07, KEY_EPG },
-	{ 0x10ed01, KEY_STOP },
+	{ 0xef12ba45, KEY_0 },
+	{ 0xef12a05f, KEY_1 },
+	{ 0xef12af50, KEY_2 },
+	{ 0xef12a25d, KEY_3 },
+	{ 0xef12be41, KEY_4 },
+	{ 0xef12f50a, KEY_5 },
+	{ 0xef12bd42, KEY_6 },
+	{ 0xef12b847, KEY_7 },
+	{ 0xef12b649, KEY_8 },
+	{ 0xef12fa05, KEY_9 },
+	{ 0xef12bc43, KEY_POWER },
+	{ 0xef12b946, KEY_SUBTITLE },
+	{ 0xef12f906, KEY_PAUSE },
+	{ 0xef12fc03, KEY_MEDIA_REPEAT},
+	{ 0xef12fd02, KEY_PAUSE },
+	{ 0xef12a15e, KEY_VOLUMEUP },
+	{ 0xef12a35c, KEY_VOLUMEDOWN },
+	{ 0xef12f609, KEY_CHANNELUP },
+	{ 0xef12e51a, KEY_CHANNELDOWN },
+	{ 0xef12e11e, KEY_PLAY },
+	{ 0xef12e41b, KEY_ZOOM },
+	{ 0xef12a659, KEY_MUTE },
+	{ 0xef12a55a, KEY_TV },
+	{ 0xef12e718, KEY_RECORD },
+	{ 0xef12f807, KEY_EPG },
+	{ 0xef12fe01, KEY_STOP },
 	/* Type 2 - 20 buttons */
-	{ 0xbf15, KEY_0 },
-	{ 0xbf08, KEY_1 },
-	{ 0xbf09, KEY_2 },
-	{ 0xbf0a, KEY_3 },
-	{ 0xbf0c, KEY_4 },
-	{ 0xbf0d, KEY_5 },
-	{ 0xbf0e, KEY_6 },
-	{ 0xbf10, KEY_7 },
-	{ 0xbf11, KEY_8 },
-	{ 0xbf12, KEY_9 },
-	{ 0xbf00, KEY_POWER },
-	{ 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
-	{ 0xbf1a, KEY_PAUSE }, /* Timeshift */
-	{ 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
-	{ 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
-	{ 0xbf01, KEY_CHANNELUP },
-	{ 0xbf05, KEY_CHANNELDOWN },
-	{ 0xbf14, KEY_ZOOM },
-	{ 0xbf18, KEY_RECORD },
-	{ 0xbf16, KEY_STOP },
+	{ 0xff40ea15, KEY_0 },
+	{ 0xff40f708, KEY_1 },
+	{ 0xff40f609, KEY_2 },
+	{ 0xff40f50a, KEY_3 },
+	{ 0xff40f30c, KEY_4 },
+	{ 0xff40f20d, KEY_5 },
+	{ 0xff40f10e, KEY_6 },
+	{ 0xff40ef10, KEY_7 },
+	{ 0xff40ee11, KEY_8 },
+	{ 0xff40ed12, KEY_9 },
+	{ 0xff40ff00, KEY_POWER },
+	{ 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
+	{ 0xff40e51a, KEY_PAUSE }, /* Timeshift */
+	{ 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+	{ 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+	{ 0xff40fe01, KEY_CHANNELUP },
+	{ 0xff40fa05, KEY_CHANNELDOWN },
+	{ 0xff40eb14, KEY_ZOOM },
+	{ 0xff40e718, KEY_RECORD },
+	{ 0xff40e916, KEY_STOP },
 	/* Type 3 - 20 buttons */
-	{ 0x1c, KEY_0 },
-	{ 0x07, KEY_1 },
-	{ 0x15, KEY_2 },
-	{ 0x09, KEY_3 },
-	{ 0x16, KEY_4 },
-	{ 0x19, KEY_5 },
-	{ 0x0d, KEY_6 },
-	{ 0x0c, KEY_7 },
-	{ 0x18, KEY_8 },
-	{ 0x5e, KEY_9 },
-	{ 0x45, KEY_POWER },
-	{ 0x44, KEY_MEDIA_REPEAT}, /* Recall */
-	{ 0x4a, KEY_PAUSE }, /* Timeshift */
-	{ 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
-	{ 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
-	{ 0x46, KEY_CHANNELUP },
-	{ 0x40, KEY_CHANNELDOWN },
-	{ 0x08, KEY_ZOOM },
-	{ 0x42, KEY_RECORD },
-	{ 0x5a, KEY_STOP },
+	{ 0xff00e31c, KEY_0 },
+	{ 0xff00f807, KEY_1 },
+	{ 0xff00ea15, KEY_2 },
+	{ 0xff00f609, KEY_3 },
+	{ 0xff00e916, KEY_4 },
+	{ 0xff00e619, KEY_5 },
+	{ 0xff00f20d, KEY_6 },
+	{ 0xff00f30c, KEY_7 },
+	{ 0xff00e718, KEY_8 },
+	{ 0xff00a15e, KEY_9 },
+	{ 0xff00ba45, KEY_POWER },
+	{ 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
+	{ 0xff00b54a, KEY_PAUSE }, /* Timeshift */
+	{ 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+	{ 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+	{ 0xff00b946, KEY_CHANNELUP },
+	{ 0xff00bf40, KEY_CHANNELDOWN },
+	{ 0xff00f708, KEY_ZOOM },
+	{ 0xff00bd42, KEY_RECORD },
+	{ 0xff00a55a, KEY_STOP },
 };
 
 static struct rc_map_list lme2510_map = {
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b732ac6..ad26052 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -271,7 +271,7 @@
 
 	spin_lock_init(&dev->raw->lock);
 	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
-				       "rc%ld", dev->devno);
+				       "rc%u", dev->minor);
 
 	if (IS_ERR(dev->raw->thread)) {
 		rc = PTR_ERR(dev->raw->thread);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 0ff388a..3f0f71a 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -18,17 +18,15 @@
 #include <linux/input.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include "rc-core-priv.h"
 
-/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
-#define IRRCV_NUM_DEVICES      256
-static DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
-
 /* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
 #define IR_TAB_MIN_SIZE	256
 #define IR_TAB_MAX_SIZE	8192
+#define RC_DEV_MAX	256
 
 /* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */
 #define IR_KEYPRESS_TIMEOUT 250
@@ -38,6 +36,9 @@
 static DEFINE_SPINLOCK(rc_map_lock);
 static struct led_trigger *led_feedback;
 
+/* Used to keep track of rc devices */
+static DEFINE_IDA(rc_ida);
+
 static struct rc_map_list *seek_rc_map(const char *name)
 {
 	struct rc_map_list *map = NULL;
@@ -799,7 +800,6 @@
 	{ RC_BIT_SANYO,		"sanyo"		},
 	{ RC_BIT_SHARP,		"sharp"		},
 	{ RC_BIT_MCE_KBD,	"mce_kbd"	},
-	{ RC_BIT_LIRC,		"lirc"		},
 	{ RC_BIT_XMP,		"xmp"		},
 };
 
@@ -828,6 +828,23 @@
 		.mask = (_mask),					\
 	}
 
+static bool lirc_is_present(void)
+{
+#if defined(CONFIG_LIRC_MODULE)
+	struct module *lirc;
+
+	mutex_lock(&module_mutex);
+	lirc = find_module("lirc_dev");
+	mutex_unlock(&module_mutex);
+
+	return lirc ? true : false;
+#elif defined(CONFIG_LIRC)
+	return true;
+#else
+	return false;
+#endif
+}
+
 /**
  * show_protocols() - shows the current/wakeup IR protocol(s)
  * @device:	the device descriptor
@@ -882,6 +899,9 @@
 			allowed &= ~proto_names[i].type;
 	}
 
+	if (dev->driver_type == RC_DRIVER_IR_RAW && lirc_is_present())
+		tmp += sprintf(tmp, "[lirc] ");
+
 	if (tmp != buf)
 		tmp--;
 	*tmp = '\n';
@@ -933,8 +953,12 @@
 		}
 
 		if (i == ARRAY_SIZE(proto_names)) {
-			IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
-			return -EINVAL;
+			if (!strcasecmp(tmp, "lirc"))
+				mask = 0;
+			else {
+				IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
+				return -EINVAL;
+			}
 		}
 
 		count++;
@@ -1191,9 +1215,6 @@
 {
 	struct rc_dev *dev = to_rc_dev(device);
 
-	if (!dev || !dev->input_dev)
-		return -ENODEV;
-
 	if (dev->rc_map.name)
 		ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
 	if (dev->driver_name)
@@ -1312,7 +1333,9 @@
 	static bool raw_init = false; /* raw decoders loaded? */
 	struct rc_map *rc_map;
 	const char *path;
-	int rc, devno, attr = 0;
+	int attr = 0;
+	int minor;
+	int rc;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1332,13 +1355,13 @@
 	if (dev->close)
 		dev->input_dev->close = ir_close;
 
-	do {
-		devno = find_first_zero_bit(ir_core_dev_number,
-					    IRRCV_NUM_DEVICES);
-		/* No free device slots */
-		if (devno >= IRRCV_NUM_DEVICES)
-			return -ENOMEM;
-	} while (test_and_set_bit(devno, ir_core_dev_number));
+	minor = ida_simple_get(&rc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
+	if (minor < 0)
+		return minor;
+
+	dev->minor = minor;
+	dev_set_name(&dev->dev, "rc%u", dev->minor);
+	dev_set_drvdata(&dev->dev, dev);
 
 	dev->dev.groups = dev->sysfs_groups;
 	dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
@@ -1358,9 +1381,6 @@
 	 */
 	mutex_lock(&dev->lock);
 
-	dev->devno = devno;
-	dev_set_name(&dev->dev, "rc%ld", dev->devno);
-	dev_set_drvdata(&dev->dev, dev);
 	rc = device_add(&dev->dev);
 	if (rc)
 		goto out_unlock;
@@ -1423,8 +1443,6 @@
 
 	if (dev->change_protocol) {
 		u64 rc_type = (1ll << rc_map->rc_type);
-		if (dev->driver_type == RC_DRIVER_IR_RAW)
-			rc_type |= RC_BIT_LIRC;
 		rc = dev->change_protocol(dev, &rc_type);
 		if (rc < 0)
 			goto out_raw;
@@ -1433,8 +1451,8 @@
 
 	mutex_unlock(&dev->lock);
 
-	IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n",
-		   dev->devno,
+	IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
+		   dev->minor,
 		   dev->driver_name ? dev->driver_name : "unknown",
 		   rc_map->name ? rc_map->name : "unknown",
 		   dev->driver_type == RC_DRIVER_IR_RAW ? "raw" : "cooked");
@@ -1453,7 +1471,7 @@
 	device_del(&dev->dev);
 out_unlock:
 	mutex_unlock(&dev->lock);
-	clear_bit(dev->devno, ir_core_dev_number);
+	ida_simple_remove(&rc_ida, minor);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(rc_register_device);
@@ -1465,8 +1483,6 @@
 
 	del_timer_sync(&dev->timer_keyup);
 
-	clear_bit(dev->devno, ir_core_dev_number);
-
 	if (dev->driver_type == RC_DRIVER_IR_RAW)
 		ir_raw_event_unregister(dev);
 
@@ -1479,6 +1495,8 @@
 
 	device_del(&dev->dev);
 
+	ida_simple_remove(&rc_ida, dev->minor);
+
 	rc_free_device(dev);
 }
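
The IDA conversion above replaces an open-coded find-first-zero-bit loop with
the kernel's id allocator: ida_simple_get() returns the lowest free id in
[start, end) or a negative errno, and ida_simple_remove() releases it. A
minimal sketch with placeholder names:

	#include <linux/idr.h>

	static DEFINE_IDA(my_ida);

	static int my_register(void)
	{
		int minor = ida_simple_get(&my_ida, 0, 256, GFP_KERNEL);

		if (minor < 0)
			return minor;	/* -ENOMEM or -ENOSPC */

		/* ... register device node "rc%u" using minor ... */
		return minor;
	}

	static void my_unregister(int minor)
	{
		ida_simple_remove(&my_ida, minor);
	}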
 
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 8294af9..05998f0 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -15,7 +15,7 @@
 	select MEDIA_TUNER_MC44S803 if MEDIA_SUBDRV_AUTOSELECT
 
 menu "Customize TV tuners"
-	visible if !MEDIA_SUBDRV_AUTOSELECT
+	visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
 	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
 
 config MEDIA_TUNER_SIMPLE
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 03538f8..564a000 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -752,7 +752,6 @@
 
 static struct i2c_driver e4000_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "e4000",
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 12f916e..f4d4665 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -632,7 +632,6 @@
 
 static struct i2c_driver fc2580_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "fc2580",
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index a076c87..5c96da6 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -463,7 +463,6 @@
 
 static struct i2c_driver it913x_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "it913x",
 	},
 	.probe		= it913x_probe,
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index d4c13fe..504bfbc 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -729,7 +729,6 @@
 
 static struct i2c_driver m88rs6000t_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "m88rs6000t",
 	},
 	.probe		= m88rs6000t_probe,
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index a6245ef..5073821 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -469,7 +469,6 @@
 
 static struct i2c_driver si2157_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "si2157",
 	},
 	.probe		= si2157_probe,
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index d93e066..7b80683 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -277,7 +277,6 @@
 
 static struct i2c_driver tda18212_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tda18212",
 	},
 	.probe		= tda18212_probe,
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index d4f6ca0..9d70378 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -267,7 +267,6 @@
 
 static struct i2c_driver tua9001_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tua9001",
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 4069234..8f2e1c2 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -937,9 +937,6 @@
 	ret = airspy_ctrl_msg(s, CMD_SET_VGA_GAIN, 0, s->if_gain->val,
 			&u8tmp, 1);
 	if (ret)
-		goto err;
-err:
-	if (ret)
 		dev_dbg(s->dev, "failed=%d\n", ret);
 
 	return ret;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index c6ff896..9798160 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1875,7 +1875,7 @@
 			v4l2_fh_exit(&fh->fh);
 			kfree(fh);
 			dev->users--;
-			wake_up_interruptible_nr(&dev->open, 1);
+			wake_up_interruptible(&dev->open);
 			return 0;
 		}
 
@@ -1908,7 +1908,7 @@
 	}
 	v4l2_fh_exit(&fh->fh);
 	kfree(fh);
-	wake_up_interruptible_nr(&dev->open, 1);
+	wake_up_interruptible(&dev->open);
 	return 0;
 }
 
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 4cc55b3..3721ee6 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -348,15 +348,16 @@
 		switch (ibuf[0]) {
 		case 0xaa:
 			debug_data_snipet(1, "INT Remote data snipet", ibuf);
-			if ((ibuf[4] + ibuf[5]) == 0xff) {
-				key = RC_SCANCODE_NECX((ibuf[2] ^ 0xff) << 8 |
-						       (ibuf[3] > 0) ? (ibuf[3] ^ 0xff) : 0,
-						       ibuf[5]);
-				deb_info(1, "INT Key =%08x", key);
-				if (adap_to_d(adap)->rc_dev != NULL)
-					rc_keydown(adap_to_d(adap)->rc_dev,
-						   RC_TYPE_NEC, key, 0);
-			}
+			if (!adap_to_d(adap)->rc_dev)
+				break;
+
+			key = RC_SCANCODE_NEC32(ibuf[2] << 24 |
+						ibuf[3] << 16 |
+						ibuf[4] << 8  |
+						ibuf[5]);
+
+			deb_info(1, "INT Key = 0x%08x", key);
+			rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC, key, 0);
 			break;
 		case 0xbb:
 			switch (st->tuner_config) {
@@ -1344,7 +1345,7 @@
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("2.06");
+MODULE_VERSION("2.07");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(LME2510_C_S7395);
 MODULE_FIRMWARE(LME2510_C_LG);
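
The hunk above stops splitting the NEC payload into an address/command pair and instead packs all four interrupt bytes into one 32-bit NEC32 scancode. A standalone sketch of the packing, with hypothetical payload bytes (RC_SCANCODE_NEC32 in the kernel reduces to the same shift-or):

#include <stdint.h>
#include <stdio.h>

static uint32_t nec32_scancode(uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
{
	return (uint32_t)b2 << 24 | (uint32_t)b3 << 16 |
	       (uint32_t)b4 << 8 | (uint32_t)b5;
}

int main(void)
{
	/* hypothetical ibuf[2..5] payload from the interrupt urb */
	printf("INT Key = 0x%08x\n", nec32_scancode(0x40, 0xbf, 0x12, 0xed));
	return 0;
}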
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index d17618f..ec397c4 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -611,7 +611,7 @@
 	return 0;
 
 failed:
-	memset(mac, 0, 6);
+	eth_zero_addr(mac);
 
 	return ret;
 }
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 03f334d..6c3c477 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -707,7 +707,7 @@
 
 			.stream = {
 				.type = USB_ISOC,
-				.count = 8,
+				.count = 4,
 				.endpoint = 0x2,
 				.u = {
 					.isoc = {
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a382483..357be76 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -808,10 +808,6 @@
 	.gate = TDA18271_GATE_DIGITAL,
 };
 
-static const struct a8293_config em28xx_a8293_config = {
-	.i2c_addr = 0x08, /* (0x10 >> 1) */
-};
-
 static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
 	.demod_address = (0x1e >> 1),
 	.disable_i2c_gate_ctrl = 1,
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 5c2a495..1466db1 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -629,7 +629,6 @@
 
 static struct i2c_driver s2250_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "s2250",
 	},
 	.probe		= s2250_probe,
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
index 7cbc3a0..bf6b215 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
@@ -177,7 +177,7 @@
 	__s32 vflip, hflip;
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	while (!schedule_timeout(100)) {
+	while (!schedule_timeout(msecs_to_jiffies(100))) {
 		if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
 			break;
 
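
The m5602 hunk above matters because schedule_timeout() takes jiffies: a bare 100 slept for 100 ticks, whose real duration depends on CONFIG_HZ, while msecs_to_jiffies(100) means 100 ms everywhere. A standalone sketch of the difference (the helper below is a simplified model of the kernel's rounding, not its exact implementation):

#include <stdio.h>

/* simplified model of the kernel helper: round up to whole ticks */
static unsigned long msecs_to_jiffies_model(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int hz[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++)
		printf("HZ=%-4u: raw 100 ticks = %u ms, msecs_to_jiffies(100) = %lu ticks\n",
		       hz[i], 100 * 1000 / hz[i],
		       msecs_to_jiffies_model(100, hz[i]));
	return 0;
}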
diff --git a/drivers/media/usb/gspca/sn9c2028.c b/drivers/media/usb/gspca/sn9c2028.c
index c75b738..4f2050a 100644
--- a/drivers/media/usb/gspca/sn9c2028.c
+++ b/drivers/media/usb/gspca/sn9c2028.c
@@ -140,7 +140,7 @@
 		status = sn9c2028_read1(gspca_dev);
 	if (status < 0) {
 		pr_err("long command status read error %d\n", status);
-		return (status < 0) ? status : -EIO;
+		return status;
 	}
 
 	memset(reading, 0, 4);
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 03504dc..1b6836f 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -162,7 +162,7 @@
 {
 	struct stk1160 *dev = container_of(v4l2_dev, struct stk1160, v4l2_dev);
 
-	stk1160_info("releasing all resources\n");
+	stk1160_dbg("releasing all resources\n");
 
 	stk1160_i2c_unregister(dev);
 
@@ -363,9 +363,6 @@
 	dev->sd_saa7115 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
 		"saa7115_auto", 0, saa7113_addrs);
 
-	stk1160_info("driver ver %s successfully loaded\n",
-		STK1160_VERSION);
-
 	/* i2c reset saa711x */
 	v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
 	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
diff --git a/drivers/media/usb/stk1160/stk1160-reg.h b/drivers/media/usb/stk1160/stk1160-reg.h
index 3e49da6..81ff3a1 100644
--- a/drivers/media/usb/stk1160/stk1160-reg.h
+++ b/drivers/media/usb/stk1160/stk1160-reg.h
@@ -33,6 +33,40 @@
  */
 #define STK1160_DCTRL			0x100
 
+/*
+ * Decimation Control Register:
+ * Byte 104: Horizontal Decimation Line Unit Count
+ * Byte 105: Vertical Decimation Line Unit Count
+ * Byte 106: Decimation Control
+ * Bit 0 - Horizontal Decimation Control
+ *   0 Horizontal decimation is disabled.
+ *   1 Horizontal decimation is enabled.
+ * Bit 1 - Decimates Half or More Column
+ *   0 Decimates less than half from original column,
+ *     send count unit (0x104) before each unit skipped.
+ *   1 Decimates half or more from original column,
+ *     skip count unit (0x104) before each unit sent.
+ * Bit 2 - Vertical Decimation Control
+ *   0 Vertical decimation is disabled.
+ *   1 Vertical decimation is enabled.
+ * Bit 3 - Vertical Greater or Equal to Half
+ *   0 Decimates less than half from original row,
+ *     send count unit (0x105) before each unit skipped.
+ *   1 Decimates half or more from original row,
+ *     skip count unit (0x105) before each unit sent.
+ * Bit 4 - Decimation Unit
+ *   0 Decimation will work with 2 rows or columns per unit.
+ *   1 Decimation will work with 4 rows or columns per unit.
+ */
+#define STK1160_DMCTRL_H_UNITS		0x104
+#define STK1160_DMCTRL_V_UNITS		0x105
+#define STK1160_DMCTRL			0x106
+#define  STK1160_H_DEC_EN		BIT(0)
+#define  STK1160_H_DEC_MODE		BIT(1)
+#define  STK1160_V_DEC_EN		BIT(2)
+#define  STK1160_V_DEC_MODE		BIT(3)
+#define  STK1160_DEC_UNIT_SIZE		BIT(4)
+
 /* Capture Frame Start Position */
 #define STK116_CFSPO			0x110
 #define STK116_CFSPO_STX_L		0x110
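
To make the register map above concrete, here is a standalone sketch that composes one plausible STK1160_DMCTRL value: horizontal decimation enabled, skip mode, 4-pixel units. The scenario is illustrative and not taken from the driver:

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define STK1160_H_DEC_EN	BIT(0)
#define STK1160_H_DEC_MODE	BIT(1)
#define STK1160_V_DEC_EN	BIT(2)
#define STK1160_V_DEC_MODE	BIT(3)
#define STK1160_DEC_UNIT_SIZE	BIT(4)

int main(void)
{
	unsigned int val = STK1160_DEC_UNIT_SIZE |	/* 4 rows/cols per unit */
			   STK1160_H_DEC_EN |		/* decimate columns */
			   STK1160_H_DEC_MODE;		/* skip n units per unit sent */

	printf("DMCTRL = 0x%02x\n", val);	/* prints 0x13 */
	return 0;
}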
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 4d313ed..e12b103 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -42,6 +42,17 @@
 module_param(keep_buffers, bool, 0644);
 MODULE_PARM_DESC(keep_buffers, "don't release buffers upon stop streaming");
 
+enum stk1160_decimate_mode {
+	STK1160_DECIMATE_MORE_THAN_HALF,
+	STK1160_DECIMATE_LESS_THAN_HALF,
+};
+
+struct stk1160_decimate_ctrl {
+	bool col_en, row_en;
+	enum stk1160_decimate_mode col_mode, row_mode;
+	unsigned int col_n, row_n;
+};
+
 /* supported video standards */
 static struct stk1160_fmt format[] = {
 	{
@@ -51,6 +62,19 @@
 	}
 };
 
+/*
+ * Helper to find the smallest divisor >= y that divides x evenly, returning
+ * the quotient. This guarantees valid decimation unit counts.
+ */
+static unsigned int
+div_round_integer(unsigned int x, unsigned int y)
+{
+	for (;; y++) {
+		if (x % y == 0)
+			return x / y;
+	}
+}
+
 static void stk1160_set_std(struct stk1160 *dev)
 {
 	int i;
@@ -106,6 +130,41 @@
 
 }
 
+static void stk1160_set_fmt(struct stk1160 *dev,
+			    struct stk1160_decimate_ctrl *ctrl)
+{
+	u32 val = 0;
+
+	if (ctrl) {
+		/*
+		 * Since the format is UYVY, the device must skip or send
+		 * a number of rows/columns multiple of four. This way, the
+		 * colour format is preserved. The STK1160_DEC_UNIT_SIZE bit
+		 * does exactly this.
+		 */
+		val |= STK1160_DEC_UNIT_SIZE;
+		val |= ctrl->col_en ? STK1160_H_DEC_EN : 0;
+		val |= ctrl->row_en ? STK1160_V_DEC_EN : 0;
+		val |= ctrl->col_mode ==
+			STK1160_DECIMATE_MORE_THAN_HALF ?
+			STK1160_H_DEC_MODE : 0;
+		val |= ctrl->row_mode ==
+			STK1160_DECIMATE_MORE_THAN_HALF ?
+			STK1160_V_DEC_MODE : 0;
+
+		/* Horizontal count units */
+		stk1160_write_reg(dev, STK1160_DMCTRL_H_UNITS, ctrl->col_n);
+		/* Vertical count units */
+		stk1160_write_reg(dev, STK1160_DMCTRL_V_UNITS, ctrl->row_n);
+
+		stk1160_dbg("decimate 0x%x, column units %d, row units %d\n",
+			    val, ctrl->col_n, ctrl->row_n);
+	}
+
+	/* Decimation control */
+	stk1160_write_reg(dev, STK1160_DMCTRL, val);
+}
+
 /*
  * Set a new alternate setting.
  * Returns true if dev->max_pkt_size has changed, false otherwise.
@@ -136,7 +195,7 @@
 			dev->alt = i;
 	}
 
-	stk1160_info("setting alternate %d\n", dev->alt);
+	stk1160_dbg("setting alternate %d\n", dev->alt);
 
 	if (dev->alt != prev_alt) {
 		stk1160_dbg("minimum isoc packet size: %u (alt=%d)\n",
@@ -194,6 +253,8 @@
 	/* Start saa711x */
 	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
 
+	dev->sequence = 0;
+
 	/* Start stk1160 */
 	stk1160_write_reg(dev, STK1160_DCTRL, 0xb3);
 	stk1160_write_reg(dev, STK1160_DCTRL+3, 0x00);
@@ -224,7 +285,7 @@
 
 	/* set alternate 0 */
 	dev->alt = 0;
-	stk1160_info("setting alternate %d\n", dev->alt);
+	stk1160_dbg("setting alternate %d\n", dev->alt);
 	usb_set_interface(dev->udev, 0, 0);
 
 	/* Stop stk1160 */
@@ -321,27 +382,115 @@
 	return 0;
 }
 
+static int stk1160_try_fmt(struct stk1160 *dev, struct v4l2_format *f,
+			    struct stk1160_decimate_ctrl *ctrl)
+{
+	unsigned int width, height;
+	unsigned int base_width, base_height;
+	unsigned int col_n, row_n;
+	enum stk1160_decimate_mode col_mode, row_mode;
+	bool col_en, row_en;
+
+	base_width = 720;
+	base_height = (dev->norm & V4L2_STD_525_60) ? 480 : 576;
+
+	/* Minimum width and height is 5% the frame size */
+	width = clamp_t(unsigned int, f->fmt.pix.width,
+			base_width / 20, base_width);
+	height = clamp_t(unsigned int, f->fmt.pix.height,
+			base_height / 20, base_height);
+
+	/* Let's set default no decimation values */
+	col_n = 0;
+	row_n = 0;
+	col_en = false;
+	row_en = false;
+	f->fmt.pix.width = base_width;
+	f->fmt.pix.height = base_height;
+	row_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+	col_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+
+	if (width < base_width && width > base_width / 2) {
+		/*
+		 * The device will send count units for each
+		 * unit skipped. This means count unit is:
+		 *
+		 * n = width / (frame width - width)
+		 *
+		 * And the width is:
+		 *
+		 * width = (n / (n + 1)) * frame width
+		 */
+		col_n = div_round_integer(width, base_width - width);
+		if (col_n > 0 && col_n <= 255) {
+			col_en = true;
+			col_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+			f->fmt.pix.width = (base_width * col_n) / (col_n + 1);
+		}
+
+	} else if (width <= base_width / 2) {
+
+		/*
+		 * The device will skip count units for each
+		 * unit sent. This means count is:
+		 *
+		 * n = (frame width / width) - 1
+		 *
+		 * And the width is:
+		 *
+		 * width = frame width / (n + 1)
+		 */
+		col_n = div_round_integer(base_width, width) - 1;
+		if (col_n > 0 && col_n <= 255) {
+			col_en = true;
+			col_mode = STK1160_DECIMATE_MORE_THAN_HALF;
+			f->fmt.pix.width = base_width / (col_n + 1);
+		}
+	}
+
+	if (height < base_height && height > base_height / 2) {
+		row_n = div_round_integer(height, base_height - height);
+		if (row_n > 0 && row_n <= 255) {
+			row_en = true;
+			row_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+			f->fmt.pix.height = (base_height * row_n) / (row_n + 1);
+		}
+
+	} else if (height <= base_height / 2) {
+		row_n = div_round_integer(base_height, height) - 1;
+		if (row_n > 0 && row_n <= 255) {
+			row_en = true;
+			row_mode = STK1160_DECIMATE_MORE_THAN_HALF;
+			f->fmt.pix.height = base_height / (row_n + 1);
+		}
+	}
+
+	f->fmt.pix.pixelformat = dev->fmt->fourcc;
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+	f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	if (ctrl) {
+		ctrl->col_en = col_en;
+		ctrl->col_n = col_n;
+		ctrl->col_mode = col_mode;
+		ctrl->row_en = row_en;
+		ctrl->row_n = row_n;
+		ctrl->row_mode = row_mode;
+	}
+
+	stk1160_dbg("width %d, height %d\n",
+		    f->fmt.pix.width, f->fmt.pix.height);
+	return 0;
+}
+
 static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
-			struct v4l2_format *f)
+				  struct v4l2_format *f)
 {
 	struct stk1160 *dev = video_drvdata(file);
 
-	/*
-	 * User can't choose size at his own will,
-	 * so we just return him the current size chosen
-	 * at standard selection.
-	 * TODO: Implement frame scaling?
-	 */
-
-	f->fmt.pix.pixelformat = dev->fmt->fourcc;
-	f->fmt.pix.width = dev->width;
-	f->fmt.pix.height = dev->height;
-	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
-	f->fmt.pix.bytesperline = dev->width * 2;
-	f->fmt.pix.sizeimage = dev->height * f->fmt.pix.bytesperline;
-	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
-	return 0;
+	return stk1160_try_fmt(dev, f, NULL);
 }
 
 static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
@@ -349,13 +498,18 @@
 {
 	struct stk1160 *dev = video_drvdata(file);
 	struct vb2_queue *q = &dev->vb_vidq;
+	struct stk1160_decimate_ctrl ctrl;
+	int rc;
 
 	if (vb2_is_busy(q))
 		return -EBUSY;
 
-	vidioc_try_fmt_vid_cap(file, priv, f);
-
-	/* We don't support any format changes */
+	rc = stk1160_try_fmt(dev, f, &ctrl);
+	if (rc < 0)
+		return rc;
+	dev->width = f->fmt.pix.width;
+	dev->height = f->fmt.pix.height;
+	stk1160_set_fmt(dev, &ctrl);
 
 	return 0;
 }
@@ -391,22 +545,15 @@
 		return -ENODEV;
 
 	/* We need to set this now, before we call stk1160_set_std */
+	dev->width = 720;
+	dev->height = (norm & V4L2_STD_525_60) ? 480 : 576;
 	dev->norm = norm;
 
-	/* This is taken from saa7115 video decoder */
-	if (dev->norm & V4L2_STD_525_60) {
-		dev->width = 720;
-		dev->height = 480;
-	} else if (dev->norm & V4L2_STD_625_50) {
-		dev->width = 720;
-		dev->height = 576;
-	} else {
-		stk1160_err("invalid standard\n");
-		return -EINVAL;
-	}
-
 	stk1160_set_std(dev);
 
+	/* Calling with NULL disables frame decimation */
+	stk1160_set_fmt(dev, NULL);
+
 	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_std,
 			dev->norm);
 
@@ -538,8 +685,8 @@
 
 	sizes[0] = size;
 
-	stk1160_info("%s: buffer count %d, each %ld bytes\n",
-			__func__, *nbuffers, size);
+	stk1160_dbg("%s: buffer count %d, each %ld bytes\n",
+		    __func__, *nbuffers, size);
 
 	return 0;
 }
@@ -623,8 +770,8 @@
 			struct stk1160_buffer, list);
 		list_del(&buf->list);
 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
-		stk1160_info("buffer [%p/%d] aborted\n",
-				buf, buf->vb.v4l2_buf.index);
+		stk1160_dbg("buffer [%p/%d] aborted\n",
+			    buf, buf->vb.v4l2_buf.index);
 	}
 
 	/* It's important to release the current buffer */
@@ -633,8 +780,8 @@
 		dev->isoc_ctl.buf = NULL;
 
 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
-		stk1160_info("buffer [%p/%d] aborted\n",
-				buf, buf->vb.v4l2_buf.index);
+		stk1160_dbg("buffer [%p/%d] aborted\n",
+			    buf, buf->vb.v4l2_buf.index);
 	}
 	spin_unlock_irqrestore(&dev->buf_lock, flags);
 }
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 39f1aae..940c3ea 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -96,9 +96,7 @@
 {
 	struct stk1160_buffer *buf = dev->isoc_ctl.buf;
 
-	dev->field_count++;
-
-	buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
+	buf->vb.v4l2_buf.sequence = dev->sequence++;
 	buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
 	buf->vb.v4l2_buf.bytesused = buf->bytesused;
 	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index abdea48..72cc8e8 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -58,7 +58,6 @@
  * new drivers should use.
  *
  */
-#define DEBUG
 #ifdef DEBUG
 #define stk1160_dbg(fmt, args...) \
 	printk(KERN_DEBUG "stk1160: " fmt,  ## args)
@@ -151,8 +150,7 @@
 	v4l2_std_id norm;	  /* current norm */
 	struct stk1160_fmt *fmt;  /* selected format */
 
-	unsigned int field_count; /* not sure ??? */
-	enum v4l2_field field;    /* also not sure :/ */
+	unsigned int sequence;
 
 	/* i2c i/o */
 	struct i2c_adapter i2c_adap;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 322b53a..7c3a7c5 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -593,14 +593,9 @@
 
 static void swap_bytes(u8 *b, int length)
 {
-	u8 c;
-
 	length -= length % 2;
-	for (; length; b += 2, length -= 2) {
-		c = *b;
-		*b = *(b + 1);
-		*(b + 1) = c;
-	}
+	for (; length; b += 2, length -= 2)
+		swap(*b, *(b + 1));
 }
 
 static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 7c04ef6..dc3b4d5 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1367,7 +1367,7 @@
 int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg)
 {
 	int err_code = 0;
-	unsigned char buffer[1];
+	unsigned char *buffer = usbvision->ctrl_urb_buffer;
 
 	if (!USBVISION_IS_OPERATIONAL(usbvision))
 		return -1;
@@ -1401,10 +1401,12 @@
 	if (!USBVISION_IS_OPERATIONAL(usbvision))
 		return 0;
 
+	usbvision->ctrl_urb_buffer[0] = value;
 	err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
 				USBVISION_OP_CODE,
 				USB_DIR_OUT | USB_TYPE_VENDOR |
-				USB_RECIP_ENDPOINT, 0, (__u16) reg, &value, 1, HZ);
+				USB_RECIP_ENDPOINT, 0, (__u16) reg,
+				usbvision->ctrl_urb_buffer, 1, HZ);
 
 	if (err_code < 0) {
 		dev_err(&usbvision->dev->dev,
@@ -1596,7 +1598,7 @@
 		{ 0x27, 0x00, 0x00 }, { 0x28, 0x00, 0x00 }, { 0x29, 0x00, 0x00 }, { 0x08, 0x80, 0x60 },
 		{ 0x0f, 0x2d, 0x24 }, { 0x0c, 0x80, 0x80 }
 	};
-	char value[3];
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 
 	/* the only difference between PAL and NTSC init_values */
 	if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_NTSC)
@@ -1635,8 +1637,8 @@
 static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format)
 {
 	static const char proc[] = "usbvision_set_video_format";
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 	int rc;
-	unsigned char value[2];
 
 	if (!USBVISION_IS_OPERATIONAL(usbvision))
 		return 0;
@@ -1677,7 +1679,7 @@
 	int err_code = 0;
 	int usb_width, usb_height;
 	unsigned int frame_rate = 0, frame_drop = 0;
-	unsigned char value[4];
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 
 	if (!USBVISION_IS_OPERATIONAL(usbvision))
 		return 0;
@@ -1789,10 +1791,6 @@
 		usbvision->num_frames--;
 	}
 
-	spin_lock_init(&usbvision->queue_lock);
-	init_waitqueue_head(&usbvision->wait_frame);
-	init_waitqueue_head(&usbvision->wait_stream);
-
 	/* Allocate all buffers */
 	for (i = 0; i < usbvision->num_frames; i++) {
 		usbvision->frame[i].index = i;
@@ -1872,7 +1870,7 @@
 {
 	static const char proc[] = "usbvision_set_compresion_params: ";
 	int rc;
-	unsigned char value[6];
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 
 	value[0] = 0x0F;    /* Intra-Compression cycle */
 	value[1] = 0x01;    /* Reg.45 one line per strip */
@@ -1946,7 +1944,7 @@
 {
 	static const char proc[] = "usbvision_set_input: ";
 	int rc;
-	unsigned char value[8];
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 	unsigned char dvi_yuv_value;
 
 	if (!USBVISION_IS_OPERATIONAL(usbvision))
@@ -2062,8 +2060,8 @@
 
 static int usbvision_set_dram_settings(struct usb_usbvision *usbvision)
 {
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 	int rc;
-	unsigned char value[8];
 
 	if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) {
 		value[0] = 0x42;
@@ -2161,55 +2159,6 @@
 
 
 /*
- * usbvision timer stuff
- */
-
-/* to call usbvision_power_off from task queue */
-static void call_usbvision_power_off(struct work_struct *work)
-{
-	struct usb_usbvision *usbvision = container_of(work, struct usb_usbvision, power_off_work);
-
-	PDEBUG(DBG_FUNC, "");
-	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
-		return;
-
-	if (usbvision->user == 0) {
-		usbvision_i2c_unregister(usbvision);
-
-		usbvision_power_off(usbvision);
-		usbvision->initialized = 0;
-	}
-	mutex_unlock(&usbvision->v4l2_lock);
-}
-
-static void usbvision_power_off_timer(unsigned long data)
-{
-	struct usb_usbvision *usbvision = (void *)data;
-
-	PDEBUG(DBG_FUNC, "");
-	del_timer(&usbvision->power_off_timer);
-	INIT_WORK(&usbvision->power_off_work, call_usbvision_power_off);
-	(void) schedule_work(&usbvision->power_off_work);
-}
-
-void usbvision_init_power_off_timer(struct usb_usbvision *usbvision)
-{
-	setup_timer(&usbvision->power_off_timer, usbvision_power_off_timer,
-		    (unsigned long)usbvision);
-}
-
-void usbvision_set_power_off_timer(struct usb_usbvision *usbvision)
-{
-	mod_timer(&usbvision->power_off_timer, jiffies + USBVISION_POWEROFF_TIME);
-}
-
-void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision)
-{
-	if (timer_pending(&usbvision->power_off_timer))
-		del_timer(&usbvision->power_off_timer);
-}
-
-/*
  * usbvision_begin_streaming()
  * Bit 7 must be set to 0, otherwise incoming frames are dropped; the meaning
  * of the remaining bits is unknown
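
The recurring change in this file, replacing on-stack `unsigned char value[N]` arrays with usbvision->ctrl_urb_buffer, follows from usb_control_msg() requiring DMA-able (kmalloc'd) memory rather than stack storage. A hedged sketch of the pattern; the struct and function names are illustrative, and USBVISION_OP_CODE is assumed from the driver's header:

#include <linux/usb.h>

struct demo_dev {
	struct usb_device *udev;
	unsigned char *ctrl_urb_buffer;	/* kmalloc'd once at probe time */
};

static int demo_write_reg(struct demo_dev *d, u16 reg, u8 value)
{
	d->ctrl_urb_buffer[0] = value;	/* never &value: the stack is not DMA-safe */
	return usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 1),
			       USBVISION_OP_CODE,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			       0, reg, d->ctrl_urb_buffer, 1, HZ);
}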
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 26dbcb1..120de2e 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -343,7 +343,7 @@
 {
 	int rc, retries;
 	int i;
-	unsigned char value[6];
+	unsigned char *value = usbvision->ctrl_urb_buffer;
 	unsigned char ser_cont;
 
 	ser_cont = (len & 0x07) | 0x10;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 1c6d31f..b693206 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -62,6 +62,7 @@
 #include <media/saa7115.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
 #include <media/tuner.h>
 
 #include <linux/workqueue.h>
@@ -122,8 +123,6 @@
 static int isoc_mode = ISOC_MODE_COMPRESS;
 /* Set the default Debug Mode of the device driver */
 static int video_debug;
-/* Set the default device to power on at startup */
-static int power_on_at_open = 1;
 /* Sequential Number of Video Device */
 static int video_nr = -1;
 /* Sequential Number of Radio Device */
@@ -134,13 +133,11 @@
 /* Showing parameters under SYSFS */
 module_param(isoc_mode, int, 0444);
 module_param(video_debug, int, 0444);
-module_param(power_on_at_open, int, 0444);
 module_param(video_nr, int, 0444);
 module_param(radio_nr, int, 0444);
 
 MODULE_PARM_DESC(isoc_mode, " Set the default format for ISOC endpoint.  Default: 0x60 (Compression On)");
 MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver.  Default: 0 (Off)");
-MODULE_PARM_DESC(power_on_at_open, " Set the default device to power on when device is opened.  Default: 1 (On)");
 MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX).  Default: -1 (autodetect)");
 MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX).  Default: -1 (autodetect)");
 
@@ -351,11 +348,14 @@
 
 	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
 		return -ERESTARTSYS;
-	usbvision_reset_power_off_timer(usbvision);
 
-	if (usbvision->user)
+	if (usbvision->user) {
 		err_code = -EBUSY;
-	else {
+	} else {
+		err_code = v4l2_fh_open(file);
+		if (err_code)
+			goto unlock;
+
 		/* Allocate memory for the scratch ring buffer */
 		err_code = usbvision_scratch_alloc(usbvision);
 		if (isoc_mode == ISOC_MODE_COMPRESS) {
@@ -372,11 +372,6 @@
 
 	/* If so far no errors then we shall start the camera */
 	if (!err_code) {
-		if (usbvision->power == 0) {
-			usbvision_power_on(usbvision);
-			usbvision_i2c_register(usbvision);
-		}
-
 		/* Send init sequence only once, it's large! */
 		if (!usbvision->initialized) {
 			int setup_ok = 0;
@@ -392,18 +387,14 @@
 			err_code = usbvision_init_isoc(usbvision);
 			/* device must be initialized before isoc transfer */
 			usbvision_muxsel(usbvision, 0);
+
+			/* prepare queues */
+			usbvision_empty_framequeues(usbvision);
 			usbvision->user++;
-		} else {
-			if (power_on_at_open) {
-				usbvision_i2c_unregister(usbvision);
-				usbvision_power_off(usbvision);
-				usbvision->initialized = 0;
-			}
 		}
 	}
 
-	/* prepare queues */
-	usbvision_empty_framequeues(usbvision);
+unlock:
 	mutex_unlock(&usbvision->v4l2_lock);
 
 	PDEBUG(DBG_IO, "success");
@@ -435,23 +426,16 @@
 	usbvision_scratch_free(usbvision);
 
 	usbvision->user--;
-
-	if (power_on_at_open) {
-		/* power off in a little while
-		   to avoid off/on every close/open short sequences */
-		usbvision_set_power_off_timer(usbvision);
-		usbvision->initialized = 0;
-	}
+	mutex_unlock(&usbvision->v4l2_lock);
 
 	if (usbvision->remove_pending) {
 		printk(KERN_INFO "%s: Final disconnect\n", __func__);
 		usbvision_release(usbvision);
 		return 0;
 	}
-	mutex_unlock(&usbvision->v4l2_lock);
 
 	PDEBUG(DBG_IO, "success");
-	return 0;
+	return v4l2_fh_release(file);
 }
 
 
@@ -503,18 +487,24 @@
 					struct v4l2_capability *vc)
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
+	struct video_device *vdev = video_devdata(file);
 
 	strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
 	strlcpy(vc->card,
 		usbvision_device_data[usbvision->dev_model].model_string,
 		sizeof(vc->card));
 	usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
-	vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
-		V4L2_CAP_AUDIO |
-		V4L2_CAP_READWRITE |
-		V4L2_CAP_STREAMING |
-		(usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
-	vc->capabilities = vc->device_caps | V4L2_CAP_DEVICE_CAPS;
+	vc->device_caps = usbvision->have_tuner ? V4L2_CAP_TUNER : 0;
+	if (vdev->vfl_type == VFL_TYPE_GRABBER)
+		vc->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
+			V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+	else
+		vc->device_caps |= V4L2_CAP_RADIO;
+
+	vc->capabilities = vc->device_caps | V4L2_CAP_VIDEO_CAPTURE |
+		V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+	if (usbvision_device_data[usbvision->dev_model].radio)
+		vc->capabilities |= V4L2_CAP_RADIO;
 	return 0;
 }
 
@@ -540,7 +530,6 @@
 		} else {
 			strcpy(vi->name, "Television");
 			vi->type = V4L2_INPUT_TYPE_TUNER;
-			vi->audioset = 1;
 			vi->tuner = chan;
 			vi->std = USBVISION_NORMS;
 		}
@@ -551,7 +540,7 @@
 			strcpy(vi->name, "Green Video Input");
 		else
 			strcpy(vi->name, "Composite Video Input");
-		vi->std = V4L2_STD_PAL;
+		vi->std = USBVISION_NORMS;
 		break;
 	case 2:
 		vi->type = V4L2_INPUT_TYPE_CAMERA;
@@ -559,12 +548,12 @@
 			strcpy(vi->name, "Yellow Video Input");
 		else
 			strcpy(vi->name, "S-Video Input");
-		vi->std = V4L2_STD_PAL;
+		vi->std = USBVISION_NORMS;
 		break;
 	case 3:
 		vi->type = V4L2_INPUT_TYPE_CAMERA;
 		strcpy(vi->name, "Red Video Input");
-		vi->std = V4L2_STD_PAL;
+		vi->std = USBVISION_NORMS;
 		break;
 	}
 	return 0;
@@ -619,14 +608,13 @@
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
 
-	if (!usbvision->have_tuner || vt->index)	/* Only tuner 0 */
+	if (vt->index)	/* Only tuner 0 */
 		return -EINVAL;
-	if (usbvision->radio) {
+	if (vt->type == V4L2_TUNER_RADIO)
 		strcpy(vt->name, "Radio");
-		vt->type = V4L2_TUNER_RADIO;
-	} else {
+	else
 		strcpy(vt->name, "Television");
-	}
+
 	/* Let clients fill in the remainder of this struct */
 	call_all(usbvision, tuner, g_tuner, vt);
 
@@ -638,8 +626,8 @@
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
 
-	/* Only no or one tuner for now */
-	if (!usbvision->have_tuner || vt->index)
+	/* Only one tuner for now */
+	if (vt->index)
 		return -EINVAL;
 	/* let clients handle this */
 	call_all(usbvision, tuner, s_tuner, vt);
@@ -652,12 +640,13 @@
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
 
-	freq->tuner = 0; /* Only one tuner */
-	if (usbvision->radio)
-		freq->type = V4L2_TUNER_RADIO;
+	/* Only one tuner */
+	if (freq->tuner)
+		return -EINVAL;
+	if (freq->type == V4L2_TUNER_RADIO)
+		freq->frequency = usbvision->radio_freq;
 	else
-		freq->type = V4L2_TUNER_ANALOG_TV;
-	freq->frequency = usbvision->freq;
+		freq->frequency = usbvision->tv_freq;
 
 	return 0;
 }
@@ -666,68 +655,22 @@
 				const struct v4l2_frequency *freq)
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
+	struct v4l2_frequency new_freq = *freq;
 
-	/* Only no or one tuner for now */
-	if (!usbvision->have_tuner || freq->tuner)
+	/* Only one tuner for now */
+	if (freq->tuner)
 		return -EINVAL;
 
-	usbvision->freq = freq->frequency;
 	call_all(usbvision, tuner, s_frequency, freq);
-
-	return 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
-{
-	struct usb_usbvision *usbvision = video_drvdata(file);
-
-	if (usbvision->radio)
-		strcpy(a->name, "Radio");
+	call_all(usbvision, tuner, g_frequency, &new_freq);
+	if (freq->type == V4L2_TUNER_RADIO)
+		usbvision->radio_freq = new_freq.frequency;
 	else
-		strcpy(a->name, "TV");
+		usbvision->tv_freq = new_freq.frequency;
 
 	return 0;
 }
 
-static int vidioc_s_audio(struct file *file, void *fh,
-			  const struct v4l2_audio *a)
-{
-	if (a->index)
-		return -EINVAL;
-	return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
-			    struct v4l2_queryctrl *ctrl)
-{
-	struct usb_usbvision *usbvision = video_drvdata(file);
-
-	call_all(usbvision, core, queryctrl, ctrl);
-
-	if (!ctrl->type)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
-				struct v4l2_control *ctrl)
-{
-	struct usb_usbvision *usbvision = video_drvdata(file);
-
-	call_all(usbvision, core, g_ctrl, ctrl);
-	return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
-				struct v4l2_control *ctrl)
-{
-	struct usb_usbvision *usbvision = video_drvdata(file);
-
-	call_all(usbvision, core, s_ctrl, ctrl);
-	return 0;
-}
-
 static int vidioc_reqbufs(struct file *file,
 			   void *priv, struct v4l2_requestbuffers *vr)
 {
@@ -937,6 +880,8 @@
 	vf->fmt.pix.bytesperline = vf->fmt.pix.width*
 		usbvision->palette.bytes_per_pixel;
 	vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
+	vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
 
 	return 0;
 }
@@ -1167,20 +1112,15 @@
 
 	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
 		return -ERESTARTSYS;
+	err_code = v4l2_fh_open(file);
+	if (err_code)
+		goto out;
 	if (usbvision->user) {
 		dev_err(&usbvision->rdev.dev,
 			"%s: Someone tried to open an already opened USBVision Radio!\n",
 				__func__);
 		err_code = -EBUSY;
 	} else {
-		if (power_on_at_open) {
-			usbvision_reset_power_off_timer(usbvision);
-			if (usbvision->power == 0) {
-				usbvision_power_on(usbvision);
-				usbvision_i2c_register(usbvision);
-			}
-		}
-
 		/* Alternate interface 1 has the biggest frame size */
 		err_code = usbvision_set_alternate(usbvision);
 		if (err_code < 0) {
@@ -1195,14 +1135,6 @@
 		usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
 		usbvision->user++;
 	}
-
-	if (err_code) {
-		if (power_on_at_open) {
-			usbvision_i2c_unregister(usbvision);
-			usbvision_power_off(usbvision);
-			usbvision->initialized = 0;
-		}
-	}
 out:
 	mutex_unlock(&usbvision->v4l2_lock);
 	return err_code;
@@ -1212,34 +1144,29 @@
 static int usbvision_radio_close(struct file *file)
 {
 	struct usb_usbvision *usbvision = video_drvdata(file);
-	int err_code = 0;
 
 	PDEBUG(DBG_IO, "");
 
 	mutex_lock(&usbvision->v4l2_lock);
 	/* Set packet size to 0 */
 	usbvision->iface_alt = 0;
-	err_code = usb_set_interface(usbvision->dev, usbvision->iface,
+	usb_set_interface(usbvision->dev, usbvision->iface,
 				    usbvision->iface_alt);
 
 	usbvision_audio_off(usbvision);
 	usbvision->radio = 0;
 	usbvision->user--;
 
-	if (power_on_at_open) {
-		usbvision_set_power_off_timer(usbvision);
-		usbvision->initialized = 0;
-	}
-
 	if (usbvision->remove_pending) {
 		printk(KERN_INFO "%s: Final disconnect\n", __func__);
+		v4l2_fh_release(file);
 		usbvision_release(usbvision);
-		return err_code;
+		return 0;
 	}
 
 	mutex_unlock(&usbvision->v4l2_lock);
 	PDEBUG(DBG_IO, "success");
-	return err_code;
+	return v4l2_fh_release(file);
 }
 
 /* Video registration stuff */
@@ -1252,7 +1179,6 @@
 	.read		= usbvision_v4l2_read,
 	.mmap		= usbvision_v4l2_mmap,
 	.unlocked_ioctl	= video_ioctl2,
-/*	.poll		= video_poll, */
 };
 
 static const struct v4l2_ioctl_ops usbvision_ioctl_ops = {
@@ -1270,17 +1196,15 @@
 	.vidioc_enum_input    = vidioc_enum_input,
 	.vidioc_g_input       = vidioc_g_input,
 	.vidioc_s_input       = vidioc_s_input,
-	.vidioc_queryctrl     = vidioc_queryctrl,
-	.vidioc_g_audio       = vidioc_g_audio,
-	.vidioc_s_audio       = vidioc_s_audio,
-	.vidioc_g_ctrl        = vidioc_g_ctrl,
-	.vidioc_s_ctrl        = vidioc_s_ctrl,
 	.vidioc_streamon      = vidioc_streamon,
 	.vidioc_streamoff     = vidioc_streamoff,
 	.vidioc_g_tuner       = vidioc_g_tuner,
 	.vidioc_s_tuner       = vidioc_s_tuner,
 	.vidioc_g_frequency   = vidioc_g_frequency,
 	.vidioc_s_frequency   = vidioc_s_frequency,
+	.vidioc_log_status    = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.vidioc_g_register    = vidioc_g_register,
 	.vidioc_s_register    = vidioc_s_register,
@@ -1301,23 +1225,19 @@
 	.owner             = THIS_MODULE,
 	.open		= usbvision_radio_open,
 	.release	= usbvision_radio_close,
+	.poll		= v4l2_ctrl_poll,
 	.unlocked_ioctl	= video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = {
 	.vidioc_querycap      = vidioc_querycap,
-	.vidioc_enum_input    = vidioc_enum_input,
-	.vidioc_g_input       = vidioc_g_input,
-	.vidioc_s_input       = vidioc_s_input,
-	.vidioc_queryctrl     = vidioc_queryctrl,
-	.vidioc_g_audio       = vidioc_g_audio,
-	.vidioc_s_audio       = vidioc_s_audio,
-	.vidioc_g_ctrl        = vidioc_g_ctrl,
-	.vidioc_s_ctrl        = vidioc_s_ctrl,
 	.vidioc_g_tuner       = vidioc_g_tuner,
 	.vidioc_s_tuner       = vidioc_s_tuner,
 	.vidioc_g_frequency   = vidioc_g_frequency,
 	.vidioc_s_frequency   = vidioc_s_frequency,
+	.vidioc_log_status    = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
 
 static struct video_device usbvision_radio_template = {
@@ -1369,9 +1289,15 @@
 /* register video4linux devices */
 static int usbvision_register_video(struct usb_usbvision *usbvision)
 {
+	int res = -ENOMEM;
+
 	/* Video Device: */
 	usbvision_vdev_init(usbvision, &usbvision->vdev,
 			      &usbvision_video_template, "USBVision Video");
+	if (!usbvision->have_tuner) {
+		v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_G_FREQUENCY);
+		v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_S_TUNER);
+	}
 	if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
 		goto err_exit;
 	printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
@@ -1395,7 +1323,7 @@
 		"USBVision[%d]: video_register_device() failed\n",
 			usbvision->nr);
 	usbvision_unregister_video(usbvision);
-	return -1;
+	return res;
 }
 
 /*
@@ -1420,6 +1348,9 @@
 	if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev))
 		goto err_free;
 
+	if (v4l2_ctrl_handler_init(&usbvision->hdl, 4))
+		goto err_unreg;
+	usbvision->v4l2_dev.ctrl_handler = &usbvision->hdl;
 	mutex_init(&usbvision->v4l2_lock);
 
 	/* prepare control urb for control messages during interrupts */
@@ -1428,11 +1359,10 @@
 		goto err_unreg;
 	init_waitqueue_head(&usbvision->ctrl_urb_wq);
 
-	usbvision_init_power_off_timer(usbvision);
-
 	return usbvision;
 
 err_unreg:
+	v4l2_ctrl_handler_free(&usbvision->hdl);
 	v4l2_device_unregister(&usbvision->v4l2_dev);
 err_free:
 	kfree(usbvision);
@@ -1450,8 +1380,6 @@
 {
 	PDEBUG(DBG_PROBE, "");
 
-	usbvision_reset_power_off_timer(usbvision);
-
 	usbvision->initialized = 0;
 
 	usbvision_remove_sysfs(&usbvision->vdev);
@@ -1460,6 +1388,7 @@
 
 	usb_free_urb(usbvision->ctrl_urb);
 
+	v4l2_ctrl_handler_free(&usbvision->hdl);
 	v4l2_device_unregister(&usbvision->v4l2_dev);
 	kfree(usbvision);
 
@@ -1487,19 +1416,18 @@
 	}
 
 	usbvision->tvnorm_id = usbvision_device_data[model].video_norm;
-
 	usbvision->video_inputs = usbvision_device_data[model].video_channels;
 	usbvision->ctl_input = 0;
+	usbvision->radio_freq = 87.5 * 16000;
+	usbvision->tv_freq = 400 * 16;
 
 	/* This should be here to make i2c clients to be able to register */
 	/* first switch off audio */
 	if (usbvision_device_data[model].audio_channels > 0)
 		usbvision_audio_off(usbvision);
-	if (!power_on_at_open) {
-		/* and then power up the noisy tuner */
-		usbvision_power_on(usbvision);
-		usbvision_i2c_register(usbvision);
-	}
+	/* and then power up the tuner */
+	usbvision_power_on(usbvision);
+	usbvision_i2c_register(usbvision);
 }
 
 /*
@@ -1592,6 +1520,10 @@
 
 	usbvision->nr = usbvision_nr++;
 
+	spin_lock_init(&usbvision->queue_lock);
+	init_waitqueue_head(&usbvision->wait_frame);
+	init_waitqueue_head(&usbvision->wait_stream);
+
 	usbvision->have_tuner = usbvision_device_data[model].tuner;
 	if (usbvision->have_tuner)
 		usbvision->tuner_type = usbvision_device_data[model].tuner_type;
@@ -1646,11 +1578,7 @@
 	usbvision_stop_isoc(usbvision);
 
 	v4l2_device_disconnect(&usbvision->v4l2_dev);
-
-	if (usbvision->power) {
-		usbvision_i2c_unregister(usbvision);
-		usbvision_power_off(usbvision);
-	}
+	usbvision_i2c_unregister(usbvision);
 	usbvision->remove_pending = 1;	/* Now all ISO data will be ignored */
 
 	usb_put_dev(usbvision->dev);
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 140a1f6..4f2e4fd 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -36,6 +36,7 @@
 #include <linux/i2c.h>
 #include <linux/mutex.h>
 #include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
 #include <media/tuner.h>
 #include <linux/videodev2.h>
 
@@ -357,6 +358,7 @@
 
 struct usb_usbvision {
 	struct v4l2_device v4l2_dev;
+	struct v4l2_ctrl_handler hdl;
 	struct video_device vdev;					/* Video Device */
 	struct video_device rdev;					/* Radio Device */
 
@@ -376,7 +378,8 @@
 	int bridge_type;						/* NT1003, NT1004, NT1005 */
 	int radio;
 	int video_inputs;						/* # of inputs */
-	unsigned long freq;
+	unsigned long radio_freq;
+	unsigned long tv_freq;
 	int audio_mute;
 	int audio_channel;
 	int isoc_mode;							/* format of video data for the usb isoc-transfer */
@@ -391,8 +394,6 @@
 	unsigned char iface_alt;					/* Alt settings */
 	unsigned char vin_reg2_preset;
 	struct mutex v4l2_lock;
-	struct timer_list power_off_timer;
-	struct work_struct power_off_work;
 	int power;							/* is the device powered on? */
 	int user;							/* user count for exclusive use */
 	int initialized;						/* Had we already sent init sequence? */
@@ -510,9 +511,6 @@
 int usbvision_set_input(struct usb_usbvision *usbvision);
 int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height);
 
-void usbvision_init_power_off_timer(struct usb_usbvision *usbvision);
-void usbvision_set_power_off_timer(struct usb_usbvision *usbvision);
-void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision);
 int usbvision_power_off(struct usb_usbvision *usbvision);
 int usbvision_power_on(struct usb_usbvision *usbvision);
 
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index dc3de00..d1dd440 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -13,6 +13,9 @@
 ifeq ($(CONFIG_OF),y)
   videodev-objs += v4l2-of.o
 endif
+ifeq ($(CONFIG_TRACEPOINTS),y)
+  videodev-objs += v4l2-trace.o
+endif
 
 obj-$(CONFIG_VIDEO_V4L2) += videodev.o
 obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index abdcffa..581e21a 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1366,7 +1366,6 @@
 
 static struct i2c_driver tuner_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tuner",
 		.pm	= &tuner_pm_ops,
 	},
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index e3a3468..b6b7dcc 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1678,21 +1678,6 @@
 	unsigned idx;
 	int err = 0;
 
-	if (!ctrl->is_ptr) {
-		switch (ctrl->type) {
-		case V4L2_CTRL_TYPE_INTEGER:
-		case V4L2_CTRL_TYPE_INTEGER_MENU:
-		case V4L2_CTRL_TYPE_MENU:
-		case V4L2_CTRL_TYPE_BITMASK:
-		case V4L2_CTRL_TYPE_BOOLEAN:
-		case V4L2_CTRL_TYPE_BUTTON:
-		case V4L2_CTRL_TYPE_CTRL_CLASS:
-		case V4L2_CTRL_TYPE_INTEGER64:
-			return ctrl->type_ops->validate(ctrl, 0, p_new);
-		default:
-			break;
-		}
-	}
 	for (idx = 0; !err && idx < ctrl->elems; idx++)
 		err = ctrl->type_ops->validate(ctrl, idx, p_new);
 	return err;
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 04dc71e..6a83d61 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -256,6 +256,7 @@
 {
 	const struct v4l2_bt_timings *bt = &t->bt;
 	u32 htot, vtot;
+	u32 fps;
 
 	if (t->type != V4L2_DV_BT_656_1120)
 		return;
@@ -265,13 +266,15 @@
 	if (bt->interlaced)
 		vtot /= 2;
 
+	fps = (htot * vtot) > 0 ? div_u64((100 * (u64)bt->pixelclock),
+				  (htot * vtot)) : 0;
+
 	if (prefix == NULL)
 		prefix = "";
 
-	pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
+	pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
 		bt->width, bt->height, bt->interlaced ? "i" : "p",
-		(htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
-		htot, vtot);
+		fps / 100, fps % 100, htot, vtot);
 
 	if (!detailed)
 		return;
@@ -290,9 +293,11 @@
 			(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
 			bt->il_vsync, bt->il_vbackporch);
 	pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
-	pr_info("%s: flags (0x%x):%s%s%s%s%s\n", dev_prefix, bt->flags,
+	pr_info("%s: flags (0x%x):%s%s%s%s%s%s\n", dev_prefix, bt->flags,
 			(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
 			" REDUCED_BLANKING" : "",
+			((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
+			 bt->vsync == 8) ? " (V2)" : "",
 			(bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
 			" CAN_REDUCE_FPS" : "",
 			(bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
@@ -316,6 +321,7 @@
  */
 
 #define CVT_PXL_CLK_GRAN	250000	/* pixel clock granularity */
+#define CVT_PXL_CLK_GRAN_RB_V2 1000	/* granularity for reduced blanking v2 */
 
 /* Normal blanking */
 #define CVT_MIN_V_BPORCH	7	/* lines */
@@ -335,15 +341,22 @@
 /* Reduced Blanking */
 #define CVT_RB_MIN_V_BPORCH    7       /* lines  */
 #define CVT_RB_V_FPORCH        3       /* lines  */
-#define CVT_RB_MIN_V_BLANK   460     /* us     */
+#define CVT_RB_MIN_V_BLANK   460       /* us     */
 #define CVT_RB_H_SYNC         32       /* pixels */
-#define CVT_RB_H_BPORCH       80       /* pixels */
 #define CVT_RB_H_BLANK       160       /* pixels */
+/* Reduced blanking version 2 */
+#define CVT_RB_V2_H_BLANK     80       /* pixels */
+#define CVT_RB_MIN_V_FPORCH    3       /* lines  */
+#define CVT_RB_V2_MIN_V_FPORCH 1       /* lines  */
+#define CVT_RB_V_BPORCH        6       /* lines  */
 
 /** v4l2_detect_cvt - detect if the given timings follow the CVT standard
  * @frame_height - the total height of the frame (including blanking) in lines.
  * @hfreq - the horizontal frequency in Hz.
  * @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of image (does not include blanking). This
+ * information is needed only in case of version 2 of reduced blanking.
+ * In other cases, this parameter does not have any effect on timings.
  * @polarities - the horizontal and vertical polarities (same as struct
  *		v4l2_bt_timings polarities).
  * @interlaced - if this flag is true, it indicates interlaced format
@@ -352,20 +365,22 @@
  * This function will attempt to detect if the given values correspond to a
  * valid CVT format. If so, then it will return true, and fmt will be filled
  * in with the found CVT timings.
- *
- * TODO: VESA defined a new version 2 of their reduced blanking
- * formula. Support for that is currently missing in this CVT
- * detection function.
  */
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
-		u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt)
+bool v4l2_detect_cvt(unsigned frame_height,
+		     unsigned hfreq,
+		     unsigned vsync,
+		     unsigned active_width,
+		     u32 polarities,
+		     bool interlaced,
+		     struct v4l2_dv_timings *fmt)
 {
 	int  v_fp, v_bp, h_fp, h_bp, hsync;
 	int  frame_width, image_height, image_width;
 	bool reduced_blanking;
+	bool rb_v2 = false;
 	unsigned pix_clk;
 
-	if (vsync < 4 || vsync > 7)
+	if (vsync < 4 || vsync > 8)
 		return false;
 
 	if (polarities == V4L2_DV_VSYNC_POS_POL)
@@ -375,17 +390,35 @@
 	else
 		return false;
 
+	if (reduced_blanking && vsync == 8)
+		rb_v2 = true;
+
+	if (rb_v2 && active_width == 0)
+		return false;
+
+	if (!rb_v2 && vsync > 7)
+		return false;
+
 	if (hfreq == 0)
 		return false;
 
 	/* Vertical */
 	if (reduced_blanking) {
-		v_fp = CVT_RB_V_FPORCH;
-		v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
-		v_bp -= vsync + v_fp;
+		if (rb_v2) {
+			v_bp = CVT_RB_V_BPORCH;
+			v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+			v_fp -= vsync + v_bp;
 
-		if (v_bp < CVT_RB_MIN_V_BPORCH)
-			v_bp = CVT_RB_MIN_V_BPORCH;
+			if (v_fp < CVT_RB_V2_MIN_V_FPORCH)
+				v_fp = CVT_RB_V2_MIN_V_FPORCH;
+		} else {
+			v_fp = CVT_RB_V_FPORCH;
+			v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+			v_bp -= vsync + v_fp;
+
+			if (v_bp < CVT_RB_MIN_V_BPORCH)
+				v_bp = CVT_RB_MIN_V_BPORCH;
+		}
 	} else {
 		v_fp = CVT_MIN_V_PORCH_RND;
 		v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync;
@@ -422,22 +455,32 @@
 		else
 			return false;
 		break;
+	case 8:
+		image_width = active_width;
+		break;
 	default:
 		return false;
 	}
 
-	image_width = image_width & ~7;
+	if (!rb_v2)
+		image_width = image_width & ~7;
 
 	/* Horizontal */
 	if (reduced_blanking) {
-		pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
-		pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+		int h_blank;
+		int clk_gran;
 
-		h_bp = CVT_RB_H_BPORCH;
+		h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK;
+		clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN;
+
+		pix_clk = (image_width + h_blank) * hfreq;
+		pix_clk = (pix_clk / clk_gran) * clk_gran;
+
+		h_bp  = h_blank / 2;
 		hsync = CVT_RB_H_SYNC;
-		h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+		h_fp  = h_blank - h_bp - hsync;
 
-		frame_width = image_width + CVT_RB_H_BLANK;
+		frame_width = image_width + h_blank;
 	} else {
 		unsigned ideal_duty_cycle_per_myriad =
 			100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
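
For the reduced-blanking branch above, the v2 constants make the horizontal split easy to check by hand: h_blank is 80, so h_bp = 40, hsync = 32 and h_fp = 8 pixels. A standalone sketch of that split, with the CVT v1 values shown for contrast:

#include <stdio.h>

struct rb_split { int h_blank, h_bp, hsync, h_fp; };

static struct rb_split cvt_rb_split(int rb_v2)
{
	struct rb_split s;

	s.h_blank = rb_v2 ? 80 : 160;	/* CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK */
	s.h_bp = s.h_blank / 2;
	s.hsync = 32;			/* CVT_RB_H_SYNC */
	s.h_fp = s.h_blank - s.h_bp - s.hsync;
	return s;
}

int main(void)
{
	for (int v2 = 0; v2 <= 1; v2++) {
		struct rb_split s = cvt_rb_split(v2);

		printf("RB v%d: blank %d = fp %d + sync %d + bp %d\n",
		       v2 + 1, s.h_blank, s.h_fp, s.hsync, s.h_bp);
	}
	return 0;
}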
@@ -665,7 +708,6 @@
 struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
 {
 	struct v4l2_fract aspect = { 16, 9 };
-	u32 tmp;
 	u8 ratio;
 
 	/* Nothing filled in, fallback to 16:9 */
@@ -697,9 +739,7 @@
 	if (hor_landscape)
 		return aspect;
 	/* The aspect ratio is for portrait, so swap numerator and denominator */
-	tmp = aspect.denominator;
-	aspect.denominator = aspect.numerator;
-	aspect.numerator = tmp;
+	swap(aspect.denominator, aspect.numerator);
 	return aspect;
 }
 EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
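
The refresh-rate hunk near the top of this file keeps fps in hundredths so fractional rates survive integer math (the kernel uses div_u64 because 64-bit division is not native on 32-bit targets). A standalone sketch with illustrative 1080p totals:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixelclock = 148500000;	/* Hz, illustrative */
	uint32_t htot = 2200, vtot = 1125;	/* totals including blanking */
	uint32_t fps = 100 * pixelclock / (htot * vtot);	/* hundredths */

	printf("%ux%u @ %u.%02u fps\n", 1920, 1080, fps / 100, fps % 100);
	return 0;	/* prints 60.00; 59.94-style rates keep their fraction */
}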
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8761aab..8d3171c 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -172,6 +172,9 @@
 	unsigned long flags;
 	struct timespec timestamp;
 
+	if (vdev == NULL)
+		return;
+
 	ktime_get_ts(&timestamp);
 
 	spin_lock_irqsave(&vdev->fh_lock, flags);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 85de455..4a384fc 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -28,7 +28,6 @@
 #include <media/v4l2-device.h>
 #include <media/videobuf2-core.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/v4l2.h>
 
 /* Zero out the end of the struct pointed to by p.  Everything after, but
@@ -1025,8 +1024,9 @@
 	 * Drivers MUST fill in device_caps, so check for this and
 	 * warn if it was forgotten.
 	 */
-	WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
-		!cap->device_caps);
+	WARN(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+		!cap->device_caps, "Bad caps for driver %s, %x %x",
+		cap->driver, cap->capabilities, cap->device_caps);
 	cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
 
 	return ret;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index dc853e5..ec3ad4e 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -357,9 +357,16 @@
 		     struct v4l2_requestbuffers *reqbufs)
 {
 	struct vb2_queue *vq;
+	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
-	return vb2_reqbufs(vq, reqbufs);
+	ret = vb2_reqbufs(vq, reqbufs);
+	/* If count == 0, then the owner has released all buffers and is
+	   no longer the owner of the queue. Otherwise we have an owner. */
+	if (ret == 0)
+		vq->owner = reqbufs->count ? file->private_data : NULL;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
 
@@ -874,18 +881,8 @@
 int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct v4l2_fh *fh = file->private_data;
-	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
-	int ret;
 
-	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
-		return -ERESTARTSYS;
-
-	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
-
-	if (m2m_ctx->q_lock)
-		mutex_unlock(m2m_ctx->q_lock);
-
-	return ret;
+	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
 
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 6359606..83615b8 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -588,3 +588,21 @@
 #endif
 }
 EXPORT_SYMBOL(v4l2_subdev_init);
+
+/**
+ * v4l2_subdev_notify_event() - Delivers event notification for subdevice
+ * @sd: The subdev for which to deliver the event
+ * @ev: The event to deliver
+ *
+ * Will deliver the specified event to all userspace event listeners which are
+ * subscribed to the v4l2 subdev event queue as well as to the bridge driver
+ * using the notify callback. The notification type for the notify callback
+ * will be V4L2_DEVICE_NOTIFY_EVENT.
+ */
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+			      const struct v4l2_event *ev)
+{
+	v4l2_event_queue(sd->devnode, ev);
+	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
new file mode 100644
index 0000000..ae10b02
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -0,0 +1,11 @@
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
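
This new file exists because CREATE_TRACE_POINTS may be defined in exactly one translation unit per trace header: that unit instantiates the tracepoints, and every other consumer (such as videobuf2-core.c below) includes the header bare. A hedged sketch of the pattern, using a hypothetical my_evt event header:

/* one-trace.c -- the single instantiating unit */
#define CREATE_TRACE_POINTS
#include <trace/events/my_evt.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(my_evt);

/* consumer.c -- only fires the event, never defines CREATE_TRACE_POINTS */
#include <trace/events/my_evt.h>

void do_work(int id)
{
	trace_my_evt(id);	/* compiles to a near no-op unless the tracepoint is enabled */
}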
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a14c428..f1022d8 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -30,6 +30,8 @@
 #include <media/v4l2-common.h>
 #include <media/videobuf2-core.h>
 
+#include <trace/events/v4l2.h>
+
 static int debug;
 module_param(debug, int, 0644);
 
@@ -1213,6 +1215,8 @@
 	atomic_dec(&q->owned_by_drv_count);
 	spin_unlock_irqrestore(&q->done_lock, flags);
 
+	trace_vb2_buf_done(q, vb);
+
 	switch (state) {
 	case VB2_BUF_STATE_QUEUED:
 		return;
@@ -1639,6 +1643,8 @@
 	vb->state = VB2_BUF_STATE_ACTIVE;
 	atomic_inc(&q->owned_by_drv_count);
 
+	trace_vb2_buf_queue(q, vb);
+
 	/* sync buffers */
 	for (plane = 0; plane < vb->num_planes; ++plane)
 		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
@@ -1888,6 +1894,8 @@
 			vb->v4l2_buf.timecode = b->timecode;
 	}
 
+	trace_vb2_qbuf(q, vb);
+
 	/*
 	 * If already streaming, give the buffer to driver for processing.
 	 * If not, the buffer will be given to driver on next streamon.
@@ -2133,6 +2141,9 @@
 	/* Remove from videobuf queue */
 	list_del(&vb->queued_entry);
 	q->queued_count--;
+
+	trace_vb2_dqbuf(q, vb);
+
 	if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
 	    vb->v4l2_buf.flags & V4L2_BUF_FLAG_LAST)
 		q->last_buffer_dequeued = true;
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 410c397..e87459f 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -62,7 +62,7 @@
 		return -ENODEV;
 
 	for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
-		u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
 		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
 				convert_ifc_address(addr_base))
 			return i;
@@ -79,16 +79,16 @@
 	/*
 	 * Clear all the common status and event registers
 	 */
-	if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+	if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 	/* enable all error and events */
-	out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+	ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
 
 	/* enable all error and event interrupts */
-	out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
-	out_be32(&ifc->cm_erattr0, 0x0);
-	out_be32(&ifc->cm_erattr1, 0x0);
+	ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
+	ifc_out32(0x0, &ifc->cm_erattr0);
+	ifc_out32(0x0, &ifc->cm_erattr1);
 
 	return 0;
 }
@@ -127,9 +127,9 @@
 
 	spin_lock_irqsave(&nand_irq_lock, flags);
 
-	stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+	stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
 	if (stat) {
-		out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+		ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
 		ctrl->nand_stat = stat;
 		wake_up(&ctrl->nand_wait);
 	}
@@ -161,16 +161,16 @@
 	irqreturn_t ret = IRQ_NONE;
 
 	/* read for chip select error */
-	cs_err = in_be32(&ifc->cm_evter_stat);
+	cs_err = ifc_in32(&ifc->cm_evter_stat);
 	if (cs_err) {
 		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
 				"any memory bank 0x%08X\n", cs_err);
 		/* clear the chip select error */
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 		/* read error attribute registers print the error information */
-		status = in_be32(&ifc->cm_erattr0);
-		err_addr = in_be32(&ifc->cm_erattr1);
+		status = ifc_in32(&ifc->cm_erattr0);
+		err_addr = ifc_in32(&ifc->cm_erattr1);
 
 		if (status & IFC_CM_ERATTR0_ERTYP_READ)
 			dev_err(ctrl->dev, "Read transaction error"
@@ -231,6 +231,20 @@
 		goto err;
 	}
 
+	version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+			FSL_IFC_VERSION_MASK;
+	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+	dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+		version >> 24, (version >> 16) & 0xf, banks);
+
+	fsl_ifc_ctrl_dev->version = version;
+	fsl_ifc_ctrl_dev->banks = banks;
+
+	if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+		fsl_ifc_ctrl_dev->little_endian = true;
+		dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
+	} else {
+		fsl_ifc_ctrl_dev->little_endian = false;
+		dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
+	}
+
-	version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
-			FSL_IFC_VERSION_MASK;
-	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
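
The conversions in this file route every register access through ifc_in32()/ifc_out32() so that the endianness selected by the "little-endian" device-tree property is honored. A hedged sketch of what such an accessor can look like; the real helpers live in the IFC header and may differ:

#include <linux/io.h>

extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;	/* carries the little_endian flag set above */

static inline u32 demo_ifc_in32(void __iomem *addr)
{
	if (fsl_ifc_ctrl_dev->little_endian)
		return ioread32(addr);
	return ioread32be(addr);
}

static inline void demo_ifc_out32(u32 val, void __iomem *addr)
{
	if (fsl_ifc_ctrl_dev->little_endian)
		iowrite32(val, addr);
	else
		iowrite32be(val, addr);
}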
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index c8765db..ba8fff3 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra114-mc.h>
 
 #include "mc.h"
@@ -914,20 +912,6 @@
 	{ .name = "tsec",      .swgroup = TEGRA_SWGROUP_TSEC,      .reg = 0x294 },
 };
 
-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
-	.flush_dcache = tegra114_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra114_smmu_soc = {
 	.clients = tegra114_mc_clients,
 	.num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@
 	.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
 	.supports_round_robin_arbitration = false,
 	.supports_request_limit = false,
+	.num_tlb_lines = 32,
 	.num_asids = 4,
-	.ops = &tegra114_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra114_mc_soc = {
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 060fb3d..21e7255 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra124-mc.h>
 
 #include "mc.h"
@@ -1002,20 +1000,6 @@
 };
 
 #ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
-	.flush_dcache = tegra124_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra124_smmu_soc = {
 	.clients = tegra124_mc_clients,
 	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@
 	.supports_round_robin_arbitration = true,
 	.supports_request_limit = true,
 	.num_asids = 128,
-	.ops = &tegra124_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1040,18 +1023,6 @@
 #endif /* CONFIG_ARCH_TEGRA_124_SOC */
 
 #ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	void *virt = page_address(page) + offset;
-
-	__flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
-	.flush_dcache = tegra132_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra132_smmu_soc = {
 	.clients = tegra124_mc_clients,
 	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1059,8 +1030,8 @@
 	.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
 	.supports_round_robin_arbitration = true,
 	.supports_request_limit = true,
+	.num_tlb_lines = 32,
 	.num_asids = 128,
-	.ops = &tegra132_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra132_mc_soc = {
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 52e16c7..b447378 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra30-mc.h>
 
 #include "mc.h"
@@ -936,20 +934,6 @@
 	{ .name = "isp",  .swgroup = TEGRA_SWGROUP_ISP,  .reg = 0x258 },
 };
 
-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
-				 size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
-	.flush_dcache = tegra30_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra30_smmu_soc = {
 	.clients = tegra30_mc_clients,
 	.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@
 	.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
 	.supports_round_robin_arbitration = false,
 	.supports_request_limit = false,
+	.num_tlb_lines = 16,
 	.num_asids = 4,
-	.ops = &tegra30_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra30_mc_soc = {
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 841717a..f2d9fb4 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -609,7 +609,6 @@
 static struct i2c_driver pm800_driver = {
 	.driver = {
 		.name = "88PM800",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm800_probe,
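
The .owner = THIS_MODULE deletion here recurs across the I2C drivers
below; it is safe because the I2C core fills the field in itself:
module_i2c_driver() expands to i2c_add_driver(), which passes
THIS_MODULE to i2c_register_driver(). A minimal skeleton (all "foo"
names are placeholders) showing the resulting shape:

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_ids[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_ids);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",	/* no .owner: i2c_register_driver() sets it */
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_ids,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");
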
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e9d5064..39f2302 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -267,7 +267,6 @@
 static struct i2c_driver pm805_driver = {
 	.driver = {
 		.name = "88PM805",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm805_probe,
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index e03b7f4..3269a99 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -558,11 +558,7 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &pm860x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 	return 0;
 }
 
@@ -1258,7 +1254,6 @@
 static struct i2c_driver pm860x_driver = {
 	.driver	= {
 		.name	= "88PM860x",
-		.owner	= THIS_MODULE,
 		.pm     = &pm860x_pm_ops,
 		.of_match_table	= pm860x_dt_ids,
 	},
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 076f593..99d6367 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -97,6 +97,7 @@
 	select MFD_CORE
 	select CHROME_PLATFORMS
 	select CROS_EC_PROTO
+	depends on X86 || ARM || COMPILE_TEST
 	help
 	  If you say Y here you get support for the ChromeOS Embedded
 	  Controller (EC) providing keyboard, battery and power services.
@@ -105,7 +106,7 @@
 
 config MFD_CROS_EC_I2C
 	tristate "ChromeOS Embedded Controller (I2C)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && I2C
+	depends on MFD_CROS_EC && I2C
 
 	help
 	  If you say Y here, you get support for talking to the ChromeOS
@@ -115,7 +116,7 @@
 
 config MFD_CROS_EC_SPI
 	tristate "ChromeOS Embedded Controller (SPI)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
+	depends on MFD_CROS_EC && SPI
 
 	---help---
 	  If you say Y here, you get support for talking to the ChromeOS EC
@@ -186,6 +187,18 @@
 	  This driver can be built as a module. If built as a module it will be
 	  called "da9055"
 
+config MFD_DA9062
+	tristate "Dialog Semiconductor DA9062 PMIC Support"
+	select MFD_CORE
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	depends on I2C=y
+	help
+	  Say yes here to add support for the Dialog Semiconductor DA9062 PMIC.
+	  This includes the I2C driver and core APIs.
+	  Additional drivers must be enabled in order to use the functionality
+	  of the device.
+
 config MFD_DA9063
 	bool "Dialog Semiconductor DA9063 PMIC Support"
 	select MFD_CORE
@@ -318,6 +331,7 @@
 
 config INTEL_SOC_PMIC
 	bool "Support for Intel Atom SoC PMIC"
+	depends on GPIOLIB
 	depends on I2C=y
 	select MFD_CORE
 	select REGMAP_I2C
@@ -398,12 +412,14 @@
 	  device may provide functions like watchdog, GPIO, UART and I2C bus.
 
 	  The following modules are supported:
+		* COMe-bBL6
 		* COMe-bHL6
 		* COMe-bIP#
 		* COMe-bPC2 (ETXexpress-PC)
 		* COMe-bSC# (ETXexpress-SC T#)
 		* COMe-cBL6
 		* COMe-cBT6
+		* COMe-cBW6
 		* COMe-cCT6
 		* COMe-cDC2 (microETXexpress-DC)
 		* COMe-cHL6
@@ -1379,6 +1395,12 @@
 	help
 	  Support for Wolfson Microelectronics WM8997 low power audio SoC
 
+config MFD_WM8998
+	bool "Wolfson Microelectronics WM8998"
+	depends on MFD_ARIZONA
+	help
+	  Support for Wolfson Microelectronics WM8998 low power audio SoC
+
 config MFD_WM8400
 	bool "Wolfson Microelectronics WM8400"
 	select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9d730a2..a59e3fc 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -48,6 +48,9 @@
 ifeq ($(CONFIG_MFD_WM8997),y)
 obj-$(CONFIG_MFD_ARIZONA)	+= wm8997-tables.o
 endif
+ifeq ($(CONFIG_MFD_WM8998),y)
+obj-$(CONFIG_MFD_ARIZONA)	+= wm8998-tables.o
+endif
 obj-$(CONFIG_MFD_WM8400)	+= wm8400-core.o
 wm831x-objs			:= wm831x-core.o wm831x-irq.o wm831x-otp.o
 wm831x-objs			+= wm831x-auxadc.o
@@ -110,10 +113,11 @@
 
 da9055-objs			:= da9055-core.o da9055-i2c.o
 obj-$(CONFIG_MFD_DA9055)	+= da9055.o
-
+obj-$(CONFIG_MFD_DA9062)	+= da9062-core.o
 da9063-objs			:= da9063-core.o da9063-irq.o da9063-i2c.o
 obj-$(CONFIG_MFD_DA9063)	+= da9063.o
 obj-$(CONFIG_MFD_DA9150)	+= da9150-core.o
+
 obj-$(CONFIG_MFD_MAX14577)	+= max14577.o
 obj-$(CONFIG_MFD_MAX77686)	+= max77686.o
 obj-$(CONFIG_MFD_MAX77693)	+= max77693.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 4e6e03d..29b6a2d 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -500,7 +500,6 @@
 static struct i2c_driver aat2870_i2c_driver = {
 	.driver = {
 		.name	= "aat2870",
-		.owner	= THIS_MODULE,
 		.pm	= &aat2870_pm_ops,
 	},
 	.probe		= aat2870_i2c_probe,
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4659ac1..f0afb44 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -972,7 +972,6 @@
 static struct i2c_driver ab3100_driver = {
 	.driver = {
 		.name	= "ab3100",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= ab3100_id,
 	.probe		= ab3100_probe,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 000da72..fefbe4c 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -565,11 +565,7 @@
 	irq_set_chip_and_handler(virq, &ab8500_irq_chip,
 				handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
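
set_irq_flags() was an ARM-only helper, which is what forced the
CONFIG_ARM conditionals being deleted in these hunks. The generic
replacements touch the same descriptor state on every architecture;
roughly, the two mappings this series uses are:

#include <linux/irq.h>

static void example_valid_only(unsigned int virq)
{
	/* was: set_irq_flags(virq, IRQF_VALID);
	 * requestable, but excluded from autoprobing */
	irq_set_noprobe(virq);
}

static void example_valid_and_probe(unsigned int virq)
{
	/* was: set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
	 * requestable and eligible for autoprobing */
	irq_clear_status_flags(virq, IRQ_NOREQUEST | IRQ_NOPROBE);
}

The second form appears further down in the asic3, htc-egpio and
htc-i2cpld conversions.
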
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f495b8b..ae88654 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -351,7 +351,6 @@
 static struct i2c_driver adp5520_driver = {
 	.driver = {
 		.name	= "adp5520",
-		.owner	= THIS_MODULE,
 		.pm	= &adp5520_pm,
 	},
 	.probe		= adp5520_probe,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a72ddb29..44cfdbb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -30,7 +30,7 @@
 
 #include "arizona.h"
 
-static const char *wm5102_core_supplies[] = {
+static const char * const wm5102_core_supplies[] = {
 	"AVDD",
 	"DBVDD1",
 };
@@ -146,17 +146,31 @@
 static irqreturn_t arizona_overclocked(int irq, void *data)
 {
 	struct arizona *arizona = data;
-	unsigned int val[2];
+	unsigned int val[3];
 	int ret;
-	
+
 	ret = regmap_bulk_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_6,
-			       &val[0], 2);
+			       &val[0], 3);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to read overclock status: %d\n",
 			ret);
 		return IRQ_NONE;
 	}
 
+	switch (arizona->type) {
+	case WM8998:
+	case WM1814:
+		/* Some bits are shifted on WM8998,
+		 * rearrange to match the standard bit layout
+		 */
+		val[0] = ((val[0] & 0x60e0) >> 1) |
+			 ((val[0] & 0x1e00) >> 2) |
+			 (val[0] & 0x000f);
+		break;
+	default:
+		break;
+	}
+
 	if (val[0] & ARIZONA_PWM_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "PWM overclocked\n");
 	if (val[0] & ARIZONA_FX_CORE_OVERCLOCKED_STS)
@@ -201,6 +215,9 @@
 	if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "ISRC1 overclocked\n");
 
+	if (val[2] & ARIZONA_SPDIF_OVERCLOCKED_STS)
+		dev_err(arizona->dev, "SPDIF overclocked\n");
+
 	return IRQ_HANDLED;
 }
 
@@ -392,7 +409,7 @@
  * Register patch to some of the CODECs internal write sequences
  * to ensure a clean exit from the low power sleep state.
  */
-static const struct reg_default wm5110_sleep_patch[] = {
+static const struct reg_sequence wm5110_sleep_patch[] = {
 	{ 0x337A, 0xC100 },
 	{ 0x337B, 0x0041 },
 	{ 0x3300, 0xA210 },
@@ -550,9 +567,8 @@
 		break;
 	default:
 		ret = arizona_wait_for_boot(arizona);
-		if (ret != 0) {
+		if (ret != 0)
 			goto err;
-		}
 
 		if (arizona->external_dcvdd) {
 			ret = regmap_update_bits(arizona->regmap,
@@ -759,8 +775,8 @@
 
 	ret = of_property_read_u32_array(arizona->dev->of_node,
 					 "wlf,gpio-defaults",
-					 arizona->pdata.gpio_defaults,
-					 ARRAY_SIZE(arizona->pdata.gpio_defaults));
+					 pdata->gpio_defaults,
+					 ARRAY_SIZE(pdata->gpio_defaults));
 	if (ret >= 0) {
 		/*
 		 * All values are literal except out of range values
@@ -768,11 +784,11 @@
 		 * data which uses 0 as chip default and out of range
 		 * as zero.
 		 */
-		for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
-			if (arizona->pdata.gpio_defaults[i] > 0xffff)
-				arizona->pdata.gpio_defaults[i] = 0;
-			else if (arizona->pdata.gpio_defaults[i] == 0)
-				arizona->pdata.gpio_defaults[i] = 0x10000;
+		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+			if (pdata->gpio_defaults[i] > 0xffff)
+				pdata->gpio_defaults[i] = 0;
+			else if (pdata->gpio_defaults[i] == 0)
+				pdata->gpio_defaults[i] = 0x10000;
 		}
 	} else {
 		dev_err(arizona->dev, "Failed to parse GPIO defaults: %d\n",
@@ -781,20 +797,20 @@
 
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.inmode))
+		if (count == ARRAY_SIZE(pdata->inmode))
 			break;
 
-		arizona->pdata.inmode[count] = val;
+		pdata->inmode[count] = val;
 		count++;
 	}
 
 	count = 0;
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.dmic_ref))
+		if (count == ARRAY_SIZE(pdata->dmic_ref))
 			break;
 
-		arizona->pdata.dmic_ref[count] = val;
+		pdata->dmic_ref[count] = val;
 		count++;
 	}
 
@@ -806,6 +822,8 @@
 	{ .compatible = "wlf,wm5110", .data = (void *)WM5110 },
 	{ .compatible = "wlf,wm8280", .data = (void *)WM8280 },
 	{ .compatible = "wlf,wm8997", .data = (void *)WM8997 },
+	{ .compatible = "wlf,wm8998", .data = (void *)WM8998 },
+	{ .compatible = "wlf,wm1814", .data = (void *)WM1814 },
 	{},
 };
 EXPORT_SYMBOL_GPL(arizona_of_match);
@@ -820,7 +838,7 @@
 	{ .name = "arizona-ldo1" },
 };
 
-static const char *wm5102_supplies[] = {
+static const char * const wm5102_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"DBVDD3",
@@ -863,7 +881,7 @@
 	},
 };
 
-static const char *wm8997_supplies[] = {
+static const char * const wm8997_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"CPVDD",
@@ -887,11 +905,28 @@
 	},
 };
 
+static const struct mfd_cell wm8998_devs[] = {
+	{
+		.name = "arizona-extcon",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = 1, /* We only need MICVDD */
+	},
+	{ .name = "arizona-gpio" },
+	{ .name = "arizona-haptics" },
+	{ .name = "arizona-pwm" },
+	{
+		.name = "wm8998-codec",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+	},
+	{ .name = "arizona-micsupp" },
+};
+
 int arizona_dev_init(struct arizona *arizona)
 {
 	struct device *dev = arizona->dev;
 	const char *type_name;
-	unsigned int reg, val;
+	unsigned int reg, val, mask;
 	int (*apply_patch)(struct arizona *) = NULL;
 	int ret, i;
 
@@ -911,6 +946,8 @@
 	case WM5110:
 	case WM8280:
 	case WM8997:
+	case WM8998:
+	case WM1814:
 		for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
 			arizona->core_supplies[i].supply
 				= wm5102_core_supplies[i];
@@ -992,6 +1029,7 @@
 	switch (reg) {
 	case 0x5102:
 	case 0x5110:
+	case 0x6349:
 	case 0x8997:
 		break;
 	default:
@@ -1093,6 +1131,27 @@
 		apply_patch = wm8997_patch;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case 0x6349:
+		switch (arizona->type) {
+		case WM8998:
+			type_name = "WM8998";
+			break;
+
+		case WM1814:
+			type_name = "WM1814";
+			break;
+
+		default:
+			type_name = "WM8998";
+			dev_err(arizona->dev, "WM8998 registered as %d\n",
+				arizona->type);
+			arizona->type = WM8998;
+		}
+
+		apply_patch = wm8998_patch;
+		break;
+#endif
 	default:
 		dev_err(arizona->dev, "Unknown device ID %x\n", reg);
 		goto err_reset;
@@ -1204,14 +1263,38 @@
 			<< ARIZONA_IN1_DMIC_SUP_SHIFT;
 		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
 			val |= 1 << ARIZONA_IN1_MODE_SHIFT;
-		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
-			val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+		switch (arizona->type) {
+		case WM8998:
+		case WM1814:
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
+				ARIZONA_IN1L_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1L_SRC_SE_SHIFT);
+
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
+				ARIZONA_IN1R_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1R_SRC_SE_SHIFT);
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK;
+			break;
+		default:
+			if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+				val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK |
+				ARIZONA_IN1_SINGLE_ENDED_MASK;
+			break;
+		}
 
 		regmap_update_bits(arizona->regmap,
 				   ARIZONA_IN1L_CONTROL + (i * 8),
-				   ARIZONA_IN1_DMIC_SUP_MASK |
-				   ARIZONA_IN1_MODE_MASK |
-				   ARIZONA_IN1_SINGLE_ENDED_MASK, val);
+				   mask, val);
 	}
 
 	for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
@@ -1273,6 +1356,11 @@
 		ret = mfd_add_devices(arizona->dev, -1, wm8997_devs,
 				      ARRAY_SIZE(wm8997_devs), NULL, 0, NULL);
 		break;
+	case WM8998:
+	case WM1814:
+		ret = mfd_add_devices(arizona->dev, -1, wm8998_devs,
+				      ARRAY_SIZE(wm8998_devs), NULL, 0, NULL);
+		break;
 	}
 
 	if (ret != 0) {
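
The type change from reg_default to reg_sequence earlier in this file
tracks the regmap API split: reg_default describes register cache
defaults, while reg_sequence is what the ordered-write entry points
take. An illustrative use with placeholder register addresses:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_patch[] = {
	{ 0x0001, 0x1234 },
	{ 0x0002, 0x5678 },
};

static int apply_example_patch(struct regmap *map)
{
	/* writes the entries in array order */
	return regmap_register_patch(map, example_patch,
				     ARRAY_SIZE(example_patch));
}
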
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index ff782a5..cea1b40 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -53,6 +53,12 @@
 		regmap_config = &wm8997_i2c_regmap;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		regmap_config = &wm8998_i2c_regmap;
+		break;
+#endif
 	default:
 		dev_err(&i2c->dev, "Unknown device type %ld\n",
 			id->driver_data);
@@ -90,6 +96,8 @@
 	{ "wm5110", WM5110 },
 	{ "wm8280", WM8280 },
 	{ "wm8997", WM8997 },
+	{ "wm8998", WM8998 },
+	{ "wm1814", WM1814 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
@@ -97,7 +105,6 @@
 static struct i2c_driver arizona_i2c_driver = {
 	.driver = {
 		.name	= "arizona",
-		.owner	= THIS_MODULE,
 		.pm	= &arizona_pm_ops,
 		.of_match_table	= of_match_ptr(arizona_of_match),
 	},
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 2b9965d5..2cac4f4 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -174,14 +174,7 @@
 	irq_set_chip_data(virq, data);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -234,6 +227,15 @@
 		arizona->ctrlif_error = false;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		aod = &wm8998_aod;
+		irq = &wm8998_irq;
+
+		arizona->ctrlif_error = false;
+		break;
+#endif
 	default:
 		BUG_ON("Unknown Arizona class device" == NULL);
 		return -EINVAL;
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index fbe2843..3af12e9 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -27,6 +27,8 @@
 
 extern const struct regmap_config wm8997_i2c_regmap;
 
+extern const struct regmap_config wm8998_i2c_regmap;
+
 extern const struct dev_pm_ops arizona_pm_ops;
 
 extern const struct of_device_id arizona_of_match[];
@@ -41,6 +43,9 @@
 extern const struct regmap_irq_chip wm8997_aod;
 extern const struct regmap_irq_chip wm8997_irq;
 
+extern struct regmap_irq_chip wm8998_aod;
+extern struct regmap_irq_chip wm8998_irq;
+
 int arizona_dev_init(struct arizona *arizona);
 int arizona_dev_exit(struct arizona *arizona);
 int arizona_irq_init(struct arizona *arizona);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index d9706ed..d001f7e2 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -211,7 +211,6 @@
 static struct i2c_driver as3711_i2c_driver = {
 	.driver = {
 		   .name = "as3711",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(as3711_of_match),
 	},
 	.probe = as3711_i2c_probe,
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 39fa554..924ea90 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -437,7 +437,6 @@
 static struct i2c_driver as3722_i2c_driver = {
 	.driver = {
 		.name = "as3722",
-		.owner = THIS_MODULE,
 		.of_match_table = as3722_of_match,
 	},
 	.probe = as3722_i2c_probe,
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 120df5c..4b54128 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -411,7 +411,7 @@
 
 		irq_set_chip_data(irq, asic);
 		irq_set_handler(irq, handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
@@ -431,7 +431,7 @@
 	irq_base = asic->irq_base;
 
 	for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip_and_handler(irq, NULL, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index cfd58f4..3fff6b5 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/atmel-hlcdc.h>
 #include <linux/mfd/core.h>
 #include <linux/module.h>
@@ -26,6 +27,10 @@
 
 #define ATMEL_HLCDC_REG_MAX		(0x4000 - 0x4)
 
+struct atmel_hlcdc_regmap {
+	void __iomem *regs;
+};
+
 static const struct mfd_cell atmel_hlcdc_cells[] = {
 	{
 		.name = "atmel-hlcdc-pwm",
@@ -37,28 +42,62 @@
 	},
 };
 
+static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
+	if (reg <= ATMEL_HLCDC_DIS) {
+		u32 status;
+
+		readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
+				   !(status & ATMEL_HLCDC_SIP), 1, 100);
+	}
+
+	writel(val, hregmap->regs + reg);
+
+	return 0;
+}
+
+static int regmap_atmel_hlcdc_reg_read(void *context, unsigned int reg,
+				       unsigned int *val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
+	*val = readl(hregmap->regs + reg);
+
+	return 0;
+}
+
 static const struct regmap_config atmel_hlcdc_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
 	.max_register = ATMEL_HLCDC_REG_MAX,
+	.reg_write = regmap_atmel_hlcdc_reg_write,
+	.reg_read = regmap_atmel_hlcdc_reg_read,
+	.fast_io = true,
 };
 
 static int atmel_hlcdc_probe(struct platform_device *pdev)
 {
+	struct atmel_hlcdc_regmap *hregmap;
 	struct device *dev = &pdev->dev;
 	struct atmel_hlcdc *hlcdc;
 	struct resource *res;
-	void __iomem *regs;
+
+	hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
+	if (!hregmap)
+		return -ENOMEM;
 
 	hlcdc = devm_kzalloc(dev, sizeof(*hlcdc), GFP_KERNEL);
 	if (!hlcdc)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs))
-		return PTR_ERR(regs);
+	hregmap->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hregmap->regs))
+		return PTR_ERR(hregmap->regs);
 
 	hlcdc->irq = platform_get_irq(pdev, 0);
 	if (hlcdc->irq < 0)
@@ -82,8 +121,8 @@
 		return PTR_ERR(hlcdc->slow_clk);
 	}
 
-	hlcdc->regmap = devm_regmap_init_mmio(dev, regs,
-					      &atmel_hlcdc_regmap_config);
+	hlcdc->regmap = devm_regmap_init(dev, NULL, hregmap,
+					 &atmel_hlcdc_regmap_config);
 	if (IS_ERR(hlcdc->regmap))
 		return PTR_ERR(hlcdc->regmap);
 
@@ -102,7 +141,11 @@
 }
 
 static const struct of_device_id atmel_hlcdc_match[] = {
+	{ .compatible = "atmel,at91sam9n12-hlcdc" },
+	{ .compatible = "atmel,at91sam9x5-hlcdc" },
+	{ .compatible = "atmel,sama5d2-hlcdc" },
 	{ .compatible = "atmel,sama5d3-hlcdc" },
+	{ .compatible = "atmel,sama5d4-hlcdc" },
 	{ /* sentinel */ },
 };
 
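readl_poll_timeout(), made available by the new linux/iopoll.h include,
polls a register until a condition holds or a timeout expires. A
simplified sketch of what the poll in the write path above amounts to
(not the macro's exact body):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/mfd/atmel-hlcdc.h>

static int example_wait_sip_clear(void __iomem *sr)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 100);
	u32 status;

	for (;;) {
		status = readl(sr);
		if (!(status & ATMEL_HLCDC_SIP))	/* sync done */
			return 0;
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;
		udelay(1);	/* matches the 1us interval in the caller */
	}
}

Note that regmap_atmel_hlcdc_reg_write() ignores the poll's return
value and issues the write even if the SIP bit never cleared.
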
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 6df9155..3f576b7 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -30,19 +30,47 @@
 #define AXP20X_OFF	0x80
 
 static const char * const axp20x_model_names[] = {
+	"AXP152",
 	"AXP202",
 	"AXP209",
 	"AXP221",
 	"AXP288",
 };
 
+static const struct regmap_range axp152_writeable_ranges[] = {
+	regmap_reg_range(AXP152_LDO3456_DC1234_CTRL, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_DCDC_MODE, AXP152_PWM1_DUTY_CYCLE),
+};
+
+static const struct regmap_range axp152_volatile_ranges[] = {
+	regmap_reg_range(AXP152_PWR_OP_MODE, AXP152_PWR_OP_MODE),
+	regmap_reg_range(AXP152_IRQ1_EN, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_GPIO_INPUT, AXP152_GPIO_INPUT),
+};
+
+static const struct regmap_access_table axp152_writeable_table = {
+	.yes_ranges	= axp152_writeable_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_writeable_ranges),
+};
+
+static const struct regmap_access_table axp152_volatile_table = {
+	.yes_ranges	= axp152_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_volatile_ranges),
+};
+
 static const struct regmap_range axp20x_writeable_ranges[] = {
 	regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
 	regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES),
+	regmap_reg_range(AXP20X_RDC_H, AXP20X_OCV(AXP20X_OCV_MAX)),
 };
 
 static const struct regmap_range axp20x_volatile_ranges[] = {
+	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_USB_OTG_STATUS),
+	regmap_reg_range(AXP20X_CHRG_CTRL1, AXP20X_CHRG_CTRL2),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+	regmap_reg_range(AXP20X_ACIN_V_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
+	regmap_reg_range(AXP20X_GPIO20_SS, AXP20X_GPIO3_CTRL),
+	regmap_reg_range(AXP20X_FG_RES, AXP20X_RDC_L),
 };
 
 static const struct regmap_access_table axp20x_writeable_table = {
@@ -93,6 +121,11 @@
 	.n_yes_ranges	= ARRAY_SIZE(axp288_volatile_ranges),
 };
 
+static struct resource axp152_pek_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
+};
+
 static struct resource axp20x_pek_resources[] = {
 	{
 		.name	= "PEK_DBR",
@@ -107,6 +140,13 @@
 	},
 };
 
+static struct resource axp20x_usb_power_supply_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_VALID, "VBUS_VALID"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
+};
+
 static struct resource axp22x_pek_resources[] = {
 	{
 		.name   = "PEK_DBR",
@@ -154,12 +194,21 @@
 	},
 };
 
+static const struct regmap_config axp152_regmap_config = {
+	.reg_bits	= 8,
+	.val_bits	= 8,
+	.wr_table	= &axp152_writeable_table,
+	.volatile_table	= &axp152_volatile_table,
+	.max_register	= AXP152_PWM1_DUTY_CYCLE,
+	.cache_type	= REGCACHE_RBTREE,
+};
+
 static const struct regmap_config axp20x_regmap_config = {
 	.reg_bits	= 8,
 	.val_bits	= 8,
 	.wr_table	= &axp20x_writeable_table,
 	.volatile_table	= &axp20x_volatile_table,
-	.max_register	= AXP20X_FG_RES,
+	.max_register	= AXP20X_OCV(AXP20X_OCV_MAX),
 	.cache_type	= REGCACHE_RBTREE,
 };
 
@@ -184,6 +233,26 @@
 #define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask)			\
 	[_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
 
+static const struct regmap_irq axp152_regmap_irqs[] = {
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_CONNECT,		0, 6),
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_REMOVAL,		0, 5),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_CONNECT,	0, 3),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_REMOVAL,	0, 2),
+	INIT_REGMAP_IRQ(AXP152, DCDC1_V_LOW,		1, 5),
+	INIT_REGMAP_IRQ(AXP152, DCDC2_V_LOW,		1, 4),
+	INIT_REGMAP_IRQ(AXP152, DCDC3_V_LOW,		1, 3),
+	INIT_REGMAP_IRQ(AXP152, DCDC4_V_LOW,		1, 2),
+	INIT_REGMAP_IRQ(AXP152, PEK_SHORT,		1, 1),
+	INIT_REGMAP_IRQ(AXP152, PEK_LONG,		1, 0),
+	INIT_REGMAP_IRQ(AXP152, TIMER,			2, 7),
+	INIT_REGMAP_IRQ(AXP152, PEK_RIS_EDGE,		2, 6),
+	INIT_REGMAP_IRQ(AXP152, PEK_FAL_EDGE,		2, 5),
+	INIT_REGMAP_IRQ(AXP152, GPIO3_INPUT,		2, 3),
+	INIT_REGMAP_IRQ(AXP152, GPIO2_INPUT,		2, 2),
+	INIT_REGMAP_IRQ(AXP152, GPIO1_INPUT,		2, 1),
+	INIT_REGMAP_IRQ(AXP152, GPIO0_INPUT,		2, 0),
+};
+
 static const struct regmap_irq axp20x_regmap_irqs[] = {
 	INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V,		0, 7),
 	INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN,		0, 6),
@@ -293,6 +362,7 @@
 };
 
 static const struct of_device_id axp20x_of_match[] = {
+	{ .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
 	{ .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
 	{ .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
 	{ .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
@@ -317,6 +387,18 @@
 };
 MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
 
+static const struct regmap_irq_chip axp152_regmap_irq_chip = {
+	.name			= "axp152_irq_chip",
+	.status_base		= AXP152_IRQ1_STATE,
+	.ack_base		= AXP152_IRQ1_STATE,
+	.mask_base		= AXP152_IRQ1_EN,
+	.mask_invert		= true,
+	.init_ack_masked	= true,
+	.irqs			= axp152_regmap_irqs,
+	.num_irqs		= ARRAY_SIZE(axp152_regmap_irqs),
+	.num_regs		= 3,
+};
+
 static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
 	.name			= "axp20x_irq_chip",
 	.status_base		= AXP20X_IRQ1_STATE,
@@ -357,11 +439,16 @@
 
 static struct mfd_cell axp20x_cells[] = {
 	{
-		.name			= "axp20x-pek",
-		.num_resources		= ARRAY_SIZE(axp20x_pek_resources),
-		.resources		= axp20x_pek_resources,
+		.name		= "axp20x-pek",
+		.num_resources	= ARRAY_SIZE(axp20x_pek_resources),
+		.resources	= axp20x_pek_resources,
 	}, {
-		.name			= "axp20x-regulator",
+		.name		= "axp20x-regulator",
+	}, {
+		.name		= "axp20x-usb-power-supply",
+		.of_compatible	= "x-powers,axp202-usb-power-supply",
+		.num_resources	= ARRAY_SIZE(axp20x_usb_power_supply_resources),
+		.resources	= axp20x_usb_power_supply_resources,
 	},
 };
 
@@ -375,6 +462,14 @@
 	},
 };
 
+static struct mfd_cell axp152_cells[] = {
+	{
+		.name			= "axp20x-pek",
+		.num_resources		= ARRAY_SIZE(axp152_pek_resources),
+		.resources		= axp152_pek_resources,
+	},
+};
+
 static struct resource axp288_adc_resources[] = {
 	{
 		.name  = "GPADC",
@@ -513,6 +608,12 @@
 	}
 
 	switch (axp20x->variant) {
+	case AXP152_ID:
+		axp20x->nr_cells = ARRAY_SIZE(axp152_cells);
+		axp20x->cells = axp152_cells;
+		axp20x->regmap_cfg = &axp152_regmap_config;
+		axp20x->regmap_irq_chip = &axp152_regmap_irq_chip;
+		break;
 	case AXP202_ID:
 	case AXP209_ID:
 		axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
@@ -613,7 +714,6 @@
 static struct i2c_driver axp20x_i2c_driver = {
 	.driver = {
 		.name	= "axp20x",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(axp20x_of_match),
 		.acpi_match_table = ACPI_PTR(axp20x_acpi_match),
 	},
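
Each INIT_REGMAP_IRQ(AXP152, ...) line above is shorthand for a
designated initializer pairing a status-register offset with a bit
mask, per the macro shown in this file. For example, the PEK_RIS_EDGE
entry expands to roughly:

#include <linux/regmap.h>

static const struct regmap_irq example_expansion[] = {
	[AXP152_IRQ_PEK_RIS_EDGE] = {
		.reg_offset = 2,	/* third IRQ status/enable register */
		.mask = BIT(6),
	},
};

The matching regmap_irq_chip then walks these entries against the
status registers starting at AXP152_IRQ1_STATE, acking and dispatching
whichever bits are set.
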
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index e334de0..da2af5b 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -117,7 +117,6 @@
 static struct i2c_driver bcm590xx_i2c_driver = {
 	.driver = {
 		   .name = "bcm590xx",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(bcm590xx_of_match),
 	},
 	.probe = bcm590xx_i2c_probe,
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index b9a0963..d06e4b4 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -353,7 +353,6 @@
 static struct i2c_driver cros_ec_driver = {
 	.driver	= {
 		.name	= "cros-ec-i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_i2c_pm_ops,
 	},
 	.probe		= cros_ec_i2c_probe,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 16f228d..30a296b 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -701,6 +701,12 @@
 static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
 			 cros_ec_spi_resume);
 
+static const struct of_device_id cros_ec_spi_of_match[] = {
+	{ .compatible = "google,cros-ec-spi", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
+
 static const struct spi_device_id cros_ec_spi_id[] = {
 	{ "cros-ec-spi", 0 },
 	{ }
@@ -710,6 +716,7 @@
 static struct spi_driver cros_ec_driver_spi = {
 	.driver	= {
 		.name	= "cros-ec-spi",
+		.of_match_table = of_match_ptr(cros_ec_spi_of_match),
 		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_spi_pm_ops,
 	},
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index e0a2e0e..ef7fe2a 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -550,7 +550,6 @@
 static struct i2c_driver da903x_driver = {
 	.driver	= {
 		.name	= "da903x",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da903x_probe,
 	.remove		= da903x_remove,
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ec39287..0288700 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -195,7 +195,6 @@
 	.id_table = da9052_i2c_id,
 	.driver = {
 		.name = "da9052",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_OF
 		.of_match_table = dialog_dt_ids,
 #endif
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index d4d4c16..b53e100 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -79,7 +79,6 @@
 	.id_table = da9055_i2c_id,
 	.driver = {
 		.name = "da9055-pmic",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 };
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
new file mode 100644
index 0000000..f80d947
--- /dev/null
+++ b/drivers/mfd/da9062-core.c
@@ -0,0 +1,533 @@
+/*
+ * Core, IRQ and I2C device driver for DA9062 PMIC
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+#include <linux/regulator/of_regulator.h>
+
+#define	DA9062_REG_EVENT_A_OFFSET	0
+#define	DA9062_REG_EVENT_B_OFFSET	1
+#define	DA9062_REG_EVENT_C_OFFSET	2
+
+static struct regmap_irq da9062_irqs[] = {
+	/* EVENT A */
+	[DA9062_IRQ_ONKEY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_NONKEY_MASK,
+	},
+	[DA9062_IRQ_ALARM] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_ALARM_MASK,
+	},
+	[DA9062_IRQ_TICK] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_TICK_MASK,
+	},
+	[DA9062_IRQ_WDG_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_WDG_WARN_MASK,
+	},
+	[DA9062_IRQ_SEQ_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_SEQ_RDY_MASK,
+	},
+	/* EVENT B */
+	[DA9062_IRQ_TEMP] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_TEMP_MASK,
+	},
+	[DA9062_IRQ_LDO_LIM] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_LDO_LIM_MASK,
+	},
+	[DA9062_IRQ_DVC_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_DVC_RDY_MASK,
+	},
+	[DA9062_IRQ_VDD_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_VDD_WARN_MASK,
+	},
+	/* EVENT C */
+	[DA9062_IRQ_GPI0] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI0_MASK,
+	},
+	[DA9062_IRQ_GPI1] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI1_MASK,
+	},
+	[DA9062_IRQ_GPI2] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI2_MASK,
+	},
+	[DA9062_IRQ_GPI3] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI3_MASK,
+	},
+	[DA9062_IRQ_GPI4] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI4_MASK,
+	},
+};
+
+static struct regmap_irq_chip da9062_irq_chip = {
+	.name = "da9062-irq",
+	.irqs = da9062_irqs,
+	.num_irqs = DA9062_NUM_IRQ,
+	.num_regs = 3,
+	.status_base = DA9062AA_EVENT_A,
+	.mask_base = DA9062AA_IRQ_MASK_A,
+	.ack_base = DA9062AA_EVENT_A,
+};
+
+static struct resource da9062_core_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_VDD_WARN, 1, "VDD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_regulators_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_LDO_LIM, 1, "LDO_LIM", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_thermal_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_TEMP, 1, "THERMAL", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_wdt_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_WDG_WARN, 1, "WD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_rtc_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ALARM, 1, "ALARM", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_TICK, 1, "TICK", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_onkey_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
+};
+
+static const struct mfd_cell da9062_devs[] = {
+	{
+		.name		= "da9062-core",
+		.num_resources	= ARRAY_SIZE(da9062_core_resources),
+		.resources	= da9062_core_resources,
+	},
+	{
+		.name		= "da9062-regulators",
+		.num_resources	= ARRAY_SIZE(da9062_regulators_resources),
+		.resources	= da9062_regulators_resources,
+	},
+	{
+		.name		= "da9062-watchdog",
+		.num_resources	= ARRAY_SIZE(da9062_wdt_resources),
+		.resources	= da9062_wdt_resources,
+		.of_compatible  = "dlg,da9062-wdt",
+	},
+	{
+		.name		= "da9062-thermal",
+		.num_resources	= ARRAY_SIZE(da9062_thermal_resources),
+		.resources	= da9062_thermal_resources,
+		.of_compatible  = "dlg,da9062-thermal",
+	},
+	{
+		.name		= "da9062-rtc",
+		.num_resources	= ARRAY_SIZE(da9062_rtc_resources),
+		.resources	= da9062_rtc_resources,
+		.of_compatible  = "dlg,da9062-rtc",
+	},
+	{
+		.name		= "da9062-onkey",
+		.num_resources	= ARRAY_SIZE(da9062_onkey_resources),
+		.resources	= da9062_onkey_resources,
+		.of_compatible = "dlg,da9062-onkey",
+	},
+};
+
+static int da9062_clear_fault_log(struct da9062 *chip)
+{
+	int ret;
+	int fault_log;
+
+	ret = regmap_read(chip->regmap, DA9062AA_FAULT_LOG, &fault_log);
+	if (ret < 0)
+		return ret;
+
+	if (fault_log) {
+		if (fault_log & DA9062AA_TWD_ERROR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TWD_ERROR\n");
+		if (fault_log & DA9062AA_POR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: POR\n");
+		if (fault_log & DA9062AA_VDD_FAULT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_FAULT\n");
+		if (fault_log & DA9062AA_VDD_START_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_START\n");
+		if (fault_log & DA9062AA_TEMP_CRIT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TEMP_CRIT\n");
+		if (fault_log & DA9062AA_KEY_RESET_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: KEY_RESET\n");
+		if (fault_log & DA9062AA_NSHUTDOWN_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: NSHUTDOWN\n");
+		if (fault_log & DA9062AA_WAIT_SHUT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: WAIT_SHUT\n");
+
+		ret = regmap_write(chip->regmap, DA9062AA_FAULT_LOG,
+				   fault_log);
+	}
+
+	return ret;
+}
+
+static int da9062_get_device_type(struct da9062 *chip)
+{
+	int device_id, variant_id, variant_mrc;
+	int ret;
+
+	ret = regmap_read(chip->regmap, DA9062AA_DEVICE_ID, &device_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip ID.\n");
+		return -EIO;
+	}
+	if (device_id != DA9062_PMIC_DEVICE_ID) {
+		dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
+		return -ENODEV;
+	}
+
+	ret = regmap_read(chip->regmap, DA9062AA_VARIANT_ID, &variant_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip variant id.\n");
+		return -EIO;
+	}
+
+	dev_info(chip->dev,
+		 "Device detected (device-ID: 0x%02X, var-ID: 0x%02X)\n",
+		 device_id, variant_id);
+
+	variant_mrc = (variant_id & DA9062AA_MRC_MASK) >> DA9062AA_MRC_SHIFT;
+
+	if (variant_mrc < DA9062_PMIC_VARIANT_MRC_AA) {
+		dev_err(chip->dev,
+			"Cannot support variant MRC: 0x%02X\n", variant_mrc);
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+static const struct regmap_range da9062_aa_readable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_INTERFACE,
+		.range_max = DA9062AA_CONFIG_E,
+	}, {
+		.range_min = DA9062AA_CONFIG_G,
+		.range_max = DA9062AA_CONFIG_K,
+	}, {
+		.range_min = DA9062AA_CONFIG_M,
+		.range_max = DA9062AA_CONFIG_M,
+	}, {
+		.range_min = DA9062AA_TRIM_CLDR,
+		.range_max = DA9062AA_GP_ID_19,
+	}, {
+		.range_min = DA9062AA_DEVICE_ID,
+		.range_max = DA9062AA_CONFIG_ID,
+	},
+};
+
+static const struct regmap_range da9062_aa_writeable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_PAGE_CON,
+	}, {
+		.range_min = DA9062AA_FAULT_LOG,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_ALARM_Y,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_GP_ID_0,
+		.range_max = DA9062AA_GP_ID_19,
+	},
+};
+
+static const struct regmap_range da9062_aa_volatile_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_F,
+		.range_max = DA9062AA_CONTROL_F,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	},
+};
+
+static const struct regmap_access_table da9062_aa_readable_table = {
+	.yes_ranges = da9062_aa_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_readable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_writeable_table = {
+	.yes_ranges = da9062_aa_writeable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_writeable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_volatile_table = {
+	.yes_ranges = da9062_aa_volatile_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9062_range_cfg[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_CONFIG_ID,
+		.selector_reg = DA9062AA_PAGE_CON,
+		.selector_mask = 1 << DA9062_I2C_PAGE_SEL_SHIFT,
+		.selector_shift = DA9062_I2C_PAGE_SEL_SHIFT,
+		.window_start = 0,
+		.window_len = 256,
+	}
+};
+
+static struct regmap_config da9062_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.ranges = da9062_range_cfg,
+	.num_ranges = ARRAY_SIZE(da9062_range_cfg),
+	.max_register = DA9062AA_CONFIG_ID,
+	.cache_type = REGCACHE_RBTREE,
+	.rd_table = &da9062_aa_readable_table,
+	.wr_table = &da9062_aa_writeable_table,
+	.volatile_table = &da9062_aa_volatile_table,
+};
+
+static int da9062_i2c_probe(struct i2c_client *i2c,
+	const struct i2c_device_id *id)
+{
+	struct da9062 *chip;
+	unsigned int irq_base;
+	int ret;
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, chip);
+	chip->dev = &i2c->dev;
+
+	if (!i2c->irq) {
+		dev_err(chip->dev, "No IRQ configured\n");
+		return -EINVAL;
+	}
+
+	chip->regmap = devm_regmap_init_i2c(i2c, &da9062_regmap_config);
+	if (IS_ERR(chip->regmap)) {
+		ret = PTR_ERR(chip->regmap);
+		dev_err(chip->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = da9062_clear_fault_log(chip);
+	if (ret < 0)
+		dev_warn(chip->dev, "Cannot clear fault log\n");
+
+	ret = da9062_get_device_type(chip);
+	if (ret)
+		return ret;
+
+	ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+			-1, &da9062_irq_chip,
+			&chip->regmap_irq);
+	if (ret) {
+		dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
+			i2c->irq, ret);
+		return ret;
+	}
+
+	irq_base = regmap_irq_chip_get_base(chip->regmap_irq);
+
+	ret = mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE, da9062_devs,
+			      ARRAY_SIZE(da9062_devs), NULL, irq_base,
+			      NULL);
+	if (ret) {
+		dev_err(chip->dev, "Cannot register child devices\n");
+		regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int da9062_i2c_remove(struct i2c_client *i2c)
+{
+	struct da9062 *chip = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(chip->dev);
+	regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+
+	return 0;
+}
+
+static const struct i2c_device_id da9062_i2c_id[] = {
+	{ "da9062", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
+
+static const struct of_device_id da9062_dt_ids[] = {
+	{ .compatible = "dlg,da9062", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
+static struct i2c_driver da9062_i2c_driver = {
+	.driver = {
+		.name = "da9062",
+		.of_match_table = of_match_ptr(da9062_dt_ids),
+	},
+	.probe    = da9062_i2c_probe,
+	.remove   = da9062_i2c_remove,
+	.id_table = da9062_i2c_id,
+};
+
+module_i2c_driver(da9062_i2c_driver);
+
+MODULE_DESCRIPTION("Core device driver for Dialog DA9062");
+MODULE_AUTHOR("Steve Twiss <stwiss.opensource@diasemi.com>");
+MODULE_LICENSE("GPL");
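
On the consumer side, mfd_add_devices() remaps each DEFINE_RES_NAMED
IRQ resource through the irq_base returned by
regmap_irq_chip_get_base() before handing it to the spawned platform
device. A hedged sketch of how a child driver, here a hypothetical
onkey driver, would claim its interrupt (example_* names are
placeholders):

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t example_onkey_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_onkey_probe(struct platform_device *pdev)
{
	int irq;

	/* resolves the "ONKEY" entry in da9062_onkey_resources */
	irq = platform_get_irq_byname(pdev, "ONKEY");
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_onkey_irq, IRQF_ONESHOT,
					 "da9062-onkey", NULL);
}
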
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 6f3a7c0..2d4e3e0 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -264,7 +264,6 @@
 static struct i2c_driver da9063_i2c_driver = {
 	.driver = {
 		.name = "da9063",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9063_dt_ids),
 	},
 	.probe    = da9063_i2c_probe,
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index eaf1ec9..2630263 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -77,6 +77,10 @@
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_UVOV,
 	},
+	[DA9063_IRQ_DVC_RDY] = {
+		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
+		.mask = DA9063_M_DVC_RDY,
+	},
 	[DA9063_IRQ_VDD_MON] = {
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_VDD_MON,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 8b14740..e6e4bac 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2654,7 +2654,6 @@
 {
 	irq_set_chip_and_handler(virq, &prcmu_irq_chip,
 				handle_simple_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 5991fad..a76eb6e 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -207,7 +207,7 @@
 
 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct pcap_chip *pcap = irq_get_handler_data(irq);
+	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	queue_work(pcap->workqueue, &pcap->isr_work);
@@ -463,11 +463,7 @@
 	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
 		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
 		irq_set_chip_data(i, pcap);
-#ifdef CONFIG_ARM
-		set_irq_flags(i, IRQF_VALID);
-#else
-		irq_set_noprobe(i);
-#endif
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/* mask/ack all PCAP interrupts */
@@ -476,8 +472,7 @@
 	pcap->msr = PCAP_MASK_ALL_INTERRUPT;
 
 	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
-	irq_set_handler_data(spi->irq, pcap);
-	irq_set_chained_handler(spi->irq, pcap_irq_handler);
+	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
 	irq_set_irq_wake(spi->irq, 1);
 
 	/* ADC */
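
irq_set_chained_handler_and_data() installs the flow handler and its
data in one operation; with the old two-call sequence the chained
handler could conceivably run before its handler data was in place.
The same conversion repeats in htc-egpio and jz4740-adc below. The
pattern in isolation:

#include <linux/irq.h>

static void example_install_demux(unsigned int irq, void *data,
				  irq_flow_handler_t handler)
{
	/* was:
	 *	irq_set_handler_data(irq, data);
	 *	irq_set_chained_handler(irq, handler);
	 */
	irq_set_chained_handler_and_data(irq, handler, data);
}
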
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 49f39fe..9131cdc 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -350,11 +350,11 @@
 			irq_set_chip_and_handler(irq, &egpio_muxed_chip,
 						 handle_simple_irq);
 			irq_set_chip_data(irq, ei);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
-		irq_set_handler_data(ei->chained_irq, ei);
-		irq_set_chained_handler(ei->chained_irq, egpio_handler);
+		irq_set_chained_handler_and_data(ei->chained_irq,
+						 egpio_handler, ei);
 		ack_irqs(ei);
 
 		device_init_wakeup(&pdev->dev, 1);
@@ -376,7 +376,7 @@
 		irq_end = ei->irq_start + ei->nirqs;
 		for (irq = ei->irq_start; irq < irq_end; irq++) {
 			irq_set_chip_and_handler(irq, NULL, NULL);
-			set_irq_flags(irq, 0);
+			irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_chained_handler(ei->chained_irq, NULL);
 		device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index b54baad..1bd5b04 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -330,11 +330,7 @@
 		irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
 					 handle_simple_irq);
 		irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#else
-		irq_set_probe(irq);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	return ret;
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 7b50b6b..d9e15cf 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -24,8 +24,25 @@
 #include <linux/acpi.h>
 #include <linux/regmap.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/gpio/machine.h>
+#include <linux/pwm.h>
 #include "intel_soc_pmic_core.h"
 
+/* Lookup table for the Panel Enable/Disable line as GPIO signals */
+static struct gpiod_lookup_table panel_gpio_table = {
+	/* Intel GFX is consumer */
+	.dev_id = "0000:00:02.0",
+	.table = {
+		/* Panel EN/DISABLE */
+		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+	},
+};
+
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+	PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
 static int intel_soc_pmic_find_gpio_irq(struct device *dev)
 {
 	struct gpio_desc *desc;
@@ -85,6 +102,12 @@
 	if (ret)
 		dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
 
+	/* Add lookup table binding for Panel Control to the GPIO Chip */
+	gpiod_add_lookup_table(&panel_gpio_table);
+
+	/* Add lookup table for crc-pwm */
+	pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	ret = mfd_add_devices(dev, -1, config->cell_dev,
 			      config->n_cell_devs, NULL, 0,
 			      regmap_irq_get_domain(pmic->irq_chip_data));
@@ -104,6 +127,12 @@
 
 	regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
 
+	/* Remove lookup table for Panel Control from the GPIO Chip */
+	gpiod_remove_lookup_table(&panel_gpio_table);
+
+	/* remove crc-pwm lookup table */
+	pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	mfd_remove_devices(&i2c->dev);
 
 	return 0;
@@ -147,7 +176,7 @@
 MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
 
 #if defined(CONFIG_ACPI)
-static struct acpi_device_id intel_soc_pmic_acpi_match[] = {
+static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
 	{"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc},
 	{ },
 };
@@ -157,7 +186,6 @@
 static struct i2c_driver intel_soc_pmic_i2c_driver = {
 	.driver = {
 		.name = "intel_soc_pmic_i2c",
-		.owner = THIS_MODULE,
 		.pm = &intel_soc_pmic_pm_ops,
 		.acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
 	},
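
Both lookup tables are keyed to PCI device 0000:00:02.0, so the
graphics driver can resolve them with the plain consumer API. A hedged
sketch of the consumer side, assuming gfx_dev is that PCI device's
struct device (the con_id strings "panel" and "pwm_backlight" come
from the tables above; the rest is illustrative):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/pwm.h>

static int example_get_panel_resources(struct device *gfx_dev)
{
	struct gpio_desc *panel;
	struct pwm_device *pwm;

	panel = gpiod_get(gfx_dev, "panel", GPIOD_OUT_HIGH);
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	pwm = pwm_get(gfx_dev, "pwm_backlight");
	if (IS_ERR(pwm)) {
		gpiod_put(panel);
		return PTR_ERR(pwm);
	}

	/* ... drive the panel enable line and backlight PWM ... */

	pwm_put(pwm);
	gpiod_put(panel);
	return 0;
}
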
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 7436075..4a74948 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -109,6 +109,9 @@
 	{
 		.name = "crystal_cove_pmic",
 	},
+	{
+		.name = "crystal_cove_pwm",
+	},
 };
 
 static const struct regmap_config crystal_cove_regmap_config = {
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 8df3266..a41859c 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -53,8 +53,8 @@
 	tx->buf[bp++] = checksum;
 	tx->len = bp;
 	tx->index = 0;
-	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       tx->buf, tx->len, true);
+	print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     tx->buf, tx->len, true);
 
 	/* Enable interrupt */
 	val = readl(micro->base + UTCR3);
@@ -242,7 +242,7 @@
 	return data[1] << 8 | data[0];
 }
 
-static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
+static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
 {
 	u8 dump[256];
 	char *str;
@@ -250,7 +250,7 @@
 	ipaq_micro_eeprom_read(micro, 0, 128, dump);
 	str = ipaq_micro_str(dump, 10);
 	if (str) {
-		dev_info(micro->dev, "HM version %s\n", str);
+		dev_info(micro->dev, "HW version %s\n", str);
 		kfree(str);
 	}
 	str = ipaq_micro_str(dump+10, 40);
@@ -281,8 +281,8 @@
 	dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
 	dev_info(micro->dev, "screen: %u x %u\n",
 		 ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96));
-	print_hex_dump(KERN_DEBUG, "eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       dump, 256, true);
+	print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     dump, 256, true);
 
 }
 
@@ -386,7 +386,7 @@
 	return 0;
 }
 
-static int micro_probe(struct platform_device *pdev)
+static int __init micro_probe(struct platform_device *pdev)
 {
 	struct ipaq_micro *micro;
 	struct resource *res;
@@ -448,21 +448,6 @@
 	return 0;
 }
 
-static int micro_remove(struct platform_device *pdev)
-{
-	struct ipaq_micro *micro = platform_get_drvdata(pdev);
-	u32 val;
-
-	mfd_remove_devices(&pdev->dev);
-
-	val = readl(micro->base + UTCR3);
-	val &= ~(UTCR3_RXE | UTCR3_RIE); /* disable receive interrupt */
-	val &= ~(UTCR3_TXE | UTCR3_TIE); /* disable transmit interrupt */
-	writel(val, micro->base + UTCR3);
-
-	return 0;
-}
-
 static const struct dev_pm_ops micro_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(NULL, micro_resume)
 };
@@ -471,12 +456,7 @@
 	.driver   = {
 		.name	= "ipaq-h3xxx-micro",
 		.pm	= &micro_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
-	.probe    = micro_probe,
-	.remove   = micro_remove,
-	/* .shutdown = micro_suspend, // FIXME */
 };
-module_platform_driver(micro_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("driver for iPAQ Atmel micro core and backlight");
+builtin_platform_driver_probe(micro_device_driver, micro_probe);
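
builtin_platform_driver_probe() suits a driver whose probe routine is
__init, as micro_probe() now is: it expands to roughly the following,
and platform_driver_probe() guarantees the device is never bound again
once the init sections are gone (which is also why the driver now sets
suppress_bind_attrs above).

static int __init micro_device_driver_init(void)
{
	return platform_driver_probe(&micro_device_driver, micro_probe);
}
device_initcall(micro_device_driver_init);
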
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index b31c54e..5bb49f0 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,12 +273,12 @@
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_ack = irq_gc_ack_set_bit;
 
-	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+	irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0,
+				IRQ_NOPROBE | IRQ_LEVEL);
 
 	adc->gc = gc;
 
-	irq_set_handler_data(adc->irq, gc);
-	irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
+	irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc);
 
 	writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
 	writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
@@ -308,8 +308,7 @@
 
 	irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
 	kfree(adc->gc);
-	irq_set_handler_data(adc->irq, NULL);
-	irq_set_chained_handler(adc->irq, NULL);
+	irq_set_chained_handler_and_data(adc->irq, NULL, NULL);
 
 	iounmap(adc->base);
 	release_mem_region(adc->mem->start, resource_size(adc->mem));
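
The irq_set_chained_handler_and_data() conversions repeated across this series replace the non-atomic pair irq_set_handler_data() + irq_set_chained_handler(), which left a window where the chained handler could run before (or, on teardown, after) its handler data was valid; the combined helper updates both under the descriptor lock. A minimal hypothetical sketch, using the two-argument flow-handler signature still current in this series:

#include <linux/irq.h>

static void example_demux(unsigned int irq, struct irq_desc *desc)
{
	struct example_chip *chip = irq_desc_get_handler_data(desc);

	/* ... read hardware status, generic_handle_irq() the children ... */
}

static void example_attach(unsigned int parent_irq, void *priv)
{
	/* handler and data installed together, under one descriptor lock */
	irq_set_chained_handler_and_data(parent_irq, example_demux, priv);
}

static void example_detach(unsigned int parent_irq)
{
	irq_set_chained_handler_and_data(parent_irq, NULL, NULL);
}
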
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 8057849..463f4ea 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -501,6 +501,14 @@
 
 static struct dmi_system_id kempld_dmi_table[] __initdata = {
 	{
+		.ident = "BBL6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-bBL6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "BHL6",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -517,6 +525,14 @@
 		.driver_data = (void *)&kempld_platform_data_generic,
 		.callback = kempld_create_platform_device,
 	}, {
+		.ident = "CBW6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-cBW6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "CCR2",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index d42fbb6..643f375 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -640,7 +640,6 @@
 static struct i2c_driver lm3533_i2c_driver = {
 	.driver = {
 		   .name = "lm3533",
-		   .owner = THIS_MODULE,
 	},
 	.id_table	= lm3533_i2c_ids,
 	.probe		= lm3533_i2c_probe,
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index 335b930..eecbb13 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -154,7 +154,6 @@
 	.remove = lp3943_remove,
 	.driver = {
 		.name = "lp3943",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(lp3943_of_match),
 	},
 	.id_table = lp3943_ids,
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index a87f2b5..c7a9825 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -141,12 +141,7 @@
 	irq_set_chip_data(virq, irqd);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index a30bc15..acf6165 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -221,7 +221,6 @@
 static struct i2c_driver lp8788_driver = {
 	.driver = {
 		.name = "lp8788",
-		.owner = THIS_MODULE,
 	},
 	.probe = lp8788_probe,
 	.remove = lp8788_remove,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 8de3439..c5a9a08 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -66,6 +66,7 @@
 #include <linux/pci.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #define ACPIBASE		0x40
 #define ACPIBASE_GPE_OFF	0x28
@@ -835,9 +836,31 @@
 	priv->actrl_pbase_save = reg_save;
 }
 
-static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
+static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
+{
+	struct itco_wdt_platform_data *pdata;
+	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct lpc_ich_info *info;
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_WDT];
+
+	pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	info = &lpc_chipset_info[priv->chipset];
+
+	pdata->version = info->iTCO_version;
+	strlcpy(pdata->name, info->name, sizeof(pdata->name));
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+	return 0;
+}
+
+static void lpc_ich_finalize_gpio_cell(struct pci_dev *dev)
 {
 	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_GPIO];
 
 	cell->platform_data = &lpc_chipset_info[priv->chipset];
 	cell->pdata_size = sizeof(struct lpc_ich_info);
@@ -933,7 +956,7 @@
 	lpc_chipset_info[priv->chipset].use_gpio = ret;
 	lpc_ich_enable_gpio_space(dev);
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+	lpc_ich_finalize_gpio_cell(dev);
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
 
@@ -1007,7 +1030,10 @@
 		res->end = base_addr + ACPIBASE_PMC_END;
 	}
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+	ret = lpc_ich_finalize_wdt_cell(dev);
+	if (ret)
+		goto wdt_done;
+
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
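
The new lpc_ich_finalize_wdt_cell() above relies on how MFD cells hand platform data to their children: the MFD core copies .platform_data (.pdata_size bytes) into the platform device it creates, so the watchdog driver can read the iTCO version and name without reaching back into the parent. A reduced, hypothetical sketch of that mechanism (the cell name mirrors the real one, the rest is illustrative):

#include <linux/mfd/core.h>
#include <linux/platform_data/itco_wdt.h>
#include <linux/platform_device.h>

static struct mfd_cell example_wdt_cell = {
	.name = "iTCO_wdt",
};

static int example_add_wdt(struct device *parent,
			   struct itco_wdt_platform_data *pdata)
{
	/* the MFD core copies pdata_size bytes into the child device */
	example_wdt_cell.platform_data = pdata;
	example_wdt_cell.pdata_size = sizeof(*pdata);

	return mfd_add_devices(parent, PLATFORM_DEVID_AUTO,
			       &example_wdt_cell, 1, NULL, 0, NULL);
}
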
 
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 3bf8def..56e216d 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -532,7 +532,6 @@
 static struct i2c_driver max14577_i2c_driver = {
 	.driver = {
 		.name = "max14577",
-		.owner = THIS_MODULE,
 		.pm = &max14577_pm,
 		.of_match_table = max14577_dt_match,
 	},
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 760d08d..d19be64 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -391,7 +391,6 @@
 static struct i2c_driver max77686_i2c_driver = {
 	.driver = {
 		   .name = "max77686",
-		   .owner = THIS_MODULE,
 		   .pm = &max77686_pm,
 		   .of_match_table = of_match_ptr(max77686_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 67bc53f..007f729 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -373,7 +373,6 @@
 static struct i2c_driver max77693_i2c_driver = {
 	.driver = {
 		   .name = "max77693",
-		   .owner = THIS_MODULE,
 		   .pm = &max77693_pm,
 		   .of_match_table = of_match_ptr(max77693_dt_match),
 	},
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 232749c..2974c8b1 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -321,7 +321,6 @@
 static struct i2c_driver max8907_i2c_driver = {
 	.driver = {
 		.name = "max8907",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max8907_of_match),
 	},
 	.probe = max8907_i2c_probe,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 8520bd6..fd8b15c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -650,11 +650,8 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index c880c89..b0fe810 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -245,7 +245,6 @@
 static struct i2c_driver max8925_driver = {
 	.driver	= {
 		.name	= "max8925",
-		.owner	= THIS_MODULE,
 		.pm     = &max8925_pm_ops,
 		.of_match_table = max8925_dt_ids,
 	},
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index d3025be..b95a46d 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -113,14 +113,14 @@
 
 static void max8997_irq_lock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&max8997->irqlock);
 }
 
 static void max8997_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
@@ -140,26 +140,25 @@
 }
 
 static const inline struct max8997_irq_data *
-irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
+irq_to_max8997_irq(struct max8997_dev *max8997, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8997_irqs[data->hwirq];
 }
 
 static void max8997_irq_mask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] |= irq_data->mask;
 }
 
 static void max8997_irq_unmask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
 }
@@ -295,11 +294,8 @@
 	irq_set_chip_data(irq, max8997);
 	irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
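
The pattern behind the max8997 (and later max8998, mt6397, twl6030) changes: irq_chip callbacks already receive the struct irq_data for the interrupt, so the chip data can be read straight from it instead of looking the descriptor up again by number with irq_get_chip_data(data->irq). A hypothetical sketch:

#include <linux/irq.h>
#include <linux/mutex.h>

struct example_chip {
	struct mutex lock;
};

static void example_irq_bus_lock(struct irq_data *data)
{
	/* was: irq_get_chip_data(data->irq) -- a second descriptor lookup */
	struct example_chip *chip = irq_data_get_irq_chip_data(data);

	mutex_lock(&chip->lock);
}
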
 
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 595364e..d3cfa9cf 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -508,7 +508,6 @@
 static struct i2c_driver max8997_i2c_driver = {
 	.driver = {
 		   .name = "max8997",
-		   .owner = THIS_MODULE,
 		   .pm = &max8997_pm,
 		   .of_match_table = of_match_ptr(max8997_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 3702056..90bad9f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -98,9 +98,8 @@
 };
 
 static inline struct max8998_irq_data *
-irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
+irq_to_max8998_irq(struct max8998_dev *max8998, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8998_irqs[data->hwirq];
 }
 
@@ -134,8 +133,7 @@
 static void max8998_irq_unmask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
@@ -143,8 +141,7 @@
 static void max8998_irq_mask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
@@ -206,11 +203,8 @@
 	irq_set_chip_data(irq, max8998);
 	irq_set_chip_and_handler(irq, &max8998_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index a37cb74..a7afe3b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -377,7 +377,6 @@
 static struct i2c_driver max8998_i2c_driver = {
 	.driver = {
 		   .name = "max8998",
-		   .owner = THIS_MODULE,
 		   .pm = &max8998_pm,
 		   .of_match_table = of_match_ptr(max8998_dt_match),
 	},
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 68b8448..67e4c9a 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -96,7 +96,6 @@
 static struct i2c_driver mc13xxx_i2c_driver = {
 	.id_table = mc13xxx_i2c_device_id,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "mc13xxx",
 		.of_match_table = mc13xxx_dt_ids,
 	},
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 03929a6..1749c1c 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -60,14 +60,14 @@
 
 static void mt6397_irq_lock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&mt6397->irqlock);
 }
 
 static void mt6397_irq_sync_unlock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
 	regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
@@ -77,7 +77,7 @@
 
 static void mt6397_irq_disable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
@@ -86,19 +86,38 @@
 
 static void mt6397_irq_enable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
 	mt6397->irq_masks_cur[reg] |= BIT(shift);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
+	int shift = irq_data->hwirq & 0xf;
+	int reg = irq_data->hwirq >> 4;
+
+	if (on)
+		mt6397->wake_mask[reg] |= BIT(shift);
+	else
+		mt6397->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+#else
+#define mt6397_irq_set_wake NULL
+#endif
+
 static struct irq_chip mt6397_irq_chip = {
 	.name = "mt6397-irq",
 	.irq_bus_lock = mt6397_irq_lock,
 	.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
 	.irq_enable = mt6397_irq_enable,
 	.irq_disable = mt6397_irq_disable,
+	.irq_set_wake = mt6397_irq_set_wake,
 };
 
 static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
@@ -142,11 +161,7 @@
 	irq_set_chip_data(irq, mt6397);
 	irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -183,6 +198,35 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_suspend(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+
+	enable_irq_wake(chip->irq);
+
+	return 0;
+}
+
+static int mt6397_irq_resume(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+
+	disable_irq_wake(chip->irq);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
+			mt6397_irq_resume);
+
 static int mt6397_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -237,6 +281,7 @@
 	.driver = {
 		.name = "mt6397",
 		.of_match_table = of_match_ptr(mt6397_of_match),
+		.pm = &mt6397_pm_ops,
 	},
 };
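
The mt6397 wake support added above follows the usual PMIC shape: .irq_set_wake only records which sources may wake the system, suspend programs the hardware mask registers from that wake bitmap, and resume restores the normal runtime masks. The bookkeeping, reduced to a hedged sketch with hypothetical sizes and names:

#include <linux/bitops.h>
#include <linux/irq.h>

#define EXAMPLE_NR_REGS	2	/* hypothetical: 2 x 16-source mask registers */

struct example_chip {
	u32 wake_mask[EXAMPLE_NR_REGS];
	u32 irq_masks_cur[EXAMPLE_NR_REGS];
};

static int example_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct example_chip *chip = irq_data_get_irq_chip_data(d);
	int reg = d->hwirq >> 4;		/* 16 sources per register */
	u32 bit = BIT(d->hwirq & 0xf);

	if (on)
		chip->wake_mask[reg] |= bit;
	else
		chip->wake_mask[reg] &= ~bit;

	return 0;
}
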
 
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 28cb048..8f8bacb 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -719,7 +719,6 @@
 	.driver = {
 		   .name = "palmas",
 		   .of_match_table = of_palmas_match_tbl,
-		   .owner = THIS_MODULE,
 	},
 	.probe = palmas_i2c_probe,
 	.remove = palmas_i2c_remove,
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 5a92646..59502d0 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -236,11 +236,49 @@
 	return pm8xxx_config_irq(chip, block, config);
 }
 
+static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
+					enum irqchip_irq_state which,
+					bool *state)
+{
+	struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+	unsigned int pmirq = irqd_to_hwirq(d);
+	unsigned int bits;
+	int irq_bit;
+	u8 block;
+	int rc;
+
+	if (which != IRQCHIP_STATE_LINE_LEVEL)
+		return -EINVAL;
+
+	block = pmirq / 8;
+	irq_bit = pmirq % 8;
+
+	spin_lock(&chip->pm_irq_lock);
+	rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+	if (rc) {
+		pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
+		goto bail;
+	}
+
+	rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+	if (rc) {
+		pr_err("Failed Reading Status rc=%d\n", rc);
+		goto bail;
+	}
+
+	*state = !!(bits & BIT(irq_bit));
+bail:
+	spin_unlock(&chip->pm_irq_lock);
+
+	return rc;
+}
+
 static struct irq_chip pm8xxx_irq_chip = {
 	.name		= "pm8xxx",
 	.irq_mask_ack	= pm8xxx_irq_mask_ack,
 	.irq_unmask	= pm8xxx_irq_unmask,
 	.irq_set_type	= pm8xxx_irq_set_type,
+	.irq_get_irqchip_state = pm8xxx_irq_get_irqchip_state,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -251,11 +289,8 @@
 
 	irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
@@ -336,14 +371,12 @@
 	if (!chip->irqdomain)
 		return -ENODEV;
 
-	irq_set_handler_data(irq, chip);
-	irq_set_chained_handler(irq, pm8xxx_irq_handler);
+	irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
 	irq_set_irq_wake(irq, 1);
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	if (rc) {
-		irq_set_chained_handler(irq, NULL);
-		irq_set_handler_data(irq, NULL);
+		irq_set_chained_handler_and_data(irq, NULL, NULL);
 		irq_domain_remove(chip->irqdomain);
 	}
 
@@ -362,8 +395,7 @@
 	struct pm_irq_chip *chip = platform_get_drvdata(pdev);
 
 	device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
-	irq_set_chained_handler(irq, NULL);
-	irq_set_handler_data(irq, NULL);
+	irq_set_chained_handler_and_data(irq, NULL, NULL);
 	irq_domain_remove(chip->irqdomain);
 
 	return 0;
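
The .irq_get_irqchip_state hook added to pm8xxx above lets generic code query the live state of an interrupt line through the irqchip; this chip answers only IRQCHIP_STATE_LINE_LEVEL, by selecting the block and reading the real-time status register. A hedged sketch of the consumer side, via the generic helper from <linux/interrupt.h>:

#include <linux/interrupt.h>

static bool example_line_is_high(unsigned int irq)
{
	bool level = false;

	/* returns 0 on success; pm8xxx answers LINE_LEVEL queries only */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL, &level))
		return false;

	return level;
}
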
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 12e3243..6afc9fa 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -149,6 +149,7 @@
 	[QCOM_RPM_USB_OTG_SWITCH] =		{ 210, 125, 82, 1 },
 	[QCOM_RPM_HDMI_SWITCH] =		{ 211, 126, 83, 1 },
 	[QCOM_RPM_DDR_DMM] =			{ 212, 127, 84, 2 },
+	[QCOM_RPM_QDSS_CLK] =			{ 214, ~0, 7, 1 },
 	[QCOM_RPM_VDDMIN_GPIO] =		{ 215, 131, 89, 1 },
 };
 
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index bb85020..3f8812d 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -386,9 +386,7 @@
 		irq_set_chip_and_handler(__irq, &rc5t583_irq_chip,
 					 handle_simple_irq);
 		irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
-		set_irq_flags(__irq, IRQF_VALID);
-#endif
+		irq_clear_status_flags(__irq, IRQ_NOREQUEST);
 	}
 
 	ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT,
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index df276ad..e10f02f 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -322,7 +322,6 @@
 static struct i2c_driver rc5t583_i2c_driver = {
 	.driver = {
 		   .name = "rc5t583",
-		   .owner = THIS_MODULE,
 		   },
 	.probe = rc5t583_i2c_probe,
 	.remove = rc5t583_i2c_remove,
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 2d64430..d4c114a 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -311,7 +311,6 @@
 static struct i2c_driver retu_driver = {
 	.driver		= {
 		.name = "retu-mfd",
-		.owner = THIS_MODULE,
 	},
 	.probe		= retu_probe,
 	.remove		= retu_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index db395a6..d60f916 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -124,6 +124,7 @@
 	{ .compatible = "richtek,rt5033", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, rt5033_dt_match);
 
 static struct i2c_driver rt5033_driver = {
 	.driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 4a69afb..d206a3e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -486,7 +486,6 @@
 static struct i2c_driver sec_pmic_driver = {
 	.driver = {
 		   .name = "sec_pmic",
-		   .owner = THIS_MODULE,
 		   .pm = &sec_pmic_pm_ops,
 		   .of_match_table = of_match_ptr(sec_dt_match),
 	},
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index e3deb46..fb4ce6d0 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -873,7 +873,6 @@
 static struct i2c_driver si476x_core_driver = {
 	.driver		= {
 		.name	= "si476x-core",
-		.owner  = THIS_MODULE,
 	},
 	.probe		= si476x_core_probe,
 	.remove         = si476x_core_remove,
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 0324688..a4c0df7 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -98,7 +98,6 @@
 static struct i2c_driver smsc_i2c_driver = {
 	.driver = {
 		   .name = "smsc",
-		   .owner = THIS_MODULE,
 	},
 	.probe = smsc_i2c_probe,
 	.remove = smsc_i2c_remove,
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index e14c8c9..c3f4aab 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -112,7 +112,6 @@
 static struct i2c_driver stmpe_i2c_driver = {
 	.driver = {
 		.name = "stmpe-i2c",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &stmpe_dev_pm_ops,
 #endif
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 6fdb30e..618ba24 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/types.h>
 #include "stmpe.h"
 
@@ -108,6 +109,17 @@
 	return stmpe_remove(stmpe);
 }
 
+static const struct of_device_id stmpe_spi_of_match[] = {
+	{ .compatible = "st,stmpe610", },
+	{ .compatible = "st,stmpe801", },
+	{ .compatible = "st,stmpe811", },
+	{ .compatible = "st,stmpe1601", },
+	{ .compatible = "st,stmpe2401", },
+	{ .compatible = "st,stmpe2403", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stmpe_spi_of_match);
+
 static const struct spi_device_id stmpe_spi_id[] = {
 	{ "stmpe610", STMPE610 },
 	{ "stmpe801", STMPE801 },
@@ -122,6 +134,7 @@
 static struct spi_driver stmpe_spi_driver = {
 	.driver = {
 		.name	= "stmpe-spi",
+		.of_match_table = of_match_ptr(stmpe_spi_of_match),
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &stmpe_dev_pm_ops,
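
The OF match table and MODULE_DEVICE_TABLE() additions in this series (stmpe-spi above, rt5033, stw481x, tps65217, tps65218) all serve module autoloading: the table is exported into the module's alias list, so udev can load the driver when a matching device-tree node probes. A minimal hypothetical sketch for an SPI driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name = "example-spi",
		/* of_match_ptr() compiles to NULL without CONFIG_OF */
		.of_match_table = of_match_ptr(example_of_match),
	},
	.probe = example_spi_probe,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");
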
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 18c4d72..e971af8 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -971,20 +971,13 @@
 	irq_set_chip_data(virq, stmpe);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-		set_irq_flags(virq, 0);
-#endif
 		irq_set_chip_and_handler(virq, NULL, NULL);
 		irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 7ceb3df..ca613df 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -231,6 +231,7 @@
 	{ "stw481x", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, stw481x_id);
 
 static const struct of_device_id stw481x_match[] = {
 	{ .compatible = "st,stw4810", },
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index c09fb5d..16fc1ad 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -187,7 +187,7 @@
 /* Handle the T7L66XB interrupt mux */
 static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct t7l66xb *t7l66xb = irq_get_handler_data(irq);
+	struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -246,14 +246,10 @@
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
 		irq_set_chip_data(irq, t7l66xb);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
 	}
 
 	irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(t7l66xb->irq, t7l66xb);
-	irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq);
+	irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
 }
 
 static void t7l66xb_detach_irq(struct platform_device *dev)
@@ -263,13 +259,9 @@
 
 	irq_base = t7l66xb->irq_base;
 
-	irq_set_chained_handler(t7l66xb->irq, NULL);
-	irq_set_handler_data(t7l66xb->irq, NULL);
+	irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
@@ -318,7 +310,7 @@
 	struct resource *iomem, *rscr;
 	int ret;
 
-	if (pdata == NULL)
+	if (!pdata)
 		return -EINVAL;
 
 	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -371,7 +363,7 @@
 
 	clk_prepare_enable(t7l66xb->clk48m);
 
-	if (pdata && pdata->enable)
+	if (pdata->enable)
 		pdata->enable(dev);
 
 	/* Mask all interrupts */
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 96d420d..274bf39 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -215,20 +215,13 @@
 	irq_set_chip_and_handler(virq, &dummy_irq_chip,
 				handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
@@ -492,7 +485,6 @@
 static struct i2c_driver tc3589x_driver = {
 	.driver = {
 		.name	= "tc3589x",
-		.owner	= THIS_MODULE,
 		.pm	= &tc3589x_dev_pm_ops,
 		.of_match_table = of_match_ptr(tc3589x_match),
 	},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 63458b3..775b9ac 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -525,7 +525,7 @@
 static void
 tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
+	struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -586,12 +586,12 @@
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
 		irq_set_chip_data(irq, tc6393xb);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(tc6393xb->irq, tc6393xb);
-	irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
+	irq_set_chained_handler_and_data(tc6393xb->irq, tc6393xb_irq,
+					 tc6393xb);
 }
 
 static void tc6393xb_detach_irq(struct platform_device *dev)
@@ -599,13 +599,12 @@
 	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
 	unsigned int irq, irq_base;
 
-	irq_set_chained_handler(tc6393xb->irq, NULL);
-	irq_set_handler_data(tc6393xb->irq, NULL);
+	irq_set_chained_handler_and_data(tc6393xb->irq, NULL, NULL);
 
 	irq_base = tc6393xb->irq_base;
 
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index a2e1990..1ab3dd6 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -129,7 +129,6 @@
 static struct i2c_driver tps6507x_i2c_driver = {
 	.driver = {
 		   .name = "tps6507x",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps6507x_of_match),
 	},
 	.probe = tps6507x_i2c_probe,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 14b62e11a..f88085a 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -259,7 +259,6 @@
 static struct i2c_driver tps65090_driver = {
 	.driver	= {
 		.name	= "tps65090",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps65090_of_match),
 	},
 	.probe		= tps65090_i2c_probe,
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 7d1cfc1..55add04 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -156,6 +156,7 @@
 	{ .compatible = "ti,tps65217", .data = (void *)TPS65217 },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, tps65217_of_match);
 
 static int tps65217_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -248,7 +249,6 @@
 static struct i2c_driver tps65217_driver = {
 	.driver		= {
 		.name	= "tps65217",
-		.owner	= THIS_MODULE,
 		.of_match_table = tps65217_of_match,
 	},
 	.id_table	= tps65217_id_table,
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 7af11a8..80b9dc3 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -211,6 +211,7 @@
 	{ .compatible = "ti,tps65218", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, of_tps65218_match_table);
 
 static int tps65218_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -280,7 +281,6 @@
 static struct i2c_driver tps65218_driver = {
 	.driver		= {
 		.name	= "tps65218",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_tps65218_match_table,
 	},
 	.probe		= tps65218_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e0a2583..5628a6b 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -52,7 +52,7 @@
 #define TPS6586X_VERSIONCRC	0xcd
 
 /* Maximum register */
-#define TPS6586X_MAX_REGISTER	(TPS6586X_VERSIONCRC + 1)
+#define TPS6586X_MAX_REGISTER	TPS6586X_VERSIONCRC
 
 struct tps6586x_irq_data {
 	u8	mask_reg;
@@ -299,14 +299,7 @@
 	irq_set_chip_data(virq, tps6586x);
 	irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -467,7 +460,7 @@
 static const struct regmap_config tps6586x_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
-	.max_register = TPS6586X_MAX_REGISTER - 1,
+	.max_register = TPS6586X_MAX_REGISTER,
 	.volatile_reg = is_volatile_reg,
 	.cache_type = REGCACHE_RBTREE,
 };
@@ -610,7 +603,6 @@
 static struct i2c_driver tps6586x_driver = {
 	.driver	= {
 		.name	= "tps6586x",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps6586x_of_match),
 	},
 	.probe		= tps6586x_i2c_probe,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 7612d89..f7ab115 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -544,7 +544,6 @@
 static struct i2c_driver tps65910_i2c_driver = {
 	.driver = {
 		   .name = "tps65910",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps65910_of_match),
 	},
 	.probe = tps65910_i2c_probe,
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 6a6343e..7e55640 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -109,7 +109,6 @@
 static struct i2c_driver tps65912_i2c_driver = {
 	.driver = {
 		   .name = "tps65912",
-		   .owner = THIS_MODULE,
 	},
 	.probe = tps65912_i2c_probe,
 	.remove = tps65912_i2c_remove,
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
index fbecec7..db2c29c 100644
--- a/drivers/mfd/tps65912-irq.c
+++ b/drivers/mfd/tps65912-irq.c
@@ -197,13 +197,7 @@
 		irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index ed6c5b0..0812df3 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -549,7 +549,6 @@
 static struct i2c_driver tps80031_driver = {
 	.driver	= {
 		.name	= "tps80031",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= tps80031_probe,
 	.remove		= tps80031_remove,
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index a3fa7f4..40e51b0 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -419,16 +419,7 @@
 
 static inline void activate_irq(int irq)
 {
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
-	irq_set_noprobe(irq);
-#endif
+	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 }
 
 /*----------------------------------------------------------------------*/
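
The twl4030 hunk above spells out the rule applied throughout this series for retiring ARM's set_irq_flags(): clearing IRQ_NOREQUEST (and IRQ_NOPROBE where probing was allowed) is the arch-neutral equivalent, and setting the flags back undoes it on teardown. As a sketch:

#include <linux/irq.h>

/*
 * Rough correspondence, as applied in these conversions:
 *   set_irq_flags(irq, IRQF_VALID)              -> clear IRQ_NOREQUEST
 *   set_irq_flags(irq, IRQF_VALID | IRQF_PROBE) -> clear both flags
 *   set_irq_flags(irq, 0)                       -> set both flags back
 */
static void example_activate_irq(int irq)
{
	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}

static void example_deactivate_irq(int irq)
{
	irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
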
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 20fb581..5357450 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -231,7 +231,7 @@
 
 static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+	struct twl6030_irq *pdata = irq_data_get_irq_chip_data(d);
 
 	if (on)
 		atomic_inc(&pdata->wakeirqs);
@@ -352,26 +352,13 @@
 	irq_set_chip_and_handler(virq,  &pdata->irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, true);
 	irq_set_parent(virq, pdata->twl_irq);
-
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(virq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c5265c1..a151ee2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -86,7 +86,7 @@
 	{ 0x2E, 0x00 }, /* REG_STATUS	(ro) */
 };
 
-static struct reg_default twl6040_patch[] = {
+static struct reg_sequence twl6040_patch[] = {
 	/*
 	 * Select I2C bus access to dual access registers
 	 * Interrupt register is cleared on read
@@ -801,7 +801,6 @@
 static struct i2c_driver twl6040_driver = {
 	.driver = {
 		.name = "twl6040",
-		.owner = THIS_MODULE,
 	},
 	.probe		= twl6040_probe,
 	.remove		= twl6040_remove,
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 3591550..9a23021 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@
  * SIBCLK to talk to the chip.  We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
+static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
@@ -292,7 +292,7 @@
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
 
-	for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++)
+	for (i = 0; i < 16 && isr; i++, isr >>= 1)
 		if (isr & 1)
 			generic_handle_irq(ucb->irq_base + i);
 	ucb1x00_disable(ucb);
@@ -562,7 +562,7 @@
 
 		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
 		irq_set_chip_data(irq, ucb);
-		set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index aeae6ec..0386eaf 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -21,7 +21,7 @@
 #define WM5102_NUM_AOD_ISR 2
 #define WM5102_NUM_ISR 5
 
-static const struct reg_default wm5102_reva_patch[] = {
+static const struct reg_sequence wm5102_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x221, 0x0090 },
 	{ 0x211, 0x0014 },
@@ -57,7 +57,7 @@
 	{ 0x80, 0x0000 },
 };
 
-static const struct reg_default wm5102_revb_patch[] = {
+static const struct reg_sequence wm5102_revb_patch[] = {
 	{ 0x19, 0x0001 },
 	{ 0x80, 0x0003 },
 	{ 0x081, 0xE022 },
@@ -80,7 +80,7 @@
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5102_patch(struct arizona *arizona)
 {
-	const struct reg_default *wm5102_patch;
+	const struct reg_sequence *wm5102_patch;
 	int patch_size;
 
 	switch (arizona->rev) {
@@ -266,8 +266,6 @@
 	{ 0x00000069, 0x01FF },   /* R105   - Always On Triggers Sequence Select 4 */
 	{ 0x0000006A, 0x01FF },   /* R106   - Always On Triggers Sequence Select 5 */
 	{ 0x0000006B, 0x01FF },   /* R107   - Always On Triggers Sequence Select 6 */
-	{ 0x0000006E, 0x01FF },   /* R110   - Trigger Sequence Select 32 */
-	{ 0x0000006F, 0x01FF },   /* R111   - Trigger Sequence Select 33 */
 	{ 0x00000070, 0x0000 },   /* R112   - Comfort Noise Generator */ 
 	{ 0x00000090, 0x0000 },   /* R144   - Haptics Control 1 */ 
 	{ 0x00000091, 0x7FFF },   /* R145   - Haptics Control 2 */ 
@@ -300,7 +298,6 @@
 	{ 0x00000175, 0x0004 },   /* R373   - FLL1 Control 5 */ 
 	{ 0x00000176, 0x0000 },   /* R374   - FLL1 Control 6 */ 
 	{ 0x00000177, 0x0181 },   /* R375   - FLL1 Loop Filter Test 1 */ 
-	{ 0x00000178, 0x0000 },   /* R376   - FLL1 NCO Test 0 */
 	{ 0x00000179, 0x0000 },   /* R377   - FLL1 Control 7 */
 	{ 0x00000181, 0x0000 },   /* R385   - FLL1 Synchroniser 1 */ 
 	{ 0x00000182, 0x0000 },   /* R386   - FLL1 Synchroniser 2 */ 
@@ -318,7 +315,6 @@
 	{ 0x00000195, 0x0004 },   /* R405   - FLL2 Control 5 */ 
 	{ 0x00000196, 0x0000 },   /* R406   - FLL2 Control 6 */ 
 	{ 0x00000197, 0x0000 },   /* R407   - FLL2 Loop Filter Test 1 */ 
-	{ 0x00000198, 0x0000 },   /* R408   - FLL2 NCO Test 0 */
 	{ 0x00000199, 0x0000 },   /* R409   - FLL2 Control 7 */
 	{ 0x000001A1, 0x0000 },   /* R417   - FLL2 Synchroniser 1 */ 
 	{ 0x000001A2, 0x0000 },   /* R418   - FLL2 Synchroniser 2 */ 
@@ -338,12 +334,9 @@
 	{ 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */ 
 	{ 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */ 
 	{ 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */ 
-	{ 0x0000029C, 0x0000 },   /* R668   - Headphone Detect 2 */
-	{ 0x0000029F, 0x0000 },   /* R671   - Headphone Detect Test */
 	{ 0x000002A2, 0x0000 },   /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */ 
 	{ 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */ 
-	{ 0x000002A5, 0x0000 },   /* R677   - Mic Detect 3 */ 
 	{ 0x000002A6, 0x3737 },   /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },   /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },   /* R680   - Mic Detect Level 3 */
@@ -887,11 +880,11 @@
 	{ 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */ 
 	{ 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */ 
 	{ 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */ 
+	{ 0x00000D41, 0x0000 },   /* R3393  - ADSP2 IRQ0 */
 	{ 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */ 
 	{ 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */ 
 	{ 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */ 
 	{ 0x00000E00, 0x0000 },   /* R3584  - FX_Ctrl1 */ 
-	{ 0x00000E01, 0x0000 },   /* R3585  - FX_Ctrl2 */ 
 	{ 0x00000E10, 0x6318 },   /* R3600  - EQ1_1 */ 
 	{ 0x00000E11, 0x6300 },   /* R3601  - EQ1_2 */ 
 	{ 0x00000E12, 0x0FC8 },   /* R3602  - EQ1_3 */ 
@@ -991,6 +984,7 @@
 	{ 0x00000ECD, 0x0000 },   /* R3789  - HPLPF4_2 */ 
 	{ 0x00000EE0, 0x0000 },   /* R3808  - ASRC_ENABLE */ 
 	{ 0x00000EE2, 0x0000 },   /* R3810  - ASRC_RATE1 */ 
+	{ 0x00000EE3, 0x0400 },   /* R3811  - ASRC_RATE2 */
 	{ 0x00000EF0, 0x0000 },   /* R3824  - ISRC 1 CTRL 1 */ 
 	{ 0x00000EF1, 0x0000 },   /* R3825  - ISRC 1 CTRL 2 */ 
 	{ 0x00000EF2, 0x0000 },   /* R3826  - ISRC 1 CTRL 3 */ 
@@ -998,7 +992,6 @@
 	{ 0x00000EF4, 0x0000 },   /* R3828  - ISRC 2 CTRL 2 */ 
 	{ 0x00000EF5, 0x0000 },   /* R3829  - ISRC 2 CTRL 3 */ 
 	{ 0x00001100, 0x0010 },   /* R4352  - DSP1 Control 1 */ 
-	{ 0x00001101, 0x0000 },   /* R4353  - DSP1 Clocking 1 */ 
 };
 
 static bool wm5102_readable_register(struct device *dev, unsigned int reg)
@@ -1008,12 +1001,10 @@
 	case ARIZONA_DEVICE_REVISION:
 	case ARIZONA_CTRL_IF_SPI_CFG_1:
 	case ARIZONA_CTRL_IF_I2C1_CFG_1:
-	case ARIZONA_CTRL_IF_STATUS_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
-	case ARIZONA_WRITE_SEQUENCER_PROM:
 	case ARIZONA_TONE_GENERATOR_1:
 	case ARIZONA_TONE_GENERATOR_2:
 	case ARIZONA_TONE_GENERATOR_3:
@@ -1034,8 +1025,6 @@
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8:
 	case ARIZONA_COMFORT_NOISE_GENERATOR:
 	case ARIZONA_HAPTICS_CONTROL_1:
 	case ARIZONA_HAPTICS_CONTROL_2:
@@ -1176,7 +1165,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
 	case ARIZONA_OUT_VOLUME_4L:
 	case ARIZONA_NOISE_GATE_SELECT_4L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
 	case ARIZONA_OUT_VOLUME_4R:
 	case ARIZONA_NOISE_GATE_SELECT_4R:
@@ -1184,7 +1172,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
 	case ARIZONA_DAC_VOLUME_LIMIT_5L:
 	case ARIZONA_NOISE_GATE_SELECT_5L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
 	case ARIZONA_DAC_VOLUME_LIMIT_5R:
 	case ARIZONA_NOISE_GATE_SELECT_5R:
@@ -1195,8 +1182,6 @@
 	case ARIZONA_NOISE_GATE_CONTROL:
 	case ARIZONA_PDM_SPK1_CTRL_1:
 	case ARIZONA_PDM_SPK1_CTRL_2:
-	case ARIZONA_SPK_CTRL_2:
-	case ARIZONA_SPK_CTRL_3:
 	case ARIZONA_DAC_COMP_1:
 	case ARIZONA_DAC_COMP_2:
 	case ARIZONA_DAC_COMP_3:
@@ -1228,7 +1213,6 @@
 	case ARIZONA_AIF1_FRAME_CTRL_18:
 	case ARIZONA_AIF1_TX_ENABLES:
 	case ARIZONA_AIF1_RX_ENABLES:
-	case ARIZONA_AIF1_FORCE_WRITE:
 	case ARIZONA_AIF2_BCLK_CTRL:
 	case ARIZONA_AIF2_TX_PIN_CTRL:
 	case ARIZONA_AIF2_RX_PIN_CTRL:
@@ -1244,7 +1228,6 @@
 	case ARIZONA_AIF2_FRAME_CTRL_12:
 	case ARIZONA_AIF2_TX_ENABLES:
 	case ARIZONA_AIF2_RX_ENABLES:
-	case ARIZONA_AIF2_FORCE_WRITE:
 	case ARIZONA_AIF3_BCLK_CTRL:
 	case ARIZONA_AIF3_TX_PIN_CTRL:
 	case ARIZONA_AIF3_RX_PIN_CTRL:
@@ -1260,7 +1243,6 @@
 	case ARIZONA_AIF3_FRAME_CTRL_12:
 	case ARIZONA_AIF3_TX_ENABLES:
 	case ARIZONA_AIF3_RX_ENABLES:
-	case ARIZONA_AIF3_FORCE_WRITE:
 	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
 	case ARIZONA_SLIMBUS_RATES_1:
 	case ARIZONA_SLIMBUS_RATES_2:
@@ -1586,22 +1568,6 @@
 	case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
 	case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
 	case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
 	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
@@ -1810,11 +1776,6 @@
 	case ARIZONA_DRC1_CTRL3:
 	case ARIZONA_DRC1_CTRL4:
 	case ARIZONA_DRC1_CTRL5:
-	case ARIZONA_DRC2_CTRL1:
-	case ARIZONA_DRC2_CTRL2:
-	case ARIZONA_DRC2_CTRL3:
-	case ARIZONA_DRC2_CTRL4:
-	case ARIZONA_DRC2_CTRL5:
 	case ARIZONA_HPLPF1_1:
 	case ARIZONA_HPLPF1_2:
 	case ARIZONA_HPLPF2_1:
@@ -1832,9 +1793,6 @@
 	case ARIZONA_ISRC_2_CTRL_1:
 	case ARIZONA_ISRC_2_CTRL_2:
 	case ARIZONA_ISRC_2_CTRL_3:
-	case ARIZONA_ISRC_3_CTRL_1:
-	case ARIZONA_ISRC_3_CTRL_2:
-	case ARIZONA_ISRC_3_CTRL_3:
 	case ARIZONA_DSP1_CONTROL_1:
 	case ARIZONA_DSP1_CLOCKING_1:
 	case ARIZONA_DSP1_STATUS_1:
@@ -1883,7 +1841,6 @@
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
 	case ARIZONA_OUTPUT_STATUS_1:
-	case ARIZONA_RAW_OUTPUT_STATUS_1:
 	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
 	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
 	case ARIZONA_SAMPLE_RATE_1_STATUS:
@@ -1969,6 +1926,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
@@ -1983,6 +1942,8 @@
 const struct regmap_config wm5102_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
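
The reg_default -> reg_sequence renames in these Wolfson tables track a regmap API split: struct reg_default describes a register's power-on value for seeding the cache, while struct reg_sequence describes an ordered list of writes for regmap_register_patch() and regmap_multi_reg_write(), where write order matters. A hedged sketch of the distinction, with hypothetical registers:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_default example_defaults[] = {
	{ 0x01, 0x0000 },	/* cache seed: reset value of reg 0x01 */
};

static const struct reg_sequence example_patch[] = {
	{ 0x80, 0x0003 },	/* unlock */
	{ 0x21, 0x0090 },	/* tuned value; ordering is significant */
	{ 0x80, 0x0000 },	/* lock */
};

static int example_apply_patch(struct regmap *map)
{
	return regmap_register_patch(map, example_patch,
				     ARRAY_SIZE(example_patch));
}
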
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 12cad94..c4b9374 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -21,7 +21,7 @@
 #define WM5110_NUM_AOD_ISR 2
 #define WM5110_NUM_ISR 5
 
-static const struct reg_default wm5110_reva_patch[] = {
+static const struct reg_sequence wm5110_reva_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x44, 0x20 },
 	{ 0x45, 0x40 },
@@ -134,7 +134,7 @@
 	{ 0x209, 0x002A },
 };
 
-static const struct reg_default wm5110_revb_patch[] = {
+static const struct reg_sequence wm5110_revb_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x36e, 0x0210 },
 	{ 0x370, 0x0210 },
@@ -224,7 +224,7 @@
 	{ 0x80, 0x0 },
 };
 
-static const struct reg_default wm5110_revd_patch[] = {
+static const struct reg_sequence wm5110_revd_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x80, 0x3 },
 	{ 0x393, 0x27 },
@@ -249,6 +249,16 @@
 	{ 0x80, 0x0 },
 };
 
+/* Add extra headphone write sequence locations */
+static const struct reg_default wm5110_reve_patch[] = {
+	{ 0x80, 0x3 },
+	{ 0x80, 0x3 },
+	{ 0x4b, 0x138 },
+	{ 0x4c, 0x13d },
+	{ 0x80, 0x0 },
+	{ 0x80, 0x0 },
+};
+
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5110_patch(struct arizona *arizona)
 {
@@ -266,7 +276,9 @@
 					     wm5110_revd_patch,
 					     ARRAY_SIZE(wm5110_revd_patch));
 	default:
-		return 0;
+		return regmap_register_patch(arizona->regmap,
+					     wm5110_reve_patch,
+					     ARRAY_SIZE(wm5110_reve_patch));
 	}
 }
 EXPORT_SYMBOL_GPL(wm5110_patch);
@@ -676,6 +688,7 @@
 	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
 	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
 	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000042, 0x0000 },    /* R66    - Spare Triggers */
 	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
 	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
 	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
@@ -754,11 +767,9 @@
 	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
 	{ 0x00000293, 0x0000 },    /* R659   - Accessory Detect Mode 1 */
 	{ 0x0000029B, 0x0028 },    /* R667   - Headphone Detect 1 */
-	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
 	{ 0x000002A2, 0x0000 },    /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },    /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
@@ -848,8 +859,6 @@
 	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
 	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
 	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
-	{ 0x00000480, 0x0040 },    /* R1152  - Class W ANC Threshold 1 */
-	{ 0x00000481, 0x0040 },    /* R1153  - Class W ANC Threshold 2 */
 	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
 	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
 	{ 0x00000492, 0x0069 },    /* R1170  - PDM SPK2 CTRL 1 */
@@ -1508,7 +1517,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -1625,14 +1633,9 @@
 	{ 0x00000F00, 0x0000 },    /* R3840  - Clock Control */
 	{ 0x00000F01, 0x0000 },    /* R3841  - ANC_SRC */
 	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 	{ 0x00001200, 0x0010 },    /* R4608  - DSP2 Control 1 */
-	{ 0x00001201, 0x0000 },    /* R4609  - DSP2 Clocking 1 */
 	{ 0x00001300, 0x0010 },    /* R4864  - DSP3 Control 1 */
-	{ 0x00001301, 0x0000 },    /* R4865  - DSP3 Clocking 1 */
 	{ 0x00001400, 0x0010 },    /* R5120  - DSP4 Control 1 */
-	{ 0x00001401, 0x0000 },    /* R5121  - DSP4 Clocking 1 */
-	{ 0x00001404, 0x0000 },    /* R5124  - DSP4 Status 1 */
 };
 
 static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
@@ -1716,6 +1719,7 @@
 	case ARIZONA_PWM_DRIVE_3:
 	case ARIZONA_WAKE_CONTROL:
 	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SPARE_TRIGGERS:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
@@ -3007,6 +3011,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
@@ -3021,6 +3027,8 @@
 const struct regmap_config wm5110_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
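
The reg_format_endian/val_format_endian additions to the wm5102/wm5110/wm8997 regmap configs make the big-endian wire format explicit in the config rather than inherited from bus defaults; behavior should be unchanged, but the format no longer depends on what the bus layer assumes. A sketch of the resulting config shape, with hypothetical sizes:

#include <linux/regmap.h>

static const struct regmap_config example_regmap = {
	.reg_bits = 32,
	.val_bits = 16,
	/* pin the on-wire format instead of relying on the bus default */
	.reg_format_endian = REGMAP_ENDIAN_BIG,
	.val_format_endian = REGMAP_ENDIAN_BIG,
	.max_register = 0x3000,
	.cache_type = REGCACHE_RBTREE,
};
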
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index a4cbefe..824bcba 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -93,7 +93,6 @@
 static struct i2c_driver wm831x_i2c_driver = {
 	.driver = {
 		.name = "wm831x",
-		.owner = THIS_MODULE,
 		.pm = &wm831x_pm_ops,
 	},
 	.probe = wm831x_i2c_probe,
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3da8126..dfea8b9 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -552,14 +552,7 @@
 	irq_set_chip_data(virq, h->host_data);
 	irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 6a16a8a..9358f03 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -69,7 +69,6 @@
 static struct i2c_driver wm8350_i2c_driver = {
 	.driver = {
 		   .name = "wm8350",
-		   .owner = THIS_MODULE,
 	},
 	.probe = wm8350_i2c_probe,
 	.remove = wm8350_i2c_remove,
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 813ff50..27054f3 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -526,13 +526,7 @@
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
 
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index c6fb5d1..3bd44a4 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -194,7 +194,6 @@
 static struct i2c_driver wm8400_i2c_driver = {
 	.driver = {
 		.name = "WM8400",
-		.owner = THIS_MODULE,
 	},
 	.probe    = wm8400_i2c_probe,
 	.remove   = wm8400_i2c_remove,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 53ae5af..7eec619 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -243,21 +243,21 @@
 }
 #endif
 
-static const struct reg_default wm8994_revc_patch[] = {
+static const struct reg_sequence wm8994_revc_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0x3 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm8958_reva_patch[] = {
+static const struct reg_sequence wm8958_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0xcb, 0x81 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm1811_reva_patch[] = {
+static const struct reg_sequence wm1811_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0xc07 },
 	{ 0x5d, 0x7e },
@@ -326,7 +326,7 @@
 {
 	struct wm8994_pdata *pdata;
 	struct regmap_config *regmap_config;
-	const struct reg_default *regmap_patch = NULL;
+	const struct reg_sequence *regmap_patch = NULL;
 	const char *devname;
 	int ret, i, patch_regs = 0;
 	int pulls = 0;
@@ -677,7 +677,6 @@
 static struct i2c_driver wm8994_i2c_driver = {
 	.driver = {
 		.name = "wm8994",
-		.owner = THIS_MODULE,
 		.pm = &wm8994_pm_ops,
 		.of_match_table = of_match_ptr(wm8994_of_match),
 	},
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 55c380a..18710f3 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -172,14 +172,7 @@
 	irq_set_chip_data(virq, wm8994);
 	irq_set_chip_and_handler(virq, &wm8994_edge_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -193,7 +186,7 @@
 {
 	int ret;
 	unsigned long irqflags;
-	struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
+	struct wm8994_pdata *pdata = &wm8994->pdata;
 
 	if (!wm8994->irq) {
 		dev_warn(wm8994->dev,
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 300e9b6..c56b160 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -19,7 +19,7 @@
 
 #include "wm8994.h"
 
-static struct reg_default wm1811_defaults[] = {
+static const struct reg_default wm1811_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1    - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2    - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3    - Power Management (3) */
@@ -251,7 +251,7 @@
 	{ 0x0748, 0x003F },    /* R1864 - IRQ Debounce */
 };
 
-static struct reg_default wm8994_defaults[] = {
+static const struct reg_default wm8994_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */ 
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */ 
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */ 
@@ -470,7 +470,7 @@
 	{ 0x0748, 0x003F },    /* R1864  - IRQ Debounce */ 
 };
 
-static struct reg_default wm8958_defaults[] = {
+static const struct reg_default wm8958_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c0c25d75..ca41a56 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -17,7 +17,7 @@
 
 #include "arizona.h"
 
-static const struct reg_default wm8997_reva_patch[] = {
+static const struct reg_sequence wm8997_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x214, 0x0008 },
 	{ 0x458, 0x0000 },
@@ -243,7 +243,6 @@
 	{ 0x0000029B, 0x0020 },    /* R667   - Headphone Detect 1 */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002C3, 0x0000 },    /* R707   - Mic noise mix control 1 */
 	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
 	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
@@ -684,7 +683,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -788,8 +786,6 @@
 	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
 	{ 0x00000EF4, 0x0000 },    /* R3828  - ISRC 2 CTRL 2 */
 	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
-	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 };
 
 static bool wm8997_readable_register(struct device *dev, unsigned int reg)
@@ -1480,6 +1476,8 @@
 	case ARIZONA_SAMPLE_RATE_2_STATUS:
 	case ARIZONA_SAMPLE_RATE_3_STATUS:
 	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL2_NCO_TEST_0:
 	case ARIZONA_MIC_DETECT_3:
 	case ARIZONA_HP_CTRL_1L:
 	case ARIZONA_HP_CTRL_1R:
@@ -1521,6 +1519,8 @@
 const struct regmap_config wm8997_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM8997_MAX_REGISTER,
 	.readable_reg = wm8997_readable_register,
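
Spelling out REGMAP_ENDIAN_BIG for both the register and value formatting makes the wire format explicit instead of relying on REGMAP_ENDIAN_DEFAULT, which regmap otherwise resolves per bus. A minimal sketch of a config with explicit endianness (field values illustrative):

	#include <linux/regmap.h>

	static const struct regmap_config foo_i2c_regmap = {
		.reg_bits = 32,
		.val_bits = 16,
		/* force big-endian framing on the bus */
		.reg_format_endian = REGMAP_ENDIAN_BIG,
		.val_format_endian = REGMAP_ENDIAN_BIG,
		.max_register = 0x1705,
	};
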
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
new file mode 100644
index 0000000..e6de3cd
--- /dev/null
+++ b/drivers/mfd/wm8998-tables.c
@@ -0,0 +1,1594 @@
+/*
+ * wm8998-tables.c  --  data tables for wm8998-class codecs
+ *
+ * Copyright 2014 Wolfson Microelectronics plc
+ *
+ * Author: Richard Fitzgerald <rf@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+#include <linux/device.h>
+
+#include "arizona.h"
+
+#define WM8998_NUM_AOD_ISR 2
+#define WM8998_NUM_ISR 5
+
+static const struct reg_sequence wm8998_rev_a_patch[] = {
+	{ 0x0212, 0x0000 },
+	{ 0x0211, 0x0014 },
+	{ 0x04E4, 0x0E0D },
+	{ 0x04E5, 0x0E0D },
+	{ 0x04E6, 0x0E0D },
+	{ 0x04EB, 0x060E },
+	{ 0x0441, 0xC759 },
+	{ 0x0442, 0x2A08 },
+	{ 0x0443, 0x5CFA },
+	{ 0x026E, 0x0064 },
+	{ 0x026F, 0x00EA },
+	{ 0x0270, 0x1F16 },
+	{ 0x0410, 0x2080 },
+	{ 0x0418, 0x2080 },
+	{ 0x0420, 0x2080 },
+	{ 0x04B8, 0x1120 },
+	{ 0x047E, 0x080E },
+	{ 0x0448, 0x03EF },
+};
+
+/* We use a function so we can use ARRAY_SIZE() */
+int wm8998_patch(struct arizona *arizona)
+{
+	return regmap_register_patch(arizona->regmap,
+				     wm8998_rev_a_patch,
+				     ARRAY_SIZE(wm8998_rev_a_patch));
+}
+
+static const struct regmap_irq wm8998_aod_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+		.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+	},
+	[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+		.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+	},
+	[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+	[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+	[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+	[ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+struct regmap_irq_chip wm8998_aod = {
+	.name = "wm8998 AOD",
+	.status_base = ARIZONA_AOD_IRQ1,
+	.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+	.ack_base = ARIZONA_AOD_IRQ1,
+	.wake_base = ARIZONA_WAKE_CONTROL,
+	.wake_invert = 1,
+	.num_regs = 1,
+	.irqs = wm8998_aod_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_aod_irqs),
+};
+
+static const struct regmap_irq wm8998_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+	[ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+	[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+	[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+	[ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
+	},
+	[ARIZONA_IRQ_SPK_OVERHEAT] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
+	},
+	[ARIZONA_IRQ_HPDET] = {
+		.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+	},
+	[ARIZONA_IRQ_MICDET] = {
+		.reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+	},
+	[ARIZONA_IRQ_WSEQ_DONE] = {
+		.reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+	},
+	[ARIZONA_IRQ_DRC1_SIG_DET] = {
+		.reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+	},
+	[ARIZONA_IRQ_ASRC2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_ASRC1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_UNDERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_OVERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+	},
+
+	[ARIZONA_IRQ_ASRC_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF3_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF2_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF1_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CTRLIF_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+	},
+	[ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+		.reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+	},
+	[ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+	},
+
+	[ARIZONA_IRQ_BOOT_DONE] = {
+		.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+	},
+};
+
+struct regmap_irq_chip wm8998_irq = {
+	.name = "wm8998 IRQ",
+	.status_base = ARIZONA_INTERRUPT_STATUS_1,
+	.mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+	.ack_base = ARIZONA_INTERRUPT_STATUS_1,
+	.num_regs = 5,
+	.irqs = wm8998_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_irqs),
+};
+
+static const struct reg_default wm8998_reg_default[] = {
+	{ 0x00000009, 0x0001 },    /* R9     - Ctrl IF I2C1 CFG 1 */
+	{ 0x0000000B, 0x001A },    /* R11    - Ctrl IF I2C1 CFG 2 */
+	{ 0x00000020, 0x0000 },    /* R32    - Tone Generator 1 */
+	{ 0x00000021, 0x1000 },    /* R33    - Tone Generator 2 */
+	{ 0x00000022, 0x0000 },    /* R34    - Tone Generator 3 */
+	{ 0x00000023, 0x1000 },    /* R35    - Tone Generator 4 */
+	{ 0x00000024, 0x0000 },    /* R36    - Tone Generator 5 */
+	{ 0x00000030, 0x0000 },    /* R48    - PWM Drive 1 */
+	{ 0x00000031, 0x0100 },    /* R49    - PWM Drive 2 */
+	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
+	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
+	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
+	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
+	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
+	{ 0x00000064, 0x01FF },    /* R100   - Sample Rate Sequence Select 4 */
+	{ 0x00000066, 0x01FF },    /* R102   - Always On Triggers Sequence Select 1 */
+	{ 0x00000067, 0x01FF },    /* R103   - Always On Triggers Sequence Select 2 */
+	{ 0x00000068, 0x01FF },    /* R104   - Always On Triggers Sequence Select 3 */
+	{ 0x00000069, 0x01FF },    /* R105   - Always On Triggers Sequence Select 4 */
+	{ 0x0000006A, 0x01FF },    /* R106   - Always On Triggers Sequence Select 5 */
+	{ 0x0000006B, 0x01FF },    /* R107   - Always On Triggers Sequence Select 6 */
+	{ 0x0000006E, 0x01FF },    /* R110   - Trigger Sequence Select 32 */
+	{ 0x0000006F, 0x01FF },    /* R111   - Trigger Sequence Select 33 */
+	{ 0x00000090, 0x0000 },    /* R144   - Haptics Control 1 */
+	{ 0x00000091, 0x7FFF },    /* R145   - Haptics Control 2 */
+	{ 0x00000092, 0x0000 },    /* R146   - Haptics phase 1 intensity */
+	{ 0x00000093, 0x0000 },    /* R147   - Haptics phase 1 duration */
+	{ 0x00000094, 0x0000 },    /* R148   - Haptics phase 2 intensity */
+	{ 0x00000095, 0x0000 },    /* R149   - Haptics phase 2 duration */
+	{ 0x00000096, 0x0000 },    /* R150   - Haptics phase 3 intensity */
+	{ 0x00000097, 0x0000 },    /* R151   - Haptics phase 3 duration */
+	{ 0x00000100, 0x0002 },    /* R256   - Clock 32k 1 */
+	{ 0x00000101, 0x0304 },    /* R257   - System Clock 1 */
+	{ 0x00000102, 0x0011 },    /* R258   - Sample rate 1 */
+	{ 0x00000103, 0x0011 },    /* R259   - Sample rate 2 */
+	{ 0x00000104, 0x0011 },    /* R260   - Sample rate 3 */
+	{ 0x00000112, 0x0305 },    /* R274   - Async clock 1 */
+	{ 0x00000113, 0x0011 },    /* R275   - Async sample rate 1 */
+	{ 0x00000114, 0x0011 },    /* R276   - Async sample rate 2 */
+	{ 0x00000149, 0x0000 },    /* R329   - Output system clock */
+	{ 0x0000014A, 0x0000 },    /* R330   - Output async clock */
+	{ 0x00000152, 0x0000 },    /* R338   - Rate Estimator 1 */
+	{ 0x00000153, 0x0000 },    /* R339   - Rate Estimator 2 */
+	{ 0x00000154, 0x0000 },    /* R340   - Rate Estimator 3 */
+	{ 0x00000155, 0x0000 },    /* R341   - Rate Estimator 4 */
+	{ 0x00000156, 0x0000 },    /* R342   - Rate Estimator 5 */
+	{ 0x00000161, 0x0000 },    /* R353   - Dynamic Frequency Scaling 1 */
+	{ 0x00000171, 0x0002 },    /* R369   - FLL1 Control 1 */
+	{ 0x00000172, 0x0008 },    /* R370   - FLL1 Control 2 */
+	{ 0x00000173, 0x0018 },    /* R371   - FLL1 Control 3 */
+	{ 0x00000174, 0x007D },    /* R372   - FLL1 Control 4 */
+	{ 0x00000175, 0x0004 },    /* R373   - FLL1 Control 5 */
+	{ 0x00000176, 0x0000 },    /* R374   - FLL1 Control 6 */
+	{ 0x00000177, 0x0181 },    /* R375   - FLL1 Loop Filter Test 1 */
+	{ 0x00000178, 0x0000 },    /* R376   - FLL1 NCO Test 0 */
+	{ 0x00000179, 0x0000 },    /* R377   - FLL1 Control 7 */
+	{ 0x00000181, 0x0000 },    /* R385   - FLL1 Synchroniser 1 */
+	{ 0x00000182, 0x0000 },    /* R386   - FLL1 Synchroniser 2 */
+	{ 0x00000183, 0x0000 },    /* R387   - FLL1 Synchroniser 3 */
+	{ 0x00000184, 0x0000 },    /* R388   - FLL1 Synchroniser 4 */
+	{ 0x00000185, 0x0000 },    /* R389   - FLL1 Synchroniser 5 */
+	{ 0x00000186, 0x0000 },    /* R390   - FLL1 Synchroniser 6 */
+	{ 0x00000187, 0x0001 },    /* R391   - FLL1 Synchroniser 7 */
+	{ 0x00000189, 0x0000 },    /* R393   - FLL1 Spread Spectrum */
+	{ 0x0000018A, 0x0004 },    /* R394   - FLL1 GPIO Clock */
+	{ 0x00000191, 0x0000 },    /* R401   - FLL2 Control 1 */
+	{ 0x00000192, 0x0008 },    /* R402   - FLL2 Control 2 */
+	{ 0x00000193, 0x0018 },    /* R403   - FLL2 Control 3 */
+	{ 0x00000194, 0x007D },    /* R404   - FLL2 Control 4 */
+	{ 0x00000195, 0x0004 },    /* R405   - FLL2 Control 5 */
+	{ 0x00000196, 0x0000 },    /* R406   - FLL2 Control 6 */
+	{ 0x00000197, 0x0000 },    /* R407   - FLL2 Loop Filter Test 1 */
+	{ 0x00000198, 0x0000 },    /* R408   - FLL2 NCO Test 0 */
+	{ 0x00000199, 0x0000 },    /* R409   - FLL2 Control 7 */
+	{ 0x000001A1, 0x0000 },    /* R417   - FLL2 Synchroniser 1 */
+	{ 0x000001A2, 0x0000 },    /* R418   - FLL2 Synchroniser 2 */
+	{ 0x000001A3, 0x0000 },    /* R419   - FLL2 Synchroniser 3 */
+	{ 0x000001A4, 0x0000 },    /* R420   - FLL2 Synchroniser 4 */
+	{ 0x000001A5, 0x0000 },    /* R421   - FLL2 Synchroniser 5 */
+	{ 0x000001A6, 0x0000 },    /* R422   - FLL2 Synchroniser 6 */
+	{ 0x000001A7, 0x0001 },    /* R423   - FLL2 Synchroniser 7 */
+	{ 0x000001A9, 0x0000 },    /* R425   - FLL2 Spread Spectrum */
+	{ 0x000001AA, 0x0004 },    /* R426   - FLL2 GPIO Clock */
+	{ 0x00000200, 0x0006 },    /* R512   - Mic Charge Pump 1 */
+	{ 0x00000210, 0x00D4 },    /* R528   - LDO1 Control 1 */
+	{ 0x00000212, 0x0000 },    /* R530   - LDO1 Control 2 */
+	{ 0x00000213, 0x0344 },    /* R531   - LDO2 Control 1 */
+	{ 0x00000218, 0x01A6 },    /* R536   - Mic Bias Ctrl 1 */
+	{ 0x00000219, 0x01A6 },    /* R537   - Mic Bias Ctrl 2 */
+	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
+	{ 0x00000293, 0x0080 },    /* R659   - Accessory Detect Mode 1 */
+	{ 0x0000029B, 0x0000 },    /* R667   - Headphone Detect 1 */
+	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
+	{ 0x000002A2, 0x0000 },    /* R674   - Micd Clamp control */
+	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
+	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
+	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
+	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
+	{ 0x000002A7, 0x2C37 },    /* R679   - Mic Detect Level 2 */
+	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
+	{ 0x000002A9, 0x030A },    /* R681   - Mic Detect Level 4 */
+	{ 0x000002AB, 0x0000 },    /* R683   - Mic Detect 4 */
+	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
+	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
+	{ 0x00000300, 0x0000 },    /* R768   - Input Enables */
+	{ 0x00000308, 0x0000 },    /* R776   - Input Rate */
+	{ 0x00000309, 0x0022 },    /* R777   - Input Volume Ramp */
+	{ 0x0000030C, 0x0002 },    /* R780   - HPF Control */
+	{ 0x00000310, 0x2080 },    /* R784   - IN1L Control */
+	{ 0x00000311, 0x0180 },    /* R785   - ADC Digital Volume 1L */
+	{ 0x00000312, 0x0000 },    /* R786   - DMIC1L Control */
+	{ 0x00000314, 0x0080 },    /* R788   - IN1R Control */
+	{ 0x00000315, 0x0180 },    /* R789   - ADC Digital Volume 1R */
+	{ 0x00000316, 0x0000 },    /* R790   - DMIC1R Control */
+	{ 0x00000318, 0x2080 },    /* R792   - IN2L Control */
+	{ 0x00000319, 0x0180 },    /* R793   - ADC Digital Volume 2L */
+	{ 0x0000031A, 0x0000 },    /* R794   - DMIC2L Control */
+	{ 0x00000400, 0x0000 },    /* R1024  - Output Enables 1 */
+	{ 0x00000408, 0x0000 },    /* R1032  - Output Rate 1 */
+	{ 0x00000409, 0x0022 },    /* R1033  - Output Volume Ramp */
+	{ 0x00000410, 0x2080 },    /* R1040  - Output Path Config 1L */
+	{ 0x00000411, 0x0180 },    /* R1041  - DAC Digital Volume 1L */
+	{ 0x00000413, 0x0001 },    /* R1043  - Noise Gate Select 1L */
+	{ 0x00000414, 0x0080 },    /* R1044  - Output Path Config 1R */
+	{ 0x00000415, 0x0180 },    /* R1045  - DAC Digital Volume 1R */
+	{ 0x00000417, 0x0002 },    /* R1047  - Noise Gate Select 1R */
+	{ 0x00000418, 0x2080 },    /* R1048  - Output Path Config 2L */
+	{ 0x00000419, 0x0180 },    /* R1049  - DAC Digital Volume 2L */
+	{ 0x0000041B, 0x0004 },    /* R1051  - Noise Gate Select 2L */
+	{ 0x0000041C, 0x0080 },    /* R1052  - Output Path Config 2R */
+	{ 0x0000041D, 0x0180 },    /* R1053  - DAC Digital Volume 2R */
+	{ 0x0000041F, 0x0008 },    /* R1055  - Noise Gate Select 2R */
+	{ 0x00000420, 0x2080 },    /* R1056  - Output Path Config 3L */
+	{ 0x00000421, 0x0180 },    /* R1057  - DAC Digital Volume 3L */
+	{ 0x00000423, 0x0010 },    /* R1059  - Noise Gate Select 3L */
+	{ 0x00000428, 0x0000 },    /* R1064  - Output Path Config 4L */
+	{ 0x00000429, 0x0180 },    /* R1065  - DAC Digital Volume 4L */
+	{ 0x0000042B, 0x0040 },    /* R1067  - Noise Gate Select 4L */
+	{ 0x0000042C, 0x0000 },    /* R1068  - Output Path Config 4R */
+	{ 0x0000042D, 0x0180 },    /* R1069  - DAC Digital Volume 4R */
+	{ 0x0000042F, 0x0080 },    /* R1071  - Noise Gate Select 4R */
+	{ 0x00000430, 0x0000 },    /* R1072  - Output Path Config 5L */
+	{ 0x00000431, 0x0180 },    /* R1073  - DAC Digital Volume 5L */
+	{ 0x00000433, 0x0100 },    /* R1075  - Noise Gate Select 5L */
+	{ 0x00000434, 0x0000 },    /* R1076  - Output Path Config 5R */
+	{ 0x00000435, 0x0180 },    /* R1077  - DAC Digital Volume 5R */
+	{ 0x00000437, 0x0200 },    /* R1079  - Noise Gate Select 5R */
+	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
+	{ 0x00000441, 0xC759 },    /* R1089  - DRE Control 1 */
+	{ 0x00000442, 0x2A08 },    /* R1090  - DRE Control 2 */
+	{ 0x00000443, 0x5CFA },    /* R1091  - DRE Control 3 */
+	{ 0x00000448, 0x03EF },    /* R1096  - EDRE Enable */
+	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
+	{ 0x00000451, 0x0000 },    /* R1105  - DAC AEC Control 2 */
+	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
+	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
+	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
+	{ 0x0000049A, 0x0000 },    /* R1178  - HP_TEST_CTRL_13 */
+	{ 0x00000500, 0x000C },    /* R1280  - AIF1 BCLK Ctrl */
+	{ 0x00000501, 0x0008 },    /* R1281  - AIF1 Tx Pin Ctrl */
+	{ 0x00000502, 0x0000 },    /* R1282  - AIF1 Rx Pin Ctrl */
+	{ 0x00000503, 0x0000 },    /* R1283  - AIF1 Rate Ctrl */
+	{ 0x00000504, 0x0000 },    /* R1284  - AIF1 Format */
+	{ 0x00000506, 0x0040 },    /* R1286  - AIF1 Rx BCLK Rate */
+	{ 0x00000507, 0x1818 },    /* R1287  - AIF1 Frame Ctrl 1 */
+	{ 0x00000508, 0x1818 },    /* R1288  - AIF1 Frame Ctrl 2 */
+	{ 0x00000509, 0x0000 },    /* R1289  - AIF1 Frame Ctrl 3 */
+	{ 0x0000050A, 0x0001 },    /* R1290  - AIF1 Frame Ctrl 4 */
+	{ 0x0000050B, 0x0002 },    /* R1291  - AIF1 Frame Ctrl 5 */
+	{ 0x0000050C, 0x0003 },    /* R1292  - AIF1 Frame Ctrl 6 */
+	{ 0x0000050D, 0x0004 },    /* R1293  - AIF1 Frame Ctrl 7 */
+	{ 0x0000050E, 0x0005 },    /* R1294  - AIF1 Frame Ctrl 8 */
+	{ 0x00000511, 0x0000 },    /* R1297  - AIF1 Frame Ctrl 11 */
+	{ 0x00000512, 0x0001 },    /* R1298  - AIF1 Frame Ctrl 12 */
+	{ 0x00000513, 0x0002 },    /* R1299  - AIF1 Frame Ctrl 13 */
+	{ 0x00000514, 0x0003 },    /* R1300  - AIF1 Frame Ctrl 14 */
+	{ 0x00000515, 0x0004 },    /* R1301  - AIF1 Frame Ctrl 15 */
+	{ 0x00000516, 0x0005 },    /* R1302  - AIF1 Frame Ctrl 16 */
+	{ 0x00000519, 0x0000 },    /* R1305  - AIF1 Tx Enables */
+	{ 0x0000051A, 0x0000 },    /* R1306  - AIF1 Rx Enables */
+	{ 0x00000540, 0x000C },    /* R1344  - AIF2 BCLK Ctrl */
+	{ 0x00000541, 0x0008 },    /* R1345  - AIF2 Tx Pin Ctrl */
+	{ 0x00000542, 0x0000 },    /* R1346  - AIF2 Rx Pin Ctrl */
+	{ 0x00000543, 0x0000 },    /* R1347  - AIF2 Rate Ctrl */
+	{ 0x00000544, 0x0000 },    /* R1348  - AIF2 Format */
+	{ 0x00000546, 0x0040 },    /* R1350  - AIF2 Rx BCLK Rate */
+	{ 0x00000547, 0x1818 },    /* R1351  - AIF2 Frame Ctrl 1 */
+	{ 0x00000548, 0x1818 },    /* R1352  - AIF2 Frame Ctrl 2 */
+	{ 0x00000549, 0x0000 },    /* R1353  - AIF2 Frame Ctrl 3 */
+	{ 0x0000054A, 0x0001 },    /* R1354  - AIF2 Frame Ctrl 4 */
+	{ 0x0000054B, 0x0002 },    /* R1355  - AIF2 Frame Ctrl 5 */
+	{ 0x0000054C, 0x0003 },    /* R1356  - AIF2 Frame Ctrl 6 */
+	{ 0x0000054D, 0x0004 },    /* R1357  - AIF2 Frame Ctrl 7 */
+	{ 0x0000054E, 0x0005 },    /* R1358  - AIF2 Frame Ctrl 8 */
+	{ 0x00000551, 0x0000 },    /* R1361  - AIF2 Frame Ctrl 11 */
+	{ 0x00000552, 0x0001 },    /* R1362  - AIF2 Frame Ctrl 12 */
+	{ 0x00000553, 0x0002 },    /* R1363  - AIF2 Frame Ctrl 13 */
+	{ 0x00000554, 0x0003 },    /* R1364  - AIF2 Frame Ctrl 14 */
+	{ 0x00000555, 0x0004 },    /* R1365  - AIF2 Frame Ctrl 15 */
+	{ 0x00000556, 0x0005 },    /* R1366  - AIF2 Frame Ctrl 16 */
+	{ 0x00000559, 0x0000 },    /* R1369  - AIF2 Tx Enables */
+	{ 0x0000055A, 0x0000 },    /* R1370  - AIF2 Rx Enables */
+	{ 0x00000580, 0x000C },    /* R1408  - AIF3 BCLK Ctrl */
+	{ 0x00000581, 0x0008 },    /* R1409  - AIF3 Tx Pin Ctrl */
+	{ 0x00000582, 0x0000 },    /* R1410  - AIF3 Rx Pin Ctrl */
+	{ 0x00000583, 0x0000 },    /* R1411  - AIF3 Rate Ctrl */
+	{ 0x00000584, 0x0000 },    /* R1412  - AIF3 Format */
+	{ 0x00000586, 0x0040 },    /* R1414  - AIF3 Rx BCLK Rate */
+	{ 0x00000587, 0x1818 },    /* R1415  - AIF3 Frame Ctrl 1 */
+	{ 0x00000588, 0x1818 },    /* R1416  - AIF3 Frame Ctrl 2 */
+	{ 0x00000589, 0x0000 },    /* R1417  - AIF3 Frame Ctrl 3 */
+	{ 0x0000058A, 0x0001 },    /* R1418  - AIF3 Frame Ctrl 4 */
+	{ 0x00000591, 0x0000 },    /* R1425  - AIF3 Frame Ctrl 11 */
+	{ 0x00000592, 0x0001 },    /* R1426  - AIF3 Frame Ctrl 12 */
+	{ 0x00000599, 0x0000 },    /* R1433  - AIF3 Tx Enables */
+	{ 0x0000059A, 0x0000 },    /* R1434  - AIF3 Rx Enables */
+	{ 0x000005C2, 0x0000 },    /* R1474  - SPD1 TX Control */
+	{ 0x000005C3, 0x0000 },    /* R1475  - SPD1 TX Channel Status 1 */
+	{ 0x000005C4, 0x0B01 },    /* R1476  - SPD1 TX Channel Status 2 */
+	{ 0x000005C5, 0x0000 },    /* R1477  - SPD1 TX Channel Status 3 */
+	{ 0x000005E3, 0x0004 },    /* R1507  - SLIMbus Framer Ref Gear */
+	{ 0x000005E5, 0x0000 },    /* R1509  - SLIMbus Rates 1 */
+	{ 0x000005E6, 0x0000 },    /* R1510  - SLIMbus Rates 2 */
+	{ 0x000005E9, 0x0000 },    /* R1513  - SLIMbus Rates 5 */
+	{ 0x000005EA, 0x0000 },    /* R1514  - SLIMbus Rates 6 */
+	{ 0x000005EB, 0x0000 },    /* R1515  - SLIMbus Rates 7 */
+	{ 0x000005F5, 0x0000 },    /* R1525  - SLIMbus RX Channel Enable */
+	{ 0x000005F6, 0x0000 },    /* R1526  - SLIMbus TX Channel Enable */
+	{ 0x00000640, 0x0000 },    /* R1600  - PWM1MIX Input 1 Source */
+	{ 0x00000641, 0x0080 },    /* R1601  - PWM1MIX Input 1 Volume */
+	{ 0x00000642, 0x0000 },    /* R1602  - PWM1MIX Input 2 Source */
+	{ 0x00000643, 0x0080 },    /* R1603  - PWM1MIX Input 2 Volume */
+	{ 0x00000644, 0x0000 },    /* R1604  - PWM1MIX Input 3 Source */
+	{ 0x00000645, 0x0080 },    /* R1605  - PWM1MIX Input 3 Volume */
+	{ 0x00000646, 0x0000 },    /* R1606  - PWM1MIX Input 4 Source */
+	{ 0x00000647, 0x0080 },    /* R1607  - PWM1MIX Input 4 Volume */
+	{ 0x00000648, 0x0000 },    /* R1608  - PWM2MIX Input 1 Source */
+	{ 0x00000649, 0x0080 },    /* R1609  - PWM2MIX Input 1 Volume */
+	{ 0x0000064A, 0x0000 },    /* R1610  - PWM2MIX Input 2 Source */
+	{ 0x0000064B, 0x0080 },    /* R1611  - PWM2MIX Input 2 Volume */
+	{ 0x0000064C, 0x0000 },    /* R1612  - PWM2MIX Input 3 Source */
+	{ 0x0000064D, 0x0080 },    /* R1613  - PWM2MIX Input 3 Volume */
+	{ 0x0000064E, 0x0000 },    /* R1614  - PWM2MIX Input 4 Source */
+	{ 0x0000064F, 0x0080 },    /* R1615  - PWM2MIX Input 4 Volume */
+	{ 0x00000680, 0x0000 },    /* R1664  - OUT1LMIX Input 1 Source */
+	{ 0x00000681, 0x0080 },    /* R1665  - OUT1LMIX Input 1 Volume */
+	{ 0x00000682, 0x0000 },    /* R1666  - OUT1LMIX Input 2 Source */
+	{ 0x00000683, 0x0080 },    /* R1667  - OUT1LMIX Input 2 Volume */
+	{ 0x00000684, 0x0000 },    /* R1668  - OUT1LMIX Input 3 Source */
+	{ 0x00000685, 0x0080 },    /* R1669  - OUT1LMIX Input 3 Volume */
+	{ 0x00000686, 0x0000 },    /* R1670  - OUT1LMIX Input 4 Source */
+	{ 0x00000687, 0x0080 },    /* R1671  - OUT1LMIX Input 4 Volume */
+	{ 0x00000688, 0x0000 },    /* R1672  - OUT1RMIX Input 1 Source */
+	{ 0x00000689, 0x0080 },    /* R1673  - OUT1RMIX Input 1 Volume */
+	{ 0x0000068A, 0x0000 },    /* R1674  - OUT1RMIX Input 2 Source */
+	{ 0x0000068B, 0x0080 },    /* R1675  - OUT1RMIX Input 2 Volume */
+	{ 0x0000068C, 0x0000 },    /* R1676  - OUT1RMIX Input 3 Source */
+	{ 0x0000068D, 0x0080 },    /* R1677  - OUT1RMIX Input 3 Volume */
+	{ 0x0000068E, 0x0000 },    /* R1678  - OUT1RMIX Input 4 Source */
+	{ 0x0000068F, 0x0080 },    /* R1679  - OUT1RMIX Input 4 Volume */
+	{ 0x00000690, 0x0000 },    /* R1680  - OUT2LMIX Input 1 Source */
+	{ 0x00000691, 0x0080 },    /* R1681  - OUT2LMIX Input 1 Volume */
+	{ 0x00000692, 0x0000 },    /* R1682  - OUT2LMIX Input 2 Source */
+	{ 0x00000693, 0x0080 },    /* R1683  - OUT2LMIX Input 2 Volume */
+	{ 0x00000694, 0x0000 },    /* R1684  - OUT2LMIX Input 3 Source */
+	{ 0x00000695, 0x0080 },    /* R1685  - OUT2LMIX Input 3 Volume */
+	{ 0x00000696, 0x0000 },    /* R1686  - OUT2LMIX Input 4 Source */
+	{ 0x00000697, 0x0080 },    /* R1687  - OUT2LMIX Input 4 Volume */
+	{ 0x00000698, 0x0000 },    /* R1688  - OUT2RMIX Input 1 Source */
+	{ 0x00000699, 0x0080 },    /* R1689  - OUT2RMIX Input 1 Volume */
+	{ 0x0000069A, 0x0000 },    /* R1690  - OUT2RMIX Input 2 Source */
+	{ 0x0000069B, 0x0080 },    /* R1691  - OUT2RMIX Input 2 Volume */
+	{ 0x0000069C, 0x0000 },    /* R1692  - OUT2RMIX Input 3 Source */
+	{ 0x0000069D, 0x0080 },    /* R1693  - OUT2RMIX Input 3 Volume */
+	{ 0x0000069E, 0x0000 },    /* R1694  - OUT2RMIX Input 4 Source */
+	{ 0x0000069F, 0x0080 },    /* R1695  - OUT2RMIX Input 4 Volume */
+	{ 0x000006A0, 0x0000 },    /* R1696  - OUT3LMIX Input 1 Source */
+	{ 0x000006A1, 0x0080 },    /* R1697  - OUT3LMIX Input 1 Volume */
+	{ 0x000006A2, 0x0000 },    /* R1698  - OUT3LMIX Input 2 Source */
+	{ 0x000006A3, 0x0080 },    /* R1699  - OUT3LMIX Input 2 Volume */
+	{ 0x000006A4, 0x0000 },    /* R1700  - OUT3LMIX Input 3 Source */
+	{ 0x000006A5, 0x0080 },    /* R1701  - OUT3LMIX Input 3 Volume */
+	{ 0x000006A6, 0x0000 },    /* R1702  - OUT3LMIX Input 4 Source */
+	{ 0x000006A7, 0x0080 },    /* R1703  - OUT3LMIX Input 4 Volume */
+	{ 0x000006B0, 0x0000 },    /* R1712  - OUT4LMIX Input 1 Source */
+	{ 0x000006B1, 0x0080 },    /* R1713  - OUT4LMIX Input 1 Volume */
+	{ 0x000006B2, 0x0000 },    /* R1714  - OUT4LMIX Input 2 Source */
+	{ 0x000006B3, 0x0080 },    /* R1715  - OUT4LMIX Input 2 Volume */
+	{ 0x000006B4, 0x0000 },    /* R1716  - OUT4LMIX Input 3 Source */
+	{ 0x000006B5, 0x0080 },    /* R1717  - OUT4LMIX Input 3 Volume */
+	{ 0x000006B6, 0x0000 },    /* R1718  - OUT4LMIX Input 4 Source */
+	{ 0x000006B7, 0x0080 },    /* R1719  - OUT4LMIX Input 4 Volume */
+	{ 0x000006B8, 0x0000 },    /* R1720  - OUT4RMIX Input 1 Source */
+	{ 0x000006B9, 0x0080 },    /* R1721  - OUT4RMIX Input 1 Volume */
+	{ 0x000006BA, 0x0000 },    /* R1722  - OUT4RMIX Input 2 Source */
+	{ 0x000006BB, 0x0080 },    /* R1723  - OUT4RMIX Input 2 Volume */
+	{ 0x000006BC, 0x0000 },    /* R1724  - OUT4RMIX Input 3 Source */
+	{ 0x000006BD, 0x0080 },    /* R1725  - OUT4RMIX Input 3 Volume */
+	{ 0x000006BE, 0x0000 },    /* R1726  - OUT4RMIX Input 4 Source */
+	{ 0x000006BF, 0x0080 },    /* R1727  - OUT4RMIX Input 4 Volume */
+	{ 0x000006C0, 0x0000 },    /* R1728  - OUT5LMIX Input 1 Source */
+	{ 0x000006C1, 0x0080 },    /* R1729  - OUT5LMIX Input 1 Volume */
+	{ 0x000006C2, 0x0000 },    /* R1730  - OUT5LMIX Input 2 Source */
+	{ 0x000006C3, 0x0080 },    /* R1731  - OUT5LMIX Input 2 Volume */
+	{ 0x000006C4, 0x0000 },    /* R1732  - OUT5LMIX Input 3 Source */
+	{ 0x000006C5, 0x0080 },    /* R1733  - OUT5LMIX Input 3 Volume */
+	{ 0x000006C6, 0x0000 },    /* R1734  - OUT5LMIX Input 4 Source */
+	{ 0x000006C7, 0x0080 },    /* R1735  - OUT5LMIX Input 4 Volume */
+	{ 0x000006C8, 0x0000 },    /* R1736  - OUT5RMIX Input 1 Source */
+	{ 0x000006C9, 0x0080 },    /* R1737  - OUT5RMIX Input 1 Volume */
+	{ 0x000006CA, 0x0000 },    /* R1738  - OUT5RMIX Input 2 Source */
+	{ 0x000006CB, 0x0080 },    /* R1739  - OUT5RMIX Input 2 Volume */
+	{ 0x000006CC, 0x0000 },    /* R1740  - OUT5RMIX Input 3 Source */
+	{ 0x000006CD, 0x0080 },    /* R1741  - OUT5RMIX Input 3 Volume */
+	{ 0x000006CE, 0x0000 },    /* R1742  - OUT5RMIX Input 4 Source */
+	{ 0x000006CF, 0x0080 },    /* R1743  - OUT5RMIX Input 4 Volume */
+	{ 0x00000700, 0x0000 },    /* R1792  - AIF1TX1MIX Input 1 Source */
+	{ 0x00000701, 0x0080 },    /* R1793  - AIF1TX1MIX Input 1 Volume */
+	{ 0x00000702, 0x0000 },    /* R1794  - AIF1TX1MIX Input 2 Source */
+	{ 0x00000703, 0x0080 },    /* R1795  - AIF1TX1MIX Input 2 Volume */
+	{ 0x00000704, 0x0000 },    /* R1796  - AIF1TX1MIX Input 3 Source */
+	{ 0x00000705, 0x0080 },    /* R1797  - AIF1TX1MIX Input 3 Volume */
+	{ 0x00000706, 0x0000 },    /* R1798  - AIF1TX1MIX Input 4 Source */
+	{ 0x00000707, 0x0080 },    /* R1799  - AIF1TX1MIX Input 4 Volume */
+	{ 0x00000708, 0x0000 },    /* R1800  - AIF1TX2MIX Input 1 Source */
+	{ 0x00000709, 0x0080 },    /* R1801  - AIF1TX2MIX Input 1 Volume */
+	{ 0x0000070A, 0x0000 },    /* R1802  - AIF1TX2MIX Input 2 Source */
+	{ 0x0000070B, 0x0080 },    /* R1803  - AIF1TX2MIX Input 2 Volume */
+	{ 0x0000070C, 0x0000 },    /* R1804  - AIF1TX2MIX Input 3 Source */
+	{ 0x0000070D, 0x0080 },    /* R1805  - AIF1TX2MIX Input 3 Volume */
+	{ 0x0000070E, 0x0000 },    /* R1806  - AIF1TX2MIX Input 4 Source */
+	{ 0x0000070F, 0x0080 },    /* R1807  - AIF1TX2MIX Input 4 Volume */
+	{ 0x00000710, 0x0000 },    /* R1808  - AIF1TX3MIX Input 1 Source */
+	{ 0x00000711, 0x0080 },    /* R1809  - AIF1TX3MIX Input 1 Volume */
+	{ 0x00000712, 0x0000 },    /* R1810  - AIF1TX3MIX Input 2 Source */
+	{ 0x00000713, 0x0080 },    /* R1811  - AIF1TX3MIX Input 2 Volume */
+	{ 0x00000714, 0x0000 },    /* R1812  - AIF1TX3MIX Input 3 Source */
+	{ 0x00000715, 0x0080 },    /* R1813  - AIF1TX3MIX Input 3 Volume */
+	{ 0x00000716, 0x0000 },    /* R1814  - AIF1TX3MIX Input 4 Source */
+	{ 0x00000717, 0x0080 },    /* R1815  - AIF1TX3MIX Input 4 Volume */
+	{ 0x00000718, 0x0000 },    /* R1816  - AIF1TX4MIX Input 1 Source */
+	{ 0x00000719, 0x0080 },    /* R1817  - AIF1TX4MIX Input 1 Volume */
+	{ 0x0000071A, 0x0000 },    /* R1818  - AIF1TX4MIX Input 2 Source */
+	{ 0x0000071B, 0x0080 },    /* R1819  - AIF1TX4MIX Input 2 Volume */
+	{ 0x0000071C, 0x0000 },    /* R1820  - AIF1TX4MIX Input 3 Source */
+	{ 0x0000071D, 0x0080 },    /* R1821  - AIF1TX4MIX Input 3 Volume */
+	{ 0x0000071E, 0x0000 },    /* R1822  - AIF1TX4MIX Input 4 Source */
+	{ 0x0000071F, 0x0080 },    /* R1823  - AIF1TX4MIX Input 4 Volume */
+	{ 0x00000720, 0x0000 },    /* R1824  - AIF1TX5MIX Input 1 Source */
+	{ 0x00000721, 0x0080 },    /* R1825  - AIF1TX5MIX Input 1 Volume */
+	{ 0x00000722, 0x0000 },    /* R1826  - AIF1TX5MIX Input 2 Source */
+	{ 0x00000723, 0x0080 },    /* R1827  - AIF1TX5MIX Input 2 Volume */
+	{ 0x00000724, 0x0000 },    /* R1828  - AIF1TX5MIX Input 3 Source */
+	{ 0x00000725, 0x0080 },    /* R1829  - AIF1TX5MIX Input 3 Volume */
+	{ 0x00000726, 0x0000 },    /* R1830  - AIF1TX5MIX Input 4 Source */
+	{ 0x00000727, 0x0080 },    /* R1831  - AIF1TX5MIX Input 4 Volume */
+	{ 0x00000728, 0x0000 },    /* R1832  - AIF1TX6MIX Input 1 Source */
+	{ 0x00000729, 0x0080 },    /* R1833  - AIF1TX6MIX Input 1 Volume */
+	{ 0x0000072A, 0x0000 },    /* R1834  - AIF1TX6MIX Input 2 Source */
+	{ 0x0000072B, 0x0080 },    /* R1835  - AIF1TX6MIX Input 2 Volume */
+	{ 0x0000072C, 0x0000 },    /* R1836  - AIF1TX6MIX Input 3 Source */
+	{ 0x0000072D, 0x0080 },    /* R1837  - AIF1TX6MIX Input 3 Volume */
+	{ 0x0000072E, 0x0000 },    /* R1838  - AIF1TX6MIX Input 4 Source */
+	{ 0x0000072F, 0x0080 },    /* R1839  - AIF1TX6MIX Input 4 Volume */
+	{ 0x00000740, 0x0000 },    /* R1856  - AIF2TX1MIX Input 1 Source */
+	{ 0x00000741, 0x0080 },    /* R1857  - AIF2TX1MIX Input 1 Volume */
+	{ 0x00000742, 0x0000 },    /* R1858  - AIF2TX1MIX Input 2 Source */
+	{ 0x00000743, 0x0080 },    /* R1859  - AIF2TX1MIX Input 2 Volume */
+	{ 0x00000744, 0x0000 },    /* R1860  - AIF2TX1MIX Input 3 Source */
+	{ 0x00000745, 0x0080 },    /* R1861  - AIF2TX1MIX Input 3 Volume */
+	{ 0x00000746, 0x0000 },    /* R1862  - AIF2TX1MIX Input 4 Source */
+	{ 0x00000747, 0x0080 },    /* R1863  - AIF2TX1MIX Input 4 Volume */
+	{ 0x00000748, 0x0000 },    /* R1864  - AIF2TX2MIX Input 1 Source */
+	{ 0x00000749, 0x0080 },    /* R1865  - AIF2TX2MIX Input 1 Volume */
+	{ 0x0000074A, 0x0000 },    /* R1866  - AIF2TX2MIX Input 2 Source */
+	{ 0x0000074B, 0x0080 },    /* R1867  - AIF2TX2MIX Input 2 Volume */
+	{ 0x0000074C, 0x0000 },    /* R1868  - AIF2TX2MIX Input 3 Source */
+	{ 0x0000074D, 0x0080 },    /* R1869  - AIF2TX2MIX Input 3 Volume */
+	{ 0x0000074E, 0x0000 },    /* R1870  - AIF2TX2MIX Input 4 Source */
+	{ 0x0000074F, 0x0080 },    /* R1871  - AIF2TX2MIX Input 4 Volume */
+	{ 0x00000750, 0x0000 },    /* R1872  - AIF2TX3MIX Input 1 Source */
+	{ 0x00000751, 0x0080 },    /* R1873  - AIF2TX3MIX Input 1 Volume */
+	{ 0x00000752, 0x0000 },    /* R1874  - AIF2TX3MIX Input 2 Source */
+	{ 0x00000753, 0x0080 },    /* R1875  - AIF2TX3MIX Input 2 Volume */
+	{ 0x00000754, 0x0000 },    /* R1876  - AIF2TX3MIX Input 3 Source */
+	{ 0x00000755, 0x0080 },    /* R1877  - AIF2TX3MIX Input 3 Volume */
+	{ 0x00000756, 0x0000 },    /* R1878  - AIF2TX3MIX Input 4 Source */
+	{ 0x00000757, 0x0080 },    /* R1879  - AIF2TX3MIX Input 4 Volume */
+	{ 0x00000758, 0x0000 },    /* R1880  - AIF2TX4MIX Input 1 Source */
+	{ 0x00000759, 0x0080 },    /* R1881  - AIF2TX4MIX Input 1 Volume */
+	{ 0x0000075A, 0x0000 },    /* R1882  - AIF2TX4MIX Input 2 Source */
+	{ 0x0000075B, 0x0080 },    /* R1883  - AIF2TX4MIX Input 2 Volume */
+	{ 0x0000075C, 0x0000 },    /* R1884  - AIF2TX4MIX Input 3 Source */
+	{ 0x0000075D, 0x0080 },    /* R1885  - AIF2TX4MIX Input 3 Volume */
+	{ 0x0000075E, 0x0000 },    /* R1886  - AIF2TX4MIX Input 4 Source */
+	{ 0x0000075F, 0x0080 },    /* R1887  - AIF2TX4MIX Input 4 Volume */
+	{ 0x00000760, 0x0000 },    /* R1888  - AIF2TX5MIX Input 1 Source */
+	{ 0x00000761, 0x0080 },    /* R1889  - AIF2TX5MIX Input 1 Volume */
+	{ 0x00000762, 0x0000 },    /* R1890  - AIF2TX5MIX Input 2 Source */
+	{ 0x00000763, 0x0080 },    /* R1891  - AIF2TX5MIX Input 2 Volume */
+	{ 0x00000764, 0x0000 },    /* R1892  - AIF2TX5MIX Input 3 Source */
+	{ 0x00000765, 0x0080 },    /* R1893  - AIF2TX5MIX Input 3 Volume */
+	{ 0x00000766, 0x0000 },    /* R1894  - AIF2TX5MIX Input 4 Source */
+	{ 0x00000767, 0x0080 },    /* R1895  - AIF2TX5MIX Input 4 Volume */
+	{ 0x00000768, 0x0000 },    /* R1896  - AIF2TX6MIX Input 1 Source */
+	{ 0x00000769, 0x0080 },    /* R1897  - AIF2TX6MIX Input 1 Volume */
+	{ 0x0000076A, 0x0000 },    /* R1898  - AIF2TX6MIX Input 2 Source */
+	{ 0x0000076B, 0x0080 },    /* R1899  - AIF2TX6MIX Input 2 Volume */
+	{ 0x0000076C, 0x0000 },    /* R1900  - AIF2TX6MIX Input 3 Source */
+	{ 0x0000076D, 0x0080 },    /* R1901  - AIF2TX6MIX Input 3 Volume */
+	{ 0x0000076E, 0x0000 },    /* R1902  - AIF2TX6MIX Input 4 Source */
+	{ 0x0000076F, 0x0080 },    /* R1903  - AIF2TX6MIX Input 4 Volume */
+	{ 0x00000780, 0x0000 },    /* R1920  - AIF3TX1MIX Input 1 Source */
+	{ 0x00000781, 0x0080 },    /* R1921  - AIF3TX1MIX Input 1 Volume */
+	{ 0x00000782, 0x0000 },    /* R1922  - AIF3TX1MIX Input 2 Source */
+	{ 0x00000783, 0x0080 },    /* R1923  - AIF3TX1MIX Input 2 Volume */
+	{ 0x00000784, 0x0000 },    /* R1924  - AIF3TX1MIX Input 3 Source */
+	{ 0x00000785, 0x0080 },    /* R1925  - AIF3TX1MIX Input 3 Volume */
+	{ 0x00000786, 0x0000 },    /* R1926  - AIF3TX1MIX Input 4 Source */
+	{ 0x00000787, 0x0080 },    /* R1927  - AIF3TX1MIX Input 4 Volume */
+	{ 0x00000788, 0x0000 },    /* R1928  - AIF3TX2MIX Input 1 Source */
+	{ 0x00000789, 0x0080 },    /* R1929  - AIF3TX2MIX Input 1 Volume */
+	{ 0x0000078A, 0x0000 },    /* R1930  - AIF3TX2MIX Input 2 Source */
+	{ 0x0000078B, 0x0080 },    /* R1931  - AIF3TX2MIX Input 2 Volume */
+	{ 0x0000078C, 0x0000 },    /* R1932  - AIF3TX2MIX Input 3 Source */
+	{ 0x0000078D, 0x0080 },    /* R1933  - AIF3TX2MIX Input 3 Volume */
+	{ 0x0000078E, 0x0000 },    /* R1934  - AIF3TX2MIX Input 4 Source */
+	{ 0x0000078F, 0x0080 },    /* R1935  - AIF3TX2MIX Input 4 Volume */
+	{ 0x000007C0, 0x0000 },    /* R1984  - SLIMTX1MIX Input 1 Source */
+	{ 0x000007C1, 0x0080 },    /* R1985  - SLIMTX1MIX Input 1 Volume */
+	{ 0x000007C8, 0x0000 },    /* R1992  - SLIMTX2MIX Input 1 Source */
+	{ 0x000007C9, 0x0080 },    /* R1993  - SLIMTX2MIX Input 1 Volume */
+	{ 0x000007D0, 0x0000 },    /* R2000  - SLIMTX3MIX Input 1 Source */
+	{ 0x000007D1, 0x0080 },    /* R2001  - SLIMTX3MIX Input 1 Volume */
+	{ 0x000007D8, 0x0000 },    /* R2008  - SLIMTX4MIX Input 1 Source */
+	{ 0x000007D9, 0x0080 },    /* R2009  - SLIMTX4MIX Input 1 Volume */
+	{ 0x000007E0, 0x0000 },    /* R2016  - SLIMTX5MIX Input 1 Source */
+	{ 0x000007E1, 0x0080 },    /* R2017  - SLIMTX5MIX Input 1 Volume */
+	{ 0x000007E8, 0x0000 },    /* R2024  - SLIMTX6MIX Input 1 Source */
+	{ 0x000007E9, 0x0080 },    /* R2025  - SLIMTX6MIX Input 1 Volume */
+	{ 0x00000800, 0x0000 },    /* R2048  - SPDIF1TX1MIX Input 1 Source */
+	{ 0x00000801, 0x0080 },    /* R2049  - SPDIF1TX1MIX Input 1 Volume */
+	{ 0x00000808, 0x0000 },    /* R2056  - SPDIF1TX2MIX Input 1 Source */
+	{ 0x00000809, 0x0080 },    /* R2057  - SPDIF1TX2MIX Input 1 Volume */
+	{ 0x00000880, 0x0000 },    /* R2176  - EQ1MIX Input 1 Source */
+	{ 0x00000881, 0x0080 },    /* R2177  - EQ1MIX Input 1 Volume */
+	{ 0x00000888, 0x0000 },    /* R2184  - EQ2MIX Input 1 Source */
+	{ 0x00000889, 0x0080 },    /* R2185  - EQ2MIX Input 1 Volume */
+	{ 0x00000890, 0x0000 },    /* R2192  - EQ3MIX Input 1 Source */
+	{ 0x00000891, 0x0080 },    /* R2193  - EQ3MIX Input 1 Volume */
+	{ 0x00000898, 0x0000 },    /* R2200  - EQ4MIX Input 1 Source */
+	{ 0x00000899, 0x0080 },    /* R2201  - EQ4MIX Input 1 Volume */
+	{ 0x000008C0, 0x0000 },    /* R2240  - DRC1LMIX Input 1 Source */
+	{ 0x000008C1, 0x0080 },    /* R2241  - DRC1LMIX Input 1 Volume */
+	{ 0x000008C8, 0x0000 },    /* R2248  - DRC1RMIX Input 1 Source */
+	{ 0x000008C9, 0x0080 },    /* R2249  - DRC1RMIX Input 1 Volume */
+	{ 0x00000900, 0x0000 },    /* R2304  - HPLP1MIX Input 1 Source */
+	{ 0x00000901, 0x0080 },    /* R2305  - HPLP1MIX Input 1 Volume */
+	{ 0x00000902, 0x0000 },    /* R2306  - HPLP1MIX Input 2 Source */
+	{ 0x00000903, 0x0080 },    /* R2307  - HPLP1MIX Input 2 Volume */
+	{ 0x00000904, 0x0000 },    /* R2308  - HPLP1MIX Input 3 Source */
+	{ 0x00000905, 0x0080 },    /* R2309  - HPLP1MIX Input 3 Volume */
+	{ 0x00000906, 0x0000 },    /* R2310  - HPLP1MIX Input 4 Source */
+	{ 0x00000907, 0x0080 },    /* R2311  - HPLP1MIX Input 4 Volume */
+	{ 0x00000908, 0x0000 },    /* R2312  - HPLP2MIX Input 1 Source */
+	{ 0x00000909, 0x0080 },    /* R2313  - HPLP2MIX Input 1 Volume */
+	{ 0x0000090A, 0x0000 },    /* R2314  - HPLP2MIX Input 2 Source */
+	{ 0x0000090B, 0x0080 },    /* R2315  - HPLP2MIX Input 2 Volume */
+	{ 0x0000090C, 0x0000 },    /* R2316  - HPLP2MIX Input 3 Source */
+	{ 0x0000090D, 0x0080 },    /* R2317  - HPLP2MIX Input 3 Volume */
+	{ 0x0000090E, 0x0000 },    /* R2318  - HPLP2MIX Input 4 Source */
+	{ 0x0000090F, 0x0080 },    /* R2319  - HPLP2MIX Input 4 Volume */
+	{ 0x00000910, 0x0000 },    /* R2320  - HPLP3MIX Input 1 Source */
+	{ 0x00000911, 0x0080 },    /* R2321  - HPLP3MIX Input 1 Volume */
+	{ 0x00000912, 0x0000 },    /* R2322  - HPLP3MIX Input 2 Source */
+	{ 0x00000913, 0x0080 },    /* R2323  - HPLP3MIX Input 2 Volume */
+	{ 0x00000914, 0x0000 },    /* R2324  - HPLP3MIX Input 3 Source */
+	{ 0x00000915, 0x0080 },    /* R2325  - HPLP3MIX Input 3 Volume */
+	{ 0x00000916, 0x0000 },    /* R2326  - HPLP3MIX Input 4 Source */
+	{ 0x00000917, 0x0080 },    /* R2327  - HPLP3MIX Input 4 Volume */
+	{ 0x00000918, 0x0000 },    /* R2328  - HPLP4MIX Input 1 Source */
+	{ 0x00000919, 0x0080 },    /* R2329  - HPLP4MIX Input 1 Volume */
+	{ 0x0000091A, 0x0000 },    /* R2330  - HPLP4MIX Input 2 Source */
+	{ 0x0000091B, 0x0080 },    /* R2331  - HPLP4MIX Input 2 Volume */
+	{ 0x0000091C, 0x0000 },    /* R2332  - HPLP4MIX Input 3 Source */
+	{ 0x0000091D, 0x0080 },    /* R2333  - HPLP4MIX Input 3 Volume */
+	{ 0x0000091E, 0x0000 },    /* R2334  - HPLP4MIX Input 4 Source */
+	{ 0x0000091F, 0x0080 },    /* R2335  - HPLP4MIX Input 4 Volume */
+	{ 0x00000A80, 0x0000 },    /* R2688  - ASRC1LMIX Input 1 Source */
+	{ 0x00000A88, 0x0000 },    /* R2696  - ASRC1RMIX Input 1 Source */
+	{ 0x00000A90, 0x0000 },    /* R2704  - ASRC2LMIX Input 1 Source */
+	{ 0x00000A98, 0x0000 },    /* R2712  - ASRC2RMIX Input 1 Source */
+	{ 0x00000B00, 0x0000 },    /* R2816  - ISRC1DEC1MIX Input 1 Source */
+	{ 0x00000B08, 0x0000 },    /* R2824  - ISRC1DEC2MIX Input 1 Source */
+	{ 0x00000B10, 0x0000 },    /* R2832  - ISRC1DEC3MIX Input 1 Source */
+	{ 0x00000B18, 0x0000 },    /* R2840  - ISRC1DEC4MIX Input 1 Source */
+	{ 0x00000B20, 0x0000 },    /* R2848  - ISRC1INT1MIX Input 1 Source */
+	{ 0x00000B28, 0x0000 },    /* R2856  - ISRC1INT2MIX Input 1 Source */
+	{ 0x00000B30, 0x0000 },    /* R2864  - ISRC1INT3MIX Input 1 Source */
+	{ 0x00000B38, 0x0000 },    /* R2872  - ISRC1INT4MIX Input 1 Source */
+	{ 0x00000B40, 0x0000 },    /* R2880  - ISRC2DEC1MIX Input 1 Source */
+	{ 0x00000B48, 0x0000 },    /* R2888  - ISRC2DEC2MIX Input 1 Source */
+	{ 0x00000B60, 0x0000 },    /* R2912  - ISRC2INT1MIX Input 1 Source */
+	{ 0x00000B68, 0x0000 },    /* R2920  - ISRC2INT2MIX Input 1 Source */
+	{ 0x00000C00, 0xA101 },    /* R3072  - GPIO1 CTRL */
+	{ 0x00000C01, 0xA101 },    /* R3073  - GPIO2 CTRL */
+	{ 0x00000C02, 0xA101 },    /* R3074  - GPIO3 CTRL */
+	{ 0x00000C03, 0xA101 },    /* R3075  - GPIO4 CTRL */
+	{ 0x00000C04, 0xA101 },    /* R3076  - GPIO5 CTRL */
+	{ 0x00000C0F, 0x0400 },    /* R3087  - IRQ CTRL 1 */
+	{ 0x00000C10, 0x1000 },    /* R3088  - GPIO Debounce Config */
+	{ 0x00000C18, 0x0000 },    /* R3096  - GP Switch 1 */
+	{ 0x00000C20, 0x8002 },    /* R3104  - Misc Pad Ctrl 1 */
+	{ 0x00000C21, 0x8001 },    /* R3105  - Misc Pad Ctrl 2 */
+	{ 0x00000C22, 0x0000 },    /* R3106  - Misc Pad Ctrl 3 */
+	{ 0x00000C23, 0x0000 },    /* R3107  - Misc Pad Ctrl 4 */
+	{ 0x00000C24, 0x0000 },    /* R3108  - Misc Pad Ctrl 5 */
+	{ 0x00000C25, 0x0000 },    /* R3109  - Misc Pad Ctrl 6 */
+	{ 0x00000D08, 0xFFFF },    /* R3336  - Interrupt Status 1 Mask */
+	{ 0x00000D09, 0xFFFF },    /* R3337  - Interrupt Status 2 Mask */
+	{ 0x00000D0A, 0xFFFF },    /* R3338  - Interrupt Status 3 Mask */
+	{ 0x00000D0B, 0xFFFF },    /* R3339  - Interrupt Status 4 Mask */
+	{ 0x00000D0C, 0xFEFF },    /* R3340  - Interrupt Status 5 Mask */
+	{ 0x00000D0F, 0x0000 },    /* R3343  - Interrupt Control */
+	{ 0x00000D18, 0xFFFF },    /* R3352  - IRQ2 Status 1 Mask */
+	{ 0x00000D19, 0xFFFF },    /* R3353  - IRQ2 Status 2 Mask */
+	{ 0x00000D1A, 0xFFFF },    /* R3354  - IRQ2 Status 3 Mask */
+	{ 0x00000D1B, 0xFFFF },    /* R3355  - IRQ2 Status 4 Mask */
+	{ 0x00000D1C, 0xFEFF },    /* R3356  - IRQ2 Status 5 Mask */
+	{ 0x00000D1D, 0xFFFF },    /* R3357  - IRQ2 Status 6 Mask */
+	{ 0x00000D1F, 0x0000 },    /* R3359  - IRQ2 Control */
+	{ 0x00000D53, 0xFFFF },    /* R3411  - AOD IRQ Mask IRQ1 */
+	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
+	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
+	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
+	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
+	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
+	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
+	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
+	{ 0x00000E13, 0x03FE },    /* R3603  - EQ1_4 */
+	{ 0x00000E14, 0x00E0 },    /* R3604  - EQ1_5 */
+	{ 0x00000E15, 0x1EC4 },    /* R3605  - EQ1_6 */
+	{ 0x00000E16, 0xF136 },    /* R3606  - EQ1_7 */
+	{ 0x00000E17, 0x0409 },    /* R3607  - EQ1_8 */
+	{ 0x00000E18, 0x04CC },    /* R3608  - EQ1_9 */
+	{ 0x00000E19, 0x1C9B },    /* R3609  - EQ1_10 */
+	{ 0x00000E1A, 0xF337 },    /* R3610  - EQ1_11 */
+	{ 0x00000E1B, 0x040B },    /* R3611  - EQ1_12 */
+	{ 0x00000E1C, 0x0CBB },    /* R3612  - EQ1_13 */
+	{ 0x00000E1D, 0x16F8 },    /* R3613  - EQ1_14 */
+	{ 0x00000E1E, 0xF7D9 },    /* R3614  - EQ1_15 */
+	{ 0x00000E1F, 0x040A },    /* R3615  - EQ1_16 */
+	{ 0x00000E20, 0x1F14 },    /* R3616  - EQ1_17 */
+	{ 0x00000E21, 0x058C },    /* R3617  - EQ1_18 */
+	{ 0x00000E22, 0x0563 },    /* R3618  - EQ1_19 */
+	{ 0x00000E23, 0x4000 },    /* R3619  - EQ1_20 */
+	{ 0x00000E24, 0x0B75 },    /* R3620  - EQ1_21 */
+	{ 0x00000E26, 0x6318 },    /* R3622  - EQ2_1 */
+	{ 0x00000E27, 0x6300 },    /* R3623  - EQ2_2 */
+	{ 0x00000E28, 0x0FC8 },    /* R3624  - EQ2_3 */
+	{ 0x00000E29, 0x03FE },    /* R3625  - EQ2_4 */
+	{ 0x00000E2A, 0x00E0 },    /* R3626  - EQ2_5 */
+	{ 0x00000E2B, 0x1EC4 },    /* R3627  - EQ2_6 */
+	{ 0x00000E2C, 0xF136 },    /* R3628  - EQ2_7 */
+	{ 0x00000E2D, 0x0409 },    /* R3629  - EQ2_8 */
+	{ 0x00000E2E, 0x04CC },    /* R3630  - EQ2_9 */
+	{ 0x00000E2F, 0x1C9B },    /* R3631  - EQ2_10 */
+	{ 0x00000E30, 0xF337 },    /* R3632  - EQ2_11 */
+	{ 0x00000E31, 0x040B },    /* R3633  - EQ2_12 */
+	{ 0x00000E32, 0x0CBB },    /* R3634  - EQ2_13 */
+	{ 0x00000E33, 0x16F8 },    /* R3635  - EQ2_14 */
+	{ 0x00000E34, 0xF7D9 },    /* R3636  - EQ2_15 */
+	{ 0x00000E35, 0x040A },    /* R3637  - EQ2_16 */
+	{ 0x00000E36, 0x1F14 },    /* R3638  - EQ2_17 */
+	{ 0x00000E37, 0x058C },    /* R3639  - EQ2_18 */
+	{ 0x00000E38, 0x0563 },    /* R3640  - EQ2_19 */
+	{ 0x00000E39, 0x4000 },    /* R3641  - EQ2_20 */
+	{ 0x00000E3A, 0x0B75 },    /* R3642  - EQ2_21 */
+	{ 0x00000E3C, 0x6318 },    /* R3644  - EQ3_1 */
+	{ 0x00000E3D, 0x6300 },    /* R3645  - EQ3_2 */
+	{ 0x00000E3E, 0x0FC8 },    /* R3646  - EQ3_3 */
+	{ 0x00000E3F, 0x03FE },    /* R3647  - EQ3_4 */
+	{ 0x00000E40, 0x00E0 },    /* R3648  - EQ3_5 */
+	{ 0x00000E41, 0x1EC4 },    /* R3649  - EQ3_6 */
+	{ 0x00000E42, 0xF136 },    /* R3650  - EQ3_7 */
+	{ 0x00000E43, 0x0409 },    /* R3651  - EQ3_8 */
+	{ 0x00000E44, 0x04CC },    /* R3652  - EQ3_9 */
+	{ 0x00000E45, 0x1C9B },    /* R3653  - EQ3_10 */
+	{ 0x00000E46, 0xF337 },    /* R3654  - EQ3_11 */
+	{ 0x00000E47, 0x040B },    /* R3655  - EQ3_12 */
+	{ 0x00000E48, 0x0CBB },    /* R3656  - EQ3_13 */
+	{ 0x00000E49, 0x16F8 },    /* R3657  - EQ3_14 */
+	{ 0x00000E4A, 0xF7D9 },    /* R3658  - EQ3_15 */
+	{ 0x00000E4B, 0x040A },    /* R3659  - EQ3_16 */
+	{ 0x00000E4C, 0x1F14 },    /* R3660  - EQ3_17 */
+	{ 0x00000E4D, 0x058C },    /* R3661  - EQ3_18 */
+	{ 0x00000E4E, 0x0563 },    /* R3662  - EQ3_19 */
+	{ 0x00000E4F, 0x4000 },    /* R3663  - EQ3_20 */
+	{ 0x00000E50, 0x0B75 },    /* R3664  - EQ3_21 */
+	{ 0x00000E52, 0x6318 },    /* R3666  - EQ4_1 */
+	{ 0x00000E53, 0x6300 },    /* R3667  - EQ4_2 */
+	{ 0x00000E54, 0x0FC8 },    /* R3668  - EQ4_3 */
+	{ 0x00000E55, 0x03FE },    /* R3669  - EQ4_4 */
+	{ 0x00000E56, 0x00E0 },    /* R3670  - EQ4_5 */
+	{ 0x00000E57, 0x1EC4 },    /* R3671  - EQ4_6 */
+	{ 0x00000E58, 0xF136 },    /* R3672  - EQ4_7 */
+	{ 0x00000E59, 0x0409 },    /* R3673  - EQ4_8 */
+	{ 0x00000E5A, 0x04CC },    /* R3674  - EQ4_9 */
+	{ 0x00000E5B, 0x1C9B },    /* R3675  - EQ4_10 */
+	{ 0x00000E5C, 0xF337 },    /* R3676  - EQ4_11 */
+	{ 0x00000E5D, 0x040B },    /* R3677  - EQ4_12 */
+	{ 0x00000E5E, 0x0CBB },    /* R3678  - EQ4_13 */
+	{ 0x00000E5F, 0x16F8 },    /* R3679  - EQ4_14 */
+	{ 0x00000E60, 0xF7D9 },    /* R3680  - EQ4_15 */
+	{ 0x00000E61, 0x040A },    /* R3681  - EQ4_16 */
+	{ 0x00000E62, 0x1F14 },    /* R3682  - EQ4_17 */
+	{ 0x00000E63, 0x058C },    /* R3683  - EQ4_18 */
+	{ 0x00000E64, 0x0563 },    /* R3684  - EQ4_19 */
+	{ 0x00000E65, 0x4000 },    /* R3685  - EQ4_20 */
+	{ 0x00000E66, 0x0B75 },    /* R3686  - EQ4_21 */
+	{ 0x00000E80, 0x0018 },    /* R3712  - DRC1 ctrl1 */
+	{ 0x00000E81, 0x0933 },    /* R3713  - DRC1 ctrl2 */
+	{ 0x00000E82, 0x0018 },    /* R3714  - DRC1 ctrl3 */
+	{ 0x00000E83, 0x0000 },    /* R3715  - DRC1 ctrl4 */
+	{ 0x00000E84, 0x0000 },    /* R3716  - DRC1 ctrl5 */
+	{ 0x00000EC0, 0x0000 },    /* R3776  - HPLPF1_1 */
+	{ 0x00000EC1, 0x0000 },    /* R3777  - HPLPF1_2 */
+	{ 0x00000EC4, 0x0000 },    /* R3780  - HPLPF2_1 */
+	{ 0x00000EC5, 0x0000 },    /* R3781  - HPLPF2_2 */
+	{ 0x00000EC8, 0x0000 },    /* R3784  - HPLPF3_1 */
+	{ 0x00000EC9, 0x0000 },    /* R3785  - HPLPF3_2 */
+	{ 0x00000ECC, 0x0000 },    /* R3788  - HPLPF4_1 */
+	{ 0x00000ECD, 0x0000 },    /* R3789  - HPLPF4_2 */
+	{ 0x00000EE0, 0x0000 },    /* R3808  - ASRC_ENABLE */
+	{ 0x00000EE2, 0x0000 },    /* R3810  - ASRC_RATE1 */
+	{ 0x00000EE3, 0x4000 },    /* R3811  - ASRC_RATE2 */
+	{ 0x00000EF0, 0x0000 },    /* R3824  - ISRC 1 CTRL 1 */
+	{ 0x00000EF1, 0x0001 },    /* R3825  - ISRC 1 CTRL 2 */
+	{ 0x00000EF2, 0x0000 },    /* R3826  - ISRC 1 CTRL 3 */
+	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
+	{ 0x00000EF4, 0x0001 },    /* R3828  - ISRC 2 CTRL 2 */
+	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
+	{ 0x00001700, 0x0000 },    /* R5888  - FRF_COEFF_1 */
+	{ 0x00001701, 0x0000 },    /* R5889  - FRF_COEFF_2 */
+	{ 0x00001702, 0x0000 },    /* R5890  - FRF_COEFF_3 */
+	{ 0x00001703, 0x0000 },    /* R5891  - FRF_COEFF_4 */
+	{ 0x00001704, 0x0000 },    /* R5892  - DAC_COMP_1 */
+	{ 0x00001705, 0x0000 },    /* R5893  - DAC_COMP_2 */
+};
+
+static bool wm8998_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_CTRL_IF_SPI_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_2:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_TONE_GENERATOR_1:
+	case ARIZONA_TONE_GENERATOR_2:
+	case ARIZONA_TONE_GENERATOR_3:
+	case ARIZONA_TONE_GENERATOR_4:
+	case ARIZONA_TONE_GENERATOR_5:
+	case ARIZONA_PWM_DRIVE_1:
+	case ARIZONA_PWM_DRIVE_2:
+	case ARIZONA_PWM_DRIVE_3:
+	case ARIZONA_WAKE_CONTROL:
+	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
+	case ARIZONA_HAPTICS_CONTROL_1:
+	case ARIZONA_HAPTICS_CONTROL_2:
+	case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_1_DURATION:
+	case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_2_DURATION:
+	case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_3_DURATION:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_CLOCK_32K_1:
+	case ARIZONA_SYSTEM_CLOCK_1:
+	case ARIZONA_SAMPLE_RATE_1:
+	case ARIZONA_SAMPLE_RATE_2:
+	case ARIZONA_SAMPLE_RATE_3:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_CLOCK_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+	case ARIZONA_OUTPUT_ASYNC_CLOCK:
+	case ARIZONA_RATE_ESTIMATOR_1:
+	case ARIZONA_RATE_ESTIMATOR_2:
+	case ARIZONA_RATE_ESTIMATOR_3:
+	case ARIZONA_RATE_ESTIMATOR_4:
+	case ARIZONA_RATE_ESTIMATOR_5:
+	case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
+	case ARIZONA_FLL1_CONTROL_1:
+	case ARIZONA_FLL1_CONTROL_2:
+	case ARIZONA_FLL1_CONTROL_3:
+	case ARIZONA_FLL1_CONTROL_4:
+	case ARIZONA_FLL1_CONTROL_5:
+	case ARIZONA_FLL1_CONTROL_6:
+	case ARIZONA_FLL1_CONTROL_7:
+	case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL1_SYNCHRONISER_1:
+	case ARIZONA_FLL1_SYNCHRONISER_2:
+	case ARIZONA_FLL1_SYNCHRONISER_3:
+	case ARIZONA_FLL1_SYNCHRONISER_4:
+	case ARIZONA_FLL1_SYNCHRONISER_5:
+	case ARIZONA_FLL1_SYNCHRONISER_6:
+	case ARIZONA_FLL1_SYNCHRONISER_7:
+	case ARIZONA_FLL1_SPREAD_SPECTRUM:
+	case ARIZONA_FLL1_GPIO_CLOCK:
+	case ARIZONA_FLL2_CONTROL_1:
+	case ARIZONA_FLL2_CONTROL_2:
+	case ARIZONA_FLL2_CONTROL_3:
+	case ARIZONA_FLL2_CONTROL_4:
+	case ARIZONA_FLL2_CONTROL_5:
+	case ARIZONA_FLL2_CONTROL_6:
+	case ARIZONA_FLL2_CONTROL_7:
+	case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL2_NCO_TEST_0:
+	case ARIZONA_FLL2_SYNCHRONISER_1:
+	case ARIZONA_FLL2_SYNCHRONISER_2:
+	case ARIZONA_FLL2_SYNCHRONISER_3:
+	case ARIZONA_FLL2_SYNCHRONISER_4:
+	case ARIZONA_FLL2_SYNCHRONISER_5:
+	case ARIZONA_FLL2_SYNCHRONISER_6:
+	case ARIZONA_FLL2_SYNCHRONISER_7:
+	case ARIZONA_FLL2_SPREAD_SPECTRUM:
+	case ARIZONA_FLL2_GPIO_CLOCK:
+	case ARIZONA_MIC_CHARGE_PUMP_1:
+	case ARIZONA_LDO1_CONTROL_1:
+	case ARIZONA_LDO1_CONTROL_2:
+	case ARIZONA_LDO2_CONTROL_1:
+	case ARIZONA_MIC_BIAS_CTRL_1:
+	case ARIZONA_MIC_BIAS_CTRL_2:
+	case ARIZONA_MIC_BIAS_CTRL_3:
+	case ARIZONA_ACCESSORY_DETECT_MODE_1:
+	case ARIZONA_HEADPHONE_DETECT_1:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_MICD_CLAMP_CONTROL:
+	case ARIZONA_MIC_DETECT_1:
+	case ARIZONA_MIC_DETECT_2:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_MIC_DETECT_LEVEL_1:
+	case ARIZONA_MIC_DETECT_LEVEL_2:
+	case ARIZONA_MIC_DETECT_LEVEL_3:
+	case ARIZONA_MIC_DETECT_LEVEL_4:
+	case ARIZONA_ISOLATION_CONTROL:
+	case ARIZONA_JACK_DETECT_ANALOGUE:
+	case ARIZONA_INPUT_ENABLES:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_INPUT_RATE:
+	case ARIZONA_INPUT_VOLUME_RAMP:
+	case ARIZONA_HPF_CONTROL:
+	case ARIZONA_IN1L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+	case ARIZONA_DMIC1L_CONTROL:
+	case ARIZONA_IN1R_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+	case ARIZONA_DMIC1R_CONTROL:
+	case ARIZONA_IN2L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+	case ARIZONA_DMIC2L_CONTROL:
+	case ARIZONA_OUTPUT_ENABLES_1:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_OUTPUT_RATE_1:
+	case ARIZONA_OUTPUT_VOLUME_RAMP:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+	case ARIZONA_NOISE_GATE_SELECT_1L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+	case ARIZONA_NOISE_GATE_SELECT_1R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+	case ARIZONA_NOISE_GATE_SELECT_2L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+	case ARIZONA_NOISE_GATE_SELECT_2R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+	case ARIZONA_NOISE_GATE_SELECT_3L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+	case ARIZONA_NOISE_GATE_SELECT_4L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+	case ARIZONA_NOISE_GATE_SELECT_4R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+	case ARIZONA_NOISE_GATE_SELECT_5L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+	case ARIZONA_NOISE_GATE_SELECT_5R:
+	case ARIZONA_DRE_ENABLE:
+	case ARIZONA_DRE_CONTROL_1:
+	case ARIZONA_DRE_CONTROL_2:
+	case ARIZONA_DRE_CONTROL_3:
+	case ARIZONA_EDRE_ENABLE:
+	case ARIZONA_DAC_AEC_CONTROL_1:
+	case ARIZONA_DAC_AEC_CONTROL_2:
+	case ARIZONA_NOISE_GATE_CONTROL:
+	case ARIZONA_PDM_SPK1_CTRL_1:
+	case ARIZONA_PDM_SPK1_CTRL_2:
+	case ARIZONA_HP_TEST_CTRL_13:
+	case ARIZONA_AIF1_BCLK_CTRL:
+	case ARIZONA_AIF1_TX_PIN_CTRL:
+	case ARIZONA_AIF1_RX_PIN_CTRL:
+	case ARIZONA_AIF1_RATE_CTRL:
+	case ARIZONA_AIF1_FORMAT:
+	case ARIZONA_AIF1_RX_BCLK_RATE:
+	case ARIZONA_AIF1_FRAME_CTRL_1:
+	case ARIZONA_AIF1_FRAME_CTRL_2:
+	case ARIZONA_AIF1_FRAME_CTRL_3:
+	case ARIZONA_AIF1_FRAME_CTRL_4:
+	case ARIZONA_AIF1_FRAME_CTRL_5:
+	case ARIZONA_AIF1_FRAME_CTRL_6:
+	case ARIZONA_AIF1_FRAME_CTRL_7:
+	case ARIZONA_AIF1_FRAME_CTRL_8:
+	case ARIZONA_AIF1_FRAME_CTRL_11:
+	case ARIZONA_AIF1_FRAME_CTRL_12:
+	case ARIZONA_AIF1_FRAME_CTRL_13:
+	case ARIZONA_AIF1_FRAME_CTRL_14:
+	case ARIZONA_AIF1_FRAME_CTRL_15:
+	case ARIZONA_AIF1_FRAME_CTRL_16:
+	case ARIZONA_AIF1_TX_ENABLES:
+	case ARIZONA_AIF1_RX_ENABLES:
+	case ARIZONA_AIF2_BCLK_CTRL:
+	case ARIZONA_AIF2_TX_PIN_CTRL:
+	case ARIZONA_AIF2_RX_PIN_CTRL:
+	case ARIZONA_AIF2_RATE_CTRL:
+	case ARIZONA_AIF2_FORMAT:
+	case ARIZONA_AIF2_RX_BCLK_RATE:
+	case ARIZONA_AIF2_FRAME_CTRL_1:
+	case ARIZONA_AIF2_FRAME_CTRL_2:
+	case ARIZONA_AIF2_FRAME_CTRL_3:
+	case ARIZONA_AIF2_FRAME_CTRL_4:
+	case ARIZONA_AIF2_FRAME_CTRL_5:
+	case ARIZONA_AIF2_FRAME_CTRL_6:
+	case ARIZONA_AIF2_FRAME_CTRL_7:
+	case ARIZONA_AIF2_FRAME_CTRL_8:
+	case ARIZONA_AIF2_FRAME_CTRL_11:
+	case ARIZONA_AIF2_FRAME_CTRL_12:
+	case ARIZONA_AIF2_FRAME_CTRL_13:
+	case ARIZONA_AIF2_FRAME_CTRL_14:
+	case ARIZONA_AIF2_FRAME_CTRL_15:
+	case ARIZONA_AIF2_FRAME_CTRL_16:
+	case ARIZONA_AIF2_TX_ENABLES:
+	case ARIZONA_AIF2_RX_ENABLES:
+	case ARIZONA_AIF3_BCLK_CTRL:
+	case ARIZONA_AIF3_TX_PIN_CTRL:
+	case ARIZONA_AIF3_RX_PIN_CTRL:
+	case ARIZONA_AIF3_RATE_CTRL:
+	case ARIZONA_AIF3_FORMAT:
+	case ARIZONA_AIF3_RX_BCLK_RATE:
+	case ARIZONA_AIF3_FRAME_CTRL_1:
+	case ARIZONA_AIF3_FRAME_CTRL_2:
+	case ARIZONA_AIF3_FRAME_CTRL_3:
+	case ARIZONA_AIF3_FRAME_CTRL_4:
+	case ARIZONA_AIF3_FRAME_CTRL_11:
+	case ARIZONA_AIF3_FRAME_CTRL_12:
+	case ARIZONA_AIF3_TX_ENABLES:
+	case ARIZONA_AIF3_RX_ENABLES:
+	case ARIZONA_SPD1_TX_CONTROL:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
+	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+	case ARIZONA_SLIMBUS_RATES_1:
+	case ARIZONA_SLIMBUS_RATES_2:
+	case ARIZONA_SLIMBUS_RATES_5:
+	case ARIZONA_SLIMBUS_RATES_6:
+	case ARIZONA_SLIMBUS_RATES_7:
+	case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+	case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_GPIO1_CTRL:
+	case ARIZONA_GPIO2_CTRL:
+	case ARIZONA_GPIO3_CTRL:
+	case ARIZONA_GPIO4_CTRL:
+	case ARIZONA_GPIO5_CTRL:
+	case ARIZONA_IRQ_CTRL_1:
+	case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+	case ARIZONA_GP_SWITCH_1:
+	case ARIZONA_MISC_PAD_CTRL_1:
+	case ARIZONA_MISC_PAD_CTRL_2:
+	case ARIZONA_MISC_PAD_CTRL_3:
+	case ARIZONA_MISC_PAD_CTRL_4:
+	case ARIZONA_MISC_PAD_CTRL_5:
+	case ARIZONA_MISC_PAD_CTRL_6:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_INTERRUPT_STATUS_1_MASK:
+	case ARIZONA_INTERRUPT_STATUS_2_MASK:
+	case ARIZONA_INTERRUPT_STATUS_3_MASK:
+	case ARIZONA_INTERRUPT_STATUS_4_MASK:
+	case ARIZONA_INTERRUPT_STATUS_5_MASK:
+	case ARIZONA_INTERRUPT_CONTROL:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1_MASK:
+	case ARIZONA_IRQ2_STATUS_2_MASK:
+	case ARIZONA_IRQ2_STATUS_3_MASK:
+	case ARIZONA_IRQ2_STATUS_4_MASK:
+	case ARIZONA_IRQ2_STATUS_5_MASK:
+	case ARIZONA_IRQ2_CONTROL:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_MASK_IRQ1:
+	case ARIZONA_AOD_IRQ_MASK_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_JACK_DETECT_DEBOUNCE:
+	case ARIZONA_FX_CTRL1:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_EQ1_1:
+	case ARIZONA_EQ1_2:
+	case ARIZONA_EQ1_3:
+	case ARIZONA_EQ1_4:
+	case ARIZONA_EQ1_5:
+	case ARIZONA_EQ1_6:
+	case ARIZONA_EQ1_7:
+	case ARIZONA_EQ1_8:
+	case ARIZONA_EQ1_9:
+	case ARIZONA_EQ1_10:
+	case ARIZONA_EQ1_11:
+	case ARIZONA_EQ1_12:
+	case ARIZONA_EQ1_13:
+	case ARIZONA_EQ1_14:
+	case ARIZONA_EQ1_15:
+	case ARIZONA_EQ1_16:
+	case ARIZONA_EQ1_17:
+	case ARIZONA_EQ1_18:
+	case ARIZONA_EQ1_19:
+	case ARIZONA_EQ1_20:
+	case ARIZONA_EQ1_21:
+	case ARIZONA_EQ2_1:
+	case ARIZONA_EQ2_2:
+	case ARIZONA_EQ2_3:
+	case ARIZONA_EQ2_4:
+	case ARIZONA_EQ2_5:
+	case ARIZONA_EQ2_6:
+	case ARIZONA_EQ2_7:
+	case ARIZONA_EQ2_8:
+	case ARIZONA_EQ2_9:
+	case ARIZONA_EQ2_10:
+	case ARIZONA_EQ2_11:
+	case ARIZONA_EQ2_12:
+	case ARIZONA_EQ2_13:
+	case ARIZONA_EQ2_14:
+	case ARIZONA_EQ2_15:
+	case ARIZONA_EQ2_16:
+	case ARIZONA_EQ2_17:
+	case ARIZONA_EQ2_18:
+	case ARIZONA_EQ2_19:
+	case ARIZONA_EQ2_20:
+	case ARIZONA_EQ2_21:
+	case ARIZONA_EQ3_1:
+	case ARIZONA_EQ3_2:
+	case ARIZONA_EQ3_3:
+	case ARIZONA_EQ3_4:
+	case ARIZONA_EQ3_5:
+	case ARIZONA_EQ3_6:
+	case ARIZONA_EQ3_7:
+	case ARIZONA_EQ3_8:
+	case ARIZONA_EQ3_9:
+	case ARIZONA_EQ3_10:
+	case ARIZONA_EQ3_11:
+	case ARIZONA_EQ3_12:
+	case ARIZONA_EQ3_13:
+	case ARIZONA_EQ3_14:
+	case ARIZONA_EQ3_15:
+	case ARIZONA_EQ3_16:
+	case ARIZONA_EQ3_17:
+	case ARIZONA_EQ3_18:
+	case ARIZONA_EQ3_19:
+	case ARIZONA_EQ3_20:
+	case ARIZONA_EQ3_21:
+	case ARIZONA_EQ4_1:
+	case ARIZONA_EQ4_2:
+	case ARIZONA_EQ4_3:
+	case ARIZONA_EQ4_4:
+	case ARIZONA_EQ4_5:
+	case ARIZONA_EQ4_6:
+	case ARIZONA_EQ4_7:
+	case ARIZONA_EQ4_8:
+	case ARIZONA_EQ4_9:
+	case ARIZONA_EQ4_10:
+	case ARIZONA_EQ4_11:
+	case ARIZONA_EQ4_12:
+	case ARIZONA_EQ4_13:
+	case ARIZONA_EQ4_14:
+	case ARIZONA_EQ4_15:
+	case ARIZONA_EQ4_16:
+	case ARIZONA_EQ4_17:
+	case ARIZONA_EQ4_18:
+	case ARIZONA_EQ4_19:
+	case ARIZONA_EQ4_20:
+	case ARIZONA_EQ4_21:
+	case ARIZONA_DRC1_CTRL1:
+	case ARIZONA_DRC1_CTRL2:
+	case ARIZONA_DRC1_CTRL3:
+	case ARIZONA_DRC1_CTRL4:
+	case ARIZONA_DRC1_CTRL5:
+	case ARIZONA_HPLPF1_1:
+	case ARIZONA_HPLPF1_2:
+	case ARIZONA_HPLPF2_1:
+	case ARIZONA_HPLPF2_2:
+	case ARIZONA_HPLPF3_1:
+	case ARIZONA_HPLPF3_2:
+	case ARIZONA_HPLPF4_1:
+	case ARIZONA_HPLPF4_2:
+	case ARIZONA_ASRC_ENABLE:
+	case ARIZONA_ASRC_STATUS:
+	case ARIZONA_ASRC_RATE1:
+	case ARIZONA_ASRC_RATE2:
+	case ARIZONA_ISRC_1_CTRL_1:
+	case ARIZONA_ISRC_1_CTRL_2:
+	case ARIZONA_ISRC_1_CTRL_3:
+	case ARIZONA_ISRC_2_CTRL_1:
+	case ARIZONA_ISRC_2_CTRL_2:
+	case ARIZONA_ISRC_2_CTRL_3:
+	case ARIZONA_FRF_COEFF_1:
+	case ARIZONA_FRF_COEFF_2:
+	case ARIZONA_FRF_COEFF_3:
+	case ARIZONA_FRF_COEFF_4:
+	case ARIZONA_V2_DAC_COMP_1:
+	case ARIZONA_V2_DAC_COMP_2:
+		return true;
+	default:
+		return false;
+	}
+}
+
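+/*
+ * Volatile registers bypass the regmap cache: these are hardware status
+ * and interrupt registers whose contents change underneath the driver,
+ * so every read must go out to the device.
+ */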
+static bool wm8998_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_ASRC_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+#define WM8998_MAX_REGISTER 0x31ff
+
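+/*
+ * 32-bit big-endian register addresses with 16-bit big-endian values,
+ * as the Arizona devices expect over I2C.  Reads are served from an
+ * rbtree register cache except where wm8998_volatile_register() forces
+ * a bus access.
+ */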
+const struct regmap_config wm8998_i2c_regmap = {
+	.reg_bits = 32,
+	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+
+	.max_register = WM8998_MAX_REGISTER,
+	.readable_reg = wm8998_readable_register,
+	.volatile_reg = wm8998_volatile_register,
+
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wm8998_reg_default,
+	.num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm8998_i2c_regmap);
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index b6db9eb..8756d06 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -11,11 +11,16 @@
 	bool
 	default n
 
+config CXL_EEH
+	bool
+	default n
+
 config CXL
 	tristate "Support for IBM Coherent Accelerators (CXL)"
-	depends on PPC_POWERNV && PCI_MSI
+	depends on PPC_POWERNV && PCI_MSI && EEH
 	select CXL_BASE
 	select CXL_KERNEL_API
+	select CXL_EEH
 	default m
 	help
 	  Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 14e3f82..6f484df 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Werror
+
 cxl-y				+= main.o file.o irq.o fault.o native.o
 cxl-y				+= context.o sysfs.o debugfs.o pci.o trace.o
 cxl-y				+= vphb.o api.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 729e085..8af12c8 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -12,11 +12,13 @@
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <misc/cxl.h>
+#include <linux/fs.h>
 
 #include "cxl.h"
 
 struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 {
+	struct address_space *mapping;
 	struct cxl_afu *afu;
 	struct cxl_context  *ctx;
 	int rc;
@@ -25,19 +27,42 @@
 
 	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
-	if (IS_ERR(ctx))
-		return ctx;
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		goto err_dev;
+	}
+
+	ctx->kernelapi = true;
+
+	/*
+	 * Make our own address space since we won't have one from the
+	 * filesystem like the user api has, and even if we do associate a file
+	 * with this context we don't want to use the global anonymous inode's
+	 * address space as that can invalidate unrelated users:
+	 */
+	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
+	if (!mapping) {
+		rc = -ENOMEM;
+		goto err_ctx;
+	}
+	address_space_init_once(mapping);
 
 	/* Make it a slave context.  We can promote it later? */
-	rc = cxl_context_init(ctx, afu, false, NULL);
-	if (rc) {
-		kfree(ctx);
-		put_device(&afu->dev);
-		return ERR_PTR(-ENOMEM);
-	}
+	rc = cxl_context_init(ctx, afu, false, mapping);
+	if (rc)
+		goto err_mapping;
+
 	cxl_assign_psn_space(ctx);
 
 	return ctx;
+
+err_mapping:
+	kfree(mapping);
+err_ctx:
+	kfree(ctx);
+err_dev:
+	put_device(&afu->dev);
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
 
@@ -59,7 +84,7 @@
 
 int cxl_release_context(struct cxl_context *ctx)
 {
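+	/* A context that has been started must be detached before it can
+	 * be released; one that was merely opened can be freed directly.
+	 */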
-	if (ctx->status != CLOSED)
+	if (ctx->status >= STARTED)
 		return -EBUSY;
 
 	put_device(&ctx->afu->dev);
@@ -255,9 +280,16 @@
 
 	file = anon_inode_getfile("cxl", fops, ctx, flags);
 	if (IS_ERR(file))
-		put_unused_fd(fdtmp);
+		goto err_fd;
+
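+	/* Use the context's own address space (not the shared anonymous
+	 * inode's) so a force detach can unmap just this context.
+	 */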
+	file->f_mapping = ctx->mapping;
+
 	*fd = fdtmp;
 	return file;
+
+err_fd:
+	put_unused_fd(fdtmp);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(cxl_get_fd);
 
@@ -327,3 +359,10 @@
 	return cxl_afu_check_and_enable(afu);
 }
 EXPORT_SYMBOL_GPL(cxl_afu_reset);
+
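+/*
+ * Kernel API: lets an AFU user declare that a PERST will reload the
+ * same FPGA image; cxl_reset() consults perst_same_image before
+ * allowing a full reset/reflash.
+ */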
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image)
+{
+	afu->adapter->perst_same_image = perst_reloads_same_image;
+}
+EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 1287148..e762f85 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -126,6 +126,18 @@
 	if (ctx->status != STARTED) {
 		mutex_unlock(&ctx->status_mutex);
 		pr_devel("%s: Context not started, failing problem state access\n", __func__);
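+		/*
+		 * If userspace asked for it (CXL_START_WORK_ERR_FF), map
+		 * in a page of 0xFF bytes instead of raising SIGBUS,
+		 * mimicking what a PCI MMIO read returns once the
+		 * hardware has gone away.
+		 */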
+		if (ctx->mmio_err_ff) {
+			if (!ctx->ff_page) {
+				ctx->ff_page = alloc_page(GFP_USER);
+				if (!ctx->ff_page)
+					return VM_FAULT_OOM;
+				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
+			}
+			get_page(ctx->ff_page);
+			vmf->page = ctx->ff_page;
+			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+			return 0;
+		}
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -193,7 +205,11 @@
 	if (status != STARTED)
 		return -EBUSY;
 
-	WARN_ON(cxl_detach_process(ctx));
+	/* Only warn if we detached while the link was OK.
+	 * If detach fails when hw is down, we don't care.
+	 */
+	WARN_ON(cxl_detach_process(ctx) &&
+		cxl_adapter_link_ok(ctx->afu->adapter));
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	put_pid(ctx->pid);
 	cxl_ctx_put();
@@ -253,7 +269,11 @@
 	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
 	free_page((u64)ctx->sstp);
+	if (ctx->ff_page)
+		__free_page(ctx->ff_page);
 	ctx->sstp = NULL;
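+	/* Only kernel API contexts own their address_space (kmalloc'ed
+	 * in cxl_dev_context_init()); user contexts borrow the file's.
+	 */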
+	if (ctx->kernelapi)
+		kfree(ctx->mapping);
 
 	kfree(ctx);
 }
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fd66ca..1c30ef7 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -34,7 +34,7 @@
  * Bump version each time a user API change is made, whether it is
  * backwards compatible or not.
  */
-#define CXL_API_VERSION 1
+#define CXL_API_VERSION 2
 #define CXL_API_VERSION_COMPATIBLE 1
 
 /*
@@ -83,8 +83,10 @@
 /* 0x00C0:7EFF Implementation dependent area */
 static const cxl_p1_reg_t CXL_PSL_FIR1      = {0x0100};
 static const cxl_p1_reg_t CXL_PSL_FIR2      = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_Timebase  = {0x0110};
 static const cxl_p1_reg_t CXL_PSL_VERSION   = {0x0118};
 static const cxl_p1_reg_t CXL_PSL_RESLCKTO  = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
 static const cxl_p1_reg_t CXL_PSL_FIR_CNTL  = {0x0148};
 static const cxl_p1_reg_t CXL_PSL_DSNDCTL   = {0x0150};
 static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
@@ -152,6 +154,9 @@
 #define CXL_PSL_SPAP_Size_Shift 4
 #define CXL_PSL_SPAP_V    0x0000000000000001ULL
 
+/****** CXL_PSL_Control ****************************************************/
+#define CXL_PSL_Control_tb 0x0000000000000001ULL
+
 /****** CXL_PSL_DLCNTL *****************************************************/
 #define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
 #define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
@@ -418,6 +423,9 @@
 	/* Used to unmap any mmaps when force detaching */
 	struct address_space *mapping;
 	struct mutex mapping_lock;
+	struct page *ff_page;
+	bool mmio_err_ff;
+	bool kernelapi;
 
 	spinlock_t sste_lock; /* Protects segment table entries */
 	struct cxl_sste *sstp;
@@ -493,6 +501,7 @@
 	bool user_image_loaded;
 	bool perst_loads_image;
 	bool perst_select_user;
+	bool perst_same_image;
 };
 
 int cxl_alloc_one_irq(struct cxl *adapter);
@@ -531,16 +540,33 @@
 	__be32 software_state;
 } __packed;
 
+static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(cxl->dev.parent);
+	return !pci_channel_offline(pdev);
+}
+
 static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
 {
 	WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
 	return cxl->p1_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1_write(cxl, reg, val) \
-	out_be64(_cxl_p1_addr(cxl, reg), val)
-#define cxl_p1_read(cxl, reg) \
-	in_be64(_cxl_p1_addr(cxl, reg))
+static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		out_be64(_cxl_p1_addr(cxl, reg), val);
+}
+
+static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		return in_be64(_cxl_p1_addr(cxl, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
@@ -548,26 +574,56 @@
 	return afu->p1n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1n_write(afu, reg, val) \
-	out_be64(_cxl_p1n_addr(afu, reg), val)
-#define cxl_p1n_read(afu, reg) \
-	in_be64(_cxl_p1n_addr(afu, reg))
+static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p1n_addr(afu, reg), val);
+}
+
+static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p1n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
 {
 	return afu->p2n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p2n_write(afu, reg, val) \
-	out_be64(_cxl_p2n_addr(afu, reg), val)
-#define cxl_p2n_read(afu, reg) \
-	in_be64(_cxl_p2n_addr(afu, reg))
+static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p2n_addr(afu, reg), val);
+}
 
+static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p2n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
-#define cxl_afu_cr_read64(afu, cr, off) \
-	in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
-#define cxl_afu_cr_read32(afu, cr, off) \
-	in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return ~0ULL;
+}
+
+static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return 0xffffffff;
+}
 u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
 u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
 
@@ -585,6 +641,9 @@
 int cxl_alloc_adapter_nr(struct cxl *adapter);
 void cxl_remove_adapter_nr(struct cxl *adapter);
 
+int cxl_alloc_spa(struct cxl_afu *afu);
+void cxl_release_spa(struct cxl_afu *afu);
+
 int cxl_file_init(void);
 void cxl_file_exit(void);
 int cxl_register_adapter(struct cxl *adapter);
@@ -675,6 +734,7 @@
 
 void cxl_stop_trace(struct cxl *cxl);
 int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
 void cxl_pci_vphb_remove(struct cxl_afu *afu);
 
 extern struct pci_driver cxl_pci_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 825c412..18df6f4 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -48,7 +48,7 @@
 static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
 					    struct dentry *parent, u64 __iomem *value)
 {
-	return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
+	return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
 }
 
 int cxl_debugfs_adapter_add(struct cxl *adapter)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e3f4b69..a30bf28 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -73,6 +73,11 @@
 	if (!afu->current_mode)
 		goto err_put_afu;
 
+	if (!cxl_adapter_link_ok(adapter)) {
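+		/* The card is gone or midway through EEH recovery */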
+		rc = -EIO;
+		goto err_put_afu;
+	}
+
 	if (!(ctx = cxl_context_alloc())) {
 		rc = -ENOMEM;
 		goto err_put_afu;
@@ -179,6 +184,8 @@
 	if (work.flags & CXL_START_WORK_AMR)
 		amr = work.amr & mfspr(SPRN_UAMOR);
 
+	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+
 	/*
 	 * We grab the PID here and not in the file open to allow for the case
 	 * where a process (master, some daemon, etc) has opened the chardev on
@@ -238,6 +245,9 @@
 	if (ctx->status == CLOSED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	pr_devel("afu_ioctl\n");
 	switch (cmd) {
 	case CXL_IOCTL_START_WORK:
@@ -251,7 +261,7 @@
 	return -EINVAL;
 }
 
-long afu_compat_ioctl(struct file *file, unsigned int cmd,
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
 	return afu_ioctl(file, cmd, arg);
@@ -265,6 +275,9 @@
 	if (ctx->status != STARTED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	return cxl_context_iomap(ctx, vm);
 }
 
@@ -309,6 +322,9 @@
 	int rc;
 	DEFINE_WAIT(wait);
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	if (count < CXL_READ_MIN_SIZE)
 		return -EINVAL;
 
@@ -319,6 +335,11 @@
 		if (ctx_event_pending(ctx))
 			break;
 
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		if (file->f_flags & O_NONBLOCK) {
 			rc = -EAGAIN;
 			goto out;
@@ -396,7 +417,7 @@
 	.mmap           = afu_mmap,
 };
 
-const struct file_operations afu_master_fops = {
+static const struct file_operations afu_master_fops = {
 	.owner		= THIS_MODULE,
 	.open           = afu_master_open,
 	.poll		= afu_poll,
@@ -519,7 +540,7 @@
 	 * If these change we really need to update API.  Either change some
 	 * flags or update API version number CXL_API_VERSION.
 	 */
-	BUILD_BUG_ON(CXL_API_VERSION != 1);
+	BUILD_BUG_ON(CXL_API_VERSION != 2);
 	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
 	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
 	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 680cd26..583b42a 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -30,12 +30,12 @@
 	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
 	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 
-	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
-	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
-	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
-	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(ctx->afu->adapter);
@@ -54,10 +54,10 @@
 	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
 	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
-	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
-	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
 
@@ -72,7 +72,7 @@
 	WARN(1, "CXL ERROR interrupt %i\n", irq);
 
 	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
-	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);
+	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
 
 	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(adapter);
@@ -80,7 +80,7 @@
 	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
 	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
 
-	dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);
+	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
 
 	return IRQ_HANDLED;
 }
@@ -147,7 +147,7 @@
 	if (dsisr & CXL_PSL_DSISR_An_PE)
 		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
+		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -158,7 +158,7 @@
 			 * probably best that we log them somewhere:
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
-					    "undelivered to pe %i: %.llx\n",
+					    "undelivered to pe %i: 0x%016llx\n",
 					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
@@ -211,8 +211,8 @@
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
-		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
 		" with outstanding transactions?)\n", ph, irq_info.dsisr,
 		irq_info.dar);
 	return fail_psl_irq(afu, &irq_info);
@@ -341,6 +341,9 @@
 
 void cxl_release_psl_err_irq(struct cxl *adapter)
 {
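+	/* irq_find_mapping() with a NULL domain searches the default irq
+	 * domain; if the hwirq was never mapped (e.g. a partial setup),
+	 * there is nothing to unmap or release here.
+	 */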
+	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+		return;
+
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->err_virq, adapter);
 	cxl_release_one_irq(adapter, adapter->err_hwirq);
@@ -374,6 +377,9 @@
 
 void cxl_release_serr_irq(struct cxl_afu *afu)
 {
+	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+		return;
+
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
@@ -400,12 +406,15 @@
 
 void cxl_release_psl_irq(struct cxl_afu *afu)
 {
+	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+		return;
+
 	cxl_unmap_irq(afu->psl_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
 	kfree(afu->psl_irq_name);
 }
 
-void afu_irq_name_free(struct cxl_context *ctx)
+static void afu_irq_name_free(struct cxl_context *ctx)
 {
 	struct cxl_irq_name *irq_name, *tmp;
 
@@ -421,6 +430,9 @@
 	int rc, r, i, j = 1;
 	struct cxl_irq_name *irq_name;
 
+	/* Initialize the list head to hold irq names */
+	INIT_LIST_HEAD(&ctx->irq_names);
+
 	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 		return rc;
 
@@ -432,13 +444,12 @@
 	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
 				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 	if (!ctx->irq_bitmap)
-		return -ENOMEM;
+		goto out;
 
 	/*
 	 * Allocate names first.  If any fail, bail out before allocating
 	 * actual hardware IRQs.
 	 */
-	INIT_LIST_HEAD(&ctx->irq_names);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
 		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			irq_name = kmalloc(sizeof(struct cxl_irq_name),
@@ -460,11 +471,12 @@
 	return 0;
 
 out:
+	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 	afu_irq_name_free(ctx);
 	return -ENOMEM;
 }
 
-void afu_register_hwirqs(struct cxl_context *ctx)
+static void afu_register_hwirqs(struct cxl_context *ctx)
 {
 	irq_hw_number_t hwirq;
 	struct cxl_irq_name *irq_name;
@@ -511,4 +523,8 @@
 
 	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+
+	kfree(ctx->irq_bitmap);
+	ctx->irq_bitmap = NULL;
+	ctx->irq_count = 0;
 }
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 4a164ab..9fde75e 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -222,6 +222,7 @@
 	cxl_debugfs_exit();
 	cxl_file_exit();
 	unregister_cxl_calls(&cxl_calls);
+	idr_destroy(&cxl_adapter_idr);
 }
 
 module_init(init_cxl);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 10567f2..b37f2e8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -41,7 +41,14 @@
 			rc = -EBUSY;
 			goto out;
 		}
-		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
+
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			afu->enabled = enabled;
+			rc = -EIO;
+			goto out;
+		}
+
+		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
 				     AFU_Cntl | command);
 		cpu_relax();
 		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
@@ -85,6 +92,10 @@
 
 int cxl_afu_check_and_enable(struct cxl_afu *afu)
 {
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Refusing to enable afu while link down!\n");
+		return -EIO;
+	}
 	if (afu->enabled)
 		return 0;
 	return afu_enable(afu);
@@ -103,6 +114,12 @@
 
 	pr_devel("PSL purge request\n");
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
+		rc = -EIO;
+		goto out;
+	}
+
 	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
 		WARN(1, "psl_purge request while AFU not disabled!\n");
 		cxl_afu_disable(afu);
@@ -119,14 +136,19 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
-		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx  PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
+		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
 		if (dsisr & CXL_PSL_DSISR_TRANS) {
 			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
-			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
+			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		} else if (dsisr) {
-			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
+			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
 		} else {
 			cpu_relax();
@@ -161,10 +183,8 @@
 	return ((spa_size / 8) - 96) / 17;
 }
 
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
 {
-	u64 spap;
-
 	/* Work out how many pages to allocate */
 	afu->spa_order = 0;
 	do {
@@ -183,6 +203,13 @@
 	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
 		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
 
+	return 0;
+}
+
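+/* Point the PSL at the (already allocated) scheduled process area.
+ * Allocation and attach are separate steps so the same SPA can simply
+ * be re-attached when the AFU is reactivated after a reset.
+ */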
+static void attach_spa(struct cxl_afu *afu)
+{
+	u64 spap;
+
 	afu->sw_command_status = (__be64 *)((char *)afu->spa +
 					    ((afu->spa_max_procs + 3) * 128));
 
@@ -191,14 +218,19 @@
 	spap |= CXL_PSL_SPAP_V;
 	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
-	return 0;
 }
 
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
 {
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
-	free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+	if (afu->spa) {
+		free_pages((unsigned long) afu->spa, afu->spa_order);
+		afu->spa = NULL;
+	}
 }
 
 int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -215,6 +247,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 
@@ -224,6 +258,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -240,6 +276,11 @@
 			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		/* If the adapter has gone down, we can assume that we
+		 * will PERST it and that will invalidate everything.
+		 */
+		if (!cxl_adapter_link_ok(afu->adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -279,6 +320,8 @@
 	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
 
 	while (1) {
+		if (!cxl_adapter_link_ok(adapter))
+			break;
 		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
 		if (!(slbia & CXL_TLB_SLB_P))
 			break;
@@ -308,6 +351,11 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
+			rc = -EIO;
+			goto out;
+		}
 		state = be64_to_cpup(ctx->afu->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
@@ -355,8 +403,13 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
-	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
-				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
+	/* We could be asked to terminate when the hw is down. That
+	 * should always succeed: it's not running if the hw has gone
+	 * away and is being reset.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
+					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
 	ctx->elem->software_state = 0;	/* Remove Valid bit */
 	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
 	mutex_unlock(&ctx->afu->spa_mutex);
@@ -369,7 +422,14 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
-	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
+
+	/* We could be asked to remove when the hw is down. Again, if
+	 * the hw is down, the PE is gone, so we succeed.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
+
+	if (!rc)
 		ctx->pe_inserted = false;
 	slb_invalid(ctx);
 	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
@@ -397,8 +457,11 @@
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
-	if (alloc_spa(afu))
-		return -ENOMEM;
+	if (afu->spa == NULL) {
+		if (cxl_alloc_spa(afu))
+			return -ENOMEM;
+	}
+	attach_spa(afu);
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
 	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -492,9 +555,7 @@
 	if ((result = cxl_afu_check_and_enable(ctx->afu)))
 		return result;
 
-	add_process_element(ctx);
-
-	return 0;
+	return add_process_element(ctx);
 }
 
 static int deactivate_afu_directed(struct cxl_afu *afu)
@@ -511,8 +572,6 @@
 	cxl_afu_disable(afu);
 	cxl_psl_purge(afu);
 
-	release_spa(afu);
-
 	return 0;
 }
 
@@ -614,6 +673,11 @@
 	if (!(mode & afu->modes_supported))
 		return -EINVAL;
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Device link is down, refusing to activate!\n");
+		return -EIO;
+	}
+
 	if (mode == CXL_MODE_DIRECTED)
 		return activate_afu_directed(afu);
 	if (mode == CXL_MODE_DEDICATED)
@@ -624,6 +688,11 @@
 
 int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
 {
+	if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+		WARN(1, "Device link is down, refusing to attach process!\n");
+		return -EIO;
+	}
+
 	ctx->kernel = kernel;
 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
 		return attach_afu_directed(ctx, wed, amr);
@@ -668,6 +737,12 @@
 {
 	u64 pidtid;
 
+	/* If the adapter has gone away, we can't get any meaningful
+	 * information.
+	 */
+	if (!cxl_adapter_link_ok(afu->adapter))
+		return -EIO;
+
 	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
 	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
@@ -684,7 +759,7 @@
 {
 	u64 dsisr;
 
-	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);
+	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
 
 	/* Clear PSL_DSISR[PE] */
 	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 32ad097..02c8516 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 
 #include "cxl.h"
+#include <misc/cxl.h>
 
 
 #define CXL_PCI_VSEC_ID	0x1280
@@ -133,7 +134,7 @@
 	return (val >> ((off & 0x3) * 8)) & 0xff;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
+static const struct pci_device_id cxl_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
@@ -369,6 +370,55 @@
 	return 0;
 }
 
+#define TBSYNC_CNT(n) (((u64)(n) & 0x7) << (63-6))
+#define _2048_250MHZ_CYCLES 1
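+/* TBSYNC_CNT programs the 3-bit sync count field (bits 4:6, IBM bit
+ * numbering) of CXL_PSL_TB_CTLSTAT; going by the macro names, the unit
+ * is presumably 2048 cycles of the 250MHz timebase reference.
+ */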
+
+static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+{
+	u64 psl_tb;
+	int delta;
+	unsigned int retry = 0;
+	struct device_node *np;
+
+	if (!(np = pnv_pci_get_phb_node(dev)))
+		return -ENODEV;
+
+	/* Do not fail when CAPP timebase sync is not supported by OPAL */
+	of_node_get(np);
+	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+		of_node_put(np);
+		pr_err("PSL: Timebase sync: OPAL support missing\n");
+		return 0;
+	}
+	of_node_put(np);
+
+	/*
+	 * Setup PSL Timebase Control and Status register
+	 * with the recommended Timebase Sync Count value
+	 */
+	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+
+	/* Enable PSL Timebase */
+	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
+	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
+
+	/* Wait until CORE TB and PSL TB difference <= 16usecs */
+	do {
+		msleep(1);
+		if (retry++ > 5) {
+			pr_err("PSL: Timebase sync: giving up!\n");
+			return -EIO;
+		}
+		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+		delta = mftb() - psl_tb;
+		if (delta < 0)
+			delta = -delta;
+	} while (cputime_to_usecs(delta) > 16);
+
+	return 0;
+}
+
 static int init_implementation_afu_regs(struct cxl_afu *afu)
 {
 	/* read/write masks for this slice */
@@ -539,10 +589,18 @@
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p2n_mmio)
+	if (afu->p2n_mmio) {
 		iounmap(afu->p2n_mmio);
-	if (afu->p1n_mmio)
+		afu->p2n_mmio = NULL;
+	}
+	if (afu->p1n_mmio) {
 		iounmap(afu->p1n_mmio);
+		afu->p1n_mmio = NULL;
+	}
+	if (afu->afu_desc_mmio) {
+		iounmap(afu->afu_desc_mmio);
+		afu->afu_desc_mmio = NULL;
+	}
 }
 
 static void cxl_release_afu(struct device *dev)
@@ -551,6 +609,9 @@
 
 	pr_devel("cxl_release_afu\n");
 
+	idr_destroy(&afu->contexts_idr);
+	cxl_release_spa(afu);
+
 	kfree(afu);
 }
 
@@ -656,7 +717,7 @@
 	 */
 	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
 	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
-		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
 		if (__cxl_afu_reset(afu))
 			return -EIO;
 		if (cxl_afu_disable(afu))
@@ -677,7 +738,7 @@
 	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
 	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
 		if (reg & CXL_PSL_DSISR_TRANS)
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		else
@@ -686,12 +747,12 @@
 	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
 	if (reg) {
 		if (reg & ~0xffff)
-			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
+			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
 		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
 	}
 	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
 		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
 	}
 
@@ -742,45 +803,70 @@
 	return count;
 }
 
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
 {
-	struct cxl_afu *afu;
-	bool free = true;
 	int rc;
 
-	if (!(afu = cxl_alloc_afu(adapter, slice)))
-		return -ENOMEM;
-
-	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
-		goto err1;
-
 	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
-		goto err1;
+		return rc;
 
 	if ((rc = sanitise_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	/* We need to reset the AFU before we can read the AFU descriptor */
 	if ((rc = __cxl_afu_reset(afu)))
-		goto err2;
+		goto err1;
 
 	if (cxl_verbose)
 		dump_afu_descriptor(afu);
 
 	if ((rc = cxl_read_afu_descriptor(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = init_implementation_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_serr_irq(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_psl_irq(afu)))
-		goto err3;
+		goto err2;
+
+	return 0;
+
+err2:
+	cxl_release_serr_irq(afu);
+err1:
+	cxl_unmap_slice_regs(afu);
+	return rc;
+}
+
+static void cxl_deconfigure_afu(struct cxl_afu *afu)
+{
+	cxl_release_psl_irq(afu);
+	cxl_release_serr_irq(afu);
+	cxl_unmap_slice_regs(afu);
+}
+
+static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+{
+	struct cxl_afu *afu;
+	int rc;
+
+	afu = cxl_alloc_afu(adapter, slice);
+	if (!afu)
+		return -ENOMEM;
+
+	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
+	if (rc)
+		goto err_free;
+
+	rc = cxl_configure_afu(afu, adapter, dev);
+	if (rc)
+		goto err_free;
 
 	/* Don't care if this fails */
 	cxl_debugfs_afu_add(afu);
@@ -795,10 +881,6 @@
 	if ((rc = cxl_sysfs_afu_add(afu)))
 		goto err_put1;
 
-
-	if ((rc = cxl_afu_select_best_mode(afu)))
-		goto err_put2;
-
 	adapter->afu[afu->slice] = afu;
 
 	if ((rc = cxl_pci_vphb_add(afu)))
@@ -806,21 +888,16 @@
 
 	return 0;
 
-err_put2:
-	cxl_sysfs_afu_remove(afu);
 err_put1:
-	device_unregister(&afu->dev);
-	free = false;
+	cxl_deconfigure_afu(afu);
 	cxl_debugfs_afu_remove(afu);
-	cxl_release_psl_irq(afu);
-err3:
-	cxl_release_serr_irq(afu);
-err2:
-	cxl_unmap_slice_regs(afu);
-err1:
-	if (free)
-		kfree(afu);
+	device_unregister(&afu->dev);
 	return rc;
+
+err_free:
+	kfree(afu);
+	return rc;
 }
 
 static void cxl_remove_afu(struct cxl_afu *afu)
@@ -840,10 +917,7 @@
 	cxl_context_detach_all(afu);
 	cxl_afu_deactivate_mode(afu);
 
-	cxl_release_psl_irq(afu);
-	cxl_release_serr_irq(afu);
-	cxl_unmap_slice_regs(afu);
-
+	cxl_deconfigure_afu(afu);
 	device_unregister(&afu->dev);
 }
 
@@ -851,16 +925,15 @@
 {
 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
 	int rc;
-	int i;
-	u32 val;
+
+	if (adapter->perst_same_image) {
+		dev_warn(&dev->dev,
+			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
+		return -EINVAL;
+	}
 
 	dev_info(&dev->dev, "CXL reset\n");
 
-	for (i = 0; i < adapter->slices; i++) {
-		cxl_pci_vphb_remove(adapter->afu[i]);
-		cxl_remove_afu(adapter->afu[i]);
-	}
-
 	/* pcie_warm_reset requests a fundamental pci reset which includes a
 	 * PERST assert/deassert.  PERST triggers a loading of the image
 	 * if "user" or "factory" is selected in sysfs */
@@ -869,20 +942,6 @@
 		return rc;
 	}
 
-	/* the PERST done above fences the PHB.  So, reset depends on EEH
-	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
-	 * the driver.  Do an mmio read explictly to ensure EEH notices the
-	 * fenced PHB.  Retry for a few seconds before giving up. */
-	i = 0;
-	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
-		(i < 5)) {
-		msleep(500);
-		i++;
-	}
-
-	if (val != 0xffffffff)
-		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
-
 	return rc;
 }
 
@@ -893,7 +952,7 @@
 	if (pci_request_region(dev, 0, "priv 1 regs"))
 		goto err2;
 
-	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
+	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
 			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
 
 	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
@@ -917,10 +976,16 @@
 
 static void cxl_unmap_adapter_regs(struct cxl *adapter)
 {
-	if (adapter->p1_mmio)
+	if (adapter->p1_mmio) {
 		iounmap(adapter->p1_mmio);
-	if (adapter->p2_mmio)
+		adapter->p1_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
+	}
+	if (adapter->p2_mmio) {
 		iounmap(adapter->p2_mmio);
+		adapter->p2_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
+	}
 }
 
 static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
@@ -949,7 +1014,6 @@
 	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
 	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
 	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
-	adapter->perst_loads_image = true;
 	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
 
 	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
@@ -1009,82 +1073,139 @@
 
 	pr_devel("cxl_release_adapter\n");
 
+	cxl_remove_adapter_nr(adapter);
+
 	kfree(adapter);
 }
 
-static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
+static struct cxl *cxl_alloc_adapter(void)
 {
 	struct cxl *adapter;
 
 	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
 		return NULL;
 
-	adapter->dev.parent = &dev->dev;
-	adapter->dev.release = cxl_release_adapter;
-	pci_set_drvdata(dev, adapter);
 	spin_lock_init(&adapter->afu_list_lock);
 
+	if (cxl_alloc_adapter_nr(adapter))
+		goto err1;
+
+	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+		goto err2;
+
 	return adapter;
+
+err2:
+	cxl_remove_adapter_nr(adapter);
+err1:
+	kfree(adapter);
+	return NULL;
 }
 
+#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
+
 static int sanitise_adapter_regs(struct cxl *adapter)
 {
-	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+	/* Clear PSL tberror bit by writing 1 to it */
+	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
 	return cxl_tlb_slb_invalidate(adapter);
 }
 
+/* This should contain *only* operations that can safely be done in
+ * both creation and recovery.
+ */
+static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
+{
+	int rc;
+
+	adapter->dev.parent = &dev->dev;
+	adapter->dev.release = cxl_release_adapter;
+	pci_set_drvdata(dev, adapter);
+
+	rc = pci_enable_device(dev);
+	if (rc) {
+		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
+		return rc;
+	}
+
+	if ((rc = cxl_read_vsec(adapter, dev)))
+		return rc;
+
+	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+		return rc;
+
+	if ((rc = setup_cxl_bars(dev)))
+		return rc;
+
+	if ((rc = switch_card_to_cxl(dev)))
+		return rc;
+
+	if ((rc = cxl_update_image_control(adapter)))
+		return rc;
+
+	if ((rc = cxl_map_adapter_regs(adapter, dev)))
+		return rc;
+
+	if ((rc = sanitise_adapter_regs(adapter)))
+		goto err;
+
+	if ((rc = init_implementation_adapter_regs(adapter, dev)))
+		goto err;
+
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+		goto err;
+
+	/* If recovery happened, the last step is to turn on snooping.
+	 * In the non-recovery case this has no effect */
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
+		goto err;
+
+	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
+		goto err;
+
+	if ((rc = cxl_register_psl_err_irq(adapter)))
+		goto err;
+
+	return 0;
+
+err:
+	cxl_unmap_adapter_regs(adapter);
+	return rc;
+}
+
+static void cxl_deconfigure_adapter(struct cxl *adapter)
+{
+	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+
+	cxl_release_psl_err_irq(adapter);
+	cxl_unmap_adapter_regs(adapter);
+
+	pci_disable_device(pdev);
+}
+
 static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 {
 	struct cxl *adapter;
-	bool free = true;
 	int rc;
 
-
-	if (!(adapter = cxl_alloc_adapter(dev)))
+	adapter = cxl_alloc_adapter();
+	if (!adapter)
 		return ERR_PTR(-ENOMEM);
 
-	if ((rc = cxl_read_vsec(adapter, dev)))
-		goto err1;
+	/* Set defaults for parameters which need to persist over
+	 * configure/reconfigure
+	 */
+	adapter->perst_loads_image = true;
+	adapter->perst_same_image = false;
 
-	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
-		goto err1;
-
-	if ((rc = setup_cxl_bars(dev)))
-		goto err1;
-
-	if ((rc = switch_card_to_cxl(dev)))
-		goto err1;
-
-	if ((rc = cxl_alloc_adapter_nr(adapter)))
-		goto err1;
-
-	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
-		goto err2;
-
-	if ((rc = cxl_update_image_control(adapter)))
-		goto err2;
-
-	if ((rc = cxl_map_adapter_regs(adapter, dev)))
-		goto err2;
-
-	if ((rc = sanitise_adapter_regs(adapter)))
-		goto err2;
-
-	if ((rc = init_implementation_adapter_regs(adapter, dev)))
-		goto err3;
-
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
-		goto err3;
-
-	/* If recovery happened, the last step is to turn on snooping.
-	 * In the non-recovery case this has no effect */
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
-		goto err3;
+	rc = cxl_configure_adapter(adapter, dev);
+	if (rc) {
+		pci_disable_device(dev);
+		cxl_release_adapter(&adapter->dev);
+		return ERR_PTR(rc);
 	}
 
-	if ((rc = cxl_register_psl_err_irq(adapter)))
-		goto err3;
-
 	/* Don't care if this one fails: */
 	cxl_debugfs_adapter_add(adapter);
 
@@ -1101,37 +1222,25 @@
 	return adapter;
 
 err_put1:
-	device_unregister(&adapter->dev);
-	free = false;
+	/* This should mirror cxl_remove_adapter, except without the
+	 * sysfs parts
+	 */
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-err3:
-	cxl_unmap_adapter_regs(adapter);
-err2:
-	cxl_remove_adapter_nr(adapter);
-err1:
-	if (free)
-		kfree(adapter);
+	cxl_deconfigure_adapter(adapter);
+	device_unregister(&adapter->dev);
 	return ERR_PTR(rc);
 }
 
 static void cxl_remove_adapter(struct cxl *adapter)
 {
-	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
-	pr_devel("cxl_release_adapter\n");
+	pr_devel("cxl_remove_adapter\n");
 
 	cxl_sysfs_adapter_remove(adapter);
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-	cxl_unmap_adapter_regs(adapter);
-	cxl_remove_adapter_nr(adapter);
+
+	cxl_deconfigure_adapter(adapter);
 
 	device_unregister(&adapter->dev);
-
-	pci_release_region(pdev, 0);
-	pci_release_region(pdev, 2);
-	pci_disable_device(pdev);
 }
 
 static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -1145,21 +1254,21 @@
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
-	if ((rc = pci_enable_device(dev))) {
-		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
-		return rc;
-	}
-
 	adapter = cxl_init_adapter(dev);
 	if (IS_ERR(adapter)) {
 		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
-		pci_disable_device(dev);
 		return PTR_ERR(adapter);
 	}
 
 	for (slice = 0; slice < adapter->slices; slice++) {
-		if ((rc = cxl_init_afu(adapter, slice, dev)))
+		if ((rc = cxl_init_afu(adapter, slice, dev))) {
 			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
+			continue;
+		}
+
+		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
+		if (rc)
+			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
 	}
 
 	return 0;
@@ -1183,10 +1292,262 @@
 	cxl_remove_adapter(adapter);
 }
 
+static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
+						pci_channel_state_t state)
+{
+	struct pci_dev *afu_dev;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
+
+	/* There should only be one entry, but go through the list
+	 * anyway
+	 */
+	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+		if (!afu_dev->driver)
+			continue;
+
+		afu_dev->error_state = state;
+
+		if (afu_dev->driver->err_handler)
+			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
+										  state);
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
+	}
+	return result;
+}
+
+static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	int i;
+
+	/* At this point, we could still have an interrupt pending.
+	 * Let's try to get them out of the way before they do
+	 * anything we don't like.
+	 */
+	schedule();
+
+	/* If we're permanently dead, give up. */
+	if (state == pci_channel_io_perm_failure) {
+		/* Tell the AFU drivers, but we don't care what they
+		 * say; we're going away.
+		 */
+		for (i = 0; i < adapter->slices; i++) {
+			afu = adapter->afu[i];
+			cxl_vphb_error_detected(afu, state);
+		}
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Are we reflashing?
+	 *
+	 * If we reflash, we could come back as something entirely
+	 * different, including a non-CAPI card. As such, by default
+	 * we don't participate in the process. We'll be unbound and
+	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
+	 * us!)
+	 *
+	 * However, this isn't the entire story: for reliability
+	 * reasons, we usually want to reflash the FPGA on PERST in
+	 * order to get back to a more reliable known-good state.
+	 *
+	 * This causes us a bit of a problem: if we reflash we can't
+	 * trust that we'll come back the same - we could have a new
+	 * image and been PERSTed in order to load that
+	 * image. However, most of the time we actually *will* come
+	 * back the same - for example a regular EEH event.
+	 *
+	 * Therefore, we allow the user to assert that the image is
+	 * indeed the same and that we should continue on into EEH
+	 * anyway.
+	 */
+	if (adapter->perst_loads_image && !adapter->perst_same_image) {
+		/* TODO take the PHB out of CXL mode */
+		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
+		return PCI_ERS_RESULT_NONE;
+	}
+
+	/*
+	 * At this point, we want to try to recover.  We'll always
+	 * need a complete slot reset: we don't trust any other reset.
+	 *
+	 * Now, we go through each AFU:
+	 *  - We send the driver, if bound, an error_detected callback.
+	 *    We expect it to clean up, but it can also tell us to give
+	 *    up and permanently detach the card. To simplify things, if
+	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
+	 *
+	 *  - We detach all contexts associated with the AFU. This
+	 *    does not free them, but puts them into a CLOSED state
+	 *    which causes the associated files to return useful
+	 *    errors to userland. It also unmaps, but does not free,
+	 *    any IRQs.
+	 *
+	 *  - We clean up our side: releasing and unmapping resources we hold
+	 *    so we can wire them up again when the hardware comes back up.
+	 *
+	 * Driver authors should note:
+	 *
+	 *  - Any contexts you create in your kernel driver (except
+	 *    those associated with anonymous file descriptors) are
+	 *    your responsibility to free and recreate. Likewise with
+	 *    any attached resources.
+	 *
+	 *  - We will take responsibility for re-initialising the
+	 *    device context (the one set up for you in
+	 *    cxl_pci_enable_device_hook and accessed through
+	 *    cxl_get_context). If you've attached IRQs or other
+	 *    resources to it, they remain yours to free.
+	 *
+	 * You can call the same functions to release resources as you
+	 * normally would: we make sure that these functions continue
+	 * to work when the hardware is down.
+	 *
+	 * Two examples:
+	 *
+	 * 1) If you normally free all your resources at the end of
+	 *    each request, or if you use anonymous FDs, your
+	 *    error_detected callback can simply set a flag to tell
+	 *    your driver not to start any new calls. You can then
+	 *    clear the flag in the resume callback.
+	 *
+	 * 2) If you normally allocate your resources on startup:
+	 *     * Set a flag in error_detected as above.
+	 *     * Let CXL detach your contexts.
+	 *     * In slot_reset, free the old resources and allocate new ones.
+	 *     * In resume, clear the flag to allow things to start.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		result = cxl_vphb_error_detected(afu, state);
+
+		/* Only continue if everyone agrees on NEED_RESET */
+		if (result != PCI_ERS_RESULT_NEED_RESET)
+			return result;
+
+		cxl_context_detach_all(afu);
+		cxl_afu_deactivate_mode(afu);
+		cxl_deconfigure_afu(afu);
+	}
+	cxl_deconfigure_adapter(adapter);
+
+	return result;
+}
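
The comment above speaks directly to AFU driver authors; the following is a minimal, hedged sketch (not part of the patch) of what "pattern 2" looks like in practice. Every my_afu_* name and the priv layout are invented for illustration; only struct pci_error_handlers and the callback signatures come from the PCI core.

/* Hypothetical sketch of an AFU driver following pattern 2 above.
 * All my_afu_* names are invented; error paths are elided.
 */
#include <linux/pci.h>

struct my_afu_priv {
	bool frozen;	/* set in error_detected, cleared in resume */
};

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	priv->frozen = true;		/* stop starting new work */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_afu_slot_reset(struct pci_dev *pdev)
{
	/* cxl has re-initialised the device context and detached the
	 * old ones; free the driver's old resources and allocate new
	 * ones here (driver-specific, omitted).
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	priv->frozen = false;		/* new work may start again */
}

static const struct pci_error_handlers my_afu_err_handlers = {
	.error_detected	= my_afu_error_detected,
	.slot_reset	= my_afu_slot_reset,
	.resume		= my_afu_resume,
};
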
+
+static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct cxl_context *ctx;
+	struct pci_dev *afu_dev;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
+	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+	int i;
+
+	if (cxl_configure_adapter(adapter, pdev))
+		goto err;
+
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		if (cxl_configure_afu(afu, adapter, pdev))
+			goto err;
+
+		if (cxl_afu_select_best_mode(afu))
+			goto err;
+
+		cxl_pci_vphb_reconfigure(afu);
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			/* Reset the device context.
+			 * TODO: make this less disruptive
+			 */
+			ctx = cxl_get_context(afu_dev);
+
+			if (ctx && cxl_release_context(ctx))
+				goto err;
+
+			ctx = cxl_dev_context_init(afu_dev);
+			if (!ctx)
+				goto err;
+
+			afu_dev->dev.archdata.cxl_ctx = ctx;
+
+			if (cxl_afu_check_and_enable(afu))
+				goto err;
+
+			afu_dev->error_state = pci_channel_io_normal;
+
+			/* If there's a driver attached, allow it to
+			 * chime in on recovery. Drivers should check
+			 * if everything has come back OK, but
+			 * shouldn't start new work until we call
+			 * their resume function.
+			 */
+			if (!afu_dev->driver)
+				continue;
+
+			if (afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->slot_reset)
+				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
+
+			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+				result = PCI_ERS_RESULT_DISCONNECT;
+		}
+	}
+	return result;
+
+err:
+	/* All the bits that happen in both error_detected and cxl_remove
+	 * should be idempotent, so we don't need to worry about leaving a mix
+	 * of unconfigured and reconfigured resources.
+	 */
+	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void cxl_pci_resume(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct pci_dev *afu_dev;
+	int i;
+
+	/* Everything is back now. Drivers should restart work now.
+	 * This is not the place to be checking if everything came back up
+	 * properly, because there's no return value: do that in slot_reset.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			if (afu_dev->driver && afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->resume)
+				afu_dev->driver->err_handler->resume(afu_dev);
+		}
+	}
+}
+
+static const struct pci_error_handlers cxl_err_handler = {
+	.error_detected = cxl_pci_error_detected,
+	.slot_reset = cxl_pci_slot_reset,
+	.resume = cxl_pci_resume,
+};
+
 struct pci_driver cxl_pci_driver = {
 	.name = "cxl-pci",
 	.id_table = cxl_pci_tbl,
 	.probe = cxl_probe,
 	.remove = cxl_remove,
 	.shutdown = cxl_remove,
+	.err_handler = &cxl_err_handler,
 };
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 87cd747..25868c2 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -112,12 +112,38 @@
 	return count;
 }
 
+static ssize_t perst_reloads_same_image_show(struct device *device,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
+}
+
+static ssize_t perst_reloads_same_image_store(struct device *device,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+	int rc;
+	int val;
+
+	rc = sscanf(buf, "%i", &val);
+	if ((rc != 1) || !(val == 1 || val == 0))
+		return -EINVAL;
+
+	adapter->perst_same_image = (val == 1 ? true : false);
+	return count;
+}
+
 static struct device_attribute adapter_attrs[] = {
 	__ATTR_RO(caia_version),
 	__ATTR_RO(psl_revision),
 	__ATTR_RO(base_image),
 	__ATTR_RO(image_loaded),
 	__ATTR_RW(load_image_on_perst),
+	__ATTR_RW(perst_reloads_same_image),
 	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
 };
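
As a usage sketch (not part of the patch): userspace can assert that the FPGA image will survive PERST unchanged, so the driver participates in EEH recovery instead of opting out. The card0 path is an assumption based on the "card%i" device naming above.

/* Hypothetical userspace sketch: write 1 to the new attribute.
 * The card0 path is assumed from the "card%i" naming above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/cxl/card0/perst_reloads_same_image", "w");

	if (!f) {
		perror("perst_reloads_same_image");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}
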
 
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index ae434d8..6e1e2ad 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -105,7 +105,7 @@
 		__entry->num_interrupts = num_interrupts;
 	),
 
-	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
+	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pid,
@@ -177,7 +177,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -233,7 +233,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -264,7 +264,7 @@
 		__entry->v = v;
 	),
 
-	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -295,7 +295,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 2eba002..6dd16a6 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -138,6 +138,26 @@
 	return 0;
 }
 
+
+static inline bool cxl_config_link_ok(struct pci_bus *bus)
+{
+	struct pci_controller *phb;
+	struct cxl_afu *afu;
+
+	/* Config space IO is based on phb->cfg_addr, which is based on
+	 * afu_desc_mmio. This isn't safe to read/write when the link
+	 * goes down, as EEH tears down MMIO space.
+	 *
+	 * Check if the link is OK before proceeding.
+	 */
+
+	phb = pci_bus_to_host(bus);
+	if (phb == NULL)
+		return false;
+	afu = (struct cxl_afu *)phb->private_data;
+	return cxl_adapter_link_ok(afu->adapter);
+}
+
 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
 				int offset, int len, u32 *val)
 {
@@ -150,6 +170,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only read 32 bits */
 	*val = (in_le32(ioaddr) >> shift) & mask;
 	return PCIBIOS_SUCCESSFUL;
@@ -167,6 +190,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only write 32 bits so do read-modify-write */
 	mask <<= shift;
 	val <<= shift;
@@ -240,6 +266,14 @@
 	return 0;
 }
 
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
+{
+	/* When we are reconfigured, the AFU's MMIO space is unmapped
+	 * and remapped. We need to reflect this in the PHB's view of
+	 * the world.
+	 */
+	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+}
 
 void cxl_pci_vphb_remove(struct cxl_afu *afu)
 {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2b254f3..c6cb7f8 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -186,19 +186,11 @@
 	if (count > io_limit)
 		count = io_limit;
 
-	switch (at24->use_smbus) {
-	case I2C_SMBUS_I2C_BLOCK_DATA:
+	if (at24->use_smbus) {
 		/* Smaller eeproms can work given some SMBus extension calls */
 		if (count > I2C_SMBUS_BLOCK_MAX)
 			count = I2C_SMBUS_BLOCK_MAX;
-		break;
-	case I2C_SMBUS_WORD_DATA:
-		count = 2;
-		break;
-	case I2C_SMBUS_BYTE_DATA:
-		count = 1;
-		break;
-	default:
+	} else {
 		/*
 		 * When we have a better choice than SMBus calls, use a
 		 * combined I2C message. Write address; then read up to
@@ -229,27 +221,10 @@
 	timeout = jiffies + msecs_to_jiffies(write_timeout);
 	do {
 		read_time = jiffies;
-		switch (at24->use_smbus) {
-		case I2C_SMBUS_I2C_BLOCK_DATA:
-			status = i2c_smbus_read_i2c_block_data(client, offset,
-					count, buf);
-			break;
-		case I2C_SMBUS_WORD_DATA:
-			status = i2c_smbus_read_word_data(client, offset);
-			if (status >= 0) {
-				buf[0] = status & 0xff;
-				buf[1] = status >> 8;
-				status = count;
-			}
-			break;
-		case I2C_SMBUS_BYTE_DATA:
-			status = i2c_smbus_read_byte_data(client, offset);
-			if (status >= 0) {
-				buf[0] = status;
-				status = count;
-			}
-			break;
-		default:
+		if (at24->use_smbus) {
+			status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+									   count, buf);
+		} else {
 			status = i2c_transfer(client->adapter, msg, 2);
 			if (status == 2)
 				status = count;
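
The word- and byte-sized fallbacks deleted above are now handled inside the i2c core by i2c_smbus_read_i2c_block_data_or_emulated(). A rough sketch of that fallback order follows, under the assumption that the helper mirrors the deleted switch; it is illustrative, not the core's actual implementation.

/* Assumed behaviour of the new i2c core helper: prefer a native
 * I2C block read, else emulate it with word or byte reads, as the
 * deleted switch in this driver used to do.
 */
#include <linux/i2c.h>

static int block_read_or_emulated(struct i2c_client *client, u8 offset,
				  u8 count, u8 *buf)
{
	int status;

	if (i2c_check_functionality(client->adapter,
				    I2C_FUNC_SMBUS_READ_I2C_BLOCK))
		return i2c_smbus_read_i2c_block_data(client, offset,
						     count, buf);

	if (i2c_check_functionality(client->adapter,
				    I2C_FUNC_SMBUS_READ_WORD_DATA)) {
		status = i2c_smbus_read_word_data(client, offset);
		if (status < 0)
			return status;
		buf[0] = status & 0xff;
		buf[1] = status >> 8;
		return 2;
	}

	status = i2c_smbus_read_byte_data(client, offset);
	if (status < 0)
		return status;
	buf[0] = status;
	return 1;
}
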
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 9aa4332..e4dd93b 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -191,6 +191,7 @@
 	{ "max6875", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, max6875_id);
 
 static struct i2c_driver max6875_driver = {
 	.driver = {
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 95c8944..340b44d 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -239,7 +239,7 @@
 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
 	nid = cpu_to_node(cpu);
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 				      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				      pg_order);
 	if (page == NULL) {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 15c33cc..431e1dd 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -186,10 +186,10 @@
 	if (IS_ERR(sram->virt_base))
 		return PTR_ERR(sram->virt_base);
 
-	sram->pool = devm_gen_pool_create(sram->dev,
-					  ilog2(SRAM_GRANULARITY), -1);
-	if (!sram->pool)
-		return -ENOMEM;
+	sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
+					  NUMA_NO_NODE, NULL);
+	if (IS_ERR(sram->pool))
+		return PTR_ERR(sram->pool);
 
 	ret = sram_reserve_regions(sram, res);
 	if (ret)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1b820f..c742cfd 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,10 +47,13 @@
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
+
+#ifdef __KERNEL__
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
+#endif
 
 #define INAND_CMD38_ARG_EXT_CSD  113
 #define INAND_CMD38_ARG_ERASE    0x00
@@ -2386,6 +2389,7 @@
 #define CID_MANFID_TOSHIBA	0x11
 #define CID_MANFID_MICRON	0x13
 #define CID_MANFID_SAMSUNG	0x15
+#define CID_MANFID_KINGSTON	0x70
 
 static const struct mmc_fixup blk_fixups[] =
 {
@@ -2408,6 +2412,10 @@
 	 *
 	 * N.B. This doesn't affect SD cards.
 	 */
+	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
@@ -2444,6 +2452,15 @@
 	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
 
+	/*
+	 *  On some Kingston eMMCs, performing trim can occasionally result in
+	 *  unrecoverable data corruption due to a firmware bug.
+	 */
+	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_TRIM_BROKEN),
+	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_TRIM_BROKEN),
+
 	END_FIXUP
 };
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9ad73f3..0520064 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -358,8 +358,10 @@
  */
 static void mmc_wait_data_done(struct mmc_request *mrq)
 {
-	mrq->host->context_info.is_done_rcv = true;
-	wake_up_interruptible(&mrq->host->context_info.wait);
+	struct mmc_context_info *context_info = &mrq->host->context_info;
+
+	context_info->is_done_rcv = true;
+	wake_up_interruptible(&context_info->wait);
 }
 
 static void mmc_wait_done(struct mmc_request *mrq)
@@ -2168,6 +2170,7 @@
 	      unsigned int arg)
 {
 	unsigned int rem, to = from + nr;
+	int err;
 
 	if (!(card->host->caps & MMC_CAP_ERASE) ||
 	    !(card->csd.cmdclass & CCC_ERASE))
@@ -2218,6 +2221,22 @@
 	/* 'from' and 'to' are inclusive */
 	to -= 1;
 
+	/*
+	 * Special case where only one erase-group fits in the timeout budget:
+	 * If the region crosses an erase-group boundary in this particular
+	 * case, we will be trimming more than one erase-group, which does not
+	 * fit in the timeout budget of the controller, so we need to split it
+	 * and call mmc_do_erase() twice if necessary. This special case is
+	 * identified by the card->eg_boundary flag.
+	 */
+	rem = card->erase_size - (from % card->erase_size);
+	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+		err = mmc_do_erase(card, from, from + rem - 1, arg);
+		from += rem;
+		if ((err) || (to <= from))
+			return err;
+	}
+
 	return mmc_do_erase(card, from, to, arg);
 }
 EXPORT_SYMBOL(mmc_erase);
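
A standalone sketch of the boundary-split arithmetic above, with made-up numbers (erase ranges are in sectors, and the split only fires when card->eg_boundary says a single erase-group already fills the timeout budget):

/* Standalone sketch of the erase-group split above, with made-up
 * numbers: erase_size = 1024 sectors, a 200-sector trim starting at
 * sector 1000 crosses the group boundary at sector 1024.
 */
#include <stdio.h>

int main(void)
{
	unsigned int erase_size = 1024;		/* sectors per erase group */
	unsigned int from = 1000, nr = 200;	/* requested trim range */
	unsigned int to = from + nr;
	unsigned int rem = erase_size - (from % erase_size);

	if (nr > rem) {
		/* first call: trim up to the group boundary (inclusive) */
		printf("mmc_do_erase(%u, %u)\n", from, from + rem - 1);
		from += rem;
	}
	/* second call: the remainder; 'to' made inclusive as in the code */
	printf("mmc_do_erase(%u, %u)\n", from, to - 1);
	return 0;
}

This prints mmc_do_erase(1000, 1023) and mmc_do_erase(1024, 1199): two single-group operations instead of one that would overrun the controller's timeout.
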
@@ -2233,7 +2252,8 @@
 
 int mmc_can_trim(struct mmc_card *card)
 {
-	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
 		return 1;
 	return 0;
 }
@@ -2313,16 +2333,28 @@
 	if (!qty)
 		return 0;
 
+	/*
+	 * When specifying a sector range to trim, chances are we might cross
+	 * an erase-group boundary even if the number of sectors is less than
+	 * one erase-group.
+	 * If we can only fit one erase-group in the controller timeout budget,
+	 * we have to take care that erase-group boundaries are not crossed by a
+	 * single trim operation. We flag that special case with "eg_boundary".
+	 * In all other cases we can just decrement qty and pretend that we
+	 * always touch (qty + 1) erase-groups as a simple optimization.
+	 */
 	if (qty == 1)
-		return 1;
+		card->eg_boundary = 1;
+	else
+		qty--;
 
 	/* Convert qty to sectors */
 	if (card->erase_shift)
-		max_discard = --qty << card->erase_shift;
+		max_discard = qty << card->erase_shift;
 	else if (mmc_card_sd(card))
-		max_discard = qty;
+		max_discard = qty + 1;
 	else
-		max_discard = --qty * card->erase_size;
+		max_discard = qty * card->erase_size;
 
 	return max_discard;
 }
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 99a9c90..abd933b 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -398,7 +398,7 @@
 {
 	struct device_node *np;
 	u32 bus_width;
-	int len, ret;
+	int ret;
 	bool cd_cap_invert, cd_gpio_invert = false;
 	bool ro_cap_invert, ro_gpio_invert = false;
 
@@ -445,12 +445,12 @@
 	 */
 
 	/* Parse Card Detection */
-	if (of_find_property(np, "non-removable", &len)) {
+	if (of_property_read_bool(np, "non-removable")) {
 		host->caps |= MMC_CAP_NONREMOVABLE;
 	} else {
 		cd_cap_invert = of_property_read_bool(np, "cd-inverted");
 
-		if (of_find_property(np, "broken-cd", &len))
+		if (of_property_read_bool(np, "broken-cd"))
 			host->caps |= MMC_CAP_NEEDS_POLL;
 
 		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -491,41 +491,41 @@
 	if (ro_cap_invert ^ ro_gpio_invert)
 		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
-	if (of_find_property(np, "cap-sd-highspeed", &len))
+	if (of_property_read_bool(np, "cap-sd-highspeed"))
 		host->caps |= MMC_CAP_SD_HIGHSPEED;
-	if (of_find_property(np, "cap-mmc-highspeed", &len))
+	if (of_property_read_bool(np, "cap-mmc-highspeed"))
 		host->caps |= MMC_CAP_MMC_HIGHSPEED;
-	if (of_find_property(np, "sd-uhs-sdr12", &len))
+	if (of_property_read_bool(np, "sd-uhs-sdr12"))
 		host->caps |= MMC_CAP_UHS_SDR12;
-	if (of_find_property(np, "sd-uhs-sdr25", &len))
+	if (of_property_read_bool(np, "sd-uhs-sdr25"))
 		host->caps |= MMC_CAP_UHS_SDR25;
-	if (of_find_property(np, "sd-uhs-sdr50", &len))
+	if (of_property_read_bool(np, "sd-uhs-sdr50"))
 		host->caps |= MMC_CAP_UHS_SDR50;
-	if (of_find_property(np, "sd-uhs-sdr104", &len))
+	if (of_property_read_bool(np, "sd-uhs-sdr104"))
 		host->caps |= MMC_CAP_UHS_SDR104;
-	if (of_find_property(np, "sd-uhs-ddr50", &len))
+	if (of_property_read_bool(np, "sd-uhs-ddr50"))
 		host->caps |= MMC_CAP_UHS_DDR50;
-	if (of_find_property(np, "cap-power-off-card", &len))
+	if (of_property_read_bool(np, "cap-power-off-card"))
 		host->caps |= MMC_CAP_POWER_OFF_CARD;
-	if (of_find_property(np, "cap-sdio-irq", &len))
+	if (of_property_read_bool(np, "cap-sdio-irq"))
 		host->caps |= MMC_CAP_SDIO_IRQ;
-	if (of_find_property(np, "full-pwr-cycle", &len))
+	if (of_property_read_bool(np, "full-pwr-cycle"))
 		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
-	if (of_find_property(np, "keep-power-in-suspend", &len))
+	if (of_property_read_bool(np, "keep-power-in-suspend"))
 		host->pm_caps |= MMC_PM_KEEP_POWER;
-	if (of_find_property(np, "enable-sdio-wakeup", &len))
+	if (of_property_read_bool(np, "enable-sdio-wakeup"))
 		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-	if (of_find_property(np, "mmc-ddr-1_8v", &len))
+	if (of_property_read_bool(np, "mmc-ddr-1_8v"))
 		host->caps |= MMC_CAP_1_8V_DDR;
-	if (of_find_property(np, "mmc-ddr-1_2v", &len))
+	if (of_property_read_bool(np, "mmc-ddr-1_2v"))
 		host->caps |= MMC_CAP_1_2V_DDR;
-	if (of_find_property(np, "mmc-hs200-1_8v", &len))
+	if (of_property_read_bool(np, "mmc-hs200-1_8v"))
 		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-	if (of_find_property(np, "mmc-hs200-1_2v", &len))
+	if (of_property_read_bool(np, "mmc-hs200-1_2v"))
 		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
-	if (of_find_property(np, "mmc-hs400-1_8v", &len))
+	if (of_property_read_bool(np, "mmc-hs400-1_8v"))
 		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
-	if (of_find_property(np, "mmc-hs400-1_2v", &len))
+	if (of_property_read_bool(np, "mmc-hs400-1_2v"))
 		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
 
 	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
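
The conversion above is behaviour-preserving for presence-only properties; of_property_read_bool() simply drops the unused length out-parameter. A minimal kernel-style sketch (the node and property name are placeholders) showing the two equivalent forms:

/* Sketch: both forms test presence of a boolean DT property;
 * of_property_read_bool() just avoids the unused length variable.
 */
#include <linux/of.h>

static void parse_caps(struct device_node *np)	/* np: any DT node */
{
	int len;
	bool old_way = of_find_property(np, "cap-sd-highspeed", &len) != NULL;
	bool new_way = of_property_read_bool(np, "cap-sd-highspeed");

	WARN_ON(old_way != new_way);	/* same answer either way */
}
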
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 6a0f9c7..8a1e349 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -129,6 +129,14 @@
 
 	  If unsure, say N.
 
+config MMC_SDHCI_OF_AT91
+	tristate "SDHCI OF support for the Atmel SDMMC controller"
+	depends on MMC_SDHCI_PLTFM
+	depends on OF
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Atmel SDMMC driver.
+
 config MMC_SDHCI_OF_ESDHC
 	tristate "SDHCI OF support for the Freescale eSDHC controller"
 	depends on MMC_SDHCI_PLTFM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e928d61..4f3452a 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -67,6 +67,7 @@
 obj-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o
 obj-$(CONFIG_MMC_SDHCI_TEGRA)		+= sdhci-tegra.o
 obj-$(CONFIG_MMC_SDHCI_OF_ARASAN)	+= sdhci-of-arasan.o
+obj-$(CONFIG_MMC_SDHCI_OF_AT91)		+= sdhci-of-at91.o
 obj-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
 obj-$(CONFIG_MMC_SDHCI_BCM_KONA)	+= sdhci-bcm-kona.o
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index b1eac71..dca5518 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -118,7 +118,7 @@
 	struct mmc_host		*mmc;
 	struct device		*dev;
 	unsigned char		id; /* 16xx chips have 2 MMC blocks */
-	void __iomem		*virt_base;
+	void			*virt_base;
 	unsigned int		phys_base;
 	int			irq;
 	unsigned char		bus_mode;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9a39e0b..bf62e42 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/types.h>
-#include <linux/platform_data/atmel.h>
 #include <linux/platform_data/mmc-atmel-mci.h>
 
 #include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index de15121..bc76aa2 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -73,6 +73,9 @@
 	/* It is slot 8 on Rockchip SoCs */
 	host->sdio_id0 = 8;
 
+	/* It needs this quirk on all Rockchip SoCs */
+	host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
+
 	return 0;
 }
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 40e9d8e..fcbf552 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -99,6 +99,9 @@
 
 	__le32		des3;	/* buffer 2 physical address */
 };
+
+/* Each descriptor can transfer up to 4KB of data in chained mode */
+#define DW_MCI_DESC_DATA_LENGTH	0x1000
 #endif /* CONFIG_MMC_DW_IDMAC */
 
 static bool dw_mci_reset(struct dw_mci *host);
@@ -235,8 +238,8 @@
 	struct dw_mci *host = slot->host;
 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 	u32 cmdr;
-	cmd->error = -EINPROGRESS;
 
+	cmd->error = -EINPROGRESS;
 	cmdr = cmd->opcode;
 
 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
@@ -371,7 +374,7 @@
 		 cmd->arg, cmd_flags);
 
 	mci_writel(host, CMDARG, cmd->arg);
-	wmb();
+	wmb(); /* drain writebuffer */
 	dw_mci_wait_while_busy(host, cmd_flags);
 
 	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
@@ -380,6 +383,7 @@
 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 {
 	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+
 	dw_mci_start_command(host, stop, host->stop_cmdr);
 }
 
@@ -462,69 +466,102 @@
 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
 				    unsigned int sg_len)
 {
+	unsigned int desc_len;
 	int i;
-	if (host->dma_64bit_address == 1) {
-		struct idmac_desc_64addr *desc = host->sg_cpu;
 
-		for (i = 0; i < sg_len; i++, desc++) {
+	if (host->dma_64bit_address == 1) {
+		struct idmac_desc_64addr *desc_first, *desc_last, *desc;
+
+		desc_first = desc_last = desc = host->sg_cpu;
+
+		for (i = 0; i < sg_len; i++) {
 			unsigned int length = sg_dma_len(&data->sg[i]);
+
 			u64 mem_addr = sg_dma_address(&data->sg[i]);
 
-			/*
-			 * Set the OWN bit and disable interrupts for this
-			 * descriptor
-			 */
-			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
-						IDMAC_DES0_CH;
-			/* Buffer length */
-			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
+			for ( ; length ; desc++) {
+				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+					   length : DW_MCI_DESC_DATA_LENGTH;
 
-			/* Physical address to DMA to/from */
-			desc->des4 = mem_addr & 0xffffffff;
-			desc->des5 = mem_addr >> 32;
+				length -= desc_len;
+
+				/*
+				 * Set the OWN bit and disable interrupts
+				 * for this descriptor
+				 */
+				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+							IDMAC_DES0_CH;
+
+				/* Buffer length */
+				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
+
+				/* Physical address to DMA to/from */
+				desc->des4 = mem_addr & 0xffffffff;
+				desc->des5 = mem_addr >> 32;
+
+				/* Update physical address for the next desc */
+				mem_addr += desc_len;
+
+				/* Save pointer to the last descriptor */
+				desc_last = desc;
+			}
 		}
 
 		/* Set first descriptor */
-		desc = host->sg_cpu;
-		desc->des0 |= IDMAC_DES0_FD;
+		desc_first->des0 |= IDMAC_DES0_FD;
 
 		/* Set last descriptor */
-		desc = host->sg_cpu + (i - 1) *
-				sizeof(struct idmac_desc_64addr);
-		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
-		desc->des0 |= IDMAC_DES0_LD;
+		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+		desc_last->des0 |= IDMAC_DES0_LD;
 
 	} else {
-		struct idmac_desc *desc = host->sg_cpu;
+		struct idmac_desc *desc_first, *desc_last, *desc;
 
-		for (i = 0; i < sg_len; i++, desc++) {
+		desc_first = desc_last = desc = host->sg_cpu;
+
+		for (i = 0; i < sg_len; i++) {
 			unsigned int length = sg_dma_len(&data->sg[i]);
+
 			u32 mem_addr = sg_dma_address(&data->sg[i]);
 
-			/*
-			 * Set the OWN bit and disable interrupts for this
-			 * descriptor
-			 */
-			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
-					IDMAC_DES0_DIC | IDMAC_DES0_CH);
-			/* Buffer length */
-			IDMAC_SET_BUFFER1_SIZE(desc, length);
+			for ( ; length ; desc++) {
+				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+					   length : DW_MCI_DESC_DATA_LENGTH;
 
-			/* Physical address to DMA to/from */
-			desc->des2 = cpu_to_le32(mem_addr);
+				length -= desc_len;
+
+				/*
+				 * Set the OWN bit and disable interrupts
+				 * for this descriptor
+				 */
+				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+							 IDMAC_DES0_DIC |
+							 IDMAC_DES0_CH);
+
+				/* Buffer length */
+				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
+
+				/* Physical address to DMA to/from */
+				desc->des2 = cpu_to_le32(mem_addr);
+
+				/* Update physical address for the next desc */
+				mem_addr += desc_len;
+
+				/* Save pointer to the last descriptor */
+				desc_last = desc;
+			}
 		}
 
 		/* Set first descriptor */
-		desc = host->sg_cpu;
-		desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
 
 		/* Set last descriptor */
-		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
-		desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
-		desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
+					       IDMAC_DES0_DIC));
+		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
 	}
 
-	wmb();
+	wmb(); /* drain writebuffer */
 }
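
The new inner loops split each scatterlist entry into at most DW_MCI_DESC_DATA_LENGTH (4KB) per descriptor. A standalone sketch of that chunking, with made-up buffer sizes:

/* Standalone sketch of the 4KB descriptor chunking added above:
 * one (addr, len) scatterlist entry becomes ceil(len / 4KB) chained
 * descriptors. The address and length below are made up.
 */
#include <stdio.h>

#define DW_MCI_DESC_DATA_LENGTH	0x1000	/* 4KB max per descriptor */

int main(void)
{
	unsigned long long mem_addr = 0x80000000ULL;
	unsigned int length = 0x2800;	/* 10KB entry -> 3 descriptors */
	unsigned int desc_len;

	for (; length; mem_addr += desc_len, length -= desc_len) {
		desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
			   length : DW_MCI_DESC_DATA_LENGTH;
		printf("desc: addr=0x%llx len=0x%x\n", mem_addr, desc_len);
	}
	return 0;
}
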
 
 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
@@ -542,6 +579,7 @@
 	temp |= SDMMC_CTRL_USE_IDMAC;
 	mci_writel(host, CTRL, temp);
 
+	/* drain writebuffer */
 	wmb();
 
 	/* Enable the IDMAC */
@@ -589,7 +627,9 @@
 		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
 
 		/* Forward link the descriptor list */
-		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
+		for (i = 0, p = host->sg_cpu;
+		     i < host->ring_size - 1;
+		     i++, p++) {
 			p->des3 = cpu_to_le32(host->sg_dma +
 					(sizeof(struct idmac_desc) * (i + 1)));
 			p->des1 = 0;
@@ -718,7 +758,7 @@
 	u32 fifo_width = 1 << host->data_shift;
 	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
-	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
+	int idx = ARRAY_SIZE(mszs) - 1;
 
 	tx_wmark = (host->fifo_depth) / 2;
 	tx_wmark_invers = host->fifo_depth - tx_wmark;
@@ -843,6 +883,7 @@
 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
 {
 	unsigned long irqflags;
+	int flags = SG_MITER_ATOMIC;
 	u32 temp;
 
 	data->error = -EINPROGRESS;
@@ -859,7 +900,6 @@
 	}
 
 	if (dw_mci_submit_data_dma(host, data)) {
-		int flags = SG_MITER_ATOMIC;
 		if (host->data->flags & MMC_DATA_READ)
 			flags |= SG_MITER_TO_SG;
 		else
@@ -906,7 +946,7 @@
 	unsigned int cmd_status = 0;
 
 	mci_writel(host, CMDARG, arg);
-	wmb();
+	wmb(); /* drain writebuffer */
 	dw_mci_wait_while_busy(host, cmd);
 	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 
@@ -1019,7 +1059,7 @@
 
 	if (data) {
 		dw_mci_submit_data(host, data);
-		wmb();
+		wmb(); /* drain writebuffer */
 	}
 
 	dw_mci_start_command(host, cmd, cmdflags);
@@ -1384,14 +1424,15 @@
 	struct dw_mci_slot *slot = mmc_priv(mmc);
 	struct dw_mci *host = slot->host;
 	const struct dw_mci_drv_data *drv_data = host->drv_data;
-	int err = -ENOSYS;
+	int err = -EINVAL;
 
 	if (drv_data && drv_data->execute_tuning)
 		err = drv_data->execute_tuning(slot);
 	return err;
 }
 
-static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
+				       struct mmc_ios *ios)
 {
 	struct dw_mci_slot *slot = mmc_priv(mmc);
 	struct dw_mci *host = slot->host;
@@ -1533,6 +1574,20 @@
 	return data->error;
 }
 
+static void dw_mci_set_drto(struct dw_mci *host)
+{
+	unsigned int drto_clks;
+	unsigned int drto_ms;
+
+	drto_clks = mci_readl(host, TMOUT) >> 8;
+	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+
+	/* add a bit of spare time */
+	drto_ms += 10;
+
+	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+}
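
For a sense of scale, here is the timeout conversion above worked through with a made-up register value (the maximum 24-bit data-timeout field at a 50 MHz card clock):

/* Worked example of the DRTO conversion above, with assumed values:
 * TMOUT >> 8 = 0xFFFFFF data-timeout clocks, bus_hz = 50 MHz.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int drto_clks = 0xFFFFFF;	/* max 24-bit value */
	unsigned int bus_hz = 50000000;		/* 50 MHz card clock */
	unsigned int drto_ms = DIV_ROUND_UP(drto_clks, bus_hz / 1000);

	drto_ms += 10;				/* spare time, as above */
	printf("DRTO timer: %u ms\n", drto_ms);	/* prints 346 ms */
	return 0;
}
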
+
 static void dw_mci_tasklet_func(unsigned long priv)
 {
 	struct dw_mci *host = (struct dw_mci *)priv;
@@ -1610,8 +1665,16 @@
 			}
 
 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
-						&host->pending_events))
+						&host->pending_events)) {
+				/*
+				 * If all data-related interrupts don't arrive within
+				 * the given time in the reading-data state, arm DRTO.
+				 */
+				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+				    (host->dir_status == DW_MCI_RECV_STATUS))
+					dw_mci_set_drto(host);
 				break;
+			}
 
 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
 
@@ -1644,8 +1707,17 @@
 
 		case STATE_DATA_BUSY:
 			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
-						&host->pending_events))
+						&host->pending_events)) {
+				/*
+				 * If the data error interrupt comes but the data
+				 * over interrupt doesn't arrive within the given time
+				 * in the reading-data state, arm the DRTO timer.
+				 */
+				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+				    (host->dir_status == DW_MCI_RECV_STATUS))
+					dw_mci_set_drto(host);
 				break;
+			}
 
 			host->data = NULL;
 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
@@ -1743,7 +1815,7 @@
 /* pull first bytes from part_buf, only use during pull */
 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
 {
-	cnt = min(cnt, (int)host->part_buf_count);
+	cnt = min_t(int, cnt, host->part_buf_count);
 	if (cnt) {
 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
 		       cnt);
@@ -1769,6 +1841,7 @@
 	/* try and push anything in the part_buf */
 	if (unlikely(host->part_buf_count)) {
 		int len = dw_mci_push_part_bytes(host, buf, cnt);
+
 		buf += len;
 		cnt -= len;
 		if (host->part_buf_count == 2) {
@@ -1795,6 +1868,7 @@
 #endif
 	{
 		u16 *pdata = buf;
+
 		for (; cnt >= 2; cnt -= 2)
 			mci_fifo_writew(host->fifo_reg, *pdata++);
 		buf = pdata;
@@ -1819,6 +1893,7 @@
 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
 			int items = len >> 1;
 			int i;
+
 			for (i = 0; i < items; ++i)
 				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
 			/* memcpy from aligned buffer into output buffer */
@@ -1830,6 +1905,7 @@
 #endif
 	{
 		u16 *pdata = buf;
+
 		for (; cnt >= 2; cnt -= 2)
 			*pdata++ = mci_fifo_readw(host->fifo_reg);
 		buf = pdata;
@@ -1848,6 +1924,7 @@
 	/* try and push anything in the part_buf */
 	if (unlikely(host->part_buf_count)) {
 		int len = dw_mci_push_part_bytes(host, buf, cnt);
+
 		buf += len;
 		cnt -= len;
 		if (host->part_buf_count == 4) {
@@ -1874,6 +1951,7 @@
 #endif
 	{
 		u32 *pdata = buf;
+
 		for (; cnt >= 4; cnt -= 4)
 			mci_fifo_writel(host->fifo_reg, *pdata++);
 		buf = pdata;
@@ -1898,6 +1976,7 @@
 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
 			int items = len >> 2;
 			int i;
+
 			for (i = 0; i < items; ++i)
 				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
 			/* memcpy from aligned buffer into output buffer */
@@ -1909,6 +1988,7 @@
 #endif
 	{
 		u32 *pdata = buf;
+
 		for (; cnt >= 4; cnt -= 4)
 			*pdata++ = mci_fifo_readl(host->fifo_reg);
 		buf = pdata;
@@ -1927,6 +2007,7 @@
 	/* try and push anything in the part_buf */
 	if (unlikely(host->part_buf_count)) {
 		int len = dw_mci_push_part_bytes(host, buf, cnt);
+
 		buf += len;
 		cnt -= len;
 
@@ -1954,6 +2035,7 @@
 #endif
 	{
 		u64 *pdata = buf;
+
 		for (; cnt >= 8; cnt -= 8)
 			mci_fifo_writeq(host->fifo_reg, *pdata++);
 		buf = pdata;
@@ -1978,6 +2060,7 @@
 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
 			int items = len >> 3;
 			int i;
+
 			for (i = 0; i < items; ++i)
 				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
 
@@ -1990,6 +2073,7 @@
 #endif
 	{
 		u64 *pdata = buf;
+
 		for (; cnt >= 8; cnt -= 8)
 			*pdata++ = mci_fifo_readq(host->fifo_reg);
 		buf = pdata;
@@ -2065,7 +2149,7 @@
 done:
 	sg_miter_stop(sg_miter);
 	host->sg = NULL;
-	smp_wmb();
+	smp_wmb(); /* drain writebuffer */
 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 }
 
@@ -2119,7 +2203,7 @@
 done:
 	sg_miter_stop(sg_miter);
 	host->sg = NULL;
-	smp_wmb();
+	smp_wmb(); /* drain writebuffer */
 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 }
 
@@ -2128,7 +2212,7 @@
 	if (!host->cmd_status)
 		host->cmd_status = status;
 
-	smp_wmb();
+	smp_wmb(); /* drain writebuffer */
 
 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
 	tasklet_schedule(&host->tasklet);
@@ -2192,7 +2276,7 @@
 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
 			host->cmd_status = pending;
-			smp_wmb();
+			smp_wmb(); /* drain writebuffer */
 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
 		}
 
@@ -2200,16 +2284,19 @@
 			/* if there is an error report DATA_ERROR */
 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
 			host->data_status = pending;
-			smp_wmb();
+			smp_wmb(); /* drain writebuffer */
 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
 			tasklet_schedule(&host->tasklet);
 		}
 
 		if (pending & SDMMC_INT_DATA_OVER) {
+			if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+				del_timer(&host->dto_timer);
+
 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
 			if (!host->data_status)
 				host->data_status = pending;
-			smp_wmb();
+			smp_wmb(); /* drain writebuffer */
 			if (host->dir_status == DW_MCI_RECV_STATUS) {
 				if (host->sg != NULL)
 					dw_mci_read_data_pio(host, true);
@@ -2383,27 +2470,20 @@
 	if (ret)
 		goto err_host_allocated;
 
-	if (host->pdata->blk_settings) {
-		mmc->max_segs = host->pdata->blk_settings->max_segs;
-		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
-		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
-		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
-		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
-	} else {
-		/* Useful defaults if platform data is unset. */
-#ifdef CONFIG_MMC_DW_IDMAC
+	/* Useful defaults if platform data is unset. */
+	if (host->use_dma) {
 		mmc->max_segs = host->ring_size;
 		mmc->max_blk_size = 65536;
 		mmc->max_seg_size = 0x1000;
 		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
 		mmc->max_blk_count = mmc->max_req_size / 512;
-#else
+	} else {
 		mmc->max_segs = 64;
 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
 		mmc->max_blk_count = 512;
-		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_req_size = mmc->max_blk_size *
+				    mmc->max_blk_count;
 		mmc->max_seg_size = mmc->max_req_size;
-#endif /* CONFIG_MMC_DW_IDMAC */
 	}
 
 	if (dw_mci_get_cd(mmc))
@@ -2473,8 +2553,8 @@
 	if (host->dma_ops->init && host->dma_ops->start &&
 	    host->dma_ops->stop && host->dma_ops->cleanup) {
 		if (host->dma_ops->init(host)) {
-			dev_err(host->dev, "%s: Unable to initialize "
-				"DMA Controller.\n", __func__);
+			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
+				__func__);
 			goto no_dma;
 		}
 	} else {
@@ -2488,7 +2568,6 @@
 no_dma:
 	dev_info(host->dev, "Using PIO mode.\n");
 	host->use_dma = 0;
-	return;
 }
 
 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2542,6 +2621,7 @@
 		if (host->use_dma) {
 			unsigned long timeout = jiffies + msecs_to_jiffies(500);
 			u32 status;
+
 			do {
 				status = mci_readl(host, STATUS);
 				if (!(status & SDMMC_STATUS_DMA_REQ))
@@ -2551,8 +2631,8 @@
 
 			if (status & SDMMC_STATUS_DMA_REQ) {
 				dev_err(host->dev,
-					"%s: Timeout waiting for dma_req to "
-					"clear during reset\n", __func__);
+					"%s: Timeout waiting for dma_req to clear during reset\n",
+					__func__);
 				goto ciu_out;
 			}
 
@@ -2563,8 +2643,8 @@
 	} else {
 		/* if the controller reset bit did clear, then set clock regs */
 		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
-			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
-				"clear but ciu was reset, doing clock update\n",
+			dev_err(host->dev,
+				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
 				__func__);
 			goto ciu_out;
 		}
@@ -2598,6 +2678,28 @@
 	tasklet_schedule(&host->tasklet);
 }
 
+static void dw_mci_dto_timer(unsigned long arg)
+{
+	struct dw_mci *host = (struct dw_mci *)arg;
+
+	switch (host->state) {
+	case STATE_SENDING_DATA:
+	case STATE_DATA_BUSY:
+		/*
+		 * If the DTO interrupt does NOT come in the sending-data state,
+		 * we should notify the driver to terminate the current transfer
+		 * and report a data timeout to the core.
+		 */
+		host->data_status = SDMMC_INT_DRTO;
+		set_bit(EVENT_DATA_ERROR, &host->pending_events);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+		break;
+	default:
+		break;
+	}
+}
+
 #ifdef CONFIG_OF
 static struct dw_mci_of_quirks {
 	char *quirk;
@@ -2625,8 +2727,8 @@
 	/* find out number of slots supported */
 	if (of_property_read_u32(dev->of_node, "num-slots",
 				&pdata->num_slots)) {
-		dev_info(dev, "num-slots property not found, "
-				"assuming 1 slot is available\n");
+		dev_info(dev,
+			 "num-slots property not found, assuming 1 slot is available\n");
 		pdata->num_slots = 1;
 	}
 
@@ -2636,8 +2738,8 @@
 			pdata->quirks |= of_quirks[idx].id;
 
 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
-		dev_info(dev, "fifo-depth property not found, using "
-				"value of FIFOTH register as default\n");
+		dev_info(dev,
+			 "fifo-depth property not found, using value of FIFOTH register as default\n");
 
 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
 
@@ -2650,8 +2752,10 @@
 			return ERR_PTR(ret);
 	}
 
-	if (of_find_property(np, "supports-highspeed", NULL))
+	if (of_find_property(np, "supports-highspeed", NULL)) {
+		dev_info(dev, "supports-highspeed property is deprecated.\n");
 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+	}
 
 	return pdata;
 }
@@ -2706,7 +2810,7 @@
 		}
 	}
 
-	if (host->pdata->num_slots > 1) {
+	if (host->pdata->num_slots < 1) {
 		dev_err(host->dev,
 			"Platform data must supply num_slots.\n");
 		return -ENODEV;
@@ -2774,6 +2878,10 @@
 
 	host->quirks = host->pdata->quirks;
 
+	if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+		setup_timer(&host->dto_timer,
+			    dw_mci_dto_timer, (unsigned long)host);
+
 	spin_lock_init(&host->lock);
 	spin_lock_init(&host->irq_lock);
 	INIT_LIST_HEAD(&host->queue);
@@ -2874,11 +2982,11 @@
 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
 		   DW_MCI_ERROR_FLAGS);
-	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+	/* Enable mci interrupt */
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
 
-	dev_info(host->dev, "DW MMC controller at irq %d, "
-		 "%d bit host data width, "
-		 "%u deep fifo\n",
+	dev_info(host->dev,
+		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
 		 host->irq, width, fifo_size);
 
 	/* We need at least one slot to succeed */
@@ -2893,8 +3001,9 @@
 	if (init_slots) {
 		dev_info(host->dev, "%d slots initialized\n", init_slots);
 	} else {
-		dev_dbg(host->dev, "attempted to initialize %d slots, "
-					"but failed on all\n", host->num_slots);
+		dev_dbg(host->dev,
+			"attempted to initialize %d slots, but failed on all\n",
+			host->num_slots);
 		goto err_dmaunmap;
 	}
 
@@ -2992,6 +3101,7 @@
 
 	for (i = 0; i < host->num_slots; i++) {
 		struct dw_mci_slot *slot = host->slot[i];
+
 		if (!slot)
 			continue;
 		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 68dd6c7..b763b11 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -948,6 +948,7 @@
 {
 	struct mmc_data *data = req->data;
 	int i, use_dma = 1, block_size;
+	struct scatterlist *sg;
 	unsigned sg_len;
 
 	host->data = data;
@@ -972,8 +973,8 @@
 	sg_len = (data->blocks == 1) ? 1 : data->sg_len;
 
 	/* Only do DMA for entire blocks */
-	for (i = 0; i < sg_len; i++) {
-		if ((data->sg[i].length % block_size) != 0) {
+	for_each_sg(data->sg, sg, sg_len, i) {
+		if ((sg->length % block_size) != 0) {
 			use_dma = 0;
 			break;
 		}
@@ -1419,8 +1420,10 @@
 	host->reg_shift = (mmc_omap7xx() ? 1 : 2);
 
 	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-	if (!host->mmc_omap_wq)
+	if (!host->mmc_omap_wq) {
+		ret = -ENOMEM;
 		goto err_plat_cleanup;
+	}
 
 	for (i = 0; i < pdata->nr_slots; i++) {
 		ret = mmc_omap_new_slot(host, i);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4d12032..781e4db 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -181,18 +181,9 @@
 	struct	mmc_data	*data;
 	struct	clk		*fclk;
 	struct	clk		*dbclk;
-	/*
-	 * vcc == configured supply
-	 * vcc_aux == optional
-	 *   -	MMC1, supply for DAT4..DAT7
-	 *   -	MMC2/MMC2, external level shifter voltage supply, for
-	 *	chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
-	 */
-	struct	regulator	*vcc;
-	struct	regulator	*vcc_aux;
 	struct	regulator	*pbias;
-	bool			pbias_enabled;
 	void	__iomem		*base;
+	int			vqmmc_enabled;
 	resource_size_t		mapbase;
 	spinlock_t		irq_lock; /* Prevent races with irq handler */
 	unsigned int		dma_len;
@@ -213,7 +204,6 @@
 	int			context_loss;
 	int			protect_card;
 	int			reqs_blocked;
-	int			use_reg;
 	int			req_in_progress;
 	unsigned long		clk_rate;
 	unsigned int		flags;
@@ -254,32 +244,133 @@
 	return mmc_gpio_get_cd(host->mmc);
 }
 
-#ifdef CONFIG_REGULATOR
+static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
+{
+	int ret;
+	struct omap_hsmmc_host *host = mmc_priv(mmc);
+	struct mmc_ios *ios = &mmc->ios;
+
+	if (mmc->supply.vmmc) {
+		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+		if (ret)
+			return ret;
+	}
+
+	/* Enable interface voltage rail, if needed */
+	if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
+		ret = regulator_enable(mmc->supply.vqmmc);
+		if (ret) {
+			dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
+			goto err_vqmmc;
+		}
+		host->vqmmc_enabled = 1;
+	}
+
+	return 0;
+
+err_vqmmc:
+	if (mmc->supply.vmmc)
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+	return ret;
+}
+
+static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
+{
+	int ret;
+	int status;
+	struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+	if (mmc->supply.vqmmc && host->vqmmc_enabled) {
+		ret = regulator_disable(mmc->supply.vqmmc);
+		if (ret) {
+			dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
+			return ret;
+		}
+		host->vqmmc_enabled = 0;
+	}
+
+	if (mmc->supply.vmmc) {
+		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+		if (ret)
+			goto err_set_ocr;
+	}
+
+	return 0;
+
+err_set_ocr:
+	if (mmc->supply.vqmmc) {
+		status = regulator_enable(mmc->supply.vqmmc);
+		if (status)
+			dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
+	}
+
+	return ret;
+}
+
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
+				int vdd)
+{
+	int ret;
+
+	if (!host->pbias)
+		return 0;
+
+	if (power_on) {
+		if (vdd <= VDD_165_195)
+			ret = regulator_set_voltage(host->pbias, VDD_1V8,
+						    VDD_1V8);
+		else
+			ret = regulator_set_voltage(host->pbias, VDD_3V0,
+						    VDD_3V0);
+		if (ret < 0) {
+			dev_err(host->dev, "pbias set voltage fail\n");
+			return ret;
+		}
+
+		if (!regulator_is_enabled(host->pbias)) {
+			ret = regulator_enable(host->pbias);
+			if (ret) {
+				dev_err(host->dev, "pbias reg enable fail\n");
+				return ret;
+			}
+		}
+	} else {
+		if (regulator_is_enabled(host->pbias)) {
+			ret = regulator_disable(host->pbias);
+			if (ret) {
+				dev_err(host->dev, "pbias reg disable fail\n");
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
 
 static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
 {
 	struct omap_hsmmc_host *host =
 		platform_get_drvdata(to_platform_device(dev));
+	struct mmc_host *mmc = host->mmc;
 	int ret = 0;
 
+	if (mmc_pdata(host)->set_power)
+		return mmc_pdata(host)->set_power(dev, power_on, vdd);
+
 	/*
 	 * If we don't see a Vcc regulator, assume it's a fixed
 	 * voltage always-on regulator.
 	 */
-	if (!host->vcc)
+	if (!mmc->supply.vmmc)
 		return 0;
 
 	if (mmc_pdata(host)->before_set_reg)
 		mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
 
-	if (host->pbias) {
-		if (host->pbias_enabled == 1) {
-			ret = regulator_disable(host->pbias);
-			if (!ret)
-				host->pbias_enabled = 0;
-		}
-		regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
-	}
+	ret = omap_hsmmc_set_pbias(host, false, 0);
+	if (ret)
+		return ret;
 
 	/*
 	 * Assume Vcc regulator is used only to power the card ... OMAP
@@ -295,129 +386,138 @@
 	 * chips/cards need an interface voltage rail too.
 	 */
 	if (power_on) {
-		if (host->vcc)
-			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
-		/* Enable interface voltage rail, if needed */
-		if (ret == 0 && host->vcc_aux) {
-			ret = regulator_enable(host->vcc_aux);
-			if (ret < 0 && host->vcc)
-				ret = mmc_regulator_set_ocr(host->mmc,
-							host->vcc, 0);
-		}
+		ret = omap_hsmmc_enable_supply(mmc);
+		if (ret)
+			return ret;
+
+		ret = omap_hsmmc_set_pbias(host, true, vdd);
+		if (ret)
+			goto err_set_voltage;
 	} else {
-		/* Shut down the rail */
-		if (host->vcc_aux)
-			ret = regulator_disable(host->vcc_aux);
-		if (host->vcc) {
-			/* Then proceed to shut down the local regulator */
-			ret = mmc_regulator_set_ocr(host->mmc,
-						host->vcc, 0);
-		}
-	}
-
-	if (host->pbias) {
-		if (vdd <= VDD_165_195)
-			ret = regulator_set_voltage(host->pbias, VDD_1V8,
-								VDD_1V8);
-		else
-			ret = regulator_set_voltage(host->pbias, VDD_3V0,
-								VDD_3V0);
-		if (ret < 0)
-			goto error_set_power;
-
-		if (host->pbias_enabled == 0) {
-			ret = regulator_enable(host->pbias);
-			if (!ret)
-				host->pbias_enabled = 1;
-		}
+		ret = omap_hsmmc_disable_supply(mmc);
+		if (ret)
+			return ret;
 	}
 
 	if (mmc_pdata(host)->after_set_reg)
 		mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
 
-error_set_power:
+	return 0;
+
+err_set_voltage:
+	omap_hsmmc_disable_supply(mmc);
+
 	return ret;
 }
 
+static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
+{
+	int ret;
+
+	if (!reg)
+		return 0;
+
+	if (regulator_is_enabled(reg)) {
+		ret = regulator_enable(reg);
+		if (ret)
+			return ret;
+
+		ret = regulator_disable(reg);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+	int ret;
+
+	/*
+	 * Disable regulators enabled during boot and get the usecount
+	 * right so that regulators can be enabled/disabled by checking
+	 * the return value of regulator_is_enabled.
+	 */
+	ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc);
+	if (ret) {
+		dev_err(host->dev, "fail to disable boot enabled vmmc reg\n");
+		return ret;
+	}
+
+	ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc);
+	if (ret) {
+		dev_err(host->dev,
+			"fail to disable boot enabled vmmc_aux reg\n");
+		return ret;
+	}
+
+	ret = omap_hsmmc_disable_boot_regulator(host->pbias);
+	if (ret) {
+		dev_err(host->dev,
+			"failed to disable boot enabled pbias reg\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 {
-	struct regulator *reg;
 	int ocr_value = 0;
+	int ret;
+	struct mmc_host *mmc = host->mmc;
 
-	reg = devm_regulator_get(host->dev, "vmmc");
-	if (IS_ERR(reg)) {
-		dev_err(host->dev, "unable to get vmmc regulator %ld\n",
-			PTR_ERR(reg));
-		return PTR_ERR(reg);
+	if (mmc_pdata(host)->set_power)
+		return 0;
+
+	mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
+	if (IS_ERR(mmc->supply.vmmc)) {
+		ret = PTR_ERR(mmc->supply.vmmc);
+		if (ret != -ENODEV)
+			return ret;
+		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
+			PTR_ERR(mmc->supply.vmmc));
+		mmc->supply.vmmc = NULL;
 	} else {
-		host->vcc = reg;
-		ocr_value = mmc_regulator_get_ocrmask(reg);
-		if (!mmc_pdata(host)->ocr_mask) {
+		ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+		if (ocr_value > 0)
 			mmc_pdata(host)->ocr_mask = ocr_value;
-		} else {
-			if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
-				dev_err(host->dev, "ocrmask %x is not supported\n",
-					mmc_pdata(host)->ocr_mask);
-				mmc_pdata(host)->ocr_mask = 0;
-				return -EINVAL;
-			}
-		}
 	}
-	mmc_pdata(host)->set_power = omap_hsmmc_set_power;
 
 	/* Allow an aux regulator */
-	reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
-	host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+	mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
+	if (IS_ERR(mmc->supply.vqmmc)) {
+		ret = PTR_ERR(mmc->supply.vqmmc);
+		if (ret != -ENODEV)
+			return ret;
+		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
+			PTR_ERR(mmc->supply.vqmmc));
+		mmc->supply.vqmmc = NULL;
+	}
 
-	reg = devm_regulator_get_optional(host->dev, "pbias");
-	host->pbias = IS_ERR(reg) ? NULL : reg;
+	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
+	if (IS_ERR(host->pbias)) {
+		ret = PTR_ERR(host->pbias);
+		if (ret != -ENODEV)
+			return ret;
+		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
+			PTR_ERR(host->pbias));
+		host->pbias = NULL;
+	}
 
 	/* For eMMC do not power off when not in sleep state */
 	if (mmc_pdata(host)->no_regulator_off_init)
 		return 0;
-	/*
-	 * To disable boot_on regulator, enable regulator
-	 * to increase usecount and then disable it.
-	 */
-	if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
-	    (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
-		int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
 
-		mmc_pdata(host)->set_power(host->dev, 1, vdd);
-		mmc_pdata(host)->set_power(host->dev, 0, 0);
-	}
+	ret = omap_hsmmc_disable_boot_regulators(host);
+	if (ret)
+		return ret;
 
 	return 0;
 }
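The rewritten omap_hsmmc_reg_get() leans on the devm_regulator_get_optional() idiom throughout: -ENODEV means the board simply does not describe that supply and the driver may carry on with a NULL regulator, while any other error, -EPROBE_DEFER included, must be propagated. A hedged sketch of the pattern in isolation; get_optional_supply() is a hypothetical helper, not something in the driver.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	/* Hypothetical helper showing the optional-supply idiom used above. */
	static struct regulator *get_optional_supply(struct device *dev,
						     const char *id, int *err)
	{
		struct regulator *reg = devm_regulator_get_optional(dev, id);

		*err = 0;
		if (IS_ERR(reg)) {
			if (PTR_ERR(reg) != -ENODEV) {
				/* Real failure, e.g. -EPROBE_DEFER: propagate. */
				*err = PTR_ERR(reg);
				return NULL;
			}
			/* Supply not wired up on this board: legal, skip it. */
			reg = NULL;
		}
		return reg;
	}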
 
-static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
-{
-	mmc_pdata(host)->set_power = NULL;
-}
-
-static inline int omap_hsmmc_have_reg(void)
-{
-	return 1;
-}
-
-#else
-
-static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
-{
-	return -EINVAL;
-}
-
-static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
-{
-}
-
-static inline int omap_hsmmc_have_reg(void)
-{
-	return 0;
-}
-
-#endif
-
 static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
 
 static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
@@ -1149,11 +1249,11 @@
 		clk_disable_unprepare(host->dbclk);
 
 	/* Turn the power off */
-	ret = mmc_pdata(host)->set_power(host->dev, 0, 0);
+	ret = omap_hsmmc_set_power(host->dev, 0, 0);
 
 	/* Turn the power ON with given VDD 1.8 or 3.0v */
 	if (!ret)
-		ret = mmc_pdata(host)->set_power(host->dev, 1, vdd);
+		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
 	pm_runtime_get_sync(host->dev);
 	if (host->dbclk)
 		clk_prepare_enable(host->dbclk);
@@ -1552,10 +1652,10 @@
 	if (ios->power_mode != host->power_mode) {
 		switch (ios->power_mode) {
 		case MMC_POWER_OFF:
-			mmc_pdata(host)->set_power(host->dev, 0, 0);
+			omap_hsmmc_set_power(host->dev, 0, 0);
 			break;
 		case MMC_POWER_UP:
-			mmc_pdata(host)->set_power(host->dev, 1, ios->vdd);
+			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
 			break;
 		case MMC_POWER_ON:
 			do_send_init_stream = 1;
@@ -1953,7 +2053,7 @@
 	host->base	= base + pdata->reg_offset;
 	host->power_mode = MMC_POWER_OFF;
 	host->next_data.cookie = 1;
-	host->pbias_enabled = 0;
+	host->vqmmc_enabled = 0;
 
 	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
 	if (ret)
@@ -2078,12 +2178,9 @@
 		goto err_irq;
 	}
 
-	if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
-		ret = omap_hsmmc_reg_get(host);
-		if (ret)
-			goto err_irq;
-		host->use_reg = 1;
-	}
+	ret = omap_hsmmc_reg_get(host);
+	if (ret)
+		goto err_irq;
 
 	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
 
@@ -2125,8 +2222,6 @@
 
 err_slot_name:
 	mmc_remove_host(mmc);
-	if (host->use_reg)
-		omap_hsmmc_reg_put(host);
 err_irq:
 	device_init_wakeup(&pdev->dev, false);
 	if (host->tx_chan)
@@ -2150,8 +2245,6 @@
 
 	pm_runtime_get_sync(host->dev);
 	mmc_remove_host(host->mmc);
-	if (host->use_reg)
-		omap_hsmmc_reg_put(host);
 
 	if (host->tx_chan)
 		dma_release_channel(host->tx_chan);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1b6d0bf..1420f29 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -22,7 +22,9 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
@@ -37,7 +39,6 @@
 #include <asm/sizes.h>
 
 #include <mach/hardware.h>
-#include <mach/dma.h>
 #include <linux/platform_data/mmc-pxamci.h>
 
 #include "pxamci.h"
@@ -58,7 +59,6 @@
 	struct clk		*clk;
 	unsigned long		clkrate;
 	int			irq;
-	int			dma;
 	unsigned int		clkrt;
 	unsigned int		cmdat;
 	unsigned int		imask;
@@ -69,8 +69,10 @@
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
 
+	struct dma_chan		*dma_chan_rx;
+	struct dma_chan		*dma_chan_tx;
+	dma_cookie_t		dma_cookie;
 	dma_addr_t		sg_dma;
-	struct pxa_dma_desc	*sg_cpu;
 	unsigned int		dma_len;
 
 	unsigned int		dma_dir;
@@ -173,14 +175,18 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void pxamci_dma_irq(void *param);
+
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
+	struct dma_async_tx_descriptor *tx;
+	enum dma_data_direction direction;
+	struct dma_slave_config	config;
+	struct dma_chan *chan;
 	unsigned int nob = data->blocks;
 	unsigned long long clks;
 	unsigned int timeout;
-	bool dalgn = 0;
-	u32 dcmd;
-	int i;
+	int ret;
 
 	host->data = data;
 
@@ -195,54 +201,48 @@
 	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
 	writel((timeout + 255) / 256, host->base + MMC_RDTO);
 
+	memset(&config, 0, sizeof(config));
+	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	config.src_addr = host->res->start + MMC_RXFIFO;
+	config.dst_addr = host->res->start + MMC_TXFIFO;
+	config.src_maxburst = 32;
+	config.dst_maxburst = 32;
+
 	if (data->flags & MMC_DATA_READ) {
 		host->dma_dir = DMA_FROM_DEVICE;
-		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
-		DRCMR(host->dma_drcmrtx) = 0;
-		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
+		direction = DMA_DEV_TO_MEM;
+		chan = host->dma_chan_rx;
 	} else {
 		host->dma_dir = DMA_TO_DEVICE;
-		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
-		DRCMR(host->dma_drcmrrx) = 0;
-		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
+		direction = DMA_MEM_TO_DEV;
+		chan = host->dma_chan_tx;
 	}
 
-	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
+	config.direction = direction;
 
-	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+	ret = dmaengine_slave_config(chan, &config);
+	if (ret < 0) {
+		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
+		return;
+	}
+
+	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
 				   host->dma_dir);
 
-	for (i = 0; i < host->dma_len; i++) {
-		unsigned int length = sg_dma_len(&data->sg[i]);
-		host->sg_cpu[i].dcmd = dcmd | length;
-		if (length & 31 && !(data->flags & MMC_DATA_READ))
-			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
-		/* Not aligned to 8-byte boundary? */
-		if (sg_dma_address(&data->sg[i]) & 0x7)
-			dalgn = 1;
-		if (data->flags & MMC_DATA_READ) {
-			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
-			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
-		} else {
-			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
-			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
-		}
-		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
-					sizeof(struct pxa_dma_desc);
+	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
+				     DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+		return;
 	}
-	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
-	wmb();
 
-	/*
-	 * The PXA27x DMA controller encounters overhead when working with
-	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
-	 * mode only if we have unaligned data.
-	 */
-	if (dalgn)
-		DALGN |= (1 << host->dma);
-	else
-		DALGN &= ~(1 << host->dma);
-	DDADR(host->dma) = host->sg_dma;
+	if (!(data->flags & MMC_DATA_READ)) {
+		tx->callback = pxamci_dma_irq;
+		tx->callback_param = host;
+	}
+
+	host->dma_cookie = dmaengine_submit(tx);
 
 	/*
 	 * workaround for erratum #91:
@@ -251,7 +251,7 @@
 	 * before starting DMA.
 	 */
 	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
-		DCSR(host->dma) = DCSR_RUN;
+		dma_async_issue_pending(chan);
 }
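The conversion in pxamci_setup_data() follows the canonical dmaengine slave sequence: configure the device side of the channel, prepare a scatter-gather descriptor, attach an optional completion callback, submit, then issue. A condensed sketch under assumed inputs; start_slave_rx() and its parameters are illustrative, not part of the driver.

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static int start_slave_rx(struct dma_chan *chan, struct scatterlist *sgl,
				  unsigned int nents, dma_addr_t fifo,
				  dma_async_tx_callback done, void *arg)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
			.src_maxburst	= 32,
		};
		struct dma_async_tx_descriptor *tx;
		int ret;

		/* 1: describe the device side of the transfer */
		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		/* 2: build a descriptor covering the scatterlist */
		tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		tx->callback = done;		/* 3: completion hook */
		tx->callback_param = arg;
		dmaengine_submit(tx);		/* 4: queue it */
		dma_async_issue_pending(chan);	/* 5: actually start */
		return 0;
	}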
 
 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
@@ -343,7 +343,7 @@
 		 * enable DMA late
 		 */
 		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
-			DCSR(host->dma) = DCSR_RUN;
+			dma_async_issue_pending(host->dma_chan_tx);
 	} else {
 		pxamci_finish_request(host, host->mrq);
 	}
@@ -354,13 +354,17 @@
 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
 {
 	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
 
 	if (!data)
 		return 0;
 
-	DCSR(host->dma) = 0;
-	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-		     host->dma_dir);
+	if (data->flags & MMC_DATA_READ)
+		chan = host->dma_chan_rx;
+	else
+		chan = host->dma_chan_tx;
+	dma_unmap_sg(chan->device->dev,
+		     data->sg, data->sg_len, host->dma_dir);
 
 	if (stat & STAT_READ_TIME_OUT)
 		data->error = -ETIMEDOUT;
@@ -552,20 +556,37 @@
 	.enable_sdio_irq	= pxamci_enable_sdio_irq,
 };
 
-static void pxamci_dma_irq(int dma, void *devid)
+static void pxamci_dma_irq(void *param)
 {
-	struct pxamci_host *host = devid;
-	int dcsr = DCSR(dma);
-	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
+	struct pxamci_host *host = param;
+	struct dma_tx_state state;
+	enum dma_status status;
+	struct dma_chan *chan;
+	unsigned long flags;
 
-	if (dcsr & DCSR_ENDINTR) {
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out_unlock;
+
+	if (host->data->flags & MMC_DATA_READ)
+		chan = host->dma_chan_rx;
+	else
+		chan = host->dma_chan_tx;
+
+	status = dmaengine_tx_status(chan, host->dma_cookie, &state);
+
+	if (likely(status == DMA_COMPLETE)) {
 		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
 	} else {
-		pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
-		       mmc_hostname(host->mmc), dma, dcsr);
+		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
+			host->data->flags & MMC_DATA_READ ? "rx" : "tx");
 		host->data->error = -EIO;
 		pxamci_data_done(host, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
@@ -625,7 +646,9 @@
 	struct mmc_host *mmc;
 	struct pxamci_host *host = NULL;
 	struct resource *r, *dmarx, *dmatx;
+	struct pxad_param param_rx, param_tx;
 	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+	dma_cap_mask_t mask;
 
 	ret = pxamci_of_init(pdev);
 	if (ret)
@@ -671,7 +694,6 @@
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
-	host->dma = -1;
 	host->pdata = pdev->dev.platform_data;
 	host->clkrt = CLKRT_OFF;
 
@@ -702,12 +724,6 @@
 				     MMC_CAP_SD_HIGHSPEED;
 	}
 
-	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
-	if (!host->sg_cpu) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	spin_lock_init(&host->lock);
 	host->res = r;
 	host->irq = irq;
@@ -728,32 +744,45 @@
 	writel(64, host->base + MMC_RESTO);
 	writel(host->imask, host->base + MMC_I_MASK);
 
-	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
-				    pxamci_dma_irq, host);
-	if (host->dma < 0) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
 	if (ret)
 		goto out;
 
 	platform_set_drvdata(pdev, mmc);
 
-	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!dmarx) {
-		ret = -ENXIO;
-		goto out;
+	if (!pdev->dev.of_node) {
+		dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+		if (!dmarx || !dmatx) {
+			ret = -ENXIO;
+			goto out;
+		}
+		param_rx.prio = PXAD_PRIO_LOWEST;
+		param_rx.drcmr = dmarx->start;
+		param_tx.prio = PXAD_PRIO_LOWEST;
+		param_tx.drcmr = dmatx->start;
 	}
-	host->dma_drcmrrx = dmarx->start;
 
-	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!dmatx) {
-		ret = -ENXIO;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_chan_rx =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &param_rx, &pdev->dev, "rx");
+	if (host->dma_chan_rx == NULL) {
+		dev_err(&pdev->dev, "unable to request rx dma channel\n");
+		ret = -ENODEV;
 		goto out;
 	}
-	host->dma_drcmrtx = dmatx->start;
+
+	host->dma_chan_tx =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &param_tx,  &pdev->dev, "tx");
+	if (host->dma_chan_tx == NULL) {
+		dev_err(&pdev->dev, "unable to request tx dma channel\n");
+		ret = -ENODEV;
+		goto out;
+	}
 
 	if (host->pdata) {
 		gpio_cd = host->pdata->gpio_card_detect;
@@ -814,12 +843,12 @@
 	gpio_free(gpio_power);
  out:
 	if (host) {
-		if (host->dma >= 0)
-			pxa_free_dma(host->dma);
+		if (host->dma_chan_rx)
+			dma_release_channel(host->dma_chan_rx);
+		if (host->dma_chan_tx)
+			dma_release_channel(host->dma_chan_tx);
 		if (host->base)
 			iounmap(host->base);
-		if (host->sg_cpu)
-			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 		if (host->clk)
 			clk_put(host->clk);
 	}
@@ -863,13 +892,12 @@
 		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
 		       host->base + MMC_I_MASK);
 
-		DRCMR(host->dma_drcmrrx) = 0;
-		DRCMR(host->dma_drcmrtx) = 0;
-
 		free_irq(host->irq, host);
-		pxa_free_dma(host->dma);
+		dmaengine_terminate_all(host->dma_chan_rx);
+		dmaengine_terminate_all(host->dma_chan_tx);
+		dma_release_channel(host->dma_chan_rx);
+		dma_release_channel(host->dma_chan_tx);
 		iounmap(host->base);
-		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 
 		clk_put(host->clk);
 
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index c6b9f64..886d230 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,7 @@
 #include "sdhci-esdhc.h"
 
 #define	ESDHC_CTRL_D3CD			0x08
+#define ESDHC_BURST_LEN_EN_INCR		(1 << 27)
 /* VENDOR SPEC register */
 #define ESDHC_VENDOR_SPEC		0xc0
 #define  ESDHC_VENDOR_SPEC_SDIO_QUIRK	(1 << 1)
@@ -44,6 +45,7 @@
 #define  ESDHC_MIX_CTRL_EXE_TUNE	(1 << 22)
 #define  ESDHC_MIX_CTRL_SMPCLK_SEL	(1 << 23)
 #define  ESDHC_MIX_CTRL_FBCLK_SEL	(1 << 25)
+#define  ESDHC_MIX_CTRL_HS400_EN	(1 << 26)
 /* Bits 3 and 6 are not SDHCI standard definitions */
 #define  ESDHC_MIX_CTRL_SDHCI_MASK	0xb7
 /* Tuning bits */
@@ -60,10 +62,21 @@
 #define  ESDHC_TUNE_CTRL_MIN		0
 #define  ESDHC_TUNE_CTRL_MAX		((1 << 7) - 1)
 
+/* strobe dll register */
+#define ESDHC_STROBE_DLL_CTRL		0x70
+#define ESDHC_STROBE_DLL_CTRL_ENABLE	(1 << 0)
+#define ESDHC_STROBE_DLL_CTRL_RESET	(1 << 1)
+#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT	3
+
+#define ESDHC_STROBE_DLL_STATUS		0x74
+#define ESDHC_STROBE_DLL_STS_REF_LOCK	(1 << 1)
+#define ESDHC_STROBE_DLL_STS_SLV_LOCK	0x1
+
 #define ESDHC_TUNING_CTRL		0xcc
 #define ESDHC_STD_TUNING_EN		(1 << 24)
 /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
 #define ESDHC_TUNING_START_TAP		0x1
+#define ESDHC_TUNING_STEP_SHIFT		16
 
 /* pinctrl state */
 #define ESDHC_PINCTRL_STATE_100MHZ	"state_100mhz"
@@ -120,6 +133,11 @@
 #define ESDHC_FLAG_ERR004536		BIT(7)
 /* The IP supports HS200 mode */
 #define ESDHC_FLAG_HS200		BIT(8)
+/* The IP supports HS400 mode */
+#define ESDHC_FLAG_HS400		BIT(9)
+
+/* A clock frequency higher than this rate requires strobe dll control */
+#define ESDHC_STROBE_DLL_CLK_FREQ	100000000
 
 struct esdhc_soc_data {
 	u32 flags;
@@ -156,6 +174,12 @@
 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
 };
 
+static struct esdhc_soc_data usdhc_imx7d_data = {
+	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+			| ESDHC_FLAG_HS400,
+};
+
 struct pltfm_imx_data {
 	u32 scratchpad;
 	struct pinctrl *pinctrl;
@@ -199,6 +223,7 @@
 	{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
 	{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
 	{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+	{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -274,6 +299,9 @@
 				val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
 					| SDHCI_SUPPORT_SDR50
 					| SDHCI_USE_SDR50_TUNING;
+
+			if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+				val |= SDHCI_SUPPORT_HS400;
 		}
 	}
 
@@ -448,6 +476,7 @@
 		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
 			u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
 			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+			u32 tuning_ctrl;
 			if (val & SDHCI_CTRL_TUNED_CLK) {
 				v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
 			} else {
@@ -458,6 +487,11 @@
 			if (val & SDHCI_CTRL_EXEC_TUNING) {
 				v |= ESDHC_MIX_CTRL_EXE_TUNE;
 				m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+				tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+				tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
+				if (imx_data->boarddata.tuning_step)
+					tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
+				writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
 			} else {
 				v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
 			}
@@ -774,6 +808,7 @@
 		break;
 	case MMC_TIMING_UHS_SDR104:
 	case MMC_TIMING_MMC_HS200:
+	case MMC_TIMING_MMC_HS400:
 		pinctrl = imx_data->pins_200mhz;
 		break;
 	default:
@@ -784,24 +819,68 @@
 	return pinctrl_select_state(imx_data->pinctrl, pinctrl);
 }
 
+/*
+ * For HS400 eMMC, there is a data_strobe line. This signal is generated
+ * by the device and used for data output and CRC status response output
+ * in HS400 mode. Its frequency follows the frequency of CLK generated by
+ * the host. The host receives data aligned to the edge of the data_strobe
+ * line. If the delay between the CLK line and the data_strobe line grows
+ * larger than one clock cycle, CLK and data_strobe become misaligned and
+ * read errors show up. So when CLK runs above 100MHz, each clock cycle is
+ * short enough that the host should configure the delay target.
+ */
+static void esdhc_set_strobe_dll(struct sdhci_host *host)
+{
+	u32 v;
+
+	if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
+		/* force a reset on strobe dll */
+		writel(ESDHC_STROBE_DLL_CTRL_RESET,
+			host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+		/*
+		 * enable strobe dll ctrl and adjust the delay target
+		 * for the uSDHC loopback read clock
+		 */
+		v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+			(7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+		writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+		/* wait 1us to make sure strobe dll status register stable */
+		udelay(1);
+		v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
+		if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
+			dev_warn(mmc_dev(host->mmc),
+				"warning! HS400 strobe DLL status REF not lock!\n");
+		if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+			dev_warn(mmc_dev(host->mmc),
+				"warning! HS400 strobe DLL status SLV not lock!\n");
+	}
+}
+
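A quick sanity check on the 100 MHz threshold used by esdhc_set_strobe_dll(): the shorter the clock period, the easier it is for CLK-to-strobe skew to exceed a full cycle. Trivial standalone arithmetic over a few illustrative rates.

	#include <stdio.h>

	int main(void)
	{
		unsigned int rates[] = { 52000000, 100000000, 200000000 };

		/* period shrinks from ~19 ns to 5 ns across these rates */
		for (int i = 0; i < 3; i++)
			printf("%3u MHz -> period %.1f ns\n",
			       rates[i] / 1000000, 1e9 / rates[i]);
		return 0;
	}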
 static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
 {
+	u32 m;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct pltfm_imx_data *imx_data = pltfm_host->priv;
 	struct esdhc_platform_data *boarddata = &imx_data->boarddata;
 
+	/* disable ddr mode and disable HS400 mode */
+	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+	m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN);
+	imx_data->is_ddr = 0;
+
 	switch (timing) {
 	case MMC_TIMING_UHS_SDR12:
 	case MMC_TIMING_UHS_SDR25:
 	case MMC_TIMING_UHS_SDR50:
 	case MMC_TIMING_UHS_SDR104:
 	case MMC_TIMING_MMC_HS200:
+		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
 		break;
 	case MMC_TIMING_UHS_DDR50:
 	case MMC_TIMING_MMC_DDR52:
-		writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
-				ESDHC_MIX_CTRL_DDREN,
-				host->ioaddr + ESDHC_MIX_CTRL);
+		m |= ESDHC_MIX_CTRL_DDREN;
+		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
 		imx_data->is_ddr = 1;
 		if (boarddata->delay_line) {
 			u32 v;
@@ -813,6 +892,12 @@
 			writel(v, host->ioaddr + ESDHC_DLL_CTRL);
 		}
 		break;
+	case MMC_TIMING_MMC_HS400:
+		m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
+		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+		imx_data->is_ddr = 1;
+		esdhc_set_strobe_dll(host);
+		break;
 	}
 
 	esdhc_change_pinstate(host, timing);
@@ -886,6 +971,8 @@
 	if (gpio_is_valid(boarddata->wp_gpio))
 		boarddata->wp_type = ESDHC_WP_GPIO;
 
+	of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
+
 	if (of_find_property(np, "no-1-8-v", NULL))
 		boarddata->support_vsel = false;
 	else
@@ -1073,10 +1160,26 @@
 	 * to something insane.  Change it back here.
 	 */
 	if (esdhc_is_usdhc(imx_data)) {
-		writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+		writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL);
+
 		host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
+		/*
+		 * The ROM code clears the burst_length_enable bit if this
+		 * usdhc is chosen to boot the system. Change it back here,
+		 * otherwise performance suffers badly. This bit enables the
+		 * burst length indicator for the external AHB2AXI bridge;
+		 * it matters especially for INCR transfers, since without
+		 * it the bridge does not know the burst length in advance,
+		 * and AHB INCR transfers can only be converted to singles
+		 * on the AXI side.
+		 */
+		writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+			| ESDHC_BURST_LEN_EN_INCR,
+			host->ioaddr + SDHCI_HOST_CONTROL);
+
 		if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
 			host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
 
@@ -1100,6 +1203,9 @@
 	if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
 		host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
+	if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+		host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+
 	if (of_id)
 		err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
 	else
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a870c42..163ac99 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -21,7 +21,8 @@
 #define ESDHC_DEFAULT_QUIRKS	(SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
 				SDHCI_QUIRK_NO_BUSY_IRQ | \
 				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
-				SDHCI_QUIRK_PIO_NEEDS_DELAY)
+				SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+				SDHCI_QUIRK_NO_HISPD_BIT)
 
 #define ESDHC_SYSTEM_CONTROL	0x2c
 #define ESDHC_CLOCK_MASK	0x0000fff0
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4a09f76..4bcee03 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -489,6 +489,11 @@
 		goto pclk_disable;
 	}
 
+	/* Vote for maximum clock rate for maximum performance */
+	ret = clk_set_rate(msm_host->clk, INT_MAX);
+	if (ret)
+		dev_warn(&pdev->dev, "core clock boost failed\n");
+
 	ret = clk_prepare_enable(msm_host->clk);
 	if (ret)
 		goto pclk_disable;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 21c0c08..75379cb 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -63,6 +63,9 @@
 
 static struct sdhci_pltfm_data sdhci_arasan_pdata = {
 	.ops = &sdhci_arasan_ops,
+	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+			SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -214,6 +217,7 @@
 
 static const struct of_device_id sdhci_arasan_of_match[] = {
 	{ .compatible = "arasan,sdhci-8.9a" },
+	{ .compatible = "arasan,sdhci-5.1" },
 	{ .compatible = "arasan,sdhci-4.9a" },
 	{ }
 };
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
new file mode 100644
index 0000000..d155664
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -0,0 +1,191 @@
+/*
+ * Atmel SDMMC controller driver.
+ *
+ * Copyright (C) 2015 Atmel,
+ *		 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDMMC_CACR	0x230
+#define		SDMMC_CACR_CAPWREN	BIT(0)
+#define		SDMMC_CACR_KEY		(0x46 << 8)
+
+struct sdhci_at91_priv {
+	struct clk *hclock;
+	struct clk *gck;
+	struct clk *mainck;
+};
+
+static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+	.set_clock		= sdhci_set_clock,
+	.set_bus_width		= sdhci_set_bus_width,
+	.reset			= sdhci_reset,
+	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+	.ops = &sdhci_at91_sama5d2_ops,
+};
+
+static const struct of_device_id sdhci_at91_dt_match[] = {
+	{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+	{}
+};
+
+static int sdhci_at91_probe(struct platform_device *pdev)
+{
+	const struct of_device_id	*match;
+	const struct sdhci_pltfm_data	*soc_data;
+	struct sdhci_host		*host;
+	struct sdhci_pltfm_host		*pltfm_host;
+	struct sdhci_at91_priv		*priv;
+	unsigned int			caps0, caps1;
+	unsigned int			clk_base, clk_mul;
+	unsigned int			gck_rate, real_gck_rate;
+	int				ret;
+
+	match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+	soc_data = match->data;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "unable to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
+	if (IS_ERR(priv->mainck)) {
+		dev_err(&pdev->dev, "failed to get baseclk\n");
+		return PTR_ERR(priv->mainck);
+	}
+
+	priv->hclock = devm_clk_get(&pdev->dev, "hclock");
+	if (IS_ERR(priv->hclock)) {
+		dev_err(&pdev->dev, "failed to get hclock\n");
+		return PTR_ERR(priv->hclock);
+	}
+
+	priv->gck = devm_clk_get(&pdev->dev, "multclk");
+	if (IS_ERR(priv->gck)) {
+		dev_err(&pdev->dev, "failed to get multclk\n");
+		return PTR_ERR(priv->gck);
+	}
+
+	host = sdhci_pltfm_init(pdev, soc_data, 0);
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	/*
+	 * The mult clock is provided as a generated clock by the PMC
+	 * controller. In order to set the rate of gck, we have to get the
+	 * base clock rate and the clock mult from the capabilities.
+	 */
+	clk_prepare_enable(priv->hclock);
+	caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
+	caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
+	clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+	clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
+	gck_rate = clk_base * 1000000 * (clk_mul + 1);
+	ret = clk_set_rate(priv->gck, gck_rate);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to set gck");
+		goto hclock_disable_unprepare;
+		return -EINVAL;
+	}
+	/*
+	 * We need to check whether we got the requested rate for gck,
+	 * because in some cases this rate may not be supported. If that
+	 * happens, the rate is the closest one gck can provide, and we
+	 * have to update the clk mul value accordingly.
+	 */
+	real_gck_rate = clk_get_rate(priv->gck);
+	if (real_gck_rate != gck_rate) {
+		clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
+		caps1 &= (~SDHCI_CLOCK_MUL_MASK);
+		caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK);
+		/* Set capabilities in r/w mode. */
+		writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+		writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+		/* Set capabilities in ro mode. */
+		writel(0, host->ioaddr + SDMMC_CACR);
+		dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n",
+			 clk_mul, real_gck_rate);
+	}
+
+	clk_prepare_enable(priv->mainck);
+	clk_prepare_enable(priv->gck);
+
+	pltfm_host = sdhci_priv(host);
+	pltfm_host->priv = priv;
+
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto clocks_disable_unprepare;
+
+	sdhci_get_of_property(pdev);
+
+	ret = sdhci_add_host(host);
+	if (ret)
+		goto clocks_disable_unprepare;
+
+	return 0;
+
+clocks_disable_unprepare:
+	clk_disable_unprepare(priv->gck);
+	clk_disable_unprepare(priv->mainck);
+hclock_disable_unprepare:
+	clk_disable_unprepare(priv->hclock);
+	sdhci_pltfm_free(pdev);
+	return ret;
+}
+
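The clk_mul fix-up in sdhci_at91_probe() is easiest to see with numbers. A standalone illustration with made-up rates; the real values come from the capability registers and clk_get_rate().

	#include <stdio.h>

	int main(void)
	{
		unsigned int clk_base = 12;	/* MHz, as read from caps0 */
		unsigned int clk_mul = 40;	/* as read from caps1 */
		unsigned int gck_rate, real_gck_rate;

		gck_rate = clk_base * 1000000 * (clk_mul + 1);	/* 492 MHz wanted */
		real_gck_rate = 480000000;	/* closest rate the PMC delivered */

		/* rewrite the advertised multiplier to match reality */
		if (real_gck_rate != gck_rate)
			clk_mul = real_gck_rate / (clk_base * 1000000) - 1;

		printf("advertised mul becomes %u\n", clk_mul);	/* 39 */
		return 0;
	}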
+static int sdhci_at91_remove(struct platform_device *pdev)
+{
+	struct sdhci_host	*host = platform_get_drvdata(pdev);
+	struct sdhci_pltfm_host	*pltfm_host = sdhci_priv(host);
+	struct sdhci_at91_priv	*priv = pltfm_host->priv;
+
+	sdhci_pltfm_unregister(pdev);
+
+	clk_disable_unprepare(priv->gck);
+	clk_disable_unprepare(priv->hclock);
+	clk_disable_unprepare(priv->mainck);
+
+	return 0;
+}
+
+static struct platform_driver sdhci_at91_driver = {
+	.driver		= {
+		.name	= "sdhci-at91",
+		.of_match_table = sdhci_at91_dt_match,
+		.pm	= SDHCI_PLTFM_PMOPS,
+	},
+	.probe		= sdhci_at91_probe,
+	.remove		= sdhci_at91_remove,
+};
+
+module_platform_driver(sdhci_at91_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for at91");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 797be75..653f335 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -208,6 +208,12 @@
 	if (clock == 0)
 		return;
 
+	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+	temp = esdhc_readw(host, SDHCI_HOST_VERSION);
+	temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+	if (temp < VENDOR_V_23)
+		pre_div = 2;
+
 	/* Workaround to reduce the clock frequency for p1010 esdhc */
 	if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
 		if (clock > 20000000)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 94f54d2..b3b0a3e 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -618,6 +618,7 @@
 static const struct sdhci_pci_fixes sdhci_o2 = {
 	.probe = sdhci_pci_o2_probe,
 	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
 	.probe_slot = sdhci_pci_o2_probe_slot,
 	.resume = sdhci_pci_o2_resume,
 };
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 0110bae..8842945 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -161,8 +161,8 @@
 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
 		SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
 		SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
-		SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-		SDHCI_QUIRK_DELAY_AFTER_POWER,
+		SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
 static int sdhci_sirf_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1dbe932..64b7fdb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -54,8 +54,7 @@
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
-					struct mmc_data *data,
-					struct sdhci_host_next *next);
+					struct mmc_data *data);
 static int sdhci_do_get_cd(struct sdhci_host *host);
 
 #ifdef CONFIG_PM
@@ -207,8 +206,7 @@
 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
 {
 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
-		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
-			SDHCI_CARD_PRESENT))
+		if (!sdhci_do_get_cd(host))
 			return;
 	}
 
@@ -496,7 +494,7 @@
 		goto fail;
 	BUG_ON(host->align_addr & host->align_mask);
 
-	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
+	host->sg_count = sdhci_pre_dma_transfer(host, data);
 	if (host->sg_count < 0)
 		goto unmap_align;
 
@@ -635,9 +633,11 @@
 		}
 	}
 
-	if (!data->host_cookie)
+	if (data->host_cookie == COOKIE_MAPPED) {
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 			data->sg_len, direction);
+		data->host_cookie = COOKIE_UNMAPPED;
+	}
 }
 
 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -833,7 +833,7 @@
 		} else {
 			int sg_cnt;
 
-			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+			sg_cnt = sdhci_pre_dma_transfer(host, data);
 			if (sg_cnt <= 0) {
 				/*
 				 * This only happens when someone fed
@@ -949,11 +949,13 @@
 		if (host->flags & SDHCI_USE_ADMA)
 			sdhci_adma_table_post(host, data);
 		else {
-			if (!data->host_cookie)
+			if (data->host_cookie == COOKIE_MAPPED) {
 				dma_unmap_sg(mmc_dev(host->mmc),
 					data->sg, data->sg_len,
 					(data->flags & MMC_DATA_READ) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE);
+				data->host_cookie = COOKIE_UNMAPPED;
+			}
 		}
 	}
 
@@ -1132,6 +1134,7 @@
 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
 		break;
 	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
 		break;
 	case MMC_TIMING_MMC_HS400:
@@ -1152,6 +1155,7 @@
 	int real_div = div, clk_mul = 1;
 	u16 clk = 0;
 	unsigned long timeout;
+	bool switch_base_clk = false;
 
 	host->mmc->actual_clock = 0;
 
@@ -1189,15 +1193,25 @@
 					<= clock)
 					break;
 			}
-			/*
-			 * Set Programmable Clock Mode in the Clock
-			 * Control register.
-			 */
-			clk = SDHCI_PROG_CLOCK_MODE;
-			real_div = div;
-			clk_mul = host->clk_mul;
-			div--;
-		} else {
+			if ((host->max_clk * host->clk_mul / div) <= clock) {
+				/*
+				 * Set Programmable Clock Mode in the Clock
+				 * Control register.
+				 */
+				clk = SDHCI_PROG_CLOCK_MODE;
+				real_div = div;
+				clk_mul = host->clk_mul;
+				div--;
+			} else {
+				/*
+				 * Even the largest divisor can be too small
+				 * to reach the requested clock speed; in
+				 * that case, fall back to the base clock.
+				 */
+				switch_base_clk = true;
+			}
+		}
+
+		if (!host->clk_mul || switch_base_clk) {
 			/* Version 3.00 divisors must be a multiple of 2. */
 			if (host->max_clk <= clock)
 				div = 1;
@@ -1210,6 +1224,9 @@
 			}
 			real_div = div;
 			div >>= 1;
+			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
+				&& !div && host->max_clk <= 25000000)
+				div = 1;
 		}
 	} else {
 		/* Version 2.00 divisors must be a power of 2. */
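The interesting part of this hunk is the new fallback: if even the largest programmable-clock divisor cannot reach the requested rate, the code drops back to the base clock. A toy standalone recreation with illustrative numbers; the real routine reads these from host state.

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int max_clk = 100000000;	/* base clock, 100 MHz */
		unsigned int clk_mul = 8;		/* programmable multiplier */
		unsigned int clock = 400000;		/* 400 kHz card init clock */
		unsigned int div;
		bool switch_base_clk = false;

		/* Programmable clock: divisors 1..1024. */
		for (div = 1; div <= 1024; div++)
			if (max_clk * clk_mul / div <= clock)
				break;
		if (div > 1024 || max_clk * clk_mul / div > clock)
			switch_base_clk = true;

		if (switch_base_clk) {
			/* Base clock: even divisors 2..2046. */
			for (div = 2; div < 2046; div += 2)
				if (max_clk / div <= clock)
					break;
		}
		printf("use %s, div=%u -> %u Hz\n",
		       switch_base_clk ? "base clk" : "prog clk", div,
		       switch_base_clk ? max_clk / div
				       : max_clk * clk_mul / div);
		return 0;
	}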
@@ -1559,7 +1576,8 @@
 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
-				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
+				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
+				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
 			u16 preset;
 
 			sdhci_enable_preset_value(host, true);
@@ -1601,15 +1619,21 @@
 	if (host->flags & SDHCI_DEVICE_DEAD)
 		return 0;
 
-	/* If polling/nonremovable, assume that the card is always present. */
-	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
-	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+	/* If nonremovable, assume that the card is always present. */
+	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
 		return 1;
 
-	/* Try slot gpio detect */
+	/*
+	 * Try slot gpio detect; if defined, it takes precedence
+	 * over the built-in controller functionality.
+	 */
 	if (!IS_ERR_VALUE(gpio_cd))
 		return !!gpio_cd;
 
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		return 1;
+
 	/* Host native card detect */
 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
 }
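The reordering in sdhci_do_get_cd() establishes a clear precedence: nonremovable beats everything, a card-detect GPIO beats the controller, and only then does the broken-card-detection polling assumption apply. A standalone decision helper restating that order; all parameters are illustrative stand-ins for the caps, GPIO and quirk checks.

	#include <stdbool.h>

	static bool card_present(bool nonremovable, int gpio_cd, bool polling,
				 bool native_present)
	{
		if (nonremovable)
			return true;		/* eMMC etc.: always there */
		if (gpio_cd >= 0)
			return gpio_cd > 0;	/* GPIO wins over the controller */
		if (polling)
			return true;		/* broken CD: polling sorts it out */
		return native_present;		/* SDHCI PRESENT_STATE bit */
	}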
@@ -2097,49 +2121,36 @@
 	struct mmc_data *data = mrq->data;
 
 	if (host->flags & SDHCI_REQ_USE_DMA) {
-		if (data->host_cookie)
+		if (data->host_cookie == COOKIE_GIVEN ||
+				data->host_cookie == COOKIE_MAPPED)
 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
 					 data->flags & MMC_DATA_WRITE ?
 					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mrq->data->host_cookie = 0;
+		data->host_cookie = COOKIE_UNMAPPED;
 	}
 }
 
 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
-				       struct mmc_data *data,
-				       struct sdhci_host_next *next)
+				       struct mmc_data *data)
 {
 	int sg_count;
 
-	if (!next && data->host_cookie &&
-	    data->host_cookie != host->next_data.cookie) {
-		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
-			__func__, data->host_cookie, host->next_data.cookie);
-		data->host_cookie = 0;
+	if (data->host_cookie == COOKIE_MAPPED) {
+		data->host_cookie = COOKIE_GIVEN;
+		return data->sg_count;
 	}
 
-	/* Check if next job is already prepared */
-	if (next ||
-	    (!next && data->host_cookie != host->next_data.cookie)) {
-		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len,
-				     data->flags & MMC_DATA_WRITE ?
-				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	WARN_ON(data->host_cookie == COOKIE_GIVEN);
 
-	} else {
-		sg_count = host->next_data.sg_count;
-		host->next_data.sg_count = 0;
-	}
-
+	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+				data->flags & MMC_DATA_WRITE ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	if (sg_count == 0)
-		return -EINVAL;
+		return -ENOSPC;
 
-	if (next) {
-		next->sg_count = sg_count;
-		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
-	} else
-		host->sg_count = sg_count;
+	data->sg_count = sg_count;
+	data->host_cookie = COOKIE_MAPPED;
 
 	return sg_count;
 }
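The three-state cookie replaces the old signed-counter handshake. The lifecycle: sdhci_pre_req() maps the scatterlist ahead of time (UNMAPPED to MAPPED); when the request actually starts, sdhci_pre_dma_transfer() hands over a MAPPED buffer without remapping (MAPPED to GIVEN); sdhci_post_req() unmaps and resets. A compact standalone trace of those transitions; the enum mirrors sdhci.h, the driver functions appear only in comments.

	#include <stdio.h>

	enum sdhci_cookie { COOKIE_UNMAPPED, COOKIE_MAPPED, COOKIE_GIVEN };

	static const char *name(enum sdhci_cookie c)
	{
		static const char *n[] = { "UNMAPPED", "MAPPED", "GIVEN" };
		return n[c];
	}

	int main(void)
	{
		enum sdhci_cookie c = COOKIE_UNMAPPED;

		printf("start:    %s\n", name(c));
		c = COOKIE_MAPPED;	/* sdhci_pre_req(): dma_map_sg() up front */
		printf("pre_req:  %s\n", name(c));
		if (c == COOKIE_MAPPED)
			c = COOKIE_GIVEN; /* sdhci_pre_dma_transfer(): reuse it */
		printf("start rq: %s\n", name(c));
		c = COOKIE_UNMAPPED;	/* sdhci_post_req(): dma_unmap_sg() */
		printf("post_req: %s\n", name(c));
		return 0;
	}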
@@ -2149,16 +2160,10 @@
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 
-	if (mrq->data->host_cookie) {
-		mrq->data->host_cookie = 0;
-		return;
-	}
+	mrq->data->host_cookie = COOKIE_UNMAPPED;
 
 	if (host->flags & SDHCI_REQ_USE_DMA)
-		if (sdhci_pre_dma_transfer(host,
-					mrq->data,
-					&host->next_data) < 0)
-			mrq->data->host_cookie = 0;
+		sdhci_pre_dma_transfer(host, mrq->data);
 }
 
 static void sdhci_card_event(struct mmc_host *mmc)
@@ -3030,7 +3035,6 @@
 		host->max_clk = host->ops->get_max_clock(host);
 	}
 
-	host->next_data.cookie = 1;
 	/*
 	 * In case of Host Controller v3.00, find out whether clock
 	 * multiplier is supported.
@@ -3126,7 +3130,8 @@
 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
-	    !(mmc->caps & MMC_CAP_NONREMOVABLE))
+	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/* If there are external regulators, get them */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5521d29..7c02ff4 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -309,9 +309,10 @@
  */
 #define SDHCI_MAX_SEGS		128
 
-struct sdhci_host_next {
-	unsigned int	sg_count;
-	s32		cookie;
+enum sdhci_cookie {
+	COOKIE_UNMAPPED,
+	COOKIE_MAPPED,
+	COOKIE_GIVEN,
 };
 
 struct sdhci_host {
@@ -409,6 +410,8 @@
 #define SDHCI_QUIRK2_SUPPORT_SINGLE			(1<<13)
 /* Controller broken with using ACMD23 */
 #define SDHCI_QUIRK2_ACMD23_BROKEN			(1<<14)
+/* Broken Clock divider zero in controller */
+#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN		(1<<15)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -503,7 +506,6 @@
 	unsigned int		tuning_mode;	/* Re-tuning mode supported by host */
 #define SDHCI_TUNING_MODE_1	0
 
-	struct sdhci_host_next	next_data;
 	unsigned long private[0] ____cacheline_aligned;
 };
 
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5a1fdd4..ad9ffea 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1632,7 +1632,9 @@
 {
 	struct sh_mmcif_host *host = dev_get_drvdata(dev);
 
+	pm_runtime_get_sync(dev);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+	pm_runtime_put(dev);
 
 	return 0;
 }
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 4d3e1ff..a7b7a67 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -595,7 +595,7 @@
 
 static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
 {
-	unsigned long expire = jiffies + msecs_to_jiffies(250);
+	unsigned long expire = jiffies + msecs_to_jiffies(750);
 	u32 rval;
 
 	rval = mmc_readl(host, REG_CLKCR);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index e3dcf31..a10fde4 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -83,6 +83,8 @@
 	return --host->sg_len;
 }
 
+#define CMDREQ_TIMEOUT	5000
+
 #ifdef CONFIG_MMC_DEBUG
 
 #define STATUS_TO_TEXT(a, status, i) \
@@ -230,7 +232,7 @@
 	 */
 	if (IS_ERR_OR_NULL(mrq)
 	    || time_is_after_jiffies(host->last_req_ts +
-		msecs_to_jiffies(2000))) {
+		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
 		spin_unlock_irqrestore(&host->lock, flags);
 		return;
 	}
@@ -818,7 +820,7 @@
 	ret = tmio_mmc_start_command(host, mrq->cmd);
 	if (!ret) {
 		schedule_delayed_work(&host->delayed_reset_work,
-				      msecs_to_jiffies(2000));
+				      msecs_to_jiffies(CMDREQ_TIMEOUT));
 		return;
 	}
 
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 54b082b..4498e92 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1611,7 +1611,7 @@
 		return IRQ_NONE;
 
 	/* Ack */
-	usdhi6_write(host, USDHI6_SD_INFO1, !status);
+	usdhi6_write(host, USDHI6_SD_INFO1, ~status);
 
 	if (!work_pending(&mmc->detect.work) &&
 	    (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
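The one-character change from !status to ~status is worth spelling out: these status bits appear to be acknowledged by writing 0 to exactly the bits being cleared, so the intended mask is the bitwise complement of the pending bits, whereas logical negation collapses to 0 and would wipe every bit at once. A standalone demonstration.

	#include <stdio.h>

	int main(void)
	{
		unsigned int status = 0x00000005;	/* two bits pending */

		printf("~status = 0x%08x\n", ~status);			/* 0xfffffffa */
		printf("!status = 0x%08x\n", (unsigned int)!status);	/* 0x00000000 */
		return 0;
	}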
@@ -1634,6 +1634,7 @@
 	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
 	struct mmc_request *mrq = host->mrq;
 	struct mmc_data *data = mrq ? mrq->data : NULL;
+	struct scatterlist *sg = data ? (host->sg ?: data->sg) : NULL;
 
 	dev_warn(mmc_dev(host->mmc),
 		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
@@ -1669,7 +1670,7 @@
 			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
 			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
 			host->offset, data->blocks, data->blksz, data->sg_len,
-			sg_dma_len(host->sg), host->sg->offset);
+			sg_dma_len(sg), sg->offset);
 		usdhi6_sg_unmap(host, true);
 		/*
 		 * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
@@ -1715,12 +1716,14 @@
 	if (!mmc)
 		return -ENOMEM;
 
+	ret = mmc_regulator_get_supply(mmc);
+	if (ret == -EPROBE_DEFER)
+		goto e_free_mmc;
+
 	ret = mmc_of_parse(mmc);
 	if (ret < 0)
 		goto e_free_mmc;
 
-	mmc_regulator_get_supply(mmc);
-
 	host		= mmc_priv(mmc);
 	host->mmc	= mmc;
 	host->wait	= USDHI6_WAIT_FOR_REQUEST;
@@ -1734,8 +1737,10 @@
 	}
 
 	host->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(host->clk))
+	if (IS_ERR(host->clk)) {
+		ret = PTR_ERR(host->clk);
 		goto e_free_mmc;
+	}
 
 	host->imclk = clk_get_rate(host->clk);
 
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 2fc4957c..a70eb83 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -41,7 +41,7 @@
 #include <linux/fs.h>
 #include <linux/ioctl.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/mtd/mtd.h>
 
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 7da266a..0802158 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -24,7 +24,7 @@
 #include <linux/rslib.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 51394e5..a4e27e8 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -238,8 +238,8 @@
 
 	ifc_nand_ctrl->page = page_addr;
 	/* Program ROW0/COL0 */
-	iowrite32be(page_addr, &ifc->ifc_nand.row0);
-	iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+	ifc_out32(page_addr, &ifc->ifc_nand.row0);
+	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
 
 	buf_num = page_addr & priv->bufnum_mask;
 
@@ -301,19 +301,19 @@
 	int i;
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
-		    &ifc->ifc_nand.nand_csel);
+	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+		  &ifc->ifc_nand.nand_csel);
 
 	dev_vdbg(priv->dev,
 			"%s: fir0=%08x fcr0=%08x\n",
 			__func__,
-			ioread32be(&ifc->ifc_nand.nand_fir0),
-			ioread32be(&ifc->ifc_nand.nand_fcr0));
+			ifc_in32(&ifc->ifc_nand.nand_fir0),
+			ifc_in32(&ifc->ifc_nand.nand_fcr0));
 
 	ctrl->nand_stat = 0;
 
 	/* start read/write seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -336,7 +336,7 @@
 		int sector_end = sector + chip->ecc.steps - 1;
 
 		for (i = sector / 4; i <= sector_end / 4; i++)
-			eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
 
 		for (i = sector; i <= sector_end; i++) {
 			errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -376,33 +376,33 @@
 
 	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
 	if (mtd->writesize > 512) {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
-		iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 	} else {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
 		if (oob)
-			iowrite32be(NAND_CMD_READOOB <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READOOB <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 		else
-			iowrite32be(NAND_CMD_READ0 <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READ0 <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 	}
 }
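The mechanical iowrite32be() to ifc_out32() conversion running through this file lets one driver serve both big- and little-endian instantiations of the IFC block. A hedged sketch of what such an accessor can look like; sketch_ifc_out32() and the little_endian flag are stand-ins for illustration, the real helpers live in the IFC header.

	#include <linux/io.h>
	#include <linux/types.h>

	static bool little_endian;	/* stand-in for the controller's mode */

	static inline void sketch_ifc_out32(u32 val, void __iomem *addr)
	{
		if (little_endian)
			iowrite32(val, addr);	/* LE-instantiated IFC block */
		else
			iowrite32be(val, addr);	/* classic big-endian layout */
	}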
 
@@ -422,7 +422,7 @@
 	switch (command) {
 	/* READ0 read the entire buffer to use hardware ECC. */
 	case NAND_CMD_READ0:
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, page_addr, 0);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -437,7 +437,7 @@
 
 	/* READOOB reads only the OOB because no ECC is performed. */
 	case NAND_CMD_READOOB:
-		iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, column, page_addr, 1);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -453,19 +453,19 @@
 		if (command == NAND_CMD_PARAM)
 			timing = IFC_FIR_OP_RBCD;
 
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (timing << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(column, &ifc->ifc_nand.row3);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(column, &ifc->ifc_nand.row3);
 
 		/*
 		 * although currently it's 8 bytes for READID, we always read
 		 * the maximum 256 bytes(for PARAM)
 		 */
-		iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 256;
 
 		set_addr(mtd, 0, 0, 0);
@@ -480,16 +480,16 @@
 
 	/* ERASE2 uses the block and page address from ERASE1 */
 	case NAND_CMD_ERASE2:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
 
-		iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 0;
 		fsl_ifc_run_command(mtd);
 		return;
@@ -506,19 +506,18 @@
 				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
 				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
 
-			iowrite32be(
-				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				 (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
-				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
-				 &ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
-				 &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+				&ifc->ifc_nand.nand_fir0);
+			ifc_out32(
+				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 		} else {
 			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
 					IFC_NAND_FCR0_CMD1_SHIFT) |
@@ -527,20 +526,19 @@
 				    (NAND_CMD_STATUS <<
 					IFC_NAND_FCR0_CMD3_SHIFT));
 
-			iowrite32be(
+			ifc_out32(
 				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
 				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
 				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
 				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
 				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
 				&ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP7_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
-				  &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 
 			if (column >= mtd->writesize)
 				nand_fcr0 |=
@@ -555,7 +553,7 @@
 			column -= mtd->writesize;
 			ifc_nand_ctrl->oob = 1;
 		}
-		iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
 		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
 		return;
 	}
@@ -563,24 +561,26 @@
 	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
 	case NAND_CMD_PAGEPROG: {
 		if (ifc_nand_ctrl->oob) {
-			iowrite32be(ifc_nand_ctrl->index -
-				    ifc_nand_ctrl->column,
-				    &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(ifc_nand_ctrl->index -
+				  ifc_nand_ctrl->column,
+				  &ifc->ifc_nand.nand_fbcr);
 		} else {
-			iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		}
 
 		fsl_ifc_run_command(mtd);
 		return;
 	}
 
-	case NAND_CMD_STATUS:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	case NAND_CMD_STATUS: {
+		void __iomem *addr;
+
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, 0, 0);
 		ifc_nand_ctrl->read_bytes = 1;
 
@@ -590,17 +590,19 @@
 		 * The chip always seems to report that it is
 		 * write-protected, even when it is not.
 		 */
+		addr = ifc_nand_ctrl->addr;
 		if (chip->options & NAND_BUSWIDTH_16)
-			setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
 		else
-			setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
 		return;
+	}
 
 	case NAND_CMD_RESET:
-		iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
 		fsl_ifc_run_command(mtd);
 		return;
 
@@ -658,7 +660,7 @@
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
 		offset = ifc_nand_ctrl->index++;
-		return in_8(ifc_nand_ctrl->addr + offset);
+		return ifc_in8(ifc_nand_ctrl->addr + offset);
 	}
 
 	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
@@ -680,7 +682,7 @@
 	 * next byte.
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
-		data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
 		ifc_nand_ctrl->index += 2;
 		return (uint8_t) data;
 	}
@@ -726,18 +728,18 @@
 	u32 nand_fsr;
 
 	/* Use READ_STATUS command, but wait for the device to be ready */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 	set_addr(mtd, 0, 0, 0);
 	ifc_nand_ctrl->read_bytes = 1;
 
 	fsl_ifc_run_command(mtd);
 
-	nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
 
 	/*
 	 * The chip always seems to report that it is
@@ -829,34 +831,34 @@
 	uint32_t cs = priv->bank;
 
 	/* Save CSOR and CSOR_ext */
-	csor = ioread32be(&ifc->csor_cs[cs].csor);
-	csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+	csor = ifc_in32(&ifc->csor_cs[cs].csor);
+	csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
 
 	/* change PageSize to 8K and SpareSize to 1K */
 	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-	iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
-	iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
+	ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
 
 	/* READID */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(0x0, &ifc->ifc_nand.row3);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(0x0, &ifc->ifc_nand.row3);
 
-	iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
 
 	/* Program ROW0/COL0 */
-	iowrite32be(0x0, &ifc->ifc_nand.row0);
-	iowrite32be(0x0, &ifc->ifc_nand.col0);
+	ifc_out32(0x0, &ifc->ifc_nand.row0);
+	ifc_out32(0x0, &ifc->ifc_nand.col0);
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+	ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
 
 	/* start read seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -866,8 +868,8 @@
 		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
 
 	/* Restore CSOR and CSOR_ext */
-	iowrite32be(csor, &ifc->csor_cs[cs].csor);
-	iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor, &ifc->csor_cs[cs].csor);
+	ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
 }
 
 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -884,7 +886,7 @@
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
-	if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+	if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
 		chip->read_byte = fsl_ifc_read_byte16;
 	else
 		chip->read_byte = fsl_ifc_read_byte;
@@ -898,13 +900,13 @@
 	chip->bbt_td = &bbt_main_descr;
 	chip->bbt_md = &bbt_mirror_descr;
 
-	iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+	ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
 
 	/* set up nand options */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->options = NAND_NO_SUBPAGE_WRITE;
 
-	if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+	if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
 		chip->read_byte = fsl_ifc_read_byte16;
 		chip->options |= NAND_BUSWIDTH_16;
 	} else {
@@ -917,7 +919,7 @@
 	chip->ecc.read_page = fsl_ifc_read_page;
 	chip->ecc.write_page = fsl_ifc_write_page;
 
-	csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+	csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
 
 	/* Hardware generates ECC per 512 Bytes */
 	chip->ecc.size = 512;
@@ -1006,7 +1008,7 @@
 static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
 		      phys_addr_t addr)
 {
-	u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+	u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
 
 	if (!(cspr & CSPR_V))
 		return 0;
@@ -1092,16 +1094,16 @@
 
 	dev_set_drvdata(priv->dev, priv);
 
-	iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
-		    IFC_NAND_EVTER_EN_FTOER_EN |
-		    IFC_NAND_EVTER_EN_WPER_EN,
-		    &ifc->ifc_nand.nand_evter_en);
+	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+		  IFC_NAND_EVTER_EN_FTOER_EN |
+		  IFC_NAND_EVTER_EN_WPER_EN,
+		  &ifc->ifc_nand.nand_evter_en);
 
 	/* enable NAND Machine Interrupts */
-	iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
-		    IFC_NAND_EVTER_INTR_FTOERIR_EN |
-		    IFC_NAND_EVTER_INTR_WPERIR_EN,
-		    &ifc->ifc_nand.nand_evter_intr_en);
+	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
+		  IFC_NAND_EVTER_INTR_WPERIR_EN,
+		  &ifc->ifc_nand.nand_evter_intr_en);
 	priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
 	if (!priv->mtd.name) {
 		ret = -ENOMEM;
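
[Annotation: the hunks above swap the hard-coded big-endian accessors (iowrite32be/ioread32be, in_8/in_be16, setbits8/setbits16) for ifc_in*/ifc_out* wrappers, letting one driver serve IFC controllers wired either big- or little-endian. A minimal sketch of such a wrapper pair, with 'ifc_big_endian' as a hypothetical stand-in for however the controller's register endianness is actually probed; this is illustrative only, not the real include/linux/fsl_ifc.h definition:

	static inline u32 ifc_in32(void __iomem *addr)
	{
		return ifc_big_endian ? ioread32be(addr) : ioread32(addr);
	}

	static inline void ifc_out32(u32 val, void __iomem *addr)
	{
		if (ifc_big_endian)
			iowrite32be(val, addr);
		else
			iowrite32(val, addr);
	}
]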
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 32a216d..ab7bda0 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -18,7 +18,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/onenand.h>
 #include <linux/mtd/partitions.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /*
  * Note: Driver name and platform data format have been updated!
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index dd94300..cba3d9f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -41,6 +41,8 @@
 #include <linux/gpio.h>
 #include <linux/atomic.h>
 
+#include <asm/mach-ar7/ar7.h>
+
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
 MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 2f77f1d..fac80c6 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -318,19 +318,15 @@
 	struct i2c_client *client = phy->i2c_dev;
 	struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
 
-	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
-	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
-	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW);
+	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW);
+	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
 
 	if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
 		nfc_err(&client->dev, "No GPIOs\n");
 		return -EINVAL;
 	}
 
-	gpiod_direction_output(gpiod_en, 0);
-	gpiod_direction_output(gpiod_fw, 0);
-	gpiod_direction_input(gpiod_irq);
-
 	client->irq = gpiod_to_irq(gpiod_irq);
 	if (client->irq < 0) {
 		nfc_err(&client->dev, "No IRQ\n");
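
[Annotation: this hunk tracks the gpiod API change that folds the initial line direction into the request itself: instead of requesting a descriptor and then calling gpiod_direction_output()/gpiod_direction_input(), the caller passes GPIOD_OUT_LOW or GPIOD_IN as a fourth argument and receives a descriptor that is already configured. In shorthand:

	/* before: two steps, with a window where the line is unconfigured */
	desc = devm_gpiod_get_index(dev, NULL, 0);
	gpiod_direction_input(desc);

	/* after: direction established atomically at request time */
	desc = devm_gpiod_get_index(dev, NULL, 0, GPIOD_IN);
]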
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 72226ac..53c1162 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -21,6 +21,7 @@
 	default LIBNVDIMM
 	depends on HAS_IOMEM
 	select ND_BTT if BTT
+	select ND_PFN if NVDIMM_PFN
 	help
 	  Memory ranges for PMEM are described by either an NFIT
 	  (NVDIMM Firmware Interface Table, see CONFIG_NFIT_ACPI), a
@@ -47,12 +48,16 @@
 	  (CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode
 	  capabilities.
 
+config ND_CLAIM
+	bool
+
 config ND_BTT
 	tristate
 
 config BTT
 	bool "BTT: Block Translation Table (atomic sector updates)"
 	default y if LIBNVDIMM
+	select ND_CLAIM
 	help
 	  The Block Translation Table (BTT) provides atomic sector
 	  update semantics for persistent memory devices, so that
@@ -65,4 +70,22 @@
 
 	  Select Y if unsure
 
+config ND_PFN
+	tristate
+
+config NVDIMM_PFN
+	bool "PFN: Map persistent (device) memory"
+	default LIBNVDIMM
+	depends on ZONE_DEVICE
+	select ND_CLAIM
+	help
+	  Map persistent memory, i.e. advertise it to the memory
+	  management sub-system.  By default persistent memory does
+	  not support direct I/O, RDMA, or any other usage that
+	  requires a 'struct page' to mediate an I/O request.  This
+	  driver allocates and initializes the infrastructure needed
+	  to support those use cases.
+
+	  Select Y if unsure
+
 endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 594bb97..ea84d3c 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
 obj-$(CONFIG_ND_BTT) += nd_btt.o
 obj-$(CONFIG_ND_BLK) += nd_blk.o
+obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
 
 nd_pmem-y := pmem.o
 
@@ -9,6 +10,8 @@
 
 nd_blk-y := blk.o
 
+nd_e820-y := e820.o
+
 libnvdimm-y := core.o
 libnvdimm-y += bus.o
 libnvdimm-y += dimm_devs.o
@@ -17,4 +20,6 @@
 libnvdimm-y += region.o
 libnvdimm-y += namespace_devs.o
 libnvdimm-y += label.o
+libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
 libnvdimm-$(CONFIG_BTT) += btt_devs.o
+libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 341202e..2542397 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -583,33 +583,6 @@
 }
 
 /*
- * This function checks if the metadata layout is valid and error free
- */
-static int arena_is_valid(struct arena_info *arena, struct btt_sb *super,
-				u8 *uuid, u32 lbasize)
-{
-	u64 checksum;
-
-	if (memcmp(super->uuid, uuid, 16))
-		return 0;
-
-	checksum = le64_to_cpu(super->checksum);
-	super->checksum = 0;
-	if (checksum != nd_btt_sb_checksum(super))
-		return 0;
-	super->checksum = cpu_to_le64(checksum);
-
-	if (lbasize != le32_to_cpu(super->external_lbasize))
-		return 0;
-
-	/* TODO: figure out action for this */
-	if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
-		dev_info(to_dev(arena), "Found arena with an error flag\n");
-
-	return 1;
-}
-
-/*
  * This function reads an existing valid btt superblock and
  * populates the corresponding arena_info struct
  */
@@ -632,8 +605,9 @@
 	arena->logoff = arena_off + le64_to_cpu(super->logoff);
 	arena->info2off = arena_off + le64_to_cpu(super->info2off);
 
-	arena->size = (super->nextoff > 0) ? (le64_to_cpu(super->nextoff)) :
-			(arena->info2off - arena->infooff + BTT_PG_SIZE);
+	arena->size = (le64_to_cpu(super->nextoff) > 0)
+		? (le64_to_cpu(super->nextoff))
+		: (arena->info2off - arena->infooff + BTT_PG_SIZE);
 
 	arena->flags = le32_to_cpu(super->flags);
 }
@@ -665,8 +639,7 @@
 		if (ret)
 			goto out;
 
-		if (!arena_is_valid(arena, super, btt->nd_btt->uuid,
-				btt->lbasize)) {
+		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
 			if (remaining == btt->rawsize) {
 				btt->init_state = INIT_NOTFOUND;
 				dev_info(to_dev(arena), "No existing arenas\n");
@@ -755,10 +728,13 @@
  * It is only called for an uninitialized arena when a write
  * to that arena occurs for the first time.
  */
-static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
+static int btt_arena_write_layout(struct arena_info *arena)
 {
 	int ret;
+	u64 sum;
 	struct btt_sb *super;
+	struct nd_btt *nd_btt = arena->nd_btt;
+	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
 
 	ret = btt_map_init(arena);
 	if (ret)
@@ -773,7 +749,8 @@
 		return -ENOMEM;
 
 	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
-	memcpy(super->uuid, uuid, 16);
+	memcpy(super->uuid, nd_btt->uuid, 16);
+	memcpy(super->parent_uuid, parent_uuid, 16);
 	super->flags = cpu_to_le32(arena->flags);
 	super->version_major = cpu_to_le16(arena->version_major);
 	super->version_minor = cpu_to_le16(arena->version_minor);
@@ -794,7 +771,8 @@
 	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
 
 	super->flags = 0;
-	super->checksum = cpu_to_le64(nd_btt_sb_checksum(super));
+	sum = nd_sb_checksum((struct nd_gen_sb *) super);
+	super->checksum = cpu_to_le64(sum);
 
 	ret = btt_info_write(arena, super);
 
@@ -813,7 +791,7 @@
 
 	mutex_lock(&btt->init_lock);
 	list_for_each_entry(arena, &btt->arena_list, list) {
-		ret = btt_arena_write_layout(arena, btt->nd_btt->uuid);
+		ret = btt_arena_write_layout(arena);
 		if (ret)
 			goto unlock;
 
@@ -1447,8 +1425,6 @@
 {
 	int rc;
 
-	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
-
 	btt_major = register_blkdev(0, "btt");
 	if (btt_major < 0)
 		return btt_major;
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 75b0d80..b2f8651 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -182,4 +182,7 @@
 	int init_state;
 	int num_arenas;
 };
+
+bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super);
+
 #endif
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 6ac8c0f..59ad54a6 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -21,63 +21,13 @@
 #include "btt.h"
 #include "nd.h"
 
-static void __nd_btt_detach_ndns(struct nd_btt *nd_btt)
-{
-	struct nd_namespace_common *ndns = nd_btt->ndns;
-
-	dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex)
-			|| ndns->claim != &nd_btt->dev,
-			"%s: invalid claim\n", __func__);
-	ndns->claim = NULL;
-	nd_btt->ndns = NULL;
-	put_device(&ndns->dev);
-}
-
-static void nd_btt_detach_ndns(struct nd_btt *nd_btt)
-{
-	struct nd_namespace_common *ndns = nd_btt->ndns;
-
-	if (!ndns)
-		return;
-	get_device(&ndns->dev);
-	device_lock(&ndns->dev);
-	__nd_btt_detach_ndns(nd_btt);
-	device_unlock(&ndns->dev);
-	put_device(&ndns->dev);
-}
-
-static bool __nd_btt_attach_ndns(struct nd_btt *nd_btt,
-		struct nd_namespace_common *ndns)
-{
-	if (ndns->claim)
-		return false;
-	dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex)
-			|| nd_btt->ndns,
-			"%s: invalid claim\n", __func__);
-	ndns->claim = &nd_btt->dev;
-	nd_btt->ndns = ndns;
-	get_device(&ndns->dev);
-	return true;
-}
-
-static bool nd_btt_attach_ndns(struct nd_btt *nd_btt,
-		struct nd_namespace_common *ndns)
-{
-	bool claimed;
-
-	device_lock(&ndns->dev);
-	claimed = __nd_btt_attach_ndns(nd_btt, ndns);
-	device_unlock(&ndns->dev);
-	return claimed;
-}
-
 static void nd_btt_release(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct nd_btt *nd_btt = to_nd_btt(dev);
 
 	dev_dbg(dev, "%s\n", __func__);
-	nd_btt_detach_ndns(nd_btt);
+	nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
 	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
 	kfree(nd_btt->uuid);
 	kfree(nd_btt);
@@ -172,104 +122,15 @@
 	return rc;
 }
 
-static int namespace_match(struct device *dev, void *data)
-{
-	char *name = data;
-
-	return strcmp(name, dev_name(dev)) == 0;
-}
-
-static bool is_nd_btt_idle(struct device *dev)
-{
-	struct nd_region *nd_region = to_nd_region(dev->parent);
-	struct nd_btt *nd_btt = to_nd_btt(dev);
-
-	if (nd_region->btt_seed == dev || nd_btt->ndns || dev->driver)
-		return false;
-	return true;
-}
-
-static ssize_t __namespace_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	struct nd_btt *nd_btt = to_nd_btt(dev);
-	struct nd_namespace_common *ndns;
-	struct device *found;
-	char *name;
-
-	if (dev->driver) {
-		dev_dbg(dev, "%s: -EBUSY\n", __func__);
-		return -EBUSY;
-	}
-
-	name = kstrndup(buf, len, GFP_KERNEL);
-	if (!name)
-		return -ENOMEM;
-	strim(name);
-
-	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
-		/* pass */;
-	else {
-		len = -EINVAL;
-		goto out;
-	}
-
-	ndns = nd_btt->ndns;
-	if (strcmp(name, "") == 0) {
-		/* detach the namespace and destroy / reset the btt device */
-		nd_btt_detach_ndns(nd_btt);
-		if (is_nd_btt_idle(dev))
-			nd_device_unregister(dev, ND_ASYNC);
-		else {
-			nd_btt->lbasize = 0;
-			kfree(nd_btt->uuid);
-			nd_btt->uuid = NULL;
-		}
-		goto out;
-	} else if (ndns) {
-		dev_dbg(dev, "namespace already set to: %s\n",
-				dev_name(&ndns->dev));
-		len = -EBUSY;
-		goto out;
-	}
-
-	found = device_find_child(dev->parent, name, namespace_match);
-	if (!found) {
-		dev_dbg(dev, "'%s' not found under %s\n", name,
-				dev_name(dev->parent));
-		len = -ENODEV;
-		goto out;
-	}
-
-	ndns = to_ndns(found);
-	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
-		dev_dbg(dev, "%s too small to host btt\n", name);
-		len = -ENXIO;
-		goto out_attach;
-	}
-
-	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nd_btt->dev));
-	if (!nd_btt_attach_ndns(nd_btt, ndns)) {
-		dev_dbg(dev, "%s already claimed\n",
-				dev_name(&ndns->dev));
-		len = -EBUSY;
-	}
-
- out_attach:
-	put_device(&ndns->dev); /* from device_find_child */
- out:
-	kfree(name);
-	return len;
-}
-
 static ssize_t namespace_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
+	struct nd_btt *nd_btt = to_nd_btt(dev);
 	ssize_t rc;
 
 	nvdimm_bus_lock(dev);
 	device_lock(dev);
-	rc = __namespace_store(dev, attr, buf, len);
+	rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
 	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
 			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
 	device_unlock(dev);
@@ -324,7 +185,7 @@
 	dev->type = &nd_btt_device_type;
 	dev->groups = nd_btt_attribute_groups;
 	device_initialize(&nd_btt->dev);
-	if (ndns && !__nd_btt_attach_ndns(nd_btt, ndns)) {
+	if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
 		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
 				__func__, dev_name(ndns->claim));
 		put_device(dev);
@@ -342,30 +203,54 @@
 	return dev;
 }
 
-/*
- * nd_btt_sb_checksum: compute checksum for btt info block
- *
- * Returns a fletcher64 checksum of everything in the given info block
- * except the last field (since that's where the checksum lives).
- */
-u64 nd_btt_sb_checksum(struct btt_sb *btt_sb)
+static bool uuid_is_null(u8 *uuid)
 {
-	u64 sum;
-	__le64 sum_save;
+	static const u8 null_uuid[16];
 
-	sum_save = btt_sb->checksum;
-	btt_sb->checksum = 0;
-	sum = nd_fletcher64(btt_sb, sizeof(*btt_sb), 1);
-	btt_sb->checksum = sum_save;
-	return sum;
+	return (memcmp(uuid, null_uuid, 16) == 0);
 }
-EXPORT_SYMBOL(nd_btt_sb_checksum);
+
+/**
+ * nd_btt_arena_is_valid - check if the metadata layout is valid
+ * @nd_btt:	device with BTT geometry and backing device info
+ * @super:	pointer to the arena's info block being tested
+ *
+ * Check consistency of the btt info block with itself by validating
+ * the checksum, and with the parent namespace by comparing the
+ * parent_uuid in the info block against that namespace's uuid.
+ *
+ * Returns:
+ * false for an invalid info block, true for a valid one
+ */
+bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
+{
+	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	u64 checksum;
+
+	if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
+		return false;
+
+	if (!uuid_is_null(super->parent_uuid))
+		if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
+			return false;
+
+	checksum = le64_to_cpu(super->checksum);
+	super->checksum = 0;
+	if (checksum != nd_sb_checksum((struct nd_gen_sb *) super))
+		return false;
+	super->checksum = cpu_to_le64(checksum);
+
+	/* TODO: figure out action for this */
+	if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
+		dev_info(&nd_btt->dev, "Found arena with an error flag\n");
+
+	return true;
+}
+EXPORT_SYMBOL(nd_btt_arena_is_valid);
 
 static int __nd_btt_probe(struct nd_btt *nd_btt,
 		struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
 {
-	u64 checksum;
-
 	if (!btt_sb || !ndns || !nd_btt)
 		return -ENODEV;
 
@@ -375,15 +260,9 @@
 	if (nvdimm_namespace_capacity(ndns) < SZ_16M)
 		return -ENXIO;
 
-	if (memcmp(btt_sb->signature, BTT_SIG, BTT_SIG_LEN) != 0)
+	if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
 		return -ENODEV;
 
-	checksum = le64_to_cpu(btt_sb->checksum);
-	btt_sb->checksum = 0;
-	if (checksum != nd_btt_sb_checksum(btt_sb))
-		return -ENODEV;
-	btt_sb->checksum = cpu_to_le64(checksum);
-
 	nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
 	nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
 	if (!nd_btt->uuid)
@@ -416,7 +295,9 @@
 	dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
 			rc == 0 ? dev_name(dev) : "<none>");
 	if (rc < 0) {
-		__nd_btt_detach_ndns(to_nd_btt(dev));
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		__nd_detach_ndns(dev, &nd_btt->ndns);
 		put_device(dev);
 	}
 
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
new file mode 100644
index 0000000..e8f03b0
--- /dev/null
+++ b/drivers/nvdimm/claim.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "btt.h"
+#include "nd.h"
+
+void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
+{
+	struct nd_namespace_common *ndns = *_ndns;
+
+	dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
+			|| ndns->claim != dev,
+			"%s: invalid claim\n", __func__);
+	ndns->claim = NULL;
+	*_ndns = NULL;
+	put_device(&ndns->dev);
+}
+
+void nd_detach_ndns(struct device *dev,
+		struct nd_namespace_common **_ndns)
+{
+	struct nd_namespace_common *ndns = *_ndns;
+
+	if (!ndns)
+		return;
+	get_device(&ndns->dev);
+	device_lock(&ndns->dev);
+	__nd_detach_ndns(dev, _ndns);
+	device_unlock(&ndns->dev);
+	put_device(&ndns->dev);
+}
+
+bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+		struct nd_namespace_common **_ndns)
+{
+	if (attach->claim)
+		return false;
+	dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
+			|| *_ndns,
+			"%s: invalid claim\n", __func__);
+	attach->claim = dev;
+	*_ndns = attach;
+	get_device(&attach->dev);
+	return true;
+}
+
+bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+		struct nd_namespace_common **_ndns)
+{
+	bool claimed;
+
+	device_lock(&attach->dev);
+	claimed = __nd_attach_ndns(dev, attach, _ndns);
+	device_unlock(&attach->dev);
+	return claimed;
+}
+
+static int namespace_match(struct device *dev, void *data)
+{
+	char *name = data;
+
+	return strcmp(name, dev_name(dev)) == 0;
+}
+
+static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
+{
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct device *seed = NULL;
+
+	if (is_nd_btt(dev))
+		seed = nd_region->btt_seed;
+	else if (is_nd_pfn(dev))
+		seed = nd_region->pfn_seed;
+
+	if (seed == dev || ndns || dev->driver)
+		return false;
+	return true;
+}
+
+static void nd_detach_and_reset(struct device *dev,
+		struct nd_namespace_common **_ndns)
+{
+	/* detach the namespace and destroy / reset the device */
+	nd_detach_ndns(dev, _ndns);
+	if (is_idle(dev, *_ndns)) {
+		nd_device_unregister(dev, ND_ASYNC);
+	} else if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		nd_btt->lbasize = 0;
+		kfree(nd_btt->uuid);
+		nd_btt->uuid = NULL;
+	} else if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+		kfree(nd_pfn->uuid);
+		nd_pfn->uuid = NULL;
+		nd_pfn->mode = PFN_MODE_NONE;
+	}
+}
+
+ssize_t nd_namespace_store(struct device *dev,
+		struct nd_namespace_common **_ndns, const char *buf,
+		size_t len)
+{
+	struct nd_namespace_common *ndns;
+	struct device *found;
+	char *name;
+
+	if (dev->driver) {
+		dev_dbg(dev, "%s: -EBUSY\n", __func__);
+		return -EBUSY;
+	}
+
+	name = kstrndup(buf, len, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+	strim(name);
+
+	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
+		/* pass */;
+	else {
+		len = -EINVAL;
+		goto out;
+	}
+
+	ndns = *_ndns;
+	if (strcmp(name, "") == 0) {
+		nd_detach_and_reset(dev, _ndns);
+		goto out;
+	} else if (ndns) {
+		dev_dbg(dev, "namespace already set to: %s\n",
+				dev_name(&ndns->dev));
+		len = -EBUSY;
+		goto out;
+	}
+
+	found = device_find_child(dev->parent, name, namespace_match);
+	if (!found) {
+		dev_dbg(dev, "'%s' not found under %s\n", name,
+				dev_name(dev->parent));
+		len = -ENODEV;
+		goto out;
+	}
+
+	ndns = to_ndns(found);
+	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
+		dev_dbg(dev, "%s too small to host\n", name);
+		len = -ENXIO;
+		goto out_attach;
+	}
+
+	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
+	if (!nd_attach_ndns(dev, ndns, _ndns)) {
+		dev_dbg(dev, "%s already claimed\n",
+				dev_name(&ndns->dev));
+		len = -EBUSY;
+	}
+
+ out_attach:
+	put_device(&ndns->dev); /* from device_find_child */
+ out:
+	kfree(name);
+	return len;
+}
+
+/*
+ * nd_sb_checksum: compute checksum for a generic info block
+ *
+ * Returns a fletcher64 checksum of everything in the given info block
+ * except the last field (since that's where the checksum lives).
+ */
+u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
+{
+	u64 sum;
+	__le64 sum_save;
+
+	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
+	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
+	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
+
+	sum_save = nd_gen_sb->checksum;
+	nd_gen_sb->checksum = 0;
+	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
+	nd_gen_sb->checksum = sum_save;
+	return sum;
+}
+EXPORT_SYMBOL(nd_sb_checksum);
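
[Annotation: nd_sb_checksum() funnels both the BTT and PFN info blocks through the same nd_fletcher64() helper by modelling them as a generic 4K block whose final 8 bytes hold the checksum. Fletcher-64 is just two running sums over the buffer viewed as 32-bit words; a user-space sketch of the idea, assuming a little-endian host and a 4-byte-multiple length (which the 4K info blocks satisfy):

	#include <stdint.h>
	#include <stddef.h>

	static uint64_t fletcher64(const void *addr, size_t len)
	{
		const uint32_t *buf = addr;
		uint32_t lo32 = 0;	/* first sum: rolling total, mod 2^32 */
		uint64_t hi32 = 0;	/* second sum: weights earlier words more */
		size_t i;

		for (i = 0; i < len / sizeof(uint32_t); i++) {
			lo32 += buf[i];
			hi32 += lo32;
		}
		return hi32 << 32 | lo32;
	}
]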
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index c05eb80..651b8d1 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -241,10 +241,7 @@
 		nvdimm_free_dpa(ndd, res);
 	nvdimm_bus_unlock(dev);
 
-	if (ndd->data && is_vmalloc_addr(ndd->data))
-		vfree(ndd->data);
-	else
-		kfree(ndd->data);
+	kvfree(ndd->data);
 	kfree(ndd);
 	put_device(dev);
 }
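
[Annotation: kvfree() subsumes the open-coded is_vmalloc_addr() test deleted above: it inspects the pointer's origin and dispatches to vfree() or kfree() accordingly, so a buffer that may come from either allocator needs only one teardown path. Behaviorally:

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
]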
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
new file mode 100644
index 0000000..8282db2
--- /dev/null
+++ b/drivers/nvdimm/e820.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, Christoph Hellwig.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#include <linux/platform_device.h>
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+
+static const struct attribute_group *e820_pmem_attribute_groups[] = {
+	&nvdimm_bus_attribute_group,
+	NULL,
+};
+
+static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
+	&nd_region_attribute_group,
+	&nd_device_attribute_group,
+	NULL,
+};
+
+static int e820_pmem_remove(struct platform_device *pdev)
+{
+	struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
+
+	nvdimm_bus_unregister(nvdimm_bus);
+	return 0;
+}
+
+static int e820_pmem_probe(struct platform_device *pdev)
+{
+	static struct nvdimm_bus_descriptor nd_desc;
+	struct device *dev = &pdev->dev;
+	struct nvdimm_bus *nvdimm_bus;
+	struct resource *p;
+
+	nd_desc.attr_groups = e820_pmem_attribute_groups;
+	nd_desc.provider_name = "e820";
+	nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
+	if (!nvdimm_bus)
+		goto err;
+	platform_set_drvdata(pdev, nvdimm_bus);
+
+	for (p = iomem_resource.child; p ; p = p->sibling) {
+		struct nd_region_desc ndr_desc;
+
+		if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+			continue;
+
+		memset(&ndr_desc, 0, sizeof(ndr_desc));
+		ndr_desc.res = p;
+		ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
+		ndr_desc.numa_node = NUMA_NO_NODE;
+		set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
+		if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
+			goto err;
+	}
+
+	return 0;
+
+ err:
+	nvdimm_bus_unregister(nvdimm_bus);
+	dev_err(dev, "failed to register legacy persistent memory ranges\n");
+	return -ENXIO;
+}
+
+static struct platform_driver e820_pmem_driver = {
+	.probe = e820_pmem_probe,
+	.remove = e820_pmem_remove,
+	.driver = {
+		.name = "e820_pmem",
+	},
+};
+
+static __init int e820_pmem_init(void)
+{
+	return platform_driver_register(&e820_pmem_driver);
+}
+
+static __exit void e820_pmem_exit(void)
+{
+	platform_driver_unregister(&e820_pmem_driver);
+}
+
+MODULE_ALIAS("platform:e820_pmem*");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+module_init(e820_pmem_init);
+module_exit(e820_pmem_exit);
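
[Annotation: the probe walks iomem_resource for ranges named "Persistent Memory (legacy)", which is how pre-NFIT e820 persistent-memory ranges surface in the resource tree, and registers each one as a page-mapped pmem region. Assuming the memmap boot option's pmem syntax, such a range can also be carved out of ordinary RAM for testing, e.g. (hypothetical sizes):

	memmap=4G!12G		(reserve 4 GiB at the 12 GiB mark as legacy pmem)
]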
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index fef0dd8..0955b2c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/slab.h>
+#include <linux/pmem.h>
 #include <linux/nd.h>
 #include "nd-core.h"
 #include "nd.h"
@@ -76,22 +77,54 @@
 	return dev ? dev->type == &namespace_io_device_type : false;
 }
 
+bool pmem_should_map_pages(struct device *dev)
+{
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+
+	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
+		return false;
+
+	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
+		return false;
+
+	if (is_nd_pfn(dev) || is_nd_btt(dev))
+		return false;
+
+#ifdef ARCH_MEMREMAP_PMEM
+	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
+#else
+	return false;
+#endif
+}
+EXPORT_SYMBOL(pmem_should_map_pages);
+
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name)
 {
 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
-	const char *suffix = "";
+	const char *suffix = NULL;
 
-	if (ndns->claim && is_nd_btt(ndns->claim))
-		suffix = "s";
+	if (ndns->claim) {
+		if (is_nd_btt(ndns->claim))
+			suffix = "s";
+		else if (is_nd_pfn(ndns->claim))
+			suffix = "m";
+		else
+			dev_WARN_ONCE(&ndns->dev, 1,
+					"unknown claim type by %s\n",
+					dev_name(ndns->claim));
+	}
 
-	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
-		sprintf(name, "pmem%d%s", nd_region->id, suffix);
-	else if (is_namespace_blk(&ndns->dev)) {
+	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
+		if (!suffix && pmem_should_map_pages(&ndns->dev))
+			suffix = "m";
+		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
+	} else if (is_namespace_blk(&ndns->dev)) {
 		struct nd_namespace_blk *nsblk;
 
 		nsblk = to_nd_namespace_blk(&ndns->dev);
-		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
+		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
+				suffix ? suffix : "");
 	} else {
 		return NULL;
 	}
@@ -100,6 +133,26 @@
 }
 EXPORT_SYMBOL(nvdimm_namespace_disk_name);
 
+const u8 *nd_dev_to_uuid(struct device *dev)
+{
+	static const u8 null_uuid[16];
+
+	if (!dev)
+		return null_uuid;
+
+	if (is_namespace_pmem(dev)) {
+		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+		return nspm->uuid;
+	} else if (is_namespace_blk(dev)) {
+		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+		return nsblk->uuid;
+	} else
+		return null_uuid;
+}
+EXPORT_SYMBOL(nd_dev_to_uuid);
+
 static ssize_t nstype_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -1235,12 +1288,22 @@
 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
 {
 	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
+	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
 	struct nd_namespace_common *ndns;
 	resource_size_t size;
 
-	if (nd_btt) {
-		ndns = nd_btt->ndns;
-		if (!ndns)
+	if (nd_btt || nd_pfn) {
+		struct device *host = NULL;
+
+		if (nd_btt) {
+			host = &nd_btt->dev;
+			ndns = nd_btt->ndns;
+		} else if (nd_pfn) {
+			host = &nd_pfn->dev;
+			ndns = nd_pfn->ndns;
+		}
+
+		if (!ndns || !host)
 			return ERR_PTR(-ENODEV);
 
 		/*
@@ -1251,12 +1314,12 @@
 		device_unlock(&ndns->dev);
 		if (ndns->dev.driver) {
 			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
-					dev_name(&nd_btt->dev));
+					dev_name(host));
 			return ERR_PTR(-EBUSY);
 		}
-		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
+		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
 					"host (%s) vs claim (%s) mismatch\n",
-					dev_name(&nd_btt->dev),
+					dev_name(host),
 					dev_name(ndns->claim)))
 			return ERR_PTR(-ENXIO);
 	} else {
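
[Annotation: the suffix logic in the nvdimm_namespace_disk_name() hunk above yields one block-device name per access mode: a raw namespace in region 0 appears as pmem0, the same namespace claimed by a BTT as pmem0s (sector-atomic), and a PFN-claimed or default page-mapped one as pmem0m (memory). A hedged usage sketch:

	char name[DISK_NAME_LEN];	/* 32 bytes, from <linux/genhd.h> */

	/* e.g. region 0, BTT claim -> "pmem0s"; page-mapped -> "pmem0m" */
	if (!nvdimm_namespace_disk_name(ndns, name))
		return -ENXIO;
]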
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index e1970c7..159aed5 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -80,4 +80,13 @@
 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
 void get_ndd(struct nvdimm_drvdata *ndd);
 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
+void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
+void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
+bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+		struct nd_namespace_common **_ndns);
+bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+		struct nd_namespace_common **_ndns);
+ssize_t nd_namespace_store(struct device *dev,
+		struct nd_namespace_common **_ndns, const char *buf,
+		size_t len);
 #endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index c41f53e74..417e521 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -29,6 +29,13 @@
 	ND_MAX_LANES = 256,
 	SECTOR_SHIFT = 9,
 	INT_LBASIZE_ALIGNMENT = 64,
+#if IS_ENABLED(CONFIG_NVDIMM_PFN)
+	ND_PFN_ALIGN = PAGES_PER_SECTION * PAGE_SIZE,
+	ND_PFN_MASK = ND_PFN_ALIGN - 1,
+#else
+	ND_PFN_ALIGN = 0,
+	ND_PFN_MASK = 0,
+#endif
 };
 
 struct nvdimm_drvdata {
@@ -92,8 +99,11 @@
 	struct device dev;
 	struct ida ns_ida;
 	struct ida btt_ida;
+	struct ida pfn_ida;
+	unsigned long flags;
 	struct device *ns_seed;
 	struct device *btt_seed;
+	struct device *pfn_seed;
 	u16 ndr_mappings;
 	u64 ndr_size;
 	u64 ndr_start;
@@ -133,6 +143,22 @@
 	int id;
 };
 
+enum nd_pfn_mode {
+	PFN_MODE_NONE,
+	PFN_MODE_RAM,
+	PFN_MODE_PMEM,
+};
+
+struct nd_pfn {
+	int id;
+	u8 *uuid;
+	struct device dev;
+	unsigned long npfns;
+	enum nd_pfn_mode mode;
+	struct nd_pfn_sb *pfn_sb;
+	struct nd_namespace_common *ndns;
+};
+
 enum nd_async_mode {
 	ND_SYNC,
 	ND_ASYNC,
@@ -159,14 +185,19 @@
 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 		void *buf, size_t len);
 struct nd_btt *to_nd_btt(struct device *dev);
-struct btt_sb;
-u64 nd_btt_sb_checksum(struct btt_sb *btt_sb);
+
+struct nd_gen_sb {
+	char reserved[SZ_4K - 8];
+	__le64 checksum;
+};
+
+u64 nd_sb_checksum(struct nd_gen_sb *sb);
 #if IS_ENABLED(CONFIG_BTT)
 int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
 bool is_nd_btt(struct device *dev);
 struct device *nd_btt_create(struct nd_region *nd_region);
 #else
-static inline nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
 {
 	return -ENODEV;
 }
@@ -180,8 +211,36 @@
 {
 	return NULL;
 }
-
 #endif
+
+struct nd_pfn *to_nd_pfn(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_PFN)
+int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+bool is_nd_pfn(struct device *dev);
+struct device *nd_pfn_create(struct nd_region *nd_region);
+int nd_pfn_validate(struct nd_pfn *nd_pfn);
+#else
+static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+{
+	return -ENODEV;
+}
+
+static inline bool is_nd_pfn(struct device *dev)
+{
+	return false;
+}
+
+static inline struct device *nd_pfn_create(struct nd_region *nd_region)
+{
+	return NULL;
+}
+
+static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
+{
+	return -ENODEV;
+}
+#endif
+
 struct nd_region *to_nd_region(struct device *dev);
 int nd_region_to_nstype(struct nd_region *nd_region);
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -217,4 +276,6 @@
 }
 void nd_iostat_end(struct bio *bio, unsigned long start);
 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
+const u8 *nd_dev_to_uuid(struct device *dev);
+bool pmem_should_map_pages(struct device *dev);
 #endif /* __ND_H__ */
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
new file mode 100644
index 0000000..cc24375
--- /dev/null
+++ b/drivers/nvdimm/pfn.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __NVDIMM_PFN_H
+#define __NVDIMM_PFN_H
+
+#include <linux/types.h>
+
+#define PFN_SIG_LEN 16
+#define PFN_SIG "NVDIMM_PFN_INFO\0"
+
+struct nd_pfn_sb {
+	u8 signature[PFN_SIG_LEN];
+	u8 uuid[16];
+	u8 parent_uuid[16];
+	__le32 flags;
+	__le16 version_major;
+	__le16 version_minor;
+	__le64 dataoff;
+	__le64 npfns;
+	__le32 mode;
+	u8 padding[4012];
+	__le64 checksum;
+};
+#endif /* __NVDIMM_PFN_H */
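
[Annotation: the padding member sizes the info block to exactly one 4K page, the invariant enforced by the BUILD_BUG_ON checks in nd_sb_checksum(): 16 + 16 + 16 + 4 + 2 + 2 + 8 + 8 + 4 + 4012 + 8 = 4096 bytes, with the checksum as the final quadword. A stand-alone compile-time check of the same layout, as a field-for-field mirror and assuming no surprise padding from the ABI:

	#include <stdint.h>

	struct pfn_sb_layout {
		uint8_t  signature[16];
		uint8_t  uuid[16];
		uint8_t  parent_uuid[16];
		uint32_t flags;
		uint16_t version_major;
		uint16_t version_minor;
		uint64_t dataoff;
		uint64_t npfns;
		uint32_t mode;
		uint8_t  padding[4012];
		uint64_t checksum;
	};

	_Static_assert(sizeof(struct pfn_sb_layout) == 4096,
		       "pfn info block must be exactly one 4K page");
]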
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
new file mode 100644
index 0000000..3fd7d0d
--- /dev/null
+++ b/drivers/nvdimm/pfn_devs.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "nd.h"
+
+static void nd_pfn_release(struct device *dev)
+{
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
+	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
+	kfree(nd_pfn->uuid);
+	kfree(nd_pfn);
+}
+
+static struct device_type nd_pfn_device_type = {
+	.name = "nd_pfn",
+	.release = nd_pfn_release,
+};
+
+bool is_nd_pfn(struct device *dev)
+{
+	return dev ? dev->type == &nd_pfn_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_pfn);
+
+struct nd_pfn *to_nd_pfn(struct device *dev)
+{
+	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
+
+	WARN_ON(!is_nd_pfn(dev));
+	return nd_pfn;
+}
+EXPORT_SYMBOL(to_nd_pfn);
+
+static ssize_t mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+	switch (nd_pfn->mode) {
+	case PFN_MODE_RAM:
+		return sprintf(buf, "ram\n");
+	case PFN_MODE_PMEM:
+		return sprintf(buf, "pmem\n");
+	default:
+		return sprintf(buf, "none\n");
+	}
+}
+
+static ssize_t mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc = 0;
+
+	device_lock(dev);
+	nvdimm_bus_lock(dev);
+	if (dev->driver)
+		rc = -EBUSY;
+	else {
+		size_t n = len - 1;
+
+		if (strncmp(buf, "pmem\n", n) == 0
+				|| strncmp(buf, "pmem", n) == 0) {
+			/* TODO: support allocating the memmap from PMEM itself */
+			rc = -ENOTTY;
+		} else if (strncmp(buf, "ram\n", n) == 0
+				|| strncmp(buf, "ram", n) == 0)
+			nd_pfn->mode = PFN_MODE_RAM;
+		else if (strncmp(buf, "none\n", n) == 0
+				|| strncmp(buf, "none", n) == 0)
+			nd_pfn->mode = PFN_MODE_NONE;
+		else
+			rc = -EINVAL;
+	}
+	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
+
+	return rc ? rc : len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t uuid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+	if (nd_pfn->uuid)
+		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
+	return sprintf(buf, "\n");
+}
+
+static ssize_t uuid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
+	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+	device_unlock(dev);
+
+	return rc ? rc : len;
+}
+static DEVICE_ATTR_RW(uuid);
+
+static ssize_t namespace_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	nvdimm_bus_lock(dev);
+	rc = sprintf(buf, "%s\n", nd_pfn->ndns
+			? dev_name(&nd_pfn->ndns->dev) : "");
+	nvdimm_bus_unlock(dev);
+	return rc;
+}
+
+static ssize_t namespace_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	nvdimm_bus_lock(dev);
+	device_lock(dev);
+	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
+	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+	device_unlock(dev);
+	nvdimm_bus_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RW(namespace);
+
+static struct attribute *nd_pfn_attributes[] = {
+	&dev_attr_mode.attr,
+	&dev_attr_namespace.attr,
+	&dev_attr_uuid.attr,
+	NULL,
+};
+
+static struct attribute_group nd_pfn_attribute_group = {
+	.attrs = nd_pfn_attributes,
+};
+
+static const struct attribute_group *nd_pfn_attribute_groups[] = {
+	&nd_pfn_attribute_group,
+	&nd_device_attribute_group,
+	&nd_numa_attribute_group,
+	NULL,
+};
+
+static struct device *__nd_pfn_create(struct nd_region *nd_region,
+		u8 *uuid, enum nd_pfn_mode mode,
+		struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn;
+	struct device *dev;
+
+	/* we can only create pages for contiguous ranges of pmem */
+	if (!is_nd_pmem(&nd_region->dev))
+		return NULL;
+
+	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
+	if (!nd_pfn)
+		return NULL;
+
+	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
+	if (nd_pfn->id < 0) {
+		kfree(nd_pfn);
+		return NULL;
+	}
+
+	nd_pfn->mode = mode;
+	if (uuid)
+		uuid = kmemdup(uuid, 16, GFP_KERNEL);
+	nd_pfn->uuid = uuid;
+	dev = &nd_pfn->dev;
+	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
+	dev->parent = &nd_region->dev;
+	dev->type = &nd_pfn_device_type;
+	dev->groups = nd_pfn_attribute_groups;
+	device_initialize(&nd_pfn->dev);
+	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+				__func__, dev_name(ndns->claim));
+		put_device(dev);
+		return NULL;
+	}
+	return dev;
+}
+
+struct device *nd_pfn_create(struct nd_region *nd_region)
+{
+	struct device *dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE,
+			NULL);
+
+	if (dev)
+		__nd_device_register(dev);
+	return dev;
+}
+
+int nd_pfn_validate(struct nd_pfn *nd_pfn)
+{
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_io *nsio;
+	u64 checksum, offset;
+
+	if (!pfn_sb || !ndns)
+		return -ENODEV;
+
+	if (!is_nd_pmem(nd_pfn->dev.parent))
+		return -ENODEV;
+
+	/* section alignment for simple hotplug */
+	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN)
+		return -ENODEV;
+
+	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
+		return -ENXIO;
+
+	if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
+		return -ENODEV;
+
+	checksum = le64_to_cpu(pfn_sb->checksum);
+	pfn_sb->checksum = 0;
+	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
+		return -ENODEV;
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	switch (le32_to_cpu(pfn_sb->mode)) {
+	case PFN_MODE_RAM:
+		break;
+	case PFN_MODE_PMEM:
+		/* TODO: support allocating the memmap from PMEM itself */
+		return -ENOTTY;
+	default:
+		return -ENXIO;
+	}
+
+	if (!nd_pfn->uuid) {
+		/* from probe we allocate */
+		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
+		if (!nd_pfn->uuid)
+			return -ENOMEM;
+	} else {
+		/* from init we validate */
+		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
+			return -EINVAL;
+	}
+
+	/*
+	 * These warnings are verbose because they can only trigger in
+	 * the case where the physical address alignment of the
+	 * namespace has changed since the pfn superblock was
+	 * established.
+	 */
+	offset = le64_to_cpu(pfn_sb->dataoff);
+	nsio = to_nd_namespace_io(&ndns->dev);
+	if (nsio->res.start & ND_PFN_MASK) {
+		dev_err(&nd_pfn->dev,
+				"init failed: %s not section aligned\n",
+				dev_name(&ndns->dev));
+		return -EBUSY;
+	} else if (offset >= resource_size(&nsio->res)) {
+		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
+				dev_name(&ndns->dev));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nd_pfn_validate);
+
+int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+{
+	int rc;
+	struct device *dev;
+	struct nd_pfn *nd_pfn;
+	struct nd_pfn_sb *pfn_sb;
+	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
+
+	if (ndns->force_raw)
+		return -ENODEV;
+
+	nvdimm_bus_lock(&ndns->dev);
+	dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
+	nvdimm_bus_unlock(&ndns->dev);
+	if (!dev)
+		return -ENOMEM;
+	dev_set_drvdata(dev, drvdata);
+	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
+	nd_pfn = to_nd_pfn(dev);
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn);
+	nd_pfn->pfn_sb = NULL;
+	kfree(pfn_sb);
+	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
+			rc == 0 ? dev_name(dev) : "<none>");
+	if (rc < 0) {
+		__nd_detach_ndns(dev, &nd_pfn->ndns);
+		put_device(dev);
+	} else
+		__nd_device_register(&nd_pfn->dev);
+
+	return rc;
+}
+EXPORT_SYMBOL(nd_pfn_probe);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4c079d5..b952538 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -21,18 +21,24 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/memory_hotplug.h>
 #include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
 #include <linux/nd.h>
+#include "pfn.h"
 #include "nd.h"
 
 struct pmem_device {
 	struct request_queue	*pmem_queue;
 	struct gendisk		*pmem_disk;
+	struct nd_namespace_common *ndns;
 
 	/* One contiguous memory region per device */
 	phys_addr_t		phys_addr;
+	/* when non-zero this device is hosting a 'pfn' instance */
+	phys_addr_t		data_offset;
 	void __pmem		*virt_addr;
 	size_t			size;
 };
@@ -44,7 +50,7 @@
 			sector_t sector)
 {
 	void *mem = kmap_atomic(page);
-	size_t pmem_off = sector << 9;
+	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
 	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
 
 	if (rw == READ) {
@@ -92,19 +98,26 @@
 }
 
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-			      void **kaddr, unsigned long *pfn, long size)
+		      void __pmem **kaddr, unsigned long *pfn)
 {
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
-	size_t offset = sector << 9;
+	resource_size_t offset = sector * 512 + pmem->data_offset;
+	resource_size_t size;
 
-	if (!pmem)
-		return -ENODEV;
+	if (pmem->data_offset) {
+		/*
+		 * Limit the direct_access() size to what is covered by
+		 * the memmap
+		 */
+		size = (pmem->size - offset) & ~ND_PFN_MASK;
+	} else
+		size = pmem->size - offset;
 
 	/* FIXME convert DAX to comprehend that this mapping has a lifetime */
-	*kaddr = (void __force *) pmem->virt_addr + offset;
+	*kaddr = pmem->virt_addr + offset;
 	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
 
-	return pmem->size - offset;
+	return size;
 }
 
 static const struct block_device_operations pmem_fops = {
@@ -119,27 +132,33 @@
 {
 	struct pmem_device *pmem;
 
-	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
+	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
 		return ERR_PTR(-ENOMEM);
 
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
-	if (!arch_has_pmem_api())
+	if (!arch_has_wmb_pmem())
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
-	if (!request_mem_region(pmem->phys_addr, pmem->size, dev_name(dev))) {
+	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
+			dev_name(dev))) {
 		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
 				&pmem->phys_addr, pmem->size);
-		kfree(pmem);
 		return ERR_PTR(-EBUSY);
 	}
 
-	pmem->virt_addr = memremap_pmem(pmem->phys_addr, pmem->size);
-	if (!pmem->virt_addr) {
-		release_mem_region(pmem->phys_addr, pmem->size);
-		kfree(pmem);
-		return ERR_PTR(-ENXIO);
+	if (pmem_should_map_pages(dev)) {
+		void *addr = devm_memremap_pages(dev, res);
+
+		if (IS_ERR(addr))
+			return addr;
+		pmem->virt_addr = (void __pmem *) addr;
+	} else {
+		pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr,
+				pmem->size);
+		if (!pmem->virt_addr)
+			return ERR_PTR(-ENXIO);
 	}
 
 	return pmem;
@@ -147,13 +166,16 @@
 
 static void pmem_detach_disk(struct pmem_device *pmem)
 {
+	if (!pmem->pmem_disk)
+		return;
+
 	del_gendisk(pmem->pmem_disk);
 	put_disk(pmem->pmem_disk);
 	blk_cleanup_queue(pmem->pmem_queue);
 }
 
-static int pmem_attach_disk(struct nd_namespace_common *ndns,
-		struct pmem_device *pmem)
+static int pmem_attach_disk(struct device *dev,
+		struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
 	struct gendisk *disk;
 
@@ -162,6 +184,7 @@
 		return -ENOMEM;
 
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
+	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
 	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
 	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
@@ -179,8 +202,8 @@
 	disk->queue		= pmem->pmem_queue;
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(ndns, disk->disk_name);
-	disk->driverfs_dev = &ndns->dev;
-	set_capacity(disk, pmem->size >> 9);
+	disk->driverfs_dev = dev;
+	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
 	pmem->pmem_disk = disk;
 
 	add_disk(disk);
@@ -209,11 +232,152 @@
 	return 0;
 }
 
-static void pmem_free(struct pmem_device *pmem)
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
-	memunmap_pmem(pmem->virt_addr);
-	release_mem_region(pmem->phys_addr, pmem->size);
-	kfree(pmem);
+	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
+	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_region *nd_region;
+	unsigned long npfns;
+	phys_addr_t offset;
+	u64 checksum;
+	int rc;
+
+	if (!pfn_sb)
+		return -ENOMEM;
+
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn);
+	if (rc == 0 || rc == -EBUSY)
+		return rc;
+
+	/* section alignment for simple hotplug */
+	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
+			|| pmem->phys_addr & ND_PFN_MASK)
+		return -ENODEV;
+
+	nd_region = to_nd_region(nd_pfn->dev.parent);
+	if (nd_region->ro) {
+		dev_info(&nd_pfn->dev,
+				"%s is read-only, unable to init metadata\n",
+				dev_name(&nd_region->dev));
+		goto err;
+	}
+
+	memset(pfn_sb, 0, sizeof(*pfn_sb));
+	npfns = (pmem->size - SZ_8K) / SZ_4K;
+	/*
+	 * Note: we assume the standard 64-byte struct page here; debugging
+	 * options may make it larger, in which case the implementation
+	 * limits the pfns advertised through ->direct_access() to those
+	 * covered by the memmap.
+	 */
+	if (nd_pfn->mode == PFN_MODE_PMEM)
+		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
+	else if (nd_pfn->mode == PFN_MODE_RAM)
+		offset = SZ_8K;
+	else
+		goto err;
+
+	npfns = (pmem->size - offset) / SZ_4K;
+	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+	pfn_sb->dataoff = cpu_to_le64(offset);
+	pfn_sb->npfns = cpu_to_le64(npfns);
+	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	pfn_sb->version_major = cpu_to_le16(1);
+	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+	if (rc)
+		goto err;
+
+	return 0;
+ err:
+	nd_pfn->pfn_sb = NULL;
+	kfree(pfn_sb);
+	return -ENXIO;
+}
+
+static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	struct pmem_device *pmem;
+
+	/* free pmem disk */
+	pmem = dev_get_drvdata(&nd_pfn->dev);
+	pmem_detach_disk(pmem);
+
+	/* release nd_pfn resources */
+	kfree(nd_pfn->pfn_sb);
+	nd_pfn->pfn_sb = NULL;
+
+	return 0;
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	struct device *dev = &nd_pfn->dev;
+	struct vmem_altmap *altmap;
+	struct nd_region *nd_region;
+	struct nd_pfn_sb *pfn_sb;
+	struct pmem_device *pmem;
+	phys_addr_t offset;
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return -ENODEV;
+
+	nd_region = to_nd_region(dev->parent);
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return rc;
+
+	if (PAGE_SIZE != SZ_4K) {
+		dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
+		return -ENXIO;
+	}
+	if (nsio->res.start & ND_PFN_MASK) {
+		dev_err(dev, "%s not memory hotplug section aligned\n",
+				dev_name(&ndns->dev));
+		return -ENXIO;
+	}
+
+	pfn_sb = nd_pfn->pfn_sb;
+	offset = le64_to_cpu(pfn_sb->dataoff);
+	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+	if (nd_pfn->mode == PFN_MODE_RAM) {
+		if (offset != SZ_8K)
+			return -EINVAL;
+		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+		altmap = NULL;
+	} else {
+		rc = -ENXIO;
+		goto err;
+	}
+
+	/* establish pfn range for lookup, and switch to direct map */
+	pmem = dev_get_drvdata(dev);
+	memunmap_pmem(dev, pmem->virt_addr);
+	pmem->virt_addr = (void __pmem *)devm_memremap_pages(dev, &nsio->res);
+	if (IS_ERR(pmem->virt_addr)) {
+		rc = PTR_ERR(pmem->virt_addr);
+		goto err;
+	}
+
+	/* attach pmem disk in "pfn-mode" */
+	pmem->data_offset = offset;
+	rc = pmem_attach_disk(dev, ndns, pmem);
+	if (rc)
+		goto err;
+
+	return rc;
+ err:
+	nvdimm_namespace_detach_pfn(ndns);
+	return rc;
 }
 
 static int nd_pmem_probe(struct device *dev)
@@ -222,7 +386,6 @@
 	struct nd_namespace_common *ndns;
 	struct nd_namespace_io *nsio;
 	struct pmem_device *pmem;
-	int rc;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
@@ -233,18 +396,27 @@
 	if (IS_ERR(pmem))
 		return PTR_ERR(pmem);
 
+	pmem->ndns = ndns;
 	dev_set_drvdata(dev, pmem);
 	ndns->rw_bytes = pmem_rw_bytes;
+
 	if (is_nd_btt(dev))
-		rc = nvdimm_namespace_attach_btt(ndns);
-	else if (nd_btt_probe(ndns, pmem) == 0) {
+		return nvdimm_namespace_attach_btt(ndns);
+
+	if (is_nd_pfn(dev))
+		return nvdimm_namespace_attach_pfn(ndns);
+
+	if (nd_btt_probe(ndns, pmem) == 0) {
 		/* we'll come back as btt-pmem */
-		rc = -ENXIO;
-	} else
-		rc = pmem_attach_disk(ndns, pmem);
-	if (rc)
-		pmem_free(pmem);
-	return rc;
+		return -ENXIO;
+	}
+
+	if (nd_pfn_probe(ndns, pmem) == 0) {
+		/* we'll come back as pfn-pmem */
+		return -ENXIO;
+	}
+
+	return pmem_attach_disk(dev, ndns, pmem);
 }
 
 static int nd_pmem_remove(struct device *dev)
@@ -252,10 +424,11 @@
 	struct pmem_device *pmem = dev_get_drvdata(dev);
 
 	if (is_nd_btt(dev))
-		nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
+		nvdimm_namespace_detach_btt(pmem->ndns);
+	else if (is_nd_pfn(dev))
+		nvdimm_namespace_detach_pfn(pmem->ndns);
 	else
 		pmem_detach_disk(pmem);
-	pmem_free(pmem);
 
 	return 0;
 }
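
[Annotation: nd_pfn_init() sizes the memmap reservation in two passes: a pessimistic page count over everything past the 8K info-block area, then a PMD-aligned data offset assuming the standard 64-byte struct page. Worked through for a hypothetical 16 GiB namespace in PFN_MODE_PMEM on x86 (PMD_SIZE = 2 MiB):

	npfns  = (16 GiB - 8 KiB) / 4 KiB       = 4,194,302 pages
	memmap = 64 B * 4,194,302               = 268,435,328 B (~256 MiB)
	offset = ALIGN(8 KiB + memmap, 2 MiB)   = 258 MiB
	npfns  = (16 GiB - 258 MiB) / 4 KiB     = 4,128,256 pages advertised
]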
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index f28f78c..7da63ea 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -53,6 +53,7 @@
 		return -ENODEV;
 
 	nd_region->btt_seed = nd_btt_create(nd_region);
+	nd_region->pfn_seed = nd_pfn_create(nd_region);
 	if (err == 0)
 		return 0;
 
@@ -84,6 +85,7 @@
 	nvdimm_bus_lock(dev);
 	nd_region->ns_seed = NULL;
 	nd_region->btt_seed = NULL;
+	nd_region->pfn_seed = NULL;
 	dev_set_drvdata(dev, NULL);
 	nvdimm_bus_unlock(dev);
 
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 7384455..529f3f0 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -345,6 +345,23 @@
 }
 static DEVICE_ATTR_RO(btt_seed);
 
+static ssize_t pfn_seed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	ssize_t rc;
+
+	nvdimm_bus_lock(dev);
+	if (nd_region->pfn_seed)
+		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
+	else
+		rc = sprintf(buf, "\n");
+	nvdimm_bus_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(pfn_seed);
+
 static ssize_t read_only_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -373,6 +390,7 @@
 	&dev_attr_nstype.attr,
 	&dev_attr_mappings.attr,
 	&dev_attr_btt_seed.attr,
+	&dev_attr_pfn_seed.attr,
 	&dev_attr_read_only.attr,
 	&dev_attr_set_cookie.attr,
 	&dev_attr_available_size.attr,
@@ -740,10 +758,12 @@
 	nd_region->provider_data = ndr_desc->provider_data;
 	nd_region->nd_set = ndr_desc->nd_set;
 	nd_region->num_lanes = ndr_desc->num_lanes;
+	nd_region->flags = ndr_desc->flags;
 	nd_region->ro = ro;
 	nd_region->numa_node = ndr_desc->numa_node;
 	ida_init(&nd_region->ns_ida);
 	ida_init(&nd_region->btt_ida);
+	ida_init(&nd_region->pfn_ida);
 	dev = &nd_region->dev;
 	dev_set_name(dev, "region%d", nd_region->id);
 	dev->parent = &nvdimm_bus->dev;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0749656..6e82bc42 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -967,7 +967,9 @@
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
-#define MAX_PHYS_ADDR	((phys_addr_t)~0)
+#ifndef MAX_MEMBLOCK_ADDR
+#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
+#endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
@@ -984,16 +986,16 @@
 	}
 	size &= PAGE_MASK;
 
-	if (base > MAX_PHYS_ADDR) {
+	if (base > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
 				base, base + size);
 		return;
 	}
 
-	if (base + size - 1 > MAX_PHYS_ADDR) {
+	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
-				((u64)MAX_PHYS_ADDR) + 1, base + size);
-		size = MAX_PHYS_ADDR - base + 1;
+				((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+		size = MAX_MEMBLOCK_ADDR - base + 1;
 	}
 
 	if (base + size < phys_offset) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 2956d72..55317fa 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -432,6 +432,7 @@
 
 	return of_irq_get(dev, index);
 }
+EXPORT_SYMBOL_GPL(of_irq_get_byname);
 
 /**
  * of_irq_count - Count the number of IRQs a node uses
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 901e1a3..7b9e89b 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1555,8 +1555,11 @@
 	if (lba_dev->hba.lmmio_space.flags)
 		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
 					lba_dev->hba.lmmio_space_offset);
-	if (lba_dev->hba.gmmio_space.flags)
-		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+	if (lba_dev->hba.gmmio_space.flags) {
+		/* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
+		pr_warn("LBA: Not registering GMMIO space %pR\n",
+			&lba_dev->hba.gmmio_space);
+	}
 
 	pci_add_resource(&resources, &lba_dev->hba.bus_num);
 
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 944f500..73de4ef 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
 # PCI configuration
 #
 config PCI_BUS_ADDR_T_64BIT
-	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
+	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
 	depends on PCI
 
 config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8177f3b..0b2be17 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -326,8 +326,7 @@
 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
 		dev->rom_base_reg = rom;
 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
-				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
-				IORESOURCE_SIZEALIGN;
+				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		__pci_read_base(dev, pci_bar_mem32, res, rom);
 	}
 }
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 984a8ff..483f919 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -296,20 +296,18 @@
 		goto err0;
 	}
 
-	clk = clk_get(&dev->dev, NULL);
+	clk = devm_clk_get(&dev->dev, NULL);
 	if (IS_ERR(clk))
 		return -ENODEV;
 
 	pxa2xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
-	if (!sinfo) {
-		clk_put(clk);
+	sinfo = devm_kzalloc(&dev->dev, SKT_DEV_INFO_SIZE(ops->nr),
+			     GFP_KERNEL);
+	if (!sinfo)
 		return -ENOMEM;
-	}
 
 	sinfo->nskt = ops->nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < ops->nr; i++) {
@@ -332,8 +330,7 @@
 err1:
 	while (--i >= 0)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
-	clk_put(clk);
-	kfree(sinfo);
+
 err0:
 	return ret;
 }
@@ -343,13 +340,9 @@
 	struct skt_dev_info *sinfo = platform_get_drvdata(dev);
 	int i;
 
-	platform_set_drvdata(dev, NULL);
-
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
 
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 8039452..66acdc8 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,8 +93,6 @@
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
 
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 80b8e9d..a1531fe 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -135,8 +135,13 @@
 	int (*add)(struct soc_pcmcia_socket *))
 {
 	struct sa1111_pcmcia_socket *s;
+	struct clk *clk;
 	int i, ret = 0;
 
+	clk = devm_clk_get(&dev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
 	ops->socket_state = sa1111_pcmcia_socket_state;
 
 	for (i = 0; i < ops->nr; i++) {
@@ -145,12 +150,8 @@
 			return -ENOMEM;
 
 		s->soc.nr = ops->first + i;
-		s->soc.clk = clk_get(&dev->dev, NULL);
-		if (IS_ERR(s->soc.clk)) {
-			ret = PTR_ERR(s->soc.clk);
-			kfree(s);
-			return ret;
-		}
+		s->soc.clk = clk;
+
 		soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
 		s->dev = dev;
 		if (s->soc.nr) {
@@ -226,7 +227,6 @@
 	for (; s; s = next) {
 		next = s->next;
 		soc_pcmcia_remove_one(&s->soc);
-		clk_put(s->soc.clk);
 		kfree(s);
 	}
 
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index cf6de2c..9f6ec87 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -222,18 +222,17 @@
 	int i, ret = 0;
 	struct clk *clk;
 
-	clk = clk_get(dev, NULL);
+	clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
 	sa11xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
+	sinfo = devm_kzalloc(dev, SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
 	if (!sinfo)
 		return -ENOMEM;
 
 	sinfo->nskt = nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < nr; i++) {
@@ -251,8 +250,6 @@
 	if (ret) {
 		while (--i >= 0)
 			soc_pcmcia_remove_one(&sinfo->skt[i]);
-		clk_put(clk);
-		kfree(sinfo);
 	} else {
 		dev_set_drvdata(dev, sinfo);
 	}
@@ -261,16 +258,6 @@
 }
 EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
 
-static int __init sa11xx_pcmcia_init(void)
-{
-	return 0;
-}
-fs_initcall(sa11xx_pcmcia_init);
-
-static void __exit sa11xx_pcmcia_exit(void) {}
-
-module_exit(sa11xx_pcmcia_exit);
-
 MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
 MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver");
 MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index e6fcbea..94762a5 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -68,7 +68,6 @@
 
 struct skt_dev_info {
 	int nskt;
-	struct clk *clk;
 	struct soc_pcmcia_socket skt[0];
 };
 
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
new file mode 100644
index 0000000..d9de36e
--- /dev/null
+++ b/drivers/perf/Kconfig
@@ -0,0 +1,15 @@
+#
+# Performance Monitor Drivers
+#
+
+menu "Performance monitor support"
+
+config ARM_PMU
+	depends on PERF_EVENTS && ARM
+	bool "ARM PMU framework"
+	default y
+	help
+	  Say y if you want to use CPU performance monitors on ARM-based
+	  systems.
+
+endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
new file mode 100644
index 0000000..acd2397
--- /dev/null
+++ b/drivers/perf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
new file mode 100644
index 0000000..2365a32
--- /dev/null
+++ b/drivers/perf/arm_pmu.c
@@ -0,0 +1,921 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
+
+static int
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+	return (int)(config & raw_event_mask);
+}
+
+int
+armpmu_map_event(struct perf_event *event,
+		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		 const unsigned (*cache_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+	int type = event->attr.type;
+
+	if (type == event->pmu->type)
+		return armpmu_map_raw_event(raw_event_mask, config);
+
+	switch (type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_hw_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int armpmu_event_set_period(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
+
+	local64_set(&hwc->prev_count, (u64)-left);
+
+	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+u64 armpmu_event_update(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = armpmu->read_counter(event);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+	armpmu_event_update(event);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(event);
+		armpmu_event_update(event);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static void armpmu_start(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+	/*
+	 * Set the period again. Some counters can't be stopped, so when we
+	 * were stopped we simply disabled the IRQ source and the counter
+	 * may have been left counting. If we don't do this step then we may
+	 * get an interrupt too soon or *way* too late if the overflow has
+	 * happened since disabling.
+	 */
+	armpmu_event_set_period(event);
+	armpmu->enable(event);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	armpmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+	if (armpmu->clear_event_idx)
+		armpmu->clear_event_idx(hw_events, event);
+
+	perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have a space for the counter then finish early. */
+	idx = armpmu->get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	armpmu->disable(event);
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static int
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+			       struct perf_event *event)
+{
+	struct arm_pmu *armpmu;
+
+	if (is_software_event(event))
+		return 1;
+
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	armpmu = to_arm_pmu(event->pmu);
+	return armpmu->get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct pmu_hw_events fake_pmu;
+
+	/*
+	 * Initialise the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
+
+	if (!validate_event(event->pmu, &fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(event->pmu, &fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+{
+	struct arm_pmu *armpmu;
+	struct platform_device *plat_device;
+	struct arm_pmu_platdata *plat;
+	int ret;
+	u64 start_clock, finish_clock;
+
+	/*
+	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
+	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
+	 * do any necessary shifting; we just need to perform the first
+	 * dereference.
+	 */
+	armpmu = *(void **)dev;
+	plat_device = armpmu->plat_device;
+	plat = dev_get_platdata(&plat_device->dev);
+
+	start_clock = sched_clock();
+	if (plat && plat->handle_irq)
+		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
+	else
+		ret = armpmu->handle_irq(irq, armpmu);
+	finish_clock = sched_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	armpmu->free_irq(armpmu);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+	if (err) {
+		armpmu_release_hardware(armpmu);
+		return err;
+	}
+
+	return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events	 = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
+	}
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
+
+	mapping = armpmu->map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	    |= (unsigned long)mapping;
+
+	if (!is_sampling_event(event)) {
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
+		hwc->last_period    = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
+
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (armpmu->map_event(event) == -ENOENT)
+		return -ENOENT;
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
+
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	if (enabled)
+		armpmu->start(armpmu);
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	armpmu->stop(armpmu);
+}
+
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+}
+
+static void armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
+	};
+}
+
+int armpmu_register(struct arm_pmu *armpmu, int type)
+{
+	armpmu_init(armpmu);
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+			armpmu->name, armpmu->num_events);
+	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
+}
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!__oprofile_cpu_pmu)
+		return NULL;
+
+	return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (__oprofile_cpu_pmu != NULL)
+		max_events = __oprofile_cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+		}
+	}
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
+	}
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			return err;
+		}
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, skip this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int err;
+	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = cpu_pmu;
+	}
+
+	cpu_pmu->hw_events	= cpu_hw_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+			 cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+	int *irqs, i = 0;
+	bool using_spi = false;
+	struct platform_device *pdev = pmu->plat_device;
+
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	do {
+		struct device_node *dn;
+		int cpu, irq;
+
+		/* See if we have an affinity entry */
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
+		if (!dn)
+			break;
+
+		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
+		irq = platform_get_irq(pdev, i);
+		if (irq >= 0) {
+			bool spi = !irq_is_percpu(irq);
+
+			if (i > 0 && spi != using_spi) {
+				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
+					dn->name);
+				kfree(irqs);
+				return -EINVAL;
+			}
+
+			using_spi = spi;
+		}
+
+		/* Now look up the logical CPU number */
+		for_each_possible_cpu(cpu)
+			if (dn == of_cpu_device_node_get(cpu))
+				break;
+
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			of_node_put(dn);
+			cpumask_setall(&pmu->supported_cpus);
+			break;
+		}
+
+		/* For SPIs, we need to track the affinity per IRQ */
+		if (using_spi) {
+			if (i >= pdev->num_resources) {
+				of_node_put(dn);
+				break;
+			}
+
+			irqs[i] = cpu;
+		}
+
+		/* Keep track of the CPUs containing this PMU type */
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+		of_node_put(dn);
+		i++;
+	} while (1);
+
+	/* If we didn't manage to parse anything, claim to support all CPUs */
+	if (cpumask_weight(&pmu->supported_cpus) == 0)
+		cpumask_setall(&pmu->supported_cpus);
+
+	/* If we matched up the IRQ affinities, use them to route the SPIs */
+	if (using_spi && i == pdev->num_resources)
+		pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	const int (*init_fn)(struct arm_pmu *);
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
+	pmu->plat_device = pdev;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		ret = of_pmu_irq_cfg(pmu);
+		if (!ret)
+			ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu, probe_table);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	if (ret) {
+		pr_info("failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
+	ret = armpmu_register(pmu, -1);
+	if (ret)
+		goto out_destroy;
+
+	return 0;
+
+out_destroy:
+	cpu_pmu_destroy(pmu);
+out_free:
+	pr_info("failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
diff --git a/drivers/phy/phy-tusb1210.c b/drivers/phy/phy-tusb1210.c
index 2535d79..4f6d5e7 100644
--- a/drivers/phy/phy-tusb1210.c
+++ b/drivers/phy/phy-tusb1210.c
@@ -61,32 +61,26 @@
 
 static int tusb1210_probe(struct ulpi *ulpi)
 {
-	struct gpio_desc *gpio;
 	struct tusb1210 *tusb;
 	u8 val, reg;
-	int ret;
 
 	tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
 	if (!tusb)
 		return -ENOMEM;
 
-	gpio = devm_gpiod_get(&ulpi->dev, "reset");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_reset = gpio;
-	}
+	tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
+						   GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_reset))
+		return PTR_ERR(tusb->gpio_reset);
 
-	gpio = devm_gpiod_get(&ulpi->dev, "cs");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_cs = gpio;
-	}
+	gpiod_set_value_cansleep(tusb->gpio_reset, 1);
+
+	tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_cs))
+		return PTR_ERR(tusb->gpio_cs);
+
+	gpiod_set_value_cansleep(tusb->gpio_cs, 1);
 
 	/*
 	 * VENDOR_SPECIFIC2 register in TUSB1210 can be used for configuring eye
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 100d9ac..84dd2ed 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,12 @@
 	  Requires ACPI/FDT device enumeration code to set up a platform
 	  device.
 
+config PINCTRL_DIGICOLOR
+	bool
+	depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
+	select PINMUX
+	select GENERIC_PINCONF
+
 config PINCTRL_LANTIQ
 	bool
 	depends on LANTIQ
@@ -240,6 +246,7 @@
 source "drivers/pinctrl/sh-pfc/Kconfig"
 source "drivers/pinctrl/spear/Kconfig"
 source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/uniphier/Kconfig"
 source "drivers/pinctrl/vt8500/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
 
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index f4216d9..cad077c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -2,12 +2,10 @@
 
 subdir-ccflags-$(CONFIG_DEBUG_PINCTRL)	+= -DDEBUG
 
-obj-$(CONFIG_PINCTRL)		+= core.o pinctrl-utils.o
+obj-y				+= core.o pinctrl-utils.o
 obj-$(CONFIG_PINMUX)		+= pinmux.o
 obj-$(CONFIG_PINCONF)		+= pinconf.o
-ifeq ($(CONFIG_OF),y)
-obj-$(CONFIG_PINCTRL)		+= devicetree.o
-endif
+obj-$(CONFIG_OF)		+= devicetree.o
 obj-$(CONFIG_GENERIC_PINCONF)	+= pinconf-generic.o
 obj-$(CONFIG_PINCTRL_ADI2)	+= pinctrl-adi2.o
 obj-$(CONFIG_PINCTRL_AS3722)	+= pinctrl-as3722.o
@@ -15,6 +13,7 @@
 obj-$(CONFIG_PINCTRL_BF60x)	+= pinctrl-adi2-bf60x.o
 obj-$(CONFIG_PINCTRL_AT91)	+= pinctrl-at91.o
 obj-$(CONFIG_PINCTRL_AMD)	+= pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_DIGICOLOR)	+= pinctrl-digicolor.o
 obj-$(CONFIG_PINCTRL_FALCON)	+= pinctrl-falcon.o
 obj-$(CONFIG_PINCTRL_MESON)	+= meson/
 obj-$(CONFIG_PINCTRL_PALMAS)	+= pinctrl-palmas.o
@@ -51,5 +50,6 @@
 obj-$(CONFIG_PINCTRL_SH_PFC)	+= sh-pfc/
 obj-$(CONFIG_PLAT_SPEAR)	+= spear/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
+obj-$(CONFIG_ARCH_UNIPHIER)	+= uniphier/
 obj-$(CONFIG_ARCH_VT8500)	+= vt8500/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 6177315..8efa235 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -586,9 +586,9 @@
 		ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(data->irq, handle_edge_irq);
+		irq_set_handler_locked(data, handle_edge_irq);
 	else
-		__irq_set_handler_locked(data->irq, handle_level_irq);
+		irq_set_handler_locked(data, handle_level_irq);
 
 	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 
@@ -989,7 +989,6 @@
 		irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
 				handle_level_irq);
 		irq_set_chip_data(irq, pc);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < BCM2835_NUM_BANKS; i++) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 8b8f3a0..69723e0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -231,8 +231,7 @@
 
 	pindesc = pin_desc_get(pctldev, number);
 	if (pindesc != NULL) {
-		pr_err("pin %d already registered on %s\n", number,
-		       pctldev->desc->name);
+		dev_err(pctldev->dev, "pin %d already registered\n", number);
 		return -EINVAL;
 	}
 
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 0bbf7d7..fe04e74 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -97,13 +97,7 @@
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 {
-	struct pinctrl_dev *pctldev;
-
-	pctldev = get_pinctrl_dev_from_of_node(np);
-	if (!pctldev)
-		return NULL;
-
-	return pctldev;
+	return get_pinctrl_dev_from_of_node(np);
 }
 
 static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 12ef544..debe121 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -87,6 +87,13 @@
 	help
 	  Say Y here to enable the imx6sx pinctrl driver
 
+config PINCTRL_IMX6UL
+	bool "IMX6UL pinctrl driver"
+	depends on SOC_IMX6UL
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx6ul pinctrl driver
+
 config PINCTRL_IMX7D
 	bool "IMX7D pinctrl driver"
 	depends on SOC_IMX7D
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 343cb43..d44c9e2 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -12,6 +12,7 @@
 obj-$(CONFIG_PINCTRL_IMX6Q)	+= pinctrl-imx6dl.o
 obj-$(CONFIG_PINCTRL_IMX6SL)	+= pinctrl-imx6sl.o
 obj-$(CONFIG_PINCTRL_IMX6SX)	+= pinctrl-imx6sx.o
+obj-$(CONFIG_PINCTRL_IMX6UL)	+= pinctrl-imx6ul.o
 obj-$(CONFIG_PINCTRL_IMX7D)	+= pinctrl-imx7d.o
 obj-$(CONFIG_PINCTRL_VF610)	+= pinctrl-vf610.o
 obj-$(CONFIG_PINCTRL_MXS)	+= pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
new file mode 100644
index 0000000..08e7576
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx6ul_pads {
+	MX6UL_PAD_RESERVE0 = 0,
+	MX6UL_PAD_RESERVE1 = 1,
+	MX6UL_PAD_RESERVE2 = 2,
+	MX6UL_PAD_RESERVE3 = 3,
+	MX6UL_PAD_RESERVE4 = 4,
+	MX6UL_PAD_RESERVE5 = 5,
+	MX6UL_PAD_RESERVE6 = 6,
+	MX6UL_PAD_RESERVE7 = 7,
+	MX6UL_PAD_RESERVE8 = 8,
+	MX6UL_PAD_RESERVE9 = 9,
+	MX6UL_PAD_RESERVE10 = 10,
+	MX6UL_PAD_SNVS_TAMPER4 = 11,
+	MX6UL_PAD_RESERVE12 = 12,
+	MX6UL_PAD_RESERVE13 = 13,
+	MX6UL_PAD_RESERVE14 = 14,
+	MX6UL_PAD_RESERVE15 = 15,
+	MX6UL_PAD_RESERVE16 = 16,
+	MX6UL_PAD_JTAG_MOD = 17,
+	MX6UL_PAD_JTAG_TMS = 18,
+	MX6UL_PAD_JTAG_TDO = 19,
+	MX6UL_PAD_JTAG_TDI = 20,
+	MX6UL_PAD_JTAG_TCK = 21,
+	MX6UL_PAD_JTAG_TRST_B = 22,
+	MX6UL_PAD_GPIO1_IO00 = 23,
+	MX6UL_PAD_GPIO1_IO01 = 24,
+	MX6UL_PAD_GPIO1_IO02 = 25,
+	MX6UL_PAD_GPIO1_IO03 = 26,
+	MX6UL_PAD_GPIO1_IO04 = 27,
+	MX6UL_PAD_GPIO1_IO05 = 28,
+	MX6UL_PAD_GPIO1_IO06 = 29,
+	MX6UL_PAD_GPIO1_IO07 = 30,
+	MX6UL_PAD_GPIO1_IO08 = 31,
+	MX6UL_PAD_GPIO1_IO09 = 32,
+	MX6UL_PAD_UART1_TX_DATA = 33,
+	MX6UL_PAD_UART1_RX_DATA = 34,
+	MX6UL_PAD_UART1_CTS_B = 35,
+	MX6UL_PAD_UART1_RTS_B = 36,
+	MX6UL_PAD_UART2_TX_DATA = 37,
+	MX6UL_PAD_UART2_RX_DATA = 38,
+	MX6UL_PAD_UART2_CTS_B = 39,
+	MX6UL_PAD_UART2_RTS_B = 40,
+	MX6UL_PAD_UART3_TX_DATA = 41,
+	MX6UL_PAD_UART3_RX_DATA = 42,
+	MX6UL_PAD_UART3_CTS_B = 43,
+	MX6UL_PAD_UART3_RTS_B = 44,
+	MX6UL_PAD_UART4_TX_DATA = 45,
+	MX6UL_PAD_UART4_RX_DATA = 46,
+	MX6UL_PAD_UART5_TX_DATA = 47,
+	MX6UL_PAD_UART5_RX_DATA = 48,
+	MX6UL_PAD_ENET1_RX_DATA0 = 49,
+	MX6UL_PAD_ENET1_RX_DATA1 = 50,
+	MX6UL_PAD_ENET1_RX_EN = 51,
+	MX6UL_PAD_ENET1_TX_DATA0 = 52,
+	MX6UL_PAD_ENET1_TX_DATA1 = 53,
+	MX6UL_PAD_ENET1_TX_EN = 54,
+	MX6UL_PAD_ENET1_TX_CLK = 55,
+	MX6UL_PAD_ENET1_RX_ER = 56,
+	MX6UL_PAD_ENET2_RX_DATA0 = 57,
+	MX6UL_PAD_ENET2_RX_DATA1 = 58,
+	MX6UL_PAD_ENET2_RX_EN = 59,
+	MX6UL_PAD_ENET2_TX_DATA0 = 60,
+	MX6UL_PAD_ENET2_TX_DATA1 = 61,
+	MX6UL_PAD_ENET2_TX_EN = 62,
+	MX6UL_PAD_ENET2_TX_CLK = 63,
+	MX6UL_PAD_ENET2_RX_ER = 64,
+	MX6UL_PAD_LCD_CLK = 65,
+	MX6UL_PAD_LCD_ENABLE = 66,
+	MX6UL_PAD_LCD_HSYNC = 67,
+	MX6UL_PAD_LCD_VSYNC = 68,
+	MX6UL_PAD_LCD_RESET = 69,
+	MX6UL_PAD_LCD_DATA00 = 70,
+	MX6UL_PAD_LCD_DATA01 = 71,
+	MX6UL_PAD_LCD_DATA02 = 72,
+	MX6UL_PAD_LCD_DATA03 = 73,
+	MX6UL_PAD_LCD_DATA04 = 74,
+	MX6UL_PAD_LCD_DATA05 = 75,
+	MX6UL_PAD_LCD_DATA06 = 76,
+	MX6UL_PAD_LCD_DATA07 = 77,
+	MX6UL_PAD_LCD_DATA08 = 78,
+	MX6UL_PAD_LCD_DATA09 = 79,
+	MX6UL_PAD_LCD_DATA10 = 80,
+	MX6UL_PAD_LCD_DATA11 = 81,
+	MX6UL_PAD_LCD_DATA12 = 82,
+	MX6UL_PAD_LCD_DATA13 = 83,
+	MX6UL_PAD_LCD_DATA14 = 84,
+	MX6UL_PAD_LCD_DATA15 = 85,
+	MX6UL_PAD_LCD_DATA16 = 86,
+	MX6UL_PAD_LCD_DATA17 = 87,
+	MX6UL_PAD_LCD_DATA18 = 88,
+	MX6UL_PAD_LCD_DATA19 = 89,
+	MX6UL_PAD_LCD_DATA20 = 90,
+	MX6UL_PAD_LCD_DATA21 = 91,
+	MX6UL_PAD_LCD_DATA22 = 92,
+	MX6UL_PAD_LCD_DATA23 = 93,
+	MX6UL_PAD_NAND_RE_B = 94,
+	MX6UL_PAD_NAND_WE_B = 95,
+	MX6UL_PAD_NAND_DATA00 = 96,
+	MX6UL_PAD_NAND_DATA01 = 97,
+	MX6UL_PAD_NAND_DATA02 = 98,
+	MX6UL_PAD_NAND_DATA03 = 99,
+	MX6UL_PAD_NAND_DATA04 = 100,
+	MX6UL_PAD_NAND_DATA05 = 101,
+	MX6UL_PAD_NAND_DATA06 = 102,
+	MX6UL_PAD_NAND_DATA07 = 103,
+	MX6UL_PAD_NAND_ALE = 104,
+	MX6UL_PAD_NAND_WP_B = 105,
+	MX6UL_PAD_NAND_READY_B = 106,
+	MX6UL_PAD_NAND_CE0_B = 107,
+	MX6UL_PAD_NAND_CE1_B = 108,
+	MX6UL_PAD_NAND_CLE = 109,
+	MX6UL_PAD_NAND_DQS = 110,
+	MX6UL_PAD_SD1_CMD = 111,
+	MX6UL_PAD_SD1_CLK = 112,
+	MX6UL_PAD_SD1_DATA0 = 113,
+	MX6UL_PAD_SD1_DATA1 = 114,
+	MX6UL_PAD_SD1_DATA2 = 115,
+	MX6UL_PAD_SD1_DATA3 = 116,
+	MX6UL_PAD_CSI_MCLK = 117,
+	MX6UL_PAD_CSI_PIXCLK = 118,
+	MX6UL_PAD_CSI_VSYNC = 119,
+	MX6UL_PAD_CSI_HSYNC = 120,
+	MX6UL_PAD_CSI_DATA00 = 121,
+	MX6UL_PAD_CSI_DATA01 = 122,
+	MX6UL_PAD_CSI_DATA02 = 123,
+	MX6UL_PAD_CSI_DATA03 = 124,
+	MX6UL_PAD_CSI_DATA04 = 125,
+	MX6UL_PAD_CSI_DATA05 = 126,
+	MX6UL_PAD_CSI_DATA06 = 127,
+	MX6UL_PAD_CSI_DATA07 = 128,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07),
+};
+
+static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
+	.pins = imx6ul_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+};
+
+static struct of_device_id imx6ul_pinctrl_of_match[] = {
+	{ .compatible = "fsl,imx6ul-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int imx6ul_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx6ul_pinctrl_info);
+}
+
+static struct platform_driver imx6ul_pinctrl_driver = {
+	.driver = {
+		.name = "imx6ul-pinctrl",
+		.of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+	},
+	.probe = imx6ul_pinctrl_probe,
+	.remove = imx_pinctrl_remove,
+};
+
+static int __init imx6ul_pinctrl_init(void)
+{
+	return platform_driver_register(&imx6ul_pinctrl_driver);
+}
+arch_initcall(imx6ul_pinctrl_init);
+
+static void __exit imx6ul_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx6ul_pinctrl_driver);
+}
+module_exit(imx6ul_pinctrl_exit);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
+MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 2062c22..dac4865 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -12,11 +12,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include <linux/kernel.h>
@@ -146,7 +141,7 @@
 struct byt_gpio {
 	struct gpio_chip		chip;
 	struct platform_device		*pdev;
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	void __iomem			*reg_base;
 	struct pinctrl_gpio_range	*range;
 	struct byt_gpio_pin_context	*saved_context;
@@ -174,11 +169,11 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
 	writel(value, reg);
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
@@ -201,6 +196,9 @@
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
 	u32 value, gpio_mux;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
@@ -214,18 +212,16 @@
 	value = readl(reg) & BYT_PIN_MUX;
 	gpio_mux = byt_get_gpio_mux(vg, offset);
 	if (WARN_ON(gpio_mux != value)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&vg->lock, flags);
 		value = readl(reg) & ~BYT_PIN_MUX;
 		value |= gpio_mux;
 		writel(value, reg);
-		spin_unlock_irqrestore(&vg->lock, flags);
 
 		dev_warn(&vg->pdev->dev,
 			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
 	pm_runtime_get(&vg->pdev->dev);
 
 	return 0;
@@ -250,7 +246,7 @@
 	if (offset >= vg->chip.ngpio)
 		return -EINVAL;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	WARN(value & BYT_DIRECT_IRQ_EN,
@@ -265,11 +261,11 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -277,7 +273,15 @@
 static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
-	return readl(reg) & BYT_LEVEL;
+	struct byt_gpio *vg = to_byt_gpio(chip);
+	unsigned long flags;
+	u32 val;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+	val = readl(reg);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	return val & BYT_LEVEL;
 }
 
 static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -287,7 +291,7 @@
 	unsigned long flags;
 	u32 old_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	old_val = readl(reg);
 
@@ -296,7 +300,7 @@
 	else
 		writel(old_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -306,13 +310,13 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	value = readl(reg) | BYT_DIR_MASK;
 	value &= ~BYT_INPUT_EN;		/* active low */
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -326,7 +330,7 @@
 	unsigned long flags;
 	u32 reg_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * Before making any direction modifications, do a check if gpio
@@ -345,7 +349,7 @@
 	else
 		writel(reg_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -354,18 +358,19 @@
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	int i;
-	unsigned long flags;
 	u32 conf0, val, offs;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	for (i = 0; i < vg->chip.ngpio; i++) {
 		const char *pull_str = NULL;
 		const char *pull = NULL;
+		unsigned long flags;
 		const char *label;
 		offs = vg->range->pins[i] * 16;
+
+		raw_spin_lock_irqsave(&vg->lock, flags);
 		conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
 		val = readl(vg->reg_base + offs + BYT_VAL_REG);
+		raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 		label = gpiochip_is_requested(chip, i);
 		if (!label)
@@ -418,7 +423,6 @@
 
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -450,8 +454,10 @@
 	unsigned offset = irqd_to_hwirq(d);
 	void __iomem *reg;
 
+	raw_spin_lock(&vg->lock);
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
 	writel(BIT(offset % 32), reg);
+	raw_spin_unlock(&vg->lock);
 }
 
 static void byt_irq_unmask(struct irq_data *d)
@@ -463,9 +469,9 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	switch (irqd_get_trigger_type(d)) {
@@ -486,7 +492,7 @@
 
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
@@ -578,7 +584,7 @@
 	if (IS_ERR(vg->reg_base))
 		return PTR_ERR(vg->reg_base);
 
-	spin_lock_init(&vg->lock);
+	raw_spin_lock_init(&vg->lock);
 
 	gc = &vg->chip;
 	gc->label = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3f737da..2d5d3dd 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -174,7 +174,7 @@
 	struct pinctrl_dev *pctldev;
 	struct gpio_chip chip;
 	void __iomem *regs;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned intr_lines[16];
 	const struct chv_community *community;
 	u32 saved_intmask;
@@ -720,13 +720,13 @@
 	u32 ctrl0, ctrl1;
 	bool locked;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
 	locked = chv_pad_locked(pctrl, offset);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
 		seq_puts(s, "GPIO ");
@@ -789,14 +789,14 @@
 
 	grp = &pctrl->community->groups[group];
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/* Check first that the pad is not locked */
 	for (i = 0; i < grp->npins; i++) {
 		if (chv_pad_locked(pctrl, grp->pins[i])) {
 			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
 				 grp->pins[i]);
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	}
@@ -839,7 +839,7 @@
 			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -853,13 +853,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	if (chv_pad_locked(pctrl, offset)) {
 		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 		if (!(value & CHV_PADCTRL0_GPIOEN)) {
 			/* Locked so cannot enable */
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	} else {
@@ -899,7 +899,7 @@
 		chv_writel(value, reg);
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -913,13 +913,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
 	value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
 	chv_writel(value, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +931,7 @@
 	unsigned long flags;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
 	if (input)
@@ -940,7 +940,7 @@
 		ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -965,10 +965,10 @@
 	u16 arg = 0;
 	u32 term;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
 
@@ -1042,7 +1042,7 @@
 	unsigned long flags;
 	u32 ctrl0, pull;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(reg);
 
 	switch (param) {
@@ -1065,7 +1065,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1083,7 +1083,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1091,12 +1091,12 @@
 		break;
 
 	default:
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 		return -EINVAL;
 	}
 
 	chv_writel(ctrl0, reg);
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1169,9 +1169,12 @@
 {
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	int pin = chv_gpio_offset_to_pin(pctrl, offset);
+	unsigned long flags;
 	u32 ctrl0, cfg;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1189,7 +1192,7 @@
 	void __iomem *reg;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
 	ctrl0 = readl(reg);
@@ -1201,7 +1204,7 @@
 
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1209,8 +1212,11 @@
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
 	u32 ctrl0, direction;
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1248,14 +1254,14 @@
 	int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
 	u32 intr_line;
 
-	spin_lock(&pctrl->lock);
+	raw_spin_lock(&pctrl->lock);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
 	intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
 	chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
 
-	spin_unlock(&pctrl->lock);
+	raw_spin_unlock(&pctrl->lock);
 }
 
 static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1266,7 +1272,7 @@
 	u32 value, intr_line;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1279,7 +1285,7 @@
 		value |= BIT(intr_line);
 	chv_writel(value, pctrl->regs + CHV_INTMASK);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1313,6 +1319,7 @@
 		unsigned long flags;
 		u32 intsel, value;
 
+		raw_spin_lock_irqsave(&pctrl->lock, flags);
 		intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 		intsel &= CHV_PADCTRL0_INTSEL_MASK;
 		intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1323,12 +1330,11 @@
 		else
 			handler = handle_edge_irq;
 
-		spin_lock_irqsave(&pctrl->lock, flags);
 		if (!pctrl->intr_lines[intsel]) {
-			__irq_set_handler_locked(d->irq, handler);
+			irq_set_handler_locked(d, handler);
 			pctrl->intr_lines[intsel] = offset;
 		}
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 	}
 
 	chv_gpio_irq_unmask(d);
@@ -1344,7 +1350,7 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/*
 	 * Pins which can be used as shared interrupt are configured in
@@ -1389,11 +1395,11 @@
 	pctrl->intr_lines[value] = offset;
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1412,7 +1418,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long pending;
 	u32 intr_line;
 
@@ -1505,7 +1511,7 @@
 	if (i == ARRAY_SIZE(chv_communities))
 		return -ENODEV;
 
-	spin_lock_init(&pctrl->lock);
+	raw_spin_lock_init(&pctrl->lock);
 	pctrl->dev = &pdev->dev;
 
 #ifdef CONFIG_PM_SLEEP
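
The point of the cherryview conversion above is that all of these lock sites sit in irq_chip callbacks and pinmux paths that run in hard interrupt context; on PREEMPT_RT a plain spinlock_t becomes a sleeping lock and cannot be taken there, while raw_spinlock_t keeps the classic spinning semantics. A minimal sketch of the resulting pattern, with illustrative names rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative sketch, not the cherryview code itself. */
struct example_ctrl {
	raw_spinlock_t lock;	/* was: spinlock_t lock; */
	u32 shadow;
};

static void example_update(struct example_ctrl *c, u32 bits)
{
	unsigned long flags;

	/* Safe from hard-IRQ context even on PREEMPT_RT. */
	raw_spin_lock_irqsave(&c->lock, flags);
	c->shadow |= bits;
	raw_spin_unlock_irqrestore(&c->lock, flags);
}

Note also that chv_gpio_get() and chv_gpio_get_direction() gain the lock around their readl(): the register read itself is atomic, but taking the lock keeps it ordered against concurrent read-modify-write updates.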
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index f9ee0d6..bb377c1 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -758,9 +758,9 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -840,7 +840,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	chained_irq_enter(chip, desc);
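
Both Intel drivers also migrate to the desc-based genirq accessors: chained handlers now derive the chip and handler data from the irq_desc they are handed, and __irq_set_handler_locked(d->irq, handler) becomes irq_set_handler_locked(d, handler). A hedged sketch of the new-style handler, assuming the 4.3 irq core and hypothetical names; the demux body is elided:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/gpio/driver.h>

/* irq is unused; it remains only for the 4.3-era flow-handler signature. */
static void example_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);	/* not irq_get_chip(irq) */
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	/* ... read the pending register via gc's bank state and
	 * generic_handle_irq() each child interrupt ... */
	chained_irq_exit(chip, desc);
}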
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index d0c811d..ad27184 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -385,6 +385,7 @@
 	.driver = {
 		.name = "mediatek-mt8173-pinctrl",
 		.of_match_table = mt8173_pctrl_match,
+		.pm = &mtk_eint_pm_ops,
 	},
 };
 
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index ad1ea16..7726c6c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -33,6 +33,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <dt-bindings/pinctrl/mt65xx.h>
 
 #include "../core.h"
@@ -702,7 +703,7 @@
 
 	ret = mtk_pctrl_is_function_valid(pctl, g->pin, function);
 	if (!ret) {
-		dev_err(pctl->dev, "invaild function %d on group %d .\n",
+		dev_err(pctl->dev, "invalid function %d on group %d.\n",
 				function, group);
 		return -EINVAL;
 	}
@@ -1062,6 +1063,77 @@
 	return 0;
 }
 
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+	int shift = d->hwirq & 0x1f;
+	int reg = d->hwirq >> 5;
+
+	if (on)
+		pctl->wake_mask[reg] |= BIT(shift);
+	else
+		pctl->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + (port << 2);
+		writel_relaxed(~buf[port], reg + chip->mask_set);
+		writel_relaxed(buf[port], reg + chip->mask_clr);
+	}
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + chip->mask + (port << 2);
+		/* Mask is 0 when irq is enabled, and 1 when disabled. */
+		buf[port] = ~readl_relaxed(reg);
+	}
+}
+
+static int mtk_eint_suspend(struct device *device)
+{
+	void __iomem *reg;
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
+	reg = pctl->eint_reg_base;
+	mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
+	mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
+
+	return 0;
+}
+
+static int mtk_eint_resume(struct device *device)
+{
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
+	mtk_eint_chip_write_mask(eint_offsets,
+			pctl->eint_reg_base, pctl->cur_mask);
+
+	return 0;
+}
+
+const struct dev_pm_ops mtk_eint_pm_ops = {
+	.suspend = mtk_eint_suspend,
+	.resume = mtk_eint_resume,
+};
+
 static void mtk_eint_ack(struct irq_data *d)
 {
 	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
@@ -1076,10 +1148,12 @@
 
 static struct irq_chip mtk_pinctrl_irq_chip = {
 	.name = "mt-eint",
+	.irq_disable = mtk_eint_mask,
 	.irq_mask = mtk_eint_mask,
 	.irq_unmask = mtk_eint_unmask,
 	.irq_ack = mtk_eint_ack,
 	.irq_set_type = mtk_eint_set_type,
+	.irq_set_wake = mtk_eint_irq_set_wake,
 	.irq_request_resources = mtk_pinctrl_irq_request_resources,
 	.irq_release_resources = mtk_pinctrl_irq_release_resources,
 };
@@ -1118,8 +1192,8 @@
 
 static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct mtk_pinctrl *pctl = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned int status, eint_num;
 	int offset, index, virq;
 	const struct mtk_eint_offsets *eint_offsets =
@@ -1202,12 +1276,6 @@
 	return 0;
 }
 
-static struct pinctrl_desc mtk_pctrl_desc = {
-	.confops	= &mtk_pconf_ops,
-	.pctlops	= &mtk_pctrl_ops,
-	.pmxops		= &mtk_pmx_ops,
-};
-
 int mtk_pctrl_init(struct platform_device *pdev,
 		const struct mtk_pinctrl_devdata *data,
 		struct regmap *regmap)
@@ -1217,7 +1285,7 @@
 	struct device_node *np = pdev->dev.of_node, *node;
 	struct property *prop;
 	struct resource *res;
-	int i, ret, irq;
+	int i, ret, irq, ports_buf;
 
 	pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
 	if (!pctl)
@@ -1265,12 +1333,17 @@
 
 	for (i = 0; i < pctl->devdata->npins; i++)
 		pins[i] = pctl->devdata->pins[i].pin;
-	mtk_pctrl_desc.name = dev_name(&pdev->dev);
-	mtk_pctrl_desc.owner = THIS_MODULE;
-	mtk_pctrl_desc.pins = pins;
-	mtk_pctrl_desc.npins = pctl->devdata->npins;
+
+	pctl->pctl_desc.name = dev_name(&pdev->dev);
+	pctl->pctl_desc.owner = THIS_MODULE;
+	pctl->pctl_desc.pins = pins;
+	pctl->pctl_desc.npins = pctl->devdata->npins;
+	pctl->pctl_desc.confops = &mtk_pconf_ops;
+	pctl->pctl_desc.pctlops = &mtk_pctrl_ops;
+	pctl->pctl_desc.pmxops = &mtk_pmx_ops;
 	pctl->dev = &pdev->dev;
-	pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
+
+	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
@@ -1319,6 +1392,21 @@
 		goto chip_error;
 	}
 
+	ports_buf = pctl->devdata->eint_offsets.ports;
+	pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->wake_mask), GFP_KERNEL);
+	if (!pctl->wake_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
+	pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->cur_mask), GFP_KERNEL);
+	if (!pctl->cur_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
 	pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
 					     sizeof(int), GFP_KERNEL);
 	if (!pctl->eint_dual_edges) {
@@ -1348,11 +1436,9 @@
 		irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
 			handle_level_irq);
 		irq_set_chip_data(virq, pctl);
-		set_irq_flags(virq, IRQF_VALID);
 	}
 
 	irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
-	set_irq_flags(irq, IRQF_VALID);
 	return 0;
 
 chip_error:
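
The mediatek wakeup support added above keeps two bitmaps with one u32 word per 32 EINT lines: wake_mask records what .irq_set_wake asked for, and cur_mask snapshots the live hardware mask at suspend so resume can restore it after the wake mask has been in effect. A standalone model of the per-hwirq bit bookkeeping (values illustrative; builds with any C compiler):

#include <stdint.h>
#include <stdio.h>

/* Model of mtk_eint_irq_set_wake(): word = hwirq >> 5, bit = hwirq & 0x1f. */
static void set_wake(uint32_t *mask, unsigned int hwirq, int on)
{
	unsigned int shift = hwirq & 0x1f;	/* bit within the 32-bit word */
	unsigned int reg = hwirq >> 5;		/* which word */

	if (on)
		mask[reg] |= 1u << shift;
	else
		mask[reg] &= ~(1u << shift);
}

int main(void)
{
	uint32_t wake_mask[4] = { 0 };

	set_wake(wake_mask, 37, 1);		/* hwirq 37 -> word 1, bit 5 */
	printf("word1 = 0x%08x\n", wake_mask[1]);	/* 0x00000020 */
	return 0;
}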
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 30213e5..55a5343 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -256,6 +256,7 @@
 struct mtk_pinctrl {
 	struct regmap	*regmap1;
 	struct regmap	*regmap2;
+	struct pinctrl_desc pctl_desc;
 	struct device           *dev;
 	struct gpio_chip	*chip;
 	struct mtk_pinctrl_group	*groups;
@@ -266,6 +267,8 @@
 	void __iomem		*eint_reg_base;
 	struct irq_domain	*domain;
 	int			*eint_dual_edges;
+	u32 *wake_mask;
+	u32 *cur_mask;
 };
 
 int mtk_pctrl_init(struct platform_device *pdev,
@@ -281,4 +284,6 @@
 		const struct mtk_pin_ies_smt_set *ies_smt_infos, unsigned int info_num,
 		unsigned int pin, unsigned char align, int value);
 
+extern const struct dev_pm_ops mtk_eint_pm_ops;
+
 #endif /* __PINCTRL_MTK_COMMON_H */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index c748407..8392083 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -355,25 +355,6 @@
 	PINCTRL_PIN(DB8500_PIN_AC27, "GPIO267_AC27"),
 };
 
-#define DB8500_GPIO_RANGE(a, b, c) { .name = "DB8500", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8500_ranges[] = {
-	DB8500_GPIO_RANGE(0, 0, 32),
-	DB8500_GPIO_RANGE(1, 32, 5),
-	DB8500_GPIO_RANGE(2, 64, 32),
-	DB8500_GPIO_RANGE(3, 96, 2),
-	DB8500_GPIO_RANGE(4, 128, 32),
-	DB8500_GPIO_RANGE(5, 160, 12),
-	DB8500_GPIO_RANGE(6, 192, 32),
-	DB8500_GPIO_RANGE(7, 224, 7),
-	DB8500_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -1238,8 +1219,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
-	.gpio_ranges = nmk_db8500_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges),
 	.pins = nmk_db8500_pins,
 	.npins = ARRAY_SIZE(nmk_db8500_pins),
 	.functions = nmk_db8500_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index d7ba544..2860eaf 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
@@ -341,28 +341,6 @@
 	PINCTRL_PIN(DB8540_PIN_D17, "GPIO267_D17"),
 };
 
-#define DB8540_GPIO_RANGE(a, b, c) { .name = "db8540", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8540_ranges[] = {
-	DB8540_GPIO_RANGE(0, 0, 18),
-	DB8540_GPIO_RANGE(0, 22, 7),
-	DB8540_GPIO_RANGE(1, 33, 6),
-	DB8540_GPIO_RANGE(2, 64, 4),
-	DB8540_GPIO_RANGE(2, 70, 18),
-	DB8540_GPIO_RANGE(3, 116, 12),
-	DB8540_GPIO_RANGE(4, 128, 32),
-	DB8540_GPIO_RANGE(5, 160, 9),
-	DB8540_GPIO_RANGE(6, 192, 23),
-	DB8540_GPIO_RANGE(6, 219, 5),
-	DB8540_GPIO_RANGE(7, 224, 9),
-	DB8540_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -1247,8 +1225,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
-	.gpio_ranges = nmk_db8540_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
 	.pins = nmk_db8540_pins,
 	.npins = ARRAY_SIZE(nmk_db8540_pins),
 	.functions = nmk_db8540_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 2cd7147..587b222 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -264,20 +264,6 @@
 	PINCTRL_PIN(STN8815_PIN_J22, "GPIO123_J22"),
 };
 
-#define STN8815_GPIO_RANGE(a, b, c) { .name = "STN8815", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
-	STN8815_GPIO_RANGE(0, 0, 32),
-	STN8815_GPIO_RANGE(1, 32, 32),
-	STN8815_GPIO_RANGE(2, 64, 32),
-	STN8815_GPIO_RANGE(3, 96, 28),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -285,9 +271,11 @@
  */
 
 /* Altfunction A */
-static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
-	STN8815_PIN_C5, STN8815_PIN_A4, STN8815_PIN_B5, STN8815_PIN_D6,
-	STN8815_PIN_C6, STN8815_PIN_B6 };
+static const unsigned u0txrx_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5 };
+static const unsigned u0ctsrts_a_1_pins[] = { STN8815_PIN_C5, STN8815_PIN_B6 };
+/* Modem pins: DCD, DSR, RI, DTR */
+static const unsigned u0modem_a_1_pins[] = { STN8815_PIN_A4, STN8815_PIN_B5,
+	STN8815_PIN_D6, STN8815_PIN_C6 };
 static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
 	STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
 	STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
@@ -304,7 +292,9 @@
 			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
 
 static const struct nmk_pingroup nmk_stn8815_groups[] = {
-	STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
 	STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
@@ -318,7 +308,7 @@
 #define STN8815_FUNC_GROUPS(a, b...)	   \
 static const char * const a##_groups[] = { b };
 
-STN8815_FUNC_GROUPS(u0, "u0_a_1");
+STN8815_FUNC_GROUPS(u0, "u0txrx_a_1", "u0ctsrts_a_1", "u0modem_a_1");
 STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
 STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
 STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
@@ -342,8 +332,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
-	.gpio_ranges = nmk_stn8815_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_stn8815_ranges),
 	.pins = nmk_stn8815_pins,
 	.npins = ARRAY_SIZE(nmk_stn8815_pins),
 	.functions = nmk_stn8815_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 809d884..352ede1 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -203,6 +203,7 @@
 
 #define GPIO_BLOCK_SHIFT 5
 #define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)
 
 /* Register in the logic block */
 #define NMK_GPIO_DAT	0x00
@@ -282,8 +283,7 @@
 	void __iomem *prcm_base;
 };
 
-static struct nmk_gpio_chip *
-nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
 
 static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
 
@@ -843,10 +843,9 @@
 	clk_disable(nmk_chip->clk);
 }
 
-static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
-				   u32 status)
+static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
 {
-	struct irq_chip *host_chip = irq_get_chip(irq);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(host_chip, desc);
@@ -871,17 +870,16 @@
 	status = readl(nmk_chip->addr + NMK_GPIO_IS);
 	clk_disable(nmk_chip->clk);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
-static void nmk_gpio_latent_irq_handler(unsigned int irq,
-					   struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 	u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
 /* I/O Functions */
@@ -1012,6 +1010,7 @@
 		int irq = gpio_to_irq(gpio);
 		struct irq_desc	*desc = irq_to_desc(irq);
 		int pullidx = 0;
+		int val;
 
 		if (pull)
 			pullidx = data_out ? 1 : 2;
@@ -1021,6 +1020,10 @@
 			   label ?: "(none)",
 			   pulls[pullidx],
 			   (mode < 0) ? "unknown" : modes[mode]);
+
+		val = nmk_gpio_get_input(chip, offset);
+		seq_printf(s, " VAL %d", val);
+
 		/*
 		 * This races with request_irq(), set_irq_type(),
 		 * and set_irq_wake() ... but those are "rare".
@@ -1162,29 +1165,90 @@
 	}
 }
 
+/*
+ * We allocate the state container with devm* allocators, binding it to the
+ * first device that reaches this point; it doesn't matter whether that is
+ * the pin controller or the GPIO driver. However, we need to use the right
+ * platform device when looking up resources, so pay attention to pdev.
+ */
+static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
+						struct platform_device *pdev)
+{
+	struct nmk_gpio_chip *nmk_chip;
+	struct platform_device *gpio_pdev;
+	struct gpio_chip *chip;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	u32 id;
+
+	gpio_pdev = of_find_device_by_node(np);
+	if (!gpio_pdev) {
+		pr_err("populate \"%s\": device not found\n", np->name);
+		return ERR_PTR(-ENODEV);
+	}
+	if (of_property_read_u32(np, "gpio-bank", &id)) {
+		dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (id >= ARRAY_SIZE(nmk_gpio_chips))
+		return ERR_PTR(-EINVAL);
+
+	/* Already populated? */
+	nmk_chip = nmk_gpio_chips[id];
+	if (nmk_chip)
+		return nmk_chip;
+
+	nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
+	if (!nmk_chip)
+		return ERR_PTR(-ENOMEM);
+
+	nmk_chip->bank = id;
+	chip = &nmk_chip->chip;
+	chip->base = id * NMK_GPIO_PER_CHIP;
+	chip->ngpio = NMK_GPIO_PER_CHIP;
+	chip->label = dev_name(&gpio_pdev->dev);
+	chip->dev = &gpio_pdev->dev;
+
+	res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return ERR_CAST(base);
+	nmk_chip->addr = base;
+
+	clk = clk_get(&gpio_pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return ERR_CAST(clk);
+	clk_prepare(clk);
+	nmk_chip->clk = clk;
+
+	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+	nmk_gpio_chips[id] = nmk_chip;
+	return nmk_chip;
+}
+
 static int nmk_gpio_probe(struct platform_device *dev)
 {
 	struct device_node *np = dev->dev.of_node;
 	struct nmk_gpio_chip *nmk_chip;
 	struct gpio_chip *chip;
 	struct irq_chip *irqchip;
-	struct resource *res;
-	struct clk *clk;
 	int latent_irq;
 	bool supports_sleepmode;
-	void __iomem *base;
 	int irq;
 	int ret;
 
+	nmk_chip = nmk_gpio_populate_chip(np, dev);
+	if (IS_ERR(nmk_chip)) {
+		dev_err(&dev->dev, "could not populate nmk chip struct\n");
+		return PTR_ERR(nmk_chip);
+	}
+
 	if (of_get_property(np, "st,supports-sleepmode", NULL))
 		supports_sleepmode = true;
 	else
 		supports_sleepmode = false;
 
-	if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
-		dev_err(&dev->dev, "gpio-bank property not found\n");
-		return -EINVAL;
-	}
+	/* Correct platform device ID */
+	dev->id = nmk_chip->bank;
 
 	irq = platform_get_irq(dev, 0);
 	if (irq < 0)
@@ -1193,27 +1257,10 @@
 	/* It's OK for this IRQ not to be present */
 	latent_irq = platform_get_irq(dev, 1);
 
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&dev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
-
-	clk = devm_clk_get(&dev->dev, NULL);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-	clk_prepare(clk);
-
-	nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
-	if (!nmk_chip)
-		return -ENOMEM;
-
 	/*
 	 * The virt address in nmk_chip->addr is in the nomadik register space,
 	 * so we can simply convert the resource address, without remapping
 	 */
-	nmk_chip->bank = dev->id;
-	nmk_chip->clk = clk;
-	nmk_chip->addr = base;
 	nmk_chip->parent_irq = irq;
 	nmk_chip->latent_parent_irq = latent_irq;
 	nmk_chip->sleepmode = supports_sleepmode;
@@ -1228,10 +1275,6 @@
 	chip->set = nmk_gpio_set_output;
 	chip->dbg_show = nmk_gpio_dbg_show;
 	chip->can_sleep = false;
-	chip->base = dev->id * NMK_GPIO_PER_CHIP;
-	chip->ngpio = NMK_GPIO_PER_CHIP;
-	chip->label = dev_name(&dev->dev);
-	chip->dev = &dev->dev;
 	chip->owner = THIS_MODULE;
 
 	irqchip = &nmk_chip->irqchip;
@@ -1253,14 +1296,10 @@
 	clk_disable(nmk_chip->clk);
 	chip->of_node = np;
 
-	ret = gpiochip_add(&nmk_chip->chip);
+	ret = gpiochip_add(chip);
 	if (ret)
 		return ret;
 
-	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
-
-	nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
-
 	platform_set_drvdata(dev, nmk_chip);
 
 	/*
@@ -1320,35 +1359,40 @@
 	return 0;
 }
 
-static struct pinctrl_gpio_range *
-nmk_match_gpio_range(struct pinctrl_dev *pctldev, unsigned offset)
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
 {
-	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	int i;
+	struct nmk_gpio_chip *nmk_gpio;
 
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		struct pinctrl_gpio_range *range;
-
-		range = &npct->soc->gpio_ranges[i];
-		if (offset >= range->pin_base &&
-		    offset <= (range->pin_base + range->npins - 1))
-			return range;
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		nmk_gpio = nmk_gpio_chips[i];
+		if (!nmk_gpio)
+			continue;
+		if (pin >= nmk_gpio->chip.base &&
+			pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+			return nmk_gpio;
 	}
 	return NULL;
 }
 
+static struct gpio_chip *find_gc_from_pin(unsigned pin)
+{
+	struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+
+	if (nmk_gpio)
+		return &nmk_gpio->chip;
+	return NULL;
+}
+
 static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
 		   unsigned offset)
 {
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
+	struct gpio_chip *chip = find_gc_from_pin(offset);
 
-	range = nmk_match_gpio_range(pctldev, offset);
-	if (!range || !range->gc) {
+	if (!chip) {
 		seq_printf(s, "invalid pin offset");
 		return;
 	}
-	chip = range->gc;
 	nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
 }
 
@@ -1693,25 +1737,16 @@
 	}
 
 	for (i = 0; i < g->npins; i++) {
-		struct pinctrl_gpio_range *range;
 		struct nmk_gpio_chip *nmk_chip;
-		struct gpio_chip *chip;
 		unsigned bit;
 
-		range = nmk_match_gpio_range(pctldev, g->pins[i]);
-		if (!range) {
+		nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+		if (!nmk_chip) {
 			dev_err(npct->dev,
 				"invalid pin offset %d in group %s at index %d\n",
 				g->pins[i], g->name, i);
 			goto out_glitch;
 		}
-		if (!range->gc) {
-			dev_err(npct->dev, "GPIO chip missing in range for pin offset %d in group %s at index %d\n",
-				g->pins[i], g->name, i);
-			goto out_glitch;
-		}
-		chip = range->gc;
-		nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 		dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
 
 		clk_enable(nmk_chip->clk);
@@ -1827,25 +1862,17 @@
 	};
 	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	struct nmk_gpio_chip *nmk_chip;
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
 	unsigned bit;
 	pin_cfg_t cfg;
 	int pull, slpm, output, val, i;
 	bool lowemi, gpiomode, sleep;
 
-	range = nmk_match_gpio_range(pctldev, pin);
-	if (!range) {
-		dev_err(npct->dev, "invalid pin offset %d\n", pin);
+	nmk_chip = find_nmk_gpio_from_pin(pin);
+	if (!nmk_chip) {
+		dev_err(npct->dev, "invalid pin offset %d\n", pin);
 		return -EINVAL;
 	}
-	if (!range->gc) {
-		dev_err(npct->dev, "GPIO chip missing in range for pin %d\n",
-			pin);
-		return -EINVAL;
-	}
-	chip = range->gc;
-	nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 
 	for (i = 0; i < num_configs; i++) {
 		/*
@@ -1997,6 +2024,31 @@
 	if (version == PINCTRL_NMK_DB8540)
 		nmk_pinctrl_db8540_init(&npct->soc);
 
+	/*
+	 * Since we depend on the GPIO chips to provide clock and register base
+	 * for the pin control operations, make sure that we have these
+	 * populated before we continue. Follow the phandles to instantiate
+	 * them. The GPIO portion of the actual hardware may be probed before
+	 * or after this point: it shouldn't matter as the APIs are orthogonal.
+	 */
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		struct device_node *gpio_np;
+		struct nmk_gpio_chip *nmk_chip;
+
+		gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
+		if (gpio_np) {
+			dev_info(&pdev->dev,
+				 "populate NMK GPIO %d \"%s\"\n",
+				 i, gpio_np->name);
+			nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
+			if (IS_ERR(nmk_chip))
+				dev_err(&pdev->dev,
+					"could not populate nmk chip struct - continue anyway\n");
+			of_node_put(gpio_np);
+		}
+	}
+
 	prcm_np = of_parse_phandle(np, "prcm", 0);
 	if (prcm_np)
 		npct->prcm_base = of_iomap(prcm_np, 0);
@@ -2011,19 +2063,6 @@
 		}
 	}
 
-	/*
-	 * We need all the GPIO drivers to probe FIRST, or we will not be able
-	 * to obtain references to the struct gpio_chip * for them, and we
-	 * need this to proceed.
-	 */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		if (!nmk_gpio_chips[npct->soc->gpio_ranges[i].id]) {
-			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			return -EPROBE_DEFER;
-		}
-		npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[npct->soc->gpio_ranges[i].id]->chip;
-	}
-
 	nmk_pinctrl_desc.pins = npct->soc->pins;
 	nmk_pinctrl_desc.npins = npct->soc->npins;
 	npct->dev = &pdev->dev;
@@ -2034,10 +2073,6 @@
 		return PTR_ERR(npct->pctl);
 	}
 
-	/* We will handle a range of GPIO pins */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++)
-		pinctrl_add_gpio_range(npct->pctl, &npct->soc->gpio_ranges[i]);
-
 	platform_set_drvdata(pdev, npct);
 	dev_info(&pdev->dev, "initialized Nomadik pin control driver\n");
 
@@ -2072,15 +2107,15 @@
 
 static int __init nmk_gpio_init(void)
 {
-	int ret;
+	return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
 
-	ret = platform_driver_register(&nmk_gpio_driver);
-	if (ret)
-		return ret;
+static int __init nmk_pinctrl_init(void)
+{
 	return platform_driver_register(&nmk_pinctrl_driver);
 }
-
-core_initcall(nmk_gpio_init);
+core_initcall(nmk_pinctrl_init);
 
 MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
 MODULE_DESCRIPTION("Nomadik GPIO Driver");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index d8215f1..30bba2a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -121,8 +121,6 @@
 
 /**
  * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @gpio_ranges: An array of GPIO ranges for this SoC
- * @gpio_num_ranges: The number of GPIO ranges for this SoC
  * @pins:	An array describing all pins the pin controller affects.
  *		All pins which are also GPIOs must be listed first within the
  *		array, and be numbered identically to the GPIO controller's
@@ -137,8 +135,6 @@
  * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
  */
 struct nmk_pinctrl_soc_data {
-	struct pinctrl_gpio_range *gpio_ranges;
-	unsigned gpio_num_ranges;
 	const struct pinctrl_pin_desc *pins;
 	unsigned npins;
 	const struct nmk_function *functions;
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 1fc09dc..29a7bb1 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -61,8 +61,8 @@
 	const struct pinconf_ops *ops = pctldev->desc->confops;
 
 	if (!ops || !ops->pin_config_get) {
-		dev_dbg(pctldev->dev, "cannot get pin configuration, missing "
-			"pin_config_get() function in driver\n");
+		dev_dbg(pctldev->dev,
+			"cannot get pin configuration, .pin_config_get missing in driver\n");
 		return -ENOTSUPP;
 	}
 
@@ -202,18 +202,34 @@
 
 #ifdef CONFIG_DEBUG_FS
 
-void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
+		      unsigned long *configs, unsigned num_configs)
 {
-	struct pinctrl_dev *pctldev;
 	const struct pinconf_ops *confops;
 	int i;
 
-	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
 	if (pctldev)
 		confops = pctldev->desc->confops;
 	else
 		confops = NULL;
 
+	for (i = 0; i < num_configs; i++) {
+		seq_puts(s, "config ");
+		if (confops && confops->pin_config_config_dbg_show)
+			confops->pin_config_config_dbg_show(pctldev, s,
+							    configs[i]);
+		else
+			seq_printf(s, "%08lx", configs[i]);
+		seq_puts(s, "\n");
+	}
+}
+
+void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+{
+	struct pinctrl_dev *pctldev;
+
+	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+
 	switch (map->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
 		seq_printf(s, "pin ");
@@ -227,15 +243,8 @@
 
 	seq_printf(s, "%s\n", map->data.configs.group_or_pin);
 
-	for (i = 0; i < map->data.configs.num_configs; i++) {
-		seq_printf(s, "config ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-						map->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx", map->data.configs.configs[i]);
-		seq_printf(s, "\n");
-	}
+	pinconf_show_config(s, pctldev, map->data.configs.configs,
+			    map->data.configs.num_configs);
 }
 
 void pinconf_show_setting(struct seq_file *s,
@@ -243,9 +252,7 @@
 {
 	struct pinctrl_dev *pctldev = setting->pctldev;
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-	const struct pinconf_ops *confops = pctldev->desc->confops;
 	struct pin_desc *desc;
-	int i;
 
 	switch (setting->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
@@ -269,17 +276,8 @@
 	 * FIXME: We should really get the pin controller to dump the config
 	 * values, so they can be decoded to something meaningful.
 	 */
-	for (i = 0; i < setting->data.configs.num_configs; i++) {
-		seq_printf(s, " ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-				setting->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx",
-				   setting->data.configs.configs[i]);
-	}
-
-	seq_printf(s, "\n");
+	pinconf_show_config(s, pctldev, setting->data.configs.configs,
+			    setting->data.configs.num_configs);
 }
 
 static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
@@ -412,10 +410,8 @@
 	const struct pinctrl_map *map;
 	const struct pinctrl_map *found = NULL;
 	struct pinctrl_dev *pctldev;
-	const struct pinconf_ops *confops = NULL;
 	struct dbg_cfg *dbg = &pinconf_dbg_conf;
 	int i, j;
-	unsigned long config;
 
 	mutex_lock(&pinctrl_maps_mutex);
 
@@ -449,16 +445,10 @@
 	}
 
 	pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
-	config = *found->data.configs.configs;
-	seq_printf(s, "Dev %s has config of %s in state %s: 0x%08lX\n",
-			dbg->dev_name, dbg->pin_name,
-			dbg->state_name, config);
-
-	if (pctldev)
-		confops = pctldev->desc->confops;
-
-	if (confops && confops->pin_config_config_dbg_show)
-		confops->pin_config_config_dbg_show(pctldev, s, config);
+	seq_printf(s, "Dev %s has config of %s in state %s:\n",
+		   dbg->dev_name, dbg->pin_name, dbg->state_name);
+	pinconf_show_config(s, pctldev, found->data.configs.configs,
+			    found->data.configs.num_configs);
 
 exit:
 	mutex_unlock(&pinctrl_maps_mutex);
@@ -470,10 +460,12 @@
  * pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl
  * map, of a dev/pin/state entry based on user entries to pinconf-config
  * @user_buf: contains the modification request with expected format:
- *     modify config_pin <devicename> <state> <pinname> <newvalue>
+ *     modify <config> <devicename> <state> <name> <newvalue>
  * modify is literal string, alternatives like add/delete not supported yet
- * config_pin is literal, alternatives like config_mux not supported yet
- * <devicename> <state> <pinname> are values that should match the pinctrl-maps
+ * <config> is the configuration to be changed. Supported configs are
+ *     "config_pin" or "config_group", alternatives like config_mux are not
+ *     supported yet.
+ * <devicename> <state> <name> are values that should match the pinctrl-maps
  * <newvalue> reflects the new config and is driver dependent
  */
 static ssize_t pinconf_dbg_config_write(struct file *file,
@@ -511,13 +503,19 @@
 	if (strcmp(token, "modify"))
 		return -EINVAL;
 
-	/* Get arg type: "config_pin" type supported so far */
+	/*
+	 * Get arg type: "config_pin" and "config_group"
+	 *                types are supported so far
+	 */
 	token = strsep(&b, " ");
 	if (!token)
 		return -EINVAL;
-	if (strcmp(token, "config_pin"))
+	if (!strcmp(token, "config_pin"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
+	else if (!strcmp(token, "config_group"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	else
 		return -EINVAL;
-	dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
 
 	/* get arg 'device_name' */
 	token = strsep(&b, " ");
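
pinconf_dbg_config_write() tokenizes the request with strsep() and now accepts config_group alongside config_pin. A standalone sketch of the parse (glibc/BSD strsep assumed; the device, state and group names are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "modify config_group pinctrl-foo default i2c0_grp 0x4";
	char *b = buf;
	char *token;

	token = strsep(&b, " ");	/* "modify" is the only verb so far */
	if (!token || strcmp(token, "modify"))
		return 1;

	token = strsep(&b, " ");	/* selects the map type */
	if (!token)
		return 1;
	if (!strcmp(token, "config_pin"))
		puts("PIN_MAP_TYPE_CONFIGS_PIN");
	else if (!strcmp(token, "config_group"))
		puts("PIN_MAP_TYPE_CONFIGS_GROUP");
	else
		return 1;

	printf("rest: %s\n", b);	/* device, state, name, new value */
	return 0;
}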
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index 4cb59fe..fcfa008 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -394,25 +394,25 @@
 static const unsigned short lp0_mux[] = {
 	P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
 	P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
-        0
+	0
 };
 
 static const unsigned short lp1_mux[] = {
 	P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
 	P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
-        0
+	0
 };
 
 static const unsigned short lp2_mux[] = {
 	P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
 	P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
-        0
+	0
 };
 
 static const unsigned short lp3_mux[] = {
 	P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
 	P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
-        0
+	0
 };
 
 static const struct adi_pin_group adi_pin_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index c3c3d23..a5976eb 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -427,10 +427,10 @@
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 		writel(pintmask, &pint_regs->edge_set);
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	} else {
 		writel(pintmask, &pint_regs->edge_clear);
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	}
 
 out:
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index d8e3f7c..5e86bb8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -420,7 +420,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
@@ -428,7 +428,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
@@ -436,7 +436,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
@@ -445,7 +445,7 @@
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
@@ -454,7 +454,7 @@
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_NONE:
@@ -492,8 +492,9 @@
 	.irq_set_type = amd_gpio_irq_set_type,
 };
 
-static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	u32 i;
 	u32 off;
 	u32 reg;
@@ -501,7 +502,7 @@
 	u64 reg64;
 	int handled = 0;
 	unsigned long flags;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct amd_gpio *gpio_dev = to_amd_gpio(gc);
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a082447..bae0012 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -320,6 +320,9 @@
 static void __iomem *pin_to_controller(struct at91_pinctrl *info,
 				 unsigned int bank)
 {
+	if (!gpio_chips[bank])
+		return NULL;
+
 	return gpio_chips[bank]->regbase;
 }
 
@@ -729,6 +732,10 @@
 		pin = &pins_conf[i];
 		at91_pin_dbg(info->dev, pin);
 		pio = pin_to_controller(info, pin->bank);
+
+		if (!pio)
+			continue;
+
 		mask = pin_to_mask(pin->pin);
 		at91_mux_disable_interrupt(pio, mask);
 		switch (pin->mux) {
@@ -848,6 +855,10 @@
 	*config = 0;
 	dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
 	pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+	if (!pio)
+		return -EINVAL;
+
 	pin = pin_id % MAX_NB_GPIO_PER_BANK;
 
 	if (at91_mux_get_multidrive(pio, pin))
@@ -889,6 +900,10 @@
 			"%s:%d, pin_id=%d, config=0x%lx",
 			__func__, __LINE__, pin_id, config);
 		pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+		if (!pio)
+			return -EINVAL;
+
 		pin = pin_id % MAX_NB_GPIO_PER_BANK;
 		mask = pin_to_mask(pin);
 
@@ -1444,22 +1459,22 @@
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
@@ -1468,7 +1483,7 @@
 		 * disable additional interrupt modes:
 		 * fall back to default behavior
 		 */
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_AIMDR);
 		return 0;
 	case IRQ_TYPE_NONE:
@@ -1488,28 +1503,6 @@
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static int gpio_irq_request_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-	int ret;
-
-	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret)
-		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
-			d->hwirq);
-
-	return ret;
-}
-
-static void gpio_irq_release_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-
-	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
-}
-
 #ifdef CONFIG_PM
 
 static u32 wakeups[MAX_GPIO_BANKS];
@@ -1585,8 +1578,6 @@
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
 	.irq_ack	= gpio_irq_ack,
-	.irq_request_resources = gpio_irq_request_res,
-	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable	= gpio_irq_mask,
 	.irq_mask	= gpio_irq_mask,
 	.irq_unmask	= gpio_irq_unmask,
@@ -1596,7 +1587,7 @@
 
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
 	struct at91_gpio_chip *at91_gpio = container_of(gpio_chip,
 					   struct at91_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 29cbbab..3731cc6 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,10 +519,11 @@
 	.irq_set_type		= u300_gpio_irq_type,
 };
 
-static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
-	struct irq_chip *parent_chip = irq_get_chip(irq);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct u300_gpio *gpio = to_u300_gpio(chip);
 	struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
 	int pinoffset = port->number << 3; /* get the right stride */
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
new file mode 100644
index 0000000..461fffc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -0,0 +1,378 @@
+/*
+ *  Driver for Conexant Digicolor General Purpose Pin Mapping
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * TODO:
+ * - GPIO interrupt support
+ * - Pin pad configuration (pull up/down, strength)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME	"pinctrl-digicolor"
+
+#define GP_CLIENTSEL(clct)	((clct)*8 + 0x20)
+#define GP_DRIVE0(clct)		(GP_CLIENTSEL(clct) + 2)
+#define GP_OUTPUT0(clct)	(GP_CLIENTSEL(clct) + 3)
+#define GP_INPUT(clct)		(GP_CLIENTSEL(clct) + 6)
+
+#define PIN_COLLECTIONS		('R' - 'A' + 1)
+#define PINS_PER_COLLECTION	8
+#define PINS_COUNT		(PIN_COLLECTIONS * PINS_PER_COLLECTION)
+
+struct dc_pinmap {
+	void __iomem		*regs;
+	struct device		*dev;
+	struct pinctrl_dev	*pctl;
+
+	struct pinctrl_desc	*desc;
+	const char		*pin_names[PINS_COUNT];
+
+	struct gpio_chip	chip;
+	spinlock_t		lock;
+};
+
+static int dc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return PINS_COUNT;
+}
+
+static const char *dc_get_group_name(struct pinctrl_dev *pctldev,
+				     unsigned selector)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	/* Exactly one group per pin */
+	return pmap->desc->pins[selector].name;
+}
+
+static int dc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+			     const unsigned **pins,
+			     unsigned *num_pins)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pmap->desc->pins[selector].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static struct pinctrl_ops dc_pinctrl_ops = {
+	.get_groups_count	= dc_get_groups_count,
+	.get_group_name		= dc_get_group_name,
+	.get_group_pins		= dc_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static const char *const dc_functions[] = {
+	"gpio",
+	"client_a",
+	"client_b",
+	"client_c",
+};
+
+static int dc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(dc_functions);
+}
+
+static const char *dc_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+{
+	return dc_functions[selector];
+}
+
+static int dc_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+			 const char * const **groups,
+			 unsigned * const num_groups)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pmap->pin_names;
+	*num_groups = PINS_COUNT;
+
+	return 0;
+}
+
+static void dc_client_sel(int pin_num, int *reg, int *bit)
+{
+	*bit = (pin_num % PINS_PER_COLLECTION) * 2;
+	*reg = GP_CLIENTSEL(pin_num/PINS_PER_COLLECTION);
+
+	if (*bit >= PINS_PER_COLLECTION) {
+		*bit -= PINS_PER_COLLECTION;
+		*reg += 1;
+	}
+}
+
+static int dc_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+		      unsigned group)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(group, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	reg &= ~(3 << bit_off);
+	reg |= (selector << bit_off);
+	writeb_relaxed(reg, pmap->regs + reg_off);
+
+	return 0;
+}
+
+static int dc_pmx_request_gpio(struct pinctrl_dev *pcdev,
+			       struct pinctrl_gpio_range *range,
+			       unsigned offset)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pcdev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(offset, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	if ((reg & (3 << bit_off)) != 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static struct pinmux_ops dc_pmxops = {
+	.get_functions_count	= dc_get_functions_count,
+	.get_function_name	= dc_get_fname,
+	.get_function_groups	= dc_get_groups,
+	.set_mux		= dc_set_mux,
+	.gpio_request_enable	= dc_pmx_request_gpio,
+};
+
+static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+	return pinctrl_request_gpio(chip->base + gpio);
+}
+
+static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+	pinctrl_free_gpio(chip->base + gpio);
+}
+
+static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive &= ~BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value);
+
+static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+				    int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	dc_gpio_set(chip, gpio, value);
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive |= BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_INPUT(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 input;
+
+	input = readb_relaxed(pmap->regs + reg_off);
+
+	return !!(input & BIT(bit_off));
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_OUTPUT0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 output;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	output = readb_relaxed(pmap->regs + reg_off);
+	if (value)
+		output |= BIT(bit_off);
+	else
+		output &= ~BIT(bit_off);
+	writeb_relaxed(output, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+}
+
+static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+{
+	struct gpio_chip *chip = &pmap->chip;
+	int ret;
+
+	chip->label		= DRIVER_NAME;
+	chip->dev		= pmap->dev;
+	chip->request		= dc_gpio_request;
+	chip->free		= dc_gpio_free;
+	chip->direction_input	= dc_gpio_direction_input;
+	chip->direction_output	= dc_gpio_direction_output;
+	chip->get		= dc_gpio_get;
+	chip->set		= dc_gpio_set;
+	chip->base		= -1;
+	chip->ngpio		= PINS_COUNT;
+	chip->of_node		= np;
+	chip->of_gpio_n_cells	= 2;
+
+	spin_lock_init(&pmap->lock);
+
+	ret = gpiochip_add(chip);
+	if (ret < 0)
+		return ret;
+
+	ret = gpiochip_add_pin_range(chip, dev_name(pmap->dev), 0, 0,
+				     PINS_COUNT);
+	if (ret < 0) {
+		gpiochip_remove(chip);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_probe(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap;
+	struct resource *r;
+	struct pinctrl_pin_desc *pins;
+	struct pinctrl_desc *pctl_desc;
+	char *pin_names;
+	int name_len = strlen("GP_xx") + 1;
+	int i, j, ret;
+
+	pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
+	if (!pmap)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(pmap->regs))
+		return PTR_ERR(pmap->regs);
+
+	pins = devm_kzalloc(&pdev->dev, sizeof(*pins)*PINS_COUNT, GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+	pin_names = devm_kzalloc(&pdev->dev, name_len * PINS_COUNT,
+				 GFP_KERNEL);
+	if (!pin_names)
+		return -ENOMEM;
+
+	for (i = 0; i < PIN_COLLECTIONS; i++) {
+		for (j = 0; j < PINS_PER_COLLECTION; j++) {
+			int pin_id = i*PINS_PER_COLLECTION + j;
+			char *name = &pin_names[pin_id * name_len];
+
+			snprintf(name, name_len, "GP_%c%c", 'A'+i, '0'+j);
+
+			pins[pin_id].number = pin_id;
+			pins[pin_id].name = name;
+			pmap->pin_names[pin_id] = name;
+		}
+	}
+
+	pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+	if (!pctl_desc)
+		return -ENOMEM;
+
+	pctl_desc->name = DRIVER_NAME;
+	pctl_desc->owner = THIS_MODULE;
+	pctl_desc->pctlops = &dc_pinctrl_ops;
+	pctl_desc->pmxops = &dc_pmxops;
+	pctl_desc->npins = PINS_COUNT;
+	pctl_desc->pins = pins;
+	pmap->desc = pctl_desc;
+
+	pmap->dev = &pdev->dev;
+
+	pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+	if (IS_ERR(pmap->pctl)) {
+		dev_err(&pdev->dev, "pinctrl driver registration failed\n");
+		return PTR_ERR(pmap->pctl);
+	}
+
+	ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
+	if (ret < 0) {
+		pinctrl_unregister(pmap->pctl);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_remove(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(pmap->pctl);
+	gpiochip_remove(&pmap->chip);
+
+	return 0;
+}
+
+static const struct of_device_id dc_pinctrl_ids[] = {
+	{ .compatible = "cnxt,cx92755-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pinctrl_ids);
+
+static struct platform_driver dc_pinctrl_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = dc_pinctrl_ids,
+	},
+	.probe = dc_pinctrl_probe,
+	.remove = dc_pinctrl_remove,
+};
+module_platform_driver(dc_pinctrl_driver);
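
In the new digicolor driver every collection of eight pins takes two client-select bits per pin, packed across a pair of byte-wide registers; dc_client_sel() converts a pin number into that (register, bit) pair. A standalone model of the register math (pin number illustrative):

#include <stdio.h>

#define PINS_PER_COLLECTION	8
#define GP_CLIENTSEL(clct)	((clct) * 8 + 0x20)

static void client_sel(int pin, int *reg, int *bit)
{
	*bit = (pin % PINS_PER_COLLECTION) * 2;	/* 2 bits per pin */
	*reg = GP_CLIENTSEL(pin / PINS_PER_COLLECTION);

	if (*bit >= PINS_PER_COLLECTION) {	/* spills into the second byte */
		*bit -= PINS_PER_COLLECTION;
		*reg += 1;
	}
}

int main(void)
{
	int reg, bit;

	client_sel(13, &reg, &bit);	/* pin GP_B5 */
	printf("reg 0x%02x, bit %d\n", reg, bit);	/* reg 0x29, bit 2 */
	return 0;
}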
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 347c763..f0bebbe 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -37,6 +37,9 @@
 #define LPC18XX_SCU_PIN_EHD_MASK	0x300
 #define LPC18XX_SCU_PIN_EHD_POS		8
 
+#define LPC18XX_SCU_USB1_EPD		BIT(2)
+#define LPC18XX_SCU_USB1_EPWR		BIT(4)
+
 #define LPC18XX_SCU_I2C0_EFP		BIT(0)
 #define LPC18XX_SCU_I2C0_EHD		BIT(2)
 #define LPC18XX_SCU_I2C0_EZI		BIT(3)
@@ -617,8 +620,31 @@
 
 static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (reg & LPC18XX_SCU_USB1_EPWR)
+			*arg = 0;
+		else
+			*arg = 1;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			return -EINVAL;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			*arg = 1;
+		else
+			return -EINVAL;
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_get_i2c0(enum pin_config_param param, int *arg, u32 reg,
@@ -782,8 +808,28 @@
 				  enum pin_config_param param,
 				  u16 param_val, u32 *reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (param_val)
+			*reg &= ~LPC18XX_SCU_USB1_EPWR;
+		else
+			*reg |= LPC18XX_SCU_USB1_EPWR;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		*reg &= ~LPC18XX_SCU_USB1_EPD;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		*reg |= LPC18XX_SCU_USB1_EPD;
+		break;
+
+	default:
+		dev_err(pctldev->dev, "Property not supported\n");
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 63100be..3dc2ae1 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1310,9 +1310,11 @@
 	return 0;
 }
 
-static void pistachio_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(unsigned int __irq,
+				       struct irq_desc *desc)
 {
-	struct gpio_chip *gc = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct pistachio_gpio_bank *bank = gc_to_bank(gc);
 	struct irq_chip *chip = irq_get_chip(irq);
 	unsigned long pending;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 9affcd7..c5246c0 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -945,6 +945,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -956,6 +957,7 @@
 	writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1389,6 +1391,7 @@
 	unsigned long flags;
 	u32 data;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl(reg);
@@ -1398,6 +1401,7 @@
 	writel(data, reg);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 }
 
 /*
@@ -1409,7 +1413,9 @@
 	struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
 	u32 data;
 
+	clk_enable(bank->clk);
 	data = readl(bank->reg_base + GPIO_EXT_PORT);
+	clk_disable(bank->clk);
 	data >>= offset;
 	data &= 1;
 	return data;
@@ -1469,10 +1475,10 @@
  * Interrupt handling
  */
 
-static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
 	u32 pend;
 
 	dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1482,7 +1488,7 @@
 	pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int irq, virq;
 
 		irq = __ffs(pend);
 		pend &= ~BIT(irq);
@@ -1546,6 +1552,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -1555,9 +1562,9 @@
 	spin_unlock_irqrestore(&bank->slock, flags);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_lock_irqsave(&bank->slock, flags);
 	irq_gc_lock(gc);
@@ -1603,6 +1610,7 @@
 	default:
 		irq_gc_unlock(gc);
 		spin_unlock_irqrestore(&bank->slock, flags);
+		clk_disable(bank->clk);
 		return -EINVAL;
 	}
 
@@ -1611,6 +1619,7 @@
 
 	irq_gc_unlock(gc);
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1620,8 +1629,10 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
 	irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
+	clk_disable(bank->clk);
 }
 
 static void rockchip_irq_resume(struct irq_data *d)
@@ -1629,7 +1640,27 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
+	clk_disable(bank->clk);
+}
+
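+/*
+ * Mask/unmask wrappers that keep the bank clock running only while the
+ * irq is unmasked: enable the clock before unmasking, disable it after
+ * masking.
+ */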
+static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	clk_enable(bank->clk);
+	irq_gc_mask_clr_bit(d);
+}
+
+static void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	irq_gc_mask_set_bit(d);
+	clk_disable(bank->clk);
 }
 
 static int rockchip_interrupts_register(struct platform_device *pdev,
@@ -1640,7 +1671,7 @@
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	struct irq_chip_generic *gc;
 	int ret;
-	int i;
+	int i, j;
 
 	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
 		if (!bank->valid) {
@@ -1649,11 +1680,19 @@
 			continue;
 		}
 
+		ret = clk_enable(bank->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
+				bank->name);
+			continue;
+		}
+
 		bank->domain = irq_domain_add_linear(bank->of_node, 32,
 						&irq_generic_chip_ops, NULL);
 		if (!bank->domain) {
 			dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
 				 bank->name);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1664,6 +1703,7 @@
 			dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
 				bank->name);
 			irq_domain_remove(bank->domain);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1681,16 +1721,23 @@
 		gc->chip_types[0].regs.mask = GPIO_INTMASK;
 		gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
 		gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_mask = rockchip_irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask =
+						  rockchip_irq_gc_mask_clr_bit;
 		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
 		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
-		irq_set_handler_data(bank->irq, bank);
-		irq_set_chained_handler(bank->irq, rockchip_irq_demux);
+		irq_set_chained_handler_and_data(bank->irq,
+						 rockchip_irq_demux, bank);
+
+		/* map the gpio irqs here, when the clock is still running */
+		for (j = 0 ; j < 32 ; j++)
+			irq_create_mapping(bank->domain, j);
+
+		clk_disable(bank->clk);
 	}
 
 	return 0;
@@ -1808,7 +1855,7 @@
 	if (IS_ERR(bank->clk))
 		return PTR_ERR(bank->clk);
 
-	return clk_prepare_enable(bank->clk);
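+	/*
+	 * Only prepare the clock here; it is now enabled on demand
+	 * around register accesses.
+	 */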
+	return clk_prepare(bank->clk);
 }
 
 static const struct of_device_id rockchip_pinctrl_dt_match[];
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 0b8d480..bf548c2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1684,7 +1684,7 @@
 	struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip;
 
-	chip = irq_get_chip(irq);
+	chip = irq_desc_get_chip(desc);
 	chained_irq_enter(chip, desc);
 	pcs_irq_handle(pcs_soc);
 	/* REVISIT: export and add handle_bad_irq(irq, desc)? */
@@ -1716,12 +1716,7 @@
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -1768,9 +1763,9 @@
 			return res;
 		}
 	} else {
-		irq_set_handler_data(pcs_soc->irq, pcs_soc);
-		irq_set_chained_handler(pcs_soc->irq,
-					pcs_irq_chain_handler);
+		irq_set_chained_handler_and_data(pcs_soc->irq,
+						 pcs_irq_chain_handler,
+						 pcs_soc);
 	}
 
 	/*
@@ -1983,7 +1978,6 @@
 };
 
 static const struct pcs_soc_data pinctrl_single_dra7 = {
-	.flags = PCS_QUIRK_SHARED_IRQ,
 	.irq_enable_mask = (1 << 24),	/* WAKEUPENABLE */
 	.irq_status_mask = (1 << 25),	/* WAKEUPEVENT */
 };
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index c262e5f..f8338d2 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1463,7 +1463,7 @@
 static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	/* interrupt dedicated per bank */
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct st_gpio_bank *bank = gpio_chip_to_bank(gc);
 
@@ -1474,8 +1474,8 @@
 
 static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct st_pinctrl *info = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct st_pinctrl *info = irq_desc_get_handler_data(desc);
 	unsigned long status;
 	int n;
 
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 7ce23b6..5aafea8 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -706,10 +706,10 @@
 		"gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
 		"gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
 		"gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"};
-static const char * const smc0_nor_groups[] = {"smc0_nor"};
+static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
 static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
 static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
-static const char * const smc0_nand_groups[] = {"smc0_nand"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
 static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
 		"can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
 		"can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index e7ae890..67e08cb 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -322,8 +322,7 @@
 		selector++;
 	}
 
-	pr_err("%s does not support function %s\n",
-	       pinctrl_dev_get_name(pctldev), function);
+	dev_err(pctldev->dev, "function '%s' not supported\n", function);
 	return -EINVAL;
 }
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 58f5632..383263a 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -63,6 +63,14 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
+config PINCTRL_QDF2XXX
+	tristate "Qualcomm Technologies QDF2xxx pin controller driver"
+	depends on GPIOLIB && ACPI
+	select PINCTRL_MSM
+	help
+	  This is the GPIO driver for the TLMM block found on
+	  Qualcomm Technologies QDF2xxx SoCs.
+
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
        depends on GPIOLIB && OF && SPMI
@@ -76,4 +84,16 @@
          which are using SPMI for communication with SoC. Example PMIC's
          devices are pm8841, pm8941 and pma8084.
 
+config PINCTRL_QCOM_SSBI_PMIC
+       tristate "Qualcomm SSBI PMIC pin controller driver"
+       depends on GPIOLIB && OF
+       select PINMUX
+       select PINCONF
+       select GENERIC_PINCONF
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         GPIO and MPP blocks found in Qualcomm PMIC chips, which use SSBI
+         to communicate with the SoC. Example PMICs are pm8058 and pm8921.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 3666c70..13b190e 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -7,5 +7,8 @@
 obj-$(CONFIG_PINCTRL_MSM8960)	+= pinctrl-msm8960.o
 obj-$(CONFIG_PINCTRL_MSM8X74)	+= pinctrl-msm8x74.o
 obj-$(CONFIG_PINCTRL_MSM8916)	+= pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_QDF2XXX)	+= pinctrl-qdf2xxx.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e457d52..492cdd5 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -28,6 +28,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <linux/pm.h>
 
 #include "../core.h"
 #include "../pinconf.h"
@@ -733,9 +734,9 @@
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -764,12 +765,13 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int irq_pin;
 	int handled = 0;
 	u32 val;
@@ -855,6 +857,13 @@
 	return NOTIFY_DONE;
 }
 
+static struct msm_pinctrl *poweroff_pctrl;
+
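+/* pm_power_off hook: power off by invoking the PS_HOLD restart handler. */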
+static void msm_ps_hold_poweroff(void)
+{
+	msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+}
+
 static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
 {
 	int i;
@@ -867,6 +876,8 @@
 			if (register_restart_handler(&pctrl->restart_nb))
 				dev_err(pctrl->dev,
 					"failed to setup restart handler.\n");
+			poweroff_pctrl = pctrl;
+			pm_power_off = msm_ps_hold_poweroff;
 			break;
 		}
 }
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
new file mode 100644
index 0000000..e9ff3bc
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * GPIO and pin control functions on this SOC are handled by the "TLMM"
+ * device.  The driver which controls this device is pinctrl-msm.c.  Each
+ * SOC with a TLMM is expected to create a client driver that registers
+ * with pinctrl-msm.c.  This means that all TLMM drivers are pin control
+ * drivers.
+ *
+ * This pin control driver is intended to be used only on an ACPI-enabled
+ * system.  As such, UEFI will handle all pin control configuration, so
+ * this driver does not provide pin control functions.  It is effectively
+ * a GPIO-only driver.  The alternative is to duplicate the GPIO code of
+ * pinctrl-msm.c into another driver.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/acpi.h>
+
+#include "pinctrl-msm.h"
+
+static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
+
+static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
+{
+	struct pinctrl_pin_desc *pins;
+	struct msm_pingroup *groups;
+	unsigned int i;
+	u32 num_gpios;
+	int ret;
+
+	/* Query the number of GPIOs from ACPI */
+	ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
+	if (ret < 0)
+		return ret;
+
+	if (!num_gpios) {
+		dev_warn(&pdev->dev, "missing num-gpios property\n");
+		return -ENODEV;
+	}
+
+	pins = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+	groups = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct msm_pingroup), GFP_KERNEL);
+	if (!pins || !groups)
+		return -ENOMEM;
+
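+	/*
+	 * Every GPIO is a single-pin group; the TLMM register blocks
+	 * are laid out at a 64 KB (0x10000) stride per pin.
+	 */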
+	for (i = 0; i < num_gpios; i++) {
+		pins[i].number = i;
+
+		groups[i].npins = 1;
+		groups[i].pins = &pins[i].number;
+		groups[i].ctl_reg = 0x10000 * i;
+		groups[i].io_reg = 0x04 + 0x10000 * i;
+		groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
+		groups[i].intr_status_reg = 0x0c + 0x10000 * i;
+		groups[i].intr_target_reg = 0x08 + 0x10000 * i;
+
+		groups[i].mux_bit = 2;
+		groups[i].pull_bit = 0;
+		groups[i].drv_bit = 6;
+		groups[i].oe_bit = 9;
+		groups[i].in_bit = 0;
+		groups[i].out_bit = 1;
+		groups[i].intr_enable_bit = 0;
+		groups[i].intr_status_bit = 0;
+		groups[i].intr_target_bit = 5;
+		groups[i].intr_target_kpss_val = 1;
+		groups[i].intr_raw_status_bit = 4;
+		groups[i].intr_polarity_bit = 1;
+		groups[i].intr_detection_bit = 2;
+		groups[i].intr_detection_width = 2;
+	}
+
+	qdf2xxx_pinctrl.pins = pins;
+	qdf2xxx_pinctrl.groups = groups;
+	qdf2xxx_pinctrl.npins = num_gpios;
+	qdf2xxx_pinctrl.ngroups = num_gpios;
+	qdf2xxx_pinctrl.ngpios = num_gpios;
+
+	return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+}
+
+static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
+	{"QCOM8001"},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
+
+static struct platform_driver qdf2xxx_pinctrl_driver = {
+	.driver = {
+		.name = "qdf2xxx-pinctrl",
+		.acpi_match_table = ACPI_PTR(qdf2xxx_acpi_ids),
+	},
+	.probe = qdf2xxx_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init qdf2xxx_pinctrl_init(void)
+{
+	return platform_driver_register(&qdf2xxx_pinctrl_driver);
+}
+arch_initcall(qdf2xxx_pinctrl_init);
+
+static void __exit qdf2xxx_pinctrl_exit(void)
+{
+	platform_driver_unregister(&qdf2xxx_pinctrl_driver);
+}
+module_exit(qdf2xxx_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies QDF2xxx pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 3121de9..e3be3ce 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -61,7 +61,9 @@
 #define PMIC_MPP_REG_DIG_PULL_CTL		0x42
 #define PMIC_MPP_REG_DIG_IN_CTL			0x43
 #define PMIC_MPP_REG_EN_CTL			0x46
+#define PMIC_MPP_REG_AOUT_CTL			0x48
 #define PMIC_MPP_REG_AIN_CTL			0x4a
+#define PMIC_MPP_REG_SINK_CTL			0x4c
 
 /* PMIC_MPP_REG_MODE_CTL */
 #define PMIC_MPP_REG_MODE_VALUE_MASK		0x1
@@ -85,11 +87,25 @@
 #define PMIC_MPP_REG_AIN_ROUTE_SHIFT		0
 #define PMIC_MPP_REG_AIN_ROUTE_MASK		0x7
 
+#define PMIC_MPP_MODE_DIGITAL_INPUT		0
+#define PMIC_MPP_MODE_DIGITAL_OUTPUT		1
+#define PMIC_MPP_MODE_DIGITAL_BIDIR		2
+#define PMIC_MPP_MODE_ANALOG_BIDIR		3
+#define PMIC_MPP_MODE_ANALOG_INPUT		4
+#define PMIC_MPP_MODE_ANALOG_OUTPUT		5
+#define PMIC_MPP_MODE_CURRENT_SINK		6
+
+#define PMIC_MPP_SELECTOR_NORMAL		0
+#define PMIC_MPP_SELECTOR_PAIRED		1
+#define PMIC_MPP_SELECTOR_DTEST_FIRST		4
+
 #define PMIC_MPP_PHYSICAL_OFFSET		1
 
 /* Qualcomm specific pin configurations */
 #define PMIC_MPP_CONF_AMUX_ROUTE		(PIN_CONFIG_END + 1)
-#define PMIC_MPP_CONF_ANALOG_MODE		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_ANALOG_LEVEL		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_DTEST_SELECTOR		(PIN_CONFIG_END + 3)
+#define PMIC_MPP_CONF_PAIRED			(PIN_CONFIG_END + 4)
 
 /**
  * struct pmic_mpp_pad - keep current MPP settings
@@ -99,13 +115,15 @@
  * @out_value: Cached pin output value.
  * @output_enabled: Set to true if MPP output logic is enabled.
  * @input_enabled: Set to true if MPP input buffer logic is enabled.
- * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
- *	Output or Bidirectional Analog mode.
+ * @paired: Pin operates in paired mode
  * @num_sources: Number of power-sources supported by this MPP.
  * @power_source: Current power-source used.
  * @amux_input: Set the source for analog input.
+ * @aout_level: Analog output level
  * @pullup: Pullup resistor value. Valid in Bidirectional mode only.
  * @function: See pmic_mpp_functions[].
+ * @drive_strength: Amount of current in sink mode
+ * @dtest: DTEST route selector
  */
 struct pmic_mpp_pad {
 	u16		base;
@@ -114,12 +132,15 @@
 	bool		out_value;
 	bool		output_enabled;
 	bool		input_enabled;
-	bool		analog_mode;
+	bool		paired;
 	unsigned int	num_sources;
 	unsigned int	power_source;
 	unsigned int	amux_input;
+	unsigned int	aout_level;
 	unsigned int	pullup;
 	unsigned int	function;
+	unsigned int	drive_strength;
+	unsigned int	dtest;
 };
 
 struct pmic_mpp_state {
@@ -129,25 +150,32 @@
 	struct gpio_chip chip;
 };
 
-struct pmic_mpp_bindings {
-	const char	*property;
-	unsigned	param;
+static const struct pinconf_generic_params pmic_mpp_bindings[] = {
+	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE,	0},
+	{"qcom,analog-level",	PMIC_MPP_CONF_ANALOG_LEVEL,	0},
+	{"qcom,dtest",		PMIC_MPP_CONF_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PMIC_MPP_CONF_PAIRED,		0},
 };
 
-static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
-	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE},
-	{"qcom,analog-mode",	PMIC_MPP_CONF_ANALOG_MODE},
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pmic_conf_items[] = {
+	PCONFDUMP(PMIC_MPP_CONF_AMUX_ROUTE, "analog mux", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
 };
+#endif
 
 static const char *const pmic_mpp_groups[] = {
 	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
 };
 
+#define PMIC_MPP_DIGITAL	0
+#define PMIC_MPP_ANALOG		1
+#define PMIC_MPP_SINK		2
+
 static const char *const pmic_mpp_functions[] = {
-	PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
-	"reserved1", "reserved2",
-	PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
-	PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
+	"digital", "analog", "sink"
 };
 
 static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
@@ -204,118 +232,11 @@
 	return 0;
 }
 
-static int pmic_mpp_parse_dt_config(struct device_node *np,
-				    struct pinctrl_dev *pctldev,
-				    unsigned long **configs,
-				    unsigned int *nconfs)
-{
-	struct pmic_mpp_bindings *par;
-	unsigned long cfg;
-	int ret, i;
-	u32 val;
-
-	for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
-		par = &pmic_mpp_bindings[i];
-		ret = of_property_read_u32(np, par->property, &val);
-
-		/* property not found */
-		if (ret == -EINVAL)
-			continue;
-
-		/* use zero as default value, when no value is specified */
-		if (ret)
-			val = 0;
-
-		dev_dbg(pctldev->dev, "found %s with value %u\n",
-			par->property, val);
-
-		cfg = pinconf_to_config_packed(par->param, val);
-
-		ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
-				      struct device_node *np,
-				      struct pinctrl_map **map,
-				      unsigned *reserv, unsigned *nmaps,
-				      enum pinctrl_map_type type)
-{
-	unsigned long *configs = NULL;
-	unsigned nconfs = 0;
-	struct property *prop;
-	const char *group;
-	int ret;
-
-	ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
-	if (ret < 0)
-		return ret;
-
-	if (!nconfs)
-		return 0;
-
-	ret = of_property_count_strings(np, "pins");
-	if (ret < 0)
-		goto exit;
-
-	ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
-	if (ret < 0)
-		goto exit;
-
-	of_property_for_each_string(np, "pins", prop, group) {
-		ret = pinctrl_utils_add_map_configs(pctldev, map,
-						    reserv, nmaps, group,
-						    configs, nconfs, type);
-		if (ret < 0)
-			break;
-	}
-exit:
-	kfree(configs);
-	return ret;
-}
-
-static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
-				   struct device_node *np_config,
-				   struct pinctrl_map **map, unsigned *nmaps)
-{
-	struct device_node *np;
-	enum pinctrl_map_type type;
-	unsigned reserv;
-	int ret;
-
-	ret = 0;
-	*map = NULL;
-	*nmaps = 0;
-	reserv = 0;
-	type = PIN_MAP_TYPE_CONFIGS_GROUP;
-
-	for_each_child_of_node(np_config, np) {
-		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
-							&reserv, nmaps, type);
-		if (ret)
-			break;
-
-		ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
-						 nmaps, type);
-		if (ret)
-			break;
-	}
-
-	if (ret < 0)
-		pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
-
-	return ret;
-}
-
 static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
 	.get_groups_count	= pmic_mpp_get_groups_count,
 	.get_group_name		= pmic_mpp_get_group_name,
 	.get_group_pins		= pmic_mpp_get_group_pins,
-	.dt_node_to_map		= pmic_mpp_dt_node_to_map,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
 	.dt_free_map		= pinctrl_utils_dt_free_map,
 };
 
@@ -340,6 +261,53 @@
 	return 0;
 }
 
+static int pmic_mpp_write_mode_ctl(struct pmic_mpp_state *state,
+				   struct pmic_mpp_pad *pad)
+{
+	unsigned int mode;
+	unsigned int sel;
+	unsigned int val;
+	unsigned int en;
+
+	switch (pad->function) {
+	case PMIC_MPP_ANALOG:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_INPUT;
+		else
+			mode = PMIC_MPP_MODE_ANALOG_OUTPUT;
+		break;
+	case PMIC_MPP_DIGITAL:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_INPUT;
+		else
+			mode = PMIC_MPP_MODE_DIGITAL_OUTPUT;
+		break;
+	case PMIC_MPP_SINK:
+	default:
+		mode = PMIC_MPP_MODE_CURRENT_SINK;
+		break;
+	}
+
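+	/*
+	 * Function selector: 0 is normal, 1 routes the paired MPP, and
+	 * selectors 4..7 pick DTEST lines 1..4.
+	 */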
+	if (pad->dtest)
+		sel = PMIC_MPP_SELECTOR_DTEST_FIRST + pad->dtest - 1;
+	else if (pad->paired)
+		sel = PMIC_MPP_SELECTOR_PAIRED;
+	else
+		sel = PMIC_MPP_SELECTOR_NORMAL;
+
+	en = !!pad->out_value;
+
+	val = mode << PMIC_MPP_REG_MODE_DIR_SHIFT |
+	      sel << PMIC_MPP_REG_MODE_FUNCTION_SHIFT |
+	      en;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+}
+
 static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
 				unsigned pin)
 {
@@ -352,31 +320,7 @@
 
 	pad->function = function;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
-
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
-
-	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
-	if (ret < 0)
-		return ret;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
 
 	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
 
@@ -433,11 +377,20 @@
 	case PIN_CONFIG_OUTPUT:
 		arg = pad->out_value;
 		break;
+	case PMIC_MPP_CONF_DTEST_SELECTOR:
+		arg = pad->dtest;
+		break;
 	case PMIC_MPP_CONF_AMUX_ROUTE:
 		arg = pad->amux_input;
 		break;
-	case PMIC_MPP_CONF_ANALOG_MODE:
-		arg = pad->analog_mode;
+	case PMIC_MPP_CONF_PAIRED:
+		arg = pad->paired;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pad->drive_strength;
+		break;
+	case PMIC_MPP_CONF_ANALOG_LEVEL:
+		arg = pad->aout_level;
 		break;
 	default:
 		return -EINVAL;
@@ -459,6 +412,9 @@
 
 	pad = pctldev->desc->pins[pin].drv_data;
 
+	/* Make it possible to enable the pin, by not setting high impedance */
+	pad->is_enabled = true;
+
 	for (i = 0; i < nconfs; i++) {
 		param = pinconf_to_config_param(configs[i]);
 		arg = pinconf_to_config_argument(configs[i]);
@@ -497,13 +453,22 @@
 			pad->output_enabled = true;
 			pad->out_value = arg;
 			break;
+		case PMIC_MPP_CONF_DTEST_SELECTOR:
+			pad->dtest = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pad->drive_strength = arg;
+			break;
 		case PMIC_MPP_CONF_AMUX_ROUTE:
 			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
 				return -EINVAL;
 			pad->amux_input = arg;
 			break;
-		case PMIC_MPP_CONF_ANALOG_MODE:
-			pad->analog_mode = true;
+		case PMIC_MPP_CONF_ANALOG_LEVEL:
+			pad->aout_level = arg;
+			break;
+		case PMIC_MPP_CONF_PAIRED:
+			pad->paired = !!arg;
 			break;
 		default:
 			return -EINVAL;
@@ -528,29 +493,17 @@
 	if (ret < 0)
 		return ret;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AOUT_CTL, pad->aout_level);
+	if (ret < 0)
+		return ret;
+
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL,
+			     pad->drive_strength);
+	if (ret < 0)
+		return ret;
 
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
+	if (ret < 0)
+		return ret;
 
-	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
 }
 
 static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -558,20 +511,17 @@
 {
 	struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
 	struct pmic_mpp_pad *pad;
-	int ret, val;
+	int ret;
 
 	static const char *const biases[] = {
 		"0.6kOhm", "10kOhm", "30kOhm", "Disabled"
 	};
 
-
 	pad = pctldev->desc->pins[pin].drv_data;
 
 	seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
 
-	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
-
-	if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
+	if (!pad->is_enabled) {
 		seq_puts(s, " ---");
 	} else {
 
@@ -585,15 +535,20 @@
 		}
 
 		seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
-		seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
 		seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
 		seq_printf(s, " vin-%d", pad->power_source);
+		seq_printf(s, " %d", pad->aout_level);
 		seq_printf(s, " %-8s", biases[pad->pullup]);
 		seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+		if (pad->dtest)
+			seq_printf(s, " dtest%d", pad->dtest);
+		if (pad->paired)
+			seq_puts(s, " paired");
 	}
 }
 
 static const struct pinconf_ops pmic_mpp_pinconf_ops = {
+	.is_generic = true,
 	.pin_config_group_get		= pmic_mpp_config_get,
 	.pin_config_group_set		= pmic_mpp_config_set,
 	.pin_config_group_dbg_show	= pmic_mpp_config_dbg_show,
@@ -709,6 +664,7 @@
 			     struct pmic_mpp_pad *pad)
 {
 	int type, subtype, val, dir;
+	unsigned int sel;
 
 	type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
 	if (type < 0)
@@ -751,43 +707,53 @@
 	dir &= PMIC_MPP_REG_MODE_DIR_MASK;
 
 	switch (dir) {
-	case 0:
+	case PMIC_MPP_MODE_DIGITAL_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 1:
+	case PMIC_MPP_MODE_DIGITAL_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 2:
+	case PMIC_MPP_MODE_DIGITAL_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 3:
+	case PMIC_MPP_MODE_ANALOG_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 4:
+	case PMIC_MPP_MODE_ANALOG_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 5:
+	case PMIC_MPP_MODE_ANALOG_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
+		break;
+	case PMIC_MPP_MODE_CURRENT_SINK:
+		pad->input_enabled = false;
+		pad->output_enabled = true;
+		pad->function = PMIC_MPP_SINK;
 		break;
 	default:
 		dev_err(state->dev, "unknown MPP direction\n");
 		return -ENODEV;
 	}
 
-	pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+	sel = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+	sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+
+	if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
+		pad->dtest = sel - PMIC_MPP_SELECTOR_DTEST_FIRST + 1;
+	else if (sel == PMIC_MPP_SELECTOR_PAIRED)
+		pad->paired = true;
 
 	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
 	if (val < 0)
@@ -810,8 +776,24 @@
 	pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
 	pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;
 
-	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
-	pad->is_enabled = true;
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_SINK_CTL);
+	if (val < 0)
+		return val;
+
+	pad->drive_strength = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
+	if (val < 0)
+		return val;
+
+	pad->aout_level = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
+	if (val < 0)
+		return val;
+
+	pad->is_enabled = !!val;
+
 	return 0;
 }
 
@@ -866,6 +848,12 @@
 	pctrldesc->pins = pindesc;
 	pctrldesc->npins = npins;
 
+	pctrldesc->num_custom_params = ARRAY_SIZE(pmic_mpp_bindings);
+	pctrldesc->custom_params = pmic_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrldesc->custom_conf_items = pmic_conf_items;
+#endif
+
 	for (i = 0; i < npins; i++, pindesc++) {
 		pad = &pads[i];
 		pindesc->drv_data = pad;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
new file mode 100644
index 0000000..c978b31
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* mode */
+#define PM8XXX_GPIO_MODE_ENABLED	BIT(0)
+#define PM8XXX_GPIO_MODE_INPUT		0
+#define PM8XXX_GPIO_MODE_OUTPUT		2
+
+/* output buffer */
+#define PM8XXX_GPIO_PUSH_PULL		0
+#define PM8XXX_GPIO_OPEN_DRAIN		1
+
+/* bias */
+#define PM8XXX_GPIO_BIAS_PU_30		0
+#define PM8XXX_GPIO_BIAS_PU_1P5		1
+#define PM8XXX_GPIO_BIAS_PU_31P5	2
+#define PM8XXX_GPIO_BIAS_PU_1P5_30	3
+#define PM8XXX_GPIO_BIAS_PD		4
+#define PM8XXX_GPIO_BIAS_NP		5
+
+/* GPIO registers */
+#define SSBI_REG_ADDR_GPIO_BASE		0x150
+#define SSBI_REG_ADDR_GPIO(n)		(SSBI_REG_ADDR_GPIO_BASE + n)
+
+#define PM8XXX_BANK_WRITE		BIT(7)
+
+#define PM8XXX_MAX_GPIOS               44
+
+/* custom pinconf parameters */
+#define PM8XXX_QCOM_DRIVE_STRENGTH     (PIN_CONFIG_END + 1)
+#define PM8XXX_QCOM_PULL_UP_STRENGTH   (PIN_CONFIG_END + 2)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:               address of the control register
+ * @irq:               IRQ from the PMIC interrupt controller
+ * @power_source:      logical selected voltage source, mapping in static data
+ *                     is used to translate to register values
+ * @mode:              operating mode for the pin (input/output)
+ * @open_drain:        output buffer configured as open-drain (vs push-pull)
+ * @output_value:      configured output value
+ * @bias:              register view of configured bias
+ * @pull_up_strength:  placeholder for selected pull up strength
+ *                     only used to configure bias when pull up is selected
+ * @output_strength:   selector of output-strength
+ * @disable:           pin disabled / configured as tristate
+ * @function:          pinmux selector
+ * @inverted:          pin logic is inverted
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+	u8 power_source;
+	u8 mode;
+	bool open_drain;
+	bool output_value;
+	u8 bias;
+	u8 pull_up_strength;
+	u8 output_strength;
+	bool disable;
+	u8 function;
+	bool inverted;
+};
+
+struct pm8xxx_gpio {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
+	{"qcom,drive-strength",		PM8XXX_QCOM_DRIVE_STRENGH,	0},
+	{"qcom,pull-up-strength",	PM8XXX_QCOM_PULL_UP_STRENGTH,	0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[ARRAY_SIZE(pm8xxx_gpio_bindings)] = {
+	PCONFDUMP(PM8XXX_QCOM_DRIVE_STRENGTH, "drive-strength", NULL, true),
+	PCONFDUMP(PM8XXX_QCOM_PULL_UP_STRENGTH,  "pull up strength", NULL, true),
+};
+#endif
+
+static const char * const pm8xxx_groups[PM8XXX_MAX_GPIOS] = {
+	"gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
+	"gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+	"gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+	"gpio44",
+};
+
+static const char * const pm8xxx_gpio_functions[] = {
+	PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
+	PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
+	PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
+	PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+};
+
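+/*
+ * Each GPIO control register is multiplexed over six banks: writing
+ * bank << 4 selects which bank a subsequent read returns, and a write
+ * with PM8XXX_BANK_WRITE set updates the selected bank.
+ */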
+static int pm8xxx_read_bank(struct pm8xxx_gpio *pctrl,
+			    struct pm8xxx_pin_data *pin, int bank)
+{
+	unsigned int val = bank << 4;
+	int ret;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to select bank %d\n", bank);
+		return ret;
+	}
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register %d\n", bank);
+		return ret;
+	}
+
+	return val;
+}
+
+static int pm8xxx_write_bank(struct pm8xxx_gpio *pctrl,
+			     struct pm8xxx_pin_data *pin,
+			     int bank,
+			     u8 val)
+{
+	int ret;
+
+	val |= PM8XXX_BANK_WRITE;
+	val |= bank << 4;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_gpio_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_gpio_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+	u8 val;
+
+	pin->function = function;
+	val = pin->function << 1;
+
+	pm8xxx_write_bank(pctrl, pin, 4, val);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+		break;
+	case PM8XXX_QCOM_PULL_UP_STRENGTH:
+		arg = pin->pull_up_strength;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->disable;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
+			arg = pin->output_value;
+		else
+			arg = 0;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PM8XXX_QCOM_DRIVE_STRENGTH:
+		arg = pin->output_strength;
+		break;
+	case PIN_CONFIG_DRIVE_PUSH_PULL:
+		arg = !pin->open_drain;
+		break;
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		arg = pin->open_drain;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+	u8 banks = 0;
+	u8 val;
+
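+	/*
+	 * Cache the new settings first, recording which of the six
+	 * banks they touch; the dirty banks are written back below.
+	 */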
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			pin->bias = PM8XXX_GPIO_BIAS_NP;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			pin->bias = PM8XXX_GPIO_BIAS_PD;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PM8XXX_QCOM_PULL_UP_STRENGTH:
+			if (arg > PM8XXX_GPIO_BIAS_PU_1P5_30) {
+				dev_err(pctrl->dev, "invalid pull-up strength\n");
+				return -EINVAL;
+			}
+			pin->pull_up_strength = arg;
+			/* FALLTHROUGH */
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->bias = pin->pull_up_strength;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->disable = 1;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->mode = PM8XXX_GPIO_MODE_INPUT;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+			pin->output_value = !!arg;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			banks |= BIT(0);
+			break;
+		case PM8XXX_QCOM_DRIVE_STRENGTH:
+			if (arg > PMIC_GPIO_STRENGTH_LOW) {
+				dev_err(pctrl->dev, "invalid drive strength\n");
+				return -EINVAL;
+			}
+			pin->output_strength = arg;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_DRIVE_PUSH_PULL:
+			pin->open_drain = 0;
+			banks |= BIT(1);
+			break;
+		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+			pin->open_drain = 1;
+			banks |= BIT(1);
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	if (banks & BIT(0)) {
+		val = pin->power_source << 1;
+		val |= PM8XXX_GPIO_MODE_ENABLED;
+		pm8xxx_write_bank(pctrl, pin, 0, val);
+	}
+
+	if (banks & BIT(1)) {
+		val = pin->mode << 2;
+		val |= pin->open_drain << 1;
+		val |= pin->output_value;
+		pm8xxx_write_bank(pctrl, pin, 1, val);
+	}
+
+	if (banks & BIT(2)) {
+		val = pin->bias << 1;
+		pm8xxx_write_bank(pctrl, pin, 2, val);
+	}
+
+	if (banks & BIT(3)) {
+		val = pin->output_strength << 2;
+		val |= pin->disable;
+		pm8xxx_write_bank(pctrl, pin, 3, val);
+	}
+
+	if (banks & BIT(4)) {
+		val = pin->function << 1;
+		pm8xxx_write_bank(pctrl, pin, 4, val);
+	}
+
+	if (banks & BIT(5)) {
+		val = 0;
+		if (!pin->inverted)
+			val |= BIT(3);
+		pm8xxx_write_bank(pctrl, pin, 5, val);
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_gpio",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_INPUT;
+	val = pin->mode << 2;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
+		ret = pin->output_value;
+	} else {
+		ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+		if (!ret)
+			ret = !!state;
+	}
+
+	return ret;
+}
+
+static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+}
+
+static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
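+	/* DT numbers these GPIOs from 1; convert to the 0-based offset. */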
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
+	return gpio_desc->args[0] - 1;
+}
+
+static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const modes[] = {
+		"in", "both", "out", "off"
+	};
+	static const char * const biases[] = {
+		"pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
+		"pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
+	};
+	static const char * const buffer_types[] = {
+		"push-pull", "open-drain"
+	};
+	static const char * const strengths[] = {
+		"no", "high", "medium", "low"
+	};
+
+	seq_printf(s, " gpio%-2d:", offset + 1);
+	if (pin->disable) {
+		seq_puts(s, " ---");
+	} else {
+		seq_printf(s, " %-4s", modes[pin->mode]);
+		seq_printf(s, " %-7s", pm8xxx_gpio_functions[pin->function]);
+		seq_printf(s, " VIN%d", pin->power_source);
+		seq_printf(s, " %-27s", biases[pin->bias]);
+		seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
+		seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+		seq_printf(s, " %-7s", strengths[pin->output_strength]);
+		if (pin->inverted)
+			seq_puts(s, " inverted");
+	}
+}
+
+static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_gpio_template = {
+	.direction_input = pm8xxx_gpio_direction_input,
+	.direction_output = pm8xxx_gpio_direction_output,
+	.get = pm8xxx_gpio_get,
+	.set = pm8xxx_gpio_set,
+	.of_xlate = pm8xxx_gpio_of_xlate,
+	.to_irq = pm8xxx_gpio_to_irq,
+	.dbg_show = pm8xxx_gpio_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	int val;
+
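+	/*
+	 * Rebuild the cached pin state from banks 0-5: power source,
+	 * mode/output, bias, strength/disable, function and inversion.
+	 */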
+	val = pm8xxx_read_bank(pctrl, pin, 0);
+	if (val < 0)
+		return val;
+
+	pin->power_source = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 1);
+	if (val < 0)
+		return val;
+
+	pin->mode = (val >> 2) & 0x3;
+	pin->open_drain = !!(val & BIT(1));
+	pin->output_value = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 2);
+	if (val < 0)
+		return val;
+
+	pin->bias = (val >> 1) & 0x7;
+	if (pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30)
+		pin->pull_up_strength = pin->bias;
+	else
+		pin->pull_up_strength = PM8XXX_GPIO_BIAS_PU_30;
+
+	val = pm8xxx_read_bank(pctrl, pin, 3);
+	if (val < 0)
+		return val;
+
+	pin->output_strength = (val >> 2) & 0x3;
+	pin->disable = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 4);
+	if (val < 0)
+		return val;
+
+	pin->function = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 5);
+	if (val < 0)
+		return val;
+
+	pin->inverted = !(val & BIT(3));
+
+	return 0;
+}
+
+static const struct of_device_id pm8xxx_gpio_of_match[] = {
+	{ .compatible = "qcom,pm8018-gpio", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-gpio", .data = (void *)12 },
+	{ .compatible = "qcom,pm8058-gpio", .data = (void *)40 },
+	{ .compatible = "qcom,pm8917-gpio", .data = (void *)38 },
+	{ .compatible = "qcom,pm8921-gpio", .data = (void *)44 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match);
+
+static int pm8xxx_gpio_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_gpio *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_gpio_bindings);
+	pctrl->desc.custom_params = pm8xxx_gpio_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_gpio_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx gpio driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_gpio_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_gpio_driver = {
+	.driver = {
+		.name = "qcom-ssbi-gpio",
+		.of_match_table = pm8xxx_gpio_of_match,
+	},
+	.probe = pm8xxx_gpio_probe,
+	.remove = pm8xxx_gpio_remove,
+};
+
+module_platform_driver(pm8xxx_gpio_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
new file mode 100644
index 0000000..2d1b69f
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* MPP registers */
+#define SSBI_REG_ADDR_MPP_BASE		0x50
+#define SSBI_REG_ADDR_MPP(n)		(SSBI_REG_ADDR_MPP_BASE + n)
+
+/* MPP Type: type */
+#define PM8XXX_MPP_TYPE_D_INPUT         0
+#define PM8XXX_MPP_TYPE_D_OUTPUT        1
+#define PM8XXX_MPP_TYPE_D_BI_DIR        2
+#define PM8XXX_MPP_TYPE_A_INPUT         3
+#define PM8XXX_MPP_TYPE_A_OUTPUT        4
+#define PM8XXX_MPP_TYPE_SINK            5
+#define PM8XXX_MPP_TYPE_DTEST_SINK      6
+#define PM8XXX_MPP_TYPE_DTEST_OUTPUT    7
+
+/* Digital Input: control */
+#define PM8XXX_MPP_DIN_TO_INT           0
+#define PM8XXX_MPP_DIN_TO_DBUS1         1
+#define PM8XXX_MPP_DIN_TO_DBUS2         2
+#define PM8XXX_MPP_DIN_TO_DBUS3         3
+
+/* Digital Output: control */
+#define PM8XXX_MPP_DOUT_CTRL_LOW        0
+#define PM8XXX_MPP_DOUT_CTRL_HIGH       1
+#define PM8XXX_MPP_DOUT_CTRL_MPP        2
+#define PM8XXX_MPP_DOUT_CTRL_INV_MPP    3
+
+/* Bidirectional: control */
+#define PM8XXX_MPP_BI_PULLUP_1KOHM      0
+#define PM8XXX_MPP_BI_PULLUP_OPEN       1
+#define PM8XXX_MPP_BI_PULLUP_10KOHM     2
+#define PM8XXX_MPP_BI_PULLUP_30KOHM     3
+
+/* Analog Output: control */
+#define PM8XXX_MPP_AOUT_CTRL_DISABLE            0
+#define PM8XXX_MPP_AOUT_CTRL_ENABLE             1
+#define PM8XXX_MPP_AOUT_CTRL_MPP_HIGH_EN        2
+#define PM8XXX_MPP_AOUT_CTRL_MPP_LOW_EN         3
+
+/* Current Sink: control */
+#define PM8XXX_MPP_CS_CTRL_DISABLE      0
+#define PM8XXX_MPP_CS_CTRL_ENABLE       1
+#define PM8XXX_MPP_CS_CTRL_MPP_HIGH_EN  2
+#define PM8XXX_MPP_CS_CTRL_MPP_LOW_EN   3
+
+/* DTEST Current Sink: control */
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN1    0
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN2    1
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN3    2
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN4    3
+
+/* DTEST Digital Output: control */
+#define PM8XXX_MPP_DTEST_DBUS1          0
+#define PM8XXX_MPP_DTEST_DBUS2          1
+#define PM8XXX_MPP_DTEST_DBUS3          2
+#define PM8XXX_MPP_DTEST_DBUS4          3
+
+/* custom pinconf parameters */
+#define PM8XXX_CONFIG_AMUX		(PIN_CONFIG_END + 1)
+#define PM8XXX_CONFIG_DTEST_SELECTOR	(PIN_CONFIG_END + 2)
+#define PM8XXX_CONFIG_ALEVEL		(PIN_CONFIG_END + 3)
+#define PM8XXX_CONFIG_PAIRED		(PIN_CONFIG_END + 4)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:		address of the control register
+ * @irq:		IRQ from the PMIC interrupt controller
+ * @mode:		operating mode for the pin (digital, analog or current sink)
+ * @input:		pin is input
+ * @output:		pin is output
+ * @high_z:		pin is floating
+ * @paired:		mpp operates in paired mode
+ * @output_value:	logical output value of the mpp
+ * @power_source:	selected power source
+ * @dtest:		DTEST route selector
+ * @amux:		input muxing in analog mode
+ * @aout_level:		selector of the output in analog mode
+ * @drive_strength:	drive strength of the current sink
+ * @pullup:		pull up value, when in digital bidirectional mode
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+
+	u8 mode;
+
+	bool input;
+	bool output;
+	bool high_z;
+	bool paired;
+	bool output_value;
+
+	u8 power_source;
+	u8 dtest;
+	u8 amux;
+	u8 aout_level;
+	u8 drive_strength;
+	unsigned pullup;
+};
+
+struct pm8xxx_mpp {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_mpp_bindings[] = {
+	{"qcom,amux-route",	PM8XXX_CONFIG_AMUX,		0},
+	{"qcom,analog-level",	PM8XXX_CONFIG_ALEVEL,		0},
+	{"qcom,dtest",		PM8XXX_CONFIG_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PM8XXX_CONFIG_PAIRED,		0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[] = {
+	PCONFDUMP(PM8XXX_CONFIG_AMUX, "analog mux", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_ALEVEL, "analog level", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_PAIRED, "paired", NULL, false),
+};
+#endif
+
+#define PM8XXX_MAX_MPPS	12
+static const char * const pm8xxx_groups[PM8XXX_MAX_MPPS] = {
+	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
+	"mpp9", "mpp10", "mpp11", "mpp12",
+};
+
+#define PM8XXX_MPP_DIGITAL	0
+#define PM8XXX_MPP_ANALOG	1
+#define PM8XXX_MPP_SINK		2
+
+static const char * const pm8xxx_mpp_functions[] = {
+	"digital", "analog", "sink",
+};
+
+static int pm8xxx_mpp_update(struct pm8xxx_mpp *pctrl,
+			     struct pm8xxx_pin_data *pin)
+{
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+	u8 val;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_OUTPUT;
+			ctrl = pin->dtest - 1;
+		} else if (pin->input && pin->output) {
+			type = PM8XXX_MPP_TYPE_D_BI_DIR;
+			if (pin->high_z)
+				ctrl = PM8XXX_MPP_BI_PULLUP_OPEN;
+			else if (pin->pullup == 600)
+				ctrl = PM8XXX_MPP_BI_PULLUP_1KOHM;
+			else if (pin->pullup == 10000)
+				ctrl = PM8XXX_MPP_BI_PULLUP_10KOHM;
+			else
+				ctrl = PM8XXX_MPP_BI_PULLUP_30KOHM;
+		} else if (pin->input) {
+			type = PM8XXX_MPP_TYPE_D_INPUT;
+			if (pin->dtest)
+				ctrl = pin->dtest;
+			else
+				ctrl = PM8XXX_MPP_DIN_TO_INT;
+		} else {
+			type = PM8XXX_MPP_TYPE_D_OUTPUT;
+			ctrl = !!pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+
+		level = pin->power_source;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		if (pin->output) {
+			type = PM8XXX_MPP_TYPE_A_OUTPUT;
+			level = pin->aout_level;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		} else {
+			type = PM8XXX_MPP_TYPE_A_INPUT;
+			level = pin->amux;
+			ctrl = 0;
+		}
+		break;
+	case PM8XXX_MPP_SINK:
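+		/* Sink current is programmed in 5 mA steps: levels 0..7 select 5..40 mA */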
+		level = (pin->drive_strength / 5) - 1;
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_SINK;
+			ctrl = pin->dtest - 1;
+		} else {
+			type = PM8XXX_MPP_TYPE_SINK;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
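+	/* Pack type (bits 7:5), level (bits 4:2) and control (bits 1:0) */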
+	val = type << 5 | level << 2 | ctrl;
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_mpp_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_mpp_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+
+	pin->mode = function;
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->pullup;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->high_z;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->input;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		arg = pin->output_value;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pin->drive_strength;
+		break;
+	case PM8XXX_CONFIG_DTEST_SELECTOR:
+		arg = pin->dtest;
+		break;
+	case PM8XXX_CONFIG_AMUX:
+		arg = pin->amux;
+		break;
+	case PM8XXX_CONFIG_ALEVEL:
+		arg = pin->aout_level;
+		break;
+	case PM8XXX_CONFIG_PAIRED:
+		arg = pin->paired;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->pullup = arg;
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->high_z = true;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->input = true;
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->output = true;
+			pin->output_value = !!arg;
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pin->drive_strength = arg;
+			break;
+		case PM8XXX_CONFIG_DTEST_SELECTOR:
+			pin->dtest = arg;
+			break;
+		case PM8XXX_CONFIG_AMUX:
+			pin->amux = arg;
+			break;
+		case PM8XXX_CONFIG_ALEVEL:
+			pin->aout_level = arg;
+			break;
+		case PM8XXX_CONFIG_PAIRED:
+			pin->paired = !!arg;
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_mpp",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_mpp_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->input = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = true;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		return -EINVAL;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = false;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		pin->input = false;
+		pin->output = true;
+		break;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (!pin->input)
+		return pin->output_value;
+
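+	/* Input level is read back through the PMIC interrupt controller */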
+	ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+	if (!ret)
+		ret = !!state;
+
+	return ret;
+}
+
+static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	pin->output_value = !!value;
+
+	pm8xxx_mpp_update(pctrl, pin);
+}
+
+static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
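+	/* DT numbers MPPs from 1, while the driver numbers pins from 0 */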
+	return gpio_desc->args[0] - 1;
+}
+
+static int pm8xxx_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const aout_lvls[] = {
+		"1v25", "1v25_2", "0v625", "0v3125", "mpp", "abus1", "abus2",
+		"abus3"
+	};
+
+	static const char * const amuxs[] = {
+		"amux5", "amux6", "amux7", "amux8", "amux9", "abus1", "abus2",
+		"abus3",
+	};
+
+	seq_printf(s, " mpp%-2d:", offset + 1);
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		seq_puts(s, " digital ");
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d\n", pin->dtest);
+		} else if (pin->input && pin->output) {
+			if (pin->high_z)
+				seq_puts(s, "bi-dir high-z");
+			else
+				seq_printf(s, "bi-dir %dOhm", pin->pullup);
+		} else if (pin->input) {
+			if (pin->dtest)
+				seq_printf(s, "in dtest%d", pin->dtest);
+			else
+				seq_puts(s, "in gpio");
+		} else if (pin->output) {
+			seq_puts(s, "out ");
+
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	case PM8XXX_MPP_ANALOG:
+		seq_puts(s, " analog ");
+		if (pin->output) {
+			seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		} else {
+			seq_printf(s, "input mux %s", amuxs[pin->amux]);
+		}
+		break;
+	case PM8XXX_MPP_SINK:
+		seq_printf(s, " sink %dmA ", pin->drive_strength);
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d", pin->dtest);
+		} else {
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	}
+}
+
+static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_mpp_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_mpp_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_mpp_template = {
+	.direction_input = pm8xxx_mpp_direction_input,
+	.direction_output = pm8xxx_mpp_direction_output,
+	.get = pm8xxx_mpp_get,
+	.set = pm8xxx_mpp_set,
+	.of_xlate = pm8xxx_mpp_of_xlate,
+	.to_irq = pm8xxx_mpp_to_irq,
+	.dbg_show = pm8xxx_mpp_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	unsigned int val;
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register\n");
+		return ret;
+	}
+
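+	/* Decode type (bits 7:5), level (bits 4:2) and control (bits 1:0) */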
+	type = (val >> 5) & 7;
+	level = (val >> 2) & 7;
+	ctrl = (val) & 3;
+
+	switch (type) {
+	case PM8XXX_MPP_TYPE_D_INPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->power_source = level;
+		pin->dtest = ctrl;
+		break;
+	case PM8XXX_MPP_TYPE_D_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->output = true;
+		pin->power_source = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_D_BI_DIR:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->output = true;
+		pin->power_source = level;
+		switch (ctrl) {
+		case PM8XXX_MPP_BI_PULLUP_1KOHM:
+			pin->pullup = 600;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_OPEN:
+			pin->high_z = true;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_10KOHM:
+			pin->pullup = 10000;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_30KOHM:
+			pin->pullup = 30000;
+			break;
+		}
+		break;
+	case PM8XXX_MPP_TYPE_A_INPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->input = true;
+		pin->amux = level;
+		break;
+	case PM8XXX_MPP_TYPE_A_OUTPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->output = true;
+		pin->aout_level = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->drive_strength = 5 * (level + 1);
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->dtest = ctrl + 1;
+		pin->drive_strength = 5 * (level + 1);
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->power_source = level;
+		if (ctrl >= 1)
+			pin->dtest = ctrl;
+		break;
+	}
+
+	return 0;
+}
+
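+/* The match data encodes the number of MPP pins on each supported PMIC */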
+static const struct of_device_id pm8xxx_mpp_of_match[] = {
+	{ .compatible = "qcom,pm8018-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8917-mpp", .data = (void *)10 },
+	{ .compatible = "qcom,pm8821-mpp", .data = (void *)4 },
+	{ .compatible = "qcom,pm8921-mpp", .data = (void *)12 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match);
+
+static int pm8xxx_mpp_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_mpp *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_MPP(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_mpp_bindings);
+	pctrl->desc.custom_params = pm8xxx_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_mpp_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
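+	/* Map the gpiochip 1:1 onto the pin controller's pin space */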
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx mpp driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_mpp_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_mpp *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_mpp_driver = {
+	.driver = {
+		.name = "qcom-ssbi-mpp",
+		.of_match_table = pm8xxx_mpp_of_match,
+	},
+	.probe = pm8xxx_mpp_probe,
+	.remove = pm8xxx_mpp_remove,
+};
+
+module_platform_driver(pm8xxx_mpp_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx MPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b18dabb..5f45caa 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -148,9 +148,9 @@
 	}
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irqd->irq, handle_edge_irq);
+		irq_set_handler_locked(irqd, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irqd->irq, handle_level_irq);
+		irq_set_handler_locked(irqd, handle_level_irq);
 
 	con = readl(d->virt_base + reg_con);
 	con &= ~(EXYNOS_EINT_CON_MASK << shift);
@@ -256,7 +256,6 @@
 	irq_set_chip_data(virq, b);
 	irq_set_chip_and_handler(virq, &b->irq_chip->chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -422,9 +421,9 @@
 /* interrupt handler for wakeup interrupts 0..15 */
 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 {
-	struct exynos_weint_data *eintd = irq_get_handler_data(irq);
+	struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pin_bank *bank = eintd->bank;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int eint_irq;
 
 	chained_irq_enter(chip, desc);
@@ -454,8 +453,8 @@
 /* interrupt handler for wakeup interrupt 16 */
 static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
 	unsigned long pend;
 	unsigned long mask;
@@ -542,8 +541,9 @@
 			}
 			weint_data[idx].irq = idx;
 			weint_data[idx].bank = bank;
-			irq_set_handler_data(irq, &weint_data[idx]);
-			irq_set_chained_handler(irq, exynos_irq_eint0_15);
+			irq_set_chained_handler_and_data(irq,
+							 exynos_irq_eint0_15,
+							 &weint_data[idx]);
 		}
 	}
 
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index f5619fb..9ce0b86 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -44,9 +44,7 @@
 #define PIN_NAME_LENGTH		10
 
 #define GROUP_SUFFIX		"-grp"
-#define GSUFFIX_LEN		sizeof(GROUP_SUFFIX)
 #define FUNCTION_SUFFIX		"-mux"
-#define FSUFFIX_LEN		sizeof(FUNCTION_SUFFIX)
 
 /*
  * pin configuration type and its value are packed together into a 16-bits.
@@ -205,22 +203,17 @@
 
 	/* Allocate memory for pin-map entries */
 	map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
-	if (!map) {
-		dev_err(dev, "could not alloc memory for pin-maps\n");
+	if (!map)
 		return -ENOMEM;
-	}
 	*nmaps = 0;
 
 	/*
 	 * Allocate memory for pin group name. The pin group name is derived
 	 * from the node name from which these map entries are be created.
 	 */
-	gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL);
-	if (!gname) {
-		dev_err(dev, "failed to alloc memory for group name\n");
+	gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
+	if (!gname)
 		goto free_map;
-	}
-	snprintf(gname, strlen(np->name) + 4, "%s%s", np->name, GROUP_SUFFIX);
 
 	/*
 	 * don't have config options? then skip over to creating function
@@ -231,10 +224,8 @@
 
 	/* Allocate memory for config entries */
 	cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
-	if (!cfg) {
-		dev_err(dev, "failed to alloc memory for configs\n");
+	if (!cfg)
 		goto free_gname;
-	}
 
 	/* Prepare a list of config settings */
 	for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
@@ -254,13 +245,10 @@
 skip_cfgs:
 	/* create the function map entry */
 	if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
-		fname = kzalloc(strlen(np->name) + FSUFFIX_LEN,	GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = kasprintf(GFP_KERNEL,
+				  "%s%s", np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			goto free_cfg;
-		}
-		snprintf(fname, strlen(np->name) + 4, "%s%s", np->name,
-			 FUNCTION_SUFFIX);
 
 		map[*nmaps].data.mux.group = gname;
 		map[*nmaps].data.mux.function = fname;
@@ -651,10 +639,8 @@
 	}
 
 	*pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
-	if (!*pin_list) {
-		dev_err(dev, "failed to allocate memory for pin list\n");
+	if (!*pin_list)
 		return -ENOMEM;
-	}
 
 	return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
 			*pin_list, *npins);
@@ -682,17 +668,15 @@
 		return -EINVAL;
 
 	groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
-	if (!groups) {
-		dev_err(dev, "failed allocate memory for ping group list\n");
+	if (!groups)
 		return -EINVAL;
-	}
+
 	grp = groups;
 
 	functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
-	if (!functions) {
-		dev_err(dev, "failed to allocate memory for function list\n");
+	if (!functions)
 		return -EINVAL;
-	}
+
 	func = functions;
 
 	/*
@@ -710,14 +694,10 @@
 		}
 
 		/* derive pin group name from the node name */
-		gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!gname) {
-			dev_err(dev, "failed to alloc memory for group name\n");
+		gname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, GROUP_SUFFIX);
+		if (!gname)
 			return -ENOMEM;
-		}
-		snprintf(gname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 GROUP_SUFFIX);
 
 		grp->name = gname;
 		grp->pins = pin_list;
@@ -731,22 +711,15 @@
 			continue;
 
 		/* derive function name from the node name */
-		fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			return -ENOMEM;
-		}
-		snprintf(fname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 FUNCTION_SUFFIX);
 
 		func->name = fname;
 		func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
-		if (!func->groups) {
-			dev_err(dev, "failed to alloc memory for group list "
-					"in pin function");
+		if (!func->groups)
 			return -ENOMEM;
-		}
 		func->groups[0] = gname;
 		func->num_groups = gname ? 1 : 0;
 		func->function = function;
@@ -774,10 +747,8 @@
 	int pin, ret;
 
 	ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
-	if (!ctrldesc) {
-		dev_err(dev, "could not allocate memory for pinctrl desc\n");
+	if (!ctrldesc)
 		return -ENOMEM;
-	}
 
 	ctrldesc->name = "exynos5440-pinctrl";
 	ctrldesc->owner = THIS_MODULE;
@@ -787,10 +758,8 @@
 
 	pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
 				EXYNOS5440_MAX_PINS, GFP_KERNEL);
-	if (!pindesc) {
-		dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+	if (!pindesc)
 		return -ENOMEM;
-	}
 	ctrldesc->pins = pindesc;
 	ctrldesc->npins = EXYNOS5440_MAX_PINS;
 
@@ -804,10 +773,8 @@
 	 */
 	pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
 					ctrldesc->npins, GFP_KERNEL);
-	if (!pin_names) {
-		dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+	if (!pin_names)
 		return -ENOMEM;
-	}
 
 	/* for each pin, set the name of the pin */
 	for (pin = 0; pin < ctrldesc->npins; pin++) {
@@ -844,10 +811,8 @@
 	int ret;
 
 	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
-	if (!gc) {
-		dev_err(&pdev->dev, "mem alloc for gpio_chip failed\n");
+	if (!gc)
 		return -ENOMEM;
-	}
 
 	priv->gc = gc;
 	gc->base = 0;
@@ -929,7 +894,6 @@
 	irq_set_chip_data(virq, d);
 	irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -949,10 +913,8 @@
 
 	intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
 					GFP_KERNEL);
-	if (!intd) {
-		dev_err(dev, "failed to allocate memory for gpio intr data\n");
+	if (!intd)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
 		irq = irq_of_parse_and_map(dev->of_node, i);
@@ -995,10 +957,8 @@
 	}
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(dev, "could not allocate memory for private data\n");
+	if (!priv)
 		return -ENOMEM;
-	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 01b43db..019844d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -131,13 +131,13 @@
 	}
 }
 
-static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
+static void s3c24xx_eint_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
@@ -181,7 +181,7 @@
 		return -EINVAL;
 	}
 
-	s3c24xx_eint_set_handler(data->irq, type);
+	s3c24xx_eint_set_handler(data, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT_REG(index);
@@ -243,7 +243,7 @@
 static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	unsigned int virq;
 
 	/* the first 4 eints have a simple 1 to 1 mapping */
@@ -297,9 +297,9 @@
 
 static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned int virq;
 
 	chained_irq_enter(chip, desc);
@@ -357,11 +357,11 @@
 	.irq_set_type	= s3c24xx_eint_type,
 };
 
-static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
+static inline void s3c24xx_demux_eint(struct irq_desc *desc,
 				      u32 offset, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c24xx_eint_data *data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct samsung_pinctrl_drv_data *d = data->drvdata;
 	unsigned int pend, mask;
 
@@ -374,7 +374,7 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = __ffs(pend);
 		pend &= ~(1 << irq);
@@ -390,12 +390,12 @@
 
 static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 0, 0xf0);
+	s3c24xx_demux_eint(desc, 0, 0xf0);
 }
 
 static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 8, 0xffff00);
+	s3c24xx_demux_eint(desc, 8, 0xffff00);
 }
 
 static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
@@ -437,7 +437,6 @@
 					 handle_edge_irq);
 	}
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -457,7 +456,6 @@
 
 	irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index ec8cc3b..f5ea40a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -260,13 +260,13 @@
 	return trigger;
 }
 
-static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
+static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
@@ -356,7 +356,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINTCON_REG(bank->eint_offset);
@@ -395,7 +395,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_gpio_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
@@ -410,8 +409,8 @@
 
 static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint_gpio_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 
 	chained_irq_enter(chip, desc);
@@ -567,7 +566,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT0CON0_REG;
@@ -599,11 +598,10 @@
 	.irq_set_type	= s3c64xx_eint0_irq_set_type,
 };
 
-static inline void s3c64xx_irq_demux_eint(unsigned int irq,
-					struct irq_desc *desc, u32 range)
+static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint0_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 	unsigned int pend, mask;
 
@@ -616,11 +614,10 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = fls(pend) - 1;
 		pend &= ~(1 << irq);
-
 		virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
 		/*
 		 * Something must be really wrong if an unmapped EINT
@@ -636,22 +633,22 @@
 
 static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xf);
+	s3c64xx_irq_demux_eint(desc, 0xf);
 }
 
 static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff0);
+	s3c64xx_irq_demux_eint(desc, 0xff0);
 }
 
 static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff000);
+	s3c64xx_irq_demux_eint(desc, 0xff000);
 }
 
 static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff00000);
+	s3c64xx_irq_demux_eint(desc, 0xff00000);
 }
 
 static irq_flow_handler_t s3c64xx_eint0_handlers[NUM_EINT0_IRQ] = {
@@ -673,7 +670,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_eint0_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, ddata);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 865d235..fb9c448 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,24 +29,25 @@
 static int sh_pfc_map_resources(struct sh_pfc *pfc,
 				struct platform_device *pdev)
 {
-	unsigned int num_windows = 0;
-	unsigned int num_irqs = 0;
+	unsigned int num_windows, num_irqs;
 	struct sh_pfc_window *windows;
 	unsigned int *irqs = NULL;
 	struct resource *res;
 	unsigned int i;
+	int irq;
 
 	/* Count the MEM and IRQ resources. */
-	for (i = 0; i < pdev->num_resources; ++i) {
-		switch (resource_type(&pdev->resource[i])) {
-		case IORESOURCE_MEM:
-			num_windows++;
+	for (num_windows = 0;; num_windows++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, num_windows);
+		if (!res)
 			break;
-
-		case IORESOURCE_IRQ:
-			num_irqs++;
+	}
+	for (num_irqs = 0;; num_irqs++) {
+		irq = platform_get_irq(pdev, num_irqs);
+		if (irq == -EPROBE_DEFER)
+			return irq;
+		if (irq < 0)
 			break;
-		}
 	}
 
 	if (num_windows == 0)
@@ -72,22 +73,17 @@
 	}
 
 	/* Fill them. */
-	for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) {
-		switch (resource_type(res)) {
-		case IORESOURCE_MEM:
-			windows->phys = res->start;
-			windows->size = resource_size(res);
-			windows->virt = devm_ioremap_resource(pfc->dev, res);
-			if (IS_ERR(windows->virt))
-				return -ENOMEM;
-			windows++;
-			break;
-
-		case IORESOURCE_IRQ:
-			*irqs++ = res->start;
-			break;
-		}
+	for (i = 0; i < num_windows; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		windows->phys = res->start;
+		windows->size = resource_size(res);
+		windows->virt = devm_ioremap_resource(pfc->dev, res);
+		if (IS_ERR(windows->virt))
+			return -ENOMEM;
+		windows++;
 	}
+	for (i = 0; i < num_irqs; i++)
+		*irqs++ = platform_get_irq(pdev, i);
 
 	return 0;
 }
@@ -591,9 +587,6 @@
 }
 
 static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_R8A7740
-	{ "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_R8A7778
 	{ "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
 #endif
@@ -609,9 +602,6 @@
 #ifdef CONFIG_PINCTRL_PFC_SH7269
 	{ "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
 #endif
-#ifdef CONFIG_PINCTRL_PFC_SH73A0
-	{ "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_SH7720
 	{ "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
 #endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index d0bb145..82ef186 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,10 +22,6 @@
 #include <linux/kernel.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index baab81e..fc344a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -27,10 +27,27 @@
 #include "core.h"
 #include "sh_pfc.h"
 
+#define PORT_GP_30(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx),	\
+	PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx),	\
+	PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx)
+
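+/* Banks 1 and 2 provide only 30 pins, so enumerate them explicitly */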
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
-	PORT_GP_32(2, fn, sfx),						\
+	PORT_GP_30(1, fn, sfx),						\
+	PORT_GP_30(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 3ddf23e..25e8117 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -14,15 +14,30 @@
 #include "core.h"
 #include "sh_pfc.h"
 
+#define PORT_GP_26(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
+
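+/* Banks 1 and 7 provide only 26 pins, so enumerate them explicitly */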
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
+	PORT_GP_26(1, fn, sfx),						\
 	PORT_GP_32(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx),						\
 	PORT_GP_32(6, fn, sfx),						\
-	PORT_GP_32(7, fn, sfx)
+	PORT_GP_26(7, fn, sfx)
 
 enum {
 	PINMUX_RESERVED = 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index bfdcac4..5248685 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -2770,6 +2770,24 @@
 static const unsigned int sdhi2_wp_mux[] = {
 	SD2_WP_MARK,
 };
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	RCAR_GP_PIN(5, 24), /* PWEN */
+	RCAR_GP_PIN(5, 25), /* OVC */
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK,
+	USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	RCAR_GP_PIN(5, 26), /* PWEN */
+	RCAR_GP_PIN(5, 27), /* OVC */
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK,
+	USB1_OVC_MARK,
+};
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
 	SH_PFC_PIN_GROUP(eth_link),
@@ -2945,6 +2963,8 @@
 	SH_PFC_PIN_GROUP(sdhi2_ctrl),
 	SH_PFC_PIN_GROUP(sdhi2_cd),
 	SH_PFC_PIN_GROUP(sdhi2_wp),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
 };
 
 static const char * const eth_groups[] = {
@@ -3219,6 +3239,14 @@
 	"sdhi2_wp",
 };
 
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(eth),
 	SH_PFC_FUNCTION(hscif0),
@@ -3253,6 +3281,8 @@
 	SH_PFC_FUNCTION(sdhi0),
 	SH_PFC_FUNCTION(sdhi1),
 	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d2efbfb..0975265 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -26,10 +26,6 @@
 #include <linux/regulator/machine.h>
 #include <linux/slab.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index ff67896..863c3e3 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -40,6 +40,10 @@
 
 	struct pinctrl_pin_desc *pins;
 	struct sh_pfc_pin_config *configs;
+
+	const char *func_prop_name;
+	const char *groups_prop_name;
+	const char *pins_prop_name;
 };
 
 static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
@@ -96,10 +100,13 @@
 	return 0;
 }
 
-static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
+static int sh_pfc_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+				    struct device_node *np,
 				    struct pinctrl_map **map,
 				    unsigned int *num_maps, unsigned int *index)
 {
+	struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+	struct device *dev = pmx->pfc->dev;
 	struct pinctrl_map *maps = *map;
 	unsigned int nmaps = *num_maps;
 	unsigned int idx = *index;
@@ -113,10 +120,27 @@
 	const char *pin;
 	int ret;
 
+	/* Support both the old Renesas-specific properties and the new standard
+	 * properties. Mixing old and new properties isn't allowed, neither
+	 * inside a subnode nor across subnodes.
+	 */
+	if (!pmx->func_prop_name) {
+		if (of_find_property(np, "groups", NULL) ||
+		    of_find_property(np, "pins", NULL)) {
+			pmx->func_prop_name = "function";
+			pmx->groups_prop_name = "groups";
+			pmx->pins_prop_name = "pins";
+		} else {
+			pmx->func_prop_name = "renesas,function";
+			pmx->groups_prop_name = "renesas,groups";
+			pmx->pins_prop_name = "renesas,pins";
+		}
+	}
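+	/* The first subnode parsed selects the property set for the whole device */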
+
 	/* Parse the function and configuration properties. At least a function
 	 * or one configuration must be specified.
 	 */
-	ret = of_property_read_string(np, "renesas,function", &function);
+	ret = of_property_read_string(np, pmx->func_prop_name, &function);
 	if (ret < 0 && ret != -EINVAL) {
 		dev_err(dev, "Invalid function in DT\n");
 		return ret;
@@ -129,11 +153,12 @@
 	if (!function && num_configs == 0) {
 		dev_err(dev,
 			"DT node must contain at least a function or config\n");
+		ret = -ENODEV;
 		goto done;
 	}
 
 	/* Count the number of pins and groups and reallocate mappings. */
-	ret = of_property_count_strings(np, "renesas,pins");
+	ret = of_property_count_strings(np, pmx->pins_prop_name);
 	if (ret == -EINVAL) {
 		num_pins = 0;
 	} else if (ret < 0) {
@@ -143,7 +168,7 @@
 		num_pins = ret;
 	}
 
-	ret = of_property_count_strings(np, "renesas,groups");
+	ret = of_property_count_strings(np, pmx->groups_prop_name);
 	if (ret == -EINVAL) {
 		num_groups = 0;
 	} else if (ret < 0) {
@@ -174,7 +199,7 @@
 	*num_maps = nmaps;
 
 	/* Iterate over pins and groups and create the mappings. */
-	of_property_for_each_string(np, "renesas,groups", prop, group) {
+	of_property_for_each_string(np, pmx->groups_prop_name, prop, group) {
 		if (function) {
 			maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
 			maps[idx].data.mux.group = group;
@@ -198,7 +223,7 @@
 		goto done;
 	}
 
-	of_property_for_each_string(np, "renesas,pins", prop, pin) {
+	of_property_for_each_string(np, pmx->pins_prop_name, prop, pin) {
 		ret = sh_pfc_map_add_config(&maps[idx], pin,
 					    PIN_MAP_TYPE_CONFIGS_PIN,
 					    configs, num_configs);
@@ -246,7 +271,7 @@
 	index = 0;
 
 	for_each_child_of_node(np, child) {
-		ret = sh_pfc_dt_subnode_to_map(dev, child, map, num_maps,
+		ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
 					       &index);
 		if (ret < 0)
 			goto done;
@@ -254,7 +279,8 @@
 
 	/* If no mapping has been found in child nodes try the config node. */
 	if (*num_maps == 0) {
-		ret = sh_pfc_dt_subnode_to_map(dev, np, map, num_maps, &index);
+		ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps,
+					       &index);
 		if (ret < 0)
 			goto done;
 	}
@@ -465,6 +491,9 @@
 	case PIN_CONFIG_BIAS_PULL_DOWN:
 		return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
 
+	case PIN_CONFIG_POWER_SOURCE:
+		return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
+
 	default:
 		return false;
 	}
@@ -477,7 +506,6 @@
 	struct sh_pfc *pfc = pmx->pfc;
 	enum pin_config_param param = pinconf_to_config_param(*config);
 	unsigned long flags;
-	unsigned int bias;
 
 	if (!sh_pfc_pinconf_validate(pfc, _pin, param))
 		return -ENOTSUPP;
@@ -485,7 +513,9 @@
 	switch (param) {
 	case PIN_CONFIG_BIAS_DISABLE:
 	case PIN_CONFIG_BIAS_PULL_UP:
-	case PIN_CONFIG_BIAS_PULL_DOWN:
+	case PIN_CONFIG_BIAS_PULL_DOWN: {
+		unsigned int bias;
+
 		if (!pfc->info->ops || !pfc->info->ops->get_bias)
 			return -ENOTSUPP;
 
@@ -498,6 +528,24 @@
 
 		*config = 0;
 		break;
+	}
+
+	case PIN_CONFIG_POWER_SOURCE: {
+		int ret;
+
+		if (!pfc->info->ops || !pfc->info->ops->get_io_voltage)
+			return -ENOTSUPP;
+
+		spin_lock_irqsave(&pfc->lock, flags);
+		ret = pfc->info->ops->get_io_voltage(pfc, _pin);
+		spin_unlock_irqrestore(&pfc->lock, flags);
+
+		if (ret < 0)
+			return ret;
+
+		*config = ret;
+		break;
+	}
 
 	default:
 		return -ENOTSUPP;
@@ -534,6 +582,24 @@
 
 			break;
 
+		case PIN_CONFIG_POWER_SOURCE: {
+			unsigned int arg =
+				pinconf_to_config_argument(configs[i]);
+			int ret;
+
+			if (!pfc->info->ops || !pfc->info->ops->set_io_voltage)
+				return -ENOTSUPP;
+
+			spin_lock_irqsave(&pfc->lock, flags);
+			ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg);
+			spin_unlock_irqrestore(&pfc->lock, flags);
+
+			if (ret)
+				return ret;
+
+			break;
+		}
+
 		default:
 			return -ENOTSUPP;
 		}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 0874cfe..15afd49 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -12,6 +12,7 @@
 #define __SH_PFC_H
 
 #include <linux/bug.h>
+#include <linux/pinctrl/pinconf-generic.h>
 #include <linux/stringify.h>
 
 enum {
@@ -26,6 +27,7 @@
 #define SH_PFC_PIN_CFG_OUTPUT		(1 << 1)
 #define SH_PFC_PIN_CFG_PULL_UP		(1 << 2)
 #define SH_PFC_PIN_CFG_PULL_DOWN	(1 << 3)
+#define SH_PFC_PIN_CFG_IO_VOLTAGE	(1 << 4)
 #define SH_PFC_PIN_CFG_NO_GPIO		(1 << 31)
 
 struct sh_pfc_pin {
@@ -121,6 +123,9 @@
 	unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
 	void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
 			 unsigned int bias);
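+	/* Optional I/O voltage control for pins with SH_PFC_PIN_CFG_IO_VOLTAGE */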
+	int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin);
+	int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin,
+			      u16 voltage_mV);
 };
 
 struct sh_pfc_soc_info {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9384e0a..9df0c5f 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -148,6 +148,19 @@
 #define DIV_DISABLE	0x1
 #define DIV_ENABLE	0x0
 
+/* Number of Function input disable registers */
+#define NUM_OF_IN_DISABLE_REG	0x2
+
+/* Offset of Function input disable registers */
+#define IN_DISABLE_0_REG_SET		0x0A00
+#define IN_DISABLE_0_REG_CLR		0x0A04
+#define IN_DISABLE_1_REG_SET		0x0A08
+#define IN_DISABLE_1_REG_CLR		0x0A0C
+#define IN_DISABLE_VAL_0_REG_SET	0x0A80
+#define IN_DISABLE_VAL_0_REG_CLR	0x0A84
+#define IN_DISABLE_VAL_1_REG_SET	0x0A88
+#define IN_DISABLE_VAL_1_REG_CLR	0x0A8C
+
 struct dt_params {
 	const char *property;
 	int value;
@@ -197,6 +210,16 @@
 	}
 
 /**
+ * struct atlas7_pad_status - Atlas7 Pad status
+ */
+struct atlas7_pad_status {
+	u8 func;
+	u8 pull;
+	u8 dstr;
+	u8 reserved;
+};
+
+/**
  * struct atlas7_pad_mux - Atlas7 mux
  * @bank:		The bank of this pad's registers on.
  * @pin	:		The ID of this Pad.
@@ -285,6 +308,9 @@
 /* Platform info of atlas7 pinctrl */
 #define ATLAS7_PINCTRL_REG_BANKS	2
 #define ATLAS7_PINCTRL_BANK_0_PINS	18
+#define ATLAS7_PINCTRL_BANK_1_PINS	141
+#define ATLAS7_PINCTRL_TOTAL_PINS	\
+	(ATLAS7_PINCTRL_BANK_0_PINS + ATLAS7_PINCTRL_BANK_1_PINS)
 
 /**
  * Atlas7 GPIO Chip
@@ -316,6 +342,7 @@
 	unsigned int gpio_offset;
 	unsigned int ngpio;
 	const unsigned int *gpio_pins;
+	u32 sleep_data[NGPIO_OF_BANK];
 };
 
 struct atlas7_gpio_chip {
@@ -343,6 +370,9 @@
 	struct pinctrl_desc pctl_desc;
 	struct atlas7_pinctrl_data *pctl_data;
 	void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS];
+	u32 status_ds[NUM_OF_IN_DISABLE_REG];
+	u32 status_dsv[NUM_OF_IN_DISABLE_REG];
+	struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS];
 };
 
 /*
@@ -3480,6 +3510,160 @@
 	.confs_cnt = ARRAY_SIZE(atlas7_ioc_pad_confs),
 };
 
+/* Simple map data structure */
+struct map_data {
+	u8 idx;
+	u8 data;
+};
+
+/**
+ * struct atlas7_pull_info - Atlas7 Pad pull info
+ * @pad_type:	The type of this Pad.
+ * @mask:	The mask value of this pin's pull bits.
+ * @v2s: The map of pull register value to pull status.
+ * @s2v: The map of pull status to pull register value.
+ */
+struct atlas7_pull_info {
+	u8 pad_type;
+	u8 mask;
+	const struct map_data *v2s;
+	const struct map_data *s2v;
+};
+
+/* Pull Register value map to status */
+static const struct map_data p4we_pull_v2s[] = {
+	{ P4WE_PULL_UP, PULL_UP },
+	{ P4WE_HIGH_HYSTERESIS, HIGH_HYSTERESIS },
+	{ P4WE_HIGH_Z, HIGH_Z },
+	{ P4WE_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data p16st_pull_v2s[] = {
+	{ P16ST_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ P16ST_HIGH_Z, HIGH_Z },
+	{ P16ST_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data pm31_pull_v2s[] = {
+	{ PM31_PULL_DISABLED, PULL_DOWN },
+	{ PM31_PULL_ENABLED, PULL_UP },
+};
+
+static const struct map_data pangd_pull_v2s[] = {
+	{ PANGD_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ PANGD_HIGH_Z, HIGH_Z },
+	{ PANGD_PULL_DOWN, PULL_DOWN },
+};
+
+/* Pull status map to register value */
+static const struct map_data p4we_pull_s2v[] = {
+	{ PULL_UP, P4WE_PULL_UP },
+	{ HIGH_HYSTERESIS, P4WE_HIGH_HYSTERESIS },
+	{ HIGH_Z, P4WE_HIGH_Z },
+	{ PULL_DOWN, P4WE_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data p16st_pull_s2v[] = {
+	{ PULL_UP, P16ST_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, P16ST_HIGH_Z },
+	{ PULL_DOWN, P16ST_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pm31_pull_s2v[] = {
+	{ PULL_UP, PM31_PULL_ENABLED },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, -1 },
+	{ PULL_DOWN, PM31_PULL_DISABLED },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pangd_pull_s2v[] = {
+	{ PULL_UP, PANGD_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, PANGD_HIGH_Z },
+	{ PULL_DOWN, PANGD_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
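+/* Pull configuration lookup table, indexed directly by pad type */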
+static const struct atlas7_pull_info atlas7_pull_map[] = {
+	{ PAD_T_4WE_PD, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_4WE_PU, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_16ST, P16ST_PULL_MASK, p16st_pull_v2s, p16st_pull_s2v },
+	{ PAD_T_M31_0204_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0204_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_AD, PANGD_PULL_MASK, pangd_pull_v2s, pangd_pull_s2v },
+};
+
+/**
+ * struct atlas7_ds_ma_info - Atlas7 Pad Drive Strength & currents info
+ * @ma:		The drive strength as a current value, in mA.
+ * @ds_16st:	The corresponding raw value for a 16st pad.
+ * @ds_4we:	The corresponding raw value for a 4we pad.
+ * @ds_0204m31:	The corresponding raw value for a 0204m31 pad.
+ * @ds_0610m31:	The corresponding raw value for a 0610m31 pad.
+ */
+struct atlas7_ds_ma_info {
+	u32 ma;
+	u32 ds_16st;
+	u32 ds_4we;
+	u32 ds_0204m31;
+	u32 ds_0610m31;
+};
+
+static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
+	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL },
+	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL },
+	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0 },
+	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL },
+	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1 },
+	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL },
+	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL },
+	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL },
+	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL },
+	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL },
+	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL },
+	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL },
+	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL },
+	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL },
+	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL },
+	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL },
+};
+
+/**
+ * struct atlas7_ds_info - Atlas7 Pad DriveStrength info
+ * @type:		The type of this Pad.
+ * @mask:		The mask value of this pin's drive strength bits.
+ * @imval:		The immediate value of the drive strength register.
+ * @reserved:		Reserved for alignment.
+ */
+struct atlas7_ds_info {
+	u8 type;
+	u8 mask;
+	u8 imval;
+	u8 reserved;
+};
+
+static const struct atlas7_ds_info atlas7_ds_map[] = {
+	{ PAD_T_4WE_PD, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_4WE_PU, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_16ST, DS_4BIT_MASK, DS_4BIT_IM_VAL },
+	{ PAD_T_M31_0204_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0204_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_AD, DS_NULL, DS_NULL },
+};
+
 static inline u32 atlas7_pin_to_bank(u32 pin)
 {
 	return (pin >= ATLAS7_PINCTRL_BANK_0_PINS) ? 1 : 0;
@@ -3682,49 +3866,22 @@
 	return 0;
 }
 
-struct atlas7_ds_info {
-	u32 ma;
-	u32 ds_16st;
-	u32 ds_4we;
-	u32 ds_0204m31;
-	u32 ds_0610m31;
-};
-
-const struct atlas7_ds_info atlas7_ds_map[] = {
-	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL},
-	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL},
-	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0},
-	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL},
-	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1},
-	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL},
-	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL},
-	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL},
-	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL},
-	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL},
-	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL},
-	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL},
-	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL},
-	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL},
-	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL},
-	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL},
-};
-
 static u32 convert_current_to_drive_strength(u32 type, u32 ma)
 {
 	int idx;
 
-	for (idx = 0; idx < ARRAY_SIZE(atlas7_ds_map); idx++) {
-		if (atlas7_ds_map[idx].ma != ma)
+	for (idx = 0; idx < ARRAY_SIZE(atlas7_ma2ds_map); idx++) {
+		if (atlas7_ma2ds_map[idx].ma != ma)
 			continue;
 
 		if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU)
-			return atlas7_ds_map[idx].ds_4we;
+			return atlas7_ma2ds_map[idx].ds_4we;
 		else if (type == PAD_T_16ST)
-			return atlas7_ds_map[idx].ds_16st;
+			return atlas7_ma2ds_map[idx].ds_16st;
 		else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU)
-			return atlas7_ds_map[idx].ds_0204m31;
+			return atlas7_ma2ds_map[idx].ds_0204m31;
 		else if (type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU)
-			return atlas7_ds_map[idx].ds_0610m31;
+			return atlas7_ma2ds_map[idx].ds_0610m31;
 	}
 
 	return DS_NULL;
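
convert_current_to_drive_strength() is a plain table walk: find the row whose
.ma matches the requested current, then pick the column for the pad type,
falling back to DS_NULL when that pad family has no code for the current. A
self-contained sketch of the same lookup scheme follows; the constants and
table rows are toy values, not the driver's real definitions:

	/*
	 * Toy model of the mA -> drive-strength-code table walk.
	 * Values and pad types are illustrative stand-ins only.
	 */
	#include <stdio.h>

	#define DS_NULL		0xff

	enum pad_type { PAD_4WE, PAD_16ST };

	struct ma2ds { unsigned int ma, ds_16st, ds_4we; };

	static const struct ma2ds map[] = {
		{ 2,  0x0, 0x0 },
		{ 8,  0x3, 0x1 },
		{ 16, 0x7, 0x2 },
	};

	static unsigned int ma_to_ds(enum pad_type type, unsigned int ma)
	{
		size_t i;

		for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
			if (map[i].ma != ma)
				continue;	/* not the requested current */
			return (type == PAD_16ST) ? map[i].ds_16st
						  : map[i].ds_4we;
		}
		return DS_NULL;	/* this pad family can't drive that current */
	}

	int main(void)
	{
		printf("16ST code for 8mA: 0x%x\n", ma_to_ds(PAD_16ST, 8));
		return 0;
	}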
@@ -3735,78 +3892,21 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->pupd_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *pull_sel_reg, *pull_clr_reg;
+	const struct atlas7_pull_info *pull_info;
+	u32 bank;
+	unsigned long regv;
+	void __iomem *pull_sel_reg;
 
+	bank = atlas7_pin_to_bank(pin);
+	pull_info = &atlas7_pull_map[conf->type];
 	pull_sel_reg = pmx->regs[bank] + conf->pupd_reg;
-	pull_clr_reg = CLR_REG(pull_sel_reg);
 
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		writel(P4WE_PULL_MASK << shift, pull_clr_reg);
+	/* Retrieve the corresponding register value from the s2v table by sel */
+	regv = pull_info->s2v[sel].data & pull_info->mask;
 
-		if (sel == PULL_UP)
-			writel(P4WE_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_HYSTERESIS)
-			writel(P4WE_HIGH_HYSTERESIS << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P4WE_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P4WE_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 4WEPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_16ST) {
-		writel(P16ST_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(P16ST_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P16ST_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P16ST_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 16STPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_M31_0204_PD ||
-		type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD ||
-		type == PAD_T_M31_0610_PU) {
-		writel(PM31_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PM31_PULL_ENABLED << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PM31_PULL_DISABLED << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for M31PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_AD) {
-		writel(PANGD_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PANGD_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(PANGD_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PANGD_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for A/D PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else {
-			pr_err("Unknown Pad type[%d] for pull select PAD#%d\n",
-				type, pin);
-			return -ENOTSUPP;
-	}
+	/* Clear the old field, then write the new value to the pull register */
+	writel(pull_info->mask << conf->pupd_bit, CLR_REG(pull_sel_reg));
+	writel(regv << conf->pupd_bit, pull_sel_reg);
 
 	pr_debug("PIN_CFG ### SET PIN#%d PULL SELECTOR:%d == OK ####\n",
 		pin, sel);
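
The rewritten pull setter reduces the old per-type if/else ladder to two table
lookups -- conf->type picks the pad's atlas7_pull_info, and sel indexes its
s2v map -- followed by a clear-then-set pair of writes. A minimal user-space
sketch of that flow, with two plain words standing in for the MMIO
select/clear register pair and toy table values:

	/*
	 * Sketch of the table-driven clear-then-set pull write. Real
	 * hardware pairs each select register with a clear register
	 * (CLR_REG(reg)); here two globals stand in for the writel()
	 * targets and the mask/map values are invented.
	 */
	#include <stdio.h>

	enum pull { PULL_UP, PULL_DOWN, HIGH_Z, NR_PULL };

	struct pull_info {
		unsigned int mask;		/* width of the pull field */
		unsigned char s2v[NR_PULL];	/* status -> register value */
	};

	static const struct pull_info toy_pad = {
		.mask = 0x3,
		.s2v = { [PULL_UP] = 0x1, [PULL_DOWN] = 0x2, [HIGH_Z] = 0x0 },
	};

	static unsigned int sel_reg, clr_reg;	/* stand-ins for MMIO */

	static void set_pull(const struct pull_info *info, unsigned int shift,
			     enum pull sel)
	{
		unsigned int regv = info->s2v[sel] & info->mask;

		clr_reg = info->mask << shift;	/* clear the old field */
		sel_reg = regv << shift;	/* program the new value */
	}

	int main(void)
	{
		set_pull(&toy_pad, 4, PULL_DOWN);
		printf("clr=0x%x sel=0x%x\n", clr_reg, sel_reg);
		return 0;
	}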
@@ -3818,43 +3918,25 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->drvstr_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *ds_sel_reg, *ds_clr_reg;
+	const struct atlas7_ds_info *ds_info;
+	u32 bank;
+	void __iomem *ds_sel_reg;
 
+	ds_info = &atlas7_ds_map[conf->type];
+	if (sel & (~(ds_info->mask)))
+		goto unsupport;
+
+	bank = atlas7_pin_to_bank(pin);
 	ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
-	ds_clr_reg = CLR_REG(ds_sel_reg);
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		if (sel & (~DS_2BIT_MASK))
-			goto unsupport;
 
-		writel(DS_2BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
+	writel(ds_info->imval << conf->drvstr_bit, CLR_REG(ds_sel_reg));
+	writel(sel << conf->drvstr_bit, ds_sel_reg);
 
-		return 0;
-	} else if (type == PAD_T_16ST) {
-		if (sel & (~DS_4BIT_MASK))
-			goto unsupport;
-
-		writel(DS_4BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	} else if (type == PAD_T_M31_0204_PD ||	type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU) {
-		if (sel & (~DS_1BIT_MASK))
-			goto unsupport;
-
-		writel(DS_1BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	}
+	return 0;
 
 unsupport:
 	pr_err("Pad#%d type[%d] doesn't support ds code[%d]!\n",
-		pin, type, sel);
+		pin, conf->type, sel);
 	return -ENOTSUPP;
 }
 
@@ -4101,14 +4183,135 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_pinmux_suspend_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	const struct atlas7_ds_info *ds_info;
+	const struct atlas7_pull_info *pull_info;
+	int idx;
+	u32 bank;
+	unsigned long regv;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Save Function selector */
+		regv = readl(pmx->regs[bank] + conf->mux_reg);
+		status->func = (regv >> conf->mux_bit) & FUNC_CLEAR_MASK;
+
+		/* Check if Pad is in Analogue selector */
+		if (conf->ad_ctrl_reg == -1)
+			goto save_ds_sel;
+
+		regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
+		if (!(regv & (conf->ad_ctrl_bit << ANA_CLEAR_MASK)))
+			status->func = FUNC_ANALOGUE;
+
+save_ds_sel:
+		if (conf->drvstr_reg == -1)
+			goto save_pull_sel;
+
+		/* Save Drive Strength selector */
+		ds_info = &atlas7_ds_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->drvstr_reg);
+		status->dstr = (regv >> conf->drvstr_bit) & ds_info->mask;
+
+save_pull_sel:
+		/* Save Pull selector */
+		pull_info = &atlas7_pull_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->pupd_reg);
+		regv = (regv >> conf->pupd_bit) & pull_info->mask;
+		status->pull = pull_info->v2s[regv].data;
+	}
+
+	/*
+	 * Save the input-disable selectors; these selectors apply to
+	 * mux functions, not to individual pins.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		pmx->status_ds[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		pmx->status_dsv[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
+static int atlas7_pinmux_resume_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	int idx;
+	u32 bank;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Restore Function selector */
+		__atlas7_pmx_pin_enable(pmx, idx, (u32)status->func & 0xff);
+
+		if (FUNC_ANALOGUE == status->func)
+			goto restore_pull_sel;
+
+		/* Restore Drive Strength selector */
+		__altas7_pinctrl_set_drive_strength_sel(pmx->pctl, idx,
+						(u32)status->dstr & 0xff);
+
+restore_pull_sel:
+		/* Restore Pull selector */
+		altas7_pinctrl_set_pull_sel(pmx->pctl, idx,
+						(u32)status->pull & 0xff);
+	}
+
+	/*
+	 * Restore the input-disable selectors; these selectors apply to
+	 * mux functions, not to individual pins.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_ds[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_dsv[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops atlas7_pinmux_pm_ops = {
+	.suspend_noirq = atlas7_pinmux_suspend_noirq,
+	.resume_noirq = atlas7_pinmux_resume_noirq,
+	.freeze_noirq = atlas7_pinmux_suspend_noirq,
+	.restore_noirq = atlas7_pinmux_resume_noirq,
+};
+#endif
+
 static const struct of_device_id atlas7_pinmux_ids[] = {
 	{ .compatible = "sirf,atlas7-ioc",},
+	{},
 };
 
 static struct platform_driver atlas7_pinmux_driver = {
 	.driver = {
 		.name = "atlas7-ioc",
 		.of_match_table = atlas7_pinmux_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_pinmux_pm_ops,
+#endif
 	},
 	.probe = atlas7_pinmux_probe,
 };
@@ -4286,14 +4489,15 @@
 	.irq_set_type = atlas7_gpio_irq_type,
 };
 
-static void atlas7_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
 	struct atlas7_gpio_bank *bank = NULL;
 	u32 status, ctrl;
 	int pin_in_bank = 0, idx;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 
 	for (idx = 0; idx < a7gc->nbank; idx++) {
 		bank = &a7gc->banks[idx];
@@ -4496,6 +4700,7 @@
 
 static const struct of_device_id atlas7_gpio_ids[] = {
 	{ .compatible = "sirf,atlas7-gpio", },
+	{},
 };
 
 static int atlas7_gpio_probe(struct platform_device *pdev)
@@ -4612,17 +4817,65 @@
 		BUG_ON(!bank->pctldev);
 	}
 
+	platform_set_drvdata(pdev, a7gc);
 	dev_info(&pdev->dev, "add to system.\n");
 	return 0;
 failed:
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_gpio_suspend_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			bank->sleep_data[pin] = readl(ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static int atlas7_gpio_resume_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			writel(bank->sleep_data[pin], ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops atlas7_gpio_pm_ops = {
+	.suspend_noirq = atlas7_gpio_suspend_noirq,
+	.resume_noirq = atlas7_gpio_resume_noirq,
+	.freeze_noirq = atlas7_gpio_suspend_noirq,
+	.restore_noirq = atlas7_gpio_resume_noirq,
+};
+#endif
+
 static struct platform_driver atlas7_gpio_driver = {
 	.driver = {
 		.name = "atlas7-gpio",
-		.owner = THIS_MODULE,
 		.of_match_table = atlas7_gpio_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_gpio_pm_ops,
+#endif
 	},
 	.probe = atlas7_gpio_probe,
 };
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 8ba26e4..f8bd9fb 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,14 +545,15 @@
 	.irq_set_type = sirfsoc_gpio_irq_type,
 };
 
-static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(gc);
 	struct sirfsoc_gpio_bank *bank;
 	u32 status, ctrl;
 	int idx = 0;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
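
Both conversions above (atlas7 and sirf) move the handlers off
irq_get_chip(irq) and onto the desc-based accessors: since a chained handler
is already handed its irq_desc, the irq number, chip, and handler data can
all be derived from desc without a second lookup. A hedged sketch of that
shape -- the "demo" handler and the bank-scan comment are illustrative, not
this driver's code:

	#include <linux/irq.h>
	#include <linux/irqchip/chained_irq.h>
	#include <linux/gpio/driver.h>

	static void demo_gpio_handle_irq(unsigned int __irq,
					 struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		struct gpio_chip *gc = irq_desc_get_handler_data(desc);
		unsigned int irq = irq_desc_get_irq(desc);

		chained_irq_enter(chip, desc);
		/*
		 * A real handler scans the bank status registers here and
		 * calls generic_handle_irq() for each pending pin; gc and
		 * irq locate the right bank.
		 */
		(void)gc;
		(void)irq;
		chained_irq_exit(chip, desc);
	}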
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 7376a97..862a096 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -135,7 +135,14 @@
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "ir0")),		/* TX */
+		  SUNXI_FUNCTION(0x2, "ir0"),		/* TX */
+		/*
+		 * The SPDIF block is not referenced at all in the A10 user
+		 * manual. However, it is described in the leaked vendor
+		 * code, and the pin descriptions are declared in the A20
+		 * user manual, which is pin-compatible with this device.
+		 */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF MCLK */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -176,11 +183,15 @@
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "i2s"),		/* DI */
-		  SUNXI_FUNCTION(0x3, "ac97")),		/* DI */
+		  SUNXI_FUNCTION(0x3, "ac97"),		/* DI */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF IN */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "spi2")),		/* CS1 */
+		  SUNXI_FUNCTION(0x2, "spi2"),		/* CS1 */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF OUT */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f09573e..fb4669c0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -588,7 +588,6 @@
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
 	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
@@ -615,16 +614,17 @@
 		return -EINVAL;
 	}
 
-	if (type & IRQ_TYPE_LEVEL_MASK) {
-		d->chip = &sunxi_pinctrl_level_irq_chip;
-		desc->handle_irq = handle_fasteoi_irq;
-	} else {
-		d->chip = &sunxi_pinctrl_edge_irq_chip;
-		desc->handle_irq = handle_edge_irq;
-	}
-
 	spin_lock_irqsave(&pctl->lock, flags);
 
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_level_irq_chip,
+						   handle_fasteoi_irq, NULL);
+	else
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_edge_irq_chip,
+						   handle_edge_irq, NULL);
+
 	regval = readl(pctl->membase + reg);
 	regval &= ~(IRQ_CFG_IRQ_MASK << index);
 	writel(regval | (mode << index), pctl->membase + reg);
@@ -685,6 +685,7 @@
 }
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
+	.name		= "sunxi_pio_edge",
 	.irq_ack	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -695,6 +696,7 @@
 };
 
 static struct irq_chip sunxi_pinctrl_level_irq_chip = {
+	.name		= "sunxi_pio_level",
 	.irq_eoi	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -709,10 +711,42 @@
 			  IRQCHIP_EOI_IF_HANDLED,
 };
 
-static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
+				      struct device_node *node,
+				      const u32 *intspec,
+				      unsigned int intsize,
+				      unsigned long *out_hwirq,
+				      unsigned int *out_type)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+	struct sunxi_desc_function *desc;
+	int pin, base;
+
+	if (intsize < 3)
+		return -EINVAL;
+
+	base = PINS_PER_BANK * intspec[0];
+	pin = base + intspec[1];
+
+	desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data,
+						       pin, "irq");
+	if (!desc)
+		return -EINVAL;
+
+	*out_hwirq = desc->irqbank * PINS_PER_BANK + desc->irqnum;
+	*out_type = intspec[2];
+
+	return 0;
+}
+
+static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
+	.xlate		= sunxi_pinctrl_irq_of_xlate,
+};
+
+static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct sunxi_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned long bank, reg, val;
 
 	for (bank = 0; bank < pctl->desc->irq_banks; bank++)
@@ -983,8 +1017,8 @@
 
 	pctl->domain = irq_domain_add_linear(node,
 					     pctl->desc->irq_banks * IRQ_PER_BANK,
-					     &irq_domain_simple_ops,
-					     NULL);
+					     &sunxi_pinctrl_irq_domain_ops,
+					     pctl);
 	if (!pctl->domain) {
 		dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
 		ret = -ENOMEM;
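
With this change the sunxi domain carries pctl as host_data and translates
interrupt specifiers itself instead of relying on irq_domain_simple_ops. The
essential shape of a three-cell xlate -- bank, pin-in-bank, trigger flags --
is sketched below; the "demo" names and DEMO_PINS_PER_BANK are stand-ins, and
a real implementation (as above) also verifies that the pin can actually mux
to its "irq" function:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/of.h>

	#define DEMO_PINS_PER_BANK	32

	static int demo_irq_of_xlate(struct irq_domain *d,
				     struct device_node *node,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
	{
		if (intsize < 3)
			return -EINVAL;

		/* cells: <bank pin-in-bank trigger-flags> */
		*out_hwirq = intspec[0] * DEMO_PINS_PER_BANK + intspec[1];
		*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

		return 0;
	}

	static const struct irq_domain_ops demo_irq_domain_ops = {
		.xlate = demo_irq_of_xlate,
	};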
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
new file mode 100644
index 0000000..eab23ef
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -0,0 +1,32 @@
+if ARCH_UNIPHIER
+
+config PINCTRL_UNIPHIER_CORE
+	bool
+	select PINMUX
+	select GENERIC_PINCONF
+
+config PINCTRL_UNIPHIER_PH1_LD4
+	tristate "UniPhier PH1-LD4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO4
+	tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_SLD8
+	tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO5
+	tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PROXSTREAM2
+	tristate "UniPhier ProXstream2 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_LD6B
+	tristate "UniPhier PH1-LD6b SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
new file mode 100644
index 0000000..e215b10
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_PINCTRL_UNIPHIER_CORE)		+= pinctrl-uniphier-core.o
+
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4)		+= pinctrl-ph1-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4)		+= pinctrl-ph1-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8)		+= pinctrl-ph1-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5)		+= pinctrl-ph1-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2)	+= pinctrl-proxstream2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B)		+= pinctrl-ph1-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
new file mode 100644
index 0000000..7beb87e
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_ld4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "EA8", 0,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "EA9", 0,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "EA10", 0,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "EA11", 0,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "EA12", 0,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "EA13", 0,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "EA14", 0,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "EA15", 0,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     146, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     147, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     148, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     149, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     150, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     151, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     152, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     153, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     154, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     155, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "MDC", 6,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "MDIO", 6,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     156, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     157, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     158, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     159, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     160, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     161, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCREG", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "PCWE", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PCOE", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCI", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCR", 4,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "HIN", 1,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(113, "VIN", 2,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON6", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON7", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "PWMA", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT00", 0,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT01", 0,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT02", 0,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT03", 0,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT04", 0,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT05", 0,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT06", 0,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "PORT07", 0,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "PORT10", 0,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "PORT11", 0,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "PORT12", 0,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PORT13", 0,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "PORT14", 0,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "PORT15", 0,
+			     136, UNIPHIER_PIN_DRV_4_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "PORT16", 0,
+			     137, UNIPHIER_PIN_DRV_4_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     138, UNIPHIER_PIN_DRV_4_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "PORT20", 0,
+			     139, UNIPHIER_PIN_DRV_4_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "PORT21", 0,
+			     140, UNIPHIER_PIN_DRV_4_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "PORT22", 0,
+			     141, UNIPHIER_PIN_DRV_4_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "PORT23", 0,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "PORT25", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "PORT26", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+};
+
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159,
+				     160, 161, 162, 163, 164};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {85, 88};
+static const unsigned uart0_muxvals[] = {1, 1};
+static const unsigned uart1_pins[] = {155, 156};
+static const unsigned uart1_muxvals[] = {13, 13};
+static const unsigned uart1b_pins[] = {69, 70};
+static const unsigned uart1b_muxvals[] = {23, 23};
+static const unsigned uart2_pins[] = {128, 129};
+static const unsigned uart2_muxvals[] = {13, 13};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {53, 54};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {55, 56};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {155, 156};
+static const unsigned usb2_muxvals[] = {4, 4};
+static const unsigned usb2b_pins[] = {67, 68};
+static const unsigned usb2b_muxvals[] = {23, 23};
+static const unsigned port_range0_pins[] = {
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT0x */
+	143, 144, 145, 146, 147, 148, 149, 150,		/* PORT1x */
+	151, 152, 153, 154, 155, 156, 157, 0,		/* PORT2x */
+	1, 2, 3, 4, 5, 120, 121, 122,			/* PORT3x */
+	24, 25, 26, 27, 28, 29, 30, 31,			/* PORT4x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT5x */
+	48, 49, 50, 51, 52, 53, 54, 55,			/* PORT6x */
+	56, 85, 84, 59, 82, 61, 64, 65,			/* PORT7x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT8x */
+	66, 67, 68, 69, 70, 71, 72, 73,			/* PORT9x */
+	74, 75, 89, 86, 78, 79, 80, 81,			/* PORT10x */
+	60, 83, 58, 57, 88, 87, 77, 76,			/* PORT11x */
+	90, 91, 92, 93, 94, 95, 96, 97,			/* PORT12x */
+	98, 99, 100, 6, 101, 114, 115, 116,		/* PORT13x */
+	103, 108, 21, 22, 23, 117, 118, 119,		/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT0x */
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT1x */
+	0, 0, 0, 0, 0, 0, 0, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	7,						/* PORT166 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15,						/* PORT166 */
+};
+static const unsigned xirq_range0_pins[] = {
+	151, 123, 124, 125, 126, 127, 128, 129,		/* XIRQ0-7 */
+	130, 131, 132, 133, 62,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	14, 0, 0, 0, 0, 0, 0, 0,			/* XIRQ0-7 */
+	0, 0, 0, 0, 14,					/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	134, 63,					/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	0, 14,						/* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb2b),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2", "usb2b"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_ld4_pindata = {
+	.groups = ph1_ld4_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld4_groups),
+	.functions = ph1_ld4_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld4_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld4_pins,
+	.npins = ARRAY_SIZE(ph1_ld4_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_ld4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc,
+				      &ph1_ld4_pindata);
+}
+
+static const struct of_device_id ph1_ld4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
+
+static struct platform_driver ph1_ld4_pinctrl_driver = {
+	.probe = ph1_ld4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
new file mode 100644
index 0000000..9720e697
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld6b-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "SBO0", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SBI0", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned adinter_pins[] = {
+	159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+	173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+	187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+	201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+	229, 230, 231, 232, 233, 234,
+};
+static const unsigned adinter_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+};
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {115, 116};
+static const unsigned i2c2_muxvals[] = {1, 1};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {135, 136};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart0b_pins[] = {11, 12};
+static const unsigned uart0b_muxvals[] = {2, 2};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart1b_pins[] = {113, 114};
+static const unsigned uart1b_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {2, 2};
+static const unsigned uart2b_pins[] = {86, 87};
+static const unsigned uart2b_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(adinter),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart2b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const adinter_groups[] = {"adinter"};
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2", "uart2b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
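+/*
+ * Editorial note (assumption, not from the original commit): mux_bits and
+ * reg_stride appear to describe the pinmux register layout for this SoC:
+ * an 8-bit mux field per pin, packed into 32-bit registers spaced 4 bytes
+ * apart. See pinctrl-uniphier.h for the authoritative definition.
+ */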
+static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
+	.groups = ph1_ld6b_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld6b_groups),
+	.functions = ph1_ld6b_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld6b_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld6b_pins,
+	.npins = ARRAY_SIZE(ph1_ld6b_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
+				      &ph1_ld6b_pindata);
+}
+
+static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld6b-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
+
+static struct platform_driver ph1_ld6b_pinctrl_driver = {
+	.probe = ph1_ld6b_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld6b_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld6b_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
new file mode 100644
index 0000000..96921e4
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
@@ -0,0 +1,1554 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro4-pinctrl"
+
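+/*
+ * Editorial note (assumption, not from the original commit): each
+ * UNIPHIER_PINCTRL_PIN() entry below appears to take, in order: the pin
+ * number, pin name, input-enable (IECTRL) selector, drive-strength control
+ * bit and supported strengths, then pull control bit and default pull
+ * direction. -1 marks a control that is not available for that pin; the
+ * macro itself is defined in pinctrl-uniphier.h.
+ */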
+static const struct pinctrl_pin_desc ph1_pro4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_NONE,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_8_12_16_20,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_8_12_16_20,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_8_12_16_20,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_8_12_16_20,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_8_12_16_20,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_8_12_16_20,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_8_12_16_20,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_NONE,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     136, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_8_12_16_20,
+			     137, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_8_12_16_20,
+			     138, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_8_12_16_20,
+			     139, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_8_12_16_20,
+			     141, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_8_12_16_20,
+			     140, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6,
+			     159, UNIPHIER_PIN_DRV_4_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6,
+			     160, UNIPHIER_PIN_DRV_4_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6,
+			     161, UNIPHIER_PIN_DRV_4_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "MDC", 6,
+			     162, UNIPHIER_PIN_DRV_4_8,
+			     162, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(177, "MDIO", 6,
+			     163, UNIPHIER_PIN_DRV_4_8,
+			     163, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6,
+			     164, UNIPHIER_PIN_DRV_4_8,
+			     164, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6,
+			     165, UNIPHIER_PIN_DRV_4_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     166, UNIPHIER_PIN_DRV_4_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     167, UNIPHIER_PIN_DRV_4_8,
+			     167, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     168, UNIPHIER_PIN_DRV_4_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     169, UNIPHIER_PIN_DRV_4_8,
+			     169, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     170, UNIPHIER_PIN_DRV_4_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     171, UNIPHIER_PIN_DRV_4_8,
+			     171, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE,
+			     172, UNIPHIER_PIN_DRV_4_8,
+			     172, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     173, UNIPHIER_PIN_DRV_4_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     174, UNIPHIER_PIN_DRV_4_8,
+			     174, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     175, UNIPHIER_PIN_DRV_4_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     176, UNIPHIER_PIN_DRV_4_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE,
+			     177, UNIPHIER_PIN_DRV_4_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE,
+			     178, UNIPHIER_PIN_DRV_4_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE,
+			     179, UNIPHIER_PIN_DRV_4_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE,
+			     180, UNIPHIER_PIN_DRV_4_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE,
+			     181, UNIPHIER_PIN_DRV_4_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE,
+			     182, UNIPHIER_PIN_DRV_4_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE,
+			     183, UNIPHIER_PIN_DRV_4_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE,
+			     184, UNIPHIER_PIN_DRV_4_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE,
+			     185, UNIPHIER_PIN_DRV_4_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE,
+			     186, UNIPHIER_PIN_DRV_4_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE,
+			     192, UNIPHIER_PIN_DRV_NONE,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     203, UNIPHIER_PIN_DRV_4_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     204, UNIPHIER_PIN_DRV_4_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE,
+			     205, UNIPHIER_PIN_DRV_4_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     206, UNIPHIER_PIN_DRV_4_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     213, UNIPHIER_PIN_DRV_4_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     214, UNIPHIER_PIN_DRV_4_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE,
+			     235, UNIPHIER_PIN_DRV_4_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE,
+			     236, UNIPHIER_PIN_DRV_4_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE,
+			     237, UNIPHIER_PIN_DRV_4_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE,
+			     238, UNIPHIER_PIN_DRV_4_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE,
+			     239, UNIPHIER_PIN_DRV_4_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE,
+			     240, UNIPHIER_PIN_DRV_4_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE,
+			     241, UNIPHIER_PIN_DRV_4_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE,
+			     242, UNIPHIER_PIN_DRV_4_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE,
+			     243, UNIPHIER_PIN_DRV_4_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE,
+			     244, UNIPHIER_PIN_DRV_4_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE,
+			     245, UNIPHIER_PIN_DRV_4_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE,
+			     246, UNIPHIER_PIN_DRV_4_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE,
+			     247, UNIPHIER_PIN_DRV_4_8,
+			     247, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE,
+			     248, UNIPHIER_PIN_DRV_4_8,
+			     248, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE,
+			     249, UNIPHIER_PIN_DRV_4_8,
+			     249, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE,
+			     250, UNIPHIER_PIN_DRV_4_8,
+			     250, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE,
+			     251, UNIPHIER_PIN_DRV_4_8,
+			     251, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE,
+			     252, UNIPHIER_PIN_DRV_4_8,
+			     252, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE,
+			     253, UNIPHIER_PIN_DRV_4_8,
+			     253, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE,
+			     254, UNIPHIER_PIN_DRV_4_8,
+			     254, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE,
+			     255, UNIPHIER_PIN_DRV_4_8,
+			     255, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE,
+			     256, UNIPHIER_PIN_DRV_4_8,
+			     256, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE,
+			     257, UNIPHIER_PIN_DRV_4_8,
+			     257, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE,
+			     258, UNIPHIER_PIN_DRV_4_8,
+			     258, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE,
+			     259, UNIPHIER_PIN_DRV_4_8,
+			     259, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE,
+			     260, UNIPHIER_PIN_DRV_4_8,
+			     260, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE,
+			     261, UNIPHIER_PIN_DRV_4_8,
+			     261, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE,
+			     262, UNIPHIER_PIN_DRV_4_8,
+			     262, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE,
+			     263, UNIPHIER_PIN_DRV_4_8,
+			     263, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE,
+			     264, UNIPHIER_PIN_DRV_4_8,
+			     264, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     265, UNIPHIER_PIN_DRV_4_8,
+			     265, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     266, UNIPHIER_PIN_DRV_4_8,
+			     266, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     267, UNIPHIER_PIN_DRV_4_8,
+			     267, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     268, UNIPHIER_PIN_DRV_4_8,
+			     268, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     269, UNIPHIER_PIN_DRV_4_8,
+			     269, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     270, UNIPHIER_PIN_DRV_4_8,
+			     270, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     271, UNIPHIER_PIN_DRV_4_8,
+			     271, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     272, UNIPHIER_PIN_DRV_4_8,
+			     272, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     273, UNIPHIER_PIN_DRV_4_8,
+			     273, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     274, UNIPHIER_PIN_DRV_4_8,
+			     274, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     275, UNIPHIER_PIN_DRV_4_8,
+			     275, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     276, UNIPHIER_PIN_DRV_4_8,
+			     276, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE,
+			     277, UNIPHIER_PIN_DRV_4_8,
+			     277, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     278, UNIPHIER_PIN_DRV_4_8,
+			     278, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     279, UNIPHIER_PIN_DRV_4_8,
+			     279, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     280, UNIPHIER_PIN_DRV_4_8,
+			     280, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     281, UNIPHIER_PIN_DRV_4_8,
+			     281, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     282, UNIPHIER_PIN_DRV_4_8,
+			     282, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     284, UNIPHIER_PIN_DRV_4_8,
+			     284, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     285, UNIPHIER_PIN_DRV_4_8,
+			     285, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     286, UNIPHIER_PIN_DRV_4_8,
+			     286, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     287, UNIPHIER_PIN_DRV_4_8,
+			     287, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     288, UNIPHIER_PIN_DRV_4_8,
+			     288, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     289, UNIPHIER_PIN_DRV_4_8,
+			     289, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     290, UNIPHIER_PIN_DRV_4_8,
+			     290, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     291, UNIPHIER_PIN_DRV_4_8,
+			     291, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     292, UNIPHIER_PIN_DRV_4_8,
+			     292, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     293, UNIPHIER_PIN_DRV_4_8,
+			     293, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     294, UNIPHIER_PIN_DRV_4_8,
+			     294, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     295, UNIPHIER_PIN_DRV_4_8,
+			     295, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     296, UNIPHIER_PIN_DRV_4_8,
+			     296, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     297, UNIPHIER_PIN_DRV_4_8,
+			     297, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     298, UNIPHIER_PIN_DRV_4_8,
+			     298, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     299, UNIPHIER_PIN_DRV_4_8,
+			     299, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE,
+			     300, UNIPHIER_PIN_DRV_4_8,
+			     300, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE,
+			     301, UNIPHIER_PIN_DRV_4_8,
+			     301, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE,
+			     302, UNIPHIER_PIN_DRV_4_8,
+			     302, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     303, UNIPHIER_PIN_DRV_4_8,
+			     303, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     304, UNIPHIER_PIN_DRV_4_8,
+			     304, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     305, UNIPHIER_PIN_DRV_4_8,
+			     305, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     306, UNIPHIER_PIN_DRV_4_8,
+			     306, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     307, UNIPHIER_PIN_DRV_4_8,
+			     307, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     308, UNIPHIER_PIN_DRV_4_8,
+			     308, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE,
+			     309, UNIPHIER_PIN_DRV_4_8,
+			     309, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE,
+			     310, UNIPHIER_PIN_DRV_4_8,
+			     310, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     311, UNIPHIER_PIN_DRV_4_8,
+			     311, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE,
+			     312, UNIPHIER_PIN_DRV_NONE,
+			     312, UNIPHIER_PIN_PULL_DOWN),
+};
+
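+/*
+ * Editorial note (assumption, not from the original commit): each
+ * <group>_pins[] array lists the pin numbers that make up a pin group,
+ * and the matching <group>_muxvals[] array gives the per-pin mux register
+ * value that selects this function; the two arrays are index-aligned.
+ */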
+static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {142, 143};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {144, 145};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {146, 147};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {148, 149};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c6_pins[] = {308, 309};
+static const unsigned i2c6_muxvals[] = {6, 6};
+static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+				     50, 51, 52, 53, 54};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {131, 132};
+static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const unsigned uart0_pins[] = {127, 128};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {129, 130};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {131, 132};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {88, 89};
+static const unsigned uart3_muxvals[] = {2, 2};
+static const unsigned usb0_pins[] = {180, 181};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {182, 183};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {184, 185};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	300, 301, 302, 303, 304, 305, 306, 307,		/* PORT0x */
+	308, 309, 310, 311, 312, 313, 314, 315,		/* PORT1x */
+	316, 317, 318, 16, 17, 18, 19, 20,		/* PORT2x */
+	21, 22, 23, 4, 93, 94, 95, 63,			/* PORT3x */
+	123, 122, 124, 125, 126, 141, 202, 203,		/* PORT4x */
+	204, 226, 227, 290, 291, 233, 280, 281,		/* PORT5x */
+	8, 7, 10, 29, 30, 48, 49, 50,			/* PORT6x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT7x */
+	54, 51, 52, 53, 127, 128, 129, 130,		/* PORT8x */
+	131, 132, 57, 60, 134, 133, 135, 136,		/* PORT9x */
+	138, 137, 140, 139, 64, 65, 66, 67,		/* PORT10x */
+	107, 106, 105, 104, 113, 112, 111, 110,		/* PORT11x */
+	68, 69, 70, 71, 72, 73, 74, 75,			/* PORT12x */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* PORT13x */
+	84, 85, 86, 87, 88, 89, 90, 91,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT0x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT1x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT2x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT3x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT4x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT5x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT6x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT7x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT8x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT9x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT10x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT11x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT12x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT13x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	13, 14, 15,					/* PORT175-177 */
+	157, 158, 156, 154, 150, 151, 152, 153,		/* PORT18x */
+	326, 327, 325, 323, 319, 320, 321, 322,		/* PORT19x */
+	160, 161, 162, 163, 164, 165, 166, 167,		/* PORT20x */
+	168, 169, 170, 171, 172, 173, 174, 175,		/* PORT21x */
+	180, 181, 182, 183, 184, 185, 187, 188,		/* PORT22x */
+	193, 194, 195, 196, 197, 198, 199, 200,		/* PORT23x */
+	191, 192, 215, 216, 217, 218, 219, 220,		/* PORT24x */
+	222, 223, 224, 225, 228, 229, 230, 231,		/* PORT25x */
+	282, 283, 284, 285, 286, 287, 288, 289,		/* PORT26x */
+	292, 293, 294, 295, 296, 236, 237, 238,		/* PORT27x */
+	275, 276, 277, 278, 239, 240, 249, 250,		/* PORT28x */
+	251, 252, 261, 262, 263, 264, 273, 274,		/* PORT29x */
+	31, 32, 33, 34, 35, 36, 37, 38,			/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	7, 7, 7,					/* PORT175-177 */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT18x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT19x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT20x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT21x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT22x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT23x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT24x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT25x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT26x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT27x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT28x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT29x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	11, 9, 12, 96, 97, 98, 108, 114,		/* XIRQ0-7 */
+	234, 186, 99, 100, 101, 102, 184, 301,		/* XIRQ8-15 */
+	302, 303, 304, 305, 306,			/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* XIRQ0-7 */
+	7, 7, 7, 7, 7, 7, 2, 2,				/* XIRQ8-15 */
+	2, 2, 2, 2, 2,					/* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+	184, 310, 316,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	2, 2, 2,
+};
+
+static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 2),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq14b", "xirq17b", "xirq18b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_pro4_pindata = {
+	.groups = ph1_pro4_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro4_groups),
+	.functions = ph1_pro4_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro4_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro4_pins,
+	.npins = ARRAY_SIZE(ph1_pro4_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc,
+				      &ph1_pro4_pindata);
+}
+
+static const struct of_device_id ph1_pro4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
+
+static struct platform_driver ph1_pro4_pinctrl_driver = {
+	.probe = ph1_pro4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
new file mode 100644
index 0000000..9af4559
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
@@ -0,0 +1,1351 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro5-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro5_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "TXD0", 0,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "RXD0", 0,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "TXD1", 0,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "RXD1", 0,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "TXD3", 0,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "RXD3", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "XSRST", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PWMA", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "PORT00", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "PORT01", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "PORT02", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT03", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT04", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT05", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT06", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT07", 0,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "PORT10", 0,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "PORT11", 0,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "PORT12", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "PORT13", 0,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "PORT14", 0,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "PORT15", 0,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "PORT16", 0,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "PORT17", 0,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "XPERST", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     196, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     197, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "VI1C2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     247, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     248, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     249, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+};
+
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned i2c0_pins[] = {112, 113};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {114, 115};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {116, 117};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c5_pins[] = {87, 88};
+static const unsigned i2c5_muxvals[] = {2, 2};
+static const unsigned i2c5b_pins[] = {196, 197};
+static const unsigned i2c5b_muxvals[] = {2, 2};
+static const unsigned i2c5c_pins[] = {215, 216};
+static const unsigned i2c5c_muxvals[] = {2, 2};
+static const unsigned i2c6_pins[] = {101, 102};
+static const unsigned i2c6_muxvals[] = {2, 2};
+static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
+				     31, 32, 33, 34, 35};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {26, 27};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {47, 48};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart0b_pins[] = {227, 228};
+static const unsigned uart0b_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {49, 50};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {51, 52};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {53, 54};
+static const unsigned uart3_muxvals[] = {0, 0};
+static const unsigned usb0_pins[] = {124, 125};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {126, 127};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {128, 129};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	89, 90, 91, 92, 93, 94, 95, 96,			/* PORT0x */
+	97, 98, 99, 100, 101, 102, 103, 104,		/* PORT1x */
+	251, 252, 253, 254, 255, 247, 248, 249,		/* PORT2x */
+	39, 40, 41, 42, 43, 44, 45, 46,			/* PORT3x */
+	156, 157, 158, 159, 160, 161, 162, 163,		/* PORT4x */
+	164, 165, 166, 167, 168, 169, 170, 171,		/* PORT5x */
+	190, 191, 192, 193, 194, 195, 196, 197,		/* PORT6x */
+	198, 199, 200, 201, 202, 203, 204, 205,		/* PORT7x */
+	120, 121, 122, 123, 55, 56, 57, 58,		/* PORT8x */
+	124, 125, 126, 127, 49, 50, 53, 54,		/* PORT9x */
+	148, 149, 150, 151, 152, 153, 154, 155,		/* PORT10x */
+	133, 134, 131, 130, 138, 139, 136, 135,		/* PORT11x */
+	28, 29, 30, 31, 32, 33, 34, 35,			/* PORT12x */
+	179, 180, 181, 182, 186, 187, 188, 189,		/* PORT13x */
+	4, 5, 6, 7, 8, 9, 10, 11,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	109, 110, 111,					/* PORT175-177 */
+	206, 207, 208, 209, 210, 211, 212, 213,		/* PORT18x */
+	12, 13, 14, 15, 16, 17, 107, 108,		/* PORT19x */
+	140, 141, 142, 143, 144, 145, 146, 147,		/* PORT20x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT21x */
+	214, 215, 216, 217, 218, 219, 220, 221,		/* PORT22x */
+	222, 223, 224, 225, 226, 227, 228, 229,		/* PORT23x */
+	19, 20, 21, 22, 23, 24, 25, 26,			/* PORT24x */
+	230, 231, 232, 233, 234, 235, 236, 237,		/* PORT25x */
+	239, 240, 241, 242, 243, 244, 245, 246,		/* PORT26x */
+	172, 173, 174, 175, 176, 177, 178, 129,		/* PORT27x */
+	0, 1, 2, 67, 85, 86, 87, 88,			/* PORT28x */
+	105, 106, 18, 27, 36, 128, 132, 137,		/* PORT29x */
+	183, 184, 185, 84, 47, 48, 51, 52,		/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15,					/* PORT175-177 */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT29x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	68, 69, 70, 71, 72, 73, 74, 75,			/* XIRQ0-7 */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* XIRQ8-15 */
+	84, 85, 86, 87, 88,				/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14,				/* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+	91, 92, 239, 144, 240, 156, 241, 106, 128,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14, 14,
+};
+
+static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c5b),
+	UNIPHIER_PINCTRL_GROUP(i2c5c),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 8),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq3b", "xirq4b", "xirq16b", "xirq17b", "xirq17c",
+	"xirq18b", "xirq18c", "xirq19b", "xirq20b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_pro5_pindata = {
+	.groups = ph1_pro5_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro5_groups),
+	.functions = ph1_pro5_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro5_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro5_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro5_pins,
+	.npins = ARRAY_SIZE(ph1_pro5_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro5_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc,
+				      &ph1_pro5_pindata);
+}
+
+static const struct of_device_id ph1_pro5_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro5-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
+
+static struct platform_driver ph1_pro5_pinctrl_driver = {
+	.probe = ph1_pro5_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro5_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro5_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
new file mode 100644
index 0000000..7e9dae5
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-sld8-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26,
+				     27, 28, 29, 30, 31};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {70, 71};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {114, 115};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {112, 113};
+static const unsigned uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {41, 42};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {43, 44};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {114, 115};
+static const unsigned usb2_muxvals[] = {1, 1};
+static const unsigned port_range0_pins[] = {
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT0x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT1x */
+	32, 33, 34, 35, 36, 37, 38, 39,			/* PORT2x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT3x */
+	95, 96, 97, 98, 99, 100, 101, 57,		/* PORT4x */
+	70, 71, 72, 73, 74, 75, 76, 77,			/* PORT5x */
+	81, 83, 84, 85, 86, 89, 90, 91,			/* PORT6x */
+	118, 119, 120, 121, 122, 53, 54, 55,		/* PORT7x */
+	41, 42, 43, 44, 79, 80, 18, 19,			/* PORT8x */
+	110, 111, 112, 113, 114, 115, 16, 17,		/* PORT9x */
+	40, 67, 68, 69, 78, 92, 93, 94,			/* PORT10x */
+	48, 49, 46, 45, 123, 124, 125, 126,		/* PORT11x */
+	47, 127, 20, 56, 22,				/* PORT120-124 */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15,				/* PORT120-124 */
+};
+static const unsigned port_range1_pins[] = {
+	116, 117,					/* PORT130-131 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15,						/* PORT130-131 */
+};
+static const unsigned port_range2_pins[] = {
+	102, 103, 104, 105, 106, 107, 108, 109,		/* PORT14x */
+};
+static const unsigned port_range2_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range3_pins[] = {
+	23,						/* PORT166 */
+};
+static const unsigned port_range3_muxvals[] = {
+	15,						/* PORT166 */
+};
+static const unsigned xirq_range0_pins[] = {
+	128, 129, 130, 131, 132, 133, 134, 135,		/* XIRQ0-7 */
+	82, 87, 88, 50, 51,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* XIRQ0-7 */
+	14, 14, 14, 14, 14,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	52, 58,						/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	14, 14,						/* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range2, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range2, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range2, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range2, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range2, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range2, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range2, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range2, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range3, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
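+/*
+ * SoC-specific data handed to the common pinctrl-uniphier core.  Inferred
+ * from the field names: each pin's mux field is 8 bits wide (mux_bits) and
+ * the mux registers are laid out 4 bytes apart (reg_stride); PH1-sLD8 does
+ * not need the extra "load pinctrl" register kick that some SoCs require.
+ */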
+static struct uniphier_pinctrl_socdata ph1_sld8_pindata = {
+	.groups = ph1_sld8_groups,
+	.groups_count = ARRAY_SIZE(ph1_sld8_groups),
+	.functions = ph1_sld8_functions,
+	.functions_count = ARRAY_SIZE(ph1_sld8_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_sld8_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_sld8_pins,
+	.npins = ARRAY_SIZE(ph1_sld8_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_sld8_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc,
+				      &ph1_sld8_pindata);
+}
+
+static const struct of_device_id ph1_sld8_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-sld8-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
+
+static struct platform_driver ph1_sld8_pinctrl_driver = {
+	.probe = ph1_sld8_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_sld8_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_sld8_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
new file mode 100644
index 0000000..3f036e2
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "proxstream2-pinctrl"
+
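+/*
+ * Pin table.  As far as can be inferred from its use here and in the other
+ * UniPhier SoC drivers, the UNIPHIER_PINCTRL_PIN() arguments are: pin
+ * number, pin name, input-enable control bit (UNIPHIER_PIN_IECTRL_NONE if
+ * absent), drive-strength control bit (-1 for pins whose drive strength is
+ * fixed), drive-strength capability, pull control bit (-1 when the bias is
+ * fixed), and the default pull direction.
+ */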
+static const struct pinctrl_pin_desc proxstream2_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "TXD2", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "RXD2", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "PORT241", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "PORT242", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "PORT243", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CK25O", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "TXD0", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "RXD0", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "TXD3", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "RXD3", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "PORT273", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "PORT274", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "PORT275", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "PORT276", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "PORT277", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "PORT280", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "PORT281", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "PORT282", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "PORT283", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "PORT284", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "PORT285", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
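+/*
+ * Pin groups: each <group>_pins[] array lists the pins belonging to a
+ * group, and the parallel <group>_muxvals[] array gives the mux value to
+ * program for each of those pins in order to select that function.
+ */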
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {8, 8};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {8, 8};
+static const unsigned i2c2_pins[] = {171, 172};
+static const unsigned i2c2_muxvals[] = {8, 8};
+static const unsigned i2c3_pins[] = {159, 160};
+static const unsigned i2c3_muxvals[] = {8, 8};
+static const unsigned i2c5_pins[] = {183, 184};
+static const unsigned i2c5_muxvals[] = {11, 11};
+static const unsigned i2c6_pins[] = {185, 186};
+static const unsigned i2c6_muxvals[] = {11, 11};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+					8, 8};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const unsigned uart0_pins[] = {217, 218};
+static const unsigned uart0_muxvals[] = {8, 8};
+static const unsigned uart0b_pins[] = {179, 180};
+static const unsigned uart0b_muxvals[] = {10, 10};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {8, 8};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {8, 8};
+static const unsigned uart3_pins[] = {219, 220};
+static const unsigned uart3_muxvals[] = {8, 8};
+static const unsigned uart3b_pins[] = {181, 182};
+static const unsigned uart3b_muxvals[] = {10, 10};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {8, 8};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {8, 8};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {8, 8};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {8, 8};
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
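+/*
+ * Group table.  Besides the per-function groups, every GPIO port and
+ * external interrupt line also gets a single-pin group carved out of the
+ * port/xirq ranges with UNIPHIER_PINCTRL_GROUP_SINGLE(), presumably so
+ * that individual pins can be referenced by name.  Port names follow the
+ * portXY scheme: eight pins (Y = 0-7) per port number X.
+ */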
+static const struct uniphier_pinctrl_group proxstream2_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(uart3b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3", "uart3b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function proxstream2_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata proxstream2_pindata = {
+	.groups = proxstream2_groups,
+	.groups_count = ARRAY_SIZE(proxstream2_groups),
+	.functions = proxstream2_functions,
+	.functions_count = ARRAY_SIZE(proxstream2_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc proxstream2_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = proxstream2_pins,
+	.npins = ARRAY_SIZE(proxstream2_pins),
+	.owner = THIS_MODULE,
+};
+
+static int proxstream2_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc,
+				      &proxstream2_pindata);
+}
+
+static const struct of_device_id proxstream2_pinctrl_match[] = {
+	{ .compatible = "socionext,proxstream2-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
+
+static struct platform_driver proxstream2_pinctrl_driver = {
+	.probe = proxstream2_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = proxstream2_pinctrl_match,
+	},
+};
+module_platform_driver(proxstream2_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
new file mode 100644
index 0000000..918f3b6
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-uniphier.h"
+
+struct uniphier_pinctrl_priv {
+	struct pinctrl_dev *pctldev;
+	struct regmap *regmap;
+	struct uniphier_pinctrl_socdata *socdata;
+};
+
+static int uniphier_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups_count;
+}
+
+static const char *uniphier_pctl_get_group_name(struct pinctrl_dev *pctldev,
+						unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups[selector].name;
+}
+
+static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+					unsigned selector,
+					const unsigned **pins,
+					unsigned *num_pins)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = priv->socdata->groups[selector].pins;
+	*num_pins = priv->socdata->groups[selector].num_pins;
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+				       struct seq_file *s, unsigned offset)
+{
+	const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset];
+	const char *pull_dir, *drv_str;
+
+	switch (uniphier_pin_get_pull_dir(pin->drv_data)) {
+	case UNIPHIER_PIN_PULL_UP:
+		pull_dir = "UP";
+		break;
+	case UNIPHIER_PIN_PULL_DOWN:
+		pull_dir = "DOWN";
+		break;
+	case UNIPHIER_PIN_PULL_NONE:
+		pull_dir = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	switch (uniphier_pin_get_drv_str(pin->drv_data)) {
+	case UNIPHIER_PIN_DRV_4_8:
+		drv_str = "4/8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		drv_str = "8/12/16/20(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		drv_str = "4(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		drv_str = "5(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		drv_str = "8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_NONE:
+		drv_str = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	seq_printf(s, " PULL_DIR=%s  DRV_STR=%s", pull_dir, drv_str);
+}
+#endif
+
+static const struct pinctrl_ops uniphier_pctlops = {
+	.get_groups_count = uniphier_pctl_get_groups_count,
+	.get_group_name = uniphier_pctl_get_group_name,
+	.get_group_pins = uniphier_pctl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+	.pin_dbg_show = uniphier_pctl_pin_dbg_show,
+#endif
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+	.dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift, val;
+	unsigned int expected = 1;
+	int ret;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return -EINVAL;
+		expected = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP)
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN)
+			return -EINVAL;
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val >> shift) & 1;
+
+	return (val == expected) ? 0 : -EINVAL;
+}
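
[Editor's note] The PUPDCTRL arithmetic above packs one pull-enable bit per pin, 32 bits to a register, with registers 4 bytes apart. A standalone sketch of the same index math, using a hypothetical control-bit number (illustrative only, not part of this patch):

/* Illustrative only: mirrors the reg/shift computation above. */
#include <stdio.h>

#define UNIPHIER_PINCTRL_PUPDCTRL_BASE	0xa00

int main(void)
{
	unsigned int pupdctrl = 70;	/* hypothetical bit number */
	unsigned int reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
	unsigned int shift = pupdctrl % 32;

	/* bit 70 -> third register (0xa08), bit position 6 */
	printf("reg=0x%x shift=%u\n", reg, shift);
	return 0;
}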
+
+static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 *strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+	int ret;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		*strength = 4;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		*strength = 5;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		*strength = 8;
+		return 0;
+	default:
+		/* drive strength control is not supported for this pin */
+		return -EINVAL;
+	}
+
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	*strength = supported_strength[(val >> shift) & mask];
+
+	return 0;
+}
+
+static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
+					const struct pinctrl_pin_desc *pin)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+	unsigned int val;
+	int ret;
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled. */
+		return 0;
+
+	ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val);
+	if (ret)
+		return ret;
+
+	return val & BIT(iectrl) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	enum pin_config_param param = pinconf_to_config_param(*configs);
+	bool has_arg = false;
+	u16 arg;
+	int ret;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+	case PIN_CONFIG_BIAS_PULL_UP:
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param);
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg);
+		has_arg = true;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc);
+		break;
+	default:
+		/* unsupported parameter */
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret == 0 && has_arg)
+		*configs = pinconf_to_config_packed(param, arg);
+
+	return ret;
+}
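
[Editor's note] uniphier_conf_pin_config_get() re-packs the parameter and argument into the unsigned long it was handed. In the generic pinconf helpers of this kernel generation the parameter appears to occupy the low 16 bits with the u16 argument above it; a standalone round-trip sketch under that assumption:

/* Sketch of the assumed pinconf packing: param in the low 16 bits,
 * u16 argument above it. */
#include <stdio.h>

static unsigned long pack(unsigned int param, unsigned short arg)
{
	return ((unsigned long)arg << 16) | (param & 0xffffUL);
}

int main(void)
{
	/* 10 is a stand-in for an enum pin_config_param value */
	unsigned long config = pack(10, 8);	/* e.g. 8 mA drive strength */

	printf("param=%lu arg=%lu\n", config & 0xffffUL, config >> 16);
	return 0;
}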
+
+static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param,
+				      u16 arg)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift;
+	unsigned int val = 1;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED) {
+			dev_err(pctldev->dev,
+				"cannot disable the pull register for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		val = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP) {
+			dev_err(pctldev->dev,
+				"pull-up is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-up cannot be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN) {
+			dev_err(pctldev->dev,
+				"pull-down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-down cannot be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE) {
+			dev_err(pctldev->dev,
+				"pull-up/down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+
+		if (arg == 0)
+			return 0; /* configuration ignored */
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift);
+}
+
+static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8, -1};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	default:
+		dev_err(pctldev->dev,
+			"cannot change drive strength for pin %u (%s)\n",
+			pin->number, pin->name);
+		return -EINVAL;
+	}
+
+	for (val = 0; supported_strength[val] > 0; val++) {
+		if (supported_strength[val] > strength)
+			break;
+	}
+
+	if (val == 0) {
+		dev_err(pctldev->dev,
+			"unsupported drive strength %u mA for pin %u (%s)\n",
+			strength, pin->number, pin->name);
+		return -EINVAL;
+	}
+
+	val--;
+
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	return regmap_update_bits(priv->regmap, reg,
+				  mask << shift, val << shift);
+}
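
[Editor's note] The selection loop above rounds a requested strength down to the nearest supported value: it walks the ascending table until an entry exceeds the request, then steps back one slot. The trailing -1, read as a huge unsigned value, is guaranteed to exceed any u16 request and so terminates the scan. A standalone sketch:

/* Illustrative round-down selection, mirroring the loop above. */
#include <stdio.h>

int main(void)
{
	const unsigned int table[] = {8, 12, 16, 20, -1};	/* -1: sentinel */
	unsigned int strength = 15;	/* hypothetical request (mA) */
	unsigned int val;

	for (val = 0; table[val] > 0; val++)
		if (table[val] > strength)
			break;

	if (val == 0)
		printf("below the weakest supported setting\n");
	else
		printf("selected %u mA\n", table[val - 1]);	/* 12 mA */
	return 0;
}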
+
+static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev,
+					  const struct pinctrl_pin_desc *pin,
+					  u16 enable)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+
+	if (enable == 0) {
+		/*
+		 * Multiple pins share one input enable, so per-pin disabling
+		 * is impossible.
+		 */
+		dev_err(pctldev->dev, "unable to disable input\n");
+		return -EINVAL;
+	}
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled; nothing to do. */
+		return 0;
+
+	return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL,
+				  BIT(iectrl), BIT(iectrl));
+}
+
+static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs,
+					unsigned num_configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	int i, ret;
+
+	for (i = 0; i < num_configs; i++) {
+		enum pin_config_param param =
+					pinconf_to_config_param(configs[i]);
+		u16 arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+		case PIN_CONFIG_BIAS_PULL_UP:
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+		case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+			ret = uniphier_conf_pin_bias_set(pctldev, pin_desc,
+							 param, arg);
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			ret = uniphier_conf_pin_drive_set(pctldev, pin_desc,
+							  arg);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			ret = uniphier_conf_pin_input_enable(pctldev,
+							     pin_desc, arg);
+			break;
+		default:
+			dev_err(pctldev->dev,
+				"unsupported configuration parameter %u\n",
+				param);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+					      unsigned selector,
+					      unsigned long *configs,
+					      unsigned num_configs)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const unsigned *pins = priv->socdata->groups[selector].pins;
+	unsigned num_pins = priv->socdata->groups[selector].num_pins;
+	int i, ret;
+
+	for (i = 0; i < num_pins; i++) {
+		ret = uniphier_conf_pin_config_set(pctldev, pins[i],
+						   configs, num_configs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops uniphier_confops = {
+	.is_generic = true,
+	.pin_config_get = uniphier_conf_pin_config_get,
+	.pin_config_set = uniphier_conf_pin_config_set,
+	.pin_config_group_set = uniphier_conf_pin_config_group_set,
+};
+
+static int uniphier_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions_count;
+}
+
+static const char *uniphier_pmx_get_function_name(struct pinctrl_dev *pctldev,
+						  unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions[selector].name;
+}
+
+static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+					    unsigned selector,
+					    const char * const **groups,
+					    unsigned *num_groups)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = priv->socdata->functions[selector].groups;
+	*num_groups = priv->socdata->functions[selector].num_groups;
+
+	return 0;
+}
+
+static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
+				    unsigned muxval)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned mux_bits = priv->socdata->mux_bits;
+	unsigned reg_stride = priv->socdata->reg_stride;
+	unsigned reg, reg_end, shift, mask;
+	int ret;
+
+	reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
+	reg_end = reg + reg_stride;
+	shift = pin * mux_bits % 32;
+	mask = (1U << mux_bits) - 1;
+
+	/*
+	 * If reg_stride is greater than 4, the MSBs of each pin-mux (pinsel)
+	 * value are stored in the register at offset + 4.
+	 */
+	for (; reg < reg_end; reg += 4) {
+		ret = regmap_update_bits(priv->regmap, reg,
+					 mask << shift, muxval << shift);
+		if (ret)
+			return ret;
+		muxval >>= mux_bits;
+	}
+
+	if (priv->socdata->load_pinctrl) {
+		ret = regmap_write(priv->regmap,
+				   UNIPHIER_PINCTRL_LOAD_PINMUX, 1);
+		if (ret)
+			return ret;
+	}
+
+	/* some pins need input-enabling */
+	return uniphier_conf_pin_input_enable(pctldev,
+					      &pctldev->desc->pins[pin], 1);
+}
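
[Editor's note] With the ProXstream2 values above (mux_bits = 8, reg_stride = 4), four pin-mux fields share each 32-bit register, so pin 5, for example, lands at PINMUX_BASE + 4 with a shift of 8. A sketch of the arithmetic, including the reg_stride = 8 case where the loop splits the value across reg and reg + 4:

/* Illustrative pin-mux register arithmetic (example values only). */
#include <stdio.h>

#define PINMUX_BASE	0x0

static void locate(unsigned int pin, unsigned int mux_bits,
		   unsigned int reg_stride)
{
	unsigned int reg = PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
	unsigned int shift = pin * mux_bits % 32;
	unsigned int mask = (1U << mux_bits) - 1;

	printf("pin %u: reg=0x%x shift=%u mask=0x%x\n",
	       pin, reg, shift, mask);
}

int main(void)
{
	locate(5, 8, 4);	/* -> reg=0x4, shift=8, mask=0xff */
	locate(5, 8, 8);	/* -> reg=0x8; MSBs would go to reg + 4 */
	return 0;
}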
+
+static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
+				unsigned func_selector,
+				unsigned group_selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *grp =
+					&priv->socdata->groups[group_selector];
+	int i;
+	int ret;
+
+	for (i = 0; i < grp->num_pins; i++) {
+		ret = uniphier_pmx_set_one_mux(pctldev, grp->pins[i],
+					       grp->muxvals[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+					    struct pinctrl_gpio_range *range,
+					    unsigned offset)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *groups = priv->socdata->groups;
+	int groups_count = priv->socdata->groups_count;
+	enum uniphier_pinmux_gpio_range_type range_type;
+	int i, j;
+
+	if (strstr(range->name, "irq"))
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_IRQ;
+	else
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_PORT;
+
+	for (i = 0; i < groups_count; i++) {
+		if (groups[i].range_type != range_type)
+			continue;
+
+		for (j = 0; j < groups[i].num_pins; j++)
+			if (groups[i].pins[j] == offset)
+				goto found;
+	}
+
+	dev_err(pctldev->dev, "pin %u does not support GPIO\n", offset);
+	return -EINVAL;
+
+found:
+	return uniphier_pmx_set_one_mux(pctldev, offset, groups[i].muxvals[j]);
+}
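
[Editor's note] gpio_request_enable() above resolves a GPIO offset to its mux value with a linear scan of the SoC group table, first filtering on whether the requesting range is a port range or an XIRQ range (decided by strstr() on the range name); the same pin can therefore carry different mux values in a port group and an IRQ group. A compact sketch with made-up data:

/* Illustrative group scan, mirroring the lookup above. */
#include <stdio.h>
#include <string.h>

struct grp {
	int is_irq;	/* stand-in for range_type */
	unsigned int pin;
	unsigned int muxval;
};

int main(void)
{
	static const struct grp groups[] = {
		{0, 11, 15},	/* pin 11 as a plain port */
		{1, 11, 14},	/* same pin as an external IRQ */
	};
	const char *range_name = "uniphier-gpio-irq";	/* hypothetical */
	int want_irq = strstr(range_name, "irq") != NULL;
	unsigned int offset = 11;
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (groups[i].is_irq == want_irq && groups[i].pin == offset)
			printf("pin %u -> muxval %u\n",
			       offset, groups[i].muxval);
	return 0;
}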
+
+static const struct pinmux_ops uniphier_pmxops = {
+	.get_functions_count = uniphier_pmx_get_functions_count,
+	.get_function_name = uniphier_pmx_get_function_name,
+	.get_function_groups = uniphier_pmx_get_function_groups,
+	.set_mux = uniphier_pmx_set_mux,
+	.gpio_request_enable = uniphier_pmx_gpio_request_enable,
+	.strict = true,
+};
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_pinctrl_priv *priv;
+
+	if (!socdata ||
+	    !socdata->groups ||
+	    !socdata->groups_count ||
+	    !socdata->functions ||
+	    !socdata->functions_count ||
+	    !socdata->mux_bits ||
+	    !socdata->reg_stride) {
+		dev_err(dev, "pinctrl socdata lacks necessary members\n");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = syscon_node_to_regmap(dev->of_node);
+	if (IS_ERR(priv->regmap)) {
+		dev_err(dev, "failed to get regmap\n");
+		return PTR_ERR(priv->regmap);
+	}
+
+	priv->socdata = socdata;
+	desc->pctlops = &uniphier_pctlops;
+	desc->pmxops = &uniphier_pmxops;
+	desc->confops = &uniphier_confops;
+
+	priv->pctldev = pinctrl_register(desc, dev, priv);
+	if (IS_ERR(priv->pctldev)) {
+		dev_err(dev, "failed to register UniPhier pinctrl driver\n");
+		return PTR_ERR(priv->pctldev);
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev)
+{
+	struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(priv->pctldev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
new file mode 100644
index 0000000..e1e98b8
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_UNIPHIER_H__
+#define __PINCTRL_UNIPHIER_H__
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define UNIPHIER_PINCTRL_PINMUX_BASE	0x0
+#define UNIPHIER_PINCTRL_LOAD_PINMUX	0x700
+#define UNIPHIER_PINCTRL_DRVCTRL_BASE	0x800
+#define UNIPHIER_PINCTRL_DRV2CTRL_BASE	0x900
+#define UNIPHIER_PINCTRL_PUPDCTRL_BASE	0xa00
+#define UNIPHIER_PINCTRL_IECTRL		0xd00
+
+/* input enable control register bit */
+#define UNIPHIER_PIN_IECTRL_SHIFT	0
+#define UNIPHIER_PIN_IECTRL_BITS	8
+#define UNIPHIER_PIN_IECTRL_MASK	((1UL << (UNIPHIER_PIN_IECTRL_BITS)) \
+					 - 1)
+
+/* drive strength control register number */
+#define UNIPHIER_PIN_DRVCTRL_SHIFT	((UNIPHIER_PIN_IECTRL_SHIFT) + \
+					(UNIPHIER_PIN_IECTRL_BITS))
+#define UNIPHIER_PIN_DRVCTRL_BITS	9
+#define UNIPHIER_PIN_DRVCTRL_MASK	((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \
+					 - 1)
+
+/* supported drive strength (mA) */
+#define UNIPHIER_PIN_DRV_STR_SHIFT	((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_DRVCTRL_BITS))
+#define UNIPHIER_PIN_DRV_STR_BITS	3
+#define UNIPHIER_PIN_DRV_STR_MASK	((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \
+					 - 1)
+
+/* pull-up / pull-down register number */
+#define UNIPHIER_PIN_PUPDCTRL_SHIFT	((UNIPHIER_PIN_DRV_STR_SHIFT) + \
+					 (UNIPHIER_PIN_DRV_STR_BITS))
+#define UNIPHIER_PIN_PUPDCTRL_BITS	9
+#define UNIPHIER_PIN_PUPDCTRL_MASK	((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\
+					 - 1)
+
+/* direction of pull register */
+#define UNIPHIER_PIN_PULL_DIR_SHIFT	((UNIPHIER_PIN_PUPDCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_PUPDCTRL_BITS))
+#define UNIPHIER_PIN_PULL_DIR_BITS	3
+#define UNIPHIER_PIN_PULL_DIR_MASK	((1UL << (UNIPHIER_PIN_PULL_DIR_BITS))\
+					 - 1)
+
+#if UNIPHIER_PIN_PULL_DIR_SHIFT + UNIPHIER_PIN_PULL_DIR_BITS > BITS_PER_LONG
+#error "unable to pack pin attributes."
+#endif
+
+#define UNIPHIER_PIN_IECTRL_NONE	(UNIPHIER_PIN_IECTRL_MASK)
+
+/* selectable drive strength */
+enum uniphier_pin_drv_str {
+	UNIPHIER_PIN_DRV_4_8,		/* 2 level control: 4/8 mA */
+	UNIPHIER_PIN_DRV_8_12_16_20,	/* 4 level control: 8/12/16/20 mA */
+	UNIPHIER_PIN_DRV_FIXED_4,	/* fixed to 4mA */
+	UNIPHIER_PIN_DRV_FIXED_5,	/* fixed to 5mA */
+	UNIPHIER_PIN_DRV_FIXED_8,	/* fixed to 8mA */
+	UNIPHIER_PIN_DRV_NONE,		/* no support (input only pin) */
+};
+
+/* direction of pull register (no pin supports bi-directional pull biasing) */
+enum uniphier_pin_pull_dir {
+	UNIPHIER_PIN_PULL_UP,		/* pull-up or disabled */
+	UNIPHIER_PIN_PULL_DOWN,		/* pull-down or disabled */
+	UNIPHIER_PIN_PULL_UP_FIXED,	/* always pull-up */
+	UNIPHIER_PIN_PULL_DOWN_FIXED,	/* always pull-down */
+	UNIPHIER_PIN_PULL_NONE,		/* no pull register */
+};
+
+#define UNIPHIER_PIN_IECTRL(x) \
+	(((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT))
+#define UNIPHIER_PIN_DRVCTRL(x) \
+	(((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT))
+#define UNIPHIER_PIN_DRV_STR(x) \
+	(((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT))
+#define UNIPHIER_PIN_PUPDCTRL(x) \
+	(((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT))
+#define UNIPHIER_PIN_PULL_DIR(x) \
+	(((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT))
+
+#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\
+				(UNIPHIER_PIN_IECTRL(iectrl) |		\
+				 UNIPHIER_PIN_DRVCTRL(drvctrl) |	\
+				 UNIPHIER_PIN_DRV_STR(drv_str) |	\
+				 UNIPHIER_PIN_PUPDCTRL(pupdctrl) |	\
+				 UNIPHIER_PIN_PULL_DIR(pull_dir))
+
+static inline unsigned int uniphier_pin_get_iectrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_IECTRL_SHIFT) &
+						UNIPHIER_PIN_IECTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRVCTRL_SHIFT) &
+						UNIPHIER_PIN_DRVCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drv_str(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) &
+						UNIPHIER_PIN_DRV_STR_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PUPDCTRL_SHIFT) &
+						UNIPHIER_PIN_PUPDCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pull_dir(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PULL_DIR_SHIFT) &
+						UNIPHIER_PIN_PULL_DIR_MASK;
+}
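
[Editor's note] The SHIFT/BITS/MASK ladder above budgets 8 + 9 + 3 + 9 + 3 = 32 bits for the five attribute fields, which is exactly what the #error guard above checks against BITS_PER_LONG. A minimal pack/unpack round trip with hypothetical field values:

/* Illustrative pack/unpack round trip for the pin attribute fields. */
#include <stdio.h>

int main(void)
{
	/* hypothetical attributes: iectrl=3, drvctrl=20, drv_str=1,
	 * pupdctrl=70, pull_dir=0 (shifts per the definitions above) */
	unsigned long packed = (3UL << 0) | (20UL << 8) | (1UL << 17) |
			       (70UL << 20) | (0UL << 29);

	printf("drvctrl=%lu pupdctrl=%lu\n",
	       (packed >> 8) & 0x1ff,		/* 9-bit DRVCTRL field */
	       (packed >> 20) & 0x1ff);		/* 9-bit PUPDCTRL field */
	return 0;
}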
+
+enum uniphier_pinmux_gpio_range_type {
+	UNIPHIER_PINMUX_GPIO_RANGE_PORT,
+	UNIPHIER_PINMUX_GPIO_RANGE_IRQ,
+	UNIPHIER_PINMUX_GPIO_RANGE_NONE,
+};
+
+struct uniphier_pinctrl_group {
+	const char *name;
+	const unsigned *pins;
+	unsigned num_pins;
+	const unsigned *muxvals;
+	enum uniphier_pinmux_gpio_range_type range_type;
+};
+
+struct uniphier_pinmux_function {
+	const char *name;
+	const char * const *groups;
+	unsigned num_groups;
+};
+
+struct uniphier_pinctrl_socdata {
+	const struct uniphier_pinctrl_group *groups;
+	int groups_count;
+	const struct uniphier_pinmux_function *functions;
+	int functions_count;
+	unsigned mux_bits;
+	unsigned reg_stride;
+	bool load_pinctrl;
+};
+
+#define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g)			\
+{									\
+	.number = a,							\
+	.name = b,							\
+	.drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g),	\
+}
+
+#define __UNIPHIER_PINCTRL_GROUP(grp, type)				\
+	{								\
+		.name = #grp,						\
+		.pins = grp##_pins,					\
+		.num_pins = ARRAY_SIZE(grp##_pins),			\
+		.muxvals = grp##_muxvals +				\
+			BUILD_BUG_ON_ZERO(ARRAY_SIZE(grp##_pins) !=	\
+					  ARRAY_SIZE(grp##_muxvals)),	\
+		.range_type = type,					\
+	}
+
+#define UNIPHIER_PINCTRL_GROUP(grp)					\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_NONE)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_PORT)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_IRQ)
+
+#define UNIPHIER_PINCTRL_GROUP_SINGLE(grp, array, ofst)			\
+	{								\
+		.name = #grp,						\
+		.pins = array##_pins + ofst,				\
+		.num_pins = 1,						\
+		.muxvals = array##_muxvals + ofst,			\
+	}
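
[Editor's note] UNIPHIER_PINCTRL_GROUP_SINGLE carves a one-pin group out of an existing range by pointer offset; this is how the long runs of portNN and xirqN entries earlier in the patch avoid a dedicated array per pin. A rough standalone illustration of what, e.g., UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39) expands to (shortened, made-up arrays):

/* Illustration of the single-pin group expansion (hypothetical data). */
#include <stdio.h>

struct group {
	const char *name;
	const unsigned int *pins;
	unsigned int num_pins;
	const unsigned int *muxvals;
};

static const unsigned int port_range1_pins[] = {118, 119, 120};
static const unsigned int port_range1_muxvals[] = {15, 15, 15};

int main(void)
{
	/* offset 1 stands in for the real offset 39 */
	struct group port167 = {
		.name = "port167",
		.pins = port_range1_pins + 1,
		.num_pins = 1,
		.muxvals = port_range1_muxvals + 1,
	};

	printf("%s -> pin %u, muxval %u\n",
	       port167.name, port167.pins[0], port167.muxvals[0]);
	return 0;
}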
+
+#define UNIPHIER_PINMUX_FUNCTION(func)					\
+	{								\
+		.name = #func,						\
+		.groups = func##_groups,				\
+		.num_groups = ARRAY_SIZE(func##_groups),		\
+	}
+
+struct platform_device;
+struct pinctrl_desc;
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev);
+
+#endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a04019a..0207274 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -23,7 +23,7 @@
 
 #include <linux/dmi.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -111,6 +111,7 @@
 	.irqflags		= IRQF_TRIGGER_FALLING,
 	.t19_num_keys		= ARRAY_SIZE(mxt_t19_keys),
 	.t19_keymap		= mxt_t19_keys,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_224s_tp_device = {
@@ -121,6 +122,7 @@
 
 static struct mxt_platform_data atmel_1664s_platform_data = {
 	.irqflags		= IRQF_TRIGGER_FALLING,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_1664s_device = {
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6dc13e4..c69bb70 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -919,4 +919,9 @@
 	The PMC is an ARC processor which defines IPC commands for communication
 	with other entities in the CPU.
 
+config SURFACE_PRO3_BUTTON
+	tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3 tablet"
+	depends on ACPI && INPUT
+	---help---
+	  This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3 tablet.
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index dda95a9..ada5128 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -60,3 +60,4 @@
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
 obj-$(CONFIG_ALIENWARE_WMI)	+= alienware-wmi.o
 obj-$(CONFIG_INTEL_PMC_IPC)	+= intel_pmc_ipc.o
+obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index f6b280d..d773b9d 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -807,6 +807,7 @@
 	{ "IBM0068", 0},
 	{ "LEN0068", 0},
 	{ "SNY5001", 0},	/* sony-laptop in charge */
+	{ "HPQ6601", 0},
 	{ "", 0},
 };
 
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 58d29c4..f2b5d0a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -332,6 +332,7 @@
 	{KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
 	{KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
 	{KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+	{KE_KEY, 0x6A, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad Fn + F9 */
 	{KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
 	{KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
 	{KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 4e4cc8b..988eedb 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -114,14 +114,9 @@
 
 	pr_info("Initializing HPQ6001 module\n");
 	err = acpi_bus_register_driver(&hpwl_driver);
-	if (err) {
+	if (err)
 		pr_err("Unable to register HP wireless control driver.\n");
-		goto error_acpi_register;
-	}
 
-	return 0;
-
-error_acpi_register:
 	return err;
 }
 
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 76b5738..fce49f3 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -853,6 +853,20 @@
 		},
 	},
 	{
+		.ident = "Lenovo Yoga 3 14",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
+		},
+	},
+	{
+		.ident = "Lenovo Yoga 2 11 / 13 / Pro",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_BOARD_NAME, "Yoga2"),
+		},
+	},
+	{
 		.ident = "Lenovo Yoga 3 Pro 1370",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 105cfff..28b2a12 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,7 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/acpi.h>
 #include <asm/intel_pmc_ipc.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 /*
  * IPC registers
@@ -473,9 +473,9 @@
 	},
 };
 
-static struct lpc_ich_info tco_info = {
+static struct itco_wdt_platform_data tco_info = {
 	.name = "Apollo Lake SoC",
-	.iTCO_version = 3,
+	.version = 3,
 };
 
 static int ipc_create_punit_device(void)
@@ -552,8 +552,7 @@
 		goto err;
 	}
 
-	ret = platform_device_add_data(pdev, &tco_info,
-				       sizeof(struct lpc_ich_info));
+	ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
 	if (ret) {
 		dev_err(ipcdev.dev, "Failed to add tco platform data\n");
 		goto err;
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
new file mode 100644
index 0000000..f7dade3
--- /dev/null
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -0,0 +1,216 @@
+/*
+ * Power/home/volume button support for the
+ * Microsoft Surface Pro 3 tablet.
+ *
+ * Copyright (c) 2015 Intel Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <acpi/button.h>
+
+#define SURFACE_BUTTON_HID		"MSHW0028"
+#define SURFACE_BUTTON_OBJ_NAME		"VGBI"
+#define SURFACE_BUTTON_DEVICE_NAME	"Surface Pro 3 Buttons"
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_POWER	0xc6
+#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER	0xc7
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_HOME	0xc4
+#define SURFACE_BUTTON_NOTIFY_RELEASE_HOME	0xc5
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP	0xc0
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP	0xc1
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN	0xc2
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN	0xc3
+
+ACPI_MODULE_NAME("surface pro 3 button");
+
+MODULE_AUTHOR("Chen Yu");
+MODULE_DESCRIPTION("Surface Pro 3 Button Driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * Support for the power, home, and volume buttons is supposed to be
+ * covered by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to the "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface Pro 3 does not obey that spec; instead it uses
+ * the VGBI (MSHW0028) device to dispatch the events.
+ * We choose an acpi_driver rather than a platform_driver/i2c_driver
+ * because, although VGBI has an I2C resource connected to an I2C
+ * controller, it is not embedded in any I2C controller's scope, so
+ * neither a platform_device nor an i2c_client will be enumerated;
+ * we have to use an acpi_driver.
+ */
+static const struct acpi_device_id surface_button_device_ids[] = {
+	{SURFACE_BUTTON_HID,    0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, surface_button_device_ids);
+
+struct surface_button {
+	unsigned int type;
+	struct input_dev *input;
+	char phys[32];			/* for input device */
+	unsigned long pushed;
+	bool suspended;
+};
+
+static void surface_button_notify(struct acpi_device *device, u32 event)
+{
+	struct surface_button *button = acpi_driver_data(device);
+	struct input_dev *input;
+	int key_code = KEY_RESERVED;
+	bool pressed = false;
+
+	switch (event) {
+	/* Power button press/release */
+	case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
+		pressed = true;
+		/* fall through */
+	case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
+		key_code = KEY_POWER;
+		break;
+	/* Home button press/release */
+	case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
+		pressed = true;
+		/* fall through */
+	case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
+		key_code = KEY_LEFTMETA;
+		break;
+	/* Volume up button press/release */
+	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
+		pressed = true;
+		/* fall through */
+	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
+		key_code = KEY_VOLUMEUP;
+		break;
+	/* Volume down button press/release */
+	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
+		pressed = true;
+		/* fall through */
+	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
+		key_code = KEY_VOLUMEDOWN;
+		break;
+	default:
+		dev_info_ratelimited(&device->dev,
+				  "Unsupported event [0x%x]\n", event);
+		break;
+	}
+	input = button->input;
+	if (key_code == KEY_RESERVED)
+		return;
+	if (pressed)
+		pm_wakeup_event(&device->dev, 0);
+	if (button->suspended)
+		return;
+	input_report_key(input, key_code, pressed ? 1 : 0);
+	input_sync(input);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int surface_button_suspend(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct surface_button *button = acpi_driver_data(device);
+
+	button->suspended = true;
+	return 0;
+}
+
+static int surface_button_resume(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct surface_button *button = acpi_driver_data(device);
+
+	button->suspended = false;
+	return 0;
+}
+#endif
+
+static int surface_button_add(struct acpi_device *device)
+{
+	struct surface_button *button;
+	struct input_dev *input;
+	const char *hid = acpi_device_hid(device);
+	char *name;
+	int error;
+
+	if (strncmp(acpi_device_bid(device), SURFACE_BUTTON_OBJ_NAME,
+	    strlen(SURFACE_BUTTON_OBJ_NAME)))
+		return -ENODEV;
+
+	button = kzalloc(sizeof(struct surface_button), GFP_KERNEL);
+	if (!button)
+		return -ENOMEM;
+
+	device->driver_data = button;
+	button->input = input = input_allocate_device();
+	if (!input) {
+		error = -ENOMEM;
+		goto err_free_button;
+	}
+
+	name = acpi_device_name(device);
+	strcpy(name, SURFACE_BUTTON_DEVICE_NAME);
+	snprintf(button->phys, sizeof(button->phys), "%s/buttons", hid);
+
+	input->name = name;
+	input->phys = button->phys;
+	input->id.bustype = BUS_HOST;
+	input->dev.parent = &device->dev;
+	input_set_capability(input, EV_KEY, KEY_POWER);
+	input_set_capability(input, EV_KEY, KEY_LEFTMETA);
+	input_set_capability(input, EV_KEY, KEY_VOLUMEUP);
+	input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN);
+
+	error = input_register_device(input);
+	if (error)
+		goto err_free_input;
+	dev_info(&device->dev,
+			"%s [%s]\n", name, acpi_device_bid(device));
+	return 0;
+
+ err_free_input:
+	input_free_device(input);
+ err_free_button:
+	kfree(button);
+	return error;
+}
+
+static int surface_button_remove(struct acpi_device *device)
+{
+	struct surface_button *button = acpi_driver_data(device);
+
+	input_unregister_device(button->input);
+	kfree(button);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(surface_button_pm,
+		surface_button_suspend, surface_button_resume);
+
+static struct acpi_driver surface_button_driver = {
+	.name = "surface_pro3_button",
+	.class = "SurfacePro3",
+	.ids = surface_button_device_ids,
+	.ops = {
+		.add = surface_button_add,
+		.remove = surface_button_remove,
+		.notify = surface_button_notify,
+	},
+	.drv.pm = &surface_button_pm,
+};
+
+module_acpi_driver(surface_button_driver);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 33e488c..131dd74 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -402,7 +402,7 @@
 #else
 static inline const char *str_supported(int is_supported) { return ""; }
 #define vdbg_printk(a_dbg_level, format, arg...)	\
-	no_printk(format, ##arg)
+	do { if (0) no_printk(format, ##arg); } while (0)
 #endif
 
 static void tpacpi_log_usertask(const char * const what)
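
[Editor's note] On the vdbg_printk() change above: no_printk() is a real (empty) inline, so calling it directly would still evaluate its arguments; wrapping the call in do { if (0) ... } while (0) keeps the compiler's printf-format checking but guarantees the arguments are dead code. A standalone sketch of the pattern (simplified, without the dbg_level parameter):

/* Sketch: format checking without argument evaluation. */
#include <stdio.h>

static inline __attribute__((format(printf, 1, 2)))
int no_printk(const char *fmt, ...)
{
	return 0;
}

#define vdbg_printk(fmt, ...) \
	do { if (0) no_printk(fmt, ##__VA_ARGS__); } while (0)

static int expensive_lookup(void)
{
	puts("side effect!");	/* must never run */
	return 42;
}

int main(void)
{
	/* type-checked at compile time; expensive_lookup() never called */
	vdbg_printk("value: %d\n", expensive_lookup());
	return 0;
}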
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 3ad7b1f..6740c51 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -31,7 +31,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION	"0.22"
+#define TOSHIBA_ACPI_VERSION	"0.23"
 #define PROC_INTERFACE_VERSION	1
 
 #include <linux/kernel.h>
@@ -50,6 +50,8 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/toshiba.h>
 #include <acpi/video.h>
 
 MODULE_AUTHOR("John Belmonte");
@@ -91,6 +93,7 @@
 
 /* Return codes */
 #define TOS_SUCCESS			0x0000
+#define TOS_SUCCESS2			0x0001
 #define TOS_OPEN_CLOSE_OK		0x0044
 #define TOS_FAILURE			0x1000
 #define TOS_NOT_SUPPORTED		0x8000
@@ -111,7 +114,6 @@
 #define HCI_VIDEO_OUT			0x001c
 #define HCI_HOTKEY_EVENT		0x001e
 #define HCI_LCD_BRIGHTNESS		0x002a
-#define HCI_WIRELESS			0x0056
 #define HCI_ACCELEROMETER		0x006d
 #define HCI_KBD_ILLUMINATION		0x0095
 #define HCI_ECO_MODE			0x0097
@@ -140,10 +142,6 @@
 #define HCI_VIDEO_OUT_LCD		0x1
 #define HCI_VIDEO_OUT_CRT		0x2
 #define HCI_VIDEO_OUT_TV		0x4
-#define HCI_WIRELESS_KILL_SWITCH	0x01
-#define HCI_WIRELESS_BT_PRESENT		0x0f
-#define HCI_WIRELESS_BT_ATTACH		0x40
-#define HCI_WIRELESS_BT_POWER		0x80
 #define SCI_KBD_MODE_MASK		0x1f
 #define SCI_KBD_MODE_FNZ		0x1
 #define SCI_KBD_MODE_AUTO		0x2
@@ -170,6 +168,7 @@
 	struct led_classdev led_dev;
 	struct led_classdev kbd_led;
 	struct led_classdev eco_led;
+	struct miscdevice miscdev;
 
 	int force_fan;
 	int last_key_event;
@@ -189,7 +188,6 @@
 	unsigned int info_supported:1;
 	unsigned int tr_backlight_supported:1;
 	unsigned int kbd_illum_supported:1;
-	unsigned int kbd_led_registered:1;
 	unsigned int touchpad_supported:1;
 	unsigned int eco_supported:1;
 	unsigned int accelerometer_supported:1;
@@ -200,6 +198,10 @@
 	unsigned int panel_power_on_supported:1;
 	unsigned int usb_three_supported:1;
 	unsigned int sysfs_created:1;
+
+	bool kbd_led_registered;
+	bool illumination_led_registered;
+	bool eco_led_registered;
 };
 
 static struct toshiba_acpi_dev *toshiba_acpi;
@@ -248,16 +250,16 @@
 };
 
 static const struct key_entry toshiba_acpi_alt_keymap[] = {
-	{ KE_KEY, 0x157, { KEY_MUTE } },
 	{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
 	{ KE_KEY, 0x103, { KEY_ZOOMIN } },
 	{ KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
 	{ KE_KEY, 0x139, { KEY_ZOOMRESET } },
-	{ KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
 	{ KE_KEY, 0x13c, { KEY_BRIGHTNESSDOWN } },
 	{ KE_KEY, 0x13d, { KEY_BRIGHTNESSUP } },
-	{ KE_KEY, 0x158, { KEY_WLAN } },
+	{ KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
 	{ KE_KEY, 0x13f, { KEY_TOUCHPAD_TOGGLE } },
+	{ KE_KEY, 0x157, { KEY_MUTE } },
+	{ KE_KEY, 0x158, { KEY_WLAN } },
 	{ KE_END, 0 },
 };
 
@@ -441,26 +443,24 @@
 }
 
 /* Illumination support */
-static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
+static void toshiba_illumination_available(struct toshiba_acpi_dev *dev)
 {
 	u32 in[TCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
 	acpi_status status;
 
+	dev->illumination_supported = 0;
+	dev->illumination_led_registered = false;
+
 	if (!sci_open(dev))
-		return 0;
+		return;
 
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status)) {
+	if (ACPI_FAILURE(status))
 		pr_err("ACPI call to query Illumination support failed\n");
-		return 0;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("Illumination device not available\n");
-		return 0;
-	}
-
-	return 1;
+	else if (out[0] == TOS_SUCCESS)
+		dev->illumination_supported = 1;
 }
 
 static void toshiba_illumination_set(struct led_classdev *cdev,
@@ -468,7 +468,8 @@
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, led_dev);
-	u32 state, result;
+	u32 result;
+	u32 state;
 
 	/* First request : initialize communication. */
 	if (!sci_open(dev))
@@ -478,13 +479,8 @@
 	state = brightness ? 1 : 0;
 	result = sci_write(dev, SCI_ILLUMINATION, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call for illumination failed\n");
-		return;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Illumination not supported\n");
-		return;
-	}
 }
 
 static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
@@ -500,11 +496,10 @@
 	/* Check the illumination */
 	result = sci_read(dev, SCI_ILLUMINATION, &state);
 	sci_close(dev);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE) {
 		pr_err("ACPI call for illumination failed\n");
 		return LED_OFF;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Illumination not supported\n");
+	} else if (result != TOS_SUCCESS) {
 		return LED_OFF;
 	}
 
@@ -512,41 +507,40 @@
 }
 
 /* KBD Illumination */
-static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
+static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
 {
 	u32 in[TCI_WORDS] = { SCI_GET, SCI_KBD_ILLUM_STATUS, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
 	acpi_status status;
 
+	dev->kbd_illum_supported = 0;
+	dev->kbd_led_registered = false;
+
 	if (!sci_open(dev))
-		return 0;
+		return;
 
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query kbd illumination support failed\n");
-		return 0;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("Keyboard illumination not available\n");
-		return 0;
+	} else if (out[0] == TOS_SUCCESS) {
+		/*
+		 * Check the keyboard backlight timeout max value:
+		 * the previous kbd backlight implementation set it to
+		 * 0x3c0003, while the new implementation sets it to
+		 * 0x3c001a; use this to distinguish between them.
+		 */
+		if (out[3] == SCI_KBD_TIME_MAX)
+			dev->kbd_type = 2;
+		else
+			dev->kbd_type = 1;
+		/* Get the current keyboard backlight mode */
+		dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+		/* Get the current time (1-60 seconds) */
+		dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+		/* Flag as supported */
+		dev->kbd_illum_supported = 1;
 	}
-
-	/*
-	 * Check for keyboard backlight timeout max value,
-	 * previous kbd backlight implementation set this to
-	 * 0x3c0003, and now the new implementation set this
-	 * to 0x3c001a, use this to distinguish between them.
-	 */
-	if (out[3] == SCI_KBD_TIME_MAX)
-		dev->kbd_type = 2;
-	else
-		dev->kbd_type = 1;
-	/* Get the current keyboard backlight mode */
-	dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
-	/* Get the current time (1-60 seconds) */
-	dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
-
-	return 1;
 }
 
 static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
@@ -558,15 +552,12 @@
 
 	result = sci_write(dev, SCI_KBD_ILLUM_STATUS, time);
 	sci_close(dev);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set KBD backlight status failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Keyboard backlight status not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
@@ -578,30 +569,27 @@
 
 	result = sci_read(dev, SCI_KBD_ILLUM_STATUS, time);
 	sci_close(dev);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to get KBD backlight status failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Keyboard backlight status not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, kbd_led);
-	u32 state, result;
+	u32 result;
+	u32 state;
 
 	/* Check the keyboard backlight state */
 	result = hci_read(dev, HCI_KBD_ILLUMINATION, &state);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE) {
 		pr_err("ACPI call to get the keyboard backlight failed\n");
 		return LED_OFF;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Keyboard backlight not supported\n");
+	} else if (result != TOS_SUCCESS) {
 		return LED_OFF;
 	}
 
@@ -613,18 +601,14 @@
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, kbd_led);
-	u32 state, result;
+	u32 result;
+	u32 state;
 
 	/* Set the keyboard backlight state */
 	state = brightness ? 1 : 0;
 	result = hci_write(dev, HCI_KBD_ILLUMINATION, state);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set KBD Illumination mode failed\n");
-		return;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Keyboard backlight not supported\n");
-		return;
-	}
 }
 
 /* TouchPad support */
@@ -637,14 +621,12 @@
 
 	result = sci_write(dev, SCI_TOUCHPAD, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set the touchpad failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
@@ -656,28 +638,27 @@
 
 	result = sci_read(dev, SCI_TOUCHPAD, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to query the touchpad failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 /* Eco Mode support */
-static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
 {
 	acpi_status status;
 	u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
 
+	dev->eco_supported = 0;
+	dev->eco_led_registered = false;
+
 	status = tci_raw(dev, in, out);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get ECO led failed\n");
-	} else if (out[0] == TOS_NOT_INSTALLED) {
-		pr_info("ECO led not installed");
 	} else if (out[0] == TOS_INPUT_DATA_ERROR) {
 		/*
 		 * If we receive 0x8300 (Input Data Error), it means that the
@@ -690,13 +671,11 @@
 		 */
 		in[3] = 1;
 		status = tci_raw(dev, in, out);
-		if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
+		if (ACPI_FAILURE(status))
 			pr_err("ACPI call to get ECO led failed\n");
 		else if (out[0] == TOS_SUCCESS)
-			return 1;
+			dev->eco_supported = 1;
 	}
-
-	return 0;
 }
 
 static enum led_brightness
@@ -709,9 +688,11 @@
 	acpi_status status;
 
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get ECO led failed\n");
 		return LED_OFF;
+	} else if (out[0] != TOS_SUCCESS) {
+		return LED_OFF;
 	}
 
 	return out[2] ? LED_FULL : LED_OFF;
@@ -729,41 +710,32 @@
 	/* Switch the Eco Mode led on/off */
 	in[2] = (brightness) ? 1 : 0;
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+	if (ACPI_FAILURE(status))
 		pr_err("ACPI call to set ECO led failed\n");
-		return;
-	}
 }
 
 /* Accelerometer support */
-static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
+static void toshiba_accelerometer_available(struct toshiba_acpi_dev *dev)
 {
 	u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
 	acpi_status status;
 
+	dev->accelerometer_supported = 0;
+
 	/*
 	 * Check if the accelerometer call exists,
 	 * this call also serves as initialization
 	 */
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+	if (ACPI_FAILURE(status))
 		pr_err("ACPI call to query the accelerometer failed\n");
-		return -EIO;
-	} else if (out[0] == TOS_DATA_NOT_AVAILABLE ||
-		   out[0] == TOS_NOT_INITIALIZED) {
-		pr_err("Accelerometer not initialized\n");
-		return -EIO;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("Accelerometer not supported\n");
-		return -ENODEV;
-	}
-
-	return 0;
+	else if (out[0] == TOS_SUCCESS)
+		dev->accelerometer_supported = 1;
 }
 
 static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
-				      u32 *xy, u32 *z)
+				     u32 *xy, u32 *z)
 {
 	u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
 	u32 out[TCI_WORDS];
@@ -771,15 +743,18 @@
 
 	/* Check the Accelerometer status */
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query the accelerometer failed\n");
 		return -EIO;
+	} else if (out[0] == TOS_NOT_SUPPORTED) {
+		return -ENODEV;
+	} else if (out[0] == TOS_SUCCESS) {
+		*xy = out[2];
+		*z = out[4];
+		return 0;
 	}
 
-	*xy = out[2];
-	*z = out[4];
-
-	return 0;
+	return -EIO;
 }
 
 /* Sleep (Charge and Music) utilities support */
@@ -789,7 +764,6 @@
 	u32 out[TCI_WORDS];
 	acpi_status status;
 
-	/* Set the feature to "not supported" in case of error */
 	dev->usb_sleep_charge_supported = 0;
 
 	if (!sci_open(dev))
@@ -801,7 +775,6 @@
 		sci_close(dev);
 		return;
 	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
 		sci_close(dev);
 		return;
 	} else if (out[0] == TOS_SUCCESS) {
@@ -810,25 +783,15 @@
 
 	in[5] = SCI_USB_CHARGE_BAT_LVL;
 	status = tci_raw(dev, in, out);
+	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
-		sci_close(dev);
-		return;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
-		sci_close(dev);
-		return;
 	} else if (out[0] == TOS_SUCCESS) {
 		dev->usbsc_bat_level = out[2];
-		/*
-		 * If we reach this point, it means that the laptop has support
-		 * for this feature and all values are initialized.
-		 * Set it as supported.
-		 */
+		/* Flag as supported */
 		dev->usb_sleep_charge_supported = 1;
 	}
 
-	sci_close(dev);
 }
 
 static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
@@ -841,17 +804,12 @@
 
 	result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set USB S&C mode failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
@@ -864,17 +822,12 @@
 
 	result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set USB S&C mode failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
@@ -892,17 +845,14 @@
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB S&C battery level failed\n");
-		return -EIO;
 	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
 		return -ENODEV;
-	} else if (out[0] == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
+	} else if (out[0] == TOS_SUCCESS) {
+		*mode = out[2];
+		return 0;
 	}
 
-	*mode = out[2];
-
-	return 0;
+	return -EIO;
 }
 
 static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
@@ -919,17 +869,12 @@
 	in[5] = SCI_USB_CHARGE_BAT_LVL;
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status)) {
+	if (ACPI_FAILURE(status))
 		pr_err("ACPI call to set USB S&C battery level failed\n");
-		return -EIO;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("USB Sleep and Charge not supported\n");
+	else if (out[0] == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (out[0] == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return out[0] == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
@@ -947,16 +892,14 @@
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB Rapid Charge failed\n");
-		return -EIO;
-	} else if (out[0] == TOS_NOT_SUPPORTED ||
-		   out[0] == TOS_INPUT_DATA_ERROR) {
-		pr_info("USB Rapid Charge not supported\n");
+	} else if (out[0] == TOS_NOT_SUPPORTED) {
 		return -ENODEV;
+	} else if (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) {
+		*state = out[2];
+		return 0;
 	}
 
-	*state = out[2];
-
-	return 0;
+	return -EIO;
 }
 
 static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
@@ -973,17 +916,12 @@
 	in[5] = SCI_USB_CHARGE_RAPID_DSP;
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status)) {
+	if (ACPI_FAILURE(status))
 		pr_err("ACPI call to set USB Rapid Charge failed\n");
-		return -EIO;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("USB Rapid Charge not supported\n");
+	else if (out[0] == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (out[0] == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO;
 }
 
 static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
@@ -995,17 +933,12 @@
 
 	result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to get Sleep and Music failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Sleep and Music not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1017,17 +950,12 @@
 
 	result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set Sleep and Music failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Sleep and Music not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 /* Keyboard function keys */
@@ -1040,15 +968,12 @@
 
 	result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
 	sci_close(dev);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to get KBD function keys failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("KBD function keys not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
 }
 
 static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
@@ -1060,15 +985,12 @@
 
 	result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
 	sci_close(dev);
-	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set KBD function keys failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("KBD function keys not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	}
 
-	return 0;
+	return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
 }
 
 /* Panel Power ON */
@@ -1081,17 +1003,12 @@
 
 	result = sci_read(dev, SCI_PANEL_POWER_ON, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to get Panel Power ON failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Panel Power on not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1103,17 +1020,12 @@
 
 	result = sci_write(dev, SCI_PANEL_POWER_ON, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set Panel Power ON failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("Panel Power ON not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 /* USB Three */
@@ -1126,17 +1038,12 @@
 
 	result = sci_read(dev, SCI_USB_THREE, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to get USB 3 failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("USB 3 not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
 }
 
 static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1148,17 +1055,12 @@
 
 	result = sci_write(dev, SCI_USB_THREE, state);
 	sci_close(dev);
-	if (result == TOS_FAILURE) {
+	if (result == TOS_FAILURE)
 		pr_err("ACPI call to set USB 3 failed\n");
-		return -EIO;
-	} else if (result == TOS_NOT_SUPPORTED) {
-		pr_info("USB 3 not supported\n");
+	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	} else if (result == TOS_INPUT_DATA_ERROR) {
-		return -EIO;
-	}
 
-	return 0;
+	return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
 }
 
 /* Hotkey Event type */
@@ -1172,35 +1074,39 @@
 	status = tci_raw(dev, in, out);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get System type failed\n");
-		return -EIO;
 	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		pr_info("System type not supported\n");
 		return -ENODEV;
+	} else if (out[0] == TOS_SUCCESS) {
+		*type = out[3];
+		return 0;
 	}
 
-	*type = out[3];
-
-	return 0;
+	return -EIO;
 }
 
 /* Transflective Backlight */
-static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
-	u32 hci_result;
-	u32 status;
+	u32 result = hci_read(dev, HCI_TR_BACKLIGHT, status);
 
-	hci_result = hci_read(dev, HCI_TR_BACKLIGHT, &status);
-	*enabled = !status;
-	return hci_result == TOS_SUCCESS ? 0 : -EIO;
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to get Transflective Backlight failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
-static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 status)
 {
-	u32 hci_result;
-	u32 value = !enable;
+	u32 result = hci_write(dev, HCI_TR_BACKLIGHT, !status);
 
-	hci_result = hci_write(dev, HCI_TR_BACKLIGHT, value);
-	return hci_result == TOS_SUCCESS ? 0 : -EIO;
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to set Transflective Backlight failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static struct proc_dir_entry *toshiba_proc_dir;
@@ -1208,23 +1114,26 @@
 /* LCD Brightness */
 static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
-	u32 hci_result;
+	u32 result;
 	u32 value;
 	int brightness = 0;
 
 	if (dev->tr_backlight_supported) {
-		bool enabled;
-		int ret = get_tr_backlight_status(dev, &enabled);
+		int ret = get_tr_backlight_status(dev, &value);
 
 		if (ret)
 			return ret;
-		if (enabled)
+		if (value)
 			return 0;
 		brightness++;
 	}
 
-	hci_result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
-	if (hci_result == TOS_SUCCESS)
+	result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to get LCD Brightness failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+	if (result == TOS_SUCCESS)
 		return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
 	return -EIO;
@@ -1240,8 +1149,8 @@
 static int lcd_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
-	int value;
 	int levels;
+	int value;
 
 	if (!dev->backlight_dev)
 		return -ENODEV;
@@ -1255,6 +1164,7 @@
 	}
 
 	pr_err("Error reading LCD brightness\n");
+
 	return -EIO;
 }
 
@@ -1265,11 +1175,10 @@
 
 static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
 {
-	u32 hci_result;
+	u32 result;
 
 	if (dev->tr_backlight_supported) {
-		bool enable = !value;
-		int ret = set_tr_backlight_status(dev, enable);
+		int ret = set_tr_backlight_status(dev, !value);
 
 		if (ret)
 			return ret;
@@ -1278,8 +1187,13 @@
 	}
 
 	value = value << HCI_LCD_BRIGHTNESS_SHIFT;
-	hci_result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
-	return hci_result == TOS_SUCCESS ? 0 : -EIO;
+	result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to set LCD Brightness failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int set_lcd_status(struct backlight_device *bd)
@@ -1295,24 +1209,22 @@
 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
 	char cmd[42];
 	size_t len;
-	int value;
-	int ret;
 	int levels = dev->backlight_dev->props.max_brightness + 1;
+	int value;
 
 	len = min(count, sizeof(cmd) - 1);
 	if (copy_from_user(cmd, buf, len))
 		return -EFAULT;
 	cmd[len] = '\0';
 
-	if (sscanf(cmd, " brightness : %i", &value) == 1 &&
-	    value >= 0 && value < levels) {
-		ret = set_lcd_brightness(dev, value);
-		if (ret == 0)
-			ret = count;
-	} else {
-		ret = -EINVAL;
-	}
-	return ret;
+	if (sscanf(cmd, " brightness : %i", &value) != 1 &&
+	    value < 0 && value > levels)
+		return -EINVAL;
+
+	if (set_lcd_brightness(dev, value))
+		return -EIO;
+
+	return count;
 }
 
 static const struct file_operations lcd_proc_fops = {
@@ -1324,22 +1236,25 @@
 	.write		= lcd_proc_write,
 };
 
+/* Video-Out */
 static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
-	u32 hci_result;
+	u32 result = hci_read(dev, HCI_VIDEO_OUT, status);
 
-	hci_result = hci_read(dev, HCI_VIDEO_OUT, status);
-	return hci_result == TOS_SUCCESS ? 0 : -EIO;
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to get Video-Out failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int video_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
 	u32 value;
-	int ret;
 
-	ret = get_video_status(dev, &value);
-	if (!ret) {
+	if (!get_video_status(dev, &value)) {
 		int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
 		int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
 		int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
@@ -1347,9 +1262,10 @@
 		seq_printf(m, "lcd_out:                 %d\n", is_lcd);
 		seq_printf(m, "crt_out:                 %d\n", is_crt);
 		seq_printf(m, "tv_out:                  %d\n", is_tv);
+		return 0;
 	}
 
-	return ret;
+	return -EIO;
 }
 
 static int video_proc_open(struct inode *inode, struct file *file)
@@ -1361,13 +1277,14 @@
 				size_t count, loff_t *pos)
 {
 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
-	char *cmd, *buffer;
-	int ret;
-	int value;
+	char *buffer;
+	char *cmd;
 	int remain = count;
 	int lcd_out = -1;
 	int crt_out = -1;
 	int tv_out = -1;
+	int value;
+	int ret;
 	u32 video_out;
 
 	cmd = kmalloc(count + 1, GFP_KERNEL);
@@ -1419,7 +1336,7 @@
 			ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
 	}
 
-	return ret ? ret : count;
+	return ret ? -EIO : count;
 }
 
 static const struct file_operations video_proc_fops = {
@@ -1431,27 +1348,43 @@
 	.write		= video_proc_write,
 };
 
+/* Fan status */
 static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
-	u32 hci_result;
+	u32 result = hci_read(dev, HCI_FAN, status);
 
-	hci_result = hci_read(dev, HCI_FAN, status);
-	return hci_result == TOS_SUCCESS ? 0 : -EIO;
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to get Fan status failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_fan_status(struct toshiba_acpi_dev *dev, u32 status)
+{
+	u32 result = hci_write(dev, HCI_FAN, status);
+
+	if (result == TOS_FAILURE)
+		pr_err("ACPI call to set Fan status failed\n");
+	else if (result == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int fan_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
-	int ret;
 	u32 value;
 
-	ret = get_fan_status(dev, &value);
-	if (!ret) {
-		seq_printf(m, "running:                 %d\n", (value > 0));
-		seq_printf(m, "force_on:                %d\n", dev->force_fan);
-	}
+	if (get_fan_status(dev, &value))
+		return -EIO;
 
-	return ret;
+	seq_printf(m, "running:                 %d\n", (value > 0));
+	seq_printf(m, "force_on:                %d\n", dev->force_fan);
+
+	return 0;
 }
 
 static int fan_proc_open(struct inode *inode, struct file *file)
@@ -1466,23 +1399,20 @@
 	char cmd[42];
 	size_t len;
 	int value;
-	u32 hci_result;
 
 	len = min(count, sizeof(cmd) - 1);
 	if (copy_from_user(cmd, buf, len))
 		return -EFAULT;
 	cmd[len] = '\0';
 
-	if (sscanf(cmd, " force_on : %i", &value) == 1 &&
-	    value >= 0 && value <= 1) {
-		hci_result = hci_write(dev, HCI_FAN, value);
-		if (hci_result == TOS_SUCCESS)
-			dev->force_fan = value;
-		else
-			return -EIO;
-	} else {
+	if (sscanf(cmd, " force_on : %i", &value) != 1 &&
+	    value != 0 && value != 1)
 		return -EINVAL;
-	}
+
+	if (set_fan_status(dev, value))
+		return -EIO;
+
+	dev->force_fan = value;
 
 	return count;
 }
@@ -1499,32 +1429,10 @@
 static int keys_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
-	u32 hci_result;
-	u32 value;
-
-	if (!dev->key_event_valid && dev->system_event_supported) {
-		hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
-		if (hci_result == TOS_SUCCESS) {
-			dev->key_event_valid = 1;
-			dev->last_key_event = value;
-		} else if (hci_result == TOS_FIFO_EMPTY) {
-			/* Better luck next time */
-		} else if (hci_result == TOS_NOT_SUPPORTED) {
-			/*
-			 * This is a workaround for an unresolved issue on
-			 * some machines where system events sporadically
-			 * become disabled.
-			 */
-			hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
-			pr_notice("Re-enabled hotkeys\n");
-		} else {
-			pr_err("Error reading hotkey status\n");
-			return -EIO;
-		}
-	}
 
 	seq_printf(m, "hotkey_ready:            %d\n", dev->key_event_valid);
 	seq_printf(m, "hotkey:                  0x%04x\n", dev->last_key_event);
+
 	return 0;
 }
 
@@ -1641,7 +1549,6 @@
 			 const char *buf, size_t count)
 {
 	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
-	u32 result;
 	int state;
 	int ret;
 
@@ -1652,11 +1559,9 @@
 	if (state != 0 && state != 1)
 		return -EINVAL;
 
-	result = hci_write(toshiba, HCI_FAN, state);
-	if (result == TOS_FAILURE)
-		return -EIO;
-	else if (result == TOS_NOT_SUPPORTED)
-		return -ENODEV;
+	ret = set_fan_status(toshiba, state);
+	if (ret)
+		return ret;
 
 	return count;
 }
@@ -1682,7 +1587,6 @@
 {
 	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
 	int mode;
-	int time;
 	int ret;
 
 
@@ -1713,7 +1617,7 @@
 	/* Only make a change if the actual mode has changed */
 	if (toshiba->kbd_mode != mode) {
 		/* Shift the time to "base time" (0x3c0000 == 60 seconds) */
-		time = toshiba->kbd_time << HCI_MISC_SHIFT;
+		int time = toshiba->kbd_time << HCI_MISC_SHIFT;
 
 		/* OR the "base time" to the actual method format */
 		if (toshiba->kbd_type == 1) {
@@ -2262,6 +2166,81 @@
 };
 
 /*
+ * Misc device
+ */
+static int toshiba_acpi_smm_bridge(SMMRegisters *regs)
+{
+	u32 in[TCI_WORDS] = { regs->eax, regs->ebx, regs->ecx,
+			      regs->edx, regs->esi, regs->edi };
+	u32 out[TCI_WORDS];
+	acpi_status status;
+
+	status = tci_raw(toshiba_acpi, in, out);
+	if (ACPI_FAILURE(status)) {
+		pr_err("ACPI call to query SMM registers failed\n");
+		return -EIO;
+	}
+
+	/* Fill out the SMM struct with the TCI call results */
+	regs->eax = out[0];
+	regs->ebx = out[1];
+	regs->ecx = out[2];
+	regs->edx = out[3];
+	regs->esi = out[4];
+	regs->edi = out[5];
+
+	return 0;
+}
+
+static long toshiba_acpi_ioctl(struct file *fp, unsigned int cmd,
+			       unsigned long arg)
+{
+	SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+	SMMRegisters regs;
+	int ret;
+
+	if (!argp)
+		return -EINVAL;
+
+	switch (cmd) {
+	case TOSH_SMM:
+		if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+			return -EFAULT;
+		ret = toshiba_acpi_smm_bridge(&regs);
+		if (ret)
+			return ret;
+		if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+			return -EFAULT;
+		break;
+	case TOSHIBA_ACPI_SCI:
+		if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+			return -EFAULT;
+		/* Ensure we are being called with a SCI_{GET, SET} register */
+		if (regs.eax != SCI_GET && regs.eax != SCI_SET)
+			return -EINVAL;
+		if (!sci_open(toshiba_acpi))
+			return -EIO;
+		ret = toshiba_acpi_smm_bridge(&regs);
+		sci_close(toshiba_acpi);
+		if (ret)
+			return ret;
+		if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+			return -EFAULT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct file_operations toshiba_acpi_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = toshiba_acpi_ioctl,
+	.llseek		= noop_llseek,
+};
+
+/*
  * Hotkeys
  */
 static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
@@ -2361,22 +2340,28 @@
 
 static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
 {
-	u32 hci_result, value;
-	int retries = 3;
-	int scancode;
-
 	if (dev->info_supported) {
-		scancode = toshiba_acpi_query_hotkey(dev);
-		if (scancode < 0)
+		int scancode = toshiba_acpi_query_hotkey(dev);
+
+		if (scancode < 0) {
 			pr_err("Failed to query hotkey event\n");
-		else if (scancode != 0)
+		} else if (scancode != 0) {
 			toshiba_acpi_report_hotkey(dev, scancode);
+			dev->key_event_valid = 1;
+			dev->last_key_event = scancode;
+		}
 	} else if (dev->system_event_supported) {
+		u32 result;
+		u32 value;
+		int retries = 3;
+
 		do {
-			hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
-			switch (hci_result) {
+			result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
+			switch (result) {
 			case TOS_SUCCESS:
 				toshiba_acpi_report_hotkey(dev, (int)value);
+				dev->key_event_valid = 1;
+				dev->last_key_event = value;
 				break;
 			case TOS_NOT_SUPPORTED:
 				/*
@@ -2384,15 +2369,15 @@
 				 * issue on some machines where system events
 				 * sporadically become disabled.
 				 */
-				hci_result =
-					hci_write(dev, HCI_SYSTEM_EVENT, 1);
-				pr_notice("Re-enabled hotkeys\n");
+				result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
+				if (result == TOS_SUCCESS)
+					pr_notice("Re-enabled hotkeys\n");
 				/* Fall through */
 			default:
 				retries--;
 				break;
 			}
-		} while (retries && hci_result != TOS_FIFO_EMPTY);
+		} while (retries && result != TOS_FIFO_EMPTY);
 	}
 }
 
@@ -2404,6 +2389,11 @@
 	u32 hci_result;
 	int error;
 
+	if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) {
+		pr_info("WMI event detected, hotkeys will not be monitored\n");
+		return 0;
+	}
+
 	error = toshiba_acpi_enable_hotkeys(dev);
 	if (error)
 		return error;
@@ -2496,7 +2486,6 @@
 	struct backlight_properties props;
 	int brightness;
 	int ret;
-	bool enabled;
 
 	/*
 	 * Some machines don't support the backlight methods at all, and
@@ -2513,10 +2502,6 @@
 		return 0;
 	}
 
-	/* Determine whether or not BIOS supports transflective backlight */
-	ret = get_tr_backlight_status(dev, &enabled);
-	dev->tr_backlight_supported = !ret;
-
 	/*
 	 * Tell acpi-video-detect code to prefer vendor backlight on all
 	 * systems with transflective backlight and on dmi matched systems.
@@ -2552,10 +2537,52 @@
 	return 0;
 }
 
+static void print_supported_features(struct toshiba_acpi_dev *dev)
+{
+	pr_info("Supported laptop features:");
+
+	if (dev->hotkey_dev)
+		pr_cont(" hotkeys");
+	if (dev->backlight_dev)
+		pr_cont(" backlight");
+	if (dev->video_supported)
+		pr_cont(" video-out");
+	if (dev->fan_supported)
+		pr_cont(" fan");
+	if (dev->tr_backlight_supported)
+		pr_cont(" transflective-backlight");
+	if (dev->illumination_supported)
+		pr_cont(" illumination");
+	if (dev->kbd_illum_supported)
+		pr_cont(" keyboard-backlight");
+	if (dev->touchpad_supported)
+		pr_cont(" touchpad");
+	if (dev->eco_supported)
+		pr_cont(" eco-led");
+	if (dev->accelerometer_supported)
+		pr_cont(" accelerometer-axes");
+	if (dev->usb_sleep_charge_supported)
+		pr_cont(" usb-sleep-charge");
+	if (dev->usb_rapid_charge_supported)
+		pr_cont(" usb-rapid-charge");
+	if (dev->usb_sleep_music_supported)
+		pr_cont(" usb-sleep-music");
+	if (dev->kbd_function_keys_supported)
+		pr_cont(" special-function-keys");
+	if (dev->panel_power_on_supported)
+		pr_cont(" panel-power-on");
+	if (dev->usb_three_supported)
+		pr_cont(" usb3");
+
+	pr_cont("\n");
+}
+
 static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
 
+	misc_deregister(&dev->miscdev);
+
 	remove_toshiba_proc_entries(dev);
 
 	if (dev->sysfs_created)
@@ -2574,13 +2601,13 @@
 
 	backlight_device_unregister(dev->backlight_dev);
 
-	if (dev->illumination_supported)
+	if (dev->illumination_led_registered)
 		led_classdev_unregister(&dev->led_dev);
 
 	if (dev->kbd_led_registered)
 		led_classdev_unregister(&dev->kbd_led);
 
-	if (dev->eco_supported)
+	if (dev->eco_led_registered)
 		led_classdev_unregister(&dev->eco_led);
 
 	if (toshiba_acpi)
@@ -2627,6 +2654,17 @@
 		return -ENOMEM;
 	dev->acpi_dev = acpi_dev;
 	dev->method_hci = hci_method;
+	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+	dev->miscdev.name = "toshiba_acpi";
+	dev->miscdev.fops = &toshiba_acpi_fops;
+
+	ret = misc_register(&dev->miscdev);
+	if (ret) {
+		pr_err("Failed to register miscdevice\n");
+		kfree(dev);
+		return ret;
+	}
+
 	acpi_dev->driver_data = dev;
 	dev_set_drvdata(&acpi_dev->dev, dev);
 
@@ -2643,29 +2681,35 @@
 	if (toshiba_acpi_setup_keyboard(dev))
 		pr_info("Unable to activate hotkeys\n");
 
+	/* Determine whether or not BIOS supports transflective backlight */
+	ret = get_tr_backlight_status(dev, &dummy);
+	dev->tr_backlight_supported = !ret;
+
 	ret = toshiba_acpi_setup_backlight(dev);
 	if (ret)
 		goto error;
 
-	if (toshiba_illumination_available(dev)) {
+	toshiba_illumination_available(dev);
+	if (dev->illumination_supported) {
 		dev->led_dev.name = "toshiba::illumination";
 		dev->led_dev.max_brightness = 1;
 		dev->led_dev.brightness_set = toshiba_illumination_set;
 		dev->led_dev.brightness_get = toshiba_illumination_get;
 		if (!led_classdev_register(&acpi_dev->dev, &dev->led_dev))
-			dev->illumination_supported = 1;
+			dev->illumination_led_registered = true;
 	}
 
-	if (toshiba_eco_mode_available(dev)) {
+	toshiba_eco_mode_available(dev);
+	if (dev->eco_supported) {
 		dev->eco_led.name = "toshiba::eco_mode";
 		dev->eco_led.max_brightness = 1;
 		dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
 		dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
 		if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
-			dev->eco_supported = 1;
+			dev->eco_led_registered = true;
 	}
 
-	dev->kbd_illum_supported = toshiba_kbd_illum_available(dev);
+	toshiba_kbd_illum_available(dev);
 	/*
 	 * Only register the LED if KBD illumination is supported
 	 * and the keyboard backlight operation mode is set to FN-Z
@@ -2676,14 +2720,13 @@
 		dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
 		dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
 		if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
-			dev->kbd_led_registered = 1;
+			dev->kbd_led_registered = true;
 	}
 
 	ret = toshiba_touchpad_get(dev, &dummy);
 	dev->touchpad_supported = !ret;
 
-	ret = toshiba_accelerometer_supported(dev);
-	dev->accelerometer_supported = !ret;
+	toshiba_accelerometer_available(dev);
 
 	toshiba_usb_sleep_charge_available(dev);
 
@@ -2705,6 +2748,8 @@
 	ret = get_fan_status(dev, &dummy);
 	dev->fan_supported = !ret;
 
+	print_supported_features(dev);
+
 	/*
 	 * Enable the "Special Functions" mode only if they are
 	 * supported and if they are activated.
@@ -2738,6 +2783,14 @@
 
 	switch (event) {
 	case 0x80: /* Hotkeys and some system events */
+		/*
+		 * Machines with this WMI GUID aren't supported due to bugs in
+		 * their AML.
+		 *
+		 * Return silently to avoid triggering a netlink event.
+		 */
+		if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+			return;
 		toshiba_acpi_process_hotkeys(dev);
 		break;
 	case 0x81: /* Dock events */
@@ -2781,10 +2834,14 @@
 static int toshiba_acpi_suspend(struct device *device)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
-	u32 result;
 
-	if (dev->hotkey_dev)
+	if (dev->hotkey_dev) {
+		u32 result;
+
 		result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
+		if (result != TOS_SUCCESS)
+			pr_info("Unable to disable hotkeys\n");
+	}
 
 	return 0;
 }
@@ -2792,10 +2849,10 @@
 static int toshiba_acpi_resume(struct device *device)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
-	int error;
 
 	if (dev->hotkey_dev) {
-		error = toshiba_acpi_enable_hotkeys(dev);
+		int error = toshiba_acpi_enable_hotkeys(dev);
+
 		if (error)
 			pr_info("Unable to re-enable hotkeys\n");
 	}
@@ -2824,14 +2881,6 @@
 {
 	int ret;
 
-	/*
-	 * Machines with this WMI guid aren't supported due to bugs in
-	 * their AML. This check relies on wmi initializing before
-	 * toshiba_acpi to guarantee guids have been identified.
-	 */
-	if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
-		return -ENODEV;
-
 	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
 	if (!toshiba_proc_dir) {
 		pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 9357aa7..7ad3295 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -97,8 +97,6 @@
 	/* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
 	if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
 		res->flags |= IORESOURCE_READONLY;
-	if (rule->flags & IORESOURCE_MEM_CACHEABLE)
-		res->flags |= IORESOURCE_CACHEABLE;
 	if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
 		res->flags |= IORESOURCE_RANGELENGTH;
 	if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b1541f4..948d9ab 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -111,6 +111,13 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-clps711x.
 
+config PWM_CRC
+	bool "Intel Crystalcove (CRC) PWM support"
+	depends on X86 && INTEL_SOC_PMIC
+	help
+	  Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
+	  control.
+
 config PWM_EP93XX
 	tristate "Cirrus Logic EP93xx PWM support"
 	depends on ARCH_EP93XX
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ec50eb5..d186f35 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_PWM_BCM2835)	+= pwm-bcm2835.o
 obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)	+= pwm-clps711x.o
+obj-$(CONFIG_PWM_CRC)		+= pwm-crc.o
 obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)	+= pwm-fsl-ftm.o
 obj-$(CONFIG_PWM_IMG)		+= pwm-img.o
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
new file mode 100644
index 0000000..7101c70
--- /dev/null
+++ b/drivers/pwm/pwm-crc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pwm.h>
+
+#define PWM0_CLK_DIV		0x4B
+#define  PWM_OUTPUT_ENABLE	BIT(7)
+#define  PWM_DIV_CLK_0		0x00 /* DIVIDECLK = BASECLK */
+#define  PWM_DIV_CLK_100	0x63 /* DIVIDECLK = BASECLK/100 */
+#define  PWM_DIV_CLK_128	0x7F /* DIVIDECLK = BASECLK/128 */
+
+#define PWM0_DUTY_CYCLE		0x4E
+#define BACKLIGHT_EN		0x51
+
+#define PWM_MAX_LEVEL		0xFF
+
+#define PWM_BASE_CLK		6000000  /* 6 MHz */
+#define PWM_MAX_PERIOD_NS	21333    /* 46.875 kHz */
+
+/**
+ * struct crystalcove_pwm - Crystal Cove PWM controller
+ * @chip: the abstract pwm_chip structure.
+ * @regmap: the regmap from the parent device.
+ */
+struct crystalcove_pwm {
+	struct pwm_chip chip;
+	struct regmap *regmap;
+};
+
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+{
+	return container_of(pc, struct crystalcove_pwm, chip);
+}
+
+static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+
+	return 0;
+}
+
+static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+}
+
+static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
+			  int duty_ns, int period_ns)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+	struct device *dev = crc_pwm->chip.dev;
+	int level;
+
+	if (period_ns > PWM_MAX_PERIOD_NS) {
+		dev_err(dev, "un-supported period_ns\n");
+		return -EINVAL;
+	}
+
+	if (pwm->period != period_ns) {
+		int clk_div;
+
+		/* changing the clk divisor, need to disable first */
+		crc_pwm_disable(c, pwm);
+		clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+
+		regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+					clk_div | PWM_OUTPUT_ENABLE);
+
+		/* enable back */
+		crc_pwm_enable(c, pwm);
+	}
+
+	/* change the pwm duty cycle */
+	level = duty_ns * PWM_MAX_LEVEL / period_ns;
+	regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+
+	return 0;
+}
+
+static const struct pwm_ops crc_pwm_ops = {
+	.config = crc_pwm_config,
+	.enable = crc_pwm_enable,
+	.disable = crc_pwm_disable,
+};
+
+static int crystalcove_pwm_probe(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm;
+	struct device *dev = pdev->dev.parent;
+	struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm)
+		return -ENOMEM;
+
+	pwm->chip.dev = &pdev->dev;
+	pwm->chip.ops = &crc_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = 1;
+
+	/* get the PMIC regmap */
+	pwm->regmap = pmic->regmap;
+
+	platform_set_drvdata(pdev, pwm);
+
+	return pwmchip_add(&pwm->chip);
+}
+
+static int crystalcove_pwm_remove(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver crystalcove_pwm_driver = {
+	.probe = crystalcove_pwm_probe,
+	.remove = crystalcove_pwm_remove,
+	.driver = {
+		.name = "crystal_cove_pwm",
+	},
+};
+
+builtin_platform_driver(crystalcove_pwm_driver);
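
For reference, the divider and duty math in crc_pwm_config() works out as
follows at the maximum supported period with a 50% duty cycle (the numbers
below simply evaluate the driver's own constants; treat this as a sketch):

	/* period_ns = 21333 (PWM_MAX_PERIOD_NS), duty_ns = 10666 */
	clk_div = 6000000 * 21333 / 1000000000;	/* = 127, i.e. BASECLK/128 */
	level   = 10666 * 255 / 21333;		/* = 127 of 255, ~50% duty */

Note that the intermediate product 6000000 * 21333 needs 37 bits, so for
periods near the maximum the multiplication in crc_pwm_config() would have
to be done in 64-bit arithmetic to avoid overflowing a 32-bit int.
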
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index de9f272..7a85ac9 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1262,7 +1262,7 @@
 	regulator->debugfs = debugfs_create_dir(regulator->supply_name,
 						rdev->debugfs);
 	if (!regulator->debugfs) {
-		rdev_warn(rdev, "Failed to create debugfs directory\n");
+		rdev_dbg(rdev, "Failed to create debugfs directory\n");
 	} else {
 		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
 				   &regulator->uA_load);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 533bfa3..9d42906 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -945,11 +945,11 @@
 	  will be called rtc-da9055
 
 config RTC_DRV_DA9063
-	tristate "Dialog Semiconductor DA9063 RTC"
-	depends on MFD_DA9063
+	tristate "Dialog Semiconductor DA9063/DA9062 RTC"
+	depends on MFD_DA9063 || MFD_DA9062
 	help
 	  If you say yes here you will get support for the RTC subsystem
-	  of the Dialog Semiconductor DA9063.
+	  for the Dialog Semiconductor PMIC chips DA9063 and DA9062.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called "rtc-da9063".
@@ -1116,6 +1116,13 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-opal.
 
+config RTC_DRV_ZYNQMP
+	tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
+	depends on OF
+	help
+	  If you say yes here you get support for the RTC controller found on
+	  Xilinx Zynq Ultrascale+ MPSoC.
+
 comment "on-CPU RTC drivers"
 
 config RTC_DRV_DAVINCI
@@ -1306,11 +1313,13 @@
 	  just say Y.
 
 config RTC_DRV_PXA
-	tristate "PXA27x/PXA3xx"
-	depends on ARCH_PXA
-	help
-	 If you say Y here you will get access to the real time clock
-	 built into your PXA27x or PXA3xx CPU.
+       tristate "PXA27x/PXA3xx"
+       depends on ARCH_PXA
+       select RTC_DRV_SA1100
+       help
+         If you say Y here you will get access to the real time clock
+         built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
+         consisting of an SA1100 compatible RTC and the extended PXA RTC.
 
 	 This RTC driver uses PXA RTC registers available since pxa27x
 	 series (RDxR, RYxR) instead of legacy RCNR, RTAR.
@@ -1456,6 +1465,18 @@
 	  This driver can also be buillt as a module. If so, the module
 	  will be called rtc-jz4740.
 
+config RTC_DRV_LPC24XX
+	tristate "NXP RTC for LPC178x/18xx/408x/43xx"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on OF && HAS_IOMEM
+	help
+	  This enables support for the NXP RTC found on
+	  NXP LPC178x/18xx/408x/43xx devices.
+
+	  If you have one of the devices above enable this driver to use
+	  the hardware RTC. This driver can also be built as a module. If
+	  so, the module will be called rtc-lpc24xx.
+
 config RTC_DRV_LPC32XX
 	depends on ARCH_LPC32XX
 	tristate "NXP LPC32XX RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 1b09a62..e491eb5 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -74,6 +74,7 @@
 obj-$(CONFIG_RTC_DRV_ISL1208)	+= rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_JZ4740)	+= rtc-jz4740.o
 obj-$(CONFIG_RTC_DRV_LP8788)	+= rtc-lp8788.o
+obj-$(CONFIG_RTC_DRV_LPC24XX)	+= rtc-lpc24xx.o
 obj-$(CONFIG_RTC_DRV_LPC32XX)	+= rtc-lpc32xx.o
 obj-$(CONFIG_RTC_DRV_LOONGSON1)	+= rtc-ls1x.o
 obj-$(CONFIG_RTC_DRV_M41T80)	+= rtc-m41t80.o
@@ -158,3 +159,4 @@
 obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
 obj-$(CONFIG_RTC_DRV_XGENE)	+= rtc-xgene.o
+obj-$(CONFIG_RTC_DRV_ZYNQMP)	+= rtc-zynqmp.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index ea2a315..de86578 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -202,6 +202,7 @@
 	rtc->max_user_freq = 64;
 	rtc->dev.parent = dev;
 	rtc->dev.class = rtc_class;
+	rtc->dev.groups = rtc_get_dev_attribute_groups();
 	rtc->dev.release = rtc_device_release;
 
 	mutex_init(&rtc->ops_lock);
@@ -234,12 +235,12 @@
 
 	err = device_register(&rtc->dev);
 	if (err) {
+		/* This will free both memory and the ID */
 		put_device(&rtc->dev);
-		goto exit_kfree;
+		goto exit;
 	}
 
 	rtc_dev_add_device(rtc);
-	rtc_sysfs_add_device(rtc);
 	rtc_proc_add_device(rtc);
 
 	dev_info(dev, "rtc core: registered %s as %s\n",
@@ -247,9 +248,6 @@
 
 	return rtc;
 
-exit_kfree:
-	kfree(rtc);
-
 exit_ida:
 	ida_simple_remove(&rtc_ida, id);
 
@@ -268,19 +266,17 @@
  */
 void rtc_device_unregister(struct rtc_device *rtc)
 {
-	if (get_device(&rtc->dev) != NULL) {
-		mutex_lock(&rtc->ops_lock);
-		/* remove innards of this RTC, then disable it, before
-		 * letting any rtc_class_open() users access it again
-		 */
-		rtc_sysfs_del_device(rtc);
-		rtc_dev_del_device(rtc);
-		rtc_proc_del_device(rtc);
-		device_unregister(&rtc->dev);
-		rtc->ops = NULL;
-		mutex_unlock(&rtc->ops_lock);
-		put_device(&rtc->dev);
-	}
+	mutex_lock(&rtc->ops_lock);
+	/*
+	 * Remove innards of this RTC, then disable it, before
+	 * letting any rtc_class_open() users access it again
+	 */
+	rtc_dev_del_device(rtc);
+	rtc_proc_del_device(rtc);
+	device_del(&rtc->dev);
+	rtc->ops = NULL;
+	mutex_unlock(&rtc->ops_lock);
+	put_device(&rtc->dev);
 }
 EXPORT_SYMBOL_GPL(rtc_device_unregister);
 
@@ -363,7 +359,6 @@
 	}
 	rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
 	rtc_dev_init();
-	rtc_sysfs_init(rtc_class);
 	return 0;
 }
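
The class.c changes above move the RTC sysfs attributes over to the driver
core's attribute-group mechanism: groups assigned to dev.groups before
device_register() are created atomically with the device, so the KOBJ_ADD
uevent can no longer race with attribute creation. The general pattern, as a
generic driver-core sketch with illustrative names:

	static DEVICE_ATTR_RO(name);		/* backed by name_show() */

	static struct attribute *rtc_attrs[] = {
		&dev_attr_name.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(rtc);

	/* before device_register()/device_add(): */
	rtc->dev.groups = rtc_groups;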
 
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 11b6390..5836751 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -564,7 +564,7 @@
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
-	if (unlikely(IS_ERR_OR_NULL(rtc)))
+	if (IS_ERR_OR_NULL(rtc))
 		return;
 
 	pm_stay_awake(rtc->dev.parent);
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 7df0579..466bf7f 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -251,17 +251,26 @@
 static int pm80x_rtc_probe(struct platform_device *pdev)
 {
 	struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
-	struct pm80x_platform_data *pm80x_pdata =
-				dev_get_platdata(pdev->dev.parent);
-	struct pm80x_rtc_pdata *pdata = NULL;
+	struct pm80x_rtc_pdata *pdata = dev_get_platdata(&pdev->dev);
 	struct pm80x_rtc_info *info;
+	struct device_node *node = pdev->dev.of_node;
 	struct rtc_time tm;
 	unsigned long ticks = 0;
 	int ret;
 
-	pdata = dev_get_platdata(&pdev->dev);
-	if (pdata == NULL)
-		dev_warn(&pdev->dev, "No platform data!\n");
+	if (!pdata && !node) {
+		dev_err(&pdev->dev,
+			"pm80x-rtc requires platform data or of_node\n");
+		return -EINVAL;
+	}
+
+	if (!pdata) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			dev_err(&pdev->dev, "failed to allocate memory\n");
+			return -ENOMEM;
+		}
+	}
 
 	info =
 	    devm_kzalloc(&pdev->dev, sizeof(struct pm80x_rtc_info), GFP_KERNEL);
@@ -327,11 +336,8 @@
 	regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
 			   PM800_RTC1_USE_XO);
 
-	if (pm80x_pdata) {
-		pdata = pm80x_pdata->rtc;
-		if (pdata)
-			info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
-	}
+	/* remember whether this power up is caused by PMIC RTC or not */
+	info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
 
 	device_init_wakeup(&pdev->dev, 1);
 
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index b5cbc1b..a319bf1 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -1009,6 +1009,7 @@
 	{ .compatible = "abracon,abb5zes3" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, abb5zes3_dt_match);
 #endif
 
 static const struct i2c_device_id abb5zes3_id[] = {
@@ -1020,7 +1021,6 @@
 static struct i2c_driver abb5zes3_driver = {
 	.driver = {
 		.name = DRV_NAME,
-		.owner = THIS_MODULE,
 		.pm = &abb5zes3_rtc_pm_ops,
 		.of_match_table = of_match_ptr(abb5zes3_dt_match),
 	},
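
The MODULE_DEVICE_TABLE(of, ...) additions here and in rtc-armada38x below
export the OF match table into the module's alias list, which is what lets
udev/kmod autoload the driver from a devicetree compatible string. The
general shape, as a sketch with illustrative names:

	static const struct of_device_id foo_dt_match[] = {
		{ .compatible = "vendor,foo-rtc" },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, foo_dt_match);
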
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 133d2e2..51407c4 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -445,7 +445,9 @@
 static const struct platform_device_id ab85xx_rtc_ids[] = {
 	{ "ab8500-rtc", (kernel_ulong_t)&ab8500_rtc_ops, },
 	{ "ab8540-rtc", (kernel_ulong_t)&ab8540_rtc_ops, },
+	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(platform, ab85xx_rtc_ids);
 
 static int ab8500_rtc_probe(struct platform_device *pdev)
 {
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 4337c3b..afea84c 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -28,7 +28,7 @@
 #define ABX8XX_REG_WD		0x07
 
 #define ABX8XX_REG_CTRL1	0x10
-#define ABX8XX_CTRL_WRITE	BIT(1)
+#define ABX8XX_CTRL_WRITE	BIT(0)
 #define ABX8XX_CTRL_12_24	BIT(6)
 
 #define ABX8XX_REG_CFG_KEY	0x1f
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 2b08cac..9a3f2a6 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -40,13 +40,6 @@
 	void __iomem	    *regs;
 	void __iomem	    *regs_soc;
 	spinlock_t	    lock;
-	/*
-	 * While setting the time, the RTC TIME register should not be
-	 * accessed. Setting the RTC time involves sleeping during
-	 * 100ms, so a mutex instead of a spinlock is used to protect
-	 * it
-	 */
-	struct mutex	    mutex_time;
 	int		    irq;
 };
 
@@ -64,9 +57,9 @@
 static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct armada38x_rtc *rtc = dev_get_drvdata(dev);
-	unsigned long time, time_check;
+	unsigned long time, time_check, flags;
 
-	mutex_lock(&rtc->mutex_time);
+	spin_lock_irqsave(&rtc->lock, flags);
 	time = readl(rtc->regs + RTC_TIME);
 	/*
 	 * WA for failing time set attempts. As stated in HW ERRATA if
@@ -77,7 +70,7 @@
 	if ((time_check - time) > 1)
 		time_check = readl(rtc->regs + RTC_TIME);
 
-	mutex_unlock(&rtc->mutex_time);
+	spin_unlock_irqrestore(&rtc->lock, flags);
 
 	rtc_time_to_tm(time_check, tm);
 
@@ -88,23 +81,23 @@
 {
 	struct armada38x_rtc *rtc = dev_get_drvdata(dev);
 	int ret = 0;
-	unsigned long time;
+	unsigned long time, flags;
 
 	ret = rtc_tm_to_time(tm, &time);
 
 	if (ret)
 		goto out;
 	/*
-	 * Setting the RTC time not always succeeds. According to the
-	 * errata we need to first write on the status register and
-	 * then wait for 100ms before writing to the time register to be
-	 * sure that the data will be taken into account.
+	 * According to errata FE-3124064, Write to RTC TIME register
+	 * may fail. As a workaround, after writing to RTC TIME
+	 * register, issue a dummy write of 0x0 twice to RTC Status
+	 * register.
 	 */
-	mutex_lock(&rtc->mutex_time);
-	rtc_delayed_write(0, rtc, RTC_STATUS);
-	msleep(100);
+	spin_lock_irqsave(&rtc->lock, flags);
 	rtc_delayed_write(time, rtc, RTC_TIME);
-	mutex_unlock(&rtc->mutex_time);
+	rtc_delayed_write(0, rtc, RTC_STATUS);
+	rtc_delayed_write(0, rtc, RTC_STATUS);
+	spin_unlock_irqrestore(&rtc->lock, flags);
 
 out:
 	return ret;
@@ -229,7 +222,6 @@
 		return -ENOMEM;
 
 	spin_lock_init(&rtc->lock);
-	mutex_init(&rtc->mutex_time);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
 	rtc->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -303,6 +295,7 @@
 	{ .compatible = "marvell,armada-380-rtc", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table);
 #endif
 
 static struct platform_driver armada38x_rtc_driver = {
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 9f38eda..56cc582 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -45,7 +45,7 @@
 	rbuff[1] = bin2bcd(tm->tm_min);
 	rbuff[2] = bin2bcd(tm->tm_hour);
 	rbuff[3] = bin2bcd(tm->tm_mday);
-	rbuff[4] = bin2bcd(tm->tm_mon);
+	rbuff[4] = bin2bcd(tm->tm_mon + 1);
 	rbuff[5] = bin2bcd(tm->tm_year - (AS3722_RTC_START_YEAR - 1900));
 }
 
@@ -55,7 +55,7 @@
 	tm->tm_min = bcd2bin(rbuff[1] & 0x7F);
 	tm->tm_hour = bcd2bin(rbuff[2] & 0x3F);
 	tm->tm_mday = bcd2bin(rbuff[3] & 0x3F);
-	tm->tm_mon = bcd2bin(rbuff[4] & 0x1F);
+	tm->tm_mon = bcd2bin(rbuff[4] & 0x1F) - 1;
 	tm->tm_year = (AS3722_RTC_START_YEAR - 1900) + bcd2bin(rbuff[5] & 0x7F);
 	return;
 }
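
The as3722 change fixes the classic month off-by-one: struct rtc_time counts
tm_mon from 0 (January), while the AS3722 registers store the month as 1-12,
so the driver must add one on write and subtract one on read. For example:

	/* December: tm_mon == 11, so the register holds bin2bcd(12) == 0x12 */
	rbuff[4] = bin2bcd(tm->tm_mon + 1);		/* 0-11 -> 1-12 */
	tm->tm_mon = bcd2bin(rbuff[4] & 0x1F) - 1;	/* 1-12 -> 0-11 */
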
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 35efd3f..cb62e21 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -18,20 +18,21 @@
  *
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
 #include <linux/bcd.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/ioctl.h>
+#include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
 #include <linux/io.h>
-#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
 #include <linux/suspend.h>
+#include <linux/time.h>
 #include <linux/uaccess.h>
 
 #include "rtc-at91rm9200.h"
@@ -59,6 +60,7 @@
 static DEFINE_SPINLOCK(suspended_lock);
 static unsigned long cached_events;
 static u32 at91_rtc_imr;
+static struct clk *sclk;
 
 static void at91_rtc_write_ier(u32 mask)
 {
@@ -407,6 +409,16 @@
 		return -ENOMEM;
 	}
 
+	sclk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(sclk))
+		return PTR_ERR(sclk);
+
+	ret = clk_prepare_enable(sclk);
+	if (ret) {
+		dev_err(&pdev->dev, "Could not enable slow clock\n");
+		return ret;
+	}
+
 	at91_rtc_write(AT91_RTC_CR, 0);
 	at91_rtc_write(AT91_RTC_MR, 0);		/* 24 hour mode */
 
@@ -420,7 +432,7 @@
 			       "at91_rtc", pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
-		return ret;
+		goto err_clk;
 	}
 
 	/* cpu init code should really have flagged this device as
@@ -431,8 +443,10 @@
 
 	rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
 				&at91_rtc_ops, THIS_MODULE);
-	if (IS_ERR(rtc))
-		return PTR_ERR(rtc);
+	if (IS_ERR(rtc)) {
+		ret = PTR_ERR(rtc);
+		goto err_clk;
+	}
 	platform_set_drvdata(pdev, rtc);
 
 	/* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
@@ -442,6 +456,11 @@
 
 	dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
 	return 0;
+
+err_clk:
+	clk_disable_unprepare(sclk);
+
+	return ret;
 }
 
 /*
@@ -454,6 +473,8 @@
 					AT91_RTC_SECEV | AT91_RTC_TIMEV |
 					AT91_RTC_CALEV);
 
+	clk_disable_unprepare(sclk);
+
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 5ccaee3..7206e2f 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -11,20 +11,20 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
+#include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/ioctl.h>
-#include <linux/slab.h>
-#include <linux/platform_data/atmel.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
-#include <linux/clk.h>
+#include <linux/time.h>
 
 /*
  * This driver uses two configurable hardware resources that live in the
@@ -425,18 +425,19 @@
 	if (IS_ERR(rtc->sclk))
 		return PTR_ERR(rtc->sclk);
 
-	sclk_rate = clk_get_rate(rtc->sclk);
-	if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
-		dev_err(&pdev->dev, "Invalid slow clock rate\n");
-		return -EINVAL;
-	}
-
 	ret = clk_prepare_enable(rtc->sclk);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not enable slow clock\n");
 		return ret;
 	}
 
+	sclk_rate = clk_get_rate(rtc->sclk);
+	if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
+		dev_err(&pdev->dev, "Invalid slow clock rate\n");
+		ret = -EINVAL;
+		goto err_clk;
+	}
+
 	mr = rtt_readl(rtc, MR);
 
 	/* unless RTT is counting at 1 Hz, re-initialize it */
@@ -451,8 +452,10 @@
 
 	rtc->rtcdev = devm_rtc_device_register(&pdev->dev, pdev->name,
 					&at91_rtc_ops, THIS_MODULE);
-	if (IS_ERR(rtc->rtcdev))
-		return PTR_ERR(rtc->rtcdev);
+	if (IS_ERR(rtc->rtcdev)) {
+		ret = PTR_ERR(rtc->rtcdev);
+		goto err_clk;
+	}
 
 	/* register irq handler after we know what name we'll use */
 	ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
@@ -460,7 +463,7 @@
 			       dev_name(&rtc->rtcdev->dev), rtc);
 	if (ret) {
 		dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
-		return ret;
+		goto err_clk;
 	}
 
 	/* NOTE:  sam9260 rev A silicon has a ROM bug which resets the
@@ -474,6 +477,11 @@
 				dev_name(&rtc->rtcdev->dev));
 
 	return 0;
+
+err_clk:
+	clk_disable_unprepare(rtc->sclk);
+
+	return ret;
 }
 
 /*
@@ -487,8 +495,7 @@
 	/* disable all interrupts */
 	rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
 
-	if (!IS_ERR(rtc->sclk))
-		clk_disable_unprepare(rtc->sclk);
+	clk_disable_unprepare(rtc->sclk);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 3d44b11..535a5f9 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -361,7 +361,7 @@
 	/* Register our RTC with the RTC framework */
 	rtc->rtc_dev = devm_rtc_device_register(dev, pdev->name, &bfin_rtc_ops,
 						THIS_MODULE);
-	if (unlikely(IS_ERR(rtc->rtc_dev)))
+	if (IS_ERR(rtc->rtc_dev))
 		return PTR_ERR(rtc->rtc_dev);
 
 	/* Grab the IRQ and init the hardware */
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 92679df..0299988 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -212,7 +212,7 @@
 	if (error)
 		return error;
 
-	if (client && client->dev.of_node)
+	if (client->dev.of_node)
 		trickle_charger_of_init(dev, client->dev.of_node);
 
 	rtc = devm_rtc_device_register(&client->dev, bq32k_driver.driver.name,
@@ -234,7 +234,6 @@
 static struct i2c_driver bq32k_driver = {
 	.driver = {
 		.name	= "bq32k",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= bq32k_probe,
 	.id_table	= bq32k_id,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index a82556a..8f7034b 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -41,7 +41,6 @@
 #include <linux/pm.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
-#include <linux/dmi.h>
 
 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
 #include <asm-generic/rtc.h>
@@ -51,6 +50,7 @@
 	struct device		*dev;
 	int			irq;
 	struct resource		*iomem;
+	time64_t		alarm_expires;
 
 	void			(*wake_on)(struct device *);
 	void			(*wake_off)(struct device *);
@@ -377,53 +377,11 @@
 
 	spin_unlock_irq(&rtc_lock);
 
+	cmos->alarm_expires = rtc_tm_to_time64(&t->time);
+
 	return 0;
 }
 
-/*
- * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
- */
-static bool alarm_disable_quirk;
-
-static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
-{
-	alarm_disable_quirk = true;
-	pr_info("BIOS has alarm-disable quirk - RTC alarms disabled\n");
-	return 0;
-}
-
-static const struct dmi_system_id rtc_quirks[] __initconst = {
-	/* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
-	{
-		.callback = set_alarm_disable_quirk,
-		.ident    = "IBM Truman",
-		.matches  = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
-		},
-	},
-	/* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
-	{
-		.callback = set_alarm_disable_quirk,
-		.ident    = "Gigabyte GA-990XA-UD3",
-		.matches  = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"Gigabyte Technology Co., Ltd."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
-		},
-	},
-	/* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
-	{
-		.callback = set_alarm_disable_quirk,
-		.ident    = "Toshiba Satellite L300",
-		.matches  = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
-		},
-	},
-	{}
-};
-
 static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
@@ -432,9 +390,6 @@
 	if (!is_valid_irq(cmos->irq))
 		return -EINVAL;
 
-	if (alarm_disable_quirk)
-		return 0;
-
 	spin_lock_irqsave(&rtc_lock, flags);
 
 	if (enabled)
@@ -512,13 +467,6 @@
 {
 	int	retval;
 
-	if (unlikely(off >= attr->size))
-		return 0;
-	if (unlikely(off < 0))
-		return -EINVAL;
-	if ((off + count) > attr->size)
-		count = attr->size - off;
-
 	off += NVRAM_OFFSET;
 	spin_lock_irq(&rtc_lock);
 	for (retval = 0; count; count--, off++, retval++) {
@@ -543,12 +491,6 @@
 	int		retval;
 
 	cmos = dev_get_drvdata(container_of(kobj, struct device, kobj));
-	if (unlikely(off >= attr->size))
-		return -EFBIG;
-	if (unlikely(off < 0))
-		return -EINVAL;
-	if ((off + count) > attr->size)
-		count = attr->size - off;
 
 	/* NOTE:  on at least PCs and Ataris, the boot firmware uses a
 	 * checksum on part of the NVRAM data.  That's currently ignored
@@ -860,6 +802,51 @@
 	cmos->dev = NULL;
 }
 
+static int cmos_aie_poweroff(struct device *dev)
+{
+	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
+	struct rtc_time now;
+	time64_t t_now;
+	int retval = 0;
+	unsigned char rtc_control;
+
+	if (!cmos->alarm_expires)
+		return -EINVAL;
+
+	spin_lock_irq(&rtc_lock);
+	rtc_control = CMOS_READ(RTC_CONTROL);
+	spin_unlock_irq(&rtc_lock);
+
+	/* We only care about the situation where AIE is disabled. */
+	if (rtc_control & RTC_AIE)
+		return -EBUSY;
+
+	cmos_read_time(dev, &now);
+	t_now = rtc_tm_to_time64(&now);
+
+	/*
+	 * When "RTC wake-up" is enabled in the BIOS setup, some buggy
+	 * boxes reboot automatically right after shutdown.  The
+	 * spurious reboot does not happen when the alarm time is later
+	 * than now+1 seconds.
+	 *
+	 * If the alarm time is exactly now+1 seconds, the issue can be
+	 * prevented by cancelling the alarm.
+	 */
+	if (cmos->alarm_expires == t_now + 1) {
+		struct rtc_wkalrm alarm;
+
+		/* Cancel the AIE timer by configuring the past time. */
+		rtc_time64_to_tm(t_now - 1, &alarm.time);
+		alarm.enabled = 0;
+		retval = cmos_set_alarm(dev, &alarm);
+	} else if (cmos->alarm_expires > t_now + 1) {
+		retval = -EBUSY;
+	}
+
+	return retval;
+}
+
 #ifdef CONFIG_PM
 
 static int cmos_suspend(struct device *dev)
@@ -1094,8 +1081,12 @@
 	struct device *dev = &pnp->dev;
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 
-	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev))
-		return;
+	if (system_state == SYSTEM_POWER_OFF) {
+		int retval = cmos_poweroff(dev);
+
+		if (cmos_aie_poweroff(dev) < 0 && !retval)
+			return;
+	}
 
 	cmos_do_shutdown(cmos->irq);
 }
@@ -1200,8 +1191,12 @@
 	struct device *dev = &pdev->dev;
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 
-	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev))
-		return;
+	if (system_state == SYSTEM_POWER_OFF) {
+		int retval = cmos_poweroff(dev);
+
+		if (cmos_aie_poweroff(dev) < 0 && !retval)
+			return;
+	}
 
 	cmos_do_shutdown(cmos->irq);
 }
@@ -1243,8 +1238,6 @@
 			platform_driver_registered = true;
 	}
 
-	dmi_check_system(rtc_quirks);
-
 	if (retval == 0)
 		return 0;
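
The cmos changes above cache the programmed alarm in alarm_expires and add cmos_aie_poweroff(), which only intervenes when the alarm would fire exactly one second after "now", the window in which the affected boards reboot instead of powering off. A hedged sketch of just that decision (return codes simplified, cancellation is done by programming a past time):

#include <linux/types.h>

/* Returns 0 if shutdown may proceed, -EBUSY to keep the alarm armed. */
static int aie_poweroff_decision(time64_t alarm_expires, time64_t t_now)
{
	if (alarm_expires == t_now + 1) {
		/* defuse it: set an alarm time already in the past */
		return 0;
	}
	if (alarm_expires > t_now + 1)
		return -EBUSY;	/* genuine wake-up alarm, leave it alone */

	return 0;		/* already expired, nothing to do */
}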
 
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 56343b2..101b7a2 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -263,6 +263,7 @@
 	{ .compatible = "stericsson,coh901331" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, coh901331_dt_match);
 
 static struct platform_driver coh901331_driver = {
 	.driver = {
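
This one-liner (and its siblings below for isl12022, isl12057, max8997, moxart, mpc5121, mt6397 and mv) fixes module autoloading: without MODULE_DEVICE_TABLE() the match table never reaches the module's alias section, so udev cannot modprobe the driver when the firmware describes a matching device. Minimal form of the idiom, with an invented compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example" },	/* illustrative only */
	{ }
};
/*
 * Emits "of:..." alias entries into modinfo so userspace can map a
 * device-tree compatible string back to this module.
 */
MODULE_DEVICE_TABLE(of, example_dt_match);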
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 5f9df74..a098aea 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -48,23 +48,10 @@
 #endif
 
 #ifdef CONFIG_RTC_INTF_SYSFS
-
-extern void __init rtc_sysfs_init(struct class *);
-extern void rtc_sysfs_add_device(struct rtc_device *rtc);
-extern void rtc_sysfs_del_device(struct rtc_device *rtc);
-
+const struct attribute_group **rtc_get_dev_attribute_groups(void);
 #else
-
-static inline void rtc_sysfs_init(struct class *rtc)
+static inline const struct attribute_group **rtc_get_dev_attribute_groups(void)
 {
+	return NULL;
 }
-
-static inline void rtc_sysfs_add_device(struct rtc_device *rtc)
-{
-}
-
-static inline void rtc_sysfs_del_device(struct rtc_device *rtc)
-{
-}
-
 #endif
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 7ffc570..00a8f7f 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -12,15 +12,18 @@
  * Library General Public License for more details.
  */
 
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/interrupt.h>
+#include <linux/regmap.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/regmap.h>
+
+#include <linux/mfd/da9062/registers.h>
 #include <linux/mfd/da9063/registers.h>
 #include <linux/mfd/da9063/core.h>
 
@@ -29,99 +32,231 @@
 #define YEARS_FROM_DA9063(year)		((year) + 100)
 #define MONTHS_FROM_DA9063(month)	((month) - 1)
 
-#define RTC_ALARM_DATA_LEN (DA9063_AD_REG_ALARM_Y - DA9063_AD_REG_ALARM_MI + 1)
-
-#define RTC_DATA_LEN	(DA9063_REG_COUNT_Y - DA9063_REG_COUNT_S + 1)
-#define RTC_SEC		0
-#define RTC_MIN		1
-#define RTC_HOUR	2
-#define RTC_DAY		3
-#define RTC_MONTH	4
-#define RTC_YEAR	5
-
-struct da9063_rtc {
-	struct rtc_device	*rtc_dev;
-	struct da9063		*hw;
-	struct rtc_time		alarm_time;
-	bool			rtc_sync;
-	int			alarm_year;
-	int			alarm_start;
-	int			alarm_len;
-	int			data_start;
+enum {
+	RTC_SEC	= 0,
+	RTC_MIN	= 1,
+	RTC_HOUR = 2,
+	RTC_DAY	= 3,
+	RTC_MONTH = 4,
+	RTC_YEAR = 5,
+	RTC_DATA_LEN
 };
 
-static void da9063_data_to_tm(u8 *data, struct rtc_time *tm)
+struct da9063_compatible_rtc_regmap {
+	/* REGS */
+	int rtc_enable_reg;
+	int rtc_enable_32k_crystal_reg;
+	int rtc_alarm_secs_reg;
+	int rtc_alarm_year_reg;
+	int rtc_count_secs_reg;
+	int rtc_count_year_reg;
+	int rtc_event_reg;
+	/* MASKS */
+	int rtc_enable_mask;
+	int rtc_crystal_mask;
+	int rtc_event_alarm_mask;
+	int rtc_alarm_on_mask;
+	int rtc_alarm_status_mask;
+	int rtc_tick_on_mask;
+	int rtc_ready_to_read_mask;
+	int rtc_count_sec_mask;
+	int rtc_count_min_mask;
+	int rtc_count_hour_mask;
+	int rtc_count_day_mask;
+	int rtc_count_month_mask;
+	int rtc_count_year_mask;
+	/* ALARM CONFIG */
+	int rtc_data_start;
+	int rtc_alarm_len;
+};
+
+struct da9063_compatible_rtc {
+	struct rtc_device *rtc_dev;
+	struct rtc_time alarm_time;
+	struct regmap *regmap;
+	const struct da9063_compatible_rtc_regmap *config;
+	bool rtc_sync;
+};
+
+static const struct da9063_compatible_rtc_regmap da9063_ad_regs = {
+	/* REGS */
+	.rtc_enable_reg             = DA9063_REG_CONTROL_E,
+	.rtc_alarm_secs_reg         = DA9063_AD_REG_ALARM_MI,
+	.rtc_alarm_year_reg         = DA9063_AD_REG_ALARM_Y,
+	.rtc_count_secs_reg         = DA9063_REG_COUNT_S,
+	.rtc_count_year_reg         = DA9063_REG_COUNT_Y,
+	.rtc_event_reg              = DA9063_REG_EVENT_A,
+	/* MASKS */
+	.rtc_enable_mask            = DA9063_RTC_EN,
+	.rtc_crystal_mask           = DA9063_CRYSTAL,
+	.rtc_enable_32k_crystal_reg = DA9063_REG_EN_32K,
+	.rtc_event_alarm_mask       = DA9063_E_ALARM,
+	.rtc_alarm_on_mask          = DA9063_ALARM_ON,
+	.rtc_alarm_status_mask      = DA9063_ALARM_STATUS_ALARM |
+				      DA9063_ALARM_STATUS_TICK,
+	.rtc_tick_on_mask           = DA9063_TICK_ON,
+	.rtc_ready_to_read_mask     = DA9063_RTC_READ,
+	.rtc_count_sec_mask         = DA9063_COUNT_SEC_MASK,
+	.rtc_count_min_mask         = DA9063_COUNT_MIN_MASK,
+	.rtc_count_hour_mask        = DA9063_COUNT_HOUR_MASK,
+	.rtc_count_day_mask         = DA9063_COUNT_DAY_MASK,
+	.rtc_count_month_mask       = DA9063_COUNT_MONTH_MASK,
+	.rtc_count_year_mask        = DA9063_COUNT_YEAR_MASK,
+	/* ALARM CONFIG */
+	.rtc_data_start             = RTC_MIN,
+	.rtc_alarm_len              = RTC_DATA_LEN - 1,
+};
+
+static const struct da9063_compatible_rtc_regmap da9063_bb_regs = {
+	/* REGS */
+	.rtc_enable_reg             = DA9063_REG_CONTROL_E,
+	.rtc_alarm_secs_reg         = DA9063_BB_REG_ALARM_S,
+	.rtc_alarm_year_reg         = DA9063_BB_REG_ALARM_Y,
+	.rtc_count_secs_reg         = DA9063_REG_COUNT_S,
+	.rtc_count_year_reg         = DA9063_REG_COUNT_Y,
+	.rtc_event_reg              = DA9063_REG_EVENT_A,
+	/* MASKS */
+	.rtc_enable_mask            = DA9063_RTC_EN,
+	.rtc_crystal_mask           = DA9063_CRYSTAL,
+	.rtc_enable_32k_crystal_reg = DA9063_REG_EN_32K,
+	.rtc_event_alarm_mask       = DA9063_E_ALARM,
+	.rtc_alarm_on_mask          = DA9063_ALARM_ON,
+	.rtc_alarm_status_mask      = DA9063_ALARM_STATUS_ALARM |
+				      DA9063_ALARM_STATUS_TICK,
+	.rtc_tick_on_mask           = DA9063_TICK_ON,
+	.rtc_ready_to_read_mask     = DA9063_RTC_READ,
+	.rtc_count_sec_mask         = DA9063_COUNT_SEC_MASK,
+	.rtc_count_min_mask         = DA9063_COUNT_MIN_MASK,
+	.rtc_count_hour_mask        = DA9063_COUNT_HOUR_MASK,
+	.rtc_count_day_mask         = DA9063_COUNT_DAY_MASK,
+	.rtc_count_month_mask       = DA9063_COUNT_MONTH_MASK,
+	.rtc_count_year_mask        = DA9063_COUNT_YEAR_MASK,
+	/* ALARM CONFIG */
+	.rtc_data_start             = RTC_SEC,
+	.rtc_alarm_len              = RTC_DATA_LEN,
+};
+
+static const struct da9063_compatible_rtc_regmap da9062_aa_regs = {
+	/* REGS */
+	.rtc_enable_reg             = DA9062AA_CONTROL_E,
+	.rtc_alarm_secs_reg         = DA9062AA_ALARM_S,
+	.rtc_alarm_year_reg         = DA9062AA_ALARM_Y,
+	.rtc_count_secs_reg         = DA9062AA_COUNT_S,
+	.rtc_count_year_reg         = DA9062AA_COUNT_Y,
+	.rtc_event_reg              = DA9062AA_EVENT_A,
+	/* MASKS */
+	.rtc_enable_mask            = DA9062AA_RTC_EN_MASK,
+	.rtc_crystal_mask           = DA9062AA_CRYSTAL_MASK,
+	.rtc_enable_32k_crystal_reg = DA9062AA_EN_32K,
+	.rtc_event_alarm_mask       = DA9062AA_M_ALARM_MASK,
+	.rtc_alarm_on_mask          = DA9062AA_ALARM_ON_MASK,
+	.rtc_alarm_status_mask      = (0x02 << 6),
+	.rtc_tick_on_mask           = DA9062AA_TICK_ON_MASK,
+	.rtc_ready_to_read_mask     = DA9062AA_RTC_READ_MASK,
+	.rtc_count_sec_mask         = DA9062AA_COUNT_SEC_MASK,
+	.rtc_count_min_mask         = DA9062AA_COUNT_MIN_MASK,
+	.rtc_count_hour_mask        = DA9062AA_COUNT_HOUR_MASK,
+	.rtc_count_day_mask         = DA9062AA_COUNT_DAY_MASK,
+	.rtc_count_month_mask       = DA9062AA_COUNT_MONTH_MASK,
+	.rtc_count_year_mask        = DA9062AA_COUNT_YEAR_MASK,
+	/* ALARM CONFIG */
+	.rtc_data_start             = RTC_SEC,
+	.rtc_alarm_len              = RTC_DATA_LEN,
+};
+
+static const struct of_device_id da9063_compatible_reg_id_table[] = {
+	{ .compatible = "dlg,da9063-rtc", .data = &da9063_bb_regs },
+	{ .compatible = "dlg,da9062-rtc", .data = &da9062_aa_regs },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
+
+static void da9063_data_to_tm(u8 *data, struct rtc_time *tm,
+			      struct da9063_compatible_rtc *rtc)
 {
-	tm->tm_sec  = data[RTC_SEC]  & DA9063_COUNT_SEC_MASK;
-	tm->tm_min  = data[RTC_MIN]  & DA9063_COUNT_MIN_MASK;
-	tm->tm_hour = data[RTC_HOUR] & DA9063_COUNT_HOUR_MASK;
-	tm->tm_mday = data[RTC_DAY]  & DA9063_COUNT_DAY_MASK;
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
+
+	tm->tm_sec  = data[RTC_SEC]  & config->rtc_count_sec_mask;
+	tm->tm_min  = data[RTC_MIN]  & config->rtc_count_min_mask;
+	tm->tm_hour = data[RTC_HOUR] & config->rtc_count_hour_mask;
+	tm->tm_mday = data[RTC_DAY]  & config->rtc_count_day_mask;
 	tm->tm_mon  = MONTHS_FROM_DA9063(data[RTC_MONTH] &
-					 DA9063_COUNT_MONTH_MASK);
+					 config->rtc_count_month_mask);
 	tm->tm_year = YEARS_FROM_DA9063(data[RTC_YEAR] &
-					DA9063_COUNT_YEAR_MASK);
+					config->rtc_count_year_mask);
 }
 
-static void da9063_tm_to_data(struct rtc_time *tm, u8 *data)
+static void da9063_tm_to_data(struct rtc_time *tm, u8 *data,
+			      struct da9063_compatible_rtc *rtc)
 {
-	data[RTC_SEC] &= ~DA9063_COUNT_SEC_MASK;
-	data[RTC_SEC] |= tm->tm_sec & DA9063_COUNT_SEC_MASK;
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 
-	data[RTC_MIN] &= ~DA9063_COUNT_MIN_MASK;
-	data[RTC_MIN] |= tm->tm_min & DA9063_COUNT_MIN_MASK;
+	data[RTC_SEC] &= ~config->rtc_count_sec_mask;
+	data[RTC_SEC] |= tm->tm_sec & config->rtc_count_sec_mask;
 
-	data[RTC_HOUR] &= ~DA9063_COUNT_HOUR_MASK;
-	data[RTC_HOUR] |= tm->tm_hour & DA9063_COUNT_HOUR_MASK;
+	data[RTC_MIN] &= ~config->rtc_count_min_mask;
+	data[RTC_MIN] |= tm->tm_min & config->rtc_count_min_mask;
 
-	data[RTC_DAY] &= ~DA9063_COUNT_DAY_MASK;
-	data[RTC_DAY] |= tm->tm_mday & DA9063_COUNT_DAY_MASK;
+	data[RTC_HOUR] &= ~config->rtc_count_hour_mask;
+	data[RTC_HOUR] |= tm->tm_hour & config->rtc_count_hour_mask;
 
-	data[RTC_MONTH] &= ~DA9063_COUNT_MONTH_MASK;
+	data[RTC_DAY] &= ~config->rtc_count_day_mask;
+	data[RTC_DAY] |= tm->tm_mday & config->rtc_count_day_mask;
+
+	data[RTC_MONTH] &= ~config->rtc_count_month_mask;
 	data[RTC_MONTH] |= MONTHS_TO_DA9063(tm->tm_mon) &
-				DA9063_COUNT_MONTH_MASK;
+				config->rtc_count_month_mask;
 
-	data[RTC_YEAR] &= ~DA9063_COUNT_YEAR_MASK;
+	data[RTC_YEAR] &= ~config->rtc_count_year_mask;
 	data[RTC_YEAR] |= YEARS_TO_DA9063(tm->tm_year) &
-				DA9063_COUNT_YEAR_MASK;
+				config->rtc_count_year_mask;
 }
 
 static int da9063_rtc_stop_alarm(struct device *dev)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 
-	return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
-				  DA9063_ALARM_ON, 0);
+	return regmap_update_bits(rtc->regmap,
+				  config->rtc_alarm_year_reg,
+				  config->rtc_alarm_on_mask,
+				  0);
 }
 
 static int da9063_rtc_start_alarm(struct device *dev)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 
-	return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
-				  DA9063_ALARM_ON, DA9063_ALARM_ON);
+	return regmap_update_bits(rtc->regmap,
+				  config->rtc_alarm_year_reg,
+				  config->rtc_alarm_on_mask,
+				  config->rtc_alarm_on_mask);
 }
 
 static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 	unsigned long tm_secs;
 	unsigned long al_secs;
 	u8 data[RTC_DATA_LEN];
 	int ret;
 
-	ret = regmap_bulk_read(rtc->hw->regmap, DA9063_REG_COUNT_S,
+	ret = regmap_bulk_read(rtc->regmap,
+			       config->rtc_count_secs_reg,
 			       data, RTC_DATA_LEN);
 	if (ret < 0) {
 		dev_err(dev, "Failed to read RTC time data: %d\n", ret);
 		return ret;
 	}
 
-	if (!(data[RTC_SEC] & DA9063_RTC_READ)) {
+	if (!(data[RTC_SEC] & config->rtc_ready_to_read_mask)) {
 		dev_dbg(dev, "RTC not yet ready to be read by the host\n");
 		return -EINVAL;
 	}
 
-	da9063_data_to_tm(data, tm);
+	da9063_data_to_tm(data, tm, rtc);
 
 	rtc_tm_to_time(tm, &tm_secs);
 	rtc_tm_to_time(&rtc->alarm_time, &al_secs);
@@ -137,12 +272,14 @@
 
 static int da9063_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 	u8 data[RTC_DATA_LEN];
 	int ret;
 
-	da9063_tm_to_data(tm, data);
-	ret = regmap_bulk_write(rtc->hw->regmap, DA9063_REG_COUNT_S,
+	da9063_tm_to_data(tm, data, rtc);
+	ret = regmap_bulk_write(rtc->regmap,
+				config->rtc_count_secs_reg,
 				data, RTC_DATA_LEN);
 	if (ret < 0)
 		dev_err(dev, "Failed to set RTC time data: %d\n", ret);
@@ -152,26 +289,31 @@
 
 static int da9063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 	u8 data[RTC_DATA_LEN];
 	int ret;
 	unsigned int val;
 
 	data[RTC_SEC] = 0;
-	ret = regmap_bulk_read(rtc->hw->regmap, rtc->alarm_start,
-			       &data[rtc->data_start], rtc->alarm_len);
+	ret = regmap_bulk_read(rtc->regmap,
+			       config->rtc_alarm_secs_reg,
+			       &data[config->rtc_data_start],
+			       config->rtc_alarm_len);
 	if (ret < 0)
 		return ret;
 
-	da9063_data_to_tm(data, &alrm->time);
+	da9063_data_to_tm(data, &alrm->time, rtc);
 
-	alrm->enabled = !!(data[RTC_YEAR] & DA9063_ALARM_ON);
+	alrm->enabled = !!(data[RTC_YEAR] & config->rtc_alarm_on_mask);
 
-	ret = regmap_read(rtc->hw->regmap, DA9063_REG_EVENT_A, &val);
+	ret = regmap_read(rtc->regmap,
+			  config->rtc_event_reg,
+			  &val);
 	if (ret < 0)
 		return ret;
 
-	if (val & (DA9063_E_ALARM))
+	if (val & config->rtc_event_alarm_mask)
 		alrm->pending = 1;
 	else
 		alrm->pending = 0;
@@ -181,11 +323,12 @@
 
 static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-	struct da9063_rtc *rtc = dev_get_drvdata(dev);
+	struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 	u8 data[RTC_DATA_LEN];
 	int ret;
 
-	da9063_tm_to_data(&alrm->time, data);
+	da9063_tm_to_data(&alrm->time, data, rtc);
 
 	ret = da9063_rtc_stop_alarm(dev);
 	if (ret < 0) {
@@ -193,14 +336,16 @@
 		return ret;
 	}
 
-	ret = regmap_bulk_write(rtc->hw->regmap, rtc->alarm_start,
-			       &data[rtc->data_start], rtc->alarm_len);
+	ret = regmap_bulk_write(rtc->regmap,
+				config->rtc_alarm_secs_reg,
+				&data[config->rtc_data_start],
+				config->rtc_alarm_len);
 	if (ret < 0) {
 		dev_err(dev, "Failed to write alarm: %d\n", ret);
 		return ret;
 	}
 
-	da9063_data_to_tm(data, &rtc->alarm_time);
+	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
 
 	if (alrm->enabled) {
 		ret = da9063_rtc_start_alarm(dev);
@@ -213,7 +358,8 @@
 	return ret;
 }
 
-static int da9063_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+static int da9063_rtc_alarm_irq_enable(struct device *dev,
+				       unsigned int enabled)
 {
 	if (enabled)
 		return da9063_rtc_start_alarm(dev);
@@ -223,10 +369,13 @@
 
 static irqreturn_t da9063_alarm_event(int irq, void *data)
 {
-	struct da9063_rtc *rtc = data;
+	struct da9063_compatible_rtc *rtc = data;
+	const struct da9063_compatible_rtc_regmap *config = rtc->config;
 
-	regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
-			   DA9063_ALARM_ON, 0);
+	regmap_update_bits(rtc->regmap,
+			   config->rtc_alarm_year_reg,
+			   config->rtc_alarm_on_mask,
+			   0);
 
 	rtc->rtc_sync = true;
 	rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -244,72 +393,92 @@
 
 static int da9063_rtc_probe(struct platform_device *pdev)
 {
-	struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
-	struct da9063_rtc *rtc;
+	struct da9063_compatible_rtc *rtc;
+	const struct da9063_compatible_rtc_regmap *config;
+	const struct of_device_id *match;
 	int irq_alarm;
 	u8 data[RTC_DATA_LEN];
 	int ret;
 
-	ret = regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_E,
-				 DA9063_RTC_EN, DA9063_RTC_EN);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to enable RTC\n");
-		goto err;
-	}
+	if (!pdev->dev.of_node)
+		return -ENXIO;
 
-	ret = regmap_update_bits(da9063->regmap, DA9063_REG_EN_32K,
-				 DA9063_CRYSTAL, DA9063_CRYSTAL);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
-		goto err;
-	}
+	match = of_match_node(da9063_compatible_reg_id_table,
+			      pdev->dev.of_node);
 
 	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
 	if (!rtc)
 		return -ENOMEM;
 
-	if (da9063->variant_code == PMIC_DA9063_AD) {
-		rtc->alarm_year = DA9063_AD_REG_ALARM_Y;
-		rtc->alarm_start = DA9063_AD_REG_ALARM_MI;
-		rtc->alarm_len = RTC_ALARM_DATA_LEN;
-		rtc->data_start = RTC_MIN;
-	} else {
-		rtc->alarm_year = DA9063_BB_REG_ALARM_Y;
-		rtc->alarm_start = DA9063_BB_REG_ALARM_S;
-		rtc->alarm_len = RTC_DATA_LEN;
-		rtc->data_start = RTC_SEC;
+	rtc->config = match->data;
+	if (of_device_is_compatible(pdev->dev.of_node, "dlg,da9063-rtc")) {
+		struct da9063 *chip = dev_get_drvdata(pdev->dev.parent);
+
+		if (chip->variant_code == PMIC_DA9063_AD)
+			rtc->config = &da9063_ad_regs;
 	}
 
-	ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
-			DA9063_ALARM_STATUS_TICK | DA9063_ALARM_STATUS_ALARM,
-			0);
+	rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!rtc->regmap) {
+		dev_warn(&pdev->dev, "Parent regmap unavailable.\n");
+		return -ENXIO;
+	}
+
+	config = rtc->config;
+	ret = regmap_update_bits(rtc->regmap,
+				 config->rtc_enable_reg,
+				 config->rtc_enable_mask,
+				 config->rtc_enable_mask);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to enable RTC\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(rtc->regmap,
+				 config->rtc_enable_32k_crystal_reg,
+				 config->rtc_crystal_mask,
+				 config->rtc_crystal_mask);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(rtc->regmap,
+				 config->rtc_alarm_secs_reg,
+				 config->rtc_alarm_status_mask,
+				 0);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
-		goto err;
+		return ret;
 	}
 
-	ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
+	ret = regmap_update_bits(rtc->regmap,
+				 config->rtc_alarm_secs_reg,
 				 DA9063_ALARM_STATUS_ALARM,
 				 DA9063_ALARM_STATUS_ALARM);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
-		goto err;
+		return ret;
 	}
 
-	ret = regmap_update_bits(da9063->regmap, rtc->alarm_year,
-				 DA9063_TICK_ON, 0);
+	ret = regmap_update_bits(rtc->regmap,
+				 config->rtc_alarm_year_reg,
+				 config->rtc_tick_on_mask,
+				 0);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to disable TICKs\n");
-		goto err;
+		return ret;
 	}
 
 	data[RTC_SEC] = 0;
-	ret = regmap_bulk_read(da9063->regmap, rtc->alarm_start,
-			       &data[rtc->data_start], rtc->alarm_len);
+	ret = regmap_bulk_read(rtc->regmap,
+			       config->rtc_alarm_secs_reg,
+			       &data[config->rtc_data_start],
+			       config->rtc_alarm_len);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n",
 			ret);
-		goto err;
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, rtc);
@@ -322,18 +491,16 @@
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
 			irq_alarm, ret);
-		goto err;
+		return ret;
 	}
 
-	rtc->hw = da9063;
 	rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
 					   &da9063_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc->rtc_dev))
 		return PTR_ERR(rtc->rtc_dev);
 
-	da9063_data_to_tm(data, &rtc->alarm_time);
+	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
 	rtc->rtc_sync = false;
-err:
 	return ret;
 }
 
@@ -341,6 +508,7 @@
 	.probe		= da9063_rtc_probe,
 	.driver		= {
 		.name	= DA9063_DRVNAME_RTC,
+		.of_match_table = da9063_compatible_reg_id_table,
 	},
 };
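
The da9063 rework replaces scattered per-variant if/else register choices with one const descriptor per chip, selected through the of_device_id .data pointer (plus a runtime fallback for the AD silicon, which shares the da9063 compatible string). The lookup pattern in isolation, with invented names and register values:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct chip_cfg {
	unsigned int enable_reg;
	unsigned int enable_mask;
};

static const struct chip_cfg variant_a = { .enable_reg = 0x10, .enable_mask = 0x01 };
static const struct chip_cfg variant_b = { .enable_reg = 0x20, .enable_mask = 0x80 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &variant_a },
	{ .compatible = "vendor,chip-b", .data = &variant_b },
	{ }
};

static const struct chip_cfg *example_get_cfg(struct platform_device *pdev)
{
	const struct of_device_id *match =
		of_match_node(example_of_match, pdev->dev.of_node);

	return match ? match->data : NULL;	/* per-variant registers */
}

Adding a future variant then means one new descriptor and one new table entry; none of the regmap call sites change.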
 
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 799c34b..a6d9434 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -477,6 +477,7 @@
 
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
+	rtc->char_dev.kobj.parent = &rtc->dev.kobj;
 }
 
 void rtc_dev_add_device(struct rtc_device *rtc)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 12b0715..baa5d04 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -538,15 +538,6 @@
 
 	spi = container_of(kobj, struct spi_device, dev.kobj);
 
-	if (unlikely(off >= DS1305_NVRAM_LEN))
-		return 0;
-	if (count >= DS1305_NVRAM_LEN)
-		count = DS1305_NVRAM_LEN;
-	if ((off + count) > DS1305_NVRAM_LEN)
-		count = DS1305_NVRAM_LEN - off;
-	if (unlikely(!count))
-		return count;
-
 	addr = DS1305_NVRAM + off;
 	msg_init(&m, x, &addr, count, NULL, buf);
 
@@ -569,15 +560,6 @@
 
 	spi = container_of(kobj, struct spi_device, dev.kobj);
 
-	if (unlikely(off >= DS1305_NVRAM_LEN))
-		return -EFBIG;
-	if (count >= DS1305_NVRAM_LEN)
-		count = DS1305_NVRAM_LEN;
-	if ((off + count) > DS1305_NVRAM_LEN)
-		count = DS1305_NVRAM_LEN - off;
-	if (unlikely(!count))
-		return count;
-
 	addr = (DS1305_WRITE | DS1305_NVRAM) + off;
 	msg_init(&m, x, &addr, count, buf, NULL);
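
The off/count checks deleted here (and in the rtc-cmos, ds1307, ds1343, ds1511, ds1553, ds1742 and m48t59 hunks) duplicate work the sysfs core already does: for binary attributes it validates and clamps the offset and count against bin_attr->size before calling the driver, so the callback may assume the window is in range. A minimal callback written against that contract, as a sketch:

#include <linux/io.h>
#include <linux/sysfs.h>

static ssize_t example_nvram_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *attr,
				  char *buf, loff_t off, size_t count)
{
	u8 __iomem *nvram = attr->private;	/* set when registering */
	size_t i;

	/* sysfs guarantees off >= 0 and off + count <= attr->size */
	for (i = 0; i < count; i++)
		buf[i] = readb(nvram + off + i);

	return count;
}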
 
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 6e76de1..a705e64 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -11,14 +11,17 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/string.h>
-#include <linux/rtc.h>
 #include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/rtc/ds1307.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 /*
  * We can't determine type by probing, but if we expect pre-Linux code
@@ -114,7 +117,7 @@
 #define HAS_ALARM	1		/* bit 1 == irq claimed */
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
-	struct work_struct	work;
+	int			wakeirq;
 	s32 (*read_block_data)(const struct i2c_client *client, u8 command,
 			       u8 length, u8 *values);
 	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -311,27 +314,17 @@
 /*----------------------------------------------------------------------*/
 
 /*
- * The IRQ logic includes a "real" handler running in IRQ context just
- * long enough to schedule this workqueue entry.   We need a task context
- * to talk to the RTC, since I2C I/O calls require that; and disable the
- * IRQ until we clear its status on the chip, so that this handler can
- * work with any type of triggering (not just falling edge).
- *
  * The ds1337 and ds1339 both have two alarms, but we only use the first
  * one (with a "seconds" field).  For ds1337 we expect nINTA is our alarm
  * signal; ds1339 chips have only one alarm signal.
  */
-static void ds1307_work(struct work_struct *work)
+static irqreturn_t ds1307_irq(int irq, void *dev_id)
 {
-	struct ds1307		*ds1307;
-	struct i2c_client	*client;
-	struct mutex		*lock;
+	struct i2c_client	*client = dev_id;
+	struct ds1307		*ds1307 = i2c_get_clientdata(client);
+	struct mutex		*lock = &ds1307->rtc->ops_lock;
 	int			stat, control;
 
-	ds1307 = container_of(work, struct ds1307, work);
-	client = ds1307->client;
-	lock = &ds1307->rtc->ops_lock;
-
 	mutex_lock(lock);
 	stat = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
 	if (stat < 0)
@@ -352,18 +345,8 @@
 	}
 
 out:
-	if (test_bit(HAS_ALARM, &ds1307->flags))
-		enable_irq(client->irq);
 	mutex_unlock(lock);
-}
 
-static irqreturn_t ds1307_irq(int irq, void *dev_id)
-{
-	struct i2c_client	*client = dev_id;
-	struct ds1307		*ds1307 = i2c_get_clientdata(client);
-
-	disable_irq_nosync(irq);
-	schedule_work(&ds1307->work);
 	return IRQ_HANDLED;
 }
 
@@ -634,13 +617,14 @@
 					 MCP794XX_BIT_ALMX_C1 | \
 					 MCP794XX_BIT_ALMX_C2)
 
-static void mcp794xx_work(struct work_struct *work)
+static irqreturn_t mcp794xx_irq(int irq, void *dev_id)
 {
-	struct ds1307 *ds1307 = container_of(work, struct ds1307, work);
-	struct i2c_client *client = ds1307->client;
+	struct i2c_client       *client = dev_id;
+	struct ds1307           *ds1307 = i2c_get_clientdata(client);
+	struct mutex            *lock = &ds1307->rtc->ops_lock;
 	int reg, ret;
 
-	mutex_lock(&ds1307->rtc->ops_lock);
+	mutex_lock(lock);
 
 	/* Check and clear alarm 0 interrupt flag. */
 	reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_ALARM0_CTRL);
@@ -665,9 +649,9 @@
 	rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
 
 out:
-	if (test_bit(HAS_ALARM, &ds1307->flags))
-		enable_irq(client->irq);
-	mutex_unlock(&ds1307->rtc->ops_lock);
+	mutex_unlock(lock);
+
+	return IRQ_HANDLED;
 }
 
 static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -798,13 +782,6 @@
 	client = kobj_to_i2c_client(kobj);
 	ds1307 = i2c_get_clientdata(client);
 
-	if (unlikely(off >= ds1307->nvram->size))
-		return 0;
-	if ((off + count) > ds1307->nvram->size)
-		count = ds1307->nvram->size - off;
-	if (unlikely(!count))
-		return count;
-
 	result = ds1307->read_block_data(client, ds1307->nvram_offset + off,
 								count, buf);
 	if (result < 0)
@@ -824,13 +801,6 @@
 	client = kobj_to_i2c_client(kobj);
 	ds1307 = i2c_get_clientdata(client);
 
-	if (unlikely(off >= ds1307->nvram->size))
-		return -EFBIG;
-	if ((off + count) > ds1307->nvram->size)
-		count = ds1307->nvram->size - off;
-	if (unlikely(!count))
-		return count;
-
 	result = ds1307->write_block_data(client, ds1307->nvram_offset + off,
 								count, buf);
 	if (result < 0) {
@@ -896,6 +866,8 @@
 	bool			want_irq = false;
 	unsigned char		*buf;
 	struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
+	irq_handler_t	irq_handler = ds1307_irq;
+
 	static const int	bbsqi_bitpos[] = {
 		[ds_1337] = 0,
 		[ds_1339] = DS1339_BIT_BBSQI,
@@ -962,8 +934,6 @@
 		 * running on Vbackup (BBSQI/BBSQW)
 		 */
 		if (ds1307->client->irq > 0 && chip->alarm) {
-			INIT_WORK(&ds1307->work, ds1307_work);
-
 			ds1307->regs[0] |= DS1337_BIT_INTCN
 					| bbsqi_bitpos[ds1307->type];
 			ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
@@ -1053,7 +1023,7 @@
 	case mcp794xx:
 		rtc_ops = &mcp794xx_rtc_ops;
 		if (ds1307->client->irq > 0 && chip->alarm) {
-			INIT_WORK(&ds1307->work, mcp794xx_work);
+			irq_handler = mcp794xx_irq;
 			want_irq = true;
 		}
 		break;
@@ -1176,18 +1146,43 @@
 	}
 
 	if (want_irq) {
-		err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
-			  ds1307->rtc->name, client);
+		struct device_node *node = client->dev.of_node;
+
+		err = devm_request_threaded_irq(&client->dev,
+						client->irq, NULL, irq_handler,
+						IRQF_SHARED | IRQF_ONESHOT,
+						ds1307->rtc->name, client);
 		if (err) {
 			client->irq = 0;
 			dev_err(&client->dev, "unable to request IRQ!\n");
-		} else {
+			goto no_irq;
+		}
 
-			set_bit(HAS_ALARM, &ds1307->flags);
-			dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+		set_bit(HAS_ALARM, &ds1307->flags);
+		dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+
+		/* Currently supported by OF code only! */
+		if (!node)
+			goto no_irq;
+
+		err = of_irq_get(node, 1);
+		if (err <= 0) {
+			if (err == -EPROBE_DEFER)
+				goto exit;
+			goto no_irq;
+		}
+		ds1307->wakeirq = err;
+
+		err = dev_pm_set_dedicated_wake_irq(&client->dev,
+						    ds1307->wakeirq);
+		if (err) {
+			dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
+				err);
+			goto exit;
 		}
 	}
 
+no_irq:
 	if (chip->nvram_size) {
 
 		ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1231,10 +1226,8 @@
 {
 	struct ds1307 *ds1307 = i2c_get_clientdata(client);
 
-	if (test_and_clear_bit(HAS_ALARM, &ds1307->flags)) {
-		free_irq(client->irq, client);
-		cancel_work_sync(&ds1307->work);
-	}
+	if (ds1307->wakeirq)
+		dev_pm_clear_wake_irq(&client->dev);
 
 	if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
 		sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
@@ -1245,7 +1238,6 @@
 static struct i2c_driver ds1307_driver = {
 	.driver = {
 		.name	= "rtc-ds1307",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= ds1307_probe,
 	.remove		= ds1307_remove,
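
The ds1307 conversion removes the disable_irq_nosync()/schedule_work() trampoline entirely: devm_request_threaded_irq() with a NULL hard handler and IRQF_ONESHOT runs the handler in a sleeping thread (so the I2C transfers are legal there) and keeps the line masked until the thread returns, which is exactly what the manual disable/enable dance emulated. The shape of the replacement, reduced to essentials with an illustrative register:

#include <linux/i2c.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct i2c_client *client = dev_id;

	/* thread context: sleeping I2C I/O is allowed here */
	i2c_smbus_read_byte_data(client, 0x0f);

	return IRQ_HANDLED;
}

static int example_setup_irq(struct i2c_client *client)
{
	/*
	 * NULL primary handler + IRQF_ONESHOT: the core keeps the
	 * line masked until example_irq_thread() returns.
	 */
	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, example_irq_thread,
					 IRQF_SHARED | IRQF_ONESHOT,
					 "example", client);
}

The devm_ variant also explains why ds1307_remove no longer calls free_irq(): the IRQ is released automatically when the device goes away.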
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ae9f997..79a06dd 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -162,12 +162,6 @@
 	struct device *dev = kobj_to_dev(kobj);
 	struct ds1343_priv *priv = dev_get_drvdata(dev);
 
-	if (unlikely(!count))
-		return count;
-
-	if ((count + off) > DS1343_NVRAM_LEN)
-		count = DS1343_NVRAM_LEN - off;
-
 	address = DS1343_NVRAM + off;
 
 	ret = regmap_bulk_write(priv->map, address, buf, count);
@@ -187,12 +181,6 @@
 	struct device *dev = kobj_to_dev(kobj);
 	struct ds1343_priv *priv = dev_get_drvdata(dev);
 
-	if (unlikely(!count))
-		return count;
-
-	if ((count + off) > DS1343_NVRAM_LEN)
-		count = DS1343_NVRAM_LEN - off;
-
 	address = DS1343_NVRAM + off;
 
 	ret = regmap_bulk_read(priv->map, address, buf, count);
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 72c9333..3b3049c 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -664,8 +664,6 @@
 {
 	struct ds1374 *ds1374 = i2c_get_clientdata(client);
 #ifdef CONFIG_RTC_DRV_DS1374_WDT
-	int res;
-
 	misc_deregister(&ds1374_miscdev);
 	ds1374_miscdev.parent = NULL;
 	unregister_reboot_notifier(&ds1374_wdt_notifier);
@@ -688,7 +686,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 
-	if (client->irq >= 0 && device_may_wakeup(&client->dev))
+	if (client->irq > 0 && device_may_wakeup(&client->dev))
 		enable_irq_wake(client->irq);
 	return 0;
 }
@@ -697,7 +695,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 
-	if (client->irq >= 0 && device_may_wakeup(&client->dev))
+	if (client->irq > 0 && device_may_wakeup(&client->dev))
 		disable_irq_wake(client->irq);
 	return 0;
 }
@@ -708,7 +706,6 @@
 static struct i2c_driver ds1374_driver = {
 	.driver = {
 		.name = "rtc-ds1374",
-		.owner = THIS_MODULE,
 		.pm = &ds1374_pm,
 	},
 	.probe = ds1374_probe,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 7415c2b..da3d04c 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -64,7 +64,7 @@
 #define DS1511_KIE	0x04
 #define DS1511_WDE	0x02
 #define DS1511_WDS	0x01
-#define DS1511_RAM_MAX	0xff
+#define DS1511_RAM_MAX	0x100
 
 #define RTC_CMD		DS1511_CONTROL_B
 #define RTC_CMD1	DS1511_CONTROL_A
@@ -159,7 +159,7 @@
 	/*
 	 * set wdog enable and wdog 'steering' bit to issue a reset
 	 */
-	rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD);
+	rtc_write(rtc_read(RTC_CMD) | DS1511_WDE | DS1511_WDS, RTC_CMD);
 }
 
 void
@@ -407,26 +407,10 @@
 {
 	ssize_t count;
 
-	/*
-	 * if count is more than one, turn on "burst" mode
-	 * turn it off when you're done
-	 */
-	if (size > 1)
-		rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
-
-	if (pos > DS1511_RAM_MAX)
-		pos = DS1511_RAM_MAX;
-
-	if (size + pos > DS1511_RAM_MAX + 1)
-		size = DS1511_RAM_MAX - pos + 1;
-
 	rtc_write(pos, DS1511_RAMADDR_LSB);
-	for (count = 0; size > 0; count++, size--)
+	for (count = 0; count < size; count++)
 		*buf++ = rtc_read(DS1511_RAMDATA);
 
-	if (count > 1)
-		rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
-
 	return count;
 }
 
@@ -437,26 +421,10 @@
 {
 	ssize_t count;
 
-	/*
-	 * if count is more than one, turn on "burst" mode
-	 * turn it off when you're done
-	 */
-	if (size > 1)
-		rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
-
-	if (pos > DS1511_RAM_MAX)
-		pos = DS1511_RAM_MAX;
-
-	if (size + pos > DS1511_RAM_MAX + 1)
-		size = DS1511_RAM_MAX - pos + 1;
-
 	rtc_write(pos, DS1511_RAMADDR_LSB);
-	for (count = 0; size > 0; count++, size--)
+	for (count = 0; count < size; count++)
 		rtc_write(*buf++, DS1511_RAMDATA);
 
-	if (count > 1)
-		rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
-
 	return count;
 }
 
@@ -490,7 +458,7 @@
 	/*
 	 * turn on the clock and the crystal, etc.
 	 */
-	rtc_write(0, RTC_CMD);
+	rtc_write(DS1511_BME, RTC_CMD);
 	rtc_write(0, RTC_CMD1);
 	/*
 	 * clear the wdog counter
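
The watchdog hunk above is a read-modify-write fix: writing DS1511_WDE | DS1511_WDS directly would zero every other bit in the control register, including the burst-mode bit (DS1511_BME) that probe now sets once instead of toggling around each NVRAM loop. The generic pattern for any byte-wide control register:

#include <linux/io.h>
#include <linux/types.h>

/* Set bits without clobbering the rest of the register. */
static void reg_set_bits(void __iomem *reg, u8 bits)
{
	writeb(readb(reg) | bits, reg);
}

/* Clear bits, leaving the others untouched. */
static void reg_clear_bits(void __iomem *reg, u8 bits)
{
	writeb(readb(reg) & ~bits, reg);
}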
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index a24e091..38422ab 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -245,7 +245,7 @@
 	void __iomem *ioaddr = pdata->ioaddr;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; count < size; count++)
 		*buf++ = readb(ioaddr + pos++);
 	return count;
 }
@@ -260,7 +260,7 @@
 	void __iomem *ioaddr = pdata->ioaddr;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; count < size; count++)
 		writeb(*buf++, ioaddr + pos++);
 	return count;
 }
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 818a363..05a51ef 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -2145,27 +2145,7 @@
 	.probe		= ds1685_rtc_probe,
 	.remove		= ds1685_rtc_remove,
 };
-
-/**
- * ds1685_rtc_init - rtc module init.
- */
-static int __init
-ds1685_rtc_init(void)
-{
-	return platform_driver_register(&ds1685_rtc_driver);
-}
-
-/**
- * ds1685_rtc_exit - rtc module exit.
- */
-static void __exit
-ds1685_rtc_exit(void)
-{
-	platform_driver_unregister(&ds1685_rtc_driver);
-}
-
-module_init(ds1685_rtc_init);
-module_exit(ds1685_rtc_exit);
+module_platform_driver(ds1685_rtc_driver);
 /* ----------------------------------------------------------------------- */
 
 
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 0f8d8ac..c5168b3 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -134,7 +134,7 @@
 	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
+	for (count = 0; count < size; count++)
 		*buf++ = readb(ioaddr + pos++);
 	return count;
 }
@@ -149,7 +149,7 @@
 	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
+	for (count = 0; count < size; count++)
 		writeb(*buf++, ioaddr + pos++);
 	return count;
 }
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 7e48e53..4e99ace 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -443,7 +443,7 @@
 {
 	struct ds3232 *ds3232 = i2c_get_clientdata(client);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		mutex_lock(&ds3232->mutex);
 		ds3232->exiting = 1;
 		mutex_unlock(&ds3232->mutex);
@@ -463,7 +463,10 @@
 
 	if (device_can_wakeup(dev)) {
 		ds3232->suspended = true;
-		irq_set_irq_wake(client->irq, 1);
+		if (irq_set_irq_wake(client->irq, 1)) {
+			dev_warn_once(dev, "Cannot set wakeup source\n");
+			ds3232->suspended = false;
+		}
 	}
 
 	return 0;
@@ -500,7 +503,6 @@
 static struct i2c_driver ds3232_driver = {
 	.driver = {
 		.name = "rtc-ds3232",
-		.owner = THIS_MODULE,
 		.pm	= &ds3232_pm_ops,
 	},
 	.probe = ds3232_probe,
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 83c3b30..576eadb 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -523,7 +523,6 @@
 static struct i2c_driver fm3130_driver = {
 	.driver = {
 		.name	= "rtc-fm3130",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= fm3130_probe,
 	.id_table	= fm3130_id,
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index 35f4486..e841846 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -148,10 +148,7 @@
 
 	rtc->rtc_dev = rtc_device_register(pdev->name, dev,
 					   &gemini_rtc_ops, THIS_MODULE);
-	if (likely(IS_ERR(rtc->rtc_dev)))
-		return PTR_ERR(rtc->rtc_dev);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(rtc->rtc_dev);
 }
 
 static int gemini_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index e9da795..097325d 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -599,7 +599,6 @@
 static struct i2c_driver hym8563_driver = {
 	.driver		= {
 		.name	= "rtc-hym8563",
-		.owner	= THIS_MODULE,
 		.pm	= &hym8563_pm_ops,
 		.of_match_table	= hym8563_dt_idtable,
 	},
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index f9b0827..839d1fd 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -151,12 +151,7 @@
 		tm->tm_sec, tm->tm_min, tm->tm_hour,
 		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-	/* The clock can give out invalid datetime, but we cannot return
-	 * -EINVAL otherwise hwclock will refuse to set the time on bootup. */
-	if (rtc_valid_tm(tm) < 0)
-		dev_err(&client->dev, "retrieved date and time is invalid.\n");
-
-	return 0;
+	return rtc_valid_tm(tm);
 }
 
 static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
@@ -279,6 +274,7 @@
 	{ .compatible = "isil,isl12022" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, isl12022_dt_match);
 #endif
 
 static const struct i2c_device_id isl12022_id[] = {
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index da818d3..a0462e5 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -648,6 +648,7 @@
 	{ .compatible = "isil,isl12057" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, isl12057_dt_match);
 #endif
 
 static const struct i2c_device_id isl12057_id[] = {
@@ -659,7 +660,6 @@
 static struct i2c_driver isl12057_driver = {
 	.driver = {
 		.name = DRV_NAME,
-		.owner = THIS_MODULE,
 		.pm = &isl12057_rtc_pm_ops,
 		.of_match_table = of_match_ptr(isl12057_dt_match),
 	},
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
new file mode 100644
index 0000000..59d9959
--- /dev/null
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -0,0 +1,310 @@
+/*
+ * RTC driver for NXP LPC178x/18xx/43xx Real-Time Clock (RTC)
+ *
+ * Copyright (C) 2011 NXP Semiconductors
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* LPC24xx RTC register offsets and bits */
+#define LPC24XX_ILR		0x00
+#define  LPC24XX_RTCCIF		BIT(0)
+#define  LPC24XX_RTCALF		BIT(1)
+#define LPC24XX_CTC		0x04
+#define LPC24XX_CCR		0x08
+#define  LPC24XX_CLKEN		BIT(0)
+#define  LPC178X_CCALEN		BIT(4)
+#define LPC24XX_CIIR		0x0c
+#define LPC24XX_AMR		0x10
+#define  LPC24XX_ALARM_DISABLE	0xff
+#define LPC24XX_CTIME0		0x14
+#define LPC24XX_CTIME1		0x18
+#define LPC24XX_CTIME2		0x1c
+#define LPC24XX_SEC		0x20
+#define LPC24XX_MIN		0x24
+#define LPC24XX_HOUR		0x28
+#define LPC24XX_DOM		0x2c
+#define LPC24XX_DOW		0x30
+#define LPC24XX_DOY		0x34
+#define LPC24XX_MONTH		0x38
+#define LPC24XX_YEAR		0x3c
+#define LPC24XX_ALSEC		0x60
+#define LPC24XX_ALMIN		0x64
+#define LPC24XX_ALHOUR		0x68
+#define LPC24XX_ALDOM		0x6c
+#define LPC24XX_ALDOW		0x70
+#define LPC24XX_ALDOY		0x74
+#define LPC24XX_ALMON		0x78
+#define LPC24XX_ALYEAR		0x7c
+
+/* Macros to read fields in consolidated time (CT) registers */
+#define CT0_SECS(x)		(((x) >> 0)  & 0x3f)
+#define CT0_MINS(x)		(((x) >> 8)  & 0x3f)
+#define CT0_HOURS(x)		(((x) >> 16) & 0x1f)
+#define CT0_DOW(x)		(((x) >> 24) & 0x07)
+#define CT1_DOM(x)		(((x) >> 0)  & 0x1f)
+#define CT1_MONTH(x)		(((x) >> 8)  & 0x0f)
+#define CT1_YEAR(x)		(((x) >> 16) & 0xfff)
+#define CT2_DOY(x)		(((x) >> 0)  & 0xfff)
+
+#define rtc_readl(dev, reg)		readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val)	writel((val), (dev)->rtc_base + (reg))
+
+struct lpc24xx_rtc {
+	void __iomem *rtc_base;
+	struct rtc_device *rtc;
+	struct clk *clk_rtc;
+	struct clk *clk_reg;
+};
+
+static int lpc24xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+
+	/* Disable RTC during update */
+	rtc_writel(rtc, LPC24XX_CCR, LPC178X_CCALEN);
+
+	rtc_writel(rtc, LPC24XX_SEC,	tm->tm_sec);
+	rtc_writel(rtc, LPC24XX_MIN,	tm->tm_min);
+	rtc_writel(rtc, LPC24XX_HOUR,	tm->tm_hour);
+	rtc_writel(rtc, LPC24XX_DOW,	tm->tm_wday);
+	rtc_writel(rtc, LPC24XX_DOM,	tm->tm_mday);
+	rtc_writel(rtc, LPC24XX_DOY,	tm->tm_yday);
+	rtc_writel(rtc, LPC24XX_MONTH,	tm->tm_mon);
+	rtc_writel(rtc, LPC24XX_YEAR,	tm->tm_year);
+
+	rtc_writel(rtc, LPC24XX_CCR, LPC24XX_CLKEN | LPC178X_CCALEN);
+
+	return 0;
+}
+
+static int lpc24xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+	u32 ct0, ct1, ct2;
+
+	ct0 = rtc_readl(rtc, LPC24XX_CTIME0);
+	ct1 = rtc_readl(rtc, LPC24XX_CTIME1);
+	ct2 = rtc_readl(rtc, LPC24XX_CTIME2);
+
+	tm->tm_sec  = CT0_SECS(ct0);
+	tm->tm_min  = CT0_MINS(ct0);
+	tm->tm_hour = CT0_HOURS(ct0);
+	tm->tm_wday = CT0_DOW(ct0);
+	tm->tm_mon  = CT1_MONTH(ct1);
+	tm->tm_mday = CT1_DOM(ct1);
+	tm->tm_year = CT1_YEAR(ct1);
+	tm->tm_yday = CT2_DOY(ct2);
+
+	return rtc_valid_tm(tm);
+}
+
+static int lpc24xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+	struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+	struct rtc_time *tm = &wkalrm->time;
+
+	tm->tm_sec  = rtc_readl(rtc, LPC24XX_ALSEC);
+	tm->tm_min  = rtc_readl(rtc, LPC24XX_ALMIN);
+	tm->tm_hour = rtc_readl(rtc, LPC24XX_ALHOUR);
+	tm->tm_mday = rtc_readl(rtc, LPC24XX_ALDOM);
+	tm->tm_wday = rtc_readl(rtc, LPC24XX_ALDOW);
+	tm->tm_yday = rtc_readl(rtc, LPC24XX_ALDOY);
+	tm->tm_mon  = rtc_readl(rtc, LPC24XX_ALMON);
+	tm->tm_year = rtc_readl(rtc, LPC24XX_ALYEAR);
+
+	wkalrm->enabled = rtc_readl(rtc, LPC24XX_AMR) == 0;
+	wkalrm->pending = !!(rtc_readl(rtc, LPC24XX_ILR) & LPC24XX_RTCCIF);
+
+	return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc24xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+	struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+	struct rtc_time *tm = &wkalrm->time;
+
+	/* Disable alarm irq during update */
+	rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+
+	rtc_writel(rtc, LPC24XX_ALSEC,  tm->tm_sec);
+	rtc_writel(rtc, LPC24XX_ALMIN,  tm->tm_min);
+	rtc_writel(rtc, LPC24XX_ALHOUR, tm->tm_hour);
+	rtc_writel(rtc, LPC24XX_ALDOM,  tm->tm_mday);
+	rtc_writel(rtc, LPC24XX_ALDOW,  tm->tm_wday);
+	rtc_writel(rtc, LPC24XX_ALDOY,  tm->tm_yday);
+	rtc_writel(rtc, LPC24XX_ALMON,  tm->tm_mon);
+	rtc_writel(rtc, LPC24XX_ALYEAR, tm->tm_year);
+
+	if (wkalrm->enabled)
+		rtc_writel(rtc, LPC24XX_AMR, 0);
+
+	return 0;
+}
+
+static int lpc24xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+	struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+
+	if (enable)
+		rtc_writel(rtc, LPC24XX_AMR, 0);
+	else
+		rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+
+	return 0;
+}
+
+static irqreturn_t lpc24xx_rtc_interrupt(int irq, void *data)
+{
+	unsigned long events = RTC_IRQF;
+	struct lpc24xx_rtc *rtc = data;
+	u32 rtc_iir;
+
+	/* Check interrupt cause */
+	rtc_iir = rtc_readl(rtc, LPC24XX_ILR);
+	if (rtc_iir & LPC24XX_RTCALF) {
+		events |= RTC_AF;
+		rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+	}
+
+	/* Clear interrupt status and report event */
+	rtc_writel(rtc, LPC24XX_ILR, rtc_iir);
+	rtc_update_irq(rtc->rtc, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc24xx_rtc_ops = {
+	.read_time		= lpc24xx_rtc_read_time,
+	.set_time		= lpc24xx_rtc_set_time,
+	.read_alarm		= lpc24xx_rtc_read_alarm,
+	.set_alarm		= lpc24xx_rtc_set_alarm,
+	.alarm_irq_enable	= lpc24xx_rtc_alarm_irq_enable,
+};
+
+static int lpc24xx_rtc_probe(struct platform_device *pdev)
+{
+	struct lpc24xx_rtc *rtc;
+	struct resource *res;
+	int irq, ret;
+
+	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rtc->rtc_base))
+		return PTR_ERR(rtc->rtc_base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_warn(&pdev->dev, "can't get interrupt resource\n");
+		return irq;
+	}
+
+	rtc->clk_rtc = devm_clk_get(&pdev->dev, "rtc");
+	if (IS_ERR(rtc->clk_rtc)) {
+		dev_err(&pdev->dev, "error getting rtc clock\n");
+		return PTR_ERR(rtc->clk_rtc);
+	}
+
+	rtc->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(rtc->clk_reg)) {
+		dev_err(&pdev->dev, "error getting reg clock\n");
+		return PTR_ERR(rtc->clk_reg);
+	}
+
+	ret = clk_prepare_enable(rtc->clk_rtc);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable rtc clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rtc->clk_reg);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable reg clock\n");
+		goto disable_rtc_clk;
+	}
+
+	platform_set_drvdata(pdev, rtc);
+
+	/* Clear any pending interrupts */
+	rtc_writel(rtc, LPC24XX_ILR, LPC24XX_RTCCIF | LPC24XX_RTCALF);
+
+	/* Enable RTC count */
+	rtc_writel(rtc, LPC24XX_CCR, LPC24XX_CLKEN | LPC178X_CCALEN);
+
+	ret = devm_request_irq(&pdev->dev, irq, lpc24xx_rtc_interrupt, 0,
+			       pdev->name, rtc);
+	if (ret < 0) {
+		dev_warn(&pdev->dev, "can't request interrupt\n");
+		goto disable_clks;
+	}
+
+	rtc->rtc = devm_rtc_device_register(&pdev->dev, "lpc24xx-rtc",
+					    &lpc24xx_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc->rtc)) {
+		dev_err(&pdev->dev, "can't register rtc device\n");
+		ret = PTR_ERR(rtc->rtc);
+		goto disable_clks;
+	}
+
+	return 0;
+
+disable_clks:
+	clk_disable_unprepare(rtc->clk_reg);
+disable_rtc_clk:
+	clk_disable_unprepare(rtc->clk_rtc);
+	return ret;
+}
+
+static int lpc24xx_rtc_remove(struct platform_device *pdev)
+{
+	struct lpc24xx_rtc *rtc = platform_get_drvdata(pdev);
+
+	/* Ensure all interrupt sources are masked */
+	rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+	rtc_writel(rtc, LPC24XX_CIIR, 0);
+
+	rtc_writel(rtc, LPC24XX_CCR, LPC178X_CCALEN);
+
+	clk_disable_unprepare(rtc->clk_rtc);
+	clk_disable_unprepare(rtc->clk_reg);
+
+	return 0;
+}
+
+static const struct of_device_id lpc24xx_rtc_match[] = {
+	{ .compatible = "nxp,lpc1788-rtc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lpc24xx_rtc_match);
+
+static struct platform_driver lpc24xx_rtc_driver = {
+	.probe	= lpc24xx_rtc_probe,
+	.remove	= lpc24xx_rtc_remove,
+	.driver	= {
+		.name = "lpc24xx-rtc",
+		.of_match_table	= lpc24xx_rtc_match,
+	},
+};
+module_platform_driver(lpc24xx_rtc_driver);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com>");
+MODULE_DESCRIPTION("RTC driver for the LPC178x/18xx/408x/43xx SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 90abb5b..d99a705 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -345,11 +345,12 @@
 	ssize_t cnt = 0;
 	unsigned long flags;
 
-	for (; size > 0 && pos < pdata->offset; cnt++, size--) {
-		spin_lock_irqsave(&m48t59->lock, flags);
+	spin_lock_irqsave(&m48t59->lock, flags);
+
+	for (; cnt < size; cnt++)
 		*buf++ = M48T59_READ(cnt);
-		spin_unlock_irqrestore(&m48t59->lock, flags);
-	}
+
+	spin_unlock_irqrestore(&m48t59->lock, flags);
 
 	return cnt;
 }
@@ -365,11 +366,12 @@
 	ssize_t cnt = 0;
 	unsigned long flags;
 
-	for (; size > 0 && pos < pdata->offset; cnt++, size--) {
-		spin_lock_irqsave(&m48t59->lock, flags);
+	spin_lock_irqsave(&m48t59->lock, flags);
+
+	for (; cnt < size; cnt++)
 		M48T59_WRITE(*buf++, cnt);
-		spin_unlock_irqrestore(&m48t59->lock, flags);
-	}
+
+	spin_unlock_irqrestore(&m48t59->lock, flags);
 
 	return cnt;
 }
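
The m48t59 hunks hoist the spinlock out of the copy loop: one irqsave/irqrestore pair per transfer instead of one per byte, which removes per-byte overhead and makes the whole NVRAM window read or written atomically with respect to other register users. Condensed to the pattern:

#include <linux/spinlock.h>
#include <linux/types.h>

static ssize_t locked_copy(spinlock_t *lock, u8 *dst, const u8 *src,
			   size_t size)
{
	unsigned long flags;
	size_t i;

	spin_lock_irqsave(lock, flags);		/* once per transfer... */
	for (i = 0; i < size; i++)
		dst[i] = src[i];
	spin_unlock_irqrestore(lock, flags);	/* ...not once per byte */

	return size;
}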
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 9e02bcd..db984d4 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -521,6 +521,7 @@
 	{ "max8997-rtc", 0 },
 	{},
 };
+MODULE_DEVICE_TABLE(platform, rtc_id);
 
 static struct platform_driver max8997_rtc_driver = {
 	.driver		= {
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index 73759c9..07b30a3 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -312,6 +312,7 @@
 	{ .compatible = "moxa,moxart-rtc" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, moxart_rtc_match);
 
 static struct platform_driver moxart_rtc_driver = {
 	.probe	= moxart_rtc_probe,
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 1767e18..4ca4daa 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -406,6 +406,7 @@
 	{ .compatible = "fsl,mpc5200-rtc", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, mpc5121_rtc_match);
 #endif
 
 static struct platform_driver mpc5121_rtc_driver = {
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index eab230b..06a5c52 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -373,15 +373,42 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_rtc_suspend(struct device *dev)
+{
+	struct mt6397_rtc *rtc = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(rtc->irq);
+
+	return 0;
+}
+
+static int mt6397_rtc_resume(struct device *dev)
+{
+	struct mt6397_rtc *rtc = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(rtc->irq);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
+			mt6397_rtc_resume);
+
 static const struct of_device_id mt6397_rtc_of_match[] = {
 	{ .compatible = "mediatek,mt6397-rtc", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
 
 static struct platform_driver mtk_rtc_driver = {
 	.driver = {
 		.name = "mt6397-rtc",
 		.of_match_table = mt6397_rtc_of_match,
+		.pm = &mt6397_pm_ops,
 	},
 	.probe	= mtk_rtc_probe,
 	.remove = mtk_rtc_remove,
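
The mt6397 addition is the standard wakeup-interrupt suspend/resume pair. Note the placement: the callbacks sit under #ifdef CONFIG_PM_SLEEP, but SIMPLE_DEV_PM_OPS() stays outside it, because the macro expands to an empty ops struct when sleep support is configured out, so the driver builds either way. The skeleton:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* arm the wake source here, e.g. enable_irq_wake() */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* disarm it again, e.g. disable_irq_wake() */
	return 0;
}
#endif

/* References the callbacks only when CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);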
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 7f50d2e..79bb286 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -324,6 +324,7 @@
 	{ .compatible = "marvell,orion-rtc", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
 #endif
 
 static struct platform_driver mv_rtc_driver = {
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8b6355f..ec2e9c5 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -25,6 +25,7 @@
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 
 /*
  * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -107,6 +108,7 @@
 
 /* OMAP_RTC_OSC_REG bit fields: */
 #define OMAP_RTC_OSC_32KCLK_EN		BIT(6)
+#define OMAP_RTC_OSC_SEL_32KCLK_SRC	BIT(3)
 
 /* OMAP_RTC_IRQWAKEEN bit fields: */
 #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN	BIT(1)
@@ -132,10 +134,12 @@
 struct omap_rtc {
 	struct rtc_device *rtc;
 	void __iomem *base;
+	struct clk *clk;
 	int irq_alarm;
 	int irq_timer;
 	u8 interrupts_reg;
 	bool is_pmic_controller;
+	bool has_ext_clk;
 	const struct omap_rtc_device_type *type;
 };
 
@@ -553,6 +557,15 @@
 	if (rtc->irq_alarm <= 0)
 		return -ENOENT;
 
+	rtc->clk = devm_clk_get(&pdev->dev, "ext-clk");
+	if (!IS_ERR(rtc->clk))
+		rtc->has_ext_clk = true;
+	else
+		rtc->clk = devm_clk_get(&pdev->dev, "int-clk");
+
+	if (!IS_ERR(rtc->clk))
+		clk_prepare_enable(rtc->clk);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rtc->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(rtc->base))
@@ -627,6 +640,16 @@
 	if (reg != new_ctrl)
 		rtc_write(rtc, OMAP_RTC_CTRL_REG, new_ctrl);
 
+	/*
+	 * If we have the external clock then switch to it so we can keep
+	 * ticking across suspend.
+	 */
+	if (rtc->has_ext_clk) {
+		reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+		rtc_write(rtc, OMAP_RTC_OSC_REG,
+			  reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+	}
+
 	rtc->type->lock(rtc);
 
 	device_init_wakeup(&pdev->dev, true);
@@ -672,6 +695,7 @@
 static int __exit omap_rtc_remove(struct platform_device *pdev)
 {
 	struct omap_rtc *rtc = platform_get_drvdata(pdev);
+	u8 reg;
 
 	if (pm_power_off == omap_rtc_power_off &&
 			omap_rtc_power_off_rtc == rtc) {
@@ -681,10 +705,19 @@
 
 	device_init_wakeup(&pdev->dev, 0);
 
+	if (!IS_ERR(rtc->clk))
+		clk_disable_unprepare(rtc->clk);
+
 	rtc->type->unlock(rtc);
 	/* leave rtc running, but disable irqs */
 	rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
 
+	if (rtc->has_ext_clk) {
+		reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+		reg &= ~OMAP_RTC_OSC_SEL_32KCLK_SRC;
+		rtc_write(rtc, OMAP_RTC_OSC_REG, reg);
+	}
+
 	rtc->type->lock(rtc);
 
 	/* Disable the clock/module */
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 7061dca..6fbf9e6 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -190,11 +190,9 @@
 	return rc;
 }
 
-static const struct rtc_class_ops opal_rtc_ops = {
+static struct rtc_class_ops opal_rtc_ops = {
 	.read_time	= opal_get_rtc_time,
 	.set_time	= opal_set_rtc_time,
-	.read_alarm	= opal_get_tpo_time,
-	.set_alarm	= opal_set_tpo_time,
 };
 
 static int opal_rtc_probe(struct platform_device *pdev)
@@ -202,8 +200,11 @@
 	struct rtc_device *rtc;
 
 	if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
-						 NULL))
+						 NULL)) {
 		device_set_wakeup_capable(&pdev->dev, true);
+		opal_rtc_ops.read_alarm	= opal_get_tpo_time;
+		opal_rtc_ops.set_alarm = opal_set_tpo_time;
+	}
 
 	rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
 				       THIS_MODULE);
@@ -236,7 +237,6 @@
 	.id_table	= opal_rtc_driver_ids,
 	.driver		= {
 		.name		= DRVNAME,
-		.owner		= THIS_MODULE,
 		.of_match_table	= opal_rtc_match,
 	},
 };
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 8a7556c..1c47650 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -165,13 +165,7 @@
 			tm->tm_sec, tm->tm_min, tm->tm_hour,
 			tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-	/* the clock can give out invalid datetime, but we cannot return
-	 * -EINVAL otherwise hwclock will refuse to set the time on bootup.
-	 */
-	if (rtc_valid_tm(tm) < 0)
-		dev_err(dev, "retrieved date/time is not valid.\n");
-
-	return 0;
+	return rtc_valid_tm(tm);
 }
 
 static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9bd842e..4b11d31 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -33,11 +33,14 @@
 #define PCF2127_REG_MO          (0x08)
 #define PCF2127_REG_YR          (0x09)
 
+#define PCF2127_OSF             BIT(7)  /* Oscillator Fail flag */
+
 static struct i2c_driver pcf2127_driver;
 
 struct pcf2127 {
 	struct rtc_device *rtc;
 	int voltage_low; /* indicates if a low_voltage was detected */
+	int oscillator_failed; /* OSF was detected and date is unreliable */
 };
 
 /*
@@ -59,7 +62,18 @@
 	if (buf[PCF2127_REG_CTRL3] & 0x04) {
 		pcf2127->voltage_low = 1;
 		dev_info(&client->dev,
-			"low voltage detected, date/time is not reliable.\n");
+			"low voltage detected, check/replace RTC battery.\n");
+	}
+
+	if (buf[PCF2127_REG_SC] & PCF2127_OSF) {
+		/*
+		 * no need to clear the flag here,
+		 * it will be cleared once the new date is saved
+		 */
+		pcf2127->oscillator_failed = 1;
+		dev_warn(&client->dev,
+			 "oscillator stop detected, date/time is not reliable\n");
+		return -EINVAL;
 	}
 
 	dev_dbg(&client->dev,
@@ -88,17 +102,12 @@
 		tm->tm_sec, tm->tm_min, tm->tm_hour,
 		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-	/* the clock can give out invalid datetime, but we cannot return
-	 * -EINVAL otherwise hwclock will refuse to set the time on bootup.
-	 */
-	if (rtc_valid_tm(tm) < 0)
-		dev_err(&client->dev, "retrieved date/time is not valid.\n");
-
-	return 0;
+	return rtc_valid_tm(tm);
 }
 
 static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
+	struct pcf2127 *pcf2127 = i2c_get_clientdata(client);
 	unsigned char buf[8];
 	int i = 0, err;
 
@@ -112,7 +121,7 @@
 	buf[i++] = PCF2127_REG_SC;
 
 	/* hours, minutes and seconds */
-	buf[i++] = bin2bcd(tm->tm_sec);
+	buf[i++] = bin2bcd(tm->tm_sec);	/* this will also clear OSF flag */
 	buf[i++] = bin2bcd(tm->tm_min);
 	buf[i++] = bin2bcd(tm->tm_hour);
 	buf[i++] = bin2bcd(tm->tm_mday);
@@ -132,6 +141,9 @@
 		return -EIO;
 	}
 
+	/* clear OSF flag in client data */
+	pcf2127->oscillator_failed = 0;
+
 	return 0;
 }
 
@@ -144,7 +156,9 @@
 	switch (cmd) {
 	case RTC_VL_READ:
 		if (pcf2127->voltage_low)
-			dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+			dev_info(dev, "low voltage detected, check/replace battery\n");
+		if (pcf2127->oscillator_failed)
+			dev_info(dev, "oscillator stop detected, date/time is not reliable\n");
 
 		if (copy_to_user((void __user *)arg, &pcf2127->voltage_low,
 					sizeof(int)))
@@ -217,7 +231,6 @@
 static struct i2c_driver pcf2127_driver = {
 	.driver		= {
 		.name	= "rtc-pcf2127",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcf2127_of_match),
 	},
 	.probe		= pcf2127_probe,
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 6a12bf6..b6d73dd 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -189,7 +189,6 @@
 static struct i2c_driver pcf85063_driver = {
 	.driver		= {
 		.name	= "rtc-pcf85063",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcf85063_of_match),
 	},
 	.probe		= pcf85063_probe,
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 4cdb64b..e7ebcc0b7 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -334,7 +334,6 @@
 static struct i2c_driver pcf8523_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(pcf8523_of_match),
 	},
 	.probe = pcf8523_probe,
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 8bba022..e569243 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -483,7 +483,6 @@
 static struct i2c_driver pcf8563_driver = {
 	.driver		= {
 		.name	= "rtc-pcf8563",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcf8563_of_match),
 	},
 	.probe		= pcf8563_probe,
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 5911a6d..7ca9e88 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -309,7 +309,6 @@
 static struct i2c_driver pcf8583_driver = {
 	.driver = {
 		.name	= "pcf8583",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= pcf8583_probe,
 	.id_table	= pcf8583_id,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 99181fff..41dcb7d 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -476,6 +476,6 @@
 
 module_amba_driver(pl031_driver);
 
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net");
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
 MODULE_DESCRIPTION("ARM AMBA PL031 RTC Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 4561f37..fe4985b 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -32,6 +32,8 @@
 
 #include <mach/hardware.h>
 
+#include "rtc-sa1100.h"
+
 #define RTC_DEF_DIVIDER		(32768 - 1)
 #define RTC_DEF_TRIM		0
 #define MAXFREQ_PERIODIC	1000
@@ -86,10 +88,9 @@
 	__raw_writel((value), (pxa_rtc)->base + (reg))
 
 struct pxa_rtc {
+	struct sa1100_rtc sa1100_rtc;
 	struct resource	*ress;
 	void __iomem		*base;
-	int			irq_1Hz;
-	int			irq_Alrm;
 	struct rtc_device	*rtc;
 	spinlock_t		lock;		/* Protects this structure */
 };
@@ -184,25 +185,25 @@
 	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
 	int ret;
 
-	ret = request_irq(pxa_rtc->irq_1Hz, pxa_rtc_irq, 0,
+	ret = request_irq(pxa_rtc->sa1100_rtc.irq_1hz, pxa_rtc_irq, 0,
 			  "rtc 1Hz", dev);
 	if (ret < 0) {
-		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_1Hz,
-			ret);
+		dev_err(dev, "can't get irq %i, err %d\n",
+			pxa_rtc->sa1100_rtc.irq_1hz, ret);
 		goto err_irq_1Hz;
 	}
-	ret = request_irq(pxa_rtc->irq_Alrm, pxa_rtc_irq, 0,
+	ret = request_irq(pxa_rtc->sa1100_rtc.irq_alarm, pxa_rtc_irq, 0,
 			  "rtc Alrm", dev);
 	if (ret < 0) {
-		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_Alrm,
-			ret);
+		dev_err(dev, "can't get irq %i, err %d\n",
+			pxa_rtc->sa1100_rtc.irq_alarm, ret);
 		goto err_irq_Alrm;
 	}
 
 	return 0;
 
 err_irq_Alrm:
-	free_irq(pxa_rtc->irq_1Hz, dev);
+	free_irq(pxa_rtc->sa1100_rtc.irq_1hz, dev);
 err_irq_1Hz:
 	return ret;
 }
@@ -215,8 +216,8 @@
 	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
 	spin_unlock_irq(&pxa_rtc->lock);
 
-	free_irq(pxa_rtc->irq_Alrm, dev);
-	free_irq(pxa_rtc->irq_1Hz, dev);
+	free_irq(pxa_rtc->sa1100_rtc.irq_1hz, dev);
+	free_irq(pxa_rtc->sa1100_rtc.irq_alarm, dev);
 }
 
 static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
@@ -320,12 +321,13 @@
 {
 	struct device *dev = &pdev->dev;
 	struct pxa_rtc *pxa_rtc;
+	struct sa1100_rtc *sa1100_rtc;
 	int ret;
-	u32 rttr;
 
 	pxa_rtc = devm_kzalloc(dev, sizeof(*pxa_rtc), GFP_KERNEL);
 	if (!pxa_rtc)
 		return -ENOMEM;
+	sa1100_rtc = &pxa_rtc->sa1100_rtc;
 
 	spin_lock_init(&pxa_rtc->lock);
 	platform_set_drvdata(pdev, pxa_rtc);
@@ -336,13 +338,13 @@
 		return -ENXIO;
 	}
 
-	pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0);
-	if (pxa_rtc->irq_1Hz < 0) {
+	sa1100_rtc->irq_1hz = platform_get_irq(pdev, 0);
+	if (sa1100_rtc->irq_1hz < 0) {
 		dev_err(dev, "No 1Hz IRQ resource defined\n");
 		return -ENXIO;
 	}
-	pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1);
-	if (pxa_rtc->irq_Alrm < 0) {
+	sa1100_rtc->irq_alarm = platform_get_irq(pdev, 1);
+	if (sa1100_rtc->irq_alarm < 0) {
 		dev_err(dev, "No alarm IRQ resource defined\n");
 		return -ENXIO;
 	}
@@ -354,15 +356,14 @@
 		return -ENOMEM;
 	}
 
-	/*
-	 * If the clock divider is uninitialized then reset it to the
-	 * default value to get the 1Hz clock.
-	 */
-	if (rtc_readl(pxa_rtc, RTTR) == 0) {
-		rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
-		rtc_writel(pxa_rtc, RTTR, rttr);
-		dev_warn(dev, "warning: initializing default clock"
-			 " divider/trim value\n");
+	sa1100_rtc->rcnr = pxa_rtc->base + 0x0;
+	sa1100_rtc->rtsr = pxa_rtc->base + 0x8;
+	sa1100_rtc->rtar = pxa_rtc->base + 0x4;
+	sa1100_rtc->rttr = pxa_rtc->base + 0xc;
+	ret = sa1100_rtc_init(pdev, sa1100_rtc);
+	if (ret) {
+		dev_err(dev, "Unable to init SA1100 RTC sub-device\n");
+		return ret;
 	}
 
 	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
@@ -402,7 +403,7 @@
 	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
 
 	if (device_may_wakeup(dev))
-		enable_irq_wake(pxa_rtc->irq_Alrm);
+		enable_irq_wake(pxa_rtc->sa1100_rtc.irq_alarm);
 	return 0;
 }
 
@@ -411,7 +412,7 @@
 	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
 
 	if (device_may_wakeup(dev))
-		disable_irq_wake(pxa_rtc->irq_Alrm);
+		disable_irq_wake(pxa_rtc->sa1100_rtc.irq_alarm);
 	return 0;
 }
 #endif
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index b548551..0260353 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -170,7 +170,7 @@
 
 	spin_lock_irq(&priv->lock);
 
-	for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+	for (count = 0; count < size; count++) {
 		u8 data;
 
 		rp5c01_write(priv,
@@ -200,7 +200,7 @@
 
 	spin_lock_irq(&priv->lock);
 
-	for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+	for (count = 0; count < size; count++) {
 		u8 data = *buf++;
 
 		rp5c01_write(priv,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index e6298e0..24c3d69 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -18,13 +18,11 @@
  * modify it under the terms of the GNU General Public License
  * version 2 as published by the Free Software Foundation.
  */
+#include <linux/bcd.h>
+#include <linux/bitops.h>
+#include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/bcd.h>
-#include <linux/i2c.h>
-#include <linux/list.h>
 #include <linux/rtc.h>
 
 /* Register definitions */
@@ -48,17 +46,17 @@
 #define RX8025_BIT_CTRL1_CT	(7 << 0)
 /* 1 Hz periodic level irq */
 #define RX8025_BIT_CTRL1_CT_1HZ	4
-#define RX8025_BIT_CTRL1_TEST	(1 << 3)
-#define RX8025_BIT_CTRL1_1224	(1 << 5)
-#define RX8025_BIT_CTRL1_DALE	(1 << 6)
-#define RX8025_BIT_CTRL1_WALE	(1 << 7)
+#define RX8025_BIT_CTRL1_TEST	BIT(3)
+#define RX8025_BIT_CTRL1_1224	BIT(5)
+#define RX8025_BIT_CTRL1_DALE	BIT(6)
+#define RX8025_BIT_CTRL1_WALE	BIT(7)
 
-#define RX8025_BIT_CTRL2_DAFG	(1 << 0)
-#define RX8025_BIT_CTRL2_WAFG	(1 << 1)
-#define RX8025_BIT_CTRL2_CTFG	(1 << 2)
-#define RX8025_BIT_CTRL2_PON	(1 << 4)
-#define RX8025_BIT_CTRL2_XST	(1 << 5)
-#define RX8025_BIT_CTRL2_VDET	(1 << 6)
+#define RX8025_BIT_CTRL2_DAFG	BIT(0)
+#define RX8025_BIT_CTRL2_WAFG	BIT(1)
+#define RX8025_BIT_CTRL2_CTFG	BIT(2)
+#define RX8025_BIT_CTRL2_PON	BIT(4)
+#define RX8025_BIT_CTRL2_XST	BIT(5)
+#define RX8025_BIT_CTRL2_VDET	BIT(6)
 
 /* Clock precision adjustment */
 #define RX8025_ADJ_RESOLUTION	3050 /* in ppb */
@@ -74,84 +72,84 @@
 struct rx8025_data {
 	struct i2c_client *client;
 	struct rtc_device *rtc;
-	struct work_struct work;
 	u8 ctrl1;
-	unsigned exiting:1;
 };
 
-static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value)
+static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
 {
-	int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08);
-
-	if (ret < 0) {
-		dev_err(&client->dev, "Unable to read register #%d\n", number);
-		return ret;
-	}
-
-	*value = ret;
-	return 0;
+	return i2c_smbus_read_byte_data(client, number << 4);
 }
 
-static int rx8025_read_regs(struct i2c_client *client,
-			    int number, u8 length, u8 *values)
+static int rx8025_read_regs(const struct i2c_client *client,
+			    u8 number, u8 length, u8 *values)
 {
-	int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08,
-						length, values);
-
-	if (ret != length) {
-		dev_err(&client->dev, "Unable to read registers #%d..#%d\n",
-			number, number + length - 1);
+	int ret = i2c_smbus_read_i2c_block_data(client, number << 4, length,
+						values);
+	if (ret != length)
 		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static s32 rx8025_write_reg(const struct i2c_client *client, u8 number,
+			    u8 value)
+{
+	return i2c_smbus_write_byte_data(client, number << 4, value);
+}
+
+static s32 rx8025_write_regs(const struct i2c_client *client,
+			     u8 number, u8 length, const u8 *values)
+{
+	return i2c_smbus_write_i2c_block_data(client, number << 4,
+					      length, values);
+}
+
+static int rx8025_check_validity(struct device *dev)
+{
+	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+	int ctrl2;
+
+	ctrl2 = rx8025_read_reg(rx8025->client, RX8025_REG_CTRL2);
+	if (ctrl2 < 0)
+		return ctrl2;
+
+	if (ctrl2 & RX8025_BIT_CTRL2_VDET)
+		dev_warn(dev, "power voltage drop detected\n");
+
+	if (ctrl2 & RX8025_BIT_CTRL2_PON) {
+		dev_warn(dev, "power-on reset detected, date is invalid\n");
+		return -EINVAL;
+	}
+
+	if (!(ctrl2 & RX8025_BIT_CTRL2_XST)) {
+		dev_warn(dev, "crystal stopped, date is invalid\n");
+		return -EINVAL;
 	}
 
 	return 0;
 }
 
-static int rx8025_write_reg(struct i2c_client *client, int number, u8 value)
+static int rx8025_reset_validity(struct i2c_client *client)
 {
-	int ret = i2c_smbus_write_byte_data(client, number << 4, value);
+	int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
 
-	if (ret)
-		dev_err(&client->dev, "Unable to write register #%d\n",
-			number);
+	if (ctrl2 < 0)
+		return ctrl2;
 
-	return ret;
+	ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
+
+	return rx8025_write_reg(client, RX8025_REG_CTRL2,
+				ctrl2 | RX8025_BIT_CTRL2_XST);
 }
 
-static int rx8025_write_regs(struct i2c_client *client,
-			     int number, u8 length, u8 *values)
-{
-	int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08,
-						 length, values);
-
-	if (ret)
-		dev_err(&client->dev, "Unable to write registers #%d..#%d\n",
-			number, number + length - 1);
-
-	return ret;
-}
-
-static irqreturn_t rx8025_irq(int irq, void *dev_id)
+static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
 {
 	struct i2c_client *client = dev_id;
 	struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+	int status;
 
-	disable_irq_nosync(irq);
-	schedule_work(&rx8025->work);
-	return IRQ_HANDLED;
-}
-
-static void rx8025_work(struct work_struct *work)
-{
-	struct rx8025_data *rx8025 = container_of(work, struct rx8025_data,
-						  work);
-	struct i2c_client *client = rx8025->client;
-	struct mutex *lock = &rx8025->rtc->ops_lock;
-	u8 status;
-
-	mutex_lock(lock);
-
-	if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status))
+	status = rx8025_read_reg(client, RX8025_REG_CTRL2);
+	if (status < 0)
 		goto out;
 
 	if (!(status & RX8025_BIT_CTRL2_XST))
@@ -161,9 +159,7 @@
 	if (status & RX8025_BIT_CTRL2_CTFG) {
 		/* periodic */
 		status &= ~RX8025_BIT_CTRL2_CTFG;
-		local_irq_disable();
 		rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF);
-		local_irq_enable();
 	}
 
 	if (status & RX8025_BIT_CTRL2_DAFG) {
@@ -172,20 +168,11 @@
 		if (rx8025_write_reg(client, RX8025_REG_CTRL1,
 				     rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE))
 			goto out;
-		local_irq_disable();
 		rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF);
-		local_irq_enable();
 	}
 
-	/* acknowledge IRQ */
-	rx8025_write_reg(client, RX8025_REG_CTRL2,
-			 status | RX8025_BIT_CTRL2_XST);
-
 out:
-	if (!rx8025->exiting)
-		enable_irq(client->irq);
-
-	mutex_unlock(lock);
+	return IRQ_HANDLED;
 }
 
 static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
@@ -194,6 +181,10 @@
 	u8 date[7];
 	int err;
 
+	err = rx8025_check_validity(dev);
+	if (err)
+		return err;
+
 	err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
 	if (err)
 		return err;
@@ -213,10 +204,7 @@
 
 	dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f);
 	dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1;
-	dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]);
-
-	if (dt->tm_year < 70)
-		dt->tm_year += 100;
+	dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]) + 100;
 
 	dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
 		dt->tm_sec, dt->tm_min, dt->tm_hour,
@@ -229,12 +217,10 @@
 {
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
 	u8 date[7];
+	int ret;
 
-	/*
-	 * BUG: The HW assumes every year that is a multiple of 4 to be a leap
-	 * year.  Next time this is wrong is 2100, which will not be a leap
-	 * year.
-	 */
+	if ((dt->tm_year < 100) || (dt->tm_year > 199))
+		return -EINVAL;
 
 	/*
 	 * Here the read-only bits are written as "0".  I'm not sure if that
@@ -251,17 +237,21 @@
 	date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday);
 	date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday);
 	date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1);
-	date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100);
+	date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year - 100);
 
 	dev_dbg(dev,
 		"%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
 		__func__,
 		date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
 
-	return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+	ret = rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+	if (ret < 0)
+		return ret;
+
+	return rx8025_reset_validity(rx8025->client);
 }
 
-static int rx8025_init_client(struct i2c_client *client, int *need_reset)
+static int rx8025_init_client(struct i2c_client *client)
 {
 	struct rx8025_data *rx8025 = i2c_get_clientdata(client);
 	u8 ctrl[2], ctrl2;
@@ -275,38 +265,18 @@
 	/* Keep test bit zero ! */
 	rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST;
 
-	if (ctrl[1] & RX8025_BIT_CTRL2_PON) {
-		dev_warn(&client->dev, "power-on reset was detected, "
-			 "you may have to readjust the clock\n");
-		*need_reset = 1;
-	}
-
-	if (ctrl[1] & RX8025_BIT_CTRL2_VDET) {
-		dev_warn(&client->dev, "a power voltage drop was detected, "
-			 "you may have to readjust the clock\n");
-		*need_reset = 1;
-	}
-
-	if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) {
-		dev_warn(&client->dev, "Oscillation stop was detected,"
-			 "you may have to readjust the clock\n");
-		*need_reset = 1;
-	}
-
 	if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) {
 		dev_warn(&client->dev, "Alarm was detected\n");
 		need_clear = 1;
 	}
 
-	if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG))
+	if (ctrl[1] & RX8025_BIT_CTRL2_CTFG)
 		need_clear = 1;
 
-	if (*need_reset || need_clear) {
-		ctrl2 = ctrl[0];
-		ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET |
-			   RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
+	if (need_clear) {
+		ctrl2 = ctrl[1];
+		ctrl2 &= ~(RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
 			   RX8025_BIT_CTRL2_DAFG);
-		ctrl2 |= RX8025_BIT_CTRL2_XST;
 
 		err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
 	}
@@ -319,8 +289,8 @@
 {
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
 	struct i2c_client *client = rx8025->client;
-	u8 ctrl2, ald[2];
-	int err;
+	u8 ald[2];
+	int ctrl2, err;
 
 	if (client->irq <= 0)
 		return -EINVAL;
@@ -329,9 +299,9 @@
 	if (err)
 		return err;
 
-	err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2);
-	if (err)
-		return err;
+	ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
+	if (ctrl2 < 0)
+		return ctrl2;
 
 	dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n",
 		__func__, ald[0], ald[1], ctrl2);
@@ -452,12 +422,11 @@
 static int rx8025_get_clock_adjust(struct device *dev, int *adj)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	u8 digoff;
-	int err;
+	int digoff;
 
-	err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff);
-	if (err)
-		return err;
+	digoff = rx8025_read_reg(client, RX8025_REG_DIGOFF);
+	if (digoff < 0)
+		return digoff;
 
 	*adj = digoff >= 64 ? digoff - 128 : digoff;
 	if (*adj > 0)
@@ -539,88 +508,53 @@
 {
 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 	struct rx8025_data *rx8025;
-	int err, need_reset = 0;
+	int err = 0;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
 				     | I2C_FUNC_SMBUS_I2C_BLOCK)) {
 		dev_err(&adapter->dev,
 			"doesn't support required functionality\n");
-		err = -EIO;
-		goto errout;
+		return -EIO;
 	}
 
 	rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL);
 	if (!rx8025) {
-		err = -ENOMEM;
-		goto errout;
+		return -ENOMEM;
 	}
 
 	rx8025->client = client;
 	i2c_set_clientdata(client, rx8025);
-	INIT_WORK(&rx8025->work, rx8025_work);
 
-	err = rx8025_init_client(client, &need_reset);
+	err = rx8025_init_client(client);
 	if (err)
-		goto errout;
-
-	if (need_reset) {
-		struct rtc_time tm;
-		dev_info(&client->dev,
-			 "bad conditions detected, resetting date\n");
-		rtc_time_to_tm(0, &tm);	/* 1970/1/1 */
-		rx8025_set_time(&client->dev, &tm);
-	}
+		return err;
 
 	rx8025->rtc = devm_rtc_device_register(&client->dev, client->name,
 					  &rx8025_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rx8025->rtc)) {
-		err = PTR_ERR(rx8025->rtc);
 		dev_err(&client->dev, "unable to register the class device\n");
-		goto errout;
+		return PTR_ERR(rx8025->rtc);
 	}
 
 	if (client->irq > 0) {
 		dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
-		err = request_irq(client->irq, rx8025_irq,
-				  0, "rx8025", client);
+		err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+						rx8025_handle_irq, IRQF_ONESHOT,
+						"rx8025", client);
 		if (err) {
-			dev_err(&client->dev, "unable to request IRQ\n");
-			goto errout;
+			dev_err(&client->dev, "unable to request IRQ, alarms disabled\n");
+			client->irq = 0;
 		}
 	}
 
-	rx8025->rtc->irq_freq = 1;
 	rx8025->rtc->max_user_freq = 1;
 
 	err = rx8025_sysfs_register(&client->dev);
-	if (err)
-		goto errout_irq;
-
-	return 0;
-
-errout_irq:
-	if (client->irq > 0)
-		free_irq(client->irq, client);
-
-errout:
-	dev_err(&adapter->dev, "probing for rx8025 failed\n");
 	return err;
 }
 
 static int rx8025_remove(struct i2c_client *client)
 {
-	struct rx8025_data *rx8025 = i2c_get_clientdata(client);
-	struct mutex *lock = &rx8025->rtc->ops_lock;
-
-	if (client->irq > 0) {
-		mutex_lock(lock);
-		rx8025->exiting = 1;
-		mutex_unlock(lock);
-
-		free_irq(client->irq, client);
-		cancel_work_sync(&rx8025->work);
-	}
-
 	rx8025_sysfs_unregister(&client->dev);
 	return 0;
 }
@@ -628,7 +562,6 @@
 static struct i2c_driver rx8025_driver = {
 	.driver = {
 		.name = "rtc-rx8025",
-		.owner = THIS_MODULE,
 	},
 	.probe		= rx8025_probe,
 	.remove		= rx8025_remove,
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index de8d9c4..161e25d0 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -315,7 +315,6 @@
 static struct i2c_driver rx8581_driver = {
 	.driver		= {
 		.name	= "rtc-rx8581",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= rx8581_probe,
 	.id_table	= rx8581_id,
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0f8323..7cc8f73 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -39,6 +39,7 @@
 	void __iomem *base;
 	struct clk *rtc_clk;
 	struct clk *rtc_src_clk;
+	bool clk_disabled;
 
 	struct s3c_rtc_data *data;
 
@@ -71,9 +72,12 @@
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
-	clk_enable(info->rtc_clk);
-	if (info->data->needs_src_clk)
-		clk_enable(info->rtc_src_clk);
+	if (info->clk_disabled) {
+		clk_enable(info->rtc_clk);
+		if (info->data->needs_src_clk)
+			clk_enable(info->rtc_src_clk);
+		info->clk_disabled = false;
+	}
 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
 }
 
@@ -82,9 +86,12 @@
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
-	if (info->data->needs_src_clk)
-		clk_disable(info->rtc_src_clk);
-	clk_disable(info->rtc_clk);
+	if (!info->clk_disabled) {
+		if (info->data->needs_src_clk)
+			clk_disable(info->rtc_src_clk);
+		clk_disable(info->rtc_clk);
+		info->clk_disabled = true;
+	}
 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
 }
 
@@ -128,6 +135,11 @@
 
 	s3c_rtc_disable_clk(info);
 
+	if (enabled)
+		s3c_rtc_enable_clk(info);
+	else
+		s3c_rtc_disable_clk(info);
+
 	return 0;
 }
 
@@ -410,8 +422,9 @@
 
 	s3c_rtc_setaie(info->dev, 0);
 
+	if (info->data->needs_src_clk)
+		clk_unprepare(info->rtc_src_clk);
 	clk_unprepare(info->rtc_clk);
-	info->rtc_clk = NULL;
 
 	return 0;
 }
@@ -482,6 +495,7 @@
 		if (IS_ERR(info->rtc_src_clk)) {
 			dev_err(&pdev->dev,
 				"failed to find rtc source clock\n");
+			clk_disable_unprepare(info->rtc_clk);
 			return PTR_ERR(info->rtc_src_clk);
 		}
 		clk_prepare_enable(info->rtc_src_clk);
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 8c70d78..f2504b4 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -635,6 +635,16 @@
 	case S2MPS13X:
 		data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
 		ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
+		if (ret < 0)
+			break;
+
+		/*
+		 * The WUDR and (RUDR or AUDR) bits should be set high after
+		 * writing the RTC_CTRL register, just as when writing the
+		 * alarm registers. The datasheet does not describe this, but
+		 * the vendor code really does it.
+		 */
+		ret = s5m8767_rtc_set_alarm_reg(info);
 		break;
 
 	default:
@@ -797,6 +807,7 @@
 	{ "s2mps14-rtc",	S2MPS14X },
 	{ },
 };
+MODULE_DEVICE_TABLE(platform, s5m_rtc_id);
 
 static struct platform_driver s5m_rtc_driver = {
 	.driver		= {
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index b6e1ca0..c2187bf 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -35,24 +35,17 @@
 #include <linux/bitops.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#define RTSR_HZE		BIT(3)	/* HZ interrupt enable */
+#define RTSR_ALE		BIT(2)	/* RTC alarm interrupt enable */
+#define RTSR_HZ			BIT(1)	/* HZ rising-edge detected */
+#define RTSR_AL			BIT(0)	/* RTC alarm detected */
 
-#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
-#include <mach/regs-rtc.h>
-#endif
+#include "rtc-sa1100.h"
 
 #define RTC_DEF_DIVIDER		(32768 - 1)
 #define RTC_DEF_TRIM		0
 #define RTC_FREQ		1024
 
-struct sa1100_rtc {
-	spinlock_t		lock;
-	int			irq_1hz;
-	int			irq_alarm;
-	struct rtc_device	*rtc;
-	struct clk		*clk;
-};
 
 static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
 {
@@ -63,16 +56,16 @@
 
 	spin_lock(&info->lock);
 
-	rtsr = RTSR;
+	rtsr = readl_relaxed(info->rtsr);
 	/* clear interrupt sources */
-	RTSR = 0;
+	writel_relaxed(0, info->rtsr);
 	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
 	 * See also the comments in sa1100_rtc_probe(). */
 	if (rtsr & (RTSR_ALE | RTSR_HZE)) {
 		/* This is the original code, before there was the if test
 		 * above. This code does not clear interrupts that were not
 		 * enabled. */
-		RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+		writel_relaxed((RTSR_AL | RTSR_HZ) & (rtsr >> 2), info->rtsr);
 	} else {
 		/* For some reason, it is possible to enter this routine
 		 * without interrupts enabled; it has been tested with
@@ -81,13 +74,13 @@
 		 * This situation leads to an infinite "loop" of interrupt
 		 * routine calling and as a result the processor seems to
 		 * lock on its first call to open(). */
-		RTSR = RTSR_AL | RTSR_HZ;
+		writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
 	}
 
 	/* clear alarm interrupt if it has occurred */
 	if (rtsr & RTSR_AL)
 		rtsr &= ~RTSR_ALE;
-	RTSR = rtsr & (RTSR_ALE | RTSR_HZE);
+	writel_relaxed(rtsr & (RTSR_ALE | RTSR_HZE), info->rtsr);
 
 	/* update irq data & counter */
 	if (rtsr & RTSR_AL)
@@ -135,7 +128,7 @@
 	struct sa1100_rtc *info = dev_get_drvdata(dev);
 
 	spin_lock_irq(&info->lock);
-	RTSR = 0;
+	writel_relaxed(0, info->rtsr);
 	spin_unlock_irq(&info->lock);
 
 	free_irq(info->irq_alarm, dev);
@@ -144,39 +137,46 @@
 
 static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
+	u32 rtsr;
 	struct sa1100_rtc *info = dev_get_drvdata(dev);
 
 	spin_lock_irq(&info->lock);
+	rtsr = readl_relaxed(info->rtsr);
 	if (enabled)
-		RTSR |= RTSR_ALE;
+		rtsr |= RTSR_ALE;
 	else
-		RTSR &= ~RTSR_ALE;
+		rtsr &= ~RTSR_ALE;
+	writel_relaxed(rtsr, info->rtsr);
 	spin_unlock_irq(&info->lock);
 	return 0;
 }
 
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	rtc_time_to_tm(RCNR, tm);
+	struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+	rtc_time_to_tm(readl_relaxed(info->rcnr), tm);
 	return 0;
 }
 
 static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
+	struct sa1100_rtc *info = dev_get_drvdata(dev);
 	unsigned long time;
 	int ret;
 
 	ret = rtc_tm_to_time(tm, &time);
 	if (ret == 0)
-		RCNR = time;
+		writel_relaxed(time, info->rcnr);
 	return ret;
 }
 
 static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	u32	rtsr;
+	struct sa1100_rtc *info = dev_get_drvdata(dev);
 
-	rtsr = RTSR;
+	rtsr = readl_relaxed(info->rtsr);
 	alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
 	alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
 	return 0;
@@ -192,12 +192,13 @@
 	ret = rtc_tm_to_time(&alrm->time, &time);
 	if (ret != 0)
 		goto out;
-	RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
-	RTAR = time;
+	writel_relaxed(readl_relaxed(info->rtsr) &
+		(RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr);
+	writel_relaxed(time, info->rtar);
 	if (alrm->enabled)
-		RTSR |= RTSR_ALE;
+		writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr);
 	else
-		RTSR &= ~RTSR_ALE;
+		writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr);
 out:
 	spin_unlock_irq(&info->lock);
 
@@ -206,8 +207,10 @@
 
 static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-	seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
-	seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
+	struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+	seq_printf(seq, "trim/divider\t\t: 0x%08x\n", readl_relaxed(info->rttr));
+	seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", readl_relaxed(info->rtsr));
 
 	return 0;
 }
@@ -223,29 +226,18 @@
 	.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
-static int sa1100_rtc_probe(struct platform_device *pdev)
+int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
 {
 	struct rtc_device *rtc;
-	struct sa1100_rtc *info;
-	int irq_1hz, irq_alarm, ret = 0;
+	int ret;
 
-	irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
-	irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
-	if (irq_1hz < 0 || irq_alarm < 0)
-		return -ENODEV;
+	spin_lock_init(&info->lock);
 
-	info = devm_kzalloc(&pdev->dev, sizeof(struct sa1100_rtc), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
 	info->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
 		dev_err(&pdev->dev, "failed to find rtc clock source\n");
 		return PTR_ERR(info->clk);
 	}
-	info->irq_1hz = irq_1hz;
-	info->irq_alarm = irq_alarm;
-	spin_lock_init(&info->lock);
-	platform_set_drvdata(pdev, info);
 
 	ret = clk_prepare_enable(info->clk);
 	if (ret)
@@ -257,22 +249,19 @@
 	 * If the clock divider is uninitialized then reset it to the
 	 * default value to get the 1Hz clock.
 	 */
-	if (RTTR == 0) {
-		RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
+	if (readl_relaxed(info->rttr) == 0) {
+		writel_relaxed(RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16), info->rttr);
 		dev_warn(&pdev->dev, "warning: "
 			"initializing default clock divider/trim value\n");
 		/* The current RTC value probably doesn't make sense either */
-		RCNR = 0;
+		writel_relaxed(0, info->rcnr);
 	}
 
-	device_init_wakeup(&pdev->dev, 1);
-
 	rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
 					THIS_MODULE);
-
 	if (IS_ERR(rtc)) {
-		ret = PTR_ERR(rtc);
-		goto err_dev;
+		clk_disable_unprepare(info->clk);
+		return PTR_ERR(rtc);
 	}
 	info->rtc = rtc;
 
@@ -298,12 +287,52 @@
 	 *
 	 * Notice that clearing bits 1 and 0 is accomplished by writing ONES to
 	 * the corresponding bits in RTSR. */
-	RTSR = RTSR_AL | RTSR_HZ;
+	writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
 
 	return 0;
-err_dev:
-	clk_disable_unprepare(info->clk);
-	return ret;
+}
+EXPORT_SYMBOL_GPL(sa1100_rtc_init);
+
+static int sa1100_rtc_probe(struct platform_device *pdev)
+{
+	struct sa1100_rtc *info;
+	struct resource *iores;
+	void __iomem *base;
+	int irq_1hz, irq_alarm;
+
+	irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
+	irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
+	if (irq_1hz < 0 || irq_alarm < 0)
+		return -ENODEV;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct sa1100_rtc), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	info->irq_1hz = irq_1hz;
+	info->irq_alarm = irq_alarm;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
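+	/* the SA1100 and PXA variants use different register layouts */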
+	if (IS_ENABLED(CONFIG_ARCH_SA1100) ||
+	    of_device_is_compatible(pdev->dev.of_node, "mrvl,sa1100-rtc")) {
+		info->rcnr = base + 0x04;
+		info->rtsr = base + 0x10;
+		info->rtar = base + 0x00;
+		info->rttr = base + 0x08;
+	} else {
+		info->rcnr = base + 0x0;
+		info->rtsr = base + 0x8;
+		info->rtar = base + 0x4;
+		info->rttr = base + 0xc;
+	}
+
+	platform_set_drvdata(pdev, info);
+	device_init_wakeup(&pdev->dev, 1);
+
+	return sa1100_rtc_init(pdev, info);
 }
 
 static int sa1100_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sa1100.h b/drivers/rtc/rtc-sa1100.h
new file mode 100644
index 0000000..2c79c0c
--- /dev/null
+++ b/drivers/rtc/rtc-sa1100.h
@@ -0,0 +1,23 @@
+#ifndef __RTC_SA1100_H__
+#define __RTC_SA1100_H__
+
+#include <linux/kernel.h>
+
+struct clk;
+struct platform_device;
+
+struct sa1100_rtc {
+	spinlock_t		lock;
+	void __iomem		*rcnr;
+	void __iomem		*rtar;
+	void __iomem		*rtsr;
+	void __iomem		*rttr;
+	int			irq_1hz;
+	int			irq_alarm;
+	struct rtc_device	*rtc;
+	struct clk		*clk;
+};
+
+int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info);
+
+#endif
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index edc3b432..7367f61 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
 
 
@@ -48,12 +49,27 @@
 	/* Overflow for every 8 years extra time */
 	u32			overflow_rtc;
 	spinlock_t		lock;
+	struct regmap *regmap;
 #ifdef CONFIG_PM
 	u32		saved_counter;
 	u32		saved_overflow_rtc;
 #endif
 };
 
+static u32 sirfsoc_rtc_readl(struct sirfsoc_rtc_drv *rtcdrv, u32 offset)
+{
+	u32 val;
+
+	regmap_read(rtcdrv->regmap, rtcdrv->rtc_base + offset, &val);
+	return val;
+}
+
+static void sirfsoc_rtc_writel(struct sirfsoc_rtc_drv *rtcdrv,
+			       u32 offset, u32 val)
+{
+	regmap_write(rtcdrv->regmap, rtcdrv->rtc_base + offset, val);
+}
+
 static int sirfsoc_rtc_read_alarm(struct device *dev,
 		struct rtc_wkalrm *alrm)
 {
@@ -64,9 +80,9 @@
 
 	spin_lock_irq(&rtcdrv->lock);
 
-	rtc_count = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+	rtc_count = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
 
-	rtc_alarm = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_ALARM0);
+	rtc_alarm = sirfsoc_rtc_readl(rtcdrv, RTC_ALARM0);
 	memset(alrm, 0, sizeof(struct rtc_wkalrm));
 
 	/*
@@ -82,8 +98,7 @@
 		rtc_time_to_tm(rtcdrv->overflow_rtc
 				<< (BITS_PER_LONG - RTC_SHIFT)
 				| rtc_alarm >> RTC_SHIFT, &(alrm->time));
-	if (sirfsoc_rtc_iobrg_readl(
-			rtcdrv->rtc_base + RTC_STATUS) & SIRFSOC_RTC_AL0E)
+	if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E)
 		alrm->enabled = 1;
 
 	spin_unlock_irq(&rtcdrv->lock);
@@ -103,8 +118,7 @@
 
 		spin_lock_irq(&rtcdrv->lock);
 
-		rtc_status_reg = sirfsoc_rtc_iobrg_readl(
-				rtcdrv->rtc_base + RTC_STATUS);
+		rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
 		if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
 			/*
 			 * An ongoing alarm in progress - ignore it and not
@@ -113,8 +127,7 @@
 			dev_info(dev, "An old alarm was set, will be replaced by a new one\n");
 		}
 
-		sirfsoc_rtc_iobrg_writel(
-			rtc_alarm << RTC_SHIFT, rtcdrv->rtc_base + RTC_ALARM0);
+		sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, rtc_alarm << RTC_SHIFT);
 		rtc_status_reg &= ~0x07; /* mask out the lower status bits */
 		/*
 		 * This bit RTC_AL sets it as a wake-up source for Sleep Mode
@@ -123,8 +136,7 @@
 		rtc_status_reg |= SIRFSOC_RTC_AL0;
 		/* enable the RTC alarm interrupt */
 		rtc_status_reg |= SIRFSOC_RTC_AL0E;
-		sirfsoc_rtc_iobrg_writel(
-			rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+		sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
 
 		spin_unlock_irq(&rtcdrv->lock);
 	} else {
@@ -135,8 +147,7 @@
 		 */
 		spin_lock_irq(&rtcdrv->lock);
 
-		rtc_status_reg = sirfsoc_rtc_iobrg_readl(
-				rtcdrv->rtc_base + RTC_STATUS);
+		rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
 		if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
 			/* clear the RTC status register's alarm bit */
 			rtc_status_reg &= ~0x07;
@@ -145,8 +156,8 @@
 			/* Clear the Alarm enable bit */
 			rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
 
-			sirfsoc_rtc_iobrg_writel(rtc_status_reg,
-					rtcdrv->rtc_base + RTC_STATUS);
+			sirfsoc_rtc_writel(rtcdrv, RTC_STATUS,
+					   rtc_status_reg);
 		}
 
 		spin_unlock_irq(&rtcdrv->lock);
@@ -167,9 +178,9 @@
 	 * fail, read several times to make sure we get a stable value.
 	 */
 	do {
-		tmp_rtc = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+		tmp_rtc = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
 		cpu_relax();
-	} while (tmp_rtc != sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN));
+	} while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN));
 
 	rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) |
 					tmp_rtc >> RTC_SHIFT, tm);
@@ -187,10 +198,8 @@
 
 	rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT);
 
-	sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc,
-			rtcdrv->rtc_base + RTC_SW_VALUE);
-	sirfsoc_rtc_iobrg_writel(
-			rtc_time << RTC_SHIFT, rtcdrv->rtc_base + RTC_CN);
+	sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
+	sirfsoc_rtc_writel(rtcdrv, RTC_CN, rtc_time << RTC_SHIFT);
 
 	return 0;
 }
@@ -222,14 +231,13 @@
 
 	spin_lock_irq(&rtcdrv->lock);
 
-	rtc_status_reg = sirfsoc_rtc_iobrg_readl(
-				rtcdrv->rtc_base + RTC_STATUS);
+	rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
 	if (enabled)
 		rtc_status_reg |= SIRFSOC_RTC_AL0E;
 	else
 		rtc_status_reg &= ~SIRFSOC_RTC_AL0E;
 
-	sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+	sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
 
 	spin_unlock_irq(&rtcdrv->lock);
 
@@ -254,7 +262,7 @@
 
 	spin_lock(&rtcdrv->lock);
 
-	rtc_status_reg = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_STATUS);
+	rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
 	/* this bit will be set ONLY if an alarm was active
 	 * and it expired NOW
 	 * So this is being used as an ASSERT
@@ -270,7 +278,8 @@
 		/* Clear the Alarm enable bit */
 		rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
 	}
-	sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+
+	sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
 
 	spin_unlock(&rtcdrv->lock);
 
@@ -287,6 +296,13 @@
 	{ .compatible = "sirf,prima2-sysrtc"},
 	{},
 };
+
+static const struct regmap_config sysrtc_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.fast_io = true,
+};
+
 MODULE_DEVICE_TABLE(of, sirfsoc_rtc_of_match);
 
 static int sirfsoc_rtc_probe(struct platform_device *pdev)
@@ -314,27 +330,35 @@
 	/* Register rtc alarm as a wakeup source */
 	device_init_wakeup(&pdev->dev, 1);
 
+	rtcdrv->regmap = devm_regmap_init_iobg(&pdev->dev,
+			&sysrtc_regmap_config);
+	if (IS_ERR(rtcdrv->regmap)) {
+		err = PTR_ERR(rtcdrv->regmap);
+		dev_err(&pdev->dev, "Failed to allocate register map: %d\n",
+			err);
+		return err;
+	}
+
 	/*
 	 * Set the SYS_RTC counter in RTC_HZ units.
 	 * With the 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1;
 	 * for RTC_HZ = 16 this gives RTC_DIV = 1023.
 	 */
 	rtc_div = ((32768 / RTC_HZ) / 2) - 1;
-	sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
+	sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
 
 	/* 0x3 -> RTC_CLK */
-	sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
-			rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
+	sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
 
 	/* reset SYS RTC ALARM0 */
-	sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0);
+	sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
 
 	/* reset SYS RTC ALARM1 */
-	sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1);
+	sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
 
 	/* Restore RTC Overflow From Register After Command Reboot */
 	rtcdrv->overflow_rtc =
-		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
+		sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
 
 	rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
 			&sirfsoc_rtc_ops, THIS_MODULE);
@@ -372,10 +396,10 @@
 {
 	struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
 	rtcdrv->overflow_rtc =
-		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
+		sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
 
 	rtcdrv->saved_counter =
-		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+		sirfsoc_rtc_readl(rtcdrv, RTC_CN);
 	rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
 	if (device_may_wakeup(dev) && !enable_irq_wake(rtcdrv->irq))
 		rtcdrv->irq_wake = 1;
@@ -392,12 +416,10 @@
 	 * if resuming from a snapshot and the RTC power was lost,
 	 * restore the rtc settings
 	 */
-	if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl(
-			rtcdrv->rtc_base + RTC_CLOCK_SWITCH)) {
+	if (SIRFSOC_RTC_CLK != sirfsoc_rtc_readl(rtcdrv, RTC_CLOCK_SWITCH)) {
 		u32 rtc_div;
 		/* 0x3 -> RTC_CLK */
-		sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
-			rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
+		sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
 		/*
 		 * Set the SYS_RTC counter in RTC_HZ units.
 		 * With the 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1.
@@ -405,13 +427,13 @@
 		 */
 		rtc_div = ((32768 / RTC_HZ) / 2) - 1;
 
-		sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
+		sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
 
 		/* reset SYS RTC ALARM0 */
-		sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0);
+		sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
 
 		/* reset SYS RTC ALARM1 */
-		sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1);
+		sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
 	}
 	rtcdrv->overflow_rtc = rtcdrv->saved_overflow_rtc;
 
@@ -419,15 +441,14 @@
 	 * if the current counter is smaller than the previous one,
 	 * it means overflow in sleep
 	 */
-	tmp = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+	tmp = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
 	if (tmp <= rtcdrv->saved_counter)
 		rtcdrv->overflow_rtc++;
 	/*
 	 * The PWRC value may be changed during suspend, so restore the
 	 * overflow value from memory to the register.
 	 */
-	sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc,
-			rtcdrv->rtc_base + RTC_SW_VALUE);
+	sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
 
 	if (device_may_wakeup(dev) && rtcdrv->irq_wake) {
 		disable_irq_wake(rtcdrv->irq);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 3f9d0ac..74c0a33 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -208,7 +208,7 @@
 		return -EINVAL;
 	}
 
-	/* LPC can either run in RTC or WDT mode */
+	/* LPC can either run as a Clocksource or in RTC or WDT mode */
 	if (mode != ST_LPC_MODE_RTC)
 		return -ENODEV;
 
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 0e93b71..ba6a83b 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -254,7 +254,7 @@
 	void __iomem *ioaddr = pdata->ioaddr;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; count < size; count++)
 		*buf++ = readb(ioaddr + pos++);
 	return count;
 }
@@ -269,7 +269,7 @@
 	void __iomem *ioaddr = pdata->ioaddr;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; count < size; count++)
 		writeb(*buf++, ioaddr + pos++);
 	return count;
 }
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index babd43b..7273855 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -122,20 +122,8 @@
 }
 static DEVICE_ATTR_RO(hctosys);
 
-static struct attribute *rtc_attrs[] = {
-	&dev_attr_name.attr,
-	&dev_attr_date.attr,
-	&dev_attr_time.attr,
-	&dev_attr_since_epoch.attr,
-	&dev_attr_max_user_freq.attr,
-	&dev_attr_hctosys.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(rtc);
-
 static ssize_t
-rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
-		char *buf)
+wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	ssize_t retval;
 	unsigned long alarm;
@@ -159,7 +147,7 @@
 }
 
 static ssize_t
-rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
+wakealarm_store(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t n)
 {
 	ssize_t retval;
@@ -221,45 +209,57 @@
 	retval = rtc_set_alarm(rtc, &alm);
 	return (retval < 0) ? retval : n;
 }
-static DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR,
-		rtc_sysfs_show_wakealarm, rtc_sysfs_set_wakealarm);
+static DEVICE_ATTR_RW(wakealarm);
 
+static struct attribute *rtc_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_date.attr,
+	&dev_attr_time.attr,
+	&dev_attr_since_epoch.attr,
+	&dev_attr_max_user_freq.attr,
+	&dev_attr_hctosys.attr,
+	&dev_attr_wakealarm.attr,
+	NULL,
+};
 
 /* The reason to trigger an alarm with no process watching it (via sysfs)
  * is its side effect:  waking from a system state like suspend-to-RAM or
  * suspend-to-disk.  So: no attribute unless that side effect is possible.
  * (Userspace may disable that mechanism later.)
  */
-static inline int rtc_does_wakealarm(struct rtc_device *rtc)
+static bool rtc_does_wakealarm(struct rtc_device *rtc)
 {
 	if (!device_can_wakeup(rtc->dev.parent))
-		return 0;
+		return false;
+
 	return rtc->ops->set_alarm != NULL;
 }
 
-
-void rtc_sysfs_add_device(struct rtc_device *rtc)
+static umode_t rtc_attr_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int n)
 {
-	int err;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct rtc_device *rtc = to_rtc_device(dev);
+	umode_t mode = attr->mode;
 
-	/* not all RTCs support both alarms and wakeup */
-	if (!rtc_does_wakealarm(rtc))
-		return;
+	if (attr == &dev_attr_wakealarm.attr)
+		if (!rtc_does_wakealarm(rtc))
+			mode = 0;
 
-	err = device_create_file(&rtc->dev, &dev_attr_wakealarm);
-	if (err)
-		dev_err(rtc->dev.parent,
-			"failed to create alarm attribute, %d\n", err);
+	return mode;
 }
 
-void rtc_sysfs_del_device(struct rtc_device *rtc)
-{
-	/* REVISIT did we add it successfully? */
-	if (rtc_does_wakealarm(rtc))
-		device_remove_file(&rtc->dev, &dev_attr_wakealarm);
-}
+static struct attribute_group rtc_attr_group = {
+	.is_visible	= rtc_attr_is_visible,
+	.attrs		= rtc_attrs,
+};
 
-void __init rtc_sysfs_init(struct class *rtc_class)
+static const struct attribute_group *rtc_attr_groups[] = {
+	&rtc_attr_group,
+	NULL
+};
+
+const struct attribute_group **rtc_get_dev_attribute_groups(void)
 {
-	rtc_class->dev_groups = rtc_groups;
+	return rtc_attr_groups;
 }
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index cb7f94e..560d9a5 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -199,8 +199,7 @@
 	ssize_t count;
 
 	spin_lock_irq(&pdata->lock);
-	for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
-	     count++, size--) {
+	for (count = 0; count < size; count++) {
 		__raw_writel(pos++, &rtcreg->adr);
 		*buf++ = __raw_readl(&rtcreg->dat);
 	}
@@ -218,8 +217,7 @@
 	ssize_t count;
 
 	spin_lock_irq(&pdata->lock);
-	for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
-	     count++, size--) {
+	for (count = 0; count < size; count++) {
 		__raw_writel(pos++, &rtcreg->adr);
 		__raw_writel(*buf++, &rtcreg->dat);
 	}
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index a58b6d1..27e8969 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -271,6 +271,7 @@
 	{ .compatible = "via,vt8500-rtc", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, wmt_dt_ids);
 
 static struct platform_driver vt8500_rtc_driver = {
 	.probe		= vt8500_rtc_probe,
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
new file mode 100644
index 0000000..8b28762
--- /dev/null
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -0,0 +1,279 @@
+/*
+ * Xilinx Zynq Ultrascale+ MPSoC Real Time Clock Driver
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* RTC Registers */
+#define RTC_SET_TM_WR		0x00
+#define RTC_SET_TM_RD		0x04
+#define RTC_CALIB_WR		0x08
+#define RTC_CALIB_RD		0x0C
+#define RTC_CUR_TM		0x10
+#define RTC_CUR_TICK		0x14
+#define RTC_ALRM		0x18
+#define RTC_INT_STS		0x20
+#define RTC_INT_MASK		0x24
+#define RTC_INT_EN		0x28
+#define RTC_INT_DIS		0x2C
+#define RTC_CTRL		0x40
+
+#define RTC_FR_EN		BIT(20)
+#define RTC_FR_DATSHIFT		16
+#define RTC_TICK_MASK		0xFFFF
+#define RTC_INT_SEC		BIT(0)
+#define RTC_INT_ALRM		BIT(1)
+#define RTC_OSC_EN		BIT(24)
+
+#define RTC_CALIB_DEF		0x198233
+#define RTC_CALIB_MASK		0x1FFFFF
+#define RTC_SEC_MAX_VAL		0xFFFFFFFF
+
+struct xlnx_rtc_dev {
+	struct rtc_device	*rtc;
+	void __iomem		*reg_base;
+	int			alarm_irq;
+	int			sec_irq;
+};
+
+static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+	unsigned long new_time;
+
+	new_time = rtc_tm_to_time64(tm);
+
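+	/* the hardware seconds counter is only 32 bits wide */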
+	if (new_time > RTC_SEC_MAX_VAL)
+		return -EINVAL;
+
+	writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
+
+	return 0;
+}
+
+static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+	rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+
+	return rtc_valid_tm(tm);
+}
+
+static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+	rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_ALRM), &alrm->time);
+	alrm->enabled = readl(xrtcdev->reg_base + RTC_INT_MASK) & RTC_INT_ALRM;
+
+	return 0;
+}
+
+static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+	if (enabled)
+		writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
+	else
+		writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
+
+	return 0;
+}
+
+static int xlnx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+	unsigned long alarm_time;
+
+	alarm_time = rtc_tm_to_time64(&alrm->time);
+
+	if (alarm_time > RTC_SEC_MAX_VAL)
+		return -EINVAL;
+
+	writel((u32)alarm_time, (xrtcdev->reg_base + RTC_ALRM));
+
+	xlnx_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+	return 0;
+}
+
+static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev, u32 calibval)
+{
+	/*
+	 * Based on the crystal frequency of 33.330 KHz,
+	 * set and enable the seconds counter, and set the fraction
+	 * counter to the default value suggested by the design spec,
+	 * which corrects the RTC frequency drift over time.
+	 */
+	calibval &= RTC_CALIB_MASK;
+	writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+}
+
+static const struct rtc_class_ops xlnx_rtc_ops = {
+	.set_time	  = xlnx_rtc_set_time,
+	.read_time	  = xlnx_rtc_read_time,
+	.read_alarm	  = xlnx_rtc_read_alarm,
+	.set_alarm	  = xlnx_rtc_set_alarm,
+	.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
+{
+	struct xlnx_rtc_dev *xrtcdev = (struct xlnx_rtc_dev *)id;
+	unsigned int status;
+
+	status = readl(xrtcdev->reg_base + RTC_INT_STS);
+	/* Check if interrupt asserted */
+	if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
+		return IRQ_NONE;
+
+	/* Clear interrupt */
+	writel(status, xrtcdev->reg_base + RTC_INT_STS);
+
+	if (status & RTC_INT_SEC)
+		rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_UF);
+	if (status & RTC_INT_ALRM)
+		rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+	return IRQ_HANDLED;
+}
+
+static int xlnx_rtc_probe(struct platform_device *pdev)
+{
+	struct xlnx_rtc_dev *xrtcdev;
+	struct resource *res;
+	int ret;
+	unsigned int calibvalue;
+
+	xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
+	if (!xrtcdev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, xrtcdev);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xrtcdev->reg_base))
+		return PTR_ERR(xrtcdev->reg_base);
+
+	xrtcdev->alarm_irq = platform_get_irq_byname(pdev, "alarm");
+	if (xrtcdev->alarm_irq < 0) {
+		dev_err(&pdev->dev, "no irq resource\n");
+		return xrtcdev->alarm_irq;
+	}
+	ret = devm_request_irq(&pdev->dev, xrtcdev->alarm_irq,
+			       xlnx_rtc_interrupt, 0,
+			       dev_name(&pdev->dev), xrtcdev);
+	if (ret) {
+		dev_err(&pdev->dev, "request irq failed\n");
+		return ret;
+	}
+
+	xrtcdev->sec_irq = platform_get_irq_byname(pdev, "sec");
+	if (xrtcdev->sec_irq < 0) {
+		dev_err(&pdev->dev, "no irq resource\n");
+		return xrtcdev->sec_irq;
+	}
+	ret = devm_request_irq(&pdev->dev, xrtcdev->sec_irq,
+			       xlnx_rtc_interrupt, 0,
+			       dev_name(&pdev->dev), xrtcdev);
+	if (ret) {
+		dev_err(&pdev->dev, "request irq failed\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+				   &calibvalue);
+	if (ret)
+		calibvalue = RTC_CALIB_DEF;
+
+	xlnx_init_rtc(xrtcdev, calibvalue);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	xrtcdev->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+					 &xlnx_rtc_ops, THIS_MODULE);
+	return PTR_ERR_OR_ZERO(xrtcdev->rtc);
+}
+
+static int xlnx_rtc_remove(struct platform_device *pdev)
+{
+	xlnx_rtc_alarm_irq_enable(&pdev->dev, 0);
+	device_init_wakeup(&pdev->dev, 0);
+
+	return 0;
+}
+
+static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(xrtcdev->alarm_irq);
+	else
+		xlnx_rtc_alarm_irq_enable(dev, 0);
+
+	return 0;
+}
+
+static int __maybe_unused xlnx_rtc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(xrtcdev->alarm_irq);
+	else
+		xlnx_rtc_alarm_irq_enable(dev, 1);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xlnx_rtc_pm_ops, xlnx_rtc_suspend, xlnx_rtc_resume);
+
+static const struct of_device_id xlnx_rtc_of_match[] = {
+	{.compatible = "xlnx,zynqmp-rtc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xlnx_rtc_of_match);
+
+static struct platform_driver xlnx_rtc_driver = {
+	.probe		= xlnx_rtc_probe,
+	.remove		= xlnx_rtc_remove,
+	.driver		= {
+		.name	= KBUILD_MODNAME,
+		.pm	= &xlnx_rtc_pm_ops,
+		.of_match_table	= xlnx_rtc_of_match,
+	},
+};
+
+module_platform_driver(xlnx_rtc_driver);
+
+MODULE_DESCRIPTION("Xilinx Zynq MPSoC RTC driver");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 2b744fb..5ed44fe 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -29,7 +29,7 @@
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-				 void **kaddr, unsigned long *pfn, long size);
+			 void __pmem **kaddr, unsigned long *pfn);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -881,18 +881,20 @@
 
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-			void **kaddr, unsigned long *pfn, long size)
+			void __pmem **kaddr, unsigned long *pfn)
 {
 	struct dcssblk_dev_info *dev_info;
 	unsigned long offset, dev_sz;
+	void *addr;
 
 	dev_info = bdev->bd_disk->private_data;
 	if (!dev_info)
 		return -ENODEV;
 	dev_sz = dev_info->end - dev_info->start;
 	offset = secnum * 512;
-	*kaddr = (void *) (dev_info->start + offset);
-	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+	addr = (void *) (dev_info->start + offset);
+	*pfn = virt_to_phys(addr) >> PAGE_SHIFT;
+	*kaddr = (void __pmem *) addr;
 
 	return dev_sz - offset;
 }
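
The ->direct_access() prototype change above drops the caller-supplied size argument: the method now returns how many bytes are addressable at *kaddr (here, the remainder of the segment) and hands back a __pmem-annotated pointer. A hedged sketch of a caller under the new contract, with illustrative names:

#include <linux/blkdev.h>

static long example_map_dax(struct block_device *bdev, sector_t sector,
			    void __pmem **kaddr, unsigned long *pfn)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	long avail;

	/* returns bytes available at *kaddr, or a negative errno */
	avail = ops->direct_access(bdev, sector, kaddr, pfn);
	if (avail < 0)
		return avail;

	/* at most 'avail' bytes may be accessed through *kaddr */
	return avail;
}
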
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 01a7339..c00ac46 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
 	return port;
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index acde3f5..3fb4109 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -190,7 +190,7 @@
 		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
 			if (scsi_device_get(sdev))
 				return NULL;
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@
 		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		erp_action->port = port;
@@ -217,7 +217,7 @@
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@
 	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
 	if (!act)
 		goto out;
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
 		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
 		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
 static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
 		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@
 	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 			wake_up(&adapter->erp_done_wqh);
 	}
@@ -665,16 +665,16 @@
 	int sleep = 1;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
 
 	for (retries = 7; retries; retries--) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				  &adapter->status);
 		write_lock_irq(&adapter->erp_lock);
 		zfcp_erp_action_to_running(erp_action);
 		write_unlock_irq(&adapter->erp_lock);
 		if (zfcp_fsf_exchange_config_data(erp_action)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 					  &adapter->status);
 			return ZFCP_ERP_FAILED;
 		}
@@ -692,7 +692,7 @@
 		sleep *= 2;
 	}
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 			  &adapter->status);
 
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@
 	/* all ports and LUNs are closed */
 	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
@@ -773,7 +773,7 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	if (zfcp_qdio_open(adapter->qdio)) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
 		return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@
 		return ZFCP_ERP_FAILED;
 	}
 
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
 
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -948,7 +948,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			  &zfcp_sdev->status);
 }
 
@@ -1187,18 +1187,18 @@
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &zfcp_sdev->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->port->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->adapter->status);
 		break;
 	}
@@ -1422,19 +1422,19 @@
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
-	atomic_set_mask(mask, &adapter->status);
+	atomic_or(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
-		atomic_set_mask(common_mask, &port->status);
+		atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
-		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
@@ -1453,7 +1453,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 
-	atomic_clear_mask(mask, &adapter->status);
+	atomic_andnot(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
@@ -1463,7 +1463,7 @@
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list) {
-		atomic_clear_mask(common_mask, &port->status);
+		atomic_andnot(common_mask, &port->status);
 		if (clear_counter)
 			atomic_set(&port->erp_counter, 0);
 	}
@@ -1471,7 +1471,7 @@
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
-		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
@@ -1491,7 +1491,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	unsigned long flags;
 
-	atomic_set_mask(mask, &port->status);
+	atomic_or(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1499,7 +1499,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			atomic_set_mask(common_mask,
+			atomic_or(common_mask,
 					&sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
@@ -1518,7 +1518,7 @@
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 	unsigned long flags;
 
-	atomic_clear_mask(mask, &port->status);
+	atomic_andnot(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1529,7 +1529,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
-			atomic_clear_mask(common_mask,
+			atomic_andnot(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
@@ -1546,7 +1546,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_set_mask(mask, &zfcp_sdev->status);
+	atomic_or(mask, &zfcp_sdev->status);
 }
 
 /**
@@ -1558,7 +1558,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(mask, &zfcp_sdev->status);
+	atomic_andnot(mask, &zfcp_sdev->status);
 
 	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 		atomic_set(&zfcp_sdev->erp_counter, 0);
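
All of the conversions above are mechanical: the s390-private atomic_set_mask()/atomic_clear_mask() helpers are replaced by the generic atomic_or()/atomic_andnot() from <linux/atomic.h>, with the same mask-first argument order. A short sketch of the mapping:

#include <linux/atomic.h>
#include <linux/bitops.h>

static atomic_t example_status = ATOMIC_INIT(0);

static void example_flags(void)
{
	atomic_or(BIT(0) | BIT(1), &example_status);	/* was atomic_set_mask()   */
	atomic_andnot(BIT(1), &example_status);		/* was atomic_clear_mask() */
	/* example_status now reads BIT(0) */
}
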
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 25d49f3..237688a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -508,7 +508,7 @@
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
  out:
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
@@ -564,14 +564,14 @@
 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
 		goto out;
 
-	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 
 	/* send of ADISC was not possible */
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
@@ -640,7 +640,7 @@
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
 
 	if ((port->supported_classes != 0) ||
 	    !list_empty(&port->unit_list))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4ac73e0..522a633 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -114,7 +114,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
 	zfcp_scsi_schedule_rports_block(adapter);
 
@@ -345,7 +345,7 @@
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				&adapter->status);
 		break;
 	case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@
 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@
 
 		/* avoids adapter shutdown to be able to recognize
 		 * events such as LINK UP */
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
 		                  &port->status);
 		/* check whether D_ID has changed during open */
 		/*
@@ -1677,10 +1677,10 @@
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		break;
 	}
@@ -1766,7 +1766,7 @@
 
 	zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			  &zfcp_sdev->status);
 
@@ -1822,7 +1822,7 @@
 
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
@@ -1913,7 +1913,7 @@
 		}
 		break;
 	case FSF_GOOD:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 495e1cb..dbf2b547 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -349,7 +349,7 @@
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	spin_lock_irq(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 			  &qdio->adapter->status);
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@
 		goto failed_qdio;
 
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
 				&qdio->adapter->status);
 
 	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
 	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
 	}
 
@@ -427,7 +427,7 @@
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	if (adapter->scsi_host) {
 		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@
 
 	rc = ccw_device_siosl(adapter->ccw_device);
 	if (!rc)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 				&adapter->status);
 }
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 31e8576..f6c336b 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -100,12 +100,7 @@
 				   pci_name(asd_ha->pcidev));
 			goto Err;
 		}
-		if (io_handle->flags & IORESOURCE_CACHEABLE)
-			io_handle->addr = ioremap(io_handle->start,
-						  io_handle->len);
-		else
-			io_handle->addr = ioremap_nocache(io_handle->start,
-							  io_handle->len);
+		io_handle->addr = ioremap(io_handle->start, io_handle->len);
 		if (!io_handle->addr) {
 			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
 				   pci_name(asd_ha->pcidev));
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 6ac74fb..333db59 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -259,10 +259,7 @@
 		addr = (unsigned long)pci_resource_start(pdev, 0);
 		range = pci_resource_len(pdev, 0);
 		flags = pci_resource_flags(pdev, 0);
-		if (flags & IORESOURCE_CACHEABLE)
-			mem_base0 = ioremap(addr, range);
-		else
-			mem_base0 = ioremap_nocache(addr, range);
+		mem_base0 = ioremap(addr, range);
 		if (!mem_base0) {
 			pr_notice("arcmsr%d: memory mapping region fail\n",
 				acb->host->host_no);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index f466a6a..e2d555c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -324,13 +324,9 @@
 			goto err_out;
 
 		res_flag_ex = pci_resource_flags(pdev, bar_ex);
-		if (res_flag_ex & IORESOURCE_MEM) {
-			if (res_flag_ex & IORESOURCE_CACHEABLE)
-				mvi->regs_ex = ioremap(res_start, res_len);
-			else
-				mvi->regs_ex = ioremap_nocache(res_start,
-						res_len);
-		} else
+		if (res_flag_ex & IORESOURCE_MEM)
+			mvi->regs_ex = ioremap(res_start, res_len);
+		else
 			mvi->regs_ex = (void *)res_start;
 		if (!mvi->regs_ex)
 			goto err_out;
@@ -345,10 +341,7 @@
 	}
 
 	res_flag = pci_resource_flags(pdev, bar);
-	if (res_flag & IORESOURCE_CACHEABLE)
-		mvi->regs = ioremap(res_start, res_len);
-	else
-		mvi->regs = ioremap_nocache(res_start, res_len);
+	mvi->regs = ioremap(res_start, res_len);
 
 	if (!mvi->regs) {
 		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
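
These three hunks rely on the fact that ioremap() already returns an uncached mapping on the architectures the drivers build for, so the IORESOURCE_CACHEABLE branch selecting ioremap_nocache() was dead code. A sketch of the simplified BAR-mapping path, with an illustrative helper name:

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	/* ioremap() is uncached by default; no need to inspect
	 * IORESOURCE_CACHEABLE and pick ioremap_nocache() by hand */
	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
}
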
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index e26e81d..d50c5ed8 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -12,9 +12,9 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #include <asm/sun3x.h>
-#include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/dvma.h>
 
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 0768bc42..14ef1f6 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -28,6 +28,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/io.h>
 #include "../comedidev.h"
 
 /*
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 8bc68e2..fb55e59 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -342,14 +342,6 @@
 		.deemphasis		= 50,
 		.region			= 3,
 	},
-	/* Japan wide band */
-	{
-		.channel_spacing	= 10,
-		.bottom_frequency	= 76000,
-		.top_frequency		= 108000,
-		.deemphasis		= 50,
-		.region			= 4,
-	},
 };
 
 /*
@@ -741,6 +733,18 @@
 
 	mutex_lock(&bdev->mutex);
 	bdev->region_info = region_configs[region];
+
+	if (region_configs[region].bottom_frequency < 87500)
+		bdev->cache_fm_ctrl |= BCM2048_BAND_SELECT;
+	else
+		bdev->cache_fm_ctrl &= ~BCM2048_BAND_SELECT;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+					bdev->cache_fm_ctrl);
+	if (err) {
+		mutex_unlock(&bdev->mutex);
+		goto done;
+	}
 	mutex_unlock(&bdev->mutex);
 
 	if (bdev->frequency < region_configs[region].bottom_frequency ||
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 62ec9f7..534b810 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -739,7 +739,7 @@
 
 		ep = &iface_desc->endpoint[i].desc;
 		ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
-		ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+		ep_type = usb_endpoint_type(ep);
 
 		if (!ir_ep_found &&
 			ep_dir == USB_DIR_IN &&
@@ -785,13 +785,13 @@
 	}
 
 	driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
-	if (!driver) {
+	if (!driver)
 		goto free_context;
-	}
+
 	rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
-	if (!rbuf) {
+	if (!rbuf)
 		goto free_driver;
-	}
+
 	if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
 		dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
 		goto free_rbuf;
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 9e56743..b247649a 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -184,7 +184,7 @@
 		       __func__, retval);
 	else
 		dev_info(&context->dev->dev,
-		         "Deregistered Sasem driver (minor:%d)\n", minor);
+			 "Deregistered Sasem driver (minor:%d)\n", minor);
 
 }
 
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 261e27d..ce3b5f2 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1364,10 +1364,10 @@
 	{ "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR              },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, ir_transceiver_id);
 
 static struct i2c_driver driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "Zilog/Hauppauge i2c IR",
 	},
 	.probe		= ir_probe,
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index a8d45f4..cf2e96b 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -561,7 +561,6 @@
 
 static struct i2c_driver mn88472_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "mn88472",
 	},
 	.probe		= mn88472_probe,
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
index f9146a1..a222e99 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -507,7 +507,6 @@
 
 static struct i2c_driver mn88473_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "mn88473",
 	},
 	.probe		= mn88473_probe,
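
Dropping .owner from these i2c_driver definitions is safe because the i2c core fills it in: module_i2c_driver() expands to i2c_add_driver(), which passes THIS_MODULE into i2c_register_driver(). A minimal skeleton with illustrative names:

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	return 0;
}

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",	/* no .owner: the i2c core sets it */
	},
	.probe = example_probe,
};
module_i2c_driver(example_driver);	/* registers with THIS_MODULE */
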
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 072dac0..8d4e3bd 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,5 +1,5 @@
 config VIDEO_OMAP4
-	bool "OMAP 4 Camera support"
+	tristate "OMAP 4 Camera support"
 	depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
 	depends on HAS_DMA
 	select MFD_SYSCON
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
index fcde888..4d220ef 100644
--- a/drivers/staging/media/omap4iss/TODO
+++ b/drivers/staging/media/omap4iss/TODO
@@ -1,4 +1,3 @@
-* Make the driver compile as a module
 * Fix FIFO/buffer overflows and underflows
 * Replace dummy resizer code with a real implementation
 * Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 85c54fe..40405d8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -640,76 +640,6 @@
 }
 
 static int
-iss_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
-{
-	struct iss_video *video = video_drvdata(file);
-	struct v4l2_subdev *subdev;
-	int ret;
-
-	subdev = iss_video_remote_subdev(video, NULL);
-	if (subdev == NULL)
-		return -EINVAL;
-
-	mutex_lock(&video->mutex);
-	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
-	mutex_unlock(&video->mutex);
-
-	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
-iss_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
-{
-	struct iss_video *video = video_drvdata(file);
-	struct v4l2_subdev_format format;
-	struct v4l2_subdev *subdev;
-	u32 pad;
-	int ret;
-
-	subdev = iss_video_remote_subdev(video, &pad);
-	if (subdev == NULL)
-		return -EINVAL;
-
-	/* Try the get crop operation first and fallback to get format if not
-	 * implemented.
-	 */
-	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
-	if (ret != -ENOIOCTLCMD)
-		return ret;
-
-	format.pad = pad;
-	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
-	if (ret < 0)
-		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-
-	crop->c.left = 0;
-	crop->c.top = 0;
-	crop->c.width = format.format.width;
-	crop->c.height = format.format.height;
-
-	return 0;
-}
-
-static int
-iss_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
-{
-	struct iss_video *video = video_drvdata(file);
-	struct v4l2_subdev *subdev;
-	int ret;
-
-	subdev = iss_video_remote_subdev(video, NULL);
-	if (subdev == NULL)
-		return -EINVAL;
-
-	mutex_lock(&video->mutex);
-	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
-	mutex_unlock(&video->mutex);
-
-	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
 iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
 {
 	struct iss_video_fh *vfh = to_iss_video_fh(fh);
@@ -1018,9 +948,6 @@
 	.vidioc_g_fmt_vid_out		= iss_video_get_format,
 	.vidioc_s_fmt_vid_out		= iss_video_set_format,
 	.vidioc_try_fmt_vid_out		= iss_video_try_format,
-	.vidioc_cropcap			= iss_video_cropcap,
-	.vidioc_g_crop			= iss_video_get_crop,
-	.vidioc_s_crop			= iss_video_set_crop,
 	.vidioc_g_parm			= iss_video_get_param,
 	.vidioc_s_parm			= iss_video_set_param,
 	.vidioc_reqbufs			= iss_video_reqbufs,
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 22853d3..d1a33a9 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -70,7 +70,14 @@
  */
 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
 {
-	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		port = work->word0.pip.cn68xx.pknd;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
 		/*
 		 * Ignore length errors on min size packets. Some
 		 * equipment incorrectly pads packets to 64+4FCS
@@ -87,8 +94,8 @@
 		 * packet to determine if we can remove a non spec
 		 * preamble and generate a correct packet.
 		 */
-		int interface = cvmx_helper_get_interface_num(work->ipprt);
-		int index = cvmx_helper_get_interface_index_num(work->ipprt);
+		int interface = cvmx_helper_get_interface_num(port);
+		int index = cvmx_helper_get_interface_index_num(port);
 		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
 
 		gmxx_rxx_frm_ctl.u64 =
@@ -99,7 +106,7 @@
 			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
 			int i = 0;
 
-			while (i < work->len - 1) {
+			while (i < work->word1.len - 1) {
 				if (*ptr != 0x55)
 					break;
 				ptr++;
@@ -109,18 +116,18 @@
 			if (*ptr == 0xd5) {
 				/*
 				  printk_ratelimited("Port %d received 0xd5 preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i + 1;
-				work->len -= i + 5;
+				work->word1.len -= i + 5;
 			} else if ((*ptr & 0xf) == 0xd) {
 				/*
 				  printk_ratelimited("Port %d received 0x?d preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i;
-				work->len -= i + 4;
-				for (i = 0; i < work->len; i++) {
+				work->word1.len -= i + 4;
+				for (i = 0; i < work->word1.len; i++) {
 					*ptr =
 					    ((*ptr & 0xf0) >> 4) |
 					    ((*(ptr + 1) & 0xf) << 4);
@@ -128,7 +135,7 @@
 				}
 			} else {
 				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
-						   work->ipprt);
+						   port);
 				/*
 				   cvmx_helper_dump_packet(work);
 				 */
@@ -138,7 +145,7 @@
 		}
 	} else {
 		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
-				   work->ipprt, work->word2.snoip.err_code);
+				   port, work->word2.snoip.err_code);
 		cvm_oct_free_work(work);
 		return 1;
 	}
@@ -172,9 +179,16 @@
 	}
 
 	/* Only allow work for our group (and preserve priorities) */
-	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
-		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
+				1ull << pow_receive_group);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+			(old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	}
 
 	if (USE_ASYNC_IOBDMA) {
 		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
@@ -186,6 +200,7 @@
 		struct sk_buff **pskb = NULL;
 		int skb_in_hw;
 		cvmx_wqe_t *work;
+		int port;
 
 		if (USE_ASYNC_IOBDMA && did_work_request)
 			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
@@ -195,12 +210,19 @@
 		prefetch(work);
 		did_work_request = 0;
 		if (work == NULL) {
-			union cvmx_pow_wq_int wq_int;
+			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
+					       1ull << pow_receive_group);
+				cvmx_write_csr(CVMX_SSO_WQ_INT,
+					       1ull << pow_receive_group);
+			} else {
+				union cvmx_pow_wq_int wq_int;
 
-			wq_int.u64 = 0;
-			wq_int.s.iq_dis = 1 << pow_receive_group;
-			wq_int.s.wq_int = 1 << pow_receive_group;
-			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+				wq_int.u64 = 0;
+				wq_int.s.iq_dis = 1 << pow_receive_group;
+				wq_int.s.wq_int = 1 << pow_receive_group;
+				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+			}
 			break;
 		}
 		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
@@ -220,7 +242,13 @@
 			prefetch(&skb->head);
 			prefetch(&skb->len);
 		}
-		prefetch(cvm_oct_device[work->ipprt]);
+
+		if (octeon_has_feature(OCTEON_FEATURE_PKND))
+			port = work->word0.pip.cn68xx.pknd;
+		else
+			port = work->word1.cn38xx.ipprt;
+
+		prefetch(cvm_oct_device[port]);
 
 		/* Immediately throw away all packets with receive errors */
 		if (unlikely(work->word2.snoip.rcv_error)) {
@@ -237,7 +265,7 @@
 			skb->data = skb->head + work->packet_ptr.s.addr -
 				cvmx_ptr_to_phys(skb->head);
 			prefetch(skb->data);
-			skb->len = work->len;
+			skb->len = work->word1.len;
 			skb_set_tail_pointer(skb, skb->len);
 			packet_not_copied = 1;
 		} else {
@@ -245,7 +273,7 @@
 			 * We have to copy the packet. First allocate
 			 * an skbuff for it.
 			 */
-			skb = dev_alloc_skb(work->len);
+			skb = dev_alloc_skb(work->word1.len);
 			if (!skb) {
 				cvm_oct_free_work(work);
 				continue;
@@ -268,13 +296,14 @@
 					else
 						ptr += 6;
 				}
-				memcpy(skb_put(skb, work->len), ptr, work->len);
+				memcpy(skb_put(skb, work->word1.len), ptr,
+				       work->word1.len);
 				/* No packet buffers to free */
 			} else {
 				int segments = work->word2.s.bufs;
 				union cvmx_buf_ptr segment_ptr =
 				    work->packet_ptr;
-				int len = work->len;
+				int len = work->word1.len;
 
 				while (segments--) {
 					union cvmx_buf_ptr next_ptr =
@@ -310,10 +339,9 @@
 			}
 			packet_not_copied = 0;
 		}
-
-		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
-			   cvm_oct_device[work->ipprt])) {
-			struct net_device *dev = cvm_oct_device[work->ipprt];
+		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
+			   cvm_oct_device[port])) {
+			struct net_device *dev = cvm_oct_device[port];
 			struct octeon_ethernet *priv = netdev_priv(dev);
 
 			/*
@@ -333,7 +361,7 @@
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 				/* Increment RX stats for virtual ports */
-				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
 #ifdef CONFIG_64BIT
 					atomic64_add(1,
 						     (atomic64_t *)&priv->stats.rx_packets);
@@ -368,7 +396,7 @@
 			 * doesn't exist.
 			 */
 			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
-				   work->ipprt);
+				   port);
 			dev_kfree_skb_irq(skb);
 		}
 		/*
@@ -390,7 +418,13 @@
 		}
 	}
 	/* Restore the original POW group mask */
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	}
+
 	if (USE_ASYNC_IOBDMA) {
 		/* Restore the scratch area */
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
@@ -422,8 +456,6 @@
 {
 	int i;
 	struct net_device *dev_for_napi = NULL;
-	union cvmx_pow_wq_int_thrx int_thr;
-	union cvmx_pow_wq_int_pc int_pc;
 
 	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
 		if (cvm_oct_device[i]) {
@@ -449,15 +481,34 @@
 
 	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
 
-	int_thr.u64 = 0;
-	int_thr.s.tc_en = 1;
-	int_thr.s.tc_thr = 1;
 	/* Enable POW interrupt when our port has at least one packet */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_sso_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
 
-	int_pc.u64 = 0;
-	int_pc.s.pc_thr = 5;
-	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+	} else {
+		union cvmx_pow_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
+
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+	}
 
 	/* Schedule NAPI now. This will indirectly enable the interrupt. */
 	napi_schedule(&cvm_oct_napi);
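
The CN68XX paths above exist because that model replaces the POW with the SSO: the group mask lives in CVMX_SSO_PPX_GRP_MSK rather than CVMX_POW_PP_GRP_MSKX, and the input port comes from the packet-kind (pknd) field instead of ipprt. The port lookup repeated through the hunks could be captured in one helper; a hedged sketch using the SDK types the driver already includes:

static inline int example_wqe_port(cvmx_wqe_t *work)
{
	/* CN68XX numbers inputs by packet kind, older models by IPD port */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return work->word0.pip.cn68xx.pknd;

	return work->word1.cn38xx.ipprt;
}
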
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index e2df041..9e2116f 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -591,13 +591,14 @@
 	 * Fill in some of the work queue fields. We may need to add
 	 * more if the software at the other end needs them.
 	 */
-	work->hw_chksum = skb->csum;
-	work->len = skb->len;
-	work->ipprt = priv->port;
-	work->qos = priv->port & 0x7;
-	work->grp = pow_send_group;
-	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
-	work->tag = pow_send_group;	/* FIXME */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+		work->word0.pip.cn38xx.hw_chksum = skb->csum;
+	work->word1.len = skb->len;
+	cvmx_wqe_set_port(work, priv->port);
+	cvmx_wqe_set_qos(work, priv->port & 0x7);
+	cvmx_wqe_set_grp(work, pow_send_group);
+	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	work->word1.tag = pow_send_group;	/* FIXME */
 	/* Default to zero. Sets of zero later are commented out */
 	work->word2.u64 = 0;
 	work->word2.s.bufs = 1;
@@ -677,8 +678,8 @@
 	}
 
 	/* Submit the packet to the POW */
-	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
-			     work->grp);
+	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
+			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
 	priv->stats.tx_packets++;
 	priv->stats.tx_bytes += skb->len;
 	dev_consume_skb_any(skb);
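
On the transmit side the raw WQE field stores are replaced by the cvmx_wqe_set_*()/cvmx_wqe_get_*() accessors, which hide the layout difference between CN68XX and older work-queue entries. A sketch of filling a WQE through the accessors, assuming the SDK headers the driver already uses:

static void example_fill_wqe(cvmx_wqe_t *work, int port, int group)
{
	cvmx_wqe_set_port(work, port);		/* picks the right field for the model */
	cvmx_wqe_set_qos(work, port & 0x7);
	cvmx_wqe_set_grp(work, group);
}
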
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 1ba789a..45f024b 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -8,6 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
 /**
  * cvm_oct_get_buffer_ptr - convert packet data address to pointer
  * @packet_ptr: Packet data hardware address
@@ -28,14 +32,12 @@
  */
 static inline int INTERFACE(int ipd_port)
 {
-	if (ipd_port < 32)	/* Interface 0 or 1 for RGMII,GMII,SPI, etc */
-		return ipd_port >> 4;
-	else if (ipd_port < 36)	/* Interface 2 for NPI */
-		return 2;
-	else if (ipd_port < 40)	/* Interface 3 for loopback */
-		return 3;
-	else if (ipd_port == 40)	/* Non existent interface for POW0 */
-		return 4;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+
+	if (interface >= 0)
+		return interface;
+	else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+		return 10;
 	panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
 }
 
@@ -47,7 +49,5 @@
  */
 static inline int INDEX(int ipd_port)
 {
-	if (ipd_port < 32)
-		return ipd_port & 15;
-	return ipd_port & 3;
+	return cvmx_helper_get_interface_index_num(ipd_port);
 }
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 0718b35..7274fda 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -152,7 +152,7 @@
 			     num_packet_buffers);
 	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
 		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
-				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
 
 #ifdef __LITTLE_ENDIAN
 	{
@@ -860,7 +860,10 @@
 	int port;
 
 	/* Disable POW interrupt */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
+	else
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
 
 	cvmx_ipd_disable();
 
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 6da7e49..2693c46 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/uuid.h>
+#include <linux/io.h>
 
 #include "version.h"
 #include "visorbus.h"
@@ -35,7 +36,7 @@
 struct visorchannel {
 	u64 physaddr;
 	ulong nbytes;
-	void __iomem *mapped;
+	void *mapped;
 	bool requested;
 	struct channel_header chan_hdr;
 	uuid_le guid;
@@ -92,7 +93,7 @@
 		}
 	}
 
-	channel->mapped = ioremap_cache(physaddr, size);
+	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
 	if (!channel->mapped) {
 		release_mem_region(physaddr, size);
 		goto cleanup;
@@ -112,7 +113,7 @@
 	if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
 		guid = channel->chan_hdr.chtype;
 
-	iounmap(channel->mapped);
+	memunmap(channel->mapped);
 	if (channel->requested)
 		release_mem_region(channel->physaddr, channel->nbytes);
 	channel->mapped = NULL;
@@ -125,7 +126,8 @@
 		}
 	}
 
-	channel->mapped = ioremap_cache(channel->physaddr, channel_bytes);
+	channel->mapped = memremap(channel->physaddr, channel_bytes,
+			MEMREMAP_WB);
 	if (!channel->mapped) {
 		release_mem_region(channel->physaddr, channel_bytes);
 		goto cleanup;
@@ -166,7 +168,7 @@
 	if (!channel)
 		return;
 	if (channel->mapped) {
-		iounmap(channel->mapped);
+		memunmap(channel->mapped);
 		if (channel->requested)
 			release_mem_region(channel->physaddr, channel->nbytes);
 	}
@@ -240,7 +242,7 @@
 	if (offset + nbytes > channel->nbytes)
 		return -EIO;
 
-	memcpy_fromio(local, channel->mapped + offset, nbytes);
+	memcpy(local, channel->mapped + offset, nbytes);
 
 	return 0;
 }
@@ -262,7 +264,7 @@
 		       local, copy_size);
 	}
 
-	memcpy_toio(channel->mapped + offset, local, nbytes);
+	memcpy(channel->mapped + offset, local, nbytes);
 
 	return 0;
 }
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 4b76cb4..94419c3 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -118,7 +118,7 @@
 
 /* Manages the request payload in the controlvm channel */
 struct visor_controlvm_payload_info {
-	u8 __iomem *ptr;	/* pointer to base address of payload pool */
+	u8 *ptr;		/* pointer to base address of payload pool */
 	u64 offset;		/* offset from beginning of controlvm
 				 * channel to beginning of payload * pool */
 	u32 bytes;		/* number of bytes in payload pool */
@@ -400,21 +400,22 @@
 		p = __va((unsigned long) (addr));
 		memcpy(ctx->data, p, bytes);
 	} else {
-		void __iomem *mapping;
+		void *mapping;
 
 		if (!request_mem_region(addr, bytes, "visorchipset")) {
 			rc = NULL;
 			goto cleanup;
 		}
 
-		mapping = ioremap_cache(addr, bytes);
+		mapping = memremap(addr, bytes, MEMREMAP_WB);
 		if (!mapping) {
 			release_mem_region(addr, bytes);
 			rc = NULL;
 			goto cleanup;
 		}
-		memcpy_fromio(ctx->data, mapping, bytes);
+		memcpy(ctx->data, mapping, bytes);
 		release_mem_region(addr, bytes);
+		memunmap(mapping);
 	}
 
 	ctx->byte_stream = true;
@@ -1327,7 +1328,7 @@
 initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 				  struct visor_controlvm_payload_info *info)
 {
-	u8 __iomem *payload = NULL;
+	u8 *payload = NULL;
 	int rc = CONTROLVM_RESP_SUCCESS;
 
 	if (!info) {
@@ -1339,7 +1340,7 @@
 		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
 		goto cleanup;
 	}
-	payload = ioremap_cache(phys_addr + offset, bytes);
+	payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
 	if (!payload) {
 		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
 		goto cleanup;
@@ -1352,7 +1353,7 @@
 cleanup:
 	if (rc < 0) {
 		if (payload) {
-			iounmap(payload);
+			memunmap(payload);
 			payload = NULL;
 		}
 	}
@@ -1363,7 +1364,7 @@
 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
 {
 	if (info->ptr) {
-		iounmap(info->ptr);
+		memunmap(info->ptr);
 		info->ptr = NULL;
 	}
 	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
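
Both unisys files switch from ioremap_cache() to memremap(..., MEMREMAP_WB) because the channel memory is ordinary RAM, not MMIO: memremap() hands back a plain pointer (no __iomem), so ordinary memcpy() replaces memcpy_fromio()/memcpy_toio(), and memunmap() pairs with it. A hedged sketch of the idiom, with illustrative names:

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_read_region(phys_addr_t addr, size_t bytes, void *buf)
{
	void *va = memremap(addr, bytes, MEMREMAP_WB);	/* plain pointer */

	if (!va)
		return -ENOMEM;

	memcpy(buf, va, bytes);		/* no memcpy_fromio() needed */
	memunmap(va);

	return 0;
}
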
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 76c515d..88c759d 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -214,7 +214,7 @@
 
 	sensor->ops = sensor->cdata->ops;
 
-	ret = sensor->ops->regmap_init(sensor);
+	ret = (sensor->ops->regmap_init)(sensor);
 	if (ret)
 		return ret;
 
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 4190199..a75146f 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -240,9 +240,9 @@
 {
 	struct hvsi_control *header = (struct hvsi_control *)packet;
 
-	switch (header->verb) {
+	switch (be16_to_cpu(header->verb)) {
 		case VSV_MODEM_CTL_UPDATE:
-			if ((header->word & HVSI_TSCD) == 0) {
+			if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
 				/* CD went away; no more connection */
 				pr_debug("hvsi%i: CD dropped\n", hp->index);
 				hp->mctrl &= TIOCM_CD;
@@ -267,6 +267,7 @@
 static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
 {
 	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
+	uint32_t mctrl_word;
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_RESPONSE:
@@ -274,9 +275,10 @@
 			break;
 		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
 			hp->mctrl = 0;
-			if (resp->u.mctrl_word & HVSI_TSDTR)
+			mctrl_word = be32_to_cpu(resp->u.mctrl_word);
+			if (mctrl_word & HVSI_TSDTR)
 				hp->mctrl |= TIOCM_DTR;
-			if (resp->u.mctrl_word & HVSI_TSCD)
+			if (mctrl_word & HVSI_TSCD)
 				hp->mctrl |= TIOCM_CD;
 			__set_state(hp, HVSI_OPEN);
 			break;
@@ -295,10 +297,10 @@
 
 	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query_response);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = VSV_SEND_VERSION_NUMBER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
 	packet.u.version = HVSI_VERSION;
-	packet.query_seqno = query_seqno+1;
+	packet.query_seqno = cpu_to_be16(query_seqno+1);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -319,7 +321,7 @@
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_QUERY:
-			hvsi_version_respond(hp, query->hdr.seqno);
+			hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
 			__set_state(hp, HVSI_OPEN);
 			break;
 		default:
@@ -555,8 +557,8 @@
 
 	packet.hdr.type = VS_QUERY_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = verb;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(verb);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -596,14 +598,14 @@
 	struct hvsi_control packet __ALIGNED__;
 	int wrote;
 
-	packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = sizeof(struct hvsi_control);
-	packet.verb = VSV_SET_MODEM_CTL;
-	packet.mask = HVSI_TSDTR;
+	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+	packet.mask = cpu_to_be32(HVSI_TSDTR);
 
 	if (mctrl & TIOCM_DTR)
-		packet.word = HVSI_TSDTR;
+		packet.word = cpu_to_be32(HVSI_TSDTR);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -680,7 +682,7 @@
 	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
 
 	packet.hdr.type = VS_DATA_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = count + sizeof(struct hvsi_header);
 	memcpy(&packet.data, buf, count);
 
@@ -697,9 +699,9 @@
 	struct hvsi_control packet __ALIGNED__;
 
 	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = 6;
-	packet.verb = VSV_CLOSE_PROTOCOL;
+	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -1180,7 +1182,7 @@
 	/* search device tree for vty nodes */
 	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
 		struct hvsi_struct *hp;
-		const uint32_t *vtermno, *irq;
+		const __be32 *vtermno, *irq;
 
 		vtermno = of_get_property(vty, "reg", NULL);
 		irq = of_get_property(vty, "interrupts", NULL);
@@ -1202,11 +1204,11 @@
 		hp->index = hvsi_count;
 		hp->inbuf_end = hp->inbuf;
 		hp->state = HVSI_CLOSED;
-		hp->vtermno = *vtermno;
-		hp->virq = irq_create_mapping(NULL, irq[0]);
+		hp->vtermno = be32_to_cpup(vtermno);
+		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
 		if (hp->virq == 0) {
 			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
-				__func__, irq[0]);
+			       __func__, be32_to_cpup(irq));
 			tty_port_destroy(&hp->port);
 			continue;
 		}
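
The HVSI wire format is big-endian, so the hunks above wrap every multi-byte header field in cpu_to_be*() on the way out and be*_to_cpu() on the way in, which also makes the driver correct on little-endian hosts. A sketch of the pattern over an illustrative header layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hdr {			/* illustrative on-the-wire layout */
	__be16 seqno;
	__be32 word;
};

static void example_fill(struct example_hdr *h, u16 seq, u32 word)
{
	h->seqno = cpu_to_be16(seq);	/* host -> wire order */
	h->word = cpu_to_be32(word);
}

static u32 example_word(const struct example_hdr *h)
{
	return be32_to_cpu(h->word);	/* wire -> host order */
}
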
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 358323c..a8c8cfd 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -879,6 +879,11 @@
 	.chars_in_buffer	= mips_ejtag_fdc_tty_chars_in_buffer,
 };
 
+int __weak get_c0_fdc_int(void)
+{
+	return -1;
+}
+
 static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
 {
 	int ret, nport;
@@ -967,9 +972,7 @@
 	wake_up_process(priv->thread);
 
 	/* Look for an FDC IRQ */
-	priv->irq = -1;
-	if (get_c0_fdc_int)
-		priv->irq = get_c0_fdc_int();
+	priv->irq = get_c0_fdc_int();
 
 	/* Try requesting the IRQ */
 	if (priv->irq >= 0) {
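
The __weak get_c0_fdc_int() added above gives the driver a link-time default, replacing the old run-time NULL test on the symbol. Platforms override it simply by providing a strong definition; a sketch, with the platform value being illustrative:

/* generic code: weak fallback, used when no platform override exists */
int __weak get_c0_fdc_int(void)
{
	return -1;	/* no FDC interrupt wired up */
}

/* platform code (separate file): a strong definition wins at link time */
int get_c0_fdc_int(void)
{
	return EXAMPLE_PLATFORM_FDC_IRQ;	/* illustrative constant */
}
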
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index cfbb9d7..271d121 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -36,11 +36,11 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/io.h>
 #ifdef CONFIG_SPARC
 #include <linux/sunserialcore.h>
 #endif
 
-#include <asm/io.h>
 #include <asm/irq.h>
 
 #include "8250.h"
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index b5b4278..95b330a 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -353,9 +353,16 @@
 
 static void moom_callback(struct work_struct *ignored)
 {
+	const gfp_t gfp_mask = GFP_KERNEL;
+	struct oom_control oc = {
+		.zonelist = node_zonelist(first_memory_node, gfp_mask),
+		.nodemask = NULL,
+		.gfp_mask = gfp_mask,
+		.order = -1,
+	};
+
 	mutex_lock(&oom_lock);
-	if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
-			   GFP_KERNEL, 0, NULL, true))
+	if (!out_of_memory(&oc))
 		pr_info("OOM request ignored because killer is disabled\n");
 	mutex_unlock(&oom_lock);
 }
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bf495f..e0606c0 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,9 +22,7 @@
 source "drivers/gpu/host1x/Kconfig"
 source "drivers/gpu/ipu-v3/Kconfig"
 
-menu "Direct Rendering Manager"
 source "drivers/gpu/drm/Kconfig"
-endmenu
 
 menu "Frame buffer Devices"
 source "drivers/video/fbdev/Kconfig"
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0505b79..5ffa4b4 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -299,6 +299,13 @@
 	  If you have an Sharp SL-6000 Zaurus say Y to enable a driver
 	  for its backlight
 
+config BACKLIGHT_PM8941_WLED
+	tristate "Qualcomm PM8941 WLED Driver"
+	select REGMAP
+	help
+	  If you have the Qualcomm PM8941, say Y to enable a driver for the
+	  WLED block.
+
 config BACKLIGHT_SAHARA
 	tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
 	depends on X86
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index d67073f..16ec534 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_BACKLIGHT_OT200)		+= ot200_bl.o
 obj-$(CONFIG_BACKLIGHT_PANDORA)		+= pandora_bl.o
 obj-$(CONFIG_BACKLIGHT_PCF50633)	+= pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_PM8941_WLED)	+= pm8941-wled.o
 obj-$(CONFIG_BACKLIGHT_PWM)		+= pwm_bl.o
 obj-$(CONFIG_BACKLIGHT_SAHARA)		+= kb3886_bl.o
 obj-$(CONFIG_BACKLIGHT_SKY81452)	+= sky81452-backlight.o
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 88116b4..f88df9e 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -73,6 +73,7 @@
 	struct device *dev;
 	struct lp855x_platform_data *pdata;
 	struct pwm_device *pwm;
+	struct regulator *supply;	/* regulator for VDD input */
 };
 
 static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -378,13 +379,6 @@
 		pdata->rom_data = &rom[0];
 	}
 
-	pdata->supply = devm_regulator_get(dev, "power");
-	if (IS_ERR(pdata->supply)) {
-		if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		pdata->supply = NULL;
-	}
-
 	lp->pdata = pdata;
 
 	return 0;
@@ -425,8 +419,15 @@
 	else
 		lp->mode = REGISTER_BASED;
 
-	if (lp->pdata->supply) {
-		ret = regulator_enable(lp->pdata->supply);
+	lp->supply = devm_regulator_get(lp->dev, "power");
+	if (IS_ERR(lp->supply)) {
+		if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		lp->supply = NULL;
+	}
+
+	if (lp->supply) {
+		ret = regulator_enable(lp->supply);
 		if (ret < 0) {
 			dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
 			return ret;
@@ -464,8 +465,8 @@
 
 	lp->bl->props.brightness = 0;
 	backlight_update_status(lp->bl);
-	if (lp->pdata->supply)
-		regulator_disable(lp->pdata->supply);
+	if (lp->supply)
+		regulator_disable(lp->supply);
 	sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
 
 	return 0;
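
Moving the regulator lookup from DT parsing into probe means non-DT users get the "power" supply handled too, and the driver struct rather than pdata owns the handle. The lookup treats the supply as optional while still honouring probe deferral; a hedged sketch of that idiom:

#include <linux/regulator/consumer.h>

static int example_enable_supply(struct device *dev, struct regulator **out)
{
	struct regulator *supply = devm_regulator_get(dev, "power");

	if (IS_ERR(supply)) {
		if (PTR_ERR(supply) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet */
		supply = NULL;			/* missing supply is optional */
	}

	*out = supply;
	return supply ? regulator_enable(supply) : 0;
}
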
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e418d5b..5d583d7 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -221,8 +221,7 @@
 {
 	struct backlight_device *bl_dev = bl->bl_dev;
 
-	if (bl_dev)
-		backlight_device_unregister(bl_dev);
+	backlight_device_unregister(bl_dev);
 }
 
 static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
diff --git a/drivers/video/backlight/pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
new file mode 100644
index 0000000..c704c32
--- /dev/null
+++ b/drivers/video/backlight/pm8941-wled.c
@@ -0,0 +1,427 @@
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define PM8941_WLED_REG_VAL_BASE		0x40
+#define  PM8941_WLED_REG_VAL_MAX		0xFFF
+
+#define PM8941_WLED_REG_MOD_EN			0x46
+#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
+#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
+
+#define PM8941_WLED_REG_SYNC			0x47
+#define  PM8941_WLED_REG_SYNC_MASK		0x07
+#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
+#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
+#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
+#define  PM8941_WLED_REG_SYNC_ALL		0x07
+#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
+
+#define PM8941_WLED_REG_FREQ			0x4c
+#define  PM8941_WLED_REG_FREQ_MASK		0x0f
+
+#define PM8941_WLED_REG_OVP			0x4d
+#define  PM8941_WLED_REG_OVP_MASK		0x03
+
+#define PM8941_WLED_REG_BOOST			0x4e
+#define  PM8941_WLED_REG_BOOST_MASK		0x07
+
+#define PM8941_WLED_REG_SINK			0x4f
+#define  PM8941_WLED_REG_SINK_MASK		0xe0
+#define  PM8941_WLED_REG_SINK_SHFT		0x05
+
+/* Per-'string' registers below */
+#define PM8941_WLED_REG_STR_OFFSET		0x10
+
+#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
+#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
+
+#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
+#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
+
+#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
+#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
+#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
+#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
+
+#define PM8941_WLED_REG_STR_CABC_BASE		0x66
+#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
+
+struct pm8941_wled_config {
+	u32 i_boost_limit;
+	u32 ovp;
+	u32 switch_freq;
+	u32 num_strings;
+	u32 i_limit;
+	bool cs_out_en;
+	bool ext_gen;
+	bool cabc_en;
+};
+
+struct pm8941_wled {
+	const char *name;
+	struct regmap *regmap;
+	u16 addr;
+
+	struct pm8941_wled_config cfg;
+};
+
+static int pm8941_wled_update_status(struct backlight_device *bl)
+{
+	struct pm8941_wled *wled = bl_get_data(bl);
+	u16 val = bl->props.brightness;
+	u8 ctrl = 0;
+	int rc;
+	int i;
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
+	    bl->props.state & BL_CORE_FBBLANK)
+		val = 0;
+
+	if (val != 0)
+		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_MOD_EN,
+			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
+		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
+
+		rc = regmap_bulk_write(wled->regmap,
+				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
+				v, 2);
+		if (rc)
+			return rc;
+	}
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
+	return rc;
+}
+
+static int pm8941_wled_setup(struct pm8941_wled *wled)
+{
+	int rc;
+	int i;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_OVP,
+			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_BOOST,
+			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_FREQ,
+			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
+	if (rc)
+		return rc;
+
+	if (wled->cfg.cs_out_en) {
+		u8 all = (BIT(wled->cfg.num_strings) - 1)
+				<< PM8941_WLED_REG_SINK_SHFT;
+
+		rc = regmap_update_bits(wled->regmap,
+				wled->addr + PM8941_WLED_REG_SINK,
+				PM8941_WLED_REG_SINK_MASK, all);
+		if (rc)
+			return rc;
+	}
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
+		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
+				PM8941_WLED_REG_STR_MOD_MASK,
+				PM8941_WLED_REG_STR_MOD_EN);
+		if (rc)
+			return rc;
+
+		if (wled->cfg.ext_gen) {
+			rc = regmap_update_bits(wled->regmap,
+					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
+					PM8941_WLED_REG_STR_MOD_SRC_MASK,
+					PM8941_WLED_REG_STR_MOD_SRC_EXT);
+			if (rc)
+				return rc;
+		}
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_SCALE_BASE,
+				PM8941_WLED_REG_STR_SCALE_MASK,
+				wled->cfg.i_limit);
+		if (rc)
+			return rc;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_CABC_BASE,
+				PM8941_WLED_REG_STR_CABC_MASK,
+				wled->cfg.cabc_en ?
+					PM8941_WLED_REG_STR_CABC_EN : 0);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static const struct pm8941_wled_config pm8941_wled_config_defaults = {
+	.i_boost_limit = 3,
+	.i_limit = 20,
+	.ovp = 2,
+	.switch_freq = 5,
+	.num_strings = 0,
+	.cs_out_en = false,
+	.ext_gen = false,
+	.cabc_en = false,
+};
+
+struct pm8941_wled_var_cfg {
+	const u32 *values;
+	u32 (*fn)(u32);
+	int size;
+};
+
+static const u32 pm8941_wled_i_boost_limit_values[] = {
+	105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
+	.values = pm8941_wled_i_boost_limit_values,
+	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
+};
+
+static const u32 pm8941_wled_ovp_values[] = {
+	35, 32, 29, 27,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
+	.values = pm8941_wled_ovp_values,
+	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
+};
+
+static u32 pm8941_wled_num_strings_values_fn(u32 idx)
+{
+	return idx + 1;
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
+	.fn = pm8941_wled_num_strings_values_fn,
+	.size = 3,
+};
+
+static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
+{
+	return 19200 / (2 * (1 + idx));
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
+	.fn = pm8941_wled_switch_freq_values_fn,
+	.size = 16,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
+	.size = 26,
+};
+
+static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
+{
+	if (idx >= cfg->size)
+		return UINT_MAX;
+	if (cfg->fn)
+		return cfg->fn(idx);
+	if (cfg->values)
+		return cfg->values[idx];
+	return idx;
+}
+
+static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
+{
+	struct pm8941_wled_config *cfg = &wled->cfg;
+	u32 val;
+	int rc;
+	u32 c;
+	int i;
+	int j;
+
+	const struct {
+		const char *name;
+		u32 *val_ptr;
+		const struct pm8941_wled_var_cfg *cfg;
+	} u32_opts[] = {
+		{
+			"qcom,current-boost-limit",
+			&cfg->i_boost_limit,
+			.cfg = &pm8941_wled_i_boost_limit_cfg,
+		},
+		{
+			"qcom,current-limit",
+			&cfg->i_limit,
+			.cfg = &pm8941_wled_i_limit_cfg,
+		},
+		{
+			"qcom,ovp",
+			&cfg->ovp,
+			.cfg = &pm8941_wled_ovp_cfg,
+		},
+		{
+			"qcom,switching-freq",
+			&cfg->switch_freq,
+			.cfg = &pm8941_wled_switch_freq_cfg,
+		},
+		{
+			"qcom,num-strings",
+			&cfg->num_strings,
+			.cfg = &pm8941_wled_num_strings_cfg,
+		},
+	};
+	const struct {
+		const char *name;
+		bool *val_ptr;
+	} bool_opts[] = {
+		{ "qcom,cs-out", &cfg->cs_out_en, },
+		{ "qcom,ext-gen", &cfg->ext_gen, },
+		{ "qcom,cabc", &cfg->cabc_en, },
+	};
+
+	rc = of_property_read_u32(dev->of_node, "reg", &val);
+	if (rc || val > 0xffff) {
+		dev_err(dev, "invalid IO resources\n");
+		return rc ? rc : -EINVAL;
+	}
+	wled->addr = val;
+
+	rc = of_property_read_string(dev->of_node, "label", &wled->name);
+	if (rc)
+		wled->name = dev->of_node->name;
+
+	*cfg = pm8941_wled_config_defaults;
+	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
+		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+		if (rc == -EINVAL) {
+			continue;
+		} else if (rc) {
+			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+			return rc;
+		}
+
+		c = UINT_MAX;
+		for (j = 0; c != val; j++) {
+			c = pm8941_wled_values(u32_opts[i].cfg, j);
+			if (c == UINT_MAX) {
+				dev_err(dev, "invalid value for '%s'\n",
+					u32_opts[i].name);
+				return -EINVAL;
+			}
+		}
+
+		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+		*u32_opts[i].val_ptr = j;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+			*bool_opts[i].val_ptr = true;
+	}
+
+	cfg->num_strings = cfg->num_strings + 1;
+
+	return 0;
+}
+
+static const struct backlight_ops pm8941_wled_ops = {
+	.update_status = pm8941_wled_update_status,
+};
+
+static int pm8941_wled_probe(struct platform_device *pdev)
+{
+	struct backlight_properties props;
+	struct backlight_device *bl;
+	struct pm8941_wled *wled;
+	struct regmap *regmap;
+	int rc;
+
+	regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!regmap) {
+		dev_err(&pdev->dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+	if (!wled)
+		return -ENOMEM;
+
+	wled->regmap = regmap;
+
+	rc = pm8941_wled_configure(wled, &pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = pm8941_wled_setup(wled);
+	if (rc)
+		return rc;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = PM8941_WLED_REG_VAL_MAX;
+	bl = devm_backlight_device_register(&pdev->dev, wled->name,
+					    &pdev->dev, wled,
+					    &pm8941_wled_ops, &props);
+	if (IS_ERR(bl))
+		return PTR_ERR(bl);
+
+	return 0;
+}
+
+static const struct of_device_id pm8941_wled_match_table[] = {
+	{ .compatible = "qcom,pm8941-wled" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
+
+static struct platform_driver pm8941_wled_driver = {
+	.probe = pm8941_wled_probe,
+	.driver	= {
+		.name = "pm8941-wled",
+		.of_match_table	= pm8941_wled_match_table,
+	},
+};
+
+module_platform_driver(pm8941_wled_driver);
+
+MODULE_DESCRIPTION("pm8941 wled driver");
+MODULE_LICENSE("GPL v2");
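
The driver above validates each devicetree property by walking a per-property table (pm8941_wled_values()) and storing the matching table index, not the raw value, into the register field. A minimal user-space sketch of that table-driven lookup idiom — the table contents mirror pm8941_wled_ovp_values above, everything else is illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Valid OVP values, in register-index order (cf. pm8941_wled_ovp_values). */
static const unsigned int ovp_values[] = { 35, 32, 29, 27 };

/* Map a property value to its register index, or -1 if it is not allowed. */
static int value_to_index(const unsigned int *tbl, unsigned int n,
			  unsigned int val)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i] == val)
			return (int)i;
	return -1;
}

int main(void)
{
	/* A DT property "qcom,ovp = <29>" would resolve to index 2. */
	printf("index = %d\n",
	       value_to_index(ovp_values, ARRAY_SIZE(ovp_values), 29));
	return 0;
}
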
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 052fa1b..d414c7a 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -65,7 +65,7 @@
 
 	if (brightness > 0) {
 		ret = regmap_write(regmap, SKY81452_REG0, brightness - 1);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			return ret;
 
 		return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
@@ -87,12 +87,12 @@
 	int ret;
 
 	ret = kstrtoul(buf, 16, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	ret = regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
 					value << CTZ(SKY81452_EN));
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	return count;
@@ -108,7 +108,7 @@
 
 	reg = !strcmp(attr->attr.name, "open") ? SKY81452_REG5 : SKY81452_REG4;
 	ret = regmap_read(regmap, reg, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	if (value & SKY81452_SHRT) {
@@ -136,7 +136,7 @@
 	int ret;
 
 	ret = regmap_read(regmap, SKY81452_REG4, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	*buf = 0;
@@ -196,7 +196,7 @@
 	pdata->gpio_enable = of_get_gpio(np, 0);
 
 	ret = of_property_count_u32_elems(np, "led-sources");
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pdata->enable = SKY81452_EN >> CTZ(SKY81452_EN);
 	} else {
 		num_entry = ret;
@@ -205,7 +205,7 @@
 
 		ret = of_property_read_u32_array(np, "led-sources", sources,
 					num_entry);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "led-sources node is invalid.\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -218,12 +218,12 @@
 	ret = of_property_read_u32(np,
 			"skyworks,short-detection-threshold-volt",
 			&pdata->short_detection_threshold);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->short_detection_threshold = 7;
 
 	ret = of_property_read_u32(np, "skyworks,current-limit-mA",
 			&pdata->boost_current_limit);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->boost_current_limit = 2750;
 
 	of_node_put(np);
@@ -278,14 +278,14 @@
 	if (gpio_is_valid(pdata->gpio_enable)) {
 		ret = devm_gpio_request_one(dev, pdata->gpio_enable,
 					GPIOF_OUT_INIT_HIGH, "sky81452-en");
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "failed to request GPIO. err=%d\n", ret);
 			return ret;
 		}
 	}
 
 	ret = sky81452_bl_init_device(regmap, pdata);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "failed to initialize. err=%d\n", ret);
 		return ret;
 	}
@@ -302,8 +302,8 @@
 
 	platform_set_drvdata(pdev, bd);
 
-	ret  = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
-	if (IS_ERR_VALUE(ret)) {
+	ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+	if (ret < 0) {
 		dev_err(dev, "failed to create attribute. err=%d\n", ret);
 		return ret;
 	}
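
The sky81452 conversions replace IS_ERR_VALUE(), which exists to classify pointer-sized error encodings, with the plain negative test that is idiomatic for int-returning kernel APIs. A small stand-alone sketch of the convention — the failing callee here is made up:

#include <stdio.h>
#include <errno.h>

/* Kernel-style callee: 0 on success, negative errno on failure. */
static int demo_write(int simulate_failure)
{
	return simulate_failure ? -EIO : 0;
}

int main(void)
{
	int ret = demo_write(1);

	/* The idiomatic check for integer-returning APIs. */
	if (ret < 0)
		fprintf(stderr, "write failed: %d\n", ret);
	return 0;
}
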
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 3ad6765..83742d8 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -158,6 +158,7 @@
 	{ "tosa-bl", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
 
 static struct i2c_driver tosa_bl_driver = {
 	.driver = {
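
Exporting the id table with MODULE_DEVICE_TABLE() is what lets userspace autoload the module when a matching device turns up: the table is emitted into the module's alias data, which depmod and modprobe consult. A minimal sketch of the pattern, with an illustrative device name:

#include <linux/i2c.h>
#include <linux/module.h>

/* The table doubles as module alias data for modprobe/udev autoloading. */
static const struct i2c_device_id demo_bl_id[] = {
	{ "demo-bl", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_bl_id);
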
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index ba97efc..38da6e2 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@
 	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
 		!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
 		(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
-		!ARM64
+		!ARM64 && !ARC && !MICROBLAZE
 	default y
 	help
 	  Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 811acfc..8b1d371 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2464,7 +2464,7 @@
 	tristate "Solomon SSD1307 framebuffer support"
 	depends on FB && I2C
 	depends on OF
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select FB_SYS_FOPS
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index abadc49..19eb42b 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -19,7 +19,6 @@
 #include <linux/backlight.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
-#include <linux/platform_data/atmel.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -999,7 +998,7 @@
 	[ATMEL_LCDC_WIRING_RGB]	= "RGB",
 };
 
-const int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+static int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
 {
 	const char *mode;
 	int err, i;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index d787533..47c3191 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1072,9 +1072,9 @@
 
 	for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
 		int idx = svd[i - specs->modedb_len - num];
-		if (!idx || idx > 63) {
+		if (!idx || idx >= ARRAY_SIZE(cea_modes)) {
 			pr_warning("Reserved SVD code %d\n", idx);
-		} else if (idx > ARRAY_SIZE(cea_modes) || !cea_modes[idx].xres) {
+		} else if (!cea_modes[idx].xres) {
 			pr_warning("Unimplemented SVD code %d\n", idx);
 		} else {
 			memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 60c3f0a..15755ce 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -485,7 +485,7 @@
 
 	mutex_lock(&fb_info->bl_curve_mutex);
 	for (i = 0; i < FB_BACKLIGHT_LEVELS; i += 8)
-		len += snprintf(&buf[len], PAGE_SIZE, "%8ph\n",
+		len += scnprintf(&buf[len], PAGE_SIZE - len, "%8ph\n",
 				fb_info->bl_curve + i);
 	mutex_unlock(&fb_info->bl_curve_mutex);
 
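The fbsysfs fix matters because snprintf() returns the length the output would have taken, so accumulating its return value in a loop can walk the write pointer past the buffer; scnprintf() returns the bytes actually stored, and the remaining space must also shrink by len each pass. A user-space sketch of the corrected pattern — the scnprintf() here is a local stand-in for the kernel helper:

#include <stdio.h>
#include <stdarg.h>

/* Stand-in for the kernel's scnprintf(): bytes actually written. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i < (int)size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[16];
	size_t len = 0;
	int i;

	/* Each step caps at the space that is really left. */
	for (i = 0; i < 8; i++)
		len += scnprintf(buf + len, sizeof(buf) - len, "item%d ", i);

	printf("stored %zu bytes: \"%s\"\n", len, buf);
	return 0;
}
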
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 7d07cf8..2510fa72 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -289,7 +289,7 @@
 };
 
 #ifdef CONFIG_FB_MODE_HELPERS
-const struct fb_videomode cea_modes[64] = {
+const struct fb_videomode cea_modes[65] = {
 	/* #1: 640x480p@59.94/60Hz */
 	[1] = {
 		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
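
Together these two hunks tie the SVD bounds check to the array itself: the reserved-code test becomes idx >= ARRAY_SIZE(cea_modes), so the check can never drift from the table size again, and the table gains a 65th slot so the highest defined CEA code stays addressable. A stand-alone sketch of the ARRAY_SIZE-bounded lookup, with illustrative entries:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *modes[65] = {
	[1]  = "640x480p@60",	/* illustrative, cf. cea_modes[] */
	[64] = "1920x1080p@100",
};

static const char *lookup_mode(unsigned int idx)
{
	/* Reject 0 and anything past the last element. */
	if (!idx || idx >= ARRAY_SIZE(modes))
		return "reserved";
	if (!modes[idx])
		return "unimplemented";
	return modes[idx];
}

int main(void)
{
	printf("%s\n", lookup_mode(64));	/* valid with 65 entries */
	printf("%s\n", lookup_mode(65));	/* out of range -> reserved */
	return 0;
}
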
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index de98196..c9293ae 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -325,7 +325,6 @@
 		dev_err(&pdev->dev, "I/O resource request failed\n");
 		return -ENXIO;
 	}
-	res->flags &= ~IORESOURCE_CACHEABLE;
 	fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(fbdev->regs))
 		return PTR_ERR(fbdev->regs);
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
index a14d993..8c246c2 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
@@ -266,7 +266,6 @@
 	.remove	= __exit_p(opa362_remove),
 	.driver	= {
 		.name	= "amplifier-opa362",
-		.owner	= THIS_MODULE,
 		.of_match_table = opa362_of_match,
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e209b03..efb57c0 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -615,7 +615,7 @@
 		return -EINVAL;
 	}
 
-	clk = clk_get(&pdev->dev, "LCDCLK");
+	clk = devm_clk_get(&pdev->dev, "LCDCLK");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "unable to get LCDCLK");
 		return PTR_ERR(clk);
@@ -624,21 +624,18 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		dev_err(&pdev->dev, "no IO memory defined\n");
-		ret = -ENOENT;
-		goto failed_put_clk;
+		return -ENOENT;
 	}
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "no IRQ defined\n");
-		ret = -ENOENT;
-		goto failed_put_clk;
+		return -ENOENT;
 	}
 
 	info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
 	if (info == NULL) {
-		ret = -ENOMEM;
-		goto failed_put_clk;
+		return -ENOMEM;
 	}
 
 	/* Initialize private data */
@@ -776,8 +773,6 @@
 			info->screen_base, fbi->fb_start_dma);
 failed_free_info:
 	kfree(info);
-failed_put_clk:
-	clk_put(clk);
 
 	dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
 	return ret;
@@ -813,7 +808,6 @@
 				info->screen_base, info->fix.smem_start);
 
 	clk_disable(fbi->clk);
-	clk_put(fbi->clk);
 
 	framebuffer_release(info);
 
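Switching to devm_clk_get() ties the clock reference to the device, so the driver core drops it automatically on probe failure or removal; that is what lets every failed-probe path simply return and the failed_put_clk label disappear. A minimal sketch of the resulting probe shape, with illustrative names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * The reference is dropped automatically when probe fails or the
	 * device goes away, so no failed_put_clk-style unwinding is needed.
	 */
	clk = devm_clk_get(&pdev->dev, "LCDCLK");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Later error paths may simply return their errno. */
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name = "devm-clk-demo",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("devm_clk_get() probe sketch");
MODULE_LICENSE("GPL v2");
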
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 83433cb..96aa46d 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -32,8 +32,7 @@
 #include <linux/spinlock_types.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <video/s1d13xxxfb.h>
 
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 7e3a05f..f72dd12 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1938,7 +1938,7 @@
 	},
 };
 
-static struct platform_device_id s3c_fb_driver_ids[] = {
+static const struct platform_device_id s3c_fb_driver_ids[] = {
 	{
 		.name		= "s3c-fb",
 		.driver_data	= (unsigned long)&s3c_fb_data_64xx,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 3e153c0..93f4c90 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -656,8 +656,9 @@
 	bl = backlight_device_register(bl_name, &client->dev, par,
 				       &ssd1307fb_bl_ops, NULL);
 	if (IS_ERR(bl)) {
-		dev_err(&client->dev, "unable to register backlight device: %ld\n",
-			PTR_ERR(bl));
+		ret = PTR_ERR(bl);
+		dev_err(&client->dev, "unable to register backlight device: %d\n",
+			ret);
 		goto bl_init_error;
 	}
 
@@ -719,7 +720,6 @@
 	.driver = {
 		.name = "ssd1307fb",
 		.of_match_table = ssd1307fb_of_match,
-		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 735355b..7df4228 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -64,6 +64,7 @@
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/io.h>
 
 #include <asm/grfioctl.h>	/* for HP-UX compatibility */
 #include <asm/uaccess.h>
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index ff2b873..e9c2f7b 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -279,7 +279,7 @@
 {
 	char *buf;
 	char *wrptr;
-	int retval = 0;
+	int retval;
 	int writesize;
 	struct urb *urb;
 
@@ -1505,8 +1505,7 @@
 	char *desc;
 	char *buf;
 	char *desc_end;
-
-	int total_len = 0;
+	int total_len;
 
 	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
 	if (!buf)
@@ -1582,7 +1581,7 @@
 			const struct usb_device_id *id)
 {
 	struct usb_device *usbdev;
-	struct dlfb_data *dev = NULL;
+	struct dlfb_data *dev;
 	int retval = -ENOMEM;
 
 	/* usb initialization */
@@ -1665,7 +1664,6 @@
 	/* allocates framebuffer driver structure, not framebuffer memory */
 	info = framebuffer_alloc(0, dev->gdev);
 	if (!info) {
-		retval = -ENOMEM;
 		pr_err("framebuffer_alloc failed\n");
 		goto error;
 	}
@@ -1912,7 +1910,7 @@
 
 static struct urb *dlfb_get_urb(struct dlfb_data *dev)
 {
-	int ret = 0;
+	int ret;
 	struct list_head *entry;
 	struct urb_node *unode;
 	struct urb *urb = NULL;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 70a897b..b9c2f81 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -51,7 +51,14 @@
 	if (!mem)
 		return NULL;
 
-	memset(mem, 0, size); /* Clear the ram out, no junk to the user */
+	/*
+	 * VFB must clear memory to prevent kernel info
+	 * leakage into userspace.
+	 * VGA-based drivers MUST NOT clear memory if
+	 * they want to be able to take over vgacon.
+	 */
+
+	memset(mem, 0, size);
 	adr = (unsigned long) mem;
 	while (size > 0) {
 		SetPageReserved(vmalloc_to_page((void *)adr));
@@ -490,14 +497,6 @@
 	if (!(videomemory = rvmalloc(videomemorysize)))
 		return retval;
 
-	/*
-	 * VFB must clear memory to prevent kernel info
-	 * leakage into userspace
-	 * VGA-based drivers MUST NOT clear memory if
-	 * they want to be able to take over vgacon
-	 */
-	memset(videomemory, 0, videomemorysize);
-
 	info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
 	if (!info)
 		goto err;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 241fafd..55c4b5b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -797,7 +797,8 @@
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
 	select WATCHDOG_CORE
-	select LPC_ICH
+	select LPC_ICH if !EXPERT
+	select I2C_I801 if !EXPERT
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3c3fd41..0acc6c5 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,8 +66,7 @@
 #include <linux/spinlock.h>		/* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>		/* For copy_to_user/put_user/... */
 #include <linux/io.h>			/* For inb/outb/... */
-#include <linux/mfd/core.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #include "iTCO_vendor.h"
 
@@ -146,59 +145,67 @@
 	return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10;
 }
 
+static inline u32 no_reboot_bit(void)
+{
+	u32 enable_bit;
+
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 3:
+		enable_bit = 0x00000010;
+		break;
+	case 2:
+		enable_bit = 0x00000020;
+		break;
+	case 4:
+	case 1:
+	default:
+		enable_bit = 0x00000002;
+		break;
+	}
+
+	return enable_bit;
+}
+
 static void iTCO_wdt_set_NO_REBOOT_bit(void)
 {
 	u32 val32;
 
 	/* Set the NO_REBOOT bit: this disables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000010;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000020;
+		val32 |= no_reboot_bit();
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 |= 0x00000002;
+		val32 |= no_reboot_bit();
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 	}
 }
 
 static int iTCO_wdt_unset_NO_REBOOT_bit(void)
 {
-	int ret = 0;
-	u32 val32;
+	u32 enable_bit = no_reboot_bit();
+	u32 val32 = 0;
 
 	/* Unset the NO_REBOOT bit: this enables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffef;
+		val32 &= ~enable_bit;
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000010)
-			ret = -EIO;
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffdf;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000020)
-			ret = -EIO;
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 &= 0xfffffffd;
+		val32 &= ~enable_bit;
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		if (val32 & 0x00000002)
-			ret = -EIO;
 	}
 
-	return ret; /* returns: 0 = OK, -EIO = Error */
+	if (val32 & enable_bit)
+		return -EIO;
+
+	return 0;
 }
 
 static int iTCO_wdt_start(struct watchdog_device *wd_dev)
@@ -418,9 +425,9 @@
 {
 	int ret = -ENODEV;
 	unsigned long val32;
-	struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
+	struct itco_wdt_platform_data *pdata = dev_get_platdata(&dev->dev);
 
-	if (!ich_info)
+	if (!pdata)
 		goto out;
 
 	spin_lock_init(&iTCO_wdt_private.io_lock);
@@ -435,7 +442,7 @@
 	if (!iTCO_wdt_private.smi_res)
 		goto out;
 
-	iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+	iTCO_wdt_private.iTCO_version = pdata->version;
 	iTCO_wdt_private.dev = dev;
 	iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
 
@@ -501,15 +508,24 @@
 	}
 
 	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
-		ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
+		pdata->name, pdata->version, (u64)TCOBASE);
 
 	/* Clear out the (probably old) status */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 4:
+		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
+		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
+		break;
+	case 3:
 		outl(0x20008, TCO1_STS);
-	} else {
+		break;
+	case 2:
+	case 1:
+	default:
 		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
 		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
 		outw(0x0004, TCO2_STS);	/* Clear BOOT_STS bit */
+		break;
 	}
 
 	iTCO_wdt_watchdog_dev.bootstatus = 0;
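
With the NO_REBOOT bit selection centralized in no_reboot_bit(), set, clear, and read-back verification all derive from the same mask instead of three sets of magic constants. A tiny stand-alone demo of that single-source-of-truth pattern (the register traffic is simulated):

#include <stdio.h>
#include <stdint.h>

static int version = 2;

/* Single source of truth for the version-dependent bit. */
static uint32_t no_reboot_bit(void)
{
	switch (version) {
	case 3:  return 0x00000010;
	case 2:  return 0x00000020;
	default: return 0x00000002;
	}
}

int main(void)
{
	uint32_t reg = 0;

	reg |= no_reboot_bit();		/* set: disable reboots */
	printf("set:   0x%08x\n", reg);

	reg &= ~no_reboot_bit();	/* clear: enable reboots */
	printf("clear: 0x%08x\n", reg);

	/* The same bit drives the read-back verification. */
	printf("%s\n", (reg & no_reboot_bit()) ? "-EIO" : "OK");
	return 0;
}
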
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 7cd226d..73708ac 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -280,4 +280,15 @@
 	def_bool y
 	depends on X86 && ACPI
 
+config XEN_SYMS
+	bool "Xen symbols"
+	depends on X86 && XEN_DOM0 && XENFS
+	default y if KALLSYMS
+	help
+	  Exports hypervisor symbols (along with their types and addresses) via
+	  /proc/xen/xensyms file, similar to /proc/kallsyms.
+
+config XEN_HAVE_VPMU
+	bool
+
 endmenu
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index bf4a23c..1fa633b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -638,9 +638,9 @@
 	 * regions (see arch/x86/xen/setup.c).
 	 */
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
-		if (xen_extra_mem[i].size)
-			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
-					   PFN_DOWN(xen_extra_mem[i].size));
+		if (xen_extra_mem[i].n_pfns)
+			balloon_add_region(xen_extra_mem[i].start_pfn,
+					   xen_extra_mem[i].n_pfns);
 
 	return 0;
 }
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index ed8bf10..68d1290 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1301,11 +1301,7 @@
 	if (!VALID_EVTCHN(evtchn))
 		return -1;
 
-	/*
-	 * Events delivered via platform PCI interrupts are always
-	 * routed to vcpu 0 and hence cannot be rebound.
-	 */
-	if (xen_hvm_domain() && !xen_have_vector_callback)
+	if (!xen_support_evtchn_rebind())
 		return -1;
 
 	/* Send future instances of this interrupt to other vcpu. */
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 96453f8..b5a7342 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -20,6 +20,9 @@
 #include <xen/xenbus.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>
+#ifdef CONFIG_XEN_HAVE_VPMU
+#include <xen/interface/xenpmu.h>
+#endif
 
 #define HYPERVISOR_ATTR_RO(_name) \
 static struct hyp_sysfs_attr  _name##_attr = __ATTR_RO(_name)
@@ -368,6 +371,126 @@
 	sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
 }
 
+#ifdef CONFIG_XEN_HAVE_VPMU
+struct pmu_mode {
+	const char *name;
+	uint32_t mode;
+};
+
+static struct pmu_mode pmu_modes[] = {
+	{"off", XENPMU_MODE_OFF},
+	{"self", XENPMU_MODE_SELF},
+	{"hv", XENPMU_MODE_HV},
+	{"all", XENPMU_MODE_ALL}
+};
+
+static ssize_t pmu_mode_store(struct hyp_sysfs_attr *attr,
+			      const char *buffer, size_t len)
+{
+	int ret;
+	struct xen_pmu_params xp;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) {
+		if (strncmp(buffer, pmu_modes[i].name, len - 1) == 0) {
+			xp.val = pmu_modes[i].mode;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(pmu_modes))
+		return -EINVAL;
+
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+	ret = HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t pmu_mode_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+	int ret;
+	struct xen_pmu_params xp;
+	int i;
+	uint32_t mode;
+
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+	ret = HYPERVISOR_xenpmu_op(XENPMU_mode_get, &xp);
+	if (ret)
+		return ret;
+
+	mode = (uint32_t)xp.val;
+	for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) {
+		if (mode == pmu_modes[i].mode)
+			return sprintf(buffer, "%s\n", pmu_modes[i].name);
+	}
+
+	return -EINVAL;
+}
+HYPERVISOR_ATTR_RW(pmu_mode);
+
+static ssize_t pmu_features_store(struct hyp_sysfs_attr *attr,
+				  const char *buffer, size_t len)
+{
+	int ret;
+	uint32_t features;
+	struct xen_pmu_params xp;
+
+	ret = kstrtou32(buffer, 0, &features);
+	if (ret)
+		return ret;
+
+	xp.val = features;
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+	ret = HYPERVISOR_xenpmu_op(XENPMU_feature_set, &xp);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static ssize_t pmu_features_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+	int ret;
+	struct xen_pmu_params xp;
+
+	xp.version.maj = XENPMU_VER_MAJ;
+	xp.version.min = XENPMU_VER_MIN;
+	ret = HYPERVISOR_xenpmu_op(XENPMU_feature_get, &xp);
+	if (ret)
+		return ret;
+
+	return sprintf(buffer, "0x%x\n", (uint32_t)xp.val);
+}
+HYPERVISOR_ATTR_RW(pmu_features);
+
+static struct attribute *xen_pmu_attrs[] = {
+	&pmu_mode_attr.attr,
+	&pmu_features_attr.attr,
+	NULL
+};
+
+static const struct attribute_group xen_pmu_group = {
+	.name = "pmu",
+	.attrs = xen_pmu_attrs,
+};
+
+static int __init xen_pmu_init(void)
+{
+	return sysfs_create_group(hypervisor_kobj, &xen_pmu_group);
+}
+
+static void xen_pmu_destroy(void)
+{
+	sysfs_remove_group(hypervisor_kobj, &xen_pmu_group);
+}
+#endif
+
 static int __init hyper_sysfs_init(void)
 {
 	int ret;
@@ -390,7 +513,15 @@
 	ret = xen_properties_init();
 	if (ret)
 		goto prop_out;
-
+#ifdef CONFIG_XEN_HAVE_VPMU
+	if (xen_initial_domain()) {
+		ret = xen_pmu_init();
+		if (ret) {
+			xen_properties_destroy();
+			goto prop_out;
+		}
+	}
+#endif
 	goto out;
 
 prop_out:
@@ -407,6 +538,9 @@
 
 static void __exit hyper_sysfs_exit(void)
 {
+#ifdef CONFIG_XEN_HAVE_VPMU
+	xen_pmu_destroy();
+#endif
 	xen_properties_destroy();
 	xen_compilation_destroy();
 	xen_sysfs_uuid_destroy();
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index b019865..1a83010 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -2,3 +2,4 @@
 
 xenfs-y			  = super.o
 xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
+xenfs-$(CONFIG_XEN_SYMS) += xensyms.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 06092e0..8559a71 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -57,6 +57,9 @@
 		{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
 		{ "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR},
 		{ "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR},
+#ifdef CONFIG_XEN_SYMS
+		{ "xensyms", &xensyms_ops, S_IRUSR},
+#endif
 		{""},
 	};
 
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index 6b80c77..2c5934e 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -3,5 +3,6 @@
 
 extern const struct file_operations xsd_kva_file_ops;
 extern const struct file_operations xsd_port_file_ops;
+extern const struct file_operations xensyms_ops;
 
 #endif	/* _XENFS_XENBUS_H */
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
new file mode 100644
index 0000000..f8b1285
--- /dev/null
+++ b/drivers/xen/xenfs/xensyms.c
@@ -0,0 +1,152 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <xen/xen-ops.h>
+#include "xenfs.h"
+
+
+#define XEN_KSYM_NAME_LEN 127 /* Hypervisor may have different name length */
+
+struct xensyms {
+	struct xen_platform_op op;
+	char *name;
+	uint32_t namelen;
+};
+
+/* Grab the next symbol from the hypervisor */
+static int xensyms_next_sym(struct xensyms *xs)
+{
+	int ret;
+	struct xenpf_symdata *symdata = &xs->op.u.symdata;
+	uint64_t symnum;
+
+	memset(xs->name, 0, xs->namelen);
+	symdata->namelen = xs->namelen;
+
+	symnum = symdata->symnum;
+
+	ret = HYPERVISOR_dom0_op(&xs->op);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * If the hypervisor's symbol didn't fit into the buffer, allocate
+	 * a larger buffer and try again.
+	 */
+	if (unlikely(symdata->namelen > xs->namelen)) {
+		kfree(xs->name);
+
+		xs->namelen = symdata->namelen;
+		xs->name = kzalloc(xs->namelen, GFP_KERNEL);
+		if (!xs->name)
+			return -ENOMEM;
+
+		set_xen_guest_handle(symdata->name, xs->name);
+		symdata->symnum--; /* Rewind */
+
+		ret = HYPERVISOR_dom0_op(&xs->op);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (symdata->symnum == symnum)
+		/* End of symbols */
+		return 1;
+
+	return 0;
+}
+
+static void *xensyms_start(struct seq_file *m, loff_t *pos)
+{
+	struct xensyms *xs = (struct xensyms *)m->private;
+
+	xs->op.u.symdata.symnum = *pos;
+
+	if (xensyms_next_sym(xs))
+		return NULL;
+
+	return m->private;
+}
+
+static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct xensyms *xs = (struct xensyms *)m->private;
+
+	xs->op.u.symdata.symnum = ++(*pos);
+
+	if (xensyms_next_sym(xs))
+		return NULL;
+
+	return p;
+}
+
+static int xensyms_show(struct seq_file *m, void *p)
+{
+	struct xensyms *xs = (struct xensyms *)m->private;
+	struct xenpf_symdata *symdata = &xs->op.u.symdata;
+
+	seq_printf(m, "%016llx %c %s\n", symdata->address,
+		   symdata->type, xs->name);
+
+	return 0;
+}
+
+static void xensyms_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations xensyms_seq_ops = {
+	.start = xensyms_start,
+	.next = xensyms_next,
+	.show = xensyms_show,
+	.stop = xensyms_stop,
+};
+
+static int xensyms_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	struct xensyms *xs;
+	int ret;
+
+	ret = seq_open_private(file, &xensyms_seq_ops,
+			       sizeof(struct xensyms));
+	if (ret)
+		return ret;
+
+	m = file->private_data;
+	xs = (struct xensyms *)m->private;
+
+	xs->namelen = XEN_KSYM_NAME_LEN + 1;
+	xs->name = kzalloc(xs->namelen, GFP_KERNEL);
+	if (!xs->name) {
+		seq_release_private(inode, file);
+		return -ENOMEM;
+	}
+	set_xen_guest_handle(xs->op.u.symdata.name, xs->name);
+	xs->op.cmd = XENPF_get_symbol;
+	xs->op.u.symdata.namelen = xs->namelen;
+
+	return 0;
+}
+
+static int xensyms_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct xensyms *xs = (struct xensyms *)m->private;
+
+	kfree(xs->name);
+	return seq_release_private(inode, file);
+}
+
+const struct file_operations xensyms_ops = {
+	.open = xensyms_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = xensyms_release
+};
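
xensyms_next_sym() follows a grab/grow/retry protocol: issue the hypercall with the current buffer, and if the hypervisor reports a longer name than fits, reallocate at the reported size, rewind the symbol cursor, and repeat once. A user-space sketch of the same pattern against a made-up fetch_name() producer — every name here is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up producer: reports the length it needed, fills what fits. */
static size_t fetch_name(char *buf, size_t buflen)
{
	static const char sym[] = "a_rather_long_hypervisor_symbol_name";

	strncpy(buf, sym, buflen ? buflen - 1 : 0);
	if (buflen)
		buf[buflen - 1] = '\0';
	return sizeof(sym);	/* length the caller should have provided */
}

int main(void)
{
	size_t len = 8;
	char *name = calloc(1, len);
	size_t need;

	if (!name)
		return 1;

	need = fetch_name(name, len);
	if (need > len) {
		/* Too small: grow to the reported size and retry. */
		free(name);
		len = need;
		name = calloc(1, len);
		if (!name)
			return 1;
		need = fetch_name(name, len);
	}

	printf("%s (%zu bytes)\n", name, need);
	free(name);
	return 0;
}
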
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 8aa56bb..6caca02 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -52,7 +52,7 @@
 	/* Options that take integer arguments */
 	Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
 	/* String options */
-	Opt_uname, Opt_remotename, Opt_trans, Opt_cache, Opt_cachetag,
+	Opt_uname, Opt_remotename, Opt_cache, Opt_cachetag,
 	/* Options that take no arguments */
 	Opt_nodevmap,
 	/* Cache options */
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 1ef16bd..3abc447 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -381,7 +381,7 @@
 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct p9_fid *fid = iocb->ki_filp->private_data;
-	int ret, err;
+	int ret, err = 0;
 
 	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
 		 iov_iter_count(to), iocb->ki_pos);
diff --git a/fs/Kconfig b/fs/Kconfig
index 011f433..da3f32f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -11,18 +11,15 @@
 if BLOCK
 
 source "fs/ext2/Kconfig"
-source "fs/ext3/Kconfig"
 source "fs/ext4/Kconfig"
-source "fs/jbd/Kconfig"
 source "fs/jbd2/Kconfig"
 
 config FS_MBCACHE
 # Meta block cache for Extended Attributes (ext2/ext3/ext4)
 	tristate
 	default y if EXT2_FS=y && EXT2_FS_XATTR
-	default y if EXT3_FS=y && EXT3_FS_XATTR
 	default y if EXT4_FS=y
-	default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS
+	default m if EXT2_FS_XATTR || EXT4_FS
 
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index cb20e4b..f79cf40 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_SIGNALFD)		+= signalfd.o
 obj-$(CONFIG_TIMERFD)		+= timerfd.o
 obj-$(CONFIG_EVENTFD)		+= eventfd.o
+obj-$(CONFIG_USERFAULTFD)	+= userfaultfd.o
 obj-$(CONFIG_AIO)               += aio.o
 obj-$(CONFIG_FS_DAX)		+= dax.o
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
@@ -62,12 +63,10 @@
 # Do not add any filesystems before this line
 obj-$(CONFIG_FSCACHE)		+= fscache/
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
-obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
 obj-$(CONFIG_EXT2_FS)		+= ext2/
 # We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
 # unless explicitly requested by rootfstype
 obj-$(CONFIG_EXT4_FS)		+= ext4/
-obj-$(CONFIG_JBD)		+= jbd/
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_CRAMFS)		+= cramfs/
 obj-$(CONFIG_SQUASHFS)		+= squashfs/
diff --git a/fs/aio.c b/fs/aio.c
index 480440f..155f842 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -308,15 +308,9 @@
 	}
 }
 
-static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_mremap(struct vm_area_struct *vma)
 {
-	vma->vm_flags |= VM_DONTEXPAND;
-	vma->vm_ops = &generic_file_vm_ops;
-	return 0;
-}
-
-static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
-{
+	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
 	int i, res = -EINVAL;
@@ -342,9 +336,24 @@
 	return res;
 }
 
+static const struct vm_operations_struct aio_ring_vm_ops = {
+	.mremap		= aio_ring_mremap,
+#if IS_ENABLED(CONFIG_MMU)
+	.fault		= filemap_fault,
+	.map_pages	= filemap_map_pages,
+	.page_mkwrite	= filemap_page_mkwrite,
+#endif
+};
+
+static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	vma->vm_flags |= VM_DONTEXPAND;
+	vma->vm_ops = &aio_ring_vm_ops;
+	return 0;
+}
+
 static const struct file_operations aio_ring_fops = {
 	.mmap = aio_ring_mmap,
-	.mremap = aio_ring_remap,
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
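
Routing mremap through vm_operations_struct instead of file_operations means the callback receives the vma, from which vma->vm_file is still reachable, and it lets one mmap handler install fault and mremap behaviour together. A minimal sketch of that shape, assuming the mremap signature shown above; names are illustrative:

#include <linux/fs.h>
#include <linux/mm.h>

static int demo_mremap(struct vm_area_struct *vma)
{
	/* The backing file is still reachable via vma->vm_file. */
	return 0;
}

static const struct vm_operations_struct demo_vm_ops = {
	.mremap	= demo_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault	= filemap_fault,
#endif
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &demo_vm_ops;	/* installs both callbacks at once */
	return 0;
}

static const struct file_operations demo_fops = {
	.mmap = demo_mmap,
};
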
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1982437..22ea424 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -28,6 +28,7 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/cleancache.h>
+#include <linux/dax.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -441,7 +442,7 @@
  * accessible at this address.
  */
 long bdev_direct_access(struct block_device *bdev, sector_t sector,
-			void **addr, unsigned long *pfn, long size)
+			void __pmem **addr, unsigned long *pfn, long size)
 {
 	long avail;
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
@@ -462,7 +463,7 @@
 	sector += get_start_sect(bdev);
 	if (sector % (PAGE_SIZE / 512))
 		return -EINVAL;
-	avail = ops->direct_access(bdev, sector, addr, pfn, size);
+	avail = ops->direct_access(bdev, sector, addr, pfn);
 	if (!avail)
 		return -ERANGE;
 	return min(avail, size);
@@ -1769,7 +1770,7 @@
 {
 	struct inode *inode, *old_inode = NULL;
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&blockdev_superblock->s_inode_list_lock);
 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
 		struct address_space *mapping = inode->i_mapping;
 
@@ -1781,13 +1782,13 @@
 		}
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&blockdev_superblock->s_inode_list_lock);
 		/*
 		 * We hold a reference to 'inode' so it couldn't have been
 		 * removed from s_inodes list while we dropped the
-		 * inode_sb_list_lock.  We cannot iput the inode now as we can
+		 * s_inode_list_lock. We cannot iput the inode now as we can
 		 * be holding the last reference and we cannot iput it under
-		 * inode_sb_list_lock. So we keep the reference and iput it
+		 * s_inode_list_lock. So we keep the reference and iput it
 		 * later.
 		 */
 		iput(old_inode);
@@ -1795,8 +1796,8 @@
 
 		func(I_BDEV(inode), arg);
 
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&blockdev_superblock->s_inode_list_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&blockdev_superblock->s_inode_list_lock);
 	iput(old_inode);
 }
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 802fabb..ecbc63d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -206,10 +206,33 @@
 		return -ENOMEM;
 
 	ref->root_id = root_id;
-	if (key)
+	if (key) {
 		ref->key_for_search = *key;
-	else
+		/*
+		 * We can often find data backrefs with an offset that is too
+		 * large (>= LLONG_MAX, maximum allowed file offset) due to
+		 * underflows when subtracting the data offset of a file's
+		 * corresponding extent data item from that file's offset. This
+		 * can happen for example in the clone ioctl.
+		 * So if we detect such a case we set the search key's offset to
+		 * zero to make sure we will find the matching file extent item
+		 * at add_all_parents(), otherwise we will miss it because the
+		 * offset taken from the backref is much larger than the offset
+		 * of the file extent item. This can make us scan a very large
+		 * number of file extent items, but at least it will not make
+		 * us miss any.
+		 * This is an ugly workaround for a behaviour that should have
+		 * never existed, but it does and a fix for the clone ioctl
+		 * would touch a lot of places, cause backwards incompatibility
+		 * and would not fix the problem for extents cloned with older
+		 * kernels.
+		 */
+		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
+		    ref->key_for_search.offset >= LLONG_MAX)
+			ref->key_for_search.offset = 0;
+	} else {
 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
+	}
 
 	ref->inode_list = NULL;
 	ref->level = level;
@@ -632,7 +655,7 @@
 			struct btrfs_delayed_tree_ref *ref;
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
-			ret = __add_prelim_ref(prefs, ref->root, NULL,
+			ret = __add_prelim_ref(prefs, 0, NULL,
 					       ref->level + 1, ref->parent,
 					       node->bytenr,
 					       node->ref_mod * sgn, GFP_ATOMIC);
@@ -664,11 +687,7 @@
 			struct btrfs_delayed_data_ref *ref;
 
 			ref = btrfs_delayed_node_to_data_ref(node);
-
-			key.objectid = ref->objectid;
-			key.type = BTRFS_EXTENT_DATA_KEY;
-			key.offset = ref->offset;
-			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+			ret = __add_prelim_ref(prefs, 0, NULL, 0,
 					       ref->parent, node->bytenr,
 					       node->ref_mod * sgn, GFP_ATOMIC);
 			break;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 54114b4..5f745ea 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1159,8 +1159,10 @@
 
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
-		if (ret)
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
 			return ret;
+		}
 	}
 
 	if (buf == root->node) {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index aac314e..938efe3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1300,7 +1300,7 @@
 	/* for raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
 
-	unsigned int ro:1;
+	unsigned int ro;
 	unsigned int iref:1;
 	unsigned int has_caching_ctl:1;
 	unsigned int removed:1;
@@ -1518,12 +1518,6 @@
 	 */
 	struct mutex ordered_operations_mutex;
 
-	/*
-	 * Same as ordered_operations_mutex except this is for ordered extents
-	 * and not the operations.
-	 */
-	struct mutex ordered_extent_flush_mutex;
-
 	struct rw_semaphore commit_root_sem;
 
 	struct rw_semaphore cleanup_work_sem;
@@ -3437,6 +3431,8 @@
 			     struct btrfs_root *root, u64 group_start,
 			     struct extent_map *em);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
+void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root);
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
@@ -3495,9 +3491,9 @@
 void btrfs_block_rsv_release(struct btrfs_root *root,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes);
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
@@ -4073,6 +4069,7 @@
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 		     unsigned int line, int errno, const char *fmt, ...);
 
+const char *btrfs_decode_error(int errno);
 
 __cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
@@ -4185,8 +4182,7 @@
 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, struct extent_buffer *buf,
 			  struct extent_buffer *cow);
-void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
-			      struct btrfs_pending_snapshot *pending,
+void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve);
 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
 			      struct btrfs_pending_snapshot *pending);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5e307bd..9ebd34f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1730,6 +1730,7 @@
 	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
 	bdi->congested_fn	= btrfs_congested_fn;
 	bdi->congested_data	= info;
+	bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
 	return 0;
 }
 
@@ -2613,7 +2614,6 @@
 
 
 	mutex_init(&fs_info->ordered_operations_mutex);
-	mutex_init(&fs_info->ordered_extent_flush_mutex);
 	mutex_init(&fs_info->tree_log_mutex);
 	mutex_init(&fs_info->chunk_mutex);
 	mutex_init(&fs_info->transaction_kthread_mutex);
@@ -2955,8 +2955,9 @@
 	if (fs_info->fs_devices->missing_devices >
 	     fs_info->num_tolerated_disk_barrier_failures &&
 	    !(sb->s_flags & MS_RDONLY)) {
-		printk(KERN_WARNING "BTRFS: "
-			"too many missing devices, writeable mount is not allowed\n");
+		pr_warn("BTRFS: missing devices(%llu) exceeds the limit(%d), writeable mount is not allowed\n",
+			fs_info->fs_devices->missing_devices,
+			fs_info->num_tolerated_disk_barrier_failures);
 		goto fail_sysfs;
 	}
 
@@ -3763,6 +3764,15 @@
 	cancel_work_sync(&fs_info->async_reclaim_work);
 
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+		/*
+		 * If the cleaner thread is stopped and there are
+		 * block groups queued for removal, the deletion will be
+		 * skipped when we quit the cleaner thread.
+		 */
+		mutex_lock(&root->fs_info->cleaner_mutex);
+		btrfs_delete_unused_bgs(root->fs_info);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
 		ret = btrfs_commit_super(root);
 		if (ret)
 			btrfs_err(fs_info, "commit super ret %d", ret);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 07204bf..5411f0a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1316,8 +1316,7 @@
 	return ret;
 }
 
-static noinline u32 extent_data_ref_count(struct btrfs_root *root,
-					  struct btrfs_path *path,
+static noinline u32 extent_data_ref_count(struct btrfs_path *path,
 					  struct btrfs_extent_inline_ref *iref)
 {
 	struct btrfs_key key;
@@ -1883,10 +1882,77 @@
 	return ret;
 }
 
-static int btrfs_issue_discard(struct block_device *bdev,
-				u64 start, u64 len)
+#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
+static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+			       u64 *discarded_bytes)
 {
-	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
+	int j, ret = 0;
+	u64 bytes_left, end;
+	u64 aligned_start = ALIGN(start, 1 << 9);
+
+	if (WARN_ON(start != aligned_start)) {
+		len -= aligned_start - start;
+		len = round_down(len, 1 << 9);
+		start = aligned_start;
+	}
+
+	*discarded_bytes = 0;
+
+	if (!len)
+		return 0;
+
+	end = start + len;
+	bytes_left = len;
+
+	/* Skip any superblocks on this device. */
+	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
+		u64 sb_start = btrfs_sb_offset(j);
+		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
+		u64 size = sb_start - start;
+
+		if (!in_range(sb_start, start, bytes_left) &&
+		    !in_range(sb_end, start, bytes_left) &&
+		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
+			continue;
+
+		/*
+		 * Superblock spans beginning of range.  Adjust start and
+		 * try again.
+		 */
+		if (sb_start <= start) {
+			start += sb_end - start;
+			if (start > end) {
+				bytes_left = 0;
+				break;
+			}
+			bytes_left = end - start;
+			continue;
+		}
+
+		if (size) {
+			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
+						   GFP_NOFS, 0);
+			if (!ret)
+				*discarded_bytes += size;
+			else if (ret != -EOPNOTSUPP)
+				return ret;
+		}
+
+		start = sb_end;
+		if (start > end) {
+			bytes_left = 0;
+			break;
+		}
+		bytes_left = end - start;
+	}
+
+	if (bytes_left) {
+		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
+					   GFP_NOFS, 0);
+		if (!ret)
+			*discarded_bytes += bytes_left;
+	}
+	return ret;
 }
 
 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
@@ -1907,14 +1973,16 @@
 
 
 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+			u64 bytes;
 			if (!stripe->dev->can_discard)
 				continue;
 
 			ret = btrfs_issue_discard(stripe->dev->bdev,
 						  stripe->physical,
-						  stripe->length);
+						  stripe->length,
+						  &bytes);
 			if (!ret)
-				discarded_bytes += stripe->length;
+				discarded_bytes += bytes;
 			else if (ret != -EOPNOTSUPP)
 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
 
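The reworked btrfs_issue_discard() carves superblock locations out of the range before discarding; in_range() covers the three ways a superblock can intersect the span (its start inside the span, its end inside, or the span wholly inside the superblock). A stand-alone check of that predicate with made-up offsets:

#include <stdio.h>
#include <stdint.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))

/* Does [start, start+len) overlap [sb_start, sb_start+sb_len)? */
static int overlaps(uint64_t start, uint64_t len,
		    uint64_t sb_start, uint64_t sb_len)
{
	return in_range(sb_start, start, len) ||
	       in_range(sb_start + sb_len, start, len) ||
	       in_range(start, sb_start, sb_len);
}

int main(void)
{
	/* Illustrative: a 4 KiB "superblock" at 64 KiB. */
	uint64_t sb_start = 65536, sb_len = 4096;

	printf("%d\n", overlaps(0, 65536, sb_start, sb_len));	/* 0 */
	printf("%d\n", overlaps(0, 65537, sb_start, sb_len));	/* 1 */
	printf("%d\n", overlaps(66000, 100, sb_start, sb_len));	/* 1 */
	return 0;
}
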
@@ -6062,20 +6130,19 @@
 			       struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_group_cache *block_group, *tmp;
+	struct list_head *deleted_bgs;
 	struct extent_io_tree *unpin;
 	u64 start;
 	u64 end;
 	int ret;
 
-	if (trans->aborted)
-		return 0;
-
 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
 		unpin = &fs_info->freed_extents[1];
 	else
 		unpin = &fs_info->freed_extents[0];
 
-	while (1) {
+	while (!trans->aborted) {
 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY, NULL);
@@ -6094,6 +6161,34 @@
 		cond_resched();
 	}
 
+	/*
+	 * Transaction is finished.  We don't need the lock anymore.  We
+	 * do need to clean up the block groups in case of a transaction
+	 * abort.
+	 */
+	deleted_bgs = &trans->transaction->deleted_bgs;
+	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
+		u64 trimmed = 0;
+
+		ret = -EROFS;
+		if (!trans->aborted)
+			ret = btrfs_discard_extent(root,
+						   block_group->key.objectid,
+						   block_group->key.offset,
+						   &trimmed);
+
+		list_del_init(&block_group->bg_list);
+		btrfs_put_block_group_trimming(block_group);
+		btrfs_put_block_group(block_group);
+
+		if (ret) {
+			const char *errstr = btrfs_decode_error(ret);
+			btrfs_warn(fs_info,
+				   "Discard failed while removing blockgroup: errno=%d %s\n",
+				   ret, errstr);
+		}
+	}
+
 	return 0;
 }
 
@@ -6349,7 +6444,7 @@
 	} else {
 		if (found_extent) {
 			BUG_ON(is_data && refs_to_drop !=
-			       extent_data_ref_count(root, path, iref));
+			       extent_data_ref_count(path, iref));
 			if (iref) {
 				BUG_ON(path->slots[0] != extent_slot);
 			} else {
@@ -7567,9 +7662,6 @@
 
 /*
  * finds a free extent and does all the dirty work required for allocation
- * returns the key for the extent through ins, and a tree buffer for
- * the first block of the extent through buf.
- *
  * returns the tree buffer or an ERR_PTR on error.
  */
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
@@ -8723,14 +8815,13 @@
 	return flags;
 }
 
-static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
 	u64 min_allocable_bytes;
 	int ret = -ENOSPC;
 
-
 	/*
 	 * We need some metadata space and system metadata space for
 	 * allocating chunks in some corner cases until we force to set
@@ -8747,6 +8838,7 @@
 	spin_lock(&cache->lock);
 
 	if (cache->ro) {
+		cache->ro++;
 		ret = 0;
 		goto out;
 	}
@@ -8758,7 +8850,7 @@
 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
 	    min_allocable_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
-		cache->ro = 1;
+		cache->ro++;
 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
 		ret = 0;
 	}
@@ -8768,7 +8860,7 @@
 	return ret;
 }
 
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache)
 
 {
@@ -8776,8 +8868,6 @@
 	u64 alloc_flags;
 	int ret;
 
-	BUG_ON(cache->ro);
-
 again:
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
@@ -8820,7 +8910,7 @@
 			goto out;
 	}
 
-	ret = set_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
@@ -8828,7 +8918,7 @@
 			     CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = set_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(root, cache->flags);
@@ -8891,7 +8981,7 @@
 	return free_bytes;
 }
 
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
@@ -8901,11 +8991,13 @@
 
 	spin_lock(&sinfo->lock);
 	spin_lock(&cache->lock);
-	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
-		    cache->bytes_super - btrfs_block_group_used(&cache->item);
-	sinfo->bytes_readonly -= num_bytes;
-	cache->ro = 0;
-	list_del_init(&cache->ro_list);
+	if (!--cache->ro) {
+		num_bytes = cache->key.offset - cache->reserved -
+			    cache->pinned - cache->bytes_super -
+			    btrfs_block_group_used(&cache->item);
+		sinfo->bytes_readonly -= num_bytes;
+		list_del_init(&cache->ro_list);
+	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 }
@@ -9421,7 +9513,7 @@
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			spin_lock(&info->unused_bgs_lock);
 			/* Should always be true but just in case. */
@@ -9449,11 +9541,11 @@
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_RAID0],
 				list)
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_SINGLE],
 				list)
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 	}
 
 	init_global_block_rsv(info);
@@ -9834,6 +9926,11 @@
 	 * currently running transaction might finish and a new one start,
 	 * allowing for new block groups to be created that can reuse the same
 	 * physical device locations unless we take this special care.
+	 *
+	 * There may also be an implicit trim operation if the file system
+	 * is mounted with -odiscard. The same protections must remain
+	 * in place until the extents have been discarded completely when
+	 * the transaction commit has completed.
 	 */
 	remove_em = (atomic_read(&block_group->trimming) == 0);
 	/*
@@ -9908,6 +10005,7 @@
 	spin_lock(&fs_info->unused_bgs_lock);
 	while (!list_empty(&fs_info->unused_bgs)) {
 		u64 start, end;
+		int trimming;
 
 		block_group = list_first_entry(&fs_info->unused_bgs,
 					       struct btrfs_block_group_cache,
@@ -9941,7 +10039,7 @@
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = set_block_group_ro(block_group, 0);
+		ret = inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;
@@ -9955,7 +10053,7 @@
 		/* 1 for btrfs_orphan_reserve_metadata() */
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			ret = PTR_ERR(trans);
 			goto next;
 		}
@@ -9982,14 +10080,14 @@
 				  EXTENT_DIRTY, GFP_NOFS);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
 				  EXTENT_DIRTY, GFP_NOFS);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
@@ -10007,12 +10105,39 @@
 		spin_unlock(&block_group->lock);
 		spin_unlock(&space_info->lock);
 
+		/* DISCARD can flip during remount */
+		trimming = btrfs_test_opt(root, DISCARD);
+
+		/* Implicit trim during transaction commit. */
+		if (trimming)
+			btrfs_get_block_group_trimming(block_group);
+
 		/*
 		 * Btrfs_remove_chunk will abort the transaction if things go
 		 * horribly wrong.
 		 */
 		ret = btrfs_remove_chunk(trans, root,
 					 block_group->key.objectid);
+
+		if (ret) {
+			if (trimming)
+				btrfs_put_block_group_trimming(block_group);
+			goto end_trans;
+		}
+
+		/*
+		 * If we're not mounted with -odiscard, we can just forget
+		 * about this block group. Otherwise we'll need to wait
+		 * until transaction commit to do the actual discard.
+		 */
+		if (trimming) {
+			WARN_ON(!list_empty(&block_group->bg_list));
+			spin_lock(&trans->transaction->deleted_bgs_lock);
+			list_move(&block_group->bg_list,
+				  &trans->transaction->deleted_bgs);
+			spin_unlock(&trans->transaction->deleted_bgs_lock);
+			btrfs_get_block_group(block_group);
+		}
 end_trans:
 		btrfs_end_transaction(trans, root);
 next:
@@ -10066,10 +10191,99 @@
 	return unpin_extent_range(root, start, end, false);
 }
 
+/*
+ * It used to be that old block groups would be left around forever.
+ * Iterating over them would be enough to trim unused space.  Since we
+ * now automatically remove them, we also need to iterate over unallocated
+ * space.
+ *
+ * We don't want a transaction for this since the discard may take a
+ * substantial amount of time.  We don't require that a transaction be
+ * running, but we do need to take a running transaction into account
+ * to ensure that we're not discarding chunks that were released in
+ * the current transaction.
+ *
+ * Holding the chunks lock will prevent other threads from allocating
+ * or releasing chunks, but it won't prevent a running transaction
+ * from committing and releasing the memory that the pending chunks
+ * list head uses.  For that, we need to take a reference to the
+ * transaction.
+ */
+static int btrfs_trim_free_extents(struct btrfs_device *device,
+				   u64 minlen, u64 *trimmed)
+{
+	u64 start = 0, len = 0;
+	int ret;
+
+	*trimmed = 0;
+
+	/* Not writeable = nothing to do. */
+	if (!device->writeable)
+		return 0;
+
+	/* No free space = nothing to do. */
+	if (device->total_bytes <= device->bytes_used)
+		return 0;
+
+	ret = 0;
+
+	while (1) {
+		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+		struct btrfs_transaction *trans;
+		u64 bytes;
+
+		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
+		if (ret)
+			return ret;
+
+		down_read(&fs_info->commit_root_sem);
+
+		spin_lock(&fs_info->trans_lock);
+		trans = fs_info->running_transaction;
+		if (trans)
+			atomic_inc(&trans->use_count);
+		spin_unlock(&fs_info->trans_lock);
+
+		ret = find_free_dev_extent_start(trans, device, minlen, start,
+						 &start, &len);
+		if (trans)
+			btrfs_put_transaction(trans);
+
+		if (ret) {
+			up_read(&fs_info->commit_root_sem);
+			mutex_unlock(&fs_info->chunk_mutex);
+			if (ret == -ENOSPC)
+				ret = 0;
+			break;
+		}
+
+		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
+		up_read(&fs_info->commit_root_sem);
+		mutex_unlock(&fs_info->chunk_mutex);
+
+		if (ret)
+			break;
+
+		start += len;
+		*trimmed += bytes;
+
+		if (fatal_signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+	}
+
+	return ret;
+}
+
 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache = NULL;
+	struct btrfs_device *device;
+	struct list_head *devices;
 	u64 group_trimmed;
 	u64 start;
 	u64 end;
@@ -10124,6 +10338,18 @@
 		cache = next_block_group(fs_info->tree_root, cache);
 	}
 
+	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	devices = &root->fs_info->fs_devices->alloc_list;
+	list_for_each_entry(device, devices, dev_alloc_list) {
+		ret = btrfs_trim_free_extents(device, range->minlen,
+					      &group_trimmed);
+		if (ret)
+			break;
+
+		trimmed += group_trimmed;
+	}
+	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
 	range->len = trimmed;
 	return ret;
 }
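
The extent-tree.c hunks above turn cache->ro from a boolean into a holder
count, so that relocation, scrub and the unused-block-group cleaner can each
hold a group read-only without stepping on one another. A minimal userspace
sketch of the intended pairing, with illustrative names (the kernel does this
accounting under sinfo->lock and cache->lock):

  #include <assert.h>

  struct block_group {
  	int ro;		/* number of read-only holders; was a bool */
  };

  static void inc_block_group_ro(struct block_group *bg)
  {
  	bg->ro++;	/* every holder takes its own reference */
  }

  static void dec_block_group_ro(struct block_group *bg)
  {
  	assert(bg->ro > 0);
  	if (--bg->ro == 0) {
  		/* last holder: return the reserved bytes to the space
  		 * info and drop the group from the ro list, as the
  		 * kernel code above does */
  	}
  }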
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68b12bb..f1018cf 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2723,6 +2723,12 @@
 		btrfs_bio->csum = NULL;
 		btrfs_bio->csum_allocated = NULL;
 		btrfs_bio->end_io = NULL;
+
+#ifdef CONFIG_BLK_CGROUP
+		/* FIXME, put this into bio_clone_bioset */
+		if (bio->bi_css)
+			bio_associate_blkcg(new, bio->bi_css);
+#endif
 	}
 	return new;
 }
@@ -2783,6 +2789,7 @@
 }
 
 static int submit_extent_page(int rw, struct extent_io_tree *tree,
+			      struct writeback_control *wbc,
 			      struct page *page, sector_t sector,
 			      size_t size, unsigned long offset,
 			      struct block_device *bdev,
@@ -2817,6 +2824,8 @@
 			}
 			bio = NULL;
 		} else {
+			if (wbc)
+				wbc_account_io(wbc, page, page_size);
 			return 0;
 		}
 	}
@@ -2829,6 +2838,10 @@
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
+	if (wbc) {
+		wbc_init_bio(wbc, bio);
+		wbc_account_io(wbc, page, page_size);
+	}
 
 	if (bio_ret)
 		*bio_ret = bio;
@@ -3039,7 +3052,7 @@
 		}
 
 		pnr -= page->index;
-		ret = submit_extent_page(rw, tree, page,
+		ret = submit_extent_page(rw, tree, NULL, page,
 					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
@@ -3434,7 +3447,7 @@
 				       page->index, cur, end);
 			}
 
-			ret = submit_extent_page(write_flags, tree, page,
+			ret = submit_extent_page(write_flags, tree, wbc, page,
 						 sector, iosize, pg_offset,
 						 bdev, &epd->bio, max_nr,
 						 end_bio_extent_writepage,
@@ -3738,7 +3751,7 @@
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4603,9 +4616,7 @@
 {
 	struct extent_buffer *eb = NULL;
 
-	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
-	if (eb == NULL)
-		return NULL;
+	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
 	eb->start = start;
 	eb->len = len;
 	eb->fs_info = fs_info;
@@ -4863,7 +4874,7 @@
 		return NULL;
 
 	for (i = 0; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, GFP_NOFS);
+		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
 		if (!p)
 			goto free_eb;
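
The extent_io.c changes thread the writeback_control down into
submit_extent_page() so that data bios are tagged with the cgroup that dirtied
the pages (read paths pass NULL). A hedged sketch of the pattern;
wbc_init_bio() and wbc_account_io() are the helpers used in the hunks above,
while submit_one_page() is hypothetical:

  static int submit_one_page(struct writeback_control *wbc, struct bio *bio,
  			   struct page *page, size_t len)
  {
  	if (wbc) {
  		/* associate the bio with the blkcg that owns this wbc */
  		wbc_init_bio(wbc, bio);
  		/* charge the bytes to the cgroup that dirtied the page */
  		wbc_account_io(wbc, page, len);
  	}
  	/* ... add the page to the bio and submit as before ... */
  	return 0;
  }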
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index fb5a6b1..abe3a66 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -3272,35 +3272,23 @@
 	return ret;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
 {
-	int ret;
+	atomic_inc(&cache->trimming);
+}
 
-	*trimmed = 0;
+void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+{
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	bool cleanup;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	atomic_inc(&block_group->trimming);
+	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
+		   block_group->removed);
 	spin_unlock(&block_group->lock);
 
-	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
-	if (ret)
-		goto out;
-
-	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
-out:
-	spin_lock(&block_group->lock);
-	if (atomic_dec_and_test(&block_group->trimming) &&
-	    block_group->removed) {
-		struct extent_map_tree *em_tree;
-		struct extent_map *em;
-
-		spin_unlock(&block_group->lock);
-
+	if (cleanup) {
 		lock_chunks(block_group->fs_info->chunk_root);
 		em_tree = &block_group->fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
@@ -3324,10 +3312,31 @@
 		 * this block group have left 1 entry each one. Free them.
 		 */
 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
-	} else {
-		spin_unlock(&block_group->lock);
 	}
+}
 
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+	int ret;
+
+	*trimmed = 0;
+
+	spin_lock(&block_group->lock);
+	if (block_group->removed) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
+	btrfs_get_block_group_trimming(block_group);
+	spin_unlock(&block_group->lock);
+
+	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+	if (ret)
+		goto out;
+
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+out:
+	btrfs_put_block_group_trimming(block_group);
 	return ret;
 }
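
The free-space-cache.c refactoring splits the trimming reference handling into
get/put helpers so that block-group deletion can pin a group across a
transaction commit. A C11 userspace sketch of the put side, assuming the
caller samples "removed" consistently (the kernel reads it under
block_group->lock):

  #include <stdatomic.h>
  #include <stdbool.h>

  struct bg {
  	atomic_int trimming;	/* outstanding trim references */
  	bool removed;		/* group was logically deleted */
  };

  static void get_trimming(struct bg *b)
  {
  	atomic_fetch_add(&b->trimming, 1);
  }

  static void put_trimming(struct bg *b)
  {
  	/* the last put after removal performs the deferred cleanup */
  	if (atomic_fetch_sub(&b->trimming, 1) == 1 && b->removed) {
  		/* free the extent map and free-space cache here */
  	}
  }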
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f924d9a..237da01 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3658,6 +3658,35 @@
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 
+	/*
+	 * We don't persist the id of the transaction where an unlink operation
+	 * against the inode was last made. So here we assume the inode might
+	 * have been evicted, and therefore the exact value of last_unlink_trans
+	 * was lost, so we set it to last_trans to avoid metadata inconsistencies
+	 * between the inode and its parent if the inode is fsync'ed and the log
+	 * replayed. For example, in the scenario:
+	 *
+	 * touch mydir/foo
+	 * ln mydir/foo mydir/bar
+	 * sync
+	 * unlink mydir/bar
+	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
+	 * xfs_io -c fsync mydir/foo
+	 * <power failure>
+	 * mount fs, triggers fsync log replay
+	 *
+	 * We must make sure that when we fsync our inode foo we also log its
+	 * parent inode, otherwise after log replay the parent still has the
+	 * dentry with the "bar" name but our inode foo has a link count of 1
+	 * and doesn't have an inode ref with the name "bar" anymore.
+	 *
+	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
+	 * but it guarantees correctness at the expense of occasional full
+	 * transaction commits on fsync if our inode is a directory, or if our
+	 * inode is not a directory, logging its parent unnecessarily.
+	 */
+	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+
 	path->slots[0]++;
 	if (inode->i_nlink != 1 ||
 	    path->slots[0] >= btrfs_header_nritems(leaf))
@@ -7958,7 +7987,11 @@
 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 				       u64 first_sector, gfp_t gfp_flags)
 {
-	return btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
+	struct bio *bio;
+	bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
+	if (bio)
+		bio_associate_current(bio);
+	return bio;
 }
 
 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0770c91..0adf542 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1030,6 +1030,7 @@
 	struct extent_map *em;
 	int ret = 1;
 	bool next_mergeable = true;
+	bool prev_mergeable = true;
 
 	/*
 	 * make sure that once we start defragging an extent, we keep on
@@ -1050,13 +1051,16 @@
 		goto out;
 	}
 
+	if (!*defrag_end)
+		prev_mergeable = false;
+
 	next_mergeable = defrag_check_next_extent(inode, em);
 	/*
 	 * we hit a real extent, if it is big or the next extent is not a
 	 * real extent, don't bother defragging it
 	 */
 	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
-	    (em->len >= thresh || !next_mergeable))
+	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
 		ret = 0;
 out:
 	/*
@@ -1933,6 +1937,7 @@
 	u64 found_transid;
 	struct extent_buffer *leaf;
 	struct btrfs_ioctl_search_header sh;
+	struct btrfs_key test;
 	unsigned long item_off;
 	unsigned long item_len;
 	int nritems;
@@ -2016,12 +2021,17 @@
 	}
 advance_key:
 	ret = 0;
-	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
+	test.objectid = sk->max_objectid;
+	test.type = sk->max_type;
+	test.offset = sk->max_offset;
+	if (btrfs_comp_cpu_keys(key, &test) >= 0)
+		ret = 1;
+	else if (key->offset < (u64)-1)
 		key->offset++;
-	else if (key->type < (u8)-1 && key->type < sk->max_type) {
+	else if (key->type < (u8)-1) {
 		key->offset = 0;
 		key->type++;
-	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
+	} else if (key->objectid < (u64)-1) {
 		key->offset = 0;
 		key->type = 0;
 		key->objectid++;
@@ -2842,8 +2852,7 @@
 		swap(inode1, inode2);
 
 	mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
-	if (inode1 != inode2)
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+	mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
 }
 
 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
@@ -2861,8 +2870,7 @@
 		swap(loff1, loff2);
 	}
 	lock_extent_range(inode1, loff1, len);
-	if (inode1 != inode2)
-		lock_extent_range(inode2, loff2, len);
+	lock_extent_range(inode2, loff2, len);
 }
 
 struct cmp_pages {
@@ -3787,13 +3795,7 @@
 		goto out_fput;
 
 	if (!same_inode) {
-		if (inode < src) {
-			mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD);
-		} else {
-			mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
-		}
+		btrfs_double_inode_lock(src, inode);
 	} else {
 		mutex_lock(&src->i_mutex);
 	}
@@ -3843,8 +3845,7 @@
 
 		lock_extent_range(src, lock_start, lock_len);
 	} else {
-		lock_extent_range(src, off, len);
-		lock_extent_range(inode, destoff, len);
+		btrfs_double_extent_lock(src, off, inode, destoff, len);
 	}
 
 	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
@@ -3855,9 +3856,7 @@
 
 		unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
 	} else {
-		unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, destoff,
-			      destoff + len - 1);
+		btrfs_double_extent_unlock(src, off, inode, destoff, len);
 	}
 	/*
 	 * Truncate page cache pages so that future reads will see the cloned
@@ -3866,17 +3865,10 @@
 	truncate_inode_pages_range(&inode->i_data, destoff,
 				   PAGE_CACHE_ALIGN(destoff + len) - 1);
 out_unlock:
-	if (!same_inode) {
-		if (inode < src) {
-			mutex_unlock(&src->i_mutex);
-			mutex_unlock(&inode->i_mutex);
-		} else {
-			mutex_unlock(&inode->i_mutex);
-			mutex_unlock(&src->i_mutex);
-		}
-	} else {
+	if (!same_inode)
+		btrfs_double_inode_unlock(src, inode);
+	else
 		mutex_unlock(&src->i_mutex);
-	}
 out_fput:
 	fdput(src_file);
 out_drop_write:
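
The clone and extent-same paths above now share btrfs_double_inode_lock() and
btrfs_double_extent_lock() instead of open-coding the ordering at every call
site. The helpers depend on one global rule, lock the lower address (or
offset) first, so two tasks locking the same pair in opposite roles cannot
deadlock. A pthread sketch of that rule with hypothetical names:

  #include <pthread.h>

  struct obj {
  	pthread_mutex_t lock;
  };

  static void double_lock(struct obj *a, struct obj *b)
  {
  	if (a > b) {		/* canonical order: lower address first */
  		struct obj *tmp = a;
  		a = b;
  		b = tmp;
  	}
  	pthread_mutex_lock(&a->lock);
  	pthread_mutex_lock(&b->lock);	/* callers guarantee a != b */
  }

  static void double_unlock(struct obj *a, struct obj *b)
  {
  	pthread_mutex_unlock(&a->lock);	/* unlock order is irrelevant */
  	pthread_mutex_unlock(&b->lock);
  }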
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index f8229ef..d7e6baf 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -241,6 +241,7 @@
  */
 void btrfs_tree_lock(struct extent_buffer *eb)
 {
+	WARN_ON(eb->lock_owner == current->pid);
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0a02e24..fcf7265 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -61,9 +61,10 @@
 #define RBIO_CACHE_SIZE 1024
 
 enum btrfs_rbio_ops {
-	BTRFS_RBIO_WRITE	= 0,
-	BTRFS_RBIO_READ_REBUILD	= 1,
-	BTRFS_RBIO_PARITY_SCRUB	= 2,
+	BTRFS_RBIO_WRITE,
+	BTRFS_RBIO_READ_REBUILD,
+	BTRFS_RBIO_PARITY_SCRUB,
+	BTRFS_RBIO_REBUILD_MISSING,
 };
 
 struct btrfs_raid_bio {
@@ -602,6 +603,10 @@
 	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
 		return 0;
 
+	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
+	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
+		return 0;
+
 	return 1;
 }
 
@@ -793,7 +798,10 @@
 
 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 				async_read_rebuild(next);
-			else if (next->operation == BTRFS_RBIO_WRITE) {
+			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
+				steal_rbio(rbio, next);
+				async_read_rebuild(next);
+			} else if (next->operation == BTRFS_RBIO_WRITE) {
 				steal_rbio(rbio, next);
 				async_rmw_stripe(next);
 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
@@ -1805,7 +1813,8 @@
 	faila = rbio->faila;
 	failb = rbio->failb;
 
-	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
 		spin_lock_irq(&rbio->bio_list_lock);
 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 		spin_unlock_irq(&rbio->bio_list_lock);
@@ -1830,7 +1839,8 @@
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
 			 */
-			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
+			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
 			    (stripe == faila || stripe == failb)) {
 				page = page_in_rbio(rbio, stripe, pagenr, 0);
 			} else {
@@ -1939,7 +1949,8 @@
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
 			 */
-			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
+			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
 			    (stripe == faila || stripe == failb)) {
 				page = page_in_rbio(rbio, stripe, pagenr, 0);
 			} else {
@@ -1961,6 +1972,8 @@
 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 
 		rbio_orig_end_io(rbio, err);
+	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
+		rbio_orig_end_io(rbio, err);
 	} else if (err == 0) {
 		rbio->faila = -1;
 		rbio->failb = -1;
@@ -2096,7 +2109,8 @@
 	return 0;
 
 cleanup:
-	if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
+	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
 		rbio_orig_end_io(rbio, -EIO);
 	return -EIO;
 }
@@ -2227,8 +2241,9 @@
 	return rbio;
 }
 
-void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
-				   struct page *page, u64 logical)
+/* Used for both parity scrub and missing. */
+void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
+			    u64 logical)
 {
 	int stripe_offset;
 	int index;
@@ -2662,3 +2677,55 @@
 	if (!lock_stripe_add(rbio))
 		async_scrub_parity(rbio);
 }
+
+/* The following code is used for dev replace of a missing RAID 5/6 device. */
+
+struct btrfs_raid_bio *
+raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+			  struct btrfs_bio *bbio, u64 length)
+{
+	struct btrfs_raid_bio *rbio;
+
+	rbio = alloc_rbio(root, bbio, length);
+	if (IS_ERR(rbio))
+		return NULL;
+
+	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
+	bio_list_add(&rbio->bio_list, bio);
+	/*
+	 * This is a special bio which is used to hold the completion handler
+	 * and make this rbio look similar to the other rbio types
+	 */
+	ASSERT(!bio->bi_iter.bi_size);
+
+	rbio->faila = find_logical_bio_stripe(rbio, bio);
+	if (rbio->faila == -1) {
+		BUG();
+		kfree(rbio);
+		return NULL;
+	}
+
+	return rbio;
+}
+
+static void missing_raid56_work(struct btrfs_work *work)
+{
+	struct btrfs_raid_bio *rbio;
+
+	rbio = container_of(work, struct btrfs_raid_bio, work);
+	__raid56_parity_recover(rbio);
+}
+
+static void async_missing_raid56(struct btrfs_raid_bio *rbio)
+{
+	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+			missing_raid56_work, NULL, NULL);
+
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
+}
+
+void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
+{
+	if (!lock_stripe_add(rbio))
+		async_missing_raid56(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 2b5d797..8b69469 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -48,15 +48,21 @@
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len);
 
+void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
+			    u64 logical);
+
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len,
 			       struct btrfs_device *scrub_dev,
 			       unsigned long *dbitmap, int stripe_nsectors);
-void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
-				   struct page *page, u64 logical);
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
 
+struct btrfs_raid_bio *
+raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+			  struct btrfs_bio *bbio, u64 length);
+void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
+
 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
 #endif
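
Taken together, the new declarations give a three-step flow for rebuilding
data that lived on a missing RAID 5/6 device; a hedged usage sketch with error
handling elided (the scrub.c changes below are the real caller):

  	struct btrfs_raid_bio *rbio;

  	rbio = raid56_alloc_missing_rbio(root, bio, bbio, length);
  	if (rbio) {
  		/* one call per page that must be reconstructed */
  		raid56_add_scrub_pages(rbio, page, logical);
  		/* rebuild from parity, then complete the original bio */
  		raid56_submit_missing_rbio(rbio);
  	}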
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 0e7beea..4645cd1 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -328,6 +328,7 @@
 	struct btrfs_device *prev_dev;
 	u32 blocksize;
 	u64 length;
+	int real_stripes;
 	int nzones = 0;
 	int i;
 	unsigned long index = logical >> PAGE_CACHE_SHIFT;
@@ -369,7 +370,8 @@
 		goto error;
 	}
 
-	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
+	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+	for (nzones = 0; nzones < real_stripes; ++nzones) {
 		struct reada_zone *zone;
 
 		dev = bbio->stripes[nzones].dev;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 88cbb59..303babe 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2523,8 +2523,7 @@
  * counted. return -ENOENT if the block is root of reloc tree.
  */
 static noinline_for_stack
-struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
-				   struct backref_node *node)
+struct btrfs_root *select_one_root(struct backref_node *node)
 {
 	struct backref_node *next;
 	struct btrfs_root *root;
@@ -2912,7 +2911,7 @@
 		return 0;
 
 	BUG_ON(node->processed);
-	root = select_one_root(trans, node);
+	root = select_one_root(node);
 	if (root == ERR_PTR(-ENOENT)) {
 		update_processed_blocks(rc, node);
 		goto out;
@@ -3755,8 +3754,7 @@
  * helper to find next unprocessed extent
  */
 static noinline_for_stack
-int find_next_extent(struct btrfs_trans_handle *trans,
-		     struct reloc_control *rc, struct btrfs_path *path,
+int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 		     struct btrfs_key *extent_key)
 {
 	struct btrfs_key key;
@@ -3951,7 +3949,7 @@
 			continue;
 		}
 
-		ret = find_next_extent(trans, rc, path, &key);
+		ret = find_next_extent(rc, path, &key);
 		if (ret < 0)
 			err = ret;
 		if (ret != 0)
@@ -3976,6 +3974,10 @@
 			       sizeof(struct btrfs_extent_item_v0));
 			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
 						  &path_change);
+			if (ret < 0) {
+				err = ret;
+				break;
+			}
 			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
 				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
 			else
@@ -4140,7 +4142,7 @@
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root;
 	struct btrfs_key key;
-	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
+	u64 objectid;
 	int err = 0;
 
 	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4215,14 +4217,12 @@
 	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
 	BUG_ON(!rc->block_group);
 
-	if (!rc->block_group->ro) {
-		ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
-		if (ret) {
-			err = ret;
-			goto out;
-		}
-		rw = 1;
+	ret = btrfs_inc_block_group_ro(extent_root, rc->block_group);
+	if (ret) {
+		err = ret;
+		goto out;
 	}
+	rw = 1;
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -4294,7 +4294,7 @@
 	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
 out:
 	if (err && rw)
-		btrfs_set_block_group_rw(extent_root, rc->block_group);
+		btrfs_dec_block_group_ro(extent_root, rc->block_group);
 	iput(rc->data_inode);
 	btrfs_put_block_group(rc->block_group);
 	kfree(rc);
@@ -4594,8 +4594,7 @@
  * called before creating a snapshot. It calculates the metadata reservation
  * required for relocating tree blocks in the snapshot
  */
-void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
-			      struct btrfs_pending_snapshot *pending,
+void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve)
 {
 	struct btrfs_root *root;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9c146d8..9a11db0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -125,6 +125,7 @@
 		/* It is for the data with checksum */
 		unsigned int	data_corrected:1;
 	};
+	struct btrfs_work	work;
 };
 
 /* Used for the chunks with parity stripe such as RAID5/6 */
@@ -332,11 +333,14 @@
 	}
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 {
 	atomic_inc(&fs_info->scrubs_paused);
 	wake_up(&fs_info->scrub_pause_wait);
+}
 
+static void scrub_pause_off(struct btrfs_fs_info *fs_info)
+{
 	mutex_lock(&fs_info->scrub_lock);
 	__scrub_blocked_if_needed(fs_info);
 	atomic_dec(&fs_info->scrubs_paused);
@@ -345,6 +349,12 @@
 	wake_up(&fs_info->scrub_pause_wait);
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	scrub_pause_on(fs_info);
+	scrub_pause_off(fs_info);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -2074,21 +2084,7 @@
 	sbio = sctx->bios[sctx->curr];
 	sctx->curr = -1;
 	scrub_pending_bio_inc(sctx);
-
-	if (!sbio->bio->bi_bdev) {
-		/*
-		 * this case should not happen. If btrfs_map_block() is
-		 * wrong, it could happen for dev-replace operations on
-		 * missing devices when no mirrors are available, but in
-		 * this case it should already fail the mount.
-		 * This case is handled correctly (but _very_ slowly).
-		 */
-		printk_ratelimited(KERN_WARNING
-			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
-		bio_io_error(sbio->bio);
-	} else {
-		btrfsic_submit_bio(READ, sbio->bio);
-	}
+	btrfsic_submit_bio(READ, sbio->bio);
 }
 
 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
@@ -2165,6 +2161,134 @@
 	return 0;
 }
 
+static void scrub_missing_raid56_end_io(struct bio *bio)
+{
+	struct scrub_block *sblock = bio->bi_private;
+	struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
+
+	if (bio->bi_error)
+		sblock->no_io_error_seen = 0;
+
+	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
+}
+
+static void scrub_missing_raid56_worker(struct btrfs_work *work)
+{
+	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
+	struct scrub_ctx *sctx = sblock->sctx;
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	unsigned int is_metadata;
+	unsigned int have_csum;
+	u8 *csum;
+	u64 generation;
+	u64 logical;
+	struct btrfs_device *dev;
+
+	is_metadata = !(sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA);
+	have_csum = sblock->pagev[0]->have_csum;
+	csum = sblock->pagev[0]->csum;
+	generation = sblock->pagev[0]->generation;
+	logical = sblock->pagev[0]->logical;
+	dev = sblock->pagev[0]->dev;
+
+	if (sblock->no_io_error_seen) {
+		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
+					     have_csum, csum, generation,
+					     sctx->csum_size);
+	}
+
+	if (!sblock->no_io_error_seen) {
+		spin_lock(&sctx->stat_lock);
+		sctx->stat.read_errors++;
+		spin_unlock(&sctx->stat_lock);
+		printk_ratelimited_in_rcu(KERN_ERR
+			"BTRFS: I/O error rebulding logical %llu for dev %s\n",
+			logical, rcu_str_deref(dev->name));
+	} else if (sblock->header_error || sblock->checksum_error) {
+		spin_lock(&sctx->stat_lock);
+		sctx->stat.uncorrectable_errors++;
+		spin_unlock(&sctx->stat_lock);
+		printk_ratelimited_in_rcu(KERN_ERR
+			"BTRFS: failed to rebuild valid logical %llu for dev %s\n",
+			logical, rcu_str_deref(dev->name));
+	} else {
+		scrub_write_block_to_dev_replace(sblock);
+	}
+
+	scrub_block_put(sblock);
+
+	if (sctx->is_dev_replace &&
+	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
+		mutex_lock(&sctx->wr_ctx.wr_lock);
+		scrub_wr_submit(sctx);
+		mutex_unlock(&sctx->wr_ctx.wr_lock);
+	}
+
+	scrub_pending_bio_dec(sctx);
+}
+
+static void scrub_missing_raid56_pages(struct scrub_block *sblock)
+{
+	struct scrub_ctx *sctx = sblock->sctx;
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	u64 length = sblock->page_count * PAGE_SIZE;
+	u64 logical = sblock->pagev[0]->logical;
+	struct btrfs_bio *bbio;
+	struct bio *bio;
+	struct btrfs_raid_bio *rbio;
+	int ret;
+	int i;
+
+	ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
+			       &bbio, 0, 1);
+	if (ret || !bbio || !bbio->raid_map)
+		goto bbio_out;
+
+	if (WARN_ON(!sctx->is_dev_replace ||
+		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
+		/*
+		 * We shouldn't be scrubbing a missing device. Even for dev
+		 * replace, we should only get here for RAID 5/6. We either
+		 * managed to mount something with no mirrors remaining or
+		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
+		 */
+		goto bbio_out;
+	}
+
+	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		goto bbio_out;
+
+	bio->bi_iter.bi_sector = logical >> 9;
+	bio->bi_private = sblock;
+	bio->bi_end_io = scrub_missing_raid56_end_io;
+
+	rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
+	if (!rbio)
+		goto rbio_out;
+
+	for (i = 0; i < sblock->page_count; i++) {
+		struct scrub_page *spage = sblock->pagev[i];
+
+		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
+	}
+
+	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
+			scrub_missing_raid56_worker, NULL, NULL);
+	scrub_block_get(sblock);
+	scrub_pending_bio_inc(sctx);
+	raid56_submit_missing_rbio(rbio);
+	return;
+
+rbio_out:
+	bio_put(bio);
+bbio_out:
+	btrfs_put_bbio(bbio);
+	spin_lock(&sctx->stat_lock);
+	sctx->stat.malloc_errors++;
+	spin_unlock(&sctx->stat_lock);
+}
+
 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		       u64 physical, struct btrfs_device *dev, u64 flags,
 		       u64 gen, int mirror_num, u8 *csum, int force,
@@ -2228,19 +2352,27 @@
 	}
 
 	WARN_ON(sblock->page_count == 0);
-	for (index = 0; index < sblock->page_count; index++) {
-		struct scrub_page *spage = sblock->pagev[index];
-		int ret;
+	if (dev->missing) {
+		/*
+		 * This case should only be hit for RAID 5/6 device replace. See
+		 * the comment in scrub_missing_raid56_pages() for details.
+		 */
+		scrub_missing_raid56_pages(sblock);
+	} else {
+		for (index = 0; index < sblock->page_count; index++) {
+			struct scrub_page *spage = sblock->pagev[index];
+			int ret;
 
-		ret = scrub_add_page_to_rd_bio(sctx, spage);
-		if (ret) {
-			scrub_block_put(sblock);
-			return ret;
+			ret = scrub_add_page_to_rd_bio(sctx, spage);
+			if (ret) {
+				scrub_block_put(sblock);
+				return ret;
+			}
 		}
-	}
 
-	if (force)
-		scrub_submit(sctx);
+		if (force)
+			scrub_submit(sctx);
+	}
 
 	/* last one frees, either here or in bio completion for last page */
 	scrub_block_put(sblock);
@@ -2551,6 +2683,11 @@
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 blocksize;
 
+	if (dev->missing) {
+		scrub_parity_mark_sectors_error(sparity, logical, len);
+		return 0;
+	}
+
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
 		blocksize = sctx->sectorsize;
 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
@@ -2689,7 +2826,7 @@
 			   sparity->nsectors))
 		goto out;
 
-	length = sparity->logic_end - sparity->logic_start + 1;
+	length = sparity->logic_end - sparity->logic_start;
 	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
 			       sparity->logic_start,
 			       &length, &bbio, 0, 1);
@@ -2712,8 +2849,7 @@
 		goto rbio_out;
 
 	list_for_each_entry(spage, &sparity->spages, list)
-		raid56_parity_add_scrub_pages(rbio, spage->page,
-					      spage->logical);
+		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
 
 	scrub_pending_bio_inc(sctx);
 	raid56_parity_submit_scrub_rbio(rbio);
@@ -2761,6 +2897,7 @@
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_root *csum_root = fs_info->csum_root;
 	struct btrfs_extent_item *extent;
+	struct btrfs_bio *bbio = NULL;
 	u64 flags;
 	int ret;
 	int slot;
@@ -2770,6 +2907,7 @@
 	u64 extent_logical;
 	u64 extent_physical;
 	u64 extent_len;
+	u64 mapped_length;
 	struct btrfs_device *extent_dev;
 	struct scrub_parity *sparity;
 	int nsectors;
@@ -2843,6 +2981,10 @@
 			}
 			btrfs_item_key_to_cpu(l, &key, slot);
 
+			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+			    key.type != BTRFS_METADATA_ITEM_KEY)
+				goto next;
+
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				bytes = root->nodesize;
 			else
@@ -2851,11 +2993,7 @@
 			if (key.objectid + bytes <= logic_start)
 				goto next;
 
-			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
-			    key.type != BTRFS_METADATA_ITEM_KEY)
-				goto next;
-
-			if (key.objectid > logic_end) {
+			if (key.objectid >= logic_end) {
 				stop_loop = 1;
 				break;
 			}
@@ -2868,11 +3006,12 @@
 			flags = btrfs_extent_flags(l, extent);
 			generation = btrfs_extent_generation(l, extent);
 
-			if (key.objectid < logic_start &&
-			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
-				btrfs_err(fs_info,
-					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
-					   key.objectid, logic_start);
+			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
+			    (key.objectid < logic_start ||
+			     key.objectid + bytes >
+			     logic_start + map->stripe_len)) {
+				btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
+					  key.objectid, logic_start);
 				goto next;
 			}
 again:
@@ -2892,10 +3031,21 @@
 			scrub_parity_mark_sectors_data(sparity, extent_logical,
 						       extent_len);
 
-			scrub_remap_extent(fs_info, extent_logical,
-					   extent_len, &extent_physical,
-					   &extent_dev,
-					   &extent_mirror_num);
+			mapped_length = extent_len;
+			ret = btrfs_map_block(fs_info, READ, extent_logical,
+					      &mapped_length, &bbio, 0);
+			if (!ret) {
+				if (!bbio || mapped_length < extent_len)
+					ret = -EIO;
+			}
+			if (ret) {
+				btrfs_put_bbio(bbio);
+				goto out;
+			}
+			extent_physical = bbio->stripes[0].physical;
+			extent_mirror_num = bbio->mirror_num;
+			extent_dev = bbio->stripes[0].dev;
+			btrfs_put_bbio(bbio);
 
 			ret = btrfs_lookup_csums_range(csum_root,
 						extent_logical,
@@ -2910,10 +3060,12 @@
 						      extent_dev, flags,
 						      generation,
 						      extent_mirror_num);
+
+			scrub_free_csums(sctx);
+
 			if (ret)
 				goto out;
 
-			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
 				logic_start += map->stripe_len;
@@ -2942,7 +3094,7 @@
 out:
 	if (ret < 0)
 		scrub_parity_mark_sectors_error(sparity, logic_start,
-						logic_end - logic_start + 1);
+						logic_end - logic_start);
 	scrub_parity_put(sparity);
 	scrub_submit(sctx);
 	mutex_lock(&sctx->wr_ctx.wr_lock);
@@ -3091,22 +3243,6 @@
 	 */
 	ret = 0;
 	while (physical < physical_end) {
-		/* for raid56, we skip parity stripe */
-		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-			ret = get_raid56_logic_offset(physical, num,
-					map, &logical, &stripe_logical);
-			logical += base;
-			if (ret) {
-				stripe_logical += base;
-				stripe_end = stripe_logical + increment - 1;
-				ret = scrub_raid56_parity(sctx, map, scrub_dev,
-						ppath, stripe_logical,
-						stripe_end);
-				if (ret)
-					goto out;
-				goto skip;
-			}
-		}
 		/*
 		 * canceled?
 		 */
@@ -3131,6 +3267,24 @@
 			scrub_blocked_if_needed(fs_info);
 		}
 
+		/* for raid56, we skip parity stripe */
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+			ret = get_raid56_logic_offset(physical, num, map,
+						      &logical,
+						      &stripe_logical);
+			logical += base;
+			if (ret) {
+				stripe_logical += base;
+				stripe_end = stripe_logical + increment;
+				ret = scrub_raid56_parity(sctx, map, scrub_dev,
+							  ppath, stripe_logical,
+							  stripe_end);
+				if (ret)
+					goto out;
+				goto skip;
+			}
+		}
+
 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 			key.type = BTRFS_METADATA_ITEM_KEY;
 		else
@@ -3175,6 +3329,10 @@
 			}
 			btrfs_item_key_to_cpu(l, &key, slot);
 
+			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+			    key.type != BTRFS_METADATA_ITEM_KEY)
+				goto next;
+
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				bytes = root->nodesize;
 			else
@@ -3183,10 +3341,6 @@
 			if (key.objectid + bytes <= logical)
 				goto next;
 
-			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
-			    key.type != BTRFS_METADATA_ITEM_KEY)
-				goto next;
-
 			if (key.objectid >= logical + map->stripe_len) {
 				/* out of this device extent */
 				if (key.objectid >= logic_end)
@@ -3199,8 +3353,10 @@
 			flags = btrfs_extent_flags(l, extent);
 			generation = btrfs_extent_generation(l, extent);
 
-			if (key.objectid < logical &&
-			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
+			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
+			    (key.objectid < logical ||
+			     key.objectid + bytes >
+			     logical + map->stripe_len)) {
 				btrfs_err(fs_info,
 					   "scrub: tree block %llu spanning "
 					   "stripes, ignored. logical=%llu",
@@ -3234,9 +3390,11 @@
 						   &extent_dev,
 						   &extent_mirror_num);
 
-			ret = btrfs_lookup_csums_range(csum_root, logical,
-						logical + map->stripe_len - 1,
-						&sctx->csum_list, 1);
+			ret = btrfs_lookup_csums_range(csum_root,
+						       extent_logical,
+						       extent_logical +
+						       extent_len - 1,
+						       &sctx->csum_list, 1);
 			if (ret)
 				goto out;
 
@@ -3244,10 +3402,12 @@
 					   extent_physical, extent_dev, flags,
 					   generation, extent_mirror_num,
 					   extent_logical - logical + physical);
+
+			scrub_free_csums(sctx);
+
 			if (ret)
 				goto out;
 
-			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
@@ -3265,7 +3425,7 @@
 					if (ret && physical < physical_end) {
 						stripe_logical += base;
 						stripe_end = stripe_logical +
-								increment - 1;
+								increment;
 						ret = scrub_raid56_parity(sctx,
 							map, scrub_dev, ppath,
 							stripe_logical,
@@ -3374,7 +3534,7 @@
 	u64 chunk_tree;
 	u64 chunk_objectid;
 	u64 chunk_offset;
-	int ret;
+	int ret = 0;
 	int slot;
 	struct extent_buffer *l;
 	struct btrfs_key key;
@@ -3402,8 +3562,14 @@
 			if (path->slots[0] >=
 			    btrfs_header_nritems(path->nodes[0])) {
 				ret = btrfs_next_leaf(root, path);
-				if (ret)
+				if (ret < 0)
 					break;
+				if (ret > 0) {
+					ret = 0;
+					break;
+				}
+			} else {
+				ret = 0;
 			}
 		}
 
@@ -3445,6 +3611,22 @@
 		if (!cache)
 			goto skip;
 
+		/*
+		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
+		 * to avoid deadlock caused by:
+		 * btrfs_inc_block_group_ro()
+		 * -> btrfs_wait_for_commit()
+		 * -> btrfs_commit_transaction()
+		 * -> btrfs_scrub_pause()
+		 */
+		scrub_pause_on(fs_info);
+		ret = btrfs_inc_block_group_ro(root, cache);
+		scrub_pause_off(fs_info);
+		if (ret) {
+			btrfs_put_block_group(cache);
+			break;
+		}
+
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
@@ -3470,8 +3652,8 @@
 
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
+
+		scrub_pause_on(fs_info);
 
 		/*
 		 * must be called before we decrease @scrub_paused.
@@ -3482,11 +3664,9 @@
 			   atomic_read(&sctx->workers_pending) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
 
-		mutex_lock(&fs_info->scrub_lock);
-		__scrub_blocked_if_needed(fs_info);
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-		wake_up(&fs_info->scrub_pause_wait);
+		scrub_pause_off(fs_info);
+
+		btrfs_dec_block_group_ro(root, cache);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -3510,11 +3690,7 @@
 
 	btrfs_free_path(path);
 
-	/*
-	 * ret can still be 1 from search_slot or next_leaf,
-	 * that's not an error
-	 */
-	return ret < 0 ? ret : 0;
+	return ret;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
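
scrub_blocked_if_needed() is split into scrub_pause_on()/scrub_pause_off() so
a caller can advertise itself as paused, do work that may itself wait on a
transaction commit, and only then re-check the global pause state. The
block-group loop above uses the bracket like this (a restatement of the hunk,
not a new API):

  	scrub_pause_on(fs_info);	/* count ourselves paused before blocking */
  	ret = btrfs_inc_block_group_ro(root, cache);	/* may wait for a commit */
  	scrub_pause_off(fs_info);	/* honor any pending pause, then resume */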
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6bad633..2b07b35 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -69,7 +69,7 @@
 
 static int btrfs_remount(struct super_block *sb, int *flags, char *data);
 
-static const char *btrfs_decode_error(int errno)
+const char *btrfs_decode_error(int errno)
 {
 	char *errstr = "unknown";
 
@@ -1033,6 +1033,7 @@
 	sb->s_flags |= MS_POSIXACL;
 #endif
 	sb->s_flags |= MS_I_VERSION;
+	sb->s_iflags |= SB_I_CGROUPWB;
 	err = open_ctree(sb, fs_devices, (char *)data);
 	if (err) {
 		printk(KERN_ERR "BTRFS: open_ctree failed\n");
@@ -1650,6 +1651,17 @@
 
 		sb->s_flags |= MS_RDONLY;
 
+		/*
+		 * Setting MS_RDONLY will put the cleaner thread to
+		 * sleep at the next loop if it's already active.
+		 * If it's already asleep, we'll leave unused block
+		 * groups on disk until we're mounted read-write again
+		 * unless we clean them up here.
+		 */
+		mutex_lock(&root->fs_info->cleaner_mutex);
+		btrfs_delete_unused_bgs(fs_info);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
 		btrfs_dev_replace_suspend_for_unmount(fs_info);
 		btrfs_scrub_cancel(fs_info);
 		btrfs_pause_balance(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f5021fc..8f259b3 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -258,6 +258,8 @@
 	mutex_init(&cur_trans->cache_write_mutex);
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
+	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
+	spin_lock_init(&cur_trans->deleted_bgs_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -1301,7 +1303,7 @@
 	 */
 	btrfs_set_skip_qgroup(trans, objectid);
 
-	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
+	btrfs_reloc_pre_snapshot(pending, &to_reserve);
 
 	if (to_reserve > 0) {
 		pending->error = btrfs_block_rsv_add(root,
@@ -1638,9 +1640,7 @@
 	 * Tell lockdep about it.
 	 */
 	if (ac->newtrans->type & __TRANS_FREEZABLE)
-		rwsem_acquire_read(
-		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
-		     0, 1, _THIS_IP_);
+		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);
 
 	current->journal_info = ac->newtrans;
 
@@ -1679,9 +1679,7 @@
 	 * async commit thread will be the one to unlock it.
 	 */
 	if (ac->newtrans->type & __TRANS_FREEZABLE)
-		rwsem_release(
-			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
-			1, _THIS_IP_);
+		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);
 
 	schedule_work(&ac->work);
 
@@ -1893,8 +1891,11 @@
 			spin_unlock(&root->fs_info->trans_lock);
 
 			wait_for_commit(root, prev_trans);
+			ret = prev_trans->aborted;
 
 			btrfs_put_transaction(prev_trans);
+			if (ret)
+				goto cleanup_transaction;
 		} else {
 			spin_unlock(&root->fs_info->trans_lock);
 		}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index eb09c20..edc2fbc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -74,6 +74,8 @@
 	 */
 	struct mutex cache_write_mutex;
 	spinlock_t dirty_bgs_lock;
+	struct list_head deleted_bgs;
+	spinlock_t deleted_bgs_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 	int dirty_bg_run;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9c45431..1bbaace 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -140,55 +140,46 @@
 			   struct btrfs_root *root,
 			   struct btrfs_log_ctx *ctx)
 {
-	int index;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&root->log_mutex);
+
 	if (root->log_root) {
 		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
 			ret = -EAGAIN;
 			goto out;
 		}
+
 		if (!root->log_start_pid) {
-			root->log_start_pid = current->pid;
 			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
+			root->log_start_pid = current->pid;
 		} else if (root->log_start_pid != current->pid) {
 			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
 		}
+	} else {
+		mutex_lock(&root->fs_info->tree_log_mutex);
+		if (!root->fs_info->log_root_tree)
+			ret = btrfs_init_log_root_tree(trans, root->fs_info);
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		if (ret)
+			goto out;
 
-		atomic_inc(&root->log_batch);
-		atomic_inc(&root->log_writers);
-		if (ctx) {
-			index = root->log_transid % 2;
-			list_add_tail(&ctx->list, &root->log_ctxs[index]);
-			ctx->log_transid = root->log_transid;
-		}
-		mutex_unlock(&root->log_mutex);
-		return 0;
-	}
-
-	ret = 0;
-	mutex_lock(&root->fs_info->tree_log_mutex);
-	if (!root->fs_info->log_root_tree)
-		ret = btrfs_init_log_root_tree(trans, root->fs_info);
-	mutex_unlock(&root->fs_info->tree_log_mutex);
-	if (ret)
-		goto out;
-
-	if (!root->log_root) {
 		ret = btrfs_add_log_tree(trans, root);
 		if (ret)
 			goto out;
+
+		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
+		root->log_start_pid = current->pid;
 	}
-	clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
-	root->log_start_pid = current->pid;
+
 	atomic_inc(&root->log_batch);
 	atomic_inc(&root->log_writers);
 	if (ctx) {
-		index = root->log_transid % 2;
+		int index = root->log_transid % 2;
 		list_add_tail(&ctx->list, &root->log_ctxs[index]);
 		ctx->log_transid = root->log_transid;
 	}
+
 out:
 	mutex_unlock(&root->log_mutex);
 	return ret;
@@ -731,12 +722,66 @@
 						&ordered_sums, 0);
 			if (ret)
 				goto out;
+			/*
+			 * Now delete all existing csums in the csum root that
+			 * cover our range. We do this because we can have an
+			 * extent that is completely referenced by one file
+			 * extent item and partially referenced by another
+			 * file extent item (like after using the clone or
+			 * extent_same ioctls). In this case if we end up doing
+			 * the replay of the one that partially references the
+			 * extent first, and we do not do the csum deletion
+			 * below, we can get 2 csum items in the csum tree that
+			 * overlap each other. For example, imagine our log has
+			 * the two following file extent items:
+			 *
+			 * key (257 EXTENT_DATA 409600)
+			 *     extent data disk byte 12845056 nr 102400
+			 *     extent data offset 20480 nr 20480 ram 102400
+			 *
+			 * key (257 EXTENT_DATA 819200)
+			 *     extent data disk byte 12845056 nr 102400
+			 *     extent data offset 0 nr 102400 ram 102400
+			 *
+			 * Where the second one fully references the 100K extent
+			 * that starts at disk byte 12845056, and the log tree
+			 * has a single csum item that covers the entire range
+			 * of the extent:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+			 *
+			 * After the first file extent item is replayed, the
+			 * csum tree gets the following csum item:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+			 *
+			 * Which covers the 20K sub-range starting at offset 20K
+			 * of our extent. Now when we replay the second file
+			 * extent item, if we do not delete existing csum items
+			 * that cover any of its blocks, we end up getting two
+			 * csum items in our csum tree that overlap each other:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+			 *
+			 * Which is a problem, because after this anyone trying
+			 * to look up the checksum of any block of our
+			 * extent starting at an offset of 40K or higher, will
+			 * end up looking at the second csum item only, which
+			 * does not contain the checksum for any block starting
+			 * at offset 40K or higher of our extent.
+			 */
 			while (!list_empty(&ordered_sums)) {
 				struct btrfs_ordered_sum *sums;
 				sums = list_entry(ordered_sums.next,
 						struct btrfs_ordered_sum,
 						list);
 				if (!ret)
+					ret = btrfs_del_csums(trans,
+						      root->fs_info->csum_root,
+						      sums->bytenr,
+						      sums->len);
+				if (!ret)
 					ret = btrfs_csum_file_blocks(trans,
 						root->fs_info->csum_root,
 						sums);
@@ -1549,9 +1594,8 @@
  */
 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
 				    struct btrfs_root *root,
-				    struct btrfs_path *path,
 				    u64 dirid, u64 index,
-				    char *name, int name_len, u8 type,
+				    char *name, int name_len,
 				    struct btrfs_key *location)
 {
 	struct inode *inode;
@@ -1613,6 +1657,9 @@
  * not exist in the FS, it is skipped.  fsyncs on directories
  * do not force down inodes inside that directory, just changes to the
  * names or unlinks in a directory.
+ *
+ * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
+ * non-existing inode) and 1 if the name was replayed.
  */
 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 				    struct btrfs_root *root,
@@ -1631,6 +1678,7 @@
 	int exists;
 	int ret = 0;
 	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
+	bool name_added = false;
 
 	dir = read_one_inode(root, key->objectid);
 	if (!dir)
@@ -1708,6 +1756,8 @@
 	}
 	kfree(name);
 	iput(dir);
+	if (!ret && name_added)
+		ret = 1;
 	return ret;
 
 insert:
@@ -1719,10 +1769,12 @@
 		goto out;
 	}
 	btrfs_release_path(path);
-	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
-			      name, name_len, log_type, &log_key);
+	ret = insert_one_name(trans, root, key->objectid, key->offset,
+			      name, name_len, &log_key);
 	if (ret && ret != -ENOENT && ret != -EEXIST)
 		goto out;
+	if (!ret)
+		name_added = true;
 	update_size = false;
 	ret = 0;
 	goto out;
@@ -1740,12 +1792,13 @@
 					struct extent_buffer *eb, int slot,
 					struct btrfs_key *key)
 {
-	int ret;
+	int ret = 0;
 	u32 item_size = btrfs_item_size_nr(eb, slot);
 	struct btrfs_dir_item *di;
 	int name_len;
 	unsigned long ptr;
 	unsigned long ptr_end;
+	struct btrfs_path *fixup_path = NULL;
 
 	ptr = btrfs_item_ptr_offset(eb, slot);
 	ptr_end = ptr + item_size;
@@ -1755,12 +1808,59 @@
 			return -EIO;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
-		if (ret)
-			return ret;
+		if (ret < 0)
+			break;
 		ptr = (unsigned long)(di + 1);
 		ptr += name_len;
+
+		/*
+		 * If this entry refers to a non-directory (directories can not
+		 * have a link count > 1) and it was added in the transaction
+		 * that was not committed, make sure we fixup the link count of
+		 * the inode the entry points to. Otherwise something like
+		 * the following would result in a directory pointing to an
+		 * inode with a wrong link count that does not account for this dir
+		 * entry:
+		 *
+		 * mkdir testdir
+		 * touch testdir/foo
+		 * touch testdir/bar
+		 * sync
+		 *
+		 * ln testdir/bar testdir/bar_link
+		 * ln testdir/foo testdir/foo_link
+		 * xfs_io -c "fsync" testdir/bar
+		 *
+		 * <power failure>
+		 *
+		 * mount fs, log replay happens
+		 *
+		 * File foo would remain with a link count of 1 when it has two
+		 * entries pointing to it in the directory testdir. This would
+		 * make it impossible to ever delete the parent directory, as
+		 * it would result in stale dentries that can never be deleted.
+		 */
+		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
+			struct btrfs_key di_key;
+
+			if (!fixup_path) {
+				fixup_path = btrfs_alloc_path();
+				if (!fixup_path) {
+					ret = -ENOMEM;
+					break;
+				}
+			}
+
+			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+			ret = link_to_fixup_dir(trans, root, fixup_path,
+						di_key.objectid);
+			if (ret)
+				break;
+		}
+		ret = 0;
 	}
-	return 0;
+	btrfs_free_path(fixup_path);
+	return ret;
 }
 
 /*
@@ -2535,8 +2635,7 @@
 	return ret;
 }
 
-static void wait_log_commit(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, int transid)
+static void wait_log_commit(struct btrfs_root *root, int transid)
 {
 	DEFINE_WAIT(wait);
 	int index = transid % 2;
@@ -2561,8 +2660,7 @@
 		 atomic_read(&root->log_commit[index]));
 }
 
-static void wait_for_writer(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root)
+static void wait_for_writer(struct btrfs_root *root)
 {
 	DEFINE_WAIT(wait);
 
@@ -2642,7 +2740,7 @@
 
 	index1 = log_transid % 2;
 	if (atomic_read(&root->log_commit[index1])) {
-		wait_log_commit(trans, root, log_transid);
+		wait_log_commit(root, log_transid);
 		mutex_unlock(&root->log_mutex);
 		return ctx->log_ret;
 	}
@@ -2651,7 +2749,7 @@
 
 	/* wait for previous tree log sync to complete */
 	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
-		wait_log_commit(trans, root, log_transid - 1);
+		wait_log_commit(root, log_transid - 1);
 
 	while (1) {
 		int batch = atomic_read(&root->log_batch);
@@ -2662,7 +2760,7 @@
 			schedule_timeout_uninterruptible(1);
 			mutex_lock(&root->log_mutex);
 		}
-		wait_for_writer(trans, root);
+		wait_for_writer(root);
 		if (batch == atomic_read(&root->log_batch))
 			break;
 	}
@@ -2759,7 +2857,7 @@
 		ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
 						mark);
 		btrfs_wait_logged_extents(trans, log, log_transid);
-		wait_log_commit(trans, log_root_tree,
+		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		if (!ret)
@@ -2770,11 +2868,11 @@
 	atomic_set(&log_root_tree->log_commit[index2], 1);
 
 	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
-		wait_log_commit(trans, log_root_tree,
+		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid - 1);
 	}
 
-	wait_for_writer(trans, log_root_tree);
+	wait_for_writer(log_root_tree);
 
 	/*
 	 * now that we've moved on to the tree of log tree roots,
@@ -4904,6 +5002,94 @@
 	return ret;
 }
 
+static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+				 struct inode *inode,
+				 struct btrfs_log_ctx *ctx)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	const u64 ino = btrfs_ino(inode);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->skip_locking = 1;
+	path->search_commit_root = 1;
+
+	key.objectid = ino;
+	key.type = BTRFS_INODE_REF_KEY;
+	key.offset = 0;
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (true) {
+		struct extent_buffer *leaf = path->nodes[0];
+		int slot = path->slots[0];
+		u32 cur_offset = 0;
+		u32 item_size;
+		unsigned long ptr;
+
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
+		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
+			break;
+
+		item_size = btrfs_item_size_nr(leaf, slot);
+		ptr = btrfs_item_ptr_offset(leaf, slot);
+		while (cur_offset < item_size) {
+			struct btrfs_key inode_key;
+			struct inode *dir_inode;
+
+			inode_key.type = BTRFS_INODE_ITEM_KEY;
+			inode_key.offset = 0;
+
+			if (key.type == BTRFS_INODE_EXTREF_KEY) {
+				struct btrfs_inode_extref *extref;
+
+				extref = (struct btrfs_inode_extref *)
+					(ptr + cur_offset);
+				inode_key.objectid = btrfs_inode_extref_parent(
+					leaf, extref);
+				cur_offset += sizeof(*extref);
+				cur_offset += btrfs_inode_extref_name_len(leaf,
+					extref);
+			} else {
+				inode_key.objectid = key.offset;
+				cur_offset = item_size;
+			}
+
+			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+					       root, NULL);
+			/* If parent inode was deleted, skip it. */
+			if (IS_ERR(dir_inode))
+				continue;
+
+			ret = btrfs_log_inode(trans, root, dir_inode,
+					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
+			iput(dir_inode);
+			if (ret)
+				goto out;
+		}
+		path->slots[0]++;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -4923,9 +5109,6 @@
 	struct dentry *old_parent = NULL;
 	int ret = 0;
 	u64 last_committed = root->fs_info->last_trans_committed;
-	const struct dentry * const first_parent = parent;
-	const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
-				 last_committed);
 	bool log_dentries = false;
 	struct inode *orig_inode = inode;
 
@@ -4986,6 +5169,53 @@
 	if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
 		log_dentries = true;
 
+	/*
+	 * On unlink we must make sure all our current and old parent directory
+	 * inodes are fully logged. This is to prevent leaving dangling
+	 * directory index entries in directories that were our parents but are
+	 * not anymore. Not doing this results in the old parent directory being
+	 * impossible to delete after log replay (rmdir will always fail with
+	 * error -ENOTEMPTY).
+	 *
+	 * Example 1:
+	 *
+	 * mkdir testdir
+	 * touch testdir/foo
+	 * ln testdir/foo testdir/bar
+	 * sync
+	 * unlink testdir/bar
+	 * xfs_io -c fsync testdir/foo
+	 * <power failure>
+	 * mount fs, triggers log replay
+	 *
+	 * If we don't log the parent directory (testdir), after log replay the
+	 * directory still has an entry pointing to the file inode using the bar
+	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
+	 * the file inode has a link count of 1.
+	 *
+	 * Example 2:
+	 *
+	 * mkdir testdir
+	 * touch foo
+	 * ln foo testdir/foo2
+	 * ln foo testdir/foo3
+	 * sync
+	 * unlink testdir/foo3
+	 * xfs_io -c fsync foo
+	 * <power failure>
+	 * mount fs, triggers log replay
+	 *
+	 * Similar to the first example, after log replay the parent directory
+	 * testdir still has an entry pointing to the file inode with name foo3
+	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
+	 * and has a link count of 2.
+	 */
+	if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
+		if (ret)
+			goto end_trans;
+	}
+
 	while (1) {
 		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
 			break;
@@ -4994,23 +5224,9 @@
 		if (root != BTRFS_I(inode)->root)
 			break;
 
-		/*
-		 * On unlink we must make sure our immediate parent directory
-		 * inode is fully logged. This is to prevent leaving dangling
-		 * directory index entries and a wrong directory inode's i_size.
-		 * Not doing so can result in a directory being impossible to
-		 * delete after log replay (rmdir will always fail with error
-		 * -ENOTEMPTY).
-		 */
-		if (did_unlink && parent == first_parent)
-			inode_only = LOG_INODE_ALL;
-		else
-			inode_only = LOG_INODE_EXISTS;
-
-		if (BTRFS_I(inode)->generation >
-		    root->fs_info->last_trans_committed ||
-		    inode_only == LOG_INODE_ALL) {
-			ret = btrfs_log_inode(trans, root, inode, inode_only,
+		if (BTRFS_I(inode)->generation > last_committed) {
+			ret = btrfs_log_inode(trans, root, inode,
+					      LOG_INODE_EXISTS,
 					      0, LLONG_MAX, ctx);
 			if (ret)
 				goto end_trans;
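
The new btrfs_log_all_parents() above collects every parent directory of the
fsynced inode by scanning its INODE_REF and INODE_EXTREF items with a single
forward search: since BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 and
tree items sort by (objectid, type, offset), a search positioned at
(ino, BTRFS_INODE_REF_KEY, 0) walks both item types before the break
condition fires. A minimal userspace sketch of that invariant (the key values
match ctree.h; the leaf contents are made up for illustration):

	#include <stdio.h>

	#define BTRFS_INODE_REF_KEY	12
	#define BTRFS_INODE_EXTREF_KEY	13	/* BTRFS_INODE_REF_KEY + 1 */

	struct key { unsigned long long objectid; unsigned char type; };

	int main(void)
	{
		/* a sorted leaf, reduced to (objectid, type) */
		const struct key items[] = {
			{ 257, 1 },				/* inode item */
			{ 257, BTRFS_INODE_REF_KEY },
			{ 257, BTRFS_INODE_EXTREF_KEY },
			{ 257, 96 },				/* later item type */
			{ 258, BTRFS_INODE_REF_KEY },		/* next inode */
		};
		unsigned long long ino = 257;
		unsigned int i;

		/* btrfs_search_slot() would position us at the first key
		 * >= (ino, BTRFS_INODE_REF_KEY, 0); index 1 plays that role */
		for (i = 1; i < sizeof(items) / sizeof(items[0]); i++) {
			if (items[i].objectid != ino ||
			    items[i].type > BTRFS_INODE_EXTREF_KEY)
				break;	/* same test as the kernel loop */
			printf("parent ref item, type %u\n",
			       (unsigned int)items[i].type);
		}
		return 0;
	}
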
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 762476f..76201d6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1116,15 +1116,18 @@
 	return ret;
 }
 
-static int contains_pending_extent(struct btrfs_trans_handle *trans,
+static int contains_pending_extent(struct btrfs_transaction *transaction,
 				   struct btrfs_device *device,
 				   u64 *start, u64 len)
 {
+	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
 	struct extent_map *em;
-	struct list_head *search_list = &trans->transaction->pending_chunks;
+	struct list_head *search_list = &fs_info->pinned_chunks;
 	int ret = 0;
 	u64 physical_start = *start;
 
+	if (transaction)
+		search_list = &transaction->pending_chunks;
 again:
 	list_for_each_entry(em, search_list, list) {
 		struct map_lookup *map;
@@ -1159,8 +1162,8 @@
 			}
 		}
 	}
-	if (search_list == &trans->transaction->pending_chunks) {
-		search_list = &trans->root->fs_info->pinned_chunks;
+	if (search_list != &fs_info->pinned_chunks) {
+		search_list = &fs_info->pinned_chunks;
 		goto again;
 	}
 
@@ -1169,12 +1172,13 @@
 
 
 /*
- * find_free_dev_extent - find free space in the specified device
- * @device:	the device which we search the free space in
- * @num_bytes:	the size of the free space that we need
- * @start:	store the start of the free space.
- * @len:	the size of the free space. that we find, or the size of the max
- * 		free space if we don't find suitable free space
+ * find_free_dev_extent_start - find free space in the specified device
+ * @device:	  the device which we search the free space in
+ * @num_bytes:	  the size of the free space that we need
+ * @search_start: the position from which to begin the search
+ * @start:	  store the start of the free space.
+ * @len:	  the size of the free space that we find, or the size
+ *		  of the max free space if we don't find suitable free space
  *
  * this uses a pretty simple search, the expectation is that it is
  * called very infrequently and that a given device has a small number
@@ -1188,9 +1192,9 @@
  * But if we don't find suitable free space, it is used to store the size of
  * the max free space.
  */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_device *device, u64 num_bytes,
-			 u64 *start, u64 *len)
+int find_free_dev_extent_start(struct btrfs_transaction *transaction,
+			       struct btrfs_device *device, u64 num_bytes,
+			       u64 search_start, u64 *start, u64 *len)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -1200,19 +1204,11 @@
 	u64 max_hole_start;
 	u64 max_hole_size;
 	u64 extent_end;
-	u64 search_start;
 	u64 search_end = device->total_bytes;
 	int ret;
 	int slot;
 	struct extent_buffer *l;
 
-	/* FIXME use last free of some kind */
-
-	/* we don't want to overwrite the superblock on the drive,
-	 * so we make sure to start at an offset of at least 1MB
-	 */
-	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1273,7 +1269,7 @@
 			 * Have to check before we set max_hole_start, otherwise
 			 * we could end up sending back this offset anyway.
 			 */
-			if (contains_pending_extent(trans, device,
+			if (contains_pending_extent(transaction, device,
 						    &search_start,
 						    hole_size)) {
 				if (key.offset >= search_start) {
@@ -1322,7 +1318,7 @@
 	if (search_end > search_start) {
 		hole_size = search_end - search_start;
 
-		if (contains_pending_extent(trans, device, &search_start,
+		if (contains_pending_extent(transaction, device, &search_start,
 					    hole_size)) {
 			btrfs_release_path(path);
 			goto again;
@@ -1348,6 +1344,24 @@
 	return ret;
 }
 
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 *start, u64 *len)
+{
+	struct btrfs_root *root = device->dev_root;
+	u64 search_start;
+
+	/* FIXME use last free of some kind */
+
+	/*
+	 * we don't want to overwrite the superblock on the drive,
+	 * so we make sure to start at an offset of at least 1MB
+	 */
+	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+	return find_free_dev_extent_start(trans->transaction, device,
+					  num_bytes, search_start, start, len);
+}
+
 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 			  struct btrfs_device *device,
 			  u64 start, u64 *dev_extent_len)
@@ -2755,9 +2769,7 @@
 	return ret;
 }
 
-static int btrfs_relocate_chunk(struct btrfs_root *root,
-				u64 chunk_objectid,
-				u64 chunk_offset)
+static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
 {
 	struct btrfs_root *extent_root;
 	struct btrfs_trans_handle *trans;
@@ -2785,7 +2797,9 @@
 		return -ENOSPC;
 
 	/* step one, relocate all the extents inside this chunk */
+	btrfs_scrub_pause(root);
 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
+	btrfs_scrub_continue(root);
 	if (ret)
 		return ret;
 
@@ -2855,7 +2869,6 @@
 
 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
 			ret = btrfs_relocate_chunk(chunk_root,
-						   found_key.objectid,
 						   found_key.offset);
 			if (ret == -ENOSPC)
 				failed++;
@@ -3375,7 +3388,6 @@
 		}
 
 		ret = btrfs_relocate_chunk(chunk_root,
-					   found_key.objectid,
 					   found_key.offset);
 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
@@ -4077,7 +4089,6 @@
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_path *path;
 	u64 length;
-	u64 chunk_objectid;
 	u64 chunk_offset;
 	int ret;
 	int slot;
@@ -4154,11 +4165,10 @@
 			break;
 		}
 
-		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 		btrfs_release_path(path);
 
-		ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
+		ret = btrfs_relocate_chunk(root, chunk_offset);
 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
 			goto done;
@@ -4200,7 +4210,8 @@
 		u64 start = new_size;
 		u64 len = old_size - new_size;
 
-		if (contains_pending_extent(trans, device, &start, len)) {
+		if (contains_pending_extent(trans->transaction, device,
+					    &start, len)) {
 			unlock_chunks(root);
 			checked_pending_chunks = true;
 			failed = 0;
@@ -5071,9 +5082,7 @@
 		 * and the stripes
 		 */
 		sizeof(u64) * (total_stripes),
-		GFP_NOFS);
-	if (!bbio)
-		return NULL;
+		GFP_NOFS|__GFP_NOFAIL);
 
 	atomic_set(&bbio->error, 0);
 	atomic_set(&bbio->refs, 1);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 95842a9..2ca784a 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -453,6 +453,9 @@
 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int find_free_dev_extent_start(struct btrfs_transaction *transaction,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 search_start, u64 *start, u64 *max_avail);
 int find_free_dev_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
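
Note how the refactor above splits the old entry point: find_free_dev_extent()
is now a thin wrapper that supplies the running transaction plus the
superblock-protecting 1MB minimum search offset, while
find_free_dev_extent_start() accepts a NULL transaction, in which case
contains_pending_extent() consults only fs_info->pinned_chunks. A sketch of a
transaction-less caller (pick_hole() is hypothetical; error handling elided):

	static int pick_hole(struct btrfs_device *device, u64 num_bytes,
			     u64 *start, u64 *len)
	{
		/* NULL transaction: only fs_info->pinned_chunks is checked */
		return find_free_dev_extent_start(NULL, device, num_bytes,
						  1024ull * 1024, start, len);
	}
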
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index d1c833c..7b6bfcb 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -479,7 +479,7 @@
 	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
 		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
 	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
-		seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
+		seq_show_option(m, "snapdirname", fsopt->snapdir_name);
 
 	return 0;
 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0a9fb6b..6a1119e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -394,17 +394,17 @@
 	struct sockaddr *srcaddr;
 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
-	seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
+	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
 	cifs_show_security(s, tcon->ses);
 	cifs_show_cache_flavor(s, cifs_sb);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
 		seq_puts(s, ",multiuser");
 	else if (tcon->ses->user_name)
-		seq_printf(s, ",username=%s", tcon->ses->user_name);
+		seq_show_option(s, "username", tcon->ses->user_name);
 
 	if (tcon->ses->domainName)
-		seq_printf(s, ",domain=%s", tcon->ses->domainName);
+		seq_show_option(s, "domain", tcon->ses->domainName);
 
 	if (srcaddr->sa_family != AF_UNSPEC) {
 		struct sockaddr_in *saddr4;
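
The ceph and cifs hunks above replace seq_printf(",opt=%s", value) with
seq_show_option() for server- or admin-controlled strings, so that a ',' or
'\n' embedded in a snapdir name, username, or domain cannot forge extra
options or whole fake records in /proc/mounts. A standalone userspace
illustration of the idea, octal-escaping the dangerous bytes the way
seq_escape() does (show_option_escaped() is illustrative, not the kernel
implementation):

	#include <stdio.h>

	static void show_option_escaped(const char *name, const char *value)
	{
		const char *p;

		printf(",%s=", name);
		for (p = value; *p; p++) {
			if (*p == ',' || *p == ' ' || *p == '\t' ||
			    *p == '\n' || *p == '\\')
				printf("\\%03o", (unsigned char)*p);
			else
				putchar(*p);
		}
	}

	int main(void)
	{
		/* a hostile, server-supplied domain string */
		show_option_escaped("domain", "CORP\nfakefs / fake rw 0 0");
		putchar('\n');
		return 0;
	}
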
diff --git a/fs/dax.c b/fs/dax.c
index a7f77e1..e43389c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -23,6 +23,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/pmem.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
 #include <linux/vmstat.h>
@@ -34,7 +35,7 @@
 
 	might_sleep();
 	do {
-		void *addr;
+		void __pmem *addr;
 		unsigned long pfn;
 		long count;
 
@@ -46,10 +47,7 @@
 			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
 			if (pgsz > count)
 				pgsz = count;
-			if (pgsz < PAGE_SIZE)
-				memset(addr, 0, pgsz);
-			else
-				clear_page(addr);
+			clear_pmem(addr, pgsz);
 			addr += pgsz;
 			size -= pgsz;
 			count -= pgsz;
@@ -59,26 +57,29 @@
 		}
 	} while (size);
 
+	wmb_pmem();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dax_clear_blocks);
 
-static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
+static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
+		unsigned blkbits)
 {
 	unsigned long pfn;
 	sector_t sector = bh->b_blocknr << (blkbits - 9);
 	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
 }
 
-static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
-			loff_t end)
+/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
+static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
+		loff_t pos, loff_t end)
 {
 	loff_t final = end - pos + first; /* The final byte of the buffer */
 
 	if (first > 0)
-		memset(addr, 0, first);
+		clear_pmem(addr, first);
 	if (final < size)
-		memset(addr + final, 0, size - final);
+		clear_pmem(addr + final, size - final);
 }
 
 static bool buffer_written(struct buffer_head *bh)
@@ -106,14 +107,15 @@
 	loff_t pos = start;
 	loff_t max = start;
 	loff_t bh_max = start;
-	void *addr;
+	void __pmem *addr;
 	bool hole = false;
+	bool need_wmb = false;
 
 	if (iov_iter_rw(iter) != WRITE)
 		end = min(end, i_size_read(inode));
 
 	while (pos < end) {
-		unsigned len;
+		size_t len;
 		if (pos == max) {
 			unsigned blkbits = inode->i_blkbits;
 			sector_t block = pos >> blkbits;
@@ -145,19 +147,23 @@
 				retval = dax_get_addr(bh, &addr, blkbits);
 				if (retval < 0)
 					break;
-				if (buffer_unwritten(bh) || buffer_new(bh))
+				if (buffer_unwritten(bh) || buffer_new(bh)) {
 					dax_new_buf(addr, retval, first, pos,
 									end);
+					need_wmb = true;
+				}
 				addr += first;
 				size = retval - first;
 			}
 			max = min(pos + size, end);
 		}
 
-		if (iov_iter_rw(iter) == WRITE)
-			len = copy_from_iter_nocache(addr, max - pos, iter);
-		else if (!hole)
-			len = copy_to_iter(addr, max - pos, iter);
+		if (iov_iter_rw(iter) == WRITE) {
+			len = copy_from_iter_pmem(addr, max - pos, iter);
+			need_wmb = true;
+		} else if (!hole)
+			len = copy_to_iter((void __force *)addr, max - pos,
+					iter);
 		else
 			len = iov_iter_zero(max - pos, iter);
 
@@ -168,6 +174,9 @@
 		addr += len;
 	}
 
+	if (need_wmb)
+		wmb_pmem();
+
 	return (pos == start) ? retval : pos - start;
 }
 
@@ -260,11 +269,13 @@
 static int copy_user_bh(struct page *to, struct buffer_head *bh,
 			unsigned blkbits, unsigned long vaddr)
 {
-	void *vfrom, *vto;
+	void __pmem *vfrom;
+	void *vto;
+
 	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
 		return -EIO;
 	vto = kmap_atomic(to);
-	copy_user_page(vto, vfrom, vaddr, to);
+	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
 	kunmap_atomic(vto);
 	return 0;
 }
@@ -272,16 +283,13 @@
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
-	void *addr;
+	void __pmem *addr;
 	unsigned long pfn;
 	pgoff_t size;
 	int error;
 
-	i_mmap_lock_read(mapping);
-
 	/*
 	 * Check truncate didn't happen while we were allocating a block.
 	 * If it did, this block may or may not be still allocated to the
@@ -303,14 +311,14 @@
 		goto out;
 	}
 
-	if (buffer_unwritten(bh) || buffer_new(bh))
-		clear_page(addr);
+	if (buffer_unwritten(bh) || buffer_new(bh)) {
+		clear_pmem(addr, PAGE_SIZE);
+		wmb_pmem();
+	}
 
 	error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
-	i_mmap_unlock_read(mapping);
-
 	return error;
 }
 
@@ -372,15 +380,17 @@
 			 * from a read fault and we've raced with a truncate
 			 */
 			error = -EIO;
-			goto unlock_page;
+			goto unlock;
 		}
+	} else {
+		i_mmap_lock_write(mapping);
 	}
 
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock_page;
+		goto unlock;
 
 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -391,8 +401,9 @@
 			if (!error && (bh.b_size < PAGE_SIZE))
 				error = -EIO;
 			if (error)
-				goto unlock_page;
+				goto unlock;
 		} else {
+			i_mmap_unlock_write(mapping);
 			return dax_load_hole(mapping, page, vmf);
 		}
 	}
@@ -404,17 +415,15 @@
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock_page;
+			goto unlock;
 		vmf->page = page;
 		if (!page) {
-			i_mmap_lock_read(mapping);
 			/* Check we didn't race with truncate */
 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 								PAGE_SHIFT;
 			if (vmf->pgoff >= size) {
-				i_mmap_unlock_read(mapping);
 				error = -EIO;
-				goto out;
+				goto unlock;
 			}
 		}
 		return VM_FAULT_LOCKED;
@@ -450,6 +459,8 @@
 			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 	}
 
+	if (!page)
+		i_mmap_unlock_write(mapping);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -458,11 +469,14 @@
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 
- unlock_page:
+ unlock:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
+	} else {
+		i_mmap_unlock_write(mapping);
 	}
+
 	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -494,6 +508,176 @@
 }
 EXPORT_SYMBOL_GPL(dax_fault);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
+ * more often than one might expect in the below function.
+ */
+#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+
+int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd, unsigned int flags, get_block_t get_block,
+		dax_iodone_t complete_unwritten)
+{
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	struct buffer_head bh;
+	unsigned blkbits = inode->i_blkbits;
+	unsigned long pmd_addr = address & PMD_MASK;
+	bool write = flags & FAULT_FLAG_WRITE;
+	long length;
+	void *kaddr;
+	pgoff_t size, pgoff;
+	sector_t block, sector;
+	unsigned long pfn;
+	int result = 0;
+
+	/* Fall back to PTEs if we're going to COW */
+	if (write && !(vma->vm_flags & VM_SHARED))
+		return VM_FAULT_FALLBACK;
+	/* If the PMD would extend outside the VMA */
+	if (pmd_addr < vma->vm_start)
+		return VM_FAULT_FALLBACK;
+	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
+		return VM_FAULT_FALLBACK;
+
+	pgoff = linear_page_index(vma, pmd_addr);
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (pgoff >= size)
+		return VM_FAULT_SIGBUS;
+	/* If the PMD would cover blocks out of the file */
+	if ((pgoff | PG_PMD_COLOUR) >= size)
+		return VM_FAULT_FALLBACK;
+
+	memset(&bh, 0, sizeof(bh));
+	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
+
+	bh.b_size = PMD_SIZE;
+	i_mmap_lock_write(mapping);
+	length = get_block(inode, block, &bh, write);
+	if (length)
+		return VM_FAULT_SIGBUS;
+
+	/*
+	 * If the filesystem isn't willing to tell us the length of a hole,
+	 * just fall back to PTEs.  Calling get_block 512 times in a loop
+	 * would be silly.
+	 */
+	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
+		goto fallback;
+
+	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+		int i;
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			clear_page(kaddr + i * PAGE_SIZE);
+		count_vm_event(PGMAJFAULT);
+		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+		result |= VM_FAULT_MAJOR;
+	}
+
+	/*
+	 * If we allocated new storage, make sure no process has any
+	 * zero pages covering this hole
+	 */
+	if (buffer_new(&bh)) {
+		i_mmap_unlock_write(mapping);
+		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
+		i_mmap_lock_write(mapping);
+	}
+
+	/*
+	 * If a truncate happened while we were allocating blocks, we may
+	 * leave blocks allocated to the file that are beyond EOF.  We can't
+	 * take i_mutex here, so just leave them hanging; they'll be freed
+	 * when the file is deleted.
+	 */
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (pgoff >= size) {
+		result = VM_FAULT_SIGBUS;
+		goto out;
+	}
+	if ((pgoff | PG_PMD_COLOUR) >= size)
+		goto fallback;
+
+	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
+		spinlock_t *ptl;
+		pmd_t entry;
+		struct page *zero_page = get_huge_zero_page();
+
+		if (unlikely(!zero_page))
+			goto fallback;
+
+		ptl = pmd_lock(vma->vm_mm, pmd);
+		if (!pmd_none(*pmd)) {
+			spin_unlock(ptl);
+			goto fallback;
+		}
+
+		entry = mk_pmd(zero_page, vma->vm_page_prot);
+		entry = pmd_mkhuge(entry);
+		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
+		result = VM_FAULT_NOPAGE;
+		spin_unlock(ptl);
+	} else {
+		sector = bh.b_blocknr << (blkbits - 9);
+		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
+						bh.b_size);
+		if (length < 0) {
+			result = VM_FAULT_SIGBUS;
+			goto out;
+		}
+		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
+			goto fallback;
+
+		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
+	}
+
+ out:
+	if (buffer_unwritten(&bh))
+		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
+
+	i_mmap_unlock_write(mapping);
+
+	return result;
+
+ fallback:
+	count_vm_event(THP_FAULT_FALLBACK);
+	result = VM_FAULT_FALLBACK;
+	goto out;
+}
+EXPORT_SYMBOL_GPL(__dax_pmd_fault);
+
+/**
+ * dax_pmd_fault - handle a PMD fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @address: The faulting address, within @vma
+ * @pmd: The PMD entry in which to install the huge page mapping
+ * @flags: The fault flags (FAULT_FLAG_*)
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ * @complete_unwritten: The filesystem method used to convert unwritten blocks
+ *
+ * When a page fault occurs, filesystems may call this helper in their
+ * pmd_fault handler for DAX files.
+ */
+int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+			pmd_t *pmd, unsigned int flags, get_block_t get_block,
+			dax_iodone_t complete_unwritten)
+{
+	int result;
+	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+
+	if (flags & FAULT_FLAG_WRITE) {
+		sb_start_pagefault(sb);
+		file_update_time(vma->vm_file);
+	}
+	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
+				complete_unwritten);
+	if (flags & FAULT_FLAG_WRITE)
+		sb_end_pagefault(sb);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(dax_pmd_fault);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /**
  * dax_pfn_mkwrite - handle first write to DAX page
  * @vma: The virtual memory area where the fault occurred
@@ -548,11 +732,12 @@
 	if (err < 0)
 		return err;
 	if (buffer_written(&bh)) {
-		void *addr;
+		void __pmem *addr;
 		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
 		if (err < 0)
 			return err;
-		memset(addr + offset, 0, length);
+		clear_pmem(addr + offset, length);
+		wmb_pmem();
 	}
 
 	return 0;
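
The dax.c conversion above swaps memset()/clear_page() for clear_pmem() and
copy_from_iter_nocache() for copy_from_iter_pmem(), then batches persistence:
each store path only sets need_wmb, and a single wmb_pmem() at the end makes
the whole I/O durable rather than paying one barrier per range. A compressed
sketch of that pattern with the same linux/pmem.h helpers (zero_pmem_ranges()
is illustrative, not kernel code):

	#include <linux/types.h>
	#include <linux/pmem.h>

	/* zero several ranges, then make them durable with one barrier */
	static void zero_pmem_ranges(void __pmem *addr, const size_t *len,
				     int n)
	{
		bool need_wmb = false;
		int i;

		for (i = 0; i < n; i++) {
			if (!len[i])
				continue;
			clear_pmem(addr, len[i]);	/* non-temporal stores */
			addr += len[i];
			need_wmb = true;
		}
		if (need_wmb)
			wmb_pmem();	/* one durability point for the batch */
	}
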
diff --git a/fs/dcache.c b/fs/dcache.c
index 9b5fe50..5c33aeb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2718,7 +2718,7 @@
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
+ * dentry->d_parent->d_inode->i_mutex and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2744,7 +2744,6 @@
 	__d_move(alias, dentry, false);
 	ret = 0;
 out_err:
-	spin_unlock(&inode->i_lock);
 	if (m2)
 		mutex_unlock(m2);
 	if (m1)
@@ -2790,10 +2789,11 @@
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *new = __d_find_any_alias(inode);
 		if (unlikely(new)) {
+			/* The reference to new ensures it remains an alias */
+			spin_unlock(&inode->i_lock);
 			write_seqlock(&rename_lock);
 			if (unlikely(d_ancestor(new, dentry))) {
 				write_sequnlock(&rename_lock);
-				spin_unlock(&inode->i_lock);
 				dput(new);
 				new = ERR_PTR(-ELOOP);
 				pr_warn_ratelimited(
@@ -2812,7 +2812,6 @@
 			} else {
 				__d_move(new, dentry, false);
 				write_sequnlock(&rename_lock);
-				spin_unlock(&inode->i_lock);
 				security_d_instantiate(new, inode);
 			}
 			iput(inode);
@@ -2926,6 +2925,13 @@
 
 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
 			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
+			/* Escaped? */
+			if (dentry != vfsmnt->mnt_root) {
+				bptr = *buffer;
+				blen = *buflen;
+				error = 3;
+				break;
+			}
 			/* Global root? */
 			if (mnt != parent) {
 				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 284f9aa..6c55ade 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -435,8 +435,8 @@
 }
 EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
 
-static ssize_t read_file_bool(struct file *file, char __user *user_buf,
-			      size_t count, loff_t *ppos)
+ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+			       size_t count, loff_t *ppos)
 {
 	char buf[3];
 	u32 *val = file->private_data;
@@ -449,9 +449,10 @@
 	buf[2] = 0x00;
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 }
+EXPORT_SYMBOL_GPL(debugfs_read_file_bool);
 
-static ssize_t write_file_bool(struct file *file, const char __user *user_buf,
-			       size_t count, loff_t *ppos)
+ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
+				size_t count, loff_t *ppos)
 {
 	char buf[32];
 	size_t buf_size;
@@ -468,10 +469,11 @@
 
 	return count;
 }
+EXPORT_SYMBOL_GPL(debugfs_write_file_bool);
 
 static const struct file_operations fops_bool = {
-	.read =		read_file_bool,
-	.write =	write_file_bool,
+	.read =		debugfs_read_file_bool,
+	.write =	debugfs_write_file_bool,
 	.open =		simple_open,
 	.llseek =	default_llseek,
 };
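
With debugfs_read_file_bool()/debugfs_write_file_bool() exported, a driver
that wants a bool-style file but needs its own open hook or extra side
effects can reuse the canonical parsing instead of copying it. A hypothetical
module sketch (the demo_* names are made up; note the backing variable is a
u32, as the helpers expect here):

	#include <linux/module.h>
	#include <linux/debugfs.h>

	static u32 demo_flag;
	static struct dentry *demo_dir;

	static const struct file_operations demo_bool_fops = {
		.owner	= THIS_MODULE,
		.read	= debugfs_read_file_bool,
		.write	= debugfs_write_file_bool,
		.open	= simple_open,
		.llseek	= default_llseek,
	};

	static int __init demo_init(void)
	{
		demo_dir = debugfs_create_dir("bool_demo", NULL);
		debugfs_create_file("flag", 0644, demo_dir, &demo_flag,
				    &demo_bool_fops);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove_recursive(demo_dir);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
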
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 754fd6c..87e9d79 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -120,12 +120,11 @@
 	struct cbuf cb;
 	int retries;
 #define MAX_CONNECT_RETRIES 3
-	int sctp_assoc;
 	struct hlist_node list;
 	struct connection *othercon;
 	struct work_struct rwork; /* Receive workqueue */
 	struct work_struct swork; /* Send workqueue */
-	bool try_new_addr;
+	void (*orig_error_report)(struct sock *sk);
 };
 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
 
@@ -252,26 +251,6 @@
 	return con;
 }
 
-/* This is a bit drastic, but only called when things go wrong */
-static struct connection *assoc2con(int assoc_id)
-{
-	int i;
-	struct connection *con;
-
-	mutex_lock(&connections_lock);
-
-	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-		hlist_for_each_entry(con, &connection_hash[i], list) {
-			if (con->sctp_assoc == assoc_id) {
-				mutex_unlock(&connections_lock);
-				return con;
-			}
-		}
-	}
-	mutex_unlock(&connections_lock);
-	return NULL;
-}
-
 static struct dlm_node_addr *find_node_addr(int nodeid)
 {
 	struct dlm_node_addr *na;
@@ -322,14 +301,14 @@
 	spin_lock(&dlm_node_addrs_spin);
 	na = find_node_addr(nodeid);
 	if (na && na->addr_count) {
+		memcpy(&sas, na->addr[na->curr_addr_index],
+		       sizeof(struct sockaddr_storage));
+
 		if (try_new_addr) {
 			na->curr_addr_index++;
 			if (na->curr_addr_index == na->addr_count)
 				na->curr_addr_index = 0;
 		}
-
-		memcpy(&sas, na->addr[na->curr_addr_index ],
-			sizeof(struct sockaddr_storage));
 	}
 	spin_unlock(&dlm_node_addrs_spin);
 
@@ -459,18 +438,23 @@
 
 static void lowcomms_state_change(struct sock *sk)
 {
-	if (sk->sk_state == TCP_ESTABLISHED)
+	/* The SCTP layer does not call sk_data_ready when the connection
+	 * is done, so we catch the signal through here. Also, it
+	 * doesn't switch the socket state when entering shutdown, so we
+	 * skip the write-space handling in that case.
+	 */
+	if (sk->sk_shutdown) {
+		if (sk->sk_shutdown == RCV_SHUTDOWN)
+			lowcomms_data_ready(sk);
+	} else if (sk->sk_state == TCP_ESTABLISHED) {
 		lowcomms_write_space(sk);
+	}
 }
 
 int dlm_lowcomms_connect_node(int nodeid)
 {
 	struct connection *con;
 
-	/* with sctp there's no connecting without sending */
-	if (dlm_config.ci_protocol != 0)
-		return 0;
-
 	if (nodeid == dlm_our_nodeid())
 		return 0;
 
@@ -481,6 +465,43 @@
 	return 0;
 }
 
+static void lowcomms_error_report(struct sock *sk)
+{
+	struct connection *con = sock2con(sk);
+	struct sockaddr_storage saddr;
+
+	if (nodeid_to_addr(con->nodeid, &saddr, NULL, false)) {
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, dlm_config.ci_tcp_port,
+				   sk->sk_err, sk->sk_err_soft);
+		return;
+	} else if (saddr.ss_family == AF_INET) {
+		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %pI4, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, &sin4->sin_addr.s_addr,
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	} else {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %u.%u.%u.%u, "
+				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
+				   sin6->sin6_addr.s6_addr32[1],
+				   sin6->sin6_addr.s6_addr32[2],
+				   sin6->sin6_addr.s6_addr32[3],
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	}
+	con->orig_error_report(sk);
+}
+
 /* Make a socket active */
 static void add_sock(struct socket *sock, struct connection *con)
 {
@@ -492,6 +513,8 @@
 	con->sock->sk->sk_state_change = lowcomms_state_change;
 	con->sock->sk->sk_user_data = con;
 	con->sock->sk->sk_allocation = GFP_NOFS;
+	con->orig_error_report = con->sock->sk->sk_error_report;
+	con->sock->sk->sk_error_report = lowcomms_error_report;
 }
 
 /* Add the port number to an IPv6 or 4 sockaddr and return the address
@@ -514,17 +537,24 @@
 }
 
 /* Close a remote connection and tidy up */
-static void close_connection(struct connection *con, bool and_other)
+static void close_connection(struct connection *con, bool and_other,
+			     bool tx, bool rx)
 {
-	mutex_lock(&con->sock_mutex);
+	clear_bit(CF_CONNECT_PENDING, &con->flags);
+	clear_bit(CF_WRITE_PENDING, &con->flags);
+	if (tx && cancel_work_sync(&con->swork))
+		log_print("canceled swork for node %d", con->nodeid);
+	if (rx && cancel_work_sync(&con->rwork))
+		log_print("canceled rwork for node %d", con->nodeid);
 
+	mutex_lock(&con->sock_mutex);
 	if (con->sock) {
 		sock_release(con->sock);
 		con->sock = NULL;
 	}
 	if (con->othercon && and_other) {
 		/* Will only re-enter once. */
-		close_connection(con->othercon, false);
+		close_connection(con->othercon, false, true, true);
 	}
 	if (con->rx_page) {
 		__free_page(con->rx_page);
@@ -535,254 +565,6 @@
 	mutex_unlock(&con->sock_mutex);
 }
 
-/* We only send shutdown messages to nodes that are not part of the cluster */
-static void sctp_send_shutdown(sctp_assoc_t associd)
-{
-	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	int ret;
-	struct connection *con;
-
-	con = nodeid2con(0,0);
-	BUG_ON(con == NULL);
-
-	outmessage.msg_name = NULL;
-	outmessage.msg_namelen = 0;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-
-	sinfo->sinfo_flags |= MSG_EOF;
-	sinfo->sinfo_assoc_id = associd;
-
-	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);
-
-	if (ret != 0)
-		log_print("send EOF to node failed: %d", ret);
-}
-
-static void sctp_init_failed_foreach(struct connection *con)
-{
-
-	/*
-	 * Don't try to recover base con and handle race where the
-	 * other node's assoc init creates a assoc and we get that
-	 * notification, then we get a notification that our attempt
-	 * failed due. This happens when we are still trying the primary
-	 * address, but the other node has already tried secondary addrs
-	 * and found one that worked.
-	 */
-	if (!con->nodeid || con->sctp_assoc)
-		return;
-
-	log_print("Retrying SCTP association init for node %d\n", con->nodeid);
-
-	con->try_new_addr = true;
-	con->sctp_assoc = 0;
-	if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	}
-}
-
-/* INIT failed but we don't know which node...
-   restart INIT on all pending nodes */
-static void sctp_init_failed(void)
-{
-	mutex_lock(&connections_lock);
-
-	foreach_conn(sctp_init_failed_foreach);
-
-	mutex_unlock(&connections_lock);
-}
-
-static void retry_failed_sctp_send(struct connection *recv_con,
-				   struct sctp_send_failed *sn_send_failed,
-				   char *buf)
-{
-	int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed);
-	struct dlm_mhandle *mh;
-	struct connection *con;
-	char *retry_buf;
-	int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
-
-	log_print("Retry sending %d bytes to node id %d", len, nodeid);
-	
-	if (!nodeid) {
-		log_print("Shouldn't resend data via listening connection.");
-		return;
-	}
-
-	con = nodeid2con(nodeid, 0);
-	if (!con) {
-		log_print("Could not look up con for nodeid %d\n",
-			  nodeid);
-		return;
-	}
-
-	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf);
-	if (!mh) {
-		log_print("Could not allocate buf for retry.");
-		return;
-	}
-	memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len);
-	dlm_lowcomms_commit_buffer(mh);
-
-	/*
-	 * If we got a assoc changed event before the send failed event then
-	 * we only need to retry the send.
-	 */
-	if (con->sctp_assoc) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	} else
-		sctp_init_failed_foreach(con);
-}
-
-/* Something happened to an association */
-static void process_sctp_notification(struct connection *con,
-				      struct msghdr *msg, char *buf)
-{
-	union sctp_notification *sn = (union sctp_notification *)buf;
-	struct linger linger;
-
-	switch (sn->sn_header.sn_type) {
-	case SCTP_SEND_FAILED:
-		retry_failed_sctp_send(con, &sn->sn_send_failed, buf);
-		break;
-	case SCTP_ASSOC_CHANGE:
-		switch (sn->sn_assoc_change.sac_state) {
-		case SCTP_COMM_UP:
-		case SCTP_RESTART:
-		{
-			/* Check that the new node is in the lockspace */
-			struct sctp_prim prim;
-			int nodeid;
-			int prim_len, ret;
-			int addr_len;
-			struct connection *new_con;
-
-			/*
-			 * We get this before any data for an association.
-			 * We verify that the node is in the cluster and
-			 * then peel off a socket for it.
-			 */
-			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
-				log_print("COMM_UP for invalid assoc ID %d",
-					 (int)sn->sn_assoc_change.sac_assoc_id);
-				sctp_init_failed();
-				return;
-			}
-			memset(&prim, 0, sizeof(struct sctp_prim));
-			prim_len = sizeof(struct sctp_prim);
-			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;
-
-			ret = kernel_getsockopt(con->sock,
-						IPPROTO_SCTP,
-						SCTP_PRIMARY_ADDR,
-						(char*)&prim,
-						&prim_len);
-			if (ret < 0) {
-				log_print("getsockopt/sctp_primary_addr on "
-					  "new assoc %d failed : %d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  ret);
-
-				/* Retry INIT later */
-				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-				if (new_con)
-					clear_bit(CF_CONNECT_PENDING, &con->flags);
-				return;
-			}
-			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
-			if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
-				unsigned char *b=(unsigned char *)&prim.ssp_addr;
-				log_print("reject connect from unknown addr");
-				print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
-						     b, sizeof(struct sockaddr_storage));
-				sctp_send_shutdown(prim.ssp_assoc_id);
-				return;
-			}
-
-			new_con = nodeid2con(nodeid, GFP_NOFS);
-			if (!new_con)
-				return;
-
-			/* Peel off a new sock */
-			lock_sock(con->sock->sk);
-			ret = sctp_do_peeloff(con->sock->sk,
-				sn->sn_assoc_change.sac_assoc_id,
-				&new_con->sock);
-			release_sock(con->sock->sk);
-			if (ret < 0) {
-				log_print("Can't peel off a socket for "
-					  "connection %d to node %d: err=%d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  nodeid, ret);
-				return;
-			}
-			add_sock(new_con->sock, new_con);
-
-			linger.l_onoff = 1;
-			linger.l_linger = 0;
-			ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
-						(char *)&linger, sizeof(linger));
-			if (ret < 0)
-				log_print("set socket option SO_LINGER failed");
-
-			log_print("connecting to %d sctp association %d",
-				 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
-
-			new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id;
-			new_con->try_new_addr = false;
-			/* Send any pending writes */
-			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
-			clear_bit(CF_INIT_PENDING, &new_con->flags);
-			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
-				queue_work(send_workqueue, &new_con->swork);
-			}
-			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
-				queue_work(recv_workqueue, &new_con->rwork);
-		}
-		break;
-
-		case SCTP_COMM_LOST:
-		case SCTP_SHUTDOWN_COMP:
-		{
-			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-			if (con) {
-				con->sctp_assoc = 0;
-			}
-		}
-		break;
-
-		case SCTP_CANT_STR_ASSOC:
-		{
-			/* Will retry init when we get the send failed notification */
-			log_print("Can't start SCTP association - retrying");
-		}
-		break;
-
-		default:
-			log_print("unexpected SCTP assoc change id=%d state=%d",
-				  (int)sn->sn_assoc_change.sac_assoc_id,
-				  sn->sn_assoc_change.sac_state);
-		}
-	default:
-		; /* fall through */
-	}
-}
-
 /* Data received from remote end */
 static int receive_from_sock(struct connection *con)
 {
@@ -793,7 +575,6 @@
 	int r;
 	int call_again_soon = 0;
 	int nvec;
-	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 
 	mutex_lock(&con->sock_mutex);
 
@@ -801,6 +582,10 @@
 		ret = -EAGAIN;
 		goto out_close;
 	}
+	if (con->nodeid == 0) {
+		ret = -EINVAL;
+		goto out_close;
+	}
 
 	if (con->rx_page == NULL) {
 		/*
@@ -813,11 +598,6 @@
 		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
 	}
 
-	/* Only SCTP needs these really */
-	memset(&incmsg, 0, sizeof(incmsg));
-	msg.msg_control = incmsg;
-	msg.msg_controllen = sizeof(incmsg);
-
 	/*
 	 * iov[0] is the bit of the circular buffer between the current end
 	 * point (cb.base + cb.len) and the end of the buffer.
@@ -843,31 +623,18 @@
 			       MSG_DONTWAIT | MSG_NOSIGNAL);
 	if (ret <= 0)
 		goto out_close;
-
-	/* Process SCTP notifications */
-	if (msg.msg_flags & MSG_NOTIFICATION) {
-		msg.msg_control = incmsg;
-		msg.msg_controllen = sizeof(incmsg);
-
-		process_sctp_notification(con, &msg,
-				page_address(con->rx_page) + con->cb.base);
-		mutex_unlock(&con->sock_mutex);
-		return 0;
-	}
-	BUG_ON(con->nodeid == 0);
-
-	if (ret == len)
+	else if (ret == len)
 		call_again_soon = 1;
+
 	cbuf_add(&con->cb, ret);
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
 					  PAGE_CACHE_SIZE);
 	if (ret == -EBADMSG) {
-		log_print("lowcomms: addr=%p, base=%u, len=%u, "
-			  "iov_len=%u, iov_base[0]=%p, read=%d",
-			  page_address(con->rx_page), con->cb.base, con->cb.len,
-			  len, iov[0].iov_base, r);
+		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
+			  page_address(con->rx_page), con->cb.base,
+			  con->cb.len, r);
 	}
 	if (ret < 0)
 		goto out_close;
@@ -892,7 +659,7 @@
 out_close:
 	mutex_unlock(&con->sock_mutex);
 	if (ret != -EAGAIN) {
-		close_connection(con, false);
+		close_connection(con, false, true, false);
 		/* Reconnect when there is something to send */
 	}
 	/* Don't return success if we really got EOF */
@@ -1033,6 +800,120 @@
 	return result;
 }
 
+static int sctp_accept_from_sock(struct connection *con)
+{
+	/* Check that the new node is in the lockspace */
+	struct sctp_prim prim;
+	int nodeid;
+	int prim_len, ret;
+	int addr_len;
+	struct connection *newcon;
+	struct connection *addcon;
+	struct socket *newsock;
+
+	mutex_lock(&connections_lock);
+	if (!dlm_allow_conn) {
+		mutex_unlock(&connections_lock);
+		return -1;
+	}
+	mutex_unlock(&connections_lock);
+
+	mutex_lock_nested(&con->sock_mutex, 0);
+
+	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
+	if (ret < 0)
+		goto accept_err;
+
+	memset(&prim, 0, sizeof(struct sctp_prim));
+	prim_len = sizeof(struct sctp_prim);
+
+	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
+				(char *)&prim, &prim_len);
+	if (ret < 0) {
+		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
+		goto accept_err;
+	}
+
+	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
+	if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+		unsigned char *b = (unsigned char *)&prim.ssp_addr;
+
+		log_print("reject connect from unknown addr");
+		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
+				     b, sizeof(struct sockaddr_storage));
+		goto accept_err;
+	}
+
+	newcon = nodeid2con(nodeid, GFP_NOFS);
+	if (!newcon) {
+		ret = -ENOMEM;
+		goto accept_err;
+	}
+
+	mutex_lock_nested(&newcon->sock_mutex, 1);
+
+	if (newcon->sock) {
+		struct connection *othercon = newcon->othercon;
+
+		if (!othercon) {
+			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
+			if (!othercon) {
+				log_print("failed to allocate incoming socket");
+				mutex_unlock(&newcon->sock_mutex);
+				ret = -ENOMEM;
+				goto accept_err;
+			}
+			othercon->nodeid = nodeid;
+			othercon->rx_action = receive_from_sock;
+			mutex_init(&othercon->sock_mutex);
+			INIT_WORK(&othercon->swork, process_send_sockets);
+			INIT_WORK(&othercon->rwork, process_recv_sockets);
+			set_bit(CF_IS_OTHERCON, &othercon->flags);
+		}
+		if (!othercon->sock) {
+			newcon->othercon = othercon;
+			othercon->sock = newsock;
+			newsock->sk->sk_user_data = othercon;
+			add_sock(newsock, othercon);
+			addcon = othercon;
+		} else {
+			printk("Extra connection from node %d attempted\n", nodeid);
+			ret = -EAGAIN;
+			mutex_unlock(&newcon->sock_mutex);
+			goto accept_err;
+		}
+	} else {
+		newsock->sk->sk_user_data = newcon;
+		newcon->rx_action = receive_from_sock;
+		add_sock(newsock, newcon);
+		addcon = newcon;
+	}
+
+	log_print("connected to %d", nodeid);
+
+	mutex_unlock(&newcon->sock_mutex);
+
+	/*
+	 * Add it to the active queue in case we got data
+	 * between processing the accept adding the socket
+	 * to the read_sockets list
+	 */
+	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
+		queue_work(recv_workqueue, &addcon->rwork);
+	mutex_unlock(&con->sock_mutex);
+
+	return 0;
+
+accept_err:
+	mutex_unlock(&con->sock_mutex);
+	if (newsock)
+		sock_release(newsock);
+	if (ret != -EAGAIN)
+		log_print("error accepting connection from node: %d", ret);
+
+	return ret;
+}
+
 static void free_entry(struct writequeue_entry *e)
 {
 	__free_page(e->page);
@@ -1057,97 +938,129 @@
 	}
 }
 
+/*
+ * sctp_bind_addrs - bind a SCTP socket to all our addresses
+ */
+static int sctp_bind_addrs(struct connection *con, uint16_t port)
+{
+	struct sockaddr_storage localaddr;
+	int i, addr_len, result = 0;
+
+	for (i = 0; i < dlm_local_count; i++) {
+		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
+		make_sockaddr(&localaddr, port, &addr_len);
+
+		if (!i)
+			result = kernel_bind(con->sock,
+					     (struct sockaddr *)&localaddr,
+					     addr_len);
+		else
+			result = kernel_setsockopt(con->sock, SOL_SCTP,
+						   SCTP_SOCKOPT_BINDX_ADD,
+						   (char *)&localaddr, addr_len);
+
+		if (result < 0) {
+			log_print("Can't bind to %d addr number %d, %d.\n",
+				  port, i + 1, result);
+			break;
+		}
+	}
+	return result;
+}
+
 /* Initiate an SCTP association.
    This is a special case of send_to_sock() in that we don't yet have a
    peeled-off socket for this association, so we use the listening socket
    and add the primary IP address of the remote node.
  */
-static void sctp_init_assoc(struct connection *con)
+static void sctp_connect_to_sock(struct connection *con)
 {
-	struct sockaddr_storage rem_addr;
-	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	struct connection *base_con;
-	struct writequeue_entry *e;
-	int len, offset;
-	int ret;
-	int addrlen;
-	struct kvec iov[1];
+	struct sockaddr_storage daddr;
+	int one = 1;
+	int result;
+	int addr_len;
+	struct socket *sock;
+
+	if (con->nodeid == 0) {
+		log_print("attempt to connect sock 0 foiled");
+		return;
+	}
 
 	mutex_lock(&con->sock_mutex);
-	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
-		goto unlock;
 
-	if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr,
-			   con->try_new_addr)) {
+	/* Some odd races can cause double-connects, ignore them */
+	if (con->retries++ > MAX_CONNECT_RETRIES)
+		goto out;
+
+	if (con->sock) {
+		log_print("node %d already connected.", con->nodeid);
+		goto out;
+	}
+
+	memset(&daddr, 0, sizeof(daddr));
+	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
+	if (result < 0) {
 		log_print("no address for nodeid %d", con->nodeid);
-		goto unlock;
-	}
-	base_con = nodeid2con(0, 0);
-	BUG_ON(base_con == NULL);
-
-	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);
-
-	outmessage.msg_name = &rem_addr;
-	outmessage.msg_namelen = addrlen;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	spin_lock(&con->writequeue_lock);
-
-	if (list_empty(&con->writequeue)) {
-		spin_unlock(&con->writequeue_lock);
-		log_print("writequeue empty for nodeid %d", con->nodeid);
-		goto unlock;
+		goto out;
 	}
 
-	e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
-	len = e->len;
-	offset = e->offset;
+	/* Create a socket to communicate with */
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
+	if (result < 0)
+		goto socket_err;
 
-	/* Send the first block off the write queue */
-	iov[0].iov_base = page_address(e->page)+offset;
-	iov[0].iov_len = len;
-	spin_unlock(&con->writequeue_lock);
+	sock->sk->sk_user_data = con;
+	con->rx_action = receive_from_sock;
+	con->connect_action = sctp_connect_to_sock;
+	add_sock(sock, con);
 
-	if (rem_addr.ss_family == AF_INET) {
-		struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr;
-		log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr);
-	} else {
-		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr;
-		log_print("Trying to connect to %pI6", &sin6->sin6_addr);
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, 0))
+		goto bind_err;
 
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-	sinfo->sinfo_ppid = cpu_to_le32(con->nodeid);
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo->sinfo_flags |= SCTP_ADDR_OVER;
+	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
 
-	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
-	if (ret < 0) {
-		log_print("Send first packet to node %d failed: %d",
-			  con->nodeid, ret);
+	log_print("connecting to %d", con->nodeid);
 
-		/* Try again later */
+	/* Turn off Nagle's algorithm */
+	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
+			  sizeof(one));
+
+	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
+				   O_NONBLOCK);
+	if (result == -EINPROGRESS)
+		result = 0;
+	if (result == 0)
+		goto out;
+
+
+bind_err:
+	con->sock = NULL;
+	sock_release(sock);
+
+socket_err:
+	/*
+	 * Some errors are fatal and this list might need adjusting. For other
+	 * errors we try again until the max number of retries is reached.
+	 */
+	if (result != -EHOSTUNREACH &&
+	    result != -ENETUNREACH &&
+	    result != -ENETDOWN &&
+	    result != -EINVAL &&
+	    result != -EPROTONOSUPPORT) {
+		log_print("connect %d try %d error %d", con->nodeid,
+			  con->retries, result);
+		mutex_unlock(&con->sock_mutex);
+		msleep(1000);
 		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_INIT_PENDING, &con->flags);
-	}
-	else {
-		spin_lock(&con->writequeue_lock);
-		writequeue_entry_complete(e, ret);
-		spin_unlock(&con->writequeue_lock);
+		lowcomms_connect_sock(con);
+		return;
 	}
 
-unlock:
+out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 }
 
 /* Connect a new socket to its peer */
@@ -1236,11 +1149,13 @@
 			  con->retries, result);
 		mutex_unlock(&con->sock_mutex);
 		msleep(1000);
+		clear_bit(CF_CONNECT_PENDING, &con->flags);
 		lowcomms_connect_sock(con);
 		return;
 	}
 out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 	return;
 }
 
@@ -1325,37 +1240,11 @@
 	}
 }
 
-/* Bind to an IP address. SCTP allows multiple address so it can do
-   multi-homing */
-static int add_sctp_bind_addr(struct connection *sctp_con,
-			      struct sockaddr_storage *addr,
-			      int addr_len, int num)
-{
-	int result = 0;
-
-	if (num == 1)
-		result = kernel_bind(sctp_con->sock,
-				     (struct sockaddr *) addr,
-				     addr_len);
-	else
-		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
-					   SCTP_SOCKOPT_BINDX_ADD,
-					   (char *)addr, addr_len);
-
-	if (result < 0)
-		log_print("Can't bind to port %d addr number %d",
-			  dlm_config.ci_tcp_port, num);
-
-	return result;
-}
-
 /* Initialise SCTP socket and bind to all interfaces */
 static int sctp_listen_for_all(void)
 {
 	struct socket *sock = NULL;
-	struct sockaddr_storage localaddr;
-	struct sctp_event_subscribe subscribe;
-	int result = -EINVAL, num = 1, i, addr_len;
+	int result = -EINVAL;
 	struct connection *con = nodeid2con(0, GFP_NOFS);
 	int bufsize = NEEDED_RMEM;
 	int one = 1;
@@ -1366,33 +1255,17 @@
 	log_print("Using SCTP for communications");
 
 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
-				  SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
 	if (result < 0) {
 		log_print("Can't create comms socket, check SCTP is loaded");
 		goto out;
 	}
 
-	/* Listen for events */
-	memset(&subscribe, 0, sizeof(subscribe));
-	subscribe.sctp_data_io_event = 1;
-	subscribe.sctp_association_event = 1;
-	subscribe.sctp_send_failure_event = 1;
-	subscribe.sctp_shutdown_event = 1;
-	subscribe.sctp_partial_delivery_event = 1;
-
 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
 				 (char *)&bufsize, sizeof(bufsize));
 	if (result)
 		log_print("Error increasing buffer space on socket %d", result);
 
-	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
-				   (char *)&subscribe, sizeof(subscribe));
-	if (result < 0) {
-		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
-			  result);
-		goto create_delsock;
-	}
-
 	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
 				   sizeof(one));
 	if (result < 0)
@@ -1402,19 +1275,12 @@
 	sock->sk->sk_user_data = con;
 	con->sock = sock;
 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
-	con->rx_action = receive_from_sock;
-	con->connect_action = sctp_init_assoc;
+	con->rx_action = sctp_accept_from_sock;
+	con->connect_action = sctp_connect_to_sock;
 
-	/* Bind to all interfaces. */
-	for (i = 0; i < dlm_local_count; i++) {
-		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
-		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);
-
-		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
-		if (result)
-			goto create_delsock;
-		++num;
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
+		goto create_delsock;
 
 	result = sock->ops->listen(sock, 5);
 	if (result < 0) {
@@ -1612,14 +1478,13 @@
 
 send_error:
 	mutex_unlock(&con->sock_mutex);
-	close_connection(con, false);
+	close_connection(con, false, false, true);
 	lowcomms_connect_sock(con);
 	return;
 
 out_connect:
 	mutex_unlock(&con->sock_mutex);
-	if (!test_bit(CF_INIT_PENDING, &con->flags))
-		lowcomms_connect_sock(con);
+	lowcomms_connect_sock(con);
 }
 
 static void clean_one_writequeue(struct connection *con)
@@ -1644,15 +1509,9 @@
 	log_print("closing connection to node %d", nodeid);
 	con = nodeid2con(nodeid, 0);
 	if (con) {
-		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_WRITE_PENDING, &con->flags);
 		set_bit(CF_CLOSE, &con->flags);
-		if (cancel_work_sync(&con->swork))
-			log_print("canceled swork for node %d", nodeid);
-		if (cancel_work_sync(&con->rwork))
-			log_print("canceled rwork for node %d", nodeid);
+		close_connection(con, true, true, true);
 		clean_one_writequeue(con);
-		close_connection(con, true);
 	}
 
 	spin_lock(&dlm_node_addrs_spin);
@@ -1685,10 +1544,8 @@
 {
 	struct connection *con = container_of(work, struct connection, swork);
 
-	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags))
 		con->connect_action(con);
-		set_bit(CF_WRITE_PENDING, &con->flags);
-	}
 	if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
 		send_to_sock(con);
 }
@@ -1735,7 +1592,7 @@
 
 static void free_conn(struct connection *con)
 {
-	close_connection(con, true);
+	close_connection(con, true, true, true);
 	if (con->othercon)
 		kmem_cache_free(con_cache, con->othercon);
 	hlist_del(&con->list);
@@ -1806,7 +1663,7 @@
 	dlm_allow_conn = 0;
 	con = nodeid2con(0,0);
 	if (con) {
-		close_connection(con, false);
+		close_connection(con, false, true, true);
 		kmem_cache_free(con_cache, con);
 	}
 fail_destroy:
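
A detail worth calling out in the lowcomms rework above: close_connection()
now takes explicit tx/rx flags saying which work items to cancel, because
cancel_work_sync() would deadlock if a work handler cancelled the very work
it is running from. Each caller therefore leaves its own side false, as the
hunks show:

	/* from the receive worker (rwork): flush the send side only */
	close_connection(con, false, true, false);

	/* from the send worker (swork): flush the receive side only */
	close_connection(con, false, false, true);

	/* from teardown paths outside either worker: flush both */
	close_connection(con, true, true, true);
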
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 75ecc0d..173b387 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -782,6 +782,7 @@
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback cb;
 	int rv, resid, copy_lvb = 0;
+	int old_mode, new_mode;
 
 	if (count == sizeof(struct dlm_device_version)) {
 		rv = copy_version_to_user(buf, count);
@@ -838,6 +839,9 @@
 
 	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
 
+	/* rem_lkb_callback sets a new lkb_last_cast */
+	old_mode = lkb->lkb_last_cast.mode;
+
 	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
 	if (rv < 0) {
 		/* this shouldn't happen; lkb should have been removed from
@@ -861,9 +865,6 @@
 	}
 
 	if (cb.flags & DLM_CB_CAST) {
-		int old_mode, new_mode;
-
-		old_mode = lkb->lkb_last_cast.mode;
 		new_mode = cb.mode;
 
 		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 5718cb9..d72d52b 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -17,7 +17,7 @@
 {
 	struct inode *inode, *toput_inode = NULL;
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
@@ -27,13 +27,15 @@
 		}
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&sb->s_inode_list_lock);
+
 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
-		spin_lock(&inode_sb_list_lock);
+
+		spin_lock(&sb->s_inode_list_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 	iput(toput_inode);
 }
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 97315f2..80d6901 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -258,8 +258,7 @@
 				 &mount_crypt_stat->global_auth_tok_list,
 				 mount_crypt_stat_list) {
 		list_del(&auth_tok->mount_crypt_stat_list);
-		if (auth_tok->global_auth_tok_key
-		    && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
+		if (!(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
 			key_put(auth_tok->global_auth_tok_key);
 		kmem_cache_free(ecryptfs_global_auth_tok_cache, auth_tok);
 	}
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 8db0b46..63cd2c1 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -45,20 +45,20 @@
 static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	int rc;
-
-	if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
-		return 1;
+	int rc = 1;
 
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
 
-	rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
-	if (d_really_is_positive(dentry)) {
-		struct inode *lower_inode =
-			ecryptfs_inode_to_lower(d_inode(dentry));
+	if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
+		rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
 
-		fsstack_copy_attr_all(d_inode(dentry), lower_inode);
+	if (d_really_is_positive(dentry)) {
+		struct inode *inode = d_inode(dentry);
+
+		fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
+		if (!inode->i_nlink)
+			return 0;
 	}
 	return rc;
 }
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 3b57c9f..1982c3f 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -20,6 +20,7 @@
 
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/dax.h>
 #include <linux/quotaops.h>
 #include "ext2.h"
 #include "xattr.h"
@@ -31,6 +32,12 @@
 	return dax_fault(vma, vmf, ext2_get_block, NULL);
 }
 
+static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+						pmd_t *pmd, unsigned int flags)
+{
+	return dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL);
+}
+
 static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	return dax_mkwrite(vma, vmf, ext2_get_block, NULL);
@@ -38,6 +45,7 @@
 
 static const struct vm_operations_struct ext2_dax_vm_ops = {
 	.fault		= ext2_dax_fault,
+	.pmd_fault	= ext2_dax_pmd_fault,
 	.page_mkwrite	= ext2_dax_mkwrite,
 	.pfn_mkwrite	= dax_pfn_mkwrite,
 };
@@ -49,7 +57,7 @@
 
 	file_accessed(file);
 	vma->vm_ops = &ext2_dax_vm_ops;
-	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 	return 0;
 }
 #else
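
The ext2/file.c hunk adds huge-page fault support to the DAX path: the new .pmd_fault handler forwards to dax_pmd_fault() with ext2_get_block, and the mapping gains VM_HUGEPAGE so the fault code will attempt 2 MiB (PMD-sized) mappings. A PMD fault can only be used when the faulting virtual address and the file offset are both PMD-aligned; otherwise the kernel falls back to ordinary 4 KiB faults. A userspace sketch that gives the kernel that chance (the path is hypothetical and assumes an ext2 filesystem mounted with -o dax on DAX-capable storage):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PMD_SIZE (2UL << 20)    /* 2 MiB on x86-64 */

    int main(void)
    {
            int fd = open("/mnt/dax/file", O_RDWR); /* hypothetical DAX file */
            char *p;

            if (fd < 0)
                    return 1;
            if (ftruncate(fd, 2 * PMD_SIZE))
                    return 1;

            /* File offset 0 is PMD-aligned; if the chosen address is too,
             * the first touch below may be satisfied by one PMD entry. */
            p = mmap(NULL, 2 * PMD_SIZE, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            p[0] = 1;
            munmap(p, 2 * PMD_SIZE);
            close(fd);
            return 0;
    }
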
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 5c04a0d..efe5fb2 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -577,7 +577,10 @@
 		goto fail;
 	}
 
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto fail_drop;
+
 	err = dquot_alloc_inode(inode);
 	if (err)
 		goto fail_drop;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 5c09776..c60a248 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -25,6 +25,7 @@
 #include <linux/time.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
+#include <linux/dax.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
@@ -1552,8 +1553,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		error = dquot_transfer(inode, iattr);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 13ec54a..b4841e3 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -96,8 +96,11 @@
 static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
 {
 	struct inode *inode;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode(dir, mode, &dentry->d_name);
 	if (IS_ERR(inode))
@@ -143,7 +146,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode (dir, mode, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -169,7 +174,9 @@
 	if (l > sb->s_blocksize)
 		goto out;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -212,7 +219,9 @@
 	struct inode *inode = d_inode(old_dentry);
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode->i_ctime = CURRENT_TIME_SEC;
 	inode_inc_link_count(inode);
@@ -233,7 +242,9 @@
 	struct inode * inode;
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode_inc_link_count(dir);
 
@@ -279,13 +290,17 @@
 	struct inode * inode = d_inode(dentry);
 	struct ext2_dir_entry_2 * de;
 	struct page * page;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	de = ext2_find_entry (dir, &dentry->d_name, &page);
-	if (!de)
+	if (!de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	err = ext2_delete_entry (de, page);
 	if (err)
@@ -323,14 +338,21 @@
 	struct ext2_dir_entry_2 * dir_de = NULL;
 	struct page * old_page;
 	struct ext2_dir_entry_2 * old_de;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	err = dquot_initialize(old_dir);
+	if (err)
+		goto out;
+
+	err = dquot_initialize(new_dir);
+	if (err)
+		goto out;
 
 	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
-	if (!old_de)
+	if (!old_de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	if (S_ISDIR(old_inode->i_mode)) {
 		err = -EIO;
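
A pattern repeats across the ext2 ialloc.c, inode.c and namei.c hunks above: dquot_initialize() used to return void and now returns an error, so every caller grows a check that feeds the function's existing unwinding (return, goto out, or goto fail_drop). Reduced to a standalone sketch, with stub functions standing in for the quota and allocation calls:

    #include <stdio.h>

    static int quota_init(void)     { return 0; }   /* may fail, e.g. -EIO */
    static int do_create(void)      { return 0; }
    static void undo_partial(void)  { }

    static int create_object(void)
    {
            int err;

            err = quota_init();     /* was void; failure now aborts early */
            if (err)
                    goto out;

            err = do_create();
            if (err)
                    goto out_undo;
            return 0;

    out_undo:
            undo_partial();
    out:
            return err;
    }

    int main(void)
    {
            printf("create_object() = %d\n", create_object());
            return 0;
    }
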
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
deleted file mode 100644
index e8c6ba0..0000000
--- a/fs/ext3/Kconfig
+++ /dev/null
@@ -1,89 +0,0 @@
-config EXT3_FS
-	tristate "Ext3 journalling file system support"
-	select JBD
-	help
-	  This is the journalling version of the Second extended file system
-	  (often called ext3), the de facto standard Linux file system
-	  (method to organize files on a storage device) for hard disks.
-
-	  The journalling code included in this driver means you do not have
-	  to run e2fsck (file system checker) on your file systems after a
-	  crash.  The journal keeps track of any changes that were being made
-	  at the time the system crashed, and can ensure that your file system
-	  is consistent without the need for a lengthy check.
-
-	  Other than adding the journal to the file system, the on-disk format
-	  of ext3 is identical to ext2.  It is possible to freely switch
-	  between using the ext3 driver and the ext2 driver, as long as the
-	  file system has been cleanly unmounted, or e2fsck is run on the file
-	  system.
-
-	  To add a journal on an existing ext2 file system or change the
-	  behavior of ext3 file systems, you can use the tune2fs utility ("man
-	  tune2fs").  To modify attributes of files and directories on ext3
-	  file systems, use chattr ("man chattr").  You need to be using
-	  e2fsprogs version 1.20 or later in order to create ext3 journals
-	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called ext3.
-
-config EXT3_DEFAULTS_TO_ORDERED
-	bool "Default to 'data=ordered' in ext3"
-	depends on EXT3_FS
-	default y
-	help
-	  The journal mode options for ext3 have different tradeoffs
-	  between when data is guaranteed to be on disk and
-	  performance.	The use of "data=writeback" can cause
-	  unwritten data to appear in files after a system crash or
-	  power failure, which can be a security issue.	 However,
-	  "data=ordered" mode can also result in major performance
-	  problems, including seconds-long delays before an fsync()
-	  call returns.	 For details, see:
-
-	  http://ext4.wiki.kernel.org/index.php/Ext3_data_mode_tradeoffs
-
-	  If you have been historically happy with ext3's performance,
-	  data=ordered mode will be a safe choice and you should
-	  answer 'y' here.  If you understand the reliability and data
-	  privacy issues of data=writeback and are willing to make
-	  that trade off, answer 'n'.
-
-config EXT3_FS_XATTR
-	bool "Ext3 extended attributes"
-	depends on EXT3_FS
-	default y
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-	  You need this for POSIX ACL support on ext3.
-
-config EXT3_FS_POSIX_ACL
-	bool "Ext3 POSIX Access Control Lists"
-	depends on EXT3_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N.
-
-config EXT3_FS_SECURITY
-	bool "Ext3 Security Labels"
-	depends on EXT3_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext3 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
deleted file mode 100644
index e77766a..0000000
--- a/fs/ext3/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux ext3-filesystem routines.
-#
-
-obj-$(CONFIG_EXT3_FS) += ext3.o
-
-ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
-
-ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
-ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-ext3-$(CONFIG_EXT3_FS_SECURITY)	 += xattr_security.o
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
deleted file mode 100644
index 8bbaf5b..0000000
--- a/fs/ext3/acl.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * linux/fs/ext3/acl.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Convert from filesystem to in-memory representation.
- */
-static struct posix_acl *
-ext3_acl_from_disk(const void *value, size_t size)
-{
-	const char *end = (char *)value + size;
-	int n, count;
-	struct posix_acl *acl;
-
-	if (!value)
-		return NULL;
-	if (size < sizeof(ext3_acl_header))
-		 return ERR_PTR(-EINVAL);
-	if (((ext3_acl_header *)value)->a_version !=
-	    cpu_to_le32(EXT3_ACL_VERSION))
-		return ERR_PTR(-EINVAL);
-	value = (char *)value + sizeof(ext3_acl_header);
-	count = ext3_acl_count(size);
-	if (count < 0)
-		return ERR_PTR(-EINVAL);
-	if (count == 0)
-		return NULL;
-	acl = posix_acl_alloc(count, GFP_NOFS);
-	if (!acl)
-		return ERR_PTR(-ENOMEM);
-	for (n=0; n < count; n++) {
-		ext3_acl_entry *entry =
-			(ext3_acl_entry *)value;
-		if ((char *)value + sizeof(ext3_acl_entry_short) > end)
-			goto fail;
-		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
-		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
-		switch(acl->a_entries[n].e_tag) {
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				value = (char *)value +
-					sizeof(ext3_acl_entry_short);
-				break;
-
-			case ACL_USER:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_uid =
-					make_kuid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-			case ACL_GROUP:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_gid =
-					make_kgid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	if (value != end)
-		goto fail;
-	return acl;
-
-fail:
-	posix_acl_release(acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Convert from in-memory to filesystem representation.
- */
-static void *
-ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
-{
-	ext3_acl_header *ext_acl;
-	char *e;
-	size_t n;
-
-	*size = ext3_acl_size(acl->a_count);
-	ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
-			sizeof(ext3_acl_entry), GFP_NOFS);
-	if (!ext_acl)
-		return ERR_PTR(-ENOMEM);
-	ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
-	e = (char *)ext_acl + sizeof(ext3_acl_header);
-	for (n=0; n < acl->a_count; n++) {
-		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
-		ext3_acl_entry *entry = (ext3_acl_entry *)e;
-		entry->e_tag  = cpu_to_le16(acl_e->e_tag);
-		entry->e_perm = cpu_to_le16(acl_e->e_perm);
-		switch(acl_e->e_tag) {
-			case ACL_USER:
-				entry->e_id = cpu_to_le32(
-					from_kuid(&init_user_ns, acl_e->e_uid));
-				e += sizeof(ext3_acl_entry);
-				break;
-			case ACL_GROUP:
-				entry->e_id = cpu_to_le32(
-					from_kgid(&init_user_ns, acl_e->e_gid));
-				e += sizeof(ext3_acl_entry);
-				break;
-
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				e += sizeof(ext3_acl_entry_short);
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	return (char *)ext_acl;
-
-fail:
-	kfree(ext_acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Inode operation get_posix_acl().
- *
- * inode->i_mutex: don't care
- */
-struct posix_acl *
-ext3_get_acl(struct inode *inode, int type)
-{
-	int name_index;
-	char *value = NULL;
-	struct posix_acl *acl;
-	int retval;
-
-	switch (type) {
-	case ACL_TYPE_ACCESS:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-		break;
-	case ACL_TYPE_DEFAULT:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-		break;
-	default:
-		BUG();
-	}
-
-	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
-	if (retval > 0) {
-		value = kmalloc(retval, GFP_NOFS);
-		if (!value)
-			return ERR_PTR(-ENOMEM);
-		retval = ext3_xattr_get(inode, name_index, "", value, retval);
-	}
-	if (retval > 0)
-		acl = ext3_acl_from_disk(value, retval);
-	else if (retval == -ENODATA || retval == -ENOSYS)
-		acl = NULL;
-	else
-		acl = ERR_PTR(retval);
-	kfree(value);
-
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
-	return acl;
-}
-
-/*
- * Set the access or default ACL of an inode.
- *
- * inode->i_mutex: down unless called from ext3_new_inode
- */
-static int
-__ext3_set_acl(handle_t *handle, struct inode *inode, int type,
-	     struct posix_acl *acl)
-{
-	int name_index;
-	void *value = NULL;
-	size_t size = 0;
-	int error;
-
-	switch(type) {
-		case ACL_TYPE_ACCESS:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
-				if (error < 0)
-					return error;
-				else {
-					inode->i_ctime = CURRENT_TIME_SEC;
-					ext3_mark_inode_dirty(handle, inode);
-					if (error == 0)
-						acl = NULL;
-				}
-			}
-			break;
-
-		case ACL_TYPE_DEFAULT:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-			if (!S_ISDIR(inode->i_mode))
-				return acl ? -EACCES : 0;
-			break;
-
-		default:
-			return -EINVAL;
-	}
-	if (acl) {
-		value = ext3_acl_to_disk(acl, &size);
-		if (IS_ERR(value))
-			return (int)PTR_ERR(value);
-	}
-
-	error = ext3_xattr_set_handle(handle, inode, name_index, "",
-				      value, size, 0);
-
-	kfree(value);
-
-	if (!error)
-		set_cached_acl(inode, type, acl);
-
-	return error;
-}
-
-int
-ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	error = __ext3_set_acl(handle, inode, type, acl);
-	ext3_journal_stop(handle);
-	if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-	return error;
-}
-
-/*
- * Initialize the ACLs of a new inode. Called from ext3_new_inode.
- *
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
- */
-int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	struct posix_acl *default_acl, *acl;
-	int error;
-
-	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-	if (error)
-		return error;
-
-	if (default_acl) {
-		error = __ext3_set_acl(handle, inode, ACL_TYPE_DEFAULT,
-				       default_acl);
-		posix_acl_release(default_acl);
-	}
-	if (acl) {
-		if (!error)
-			error = __ext3_set_acl(handle, inode, ACL_TYPE_ACCESS,
-					       acl);
-		posix_acl_release(acl);
-	}
-	return error;
-}
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
deleted file mode 100644
index ea1c69e..0000000
--- a/fs/ext3/acl.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-  File: fs/ext3/acl.h
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/posix_acl_xattr.h>
-
-#define EXT3_ACL_VERSION	0x0001
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-	__le32		e_id;
-} ext3_acl_entry;
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-} ext3_acl_entry_short;
-
-typedef struct {
-	__le32		a_version;
-} ext3_acl_header;
-
-static inline size_t ext3_acl_size(int count)
-{
-	if (count <= 4) {
-		return sizeof(ext3_acl_header) +
-		       count * sizeof(ext3_acl_entry_short);
-	} else {
-		return sizeof(ext3_acl_header) +
-		       4 * sizeof(ext3_acl_entry_short) +
-		       (count - 4) * sizeof(ext3_acl_entry);
-	}
-}
-
-static inline int ext3_acl_count(size_t size)
-{
-	ssize_t s;
-	size -= sizeof(ext3_acl_header);
-	s = size - 4 * sizeof(ext3_acl_entry_short);
-	if (s < 0) {
-		if (size % sizeof(ext3_acl_entry_short))
-			return -1;
-		return size / sizeof(ext3_acl_entry_short);
-	} else {
-		if (s % sizeof(ext3_acl_entry))
-			return -1;
-		return s / sizeof(ext3_acl_entry) + 4;
-	}
-}
-
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-
-/* acl.c */
-extern struct posix_acl *ext3_get_acl(struct inode *inode, int type);
-extern int ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
-
-#else  /* CONFIG_EXT3_FS_POSIX_ACL */
-#include <linux/sched.h>
-#define ext3_get_acl NULL
-#define ext3_set_acl NULL
-
-static inline int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	return 0;
-}
-#endif  /* CONFIG_EXT3_FS_POSIX_ACL */
-
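
The deleted acl.h captures ext3's on-disk ACL layout in two inline helpers: the first four entry slots are assumed to be the short-form tags (USER_OBJ, GROUP_OBJ, MASK, OTHER, which need no e_id), so only entries beyond four pay for a full ext3_acl_entry. A standalone recomputation that round-trips ext3_acl_size()/ext3_acl_count(); the byte sizes assume the usual packing of the structs above (4-byte header, 4-byte short entry, 8-byte full entry):

    #include <assert.h>
    #include <stddef.h>

    #define HDR   4u        /* sizeof(ext3_acl_header)      */
    #define SHORT 4u        /* sizeof(ext3_acl_entry_short) */
    #define FULL  8u        /* sizeof(ext3_acl_entry)       */

    static size_t acl_size(unsigned int count)
    {
            if (count <= 4)
                    return HDR + count * SHORT;
            return HDR + 4 * SHORT + (count - 4) * FULL;
    }

    static int acl_count(size_t size)
    {
            long s = (long)(size - HDR) - 4 * SHORT;

            if (s < 0)
                    return (size - HDR) % SHORT ? -1 : (int)((size - HDR) / SHORT);
            return s % FULL ? -1 : (int)(s / FULL + 4);
    }

    int main(void)
    {
            /* e.g. 6 entries: 4 + 4*4 + 2*8 = 36 bytes on disk */
            assert(acl_size(6) == 36 && acl_count(36) == 6);
            for (unsigned int n = 0; n <= 32; n++)
                    assert(acl_count(acl_size(n)) == (int)n);
            return 0;
    }
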
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
deleted file mode 100644
index 158b5d4..0000000
--- a/fs/ext3/balloc.c
+++ /dev/null
@@ -1,2158 +0,0 @@
-/*
- *  linux/fs/ext3/balloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/blkdev.h>
-#include "ext3.h"
-
-/*
- * balloc.c contains the blocks allocation and deallocation routines
- */
-
-/*
- * The free blocks are managed by bitmaps.  A file system contains several
- * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the block.  The descriptors are loaded in memory
- * when a file system is mounted (see ext3_fill_super).
- */
-
-
-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
-/*
- * Calculate the block group number and offset, given a block number
- */
-static void ext3_get_group_no_and_offset(struct super_block *sb,
-	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
-	if (offsetp)
-		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
-	if (blockgrpp)
-		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
-}
-
-/**
- * ext3_get_group_desc() -- load group descriptor from disk
- * @sb:			super block
- * @block_group:	given block group
- * @bh:			pointer to the buffer head to store the block
- *			group descriptor
- */
-struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-					     unsigned int block_group,
-					     struct buffer_head ** bh)
-{
-	unsigned long group_desc;
-	unsigned long offset;
-	struct ext3_group_desc * desc;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (block_group >= sbi->s_groups_count) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "block_group >= groups_count - "
-			    "block_group = %d, groups_count = %lu",
-			    block_group, sbi->s_groups_count);
-
-		return NULL;
-	}
-	smp_rmb();
-
-	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
-	if (!sbi->s_group_desc[group_desc]) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "Group descriptor not loaded - "
-			    "block_group = %d, group_desc = %lu, desc = %lu",
-			     block_group, group_desc, offset);
-		return NULL;
-	}
-
-	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
-	if (bh)
-		*bh = sbi->s_group_desc[group_desc];
-	return desc + offset;
-}
-
-static int ext3_valid_block_bitmap(struct super_block *sb,
-					struct ext3_group_desc *desc,
-					unsigned int block_group,
-					struct buffer_head *bh)
-{
-	ext3_grpblk_t offset;
-	ext3_grpblk_t next_zero_bit;
-	ext3_fsblk_t bitmap_blk;
-	ext3_fsblk_t group_first_block;
-
-	group_first_block = ext3_group_first_block_no(sb, block_group);
-
-	/* check whether block bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode table block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_table);
-	offset = bitmap_blk - group_first_block;
-	next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
-				offset + EXT3_SB(sb)->s_itb_per_group,
-				offset);
-	if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
-		/* good bitmap for inode tables */
-		return 1;
-
-err_out:
-	ext3_error(sb, __func__,
-			"Invalid block bitmap - "
-			"block_group = %d, block = %lu",
-			block_group, bitmap_blk);
-	return 0;
-}
-
-/**
- * read_block_bitmap()
- * @sb:			super block
- * @block_group:	given block group
- *
- * Read the bitmap for a given block_group, and validate that the
- * bits for the block/inode/inode table blocks are set in the bitmap
- *
- * Return buffer_head on success or NULL in case of failure.
- */
-static struct buffer_head *
-read_block_bitmap(struct super_block *sb, unsigned int block_group)
-{
-	struct ext3_group_desc * desc;
-	struct buffer_head * bh = NULL;
-	ext3_fsblk_t bitmap_blk;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		return NULL;
-	trace_ext3_read_block_bitmap(sb, block_group);
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	bh = sb_getblk(sb, bitmap_blk);
-	if (unlikely(!bh)) {
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	if (likely(bh_uptodate_or_lock(bh)))
-		return bh;
-
-	if (bh_submit_read(bh) < 0) {
-		brelse(bh);
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	ext3_valid_block_bitmap(sb, desc, block_group, bh);
-	/*
-	 * the file system was mounted not to panic on errors, so continue
-	 * with the corrupt bitmap
-	 */
-	return bh;
-}
-/*
- * The reservation window structure operations
- * --------------------------------------------
- * Operations include:
- * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
- *
- * We use a red-black tree to represent per-filesystem reservation
- * windows.
- *
- */
-
-/**
- * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
- * @rb_root:		root of per-filesystem reservation rb tree
- * @verbose:		verbose mode
- * @fn:			function which wishes to dump the reservation map
- *
- * If verbose is turned on, it will print the whole block reservation
- * windows (start, end). Otherwise, it will only print out the "bad" windows,
- * those windows that overlap with their immediate neighbors.
- */
-#if 1
-static void __rsv_window_dump(struct rb_root *root, int verbose,
-			      const char *fn)
-{
-	struct rb_node *n;
-	struct ext3_reserve_window_node *rsv, *prev;
-	int bad;
-
-restart:
-	n = rb_first(root);
-	bad = 0;
-	prev = NULL;
-
-	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
-	while (n) {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-		if (verbose)
-			printk("reservation window 0x%p "
-			       "start:  %lu, end:  %lu\n",
-			       rsv, rsv->rsv_start, rsv->rsv_end);
-		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
-			printk("Bad reservation %p (start >= end)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (prev && prev->rsv_end >= rsv->rsv_start) {
-			printk("Bad reservation %p (prev->end >= start)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (bad) {
-			if (!verbose) {
-				printk("Restarting reservation walk in verbose mode\n");
-				verbose = 1;
-				goto restart;
-			}
-		}
-		n = rb_next(n);
-		prev = rsv;
-	}
-	printk("Window map complete.\n");
-	BUG_ON(bad);
-}
-#define rsv_window_dump(root, verbose) \
-	__rsv_window_dump((root), (verbose), __func__)
-#else
-#define rsv_window_dump(root, verbose) do {} while (0)
-#endif
-
-/**
- * goal_in_my_reservation()
- * @rsv:		inode's reservation window
- * @grp_goal:		given goal block relative to the allocation block group
- * @group:		the current allocation block group
- * @sb:			filesystem super block
- *
- * Test if the given goal block (group relative) is within the file's
- * own block reservation window range.
- *
- * If the reservation window is outside the goal allocation group, return 0;
- * grp_goal (given goal block) could be -1, which means no specific
- * goal block. In this case, always return 1.
- * If the goal block is within the reservation window, return 1;
- * otherwise, return 0;
- */
-static int
-goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
-			unsigned int group, struct super_block * sb)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if ((rsv->_rsv_start > group_last_block) ||
-	    (rsv->_rsv_end < group_first_block))
-		return 0;
-	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
-		|| (grp_goal + group_first_block > rsv->_rsv_end)))
-		return 0;
-	return 1;
-}
-
-/**
- * search_reserve_window()
- * @rb_root:		root of reservation tree
- * @goal:		target allocation block
- *
- * Find the reserved window which includes the goal, or the previous one
- * if the goal is not in any window.
- * Returns NULL if there are no windows or if all windows start after the goal.
- */
-static struct ext3_reserve_window_node *
-search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
-{
-	struct rb_node *n = root->rb_node;
-	struct ext3_reserve_window_node *rsv;
-
-	if (!n)
-		return NULL;
-
-	do {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-
-		if (goal < rsv->rsv_start)
-			n = n->rb_left;
-		else if (goal > rsv->rsv_end)
-			n = n->rb_right;
-		else
-			return rsv;
-	} while (n);
-	/*
-	 * We've fallen off the end of the tree: the goal wasn't inside
-	 * any particular node.  OK, the previous node must be to one
-	 * side of the interval containing the goal.  If it's the RHS,
-	 * we need to back up one.
-	 */
-	if (rsv->rsv_start > goal) {
-		n = rb_prev(&rsv->rsv_node);
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-	}
-	return rsv;
-}
-
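
search_reserve_window() above is a "floor" lookup over an ordered set of disjoint intervals: return the window containing the goal, or else the window immediately before it, with the rb_prev() step correcting the case where the tree descent ended on a window that starts after the goal. The same logic over a sorted array, standalone:

    #include <assert.h>
    #include <stddef.h>

    struct win { long start, end; };    /* disjoint, sorted by start */

    /* Return the window containing goal, else the nearest window before
     * it, else NULL when every window starts after goal. */
    static struct win *floor_window(struct win *w, size_t n, long goal)
    {
            struct win *best = NULL;
            size_t lo = 0, hi = n;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;

                    if (goal < w[mid].start) {
                            hi = mid;               /* like n = n->rb_left  */
                    } else if (goal > w[mid].end) {
                            best = &w[mid];         /* candidate predecessor */
                            lo = mid + 1;           /* like n = n->rb_right */
                    } else {
                            return &w[mid];         /* goal inside window   */
                    }
            }
            return best;
    }

    int main(void)
    {
            struct win w[] = { {10, 19}, {40, 49}, {70, 79} };

            assert(floor_window(w, 3, 45) == &w[1]);    /* inside     */
            assert(floor_window(w, 3, 60) == &w[1]);    /* between    */
            assert(floor_window(w, 3, 5)  == NULL);     /* before all */
            return 0;
    }
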
-/**
- * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
- * @sb:			super block
- * @rsv:		reservation window to add
- *
- * Must be called with rsv_lock held.
- */
-void ext3_rsv_window_add(struct super_block *sb,
-		    struct ext3_reserve_window_node *rsv)
-{
-	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
-	struct rb_node *node = &rsv->rsv_node;
-	ext3_fsblk_t start = rsv->rsv_start;
-
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
-	struct ext3_reserve_window_node *this;
-
-	trace_ext3_rsv_window_add(sb, rsv);
-	while (*p)
-	{
-		parent = *p;
-		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
-
-		if (start < this->rsv_start)
-			p = &(*p)->rb_left;
-		else if (start > this->rsv_end)
-			p = &(*p)->rb_right;
-		else {
-			rsv_window_dump(root, 1);
-			BUG();
-		}
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-}
-
-/**
- * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
- * @sb:			super block
- * @rsv:		reservation window to remove
- *
- * Mark the block reservation window as not allocated, and unlink it
- * from the filesystem reservation window rb tree. Must be called with
- * rsv_lock held.
- */
-static void rsv_window_remove(struct super_block *sb,
-			      struct ext3_reserve_window_node *rsv)
-{
-	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_alloc_hit = 0;
-	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
-}
-
-/*
- * rsv_is_empty() -- Check if the reservation window is allocated.
- * @rsv:		given reservation window to check
- *
- * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
- */
-static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
-{
-	/* a valid reservation end block cannot be 0 */
-	return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-}
-
-/**
- * ext3_init_block_alloc_info()
- * @inode:		file inode structure
- *
- * Allocate and initialize the reservation window structure, and
- * finally link the window to the ext3 inode structure.
- *
- * The reservation window structure is only dynamically allocated
- * and linked to the ext3 inode the first time the open file
- * needs a new block. So, before every ext3_new_block(s) call, for
- * regular files, we should check whether the reservation window
- * structure exists or not. If it does not, this function is called.
- * Failing to do so will result in block reservation being turned off for that
- * open file.
- *
- * This function is called from ext3_get_blocks_handle(), also called
- * when setting the reservation window size through ioctl before the file
- * is open for write (needs block allocation).
- *
- * Needs truncate_mutex protection prior to calling this function.
- */
-void ext3_init_block_alloc_info(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i;
-	struct super_block *sb = inode->i_sb;
-
-	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
-	if (block_i) {
-		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
-
-		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-
-		/*
-		 * if filesystem is mounted with NORESERVATION, the goal
-		 * reservation window size is set to zero to indicate
-		 * block reservation is off
-		 */
-		if (!test_opt(sb, RESERVATION))
-			rsv->rsv_goal_size = 0;
-		else
-			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
-		rsv->rsv_alloc_hit = 0;
-		block_i->last_alloc_logical_block = 0;
-		block_i->last_alloc_physical_block = 0;
-	}
-	ei->i_block_alloc_info = block_i;
-}
-
-/**
- * ext3_discard_reservation()
- * @inode:		inode
- *
- * Discard (free) the block reservation window on last file close, or truncate
- * or at last iput().
- *
- * It is being called in three cases:
- *	ext3_release_file(): last writer closes the file
- *	ext3_clear_inode(): last iput(), when nobody links to this file.
- *	ext3_truncate(): when the block indirect map is about to change.
- *
- */
-void ext3_discard_reservation(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
-	struct ext3_reserve_window_node *rsv;
-	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
-
-	if (!block_i)
-		return;
-
-	rsv = &block_i->rsv_window_node;
-	if (!rsv_is_empty(&rsv->rsv_window)) {
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&rsv->rsv_window)) {
-			trace_ext3_discard_reservation(inode, rsv);
-			rsv_window_remove(inode->i_sb, rsv);
-		}
-		spin_unlock(rsv_lock);
-	}
-}
-
-/**
- * ext3_free_blocks_sb() -- Free given blocks and update quota
- * @handle:			handle to this transaction
- * @sb:				super block
- * @block:			start physical block to free
- * @count:			number of blocks to free
- * @pdquot_freed_blocks:	pointer to quota
- */
-void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
-			 ext3_fsblk_t block, unsigned long count,
-			 unsigned long *pdquot_freed_blocks)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gd_bh;
-	unsigned long block_group;
-	ext3_grpblk_t bit;
-	unsigned long i;
-	unsigned long overflow;
-	struct ext3_group_desc * desc;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int err = 0, ret;
-	ext3_grpblk_t group_freed;
-
-	*pdquot_freed_blocks = 0;
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (block < le32_to_cpu(es->s_first_data_block) ||
-	    block + count < block ||
-	    block + count > le32_to_cpu(es->s_blocks_count)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks not in datazone - "
-			    "block = "E3FSBLK", count = %lu", block, count);
-		goto error_return;
-	}
-
-	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
-
-do_more:
-	overflow = 0;
-	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	bit = (block - le32_to_cpu(es->s_first_data_block)) %
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	/*
-	 * Check to see if we are freeing blocks across a group
-	 * boundary.
-	 */
-	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-		count -= overflow;
-	}
-	brelse(bitmap_bh);
-	bitmap_bh = read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
-	if (!desc)
-		goto error_return;
-
-	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
-	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
-	    in_range (block, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group) ||
-	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks in system zones - "
-			    "Block = "E3FSBLK", count = %lu",
-			    block, count);
-		goto error_return;
-	}
-
-	/*
-	 * We are about to start releasing blocks in the bitmap,
-	 * so we need undo access.
-	 */
-	/* @@@ check errors */
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto error_return;
-
-	/*
-	 * We are about to modify some metadata.  Call the journal APIs
-	 * to unshare ->b_data if a currently-committing transaction is
-	 * using it
-	 */
-	BUFFER_TRACE(gd_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gd_bh);
-	if (err)
-		goto error_return;
-
-	jbd_lock_bh_state(bitmap_bh);
-
-	for (i = 0, group_freed = 0; i < count; i++) {
-		/*
-		 * An HJ special.  This is expensive...
-		 */
-#ifdef CONFIG_JBD_DEBUG
-		jbd_unlock_bh_state(bitmap_bh);
-		{
-			struct buffer_head *debug_bh;
-			debug_bh = sb_find_get_block(sb, block + i);
-			if (debug_bh) {
-				BUFFER_TRACE(debug_bh, "Deleted!");
-				if (!bh2jh(bitmap_bh)->b_committed_data)
-					BUFFER_TRACE(debug_bh,
-						"No committed data in bitmap");
-				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
-				__brelse(debug_bh);
-			}
-		}
-		jbd_lock_bh_state(bitmap_bh);
-#endif
-		if (need_resched()) {
-			jbd_unlock_bh_state(bitmap_bh);
-			cond_resched();
-			jbd_lock_bh_state(bitmap_bh);
-		}
-		/* @@@ This prevents newly-allocated data from being
-		 * freed and then reallocated within the same
-		 * transaction.
-		 *
-		 * Ideally we would want to allow that to happen, but to
-		 * do so requires making journal_forget() capable of
-		 * revoking the queued write of a data block, which
-		 * implies blocking on the journal lock.  *forget()
-		 * cannot block due to truncate races.
-		 *
-		 * Eventually we can fix this by making journal_forget()
-		 * return a status indicating whether or not it was able
-		 * to revoke the buffer.  On successful revoke, it is
-		 * safe not to set the allocation bit in the committed
-		 * bitmap, because we know that there is no outstanding
-		 * activity on the buffer any more and so it is safe to
-		 * reallocate it.
-		 */
-		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
-		J_ASSERT_BH(bitmap_bh,
-				bh2jh(bitmap_bh)->b_committed_data != NULL);
-		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
-				bh2jh(bitmap_bh)->b_committed_data);
-
-		/*
-		 * We clear the bit in the bitmap after setting the committed
-		 * data bit, because this is the reverse order to that which
-		 * the allocator uses.
-		 */
-		BUFFER_TRACE(bitmap_bh, "clear bit");
-		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-						bit + i, bitmap_bh->b_data)) {
-			jbd_unlock_bh_state(bitmap_bh);
-			ext3_error(sb, __func__,
-				"bit already cleared for block "E3FSBLK,
-				 block + i);
-			jbd_lock_bh_state(bitmap_bh);
-			BUFFER_TRACE(bitmap_bh, "bit already cleared");
-		} else {
-			group_freed++;
-		}
-	}
-	jbd_unlock_bh_state(bitmap_bh);
-
-	spin_lock(sb_bgl_lock(sbi, block_group));
-	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
-	percpu_counter_add(&sbi->s_freeblocks_counter, count);
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gd_bh);
-	if (!err) err = ret;
-	*pdquot_freed_blocks += group_freed;
-
-	if (overflow && !err) {
-		block += count;
-		count = overflow;
-		goto do_more;
-	}
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, err);
-	return;
-}
-
-/**
- * ext3_free_blocks() -- Free given blocks and update quota
- * @handle:		handle for this transaction
- * @inode:		inode
- * @block:		start physical block to free
- * @count:		number of blocks to count
- */
-void ext3_free_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count)
-{
-	struct super_block *sb = inode->i_sb;
-	unsigned long dquot_freed_blocks;
-
-	trace_ext3_free_blocks(inode, block, count);
-	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-	if (dquot_freed_blocks)
-		dquot_free_block(inode, dquot_freed_blocks);
-	return;
-}
-
-/**
- * ext3_test_allocatable()
- * @nr:			given allocation block group
- * @bh:			bufferhead contains the bitmap of the given block group
- *
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy.  This
- * prevents deletes from freeing up the page for reuse until we have
- * committed the delete transaction.
- *
- * If we didn't do this, then deleting something and reallocating it as
- * data would allow the old block to be overwritten before the
- * transaction committed (because we force data to disk before commit).
- * This would lead to corruption if we crashed between overwriting the
- * data and committing the delete.
- *
- * @@@ We may want to make this allocation behaviour conditional on
- * data-writes at some point, and disable it for metadata allocations or
- * sync-data inodes.
- */
-static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
-{
-	int ret;
-	struct journal_head *jh = bh2jh(bh);
-
-	if (ext3_test_bit(nr, bh->b_data))
-		return 0;
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data)
-		ret = 1;
-	else
-		ret = !ext3_test_bit(nr, jh->b_committed_data);
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
-
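
ext3_test_allocatable() implements the snapshot-safety rule described in the comment above it: a block may be reused only if it is free in the live bitmap and in the last-committed copy jbd keeps in b_committed_data; otherwise the data of a not-yet-committed delete could be overwritten before the commit. A standalone model of the two-bitmap test (one byte per block for brevity; a NULL committed copy means no committing transaction, so the live bitmap alone decides):

    #include <assert.h>
    #include <stddef.h>

    /* 1 = allocated, 0 = free; committed may be NULL. */
    static int test_allocatable(int nr, const unsigned char *live,
                                const unsigned char *committed)
    {
            if (live[nr])
                    return 0;               /* busy right now */
            if (committed && committed[nr])
                    return 0;               /* freed only in the running txn */
            return 1;
    }

    int main(void)
    {
            unsigned char live[4]      = { 1, 0, 0, 0 };
            unsigned char committed[4] = { 1, 1, 0, 0 };

            assert(!test_allocatable(0, live, committed));  /* allocated      */
            assert(!test_allocatable(1, live, committed));  /* delete pending */
            assert(test_allocatable(2, live, committed));   /* free in both   */
            assert(test_allocatable(1, live, NULL));        /* no txn copy    */
            return 0;
    }
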
-/**
- * bitmap_search_next_usable_block()
- * @start:		the starting block (group relative) of the search
- * @bh:			bufferhead contains the block group bitmap
- * @maxblocks:		the ending block (group relative) of the reservation
- *
- * The bitmap search --- search forward alternately through the actual
- * bitmap on disk and the last-committed copy in journal, until we find a
- * bit free in both bitmaps.
- */
-static ext3_grpblk_t
-bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-					ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t next;
-	struct journal_head *jh = bh2jh(bh);
-
-	while (start < maxblocks) {
-		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
-		if (next >= maxblocks)
-			return -1;
-		if (ext3_test_allocatable(next, bh))
-			return next;
-		jbd_lock_bh_state(bh);
-		if (jh->b_committed_data)
-			start = ext3_find_next_zero_bit(jh->b_committed_data,
-							maxblocks, next);
-		jbd_unlock_bh_state(bh);
-	}
-	return -1;
-}
-
-/**
- * find_next_usable_block()
- * @start:		the starting block (group relative) to find next
- *			allocatable block in bitmap.
- * @bh:			bufferhead contains the block group bitmap
- * @maxblocks:		the ending block (group relative) for the search
- *
- * Find an allocatable block in a bitmap.  We honor both the bitmap and
- * its last-committed copy (if that exists), and perform the "most
- * appropriate allocation" algorithm of looking for a free block near
- * the initial goal; then for a free byte somewhere in the bitmap; then
- * for any free bit in the bitmap.
- */
-static ext3_grpblk_t
-find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-			ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t here, next;
-	char *p, *r;
-
-	if (start > 0) {
-		/*
-		 * The goal was occupied; search forward for a free
-		 * block within the next XX blocks.
-		 *
-		 * end_goal is more or less random, but it has to be
-		 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
-		 * next 64-bit boundary is simple..
-		 */
-		ext3_grpblk_t end_goal = (start + 63) & ~63;
-		if (end_goal > maxblocks)
-			end_goal = maxblocks;
-		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
-		if (here < end_goal && ext3_test_allocatable(here, bh))
-			return here;
-		ext3_debug("Bit not found near goal\n");
-	}
-
-	here = start;
-	if (here < 0)
-		here = 0;
-
-	p = bh->b_data + (here >> 3);
-	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
-	next = (r - bh->b_data) << 3;
-
-	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
-		return next;
-
-	/*
-	 * The bitmap search --- search forward alternately through the actual
-	 * bitmap and the last-committed copy until we find a bit free in
-	 * both
-	 */
-	here = bitmap_search_next_usable_block(here, bh, maxblocks);
-	return here;
-}
-
-/**
- * claim_block()
- * @lock:		the spin lock for this block group
- * @block:		the free block (group relative) to allocate
- * @bh:			the buffer_head contains the block group bitmap
- *
- * We think we can allocate this block in this bitmap.  Try to set the bit.
- * If that succeeds then check that nobody has allocated and then freed the
- * block since we saw that it was not marked in b_committed_data.  If it _was_
- * allocated and freed then clear the bit in the bitmap again and return
- * zero (failure).
- */
-static inline int
-claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-	int ret;
-
-	if (ext3_set_bit_atomic(lock, block, bh->b_data))
-		return 0;
-	jbd_lock_bh_state(bh);
-	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
-		ext3_clear_bit_atomic(lock, block, bh->b_data);
-		ret = 0;
-	} else {
-		ret = 1;
-	}
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
-
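
claim_block() above is optimistic: set the bit in the live bitmap atomically first, then re-check the committed copy under the buffer state lock, and roll the bit back if a still-committing transaction had freed that block. The same shape with C11 atomics, standalone (a mutex stands in for jbd_lock_bh_state(); one byte per block):

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int live[4];                      /* 1 = allocated        */
    static int committed[4] = { 0, 1, 0, 0 };       /* last committed state */
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static int claim_block(int nr)
    {
            /* Optimistically grab the bit; fail if someone beat us to it. */
            if (atomic_exchange(&live[nr], 1))
                    return 0;

            pthread_mutex_lock(&state_lock);
            if (committed[nr]) {
                    /* Freed by a committing txn: not reusable yet, roll back. */
                    atomic_store(&live[nr], 0);
                    pthread_mutex_unlock(&state_lock);
                    return 0;
            }
            pthread_mutex_unlock(&state_lock);
            return 1;
    }

    int main(void)
    {
            assert(claim_block(0) == 1);            /* free everywhere        */
            assert(claim_block(0) == 0);            /* already claimed        */
            assert(claim_block(1) == 0);            /* busy in committed copy */
            assert(atomic_load(&live[1]) == 0);     /* claim was rolled back  */
            return 0;
    }
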
-/**
- * ext3_try_to_allocate()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holds the block bitmap
- * @grp_goal:		given target block within the group
- * @count:		target number of blocks to allocate
- * @my_rsv:		reservation window
- *
- * Attempt to allocate blocks within a given range. Set the range of allocation
- * first, then find the first free bit(s) from the bitmap (within the range),
- * and finally, allocate the blocks by claiming the found free bits as allocated.
- *
- * To set the range of this allocation:
- *	if there is a reservation window, only try to allocate block(s) from the
- *	file's own reservation window;
- *	Otherwise, the allocation range starts from the given goal block and ends at
- *	the block group's last block.
- *
- * If we failed to allocate the desired block then we may end up crossing to a
- * new bitmap.  In that case we must release write access to the old one via
- * ext3_journal_release_buffer(), else we'll run out of credits.
- */
-static ext3_grpblk_t
-ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
-			unsigned long *count, struct ext3_reserve_window *my_rsv)
-{
-	ext3_fsblk_t group_first_block;
-	ext3_grpblk_t start, end;
-	unsigned long num = 0;
-
-	/* we do allocation within the reservation window if we have a window */
-	if (my_rsv) {
-		group_first_block = ext3_group_first_block_no(sb, group);
-		if (my_rsv->_rsv_start >= group_first_block)
-			start = my_rsv->_rsv_start - group_first_block;
-		else
-			/* reservation window cross group boundary */
-			start = 0;
-		end = my_rsv->_rsv_end - group_first_block + 1;
-		if (end > EXT3_BLOCKS_PER_GROUP(sb))
-			/* reservation window crosses group boundary */
-			end = EXT3_BLOCKS_PER_GROUP(sb);
-		if ((start <= grp_goal) && (grp_goal < end))
-			start = grp_goal;
-		else
-			grp_goal = -1;
-	} else {
-		if (grp_goal > 0)
-			start = grp_goal;
-		else
-			start = 0;
-		end = EXT3_BLOCKS_PER_GROUP(sb);
-	}
-
-	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
-
-repeat:
-	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
-		grp_goal = find_next_usable_block(start, bitmap_bh, end);
-		if (grp_goal < 0)
-			goto fail_access;
-		if (!my_rsv) {
-			int i;
-
-			for (i = 0; i < 7 && grp_goal > start &&
-					ext3_test_allocatable(grp_goal - 1,
-								bitmap_bh);
-					i++, grp_goal--)
-				;
-		}
-	}
-	start = grp_goal;
-
-	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-		grp_goal, bitmap_bh)) {
-		/*
-		 * The block was allocated by another thread, or it was
-		 * allocated and then freed by another thread
-		 */
-		start++;
-		grp_goal++;
-		if (start >= end)
-			goto fail_access;
-		goto repeat;
-	}
-	num++;
-	grp_goal++;
-	while (num < *count && grp_goal < end
-		&& ext3_test_allocatable(grp_goal, bitmap_bh)
-		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-				grp_goal, bitmap_bh)) {
-		num++;
-		grp_goal++;
-	}
-	*count = num;
-	return grp_goal - num;
-fail_access:
-	*count = num;
-	return -1;
-}
-
-/**
- *	find_next_reservable_window():
- *		find a reservable space within the given range.
- *		It does not allocate the reservation window for now:
- *		alloc_new_reservation() will do the work later.
- *
- *	@search_head: the head of the searching list;
- *		This is not necessarily the list head of the whole filesystem
- *
- *		We have both head and start_block to assist the search
- *		for the reservable space. The list starts from head,
- *		but we will shift to the place where start_block is,
- *		then start from there, when looking for a reservable space.
- *
- *	@my_rsv: the reservation window
- *
- *	@sb: the super block
- *
- *	@start_block: the first block we consider to start
- *			the real search from
- *
- *	@last_block:
- *		the maximum block number that our goal reservable space
- *		could start from. This is normally the last block in this
- *		group. The search ends when it finds that the start of the
- *		next possible reservable space is beyond this boundary.
- *		This handles the case of a reservation window request that
- *		crosses the group boundary.
- *
- *	Basically we search the given range (start_block, last_block),
- *	rather than the whole reservation double-linked list, to find a
- *	free region that is of the requested size and has not been
- *	reserved.
- *
- */
-static int find_next_reservable_window(
-				struct ext3_reserve_window_node *search_head,
-				struct ext3_reserve_window_node *my_rsv,
-				struct super_block * sb,
-				ext3_fsblk_t start_block,
-				ext3_fsblk_t last_block)
-{
-	struct rb_node *next;
-	struct ext3_reserve_window_node *rsv, *prev;
-	ext3_fsblk_t cur;
-	int size = my_rsv->rsv_goal_size;
-
-	/* TODO: make the start of the reservation window byte-aligned */
-	/* cur = *start_block & ~7;*/
-	cur = start_block;
-	rsv = search_head;
-	if (!rsv)
-		return -1;
-
-	while (1) {
-		if (cur <= rsv->rsv_end)
-			cur = rsv->rsv_end + 1;
-
-		/* TODO?
-		 * in the case we could not find a reservable space of the
-		 * expected size, we could, during the re-search, remember
-		 * the largest reservable space we saw and return that one.
-		 *
-		 * For now it will fail if we could not find the reservable
-		 * space with expected-size (or more)...
-		 */
-		if (cur > last_block)
-			return -1;		/* fail */
-
-		prev = rsv;
-		next = rb_next(&rsv->rsv_node);
-		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
-
-		/*
-		 * Reached the last reservation, we can just append to the
-		 * previous one.
-		 */
-		if (!next)
-			break;
-
-		if (cur + size <= rsv->rsv_start) {
-			/*
-			 * Found a reservable space big enough.  We could
-			 * have a reservation across the group boundary here
-			 */
-			break;
-		}
-	}
-	/*
-	 * we get here either
-	 * when we reach the end of the whole list and there is empty
-	 * reservable space after the last entry in the list, in which
-	 * case we append the window to the end of the list;
-	 * or when we find a reservable space in the middle of the list,
-	 * in which case we return the reservation window that we can
-	 * append to.
-	 */
-
-	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
-		rsv_window_remove(sb, my_rsv);
-
-	/*
-	 * Let's book the whole available window for now.  We will check the
-	 * disk bitmap later and then, if there are free blocks, we adjust
-	 * the window size if it's larger than requested.
-	 * Otherwise, we will remove this node from the tree the next time
-	 * find_next_reservable_window() is called.
-	 */
-	my_rsv->rsv_start = cur;
-	my_rsv->rsv_end = cur + size - 1;
-	my_rsv->rsv_alloc_hit = 0;
-
-	if (prev != my_rsv)
-		ext3_rsv_window_add(sb, my_rsv);
-
-	return 0;
-}
-
-/**
- *	alloc_new_reservation()--allocate a new reservation window
- *
- *		To make a new reservation, we search part of the filesystem
- *		reservation list (the list inside the group). We try to
- *		allocate a new reservation window near the allocation goal,
- *		or the beginning of the group, if there is no goal.
- *
- *		We first find a reservable space after the goal, then from
- *		there, we check the bitmap for the first free block after
- *		it. If there is no free block until the end of the group, the
- *		whole group is full and we fail. Otherwise, check if the free
- *		block is inside the expected reservable space, if so, we
- *		succeed.
- *		If the first free block is outside the reservable space, then
- *		start from the first free block, we search for next available
- *		space, and go on.
- *
- *	On success, a new reservation will be found and inserted into the list.
- *	It contains at least one free block, and it does not overlap with other
- *	reservation windows.
- *
- *	On failure, we failed to find a reservation window in this group.
- *
- *	@my_rsv: the reservation window
- *
- *	@grp_goal: The goal (group-relative).  It is where the search for a
- *		free reservable space should start from.
- *		If we have a grp_goal (grp_goal > 0), then start from there;
- *		with no grp_goal (grp_goal = -1), we start from the first block
- *		of the group.
- *
- *	@sb: the super block
- *	@group: the group we are trying to allocate in
- *	@bitmap_bh: the block group block bitmap
- *
- */
-static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
-		ext3_grpblk_t grp_goal, struct super_block *sb,
-		unsigned int group, struct buffer_head *bitmap_bh)
-{
-	struct ext3_reserve_window_node *search_head;
-	ext3_fsblk_t group_first_block, group_end_block, start_block;
-	ext3_grpblk_t first_free_block;
-	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
-	unsigned long size;
-	int ret;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if (grp_goal < 0)
-		start_block = group_first_block;
-	else
-		start_block = grp_goal + group_first_block;
-
-	trace_ext3_alloc_new_reservation(sb, start_block);
-	size = my_rsv->rsv_goal_size;
-
-	if (!rsv_is_empty(&my_rsv->rsv_window)) {
-		/*
-		 * if the old reservation crosses the group boundary
-		 * and the goal is inside the old reservation window,
-		 * we will come here when we just failed to allocate from
-		 * the first part of the window. We still have another part
-		 * that belongs to the next group. In this case, there is no
-		 * point in discarding our window and trying to allocate a
-		 * new one in this group (which will fail); we should keep
-		 * the reservation window and simply move on.
-		 *
-		 * Maybe we could shift the start block of the reservation
-		 * window to the first block of next group.
-		 */
-
-		if ((my_rsv->rsv_start <= group_end_block) &&
-				(my_rsv->rsv_end > group_end_block) &&
-				(start_block >= my_rsv->rsv_start))
-			return -1;
-
-		if ((my_rsv->rsv_alloc_hit >
-		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
-			/*
-			 * if the previous allocation hit ratio is
-			 * greater than 1/2, then we double the size of
-			 * the reservation window the next time,
-			 * otherwise we keep the same size window
-			 */
-			size = size * 2;
-			if (size > EXT3_MAX_RESERVE_BLOCKS)
-				size = EXT3_MAX_RESERVE_BLOCKS;
-			my_rsv->rsv_goal_size = size;
-		}
-	}
-
-	spin_lock(rsv_lock);
-	/*
-	 * shift the search start to the window near the goal block
-	 */
-	search_head = search_reserve_window(fs_rsv_root, start_block);
-
-	/*
-	 * find_next_reservable_window() simply finds a reservable window
-	 * inside the given range(start_block, group_end_block).
-	 *
-	 * To make sure the reservation window has a free bit inside it, we
-	 * need to check the bitmap after we find a reservable window.
-	 */
-retry:
-	ret = find_next_reservable_window(search_head, my_rsv, sb,
-						start_block, group_end_block);
-
-	if (ret == -1) {
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;
-	}
-
-	/*
-	 * On success, find_next_reservable_window() returns the
-	 * reservation window where there is a reservable space after it.
-	 * Before we reserve this reservable space, we need
-	 * to make sure there is at least a free block inside this region.
-	 *
-	 * Search the block bitmap and the copy of the last committed
-	 * bitmap alternately for the first free bit, until we find an
-	 * allocatable block. The search starts from the start block of
-	 * the reservable space we just found.
-	 */
-	spin_unlock(rsv_lock);
-	first_free_block = bitmap_search_next_usable_block(
-			my_rsv->rsv_start - group_first_block,
-			bitmap_bh, group_end_block - group_first_block + 1);
-
-	if (first_free_block < 0) {
-		/*
-		 * no free block left in the bitmap, so there is no point
-		 * in reserving the space; return failure.
-		 */
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;		/* failed */
-	}
-
-	start_block = first_free_block + group_first_block;
-	/*
-	 * check if the first free block is within the
-	 * free space we just reserved
-	 */
-	if (start_block >= my_rsv->rsv_start &&
-	    start_block <= my_rsv->rsv_end) {
-		trace_ext3_reserved(sb, start_block, my_rsv);
-		return 0;		/* success */
-	}
-	/*
-	 * if the first free bit we found is outside the reservable space,
-	 * continue the search for the next reservable space,
-	 * starting from where the free block is;
-	 * we also shift the list head to where we stopped last time.
-	 */
-	search_head = my_rsv;
-	spin_lock(rsv_lock);
-	goto retry;
-}
-
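
One detail of alloc_new_reservation() worth pulling out is the adaptive sizing: when more than half of the previous window was actually used (rsv_alloc_hit), the goal size doubles, clamped to EXT3_MAX_RESERVE_BLOCKS (1027 in ext3's headers). The heuristic, reduced to a standalone helper:

    #include <assert.h>

    #define MAX_RESERVE_BLOCKS 1027     /* EXT3_MAX_RESERVE_BLOCKS */

    /* Double the goal size when over half of the last window was used. */
    static unsigned long next_goal_size(unsigned long size,
                                        unsigned long alloc_hit,
                                        unsigned long window_blocks)
    {
            if (alloc_hit > window_blocks / 2) {
                    size *= 2;
                    if (size > MAX_RESERVE_BLOCKS)
                            size = MAX_RESERVE_BLOCKS;
            }
            return size;
    }

    int main(void)
    {
            assert(next_goal_size(8, 3, 8) == 8);           /* ratio <= 1/2: keep */
            assert(next_goal_size(8, 5, 8) == 16);          /* hot file: double   */
            assert(next_goal_size(1000, 8, 8) == 1027);     /* clamped at the cap */
            return 0;
    }
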
-/**
- * try_to_extend_reservation()
- * @my_rsv:		given reservation window
- * @sb:			super block
- * @size:		the delta to extend
- *
- * Attempt to expand the reservation window to be large enough to hold
- * the required number of free blocks
- *
- * Since ext3_try_to_allocate() will always allocate blocks within
- * the reservation window range, if the window size is too small,
- * a multiple-block allocation has to stop at the end of the reservation
- * window. To make this more efficient, given the total number of
- * blocks needed and the current size of the window, we try to
- * expand the reservation window size if necessary on a best-effort
- * basis before ext3_new_blocks() tries to allocate blocks.
- */
-static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
-			struct super_block *sb, int size)
-{
-	struct ext3_reserve_window_node *next_rsv;
-	struct rb_node *next;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	if (!spin_trylock(rsv_lock))
-		return;
-
-	next = rb_next(&my_rsv->rsv_node);
-
-	if (!next)
-		my_rsv->rsv_end += size;
-	else {
-		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
-
-		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
-			my_rsv->rsv_end += size;
-		else
-			my_rsv->rsv_end = next_rsv->rsv_start - 1;
-	}
-	spin_unlock(rsv_lock);
-}
-
-/**
- * ext3_try_to_allocate_with_rsv()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holds the block bitmap
- * @grp_goal:		given target block within the group
- * @my_rsv:		reservation window
- * @count:		target number of blocks to allocate
- * @errp:		pointer to store the error code
- *
- * This is the main function used to allocate a new block and its reservation
- * window.
- *
- * Each time a new block allocation is needed, first try to allocate from
- * the inode's own reservation.  If it does not have a reservation window,
- * then instead of looking for a free bit in the bitmap first and then
- * searching the reservation list to see whether the bit falls inside
- * somebody else's reservation window, we try to allocate a reservation
- * window for it starting from the goal, and then do the block
- * allocation within that reservation window.
- *
- * This avoids repeatedly searching the reservation list when somebody
- * is looking for a free block (without a reservation) and there are
- * lots of free blocks, but they are all being reserved.
- *
- * We use a red-black tree for the per-filesystem reservation list.
- *
- */
-static ext3_grpblk_t
-ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
-			unsigned int group, struct buffer_head *bitmap_bh,
-			ext3_grpblk_t grp_goal,
-			struct ext3_reserve_window_node * my_rsv,
-			unsigned long *count, int *errp)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-	ext3_grpblk_t ret = 0;
-	int fatal;
-	unsigned long num = *count;
-
-	*errp = 0;
-
-	/*
-	 * Make sure we use undo access for the bitmap, because it is critical
-	 * that we do the frozen_data COW on bitmap buffers in all cases even
-	 * if the buffer is in BJ_Forget state in the committing transaction.
-	 */
-	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
-	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (fatal) {
-		*errp = fatal;
-		return -1;
-	}
-
-	/*
-	 * we don't deal with reservations when
-	 * the filesystem is mounted without reservations,
-	 * or the file is not a regular file,
-	 * or the last attempt to allocate a block with reservations
-	 * turned on failed
-	 */
-	if (my_rsv == NULL) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-						grp_goal, count, NULL);
-		goto out;
-	}
-	/*
-	 * grp_goal is a group-relative block number (if there is a goal):
-	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb).
-	 * group_first_block is a filesystem-wide block number:
-	 * the block number of the first block in this group.
-	 */
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	/*
-	 * Basically we will allocate a new block from inode's reservation
-	 * window.
-	 *
-	 * We need to allocate a new reservation window, if:
-	 * a) inode does not have a reservation window; or
-	 * b) last attempt to allocate a block from existing reservation
-	 *    failed; or
-	 * c) we come here with a goal and with a reservation window
-	 *
-	 * We do not need to allocate a new reservation window if we come here
-	 * at the beginning with a goal and the goal is inside the window, or
-	 * if we don't have a goal but already have a reservation window;
-	 * in that case we can allocate from the reservation window directly.
-	 */
-	while (1) {
-		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
-			!goal_in_my_reservation(&my_rsv->rsv_window,
-						grp_goal, group, sb)) {
-			if (my_rsv->rsv_goal_size < *count)
-				my_rsv->rsv_goal_size = *count;
-			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
-							group, bitmap_bh);
-			if (ret < 0)
-				break;			/* failed */
-
-			if (!goal_in_my_reservation(&my_rsv->rsv_window,
-							grp_goal, group, sb))
-				grp_goal = -1;
-		} else if (grp_goal >= 0) {
-			int curr = my_rsv->rsv_end -
-					(grp_goal + group_first_block) + 1;
-
-			if (curr < *count)
-				try_to_extend_reservation(my_rsv, sb,
-							*count - curr);
-		}
-
-		if ((my_rsv->rsv_start > group_last_block) ||
-				(my_rsv->rsv_end < group_first_block)) {
-			rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
-			BUG();
-		}
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-					   grp_goal, &num, &my_rsv->rsv_window);
-		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit += num;
-			*count = num;
-			break;				/* succeed */
-		}
-		num = *count;
-	}
-out:
-	if (ret >= 0) {
-		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
-					"bitmap block");
-		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
-		if (fatal) {
-			*errp = fatal;
-			return -1;
-		}
-		return ret;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
-	ext3_journal_release_buffer(handle, bitmap_bh);
-	return ret;
-}
-
-/**
- * ext3_has_free_blocks()
- * @sbi:		in-core super block structure.
- * @use_reservation:	if set, the reserved-blocks limit is not enforced
- *
- * Check if filesystem has at least 1 free block available for allocation.
- */
-static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
-{
-	ext3_fsblk_t free_blocks, root_blocks;
-
-	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-		!use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
-		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-		 !in_group_p (sbi->s_resgid))) {
-		return 0;
-	}
-	return 1;
-}
-
-/**
- * ext3_should_retry_alloc()
- * @sb:			super block
- * @retries:		number of attempts that have been made
- *
- * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE.
- *
- * If the total number of retries exceeds three, return FALSE.
- */
-int ext3_should_retry_alloc(struct super_block *sb, int *retries)
-{
-	if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
-		return 0;
-
-	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
-
-	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
-}
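
For orientation, here is a minimal caller-side sketch of the retry pattern this helper supports; only ext3_should_retry_alloc() is taken from the code above, and do_allocation() is a hypothetical stand-in for whatever operation can fail with -ENOSPC:

/*
 * Hedged sketch: retry an allocation after ENOSPC.
 * do_allocation() is a hypothetical placeholder, not a real ext3
 * function.
 */
static int allocate_with_retry(struct super_block *sb)
{
	int retries = 0;
	int err;

	do {
		err = do_allocation(sb);	/* hypothetical */
		/*
		 * On ENOSPC, ext3_should_retry_alloc() forces a
		 * transaction commit (which may free blocks) and
		 * permits up to three retries.
		 */
	} while (err == -ENOSPC && ext3_should_retry_alloc(sb, &retries));

	return err;
}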
-
-/**
- * ext3_new_blocks() -- core block(s) allocation function
- * @handle:		handle to this transaction
- * @inode:		file inode
- * @goal:		given target block(filesystem wide)
- * @count:		target number of blocks to allocate
- * @errp:		error code
- *
- * ext3_new_blocks uses a goal block to assist allocation.  It first tries to
- * allocate block(s) from the block group that contains the goal block. If that
- * fails, it will try to allocate block(s) from other block groups without
- * any specific goal block.
- *
- */
-ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gdp_bh;
-	int group_no;
-	int goal_group;
-	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
-	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
-	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
-	int bgi;			/* blockgroup iteration index */
-	int fatal = 0, err;
-	int performed_allocation = 0;
-	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
-	struct super_block *sb;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es;
-	struct ext3_sb_info *sbi;
-	struct ext3_reserve_window_node *my_rsv = NULL;
-	struct ext3_block_alloc_info *block_i;
-	unsigned short windowsz = 0;
-#ifdef EXT3FS_DEBUG
-	static int goal_hits, goal_attempts;
-#endif
-	unsigned long ngroups;
-	unsigned long num = *count;
-
-	*errp = -ENOSPC;
-	sb = inode->i_sb;
-
-	/*
-	 * Check quota for allocation of this block.
-	 */
-	err = dquot_alloc_block(inode, num);
-	if (err) {
-		*errp = err;
-		return 0;
-	}
-
-	trace_ext3_request_blocks(inode, goal, num);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	ext3_debug("goal=%lu.\n", goal);
-	/*
-	 * Allocate a block from the reservation only when
-	 * the filesystem is mounted with reservations (the default,
-	 * -o reservation), it's a regular file, and
-	 * the desired window size is greater than 0 (one could use the
-	 * EXT3_IOC_SETRSVSZ ioctl command to set the window size to 0 to
-	 * turn off reservations on that particular file)
-	 */
-	block_i = EXT3_I(inode)->i_block_alloc_info;
-	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
-		my_rsv = &block_i->rsv_window_node;
-
-	if (!ext3_has_free_blocks(sbi, IS_NOQUOTA(inode))) {
-		*errp = -ENOSPC;
-		goto out;
-	}
-
-	/*
-	 * First, test whether the goal block is free.
-	 */
-	if (goal < le32_to_cpu(es->s_first_data_block) ||
-	    goal >= le32_to_cpu(es->s_blocks_count))
-		goal = le32_to_cpu(es->s_first_data_block);
-	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
-			EXT3_BLOCKS_PER_GROUP(sb);
-	goal_group = group_no;
-retry_alloc:
-	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-	if (!gdp)
-		goto io_error;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	/*
-	 * if there are not enough free blocks to make a new reservation,
-	 * turn off reservations for this allocation
-	 */
-	if (my_rsv && (free_blocks < windowsz)
-		&& (free_blocks > 0)
-		&& (rsv_is_empty(&my_rsv->rsv_window)))
-		my_rsv = NULL;
-
-	if (free_blocks > 0) {
-		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
-				EXT3_BLOCKS_PER_GROUP(sb));
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, grp_target_blk,
-					my_rsv,	&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-
-	ngroups = EXT3_SB(sb)->s_groups_count;
-	smp_rmb();
-
-	/*
-	 * Now search the rest of the groups.  We assume that
-	 * group_no and gdp correctly point to the last group visited.
-	 */
-	for (bgi = 0; bgi < ngroups; bgi++) {
-		group_no++;
-		if (group_no >= ngroups)
-			group_no = 0;
-		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp)
-			goto io_error;
-		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-		/*
-		 * skip this group (and avoid loading bitmap) if there
-		 * are no free blocks
-		 */
-		if (!free_blocks)
-			continue;
-		/*
-		 * skip this group if the number of
-		 * free blocks is less than half of the reservation
-		 * window size.
-		 */
-		if (my_rsv && (free_blocks <= (windowsz/2)))
-			continue;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		/*
-		 * try to allocate block(s) from this group, without a goal (-1).
-		 */
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, -1, my_rsv,
-					&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-	/*
-	 * We may end up with a bogus ENOSPC error earlier because the
-	 * filesystem is "full" of reservations while there may indeed
-	 * be free blocks available on disk.
-	 * In this case, we just forget about the reservations and
-	 * do the block allocation as if there were no reservations.
-	 */
-	if (my_rsv) {
-		my_rsv = NULL;
-		windowsz = 0;
-		group_no = goal_group;
-		goto retry_alloc;
-	}
-	/* No space left on the device */
-	*errp = -ENOSPC;
-	goto out;
-
-allocated:
-
-	ext3_debug("using block group %d(%d)\n",
-			group_no, gdp->bg_free_blocks_count);
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
-
-	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
-	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
-	    in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group) ||
-	    in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group)) {
-		ext3_error(sb, "ext3_new_block",
-			    "Allocating block in system zone - "
-			    "blocks from "E3FSBLK", length %lu",
-			     ret_block, num);
-		/*
-		 * claim_block() marked the blocks we allocated as in use. So we
-		 * may want to selectively mark some of the blocks as free.
-		 */
-		goto retry_alloc;
-	}
-
-	performed_allocation = 1;
-
-#ifdef CONFIG_JBD_DEBUG
-	{
-		struct buffer_head *debug_bh;
-
-		/* Record bitmap buffer state in the newly allocated block */
-		debug_bh = sb_find_get_block(sb, ret_block);
-		if (debug_bh) {
-			BUFFER_TRACE(debug_bh, "state when allocated");
-			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
-			brelse(debug_bh);
-		}
-	}
-	jbd_lock_bh_state(bitmap_bh);
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		int i;
-
-		for (i = 0; i < num; i++) {
-			if (ext3_test_bit(grp_alloc_blk+i,
-					bh2jh(bitmap_bh)->b_committed_data)) {
-				printk("%s: block was unexpectedly set in "
-					"b_committed_data\n", __func__);
-			}
-		}
-	}
-	ext3_debug("found bit %d\n", grp_alloc_blk);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	jbd_unlock_bh_state(bitmap_bh);
-#endif
-
-	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
-		ext3_error(sb, "ext3_new_block",
-			    "block("E3FSBLK") >= blocks count(%d) - "
-			    "block_group = %d, es == %p ", ret_block,
-			le32_to_cpu(es->s_blocks_count), group_no, es);
-		goto out;
-	}
-
-	/*
-	 * It is up to the caller to add the new buffer to a journal
-	 * list of some description.  We don't know in advance whether
-	 * the caller wants to use it as metadata or data.
-	 */
-	ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
-			ret_block, goal_hits, goal_attempts);
-
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
-
-	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
-	fatal = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	*errp = 0;
-	brelse(bitmap_bh);
-
-	if (num < *count) {
-		dquot_free_block(inode, *count-num);
-		*count = num;
-	}
-
-	trace_ext3_allocate_blocks(inode, goal, num,
-				   (unsigned long long)ret_block);
-
-	return ret_block;
-
-io_error:
-	*errp = -EIO;
-out:
-	if (fatal) {
-		*errp = fatal;
-		ext3_std_error(sb, fatal);
-	}
-	/*
-	 * Undo the block allocation
-	 */
-	if (!performed_allocation)
-		dquot_free_block(inode, *count);
-	brelse(bitmap_bh);
-	return 0;
-}
-
-ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp)
-{
-	unsigned long count = 1;
-
-	return ext3_new_blocks(handle, inode, goal, &count, errp);
-}
-
-/**
- * ext3_count_free_blocks() -- count filesystem free blocks
- * @sb:		superblock
- *
- * Adds up the number of free blocks from each block group.
- */
-ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
-{
-	ext3_fsblk_t desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	ext3_fsblk_t bitmap_count;
-	unsigned long x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, i);
-		if (bitmap_bh == NULL)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_blocks: stored = "E3FSBLK
-		", computed = "E3FSBLK", "E3FSBLK"\n",
-	       (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
-		desc_count, bitmap_count);
-	return bitmap_count;
-#else
-	desc_count = 0;
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-	}
-
-	return desc_count;
-#endif
-}
-
-static inline int test_root(int a, int b)
-{
-	int num = b;
-
-	while (a > num)
-		num *= b;
-	return num == a;
-}
-
-static int ext3_group_sparse(int group)
-{
-	if (group <= 1)
-		return 1;
-	if (!(group & 1))
-		return 0;
-	return (test_root(group, 7) || test_root(group, 5) ||
-		test_root(group, 3));
-}
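
As a standalone illustration (ordinary userspace C, not kernel code), the following program enumerates the block groups that ext3_group_sparse() selects, i.e. the groups carrying superblock/descriptor backups when the sparse_super feature is enabled; the two helpers are copied from the functions above:

#include <stdio.h>

static int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return test_root(group, 7) || test_root(group, 5) ||
	       test_root(group, 3);
}

int main(void)
{
	int g;

	/* Prints: 0 1 3 5 7 9 25 27 49 81 */
	for (g = 0; g < 100; g++)
		if (group_sparse(g))
			printf("%d ", g);
	printf("\n");
	return 0;
}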
-
-/**
- *	ext3_bg_has_super - number of blocks used by the superblock in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the superblock (primary or backup)
- *	in this group.  Currently this will be only 0 or 1.
- */
-int ext3_bg_has_super(struct super_block *sb, int group)
-{
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
-			!ext3_group_sparse(group))
-		return 0;
-	return 1;
-}
-
-static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
-{
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-	unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
-	unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
-
-	if (group == first || group == first + 1 || group == last)
-		return 1;
-	return 0;
-}
-
-static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
-{
-	return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
-}
-
-/**
- *	ext3_bg_num_gdb - number of blocks used by the group table in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the group descriptor table
- *	(primary or backup) in this group.  In the future there may be a
- *	different number of descriptor blocks in each group.
- */
-unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
-{
-	unsigned long first_meta_bg =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
-			metagroup < first_meta_bg)
-		return ext3_bg_num_gdb_nometa(sb,group);
-
-	return ext3_bg_num_gdb_meta(sb,group);
-
-}
-
-/**
- * ext3_trim_all_free -- function to trim all free space in alloc. group
- * @sb:			super block for file system
- * @group:		allocation group to trim
- * @start:		first group block to examine
- * @max:		last group block to examine
- * @minblocks:		minimum extent block count
- *
- * ext3_trim_all_free walks through the group's block bitmap searching for free
- * blocks. When a free block is found, it tries to allocate this block and
- * the subsequent free blocks to get the biggest free extent possible, until it
- * reaches a used block. It then issues a TRIM command on this extent and frees
- * the extent in the block bitmap. This is done until the whole group is scanned.
- */
-static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
-					unsigned int group,
-					ext3_grpblk_t start, ext3_grpblk_t max,
-					ext3_grpblk_t minblocks)
-{
-	handle_t *handle;
-	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
-	ext3_fsblk_t discard_block;
-	struct ext3_sb_info *sbi;
-	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
-	struct ext3_group_desc *gdp;
-	int err = 0, ret = 0;
-
-	/*
-	 * We will update one block bitmap, and one group descriptor
-	 */
-	handle = ext3_journal_start_sb(sb, 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	bitmap_bh = read_block_bitmap(sb, group);
-	if (!bitmap_bh) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto err_out;
-
-	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
-	if (!gdp) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gdp_bh);
-	if (err)
-		goto err_out;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	sbi = EXT3_SB(sb);
-
-	 /* Walk through the whole group */
-	while (start <= max) {
-		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
-		if (start < 0)
-			break;
-		next = start;
-
-		/*
-		 * Allocate contiguous free extents by setting bits in the
-		 * block bitmap
-		 */
-		while (next <= max
-			&& claim_block(sb_bgl_lock(sbi, group),
-					next, bitmap_bh)) {
-			next++;
-		}
-
-		 /* We did not claim any blocks */
-		if (next == start)
-			continue;
-
-		discard_block = (ext3_fsblk_t)start +
-				ext3_group_first_block_no(sb, group);
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
-
-		free_blocks -= next - start;
-		/* Do not issue a TRIM on extents smaller than minblocks */
-		if ((next - start) < minblocks)
-			goto free_extent;
-
-		trace_ext3_discard_blocks(sb, discard_block, next - start);
-		 /* Send the TRIM command down to the device */
-		err = sb_issue_discard(sb, discard_block, next - start,
-				       GFP_NOFS, 0);
-		count += (next - start);
-free_extent:
-		freed = 0;
-
-		/*
-		 * Clear bits in the bitmap
-		 */
-		for (bit = start; bit < next; bit++) {
-			BUFFER_TRACE(bitmap_bh, "clear bit");
-			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
-						bit, bitmap_bh->b_data)) {
-				ext3_error(sb, __func__,
-					"bit already cleared for block "E3FSBLK,
-					 (unsigned long)bit);
-				BUFFER_TRACE(bitmap_bh, "bit already cleared");
-			} else {
-				freed++;
-			}
-		}
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
-
-		start = next;
-		if (err < 0) {
-			if (err != -EOPNOTSUPP)
-				ext3_warning(sb, __func__, "Discard command "
-					     "returned error %d\n", err);
-			break;
-		}
-
-		if (fatal_signal_pending(current)) {
-			err = -ERESTARTSYS;
-			break;
-		}
-
-		cond_resched();
-
-		/* No more suitable extents */
-		if (free_blocks < minblocks)
-			break;
-	}
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!err)
-		err = ret;
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (!err)
-		err = ret;
-
-	ext3_debug("trimmed %d blocks in the group %d\n",
-		count, group);
-
-err_out:
-	if (err)
-		count = err;
-	ext3_journal_stop(handle);
-	brelse(bitmap_bh);
-
-	return count;
-}
-
-/**
- * ext3_trim_fs() -- trim ioctl handler function
- * @sb:			superblock for filesystem
- * @range:		fstrim_range structure describing the byte range to
- *			trim (start, len) and the minimum extent length
- *			(minlen)
- *
- * ext3_trim_fs goes through all allocation groups containing bytes from
- * range->start to range->start + range->len. For each such group the
- * ext3_trim_all_free function is invoked to trim all free space.
- */
-int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
-{
-	ext3_grpblk_t last_block, first_block;
-	unsigned long group, first_group, last_group;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	uint64_t start, minlen, end, trimmed = 0;
-	ext3_fsblk_t first_data_blk =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
-	int ret = 0;
-
-	start = range->start >> sb->s_blocksize_bits;
-	end = start + (range->len >> sb->s_blocksize_bits) - 1;
-	minlen = range->minlen >> sb->s_blocksize_bits;
-
-	if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
-	    start >= max_blks ||
-	    range->len < sb->s_blocksize)
-		return -EINVAL;
-	if (end >= max_blks)
-		end = max_blks - 1;
-	if (end <= first_data_blk)
-		goto out;
-	if (start < first_data_blk)
-		start = first_data_blk;
-
-	smp_rmb();
-
-	/* Determine first and last group to examine based on start and len */
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
-				     &first_group, &first_block);
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
-				     &last_group, &last_block);
-
-	/* end now represents the last block to discard in this group */
-	end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
-
-	for (group = first_group; group <= last_group; group++) {
-		gdp = ext3_get_group_desc(sb, group, NULL);
-		if (!gdp)
-			break;
-
-		/*
-		 * For all the groups except the last one, the last block will
-		 * always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
-		 * change it for the last group; note that last_block was
-		 * already computed earlier by ext3_get_group_no_and_offset()
-		 */
-		if (group == last_group)
-			end = last_block;
-
-		if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
-			ret = ext3_trim_all_free(sb, group, first_block,
-						 end, minlen);
-			if (ret < 0)
-				break;
-			trimmed += ret;
-		}
-
-		/*
-		 * For every group except the first one, we are sure
-		 * that the first block to discard will be block #0.
-		 */
-		first_block = 0;
-	}
-
-	if (ret > 0)
-		ret = 0;
-
-out:
-	range->len = trimmed * sb->s_blocksize;
-	return ret;
-}
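
To make the byte-to-block conversion at the top of ext3_trim_fs() concrete, here is a small userspace sketch; the 4 KiB block size (s_blocksize_bits == 12) and the sample range are assumptions for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* assumed 4 KiB blocks */
	uint64_t range_start = 1 << 20;		/* trim from 1 MiB ... */
	uint64_t range_len = 8 << 20;		/* ... for 8 MiB */

	/* Same arithmetic as ext3_trim_fs(): inclusive block range. */
	uint64_t start = range_start >> blocksize_bits;
	uint64_t end = start + (range_len >> blocksize_bits) - 1;

	/* Prints: first block 256, last block 2303 */
	printf("first block %llu, last block %llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}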
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
deleted file mode 100644
index ef9c643..0000000
--- a/fs/ext3/bitmap.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  linux/fs/ext3/bitmap.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include "ext3.h"
-
-#ifdef EXT3FS_DEBUG
-
-unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
-{
-	return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
-}
-
-#endif  /*  EXT3FS_DEBUG  */
-
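
A userspace sketch of the counting idea behind ext3_count_free(): free bits are total bits minus set bits. In the kernel, memweight() does the population count; below, a plain byte loop with GCC/Clang's __builtin_popcount() stands in for it:

#include <stddef.h>

static unsigned long count_free_bits(const unsigned char *map, size_t nbytes)
{
	unsigned long set = 0;
	size_t i;

	/* Population count over the bitmap, one byte at a time. */
	for (i = 0; i < nbytes; i++)
		set += (unsigned long)__builtin_popcount(map[i]);

	return nbytes * 8 - set;	/* free = total - in use */
}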
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
deleted file mode 100644
index 17742ee..0000000
--- a/fs/ext3/dir.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- *  linux/fs/ext3/dir.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/dir.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 directory handling functions
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- * Hash Tree Directory indexing (c) 2001  Daniel Phillips
- *
- */
-
-#include <linux/compat.h>
-#include "ext3.h"
-
-static unsigned char ext3_filetype_table[] = {
-	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
-
-static int ext3_dx_readdir(struct file *, struct dir_context *);
-
-static unsigned char get_dtype(struct super_block *sb, int filetype)
-{
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
-	    (filetype >= EXT3_FT_MAX))
-		return DT_UNKNOWN;
-
-	return (ext3_filetype_table[filetype]);
-}
-
-/**
- * Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which could potentially get converted to use htree
- * indexing).
- *
- * Return 1 if it is a dx dir, 0 if not
- */
-static int is_dx_dir(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-
-	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-		     EXT3_FEATURE_COMPAT_DIR_INDEX) &&
-	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
-	     ((inode->i_size >> sb->s_blocksize_bits) == 1)))
-		return 1;
-
-	return 0;
-}
-
-int ext3_check_dir_entry (const char * function, struct inode * dir,
-			  struct ext3_dir_entry_2 * de,
-			  struct buffer_head * bh,
-			  unsigned long offset)
-{
-	const char * error_msg = NULL;
-	const int rlen = ext3_rec_len_from_disk(de->rec_len);
-
-	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
-		error_msg = "rec_len is smaller than minimal";
-	else if (unlikely(rlen % 4 != 0))
-		error_msg = "rec_len % 4 != 0";
-	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
-		error_msg = "rec_len is too small for name_len";
-	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
-		error_msg = "directory entry across blocks";
-	else if (unlikely(le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
-		error_msg = "inode out of bounds";
-
-	if (unlikely(error_msg != NULL))
-		ext3_error (dir->i_sb, function,
-			"bad entry in directory #%lu: %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error_msg, offset,
-			(unsigned long) le32_to_cpu(de->inode),
-			rlen, de->name_len);
-
-	return error_msg == NULL ? 1 : 0;
-}
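
For reference, the minimal record length these checks enforce comes from EXT3_DIR_REC_LEN() in the ext3 headers: an 8-byte fixed dirent header (inode, rec_len, name_len, file_type) plus the name, rounded up to the 4-byte alignment that the rlen % 4 test expects. A sketch of that computation (the helper name here is ours, not a kernel symbol):

static inline unsigned int dirent_min_rec_len(unsigned int name_len)
{
	/* 8 header bytes + name, rounded up to a multiple of 4. */
	return (name_len + 8 + 3) & ~3u;	/* e.g. name_len 1 -> 12 */
}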
-
-static int ext3_readdir(struct file *file, struct dir_context *ctx)
-{
-	unsigned long offset;
-	int i;
-	struct ext3_dir_entry_2 *de;
-	int err;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-	int dir_has_error = 0;
-
-	if (is_dx_dir(inode)) {
-		err = ext3_dx_readdir(file, ctx);
-		if (err != ERR_BAD_DX_DIR)
-			return err;
-		/*
-		 * We don't set the inode dirty flag since it's not
-		 * critical that it get flushed back to the disk.
-		 */
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-	}
-	offset = ctx->pos & (sb->s_blocksize - 1);
-
-	while (ctx->pos < inode->i_size) {
-		unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb);
-		struct buffer_head map_bh;
-		struct buffer_head *bh = NULL;
-
-		map_bh.b_state = 0;
-		err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
-		if (err > 0) {
-			pgoff_t index = map_bh.b_blocknr >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits);
-			if (!ra_has_index(&file->f_ra, index))
-				page_cache_sync_readahead(
-					sb->s_bdev->bd_inode->i_mapping,
-					&file->f_ra, file,
-					index, 1);
-			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
-			bh = ext3_bread(NULL, inode, blk, 0, &err);
-		}
-
-		/*
-		 * We ignore I/O errors on directories so users have a chance
-		 * of recovering data when there's a bad sector
-		 */
-		if (!bh) {
-			if (!dir_has_error) {
-				ext3_error(sb, __func__, "directory #%lu "
-					"contains a hole at offset %lld",
-					inode->i_ino, ctx->pos);
-				dir_has_error = 1;
-			}
-			/* corrupt size?  Maybe no more blocks to read */
-			if (ctx->pos > inode->i_blocks << 9)
-				break;
-			ctx->pos += sb->s_blocksize - offset;
-			continue;
-		}
-
-		/* If the dir block has changed since the last call to
-		 * readdir(2), then we might be pointing to an invalid
-		 * dirent right now.  Scan from the start of the block
-		 * to make sure. */
-		if (offset && file->f_version != inode->i_version) {
-			for (i = 0; i < sb->s_blocksize && i < offset; ) {
-				de = (struct ext3_dir_entry_2 *)
-					(bh->b_data + i);
-				/* It's too expensive to do a full
-				 * dirent test each time round this
-				 * loop, but we do have to test at
-				 * least that it is non-zero.  A
-				 * failure will be detected in the
-				 * dirent test below. */
-				if (ext3_rec_len_from_disk(de->rec_len) <
-						EXT3_DIR_REC_LEN(1))
-					break;
-				i += ext3_rec_len_from_disk(de->rec_len);
-			}
-			offset = i;
-			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
-				| offset;
-			file->f_version = inode->i_version;
-		}
-
-		while (ctx->pos < inode->i_size
-		       && offset < sb->s_blocksize) {
-			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
-						   bh, offset)) {
-				/* On error, skip to the
-				   next block. */
-				ctx->pos = (ctx->pos |
-						(sb->s_blocksize - 1)) + 1;
-				break;
-			}
-			offset += ext3_rec_len_from_disk(de->rec_len);
-			if (le32_to_cpu(de->inode)) {
-				if (!dir_emit(ctx, de->name, de->name_len,
-					      le32_to_cpu(de->inode),
-					      get_dtype(sb, de->file_type))) {
-					brelse(bh);
-					return 0;
-				}
-			}
-			ctx->pos += ext3_rec_len_from_disk(de->rec_len);
-		}
-		offset = 0;
-		brelse (bh);
-		if (ctx->pos < inode->i_size)
-			if (!dir_relax(inode))
-				return 0;
-	}
-	return 0;
-}
-
-static inline int is_32bit_api(void)
-{
-#ifdef CONFIG_COMPAT
-	return is_compat_task();
-#else
-	return (BITS_PER_LONG == 32);
-#endif
-}
-
-/*
- * These functions convert from the major/minor hash to an f_pos
- * value for dx directories
- *
- * Upper layer (for example NFS) should specify FMODE_32BITHASH or
- * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
- * directly on both 32-bit and 64-bit nodes, under such case, neither
- * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
- */
-static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return major >> 1;
-	else
-		return ((__u64)(major >> 1) << 32) | (__u64)minor;
-}
-
-static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return (pos << 1) & 0xffffffff;
-	else
-		return ((pos >> 32) << 1) & 0xffffffff;
-}
-
-static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return 0;
-	else
-		return pos & 0xffffffff;
-}
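
A userspace sketch of the 64-bit branch of the three helpers above, checking that a (major, minor) hash pair survives the round trip through f_pos. The sample values are arbitrary, and the sketch relies on bit 0 of the major hash being clear, as the shifts above imply for hashes produced by ext3:

#include <assert.h>
#include <stdint.h>

static uint64_t hash2pos64(uint32_t major, uint32_t minor)
{
	return ((uint64_t)(major >> 1) << 32) | minor;
}

static uint32_t pos2maj64(uint64_t pos)
{
	return ((pos >> 32) << 1) & 0xffffffff;
}

static uint32_t pos2min64(uint64_t pos)
{
	return pos & 0xffffffff;
}

int main(void)
{
	uint32_t major = 0x9abcdef0;	/* bit 0 clear */
	uint32_t minor = 0x12345678;
	uint64_t pos = hash2pos64(major, minor);

	/* The pair is recovered exactly from the f_pos encoding. */
	assert(pos2maj64(pos) == major);
	assert(pos2min64(pos) == minor);
	return 0;
}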
-
-/*
- * Return 32- or 64-bit end-of-file for dx directories
- */
-static inline loff_t ext3_get_htree_eof(struct file *filp)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return EXT3_HTREE_EOF_32BIT;
-	else
-		return EXT3_HTREE_EOF_64BIT;
-}
-
-
-/*
- * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
- * non-htree and htree directories, where the "offset" is in terms
- * of the filename hash value instead of the byte offset.
- *
- * Because we may return a 64-bit hash that is well beyond s_maxbytes,
- * we need to pass the max hash as the maximum allowable offset in
- * the htree directory case.
- *
- * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
- *       will be invalid once the directory has been converted into a dx directory
- */
-static loff_t ext3_dir_llseek(struct file *file, loff_t offset, int whence)
-{
-	struct inode *inode = file->f_mapping->host;
-	int dx_dir = is_dx_dir(inode);
-	loff_t htree_max = ext3_get_htree_eof(file);
-
-	if (likely(dx_dir))
-		return generic_file_llseek_size(file, offset, whence,
-					        htree_max, htree_max);
-	else
-		return generic_file_llseek(file, offset, whence);
-}
-
-/*
- * This structure holds the nodes of the red-black tree used to store
- * the directory entries in hash order.
- */
-struct fname {
-	__u32		hash;
-	__u32		minor_hash;
-	struct rb_node	rb_hash;
-	struct fname	*next;
-	__u32		inode;
-	__u8		name_len;
-	__u8		file_type;
-	char		name[0];
-};
-
-/*
- * This function implements a non-recursive way of freeing all of the
- * nodes in the red-black tree.
- */
-static void free_rb_tree_fname(struct rb_root *root)
-{
-	struct fname *fname, *next;
-
-	rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash)
-		do {
-			struct fname *old = fname;
-			fname = fname->next;
-			kfree(old);
-		} while (fname);
-
-	*root = RB_ROOT;
-}
-
-static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
-							   loff_t pos)
-{
-	struct dir_private_info *p;
-
-	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
-	if (!p)
-		return NULL;
-	p->curr_hash = pos2maj_hash(filp, pos);
-	p->curr_minor_hash = pos2min_hash(filp, pos);
-	return p;
-}
-
-void ext3_htree_free_dir_info(struct dir_private_info *p)
-{
-	free_rb_tree_fname(&p->root);
-	kfree(p);
-}
-
-/*
- * Given a directory entry, enter it into the fname rb tree.
- */
-int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-			     __u32 minor_hash,
-			     struct ext3_dir_entry_2 *dirent)
-{
-	struct rb_node **p, *parent = NULL;
-	struct fname * fname, *new_fn;
-	struct dir_private_info *info;
-	int len;
-
-	info = (struct dir_private_info *) dir_file->private_data;
-	p = &info->root.rb_node;
-
-	/* Create and allocate the fname structure */
-	len = sizeof(struct fname) + dirent->name_len + 1;
-	new_fn = kzalloc(len, GFP_KERNEL);
-	if (!new_fn)
-		return -ENOMEM;
-	new_fn->hash = hash;
-	new_fn->minor_hash = minor_hash;
-	new_fn->inode = le32_to_cpu(dirent->inode);
-	new_fn->name_len = dirent->name_len;
-	new_fn->file_type = dirent->file_type;
-	memcpy(new_fn->name, dirent->name, dirent->name_len);
-	new_fn->name[dirent->name_len] = 0;
-
-	while (*p) {
-		parent = *p;
-		fname = rb_entry(parent, struct fname, rb_hash);
-
-		/*
-		 * If the hash and minor hash match up, then we put
-		 * them on a linked list.  This rarely happens...
-		 */
-		if ((new_fn->hash == fname->hash) &&
-		    (new_fn->minor_hash == fname->minor_hash)) {
-			new_fn->next = fname->next;
-			fname->next = new_fn;
-			return 0;
-		}
-
-		if (new_fn->hash < fname->hash)
-			p = &(*p)->rb_left;
-		else if (new_fn->hash > fname->hash)
-			p = &(*p)->rb_right;
-		else if (new_fn->minor_hash < fname->minor_hash)
-			p = &(*p)->rb_left;
-		else /* if (new_fn->minor_hash > fname->minor_hash) */
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&new_fn->rb_hash, parent, p);
-	rb_insert_color(&new_fn->rb_hash, &info->root);
-	return 0;
-}
-
-
-
-/*
- * This is a helper function for ext3_dx_readdir.  It calls filldir
- * for all entries on the fname linked list.  (Normally there is only
- * one entry on the linked list, unless there are 62-bit hash collisions.)
- */
-static bool call_filldir(struct file *file, struct dir_context *ctx,
-			struct fname *fname)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-
-	if (!fname) {
-		printk("call_filldir: called with null fname?!?\n");
-		return true;
-	}
-	ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
-	while (fname) {
-		if (!dir_emit(ctx, fname->name, fname->name_len,
-				fname->inode,
-				get_dtype(sb, fname->file_type))) {
-			info->extra_fname = fname;
-			return false;
-		}
-		fname = fname->next;
-	}
-	return true;
-}
-
-static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct fname *fname;
-	int	ret;
-
-	if (!info) {
-		info = ext3_htree_create_dir_info(file, ctx->pos);
-		if (!info)
-			return -ENOMEM;
-		file->private_data = info;
-	}
-
-	if (ctx->pos == ext3_get_htree_eof(file))
-		return 0;	/* EOF */
-
-	/* Someone has messed with f_pos; reset the world */
-	if (info->last_pos != ctx->pos) {
-		free_rb_tree_fname(&info->root);
-		info->curr_node = NULL;
-		info->extra_fname = NULL;
-		info->curr_hash = pos2maj_hash(file, ctx->pos);
-		info->curr_minor_hash = pos2min_hash(file, ctx->pos);
-	}
-
-	/*
-	 * If there are any leftover names on the hash collision
-	 * chain, return them first.
-	 */
-	if (info->extra_fname) {
-		if (!call_filldir(file, ctx, info->extra_fname))
-			goto finished;
-		info->extra_fname = NULL;
-		goto next_node;
-	} else if (!info->curr_node)
-		info->curr_node = rb_first(&info->root);
-
-	while (1) {
-		/*
-		 * Fill the rbtree if we have no more entries,
-		 * or the inode has changed since we last read in the
-		 * cached entries.
-		 */
-		if ((!info->curr_node) ||
-		    (file->f_version != inode->i_version)) {
-			info->curr_node = NULL;
-			free_rb_tree_fname(&info->root);
-			file->f_version = inode->i_version;
-			ret = ext3_htree_fill_tree(file, info->curr_hash,
-						   info->curr_minor_hash,
-						   &info->next_hash);
-			if (ret < 0)
-				return ret;
-			if (ret == 0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_node = rb_first(&info->root);
-		}
-
-		fname = rb_entry(info->curr_node, struct fname, rb_hash);
-		info->curr_hash = fname->hash;
-		info->curr_minor_hash = fname->minor_hash;
-		if (!call_filldir(file, ctx, fname))
-			break;
-	next_node:
-		info->curr_node = rb_next(info->curr_node);
-		if (info->curr_node) {
-			fname = rb_entry(info->curr_node, struct fname,
-					 rb_hash);
-			info->curr_hash = fname->hash;
-			info->curr_minor_hash = fname->minor_hash;
-		} else {
-			if (info->next_hash == ~0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_hash = info->next_hash;
-			info->curr_minor_hash = 0;
-		}
-	}
-finished:
-	info->last_pos = ctx->pos;
-	return 0;
-}
-
-static int ext3_release_dir (struct inode * inode, struct file * filp)
-{
-	if (filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_dir_operations = {
-	.llseek		= ext3_dir_llseek,
-	.read		= generic_read_dir,
-	.iterate	= ext3_readdir,
-	.unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.fsync		= ext3_sync_file,
-	.release	= ext3_release_dir,
-};
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
deleted file mode 100644
index f483a80..0000000
--- a/fs/ext3/ext3.h
+++ /dev/null
@@ -1,1332 +0,0 @@
-/*
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/magic.h>
-#include <linux/bug.h>
-#include <linux/blockgroup_lock.h>
-
-/*
- * The third extended filesystem constants/structures
- */
-
-/*
- * Define EXT3FS_DEBUG to produce debug messages
- */
-#undef EXT3FS_DEBUG
-
-/*
- * Define EXT3_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT3_DEFAULT_RESERVE_BLOCKS     8
-/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
-#define EXT3_MAX_RESERVE_BLOCKS         1027
-#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
-
-/*
- * Debug code
- */
-#ifdef EXT3FS_DEBUG
-#define ext3_debug(f, a...)						\
-	do {								\
-		printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:",	\
-			__FILE__, __LINE__, __func__);		\
-		printk (KERN_DEBUG f, ## a);				\
-	} while (0)
-#else
-#define ext3_debug(f, a...)	do {} while (0)
-#endif
-
-/*
- * Special inodes numbers
- */
-#define	EXT3_BAD_INO		 1	/* Bad blocks inode */
-#define EXT3_ROOT_INO		 2	/* Root inode */
-#define EXT3_BOOT_LOADER_INO	 5	/* Boot loader inode */
-#define EXT3_UNDEL_DIR_INO	 6	/* Undelete directory inode */
-#define EXT3_RESIZE_INO		 7	/* Reserved group descriptors inode */
-#define EXT3_JOURNAL_INO	 8	/* Journal inode */
-
-/* First non-reserved inode for old ext3 filesystems */
-#define EXT3_GOOD_OLD_FIRST_INO	11
-
-/*
- * Maximal count of links to a file
- */
-#define EXT3_LINK_MAX		32000
-
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT3_MIN_BLOCK_SIZE		1024
-#define	EXT3_MAX_BLOCK_SIZE		65536
-#define EXT3_MIN_BLOCK_LOG_SIZE		10
-#define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
-#define	EXT3_ADDR_PER_BLOCK(s)		(EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
-#define	EXT3_ADDR_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_addr_per_block_bits)
-#define EXT3_INODE_SIZE(s)		(EXT3_SB(s)->s_inode_size)
-#define EXT3_FIRST_INO(s)		(EXT3_SB(s)->s_first_ino)
-
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT3_MIN_FRAG_SIZE		1024
-#define	EXT3_MAX_FRAG_SIZE		4096
-#define EXT3_MIN_FRAG_LOG_SIZE		  10
-#define EXT3_FRAG_SIZE(s)		(EXT3_SB(s)->s_frag_size)
-#define EXT3_FRAGS_PER_BLOCK(s)		(EXT3_SB(s)->s_frags_per_block)
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext3_group_desc
-{
-	__le32	bg_block_bitmap;		/* Blocks bitmap block */
-	__le32	bg_inode_bitmap;		/* Inodes bitmap block */
-	__le32	bg_inode_table;		/* Inodes table block */
-	__le16	bg_free_blocks_count;	/* Free blocks count */
-	__le16	bg_free_inodes_count;	/* Free inodes count */
-	__le16	bg_used_dirs_count;	/* Directories count */
-	__u16	bg_pad;
-	__le32	bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#define EXT3_BLOCKS_PER_GROUP(s)	(EXT3_SB(s)->s_blocks_per_group)
-#define EXT3_DESC_PER_BLOCK(s)		(EXT3_SB(s)->s_desc_per_block)
-#define EXT3_INODES_PER_GROUP(s)	(EXT3_SB(s)->s_inodes_per_group)
-#define EXT3_DESC_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_desc_per_block_bits)
-
-/*
- * Constants relative to the data blocks
- */
-#define	EXT3_NDIR_BLOCKS		12
-#define	EXT3_IND_BLOCK			EXT3_NDIR_BLOCKS
-#define	EXT3_DIND_BLOCK			(EXT3_IND_BLOCK + 1)
-#define	EXT3_TIND_BLOCK			(EXT3_DIND_BLOCK + 1)
-#define	EXT3_N_BLOCKS			(EXT3_TIND_BLOCK + 1)
-
-/*
- * Inode flags
- */
-#define	EXT3_SECRM_FL			0x00000001 /* Secure deletion */
-#define	EXT3_UNRM_FL			0x00000002 /* Undelete */
-#define	EXT3_COMPR_FL			0x00000004 /* Compress file */
-#define EXT3_SYNC_FL			0x00000008 /* Synchronous updates */
-#define EXT3_IMMUTABLE_FL		0x00000010 /* Immutable file */
-#define EXT3_APPEND_FL			0x00000020 /* writes to file may only append */
-#define EXT3_NODUMP_FL			0x00000040 /* do not dump file */
-#define EXT3_NOATIME_FL			0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT3_DIRTY_FL			0x00000100
-#define EXT3_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
-#define EXT3_NOCOMPR_FL			0x00000400 /* Don't compress */
-#define EXT3_ECOMPR_FL			0x00000800 /* Compression error */
-/* End compression flags --- maybe not all used */
-#define EXT3_INDEX_FL			0x00001000 /* hash-indexed directory */
-#define EXT3_IMAGIC_FL			0x00002000 /* AFS directory */
-#define EXT3_JOURNAL_DATA_FL		0x00004000 /* file data should be journaled */
-#define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
-#define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
-#define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
-#define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
-
-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
-			   EXT3_SYNC_FL | EXT3_NODUMP_FL |\
-			   EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
-			   EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
-			   EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
-{
-	if (S_ISDIR(mode))
-		return flags;
-	else if (S_ISREG(mode))
-		return flags & EXT3_REG_FLMASK;
-	else
-		return flags & EXT3_OTHER_FLMASK;
-}
-
-/* Used to pass group descriptor data when online resize is done */
-struct ext3_new_group_input {
-	__u32 group;            /* Group number for this data */
-	__u32 block_bitmap;     /* Absolute block number of block bitmap */
-	__u32 inode_bitmap;     /* Absolute block number of inode bitmap */
-	__u32 inode_table;      /* Absolute block number of inode table start */
-	__u32 blocks_count;     /* Total number of blocks in this group */
-	__u16 reserved_blocks;  /* Number of reserved blocks in this group */
-	__u16 unused;
-};
-
-/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
-struct ext3_new_group_data {
-	__u32 group;
-	__u32 block_bitmap;
-	__u32 inode_bitmap;
-	__u32 inode_table;
-	__u32 blocks_count;
-	__u16 reserved_blocks;
-	__u16 unused;
-	__u32 free_blocks_count;
-};
-
-
-/*
- * ioctl commands
- */
-#define	EXT3_IOC_GETFLAGS		FS_IOC_GETFLAGS
-#define	EXT3_IOC_SETFLAGS		FS_IOC_SETFLAGS
-#define	EXT3_IOC_GETVERSION		_IOR('f', 3, long)
-#define	EXT3_IOC_SETVERSION		_IOW('f', 4, long)
-#define EXT3_IOC_GROUP_EXTEND		_IOW('f', 7, unsigned long)
-#define EXT3_IOC_GROUP_ADD		_IOW('f', 8,struct ext3_new_group_input)
-#define	EXT3_IOC_GETVERSION_OLD		FS_IOC_GETVERSION
-#define	EXT3_IOC_SETVERSION_OLD		FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC_WAIT_FOR_READONLY	_IOR('f', 99, long)
-#endif
-#define EXT3_IOC_GETRSVSZ		_IOR('f', 5, long)
-#define EXT3_IOC_SETRSVSZ		_IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT3_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
-#define EXT3_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
-#define EXT3_IOC32_GETVERSION		_IOR('f', 3, int)
-#define EXT3_IOC32_SETVERSION		_IOW('f', 4, int)
-#define EXT3_IOC32_GETRSVSZ		_IOR('f', 5, int)
-#define EXT3_IOC32_SETRSVSZ		_IOW('f', 6, int)
-#define EXT3_IOC32_GROUP_EXTEND		_IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC32_WAIT_FOR_READONLY	_IOR('f', 99, int)
-#endif
-#define EXT3_IOC32_GETVERSION_OLD	FS_IOC32_GETVERSION
-#define EXT3_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
-
-/* Number of supported quota types */
-#define EXT3_MAXQUOTAS 2
-
-/*
- *  Mount options
- */
-struct ext3_mount_options {
-	unsigned long s_mount_opt;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned long s_commit_interval;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[EXT3_MAXQUOTAS];
-#endif
-};
-
-/*
- * Structure of an inode on the disk
- */
-struct ext3_inode {
-	__le16	i_mode;		/* File mode */
-	__le16	i_uid;		/* Low 16 bits of Owner Uid */
-	__le32	i_size;		/* Size in bytes */
-	__le32	i_atime;	/* Access time */
-	__le32	i_ctime;	/* Creation time */
-	__le32	i_mtime;	/* Modification time */
-	__le32	i_dtime;	/* Deletion Time */
-	__le16	i_gid;		/* Low 16 bits of Group Id */
-	__le16	i_links_count;	/* Links count */
-	__le32	i_blocks;	/* Blocks count */
-	__le32	i_flags;	/* File flags */
-	union {
-		struct {
-			__u32  l_i_reserved1;
-		} linux1;
-		struct {
-			__u32  h_i_translator;
-		} hurd1;
-		struct {
-			__u32  m_i_reserved1;
-		} masix1;
-	} osd1;				/* OS dependent 1 */
-	__le32	i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
-	__le32	i_generation;	/* File version (for NFS) */
-	__le32	i_file_acl;	/* File ACL */
-	__le32	i_dir_acl;	/* Directory ACL */
-	__le32	i_faddr;	/* Fragment address */
-	union {
-		struct {
-			__u8	l_i_frag;	/* Fragment number */
-			__u8	l_i_fsize;	/* Fragment size */
-			__u16	i_pad1;
-			__le16	l_i_uid_high;	/* these 2 fields    */
-			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
-		} linux2;
-		struct {
-			__u8	h_i_frag;	/* Fragment number */
-			__u8	h_i_fsize;	/* Fragment size */
-			__u16	h_i_mode_high;
-			__u16	h_i_uid_high;
-			__u16	h_i_gid_high;
-			__u32	h_i_author;
-		} hurd2;
-		struct {
-			__u8	m_i_frag;	/* Fragment number */
-			__u8	m_i_fsize;	/* Fragment size */
-			__u16	m_pad1;
-			__u32	m_i_reserved2[2];
-		} masix2;
-	} osd2;				/* OS dependent 2 */
-	__le16	i_extra_isize;
-	__le16	i_pad1;
-};
-
-#define i_size_high	i_dir_acl
-
-#define i_reserved1	osd1.linux1.l_i_reserved1
-#define i_frag		osd2.linux2.l_i_frag
-#define i_fsize		osd2.linux2.l_i_fsize
-#define i_uid_low	i_uid
-#define i_gid_low	i_gid
-#define i_uid_high	osd2.linux2.l_i_uid_high
-#define i_gid_high	osd2.linux2.l_i_gid_high
-#define i_reserved2	osd2.linux2.l_i_reserved2
-
-/*
- * File system states
- */
-#define	EXT3_VALID_FS			0x0001	/* Unmounted cleanly */
-#define	EXT3_ERROR_FS			0x0002	/* Errors detected */
-#define	EXT3_ORPHAN_FS			0x0004	/* Orphans being recovered */
-
-/*
- * Misc. filesystem flags
- */
-#define EXT2_FLAGS_SIGNED_HASH		0x0001  /* Signed dirhash in use */
-#define EXT2_FLAGS_UNSIGNED_HASH	0x0002  /* Unsigned dirhash in use */
-#define EXT2_FLAGS_TEST_FILESYS		0x0004	/* to test development code */
-
-/*
- * Mount flags
- */
-#define EXT3_MOUNT_CHECK		0x00001	/* Do mount-time checks */
-/* EXT3_MOUNT_OLDALLOC was there */
-#define EXT3_MOUNT_GRPID		0x00004	/* Create files with directory's group */
-#define EXT3_MOUNT_DEBUG		0x00008	/* Some debugging messages */
-#define EXT3_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
-#define EXT3_MOUNT_ERRORS_RO		0x00020	/* Remount fs ro on errors */
-#define EXT3_MOUNT_ERRORS_PANIC		0x00040	/* Panic on errors */
-#define EXT3_MOUNT_MINIX_DF		0x00080	/* Mimics the Minix statfs */
-#define EXT3_MOUNT_NOLOAD		0x00100	/* Don't use existing journal*/
-#define EXT3_MOUNT_ABORT		0x00200	/* Fatal error detected */
-#define EXT3_MOUNT_DATA_FLAGS		0x00C00	/* Mode for data writes: */
-#define EXT3_MOUNT_JOURNAL_DATA		0x00400	/* Write data to journal */
-#define EXT3_MOUNT_ORDERED_DATA		0x00800	/* Flush data before commit */
-#define EXT3_MOUNT_WRITEBACK_DATA	0x00C00	/* No data ordering */
-#define EXT3_MOUNT_UPDATE_JOURNAL	0x01000	/* Update the journal format */
-#define EXT3_MOUNT_NO_UID32		0x02000  /* Disable 32-bit UIDs */
-#define EXT3_MOUNT_XATTR_USER		0x04000	/* Extended user attributes */
-#define EXT3_MOUNT_POSIX_ACL		0x08000	/* POSIX Access Control Lists */
-#define EXT3_MOUNT_RESERVATION		0x10000	/* Preallocation */
-#define EXT3_MOUNT_BARRIER		0x20000 /* Use block barriers */
-#define EXT3_MOUNT_QUOTA		0x80000 /* Some quota option set */
-#define EXT3_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
-#define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
-#define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
-						  * error in ordered mode */
-
-/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
-#ifndef _LINUX_EXT2_FS_H
-#define clear_opt(o, opt)		o &= ~EXT3_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT3_MOUNT_##opt
-#define test_opt(sb, opt)		(EXT3_SB(sb)->s_mount_opt & \
-					 EXT3_MOUNT_##opt)
-#else
-#define EXT2_MOUNT_NOLOAD		EXT3_MOUNT_NOLOAD
-#define EXT2_MOUNT_ABORT		EXT3_MOUNT_ABORT
-#define EXT2_MOUNT_DATA_FLAGS		EXT3_MOUNT_DATA_FLAGS
-#endif
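
A brief sketch of how these macros read at a call site; the wrapper function is hypothetical, while set_opt()/test_opt() and the RESERVATION option come from the definitions above:

/* Hypothetical helper, for illustration only. */
static void enable_reservations(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	set_opt(sbi->s_mount_opt, RESERVATION);
	if (test_opt(sb, RESERVATION))
		ext3_debug("block reservations enabled\n");
}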
-
-#define ext3_set_bit			__set_bit_le
-#define ext3_set_bit_atomic		ext2_set_bit_atomic
-#define ext3_clear_bit			__clear_bit_le
-#define ext3_clear_bit_atomic		ext2_clear_bit_atomic
-#define ext3_test_bit			test_bit_le
-#define ext3_find_next_zero_bit		find_next_zero_bit_le
-
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT3_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
-#define EXT3_DFL_CHECKINTERVAL		0	/* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT3_ERRORS_CONTINUE		1	/* Continue execution */
-#define EXT3_ERRORS_RO			2	/* Remount fs read-only */
-#define EXT3_ERRORS_PANIC		3	/* Panic */
-#define EXT3_ERRORS_DEFAULT		EXT3_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext3_super_block {
-/*00*/	__le32	s_inodes_count;		/* Inodes count */
-	__le32	s_blocks_count;		/* Blocks count */
-	__le32	s_r_blocks_count;	/* Reserved blocks count */
-	__le32	s_free_blocks_count;	/* Free blocks count */
-/*10*/	__le32	s_free_inodes_count;	/* Free inodes count */
-	__le32	s_first_data_block;	/* First Data Block */
-	__le32	s_log_block_size;	/* Block size */
-	__le32	s_log_frag_size;	/* Fragment size */
-/*20*/	__le32	s_blocks_per_group;	/* # Blocks per group */
-	__le32	s_frags_per_group;	/* # Fragments per group */
-	__le32	s_inodes_per_group;	/* # Inodes per group */
-	__le32	s_mtime;		/* Mount time */
-/*30*/	__le32	s_wtime;		/* Write time */
-	__le16	s_mnt_count;		/* Mount count */
-	__le16	s_max_mnt_count;	/* Maximal mount count */
-	__le16	s_magic;		/* Magic signature */
-	__le16	s_state;		/* File system state */
-	__le16	s_errors;		/* Behaviour when detecting errors */
-	__le16	s_minor_rev_level;	/* minor revision level */
-/*40*/	__le32	s_lastcheck;		/* time of last check */
-	__le32	s_checkinterval;	/* max. time between checks */
-	__le32	s_creator_os;		/* OS */
-	__le32	s_rev_level;		/* Revision level */
-/*50*/	__le16	s_def_resuid;		/* Default uid for reserved blocks */
-	__le16	s_def_resgid;		/* Default gid for reserved blocks */
-	/*
-	 * These fields are for EXT3_DYNAMIC_REV superblocks only.
-	 *
-	 * Note: the difference between the compatible feature set and
-	 * the incompatible feature set is that if there is a bit set
-	 * in the incompatible feature set that the kernel doesn't
-	 * know about, it should refuse to mount the filesystem.
-	 *
-	 * e2fsck's requirements are more strict; if it doesn't know
-	 * about a feature in either the compatible or incompatible
-	 * feature set, it must abort and not try to meddle with
-	 * things it doesn't understand...
-	 */
-	__le32	s_first_ino;		/* First non-reserved inode */
-	__le16   s_inode_size;		/* size of inode structure */
-	__le16	s_block_group_nr;	/* block group # of this superblock */
-	__le32	s_feature_compat;	/* compatible feature set */
-/*60*/	__le32	s_feature_incompat;	/* incompatible feature set */
-	__le32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/*68*/	__u8	s_uuid[16];		/* 128-bit uuid for volume */
-/*78*/	char	s_volume_name[16];	/* volume name */
-/*88*/	char	s_last_mounted[64];	/* directory where last mounted */
-/*C8*/	__le32	s_algorithm_usage_bitmap; /* For compression */
-	/*
-	 * Performance hints.  Directory preallocation should only
-	 * happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
-	 */
-	__u8	s_prealloc_blocks;	/* Nr of blocks to try to preallocate*/
-	__u8	s_prealloc_dir_blocks;	/* Nr to preallocate for dirs */
-	__le16	s_reserved_gdt_blocks;	/* Per group desc for online growth */
-	/*
-	 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-	 */
-/*D0*/	__u8	s_journal_uuid[16];	/* uuid of journal superblock */
-/*E0*/	__le32	s_journal_inum;		/* inode number of journal file */
-	__le32	s_journal_dev;		/* device number of journal file */
-	__le32	s_last_orphan;		/* start of list of inodes to delete */
-	__le32	s_hash_seed[4];		/* HTREE hash seed */
-	__u8	s_def_hash_version;	/* Default hash version to use */
-	__u8	s_reserved_char_pad;
-	__u16	s_reserved_word_pad;
-	__le32	s_default_mount_opts;
-	__le32	s_first_meta_bg;	/* First metablock block group */
-	__le32	s_mkfs_time;		/* When the filesystem was created */
-	__le32	s_jnl_blocks[17];	/* Backup of the journal inode */
-	/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
-/*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
-	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
-	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
-	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
-	__le32	s_flags;		/* Miscellaneous flags */
-	__le16  s_raid_stride;		/* RAID stride */
-	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
-	__le64  s_mmp_block;            /* Block for multi-mount protection */
-	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
-	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
-	__u8	s_reserved_char_pad2;
-	__le16  s_reserved_pad;
-	__u32   s_reserved[162];        /* Padding to the end of the block */
-};
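Since the structure carries its on-disk offsets in the /*NN*/ comments, it doubles as a map for probing a raw image from userspace. A hedged sketch (the image path is hypothetical; the superblock starts 1024 bytes into the device, s_magic sits 0x38 bytes into it, and it reads back directly on a little-endian host since it is stored little-endian on disk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	FILE *f = fopen("disk.img", "rb");	/* hypothetical image */
	uint16_t magic = 0;

	if (!f)
		return 1;
	/* superblock at byte 1024; s_magic at offset 0x38 within it */
	if (fseek(f, 1024 + 0x38, SEEK_SET) == 0)
		fread(&magic, sizeof(magic), 1, f);
	printf("magic = 0x%04x\n", magic);	/* 0xef53 on ext2/3/4 */
	fclose(f);
	return 0;
}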
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
-	ext3_fsblk_t	_rsv_start;	/* First byte reserved */
-	ext3_fsblk_t	_rsv_end;	/* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
-	struct rb_node		rsv_node;
-	__u32			rsv_goal_size;
-	__u32			rsv_alloc_hit;
-	struct ext3_reserve_window	rsv_window;
-};
-
-struct ext3_block_alloc_info {
-	/* information about reservation window */
-	struct ext3_reserve_window_node	rsv_window_node;
-	/*
-	 * Was i_next_alloc_block in ext3_inode_info.  It is
-	 * the logical (file-relative) number of the
-	 * most-recently-allocated block in this file.
-	 * We use this for detecting linearly ascending allocation requests.
-	 */
-	__u32                   last_alloc_logical_block;
-	/*
-	 * Was i_next_alloc_goal in ext3_inode_info.  It is
-	 * the *physical* companion to i_next_alloc_block:
-	 * the physical block number of the block which was most recently
-	 * allocated to this file.  This gives us the goal (target) for the next
-	 * allocation when we detect linearly ascending requests.
-	 */
-	ext3_fsblk_t		last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
-	__le32	i_data[15];	/* unconverted */
-	__u32	i_flags;
-#ifdef EXT3_FRAGMENTS
-	__u32	i_faddr;
-	__u8	i_frag_no;
-	__u8	i_frag_size;
-#endif
-	ext3_fsblk_t	i_file_acl;
-	__u32	i_dir_acl;
-	__u32	i_dtime;
-
-	/*
-	 * i_block_group is the number of the block group which contains
-	 * this file's inode.  Constant across the lifetime of the inode,
-	 * it is used for making block allocation decisions - we try to
-	 * place a file's data blocks near its inode block, and new inodes
-	 * near to their parent directory's inode.
-	 */
-	__u32	i_block_group;
-	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
-
-	/* block reservation info */
-	struct ext3_block_alloc_info *i_block_alloc_info;
-
-	__u32	i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
-	/*
-	 * Extended attributes can be read independently of the main file
-	 * data. Taking i_mutex even when reading would cause contention
-	 * between readers of EAs and writers of regular file data, so
-	 * instead we synchronize on xattr_sem when reading or changing
-	 * EAs.
-	 */
-	struct rw_semaphore xattr_sem;
-#endif
-
-	struct list_head i_orphan;	/* unlinked but open inodes */
-
-	/*
-	 * i_disksize keeps track of what the inode size is ON DISK, not
-	 * in memory.  During truncate, i_size is set to the new size by
-	 * the VFS prior to calling ext3_truncate(), but the filesystem won't
-	 * set i_disksize to 0 until the truncate is actually under way.
-	 *
-	 * The intent is that i_disksize always represents the blocks which
-	 * are used by this file.  This allows recovery to restart truncate
-	 * on orphans if we crash during truncate.  We actually write i_disksize
-	 * into the on-disk inode when writing inodes out, instead of i_size.
-	 *
-	 * The only time when i_disksize and i_size may be different is when
-	 * a truncate is in progress.  The only things which change i_disksize
-	 * are ext3_get_block (growth) and ext3_truncate (shrinkage).
-	 */
-	loff_t	i_disksize;
-
-	/* on-disk additional length */
-	__u16 i_extra_isize;
-
-	/*
-	 * truncate_mutex is for serialising ext3_truncate() against
-	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
-	 * data tree are chopped off during truncate. We can't do that in
-	 * ext3 because whenever we perform intermediate commits during
-	 * truncate, the inode and all the metadata blocks *must* be in a
-	 * consistent state which allows truncation of the orphans to restart
-	 * during recovery.  Hence we must fix the get_block-vs-truncate race
-	 * by other means, so we have truncate_mutex.
-	 */
-	struct mutex truncate_mutex;
-
-	/*
-	 * Transactions that contain inode's metadata needed to complete
-	 * fsync and fdatasync, respectively.
-	 */
-	atomic_t i_sync_tid;
-	atomic_t i_datasync_tid;
-
-#ifdef CONFIG_QUOTA
-	struct dquot *i_dquot[MAXQUOTAS];
-#endif
-
-	struct inode vfs_inode;
-};
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
-	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
-	unsigned long s_blocks_per_group;/* Number of blocks in a group */
-	unsigned long s_inodes_per_group;/* Number of inodes in a group */
-	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
-	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
-	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
-	unsigned long s_groups_count;	/* Number of groups in the fs */
-	unsigned long s_overhead_last;  /* Last calculated overhead */
-	unsigned long s_blocks_last;    /* Last seen block count */
-	struct buffer_head * s_sbh;	/* Buffer containing the super block */
-	struct ext3_super_block * s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head ** s_group_desc;
-	unsigned long  s_mount_opt;
-	ext3_fsblk_t s_sb_block;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned short s_mount_state;
-	unsigned short s_pad;
-	int s_addr_per_block_bits;
-	int s_desc_per_block_bits;
-	int s_inode_size;
-	int s_first_ino;
-	spinlock_t s_next_gen_lock;
-	u32 s_next_generation;
-	u32 s_hash_seed[4];
-	int s_def_hash_version;
-	int s_hash_unsigned;	/* 3 if hash should be unsigned, 0 if not */
-	struct percpu_counter s_freeblocks_counter;
-	struct percpu_counter s_freeinodes_counter;
-	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock *s_blockgroup_lock;
-
-	/* root of the per fs reservation window tree */
-	spinlock_t s_rsv_window_lock;
-	struct rb_root s_rsv_window_root;
-	struct ext3_reserve_window_node s_rsv_window_head;
-
-	/* Journaling */
-	struct inode * s_journal_inode;
-	struct journal_s * s_journal;
-	struct list_head s_orphan;
-	struct mutex s_orphan_lock;
-	struct mutex s_resize_lock;
-	unsigned long s_commit_interval;
-	struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
-	char *s_qf_names[EXT3_MAXQUOTAS];	/* Names of quota files with journalled quota */
-	int s_jquota_fmt;			/* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
-{
-	return container_of(inode, struct ext3_inode_info, vfs_inode);
-}
-
-static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
-{
-	return ino == EXT3_ROOT_INO ||
-		ino == EXT3_JOURNAL_INO ||
-		ino == EXT3_RESIZE_INO ||
-		(ino >= EXT3_FIRST_INO(sb) &&
-		 ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
-}
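The reserved-inode constants referenced here are small fixed numbers (EXT3_ROOT_INO is 2, EXT3_JOURNAL_INO 8, EXT3_RESIZE_INO 7, and EXT3_FIRST_INO is normally 11 on dynamic-rev filesystems). A standalone sketch of the same rule with those values hard-coded and a hypothetical inode count:

#include <stdio.h>

static int valid_inum(unsigned long ino)
{
	const unsigned long first_ino = 11, inodes_count = 100000;

	return ino == 2 /* root */ || ino == 8 /* journal */ ||
	       ino == 7 /* resize */ ||
	       (ino >= first_ino && ino <= inodes_count);
}

int main(void)
{
	/* inode 5 is reserved but not one of the special-cased inodes */
	printf("%d %d %d\n", valid_inum(2), valid_inum(5), valid_inum(11));
	return 0;	/* prints "1 0 1" */
}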
-
-/*
- * Inode dynamic state flags
- */
-enum {
-	EXT3_STATE_JDATA,		/* journaled data exists */
-	EXT3_STATE_NEW,			/* inode is newly created */
-	EXT3_STATE_XATTR,		/* has in-inode xattrs */
-	EXT3_STATE_FLUSH_ON_CLOSE,	/* flush dirty pages on close */
-};
-
-static inline int ext3_test_inode_state(struct inode *inode, int bit)
-{
-	return test_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_set_inode_state(struct inode *inode, int bit)
-{
-	set_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_clear_inode_state(struct inode *inode, int bit)
-{
-	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
-
-/*
- * Codes for operating systems
- */
-#define EXT3_OS_LINUX		0
-#define EXT3_OS_HURD		1
-#define EXT3_OS_MASIX		2
-#define EXT3_OS_FREEBSD		3
-#define EXT3_OS_LITES		4
-
-/*
- * Revision levels
- */
-#define EXT3_GOOD_OLD_REV	0	/* The good old (original) format */
-#define EXT3_DYNAMIC_REV	1	/* V2 format w/ dynamic inode sizes */
-
-#define EXT3_CURRENT_REV	EXT3_GOOD_OLD_REV
-#define EXT3_MAX_SUPP_REV	EXT3_DYNAMIC_REV
-
-#define EXT3_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT3_HAS_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT3_SET_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT3_SET_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT3_FEATURE_COMPAT_DIR_PREALLOC	0x0001
-#define EXT3_FEATURE_COMPAT_IMAGIC_INODES	0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL		0x0004
-#define EXT3_FEATURE_COMPAT_EXT_ATTR		0x0008
-#define EXT3_FEATURE_COMPAT_RESIZE_INODE	0x0010
-#define EXT3_FEATURE_COMPAT_DIR_INDEX		0x0020
-
-#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
-#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
-#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
-
-#define EXT3_FEATURE_INCOMPAT_COMPRESSION	0x0001
-#define EXT3_FEATURE_INCOMPAT_FILETYPE		0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004 /* Needs recovery */
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008 /* Journal device */
-#define EXT3_FEATURE_INCOMPAT_META_BG		0x0010
-
-#define EXT3_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT3_FEATURE_INCOMPAT_SUPP	(EXT3_FEATURE_INCOMPAT_FILETYPE| \
-					 EXT3_FEATURE_INCOMPAT_RECOVER| \
-					 EXT3_FEATURE_INCOMPAT_META_BG)
-#define EXT3_FEATURE_RO_COMPAT_SUPP	(EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-					 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-					 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define	EXT3_DEF_RESUID		0
-#define	EXT3_DEF_RESGID		0
-
-/*
- * Default mount options
- */
-#define EXT3_DEFM_DEBUG		0x0001
-#define EXT3_DEFM_BSDGROUPS	0x0002
-#define EXT3_DEFM_XATTR_USER	0x0004
-#define EXT3_DEFM_ACL		0x0008
-#define EXT3_DEFM_UID16		0x0010
-#define EXT3_DEFM_JMODE		0x0060
-#define EXT3_DEFM_JMODE_DATA	0x0020
-#define EXT3_DEFM_JMODE_ORDERED	0x0040
-#define EXT3_DEFM_JMODE_WBACK	0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT3_NAME_LEN 255
-
-struct ext3_dir_entry {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__le16	name_len;		/* Name length */
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT3 structures are
- * stored in Intel byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext3_dir_entry_2 {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__u8	name_len;		/* Name length */
-	__u8	file_type;
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * Ext3 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-#define EXT3_FT_UNKNOWN		0
-#define EXT3_FT_REG_FILE	1
-#define EXT3_FT_DIR		2
-#define EXT3_FT_CHRDEV		3
-#define EXT3_FT_BLKDEV		4
-#define EXT3_FT_FIFO		5
-#define EXT3_FT_SOCK		6
-#define EXT3_FT_SYMLINK		7
-
-#define EXT3_FT_MAX		8
-
-/*
- * EXT3_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT3_DIR_PAD			4
-#define EXT3_DIR_ROUND			(EXT3_DIR_PAD - 1)
-#define EXT3_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT3_DIR_ROUND) & \
-					 ~EXT3_DIR_ROUND)
-#define EXT3_MAX_REC_LEN		((1<<16)-1)
-
-/*
- * Tests against MAX_REC_LEN etc were put in place for 64k block
- * sizes; if that is not possible on this arch, we can skip
- * those tests and speed things up.
- */
-static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
-{
-	unsigned len = le16_to_cpu(dlen);
-
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == EXT3_MAX_REC_LEN)
-		return 1 << 16;
-#endif
-	return len;
-}
-
-static inline __le16 ext3_rec_len_to_disk(unsigned len)
-{
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == (1 << 16))
-		return cpu_to_le16(EXT3_MAX_REC_LEN);
-	else if (len > (1 << 16))
-		BUG();
-#endif
-	return cpu_to_le16(len);
-}
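A quick standalone check of the EXT3_DIR_REC_LEN rounding defined above (the macros are duplicated here so the sketch compiles on its own):

#include <assert.h>

#define DIR_PAD			4
#define DIR_ROUND		(DIR_PAD - 1)
#define DIR_REC_LEN(name_len)	(((name_len) + 8 + DIR_ROUND) & ~DIR_ROUND)

int main(void)
{
	/* 8 bytes of fixed entry header plus the name, rounded up to 4 */
	assert(DIR_REC_LEN(1) == 12);	/* 8 + 1 = 9   -> 12 */
	assert(DIR_REC_LEN(11) == 20);	/* 8 + 11 = 19 -> 20 */
	assert(DIR_REC_LEN(255) == 264);	/* 8 + 255 = 263 -> 264 */
	return 0;
}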
-
-/*
- * Hash Tree Directory indexing
- * (c) Daniel Phillips, 2001
- */
-
-#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
-				      EXT3_FEATURE_COMPAT_DIR_INDEX) && \
-		      (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
-#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
-#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-
-/* Legal values for the dx_root hash_version field: */
-
-#define DX_HASH_LEGACY		0
-#define DX_HASH_HALF_MD4	1
-#define DX_HASH_TEA		2
-#define DX_HASH_LEGACY_UNSIGNED	3
-#define DX_HASH_HALF_MD4_UNSIGNED	4
-#define DX_HASH_TEA_UNSIGNED		5
-
-/* hash info structure used by the directory hash */
-struct dx_hash_info
-{
-	u32		hash;
-	u32		minor_hash;
-	int		hash_version;
-	u32		*seed;
-};
-
-
-/* 32 and 64 bit signed EOF for dx directories */
-#define EXT3_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
-#define EXT3_HTREE_EOF_64BIT   ((1ULL << (64 - 1)) - 1)
-
-
-/*
- * Control parameters used by ext3_htree_next_block
- */
-#define HASH_NB_ALWAYS		1
-
-
-/*
- * Describe an inode's exact location on disk and in memory
- */
-struct ext3_iloc
-{
-	struct buffer_head *bh;
-	unsigned long offset;
-	unsigned long block_group;
-};
-
-static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
-{
-	return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
-}
-
-/*
- * This structure is stuffed into the struct file's private_data field
- * for directories.  It is where we put information so that we can do
- * readdir operations in hash tree order.
- */
-struct dir_private_info {
-	struct rb_root	root;
-	struct rb_node	*curr_node;
-	struct fname	*extra_fname;
-	loff_t		last_pos;
-	__u32		curr_hash;
-	__u32		curr_minor_hash;
-	__u32		next_hash;
-};
-
-/* calculate the first block number of the group */
-static inline ext3_fsblk_t
-ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
-{
-	return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
-		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-}
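A worked example of that arithmetic, using hypothetical but typical values for a 1 KiB-block filesystem (8192 blocks per group, s_first_data_block of 1):

#include <stdio.h>

int main(void)
{
	unsigned long blocks_per_group = 8192;	/* hypothetical */
	unsigned long first_data_block = 1;	/* 1 on 1 KiB-block filesystems */
	unsigned long group_no = 3;

	/* group 3 starts at 3 * 8192 + 1 = 24577 */
	printf("%lu\n", group_no * blocks_per_group + first_data_block);
	return 0;
}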
-
-/*
- * Special error return code only used by dx_probe() and its callers.
- */
-#define ERR_BAD_DX_DIR	-75000
-
-/*
- * Function prototypes
- */
-
-/*
- * Ok, these declarations are also in <linux/kernel.h> but none of the
- * ext3 source programs needs to include it so they are duplicated here.
- */
-# define NORET_TYPE    /**/
-# define ATTRIB_NORET  __attribute__((noreturn))
-# define NORET_AND     noreturn,
-
-/* balloc.c */
-extern int ext3_bg_has_super(struct super_block *sb, int group);
-extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp);
-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp);
-extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count);
-extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
-				 ext3_fsblk_t block, unsigned long count,
-				unsigned long *pdquot_freed_blocks);
-extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
-extern void ext3_check_blocks_bitmap (struct super_block *);
-extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-						    unsigned int block_group,
-						    struct buffer_head ** bh);
-extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
-extern void ext3_init_block_alloc_info(struct inode *);
-extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
-extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
-
-/* dir.c */
-extern int ext3_check_dir_entry(const char *, struct inode *,
-				struct ext3_dir_entry_2 *,
-				struct buffer_head *, unsigned long);
-extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-				    __u32 minor_hash,
-				    struct ext3_dir_entry_2 *dirent);
-extern void ext3_htree_free_dir_info(struct dir_private_info *p);
-
-/* fsync.c */
-extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
-
-/* hash.c */
-extern int ext3fs_dirhash(const char *name, int len, struct
-			  dx_hash_info *hinfo);
-
-/* ialloc.c */
-extern struct inode * ext3_new_inode (handle_t *, struct inode *,
-				      const struct qstr *, umode_t);
-extern void ext3_free_inode (handle_t *, struct inode *);
-extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
-extern unsigned long ext3_count_free_inodes (struct super_block *);
-extern unsigned long ext3_count_dirs (struct super_block *);
-extern void ext3_check_inodes_bitmap (struct super_block *);
-extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
-
-
-/* inode.c */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t blocknr);
-struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-	int create);
-
-extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int  ext3_write_inode (struct inode *, struct writeback_control *);
-extern int  ext3_setattr (struct dentry *, struct iattr *);
-extern void ext3_evict_inode (struct inode *);
-extern int  ext3_sync_inode (handle_t *, struct inode *);
-extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *, int);
-extern int ext3_change_inode_journal_flag(struct inode *, int);
-extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
-extern int ext3_can_truncate(struct inode *inode);
-extern void ext3_truncate(struct inode *inode);
-extern void ext3_set_inode_flags(struct inode *);
-extern void ext3_get_inode_flags(struct ext3_inode_info *);
-extern void ext3_set_aops(struct inode *inode);
-extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		       u64 start, u64 len);
-
-/* ioctl.c */
-extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
-extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* namei.c */
-extern int ext3_orphan_add(handle_t *, struct inode *);
-extern int ext3_orphan_del(handle_t *, struct inode *);
-extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-				__u32 start_minor_hash, __u32 *next_hash);
-
-/* resize.c */
-extern int ext3_group_add(struct super_block *sb,
-				struct ext3_new_group_data *input);
-extern int ext3_group_extend(struct super_block *sb,
-				struct ext3_super_block *es,
-				ext3_fsblk_t n_blocks_count);
-
-/* super.c */
-extern __printf(3, 4)
-void ext3_error(struct super_block *, const char *, const char *, ...);
-extern void __ext3_std_error (struct super_block *, const char *, int);
-extern __printf(3, 4)
-void ext3_abort(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_warning(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_msg(struct super_block *, const char *, const char *, ...);
-extern void ext3_update_dynamic_rev (struct super_block *sb);
-
-#define ext3_std_error(sb, errno)				\
-do {								\
-	if ((errno))						\
-		__ext3_std_error((sb), __func__, (errno));	\
-} while (0)
-
-/*
- * Inodes and files operations
- */
-
-/* dir.c */
-extern const struct file_operations ext3_dir_operations;
-
-/* file.c */
-extern const struct inode_operations ext3_file_inode_operations;
-extern const struct file_operations ext3_file_operations;
-
-/* namei.c */
-extern const struct inode_operations ext3_dir_inode_operations;
-extern const struct inode_operations ext3_special_inode_operations;
-
-/* symlink.c */
-extern const struct inode_operations ext3_symlink_inode_operations;
-extern const struct inode_operations ext3_fast_symlink_inode_operations;
-
-#define EXT3_JOURNAL(inode)	(EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction.  */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS		6U
-
-/* Define the minimum size for a transaction which modifies data.  This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota).  The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb)	(EXT3_SINGLEDATA_TRANS_BLOCKS + \
-					 EXT3_XATTR_TRANS_BLOCKS - 2 + \
-					 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction.  For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA		64U
-
-/* We break up a large truncate or write transaction once the handle's
- * buffer credits get this low; we then need either to extend the
- * transaction or to start a new one.  Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates.  Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS	12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS	8
-
-#ifdef CONFIG_QUOTA
-/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
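Putting the constants together (and assuming EXT3_MAXQUOTAS is 2, one user plus one group quota): with quota enabled, EXT3_DATA_TRANS_BLOCKS(sb) works out to 8 + 6 - 2 + 2*2 = 16 credits per modified data block; without CONFIG_QUOTA, or with no quota option set, it is 12.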
-
-int
-ext3_mark_iloc_dirty(handle_t *handle,
-		     struct inode *inode,
-		     struct ext3_iloc *iloc);
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD.  The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
-						struct buffer_head *bh)
-{
-	journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
-	__ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
-	__ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
-	__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
-	__ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
-	__ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
-	__ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
-	return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
-	__ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
-	return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
-	return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
-	return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
-	return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
-	return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 1;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
-		return 1;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
-		return 1;
-	return 0;
-}
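The two-bit DATA_FLAGS field selects a single journaling mode, so for a regular file without the per-inode journal-data flag these three predicates cannot overlap. A standalone sketch of the masking, with the flag values copied from the mount-flags block above:

#include <stdio.h>

#define MOUNT_DATA_FLAGS	0x00C00
#define MOUNT_JOURNAL_DATA	0x00400
#define MOUNT_ORDERED_DATA	0x00800
#define MOUNT_WRITEBACK_DATA	0x00C00

int main(void)
{
	unsigned long s_mount_opt = MOUNT_ORDERED_DATA;	/* hypothetical */

	switch (s_mount_opt & MOUNT_DATA_FLAGS) {	/* test_opt(sb, DATA_FLAGS) */
	case MOUNT_JOURNAL_DATA:	puts("data=journal");	break;
	case MOUNT_ORDERED_DATA:	puts("data=ordered");	break;
	case MOUNT_WRITEBACK_DATA:	puts("data=writeback");	break;
	}
	return 0;	/* prints "data=ordered" */
}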
-
-#include <trace/events/ext3.h>
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
deleted file mode 100644
index 785a326..0000000
--- a/fs/ext3/ext3_jbd.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Interface between ext3 and JBD
- */
-
-#include "ext3.h"
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_undo_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_write_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_forget(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh)
-{
-	int err = journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_get_create_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle, err);
-	return err;
-}
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
deleted file mode 100644
index 3b8f650..0000000
--- a/fs/ext3/file.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *  linux/fs/ext3/file.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/file.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 fs regular file handling primitives
- *
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Called when an inode is released. Note that this is different
- * from ext3_file_open: open gets called at every open, but release
- * gets called only when /all/ the files are closed.
- */
-static int ext3_release_file (struct inode * inode, struct file * filp)
-{
-	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
-		filemap_flush(inode->i_mapping);
-		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-	}
-	/* if we are the last writer on the inode, drop the block reservation */
-	if ((filp->f_mode & FMODE_WRITE) &&
-			(atomic_read(&inode->i_writecount) == 1))
-	{
-		mutex_lock(&EXT3_I(inode)->truncate_mutex);
-		ext3_discard_reservation(inode);
-		mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	}
-	if (is_dx(inode) && filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_file_operations = {
-	.llseek		= generic_file_llseek,
-	.read_iter	= generic_file_read_iter,
-	.write_iter	= generic_file_write_iter,
-	.unlocked_ioctl	= ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.mmap		= generic_file_mmap,
-	.open		= dquot_file_open,
-	.release	= ext3_release_file,
-	.fsync		= ext3_sync_file,
-	.splice_read	= generic_file_splice_read,
-	.splice_write	= iter_file_splice_write,
-};
-
-const struct inode_operations ext3_file_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-	.fiemap		= ext3_fiemap,
-};
-
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
deleted file mode 100644
index 1cb9c7e..0000000
--- a/fs/ext3/fsync.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  linux/fs/ext3/fsync.c
- *
- *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
- *  from
- *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
- *                      Laboratoire MASI - Institut Blaise Pascal
- *                      Universite Pierre et Marie Curie (Paris VI)
- *  from
- *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3fs fsync primitive
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- *  Removed unnecessary code duplication for little endian machines
- *  and excessive __inline__s.
- *        Andi Kleen, 1997
- *
- * Major simplifications and cleanup - we only need to do the metadata, because
- * we can depend on generic_block_fdatasync() to sync the data blocks.
- */
-
-#include <linux/blkdev.h>
-#include <linux/writeback.h>
-#include "ext3.h"
-
-/*
- * akpm: A new design for ext3_sync_file().
- *
- * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
- * There cannot be a transaction open by this task.
- * Another task could have dirtied this inode.  Its data can be in any
- * state in the journalling system.
- *
- * What we do is just kick off a commit and wait on it.  This will snapshot the
- * inode to disk.
- */
-
-int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
-{
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-	int ret, needs_barrier = 0;
-	tid_t commit_tid;
-
-	trace_ext3_sync_file_enter(file, datasync);
-
-	if (inode->i_sb->s_flags & MS_RDONLY) {
-		/* Make sure that we read updated state */
-		smp_rmb();
-		if (EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)
-			return -EROFS;
-		return 0;
-	}
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		goto out;
-
-	J_ASSERT(ext3_journal_current_handle() == NULL);
-
-	/*
-	 * data=writeback,ordered:
-	 *  The caller's filemap_fdatawrite()/wait will sync the data.
-	 *  Metadata is in the journal, we wait for a proper transaction
-	 *  to commit here.
-	 *
-	 * data=journal:
-	 *  filemap_fdatawrite won't do anything (the buffers are clean).
-	 *  ext3_force_commit will write the file data into the journal and
-	 *  will wait on that.
-	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
-	 *  (they were dirtied by commit).  But that's OK - the blocks are
-	 *  safe in-journal, which is all fsync() needs to ensure.
-	 */
-	if (ext3_should_journal_data(inode)) {
-		ret = ext3_force_commit(inode->i_sb);
-		goto out;
-	}
-
-	if (datasync)
-		commit_tid = atomic_read(&ei->i_datasync_tid);
-	else
-		commit_tid = atomic_read(&ei->i_sync_tid);
-
-	if (test_opt(inode->i_sb, BARRIER) &&
-	    !journal_trans_will_send_data_barrier(journal, commit_tid))
-		needs_barrier = 1;
-	log_start_commit(journal, commit_tid);
-	ret = log_wait_commit(journal, commit_tid);
-
-	/*
-	 * In case we didn't commit a transaction, we have to flush
-	 * disk caches manually so that data really is on persistent
-	 * storage
-	 */
-	if (needs_barrier) {
-		int err;
-
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
-		if (!ret)
-			ret = err;
-	}
-out:
-	trace_ext3_sync_file_exit(inode, ret);
-	return ret;
-}
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
deleted file mode 100644
index ede315c..0000000
--- a/fs/ext3/hash.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  linux/fs/ext3/hash.c
- *
- * Copyright (C) 2002 by Theodore Ts'o
- *
- * This file is released under the GPL v2.
- *
- * This file may be redistributed under the terms of the GNU Public
- * License.
- */
-
-#include "ext3.h"
-#include <linux/cryptohash.h>
-
-#define DELTA 0x9E3779B9
-
-static void TEA_transform(__u32 buf[4], __u32 const in[])
-{
-	__u32	sum = 0;
-	__u32	b0 = buf[0], b1 = buf[1];
-	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
-	int	n = 16;
-
-	do {
-		sum += DELTA;
-		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
-		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
-	} while(--n);
-
-	buf[0] += b0;
-	buf[1] += b1;
-}
-
-
-/* The old legacy hash */
-static __u32 dx_hack_hash_unsigned(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const unsigned char *ucp = (const unsigned char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static __u32 dx_hack_hash_signed(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const signed char *scp = (const signed char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const signed char *scp = (const signed char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i = 0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) scp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
-static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const unsigned char *ucp = (const unsigned char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i=0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) ucp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
-/*
- * Returns the hash of a filename.  If len is 0 and name is NULL, then
- * this function can be used to test whether or not a hash version is
- * supported.
- *
- * The seed is a 4-longword (32-bit) "secret" which can be used to
- * uniquify a hash.  If the seed is all zeros, then some default seed
- * may be used.
- *
- * A particular hash version specifies whether or not the seed is
- * represented, and whether the returned hash is 32 or 64 bits.
- * 32-bit hashes will return 0 for the minor hash.
- */
-int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
-{
-	__u32	hash;
-	__u32	minor_hash = 0;
-	const char	*p;
-	int		i;
-	__u32		in[8], buf[4];
-	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
-				str2hashbuf_signed;
-
-	/* Initialize the default seed for the hash checksum functions */
-	buf[0] = 0x67452301;
-	buf[1] = 0xefcdab89;
-	buf[2] = 0x98badcfe;
-	buf[3] = 0x10325476;
-
-	/* Check to see if the seed is all zeros */
-	if (hinfo->seed) {
-		for (i=0; i < 4; i++) {
-			if (hinfo->seed[i])
-				break;
-		}
-		if (i < 4)
-			memcpy(buf, hinfo->seed, sizeof(buf));
-	}
-
-	switch (hinfo->hash_version) {
-	case DX_HASH_LEGACY_UNSIGNED:
-		hash = dx_hack_hash_unsigned(name, len);
-		break;
-	case DX_HASH_LEGACY:
-		hash = dx_hack_hash_signed(name, len);
-		break;
-	case DX_HASH_HALF_MD4_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
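-		/* fall through: unsigned variant shares the half-MD4 loop */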
-	case DX_HASH_HALF_MD4:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 8);
-			half_md4_transform(buf, in);
-			len -= 32;
-			p += 32;
-		}
-		minor_hash = buf[2];
-		hash = buf[1];
-		break;
-	case DX_HASH_TEA_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
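-		/* fall through: unsigned variant shares the TEA loop */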
-	case DX_HASH_TEA:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 4);
-			TEA_transform(buf, in);
-			len -= 16;
-			p += 16;
-		}
-		hash = buf[0];
-		minor_hash = buf[1];
-		break;
-	default:
-		hinfo->hash = 0;
-		return -1;
-	}
-	hash = hash & ~1;
-	if (hash == (EXT3_HTREE_EOF_32BIT << 1))
-		hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
-	hinfo->hash = hash;
-	hinfo->minor_hash = minor_hash;
-	return 0;
-}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
deleted file mode 100644
index 3ad242e..0000000
--- a/fs/ext3/ialloc.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- *  linux/fs/ext3/ialloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  BSD ufs-inspired inode and directory allocation by
- *  Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/random.h>
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * ialloc.c contains the inode allocation and deallocation routines
- */
-
-/*
- * The free inodes are managed by bitmaps.  A file system contains several
- * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the group.
- */
-
-
-/*
- * Read the inode allocation bitmap for a given block_group, reading
- * into the specified slot in the superblock's bitmap cache.
- *
- * Return buffer_head of bitmap on success or NULL.
- */
-static struct buffer_head *
-read_inode_bitmap(struct super_block * sb, unsigned long block_group)
-{
-	struct ext3_group_desc *desc;
-	struct buffer_head *bh = NULL;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		goto error_out;
-
-	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
-	if (!bh)
-		ext3_error(sb, "read_inode_bitmap",
-			    "Cannot read inode bitmap - "
-			    "block_group = %lu, inode_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_inode_bitmap));
-error_out:
-	return bh;
-}
-
-/*
- * NOTE! When we get the inode, we're the only people
- * that have access to it, and as such there are no
- * race conditions we have to worry about. The inode
- * is not on the hash-lists, and it cannot be reached
- * through the filesystem because the directory entry
- * has been deleted earlier.
- *
- * HOWEVER: we must make sure that we get no aliases,
- * which means that we have to call "clear_inode()"
- * _before_ we mark the inode not in use in the inode
- * bitmaps. Otherwise a newly created file might use
- * the same inode number (not actually the same pointer
- * though), and then we'd have two inodes sharing the
- * same inode number and space on the hard disk.
- */
-void ext3_free_inode (handle_t *handle, struct inode * inode)
-{
-	struct super_block * sb = inode->i_sb;
-	int is_directory;
-	unsigned long ino;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	unsigned long block_group;
-	unsigned long bit;
-	struct ext3_group_desc * gdp;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int fatal = 0, err;
-
-	if (atomic_read(&inode->i_count) > 1) {
-		printk ("ext3_free_inode: inode has count=%d\n",
-					atomic_read(&inode->i_count));
-		return;
-	}
-	if (inode->i_nlink) {
-		printk ("ext3_free_inode: inode has nlink=%d\n",
-			inode->i_nlink);
-		return;
-	}
-	if (!sb) {
-		printk("ext3_free_inode: inode on nonexistent device\n");
-		return;
-	}
-	sbi = EXT3_SB(sb);
-
-	ino = inode->i_ino;
-	ext3_debug ("freeing inode %lu\n", ino);
-	trace_ext3_free_inode(inode);
-
-	is_directory = S_ISDIR(inode->i_mode);
-
-	es = EXT3_SB(sb)->s_es;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_free_inode",
-			    "reserved or nonexistent inode %lu", ino);
-		goto error_return;
-	}
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-
-	BUFFER_TRACE(bitmap_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
-	if (fatal)
-		goto error_return;
-
-	/* Ok, now we can actually update the inode bitmaps.. */
-	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-					bit, bitmap_bh->b_data))
-		ext3_error (sb, "ext3_free_inode",
-			      "bit already cleared for inode %lu", ino);
-	else {
-		gdp = ext3_get_group_desc (sb, block_group, &bh2);
-
-		BUFFER_TRACE(bh2, "get_write_access");
-		fatal = ext3_journal_get_write_access(handle, bh2);
-		if (fatal) goto error_return;
-
-		if (gdp) {
-			spin_lock(sb_bgl_lock(sbi, block_group));
-			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
-			if (is_directory)
-				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
-			spin_unlock(sb_bgl_lock(sbi, block_group));
-			percpu_counter_inc(&sbi->s_freeinodes_counter);
-			if (is_directory)
-				percpu_counter_dec(&sbi->s_dirs_counter);
-
-		}
-		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh2);
-		if (!fatal) fatal = err;
-	}
-	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!fatal)
-		fatal = err;
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, fatal);
-}
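The (ino - 1) group/bit split in the function above deserves a worked example, since inode numbers are 1-based while bitmap bits are 0-based (2048 inodes per group is a hypothetical value):

#include <stdio.h>

int main(void)
{
	unsigned long ino = 12345, inodes_per_group = 2048;

	/* inode 12345 lands in group 6, at bit 56 of that group's bitmap */
	printf("group %lu, bit %lu\n",
	       (ino - 1) / inodes_per_group,
	       (ino - 1) % inodes_per_group);
	return 0;
}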
-
-/*
- * Orlov's allocator for directories.
- *
- * We always try to spread first-level directories.
- *
- * If there are block groups whose free inode and free block counts are
- * not worse than average, we return the one with the smallest
- * directory count.
- * Otherwise we simply return a random group.
- *
- * The remaining rules are as follows:
- *
- * It's OK to put a directory into a group unless
- * it has too many directories already (max_dirs) or
- * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks).
- * The parent's group is preferred; if it doesn't satisfy these
- * conditions, we search cyclically through the rest.  If none
- * of the groups look good we just look for a group with more
- * free inodes than average (starting at parent's group).
- *
- * Debt is incremented each time we allocate a directory and decremented
- * when we allocate an inode, within 0--255.
- */
-
-static int find_group_orlov(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int ngroups = sbi->s_groups_count;
-	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
-	unsigned int freei, avefreei;
-	ext3_fsblk_t freeb, avefreeb;
-	unsigned int ndirs;
-	int max_dirs, min_inodes;
-	ext3_grpblk_t min_blocks;
-	int group = -1, i;
-	struct ext3_group_desc *desc;
-
-	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
-	avefreei = freei / ngroups;
-	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	avefreeb = freeb / ngroups;
-	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
-
-	if ((parent == d_inode(sb->s_root)) ||
-	    (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
-		int best_ndir = inodes_per_group;
-		int best_group = -1;
-
-		group = prandom_u32();
-		parent_group = (unsigned)group % ngroups;
-		for (i = 0; i < ngroups; i++) {
-			group = (parent_group + i) % ngroups;
-			desc = ext3_get_group_desc (sb, group, NULL);
-			if (!desc || !desc->bg_free_inodes_count)
-				continue;
-			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
-				continue;
-			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
-				continue;
-			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
-				continue;
-			best_group = group;
-			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
-		}
-		if (best_group >= 0)
-			return best_group;
-		goto fallback;
-	}
-
-	max_dirs = ndirs / ngroups + inodes_per_group / 16;
-	min_inodes = avefreei - inodes_per_group / 4;
-	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
-
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
-			continue;
-		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
-			continue;
-		return group;
-	}
-
-fallback:
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
-			return group;
-	}
-
-	if (avefreei) {
-		/*
-		 * The free-inodes counter is approximate, and for really small
-		 * filesystems the above test can fail to find any blockgroups
-		 */
-		avefreei = 0;
-		goto fallback;
-	}
-
-	return -1;
-}
-
-static int find_group_other(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	int ngroups = EXT3_SB(sb)->s_groups_count;
-	struct ext3_group_desc *desc;
-	int group, i;
-
-	/*
-	 * Try to place the inode in its parent directory
-	 */
-	group = parent_group;
-	desc = ext3_get_group_desc (sb, group, NULL);
-	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-			le16_to_cpu(desc->bg_free_blocks_count))
-		return group;
-
-	/*
-	 * We're going to place this inode in a different blockgroup from its
-	 * parent.  We want to cause files in a common directory to all land in
-	 * the same blockgroup.  But we want files which are in a different
-	 * directory which shares a blockgroup with our parent to land in a
-	 * different blockgroup.
-	 *
-	 * So add our directory's i_ino into the starting point for the hash.
-	 */
-	group = (group + parent->i_ino) % ngroups;
-
-	/*
-	 * Use a quadratic hash to find a group with a free inode and some free
-	 * blocks.
-	 */
-	for (i = 1; i < ngroups; i <<= 1) {
-		group += i;
-		if (group >= ngroups)
-			group -= ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-				le16_to_cpu(desc->bg_free_blocks_count))
-			return group;
-	}
-
-	/*
-	 * That failed: try linear search for a free inode, even if that group
-	 * has no free blocks.
-	 */
-	group = parent_group;
-	for (i = 0; i < ngroups; i++) {
-		if (++group >= ngroups)
-			group = 0;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
-			return group;
-	}
-
-	return -1;
-}
-
-/*
- * There are two policies for allocating an inode.  If the new inode is
- * a directory, then a forward search is made for a block group with both
- * free space and a low directory-to-inode ratio; if that fails, then of
- * the groups with above-average free space, that group with the fewest
- * directories already is chosen.
- *
- * For other inodes, search forward from the parent directory's block
- * group to find a free inode.
- */
-struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
-			     const struct qstr *qstr, umode_t mode)
-{
-	struct super_block *sb;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	int group;
-	unsigned long ino = 0;
-	struct inode * inode;
-	struct ext3_group_desc * gdp = NULL;
-	struct ext3_super_block * es;
-	struct ext3_inode_info *ei;
-	struct ext3_sb_info *sbi;
-	int err = 0;
-	struct inode *ret;
-	int i;
-
-	/* Cannot create files in a deleted directory */
-	if (!dir || !dir->i_nlink)
-		return ERR_PTR(-EPERM);
-
-	sb = dir->i_sb;
-	trace_ext3_request_inode(dir, mode);
-	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	ei = EXT3_I(inode);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (S_ISDIR(mode))
-		group = find_group_orlov(sb, dir);
-	else
-		group = find_group_other(sb, dir);
-
-	err = -ENOSPC;
-	if (group == -1)
-		goto out;
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		err = -EIO;
-
-		gdp = ext3_get_group_desc(sb, group, &bh2);
-		if (!gdp)
-			goto fail;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, group);
-		if (!bitmap_bh)
-			goto fail;
-
-		ino = 0;
-
-repeat_in_this_group:
-		ino = ext3_find_next_zero_bit((unsigned long *)
-				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
-		if (ino < EXT3_INODES_PER_GROUP(sb)) {
-
-			BUFFER_TRACE(bitmap_bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bitmap_bh);
-			if (err)
-				goto fail;
-
-			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
-						ino, bitmap_bh->b_data)) {
-				/* we won it */
-				BUFFER_TRACE(bitmap_bh,
-					"call ext3_journal_dirty_metadata");
-				err = ext3_journal_dirty_metadata(handle,
-								bitmap_bh);
-				if (err)
-					goto fail;
-				goto got;
-			}
-			/* we lost it */
-			journal_release_buffer(handle, bitmap_bh);
-
-			if (++ino < EXT3_INODES_PER_GROUP(sb))
-				goto repeat_in_this_group;
-		}
-
-		/*
-		 * This case is possible in a concurrent environment.  It is very
-		 * rare.  We cannot repeat the find_group_xxx() call because
-		 * that will simply return the same blockgroup, because the
-		 * group descriptor metadata has not yet been updated.
-		 * So we just go onto the next blockgroup.
-		 */
-		if (++group == sbi->s_groups_count)
-			group = 0;
-	}
-	err = -ENOSPC;
-	goto out;
-
-got:
-	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_new_inode",
-			    "reserved inode or inode > inodes count - "
-			    "block_group = %d, inode=%lu", group, ino);
-		err = -EIO;
-		goto fail;
-	}
-
-	BUFFER_TRACE(bh2, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh2);
-	if (err) goto fail;
-	spin_lock(sb_bgl_lock(sbi, group));
-	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
-	if (S_ISDIR(mode)) {
-		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
-	}
-	spin_unlock(sb_bgl_lock(sbi, group));
-	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh2);
-	if (err) goto fail;
-
-	percpu_counter_dec(&sbi->s_freeinodes_counter);
-	if (S_ISDIR(mode))
-		percpu_counter_inc(&sbi->s_dirs_counter);
-
-
-	if (test_opt(sb, GRPID)) {
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = dir->i_gid;
-	} else
-		inode_init_owner(inode, dir, mode);
-
-	inode->i_ino = ino;
-	/* This is the optimal IO size (for stat), not the fs block size */
-	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-
-	memset(ei->i_data, 0, sizeof(ei->i_data));
-	ei->i_dir_start_lookup = 0;
-	ei->i_disksize = 0;
-
-	ei->i_flags =
-		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = 0;
-	ei->i_frag_no = 0;
-	ei->i_frag_size = 0;
-#endif
-	ei->i_file_acl = 0;
-	ei->i_dir_acl = 0;
-	ei->i_dtime = 0;
-	ei->i_block_alloc_info = NULL;
-	ei->i_block_group = group;
-
-	ext3_set_inode_flags(inode);
-	if (IS_DIRSYNC(inode))
-		handle->h_sync = 1;
-	if (insert_inode_locked(inode) < 0) {
-		/*
-		 * Likely a bitmap corruption causing inode to be allocated
-		 * twice.
-		 */
-		err = -EIO;
-		goto fail;
-	}
-	spin_lock(&sbi->s_next_gen_lock);
-	inode->i_generation = sbi->s_next_generation++;
-	spin_unlock(&sbi->s_next_gen_lock);
-
-	ei->i_state_flags = 0;
-	ext3_set_inode_state(inode, EXT3_STATE_NEW);
-
-	/* See comment in ext3_iget for explanation */
-	if (ino >= EXT3_FIRST_INO(sb) + 1 &&
-	    EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		ei->i_extra_isize =
-			sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
-	} else {
-		ei->i_extra_isize = 0;
-	}
-
-	ret = inode;
-	dquot_initialize(inode);
-	err = dquot_alloc_inode(inode);
-	if (err)
-		goto fail_drop;
-
-	err = ext3_init_acl(handle, inode, dir);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_init_security(handle, inode, dir, qstr);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (err) {
-		ext3_std_error(sb, err);
-		goto fail_free_drop;
-	}
-
-	ext3_debug("allocating inode %lu\n", inode->i_ino);
-	trace_ext3_allocate_inode(inode, dir, mode);
-	goto really_out;
-fail:
-	ext3_std_error(sb, err);
-out:
-	iput(inode);
-	ret = ERR_PTR(err);
-really_out:
-	brelse(bitmap_bh);
-	return ret;
-
-fail_free_drop:
-	dquot_free_inode(inode);
-
-fail_drop:
-	dquot_drop(inode);
-	inode->i_flags |= S_NOQUOTA;
-	clear_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	brelse(bitmap_bh);
-	return ERR_PTR(err);
-}
-
-/* Verify that we are loading a valid orphan from disk */
-struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
-{
-	unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
-	unsigned long block_group;
-	int bit;
-	struct buffer_head *bitmap_bh;
-	struct inode *inode = NULL;
-	long err = -EIO;
-
-	/* Error cases - e2fsck has already cleaned up for us */
-	if (ino > max_ino) {
-		ext3_warning(sb, __func__,
-			     "bad orphan ino %lu!  e2fsck was run?", ino);
-		goto error;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh) {
-		ext3_warning(sb, __func__,
-			     "inode bitmap error for orphan %lu", ino);
-		goto error;
-	}
-
-	/* Having the inode bit set should be a 100% indicator that this
-	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
-	 * inodes that were being truncated, so we can't check i_nlink==0.
-	 */
-	if (!ext3_test_bit(bit, bitmap_bh->b_data))
-		goto bad_orphan;
-
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		goto iget_failed;
-
-	/*
-	 * If the orphan has i_nlink > 0 then it should be able to be
-	 * truncated, otherwise it won't be removed from the orphan list
-	 * during processing and an infinite loop will result.
-	 */
-	if (inode->i_nlink && !ext3_can_truncate(inode))
-		goto bad_orphan;
-
-	if (NEXT_ORPHAN(inode) > max_ino)
-		goto bad_orphan;
-	brelse(bitmap_bh);
-	return inode;
-
-iget_failed:
-	err = PTR_ERR(inode);
-	inode = NULL;
-bad_orphan:
-	ext3_warning(sb, __func__,
-		     "bad orphan inode %lu!  e2fsck was run?", ino);
-	printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
-	       ext3_test_bit(bit, bitmap_bh->b_data));
-	printk(KERN_NOTICE "inode=%p\n", inode);
-	if (inode) {
-		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
-		       is_bad_inode(inode));
-		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
-		       NEXT_ORPHAN(inode));
-		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
-		/* Avoid freeing blocks if we got a bad deleted inode */
-		if (inode->i_nlink == 0)
-			inode->i_blocks = 0;
-		iput(inode);
-	}
-	brelse(bitmap_bh);
-error:
-	return ERR_PTR(err);
-}
-
-unsigned long ext3_count_free_inodes (struct super_block * sb)
-{
-	unsigned long desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	unsigned long bitmap_count, x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, i);
-		if (!bitmap_bh)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n",
-		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
-	return desc_count;
-#else
-	desc_count = 0;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		cond_resched();
-	}
-	return desc_count;
-#endif
-}
-
-/* Called at mount-time, super-block is locked */
-unsigned long ext3_count_dirs (struct super_block * sb)
-{
-	unsigned long count = 0;
-	int i;
-
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		count += le16_to_cpu(gdp->bg_used_dirs_count);
-	}
-	return count;
-}
-
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
deleted file mode 100644
index 6c7e546..0000000
--- a/fs/ext3/inode.c
+++ /dev/null
@@ -1,3574 +0,0 @@
-/*
- *  linux/fs/ext3/inode.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Goal-directed block allocation by Stephen Tweedie
- *	(sct@redhat.com), 1993, 1998
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- *
- *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
- */
-
-#include <linux/highuid.h>
-#include <linux/quotaops.h>
-#include <linux/writeback.h>
-#include <linux/mpage.h>
-#include <linux/namei.h>
-#include <linux/uio.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-static int ext3_writepage_trans_blocks(struct inode *inode);
-static int ext3_block_truncate_page(struct inode *inode, loff_t from);
-
-/*
- * Test whether an inode is a fast symlink.
- */
-static int ext3_inode_is_fast_symlink(struct inode *inode)
-{
-	int ea_blocks = EXT3_I(inode)->i_file_acl ?
-		(inode->i_sb->s_blocksize >> 9) : 0;
-
-	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
-}
-
-/*
- * The ext3 forget function must perform a revoke if we are freeing data
- * which has been journaled.  Metadata (eg. indirect blocks) must be
- * revoked in all cases.
- *
- * "bh" may be NULL: a metadata block may have been freed from memory
- * but there may still be a record of it in the journal, and that record
- * still needs to be revoked.
- */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-			struct buffer_head *bh, ext3_fsblk_t blocknr)
-{
-	int err;
-
-	might_sleep();
-
-	trace_ext3_forget(inode, is_metadata, blocknr);
-	BUFFER_TRACE(bh, "enter");
-
-	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-		  "data mode %lx\n",
-		  bh, is_metadata, inode->i_mode,
-		  test_opt(inode->i_sb, DATA_FLAGS));
-
-	/* Never use the revoke function if we are doing full data
-	 * journaling: there is no need to, and a V1 superblock won't
-	 * support it.  Otherwise, only skip the revoke on un-journaled
-	 * data blocks. */
-
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
-	    (!is_metadata && !ext3_should_journal_data(inode))) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call journal_forget");
-			return ext3_journal_forget(handle, bh);
-		}
-		return 0;
-	}
-
-	/*
-	 * data!=journal && (is_metadata || should_journal_data(inode))
-	 */
-	BUFFER_TRACE(bh, "call ext3_journal_revoke");
-	err = ext3_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_abort(inode->i_sb, __func__,
-			   "error %d when attempting revoke", err);
-	BUFFER_TRACE(bh, "exit");
-	return err;
-}
-
-/*
- * Work out how many blocks we need to proceed with the next chunk of a
- * truncate transaction.
- */
-static unsigned long blocks_for_truncate(struct inode *inode)
-{
-	unsigned long needed;
-
-	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
-
-	/* Give ourselves just enough room to cope with inodes in which
-	 * i_blocks is corrupt: we've seen disk corruptions in the past
-	 * which resulted in random data in an inode which looked enough
-	 * like a regular file for ext3 to try to delete it.  Things
-	 * will go a bit crazy if that happens, but at least we should
-	 * try not to panic the whole kernel. */
-	if (needed < 2)
-		needed = 2;
-
-	/* But we need to bound the transaction so we don't overflow the
-	 * journal. */
-	if (needed > EXT3_MAX_TRANS_DATA)
-		needed = EXT3_MAX_TRANS_DATA;
-
-	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
-}
-
-/*
- * Truncate transactions can be complex and absolutely huge.  So we need to
- * be able to restart the transaction at a convenient checkpoint to make
- * sure we don't overflow the journal.
- *
- * start_transaction gets us a new handle for a truncate transaction,
- * and extend_transaction tries to extend the existing one a bit.  If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- */
-static handle_t *start_transaction(struct inode *inode)
-{
-	handle_t *result;
-
-	result = ext3_journal_start(inode, blocks_for_truncate(inode));
-	if (!IS_ERR(result))
-		return result;
-
-	ext3_std_error(inode->i_sb, PTR_ERR(result));
-	return result;
-}
-
-/*
- * Try to extend this transaction for the purposes of truncation.
- *
- * Returns 0 if we managed to create more room.  If we can't create more
- * room and the transaction must be restarted, we return 1.
- */
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
-{
-	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
-		return 0;
-	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
-		return 0;
-	return 1;
-}
-
-/*
- * Restart the transaction associated with *handle.  This does a commit,
- * so before we call here everything must be consistently dirtied against
- * this transaction.
- */
-static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
-{
-	int ret;
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	/*
-	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
-	 * At this moment, get_block can be called only for blocks inside
-	 * i_size since page cache has been already dropped and writes are
-	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
-	 */
-	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
-	mutex_lock(&EXT3_I(inode)->truncate_mutex);
-	return ret;
-}
-
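A hedged sketch of how a truncate loop is meant to combine the two helpers
above (the helper names are from this file; the surrounding control flow is
illustrative only, not the actual caller code):

	/* inside the truncate loop, truncate_mutex held */
	if (try_to_extend_transaction(handle, inode)) {
		/* no room left in the handle: commit and start over */
		err = truncate_restart_transaction(handle, inode);
		if (err)
			goto out;
	}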
-/*
- * Called at inode eviction from icache
- */
-void ext3_evict_inode (struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *rsv;
-	handle_t *handle;
-	int want_delete = 0;
-
-	trace_ext3_evict_inode(inode);
-	if (!inode->i_nlink && !is_bad_inode(inode)) {
-		dquot_initialize(inode);
-		want_delete = 1;
-	}
-
-	/*
-	 * When journalling data dirty buffers are tracked only in the journal.
-	 * So although mm thinks everything is clean and ready for reaping the
-	 * inode might still have some pages to write in the running
-	 * transaction or waiting to be checkpointed. Thus calling
-	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
-	 * these buffers can cause data loss. Also even if we did not discard
-	 * these buffers, we would have no way to find them after the inode
-	 * is reaped and thus the user could see stale data if they try to read
-	 * them before the transaction is checkpointed. So be careful and
-	 * force everything to disk here... We use ei->i_datasync_tid to
-	 * store the newest transaction containing inode's data.
-	 *
-	 * Note that directories do not have this problem because they don't
-	 * use page cache.
-	 *
-	 * The s_journal check handles the case when ext3_get_journal() fails
-	 * and puts the journal inode.
-	 */
-	if (inode->i_nlink && ext3_should_journal_data(inode) &&
-	    EXT3_SB(inode->i_sb)->s_journal &&
-	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
-	    inode->i_ino != EXT3_JOURNAL_INO) {
-		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
-		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-
-		log_start_commit(journal, commit_tid);
-		log_wait_commit(journal, commit_tid);
-		filemap_write_and_wait(&inode->i_data);
-	}
-	truncate_inode_pages_final(&inode->i_data);
-
-	ext3_discard_reservation(inode);
-	rsv = ei->i_block_alloc_info;
-	ei->i_block_alloc_info = NULL;
-	if (unlikely(rsv))
-		kfree(rsv);
-
-	if (!want_delete)
-		goto no_delete;
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle)) {
-		/*
-		 * If we're going to skip the normal cleanup, we still need to
-		 * make sure that the in-core orphan linked list is properly
-		 * cleaned up.
-		 */
-		ext3_orphan_del(NULL, inode);
-		goto no_delete;
-	}
-
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-	inode->i_size = 0;
-	if (inode->i_blocks)
-		ext3_truncate(inode);
-	/*
-	 * Kill off the orphan record created when the inode lost the last
-	 * link.  Note that ext3_orphan_del() has to be able to cope with the
-	 * deletion of a non-existent orphan - ext3_truncate() could
-	 * have removed the record.
-	 */
-	ext3_orphan_del(handle, inode);
-	ei->i_dtime = get_seconds();
-
-	/*
-	 * One subtle ordering requirement: if anything has gone wrong
-	 * (transaction abort, IO errors, whatever), then we can still
-	 * do these next steps (the fs will already have been marked as
-	 * having errors), but we can't free the inode if the mark_dirty
-	 * fails.
-	 */
-	if (ext3_mark_inode_dirty(handle, inode)) {
-		/* If that failed, just dquot_drop() and be done with that */
-		dquot_drop(inode);
-		clear_inode(inode);
-	} else {
-		ext3_xattr_delete_inode(handle, inode);
-		dquot_free_inode(inode);
-		dquot_drop(inode);
-		clear_inode(inode);
-		ext3_free_inode(handle, inode);
-	}
-	ext3_journal_stop(handle);
-	return;
-no_delete:
-	clear_inode(inode);
-	dquot_drop(inode);
-}
-
-typedef struct {
-	__le32	*p;
-	__le32	key;
-	struct buffer_head *bh;
-} Indirect;
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
-{
-	p->key = *(p->p = v);
-	p->bh = bh;
-}
-
-static int verify_chain(Indirect *from, Indirect *to)
-{
-	while (from <= to && from->key == *from->p)
-		from++;
-	return (from > to);
-}
-
-/**
- *	ext3_block_to_path - parse the block number into array of offsets
- *	@inode: inode in question (we are only interested in its superblock)
- *	@i_block: block number to be parsed
- *	@offsets: array to store the offsets in
- *      @boundary: set this non-zero if the referred-to block is likely to be
- *             followed (on disk) by an indirect block.
- *
- *	To store the locations of file's data ext3 uses a data structure common
- *	for UNIX filesystems - tree of pointers anchored in the inode, with
- *	data blocks at leaves and indirect blocks in intermediate nodes.
- *	This function translates the block number into path in that tree -
- *	return value is the path length and @offsets[n] is the offset of
- *	pointer to (n+1)th node in the nth one. If @block is out of range
- *	(negative or too large) a warning is printed and zero is returned.
- *
- *	Note: function doesn't find node addresses, so no IO is needed. All
- *	we need to know is the capacity of indirect blocks (taken from the
- *	inode->i_sb).
- */
-
-/*
- * Portability note: the last comparison (check that we fit into triple
- * indirect block) is spelled differently, because otherwise on an
- * architecture with 32-bit longs and 8Kb pages we might get into trouble
- * if our filesystem had 8Kb blocks. We might use long long, but that would
- * kill us on x86. Oh, well, at least the sign propagation does not matter -
- * i_block would have to be negative in the very beginning, so we would not
- * get there at all.
- */
-
-static int ext3_block_to_path(struct inode *inode,
-			long i_block, int offsets[4], int *boundary)
-{
-	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
-	const long direct_blocks = EXT3_NDIR_BLOCKS,
-		indirect_blocks = ptrs,
-		double_blocks = (1 << (ptrs_bits * 2));
-	int n = 0;
-	int final = 0;
-
-	if (i_block < 0) {
-		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
-	} else if (i_block < direct_blocks) {
-		offsets[n++] = i_block;
-		final = direct_blocks;
-	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
-		offsets[n++] = EXT3_IND_BLOCK;
-		offsets[n++] = i_block;
-		final = ptrs;
-	} else if ((i_block -= indirect_blocks) < double_blocks) {
-		offsets[n++] = EXT3_DIND_BLOCK;
-		offsets[n++] = i_block >> ptrs_bits;
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
-		offsets[n++] = EXT3_TIND_BLOCK;
-		offsets[n++] = i_block >> (ptrs_bits * 2);
-		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else {
-		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
-	}
-	if (boundary)
-		*boundary = final - 1 - (i_block & (ptrs - 1));
-	return n;
-}
-
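To make the decomposition concrete, a worked example assuming a 1 KB block
size (12 direct blocks, 256 pointers per indirect block):

	logical block 0..11  -> depth 1: { i_block }
	logical block 12     -> depth 2: { EXT3_IND_BLOCK, 0 }
	logical block 268    -> depth 3: { EXT3_DIND_BLOCK, 0, 0 }
	logical block 65804  -> depth 4: { EXT3_TIND_BLOCK, 0, 0, 0 }

since the indirect range starts at 12, the double-indirect range at
12 + 256 = 268, and the triple-indirect range at 268 + 256*256 = 65804.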
-/**
- *	ext3_get_branch - read the chain of indirect blocks leading to data
- *	@inode: inode in question
- *	@depth: depth of the chain (1 - direct pointer, etc.)
- *	@offsets: offsets of pointers in inode/indirect blocks
- *	@chain: place to store the result
- *	@err: here we store the error value
- *
- *	Function fills the array of triples <key, p, bh> and returns %NULL
- *	if everything went OK or the pointer to the last filled triple
- *	(incomplete one) otherwise. Upon the return chain[i].key contains
- *	the number of (i+1)-th block in the chain (as it is stored in memory,
- *	i.e. little-endian 32-bit), chain[i].p contains the address of that
- *	number (it points into struct inode for i==0 and into the bh->b_data
- *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
- *	block for i>0 and NULL for i==0. In other words, it holds the block
- *	numbers of the chain, addresses they were taken from (and where we can
- *	verify that chain did not change) and buffer_heads hosting these
- *	numbers.
- *
- *	Function stops when it stumbles upon zero pointer (absent block)
- *		(pointer to last triple returned, *@err == 0)
- *	or when it gets an IO error reading an indirect block
- *		(ditto, *@err == -EIO)
- *	or when it notices that chain had been changed while it was reading
- *		(ditto, *@err == -EAGAIN)
- *	or when it reads all @depth-1 indirect blocks successfully and finds
- *	the whole chain, all way to the data (returns %NULL, *err == 0).
- */
-static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
-				 Indirect chain[4], int *err)
-{
-	struct super_block *sb = inode->i_sb;
-	Indirect *p = chain;
-	struct buffer_head *bh;
-
-	*err = 0;
-	/* i_data is not going away, no lock needed */
-	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
-	if (!p->key)
-		goto no_block;
-	while (--depth) {
-		bh = sb_bread(sb, le32_to_cpu(p->key));
-		if (!bh)
-			goto failure;
-		/* Reader: pointers */
-		if (!verify_chain(chain, p))
-			goto changed;
-		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
-		/* Reader: end */
-		if (!p->key)
-			goto no_block;
-	}
-	return NULL;
-
-changed:
-	brelse(bh);
-	*err = -EAGAIN;
-	goto no_block;
-failure:
-	*err = -EIO;
-no_block:
-	return p;
-}
-
-/**
- *	ext3_find_near - find a place for allocation with sufficient locality
- *	@inode: owner
- *	@ind: descriptor of indirect block.
- *
- *	This function returns the preferred place for block allocation.
- *	It is used when heuristic for sequential allocation fails.
- *	Rules are:
- *	  + if there is a block to the left of our position - allocate near it.
- *	  + if pointer will live in indirect block - allocate near that block.
- *	  + if pointer will live in inode - allocate in the same
- *	    cylinder group.
- *
- * In the latter case we colour the starting block by the caller's PID to
- * prevent it from clashing with concurrent allocations for a different inode
- * in the same block group.   The PID is used here so that functionally related
- * files will be close-by on-disk.
- *
- *	Caller must make sure that @ind is valid and will stay that way.
- */
-static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
-	__le32 *p;
-	ext3_fsblk_t bg_start;
-	ext3_grpblk_t colour;
-
-	/* Try to find previous block */
-	for (p = ind->p - 1; p >= start; p--) {
-		if (*p)
-			return le32_to_cpu(*p);
-	}
-
-	/* No such thing, so let's try location of indirect block */
-	if (ind->bh)
-		return ind->bh->b_blocknr;
-
-	/*
-	 * Is it going to be referred to from the inode itself? OK, just put it
-	 * into the same cylinder group then.
-	 */
-	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
-	colour = (current->pid % 16) *
-			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-	return bg_start + colour;
-}
-
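As a worked example of the colouring above, assuming a 1 KB block size
(EXT3_BLOCKS_PER_GROUP == 8192): sixteen concurrent allocators get starting
offsets 0, 512, 1024, ... within the group, since

	colour = (current->pid % 16) * (8192 / 16);	/* multiples of 512 */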
-/**
- *	ext3_find_goal - find a preferred place for allocation.
- *	@inode: owner
- *	@block:  block we want
- *	@partial: pointer to the last triple within a chain
- *
- *	Normally this function finds the preferred place for block allocation
- *	and returns it.
- */
-
-static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
-				   Indirect *partial)
-{
-	struct ext3_block_alloc_info *block_i;
-
-	block_i =  EXT3_I(inode)->i_block_alloc_info;
-
-	/*
-	 * try the heuristic for sequential allocation,
-	 * failing that at least try to get decent locality.
-	 */
-	if (block_i && (block == block_i->last_alloc_logical_block + 1)
-		&& (block_i->last_alloc_physical_block != 0)) {
-		return block_i->last_alloc_physical_block + 1;
-	}
-
-	return ext3_find_near(inode, partial);
-}
-
-/**
- *	ext3_blks_to_allocate - Look up the block map and count the number
- *	of direct blocks that need to be allocated for the given branch.
- *
- *	@branch: chain of indirect blocks
- *	@k: number of blocks needed for indirect blocks
- *	@blks: number of data blocks to be mapped.
- *	@blocks_to_boundary:  the offset in the indirect block
- *
- *	return the total number of blocks to be allocated, including the
- *	direct and indirect blocks.
- */
-static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
-		int blocks_to_boundary)
-{
-	unsigned long count = 0;
-
-	/*
-	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
-	 * so it's clear the blocks on that path have not been allocated either.
-	 */
-	if (k > 0) {
-		/* right now we don't handle cross boundary allocation */
-		if (blks < blocks_to_boundary + 1)
-			count += blks;
-		else
-			count += blocks_to_boundary + 1;
-		return count;
-	}
-
-	count++;
-	while (count < blks && count <= blocks_to_boundary &&
-		le32_to_cpu(*(branch[0].p + count)) == 0) {
-		count++;
-	}
-	return count;
-}
-
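For example (illustrative values): with the indirect block already present
(k == 0), if branch[0].p points at an empty slot whose next two slots are
also empty but whose third is already mapped, then a request for blks = 8
returns 3 -- the requested block plus its two contiguous free neighbours.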
-/**
- *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@goal: preferred place for allocation
- *	@indirect_blks: the number of blocks that need to be allocated for
- *			indirect blocks
- *	@blks:	number of blocks that need to be allocated for direct blocks
- *	@new_blocks: on return it will store the new block numbers for
- *	the indirect blocks(if needed) and the first direct block,
- *	@err: here we store the error value
- *
- *	return the number of direct blocks allocated
- */
-static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int indirect_blks, int blks,
-			ext3_fsblk_t new_blocks[4], int *err)
-{
-	int target, i;
-	unsigned long count = 0;
-	int index = 0;
-	ext3_fsblk_t current_block = 0;
-	int ret = 0;
-
-	/*
-	 * Here we try to allocate the requested multiple blocks at once,
-	 * on a best-effort basis.
-	 * To build a branch, we should allocate blocks for
-	 * the indirect blocks (if not allocated yet), and at least
-	 * the first direct block of this branch.  That's the
-	 * minimum number of blocks we need to allocate (required).
-	 */
-	target = blks + indirect_blks;
-
-	while (1) {
-		count = target;
-		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
-		if (*err)
-			goto failed_out;
-
-		target -= count;
-		/* allocate blocks for indirect blocks */
-		while (index < indirect_blks && count) {
-			new_blocks[index++] = current_block++;
-			count--;
-		}
-
-		if (count > 0)
-			break;
-	}
-
-	/* save the new block number for the first direct block */
-	new_blocks[index] = current_block;
-
-	/* total number of blocks allocated for direct blocks */
-	ret = count;
-	*err = 0;
-	return ret;
-failed_out:
-	for (i = 0; i < index; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-	return ret;
-}
-
-/**
- *	ext3_alloc_branch - allocate and set up a chain of blocks.
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@indirect_blks: number of allocated indirect blocks
- *	@blks: number of allocated direct blocks
- *	@goal: preferred place for allocation
- *	@offsets: offsets (in the blocks) to store the pointers to next.
- *	@branch: place to store the chain in.
- *
- *	This function allocates blocks, zeroes out all but the last one,
- *	links them into a chain and (if we are synchronous) writes them to disk.
- *	In other words, it prepares a branch that can be spliced onto the
- *	inode. It stores the information about that chain in the branch[], in
- *	the same format as ext3_get_branch() would do. We are calling it after
- *	we had read the existing part of chain and partial points to the last
- *	triple of that (one with zero ->key). Upon the exit we have the same
- *	picture as after the successful ext3_get_block(), except that in one
- *	place chain is disconnected - *branch->p is still zero (we did not
- *	set the last link), but branch->key contains the number that should
- *	be placed into *branch->p to fill that gap.
- *
- *	If allocation fails we free all blocks we've allocated (and forget
- *	their buffer_heads) and return the error value from the failed
- *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
- *	as described above and return 0.
- */
-static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			int indirect_blks, int *blks, ext3_fsblk_t goal,
-			int *offsets, Indirect *branch)
-{
-	int blocksize = inode->i_sb->s_blocksize;
-	int i, n = 0;
-	int err = 0;
-	struct buffer_head *bh;
-	int num;
-	ext3_fsblk_t new_blocks[4];
-	ext3_fsblk_t current_block;
-
-	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
-				*blks, new_blocks, &err);
-	if (err)
-		return err;
-
-	branch[0].key = cpu_to_le32(new_blocks[0]);
-	/*
-	 * metadata blocks and data blocks are allocated.
-	 */
-	for (n = 1; n <= indirect_blks;  n++) {
-		/*
-		 * Get buffer_head for parent block, zero it out
-		 * and set the pointer to new one, then send
-		 * parent to disk.
-		 */
-		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			goto failed;
-		}
-		branch[n].bh = bh;
-		lock_buffer(bh);
-		BUFFER_TRACE(bh, "call get_create_access");
-		err = ext3_journal_get_create_access(handle, bh);
-		if (err) {
-			unlock_buffer(bh);
-			brelse(bh);
-			goto failed;
-		}
-
-		memset(bh->b_data, 0, blocksize);
-		branch[n].p = (__le32 *) bh->b_data + offsets[n];
-		branch[n].key = cpu_to_le32(new_blocks[n]);
-		*branch[n].p = branch[n].key;
-		if ( n == indirect_blks) {
-			current_block = new_blocks[n];
-			/*
-			 * End of chain, update the last new metablock of
-			 * the chain to point to the new allocated
-			 * data blocks numbers
-			 */
-			for (i=1; i < num; i++)
-				*(branch[n].p + i) = cpu_to_le32(++current_block);
-		}
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-
-		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh);
-		if (err)
-			goto failed;
-	}
-	*blks = num;
-	return err;
-failed:
-	/* Allocation failed, free what we already allocated */
-	for (i = 1; i <= n ; i++) {
-		BUFFER_TRACE(branch[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, branch[i].bh);
-	}
-	for (i = 0; i < indirect_blks; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-
-	ext3_free_blocks(handle, inode, new_blocks[i], num);
-
-	return err;
-}
-
-/**
- * ext3_splice_branch - splice the allocated branch onto inode.
- * @handle: handle for this transaction
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @where: location of missing link
- * @num:   number of indirect blocks we are adding
- * @blks:  number of direct blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
- */
-static int ext3_splice_branch(handle_t *handle, struct inode *inode,
-			long block, Indirect *where, int num, int blks)
-{
-	int i;
-	int err = 0;
-	struct ext3_block_alloc_info *block_i;
-	ext3_fsblk_t current_block;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct timespec now;
-
-	block_i = ei->i_block_alloc_info;
-	/*
-	 * If we're splicing into a [td]indirect block (as opposed to the
-	 * inode) then we need to get write access to the [td]indirect block
-	 * before the splice.
-	 */
-	if (where->bh) {
-		BUFFER_TRACE(where->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, where->bh);
-		if (err)
-			goto err_out;
-	}
-	/* That's it */
-
-	*where->p = where->key;
-
-	/*
-	 * Update the host buffer_head or inode to point to the just-allocated
-	 * direct blocks
-	 */
-	if (num == 0 && blks > 1) {
-		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
-			*(where->p + i ) = cpu_to_le32(current_block++);
-	}
-
-	/*
-	 * update the most recently allocated logical & physical block
-	 * in i_block_alloc_info, to assist in finding the proper goal block
-	 * for the next allocation
-	 */
-	if (block_i) {
-		block_i->last_alloc_logical_block = block + blks - 1;
-		block_i->last_alloc_physical_block =
-				le32_to_cpu(where[num].key) + blks - 1;
-	}
-
-	/* We are done with atomic stuff, now do the rest of housekeeping */
-	now = CURRENT_TIME_SEC;
-	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
-		inode->i_ctime = now;
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	/* ext3_mark_inode_dirty already updated i_sync_tid */
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-
-	/* Did we splice it onto an indirect block? */
-	if (where->bh) {
-		/*
-		 * If we spliced it onto an indirect block, we haven't
-		 * altered the inode.  Note however that if it is being spliced
-		 * onto an indirect block at the very end of the file (the
-		 * file is growing) then we *will* alter the inode to reflect
-		 * the new i_size.  But that is not done here - it is done in
-		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
-		 */
-		jbd_debug(5, "splicing indirect only\n");
-		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, where->bh);
-		if (err)
-			goto err_out;
-	} else {
-		/*
-		 * OK, we spliced it into the inode itself on a direct block.
-		 * Inode was dirtied above.
-		 */
-		jbd_debug(5, "splicing direct\n");
-	}
-	return err;
-
-err_out:
-	for (i = 1; i <= num; i++) {
-		BUFFER_TRACE(where[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, where[i].bh);
-		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
-	}
-	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
-
-	return err;
-}
-
-/*
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to the leaf. So let's do it before attaching
- * anything to the tree, set linkage between the newborn blocks, write them
- * if sync is
- * required, recheck the path, free and repeat if check fails, otherwise
- * set the last missing link (that will protect us from any truncate-generated
- * removals - all blocks on the path are immune now) and possibly force the
- * write on the parent block.
- * That has a nice additional property: no special recovery from the failed
- * allocations is needed - we simply release blocks and do not touch anything
- * reachable from inode.
- *
- * `handle' can be NULL if create == 0.
- *
- * The BKL may not be held on entry here.  Be sure to take it early.
- * return > 0, # of blocks mapped or allocated.
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- */
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-		sector_t iblock, unsigned long maxblocks,
-		struct buffer_head *bh_result,
-		int create)
-{
-	int err = -EIO;
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	ext3_fsblk_t goal;
-	int indirect_blks;
-	int blocks_to_boundary = 0;
-	int depth;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int count = 0;
-	ext3_fsblk_t first_block = 0;
-
-
-	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
-	J_ASSERT(handle != NULL || create == 0);
-	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
-
-	if (depth == 0)
-		goto out;
-
-	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-
-	/* Simplest case - block found, no allocation needed */
-	if (!partial) {
-		first_block = le32_to_cpu(chain[depth - 1].key);
-		clear_buffer_new(bh_result);
-		count++;
-		/*map more blocks*/
-		while (count < maxblocks && count <= blocks_to_boundary) {
-			ext3_fsblk_t blk;
-
-			if (!verify_chain(chain, chain + depth - 1)) {
-				/*
-				 * Indirect block might be removed by
-				 * truncate while we were reading it.
-				 * Handling of that case: forget what we've
-				 * got now. Flag the err as -EAGAIN, so the
-				 * caller will reread.
-				 */
-				err = -EAGAIN;
-				count = 0;
-				break;
-			}
-			blk = le32_to_cpu(*(chain[depth-1].p + count));
-
-			if (blk == first_block + count)
-				count++;
-			else
-				break;
-		}
-		if (err != -EAGAIN)
-			goto got_it;
-	}
-
-	/* Next simple case - plain lookup or failed read of indirect block */
-	if (!create || err == -EIO)
-		goto cleanup;
-
-	/*
-	 * Block out ext3_truncate while we alter the tree
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	/*
-	 * If the indirect block is missing while we are reading
-	 * the chain (ext3_get_branch() returns -EAGAIN err), or
-	 * if the chain has been changed after we grab the semaphore,
-	 * (either because another process truncated this branch, or
-	 * another get_block allocated this branch) re-grab the chain to see if
-	 * the requested block has been allocated or not.
-	 *
-	 * Since we already block the truncate/other get_block
-	 * at this point, we will have the current copy of the chain when we
-	 * splice the branch into the tree.
-	 */
-	if (err == -EAGAIN || !verify_chain(chain, partial)) {
-		while (partial > chain) {
-			brelse(partial->bh);
-			partial--;
-		}
-		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-		if (!partial) {
-			count++;
-			mutex_unlock(&ei->truncate_mutex);
-			if (err)
-				goto cleanup;
-			clear_buffer_new(bh_result);
-			goto got_it;
-		}
-	}
-
-	/*
-	 * Okay, we need to do block allocation.  Lazily initialize the block
-	 * allocation info here if necessary.
-	 */
-	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
-		ext3_init_block_alloc_info(inode);
-
-	goal = ext3_find_goal(inode, iblock, partial);
-
-	/* the number of blocks need to allocate for [d,t]indirect blocks */
-	indirect_blks = (chain + depth) - partial - 1;
-
-	/*
-	 * Next look up the indirect map to count the total number of
-	 * direct blocks to allocate for this branch.
-	 */
-	count = ext3_blks_to_allocate(partial, indirect_blks,
-					maxblocks, blocks_to_boundary);
-	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
-				offsets + (partial - chain), partial);
-
-	/*
-	 * The ext3_splice_branch call will free and forget any buffers
-	 * on the new chain if there is a failure, but that risks using
-	 * up transaction credits, especially for bitmaps where the
-	 * credits cannot be returned.  Can we handle this somehow?  We
-	 * may need to return -EAGAIN upwards in the worst case.  --sct
-	 */
-	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock,
-					partial, indirect_blks, count);
-	mutex_unlock(&ei->truncate_mutex);
-	if (err)
-		goto cleanup;
-
-	set_buffer_new(bh_result);
-got_it:
-	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (count > blocks_to_boundary)
-		set_buffer_boundary(bh_result);
-	err = count;
-	/* Clean up and exit */
-	partial = chain + depth - 1;	/* the whole chain */
-cleanup:
-	while (partial > chain) {
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse(partial->bh);
-		partial--;
-	}
-	BUFFER_TRACE(bh_result, "returned");
-out:
-	trace_ext3_get_blocks_exit(inode, iblock,
-				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
-				   count, err);
-	return err;
-}
-
-/* Maximum number of blocks we map for direct IO at once. */
-#define DIO_MAX_BLOCKS 4096
-/*
- * Number of credits we need for writing DIO_MAX_BLOCKS:
- * We need sb + group descriptor + bitmap + inode -> 4
- * For B blocks with A block pointers per block we need:
- * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
- * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
- */
-#define DIO_CREDITS 25
-
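Checking the arithmetic in the comment above: with B = 4096 and A = 256,

	1 (triple) + (4096/256/256 + 2) (double) + (4096/256 + 2) (indirect)
	  = 1 + 2 + 18 = 21

and the 4 fixed blocks (sb + group descriptor + bitmap + inode) bring the
total to the 25 used for DIO_CREDITS.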
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	int ret = 0, started = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-
-	if (create && !handle) {	/* Direct IO write... */
-		if (max_blocks > DIO_MAX_BLOCKS)
-			max_blocks = DIO_MAX_BLOCKS;
-		handle = ext3_journal_start(inode, DIO_CREDITS +
-				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto out;
-		}
-		started = 1;
-	}
-
-	ret = ext3_get_blocks_handle(handle, inode, iblock,
-					max_blocks, bh_result, create);
-	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		ret = 0;
-	}
-	if (started)
-		ext3_journal_stop(handle);
-out:
-	return ret;
-}
-
-int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		u64 start, u64 len)
-{
-	return generic_block_fiemap(inode, fieinfo, start, len,
-				    ext3_get_block);
-}
-
-/*
- * `handle' can be NULL if create is zero
- */
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
-				long block, int create, int *errp)
-{
-	struct buffer_head dummy;
-	int fatal = 0, err;
-
-	J_ASSERT(handle != NULL || create == 0);
-
-	dummy.b_state = 0;
-	dummy.b_blocknr = -1000;
-	buffer_trace_init(&dummy.b_history);
-	err = ext3_get_blocks_handle(handle, inode, block, 1,
-					&dummy, create);
-	/*
-	 * ext3_get_blocks_handle() returns the number of blocks
-	 * mapped, or 0 in case of a hole.
-	 */
-	if (err > 0) {
-		WARN_ON(err > 1);
-		err = 0;
-	}
-	*errp = err;
-	if (!err && buffer_mapped(&dummy)) {
-		struct buffer_head *bh;
-		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (unlikely(!bh)) {
-			*errp = -ENOMEM;
-			goto err;
-		}
-		if (buffer_new(&dummy)) {
-			J_ASSERT(create != 0);
-			J_ASSERT(handle != NULL);
-
-			/*
-			 * Now that we do not always journal data, we should
-			 * keep in mind whether this should always journal the
-			 * new buffer as metadata.  For now, regular file
-			 * writes use ext3_get_block instead, so it's not a
-			 * problem.
-			 */
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			fatal = ext3_journal_get_create_access(handle, bh);
-			if (!fatal && !buffer_uptodate(bh)) {
-				memset(bh->b_data,0,inode->i_sb->s_blocksize);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (!fatal)
-				fatal = err;
-		} else {
-			BUFFER_TRACE(bh, "not a new buffer");
-		}
-		if (fatal) {
-			*errp = fatal;
-			brelse(bh);
-			bh = NULL;
-		}
-		return bh;
-	}
-err:
-	return NULL;
-}
-
-struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
-			       int block, int create, int *err)
-{
-	struct buffer_head * bh;
-
-	bh = ext3_getblk(handle, inode, block, create, err);
-	if (!bh)
-		return bh;
-	if (bh_uptodate_or_lock(bh))
-		return bh;
-	get_bh(bh);
-	bh->b_end_io = end_buffer_read_sync;
-	submit_bh(READ | REQ_META | REQ_PRIO, bh);
-	wait_on_buffer(bh);
-	if (buffer_uptodate(bh))
-		return bh;
-	put_bh(bh);
-	*err = -EIO;
-	return NULL;
-}
-
-static int walk_page_buffers(	handle_t *handle,
-				struct buffer_head *head,
-				unsigned from,
-				unsigned to,
-				int *partial,
-				int (*fn)(	handle_t *handle,
-						struct buffer_head *bh))
-{
-	struct buffer_head *bh;
-	unsigned block_start, block_end;
-	unsigned blocksize = head->b_size;
-	int err, ret = 0;
-	struct buffer_head *next;
-
-	for (	bh = head, block_start = 0;
-		ret == 0 && (bh != head || !block_start);
-		block_start = block_end, bh = next)
-	{
-		next = bh->b_this_page;
-		block_end = block_start + blocksize;
-		if (block_end <= from || block_start >= to) {
-			if (partial && !buffer_uptodate(bh))
-				*partial = 1;
-			continue;
-		}
-		err = (*fn)(handle, bh);
-		if (!ret)
-			ret = err;
-	}
-	return ret;
-}
-
-/*
- * To preserve ordering, it is essential that the hole instantiation and
- * the data write be encapsulated in a single transaction.  We cannot
- * close off a transaction and start a new one between the ext3_get_block()
- * and the commit_write().  So doing the journal_start at the start of
- * prepare_write() is the right approach.
- *
- * Also, this function can nest inside ext3_writepage() ->
- * block_write_full_page(). In that case, we *know* that ext3_writepage()
- * has generated enough buffer credits to do the whole page.  So we won't
- * block on the journal in that case, which is good, because the caller may
- * be PF_MEMALLOC.
- *
- * By accident, ext3 can be reentered when a transaction is open via
- * quota file writes.  If we were to commit the transaction while thus
- * reentered, there can be a deadlock - we would be holding a quota
- * lock, and the commit would never complete if another thread had a
- * transaction open and was blocking on the quota lock - a ranking
- * violation.
- *
- * So what we do is to rely on the fact that journal_stop/journal_start
- * will _not_ run commit under these circumstances because handle->h_ref
- * is elevated.  We'll still have enough credits for the tiny quotafile
- * write.
- */
-static int do_journal_get_write_access(handle_t *handle,
-					struct buffer_head *bh)
-{
-	int dirty = buffer_dirty(bh);
-	int ret;
-
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	/*
-	 * __block_prepare_write() could have dirtied some buffers. Clean
-	 * the dirty bit as jbd2_journal_get_write_access() could complain
-	 * otherwise about fs integrity issues. Setting of the dirty bit
-	 * by __block_prepare_write() isn't a real problem here as we clear
-	 * the bit before releasing a page lock and thus writeback cannot
-	 * ever write the buffer.
-	 */
-	if (dirty)
-		clear_buffer_dirty(bh);
-	ret = ext3_journal_get_write_access(handle, bh);
-	if (!ret && dirty)
-		ret = ext3_journal_dirty_metadata(handle, bh);
-	return ret;
-}
-
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext3_truncate_failed_write(struct inode *inode)
-{
-	truncate_inode_pages(inode->i_mapping, inode->i_size);
-	ext3_truncate(inode);
-}
-
-/*
- * Truncate blocks that were not used by direct IO write. We have to zero out
- * the last file block as well because direct IO might have written to it.
- */
-static void ext3_truncate_failed_direct_write(struct inode *inode)
-{
-	ext3_block_truncate_page(inode, inode->i_size);
-	ext3_truncate(inode);
-}
-
-static int ext3_write_begin(struct file *file, struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned flags,
-				struct page **pagep, void **fsdata)
-{
-	struct inode *inode = mapping->host;
-	int ret;
-	handle_t *handle;
-	int retries = 0;
-	struct page *page;
-	pgoff_t index;
-	unsigned from, to;
-	/* Reserve one block more for addition to orphan list in case
-	 * we allocate blocks but the write fails for some reason */
-	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
-
-	trace_ext3_write_begin(inode, pos, len, flags);
-
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-retry:
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (!page)
-		return -ENOMEM;
-	*pagep = page;
-
-	handle = ext3_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		unlock_page(page);
-		page_cache_release(page);
-		ret = PTR_ERR(handle);
-		goto out;
-	}
-	ret = __block_write_begin(page, pos, len, ext3_get_block);
-	if (ret)
-		goto write_begin_failed;
-
-	if (ext3_should_journal_data(inode)) {
-		ret = walk_page_buffers(handle, page_buffers(page),
-				from, to, NULL, do_journal_get_write_access);
-	}
-write_begin_failed:
-	if (ret) {
-		/*
-		 * block_write_begin may have instantiated a few blocks
-		 * outside i_size.  Trim these off again. Don't need
-		 * i_size_read because we hold i_mutex.
-		 *
-		 * Add inode to orphan list in case we crash before truncate
-		 * finishes. Do this only if ext3_can_truncate() agrees so
-		 * that orphan processing code is happy.
-		 */
-		if (pos + len > inode->i_size && ext3_can_truncate(inode))
-			ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		unlock_page(page);
-		page_cache_release(page);
-		if (pos + len > inode->i_size)
-			ext3_truncate_failed_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-out:
-	return ret;
-}
-
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_data(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(__func__, __func__,
-						bh, handle, err);
-	return err;
-}
-
-/* For ordered writepage and write_end functions */
-static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
-{
-	/*
-	 * Write could have mapped the buffer but it didn't copy the data in
-	 * yet. So avoid filing such a buffer into a transaction.
-	 */
-	if (buffer_mapped(bh) && buffer_uptodate(bh))
-		return ext3_journal_dirty_data(handle, bh);
-	return 0;
-}
-
-/* For write_end() in data=journal mode */
-static int write_end_fn(handle_t *handle, struct buffer_head *bh)
-{
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	set_buffer_uptodate(bh);
-	return ext3_journal_dirty_metadata(handle, bh);
-}
-
-/*
- * This is nasty and subtle: ext3_write_begin() could have allocated blocks
- * for the whole page but later we failed to copy the data in. Update inode
- * size according to what we managed to copy. The rest is going to be
- * truncated in write_end function.
- */
-static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
-{
-	/* What matters to us is i_disksize. We don't write i_size anywhere */
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	if (pos + copied > EXT3_I(inode)->i_disksize) {
-		EXT3_I(inode)->i_disksize = pos + copied;
-		mark_inode_dirty(inode);
-	}
-}
-
-/*
- * We need to pick up the new inode size which generic_commit_write gave us.
- * `file' can be NULL - eg, when called from page_symlink().
- *
- * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
- * buffers are managed internally.
- */
-static int ext3_ordered_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	unsigned from, to;
-	int ret = 0, ret2;
-
-	trace_ext3_ordered_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + copied;
-	ret = walk_page_buffers(handle, page_buffers(page),
-		from, to, NULL, journal_dirty_data_fn);
-
-	if (ret == 0)
-		update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_writeback_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	int ret;
-
-	trace_ext3_writeback_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-	update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret = ext3_journal_stop(handle);
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_journalled_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int ret = 0, ret2;
-	int partial = 0;
-	unsigned from, to;
-
-	trace_ext3_journalled_write_end(inode, pos, len, copied);
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-	if (copied < len) {
-		if (!PageUptodate(page))
-			copied = 0;
-		page_zero_new_buffers(page, from + copied, to);
-		to = from + copied;
-	}
-
-	ret = walk_page_buffers(handle, page_buffers(page), from,
-				to, &partial, write_end_fn);
-	if (!partial)
-		SetPageUptodate(page);
-
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-	if (inode->i_size > ei->i_disksize) {
-		ei->i_disksize = inode->i_size;
-		ret2 = ext3_mark_inode_dirty(handle, inode);
-		if (!ret)
-			ret = ret2;
-	}
-
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-/*
- * bmap() is special.  It gets used by applications such as lilo and by
- * the swapper to find the on-disk block of a specific piece of data.
- *
- * Naturally, this is dangerous if the block concerned is still in the
- * journal.  If somebody makes a swapfile on an ext3 data-journaling
- * filesystem and enables swap, then they may get a nasty shock when the
- * data getting swapped to that swapfile suddenly gets overwritten by
- * the original zeroes written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache.
- *
- * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache.
- */
-static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
-{
-	struct inode *inode = mapping->host;
-	journal_t *journal;
-	int err;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
-		/*
-		 * This is a REALLY heavyweight approach, but the use of
-		 * bmap on dirty files is expected to be extremely rare:
-		 * only if we run lilo or swapon on a freshly made file
-		 * do we expect this to happen.
-		 *
-		 * (bmap requires CAP_SYS_RAWIO so this does not
-		 * represent an unprivileged user DOS attack --- we'd be
-		 * in trouble if mortal users could trigger this path at
-		 * will.)
-		 *
-		 * NB. EXT3_STATE_JDATA is not set on files other than
-		 * regular files.  If somebody wants to bmap a directory
-		 * or symlink and gets confused because the buffer
-		 * hasn't yet been flushed to disk, they deserve
-		 * everything they get.
-		 */
-
-		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
-		journal = EXT3_JOURNAL(inode);
-		journal_lock_updates(journal);
-		err = journal_flush(journal);
-		journal_unlock_updates(journal);
-
-		if (err)
-			return 0;
-	}
-
-	return generic_block_bmap(mapping, block, ext3_get_block);
-}
-
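For context, ext3_bmap() above is typically reached from user space through the FIBMAP ioctl (this is what lilo does). A minimal user-space sketch, assuming a readable target file is passed as the first argument; FIBMAP needs CAP_SYS_RAWIO, matching the comment above:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		int block = 0;	/* in: logical block; out: physical block */
		int fd = open(argc > 1 ? argv[1] : "/boot/vmlinuz", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, FIBMAP, &block) == 0)
			printf("logical block 0 -> physical block %d\n", block);
		close(fd);
		return 0;
	}
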
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
-	get_bh(bh);
-	return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
-	put_bh(bh);
-	return 0;
-}
-
-static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
-{
-	return !buffer_mapped(bh);
-}
-
-/*
- * Note that whenever we need to map blocks we start a transaction even if
- * we're not journalling data.  This is to preserve ordering: any hole
- * instantiation within __block_write_full_page -> ext3_get_block() should be
- * journalled along with the data so we don't crash and then get metadata which
- * refers to old data.
- *
- * In all journalling modes block_write_full_page() will start the I/O.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- */
-static int ext3_ordered_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	struct buffer_head *page_bufs;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered so that inode->i_sb is not dereferenced in the common
-	 * non-error case, avoiding slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	/*
-	 * We give up here if we're reentered, because it might be for a
-	 * different filesystem.
-	 */
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_ordered_writepage(page);
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, inode->i_sb->s_blocksize,
-				(1 << BH_Dirty)|(1 << BH_Uptodate));
-		page_bufs = page_buffers(page);
-	} else {
-		page_bufs = page_buffers(page);
-		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
-				       NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bget_one);
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	/*
-	 * The page can become unlocked at any point now, and
-	 * truncate can then come in and change things.  So we
-	 * can't touch *page from now on.  But *page_bufs is
-	 * safe due to elevated refcount.
-	 */
-
-	/*
-	 * And attach them to the current transaction.  But only if
-	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
-	 * and generally junk.
-	 */
-	if (ret == 0)
-		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
-					NULL, journal_dirty_data_fn);
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bput_one);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_writeback_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered so that inode->i_sb is not dereferenced in the common
-	 * non-error case, avoiding slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_writeback_writepage(page);
-	if (page_has_buffers(page)) {
-		if (!walk_page_buffers(NULL, page_buffers(page), 0,
-				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_journalled_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered so that inode->i_sb is not dereferenced in the common
-	 * non-error case, avoiding slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	trace_ext3_journalled_writepage(page);
-	if (!page_has_buffers(page) || PageChecked(page)) {
-		if (ext3_journal_current_handle())
-			goto no_write;
-
-		handle = ext3_journal_start(inode,
-					    ext3_writepage_trans_blocks(inode));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto no_write;
-		}
-		/*
-		 * It's mmapped pagecache.  Add buffers and journal it.  There
-		 * doesn't seem much point in redirtying the page here.
-		 */
-		ClearPageChecked(page);
-		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
-					  ext3_get_block);
-		if (ret != 0) {
-			ext3_journal_stop(handle);
-			goto out_unlock;
-		}
-		ret = walk_page_buffers(handle, page_buffers(page), 0,
-			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
-		err = walk_page_buffers(handle, page_buffers(page), 0,
-				PAGE_CACHE_SIZE, NULL, write_end_fn);
-		if (ret == 0)
-			ret = err;
-		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-		atomic_set(&EXT3_I(inode)->i_datasync_tid,
-			   handle->h_transaction->t_tid);
-		unlock_page(page);
-		err = ext3_journal_stop(handle);
-		if (!ret)
-			ret = err;
-	} else {
-		/*
-		 * It is a page full of checkpoint-mode buffers. Go and write
-		 * them. They should have been already mapped when they went
-		 * to the journal so provide NULL get_block function to catch
-		 * errors.
-		 */
-		ret = block_write_full_page(page, NULL, wbc);
-	}
-out:
-	return ret;
-
-no_write:
-	redirty_page_for_writepage(wbc, page);
-out_unlock:
-	unlock_page(page);
-	goto out;
-}
-
-static int ext3_readpage(struct file *file, struct page *page)
-{
-	trace_ext3_readpage(page);
-	return mpage_readpage(page, ext3_get_block);
-}
-
-static int
-ext3_readpages(struct file *file, struct address_space *mapping,
-		struct list_head *pages, unsigned nr_pages)
-{
-	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
-}
-
-static void ext3_invalidatepage(struct page *page, unsigned int offset,
-				unsigned int length)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_invalidatepage(page, offset, length);
-
-	/*
-	 * If it's a full truncate we just forget about the pending dirtying
-	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
-		ClearPageChecked(page);
-
-	journal_invalidatepage(journal, page, offset, length);
-}
-
-static int ext3_releasepage(struct page *page, gfp_t wait)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_releasepage(page);
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	return journal_try_to_free_buffers(journal, page, wait);
-}
-
-/*
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list.  So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back into buffered path in that case so we are safe.
- */
-static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
-{
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	handle_t *handle;
-	ssize_t ret;
-	int orphan = 0;
-	size_t count = iov_iter_count(iter);
-	int retries = 0;
-
-	trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
-
-	if (iov_iter_rw(iter) == WRITE) {
-		loff_t final_size = offset + count;
-
-		if (final_size > inode->i_size) {
-			/* Credits for sb + inode write */
-			handle = ext3_journal_start(inode, 2);
-			if (IS_ERR(handle)) {
-				ret = PTR_ERR(handle);
-				goto out;
-			}
-			ret = ext3_orphan_add(handle, inode);
-			if (ret) {
-				ext3_journal_stop(handle);
-				goto out;
-			}
-			orphan = 1;
-			ei->i_disksize = inode->i_size;
-			ext3_journal_stop(handle);
-		}
-	}
-
-retry:
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
-	/*
-	 * In case of error extending write may have instantiated a few
-	 * blocks outside i_size. Trim these off again.
-	 */
-	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
-		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
-
-		if (end > isize)
-			ext3_truncate_failed_direct_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-
-	if (orphan) {
-		int err;
-
-		/* Credits for sb + inode write */
-		handle = ext3_journal_start(inode, 2);
-		if (IS_ERR(handle)) {
-			/* This is really bad luck. We've written the data
-			 * but cannot extend i_size. Truncate allocated blocks
-			 * and pretend the write failed... */
-			ext3_truncate_failed_direct_write(inode);
-			ret = PTR_ERR(handle);
-			if (inode->i_nlink)
-				ext3_orphan_del(NULL, inode);
-			goto out;
-		}
-		if (inode->i_nlink)
-			ext3_orphan_del(handle, inode);
-		if (ret > 0) {
-			loff_t end = offset + ret;
-			if (end > inode->i_size) {
-				ei->i_disksize = end;
-				i_size_write(inode, end);
-				/*
-				 * We're going to return a positive `ret'
-				 * here due to non-zero-length I/O, so there's
-				 * no way of reporting error returns from
-				 * ext3_mark_inode_dirty() to userspace.  So
-				 * ignore it.
-				 */
-				ext3_mark_inode_dirty(handle, inode);
-			}
-		}
-		err = ext3_journal_stop(handle);
-		if (ret == 0)
-			ret = err;
-	}
-out:
-	trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
-	return ret;
-}
-
-/*
- * Pages can be marked dirty completely asynchronously from ext3's journalling
- * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
- * much here because ->set_page_dirty is called under VFS locks.  The page is
- * not necessarily locked.
- *
- * We cannot just dirty the page and leave attached buffers clean, because the
- * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
- * or jbddirty because all the journalling code will explode.
- *
- * So what we do is to mark the page "pending dirty" and next time writepage
- * is called, propagate that into the buffers appropriately.
- */
-static int ext3_journalled_set_page_dirty(struct page *page)
-{
-	SetPageChecked(page);
-	return __set_page_dirty_nobuffers(page);
-}
-
-static const struct address_space_operations ext3_ordered_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_ordered_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_ordered_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.is_dirty_writeback	= buffer_check_dirty_writeback,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_writeback_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_writeback_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_writeback_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_journalled_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_journalled_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_journalled_write_end,
-	.set_page_dirty		= ext3_journalled_set_page_dirty,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-void ext3_set_aops(struct inode *inode)
-{
-	if (ext3_should_order_data(inode))
-		inode->i_mapping->a_ops = &ext3_ordered_aops;
-	else if (ext3_should_writeback_data(inode))
-		inode->i_mapping->a_ops = &ext3_writeback_aops;
-	else
-		inode->i_mapping->a_ops = &ext3_journalled_aops;
-}
-
-/*
- * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This is required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
-static int ext3_block_truncate_page(struct inode *inode, loff_t from)
-{
-	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	unsigned blocksize, iblock, length, pos;
-	struct page *page;
-	handle_t *handle = NULL;
-	struct buffer_head *bh;
-	int err = 0;
-
-	/* Truncated on block boundary - nothing to do */
-	blocksize = inode->i_sb->s_blocksize;
-	if ((from & (blocksize - 1)) == 0)
-		return 0;
-
-	page = grab_cache_page(inode->i_mapping, index);
-	if (!page)
-		return -ENOMEM;
-	length = blocksize - (offset & (blocksize - 1));
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-
-	/* Find the buffer that contains "offset" */
-	bh = page_buffers(page);
-	pos = blocksize;
-	while (offset >= pos) {
-		bh = bh->b_this_page;
-		iblock++;
-		pos += blocksize;
-	}
-
-	err = 0;
-	if (buffer_freed(bh)) {
-		BUFFER_TRACE(bh, "freed: skip");
-		goto unlock;
-	}
-
-	if (!buffer_mapped(bh)) {
-		BUFFER_TRACE(bh, "unmapped");
-		ext3_get_block(inode, iblock, bh, 0);
-		/* unmapped? It's a hole - nothing to do */
-		if (!buffer_mapped(bh)) {
-			BUFFER_TRACE(bh, "still unmapped");
-			goto unlock;
-		}
-	}
-
-	/* Ok, it's mapped. Make sure it's up-to-date */
-	if (PageUptodate(page))
-		set_buffer_uptodate(bh);
-
-	if (!bh_uptodate_or_lock(bh)) {
-		err = bh_submit_read(bh);
-		/* Uhhuh. Read error. Complain and punt. */
-		if (err)
-			goto unlock;
-	}
-
-	/* data=writeback mode doesn't need transaction to zero-out data */
-	if (!ext3_should_writeback_data(inode)) {
-		/* We journal at most one block */
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			clear_highpage(page);
-			flush_dcache_page(page);
-			err = PTR_ERR(handle);
-			goto unlock;
-		}
-	}
-
-	if (ext3_should_journal_data(inode)) {
-		BUFFER_TRACE(bh, "get write access");
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			goto stop;
-	}
-
-	zero_user(page, offset, length);
-	BUFFER_TRACE(bh, "zeroed end of block");
-
-	err = 0;
-	if (ext3_should_journal_data(inode)) {
-		err = ext3_journal_dirty_metadata(handle, bh);
-	} else {
-		if (ext3_should_order_data(inode))
-			err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-stop:
-	if (handle)
-		ext3_journal_stop(handle);
-
-unlock:
-	unlock_page(page);
-	page_cache_release(page);
-	return err;
-}
-
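To make the index/offset/length arithmetic in ext3_block_truncate_page() concrete, here is a worked user-space example, assuming (purely for illustration) 4K pages and a 1K block size: truncating to size 5000 zeroes the last 120 bytes of the block containing offset 5000.

	#include <stdio.h>

	int main(void)
	{
		unsigned long from = 5000;	/* new i_size */
		unsigned long page_size = 4096, blocksize = 1024;

		unsigned long index  = from / page_size;		/* page 1 */
		unsigned long offset = from & (page_size - 1);		/* 904 */
		unsigned long length = blocksize - (offset & (blocksize - 1));

		printf("page %lu: zero %lu bytes starting at page offset %lu\n",
		       index, length, offset);	/* page 1: zero 120 bytes at 904 */
		return 0;
	}
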
-/*
- * Probably it should be a library function... search for first non-zero word
- * or memcmp with zero_page, whatever is better for particular architecture.
- * Linus?
- */
-static inline int all_zeroes(__le32 *p, __le32 *q)
-{
-	while (p < q)
-		if (*p++)
-			return 0;
-	return 1;
-}
-
-/**
- *	ext3_find_shared - find the indirect blocks for partial truncation.
- *	@inode:	  inode in question
- *	@depth:	  depth of the affected branch
- *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
- *	@chain:	  place to store the pointers to partial indirect blocks
- *	@top:	  place to the (detached) top of branch
- *
- *	This is a helper function used by ext3_truncate().
- *
- *	When we do truncate() we may have to clean the ends of several
- *	indirect blocks but leave the blocks themselves alive. Block is
- *	partially truncated if some data below the new i_size is referred
- *	from it (and it is on the path to the first completely truncated
- *	data block, indeed).  We have to free the top of that path along
- *	with everything to the right of the path. Since no allocation
- *	past the truncation point is possible until ext3_truncate()
- *	finishes, we may safely do the latter, but top of branch may
- *	require special attention - pageout below the truncation point
- *	might try to populate it.
- *
- *	We atomically detach the top of branch from the tree, store the
- *	block number of its root in *@top, pointers to buffer_heads of
- *	partially truncated blocks - in @chain[].bh and pointers to
- *	their last elements that should not be removed - in
- *	@chain[].p. Return value is the pointer to last filled element
- *	of @chain.
- *
- *	The work left to caller to do the actual freeing of subtrees:
- *		a) free the subtree starting from *@top
- *		b) free the subtrees whose roots are stored in
- *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
- *		c) free the subtrees growing from the inode past the @chain[0].
- *			(no partially truncated stuff there).  */
-
-static Indirect *ext3_find_shared(struct inode *inode, int depth,
-			int offsets[4], Indirect chain[4], __le32 *top)
-{
-	Indirect *partial, *p;
-	int k, err;
-
-	*top = 0;
-	/* Make k index the deepest non-null offset + 1 */
-	for (k = depth; k > 1 && !offsets[k-1]; k--)
-		;
-	partial = ext3_get_branch(inode, k, offsets, chain, &err);
-	/* Writer: pointers */
-	if (!partial)
-		partial = chain + k-1;
-	/*
-	 * If the branch acquired continuation since we've looked at it -
-	 * fine, it should all survive and (new) top doesn't belong to us.
-	 */
-	if (!partial->key && *partial->p)
-		/* Writer: end */
-		goto no_top;
-	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
-		;
-	/*
-	 * OK, we've found the last block that must survive. The rest of our
-	 * branch should be detached before unlocking. However, if that rest
-	 * of branch is all ours and does not grow immediately from the inode
-	 * it's easier to cheat and just decrement partial->p.
-	 */
-	if (p == chain + k - 1 && p > chain) {
-		p->p--;
-	} else {
-		*top = *p->p;
-		/* Nope, don't do this in ext3.  Must leave the tree intact */
-#if 0
-		*p->p = 0;
-#endif
-	}
-	/* Writer: end */
-
-	while(partial > p) {
-		brelse(partial->bh);
-		partial--;
-	}
-no_top:
-	return partial;
-}
-
-/*
- * Zero a number of block pointers in either an inode or an indirect block.
- * If we restart the transaction we must again get write access to the
- * indirect block for further modification.
- *
- * We release `count' blocks on disk, but (last - first) may be greater
- * than `count' because there can be holes in there.
- */
-static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t block_to_free,
-		unsigned long count, __le32 *first, __le32 *last)
-{
-	__le32 *p;
-	if (try_to_extend_transaction(handle, inode)) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			if (ext3_journal_dirty_metadata(handle, bh))
-				return;
-		}
-		ext3_mark_inode_dirty(handle, inode);
-		truncate_restart_transaction(handle, inode);
-		if (bh) {
-			BUFFER_TRACE(bh, "retaking write access");
-			if (ext3_journal_get_write_access(handle, bh))
-				return;
-		}
-	}
-
-	/*
-	 * Any buffers which are on the journal will be in memory. We find
-	 * them on the hash table so journal_revoke() will run journal_forget()
-	 * on them.  We've already detached each block from the file, so
-	 * bforget() in journal_forget() should be safe.
-	 *
-	 * AKPM: turn on bforget in journal_forget()!!!
-	 */
-	for (p = first; p < last; p++) {
-		u32 nr = le32_to_cpu(*p);
-		if (nr) {
-			struct buffer_head *bh;
-
-			*p = 0;
-			bh = sb_find_get_block(inode->i_sb, nr);
-			ext3_forget(handle, 0, inode, bh, nr);
-		}
-	}
-
-	ext3_free_blocks(handle, inode, block_to_free, count);
-}
-
-/**
- * ext3_free_data - free a list of data blocks
- * @handle:	handle for this transaction
- * @inode:	inode we are dealing with
- * @this_bh:	indirect buffer_head which contains *@first and *@last
- * @first:	array of block numbers
- * @last:	points immediately past the end of array
- *
- * We are freeing all blocks referred from that array (numbers are stored as
- * little-endian 32-bit) and updating @inode->i_blocks appropriately.
- *
- * We accumulate contiguous runs of blocks to free.  Conveniently, if these
- * blocks are contiguous then releasing them at one time will only affect one
- * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
- * actually use a lot of journal space.
- *
- * @this_bh will be %NULL if @first and @last point into the inode's direct
- * block pointers.
- */
-static void ext3_free_data(handle_t *handle, struct inode *inode,
-			   struct buffer_head *this_bh,
-			   __le32 *first, __le32 *last)
-{
-	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
-	unsigned long count = 0;	    /* Number of blocks in the run */
-	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
-					       corresponding to
-					       block_to_free */
-	ext3_fsblk_t nr;		    /* Current block # */
-	__le32 *p;			    /* Pointer into inode/ind
-					       for current block */
-	int err;
-
-	if (this_bh) {				/* For indirect block */
-		BUFFER_TRACE(this_bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, this_bh);
-		/* Important: if we can't update the indirect pointers
-		 * to the blocks, we can't free them. */
-		if (err)
-			return;
-	}
-
-	for (p = first; p < last; p++) {
-		nr = le32_to_cpu(*p);
-		if (nr) {
-			/* accumulate blocks to free if they're contiguous */
-			if (count == 0) {
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			} else if (nr == block_to_free + count) {
-				count++;
-			} else {
-				ext3_clear_blocks(handle, inode, this_bh,
-						  block_to_free,
-						  count, block_to_free_p, p);
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			}
-		}
-	}
-
-	if (count > 0)
-		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
-				  count, block_to_free_p, p);
-
-	if (this_bh) {
-		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
-
-		/*
-		 * The buffer head should have an attached journal head at this
-		 * point. However, if the data is corrupted and an indirect
-		 * block pointed to itself, it would have been detached when
-		 * the block was cleared. Check for this instead of OOPSing.
-		 */
-		if (bh2jh(this_bh))
-			ext3_journal_dirty_metadata(handle, this_bh);
-		else
-			ext3_error(inode->i_sb, "ext3_free_data",
-				   "circular indirect block detected, "
-				   "inode=%lu, block=%llu",
-				   inode->i_ino,
-				   (unsigned long long)this_bh->b_blocknr);
-	}
-}
-
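The accumulation loop in ext3_free_data() is easy to check in isolation. Below is a minimal user-space sketch of the same coalescing logic, using a made-up block list with holes; free_run() stands in for ext3_clear_blocks():

	#include <stdio.h>
	#include <stdint.h>

	static void free_run(uint32_t start, unsigned long count)
	{
		printf("free %lu block(s) starting at %u\n", count, start);
	}

	int main(void)
	{
		uint32_t blocks[] = { 100, 101, 102, 0, 200, 201, 0, 300 };
		uint32_t start = 0;
		unsigned long count = 0;
		size_t i;

		for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
			uint32_t nr = blocks[i];

			if (!nr)
				continue;		/* a hole, as in the loop above */
			if (count == 0) {
				start = nr;		/* begin a new run */
				count = 1;
			} else if (nr == start + count) {
				count++;		/* run stays contiguous */
			} else {
				free_run(start, count);	/* flush the finished run */
				start = nr;
				count = 1;
			}
		}
		if (count > 0)
			free_run(start, count);
		return 0;
	}

This prints three runs (3 blocks at 100, 2 at 200, 1 at 300), mirroring how contiguous blocks are released together to touch as few bitmap blocks as possible.
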
-/**
- *	ext3_free_branches - free an array of branches
- *	@handle: JBD handle for this transaction
- *	@inode:	inode we are dealing with
- *	@parent_bh: the buffer_head which contains *@first and *@last
- *	@first:	array of block numbers
- *	@last:	pointer immediately past the end of array
- *	@depth:	depth of the branches to free
- *
- *	We are freeing all blocks referred from these branches (numbers are
- *	stored as little-endian 32-bit) and updating @inode->i_blocks
- *	appropriately.
- */
-static void ext3_free_branches(handle_t *handle, struct inode *inode,
-			       struct buffer_head *parent_bh,
-			       __le32 *first, __le32 *last, int depth)
-{
-	ext3_fsblk_t nr;
-	__le32 *p;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	if (depth--) {
-		struct buffer_head *bh;
-		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-		p = last;
-		while (--p >= first) {
-			nr = le32_to_cpu(*p);
-			if (!nr)
-				continue;		/* A hole */
-
-			/* Go read the buffer for the next level down */
-			bh = sb_bread(inode->i_sb, nr);
-
-			/*
-			 * A read failure? Report error and clear slot
-			 * (should be rare).
-			 */
-			if (!bh) {
-				ext3_error(inode->i_sb, "ext3_free_branches",
-					   "Read failure, inode=%lu, block="E3FSBLK,
-					   inode->i_ino, nr);
-				continue;
-			}
-
-			/* This zaps the entire block.  Bottom up. */
-			BUFFER_TRACE(bh, "free child branches");
-			ext3_free_branches(handle, inode, bh,
-					   (__le32*)bh->b_data,
-					   (__le32*)bh->b_data + addr_per_block,
-					   depth);
-
-			/*
-			 * Everything below this pointer has been
-			 * released.  Now let this top-of-subtree go.
-			 *
-			 * We want the freeing of this indirect block to be
-			 * atomic in the journal with the updating of the
-			 * bitmap block which owns it.  So make some room in
-			 * the journal.
-			 *
-			 * We zero the parent pointer *after* freeing its
-			 * pointee in the bitmaps, so if extend_transaction()
-			 * for some reason fails to put the bitmap changes and
-			 * the release into the same transaction, recovery
-			 * will merely complain about releasing a free block,
-			 * rather than leaking blocks.
-			 */
-			if (is_handle_aborted(handle))
-				return;
-			if (try_to_extend_transaction(handle, inode)) {
-				ext3_mark_inode_dirty(handle, inode);
-				truncate_restart_transaction(handle, inode);
-			}
-
-			/*
-			 * We've probably journalled the indirect block several
-			 * times during the truncate.  But it's no longer
-			 * needed and we now drop it from the transaction via
-			 * journal_revoke().
-			 *
-			 * That's easy if it's exclusively part of this
-			 * transaction.  But if it's part of the committing
-			 * transaction then journal_forget() will simply
-			 * brelse() it.  That means that if the underlying
-			 * block is reallocated in ext3_get_block(),
-			 * unmap_underlying_metadata() will find this block
-			 * and will try to get rid of it.  damn, damn. Thus
-			 * we don't allow a block to be reallocated until
-			 * a transaction freeing it has fully committed.
-			 *
-			 * We also have to make sure journal replay after a
-			 * crash does not overwrite non-journaled data blocks
-			 * with old metadata when the block got reallocated for
-			 * data.  Thus we have to store a revoke record for a
-			 * block in the same transaction in which we free the
-			 * block.
-			 */
-			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-
-			ext3_free_blocks(handle, inode, nr, 1);
-
-			if (parent_bh) {
-				/*
-				 * The block which we have just freed is
-				 * pointed to by an indirect block: journal it
-				 */
-				BUFFER_TRACE(parent_bh, "get_write_access");
-				if (!ext3_journal_get_write_access(handle,
-								   parent_bh)){
-					*p = 0;
-					BUFFER_TRACE(parent_bh,
-					"call ext3_journal_dirty_metadata");
-					ext3_journal_dirty_metadata(handle,
-								    parent_bh);
-				}
-			}
-		}
-	} else {
-		/* We have reached the bottom of the tree. */
-		BUFFER_TRACE(parent_bh, "free data blocks");
-		ext3_free_data(handle, inode, parent_bh, first, last);
-	}
-}
-
-int ext3_can_truncate(struct inode *inode)
-{
-	if (S_ISREG(inode->i_mode))
-		return 1;
-	if (S_ISDIR(inode->i_mode))
-		return 1;
-	if (S_ISLNK(inode->i_mode))
-		return !ext3_inode_is_fast_symlink(inode);
-	return 0;
-}
-
-/*
- * ext3_truncate()
- *
- * We block out ext3_get_block() block instantiations across the entire
- * transaction, and VFS/VM ensures that ext3_truncate() cannot run
- * simultaneously on behalf of the same inode.
- *
- * As we work through the truncate and commit bits of it to the journal there
- * is one core, guiding principle: the file's tree must always be consistent on
- * disk.  We must be able to restart the truncate after a crash.
- *
- * The file's tree may be transiently inconsistent in memory (although it
- * probably isn't), but whenever we close off and commit a journal transaction,
- * the contents of (the filesystem + the journal) must be consistent and
- * restartable.  It's pretty simple, really: bottom up, right to left (although
- * left-to-right works OK too).
- *
- * Note that at recovery time, journal replay occurs *before* the restart of
- * truncate against the orphan inode list.
- *
- * The committed inode has the new, desired i_size (which is the same as
- * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
- * that this inode's truncate did not complete and it will again call
- * ext3_truncate() to have another go.  So there will be instantiated blocks
- * to the right of the truncation point in a crashed ext3 filesystem.  But
- * that's fine - as long as they are linked from the inode, the post-crash
- * ext3_truncate() run will find them and release them.
- */
-void ext3_truncate(struct inode *inode)
-{
-	handle_t *handle;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *i_data = ei->i_data;
-	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	__le32 nr = 0;
-	int n;
-	long last_block;
-	unsigned blocksize = inode->i_sb->s_blocksize;
-
-	trace_ext3_truncate_enter(inode);
-
-	if (!ext3_can_truncate(inode))
-		goto out_notrans;
-
-	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
-		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle))
-		goto out_notrans;
-
-	last_block = (inode->i_size + blocksize-1)
-					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
-	n = ext3_block_to_path(inode, last_block, offsets, NULL);
-	if (n == 0)
-		goto out_stop;	/* error */
-
-	/*
-	 * OK.  This truncate is going to happen.  We add the inode to the
-	 * orphan list, so that if this truncate spans multiple transactions,
-	 * and we crash, we will resume the truncate when the filesystem
-	 * recovers.  It also marks the inode dirty, to catch the new size.
-	 *
-	 * Implication: the file must always be in a sane, consistent
-	 * truncatable state while each transaction commits.
-	 */
-	if (ext3_orphan_add(handle, inode))
-		goto out_stop;
-
-	/*
-	 * The orphan list entry will now protect us from any crash which
-	 * occurs before the truncate completes, so it is now safe to propagate
-	 * the new, shorter inode size (held for now in i_size) into the
-	 * on-disk inode. We do this via i_disksize, which is the value which
-	 * ext3 *really* writes onto the disk inode.
-	 */
-	ei->i_disksize = inode->i_size;
-
-	/*
-	 * From here we block out all ext3_get_block() callers who want to
-	 * modify the block allocation tree.
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	if (n == 1) {		/* direct blocks */
-		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
-			       i_data + EXT3_NDIR_BLOCKS);
-		goto do_indirects;
-	}
-
-	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
-	/* Kill the top of shared branch (not detached) */
-	if (nr) {
-		if (partial == chain) {
-			/* Shared branch grows from the inode */
-			ext3_free_branches(handle, inode, NULL,
-					   &nr, &nr+1, (chain+n-1) - partial);
-			*partial->p = 0;
-			/*
-			 * We mark the inode dirty prior to restart,
-			 * and prior to stop.  No need for it here.
-			 */
-		} else {
-			/* Shared branch grows from an indirect block */
-			ext3_free_branches(handle, inode, partial->bh,
-					partial->p,
-					partial->p+1, (chain+n-1) - partial);
-		}
-	}
-	/* Clear the ends of indirect blocks on the shared branch */
-	while (partial > chain) {
-		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
-				   (__le32*)partial->bh->b_data+addr_per_block,
-				   (chain+n-1) - partial);
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse (partial->bh);
-		partial--;
-	}
-do_indirects:
-	/* Kill the remaining (whole) subtrees */
-	switch (offsets[0]) {
-	default:
-		nr = i_data[EXT3_IND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
-			i_data[EXT3_IND_BLOCK] = 0;
-		}
-	case EXT3_IND_BLOCK:
-		nr = i_data[EXT3_DIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
-			i_data[EXT3_DIND_BLOCK] = 0;
-		}
-	case EXT3_DIND_BLOCK:
-		nr = i_data[EXT3_TIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
-			i_data[EXT3_TIND_BLOCK] = 0;
-		}
-	case EXT3_TIND_BLOCK:
-		;
-	}
-
-	ext3_discard_reservation(inode);
-
-	mutex_unlock(&ei->truncate_mutex);
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-
-	/*
-	 * In a multi-transaction truncate, we only make the final transaction
-	 * synchronous
-	 */
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-out_stop:
-	/*
-	 * If this was a simple ftruncate(), and the file will remain alive
-	 * then we need to clear up the orphan record which we created above.
-	 * However, if this was a real unlink then we were called by
-	 * ext3_evict_inode(), and we allow that function to clean up the
-	 * orphan info for us.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(handle, inode);
-
-	ext3_journal_stop(handle);
-	trace_ext3_truncate_exit(inode);
-	return;
-out_notrans:
-	/*
-	 * Delete the inode from orphan list so that it doesn't stay there
-	 * forever and trigger assertion on umount.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(NULL, inode);
-	trace_ext3_truncate_exit(inode);
-}
-
-static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
-		unsigned long ino, struct ext3_iloc *iloc)
-{
-	unsigned long block_group;
-	unsigned long offset;
-	ext3_fsblk_t block;
-	struct ext3_group_desc *gdp;
-
-	if (!ext3_valid_inum(sb, ino)) {
-		/*
-		 * This error is already checked for in namei.c unless we are
-		 * looking at an NFS filehandle, in which case no error
-		 * report is needed
-		 */
-		return 0;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	gdp = ext3_get_group_desc(sb, block_group, NULL);
-	if (!gdp)
-		return 0;
-	/*
-	 * Figure out the offset within the block group inode table
-	 */
-	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
-		EXT3_INODE_SIZE(sb);
-	block = le32_to_cpu(gdp->bg_inode_table) +
-		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
-
-	iloc->block_group = block_group;
-	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
-	return block;
-}
-
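A worked example of the inode-location arithmetic above, with made-up geometry (the real values come from the superblock and group descriptor): inode 4242 with 2000 inodes per group, 128-byte inodes, 1K blocks, and a hypothetical inode table starting at block 500 lands in group 2, block 530, at offset 128 within that block.

	#include <stdio.h>

	int main(void)
	{
		unsigned long ino = 4242;		/* hypothetical inode number */
		unsigned long inodes_per_group = 2000;	/* from the superblock */
		unsigned long inode_size = 128;
		unsigned long block_size = 1024;
		unsigned long inode_table = 500;	/* hypothetical bg_inode_table */

		unsigned long group  = (ino - 1) / inodes_per_group;
		unsigned long offset = ((ino - 1) % inodes_per_group) * inode_size;
		unsigned long block  = inode_table + offset / block_size;

		printf("group %lu, block %lu, offset-in-block %lu\n",
		       group, block, offset % block_size);	/* 2, 530, 128 */
		return 0;
	}
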
-/*
- * ext3_get_inode_loc returns with an extra refcount against the inode's
- * underlying buffer_head on success. If 'in_mem' is true, we have all
- * data in memory that is needed to recreate the on-disk version of this
- * inode.
- */
-static int __ext3_get_inode_loc(struct inode *inode,
-				struct ext3_iloc *iloc, int in_mem)
-{
-	ext3_fsblk_t block;
-	struct buffer_head *bh;
-
-	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
-	if (!block)
-		return -EIO;
-
-	bh = sb_getblk(inode->i_sb, block);
-	if (unlikely(!bh)) {
-		ext3_error (inode->i_sb, "ext3_get_inode_loc",
-				"unable to read inode block - "
-				"inode=%lu, block="E3FSBLK,
-				 inode->i_ino, block);
-		return -ENOMEM;
-	}
-	if (!buffer_uptodate(bh)) {
-		lock_buffer(bh);
-
-		/*
-		 * If the buffer has the write error flag, we have failed
-		 * to write out another inode in the same block.  In this
-		 * case, we don't have to read the block because we may
-		 * read the old inode data successfully.
-		 */
-		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
-			set_buffer_uptodate(bh);
-
-		if (buffer_uptodate(bh)) {
-			/* someone brought it uptodate while we waited */
-			unlock_buffer(bh);
-			goto has_buffer;
-		}
-
-		/*
-		 * If we have all information of the inode in memory and this
-		 * is the only valid inode in the block, we need not read the
-		 * block.
-		 */
-		if (in_mem) {
-			struct buffer_head *bitmap_bh;
-			struct ext3_group_desc *desc;
-			int inodes_per_buffer;
-			int inode_offset, i;
-			int block_group;
-			int start;
-
-			block_group = (inode->i_ino - 1) /
-					EXT3_INODES_PER_GROUP(inode->i_sb);
-			inodes_per_buffer = bh->b_size /
-				EXT3_INODE_SIZE(inode->i_sb);
-			inode_offset = ((inode->i_ino - 1) %
-					EXT3_INODES_PER_GROUP(inode->i_sb));
-			start = inode_offset & ~(inodes_per_buffer - 1);
-
-			/* Is the inode bitmap in cache? */
-			desc = ext3_get_group_desc(inode->i_sb,
-						block_group, NULL);
-			if (!desc)
-				goto make_io;
-
-			bitmap_bh = sb_getblk(inode->i_sb,
-					le32_to_cpu(desc->bg_inode_bitmap));
-			if (unlikely(!bitmap_bh))
-				goto make_io;
-
-			/*
-			 * If the inode bitmap isn't in cache then the
-			 * optimisation may end up performing two reads instead
-			 * of one, so skip it.
-			 */
-			if (!buffer_uptodate(bitmap_bh)) {
-				brelse(bitmap_bh);
-				goto make_io;
-			}
-			for (i = start; i < start + inodes_per_buffer; i++) {
-				if (i == inode_offset)
-					continue;
-				if (ext3_test_bit(i, bitmap_bh->b_data))
-					break;
-			}
-			brelse(bitmap_bh);
-			if (i == start + inodes_per_buffer) {
-				/* all other inodes are free, so skip I/O */
-				memset(bh->b_data, 0, bh->b_size);
-				set_buffer_uptodate(bh);
-				unlock_buffer(bh);
-				goto has_buffer;
-			}
-		}
-
-make_io:
-		/*
-		 * There are other valid inodes in the buffer, this inode
-		 * has in-inode xattrs, or we don't have this inode in memory.
-		 * Read the block from disk.
-		 */
-		trace_ext3_load_inode(inode);
-		get_bh(bh);
-		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ | REQ_META | REQ_PRIO, bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			ext3_error(inode->i_sb, "ext3_get_inode_loc",
-					"unable to read inode block - "
-					"inode=%lu, block="E3FSBLK,
-					inode->i_ino, block);
-			brelse(bh);
-			return -EIO;
-		}
-	}
-has_buffer:
-	iloc->bh = bh;
-	return 0;
-}
-
-int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
-{
-	/* We have all inode data except xattrs in memory here. */
-	return __ext3_get_inode_loc(inode, iloc,
-		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
-}
-
-void ext3_set_inode_flags(struct inode *inode)
-{
-	unsigned int flags = EXT3_I(inode)->i_flags;
-
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
-	if (flags & EXT3_SYNC_FL)
-		inode->i_flags |= S_SYNC;
-	if (flags & EXT3_APPEND_FL)
-		inode->i_flags |= S_APPEND;
-	if (flags & EXT3_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
-	if (flags & EXT3_NOATIME_FL)
-		inode->i_flags |= S_NOATIME;
-	if (flags & EXT3_DIRSYNC_FL)
-		inode->i_flags |= S_DIRSYNC;
-}
-
-/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
-void ext3_get_inode_flags(struct ext3_inode_info *ei)
-{
-	unsigned int flags = ei->vfs_inode.i_flags;
-
-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
-	if (flags & S_SYNC)
-		ei->i_flags |= EXT3_SYNC_FL;
-	if (flags & S_APPEND)
-		ei->i_flags |= EXT3_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT3_IMMUTABLE_FL;
-	if (flags & S_NOATIME)
-		ei->i_flags |= EXT3_NOATIME_FL;
-	if (flags & S_DIRSYNC)
-		ei->i_flags |= EXT3_DIRSYNC_FL;
-}
-
-struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
-{
-	struct ext3_iloc iloc;
-	struct ext3_inode *raw_inode;
-	struct ext3_inode_info *ei;
-	struct buffer_head *bh;
-	struct inode *inode;
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-	transaction_t *transaction;
-	long ret;
-	int block;
-	uid_t i_uid;
-	gid_t i_gid;
-
-	inode = iget_locked(sb, ino);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
-		return inode;
-
-	ei = EXT3_I(inode);
-	ei->i_block_alloc_info = NULL;
-
-	ret = __ext3_get_inode_loc(inode, &iloc, 0);
-	if (ret < 0)
-		goto bad_inode;
-	bh = iloc.bh;
-	raw_inode = ext3_raw_inode(&iloc);
-	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-	if(!(test_opt (inode->i_sb, NO_UID32))) {
-		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
-	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
-	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
-	inode->i_size = le32_to_cpu(raw_inode->i_size);
-	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
-	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
-	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
-	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
-	ei->i_state_flags = 0;
-	ei->i_dir_start_lookup = 0;
-	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
-	/* We now have enough fields to check if the inode was active or not.
-	 * This is needed because nfsd might try to access dead inodes
-	 * the test is the same one that e2fsck uses
-	 * NeilBrown 1999oct15
-	 */
-	if (inode->i_nlink == 0) {
-		if (inode->i_mode == 0 ||
-		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
-			/* this inode is deleted */
-			brelse (bh);
-			ret = -ESTALE;
-			goto bad_inode;
-		}
-		/* The only unlinked inodes we let through here have
-		 * valid i_mode and are being read by the orphan
-		 * recovery code: that's fine, we're about to complete
-		 * the process of deleting those. */
-	}
-	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
-	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
-	ei->i_frag_no = raw_inode->i_frag;
-	ei->i_frag_size = raw_inode->i_fsize;
-#endif
-	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
-	} else {
-		inode->i_size |=
-			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
-	}
-	ei->i_disksize = inode->i_size;
-	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
-	ei->i_block_group = iloc.block_group;
-	/*
-	 * NOTE! The in-memory inode i_data array is in little-endian order
-	 * even on big-endian machines: we do NOT byteswap the block numbers!
-	 */
-	for (block = 0; block < EXT3_N_BLOCKS; block++)
-		ei->i_data[block] = raw_inode->i_block[block];
-	INIT_LIST_HEAD(&ei->i_orphan);
-
-	/*
-	 * Set transaction id's of transactions that have to be committed
-	 * to finish f[data]sync. We set them to currently running transaction
-	 * as we cannot be sure that the inode or some of its metadata isn't
-	 * part of the transaction - the inode could have been reclaimed and
-	 * now it is reread from disk.
-	 */
-	if (journal) {
-		tid_t tid;
-
-		spin_lock(&journal->j_state_lock);
-		if (journal->j_running_transaction)
-			transaction = journal->j_running_transaction;
-		else
-			transaction = journal->j_committing_transaction;
-		if (transaction)
-			tid = transaction->t_tid;
-		else
-			tid = journal->j_commit_sequence;
-		spin_unlock(&journal->j_state_lock);
-		atomic_set(&ei->i_sync_tid, tid);
-		atomic_set(&ei->i_datasync_tid, tid);
-	}
-
-	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
-	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		/*
-		 * When mke2fs creates big inodes it does not zero out
-		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
-		 * so ignore those first few inodes.
-		 */
-		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-		    EXT3_INODE_SIZE(inode->i_sb)) {
-			brelse (bh);
-			ret = -EIO;
-			goto bad_inode;
-		}
-		if (ei->i_extra_isize == 0) {
-			/* The extra space is currently unused. Use it. */
-			ei->i_extra_isize = sizeof(struct ext3_inode) -
-					    EXT3_GOOD_OLD_INODE_SIZE;
-		} else {
-			__le32 *magic = (void *)raw_inode +
-					EXT3_GOOD_OLD_INODE_SIZE +
-					ei->i_extra_isize;
-			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-		}
-	} else
-		ei->i_extra_isize = 0;
-
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &ext3_dir_inode_operations;
-		inode->i_fop = &ext3_dir_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		if (ext3_inode_is_fast_symlink(inode)) {
-			inode->i_op = &ext3_fast_symlink_inode_operations;
-			nd_terminate_link(ei->i_data, inode->i_size,
-				sizeof(ei->i_data) - 1);
-			inode->i_link = (char *)ei->i_data;
-		} else {
-			inode->i_op = &ext3_symlink_inode_operations;
-			ext3_set_aops(inode);
-		}
-	} else {
-		inode->i_op = &ext3_special_inode_operations;
-		if (raw_inode->i_block[0])
-			init_special_inode(inode, inode->i_mode,
-			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
-		else
-			init_special_inode(inode, inode->i_mode,
-			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
-	}
-	brelse (iloc.bh);
-	ext3_set_inode_flags(inode);
-	unlock_new_inode(inode);
-	return inode;
-
-bad_inode:
-	iget_failed(inode);
-	return ERR_PTR(ret);
-}
-
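One detail from ext3_iget() worth illustrating: unless the NO_UID32 mount option is set, the on-disk uid/gid are stored as 16-bit low/high halves and recombined on load. A self-contained sketch with hypothetical raw values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t i_uid_low  = 0x86a0;	/* hypothetical on-disk halves */
		uint16_t i_uid_high = 0x0001;
		uint32_t uid = (uint32_t)i_uid_low | ((uint32_t)i_uid_high << 16);

		printf("uid = %u\n", uid);	/* 0x000186a0 == 100000 */
		return 0;
	}
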
-/*
- * Post the struct inode info into an on-disk inode location in the
- * buffer-cache.  This gobbles the caller's reference to the
- * buffer_head in the inode location struct.
- *
- * The caller must have write access to iloc->bh.
- */
-static int ext3_do_update_inode(handle_t *handle,
-				struct inode *inode,
-				struct ext3_iloc *iloc)
-{
-	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct buffer_head *bh = iloc->bh;
-	int err = 0, rc, block;
-	int need_datasync = 0;
-	__le32 disksize;
-	uid_t i_uid;
-	gid_t i_gid;
-
-again:
-	/* we can't allow multiple procs in here at once, it's a bit racy */
-	lock_buffer(bh);
-
-	/* For fields not tracked in the in-memory inode,
-	 * initialise them to zero for new inodes. */
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-
-	ext3_get_inode_flags(ei);
-	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	i_uid = i_uid_read(inode);
-	i_gid = i_gid_read(inode);
-	if(!(test_opt(inode->i_sb, NO_UID32))) {
-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
-/*
- * Fix up interoperability with old kernels. Otherwise, old inodes get
- * re-used with the upper 16 bits of the uid/gid intact
- */
-		if(!ei->i_dtime) {
-			raw_inode->i_uid_high =
-				cpu_to_le16(high_16_bits(i_uid));
-			raw_inode->i_gid_high =
-				cpu_to_le16(high_16_bits(i_gid));
-		} else {
-			raw_inode->i_uid_high = 0;
-			raw_inode->i_gid_high = 0;
-		}
-	} else {
-		raw_inode->i_uid_low =
-			cpu_to_le16(fs_high2lowuid(i_uid));
-		raw_inode->i_gid_low =
-			cpu_to_le16(fs_high2lowgid(i_gid));
-		raw_inode->i_uid_high = 0;
-		raw_inode->i_gid_high = 0;
-	}
-	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-	disksize = cpu_to_le32(ei->i_disksize);
-	if (disksize != raw_inode->i_size) {
-		need_datasync = 1;
-		raw_inode->i_size = disksize;
-	}
-	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
-	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-#ifdef EXT3_FRAGMENTS
-	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
-	raw_inode->i_frag = ei->i_frag_no;
-	raw_inode->i_fsize = ei->i_frag_size;
-#endif
-	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
-	} else {
-		disksize = cpu_to_le32(ei->i_disksize >> 32);
-		if (disksize != raw_inode->i_size_high) {
-			raw_inode->i_size_high = disksize;
-			need_datasync = 1;
-		}
-		if (ei->i_disksize > 0x7fffffffULL) {
-			struct super_block *sb = inode->i_sb;
-			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
-			    EXT3_SB(sb)->s_es->s_rev_level ==
-					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
-			       /* If this is the first large file
-				* created, add a flag to the superblock.
-				*/
-				unlock_buffer(bh);
-				err = ext3_journal_get_write_access(handle,
-						EXT3_SB(sb)->s_sbh);
-				if (err)
-					goto out_brelse;
-
-				ext3_update_dynamic_rev(sb);
-				EXT3_SET_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
-				handle->h_sync = 1;
-				err = ext3_journal_dirty_metadata(handle,
-						EXT3_SB(sb)->s_sbh);
-				/* get our lock and start over */
-				goto again;
-			}
-		}
-	}
-	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-		if (old_valid_dev(inode->i_rdev)) {
-			raw_inode->i_block[0] =
-				cpu_to_le32(old_encode_dev(inode->i_rdev));
-			raw_inode->i_block[1] = 0;
-		} else {
-			raw_inode->i_block[0] = 0;
-			raw_inode->i_block[1] =
-				cpu_to_le32(new_encode_dev(inode->i_rdev));
-			raw_inode->i_block[2] = 0;
-		}
-	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
-		raw_inode->i_block[block] = ei->i_data[block];
-
-	if (ei->i_extra_isize)
-		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	unlock_buffer(bh);
-	rc = ext3_journal_dirty_metadata(handle, bh);
-	if (!err)
-		err = rc;
-	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-
-	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
-	if (need_datasync)
-		atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-out_brelse:
-	brelse (bh);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
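The i_size/i_size_high handling above splits the 64-bit i_disksize across two 32-bit on-disk fields, and any size above 0x7fffffff also forces the LARGE_FILE ro-compat feature. A worked example for a hypothetical 6 GiB file:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t disksize = 6ULL << 30;			/* 6 GiB */
		uint32_t i_size      = (uint32_t)disksize;	/* low 32 bits */
		uint32_t i_size_high = (uint32_t)(disksize >> 32);

		printf("i_size=0x%08x i_size_high=0x%08x\n", i_size, i_size_high);
		/* 6 GiB > 0x7fffffff, so LARGE_FILE must be set in the superblock */
		return 0;
	}
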
-/*
- * ext3_write_inode()
- *
- * We are called from a few places:
- *
- * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
- *   Here, there will be no transaction running. We wait for any running
- *   transaction to commit.
- *
- * - Within flush work (for sys_sync(), kupdate and such).
- *   We wait on commit, if told to.
- *
- * - Within iput_final() -> write_inode_now()
- *   We wait on commit, if told to.
- *
- * In all cases it is actually safe for us to return without doing anything,
- * because the inode has been copied into a raw inode buffer in
- * ext3_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
- * writeback.
- *
- * Note that we are absolutely dependent upon all inode dirtiers doing the
- * right thing: they *must* call mark_inode_dirty() after dirtying info in
- * which we are interested.
- *
- * It would be a bug for them to not do this.  The code:
- *
- *	mark_inode_dirty(inode)
- *	stuff();
- *	inode->i_size = expr;
- *
- * is in error because write_inode() could occur while `stuff()' is running,
- * and the new i_size will be lost.  Plus the inode will no longer be on the
- * superblock's dirty inode list.
- */
-int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
-		return 0;
-
-	if (ext3_journal_current_handle()) {
-		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
-		dump_stack();
-		return -EIO;
-	}
-
-	/*
-	 * No need to force transaction in WB_SYNC_NONE mode. Also
-	 * ext3_sync_fs() will force the commit after everything is
-	 * written.
-	 */
-	if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
-		return 0;
-
-	return ext3_force_commit(inode->i_sb);
-}
-
-/*
- * ext3_setattr()
- *
- * Called from notify_change.
- *
- * We want to trap VFS attempts to truncate the file as soon as
- * possible.  In particular, we want to make sure that when the VFS
- * shrinks i_size, we put the inode on the orphan list and modify
- * i_disksize immediately, so that during the subsequent flushing of
- * dirty pages and freeing of disk blocks, we can guarantee that any
- * commit will leave the blocks being flushed in an unused state on
- * disk.  (On recovery, the inode will get truncated and the blocks will
- * be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)
- *
- * Called with inode->sem down.
- */
-int ext3_setattr(struct dentry *dentry, struct iattr *attr)
-{
-	struct inode *inode = d_inode(dentry);
-	int error, rc = 0;
-	const unsigned int ia_valid = attr->ia_valid;
-
-	error = inode_change_ok(inode, attr);
-	if (error)
-		return error;
-
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
-	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
-		handle_t *handle;
-
-		/* (user+group)*(old+new) structure, inode write (sb,
-		 * inode block, ? - but truncate inode update has it) */
-		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
-					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-		error = dquot_transfer(inode, attr);
-		if (error) {
-			ext3_journal_stop(handle);
-			return error;
-		}
-		/* Update corresponding info in inode so that everything is in
-		 * one transaction */
-		if (attr->ia_valid & ATTR_UID)
-			inode->i_uid = attr->ia_uid;
-		if (attr->ia_valid & ATTR_GID)
-			inode->i_gid = attr->ia_gid;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-	}
-
-	if (attr->ia_valid & ATTR_SIZE)
-		inode_dio_wait(inode);
-
-	if (S_ISREG(inode->i_mode) &&
-	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
-		handle_t *handle;
-
-		handle = ext3_journal_start(inode, 3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-
-		error = ext3_orphan_add(handle, inode);
-		if (error) {
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-		EXT3_I(inode)->i_disksize = attr->ia_size;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-		if (error) {
-			/* Some hard fs error must have happened. Bail out. */
-			ext3_orphan_del(NULL, inode);
-			goto err_out;
-		}
-		rc = ext3_block_truncate_page(inode, attr->ia_size);
-		if (rc) {
-			/* Cleanup orphan list and exit */
-			handle = ext3_journal_start(inode, 3);
-			if (IS_ERR(handle)) {
-				ext3_orphan_del(NULL, inode);
-				goto err_out;
-			}
-			ext3_orphan_del(handle, inode);
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-	}
-
-	if ((attr->ia_valid & ATTR_SIZE) &&
-	    attr->ia_size != i_size_read(inode)) {
-		truncate_setsize(inode, attr->ia_size);
-		ext3_truncate(inode);
-	}
-
-	setattr_copy(inode, attr);
-	mark_inode_dirty(inode);
-
-	if (ia_valid & ATTR_MODE)
-		rc = posix_acl_chmod(inode, inode->i_mode);
-
-err_out:
-	ext3_std_error(inode->i_sb, error);
-	if (!error)
-		error = rc;
-	return error;
-}
-
-
-/*
- * How many blocks doth make a writepage()?
- *
- * With N blocks per page, it may be:
- * N data blocks
- * 2 indirect blocks
- * 2 dindirect
- * 1 tindirect
- * N+5 bitmap blocks (from the above)
- * N+5 group descriptor summary blocks
- * 1 inode block
- * 1 superblock.
- * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
- *
- * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
- *
- * With ordered or writeback data it's the same, less the N data blocks.
- *
- * If the inode's direct blocks can hold an integral number of pages then a
- * page cannot straddle two indirect blocks, and we can only touch one indirect
- * and dindirect block, and the "5" above becomes "3".
- *
- * This still overestimates under most circumstances.  If we were to pass the
- * start and end offsets in here as well we could do block_to_path() on each
- * block and work out the exact number of indirects which are touched.  Pah.
- */
-
-static int ext3_writepage_trans_blocks(struct inode *inode)
-{
-	int bpp = ext3_journal_blocks_per_page(inode);
-	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
-	int ret;
-
-	if (ext3_should_journal_data(inode))
-		ret = 3 * (bpp + indirects) + 2;
-	else
-		ret = 2 * (bpp + indirects) + indirects + 2;
-
-#ifdef CONFIG_QUOTA
-	/* We know the structure was already allocated during dquot_initialize,
-	 * so we will be updating only the data blocks + inodes */
-	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-#endif
-
-	return ret;
-}
-
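To see what the credit formula in the comment above works out to, here is a small user-space calculation (the quota term is omitted), assuming a 4K page with 1K blocks, so bpp = 4 and EXT3_NDIR_BLOCKS % bpp == 0:

	#include <stdio.h>

	/* Mirrors ext3_writepage_trans_blocks() above, minus the quota term. */
	static int trans_blocks(int bpp, int journal_data)
	{
		int indirects = (12 % bpp) ? 5 : 3;	/* EXT3_NDIR_BLOCKS == 12 */

		if (journal_data)
			return 3 * (bpp + indirects) + 2;
		return 2 * (bpp + indirects) + indirects + 2;
	}

	int main(void)
	{
		/* 4K page, 1K blocks: bpp = 4, 12 % 4 == 0, so indirects = 3 */
		printf("data=journal: %d credits\n", trans_blocks(4, 1));	/* 23 */
		printf("data=ordered: %d credits\n", trans_blocks(4, 0));	/* 19 */
		return 0;
	}
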
-/*
- * The caller must have previously called ext3_reserve_inode_write().
- * Given this, we know that the caller already has write access to iloc->bh.
- */
-int ext3_mark_iloc_dirty(handle_t *handle,
-		struct inode *inode, struct ext3_iloc *iloc)
-{
-	int err = 0;
-
-	/* the do_update_inode consumes one bh->b_count */
-	get_bh(iloc->bh);
-
-	/* ext3_do_update_inode() does journal_dirty_metadata */
-	err = ext3_do_update_inode(handle, inode, iloc);
-	put_bh(iloc->bh);
-	return err;
-}
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			 struct ext3_iloc *iloc)
-{
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc->bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, iloc->bh);
-			if (err) {
-				brelse(iloc->bh);
-				iloc->bh = NULL;
-			}
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * What we do here is to mark the in-core inode as clean with respect to inode
- * dirtiness (it may still be data-dirty).
- * This means that the in-core inode may be reaped by prune_icache
- * without having to perform any I/O.  This is a very good thing,
- * because *any* task may call prune_icache - even ones which
- * have a transaction open against a different journal.
- *
- * Is this cheating?  Not really.  Sure, we haven't written the
- * inode out, but prune_icache isn't a user-visible syncing function.
- * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
- * we start and wait on commits.
- */
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-	int err;
-
-	might_sleep();
-	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (!err)
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	return err;
-}
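
ext3_reserve_inode_write() and ext3_mark_iloc_dirty() together form the usual reserve/modify/dirty pattern for callers that need to change the in-core inode between the two steps (the EXT3_IOC_SETVERSION handler in ioctl.c below is one real user). A hypothetical caller, not taken from this file, might look like:

	static int example_touch_ctime(handle_t *handle, struct inode *inode)
	{
		struct ext3_iloc iloc;
		int err;

		/* Reserve journal write access to the inode's buffer. */
		err = ext3_reserve_inode_write(handle, inode, &iloc);
		if (err)
			return err;

		/* Modify the in-core inode while the reservation is held. */
		inode->i_ctime = CURRENT_TIME_SEC;

		/* Copy it to the buffer and journal the metadata block. */
		return ext3_mark_iloc_dirty(handle, inode, &iloc);
	}
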
-
-/*
- * ext3_dirty_inode() is called from __mark_inode_dirty()
- *
- * We're really interested in the case where a file is being extended.
- * i_size has been changed by generic_commit_write() and we thus need
- * to include the updated inode in the current transaction.
- *
- * Also, dquot_alloc_space() will always dirty the inode when blocks
- * are allocated to the file.
- *
- * If the inode is marked synchronous, we don't honour that here - doing
- * so would cause a commit on atime updates, which we don't bother doing.
- * We handle synchronous inodes at the highest possible level.
- */
-void ext3_dirty_inode(struct inode *inode, int flags)
-{
-	handle_t *current_handle = ext3_journal_current_handle();
-	handle_t *handle;
-
-	handle = ext3_journal_start(inode, 2);
-	if (IS_ERR(handle))
-		goto out;
-	if (current_handle &&
-		current_handle->h_transaction != handle->h_transaction) {
-		/* This task has a transaction open against a different fs */
-		printk(KERN_EMERG "%s: transactions do not match!\n",
-		       __func__);
-	} else {
-		jbd_debug(5, "marking dirty.  outer handle=%p\n",
-				current_handle);
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	ext3_journal_stop(handle);
-out:
-	return;
-}
-
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early.  Unlike
- * ext3_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext3_pin_inode(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, &iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc.bh, "get_write_access");
-			err = journal_get_write_access(handle, iloc.bh);
-			if (!err)
-				err = ext3_journal_dirty_metadata(handle,
-								  iloc.bh);
-			brelse(iloc.bh);
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-#endif
-
-int ext3_change_inode_journal_flag(struct inode *inode, int val)
-{
-	journal_t *journal;
-	handle_t *handle;
-	int err;
-
-	/*
-	 * We have to be very careful here: changing a data block's
-	 * journaling status dynamically is dangerous.  If we write a
-	 * data block to the journal, change the status and then delete
-	 * that block, we risk forgetting to revoke the old log record
-	 * from the journal and so a subsequent replay can corrupt data.
-	 * So, first we make sure that the journal is empty and that
-	 * nobody is changing anything.
-	 */
-
-	journal = EXT3_JOURNAL(inode);
-	if (is_journal_aborted(journal))
-		return -EROFS;
-
-	journal_lock_updates(journal);
-	journal_flush(journal);
-
-	/*
-	 * OK, there are no updates running now, and all cached data is
-	 * synced to disk.  We are now in a completely consistent state
-	 * which doesn't have anything in the journal, and we know that
-	 * no filesystem updates are running, so it is safe to modify
-	 * the inode's in-core data-journaling state flag now.
-	 */
-
-	if (val)
-		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
-	else
-		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
-	ext3_set_aops(inode);
-
-	journal_unlock_updates(journal);
-
-	/* Finally we can mark the inode as dirty. */
-
-	handle = ext3_journal_start(inode, 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	handle->h_sync = 1;
-	ext3_journal_stop(handle);
-	ext3_std_error(inode->i_sb, err);
-
-	return err;
-}
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
deleted file mode 100644
index 4d96e9a..0000000
--- a/fs/ext3/ioctl.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * linux/fs/ext3/ioctl.c
- *
- * Copyright (C) 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include <linux/mount.h>
-#include <linux/compat.h>
-#include <asm/uaccess.h>
-#include "ext3.h"
-
-long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file_inode(filp);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	unsigned int flags;
-	unsigned short rsv_window_size;
-
-	ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
-
-	switch (cmd) {
-	case EXT3_IOC_GETFLAGS:
-		ext3_get_inode_flags(ei);
-		flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
-		return put_user(flags, (int __user *) arg);
-	case EXT3_IOC_SETFLAGS: {
-		handle_t *handle = NULL;
-		int err;
-		struct ext3_iloc iloc;
-		unsigned int oldflags;
-		unsigned int jflag;
-
-		if (!inode_owner_or_capable(inode))
-			return -EACCES;
-
-		if (get_user(flags, (int __user *) arg))
-			return -EFAULT;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		flags = ext3_mask_flags(inode->i_mode, flags);
-
-		mutex_lock(&inode->i_mutex);
-
-		/* Is it quota file? Do not allow user to mess with it */
-		err = -EPERM;
-		if (IS_NOQUOTA(inode))
-			goto flags_out;
-
-		oldflags = ei->i_flags;
-
-		/* The JOURNAL_DATA flag is modifiable only by root */
-		jflag = flags & EXT3_JOURNAL_DATA_FL;
-
-		/*
-		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
-		 * the relevant capability.
-		 *
-		 * This test looks nicer. Thanks to Pauline Middelink
-		 */
-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
-			if (!capable(CAP_LINUX_IMMUTABLE))
-				goto flags_out;
-		}
-
-		/*
-		 * The JOURNAL_DATA flag can only be changed by
-		 * the relevant capability.
-		 */
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
-			if (!capable(CAP_SYS_RESOURCE))
-				goto flags_out;
-		}
-
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto flags_out;
-		}
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err)
-			goto flags_err;
-
-		flags = flags & EXT3_FL_USER_MODIFIABLE;
-		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
-		ei->i_flags = flags;
-
-		ext3_set_inode_flags(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
-
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-flags_err:
-		ext3_journal_stop(handle);
-		if (err)
-			goto flags_out;
-
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
-			err = ext3_change_inode_journal_flag(inode, jflag);
-flags_out:
-		mutex_unlock(&inode->i_mutex);
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETVERSION:
-	case EXT3_IOC_GETVERSION_OLD:
-		return put_user(inode->i_generation, (int __user *) arg);
-	case EXT3_IOC_SETVERSION:
-	case EXT3_IOC_SETVERSION_OLD: {
-		handle_t *handle;
-		struct ext3_iloc iloc;
-		__u32 generation;
-		int err;
-
-		if (!inode_owner_or_capable(inode))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-		if (get_user(generation, (int __user *) arg)) {
-			err = -EFAULT;
-			goto setversion_out;
-		}
-
-		mutex_lock(&inode->i_mutex);
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto unlock_out;
-		}
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err == 0) {
-			inode->i_ctime = CURRENT_TIME_SEC;
-			inode->i_generation = generation;
-			err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-		}
-		ext3_journal_stop(handle);
-
-unlock_out:
-		mutex_unlock(&inode->i_mutex);
-setversion_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETRSVSZ:
-		if (test_opt(inode->i_sb, RESERVATION)
-			&& S_ISREG(inode->i_mode)
-			&& ei->i_block_alloc_info) {
-			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
-			return put_user(rsv_window_size, (int __user *)arg);
-		}
-		return -ENOTTY;
-	case EXT3_IOC_SETRSVSZ: {
-		int err;
-
-		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
-			return -ENOTTY;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (!inode_owner_or_capable(inode)) {
-			err = -EACCES;
-			goto setrsvsz_out;
-		}
-
-		if (get_user(rsv_window_size, (int __user *)arg)) {
-			err = -EFAULT;
-			goto setrsvsz_out;
-		}
-
-		if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
-			rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;
-
-		/*
-		 * need to allocate the reservation structure for this inode
-		 * before setting the window size
-		 */
-		mutex_lock(&ei->truncate_mutex);
-		if (!ei->i_block_alloc_info)
-			ext3_init_block_alloc_info(inode);
-
-		if (ei->i_block_alloc_info){
-			struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
-			rsv->rsv_goal_size = rsv_window_size;
-		}
-		mutex_unlock(&ei->truncate_mutex);
-setrsvsz_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_EXTEND: {
-		ext3_fsblk_t n_blocks_count;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
-			err = -EFAULT;
-			goto group_extend_out;
-		}
-		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_extend_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_ADD: {
-		struct ext3_new_group_data input;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
-				sizeof(input))) {
-			err = -EFAULT;
-			goto group_add_out;
-		}
-
-		err = ext3_group_add(sb, &input);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_add_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case FITRIM: {
-
-		struct super_block *sb = inode->i_sb;
-		struct fstrim_range range;
-		int ret = 0;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
-				   sizeof(range)))
-			return -EFAULT;
-
-		ret = ext3_trim_fs(sb, &range);
-		if (ret < 0)
-			return ret;
-
-		if (copy_to_user((struct fstrim_range __user *)arg, &range,
-				 sizeof(range)))
-			return -EFAULT;
-
-		return 0;
-	}
-
-	default:
-		return -ENOTTY;
-	}
-}
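
From userspace these commands are reached through ioctl(2). EXT3_IOC_GETFLAGS shares its value with the generic FS_IOC_GETFLAGS, so a minimal flags reader (a sketch, not part of this file; note the handler above copies out an int) needs only <linux/fs.h>:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		int fd, flags;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
			perror("ioctl");
			close(fd);
			return 1;
		}
		printf("flags: %#x\n", flags);
		close(fd);
		return 0;
	}
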
-
-#ifdef CONFIG_COMPAT
-long ext3_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	/* These are just misnamed, they actually get/put from/to user an int */
-	switch (cmd) {
-	case EXT3_IOC32_GETFLAGS:
-		cmd = EXT3_IOC_GETFLAGS;
-		break;
-	case EXT3_IOC32_SETFLAGS:
-		cmd = EXT3_IOC_SETFLAGS;
-		break;
-	case EXT3_IOC32_GETVERSION:
-		cmd = EXT3_IOC_GETVERSION;
-		break;
-	case EXT3_IOC32_SETVERSION:
-		cmd = EXT3_IOC_SETVERSION;
-		break;
-	case EXT3_IOC32_GROUP_EXTEND:
-		cmd = EXT3_IOC_GROUP_EXTEND;
-		break;
-	case EXT3_IOC32_GETVERSION_OLD:
-		cmd = EXT3_IOC_GETVERSION_OLD;
-		break;
-	case EXT3_IOC32_SETVERSION_OLD:
-		cmd = EXT3_IOC_SETVERSION_OLD;
-		break;
-#ifdef CONFIG_JBD_DEBUG
-	case EXT3_IOC32_WAIT_FOR_READONLY:
-		cmd = EXT3_IOC_WAIT_FOR_READONLY;
-		break;
-#endif
-	case EXT3_IOC32_GETRSVSZ:
-		cmd = EXT3_IOC_GETRSVSZ;
-		break;
-	case EXT3_IOC32_SETRSVSZ:
-		cmd = EXT3_IOC_SETRSVSZ;
-		break;
-	case EXT3_IOC_GROUP_ADD:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-	return ext3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
-}
-#endif
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
deleted file mode 100644
index c9e767c..0000000
--- a/fs/ext3/namei.c
+++ /dev/null
@@ -1,2586 +0,0 @@
-/*
- *  linux/fs/ext3/namei.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/namei.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  Directory entry file type support and forward compatibility hooks
- *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
- *  Hash Tree Directory indexing (c)
- *	Daniel Phillips, 2001
- *  Hash Tree Directory indexing porting
- *	Christopher Li, 2002
- *  Hash Tree Directory indexing cleanup
- *	Theodore Ts'o, 2002
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "namei.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * define how far ahead to read directories while searching them.
- */
-#define NAMEI_RA_CHUNKS  2
-#define NAMEI_RA_BLOCKS  4
-#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-
-static struct buffer_head *ext3_append(handle_t *handle,
-					struct inode *inode,
-					u32 *block, int *err)
-{
-	struct buffer_head *bh;
-
-	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-
-	if ((bh = ext3_dir_bread(handle, inode, *block, 1, err))) {
-		inode->i_size += inode->i_sb->s_blocksize;
-		EXT3_I(inode)->i_disksize = inode->i_size;
-		*err = ext3_journal_get_write_access(handle, bh);
-		if (*err) {
-			brelse(bh);
-			bh = NULL;
-		}
-	}
-	return bh;
-}
-
-#ifndef assert
-#define assert(test) J_ASSERT(test)
-#endif
-
-#ifdef DX_DEBUG
-#define dxtrace(command) command
-#else
-#define dxtrace(command)
-#endif
-
-struct fake_dirent
-{
-	__le32 inode;
-	__le16 rec_len;
-	u8 name_len;
-	u8 file_type;
-};
-
-struct dx_countlimit
-{
-	__le16 limit;
-	__le16 count;
-};
-
-struct dx_entry
-{
-	__le32 hash;
-	__le32 block;
-};
-
-/*
- * dx_root_info is laid out so that if it should somehow get overlaid by a
- * dirent the two low bits of the hash version will be zero.  Therefore, the
- * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
- */
-
-struct dx_root
-{
-	struct fake_dirent dot;
-	char dot_name[4];
-	struct fake_dirent dotdot;
-	char dotdot_name[4];
-	struct dx_root_info
-	{
-		__le32 reserved_zero;
-		u8 hash_version;
-		u8 info_length; /* 8 */
-		u8 indirect_levels;
-		u8 unused_flags;
-	}
-	info;
-	struct dx_entry	entries[0];
-};
-
-struct dx_node
-{
-	struct fake_dirent fake;
-	struct dx_entry	entries[0];
-};
-
-
-struct dx_frame
-{
-	struct buffer_head *bh;
-	struct dx_entry *entries;
-	struct dx_entry *at;
-};
-
-struct dx_map_entry
-{
-	u32 hash;
-	u16 offs;
-	u16 size;
-};
-
-static inline unsigned dx_get_block (struct dx_entry *entry);
-static void dx_set_block (struct dx_entry *entry, unsigned value);
-static inline unsigned dx_get_hash (struct dx_entry *entry);
-static void dx_set_hash (struct dx_entry *entry, unsigned value);
-static unsigned dx_get_count (struct dx_entry *entries);
-static unsigned dx_get_limit (struct dx_entry *entries);
-static void dx_set_count (struct dx_entry *entries, unsigned value);
-static void dx_set_limit (struct dx_entry *entries, unsigned value);
-static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
-static unsigned dx_node_limit (struct inode *dir);
-static struct dx_frame *dx_probe(struct qstr *entry,
-				 struct inode *dir,
-				 struct dx_hash_info *hinfo,
-				 struct dx_frame *frame,
-				 int *err);
-static void dx_release (struct dx_frame *frames);
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
-		struct dx_map_entry *offsets, int count);
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize);
-static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash);
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err);
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode);
-
-/*
- * p is at least 6 bytes before the end of page
- */
-static inline struct ext3_dir_entry_2 *
-ext3_next_entry(struct ext3_dir_entry_2 *p)
-{
-	return (struct ext3_dir_entry_2 *)((char *)p +
-		ext3_rec_len_from_disk(p->rec_len));
-}
-
-/*
- * Future: use high four bits of block for coalesce-on-delete flags
- * Mask them off for now.
- */
-
-static inline unsigned dx_get_block (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->block) & 0x00ffffff;
-}
-
-static inline void dx_set_block (struct dx_entry *entry, unsigned value)
-{
-	entry->block = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_hash (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->hash);
-}
-
-static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
-{
-	entry->hash = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_count (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
-}
-
-static inline unsigned dx_get_limit (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
-}
-
-static inline void dx_set_count (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
-}
-
-static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
-}
-
-static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
-		EXT3_DIR_REC_LEN(2) - infosize;
-	return entry_space / sizeof(struct dx_entry);
-}
-
-static inline unsigned dx_node_limit (struct inode *dir)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
-	return entry_space / sizeof(struct dx_entry);
-}
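
Plugging in typical numbers makes the index fanout concrete. A standalone sketch, assuming the usual EXT3_DIR_REC_LEN rounding (an 8-byte dirent header, 4-byte alignment) and an 8-byte struct dx_entry:

	#include <stdio.h>

	#define EXT3_DIR_PAD		4
	#define EXT3_DIR_ROUND		(EXT3_DIR_PAD - 1)
	#define EXT3_DIR_REC_LEN(len)	(((len) + 8 + EXT3_DIR_ROUND) & ~EXT3_DIR_ROUND)

	int main(void)
	{
		unsigned blocksize = 4096;
		unsigned infosize = 8;	/* sizeof(struct dx_root_info) */
		unsigned entry = 8;	/* sizeof(struct dx_entry) */

		unsigned root = (blocksize - EXT3_DIR_REC_LEN(1) -
				 EXT3_DIR_REC_LEN(2) - infosize) / entry;
		unsigned node = (blocksize - EXT3_DIR_REC_LEN(0)) / entry;

		printf("root limit %u, node limit %u\n", root, node);	/* 508, 511 */
		return 0;
	}
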
-
-/*
- * Debug
- */
-#ifdef DX_DEBUG
-static void dx_show_index (char * label, struct dx_entry *entries)
-{
-        int i, n = dx_get_count (entries);
-        printk("%s index ", label);
-        for (i = 0; i < n; i++)
-        {
-                printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
-        }
-        printk("\n");
-}
-
-struct stats
-{
-	unsigned names;
-	unsigned space;
-	unsigned bcount;
-};
-
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
-				 int size, int show_names)
-{
-	unsigned names = 0, space = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	printk("names: ");
-	while ((char *) de < base + size)
-	{
-		if (de->inode)
-		{
-			if (show_names)
-			{
-				int len = de->name_len;
-				char *name = de->name;
-				while (len--) printk("%c", *name++);
-				ext3fs_dirhash(de->name, de->name_len, &h);
-				printk(":%x.%u ", h.hash,
-				       (unsigned) ((char *) de - base));
-			}
-			space += EXT3_DIR_REC_LEN(de->name_len);
-			names++;
-		}
-		de = ext3_next_entry(de);
-	}
-	printk("(%i)\n", names);
-	return (struct stats) { names, space, 1 };
-}
-
-struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
-			     struct dx_entry *entries, int levels)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count = dx_get_count (entries), names = 0, space = 0, i;
-	unsigned bcount = 0;
-	struct buffer_head *bh;
-	int err;
-	printk("%i indexed blocks...\n", count);
-	for (i = 0; i < count; i++, entries++)
-	{
-		u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
-		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
-		struct stats stats;
-		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
-		if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
-		stats = levels?
-		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
-		   dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
-		names += stats.names;
-		space += stats.space;
-		bcount += stats.bcount;
-		brelse (bh);
-	}
-	if (bcount)
-		printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
-			names, space/bcount,(space/bcount)*100/blocksize);
-	return (struct stats) { names, space, bcount};
-}
-#endif /* DX_DEBUG */
-
-/*
- * Probe for a directory leaf block to search.
- *
- * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
- * error in the directory index, and the caller should fall back to
- * searching the directory normally.  The callers of dx_probe **MUST**
- * check for this error code, and make sure it never gets reflected
- * back to userspace.
- */
-static struct dx_frame *
-dx_probe(struct qstr *entry, struct inode *dir,
-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-{
-	unsigned count, indirect;
-	struct dx_entry *at, *entries, *p, *q, *m;
-	struct dx_root *root;
-	struct buffer_head *bh;
-	struct dx_frame *frame = frame_in;
-	u32 hash;
-
-	frame->bh = NULL;
-	if (!(bh = ext3_dir_bread(NULL, dir, 0, 0, err))) {
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	root = (struct dx_root *) bh->b_data;
-	if (root->info.hash_version != DX_HASH_TEA &&
-	    root->info.hash_version != DX_HASH_HALF_MD4 &&
-	    root->info.hash_version != DX_HASH_LEGACY) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unrecognised inode hash code %d",
-			     root->info.hash_version);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	hinfo->hash_version = root->info.hash_version;
-	if (hinfo->hash_version <= DX_HASH_TEA)
-		hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	if (entry)
-		ext3fs_dirhash(entry->name, entry->len, hinfo);
-	hash = hinfo->hash;
-
-	if (root->info.unused_flags & 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash flags: %#06x",
-			     root->info.unused_flags);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	if ((indirect = root->info.indirect_levels) > 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash depth: %#06x",
-			     root->info.indirect_levels);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	entries = (struct dx_entry *) (((char *)&root->info) +
-				       root->info.info_length);
-
-	if (dx_get_limit(entries) != dx_root_limit(dir,
-						   root->info.info_length)) {
-		ext3_warning(dir->i_sb, __func__,
-			     "dx entry: limit != root limit");
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	dxtrace (printk("Look up %x", hash));
-	while (1)
-	{
-		count = dx_get_count(entries);
-		if (!count || count > dx_get_limit(entries)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: no count or count > limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-
-		p = entries + 1;
-		q = entries + count - 1;
-		while (p <= q)
-		{
-			m = p + (q - p)/2;
-			dxtrace(printk("."));
-			if (dx_get_hash(m) > hash)
-				q = m - 1;
-			else
-				p = m + 1;
-		}
-
-		if (0) // linear search cross check
-		{
-			unsigned n = count - 1;
-			at = entries;
-			while (n--)
-			{
-				dxtrace(printk(","));
-				if (dx_get_hash(++at) > hash)
-				{
-					at--;
-					break;
-				}
-			}
-			assert (at == p - 1);
-		}
-
-		at = p - 1;
-		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
-		frame->bh = bh;
-		frame->entries = entries;
-		frame->at = at;
-		if (!indirect--) return frame;
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(at), 0, err))) {
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		at = entries = ((struct dx_node *) bh->b_data)->entries;
-		if (dx_get_limit(entries) != dx_node_limit (dir)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: limit != node limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		frame++;
-		frame->bh = NULL;
-	}
-fail2:
-	while (frame >= frame_in) {
-		brelse(frame->bh);
-		frame--;
-	}
-fail:
-	if (*err == ERR_BAD_DX_DIR)
-		ext3_warning(dir->i_sb, __func__,
-			     "Corrupt dir inode %ld, running e2fsck is "
-			     "recommended.", dir->i_ino);
-	return NULL;
-}
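
The binary search in dx_probe() keeps the invariant that every entry left of p hashes at or below the target and every entry right of q hashes above it, so p - 1 is the last usable slot; slot 0 holds the count/limit header rather than a hash, which is why the search starts at entries + 1. The same invariant over a plain array (a sketch with hypothetical names):

	/* Return the index of the last hash <= target; 0 (the header
	 * slot) if every real hash is greater. */
	static int dx_last_le(const unsigned *hashes, int count, unsigned target)
	{
		int p = 1, q = count - 1, m;

		while (p <= q) {
			m = p + (q - p) / 2;
			if (hashes[m] > target)
				q = m - 1;
			else
				p = m + 1;
		}
		return p - 1;
	}
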
-
-static void dx_release (struct dx_frame *frames)
-{
-	if (frames[0].bh == NULL)
-		return;
-
-	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
-		brelse(frames[1].bh);
-	brelse(frames[0].bh);
-}
-
-/*
- * This function increments the frame pointer to search the next leaf
- * block, and reads in the necessary intervening nodes if the search
- * needs to continue.  Whether or not the search is necessary is
- * controlled by the hash parameter.  If the hash value is even, then
- * the search is only continued if the next block starts with that
- * hash value.  This is used if we are searching for a specific file.
- *
- * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
- *
- * This function returns 1 if the caller should continue to search,
- * or 0 if it should not.  If there is an error reading one of the
- * index blocks, it will return a negative error code.
- *
- * If start_hash is non-null, it will be filled in with the starting
- * hash of the next page.
- */
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash)
-{
-	struct dx_frame *p;
-	struct buffer_head *bh;
-	int err, num_frames = 0;
-	__u32 bhash;
-
-	p = frame;
-	/*
-	 * Find the next leaf page by incrementing the frame pointer.
-	 * If we run out of entries in the interior node, loop around and
-	 * increment pointer in the parent node.  When we break out of
-	 * this loop, num_frames indicates the number of interior
-	 * nodes that need to be read.
-	 */
-	while (1) {
-		if (++(p->at) < p->entries + dx_get_count(p->entries))
-			break;
-		if (p == frames)
-			return 0;
-		num_frames++;
-		p--;
-	}
-
-	/*
-	 * If the hash is 1, then continue only if the next page has a
-	 * continuation hash of any value.  This is used for readdir
-	 * handling.  Otherwise, check to see if the hash matches the
-	 * desired continuation hash.  If it doesn't, return, since
-	 * there's no point in reading the successive index pages.
-	 */
-	bhash = dx_get_hash(p->at);
-	if (start_hash)
-		*start_hash = bhash;
-	if ((hash & 1) == 0) {
-		if ((bhash & ~1) != hash)
-			return 0;
-	}
-	/*
-	 * If the hash is HASH_NB_ALWAYS, we always go to the next
-	 * block so no check is necessary
-	 */
-	while (num_frames--) {
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(p->at),
-					  0, &err)))
-			return err; /* Failure */
-		p++;
-		brelse (p->bh);
-		p->bh = bh;
-		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-	}
-	return 1;
-}
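
The low bit of a stored leaf hash is reserved as a continuation marker (two leaf blocks holding entries with the same hash), which is why the even-hash test above strips it before comparing. The same test in isolation (hypothetical names):

	/* Decide whether the next leaf block can still hold the hash we
	 * are looking up.  Odd lookup hashes mean "always continue".
	 *
	 * e.g. next_block_matches(0x1234, 0x1235) == 1 (continuation)
	 *      next_block_matches(0x1234, 0x1236) == 0 (different hash)
	 */
	static int next_block_matches(unsigned lookup_hash, unsigned stored_hash)
	{
		if (lookup_hash & 1)
			return 1;
		return (stored_hash & ~1u) == lookup_hash;
	}
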
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory block.  It returns the number of directory entries loaded
- * into the tree.  If there is an error it is returned in err.
- */
-static int htree_dirblock_to_tree(struct file *dir_file,
-				  struct inode *dir, int block,
-				  struct dx_hash_info *hinfo,
-				  __u32 start_hash, __u32 start_minor_hash)
-{
-	struct buffer_head *bh;
-	struct ext3_dir_entry_2 *de, *top;
-	int err = 0, count = 0;
-
-	dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
-
-	if (!(bh = ext3_dir_bread(NULL, dir, block, 0, &err)))
-		return err;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	top = (struct ext3_dir_entry_2 *) ((char *) de +
-					   dir->i_sb->s_blocksize -
-					   EXT3_DIR_REC_LEN(0));
-	for (; de < top; de = ext3_next_entry(de)) {
-		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
-					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
-			/* silently ignore the rest of the block */
-			break;
-		}
-		ext3fs_dirhash(de->name, de->name_len, hinfo);
-		if ((hinfo->hash < start_hash) ||
-		    ((hinfo->hash == start_hash) &&
-		     (hinfo->minor_hash < start_minor_hash)))
-			continue;
-		if (de->inode == 0)
-			continue;
-		if ((err = ext3_htree_store_dirent(dir_file,
-				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
-			brelse(bh);
-			return err;
-		}
-		count++;
-	}
-	brelse(bh);
-	return count;
-}
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory.  We start scanning the directory in hash order, starting
- * at start_hash and start_minor_hash.
- *
- * This function returns the number of entries inserted into the tree,
- * or a negative error code.
- */
-int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-			 __u32 start_minor_hash, __u32 *next_hash)
-{
-	struct dx_hash_info hinfo;
-	struct ext3_dir_entry_2 *de;
-	struct dx_frame frames[2], *frame;
-	struct inode *dir;
-	int block, err;
-	int count = 0;
-	int ret;
-	__u32 hashval;
-
-	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
-		       start_minor_hash));
-	dir = file_inode(dir_file);
-	if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
-		hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-		if (hinfo.hash_version <= DX_HASH_TEA)
-			hinfo.hash_version +=
-				EXT3_SB(dir->i_sb)->s_hash_unsigned;
-		hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
-					       start_hash, start_minor_hash);
-		*next_hash = ~0;
-		return count;
-	}
-	hinfo.hash = start_hash;
-	hinfo.minor_hash = 0;
-	frame = dx_probe(NULL, file_inode(dir_file), &hinfo, frames, &err);
-	if (!frame)
-		return err;
-
-	/* Add '.' and '..' from the htree header */
-	if (!start_hash && !start_minor_hash) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		de = ext3_next_entry(de);
-		if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-
-	while (1) {
-		block = dx_get_block(frame->at);
-		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
-					     start_hash, start_minor_hash);
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		count += ret;
-		hashval = ~0;
-		ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
-					    frame, frames, &hashval);
-		*next_hash = hashval;
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		/*
-		 * Stop if:  (a) there are no more entries, or
-		 * (b) we have inserted at least one entry and the
-		 * next hash value is not a continuation
-		 */
-		if ((ret == 0) ||
-		    (count && ((hashval & 1) == 0)))
-			break;
-	}
-	dx_release(frames);
-	dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
-		       count, *next_hash));
-	return count;
-errout:
-	dx_release(frames);
-	return (err);
-}
-
-
-/*
- * Directory block splitting, compacting
- */
-
-/*
- * Create map of hash values, offsets, and sizes, stored at end of block.
- * Returns number of entries mapped.
- */
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-		struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
-{
-	int count = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	while ((char *) de < base + blocksize)
-	{
-		if (de->name_len && de->inode) {
-			ext3fs_dirhash(de->name, de->name_len, &h);
-			map_tail--;
-			map_tail->hash = h.hash;
-			map_tail->offs = (u16) ((char *) de - base);
-			map_tail->size = le16_to_cpu(de->rec_len);
-			count++;
-			cond_resched();
-		}
-		/* XXX: do we need to check rec_len == 0 case? -Chris */
-		de = ext3_next_entry(de);
-	}
-	return count;
-}
-
-/* Sort map by hash value */
-static void dx_sort_map (struct dx_map_entry *map, unsigned count)
-{
-        struct dx_map_entry *p, *q, *top = map + count - 1;
-        int more;
-        /* Combsort until bubble sort doesn't suck */
-        while (count > 2)
-	{
-                count = count*10/13;
-                if (count - 9 < 2) /* 9, 10 -> 11 */
-                        count = 11;
-                for (p = top, q = p - count; q >= map; p--, q--)
-                        if (p->hash < q->hash)
-                                swap(*p, *q);
-        }
-        /* Garden variety bubble sort */
-        do {
-                more = 0;
-                q = top;
-                while (q-- > map)
-		{
-                        if (q[1].hash >= q[0].hash)
-				continue;
-                        swap(*(q+1), *q);
-                        more = 1;
-		}
-	} while(more);
-}
-
-static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
-{
-	struct dx_entry *entries = frame->entries;
-	struct dx_entry *old = frame->at, *new = old + 1;
-	int count = dx_get_count(entries);
-
-	assert(count < dx_get_limit(entries));
-	assert(old < entries + count);
-	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
-	dx_set_hash(new, hash);
-	dx_set_block(new, block);
-	dx_set_count(entries, count + 1);
-}
-
-static void ext3_update_dx_flag(struct inode *inode)
-{
-	if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-				     EXT3_FEATURE_COMPAT_DIR_INDEX))
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-}
-
-/*
- * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
- *
- * `len <= EXT3_NAME_LEN' is guaranteed by caller.
- * `de != NULL' is guaranteed by caller.
- */
-static inline int ext3_match (int len, const char * const name,
-			      struct ext3_dir_entry_2 * de)
-{
-	if (len != de->name_len)
-		return 0;
-	if (!de->inode)
-		return 0;
-	return !memcmp(name, de->name, len);
-}
-
-/*
- * Returns 0 if not found, -1 on failure, and 1 on success
- */
-static inline int search_dirblock(struct buffer_head * bh,
-				  struct inode *dir,
-				  struct qstr *child,
-				  unsigned long offset,
-				  struct ext3_dir_entry_2 ** res_dir)
-{
-	struct ext3_dir_entry_2 * de;
-	char * dlimit;
-	int de_len;
-	const char *name = child->name;
-	int namelen = child->len;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	dlimit = bh->b_data + dir->i_sb->s_blocksize;
-	while ((char *) de < dlimit) {
-		/* this code is executed quadratically often */
-		/* do minimal checking `by hand' */
-
-		if ((char *) de + namelen <= dlimit &&
-		    ext3_match (namelen, name, de)) {
-			/* found a match - just to be sure, do a full check */
-			if (!ext3_check_dir_entry("ext3_find_entry",
-						  dir, de, bh, offset))
-				return -1;
-			*res_dir = de;
-			return 1;
-		}
-		/* prevent looping on a bad block */
-		de_len = ext3_rec_len_from_disk(de->rec_len);
-		if (de_len <= 0)
-			return -1;
-		offset += de_len;
-		de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
-	}
-	return 0;
-}
-
-
-/*
- *	ext3_find_entry()
- *
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
- * entry - you'll have to do that yourself if you want to.
- *
- * The returned buffer_head has ->b_count elevated.  The caller is expected
- * to brelse() it when appropriate.
- */
-static struct buffer_head *ext3_find_entry(struct inode *dir,
-					struct qstr *entry,
-					struct ext3_dir_entry_2 **res_dir)
-{
-	struct super_block * sb;
-	struct buffer_head * bh_use[NAMEI_RA_SIZE];
-	struct buffer_head * bh, *ret = NULL;
-	unsigned long start, block, b;
-	const u8 *name = entry->name;
-	int ra_max = 0;		/* Number of bh's in the readahead
-				   buffer, bh_use[] */
-	int ra_ptr = 0;		/* Current index into readahead
-				   buffer */
-	int num = 0;
-	int nblocks, i, err;
-	int namelen;
-
-	*res_dir = NULL;
-	sb = dir->i_sb;
-	namelen = entry->len;
-	if (namelen > EXT3_NAME_LEN)
-		return NULL;
-	if ((namelen <= 2) && (name[0] == '.') &&
-	    (name[1] == '.' || name[1] == 0)) {
-		/*
-		 * "." or ".." will only be in the first block
-		 * NFS may look up ".."; "." should be handled by the VFS
-		 */
-		block = start = 0;
-		nblocks = 1;
-		goto restart;
-	}
-	if (is_dx(dir)) {
-		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
-		/*
-		 * On success, or if the error was file not found,
-		 * return.  Otherwise, fall back to doing a search the
-		 * old-fashioned way.
-		 */
-		if (bh || (err != ERR_BAD_DX_DIR))
-			return bh;
-		dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
-	}
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	start = EXT3_I(dir)->i_dir_start_lookup;
-	if (start >= nblocks)
-		start = 0;
-	block = start;
-restart:
-	do {
-		/*
-		 * We deal with the read-ahead logic here.
-		 */
-		if (ra_ptr >= ra_max) {
-			/* Refill the readahead buffer */
-			ra_ptr = 0;
-			b = block;
-			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
-				/*
-				 * Terminate if we reach the end of the
-				 * directory and must wrap, or if our
-				 * search has finished at this block.
-				 */
-				if (b >= nblocks || (num && block == start)) {
-					bh_use[ra_max] = NULL;
-					break;
-				}
-				num++;
-				bh = ext3_getblk(NULL, dir, b++, 0, &err);
-				bh_use[ra_max] = bh;
-				if (bh && !bh_uptodate_or_lock(bh)) {
-					get_bh(bh);
-					bh->b_end_io = end_buffer_read_sync;
-					submit_bh(READ | REQ_META | REQ_PRIO,
-						  bh);
-				}
-			}
-		}
-		if ((bh = bh_use[ra_ptr++]) == NULL)
-			goto next;
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			/* read error, skip block & hope for the best */
-			ext3_error(sb, __func__, "reading directory #%lu "
-				   "offset %lu", dir->i_ino, block);
-			brelse(bh);
-			goto next;
-		}
-		i = search_dirblock(bh, dir, entry,
-			    block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
-		if (i == 1) {
-			EXT3_I(dir)->i_dir_start_lookup = block;
-			ret = bh;
-			goto cleanup_and_exit;
-		} else {
-			brelse(bh);
-			if (i < 0)
-				goto cleanup_and_exit;
-		}
-	next:
-		if (++block >= nblocks)
-			block = 0;
-	} while (block != start);
-
-	/*
-	 * If the directory has grown while we were searching, then
-	 * search the last part of the directory before giving up.
-	 */
-	block = nblocks;
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	if (block < nblocks) {
-		start = 0;
-		goto restart;
-	}
-
-cleanup_and_exit:
-	/* Clean up the read-ahead blocks */
-	for (; ra_ptr < ra_max; ra_ptr++)
-		brelse (bh_use[ra_ptr]);
-	return ret;
-}
-
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err)
-{
-	struct super_block *sb = dir->i_sb;
-	struct dx_hash_info	hinfo;
-	struct dx_frame frames[2], *frame;
-	struct buffer_head *bh;
-	unsigned long block;
-	int retval;
-
-	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-		return NULL;
-	do {
-		block = dx_get_block(frame->at);
-		if (!(bh = ext3_dir_bread (NULL, dir, block, 0, err)))
-			goto errout;
-
-		retval = search_dirblock(bh, dir, entry,
-					 block << EXT3_BLOCK_SIZE_BITS(sb),
-					 res_dir);
-		if (retval == 1) {
-			dx_release(frames);
-			return bh;
-		}
-		brelse(bh);
-		if (retval == -1) {
-			*err = ERR_BAD_DX_DIR;
-			goto errout;
-		}
-
-		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
-					       frames, NULL);
-		if (retval < 0) {
-			ext3_warning(sb, __func__,
-			     "error reading index page in directory #%lu",
-			     dir->i_ino);
-			*err = retval;
-			goto errout;
-		}
-	} while (retval == 1);
-
-	*err = -ENOENT;
-errout:
-	dxtrace(printk("%s not found\n", entry->name));
-	dx_release (frames);
-	return NULL;
-}
-
-static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
-{
-	struct inode * inode;
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head * bh;
-
-	if (dentry->d_name.len > EXT3_NAME_LEN)
-		return ERR_PTR(-ENAMETOOLONG);
-
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	inode = NULL;
-	if (bh) {
-		unsigned long ino = le32_to_cpu(de->inode);
-		brelse (bh);
-		if (!ext3_valid_inum(dir->i_sb, ino)) {
-			ext3_error(dir->i_sb, "ext3_lookup",
-				   "bad inode number: %lu", ino);
-			return ERR_PTR(-EIO);
-		}
-		inode = ext3_iget(dir->i_sb, ino);
-		if (inode == ERR_PTR(-ESTALE)) {
-			ext3_error(dir->i_sb, __func__,
-					"deleted inode referenced: %lu",
-					ino);
-			return ERR_PTR(-EIO);
-		}
-	}
-	return d_splice_alias(inode, dentry);
-}
-
-
-struct dentry *ext3_get_parent(struct dentry *child)
-{
-	unsigned long ino;
-	struct qstr dotdot = QSTR_INIT("..", 2);
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head *bh;
-
-	bh = ext3_find_entry(d_inode(child), &dotdot, &de);
-	if (!bh)
-		return ERR_PTR(-ENOENT);
-	ino = le32_to_cpu(de->inode);
-	brelse(bh);
-
-	if (!ext3_valid_inum(d_inode(child)->i_sb, ino)) {
-		ext3_error(d_inode(child)->i_sb, "ext3_get_parent",
-			   "bad inode number: %lu", ino);
-		return ERR_PTR(-EIO);
-	}
-
-	return d_obtain_alias(ext3_iget(d_inode(child)->i_sb, ino));
-}
-
-#define S_SHIFT 12
-static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
-	[S_IFREG >> S_SHIFT]	= EXT3_FT_REG_FILE,
-	[S_IFDIR >> S_SHIFT]	= EXT3_FT_DIR,
-	[S_IFCHR >> S_SHIFT]	= EXT3_FT_CHRDEV,
-	[S_IFBLK >> S_SHIFT]	= EXT3_FT_BLKDEV,
-	[S_IFIFO >> S_SHIFT]	= EXT3_FT_FIFO,
-	[S_IFSOCK >> S_SHIFT]	= EXT3_FT_SOCK,
-	[S_IFLNK >> S_SHIFT]	= EXT3_FT_SYMLINK,
-};
-
-static inline void ext3_set_de_type(struct super_block *sb,
-				struct ext3_dir_entry_2 *de,
-				umode_t mode) {
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
-		de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
-
-/*
- * Move count entries from end of map between two memory locations.
- * Returns pointer to last entry moved.
- */
-static struct ext3_dir_entry_2 *
-dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
-{
-	unsigned rec_len = 0;
-
-	while (count--) {
-		struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
-		rec_len = EXT3_DIR_REC_LEN(de->name_len);
-		memcpy (to, de, rec_len);
-		((struct ext3_dir_entry_2 *) to)->rec_len =
-				ext3_rec_len_to_disk(rec_len);
-		de->inode = 0;
-		map++;
-		to += rec_len;
-	}
-	return (struct ext3_dir_entry_2 *) (to - rec_len);
-}
-
-/*
- * Compact each dir entry in the range to the minimal rec_len.
- * Returns pointer to last entry in range.
- */
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize)
-{
-	struct ext3_dir_entry_2 *next, *to, *prev;
-	struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *)base;
-	unsigned rec_len = 0;
-
-	prev = to = de;
-	while ((char *)de < base + blocksize) {
-		next = ext3_next_entry(de);
-		if (de->inode && de->name_len) {
-			rec_len = EXT3_DIR_REC_LEN(de->name_len);
-			if (de > to)
-				memmove(to, de, rec_len);
-			to->rec_len = ext3_rec_len_to_disk(rec_len);
-			prev = to;
-			to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
-		}
-		de = next;
-	}
-	return prev;
-}
-
-/*
- * Split a full leaf block to make room for a new dir entry.
- * Allocate a new block, and move entries so that they are approx. equally full.
- * Returns pointer to de in block into which the new entry will be inserted.
- */
-static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-			struct buffer_head **bh,struct dx_frame *frame,
-			struct dx_hash_info *hinfo, int *error)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count, continued;
-	struct buffer_head *bh2;
-	u32 newblock;
-	u32 hash2;
-	struct dx_map_entry *map;
-	char *data1 = (*bh)->b_data, *data2;
-	unsigned split, move, size;
-	struct ext3_dir_entry_2 *de = NULL, *de2;
-	int	err = 0, i;
-
-	bh2 = ext3_append (handle, dir, &newblock, &err);
-	if (!(bh2)) {
-		brelse(*bh);
-		*bh = NULL;
-		goto errout;
-	}
-
-	BUFFER_TRACE(*bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, *bh);
-	if (err)
-		goto journal_error;
-
-	BUFFER_TRACE(frame->bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, frame->bh);
-	if (err)
-		goto journal_error;
-
-	data2 = bh2->b_data;
-
-	/* create map in the end of data2 block */
-	map = (struct dx_map_entry *) (data2 + blocksize);
-	count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
-			     blocksize, hinfo, map);
-	map -= count;
-	dx_sort_map (map, count);
-	/* Split the existing block in the middle, size-wise */
-	size = 0;
-	move = 0;
-	for (i = count-1; i >= 0; i--) {
-		/* is more than half of this entry in 2nd half of the block? */
-		if (size + map[i].size/2 > blocksize/2)
-			break;
-		size += map[i].size;
-		move++;
-	}
-	/* map index at which we will split */
-	split = count - move;
-	hash2 = map[split].hash;
-	continued = hash2 == map[split - 1].hash;
-	dxtrace(printk("Split block %i at %x, %i/%i\n",
-		dx_get_block(frame->at), hash2, split, count-split));
-
-	/* Fancy dance to stay within two buffers */
-	de2 = dx_move_dirents(data1, data2, map + split, count - split);
-	de = dx_pack_dirents(data1,blocksize);
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
-
-	/* Which block gets the new entry? */
-	if (hinfo->hash >= hash2)
-	{
-		swap(*bh, bh2);
-		de = de2;
-	}
-	dx_insert_block (frame, hash2 + continued, newblock);
-	err = ext3_journal_dirty_metadata (handle, bh2);
-	if (err)
-		goto journal_error;
-	err = ext3_journal_dirty_metadata (handle, frame->bh);
-	if (err)
-		goto journal_error;
-	brelse (bh2);
-	dxtrace(dx_show_index ("frame", frame->entries));
-	return de;
-
-journal_error:
-	brelse(*bh);
-	brelse(bh2);
-	*bh = NULL;
-	ext3_std_error(dir->i_sb, err);
-errout:
-	*error = err;
-	return NULL;
-}
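
do_split() picks the split point by accumulated bytes rather than by entry count, so the two halves come out roughly equally full even with mixed name lengths. The selection loop in isolation (a sketch; map[] is assumed sorted by ascending hash, as dx_sort_map() leaves it):

	/* Return the map index of the first entry that moves to the
	 * new block. */
	static int pick_split(const struct dx_map_entry *map, int count,
			      unsigned blocksize)
	{
		unsigned size = 0;
		int move = 0, i;

		for (i = count - 1; i >= 0; i--) {
			/* stop once more than half of this entry would
			 * land in the second half of the block */
			if (size + map[i].size / 2 > blocksize / 2)
				break;
			size += map[i].size;
			move++;
		}
		return count - move;
	}
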
-
-
-/*
- * Add a new entry into a directory (leaf) block.  If de is non-NULL,
- * it points to a directory entry which is guaranteed to be large
- * enough for the new directory entry.  If de is NULL, then
- * add_dirent_to_buf will attempt to search the directory block for
- * space.  It will return -ENOSPC if no space is available, -EIO if
- * the block is corrupted, and -EEXIST if the directory entry already exists.
- *
- * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
- * all other cases bh is released.
- */
-static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode, struct ext3_dir_entry_2 *de,
-			     struct buffer_head * bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	unsigned long	offset = 0;
-	unsigned short	reclen;
-	int		nlen, rlen, err;
-	char		*top;
-
-	reclen = EXT3_DIR_REC_LEN(namelen);
-	if (!de) {
-		de = (struct ext3_dir_entry_2 *)bh->b_data;
-		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
-		while ((char *) de <= top) {
-			if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
-						  bh, offset)) {
-				brelse (bh);
-				return -EIO;
-			}
-			if (ext3_match (namelen, name, de)) {
-				brelse (bh);
-				return -EEXIST;
-			}
-			nlen = EXT3_DIR_REC_LEN(de->name_len);
-			rlen = ext3_rec_len_from_disk(de->rec_len);
-			if ((de->inode? rlen - nlen: rlen) >= reclen)
-				break;
-			de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
-			offset += rlen;
-		}
-		if ((char *) de > top)
-			return -ENOSPC;
-	}
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err) {
-		ext3_std_error(dir->i_sb, err);
-		brelse(bh);
-		return err;
-	}
-
-	/* By now the buffer is marked for journaling */
-	nlen = EXT3_DIR_REC_LEN(de->name_len);
-	rlen = ext3_rec_len_from_disk(de->rec_len);
-	if (de->inode) {
-		struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
-		de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
-		de->rec_len = ext3_rec_len_to_disk(nlen);
-		de = de1;
-	}
-	de->file_type = EXT3_FT_UNKNOWN;
-	if (inode) {
-		de->inode = cpu_to_le32(inode->i_ino);
-		ext3_set_de_type(dir->i_sb, de, inode->i_mode);
-	} else
-		de->inode = 0;
-	de->name_len = namelen;
-	memcpy (de->name, name, namelen);
-	/*
-	 * XXX shouldn't update any times until successful
-	 * completion of syscall, but too many callers depend
-	 * on this.
-	 *
-	 * XXX similarly, too many callers depend on
-	 * ext3_new_inode() setting the times, but error
-	 * recovery deletes the inode, so the worst that can
-	 * happen is that the times are slightly out of date
-	 * and/or different from the directory change time.
-	 */
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	dir->i_version++;
-	ext3_mark_inode_dirty(handle, dir);
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_std_error(dir->i_sb, err);
-	brelse(bh);
-	return 0;
-}
-
-/*
- * This converts a one block unindexed directory to a 3 block indexed
- * directory, and adds the dentry to the indexed directory.
- */
-static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
-			    struct inode *inode, struct buffer_head *bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	struct buffer_head *bh2;
-	struct dx_root	*root;
-	struct dx_frame	frames[2], *frame;
-	struct dx_entry *entries;
-	struct ext3_dir_entry_2	*de, *de2;
-	char		*data1, *top;
-	unsigned	len;
-	int		retval;
-	unsigned	blocksize;
-	struct dx_hash_info hinfo;
-	u32		block;
-	struct fake_dirent *fde;
-
-	blocksize =  dir->i_sb->s_blocksize;
-	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
-	retval = ext3_journal_get_write_access(handle, bh);
-	if (retval) {
-		ext3_std_error(dir->i_sb, retval);
-		brelse(bh);
-		return retval;
-	}
-	root = (struct dx_root *) bh->b_data;
-
-	/* The 0th block becomes the root, move the dirents out */
-	fde = &root->dotdot;
-	de = (struct ext3_dir_entry_2 *)((char *)fde +
-			ext3_rec_len_from_disk(fde->rec_len));
-	if ((char *) de >= (((char *) root) + blocksize)) {
-		ext3_error(dir->i_sb, __func__,
-			   "invalid rec_len for '..' in inode %lu",
-			   dir->i_ino);
-		brelse(bh);
-		return -EIO;
-	}
-	len = ((char *) root) + blocksize - (char *) de;
-
-	bh2 = ext3_append (handle, dir, &block, &retval);
-	if (!(bh2)) {
-		brelse(bh);
-		return retval;
-	}
-	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
-	data1 = bh2->b_data;
-
-	memcpy (data1, de, len);
-	de = (struct ext3_dir_entry_2 *) data1;
-	top = data1 + len;
-	while ((char *)(de2 = ext3_next_entry(de)) < top)
-		de = de2;
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	/* Initialize the root; the dot dirents already exist */
-	de = (struct ext3_dir_entry_2 *) (&root->dotdot);
-	de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
-	memset (&root->info, 0, sizeof(root->info));
-	root->info.info_length = sizeof(root->info);
-	root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-	entries = root->entries;
-	dx_set_block (entries, 1);
-	dx_set_count (entries, 1);
-	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
-
-	/* Initialize as for dx_probe */
-	hinfo.hash_version = root->info.hash_version;
-	if (hinfo.hash_version <= DX_HASH_TEA)
-		hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	ext3fs_dirhash(name, namelen, &hinfo);
-	frame = frames;
-	frame->entries = entries;
-	frame->at = entries;
-	frame->bh = bh;
-	bh = bh2;
-	/*
-	 * Mark buffers dirty here so that if do_split() fails we write a
-	 * consistent set of buffers to disk.
-	 */
-	ext3_journal_dirty_metadata(handle, frame->bh);
-	ext3_journal_dirty_metadata(handle, bh);
-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-	if (!de) {
-		ext3_mark_inode_dirty(handle, dir);
-		dx_release(frames);
-		return retval;
-	}
-	dx_release(frames);
-
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- *	ext3_add_entry()
- *
- * adds a file entry to the specified directory, using the same
- * semantics as ext3_find_entry(). It returns 0 on success, or a
- * negative error code on failure.
- *
- * NOTE!! The inode part of 'de' is left at 0 - which means you
- * may not sleep between calling this and putting something into
- * the entry, as someone else might have used it while you slept.
- */
-static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
-	struct inode *inode)
-{
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 *de;
-	struct super_block * sb;
-	int	retval;
-	int	dx_fallback=0;
-	unsigned blocksize;
-	u32 block, blocks;
-
-	sb = dir->i_sb;
-	blocksize = sb->s_blocksize;
-	if (!dentry->d_name.len)
-		return -EINVAL;
-	if (is_dx(dir)) {
-		retval = ext3_dx_add_entry(handle, dentry, inode);
-		if (!retval || (retval != ERR_BAD_DX_DIR))
-			return retval;
-		EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
-		dx_fallback++;
-		ext3_mark_inode_dirty(handle, dir);
-	}
-	blocks = dir->i_size >> sb->s_blocksize_bits;
-	for (block = 0; block < blocks; block++) {
-		if (!(bh = ext3_dir_bread(handle, dir, block, 0, &retval)))
-			return retval;
-
-		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-		if (retval != -ENOSPC)
-			return retval;
-
-		if (blocks == 1 && !dx_fallback &&
-		    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
-			return make_indexed_dir(handle, dentry, inode, bh);
-		brelse(bh);
-	}
-	bh = ext3_append(handle, dir, &block, &retval);
-	if (!bh)
-		return retval;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de->inode = 0;
-	de->rec_len = ext3_rec_len_to_disk(blocksize);
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- * Returns 0 for success, or a negative error value
- */
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode)
-{
-	struct dx_frame frames[2], *frame;
-	struct dx_entry *entries, *at;
-	struct dx_hash_info hinfo;
-	struct buffer_head * bh;
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct super_block * sb = dir->i_sb;
-	struct ext3_dir_entry_2 *de;
-	int err;
-
-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-	if (!frame)
-		return err;
-	entries = frame->entries;
-	at = frame->at;
-
-	if (!(bh = ext3_dir_bread(handle, dir, dx_get_block(frame->at), 0, &err)))
-		goto cleanup;
-
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err)
-		goto journal_error;
-
-	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-	if (err != -ENOSPC) {
-		bh = NULL;
-		goto cleanup;
-	}
-
-	/* Block full, should compress but for now just split */
-	dxtrace(printk("using %u of %u node entries\n",
-		       dx_get_count(entries), dx_get_limit(entries)));
-	/* Need to split index? */
-	if (dx_get_count(entries) == dx_get_limit(entries)) {
-		u32 newblock;
-		unsigned icount = dx_get_count(entries);
-		int levels = frame - frames;
-		struct dx_entry *entries2;
-		struct dx_node *node2;
-		struct buffer_head *bh2;
-
-		if (levels && (dx_get_count(frames->entries) ==
-			       dx_get_limit(frames->entries))) {
-			ext3_warning(sb, __func__,
-				     "Directory index full!");
-			err = -ENOSPC;
-			goto cleanup;
-		}
-		bh2 = ext3_append (handle, dir, &newblock, &err);
-		if (!(bh2))
-			goto cleanup;
-		node2 = (struct dx_node *)(bh2->b_data);
-		entries2 = node2->entries;
-		memset(&node2->fake, 0, sizeof(struct fake_dirent));
-		node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
-		BUFFER_TRACE(frame->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, frame->bh);
-		if (err)
-			goto journal_error;
-		if (levels) {
-			unsigned icount1 = icount/2, icount2 = icount - icount1;
-			unsigned hash2 = dx_get_hash(entries + icount1);
-			dxtrace(printk("Split index %i/%i\n", icount1, icount2));
-
-			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-			err = ext3_journal_get_write_access(handle,
-							     frames[0].bh);
-			if (err)
-				goto journal_error;
-
-			memcpy ((char *) entries2, (char *) (entries + icount1),
-				icount2 * sizeof(struct dx_entry));
-			dx_set_count (entries, icount1);
-			dx_set_count (entries2, icount2);
-			dx_set_limit (entries2, dx_node_limit(dir));
-
-			/* Which index block gets the new entry? */
-			if (at - entries >= icount1) {
-				frame->at = at = at - entries - icount1 + entries2;
-				frame->entries = entries = entries2;
-				swap(frame->bh, bh2);
-			}
-			dx_insert_block (frames + 0, hash2, newblock);
-			dxtrace(dx_show_index ("node", frames[1].entries));
-			dxtrace(dx_show_index ("node",
-			       ((struct dx_node *) bh2->b_data)->entries));
-			err = ext3_journal_dirty_metadata(handle, bh2);
-			if (err)
-				goto journal_error;
-			brelse (bh2);
-		} else {
-			dxtrace(printk("Creating second level index...\n"));
-			memcpy((char *) entries2, (char *) entries,
-			       icount * sizeof(struct dx_entry));
-			dx_set_limit(entries2, dx_node_limit(dir));
-
-			/* Set up root */
-			dx_set_count(entries, 1);
-			dx_set_block(entries + 0, newblock);
-			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
-
-			/* Add new access path frame */
-			frame = frames + 1;
-			frame->at = at = at - entries + entries2;
-			frame->entries = entries = entries2;
-			frame->bh = bh2;
-			err = ext3_journal_get_write_access(handle,
-							     frame->bh);
-			if (err)
-				goto journal_error;
-		}
-		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
-		if (err)
-			goto journal_error;
-	}
-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-	if (!de)
-		goto cleanup;
-	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
-	bh = NULL;
-	goto cleanup;
-
-journal_error:
-	ext3_std_error(dir->i_sb, err);
-cleanup:
-	if (bh)
-		brelse(bh);
-	dx_release(frames);
-	return err;
-}
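The index split above moves the upper half of a full dx node into a fresh
block and then re-points the insertion cursor into whichever half it now
belongs to. A stand-alone sketch of the single-level case, with invented
hashes and block numbers (illustrative C only, not the kernel code):

#include <stdio.h>
#include <string.h>

struct dx_entry { unsigned hash; unsigned block; };

int main(void)
{
	struct dx_entry node1[8] = {
		{10, 1}, {20, 2}, {30, 3}, {40, 4},
		{50, 5}, {60, 6}, {70, 7}, {80, 8}
	};
	struct dx_entry node2[8];
	unsigned icount = 8, icount1 = icount / 2, icount2 = icount - icount1;
	struct dx_entry *at = &node1[6];	/* insertion point before split */

	/* Move the upper half into the new node, as the split does. */
	memcpy(node2, node1 + icount1, icount2 * sizeof(*node2));

	/* Re-aim the cursor if it landed in the half that moved. */
	if ((unsigned)(at - node1) >= icount1)
		at = node2 + (at - node1 - icount1);

	/* Prints "boundary hash 50, cursor now at hash 70". */
	printf("boundary hash %u, cursor now at hash %u\n",
	       node1[icount1].hash, at->hash);
	return 0;
}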
-
-/*
- * ext3_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ext3_delete_entry (handle_t *handle,
-			      struct inode * dir,
-			      struct ext3_dir_entry_2 * de_del,
-			      struct buffer_head * bh)
-{
-	struct ext3_dir_entry_2 * de, * pde;
-	int i;
-
-	i = 0;
-	pde = NULL;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	while (i < bh->b_size) {
-		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
-			return -EIO;
-		if (de == de_del)  {
-			int err;
-
-			BUFFER_TRACE(bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bh);
-			if (err)
-				goto journal_error;
-
-			if (pde)
-				pde->rec_len = ext3_rec_len_to_disk(
-					ext3_rec_len_from_disk(pde->rec_len) +
-					ext3_rec_len_from_disk(de->rec_len));
-			else
-				de->inode = 0;
-			dir->i_version++;
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err) {
-journal_error:
-				ext3_std_error(dir->i_sb, err);
-				return err;
-			}
-			return 0;
-		}
-		i += ext3_rec_len_from_disk(de->rec_len);
-		pde = de;
-		de = ext3_next_entry(de);
-	}
-	return -ENOENT;
-}
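ext3_delete_entry() never shifts bytes around: the deleted record is simply
absorbed into the rec_len of the record before it, so later scans step over
the dead space. A minimal stand-alone sketch of that coalescing, with
invented inode numbers and record lengths (not the kernel structures):

#include <stdio.h>

struct dirent_rec {
	unsigned inode;		/* 0 means "unused" */
	unsigned rec_len;	/* bytes from this record to the next */
};

static void delete_by_merge(struct dirent_rec *prev, struct dirent_rec *de)
{
	if (prev)
		prev->rec_len += de->rec_len;	/* hide 'de' inside 'prev' */
	else
		de->inode = 0;		/* first record: just mark it unused */
}

int main(void)
{
	struct dirent_rec a = { 11, 12 }, b = { 12, 16 };

	delete_by_merge(&a, &b);
	printf("a.rec_len=%u (now covers the deleted record)\n", a.rec_len);
	return 0;
}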
-
-static int ext3_add_nondir(handle_t *handle,
-		struct dentry *dentry, struct inode *inode)
-{
-	int err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
-		return 0;
-	}
-	drop_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
-
-/*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
- * is so far negative - it has no inode.
- *
- * If the create succeeds, we fill in the inode information
- * with d_instantiate().
- */
-static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
-		bool excl)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_mknod (struct inode * dir, struct dentry *dentry,
-			umode_t mode, dev_t rdev)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT3_FS_XATTR
-		inode->i_op = &ext3_special_inode_operations;
-#endif
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  4 + EXT3_XATTR_TRANS_BLOCKS);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	inode = ext3_new_inode (handle, dir, NULL, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		d_tmpfile(dentry, inode);
-		err = ext3_orphan_add(handle, inode);
-		if (err)
-			goto err_unlock_inode;
-		mark_inode_dirty(inode);
-		unlock_new_inode(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_unlock_inode:
-	ext3_journal_stop(handle);
-	unlock_new_inode(inode);
-	return err;
-}
-
-static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode * inode;
-	struct buffer_head * dir_block = NULL;
-	struct ext3_dir_entry_2 * de;
-	int err, retries = 0;
-
-	if (dir->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFDIR | mode);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	inode->i_op = &ext3_dir_inode_operations;
-	inode->i_fop = &ext3_dir_operations;
-	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
-	if (!(dir_block = ext3_dir_bread(handle, inode, 0, 1, &err)))
-		goto out_clear_inode;
-
-	BUFFER_TRACE(dir_block, "get_write_access");
-	err = ext3_journal_get_write_access(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
-	de->inode = cpu_to_le32(inode->i_ino);
-	de->name_len = 1;
-	de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
-	strcpy (de->name, ".");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	de = ext3_next_entry(de);
-	de->inode = cpu_to_le32(dir->i_ino);
-	de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
-					EXT3_DIR_REC_LEN(1));
-	de->name_len = 2;
-	strcpy (de->name, "..");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	set_nlink(inode, 2);
-	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (!err)
-		err = ext3_add_entry (handle, dentry, inode);
-
-	if (err) {
-out_clear_inode:
-		clear_nlink(inode);
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
-	inc_nlink(dir);
-	ext3_update_dx_flag(dir);
-	err = ext3_mark_inode_dirty(handle, dir);
-	if (err)
-		goto out_clear_inode;
-
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
-out_stop:
-	brelse(dir_block);
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
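The "." and ".." records built above depend on EXT3_DIR_REC_LEN() rounding
the 8-byte record header plus the name up to a 4-byte boundary, with ".."
then stretched to cover the remainder of the block. A small stand-alone
sketch with that rounding inlined (the 1024-byte block size is an invented
example):

#include <stdio.h>

#define DIR_REC_LEN(name_len)	(((name_len) + 8 + 3) & ~3)

int main(void)
{
	unsigned blocksize = 1024;
	unsigned dot = DIR_REC_LEN(1);		/* "."  -> 12 bytes    */
	unsigned dotdot = blocksize - dot;	/* ".." spans the rest */

	printf("rec_len('.')=%u, rec_len('..')=%u\n", dot, dotdot);
	return 0;
}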
-
-/*
- * routine to check that the specified directory is empty (for rmdir)
- */
-static int empty_dir (struct inode * inode)
-{
-	unsigned long offset;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de, * de1;
-	struct super_block * sb;
-	int err = 0;
-
-	sb = inode->i_sb;
-	if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
-	    !(bh = ext3_dir_bread(NULL, inode, 0, 0, &err))) {
-		if (err)
-			ext3_error(inode->i_sb, __func__,
-				   "error %d reading directory #%lu offset 0",
-				   err, inode->i_ino);
-		else
-			ext3_warning(inode->i_sb, __func__,
-				     "bad directory (dir #%lu) - no data block",
-				     inode->i_ino);
-		return 1;
-	}
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de1 = ext3_next_entry(de);
-	if (le32_to_cpu(de->inode) != inode->i_ino ||
-			!le32_to_cpu(de1->inode) ||
-			strcmp (".", de->name) ||
-			strcmp ("..", de1->name)) {
-		ext3_warning (inode->i_sb, "empty_dir",
-			      "bad directory (dir #%lu) - no `.' or `..'",
-			      inode->i_ino);
-		brelse (bh);
-		return 1;
-	}
-	offset = ext3_rec_len_from_disk(de->rec_len) +
-			ext3_rec_len_from_disk(de1->rec_len);
-	de = ext3_next_entry(de1);
-	while (offset < inode->i_size ) {
-		if (!bh ||
-			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
-			err = 0;
-			brelse (bh);
-			if (!(bh = ext3_dir_bread (NULL, inode,
-				offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err))) {
-				if (err)
-					ext3_error(sb, __func__,
-						   "error %d reading directory"
-						   " #%lu offset %lu",
-						   err, inode->i_ino, offset);
-				offset += sb->s_blocksize;
-				continue;
-			}
-			de = (struct ext3_dir_entry_2 *) bh->b_data;
-		}
-		if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
-			de = (struct ext3_dir_entry_2 *)(bh->b_data +
-							 sb->s_blocksize);
-			offset = (offset | (sb->s_blocksize - 1)) + 1;
-			continue;
-		}
-		if (le32_to_cpu(de->inode)) {
-			brelse (bh);
-			return 0;
-		}
-		offset += ext3_rec_len_from_disk(de->rec_len);
-		de = ext3_next_entry(de);
-	}
-	brelse (bh);
-	return 1;
-}
-
-/* ext3_orphan_add() links an unlinked or truncated inode into a list of
- * such inodes, starting at the superblock, in case we crash before the
- * file is closed/deleted, or in case the inode truncate spans multiple
- * transactions and the last transaction is not recovered after a crash.
- *
- * At filesystem recovery time, we walk this list deleting unlinked
- * inodes and truncating linked inodes in ext3_orphan_cleanup().
- */
-int ext3_orphan_add(handle_t *handle, struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_iloc iloc;
-	int err = 0, rc;
-
-	mutex_lock(&EXT3_SB(sb)->s_orphan_lock);
-	if (!list_empty(&EXT3_I(inode)->i_orphan))
-		goto out_unlock;
-
-	/* Orphan handling is only valid for files with data blocks
-	 * being truncated, or files being unlinked. */
-
-	/* @@@ FIXME: Observation from aviro:
-	 * I think I can trigger J_ASSERT in ext3_orphan_add().  We block
-	 * here (on s_orphan_lock), so race with ext3_link() which might bump
-	 * ->i_nlink. Take, say, a character device: not a regular file,
-	 * not a directory, not a symlink, and ->i_nlink > 0.
-	 *
-	 * tytso, 4/25/2009: I'm not sure how that could happen;
-	 * shouldn't the fs core protect us from these sort of
-	 * unlink()/link() races?
-	 */
-	J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-		S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
-
-	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto out_unlock;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_unlock;
-
-	/* Insert this inode at the head of the on-disk orphan list... */
-	NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
-	EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (!err)
-		err = rc;
-
-	/* Only add to the head of the in-memory list if all the
-	 * previous operations succeeded.  If the orphan_add is going to
-	 * fail (possibly taking the journal offline), we can't risk
-	 * leaving the inode on the orphan list: stray orphan-list
-	 * entries can cause panics at unmount time.
-	 *
-	 * This is safe: on error we're going to ignore the orphan list
-	 * anyway on the next recovery. */
-	if (!err)
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-
-	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
-	jbd_debug(4, "orphan inode %lu will point to %d\n",
-			inode->i_ino, NEXT_ORPHAN(inode));
-out_unlock:
-	mutex_unlock(&EXT3_SB(sb)->s_orphan_lock);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
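On disk the orphan chain is a singly linked list threaded through the inodes
themselves: s_last_orphan in the superblock names the newest orphan, and each
orphan's NEXT_ORPHAN field names the one added before it. A toy stand-alone
model of just that chain (the arrays stand in for the real superblock and
inode fields; the inode numbers are invented):

#include <stdio.h>

#define NINODES 16

static unsigned next_orphan[NINODES];	/* stand-in for NEXT_ORPHAN(inode) */
static unsigned s_last_orphan;		/* stand-in for es->s_last_orphan  */

static void orphan_add(unsigned ino)
{
	next_orphan[ino] = s_last_orphan;	/* new head links to old head */
	s_last_orphan = ino;
}

int main(void)
{
	unsigned ino;

	orphan_add(12);
	orphan_add(13);
	orphan_add(14);
	for (ino = s_last_orphan; ino; ino = next_orphan[ino])
		printf("orphan inode %u\n", ino);	/* 14, 13, 12 */
	return 0;
}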
-
-/*
- * ext3_orphan_del() removes an unlinked or truncated inode from the list
- * of such inodes stored on disk, because it is finally being cleaned up.
- */
-int ext3_orphan_del(handle_t *handle, struct inode *inode)
-{
-	struct list_head *prev;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_sb_info *sbi;
-	unsigned long ino_next;
-	struct ext3_iloc iloc;
-	int err = 0;
-
-	mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	if (list_empty(&ei->i_orphan))
-		goto out;
-
-	ino_next = NEXT_ORPHAN(inode);
-	prev = ei->i_orphan.prev;
-	sbi = EXT3_SB(inode->i_sb);
-
-	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
-
-	list_del_init(&ei->i_orphan);
-
-	/* If we're on an error path, we may not have a valid
-	 * transaction handle with which to update the orphan list on
-	 * disk, but we still need to remove the inode from the linked
-	 * list in memory. */
-	if (!handle)
-		goto out;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_err;
-
-	if (prev == &sbi->s_orphan) {
-		jbd_debug(4, "superblock will point to %lu\n", ino_next);
-		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, sbi->s_sbh);
-		if (err)
-			goto out_brelse;
-		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-		err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-	} else {
-		struct ext3_iloc iloc2;
-		struct inode *i_prev =
-			&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
-
-		jbd_debug(4, "orphan inode %lu will point to %lu\n",
-			  i_prev->i_ino, ino_next);
-		err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
-		if (err)
-			goto out_brelse;
-		NEXT_ORPHAN(i_prev) = ino_next;
-		err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
-	}
-	if (err)
-		goto out_brelse;
-	NEXT_ORPHAN(inode) = 0;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-out_err:
-	ext3_std_error(inode->i_sb, err);
-out:
-	mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	return err;
-
-out_brelse:
-	brelse(iloc.bh);
-	goto out_err;
-}
-
-static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	/* Initialize quotas before so that eventual writes go in
-	 * separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_rmdir;
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_rmdir;
-
-	retval = -ENOTEMPTY;
-	if (!empty_dir (inode))
-		goto end_rmdir;
-
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_rmdir;
-	if (inode->i_nlink != 2)
-		ext3_warning (inode->i_sb, "ext3_rmdir",
-			      "empty directory has nlink!=2 (%d)",
-			      inode->i_nlink);
-	inode->i_version++;
-	clear_nlink(inode);
-	/* There's no need to set i_disksize: the fact that i_nlink is
-	 * zero will ensure that the right thing happens during any
-	 * recovery. */
-	inode->i_size = 0;
-	ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-	drop_nlink(dir);
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-
-end_rmdir:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	return retval;
-}
-
-static int ext3_unlink(struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	trace_ext3_unlink_enter(dir, dentry);
-	/* Initialize quotas before so that eventual writes go
-	 * in separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_unlink;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_unlink;
-
-	if (!inode->i_nlink) {
-		ext3_warning (inode->i_sb, "ext3_unlink",
-			      "Deleting nonexistent file (%lu), %d",
-			      inode->i_ino, inode->i_nlink);
-		set_nlink(inode, 1);
-	}
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-	drop_nlink(inode);
-	if (!inode->i_nlink)
-		ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime;
-	ext3_mark_inode_dirty(handle, inode);
-	retval = 0;
-
-end_unlink:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	trace_ext3_unlink_exit(dentry, retval);
-	return retval;
-}
-
-static int ext3_symlink (struct inode * dir,
-		struct dentry *dentry, const char * symname)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int l, err, retries = 0;
-	int credits;
-
-	l = strlen(symname)+1;
-	if (l > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
-
-	dquot_initialize(dir);
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		/*
-		 * For non-fast symlinks, we just allocate inode and put it on
-		 * orphan list in the first transaction => we need bitmap,
-		 * group descriptor, sb, inode block, quota blocks, and
-		 * possibly selinux xattr blocks.
-		 */
-		credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  EXT3_XATTR_TRANS_BLOCKS;
-	} else {
-		/*
-		 * Fast symlink. We have to add entry to directory
-		 * (EXT3_DATA_TRANS_BLOCKS + EXT3_INDEX_EXTRA_TRANS_BLOCKS),
-		 * allocate new inode (bitmap, group descriptor, inode block,
-		 * quota blocks, sb is already counted in previous macros).
-		 */
-		credits = EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-			  EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-			  EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
-	}
-retry:
-	handle = ext3_journal_start(dir, credits);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFLNK|S_IRWXUGO);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		inode->i_op = &ext3_symlink_inode_operations;
-		ext3_set_aops(inode);
-		/*
-		 * We cannot call page_symlink() with transaction started
-		 * because it calls into ext3_write_begin() which acquires page
-		 * lock which ranks below transaction start (and it can also
-		 * wait for journal commit if we are running out of space). So
-		 * we have to stop the transaction now and restart it when the
-		 * symlink contents are written.
-		 *
-		 * To keep the fs consistent in case of a crash, we have to put
-		 * the inode on the orphan list in the meantime.
-		 */
-		drop_nlink(inode);
-		err = ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		if (err)
-			goto err_drop_inode;
-		err = __page_symlink(inode, symname, l, 1);
-		if (err)
-			goto err_drop_inode;
-		/*
-		 * Now inode is being linked into dir (EXT3_DATA_TRANS_BLOCKS
-		 * + EXT3_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
-		 */
-		handle = ext3_journal_start(dir,
-				EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-				EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto err_drop_inode;
-		}
-		set_nlink(inode, 1);
-		err = ext3_orphan_del(handle, inode);
-		if (err) {
-			ext3_journal_stop(handle);
-			drop_nlink(inode);
-			goto err_drop_inode;
-		}
-	} else {
-		inode->i_op = &ext3_fast_symlink_inode_operations;
-		inode->i_link = (char*)&EXT3_I(inode)->i_data;
-		memcpy(inode->i_link, symname, l);
-		inode->i_size = l-1;
-	}
-	EXT3_I(inode)->i_disksize = inode->i_size;
-	err = ext3_add_nondir(handle, dentry, inode);
-out_stop:
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_drop_inode:
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
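The two credit formulas above differ because the slow path only allocates the
inode and parks it on the orphan list in its first transaction, while the
fast path allocates the inode and links the directory entry in one go. A
rough stand-alone rendering of the arithmetic, with the EXT3_* macro values
replaced by invented placeholders (the real values depend on the superblock
and quota configuration):

#include <stdio.h>

int main(void)
{
	int data_trans = 62;	/* stand-in for EXT3_DATA_TRANS_BLOCKS()      */
	int index_extra = 8;	/* stand-in for EXT3_INDEX_EXTRA_TRANS_BLOCKS */
	int quota_init = 6;	/* stand-in for EXT3_MAXQUOTAS_INIT_BLOCKS()  */
	int xattr = 6;		/* stand-in for EXT3_XATTR_TRANS_BLOCKS       */

	int slow = 4 + quota_init + xattr;			/* long symlink */
	int fast = data_trans + index_extra + 3 + quota_init;	/* fast symlink */

	printf("slow-path credits %d, fast-path credits %d\n", slow, fast);
	return 0;
}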
-
-static int ext3_link (struct dentry * old_dentry,
-		struct inode * dir, struct dentry *dentry)
-{
-	handle_t *handle;
-	struct inode *inode = d_inode(old_dentry);
-	int err, retries = 0;
-
-	if (inode->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode->i_ctime = CURRENT_TIME_SEC;
-	inc_nlink(inode);
-	ihold(inode);
-
-	err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		/* this can happen only for tmpfile being
-		 * linked the first time
-		 */
-		if (inode->i_nlink == 1)
-			ext3_orphan_del(handle, inode);
-		d_instantiate(dentry, inode);
-	} else {
-		drop_nlink(inode);
-		iput(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-#define PARENT_INO(buffer) \
-	(ext3_next_entry((struct ext3_dir_entry_2 *)(buffer))->inode)
-
-/*
- * Anybody can rename anything with this: the permission checks are left to the
- * higher-level routines.
- */
-static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
-			   struct inode * new_dir,struct dentry *new_dentry)
-{
-	handle_t *handle;
-	struct inode * old_inode, * new_inode;
-	struct buffer_head * old_bh, * new_bh, * dir_bh;
-	struct ext3_dir_entry_2 * old_de, * new_de;
-	int retval, flush_file = 0;
-
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
-
-	old_bh = new_bh = dir_bh = NULL;
-
-	/* Initialize quotas before so that eventual writes go
-	 * in separate transaction */
-	if (d_really_is_positive(new_dentry))
-		dquot_initialize(d_inode(new_dentry));
-	handle = ext3_journal_start(old_dir, 2 *
-					EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-		handle->h_sync = 1;
-
-	old_bh = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de);
-	/*
-	 *  The inode number check is _not_ due to possible IO errors.
-	 *  We might rmdir the source, keep it as pwd of some process
-	 *  and merrily kill the link to whatever was created under the
-	 *  same name. Goodbye sticky bit ;-<
-	 */
-	old_inode = d_inode(old_dentry);
-	retval = -ENOENT;
-	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
-		goto end_rename;
-
-	new_inode = d_inode(new_dentry);
-	new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de);
-	if (new_bh) {
-		if (!new_inode) {
-			brelse (new_bh);
-			new_bh = NULL;
-		}
-	}
-	if (S_ISDIR(old_inode->i_mode)) {
-		if (new_inode) {
-			retval = -ENOTEMPTY;
-			if (!empty_dir (new_inode))
-				goto end_rename;
-		}
-		retval = -EIO;
-		dir_bh = ext3_dir_bread(handle, old_inode, 0, 0, &retval);
-		if (!dir_bh)
-			goto end_rename;
-		if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
-			goto end_rename;
-		retval = -EMLINK;
-		if (!new_inode && new_dir!=old_dir &&
-				new_dir->i_nlink >= EXT3_LINK_MAX)
-			goto end_rename;
-	}
-	if (!new_bh) {
-		retval = ext3_add_entry (handle, new_dentry, old_inode);
-		if (retval)
-			goto end_rename;
-	} else {
-		BUFFER_TRACE(new_bh, "get write access");
-		retval = ext3_journal_get_write_access(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		new_de->inode = cpu_to_le32(old_inode->i_ino);
-		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
-					      EXT3_FEATURE_INCOMPAT_FILETYPE))
-			new_de->file_type = old_de->file_type;
-		new_dir->i_version++;
-		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
-		ext3_mark_inode_dirty(handle, new_dir);
-		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		brelse(new_bh);
-		new_bh = NULL;
-	}
-
-	/*
-	 * Like most other Unix systems, set the ctime for inodes on a
-	 * rename.
-	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, old_inode);
-
-	/*
-	 * ok, that's it
-	 */
-	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
-	    old_de->name_len != old_dentry->d_name.len ||
-	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
-	    (retval = ext3_delete_entry(handle, old_dir,
-					old_de, old_bh)) == -ENOENT) {
-		/* old_de could have moved from under us during htree split, so
-		 * make sure that we are deleting the right entry.  We might
-		 * also be pointing to a stale entry in the unused part of
-		 * old_bh so just checking inum and the name isn't enough. */
-		struct buffer_head *old_bh2;
-		struct ext3_dir_entry_2 *old_de2;
-
-		old_bh2 = ext3_find_entry(old_dir, &old_dentry->d_name,
-					  &old_de2);
-		if (old_bh2) {
-			retval = ext3_delete_entry(handle, old_dir,
-						   old_de2, old_bh2);
-			brelse(old_bh2);
-		}
-	}
-	if (retval) {
-		ext3_warning(old_dir->i_sb, "ext3_rename",
-				"Deleting old file (%lu), %d, error=%d",
-				old_dir->i_ino, old_dir->i_nlink, retval);
-	}
-
-	if (new_inode) {
-		drop_nlink(new_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
-	}
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(old_dir);
-	if (dir_bh) {
-		BUFFER_TRACE(dir_bh, "get_write_access");
-		retval = ext3_journal_get_write_access(handle, dir_bh);
-		if (retval)
-			goto journal_error;
-		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
-		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, dir_bh);
-		if (retval) {
-journal_error:
-			ext3_std_error(new_dir->i_sb, retval);
-			goto end_rename;
-		}
-		drop_nlink(old_dir);
-		if (new_inode) {
-			drop_nlink(new_inode);
-		} else {
-			inc_nlink(new_dir);
-			ext3_update_dx_flag(new_dir);
-			ext3_mark_inode_dirty(handle, new_dir);
-		}
-	}
-	ext3_mark_inode_dirty(handle, old_dir);
-	if (new_inode) {
-		ext3_mark_inode_dirty(handle, new_inode);
-		if (!new_inode->i_nlink)
-			ext3_orphan_add(handle, new_inode);
-		if (ext3_should_writeback_data(new_inode))
-			flush_file = 1;
-	}
-	retval = 0;
-
-end_rename:
-	brelse (dir_bh);
-	brelse (old_bh);
-	brelse (new_bh);
-	ext3_journal_stop(handle);
-	if (retval == 0 && flush_file)
-		filemap_flush(old_inode->i_mapping);
-	return retval;
-}
-
-/*
- * directories can handle most operations...
- */
-const struct inode_operations ext3_dir_inode_operations = {
-	.create		= ext3_create,
-	.lookup		= ext3_lookup,
-	.link		= ext3_link,
-	.unlink		= ext3_unlink,
-	.symlink	= ext3_symlink,
-	.mkdir		= ext3_mkdir,
-	.rmdir		= ext3_rmdir,
-	.mknod		= ext3_mknod,
-	.tmpfile	= ext3_tmpfile,
-	.rename		= ext3_rename,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
-
-const struct inode_operations ext3_special_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
diff --git a/fs/ext3/namei.h b/fs/ext3/namei.h
deleted file mode 100644
index 46304d8..0000000
--- a/fs/ext3/namei.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*  linux/fs/ext3/namei.h
- *
- * Copyright (C) 2005 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
-*/
-
-extern struct dentry *ext3_get_parent(struct dentry *child);
-
-static inline struct buffer_head *ext3_dir_bread(handle_t *handle,
-						 struct inode *inode,
-						 int block, int create,
-						 int *err)
-{
-	struct buffer_head *bh;
-
-	bh = ext3_bread(handle, inode, block, create, err);
-
-	if (!bh && !(*err)) {
-		*err = -EIO;
-		ext3_error(inode->i_sb, __func__,
-			   "Directory hole detected on inode %lu\n",
-			   inode->i_ino);
-		return NULL;
-	}
-	return bh;
-}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
deleted file mode 100644
index 2710565..0000000
--- a/fs/ext3/resize.c
+++ /dev/null
@@ -1,1117 +0,0 @@
-/*
- *  linux/fs/ext3/resize.c
- *
- * Support for resizing an ext3 filesystem while it is mounted.
- *
- * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
- *
- * This could probably be made into a module, because it is not often in use.
- */
-
-
-#define EXT3FS_DEBUG
-
-#include "ext3.h"
-
-
-#define outside(b, first, last)	((b) < (first) || (b) >= (last))
-#define inside(b, first, last)	((b) >= (first) && (b) < (last))
-
-static int verify_group_input(struct super_block *sb,
-			      struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	ext3_fsblk_t start = le32_to_cpu(es->s_blocks_count);
-	ext3_fsblk_t end = start + input->blocks_count;
-	unsigned group = input->group;
-	ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
-	unsigned overhead = ext3_bg_has_super(sb, group) ?
-		(1 + ext3_bg_num_gdb(sb, group) +
-		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
-	ext3_fsblk_t metaend = start + overhead;
-	struct buffer_head *bh = NULL;
-	ext3_grpblk_t free_blocks_count;
-	int err = -EINVAL;
-
-	input->free_blocks_count = free_blocks_count =
-		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
-		       "(%d free, %u reserved)\n",
-		       ext3_bg_has_super(sb, input->group) ? "normal" :
-		       "no-super", input->group, input->blocks_count,
-		       free_blocks_count, input->reserved_blocks);
-
-	if (group != sbi->s_groups_count)
-		ext3_warning(sb, __func__,
-			     "Cannot add at group %u (only %lu groups)",
-			     input->group, sbi->s_groups_count);
-	else if ((start - le32_to_cpu(es->s_first_data_block)) %
-		 EXT3_BLOCKS_PER_GROUP(sb))
-		ext3_warning(sb, __func__, "Last group not full");
-	else if (input->reserved_blocks > input->blocks_count / 5)
-		ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
-			     input->reserved_blocks);
-	else if (free_blocks_count < 0)
-		ext3_warning(sb, __func__, "Bad blocks count %u",
-			     input->blocks_count);
-	else if (!(bh = sb_bread(sb, end - 1)))
-		ext3_warning(sb, __func__,
-			     "Cannot read last block ("E3FSBLK")",
-			     end - 1);
-	else if (outside(input->block_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Block bitmap not in group (block %u)",
-			     input->block_bitmap);
-	else if (outside(input->inode_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap not in group (block %u)",
-			     input->inode_bitmap);
-	else if (outside(input->inode_table, start, end) ||
-	         outside(itend - 1, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode table not in group (blocks %u-"E3FSBLK")",
-			     input->inode_table, itend - 1);
-	else if (input->inode_bitmap == input->block_bitmap)
-		ext3_warning(sb, __func__,
-			     "Block bitmap same as inode bitmap (%u)",
-			     input->block_bitmap);
-	else if (inside(input->block_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->block_bitmap, input->inode_table, itend-1);
-	else if (inside(input->inode_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->inode_bitmap, input->inode_table, itend-1);
-	else if (inside(input->block_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->block_bitmap, start, metaend - 1);
-	else if (inside(input->inode_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_bitmap, start, metaend - 1);
-	else if (inside(input->inode_table, start, metaend) ||
-	         inside(itend - 1, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode table (%u-"E3FSBLK") overlaps "
-			     "GDT table ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_table, itend - 1, start, metaend - 1);
-	else
-		err = 0;
-	brelse(bh);
-
-	return err;
-}
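Put differently, input->free_blocks_count is what remains of the new group
after its two bitmaps, its inode table, and (for groups carrying a superblock
backup) the super/GDT/reserved-GDT overhead are deducted. A back-of-envelope
stand-alone sketch with invented sizes:

#include <stdio.h>

int main(void)
{
	unsigned blocks_count = 32768;	/* blocks in the new group */
	unsigned itb_per_group = 512;	/* inode table blocks      */
	unsigned gdb_count = 2, reserved_gdt = 256;
	int has_super = 1;

	unsigned overhead = has_super ? 1 + gdb_count + reserved_gdt : 0;
	unsigned free_blocks = blocks_count - 2 - overhead - itb_per_group;

	printf("free blocks in the new group: %u\n", free_blocks); /* 31995 */
	return 0;
}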
-
-static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
-				  ext3_fsblk_t blk)
-{
-	struct buffer_head *bh;
-	int err;
-
-	bh = sb_getblk(sb, blk);
-	if (unlikely(!bh))
-		return ERR_PTR(-ENOMEM);
-	if ((err = ext3_journal_get_write_access(handle, bh))) {
-		brelse(bh);
-		bh = ERR_PTR(err);
-	} else {
-		lock_buffer(bh);
-		memset(bh->b_data, 0, sb->s_blocksize);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-	}
-
-	return bh;
-}
-
-/*
- * To avoid calling the atomic setbit hundreds or thousands of times, we only
- * need to use it within a single byte (to ensure we get endianness right).
- * We can use memset for the rest of the bitmap as there are no other users.
- */
-static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
-{
-	int i;
-
-	if (start_bit >= end_bit)
-		return;
-
-	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
-	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
-		ext3_set_bit(i, bitmap);
-	if (i < end_bit)
-		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
-}
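Spelled out: only the bits up to the next byte boundary are set one at a
time; whole bytes after that are filled with a single memset(), and any bits
past the last whole byte are left untouched, exactly as in the helper above.
A stand-alone sketch using the same little-endian bit numbering (bit i lives
in byte i >> 3):

#include <stdio.h>
#include <string.h>

static void set_bit(int nr, unsigned char *map)
{
	map[nr >> 3] |= 1u << (nr & 7);
}

static void mark_end(int start_bit, int end_bit, unsigned char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;
	for (i = start_bit; i < ((start_bit + 7) & ~7); i++)
		set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

int main(void)
{
	unsigned char map[4] = { 0 };
	int i;

	mark_end(10, 32, map);		/* bits 10..31 become 1 */
	for (i = 0; i < 4; i++)
		printf("%02x ", map[i]);	/* 00 fc ff ff */
	printf("\n");
	return 0;
}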
-
-/*
- * If we have fewer than thresh credits, extend by EXT3_MAX_TRANS_DATA.
- * If that fails, restart the transaction & regain write access for the
- * buffer head which is used for block_bitmap modifications.
- */
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
-					 struct buffer_head *bh)
-{
-	int err;
-
-	if (handle->h_buffer_credits >= thresh)
-		return 0;
-
-	err = ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA);
-	if (err < 0)
-		return err;
-	if (err) {
-		err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA);
-		if (err)
-			return err;
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
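The ordering in this helper matters: a transaction restart invalidates the
write access taken earlier, so access must be re-obtained before the caller
touches its bitmap buffer again. A toy stand-alone rendering of the idiom,
with the jbd calls replaced by stubs that force the restart path (nothing
here is the real journaling API):

#include <stdio.h>

static int credits = 1;

static int journal_extend(int want)	/* 0 = extended, >0 = cannot extend */
{
	(void)want;
	return 1;			/* pretend the journal is full */
}

static int journal_restart(int want)	{ credits = want; return 0; }
static int get_write_access(void)	{ return 0; }

static int extend_or_restart(int thresh, int want)
{
	int err;

	if (credits >= thresh)
		return 0;
	err = journal_extend(want);
	if (err < 0)
		return err;
	if (err) {			/* could not extend: restart instead */
		err = journal_restart(want);
		if (err)
			return err;
		err = get_write_access(); /* the restart dropped our access */
	}
	return err;
}

int main(void)
{
	printf("extend_or_restart -> %d, credits now %d\n",
	       extend_or_restart(2, 64), credits);
	return 0;
}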
-
-/*
- * Set up the block and inode bitmaps, and the inode table for the new group.
- * This doesn't need to be part of the main transaction, since we are only
- * changing blocks outside the actual filesystem.  We still do journaling to
- * ensure the recovery is correct in case of a failure just after resize.
- * If any part of this fails, we simply abort the resize.
- */
-static int setup_new_group_blocks(struct super_block *sb,
-				  struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group);
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
-	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
-	struct buffer_head *bh;
-	handle_t *handle;
-	ext3_fsblk_t block;
-	ext3_grpblk_t bit;
-	int i;
-	int err = 0, err2;
-
-	/* This transaction may be extended/restarted along the way */
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	if (ext3_bg_has_super(sb, input->group)) {
-		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
-		ext3_set_bit(0, bh->b_data);
-	}
-
-	/* Copy all of the GDT blocks into the backup in this group */
-	for (i = 0, bit = 1, block = start + 1;
-	     i < gdblocks; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		gdb = sb_getblk(sb, block);
-		if (unlikely(!gdb)) {
-			err = -ENOMEM;
-			goto exit_bh;
-		}
-		if ((err = ext3_journal_get_write_access(handle, gdb))) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		lock_buffer(gdb);
-		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
-		set_buffer_uptodate(gdb);
-		unlock_buffer(gdb);
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-
-	/* Zero out all of the reserved backup group descriptor table blocks */
-	for (i = 0, bit = gdblocks + 1, block = start + bit;
-	     i < reserved_gdb; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(gdb = bclean(handle, sb, block))) {
-			err = PTR_ERR(gdb);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
-		   input->block_bitmap - start);
-	ext3_set_bit(input->block_bitmap - start, bh->b_data);
-	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
-		   input->inode_bitmap - start);
-	ext3_set_bit(input->inode_bitmap - start, bh->b_data);
-
-	/* Zero out all of the inode table blocks */
-	for (i = 0, block = input->inode_table, bit = block - start;
-	     i < sbi->s_itb_per_group; i++, bit++, block++) {
-		struct buffer_head *it;
-
-		ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(it = bclean(handle, sb, block))) {
-			err = PTR_ERR(it);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, it);
-		if (err) {
-			brelse(it);
-			goto exit_bh;
-		}
-		brelse(it);
-		ext3_set_bit(bit, bh->b_data);
-	}
-
-	err = extend_or_restart_transaction(handle, 2, bh);
-	if (err)
-		goto exit_bh;
-
-	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		goto exit_bh;
-	brelse(bh);
-
-	/* Mark unused entries in inode bitmap used */
-	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
-		   input->inode_bitmap, input->inode_bitmap - start);
-	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-exit_bh:
-	brelse(bh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	return err;
-}
-
-/*
- * Iterate through the groups which hold BACKUP superblock/GDT copies in an
- * ext3 filesystem.  The counters should be initialized to 1, 5, and 7 before
- * calling this for the first time.  In a sparse filesystem it will be the
- * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
- * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
- */
-static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
-				  unsigned *five, unsigned *seven)
-{
-	unsigned *min = three;
-	int mult = 3;
-	unsigned ret;
-
-	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ret = *min;
-		*min += 1;
-		return ret;
-	}
-
-	if (*five < *min) {
-		min = five;
-		mult = 5;
-	}
-	if (*seven < *min) {
-		min = seven;
-		mult = 7;
-	}
-
-	ret = *min;
-	*min *= mult;
-
-	return ret;
-}
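Run in isolation, the three cursors above merge the powers of 3, 5 and 7 into
one sorted stream. A stand-alone sketch that prints the backup groups a
sparse filesystem would use (the non-sparse branch is omitted here):

#include <stdio.h>

static unsigned list_backups(unsigned *three, unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	unsigned mult = 3, ret;

	if (*five < *min) { min = five; mult = 5; }
	if (*seven < *min) { min = seven; mult = 7; }
	ret = *min;
	*min *= mult;
	return ret;
}

int main(void)
{
	unsigned three = 1, five = 5, seven = 7, grp;

	while ((grp = list_backups(&three, &five, &seven)) < 100)
		printf("%u ", grp);	/* 1 3 5 7 9 25 27 49 81 */
	printf("\n");
	return 0;
}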
-
-/*
- * Check that all of the backup GDT blocks are held in the primary GDT block.
- * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or a negative
 * error code.
- */
-static int verify_reserved_gdb(struct super_block *sb,
-			       struct buffer_head *primary)
-{
-	const ext3_fsblk_t blk = primary->b_blocknr;
-	const unsigned long end = EXT3_SB(sb)->s_groups_count;
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned grp;
-	__le32 *p = (__le32 *)primary->b_data;
-	int gdbackups = 0;
-
-	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
-		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
-			ext3_warning(sb, __func__,
-				     "reserved GDT "E3FSBLK
-				     " missing grp %d ("E3FSBLK")",
-				     blk, grp,
-				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
-			return -EINVAL;
-		}
-		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
-			return -EFBIG;
-	}
-
-	return gdbackups;
-}
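The invariant being checked is positional: the backup of a reserved GDT block
in group grp must sit at the same offset within its group as the primary copy
does within group 0, i.e. at grp * blocks_per_group + the primary block
number. A stand-alone sketch with invented geometry:

#include <stdio.h>

int main(void)
{
	unsigned long blocks_per_group = 32768, primary_blk = 260;
	unsigned grps[] = { 1, 3, 5, 7, 9 };	/* first sparse backup groups */
	unsigned i;

	for (i = 0; i < sizeof(grps) / sizeof(grps[0]); i++)
		printf("group %u: backup of block %lu at %lu\n",
		       grps[i], primary_blk,
		       grps[i] * blocks_per_group + primary_blk);
	return 0;
}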
-
-/*
- * Called when we need to bring a reserved group descriptor table block into
- * use from the resize inode.  The primary copy of the new GDT block currently
- * is an indirect block (under the double indirect block in the resize inode).
- * The new backup GDT blocks will be stored as leaf blocks in this indirect
- * block, in group order.  Even though we know all the block numbers we need,
- * we check to ensure that the resize inode has actually reserved these blocks.
- *
- * Don't need to update the block bitmaps because the blocks are still in use.
- *
- * We get all of the error cases out of the way, so that we are sure to not
- * fail once we start modifying the data on disk, because JBD has no rollback.
- */
-static int add_new_gdb(handle_t *handle, struct inode *inode,
-		       struct ext3_new_group_data *input,
-		       struct buffer_head **primary)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	ext3_fsblk_t gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
-	struct buffer_head **o_group_desc, **n_group_desc;
-	struct buffer_head *dind;
-	int gdbackups;
-	struct ext3_iloc iloc;
-	__le32 *data;
-	int err;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG
-		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
-		       gdb_num);
-
-	/*
-	 * If we are not using the primary superblock/GDT copy don't resize,
-	 * because the user tools have no way of handling this.  Probably a
-	 * bad time to do it anyway.
-	 */
-	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
-	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
-		ext3_warning(sb, __func__,
-			"won't resize using backup superblock at %llu",
-			(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
-		return -EPERM;
-	}
-
-	*primary = sb_bread(sb, gdblock);
-	if (!*primary)
-		return -EIO;
-
-	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
-		err = gdbackups;
-		goto exit_bh;
-	}
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_bh;
-	}
-
-	data = (__le32 *)dind->b_data;
-	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
-		ext3_warning(sb, __func__,
-			     "new group %u GDT block "E3FSBLK" not reserved",
-			     input->group, gdblock);
-		err = -EINVAL;
-		goto exit_dind;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
-		goto exit_dind;
-
-	if ((err = ext3_journal_get_write_access(handle, *primary)))
-		goto exit_sbh;
-
-	if ((err = ext3_journal_get_write_access(handle, dind)))
-		goto exit_primary;
-
-	/* ext3_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_dindj;
-
-	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-			GFP_NOFS);
-	if (!n_group_desc) {
-		err = -ENOMEM;
-		ext3_warning (sb, __func__,
-			      "not enough memory for %lu groups", gdb_num + 1);
-		goto exit_inode;
-	}
-
-	/*
-	 * Finally, we have all of the possible failures behind us...
-	 *
-	 * Remove new GDT block from inode double-indirect block and clear out
-	 * the new GDT block for use (which also "frees" the backup GDT blocks
-	 * from the reserved inode).  We don't need to change the bitmaps for
-	 * these blocks, because they are marked as in-use from being in the
-	 * reserved inode, and will become GDT blocks (primary and backup).
-	 */
-	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	err = ext3_journal_dirty_metadata(handle, dind);
-	if (err)
-		goto exit_group_desc;
-	brelse(dind);
-	dind = NULL;
-	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (err)
-		goto exit_group_desc;
-	memset((*primary)->b_data, 0, sb->s_blocksize);
-	err = ext3_journal_dirty_metadata(handle, *primary);
-	if (err)
-		goto exit_group_desc;
-
-	o_group_desc = EXT3_SB(sb)->s_group_desc;
-	memcpy(n_group_desc, o_group_desc,
-	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
-	n_group_desc[gdb_num] = *primary;
-	EXT3_SB(sb)->s_group_desc = n_group_desc;
-	EXT3_SB(sb)->s_gdb_count++;
-	kfree(o_group_desc);
-
-	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto exit_inode;
-
-	return 0;
-
-exit_group_desc:
-	kfree(n_group_desc);
-exit_inode:
-	//ext3_journal_release_buffer(handle, iloc.bh);
-	brelse(iloc.bh);
-exit_dindj:
-	//ext3_journal_release_buffer(handle, dind);
-exit_primary:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_sbh:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_dind:
-	brelse(dind);
-exit_bh:
-	brelse(*primary);
-
-	ext3_debug("leaving with error %d\n", err);
-	return err;
-}
-
-/*
- * Called when we are adding a new group which has a backup copy of each of
- * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
- * We need to add these reserved backup GDT blocks to the resize inode, so
- * that they are kept for future resizing and not allocated to files.
- *
- * Each reserved backup GDT block will go into a different indirect block.
- * The indirect blocks are actually the primary reserved GDT blocks,
- * so we know in advance what their block numbers are.  We only get the
- * double-indirect block to verify it is pointing to the primary reserved
- * GDT blocks so we don't overwrite a data block by accident.  The reserved
- * backup GDT blocks are stored in their reserved primary GDT block.
- */
-static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
-			      struct ext3_new_group_data *input)
-{
-	struct super_block *sb = inode->i_sb;
-	int reserved_gdb =le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
-	struct buffer_head **primary;
-	struct buffer_head *dind;
-	struct ext3_iloc iloc;
-	ext3_fsblk_t blk;
-	__le32 *data, *end;
-	int gdbackups = 0;
-	int res, i;
-	int err;
-
-	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
-	if (!primary)
-		return -ENOMEM;
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_free;
-	}
-
-	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
-	data = (__le32 *)dind->b_data + (EXT3_SB(sb)->s_gdb_count %
-					 EXT3_ADDR_PER_BLOCK(sb));
-	end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
-
-	/* Get each reserved primary GDT block and verify it holds backups */
-	for (res = 0; res < reserved_gdb; res++, blk++) {
-		if (le32_to_cpu(*data) != blk) {
-			ext3_warning(sb, __func__,
-				     "reserved block "E3FSBLK
-				     " not at offset %ld",
-				     blk,
-				     (long)(data - (__le32 *)dind->b_data));
-			err = -EINVAL;
-			goto exit_bh;
-		}
-		primary[res] = sb_bread(sb, blk);
-		if (!primary[res]) {
-			err = -EIO;
-			goto exit_bh;
-		}
-		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
-			brelse(primary[res]);
-			err = gdbackups;
-			goto exit_bh;
-		}
-		if (++data >= end)
-			data = (__le32 *)dind->b_data;
-	}
-
-	for (i = 0; i < reserved_gdb; i++) {
-		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
-			/*
-			int j;
-			for (j = 0; j < i; j++)
-				ext3_journal_release_buffer(handle, primary[j]);
-			 */
-			goto exit_bh;
-		}
-	}
-
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_bh;
-
-	/*
-	 * Finally we can add each of the reserved backup GDT blocks from
-	 * the new group to its reserved primary GDT block.
-	 */
-	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
-	for (i = 0; i < reserved_gdb; i++) {
-		int err2;
-		data = (__le32 *)primary[i]->b_data;
-		/* printk("reserving backup %lu[%u] = %lu\n",
-		       primary[i]->b_blocknr, gdbackups,
-		       blk + primary[i]->b_blocknr); */
-		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
-		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
-		if (!err)
-			err = err2;
-	}
-	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-exit_bh:
-	while (--res >= 0)
-		brelse(primary[res]);
-	brelse(dind);
-
-exit_free:
-	kfree(primary);
-
-	return err;
-}
-
-/*
- * Update the backup copies of the ext3 metadata.  These don't need to be part
- * of the main resize transaction, because e2fsck will re-write them if there
- * is a problem (basically only OOM will cause a problem).  However, we
- * _should_ update the backups if possible, in case the primary gets trashed
- * for some reason and we need to run e2fsck from a backup superblock.  The
- * important part is that the new block and inode counts are in the backup
- * superblocks, and the location of the new group metadata in the GDT backups.
- *
 * We do not need to take the s_resize_lock for this, because these
- * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about 'last' lagging behind
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will back them up again.
- */
-static void update_backups(struct super_block *sb,
-			   int blk_off, char *data, int size)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	const unsigned long last = sbi->s_groups_count;
-	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned group;
-	int rest = sb->s_blocksize - size;
-	handle_t *handle;
-	int err = 0, err2;
-
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-	if (IS_ERR(handle)) {
-		group = 1;
-		err = PTR_ERR(handle);
-		goto exit_err;
-	}
-
-	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
-		struct buffer_head *bh;
-
-		/* Out of journal space, and can't get more - abort - so sad */
-		if (handle->h_buffer_credits == 0 &&
-		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
-		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
-			break;
-
-		bh = sb_getblk(sb, group * bpg + blk_off);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			break;
-		}
-		ext3_debug("update metadata backup %#04lx\n",
-			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh))) {
-			brelse(bh);
-			break;
-		}
-		lock_buffer(bh);
-		memcpy(bh->b_data, data, size);
-		if (rest)
-			memset(bh->b_data + size, 0, rest);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		err = ext3_journal_dirty_metadata(handle, bh);
-		brelse(bh);
-		if (err)
-			break;
-	}
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	/*
-	 * Ugh! Need to have e2fsck write the backup copies.  It is too
-	 * late to revert the resize, we shouldn't fail just because of
-	 * the backup copies (they are only needed in case of corruption).
-	 *
-	 * However, if we got here we have a journal problem too, so we
-	 * can't really start a transaction to mark the superblock.
-	 * Chicken out and just set the flag on the hope it will be written
-	 * to disk, and if not - we will simply wait until next fsck.
-	 */
-exit_err:
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "can't update backup for group %d (err %d), "
-			     "forcing fsck on next reboot", group, err);
-		sbi->s_mount_state &= ~EXT3_VALID_FS;
-		sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-		mark_buffer_dirty(sbi->s_sbh);
-	}
-}
-
-/* Add group descriptor data to an existing or new group descriptor block.
- * Ensure we handle all possible error conditions _before_ we start modifying
- * the filesystem, because we cannot abort the transaction and not have it
- * write the data to disk.
- *
- * If we are on a GDT block boundary, we need to get the reserved GDT block.
- * Otherwise, we may need to add backup GDT blocks for a sparse group.
- *
- * We only need to hold the superblock lock while we are actually adding
- * in the new group's counts to the superblock.  Prior to that we have
- * not really "added" the group at all.  We re-check that we are still
- * adding in the last group in case things have changed since verifying.
- */
-int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
-	struct buffer_head *primary = NULL;
-	struct ext3_group_desc *gdp;
-	struct inode *inode = NULL;
-	handle_t *handle;
-	int gdb_off, gdb_num;
-	int err, err2;
-
-	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);
-
-	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ext3_warning(sb, __func__,
-			     "Can't resize non-sparse filesystem further");
-		return -EPERM;
-	}
-
-	if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
-	    le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__, "blocks_count overflow");
-		return -EINVAL;
-	}
-
-	if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
-	    le32_to_cpu(es->s_inodes_count)) {
-		ext3_warning(sb, __func__, "inodes_count overflow");
-		return -EINVAL;
-	}
-
-	if (reserved_gdb || gdb_off == 0) {
-		if (!EXT3_HAS_COMPAT_FEATURE(sb,
-					     EXT3_FEATURE_COMPAT_RESIZE_INODE)
-		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
-			ext3_warning(sb, __func__,
-				     "No reserved GDT blocks, can't resize");
-			return -EPERM;
-		}
-		inode = ext3_iget(sb, EXT3_RESIZE_INO);
-		if (IS_ERR(inode)) {
-			ext3_warning(sb, __func__,
-				     "Error opening resize inode");
-			return PTR_ERR(inode);
-		}
-	}
-
-	if ((err = verify_group_input(sb, input)))
-		goto exit_put;
-
-	if ((err = setup_new_group_blocks(sb, input)))
-		goto exit_put;
-
-	/*
-	 * We will always be modifying at least the superblock and a GDT
-	 * block.  If we are adding a group past the last current GDT block,
-	 * we will also modify the inode and the dindirect block.  If we
-	 * are adding a group with superblock/GDT backups, we will also
-	 * modify each of the reserved GDT dindirect blocks.
-	 */
-	handle = ext3_journal_start_sb(sb,
-				       ext3_bg_has_super(sb, input->group) ?
-				       3 + reserved_gdb : 4);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		goto exit_put;
-	}
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
-		goto exit_journal;
-
-	/*
-	 * We will only either add reserved group blocks to a backup group
-	 * or remove reserved blocks for the first group in a new group block.
-	 * Doing both would mean more complex code, and sane people don't
-	 * use non-sparse filesystems anymore.  This is already checked above.
-	 */
-	if (gdb_off) {
-		primary = sbi->s_group_desc[gdb_num];
-		if ((err = ext3_journal_get_write_access(handle, primary)))
-			goto exit_journal;
-
-		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
-		    (err = reserve_backup_gdb(handle, inode, input)))
-			goto exit_journal;
-	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
-		goto exit_journal;
-
-	/*
-	 * OK, now we've set up the new group.  Time to make it active.
-	 *
-	 * We do not lock all allocations via s_resize_lock,
-	 * so we have to be safe wrt. concurrent accesses to the group
-	 * data.  So we need to be careful to set all of the relevant
-	 * group descriptor data etc. *before* we enable the group.
-	 *
-	 * The key field here is sbi->s_groups_count: as long as
-	 * that retains its old value, nobody is going to access the new
-	 * group.
-	 *
-	 * So first we update all the descriptor metadata for the new
-	 * group; then we update the total disk blocks count; then we
-	 * update the groups count to enable the group; then finally we
-	 * update the free space counts so that the system can start
-	 * using the new disk blocks.
-	 */
-
-	/* Update group descriptor block for new group */
-	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;
-
-	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
-	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
-	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
-	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
-	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * Make the new blocks and inodes valid next.  We do this before
-	 * increasing the group count so that once the group is enabled,
-	 * all of its blocks and inodes are already valid.
-	 *
-	 * We always allocate group-by-group, then block-by-block or
-	 * inode-by-inode within a group, so enabling these
-	 * blocks/inodes before the group is live won't actually let us
-	 * allocate the new space yet.
-	 */
-	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
-	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * We need to protect s_groups_count against other CPUs seeing
-	 * inconsistent state in the superblock.
-	 *
-	 * The precise rules we use are:
-	 *
-	 * * Writers of s_groups_count *must* hold s_resize_lock
-	 * AND
-	 * * Writers must perform a smp_wmb() after updating all dependent
-	 *   data and before modifying the groups count
-	 *
-	 * * Readers must hold s_resize_lock over the access
-	 * OR
-	 * * Readers must perform an smp_rmb() after reading the groups count
-	 *   and before reading any dependent data.
-	 *
-	 * NB. These rules can be relaxed when checking the group count
-	 * while freeing data, as we can only allocate from a block
-	 * group after serialising against the group count, and we can
-	 * only then free after serialising in turn against that
-	 * allocation.
-	 */
-	smp_wmb();
-
-	/* Update the global fs size fields */
-	sbi->s_groups_count++;
-
-	err = ext3_journal_dirty_metadata(handle, primary);
-	if (err)
-		goto exit_journal;
-
-	/* Update the reserved block counts only once the new group is
-	 * active. */
-	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
-
-	/* Update the free space counts */
-	percpu_counter_add(&sbi->s_freeblocks_counter,
-			   input->free_blocks_count);
-	percpu_counter_add(&sbi->s_freeinodes_counter,
-			   EXT3_INODES_PER_GROUP(sb));
-
-	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-	if (!err) {
-		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
-			       sizeof(struct ext3_super_block));
-		update_backups(sb, primary->b_blocknr, primary->b_data,
-			       primary->b_size);
-	}
-exit_put:
-	iput(inode);
-	return err;
-} /* ext3_group_add */
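The ordering rules spelled out inside ext3_group_add() are easiest to check
from the reader's side.  A minimal sketch of a lockless reader pairing with
that smp_wmb() (not from the ext3 sources; the function name is hypothetical):

	static unsigned long snapshot_group_count(struct ext3_sb_info *sbi)
	{
		unsigned long ngroups = sbi->s_groups_count;

		/* pairs with the smp_wmb() before s_groups_count++ above */
		smp_rmb();
		/* descriptor data for groups 0..ngroups-1 is now visible */
		return ngroups;
	}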
-
-/* Extend the filesystem to the new number of blocks specified.  This entry
- * point is only used to extend the current filesystem to the end of the last
- * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
- * for emergencies (because it has no dependencies on reserved blocks).
- *
- * If we _really_ wanted, we could use default values to call ext3_group_add()
- * to allow the "remount" trick to work for arbitrary resizing, assuming enough
- * GDT blocks are reserved to grow to the desired size.
- */
-int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
-		      ext3_fsblk_t n_blocks_count)
-{
-	ext3_fsblk_t o_blocks_count;
-	ext3_grpblk_t last;
-	ext3_grpblk_t add;
-	struct buffer_head * bh;
-	handle_t *handle;
-	int err;
-	unsigned long freed_blocks;
-
-	/* We don't need to worry about locking wrt other resizers just
-	 * yet: we're going to revalidate es->s_blocks_count after
-	 * taking the s_resize_lock below. */
-	o_blocks_count = le32_to_cpu(es->s_blocks_count);
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
-		       " up to "E3FSBLK" blocks\n",
-		       o_blocks_count, n_blocks_count);
-
-	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
-		return 0;
-
-	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-		printk(KERN_ERR "EXT3-fs: filesystem on %s:"
-			" too large to resize to "E3FSBLK" blocks safely\n",
-			sb->s_id, n_blocks_count);
-		if (sizeof(sector_t) < 8)
-			ext3_warning(sb, __func__,
-			"CONFIG_LBDAF not enabled\n");
-		return -EINVAL;
-	}
-
-	if (n_blocks_count < o_blocks_count) {
-		ext3_warning(sb, __func__,
-			     "can't shrink FS - resize aborted");
-		return -EBUSY;
-	}
-
-	/* Handle the remaining blocks in the last group only. */
-	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
-		EXT3_BLOCKS_PER_GROUP(sb);
-
-	if (last == 0) {
-		ext3_warning(sb, __func__,
-			     "need to use ext2online to resize further");
-		return -EPERM;
-	}
-
-	add = EXT3_BLOCKS_PER_GROUP(sb) - last;
-
-	if (o_blocks_count + add < o_blocks_count) {
-		ext3_warning(sb, __func__, "blocks_count overflow");
-		return -EINVAL;
-	}
-
-	if (o_blocks_count + add > n_blocks_count)
-		add = n_blocks_count - o_blocks_count;
-
-	if (o_blocks_count + add < n_blocks_count)
-		ext3_warning(sb, __func__,
-			     "will only finish group ("E3FSBLK
-			     " blocks, %u new)",
-			     o_blocks_count + add, add);
-
-	/* See if the device is actually as big as what was requested */
-	bh = sb_bread(sb, o_blocks_count + add - 1);
-	if (!bh) {
-		ext3_warning(sb, __func__,
-			     "can't read last block, resize aborted");
-		return -ENOSPC;
-	}
-	brelse(bh);
-
-	/* We will update the superblock, one block bitmap, and
-	 * one group descriptor via ext3_free_blocks().
-	 */
-	handle = ext3_journal_start_sb(sb, 3);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		ext3_warning(sb, __func__, "error %d on journal start",err);
-		goto exit_put;
-	}
-
-	mutex_lock(&EXT3_SB(sb)->s_resize_lock);
-	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		err = -EBUSY;
-		goto exit_put;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle,
-						 EXT3_SB(sb)->s_sbh))) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal write access", err);
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal dirty metadata", err);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
-	ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	if ((err = ext3_journal_stop(handle)))
-		goto exit_put;
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
-		       le32_to_cpu(es->s_blocks_count));
-	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
-		       sizeof(struct ext3_super_block));
-exit_put:
-	return err;
-} /* ext3_group_extend */
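A worked example of the clamping in ext3_group_extend(), with illustrative
numbers only (8192 blocks per group, s_first_data_block = 1):

	/*
	 * With o_blocks_count = 10193 the last group already holds
	 * last = (10193 - 1) % 8192 = 2000 blocks, so at most
	 * add = 8192 - 2000 = 6192 blocks can be appended.  A request for
	 * n_blocks_count = 12193 is clamped to add = 2000, while a request
	 * for n_blocks_count = 20000 crosses the group boundary, triggers
	 * the "will only finish group" warning and grows the fs to 16385
	 * blocks.
	 */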
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
deleted file mode 100644
index 5ed0044..0000000
--- a/fs/ext3/super.c
+++ /dev/null
@@ -1,3165 +0,0 @@
-/*
- *  linux/fs/ext3/super.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/parser.h>
-#include <linux/exportfs.h>
-#include <linux/statfs.h>
-#include <linux/random.h>
-#include <linux/mount.h>
-#include <linux/quotaops.h>
-#include <linux/seq_file.h>
-#include <linux/log2.h>
-#include <linux/cleancache.h>
-#include <linux/namei.h>
-
-#include <asm/uaccess.h>
-
-#define CREATE_TRACE_POINTS
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-#include "namei.h"
-
-#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
-#else
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA
-#endif
-
-static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
-			     unsigned long journal_devnum);
-static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
-			       unsigned int);
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync);
-static void ext3_mark_recovery_complete(struct super_block * sb,
-					struct ext3_super_block * es);
-static void ext3_clear_journal_err(struct super_block * sb,
-				   struct ext3_super_block * es);
-static int ext3_sync_fs(struct super_block *sb, int wait);
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16]);
-static int ext3_remount (struct super_block * sb, int * flags, char * data);
-static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
-static int ext3_unfreeze(struct super_block *sb);
-static int ext3_freeze(struct super_block *sb);
-
-/*
- * Wrappers for journal_start/end.
- */
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
-{
-	journal_t *journal;
-
-	if (sb->s_flags & MS_RDONLY)
-		return ERR_PTR(-EROFS);
-
-	/* Special case here: if the journal has aborted behind our
-	 * backs (e.g. EIO in the commit thread), then we still need to
-	 * take the FS itself readonly cleanly. */
-	journal = EXT3_SB(sb)->s_journal;
-	if (is_journal_aborted(journal)) {
-		ext3_abort(sb, __func__,
-			   "Detected aborted journal");
-		return ERR_PTR(-EROFS);
-	}
-
-	return journal_start(journal, nblocks);
-}
-
-int __ext3_journal_stop(const char *where, handle_t *handle)
-{
-	struct super_block *sb;
-	int err;
-	int rc;
-
-	sb = handle->h_transaction->t_journal->j_private;
-	err = handle->h_err;
-	rc = journal_stop(handle);
-
-	if (!err)
-		err = rc;
-	if (err)
-		__ext3_std_error(sb, where, err);
-	return err;
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err)
-{
-	char nbuf[16];
-	const char *errstr = ext3_decode_error(NULL, err, nbuf);
-
-	if (bh)
-		BUFFER_TRACE(bh, "abort");
-
-	if (!handle->h_err)
-		handle->h_err = err;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n",
-		caller, errstr, err_fn);
-
-	journal_abort_handle(handle);
-}
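For context, ext3_journal_abort_handle() is invoked from the journalling
wrappers in fs/ext3/ext3_jbd.c (removed elsewhere in this patch); a
representative caller looks roughly like this sketch:

	int __ext3_journal_get_write_access(const char *where, handle_t *handle,
					    struct buffer_head *bh)
	{
		int err = journal_get_write_access(handle, bh);

		if (err)
			ext3_journal_abort_handle(where, __func__, bh,
						  handle, err);
		return err;
	}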
-
-void ext3_msg(struct super_block *sb, const char *prefix,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
-
-	va_end(args);
-}
-
-/* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
- * On ext2, we can store the error state of the filesystem in the
- * superblock.  That is not possible on ext3, because we may have other
- * write ordering constraints on the superblock which prevent us from
- * writing it out straight away; and given that the journal is about to
- * be aborted, we can't rely on the current, or future, transactions to
- * write out the superblock safely.
- *
- * We'll just use the journal_abort() error code to record an error in
- * the journal instead.  On recovery, the journal will complain about
- * that error until we've noted it down and cleared it.
- */
-
-static void ext3_handle_error(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	if (!test_opt (sb, ERRORS_CONT)) {
-		journal_t *journal = EXT3_SB(sb)->s_journal;
-
-		set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-		if (journal)
-			journal_abort(journal, -EIO);
-	}
-	if (test_opt (sb, ERRORS_RO)) {
-		ext3_msg(sb, KERN_CRIT,
-			"error: remounting filesystem read-only");
-		/*
-		 * Make sure updated value of ->s_mount_state will be visible
-		 * before ->s_flags update.
-		 */
-		smp_wmb();
-		sb->s_flags |= MS_RDONLY;
-	}
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs (%s): panic forced after error\n",
-			sb->s_id);
-}
-
-void ext3_error(struct super_block *sb, const char *function,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	ext3_handle_error(sb);
-}
-
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16])
-{
-	char *errstr = NULL;
-
-	switch (errno) {
-	case -EIO:
-		errstr = "IO failure";
-		break;
-	case -ENOMEM:
-		errstr = "Out of memory";
-		break;
-	case -EROFS:
-		if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT)
-			errstr = "Journal has aborted";
-		else
-			errstr = "Readonly filesystem";
-		break;
-	default:
-		/* If the caller passed in an extra buffer for unknown
-		 * errors, textualise them now.  Else we just return
-		 * NULL. */
-		if (nbuf) {
-			/* Check for truncated error codes... */
-			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
-				errstr = nbuf;
-		}
-		break;
-	}
-
-	return errstr;
-}
-
-/* __ext3_std_error decodes expected errors from journaling functions
- * automatically and invokes the appropriate error response.  */
-
-void __ext3_std_error (struct super_block * sb, const char * function,
-		       int errno)
-{
-	char nbuf[16];
-	const char *errstr;
-
-	/* Special case: if the error is EROFS, and we're not already
-	 * inside a transaction, then there's really no point in logging
-	 * an error. */
-	if (errno == -EROFS && journal_current_handle() == NULL &&
-	    (sb->s_flags & MS_RDONLY))
-		return;
-
-	errstr = ext3_decode_error(sb, errno, nbuf);
-	ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr);
-
-	ext3_handle_error(sb);
-}
-
-/*
- * ext3_abort is a much stronger failure handler than ext3_error.  The
- * abort function may be used to deal with unrecoverable failures such
- * as journal IO errors or ENOMEM at a critical moment in log management.
- *
- * We unconditionally force the filesystem into an ABORT|READONLY state,
- * unless the error response on the fs has been set to panic in which
- * case we take the easy way out and panic immediately.
- */
-
-void ext3_abort(struct super_block *sb, const char *function,
-		 const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs: panic from previous error\n");
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	ext3_msg(sb, KERN_CRIT,
-		"error: remounting filesystem read-only");
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-	/*
-	 * Make sure updated value of ->s_mount_state will be visible
-	 * before ->s_flags update.
-	 */
-	smp_wmb();
-	sb->s_flags |= MS_RDONLY;
-
-	if (EXT3_SB(sb)->s_journal)
-		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
-}
-
-void ext3_warning(struct super_block *sb, const char *function,
-		  const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-}
-
-void ext3_update_dynamic_rev(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
-		return;
-
-	ext3_msg(sb, KERN_WARNING,
-		"warning: updating to rev %d because of "
-		"new feature flag, running e2fsck is recommended",
-		EXT3_DYNAMIC_REV);
-
-	es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO);
-	es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE);
-	es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV);
-	/* leave es->s_feature_*compat flags alone */
-	/* es->s_uuid will be set by e2fsck if empty */
-
-	/*
-	 * The rest of the superblock fields should be zero, and if not it
-	 * means they are likely already in use, so leave them alone.  We
-	 * can leave it up to e2fsck to clean up any inconsistencies there.
-	 */
-}
-
-/*
- * Open the external journal device
- */
-static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
-{
-	struct block_device *bdev;
-	char b[BDEVNAME_SIZE];
-
-	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
-	if (IS_ERR(bdev))
-		goto fail;
-	return bdev;
-
-fail:
-	ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld",
-		__bdevname(dev, b), PTR_ERR(bdev));
-
-	return NULL;
-}
-
-/*
- * Release the journal device
- */
-static void ext3_blkdev_put(struct block_device *bdev)
-{
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-}
-
-static void ext3_blkdev_remove(struct ext3_sb_info *sbi)
-{
-	struct block_device *bdev;
-	bdev = sbi->journal_bdev;
-	if (bdev) {
-		ext3_blkdev_put(bdev);
-		sbi->journal_bdev = NULL;
-	}
-}
-
-static inline struct inode *orphan_list_entry(struct list_head *l)
-{
-	return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode;
-}
-
-static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
-{
-	struct list_head *l;
-
-	ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d",
-	       le32_to_cpu(sbi->s_es->s_last_orphan));
-
-	ext3_msg(sb, KERN_ERR, "sb_info orphan list:");
-	list_for_each(l, &sbi->s_orphan) {
-		struct inode *inode = orphan_list_entry(l);
-		ext3_msg(sb, KERN_ERR, "  "
-		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
-		       inode->i_sb->s_id, inode->i_ino, inode,
-		       inode->i_mode, inode->i_nlink,
-		       NEXT_ORPHAN(inode));
-	}
-}
-
-static void ext3_put_super (struct super_block * sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int i, err;
-
-	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-	ext3_xattr_put_super(sb);
-	err = journal_destroy(sbi->s_journal);
-	sbi->s_journal = NULL;
-	if (err < 0)
-		ext3_abort(sb, __func__, "Couldn't clean up the journal");
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		es->s_state = cpu_to_le16(sbi->s_mount_state);
-		BUFFER_TRACE(sbi->s_sbh, "marking dirty");
-		mark_buffer_dirty(sbi->s_sbh);
-		ext3_commit_super(sb, es, 1);
-	}
-
-	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	brelse(sbi->s_sbh);
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-
-	/* Debugging code just in case the in-memory inode orphan list
-	 * isn't empty.  The on-disk one can be non-empty if we've
-	 * detected an error and taken the fs readonly, but the
-	 * in-memory list had better be clean by this point. */
-	if (!list_empty(&sbi->s_orphan))
-		dump_orphan_list(sb, sbi);
-	J_ASSERT(list_empty(&sbi->s_orphan));
-
-	invalidate_bdev(sb->s_bdev);
-	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
-		/*
-		 * Invalidate the journal device's buffers.  We don't want them
-		 * floating about in memory - the physical journal device may
-		 * be hotswapped, and it breaks the `ro-after' testing code.
-		 */
-		sync_blockdev(sbi->journal_bdev);
-		invalidate_bdev(sbi->journal_bdev);
-		ext3_blkdev_remove(sbi);
-	}
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	mutex_destroy(&sbi->s_orphan_lock);
-	mutex_destroy(&sbi->s_resize_lock);
-	kfree(sbi);
-}
-
-static struct kmem_cache *ext3_inode_cachep;
-
-/*
- * Called inside a transaction, so use GFP_NOFS
- */
-static struct inode *ext3_alloc_inode(struct super_block *sb)
-{
-	struct ext3_inode_info *ei;
-
-	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
-	if (!ei)
-		return NULL;
-	ei->i_block_alloc_info = NULL;
-	ei->vfs_inode.i_version = 1;
-	atomic_set(&ei->i_datasync_tid, 0);
-	atomic_set(&ei->i_sync_tid, 0);
-#ifdef CONFIG_QUOTA
-	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
-#endif
-
-	return &ei->vfs_inode;
-}
-
-static int ext3_drop_inode(struct inode *inode)
-{
-	int drop = generic_drop_inode(inode);
-
-	trace_ext3_drop_inode(inode, drop);
-	return drop;
-}
-
-static void ext3_i_callback(struct rcu_head *head)
-{
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
-}
-
-static void ext3_destroy_inode(struct inode *inode)
-{
-	if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
-		printk("EXT3 Inode %p: orphan list check failed!\n",
-			EXT3_I(inode));
-		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
-				EXT3_I(inode), sizeof(struct ext3_inode_info),
-				false);
-		dump_stack();
-	}
-	call_rcu(&inode->i_rcu, ext3_i_callback);
-}
-
-static void init_once(void *foo)
-{
-	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
-
-	INIT_LIST_HEAD(&ei->i_orphan);
-#ifdef CONFIG_EXT3_FS_XATTR
-	init_rwsem(&ei->xattr_sem);
-#endif
-	mutex_init(&ei->truncate_mutex);
-	inode_init_once(&ei->vfs_inode);
-}
-
-static int __init init_inodecache(void)
-{
-	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
-					     sizeof(struct ext3_inode_info),
-					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once);
-	if (ext3_inode_cachep == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-static void destroy_inodecache(void)
-{
-	/*
-	 * Make sure all delayed rcu free inodes are flushed before we
-	 * destroy cache.
-	 */
-	rcu_barrier();
-	kmem_cache_destroy(ext3_inode_cachep);
-}
-
-static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
-{
-#if defined(CONFIG_QUOTA)
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_jquota_fmt) {
-		char *fmtname = "";
-
-		switch (sbi->s_jquota_fmt) {
-		case QFMT_VFS_OLD:
-			fmtname = "vfsold";
-			break;
-		case QFMT_VFS_V0:
-			fmtname = "vfsv0";
-			break;
-		case QFMT_VFS_V1:
-			fmtname = "vfsv1";
-			break;
-		}
-		seq_printf(seq, ",jqfmt=%s", fmtname);
-	}
-
-	if (sbi->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
-
-	if (sbi->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
-
-	if (test_opt(sb, USRQUOTA))
-		seq_puts(seq, ",usrquota");
-
-	if (test_opt(sb, GRPQUOTA))
-		seq_puts(seq, ",grpquota");
-#endif
-}
-
-static char *data_mode_string(unsigned long mode)
-{
-	switch (mode) {
-	case EXT3_MOUNT_JOURNAL_DATA:
-		return "journal";
-	case EXT3_MOUNT_ORDERED_DATA:
-		return "ordered";
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		return "writeback";
-	}
-	return "unknown";
-}
-
-/*
- * Show an option if
- *  - it's set to a non-default value OR
- *  - if the per-sb default is different from the global default
- */
-static int ext3_show_options(struct seq_file *seq, struct dentry *root)
-{
-	struct super_block *sb = root->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	unsigned long def_mount_opts;
-
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-
-	if (sbi->s_sb_block != 1)
-		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
-	if (test_opt(sb, MINIX_DF))
-		seq_puts(seq, ",minixdf");
-	if (test_opt(sb, GRPID))
-		seq_puts(seq, ",grpid");
-	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS))
-		seq_puts(seq, ",nogrpid");
-	if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) ||
-	    le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) {
-		seq_printf(seq, ",resuid=%u",
-				from_kuid_munged(&init_user_ns, sbi->s_resuid));
-	}
-	if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) ||
-	    le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
-		seq_printf(seq, ",resgid=%u",
-				from_kgid_munged(&init_user_ns, sbi->s_resgid));
-	}
-	if (test_opt(sb, ERRORS_RO)) {
-		int def_errors = le16_to_cpu(es->s_errors);
-
-		if (def_errors == EXT3_ERRORS_PANIC ||
-		    def_errors == EXT3_ERRORS_CONTINUE) {
-			seq_puts(seq, ",errors=remount-ro");
-		}
-	}
-	if (test_opt(sb, ERRORS_CONT))
-		seq_puts(seq, ",errors=continue");
-	if (test_opt(sb, ERRORS_PANIC))
-		seq_puts(seq, ",errors=panic");
-	if (test_opt(sb, NO_UID32))
-		seq_puts(seq, ",nouid32");
-	if (test_opt(sb, DEBUG))
-		seq_puts(seq, ",debug");
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (test_opt(sb, XATTR_USER))
-		seq_puts(seq, ",user_xattr");
-	if (!test_opt(sb, XATTR_USER) &&
-	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
-		seq_puts(seq, ",nouser_xattr");
-	}
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (test_opt(sb, POSIX_ACL))
-		seq_puts(seq, ",acl");
-	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
-		seq_puts(seq, ",noacl");
-#endif
-	if (!test_opt(sb, RESERVATION))
-		seq_puts(seq, ",noreservation");
-	if (sbi->s_commit_interval) {
-		seq_printf(seq, ",commit=%u",
-			   (unsigned) (sbi->s_commit_interval / HZ));
-	}
-
-	/*
-	 * Always display barrier state so it's clear what the status is.
-	 */
-	seq_puts(seq, ",barrier=");
-	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
-	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
-	if (test_opt(sb, DATA_ERR_ABORT))
-		seq_puts(seq, ",data_err=abort");
-
-	if (test_opt(sb, NOLOAD))
-		seq_puts(seq, ",norecovery");
-
-	ext3_show_quota_options(seq, sb);
-
-	return 0;
-}
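As a concrete illustration, a mount using the defaults would surface in
/proc/mounts along these lines (hypothetical device; barrier= and data= are
always emitted, as the code above makes explicit):

	/* /dev/sdb1 /mnt ext3 rw,barrier=1,data=ordered 0 0 */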
-
-
-static struct inode *ext3_nfs_get_inode(struct super_block *sb,
-		u64 ino, u32 generation)
-{
-	struct inode *inode;
-
-	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
-		return ERR_PTR(-ESTALE);
-	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
-		return ERR_PTR(-ESTALE);
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 *
-	 * ext3_read_inode will return a bad_inode if the inode had been
-	 * deleted, so we should be safe.
-	 *
-	 * Currently we don't know the generation for the parent directory, so
-	 * a generation of 0 means "accept any"
-	 */
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		return ERR_CAST(inode);
-	if (generation && inode->i_generation != generation) {
-		iput(inode);
-		return ERR_PTR(-ESTALE);
-	}
-
-	return inode;
-}
-
-static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-/*
- * Try to release metadata pages (indirect blocks, directories) which are
- * mapped via the block device.  Since these pages could have journal heads
- * which would prevent try_to_free_buffers() from freeing them, we must use
- * jbd layer's try_to_free_buffers() function to release them.
- */
-static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
-				 gfp_t wait)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	if (journal)
-		return journal_try_to_free_buffers(journal, page,
-						   wait & ~__GFP_WAIT);
-	return try_to_free_buffers(page);
-}
-
-#ifdef CONFIG_QUOTA
-#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-
-static int ext3_write_dquot(struct dquot *dquot);
-static int ext3_acquire_dquot(struct dquot *dquot);
-static int ext3_release_dquot(struct dquot *dquot);
-static int ext3_mark_dquot_dirty(struct dquot *dquot);
-static int ext3_write_info(struct super_block *sb, int type);
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path);
-static int ext3_quota_on_mount(struct super_block *sb, int type);
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off);
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off);
-static struct dquot **ext3_get_dquots(struct inode *inode)
-{
-	return EXT3_I(inode)->i_dquot;
-}
-
-static const struct dquot_operations ext3_quota_operations = {
-	.write_dquot	= ext3_write_dquot,
-	.acquire_dquot	= ext3_acquire_dquot,
-	.release_dquot	= ext3_release_dquot,
-	.mark_dirty	= ext3_mark_dquot_dirty,
-	.write_info	= ext3_write_info,
-	.alloc_dquot	= dquot_alloc,
-	.destroy_dquot	= dquot_destroy,
-};
-
-static const struct quotactl_ops ext3_qctl_operations = {
-	.quota_on	= ext3_quota_on,
-	.quota_off	= dquot_quota_off,
-	.quota_sync	= dquot_quota_sync,
-	.get_state	= dquot_get_state,
-	.set_info	= dquot_set_dqinfo,
-	.get_dqblk	= dquot_get_dqblk,
-	.set_dqblk	= dquot_set_dqblk
-};
-#endif
-
-static const struct super_operations ext3_sops = {
-	.alloc_inode	= ext3_alloc_inode,
-	.destroy_inode	= ext3_destroy_inode,
-	.write_inode	= ext3_write_inode,
-	.dirty_inode	= ext3_dirty_inode,
-	.drop_inode	= ext3_drop_inode,
-	.evict_inode	= ext3_evict_inode,
-	.put_super	= ext3_put_super,
-	.sync_fs	= ext3_sync_fs,
-	.freeze_fs	= ext3_freeze,
-	.unfreeze_fs	= ext3_unfreeze,
-	.statfs		= ext3_statfs,
-	.remount_fs	= ext3_remount,
-	.show_options	= ext3_show_options,
-#ifdef CONFIG_QUOTA
-	.quota_read	= ext3_quota_read,
-	.quota_write	= ext3_quota_write,
-	.get_dquots	= ext3_get_dquots,
-#endif
-	.bdev_try_to_free_page = bdev_try_to_free_page,
-};
-
-static const struct export_operations ext3_export_ops = {
-	.fh_to_dentry = ext3_fh_to_dentry,
-	.fh_to_parent = ext3_fh_to_parent,
-	.get_parent = ext3_get_parent,
-};
-
-enum {
-	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
-	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
-	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
-	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
-	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
-	Opt_journal_path,
-	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
-	Opt_data_err_abort, Opt_data_err_ignore,
-	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
-	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-	Opt_resize, Opt_usrquota, Opt_grpquota
-};
-
-static const match_table_t tokens = {
-	{Opt_bsd_df, "bsddf"},
-	{Opt_minix_df, "minixdf"},
-	{Opt_grpid, "grpid"},
-	{Opt_grpid, "bsdgroups"},
-	{Opt_nogrpid, "nogrpid"},
-	{Opt_nogrpid, "sysvgroups"},
-	{Opt_resgid, "resgid=%u"},
-	{Opt_resuid, "resuid=%u"},
-	{Opt_sb, "sb=%u"},
-	{Opt_err_cont, "errors=continue"},
-	{Opt_err_panic, "errors=panic"},
-	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_nouid32, "nouid32"},
-	{Opt_nocheck, "nocheck"},
-	{Opt_nocheck, "check=none"},
-	{Opt_debug, "debug"},
-	{Opt_oldalloc, "oldalloc"},
-	{Opt_orlov, "orlov"},
-	{Opt_user_xattr, "user_xattr"},
-	{Opt_nouser_xattr, "nouser_xattr"},
-	{Opt_acl, "acl"},
-	{Opt_noacl, "noacl"},
-	{Opt_reservation, "reservation"},
-	{Opt_noreservation, "noreservation"},
-	{Opt_noload, "noload"},
-	{Opt_noload, "norecovery"},
-	{Opt_nobh, "nobh"},
-	{Opt_bh, "bh"},
-	{Opt_commit, "commit=%u"},
-	{Opt_journal_update, "journal=update"},
-	{Opt_journal_inum, "journal=%u"},
-	{Opt_journal_dev, "journal_dev=%u"},
-	{Opt_journal_path, "journal_path=%s"},
-	{Opt_abort, "abort"},
-	{Opt_data_journal, "data=journal"},
-	{Opt_data_ordered, "data=ordered"},
-	{Opt_data_writeback, "data=writeback"},
-	{Opt_data_err_abort, "data_err=abort"},
-	{Opt_data_err_ignore, "data_err=ignore"},
-	{Opt_offusrjquota, "usrjquota="},
-	{Opt_usrjquota, "usrjquota=%s"},
-	{Opt_offgrpjquota, "grpjquota="},
-	{Opt_grpjquota, "grpjquota=%s"},
-	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
-	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
-	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
-	{Opt_grpquota, "grpquota"},
-	{Opt_noquota, "noquota"},
-	{Opt_quota, "quota"},
-	{Opt_usrquota, "usrquota"},
-	{Opt_barrier, "barrier=%u"},
-	{Opt_barrier, "barrier"},
-	{Opt_nobarrier, "nobarrier"},
-	{Opt_resize, "resize"},
-	{Opt_err, NULL},
-};
-
-static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
-{
-	ext3_fsblk_t	sb_block;
-	char		*options = (char *) *data;
-
-	if (!options || strncmp(options, "sb=", 3) != 0)
-		return 1;	/* Default location */
-	options += 3;
-	/*todo: use simple_strtoll with >32bit ext3 */
-	sb_block = simple_strtoul(options, &options, 0);
-	if (*options && *options != ',') {
-		ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
-		       (char *) *data);
-		return 1;
-	}
-	if (*options == ',')
-		options++;
-	*data = (void *) options;
-	return sb_block;
-}
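The usual reason to pass "sb=" is superblock recovery.  A hypothetical
invocation that makes get_sb_block() return something other than the default:

	/*
	 * On a 1 KiB-block filesystem with 8192 blocks per group, group 1
	 * starts at block 8193 and carries a backup superblock, so
	 *
	 *	mount -t ext3 -o sb=8193 /dev/sdb1 /mnt
	 *
	 * makes get_sb_block() return 8193 instead of the default 1.
	 */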
-
-#ifdef CONFIG_QUOTA
-static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char *qname;
-
-	if (sb_any_quota_loaded(sb) &&
-		!sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR,
-			"Cannot change journaled "
-			"quota options when quota turned on");
-		return 0;
-	}
-	qname = match_strdup(args);
-	if (!qname) {
-		ext3_msg(sb, KERN_ERR,
-			"Not enough memory for storing quotafile name");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		int same = !strcmp(sbi->s_qf_names[qtype], qname);
-
-		kfree(qname);
-		if (!same) {
-			ext3_msg(sb, KERN_ERR,
-				 "%s quota file already specified",
-				 QTYPE2NAME(qtype));
-		}
-		return same;
-	}
-	if (strchr(qname, '/')) {
-		ext3_msg(sb, KERN_ERR,
-			"quotafile must be on filesystem root");
-		kfree(qname);
-		return 0;
-	}
-	sbi->s_qf_names[qtype] = qname;
-	set_opt(sbi->s_mount_opt, QUOTA);
-	return 1;
-}
-
-static int clear_qf_name(struct super_block *sb, int qtype)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sb_any_quota_loaded(sb) &&
-		sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
-			" when quota turned on");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		kfree(sbi->s_qf_names[qtype]);
-		sbi->s_qf_names[qtype] = NULL;
-	}
-	return 1;
-}
-#endif
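A hypothetical option string exercising both quota-name helpers above:

	/*
	 * mount -t ext3 -o usrjquota=aquota.user,jqfmt=vfsv0 /dev/sdb1 /mnt
	 *
	 * "usrjquota=aquota.user" is routed to set_qf_name(), which stores
	 * the name (it must not contain '/') and sets the QUOTA mount flag;
	 * a bare "usrjquota=" on a later remount matches Opt_offusrjquota
	 * and is routed to clear_qf_name(), dropping the stored name again.
	 */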
-
-static int parse_options (char *options, struct super_block *sb,
-			  unsigned int *inum, unsigned long *journal_devnum,
-			  ext3_fsblk_t *n_blocks_count, int is_remount)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char * p;
-	substring_t args[MAX_OPT_ARGS];
-	int data_opt = 0;
-	int option;
-	kuid_t uid;
-	kgid_t gid;
-	char *journal_path;
-	struct inode *journal_inode;
-	struct path path;
-	int error;
-
-#ifdef CONFIG_QUOTA
-	int qfmt;
-#endif
-
-	if (!options)
-		return 1;
-
-	while ((p = strsep (&options, ",")) != NULL) {
-		int token;
-		if (!*p)
-			continue;
-		/*
-		 * Initialize args struct so we know whether arg was
-		 * found; some options take optional arguments.
-		 */
-		args[0].to = args[0].from = NULL;
-		token = match_token(p, tokens, args);
-		switch (token) {
-		case Opt_bsd_df:
-			clear_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_minix_df:
-			set_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_grpid:
-			set_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_nogrpid:
-			clear_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_resuid:
-			if (match_int(&args[0], &option))
-				return 0;
-			uid = make_kuid(current_user_ns(), option);
-			if (!uid_valid(uid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option);
-				return 0;
-
-			}
-			sbi->s_resuid = uid;
-			break;
-		case Opt_resgid:
-			if (match_int(&args[0], &option))
-				return 0;
-			gid = make_kgid(current_user_ns(), option);
-			if (!gid_valid(gid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option);
-				return 0;
-			}
-			sbi->s_resgid = gid;
-			break;
-		case Opt_sb:
-			/* handled by get_sb_block() instead of here */
-			/* *sb_block = match_int(&args[0]); */
-			break;
-		case Opt_err_panic:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			break;
-		case Opt_err_ro:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_RO);
-			break;
-		case Opt_err_cont:
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_CONT);
-			break;
-		case Opt_nouid32:
-			set_opt (sbi->s_mount_opt, NO_UID32);
-			break;
-		case Opt_nocheck:
-			clear_opt (sbi->s_mount_opt, CHECK);
-			break;
-		case Opt_debug:
-			set_opt (sbi->s_mount_opt, DEBUG);
-			break;
-		case Opt_oldalloc:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated oldalloc option");
-			break;
-		case Opt_orlov:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated orlov option");
-			break;
-#ifdef CONFIG_EXT3_FS_XATTR
-		case Opt_user_xattr:
-			set_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-		case Opt_nouser_xattr:
-			clear_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-#else
-		case Opt_user_xattr:
-		case Opt_nouser_xattr:
-			ext3_msg(sb, KERN_INFO,
-				"(no)user_xattr options not supported");
-			break;
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-#else
-		case Opt_acl:
-		case Opt_noacl:
-			ext3_msg(sb, KERN_INFO,
-				"(no)acl options not supported");
-			break;
-#endif
-		case Opt_reservation:
-			set_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_noreservation:
-			clear_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_journal_update:
-			/* @@@ FIXME */
-			/* Eventually we will want to be able to create
-			   a journal file here.  For now, only allow the
-			   user to specify an existing inode to be the
-			   journal file. */
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-					"journal on remount");
-				return 0;
-			}
-			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
-			break;
-		case Opt_journal_inum:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*inum = option;
-			break;
-		case Opt_journal_dev:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*journal_devnum = option;
-			break;
-		case Opt_journal_path:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-
-			journal_path = match_strdup(&args[0]);
-			if (!journal_path) {
-				ext3_msg(sb, KERN_ERR, "error: could not dup "
-					"journal device string");
-				return 0;
-			}
-
-			error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
-			if (error) {
-				ext3_msg(sb, KERN_ERR, "error: could not find "
-					"journal device path: error %d", error);
-				kfree(journal_path);
-				return 0;
-			}
-
-			journal_inode = d_inode(path.dentry);
-			if (!S_ISBLK(journal_inode->i_mode)) {
-				ext3_msg(sb, KERN_ERR, "error: journal path %s "
-					"is not a block device", journal_path);
-				path_put(&path);
-				kfree(journal_path);
-				return 0;
-			}
-
-			*journal_devnum = new_encode_dev(journal_inode->i_rdev);
-			path_put(&path);
-			kfree(journal_path);
-			break;
-		case Opt_noload:
-			set_opt (sbi->s_mount_opt, NOLOAD);
-			break;
-		case Opt_commit:
-			if (match_int(&args[0], &option))
-				return 0;
-			if (option < 0)
-				return 0;
-			if (option == 0)
-				option = JBD_DEFAULT_MAX_COMMIT_AGE;
-			sbi->s_commit_interval = HZ * option;
-			break;
-		case Opt_data_journal:
-			data_opt = EXT3_MOUNT_JOURNAL_DATA;
-			goto datacheck;
-		case Opt_data_ordered:
-			data_opt = EXT3_MOUNT_ORDERED_DATA;
-			goto datacheck;
-		case Opt_data_writeback:
-			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
-		datacheck:
-			if (is_remount) {
-				if (test_opt(sb, DATA_FLAGS) == data_opt)
-					break;
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot change "
-					"data mode on remount. The filesystem "
-					"is mounted in data=%s mode and you "
-					"try to remount it in data=%s mode.",
-					data_mode_string(test_opt(sb,
-							DATA_FLAGS)),
-					data_mode_string(data_opt));
-				return 0;
-			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-				sbi->s_mount_opt |= data_opt;
-			}
-			break;
-		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-#ifdef CONFIG_QUOTA
-		case Opt_usrjquota:
-			if (!set_qf_name(sb, USRQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_grpjquota:
-			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_offusrjquota:
-			if (!clear_qf_name(sb, USRQUOTA))
-				return 0;
-			break;
-		case Opt_offgrpjquota:
-			if (!clear_qf_name(sb, GRPQUOTA))
-				return 0;
-			break;
-		case Opt_jqfmt_vfsold:
-			qfmt = QFMT_VFS_OLD;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv0:
-			qfmt = QFMT_VFS_V0;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv1:
-			qfmt = QFMT_VFS_V1;
-set_qf_format:
-			if (sb_any_quota_loaded(sb) &&
-			    sbi->s_jquota_fmt != qfmt) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"journaled quota options when "
-					"quota turned on.");
-				return 0;
-			}
-			sbi->s_jquota_fmt = qfmt;
-			break;
-		case Opt_quota:
-		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
-			break;
-		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-		case Opt_noquota:
-			if (sb_any_quota_loaded(sb)) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"quota options when quota turned on.");
-				return 0;
-			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-#else
-		case Opt_quota:
-		case Opt_usrquota:
-		case Opt_grpquota:
-			ext3_msg(sb, KERN_ERR,
-				"error: quota options not supported.");
-			break;
-		case Opt_usrjquota:
-		case Opt_grpjquota:
-		case Opt_offusrjquota:
-		case Opt_offgrpjquota:
-		case Opt_jqfmt_vfsold:
-		case Opt_jqfmt_vfsv0:
-		case Opt_jqfmt_vfsv1:
-			ext3_msg(sb, KERN_ERR,
-				"error: journaled quota options not "
-				"supported.");
-			break;
-		case Opt_noquota:
-			break;
-#endif
-		case Opt_abort:
-			set_opt(sbi->s_mount_opt, ABORT);
-			break;
-		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_barrier:
-			if (args[0].from) {
-				if (match_int(&args[0], &option))
-					return 0;
-			} else
-				option = 1;	/* No argument, default to 1 */
-			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
-			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_ignore:
-			break;
-		case Opt_resize:
-			if (!is_remount) {
-				ext3_msg(sb, KERN_ERR,
-					"error: resize option only available "
-					"for remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option) != 0)
-				return 0;
-			*n_blocks_count = option;
-			break;
-		case Opt_nobh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated nobh option");
-			break;
-		case Opt_bh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated bh option");
-			break;
-		default:
-			ext3_msg(sb, KERN_ERR,
-				"error: unrecognized mount option \"%s\" "
-				"or missing value", p);
-			return 0;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
-		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-
-		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
-			ext3_msg(sb, KERN_ERR, "error: old and new quota "
-					"format mixing.");
-			return 0;
-		}
-
-		if (!sbi->s_jquota_fmt) {
-			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
-					"not specified.");
-			return 0;
-		}
-	}
-#endif
-	return 1;
-}
-
-static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
-			    int read_only)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int res = 0;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
-		ext3_msg(sb, KERN_ERR,
-			"error: revision level too high, "
-			"forcing read-only mode");
-		res = MS_RDONLY;
-	}
-	if (read_only)
-		return res;
-	if (!(sbi->s_mount_state & EXT3_VALID_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting unchecked fs, "
-			"running e2fsck is recommended");
-	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting fs with errors, "
-			"running e2fsck is recommended");
-	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
-		 le16_to_cpu(es->s_mnt_count) >=
-			le16_to_cpu(es->s_max_mnt_count))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: maximal mount count reached, "
-			"running e2fsck is recommended");
-	else if (le32_to_cpu(es->s_checkinterval) &&
-		(le32_to_cpu(es->s_lastcheck) +
-			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: checktime reached, "
-			"running e2fsck is recommended");
-#if 0
-		/* @@@ We _will_ want to clear the valid bit if we find
-                   inconsistencies, to force a fsck at reboot.  But for
-                   a plain journaled filesystem we can keep it set as
-                   valid forever! :) */
-	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-#endif
-	if (!le16_to_cpu(es->s_max_mnt_count))
-		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
-	le16_add_cpu(&es->s_mnt_count, 1);
-	es->s_mtime = cpu_to_le32(get_seconds());
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, DEBUG))
-		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
-				"bpg=%lu, ipg=%lu, mo=%04lx]",
-			sb->s_blocksize,
-			sbi->s_groups_count,
-			EXT3_BLOCKS_PER_GROUP(sb),
-			EXT3_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
-
-	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
-		char b[BDEVNAME_SIZE];
-		ext3_msg(sb, KERN_INFO, "using external journal on %s",
-			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
-	} else {
-		ext3_msg(sb, KERN_INFO, "using internal journal");
-	}
-	cleancache_init_fs(sb);
-	return res;
-}
-
-/* Called at mount-time, super-block is locked */
-static int ext3_check_descriptors(struct super_block *sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int i;
-
-	ext3_debug ("Checking group descriptors");
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
-		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
-		ext3_fsblk_t last_block;
-
-		if (i == sbi->s_groups_count - 1)
-			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
-		else
-			last_block = first_block +
-				(EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Block bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_block_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
-		    last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode table for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_table));
-			return 0;
-		}
-	}
-
-	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
-	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
-	return 1;
-}
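To make the window computation concrete, assume an illustrative geometry of
8192 blocks per group with s_first_data_block = 1:

	/*
	 * Group 2 then spans blocks 16385..24576 (first_block = 1 + 2 * 8192),
	 * so its block bitmap, inode bitmap and whole inode table must fall
	 * inside that range.  Only the last group may be shorter, which is
	 * why last_block is derived from s_blocks_count for it.
	 */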
-
-
-/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
- * the superblock) which were deleted from all directories, but held open by
- * a process at the time of a crash.  We walk the list and try to delete these
- * inodes at recovery time (only with a read-write filesystem).
- *
- * In order to keep the orphan inode chain consistent during traversal (in
- * case of crash during recovery), we link each inode into the superblock
- * orphan list_head and handle it the same way as an inode deletion during
- * normal operation (which journals the operations for us).
- *
- * We only do an iget() and an iput() on each inode, which is very safe if we
- * accidentally point at an in-use or already deleted inode.  The worst that
- * can happen in this case is that we get a "bit already cleared" message from
- * ext3_free_inode().  The only reason we would point at a wrong inode is if
- * e2fsck was run on this filesystem, and it must have already done the orphan
- * inode cleanup for us, so we can safely abort without any further action.
- */
-static void ext3_orphan_cleanup (struct super_block * sb,
-				 struct ext3_super_block * es)
-{
-	unsigned int s_flags = sb->s_flags;
-	int nr_orphans = 0, nr_truncates = 0;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-	if (!es->s_last_orphan) {
-		jbd_debug(4, "no orphan inodes to clean up\n");
-		return;
-	}
-
-	if (bdev_read_only(sb->s_bdev)) {
-		ext3_msg(sb, KERN_ERR, "error: write access "
-			"unavailable, skipping orphan cleanup.");
-		return;
-	}
-
-	/* Check if feature set allows readwrite operations */
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
-		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
-			 "unknown ROCOMPAT features");
-		return;
-	}
-
-	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
-		/* don't clear list on RO mount w/ errors */
-		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
-			jbd_debug(1, "Errors on filesystem, "
-				  "clearing orphan list.\n");
-			es->s_last_orphan = 0;
-		}
-		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
-		return;
-	}
-
-	if (s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-		sb->s_flags &= ~MS_RDONLY;
-	}
-#ifdef CONFIG_QUOTA
-	/* Needed for iput() to work correctly and not trash data */
-	sb->s_flags |= MS_ACTIVE;
-	/* Turn on quotas so that they are updated correctly */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (EXT3_SB(sb)->s_qf_names[i]) {
-			int ret = ext3_quota_on_mount(sb, i);
-			if (ret < 0)
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot turn on journaled "
-					"quota: %d", ret);
-		}
-	}
-#endif
-
-	while (es->s_last_orphan) {
-		struct inode *inode;
-
-		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
-		if (IS_ERR(inode)) {
-			es->s_last_orphan = 0;
-			break;
-		}
-
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-		dquot_initialize(inode);
-		if (inode->i_nlink) {
-			printk(KERN_DEBUG
-				"%s: truncating inode %lu to %Ld bytes\n",
-				__func__, inode->i_ino, inode->i_size);
-			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
-				  inode->i_ino, inode->i_size);
-			ext3_truncate(inode);
-			nr_truncates++;
-		} else {
-			printk(KERN_DEBUG
-				"%s: deleting unreferenced inode %lu\n",
-				__func__, inode->i_ino);
-			jbd_debug(2, "deleting unreferenced inode %lu\n",
-				  inode->i_ino);
-			nr_orphans++;
-		}
-		iput(inode);  /* The delete magic happens here! */
-	}
-
-#define PLURAL(x) (x), ((x)==1) ? "" : "s"
-
-	if (nr_orphans)
-		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
-		       PLURAL(nr_orphans));
-	if (nr_truncates)
-		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
-		       PLURAL(nr_truncates));
-#ifdef CONFIG_QUOTA
-	/* Turn quotas off */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (sb_dqopt(sb)->files[i])
-			dquot_quota_off(sb, i);
-	}
-#endif
-	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
-}
-
-/*
- * Maximal file size.  There is a direct and {,double-,triple-}indirect
- * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
- * We need to be 1 filesystem block less than the 2^32 sector limit.
- */
-static loff_t ext3_max_size(int bits)
-{
-	loff_t res = EXT3_NDIR_BLOCKS;
-	int meta_blocks;
-	loff_t upper_limit;
-
-	/* This is calculated to be the largest file size for a
-	 * dense file such that the total number of sectors in the file,
-	 * including data and all indirect blocks, does not exceed 2^32 - 1,
-	 * with the __u32 i_blocks field representing the total number of
-	 * 512-byte blocks of the file.
-	 */
-	upper_limit = (1LL << 32) - 1;
-
-	/* total blocks, in units of the filesystem block size */
-	upper_limit >>= (bits - 9);
-
-	/* indirect blocks */
-	meta_blocks = 1;
-	/* double indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2));
-	/* triple indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
-	upper_limit -= meta_blocks;
-	upper_limit <<= bits;
-
-	res += 1LL << (bits-2);
-	res += 1LL << (2*(bits-2));
-	res += 1LL << (3*(bits-2));
-	res <<= bits;
-	if (res > upper_limit)
-		res = upper_limit;
-
-	if (res > MAX_LFS_FILESIZE)
-		res = MAX_LFS_FILESIZE;
-
-	return res;
-}
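-/*
- * Worked example (illustrative numbers, not from the original file):
- * for 4 KiB blocks (bits == 12) the sector limit gives
- *   upper_limit = (2^32 - 1) >> 3                    = 536870911 blocks,
- *   meta_blocks = 1 + (1 + 2^10) + (1 + 2^10 + 2^20) = 1050627,
- * so upper_limit ~= 535820284 blocks ~= 2 TiB, while the indirect-tree
- * limit (12 + 2^10 + 2^20 + 2^30 blocks ~= 4 TiB) is larger, hence the
- * familiar ~2 TiB ext3 file size limit at 4 KiB block size.
- */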
-
-static ext3_fsblk_t descriptor_loc(struct super_block *sb,
-				    ext3_fsblk_t logic_sb_block,
-				    int nr)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	unsigned long bg, first_meta_bg;
-	int has_super = 0;
-
-	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
-	    nr < first_meta_bg)
-		return (logic_sb_block + nr + 1);
-	bg = sbi->s_desc_per_block * nr;
-	if (ext3_bg_has_super(sb, bg))
-		has_super = 1;
-	return (has_super + ext3_group_first_block_no(sb, bg));
-}
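-/*
- * Example (illustrative, not from the original file): without META_BG
- * the group descriptor blocks simply follow the superblock, so with a
- * superblock at logical block 0, descriptor block nr lives at block
- * nr + 1; with META_BG, descriptor block nr is instead found inside
- * block group sbi->s_desc_per_block * nr, right after any backup
- * superblock that group carries.
- */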
-
-
-static int ext3_fill_super (struct super_block *sb, void *data, int silent)
-{
-	struct buffer_head * bh;
-	struct ext3_super_block *es = NULL;
-	struct ext3_sb_info *sbi;
-	ext3_fsblk_t block;
-	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
-	ext3_fsblk_t logic_sb_block;
-	unsigned long offset = 0;
-	unsigned int journal_inum = 0;
-	unsigned long journal_devnum = 0;
-	unsigned long def_mount_opts;
-	struct inode *root;
-	int blocksize;
-	int hblock;
-	int db_count;
-	int i;
-	int needs_recovery;
-	int ret = -EINVAL;
-	__le32 features;
-	int err;
-
-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	if (!sbi)
-		return -ENOMEM;
-
-	sbi->s_blockgroup_lock =
-		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-	if (!sbi->s_blockgroup_lock) {
-		kfree(sbi);
-		return -ENOMEM;
-	}
-	sb->s_fs_info = sbi;
-	sbi->s_sb_block = sb_block;
-
-	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
-	if (!blocksize) {
-		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
-		goto out_fail;
-	}
-
-	/*
-	 * The ext3 superblock will not be buffer aligned for block sizes
-	 * other than 1kB.  We need to calculate the offset from the
-	 * buffer start.
-	 */
-	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-	} else {
-		logic_sb_block = sb_block;
-	}
-
-	if (!(bh = sb_bread(sb, logic_sb_block))) {
-		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
-		goto out_fail;
-	}
-	/*
-	 * Note: s_es must be initialized as soon as possible because
-	 *       some ext3 macros depend on its value
-	 */
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	sbi->s_es = es;
-	sb->s_magic = le16_to_cpu(es->s_magic);
-	if (sb->s_magic != EXT3_SUPER_MAGIC)
-		goto cantfind_ext3;
-
-	/* Set defaults before we parse the mount options */
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	if (def_mount_opts & EXT3_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
-	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
-		set_opt(sbi->s_mount_opt, GRPID);
-	if (def_mount_opts & EXT3_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (def_mount_opts & EXT3_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
-#endif
-	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
-
-	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
-	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
-	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
-
-	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
-	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
-	/* enable barriers by default */
-	set_opt(sbi->s_mount_opt, BARRIER);
-	set_opt(sbi->s_mount_opt, RESERVATION);
-
-	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
-			    NULL, 0))
-		goto failed_mount;
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
-	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: feature flags set on rev 0 fs, "
-			"running e2fsck is recommended");
-	/*
-	 * Check feature flags regardless of the revision level, since we
-	 * previously didn't change the revision level when setting the flags,
-	 * so there is a chance incompat flags are set on a rev 0 filesystem.
-	 */
-	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
-	if (features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
-	if (!(sb->s_flags & MS_RDONLY) && features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount RDWR because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-
-	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
-	    blocksize > EXT3_MAX_BLOCK_SIZE) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"filesystem blocksize %d", blocksize);
-		goto failed_mount;
-	}
-
-	hblock = bdev_logical_block_size(sb->s_bdev);
-	if (sb->s_blocksize != blocksize) {
-		/*
-		 * Make sure the blocksize for the filesystem is at least
-		 * as large as the hardware sector size for the device.
-		 */
-		if (blocksize < hblock) {
-			ext3_msg(sb, KERN_ERR,
-				"error: fsblocksize %d too small for "
-				"hardware sectorsize %d", blocksize, hblock);
-			goto failed_mount;
-		}
-
-		brelse (bh);
-		if (!sb_set_blocksize(sb, blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: bad blocksize %d", blocksize);
-			goto out_fail;
-		}
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-		bh = sb_bread(sb, logic_sb_block);
-		if (!bh) {
-			ext3_msg(sb, KERN_ERR,
-			       "error: can't read superblock on 2nd try");
-			goto failed_mount;
-		}
-		es = (struct ext3_super_block *)(bh->b_data + offset);
-		sbi->s_es = es;
-		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: magic mismatch");
-			goto failed_mount;
-		}
-	}
-
-	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
-		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
-		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
-	} else {
-		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
-		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
-		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
-		    (!is_power_of_2(sbi->s_inode_size)) ||
-		    (sbi->s_inode_size > blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: unsupported inode size: %d",
-				sbi->s_inode_size);
-			goto failed_mount;
-		}
-	}
-	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
-				   le32_to_cpu(es->s_log_frag_size);
-	if (blocksize != sbi->s_frag_size) {
-		ext3_msg(sb, KERN_ERR,
-		       "error: fragsize %lu != blocksize %u (unsupported)",
-		       sbi->s_frag_size, blocksize);
-		goto failed_mount;
-	}
-	sbi->s_frags_per_block = 1;
-	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
-	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
-	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-	if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
-	if (sbi->s_inodes_per_block == 0)
-		goto cantfind_ext3;
-	sbi->s_itb_per_group = sbi->s_inodes_per_group /
-					sbi->s_inodes_per_block;
-	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
-	sbi->s_sbh = bh;
-	sbi->s_mount_state = le16_to_cpu(es->s_state);
-	sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb));
-	sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb));
-	for (i = 0; i < 4; i++)
-		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
-	sbi->s_def_hash_version = es->s_def_hash_version;
-	i = le32_to_cpu(es->s_flags);
-	if (i & EXT2_FLAGS_UNSIGNED_HASH)
-		sbi->s_hash_unsigned = 3;
-	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
-#ifdef __CHAR_UNSIGNED__
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
-		sbi->s_hash_unsigned = 3;
-#else
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
-#endif
-	}
-
-	if (sbi->s_blocks_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #blocks per group too big: %lu",
-			sbi->s_blocks_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_frags_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #fragments per group too big: %lu",
-			sbi->s_frags_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_inodes_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #inodes per group too big: %lu",
-			sbi->s_inodes_per_group);
-		goto failed_mount;
-	}
-
-	err = generic_check_addressable(sb->s_blocksize_bits,
-					le32_to_cpu(es->s_blocks_count));
-	if (err) {
-		ext3_msg(sb, KERN_ERR,
-			"error: filesystem is too large to mount safely");
-		if (sizeof(sector_t) < 8)
-			ext3_msg(sb, KERN_ERR,
-				"error: CONFIG_LBDAF not enabled");
-		ret = err;
-		goto failed_mount;
-	}
-
-	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
-			       le32_to_cpu(es->s_first_data_block) - 1)
-				       / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
-	db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
-	sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
-				    GFP_KERNEL);
-	if (sbi->s_group_desc == NULL) {
-		ext3_msg(sb, KERN_ERR,
-			"error: not enough memory");
-		ret = -ENOMEM;
-		goto failed_mount;
-	}
-
-	bgl_lock_init(sbi->s_blockgroup_lock);
-
-	for (i = 0; i < db_count; i++) {
-		block = descriptor_loc(sb, logic_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread(sb, block);
-		if (!sbi->s_group_desc[i]) {
-			ext3_msg(sb, KERN_ERR,
-				"error: can't read group descriptor %d", i);
-			db_count = i;
-			goto failed_mount2;
-		}
-	}
-	if (!ext3_check_descriptors (sb)) {
-		ext3_msg(sb, KERN_ERR,
-			"error: group descriptors corrupted");
-		goto failed_mount2;
-	}
-	sbi->s_gdb_count = db_count;
-	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
-	spin_lock_init(&sbi->s_next_gen_lock);
-
-	/* per-filesystem reservation list head & lock */
-	spin_lock_init(&sbi->s_rsv_window_lock);
-	sbi->s_rsv_window_root = RB_ROOT;
-	/* Add a single, static dummy reservation to the start of the
-	 * reservation window list --- it gives us a placeholder for
-	 * append-at-start-of-list which makes the allocation logic
-	 * _much_ simpler. */
-	sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
-	sbi->s_rsv_window_head.rsv_goal_size = 0;
-	ext3_rsv_window_add(sb, &sbi->s_rsv_window_head);
-
-	/*
-	 * set up enough so that it can read an inode
-	 */
-	sb->s_op = &ext3_sops;
-	sb->s_export_op = &ext3_export_ops;
-	sb->s_xattr = ext3_xattr_handlers;
-#ifdef CONFIG_QUOTA
-	sb->s_qcop = &ext3_qctl_operations;
-	sb->dq_op = &ext3_quota_operations;
-	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
-#endif
-	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
-	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
-	mutex_init(&sbi->s_orphan_lock);
-	mutex_init(&sbi->s_resize_lock);
-
-	sb->s_root = NULL;
-
-	needs_recovery = (es->s_last_orphan != 0 ||
-			  EXT3_HAS_INCOMPAT_FEATURE(sb,
-				    EXT3_FEATURE_INCOMPAT_RECOVER));
-
-	/*
-	 * The first inode we look at is the journal inode.  Don't try
-	 * root first: it may be modified in the journal!
-	 */
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
-		if (ext3_load_journal(sb, es, journal_devnum))
-			goto failed_mount2;
-	} else if (journal_inum) {
-		if (ext3_create_journal(sb, es, journal_inum))
-			goto failed_mount2;
-	} else {
-		if (!silent)
-			ext3_msg(sb, KERN_ERR,
-				"error: no journal found. "
-				"mounting ext3 over ext2?");
-		goto failed_mount2;
-	}
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-			ext3_count_free_blocks(sb), GFP_KERNEL);
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext3_count_free_inodes(sb), GFP_KERNEL);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext3_count_dirs(sb), GFP_KERNEL);
-	}
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
-		ret = err;
-		goto failed_mount3;
-	}
-
-	/* We have now updated the journal if required, so we can
-	 * validate the data journaling mode. */
-	switch (test_opt(sb, DATA_FLAGS)) {
-	case 0:
-		/*
-		 * No mode set, assume a default based on the journal
-		 * capabilities: ORDERED_DATA if the journal can cope,
-		 * else JOURNAL_DATA.
-		 */
-		if (journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE);
-		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-		break;
-
-	case EXT3_MOUNT_ORDERED_DATA:
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		if (!journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: journal does not support "
-				"requested data journaling mode");
-			goto failed_mount3;
-		}
-	default:
-		break;
-	}
-
-	/*
-	 * The journal_load will have done any necessary log recovery,
-	 * so we can safely mount the rest of the filesystem now.
-	 */
-
-	root = ext3_iget(sb, EXT3_ROOT_INO);
-	if (IS_ERR(root)) {
-		ext3_msg(sb, KERN_ERR, "error: get root inode failed");
-		ret = PTR_ERR(root);
-		goto failed_mount3;
-	}
-	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
-		iput(root);
-		ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
-		goto failed_mount3;
-	}
-	sb->s_root = d_make_root(root);
-	if (!sb->s_root) {
-		ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
-		ret = -ENOMEM;
-		goto failed_mount3;
-	}
-
-	if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY))
-		sb->s_flags |= MS_RDONLY;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
-	ext3_orphan_cleanup(sb, es);
-	EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
-	if (needs_recovery) {
-		ext3_mark_recovery_complete(sb, es);
-		ext3_msg(sb, KERN_INFO, "recovery complete");
-	}
-	ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
-		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
-		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
-		"writeback");
-
-	return 0;
-
-cantfind_ext3:
-	if (!silent)
-		ext3_msg(sb, KERN_INFO,
-			"error: can't find ext3 filesystem on dev %s.",
-		       sb->s_id);
-	goto failed_mount;
-
-failed_mount3:
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	journal_destroy(sbi->s_journal);
-failed_mount2:
-	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-failed_mount:
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-	ext3_blkdev_remove(sbi);
-	brelse(bh);
-out_fail:
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	kfree(sbi);
-	return ret;
-}
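-/*
- * Note (sketch, not in the original file): the error ladder above
- * unwinds in reverse order of construction - failed_mount3 drops the
- * percpu counters and the journal, failed_mount2 the group descriptor
- * buffers, failed_mount the quota names and block device, and out_fail
- * the sb_info itself.
- */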
-
-/*
- * Set up any per-fs journal parameters now.  We'll do this both on
- * initial mount, once the journal has been initialised but before we've
- * done any recovery; and again on any subsequent remount.
- */
-static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_commit_interval)
-		journal->j_commit_interval = sbi->s_commit_interval;
-	/* We could also set up an ext3-specific default for the commit
-	 * interval here, but for now we'll just fall back to the jbd
-	 * default. */
-
-	spin_lock(&journal->j_state_lock);
-	if (test_opt(sb, BARRIER))
-		journal->j_flags |= JFS_BARRIER;
-	else
-		journal->j_flags &= ~JFS_BARRIER;
-	if (test_opt(sb, DATA_ERR_ABORT))
-		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
-	else
-		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
-
-static journal_t *ext3_get_journal(struct super_block *sb,
-				   unsigned int journal_inum)
-{
-	struct inode *journal_inode;
-	journal_t *journal;
-
-	/* First, test for the existence of a valid inode on disk.  Bad
-	 * things happen if we iget() an unused inode, as the subsequent
-	 * iput() will try to delete it. */
-
-	journal_inode = ext3_iget(sb, journal_inum);
-	if (IS_ERR(journal_inode)) {
-		ext3_msg(sb, KERN_ERR, "error: no journal found");
-		return NULL;
-	}
-	if (!journal_inode->i_nlink) {
-		make_bad_inode(journal_inode);
-		iput(journal_inode);
-		ext3_msg(sb, KERN_ERR, "error: journal inode is deleted");
-		return NULL;
-	}
-
-	jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
-		  journal_inode, journal_inode->i_size);
-	if (!S_ISREG(journal_inode->i_mode)) {
-		ext3_msg(sb, KERN_ERR, "error: invalid journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-
-	journal = journal_init_inode(journal_inode);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR, "error: could not load journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-	journal->j_private = sb;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-}
-
-static journal_t *ext3_get_dev_journal(struct super_block *sb,
-				       dev_t j_dev)
-{
-	struct buffer_head * bh;
-	journal_t *journal;
-	ext3_fsblk_t start;
-	ext3_fsblk_t len;
-	int hblock, blocksize;
-	ext3_fsblk_t sb_block;
-	unsigned long offset;
-	struct ext3_super_block * es;
-	struct block_device *bdev;
-
-	bdev = ext3_blkdev_get(j_dev, sb);
-	if (bdev == NULL)
-		return NULL;
-
-	blocksize = sb->s_blocksize;
-	hblock = bdev_logical_block_size(bdev);
-	if (blocksize < hblock) {
-		ext3_msg(sb, KERN_ERR,
-			"error: blocksize too small for journal device");
-		goto out_bdev;
-	}
-
-	sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
-	offset = EXT3_MIN_BLOCK_SIZE % blocksize;
-	set_blocksize(bdev, blocksize);
-	if (!(bh = __bread(bdev, sb_block, blocksize))) {
-		ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of "
-			"external journal");
-		goto out_bdev;
-	}
-
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
-	    !(le32_to_cpu(es->s_feature_incompat) &
-	      EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
-		ext3_msg(sb, KERN_ERR, "error: external journal has "
-			"bad superblock");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
-		ext3_msg(sb, KERN_ERR, "error: journal UUID does not match");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	len = le32_to_cpu(es->s_blocks_count);
-	start = sb_block + 1;
-	brelse(bh);	/* we're done with the superblock */
-
-	journal = journal_init_dev(bdev, sb->s_bdev,
-					start, len, blocksize);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR,
-			"error: failed to create device journal");
-		goto out_bdev;
-	}
-	journal->j_private = sb;
-	if (!bh_uptodate_or_lock(journal->j_sb_buffer)) {
-		if (bh_submit_read(journal->j_sb_buffer)) {
-			ext3_msg(sb, KERN_ERR, "I/O error on journal device");
-			goto out_journal;
-		}
-	}
-	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
-		ext3_msg(sb, KERN_ERR,
-			"error: external journal has more than one "
-			"user (unsupported) - %d",
-			be32_to_cpu(journal->j_superblock->s_nr_users));
-		goto out_journal;
-	}
-	EXT3_SB(sb)->journal_bdev = bdev;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-out_journal:
-	journal_destroy(journal);
-out_bdev:
-	ext3_blkdev_put(bdev);
-	return NULL;
-}
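-/*
- * Worked example (illustrative, not from the original file): for a
- * 4096-byte filesystem block size the code above computes
- *   sb_block = 1024 / 4096 = 0 and offset = 1024 % 4096 = 1024,
- * i.e. the external journal's ext3 superblock copy is read from the
- * first 4 KiB block, 1 KiB in - the same 1 KiB on-disk location it
- * would occupy at any block size.
- */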
-
-static int ext3_load_journal(struct super_block *sb,
-			     struct ext3_super_block *es,
-			     unsigned long journal_devnum)
-{
-	journal_t *journal;
-	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
-	dev_t journal_dev;
-	int err = 0;
-	int really_read_only;
-
-	if (journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		ext3_msg(sb, KERN_INFO, "external journal device major/minor "
-			"numbers have changed");
-		journal_dev = new_decode_dev(journal_devnum);
-	} else
-		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-
-	really_read_only = bdev_read_only(sb->s_bdev);
-
-	/*
-	 * Are we loading a blank journal or performing recovery after a
-	 * crash?  For recovery, we need to check in advance whether we
-	 * can get read-write access to the device.
-	 */
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) {
-		if (sb->s_flags & MS_RDONLY) {
-			ext3_msg(sb, KERN_INFO,
-				"recovery required on readonly filesystem");
-			if (really_read_only) {
-				ext3_msg(sb, KERN_ERR, "error: write access "
-					"unavailable, cannot proceed");
-				return -EROFS;
-			}
-			ext3_msg(sb, KERN_INFO,
-				"write access will be enabled during recovery");
-		}
-	}
-
-	if (journal_inum && journal_dev) {
-		ext3_msg(sb, KERN_ERR, "error: filesystem has both journal "
-		       "and inode journals");
-		return -EINVAL;
-	}
-
-	if (journal_inum) {
-		if (!(journal = ext3_get_journal(sb, journal_inum)))
-			return -EINVAL;
-	} else {
-		if (!(journal = ext3_get_dev_journal(sb, journal_dev)))
-			return -EINVAL;
-	}
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
-
-	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
-		err = journal_update_format(journal);
-		if (err)  {
-			ext3_msg(sb, KERN_ERR, "error updating journal");
-			journal_destroy(journal);
-			return err;
-		}
-	}
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER))
-		err = journal_wipe(journal, !really_read_only);
-	if (!err)
-		err = journal_load(journal);
-
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error loading journal");
-		journal_destroy(journal);
-		return err;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-	ext3_clear_journal_err(sb, es);
-
-	if (!really_read_only && journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		es->s_journal_dev = cpu_to_le32(journal_devnum);
-
-		/* Make sure we flush the recovery flag to disk. */
-		ext3_commit_super(sb, es, 1);
-	}
-
-	return 0;
-}
-
-static int ext3_create_journal(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       unsigned int journal_inum)
-{
-	journal_t *journal;
-	int err;
-
-	if (sb->s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_ERR,
-			"error: readonly filesystem when trying to "
-			"create journal");
-		return -EROFS;
-	}
-
-	journal = ext3_get_journal(sb, journal_inum);
-	if (!journal)
-		return -EINVAL;
-
-	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
-	       journal_inum);
-
-	err = journal_create(journal);
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error creating journal");
-		journal_destroy(journal);
-		return -EIO;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);
-
-	es->s_journal_inum = cpu_to_le32(journal_inum);
-
-	/* Make sure we flush the recovery flag to disk. */
-	ext3_commit_super(sb, es, 1);
-
-	return 0;
-}
-
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync)
-{
-	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
-	int error = 0;
-
-	if (!sbh)
-		return error;
-
-	if (buffer_write_io_error(sbh)) {
-		/*
-		 * Oh, dear.  A previous attempt to write the
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		ext3_msg(sb, KERN_ERR, "previous I/O error to "
-		       "superblock detected");
-		clear_buffer_write_io_error(sbh);
-		set_buffer_uptodate(sbh);
-	}
-	/*
-	 * If the file system is mounted read-only, don't update the
-	 * superblock write time.  This avoids updating the superblock
-	 * write time when we are mounting the root file system
-	 * read-only but we need to replay the journal; at that point,
-	 * for people who are east of GMT and who make their clock
-	 * tick in localtime for Windows bug-for-bug compatibility,
-	 * the clock is set in the future, and this will cause e2fsck
-	 * to complain and force a full file system check.
-	 */
-	if (!(sb->s_flags & MS_RDONLY))
-		es->s_wtime = cpu_to_le32(get_seconds());
-	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
-	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
-	BUFFER_TRACE(sbh, "marking dirty");
-	mark_buffer_dirty(sbh);
-	if (sync) {
-		error = sync_dirty_buffer(sbh);
-		if (buffer_write_io_error(sbh)) {
-			ext3_msg(sb, KERN_ERR, "I/O error while writing "
-			       "superblock");
-			clear_buffer_write_io_error(sbh);
-			set_buffer_uptodate(sbh);
-		}
-	}
-	return error;
-}
-
-
-/*
- * Have we just finished recovery?  If so, and if we are mounting (or
- * remounting) the filesystem readonly, then we will end up with a
- * consistent fs on disk.  Record that fact.
- */
-static void ext3_mark_recovery_complete(struct super_block * sb,
-					struct ext3_super_block * es)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	journal_lock_updates(journal);
-	if (journal_flush(journal) < 0)
-		goto out;
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
-	    sb->s_flags & MS_RDONLY) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, es, 1);
-	}
-
-out:
-	journal_unlock_updates(journal);
-}
-
-/*
- * If we are mounting (or read-write remounting) a filesystem whose journal
- * has recorded an error from a previous lifetime, move that error to the
- * main filesystem now.
- */
-static void ext3_clear_journal_err(struct super_block *sb,
-				   struct ext3_super_block *es)
-{
-	journal_t *journal;
-	int j_errno;
-	const char *errstr;
-
-	journal = EXT3_SB(sb)->s_journal;
-
-	/*
-	 * Now check for any error status which may have been recorded in the
-	 * journal by a prior ext3_error() or ext3_abort()
-	 */
-
-	j_errno = journal_errno(journal);
-	if (j_errno) {
-		char nbuf[16];
-
-		errstr = ext3_decode_error(sb, j_errno, nbuf);
-		ext3_warning(sb, __func__, "Filesystem error recorded "
-			     "from previous mount: %s", errstr);
-		ext3_warning(sb, __func__, "Marking fs in need of "
-			     "filesystem check.");
-
-		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-		ext3_commit_super (sb, es, 1);
-
-		journal_clear_err(journal);
-	}
-}
-
-/*
- * Force the running and committing transactions to commit,
- * and wait on the commit.
- */
-int ext3_force_commit(struct super_block *sb)
-{
-	journal_t *journal;
-	int ret;
-
-	if (sb->s_flags & MS_RDONLY)
-		return 0;
-
-	journal = EXT3_SB(sb)->s_journal;
-	ret = ext3_journal_force_commit(journal);
-	return ret;
-}
-
-static int ext3_sync_fs(struct super_block *sb, int wait)
-{
-	tid_t target;
-
-	trace_ext3_sync_fs(sb, wait);
-	/*
-	 * Writeback quota in non-journalled quota case - journalled quota has
-	 * no dirty dquots
-	 */
-	dquot_writeback_dquots(sb, -1);
-	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
-		if (wait)
-			log_wait_commit(EXT3_SB(sb)->s_journal, target);
-	}
-	return 0;
-}
-
-/*
- * LVM calls this function before a (read-only) snapshot is created.  This
- * gives us a chance to flush the journal completely and mark the fs clean.
- */
-static int ext3_freeze(struct super_block *sb)
-{
-	int error = 0;
-	journal_t *journal;
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		journal = EXT3_SB(sb)->s_journal;
-
-		/* Now we set up the journal barrier. */
-		journal_lock_updates(journal);
-
-		/*
-		 * We don't want to clear the needs_recovery flag if we
-		 * failed to flush the journal.
-		 */
-		error = journal_flush(journal);
-		if (error < 0)
-			goto out;
-
-		/* Journal blocked and flushed, clear needs_recovery flag. */
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		if (error)
-			goto out;
-	}
-	return 0;
-
-out:
-	journal_unlock_updates(journal);
-	return error;
-}
-
-/*
- * Called by LVM after the snapshot is done.  We need to reset the RECOVER
- * flag here, even though the filesystem is not technically dirty yet.
- */
-static int ext3_unfreeze(struct super_block *sb)
-{
-	if (!(sb->s_flags & MS_RDONLY)) {
-		/* Reset the needs_recovery flag before the fs is unlocked. */
-		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-	}
-	return 0;
-}
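-/*
- * Note (sketch, not in the original file): ext3_freeze() above leaves
- * the journal locked via journal_lock_updates() on success, and
- * ext3_unfreeze() is the matching unlock; the RECOVER feature flag is
- * cleared while frozen so that an LVM snapshot taken in between mounts
- * clean.
- */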
-
-static int ext3_remount (struct super_block * sb, int * flags, char * data)
-{
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t n_blocks_count = 0;
-	unsigned long old_sb_flags;
-	struct ext3_mount_options old_opts;
-	int enable_quota = 0;
-	int err;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-
-	sync_filesystem(sb);
-
-	/* Store the original options */
-	old_sb_flags = sb->s_flags;
-	old_opts.s_mount_opt = sbi->s_mount_opt;
-	old_opts.s_resuid = sbi->s_resuid;
-	old_opts.s_resgid = sbi->s_resgid;
-	old_opts.s_commit_interval = sbi->s_commit_interval;
-#ifdef CONFIG_QUOTA
-	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		if (sbi->s_qf_names[i]) {
-			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
-							 GFP_KERNEL);
-			if (!old_opts.s_qf_names[i]) {
-				int j;
-
-				for (j = 0; j < i; j++)
-					kfree(old_opts.s_qf_names[j]);
-				return -ENOMEM;
-			}
-		} else
-			old_opts.s_qf_names[i] = NULL;
-#endif
-
-	/*
-	 * Allow the "check" option to be passed as a remount option.
-	 */
-	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
-		err = -EINVAL;
-		goto restore_opts;
-	}
-
-	if (test_opt(sb, ABORT))
-		ext3_abort(sb, __func__, "Abort forced by user");
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	es = sbi->s_es;
-
-	ext3_init_journal_params(sb, sbi->s_journal);
-
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
-		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
-		if (test_opt(sb, ABORT)) {
-			err = -EROFS;
-			goto restore_opts;
-		}
-
-		if (*flags & MS_RDONLY) {
-			err = dquot_suspend(sb, -1);
-			if (err < 0)
-				goto restore_opts;
-
-			/*
-			 * First of all, the unconditional stuff we have to do
-			 * to disable replay of the journal when we next remount
-			 */
-			sb->s_flags |= MS_RDONLY;
-
-			/*
-			 * OK, test if we are remounting a valid rw partition
-			 * readonly, and if so set the rdonly flag and then
-			 * mark the partition as valid again.
-			 */
-			if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) &&
-			    (sbi->s_mount_state & EXT3_VALID_FS))
-				es->s_state = cpu_to_le16(sbi->s_mount_state);
-
-			ext3_mark_recovery_complete(sb, es);
-		} else {
-			__le32 ret;
-			if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					~EXT3_FEATURE_RO_COMPAT_SUPP))) {
-				ext3_msg(sb, KERN_WARNING,
-					"warning: couldn't remount RDWR "
-					"because of unsupported optional "
-					"features (%x)", le32_to_cpu(ret));
-				err = -EROFS;
-				goto restore_opts;
-			}
-
-			/*
-			 * If we have an unprocessed orphan list hanging
-			 * around from a previously readonly bdev mount,
-			 * require a full umount & mount for now.
-			 */
-			if (es->s_last_orphan) {
-				ext3_msg(sb, KERN_WARNING, "warning: couldn't "
-				       "remount RDWR because of unprocessed "
-				       "orphan inode list.  Please "
-				       "umount & mount instead.");
-				err = -EINVAL;
-				goto restore_opts;
-			}
-
-			/*
-			 * Mounting a RDONLY partition read-write, so reread
-			 * and store the current valid flag.  (It may have
-			 * been changed by e2fsck since we originally mounted
-			 * the partition.)
-			 */
-			ext3_clear_journal_err(sb, es);
-			sbi->s_mount_state = le16_to_cpu(es->s_state);
-			if ((err = ext3_group_extend(sb, es, n_blocks_count)))
-				goto restore_opts;
-			if (!ext3_setup_super (sb, es, 0))
-				sb->s_flags &= ~MS_RDONLY;
-			enable_quota = 1;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	/* Release old quota file names */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(old_opts.s_qf_names[i]);
-#endif
-	if (enable_quota)
-		dquot_resume(sb, -1);
-	return 0;
-restore_opts:
-	sb->s_flags = old_sb_flags;
-	sbi->s_mount_opt = old_opts.s_mount_opt;
-	sbi->s_resuid = old_opts.s_resuid;
-	sbi->s_resgid = old_opts.s_resgid;
-	sbi->s_commit_interval = old_opts.s_commit_interval;
-#ifdef CONFIG_QUOTA
-	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		kfree(sbi->s_qf_names[i]);
-		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
-	}
-#endif
-	return err;
-}
-
-static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
-{
-	struct super_block *sb = dentry->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	u64 fsid;
-
-	if (test_opt(sb, MINIX_DF)) {
-		sbi->s_overhead_last = 0;
-	} else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
-		unsigned long ngroups = sbi->s_groups_count, i;
-		ext3_fsblk_t overhead = 0;
-		smp_rmb();
-
-		/*
-		 * Compute the overhead (FS structures).  This is constant
-		 * for a given filesystem unless the number of block groups
-		 * changes so we cache the previous value until it does.
-		 */
-
-		/*
-		 * All of the blocks before first_data_block are
-		 * overhead
-		 */
-		overhead = le32_to_cpu(es->s_first_data_block);
-
-		/*
-		 * Add the overhead attributed to the superblock and
-		 * block group descriptors.  If the sparse superblocks
-		 * feature is turned on, then not all groups have this.
-		 */
-		for (i = 0; i < ngroups; i++) {
-			overhead += ext3_bg_has_super(sb, i) +
-				ext3_bg_num_gdb(sb, i);
-			cond_resched();
-		}
-
-		/*
-		 * Every block group has an inode bitmap, a block
-		 * bitmap, and an inode table.
-		 */
-		overhead += ngroups * (2 + sbi->s_itb_per_group);
-
-		/* Add the internal journal blocks as well */
-		if (sbi->s_journal && !sbi->journal_bdev)
-			overhead += sbi->s_journal->j_maxlen;
-
-		sbi->s_overhead_last = overhead;
-		smp_wmb();
-		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
-	}
-
-	buf->f_type = EXT3_SUPER_MAGIC;
-	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
-	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
-	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
-	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
-		buf->f_bavail = 0;
-	buf->f_files = le32_to_cpu(es->s_inodes_count);
-	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
-	buf->f_namelen = EXT3_NAME_LEN;
-	fsid = le64_to_cpup((void *)es->s_uuid) ^
-	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
-	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
-	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
-	return 0;
-}
-
-/* Helper function for writing quotas on sync - we need to start a
- * transaction before the quota file is locked for write. Otherwise
- * there are possible deadlocks:
- * Process 1                         Process 2
- * ext3_create()                     quota_sync()
- *   journal_start()                   write_dquot()
- *   dquot_initialize()                       down(dqio_mutex)
- *     down(dqio_mutex)                    journal_start()
- *
- */
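-/*
- * The helpers below therefore always take the journal side first:
- * ext3_journal_start(), then the dquot operation, then
- * ext3_journal_stop() - never the reverse order shown for process 2
- * above.
- */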
-
-#ifdef CONFIG_QUOTA
-
-static inline struct inode *dquot_to_inode(struct dquot *dquot)
-{
-	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
-}
-
-static int ext3_write_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-	struct inode *inode;
-
-	inode = dquot_to_inode(dquot);
-	handle = ext3_journal_start(inode,
-					EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_acquire_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_acquire(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_release_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle)) {
-		/* Release dquot anyway to avoid endless cycle in dqput() */
-		dquot_release(dquot);
-		return PTR_ERR(handle);
-	}
-	ret = dquot_release(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_mark_dquot_dirty(struct dquot *dquot)
-{
-	/* Are we journaling quotas? */
-	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
-	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
-		dquot_mark_dquot_dirty(dquot);
-		return ext3_write_dquot(dquot);
-	} else {
-		return dquot_mark_dquot_dirty(dquot);
-	}
-}
-
-static int ext3_write_info(struct super_block *sb, int type)
-{
-	int ret, err;
-	handle_t *handle;
-
-	/* Data block + inode block */
-	handle = ext3_journal_start(d_inode(sb->s_root), 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit_info(sb, type);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-/*
- * Turn on quotas during mount time - we need to find
- * the quota file and such...
- */
-static int ext3_quota_on_mount(struct super_block *sb, int type)
-{
-	return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
-					EXT3_SB(sb)->s_jquota_fmt, type);
-}
-
-/*
- * Standard function to be called on quota_on
- */
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path)
-{
-	int err;
-
-	if (!test_opt(sb, QUOTA))
-		return -EINVAL;
-
-	/* Quotafile not on the same filesystem? */
-	if (path->dentry->d_sb != sb)
-		return -EXDEV;
-	/* Journaling quota? */
-	if (EXT3_SB(sb)->s_qf_names[type]) {
-		/* Quotafile not of fs root? */
-		if (path->dentry->d_parent != sb->s_root)
-			ext3_msg(sb, KERN_WARNING,
-				"warning: Quota file not on filesystem root. "
-				"Journaled quota will not work.");
-	}
-
-	/*
-	 * When we journal data on the quota file, we have to flush the
-	 * journal so that we see all updates to the file when we bypass
-	 * the pagecache...
-	 */
-	if (ext3_should_journal_data(d_inode(path->dentry))) {
-		/*
-		 * We don't need to lock updates but journal_flush() could
-		 * otherwise be livelocked...
-		 */
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err)
-			return err;
-	}
-
-	return dquot_quota_on(sb, type, format_id, path);
-}
-
-/* Read data from quotafile - avoid pagecache and such because we cannot afford
- * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and no one else should touch the files)
- * we don't have to be afraid of races */
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int tocopy;
-	size_t toread;
-	struct buffer_head *bh;
-	loff_t i_size = i_size_read(inode);
-
-	if (off > i_size)
-		return 0;
-	if (off+len > i_size)
-		len = i_size-off;
-	toread = len;
-	while (toread > 0) {
-		tocopy = sb->s_blocksize - offset < toread ?
-				sb->s_blocksize - offset : toread;
-		bh = ext3_bread(NULL, inode, blk, 0, &err);
-		if (err)
-			return err;
-		if (!bh)	/* A hole? */
-			memset(data, 0, tocopy);
-		else
-			memcpy(data, bh->b_data+offset, tocopy);
-		brelse(bh);
-		offset = 0;
-		toread -= tocopy;
-		data += tocopy;
-		blk++;
-	}
-	return len;
-}
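-/*
- * Worked example (illustrative numbers, not from the original file):
- * with a 4096-byte block size, a read of len = 100 at off = 4090
- * starts at blk = 0, offset = 4090, so the first pass copies
- * tocopy = 4096 - 4090 = 6 bytes; the second pass then copies the
- * remaining 94 bytes from blk = 1 at offset 0.
- */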
-
-/* Write to quotafile (we know the transaction is already started and has
- * enough credits) */
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
-	struct buffer_head *bh;
-	handle_t *handle = journal_current_handle();
-
-	if (!handle) {
-		ext3_msg(sb, KERN_WARNING,
-			"warning: quota write (off=%llu, len=%llu)"
-			" cancelled because transaction is not started.",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-
-	/*
-	 * Since we account for only one data block in the transaction
-	 * credits, the write must not cross a block boundary.
-	 */
-	if (sb->s_blocksize - offset < len) {
-		ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
-			" cancelled because not block aligned",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-	bh = ext3_bread(handle, inode, blk, 1, &err);
-	if (!bh)
-		goto out;
-	if (journal_quota) {
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err) {
-			brelse(bh);
-			goto out;
-		}
-	}
-	lock_buffer(bh);
-	memcpy(bh->b_data+offset, data, len);
-	flush_dcache_page(bh->b_page);
-	unlock_buffer(bh);
-	if (journal_quota)
-		err = ext3_journal_dirty_metadata(handle, bh);
-	else {
-		/* Always do at least ordered writes for quotas */
-		err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-	brelse(bh);
-out:
-	if (err)
-		return err;
-	if (inode->i_size < off + len) {
-		i_size_write(inode, off + len);
-		EXT3_I(inode)->i_disksize = inode->i_size;
-	}
-	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	ext3_mark_inode_dirty(handle, inode);
-	return len;
-}
-
-#endif
-
-static struct dentry *ext3_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
-{
-	return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
-}
-
-static struct file_system_type ext3_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "ext3",
-	.mount		= ext3_mount,
-	.kill_sb	= kill_block_super,
-	.fs_flags	= FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("ext3");
-
-static int __init init_ext3_fs(void)
-{
-	int err = init_ext3_xattr();
-	if (err)
-		return err;
-	err = init_inodecache();
-	if (err)
-		goto out1;
-	err = register_filesystem(&ext3_fs_type);
-	if (err)
-		goto out;
-	return 0;
-out:
-	destroy_inodecache();
-out1:
-	exit_ext3_xattr();
-	return err;
-}
-
-static void __exit exit_ext3_fs(void)
-{
-	unregister_filesystem(&ext3_fs_type);
-	destroy_inodecache();
-	exit_ext3_xattr();
-}
-
-MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
-MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
-MODULE_LICENSE("GPL");
-module_init(init_ext3_fs)
-module_exit(exit_ext3_fs)
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
deleted file mode 100644
index c08c590..0000000
--- a/fs/ext3/symlink.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  linux/fs/ext3/symlink.c
- *
- * Only fast symlinks left here - the rest is done by generic code. AV, 1999
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/symlink.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 symlink handling code
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-const struct inode_operations ext3_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= page_follow_link_light,
-	.put_link	= page_put_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
-
-const struct inode_operations ext3_fast_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= simple_follow_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
deleted file mode 100644
index 7cf3650..0000000
--- a/fs/ext3/xattr.c
+++ /dev/null
@@ -1,1330 +0,0 @@
-/*
- * linux/fs/ext3/xattr.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * Fix by Harrison Xing <harrison@mountainviewdata.com>.
- * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
- * Extended attributes for symlinks and special files added per
- *  suggestion of Luka Renko <luka.renko@hermes.si>.
- * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
- *  Red Hat Inc.
- * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
- *  and Andreas Gruenbacher <agruen@suse.de>.
- */
-
-/*
- * Extended attributes are stored directly in inodes (on file systems with
- * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
- * field contains the block number if an inode uses an additional block. All
- * attributes must fit in the inode and one additional block. Blocks that
- * contain the identical set of attributes may be shared among several inodes.
- * Identical blocks are detected by keeping a cache of blocks that have
- * recently been accessed.
- *
- * The attributes in inodes and on blocks have a different header; the entries
- * are stored in the same format:
- *
- *   +------------------+
- *   | header           |
- *   | entry 1          | |
- *   | entry 2          | | growing downwards
- *   | entry 3          | v
- *   | four null bytes  |
- *   | . . .            |
- *   | value 1          | ^
- *   | value 3          | | growing upwards
- *   | value 2          | |
- *   +------------------+
- *
- * The header is followed by multiple entry descriptors. In disk blocks, the
- * entry descriptors are kept sorted. In inodes, they are unsorted. The
- * attribute values are aligned to the end of the block in no specific order.
- *
- * Locking strategy
- * ----------------
- * EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem.
- * EA blocks are only changed if they are exclusive to an inode, so
- * holding xattr_sem also means that nothing but the EA block's reference
- * count can change. Multiple writers to the same block are synchronized
- * by the buffer lock.
- */
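-/*
- * For orientation, a sketch of the entry descriptor walked by the code
- * below (reconstructed from the accesses in this file; the definitive
- * definition lives in xattr.h):
- *
- *	struct ext3_xattr_entry {
- *		__u8	e_name_len;	// length of e_name
- *		__u8	e_name_index;	// handler index (user, trusted, ...)
- *		__le16	e_value_offs;	// offset of the value in the block
- *		__le32	e_value_block;	// unused in practice, must be 0
- *		__le32	e_value_size;	// length of the value
- *		__le32	e_hash;		// hash of name and value
- *		char	e_name[];	// the name itself, not 0-terminated
- *	};
- *
- * IS_LAST_ENTRY() relies on the four null bytes that terminate the
- * entry list (see the layout diagram above).
- */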
-
-#include "ext3.h"
-#include <linux/mbcache.h>
-#include <linux/quotaops.h>
-#include "xattr.h"
-#include "acl.h"
-
-#define BHDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
-#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
-#define BFIRST(bh) ENTRY(BHDR(bh)+1)
-#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-
-#define IHDR(inode, raw_inode) \
-	((struct ext3_xattr_ibody_header *) \
-		((void *)raw_inode + \
-		 EXT3_GOOD_OLD_INODE_SIZE + \
-		 EXT3_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext3_xattr_entry *)((hdr)+1))
-
-#ifdef EXT3_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
-		printk(KERN_DEBUG "inode %s:%lu: ", \
-			inode->i_sb->s_id, inode->i_ino); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-# define ea_bdebug(bh, f...) do { \
-		char b[BDEVNAME_SIZE]; \
-		printk(KERN_DEBUG "block %s:%lu: ", \
-			bdevname(bh->b_bdev, b), \
-			(unsigned long) bh->b_blocknr); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
-#endif
-
-static void ext3_xattr_cache_insert(struct buffer_head *);
-static struct buffer_head *ext3_xattr_cache_find(struct inode *,
-						 struct ext3_xattr_header *,
-						 struct mb_cache_entry **);
-static void ext3_xattr_rehash(struct ext3_xattr_header *,
-			      struct ext3_xattr_entry *);
-static int ext3_xattr_list(struct dentry *dentry, char *buffer,
-			   size_t buffer_size);
-
-static struct mb_cache *ext3_xattr_cache;
-
-static const struct xattr_handler *ext3_xattr_handler_map[] = {
-	[EXT3_XATTR_INDEX_USER]		     = &ext3_xattr_user_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	[EXT3_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
-	[EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
-#endif
-	[EXT3_XATTR_INDEX_TRUSTED]	     = &ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_SECURITY
-	[EXT3_XATTR_INDEX_SECURITY]	     = &ext3_xattr_security_handler,
-#endif
-};
-
-const struct xattr_handler *ext3_xattr_handlers[] = {
-	&ext3_xattr_user_handler,
-	&ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	&posix_acl_access_xattr_handler,
-	&posix_acl_default_xattr_handler,
-#endif
-#ifdef CONFIG_EXT3_FS_SECURITY
-	&ext3_xattr_security_handler,
-#endif
-	NULL
-};
-
-static inline const struct xattr_handler *
-ext3_xattr_handler(int name_index)
-{
-	const struct xattr_handler *handler = NULL;
-
-	if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map))
-		handler = ext3_xattr_handler_map[name_index];
-	return handler;
-}
-
-/*
- * Inode operation listxattr()
- *
- * d_inode(dentry)->i_mutex: don't care
- */
-ssize_t
-ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
-	return ext3_xattr_list(dentry, buffer, size);
-}
-
-static int
-ext3_xattr_check_names(struct ext3_xattr_entry *entry, void *end)
-{
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry);
-		if ((void *)next >= end)
-			return -EIO;
-		entry = next;
-	}
-	return 0;
-}
-
-static inline int
-ext3_xattr_check_block(struct buffer_head *bh)
-{
-	int error;
-
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1))
-		return -EIO;
-	error = ext3_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
-	return error;
-}
-
-static inline int
-ext3_xattr_check_entry(struct ext3_xattr_entry *entry, size_t size)
-{
-	size_t value_size = le32_to_cpu(entry->e_value_size);
-
-	if (entry->e_value_block != 0 || value_size > size ||
-	    le16_to_cpu(entry->e_value_offs) + value_size > size)
-		return -EIO;
-	return 0;
-}
-
-static int
-ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index,
-		      const char *name, size_t size, int sorted)
-{
-	struct ext3_xattr_entry *entry;
-	size_t name_len;
-	int cmp = 1;
-
-	if (name == NULL)
-		return -EINVAL;
-	name_len = strlen(name);
-	entry = *pentry;
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		cmp = name_index - entry->e_name_index;
-		if (!cmp)
-			cmp = name_len - entry->e_name_len;
-		if (!cmp)
-			cmp = memcmp(name, entry->e_name, name_len);
-		if (cmp <= 0 && (sorted || cmp == 0))
-			break;
-	}
-	*pentry = entry;
-	if (!cmp && ext3_xattr_check_entry(entry, size))
-		return -EIO;
-	return cmp ? -ENODATA : 0;
-}
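-/*
- * Example (illustrative, not from the original file): entries in a
- * disk block are kept sorted by the composite key
- * (e_name_index, e_name_len, memcmp of e_name), so with sorted == 1
- * the loop above may stop at the first entry comparing >= the search
- * key; cmp == 0 at that point means the attribute was found.
- */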
-
-static int
-ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct buffer_head *bh = NULL;
-	struct ext3_xattr_entry *entry;
-	size_t size;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
-		  name_index, name, buffer, (long)buffer_size);
-
-	error = -ENODATA;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-bad_block:	ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	entry = BFIRST(bh);
-	error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
-		       size);
-	}
-	error = size;
-
-cleanup:
-	brelse(bh);
-	return error;
-}
-
-static int
-ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_entry *entry;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	size_t size;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return -ENODATA;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	entry = IFIRST(header);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(entry, end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_find_entry(&entry, name_index, name,
-				      end - (void *)entry, 0);
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, (void *)IFIRST(header) +
-		       le16_to_cpu(entry->e_value_offs), size);
-	}
-	error = size;
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_get()
- *
- * Copy an extended attribute into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t buffer_size)
-{
-	int error;
-
-	down_read(&EXT3_I(inode)->xattr_sem);
-	error = ext3_xattr_ibody_get(inode, name_index, name, buffer,
-				     buffer_size);
-	if (error == -ENODATA)
-		error = ext3_xattr_block_get(inode, name_index, name, buffer,
-					     buffer_size);
-	up_read(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
-
-static int
-ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
-			char *buffer, size_t buffer_size)
-{
-	size_t rest = buffer_size;
-
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		const struct xattr_handler *handler =
-			ext3_xattr_handler(entry->e_name_index);
-
-		if (handler) {
-			size_t size = handler->list(dentry, buffer, rest,
-						    entry->e_name,
-						    entry->e_name_len,
-						    handler->flags);
-			if (buffer) {
-				if (size > rest)
-					return -ERANGE;
-				buffer += size;
-			}
-			rest -= size;
-		}
-	}
-	return buffer_size - rest;
-}
-
-static int
-ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct buffer_head *bh = NULL;
-	int error;
-
-	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
-		  buffer, (long)buffer_size);
-
-	error = 0;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	error = -EIO;
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-		ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	error = ext3_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
-cleanup:
-	brelse(bh);
-
-	return error;
-}
-
-static int
-ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return 0;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(IFIRST(header), end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_list_entries(dentry, IFIRST(header),
-					buffer, buffer_size);
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_list()
- *
- * Copy a list of attribute names into the buffer
- * provided, or compute the buffer size required.
- * Pass a NULL buffer to compute only the required size.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-static int
-ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	int i_error, b_error;
-
-	down_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext3_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
-	}
-	up_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	return i_error + b_error;
-}
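
ext3_xattr_list_entries() above emits names as NUL-terminated strings packed back to back, which is also the format listxattr(2) returns to userspace. A hedged sketch of walking that list (the file name "f" is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
    	ssize_t len = listxattr("f", NULL, 0);	/* size the name list */
    	if (len < 0)
    		return 1;
    	if (len == 0)
    		return 0;			/* no attributes */
    	char *names = malloc(len);
    	if (!names)
    		return 1;
    	len = listxattr("f", names, len);
    	if (len < 0) {
    		free(names);
    		return 1;
    	}
    	/* Each name is NUL-terminated; step over one name at a time. */
    	for (char *p = names; p < names + len; p += strlen(p) + 1)
    		puts(p);
    	free(names);
    	return 0;
    }
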
-
-/*
- * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
- * not set, set it.
- */
-static void ext3_xattr_update_super_block(handle_t *handle,
-					  struct super_block *sb)
-{
-	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
-		return;
-
-	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-		EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
-		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	}
-}
-
-/*
- * Release the xattr block BH: If the reference count is > 1, decrement
- * it; otherwise free the block.
- */
-static void
-ext3_xattr_release_block(handle_t *handle, struct inode *inode,
-			 struct buffer_head *bh)
-{
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-	ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
-	error = ext3_journal_get_write_access(handle, bh);
-	if (error)
-		 goto out;
-
-	lock_buffer(bh);
-
-	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-		ea_bdebug(bh, "refcount now=0; freeing");
-		if (ce)
-			mb_cache_entry_free(ce);
-		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-		get_bh(bh);
-		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-	} else {
-		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		error = ext3_journal_dirty_metadata(handle, bh);
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		dquot_free_block(inode, 1);
-		ea_bdebug(bh, "refcount now=%d; releasing",
-			  le32_to_cpu(BHDR(bh)->h_refcount));
-		if (ce)
-			mb_cache_entry_release(ce);
-	}
-	unlock_buffer(bh);
-out:
-	ext3_std_error(inode->i_sb, error);
-	return;
-}
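
Stripped of the journaling, quota and mbcache bookkeeping, the decision ext3_xattr_release_block() makes is the classic shared-block refcount drop. A standalone restatement with illustrative types (not the on-disk ext3 structures):

    #include <stdint.h>
    #include <stdlib.h>

    struct shared_block {
    	uint32_t refcount;
    	/* ... payload ... */
    };

    /* Drop one reference; the last holder frees the block itself. */
    static void release_block(struct shared_block *b)
    {
    	if (b->refcount == 1)
    		free(b);
    	else
    		b->refcount--;
    }

    int main(void)
    {
    	struct shared_block *b = calloc(1, sizeof(*b));

    	if (!b)
    		return 1;
    	b->refcount = 2;
    	release_block(b);	/* still shared: refcount drops to 1 */
    	release_block(b);	/* last reference: block is freed */
    	return 0;
    }
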
-
-struct ext3_xattr_info {
-	int name_index;
-	const char *name;
-	const void *value;
-	size_t value_len;
-};
-
-struct ext3_xattr_search {
-	struct ext3_xattr_entry *first;
-	void *base;
-	void *end;
-	struct ext3_xattr_entry *here;
-	int not_found;
-};
-
-static int
-ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s)
-{
-	struct ext3_xattr_entry *last;
-	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-
-	/* Compute min_offs and last. */
-	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT3_XATTR_NEXT(last)) {
-		if (!last->e_value_block && last->e_value_size) {
-			size_t offs = le16_to_cpu(last->e_value_offs);
-			if (offs < min_offs)
-				min_offs = offs;
-		}
-	}
-	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
-	if (!s->not_found) {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			size_t size = le32_to_cpu(s->here->e_value_size);
-			free += EXT3_XATTR_SIZE(size);
-		}
-		free += EXT3_XATTR_LEN(name_len);
-	}
-	if (i->value) {
-		if (free < EXT3_XATTR_LEN(name_len) +
-			   EXT3_XATTR_SIZE(i->value_len))
-			return -ENOSPC;
-	}
-
-	if (i->value && s->not_found) {
-		/* Insert the new name. */
-		size_t size = EXT3_XATTR_LEN(name_len);
-		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
-		memmove((void *)s->here + size, s->here, rest);
-		memset(s->here, 0, size);
-		s->here->e_name_index = i->name_index;
-		s->here->e_name_len = name_len;
-		memcpy(s->here->e_name, i->name, name_len);
-	} else {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			void *first_val = s->base + min_offs;
-			size_t offs = le16_to_cpu(s->here->e_value_offs);
-			void *val = s->base + offs;
-			size_t size = EXT3_XATTR_SIZE(
-				le32_to_cpu(s->here->e_value_size));
-
-			if (i->value && size == EXT3_XATTR_SIZE(i->value_len)) {
-				/* The old and the new value have the same
-				   size. Just replace. */
-				s->here->e_value_size =
-					cpu_to_le32(i->value_len);
-				memset(val + size - EXT3_XATTR_PAD, 0,
-				       EXT3_XATTR_PAD); /* Clear pad bytes. */
-				memcpy(val, i->value, i->value_len);
-				return 0;
-			}
-
-			/* Remove the old value. */
-			memmove(first_val + size, first_val, val - first_val);
-			memset(first_val, 0, size);
-			s->here->e_value_size = 0;
-			s->here->e_value_offs = 0;
-			min_offs += size;
-
-			/* Adjust all value offsets. */
-			last = s->first;
-			while (!IS_LAST_ENTRY(last)) {
-				size_t o = le16_to_cpu(last->e_value_offs);
-				if (!last->e_value_block &&
-				    last->e_value_size && o < offs)
-					last->e_value_offs =
-						cpu_to_le16(o + size);
-				last = EXT3_XATTR_NEXT(last);
-			}
-		}
-		if (!i->value) {
-			/* Remove the old name. */
-			size_t size = EXT3_XATTR_LEN(name_len);
-			last = ENTRY((void *)last - size);
-			memmove(s->here, (void *)s->here + size,
-				(void *)last - (void *)s->here + sizeof(__u32));
-			memset(last, 0, size);
-		}
-	}
-
-	if (i->value) {
-		/* Insert the new value. */
-		s->here->e_value_size = cpu_to_le32(i->value_len);
-		if (i->value_len) {
-			size_t size = EXT3_XATTR_SIZE(i->value_len);
-			void *val = s->base + min_offs - size;
-			s->here->e_value_offs = cpu_to_le16(min_offs - size);
-			memset(val + size - EXT3_XATTR_PAD, 0,
-			       EXT3_XATTR_PAD); /* Clear the pad bytes. */
-			memcpy(val, i->value, i->value_len);
-		}
-	}
-	return 0;
-}
-
-struct ext3_xattr_block_find {
-	struct ext3_xattr_search s;
-	struct buffer_head *bh;
-};
-
-static int
-ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-		  i->name_index, i->name, i->value, (long)i->value_len);
-
-	if (EXT3_I(inode)->i_file_acl) {
-		/* The inode already has an extended attribute block. */
-		bs->bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bs->bh)
-			goto cleanup;
-		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
-			atomic_read(&(bs->bh->b_count)),
-			le32_to_cpu(BHDR(bs->bh)->h_refcount));
-		if (ext3_xattr_check_block(bs->bh)) {
-			ext3_error(sb, __func__,
-				"inode %lu: bad block "E3FSBLK, inode->i_ino,
-				EXT3_I(inode)->i_file_acl);
-			error = -EIO;
-			goto cleanup;
-		}
-		/* Find the named attribute. */
-		bs->s.base = BHDR(bs->bh);
-		bs->s.first = BFIRST(bs->bh);
-		bs->s.end = bs->bh->b_data + bs->bh->b_size;
-		bs->s.here = bs->s.first;
-		error = ext3_xattr_find_entry(&bs->s.here, i->name_index,
-					      i->name, bs->bh->b_size, 1);
-		if (error && error != -ENODATA)
-			goto cleanup;
-		bs->s.not_found = error;
-	}
-	error = 0;
-
-cleanup:
-	return error;
-}
-
-static int
-ext3_xattr_block_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	struct buffer_head *new_bh = NULL;
-	struct ext3_xattr_search *s = &bs->s;
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-#define header(x) ((struct ext3_xattr_header *)(x))
-
-	if (i->value && i->value_len > sb->s_blocksize)
-		return -ENOSPC;
-	if (s->base) {
-		ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
-					bs->bh->b_blocknr);
-		error = ext3_journal_get_write_access(handle, bs->bh);
-		if (error)
-			goto cleanup;
-		lock_buffer(bs->bh);
-
-		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-			if (ce) {
-				mb_cache_entry_free(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "modifying in-place");
-			error = ext3_xattr_set_entry(i, s);
-			if (!error) {
-				if (!IS_LAST_ENTRY(s->first))
-					ext3_xattr_rehash(header(s->base),
-							  s->here);
-				ext3_xattr_cache_insert(bs->bh);
-			}
-			unlock_buffer(bs->bh);
-			if (error == -EIO)
-				goto bad_block;
-			if (!error)
-				error = ext3_journal_dirty_metadata(handle,
-								    bs->bh);
-			if (error)
-				goto cleanup;
-			goto inserted;
-		} else {
-			int offset = (char *)s->here - bs->bh->b_data;
-
-			unlock_buffer(bs->bh);
-			journal_release_buffer(handle, bs->bh);
-
-			if (ce) {
-				mb_cache_entry_release(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "cloning");
-			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
-			error = -ENOMEM;
-			if (s->base == NULL)
-				goto cleanup;
-			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
-			s->first = ENTRY(header(s->base)+1);
-			header(s->base)->h_refcount = cpu_to_le32(1);
-			s->here = ENTRY(s->base + offset);
-			s->end = s->base + bs->bh->b_size;
-		}
-	} else {
-		/* Allocate a buffer where we construct the new block. */
-		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
-		/* assert(header == s->base) */
-		error = -ENOMEM;
-		if (s->base == NULL)
-			goto cleanup;
-		header(s->base)->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		header(s->base)->h_blocks = cpu_to_le32(1);
-		header(s->base)->h_refcount = cpu_to_le32(1);
-		s->first = ENTRY(header(s->base)+1);
-		s->here = ENTRY(header(s->base)+1);
-		s->end = s->base + sb->s_blocksize;
-	}
-
-	error = ext3_xattr_set_entry(i, s);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	if (!IS_LAST_ENTRY(s->first))
-		ext3_xattr_rehash(header(s->base), s->here);
-
-inserted:
-	if (!IS_LAST_ENTRY(s->first)) {
-		new_bh = ext3_xattr_cache_find(inode, header(s->base), &ce);
-		if (new_bh) {
-			/* We found an identical block in the cache. */
-			if (new_bh == bs->bh)
-				ea_bdebug(new_bh, "keeping");
-			else {
-				/* The old block is released after updating
-				   the inode. */
-				error = dquot_alloc_block(inode, 1);
-				if (error)
-					goto cleanup;
-				error = ext3_journal_get_write_access(handle,
-								      new_bh);
-				if (error)
-					goto cleanup_dquot;
-				lock_buffer(new_bh);
-				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
-				ea_bdebug(new_bh, "reusing; refcount now=%d",
-					le32_to_cpu(BHDR(new_bh)->h_refcount));
-				unlock_buffer(new_bh);
-				error = ext3_journal_dirty_metadata(handle,
-								    new_bh);
-				if (error)
-					goto cleanup_dquot;
-			}
-			mb_cache_entry_release(ce);
-			ce = NULL;
-		} else if (bs->bh && s->base == bs->bh->b_data) {
-			/* We were modifying this block in-place. */
-			ea_bdebug(bs->bh, "keeping this block");
-			new_bh = bs->bh;
-			get_bh(new_bh);
-		} else {
-			/* We need to allocate a new block */
-			ext3_fsblk_t goal = ext3_group_first_block_no(sb,
-						EXT3_I(inode)->i_block_group);
-			ext3_fsblk_t block;
-
-			/*
-			 * Protect us against concurrent allocations to the
-			 * same inode from ext3_..._writepage(). Reservation
-			 * code does not expect racing allocations.
-			 */
-			mutex_lock(&EXT3_I(inode)->truncate_mutex);
-			block = ext3_new_block(handle, inode, goal, &error);
-			mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-			if (error)
-				goto cleanup;
-			ea_idebug(inode, "creating block %d", block);
-
-			new_bh = sb_getblk(sb, block);
-			if (unlikely(!new_bh)) {
-getblk_failed:
-				ext3_free_blocks(handle, inode, block, 1);
-				error = -ENOMEM;
-				goto cleanup;
-			}
-			lock_buffer(new_bh);
-			error = ext3_journal_get_create_access(handle, new_bh);
-			if (error) {
-				unlock_buffer(new_bh);
-				goto getblk_failed;
-			}
-			memcpy(new_bh->b_data, s->base, new_bh->b_size);
-			set_buffer_uptodate(new_bh);
-			unlock_buffer(new_bh);
-			ext3_xattr_cache_insert(new_bh);
-			error = ext3_journal_dirty_metadata(handle, new_bh);
-			if (error)
-				goto cleanup;
-		}
-	}
-
-	/* Update the inode. */
-	EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
-
-	/* Drop the previous xattr block. */
-	if (bs->bh && bs->bh != new_bh)
-		ext3_xattr_release_block(handle, inode, bs->bh);
-	error = 0;
-
-cleanup:
-	if (ce)
-		mb_cache_entry_release(ce);
-	brelse(new_bh);
-	if (!(bs->bh && s->base == bs->bh->b_data))
-		kfree(s->base);
-
-	return error;
-
-cleanup_dquot:
-	dquot_free_block(inode, 1);
-	goto cleanup;
-
-bad_block:
-	ext3_error(inode->i_sb, __func__,
-		   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-		   EXT3_I(inode)->i_file_acl);
-	goto cleanup;
-
-#undef header
-}
-
-struct ext3_xattr_ibody_find {
-	struct ext3_xattr_search s;
-	struct ext3_iloc iloc;
-};
-
-static int
-ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return 0;
-	raw_inode = ext3_raw_inode(&is->iloc);
-	header = IHDR(inode, raw_inode);
-	is->s.base = is->s.first = IFIRST(header);
-	is->s.here = is->s.first;
-	is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
-		error = ext3_xattr_check_names(IFIRST(header), is->s.end);
-		if (error)
-			return error;
-		/* Find the named attribute. */
-		error = ext3_xattr_find_entry(&is->s.here, i->name_index,
-					      i->name, is->s.end -
-					      (void *)is->s.base, 0);
-		if (error && error != -ENODATA)
-			return error;
-		is->s.not_found = error;
-	}
-	return 0;
-}
-
-static int
-ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_search *s = &is->s;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return -ENOSPC;
-	error = ext3_xattr_set_entry(i, s);
-	if (error)
-		return error;
-	header = IHDR(inode, ext3_raw_inode(&is->iloc));
-	if (!IS_LAST_ENTRY(s->first)) {
-		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-	} else {
-		header->h_magic = cpu_to_le32(0);
-		ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
-	}
-	return 0;
-}
-
-/*
- * ext3_xattr_set_handle()
- *
- * Create, replace or remove an extended attribute for this inode.  Value
- * is NULL to remove an existing extended attribute, and non-NULL to
- * either replace an existing extended attribute, or create a new extended
- * attribute. The flags XATTR_REPLACE and XATTR_CREATE
- * specify that an extended attribute must exist and must not exist
- * prior to the call, respectively.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-		      const char *name, const void *value, size_t value_len,
-		      int flags)
-{
-	struct ext3_xattr_info i = {
-		.name_index = name_index,
-		.name = name,
-		.value = value,
-		.value_len = value_len,
-	};
-	struct ext3_xattr_ibody_find is = {
-		.s = { .not_found = -ENODATA, },
-	};
-	struct ext3_xattr_block_find bs = {
-		.s = { .not_found = -ENODATA, },
-	};
-	int error;
-
-	if (!name)
-		return -EINVAL;
-	if (strlen(name) > 255)
-		return -ERANGE;
-	down_write(&EXT3_I(inode)->xattr_sem);
-	error = ext3_get_inode_loc(inode, &is.iloc);
-	if (error)
-		goto cleanup;
-
-	error = ext3_journal_get_write_access(handle, is.iloc.bh);
-	if (error)
-		goto cleanup;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
-		struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-		ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-	}
-
-	error = ext3_xattr_ibody_find(inode, &i, &is);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found)
-		error = ext3_xattr_block_find(inode, &i, &bs);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found && bs.s.not_found) {
-		error = -ENODATA;
-		if (flags & XATTR_REPLACE)
-			goto cleanup;
-		error = 0;
-		if (!value)
-			goto cleanup;
-	} else {
-		error = -EEXIST;
-		if (flags & XATTR_CREATE)
-			goto cleanup;
-	}
-	if (!value) {
-		if (!is.s.not_found)
-			error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		else if (!bs.s.not_found)
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-	} else {
-		error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		if (!error && !bs.s.not_found) {
-			i.value = NULL;
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-		} else if (error == -ENOSPC) {
-			if (EXT3_I(inode)->i_file_acl && !bs.s.base) {
-				error = ext3_xattr_block_find(inode, &i, &bs);
-				if (error)
-					goto cleanup;
-			}
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-			if (error)
-				goto cleanup;
-			if (!is.s.not_found) {
-				i.value = NULL;
-				error = ext3_xattr_ibody_set(handle, inode, &i,
-							     &is);
-			}
-		}
-	}
-	if (!error) {
-		ext3_xattr_update_super_block(handle, inode->i_sb);
-		inode->i_ctime = CURRENT_TIME_SEC;
-		error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
-		/*
-		 * The bh is consumed by ext3_mark_iloc_dirty, even with
-		 * error != 0.
-		 */
-		is.iloc.bh = NULL;
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-	}
-
-cleanup:
-	brelse(is.iloc.bh);
-	brelse(bs.bh);
-	up_write(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
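
The XATTR_REPLACE/XATTR_CREATE semantics spelled out above reach userspace unchanged through setxattr(2). A small sketch; the file "f", attribute name and value are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
    	const char *val = "hello";

    	/* Attribute must not exist yet: fails with EEXIST otherwise. */
    	if (setxattr("f", "user.comment", val, strlen(val),
    		     XATTR_CREATE) < 0)
    		perror("XATTR_CREATE");

    	/* Attribute must already exist: fails with ENODATA otherwise. */
    	if (setxattr("f", "user.comment", val, strlen(val),
    		     XATTR_REPLACE) < 0)
    		perror("XATTR_REPLACE");

    	return 0;
    }
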
-
-/*
- * ext3_xattr_set()
- *
- * Like ext3_xattr_set_handle, but start from an inode. This extended
- * attribute modification is a filesystem transaction by itself.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t value_len, int flags)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle)) {
-		error = PTR_ERR(handle);
-	} else {
-		int error2;
-
-		error = ext3_xattr_set_handle(handle, inode, name_index, name,
-					      value, value_len, flags);
-		error2 = ext3_journal_stop(handle);
-		if (error == -ENOSPC &&
-		    ext3_should_retry_alloc(inode->i_sb, &retries))
-			goto retry;
-		if (error == 0)
-			error = error2;
-	}
-
-	return error;
-}
-
-/*
- * ext3_xattr_delete_inode()
- *
- * Free extended attribute resources associated with this inode. This
- * is called immediately before an inode is freed. We have exclusive
- * access to the inode.
- */
-void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-	struct buffer_head *bh = NULL;
-
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: block "E3FSBLK" read error", inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: bad block "E3FSBLK, inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	ext3_xattr_release_block(handle, inode, bh);
-	EXT3_I(inode)->i_file_acl = 0;
-
-cleanup:
-	brelse(bh);
-}
-
-/*
- * ext3_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext3_xattr_put_super(struct super_block *sb)
-{
-	mb_cache_shrink(sb->s_bdev);
-}
-
-/*
- * ext3_xattr_cache_insert()
- *
- * Create a new entry in the extended attribute cache, and insert
- * it unless such an entry is already in the cache.
- *
- * Insertion is best-effort: failures are only logged and then ignored.
- */
-static void
-ext3_xattr_cache_insert(struct buffer_head *bh)
-{
-	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-	struct mb_cache_entry *ce;
-	int error;
-
-	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
-	if (!ce) {
-		ea_bdebug(bh, "out of memory");
-		return;
-	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
-	if (error) {
-		mb_cache_entry_free(ce);
-		if (error == -EBUSY) {
-			ea_bdebug(bh, "already in cache");
-			error = 0;
-		}
-	} else {
-		ea_bdebug(bh, "inserting [%x]", (int)hash);
-		mb_cache_entry_release(ce);
-	}
-}
-
-/*
- * ext3_xattr_cmp()
- *
- * Compare two extended attribute blocks for equality.
- *
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
- */
-static int
-ext3_xattr_cmp(struct ext3_xattr_header *header1,
-	       struct ext3_xattr_header *header2)
-{
-	struct ext3_xattr_entry *entry1, *entry2;
-
-	entry1 = ENTRY(header1+1);
-	entry2 = ENTRY(header2+1);
-	while (!IS_LAST_ENTRY(entry1)) {
-		if (IS_LAST_ENTRY(entry2))
-			return 1;
-		if (entry1->e_hash != entry2->e_hash ||
-		    entry1->e_name_index != entry2->e_name_index ||
-		    entry1->e_name_len != entry2->e_name_len ||
-		    entry1->e_value_size != entry2->e_value_size ||
-		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
-			return 1;
-		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
-			return -EIO;
-		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
-			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
-			   le32_to_cpu(entry1->e_value_size)))
-			return 1;
-
-		entry1 = EXT3_XATTR_NEXT(entry1);
-		entry2 = EXT3_XATTR_NEXT(entry2);
-	}
-	if (!IS_LAST_ENTRY(entry2))
-		return 1;
-	return 0;
-}
-
-/*
- * ext3_xattr_cache_find()
- *
- * Find an identical extended attribute block.
- *
- * Returns a pointer to the block found, or NULL if such a block was
- * not found or an error occurred.
- */
-static struct buffer_head *
-ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
-		      struct mb_cache_entry **pce)
-{
-	__u32 hash = le32_to_cpu(header->h_hash);
-	struct mb_cache_entry *ce;
-
-	if (!header->h_hash)
-		return NULL;  /* never share */
-	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-	ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
-				       hash);
-	while (ce) {
-		struct buffer_head *bh;
-
-		if (IS_ERR(ce)) {
-			if (PTR_ERR(ce) == -EAGAIN)
-				goto again;
-			break;
-		}
-		bh = sb_bread(inode->i_sb, ce->e_block);
-		if (!bh) {
-			ext3_error(inode->i_sb, __func__,
-				"inode %lu: block %lu read error",
-				inode->i_ino, (unsigned long) ce->e_block);
-		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
-				EXT3_XATTR_REFCOUNT_MAX) {
-			ea_idebug(inode, "block %lu refcount %d>=%d",
-				  (unsigned long) ce->e_block,
-				  le32_to_cpu(BHDR(bh)->h_refcount),
-					  EXT3_XATTR_REFCOUNT_MAX);
-		} else if (ext3_xattr_cmp(header, BHDR(bh)) == 0) {
-			*pce = ce;
-			return bh;
-		}
-		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
-	}
-	return NULL;
-}
-
-#define NAME_HASH_SHIFT 5
-#define VALUE_HASH_SHIFT 16
-
-/*
- * ext3_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
-					 struct ext3_xattr_entry *entry)
-{
-	__u32 hash = 0;
-	char *name = entry->e_name;
-	int n;
-
-	for (n=0; n < entry->e_name_len; n++) {
-		hash = (hash << NAME_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
-		       *name++;
-	}
-
-	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-		__le32 *value = (__le32 *)((char *)header +
-			le16_to_cpu(entry->e_value_offs));
-		for (n = (le32_to_cpu(entry->e_value_size) +
-		     EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
-			hash = (hash << VALUE_HASH_SHIFT) ^
-			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
-			       le32_to_cpu(*value++);
-		}
-	}
-	entry->e_hash = cpu_to_le32(hash);
-}
-
-#undef NAME_HASH_SHIFT
-#undef VALUE_HASH_SHIFT
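
Since the shifted halves in (hash << 5) ^ (hash >> 27) do not overlap, that expression is exactly a 32-bit rotate left by five, so the name hash above is a rotate-and-xor fold over the name bytes. A standalone restatement (the unsigned char cast is added here for portability):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NAME_HASH_SHIFT 5

    /* Fold each byte in: rotate the hash left by 5 bits, xor in the byte. */
    static uint32_t name_hash(const char *name, size_t len)
    {
    	uint32_t hash = 0;

    	while (len--)
    		hash = (hash << NAME_HASH_SHIFT) ^
    		       (hash >> (32 - NAME_HASH_SHIFT)) ^
    		       (unsigned char)*name++;
    	return hash;
    }

    int main(void)
    {
    	const char *n = "user.comment";

    	printf("%08x\n", name_hash(n, strlen(n)));
    	return 0;
    }
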
-
-#define BLOCK_HASH_SHIFT 16
-
-/*
- * ext3_xattr_rehash()
- *
- * Re-compute the extended attribute hash value after an entry has changed.
- */
-static void ext3_xattr_rehash(struct ext3_xattr_header *header,
-			      struct ext3_xattr_entry *entry)
-{
-	struct ext3_xattr_entry *here;
-	__u32 hash = 0;
-
-	ext3_xattr_hash_entry(header, entry);
-	here = ENTRY(header+1);
-	while (!IS_LAST_ENTRY(here)) {
-		if (!here->e_hash) {
-			/* Block is not shared if an entry's hash value == 0 */
-			hash = 0;
-			break;
-		}
-		hash = (hash << BLOCK_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
-		       le32_to_cpu(here->e_hash);
-		here = EXT3_XATTR_NEXT(here);
-	}
-	header->h_hash = cpu_to_le32(hash);
-}
-
-#undef BLOCK_HASH_SHIFT
-
-int __init
-init_ext3_xattr(void)
-{
-	ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
-	if (!ext3_xattr_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void
-exit_ext3_xattr(void)
-{
-	if (ext3_xattr_cache)
-		mb_cache_destroy(ext3_xattr_cache);
-	ext3_xattr_cache = NULL;
-}
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
deleted file mode 100644
index 32e93eb..0000000
--- a/fs/ext3/xattr.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-  File: fs/ext3/xattr.h
-
-  On-disk format of extended attributes for the ext3 filesystem.
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/xattr.h>
-
-/* Magic value in attribute blocks */
-#define EXT3_XATTR_MAGIC		0xEA020000
-
-/* Maximum number of references to one attribute block */
-#define EXT3_XATTR_REFCOUNT_MAX		1024
-
-/* Name indexes */
-#define EXT3_XATTR_INDEX_USER			1
-#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS	2
-#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT	3
-#define EXT3_XATTR_INDEX_TRUSTED		4
-#define	EXT3_XATTR_INDEX_LUSTRE			5
-#define EXT3_XATTR_INDEX_SECURITY	        6
-
-struct ext3_xattr_header {
-	__le32	h_magic;	/* magic number for identification */
-	__le32	h_refcount;	/* reference count */
-	__le32	h_blocks;	/* number of disk blocks used */
-	__le32	h_hash;		/* hash value of all attributes */
-	__u32	h_reserved[4];	/* zero right now */
-};
-
-struct ext3_xattr_ibody_header {
-	__le32	h_magic;	/* magic number for identification */
-};
-
-struct ext3_xattr_entry {
-	__u8	e_name_len;	/* length of name */
-	__u8	e_name_index;	/* attribute name index */
-	__le16	e_value_offs;	/* offset in disk block of value */
-	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
-	__le32	e_value_size;	/* size of attribute value */
-	__le32	e_hash;		/* hash value of name and value */
-	char	e_name[0];	/* attribute name */
-};
-
-#define EXT3_XATTR_PAD_BITS		2
-#define EXT3_XATTR_PAD		(1<<EXT3_XATTR_PAD_BITS)
-#define EXT3_XATTR_ROUND		(EXT3_XATTR_PAD-1)
-#define EXT3_XATTR_LEN(name_len) \
-	(((name_len) + EXT3_XATTR_ROUND + \
-	sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
-#define EXT3_XATTR_NEXT(entry) \
-	( (struct ext3_xattr_entry *)( \
-	  (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
-#define EXT3_XATTR_SIZE(size) \
-	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
-
-# ifdef CONFIG_EXT3_FS_XATTR
-
-extern const struct xattr_handler ext3_xattr_user_handler;
-extern const struct xattr_handler ext3_xattr_trusted_handler;
-extern const struct xattr_handler ext3_xattr_security_handler;
-
-extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
-
-extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
-extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-
-extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext3_xattr_put_super(struct super_block *);
-
-extern int init_ext3_xattr(void);
-extern void exit_ext3_xattr(void);
-
-extern const struct xattr_handler *ext3_xattr_handlers[];
-
-# else  /* CONFIG_EXT3_FS_XATTR */
-
-static inline int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-	       const char *name, const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-}
-
-static inline void
-ext3_xattr_put_super(struct super_block *sb)
-{
-}
-
-static inline int
-init_ext3_xattr(void)
-{
-	return 0;
-}
-
-static inline void
-exit_ext3_xattr(void)
-{
-}
-
-#define ext3_xattr_handlers	NULL
-
-# endif  /* CONFIG_EXT3_FS_XATTR */
-
-#ifdef CONFIG_EXT3_FS_SECURITY
-extern int ext3_init_security(handle_t *handle, struct inode *inode,
-			      struct inode *dir, const struct qstr *qstr);
-#else
-static inline int ext3_init_security(handle_t *handle, struct inode *inode,
-				     struct inode *dir, const struct qstr *qstr)
-{
-	return 0;
-}
-#endif
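
As a worked example of the padding macros in the header just removed: EXT3_XATTR_PAD is 4 and struct ext3_xattr_entry is 16 bytes, so a 5-byte name rounds up to a 24-byte entry and a 7-byte value pads to 8 bytes. A userspace check with the macros transcribed (the 16-byte entry size is derived from the struct layout above):

    #include <stdio.h>

    #define EXT3_XATTR_PAD_BITS	2
    #define EXT3_XATTR_PAD		(1 << EXT3_XATTR_PAD_BITS)
    #define EXT3_XATTR_ROUND	(EXT3_XATTR_PAD - 1)
    #define ENTRY_HEADER_SIZE	16	/* sizeof(struct ext3_xattr_entry) */
    #define XATTR_LEN(name_len) \
    	(((name_len) + EXT3_XATTR_ROUND + ENTRY_HEADER_SIZE) & \
    	 ~EXT3_XATTR_ROUND)
    #define XATTR_SIZE(size) \
    	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)

    int main(void)
    {
    	printf("XATTR_LEN(5)  = %d\n", XATTR_LEN(5));	/* 24 */
    	printf("XATTR_SIZE(7) = %d\n", XATTR_SIZE(7));	/* 8  */
    	return 0;
    }
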
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
deleted file mode 100644
index c9506d5..0000000
--- a/fs/ext3/xattr_security.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * linux/fs/ext3/xattr_security.c
- * Handler for storing security labels as extended attributes.
- */
-
-#include <linux/security.h>
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
-			 const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_security_get(struct dentry *dentry, const char *name,
-		void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_security_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, value, size, flags);
-}
-
-static int ext3_initxattrs(struct inode *inode,
-			   const struct xattr *xattr_array,
-			   void *fs_info)
-{
-	const struct xattr *xattr;
-	handle_t *handle = fs_info;
-	int err = 0;
-
-	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		err = ext3_xattr_set_handle(handle, inode,
-					    EXT3_XATTR_INDEX_SECURITY,
-					    xattr->name, xattr->value,
-					    xattr->value_len, 0);
-		if (err < 0)
-			break;
-	}
-	return err;
-}
-
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
-		   const struct qstr *qstr)
-{
-	return security_inode_init_security(inode, dir, qstr,
-					    &ext3_initxattrs, handle);
-}
-
-const struct xattr_handler ext3_xattr_security_handler = {
-	.prefix	= XATTR_SECURITY_PREFIX,
-	.list	= ext3_xattr_security_list,
-	.get	= ext3_xattr_security_get,
-	.set	= ext3_xattr_security_set,
-};
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
deleted file mode 100644
index 206cc66..0000000
--- a/fs/ext3/xattr_trusted.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/fs/ext3/xattr_trusted.c
- * Handler for trusted extended attributes.
- *
- * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_trusted_get(struct dentry *dentry, const char *name,
-		       void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name,
-			      value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_trusted_handler = {
-	.prefix	= XATTR_TRUSTED_PREFIX,
-	.list	= ext3_xattr_trusted_list,
-	.get	= ext3_xattr_trusted_get,
-	.set	= ext3_xattr_trusted_set,
-};
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
deleted file mode 100644
index 021508a..0000000
--- a/fs/ext3/xattr_user.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * linux/fs/ext3/xattr_user.c
- * Handler for extended user attributes.
- *
- * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_USER_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer,
-		size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_user_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_user_handler = {
-	.prefix	= XATTR_USER_PREFIX,
-	.list	= ext3_xattr_user_list,
-	.get	= ext3_xattr_user_get,
-	.set	= ext3_xattr_user_set,
-};
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index bf8bc8a..47728da 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -1,5 +1,38 @@
+# Ext3 configs are here for backward compatibility with old configs which may
+# have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable
+# kernels after the removal of ext3 driver.
+config EXT3_FS
+	tristate "The Extended 3 (ext3) filesystem"
+	# These must match EXT4_FS selects...
+	select EXT4_FS
+	select JBD2
+	select CRC16
+	select CRYPTO
+	select CRYPTO_CRC32C
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_POSIX_ACL
+	bool "Ext3 POSIX Access Control Lists"
+	depends on EXT3_FS
+	select EXT4_FS_POSIX_ACL
+	select FS_POSIX_ACL
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_SECURITY
+	bool "Ext3 Security Labels"
+	depends on EXT3_FS
+	select EXT4_FS_SECURITY
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
 config EXT4_FS
 	tristate "The Extended 4 (ext4) filesystem"
+	# Please update EXT3_FS selects when changing these
 	select JBD2
 	select CRC16
 	select CRYPTO
@@ -16,26 +49,27 @@
 	  up fsck time.  For more information, please see the web pages at
 	  http://ext4.wiki.kernel.org.
 
-	  The ext4 filesystem will support mounting an ext3
-	  filesystem; while there will be some performance gains from
-	  the delayed allocation and inode table readahead, the best
-	  performance gains will require enabling ext4 features in the
-	  filesystem, or formatting a new filesystem as an ext4
-	  filesystem initially.
+	  The ext4 filesystem supports mounting an ext3 filesystem; while there
+	  are some performance gains from the delayed allocation and inode
+	  table readahead, the best performance gains require enabling ext4
+	  features in the filesystem using tune2fs, or formatting a new
+	  filesystem as an ext4 filesystem initially. Without explicit enabling
+	  of ext4 features, the on disk filesystem format stays fully backward
+	  compatible.
 
 	  To compile this file system support as a module, choose M here. The
 	  module will be called ext4.
 
 	  If unsure, say N.
 
-config EXT4_USE_FOR_EXT23
+config EXT4_USE_FOR_EXT2
 	bool "Use ext4 for ext2/ext3 file systems"
 	depends on EXT4_FS
-	depends on EXT3_FS=n || EXT2_FS=n
+	depends on EXT2_FS=n
 	default y
 	help
-	  Allow the ext4 file system driver code to be used for ext2 or
-	  ext3 file system mounts.  This allows users to reduce their
+	  Allow the ext4 file system driver code to be used for ext2
+	  file system mounts.  This allows users to reduce their
 	  compiled kernel size by using one file system driver for
 	  ext2, ext3, and ext4 file systems.
 
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index 7dc4eb5..847f919 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -19,7 +19,6 @@
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
-#include <linux/key.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/random.h>
@@ -329,6 +328,10 @@
 			return oname->len;
 		}
 	}
+	if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) {
+		EXT4_ERROR_INODE(inode, "encrypted inode too small");
+		return -EUCLEAN;
+	}
 	if (EXT4_I(inode)->i_crypt_info)
 		return ext4_fname_decrypt(inode, iname, oname);
 
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 442d24e..1d510c1 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -30,7 +30,7 @@
 
 /**
  * ext4_derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivatio.
+ * @deriving_key: Encryption key used for derivation.
  * @source_key:   Source key to which to apply derivation.
  * @derived_key:  Derived key.
  *
@@ -220,6 +220,8 @@
 	BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
 	res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
 				  raw_key);
+	if (res)
+		goto out;
 got_key:
 	ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
 	if (!ctfm || IS_ERR(ctfm)) {
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 02c4e5d..a640ec2 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 
+#include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
 
@@ -49,7 +50,8 @@
 	struct inode *inode, const struct ext4_encryption_policy *policy)
 {
 	struct ext4_encryption_context ctx;
-	int res = 0;
+	handle_t *handle;
+	int res, res2;
 
 	res = ext4_convert_inline_data(inode);
 	if (res)
@@ -78,11 +80,22 @@
 	BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
 	get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
 
+	handle = ext4_journal_start(inode, EXT4_HT_MISC,
+				    ext4_jbd2_credits_xattr(inode));
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
 			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
 			     sizeof(ctx), 0);
-	if (!res)
+	if (!res) {
 		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+		res = ext4_mark_inode_dirty(handle, inode);
+		if (res)
+			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
+	}
+	res2 = ext4_journal_stop(handle);
+	if (!res)
+		res = res2;
 	return res;
 }
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f5e9f04..fd1f28b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -187,7 +187,7 @@
 } ext4_io_end_t;
 
 struct ext4_io_submit {
-	int			io_op;
+	struct writeback_control *io_wbc;
 	struct bio		*io_bio;
 	ext4_io_end_t		*io_end;
 	sector_t		io_next_block;
@@ -2272,6 +2272,8 @@
 struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
 int ext4_get_block_write(struct inode *inode, sector_t iblock,
 			 struct buffer_head *bh_result, int create);
+int ext4_get_block_dax(struct inode *inode, sector_t iblock,
+			 struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 				struct buffer_head *bh_result, int create);
 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index bc313ac..113837e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/path.h>
+#include <linux/dax.h>
 #include <linux/quotaops.h>
 #include <linux/pagevec.h>
 #include <linux/uio.h>
@@ -195,7 +196,7 @@
 static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
 {
 	struct inode *inode = bh->b_assoc_map->host;
-	/* XXX: breaks on 32-bit > 16GB. Is that even supported? */
+	/* XXX: breaks on 32-bit > 16TB. Is that even supported? */
 	loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
 	int err;
 	if (!uptodate)
@@ -206,17 +207,74 @@
 
 static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	return dax_fault(vma, vmf, ext4_get_block, ext4_end_io_unwritten);
-					/* Is this the right get_block? */
+	int result;
+	handle_t *handle = NULL;
+	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	if (write) {
+		sb_start_pagefault(sb);
+		file_update_time(vma->vm_file);
+		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+						EXT4_DATA_TRANS_BLOCKS(sb));
+	}
+
+	if (IS_ERR(handle))
+		result = VM_FAULT_SIGBUS;
+	else
+		result = __dax_fault(vma, vmf, ext4_get_block_dax,
+						ext4_end_io_unwritten);
+
+	if (write) {
+		if (!IS_ERR(handle))
+			ext4_journal_stop(handle);
+		sb_end_pagefault(sb);
+	}
+
+	return result;
+}
+
+static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+						pmd_t *pmd, unsigned int flags)
+{
+	int result;
+	handle_t *handle = NULL;
+	struct inode *inode = file_inode(vma->vm_file);
+	struct super_block *sb = inode->i_sb;
+	bool write = flags & FAULT_FLAG_WRITE;
+
+	if (write) {
+		sb_start_pagefault(sb);
+		file_update_time(vma->vm_file);
+		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+				ext4_chunk_trans_blocks(inode,
+							PMD_SIZE / PAGE_SIZE));
+	}
+
+	if (IS_ERR(handle))
+		result = VM_FAULT_SIGBUS;
+	else
+		result = __dax_pmd_fault(vma, addr, pmd, flags,
+				ext4_get_block_dax, ext4_end_io_unwritten);
+
+	if (write) {
+		if (!IS_ERR(handle))
+			ext4_journal_stop(handle);
+		sb_end_pagefault(sb);
+	}
+
+	return result;
 }
 
 static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	return dax_mkwrite(vma, vmf, ext4_get_block, ext4_end_io_unwritten);
+	return dax_mkwrite(vma, vmf, ext4_get_block_dax,
+				ext4_end_io_unwritten);
 }
 
 static const struct vm_operations_struct ext4_dax_vm_ops = {
 	.fault		= ext4_dax_fault,
+	.pmd_fault	= ext4_dax_pmd_fault,
 	.page_mkwrite	= ext4_dax_mkwrite,
 	.pfn_mkwrite	= dax_pfn_mkwrite,
 };
@@ -244,7 +302,7 @@
 	file_accessed(file);
 	if (IS_DAX(file_inode(file))) {
 		vma->vm_ops = &ext4_dax_vm_ops;
-		vma->vm_flags |= VM_MIXEDMAP;
+		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 	} else {
 		vma->vm_ops = &ext4_file_vm_ops;
 	}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 173c1ae..619bfc1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -721,7 +721,7 @@
 	struct ext4_group_desc *gdp = NULL;
 	struct ext4_inode_info *ei;
 	struct ext4_sb_info *sbi;
-	int ret2, err = 0;
+	int ret2, err;
 	struct inode *ret;
 	ext4_group_t i;
 	ext4_group_t flex_group;
@@ -769,7 +769,9 @@
 		inode->i_gid = dir->i_gid;
 	} else
 		inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto out;
 
 	if (!goal)
 		goal = sbi->s_inode_goal;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 4f6ac49..2468261 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -22,6 +22,7 @@
 
 #include "ext4_jbd2.h"
 #include "truncate.h"
+#include <linux/dax.h>
 #include <linux/uio.h>
 
 #include <trace/events/ext4.h>
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cecf9aa..612fbcf 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
+#include <linux/dax.h>
 #include <linux/quotaops.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
@@ -3020,6 +3021,17 @@
 			       EXT4_GET_BLOCKS_NO_LOCK);
 }
 
+int ext4_get_block_dax(struct inode *inode, sector_t iblock,
+		   struct buffer_head *bh_result, int create)
+{
+	int flags = EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_UNWRIT_EXT;
+	if (create)
+		flags |= EXT4_GET_BLOCKS_CREATE;
+	ext4_debug("ext4_get_block_dax: inode %lu, create flag %d\n",
+		   inode->i_ino, create);
+	return _ext4_get_block(inode, iblock, bh_result, flags);
+}
+
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 			    ssize_t size, void *private)
 {
@@ -4661,8 +4673,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
 		handle_t *handle;
@@ -4725,6 +4740,14 @@
 				error = ext4_orphan_add(handle, inode);
 				orphan = 1;
 			}
+			/*
+			 * Update c/mtime on truncate up, ext4_truncate() will
+			 * update c/mtime in shrink case below
+			 */
+			if (!shrink) {
+				inode->i_mtime = ext4_current_time(inode);
+				inode->i_ctime = inode->i_mtime;
+			}
 			down_write(&EXT4_I(inode)->i_data_sem);
 			EXT4_I(inode)->i_disksize = attr->ia_size;
 			rc = ext4_mark_inode_dirty(handle, inode);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 8313ca3..6eb1a61 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -69,6 +69,7 @@
 			  ext4_fsblk_t mmp_block)
 {
 	struct mmp_struct *mmp;
+	int ret;
 
 	if (*bh)
 		clear_buffer_uptodate(*bh);
@@ -76,33 +77,36 @@
 	/* This would be sb_bread(sb, mmp_block), except we need to be sure
 	 * that the MD RAID device cache has been bypassed, and that the read
 	 * is not blocked in the elevator. */
-	if (!*bh)
+	if (!*bh) {
 		*bh = sb_getblk(sb, mmp_block);
-	if (!*bh)
-		return -ENOMEM;
-	if (*bh) {
-		get_bh(*bh);
-		lock_buffer(*bh);
-		(*bh)->b_end_io = end_buffer_read_sync;
-		submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
-		wait_on_buffer(*bh);
-		if (!buffer_uptodate(*bh)) {
-			brelse(*bh);
-			*bh = NULL;
+		if (!*bh) {
+			ret = -ENOMEM;
+			goto warn_exit;
 		}
 	}
-	if (unlikely(!*bh)) {
-		ext4_warning(sb, "Error while reading MMP block %llu",
-			     mmp_block);
-		return -EIO;
+
+	get_bh(*bh);
+	lock_buffer(*bh);
+	(*bh)->b_end_io = end_buffer_read_sync;
+	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	wait_on_buffer(*bh);
+	if (!buffer_uptodate(*bh)) {
+		brelse(*bh);
+		*bh = NULL;
+		ret = -EIO;
+		goto warn_exit;
 	}
 
 	mmp = (struct mmp_struct *)((*bh)->b_data);
-	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
-	    !ext4_mmp_csum_verify(sb, mmp))
-		return -EINVAL;
+	if (le32_to_cpu(mmp->mmp_magic) == EXT4_MMP_MAGIC &&
+	    ext4_mmp_csum_verify(sb, mmp))
+		return 0;
+	ret = -EINVAL;
 
-	return 0;
+warn_exit:
+	ext4_warning(sb, "Error %d while reading MMP block %llu",
+		     ret, mmp_block);
+	return ret;
 }
 
 /*
@@ -111,7 +115,7 @@
 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
 		    const char *function, unsigned int line, const char *msg)
 {
-	__ext4_warning(sb, function, line, msg);
+	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
 		       "MMP failure info: last update time: %llu, last update "
 		       "node: %s, last update device: %s\n",
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 011dcfb..9f61e76 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2436,7 +2436,9 @@
 	struct inode *inode;
 	int err, credits, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2470,7 +2472,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2499,7 +2503,9 @@
 	struct inode *inode;
 	int err, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	inode = ext4_new_inode_start_handle(dir, mode,
@@ -2612,7 +2618,9 @@
 	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2910,8 +2918,12 @@
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -2980,8 +2992,12 @@
 	trace_ext4_unlink_enter(dir, dentry);
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -3066,7 +3082,9 @@
 		goto err_free_sd;
 	}
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto err_free_sd;
 
 	if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
 		/*
@@ -3197,7 +3215,9 @@
 	if (ext4_encrypted_inode(dir) &&
 	    !ext4_is_child_context_consistent_with_parent(dir, inode))
 		return -EPERM;
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -3476,13 +3496,20 @@
 	int credits;
 	u8 old_file_type;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	if (new.inode)
-		dquot_initialize(new.inode);
+	if (new.inode) {
+		retval = dquot_initialize(new.inode);
+		if (retval)
+			return retval;
+	}
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
 	if (IS_ERR(old.bh))
@@ -3678,8 +3705,12 @@
 							   new.inode)))
 		return -EPERM;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
 				 &old.de, &old.inlined);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 8a9d63a..84ba4d2 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -354,8 +354,10 @@
 	struct bio *bio = io->io_bio;
 
 	if (bio) {
+		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
+			    WRITE_SYNC : WRITE;
 		bio_get(io->io_bio);
-		submit_bio(io->io_op, io->io_bio);
+		submit_bio(io_op, io->io_bio);
 		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
@@ -364,7 +366,7 @@
 void ext4_io_submit_init(struct ext4_io_submit *io,
 			 struct writeback_control *wbc)
 {
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+	io->io_wbc = wbc;
 	io->io_bio = NULL;
 	io->io_end = NULL;
 }
@@ -377,6 +379,7 @@
 	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 	if (!bio)
 		return -ENOMEM;
+	wbc_init_bio(io->io_wbc, bio);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
@@ -405,6 +408,7 @@
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
+	wbc_account_io(io->io_wbc, page, bh->b_size);
 	io->io_next_block++;
 	return 0;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 58987b5..a63c7b0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -60,6 +60,7 @@
 static struct mutex ext4_li_mtx;
 static struct ext4_features *ext4_feat;
 static int ext4_mballoc_ready;
+static struct ratelimit_state ext4_mount_msg_ratelimit;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);
@@ -84,7 +85,7 @@
 static void ext4_clear_request_list(void);
 static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t);
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static struct file_system_type ext2_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext2",
@@ -100,7 +101,6 @@
 #endif
 
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext3",
@@ -111,9 +111,6 @@
 MODULE_ALIAS_FS("ext3");
 MODULE_ALIAS("ext3");
 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
-#else
-#define IS_EXT3_SB(sb) (0)
-#endif
 
 static int ext4_verify_csum_type(struct super_block *sb,
 				 struct ext4_super_block *es)
@@ -325,6 +322,22 @@
 	ext4_commit_super(sb, 1);
 }
 
+/*
+ * The del_gendisk() function uninitializes the disk-specific data
+ * structures, including the bdi structure, without telling anyone
+ * else.  Once this happens, any attempt to call mark_buffer_dirty()
+ * (for example, by ext4_commit_super) will cause a kernel OOPS.
+ * This is a kludge to prevent these oopses until we can put in a proper
+ * hook in del_gendisk() to inform the VFS and file system layers.
+ */
+static int block_device_ejected(struct super_block *sb)
+{
+	struct inode *bd_inode = sb->s_bdev->bd_inode;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
+
+	return bdi->dev == NULL;
+}
+
 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 {
 	struct super_block		*sb = journal->j_private;
@@ -1394,9 +1407,9 @@
 	{Opt_stripe, 0, MOPT_GTE0},
 	{Opt_resuid, 0, MOPT_GTE0},
 	{Opt_resgid, 0, MOPT_GTE0},
-	{Opt_journal_dev, 0, MOPT_GTE0},
-	{Opt_journal_path, 0, MOPT_STRING},
-	{Opt_journal_ioprio, 0, MOPT_GTE0},
+	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
+	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
+	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
 	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
@@ -1763,10 +1776,10 @@
 	}
 
 	if (sbi->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
+		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
 
 	if (sbi->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
+		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
 #endif
 }
 
@@ -3643,6 +3656,8 @@
 		}
 		if (test_opt(sb, DELALLOC))
 			clear_opt(sb, DELALLOC);
+	} else {
+		sb->s_iflags |= SB_I_CGROUPWB;
 	}
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -4275,9 +4290,10 @@
 				 "the device does not support discard");
 	}
 
-	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
-		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
+	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
+		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
+			 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
 	if (es->s_error_count)
 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -4617,7 +4633,7 @@
 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
 	int error = 0;
 
-	if (!sbh)
+	if (!sbh || block_device_ejected(sb))
 		return error;
 	if (buffer_write_io_error(sbh)) {
 		/*
@@ -4665,7 +4681,8 @@
 	ext4_superblock_csum_set(sb);
 	mark_buffer_dirty(sbh);
 	if (sync) {
-		error = sync_dirty_buffer(sbh);
+		error = __sync_dirty_buffer(sbh,
+			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
 		if (error)
 			return error;
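
The hunk above changes the superblock write to honor the barrier mount
option at submission time: with barriers enabled the buffer is written
with FUA (forced unit access, reaching stable media without a full device
cache flush), otherwise as a plain synchronous write.  A condensed sketch
of the selection, using only the calls visible in this hunk:

    struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

    /* WRITE_FUA persists this one block without flushing the whole
     * device cache; WRITE_SYNC is an ordinary synchronous write. */
    error = __sync_dirty_buffer(sbh,
            test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);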
 
@@ -4833,10 +4850,11 @@
 		error = jbd2_journal_flush(journal);
 		if (error < 0)
 			goto out;
+
+		/* Journal blocked and flushed, clear needs_recovery flag. */
+		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	}
 
-	/* Journal blocked and flushed, clear needs_recovery flag. */
-	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	error = ext4_commit_super(sb, 1);
 out:
 	if (journal)
@@ -4854,8 +4872,11 @@
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
-	/* Reset the needs_recovery flag before the fs is unlocked. */
-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	if (EXT4_SB(sb)->s_journal) {
+		/* Reset the needs_recovery flag before the fs is unlocked. */
+		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	}
+
 	ext4_commit_super(sb, 1);
 	return 0;
 }
@@ -5500,7 +5521,7 @@
 	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
 }
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static inline void register_as_ext2(void)
 {
 	int err = register_filesystem(&ext2_fs_type);
@@ -5530,7 +5551,6 @@
 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
 #endif
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static inline void register_as_ext3(void)
 {
 	int err = register_filesystem(&ext3_fs_type);
@@ -5556,11 +5576,6 @@
 		return 0;
 	return 1;
 }
-#else
-static inline void register_as_ext3(void) { }
-static inline void unregister_as_ext3(void) { }
-static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
-#endif
 
 static struct file_system_type ext4_fs_type = {
 	.owner		= THIS_MODULE,
@@ -5610,6 +5625,7 @@
 {
 	int i, err;
 
+	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
 	ext4_li_info = NULL;
 	mutex_init(&ext4_li_mtx);
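
Taken together, the ext4_mount_msg_ratelimit hunks above form the usual
ratelimit pattern: a file-scope struct ratelimit_state, a one-time
ratelimit_state_init() at module init, and a ___ratelimit() gate at the
message site.  With an interval of 30 * HZ and a burst of 64, at most 64
mount messages are printed per 30-second window.  Condensed from the
hunks above:

    static struct ratelimit_state ext4_mount_msg_ratelimit;

    /* module init: allow bursts of 64 messages per 30 seconds */
    ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);

    /* message site: ___ratelimit() returns nonzero when printing is allowed */
    if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
        ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. Opts: ...", descr);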
 
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index c629762..b0a9dc9 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -45,7 +45,7 @@
 	default y
 	help
 	  Posix Access Control Lists (ACLs) support permissions for users and
-	  gourps beyond the owner/group/world scheme.
+	  groups beyond the owner/group/world scheme.
 
 	  To learn more about Access Control Lists, visit the POSIX ACLs for
 	  Linux website <http://acl.bestbits.at/>.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 396be1a..08e101e 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -2,6 +2,7 @@
 
 f2fs-y		:= dir.o file.o inode.o namei.o hash.o super.o inline.o
 f2fs-y		+= checkpoint.o gc.o data.o node.o segment.o recovery.o
+f2fs-y		+= shrinker.o extent_cache.o
 f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b70bbe1..c5a38e3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -69,14 +69,24 @@
 
 	fio.page = page;
 
-	if (f2fs_submit_page_bio(&fio))
+	if (f2fs_submit_page_bio(&fio)) {
+		f2fs_put_page(page, 1);
 		goto repeat;
+	}
 
 	lock_page(page);
 	if (unlikely(page->mapping != mapping)) {
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+
+	/*
+	 * if there is any IO error when accessing the device, make our
+	 * filesystem readonly and make sure we do not write a checkpoint
+	 * with a non-uptodate meta page.
+	 */
+	if (unlikely(!PageUptodate(page)))
+		f2fs_stop_checkpoint(sbi);
 out:
 	return page;
 }
@@ -326,26 +336,18 @@
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	struct inode_management *im = &sbi->im[type];
-	struct ino_entry *e;
+	struct ino_entry *e, *tmp;
+
+	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
 retry:
-	if (radix_tree_preload(GFP_NOFS)) {
-		cond_resched();
-		goto retry;
-	}
+	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 
 	spin_lock(&im->ino_lock);
-
 	e = radix_tree_lookup(&im->ino_root, ino);
 	if (!e) {
-		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
-		if (!e) {
-			spin_unlock(&im->ino_lock);
-			radix_tree_preload_end();
-			goto retry;
-		}
+		e = tmp;
 		if (radix_tree_insert(&im->ino_root, ino, e)) {
 			spin_unlock(&im->ino_lock);
-			kmem_cache_free(ino_entry_slab, e);
 			radix_tree_preload_end();
 			goto retry;
 		}
@@ -358,6 +360,9 @@
 	}
 	spin_unlock(&im->ino_lock);
 	radix_tree_preload_end();
+
+	if (e != tmp)
+		kmem_cache_free(ino_entry_slab, tmp);
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
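
The rewritten __add_ino_entry() above is the classic preallocate-then-insert
idiom: the slab object is allocated up front with a blocking GFP_NOFS
allocation, the radix tree is preloaded with __GFP_NOFAIL so the insert
under the spinlock cannot fail on memory, and the preallocation is freed
only when a concurrent inserter won the race.  A minimal sketch of the
idiom (the helpers here are illustrative, not f2fs API):

    new = alloc_entry(GFP_NOFS);       /* may sleep; no lock held yet */
    spin_lock(&lock);
    e = lookup(key);
    if (!e) {
        e = new;                       /* consume the preallocation */
        insert(key, e);
    }
    spin_unlock(&lock);
    if (e != new)
        free_entry(new);               /* lost the race; drop the spare */
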
@@ -458,24 +463,34 @@
 	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 }
 
-static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	struct inode *inode = f2fs_iget(sbi->sb, ino);
-	f2fs_bug_on(sbi, IS_ERR(inode));
+	struct inode *inode;
+
+	inode = f2fs_iget(sbi->sb, ino);
+	if (IS_ERR(inode)) {
+		/*
+		 * it is a bug if we can't find the entry
+		 * for the orphan inode.
+		 */
+		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
+		return PTR_ERR(inode);
+	}
+
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
 	iput(inode);
+	return 0;
 }
 
-void recover_orphan_inodes(struct f2fs_sb_info *sbi)
+int recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {
 	block_t start_blk, orphan_blocks, i, j;
+	int err;
 
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
-		return;
-
-	set_sbi_flag(sbi, SBI_POR_DOING);
+		return 0;
 
 	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
 	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -489,14 +504,17 @@
 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
 		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
 			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
-			recover_orphan_inode(sbi, ino);
+			err = recover_orphan_inode(sbi, ino);
+			if (err) {
+				f2fs_put_page(page, 1);
+				return err;
+			}
 		}
 		f2fs_put_page(page, 1);
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
-	return;
+	return 0;
 }
 
 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
@@ -504,7 +522,7 @@
 	struct list_head *head;
 	struct f2fs_orphan_block *orphan_blk = NULL;
 	unsigned int nentries = 0;
-	unsigned short index;
+	unsigned short index = 1;
 	unsigned short orphan_blocks;
 	struct page *page = NULL;
 	struct ino_entry *orphan = NULL;
@@ -512,11 +530,6 @@
 
 	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
 
-	for (index = 0; index < orphan_blocks; index++)
-		grab_meta_page(sbi, start_blk + index);
-
-	index = 1;
-
 	/*
 	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
 	 * orphan inode operations are covered under f2fs_lock_op().
@@ -527,12 +540,10 @@
 	/* loop for each orphan inode entry and write them in journal block */
 	list_for_each_entry(orphan, head, list) {
 		if (!page) {
-			page = find_get_page(META_MAPPING(sbi), start_blk++);
-			f2fs_bug_on(sbi, !page);
+			page = grab_meta_page(sbi, start_blk++);
 			orphan_blk =
 				(struct f2fs_orphan_block *)page_address(page);
 			memset(orphan_blk, 0, sizeof(*orphan_blk));
-			f2fs_put_page(page, 0);
 		}
 
 		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
@@ -704,7 +715,8 @@
 	struct inode_entry *new;
 	int ret = 0;
 
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	if (!S_ISDIR(inode->i_mode)) {
@@ -892,12 +904,15 @@
 	__u32 crc32 = 0;
 	int i;
 	int cp_payload_blks = __cp_payload(sbi);
+	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
+	bool invalidate = false;
 
 	/*
 	 * This avoids conducting wrong roll-forward operations and uses
 	 * meta pages, so it should be called prior to sync_meta_pages below.
 	 */
-	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
+	if (discard_next_dnode(sbi, discard_blk))
+		invalidate = true;
 
 	/* Flush all the NAT/SIT pages */
 	while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1026,6 +1041,14 @@
 	/* wait for previous submitted meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
 
+	/*
+	 * invalidate the meta page which was used temporarily for zeroing
+	 * out the block at the end of the warm node chain.
+	 */
+	if (invalidate)
+		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
+								discard_blk);
+
 	release_dirty_inode(sbi);
 
 	if (unlikely(f2fs_cp_error(sbi)))
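
The new invalidate flag above pairs the temporary meta page created by
discard_next_dnode() with a matching invalidate_mapping_pages() call issued
only after all meta writeback has completed, so the page used for zeroing
out the block at the end of the warm node chain cannot linger in the meta
mapping as stale state.  Condensed from the hunks above:

    block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
    bool invalidate = false;

    if (discard_next_dnode(sbi, discard_blk))
        invalidate = true;             /* a temporary zeroing page exists */

    /* ... flush and wait on all meta pages ... */

    if (invalidate)
        invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
                                 discard_blk);
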
diff --git a/fs/f2fs/crypto_key.c b/fs/f2fs/crypto_key.c
index 95b8f93..9f77de2 100644
--- a/fs/f2fs/crypto_key.c
+++ b/fs/f2fs/crypto_key.c
@@ -92,8 +92,7 @@
 	if (!ci)
 		return;
 
-	if (ci->ci_keyring_key)
-		key_put(ci->ci_keyring_key);
+	key_put(ci->ci_keyring_key);
 	crypto_free_ablkcipher(ci->ci_ctfm);
 	kmem_cache_free(f2fs_crypt_info_cachep, ci);
 }
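
The dropped NULL check above relies on key_put() following the kfree()
convention of accepting a NULL pointer, so the teardown path can run
unconditionally:

    key_put(ci->ci_keyring_key);       /* no-op when the key is NULL */
    crypto_free_ablkcipher(ci->ci_ctfm);
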
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c414d49..a82abe9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -14,6 +14,7 @@
 #include <linux/mpage.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/prefetch.h>
@@ -26,9 +27,6 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
-static struct kmem_cache *extent_tree_slab;
-static struct kmem_cache *extent_node_slab;
-
 static void f2fs_read_end_io(struct bio *bio)
 {
 	struct bio_vec *bvec;
@@ -92,8 +90,7 @@
 {
 	struct bio *bio;
 
-	/* No failure on bio allocation */
-	bio = bio_alloc(GFP_NOIO, npages);
+	bio = f2fs_bio_alloc(npages);
 
 	bio->bi_bdev = sbi->sb->s_bdev;
 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
@@ -158,7 +155,6 @@
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 		bio_put(bio);
-		f2fs_put_page(page, 1);
 		return -EFAULT;
 	}
 
@@ -266,645 +262,17 @@
 	return err;
 }
 
-static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 {
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr;
-
-	read_lock(&fi->ext_lock);
-	if (fi->ext.len == 0) {
-		read_unlock(&fi->ext_lock);
-		return false;
-	}
-
-	stat_inc_total_hit(inode->i_sb);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-
-	if (pgofs >= start_fofs && pgofs <= end_fofs) {
-		*ei = fi->ext;
-		stat_inc_read_hit(inode->i_sb);
-		read_unlock(&fi->ext_lock);
-		return true;
-	}
-	read_unlock(&fi->ext_lock);
-	return false;
-}
-
-static bool update_extent_info(struct inode *inode, pgoff_t fofs,
-								block_t blkaddr)
-{
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr, end_blkaddr;
-	int need_update = true;
-
-	write_lock(&fi->ext_lock);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-	end_blkaddr = fi->ext.blk + fi->ext.len - 1;
-
-	/* Drop and initialize the matched extent */
-	if (fi->ext.len == 1 && fofs == start_fofs)
-		fi->ext.len = 0;
-
-	/* Initial extent */
-	if (fi->ext.len == 0) {
-		if (blkaddr != NULL_ADDR) {
-			fi->ext.fofs = fofs;
-			fi->ext.blk = blkaddr;
-			fi->ext.len = 1;
-		}
-		goto end_update;
-	}
-
-	/* Front merge */
-	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
-		fi->ext.fofs--;
-		fi->ext.blk--;
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Back merge */
-	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Split the existing extent */
-	if (fi->ext.len > 1 &&
-		fofs >= start_fofs && fofs <= end_fofs) {
-		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
-			fi->ext.len = fofs - start_fofs;
-		} else {
-			fi->ext.fofs = fofs + 1;
-			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
-			fi->ext.len -= fofs - start_fofs + 1;
-		}
-	} else {
-		need_update = false;
-	}
-
-	/* Finally, if the extent is very fragmented, let's drop the cache. */
-	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
-		fi->ext.len = 0;
-		set_inode_flag(fi, FI_NO_EXTENT);
-		need_update = true;
-	}
-end_update:
-	write_unlock(&fi->ext_lock);
-	return need_update;
-}
-
-static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct rb_node *parent, struct rb_node **p)
-{
-	struct extent_node *en;
-
-	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
-	if (!en)
-		return NULL;
-
-	en->ei = *ei;
-	INIT_LIST_HEAD(&en->list);
-
-	rb_link_node(&en->rb_node, parent, p);
-	rb_insert_color(&en->rb_node, &et->root);
-	et->count++;
-	atomic_inc(&sbi->total_ext_node);
-	return en;
-}
-
-static void __detach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	rb_erase(&en->rb_node, &et->root);
-	et->count--;
-	atomic_dec(&sbi->total_ext_node);
-
-	if (et->cached_en == en)
-		et->cached_en = NULL;
-}
-
-static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
-							nid_t ino)
-{
-	struct extent_tree *et;
-
-	down_read(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		up_read(&sbi->extent_tree_lock);
-		return NULL;
-	}
-	atomic_inc(&et->refcount);
-	up_read(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_tree *__grab_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	nid_t ino = inode->i_ino;
-
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
-		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
-		memset(et, 0, sizeof(struct extent_tree));
-		et->ino = ino;
-		et->root = RB_ROOT;
-		et->cached_en = NULL;
-		rwlock_init(&et->lock);
-		atomic_set(&et->refcount, 0);
-		et->count = 0;
-		sbi->total_ext_tree++;
-	}
-	atomic_inc(&et->refcount);
-	up_write(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
-							unsigned int fofs)
-{
-	struct rb_node *node = et->root.rb_node;
-	struct extent_node *en;
-
-	if (et->cached_en) {
-		struct extent_info *cei = &et->cached_en->ei;
-
-		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
-			return et->cached_en;
-	}
-
-	while (node) {
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (fofs < en->ei.fofs) {
-			node = node->rb_left;
-		} else if (fofs >= en->ei.fofs + en->ei.len) {
-			node = node->rb_right;
-		} else {
-			et->cached_en = en;
-			return en;
-		}
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *prev;
-	struct rb_node *node;
-
-	node = rb_prev(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	prev = rb_entry(node, struct extent_node, rb_node);
-	if (__is_back_mergeable(&en->ei, &prev->ei)) {
-		en->ei.fofs = prev->ei.fofs;
-		en->ei.blk = prev->ei.blk;
-		en->ei.len += prev->ei.len;
-		__detach_extent_node(sbi, et, prev);
-		return prev;
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *next;
-	struct rb_node *node;
-
-	node = rb_next(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	next = rb_entry(node, struct extent_node, rb_node);
-	if (__is_front_mergeable(&en->ei, &next->ei)) {
-		en->ei.len += next->ei.len;
-		__detach_extent_node(sbi, et, next);
-		return next;
-	}
-	return NULL;
-}
-
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct extent_node **den)
-{
-	struct rb_node **p = &et->root.rb_node;
-	struct rb_node *parent = NULL;
-	struct extent_node *en;
-
-	while (*p) {
-		parent = *p;
-		en = rb_entry(parent, struct extent_node, rb_node);
-
-		if (ei->fofs < en->ei.fofs) {
-			if (__is_front_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.fofs = ei->fofs;
-				en->ei.blk = ei->blk;
-				en->ei.len += ei->len;
-				*den = __try_back_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_left;
-		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
-			if (__is_back_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.len += ei->len;
-				*den = __try_front_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_right;
-		} else {
-			f2fs_bug_on(sbi, 1);
-		}
-	}
-
-	return __attach_extent_node(sbi, et, ei, parent, p);
-}
-
-static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
-					struct extent_tree *et, bool free_all)
-{
-	struct rb_node *node, *next;
-	struct extent_node *en;
-	unsigned int count = et->count;
-
-	node = rb_first(&et->root);
-	while (node) {
-		next = rb_next(node);
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (free_all) {
-			spin_lock(&sbi->extent_lock);
-			if (!list_empty(&en->list))
-				list_del_init(&en->list);
-			spin_unlock(&sbi->extent_lock);
-		}
-
-		if (free_all || list_empty(&en->list)) {
-			__detach_extent_node(sbi, et, en);
-			kmem_cache_free(extent_node_slab, en);
-		}
-		node = next;
-	}
-
-	return count - et->count;
-}
-
-static void f2fs_init_extent_tree(struct inode *inode,
-						struct f2fs_extent *i_ext)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
 	struct extent_info ei;
+	struct inode *inode = dn->inode;
 
-	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
-		return;
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-	if (et->count)
-		goto out;
-
-	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
-		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
-
-	en = __insert_extent_tree(sbi, et, &ei, NULL);
-	if (en) {
-		et->cached_en = en;
-
-		spin_lock(&sbi->extent_lock);
-		list_add_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-	}
-out:
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
-
-	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		return false;
-
-	read_lock(&et->lock);
-	en = __lookup_extent_tree(et, pgofs);
-	if (en) {
-		*ei = en->ei;
-		spin_lock(&sbi->extent_lock);
-		if (!list_empty(&en->list))
-			list_move_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-		stat_inc_read_hit(sbi->sb);
-	}
-	stat_inc_total_hit(sbi->sb);
-	read_unlock(&et->lock);
-
-	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
-
-	atomic_dec(&et->refcount);
-	return en ? true : false;
-}
-
-static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
-							block_t blkaddr)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
-	struct extent_node *den = NULL;
-	struct extent_info ei, dei;
-	unsigned int endofs;
-
-	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-
-	/* 1. lookup and remove existing extent info in cache */
-	en = __lookup_extent_tree(et, fofs);
-	if (!en)
-		goto update_extent;
-
-	dei = en->ei;
-	__detach_extent_node(sbi, et, en);
-
-	/* 2. if extent can be split more, split and insert the left part */
-	if (dei.len > 1) {
-		/*  insert left part of split extent into cache */
-		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, dei.fofs, dei.blk,
-							fofs - dei.fofs);
-			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
-
-		/* insert right part of split extent into cache */
-		endofs = dei.fofs + dei.len - 1;
-		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, fofs + 1,
-				fofs - dei.fofs + dei.blk, endofs - fofs);
-			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
+	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		dn->data_blkaddr = ei.blk + index - ei.fofs;
+		return 0;
 	}
 
-update_extent:
-	/* 3. update extent in extent cache */
-	if (blkaddr) {
-		set_extent_info(&ei, fofs, blkaddr, 1);
-		en3 = __insert_extent_tree(sbi, et, &ei, &den);
-	}
-
-	/* 4. update in global extent list */
-	spin_lock(&sbi->extent_lock);
-	if (en && !list_empty(&en->list))
-		list_del(&en->list);
-	/*
-	 * en1 and en2 split from en, they will become more and more smaller
-	 * fragments after splitting several times. So if the length is smaller
-	 * than F2FS_MIN_EXTENT_LEN, we will not add them into extent tree.
-	 */
-	if (en1)
-		list_add_tail(&en1->list, &sbi->extent_list);
-	if (en2)
-		list_add_tail(&en2->list, &sbi->extent_list);
-	if (en3) {
-		if (list_empty(&en3->list))
-			list_add_tail(&en3->list, &sbi->extent_list);
-		else
-			list_move_tail(&en3->list, &sbi->extent_list);
-	}
-	if (den && !list_empty(&den->list))
-		list_del(&den->list);
-	spin_unlock(&sbi->extent_lock);
-
-	/* 5. release extent node */
-	if (en)
-		kmem_cache_free(extent_node_slab, en);
-	if (den)
-		kmem_cache_free(extent_node_slab, den);
-
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-void f2fs_preserve_extent_tree(struct inode *inode)
-{
-	struct extent_tree *et;
-	struct extent_info *ext = &F2FS_I(inode)->ext;
-	bool sync = false;
-
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
-	if (!et) {
-		if (ext->len) {
-			ext->len = 0;
-			update_inode_page(inode);
-		}
-		return;
-	}
-
-	read_lock(&et->lock);
-	if (et->count) {
-		struct extent_node *en;
-
-		if (et->cached_en) {
-			en = et->cached_en;
-		} else {
-			struct rb_node *node = rb_first(&et->root);
-
-			if (!node)
-				node = rb_last(&et->root);
-			en = rb_entry(node, struct extent_node, rb_node);
-		}
-
-		if (__is_extent_same(ext, &en->ei))
-			goto out;
-
-		*ext = en->ei;
-		sync = true;
-	} else if (ext->len) {
-		ext->len = 0;
-		sync = true;
-	}
-out:
-	read_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-
-	if (sync)
-		update_inode_page(inode);
-}
-
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
-{
-	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
-	struct extent_node *en, *tmp;
-	unsigned long ino = F2FS_ROOT_INO(sbi);
-	struct radix_tree_iter iter;
-	void **slot;
-	unsigned int found;
-	unsigned int node_cnt = 0, tree_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	if (available_free_memory(sbi, EXTENT_CACHE))
-		return;
-
-	spin_lock(&sbi->extent_lock);
-	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-		if (!nr_shrink--)
-			break;
-		list_del_init(&en->list);
-	}
-	spin_unlock(&sbi->extent_lock);
-
-	down_read(&sbi->extent_tree_lock);
-	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
-				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
-		unsigned i;
-
-		ino = treevec[found - 1]->ino + 1;
-		for (i = 0; i < found; i++) {
-			struct extent_tree *et = treevec[i];
-
-			atomic_inc(&et->refcount);
-			write_lock(&et->lock);
-			node_cnt += __free_extent_tree(sbi, et, false);
-			write_unlock(&et->lock);
-			atomic_dec(&et->refcount);
-		}
-	}
-	up_read(&sbi->extent_tree_lock);
-
-	down_write(&sbi->extent_tree_lock);
-	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
-							F2FS_ROOT_INO(sbi)) {
-		struct extent_tree *et = (struct extent_tree *)*slot;
-
-		if (!atomic_read(&et->refcount) && !et->count) {
-			radix_tree_delete(&sbi->extent_tree_root, et->ino);
-			kmem_cache_free(extent_tree_slab, et);
-			sbi->total_ext_tree--;
-			tree_cnt++;
-		}
-	}
-	up_write(&sbi->extent_tree_lock);
-
-	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
-}
-
-void f2fs_destroy_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	unsigned int node_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		goto out;
-
-	/* free all extent info belong to this extent tree */
-	write_lock(&et->lock);
-	node_cnt = __free_extent_tree(sbi, et, true);
-	write_unlock(&et->lock);
-
-	atomic_dec(&et->refcount);
-
-	/* try to find and delete extent tree entry in radix tree */
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
-	if (!et) {
-		up_write(&sbi->extent_tree_lock);
-		goto out;
-	}
-	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
-	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
-	kmem_cache_free(extent_tree_slab, et);
-	sbi->total_ext_tree--;
-	up_write(&sbi->extent_tree_lock);
-out:
-	trace_f2fs_destroy_extent_tree(inode, node_cnt);
-	return;
-}
-
-void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
-{
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		f2fs_init_extent_tree(inode, i_ext);
-
-	write_lock(&F2FS_I(inode)->ext_lock);
-	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
-	write_unlock(&F2FS_I(inode)->ext_lock);
-}
-
-static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
-		return false;
-
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return f2fs_lookup_extent_tree(inode, pgofs, ei);
-
-	return lookup_extent_info(inode, pgofs, ei);
-}
-
-void f2fs_update_extent_cache(struct dnode_of_data *dn)
-{
-	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
-	pgoff_t fofs;
-
-	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-	if (is_inode_flag_set(fi, FI_NO_EXTENT))
-		return;
-
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-							dn->ofs_in_node;
-
-	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
-		return f2fs_update_extent_tree(dn->inode, fofs,
-							dn->data_blkaddr);
-
-	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
-		sync_inode_page(dn);
+	return f2fs_reserve_block(dn, index);
 }
 
 struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
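
The replacement f2fs_get_block() above resolves a page index from the
extent cache when possible and falls back to reserving a new block.  The
cache-hit arithmetic is a linear mapping: an extent covering file offsets
[ei.fofs, ei.fofs + ei.len) that starts at block ei.blk maps index to
ei.blk + index - ei.fofs.  For example, with ei.fofs = 100, ei.blk = 5000
and ei.len = 8, index 103 resolves to block 5003.  A hypothetical
stand-alone model of the hit test (not f2fs API):

    struct extent { unsigned fofs, blk, len; };

    static int extent_hit(const struct extent *ei, unsigned index,
                          unsigned *blkaddr)
    {
        if (index < ei->fofs || index >= ei->fofs + ei->len)
            return 0;                  /* miss: caller reserves a block */
        *blkaddr = ei->blk + (index - ei->fofs);
        return 1;                      /* {100,5000,8}, 103 -> 5003 */
    }
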
@@ -935,15 +303,13 @@
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-	if (err) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto put_err;
 	f2fs_put_dnode(&dn);
 
 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(-ENOENT);
+		err = -ENOENT;
+		goto put_err;
 	}
 got_it:
 	if (PageUptodate(page)) {
@@ -968,8 +334,12 @@
 	fio.page = page;
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
-		return ERR_PTR(err);
+		goto put_err;
 	return page;
+
+put_err:
+	f2fs_put_page(page, 1);
+	return ERR_PTR(err);
 }
 
 struct page *find_data_page(struct inode *inode, pgoff_t index)
@@ -1030,7 +400,8 @@
  *
  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir.
+ * Note that ipage is set only by make_empty_dir, and if any error occurs,
+ * ipage should be released by this function.
  */
 struct page *get_new_data_page(struct inode *inode,
 		struct page *ipage, pgoff_t index, bool new_i_size)
@@ -1041,8 +412,14 @@
 	int err;
 repeat:
 	page = grab_cache_page(mapping, index);
-	if (!page)
+	if (!page) {
+		/*
+		 * before exiting, we should make sure ipage will be released
+		 * if any error occurs.
+		 */
+		f2fs_put_page(ipage, 1);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	set_new_dnode(&dn, inode, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, index);
@@ -1107,8 +484,6 @@
 
 	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
 								&sum, seg);
-
-	/* direct IO doesn't use extent cache to maximize the performance */
 	set_data_blkaddr(dn);
 
 	/* update i_size */
@@ -1117,6 +492,9 @@
 	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
 		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
 
+	/* direct IO doesn't use extent cache to maximize the performance */
+	f2fs_drop_largest_extent(dn->inode, fofs);
+
 	return 0;
 }
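
Two details in the hunk above are easy to miss.  The i_size update rounds
the just-written block up to a byte offset with a shift:
(fofs + 1) << PAGE_CACHE_SHIFT is the first byte past block fofs, so with
4 KiB pages (PAGE_CACHE_SHIFT = 12) writing block 3 extends i_size to
(3 + 1) << 12 = 16384.  And because direct IO bypasses the extent cache,
f2fs_drop_largest_extent() is called afterwards so that a stale cached
"largest" extent can no longer cover the overwritten offset.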
 
@@ -1183,7 +561,7 @@
  *     c. give the block addresses to blockdev
  */
 static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
-			int create, bool fiemap)
+						int create, int flag)
 {
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
@@ -1217,8 +595,19 @@
 			err = 0;
 		goto unlock_out;
 	}
-	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
-		goto put_out;
+	if (dn.data_blkaddr == NEW_ADDR) {
+		if (flag == F2FS_GET_BLOCK_BMAP) {
+			err = -ENOENT;
+			goto put_out;
+		} else if (flag == F2FS_GET_BLOCK_READ ||
+				flag == F2FS_GET_BLOCK_DIO) {
+			goto put_out;
+		}
+		/*
+		 * if it is in fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
+		 * mark it as a mapped, unwritten block.
+		 */
+	}
 
 	if (dn.data_blkaddr != NULL_ADDR) {
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -1233,6 +622,8 @@
 		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
 		map->m_pblk = dn.data_blkaddr;
 	} else {
+		if (flag == F2FS_GET_BLOCK_BMAP)
+			err = -ENOENT;
 		goto put_out;
 	}
 
@@ -1255,7 +646,9 @@
 				err = 0;
 			goto unlock_out;
 		}
-		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
+
+		if (dn.data_blkaddr == NEW_ADDR &&
+				flag != F2FS_GET_BLOCK_FIEMAP)
 			goto put_out;
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -1297,7 +690,7 @@
 }
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh, int create, bool fiemap)
+			struct buffer_head *bh, int create, int flag)
 {
 	struct f2fs_map_blocks map;
 	int ret;
@@ -1305,7 +698,7 @@
 	map.m_lblk = iblock;
 	map.m_len = bh->b_size >> inode->i_blkbits;
 
-	ret = f2fs_map_blocks(inode, &map, create, fiemap);
+	ret = f2fs_map_blocks(inode, &map, create, flag);
 	if (!ret) {
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
@@ -1315,15 +708,23 @@
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
+			struct buffer_head *bh_result, int create, int flag)
 {
-	return __get_data_block(inode, iblock, bh_result, create, false);
+	return __get_data_block(inode, iblock, bh_result, create, flag);
 }
 
-static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
-	return __get_data_block(inode, iblock, bh_result, create, true);
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_DIO);
+}
+
+static int get_data_block_bmap(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_BMAP);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1367,7 +768,8 @@
 	memset(&map_bh, 0, sizeof(struct buffer_head));
 	map_bh.b_size = len;
 
-	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
+	ret = get_data_block(inode, start_blk, &map_bh, 0,
+					F2FS_GET_BLOCK_FIEMAP);
 	if (ret)
 		goto out;
 
@@ -1770,6 +1172,137 @@
 	return ret;
 }
 
+/*
+ * This function was copied from write_cache_pages in mm/page-writeback.c.
+ * The major change is that it writes cold data pages in a separate pass
+ * from warm/hot data pages.
+ */
+static int f2fs_write_cache_pages(struct address_space *mapping,
+			struct writeback_control *wbc, writepage_t writepage,
+			void *data)
+{
+	int ret = 0;
+	int done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t uninitialized_var(writeback_index);
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	pgoff_t done_index;
+	int cycled;
+	int range_whole = 0;
+	int tag;
+	int step = 0;
+
+	pagevec_init(&pvec, 0);
+next:
+	if (wbc->range_cyclic) {
+		writeback_index = mapping->writeback_index; /* prev offset */
+		index = writeback_index;
+		if (index == 0)
+			cycled = 1;
+		else
+			cycled = 0;
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
+		cycled = 1; /* ignore range_cyclic tests */
+	}
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
+retry:
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag_pages_for_writeback(mapping, index, end);
+	done_index = index;
+	while (!done && (index <= end)) {
+		int i;
+
+		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+		if (nr_pages == 0)
+			break;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (page->index > end) {
+				done = 1;
+				break;
+			}
+
+			done_index = page->index;
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+				unlock_page(page);
+				continue;
+			}
+
+			if (!PageDirty(page)) {
+				/* someone wrote it for us */
+				goto continue_unlock;
+			}
+
+			if (step == is_cold_data(page))
+				goto continue_unlock;
+
+			if (PageWriteback(page)) {
+				if (wbc->sync_mode != WB_SYNC_NONE)
+					f2fs_wait_on_page_writeback(page, DATA);
+				else
+					goto continue_unlock;
+			}
+
+			BUG_ON(PageWriteback(page));
+			if (!clear_page_dirty_for_io(page))
+				goto continue_unlock;
+
+			ret = (*writepage)(page, wbc, data);
+			if (unlikely(ret)) {
+				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+					unlock_page(page);
+					ret = 0;
+				} else {
+					done_index = page->index + 1;
+					done = 1;
+					break;
+				}
+			}
+
+			if (--wbc->nr_to_write <= 0 &&
+			    wbc->sync_mode == WB_SYNC_NONE) {
+				done = 1;
+				break;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	if (step < 1) {
+		step++;
+		goto next;
+	}
+
+	if (!cycled && !done) {
+		cycled = 1;
+		index = 0;
+		end = writeback_index - 1;
+		goto retry;
+	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = done_index;
+
+	return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
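
The structural difference from write_cache_pages() is the outer step loop:
a page is skipped when step == is_cold_data(page), so pass 0 submits only
cold pages and pass 1 submits the remaining warm/hot pages, keeping the
two temperature classes in separate passes (and so in separately merged
bios).  Schematically (for_each_dirty_page is shorthand, not a real
helper):

    for (step = 0; step <= 1; step++)
        for_each_dirty_page(page)
            if (step != is_cold_data(page)) /* 0: cold, 1: warm/hot */
                (*writepage)(page, wbc, data);
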
@@ -1785,6 +1318,10 @@
 	if (!mapping->a_ops->writepage)
 		return 0;
 
+	/* skip writing if there is no dirty page in this inode */
+	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
+		return 0;
+
 	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
 			available_free_memory(sbi, DIRTY_DENTS))
@@ -1800,12 +1337,11 @@
 		mutex_lock(&sbi->writepages);
 		locked = true;
 	}
-	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	if (locked)
 		mutex_unlock(&sbi->writepages);
 
-	f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
 	remove_dirty_dir_inode(inode);
 
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
@@ -1832,7 +1368,8 @@
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct page *page, *ipage;
+	struct page *page = NULL;
+	struct page *ipage;
 	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 	struct dnode_of_data dn;
 	int err = 0;
@@ -1882,25 +1419,28 @@
 		if (err)
 			goto put_fail;
 	}
-	err = f2fs_reserve_block(&dn, index);
+
+	err = f2fs_get_block(&dn, index);
 	if (err)
 		goto put_fail;
 put_next:
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
-	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
-		return 0;
-
 	f2fs_wait_on_page_writeback(page, DATA);
 
+	if (len == PAGE_CACHE_SIZE)
+		goto out_update;
+	if (PageUptodate(page))
+		goto out_clear;
+
 	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
 		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-		goto out;
+		goto out_update;
 	}
 
 	if (dn.data_blkaddr == NEW_ADDR) {
@@ -1920,7 +1460,6 @@
 
 		lock_page(page);
 		if (unlikely(!PageUptodate(page))) {
-			f2fs_put_page(page, 1);
 			err = -EIO;
 			goto fail;
 		}
@@ -1932,14 +1471,13 @@
 		/* avoid symlink page */
 		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
 			err = f2fs_decrypt_one(inode, page);
-			if (err) {
-				f2fs_put_page(page, 1);
+			if (err)
 				goto fail;
-			}
 		}
 	}
-out:
+out_update:
 	SetPageUptodate(page);
+out_clear:
 	clear_cold_data(page);
 	return 0;
 
@@ -1947,8 +1485,8 @@
 	f2fs_put_dnode(&dn);
 unlock_fail:
 	f2fs_unlock_op(sbi);
-	f2fs_put_page(page, 1);
 fail:
+	f2fs_put_page(page, 1);
 	f2fs_write_failed(mapping, pos + len);
 	return err;
 }
@@ -1979,9 +1517,6 @@
 {
 	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 
-	if (iov_iter_rw(iter) == READ)
-		return 0;
-
 	if (offset & blocksize_mask)
 		return -EINVAL;
 
@@ -2010,15 +1545,16 @@
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;
 
-	if (check_direct_IO(inode, iter, offset))
-		return 0;
+	err = check_direct_IO(inode, iter, offset);
+	if (err)
+		return err;
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
 	if (iov_iter_rw(iter) == WRITE)
 		__allocate_data_blocks(inode, offset, count);
 
-	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
 	if (err < 0 && iov_iter_rw(iter) == WRITE)
 		f2fs_write_failed(mapping, offset + count);
 
@@ -2045,6 +1581,11 @@
 		else
 			inode_dec_dirty_pages(inode);
 	}
+
+	/* This is atomic written page, keep Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return;
+
 	ClearPagePrivate(page);
 }
 
@@ -2054,6 +1595,10 @@
 	if (PageDirty(page))
 		return 0;
 
+	/* This is atomic written page, keep Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return 0;
+
 	ClearPagePrivate(page);
 	return 1;
 }
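
Both page release paths above now share the same guard: an atomic-written
page keeps its Private flag, because that flag is how the in-memory
atomic-write machinery recognizes pages it still owns.  The common shape,
condensed from the two hunks (the return value is void in invalidatepage
and 0 in releasepage):

    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return;                        /* keep Private; still registered */
    ClearPagePrivate(page);
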
@@ -2068,8 +1613,15 @@
 	SetPageUptodate(page);
 
 	if (f2fs_is_atomic_file(inode)) {
-		register_inmem_page(inode, page);
-		return 1;
+		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+			register_inmem_page(inode, page);
+			return 1;
+		}
+		/*
+		 * This page has already been registered, so we
+		 * just return here.
+		 */
+		return 0;
 	}
 
 	if (!PageDirty(page)) {
@@ -2090,38 +1642,7 @@
 		if (err)
 			return err;
 	}
-	return generic_block_bmap(mapping, block, get_data_block);
-}
-
-void init_extent_cache_info(struct f2fs_sb_info *sbi)
-{
-	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
-	init_rwsem(&sbi->extent_tree_lock);
-	INIT_LIST_HEAD(&sbi->extent_list);
-	spin_lock_init(&sbi->extent_lock);
-	sbi->total_ext_tree = 0;
-	atomic_set(&sbi->total_ext_node, 0);
-}
-
-int __init create_extent_cache(void)
-{
-	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
-			sizeof(struct extent_tree));
-	if (!extent_tree_slab)
-		return -ENOMEM;
-	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
-			sizeof(struct extent_node));
-	if (!extent_node_slab) {
-		kmem_cache_destroy(extent_tree_slab);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void destroy_extent_cache(void)
-{
-	kmem_cache_destroy(extent_node_slab);
-	kmem_cache_destroy(extent_tree_slab);
+	return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
 const struct address_space_operations f2fs_dblock_aops = {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 75176e0..d013d84 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -33,8 +33,11 @@
 	int i;
 
 	/* validation check of the segment numbers */
-	si->hit_ext = sbi->read_hit_ext;
-	si->total_ext = sbi->total_hit_ext;
+	si->hit_largest = atomic_read(&sbi->read_hit_largest);
+	si->hit_cached = atomic_read(&sbi->read_hit_cached);
+	si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
+	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+	si->total_ext = atomic_read(&sbi->total_hit_ext);
 	si->ext_tree = sbi->total_ext_tree;
 	si->ext_node = atomic_read(&sbi->total_ext_node);
 	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -49,6 +52,7 @@
 	si->valid_count = valid_user_blocks(sbi);
 	si->valid_node_count = valid_node_count(sbi);
 	si->valid_inode_count = valid_inode_count(sbi);
+	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 	si->inline_inode = atomic_read(&sbi->inline_inode);
 	si->inline_dir = atomic_read(&sbi->inline_dir);
 	si->utilization = utilization(sbi);
@@ -226,6 +230,8 @@
 		seq_printf(s, "Other: %u)\n  - Data: %u\n",
 			   si->valid_node_count - si->valid_inode_count,
 			   si->valid_count - si->valid_node_count);
+		seq_printf(s, "  - Inline_xattr Inode: %u\n",
+			   si->inline_xattr);
 		seq_printf(s, "  - Inline_data Inode: %u\n",
 			   si->inline_inode);
 		seq_printf(s, "  - Inline_dentry Inode: %u\n",
@@ -276,10 +282,16 @@
 				si->bg_data_blks);
 		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
 				si->bg_node_blks);
-		seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
-			   si->hit_ext, si->total_ext);
-		seq_printf(s, "\nExtent Tree Count: %d\n", si->ext_tree);
-		seq_printf(s, "\nExtent Node Count: %d\n", si->ext_node);
+		seq_puts(s, "\nExtent Cache:\n");
+		seq_printf(s, "  - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
+				si->hit_largest, si->hit_cached,
+				si->hit_rbtree);
+		seq_printf(s, "  - Hit Ratio: %d%% (%d / %d)\n",
+				!si->total_ext ? 0 :
+				(si->hit_total * 100) / si->total_ext,
+				si->hit_total, si->total_ext);
+		seq_printf(s, "  - Inner Struct Count: tree: %d, node: %d\n",
+				si->ext_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
 		seq_printf(s, "  - inmem: %4d, wb: %4d\n",
 			   si->inmem_pages, si->wb_pages);
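
In the new hit-ratio line above, si->hit_total is the sum of the three
per-level counters (largest + cached + rbtree) and the percentage is
computed in integer arithmetic with a guard against a zero total_ext.
For example, hit_largest = 70, hit_cached = 20, hit_rbtree = 10 and
total_ext = 200 gives (100 * 100) / 200 = 50 and prints
"  - Hit Ratio: 50% (100 / 200)".
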
@@ -366,6 +378,12 @@
 	si->sbi = sbi;
 	sbi->stat_info = si;
 
+	atomic_set(&sbi->total_hit_ext, 0);
+	atomic_set(&sbi->read_hit_rbtree, 0);
+	atomic_set(&sbi->read_hit_largest, 0);
+	atomic_set(&sbi->read_hit_cached, 0);
+
+	atomic_set(&sbi->inline_xattr, 0);
 	atomic_set(&sbi->inline_inode, 0);
 	atomic_set(&sbi->inline_dir, 0);
 	atomic_set(&sbi->inplace_count, 0);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a34ebd8..8f15fc1 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -718,8 +718,8 @@
 	if (inode)
 		f2fs_drop_nlink(dir, inode, NULL);
 
-	if (bit_pos == NR_DENTRY_IN_BLOCK) {
-		truncate_hole(dir, page->index, page->index + 1);
+	if (bit_pos == NR_DENTRY_IN_BLOCK &&
+			!truncate_hole(dir, page->index, page->index + 1)) {
 		clear_page_dirty_for_io(page);
 		ClearPagePrivate(page);
 		ClearPageUptodate(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
new file mode 100644
index 0000000..997ac86
--- /dev/null
+++ b/fs/f2fs/extent_cache.c
@@ -0,0 +1,791 @@
+/*
+ * f2fs extent cache support
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Samsung Electronics
+ * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
+ *          Chao Yu <chao2.yu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node *parent, struct rb_node **p)
+{
+	struct extent_node *en;
+
+	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+	if (!en)
+		return NULL;
+
+	en->ei = *ei;
+	INIT_LIST_HEAD(&en->list);
+
+	rb_link_node(&en->rb_node, parent, p);
+	rb_insert_color(&en->rb_node, &et->root);
+	et->count++;
+	atomic_inc(&sbi->total_ext_node);
+	return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_node *en)
+{
+	rb_erase(&en->rb_node, &et->root);
+	et->count--;
+	atomic_dec(&sbi->total_ext_node);
+
+	if (et->cached_en == en)
+		et->cached_en = NULL;
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	nid_t ino = inode->i_ino;
+
+	down_write(&sbi->extent_tree_lock);
+	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+	if (!et) {
+		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+		memset(et, 0, sizeof(struct extent_tree));
+		et->ino = ino;
+		et->root = RB_ROOT;
+		et->cached_en = NULL;
+		rwlock_init(&et->lock);
+		atomic_set(&et->refcount, 0);
+		et->count = 0;
+		sbi->total_ext_tree++;
+	}
+	atomic_inc(&et->refcount);
+	up_write(&sbi->extent_tree_lock);
+
+	/* never dies until evict_inode */
+	F2FS_I(inode)->extent_tree = et;
+
+	return et;
+}
+
+static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, unsigned int fofs)
+{
+	struct rb_node *node = et->root.rb_node;
+	struct extent_node *en = et->cached_en;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
+			stat_inc_cached_node_hit(sbi);
+			return en;
+		}
+	}
+
+	while (node) {
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs) {
+			node = node->rb_left;
+		} else if (fofs >= en->ei.fofs + en->ei.len) {
+			node = node->rb_right;
+		} else {
+			stat_inc_rbtree_node_hit(sbi);
+			return en;
+		}
+	}
+	return NULL;
+}
+
+static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct extent_node *en;
+
+	en = __attach_extent_node(sbi, et, ei, NULL, p);
+	if (!en)
+		return NULL;
+
+	et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+					struct extent_tree *et, bool free_all)
+{
+	struct rb_node *node, *next;
+	struct extent_node *en;
+	unsigned int count = et->count;
+
+	node = rb_first(&et->root);
+	while (node) {
+		next = rb_next(node);
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (free_all) {
+			spin_lock(&sbi->extent_lock);
+			if (!list_empty(&en->list))
+				list_del_init(&en->list);
+			spin_unlock(&sbi->extent_lock);
+		}
+
+		if (free_all || list_empty(&en->list)) {
+			__detach_extent_node(sbi, et, en);
+			kmem_cache_free(extent_node_slab, en);
+		}
+		node = next;
+	}
+
+	return count - et->count;
+}
+
+static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
+
+	if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
+		largest->len = 0;
+}
+
+void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	__drop_largest_extent(inode, fofs);
+}
+
+void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	struct extent_node *en;
+	struct extent_info ei;
+
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	et = __grab_extent_tree(inode);
+
+	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+		return;
+
+	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
+	write_lock(&et->lock);
+	if (et->count)
+		goto out;
+
+	en = __init_extent_tree(sbi, et, &ei);
+	if (en) {
+		spin_lock(&sbi->extent_lock);
+		list_add_tail(&en->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+	}
+out:
+	write_unlock(&et->lock);
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+							struct extent_info *ei)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en;
+	bool ret = false;
+
+	f2fs_bug_on(sbi, !et);
+
+	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+	read_lock(&et->lock);
+
+	if (et->largest.fofs <= pgofs &&
+			et->largest.fofs + et->largest.len > pgofs) {
+		*ei = et->largest;
+		ret = true;
+		stat_inc_largest_node_hit(sbi);
+		goto out;
+	}
+
+	en = __lookup_extent_tree(sbi, et, pgofs);
+	if (en) {
+		*ei = en->ei;
+		spin_lock(&sbi->extent_lock);
+		if (!list_empty(&en->list))
+			list_move_tail(&en->list, &sbi->extent_list);
+		et->cached_en = en;
+		spin_unlock(&sbi->extent_lock);
+		ret = true;
+	}
+out:
+	stat_inc_total_hit(sbi);
+	read_unlock(&et->lock);
+
+	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+	return ret;
+}
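
f2fs_lookup_extent_tree() above checks three levels in increasing cost,
matching the three hit counters exported via debug.c (L1-1, L1-2, L2):
the per-tree largest extent (a single range compare, no tree walk), then
the cached_en node inside __lookup_extent_tree(), then the full rb-tree
descent.  Schematically (inside() and rbtree_walk() are shorthand, not
f2fs helpers):

    if (inside(&et->largest, pgofs))        /* L1-1: largest extent */
        return et->largest;
    if (et->cached_en && inside(&et->cached_en->ei, pgofs))
        return et->cached_en->ei;           /* L1-2: cached node    */
    return rbtree_walk(et, pgofs);          /* L2: rb-tree lookup   */
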
+
+
+/*
+ * Look up the extent at @fofs; if hit, return the extent.
+ * If not, return NULL and set:
+ * @prev_ex: extent before fofs
+ * @next_ex: extent after fofs
+ * @insert_p: insert point for a new extent at fofs,
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
+				unsigned int fofs,
+				struct extent_node **prev_ex,
+				struct extent_node **next_ex,
+				struct rb_node ***insert_p,
+				struct rb_node **insert_parent)
+{
+	struct rb_node **pnode = &et->root.rb_node;
+	struct rb_node *parent = NULL, *tmp_node;
+	struct extent_node *en = et->cached_en;
+
+	*insert_p = NULL;
+	*insert_parent = NULL;
+	*prev_ex = NULL;
+	*next_ex = NULL;
+
+	if (RB_EMPTY_ROOT(&et->root))
+		return NULL;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+			goto lookup_neighbors;
+	}
+
+	while (*pnode) {
+		parent = *pnode;
+		en = rb_entry(*pnode, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs)
+			pnode = &(*pnode)->rb_left;
+		else if (fofs >= en->ei.fofs + en->ei.len)
+			pnode = &(*pnode)->rb_right;
+		else
+			goto lookup_neighbors;
+	}
+
+	*insert_p = pnode;
+	*insert_parent = parent;
+
+	en = rb_entry(parent, struct extent_node, rb_node);
+	tmp_node = parent;
+	if (parent && fofs > en->ei.fofs)
+		tmp_node = rb_next(parent);
+	*next_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+
+	tmp_node = parent;
+	if (parent && fofs < en->ei.fofs)
+		tmp_node = rb_prev(parent);
+	*prev_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	return NULL;
+
+lookup_neighbors:
+	if (fofs == en->ei.fofs) {
+		/* lookup prev node for merging backward later */
+		tmp_node = rb_prev(&en->rb_node);
+		*prev_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	if (fofs == en->ei.fofs + en->ei.len - 1) {
+		/* lookup next node for merging frontward later */
+		tmp_node = rb_next(&en->rb_node);
+		*next_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	return en;
+}
+
+static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct extent_node **den,
+				struct extent_node *prev_ex,
+				struct extent_node *next_ex)
+{
+	struct extent_node *en = NULL;
+
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+		prev_ex->ei.len += ei->len;
+		ei = &prev_ex->ei;
+		en = prev_ex;
+	}
+
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+		if (en) {
+			__detach_extent_node(sbi, et, prev_ex);
+			*den = prev_ex;
+		}
+		next_ex->ei.fofs = ei->fofs;
+		next_ex->ei.blk = ei->blk;
+		next_ex->ei.len += ei->len;
+		en = next_ex;
+	}
+
+	if (en) {
+		if (en->ei.len > et->largest.len)
+			et->largest = en->ei;
+		et->cached_en = en;
+	}
+	return en;
+}
+
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node **insert_p,
+				struct rb_node *insert_parent)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_node *en = NULL;
+
+	if (insert_p && insert_parent) {
+		parent = insert_parent;
+		p = insert_p;
+		goto do_insert;
+	}
+
+	while (*p) {
+		parent = *p;
+		en = rb_entry(parent, struct extent_node, rb_node);
+
+		if (ei->fofs < en->ei.fofs)
+			p = &(*p)->rb_left;
+		else if (ei->fofs >= en->ei.fofs + en->ei.len)
+			p = &(*p)->rb_right;
+		else
+			f2fs_bug_on(sbi, 1);
+	}
+do_insert:
+	en = __attach_extent_node(sbi, et, ei, parent, p);
+	if (!en)
+		return NULL;
+
+	if (en->ei.len > et->largest.len)
+		et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
+unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info ei, dei, prev;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	unsigned int end = fofs + len;
+	unsigned int pos = (unsigned int)fofs;
+
+	if (!et)
+		return false;
+
+	write_lock(&et->lock);
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+		write_unlock(&et->lock);
+		return false;
+	}
+
+	prev = et->largest;
+	dei.len = 0;
+
+	/* we do not guarantee that the largest extent is cached all the time */
+	__drop_largest_extent(inode, fofs);
+
+	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
+					&insert_p, &insert_parent);
+	if (!en) {
+		if (next_en) {
+			en = next_en;
+			f2fs_bug_on(sbi, en->ei.fofs <= pos);
+			pos = en->ei.fofs;
+		} else {
+			/*
+			 * skip searching in the tree since there is no
+			 * larger extent node in the cache.
+			 */
+			goto update_extent;
+		}
+	}
+
+	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
+	while (en) {
+		struct rb_node *node;
+
+		if (pos >= end)
+			break;
+
+		dei = en->ei;
+		en1 = en2 = NULL;
+
+		node = rb_next(&en->rb_node);
+
+		/*
+		 * 2.1 there are four cases when we invalidate blkaddrs in an
+		 * extent node (V: valid address, X: will be invalidated):
+		 */
+		/* case#1, invalidate right part of extent node |VVVVVXXXXX| */
+		if (pos > dei.fofs && end >= dei.fofs + dei.len) {
+			en->ei.len = pos - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		/* case#2, invalidate left part of extent node |XXXXXVVVVV| */
+		if (pos <= dei.fofs && end < dei.fofs + dei.len) {
+			en->ei.fofs = end;
+			en->ei.blk += end - dei.fofs;
+			en->ei.len -= end - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		__detach_extent_node(sbi, et, en);
+
+		/*
+		 * if we remove a node from the rb-tree, the saved insertion
+		 * point pointers may point to the wrong place, so discard them.
+		 */
+		insert_p = NULL;
+		insert_parent = NULL;
+
+		/* case#3, invalidate entire extent node |XXXXXXXXXX| */
+		if (pos <= dei.fofs && end >= dei.fofs + dei.len) {
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest.len = 0;
+			goto update;
+		}
+
+		/*
+		 * case#4, invalidate data in the middle of extent node
+		 * |VVVXXXXVVV|
+		 */
+		if (dei.len > F2FS_MIN_EXTENT_LEN) {
+			unsigned int endofs;
+
+			/*  insert left part of split extent into cache */
+			if (pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, dei.fofs, dei.blk,
+							pos - dei.fofs);
+				en1 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+
+			/* insert right part of split extent into cache */
+			endofs = dei.fofs + dei.len;
+			if (endofs - end >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, end,
+						end - dei.fofs + dei.blk,
+						endofs - end);
+				en2 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+		}
+update:
+		/* 2.2 update in global extent list */
+		spin_lock(&sbi->extent_lock);
+		if (en && !list_empty(&en->list))
+			list_del(&en->list);
+		if (en1)
+			list_add_tail(&en1->list, &sbi->extent_list);
+		if (en2)
+			list_add_tail(&en2->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+
+		/* 2.3 release extent node */
+		if (en)
+			kmem_cache_free(extent_node_slab, en);
+next:
+		en = node ? rb_entry(node, struct extent_node, rb_node) : NULL;
+		next_en = en;
+		if (en)
+			pos = en->ei.fofs;
+	}
+
+update_extent:
+	/* 3. update extent in extent cache */
+	if (blkaddr) {
+		struct extent_node *den = NULL;
+
+		set_extent_info(&ei, fofs, blkaddr, len);
+		en3 = __try_merge_extent_node(sbi, et, &ei, &den,
+							prev_en, next_en);
+		if (!en3)
+			en3 = __insert_extent_tree(sbi, et, &ei,
+						insert_p, insert_parent);
+
+		/* give up the extent cache if splits and small updates happen */
+		if (dei.len >= 1 &&
+				prev.len < F2FS_MIN_EXTENT_LEN &&
+				et->largest.len < F2FS_MIN_EXTENT_LEN) {
+			et->largest.len = 0;
+			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
+		}
+
+		spin_lock(&sbi->extent_lock);
+		if (en3) {
+			if (list_empty(&en3->list))
+				list_add_tail(&en3->list, &sbi->extent_list);
+			else
+				list_move_tail(&en3->list, &sbi->extent_list);
+		}
+		if (den && !list_empty(&den->list))
+			list_del(&den->list);
+		spin_unlock(&sbi->extent_lock);
+
+		if (den)
+			kmem_cache_free(extent_node_slab, den);
+	}
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		__free_extent_tree(sbi, et, true);
+
+	write_unlock(&et->lock);
+
+	return !__is_extent_same(&prev, &et->largest);
+}
+
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
+	struct extent_node *en, *tmp;
+	unsigned long ino = F2FS_ROOT_INO(sbi);
+	struct radix_tree_root *root = &sbi->extent_tree_root;
+	unsigned int found;
+	unsigned int node_cnt = 0, tree_cnt = 0;
+	int remained;
+
+	if (!test_opt(sbi, EXTENT_CACHE))
+		return 0;
+
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	/* 1. remove unreferenced extent tree */
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			if (!atomic_read(&et->refcount)) {
+				write_lock(&et->lock);
+				node_cnt += __free_extent_tree(sbi, et, true);
+				write_unlock(&et->lock);
+
+				radix_tree_delete(root, et->ino);
+				kmem_cache_free(extent_tree_slab, et);
+				sbi->total_ext_tree--;
+				tree_cnt++;
+
+				if (node_cnt + tree_cnt >= nr_shrink)
+					goto unlock_out;
+			}
+		}
+	}
+	up_write(&sbi->extent_tree_lock);
+
+	/* 2. remove LRU extent entries */
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	remained = nr_shrink - (node_cnt + tree_cnt);
+
+	spin_lock(&sbi->extent_lock);
+	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
+		if (!remained--)
+			break;
+		list_del_init(&en->list);
+	}
+	spin_unlock(&sbi->extent_lock);
+
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			write_lock(&et->lock);
+			node_cnt += __free_extent_tree(sbi, et, false);
+			write_unlock(&et->lock);
+
+			if (node_cnt + tree_cnt >= nr_shrink)
+				break;
+		}
+	}
+unlock_out:
+	up_write(&sbi->extent_tree_lock);
+out:
+	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+
+	return node_cnt + tree_cnt;
+}
+
+unsigned int f2fs_destroy_extent_node(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return 0;
+
+	write_lock(&et->lock);
+	node_cnt = __free_extent_tree(sbi, et, true);
+	write_unlock(&et->lock);
+
+	return node_cnt;
+}
+
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return;
+
+	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
+		atomic_dec(&et->refcount);
+		return;
+	}
+
+	/* free all extent info belonging to this extent tree */
+	node_cnt = f2fs_destroy_extent_node(inode);
+
+	/* delete extent tree entry in radix tree */
+	down_write(&sbi->extent_tree_lock);
+	atomic_dec(&et->refcount);
+	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+	kmem_cache_free(extent_tree_slab, et);
+	sbi->total_ext_tree--;
+	up_write(&sbi->extent_tree_lock);
+
+	F2FS_I(inode)->extent_tree = NULL;
+
+	trace_f2fs_destroy_extent_tree(inode, node_cnt);
+}
+
+bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+					struct extent_info *ei)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return false;
+
+	return f2fs_lookup_extent_tree(inode, pgofs, ei);
+}
+
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+	pgoff_t fofs;
+
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
+	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+							dn->ofs_in_node;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
+		sync_inode_page(dn);
+}
+
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
+		sync_inode_page(dn);
+}
+
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+	init_rwsem(&sbi->extent_tree_lock);
+	INIT_LIST_HEAD(&sbi->extent_list);
+	spin_lock_init(&sbi->extent_lock);
+	sbi->total_ext_tree = 0;
+	atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+			sizeof(struct extent_tree));
+	if (!extent_tree_slab)
+		return -ENOMEM;
+	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+			sizeof(struct extent_node));
+	if (!extent_node_slab) {
+		kmem_cache_destroy(extent_tree_slab);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void destroy_extent_cache(void)
+{
+	kmem_cache_destroy(extent_node_slab);
+	kmem_cache_destroy(extent_tree_slab);
+}
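
The four invalidation cases in f2fs_update_extent_tree_range() are easiest to see as a pure classification of the punched range [pos, end) against one cached extent [fofs, fofs + len). A standalone sketch (illustrative only, not taken from the patch; the names are made up) that restates the case analysis:

	/*
	 * Classify how a punched range [pos, end) overlaps one cached
	 * extent [fofs, fofs + len); mirrors case#1..#4 above.
	 */
	enum punch_overlap { DISJOINT, RIGHT_PART, LEFT_PART, WHOLE, MIDDLE };

	static enum punch_overlap classify_punch(unsigned int fofs,
				unsigned int len, unsigned int pos,
				unsigned int end)
	{
		if (end <= fofs || pos >= fofs + len)
			return DISJOINT;	/* nothing to invalidate */
		if (pos > fofs && end >= fofs + len)
			return RIGHT_PART;	/* case#1 |VVVVVXXXXX| */
		if (pos <= fofs && end < fofs + len)
			return LEFT_PART;	/* case#2 |XXXXXVVVVV| */
		if (pos <= fofs && end >= fofs + len)
			return WHOLE;		/* case#3 |XXXXXXXXXX| */
		return MIDDLE;			/* case#4 |VVVXXXXVVV| */
	}

In cases #1 and #2 the surviving piece is kept only if it is still at least F2FS_MIN_EXTENT_LEN blocks long, and case #4 may insert up to two new nodes (en1/en2) for the split halves.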
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a8327ed..f1a90ff 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
 #include <linux/magic.h>
 #include <linux/kobject.h>
 #include <linux/sched.h>
+#include <linux/bio.h>
 
 #ifdef CONFIG_F2FS_CHECK_FS
 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
@@ -228,6 +229,7 @@
 #define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
 #define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
 #define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)
 
 #define F2FS_IOC_SET_ENCRYPTION_POLICY					\
 		_IOR('f', 19, struct f2fs_encryption_policy)
@@ -320,7 +322,7 @@
 					 */
 };
 
-#define F2FS_LINK_MAX		32000	/* maximum link count per file */
+#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
@@ -349,6 +351,7 @@
 	nid_t ino;			/* inode number */
 	struct rb_root root;		/* root of extent info rb-tree */
 	struct extent_node *cached_en;	/* recently accessed extent node */
+	struct extent_info largest;	/* largest extent info */
 	rwlock_t lock;			/* protect extent info rb-tree */
 	atomic_t refcount;		/* reference count of rb-tree */
 	unsigned int count;		/* # of extent node in rb-tree*/
@@ -372,6 +375,12 @@
 	unsigned int m_flags;
 };
 
+/* for flag in get_data_block */
+#define F2FS_GET_BLOCK_READ		0
+#define F2FS_GET_BLOCK_DIO		1
+#define F2FS_GET_BLOCK_FIEMAP		2
+#define F2FS_GET_BLOCK_BMAP		3
+
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
@@ -420,14 +429,13 @@
 	unsigned int clevel;		/* maximum level of given file name */
 	nid_t i_xattr_nid;		/* node id that contains xattrs */
 	unsigned long long xattr_ver;	/* cp version of xattr modification */
-	struct extent_info ext;		/* in-memory extent cache entry */
-	rwlock_t ext_lock;		/* rwlock for single extent cache */
 	struct inode_entry *dirty_dir;	/* the pointer of dirty dir */
 
-	struct radix_tree_root inmem_root;	/* radix tree for inmem pages */
 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
 
+	struct extent_tree *extent_tree;	/* cached extent_tree entry */
+
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
 	/* Encryption params */
 	struct f2fs_crypt_info *i_crypt_info;
@@ -779,7 +787,11 @@
 	unsigned int segment_count[2];		/* # of allocated segments */
 	unsigned int block_count[2];		/* # of allocated blocks */
 	atomic_t inplace_count;		/* # of inplace update */
-	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
+	atomic_t total_hit_ext;			/* # of lookup extent cache */
+	atomic_t read_hit_rbtree;		/* # of hit rbtree extent node */
+	atomic_t read_hit_largest;		/* # of hit largest extent node */
+	atomic_t read_hit_cached;		/* # of hit cached extent node */
+	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
 	int bg_gc;				/* background gc calls */
@@ -791,6 +803,11 @@
 	/* For sysfs support */
 	struct kobject s_kobj;
 	struct completion s_kobj_unregister;
+
+	/* For shrinker support */
+	struct list_head s_list;
+	struct mutex umount_mutex;
+	unsigned int shrinker_run_no;
 };
 
 /*
@@ -1039,7 +1056,8 @@
 
 static inline void inode_dec_dirty_pages(struct inode *inode)
 {
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	atomic_dec(&F2FS_I(inode)->dirty_pages);
@@ -1234,16 +1252,24 @@
 						gfp_t flags)
 {
 	void *entry;
-retry:
-	entry = kmem_cache_alloc(cachep, flags);
-	if (!entry) {
-		cond_resched();
-		goto retry;
-	}
 
+	entry = kmem_cache_alloc(cachep, flags);
+	if (!entry)
+		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
 	return entry;
 }
 
+static inline struct bio *f2fs_bio_alloc(int npages)
+{
+	struct bio *bio;
+
+	/* No failure on bio allocation */
+	bio = bio_alloc(GFP_NOIO, npages);
+	if (!bio)
+		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+	return bio;
+}
+
 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
 				unsigned long index, void *item)
 {
@@ -1342,6 +1368,7 @@
 	FI_INC_LINK,		/* need to increment i_nlink */
 	FI_ACL_MODE,		/* indicate acl mode */
 	FI_NO_ALLOC,		/* should not allocate any blocks */
+	FI_FREE_NID,		/* free allocated nid */
 	FI_UPDATE_DIR,		/* should update inode block for consistency */
 	FI_DELAY_IPUT,		/* used for the recovery */
 	FI_NO_EXTENT,		/* not to use the extent cache */
@@ -1541,6 +1568,17 @@
 	return false;
 }
 
+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+	mode_t mode = inode->i_mode;
+
+	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+			is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		return false;
+
+	return S_ISREG(mode);
+}
+
 #define get_inode_mode(i) \
 	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -1557,7 +1595,7 @@
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
 int truncate_blocks(struct inode *, u64, bool);
-void f2fs_truncate(struct inode *);
+int f2fs_truncate(struct inode *, bool);
 int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1649,7 +1687,7 @@
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
 int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
-void remove_inode_page(struct inode *);
+int remove_inode_page(struct inode *);
 struct page *new_inode_page(struct inode *);
 struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
@@ -1660,6 +1698,7 @@
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
 void recover_inline_xattr(struct inode *, struct page *);
 void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
@@ -1675,7 +1714,7 @@
  * segment.c
  */
 void register_inmem_page(struct inode *, struct page *);
-void commit_inmem_pages(struct inode *, bool);
+int commit_inmem_pages(struct inode *, bool);
 void f2fs_balance_fs(struct f2fs_sb_info *);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *);
 int f2fs_issue_flush(struct f2fs_sb_info *);
@@ -1685,7 +1724,7 @@
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
 void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
 void release_discard_addrs(struct f2fs_sb_info *);
-void discard_next_dnode(struct f2fs_sb_info *, block_t);
+bool discard_next_dnode(struct f2fs_sb_info *, block_t);
 int npages_for_summary_flush(struct f2fs_sb_info *, bool);
 void allocate_new_segments(struct f2fs_sb_info *);
 int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1727,7 +1766,7 @@
 void release_orphan_inode(struct f2fs_sb_info *);
 void add_orphan_inode(struct f2fs_sb_info *, nid_t);
 void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
-void recover_orphan_inodes(struct f2fs_sb_info *);
+int recover_orphan_inodes(struct f2fs_sb_info *);
 int get_valid_checkpoint(struct f2fs_sb_info *);
 void update_dirty_page(struct inode *, struct page *);
 void add_dirty_dir_inode(struct inode *);
@@ -1746,21 +1785,14 @@
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
+int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_destroy_extent_tree(struct inode *);
-void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
-void f2fs_update_extent_cache(struct dnode_of_data *);
-void f2fs_preserve_extent_tree(struct inode *);
 struct page *get_read_data_page(struct inode *, pgoff_t, int);
 struct page *find_data_page(struct inode *, pgoff_t);
 struct page *get_lock_data_page(struct inode *, pgoff_t);
 struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
 int do_write_data_page(struct f2fs_io_info *);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
-void init_extent_cache_info(struct f2fs_sb_info *);
-int __init create_extent_cache(void);
-void destroy_extent_cache(void);
 void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
 int f2fs_release_page(struct page *, gfp_t);
 
@@ -1788,11 +1820,13 @@
 	struct f2fs_sb_info *sbi;
 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
 	int main_area_segs, main_area_sections, main_area_zones;
-	int hit_ext, total_ext, ext_tree, ext_node;
+	int hit_largest, hit_cached, hit_rbtree, hit_total, total_ext;
+	int ext_tree, ext_node;
 	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
 	int nats, dirty_nats, sits, dirty_sits, fnids;
 	int total_count, utilization;
-	int bg_gc, inline_inode, inline_dir, inmem_pages, wb_pages;
+	int bg_gc, inmem_pages, wb_pages;
+	int inline_xattr, inline_inode, inline_dir;
 	unsigned int valid_count, valid_node_count, valid_inode_count;
 	unsigned int bimodal, avg_vblocks;
 	int util_free, util_valid, util_invalid;
@@ -1823,8 +1857,20 @@
 #define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
 #define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
 #define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
-#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
-#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
+#define stat_inc_total_hit(sbi)		(atomic_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_cached))
+#define stat_inc_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
+#define stat_dec_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
 #define stat_inc_inline_inode(inode)					\
 	do {								\
 		if (f2fs_has_inline_data(inode))			\
@@ -1894,7 +1940,11 @@
 #define stat_inc_dirty_dir(sbi)
 #define stat_dec_dirty_dir(sbi)
 #define stat_inc_total_hit(sb)
-#define stat_inc_read_hit(sb)
+#define stat_inc_rbtree_node_hit(sb)
+#define stat_inc_largest_node_hit(sbi)
+#define stat_inc_cached_node_hit(sbi)
+#define stat_inc_inline_xattr(inode)
+#define stat_dec_inline_xattr(inode)
 #define stat_inc_inline_inode(inode)
 #define stat_dec_inline_inode(inode)
 #define stat_inc_inline_dir(inode)
@@ -1950,6 +2000,30 @@
 						struct f2fs_str *);
 
 /*
+ * shrinker.c
+ */
+unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
+unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
+void f2fs_join_shrinker(struct f2fs_sb_info *);
+void f2fs_leave_shrinker(struct f2fs_sb_info *);
+
+/*
+ * extent_cache.c
+ */
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+void f2fs_drop_largest_extent(struct inode *, pgoff_t);
+void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+unsigned int f2fs_destroy_extent_node(struct inode *);
+void f2fs_destroy_extent_tree(struct inode *);
+bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
+void f2fs_update_extent_cache(struct dnode_of_data *);
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+						pgoff_t, block_t, unsigned int);
+void init_extent_cache_info(struct f2fs_sb_info *);
+int __init create_extent_cache(void);
+void destroy_extent_cache(void);
+
+/*
  * crypto support
  */
 static inline int f2fs_encrypted_inode(struct inode *inode)
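
The reworked f2fs_kmem_cache_alloc() and the new f2fs_bio_alloc() above share one idiom: try a plain allocation first so the common case stays cheap, then fall back to __GFP_NOFAIL, which may block but never returns NULL. This replaces the old open-coded cond_resched() retry loop. The generic shape of the idiom, as a sketch:

	/*
	 * Try-then-__GFP_NOFAIL fallback (sketch): the first attempt keeps
	 * the fast path cheap; the retry delegates "must succeed" waiting
	 * to the allocator instead of spinning in the caller.
	 */
	static inline void *alloc_must_succeed(struct kmem_cache *cachep,
						gfp_t flags)
	{
		void *p = kmem_cache_alloc(cachep, flags);

		return p ? p : kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	}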
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b0f38c3..8120f86 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include "gc.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
@@ -85,6 +86,8 @@
 mapped:
 	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA);
+	/* if gced page is attached, don't write to cold segment */
+	clear_cold_data(page);
 out:
 	sb_end_pagefault(inode->i_sb);
 	return block_page_mkwrite_return(err);
@@ -203,8 +206,8 @@
 	}
 
 	/* if the inode is dirty, let's recover all the time */
-	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
-		update_inode_page(inode);
+	if (!datasync) {
+		f2fs_write_inode(inode, NULL);
 		goto go_write;
 	}
 
@@ -442,9 +445,9 @@
 
 int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
-	int nr_free = 0, ofs = dn->ofs_in_node;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_node *raw_node;
+	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 	__le32 *addr;
 
 	raw_node = F2FS_NODE(dn->node_page);
@@ -457,14 +460,22 @@
 
 		dn->data_blkaddr = NULL_ADDR;
 		set_data_blkaddr(dn);
-		f2fs_update_extent_cache(dn);
 		invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(F2FS_I(dn->inode),
 						FI_FIRST_BLOCK_WRITTEN);
 		nr_free++;
 	}
+
 	if (nr_free) {
+		pgoff_t fofs;
+		/*
+		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
+		 * we will invalidate all blkaddrs in the whole range.
+		 */
+		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
+						F2FS_I(dn->inode)) + ofs;
+		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
 		set_page_dirty(dn->node_page);
 		sync_inode_page(dn);
@@ -576,24 +587,30 @@
 	return err;
 }
 
-void f2fs_truncate(struct inode *inode)
+int f2fs_truncate(struct inode *inode, bool lock)
 {
+	int err;
+
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 				S_ISLNK(inode->i_mode)))
-		return;
+		return 0;
 
 	trace_f2fs_truncate(inode);
 
 	/* we should check inline_data size */
 	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
-		if (f2fs_convert_inline_inode(inode))
-			return;
+		err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
 	}
 
-	if (!truncate_blocks(inode, i_size_read(inode), true)) {
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		mark_inode_dirty(inode);
-	}
+	err = truncate_blocks(inode, i_size_read(inode), lock);
+	if (err)
+		return err;
+
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(inode);
+	return 0;
 }
 
 int f2fs_getattr(struct vfsmount *mnt,
@@ -653,7 +670,9 @@
 
 		if (attr->ia_size <= i_size_read(inode)) {
 			truncate_setsize(inode, attr->ia_size);
-			f2fs_truncate(inode);
+			err = f2fs_truncate(inode, true);
+			if (err)
+				return err;
 			f2fs_balance_fs(F2FS_I_SB(inode));
 		} else {
 			/*
@@ -692,14 +711,14 @@
 	.fiemap		= f2fs_fiemap,
 };
 
-static void fill_zero(struct inode *inode, pgoff_t index,
+static int fill_zero(struct inode *inode, pgoff_t index,
 					loff_t start, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page;
 
 	if (!len)
-		return;
+		return 0;
 
 	f2fs_balance_fs(sbi);
 
@@ -707,12 +726,14 @@
 	page = get_new_data_page(inode, NULL, index, false);
 	f2fs_unlock_op(sbi);
 
-	if (!IS_ERR(page)) {
-		f2fs_wait_on_page_writeback(page, DATA);
-		zero_user(page, start, len);
-		set_page_dirty(page);
-		f2fs_put_page(page, 1);
-	}
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
+	f2fs_wait_on_page_writeback(page, DATA);
+	zero_user(page, start, len);
+	set_page_dirty(page);
+	f2fs_put_page(page, 1);
+	return 0;
 }
 
 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
@@ -760,14 +781,22 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start,
+		ret = fill_zero(inode, pg_start, off_start,
 						off_end - off_start);
+		if (ret)
+			return ret;
 	} else {
-		if (off_start)
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
-		if (off_end)
-			fill_zero(inode, pg_end, 0, off_end);
+		if (off_start) {
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+		}
+		if (off_end) {
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				return ret;
+		}
 
 		if (pg_start < pg_end) {
 			struct address_space *mapping = inode->i_mapping;
@@ -797,11 +826,11 @@
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 	int ret = 0;
 
-	f2fs_lock_op(sbi);
-
 	for (; end < nrpages; start++, end++) {
 		block_t new_addr, old_addr;
 
+		f2fs_lock_op(sbi);
+
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
 		if (ret && ret != -ENOENT) {
@@ -817,13 +846,16 @@
 		if (new_addr == NULL_ADDR) {
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
 			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
-			if (ret && ret != -ENOENT)
+			if (ret && ret != -ENOENT) {
 				goto out;
-			else if (ret == -ENOENT)
+			} else if (ret == -ENOENT) {
+				f2fs_unlock_op(sbi);
 				continue;
+			}
 
 			if (dn.data_blkaddr == NULL_ADDR) {
 				f2fs_put_dnode(&dn);
+				f2fs_unlock_op(sbi);
 				continue;
 			} else {
 				truncate_data_blocks_range(&dn, 1);
@@ -862,8 +894,9 @@
 
 			f2fs_put_dnode(&dn);
 		}
+		f2fs_unlock_op(sbi);
 	}
-	ret = 0;
+	return 0;
 out:
 	f2fs_unlock_op(sbi);
 	return ret;
@@ -885,6 +918,14 @@
 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 		return -EINVAL;
 
+	f2fs_balance_fs(F2FS_I_SB(inode));
+
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	pg_start = offset >> PAGE_CACHE_SHIFT;
 	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
 
@@ -946,14 +987,21 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start, off_end - off_start);
+		ret = fill_zero(inode, pg_start, off_start,
+						off_end - off_start);
+		if (ret)
+			return ret;
+
 		if (offset + len > new_size)
 			new_size = offset + len;
 		new_size = max_t(loff_t, new_size, offset + len);
 	} else {
 		if (off_start) {
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+
 			new_size = max_t(loff_t, new_size,
 						pg_start << PAGE_CACHE_SHIFT);
 		}
@@ -995,7 +1043,10 @@
 		}
 
 		if (off_end) {
-			fill_zero(inode, pg_end, 0, off_end);
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				goto out;
+
 			new_size = max_t(loff_t, new_size, offset + len);
 		}
 	}
@@ -1033,6 +1084,12 @@
 
 	f2fs_balance_fs(sbi);
 
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	ret = truncate_blocks(inode, i_size_read(inode), true);
 	if (ret)
 		return ret;
@@ -1302,6 +1359,7 @@
 static int f2fs_ioc_start_atomic_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1311,9 +1369,12 @@
 	if (f2fs_is_atomic_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -1333,10 +1394,13 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		ret = commit_inmem_pages(inode, false);
+		if (ret)
+			goto err_out;
 	}
 
-	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
+	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+err_out:
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1344,6 +1408,7 @@
 static int f2fs_ioc_start_volatile_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1351,9 +1416,12 @@
 	if (f2fs_is_volatile_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_release_volatile_write(struct file *filp)
@@ -1389,7 +1457,7 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		commit_inmem_pages(inode, true);
 	}
 
 	if (f2fs_is_volatile_file(inode))
@@ -1544,6 +1612,35 @@
 	return 0;
 }
 
+static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	__u32 i, count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (get_user(count, (__u32 __user *)arg))
+		return -EFAULT;
+
+	if (!count || count > F2FS_BATCH_GC_MAX_NUM)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (!mutex_trylock(&sbi->gc_mutex))
+			break;
+
+		if (f2fs_gc(sbi))
+			break;
+	}
+
+	if (put_user(i, (__u32 __user *)arg))
+		return -EFAULT;
+
+	return 0;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
@@ -1573,6 +1670,8 @@
 		return f2fs_ioc_get_encryption_policy(filp, arg);
 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
+	case F2FS_IOC_GARBAGE_COLLECT:
+		return f2fs_ioc_gc(filp, arg);
 	default:
 		return -ENOTTY;
 	}
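
The new F2FS_IOC_GARBAGE_COLLECT ioctl takes a __u32 round count (1 to F2FS_BATCH_GC_MAX_NUM) and writes back how many GC rounds actually ran. A hypothetical userspace caller might look like the sketch below (it assumes F2FS_IOCTL_MAGIC is 0xf5 as in fs/f2fs/f2fs.h; the mount point and file name are placeholders):

	/* Request up to 8 GC rounds on an f2fs filesystem and read back
	 * how many were performed.  Needs CAP_SYS_ADMIN; the fd may be
	 * any file on the target filesystem.
	 */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	#define F2FS_IOCTL_MAGIC		0xf5
	#define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)

	int main(void)
	{
		__u32 count = 8;
		int fd = open("/mnt/f2fs/file", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &count) == 0)
			printf("ran %u gc round(s)\n", count);
		close(fd);
		return 0;
	}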
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 22fb5ef..782b8e7 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -391,23 +391,27 @@
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
 	struct f2fs_summary *entry;
+	block_t start_addr;
 	int off;
 
+	start_addr = START_BLOCK(sbi, segno);
+
 next_step:
 	entry = sum;
 
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
+		struct node_info ni;
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -426,6 +430,12 @@
 			continue;
 		}
 
+		get_node_info(sbi, nid, &ni);
+		if (ni.blk_addr != start_addr + off) {
+			f2fs_put_page(node_page, 1);
+			continue;
+		}
+
 		/* set page dirty and write it */
 		if (gc_type == FG_GC) {
 			f2fs_wait_on_page_writeback(node_page, NODE);
@@ -451,13 +461,11 @@
 		};
 		sync_node_pages(sbi, 0, &wbc);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0)
-			goto next_step;
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 /*
@@ -487,7 +495,7 @@
 	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
 }
 
-static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
 {
 	struct page *node_page;
@@ -500,13 +508,13 @@
 
 	node_page = get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
-		return 0;
+		return false;
 
 	get_node_info(sbi, nid, dni);
 
 	if (sum->version != dni->version) {
 		f2fs_put_page(node_page, 1);
-		return 0;
+		return false;
 	}
 
 	*nofs = ofs_of_node(node_page);
@@ -514,8 +522,8 @@
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static void move_encrypted_block(struct inode *inode, block_t bidx)
@@ -552,7 +560,10 @@
 	fio.page = page;
 	fio.blk_addr = dn.data_blkaddr;
 
-	fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
+	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
+					fio.blk_addr,
+					FGP_LOCK|FGP_CREAT,
+					GFP_NOFS);
 	if (!fio.encrypted_page)
 		goto put_out;
 
@@ -636,7 +647,7 @@
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
@@ -659,7 +670,7 @@
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -670,7 +681,7 @@
 		}
 
 		/* Get an inode by ino with checking validity */
-		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
+		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
 			continue;
 
 		if (phase == 1) {
@@ -724,15 +735,11 @@
 	if (gc_type == FG_GC) {
 		f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0) {
-			phase = 2;
-			goto next_step;
-		}
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -748,12 +755,13 @@
 	return ret;
 }
 
-static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
 	struct blk_plug plug;
+	int nfree = 0;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
@@ -773,10 +781,11 @@
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
 	case SUM_TYPE_NODE:
-		gc_node_segment(sbi, sum->entries, segno, gc_type);
+		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
+		nfree = gc_data_segment(sbi, sum->entries, gc_list,
+							segno, gc_type);
 		break;
 	}
 	blk_finish_plug(&plug);
@@ -785,11 +794,13 @@
 	stat_inc_call_count(sbi->stat_info);
 
 	f2fs_put_page(sum_page, 0);
+	return nfree;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-	unsigned int segno, i;
+	unsigned int segno = NULL_SEGNO;
+	unsigned int i;
 	int gc_type = BG_GC;
 	int nfree = 0;
 	int ret = -1;
@@ -808,10 +819,11 @@
 
 	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
 		gc_type = FG_GC;
-		write_checkpoint(sbi, &cpc);
+		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
+			write_checkpoint(sbi, &cpc);
 	}
 
-	if (!__get_victim(sbi, &segno, gc_type))
+	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
 		goto stop;
 	ret = 0;
 
@@ -821,13 +833,10 @@
 								META_SSA);
 
 	for (i = 0; i < sbi->segs_per_sec; i++)
-		do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
+		nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
 
-	if (gc_type == FG_GC) {
+	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
-		nfree++;
-		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
-	}
 
 	if (has_not_enough_free_secs(sbi, nfree))
 		goto gc_more;
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b4a65be..c5a055b 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -19,6 +19,12 @@
 #define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */
 
+/*
+ * with this macro, we can control the max number of GC rounds we run
+ * when the user triggers batch mode gc via ioctl.
+ */
+#define F2FS_BATCH_GC_MAX_NUM		16
+
 /* Search max. number of dirty segments to select a victim segment */
 #define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index a13ffcc..3d143be 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -360,6 +360,10 @@
 	return 0;
 }
 
+/*
+ * NOTE: ipage is grabbed by the caller, but if any error occurs, we
+ * must release ipage in this function.
+ */
 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 				struct f2fs_inline_dentry *inline_dentry)
 {
@@ -369,8 +373,10 @@
 	int err;
 
 	page = grab_cache_page(dir->i_mapping, 0);
-	if (!page)
+	if (!page) {
+		f2fs_put_page(ipage, 1);
 		return -ENOMEM;
+	}
 
 	set_new_dnode(&dn, dir, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, 0);
@@ -378,13 +384,21 @@
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA);
-	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
 	/* copy data from inline dentry block to new dentry block */
 	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
 					INLINE_DENTRY_BITMAP_SIZE);
+	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
+			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
+	/*
+	 * we do not need to zero out the remainder of the dentry and
+	 * filename fields, since the bitmap already marks their usage
+	 * status; besides, we can also skip copying/zeroing the reserved
+	 * space of the dentry block, because it hasn't been used so far.
+	 */
 	memcpy(dentry_blk->dentry, inline_dentry->dentry,
 			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
 	memcpy(dentry_blk->filename, inline_dentry->filename,
@@ -434,8 +448,9 @@
 						slots, NR_INLINE_DENTRY);
 	if (bit_pos >= NR_INLINE_DENTRY) {
 		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
-		if (!err)
-			err = -EAGAIN;
+		if (err)
+			return err;
+		err = -EAGAIN;
 		goto out;
 	}
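
When an inline directory overflows, f2fs_convert_inline_dir() copies its entries into a regular dentry block, and, as the comment above explains, only the bitmap tail needs explicit zeroing. The copy/zero plan, sketched field by field (the constants are the ones used in the hunk above):

	/*
	 * Inline -> regular dentry block conversion (sketch):
	 *   dentry_bitmap : copy INLINE_DENTRY_BITMAP_SIZE bytes, then
	 *                   zero up to SIZE_OF_DENTRY_BITMAP
	 *   dentry[]      : copy NR_INLINE_DENTRY entries; the tail may
	 *                   stay uninitialized, the bitmap marks it free
	 *   filename[]    : same rule as dentry[]
	 *   reserved      : left untouched, never used by an inline dir
	 */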
 
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2550868..35aae65 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -12,7 +12,6 @@
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
-#include <linux/bitops.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -34,8 +33,8 @@
 		new_fl |= S_NOATIME;
 	if (flags & FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	set_mask_bits(&inode->i_flags,
-			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+	inode_set_flags(inode, new_fl,
+			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 }
 
 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -139,7 +138,7 @@
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	fi->i_dir_level = ri->i_dir_level;
 
-	f2fs_init_extent_cache(inode, &ri->i_ext);
+	f2fs_init_extent_tree(inode, &ri->i_ext);
 
 	get_inline_info(fi, ri);
 
@@ -155,6 +154,7 @@
 
 	f2fs_put_page(node_page, 1);
 
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -237,10 +237,11 @@
 	ri->i_size = cpu_to_le64(i_size_read(inode));
 	ri->i_blocks = cpu_to_le64(inode->i_blocks);
 
-	read_lock(&F2FS_I(inode)->ext_lock);
-	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
-	read_unlock(&F2FS_I(inode)->ext_lock);
-
+	if (F2FS_I(inode)->extent_tree)
+		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
+							&ri->i_ext);
+	else
+		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
 	set_raw_inline(F2FS_I(inode), ri);
 
 	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -314,7 +315,9 @@
 void f2fs_evict_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	nid_t xnid = fi->i_xattr_nid;
+	int err = 0;
 
 	/* some remained atomic pages should discarded */
 	if (f2fs_is_atomic_file(inode))
@@ -330,41 +333,62 @@
 	f2fs_bug_on(sbi, get_dirty_pages(inode));
 	remove_dirty_dir_inode(inode);
 
+	f2fs_destroy_extent_tree(inode);
+
 	if (inode->i_nlink || is_bad_inode(inode))
 		goto no_delete;
 
 	sb_start_intwrite(inode->i_sb);
-	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
+	set_inode_flag(fi, FI_NO_ALLOC);
 	i_size_write(inode, 0);
 
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, true);
 
-	f2fs_lock_op(sbi);
-	remove_inode_page(inode);
-	f2fs_unlock_op(sbi);
+	if (!err) {
+		f2fs_lock_op(sbi);
+		err = remove_inode_page(inode);
+		f2fs_unlock_op(sbi);
+	}
 
 	sb_end_intwrite(inode->i_sb);
 no_delete:
+	stat_dec_inline_xattr(inode);
 	stat_dec_inline_dir(inode);
 	stat_dec_inline_inode(inode);
 
-	/* update extent info in inode */
-	if (inode->i_nlink)
-		f2fs_preserve_extent_tree(inode);
-	f2fs_destroy_extent_tree(inode);
-
 	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
 	if (xnid)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
-	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
+	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
-	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
+	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
+	if (is_inode_flag_set(fi, FI_FREE_NID)) {
+		if (err && err != -ENOENT)
+			alloc_nid_done(sbi, inode->i_ino);
+		else
+			alloc_nid_failed(sbi, inode->i_ino);
+		clear_inode_flag(fi, FI_FREE_NID);
+	}
+
+	if (err && err != -ENOENT) {
+		if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
+			/*
+			 * we get here because we failed to release the
+			 * inode's resources previously; remind the user to
+			 * run fsck to fix it.
+			 */
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"inode (ino:%lu) resource leak, run fsck "
+				"to fix this issue!", inode->i_ino);
+		}
+	}
 out_clear:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
-	if (F2FS_I(inode)->i_crypt_info)
-		f2fs_free_encryption_info(inode, F2FS_I(inode)->i_crypt_info);
+	if (fi->i_crypt_info)
+		f2fs_free_encryption_info(inode, fi->i_crypt_info);
 #endif
 	clear_inode(inode);
 }
@@ -373,6 +397,7 @@
 void handle_failed_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	int err = 0;
 
 	clear_nlink(inode);
 	make_bad_inode(inode);
@@ -380,13 +405,29 @@
 
 	i_size_write(inode, 0);
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, false);
 
-	remove_inode_page(inode);
+	if (!err)
+		err = remove_inode_page(inode);
 
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
-	alloc_nid_failed(sbi, inode->i_ino);
+	/*
+	 * if we skipped truncate_node in remove_inode_page because we failed
+	 * before, it's better to find another way to release the resources of
+	 * this inode (e.g. valid block count, node block or nid). Here we
+	 * choose to add this inode to the orphan list, so that we can call
+	 * iput to release it in the orphan recovery flow.
+	 *
+	 * Note: we should add the inode to the orphan list before
+	 * f2fs_unlock_op(), so we don't lose this orphan if a checkpoint is
+	 * followed by a sudden power-off.
+	 */
+	if (err && err != -ENOENT) {
+		err = acquire_orphan_inode(sbi);
+		if (!err)
+			add_orphan_inode(sbi, inode->i_ino);
+	}
+
+	set_inode_flag(F2FS_I(inode), FI_FREE_NID);
 	f2fs_unlock_op(sbi);
 
 	/* iput will drop the inode object */
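
The FI_FREE_NID handling added to f2fs_evict_inode() above boils down to a small decision on how to dispose of the inode's nid once eviction finishes. Restated as a hypothetical helper (the name is made up; the branch mirrors the code above):

	static void dispose_nid(struct f2fs_sb_info *sbi, nid_t ino, int err)
	{
		if (err && err != -ENOENT)
			alloc_nid_done(sbi, ino);	/* node still on disk: keep nid used */
		else
			alloc_nid_failed(sbi, ino);	/* node gone: recycle nid as free */
	}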
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index fdbae21..a680bf3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -53,7 +53,7 @@
 	if (err) {
 		err = -EINVAL;
 		nid_free = true;
-		goto out;
+		goto fail;
 	}
 
 	/* If the directory encrypted, then we should encrypt the inode. */
@@ -65,6 +65,9 @@
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
 
+	f2fs_init_extent_tree(inode, NULL);
+
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -72,15 +75,12 @@
 	mark_inode_dirty(inode);
 	return inode;
 
-out:
-	clear_nlink(inode);
-	unlock_new_inode(inode);
 fail:
 	trace_f2fs_new_inode(inode, err);
 	make_bad_inode(inode);
-	iput(inode);
 	if (nid_free)
-		alloc_nid_failed(sbi, ino);
+		set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+	iput(inode);
 	return ERR_PTR(err);
 }
 
@@ -89,7 +89,14 @@
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
 
-	if (sublen > slen)
+	/*
+	 * filename format of multimedia file should be defined as:
+	 * "filename + '.' + extension".
+	 */
+	if (slen < sublen + 2)
+		return 0;
+
+	if (s[slen - sublen - 1] != '.')
 		return 0;
 
 	return !strncasecmp(s + slen - sublen, sub, sublen);
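
With the stricter check above, a file name matches a multimedia extension only in the exact "name" + '.' + "ext" form. Expected behaviour on a few sample names (illustrative; "match" stands in for the static helper above):

	match("video.mp4", "mp4");	/* 1: "video" + '.' + "mp4" */
	match(".mp4", "mp4");		/* 0: slen < sublen + 2, no basename */
	match("videomp4", "mp4");	/* 0: no '.' right before the extension */
	match("video.MP4", "mp4");	/* 1: strncasecmp ignores case */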
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7dd63b7..27d1a74 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -159,7 +159,7 @@
 
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
-		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
+		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 
 		INIT_LIST_HEAD(&head->entry_list);
 		INIT_LIST_HEAD(&head->set_list);
@@ -246,7 +246,7 @@
 {
 	struct nat_entry *new;
 
-	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
 	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
@@ -306,6 +306,10 @@
 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 		unsigned char version = nat_get_version(e);
 		nat_set_version(e, inc_node_version(version));
+
+		/* in order to reuse the nid */
+		if (nm_i->next_scan_nid > ni->nid)
+			nm_i->next_scan_nid = ni->nid;
 	}
 
 	/* change address */
@@ -328,11 +332,11 @@
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	int nr = nr_shrink;
 
-	if (available_free_memory(sbi, NAT_ENTRIES))
+	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	down_write(&nm_i->nat_tree_lock);
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
 		ne = list_first_entry(&nm_i->nat_entries,
@@ -341,7 +345,7 @@
 		nr_shrink--;
 	}
 	up_write(&nm_i->nat_tree_lock);
-	return nr_shrink;
+	return nr - nr_shrink;
 }
 
 /*
@@ -898,17 +902,20 @@
  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
  */
-void remove_inode_page(struct inode *inode)
+int remove_inode_page(struct inode *inode)
 {
 	struct dnode_of_data dn;
+	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
-		return;
+	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	if (err)
+		return err;
 
-	if (truncate_xattr_node(inode, dn.inode_page)) {
+	err = truncate_xattr_node(inode, dn.inode_page);
+	if (err) {
 		f2fs_put_dnode(&dn);
-		return;
+		return err;
 	}
 
 	/* remove potential inline_data blocks */
@@ -922,6 +929,7 @@
 
 	/* will put inode & node pages */
 	truncate_node(&dn);
+	return 0;
 }
 
 struct page *new_inode_page(struct inode *inode)
@@ -991,8 +999,7 @@
 /*
  * Caller should do after getting the following values.
  * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE: f2fs_put_page(page, 1)
- * error: nothing
+ * LOCKED_PAGE or error: f2fs_put_page(page, 1)
  */
 static int read_node_page(struct page *page, int rw)
 {
@@ -1010,7 +1017,6 @@
 
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
-		f2fs_put_page(page, 1);
 		return -ENOENT;
 	}
 
@@ -1041,10 +1047,7 @@
 		return;
 
 	err = read_node_page(apage, READA);
-	if (err == 0)
-		f2fs_put_page(apage, 0);
-	else if (err == LOCKED_PAGE)
-		f2fs_put_page(apage, 1);
+	f2fs_put_page(apage, err ? 1 : 0);
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1057,10 +1060,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err != LOCKED_PAGE)
+	} else if (err != LOCKED_PAGE) {
 		lock_page(page);
+	}
 
 	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
 		ClearPageUptodate(page);
@@ -1096,10 +1101,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err == LOCKED_PAGE)
+	} else if (err == LOCKED_PAGE) {
 		goto page_hit;
+	}
 
 	blk_start_plug(&plug);
 
@@ -1533,7 +1540,7 @@
 		if (unlikely(nid >= nm_i->max_nid))
 			nid = 0;
 
-		if (i++ == FREE_NID_PAGES)
+		if (++i >= FREE_NID_PAGES)
 			break;
 	}
 
@@ -1570,6 +1577,8 @@
 
 	/* We should not use stale free nids created by build_free_nids */
 	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
+		struct node_info ni;
+
 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
 		list_for_each_entry(i, &nm_i->free_nid_list, list)
 			if (i->state == NID_NEW)
@@ -1580,6 +1589,13 @@
 		i->state = NID_ALLOC;
 		nm_i->fcnt--;
 		spin_unlock(&nm_i->free_nid_list_lock);
+
+		/* check nid is allocated already */
+		get_node_info(sbi, *nid, &ni);
+		if (ni.blk_addr != NULL_ADDR) {
+			alloc_nid_done(sbi, *nid);
+			goto retry;
+		}
 		return true;
 	}
 	spin_unlock(&nm_i->free_nid_list_lock);
@@ -1636,6 +1652,32 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct free_nid *i, *next;
+	int nr = nr_shrink;
+
+	if (!mutex_trylock(&nm_i->build_lock))
+		return 0;
+
+	spin_lock(&nm_i->free_nid_list_lock);
+	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+			break;
+		if (i->state == NID_ALLOC)
+			continue;
+		__del_from_free_nid_list(nm_i, i);
+		kmem_cache_free(free_nid_slab, i);
+		nm_i->fcnt--;
+		nr_shrink--;
+	}
+	spin_unlock(&nm_i->free_nid_list_lock);
+	mutex_unlock(&nm_i->build_lock);
+
+	return nr - nr_shrink;
+}
+
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
 	void *src_addr, *dst_addr;
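
Both try_to_free_nats() and the new try_to_free_nids() above follow the same non-blocking shape: return 0 immediately if the lock is contended so background reclaim never stalls, free at most nr_shrink entries, and report how many were actually freed. The pattern, as a sketch built on hypothetical stand-ins:

	struct cache;				/* stand-in for the real cache */
	bool cache_trylock(struct cache *c);
	void cache_unlock(struct cache *c);
	bool cache_empty(struct cache *c);
	void free_one_entry(struct cache *c);

	static int try_to_free_entries(struct cache *c, int nr_shrink)
	{
		int nr = nr_shrink;

		if (!cache_trylock(c))
			return 0;		/* contended: don't block */
		while (nr_shrink > 0 && !cache_empty(c)) {
			free_one_entry(c);
			nr_shrink--;
		}
		cache_unlock(c);
		return nr - nr_shrink;		/* entries actually freed */
	}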
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 24a8c1d..faec2ca 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -399,14 +399,35 @@
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
-	for (; start < end; start++) {
+	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
 
 		src = datablock_addr(dn.node_page, dn.ofs_in_node);
 		dest = datablock_addr(page, dn.ofs_in_node);
 
-		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
-			is_valid_blkaddr(sbi, dest, META_POR)) {
+		/* skip recovering if dest is the same as src */
+		if (src == dest)
+			continue;
+
+		/* dest is invalid, just invalidate src block */
+		if (dest == NULL_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			continue;
+		}
+
+		/*
+		 * dest is reserved block, invalidate src block
+		 * and then reserve one new block in dnode page.
+		 */
+		if (dest == NEW_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			err = reserve_new_block(&dn);
+			f2fs_bug_on(sbi, err);
+			continue;
+		}
+
+		/* dest is valid block, try to recover from src to dest */
+		if (is_valid_blkaddr(sbi, dest, META_POR)) {
 
 			if (src == NULL_ADDR) {
 				err = reserve_new_block(&dn);
@@ -424,7 +445,6 @@
 							ni.version, false);
 			recovered++;
 		}
-		dn.ofs_in_node++;
 	}
 
 	if (IS_INODE(dn.node_page))
@@ -525,14 +545,12 @@
 
 	INIT_LIST_HEAD(&inode_list);
 
-	/* step #1: find fsynced inode numbers */
-	set_sbi_flag(sbi, SBI_POR_DOING);
-
 	/* prevent checkpoint */
 	mutex_lock(&sbi->cp_mutex);
 
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
+	/* step #1: find fsynced inode numbers */
 	err = find_fsync_dnodes(sbi, &inode_list);
 	if (err)
 		goto out;
@@ -561,11 +579,20 @@
 
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 	if (err) {
-		discard_next_dnode(sbi, blkaddr);
+		bool invalidate = false;
+
+		if (discard_next_dnode(sbi, blkaddr))
+			invalidate = true;
 
 		/* Flush all the NAT/SIT pages */
 		while (get_pages(sbi, F2FS_DIRTY_META))
 			sync_meta_pages(sbi, META, LONG_MAX);
+
+		/* invalidate temporary meta page */
+		if (invalidate)
+			invalidate_mapping_pages(META_MAPPING(sbi),
+							blkaddr, blkaddr);
+
 		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
 		mutex_unlock(&sbi->cp_mutex);
 	} else if (need_writecp) {
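
The rewritten recovery loop above replaces one compound condition with an explicit case analysis on the (src, dest) block address pair. The decision table, restated as an illustrative sketch (the enum and helper are not from the patch):

	enum recover_action {
		DO_NOTHING,		/* src == dest: already consistent */
		DROP_SRC,		/* dest == NULL_ADDR: invalidate src */
		DROP_AND_RESERVE,	/* dest == NEW_ADDR: invalidate, reserve anew */
		RECOVER_BLOCK,		/* dest valid per META_POR: recover src -> dest */
	};

	static enum recover_action classify(block_t src, block_t dest,
						bool dest_is_valid)
	{
		if (src == dest)
			return DO_NOTHING;
		if (dest == NULL_ADDR)
			return DROP_SRC;
		if (dest == NEW_ADDR)
			return DROP_AND_RESERVE;
		return dest_is_valid ? RECOVER_BLOCK : DO_NOTHING;
	}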
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 61b97f9..78e6d06 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -197,28 +197,20 @@
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
-	int err;
 
-	SetPagePrivate(page);
 	f2fs_trace_pid(page);
 
+	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
+	SetPagePrivate(page);
+
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
 	/* add atomic page indices to the list */
 	new->page = page;
 	INIT_LIST_HEAD(&new->list);
-retry:
+
 	/* increase reference count with clean state */
 	mutex_lock(&fi->inmem_lock);
-	err = radix_tree_insert(&fi->inmem_root, page->index, new);
-	if (err == -EEXIST) {
-		mutex_unlock(&fi->inmem_lock);
-		kmem_cache_free(inmem_entry_slab, new);
-		return;
-	} else if (err) {
-		mutex_unlock(&fi->inmem_lock);
-		goto retry;
-	}
 	get_page(page);
 	list_add_tail(&new->list, &fi->inmem_pages);
 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -227,7 +219,7 @@
 	trace_f2fs_register_inmem_page(page, INMEM);
 }
 
-void commit_inmem_pages(struct inode *inode, bool abort)
+int commit_inmem_pages(struct inode *inode, bool abort)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -239,6 +231,7 @@
 		.rw = WRITE_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
+	int err = 0;
 
 	/*
 	 * The abort is true only when f2fs_evict_inode is called.
@@ -254,8 +247,8 @@
 
 	mutex_lock(&fi->inmem_lock);
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
+		lock_page(cur->page);
 		if (!abort) {
-			lock_page(cur->page);
 			if (cur->page->mapping == inode->i_mapping) {
 				set_page_dirty(cur->page);
 				f2fs_wait_on_page_writeback(cur->page, DATA);
@@ -263,15 +256,20 @@
 					inode_dec_dirty_pages(inode);
 				trace_f2fs_commit_inmem_page(cur->page, INMEM);
 				fio.page = cur->page;
-				do_write_data_page(&fio);
+				err = do_write_data_page(&fio);
 				submit_bio = true;
+				if (err) {
+					unlock_page(cur->page);
+					break;
+				}
 			}
-			f2fs_put_page(cur->page, 1);
 		} else {
 			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
-			put_page(cur->page);
 		}
-		radix_tree_delete(&fi->inmem_root, cur->page->index);
+		set_page_private(cur->page, 0);
+		ClearPagePrivate(cur->page);
+		f2fs_put_page(cur->page, 1);
+
 		list_del(&cur->list);
 		kmem_cache_free(inmem_entry_slab, cur);
 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -283,6 +281,7 @@
 		if (submit_bio)
 			f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	}
+	return err;
 }
 
 /*
@@ -304,10 +303,18 @@
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
 	/* try to shrink extent cache when there is no enough memory */
-	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!available_free_memory(sbi, EXTENT_CACHE))
+		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
-	/* check the # of cached NAT entries and prefree segments */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+	/* check the # of cached NAT entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES))
+		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+	if (!available_free_memory(sbi, FREE_NIDS))
+		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+
+	/* checkpoint is the only way to shrink partial cached entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
@@ -323,10 +330,12 @@
 		return 0;
 
 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio = bio_alloc(GFP_NOIO, 0);
+		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;
 
+		bio = f2fs_bio_alloc(0);
+
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
@@ -358,8 +367,15 @@
 	if (test_opt(sbi, NOBARRIER))
 		return 0;
 
-	if (!test_opt(sbi, FLUSH_MERGE))
-		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
+	if (!test_opt(sbi, FLUSH_MERGE)) {
+		struct bio *bio = f2fs_bio_alloc(0);
+		int ret;
+
+		bio->bi_bdev = sbi->sb->s_bdev;
+		ret = submit_bio_wait(WRITE_FLUSH, bio);
+		bio_put(bio);
+		return ret;
+	}
 
 	init_completion(&cmd.wait);
 
@@ -503,7 +519,7 @@
 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }
 
-void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	int err = -ENOTSUPP;
 
@@ -513,13 +529,16 @@
 		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
 		if (f2fs_test_bit(offset, se->discard_map))
-			return;
+			return false;
 
 		err = f2fs_issue_discard(sbi, blkaddr, 1);
 	}
 
-	if (err)
+	if (err) {
 		update_meta_page(sbi, NULL, blkaddr);
+		return true;
+	}
+	return false;
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1218,7 +1237,8 @@
 	mutex_lock(&sit_i->sentry_lock);
 
 	/* direct_io'ed data is aligned to the segment for better performance */
-	if (direct_io && curseg->next_blkoff)
+	if (direct_io && curseg->next_blkoff &&
+				!has_not_enough_free_secs(sbi, 0))
 		__allocate_new_segments(sbi, type);
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1733,7 +1753,7 @@
 static struct sit_entry_set *grab_sit_entry_set(void)
 {
 	struct sit_entry_set *ses =
-			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
 
 	ses->entry_cnt = 0;
 	INIT_LIST_HEAD(&ses->set_list);
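
The f2fs_balance_fs_bg() rework above stops shrinking caches unconditionally: each cache is now trimmed only when its available_free_memory() check fails, and a checkpoint remains the fallback for the partial (dirty) entries the shrinkers cannot drop. A rough userspace sketch of that gating pattern; low_on() and shrink() are hypothetical stand-ins, not f2fs APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for available_free_memory() and the shrinkers. */
    static bool low_on(const char *cache) { return cache[0] == 'n'; }
    static void shrink(const char *cache) { printf("shrink %s\n", cache); }

    static void balance_bg(void)
    {
        /* Trim each cache only when its own memory check fails. */
        if (low_on("extent_cache"))
            shrink("extent_cache");
        if (low_on("nat_entries"))
            shrink("nat_entries");
        if (low_on("free_nids"))
            shrink("free_nids");

        /* Dirty entries can only be dropped by a checkpoint. */
        if (low_on("nat_entries") || low_on("ino_entries"))
            printf("checkpoint via f2fs_sync_fs\n");
    }

    int main(void)
    {
        balance_bg();
        return 0;
    }
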
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 79e7b87..b6e4ed1 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -177,6 +177,15 @@
 	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
+/*
+ * This value is set in a page's private data to indicate that the
+ * page has been written atomically and is on the inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE		0x0000ffff
+
+#define IS_ATOMIC_WRITTEN_PAGE(page)			\
+		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+
 struct inmem_pages {
 	struct list_head list;
 	struct page *page;
@@ -555,16 +564,15 @@
 	return curseg->next_blkoff;
 }
 
-#ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
+	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
-	BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
-	BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
+	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
+					|| blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
@@ -573,16 +581,11 @@
 static inline void check_block_count(struct f2fs_sb_info *sbi,
 		int segno, struct f2fs_sit_entry *raw_sit)
 {
+#ifdef CONFIG_F2FS_CHECK_FS
 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
 
-	/* check segment usage */
-	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
-
-	/* check boundary of a given segment number */
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
-
 	/* check bitmap with valid block count */
 	do {
 		if (is_valid) {
@@ -598,35 +601,11 @@
 		is_valid = !is_valid;
 	} while (cur_pos < sbi->blocks_per_seg);
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
-}
-#else
-static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
-{
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
-{
-	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-/*
- * Summary block is always treated as an invalid block
- */
-static inline void check_block_count(struct f2fs_sb_info *sbi,
-		int segno, struct f2fs_sit_entry *raw_sit)
-{
-	/* check segment usage */
-	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-
-	/* check boundary of a given segment number */
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
 #endif
+	/* check segment usage, and check boundary of a given segment number */
+	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+					|| segno > TOTAL_SEGS(sbi) - 1);
+}
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
 						unsigned int start)
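
The ATOMIC_WRITTEN_PAGE tag added above replaces the old per-inode radix tree: an in-memory page is marked by storing a magic value in its private field, and IS_ATOMIC_WRITTEN_PAGE() recovers that fact from the page alone. A small userspace model of the tagging, where struct page and the flag handling are simplified stand-ins for the kernel's page machinery:

    #include <assert.h>
    #include <stdbool.h>

    #define ATOMIC_WRITTEN_PAGE 0x0000ffff

    /* Simplified stand-in for struct page and its private-data helpers. */
    struct page {
        unsigned long private;  /* models page_private(page) */
        bool has_private;       /* models PagePrivate(page) */
    };

    static void tag_atomic_written(struct page *page)
    {
        page->private = ATOMIC_WRITTEN_PAGE;    /* set_page_private() */
        page->has_private = true;               /* SetPagePrivate() */
    }

    static bool is_atomic_written(const struct page *page)
    {
        return page->has_private &&
               page->private == (unsigned long)ATOMIC_WRITTEN_PAGE;
    }

    int main(void)
    {
        struct page p = { 0, false };

        assert(!is_atomic_written(&p));
        tag_atomic_written(&p);
        assert(is_atomic_written(&p));
        return 0;
    }
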
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
new file mode 100644
index 0000000..da0d8e0
--- /dev/null
+++ b/fs/f2fs/shrinker.c
@@ -0,0 +1,139 @@
+/*
+ * f2fs shrinker support
+ *   the basic infra was copied from fs/ubifs/shrinker.c
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+
+static LIST_HEAD(f2fs_list);
+static DEFINE_SPINLOCK(f2fs_list_lock);
+static unsigned int shrinker_run_no;
+
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+}
+
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+	if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
+		return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+	return 0;
+}
+
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+{
+	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+}
+
+unsigned long f2fs_shrink_count(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned long count = 0;
+
+	spin_lock(&f2fs_list_lock);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		/* count extent cache entries */
+		count += __count_extent_cache(sbi);
+
+		/* shrink clean nat cache entries */
+		count += __count_nat_entries(sbi);
+
+		/* count free nids cache entries */
+		count += __count_free_nids(sbi);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		mutex_unlock(&sbi->umount_mutex);
+	}
+	spin_unlock(&f2fs_list_lock);
+	return count;
+}
+
+unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	unsigned long nr = sc->nr_to_scan;
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned int run_no;
+	unsigned long freed = 0;
+
+	spin_lock(&f2fs_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		if (sbi->shrinker_run_no == run_no)
+			break;
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		sbi->shrinker_run_no = run_no;
+
+		/* shrink extent cache entries */
+		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+
+		/* shrink clean nat cache entries */
+		if (freed < nr)
+			freed += try_to_free_nats(sbi, nr - freed);
+
+		/* shrink free nids cache entries */
+		if (freed < nr)
+			freed += try_to_free_nids(sbi, nr - freed);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		list_move_tail(&sbi->s_list, &f2fs_list);
+		mutex_unlock(&sbi->umount_mutex);
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&f2fs_list_lock);
+	return freed;
+}
+
+void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+{
+	spin_lock(&f2fs_list_lock);
+	list_add_tail(&sbi->s_list, &f2fs_list);
+	spin_unlock(&f2fs_list_lock);
+}
+
+void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+{
+	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
+	spin_lock(&f2fs_list_lock);
+	list_del(&sbi->s_list);
+	spin_unlock(&f2fs_list_lock);
+}
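
f2fs_shrink_scan() above spreads its scan budget across the caches in a fixed order: half of nr_to_scan goes to the extent cache first, and the NAT and free-nid caches only see the remainder if the budget is not yet exhausted. A minimal sketch of that budget split for a single superblock; the shrink_* helpers are hypothetical stand-ins:

    #include <stdio.h>

    /* Hypothetical stand-ins for the three cache shrinkers. */
    static unsigned long shrink_extents(unsigned long nr) { return nr / 2; }
    static unsigned long shrink_nats(unsigned long nr)    { return nr / 4; }
    static unsigned long shrink_nids(unsigned long nr)    { return nr; }

    static unsigned long scan_one_sb(unsigned long nr)
    {
        unsigned long freed = 0;

        freed += shrink_extents(nr >> 1);   /* half the budget first */
        if (freed < nr)
            freed += shrink_nats(nr - freed);
        if (freed < nr)
            freed += shrink_nids(nr - freed);
        return freed;
    }

    int main(void)
    {
        printf("freed %lu of 128\n", scan_one_sb(128));
        return 0;
    }
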
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a06b0b4..f794781 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -39,6 +39,13 @@
 static struct kmem_cache *f2fs_inode_cachep;
 static struct kset *f2fs_kset;
 
+/* f2fs-wide shrinker description */
+static struct shrinker f2fs_shrinker_info = {
+	.scan_objects = f2fs_shrink_scan,
+	.count_objects = f2fs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
 enum {
 	Opt_gc_background,
 	Opt_disable_roll_forward,
@@ -58,6 +65,7 @@
 	Opt_nobarrier,
 	Opt_fastboot,
 	Opt_extent_cache,
+	Opt_noextent_cache,
 	Opt_noinline_data,
 	Opt_err,
 };
@@ -81,6 +89,7 @@
 	{Opt_nobarrier, "nobarrier"},
 	{Opt_fastboot, "fastboot"},
 	{Opt_extent_cache, "extent_cache"},
+	{Opt_noextent_cache, "noextent_cache"},
 	{Opt_noinline_data, "noinline_data"},
 	{Opt_err, NULL},
 };
@@ -382,6 +391,9 @@
 		case Opt_extent_cache:
 			set_opt(sbi, EXTENT_CACHE);
 			break;
+		case Opt_noextent_cache:
+			clear_opt(sbi, EXTENT_CACHE);
+			break;
 		case Opt_noinline_data:
 			clear_opt(sbi, INLINE_DATA);
 			break;
@@ -410,9 +422,7 @@
 	atomic_set(&fi->dirty_pages, 0);
 	fi->i_current_depth = 1;
 	fi->i_advise = 0;
-	rwlock_init(&fi->ext_lock);
 	init_rwsem(&fi->i_sem);
-	INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
 	INIT_LIST_HEAD(&fi->inmem_pages);
 	mutex_init(&fi->inmem_lock);
 
@@ -441,17 +451,22 @@
 	 */
 	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
 		if (!inode->i_nlink && !is_bad_inode(inode)) {
+			/* to avoid evict_inode call simultaneously */
+			atomic_inc(&inode->i_count);
 			spin_unlock(&inode->i_lock);
 
 			/* some remained atomic pages should discarded */
 			if (f2fs_is_atomic_file(inode))
 				commit_inmem_pages(inode, true);
 
+			/* should remain fi->extent_tree for writepage */
+			f2fs_destroy_extent_node(inode);
+
 			sb_start_intwrite(inode->i_sb);
 			i_size_write(inode, 0);
 
 			if (F2FS_HAS_BLOCKS(inode))
-				f2fs_truncate(inode);
+				f2fs_truncate(inode, true);
 
 			sb_end_intwrite(inode->i_sb);
 
@@ -461,6 +476,7 @@
 					F2FS_I(inode)->i_crypt_info);
 #endif
 			spin_lock(&inode->i_lock);
+			atomic_dec(&inode->i_count);
 		}
 		return 0;
 	}
@@ -498,9 +514,11 @@
 	}
 	kobject_del(&sbi->s_kobj);
 
-	f2fs_destroy_stats(sbi);
 	stop_gc_thread(sbi);
 
+	/* prevent remaining shrinker jobs */
+	mutex_lock(&sbi->umount_mutex);
+
 	/*
 	 * We don't need to do checkpoint when superblock is clean.
 	 * But, the previous checkpoint was not done by umount, it needs to do
@@ -514,6 +532,9 @@
 		write_checkpoint(sbi, &cpc);
 	}
 
+	/* write_checkpoint can update stat information */
+	f2fs_destroy_stats(sbi);
+
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
@@ -521,6 +542,9 @@
 	release_dirty_inode(sbi);
 	release_discard_addrs(sbi);
 
+	f2fs_leave_shrinker(sbi);
+	mutex_unlock(&sbi->umount_mutex);
+
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
 
@@ -647,6 +671,8 @@
 		seq_puts(seq, ",fastboot");
 	if (test_opt(sbi, EXTENT_CACHE))
 		seq_puts(seq, ",extent_cache");
+	else
+		seq_puts(seq, ",noextent_cache");
 	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
 	return 0;
@@ -667,7 +693,7 @@
 		struct seg_entry *se = get_seg_entry(sbi, i);
 
 		if ((i % 10) == 0)
-			seq_printf(seq, "%-5d", i);
+			seq_printf(seq, "%-10d", i);
 		seq_printf(seq, "%d|%-3u", se->type,
 					get_valid_blocks(sbi, i, 1));
 		if ((i % 10) == 9 || i == (total_segs - 1))
@@ -699,6 +725,7 @@
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_DATA);
+	set_opt(sbi, EXTENT_CACHE);
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -970,6 +997,9 @@
 
 	sbi->dir_level = DEF_DIR_LEVEL;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
+
+	INIT_LIST_HEAD(&sbi->s_list);
+	mutex_init(&sbi->umount_mutex);
 }
 
 /*
@@ -1135,7 +1165,9 @@
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
+
+	/* disallow all the data/node/meta page writes */
+	set_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1212,8 +1244,12 @@
 		goto free_nm;
 	}
 
+	f2fs_join_shrinker(sbi);
+
 	/* if there are any orphan nodes, free them */
-	recover_orphan_inodes(sbi);
+	err = recover_orphan_inodes(sbi);
+	if (err)
+		goto free_node_inode;
 
 	/* read root inode and dentry */
 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
@@ -1275,6 +1311,8 @@
 			goto free_kobj;
 		}
 	}
+	/* recover_fsync_data() cleared this already */
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 
 	/*
 	 * If filesystem is not mounted as read-only then
@@ -1308,7 +1346,10 @@
 	dput(sb->s_root);
 	sb->s_root = NULL;
 free_node_inode:
+	mutex_lock(&sbi->umount_mutex);
+	f2fs_leave_shrinker(sbi);
 	iput(sbi->node_inode);
+	mutex_unlock(&sbi->umount_mutex);
 free_nm:
 	destroy_node_manager(sbi);
 free_sm:
@@ -1404,13 +1445,20 @@
 	err = f2fs_init_crypto();
 	if (err)
 		goto free_kset;
-	err = register_filesystem(&f2fs_fs_type);
+
+	err = register_shrinker(&f2fs_shrinker_info);
 	if (err)
 		goto free_crypto;
+
+	err = register_filesystem(&f2fs_fs_type);
+	if (err)
+		goto free_shrinker;
 	f2fs_create_root_stats();
 	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
 	return 0;
 
+free_shrinker:
+	unregister_shrinker(&f2fs_shrinker_info);
 free_crypto:
 	f2fs_exit_crypto();
 free_kset:
@@ -1433,6 +1481,7 @@
 {
 	remove_proc_entry("fs/f2fs", NULL);
 	f2fs_destroy_root_stats();
+	unregister_shrinker(&f2fs_shrinker_info);
 	unregister_filesystem(&f2fs_fs_type);
 	f2fs_exit_crypto();
 	destroy_extent_cache();
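
The init_f2fs_fs() hunk above keeps the unwind labels in exact reverse order of setup: the shrinker is registered before the filesystem, so a register_filesystem() failure jumps to free_shrinker, which falls through to the older cleanup labels. A compilable sketch of the same pattern; all functions are stand-ins and a failure is simulated, none of this is f2fs code:

    #include <stdio.h>

    static int register_shrinker_stub(void)    { return 0; }
    static void unregister_shrinker_stub(void) { }
    static int register_filesystem_stub(void)  { return -1; /* simulated */ }
    static void exit_crypto_stub(void)         { }

    static int init_fs(void)
    {
        int err;

        err = register_shrinker_stub();
        if (err)
            goto free_crypto;
        err = register_filesystem_stub();
        if (err)
            goto free_shrinker;
        return 0;

    free_shrinker:
        unregister_shrinker_stub();    /* falls through to older labels */
    free_crypto:
        exit_crypto_stub();
        return err;
    }

    int main(void)
    {
        printf("init_fs() = %d\n", init_fs());
        return 0;
    }
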
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 07449b98..4de2286 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -499,9 +499,12 @@
 
 	len = strlen(name);
 
-	if (len > F2FS_NAME_LEN || size > MAX_VALUE_LEN(inode))
+	if (len > F2FS_NAME_LEN)
 		return -ERANGE;
 
+	if (size > MAX_VALUE_LEN(inode))
+		return -E2BIG;
+
 	base_addr = read_all_xattrs(inode, ipage);
 	if (!base_addr)
 		goto exit;
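
With the xattr.c change above, f2fs now distinguishes a too-long attribute name (-ERANGE) from a value that exceeds the inode's xattr space (-E2BIG), which matches what setxattr(2) documents for oversized values. A userspace probe of the value-size case; the path is hypothetical and assumes an f2fs mount:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
        static char big[65536];

        memset(big, 'x', sizeof(big));
        /* Hypothetical file on an f2fs mount. */
        if (setxattr("/mnt/f2fs/file", "user.test", big, sizeof(big), 0) < 0)
            printf("oversized value: %s\n", strerror(errno)); /* expect E2BIG */
        return 0;
    }
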
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5fa588e..ae0f438 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -88,7 +88,7 @@
 
 static inline struct inode *wb_inode(struct list_head *head)
 {
-	return list_entry(head, struct inode, i_wb_list);
+	return list_entry(head, struct inode, i_io_list);
 }
 
 /*
@@ -125,22 +125,22 @@
 }
 
 /**
- * inode_wb_list_move_locked - move an inode onto a bdi_writeback IO list
+ * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
  * @inode: inode to be moved
  * @wb: target bdi_writeback
  * @head: one of @wb->b_{dirty|io|more_io}
  *
- * Move @inode->i_wb_list to @list of @wb and set %WB_has_dirty_io.
+ * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
  * Returns %true if @inode is the first occupant of the !dirty_time IO
  * lists; otherwise, %false.
  */
-static bool inode_wb_list_move_locked(struct inode *inode,
+static bool inode_io_list_move_locked(struct inode *inode,
 				      struct bdi_writeback *wb,
 				      struct list_head *head)
 {
 	assert_spin_locked(&wb->list_lock);
 
-	list_move(&inode->i_wb_list, head);
+	list_move(&inode->i_io_list, head);
 
 	/* dirty_time doesn't count as dirty_io until expiration */
 	if (head != &wb->b_dirty_time)
@@ -151,19 +151,19 @@
 }
 
 /**
- * inode_wb_list_del_locked - remove an inode from its bdi_writeback IO list
+ * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
  * @inode: inode to be removed
  * @wb: bdi_writeback @inode is being removed from
  *
  * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
  * clear %WB_has_dirty_io if all are empty afterwards.
  */
-static void inode_wb_list_del_locked(struct inode *inode,
+static void inode_io_list_del_locked(struct inode *inode,
 				     struct bdi_writeback *wb)
 {
 	assert_spin_locked(&wb->list_lock);
 
-	list_del_init(&inode->i_wb_list);
+	list_del_init(&inode->i_io_list);
 	wb_io_lists_depopulated(wb);
 }
 
@@ -351,7 +351,7 @@
 
 	/*
 	 * Once I_FREEING is visible under i_lock, the eviction path owns
-	 * the inode and we shouldn't modify ->i_wb_list.
+	 * the inode and we shouldn't modify ->i_io_list.
 	 */
 	if (unlikely(inode->i_state & I_FREEING))
 		goto skip_switch;
@@ -390,16 +390,16 @@
 	 * is always correct including from ->b_dirty_time.  The transfer
 	 * preserves @inode->dirtied_when ordering.
 	 */
-	if (!list_empty(&inode->i_wb_list)) {
+	if (!list_empty(&inode->i_io_list)) {
 		struct inode *pos;
 
-		inode_wb_list_del_locked(inode, old_wb);
+		inode_io_list_del_locked(inode, old_wb);
 		inode->i_wb = new_wb;
-		list_for_each_entry(pos, &new_wb->b_dirty, i_wb_list)
+		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 			if (time_after_eq(inode->dirtied_when,
 					  pos->dirtied_when))
 				break;
-		inode_wb_list_move_locked(inode, new_wb, pos->i_wb_list.prev);
+		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
 	} else {
 		inode->i_wb = new_wb;
 	}
@@ -961,12 +961,12 @@
 /*
  * Remove the inode from the writeback list it is on.
  */
-void inode_wb_list_del(struct inode *inode)
+void inode_io_list_del(struct inode *inode)
 {
 	struct bdi_writeback *wb;
 
 	wb = inode_to_wb_and_lock_list(inode);
-	inode_wb_list_del_locked(inode, wb);
+	inode_io_list_del_locked(inode, wb);
 	spin_unlock(&wb->list_lock);
 }
 
@@ -988,7 +988,7 @@
 		if (time_before(inode->dirtied_when, tail->dirtied_when))
 			inode->dirtied_when = jiffies;
 	}
-	inode_wb_list_move_locked(inode, wb, &wb->b_dirty);
+	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
 }
 
 /*
@@ -996,7 +996,7 @@
  */
 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 {
-	inode_wb_list_move_locked(inode, wb, &wb->b_more_io);
+	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
 }
 
 static void inode_sync_complete(struct inode *inode)
@@ -1055,7 +1055,7 @@
 		if (older_than_this &&
 		    inode_dirtied_after(inode, *older_than_this))
 			break;
-		list_move(&inode->i_wb_list, &tmp);
+		list_move(&inode->i_io_list, &tmp);
 		moved++;
 		if (flags & EXPIRE_DIRTY_ATIME)
 			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
@@ -1078,7 +1078,7 @@
 		list_for_each_prev_safe(pos, node, &tmp) {
 			inode = wb_inode(pos);
 			if (inode->i_sb == sb)
-				list_move(&inode->i_wb_list, dispatch_queue);
+				list_move(&inode->i_io_list, dispatch_queue);
 		}
 	}
 out:
@@ -1232,10 +1232,10 @@
 		redirty_tail(inode, wb);
 	} else if (inode->i_state & I_DIRTY_TIME) {
 		inode->dirtied_when = jiffies;
-		inode_wb_list_move_locked(inode, wb, &wb->b_dirty_time);
+		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
 	} else {
 		/* The inode is clean. Remove from writeback lists. */
-		inode_wb_list_del_locked(inode, wb);
+		inode_io_list_del_locked(inode, wb);
 	}
 }
 
@@ -1378,7 +1378,7 @@
 	 * touch it. See comment above for explanation.
 	 */
 	if (!(inode->i_state & I_DIRTY_ALL))
-		inode_wb_list_del_locked(inode, wb);
+		inode_io_list_del_locked(inode, wb);
 	spin_unlock(&wb->list_lock);
 	inode_sync_complete(inode);
 out:
@@ -1439,7 +1439,9 @@
 	unsigned long start_time = jiffies;
 	long write_chunk;
 	long wrote = 0;  /* count both pages and inodes */
+	struct blk_plug plug;
 
+	blk_start_plug(&plug);
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
 
@@ -1537,6 +1539,7 @@
 				break;
 		}
 	}
+	blk_finish_plug(&plug);
 	return wrote;
 }
 
@@ -2088,7 +2091,7 @@
 			else
 				dirty_list = &wb->b_dirty_time;
 
-			wakeup_bdi = inode_wb_list_move_locked(inode, wb,
+			wakeup_bdi = inode_io_list_move_locked(inode, wb,
 							       dirty_list);
 
 			spin_unlock(&wb->list_lock);
@@ -2111,6 +2114,15 @@
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
+/*
+ * The @s_sync_lock is used to serialise concurrent sync operations
+ * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
+ * Concurrent callers will block on the s_sync_lock rather than doing contending
+ * walks. The queueing maintains sync(2) required behaviour as all the IO that
+ * has been issued up to the time this function is entered is guaranteed to be
+ * completed by the time we have gained the lock and waited for all IO that is
+ * in progress regardless of the order callers are granted the lock.
+ */
 static void wait_sb_inodes(struct super_block *sb)
 {
 	struct inode *inode, *old_inode = NULL;
@@ -2121,7 +2133,8 @@
 	 */
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	spin_lock(&inode_sb_list_lock);
+	mutex_lock(&sb->s_sync_lock);
+	spin_lock(&sb->s_inode_list_lock);
 
 	/*
 	 * Data integrity sync. Must wait for all pages under writeback,
@@ -2141,14 +2154,14 @@
 		}
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&sb->s_inode_list_lock);
 
 		/*
 		 * We hold a reference to 'inode' so it couldn't have been
 		 * removed from s_inodes list while we dropped the
-		 * inode_sb_list_lock.  We cannot iput the inode now as we can
+		 * s_inode_list_lock.  We cannot iput the inode now as we can
 		 * be holding the last reference and we cannot iput it under
-		 * inode_sb_list_lock. So we keep the reference and iput it
+		 * s_inode_list_lock. So we keep the reference and iput it
 		 * later.
 		 */
 		iput(old_inode);
@@ -2158,10 +2171,11 @@
 
 		cond_resched();
 
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&sb->s_inode_list_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 	iput(old_inode);
+	mutex_unlock(&sb->s_sync_lock);
 }
 
 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
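
The only functional change in the writeback loop above is the blk_plug: requests submitted while the plug is held are queued per task and dispatched as larger, merged batches when the plug is finished or the task sleeps. A sketch of the pattern, not code from this patch; more_work() and submit_one() are hypothetical stand-ins for the per-inode writeback work:

    #include <linux/blkdev.h>

    static int more_work(void) { return 0; }
    static void submit_one(void) { }

    static void write_many(void)
    {
        struct blk_plug plug;

        blk_start_plug(&plug);
        while (more_work())
            submit_one();
        blk_finish_plug(&plug);  /* queued requests dispatched, merged, here */
    }
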
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2982445..894fb01 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1334,11 +1334,11 @@
 	if (is_ancestor(root, sdp->sd_master_dir))
 		seq_puts(s, ",meta");
 	if (args->ar_lockproto[0])
-		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
+		seq_show_option(s, "lockproto", args->ar_lockproto);
 	if (args->ar_locktable[0])
-		seq_printf(s, ",locktable=%s", args->ar_locktable);
+		seq_show_option(s, "locktable", args->ar_locktable);
 	if (args->ar_hostdata[0])
-		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
+		seq_show_option(s, "hostdata", args->ar_hostdata);
 	if (args->ar_spectator)
 		seq_puts(s, ",spectator");
 	if (args->ar_localflocks)
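
The gfs2, hfs, hfsplus and hostfs hunks here all move from raw seq_printf() to the new seq_show_option() helpers, which escape the value so an option string containing ',' or '=' cannot forge extra mount options in /proc/mounts. A minimal sketch of a ->show_options implementation using it; struct my_sb_info and its field are hypothetical:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    /* Hypothetical per-sb info; only the escaping call matters here. */
    struct my_sb_info {
        char lockproto[64];
    };

    static int my_show_options(struct seq_file *s, struct dentry *root)
    {
        struct my_sb_info *sbi = root->d_sb->s_fs_info;

        if (sbi->lockproto[0])
            seq_show_option(s, "lockproto", sbi->lockproto);
        return 0;
    }
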
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 55c03b9..4574fdd 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -136,9 +136,9 @@
 	struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
 
 	if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
+		seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
 	if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
-		seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
+		seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
 	seq_printf(seq, ",uid=%u,gid=%u",
 			from_kuid_munged(&init_user_ns, sbi->s_uid),
 			from_kgid_munged(&init_user_ns, sbi->s_gid));
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index c90b72e..bb806e5 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -218,9 +218,9 @@
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
 
 	if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
+		seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
 	if (sbi->type != HFSPLUS_DEF_CR_TYPE)
-		seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
+		seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
 	seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
 			from_kuid_munged(&init_user_ns, sbi->uid),
 			from_kgid_munged(&init_user_ns, sbi->gid));
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 059597b..2ac99db 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -260,7 +260,7 @@
 	size_t offset = strlen(root_ino) + 1;
 
 	if (strlen(root_path) > offset)
-		seq_printf(seq, ",%s", root_path + offset);
+		seq_show_option(seq, root_path + offset, NULL);
 
 	if (append)
 		seq_puts(seq, ",append");
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 8057fe4..f626114 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -10,6 +10,30 @@
 #include <linux/blkdev.h>
 #include "hpfs_fn.h"
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] == sec) {
+			return sbi->hotfix_to[i];
+		}
+	}
+	return sec;
+}
+
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
+			n = sbi->hotfix_from[i] - sec;
+		}
+	}
+	return n;
+}
+
 void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
 {
 	struct buffer_head *bh;
@@ -18,6 +42,9 @@
 	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
 		return;
 
+	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
+		return;
+
 	bh = sb_find_get_block(s, secno);
 	if (bh) {
 		if (buffer_uptodate(bh)) {
@@ -51,7 +78,7 @@
 
 	cond_resched();
 
-	*bhp = bh = sb_bread(s, secno);
+	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
 	if (bh != NULL)
 		return bh->b_data;
 	else {
@@ -71,7 +98,7 @@
 
 	cond_resched();
 
-	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
+	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
 		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
 		set_buffer_uptodate(bh);
 		return bh->b_data;
@@ -99,10 +126,10 @@
 
 	hpfs_prefetch_sectors(s, secno, 4 + ahead);
 
-	if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
-	if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
-	if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
-	if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
+	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
+	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
+	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
+	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;
 
 	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
 	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 7ca28d6..d3bcdd9 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -83,6 +83,11 @@
 	if (s) {
 		if (bh_result->b_size >> 9 < n_secs)
 			n_secs = bh_result->b_size >> 9;
+		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
+		if (unlikely(!n_secs)) {
+			s = hpfs_search_hotfix_map(inode->i_sb, s);
+			n_secs = 1;
+		}
 		map_bh(bh_result, inode->i_sb, s);
 		bh_result->b_size = n_secs << 9;
 		goto ret_0;
@@ -101,7 +106,7 @@
 	inode->i_blocks++;
 	hpfs_i(inode)->mmu_private += 512;
 	set_buffer_new(bh_result);
-	map_bh(bh_result, inode->i_sb, s);
+	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
 	ret_0:
 	r = 0;
 	ret_r:
@@ -181,7 +186,7 @@
 
 static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 {
-	return generic_block_bmap(mapping,block,hpfs_get_block);
+	return generic_block_bmap(mapping, block, hpfs_get_block);
 }
 
 const struct address_space_operations hpfs_aops = {
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c4867b5..975654a 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -88,6 +88,10 @@
 	unsigned sb_max_fwd_alloc;	/* max forward allocation */
 	int sb_timeshift;
 	struct rcu_head rcu;
+
+	unsigned n_hotfixes;
+	secno hotfix_from[256];
+	secno hotfix_to[256];
 };
 
 /* Four 512-byte buffers and the 2k block obtained by concatenating them */
@@ -217,6 +221,8 @@
 
 /* buffer.c */
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec);
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n);
 void hpfs_prefetch_sectors(struct super_block *, unsigned, int);
 void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
 void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
@@ -285,6 +291,7 @@
 void hpfs_prefetch_bitmap(struct super_block *, unsigned);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
 __le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 442770e..a69bbc1 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -130,6 +130,32 @@
 	return b;
 }
 
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock)
+{
+	struct quad_buffer_head qbh;
+	u32 *directory;
+	u32 n_hotfixes, n_used_hotfixes;
+	unsigned i;
+
+	n_hotfixes = le32_to_cpu(spareblock->n_spares);
+	n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used);
+
+	if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) {
+		hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes);
+		return;
+	}
+	if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) {
+		hpfs_error(s, "can't load hotfix map");
+		return;
+	}
+	for (i = 0; i < n_used_hotfixes; i++) {
+		hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]);
+		hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]);
+	}
+	hpfs_sb(s)->n_hotfixes = n_used_hotfixes;
+	hpfs_brelse4(&qbh);
+}
+
 /*
  * Load fnode to memory
  */
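
The hpfs hotfix support above is a straight linear remap: any sector listed in hotfix_from[] is transparently redirected to its spare in hotfix_to[], and everything else passes through unchanged. A userspace model with made-up sample data:

    #include <stdio.h>

    typedef unsigned int secno;

    /* Made-up sample data modelling sbi->hotfix_from[]/hotfix_to[]. */
    static secno hotfix_from[] = { 100, 205 };
    static secno hotfix_to[]   = { 900, 901 };
    static unsigned int n_hotfixes = 2;

    static secno remap(secno sec)
    {
        unsigned int i;

        for (i = 0; i < n_hotfixes; i++)
            if (hotfix_from[i] == sec)
                return hotfix_to[i];    /* redirected to the spare */
        return sec;                     /* not hotfixed, unchanged */
    }

    int main(void)
    {
        printf("sector 205 -> %u, sector 7 -> %u\n", remap(205), remap(7));
        return 0;
    }
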
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index a0872f2..9e92c9c 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -8,6 +8,17 @@
 #include <linux/sched.h>
 #include "hpfs_fn.h"
 
+static void hpfs_update_directory_times(struct inode *dir)
+{
+	time_t t = get_seconds();
+	if (t == dir->i_mtime.tv_sec &&
+	    t == dir->i_ctime.tv_sec)
+		return;
+	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
+	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+	hpfs_write_inode_nolock(dir);
+}
+
 static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	const unsigned char *name = dentry->d_name.name;
@@ -99,6 +110,7 @@
 		result->i_mode = mode | S_IFDIR;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -187,6 +199,7 @@
 		result->i_mode = mode | S_IFREG;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -262,6 +275,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	brelse(bh);
 	hpfs_unlock(dir->i_sb);
@@ -340,6 +354,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -423,6 +438,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -477,6 +494,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -595,7 +614,7 @@
 		goto end1;
 	}
 
-	end:
+end:
 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
 	if (S_ISDIR(i->i_mode)) {
 		inc_nlink(new_dir);
@@ -610,6 +629,10 @@
 		brelse(bh);
 	}
 end1:
+	if (!err) {
+		hpfs_update_directory_times(old_dir);
+		hpfs_update_directory_times(new_dir);
+	}
 	hpfs_unlock(i->i_sb);
 	return err;
 }
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 68a9bed..a561591 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -628,6 +628,9 @@
 		goto bail4;
 	}
 
+	if (spareblock->n_spares_used)
+		hpfs_load_hotfix_map(s, spareblock);
+
 	/* Load bitmap directory */
 	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
 		goto bail4;
@@ -647,18 +650,6 @@
 		mark_buffer_dirty(bh2);
 	}
 
-	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
-		if (errs >= 2) {
-			pr_err("Hotfixes not supported here, try chkdsk\n");
-			mark_dirty(s, 0);
-			goto bail4;
-		}
-		hpfs_error(s, "hotfixes not supported here, try chkdsk");
-		if (errs == 0)
-			pr_err("Proceeding, but your filesystem will be probably corrupted by this driver...\n");
-		else
-			pr_err("This driver may read bad files or crash when operating on disk with hotfixes.\n");
-	}
 	if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
 		if (errs >= 2) {
 			pr_err("Spare dnodes used, try chkdsk\n");
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 973c24c..316adb9 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -12,6 +12,7 @@
 #include <linux/thread_info.h>
 #include <asm/current.h>
 #include <linux/sched.h>		/* remove ASAP */
+#include <linux/falloc.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/file.h>
@@ -84,6 +85,29 @@
 	{Opt_err,	NULL},
 };
 
+#ifdef CONFIG_NUMA
+static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
+					struct inode *inode, pgoff_t index)
+{
+	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
+							index);
+}
+
+static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
+{
+	mpol_cond_put(vma->vm_policy);
+}
+#else
+static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
+					struct inode *inode, pgoff_t index)
+{
+}
+
+static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
+{
+}
+#endif
+
 static void huge_pagevec_release(struct pagevec *pvec)
 {
 	int i;
@@ -293,26 +317,61 @@
 	return -EINVAL;
 }
 
-static void truncate_huge_page(struct page *page)
+static void remove_huge_page(struct page *page)
 {
 	ClearPageDirty(page);
 	ClearPageUptodate(page);
 	delete_from_page_cache(page);
 }
 
-static void truncate_hugepages(struct inode *inode, loff_t lstart)
+
+/*
+ * remove_inode_hugepages handles two distinct cases: truncation and hole
+ * punch.  There are subtle differences in operation for each case.
+ *
+ * truncation is indicated by the end of range being LLONG_MAX
+ *	In this case, we first scan the range and release found pages.
+ *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
+ *	maps and global counts.
+ * hole punch is indicated if end is not LLONG_MAX
+ *	In the hole punch case we scan the range and release found pages.
+ *	Only when releasing a page is the associated region/reserv map
+ *	deleted.  The region/reserv maps for ranges without associated
+ *	pages are not modified.
+ * Note: If the passed end of range value is beyond the end of file, but
+ * not LLONG_MAX, this routine still performs a hole punch operation.
+ */
+static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
+				   loff_t lend)
 {
 	struct hstate *h = hstate_inode(inode);
 	struct address_space *mapping = &inode->i_data;
 	const pgoff_t start = lstart >> huge_page_shift(h);
+	const pgoff_t end = lend >> huge_page_shift(h);
+	struct vm_area_struct pseudo_vma;
 	struct pagevec pvec;
 	pgoff_t next;
 	int i, freed = 0;
+	long lookup_nr = PAGEVEC_SIZE;
+	bool truncate_op = (lend == LLONG_MAX);
 
+	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pagevec_init(&pvec, 0);
 	next = start;
-	while (1) {
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+	while (next < end) {
+		/*
+		 * Make sure to never grab more pages than we
+		 * might possibly need.
+		 */
+		if (end - next < lookup_nr)
+			lookup_nr = end - next;
+
+		/*
+		 * This pagevec_lookup() may return pages past 'end',
+		 * so we must check for page->index > end.
+		 */
+		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
 			if (next == start)
 				break;
 			next = start;
@@ -321,26 +380,69 @@
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
+			u32 hash;
+
+			hash = hugetlb_fault_mutex_hash(h, current->mm,
+							&pseudo_vma,
+							mapping, next, 0);
+			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			lock_page(page);
+			if (page->index >= end) {
+				unlock_page(page);
+				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+				next = end;	/* we are done */
+				break;
+			}
+
+			/*
+			 * If page is mapped, it was faulted in after being
+			 * unmapped.  Do nothing in this race case.  In the
+			 * normal case page is not mapped.
+			 */
+			if (!page_mapped(page)) {
+				bool rsv_on_error = !PagePrivate(page);
+				/*
+				 * We must free the huge page and remove
+				 * from page cache (remove_huge_page) BEFORE
+				 * removing the region/reserve map
+				 * (hugetlb_unreserve_pages).  In rare out
+				 * of memory conditions, removal of the
+				 * region/reserve map could fail.  Before
+				 * freeing the page, note PagePrivate, which
+				 * is used in case of error.
+				 */
+				remove_huge_page(page);
+				freed++;
+				if (!truncate_op) {
+					if (unlikely(hugetlb_unreserve_pages(
+							inode, next,
+							next + 1, 1)))
+						hugetlb_fix_reserve_counts(
+							inode, rsv_on_error);
+				}
+			}
+
 			if (page->index > next)
 				next = page->index;
+
 			++next;
-			truncate_huge_page(page);
 			unlock_page(page);
-			freed++;
+
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
 		huge_pagevec_release(&pvec);
 	}
-	BUG_ON(!lstart && mapping->nrpages);
-	hugetlb_unreserve_pages(inode, start, freed);
+
+	if (truncate_op)
+		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
 }
 
 static void hugetlbfs_evict_inode(struct inode *inode)
 {
 	struct resv_map *resv_map;
 
-	truncate_hugepages(inode, 0);
+	remove_inode_hugepages(inode, 0, LLONG_MAX);
 	resv_map = (struct resv_map *)inode->i_mapping->private_data;
 	/* root inode doesn't have the resv_map, so we should check it */
 	if (resv_map)
@@ -349,11 +451,15 @@
 }
 
 static inline void
-hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
+hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
 {
 	struct vm_area_struct *vma;
 
-	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
+	/*
+	 * end == 0 indicates that the entire range after
+	 * start should be unmapped.
+	 */
+	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
 		unsigned long v_offset;
 
 		/*
@@ -362,13 +468,20 @@
 		 * which overlap the truncated area starting at pgoff,
 		 * and no vma on a 32-bit arch can span beyond the 4GB.
 		 */
-		if (vma->vm_pgoff < pgoff)
-			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
+		if (vma->vm_pgoff < start)
+			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
 		else
 			v_offset = 0;
 
-		unmap_hugepage_range(vma, vma->vm_start + v_offset,
-				     vma->vm_end, NULL);
+		if (end) {
+			end = ((end - start) << PAGE_SHIFT) +
+			       vma->vm_start + v_offset;
+			if (end > vma->vm_end)
+				end = vma->vm_end;
+		} else
+			end = vma->vm_end;
+
+		unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
 	}
 }
 
@@ -384,12 +497,164 @@
 	i_size_write(inode, offset);
 	i_mmap_lock_write(mapping);
 	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
-		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
+		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
 	i_mmap_unlock_write(mapping);
-	truncate_hugepages(inode, offset);
+	remove_inode_hugepages(inode, offset, LLONG_MAX);
 	return 0;
 }
 
+static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
+	struct hstate *h = hstate_inode(inode);
+	loff_t hpage_size = huge_page_size(h);
+	loff_t hole_start, hole_end;
+
+	/*
+	 * For hole punch round up the beginning offset of the hole and
+	 * round down the end.
+	 */
+	hole_start = round_up(offset, hpage_size);
+	hole_end = round_down(offset + len, hpage_size);
+
+	if (hole_end > hole_start) {
+		struct address_space *mapping = inode->i_mapping;
+
+		mutex_lock(&inode->i_mutex);
+		i_mmap_lock_write(mapping);
+		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
+			hugetlb_vmdelete_list(&mapping->i_mmap,
+						hole_start >> PAGE_SHIFT,
+						hole_end  >> PAGE_SHIFT);
+		i_mmap_unlock_write(mapping);
+		remove_inode_hugepages(inode, hole_start, hole_end);
+		mutex_unlock(&inode->i_mutex);
+	}
+
+	return 0;
+}
+
+static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+				loff_t len)
+{
+	struct inode *inode = file_inode(file);
+	struct address_space *mapping = inode->i_mapping;
+	struct hstate *h = hstate_inode(inode);
+	struct vm_area_struct pseudo_vma;
+	struct mm_struct *mm = current->mm;
+	loff_t hpage_size = huge_page_size(h);
+	unsigned long hpage_shift = huge_page_shift(h);
+	pgoff_t start, index, end;
+	int error;
+	u32 hash;
+
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		return hugetlbfs_punch_hole(inode, offset, len);
+
+	/*
+	 * Default preallocate case.
+	 * For this range, start is rounded down and end is rounded up
+	 * as well as being converted to page offsets.
+	 */
+	start = offset >> hpage_shift;
+	end = (offset + len + hpage_size - 1) >> hpage_shift;
+
+	mutex_lock(&inode->i_mutex);
+
+	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+	error = inode_newsize_ok(inode, offset + len);
+	if (error)
+		goto out;
+
+	/*
+	 * Initialize a pseudo vma as this is required by the huge page
+	 * allocation routines.  If NUMA is configured, use page index
+	 * as input to create an allocation policy.
+	 */
+	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+	pseudo_vma.vm_file = file;
+
+	for (index = start; index < end; index++) {
+		/*
+		 * This is supposed to be the vaddr where the page is being
+		 * faulted in, but we have no vaddr here.
+		 */
+		struct page *page;
+		unsigned long addr;
+		int avoid_reserve = 0;
+
+		cond_resched();
+
+		/*
+		 * fallocate(2) manpage permits EINTR; we may have been
+		 * interrupted because we are using up too much memory.
+		 */
+		if (signal_pending(current)) {
+			error = -EINTR;
+			break;
+		}
+
+		/* Set numa allocation policy based on index */
+		hugetlb_set_vma_policy(&pseudo_vma, inode, index);
+
+		/* addr is the offset within the file (zero based) */
+		addr = index * hpage_size;
+
+		/* mutex taken here, fault path and hole punch */
+		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
+						index, addr);
+		mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+		/* See if already present in mapping to avoid alloc/free */
+		page = find_get_page(mapping, index);
+		if (page) {
+			put_page(page);
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			hugetlb_drop_vma_policy(&pseudo_vma);
+			continue;
+		}
+
+		/* Allocate page and add to page cache */
+		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
+		hugetlb_drop_vma_policy(&pseudo_vma);
+		if (IS_ERR(page)) {
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			error = PTR_ERR(page);
+			goto out;
+		}
+		clear_huge_page(page, addr, pages_per_huge_page(h));
+		__SetPageUptodate(page);
+		error = huge_add_to_page_cache(page, mapping, index);
+		if (unlikely(error)) {
+			put_page(page);
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			goto out;
+		}
+
+		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
+		/*
+		 * put_page due to the reference from alloc_huge_page();
+		 * unlock_page because it was locked by add_to_page_cache()
+		 */
+		put_page(page);
+		unlock_page(page);
+	}
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+		i_size_write(inode, offset + len);
+	inode->i_ctime = CURRENT_TIME;
+	spin_lock(&inode->i_lock);
+	inode->i_private = NULL;
+	spin_unlock(&inode->i_lock);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return error;
+}
+
 static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
@@ -701,7 +966,8 @@
 	.mmap			= hugetlbfs_file_mmap,
 	.fsync			= noop_fsync,
 	.get_unmapped_area	= hugetlb_get_unmapped_area,
-	.llseek		= default_llseek,
+	.llseek			= default_llseek,
+	.fallocate		= hugetlbfs_fallocate,
 };
 
 static const struct inode_operations hugetlbfs_dir_inode_operations = {
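
With hugetlbfs_fallocate() wired into the file operations above, huge pages can be preallocated or hole-punched without mapping the file. Note the kernel rounds a hole's start up and its end down to huge page boundaries, so a punch smaller than one huge page is a no-op. A userspace sketch, assuming a hugetlbfs mount at the hypothetical path /dev/hugepages and 2 MB huge pages:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical file on a hugetlbfs mount. */
        int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);

        if (fd < 0)
            return 1;
        if (fallocate(fd, 0, 0, 4L << 20))          /* preallocate two pages */
            perror("preallocate");
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      0, 2L << 20))                 /* punch the first page */
            perror("punch");
        close(fd);
        return 0;
    }
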
diff --git a/fs/inode.c b/fs/inode.c
index d30640f..78a17b8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -28,16 +28,16 @@
  *   inode->i_state, inode->i_hash, __iget()
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
- * inode_sb_list_lock protects:
- *   sb->s_inodes, inode->i_sb_list
+ * inode->i_sb->s_inode_list_lock protects:
+ *   inode->i_sb->s_inodes, inode->i_sb_list
  * bdi->wb.list_lock protects:
- *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
+ *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
  * inode_hash_lock protects:
  *   inode_hashtable, inode->i_hash
  *
  * Lock ordering:
  *
- * inode_sb_list_lock
+ * inode->i_sb->s_inode_list_lock
  *   inode->i_lock
  *     Inode LRU list locks
  *
@@ -45,7 +45,7 @@
  *   inode->i_lock
  *
  * inode_hash_lock
- *   inode_sb_list_lock
+ *   inode->i_sb->s_inode_list_lock
  *   inode->i_lock
  *
  * iunique_lock
@@ -57,8 +57,6 @@
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
-
 /*
  * Empty aops. Can be used for the cases where the user does not
  * define any of the address_space operations.
@@ -359,7 +357,7 @@
 	memset(inode, 0, sizeof(*inode));
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_devices);
-	INIT_LIST_HEAD(&inode->i_wb_list);
+	INIT_LIST_HEAD(&inode->i_io_list);
 	INIT_LIST_HEAD(&inode->i_lru);
 	address_space_init_once(&inode->i_data);
 	i_size_ordered_init(inode);
@@ -426,18 +424,18 @@
  */
 void inode_sb_list_add(struct inode *inode)
 {
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&inode->i_sb->s_inode_list_lock);
 	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&inode->i_sb->s_inode_list_lock);
 }
 EXPORT_SYMBOL_GPL(inode_sb_list_add);
 
 static inline void inode_sb_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_sb_list)) {
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&inode->i_sb->s_inode_list_lock);
 		list_del_init(&inode->i_sb_list);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&inode->i_sb->s_inode_list_lock);
 	}
 }
 
@@ -527,8 +525,8 @@
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(!list_empty(&inode->i_lru));
 
-	if (!list_empty(&inode->i_wb_list))
-		inode_wb_list_del(inode);
+	if (!list_empty(&inode->i_io_list))
+		inode_io_list_del(inode);
 
 	inode_sb_list_del(inode);
 
@@ -577,6 +575,7 @@
 		list_del_init(&inode->i_lru);
 
 		evict(inode);
+		cond_resched();
 	}
 }
 
@@ -594,7 +593,8 @@
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_sb_list_lock);
+again:
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
 			continue;
@@ -609,8 +609,20 @@
 		inode_lru_list_del(inode);
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
+
+		/*
+		 * We can have a ton of inodes to evict at unmount time given
+		 * enough memory; check to see if we need to go to sleep for a
+		 * bit so we don't livelock.
+		 */
+		if (need_resched()) {
+			spin_unlock(&sb->s_inode_list_lock);
+			cond_resched();
+			dispose_list(&dispose);
+			goto again;
+		}
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 
 	dispose_list(&dispose);
 }
@@ -631,7 +643,7 @@
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
@@ -654,7 +666,7 @@
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 
 	dispose_list(&dispose);
 
@@ -890,7 +902,7 @@
 {
 	struct inode *inode;
 
-	spin_lock_prefetch(&inode_sb_list_lock);
+	spin_lock_prefetch(&sb->s_inode_list_lock);
 
 	inode = new_inode_pseudo(sb);
 	if (inode)
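
The evict_inodes() change above avoids a livelock on huge inode lists by dropping s_inode_list_lock when need_resched() fires, disposing of the batch collected so far, and restarting the walk. A userspace model of that lock-drop-and-batch pattern, with a pthread mutex standing in for the spinlock (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void dispose(int n)
    {
        /* Work that must happen without the list lock held. */
        printf("disposing %d items\n", n);
    }

    static void evict_all(int total)
    {
        int batched = 0, i = 0;

        pthread_mutex_lock(&list_lock);
        while (i < total) {
            i++;
            batched++;
            if (batched == 8) {    /* models need_resched() firing */
                pthread_mutex_unlock(&list_lock);
                dispose(batched);                  /* dispose_list() */
                batched = 0;
                pthread_mutex_lock(&list_lock);    /* "goto again" */
            }
        }
        pthread_mutex_unlock(&list_lock);
        dispose(batched);
    }

    int main(void)
    {
        evict_all(20);
        return 0;
    }
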
diff --git a/fs/internal.h b/fs/internal.h
index 4d5af58..71859c4d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,14 +112,13 @@
 /*
  * inode.c
  */
-extern spinlock_t inode_sb_list_lock;
 extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
 extern void inode_add_lru(struct inode *inode);
 
 /*
  * fs-writeback.c
  */
-extern void inode_wb_list_del(struct inode *inode);
+extern void inode_io_list_del(struct inode *inode);
 
 extern long get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
deleted file mode 100644
index 4e28bee..0000000
--- a/fs/jbd/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
-config JBD
-	tristate
-	help
-	  This is a generic journalling layer for block devices.  It is
-	  currently used by the ext3 file system, but it could also be
-	  used to add journal support to other file systems or block
-	  devices such as RAID or LVM.
-
-	  If you are using the ext3 file system, you need to say Y here.
-	  If you are not using ext3 then you will probably want to say N.
-
-	  To compile this device as a module, choose M here: the module will be
-	  called jbd.  If you are compiling ext3 into the kernel, you
-	  cannot compile this code as a module.
-
-config JBD_DEBUG
-	bool "JBD (ext3) debugging support"
-	depends on JBD && DEBUG_FS
-	help
-	  If you are using the ext3 journaled file system (or potentially any
-	  other file system/device using JBD), this option allows you to
-	  enable debugging output while the system is running, in order to
-	  help track down any problems you are having.  By default the
-	  debugging output will be turned off.
-
-	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd/jbd-debug", where N is a
-	  number between 1 and 5, the higher the number, the more debugging
-	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd/jbd-debug".
diff --git a/fs/jbd/Makefile b/fs/jbd/Makefile
deleted file mode 100644
index 54aca48..0000000
--- a/fs/jbd/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux journaling routines.
-#
-
-obj-$(CONFIG_JBD) += jbd.o
-
-jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
deleted file mode 100644
index 08c0304..0000000
--- a/fs/jbd/checkpoint.c
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * linux/fs/jbd/checkpoint.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Checkpoint routines for the generic filesystem journaling code.
- * Part of the ext2fs journaling system.
- *
- * Checkpointing is the process of ensuring that a section of the log is
- * committed fully to disk, so that that portion of the log can be
- * reused.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Unlink a buffer from a transaction checkpoint list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink_first(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	jh->b_cpnext->b_cpprev = jh->b_cpprev;
-	jh->b_cpprev->b_cpnext = jh->b_cpnext;
-	if (transaction->t_checkpoint_list == jh) {
-		transaction->t_checkpoint_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_list == jh)
-			transaction->t_checkpoint_list = NULL;
-	}
-}
-
-/*
- * Unlink a buffer from a transaction checkpoint(io) list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-	if (transaction->t_checkpoint_io_list == jh) {
-		transaction->t_checkpoint_io_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_io_list == jh)
-			transaction->t_checkpoint_io_list = NULL;
-	}
-}
-
-/*
- * Move a buffer from the checkpoint list to the checkpoint io list
- *
- * Called with j_list_lock held
- */
-static inline void __buffer_relink_io(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-
-	if (!transaction->t_checkpoint_io_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_io_list;
-		jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_io_list = jh;
-}
-
-/*
- * Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it and 2 if we also released the
- * whole transaction.
- *
- * Requires j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __try_to_free_cp_buf(struct journal_head *jh)
-{
-	int ret = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
-	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
-		/*
-		 * Get our reference so that bh cannot be freed before
-		 * we unlock it
-		 */
-		get_bh(bh);
-		JBUFFER_TRACE(jh, "remove from checkpoint list");
-		ret = __journal_remove_checkpoint(jh) + 1;
-		jbd_unlock_bh_state(bh);
-		BUFFER_TRACE(bh, "release");
-		__brelse(bh);
-	} else {
-		jbd_unlock_bh_state(bh);
-	}
-	return ret;
-}
-
-/*
- * __log_wait_for_space: wait until there is space in the journal.
- *
- * Called under j-state_lock *only*.  It will be unlocked if we have to wait
- * for a checkpoint to free up some space in the log.
- */
-void __log_wait_for_space(journal_t *journal)
-{
-	int nblocks, space_left;
-	assert_spin_locked(&journal->j_state_lock);
-
-	nblocks = jbd_space_needed(journal);
-	while (__log_space_left(journal) < nblocks) {
-		if (journal->j_flags & JFS_ABORT)
-			return;
-		spin_unlock(&journal->j_state_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-
-		/*
-		 * Test again, another process may have checkpointed while we
-		 * were waiting for the checkpoint lock. If there are no
-		 * transactions ready to be checkpointed, try to recover
-		 * journal space by calling cleanup_journal_tail(), and if
-		 * that doesn't work, by waiting for the currently committing
-		 * transaction to complete.  If there is absolutely no way
-		 * to make progress, this is either a BUG or corrupted
-		 * filesystem, so abort the journal and leave a stack
-		 * trace for forensic evidence.
-		 */
-		spin_lock(&journal->j_state_lock);
-		spin_lock(&journal->j_list_lock);
-		nblocks = jbd_space_needed(journal);
-		space_left = __log_space_left(journal);
-		if (space_left < nblocks) {
-			int chkpt = journal->j_checkpoint_transactions != NULL;
-			tid_t tid = 0;
-
-			if (journal->j_committing_transaction)
-				tid = journal->j_committing_transaction->t_tid;
-			spin_unlock(&journal->j_list_lock);
-			spin_unlock(&journal->j_state_lock);
-			if (chkpt) {
-				log_do_checkpoint(journal);
-			} else if (cleanup_journal_tail(journal) == 0) {
-				/* We were able to recover space; yay! */
-				;
-			} else if (tid) {
-				log_wait_commit(journal, tid);
-			} else {
-				printk(KERN_ERR "%s: needed %d blocks and "
-				       "only had %d space available\n",
-				       __func__, nblocks, space_left);
-				printk(KERN_ERR "%s: no way to get more "
-				       "journal space\n", __func__);
-				WARN_ON(1);
-				journal_abort(journal, 0);
-			}
-			spin_lock(&journal->j_state_lock);
-		} else {
-			spin_unlock(&journal->j_list_lock);
-		}
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-}
-
-/*
- * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
- * The caller must restart a list walk.  Wait for someone else to run
- * jbd_unlock_bh_state().
- */
-static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
-	__releases(journal->j_list_lock)
-{
-	get_bh(bh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_lock_bh_state(bh);
-	jbd_unlock_bh_state(bh);
-	put_bh(bh);
-}
-
-/*
- * Clean up transaction's list of buffers submitted for io.
- * We wait for any pending IO to complete and remove any clean
- * buffers. Note that we take the buffers in the opposite ordering
- * from the one in which they were submitted for IO.
- *
- * Return 0 on success, and return <0 if some buffers have failed
- * to be written out.
- *
- * Called with j_list_lock held.
- */
-static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	tid_t this_tid;
-	int released = 0;
-	int ret = 0;
-
-	this_tid = transaction->t_tid;
-restart:
-	/* Did somebody clean up the transaction in the meanwhile? */
-	if (journal->j_checkpoint_transactions != transaction ||
-			transaction->t_tid != this_tid)
-		return ret;
-	while (!released && transaction->t_checkpoint_io_list) {
-		jh = transaction->t_checkpoint_io_list;
-		bh = jh2bh(jh);
-		if (!jbd_trylock_bh_state(bh)) {
-			jbd_sync_bh(journal, bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			wait_on_buffer(bh);
-			/* the journal_head may have gone by now */
-			BUFFER_TRACE(bh, "brelse");
-			__brelse(bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-
-		/*
-		 * Now in whatever state the buffer currently is, we know that
-		 * it has been written out and so we can drop it from the list
-		 */
-		released = __journal_remove_checkpoint(jh);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	}
-
-	return ret;
-}
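
Note the revalidation at the restart label: whenever j_list_lock is dropped, the transaction may be freed and its memory reused, so the walk rechecks both the list head pointer and the remembered tid before touching anything. A tiny sketch of that identity check, with a hypothetical txn type:

    struct txn { unsigned int tid; };

    /* A pointer match alone is unsafe after sleeping: the slab may have
     * been reused for a new transaction at the same address. */
    static int still_same_txn(const struct txn *head, const struct txn *t,
                              unsigned int remembered_tid)
    {
            return head == t && t->tid == remembered_tid;
    }
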
-
-#define NR_BATCH	64
-
-static void
-__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
-{
-	int i;
-	struct blk_plug plug;
-
-	blk_start_plug(&plug);
-	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(bhs[i], WRITE_SYNC);
-	blk_finish_plug(&plug);
-
-	for (i = 0; i < *batch_count; i++) {
-		struct buffer_head *bh = bhs[i];
-		clear_buffer_jwrite(bh);
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	*batch_count = 0;
-}
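
Buffers are written in batches of NR_BATCH under a block plug so the I/O scheduler sees them together rather than as 64 separate submissions. The accumulate-then-flush shape, sketched with an assumed expensive submit() call:

    #define BATCH 64

    struct item;
    void submit(struct item *it);       /* assumed costly per invocation */

    /* Stage work into a fixed array; flush once per BATCH items so the
     * expensive submission step is amortized. */
    static void stage(struct item **batch, int *count, struct item *it)
    {
            batch[(*count)++] = it;
            if (*count == BATCH) {
                    for (int i = 0; i < *count; i++)
                            submit(batch[i]);
                    *count = 0;
            }
    }
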
-
-/*
- * Try to flush one buffer from the checkpoint list to disk.
- *
- * Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.  Return <0 if the buffer has failed to
- * be written out.
- *
- * Called with j_list_lock held and drops it if 1 is returned
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __process_buffer(journal_t *journal, struct journal_head *jh,
-			struct buffer_head **bhs, int *batch_count)
-{
-	struct buffer_head *bh = jh2bh(jh);
-	int ret = 0;
-
-	if (buffer_locked(bh)) {
-		get_bh(bh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		wait_on_buffer(bh);
-		/* the journal_head may have gone by now */
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-		ret = 1;
-	} else if (jh->b_transaction != NULL) {
-		transaction_t *t = jh->b_transaction;
-		tid_t tid = t->t_tid;
-
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		log_start_commit(journal, tid);
-		log_wait_commit(journal, tid);
-		ret = 1;
-	} else if (!buffer_dirty(bh)) {
-		ret = 1;
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-		get_bh(bh);
-		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
-		BUFFER_TRACE(bh, "remove from checkpoint");
-		__journal_remove_checkpoint(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	} else {
-		/*
-		 * Important: we are about to write the buffer, and
-		 * possibly block, while still holding the journal lock.
-		 * We cannot afford to let the transaction logic start
-		 * messing around with this buffer before we write it to
-		 * disk, as that would break recoverability.
-		 */
-		BUFFER_TRACE(bh, "queue");
-		get_bh(bh);
-		J_ASSERT_BH(bh, !buffer_jwrite(bh));
-		set_buffer_jwrite(bh);
-		bhs[*batch_count] = bh;
-		__buffer_relink_io(jh);
-		jbd_unlock_bh_state(bh);
-		(*batch_count)++;
-		if (*batch_count == NR_BATCH) {
-			spin_unlock(&journal->j_list_lock);
-			__flush_batch(journal, bhs, batch_count);
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/*
- * Perform an actual checkpoint. We take the first transaction on the
- * list of transactions to be checkpointed and send all its buffers
- * to disk. We submit larger chunks of data at once.
- *
- * The journal should be locked before calling this function.
- * Called with j_checkpoint_mutex held.
- */
-int log_do_checkpoint(journal_t *journal)
-{
-	transaction_t *transaction;
-	tid_t this_tid;
-	int result;
-
-	jbd_debug(1, "Start checkpoint\n");
-
-	/*
-	 * First thing: if there are any transactions in the log which
-	 * don't need checkpointing, just eliminate them from the
-	 * journal straight away.
-	 */
-	result = cleanup_journal_tail(journal);
-	trace_jbd_checkpoint(journal, result);
-	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
-	if (result <= 0)
-		return result;
-
-	/*
-	 * OK, we need to start writing disk blocks.  Take one transaction
-	 * and write it.
-	 */
-	result = 0;
-	spin_lock(&journal->j_list_lock);
-	if (!journal->j_checkpoint_transactions)
-		goto out;
-	transaction = journal->j_checkpoint_transactions;
-	this_tid = transaction->t_tid;
-restart:
-	/*
-	 * If someone cleaned up this transaction while we slept, we're
-	 * done (maybe it's a new transaction, but it fell at the same
-	 * address).
-	 */
-	if (journal->j_checkpoint_transactions == transaction &&
-			transaction->t_tid == this_tid) {
-		int batch_count = 0;
-		struct buffer_head *bhs[NR_BATCH];
-		struct journal_head *jh;
-		int retry = 0, err;
-
-		while (!retry && transaction->t_checkpoint_list) {
-			struct buffer_head *bh;
-
-			jh = transaction->t_checkpoint_list;
-			bh = jh2bh(jh);
-			if (!jbd_trylock_bh_state(bh)) {
-				jbd_sync_bh(journal, bh);
-				retry = 1;
-				break;
-			}
-			retry = __process_buffer(journal, jh, bhs, &batch_count);
-			if (retry < 0 && !result)
-				result = retry;
-			if (!retry && (need_resched() ||
-				spin_needbreak(&journal->j_list_lock))) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-				break;
-			}
-		}
-
-		if (batch_count) {
-			if (!retry) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-			}
-			__flush_batch(journal, bhs, &batch_count);
-		}
-
-		if (retry) {
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		/*
-		 * Now we have cleaned up the first transaction's checkpoint
-		 * list. Let's clean up the second one
-		 */
-		err = __wait_cp_io(journal, transaction);
-		if (!result)
-			result = err;
-	}
-out:
-	spin_unlock(&journal->j_list_lock);
-	if (result < 0)
-		journal_abort(journal, result);
-	else
-		result = cleanup_journal_tail(journal);
-
-	return (result < 0) ? result : 0;
-}
-
-/*
- * Check the list of checkpoint transactions for the journal to see if
- * we have already got rid of any since the last update of the log tail
- * in the journal superblock.  If so, we can instantly roll the
- * superblock forward to remove those transactions from the log.
- *
- * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
- *
- * This is the only part of the journaling code which really needs to be
- * aware of transaction aborts.  Checkpointing involves writing to the
- * main filesystem area rather than to the journal, so it can proceed
- * even in abort state, but we must not update the super block if
- * checkpointing may have failed.  Otherwise, we would lose some metadata
- * buffers which should be written-back to the filesystem.
- */
-
-int cleanup_journal_tail(journal_t *journal)
-{
-	transaction_t * transaction;
-	tid_t		first_tid;
-	unsigned int	blocknr, freed;
-
-	if (is_journal_aborted(journal))
-		return 1;
-
-	/*
-	 * OK, work out the oldest transaction remaining in the log, and
-	 * the log block it starts at.
-	 *
-	 * If the log is now empty, we need to work out which is the
-	 * next transaction ID we will write, and where it will
-	 * start.
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	transaction = journal->j_checkpoint_transactions;
-	if (transaction) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_committing_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_running_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = journal->j_head;
-	} else {
-		first_tid = journal->j_transaction_sequence;
-		blocknr = journal->j_head;
-	}
-	spin_unlock(&journal->j_list_lock);
-	J_ASSERT(blocknr != 0);
-
-	/* If the oldest pinned transaction is at the tail of the log
-           already then there's not much we can do right now. */
-	if (journal->j_tail_sequence == first_tid) {
-		spin_unlock(&journal->j_state_lock);
-		return 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-
-	/*
-	 * We need to make sure that any blocks that were recently written out
-	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
-		 * drop the transactions from the journal. Similarly we need to
-		 * be sure the superblock makes it to disk before the next
-		 * transaction starts reusing freed space (otherwise we could
-		 * replay some blocks of the new transaction thinking they
-		 * belong to the old one). So we use
-	 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
-	 * with an appropriately sized journal, but we need this to guarantee
-	 * correctness.  Fortunately cleanup_journal_tail() doesn't get called
-	 * all that often.
-	 */
-	journal_update_sb_log_tail(journal, first_tid, blocknr,
-				   WRITE_FLUSH_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* OK, update the superblock to recover the freed space.
-	 * Physical blocks come first: have we wrapped beyond the end of
-	 * the log?  */
-	freed = blocknr - journal->j_tail;
-	if (blocknr < journal->j_tail)
-		freed = freed + journal->j_last - journal->j_first;
-
-	trace_jbd_cleanup_journal_tail(journal, first_tid, blocknr, freed);
-	jbd_debug(1,
-		  "Cleaning journal tail from %d to %d (offset %u), "
-		  "freeing %u\n",
-		  journal->j_tail_sequence, first_tid, blocknr, freed);
-
-	journal->j_free += freed;
-	journal->j_tail_sequence = first_tid;
-	journal->j_tail = blocknr;
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
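
The freed-space computation above has to cope with the log being circular: if the tail wrapped past j_last back to j_first, the plain difference goes "negative" and one full journal length is added back. The same arithmetic in isolation:

    /* Blocks freed when the tail of a circular log moves from old_tail
     * to new_tail; first/last bound the journal area (illustrative). */
    static unsigned int blocks_freed(unsigned int old_tail,
                                     unsigned int new_tail,
                                     unsigned int first, unsigned int last)
    {
            unsigned int freed = new_tail - old_tail;

            if (new_tail < old_tail)        /* the tail wrapped */
                    freed += last - first;
            return freed;
    }
    /* e.g. first=1, last=101, old_tail=90, new_tail=10: the unsigned
     * subtraction wraps, and adding 100 yields the expected 20. */
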
-
-
-/* Checkpoint list management */
-
-/*
- * journal_clean_one_cp_list
- *
- * Find all the written-back checkpoint buffers in the given list and release
- * them.
- *
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
-{
-	struct journal_head *last_jh;
-	struct journal_head *next_jh = jh;
-	int ret, freed = 0;
-
-	*released = 0;
-	if (!jh)
-		return 0;
-
-	last_jh = jh->b_cpprev;
-	do {
-		jh = next_jh;
-		next_jh = jh->b_cpnext;
-		/* Use trylock because of the ranking */
-		if (jbd_trylock_bh_state(jh2bh(jh))) {
-			ret = __try_to_free_cp_buf(jh);
-			if (ret) {
-				freed++;
-				if (ret == 2) {
-					*released = 1;
-					return freed;
-				}
-			}
-		}
-		/*
-		 * This function only frees up some memory
-		 * if possible so we don't have an obligation
-		 * to finish processing. Bail out if preemption
-		 * requested:
-		 */
-		if (need_resched())
-			return freed;
-	} while (jh != last_jh);
-
-	return freed;
-}
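
Two details of this walk deserve a note: b_cpnext is sampled before the buffer is processed, because __try_to_free_cp_buf() may free the journal_head, and the loop is bounded by the predecessor of the start node taken up front. The shape of that traversal, with a hypothetical node type and a visit() that reports when the whole ring was torn down:

    struct cnode { struct cnode *next, *prev; };
    int visit(struct cnode *n);   /* may free n; nonzero if ring is gone */

    static void walk_once_around(struct cnode *start)
    {
            struct cnode *last = start->prev;   /* fixed endpoint */
            struct cnode *n, *next = start;

            do {
                    n = next;
                    next = n->next;     /* read before n can vanish */
                    if (visit(n))
                            return;     /* list torn down: stop */
            } while (n != last);
    }
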
-
-/*
- * journal_clean_checkpoint_list
- *
- * Find all the written-back checkpoint buffers in the journal and release them.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-int __journal_clean_checkpoint_list(journal_t *journal)
-{
-	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret = 0;
-	int released;
-
-	transaction = journal->j_checkpoint_transactions;
-	if (!transaction)
-		goto out;
-
-	last_transaction = transaction->t_cpprev;
-	next_transaction = transaction;
-	do {
-		transaction = next_transaction;
-		next_transaction = transaction->t_cpnext;
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_list, &released);
-		/*
-		 * This function only frees up some memory if possible so we
-		 * don't have an obligation to finish processing. Bail out if
-		 * preemption requested:
-		 */
-		if (need_resched())
-			goto out;
-		if (released)
-			continue;
-		/*
-		 * It is essential that we are as careful when removing a
-		 * buffer from this list as in the t_checkpoint_list case,
-		 * since we may see buffers on the io_list that have not
-		 * yet been submitted
-		 */
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, &released);
-		if (need_resched())
-			goto out;
-	} while (transaction != last_transaction);
-out:
-	return ret;
-}
-
-/*
- * journal_remove_checkpoint: called after a buffer has been committed
- * to disk (either by being write-back flushed to disk, or being
- * committed to the log).
- *
- * We cannot safely clean a transaction out of the log until all of the
- * buffer updates committed in that transaction have safely been stored
- * elsewhere on disk.  To achieve this, all of the buffers in a
- * transaction need to be maintained on the transaction's checkpoint
- * lists until they have been rewritten, at which point this function is
- * called to remove the buffer from the existing transaction's
- * checkpoint lists.
- *
- * The function returns 1 if it frees the transaction, 0 otherwise.
- * The function can free jh and bh.
- *
- * This function is called with j_list_lock held.
- * This function is called with jbd_lock_bh_state(jh2bh(jh))
- */
-
-int __journal_remove_checkpoint(struct journal_head *jh)
-{
-	transaction_t *transaction;
-	journal_t *journal;
-	int ret = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	if ((transaction = jh->b_cp_transaction) == NULL) {
-		JBUFFER_TRACE(jh, "not on transaction");
-		goto out;
-	}
-	journal = transaction->t_journal;
-
-	JBUFFER_TRACE(jh, "removing from transaction");
-	__buffer_unlink(jh);
-	jh->b_cp_transaction = NULL;
-	journal_put_journal_head(jh);
-
-	if (transaction->t_checkpoint_list != NULL ||
-	    transaction->t_checkpoint_io_list != NULL)
-		goto out;
-
-	/*
-	 * There is one special case to worry about: if we have just pulled the
-	 * buffer off a running or committing transaction's checkpoint list,
-	 * then even if the checkpoint list is empty, the transaction obviously
-	 * cannot be dropped!
-	 *
-	 * The locking here around t_state is a bit sleazy.
-	 * See the comment at the end of journal_commit_transaction().
-	 */
-	if (transaction->t_state != T_FINISHED)
-		goto out;
-
-	/* OK, that was the last buffer for the transaction: we can now
-	   safely remove this transaction from the log */
-
-	__journal_drop_transaction(journal, transaction);
-
-	/* Just in case anybody was waiting for more transactions to be
-           checkpointed... */
-	wake_up(&journal->j_wait_logspace);
-	ret = 1;
-out:
-	return ret;
-}
-
-/*
- * journal_insert_checkpoint: put a committed buffer onto a checkpoint
- * list so that we know when it is safe to clean the transaction out of
- * the log.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-void __journal_insert_checkpoint(struct journal_head *jh,
-			       transaction_t *transaction)
-{
-	JBUFFER_TRACE(jh, "entry");
-	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-
-	/* Get reference for checkpointing transaction */
-	journal_grab_journal_head(jh2bh(jh));
-	jh->b_cp_transaction = transaction;
-
-	if (!transaction->t_checkpoint_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_list;
-		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_list = jh;
-}
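
The checkpoint list is a circular doubly-linked list threaded through the journal_heads themselves. The splice above has the usual two cases, shown here on a stripped-down node type:

    struct cnode { struct cnode *next, *prev; };

    /* Insert n before the current head; an empty ring becomes a
     * self-linked single node. Mirrors the b_cpnext/b_cpprev splice. */
    static void ring_insert(struct cnode **head, struct cnode *n)
    {
            if (!*head) {
                    n->next = n->prev = n;
            } else {
                    n->next = *head;
                    n->prev = (*head)->prev;
                    n->prev->next = n;
                    n->next->prev = n;
            }
            *head = n;
    }
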
-
-/*
- * We've finished with this transaction structure: adios...
- *
- * The transaction must have no links except for the checkpoint by this
- * point.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-
-void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
-{
-	assert_spin_locked(&journal->j_list_lock);
-	if (transaction->t_cpnext) {
-		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
-		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions =
-				transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions = NULL;
-	}
-
-	J_ASSERT(transaction->t_state == T_FINISHED);
-	J_ASSERT(transaction->t_buffers == NULL);
-	J_ASSERT(transaction->t_sync_datalist == NULL);
-	J_ASSERT(transaction->t_forget == NULL);
-	J_ASSERT(transaction->t_iobuf_list == NULL);
-	J_ASSERT(transaction->t_shadow_list == NULL);
-	J_ASSERT(transaction->t_log_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
-	J_ASSERT(transaction->t_updates == 0);
-	J_ASSERT(journal->j_committing_transaction != transaction);
-	J_ASSERT(journal->j_running_transaction != transaction);
-
-	trace_jbd_drop_transaction(journal, transaction);
-	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
-	kfree(transaction);
-}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
deleted file mode 100644
index bb217dc..0000000
--- a/fs/jbd/commit.c
+++ /dev/null
@@ -1,1021 +0,0 @@
-/*
- * linux/fs/jbd/commit.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal commit routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Default IO end handler for temporary BJ_IO buffer_heads.
- */
-static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
-{
-	BUFFER_TRACE(bh, "");
-	if (uptodate)
-		set_buffer_uptodate(bh);
-	else
-		clear_buffer_uptodate(bh);
-	unlock_buffer(bh);
-}
-
-/*
- * When an ext3-ordered file is truncated, it is possible that many pages are
- * not successfully freed, because they are attached to a committing transaction.
- * After the transaction commits, these pages are left on the LRU, with no
- * ->mapping, and with attached buffers.  These pages are trivially reclaimable
- * by the VM, but their apparent absence upsets the VM accounting, and it makes
- * the numbers in /proc/meminfo look odd.
- *
- * So here, we have a buffer which has just come off the forget list.  Look to
- * see if we can strip all buffers from the backing page.
- *
- * Called under journal->j_list_lock.  The caller provided us with a ref
- * against the buffer, and we drop that here.
- */
-static void release_buffer_page(struct buffer_head *bh)
-{
-	struct page *page;
-
-	if (buffer_dirty(bh))
-		goto nope;
-	if (atomic_read(&bh->b_count) != 1)
-		goto nope;
-	page = bh->b_page;
-	if (!page)
-		goto nope;
-	if (page->mapping)
-		goto nope;
-
-	/* OK, it's a truncated page */
-	if (!trylock_page(page))
-		goto nope;
-
-	page_cache_get(page);
-	__brelse(bh);
-	try_to_free_buffers(page);
-	unlock_page(page);
-	page_cache_release(page);
-	return;
-
-nope:
-	__brelse(bh);
-}
-
-/*
- * Decrement reference counter for data buffer. If it has been marked
- * 'BH_Freed', release it and the page to which it belongs if possible.
- */
-static void release_data_buffer(struct buffer_head *bh)
-{
-	if (buffer_freed(bh)) {
-		WARN_ON_ONCE(buffer_dirty(bh));
-		clear_buffer_freed(bh);
-		clear_buffer_mapped(bh);
-		clear_buffer_new(bh);
-		clear_buffer_req(bh);
-		bh->b_bdev = NULL;
-		release_buffer_page(bh);
-	} else
-		put_bh(bh);
-}
-
-/*
- * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
- * held.  For ranking reasons we must trylock.  If we lose, schedule away and
- * return 0.  j_list_lock is dropped in this case.
- */
-static int inverted_lock(journal_t *journal, struct buffer_head *bh)
-{
-	if (!jbd_trylock_bh_state(bh)) {
-		spin_unlock(&journal->j_list_lock);
-		schedule();
-		return 0;
-	}
-	return 1;
-}
-
-/* Done it all: now write the commit record.  We should have
- * cleaned up our previous buffers by now, so if we are in abort
- * mode we can now just skip the rest of the journal write
- * entirely.
- *
- * Returns 1 if the journal needs to be aborted or 0 on success
- */
-static int journal_write_commit_record(journal_t *journal,
-					transaction_t *commit_transaction)
-{
-	struct journal_head *descriptor;
-	struct buffer_head *bh;
-	journal_header_t *header;
-	int ret;
-
-	if (is_journal_aborted(journal))
-		return 0;
-
-	descriptor = journal_get_descriptor_buffer(journal);
-	if (!descriptor)
-		return 1;
-
-	bh = jh2bh(descriptor);
-
-	header = (journal_header_t *)(bh->b_data);
-	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
-	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
-	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
-
-	JBUFFER_TRACE(descriptor, "write commit block");
-	set_buffer_dirty(bh);
-
-	if (journal->j_flags & JFS_BARRIER)
-		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
-	else
-		ret = sync_dirty_buffer(bh);
-
-	put_bh(bh);		/* One for getblk() */
-	journal_put_journal_head(descriptor);
-
-	return (ret == -EIO);
-}
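
Every field of the commit block header is stored big-endian via cpu_to_be32(), so the on-disk image is identical regardless of host endianness. A userspace sketch of the same fill-in, assuming glibc's htobe32() as the analogue:

    #include <stdint.h>
    #include <string.h>
    #include <endian.h>   /* htobe32(); glibc-specific, illustrative */

    struct disk_header {          /* hypothetical mirror of journal_header_t */
            uint32_t magic;
            uint32_t blocktype;
            uint32_t sequence;
    };

    static void fill_header(void *block, uint32_t magic,
                            uint32_t type, uint32_t tid)
    {
            struct disk_header h = {
                    .magic     = htobe32(magic),
                    .blocktype = htobe32(type),
                    .sequence  = htobe32(tid),
            };
            memcpy(block, &h, sizeof(h));   /* header sits at block start */
    }
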
-
-static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
-				   int write_op)
-{
-	int i;
-
-	for (i = 0; i < bufs; i++) {
-		wbuf[i]->b_end_io = end_buffer_write_sync;
-		/*
-		 * Here we write back pagecache data that may be mmaped. Since
-		 * we cannot afford to clean the page and set PageWriteback
-		 * here due to lock ordering (page lock ranks above transaction
-		 * start), the data can change while IO is in flight. Tell the
-		 * block layer it should bounce the bio pages if stable data
-		 * during write is required.
-		 *
-		 * We use up our safety reference in submit_bh().
-		 */
-		_submit_bh(write_op, wbuf[i], 1 << BIO_SNAP_STABLE);
-	}
-}
-
-/*
- *  Submit all the data buffers to disk
- */
-static int journal_submit_data_buffers(journal_t *journal,
-				       transaction_t *commit_transaction,
-				       int write_op)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	int locked;
-	int bufs = 0;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int err = 0;
-
-	/*
-	 * Whenever we unlock the journal and sleep, things can get added
-	 * onto ->t_sync_datalist, so we have to keep looping back to
-	 * write_out_data until we *know* that the list is empty.
-	 *
-	 * Cleanup any flushed data buffers from the data list.  Even in
-	 * abort mode, we want to flush this out as soon as possible.
-	 */
-write_out_data:
-	cond_resched();
-	spin_lock(&journal->j_list_lock);
-
-	while (commit_transaction->t_sync_datalist) {
-		jh = commit_transaction->t_sync_datalist;
-		bh = jh2bh(jh);
-		locked = 0;
-
-		/* Get reference just to make sure buffer does not disappear
-		 * when we are forced to drop various locks */
-		get_bh(bh);
-		/* If the buffer is dirty, we need to submit IO and hence
-		 * we need the buffer lock. We try to lock the buffer without
-		 * blocking. If we fail, we need to drop j_list_lock and do
-		 * blocking lock_buffer().
-		 */
-		if (buffer_dirty(bh)) {
-			if (!trylock_buffer(bh)) {
-				BUFFER_TRACE(bh, "needs blocking lock");
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				/* Write out all data to prevent deadlocks */
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				lock_buffer(bh);
-				spin_lock(&journal->j_list_lock);
-			}
-			locked = 1;
-		}
-		/* We have to get bh_state lock. Again out of order, sigh. */
-		if (!inverted_lock(journal, bh)) {
-			jbd_lock_bh_state(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		/* Someone already cleaned up the buffer? */
-		if (!buffer_jbd(bh) || bh2jh(bh) != jh
-			|| jh->b_transaction != commit_transaction
-			|| jh->b_jlist != BJ_SyncData) {
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			BUFFER_TRACE(bh, "already cleaned up");
-			release_data_buffer(bh);
-			continue;
-		}
-		if (locked && test_clear_buffer_dirty(bh)) {
-			BUFFER_TRACE(bh, "needs writeout, adding to array");
-			wbuf[bufs++] = bh;
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			if (bufs == journal->j_wbufsize) {
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				goto write_out_data;
-			}
-		} else if (!locked && buffer_locked(bh)) {
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			put_bh(bh);
-		} else {
-			BUFFER_TRACE(bh, "writeout complete: unfile");
-			if (unlikely(!buffer_uptodate(bh)))
-				err = -EIO;
-			__journal_unfile_buffer(jh);
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			release_data_buffer(bh);
-		}
-
-		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
-			spin_unlock(&journal->j_list_lock);
-			goto write_out_data;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-	trace_jbd_do_submit_data(journal, commit_transaction);
-	journal_do_submit_data(wbuf, bufs, write_op);
-
-	return err;
-}
-
-/*
- * journal_commit_transaction
- *
- * The primary function for committing a transaction to the log.  This
- * function is called by the journal thread to begin a complete commit.
- */
-void journal_commit_transaction(journal_t *journal)
-{
-	transaction_t *commit_transaction;
-	struct journal_head *jh, *new_jh, *descriptor;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int bufs;
-	int flags;
-	int err;
-	unsigned int blocknr;
-	ktime_t start_time;
-	u64 commit_time;
-	char *tagp = NULL;
-	journal_header_t *header;
-	journal_block_tag_t *tag = NULL;
-	int space_left = 0;
-	int first_tag = 0;
-	int tag_flag;
-	int i;
-	struct blk_plug plug;
-	int write_op = WRITE;
-
-	/*
-	 * First job: lock down the current transaction and wait for
-	 * all outstanding updates to complete.
-	 */
-
-	/* Do we need to erase the effects of a prior journal_flush? */
-	if (journal->j_flags & JFS_FLUSHED) {
-		jbd_debug(3, "super block updated\n");
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * We hold j_checkpoint_mutex so tail cannot change under us.
-		 * We don't need any special data guarantees for writing sb
-		 * since journal is empty and it is ok for write to be
-		 * flushed only with transaction commit.
-		 */
-		journal_update_sb_log_tail(journal, journal->j_tail_sequence,
-					   journal->j_tail, WRITE_SYNC);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	} else {
-		jbd_debug(3, "superblock not updated\n");
-	}
-
-	J_ASSERT(journal->j_running_transaction != NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-
-	commit_transaction = journal->j_running_transaction;
-
-	trace_jbd_start_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: starting commit of transaction %d\n",
-			commit_transaction->t_tid);
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_RUNNING);
-	commit_transaction->t_state = T_LOCKED;
-
-	trace_jbd_commit_locking(journal, commit_transaction);
-	spin_lock(&commit_transaction->t_handle_lock);
-	while (commit_transaction->t_updates) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-					TASK_UNINTERRUPTIBLE);
-		if (commit_transaction->t_updates) {
-			spin_unlock(&commit_transaction->t_handle_lock);
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-			spin_lock(&commit_transaction->t_handle_lock);
-		}
-		finish_wait(&journal->j_wait_updates, &wait);
-	}
-	spin_unlock(&commit_transaction->t_handle_lock);
-
-	J_ASSERT (commit_transaction->t_outstanding_credits <=
-			journal->j_max_transaction_buffers);
-
-	/*
-	 * First thing we are allowed to do is to discard any remaining
-	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
-	 * that there are no such buffers: if a large filesystem
-	 * operation like a truncate needs to split itself over multiple
-	 * transactions, then it may try to do a journal_restart() while
-	 * there are still BJ_Reserved buffers outstanding.  These must
-	 * be released cleanly from the current transaction.
-	 *
-	 * In this case, the filesystem must still reserve write access
-	 * again before modifying the buffer in the new transaction, but
-	 * we do not require it to remember exactly which old buffers it
-	 * has reserved.  This is consistent with the existing behaviour
-	 * that multiple journal_get_write_access() calls to the same
-	 * buffer are perfectly permissible.
-	 */
-	while (commit_transaction->t_reserved_list) {
-		jh = commit_transaction->t_reserved_list;
-		JBUFFER_TRACE(jh, "reserved, unused: refile");
-		/*
-		 * A journal_get_undo_access()+journal_release_buffer() may
-		 * leave undo-committed data.
-		 */
-		if (jh->b_committed_data) {
-			struct buffer_head *bh = jh2bh(jh);
-
-			jbd_lock_bh_state(bh);
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			jbd_unlock_bh_state(bh);
-		}
-		journal_refile_buffer(journal, jh);
-	}
-
-	/*
-	 * Now try to drop any written-back buffers from the journal's
-	 * checkpoint lists.  We do this *before* commit because it potentially
-	 * frees some memory
-	 */
-	spin_lock(&journal->j_list_lock);
-	__journal_clean_checkpoint_list(journal);
-	spin_unlock(&journal->j_list_lock);
-
-	jbd_debug (3, "JBD: commit phase 1\n");
-
-	/*
-	 * Clear revoked flag to reflect there are no revoked buffers
-	 * in the next transaction which is going to be started.
-	 */
-	journal_clear_buffer_revoked_flags(journal);
-
-	/*
-	 * Switch to a new revoke table.
-	 */
-	journal_switch_revoke_table(journal);
-
-	trace_jbd_commit_flushing(journal, commit_transaction);
-	commit_transaction->t_state = T_FLUSH;
-	journal->j_committing_transaction = commit_transaction;
-	journal->j_running_transaction = NULL;
-	start_time = ktime_get();
-	commit_transaction->t_log_start = journal->j_head;
-	wake_up(&journal->j_wait_transaction_locked);
-	spin_unlock(&journal->j_state_lock);
-
-	jbd_debug (3, "JBD: commit phase 2\n");
-
-	if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
-		write_op = WRITE_SYNC;
-
-	/*
-	 * Now start flushing things to disk, in the order they appear
-	 * on the transaction lists.  Data blocks go first.
-	 */
-	blk_start_plug(&plug);
-	err = journal_submit_data_buffers(journal, commit_transaction,
-					  write_op);
-	blk_finish_plug(&plug);
-
-	/*
-	 * Wait for all previously submitted IO to complete.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_locked_list) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_locked_list->b_tprev;
-		bh = jh2bh(jh);
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			wait_on_buffer(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		if (unlikely(!buffer_uptodate(bh))) {
-			if (!trylock_page(bh->b_page)) {
-				spin_unlock(&journal->j_list_lock);
-				lock_page(bh->b_page);
-				spin_lock(&journal->j_list_lock);
-			}
-			if (bh->b_page->mapping)
-				set_bit(AS_EIO, &bh->b_page->mapping->flags);
-
-			unlock_page(bh->b_page);
-			SetPageError(bh->b_page);
-			err = -EIO;
-		}
-		if (!inverted_lock(journal, bh)) {
-			put_bh(bh);
-			spin_lock(&journal->j_list_lock);
-			continue;
-		}
-		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
-		    jh->b_transaction == commit_transaction &&
-		    jh->b_jlist == BJ_Locked)
-			__journal_unfile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		release_data_buffer(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (err) {
-		char b[BDEVNAME_SIZE];
-
-		printk(KERN_WARNING
-			"JBD: Detected IO errors while flushing file data "
-			"on %s\n", bdevname(journal->j_fs_dev, b));
-		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
-			journal_abort(journal, err);
-		err = 0;
-	}
-
-	blk_start_plug(&plug);
-
-	journal_write_revoke_records(journal, commit_transaction, write_op);
-
-	/*
-	 * If we found any dirty or locked buffers, then we should have
-	 * looped back up to the write_out_data label.  If there weren't
-	 * any then journal_clean_data_list should have wiped the list
-	 * clean by now, so check that it is in fact empty.
-	 */
-	J_ASSERT (commit_transaction->t_sync_datalist == NULL);
-
-	jbd_debug (3, "JBD: commit phase 3\n");
-
-	/*
-	 * Way to go: we have now written out all of the data for a
-	 * transaction!  Now comes the tricky part: we need to write out
-	 * metadata.  Loop over the transaction's entire buffer list:
-	 */
-	spin_lock(&journal->j_state_lock);
-	commit_transaction->t_state = T_COMMIT;
-	spin_unlock(&journal->j_state_lock);
-
-	trace_jbd_commit_logging(journal, commit_transaction);
-	J_ASSERT(commit_transaction->t_nr_buffers <=
-		 commit_transaction->t_outstanding_credits);
-
-	descriptor = NULL;
-	bufs = 0;
-	while (commit_transaction->t_buffers) {
-
-		/* Find the next buffer to be journaled... */
-
-		jh = commit_transaction->t_buffers;
-
-		/* If we're in abort mode, we just un-journal the buffer and
-		   release it. */
-
-		if (is_journal_aborted(journal)) {
-			clear_buffer_jbddirty(jh2bh(jh));
-			JBUFFER_TRACE(jh, "journal is aborting: refile");
-			journal_refile_buffer(journal, jh);
-			/* If that was the last one, we need to clean up
-			 * any descriptor buffers which may have been
-			 * already allocated, even if we are now
-			 * aborting. */
-			if (!commit_transaction->t_buffers)
-				goto start_journal_io;
-			continue;
-		}
-
-		/* Make sure we have a descriptor block in which to
-		   record the metadata buffer. */
-
-		if (!descriptor) {
-			struct buffer_head *bh;
-
-			J_ASSERT (bufs == 0);
-
-			jbd_debug(4, "JBD: get descriptor\n");
-
-			descriptor = journal_get_descriptor_buffer(journal);
-			if (!descriptor) {
-				journal_abort(journal, -EIO);
-				continue;
-			}
-
-			bh = jh2bh(descriptor);
-			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
-				(unsigned long long)bh->b_blocknr, bh->b_data);
-			header = (journal_header_t *)&bh->b_data[0];
-			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
-			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			space_left = bh->b_size - sizeof(journal_header_t);
-			first_tag = 1;
-			set_buffer_jwrite(bh);
-			set_buffer_dirty(bh);
-			wbuf[bufs++] = bh;
-
-			/* Record it so that we can wait for IO
-                           completion later */
-			BUFFER_TRACE(bh, "ph3: file as descriptor");
-			journal_file_buffer(descriptor, commit_transaction,
-					BJ_LogCtl);
-		}
-
-		/* Where is the buffer to be written? */
-
-		err = journal_next_log_block(journal, &blocknr);
-		/* If the block mapping failed, just abandon the buffer
-		   and repeat this loop: we'll fall into the
-		   refile-on-abort condition above. */
-		if (err) {
-			journal_abort(journal, err);
-			continue;
-		}
-
-		/*
-		 * start_this_handle() uses t_outstanding_credits to determine
-		 * the free space in the log, but this counter is changed
-		 * by journal_next_log_block() also.
-		 */
-		commit_transaction->t_outstanding_credits--;
-
-		/* Bump b_count to prevent truncate from stumbling over
-                   the shadowed buffer!  @@@ This can go if we ever get
-                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
-		get_bh(jh2bh(jh));
-
-		/* Make a temporary IO buffer with which to write it out
-                   (this will requeue both the metadata buffer and the
-                   temporary IO buffer). new_bh goes on BJ_IO */
-
-		set_buffer_jwrite(jh2bh(jh));
-		/*
-		 * akpm: journal_write_metadata_buffer() sets
-		 * new_bh->b_transaction to commit_transaction.
-		 * We need to clean this up before we release new_bh
-		 * (which is of type BJ_IO)
-		 */
-		JBUFFER_TRACE(jh, "ph3: write metadata");
-		flags = journal_write_metadata_buffer(commit_transaction,
-						      jh, &new_jh, blocknr);
-		set_buffer_jwrite(jh2bh(new_jh));
-		wbuf[bufs++] = jh2bh(new_jh);
-
-		/* Record the new block's tag in the current descriptor
-                   buffer */
-
-		tag_flag = 0;
-		if (flags & 1)
-			tag_flag |= JFS_FLAG_ESCAPE;
-		if (!first_tag)
-			tag_flag |= JFS_FLAG_SAME_UUID;
-
-		tag = (journal_block_tag_t *) tagp;
-		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
-		tag->t_flags = cpu_to_be32(tag_flag);
-		tagp += sizeof(journal_block_tag_t);
-		space_left -= sizeof(journal_block_tag_t);
-
-		if (first_tag) {
-			memcpy (tagp, journal->j_uuid, 16);
-			tagp += 16;
-			space_left -= 16;
-			first_tag = 0;
-		}
-
-		/* If there's no more to do, or if the descriptor is full,
-		   let the IO rip! */
-
-		if (bufs == journal->j_wbufsize ||
-		    commit_transaction->t_buffers == NULL ||
-		    space_left < sizeof(journal_block_tag_t) + 16) {
-
-			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
-
-			/* Write an end-of-descriptor marker before
-                           submitting the IOs.  "tag" still points to
-                           the last tag we set up. */
-
-			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
-
-start_journal_io:
-			for (i = 0; i < bufs; i++) {
-				struct buffer_head *bh = wbuf[i];
-				lock_buffer(bh);
-				clear_buffer_dirty(bh);
-				set_buffer_uptodate(bh);
-				bh->b_end_io = journal_end_buffer_io_sync;
-				/*
-				 * In data=journal mode, here we can end up
-				 * writing pagecache data that might be
-				 * mmapped. Since we can't afford to clean the
-				 * page and set PageWriteback (see the comment
-				 * near the other use of _submit_bh()), the
-				 * data can change while the write is in
-				 * flight.  Tell the block layer to bounce the
-				 * bio pages if stable pages are required.
-				 */
-				_submit_bh(write_op, bh, 1 << BIO_SNAP_STABLE);
-			}
-			cond_resched();
-
-			/* Force a new descriptor to be generated next
-                           time round the loop. */
-			descriptor = NULL;
-			bufs = 0;
-		}
-	}
-
-	blk_finish_plug(&plug);
-
-	/* Lo and behold: we have just managed to send a transaction to
-           the log.  Before we can commit it, wait for the IO so far to
-           complete.  Control buffers being written are on the
-           transaction's t_log_list queue, and metadata buffers are on
-           the t_iobuf_list queue.
-
-	   Wait for the buffers in reverse order.  That way we are
-	   less likely to be woken up until all IOs have completed, and
-	   so we incur less scheduling load.
-	*/
-
-	jbd_debug(3, "JBD: commit phase 4\n");
-
-	/*
-	 * akpm: these are BJ_IO, and j_list_lock is not needed.
-	 * See __journal_try_to_free_buffer.
-	 */
-wait_for_iobuf:
-	while (commit_transaction->t_iobuf_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_iobuf_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_iobuf;
-		}
-		if (cond_resched())
-			goto wait_for_iobuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		clear_buffer_jwrite(bh);
-
-		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-		journal_unfile_buffer(journal, jh);
-
-		/*
-		 * ->t_iobuf_list should contain only dummy buffer_heads
-		 * which were created by journal_write_metadata_buffer().
-		 */
-		BUFFER_TRACE(bh, "dumping temporary bh");
-		journal_put_journal_head(jh);
-		__brelse(bh);
-		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
-		free_buffer_head(bh);
-
-		/* We also have to unlock and free the corresponding
-                   shadowed buffer */
-		jh = commit_transaction->t_shadow_list->b_tprev;
-		bh = jh2bh(jh);
-		clear_buffer_jwrite(bh);
-		J_ASSERT_BH(bh, buffer_jbddirty(bh));
-
-		/* The metadata is now released for reuse, but we need
-                   to remember it against this transaction so that when
-                   we finally commit, we can do any checkpointing
-                   required. */
-		JBUFFER_TRACE(jh, "file as BJ_Forget");
-		journal_file_buffer(jh, commit_transaction, BJ_Forget);
-		/*
-		 * Wake up any transactions which were waiting for this
-		 * IO to complete. The barrier must be here so that changes
-		 * by journal_file_buffer() take effect before wake_up_bit()
-		 * does the waitqueue check.
-		 */
-		smp_mb();
-		wake_up_bit(&bh->b_state, BH_Unshadow);
-		JBUFFER_TRACE(jh, "brelse shadowed buffer");
-		__brelse(bh);
-	}
-
-	J_ASSERT (commit_transaction->t_shadow_list == NULL);
-
-	jbd_debug(3, "JBD: commit phase 5\n");
-
-	/* Here we wait for the revoke record and descriptor record buffers */
- wait_for_ctlbuf:
-	while (commit_transaction->t_log_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_log_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_ctlbuf;
-		}
-		if (cond_resched())
-			goto wait_for_ctlbuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
-		clear_buffer_jwrite(bh);
-		journal_unfile_buffer(journal, jh);
-		journal_put_journal_head(jh);
-		__brelse(bh);		/* One for getblk */
-		/* AKPM: bforget here */
-	}
-
-	if (err)
-		journal_abort(journal, err);
-
-	jbd_debug(3, "JBD: commit phase 6\n");
-
-	/* All metadata is written, now write commit record and do cleanup */
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_COMMIT);
-	commit_transaction->t_state = T_COMMIT_RECORD;
-	spin_unlock(&journal->j_state_lock);
-
-	if (journal_write_commit_record(journal, commit_transaction))
-		err = -EIO;
-
-	if (err)
-		journal_abort(journal, err);
-
-	/* End of a transaction!  Finally, we can do checkpoint
-           processing: any buffers committed as a result of this
-           transaction can be removed from any checkpoint list it was on
-           before. */
-
-	jbd_debug(3, "JBD: commit phase 7\n");
-
-	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
-	J_ASSERT(commit_transaction->t_buffers == NULL);
-	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
-	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
-	J_ASSERT(commit_transaction->t_shadow_list == NULL);
-	J_ASSERT(commit_transaction->t_log_list == NULL);
-
-restart_loop:
-	/*
-	 * As there are other places (journal_unmap_buffer()) adding buffers
-	 * to this list we have to be careful and hold the j_list_lock.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_forget) {
-		transaction_t *cp_transaction;
-		struct buffer_head *bh;
-		int try_to_free = 0;
-
-		jh = commit_transaction->t_forget;
-		spin_unlock(&journal->j_list_lock);
-		bh = jh2bh(jh);
-		/*
-		 * Get a reference so that bh cannot be freed before we are
-		 * done with it.
-		 */
-		get_bh(bh);
-		jbd_lock_bh_state(bh);
-		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
-			jh->b_transaction == journal->j_running_transaction);
-
-		/*
-		 * If there is undo-protected committed data against
-		 * this buffer, then we can remove it now.  If it is a
-		 * buffer needing such protection, the old frozen_data
-		 * field now points to a committed version of the
-		 * buffer, so rotate that field to the new committed
-		 * data.
-		 *
-		 * Otherwise, we can just throw away the frozen data now.
-		 */
-		if (jh->b_committed_data) {
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			if (jh->b_frozen_data) {
-				jh->b_committed_data = jh->b_frozen_data;
-				jh->b_frozen_data = NULL;
-			}
-		} else if (jh->b_frozen_data) {
-			jbd_free(jh->b_frozen_data, bh->b_size);
-			jh->b_frozen_data = NULL;
-		}
-
-		spin_lock(&journal->j_list_lock);
-		cp_transaction = jh->b_cp_transaction;
-		if (cp_transaction) {
-			JBUFFER_TRACE(jh, "remove from old cp transaction");
-			__journal_remove_checkpoint(jh);
-		}
-
-		/* Only re-checkpoint the buffer_head if it is marked
-		 * dirty.  If the buffer was added to the BJ_Forget list
-		 * by journal_forget, it may no longer be dirty and
-		 * there's no point in keeping a checkpoint record for
-		 * it. */
-
-		/*
-		 * A buffer which has been freed while still being journaled by
-		 * a previous transaction.
-		 */
-		if (buffer_freed(bh)) {
-			/*
-			 * If the running transaction is the one containing
-			 * "add to orphan" operation (b_next_transaction !=
-			 * NULL), we have to wait for that transaction to
-			 * commit before we can really get rid of the buffer.
-			 * So just clear b_modified to not confuse transaction
-			 * credit accounting and refile the buffer to
-			 * BJ_Forget of the running transaction. If the just
-			 * committed transaction contains "add to orphan"
-			 * operation, we can completely invalidate the buffer
-			 * now. We are rather thorough in that since the
-			 * buffer may still be accessible when blocksize <
-			 * pagesize and it is attached to the last partial
-			 * page.
-			 */
-			jh->b_modified = 0;
-			if (!jh->b_next_transaction) {
-				clear_buffer_freed(bh);
-				clear_buffer_jbddirty(bh);
-				clear_buffer_mapped(bh);
-				clear_buffer_new(bh);
-				clear_buffer_req(bh);
-				bh->b_bdev = NULL;
-			}
-		}
-
-		if (buffer_jbddirty(bh)) {
-			JBUFFER_TRACE(jh, "add to new checkpointing trans");
-			__journal_insert_checkpoint(jh, commit_transaction);
-			if (is_journal_aborted(journal))
-				clear_buffer_jbddirty(bh);
-		} else {
-			J_ASSERT_BH(bh, !buffer_dirty(bh));
-			/*
-			 * The buffer on BJ_Forget list and not jbddirty means
-			 * it has been freed by this transaction and hence it
-			 * could not have been reallocated until this
-			 * transaction has committed. *BUT* it could be
-			 * reallocated once we have written all the data to
-			 * disk and before we process the buffer on BJ_Forget
-			 * list.
-			 */
-			if (!jh->b_next_transaction)
-				try_to_free = 1;
-		}
-		JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-		__journal_refile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		if (try_to_free)
-			release_buffer_page(bh);
-		else
-			__brelse(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-	/*
-	 * This is a bit sleazy.  We use j_list_lock to protect transition
-	 * of a transaction into T_FINISHED state and calling
-	 * __journal_drop_transaction(). Otherwise we could race with
-	 * other checkpointing code processing the transaction...
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	/*
-	 * Now recheck if some buffers did not get attached to the transaction
-	 * while the lock was dropped...
-	 */
-	if (commit_transaction->t_forget) {
-		spin_unlock(&journal->j_list_lock);
-		spin_unlock(&journal->j_state_lock);
-		goto restart_loop;
-	}
-
-	/* Done with this transaction! */
-
-	jbd_debug(3, "JBD: commit phase 8\n");
-
-	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
-
-	commit_transaction->t_state = T_FINISHED;
-	J_ASSERT(commit_transaction == journal->j_committing_transaction);
-	journal->j_commit_sequence = commit_transaction->t_tid;
-	journal->j_committing_transaction = NULL;
-	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
-
-	/*
-	 * weight the commit time higher than the average time so we don't
-	 * react too strongly to vast changes in commit time
-	 */
-	if (likely(journal->j_average_commit_time))
-		journal->j_average_commit_time = (commit_time*3 +
-				journal->j_average_commit_time) / 4;
-	else
-		journal->j_average_commit_time = commit_time;
-
-	spin_unlock(&journal->j_state_lock);
-
-	if (commit_transaction->t_checkpoint_list == NULL &&
-	    commit_transaction->t_checkpoint_io_list == NULL) {
-		__journal_drop_transaction(journal, commit_transaction);
-	} else {
-		if (journal->j_checkpoint_transactions == NULL) {
-			journal->j_checkpoint_transactions = commit_transaction;
-			commit_transaction->t_cpnext = commit_transaction;
-			commit_transaction->t_cpprev = commit_transaction;
-		} else {
-			commit_transaction->t_cpnext =
-				journal->j_checkpoint_transactions;
-			commit_transaction->t_cpprev =
-				commit_transaction->t_cpnext->t_cpprev;
-			commit_transaction->t_cpnext->t_cpprev =
-				commit_transaction;
-			commit_transaction->t_cpprev->t_cpnext =
-				commit_transaction;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	trace_jbd_end_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: commit %d complete, head %d\n",
-		  journal->j_commit_sequence, journal->j_tail_sequence);
-
-	wake_up(&journal->j_wait_done_commit);
-}
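
The commit-time smoothing near the end of the function is a 3:1 exponentially weighted average: the newest sample carries three quarters of the weight, so the estimate tracks recent behaviour while a single outlier cannot fully take over. In isolation:

    /* new_avg = (3*sample + old_avg) / 4; first sample seeds the average */
    static unsigned long long ewma_commit(unsigned long long sample,
                                          unsigned long long avg)
    {
            return avg ? (sample * 3 + avg) / 4 : sample;
    }
    /* e.g. avg=100, one slow sample=500 -> (1500+100)/4 = 400, not 500 */
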
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
deleted file mode 100644
index c46a79a..0000000
--- a/fs/jbd/journal.c
+++ /dev/null
@@ -1,2145 +0,0 @@
-/*
- * linux/fs/jbd/journal.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem journal-writing code; part of the ext2fs
- * journaling system.
- *
- * This file manages journals: areas of disk reserved for logging
- * transactional updates.  This includes the kernel journaling thread
- * which is responsible for scheduling updates to the log.
- *
- * We do not actually manage the physical storage of the journal in this
- * file: that is left to a per-journal policy function, which allows us
- * to store the journal within a filesystem-specified area for ext2
- * journaling (ext2 can use a reserved inode for storing the log).
- */
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/freezer.h>
-#include <linux/pagemap.h>
-#include <linux/kthread.h>
-#include <linux/poison.h>
-#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
-#include <linux/ratelimit.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/jbd.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-
-EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_restart);
-EXPORT_SYMBOL(journal_extend);
-EXPORT_SYMBOL(journal_stop);
-EXPORT_SYMBOL(journal_lock_updates);
-EXPORT_SYMBOL(journal_unlock_updates);
-EXPORT_SYMBOL(journal_get_write_access);
-EXPORT_SYMBOL(journal_get_create_access);
-EXPORT_SYMBOL(journal_get_undo_access);
-EXPORT_SYMBOL(journal_dirty_data);
-EXPORT_SYMBOL(journal_dirty_metadata);
-EXPORT_SYMBOL(journal_release_buffer);
-EXPORT_SYMBOL(journal_forget);
-#if 0
-EXPORT_SYMBOL(journal_sync_buffer);
-#endif
-EXPORT_SYMBOL(journal_flush);
-EXPORT_SYMBOL(journal_revoke);
-
-EXPORT_SYMBOL(journal_init_dev);
-EXPORT_SYMBOL(journal_init_inode);
-EXPORT_SYMBOL(journal_update_format);
-EXPORT_SYMBOL(journal_check_used_features);
-EXPORT_SYMBOL(journal_check_available_features);
-EXPORT_SYMBOL(journal_set_features);
-EXPORT_SYMBOL(journal_create);
-EXPORT_SYMBOL(journal_load);
-EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_abort);
-EXPORT_SYMBOL(journal_errno);
-EXPORT_SYMBOL(journal_ack_err);
-EXPORT_SYMBOL(journal_clear_err);
-EXPORT_SYMBOL(log_wait_commit);
-EXPORT_SYMBOL(log_start_commit);
-EXPORT_SYMBOL(journal_start_commit);
-EXPORT_SYMBOL(journal_force_commit_nested);
-EXPORT_SYMBOL(journal_wipe);
-EXPORT_SYMBOL(journal_blocks_per_page);
-EXPORT_SYMBOL(journal_invalidatepage);
-EXPORT_SYMBOL(journal_try_to_free_buffers);
-EXPORT_SYMBOL(journal_force_commit);
-
-static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
-static void __journal_abort_soft (journal_t *journal, int errno);
-static const char *journal_dev_name(journal_t *journal, char *buffer);
-
-#ifdef CONFIG_JBD_DEBUG
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	if (level > journal_enable_debug)
-		return;
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
-	va_end(args);
-}
-EXPORT_SYMBOL(__jbd_debug);
-#endif
-
-/*
- * Helper function used to manage commit timeouts
- */
-
-static void commit_timeout(unsigned long __data)
-{
-	struct task_struct * p = (struct task_struct *) __data;
-
-	wake_up_process(p);
-}
-
-/*
- * kjournald: The main thread function used to manage a logging device
- * journal.
- *
- * This kernel thread is responsible for two things:
- *
- * 1) COMMIT:  Every so often we need to commit the current state of the
- *    filesystem to disk.  The journal thread is responsible for writing
- *    all of the metadata buffers to disk.
- *
- * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
- *    of the data in that part of the log has been rewritten elsewhere on
- *    the disk.  Flushing these old buffers to reclaim space in the log is
- *    known as checkpointing, and this thread is responsible for that job.
- */
-
-static int kjournald(void *arg)
-{
-	journal_t *journal = arg;
-	transaction_t *transaction;
-
-	/*
-	 * Set up an interval timer which can be used to trigger a commit wakeup
-	 * after the commit interval expires
-	 */
-	setup_timer(&journal->j_commit_timer, commit_timeout,
-			(unsigned long)current);
-
-	set_freezable();
-
-	/* Record that the journal thread is running */
-	journal->j_task = current;
-	wake_up(&journal->j_wait_done_commit);
-
-	printk(KERN_INFO "kjournald starting.  Commit interval %ld seconds\n",
-			journal->j_commit_interval / HZ);
-
-	/*
-	 * And now, wait forever for commit wakeup events.
-	 */
-	spin_lock(&journal->j_state_lock);
-
-loop:
-	if (journal->j_flags & JFS_UNMOUNT)
-		goto end_loop;
-
-	jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
-		journal->j_commit_sequence, journal->j_commit_request);
-
-	if (journal->j_commit_sequence != journal->j_commit_request) {
-		jbd_debug(1, "OK, requests differ\n");
-		spin_unlock(&journal->j_state_lock);
-		del_timer_sync(&journal->j_commit_timer);
-		journal_commit_transaction(journal);
-		spin_lock(&journal->j_state_lock);
-		goto loop;
-	}
-
-	wake_up(&journal->j_wait_done_commit);
-	if (freezing(current)) {
-		/*
-		 * The simpler the better. Flushing journal isn't a
-		 * good idea, because that depends on threads that may
-		 * be already stopped.
-		 */
-		jbd_debug(1, "Now suspending kjournald\n");
-		spin_unlock(&journal->j_state_lock);
-		try_to_freeze();
-		spin_lock(&journal->j_state_lock);
-	} else {
-		/*
-		 * We assume on resume that commits are already there,
-		 * so we don't sleep
-		 */
-		DEFINE_WAIT(wait);
-		int should_sleep = 1;
-
-		prepare_to_wait(&journal->j_wait_commit, &wait,
-				TASK_INTERRUPTIBLE);
-		if (journal->j_commit_sequence != journal->j_commit_request)
-			should_sleep = 0;
-		transaction = journal->j_running_transaction;
-		if (transaction && time_after_eq(jiffies,
-						transaction->t_expires))
-			should_sleep = 0;
-		if (journal->j_flags & JFS_UNMOUNT)
-			should_sleep = 0;
-		if (should_sleep) {
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-		}
-		finish_wait(&journal->j_wait_commit, &wait);
-	}
-
-	jbd_debug(1, "kjournald wakes\n");
-
-	/*
-	 * Were we woken up by a commit wakeup event?
-	 */
-	transaction = journal->j_running_transaction;
-	if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
-		journal->j_commit_request = transaction->t_tid;
-		jbd_debug(1, "woke because of timeout\n");
-	}
-	goto loop;
-
-end_loop:
-	spin_unlock(&journal->j_state_lock);
-	del_timer_sync(&journal->j_commit_timer);
-	journal->j_task = NULL;
-	wake_up(&journal->j_wait_done_commit);
-	jbd_debug(1, "Journal thread exiting.\n");
-	return 0;
-}
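
The sleep path in kjournald is the classic lost-wakeup-safe sequence: register as a waiter first, re-check every wake condition, and only then schedule. The userspace equivalent collapses to a condition-variable loop; a sketch with illustrative flags:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static int commit_requested, unmounting;

    /* Re-check the conditions under the lock after registering as a
     * waiter, so a wakeup racing with going to sleep is never lost. */
    static void wait_for_work(void)
    {
            pthread_mutex_lock(&lock);
            while (!commit_requested && !unmounting)
                    pthread_cond_wait(&wake, &lock);  /* atomic unlock+sleep */
            pthread_mutex_unlock(&lock);
    }
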
-
-static int journal_start_thread(journal_t *journal)
-{
-	struct task_struct *t;
-
-	t = kthread_run(kjournald, journal, "kjournald");
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-
-	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
-	return 0;
-}
-
-static void journal_kill_thread(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_UNMOUNT;
-
-	while (journal->j_task) {
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				journal->j_task == NULL);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/*
- * journal_write_metadata_buffer: write a metadata buffer to the journal.
- *
- * Writes a metadata buffer to a given disk block.  The actual IO is not
- * performed but a new buffer_head is constructed which labels the data
- * to be written with the correct destination disk block.
- *
- * Any magic-number escaping which needs to be done will cause a
- * copy-out here.  If the buffer happens to start with the
- * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
-	 * magic number is only written to the log for descriptor blocks.  In
- * this case, we copy the data and replace the first word with 0, and we
- * return a result code which indicates that this buffer needs to be
- * marked as an escaped buffer in the corresponding log descriptor
- * block.  The missing word can then be restored when the block is read
- * during recovery.
- *
- * If the source buffer has already been modified by a new transaction
- * since we took the last commit snapshot, we use the frozen copy of
- * that data for IO.  If we end up using the existing buffer_head's data
- * for the write, then we *have* to lock the buffer to prevent anyone
- * else from using and possibly modifying it while the IO is in
- * progress.
- *
- * The function returns a pointer to the buffer_heads to be used for IO.
- *
- * We assume that the journal has already been locked in this function.
- *
- * Return value:
- *  <0: Error
- * >=0: Finished OK
- *
- * On success:
- * Bit 0 set == escape performed on the data
- * Bit 1 set == buffer copy-out performed (kfree the data after IO)
- */
-
-int journal_write_metadata_buffer(transaction_t *transaction,
-				  struct journal_head  *jh_in,
-				  struct journal_head **jh_out,
-				  unsigned int blocknr)
-{
-	int need_copy_out = 0;
-	int done_copy_out = 0;
-	int do_escape = 0;
-	char *mapped_data;
-	struct buffer_head *new_bh;
-	struct journal_head *new_jh;
-	struct page *new_page;
-	unsigned int new_offset;
-	struct buffer_head *bh_in = jh2bh(jh_in);
-	journal_t *journal = transaction->t_journal;
-
-	/*
-	 * The buffer really shouldn't be locked: only the current committing
-	 * transaction is allowed to write it, so nobody else is allowed
-	 * to do any IO.
-	 *
-	 * akpm: except if we're journalling data, and write() output is
-	 * also part of a shared mapping, and another thread has
-	 * decided to launch a writepage() against this buffer.
-	 */
-	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
-
-	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
-	/* keep subsequent assertions sane */
-	atomic_set(&new_bh->b_count, 1);
-	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
-
-	/*
-	 * If a new transaction has already done a buffer copy-out, then
-	 * we use that version of the data for the commit.
-	 */
-	jbd_lock_bh_state(bh_in);
-repeat:
-	if (jh_in->b_frozen_data) {
-		done_copy_out = 1;
-		new_page = virt_to_page(jh_in->b_frozen_data);
-		new_offset = offset_in_page(jh_in->b_frozen_data);
-	} else {
-		new_page = jh2bh(jh_in)->b_page;
-		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-	}
-
-	mapped_data = kmap_atomic(new_page);
-	/*
-	 * Check for escaping
-	 */
-	if (*((__be32 *)(mapped_data + new_offset)) ==
-				cpu_to_be32(JFS_MAGIC_NUMBER)) {
-		need_copy_out = 1;
-		do_escape = 1;
-	}
-	kunmap_atomic(mapped_data);
-
-	/*
-	 * Do we need to do a data copy?
-	 */
-	if (need_copy_out && !done_copy_out) {
-		char *tmp;
-
-		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
-		jbd_lock_bh_state(bh_in);
-		if (jh_in->b_frozen_data) {
-			jbd_free(tmp, bh_in->b_size);
-			goto repeat;
-		}
-
-		jh_in->b_frozen_data = tmp;
-		mapped_data = kmap_atomic(new_page);
-		memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-		kunmap_atomic(mapped_data);
-
-		new_page = virt_to_page(tmp);
-		new_offset = offset_in_page(tmp);
-		done_copy_out = 1;
-	}
-
-	/*
-	 * Did we need to escape the data?  Now that we've done all the
-	 * copying, we can finally do so.
-	 */
-	if (do_escape) {
-		mapped_data = kmap_atomic(new_page);
-		*((unsigned int *)(mapped_data + new_offset)) = 0;
-		kunmap_atomic(mapped_data);
-	}
-
-	set_bh_page(new_bh, new_page, new_offset);
-	new_jh->b_transaction = NULL;
-	new_bh->b_size = jh2bh(jh_in)->b_size;
-	new_bh->b_bdev = transaction->t_journal->j_dev;
-	new_bh->b_blocknr = blocknr;
-	set_buffer_mapped(new_bh);
-	set_buffer_dirty(new_bh);
-
-	*jh_out = new_jh;
-
-	/*
-	 * The to-be-written buffer needs to get moved to the io queue,
-	 * and the original buffer whose contents we are shadowing or
-	 * copying is moved to the transaction's shadow queue.
-	 */
-	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh_in, transaction, BJ_Shadow);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh_in);
-
-	JBUFFER_TRACE(new_jh, "file as BJ_IO");
-	journal_file_buffer(new_jh, transaction, BJ_IO);
-
-	return do_escape | (done_copy_out << 1);
-}
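-
-/*
- * Illustrative decoding of the return value (a sketch, not a call site
- * from the original code):
- *
- *	flags = journal_write_metadata_buffer(...);
- *	if (flags < 0)
- *		...error...
- *	if (flags & 1)
- *		...mark the buffer escaped in the descriptor block...
- *	if (flags & 2)
- *		...a frozen copy was used; free it after the IO...
- */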
-
-/*
- * Allocation code for the journal file.  Manage the space left in the
- * journal, so that we can begin checkpointing when appropriate.
- */
-
-/*
- * __log_space_left: Return the number of free blocks left in the journal.
- *
- * Called with the journal already locked.
- *
- * Called under j_state_lock
- */
-
-int __log_space_left(journal_t *journal)
-{
-	int left = journal->j_free;
-
-	assert_spin_locked(&journal->j_state_lock);
-
-	/*
-	 * Be pessimistic here about the number of those free blocks which
-	 * might be required for log descriptor control blocks.
-	 */
-
-#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
-
-	left -= MIN_LOG_RESERVED_BLOCKS;
-
-	if (left <= 0)
-		return 0;
-	left -= (left >> 3);
-	return left;
-}
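-
-/*
- * Worked example (illustrative figures): with j_free == 1024 the
- * function computes left = 1024 - 32 = 992, then subtracts
- * 992 >> 3 == 124 and returns 868 -- a fixed reserve for descriptor
- * blocks plus a further 12.5% safety margin.
- */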
-
-/*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
- */
-int __log_start_commit(journal_t *journal, tid_t target)
-{
-	/*
-	 * The only transaction we can possibly wait upon is the
-	 * currently running transaction (if it exists).  Otherwise,
-	 * the target tid must be an old one.
-	 */
-	if (journal->j_commit_request != target &&
-	    journal->j_running_transaction &&
-	    journal->j_running_transaction->t_tid == target) {
-		/*
-		 * We want a new commit: OK, mark the request and wakeup the
-		 * commit thread.  We do _not_ do the commit ourselves.
-		 */
-
-		journal->j_commit_request = target;
-		jbd_debug(1, "JBD: requesting commit %d/%d\n",
-			  journal->j_commit_request,
-			  journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		return 1;
-	} else if (!tid_geq(journal->j_commit_request, target))
-		/* This should never happen, but if it does, preserve
-		   the evidence before kjournald goes into a loop and
-		   increments j_commit_sequence beyond all recognition. */
-		WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
-		    journal->j_commit_request, journal->j_commit_sequence,
-		    target, journal->j_running_transaction ?
-		    journal->j_running_transaction->t_tid : 0);
-	return 0;
-}
-
-int log_start_commit(journal_t *journal, tid_t tid)
-{
-	int ret;
-
-	spin_lock(&journal->j_state_lock);
-	ret = __log_start_commit(journal, tid);
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Force and wait upon a commit if the calling process is not within
- * transaction.  This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * We can only force the running transaction if we don't have an active handle;
- * otherwise, we will deadlock.
- *
- * Returns true if a transaction was started.
- */
-int journal_force_commit_nested(journal_t *journal)
-{
-	transaction_t *transaction = NULL;
-	tid_t tid;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction && !current->journal_info) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	if (!transaction) {
-		spin_unlock(&journal->j_state_lock);
-		return 0;	/* Nothing to retry */
-	}
-
-	tid = transaction->t_tid;
-	spin_unlock(&journal->j_state_lock);
-	log_wait_commit(journal, tid);
-	return 1;
-}
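-
-/*
- * Typical call pattern (a sketch, not taken from a real caller): a
- * filesystem that failed an allocation while holding no handle may
- * retry after forcing a commit:
- *
- *	if (journal_force_commit_nested(journal))
- *		goto retry_alloc;
- *	return -ENOSPC;
- */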
-
-/*
- * Start a commit of the current running transaction (if any).  Returns true
- * if a transaction is going to be committed (or is currently already
- * committing), and fills its tid in at *ptid
- */
-int journal_start_commit(journal_t *journal, tid_t *ptid)
-{
-	int ret = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction) {
-		tid_t tid = journal->j_running_transaction->t_tid;
-
-		__log_start_commit(journal, tid);
-		/* There's a running transaction and we've just made sure
-		 * its commit has been scheduled. */
-		if (ptid)
-			*ptid = tid;
-		ret = 1;
-	} else if (journal->j_committing_transaction) {
-		/*
-		 * If commit has been started, then we have to wait for
-		 * completion of that transaction.
-		 */
-		if (ptid)
-			*ptid = journal->j_committing_transaction->t_tid;
-		ret = 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Wait for a specified commit to complete.
- * The caller may not hold the journal lock.
- */
-int log_wait_commit(journal_t *journal, tid_t tid)
-{
-	int err = 0;
-
-#ifdef CONFIG_JBD_DEBUG
-	spin_lock(&journal->j_state_lock);
-	if (!tid_geq(journal->j_commit_request, tid)) {
-		printk(KERN_ERR
-		       "%s: error: j_commit_request=%d, tid=%d\n",
-		       __func__, journal->j_commit_request, tid);
-	}
-	spin_unlock(&journal->j_state_lock);
-#endif
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Not running or committing trans? Must be already committed. This
-	 * saves us from waiting for a *long* time when tid overflows.
-	 */
-	if (!((journal->j_running_transaction &&
-	       journal->j_running_transaction->t_tid == tid) ||
-	      (journal->j_committing_transaction &&
-	       journal->j_committing_transaction->t_tid == tid)))
-		goto out_unlock;
-
-	if (!tid_geq(journal->j_commit_waited, tid))
-		journal->j_commit_waited = tid;
-	while (tid_gt(tid, journal->j_commit_sequence)) {
-		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
-				  tid, journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				!tid_gt(tid, journal->j_commit_sequence));
-		spin_lock(&journal->j_state_lock);
-	}
-out_unlock:
-	spin_unlock(&journal->j_state_lock);
-
-	if (unlikely(is_journal_aborted(journal)))
-		err = -EIO;
-	return err;
-}
-
-/*
- * Return 1 if a given transaction has not yet sent a barrier request
- * connected with a transaction commit. If 0 is returned, the transaction
- * may or may not have sent the barrier. Used to avoid sending the barrier
- * twice in common cases.
- */
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
-{
-	int ret = 0;
-	transaction_t *commit_trans;
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		return 0;
-	spin_lock(&journal->j_state_lock);
-	/* Transaction already committed? */
-	if (tid_geq(journal->j_commit_sequence, tid))
-		goto out;
-	/*
-	 * Transaction is being committed and we already proceeded to
-	 * writing commit record?
-	 */
-	commit_trans = journal->j_committing_transaction;
-	if (commit_trans && commit_trans->t_tid == tid &&
-	    commit_trans->t_state >= T_COMMIT_RECORD)
-		goto out;
-	ret = 1;
-out:
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
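-
-/*
- * Usage sketch (hypothetical fsync path): a caller about to issue its
- * own cache flush can skip it when the commit code will send one:
- *
- *	if (!journal_trans_will_send_data_barrier(journal, commit_tid))
- *		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
- */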
-
-/*
- * Log buffer allocation routines:
- */
-
-int journal_next_log_block(journal_t *journal, unsigned int *retp)
-{
-	unsigned int blocknr;
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(journal->j_free > 1);
-
-	blocknr = journal->j_head;
-	journal->j_head++;
-	journal->j_free--;
-	if (journal->j_head == journal->j_last)
-		journal->j_head = journal->j_first;
-	spin_unlock(&journal->j_state_lock);
-	return journal_bmap(journal, blocknr, retp);
-}
-
-/*
- * Conversion of logical to physical block numbers for the journal
- *
- * On external journals the journal blocks are identity-mapped, so
- * this is a no-op.  If needed, we can use j_blk_offset - everything is
- * ready.
- */
-int journal_bmap(journal_t *journal, unsigned int blocknr,
-		 unsigned int *retp)
-{
-	int err = 0;
-	unsigned int ret;
-
-	if (journal->j_inode) {
-		ret = bmap(journal->j_inode, blocknr);
-		if (ret)
-			*retp = ret;
-		else {
-			char b[BDEVNAME_SIZE];
-
-			printk(KERN_ALERT "%s: journal block not found "
-					"at offset %u on %s\n",
-				__func__,
-				blocknr,
-				bdevname(journal->j_dev, b));
-			err = -EIO;
-			__journal_abort_soft(journal, err);
-		}
-	} else {
-		*retp = blocknr; /* +journal->j_blk_offset */
-	}
-	return err;
-}
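-
-/*
- * Example (illustrative): on an external journal (j_inode == NULL),
- * journal_bmap(journal, 7, &phys) simply yields phys == 7, while on
- * an inode-backed journal the same call goes through bmap() on the
- * journal inode to find the real disk block.
- */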
-
-/*
- * We play buffer_head aliasing tricks to write data/metadata blocks to
- * the journal without copying their contents, but for journal
- * descriptor blocks we do need to generate bona fide buffers.
- *
- * After the caller of journal_get_descriptor_buffer() has finished modifying
- * the buffer's contents they really should run flush_dcache_page(bh->b_page).
- * But we don't bother doing that, so there will be coherency problems with
- * mmaps of blockdevs which hold live JBD-controlled filesystems.
- */
-struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
-{
-	struct buffer_head *bh;
-	unsigned int blocknr;
-	int err;
-
-	err = journal_next_log_block(journal, &blocknr);
-
-	if (err)
-		return NULL;
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return NULL;
-	lock_buffer(bh);
-	memset(bh->b_data, 0, journal->j_blocksize);
-	set_buffer_uptodate(bh);
-	unlock_buffer(bh);
-	BUFFER_TRACE(bh, "return this buffer");
-	return journal_add_journal_head(bh);
-}
-
-/*
- * Management for journal control blocks: functions to create and
- * destroy journal_t structures, and to initialise and read existing
- * journal blocks from disk.  */
-
-/* First: create and setup a journal_t object in memory.  We initialise
- * very few fields yet: that has to wait until we have created the
- * journal structures from scratch, or loaded them from disk. */
-
-static journal_t * journal_init_common (void)
-{
-	journal_t *journal;
-	int err;
-
-	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
-	if (!journal)
-		goto fail;
-
-	init_waitqueue_head(&journal->j_wait_transaction_locked);
-	init_waitqueue_head(&journal->j_wait_logspace);
-	init_waitqueue_head(&journal->j_wait_done_commit);
-	init_waitqueue_head(&journal->j_wait_checkpoint);
-	init_waitqueue_head(&journal->j_wait_commit);
-	init_waitqueue_head(&journal->j_wait_updates);
-	mutex_init(&journal->j_checkpoint_mutex);
-	spin_lock_init(&journal->j_revoke_lock);
-	spin_lock_init(&journal->j_list_lock);
-	spin_lock_init(&journal->j_state_lock);
-
-	journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
-
-	/* The journal is marked for error until we succeed with recovery! */
-	journal->j_flags = JFS_ABORT;
-
-	/* Set up a default-sized revoke table for the new mount. */
-	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-	if (err) {
-		kfree(journal);
-		goto fail;
-	}
-	return journal;
-fail:
-	return NULL;
-}
-
-/* journal_init_dev and journal_init_inode:
- *
- * Create a journal structure assigned some fixed set of disk blocks to
- * the journal.  We don't actually touch those disk blocks yet, but we
- * need to set up all of the mapping information to tell the journaling
- * system where the journal blocks are.
- *
- */
-
-/**
- *  journal_t * journal_init_dev() - creates and initialises a journal structure
- *  @bdev: Block device on which to create the journal
- *  @fs_dev: Device which holds the journalled filesystem for this journal.
- *  @start: Block nr at which the journal starts.
- *  @len:  Length of the journal in blocks.
- *  @blocksize: blocksize of journalling device
- *
- *  Returns: a newly created journal_t *
- *
- *  journal_init_dev creates a journal which maps a fixed contiguous
- *  range of blocks on an arbitrary block device.
- *
- */
-journal_t * journal_init_dev(struct block_device *bdev,
-			struct block_device *fs_dev,
-			int start, int len, int blocksize)
-{
-	journal_t *journal = journal_init_common();
-	struct buffer_head *bh;
-	int n;
-
-	if (!journal)
-		return NULL;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	journal->j_blocksize = blocksize;
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-	journal->j_dev = bdev;
-	journal->j_fs_dev = fs_dev;
-	journal->j_blk_offset = start;
-	journal->j_maxlen = len;
-
-	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
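-
-/*
- * Usage sketch (hypothetical geometry): an 8192-block journal with a
- * 4K block size, starting at block 0 of a dedicated device, would be
- * set up as
- *
- *	journal_t *j = journal_init_dev(jdev, fsdev, 0, 8192, 4096);
- *	if (!j)
- *		...fail...
- */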
-
-/**
- *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
- *  @inode: An inode to create the journal in
- *
- * journal_init_inode creates a journal which maps an on-disk inode as
- * the journal.  The inode must exist already, must support bmap() and
- * must have all data blocks preallocated.
- */
-journal_t * journal_init_inode (struct inode *inode)
-{
-	struct buffer_head *bh;
-	journal_t *journal = journal_init_common();
-	int err;
-	int n;
-	unsigned int blocknr;
-
-	if (!journal)
-		return NULL;
-
-	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
-	journal->j_inode = inode;
-	jbd_debug(1,
-		  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
-		  journal, inode->i_sb->s_id, inode->i_ino,
-		  (long long) inode->i_size,
-		  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
-
-	journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
-	journal->j_blocksize = inode->i_sb->s_blocksize;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-
-	err = journal_bmap(journal, 0, &blocknr);
-	/* If that failed, give up */
-	if (err) {
-		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-
-	bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
-
-/*
- * If the journal init or create aborts, we need to mark the journal
- * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock.
- */
-static void journal_fail_superblock (journal_t *journal)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	brelse(bh);
-	journal->j_sb_buffer = NULL;
-}
-
-/*
- * Given a journal_t structure, initialise the various fields for
- * startup of a new journaling session.  We use this both when creating
- * a journal, and after recovering an old journal to reset it for
- * subsequent use.
- */
-
-static int journal_reset(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-	unsigned int first, last;
-
-	first = be32_to_cpu(sb->s_first);
-	last = be32_to_cpu(sb->s_maxlen);
-	if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
-		printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n",
-		       first, last);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	journal->j_first = first;
-	journal->j_last = last;
-
-	journal->j_head = first;
-	journal->j_tail = first;
-	journal->j_free = last - first;
-
-	journal->j_tail_sequence = journal->j_transaction_sequence;
-	journal->j_commit_sequence = journal->j_transaction_sequence - 1;
-	journal->j_commit_request = journal->j_commit_sequence;
-
-	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
-
-	/*
-	 * As a special case, if the on-disk copy is already marked as needing
-	 * no recovery (s_start == 0), then we can safely defer the superblock
-	 * update until the next commit by setting JFS_FLUSHED.  This avoids
-	 * attempting a write to a potential-readonly device.
-	 */
-	if (sb->s_start == 0) {
-		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
-			"(start %u, seq %d, errno %d)\n",
-			journal->j_tail, journal->j_tail_sequence,
-			journal->j_errno);
-		journal->j_flags |= JFS_FLUSHED;
-	} else {
-		/* Lock here to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * Update log tail information. We use WRITE_FUA since new
-		 * transaction will start reusing journal space and so we
-		 * must make sure information about current log tail is on
-		 * disk before that.
-		 */
-		journal_update_sb_log_tail(journal,
-					   journal->j_tail_sequence,
-					   journal->j_tail,
-					   WRITE_FUA);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-	return journal_start_thread(journal);
-}
-
-/**
- * int journal_create() - Initialise the new journal file
- * @journal: Journal to create. This structure must have been initialised
- *
- * Given a journal_t structure which tells us which disk blocks we can
- * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.
- **/
-int journal_create(journal_t *journal)
-{
-	unsigned int blocknr;
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int i, err;
-
-	if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
-		printk (KERN_ERR "Journal length (%d blocks) too short.\n",
-			journal->j_maxlen);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	if (journal->j_inode == NULL) {
-		/*
-		 * We don't know what block to start at!
-		 */
-		printk(KERN_EMERG
-		       "%s: creation of journal on external device!\n",
-		       __func__);
-		BUG();
-	}
-
-	/* Zero out the entire journal on disk.  We cannot afford to
-	   have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
-	jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
-	for (i = 0; i < journal->j_maxlen; i++) {
-		err = journal_bmap(journal, i, &blocknr);
-		if (err)
-			return err;
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (unlikely(!bh))
-			return -ENOMEM;
-		lock_buffer(bh);
-		memset (bh->b_data, 0, journal->j_blocksize);
-		BUFFER_TRACE(bh, "marking dirty");
-		mark_buffer_dirty(bh);
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		__brelse(bh);
-	}
-
-	sync_blockdev(journal->j_dev);
-	jbd_debug(1, "JBD: journal cleared.\n");
-
-	/* OK, fill in the initial static fields in the new superblock */
-	sb = journal->j_superblock;
-
-	sb->s_header.h_magic	 = cpu_to_be32(JFS_MAGIC_NUMBER);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-
-	sb->s_blocksize	= cpu_to_be32(journal->j_blocksize);
-	sb->s_maxlen	= cpu_to_be32(journal->j_maxlen);
-	sb->s_first	= cpu_to_be32(1);
-
-	journal->j_transaction_sequence = 1;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_format_version = 2;
-
-	return journal_reset(journal);
-}
-
-static void journal_write_superblock(journal_t *journal, int write_op)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	int ret;
-
-	trace_journal_write_superblock(journal, write_op);
-	if (!(journal->j_flags & JFS_BARRIER))
-		write_op &= ~(REQ_FUA | REQ_FLUSH);
-	lock_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		char b[BDEVNAME_SIZE];
-		/*
-		 * Oh, dear.  A previous attempt to write the journal
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		printk(KERN_ERR "JBD: previous I/O error detected "
-		       "for journal superblock update for %s.\n",
-		       journal_dev_name(journal, b));
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-	}
-
-	get_bh(bh);
-	bh->b_end_io = end_buffer_write_sync;
-	ret = submit_bh(write_op, bh);
-	wait_on_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-		ret = -EIO;
-	}
-	if (ret) {
-		char b[BDEVNAME_SIZE];
-		printk(KERN_ERR "JBD: Error %d detected "
-		       "when updating journal superblock for %s.\n",
-		       ret, journal_dev_name(journal, b));
-	}
-}
-
-/**
- * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
- * @journal: The journal to update.
- * @tail_tid: TID of the new transaction at the tail of the log
- * @tail_block: The first block of the transaction at the tail of the log
- * @write_op: With which operation should we write the journal sb
- *
- * Update a journal's superblock information about log tail and write it to
- * disk, waiting for the IO to complete.
- */
-void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
-				unsigned int tail_block, int write_op)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
-		  tail_block, tail_tid);
-
-	sb->s_sequence = cpu_to_be32(tail_tid);
-	sb->s_start    = cpu_to_be32(tail_block);
-
-	journal_write_superblock(journal, write_op);
-
-	/* Log is no longer empty */
-	spin_lock(&journal->j_state_lock);
-	WARN_ON(!sb->s_sequence);
-	journal->j_flags &= ~JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * mark_journal_empty() - Mark on disk journal as empty.
- * @journal: The journal to update.
- *
- * Update a journal's dynamic superblock fields to show that journal is empty.
- * Write updated superblock to disk waiting for IO to complete.
- */
-static void mark_journal_empty(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	spin_lock(&journal->j_state_lock);
-	/* Is it already empty? */
-	if (sb->s_start == 0) {
-		spin_unlock(&journal->j_state_lock);
-		return;
-	}
-	jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
-        	  journal->j_tail_sequence);
-
-	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
-	sb->s_start    = cpu_to_be32(0);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* Log is empty */
-	journal->j_flags |= JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * journal_update_sb_errno() - Update error in the journal.
- * @journal: The journal to update.
- *
- * Update a journal's errno.  Write updated superblock to disk waiting for IO
- * to complete.
- */
-static void journal_update_sb_errno(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
-        	  journal->j_errno);
-	sb->s_errno = cpu_to_be32(journal->j_errno);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_SYNC);
-}
-
-/*
- * Read the superblock for a given journal, performing initial
- * validation of the format.
- */
-
-static int journal_get_superblock(journal_t *journal)
-{
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int err = -EIO;
-
-	bh = journal->j_sb_buffer;
-
-	J_ASSERT(bh != NULL);
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ, 1, &bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			printk (KERN_ERR
-				"JBD: IO error reading journal superblock\n");
-			goto out;
-		}
-	}
-
-	sb = journal->j_superblock;
-
-	err = -EINVAL;
-
-	if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
-	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
-		printk(KERN_WARNING "JBD: no valid journal superblock found\n");
-		goto out;
-	}
-
-	switch(be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V1:
-		journal->j_format_version = 1;
-		break;
-	case JFS_SUPERBLOCK_V2:
-		journal->j_format_version = 2;
-		break;
-	default:
-		printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
-		journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
-	else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
-		printk (KERN_WARNING "JBD: journal file too short\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_first) == 0 ||
-	    be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
-		printk(KERN_WARNING
-			"JBD: Invalid start block of journal: %u\n",
-			be32_to_cpu(sb->s_first));
-		goto out;
-	}
-
-	return 0;
-
-out:
-	journal_fail_superblock(journal);
-	return err;
-}
-
-/*
- * Load the on-disk journal superblock and read the key fields into the
- * journal_t.
- */
-
-static int load_superblock(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
-	journal->j_tail = be32_to_cpu(sb->s_start);
-	journal->j_first = be32_to_cpu(sb->s_first);
-	journal->j_last = be32_to_cpu(sb->s_maxlen);
-	journal->j_errno = be32_to_cpu(sb->s_errno);
-
-	return 0;
-}
-
-
-/**
- * int journal_load() - Read journal from disk.
- * @journal: Journal to act on.
- *
- * Given a journal_t structure which tells us which disk blocks contain
- * a journal, read the journal from disk to initialise the in-memory
- * structures.
- */
-int journal_load(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-	/* If this is a V2 superblock, then we have to check the
-	 * features flags on it. */
-
-	if (journal->j_format_version >= 2) {
-		if ((sb->s_feature_ro_compat &
-		     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
-		    (sb->s_feature_incompat &
-		     ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
-			printk (KERN_WARNING
-				"JBD: Unrecognised features on journal\n");
-			return -EINVAL;
-		}
-	}
-
-	/* Let the recovery code check whether it needs to recover any
-	 * data from the journal. */
-	if (journal_recover(journal))
-		goto recovery_error;
-
-	/* OK, we've finished with the dynamic journal bits:
-	 * reinitialise the dynamic contents of the superblock in memory
-	 * and reset them on disk. */
-	if (journal_reset(journal))
-		goto recovery_error;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_flags |= JFS_LOADED;
-	return 0;
-
-recovery_error:
-	printk (KERN_WARNING "JBD: recovery failed\n");
-	return -EIO;
-}
-
-/**
- * void journal_destroy() - Release a journal_t structure.
- * @journal: Journal to act on.
- *
- * Release a journal_t structure once it is no longer in use by the
- * journaled object.
- * Return <0 if we couldn't clean up the journal.
- */
-int journal_destroy(journal_t *journal)
-{
-	int err = 0;
-
-
-	/* Wait for the commit thread to wake up and die. */
-	journal_kill_thread(journal);
-
-	/* Force a final log commit */
-	if (journal->j_running_transaction)
-		journal_commit_transaction(journal);
-
-	/* Force any old transactions to disk */
-
-	/* We cannot race with anybody but must keep assertions happy */
-	mutex_lock(&journal->j_checkpoint_mutex);
-	/* Totally anal locking here... */
-	spin_lock(&journal->j_list_lock);
-	while (journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		log_do_checkpoint(journal);
-		spin_lock(&journal->j_list_lock);
-	}
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-	J_ASSERT(journal->j_checkpoint_transactions == NULL);
-	spin_unlock(&journal->j_list_lock);
-
-	if (journal->j_sb_buffer) {
-		if (!is_journal_aborted(journal)) {
-			journal->j_tail_sequence =
-				++journal->j_transaction_sequence;
-			mark_journal_empty(journal);
-		} else
-			err = -EIO;
-		brelse(journal->j_sb_buffer);
-	}
-	mutex_unlock(&journal->j_checkpoint_mutex);
-
-	iput(journal->j_inode);
-	if (journal->j_revoke)
-		journal_destroy_revoke(journal);
-	kfree(journal->j_wbuf);
-	kfree(journal);
-
-	return err;
-}
-
-
-/**
- * int journal_check_used_features() - Check if features specified are used.
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journal uses all of a given set of
- * features.  Return true (non-zero) if it does.
- **/
-
-int journal_check_used_features (journal_t *journal, unsigned long compat,
-				 unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (!compat && !ro && !incompat)
-		return 1;
-	if (journal->j_format_version == 1)
-		return 0;
-
-	sb = journal->j_superblock;
-
-	if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
-	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
-	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
-		return 1;
-
-	return 0;
-}
-
-/**
- * int journal_check_available_features() - Check feature set in journalling layer
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journaling code supports the use of
- * all of a given set of features on this journal.  Return true
- * (non-zero) if it can. */
-
-int journal_check_available_features (journal_t *journal, unsigned long compat,
-				      unsigned long ro, unsigned long incompat)
-{
-	if (!compat && !ro && !incompat)
-		return 1;
-
-	/* We can support any known requested features iff the
-	 * superblock is in version 2.  Otherwise we fail to support any
-	 * extended sb features. */
-
-	if (journal->j_format_version != 2)
-		return 0;
-
-	if ((compat   & JFS_KNOWN_COMPAT_FEATURES) == compat &&
-	    (ro       & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
-	    (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
-		return 1;
-
-	return 0;
-}
-
-/**
- * int journal_set_features () - Mark a given journal feature in the superblock
- * @journal: Journal to act on.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Mark a given journal feature as present on the
- * superblock.  Returns true if the requested features could be set.
- *
- */
-
-int journal_set_features (journal_t *journal, unsigned long compat,
-			  unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (journal_check_used_features(journal, compat, ro, incompat))
-		return 1;
-
-	if (!journal_check_available_features(journal, compat, ro, incompat))
-		return 0;
-
-	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
-		  compat, ro, incompat);
-
-	sb = journal->j_superblock;
-
-	sb->s_feature_compat    |= cpu_to_be32(compat);
-	sb->s_feature_ro_compat |= cpu_to_be32(ro);
-	sb->s_feature_incompat  |= cpu_to_be32(incompat);
-
-	return 1;
-}
-
-
-/**
- * int journal_update_format () - Update on-disk journal structure.
- * @journal: Journal to act on.
- *
- * Given an initialised but unloaded journal struct, poke about in the
- * on-disk structure to update it to the most recent supported version.
- */
-int journal_update_format (journal_t *journal)
-{
-	journal_superblock_t *sb;
-	int err;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V2:
-		return 0;
-	case JFS_SUPERBLOCK_V1:
-		return journal_convert_superblock_v1(journal, sb);
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-static int journal_convert_superblock_v1(journal_t *journal,
-					 journal_superblock_t *sb)
-{
-	int offset, blocksize;
-	struct buffer_head *bh;
-
-	printk(KERN_WARNING
-		"JBD: Converting superblock from version 1 to 2.\n");
-
-	/* Pre-initialise new fields to zero */
-	offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);
-	blocksize = be32_to_cpu(sb->s_blocksize);
-	memset(&sb->s_feature_compat, 0, blocksize-offset);
-
-	sb->s_nr_users = cpu_to_be32(1);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-	journal->j_format_version = 2;
-
-	bh = journal->j_sb_buffer;
-	BUFFER_TRACE(bh, "marking dirty");
-	mark_buffer_dirty(bh);
-	sync_dirty_buffer(bh);
-	return 0;
-}
-
-
-/**
- * int journal_flush () - Flush journal
- * @journal: Journal to act on.
- *
- * Flush all data for a given journal to disk and empty the journal.
- * Filesystems can use this when remounting readonly to ensure that
- * recovery does not need to happen on remount.
- */
-
-int journal_flush(journal_t *journal)
-{
-	int err = 0;
-	transaction_t *transaction = NULL;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Force everything buffered to the log... */
-	if (journal->j_running_transaction) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	/* Wait for the log commit to complete... */
-	if (transaction) {
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&journal->j_state_lock);
-		log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	/* ...and flush everything in the log out to disk. */
-	spin_lock(&journal->j_list_lock);
-	while (!err && journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-		err = log_do_checkpoint(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-		spin_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (is_journal_aborted(journal))
-		return -EIO;
-
-	mutex_lock(&journal->j_checkpoint_mutex);
-	cleanup_journal_tail(journal);
-
-	/* Finally, mark the journal as really needing no recovery.
-	 * This sets s_start==0 in the underlying superblock, which is
-	 * the magic code for a fully-recovered superblock.  Any future
-	 * commits of data to the journal will restore the current
-	 * s_start value. */
-	mark_journal_empty(journal);
-	mutex_unlock(&journal->j_checkpoint_mutex);
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(!journal->j_running_transaction);
-	J_ASSERT(!journal->j_committing_transaction);
-	J_ASSERT(!journal->j_checkpoint_transactions);
-	J_ASSERT(journal->j_head == journal->j_tail);
-	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
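-
-/*
- * Usage sketch (hypothetical remount-readonly path): draining the log
- * before going read-only means no recovery is needed on the next mount:
- *
- *	err = journal_flush(journal);
- *	if (err < 0)
- *		...mark the filesystem as still needing recovery...
- */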
-
-/**
- * int journal_wipe() - Wipe journal contents
- * @journal: Journal to act on.
- * @write: flag (see below)
- *
- * Wipe out all of the contents of a journal, safely.  This will produce
- * a warning if the journal contains any valid recovery information.
- * Must be called between journal_init_*() and journal_load().
- *
- * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
- * we merely suppress recovery.
- */
-
-int journal_wipe(journal_t *journal, int write)
-{
-	int err = 0;
-
-	J_ASSERT (!(journal->j_flags & JFS_LOADED));
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	if (!journal->j_tail)
-		goto no_recovery;
-
-	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
-		write ? "Clearing" : "Ignoring");
-
-	err = journal_skip_recovery(journal);
-	if (write) {
-		/* Lock to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		mark_journal_empty(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-
- no_recovery:
-	return err;
-}
-
-/*
- * journal_dev_name: format a character string describing the device
- * on which this journal is present.
- */
-
-static const char *journal_dev_name(journal_t *journal, char *buffer)
-{
-	struct block_device *bdev;
-
-	if (journal->j_inode)
-		bdev = journal->j_inode->i_sb->s_bdev;
-	else
-		bdev = journal->j_dev;
-
-	return bdevname(bdev, buffer);
-}
-
-/*
- * Journal abort has very specific semantics, which we describe below
- * for journal_abort().
- *
- * Two internal functions, which provide abort support to the jbd layer
- * itself, are here.
- */
-
-/*
- * Quick version for internal journal use (doesn't lock the journal).
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
- * and don't attempt to make any other journal updates.
- */
-static void __journal_abort_hard(journal_t *journal)
-{
-	transaction_t *transaction;
-	char b[BDEVNAME_SIZE];
-
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	printk(KERN_ERR "Aborting journal on device %s.\n",
-		journal_dev_name(journal, b));
-
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_ABORT;
-	transaction = journal->j_running_transaction;
-	if (transaction)
-		__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-}
-
-/* Soft abort: record the abort error status in the journal superblock,
- * but don't do any other IO. */
-static void __journal_abort_soft (journal_t *journal, int errno)
-{
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	if (!journal->j_errno)
-		journal->j_errno = errno;
-
-	__journal_abort_hard(journal);
-
-	if (errno)
-		journal_update_sb_errno(journal);
-}
-
-/**
- * void journal_abort () - Shutdown the journal immediately.
- * @journal: the journal to shutdown.
- * @errno:   an error number to record in the journal indicating
- *           the reason for the shutdown.
- *
- * Perform a complete, immediate shutdown of the ENTIRE
- * journal (not of a single transaction).  This operation cannot be
- * undone without closing and reopening the journal.
- *
- * The journal_abort function is intended to support higher level error
- * recovery mechanisms such as the ext2/ext3 remount-readonly error
- * mode.
- *
- * Journal abort has very specific semantics.  Any existing dirty,
- * unjournaled buffers in the main filesystem will still be written to
- * disk by bdflush, but the journaling mechanism will be suspended
- * immediately and no further transaction commits will be honoured.
- *
- * Any dirty, journaled buffers will be written back to disk without
- * hitting the journal.  Atomicity cannot be guaranteed on an aborted
- * filesystem, but we _do_ attempt to leave as much data as possible
- * behind for fsck to use for cleanup.
- *
- * Any attempt to get a new transaction handle on a journal which is in
- * ABORT state will just result in an -EROFS error return.  A
- * journal_stop on an existing handle will return -EIO if we have
- * entered abort state during the update.
- *
- * Recursive transactions are not disturbed by journal abort until the
- * final journal_stop, which will receive the -EIO error.
- *
- * Finally, the journal_abort call allows the caller to supply an errno
- * which will be recorded (if possible) in the journal superblock.  This
- * allows a client to record failure conditions in the middle of a
- * transaction without having to complete the transaction to record the
- * failure to disk.  ext3_error, for example, now uses this
- * functionality.
- *
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
- */
-
-void journal_abort(journal_t *journal, int errno)
-{
-	__journal_abort_soft(journal, errno);
-}
-
-/**
- * int journal_errno () - returns the journal's error state.
- * @journal: journal to examine.
- *
- * This is the errno number set with journal_abort(), the last
- * time the journal was mounted - if the journal was stopped
- * without calling abort this will be 0.
- *
- * If the journal has been aborted during this mount, -EROFS will
- * be returned.
- */
-int journal_errno(journal_t *journal)
-{
-	int err;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		err = journal->j_errno;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * int journal_clear_err () - clears the journal's error state
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-int journal_clear_err(journal_t *journal)
-{
-	int err = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		journal->j_errno = 0;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * void journal_ack_err() - Ack journal err.
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-void journal_ack_err(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_errno)
-		journal->j_flags |= JFS_ACK_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
-
-int journal_blocks_per_page(struct inode *inode)
-{
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-}
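-
-/*
- * Worked example (illustrative): on a typical 4K-page system
- * (PAGE_CACHE_SHIFT == 12) with a 1K filesystem block size
- * (s_blocksize_bits == 10) this returns 1 << (12 - 10) == 4 blocks
- * per page.
- */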
-
-/*
- * Journal_head storage management
- */
-static struct kmem_cache *journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
-static atomic_t nr_journal_heads = ATOMIC_INIT(0);
-#endif
-
-static int journal_init_journal_head_cache(void)
-{
-	int retval;
-
-	J_ASSERT(journal_head_cache == NULL);
-	journal_head_cache = kmem_cache_create("journal_head",
-				sizeof(struct journal_head),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	retval = 0;
-	if (!journal_head_cache) {
-		retval = -ENOMEM;
-		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
-	}
-	return retval;
-}
-
-static void journal_destroy_journal_head_cache(void)
-{
-	if (journal_head_cache) {
-		kmem_cache_destroy(journal_head_cache);
-		journal_head_cache = NULL;
-	}
-}
-
-/*
- * journal_head splicing and dicing
- */
-static struct journal_head *journal_alloc_journal_head(void)
-{
-	struct journal_head *ret;
-
-#ifdef CONFIG_JBD_DEBUG
-	atomic_inc(&nr_journal_heads);
-#endif
-	ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-	if (ret == NULL) {
-		jbd_debug(1, "out of memory for journal_head\n");
-		printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-				   __func__);
-
-		while (ret == NULL) {
-			yield();
-			ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-		}
-	}
-	return ret;
-}
-
-static void journal_free_journal_head(struct journal_head *jh)
-{
-#ifdef CONFIG_JBD_DEBUG
-	atomic_dec(&nr_journal_heads);
-	memset(jh, JBD_POISON_FREE, sizeof(*jh));
-#endif
-	kmem_cache_free(journal_head_cache, jh);
-}
-
-/*
- * A journal_head is attached to a buffer_head whenever JBD has an
- * interest in the buffer.
- *
- * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
- * is set.  This bit is tested in core kernel code where we need to take
- * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
- * there.
- *
- * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
- *
- * When a buffer has its BH_JBD bit set it is immune from being released by
- * core kernel code, mainly via ->b_count.
- *
- * A journal_head is detached from its buffer_head when the journal_head's
- * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
- * transaction (b_cp_transaction) hold their references to b_jcount.
- *
- * Various places in the kernel want to attach a journal_head to a buffer_head
- * _before_ attaching the journal_head to a transaction.  To protect the
- * journal_head in this situation, journal_add_journal_head elevates the
- * journal_head's b_jcount refcount by one.  The caller must call
- * journal_put_journal_head() to undo this.
- *
- * So the typical usage would be:
- *
- *	(Attach a journal_head if needed.  Increments b_jcount)
- *	struct journal_head *jh = journal_add_journal_head(bh);
- *	...
- *      (Get another reference for transaction)
- *      journal_grab_journal_head(bh);
- *      jh->b_transaction = xxx;
- *      (Put original reference)
- *      journal_put_journal_head(jh);
- */
-
-/*
- * Give a buffer_head a journal_head.
- *
- * May sleep.
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh;
-	struct journal_head *new_jh = NULL;
-
-repeat:
-	if (!buffer_jbd(bh))
-		new_jh = journal_alloc_journal_head();
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-	} else {
-		J_ASSERT_BH(bh,
-			(atomic_read(&bh->b_count) > 0) ||
-			(bh->b_page && bh->b_page->mapping));
-
-		if (!new_jh) {
-			jbd_unlock_bh_journal_head(bh);
-			goto repeat;
-		}
-
-		jh = new_jh;
-		new_jh = NULL;		/* We consumed it */
-		set_buffer_jbd(bh);
-		bh->b_private = jh;
-		jh->b_bh = bh;
-		get_bh(bh);
-		BUFFER_TRACE(bh, "added journal_head");
-	}
-	jh->b_jcount++;
-	jbd_unlock_bh_journal_head(bh);
-	if (new_jh)
-		journal_free_journal_head(new_jh);
-	return bh->b_private;
-}
-
-/*
- * Grab a ref against this buffer_head's journal_head.  If it ended up not
- * having a journal_head, return NULL
- */
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = NULL;
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-		jh->b_jcount++;
-	}
-	jbd_unlock_bh_journal_head(bh);
-	return jh;
-}
-
-static void __journal_remove_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-
-	J_ASSERT_JH(jh, jh->b_jcount >= 0);
-	J_ASSERT_JH(jh, jh->b_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
-	J_ASSERT_BH(bh, buffer_jbd(bh));
-	J_ASSERT_BH(bh, jh2bh(jh) == bh);
-	BUFFER_TRACE(bh, "remove journal_head");
-	if (jh->b_frozen_data) {
-		printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
-		jbd_free(jh->b_frozen_data, bh->b_size);
-	}
-	if (jh->b_committed_data) {
-		printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
-		jbd_free(jh->b_committed_data, bh->b_size);
-	}
-	bh->b_private = NULL;
-	jh->b_bh = NULL;	/* debug, really */
-	clear_buffer_jbd(bh);
-	journal_free_journal_head(jh);
-}
-
-/*
- * Drop a reference on the passed journal_head.  If it fell to zero then
- * release the journal_head from the buffer_head.
- */
-void journal_put_journal_head(struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_lock_bh_journal_head(bh);
-	J_ASSERT_JH(jh, jh->b_jcount > 0);
-	--jh->b_jcount;
-	if (!jh->b_jcount) {
-		__journal_remove_journal_head(bh);
-		jbd_unlock_bh_journal_head(bh);
-		__brelse(bh);
-	} else
-		jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * debugfs tunables
- */
-#ifdef CONFIG_JBD_DEBUG
-
-u8 journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(journal_enable_debug);
-
-static struct dentry *jbd_debugfs_dir;
-static struct dentry *jbd_debug;
-
-static void __init jbd_create_debugfs_entry(void)
-{
-	jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
-	if (jbd_debugfs_dir)
-		jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
-					       jbd_debugfs_dir,
-					       &journal_enable_debug);
-}
-
-static void __exit jbd_remove_debugfs_entry(void)
-{
-	debugfs_remove(jbd_debug);
-	debugfs_remove(jbd_debugfs_dir);
-}
-
-#else
-
-static inline void jbd_create_debugfs_entry(void)
-{
-}
-
-static inline void jbd_remove_debugfs_entry(void)
-{
-}
-
-#endif
-
-struct kmem_cache *jbd_handle_cache;
-
-static int __init journal_init_handle_cache(void)
-{
-	jbd_handle_cache = kmem_cache_create("journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	if (jbd_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void journal_destroy_handle_cache(void)
-{
-	if (jbd_handle_cache)
-		kmem_cache_destroy(jbd_handle_cache);
-}
-
-/*
- * Module startup and shutdown
- */
-
-static int __init journal_init_caches(void)
-{
-	int ret;
-
-	ret = journal_init_revoke_caches();
-	if (ret == 0)
-		ret = journal_init_journal_head_cache();
-	if (ret == 0)
-		ret = journal_init_handle_cache();
-	return ret;
-}
-
-static void journal_destroy_caches(void)
-{
-	journal_destroy_revoke_caches();
-	journal_destroy_journal_head_cache();
-	journal_destroy_handle_cache();
-}
-
-static int __init journal_init(void)
-{
-	int ret;
-
-	BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
-
-	ret = journal_init_caches();
-	if (ret != 0)
-		journal_destroy_caches();
-	jbd_create_debugfs_entry();
-	return ret;
-}
-
-static void __exit journal_exit(void)
-{
-#ifdef CONFIG_JBD_DEBUG
-	int n = atomic_read(&nr_journal_heads);
-	if (n)
-		printk(KERN_ERR "JBD: leaked %d journal_heads!\n", n);
-#endif
-	jbd_remove_debugfs_entry();
-	journal_destroy_caches();
-}
-
-MODULE_LICENSE("GPL");
-module_init(journal_init);
-module_exit(journal_exit);
-
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
deleted file mode 100644
index a748fe2..0000000
--- a/fs/jbd/recovery.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * linux/fs/jbd/recovery.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal recovery routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/blkdev.h>
-#endif
-
-/*
- * Maintain information about the progress of the recovery job, so that
- * the different passes can carry information between them.
- */
-struct recovery_info
-{
-	tid_t		start_transaction;
-	tid_t		end_transaction;
-
-	int		nr_replays;
-	int		nr_revokes;
-	int		nr_revoke_hits;
-};
-
-enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
-static int do_one_pass(journal_t *journal,
-				struct recovery_info *info, enum passtype pass);
-static int scan_revoke_records(journal_t *, struct buffer_head *,
-				tid_t, struct recovery_info *);
-
-#ifdef __KERNEL__
-
-/* Release readahead buffers after use */
-static void journal_brelse_array(struct buffer_head *b[], int n)
-{
-	while (--n >= 0)
-		brelse (b[n]);
-}
-
-
-/*
- * When reading from the journal, we are going through the block device
- * layer directly and so there is no readahead being done for us.  We
- * need to implement any readahead ourselves if we want it to happen at
- * all.  Recovery is basically one long sequential read, so make sure we
- * do the IO in reasonably large chunks.
- *
- * This is not so critical that we need to be enormously clever about
- * the readahead size, though.  128K is a purely arbitrary, good-enough
- * fixed value.
- */
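-
-/*
- * Example (illustrative): with a 4K journal block size the window is
- * 128 * 1024 / 4096 == 32 blocks ahead of the current read position,
- * submitted to the block layer in batches of MAXBUF (8) buffer_heads.
- */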
-
-#define MAXBUF 8
-static int do_readahead(journal_t *journal, unsigned int start)
-{
-	int err;
-	unsigned int max, nbufs, next;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	struct buffer_head * bufs[MAXBUF];
-
-	/* Do up to 128K of readahead */
-	max = start + (128 * 1024 / journal->j_blocksize);
-	if (max > journal->j_maxlen)
-		max = journal->j_maxlen;
-
-	/* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
-	 * a time to the block device IO layer. */
-
-	nbufs = 0;
-
-	for (next = start; next < max; next++) {
-		err = journal_bmap(journal, next, &blocknr);
-
-		if (err) {
-			printk (KERN_ERR "JBD: bad block at offset %u\n",
-				next);
-			goto failed;
-		}
-
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (!bh) {
-			err = -ENOMEM;
-			goto failed;
-		}
-
-		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
-			bufs[nbufs++] = bh;
-			if (nbufs == MAXBUF) {
-				ll_rw_block(READ, nbufs, bufs);
-				journal_brelse_array(bufs, nbufs);
-				nbufs = 0;
-			}
-		} else
-			brelse(bh);
-	}
-
-	if (nbufs)
-		ll_rw_block(READ, nbufs, bufs);
-	err = 0;
-
-failed:
-	if (nbufs)
-		journal_brelse_array(bufs, nbufs);
-	return err;
-}
-
-#endif /* __KERNEL__ */
-
-
-/*
- * Read a block from the journal
- */
-
-static int jread(struct buffer_head **bhp, journal_t *journal,
-		 unsigned int offset)
-{
-	int err;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	*bhp = NULL;
-
-	if (offset >= journal->j_maxlen) {
-		printk(KERN_ERR "JBD: corrupted journal superblock\n");
-		return -EIO;
-	}
-
-	err = journal_bmap(journal, offset, &blocknr);
-
-	if (err) {
-		printk (KERN_ERR "JBD: bad block at offset %u\n",
-			offset);
-		return err;
-	}
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return -ENOMEM;
-
-	if (!buffer_uptodate(bh)) {
-		/* If this is a brand new buffer, start readahead.
-                   Otherwise, we assume we are already reading it.  */
-		if (!buffer_req(bh))
-			do_readahead(journal, offset);
-		wait_on_buffer(bh);
-	}
-
-	if (!buffer_uptodate(bh)) {
-		printk (KERN_ERR "JBD: Failed to read block at offset %u\n",
-			offset);
-		brelse(bh);
-		return -EIO;
-	}
-
-	*bhp = bh;
-	return 0;
-}
-
-
-/*
- * Count the number of in-use tags in a journal descriptor block.
- */
-
-static int count_tags(struct buffer_head *bh, int size)
-{
-	char *			tagp;
-	journal_block_tag_t *	tag;
-	int			nr = 0;
-
-	tagp = &bh->b_data[sizeof(journal_header_t)];
-
-	while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
-		tag = (journal_block_tag_t *) tagp;
-
-		nr++;
-		tagp += sizeof(journal_block_tag_t);
-		if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
-			tagp += 16;
-
-		if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
-			break;
-	}
-
-	return nr;
-}
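
A sketch of the descriptor-block layout that count_tags() walks; the 12- and 8-byte sizes are this note's assumption of the jbd on-disk format, while the 16-byte UUID step comes straight from the code above:

	/*
	 *   offset 0    journal_header_t        (12 bytes: magic, blocktype, sequence)
	 *   offset 12   journal_block_tag_t #0  ( 8 bytes: t_blocknr, t_flags)
	 *               uuid[16]                (omitted when JFS_FLAG_SAME_UUID is set)
	 *               journal_block_tag_t #1 ...
	 *   ...continuing until a tag carries JFS_FLAG_LAST_TAG or the block ends.
	 */
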
-
-
-/* Make sure we wrap around the log correctly! */
-#define wrap(journal, var)						\
-do {									\
-	if (var >= (journal)->j_last)					\
-		var -= ((journal)->j_last - (journal)->j_first);	\
-} while (0)
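
A worked example of the macro with assumed values: given j_first == 1 and j_last == 1024, a traversal that increments past the end of the log wraps back to the first usable block.

	/* wrap(journal, var) with j_first = 1, j_last = 1024 (assumed):
	 *   var = 1023  ->  1023                   (inside the log, untouched)
	 *   var = 1024  ->  1024 - (1024 - 1) = 1  (wrapped to the first block)
	 */
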
-
-/**
- * journal_recover - recovers an on-disk journal
- * @journal: the journal to recover
- *
- * The primary function for recovering the log contents when mounting a
- * journaled device.
- *
- * Recovery is done in three passes.  In the first pass, we look for the
- * end of the log.  In the second, we assemble the list of revoke
- * blocks.  In the third and final pass, we replay any un-revoked blocks
- * in the log.
- */
-int journal_recover(journal_t *journal)
-{
-	int			err, err2;
-	journal_superblock_t *	sb;
-
-	struct recovery_info	info;
-
-	memset(&info, 0, sizeof(info));
-	sb = journal->j_superblock;
-
-	/*
-	 * The journal superblock's s_start field (the current log head)
-	 * is always zero if, and only if, the journal was cleanly
-	 * unmounted.
-	 */
-
-	if (!sb->s_start) {
-		jbd_debug(1, "No recovery required, last transaction %d\n",
-			  be32_to_cpu(sb->s_sequence));
-		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
-		return 0;
-	}
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REVOKE);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REPLAY);
-
-	jbd_debug(1, "JBD: recovery, exit status %d, "
-		  "recovered transactions %u to %u\n",
-		  err, info.start_transaction, info.end_transaction);
-	jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
-		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
-
-	/* Restart the log at the next transaction ID, thus invalidating
-	 * any existing commit records in the log. */
-	journal->j_transaction_sequence = ++info.end_transaction;
-
-	journal_clear_revoke(journal);
-	err2 = sync_blockdev(journal->j_fs_dev);
-	if (!err)
-		err = err2;
-	/* Flush disk caches to get replayed data on the permanent storage */
-	if (journal->j_flags & JFS_BARRIER) {
-		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
-		if (!err)
-			err = err2;
-	}
-
-	return err;
-}
-
-/**
- * journal_skip_recovery - Start journal and wipe existing records
- * @journal: journal to startup
- *
- * Locate any valid recovery information from the journal and set up the
- * journal structures in memory to ignore it (presumably because the
- * caller has evidence that it is out of date).
- * This function doesn't appear to be exported.
- *
- * We perform one pass over the journal to allow us to tell the user how
- * much recovery information is being erased, and to let us initialise
- * the journal transaction sequence numbers to the next unused ID.
- */
-int journal_skip_recovery(journal_t *journal)
-{
-	int			err;
-	struct recovery_info	info;
-
-	memset (&info, 0, sizeof(info));
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-
-	if (err) {
-		printk(KERN_ERR "JBD: error %d scanning journal\n", err);
-		++journal->j_transaction_sequence;
-	} else {
-#ifdef CONFIG_JBD_DEBUG
-		int dropped = info.end_transaction -
-			      be32_to_cpu(journal->j_superblock->s_sequence);
-		jbd_debug(1,
-			  "JBD: ignoring %d transaction%s from the journal.\n",
-			  dropped, (dropped == 1) ? "" : "s");
-#endif
-		journal->j_transaction_sequence = ++info.end_transaction;
-	}
-
-	journal->j_tail = 0;
-	return err;
-}
-
-static int do_one_pass(journal_t *journal,
-			struct recovery_info *info, enum passtype pass)
-{
-	unsigned int		first_commit_ID, next_commit_ID;
-	unsigned int		next_log_block;
-	int			err, success = 0;
-	journal_superblock_t *	sb;
-	journal_header_t *	tmp;
-	struct buffer_head *	bh;
-	unsigned int		sequence;
-	int			blocktype;
-
-	/*
-	 * First thing is to establish what we expect to find in the log
-	 * (in terms of transaction IDs), and where (in terms of log
-	 * block offsets): query the superblock.
-	 */
-
-	sb = journal->j_superblock;
-	next_commit_ID = be32_to_cpu(sb->s_sequence);
-	next_log_block = be32_to_cpu(sb->s_start);
-
-	first_commit_ID = next_commit_ID;
-	if (pass == PASS_SCAN)
-		info->start_transaction = first_commit_ID;
-
-	jbd_debug(1, "Starting recovery pass %d\n", pass);
-
-	/*
-	 * Now we walk through the log, transaction by transaction,
-	 * making sure that each transaction has a commit block in the
-	 * expected place.  Each complete transaction gets replayed back
-	 * into the main filesystem.
-	 */
-
-	while (1) {
-		int			flags;
-		char *			tagp;
-		journal_block_tag_t *	tag;
-		struct buffer_head *	obh;
-		struct buffer_head *	nbh;
-
-		cond_resched();
-
-		/* If we already know where to stop the log traversal,
-		 * check right now that we haven't gone past the end of
-		 * the log. */
-
-		if (pass != PASS_SCAN)
-			if (tid_geq(next_commit_ID, info->end_transaction))
-				break;
-
-		jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n",
-			  next_commit_ID, next_log_block, journal->j_last);
-
-		/* Skip over each chunk of the transaction looking for
-		 * either the next descriptor block or the final commit
-		 * record. */
-
-		jbd_debug(3, "JBD: checking block %u\n", next_log_block);
-		err = jread(&bh, journal, next_log_block);
-		if (err)
-			goto failed;
-
-		next_log_block++;
-		wrap(journal, next_log_block);
-
-		/* What kind of buffer is it?
-		 *
-		 * If it is a descriptor block, check that it has the
-		 * expected sequence number.  Otherwise, we're all done
-		 * here. */
-
-		tmp = (journal_header_t *)bh->b_data;
-
-		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
-			brelse(bh);
-			break;
-		}
-
-		blocktype = be32_to_cpu(tmp->h_blocktype);
-		sequence = be32_to_cpu(tmp->h_sequence);
-		jbd_debug(3, "Found magic %d, sequence %d\n",
-			  blocktype, sequence);
-
-		if (sequence != next_commit_ID) {
-			brelse(bh);
-			break;
-		}
-
-		/* OK, we have a valid descriptor block which matches
-		 * all of the sequence number checks.  What are we going
-		 * to do with it?  That depends on the pass... */
-
-		switch(blocktype) {
-		case JFS_DESCRIPTOR_BLOCK:
-			/* If it is a valid descriptor block, replay it
-			 * in pass REPLAY; otherwise, just skip over the
-			 * blocks it describes. */
-			if (pass != PASS_REPLAY) {
-				next_log_block +=
-					count_tags(bh, journal->j_blocksize);
-				wrap(journal, next_log_block);
-				brelse(bh);
-				continue;
-			}
-
-			/* A descriptor block: we can now write all of
-			 * the data blocks.  Yay, useful work is finally
-			 * getting done here! */
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			while ((tagp - bh->b_data + sizeof(journal_block_tag_t))
-			       <= journal->j_blocksize) {
-				unsigned int io_block;
-
-				tag = (journal_block_tag_t *) tagp;
-				flags = be32_to_cpu(tag->t_flags);
-
-				io_block = next_log_block++;
-				wrap(journal, next_log_block);
-				err = jread(&obh, journal, io_block);
-				if (err) {
-					/* Recover what we can, but
-					 * report failure at the end. */
-					success = err;
-					printk (KERN_ERR
-						"JBD: IO error %d recovering "
-						"block %u in log\n",
-						err, io_block);
-				} else {
-					unsigned int blocknr;
-
-					J_ASSERT(obh != NULL);
-					blocknr = be32_to_cpu(tag->t_blocknr);
-
-					/* If the block has been
-					 * revoked, then we're all done
-					 * here. */
-					if (journal_test_revoke
-					    (journal, blocknr,
-					     next_commit_ID)) {
-						brelse(obh);
-						++info->nr_revoke_hits;
-						goto skip_write;
-					}
-
-					/* Find a buffer for the new
-					 * data being restored */
-					nbh = __getblk(journal->j_fs_dev,
-							blocknr,
-							journal->j_blocksize);
-					if (nbh == NULL) {
-						printk(KERN_ERR
-						       "JBD: Out of memory "
-						       "during recovery.\n");
-						err = -ENOMEM;
-						brelse(bh);
-						brelse(obh);
-						goto failed;
-					}
-
-					lock_buffer(nbh);
-					memcpy(nbh->b_data, obh->b_data,
-							journal->j_blocksize);
-					if (flags & JFS_FLAG_ESCAPE) {
-						*((__be32 *)nbh->b_data) =
-						cpu_to_be32(JFS_MAGIC_NUMBER);
-					}
-
-					BUFFER_TRACE(nbh, "marking dirty");
-					set_buffer_uptodate(nbh);
-					mark_buffer_dirty(nbh);
-					BUFFER_TRACE(nbh, "marking uptodate");
-					++info->nr_replays;
-					/* ll_rw_block(WRITE, 1, &nbh); */
-					unlock_buffer(nbh);
-					brelse(obh);
-					brelse(nbh);
-				}
-
-			skip_write:
-				tagp += sizeof(journal_block_tag_t);
-				if (!(flags & JFS_FLAG_SAME_UUID))
-					tagp += 16;
-
-				if (flags & JFS_FLAG_LAST_TAG)
-					break;
-			}
-
-			brelse(bh);
-			continue;
-
-		case JFS_COMMIT_BLOCK:
-			/* Found an expected commit block: not much to
-			 * do other than move on to the next sequence
-			 * number. */
-			brelse(bh);
-			next_commit_ID++;
-			continue;
-
-		case JFS_REVOKE_BLOCK:
-			/* If we aren't in the REVOKE pass, then we can
-			 * just skip over this block. */
-			if (pass != PASS_REVOKE) {
-				brelse(bh);
-				continue;
-			}
-
-			err = scan_revoke_records(journal, bh,
-						  next_commit_ID, info);
-			brelse(bh);
-			if (err)
-				goto failed;
-			continue;
-
-		default:
-			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
-				  blocktype);
-			brelse(bh);
-			goto done;
-		}
-	}
-
- done:
-	/*
-	 * We broke out of the log scan loop: either we came to the
-	 * known end of the log or we found an unexpected block in the
-	 * log.  If the latter happened, then we know that the "current"
-	 * transaction marks the end of the valid log.
-	 */
-
-	if (pass == PASS_SCAN)
-		info->end_transaction = next_commit_ID;
-	else {
-		/* It's really bad news if different passes end up at
-		 * different places (but possible due to IO errors). */
-		if (info->end_transaction != next_commit_ID) {
-			printk (KERN_ERR "JBD: recovery pass %d ended at "
-				"transaction %u, expected %u\n",
-				pass, next_commit_ID, info->end_transaction);
-			if (!success)
-				success = -EIO;
-		}
-	}
-
-	return success;
-
- failed:
-	return err;
-}
-
-
-/* Scan a revoke record, marking all blocks mentioned as revoked. */
-
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
-			       tid_t sequence, struct recovery_info *info)
-{
-	journal_revoke_header_t *header;
-	int offset, max;
-
-	header = (journal_revoke_header_t *) bh->b_data;
-	offset = sizeof(journal_revoke_header_t);
-	max = be32_to_cpu(header->r_count);
-
-	while (offset < max) {
-		unsigned int blocknr;
-		int err;
-
-		blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
-		offset += 4;
-		err = journal_set_revoke(journal, blocknr, sequence);
-		if (err)
-			return err;
-		++info->nr_revokes;
-	}
-	return 0;
-}
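
One subtlety worth spelling out: r_count is a byte count that includes the revoke header itself, which is why the walk starts at sizeof(journal_revoke_header_t) and runs while offset < max. A sketch of the implied on-disk layout (the 16-byte header size is this note's assumption):

	/*
	 *   offset 0    journal_header_t    (magic, blocktype, sequence)
	 *   offset 12   r_count             (bytes used, header included)
	 *   offset 16   __be32 blocknr[0]
	 *   offset 20   __be32 blocknr[1]
	 *   ...
	 *   offset r_count                   end of valid records
	 */
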
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
deleted file mode 100644
index dcead63..0000000
--- a/fs/jbd/revoke.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * linux/fs/jbd/revoke.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
- *
- * Copyright 2000 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal revoke routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- *
- * Revoke is the mechanism used to prevent old log records for deleted
- * metadata from being replayed on top of newer data using the same
- * blocks.  The revoke mechanism is used in two separate places:
- *
- * + Commit: during commit we write the entire list of the current
- *   transaction's revoked blocks to the journal
- *
- * + Recovery: during recovery we record the transaction ID of all
- *   revoked blocks.  If there are multiple revoke records in the log
- *   for a single block, only the last one counts, and if there is a log
- *   entry for a block beyond the last revoke, then that log entry still
- *   gets replayed.
- *
- * We can get interactions between revokes and new log data within a
- * single transaction:
- *
- * Block is revoked and then journaled:
- *   The desired end result is the journaling of the new block, so we
- *   cancel the revoke before the transaction commits.
- *
- * Block is journaled and then revoked:
- *   The revoke must take precedence over the write of the block, so we
- *   need either to cancel the journal entry or to write the revoke
- *   later in the log than the log block.  In this case, we choose the
- *   latter: journaling a block cancels any revoke record for that block
- *   in the current transaction, so any revoke for that block in the
- *   transaction must have happened after the block was journaled and so
- *   the revoke must take precedence.
- *
- * Block is revoked and then written as data:
- *   The data write is allowed to succeed, but the revoke is _not_
- *   cancelled.  We still need to prevent old log records from
- *   overwriting the new data.  We don't even need to clear the revoke
- *   bit here.
- *
- * We cache revoke status of a buffer in the current transaction in b_states
- * bits.  As the name says, revokevalid flag indicates that the cached revoke
- * status of a buffer is valid and we can rely on the cached status.
- *
- * Revoke information on buffers is a tri-state value:
- *
- * RevokeValid clear:	no cached revoke status, need to look it up
- * RevokeValid set, Revoked clear:
- *			buffer has not been revoked, and cancel_revoke
- *			need do nothing.
- * RevokeValid set, Revoked set:
- *			buffer has been revoked.
- *
- * Locking rules:
- * We keep two hash tables of revoke records. One hash table belongs to the
- * running transaction (it is pointed to by journal->j_revoke), the other one
- * belongs to the committing transaction. Accesses to the second hash table
- * happen only from kjournald and no other thread touches this table.  Also
- * journal_switch_revoke_table(), which switches which hash table belongs to
- * the running and which to the committing transaction, is called only from
- * kjournald. Therefore we need no locks when accessing the hash table
- * belonging to the committing transaction.
- *
- * All users operating on the hash table belonging to the running transaction
- * have a handle to the transaction. Therefore they are safe from kjournald
- * switching hash tables under them. For operations on the lists of entries in
- * the hash table, j_revoke_lock is used.
- *
- * Finally, the replay code also uses the hash tables, but at that moment no
- * one else can touch them (the filesystem isn't mounted yet) and hence no
- * locking is needed.
- */
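
A minimal sketch of how the tri-state above is consumed, written as a hypothetical helper (the real consumer is journal_cancel_revoke() further down; buffer_revokevalid() and buffer_revoked() are the b_state accessors used throughout this file):

	static int revoke_status_needs_lookup(struct buffer_head *bh)
	{
		if (!buffer_revokevalid(bh))
			return 1;	/* no cached status: search the hash table */
		return 0;		/* cached Revoked bit is authoritative */
	}
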
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/bio.h>
-#endif
-#include <linux/log2.h>
-#include <linux/hash.h>
-
-static struct kmem_cache *revoke_record_cache;
-static struct kmem_cache *revoke_table_cache;
-
-/* Each revoke record represents a single revoked block.  During
-   journal replay, this involves recording the transaction ID of the
-   last transaction to revoke this block. */
-
-struct jbd_revoke_record_s
-{
-	struct list_head  hash;
-	tid_t		  sequence;	/* Used for recovery only */
-	unsigned int	  blocknr;
-};
-
-
-/* The revoke table is just a simple hash table of revoke records. */
-struct jbd_revoke_table_s
-{
-	/* It is conceivable that we might want a larger hash table
-	 * for recovery.  Must be a power of two. */
-	int		  hash_size;
-	int		  hash_shift;
-	struct list_head *hash_table;
-};
-
-
-#ifdef __KERNEL__
-static void write_one_revoke_record(journal_t *, transaction_t *,
-				    struct journal_head **, int *,
-				    struct jbd_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct journal_head *, int, int);
-#endif
-
-/* Utility functions to maintain the revoke table */
-
-static inline int hash(journal_t *journal, unsigned int block)
-{
-	struct jbd_revoke_table_s *table = journal->j_revoke;
-
-	return hash_32(block, table->hash_shift);
-}
-
-static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
-			      tid_t seq)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-repeat:
-	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
-	if (!record)
-		goto oom;
-
-	record->sequence = seq;
-	record->blocknr = blocknr;
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-	spin_lock(&journal->j_revoke_lock);
-	list_add(&record->hash, hash_list);
-	spin_unlock(&journal->j_revoke_lock);
-	return 0;
-
-oom:
-	if (!journal_oom_retry)
-		return -ENOMEM;
-	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
-	yield();
-	goto repeat;
-}
-
-/* Find a revoke record in the journal's hash table. */
-
-static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
-						      unsigned int blocknr)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-
-	spin_lock(&journal->j_revoke_lock);
-	record = (struct jbd_revoke_record_s *) hash_list->next;
-	while (&(record->hash) != hash_list) {
-		if (record->blocknr == blocknr) {
-			spin_unlock(&journal->j_revoke_lock);
-			return record;
-		}
-		record = (struct jbd_revoke_record_s *) record->hash.next;
-	}
-	spin_unlock(&journal->j_revoke_lock);
-	return NULL;
-}
-
-void journal_destroy_revoke_caches(void)
-{
-	if (revoke_record_cache) {
-		kmem_cache_destroy(revoke_record_cache);
-		revoke_record_cache = NULL;
-	}
-	if (revoke_table_cache) {
-		kmem_cache_destroy(revoke_table_cache);
-		revoke_table_cache = NULL;
-	}
-}
-
-int __init journal_init_revoke_caches(void)
-{
-	J_ASSERT(!revoke_record_cache);
-	J_ASSERT(!revoke_table_cache);
-
-	revoke_record_cache = kmem_cache_create("revoke_record",
-					   sizeof(struct jbd_revoke_record_s),
-					   0,
-					   SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
-					   NULL);
-	if (!revoke_record_cache)
-		goto record_cache_failure;
-
-	revoke_table_cache = kmem_cache_create("revoke_table",
-					   sizeof(struct jbd_revoke_table_s),
-					   0, SLAB_TEMPORARY, NULL);
-	if (!revoke_table_cache)
-		goto table_cache_failure;
-
-	return 0;
-
-table_cache_failure:
-	journal_destroy_revoke_caches();
-record_cache_failure:
-	return -ENOMEM;
-}
-
-static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
-{
-	int i;
-	struct jbd_revoke_table_s *table;
-
-	table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
-	if (!table)
-		goto out;
-
-	table->hash_size = hash_size;
-	table->hash_shift = ilog2(hash_size);
-	table->hash_table =
-		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-	if (!table->hash_table) {
-		kmem_cache_free(revoke_table_cache, table);
-		table = NULL;
-		goto out;
-	}
-
-	for (i = 0; i < hash_size; i++)
-		INIT_LIST_HEAD(&table->hash_table[i]);
-
-out:
-	return table;
-}
-
-static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
-{
-	int i;
-	struct list_head *hash_list;
-
-	for (i = 0; i < table->hash_size; i++) {
-		hash_list = &table->hash_table[i];
-		J_ASSERT(list_empty(hash_list));
-	}
-
-	kfree(table->hash_table);
-	kmem_cache_free(revoke_table_cache, table);
-}
-
-/* Initialise the revoke table for a given journal to a given size. */
-int journal_init_revoke(journal_t *journal, int hash_size)
-{
-	J_ASSERT(journal->j_revoke_table[0] == NULL);
-	J_ASSERT(is_power_of_2(hash_size));
-
-	journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[0])
-		goto fail0;
-
-	journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[1])
-		goto fail1;
-
-	journal->j_revoke = journal->j_revoke_table[1];
-
-	spin_lock_init(&journal->j_revoke_lock);
-
-	return 0;
-
-fail1:
-	journal_destroy_revoke_table(journal->j_revoke_table[0]);
-fail0:
-	return -ENOMEM;
-}
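
A sketch of the expected call from journal setup code; the hash size must be a power of two, and JOURNAL_REVOKE_DEFAULT_HASH from the jbd headers is the usual choice:

	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
	if (err)
		return err;	/* -ENOMEM if either table allocation failed */
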
-
-/* Destroy a journal's revoke table.  The table must already be empty! */
-void journal_destroy_revoke(journal_t *journal)
-{
-	journal->j_revoke = NULL;
-	if (journal->j_revoke_table[0])
-		journal_destroy_revoke_table(journal->j_revoke_table[0]);
-	if (journal->j_revoke_table[1])
-		journal_destroy_revoke_table(journal->j_revoke_table[1]);
-}
-
-
-#ifdef __KERNEL__
-
-/*
- * journal_revoke: revoke a given buffer_head from the journal.  This
- * prevents the block from being replayed during recovery if we
- * crash after the current transaction commits.  Any subsequent
- * metadata writes of the buffer in this transaction cancel the
- * revoke.
- *
- * Note that this call may block --- it is up to the caller to make
- * sure that there are no further calls to journal_write_metadata
- * before the revoke is complete.  In ext3, this implies calling the
- * revoke before clearing the block bitmap when we are deleting
- * metadata.
- *
- * Revoke performs a journal_forget on any buffer_head passed in as a
- * parameter, but does _not_ forget the buffer_head if the bh was only
- * found implicitly.
- *
- * bh_in may not be a journalled buffer - it may have come off
- * the hash tables without an attached journal_head.
- *
- * If bh_in is non-zero, journal_revoke() will decrement its b_count
- * by one.
- */
-
-int journal_revoke(handle_t *handle, unsigned int blocknr,
-		   struct buffer_head *bh_in)
-{
-	struct buffer_head *bh = NULL;
-	journal_t *journal;
-	struct block_device *bdev;
-	int err;
-
-	might_sleep();
-	if (bh_in)
-		BUFFER_TRACE(bh_in, "enter");
-
-	journal = handle->h_transaction->t_journal;
-	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
-		J_ASSERT (!"Cannot set revoke feature!");
-		return -EINVAL;
-	}
-
-	bdev = journal->j_fs_dev;
-	bh = bh_in;
-
-	if (!bh) {
-		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh)
-			BUFFER_TRACE(bh, "found on hash");
-	}
-#ifdef JBD_EXPENSIVE_CHECKING
-	else {
-		struct buffer_head *bh2;
-
-		/* If there is a different buffer_head lying around in
-		 * memory anywhere... */
-		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh2) {
-			/* ... and it has RevokeValid status... */
-			if (bh2 != bh && buffer_revokevalid(bh2))
-				/* ...then it better be revoked too,
-				 * since it's illegal to create a revoke
-				 * record against a buffer_head which is
-				 * not marked revoked --- that would
-				 * risk missing a subsequent revoke
-				 * cancel. */
-				J_ASSERT_BH(bh2, buffer_revoked(bh2));
-			put_bh(bh2);
-		}
-	}
-#endif
-
-	/* We really ought not ever to revoke twice in a row without
-           first having the revoke cancelled: it's illegal to free a
-           block twice without allocating it in between! */
-	if (bh) {
-		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
-				 "inconsistent data on disk")) {
-			if (!bh_in)
-				brelse(bh);
-			return -EIO;
-		}
-		set_buffer_revoked(bh);
-		set_buffer_revokevalid(bh);
-		if (bh_in) {
-			BUFFER_TRACE(bh_in, "call journal_forget");
-			journal_forget(handle, bh_in);
-		} else {
-			BUFFER_TRACE(bh, "call brelse");
-			__brelse(bh);
-		}
-	}
-
-	jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in);
-	err = insert_revoke_hash(journal, blocknr,
-				handle->h_transaction->t_tid);
-	BUFFER_TRACE(bh_in, "exit");
-	return err;
-}
-
-/*
- * Cancel an outstanding revoke.  For use only internally by the
- * journaling code (called from journal_get_write_access).
- *
- * We trust buffer_revoked() on the buffer if the buffer is already
- * being journaled: if there is no revoke pending on the buffer, then we
- * don't do anything here.
- *
- * This would break if it were possible for a buffer to be revoked and
- * discarded, and then reallocated within the same transaction.  In such
- * a case we would have lost the revoked bit, but when we arrived here
- * the second time we would still have a pending revoke to cancel.  So,
- * do not trust the Revoked bit on buffers unless RevokeValid is also
- * set.
- */
-int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
-{
-	struct jbd_revoke_record_s *record;
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_cancel;
-	int did_revoke = 0;	/* akpm: debug */
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
-
-	/* Is the existing Revoke bit valid?  If so, we trust it, and
-	 * only perform the full cancel if the revoke bit is set.  If
-	 * not, we can't trust the revoke bit, and we need to do the
-	 * full search for a revoke record. */
-	if (test_set_buffer_revokevalid(bh)) {
-		need_cancel = test_clear_buffer_revoked(bh);
-	} else {
-		need_cancel = 1;
-		clear_buffer_revoked(bh);
-	}
-
-	if (need_cancel) {
-		record = find_revoke_record(journal, bh->b_blocknr);
-		if (record) {
-			jbd_debug(4, "cancelled existing revoke on "
-				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
-			spin_lock(&journal->j_revoke_lock);
-			list_del(&record->hash);
-			spin_unlock(&journal->j_revoke_lock);
-			kmem_cache_free(revoke_record_cache, record);
-			did_revoke = 1;
-		}
-	}
-
-#ifdef JBD_EXPENSIVE_CHECKING
-	/* There better not be one left behind by now! */
-	record = find_revoke_record(journal, bh->b_blocknr);
-	J_ASSERT_JH(jh, record == NULL);
-#endif
-
-	/* Finally, have we just cleared revoke on an unhashed
-	 * buffer_head?  If so, we'd better make sure we clear the
-	 * revoked status on any hashed alias too, otherwise the revoke
-	 * state machine will get very upset later on. */
-	if (need_cancel) {
-		struct buffer_head *bh2;
-		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
-		if (bh2) {
-			if (bh2 != bh)
-				clear_buffer_revoked(bh2);
-			__brelse(bh2);
-		}
-	}
-	return did_revoke;
-}
-
-/*
- * journal_clear_buffer_revoked_flags clears the revoked flag of buffers in
- * the revoke table to reflect that there are no revoked buffers in the next
- * transaction which is going to be started.
- */
-void journal_clear_buffer_revoked_flags(journal_t *journal)
-{
-	struct jbd_revoke_table_s *revoke = journal->j_revoke;
-	int i = 0;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		struct list_head *hash_list;
-		struct list_head *list_entry;
-		hash_list = &revoke->hash_table[i];
-
-		list_for_each(list_entry, hash_list) {
-			struct jbd_revoke_record_s *record;
-			struct buffer_head *bh;
-			record = (struct jbd_revoke_record_s *)list_entry;
-			bh = __find_get_block(journal->j_fs_dev,
-					      record->blocknr,
-					      journal->j_blocksize);
-			if (bh) {
-				clear_buffer_revoked(bh);
-				__brelse(bh);
-			}
-		}
-	}
-}
-
-/* journal_switch_revoke_table: select j_revoke for the next transaction.
- * We do not want to suspend any processing until all revokes are
- * written -bzzz
- */
-void journal_switch_revoke_table(journal_t *journal)
-{
-	int i;
-
-	if (journal->j_revoke == journal->j_revoke_table[0])
-		journal->j_revoke = journal->j_revoke_table[1];
-	else
-		journal->j_revoke = journal->j_revoke_table[0];
-
-	for (i = 0; i < journal->j_revoke->hash_size; i++)
-		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
-}
-
-/*
- * Write revoke records to the journal for all entries in the current
- * revoke hash, deleting the entries as we go.
- */
-void journal_write_revoke_records(journal_t *journal,
-				  transaction_t *transaction, int write_op)
-{
-	struct journal_head *descriptor;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-	struct list_head *hash_list;
-	int i, offset, count;
-
-	descriptor = NULL;
-	offset = 0;
-	count = 0;
-
-	/* select revoke table for committing transaction */
-	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
-		journal->j_revoke_table[1] : journal->j_revoke_table[0];
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s *)
-				hash_list->next;
-			write_one_revoke_record(journal, transaction,
-						&descriptor, &offset,
-						record, write_op);
-			count++;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-	if (descriptor)
-		flush_descriptor(journal, descriptor, offset, write_op);
-	jbd_debug(1, "Wrote %d revoke records\n", count);
-}
-
-/*
- * Write out one revoke record.  We need to create a new descriptor
- * block if the old one is full or if we have not already created one.
- */
-
-static void write_one_revoke_record(journal_t *journal,
-				    transaction_t *transaction,
-				    struct journal_head **descriptorp,
-				    int *offsetp,
-				    struct jbd_revoke_record_s *record,
-				    int write_op)
-{
-	struct journal_head *descriptor;
-	int offset;
-	journal_header_t *header;
-
-	/* If we are already aborting, this all becomes a noop.  We
-           still need to go round the loop in
-           journal_write_revoke_records in order to free all of the
-           revoke records: only the IO to the journal is omitted. */
-	if (is_journal_aborted(journal))
-		return;
-
-	descriptor = *descriptorp;
-	offset = *offsetp;
-
-	/* Make sure we have a descriptor with space left for the record */
-	if (descriptor) {
-		if (offset == journal->j_blocksize) {
-			flush_descriptor(journal, descriptor, offset, write_op);
-			descriptor = NULL;
-		}
-	}
-
-	if (!descriptor) {
-		descriptor = journal_get_descriptor_buffer(journal);
-		if (!descriptor)
-			return;
-		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
-		header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-		header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
-		header->h_sequence  = cpu_to_be32(transaction->t_tid);
-
-		/* Record it so that we can wait for IO completion later */
-		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-		journal_file_buffer(descriptor, transaction, BJ_LogCtl);
-
-		offset = sizeof(journal_revoke_header_t);
-		*descriptorp = descriptor;
-	}
-
-	* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
-		cpu_to_be32(record->blocknr);
-	offset += 4;
-	*offsetp = offset;
-}
-
-/*
- * Flush a revoke descriptor out to the journal.  If we are aborting,
- * this is a noop; otherwise we are generating a buffer which needs to
- * be waited for during commit, so it has to go onto the appropriate
- * journal buffer list.
- */
-
-static void flush_descriptor(journal_t *journal,
-			     struct journal_head *descriptor,
-			     int offset, int write_op)
-{
-	journal_revoke_header_t *header;
-	struct buffer_head *bh = jh2bh(descriptor);
-
-	if (is_journal_aborted(journal)) {
-		put_bh(bh);
-		return;
-	}
-
-	header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
-	header->r_count = cpu_to_be32(offset);
-	set_buffer_jwrite(bh);
-	BUFFER_TRACE(bh, "write");
-	set_buffer_dirty(bh);
-	write_dirty_buffer(bh, write_op);
-}
-#endif
-
-/*
- * Revoke support for recovery.
- *
- * Recovery needs to be able to:
- *
- *  record all revoke records, including the tid of the latest instance
- *  of each revoke in the journal
- *
- *  check whether a given block in a given transaction should be replayed
- *  (ie. has not been revoked by a revoke record in that or a subsequent
- *  transaction)
- *
- *  empty the revoke table after recovery.
- */
-
-/*
- * First, setting revoke records.  We create a new revoke record for
- * every block ever revoked in the log as we scan it for recovery, and
- * we update the existing records if we find multiple revokes for a
- * single block.
- */
-
-int journal_set_revoke(journal_t *journal,
-		       unsigned int blocknr,
-		       tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (record) {
-		/* If we have multiple occurrences, only record the
-		 * latest sequence number in the hashed record */
-		if (tid_gt(sequence, record->sequence))
-			record->sequence = sequence;
-		return 0;
-	}
-	return insert_revoke_hash(journal, blocknr, sequence);
-}
-
-/*
- * Test revoke records.  For a given block referenced in the log, has
- * that block been revoked?  A revoke record with a given transaction
- * sequence number revokes all blocks in that transaction and earlier
- * ones, but later transactions still need to be replayed.
- */
-
-int journal_test_revoke(journal_t *journal,
-			unsigned int blocknr,
-			tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (!record)
-		return 0;
-	if (tid_gt(sequence, record->sequence))
-		return 0;
-	return 1;
-}
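
To make the sequence-number semantics concrete, a worked example with assumed numbers: given a revoke record for block 42 with sequence 10,

	journal_test_revoke(journal, 42,  9);	/* 1: revoked, suppress replay  */
	journal_test_revoke(journal, 42, 10);	/* 1: same transaction, revoked */
	journal_test_revoke(journal, 42, 11);	/* 0: later transaction, replay */
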
-
-/*
- * Finally, once recovery is over, we need to clear the revoke table so
- * that it can be reused by the running filesystem.
- */
-
-void journal_clear_revoke(journal_t *journal)
-{
-	int i;
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-
-	revoke = journal->j_revoke;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s*) hash_list->next;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
deleted file mode 100644
index 1695ba8..0000000
--- a/fs/jbd/transaction.c
+++ /dev/null
@@ -1,2237 +0,0 @@
-/*
- * linux/fs/jbd/transaction.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem transaction handling code; part of the ext2fs
- * journaling system.
- *
- * This file manages transactions (compound commits managed by the
- * journaling code) and handles (individual atomic operations by the
- * filesystem).
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/hrtimer.h>
-
-static void __journal_temp_unlink_buffer(struct journal_head *jh);
-
-/*
- * get_transaction: obtain a new transaction_t object.
- *
- * Simply allocate and initialise a new transaction.  Create it in
- * RUNNING state and add it to the current journal (which should not
- * have an existing running transaction: we only make a new transaction
- * once we have started to commit the old one).
- *
- * Preconditions:
- *	The journal MUST be locked.  We don't perform atomic mallocs on the
- *	new transaction	and we can't block without protecting against other
- *	processes trying to touch the journal while it is in transition.
- *
- * Called under j_state_lock
- */
-
-static transaction_t *
-get_transaction(journal_t *journal, transaction_t *transaction)
-{
-	transaction->t_journal = journal;
-	transaction->t_state = T_RUNNING;
-	transaction->t_start_time = ktime_get();
-	transaction->t_tid = journal->j_transaction_sequence++;
-	transaction->t_expires = jiffies + journal->j_commit_interval;
-	spin_lock_init(&transaction->t_handle_lock);
-
-	/* Set up the commit timer for the new transaction. */
-	journal->j_commit_timer.expires =
-				round_jiffies_up(transaction->t_expires);
-	add_timer(&journal->j_commit_timer);
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	journal->j_running_transaction = transaction;
-
-	return transaction;
-}
-
-/*
- * Handle management.
- *
- * A handle_t is an object which represents a single atomic update to a
- * filesystem, and which tracks all of the modifications which form part
- * of that one update.
- */
-
-/*
- * start_this_handle: Given a handle, deal with any locking or stalling
- * needed to make sure that there is enough journal space for the handle
- * to begin.  Attach the handle to a transaction and set up the
- * transaction's buffer credits.
- */
-
-static int start_this_handle(journal_t *journal, handle_t *handle)
-{
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
-	int ret = 0;
-
-	if (nblocks > journal->j_max_transaction_buffers) {
-		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
-		       current->comm, nblocks,
-		       journal->j_max_transaction_buffers);
-		ret = -ENOSPC;
-		goto out;
-	}
-
-alloc_transaction:
-	if (!journal->j_running_transaction) {
-		new_transaction = kzalloc(sizeof(*new_transaction),
-						GFP_NOFS|__GFP_NOFAIL);
-		if (!new_transaction) {
-			ret = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_debug(3, "New handle %p going live.\n", handle);
-
-repeat:
-
-	/*
-	 * We need to hold j_state_lock until t_updates has been incremented,
-	 * for proper journal barrier handling
-	 */
-	spin_lock(&journal->j_state_lock);
-repeat_locked:
-	if (is_journal_aborted(journal) ||
-	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
-		spin_unlock(&journal->j_state_lock);
-		ret = -EROFS;
-		goto out;
-	}
-
-	/* Wait on the journal's transaction barrier if necessary */
-	if (journal->j_barrier_count) {
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_transaction_locked,
-				journal->j_barrier_count == 0);
-		goto repeat;
-	}
-
-	if (!journal->j_running_transaction) {
-		if (!new_transaction) {
-			spin_unlock(&journal->j_state_lock);
-			goto alloc_transaction;
-		}
-		get_transaction(journal, new_transaction);
-		new_transaction = NULL;
-	}
-
-	transaction = journal->j_running_transaction;
-
-	/*
-	 * If the current transaction is locked down for commit, wait for the
-	 * lock to be released.
-	 */
-	if (transaction->t_state == T_LOCKED) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_transaction_locked,
-					&wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * If there is not enough space left in the log to write all potential
-	 * buffers requested by this operation, we need to stall pending a log
-	 * checkpoint to free some more log space.
-	 */
-	spin_lock(&transaction->t_handle_lock);
-	needed = transaction->t_outstanding_credits + nblocks;
-
-	if (needed > journal->j_max_transaction_buffers) {
-		/*
-		 * If the current transaction is already too large, then start
-		 * to commit it: we can then go back and attach this handle to
-		 * a new transaction.
-		 */
-		DEFINE_WAIT(wait);
-
-		jbd_debug(2, "Handle %p starting new commit...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
-				TASK_UNINTERRUPTIBLE);
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * The commit code assumes that it can get enough log space
-	 * without forcing a checkpoint.  This is *critical* for
-	 * correctness: a checkpoint of a buffer which is also
-	 * associated with a committing transaction creates a deadlock,
-	 * so commit simply cannot force through checkpoints.
-	 *
-	 * We must therefore ensure the necessary space in the journal
-	 * *before* starting to dirty potentially checkpointed buffers
-	 * in the new transaction.
-	 *
-	 * The worst part is, any transaction currently committing can
-	 * reduce the free space arbitrarily.  Be careful to account for
-	 * those buffers when checkpointing.
-	 */
-
-	/*
-	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
-	 * a _lot_ of headroom: 1/4 of the journal plus the size of
-	 * the committing transaction.  Really, we only need to give it
-	 * committing_transaction->t_outstanding_credits plus "enough" for
-	 * the log control blocks.
-	 * Also, this test is inconsistent with the matching one in
-	 * journal_extend().
-	 */
-	if (__log_space_left(journal) < jbd_space_needed(journal)) {
-		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		__log_wait_for_space(journal);
-		goto repeat_locked;
-	}
-
-	/* OK, account for the buffers that this operation expects to
-	 * use and add the handle to the running transaction. */
-
-	handle->h_transaction = transaction;
-	transaction->t_outstanding_credits += nblocks;
-	transaction->t_updates++;
-	transaction->t_handle_count++;
-	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-		  handle, nblocks, transaction->t_outstanding_credits,
-		  __log_space_left(journal));
-	spin_unlock(&transaction->t_handle_lock);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_acquire(&handle->h_lockdep_map);
-out:
-	if (unlikely(new_transaction))		/* It's usually NULL */
-		kfree(new_transaction);
-	return ret;
-}
-
-static struct lock_class_key jbd_handle_key;
-
-/* Allocate a new handle.  This should probably be in a slab... */
-static handle_t *new_handle(int nblocks)
-{
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
-	if (!handle)
-		return NULL;
-	handle->h_buffer_credits = nblocks;
-	handle->h_ref = 1;
-
-	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);
-
-	return handle;
-}
-
-/**
- * handle_t *journal_start() - Obtain a new handle.
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffers we might modify
- *
- * We make sure that the transaction can guarantee at least nblocks of
- * modified buffers in the log.  We block until the log can guarantee
- * that much space.
- *
- * This function is visible to journal users (like ext3fs), so is not
- * called with the journal already locked.
- *
- * Return a pointer to a newly allocated handle, or an ERR_PTR() value
- * on failure.
- */
-handle_t *journal_start(journal_t *journal, int nblocks)
-{
-	handle_t *handle = journal_current_handle();
-	int err;
-
-	if (!journal)
-		return ERR_PTR(-EROFS);
-
-	if (handle) {
-		J_ASSERT(handle->h_transaction->t_journal == journal);
-		handle->h_ref++;
-		return handle;
-	}
-
-	handle = new_handle(nblocks);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
-
-	current->journal_info = handle;
-
-	err = start_this_handle(journal, handle);
-	if (err < 0) {
-		jbd_free_handle(handle);
-		current->journal_info = NULL;
-		handle = ERR_PTR(err);
-	}
-	return handle;
-}
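
A minimal sketch of the usual calling pattern; journal_stop() is the matching release from the same API, although it falls outside this hunk:

	handle_t *handle = journal_start(journal, 8);	/* reserve 8 buffer credits */

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... modify up to 8 metadata buffers under this handle ... */
	err = journal_stop(handle);
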
-
-/**
- * int journal_extend() - extend buffer credits.
- * @handle:  handle to 'extend'
- * @nblocks: nr blocks to try to extend by.
- *
- * Some transactions, such as large extends and truncates, can be done
- * atomically all at once or in several stages.  The operation requests
- * a credit for a number of buffer modifications in advance, but can
- * extend its credit if it needs more.
- *
- * journal_extend tries to give the running handle more buffer credits.
- * It does not guarantee the allocation - this is best-effort only.
- * The calling process MUST be able to deal cleanly with a failure to
- * extend here.
- *
- * Return 0 on success, non-zero on failure.
- *
- * return code < 0 implies an error
- * return code > 0 implies normal transaction-full status.
- */
-int journal_extend(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int result;
-	int wanted;
-
-	result = -EIO;
-	if (is_handle_aborted(handle))
-		goto out;
-
-	result = 1;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Don't extend a locked-down transaction! */
-	if (handle->h_transaction->t_state != T_RUNNING) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction not running\n", handle, nblocks);
-		goto error_out;
-	}
-
-	spin_lock(&transaction->t_handle_lock);
-	wanted = transaction->t_outstanding_credits + nblocks;
-
-	if (wanted > journal->j_max_transaction_buffers) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction too large\n", handle, nblocks);
-		goto unlock;
-	}
-
-	if (wanted > __log_space_left(journal)) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "insufficient log space\n", handle, nblocks);
-		goto unlock;
-	}
-
-	handle->h_buffer_credits += nblocks;
-	transaction->t_outstanding_credits += nblocks;
-	result = 0;
-
-	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
-unlock:
-	spin_unlock(&transaction->t_handle_lock);
-error_out:
-	spin_unlock(&journal->j_state_lock);
-out:
-	return result;
-}
-
-
-/**
- * int journal_restart() - restart a handle.
- * @handle:  handle to restart
- * @nblocks: nr credits requested
- *
- * Restart a handle for a multi-transaction filesystem
- * operation.
- *
- * If the journal_extend() call above fails to grant new buffer credits
- * to a running handle, a call to journal_restart will commit the
- * handle's transaction so far and reattach the handle to a new
- * transaction capable of guaranteeing the requested number of
- * credits.
- */
-
-int journal_restart(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int ret;
-
-	/* If we've had an abort of any type, don't even think about
-	 * actually doing the restart! */
-	if (is_handle_aborted(handle))
-		return 0;
-
-	/*
-	 * First unlink the handle from its current transaction, and start the
-	 * commit on that.
-	 */
-	J_ASSERT(transaction->t_updates > 0);
-	J_ASSERT(journal_current_handle() == handle);
-
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-
-	if (!transaction->t_updates)
-		wake_up(&journal->j_wait_updates);
-	spin_unlock(&transaction->t_handle_lock);
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_release(&handle->h_lockdep_map);
-	handle->h_buffer_credits = nblocks;
-	ret = start_this_handle(journal, handle);
-	return ret;
-}
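
The two calls above combine into the usual extend-or-restart idiom; a hedged sketch, relying only on the documented return conventions:

	err = journal_extend(handle, nblocks);
	if (err > 0)			/* transaction full: could not extend */
		err = journal_restart(handle, nblocks);
	if (err < 0)
		goto fail;		/* hard error on either path */
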
-
-
-/**
- * void journal_lock_updates () - establish a transaction barrier.
- * @journal:  Journal to establish a barrier on.
- *
- * This locks out any further updates from being started, and blocks until all
- * existing updates have completed, returning only once the journal is in a
- * quiescent state with no updates running.
- *
- * We do not use a simple mutex for synchronization as there are syscalls which
- * want to return with the filesystem locked and that trips up lockdep. Also,
- * hibernation needs to lock the filesystem but a locked mutex then blocks it.
- * Since locking the filesystem is a rare operation, we use a simple counter
- * and a waitqueue for locking.
- */
-void journal_lock_updates(journal_t *journal)
-{
-	DEFINE_WAIT(wait);
-
-wait:
-	/* Wait for previous locked operation to finish */
-	wait_event(journal->j_wait_transaction_locked,
-		   journal->j_barrier_count == 0);
-
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Check reliably under the lock whether we are the ones winning the race
-	 * and locking the journal
-	 */
-	if (journal->j_barrier_count > 0) {
-		spin_unlock(&journal->j_state_lock);
-		goto wait;
-	}
-	++journal->j_barrier_count;
-
-	/* Wait until there are no running updates */
-	while (1) {
-		transaction_t *transaction = journal->j_running_transaction;
-
-		if (!transaction)
-			break;
-
-		spin_lock(&transaction->t_handle_lock);
-		if (!transaction->t_updates) {
-			spin_unlock(&transaction->t_handle_lock);
-			break;
-		}
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-				TASK_UNINTERRUPTIBLE);
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_updates, &wait);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * void journal_unlock_updates (journal_t* journal) - release barrier
- * @journal:  Journal to release the barrier on.
- *
- * Release a transaction barrier obtained with journal_lock_updates().
- */
-void journal_unlock_updates (journal_t *journal)
-{
-	J_ASSERT(journal->j_barrier_count != 0);
-
-	spin_lock(&journal->j_state_lock);
-	--journal->j_barrier_count;
-	spin_unlock(&journal->j_state_lock);
-	wake_up(&journal->j_wait_transaction_locked);
-}
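
Taken together the pair forms a filesystem-wide barrier; a minimal usage sketch (the caller must not hold a handle here, or it would block waiting for its own update to finish):

	journal_lock_updates(journal);
	/* ... operation that requires a quiescent journal ... */
	journal_unlock_updates(journal);
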
-
-static void warn_dirty_buffer(struct buffer_head *bh)
-{
-	char b[BDEVNAME_SIZE];
-
-	printk(KERN_WARNING
-	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
-	       "There's a risk of filesystem corruption in case of system "
-	       "crash.\n",
-	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
-}
-
-/*
- * If the buffer is already part of the current transaction, then there
- * is nothing we need to do.  If it is already part of a prior
- * transaction which we are still committing to disk, then we need to
- * make sure that we do not overwrite the old copy: we do copy-out to
- * preserve the copy going to disk.  We also account the buffer against
- * the handle's metadata buffer credits (unless the buffer is already
- * part of the transaction, that is).
- *
- */
-static int
-do_get_write_access(handle_t *handle, struct journal_head *jh,
-			int force_copy)
-{
-	struct buffer_head *bh;
-	transaction_t *transaction;
-	journal_t *journal;
-	int error;
-	char *frozen_buffer = NULL;
-	int need_copy = 0;
-
-	if (is_handle_aborted(handle))
-		return -EROFS;
-
-	transaction = handle->h_transaction;
-	journal = transaction->t_journal;
-
-	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
-
-	JBUFFER_TRACE(jh, "entry");
-repeat:
-	bh = jh2bh(jh);
-
-	/* @@@ Need to check for errors here at some point. */
-
-	lock_buffer(bh);
-	jbd_lock_bh_state(bh);
-
-	/* We now hold the buffer lock so it is safe to query the buffer
-	 * state.  Is the buffer dirty?
-	 *
-	 * If so, there are two possibilities.  The buffer may be
-	 * non-journaled, and undergoing a quite legitimate writeback.
-	 * Otherwise, it is journaled, and we don't expect dirty buffers
-	 * in that state (the buffers should be marked JBD_Dirty
-	 * instead.)  So either the IO is being done under our own
-	 * control and this is a bug, or it's a third party IO such as
-	 * dump(8) (which may leave the buffer scheduled for read ---
-	 * ie. locked but not dirty) or tune2fs (which may actually have
-	 * the buffer dirtied, ugh.)  */
-
-	if (buffer_dirty(bh)) {
-		/*
-		 * First question: is this buffer already part of the current
-		 * transaction or the existing committing transaction?
-		 */
-		if (jh->b_transaction) {
-			J_ASSERT_JH(jh,
-				jh->b_transaction == transaction ||
-				jh->b_transaction ==
-					journal->j_committing_transaction);
-			if (jh->b_next_transaction)
-				J_ASSERT_JH(jh, jh->b_next_transaction ==
-							transaction);
-			warn_dirty_buffer(bh);
-		}
-		/*
-		 * In any case we need to clean the dirty flag and we must
-		 * do it under the buffer lock to be sure we don't race
-		 * with running write-out.
-		 */
-		JBUFFER_TRACE(jh, "Journalling dirty buffer");
-		clear_buffer_dirty(bh);
-		set_buffer_jbddirty(bh);
-	}
-
-	unlock_buffer(bh);
-
-	error = -EROFS;
-	if (is_handle_aborted(handle)) {
-		jbd_unlock_bh_state(bh);
-		goto out;
-	}
-	error = 0;
-
-	/*
-	 * The buffer is already part of this transaction if b_transaction or
-	 * b_next_transaction points to it
-	 */
-	if (jh->b_transaction == transaction ||
-	    jh->b_next_transaction == transaction)
-		goto done;
-
-	/*
-	 * this is the first time this transaction is touching this buffer,
-	 * reset the modified flag
-	 */
-	jh->b_modified = 0;
-
-	/*
-	 * If there is already a copy-out version of this buffer, then we don't
-	 * need to make another one
-	 */
-	if (jh->b_frozen_data) {
-		JBUFFER_TRACE(jh, "has frozen data");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		jh->b_next_transaction = transaction;
-		goto done;
-	}
-
-	/* Is there data here we need to preserve? */
-
-	if (jh->b_transaction && jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "owned by older transaction");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-		/* There is one case we have to be very careful about.
-		 * If the committing transaction is currently writing
-		 * this buffer out to disk and has NOT made a copy-out,
-		 * then we cannot modify the buffer contents at all
-		 * right now.  The essence of copy-out is that it is the
-		 * extra copy, not the primary copy, which gets
-		 * journaled.  If the primary copy is already going to
-		 * disk then we cannot do copy-out here. */
-
-		if (jh->b_jlist == BJ_Shadow) {
-			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
-			wait_queue_head_t *wqh;
-
-			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
-
-			JBUFFER_TRACE(jh, "on shadow: sleep");
-			jbd_unlock_bh_state(bh);
-			/* commit wakes up all shadow buffers after IO */
-			for ( ; ; ) {
-				prepare_to_wait(wqh, &wait.wait,
-						TASK_UNINTERRUPTIBLE);
-				if (jh->b_jlist != BJ_Shadow)
-					break;
-				schedule();
-			}
-			finish_wait(wqh, &wait.wait);
-			goto repeat;
-		}
-
-		/* Only do the copy if the currently-owning transaction
-		 * still needs it.  If it is on the Forget list, the
-		 * committing transaction is past that stage.  The
-		 * buffer had better remain locked during the kmalloc,
-		 * but that should be true --- we hold the journal lock
-		 * still and the buffer is already on the BUF_JOURNAL
-		 * list so won't be flushed.
-		 *
-		 * Subtle point, though: if this is a get_undo_access,
-		 * then we will be relying on the frozen_data to contain
-		 * the new value of the committed_data record after the
-		 * transaction, so we HAVE to force the frozen_data copy
-		 * in that case. */
-
-		if (jh->b_jlist != BJ_Forget || force_copy) {
-			JBUFFER_TRACE(jh, "generate frozen data");
-			if (!frozen_buffer) {
-				JBUFFER_TRACE(jh, "allocate memory for buffer");
-				jbd_unlock_bh_state(bh);
-				frozen_buffer =
-					jbd_alloc(jh2bh(jh)->b_size,
-							 GFP_NOFS);
-				if (!frozen_buffer) {
-					printk(KERN_ERR
-					       "%s: OOM for frozen_buffer\n",
-					       __func__);
-					JBUFFER_TRACE(jh, "oom!");
-					error = -ENOMEM;
-					jbd_lock_bh_state(bh);
-					goto done;
-				}
-				goto repeat;
-			}
-			jh->b_frozen_data = frozen_buffer;
-			frozen_buffer = NULL;
-			need_copy = 1;
-		}
-		jh->b_next_transaction = transaction;
-	}
-
-
-	/*
-	 * Finally, if the buffer is not journaled right now, we need to make
-	 * sure it doesn't get written to disk before the caller actually
-	 * commits the new data
-	 */
-	if (!jh->b_transaction) {
-		JBUFFER_TRACE(jh, "no transaction");
-		J_ASSERT_JH(jh, !jh->b_next_transaction);
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		spin_lock(&journal->j_list_lock);
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-		spin_unlock(&journal->j_list_lock);
-	}
-
-done:
-	if (need_copy) {
-		struct page *page;
-		int offset;
-		char *source;
-
-		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
-			    "Possible IO failure.\n");
-		page = jh2bh(jh)->b_page;
-		offset = offset_in_page(jh2bh(jh)->b_data);
-		source = kmap_atomic(page);
-		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-		kunmap_atomic(source);
-	}
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * If we are about to journal a buffer, then any revoke pending on it is
-	 * no longer valid
-	 */
-	journal_cancel_revoke(handle, jh);
-
-out:
-	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd_free(frozen_buffer, bh->b_size);
-
-	JBUFFER_TRACE(jh, "exit");
-	return error;
-}
-
-/**
- * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
- * @handle: transaction to add buffer modifications to
- * @bh:     bh to be used for metadata writes
- *
- * Returns an error code or 0 on success.
- *
- * In full data journalling mode the buffer may be of type BJ_AsyncData,
- * because we're write()ing a buffer which is also part of a shared mapping.
- */
-
-int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
-{
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int rc;
-
-	/* We do not want to get caught playing with fields which the
-	 * log thread also manipulates.  Make sure that the buffer
-	 * completes any outstanding IO before proceeding. */
-	rc = do_get_write_access(handle, jh, 0);
-	journal_put_journal_head(jh);
-	return rc;
-}
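
The canonical sequence built on this call, sketched with journal_dirty_metadata() as the matching completion call from the same API (not part of this hunk):

	err = journal_get_write_access(handle, bh);
	if (err)
		goto out;
	/* ... modify bh->b_data under the handle ... */
	err = journal_dirty_metadata(handle, bh);
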
-
-
-/*
- * When the user wants to journal a newly created buffer_head
- * (ie. getblk() returned a new buffer and we are going to populate it
- * manually rather than reading off disk), then we need to keep the
- * buffer_head locked until it has been completely filled with new
- * data.  In this case, we should be able to make the assertion that
- * the bh is not already part of an existing transaction.
- *
- * The buffer should already be locked by the caller by this point.
- * There is no lock ranking violation: it was a newly created,
- * unlocked buffer beforehand. */
-
-/**
- * int journal_get_create_access() - notify intent to use newly created bh
- * @handle: transaction to add the new buffer to
- * @bh: new buffer.
- *
- * Call this if you create a new bh.
- */
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int err;
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	err = -EROFS;
-	if (is_handle_aborted(handle))
-		goto out;
-	err = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-	/*
-	 * The buffer may already belong to this transaction due to pre-zeroing
-	 * in the filesystem's new_block code.  It may also be on the previous,
-	 * committing transaction's lists, but it HAS to be in Forget state in
-	 * that case: the transaction must have deleted the buffer for it to be
-	 * reused here.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
-		jh->b_transaction == NULL ||
-		(jh->b_transaction == journal->j_committing_transaction &&
-			  jh->b_jlist == BJ_Forget)));
-
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
-
-	if (jh->b_transaction == NULL) {
-		/*
-		 * Previous journal_forget() could have left the buffer
-		 * with jbddirty bit set because it was being committed. When
-		 * the commit finished, we've filed the buffer for
-		 * checkpointing and marked it dirty. Now we are reallocating
-		 * the buffer so the transaction freeing it must have
-		 * committed and so it's safe to clear the dirty bit.
-		 */
-		clear_buffer_dirty(jh2bh(jh));
-
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-	} else if (jh->b_transaction == journal->j_committing_transaction) {
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "set next transaction");
-		jh->b_next_transaction = transaction;
-	}
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
-	 * blocks which contain freed but then revoked metadata.  We need
-	 * to cancel the revoke in case we end up freeing it yet again
- * and then reallocating it as data - this would cause a second revoke,
-	 * which hits an assertion error.
-	 */
-	JBUFFER_TRACE(jh, "cancelling revoke");
-	journal_cancel_revoke(handle, jh);
-out:
-	journal_put_journal_head(jh);
-	return err;
-}
-
-/**
- * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
- * @handle: transaction
- * @bh: buffer to undo
- *
- * Sometimes there is a need to distinguish between metadata which has
- * been committed to disk and that which has not.  The ext3fs code uses
- * this for freeing and allocating space, we have to make sure that we
- * do not reuse freed space until the deallocation has been committed,
- * since if we overwrote that space we would make the delete
- * un-rewindable in case of a crash.
- *
- * To deal with that, journal_get_undo_access requests write access to a
- * buffer for parts of non-rewindable operations such as delete
- * operations on the bitmaps.  The journaling code must keep a copy of
- * the buffer's contents prior to the undo_access call until such time
- * as we know that the buffer has definitely been committed to disk.
- *
- * We never need to know which transaction the committed data is part
- * of, buffers touched here are guaranteed to be dirtied later and so
- * will be committed to a new transaction in due course, at which point
- * we can discard the old committed data pointer.
- *
- * Returns error number or 0 on success.
- */
-int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
-{
-	int err;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	char *committed_data = NULL;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * Do this first --- it can drop the journal lock, so we want to
-	 * make sure that obtaining the committed_data is done
-	 * atomically wrt. completion of any outstanding commits.
-	 */
-	err = do_get_write_access(handle, jh, 1);
-	if (err)
-		goto out;
-
-repeat:
-	if (!jh->b_committed_data) {
-		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
-		if (!committed_data) {
-			printk(KERN_ERR "%s: No memory for committed data\n",
-				__func__);
-			err = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data) {
-		/* Copy out the current buffer contents into the
-		 * preserved, committed copy. */
-		JBUFFER_TRACE(jh, "generate b_committed data");
-		if (!committed_data) {
-			jbd_unlock_bh_state(bh);
-			goto repeat;
-		}
-
-		jh->b_committed_data = committed_data;
-		committed_data = NULL;
-		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
-	}
-	jbd_unlock_bh_state(bh);
-out:
-	journal_put_journal_head(jh);
-	if (unlikely(committed_data))
-		jbd_free(committed_data, bh->b_size);
-	return err;
-}
-
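
The repeat loop above is the classic allocate-outside-the-lock pattern: drop the bh state lock, allocate with GFP_NOFS, retake the lock, and re-check b_committed_data in case another thread installed a copy while we slept in the allocator. A minimal userspace sketch of the same idea, using pthreads and hypothetical names:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct record {
	pthread_mutex_t lock;
	char *committed_copy;		/* analogue of jh->b_committed_data */
	char data[64];			/* analogue of the buffer contents */
};

/* Preserve a copy of r->data exactly once, allocating with the lock
 * dropped; if another thread installed the copy while we were in
 * malloc(), the spare allocation is freed at the end, just as the
 * usually-NULL committed_data is freed above. */
int preserve_committed_copy(struct record *r)
{
	char *spare = NULL;

	pthread_mutex_lock(&r->lock);
	while (!r->committed_copy) {
		if (!spare) {
			pthread_mutex_unlock(&r->lock);
			spare = malloc(sizeof(r->data));
			if (!spare)
				return -1;	/* analogue of -ENOMEM */
			pthread_mutex_lock(&r->lock);
			continue;		/* re-check under the lock */
		}
		r->committed_copy = spare;
		spare = NULL;
		memcpy(r->committed_copy, r->data, sizeof(r->data));
	}
	pthread_mutex_unlock(&r->lock);
	free(spare);				/* no-op when spare is NULL */
	return 0;
}
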
-/**
- * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
- * @handle: transaction
- * @bh: bufferhead to mark
- *
- * Description:
- * Mark a buffer as containing dirty data which needs to be flushed before
- * we can commit the current transaction.
- *
- * The buffer is placed on the transaction's data list and is marked as
- * belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * journal_dirty_data() can be called via page_launder->ext3_writepage
- * by kswapd.
- */
-int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_brelse = 0;
-	struct journal_head *jh;
-	int ret = 0;
-
-	if (is_handle_aborted(handle))
-		return ret;
-
-	jh = journal_add_journal_head(bh);
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * The buffer could *already* be dirty.  Writeout can start
-	 * at any time.
-	 */
-	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
-
-	/*
-	 * What if the buffer is already part of a running transaction?
-	 *
-	 * There are two cases:
-	 * 1) It is part of the current running transaction.  Refile it,
-	 *    just in case we have allocated it as metadata, deallocated
-	 *    it, then reallocated it as data.
-	 * 2) It is part of the previous, still-committing transaction.
-	 *    If all we want to do is to guarantee that the buffer will be
-	 *    written to disk before this new transaction commits, then
-	 *    being sure that the *previous* transaction has this same
-	 *    property is sufficient for us!  Just leave it on its old
-	 *    transaction.
-	 *
-	 * In case (2), the buffer must not already exist as metadata
-	 * --- that would violate write ordering (a transaction is free
-	 * to write its data at any point, even before the previous
-	 * committing transaction has committed).  The caller must
-	 * never, ever allow this to happen: there's nothing we can do
-	 * about it in this layer.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	/* Now that we have bh_state locked, are we really still mapped? */
-	if (!buffer_mapped(bh)) {
-		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
-		goto no_journal;
-	}
-
-	if (jh->b_transaction) {
-		JBUFFER_TRACE(jh, "has transaction");
-		if (jh->b_transaction != handle->h_transaction) {
-			JBUFFER_TRACE(jh, "belongs to older transaction");
-			J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-			/* @@@ IS THIS TRUE  ? */
-			/*
-			 * Not any more.  Scenario: someone does a write()
-			 * in data=journal mode.  The buffer's transaction has
-			 * moved into commit.  Then someone does another
-			 * write() to the file.  We do the frozen data copyout
-			 * and set b_next_transaction to point to j_running_t.
-			 * And while we're in that state, someone does a
-			 * writepage() in an attempt to pageout the same area
-			 * of the file via a shared mapping.  At present that
-			 * calls journal_dirty_data(), and we get right here.
-			 * It may be too late to journal the data.  Simply
-			 * falling through to the next test will suffice: the
-			 * data will be dirty and will be checkpointed. The
-			 * ordering comments in the next comment block still
-			 * apply.
-			 */
-			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-
-			/*
-			 * If we're journalling data, and this buffer was
-			 * subject to a write(), it could be metadata, forget
-			 * or shadow against the committing transaction.  Now,
-			 * someone has dirtied the same darn page via a mapping
-			 * and it is being writepage()'d.
-			 * We *could* just steal the page from commit, with some
-			 * fancy locking there.  Instead, we just skip it -
-			 * don't tie the page's buffers to the new transaction
-			 * at all.
-			 * Implication: if we crash before the writepage() data
-			 * is written into the filesystem, recovery will replay
-			 * the write() data.
-			 */
-			if (jh->b_jlist != BJ_None &&
-					jh->b_jlist != BJ_SyncData &&
-					jh->b_jlist != BJ_Locked) {
-				JBUFFER_TRACE(jh, "Not stealing");
-				goto no_journal;
-			}
-
-			/*
-			 * This buffer may be undergoing writeout in commit.  We
-			 * can't return from here and let the caller dirty it
-			 * again because that can cause the write-out loop in
-			 * commit to never terminate.
-			 */
-			if (buffer_dirty(bh)) {
-				get_bh(bh);
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				need_brelse = 1;
-				sync_dirty_buffer(bh);
-				jbd_lock_bh_state(bh);
-				spin_lock(&journal->j_list_lock);
-				/* Since we dropped the lock... */
-				if (!buffer_mapped(bh)) {
-					JBUFFER_TRACE(jh, "buffer got unmapped");
-					goto no_journal;
-				}
-				/* The buffer may become locked again at any
-				   time if it is redirtied */
-			}
-
-			/*
-			 * We cannot remove the buffer with io error from the
-			 * committing transaction, because otherwise it would
-			 * miss the error and the commit would not abort.
-			 */
-			if (unlikely(!buffer_uptodate(bh))) {
-				ret = -EIO;
-				goto no_journal;
-			}
-			/* We might have slept so buffer could be refiled now */
-			if (jh->b_transaction != NULL &&
-			    jh->b_transaction != handle->h_transaction) {
-				JBUFFER_TRACE(jh, "unfile from commit");
-				__journal_temp_unlink_buffer(jh);
-				/* It still points to the committing
-				 * transaction; move it to this one so
-				 * that the refile assert checks are
-				 * happy. */
-				jh->b_transaction = handle->h_transaction;
-			}
-			/* The buffer will be refiled below */
-
-		}
-		/*
-		 * Special case --- the buffer might actually have been
-		 * allocated and then immediately deallocated in the previous,
-		 * committing transaction, so might still be left on that
-		 * transaction's metadata lists.
-		 */
-		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
-			JBUFFER_TRACE(jh, "not on correct data list: unfile");
-			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
-			JBUFFER_TRACE(jh, "file as data");
-			__journal_file_buffer(jh, handle->h_transaction,
-						BJ_SyncData);
-		}
-	} else {
-		JBUFFER_TRACE(jh, "not on a transaction");
-		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
-	}
-no_journal:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	if (need_brelse) {
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	JBUFFER_TRACE(jh, "exit");
-	journal_put_journal_head(jh);
-	return ret;
-}
-
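
The buffer_dirty() branch above shows a recurring shape: drop both locks around the blocking sync_dirty_buffer() call, retake them, and re-validate the buffer's state, because everything may have changed while we slept. A compilable userspace sketch of that shape (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

struct buf {
	pthread_mutex_t lock;
	bool dirty;
	bool mapped;
};

/* Stand-in for sync_dirty_buffer(): a blocking write-out that must be
 * called without the lock held. */
static void writeback(struct buf *b)
{
	(void)b;
}

/* Returns false if the buffer went away while we were unlocked. */
static bool flush_and_revalidate(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	if (b->dirty) {
		pthread_mutex_unlock(&b->lock);
		writeback(b);
		pthread_mutex_lock(&b->lock);
		if (!b->mapped) {	/* got unmapped while unlocked */
			pthread_mutex_unlock(&b->lock);
			return false;
		}
		b->dirty = false;
	}
	pthread_mutex_unlock(&b->lock);
	return true;
}
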
-/**
- * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
- * @handle: transaction to add buffer to.
- * @bh: buffer to mark
- *
- * Mark dirty metadata which needs to be journaled as part of the current
- * transaction.
- *
- * The buffer is placed on the transaction's metadata list and is marked
- * as belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * Special care needs to be taken if the buffer already belongs to the
- * current committing transaction (in which case we should have frozen
- * data present for that commit).  In that case, we don't relink the
- * buffer: that only gets done when the old transaction finally
- * completes its commit.
- */
-int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = bh2jh(bh);
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	JBUFFER_TRACE(jh, "entry");
-	if (is_handle_aborted(handle))
-		goto out;
-
-	jbd_lock_bh_state(bh);
-
-	if (jh->b_modified == 0) {
-		/*
-		 * This buffer has been modified and is becoming part
-		 * of the transaction. This needs to be done
-		 * once per transaction -bzzz
-		 */
-		jh->b_modified = 1;
-		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
-		handle->h_buffer_credits--;
-	}
-
-	/*
-	 * fastpath, to avoid expensive locking.  If this buffer is already
-	 * on the running transaction's metadata list there is nothing to do.
-	 * Nobody can take it off again because there is a handle open.
-	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
-	 * result in this test being false, so we go in and take the locks.
-	 */
-	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
-		JBUFFER_TRACE(jh, "fastpath");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_running_transaction);
-		goto out_unlock_bh;
-	}
-
-	set_buffer_jbddirty(bh);
-
-	/*
-	 * Metadata already on the current transaction list doesn't
-	 * need to be filed.  Metadata on another transaction's list must
-	 * be committing, and will be refiled once the commit completes:
-	 * leave it alone for now.
-	 */
-	if (jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "already on other transaction");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
-		/* And this case is illegal: we can't reuse another
-		 * transaction's data buffer, ever. */
-		goto out_unlock_bh;
-	}
-
-	/* That test should have eliminated the following case: */
-	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
-
-	JBUFFER_TRACE(jh, "file as BJ_Metadata");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
-	spin_unlock(&journal->j_list_lock);
-out_unlock_bh:
-	jbd_unlock_bh_state(bh);
-out:
-	JBUFFER_TRACE(jh, "exit");
-	return 0;
-}
-
-/*
- * journal_release_buffer: undo a get_write_access without any buffer
- * updates, if the update decided in the end that it didn't need access.
- *
- */
-void
-journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-	BUFFER_TRACE(bh, "entry");
-}
-
-/**
- * int journal_forget() - bforget() for potentially-journaled buffers.
- * @handle: transaction handle
- * @bh:     bh to 'forget'
- *
- * We can only do the bforget if there are no commits pending against the
- * buffer.  If the buffer is dirty in the current running transaction we
- * can safely unlink it.
- *
- * bh may not be a journalled buffer at all - it may be a non-JBD
- * buffer which came off the hashtable.  Check for this.
- *
- * Decrements bh->b_count by one.
- *
- * Allow this call even if the handle has aborted --- it may be part of
- * the caller's cleanup after an abort.
- */
-int journal_forget(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh;
-	int drop_reserve = 0;
-	int err = 0;
-	int was_modified = 0;
-
-	BUFFER_TRACE(bh, "entry");
-
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	if (!buffer_jbd(bh))
-		goto not_jbd;
-	jh = bh2jh(bh);
-
-	/* Critical error: attempting to delete a bitmap buffer, maybe?
-	 * Don't do any jbd operations, and return an error. */
-	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
-			 "inconsistent data on disk")) {
-		err = -EIO;
-		goto not_jbd;
-	}
-
-	/* keep track of whether or not this transaction modified us */
-	was_modified = jh->b_modified;
-
-	/*
-	 * The buffer's going from the transaction, we must drop
-	 * all references -bzzz
-	 */
-	jh->b_modified = 0;
-
-	if (jh->b_transaction == handle->h_transaction) {
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-
-		/* If we are forgetting a buffer which is already part
-		 * of this transaction, then we can just drop it from
-		 * the transaction immediately. */
-		clear_buffer_dirty(bh);
-		clear_buffer_jbddirty(bh);
-
-		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
-
-		/*
-		 * we only want to drop a reference if this transaction
-		 * modified the buffer
-		 */
-		if (was_modified)
-			drop_reserve = 1;
-
-		/*
-		 * We are no longer going to journal this buffer.
-		 * However, the commit of this transaction is still
-		 * important to the buffer: the delete that we are now
-		 * processing might obsolete an old log entry, so by
-		 * committing, we can satisfy the buffer's checkpoint.
-		 *
-		 * So, if we have a checkpoint on the buffer, we should
-		 * now refile the buffer on our BJ_Forget list so that
-		 * we know to remove the checkpoint after we commit.
-		 */
-
-		if (jh->b_cp_transaction) {
-			__journal_temp_unlink_buffer(jh);
-			__journal_file_buffer(jh, transaction, BJ_Forget);
-		} else {
-			__journal_unfile_buffer(jh);
-			if (!buffer_jbd(bh)) {
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				__bforget(bh);
-				goto drop;
-			}
-		}
-	} else if (jh->b_transaction) {
-		J_ASSERT_JH(jh, (jh->b_transaction ==
-				 journal->j_committing_transaction));
-		/* However, if the buffer is still owned by a prior
-		 * (committing) transaction, we can't drop it yet... */
-		JBUFFER_TRACE(jh, "belongs to older transaction");
-		/* ... but we CAN drop it from the new transaction if we
-		 * have also modified it since the original commit. */
-
-		if (jh->b_next_transaction) {
-			J_ASSERT(jh->b_next_transaction == transaction);
-			jh->b_next_transaction = NULL;
-
-			/*
-			 * only drop a reference if this transaction modified
-			 * the buffer
-			 */
-			if (was_modified)
-				drop_reserve = 1;
-		}
-	}
-
-not_jbd:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-drop:
-	if (drop_reserve) {
-		/* no need to reserve log space for this block -bzzz */
-		handle->h_buffer_credits++;
-	}
-	return err;
-}
-
-/**
- * int journal_stop() - complete a transaction
- * @handle: transaction to complete.
- *
- * All done for a particular handle.
- *
- * There is not much action needed here.  We just return any remaining
- * buffer credits to the transaction and remove the handle.  The only
- * complication is that we need to start a commit operation if the
- * filesystem is marked for synchronous update.
- *
- * journal_stop itself will not usually return an error, but it may
- * do so in unusual circumstances.  In particular, expect it to
- * return -EIO if a journal_abort has been executed since the
- * transaction began.
- */
-int journal_stop(handle_t *handle)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int err;
-	pid_t pid;
-
-	J_ASSERT(journal_current_handle() == handle);
-
-	if (is_handle_aborted(handle))
-		err = -EIO;
-	else {
-		J_ASSERT(transaction->t_updates > 0);
-		err = 0;
-	}
-
-	if (--handle->h_ref > 0) {
-		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
-			  handle->h_ref);
-		return err;
-	}
-
-	jbd_debug(4, "Handle %p going down\n", handle);
-
-	/*
-	 * Implement synchronous transaction batching.  If the handle
-	 * was synchronous, don't force a commit immediately.  Let's
-	 * yield and let another thread piggyback onto this transaction.
-	 * Keep doing that while new threads continue to arrive.
-	 * It doesn't cost much - we're about to run a commit and sleep
-	 * on IO anyway.  Speeds up many-threaded, many-dir operations
-	 * by 30x or more...
-	 *
-	 * We try and optimize the sleep time against what the underlying disk
-	 * can do, instead of having a static sleep time.  This is useful for
-	 * the case where our storage is so fast that it is more optimal to go
-	 * ahead and force a flush and wait for the transaction to be committed
-	 * than it is to wait for an arbitrary amount of time for new writers to
-	 * join the transaction.  We achieve this by measuring how long it takes
-	 * to commit a transaction, and compare it with how long this
-	 * transaction has been running, and if run time < commit time then we
-	 * sleep for the delta and commit.  This greatly helps super fast disks
-	 * that would see slowdowns as more threads started doing fsyncs.
-	 *
-	 * But don't do this if this process was the most recent one to
-	 * perform a synchronous write.  We do this to detect the case where a
-	 * single process is doing a stream of sync writes.  No point in waiting
-	 * for joiners in that case.
-	 */
-	pid = current->pid;
-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
-		u64 commit_time, trans_time;
-
-		journal->j_last_sync_writer = pid;
-
-		spin_lock(&journal->j_state_lock);
-		commit_time = journal->j_average_commit_time;
-		spin_unlock(&journal->j_state_lock);
-
-		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
-						   transaction->t_start_time));
-
-		commit_time = min_t(u64, commit_time,
-				    1000*jiffies_to_usecs(1));
-
-		if (trans_time < commit_time) {
-			ktime_t expires = ktime_add_ns(ktime_get(),
-						       commit_time);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
-		}
-	}
-
-	current->journal_info = NULL;
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-	if (!transaction->t_updates) {
-		wake_up(&journal->j_wait_updates);
-		if (journal->j_barrier_count)
-			wake_up(&journal->j_wait_transaction_locked);
-	}
-
-	/*
-	 * If the handle is marked SYNC, we need to set another commit
-	 * going!  We also want to force a commit if the current
-	 * transaction is occupying too much of the log, or if the
-	 * transaction is too old now.
-	 */
-	if (handle->h_sync ||
-			transaction->t_outstanding_credits >
-				journal->j_max_transaction_buffers ||
-			time_after_eq(jiffies, transaction->t_expires)) {
-		/* Do this even for aborted journals: an abort still
-		 * completes the commit thread, it just doesn't write
-		 * anything to disk. */
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&transaction->t_handle_lock);
-		jbd_debug(2, "transaction too old, requesting commit for "
-					"handle %p\n", handle);
-		/* This is non-blocking */
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-
-		/*
-		 * Special case: JFS_SYNC synchronous updates require us
-		 * to wait for the commit to complete.
-		 */
-		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
-			err = log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	lock_map_release(&handle->h_lockdep_map);
-
-	jbd_free_handle(handle);
-	return err;
-}
-
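
The batching block above boils down to simple arithmetic: if the transaction has run for less than one measured commit time, sleep for the difference so other synchronous writers can join, with the wait clamped (via the min_t() above) to roughly one scheduler tick. A userspace sketch of that calculation, with hypothetical names:

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Sleep for (avg_commit_ns - elapsed) before forcing a commit, so
 * that other synchronous writers can piggyback on the same commit. */
static void batch_before_commit(uint64_t start_ns, uint64_t avg_commit_ns,
				uint64_t max_wait_ns)
{
	uint64_t run_ns = now_ns() - start_ns;

	if (avg_commit_ns > max_wait_ns)	/* the min_t() clamp above */
		avg_commit_ns = max_wait_ns;
	if (run_ns < avg_commit_ns) {
		uint64_t delta = avg_commit_ns - run_ns;
		struct timespec ts = {
			.tv_sec = (time_t)(delta / 1000000000ull),
			.tv_nsec = (long)(delta % 1000000000ull),
		};
		nanosleep(&ts, NULL);
	}
}
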
-/**
- * int journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * For synchronous operations: force any uncommitted transactions
- * to disk.  May seem kludgy, but it reuses all the handle batching
- * code in a very simple manner.
- */
-int journal_force_commit(journal_t *journal)
-{
-	handle_t *handle;
-	int ret;
-
-	handle = journal_start(journal, 1);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-	} else {
-		handle->h_sync = 1;
-		ret = journal_stop(handle);
-	}
-	return ret;
-}
-
-/*
- *
- * List management code snippets: various functions for manipulating the
- * transaction buffer lists.
- *
- */
-
-/*
- * Append a buffer to a transaction list, given the transaction's list head
- * pointer.
- *
- * j_list_lock is held.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (!*list) {
-		jh->b_tnext = jh->b_tprev = jh;
-		*list = jh;
-	} else {
-		/* Insert at the tail of the list to preserve order */
-		struct journal_head *first = *list, *last = first->b_tprev;
-		jh->b_tprev = last;
-		jh->b_tnext = first;
-		last->b_tnext = first->b_tprev = jh;
-	}
-}
-
-/*
- * Remove a buffer from a transaction list, given the transaction's list
- * head pointer.
- *
- * Called with j_list_lock held, and the journal may not be locked.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (*list == jh) {
-		*list = jh->b_tnext;
-		if (*list == jh)
-			*list = NULL;
-	}
-	jh->b_tprev->b_tnext = jh->b_tnext;
-	jh->b_tnext->b_tprev = jh->b_tprev;
-}
-
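
Both helpers above operate on a circular doubly-linked list addressed through a single head pointer: an empty list is NULL, inserting before *list's b_tprev is a tail append, and deleting the last element must reset the head. The same structure as a standalone C program:

#include <stdio.h>

struct node {
	int val;
	struct node *prev, *next;	/* b_tprev / b_tnext analogues */
};

/* Insert at the tail so arrival order is preserved, as above. */
static void blist_add(struct node **list, struct node *n)
{
	if (!*list) {
		n->next = n->prev = n;
		*list = n;
	} else {
		struct node *first = *list, *last = first->prev;

		n->prev = last;
		n->next = first;
		last->next = first->prev = n;
	}
}

static void blist_del(struct node **list, struct node *n)
{
	if (*list == n) {
		*list = n->next;
		if (*list == n)		/* n was the only element */
			*list = NULL;
	}
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	struct node *list = NULL, *p;

	blist_add(&list, &a);
	blist_add(&list, &b);
	blist_add(&list, &c);
	blist_del(&list, &b);

	p = list;
	do {
		printf("%d\n", p->val);	/* prints 1 then 3 */
		p = p->next;
	} while (p != list);
	return 0;
}
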
-/*
- * Remove a buffer from the appropriate transaction list.
- *
- * Note that this function can *change* the value of
- * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
- * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
- * is holding onto a copy of one of these pointers, it could go bad.
- * Generally the caller needs to re-read the pointer from the transaction_t.
- *
- * Called under j_list_lock.  The journal may not be locked.
- */
-static void __journal_temp_unlink_buffer(struct journal_head *jh)
-{
-	struct journal_head **list = NULL;
-	transaction_t *transaction;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	transaction = jh->b_transaction;
-	if (transaction)
-		assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	if (jh->b_jlist != BJ_None)
-		J_ASSERT_JH(jh, transaction != NULL);
-
-	switch (jh->b_jlist) {
-	case BJ_None:
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers--;
-		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list = &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_del_buffer(list, jh);
-	jh->b_jlist = BJ_None;
-	if (test_clear_buffer_jbddirty(bh))
-		mark_buffer_dirty(bh);	/* Expose it to the VM */
-}
-
-/*
- * Remove buffer from all transactions.
- *
- * Called with bh_state lock and j_list_lock
- *
- * jh and bh may be already freed when this function returns.
- */
-void __journal_unfile_buffer(struct journal_head *jh)
-{
-	__journal_temp_unlink_buffer(jh);
-	jh->b_transaction = NULL;
-	journal_put_journal_head(jh);
-}
-
-void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_unfile_buffer(jh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-}
-
-/*
- * Called from journal_try_to_free_buffers().
- *
- * Called under jbd_lock_bh_state(bh)
- */
-static void
-__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
-{
-	struct journal_head *jh;
-
-	jh = bh2jh(bh);
-
-	if (buffer_locked(bh) || buffer_dirty(bh))
-		goto out;
-
-	if (jh->b_next_transaction != NULL)
-		goto out;
-
-	spin_lock(&journal->j_list_lock);
-	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
-		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
-			/* A written-back ordered data buffer */
-			JBUFFER_TRACE(jh, "release data");
-			__journal_unfile_buffer(jh);
-		}
-	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
-		/* written-back checkpointed metadata buffer */
-		if (jh->b_jlist == BJ_None) {
-			JBUFFER_TRACE(jh, "remove from checkpoint list");
-			__journal_remove_checkpoint(jh);
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-out:
-	return;
-}
-
-/**
- * int journal_try_to_free_buffers() - try to free page buffers.
- * @journal: journal for operation
- * @page: to try and free
- * @gfp_mask: we use the mask to detect how hard we should try to release
- * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit code
- * to release the buffers.
- *
- * For all the buffers on this page,
- * if they are fully written out ordered data, move them onto BUF_CLEAN
- * so try_to_free_buffers() can reap them.
- *
- * This function returns non-zero if we wish try_to_free_buffers()
- * to be called. We do this if the page is releasable by try_to_free_buffers().
- * We also do it if the page has locked or dirty buffers and the caller wants
- * us to perform sync or async writeout.
- *
- * This complicates JBD locking somewhat.  We aren't protected by the
- * BKL here.  We wish to remove the buffer from its committing or
- * running transaction's ->t_datalist via __journal_unfile_buffer.
- *
- * This may *change* the value of transaction_t->t_datalist, so anyone
- * who looks at t_datalist needs to lock against this function.
- *
- * Even worse, someone may be doing a journal_dirty_data on this
- * buffer.  So we need to lock against that.  journal_dirty_data()
- * will come out of the lock with the buffer dirty, which makes it
- * ineligible for release here.
- *
- * Who else is affected by this?  hmm...  Really the only contender
- * is do_get_write_access() - it could be looking at the buffer while
- * journal_try_to_free_buffer() is changing its state.  But that
- * cannot happen because we never reallocate freed data as metadata
- * while the data is part of a transaction.  Yes?
- *
- * Return 0 on failure, 1 on success
- */
-int journal_try_to_free_buffers(journal_t *journal,
-				struct page *page, gfp_t gfp_mask)
-{
-	struct buffer_head *head;
-	struct buffer_head *bh;
-	int ret = 0;
-
-	J_ASSERT(PageLocked(page));
-
-	head = page_buffers(page);
-	bh = head;
-	do {
-		struct journal_head *jh;
-
-		/*
-		 * We take our own ref against the journal_head here to avoid
-		 * having to add tons of locking around each instance of
-		 * journal_put_journal_head().
-		 */
-		jh = journal_grab_journal_head(bh);
-		if (!jh)
-			continue;
-
-		jbd_lock_bh_state(bh);
-		__journal_try_to_free_buffer(journal, bh);
-		journal_put_journal_head(jh);
-		jbd_unlock_bh_state(bh);
-		if (buffer_jbd(bh))
-			goto busy;
-	} while ((bh = bh->b_this_page) != head);
-
-	ret = try_to_free_buffers(page);
-
-busy:
-	return ret;
-}
-
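
The loop takes a private reference on each journal_head (journal_grab_journal_head() / journal_put_journal_head()) so the object cannot vanish between the walk and the state checks. A C11 sketch of that try-grab refcount idiom, with hypothetical names; the real helper serializes with a lock rather than a CAS loop:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct jhead {
	atomic_int refs;
};

/* Take a reference only if the object is still live (refs > 0). */
static bool jhead_grab(struct jhead *jh)
{
	int old = atomic_load(&jh->refs);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&jh->refs, &old, old + 1))
			return true;
	}
	return false;		/* already dying: skip this buffer */
}

/* Drop a reference; the last one frees the object. */
static void jhead_put(struct jhead *jh)
{
	if (atomic_fetch_sub(&jh->refs, 1) == 1)
		free(jh);
}
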
-/*
- * This buffer is no longer needed.  If it is on an older transaction's
- * checkpoint list we need to record it on this transaction's forget list
- * to pin this buffer (and hence its checkpointing transaction) down until
- * this transaction commits.  If the buffer isn't on a checkpoint list, we
- * release it.
- * Returns non-zero if JBD no longer has an interest in the buffer.
- *
- * Called under j_list_lock.
- *
- * Called under jbd_lock_bh_state(bh).
- */
-static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
-{
-	int may_free = 1;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_cp_transaction) {
-		JBUFFER_TRACE(jh, "on running+cp transaction");
-		__journal_temp_unlink_buffer(jh);
-		/*
-		 * We don't want to write the buffer anymore, clear the
-		 * bit so that we don't confuse checks in
-		 * __journal_file_buffer
-		 */
-		clear_buffer_dirty(bh);
-		__journal_file_buffer(jh, transaction, BJ_Forget);
-		may_free = 0;
-	} else {
-		JBUFFER_TRACE(jh, "on running transaction");
-		__journal_unfile_buffer(jh);
-	}
-	return may_free;
-}
-
-/*
- * journal_invalidatepage
- *
- * This code is tricky.  It has a number of cases to deal with.
- *
- * There are two invariants which this code relies on:
- *
- * i_size must be updated on disk before we start calling invalidatepage on the
- * data.
- *
- *  This is done in ext3 by defining an ext3_setattr method which
- *  updates i_size before truncate gets going.  By maintaining this
- *  invariant, we can be sure that it is safe to throw away any buffers
- *  attached to the current transaction: once the transaction commits,
- *  we know that the data will not be needed.
- *
- *  Note however that we can *not* throw away data belonging to the
- *  previous, committing transaction!
- *
- * Any disk blocks which *are* part of the previous, committing
- * transaction (and which therefore cannot be discarded immediately) are
- * not going to be reused in the new running transaction
- *
- *  The bitmap committed_data images guarantee this: any block which is
- *  allocated in one transaction and removed in the next will be marked
- *  as in-use in the committed_data bitmap, so cannot be reused until
- *  the next transaction to delete the block commits.  This means that
- *  leaving committing buffers dirty is quite safe: the disk blocks
- *  cannot be reallocated to a different file and so buffer aliasing is
- *  not possible.
- *
- *
- * The above applies mainly to ordered data mode.  In writeback mode we
- * don't make guarantees about the order in which data hits disk --- in
- * particular we don't guarantee that new dirty data is flushed before
- * transaction commit --- so it is always safe just to discard data
- * immediately in that mode.  --sct
- */
-
-/*
- * The journal_unmap_buffer helper function returns zero if the buffer
- * concerned remains pinned as an anonymous buffer belonging to an older
- * transaction.
- *
- * We're outside-transaction here.  Either or both of j_running_transaction
- * and j_committing_transaction may be NULL.
- */
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
-				int partial_page)
-{
-	transaction_t *transaction;
-	struct journal_head *jh;
-	int may_free = 1;
-
-	BUFFER_TRACE(bh, "entry");
-
-retry:
-	/*
-	 * It is safe to proceed here without the j_list_lock because the
-	 * buffers cannot be stolen by try_to_free_buffers as long as we are
-	 * holding the page lock. --sct
-	 */
-
-	if (!buffer_jbd(bh))
-		goto zap_buffer_unlocked;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	jh = journal_grab_journal_head(bh);
-	if (!jh)
-		goto zap_buffer_no_jh;
-
-	/*
-	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding the inode to the orphan list (let's call it T)
	 * is committed.  Otherwise if the transaction changing the
	 * buffer would be cleaned from the journal before T is
	 * committed, a crash could cause the correct contents of
	 * the buffer to be lost.  On the other hand we have to
	 * clear the buffer's dirty bit no later than the moment when the
-	 * transaction marking the buffer as freed in the filesystem
-	 * structures is committed because from that moment on the
-	 * block can be reallocated and used by a different page.
-	 * Since the block hasn't been freed yet but the inode has
-	 * already been added to orphan list, it is safe for us to add
-	 * the buffer to BJ_Forget list of the newest transaction.
-	 *
-	 * Also we have to clear buffer_mapped flag of a truncated buffer
-	 * because the buffer_head may be attached to the page straddling
-	 * i_size (can happen only when blocksize < pagesize) and thus the
-	 * buffer_head can be reused when the file is extended again. So we end
-	 * up keeping around invalidated buffers attached to transactions'
-	 * BJ_Forget list just to stop checkpointing code from cleaning up
-	 * the transaction this buffer was modified in.
-	 */
-	transaction = jh->b_transaction;
-	if (transaction == NULL) {
-		/* First case: not on any transaction.  If it
-		 * has no checkpoint link, then we can zap it:
-		 * it's a writeback-mode buffer so we don't care
-		 * if it hits disk safely. */
-		if (!jh->b_cp_transaction) {
-			JBUFFER_TRACE(jh, "not on any transaction: zap");
-			goto zap_buffer;
-		}
-
-		if (!buffer_dirty(bh)) {
-			/* bdflush has written it.  We can drop it now */
-			goto zap_buffer;
-		}
-
-		/* OK, it must be in the journal but still not
-		 * written fully to disk: it's metadata or
-		 * journaled data... */
-
-		if (journal->j_running_transaction) {
-			/* ... and once the current transaction has
-			 * committed, the buffer won't be needed any
-			 * longer. */
-			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
-			may_free = __dispose_buffer(jh,
-					journal->j_running_transaction);
-			goto zap_buffer;
-		} else {
-			/* There is no currently-running transaction. So the
-			 * orphan record which we wrote for this file must have
-			 * passed into commit.  We must attach this buffer to
-			 * the committing transaction, if it exists. */
-			if (journal->j_committing_transaction) {
-				JBUFFER_TRACE(jh, "give to committing trans");
-				may_free = __dispose_buffer(jh,
-					journal->j_committing_transaction);
-				goto zap_buffer;
-			} else {
-				/* The orphan record's transaction has
-				 * committed.  We can cleanse this buffer */
-				clear_buffer_jbddirty(bh);
-				goto zap_buffer;
-			}
-		}
-	} else if (transaction == journal->j_committing_transaction) {
-		JBUFFER_TRACE(jh, "on committing transaction");
-		if (jh->b_jlist == BJ_Locked) {
-			/*
-			 * The buffer is on the committing transaction's locked
-			 * list.  We have the buffer locked, so I/O has
-			 * completed.  So we can nail the buffer now.
-			 */
-			may_free = __dispose_buffer(jh, transaction);
-			goto zap_buffer;
-		}
-		/*
-		 * The buffer is committing, we simply cannot touch
-		 * it. If the page is straddling i_size we have to wait
-		 * for commit and try again.
-		 */
-		if (partial_page) {
-			tid_t tid = journal->j_committing_transaction->t_tid;
-
-			journal_put_journal_head(jh);
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			spin_unlock(&journal->j_state_lock);
-			unlock_buffer(bh);
-			log_wait_commit(journal, tid);
-			lock_buffer(bh);
-			goto retry;
-		}
-		/*
-		 * OK, buffer won't be reachable after truncate. We just set
-		 * j_next_transaction to the running transaction (if there is
-		 * one) and mark buffer as freed so that commit code knows it
-		 * should clear dirty bits when it is done with the buffer.
-		 */
-		set_buffer_freed(bh);
-		if (journal->j_running_transaction && buffer_jbddirty(bh))
-			jh->b_next_transaction = journal->j_running_transaction;
-		journal_put_journal_head(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		spin_unlock(&journal->j_state_lock);
-		return 0;
-	} else {
-		/* Good, the buffer belongs to the running transaction.
-		 * We are writing our own transaction's data, not any
-		 * previous one's, so it is safe to throw it away
-		 * (remember that we expect the filesystem to have set
-		 * i_size already for this truncate so recovery will not
-		 * expose the disk blocks we are discarding here.) */
-		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
-		JBUFFER_TRACE(jh, "on running transaction");
-		may_free = __dispose_buffer(jh, transaction);
-	}
-
-zap_buffer:
-	/*
-	 * This is tricky. Although the buffer is truncated, it may be reused
-	 * if blocksize < pagesize and it is attached to the page straddling
-	 * EOF. Since the buffer might have been added to BJ_Forget list of the
-	 * running transaction, journal_get_write_access() won't clear
-	 * b_modified and credit accounting gets confused. So clear b_modified
-	 * here. */
-	jh->b_modified = 0;
-	journal_put_journal_head(jh);
-zap_buffer_no_jh:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_state_lock);
-zap_buffer_unlocked:
-	clear_buffer_dirty(bh);
-	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
-	clear_buffer_mapped(bh);
-	clear_buffer_req(bh);
-	clear_buffer_new(bh);
-	bh->b_bdev = NULL;
-	return may_free;
-}
-
-/**
- * void journal_invalidatepage() - invalidate a journal page
- * @journal: journal to use for flush
- * @page:    page to flush
- * @offset:  offset of the range to invalidate
- * @length:  length of the range to invalidate
- *
- * Reap page buffers containing data in specified range in page.
- */
-void journal_invalidatepage(journal_t *journal,
-		      struct page *page,
-		      unsigned int offset,
-		      unsigned int length)
-{
-	struct buffer_head *head, *bh, *next;
-	unsigned int stop = offset + length;
-	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
-	int may_free = 1;
-
-	if (!PageLocked(page))
-		BUG();
-	if (!page_has_buffers(page))
-		return;
-
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
-	/* We will potentially be playing with lists other than just the
-	 * data lists (especially for journaled data mode), so be
-	 * cautious in our locking. */
-
-	head = bh = page_buffers(page);
-	do {
-		unsigned int next_off = curr_off + bh->b_size;
-		next = bh->b_this_page;
-
-		if (next_off > stop)
-			return;
-
-		if (offset <= curr_off) {
-			/* This block is wholly outside the truncation point */
-			lock_buffer(bh);
-			may_free &= journal_unmap_buffer(journal, bh,
-							 partial_page);
-			unlock_buffer(bh);
-		}
-		curr_off = next_off;
-		bh = next;
-
-	} while (bh != head);
-
-	if (!partial_page) {
-		if (may_free && try_to_free_buffers(page))
-			J_ASSERT(!page_has_buffers(page));
-	}
-}
-
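
The walk above is pure offset arithmetic: a block is touched only if it begins at or after offset, and the loop stops at the first block that would end past offset+length. The same walk as a standalone program (PAGE_SIZE and the block size are hypothetical here):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Walk the blocks of a page the way journal_invalidatepage() walks
 * buffer_heads: act on every block that starts at or after 'offset',
 * and stop once a block would end past offset + length. */
static void walk_blocks(unsigned int offset, unsigned int length,
			unsigned int blocksize)
{
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;

	while (curr_off < PAGE_SIZE) {
		unsigned int next_off = curr_off + blocksize;

		if (next_off > stop)
			return;
		if (offset <= curr_off)
			printf("invalidate block at %u\n", curr_off);
		curr_off = next_off;
	}
}

int main(void)
{
	walk_blocks(1024, 3072, 1024);	/* blocks at 1024, 2048, 3072 */
	return 0;
}
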
-/*
- * File a buffer on the given transaction list.
- */
-void __journal_file_buffer(struct journal_head *jh,
-			transaction_t *transaction, int jlist)
-{
-	struct journal_head **list = NULL;
-	int was_dirty = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
-				jh->b_transaction == NULL);
-
-	if (jh->b_transaction && jh->b_jlist == jlist)
-		return;
-
-	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-	    jlist == BJ_Shadow || jlist == BJ_Forget) {
-		/*
-		 * For metadata buffers, we track dirty bit in buffer_jbddirty
-		 * instead of buffer_dirty. We should not see a dirty bit set
-		 * here because we clear it in do_get_write_access but e.g.
-		 * tune2fs can modify the sb and set the dirty bit at any time
-		 * so we try to gracefully handle that.
-		 */
-		if (buffer_dirty(bh))
-			warn_dirty_buffer(bh);
-		if (test_clear_buffer_dirty(bh) ||
-		    test_clear_buffer_jbddirty(bh))
-			was_dirty = 1;
-	}
-
-	if (jh->b_transaction)
-		__journal_temp_unlink_buffer(jh);
-	else
-		journal_grab_journal_head(bh);
-	jh->b_transaction = transaction;
-
-	switch (jlist) {
-	case BJ_None:
-		J_ASSERT_JH(jh, !jh->b_committed_data);
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers++;
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list = &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_add_buffer(list, jh);
-	jh->b_jlist = jlist;
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-void journal_file_buffer(struct journal_head *jh,
-				transaction_t *transaction, int jlist)
-{
-	jbd_lock_bh_state(jh2bh(jh));
-	spin_lock(&transaction->t_journal->j_list_lock);
-	__journal_file_buffer(jh, transaction, jlist);
-	spin_unlock(&transaction->t_journal->j_list_lock);
-	jbd_unlock_bh_state(jh2bh(jh));
-}
-
-/*
- * Remove a buffer from its current buffer list in preparation for
- * dropping it from its current transaction entirely.  If the buffer has
- * already started to be used by a subsequent transaction, refile the
- * buffer on that transaction's metadata list.
- *
- * Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
- *
- * jh and bh may be already free when this function returns
- */
-void __journal_refile_buffer(struct journal_head *jh)
-{
-	int was_dirty, jlist;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	if (jh->b_transaction)
-		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
-
-	/* If the buffer is now unused, just drop it. */
-	if (jh->b_next_transaction == NULL) {
-		__journal_unfile_buffer(jh);
-		return;
-	}
-
-	/*
-	 * It has been modified by a later transaction: add it to the new
-	 * transaction's metadata list.
-	 */
-
-	was_dirty = test_clear_buffer_jbddirty(bh);
-	__journal_temp_unlink_buffer(jh);
-	/*
-	 * We set b_transaction here because b_next_transaction will inherit
-	 * our jh reference and thus __journal_file_buffer() must not take a
-	 * new one.
-	 */
-	jh->b_transaction = jh->b_next_transaction;
-	jh->b_next_transaction = NULL;
-	if (buffer_freed(bh))
-		jlist = BJ_Forget;
-	else if (jh->b_modified)
-		jlist = BJ_Metadata;
-	else
-		jlist = BJ_Reserved;
-	__journal_file_buffer(jh, jh->b_transaction, jlist);
-	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-/*
- * __journal_refile_buffer() with necessary locking added. We take our bh
- * reference so that we can safely unlock bh.
- *
- * The jh and bh may be freed by this call.
- */
-void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_refile_buffer(jh);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_list_lock);
-	__brelse(bh);
-}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 4227dc4..8c44654 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -417,12 +417,12 @@
  * journal_clean_one_cp_list
  *
  * Find all the written-back checkpoint buffers in the given list and
- * release them.
+ * release them. If 'destroy' is set, clean all buffers unconditionally.
  *
  * Called with j_list_lock held.
  * Returns 1 if we freed the transaction, 0 otherwise.
  */
-static int journal_clean_one_cp_list(struct journal_head *jh)
+static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
@@ -436,7 +436,10 @@
 	do {
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
-		ret = __try_to_free_cp_buf(jh);
+		if (!destroy)
+			ret = __try_to_free_cp_buf(jh);
+		else
+			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
 		if (!ret)
 			return freed;
 		if (ret == 2)
@@ -459,10 +462,11 @@
  * journal_clean_checkpoint_list
  *
  * Find all the written-back checkpoint buffers in the journal and release them.
+ * If 'destroy' is set, release all buffers unconditionally.
  *
  * Called with j_list_lock held.
  */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
 	int ret;
@@ -476,7 +480,8 @@
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
+		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+						destroy);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * don't have an obligation to finish processing. Bail out if
@@ -492,7 +497,7 @@
 		 * we can possibly see not yet submitted buffers on io_list
 		 */
 		ret = journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list);
+				t_checkpoint_io_list, destroy);
 		if (need_resched())
 			return;
 		/*
@@ -506,6 +511,28 @@
 }
 
 /*
+ * Remove buffers from all checkpoint lists as the journal is aborted and we
+ * just need to free the memory.
+ */
+void jbd2_journal_destroy_checkpoint(journal_t *journal)
+{
+	/*
+	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
+	 * early because it may need to reschedule.
+	 */
+	while (1) {
+		spin_lock(&journal->j_list_lock);
+		if (!journal->j_checkpoint_transactions) {
+			spin_unlock(&journal->j_list_lock);
+			break;
+		}
+		__jbd2_journal_clean_checkpoint_list(journal, true);
+		spin_unlock(&journal->j_list_lock);
+		cond_resched();
+	}
+}
+
+/*
  * journal_remove_checkpoint: called after a buffer has been committed
  * to disk (either by being write-back flushed to disk, or being
  * committed to the log).
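
jbd2_journal_destroy_checkpoint() above illustrates a common drain loop: retake the lock each round so the critical section stays bounded, exit when the list is empty, and reschedule between rounds. A userspace analogue with a pthread mutex, where sched_yield() stands in for cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct item {
	struct item *next;
};

struct queue {
	pthread_mutex_t lock;
	struct item *head;
};

/* Free one item; called with q->lock held.  A fuller analogue would
 * free a batch and bail out early once it has run too long. */
static void drain_one(struct queue *q)
{
	struct item *it = q->head;

	q->head = it->next;
	free(it);
}

static void destroy_all(struct queue *q)
{
	for (;;) {
		pthread_mutex_lock(&q->lock);
		if (!q->head) {
			pthread_mutex_unlock(&q->lock);
			break;
		}
		drain_one(q);
		pthread_mutex_unlock(&q->lock);
		sched_yield();
	}
}
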
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index b73e021..362e5f6 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -510,7 +510,7 @@
 	 * frees some memory
 	 */
 	spin_lock(&journal->j_list_lock);
-	__jbd2_journal_clean_checkpoint_list(journal);
+	__jbd2_journal_clean_checkpoint_list(journal, false);
 	spin_unlock(&journal->j_list_lock);
 
 	jbd_debug(3, "JBD2: commit phase 1\n");
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 4ff3fad..8270fe9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1456,7 +1456,7 @@
 	sb->s_errno    = cpu_to_be32(journal->j_errno);
 	read_unlock(&journal->j_state_lock);
 
-	jbd2_write_superblock(journal, WRITE_SYNC);
+	jbd2_write_superblock(journal, WRITE_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -1693,8 +1693,17 @@
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_log_do_checkpoint(journal);
+		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
+		/*
+		 * If checkpointing failed, just free the buffers to avoid
+		 * looping forever
+		 */
+		if (err) {
+			jbd2_journal_destroy_checkpoint(journal);
+			spin_lock(&journal->j_list_lock);
+			break;
+		}
 		spin_lock(&journal->j_list_lock);
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f3d0617..6b8338e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -204,6 +204,20 @@
 		 * attach this handle to a new transaction.
 		 */
 		atomic_sub(total, &t->t_outstanding_credits);
+
+		/*
+		 * Is the number of reserved credits in the current transaction too
+		 * big to fit this handle? Wait until reserved credits are freed.
+		 */
+		if (atomic_read(&journal->j_reserved_credits) + total >
+		    journal->j_max_transaction_buffers) {
+			read_unlock(&journal->j_state_lock);
+			wait_event(journal->j_wait_reserved,
+				   atomic_read(&journal->j_reserved_credits) + total <=
+				   journal->j_max_transaction_buffers);
+			return 1;
+		}
+
 		wait_transaction_locked(journal);
 		return 1;
 	}
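
The wait_event() added above blocks a would-be starter until enough reserved credits drain (j_reserved_credits + total must fit inside j_max_transaction_buffers). The userspace equivalent is a condition variable guarding the same inequality, with hypothetical names:

#include <pthread.h>

struct credits {
	pthread_mutex_t lock;
	pthread_cond_t freed;	/* analogue of j_wait_reserved */
	int reserved;		/* analogue of j_reserved_credits */
	int max;		/* analogue of j_max_transaction_buffers */
};

/* Block until 'total' more credits would fit, then take them. */
void take_reserved(struct credits *c, int total)
{
	pthread_mutex_lock(&c->lock);
	while (c->reserved + total > c->max)
		pthread_cond_wait(&c->freed, &c->lock);
	c->reserved += total;
	pthread_mutex_unlock(&c->lock);
}

void put_reserved(struct credits *c, int total)
{
	pthread_mutex_lock(&c->lock);
	c->reserved -= total;
	pthread_cond_broadcast(&c->freed);
	pthread_mutex_unlock(&c->lock);
}
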
@@ -262,20 +276,24 @@
 	int		rsv_blocks = 0;
 	unsigned long ts = jiffies;
 
-	/*
-	 * 1/2 of transaction can be reserved so we can practically handle
-	 * only 1/2 of maximum transaction size per operation
-	 */
-	if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
-		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
-		       current->comm, blocks,
-		       journal->j_max_transaction_buffers / 2);
-		return -ENOSPC;
-	}
-
 	if (handle->h_rsv_handle)
 		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
 
+	/*
+	 * Limit the number of reserved credits to 1/2 of maximum transaction
+	 * size and limit the number of total credits to not exceed maximum
+	 * transaction size per operation.
+	 */
+	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
+	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
+		printk(KERN_ERR "JBD2: %s wants too many credits "
+		       "credits:%d rsv_credits:%d max:%d\n",
+		       current->comm, blocks, rsv_blocks,
+		       journal->j_max_transaction_buffers);
+		WARN_ON(1);
+		return -ENOSPC;
+	}
+
 alloc_transaction:
 	if (!journal->j_running_transaction) {
 		/*
@@ -1280,8 +1298,6 @@
 	triggers->t_abort(triggers, jh2bh(jh));
 }
 
-
-
 /**
  * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
  * @handle: transaction to add buffer to.
@@ -1314,12 +1330,41 @@
 
 	if (is_handle_aborted(handle))
 		return -EROFS;
-	journal = transaction->t_journal;
-	jh = jbd2_journal_grab_journal_head(bh);
-	if (!jh) {
+	if (!buffer_jbd(bh)) {
 		ret = -EUCLEAN;
 		goto out;
 	}
+	/*
+	 * We don't grab jh reference here since the buffer must be part
+	 * of the running transaction.
+	 */
+	jh = bh2jh(bh);
+	/*
+	 * This and the following assertions are unreliable since we may see jh
+	 * in an inconsistent state unless we grab the bh_state lock. But this
+	 * is crucial to catch bugs, so let's do a reliable check until the
+	 * lockless handling is fully proven.
+	 */
+	if (jh->b_transaction != transaction &&
+	    jh->b_next_transaction != transaction) {
+		jbd_lock_bh_state(bh);
+		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+				jh->b_next_transaction == transaction);
+		jbd_unlock_bh_state(bh);
+	}
+	if (jh->b_modified == 1) {
+		/* If it's in our transaction it must be in BJ_Metadata list. */
+		if (jh->b_transaction == transaction &&
+		    jh->b_jlist != BJ_Metadata) {
+			jbd_lock_bh_state(bh);
+			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
+					jh->b_jlist == BJ_Metadata);
+			jbd_unlock_bh_state(bh);
+		}
+		goto out;
+	}
+
+	journal = transaction->t_journal;
 	jbd_debug(5, "journal_head %p\n", jh);
 	JBUFFER_TRACE(jh, "entry");
 
@@ -1410,7 +1455,6 @@
 	spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
 	jbd_unlock_bh_state(bh);
-	jbd2_journal_put_journal_head(jh);
 out:
 	JBUFFER_TRACE(jh, "exit");
 	return ret;
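
The lockless conversion above uses a double-checked pattern: read the fields without the lock, and only when they look wrong take bh_state and re-check before asserting, since the unlocked read may have seen a transient state. A compact sketch of that idiom:

#include <assert.h>
#include <pthread.h>

struct obj {
	pthread_mutex_t lock;
	int owner;		/* analogue of jh->b_transaction */
};

/* Unlocked read first: if it already looks right we skip the lock
 * entirely; only on an apparent mismatch do we pay for the lock and
 * re-check before asserting, because the first read may be stale. */
void check_owner(struct obj *o, int expected)
{
	if (o->owner != expected) {
		pthread_mutex_lock(&o->lock);
		assert(o->owner == expected);
		pthread_mutex_unlock(&o->lock);
	}
}
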
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index b9dc23c..0e026a7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -107,8 +107,11 @@
 	if (rc)
 		return rc;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		rc = dquot_initialize(inode);
+		if (rc)
+			return rc;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		rc = dquot_transfer(inode, iattr);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 6b0f816..cf7936f 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -109,7 +109,9 @@
 	/*
 	 * Allocate inode to quota.
 	 */
-	dquot_initialize(inode);
+	rc = dquot_initialize(inode);
+	if (rc)
+		goto fail_drop;
 	rc = dquot_alloc_inode(inode);
 	if (rc)
 		goto fail_drop;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a5ac97b..35976bd 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -86,7 +86,9 @@
 
 	jfs_info("jfs_create: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -218,7 +220,9 @@
 
 	jfs_info("jfs_mkdir: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -355,8 +359,12 @@
 	jfs_info("jfs_rmdir: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	/* directory must be empty to be removed */
 	if (!dtEmpty(ip)) {
@@ -483,8 +491,12 @@
 	jfs_info("jfs_unlink: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
@@ -799,7 +811,9 @@
 
 	jfs_info("jfs_link: %pd %pd", old_dentry, dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	tid = txBegin(ip->i_sb, 0);
 
@@ -810,7 +824,7 @@
 	 * scan parent directory for entry/freespace
 	 */
 	if ((rc = get_UCSname(&dname, dentry)))
-		goto out;
+		goto out_tx;
 
 	if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
 		goto free_dname;
@@ -842,12 +856,13 @@
       free_dname:
 	free_UCSname(&dname);
 
-      out:
+      out_tx:
 	txEnd(tid);
 
 	mutex_unlock(&JFS_IP(ip)->commit_mutex);
 	mutex_unlock(&JFS_IP(dir)->commit_mutex);
 
+      out:
 	jfs_info("jfs_link: rc:%d", rc);
 	return rc;
 }
@@ -891,7 +906,9 @@
 
 	jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	ssize = strlen(name) + 1;
 
@@ -1082,8 +1099,12 @@
 
 	jfs_info("jfs_rename: %pd %pd", old_dentry, new_dentry);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	rc = dquot_initialize(old_dir);
+	if (rc)
+		goto out1;
+	rc = dquot_initialize(new_dir);
+	if (rc)
+		goto out1;
 
 	old_ip = d_inode(old_dentry);
 	new_ip = d_inode(new_dentry);
@@ -1130,7 +1151,9 @@
 	} else if (new_ip) {
 		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
-		dquot_initialize(new_ip);
+		rc = dquot_initialize(new_ip);
+		if (rc)
+			goto out_unlock;
 	}
 
 	/*
@@ -1318,6 +1341,7 @@
 
 		clear_cflag(COMMIT_Stale, old_dir);
 	}
+      out_unlock:
 	if (new_ip && !S_ISDIR(new_ip->i_mode))
 		IWRITE_UNLOCK(new_ip);
       out3:
@@ -1353,7 +1377,9 @@
 
 	jfs_info("jfs_mknod: %pd", dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
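
Every jfs call site in this series gets the same treatment now that
dquot_initialize() reports failure; a minimal sketch of the conversion, with
the label standing in for whichever cleanup path the caller already has:

	rc = dquot_initialize(dip);
	if (rc)
		goto out;	/* propagate the error instead of ignoring it */
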
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 55505cb..d678bcc 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -322,6 +322,11 @@
 	return error;
 }
 
+static struct svc_serv_ops lockd_sv_ops = {
+	.svo_shutdown		= svc_rpcb_cleanup,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+};
+
 static struct svc_serv *lockd_create_svc(void)
 {
 	struct svc_serv *serv;
@@ -350,7 +355,7 @@
 		nlm_timeout = LOCKD_DFLT_TIMEO;
 	nlmsvc_timeout = nlm_timeout * HZ;
 
-	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
+	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops);
 	if (!serv) {
 		printk(KERN_WARNING "lockd_up: create service failed\n");
 		return ERR_PTR(-ENOMEM);
@@ -586,6 +591,7 @@
 
 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
 	INIT_LIST_HEAD(&ln->lockd_manager.list);
+	ln->lockd_manager.block_opens = false;
 	spin_lock_init(&ln->nsm_clnt_lock);
 	return 0;
 }
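
svc_create() no longer takes a bare shutdown callback; services now bundle
their callbacks in a struct svc_serv_ops, as lockd does above and the NFSv4
callback service does below. A sketch, with the my_* names invented for
illustration:

static struct svc_serv_ops my_sv_ops = {
	.svo_shutdown		= my_shutdown,		/* the old third argument, now optional */
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,	/* default transport enqueueing */
};

	serv = svc_create(&my_program, MY_BUFSIZE, &my_sv_ops);
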
diff --git a/fs/locks.c b/fs/locks.c
index d3d558b..2a54c80 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1568,6 +1568,7 @@
  * 			    desired lease.
  * @dentry:	dentry to check
  * @arg:	type of lease that we're trying to acquire
+ * @flags:	current lock flags
  *
  * Check to see if there's an existing open fd on this file that would
  * conflict with the lease we're trying to set.
diff --git a/fs/namei.c b/fs/namei.c
index 1c2105e..29b9279 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -560,6 +560,24 @@
 	return 0;
 }
 
+/**
+ * path_connected - Verify that a path->dentry is below path->mnt->mnt_root
+ * @path: nameidata to verify
+ *
+ * A rename can sometimes move a file or directory outside of a bind
+ * mount; path_connected allows those cases to be detected.
+ */
+static bool path_connected(const struct path *path)
+{
+	struct vfsmount *mnt = path->mnt;
+
+	/* Only bind mounts can have disconnected paths */
+	if (mnt->mnt_root == mnt->mnt_sb->s_root)
+		return true;
+
+	return is_subdir(path->dentry, mnt->mnt_root);
+}
+
 static inline int nd_alloc_stack(struct nameidata *nd)
 {
 	if (likely(nd->depth != EMBEDDED_LEVELS))
@@ -1296,6 +1314,8 @@
 				return -ECHILD;
 			nd->path.dentry = parent;
 			nd->seq = seq;
+			if (unlikely(!path_connected(&nd->path)))
+				return -ENOENT;
 			break;
 		} else {
 			struct mount *mnt = real_mount(nd->path.mnt);
@@ -1396,7 +1416,7 @@
 	}
 }
 
-static void follow_dotdot(struct nameidata *nd)
+static int follow_dotdot(struct nameidata *nd)
 {
 	if (!nd->root.mnt)
 		set_root(nd);
@@ -1412,6 +1432,8 @@
 			/* rare case of legitimate dget_parent()... */
 			nd->path.dentry = dget_parent(nd->path.dentry);
 			dput(old);
+			if (unlikely(!path_connected(&nd->path)))
+				return -ENOENT;
 			break;
 		}
 		if (!follow_up(&nd->path))
@@ -1419,6 +1441,7 @@
 	}
 	follow_mount(&nd->path);
 	nd->inode = nd->path.dentry->d_inode;
+	return 0;
 }
 
 /*
@@ -1634,7 +1657,7 @@
 		if (nd->flags & LOOKUP_RCU) {
 			return follow_dotdot_rcu(nd);
 		} else
-			follow_dotdot(nd);
+			return follow_dotdot(nd);
 	}
 	return 0;
 }
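
Both the RCU and the ref-walk ".." handlers now fail with -ENOENT when the
walk escapes a bind mount. The hole being closed, roughly: with /mnt a bind
mount of /a/b, a concurrent rename can move a directory from under b to
elsewhere in /a, leaving nd->path.dentry outside the subtree rooted at the
bind mount — which is exactly what the is_subdir() test in path_connected()
detects:

	if (unlikely(!path_connected(&nd->path)))
		return -ENOENT;	/* dentry no longer below mnt->mnt_root */
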
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 92dca9e..c556640 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -46,13 +46,6 @@
 
 struct pnfs_block_dev;
 
-enum pnfs_block_volume_type {
-	PNFS_BLOCK_VOLUME_SIMPLE	= 0,
-	PNFS_BLOCK_VOLUME_SLICE		= 1,
-	PNFS_BLOCK_VOLUME_CONCAT	= 2,
-	PNFS_BLOCK_VOLUME_STRIPE	= 3,
-};
-
 #define PNFS_BLOCK_MAX_UUIDS	4
 #define PNFS_BLOCK_MAX_DEVICES	64
 
@@ -117,13 +110,6 @@
 			struct pnfs_block_dev_map *map);
 };
 
-enum exstate4 {
-	PNFS_BLOCK_READWRITE_DATA	= 0,
-	PNFS_BLOCK_READ_DATA		= 1,
-	PNFS_BLOCK_INVALID_DATA		= 2, /* mapped, but data is invalid */
-	PNFS_BLOCK_NONE_DATA		= 3  /* unmapped, it's a hole */
-};
-
 /* sector_t fields are all in 512-byte sectors */
 struct pnfs_block_extent {
 	union {
@@ -134,15 +120,12 @@
 	sector_t	be_f_offset;	/* the starting offset in the file */
 	sector_t	be_length;	/* the size of the extent */
 	sector_t	be_v_offset;	/* the starting offset in the volume */
-	enum exstate4	be_state;	/* the state of this extent */
+	enum pnfs_block_extent_state be_state;	/* the state of this extent */
 #define EXTENT_WRITTEN		1
 #define EXTENT_COMMITTING	2
 	unsigned int	be_tag;
 };
 
-/* on the wire size of the extent */
-#define BL_EXTENT_SIZE	(7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
-
 struct pnfs_block_layout {
 	struct pnfs_layout_hdr	bl_layout;
 	struct rb_root		bl_ext_rw;
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index e535599..a861bbd 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -22,7 +22,7 @@
 		kfree(dev->children);
 	} else {
 		if (dev->bdev)
-			blkdev_put(dev->bdev, FMODE_READ);
+			blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
 	}
 }
 
@@ -65,6 +65,11 @@
 				return -EIO;
 			p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
 			b->simple.sigs[i].sig_len = be32_to_cpup(p++);
+			if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) {
+				pr_info("signature too long: %d\n",
+					b->simple.sigs[i].sig_len);
+				return -EIO;
+			}
 
 			p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
 			if (!p)
@@ -195,7 +200,7 @@
 	if (!dev)
 		return -EIO;
 
-	d->bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+	d->bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
 	if (IS_ERR(d->bdev)) {
 		printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
 			MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev));
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 31d0b5e..c59a59c 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -462,6 +462,12 @@
 	return err;
 }
 
+static size_t ext_tree_layoutupdate_size(size_t count)
+{
+	return sizeof(__be32) /* number of entries */ +
+		PNFS_BLOCK_EXTENT_SIZE * count;
+}
+
 static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
 		size_t buffer_size)
 {
@@ -489,7 +495,7 @@
 			continue;
 
 		(*count)++;
-		if (*count * BL_EXTENT_SIZE > buffer_size) {
+		if (ext_tree_layoutupdate_size(*count) > buffer_size) {
 			/* keep counting.. */
 			ret = -ENOSPC;
 			continue;
@@ -530,7 +536,7 @@
 	if (unlikely(ret)) {
 		ext_tree_free_commitdata(arg, buffer_size);
 
-		buffer_size = sizeof(__be32) + BL_EXTENT_SIZE * count;
+		buffer_size = ext_tree_layoutupdate_size(count);
 		count = 0;
 
 		arg->layoutupdate_pages =
@@ -549,17 +555,14 @@
 	}
 
 	*start_p = cpu_to_be32(count);
-	arg->layoutupdate_len = sizeof(__be32) + BL_EXTENT_SIZE * count;
+	arg->layoutupdate_len = ext_tree_layoutupdate_size(count);
 
 	if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
-		__be32 *p = start_p;
+		void *p = start_p, *end = p + arg->layoutupdate_len;
 		int i = 0;
 
-		for (p = start_p;
-		     p < start_p + arg->layoutupdate_len;
-		     p += PAGE_SIZE) {
+		for ( ; p < end; p += PAGE_SIZE)
 			arg->layoutupdate_pages[i++] = vmalloc_to_page(p);
-		}
 	}
 
 	dprintk("%s found %zu ranges\n", __func__, count);
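
The new helper centralizes sizing arithmetic that was previously open-coded
in two places. Assuming PNFS_BLOCK_EXTENT_SIZE keeps the value of the
deleted BL_EXTENT_SIZE (7 * sizeof(__be32) plus the 16-byte NFSv4 deviceid,
i.e. 44 bytes per extent), a layoutupdate carrying 10 extents needs:

	size_t sz = ext_tree_layoutupdate_size(10);	/* 4 + 10 * 44 = 444 bytes */
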
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 682529c..75f7c0a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -162,10 +162,6 @@
 	spin_lock_init(&serv->sv_cb_lock);
 	init_waitqueue_head(&serv->sv_cb_waitq);
 	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-	if (IS_ERR(rqstp)) {
-		svc_xprt_put(serv->sv_bc_xprt);
-		serv->sv_bc_xprt = NULL;
-	}
 	dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp));
 	return rqstp;
 }
@@ -308,6 +304,10 @@
 	return ret;
 }
 
+static struct svc_serv_ops nfs_cb_sv_ops = {
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+};
+
 static struct svc_serv *nfs_callback_create_svc(int minorversion)
 {
 	struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
@@ -333,7 +333,7 @@
 		printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
 			cb_info->users);
 
-	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, &nfs_cb_sv_ops);
 	if (!serv) {
 		printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 29e3c1b..b85cf7a 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -40,8 +40,11 @@
 		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
 	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
-	if (inode == NULL)
+	if (inode == NULL) {
+		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
+				-ntohl(res->status));
 		goto out;
+	}
 	nfsi = NFS_I(inode);
 	rcu_read_lock();
 	delegation = rcu_dereference(nfsi->delegation);
@@ -60,6 +63,7 @@
 	res->status = 0;
 out_iput:
 	rcu_read_unlock();
+	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
 	iput(inode);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
@@ -194,6 +198,7 @@
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&free_me_list);
 	pnfs_put_layout_hdr(lo);
+	trace_nfs4_cb_layoutrecall_inode(clp, &args->cbl_fh, ino, -rv);
 	iput(ino);
 out:
 	return rv;
@@ -554,7 +559,7 @@
 	status = htonl(NFS4_OK);
 
 	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
-	nfs41_server_notify_target_slotid_update(cps->clp);
+	nfs41_notify_server(cps->clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4a90c9b..57c5a02 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -20,6 +20,7 @@
 #include <linux/stat.h>
 #include <linux/errno.h>
 #include <linux/unistd.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/metrics.h>
@@ -285,116 +286,6 @@
 }
 EXPORT_SYMBOL_GPL(nfs_put_client);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-/*
- * Test if two ip6 socket addresses refer to the same socket by
- * comparing relevant fields. The padding bytes specifically, are not
- * compared. sin6_flowinfo is not compared because it only affects QoS
- * and sin6_scope_id is only compared if the address is "link local"
- * because "link local" addresses need only be unique to a specific
- * link. Conversely, ordinary unicast addresses might have different
- * sin6_scope_id.
- *
- * The caller should ensure both socket addresses are AF_INET6.
- */
-static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
-				      const struct sockaddr *sa2)
-{
-	const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
-	const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
-
-	if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
-		return 0;
-	else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-		return sin1->sin6_scope_id == sin2->sin6_scope_id;
-
-	return 1;
-}
-#else	/* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
-static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
-				      const struct sockaddr *sa2)
-{
-	return 0;
-}
-#endif
-
-/*
- * Test if two ip4 socket addresses refer to the same socket, by
- * comparing relevant fields. The padding bytes specifically, are
- * not compared.
- *
- * The caller should ensure both socket addresses are AF_INET.
- */
-static int nfs_sockaddr_match_ipaddr4(const struct sockaddr *sa1,
-				      const struct sockaddr *sa2)
-{
-	const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1;
-	const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2;
-
-	return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
-}
-
-static int nfs_sockaddr_cmp_ip6(const struct sockaddr *sa1,
-				const struct sockaddr *sa2)
-{
-	const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
-	const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
-
-	return nfs_sockaddr_match_ipaddr6(sa1, sa2) &&
-		(sin1->sin6_port == sin2->sin6_port);
-}
-
-static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1,
-				const struct sockaddr *sa2)
-{
-	const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1;
-	const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2;
-
-	return nfs_sockaddr_match_ipaddr4(sa1, sa2) &&
-		(sin1->sin_port == sin2->sin_port);
-}
-
-#if defined(CONFIG_NFS_V4_1)
-/*
- * Test if two socket addresses represent the same actual socket,
- * by comparing (only) relevant fields, excluding the port number.
- */
-int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
-			      const struct sockaddr *sa2)
-{
-	if (sa1->sa_family != sa2->sa_family)
-		return 0;
-
-	switch (sa1->sa_family) {
-	case AF_INET:
-		return nfs_sockaddr_match_ipaddr4(sa1, sa2);
-	case AF_INET6:
-		return nfs_sockaddr_match_ipaddr6(sa1, sa2);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nfs_sockaddr_match_ipaddr);
-#endif /* CONFIG_NFS_V4_1 */
-
-/*
- * Test if two socket addresses represent the same actual socket,
- * by comparing (only) relevant fields, including the port number.
- */
-static int nfs_sockaddr_cmp(const struct sockaddr *sa1,
-			    const struct sockaddr *sa2)
-{
-	if (sa1->sa_family != sa2->sa_family)
-		return 0;
-
-	switch (sa1->sa_family) {
-	case AF_INET:
-		return nfs_sockaddr_cmp_ip4(sa1, sa2);
-	case AF_INET6:
-		return nfs_sockaddr_cmp_ip6(sa1, sa2);
-	}
-	return 0;
-}
-
 /*
  * Find an nfs_client on the list that matches the initialisation data
  * that is supplied.
@@ -421,7 +312,7 @@
 		if (clp->cl_minorversion != data->minorversion)
 			continue;
 		/* Match the full socket address */
-		if (!nfs_sockaddr_cmp(sap, clap))
+		if (!rpc_cmp_addr_port(sap, clap))
 			continue;
 
 		atomic_inc(&clp->cl_count);
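
The deleted helpers above are subsumed by the shared sunrpc comparators from
<linux/sunrpc/addr.h>: rpc_cmp_addr() matches the address only (used in the
nfs4client.c hunk below), while rpc_cmp_addr_port() also requires the port
to match, as in the nfs_client lookup here:

	/* Match the full socket address */
	if (!rpc_cmp_addr_port(sap, clap))
		continue;	/* different server endpoint; keep scanning */
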
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 029d688..2714ef8 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -175,7 +175,7 @@
 		if (delegation->inode != NULL) {
 			nfs4_stateid_copy(&delegation->stateid, &res->delegation);
 			delegation->type = res->delegation_type;
-			delegation->maxsize = res->maxsize;
+			delegation->pagemod_limit = res->pagemod_limit;
 			oldcred = delegation->cred;
 			delegation->cred = get_rpccred(cred);
 			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
@@ -337,7 +337,7 @@
 		return -ENOMEM;
 	nfs4_stateid_copy(&delegation->stateid, &res->delegation);
 	delegation->type = res->delegation_type;
-	delegation->maxsize = res->maxsize;
+	delegation->pagemod_limit = res->pagemod_limit;
 	delegation->change_attr = inode->i_version;
 	delegation->cred = get_rpccred(cred);
 	delegation->inode = inode;
@@ -900,3 +900,28 @@
 	rcu_read_unlock();
 	return ret;
 }
+
+/**
+ * nfs4_delegation_flush_on_close - Check if we must flush file on close
+ * @inode: inode to check
+ *
+ * This function checks the number of outstanding writes to the file
+ * against the delegation 'space_limit' field to see if
+ * the spec requires us to flush the file on close.
+ */
+bool nfs4_delegation_flush_on_close(const struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_delegation *delegation;
+	bool ret = true;
+
+	rcu_read_lock();
+	delegation = rcu_dereference(nfsi->delegation);
+	if (delegation == NULL || !(delegation->type & FMODE_WRITE))
+		goto out;
+	if (nfsi->nrequests < delegation->pagemod_limit)
+		ret = false;
+out:
+	rcu_read_unlock();
+	return ret;
+}
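
The consumer of this new predicate is nfs4_file_flush() in the
fs/nfs/nfs4file.c hunk further down; in short:

	if (!nfs4_delegation_flush_on_close(inode))
		return filemap_fdatawrite(file->f_mapping);	/* start I/O, don't wait */
	return vfs_fsync(file, 0);				/* flush and collect errors */
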
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index e3c20a3..a448291 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -18,7 +18,7 @@
 	struct inode *inode;
 	nfs4_stateid stateid;
 	fmode_t type;
-	loff_t maxsize;
+	unsigned long pagemod_limit;
 	__u64 change_attr;
 	unsigned long flags;
 	spinlock_t lock;
@@ -61,6 +61,7 @@
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
+bool nfs4_delegation_flush_on_close(const struct inode *inode);
 
 #endif
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 547308a..3d8e4ff 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -583,26 +583,19 @@
 }
 
 static
-void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages)
+void nfs_readdir_free_pages(struct page **pages, unsigned int npages)
 {
 	unsigned int i;
 	for (i = 0; i < npages; i++)
 		put_page(pages[i]);
 }
 
-static
-void nfs_readdir_free_large_page(void *ptr, struct page **pages,
-		unsigned int npages)
-{
-	nfs_readdir_free_pagearray(pages, npages);
-}
-
 /*
  * nfs_readdir_large_page will allocate pages that must be freed with a call
- * to nfs_readdir_free_large_page
+ * to nfs_readdir_free_pages
  */
 static
-int nfs_readdir_large_page(struct page **pages, unsigned int npages)
+int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
 {
 	unsigned int i;
 
@@ -615,7 +608,7 @@
 	return 0;
 
 out_freepages:
-	nfs_readdir_free_pagearray(pages, i);
+	nfs_readdir_free_pages(pages, i);
 	return -ENOMEM;
 }
 
@@ -623,7 +616,6 @@
 int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
 {
 	struct page *pages[NFS_MAX_READDIR_PAGES];
-	void *pages_ptr = NULL;
 	struct nfs_entry entry;
 	struct file	*file = desc->file;
 	struct nfs_cache_array *array;
@@ -653,7 +645,7 @@
 	memset(array, 0, sizeof(struct nfs_cache_array));
 	array->eof_index = -1;
 
-	status = nfs_readdir_large_page(pages, array_size);
+	status = nfs_readdir_alloc_pages(pages, array_size);
 	if (status < 0)
 		goto out_release_array;
 	do {
@@ -671,7 +663,7 @@
 		}
 	} while (array->eof_index < 0);
 
-	nfs_readdir_free_large_page(pages_ptr, pages, array_size);
+	nfs_readdir_free_pages(pages, array_size);
 out_release_array:
 	nfs_readdir_release_array(page);
 out_label_free:
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc4fa1e..c0f9b1e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -82,7 +82,8 @@
 	dprintk("NFS: release(%pD2)\n", filp);
 
 	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
-	return nfs_release(inode, filp);
+	nfs_file_clear_open_context(filp);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_file_release);
 
@@ -141,7 +142,7 @@
 /*
  * Flush all dirty pages, and check for write errors.
  */
-int
+static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
 	struct inode	*inode = file_inode(file);
@@ -152,17 +153,9 @@
 	if ((file->f_mode & FMODE_WRITE) == 0)
 		return 0;
 
-	/*
-	 * If we're holding a write delegation, then just start the i/o
-	 * but don't wait for completion (or send a commit).
-	 */
-	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
-		return filemap_fdatawrite(file->f_mapping);
-
 	/* Flush writes to the server and return any errors */
 	return vfs_fsync(file, 0);
 }
-EXPORT_SYMBOL_GPL(nfs_file_flush);
 
 ssize_t
 nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
@@ -644,12 +637,10 @@
 	.page_mkwrite = nfs_vm_page_mkwrite,
 };
 
-static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+static int nfs_need_check_write(struct file *filp, struct inode *inode)
 {
 	struct nfs_open_context *ctx;
 
-	if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC))
-		return 1;
 	ctx = nfs_file_open_context(filp);
 	if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) ||
 	    nfs_ctx_key_to_expire(ctx))
@@ -699,8 +690,8 @@
 	if (result > 0)
 		written = result;
 
-	/* Return error values for O_DSYNC and IS_SYNC() */
-	if (result >= 0 && nfs_need_sync_write(file, inode)) {
+	/* Return error values */
+	if (result >= 0 && nfs_need_check_write(file, inode)) {
 		int err = vfs_fsync(file, 0);
 		if (err < 0)
 			result = err;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b3289d7..fbc5a56 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -34,6 +34,7 @@
 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
 	if (ffl) {
 		INIT_LIST_HEAD(&ffl->error_list);
+		INIT_LIST_HEAD(&ffl->mirrors);
 		return &ffl->generic_hdr;
 	} else
 		return NULL;
@@ -135,6 +136,95 @@
 	return 0;
 }
 
+static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
+		const struct nfs4_ff_layout_mirror *m2)
+{
+	int i, j;
+
+	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
+		return false;
+	for (i = 0; i < m1->fh_versions_cnt; i++) {
+		bool found_fh = false;
+		for (j = 0; j < m2->fh_versions_cnt; j++) {
+			if (nfs_compare_fh(&m1->fh_versions[i],
+					&m2->fh_versions[j]) == 0) {
+				found_fh = true;
+				break;
+			}
+		}
+		if (!found_fh)
+			return false;
+	}
+	return true;
+}
+
+static struct nfs4_ff_layout_mirror *
+ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
+		struct nfs4_ff_layout_mirror *mirror)
+{
+	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
+	struct nfs4_ff_layout_mirror *pos;
+	struct inode *inode = lo->plh_inode;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
+		if (mirror->mirror_ds != pos->mirror_ds)
+			continue;
+		if (!ff_mirror_match_fh(mirror, pos))
+			continue;
+		if (atomic_inc_not_zero(&pos->ref)) {
+			spin_unlock(&inode->i_lock);
+			return pos;
+		}
+	}
+	list_add(&mirror->mirrors, &ff_layout->mirrors);
+	mirror->layout = lo;
+	spin_unlock(&inode->i_lock);
+	return mirror;
+}
+
+static void
+ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
+{
+	struct inode *inode;
+	if (mirror->layout == NULL)
+		return;
+	inode = mirror->layout->plh_inode;
+	spin_lock(&inode->i_lock);
+	list_del(&mirror->mirrors);
+	spin_unlock(&inode->i_lock);
+	mirror->layout = NULL;
+}
+
+static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
+{
+	struct nfs4_ff_layout_mirror *mirror;
+
+	mirror = kzalloc(sizeof(*mirror), gfp_flags);
+	if (mirror != NULL) {
+		spin_lock_init(&mirror->lock);
+		atomic_set(&mirror->ref, 1);
+		INIT_LIST_HEAD(&mirror->mirrors);
+	}
+	return mirror;
+}
+
+static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
+{
+	ff_layout_remove_mirror(mirror);
+	kfree(mirror->fh_versions);
+	if (mirror->cred)
+		put_rpccred(mirror->cred);
+	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
+	kfree(mirror);
+}
+
+static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
+{
+	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
+		ff_layout_free_mirror(mirror);
+}
+
 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
 {
 	int i;
@@ -144,11 +234,7 @@
 			/* normally mirror_ds is freed in
 			 * .free_deviceid_node but we still do it here
 			 * for .alloc_lseg error path */
-			if (fls->mirror_array[i]) {
-				kfree(fls->mirror_array[i]->fh_versions);
-				nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
-				kfree(fls->mirror_array[i]);
-			}
+			ff_layout_put_mirror(fls->mirror_array[i]);
 		}
 		kfree(fls->mirror_array);
 		fls->mirror_array = NULL;
@@ -181,6 +267,65 @@
 	}
 }
 
+static bool
+ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
+		const struct pnfs_layout_range *l2)
+{
+	u64 end1, end2;
+
+	if (l1->iomode != l2->iomode)
+		return l1->iomode != IOMODE_READ;
+	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
+	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
+	if (end1 < l2->offset)
+		return false;
+	if (end2 < l1->offset)
+		return true;
+	return l2->offset <= l1->offset;
+}
+
+static bool
+ff_lseg_merge(struct pnfs_layout_segment *new,
+		struct pnfs_layout_segment *old)
+{
+	u64 new_end, old_end;
+
+	if (new->pls_range.iomode != old->pls_range.iomode)
+		return false;
+	old_end = pnfs_calc_offset_end(old->pls_range.offset,
+			old->pls_range.length);
+	if (old_end < new->pls_range.offset)
+		return false;
+	new_end = pnfs_calc_offset_end(new->pls_range.offset,
+			new->pls_range.length);
+	if (new_end < old->pls_range.offset)
+		return false;
+
+	/* Mergeable: copy info from 'old' to 'new' */
+	if (new_end < old_end)
+		new_end = old_end;
+	if (new->pls_range.offset < old->pls_range.offset)
+		new->pls_range.offset = old->pls_range.offset;
+	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
+			new_end);
+	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
+		set_bit(NFS_LSEG_ROC, &new->pls_flags);
+	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
+		set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
+	return true;
+}
+
+static void
+ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
+		struct pnfs_layout_segment *lseg,
+		struct list_head *free_me)
+{
+	pnfs_generic_layout_insert_lseg(lo, lseg,
+			ff_lseg_range_is_after,
+			ff_lseg_merge,
+			free_me);
+}
+
 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 {
 	int i, j;
@@ -246,6 +391,7 @@
 		goto out_err_free;
 
 	for (i = 0; i < fls->mirror_array_cnt; i++) {
+		struct nfs4_ff_layout_mirror *mirror;
 		struct nfs4_deviceid devid;
 		struct nfs4_deviceid_node *idnode;
 		u32 ds_count;
@@ -262,17 +408,13 @@
 		if (ds_count != 1)
 			goto out_err_free;
 
-		fls->mirror_array[i] =
-			kzalloc(sizeof(struct nfs4_ff_layout_mirror),
-				gfp_flags);
+		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
 		if (fls->mirror_array[i] == NULL) {
 			rc = -ENOMEM;
 			goto out_err_free;
 		}
 
-		spin_lock_init(&fls->mirror_array[i]->lock);
 		fls->mirror_array[i]->ds_count = ds_count;
-		fls->mirror_array[i]->lseg = &fls->generic_hdr;
 
 		/* deviceid */
 		rc = decode_deviceid(&stream, &devid);
@@ -338,6 +480,12 @@
 		if (rc)
 			goto out_err_free;
 
+		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
+		if (mirror != fls->mirror_array[i]) {
+			ff_layout_free_mirror(fls->mirror_array[i]);
+			fls->mirror_array[i] = mirror;
+		}
+
 		dprintk("%s: uid %d gid %d\n", __func__,
 			fls->mirror_array[i]->uid,
 			fls->mirror_array[i]->gid);
@@ -379,21 +527,9 @@
 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
 {
 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
-	int i;
 
 	dprintk("--> %s\n", __func__);
 
-	for (i = 0; i < fls->mirror_array_cnt; i++) {
-		if (fls->mirror_array[i]) {
-			nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
-			fls->mirror_array[i]->mirror_ds = NULL;
-			if (fls->mirror_array[i]->cred) {
-				put_rpccred(fls->mirror_array[i]->cred);
-				fls->mirror_array[i]->cred = NULL;
-			}
-		}
-	}
-
 	if (lseg->pls_range.iomode == IOMODE_RW) {
 		struct nfs4_flexfile_layout *ffl;
 		struct inode *inode;
@@ -419,48 +555,44 @@
 }
 
 static void
-nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer)
+nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 {
 	/* first IO request? */
 	if (atomic_inc_return(&timer->n_ops) == 1) {
-		timer->start_time = ktime_get();
+		timer->start_time = now;
 	}
 }
 
 static ktime_t
-nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer)
+nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 {
-	ktime_t start, now;
+	ktime_t start;
 
 	if (atomic_dec_return(&timer->n_ops) < 0)
 		WARN_ON_ONCE(1);
 
-	now = ktime_get();
 	start = timer->start_time;
 	timer->start_time = now;
 	return ktime_sub(now, start);
 }
 
-static ktime_t
-nfs4_ff_layout_calc_completion_time(struct rpc_task *task)
-{
-	return ktime_sub(ktime_get(), task->tk_start);
-}
-
 static bool
 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
-			    struct nfs4_ff_layoutstat *layoutstat)
+			    struct nfs4_ff_layoutstat *layoutstat,
+			    ktime_t now)
 {
 	static const ktime_t notime = {0};
-	ktime_t now = ktime_get();
+	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 
-	nfs4_ff_start_busy_timer(&layoutstat->busy_timer);
+	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 	if (ktime_equal(mirror->start_time, notime))
 		mirror->start_time = now;
 	if (ktime_equal(mirror->last_report_time, notime))
 		mirror->last_report_time = now;
+	if (layoutstats_timer != 0)
+		report_interval = (s64)layoutstats_timer * 1000LL;
 	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
-			FF_LAYOUTSTATS_REPORT_INTERVAL) {
+			report_interval) {
 		mirror->last_report_time = now;
 		return true;
 	}
@@ -482,35 +614,39 @@
 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
 		__u64 requested,
 		__u64 completed,
-		ktime_t time_completed)
+		ktime_t time_completed,
+		ktime_t time_started)
 {
 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
+	ktime_t completion_time = ktime_sub(time_completed, time_started);
 	ktime_t timer;
 
 	iostat->ops_completed++;
 	iostat->bytes_completed += completed;
 	iostat->bytes_not_delivered += requested - completed;
 
-	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer);
+	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
 	iostat->total_busy_time =
 			ktime_add(iostat->total_busy_time, timer);
 	iostat->aggregate_completion_time =
-			ktime_add(iostat->aggregate_completion_time, time_completed);
+			ktime_add(iostat->aggregate_completion_time,
+					completion_time);
 }
 
 static void
-nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror,
-		__u64 requested)
+nfs4_ff_layout_stat_io_start_read(struct inode *inode,
+		struct nfs4_ff_layout_mirror *mirror,
+		__u64 requested, ktime_t now)
 {
 	bool report;
 
 	spin_lock(&mirror->lock);
-	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat);
+	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
 	spin_unlock(&mirror->lock);
 
 	if (report)
-		pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode);
+		pnfs_report_layoutstat(inode, GFP_KERNEL);
 }
 
 static void
@@ -522,23 +658,24 @@
 	spin_lock(&mirror->lock);
 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 			requested, completed,
-			nfs4_ff_layout_calc_completion_time(task));
+			ktime_get(), task->tk_start);
 	spin_unlock(&mirror->lock);
 }
 
 static void
-nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror,
-		__u64 requested)
+nfs4_ff_layout_stat_io_start_write(struct inode *inode,
+		struct nfs4_ff_layout_mirror *mirror,
+		__u64 requested, ktime_t now)
 {
 	bool report;
 
 	spin_lock(&mirror->lock);
-	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat);
+	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
 	spin_unlock(&mirror->lock);
 
 	if (report)
-		pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode);
+		pnfs_report_layoutstat(inode, GFP_NOIO);
 }
 
 static void
@@ -553,8 +690,7 @@
 
 	spin_lock(&mirror->lock);
 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
-			requested, completed,
-			nfs4_ff_layout_calc_completion_time(task));
+			requested, completed, ktime_get(), task->tk_start);
 	spin_unlock(&mirror->lock);
 }
 
@@ -728,8 +864,6 @@
 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
 
 	/* no lseg means that pnfs is not in use, so no mirroring here */
-	pnfs_put_lseg(pgio->pg_lseg);
-	pgio->pg_lseg = NULL;
 	nfs_pageio_reset_write_mds(pgio);
 	return 1;
 }
@@ -931,18 +1065,26 @@
 	if (task->tk_status >= 0)
 		return 0;
 
-	if (task->tk_status != -EJUKEBOX) {
+	switch (task->tk_status) {
+	/* File access problems. Don't mark the device as unavailable */
+	case -EACCES:
+	case -ESTALE:
+	case -EISDIR:
+	case -EBADHANDLE:
+	case -ELOOP:
+	case -ENOSPC:
+		break;
+	case -EJUKEBOX:
+		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+		goto out_retry;
+	default:
 		dprintk("%s DS connection error %d\n", __func__,
 			task->tk_status);
 		nfs4_mark_deviceid_unavailable(devid);
-		if (ff_layout_has_available_ds(lseg))
-			return -NFS4ERR_RESET_TO_PNFS;
-		else
-			return -NFS4ERR_RESET_TO_MDS;
 	}
-
-	if (task->tk_status == -EJUKEBOX)
-		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+	/* FIXME: Need to prevent infinite looping here. */
+	return -NFS4ERR_RESET_TO_PNFS;
+out_retry:
 	task->tk_status = 0;
 	rpc_restart_call(task);
 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
@@ -972,15 +1114,41 @@
 
 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 					int idx, u64 offset, u64 length,
-					u32 status, int opnum)
+					u32 status, int opnum, int error)
 {
 	struct nfs4_ff_layout_mirror *mirror;
 	int err;
 
+	if (status == 0) {
+		switch (error) {
+		case -ETIMEDOUT:
+		case -EPFNOSUPPORT:
+		case -EPROTONOSUPPORT:
+		case -EOPNOTSUPP:
+		case -ECONNREFUSED:
+		case -ECONNRESET:
+		case -EHOSTDOWN:
+		case -EHOSTUNREACH:
+		case -ENETUNREACH:
+		case -EADDRINUSE:
+		case -ENOBUFS:
+		case -EPIPE:
+		case -EPERM:
+			status = NFS4ERR_NXIO;
+			break;
+		case -EACCES:
+			status = NFS4ERR_ACCESS;
+			break;
+		default:
+			return;
+		}
+	}
+
 	mirror = FF_LAYOUT_COMP(lseg, idx);
 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
 				       mirror, offset, length, status, opnum,
 				       GFP_NOIO);
+	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
 }
 
@@ -989,16 +1157,14 @@
 static int ff_layout_read_done_cb(struct rpc_task *task,
 				struct nfs_pgio_header *hdr)
 {
-	struct inode *inode;
 	int err;
 
 	trace_nfs4_pnfs_read(hdr, task->tk_status);
-	if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
-		hdr->res.op_status = NFS4ERR_NXIO;
-	if (task->tk_status < 0 && hdr->res.op_status)
+	if (task->tk_status < 0)
 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
 					    hdr->args.offset, hdr->args.count,
-					    hdr->res.op_status, OP_READ);
+					    hdr->res.op_status, OP_READ,
+					    task->tk_status);
 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
 					   hdr->ds_clp, hdr->lseg,
 					   hdr->pgio_mirror_idx);
@@ -1010,8 +1176,6 @@
 		pnfs_read_resend_pnfs(hdr);
 		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
-		inode = hdr->lseg->pls_layout->plh_inode;
-		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
 		ff_layout_reset_read(hdr);
 		return task->tk_status;
 	case -EAGAIN:
@@ -1061,9 +1225,10 @@
 static int ff_layout_read_prepare_common(struct rpc_task *task,
 					 struct nfs_pgio_header *hdr)
 {
-	nfs4_ff_layout_stat_io_start_read(
+	nfs4_ff_layout_stat_io_start_read(hdr->inode,
 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
-			hdr->args.count);
+			hdr->args.count,
+			task->tk_start);
 
 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
 		rpc_exit(task, -EIO);
@@ -1163,32 +1328,26 @@
 static int ff_layout_write_done_cb(struct rpc_task *task,
 				struct nfs_pgio_header *hdr)
 {
-	struct inode *inode;
 	int err;
 
 	trace_nfs4_pnfs_write(hdr, task->tk_status);
-	if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
-		hdr->res.op_status = NFS4ERR_NXIO;
-	if (task->tk_status < 0 && hdr->res.op_status)
+	if (task->tk_status < 0)
 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
 					    hdr->args.offset, hdr->args.count,
-					    hdr->res.op_status, OP_WRITE);
+					    hdr->res.op_status, OP_WRITE,
+					    task->tk_status);
 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
 					   hdr->ds_clp, hdr->lseg,
 					   hdr->pgio_mirror_idx);
 
 	switch (err) {
 	case -NFS4ERR_RESET_TO_PNFS:
+		pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
+		ff_layout_reset_write(hdr, true);
+		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
-		inode = hdr->lseg->pls_layout->plh_inode;
-		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
-		if (err == -NFS4ERR_RESET_TO_PNFS) {
-			pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
-			ff_layout_reset_write(hdr, true);
-		} else {
-			pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
-			ff_layout_reset_write(hdr, false);
-		}
+		pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
+		ff_layout_reset_write(hdr, false);
 		return task->tk_status;
 	case -EAGAIN:
 		rpc_restart_call_prepare(task);
@@ -1199,34 +1358,35 @@
 	    hdr->res.verf->committed == NFS_DATA_SYNC)
 		ff_layout_set_layoutcommit(hdr);
 
+	/* zero out fattr since we don't care about DS attrs at all */
+	hdr->fattr.valid = 0;
+	if (task->tk_status >= 0)
+		nfs_writeback_update_inode(hdr);
+
 	return 0;
 }
 
 static int ff_layout_commit_done_cb(struct rpc_task *task,
 				     struct nfs_commit_data *data)
 {
-	struct inode *inode;
 	int err;
 
 	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
-	if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
-		data->res.op_status = NFS4ERR_NXIO;
-	if (task->tk_status < 0 && data->res.op_status)
+	if (task->tk_status < 0)
 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
 					    data->args.offset, data->args.count,
-					    data->res.op_status, OP_COMMIT);
+					    data->res.op_status, OP_COMMIT,
+					    task->tk_status);
 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
 					   data->lseg, data->ds_commit_index);
 
 	switch (err) {
 	case -NFS4ERR_RESET_TO_PNFS:
+		pnfs_set_retry_layoutget(data->lseg->pls_layout);
+		pnfs_generic_prepare_to_resend_writes(data);
+		return -EAGAIN;
 	case -NFS4ERR_RESET_TO_MDS:
-		inode = data->lseg->pls_layout->plh_inode;
-		pnfs_error_mark_layout_for_return(inode, data->lseg);
-		if (err == -NFS4ERR_RESET_TO_PNFS)
-			pnfs_set_retry_layoutget(data->lseg->pls_layout);
-		else
-			pnfs_clear_retry_layoutget(data->lseg->pls_layout);
+		pnfs_clear_retry_layoutget(data->lseg->pls_layout);
 		pnfs_generic_prepare_to_resend_writes(data);
 		return -EAGAIN;
 	case -EAGAIN:
@@ -1244,9 +1404,10 @@
 static int ff_layout_write_prepare_common(struct rpc_task *task,
 					  struct nfs_pgio_header *hdr)
 {
-	nfs4_ff_layout_stat_io_start_write(
+	nfs4_ff_layout_stat_io_start_write(hdr->inode,
 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
-			hdr->args.count);
+			hdr->args.count,
+			task->tk_start);
 
 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
 		rpc_exit(task, -EIO);
@@ -1325,9 +1486,9 @@
 static void ff_layout_commit_prepare_common(struct rpc_task *task,
 		struct nfs_commit_data *cdata)
 {
-	nfs4_ff_layout_stat_io_start_write(
+	nfs4_ff_layout_stat_io_start_write(cdata->inode,
 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
-			0);
+			0, task->tk_start);
 }
 
 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
@@ -1842,53 +2003,55 @@
 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
 }
 
-static bool
+static int
 ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
-			       struct pnfs_layout_segment *pls,
-			       int *dev_count, int dev_limit)
+			       struct pnfs_layout_hdr *lo,
+			       int dev_limit)
 {
+	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 	struct nfs4_ff_layout_mirror *mirror;
 	struct nfs4_deviceid_node *dev;
 	struct nfs42_layoutstat_devinfo *devinfo;
-	int i;
+	int i = 0;
 
-	for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
-		if (*dev_count >= dev_limit)
+	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
+		if (i >= dev_limit)
 			break;
-		mirror = FF_LAYOUT_COMP(pls, i);
-		if (!mirror || !mirror->mirror_ds)
+		if (!mirror->mirror_ds)
 			continue;
-		dev = FF_LAYOUT_DEVID_NODE(pls, i);
-		devinfo = &args->devinfo[*dev_count];
+		/* mirror refcount put in cleanup_layoutstats */
+		if (!atomic_inc_not_zero(&mirror->ref))
+			continue;
+		dev = &mirror->mirror_ds->id_node;
+		devinfo = &args->devinfo[i];
 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
-		devinfo->offset = pls->pls_range.offset;
-		devinfo->length = pls->pls_range.length;
-		/* well, we don't really know if IO is continuous or not! */
-		devinfo->read_count = mirror->read_stat.io_stat.bytes_completed;
+		devinfo->offset = 0;
+		devinfo->length = NFS4_MAX_UINT64;
+		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
-		devinfo->write_count = mirror->write_stat.io_stat.bytes_completed;
+		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
 		devinfo->layout_type = LAYOUT_FLEX_FILES;
 		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
 		devinfo->layout_private = mirror;
-		/* lseg refcount put in cleanup_layoutstats */
-		pnfs_get_lseg(pls);
 
-		++(*dev_count);
+		i++;
 	}
-
-	return *dev_count < dev_limit;
+	return i;
 }
 
 static int
 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
 {
-	struct pnfs_layout_segment *pls;
+	struct nfs4_flexfile_layout *ff_layout;
+	struct nfs4_ff_layout_mirror *mirror;
 	int dev_count = 0;
 
 	spin_lock(&args->inode->i_lock);
-	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
-		dev_count += FF_LAYOUT_MIRROR_COUNT(pls);
+	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
+	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
+		if (atomic_read(&mirror->ref) != 0)
+			dev_count++;
 	}
 	spin_unlock(&args->inode->i_lock);
 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
@@ -1897,20 +2060,14 @@
 			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
 		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
 	}
-	args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL);
+	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
 	if (!args->devinfo)
 		return -ENOMEM;
 
-	dev_count = 0;
 	spin_lock(&args->inode->i_lock);
-	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
-		if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count,
-						    PNFS_LAYOUTSTATS_MAXDEV)) {
-			break;
-		}
-	}
+	args->num_dev = ff_layout_mirror_prepare_stats(args,
+			&ff_layout->generic_hdr, dev_count);
 	spin_unlock(&args->inode->i_lock);
-	args->num_dev = dev_count;
 
 	return 0;
 }
@@ -1924,7 +2081,7 @@
 	for (i = 0; i < data->args.num_dev; i++) {
 		mirror = data->args.devinfo[i].layout_private;
 		data->args.devinfo[i].layout_private = NULL;
-		pnfs_put_lseg(mirror->lseg);
+		ff_layout_put_mirror(mirror);
 	}
 }
 
@@ -1936,6 +2093,7 @@
 	.free_layout_hdr	= ff_layout_free_layout_hdr,
 	.alloc_lseg		= ff_layout_alloc_lseg,
 	.free_lseg		= ff_layout_free_lseg,
+	.add_lseg		= ff_layout_add_lseg,
 	.pg_read_ops		= &ff_layout_pg_read_ops,
 	.pg_write_ops		= &ff_layout_pg_write_ops,
 	.get_ds_info		= ff_layout_get_ds_info,
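
ff_layout_add_mirror() above implements a lookup-or-insert keyed on
mirror_ds and the filehandle list, where atomic_inc_not_zero() refuses to
take a reference on a mirror whose refcount has already dropped to zero
(i.e. one being freed). A condensed sketch, with matches() standing in for
the two comparisons:

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (matches(pos, new) && atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;	/* caller frees its duplicate 'new' */
		}
	}
	list_add(&new->mirrors, &ff_layout->mirrors);
	spin_unlock(&inode->i_lock);
	return new;
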
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f92f9a0..68cc0d9 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -67,7 +67,8 @@
 };
 
 struct nfs4_ff_layout_mirror {
-	struct pnfs_layout_segment	*lseg; /* back pointer */
+	struct pnfs_layout_hdr		*layout;
+	struct list_head		mirrors;
 	u32				ds_count;
 	u32				efficiency;
 	struct nfs4_ff_layout_ds	*mirror_ds;
@@ -77,6 +78,7 @@
 	u32				uid;
 	u32				gid;
 	struct rpc_cred			*cred;
+	atomic_t			ref;
 	spinlock_t			lock;
 	struct nfs4_ff_layoutstat	read_stat;
 	struct nfs4_ff_layoutstat	write_stat;
@@ -95,6 +97,7 @@
 struct nfs4_flexfile_layout {
 	struct pnfs_layout_hdr generic_hdr;
 	struct pnfs_ds_commit_info commit_info;
+	struct list_head	mirrors;
 	struct list_head	error_list; /* nfs4_ff_layout_ds_err */
 };
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index f13e196..e125e55 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -172,6 +172,32 @@
 	return NULL;
 }
 
+static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
+		struct nfs4_deviceid_node *devid)
+{
+	nfs4_mark_deviceid_unavailable(devid);
+	if (!ff_layout_has_available_ds(lseg))
+		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
+				lseg);
+}
+
+static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
+		struct nfs4_ff_layout_mirror *mirror)
+{
+	if (mirror == NULL || mirror->mirror_ds == NULL) {
+		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
+					lseg);
+		return false;
+	}
+	if (mirror->mirror_ds->ds == NULL) {
+		struct nfs4_deviceid_node *devid;
+		devid = &mirror->mirror_ds->id_node;
+		ff_layout_mark_devid_invalid(lseg, devid);
+		return false;
+	}
+	return true;
+}
+
 static u64
 end_offset(u64 start, u64 len)
 {
@@ -336,16 +362,10 @@
 {
 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
 	struct nfs_fh *fh = NULL;
-	struct nfs4_deviceid_node *devid;
 
-	if (mirror == NULL || mirror->mirror_ds == NULL ||
-	    mirror->mirror_ds->ds == NULL) {
-		printk(KERN_ERR "NFS: %s: No data server for mirror offset index %d\n",
+	if (!ff_layout_mirror_valid(lseg, mirror)) {
+		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
 			__func__, mirror_idx);
-		if (mirror && mirror->mirror_ds) {
-			devid = &mirror->mirror_ds->id_node;
-			pnfs_generic_mark_devid_invalid(devid);
-		}
 		goto out;
 	}
 
@@ -368,14 +388,9 @@
 	unsigned int max_payload;
 	rpc_authflavor_t flavor;
 
-	if (mirror == NULL || mirror->mirror_ds == NULL ||
-	    mirror->mirror_ds->ds == NULL) {
-		printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
+	if (!ff_layout_mirror_valid(lseg, mirror)) {
+		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
 			__func__, ds_idx);
-		if (mirror && mirror->mirror_ds) {
-			devid = &mirror->mirror_ds->id_node;
-			pnfs_generic_mark_devid_invalid(devid);
-		}
 		goto out;
 	}
 
@@ -500,16 +515,19 @@
 					   range->offset, range->length))
 			continue;
 		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
-		 * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
+		 * + array length(4) + deviceid(NFS4_DEVICEID4_SIZE)
+		 * + status(4) + opnum(4)
 		 */
 		p = xdr_reserve_space(xdr,
-				24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
+				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
 		if (unlikely(!p))
 			return -ENOBUFS;
 		p = xdr_encode_hyper(p, err->offset);
 		p = xdr_encode_hyper(p, err->length);
 		p = xdr_encode_opaque_fixed(p, &err->stateid,
 					    NFS4_STATEID_SIZE);
+		/* Encode 1 error */
+		*p++ = cpu_to_be32(1);
 		p = xdr_encode_opaque_fixed(p, &err->deviceid,
 					    NFS4_DEVICEID4_SIZE);
 		*p++ = cpu_to_be32(err->status);
@@ -525,11 +543,11 @@
 	return 0;
 }
 
-bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 {
 	struct nfs4_ff_layout_mirror *mirror;
 	struct nfs4_deviceid_node *devid;
-	int idx;
+	u32 idx;
 
 	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
 		mirror = FF_LAYOUT_COMP(lseg, idx);
@@ -543,6 +561,32 @@
 	return false;
 }
 
+static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+{
+	struct nfs4_ff_layout_mirror *mirror;
+	struct nfs4_deviceid_node *devid;
+	u32 idx;
+
+	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
+		mirror = FF_LAYOUT_COMP(lseg, idx);
+		if (!mirror || !mirror->mirror_ds)
+			return false;
+		devid = &mirror->mirror_ds->id_node;
+		if (ff_layout_test_devid_unavailable(devid))
+			return false;
+	}
+
+	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
+}
+
+bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+{
+	if (lseg->pls_range.iomode == IOMODE_READ)
+		return ff_read_layout_has_available_ds(lseg);
+	/* Note: RW layout needs all mirrors available */
+	return ff_rw_layout_has_available_ds(lseg);
+}
+
 module_param(dataserver_retrans, uint, 0644);
 MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
 			"retries a request before it attempts further "
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 0adc7d2..326d9e1 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -504,7 +504,7 @@
 {
 	struct inode *inode = d_inode(dentry);
 	struct nfs_fattr *fattr;
-	int error = -ENOMEM;
+	int error = 0;
 
 	nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
 
@@ -513,15 +513,14 @@
 		attr->ia_valid &= ~ATTR_MODE;
 
 	if (attr->ia_valid & ATTR_SIZE) {
-		loff_t i_size;
-
 		BUG_ON(!S_ISREG(inode->i_mode));
 
-		i_size = i_size_read(inode);
-		if (attr->ia_size == i_size)
+		error = inode_newsize_ok(inode, attr->ia_size);
+		if (error)
+			return error;
+
+		if (attr->ia_size == i_size_read(inode))
 			attr->ia_valid &= ~ATTR_SIZE;
-		else if (attr->ia_size < i_size && IS_SWAPFILE(inode))
-			return -ETXTBSY;
 	}
 
 	/* Optimization: if the end result is no change, don't RPC */
@@ -536,8 +535,11 @@
 		nfs_sync_inode(inode);
 
 	fattr = nfs_alloc_fattr();
-	if (fattr == NULL)
+	if (fattr == NULL) {
+		error = -ENOMEM;
 		goto out;
+	}
+
 	/*
 	 * Return any delegations if we're going to change ACLs
 	 */
@@ -759,11 +761,13 @@
  * @ctx: pointer to context
  * @is_sync: is this a synchronous close
  *
- * always ensure that the attributes are up to date if we're mounted
- * with close-to-open semantics
+ * Ensure that the attributes are up to date if we're mounted
+ * with close-to-open semantics and we have cached data that will
+ * need to be revalidated on open.
  */
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
 {
+	struct nfs_inode *nfsi;
 	struct inode *inode;
 	struct nfs_server *server;
 
@@ -772,7 +776,12 @@
 	if (!is_sync)
 		return;
 	inode = d_inode(ctx->dentry);
-	if (!list_empty(&NFS_I(inode)->open_files))
+	nfsi = NFS_I(inode);
+	if (inode->i_mapping->nrpages == 0)
+		return;
+	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+		return;
+	if (!list_empty(&nfsi->open_files))
 		return;
 	server = NFS_SERVER(inode);
 	if (server->flags & NFS_MOUNT_NOCTO)
@@ -844,6 +853,11 @@
 }
 EXPORT_SYMBOL_GPL(put_nfs_open_context);
 
+static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
+{
+	__put_nfs_open_context(ctx, 1);
+}
+
 /*
  * Ensure that mmap has a recent RPC credential for use when writing out
  * shared pages
@@ -888,7 +902,7 @@
 	return ctx;
 }
 
-static void nfs_file_clear_open_context(struct file *filp)
+void nfs_file_clear_open_context(struct file *filp)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(filp);
 
@@ -899,7 +913,7 @@
 		spin_lock(&inode->i_lock);
 		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
 		spin_unlock(&inode->i_lock);
-		__put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1);
+		put_nfs_open_context_sync(ctx);
 	}
 }
 
@@ -919,12 +933,6 @@
 	return 0;
 }
 
-int nfs_release(struct inode *inode, struct file *filp)
-{
-	nfs_file_clear_open_context(filp);
-	return 0;
-}
-
 /*
  * This function is called whenever some part of NFS notices that
  * the cached attributes have to be refreshed.
@@ -1273,13 +1281,6 @@
 	return 0;
 }
 
-static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
-{
-	if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
-		return 0;
-	return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
-}
-
 static atomic_long_t nfs_attr_generation_counter;
 
 static unsigned long nfs_read_attr_generation_counter(void)
@@ -1428,7 +1429,6 @@
 	const struct nfs_inode *nfsi = NFS_I(inode);
 
 	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
-		nfs_ctime_need_update(inode, fattr) ||
 		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
 }
 
@@ -1491,6 +1491,13 @@
 {
 	unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 
+	/*
+	 * Don't revalidate the pagecache if we hold a delegation, but do
+	 * force an attribute update
+	 */
+	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+		invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
+
 	if (S_ISDIR(inode->i_mode))
 		invalid |= NFS_INO_INVALID_DATA;
 	nfs_set_cache_invalid(inode, invalid);
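
In the nfs_setattr() hunk above, inode_newsize_ok() subsumes the removed
IS_SWAPFILE check (still returning -ETXTBSY for a shrinking swapfile) and
additionally enforces RLIMIT_FSIZE and s_maxbytes, so truncation now fails
early with the appropriate error:

	error = inode_newsize_ok(inode, attr->ia_size);
	if (error)
		return error;	/* -ETXTBSY, -EFBIG, ... */
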
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9b372b8..56cfde2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -219,10 +219,6 @@
 }
 #endif
 
-#ifdef CONFIG_NFS_V4_1
-int nfs_sockaddr_match_ipaddr(const struct sockaddr *, const struct sockaddr *);
-#endif
-
 /* callback_xdr.c */
 extern struct svc_version nfs4_callback_version1;
 extern struct svc_version nfs4_callback_version4;
@@ -364,7 +360,6 @@
 /* file.c */
 int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
 loff_t nfs_file_llseek(struct file *, loff_t, int);
-int nfs_file_flush(struct file *, fl_owner_t);
 ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
 ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
 			     size_t, unsigned int);
@@ -490,6 +485,9 @@
 void nfs_commitdata_release(struct nfs_commit_data *data);
 void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 				 struct nfs_commit_info *cinfo);
+void nfs_request_add_commit_list_locked(struct nfs_page *req,
+		struct list_head *dst,
+		struct nfs_commit_info *cinfo);
 void nfs_request_remove_commit_list(struct nfs_page *req,
 				    struct nfs_commit_info *cinfo);
 void nfs_init_cinfo(struct nfs_commit_info *cinfo,
@@ -623,13 +621,15 @@
  * Record the page as unstable and mark its inode as dirty.
  */
 static inline
-void nfs_mark_page_unstable(struct page *page)
+void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
 {
-	struct inode *inode = page_file_mapping(page)->host;
+	if (!cinfo->dreq) {
+		struct inode *inode = page_file_mapping(page)->host;
 
-	inc_zone_page_state(page, NR_UNSTABLE_NFS);
-	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
-	 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+		inc_zone_page_state(page, NR_UNSTABLE_NFS);
+		inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
+		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	}
 }
 
 /*
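
nfs_mark_page_unstable() now receives the commit info so it can skip the
NR_UNSTABLE_NFS and writeback accounting for O_DIRECT requests, which set
cinfo->dreq and bypass the page cache. A typical caller (assuming the write
path hands over its struct nfs_page and cinfo) just passes the cinfo
through:

	/* buffered writes get accounted; direct I/O is a no-op inside */
	nfs_mark_page_unstable(req->wb_page, cinfo);
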
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 9b04c2e..267126d 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1103,6 +1103,7 @@
 {
 	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
 	encode_symlinkdata3(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
 }
 
 /*
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index ff66ae7..814c125 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -17,7 +17,5 @@
 loff_t nfs42_proc_llseek(struct file *, loff_t, int);
 int nfs42_proc_layoutstats_generic(struct nfs_server *,
 				   struct nfs42_layoutstat_data *);
-/* nfs4.2xdr.h */
-extern struct rpc_procinfo nfs4_2_procedures[];
 
 #endif /* __LINUX_FS_NFS_NFS4_2_H */
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index a6bd27d..0eb29e1 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -238,8 +238,7 @@
 	return -EIO;
 }
 
-static int decode_layoutstats(struct xdr_stream *xdr,
-			      struct nfs42_layoutstat_res *res)
+static int decode_layoutstats(struct xdr_stream *xdr)
 {
 	return decode_op_hdr(xdr, OP_LAYOUTSTATS);
 }
@@ -343,7 +342,7 @@
 		goto out;
 	WARN_ON(res->num_dev > PNFS_LAYOUTSTATS_MAXDEV);
 	for (i = 0; i < res->num_dev; i++) {
-		status = decode_layoutstats(xdr, res);
+		status = decode_layoutstats(xdr);
 		if (status)
 			goto out;
 	}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ea3bee9..50cfc4c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -405,9 +405,7 @@
 int nfs41_discover_server_trunking(struct nfs_client *clp,
 			struct nfs_client **, struct rpc_cred *);
 extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
-extern void nfs41_server_notify_target_slotid_update(struct nfs_client *clp);
-extern void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp);
-
+extern void nfs41_notify_server(struct nfs_client *);
 #else
 static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 {
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 3aa6a9b..223bedd 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -729,10 +729,7 @@
 		return false;
 
 	/* Match only the IP address, not the port number */
-	if (!nfs_sockaddr_match_ipaddr(addr, clap))
-		return false;
-
-	return true;
+	return rpc_cmp_addr(addr, clap);
 }
 
 /*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index dcd39d4..b0dbe0a 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -6,7 +6,9 @@
 #include <linux/fs.h>
 #include <linux/falloc.h>
 #include <linux/nfs_fs.h>
+#include "delegation.h"
 #include "internal.h"
+#include "iostat.h"
 #include "fscache.h"
 #include "pnfs.h"
 
@@ -27,7 +29,6 @@
 	struct inode *dir;
 	unsigned openflags = filp->f_flags;
 	struct iattr attr;
-	int opened = 0;
 	int err;
 
 	/*
@@ -66,7 +67,7 @@
 		nfs_sync_inode(inode);
 	}
 
-	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
+	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		switch (err) {
@@ -100,6 +101,31 @@
 	goto out_put_ctx;
 }
 
+/*
+ * Flush all dirty pages, and check for write errors.
+ */
+static int
+nfs4_file_flush(struct file *file, fl_owner_t id)
+{
+	struct inode	*inode = file_inode(file);
+
+	dprintk("NFS: flush(%pD2)\n", file);
+
+	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
+	if ((file->f_mode & FMODE_WRITE) == 0)
+		return 0;
+
+	/*
+	 * If we're holding a write delegation, then check if we're required
+	 * to flush the i/o on close. If not, then just start the i/o now.
+	 */
+	if (!nfs4_delegation_flush_on_close(inode))
+		return filemap_fdatawrite(file->f_mapping);
+
+	/* Flush writes to the server and return any errors */
+	return vfs_fsync(file, 0);
+}
+
 static int
 nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
@@ -178,7 +204,7 @@
 	.write_iter	= nfs_file_write,
 	.mmap		= nfs_file_mmap,
 	.open		= nfs4_file_open,
-	.flush		= nfs_file_flush,
+	.flush		= nfs4_file_flush,
 	.release	= nfs_file_release,
 	.fsync		= nfs4_file_fsync,
 	.lock		= nfs_lock,
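
The delegation-aware flush above trades a synchronous wait for an asynchronous writeback start whenever the held write delegation does not require flushing on close. A minimal userspace sketch of just that decision (stand-in helpers with invented names, not the kernel API; errors from the deferred writeback are picked up later, e.g. by a subsequent fsync):

#include <stdbool.h>
#include <stdio.h>

struct file_ctx { bool writable; bool deleg_flush_on_close; };

/* Stand-ins for filemap_fdatawrite() and vfs_fsync(); assume 0 on
 * success, negative errno on failure. */
static int start_writeback(struct file_ctx *f) { (void)f; return 0; }
static int sync_and_wait(struct file_ctx *f)   { (void)f; return 0; }

static int flush_on_close(struct file_ctx *f)
{
	if (!f->writable)
		return 0;                  /* nothing to push back */
	if (!f->deleg_flush_on_close)
		return start_writeback(f); /* delegation: start I/O, don't wait */
	return sync_and_wait(f);           /* flush now and report errors */
}

int main(void)
{
	struct file_ctx f = { .writable = true, .deleg_flush_on_close = false };
	printf("flush -> %d\n", flush_on_close(&f));
	return 0;
}
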
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 535dfc6..2e49022 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -184,7 +184,7 @@
 	.read		= user_read,
 };
 
-static int nfs_idmap_init_keyring(void)
+int nfs_idmap_init(void)
 {
 	struct cred *cred;
 	struct key *keyring;
@@ -230,7 +230,7 @@
 	return ret;
 }
 
-static void nfs_idmap_quit_keyring(void)
+void nfs_idmap_quit(void)
 {
 	key_revoke(id_resolver_cache->thread_keyring);
 	unregister_key_type(&key_type_id_resolver);
@@ -492,16 +492,6 @@
 	kfree(idmap);
 }
 
-int nfs_idmap_init(void)
-{
-	return nfs_idmap_init_keyring();
-}
-
-void nfs_idmap_quit(void)
-{
-	nfs_idmap_quit_keyring();
-}
-
 static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
 				     struct idmap_msg *im,
 				     struct rpc_pipe_msg *msg)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3acb1eb..693b903 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -586,7 +586,7 @@
 	spin_unlock(&tbl->slot_tbl_lock);
 	res->sr_slot = NULL;
 	if (send_new_highest_used_slotid)
-		nfs41_server_notify_highest_slotid_update(session->clp);
+		nfs41_notify_server(session->clp);
 }
 
 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
@@ -1150,7 +1150,8 @@
 	return ret;
 }
 
-static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
+static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
+		enum open_claim_type4 claim)
 {
 	if (delegation == NULL)
 		return 0;
@@ -1158,6 +1159,16 @@
 		return 0;
 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
 		return 0;
+	switch (claim) {
+	case NFS4_OPEN_CLAIM_NULL:
+	case NFS4_OPEN_CLAIM_FH:
+		break;
+	case NFS4_OPEN_CLAIM_PREVIOUS:
+		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
+			break;
+	default:
+		return 0;
+	}
 	nfs_mark_delegation_referenced(delegation);
 	return 1;
 }
@@ -1220,6 +1231,7 @@
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+		nfs4_stateid *arg_stateid,
 		nfs4_stateid *stateid, fmode_t fmode)
 {
 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1238,8 +1250,9 @@
 	if (stateid == NULL)
 		return;
 	/* Handle races with OPEN */
-	if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
-	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
+	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
 		nfs_resync_open_stateid_locked(state);
 		return;
 	}
@@ -1248,10 +1261,12 @@
 	nfs4_stateid_copy(&state->open_stateid, stateid);
 }
 
-static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+static void nfs_clear_open_stateid(struct nfs4_state *state,
+	nfs4_stateid *arg_stateid,
+	nfs4_stateid *stateid, fmode_t fmode)
 {
 	write_seqlock(&state->seqlock);
-	nfs_clear_open_stateid_locked(state, stateid, fmode);
+	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
 	write_sequnlock(&state->seqlock);
 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -1376,6 +1391,7 @@
 	struct nfs_delegation *delegation;
 	int open_mode = opendata->o_arg.open_flags;
 	fmode_t fmode = opendata->o_arg.fmode;
+	enum open_claim_type4 claim = opendata->o_arg.claim;
 	nfs4_stateid stateid;
 	int ret = -EAGAIN;
 
@@ -1389,7 +1405,7 @@
 		spin_unlock(&state->owner->so_lock);
 		rcu_read_lock();
 		delegation = rcu_dereference(nfsi->delegation);
-		if (!can_open_delegated(delegation, fmode)) {
+		if (!can_open_delegated(delegation, fmode, claim)) {
 			rcu_read_unlock();
 			break;
 		}
@@ -1852,6 +1868,7 @@
 	struct nfs4_opendata *data = calldata;
 	struct nfs4_state_owner *sp = data->owner;
 	struct nfs_client *clp = sp->so_server->nfs_client;
+	enum open_claim_type4 claim = data->o_arg.claim;
 
 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
 		goto out_wait;
@@ -1866,15 +1883,15 @@
 			goto out_no_action;
 		rcu_read_lock();
 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
-		    data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
-		    can_open_delegated(delegation, data->o_arg.fmode))
+		if (can_open_delegated(delegation, data->o_arg.fmode, claim))
 			goto unlock_no_action;
 		rcu_read_unlock();
 	}
 	/* Update client id. */
 	data->o_arg.clientid = clp->cl_clientid;
-	switch (data->o_arg.claim) {
+	switch (claim) {
+	default:
+		break;
 	case NFS4_OPEN_CLAIM_PREVIOUS:
 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
@@ -2294,15 +2311,25 @@
  * fields corresponding to attributes that were used to store the verifier.
  * Make sure we clobber those fields in the later setattr call
  */
-static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
+static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
+				struct iattr *sattr, struct nfs4_label **label)
 {
-	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
+	const u32 *attrset = opendata->o_res.attrset;
+
+	if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
 	    !(sattr->ia_valid & ATTR_ATIME_SET))
 		sattr->ia_valid |= ATTR_ATIME;
 
-	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
+	if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
 	    !(sattr->ia_valid & ATTR_MTIME_SET))
 		sattr->ia_valid |= ATTR_MTIME;
+
+	/* Except for MODE, setting an attribute twice is harmless. */
+	if ((attrset[1] & FATTR4_WORD1_MODE))
+		sattr->ia_valid &= ~ATTR_MODE;
+
+	if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
+		*label = NULL;
 }
 
 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
@@ -2425,9 +2452,9 @@
 		goto err_free_label;
 	state = ctx->state;
 
-	if ((opendata->o_arg.open_flags & O_EXCL) &&
+	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
-		nfs4_exclusive_attrset(opendata, sattr);
+		nfs4_exclusive_attrset(opendata, sattr, &label);
 
 		nfs_fattr_init(opendata->o_res.f_attr);
 		status = nfs4_do_setattr(state->inode, cred,
@@ -2439,7 +2466,7 @@
 			nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
 		}
 	}
-	if (opendata->file_created)
+	if (opened && opendata->file_created)
 		*opened |= FILE_CREATED;
 
 	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
@@ -2661,7 +2688,7 @@
 	switch (task->tk_status) {
 		case 0:
 			res_stateid = &calldata->res.stateid;
-			if (calldata->arg.fmode == 0 && calldata->roc)
+			if (calldata->roc)
 				pnfs_roc_set_barrier(state->inode,
 						     calldata->roc_barrier);
 			renew_lease(server, calldata->timestamp);
@@ -2684,7 +2711,8 @@
 				goto out_release;
 			}
 	}
-	nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
+	nfs_clear_open_stateid(state, &calldata->arg.stateid,
+			res_stateid, calldata->arg.fmode);
 out_release:
 	nfs_release_seqid(calldata->arg.seqid);
 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
@@ -2735,14 +2763,11 @@
 		goto out_no_action;
 	}
 
-	if (calldata->arg.fmode == 0) {
+	if (calldata->arg.fmode == 0)
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
-		if (calldata->roc &&
-		    pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
-			nfs_release_seqid(calldata->arg.seqid);
-			goto out_wait;
-		    }
-	}
+	if (calldata->roc)
+		pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
+
 	calldata->arg.share_access =
 		nfs4_map_atomic_open_share(NFS_SERVER(inode),
 				calldata->arg.fmode, 0);
@@ -2883,8 +2908,10 @@
 
 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
 {
+	u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
 	struct nfs4_server_caps_arg args = {
 		.fhandle = fhandle,
+		.bitmask = bitmask,
 	};
 	struct nfs4_server_caps_res res = {};
 	struct rpc_message msg = {
@@ -2894,10 +2921,18 @@
 	};
 	int status;
 
+	bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
+		     FATTR4_WORD0_FH_EXPIRE_TYPE |
+		     FATTR4_WORD0_LINK_SUPPORT |
+		     FATTR4_WORD0_SYMLINK_SUPPORT |
+		     FATTR4_WORD0_ACLSUPPORT;
+	if (minorversion)
+		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
+
 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
 	if (status == 0) {
 		/* Sanity check the server answers */
-		switch (server->nfs_client->cl_minorversion) {
+		switch (minorversion) {
 		case 0:
 			res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
 			res.attr_bitmask[2] = 0;
@@ -2950,6 +2985,8 @@
 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
 		server->cache_consistency_bitmask[2] = 0;
+		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
+			sizeof(server->exclcreat_bitmask));
 		server->acl_bitmask = res.acl_bitmask;
 		server->fh_expire_type = res.fh_expire_type;
 	}
@@ -3552,7 +3589,6 @@
 	struct nfs4_label l, *ilabel = NULL;
 	struct nfs_open_context *ctx;
 	struct nfs4_state *state;
-	int opened = 0;
 	int status = 0;
 
 	ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3562,7 +3598,7 @@
 	ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
 
 	sattr->ia_mode &= ~current_umask();
-	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
+	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
 	if (IS_ERR(state)) {
 		status = PTR_ERR(state);
 		goto out;
@@ -4978,13 +5014,12 @@
 	int result;
 	size_t len;
 	char *str;
-	bool retried = false;
 
 	if (clp->cl_owner_id != NULL)
 		return 0;
-retry:
+
 	rcu_read_lock();
-	len = 10 + strlen(clp->cl_ipaddr) + 1 +
+	len = 14 + strlen(clp->cl_ipaddr) + 1 +
 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
 		1 +
 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
@@ -5010,14 +5045,6 @@
 			rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
 	rcu_read_unlock();
 
-	/* Did something change? */
-	if (result >= len) {
-		kfree(str);
-		if (retried)
-			return -EINVAL;
-		retried = true;
-		goto retry;
-	}
 	clp->cl_owner_id = str;
 	return 0;
 }
@@ -5049,10 +5076,6 @@
 			clp->rpc_ops->version, clp->cl_minorversion,
 			nfs4_client_id_uniquifier,
 			clp->cl_rpcclient->cl_nodename);
-	if (result >= len) {
-		kfree(str);
-		return -EINVAL;
-	}
 	clp->cl_owner_id = str;
 	return 0;
 }
@@ -5088,10 +5111,6 @@
 	result = scnprintf(str, len, "Linux NFSv%u.%u %s",
 			clp->rpc_ops->version, clp->cl_minorversion,
 			clp->cl_rpcclient->cl_nodename);
-	if (result >= len) {
-		kfree(str);
-		return -EINVAL;
-	}
 	clp->cl_owner_id = str;
 	return 0;
 }
@@ -5289,9 +5308,8 @@
 
 	d_data = (struct nfs4_delegreturndata *)data;
 
-	if (d_data->roc &&
-	    pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
-		return;
+	if (d_data->roc)
+		pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
 
 	nfs4_setup_sequence(d_data->res.server,
 			&d_data->args.seq_args,
@@ -7746,10 +7764,19 @@
 	case 0:
 		goto out;
 	/*
+	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout that
+	 * satisfies a nonzero lgp->args.minlength (see RFC5661 section 18.43.3).
+	 */
+	case -NFS4ERR_BADLAYOUT:
+		goto out_overflow;
+	/*
 	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
-	 * (or clients) writing to the same RAID stripe
+	 * (or clients) writing to the same RAID stripe except when
+	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
 	 */
 	case -NFS4ERR_LAYOUTTRYLATER:
+		if (lgp->args.minlength == 0)
+			goto out_overflow;
 	/*
 	 * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
 	 * existing layout before getting a new one).
@@ -7805,6 +7832,10 @@
 		rpc_restart_call_prepare(task);
 out:
 	dprintk("<-- %s\n", __func__);
+	return;
+out_overflow:
+	task->tk_status = -EOVERFLOW;
+	goto out;
 }
 
 static size_t max_response_pages(struct nfs_server *server)
@@ -8661,6 +8692,7 @@
 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
 	.state_renewal_ops = &nfs41_state_renewal_ops,
+	.mig_recovery_ops = &nfs41_mig_recovery_ops,
 };
 #endif
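
The removed "result >= len" fallbacks in the three owner-id helpers above were effectively dead code: the strings are built with scnprintf(), whose return value is the number of characters actually stored and therefore always strictly less than the buffer size. With the corrected length computation (10 -> 14, which appears to cover the full "Linux NFSv4.0 " prefix), the retry loop becomes unnecessary as well. A userspace model of the snprintf()/scnprintf() distinction, with my_scnprintf() as a hypothetical stand-in:

#include <stdarg.h>
#include <stdio.h>

/* Userspace model of the kernel's scnprintf(): unlike snprintf(), it
 * returns the number of characters actually stored, which is always
 * strictly less than the buffer size. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int i;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	i = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (i < 0)
		return i;
	return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
	char small[8];
	int would_be = snprintf(small, sizeof(small), "Linux NFSv%u.%u", 4, 1);
	int stored   = my_scnprintf(small, sizeof(small), "Linux NFSv%u.%u", 4, 1);
	printf("snprintf=%d scnprintf=%d buf=\"%s\"\n", would_be, stored, small);
	return 0;
}
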
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f2e2ad8..da73bc4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2152,23 +2152,13 @@
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
 
-static void nfs41_ping_server(struct nfs_client *clp)
+void nfs41_notify_server(struct nfs_client *clp)
 {
 	/* Use CHECK_LEASE to ping the server with a SEQUENCE */
 	set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
 	nfs4_schedule_state_manager(clp);
 }
 
-void nfs41_server_notify_target_slotid_update(struct nfs_client *clp)
-{
-	nfs41_ping_server(clp);
-}
-
-void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp)
-{
-	nfs41_ping_server(clp);
-}
-
 static void nfs4_reset_all_state(struct nfs_client *clp)
 {
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 470af1a..28df12e 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -884,6 +884,66 @@
 DEFINE_NFS4_GETATTR_EVENT(nfs4_lookup_root);
 DEFINE_NFS4_GETATTR_EVENT(nfs4_fsinfo);
 
+DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
+		TP_PROTO(
+			const struct nfs_client *clp,
+			const struct nfs_fh *fhandle,
+			const struct inode *inode,
+			int error
+		),
+
+		TP_ARGS(clp, fhandle, inode, error),
+
+		TP_STRUCT__entry(
+			__field(int, error)
+			__field(dev_t, dev)
+			__field(u32, fhandle)
+			__field(u64, fileid)
+			__string(dstaddr, clp ?
+				rpc_peeraddr2str(clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown")
+		),
+
+		TP_fast_assign(
+			__entry->error = error;
+			__entry->fhandle = nfs_fhandle_hash(fhandle);
+			if (inode != NULL) {
+				__entry->fileid = NFS_FILEID(inode);
+				__entry->dev = inode->i_sb->s_dev;
+			} else {
+				__entry->fileid = 0;
+				__entry->dev = 0;
+			}
+			__assign_str(dstaddr, clp ?
+				rpc_peeraddr2str(clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown")
+		),
+
+		TP_printk(
+			"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+			"dstaddr=%s",
+			__entry->error,
+			show_nfsv4_errors(__entry->error),
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			(unsigned long long)__entry->fileid,
+			__entry->fhandle,
+			__get_str(dstaddr)
+		)
+);
+
+#define DEFINE_NFS4_INODE_CALLBACK_EVENT(name) \
+	DEFINE_EVENT(nfs4_inode_callback_event, name, \
+			TP_PROTO( \
+				const struct nfs_client *clp, \
+				const struct nfs_fh *fhandle, \
+				const struct inode *inode, \
+				int error \
+			), \
+			TP_ARGS(clp, fhandle, inode, error))
+DEFINE_NFS4_INODE_CALLBACK_EVENT(nfs4_cb_getattr);
+DEFINE_NFS4_INODE_CALLBACK_EVENT(nfs4_cb_layoutrecall_inode);
+
 DECLARE_EVENT_CLASS(nfs4_idmap_event,
 		TP_PROTO(
 			const char *name,
@@ -1136,6 +1196,7 @@
 
 DEFINE_NFS4_INODE_EVENT(nfs4_layoutcommit);
 DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn);
+DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close);
 
 #endif /* CONFIG_NFS_V4_1 */
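
The new nfs4_inode_callback_event class above follows the usual tracepoint pattern: one class defines the field layout, assignment, and format string, and each DEFINE_NFS4_INODE_CALLBACK_EVENT() stamps out a named event that reuses them. A rough userspace analogue of that sharing (all names invented):

#include <stdio.h>

struct inode_cb_event {
	int error;
	unsigned int dev_major, dev_minor;
	unsigned long long fileid;
	unsigned int fhandle;
	const char *dstaddr;
};

/* The "event class": one formatter shared by all defined events. */
static void trace_inode_cb(const char *name, const struct inode_cb_event *e)
{
	printf("%s: error=%d fileid=%02x:%02x:%llu fhandle=0x%08x dstaddr=%s\n",
	       name, e->error, e->dev_major, e->dev_minor,
	       e->fileid, e->fhandle, e->dstaddr);
}

/* Each named event is a thin wrapper around the class. */
#define DEFINE_INODE_CB_EVENT(fn) \
	static void fn(const struct inode_cb_event *e) { trace_inode_cb(#fn, e); }

DEFINE_INODE_CB_EVENT(trace_cb_getattr)
DEFINE_INODE_CB_EVENT(trace_cb_layoutrecall_inode)

int main(void)
{
	struct inode_cb_event e = { -2, 8, 1, 42ULL, 0xdeadbeef, "192.0.2.1" };
	trace_cb_getattr(&e);
	trace_cb_layoutrecall_inode(&e);
	return 0;
}
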
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 558cd65d..788adf3 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -400,7 +400,8 @@
 #define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
 #define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \
 				encode_stateid_maxsz + \
-				1 /* FIXME: opaque lrf_body always empty at the moment */)
+				1 + \
+				XDR_QUADLEN(NFS4_OPAQUE_LIMIT))
 #define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \
 				1 + decode_stateid_maxsz)
 #define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1)
@@ -1001,7 +1002,8 @@
 
 static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 				const struct nfs4_label *label,
-				const struct nfs_server *server)
+				const struct nfs_server *server,
+				bool excl_check)
 {
 	char owner_name[IDMAP_NAMESZ];
 	char owner_group[IDMAP_NAMESZ];
@@ -1067,6 +1069,17 @@
 		bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET;
 		len += 4;
 	}
+
+	if (excl_check) {
+		const u32 *excl_bmval = server->exclcreat_bitmask;
+		bmval[0] &= excl_bmval[0];
+		bmval[1] &= excl_bmval[1];
+		bmval[2] &= excl_bmval[2];
+
+		if (!(excl_bmval[2] & FATTR4_WORD2_SECURITY_LABEL))
+			label = NULL;
+	}
+
 	if (label) {
 		len += 4 + 4 + 4 + (XDR_QUADLEN(label->len) << 2);
 		bmval[2] |= FATTR4_WORD2_SECURITY_LABEL;
@@ -1154,7 +1167,9 @@
 	case NF4LNK:
 		p = reserve_space(xdr, 4);
 		*p = cpu_to_be32(create->u.symlink.len);
-		xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
+		xdr_write_pages(xdr, create->u.symlink.pages, 0,
+				create->u.symlink.len);
+		xdr->buf->flags |= XDRBUF_WRITE;
 		break;
 
 	case NF4BLK: case NF4CHR:
@@ -1168,7 +1183,7 @@
 	}
 
 	encode_string(xdr, create->name->len, create->name->name);
-	encode_attrs(xdr, create->attrs, create->label, create->server);
+	encode_attrs(xdr, create->attrs, create->label, create->server, false);
 }
 
 static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)
@@ -1382,18 +1397,17 @@
 
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
 {
-	struct iattr dummy;
 	__be32 *p;
 
 	p = reserve_space(xdr, 4);
 	switch(arg->createmode) {
 	case NFS4_CREATE_UNCHECKED:
 		*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
-		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
 		break;
 	case NFS4_CREATE_GUARDED:
 		*p = cpu_to_be32(NFS4_CREATE_GUARDED);
-		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
 		break;
 	case NFS4_CREATE_EXCLUSIVE:
 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
@@ -1402,8 +1416,7 @@
 	case NFS4_CREATE_EXCLUSIVE4_1:
 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
 		encode_nfs4_verifier(xdr, &arg->u.verifier);
-		dummy.ia_valid = 0;
-		encode_attrs(xdr, &dummy, arg->label, arg->server);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true);
 	}
 }
 
@@ -1659,7 +1672,7 @@
 {
 	encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr);
 	encode_nfs4_stateid(xdr, &arg->stateid);
-	encode_attrs(xdr, arg->iap, arg->label, server);
+	encode_attrs(xdr, arg->iap, arg->label, server, false);
 }
 
 static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)
@@ -2580,6 +2593,7 @@
 				     struct xdr_stream *xdr,
 				     struct nfs4_server_caps_arg *args)
 {
+	const u32 *bitmask = args->bitmask;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
@@ -2587,11 +2601,7 @@
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fhandle, &hdr);
-	encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
-			   FATTR4_WORD0_FH_EXPIRE_TYPE|
-			   FATTR4_WORD0_LINK_SUPPORT|
-			   FATTR4_WORD0_SYMLINK_SUPPORT|
-			   FATTR4_WORD0_ACLSUPPORT, &hdr);
+	encode_getattr_three(xdr, bitmask[0], bitmask[1], bitmask[2], &hdr);
 	encode_nops(&hdr);
 }
 
@@ -3368,6 +3378,22 @@
 	return -EIO;
 }
 
+static int decode_attr_exclcreat_supported(struct xdr_stream *xdr,
+				 uint32_t *bitmap, uint32_t *bitmask)
+{
+	if (likely(bitmap[2] & FATTR4_WORD2_SUPPATTR_EXCLCREAT)) {
+		int ret;
+		ret = decode_attr_bitmap(xdr, bitmask);
+		if (unlikely(ret < 0))
+			return ret;
+		bitmap[2] &= ~FATTR4_WORD2_SUPPATTR_EXCLCREAT;
+	} else
+		bitmask[0] = bitmask[1] = bitmask[2] = 0;
+	dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__,
+		bitmask[0], bitmask[1], bitmask[2]);
+	return 0;
+}
+
 static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
 {
 	__be32 *p;
@@ -4321,6 +4347,9 @@
 		goto xdr_error;
 	if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
 		goto xdr_error;
+	if ((status = decode_attr_exclcreat_supported(xdr, bitmap,
+				res->exclcreat_bitmask)) != 0)
+		goto xdr_error;
 	status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
 	dprintk("%s: xdr returned %d!\n", __func__, -status);
@@ -4903,24 +4932,28 @@
 }
 
 /* This is too sick! */
-static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
+static int decode_space_limit(struct xdr_stream *xdr,
+		unsigned long *pagemod_limit)
 {
 	__be32 *p;
 	uint32_t limit_type, nblocks, blocksize;
+	u64 maxsize = 0;
 
 	p = xdr_inline_decode(xdr, 12);
 	if (unlikely(!p))
 		goto out_overflow;
 	limit_type = be32_to_cpup(p++);
 	switch (limit_type) {
-	case 1:
-		xdr_decode_hyper(p, maxsize);
+	case NFS4_LIMIT_SIZE:
+		xdr_decode_hyper(p, &maxsize);
 		break;
-	case 2:
+	case NFS4_LIMIT_BLOCKS:
 		nblocks = be32_to_cpup(p++);
 		blocksize = be32_to_cpup(p);
-		*maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
+		maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
 	}
+	maxsize >>= PAGE_CACHE_SHIFT;
+	*pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
 	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
@@ -4948,7 +4981,7 @@
 		break;
 	case NFS4_OPEN_DELEGATE_WRITE:
 		res->delegation_type = FMODE_WRITE|FMODE_READ;
-		if (decode_space_limit(xdr, &res->maxsize) < 0)
+		if (decode_space_limit(xdr, &res->pagemod_limit) < 0)
 				return -EIO;
 	}
 	return decode_ace(xdr, NULL, res->server->nfs_client);
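
decode_space_limit() now folds both limit flavours (NFS4_LIMIT_SIZE and NFS4_LIMIT_BLOCKS) into a page count for the new pagemod_limit field, clamping so the u64 byte limit cannot overflow an unsigned long. A sketch of that conversion (assuming 4 KiB pages):

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Mirror of the clamping above: a byte (or blocks * blocksize) limit
 * becomes a page count, capped at ULONG_MAX. */
static unsigned long bytes_to_pagemod_limit(uint64_t maxsize)
{
	maxsize >>= PAGE_SHIFT;
	return maxsize < ULONG_MAX ? (unsigned long)maxsize : ULONG_MAX;
}

int main(void)
{
	uint64_t nblocks = 1024, blocksize = 4096;	/* NFS4_LIMIT_BLOCKS case */
	printf("%lu pages\n", bytes_to_pagemod_limit(nblocks * blocksize));
	printf("%lu pages\n", bytes_to_pagemod_limit(UINT64_MAX)); /* clamps on 32-bit */
	return 0;
}
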
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 4984bbe..7c5718b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -77,8 +77,8 @@
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
 {
 	spin_lock(&hdr->lock);
-	if (pos < hdr->io_start + hdr->good_bytes) {
-		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
+	    || pos < hdr->io_start + hdr->good_bytes) {
 		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
 		hdr->good_bytes = pos - hdr->io_start;
 		hdr->error = error;
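
The reworked condition makes the first error stick even when it lies beyond the currently known-good range, while later errors at earlier offsets can still shrink that range. A compact model using a C11 atomic flag in place of the NFS_IOHDR_ERROR bit (the kernel code serializes with hdr->lock instead):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pgio_hdr {
	atomic_flag error_seen;	/* stands in for NFS_IOHDR_ERROR */
	long long io_start, good_bytes;
	int error;
};

/* Record an error if it is the first one seen, or if it occurred at an
 * earlier offset than the currently recorded good range. */
static void set_pgio_error(struct pgio_hdr *h, int error, long long pos)
{
	bool first = !atomic_flag_test_and_set(&h->error_seen);

	if (first || pos < h->io_start + h->good_bytes) {
		h->good_bytes = pos - h->io_start;
		h->error = error;
	}
}

int main(void)
{
	struct pgio_hdr h = { ATOMIC_FLAG_INIT, 0, 4096, 0 };
	set_pgio_error(&h, -5, 2048);	/* first error is always recorded */
	set_pgio_error(&h, -7, 1024);	/* an earlier error shrinks it again */
	printf("error=%d good_bytes=%lld\n", h.error, h.good_bytes);
	return 0;
}
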
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 70bf706..ba12464 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -368,7 +368,6 @@
 	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
 		return false;
 	lo->plh_return_iomode = 0;
-	lo->plh_block_lgets++;
 	pnfs_get_layout_hdr(lo);
 	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
 	return true;
@@ -817,25 +816,12 @@
 	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
 }
 
-static bool
-pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
-		      struct pnfs_layout_range *range)
-{
-	return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
-		(lo->plh_return_iomode == IOMODE_ANY ||
-		 lo->plh_return_iomode == range->iomode);
-}
-
 /* lget is set to 1 if called from inside send_layoutget call chain */
 static bool
-pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
-			struct pnfs_layout_range *range, int lget)
+pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
 {
 	return lo->plh_block_lgets ||
-		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
-		(list_empty(&lo->plh_segs) &&
-		 (atomic_read(&lo->plh_outstanding) > lget)) ||
-		pnfs_layout_returning(lo, range);
+		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 }
 
 int
@@ -847,7 +833,7 @@
 
 	dprintk("--> %s\n", __func__);
 	spin_lock(&lo->plh_inode->i_lock);
-	if (pnfs_layoutgets_blocked(lo, range, 1)) {
+	if (pnfs_layoutgets_blocked(lo)) {
 		status = -EAGAIN;
 	} else if (!nfs4_valid_open_stateid(open_state)) {
 		status = -EBADF;
@@ -882,6 +868,7 @@
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
 	struct pnfs_layout_segment *lseg;
+	loff_t i_size;
 
 	dprintk("--> %s\n", __func__);
 
@@ -889,9 +876,17 @@
 	if (lgp == NULL)
 		return NULL;
 
+	i_size = i_size_read(ino);
+
 	lgp->args.minlength = PAGE_CACHE_SIZE;
 	if (lgp->args.minlength > range->length)
 		lgp->args.minlength = range->length;
+	if (range->iomode == IOMODE_READ) {
+		if (range->offset >= i_size)
+			lgp->args.minlength = 0;
+		else if (i_size - range->offset < lgp->args.minlength)
+			lgp->args.minlength = i_size - range->offset;
+	}
 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 	lgp->args.range = *range;
 	lgp->args.type = server->pnfs_curr_ld->id;
@@ -956,9 +951,7 @@
 	if (unlikely(lrp == NULL)) {
 		status = -ENOMEM;
 		spin_lock(&ino->i_lock);
-		lo->plh_block_lgets--;
 		pnfs_clear_layoutreturn_waitbit(lo);
-		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
 		spin_unlock(&ino->i_lock);
 		pnfs_put_layout_hdr(lo);
 		goto out;
@@ -1080,15 +1073,14 @@
 	struct pnfs_layout_segment *lseg, *tmp;
 	nfs4_stateid stateid;
 	LIST_HEAD(tmp_list);
-	bool found = false, layoutreturn = false;
+	bool found = false, layoutreturn = false, roc = false;
 
 	spin_lock(&ino->i_lock);
 	lo = nfsi->layout;
-	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
-	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
 		goto out_noroc;
 
-	/* Don't return layout if we hold a delegation */
+	/* no roc if we hold a delegation */
 	if (nfs4_check_delegation(ino, FMODE_READ))
 		goto out_noroc;
 
@@ -1099,34 +1091,41 @@
 			goto out_noroc;
 	}
 
+	stateid = lo->plh_stateid;
+	/* always send layoutreturn if being marked so */
+	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+				   &lo->plh_flags))
+		layoutreturn = pnfs_prepare_layoutreturn(lo);
+
 	pnfs_clear_retry_layoutget(lo);
 	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
-		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+		/* If we are sending layoutreturn, invalidate all valid lsegs */
+		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
 			mark_lseg_invalid(lseg, &tmp_list);
 			found = true;
 		}
-	if (!found)
-		goto out_noroc;
-	lo->plh_block_lgets++;
-	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
+	/* pnfs_prepare_layoutreturn() takes a reference on lo that is
+	 * put in pnfs_roc_release(). We don't actually send a
+	 * layoutreturn here, but we still want others to see us as if
+	 * we were sending one.
+	 *
+	 * If pnfs_prepare_layoutreturn() fails, someone else is already
+	 * sending a LAYOUTRETURN, so we proceed as if there were no
+	 * layouts to return.
+	 *
+	 * Return-on-close applies only when all three hold:
+	 * 1. there are ROC lsegs
+	 * 2. we are not sending a layoutreturn ourselves
+	 * 3. no one else is sending a layoutreturn
+	 */
+	if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo))
+		roc = true;
+
+out_noroc:
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&tmp_list);
 	pnfs_layoutcommit_inode(ino, true);
-	return true;
-
-out_noroc:
-	if (lo) {
-		stateid = lo->plh_stateid;
-		if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-					   &lo->plh_flags))
-			layoutreturn = pnfs_prepare_layoutreturn(lo);
-	}
-	spin_unlock(&ino->i_lock);
-	if (layoutreturn) {
-		pnfs_layoutcommit_inode(ino, true);
+	if (layoutreturn)
 		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
-	}
-	return false;
+	return roc;
 }
 
 void pnfs_roc_release(struct inode *ino)
@@ -1135,7 +1134,7 @@
 
 	spin_lock(&ino->i_lock);
 	lo = NFS_I(ino)->layout;
-	lo->plh_block_lgets--;
+	pnfs_clear_layoutreturn_waitbit(lo);
 	if (atomic_dec_and_test(&lo->plh_refcount)) {
 		pnfs_detach_layout_hdr(lo);
 		spin_unlock(&ino->i_lock);
@@ -1153,27 +1152,16 @@
 	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
 		lo->plh_barrier = barrier;
 	spin_unlock(&ino->i_lock);
+	trace_nfs4_layoutreturn_on_close(ino, 0);
 }
 
-bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
+void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
 	struct pnfs_layout_hdr *lo;
-	struct pnfs_layout_segment *lseg;
-	nfs4_stateid stateid;
 	u32 current_seqid;
-	bool layoutreturn = false;
 
 	spin_lock(&ino->i_lock);
-	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
-		if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
-			continue;
-		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
-			continue;
-		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-		spin_unlock(&ino->i_lock);
-		return true;
-	}
 	lo = nfsi->layout;
 	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
 
@@ -1181,19 +1169,7 @@
 	 * a barrier, we choose the worst-case barrier.
 	 */
 	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-	stateid = lo->plh_stateid;
-	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-					   &lo->plh_flags))
-		layoutreturn = pnfs_prepare_layoutreturn(lo);
-	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-
 	spin_unlock(&ino->i_lock);
-	if (layoutreturn) {
-		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
-		return true;
-	}
-	return false;
 }
 
 /*
@@ -1221,16 +1197,41 @@
 	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
 }
 
-static void
-pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
-		   struct pnfs_layout_segment *lseg)
+static bool
+pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
+		const struct pnfs_layout_range *l2)
 {
-	struct pnfs_layout_segment *lp;
+	return pnfs_lseg_range_cmp(l1, l2) > 0;
+}
+
+static bool
+pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
+		struct pnfs_layout_segment *old)
+{
+	return false;
+}
+
+void
+pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
+		   struct pnfs_layout_segment *lseg,
+		   bool (*is_after)(const struct pnfs_layout_range *,
+			   const struct pnfs_layout_range *),
+		   bool (*do_merge)(struct pnfs_layout_segment *,
+			   struct pnfs_layout_segment *),
+		   struct list_head *free_me)
+{
+	struct pnfs_layout_segment *lp, *tmp;
 
 	dprintk("%s:Begin\n", __func__);
 
-	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
-		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
+	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
+		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
+			continue;
+		if (do_merge(lseg, lp)) {
+			mark_lseg_invalid(lp, free_me);
+			continue;
+		}
+		if (is_after(&lseg->pls_range, &lp->pls_range))
 			continue;
 		list_add_tail(&lseg->pls_list, &lp->pls_list);
 		dprintk("%s: inserted lseg %p "
@@ -1252,6 +1253,24 @@
 
 	dprintk("%s:Return\n", __func__);
 }
+EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
+
+static void
+pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
+		   struct pnfs_layout_segment *lseg,
+		   struct list_head *free_me)
+{
+	struct inode *inode = lo->plh_inode;
+	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+	if (ld->add_lseg != NULL)
+		ld->add_lseg(lo, lseg, free_me);
+	else
+		pnfs_generic_layout_insert_lseg(lo, lseg,
+				pnfs_lseg_range_is_after,
+				pnfs_lseg_no_merge,
+				free_me);
+}
 
 static struct pnfs_layout_hdr *
 alloc_init_layout_hdr(struct inode *ino,
@@ -1344,8 +1363,6 @@
 			ret = pnfs_get_lseg(lseg);
 			break;
 		}
-		if (lseg->pls_range.offset > range->offset)
-			break;
 	}
 
 	dprintk("%s:Return lseg %p ref %d\n",
@@ -1438,6 +1455,8 @@
 
 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
+	if (!pnfs_should_retry_layoutget(lo))
+		return false;
 	/*
 	 * send layoutcommit as it can hold up layoutreturn due to lseg
 	 * reference
@@ -1484,6 +1503,9 @@
 	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
 		goto out;
 
+	if (iomode == IOMODE_READ && i_size_read(ino) == 0)
+		goto out;
+
 	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
 		goto out;
 
@@ -1533,8 +1555,7 @@
 	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
 	 * for LAYOUTRETURN even if first is true.
 	 */
-	if (!lseg && pnfs_should_retry_layoutget(lo) &&
-	    test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
 		spin_unlock(&ino->i_lock);
 		dprintk("%s wait for layoutreturn\n", __func__);
 		if (pnfs_prepare_to_retry_layoutget(lo)) {
@@ -1547,7 +1568,7 @@
 		goto out_put_layout_hdr;
 	}
 
-	if (pnfs_layoutgets_blocked(lo, &arg, 0))
+	if (pnfs_layoutgets_blocked(lo))
 		goto out_unlock;
 	atomic_inc(&lo->plh_outstanding);
 	spin_unlock(&ino->i_lock);
@@ -1593,6 +1614,26 @@
 }
 EXPORT_SYMBOL_GPL(pnfs_update_layout);
 
+static bool
+pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
+{
+	switch (range->iomode) {
+	case IOMODE_READ:
+	case IOMODE_RW:
+		break;
+	default:
+		return false;
+	}
+	if (range->offset == NFS4_MAX_UINT64)
+		return false;
+	if (range->length == 0)
+		return false;
+	if (range->length != NFS4_MAX_UINT64 &&
+	    range->length > NFS4_MAX_UINT64 - range->offset)
+		return false;
+	return true;
+}
+
 struct pnfs_layout_segment *
 pnfs_layout_process(struct nfs4_layoutget *lgp)
 {
@@ -1601,7 +1642,10 @@
 	struct pnfs_layout_segment *lseg;
 	struct inode *ino = lo->plh_inode;
 	LIST_HEAD(free_me);
-	int status = 0;
+	int status = -EINVAL;
+
+	if (!pnfs_sanity_check_layout_range(&res->range))
+		goto out;
 
 	/* Inject layout blob into I/O device driver */
 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
@@ -1619,12 +1663,7 @@
 	lseg->pls_range = res->range;
 
 	spin_lock(&ino->i_lock);
-	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
-		dprintk("%s forget reply due to recall\n", __func__);
-		goto out_forget_reply;
-	}
-
-	if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) {
+	if (pnfs_layoutgets_blocked(lo)) {
 		dprintk("%s forget reply due to state\n", __func__);
 		goto out_forget_reply;
 	}
@@ -1651,12 +1690,10 @@
 	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 
 	pnfs_get_lseg(lseg);
-	pnfs_layout_insert_lseg(lo, lseg);
+	pnfs_layout_insert_lseg(lo, lseg, &free_me);
 
-	if (res->return_on_close) {
+	if (res->return_on_close)
 		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
-		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
-	}
 
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&free_me);
@@ -1692,6 +1729,8 @@
 				lseg->pls_range.length);
 			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
 			mark_lseg_invalid(lseg, tmp_list);
+			set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+					&lo->plh_flags);
 		}
 }
 
@@ -2267,7 +2306,7 @@
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
 int
-pnfs_report_layoutstat(struct inode *inode)
+pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 {
 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -2294,7 +2333,7 @@
 	pnfs_get_layout_hdr(hdr);
 	spin_unlock(&inode->i_lock);
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), gfp_flags);
 	if (!data) {
 		status = -ENOMEM;
 		goto out_put;
@@ -2324,3 +2363,7 @@
 }
 EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
 #endif
+
+unsigned int layoutstats_timer;
+module_param(layoutstats_timer, uint, 0644);
+EXPORT_SYMBOL_GPL(layoutstats_timer);
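
pnfs_generic_layout_insert_lseg() above turns the sorted insert into a template: the layout driver supplies the ordering predicate and an optional merge hook that can swallow existing segments (the default pnfs_lseg_no_merge() merges nothing). A simplified userspace rendition over a singly linked list (hypothetical types; the invalid-segment filtering is omitted):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct seg { long offset, length; struct seg *next; };

typedef bool (*is_after_fn)(const struct seg *, const struct seg *);
typedef bool (*do_merge_fn)(struct seg *, struct seg *);

static bool range_is_after(const struct seg *a, const struct seg *b)
{
	return a->offset > b->offset;
}

static bool no_merge(struct seg *a, struct seg *b)	/* default policy */
{
	(void)a; (void)b;
	return false;
}

/* Walk the sorted list: segments the new one merges with are dropped,
 * and the new segment is linked in before the first segment it does
 * not sort after. */
static void insert_seg(struct seg **head, struct seg *nseg,
		       is_after_fn is_after, do_merge_fn do_merge)
{
	struct seg **pp = head;

	while (*pp) {
		struct seg *cur = *pp;

		if (do_merge(nseg, cur)) {
			*pp = cur->next;
			free(cur);
			continue;
		}
		if (is_after(nseg, cur)) {
			pp = &cur->next;
			continue;
		}
		break;
	}
	nseg->next = *pp;
	*pp = nseg;
}

int main(void)
{
	struct seg *head = NULL, *s;
	struct seg *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	*a = (struct seg){ 4096, 4096, NULL };
	*b = (struct seg){ 0, 4096, NULL };
	insert_seg(&head, a, range_is_after, no_merge);
	insert_seg(&head, b, range_is_after, no_merge);
	for (s = head; s; s = s->next)
		printf("[%ld, +%ld) ", s->offset, s->length);
	printf("\n");
	return 0;
}
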
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 3e6ab7b..78c9351 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -94,7 +94,6 @@
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
 	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
-	NFS_LAYOUT_ROC,			/* some lseg had roc bit set */
 	NFS_LAYOUT_RETURN,		/* Return this layout ASAP */
 	NFS_LAYOUT_RETURN_BEFORE_CLOSE,	/* Return this layout before close */
 	NFS_LAYOUT_INVALID_STID,	/* layout stateid id is invalid */
@@ -129,6 +128,9 @@
 
 	struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
 	void (*free_lseg) (struct pnfs_layout_segment *lseg);
+	void (*add_lseg) (struct pnfs_layout_hdr *layoutid,
+			struct pnfs_layout_segment *lseg,
+			struct list_head *free_me);
 
 	void (*return_range) (struct pnfs_layout_hdr *lo,
 			      struct pnfs_layout_range *range);
@@ -184,15 +186,15 @@
 
 struct pnfs_layout_hdr {
 	atomic_t		plh_refcount;
+	atomic_t		plh_outstanding; /* number of RPCs out */
 	struct list_head	plh_layouts;   /* other client layouts */
 	struct list_head	plh_bulk_destroy;
 	struct list_head	plh_segs;      /* layout segments list */
-	nfs4_stateid		plh_stateid;
-	atomic_t		plh_outstanding; /* number of RPCs out */
 	unsigned long		plh_block_lgets; /* block LAYOUTGET if >0 */
-	u32			plh_barrier; /* ignore lower seqids */
 	unsigned long		plh_retry_timestamp;
 	unsigned long		plh_flags;
+	nfs4_stateid		plh_stateid;
+	u32			plh_barrier; /* ignore lower seqids */
 	enum pnfs_iomode	plh_return_iomode;
 	loff_t			plh_lwb; /* last write byte for layoutcommit */
 	struct rpc_cred		*plh_lc_cred; /* layoutcommit cred */
@@ -267,7 +269,7 @@
 bool pnfs_roc(struct inode *ino);
 void pnfs_roc_release(struct inode *ino);
 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
-bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task);
+void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
@@ -286,6 +288,14 @@
 					       gfp_t gfp_flags);
 void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
 
+void pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
+		   struct pnfs_layout_segment *lseg,
+		   bool (*is_after)(const struct pnfs_layout_range *lseg_range,
+			   const struct pnfs_layout_range *old),
+		   bool (*do_merge)(struct pnfs_layout_segment *lseg,
+			   struct pnfs_layout_segment *old),
+		   struct list_head *free_me);
+
 void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *);
 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *);
@@ -529,12 +539,31 @@
 					nfss->pnfs_curr_ld->id == src->l_type);
 }
 
+static inline u64
+pnfs_calc_offset_end(u64 offset, u64 len)
+{
+	if (len == NFS4_MAX_UINT64 || len >= NFS4_MAX_UINT64 - offset)
+		return NFS4_MAX_UINT64;
+	return offset + len - 1;
+}
+
+static inline u64
+pnfs_calc_offset_length(u64 offset, u64 end)
+{
+	if (end == NFS4_MAX_UINT64 || end <= offset)
+		return NFS4_MAX_UINT64;
+	return 1 + end - offset;
+}
+
+extern unsigned int layoutstats_timer;
+
 #ifdef NFS_DEBUG
 void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
 #else
 static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id)
 {
 }
 #endif /* NFS_DEBUG */
 #else  /* CONFIG_NFS_V4_1 */
 
@@ -605,10 +634,9 @@
 {
 }
 
-static inline bool
-pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
+static inline void
+pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
 {
-	return false;
 }
 
 static inline void set_pnfs_layoutdriver(struct nfs_server *s,
@@ -691,10 +719,10 @@
 #endif /* CONFIG_NFS_V4_1 */
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
-int pnfs_report_layoutstat(struct inode *inode);
+int pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags);
 #else
 static inline int
-pnfs_report_layoutstat(struct inode *inode)
+pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 {
 	return 0;
 }
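
The two new helpers convert between (offset, length) and an inclusive [offset, end] range without overflowing, treating a length of NFS4_MAX_UINT64 as "to end of file". A self-contained version showing that the conversion round-trips:

#include <stdint.h>
#include <stdio.h>

#define MAX_U64 UINT64_MAX	/* stands in for NFS4_MAX_UINT64 */

static uint64_t calc_offset_end(uint64_t offset, uint64_t len)
{
	if (len == MAX_U64 || len >= MAX_U64 - offset)
		return MAX_U64;	/* open-ended: "to end of file" */
	return offset + len - 1;
}

static uint64_t calc_offset_length(uint64_t offset, uint64_t end)
{
	if (end == MAX_U64 || end <= offset)
		return MAX_U64;
	return 1 + end - offset;
}

int main(void)
{
	uint64_t off = 4096, len = 8192;
	uint64_t end = calc_offset_end(off, len);

	printf("end=%llu len'=%llu\n",
	       (unsigned long long)end,
	       (unsigned long long)calc_offset_length(off, end));
	return 0;
}
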
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index f37e25b..24655b8 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -124,11 +124,12 @@
 	if (ret) {
 		cinfo->ds->nwritten -= ret;
 		cinfo->ds->ncommitting += ret;
-		bucket->clseg = bucket->wlseg;
-		if (list_empty(src))
+		if (bucket->clseg == NULL)
+			bucket->clseg = pnfs_get_lseg(bucket->wlseg);
+		if (list_empty(src)) {
+			pnfs_put_lseg_locked(bucket->wlseg);
 			bucket->wlseg = NULL;
-		else
-			pnfs_get_lseg(bucket->clseg);
+		}
 	}
 	return ret;
 }
@@ -182,19 +183,23 @@
 	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
 	struct pnfs_commit_bucket *bucket;
 	struct pnfs_layout_segment *freeme;
+	LIST_HEAD(pages);
 	int i;
 
+	spin_lock(cinfo->lock);
 	for (i = idx; i < fl_cinfo->nbuckets; i++) {
 		bucket = &fl_cinfo->buckets[i];
 		if (list_empty(&bucket->committing))
 			continue;
-		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
-		spin_lock(cinfo->lock);
 		freeme = bucket->clseg;
 		bucket->clseg = NULL;
+		list_splice_init(&bucket->committing, &pages);
 		spin_unlock(cinfo->lock);
+		nfs_retry_commit(&pages, freeme, cinfo, i);
 		pnfs_put_lseg(freeme);
+		spin_lock(cinfo->lock);
 	}
+	spin_unlock(cinfo->lock);
 }
 
 static unsigned int
@@ -216,10 +221,6 @@
 		if (!data)
 			break;
 		data->ds_commit_index = i;
-		spin_lock(cinfo->lock);
-		data->lseg = bucket->clseg;
-		bucket->clseg = NULL;
-		spin_unlock(cinfo->lock);
 		list_add(&data->pages, list);
 		nreq++;
 	}
@@ -229,6 +230,22 @@
 	return nreq;
 }
 
+static inline
+void pnfs_fetch_commit_bucket_list(struct list_head *pages,
+		struct nfs_commit_data *data,
+		struct nfs_commit_info *cinfo)
+{
+	struct pnfs_commit_bucket *bucket;
+
+	bucket = &cinfo->ds->buckets[data->ds_commit_index];
+	spin_lock(cinfo->lock);
+	list_splice_init(&bucket->committing, pages);
+	data->lseg = bucket->clseg;
+	bucket->clseg = NULL;
+	spin_unlock(cinfo->lock);
+}
+
 /* This follows nfs_commit_list pretty closely */
 int
 pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
@@ -243,7 +260,7 @@
 	if (!list_empty(mds_pages)) {
 		data = nfs_commitdata_alloc();
 		if (data != NULL) {
-			data->lseg = NULL;
+			data->ds_commit_index = -1;
 			list_add(&data->pages, &list);
 			nreq++;
 		} else {
@@ -265,19 +282,16 @@
 
 	list_for_each_entry_safe(data, tmp, &list, pages) {
 		list_del_init(&data->pages);
-		if (!data->lseg) {
+		if (data->ds_commit_index < 0) {
 			nfs_init_commit(data, mds_pages, NULL, cinfo);
 			nfs_initiate_commit(NFS_CLIENT(inode), data,
 					    NFS_PROTO(data->inode),
 					    data->mds_ops, how, 0);
 		} else {
-			struct pnfs_commit_bucket *buckets;
+			LIST_HEAD(pages);
 
-			buckets = cinfo->ds->buckets;
-			nfs_init_commit(data,
-					&buckets[data->ds_commit_index].committing,
-					data->lseg,
-					cinfo);
+			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
+			nfs_init_commit(data, &pages, data->lseg, cinfo);
 			initiate_commit(data, how);
 		}
 	}
@@ -359,26 +373,31 @@
 	return false;
 }
 
+/*
+ * Check whether every address in 'dsaddrs1' also appears in 'dsaddrs2',
+ * i.e. whether 'dsaddrs1' is a subset of 'dsaddrs2'. If so, declare a match.
+ */
 static bool
 _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
 			       const struct list_head *dsaddrs2)
 {
 	struct nfs4_pnfs_ds_addr *da1, *da2;
+	struct sockaddr *sa1, *sa2;
+	bool match = false;
 
-	/* step through both lists, comparing as we go */
-	for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
-	     da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
-	     da1 != NULL && da2 != NULL;
-	     da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
-	     da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
-		if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
-				   (struct sockaddr *)&da2->da_addr))
-			return false;
+	list_for_each_entry(da1, dsaddrs1, da_node) {
+		sa1 = (struct sockaddr *)&da1->da_addr;
+		match = false;
+		list_for_each_entry(da2, dsaddrs2, da_node) {
+			sa2 = (struct sockaddr *)&da2->da_addr;
+			match = same_sockaddr(sa1, sa2);
+			if (match)
+				break;
+		}
+		if (!match)
+			break;
 	}
-	if (da1 == NULL && da2 == NULL)
-		return true;
-
-	return false;
+	return match;
 }
 
 /*
@@ -863,9 +882,10 @@
 	}
 	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
 	cinfo->ds->nwritten++;
-	spin_unlock(cinfo->lock);
 
-	nfs_request_add_commit_list(req, list, cinfo);
+	nfs_request_add_commit_list_locked(req, list, cinfo);
+	spin_unlock(cinfo->lock);
+	nfs_mark_page_unstable(req->wb_page, cinfo);
 }
 EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
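
_same_data_server_addrs_locked() now treats the address lists as sets: every address in the first list must occur somewhere in the second, instead of the old element-by-element walk that also depended on list order and equal length. A string-based model of the subset test:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified model of the subset match above: strings stand in for
 * sockaddrs, strcmp() for same_sockaddr(). */
static bool addrs_subset(const char **l1, size_t n1,
			 const char **l2, size_t n2)
{
	bool match = false;
	size_t i, j;

	for (i = 0; i < n1; i++) {
		match = false;
		for (j = 0; j < n2; j++) {
			if (strcmp(l1[i], l2[j]) == 0) {
				match = true;
				break;
			}
		}
		if (!match)
			break;	/* one miss fails the whole match */
	}
	return match;
}

int main(void)
{
	const char *ds1[] = { "10.0.0.1" };
	const char *ds2[] = { "10.0.0.1", "10.0.0.2" };

	printf("subset=%d\n", addrs_subset(ds1, 1, ds2, 2));
	return 0;
}
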
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index aa62004..383a027 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -381,9 +381,12 @@
 	ret = nfs_register_sysctl();
 	if (ret < 0)
 		goto error_2;
-	register_shrinker(&acl_shrinker);
+	ret = register_shrinker(&acl_shrinker);
+	if (ret < 0)
+		goto error_3;
 	return 0;
-
+error_3:
+	nfs_unregister_sysctl();
 error_2:
 	unregister_nfs4_fs();
 error_1:
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 75a35a1..388f480 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -768,6 +768,28 @@
 }
 
 /**
+ * nfs_request_add_commit_list_locked - add request to a commit list
+ * @req: pointer to a struct nfs_page
+ * @dst: commit list head
+ * @cinfo: holds list lock and accounting info
+ *
+ * This sets the PG_CLEAN bit and updates the cinfo count of
+ * outstanding requests requiring a commit; the MM page stats
+ * are updated separately via nfs_mark_page_unstable().
+ *
+ * The caller must hold the cinfo->lock, and the nfs_page lock.
+ */
+void
+nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
+			    struct nfs_commit_info *cinfo)
+{
+	set_bit(PG_CLEAN, &req->wb_flags);
+	nfs_list_add_request(req, dst);
+	cinfo->mds->ncommit++;
+}
+EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
+
+/**
  * nfs_request_add_commit_list - add request to a commit list
  * @req: pointer to a struct nfs_page
  * @dst: commit list head
@@ -784,13 +806,10 @@
 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 			    struct nfs_commit_info *cinfo)
 {
-	set_bit(PG_CLEAN, &(req)->wb_flags);
 	spin_lock(cinfo->lock);
-	nfs_list_add_request(req, dst);
-	cinfo->mds->ncommit++;
+	nfs_request_add_commit_list_locked(req, dst, cinfo);
 	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq)
-		nfs_mark_page_unstable(req->wb_page);
+	nfs_mark_page_unstable(req->wb_page, cinfo);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1793,7 +1812,7 @@
 	return res;
 }
 
-static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	int flags = FLUSH_SYNC;
@@ -1828,11 +1847,6 @@
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	return ret;
 }
-
-int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	return nfs_commit_unstable_pages(inode, wbc);
-}
 EXPORT_SYMBOL_GPL(nfs_write_inode);
 
 /*
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index ae6e58e..fd8c9a5 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -63,14 +63,33 @@
  * lock reclaims.
  */
 int
-locks_in_grace(struct net *net)
+__state_in_grace(struct net *net, bool open)
 {
 	struct list_head *grace_list = net_generic(net, grace_net_id);
+	struct lock_manager *lm;
 
-	return !list_empty(grace_list);
+	if (!open)
+		return !list_empty(grace_list);
+
+	list_for_each_entry(lm, grace_list, list) {
+		if (lm->block_opens)
+			return true;
+	}
+	return false;
+}
+
+int locks_in_grace(struct net *net)
+{
+	return __state_in_grace(net, false);
 }
 EXPORT_SYMBOL_GPL(locks_in_grace);
 
+int opens_in_grace(struct net *net)
+{
+	return __state_in_grace(net, true);
+}
+EXPORT_SYMBOL_GPL(opens_in_grace);
+
 static int __net_init
 grace_init_net(struct net *net)
 {
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 9aa2796..6d834dc 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -101,7 +101,7 @@
 	}
 
 	nr_iomaps = be32_to_cpup(p++);
-	expected = sizeof(__be32) + nr_iomaps * NFS4_BLOCK_EXTENT_SIZE;
+	expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE;
 	if (len != expected) {
 		dprintk("%s: extent array size mismatch: %u/%u\n",
 			__func__, len, expected);
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index fdc7903..6de925f 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -7,13 +7,6 @@
 struct iomap;
 struct xdr_stream;
 
-enum pnfs_block_extent_state {
-	PNFS_BLOCK_READWRITE_DATA	= 0,
-	PNFS_BLOCK_READ_DATA		= 1,
-	PNFS_BLOCK_INVALID_DATA		= 2,
-	PNFS_BLOCK_NONE_DATA		= 3,
-};
-
 struct pnfs_block_extent {
 	struct nfsd4_deviceid		vol_id;
 	u64				foff;
@@ -21,14 +14,6 @@
 	u64				soff;
 	enum pnfs_block_extent_state	es;
 };
-#define NFS4_BLOCK_EXTENT_SIZE		44
-
-enum pnfs_block_volume_type {
-	PNFS_BLOCK_VOLUME_SIMPLE	= 0,
-	PNFS_BLOCK_VOLUME_SLICE		= 1,
-	PNFS_BLOCK_VOLUME_CONCAT	= 2,
-	PNFS_BLOCK_VOLUME_STRIPE	= 3,
-};
 
 /*
  * Random upper cap for the uuid length to avoid unbounded allocation.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index f79521a..b4d84b5 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1075,73 +1075,6 @@
 	return rv;
 }
 
-/* Iterator */
-
-static void *e_start(struct seq_file *m, loff_t *pos)
-	__acquires(((struct cache_detail *)m->private)->hash_lock)
-{
-	loff_t n = *pos;
-	unsigned hash, export;
-	struct cache_head *ch;
-	struct cache_detail *cd = m->private;
-	struct cache_head **export_table = cd->hash_table;
-
-	read_lock(&cd->hash_lock);
-	if (!n--)
-		return SEQ_START_TOKEN;
-	hash = n >> 32;
-	export = n & ((1LL<<32) - 1);
-
-	
-	for (ch=export_table[hash]; ch; ch=ch->next)
-		if (!export--)
-			return ch;
-	n &= ~((1LL<<32) - 1);
-	do {
-		hash++;
-		n += 1LL<<32;
-	} while(hash < EXPORT_HASHMAX && export_table[hash]==NULL);
-	if (hash >= EXPORT_HASHMAX)
-		return NULL;
-	*pos = n+1;
-	return export_table[hash];
-}
-
-static void *e_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct cache_head *ch = p;
-	int hash = (*pos >> 32);
-	struct cache_detail *cd = m->private;
-	struct cache_head **export_table = cd->hash_table;
-
-	if (p == SEQ_START_TOKEN)
-		hash = 0;
-	else if (ch->next == NULL) {
-		hash++;
-		*pos += 1LL<<32;
-	} else {
-		++*pos;
-		return ch->next;
-	}
-	*pos &= ~((1LL<<32) - 1);
-	while (hash < EXPORT_HASHMAX && export_table[hash] == NULL) {
-		hash++;
-		*pos += 1LL<<32;
-	}
-	if (hash >= EXPORT_HASHMAX)
-		return NULL;
-	++*pos;
-	return export_table[hash];
-}
-
-static void e_stop(struct seq_file *m, void *p)
-	__releases(((struct cache_detail *)m->private)->hash_lock)
-{
-	struct cache_detail *cd = m->private;
-
-	read_unlock(&cd->hash_lock);
-}
-
 static struct flags {
 	int flag;
 	char *name[2];
@@ -1270,9 +1203,9 @@
 }
 
 const struct seq_operations nfs_exports_op = {
-	.start	= e_start,
-	.next	= e_next,
-	.stop	= e_stop,
+	.start	= cache_seq_start,
+	.next	= cache_seq_next,
+	.stop	= cache_seq_stop,
 	.show	= e_show,
 };
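
For reference, the deleted e_start()/e_next() iterators packed the seq_file position into a single loff_t, hash-chain index in the high 32 bits and entry index within the chain in the low 32 bits; the generic cache_seq_* helpers now own that bookkeeping. The encoding they used, in isolation:

#include <stdint.h>
#include <stdio.h>

static uint64_t pos_encode(uint32_t hash, uint32_t entry)
{
	return ((uint64_t)hash << 32) | entry;
}

static void pos_decode(uint64_t pos, uint32_t *hash, uint32_t *entry)
{
	*hash  = (uint32_t)(pos >> 32);
	*entry = (uint32_t)(pos & ((1ULL << 32) - 1));
}

int main(void)
{
	uint32_t h, e;

	pos_decode(pos_encode(3, 7), &h, &e);
	printf("hash=%u entry=%u\n", h, e);	/* hash=3 entry=7 */
	return 0;
}
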
 
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 1f52bfc..2e31507 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -6,6 +6,7 @@
 
 #include <linux/sunrpc/cache.h>
 #include <uapi/linux/nfsd/export.h>
+#include <linux/nfs4.h>
 
 struct knfsd_fh;
 struct svc_fh;
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index a3f3490..23cc85d 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -37,9 +37,7 @@
 
 #include <linux/in.h>
 #include <linux/sunrpc/svc.h>
-
-/* XXX from linux/nfs_idmap.h */
-#define IDMAP_NAMESZ 128
+#include <linux/nfs_idmap.h>
 
 #ifdef CONFIG_NFSD_V4
 int nfsd_idmap_init(struct net *);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index ea6749a..d8b16c2 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -110,6 +110,7 @@
 	unsigned int max_connections;
 
 	u32 clientid_counter;
+	u32 clverifier_counter;
 
 	struct svc_serv *nfsd_serv;
 };
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index d54701f..1580ea6 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -44,13 +44,13 @@
 
 	inode = d_inode(fh->fh_dentry);
 
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+	if (argp->mask & ~NFS_ACL_MASK)
 		RETURN_STATUS(nfserr_inval);
 	resp->mask = argp->mask;
 
 	nfserr = fh_getattr(fh, &resp->stat);
 	if (nfserr)
-		goto fail;
+		RETURN_STATUS(nfserr);
 
 	if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
 		acl = get_acl(inode, ACL_TYPE_ACCESS);
@@ -202,7 +202,7 @@
 	if (!p)
 		return 0;
 	argp->mask = ntohl(*p++);
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) ||
+	if (argp->mask & ~NFS_ACL_MASK ||
 	    !xdr_argsize_check(rqstp, p))
 		return 0;
 
@@ -293,9 +293,7 @@
 				  resp->acl_default,
 				  resp->mask & NFS_DFACL,
 				  NFS_ACL_DEFAULT);
-	if (n <= 0)
-		return 0;
-	return 1;
+	return (n > 0);
 }
 
 static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 882b1a1..01df4cd 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -41,7 +41,7 @@
 
 	inode = d_inode(fh->fh_dentry);
 
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+	if (argp->mask & ~NFS_ACL_MASK)
 		RETURN_STATUS(nfserr_inval);
 	resp->mask = argp->mask;
 
@@ -148,7 +148,7 @@
 	if (!p)
 		return 0;
 	args->mask = ntohl(*p++);
-	if (args->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) ||
+	if (args->mask & ~NFS_ACL_MASK ||
 	    !xdr_argsize_check(rqstp, p))
 		return 0;
 
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index eb5accf..6adabd6 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -34,8 +34,10 @@
  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/nfs_fs.h>
+#include <linux/posix_acl.h>
+
 #include "nfsfh.h"
 #include "nfsd.h"
 #include "acl.h"
@@ -100,7 +102,7 @@
 /* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
  * side of being more restrictive, so the mode bit mapping below is
  * pessimistic.  An optimistic version would be needed to handle DENY's,
- * but we espect to coalesce all ALLOWs and DENYs before mapping to mode
+ * but we expect to coalesce all ALLOWs and DENYs before mapping to mode
  * bits. */
 
 static void
@@ -458,7 +460,7 @@
 	state->empty = 1;
 	/*
 	 * In the worst case, each individual acl could be for a distinct
-	 * named user or group, but we don't no which, so we allocate
+	 * named user or group, but we don't know which, so we allocate
 	 * enough space for either:
 	 */
 	alloc = sizeof(struct posix_ace_state_array)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index a492018..e7f50c4 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -435,12 +435,12 @@
 	 */
 	status = 0;
 out:
-	if (status)
-		nfsd4_mark_cb_fault(cb->cb_clp, status);
+	cb->cb_seq_status = status;
 	return status;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return -EIO;
+	status = -EIO;
+	goto out;
 }
 
 static int decode_cb_sequence4res(struct xdr_stream *xdr,
@@ -451,11 +451,10 @@
 	if (cb->cb_minorversion == 0)
 		return 0;
 
-	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
-	if (unlikely(status || cb->cb_status))
+	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
+	if (unlikely(status || cb->cb_seq_status))
 		return status;
 
-	cb->cb_update_seq_nr = true;
 	return decode_cb_sequence4resok(xdr, cb);
 }
 
@@ -527,7 +526,7 @@
 
 	if (cb != NULL) {
 		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_status))
+		if (unlikely(status || cb->cb_seq_status))
 			return status;
 	}
 
@@ -617,7 +616,7 @@
 
 	if (cb) {
 		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_status))
+		if (unlikely(status || cb->cb_seq_status))
 			return status;
 	}
 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
@@ -876,7 +875,11 @@
 	u32 minorversion = clp->cl_minorversion;
 
 	cb->cb_minorversion = minorversion;
-	cb->cb_update_seq_nr = false;
+	/*
+	 * cb_seq_status is only set in decode_cb_sequence4res,
+	 * and so will remain 1 if an rpc level failure occurs.
+	 */
+	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	if (minorversion) {
 		if (!nfsd41_cb_get_slot(clp, task))
@@ -885,6 +888,84 @@
 	rpc_call_start(task);
 }
 
+static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
+{
+	struct nfs4_client *clp = cb->cb_clp;
+	struct nfsd4_session *session = clp->cl_cb_session;
+	bool ret = true;
+
+	if (!clp->cl_minorversion) {
+		/*
+		 * If the backchannel connection was shut down while this
+		 * task was queued, we need to resubmit it after setting up
+		 * a new backchannel connection.
+		 *
+		 * Note that if we lost our callback connection permanently
+		 * the submission code will error out, so we don't need to
+		 * handle that case here.
+		 */
+		if (task->tk_flags & RPC_TASK_KILLED)
+			goto need_restart;
+
+		return true;
+	}
+
+	switch (cb->cb_seq_status) {
+	case 0:
+		/*
+		 * No need for lock, access serialized in nfsd4_cb_prepare
+		 *
+		 * RFC5661 20.9.3
+		 * If CB_SEQUENCE returns an error, then the state of the slot
+		 * (sequence ID, cached reply) MUST NOT change.
+		 */
+		++session->se_cb_seq_nr;
+		break;
+	case -ESERVERFAULT:
+		++session->se_cb_seq_nr;
+	case 1:
+	case -NFS4ERR_BADSESSION:
+		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
+		ret = false;
+		break;
+	case -NFS4ERR_DELAY:
+		if (!rpc_restart_call(task))
+			goto out;
+
+		rpc_delay(task, 2 * HZ);
+		return false;
+	case -NFS4ERR_BADSLOT:
+		goto retry_nowait;
+	case -NFS4ERR_SEQ_MISORDERED:
+		if (session->se_cb_seq_nr != 1) {
+			session->se_cb_seq_nr = 1;
+			goto retry_nowait;
+		}
+		break;
+	default:
+		dprintk("%s: unprocessed error %d\n", __func__,
+			cb->cb_seq_status);
+	}
+
+	clear_bit(0, &clp->cl_cb_slot_busy);
+	rpc_wake_up_next(&clp->cl_cb_waitq);
+	dprintk("%s: freed slot, new seqid=%d\n", __func__,
+		clp->cl_cb_session->se_cb_seq_nr);
+
+	if (task->tk_flags & RPC_TASK_KILLED)
+		goto need_restart;
+out:
+	return ret;
+retry_nowait:
+	if (rpc_restart_call_prepare(task))
+		ret = false;
+	goto out;
+need_restart:
+	task->tk_status = 0;
+	cb->cb_need_restart = true;
+	return false;
+}
+
 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
 {
 	struct nfsd4_callback *cb = calldata;
@@ -893,37 +974,8 @@
 	dprintk("%s: minorversion=%d\n", __func__,
 		clp->cl_minorversion);
 
-	if (clp->cl_minorversion) {
-		/*
-		 * No need for lock, access serialized in nfsd4_cb_prepare
-		 *
-		 * RFC5661 20.9.3
-		 * If CB_SEQUENCE returns an error, then the state of the slot
-		 * (sequence ID, cached reply) MUST NOT change.
-		 */
-		if (cb->cb_update_seq_nr)
-			++clp->cl_cb_session->se_cb_seq_nr;
-
-		clear_bit(0, &clp->cl_cb_slot_busy);
-		rpc_wake_up_next(&clp->cl_cb_waitq);
-		dprintk("%s: freed slot, new seqid=%d\n", __func__,
-			clp->cl_cb_session->se_cb_seq_nr);
-	}
-
-	/*
-	 * If the backchannel connection was shut down while this
-	 * task was queued, we need to resubmit it after setting up
-	 * a new backchannel connection.
-	 *
-	 * Note that if we lost our callback connection permanently
-	 * the submission code will error out, so we don't need to
-	 * handle that case here.
-	 */
-	if (task->tk_flags & RPC_TASK_KILLED) {
-		task->tk_status = 0;
-		cb->cb_need_restart = true;
+	if (!nfsd4_cb_sequence_done(task, cb))
 		return;
-	}
 
 	if (cb->cb_status) {
 		WARN_ON_ONCE(task->tk_status);
@@ -1099,8 +1151,8 @@
 	cb->cb_msg.rpc_resp = cb;
 	cb->cb_ops = ops;
 	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
+	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
-	cb->cb_update_seq_nr = false;
 	cb->cb_need_restart = false;
 }
 
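The new nfsd4_cb_sequence_done() above centralizes the backchannel slot bookkeeping that nfsd4_cb_done() previously did inline. A compressed userspace model of its decision table; the status names and values are hypothetical placeholders, not the kernel's NFS4ERR_* codes:

    #include <stdio.h>

    /* Hypothetical placeholders for the kernel's status codes. */
    enum { OK = 0, RPC_FAILED = 1, SERVERFAULT, BADSESSION, DELAY,
           BADSLOT, SEQ_MISORDERED };
    enum cb_action { CB_DONE, CB_FAIL, CB_RETRY_DELAYED, CB_RETRY_NOW };

    /* Only success (and the SERVERFAULT fall-through) advances the
     * slot's sequence id, per RFC 5661 20.9.3; DELAY and BADSLOT retry;
     * SEQ_MISORDERED retries once after resetting the seqid to 1. */
    static enum cb_action cb_decide(int status, unsigned int *seq_nr)
    {
        switch (status) {
        case OK:
            ++*seq_nr;
            return CB_DONE;
        case SERVERFAULT:
            ++*seq_nr;               /* falls through to the failure cases */
        case RPC_FAILED:
        case BADSESSION:
            return CB_FAIL;          /* mark the callback path faulty */
        case DELAY:
            return CB_RETRY_DELAYED; /* rpc_delay(task, 2 * HZ) in the kernel */
        case BADSLOT:
            return CB_RETRY_NOW;
        case SEQ_MISORDERED:
            if (*seq_nr != 1) {
                *seq_nr = 1;         /* reset once, then retry */
                return CB_RETRY_NOW;
            }
            return CB_DONE;          /* already reset once; give up */
        default:
            return CB_DONE;          /* unprocessed error: only logged */
        }
    }

    int main(void)
    {
        unsigned int seq = 5;

        printf("%d seq=%u\n", cb_decide(OK, &seq), seq); /* 0 seq=6 */
        return 0;
    }
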
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index e1b3d3d..5b20577 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -59,9 +59,6 @@
  * that.
  */
 
-#define IDMAP_TYPE_USER  0
-#define IDMAP_TYPE_GROUP 1
-
 struct ent {
 	struct cache_head h;
 	int               type;		       /* User / Group */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 90cfda7..4ce6b97 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -276,13 +276,13 @@
 			nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
 
 		/*
-		 * Following rfc 3530 14.2.16, use the returned bitmask
-		 * to indicate which attributes we used to store the
-		 * verifier:
+		 * Following rfc 3530 14.2.16, and rfc 5661 18.16.4
+		 * use the returned bitmask to indicate which attributes
+		 * we used to store the verifier:
 		 */
-		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
-			open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
-							FATTR4_WORD1_TIME_MODIFY);
+		if (nfsd_create_is_exclusive(open->op_createmode) && status == 0)
+			open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
+						FATTR4_WORD1_TIME_MODIFY);
 	} else
 		/*
 		 * Note this may exit with the parent still locked.
@@ -362,7 +362,6 @@
 {
 	__be32 status;
 	struct svc_fh *resfh = NULL;
-	struct nfsd4_compoundres *resp;
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
@@ -389,8 +388,7 @@
 		copy_clientid(&open->op_clientid, cstate->session);
 
 	/* check seqid for replay. set nfs4_owner */
-	resp = rqstp->rq_resp;
-	status = nfsd4_process_open1(&resp->cstate, open, nn);
+	status = nfsd4_process_open1(cstate, open, nn);
 	if (status == nfserr_replay_me) {
 		struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
 		fh_put(&cstate->current_fh);
@@ -417,10 +415,10 @@
 	/* Openowner is now set, so sequence id will get bumped.  Now we need
 	 * these checks before we do any creates: */
 	status = nfserr_grace;
-	if (locks_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+	if (opens_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
 		goto out;
 	status = nfserr_no_grace;
-	if (!locks_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+	if (!opens_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
 		goto out;
 
 	switch (open->op_claim_type) {
@@ -829,7 +827,7 @@
 {
 	__be32 status;
 
-	if (locks_in_grace(SVC_NET(rqstp)))
+	if (opens_in_grace(SVC_NET(rqstp)))
 		return nfserr_grace;
 	status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
 			     remove->rm_name, remove->rm_namelen);
@@ -848,7 +846,7 @@
 
 	if (!cstate->save_fh.fh_dentry)
 		return status;
-	if (locks_in_grace(SVC_NET(rqstp)) &&
+	if (opens_in_grace(SVC_NET(rqstp)) &&
 		!(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
 		return nfserr_grace;
 	status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
@@ -1364,10 +1362,6 @@
 		goto out;
 	}
 
-	nfserr = ops->proc_layoutcommit(inode, lcp);
-	if (nfserr)
-		goto out_put_stid;
-
 	if (new_size > i_size_read(inode)) {
 		lcp->lc_size_chg = 1;
 		lcp->lc_newsize = new_size;
@@ -1375,7 +1369,7 @@
 		lcp->lc_size_chg = 0;
 	}
 
-out_put_stid:
+	nfserr = ops->proc_layoutcommit(inode, lcp);
 	nfs4_put_stid(&ls->ls_stid);
 out:
 	return nfserr;
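
The locks_in_grace() to opens_in_grace() switch throughout nfs4proc.c lets opens, removes and renames stop blocking separately from NLM lock reclaim (the nfs4state.c hunk below sets block_opens on the grace manager). A hedged sketch of the gating logic, with hypothetical names:

    /* Sketch: gate a v4 request on the (hypothetical) grace state. */
    static int check_open_grace(int opens_blocked, int is_reclaim)
    {
        if (opens_blocked && !is_reclaim)
            return -1;  /* nfserr_grace: new state denied, retry later */
        if (!opens_blocked && is_reclaim)
            return -2;  /* nfserr_no_grace: too late to reclaim */
        return 0;
    }
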
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index d88ea7b..e3d4709 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -272,6 +272,7 @@
 		.ctx.actor = nfsd4_build_namelist,
 		.names = LIST_HEAD_INIT(ctx.names)
 	};
+	struct name_list *entry, *tmp;
 	int status;
 
 	status = nfs4_save_creds(&original_cred);
@@ -286,9 +287,8 @@
 
 	status = iterate_dir(nn->rec_file, &ctx.ctx);
 	mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
-	while (!list_empty(&ctx.names)) {
-		struct name_list *entry;
-		entry = list_entry(ctx.names.next, struct name_list, list);
+
+	list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
 		if (!status) {
 			struct dentry *dentry;
 			dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
@@ -304,6 +304,12 @@
 	}
 	mutex_unlock(&d_inode(dir)->i_mutex);
 	nfs4_reset_creds(original_cred);
+
+	list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
+		dprintk("NFSD: %s. Left entry %s\n", __func__, entry->name);
+		list_del(&entry->list);
+		kfree(entry);
+	}
 	return status;
 }
 
@@ -541,8 +547,7 @@
 
 	/* XXX: The legacy code won't work in a container */
 	if (net != &init_net) {
-		WARN(1, KERN_ERR "NFSD: attempt to initialize legacy client "
-			"tracking in a container!\n");
+		pr_warn("NFSD: attempt to initialize legacy client tracking in a container ignored.\n");
 		return -EINVAL;
 	}
 
@@ -1254,8 +1259,7 @@
 
 	/* XXX: The usermode helper is not working in a container yet. */
 	if (net != &init_net) {
-		WARN(1, KERN_ERR "NFSD: attempt to initialize umh client "
-			"tracking in a container!\n");
+		pr_warn("NFSD: attempt to initialize umh client tracking in a container ignored.\n");
 		return -EINVAL;
 	}
 
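The reclaim-directory walk above now uses list_for_each_entry_safe(), which caches the next node so the current one can be unlinked mid-walk, and adds a second sweep so entries left over after an error cannot leak. The kernel-list pattern in isolation (process() is a hypothetical per-entry step):

    struct name_list *entry, *tmp;

    list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
        if (process(entry) == 0) {      /* hypothetical per-entry work */
            list_del(&entry->list);
            kfree(entry);
        }
    }
    /* Second pass: whatever the first pass left behind is still freed. */
    list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
        list_del(&entry->list);
        kfree(entry);
    }
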
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9520271..0f1d569 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -777,13 +777,16 @@
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
-static void
+static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
 
 	lockdep_assert_held(&state_lock);
 
+	if (list_empty(&dp->dl_perfile))
+		return false;
+
 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
 	/* Ensure that deleg break won't try to requeue it */
 	++dp->dl_time;
@@ -792,16 +795,21 @@
 	list_del_init(&dp->dl_recall_lru);
 	list_del_init(&dp->dl_perfile);
 	spin_unlock(&fp->fi_lock);
+	return true;
 }
 
 static void destroy_delegation(struct nfs4_delegation *dp)
 {
+	bool unhashed;
+
 	spin_lock(&state_lock);
-	unhash_delegation_locked(dp);
+	unhashed = unhash_delegation_locked(dp);
 	spin_unlock(&state_lock);
-	put_clnt_odstate(dp->dl_clnt_odstate);
-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
-	nfs4_put_stid(&dp->dl_stid);
+	if (unhashed) {
+		put_clnt_odstate(dp->dl_clnt_odstate);
+		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+		nfs4_put_stid(&dp->dl_stid);
+	}
 }
 
 static void revoke_delegation(struct nfs4_delegation *dp)
@@ -990,6 +998,12 @@
 	}
 }
 
+static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
+{
+	kfree(sop->so_owner.data);
+	sop->so_ops->so_free(sop);
+}
+
 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
 {
 	struct nfs4_client *clp = sop->so_client;
@@ -1000,20 +1014,23 @@
 		return;
 	sop->so_ops->so_unhash(sop);
 	spin_unlock(&clp->cl_lock);
-	kfree(sop->so_owner.data);
-	sop->so_ops->so_free(sop);
+	nfs4_free_stateowner(sop);
 }
 
-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_file *fp = stp->st_stid.sc_file;
 
 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
 
+	if (list_empty(&stp->st_perfile))
+		return false;
+
 	spin_lock(&fp->fi_lock);
-	list_del(&stp->st_perfile);
+	list_del_init(&stp->st_perfile);
 	spin_unlock(&fp->fi_lock);
 	list_del(&stp->st_perstateowner);
+	return true;
 }
 
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
@@ -1063,25 +1080,27 @@
 	list_add(&stp->st_locks, reaplist);
 }
 
-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
 
 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
 
 	list_del_init(&stp->st_locks);
-	unhash_ol_stateid(stp);
 	nfs4_unhash_stid(&stp->st_stid);
+	return unhash_ol_stateid(stp);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+	bool unhashed;
 
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
-	unhash_lock_stateid(stp);
+	unhashed = unhash_lock_stateid(stp);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
-	nfs4_put_stid(&stp->st_stid);
+	if (unhashed)
+		nfs4_put_stid(&stp->st_stid);
 }
 
 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
@@ -1129,7 +1148,7 @@
 	while (!list_empty(&lo->lo_owner.so_stateids)) {
 		stp = list_first_entry(&lo->lo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
@@ -1142,21 +1161,26 @@
 {
 	struct nfs4_ol_stateid *stp;
 
+	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
+
 	while (!list_empty(&open_stp->st_locks)) {
 		stp = list_entry(open_stp->st_locks.next,
 				struct nfs4_ol_stateid, st_locks);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, reaplist);
 	}
 }
 
-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
+static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
 				struct list_head *reaplist)
 {
+	bool unhashed;
+
 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
 
-	unhash_ol_stateid(stp);
+	unhashed = unhash_ol_stateid(stp);
 	release_open_stateid_locks(stp, reaplist);
+	return unhashed;
 }
 
 static void release_open_stateid(struct nfs4_ol_stateid *stp)
@@ -1164,8 +1188,8 @@
 	LIST_HEAD(reaplist);
 
 	spin_lock(&stp->st_stid.sc_client->cl_lock);
-	unhash_open_stateid(stp, &reaplist);
-	put_ol_stateid_locked(stp, &reaplist);
+	if (unhash_open_stateid(stp, &reaplist))
+		put_ol_stateid_locked(stp, &reaplist);
 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
 }
@@ -1210,8 +1234,8 @@
 	while (!list_empty(&oo->oo_owner.so_stateids)) {
 		stp = list_first_entry(&oo->oo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_open_stateid(stp, &reaplist);
-		put_ol_stateid_locked(stp, &reaplist);
+		if (unhash_open_stateid(stp, &reaplist))
+			put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
@@ -1714,7 +1738,7 @@
 	spin_lock(&state_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -1894,7 +1918,7 @@
 	 * __force to keep sparse happy
 	 */
 	verf[0] = (__force __be32)get_seconds();
-	verf[1] = (__force __be32)nn->clientid_counter;
+	verf[1] = (__force __be32)nn->clverifier_counter++;
 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
 }
 
@@ -2241,6 +2265,9 @@
 	 * Also note we should probably be using this in the 4.0 case too.
 	 */
 	return !list_empty(&clp->cl_openowners)
+#ifdef CONFIG_NFSD_PNFS
+		|| !list_empty(&clp->cl_lo_states)
+#endif
 		|| !list_empty(&clp->cl_delegations)
 		|| !list_empty(&clp->cl_sessions);
 }
@@ -2547,11 +2574,9 @@
 			goto out_free_conn;
 		cs_slot = &conf->cl_cs_slot;
 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
-		if (status == nfserr_replay_cache) {
-			status = nfsd4_replay_create_session(cr_ses, cs_slot);
-			goto out_free_conn;
-		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
-			status = nfserr_seq_misordered;
+		if (status) {
+			if (status == nfserr_replay_cache)
+				status = nfsd4_replay_create_session(cr_ses, cs_slot);
 			goto out_free_conn;
 		}
 	} else if (unconf) {
@@ -3041,10 +3066,11 @@
 	unconf = find_unconfirmed_client_by_name(&clname, nn);
 	if (unconf)
 		unhash_client_locked(unconf);
-	if (conf && same_verf(&conf->cl_verifier, &clverifier))
+	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
 		/* case 1: probable callback update */
 		copy_clid(new, conf);
-	else /* case 4 (new client) or cases 2, 3 (client reboot): */
+		gen_confirm(new, nn);
+	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
 		gen_clid(new, nn);
 	new->cl_minorversion = 0;
 	gen_callback(new, setclid, rqstp);
@@ -3085,10 +3111,11 @@
 	/*
 	 * We try hard to give out unique clientid's, so if we get an
 	 * attempt to confirm the same clientid with a different cred,
-	 * there's a bug somewhere.  Let's charitably assume it's our
-	 * bug.
+	 * the client may be buggy; this should never happen.
+	 *
+	 * Nevertheless, RFC 7530 recommends INUSE for this case:
 	 */
-	status = nfserr_serverfault;
+	status = nfserr_clid_inuse;
 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
 		goto out;
 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
@@ -3315,7 +3342,8 @@
 		hash_openowner(oo, clp, strhashval);
 		ret = oo;
 	} else
-		nfs4_free_openowner(&oo->oo_owner);
+		nfs4_free_stateowner(&oo->oo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -3482,6 +3510,9 @@
 {
 	struct nfs4_delegation *dp = cb_to_delegation(cb);
 
+	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
+	        return 1;
+
 	switch (task->tk_status) {
 	case 0:
 		return 1;
@@ -3885,12 +3916,6 @@
 	return status;
 }
 
-static void
-nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
-{
-	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
-}
-
 /* Should we give out recallable state?: */
 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
 {
@@ -3923,7 +3948,7 @@
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
-	struct file_lock *fl, *ret;
+	struct file_lock *fl;
 	struct file *filp;
 	int status = 0;
 
@@ -3934,10 +3959,10 @@
 	if (!filp) {
 		/* We should always have a readable file here */
 		WARN_ON_ONCE(1);
+		locks_free_lock(fl);
 		return -EBADF;
 	}
 	fl->fl_file = filp;
-	ret = fl;
 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
 	if (fl)
 		locks_free_lock(fl);
@@ -4063,7 +4088,8 @@
 		case NFS4_OPEN_CLAIM_FH:
 			/*
 			 * Let's not give out any delegations till everyone's
-			 * had the chance to reclaim theirs....
+			 * had the chance to reclaim theirs, *and* until
+			 * NLM locks have all been reclaimed:
 			 */
 			if (locks_in_grace(clp->net))
 				goto out_no_deleg;
@@ -4209,7 +4235,7 @@
 	if (fp)
 		put_nfs4_file(fp);
 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
-		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
+		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
 	/*
 	* To finish the open response, we just need to set the rflags.
 	*/
@@ -4338,14 +4364,12 @@
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
-			continue;
 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
 			t = dp->dl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
 		}
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -4440,7 +4464,7 @@
 {
 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
 		return nfs_ok;
-	else if (locks_in_grace(net)) {
+	else if (opens_in_grace(net)) {
 		/* Answer in remaining cases depends on existence of
 		 * conflicting state; so we must wait out the grace period. */
 		return nfserr_grace;
@@ -4459,7 +4483,7 @@
 static inline int
 grace_disallows_io(struct net *net, struct inode *inode)
 {
-	return locks_in_grace(net) && mandatory_lock(inode);
+	return opens_in_grace(net) && mandatory_lock(inode);
 }
 
 /* Returns true iff a is later than b: */
@@ -4751,7 +4775,7 @@
 		if (check_for_locks(stp->st_stid.sc_file,
 				    lockowner(stp->st_stateowner)))
 			break;
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		spin_unlock(&cl->cl_lock);
 		nfs4_put_stid(s);
 		ret = nfs_ok;
@@ -4967,20 +4991,23 @@
 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 {
 	struct nfs4_client *clp = s->st_stid.sc_client;
+	bool unhashed;
 	LIST_HEAD(reaplist);
 
 	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
-	unhash_open_stateid(s, &reaplist);
+	unhashed = unhash_open_stateid(s, &reaplist);
 
 	if (clp->cl_minorversion) {
-		put_ol_stateid_locked(s, &reaplist);
+		if (unhashed)
+			put_ol_stateid_locked(s, &reaplist);
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
 	} else {
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
-		move_to_close_lru(s, clp->net);
+		if (unhashed)
+			move_to_close_lru(s, clp->net);
 	}
 }
 
@@ -5045,9 +5072,6 @@
 	return status;
 }
 
-
-#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
-
 static inline u64
 end_offset(u64 start, u64 len)
 {
@@ -5139,8 +5163,7 @@
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	unsigned int strhashval = ownerstr_hashval(owner);
 	struct nfs4_stateowner *so;
@@ -5158,13 +5181,12 @@
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	struct nfs4_lockowner *lo;
 
 	spin_lock(&clp->cl_lock);
-	lo = find_lockowner_str_locked(clid, owner, clp);
+	lo = find_lockowner_str_locked(clp, owner);
 	spin_unlock(&clp->cl_lock);
 	return lo;
 }
@@ -5208,14 +5230,14 @@
 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
 	lo->lo_owner.so_ops = &lockowner_ops;
 	spin_lock(&clp->cl_lock);
-	ret = find_lockowner_str_locked(&clp->cl_clientid,
-			&lock->lk_new_owner, clp);
+	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
 	if (ret == NULL) {
 		list_add(&lo->lo_owner.so_strhash,
 			 &clp->cl_ownerstr_hashtbl[strhashval]);
 		ret = lo;
 	} else
-		nfs4_free_lockowner(&lo->lo_owner);
+		nfs4_free_stateowner(&lo->lo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -5298,8 +5320,8 @@
 static int
 check_lock_length(u64 offset, u64 length)
 {
-	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
-	     LOFF_OVERFLOW(offset, length)));
+	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
+		(length > ~offset)));
 }
 
 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
@@ -5328,9 +5350,9 @@
 	struct nfs4_lockowner *lo;
 	unsigned int strhashval;
 
-	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
+	lo = find_lockowner_str(cl, &lock->lk_new_owner);
 	if (!lo) {
-		strhashval = ownerstr_hashval(&lock->v.new.owner);
+		strhashval = ownerstr_hashval(&lock->lk_new_owner);
 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
 		if (lo == NULL)
 			return nfserr_jukebox;
@@ -5391,7 +5413,7 @@
 	if (lock->lk_is_new) {
 		if (nfsd4_has_session(cstate))
 			/* See rfc 5661 18.10.3: given clientid is ignored: */
-			memcpy(&lock->v.new.clientid,
+			memcpy(&lock->lk_new_clientid,
 				&cstate->session->se_client->cl_clientid,
 				sizeof(clientid_t));
 
@@ -5409,7 +5431,7 @@
 		open_sop = openowner(open_stp->st_stateowner);
 		status = nfserr_bad_stateid;
 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
-						&lock->v.new.clientid))
+						&lock->lk_new_clientid))
 			goto out;
 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
 							&lock_stp, &new);
@@ -5603,8 +5625,7 @@
 		goto out;
 	}
 
-	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
-				cstate->clp);
+	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
 	if (lo)
 		file_lock->fl_owner = (fl_owner_t)lo;
 	file_lock->fl_pid = current->tgid;
@@ -6019,7 +6040,7 @@
 
 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
 				    struct list_head *collect,
-				    void (*func)(struct nfs4_ol_stateid *))
+				    bool (*func)(struct nfs4_ol_stateid *))
 {
 	struct nfs4_openowner *oop;
 	struct nfs4_ol_stateid *stp, *st_next;
@@ -6033,9 +6054,9 @@
 			list_for_each_entry_safe(lst, lst_next,
 					&stp->st_locks, st_locks) {
 				if (func) {
-					func(lst);
-					nfsd_inject_add_lock_to_list(lst,
-								collect);
+					if (func(lst))
+						nfsd_inject_add_lock_to_list(lst,
+									collect);
 				}
 				++count;
 				/*
@@ -6305,7 +6326,7 @@
 				continue;
 
 			atomic_inc(&clp->cl_refcount);
-			unhash_delegation_locked(dp);
+			WARN_ON(!unhash_delegation_locked(dp));
 			list_add(&dp->dl_recall_lru, victims);
 		}
 		++count;
@@ -6584,6 +6605,7 @@
 		return ret;
 	nn->boot_time = get_seconds();
 	nn->grace_ended = false;
+	nn->nfsd4_manager.block_opens = true;
 	locks_start_grace(net, &nn->nfsd4_manager);
 	nfsd4_client_tracking_init(net);
 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
@@ -6602,7 +6624,7 @@
 	ret = set_callback_cred();
 	if (ret)
 		return -ENOMEM;
-	laundry_wq = create_singlethread_workqueue("nfsd4");
+	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
 	if (laundry_wq == NULL) {
 		ret = -ENOMEM;
 		goto out_recovery;
@@ -6635,7 +6657,7 @@
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
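
A theme of the nfs4state.c hunks above: unhash_delegation_locked(), unhash_ol_stateid(), unhash_lock_stateid() and unhash_open_stateid() now return bool, so a caller racing against another unhasher sees false and skips the put/free path, while callers that must be the sole unhasher can assert it with WARN_ON(). The shape of the pattern, as a kernel-style fragment with hypothetical types:

    /* Idempotent unhash: report whether this call did the unlinking. */
    static bool unhash_obj(struct obj *o)
    {
        lockdep_assert_held(&state_lock);
        if (list_empty(&o->link))
            return false;           /* someone else already unhashed it */
        list_del_init(&o->link);    /* re-init so a repeat call sees empty */
        return true;
    }

    /* Caller releases the reference only if it won the race: */
    spin_lock(&state_lock);
    unhashed = unhash_obj(o);
    spin_unlock(&state_lock);
    if (unhashed)
        put_obj(o);                 /* hypothetical final put */
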
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 75e0563..51c9e9c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2140,6 +2140,27 @@
 		return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
 }
 
+static inline __be32
+nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
+{
+	__be32 *p;
+
+	if (layout_type) {
+		p = xdr_reserve_space(xdr, 8);
+		if (!p)
+			return nfserr_resource;
+		*p++ = cpu_to_be32(1);
+		*p++ = cpu_to_be32(layout_type);
+	} else {
+		p = xdr_reserve_space(xdr, 4);
+		if (!p)
+			return nfserr_resource;
+		*p++ = cpu_to_be32(0);
+	}
+
+	return 0;
+}
+
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
 			      FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
@@ -2205,6 +2226,39 @@
 	return err;
 }
 
+static __be32
+nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
+{
+	__be32 *p;
+
+	if (bmval2) {
+		p = xdr_reserve_space(xdr, 16);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(3);
+		*p++ = cpu_to_be32(bmval0);
+		*p++ = cpu_to_be32(bmval1);
+		*p++ = cpu_to_be32(bmval2);
+	} else if (bmval1) {
+		p = xdr_reserve_space(xdr, 12);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(2);
+		*p++ = cpu_to_be32(bmval0);
+		*p++ = cpu_to_be32(bmval1);
+	} else {
+		p = xdr_reserve_space(xdr, 8);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(1);
+		*p++ = cpu_to_be32(bmval0);
+	}
+
+	return 0;
+out_resource:
+	return nfserr_resource;
+}
+
 /*
  * Note: @fhp can be NULL; in this case, we might have to compose the filehandle
  * ourselves.
@@ -2301,28 +2355,9 @@
 	}
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
 
-	if (bmval2) {
-		p = xdr_reserve_space(xdr, 16);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(3);
-		*p++ = cpu_to_be32(bmval0);
-		*p++ = cpu_to_be32(bmval1);
-		*p++ = cpu_to_be32(bmval2);
-	} else if (bmval1) {
-		p = xdr_reserve_space(xdr, 12);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(2);
-		*p++ = cpu_to_be32(bmval0);
-		*p++ = cpu_to_be32(bmval1);
-	} else {
-		p = xdr_reserve_space(xdr, 8);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(1);
-		*p++ = cpu_to_be32(bmval0);
-	}
+	status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
+	if (status)
+		goto out;
 
 	attrlen_offset = xdr->buf->len;
 	p = xdr_reserve_space(xdr, 4);
@@ -2675,6 +2710,9 @@
 		*p++ = cpu_to_be32(stat.mtime.tv_nsec);
 	}
 	if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+		struct kstat parent_stat;
+		u64 ino = stat.ino;
+
 		p = xdr_reserve_space(xdr, 8);
 		if (!p)
                 	goto out_resource;
@@ -2683,25 +2721,25 @@
 		 * and this is the root of a cross-mounted filesystem.
 		 */
 		if (ignore_crossmnt == 0 &&
-		    dentry == exp->ex_path.mnt->mnt_root)
-			get_parent_attributes(exp, &stat);
-		p = xdr_encode_hyper(p, stat.ino);
+		    dentry == exp->ex_path.mnt->mnt_root) {
+			err = get_parent_attributes(exp, &parent_stat);
+			if (err)
+				goto out_nfserr;
+			ino = parent_stat.ino;
+		}
+		p = xdr_encode_hyper(p, ino);
 	}
 #ifdef CONFIG_NFSD_PNFS
-	if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
-	    (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
-		if (exp->ex_layout_type) {
-			p = xdr_reserve_space(xdr, 8);
-			if (!p)
-				goto out_resource;
-			*p++ = cpu_to_be32(1);
-			*p++ = cpu_to_be32(exp->ex_layout_type);
-		} else {
-			p = xdr_reserve_space(xdr, 4);
-			if (!p)
-				goto out_resource;
-			*p++ = cpu_to_be32(0);
-		}
+	if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
+		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+		if (status)
+			goto out;
+	}
+
+	if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
+		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+		if (status)
+			goto out;
 	}
 
 	if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
@@ -2711,21 +2749,20 @@
 		*p++ = cpu_to_be32(stat.blksize);
 	}
 #endif /* CONFIG_NFSD_PNFS */
+	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
+		status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
+						  NFSD_SUPPATTR_EXCLCREAT_WORD1,
+						  NFSD_SUPPATTR_EXCLCREAT_WORD2);
+		if (status)
+			goto out;
+	}
+
 	if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
 		status = nfsd4_encode_security_label(xdr, rqstp, context,
 								contextlen);
 		if (status)
 			goto out;
 	}
-	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
-		p = xdr_reserve_space(xdr, 16);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(3);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD2);
-	}
 
 	attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
 	write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
@@ -3044,13 +3081,12 @@
 	__be32 *p;
 
 	if (!nfserr) {
-		p = xdr_reserve_space(xdr, 32);
+		p = xdr_reserve_space(xdr, 20);
 		if (!p)
 			return nfserr_resource;
-		p = encode_cinfo(p, &create->cr_cinfo);
-		*p++ = cpu_to_be32(2);
-		*p++ = cpu_to_be32(create->cr_bmval[0]);
-		*p++ = cpu_to_be32(create->cr_bmval[1]);
+		encode_cinfo(p, &create->cr_cinfo);
+		nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
+				create->cr_bmval[1], create->cr_bmval[2]);
 	}
 	return nfserr;
 }
@@ -3190,16 +3226,22 @@
 	nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
 	if (nfserr)
 		goto out;
-	p = xdr_reserve_space(xdr, 40);
+	p = xdr_reserve_space(xdr, 24);
 	if (!p)
 		return nfserr_resource;
 	p = encode_cinfo(p, &open->op_cinfo);
 	*p++ = cpu_to_be32(open->op_rflags);
-	*p++ = cpu_to_be32(2);
-	*p++ = cpu_to_be32(open->op_bmval[0]);
-	*p++ = cpu_to_be32(open->op_bmval[1]);
-	*p++ = cpu_to_be32(open->op_delegate_type);
 
+	nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
+					open->op_bmval[2]);
+	if (nfserr)
+		goto out;
+
+	p = xdr_reserve_space(xdr, 4);
+	if (!p)
+		return nfserr_resource;
+
+	*p++ = cpu_to_be32(open->op_delegate_type);
 	switch (open->op_delegate_type) {
 	case NFS4_OPEN_DELEGATE_NONE:
 		break;
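
nfsd4_encode_bitmap() above factors out the bitmap4 wire format (a word count followed by that many mask words, with all-zero high words omitted) that CREATE, OPEN and GETATTR each open-coded. A runnable userspace model of the same layout, under hypothetical names:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Emit a length word, then 1-3 mask words; zero words are dropped
     * only from the top, as in the kernel helper. Returns words written. */
    static size_t encode_bitmap(uint32_t *out, uint32_t w0, uint32_t w1,
                                uint32_t w2)
    {
        size_t n = w2 ? 3 : (w1 ? 2 : 1);

        out[0] = htonl((uint32_t)n);
        out[1] = htonl(w0);
        if (n > 1)
            out[2] = htonl(w1);
        if (n > 2)
            out[3] = htonl(w2);
        return n + 1;
    }

    int main(void)
    {
        uint32_t buf[4];

        printf("%zu\n", encode_bitmap(buf, 0x18, 0x30, 0)); /* prints 3 */
        return 0;
    }
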
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 9277cc9..ad4e237 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -391,6 +391,14 @@
 	return ret;
 }
 
+static struct svc_serv_ops nfsd_thread_sv_ops = {
+	.svo_shutdown		= nfsd_last_thread,
+	.svo_function		= nfsd,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+	.svo_setup		= svc_set_num_threads,
+	.svo_module		= THIS_MODULE,
+};
+
 int nfsd_create_serv(struct net *net)
 {
 	int error;
@@ -405,7 +413,7 @@
 		nfsd_max_blksize = nfsd_get_default_max_blksize();
 	nfsd_reset_versions();
 	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
-				      nfsd_last_thread, nfsd, THIS_MODULE);
+						&nfsd_thread_sv_ops);
 	if (nn->nfsd_serv == NULL)
 		return -ENOMEM;
 
@@ -500,8 +508,8 @@
 	/* apply the new numbers */
 	svc_get(nn->nfsd_serv);
 	for (i = 0; i < n; i++) {
-		err = svc_set_num_threads(nn->nfsd_serv, &nn->nfsd_serv->sv_pools[i],
-				    	  nthreads[i]);
+		err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
+				&nn->nfsd_serv->sv_pools[i], nthreads[i]);
 		if (err)
 			break;
 	}
@@ -540,7 +548,8 @@
 	error = nfsd_startup_net(nrservs, net);
 	if (error)
 		goto out_destroy;
-	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
+	error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
+			NULL, nrservs);
 	if (error)
 		goto out_shutdown;
 	/* We are holding a reference to nn->nfsd_serv which
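
The nfssvc.c hunks above replace the three callback arguments to svc_create_pooled() with one static ops table, so later call sites (thread-count changes, shutdown) dispatch through serv->sv_ops instead of hard-coding svc_set_num_threads(). A generic, compilable sketch of the shape, with hypothetical names:

    struct serv;
    struct pool;

    /* Per-service hooks bundled in one table. */
    struct serv_ops {
        void (*svo_shutdown)(struct serv *serv);
        int  (*svo_setup)(struct serv *serv, struct pool *pool, int n);
    };

    struct serv {
        const struct serv_ops *sv_ops;
    };

    /* Call sites then read: err = serv->sv_ops->svo_setup(serv, NULL, n); */
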
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4874ce5..583ffc1 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -67,8 +67,8 @@
 	struct rpc_message cb_msg;
 	struct nfsd4_callback_ops *cb_ops;
 	struct work_struct cb_work;
+	int cb_seq_status;
 	int cb_status;
-	bool cb_update_seq_nr;
 	bool cb_need_restart;
 };
 
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b5e077a..45c0497 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1249,12 +1249,6 @@
 
 #ifdef CONFIG_NFSD_V3
 
-static inline int nfsd_create_is_exclusive(int createmode)
-{
-	return createmode == NFS3_CREATE_EXCLUSIVE
-	       || createmode == NFS4_CREATE_EXCLUSIVE4_1;
-}
-
 /*
  * NFSv3 and NFSv4 version of nfsd_create
  */
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 5be875e..fee2451 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -131,4 +131,10 @@
 	return nfserrno(vfs_getattr(&p, stat));
 }
 
+static inline int nfsd_create_is_exclusive(int createmode)
+{
+	return createmode == NFS3_CREATE_EXCLUSIVE
+	       || createmode == NFS4_CREATE_EXCLUSIVE4_1;
+}
+
 #endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 44523f4..6faaf71 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -154,6 +154,7 @@
 	struct dnotify_struct *dn;
 	struct dnotify_struct **prev;
 	struct inode *inode;
+	bool free = false;
 
 	inode = file_inode(filp);
 	if (!S_ISDIR(inode->i_mode))
@@ -182,11 +183,15 @@
 
 	/* nothing else could have found us thanks to the dnotify_groups
 	   mark_mutex */
-	if (dn_mark->dn == NULL)
-		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group);
+	if (dn_mark->dn == NULL) {
+		fsnotify_detach_mark(fsn_mark);
+		free = true;
+	}
 
 	mutex_unlock(&dnotify_group->mark_mutex);
 
+	if (free)
+		fsnotify_free_mark(fsn_mark);
 	fsnotify_put_mark(fsn_mark);
 }
 
@@ -362,9 +367,10 @@
 	spin_unlock(&fsn_mark->lock);
 
 	if (destroy)
-		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group);
-
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&dnotify_group->mark_mutex);
+	if (destroy)
+		fsnotify_free_mark(fsn_mark);
 	fsnotify_put_mark(fsn_mark);
 out_err:
 	if (new_fsn_mark)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index cf27550..8e8e6bc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -529,8 +529,10 @@
 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
 						 &destroy_mark);
 	if (destroy_mark)
-		fsnotify_destroy_mark_locked(fsn_mark, group);
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&group->mark_mutex);
+	if (destroy_mark)
+		fsnotify_free_mark(fsn_mark);
 
 	fsnotify_put_mark(fsn_mark);
 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
@@ -557,8 +559,10 @@
 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
 						 &destroy_mark);
 	if (destroy_mark)
-		fsnotify_destroy_mark_locked(fsn_mark, group);
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&group->mark_mutex);
+	if (destroy_mark)
+		fsnotify_free_mark(fsn_mark);
 
 	/* matches the fsnotify_find_inode_mark() */
 	fsnotify_put_mark(fsn_mark);
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 58b7cdb..6b6f0d47 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -76,7 +76,8 @@
 	struct inotify_inode_mark *inode_mark;
 	struct inode *inode;
 
-	if (!(mark->flags & (FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_INODE)))
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE) ||
+	    !(mark->flags & FSNOTIFY_MARK_FLAG_INODE))
 		return;
 
 	inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
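
The fdinfo.c hunk above is a classic bitmask fix worth spelling out: !(flags & (A | B)) bails only when *both* bits are clear, but the intent is to bail unless both are set. A runnable demonstration:

    #include <stdio.h>

    #define FLAG_ALIVE 0x01
    #define FLAG_INODE 0x02

    int main(void)
    {
        unsigned int flags = FLAG_ALIVE;    /* INODE bit missing */

        /* Old test: proceeds because at least one bit is set. */
        if (flags & (FLAG_ALIVE | FLAG_INODE))
            puts("old check: wrongly kept processing");

        /* Fixed test: requires both bits, so this mark is skipped. */
        if (!(flags & FLAG_ALIVE) || !(flags & FLAG_INODE))
            puts("new check: correctly skipped");
        return 0;
    }
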
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index dd3fb0b..db39de2 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -26,7 +26,6 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
-#include "../mount.h"
 
 /*
  * Clear all of the marks on an inode when it is being evicted from core
@@ -205,6 +204,16 @@
 		mnt = NULL;
 
 	/*
+	 * Optimization: srcu_read_lock() has a memory barrier which can
+	 * be expensive.  It protects walking the *_fsnotify_marks lists.
+	 * However, if we do not walk the lists, we do not have to do
+	 * SRCU because we have no references to any objects and do not
+	 * need SRCU to keep them "alive".
+	 */
+	if (hlist_empty(&to_tell->i_fsnotify_marks) &&
+	    (!mnt || hlist_empty(&mnt->mnt_fsnotify_marks)))
+		return 0;
+	/*
 	 * if this is a modify event we may need to clear the ignored masks
 	 * otherwise return if neither the inode nor the vfsmount care about
 	 * this type of event.
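
The hlist_empty() fast path added above lets fsnotify() return before srcu_read_lock() when nothing watches the inode or mount; the unlocked check appears tolerable because an event racing with mark addition could already legitimately be missed. Reduced to its essence:

    /* Sketch: skip the SRCU read lock (and its memory barrier) when
     * both mark lists are observably empty. */
    if (hlist_empty(&to_tell->i_fsnotify_marks) &&
        (!mnt || hlist_empty(&mnt->mnt_fsnotify_marks)))
        return 0;   /* no watchers: nothing to deliver */
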
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 13a00be..b44c68a 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -6,6 +6,8 @@
 #include <linux/srcu.h>
 #include <linux/types.h>
 
+#include "../mount.h"
+
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
@@ -38,15 +40,22 @@
 extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
 /* inode specific destruction of a mark */
 extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
-/* Destroy all marks in the given list */
-extern void fsnotify_destroy_marks(struct list_head *to_free);
 /* Find mark belonging to given group in the list of marks */
 extern struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 						struct fsnotify_group *group);
-/* run the list of all marks associated with inode and flag them to be freed */
-extern void fsnotify_clear_marks_by_inode(struct inode *inode);
-/* run the list of all marks associated with vfsmount and flag them to be freed */
-extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
+/* Destroy all marks in the given list protected by 'lock' */
+extern void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock);
+/* run the list of all marks associated with inode and destroy them */
+static inline void fsnotify_clear_marks_by_inode(struct inode *inode)
+{
+	fsnotify_destroy_marks(&inode->i_fsnotify_marks, &inode->i_lock);
+}
+/* run the list of all marks associated with vfsmount and destroy them */
+static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
+{
+	fsnotify_destroy_marks(&real_mount(mnt)->mnt_fsnotify_marks,
+			       &mnt->mnt_root->d_lock);
+}
 /*
  * update the dentry->d_flags of all of inode's children to indicate if inode cares
  * about events that happen to its children.
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 3daf513..e785fd9 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -65,26 +65,6 @@
 }
 
 /*
- * Given an inode, destroy all of the marks associated with that inode.
- */
-void fsnotify_clear_marks_by_inode(struct inode *inode)
-{
-	struct fsnotify_mark *mark;
-	struct hlist_node *n;
-	LIST_HEAD(free_list);
-
-	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, obj_list) {
-		list_add(&mark->free_list, &free_list);
-		hlist_del_init_rcu(&mark->obj_list);
-		fsnotify_get_mark(mark);
-	}
-	spin_unlock(&inode->i_lock);
-
-	fsnotify_destroy_marks(&free_list);
-}
-
-/*
  * Given a group clear all of the inode marks associated with that group.
  */
 void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
@@ -163,17 +143,17 @@
 
 /**
  * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
- * @list: list of inodes being unmounted (sb->s_inodes)
+ * @sb: superblock being unmounted.
  *
  * Called during unmount with no locks held, so needs to be safe against
- * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
+ * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
  */
-void fsnotify_unmount_inodes(struct list_head *list)
+void fsnotify_unmount_inodes(struct super_block *sb)
 {
 	struct inode *inode, *next_i, *need_iput = NULL;
 
-	spin_lock(&inode_sb_list_lock);
-	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
+	spin_lock(&sb->s_inode_list_lock);
+	list_for_each_entry_safe(inode, next_i, &sb->s_inodes, i_sb_list) {
 		struct inode *need_iput_tmp;
 
 		/*
@@ -209,7 +189,7 @@
 		spin_unlock(&inode->i_lock);
 
 		/* In case the dropping of a reference would nuke next_i. */
-		while (&next_i->i_sb_list != list) {
+		while (&next_i->i_sb_list != &sb->s_inodes) {
 			spin_lock(&next_i->i_lock);
 			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
 						atomic_read(&next_i->i_count)) {
@@ -224,12 +204,12 @@
 		}
 
 		/*
-		 * We can safely drop inode_sb_list_lock here because either
+		 * We can safely drop s_inode_list_lock here because either
 		 * we actually hold references on both inode and next_i or
 		 * end of list.  Also no new inodes will be added since the
 		 * umount has begun.
 		 */
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&sb->s_inode_list_lock);
 
 		if (need_iput_tmp)
 			iput(need_iput_tmp);
@@ -241,7 +221,7 @@
 
 		iput(inode);
 
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&sb->s_inode_list_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 }
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 39ddcaf..fc0df44 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -122,26 +122,27 @@
 }
 
 /*
- * Any time a mark is getting freed we end up here.
- * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the mark->lock
+ * Remove mark from inode / vfsmount list, group list, drop inode reference
+ * if we got one.
+ *
+ * Must be called with group->mark_mutex held.
  */
-void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
-				  struct fsnotify_group *group)
+void fsnotify_detach_mark(struct fsnotify_mark *mark)
 {
 	struct inode *inode = NULL;
+	struct fsnotify_group *group = mark->group;
 
 	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 
 	spin_lock(&mark->lock);
 
 	/* something else already called this function on this mark */
-	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		spin_unlock(&mark->lock);
 		return;
 	}
 
-	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
 
 	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
 		inode = mark->inode;
@@ -150,6 +151,12 @@
 		fsnotify_destroy_vfsmount_mark(mark);
 	else
 		BUG();
+	/*
+	 * Note that we didn't update flags telling whether inode cares about
+	 * what's happening with children. We update these flags from
+	 * __fsnotify_parent() lazily when next event happens on one of our
+	 * children.
+	 */
 
 	list_del_init(&mark->g_list);
 
@@ -157,18 +164,32 @@
 
 	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
 		iput(inode);
-	/* release lock temporarily */
-	mutex_unlock(&group->mark_mutex);
+
+	atomic_dec(&group->num_marks);
+}
+
+/*
+ * Free fsnotify mark. The freeing actually happens from a kthread which
+ * first waits for the SRCU grace period to end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
+ */
+void fsnotify_free_mark(struct fsnotify_mark *mark)
+{
+	struct fsnotify_group *group = mark->group;
+
+	spin_lock(&mark->lock);
+	/* something else already called this function on this mark */
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
+		spin_unlock(&mark->lock);
+		return;
+	}
+	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+	spin_unlock(&mark->lock);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
-	/*
-	 * We don't necessarily have a ref on mark from caller so the above destroy
-	 * may have actually freed it, unless this group provides a 'freeing_mark'
-	 * function which must be holding a reference.
-	 */
 
 	/*
 	 * Some groups like to know that marks are being freed.  This is a
@@ -177,50 +198,45 @@
 	 */
 	if (group->ops->freeing_mark)
 		group->ops->freeing_mark(mark, group);
-
-	/*
-	 * __fsnotify_update_child_dentry_flags(inode);
-	 *
-	 * I really want to call that, but we can't, we have no idea if the inode
-	 * still exists the second we drop the mark->lock.
-	 *
-	 * The next time an event arrive to this inode from one of it's children
-	 * __fsnotify_parent will see that the inode doesn't care about it's
-	 * children and will update all of these flags then.  So really this
-	 * is just a lazy update (and could be a perf win...)
-	 */
-
-	atomic_dec(&group->num_marks);
-
-	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 			   struct fsnotify_group *group)
 {
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
-	fsnotify_destroy_mark_locked(mark, group);
+	fsnotify_detach_mark(mark);
 	mutex_unlock(&group->mark_mutex);
+	fsnotify_free_mark(mark);
 }
 
-/*
- * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount.
- */
-void fsnotify_destroy_marks(struct list_head *to_free)
+void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock)
 {
-	struct fsnotify_mark *mark, *lmark;
-	struct fsnotify_group *group;
+	struct fsnotify_mark *mark;
 
-	list_for_each_entry_safe(mark, lmark, to_free, free_list) {
-		spin_lock(&mark->lock);
-		fsnotify_get_group(mark->group);
-		group = mark->group;
-		spin_unlock(&mark->lock);
-
-		fsnotify_destroy_mark(mark, group);
+	while (1) {
+		/*
+		 * We have to be careful since we can race with e.g.
+		 * fsnotify_clear_marks_by_group() and once we drop 'lock',
+		 * mark can get removed from the obj_list and destroyed. But
+		 * we are holding mark reference so mark cannot be freed and
+		 * calling fsnotify_destroy_mark() more than once is fine.
+		 */
+		spin_lock(lock);
+		if (hlist_empty(head)) {
+			spin_unlock(lock);
+			break;
+		}
+		mark = hlist_entry(head->first, struct fsnotify_mark, obj_list);
+		/*
+		 * We don't update i_fsnotify_mask / mnt_fsnotify_mask here
+		 * since inode / mount is going away anyway. So just remove
+		 * mark from the list.
+		 */
+		hlist_del_init_rcu(&mark->obj_list);
+		fsnotify_get_mark(mark);
+		spin_unlock(lock);
+		fsnotify_destroy_mark(mark, mark->group);
 		fsnotify_put_mark(mark);
-		fsnotify_put_group(group);
 	}
 }
 
@@ -332,7 +348,7 @@
 	 * inode->i_lock
 	 */
 	spin_lock(&mark->lock);
-	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
+	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;
 
 	fsnotify_get_group(group);
 	mark->group = group;
@@ -438,8 +454,9 @@
 		}
 		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
 		fsnotify_get_mark(mark);
-		fsnotify_destroy_mark_locked(mark, group);
+		fsnotify_detach_mark(mark);
 		mutex_unlock(&group->mark_mutex);
+		fsnotify_free_mark(mark);
 		fsnotify_put_mark(mark);
 	}
 }
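
The mark.c rewrite above splits destruction into two phases: fsnotify_detach_mark() unlinks the mark from its object and group under group->mark_mutex, and fsnotify_free_mark() queues it for SRCU-delayed freeing without the mutex, eliminating the old unlock/relock window inside fsnotify_destroy_mark_locked(). The resulting calling convention, as fsnotify_destroy_mark() itself now shows (the final put is the caller's own reference, dropped separately):

    mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
    fsnotify_detach_mark(mark);     /* phase 1: unlink, mutex held */
    mutex_unlock(&group->mark_mutex);
    fsnotify_free_mark(mark);       /* phase 2: SRCU-delayed free, no mutex */
    fsnotify_put_mark(mark);        /* drop the reference we held across it */
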
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 326b148..a8fcab6 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -28,25 +28,6 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
-#include "../mount.h"
-
-void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
-{
-	struct fsnotify_mark *mark;
-	struct hlist_node *n;
-	struct mount *m = real_mount(mnt);
-	LIST_HEAD(free_list);
-
-	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, obj_list) {
-		list_add(&mark->free_list, &free_list);
-		hlist_del_init_rcu(&mark->obj_list);
-		fsnotify_get_mark(mark);
-	}
-	spin_unlock(&mnt->mnt_root->d_lock);
-
-	fsnotify_destroy_marks(&free_list);
-}
 
 void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
 {
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index c1128bc..d1a8535 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2204,17 +2204,12 @@
 	return true;
 #ifdef NTFS_RW
 iput_usnjrnl_err_out:
-	if (vol->usnjrnl_j_ino)
-		iput(vol->usnjrnl_j_ino);
-	if (vol->usnjrnl_max_ino)
-		iput(vol->usnjrnl_max_ino);
-	if (vol->usnjrnl_ino)
-		iput(vol->usnjrnl_ino);
+	iput(vol->usnjrnl_j_ino);
+	iput(vol->usnjrnl_max_ino);
+	iput(vol->usnjrnl_ino);
 iput_quota_err_out:
-	if (vol->quota_q_ino)
-		iput(vol->quota_q_ino);
-	if (vol->quota_ino)
-		iput(vol->quota_ino);
+	iput(vol->quota_q_ino);
+	iput(vol->quota_ino);
 	iput(vol->extend_ino);
 #endif /* NTFS_RW */
 iput_sec_err_out:
@@ -2223,8 +2218,7 @@
 	iput(vol->root_ino);
 iput_logfile_err_out:
 #ifdef NTFS_RW
-	if (vol->logfile_ino)
-		iput(vol->logfile_ino);
+	iput(vol->logfile_ino);
 iput_vol_err_out:
 #endif /* NTFS_RW */
 	iput(vol->vol_ino);
@@ -2254,8 +2248,7 @@
 	iput(vol->mftbmp_ino);
 iput_mirr_err_out:
 #ifdef NTFS_RW
-	if (vol->mftmirr_ino)
-		iput(vol->mftmirr_ino);
+	iput(vol->mftmirr_ino);
 #endif /* NTFS_RW */
 	return false;
 }
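
The ntfs/super.c cleanups rely on iput() being, like kfree(), a no-op when passed NULL, so every guard on the unwind ladder can be dropped:

    /* Before and after, equivalent behavior: */
    if (vol->logfile_ino)
        iput(vol->logfile_ino);     /* redundant guard */

    iput(vol->logfile_ino);         /* iput(NULL) simply returns */
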
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index c58a1bc..0cdf497 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -284,7 +284,19 @@
 
 int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
-	return ocfs2_set_acl(NULL, inode, NULL, type, acl, NULL, NULL);
+	struct buffer_head *bh = NULL;
+	int status = 0;
+
+	status = ocfs2_inode_lock(inode, &bh, 1);
+	if (status < 0) {
+		if (status != -ENOENT)
+			mlog_errno(status);
+		return status;
+	}
+	status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
+	ocfs2_inode_unlock(inode, 1);
+	brelse(bh);
+	return status;
 }
 
 struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
@@ -292,19 +304,21 @@
 	struct ocfs2_super *osb;
 	struct buffer_head *di_bh = NULL;
 	struct posix_acl *acl;
-	int ret = -EAGAIN;
+	int ret;
 
 	osb = OCFS2_SB(inode->i_sb);
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
 		return NULL;
-
-	ret = ocfs2_read_inode_block(inode, &di_bh);
-	if (ret < 0)
+	ret = ocfs2_inode_lock(inode, &di_bh, 0);
+	if (ret < 0) {
+		if (ret != -ENOENT)
+			mlog_errno(ret);
 		return ERR_PTR(ret);
+	}
 
 	acl = ocfs2_get_acl_nolock(inode, type, di_bh);
 
+	ocfs2_inode_unlock(inode, 0);
 	brelse(di_bh);
-
 	return acl;
 }
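
Both ocfs2 ACL entry points now take the cluster-aware inode lock around the inode-block read instead of reading it unlocked, keeping the metadata coherent across nodes. The bracketing pattern, simplified from the get path above:

    struct buffer_head *di_bh = NULL;
    int ret = ocfs2_inode_lock(inode, &di_bh, 0);   /* 0 = read, 1 = write */
    if (ret < 0)
        return ERR_PTR(ret);

    acl = ocfs2_get_acl_nolock(inode, type, di_bh); /* read under the lock */

    ocfs2_inode_unlock(inode, 0);                   /* level must match lock */
    brelse(di_bh);                                  /* release the buffer head */
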
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 5997c00..86181d6 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -908,32 +908,30 @@
 	 */
 
 	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has bad signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    eb->h_signature);
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has bad signature %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 eb->h_signature);
+		goto bail;
 	}
 
 	if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has an invalid h_blkno "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(eb->h_blkno));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has an invalid h_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(eb->h_blkno));
+		goto bail;
 	}
 
 	if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has an invalid "
-			    "h_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(eb->h_fs_generation));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has an invalid h_fs_generation of #%u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(eb->h_fs_generation));
+		goto bail;
 	}
-
-	return 0;
+bail:
+	return rc;
 }
 
 int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
@@ -1446,8 +1444,7 @@
 	while(le16_to_cpu(el->l_tree_depth) > 1) {
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has empty "
-				    "extent list (next_free_rec == 0)",
+				    "Owner %llu has empty extent list (next_free_rec == 0)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
 			status = -EIO;
 			goto bail;
@@ -1456,9 +1453,7 @@
 		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
 		if (!blkno) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has extent "
-				    "list where extent # %d has no physical "
-				    "block start",
+				    "Owner %llu has extent list where extent # %d has no physical block start\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
 			status = -EIO;
 			goto bail;
@@ -1788,8 +1783,7 @@
 	while (el->l_tree_depth) {
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has empty extent list at "
-				    "depth %u\n",
+				    "Owner %llu has empty extent list at depth %u\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    le16_to_cpu(el->l_tree_depth));
 			ret = -EROFS;
@@ -1814,8 +1808,7 @@
 		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
 		if (blkno == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has bad blkno in extent list "
-				    "at depth %u (index %d)\n",
+				    "Owner %llu has bad blkno in extent list at depth %u (index %d)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    le16_to_cpu(el->l_tree_depth), i);
 			ret = -EROFS;
@@ -1836,8 +1829,7 @@
 		if (le16_to_cpu(el->l_next_free_rec) >
 		    le16_to_cpu(el->l_count)) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has bad count in extent list "
-				    "at block %llu (next free=%u, count=%u)\n",
+				    "Owner %llu has bad count in extent list at block %llu (next free=%u, count=%u)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    (unsigned long long)bh->b_blocknr,
 				    le16_to_cpu(el->l_next_free_rec),
@@ -2116,8 +2108,7 @@
 
 	if (left_el->l_next_free_rec != left_el->l_count) {
 		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-			    "Inode %llu has non-full interior leaf node %llu"
-			    "(next free = %u)",
+			    "Inode %llu has non-full interior leaf node %llu (next free = %u)\n",
 			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 			    (unsigned long long)left_leaf_bh->b_blocknr,
 			    le16_to_cpu(left_el->l_next_free_rec));
@@ -2256,8 +2247,7 @@
 		 * If we got here, we never found a valid node where
 		 * the tree indicated one should be.
 		 */
-		ocfs2_error(sb,
-			    "Invalid extent tree at extent block %llu\n",
+		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
 			    (unsigned long long)blkno);
 		ret = -EROFS;
 		goto out;
@@ -2872,8 +2862,7 @@
 		 * If we got here, we never found a valid node where
 		 * the tree indicated one should be.
 		 */
-		ocfs2_error(sb,
-			    "Invalid extent tree at extent block %llu\n",
+		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
 			    (unsigned long long)blkno);
 		ret = -EROFS;
 		goto out;
@@ -3131,6 +3120,30 @@
 	return ret;
 }
 
+static int ocfs2_remove_rightmost_empty_extent(struct ocfs2_super *osb,
+				struct ocfs2_extent_tree *et,
+				struct ocfs2_path *path,
+				struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+	handle_t *handle;
+	int ret;
+	int credits = path->p_tree_depth * 2 + 1;
+
+	handle = ocfs2_start_trans(osb, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		return ret;
+	}
+
+	ret = ocfs2_remove_rightmost_path(handle, et, path, dealloc);
+	if (ret)
+		mlog_errno(ret);
+
+	ocfs2_commit_trans(osb, handle);
+	return ret;
+}
+
 /*
  * Left rotation of btree records.
  *
@@ -3200,7 +3213,7 @@
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ret = -EIO;
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has empty extent block at %llu",
+				    "Owner %llu has empty extent block at %llu\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    (unsigned long long)le64_to_cpu(eb->h_blkno));
 			goto out;
@@ -3930,7 +3943,7 @@
 		next_free = le16_to_cpu(el->l_next_free_rec);
 		if (next_free == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has a bad extent list",
+				    "Owner %llu has a bad extent list\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
 			ret = -EIO;
 			return;
@@ -4355,10 +4368,7 @@
 				bh = path_leaf_bh(left_path);
 				eb = (struct ocfs2_extent_block *)bh->b_data;
 				ocfs2_error(sb,
-					    "Extent block #%llu has an "
-					    "invalid l_next_free_rec of "
-					    "%d.  It should have "
-					    "matched the l_count of %d",
+					    "Extent block #%llu has an invalid l_next_free_rec of %d.  It should have matched the l_count of %d\n",
 					    (unsigned long long)le64_to_cpu(eb->h_blkno),
 					    le16_to_cpu(new_el->l_next_free_rec),
 					    le16_to_cpu(new_el->l_count));
@@ -4413,8 +4423,7 @@
 				bh = path_leaf_bh(right_path);
 				eb = (struct ocfs2_extent_block *)bh->b_data;
 				ocfs2_error(sb,
-					    "Extent block #%llu has an "
-					    "invalid l_next_free_rec of %d",
+					    "Extent block #%llu has an invalid l_next_free_rec of %d\n",
 					    (unsigned long long)le64_to_cpu(eb->h_blkno),
 					    le16_to_cpu(new_el->l_next_free_rec));
 				status = -EINVAL;
@@ -4970,10 +4979,9 @@
 		split_index = ocfs2_search_extent_list(el, cpos);
 		if (split_index == -1) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-					"Owner %llu has an extent at cpos %u "
-					"which can no longer be found.\n",
-					(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
-					cpos);
+				    "Owner %llu has an extent at cpos %u which can no longer be found\n",
+				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+				    cpos);
 			ret = -EROFS;
 			goto out;
 		}
@@ -5158,10 +5166,9 @@
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
 		ocfs2_error(sb,
-			    "Owner %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			     (unsigned long long)
-			     ocfs2_metadata_cache_owner(et->et_ci), cpos);
+			    "Owner %llu has an extent at cpos %u which can no longer be found\n",
+			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+			    cpos);
 		ret = -EROFS;
 		goto out;
 	}
@@ -5228,9 +5235,7 @@
 		cpos, len, phys);
 
 	if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
-			    "that are being written to, but the feature bit "
-			    "is not set in the super block.",
+		ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents that are being written to, but the feature bit is not set in the super block\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		ret = -EROFS;
 		goto out;
@@ -5514,8 +5519,7 @@
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
 		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-			    "Owner %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
+			    "Owner %llu has an extent at cpos %u which can no longer be found\n",
 			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 			    cpos);
 		ret = -EROFS;
@@ -5580,7 +5584,7 @@
 		index = ocfs2_search_extent_list(el, cpos);
 		if (index == -1) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu: split at cpos %u lost record.",
+				    "Owner %llu: split at cpos %u lost record\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    cpos);
 			ret = -EROFS;
@@ -5596,8 +5600,7 @@
 			ocfs2_rec_clusters(el, rec);
 		if (rec_range != trunc_range) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu: error after split at cpos %u"
-				    "trunc len %u, existing record is (%u,%u)",
+				    "Owner %llu: error after split at cpos %u trunc len %u, existing record is (%u,%u)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    cpos, len, le32_to_cpu(rec->e_cpos),
 				    ocfs2_rec_clusters(el, rec));
@@ -6175,7 +6178,7 @@
 		iput(tl_inode);
 	brelse(tl_bh);
 
-	if (status < 0 && (*tl_copy)) {
+	if (status < 0) {
 		kfree(*tl_copy);
 		*tl_copy = NULL;
 		mlog_errno(status);
@@ -7108,15 +7111,23 @@
 		 * to check it up here before changing the tree.
 		*/
 		if (root_el->l_tree_depth && rec->e_int_clusters == 0) {
-			ocfs2_error(inode->i_sb, "Inode %lu has an empty "
+			mlog(ML_ERROR, "Inode %lu has an empty "
 				    "extent record, depth %u\n", inode->i_ino,
 				    le16_to_cpu(root_el->l_tree_depth));
-			status = -EROFS;
-			goto bail;
+			status = ocfs2_remove_rightmost_empty_extent(osb,
+					&et, path, &dealloc);
+			if (status) {
+				mlog_errno(status);
+				goto bail;
+			}
+
+			ocfs2_reinit_path(path, 1);
+			goto start;
+		} else {
+			trunc_cpos = le32_to_cpu(rec->e_cpos);
+			trunc_len = 0;
+			blkno = 0;
 		}
-		trunc_cpos = le32_to_cpu(rec->e_cpos);
-		trunc_len = 0;
-		blkno = 0;
 	} else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) {
 		/*
 		 * Truncate entire record.
@@ -7204,8 +7215,7 @@
 	    !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
 	    !ocfs2_supports_inline_data(osb)) {
 		ocfs2_error(inode->i_sb,
-			    "Inline data flags for inode %llu don't agree! "
-			    "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
+			    "Inline data flags for inode %llu don't agree! Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    le16_to_cpu(di->i_dyn_features),
 			    OCFS2_I(inode)->ip_dyn_features,
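
The new ocfs2_remove_rightmost_empty_extent() above runs
ocfs2_remove_rightmost_path() under its own journal handle, which lets
ocfs2_commit_truncate() retire an empty rightmost extent and restart the
scan instead of failing with -EROFS.  A minimal sketch of the
start/commit discipline it follows; do_journaled_work() is a stand-in
for the journaled body:

	static int run_in_transaction(struct ocfs2_super *osb, int credits)
	{
		handle_t *handle;
		int ret;

		handle = ocfs2_start_trans(osb, credits);
		if (IS_ERR(handle))
			return PTR_ERR(handle);	/* nothing to commit yet */

		ret = do_journaled_work(handle);	/* stand-in */

		/* commit unconditionally so the handle is never leaked */
		ocfs2_commit_trans(osb, handle);
		return ret;
	}
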
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0f5fd9d..64b11d9 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -227,7 +227,7 @@
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
 	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
-		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
+		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		return -EROFS;
 	}
@@ -237,7 +237,7 @@
 	if (size > PAGE_CACHE_SIZE ||
 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
 		ocfs2_error(inode->i_sb,
-			    "Inode %llu has with inline data has bad size: %Lu",
+			    "Inode %llu with inline data has bad size: %Lu\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    (unsigned long long)size);
 		return -EROFS;
@@ -533,10 +533,14 @@
 
 	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
 
+	down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
 	/* This figures out the size of the next contiguous block, and
 	 * our logical offset */
 	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
 					  &contig_blocks, &ext_flags);
+	up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
 	if (ret) {
 		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
 		     (unsigned long long)iblock);
@@ -557,6 +561,8 @@
 
 		alloc_locked = 1;
 
+		down_write(&OCFS2_I(inode)->ip_alloc_sem);
+
 		/* fill hole, allocate blocks can't be larger than the size
 		 * of the hole */
 		clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
@@ -569,6 +575,7 @@
 		ret = ocfs2_extend_allocation(inode, cpos,
 				clusters_to_alloc, 0);
 		if (ret < 0) {
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
 			mlog_errno(ret);
 			goto bail;
 		}
@@ -576,11 +583,13 @@
 		ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
 				&contig_blocks, &ext_flags);
 		if (ret < 0) {
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
 			mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
 					(unsigned long long)iblock);
 			ret = -EIO;
 			goto bail;
 		}
+		up_write(&OCFS2_I(inode)->ip_alloc_sem);
 	}
 
 	/*
@@ -627,10 +636,13 @@
 		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
 	}
 
-	ocfs2_iocb_clear_rw_locked(iocb);
+	/* Leave the rw unlock to be done later to protect append direct I/O writes */
+	if (offset + bytes <= i_size_read(inode)) {
+		ocfs2_iocb_clear_rw_locked(iocb);
 
-	level = ocfs2_iocb_rw_locked_level(iocb);
-	ocfs2_rw_unlock(inode, level);
+		level = ocfs2_iocb_rw_locked_level(iocb);
+		ocfs2_rw_unlock(inode, level);
+	}
 }
 
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
@@ -832,12 +844,17 @@
 
 		/* zeroing out the previously allocated cluster tail
 		 * that was not zeroed */
-		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
+			down_read(&OCFS2_I(inode)->ip_alloc_sem);
 			ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
 					zero_len_tail, cluster_align_tail);
-		else
+			up_read(&OCFS2_I(inode)->ip_alloc_sem);
+		} else {
+			down_write(&OCFS2_I(inode)->ip_alloc_sem);
 			ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
 					offset);
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
+		}
 		if (ret < 0) {
 			mlog_errno(ret);
 			ocfs2_inode_unlock(inode, 1);
@@ -857,7 +874,8 @@
 	written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
 				       offset, ocfs2_direct_IO_get_blocks,
 				       ocfs2_dio_end_io, NULL, 0);
-	if (unlikely(written < 0)) {
+	/* overwrite aio may return -EIOCBQUEUED, and it is not an error */
+	if ((written < 0) && (written != -EIOCBQUEUED)) {
 		loff_t i_size = i_size_read(inode);
 
 		if (offset + count > i_size) {
@@ -876,12 +894,14 @@
 
 					ocfs2_inode_unlock(inode, 1);
 					brelse(di_bh);
+					di_bh = NULL;
 					goto clean_orphan;
 				}
 			}
 
 			ocfs2_inode_unlock(inode, 1);
 			brelse(di_bh);
+			di_bh = NULL;
 
 			ret = jbd2_journal_force_commit(journal);
 			if (ret < 0)
@@ -936,10 +956,12 @@
 		if (tmp_ret < 0) {
 			ret = tmp_ret;
 			mlog_errno(ret);
+			brelse(di_bh);
 			goto out;
 		}
 
 		ocfs2_inode_unlock(inode, 1);
+		brelse(di_bh);
 
 		tmp_ret = jbd2_journal_force_commit(journal);
 		if (tmp_ret < 0) {
@@ -2185,10 +2207,7 @@
 		if (ret)
 			goto out_commit;
 	}
-	/*
-	 * We don't want this to fail in ocfs2_write_end(), so do it
-	 * here.
-	 */
+
 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
 				      OCFS2_JOURNAL_ACCESS_WRITE);
 	if (ret) {
@@ -2345,7 +2364,7 @@
 			   loff_t pos, unsigned len, unsigned copied,
 			   struct page *page, void *fsdata)
 {
-	int i;
+	int i, ret;
 	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2354,6 +2373,14 @@
 	handle_t *handle = wc->w_handle;
 	struct page *tmppage;
 
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
+			OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		copied = ret;
+		mlog_errno(ret);
+		goto out;
+	}
+
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
 		goto out_write_size;
@@ -2409,6 +2436,7 @@
 	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 	ocfs2_journal_dirty(handle, wc->w_di_bh);
 
+out:
 	/* unlock pages before dealloc since it needs acquiring j_trans_barrier
 	 * lock, or it will cause a deadlock since journal commit threads holds
 	 * this lock and will ask for the page lock when flushing the data.
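
The direct I/O path above now holds ip_alloc_sem across every extent-map
lookup: shared for plain lookups, exclusive around hole-filling
allocation and the re-lookup that follows it.  A sketch of that
reader/writer discipline, where oi stands for OCFS2_I(inode) and
hole_needs_fill is a stand-in for the ext_flags check:

	down_read(&oi->ip_alloc_sem);
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	up_read(&oi->ip_alloc_sem);

	if (!ret && hole_needs_fill) {	/* stand-in condition */
		down_write(&oi->ip_alloc_sem);
		ret = ocfs2_extend_allocation(inode, cpos, clusters_to_alloc, 0);
		if (!ret)	/* re-map under the same write lock */
			ret = ocfs2_extent_map_get_blocks(inode, iblock,
					&p_blkno, &contig_blocks, &ext_flags);
		up_write(&oi->ip_alloc_sem);
	}
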
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 1edcb14..fe50ded 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -316,6 +316,12 @@
 		bh = bhs[i];
 
 		if (!(flags & OCFS2_BH_READAHEAD)) {
+			if (status) {
+				/* Clear the rest of the buffers on error */
+				put_bh(bh);
+				bhs[i] = NULL;
+				continue;
+			}
 			/* We know this can't have changed as we hold the
 			 * owner sem. Avoid doing any work on the bh if the
 			 * journal has it. */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 140de3c..fa15deb 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -36,7 +36,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
-
+#include <linux/ktime.h>
 #include "heartbeat.h"
 #include "tcp.h"
 #include "nodemanager.h"
@@ -1060,37 +1060,6 @@
 	return ret;
 }
 
-/* Subtract b from a, storing the result in a. a *must* have a larger
- * value than b. */
-static void o2hb_tv_subtract(struct timeval *a,
-			     struct timeval *b)
-{
-	/* just return 0 when a is after b */
-	if (a->tv_sec < b->tv_sec ||
-	    (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
-		a->tv_sec = 0;
-		a->tv_usec = 0;
-		return;
-	}
-
-	a->tv_sec -= b->tv_sec;
-	a->tv_usec -= b->tv_usec;
-	while ( a->tv_usec < 0 ) {
-		a->tv_sec--;
-		a->tv_usec += 1000000;
-	}
-}
-
-static unsigned int o2hb_elapsed_msecs(struct timeval *start,
-				       struct timeval *end)
-{
-	struct timeval res = *end;
-
-	o2hb_tv_subtract(&res, start);
-
-	return res.tv_sec * 1000 + res.tv_usec / 1000;
-}
-
 /*
  * we ride the region ref that the region dir holds.  before the region
  * dir is removed and drops it ref it will wait to tear down this
@@ -1101,7 +1070,7 @@
 	int i, ret;
 	struct o2hb_region *reg = data;
 	struct o2hb_bio_wait_ctxt write_wc;
-	struct timeval before_hb, after_hb;
+	ktime_t before_hb, after_hb;
 	unsigned int elapsed_msec;
 
 	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
@@ -1118,18 +1087,18 @@
 		 * hr_timeout_ms between disk writes. On busy systems
 		 * this should result in a heartbeat which is less
 		 * likely to time itself out. */
-		do_gettimeofday(&before_hb);
+		before_hb = ktime_get_real();
 
 		ret = o2hb_do_disk_heartbeat(reg);
 
-		do_gettimeofday(&after_hb);
-		elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
+		after_hb = ktime_get_real();
+
+		elapsed_msec = (unsigned int)
+				ktime_ms_delta(after_hb, before_hb);
 
 		mlog(ML_HEARTBEAT,
-		     "start = %lu.%lu, end = %lu.%lu, msec = %u, ret = %d\n",
-		     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
-		     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
-		     elapsed_msec, ret);
+		     "start = %lld, end = %lld, msec = %u, ret = %d\n",
+		     before_hb.tv64, after_hb.tv64, elapsed_msec, ret);
 
 		if (!kthread_should_stop() &&
 		    elapsed_msec < reg->hr_timeout_ms) {
@@ -1619,17 +1588,13 @@
 	struct o2hb_disk_slot *slot;
 
 	reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
-	if (reg->hr_tmp_block == NULL) {
-		mlog_errno(-ENOMEM);
+	if (reg->hr_tmp_block == NULL)
 		return -ENOMEM;
-	}
 
 	reg->hr_slots = kcalloc(reg->hr_blocks,
 				sizeof(struct o2hb_disk_slot), GFP_KERNEL);
-	if (reg->hr_slots == NULL) {
-		mlog_errno(-ENOMEM);
+	if (reg->hr_slots == NULL)
 		return -ENOMEM;
-	}
 
 	for(i = 0; i < reg->hr_blocks; i++) {
 		slot = &reg->hr_slots[i];
@@ -1645,17 +1610,13 @@
 
 	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
 				    GFP_KERNEL);
-	if (!reg->hr_slot_data) {
-		mlog_errno(-ENOMEM);
+	if (!reg->hr_slot_data)
 		return -ENOMEM;
-	}
 
 	for(i = 0; i < reg->hr_num_pages; i++) {
 		page = alloc_page(GFP_KERNEL);
-		if (!page) {
-			mlog_errno(-ENOMEM);
+		if (!page)
 			return -ENOMEM;
-		}
 
 		reg->hr_slot_data[i] = page;
 
@@ -1687,10 +1648,8 @@
 	struct o2hb_disk_heartbeat_block *hb_block;
 
 	ret = o2hb_read_slots(reg, reg->hr_blocks);
-	if (ret) {
-		mlog_errno(ret);
+	if (ret)
 		goto out;
-	}
 
 	/* We only want to get an idea of the values initially in each
 	 * slot, so we do no verification - o2hb_check_slot will
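
The heartbeat thread drops its hand-rolled timeval subtraction in favour
of ktime.  A minimal sketch of the replacement idiom; ktime_get_real()
and ktime_ms_delta() are the stock helpers from <linux/ktime.h>, and
do_heartbeat() is a stand-in for the disk write:

	ktime_t before, after;
	unsigned int elapsed_msec;

	before = ktime_get_real();
	ret = do_heartbeat();		/* stand-in */
	after = ktime_get_real();

	/* ktime_ms_delta() returns s64 milliseconds; the cast matches
	 * the unsigned int the heartbeat loop keeps. */
	elapsed_msec = (unsigned int)ktime_ms_delta(after, before);
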
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 02878a8..ffecf89 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -480,33 +480,26 @@
 
 	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Invalid dirblock #%llu: "
-			    "signature = %.*s\n",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    trailer->db_signature);
+		rc = ocfs2_error(dir->i_sb,
+				 "Invalid dirblock #%llu: signature = %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 trailer->db_signature);
 		goto out;
 	}
 	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Directory block #%llu has an invalid "
-			    "db_blkno of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(trailer->db_blkno));
+		rc = ocfs2_error(dir->i_sb,
+				 "Directory block #%llu has an invalid db_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 		goto out;
 	}
 	if (le64_to_cpu(trailer->db_parent_dinode) !=
 	    OCFS2_I(dir)->ip_blkno) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Directory block #%llu on dinode "
-			    "#%llu has an invalid parent_dinode "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)OCFS2_I(dir)->ip_blkno,
-			    (unsigned long long)le64_to_cpu(trailer->db_blkno));
+		rc = ocfs2_error(dir->i_sb,
+				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
+				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 		goto out;
 	}
 out:
@@ -604,14 +597,13 @@
 	}
 
 	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
-		ocfs2_error(sb,
-			    "Dir Index Root # %llu has bad signature %.*s",
-			    (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
-			    7, dx_root->dr_signature);
-		return -EINVAL;
+		ret = ocfs2_error(sb,
+				  "Dir Index Root # %llu has bad signature %.*s\n",
+				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
+				  7, dx_root->dr_signature);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
@@ -648,12 +640,11 @@
 	}
 
 	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
-		ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s",
-			    7, dx_leaf->dl_signature);
-		return -EROFS;
+		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
+				  7, dx_leaf->dl_signature);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
@@ -812,11 +803,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "btree tree block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in btree block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -832,11 +822,11 @@
 	}
 
 	if (!found) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0) in btree", inode->i_ino,
-			    le32_to_cpu(rec->e_cpos),
-			    ocfs2_rec_clusters(el, rec));
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb,
+				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
+				  inode->i_ino,
+				  le32_to_cpu(rec->e_cpos),
+				  ocfs2_rec_clusters(el, rec));
 		goto out;
 	}
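
These hunks lean on ocfs2_error() now returning an errno, so the old
"log, set rc, goto" sequence collapses into a single assignment or
return.  A hedged sketch of the resulting validator shape (the exact
errno is whatever ocfs2_error() settles on, -EROFS style; signature_ok()
is a stand-in check):

	static int validate_block(struct super_block *sb,
				  struct buffer_head *bh)
	{
		if (!signature_ok(bh))	/* stand-in check */
			return ocfs2_error(sb,
					   "Block #%llu has a bad signature\n",
					   (unsigned long long)bh->b_blocknr);
		return 0;
	}
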
 
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7df88a6..6918f30 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1465,39 +1465,46 @@
 	if (status == -ENOPROTOOPT) {
 		status = 0;
 		*response = JOIN_OK_NO_MAP;
-	} else if (packet.code == JOIN_DISALLOW ||
-		   packet.code == JOIN_OK_NO_MAP) {
-		*response = packet.code;
-	} else if (packet.code == JOIN_PROTOCOL_MISMATCH) {
-		mlog(ML_NOTICE,
-		     "This node requested DLM locking protocol %u.%u and "
-		     "filesystem locking protocol %u.%u.  At least one of "
-		     "the protocol versions on node %d is not compatible, "
-		     "disconnecting\n",
-		     dlm->dlm_locking_proto.pv_major,
-		     dlm->dlm_locking_proto.pv_minor,
-		     dlm->fs_locking_proto.pv_major,
-		     dlm->fs_locking_proto.pv_minor,
-		     node);
-		status = -EPROTO;
-		*response = packet.code;
-	} else if (packet.code == JOIN_OK) {
-		*response = packet.code;
-		/* Use the same locking protocol as the remote node */
-		dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
-		dlm->fs_locking_proto.pv_minor = packet.fs_minor;
-		mlog(0,
-		     "Node %d responds JOIN_OK with DLM locking protocol "
-		     "%u.%u and fs locking protocol %u.%u\n",
-		     node,
-		     dlm->dlm_locking_proto.pv_major,
-		     dlm->dlm_locking_proto.pv_minor,
-		     dlm->fs_locking_proto.pv_major,
-		     dlm->fs_locking_proto.pv_minor);
 	} else {
-		status = -EINVAL;
-		mlog(ML_ERROR, "invalid response %d from node %u\n",
-		     packet.code, node);
+		*response = packet.code;
+		switch (packet.code) {
+		case JOIN_DISALLOW:
+		case JOIN_OK_NO_MAP:
+			break;
+		case JOIN_PROTOCOL_MISMATCH:
+			mlog(ML_NOTICE,
+			     "This node requested DLM locking protocol %u.%u and "
+			     "filesystem locking protocol %u.%u.  At least one of "
+			     "the protocol versions on node %d is not compatible, "
+			     "disconnecting\n",
+			     dlm->dlm_locking_proto.pv_major,
+			     dlm->dlm_locking_proto.pv_minor,
+			     dlm->fs_locking_proto.pv_major,
+			     dlm->fs_locking_proto.pv_minor,
+			     node);
+			status = -EPROTO;
+			break;
+		case JOIN_OK:
+			/* Use the same locking protocol as the remote node */
+			dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
+			dlm->fs_locking_proto.pv_minor = packet.fs_minor;
+			mlog(0,
+			     "Node %d responds JOIN_OK with DLM locking protocol "
+			     "%u.%u and fs locking protocol %u.%u\n",
+			     node,
+			     dlm->dlm_locking_proto.pv_major,
+			     dlm->dlm_locking_proto.pv_minor,
+			     dlm->fs_locking_proto.pv_major,
+			     dlm->fs_locking_proto.pv_minor);
+			break;
+		default:
+			status = -EINVAL;
+			mlog(ML_ERROR, "invalid response %d from node %u\n",
+			     packet.code, node);
+			/* Reset response to JOIN_DISALLOW */
+			*response = JOIN_DISALLOW;
+			break;
+		}
 	}
 
 	mlog(0, "status %d, node %d response is %d\n", status, node,
@@ -1725,12 +1732,13 @@
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
+	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
+			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
+
 	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
-	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
-			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
 	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
@@ -1845,8 +1853,6 @@
 					sizeof(struct dlm_exit_domain),
 					dlm_begin_exit_domain_handler,
 					dlm, NULL, &dlm->dlm_domain_handlers);
-	if (status)
-		goto bail;
 
 bail:
 	if (status)
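
The join-response handling becomes a switch that stores packet.code up
front and, crucially, resets *response to JOIN_DISALLOW in the default
arm, so a garbage code can never be mistaken for a successful join.
The shape, reduced to its essentials:

	*response = packet.code;
	switch (packet.code) {
	case JOIN_DISALLOW:
	case JOIN_OK_NO_MAP:
	case JOIN_OK:
		break;				/* valid codes pass through */
	default:
		status = -EINVAL;
		*response = JOIN_DISALLOW;	/* defensive reset */
		break;
	}
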
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fdf4b41..46b8b2b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -498,16 +498,6 @@
 	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 	     res->lockname.name);
 
-	spin_lock(&dlm->track_lock);
-	if (!list_empty(&res->tracking))
-		list_del_init(&res->tracking);
-	else {
-		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
-		     res->lockname.len, res->lockname.name);
-		dlm_print_one_lock_resource(res);
-	}
-	spin_unlock(&dlm->track_lock);
-
 	atomic_dec(&dlm->res_cur_count);
 
 	if (!hlist_unhashed(&res->hash_node) ||
@@ -795,8 +785,18 @@
 		dlm_lockres_grab_inflight_ref(dlm, tmpres);
 
 		spin_unlock(&tmpres->spinlock);
-		if (res)
+		if (res) {
+			spin_lock(&dlm->track_lock);
+			if (!list_empty(&res->tracking))
+				list_del_init(&res->tracking);
+			else
+				mlog(ML_ERROR, "Resource %.*s not "
+						"on the Tracking list\n",
+						res->lockname.len,
+						res->lockname.name);
+			spin_unlock(&dlm->track_lock);
 			dlm_lockres_put(res);
+		}
 		res = tmpres;
 		goto leave;
 	}
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index ce12e0b..d0e436d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1776,7 +1776,7 @@
 				     struct dlm_migratable_lockres *mres)
 {
 	struct dlm_migratable_lock *ml;
-	struct list_head *queue, *iter;
+	struct list_head *queue;
 	struct list_head *tmpq = NULL;
 	struct dlm_lock *newlock = NULL;
 	struct dlm_lockstatus *lksb = NULL;
@@ -1821,9 +1821,7 @@
 			spin_lock(&res->spinlock);
 			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
 				tmpq = dlm_list_idx_to_ptr(res, j);
-				list_for_each(iter, tmpq) {
-					lock = list_entry(iter,
-						  struct dlm_lock, list);
+				list_for_each_entry(lock, tmpq, list) {
 					if (lock->ml.cookie == ml->cookie)
 						break;
 					lock = NULL;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 69aac6f..2e5e6d5 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -211,6 +211,16 @@
 
 	__dlm_unhash_lockres(dlm, res);
 
+	spin_lock(&dlm->track_lock);
+	if (!list_empty(&res->tracking))
+		list_del_init(&res->tracking);
+	else {
+		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
+				res->lockname.len, res->lockname.name);
+		__dlm_print_one_lock_resource(res);
+	}
+	spin_unlock(&dlm->track_lock);
+
 	/* lockres is not in the hash now.  drop the flag and wake up
 	 * any processes waiting in dlm_get_lock_resource. */
 	if (!master) {
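
Read together with the dlmmaster.c hunks, the tracking-list removal
moves from final-put time to unhash time, still under dlm->track_lock,
so a lockres that is found and recycled early is taken off the list by
whichever path drops it.  The locking shape:

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);	/* normal case */
	else
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
	spin_unlock(&dlm->track_lock);
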
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 23157e4..1c91103 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3035,8 +3035,6 @@
 	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
 	osb->cconn = conn;
-
-	status = 0;
 bail:
 	if (status < 0) {
 		ocfs2_dlm_shutdown_debug(osb);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 767370b..e4719e0 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -305,8 +305,8 @@
 
 	if (el->l_tree_depth) {
 		ocfs2_error(inode->i_sb,
-			    "Inode %lu has non zero tree depth in "
-			    "leaf block %llu\n", inode->i_ino,
+			    "Inode %lu has non zero tree depth in leaf block %llu\n",
+			    inode->i_ino,
 			    (unsigned long long)eb_bh->b_blocknr);
 		ret = -EROFS;
 		goto out;
@@ -441,8 +441,8 @@
 
 		if (el->l_tree_depth) {
 			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "leaf block %llu\n", inode->i_ino,
+				    "Inode %lu has non zero tree depth in leaf block %llu\n",
+				    inode->i_ino,
 				    (unsigned long long)eb_bh->b_blocknr);
 			ret = -EROFS;
 			goto out;
@@ -475,8 +475,9 @@
 	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
 
 	if (!rec->e_blkno) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0)", inode->i_ino,
+		ocfs2_error(inode->i_sb,
+			    "Inode %lu has bad extent record (%u, %u, 0)\n",
+			    inode->i_ino,
 			    le32_to_cpu(rec->e_cpos),
 			    ocfs2_rec_clusters(el, rec));
 		ret = -EROFS;
@@ -564,8 +565,8 @@
 
 		if (el->l_tree_depth) {
 			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "xattr leaf block %llu\n", inode->i_ino,
+				    "Inode %lu has non zero tree depth in xattr leaf block %llu\n",
+				    inode->i_ino,
 				    (unsigned long long)eb_bh->b_blocknr);
 			ret = -EROFS;
 			goto out;
@@ -582,8 +583,9 @@
 		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
 
 		if (!rec->e_blkno) {
-			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-				    "record (%u, %u, 0) in xattr", inode->i_ino,
+			ocfs2_error(inode->i_sb,
+				    "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
+				    inode->i_ino,
 				    le32_to_cpu(rec->e_cpos),
 				    ocfs2_rec_clusters(el, rec));
 			ret = -EROFS;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 719f7f4..0e5b451 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -105,8 +105,11 @@
 			      file->f_path.dentry->d_name.len,
 			      file->f_path.dentry->d_name.name, mode);
 
-	if (file->f_mode & FMODE_WRITE)
-		dquot_initialize(inode);
+	if (file->f_mode & FMODE_WRITE) {
+		status = dquot_initialize(inode);
+		if (status)
+			goto leave;
+	}
 
 	spin_lock(&oi->ip_lock);
 
@@ -1127,6 +1130,7 @@
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	int status = 0, size_change;
+	int inode_locked = 0;
 	struct inode *inode = d_inode(dentry);
 	struct super_block *sb = inode->i_sb;
 	struct ocfs2_super *osb = OCFS2_SB(sb);
@@ -1155,8 +1159,11 @@
 	if (status)
 		return status;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		status = dquot_initialize(inode);
+		if (status)
+			return status;
+	}
 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
 	if (size_change) {
 		status = ocfs2_rw_lock(inode, 1);
@@ -1172,6 +1179,7 @@
 			mlog_errno(status);
 		goto bail_unlock_rw;
 	}
+	inode_locked = 1;
 
 	if (size_change) {
 		status = inode_newsize_ok(inode, attr->ia_size);
@@ -1209,8 +1217,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
 			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
-			if (!transfer_to[USRQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[USRQUOTA])) {
+				status = PTR_ERR(transfer_to[USRQUOTA]);
 				goto bail_unlock;
 			}
 		}
@@ -1218,8 +1226,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
 			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
-			if (!transfer_to[GRPQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[GRPQUOTA])) {
+				status = PTR_ERR(transfer_to[GRPQUOTA]);
 				goto bail_unlock;
 			}
 		}
@@ -1252,7 +1260,10 @@
 bail_commit:
 	ocfs2_commit_trans(osb, handle);
 bail_unlock:
-	ocfs2_inode_unlock(inode, 1);
+	if (status) {
+		ocfs2_inode_unlock(inode, 1);
+		inode_locked = 0;
+	}
 bail_unlock_rw:
 	if (size_change)
 		ocfs2_rw_unlock(inode, 1);
@@ -1268,6 +1279,8 @@
 		if (status < 0)
 			mlog_errno(status);
 	}
+	if (inode_locked)
+		ocfs2_inode_unlock(inode, 1);
 
 	return status;
 }
@@ -2256,8 +2269,6 @@
 	ssize_t written = 0;
 	ssize_t ret;
 	size_t count = iov_iter_count(from), orig_count;
-	loff_t old_size;
-	u32 old_clusters;
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2265,6 +2276,8 @@
 			       OCFS2_MOUNT_COHERENCY_BUFFERED);
 	int unaligned_dio = 0;
 	int dropped_dio = 0;
+	int append_write = ((iocb->ki_pos + count) >=
+			i_size_read(inode) ? 1 : 0);
 
 	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2284,8 +2297,9 @@
 	/*
 	 * Concurrent O_DIRECT writes are allowed with
 	 * mount_option "coherency=buffered".
+	 * For append write, we must take rw EX.
 	 */
-	rw_level = (!direct_io || full_coherency);
+	rw_level = (!direct_io || full_coherency || append_write);
 
 	ret = ocfs2_rw_lock(inode, rw_level);
 	if (ret < 0) {
@@ -2358,13 +2372,6 @@
 		ocfs2_iocb_set_unaligned_aio(iocb);
 	}
 
-	/*
-	 * To later detect whether a journal commit for sync writes is
-	 * necessary, we sample i_size, and cluster count here.
-	 */
-	old_size = i_size_read(inode);
-	old_clusters = OCFS2_I(inode)->ip_clusters;
-
 	/* communicate with ocfs2_dio_end_io */
 	ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
@@ -2372,6 +2379,20 @@
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
 
+	/*
+	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
+	 * function pointer which is called when o_direct io completes so that
+	 * it can unlock our rw lock.
+	 * Unfortunately there are error cases which call end_io and others
+	 * that don't, so we don't have to unlock the rw_lock if either an
+	 * async dio is going to do it in the future or an end_io after an
+	 * error has already done it.
+	 */
+	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
+		rw_level = -1;
+		unaligned_dio = 0;
+	}
+
 	if (unlikely(written <= 0))
 		goto no_sync;
 
@@ -2396,21 +2417,7 @@
 	}
 
 no_sync:
-	/*
-	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
-	 * function pointer which is called when o_direct io completes so that
-	 * it can unlock our rw lock.
-	 * Unfortunately there are error cases which call end_io and others
-	 * that don't.  so we don't have to unlock the rw_lock if either an
-	 * async dio is going to do it in the future or an end_io after an
-	 * error has already done it.
-	 */
-	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
-		rw_level = -1;
-		unaligned_dio = 0;
-	}
-
-	if (unaligned_dio) {
+	if (unaligned_dio && ocfs2_iocb_is_unaligned_aio(iocb)) {
 		ocfs2_iocb_clear_unaligned_aio(iocb);
 		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
 	}
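
The write path now treats any write that reaches i_size as an append and
takes the rw lock exclusively for it, since ocfs2_dio_end_io() defers
the unlock for append direct I/O; the -EIOCBQUEUED check also moves
ahead of the sync logic so an in-flight async dio is never
double-unlocked.  The gating, reduced to a sketch:

	/* append O_DIRECT writes must take rw EX */
	int append_write = (iocb->ki_pos + count) >= i_size_read(inode);
	int rw_level = (!direct_io || full_coherency || append_write);

	ret = ocfs2_rw_lock(inode, rw_level);

	/* later: async dio (or an end_io on error) unlocks for us */
	if (written == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb))
		rw_level = -1;
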
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index b254416..8f87e05 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -971,6 +971,7 @@
 	int wipe, status;
 	sigset_t oldset;
 	struct buffer_head *di_bh = NULL;
+	struct ocfs2_dinode *di = NULL;
 
 	trace_ocfs2_delete_inode(inode->i_ino,
 				 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -1025,6 +1026,14 @@
 		goto bail_unlock_nfs_sync;
 	}
 
+	di = (struct ocfs2_dinode *)di_bh->b_data;
+	/* Skip inode deletion and wait for the dio orphan entry to be
+	 * recovered first */
+	if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
+		ocfs2_cleanup_delete_inode(inode, 0);
+		goto bail_unlock_inode;
+	}
+
 	/* Query the cluster. This will be the final decision made
 	 * before we go ahead and wipe the inode. */
 	status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
@@ -1191,17 +1200,19 @@
 int ocfs2_drop_inode(struct inode *inode)
 {
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	int res;
 
 	trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
 				inode->i_nlink, oi->ip_flags);
 
-	if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
-		res = 1;
-	else
-		res = generic_drop_inode(inode);
+	assert_spin_locked(&inode->i_lock);
+	inode->i_state |= I_WILL_FREE;
+	spin_unlock(&inode->i_lock);
+	write_inode_now(inode, 1);
+	spin_lock(&inode->i_lock);
+	WARN_ON(inode->i_state & I_NEW);
+	inode->i_state &= ~I_WILL_FREE;
 
-	return res;
+	return 1;
 }
 
 /*
@@ -1350,32 +1361,32 @@
 	rc = -EINVAL;
 
 	if (!OCFS2_IS_VALID_DINODE(di)) {
-		ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    di->i_signature);
+		rc = ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 di->i_signature);
 		goto bail;
 	}
 
 	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(di->i_blkno));
+		rc = ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(di->i_blkno));
 		goto bail;
 	}
 
 	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
-		ocfs2_error(sb,
-			    "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
-			    (unsigned long long)bh->b_blocknr);
+		rc = ocfs2_error(sb,
+				 "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
+				 (unsigned long long)bh->b_blocknr);
 		goto bail;
 	}
 
 	if (le32_to_cpu(di->i_fs_generation) !=
 	    OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Invalid dinode #%llu: fs_generation is %u\n",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(di->i_fs_generation));
+		rc = ocfs2_error(sb,
+				 "Invalid dinode #%llu: fs_generation is %u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(di->i_fs_generation));
 		goto bail;
 	}
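
ocfs2_drop_inode() stops consulting generic_drop_inode() and always
evicts: it marks the inode I_WILL_FREE, writes it back synchronously,
and returns 1, which keeps writeback from racing eviction on an inode
that may be orphaned on another node.  The idiom (drop_inode is called
with i_lock held):

	assert_spin_locked(&inode->i_lock);
	inode->i_state |= I_WILL_FREE;	/* fence off concurrent writeback */
	spin_unlock(&inode->i_lock);
	write_inode_now(inode, 1);	/* synchronous flush */
	spin_lock(&inode->i_lock);
	WARN_ON(inode->i_state & I_NEW);
	inode->i_state &= ~I_WILL_FREE;
	return 1;			/* always evict */
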
 
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 5e86b24..ca3431e 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -81,8 +81,6 @@
 	tid_t i_sync_tid;
 	tid_t i_datasync_tid;
 
-	wait_queue_head_t append_dio_wq;
-
 	struct dquot *i_dquot[MAXQUOTAS];
 };
 
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 7c099f7..ff82b28 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -374,7 +374,7 @@
 		mlog_errno(PTR_ERR(handle));
 
 		if (is_journal_aborted(journal)) {
-			ocfs2_abort(osb->sb, "Detected aborted journal");
+			ocfs2_abort(osb->sb, "Detected aborted journal\n");
 			handle = ERR_PTR(-EROFS);
 		}
 	} else {
@@ -668,7 +668,23 @@
 		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
 		mlog(ML_ERROR, "b_blocknr=%llu\n",
 		     (unsigned long long)bh->b_blocknr);
-		BUG();
+
+		lock_buffer(bh);
+		/*
+		 * A previous attempt to write this buffer head failed.
+		 * Nothing we can do but to retry the write and hope for
+		 * Nothing we can do but retry the write and hope for
+		 */
+		if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
+			clear_buffer_write_io_error(bh);
+			set_buffer_uptodate(bh);
+		}
+
+		if (!buffer_uptodate(bh)) {
+			unlock_buffer(bh);
+			return -EIO;
+		}
+		unlock_buffer(bh);
 	}
 
 	/* Set the current transaction information on the ci so
@@ -2170,6 +2186,7 @@
 		iter = oi->ip_next_orphan;
 		oi->ip_next_orphan = NULL;
 
+		mutex_lock(&inode->i_mutex);
 		ret = ocfs2_rw_lock(inode, 1);
 		if (ret < 0) {
 			mlog_errno(ret);
@@ -2193,7 +2210,9 @@
 			 * ocfs2_delete_inode. */
 			oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
 			spin_unlock(&oi->ip_lock);
-		} else if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
+		}
+
+		if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
 				(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
 			ret = ocfs2_truncate_file(inode, di_bh,
 					i_size_read(inode));
@@ -2206,17 +2225,16 @@
 			ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
 			if (ret)
 				mlog_errno(ret);
-
-			wake_up(&OCFS2_I(inode)->append_dio_wq);
 		} /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */
 unlock_inode:
 		ocfs2_inode_unlock(inode, 1);
+		brelse(di_bh);
+		di_bh = NULL;
 unlock_rw:
 		ocfs2_rw_unlock(inode, 1);
 next:
+		mutex_unlock(&inode->i_mutex);
 		iput(inode);
-		brelse(di_bh);
-		di_bh = NULL;
 		inode = iter;
 	}
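
The BUG() on a non-uptodate buffer becomes a recovery attempt: a buffer
that only lost its uptodate bit to a failed write still holds the newest
copy of the data in memory, so it is safe to re-mark it and let the
journal retry the write; anything else fails with -EIO instead of
crashing the node.  The retry idiom:

	lock_buffer(bh);
	if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
		/* in-memory data is still newest; retry the write */
		clear_buffer_write_io_error(bh);
		set_buffer_uptodate(bh);
	}
	if (!buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return -EIO;		/* genuinely stale: give up */
	}
	unlock_buffer(bh);
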
 
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 857bbbc..0a4457f 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -665,8 +665,7 @@
 #ifdef CONFIG_OCFS2_DEBUG_FS
 	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
 	    ocfs2_local_alloc_count_bits(alloc)) {
-		ocfs2_error(osb->sb, "local alloc inode %llu says it has "
-			    "%u used bits, but a count shows %u",
+		ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
 			    (unsigned long long)le64_to_cpu(alloc->i_blkno),
 			    le32_to_cpu(alloc->id1.bitmap1.i_used),
 			    ocfs2_local_alloc_count_bits(alloc));
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 56a768d..124471d 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -99,11 +99,9 @@
 
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
-		ocfs2_error(inode->i_sb,
-			    "Inode %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			    (unsigned long long)ino, cpos);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb,
+				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
+				  (unsigned long long)ino, cpos);
 		goto out;
 	}
 
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6e6abb9..b7dfac2 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -200,11 +200,12 @@
 static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 {
 	struct inode *inode;
+	int status;
 
 	inode = new_inode(dir->i_sb);
 	if (!inode) {
 		mlog(ML_ERROR, "new_inode failed!\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* populate as many fields early on as possible - many of
@@ -213,7 +214,10 @@
 	if (S_ISDIR(mode))
 		set_nlink(inode, 2);
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	status = dquot_initialize(inode);
+	if (status)
+		return ERR_PTR(status);
+
 	return inode;
 }
 
@@ -264,7 +268,11 @@
 			  (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			  (unsigned long)dev, mode);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	/* get our super block */
 	osb = OCFS2_SB(dir->i_sb);
@@ -311,8 +319,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
@@ -708,7 +717,11 @@
 	if (S_ISDIR(inode->i_mode))
 		return -EPERM;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err) {
+		mlog_errno(err);
+		return err;
+	}
 
 	err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
 			&parent_fe_bh, dir, 0);
@@ -896,7 +909,11 @@
 			   (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			   (unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	BUG_ON(d_inode(dentry->d_parent) != dir);
 
@@ -1018,11 +1035,6 @@
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
 
-	if (child_locked)
-		ocfs2_inode_unlock(inode, 1);
-
-	ocfs2_inode_unlock(dir, 1);
-
 	if (orphan_dir) {
 		/* This was locked for us in ocfs2_prepare_orphan_dir() */
 		ocfs2_inode_unlock(orphan_dir, 1);
@@ -1030,6 +1042,11 @@
 		iput(orphan_dir);
 	}
 
+	if (child_locked)
+		ocfs2_inode_unlock(inode, 1);
+
+	ocfs2_inode_unlock(dir, 1);
+
 	brelse(fe_bh);
 	brelse(parent_node_bh);
 
@@ -1230,8 +1247,16 @@
 			   old_dentry->d_name.len, old_dentry->d_name.name,
 			   new_dentry->d_name.len, new_dentry->d_name.name);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	status = dquot_initialize(old_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
+	status = dquot_initialize(new_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	osb = OCFS2_SB(old_dir->i_sb);
 
@@ -1284,6 +1309,11 @@
 	}
 	parents_locked = 1;
 
+	if (!new_dir->i_nlink) {
+		status = -EACCES;
+		goto bail;
+	}
+
 	/* make sure both dirs have bhs
 	 * get an extra ref on old_dir_bh if old==new */
 	if (!new_dir_bh) {
@@ -1544,12 +1574,25 @@
 	status = ocfs2_find_entry(old_dentry->d_name.name,
 				  old_dentry->d_name.len, old_dir,
 				  &old_entry_lookup);
-	if (status)
+	if (status) {
+		if (!is_journal_aborted(osb->journal->j_journal)) {
+			ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
+					"is not deleted\n",
+					new_dentry->d_name.len, new_dentry->d_name.name,
+					old_dentry->d_name.len, old_dentry->d_name.name);
+		}
 		goto bail;
+	}
 
 	status = ocfs2_delete_entry(handle, old_dir, &old_entry_lookup);
 	if (status < 0) {
 		mlog_errno(status);
+		if (!is_journal_aborted(osb->journal->j_journal)) {
+			ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
+					"is not deleted\n",
+					new_dentry->d_name.len, new_dentry->d_name.name,
+					old_dentry->d_name.len, old_dentry->d_name.name);
+		}
 		goto bail;
 	}
 
@@ -1608,21 +1651,9 @@
 	ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir);
 	status = 0;
 bail:
-	if (rename_lock)
-		ocfs2_rename_unlock(osb);
-
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
 
-	if (parents_locked)
-		ocfs2_double_unlock(old_dir, new_dir);
-
-	if (old_child_locked)
-		ocfs2_inode_unlock(old_inode, 1);
-
-	if (new_child_locked)
-		ocfs2_inode_unlock(new_inode, 1);
-
 	if (orphan_dir) {
 		/* This was locked for us in ocfs2_prepare_orphan_dir() */
 		ocfs2_inode_unlock(orphan_dir, 1);
@@ -1630,6 +1661,18 @@
 		iput(orphan_dir);
 	}
 
+	if (new_child_locked)
+		ocfs2_inode_unlock(new_inode, 1);
+
+	if (old_child_locked)
+		ocfs2_inode_unlock(old_inode, 1);
+
+	if (parents_locked)
+		ocfs2_double_unlock(old_dir, new_dir);
+
+	if (rename_lock)
+		ocfs2_rename_unlock(osb);
+
 	if (new_inode)
 		sync_mapping_buffers(old_inode->i_mapping);
 
@@ -1786,7 +1829,11 @@
 	trace_ocfs2_symlink_begin(dir, dentry, symname,
 				  dentry->d_name.len, dentry->d_name.name);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	sb = dir->i_sb;
 	osb = OCFS2_SB(sb);
@@ -1831,8 +1878,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, S_IFLNK | S_IRWXUGO);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -2485,8 +2533,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
@@ -2570,27 +2619,6 @@
 	return status;
 }
 
-static int ocfs2_dio_orphan_recovered(struct inode *inode)
-{
-	int ret;
-	struct buffer_head *di_bh = NULL;
-	struct ocfs2_dinode *di = NULL;
-
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret < 0) {
-		mlog_errno(ret);
-		return 0;
-	}
-
-	di = (struct ocfs2_dinode *) di_bh->b_data;
-	ret = !(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL));
-	ocfs2_inode_unlock(inode, 1);
-	brelse(di_bh);
-
-	return ret;
-}
-
-#define OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL 10000
 int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
 	struct inode *inode)
 {
@@ -2602,7 +2630,6 @@
 	handle_t *handle = NULL;
 	struct ocfs2_dinode *di = NULL;
 
-restart:
 	status = ocfs2_inode_lock(inode, &di_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
@@ -2612,15 +2639,21 @@
 	di = (struct ocfs2_dinode *) di_bh->b_data;
 	/*
 	 * Another append dio crashed?
-	 * If so, wait for recovery first.
+	 * If so, manually recover it first.
 	 */
 	if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
-		ocfs2_inode_unlock(inode, 1);
-		brelse(di_bh);
-		wait_event_interruptible_timeout(OCFS2_I(inode)->append_dio_wq,
-				ocfs2_dio_orphan_recovered(inode),
-				msecs_to_jiffies(OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL));
-		goto restart;
+		status = ocfs2_truncate_file(inode, di_bh, i_size_read(inode));
+		if (status < 0) {
+			if (status != -ENOSPC)
+				mlog_errno(status);
+			goto bail_unlock_inode;
+		}
+
+		status = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail_unlock_inode;
+		}
 	}
 
 	status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 690ddc6..7a01262 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -286,6 +286,8 @@
 	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
 
 	OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
+	OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
+	OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
 };
 
 #define OCFS2_OSB_SOFT_RO	0x0001
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 3d0b63d..8a54fd8 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -138,8 +138,7 @@
 
 	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
 		ocfs2_error(inode->i_sb,
-			    "Quota file %llu is probably corrupted! Requested "
-			    "to read block %Lu but file has size only %Lu\n",
+			    "Quota file %llu is probably corrupted! Requested to read block %Lu but file has size only %Lu\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    (unsigned long long)v_block,
 			    (unsigned long long)i_size_read(inode));
@@ -499,8 +498,8 @@
 			dquot = dqget(sb,
 				      make_kqid(&init_user_ns, type,
 						le64_to_cpu(dqblk->dqb_id)));
-			if (!dquot) {
-				status = -EIO;
+			if (IS_ERR(dquot)) {
+				status = PTR_ERR(dquot);
 				mlog(ML_ERROR, "Failed to get quota structure "
 				     "for id %u, type %d. Cannot finish quota "
 				     "file recovery.\n",
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b69dd14..e5d57cd 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -102,32 +102,30 @@
 
 
 	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has bad signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    rb->rf_signature);
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has bad signature %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 rb->rf_signature);
+		goto out;
 	}
 
 	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has an invalid rf_blkno "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(rb->rf_blkno));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
+		goto out;
 	}
 
 	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has an invalid "
-			    "rf_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(rb->rf_fs_generation));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(rb->rf_fs_generation));
+		goto out;
 	}
-
-	return 0;
+out:
+	return rc;
 }
 
 static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
@@ -1102,12 +1100,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(sb,
-			"refcount tree %llu has non zero tree "
-			"depth in leaf btree tree block %llu\n",
-			(unsigned long long)ocfs2_metadata_cache_owner(ci),
-			(unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(sb,
+					  "refcount tree %llu has non zero tree depth in leaf btree block %llu\n",
+					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -2359,10 +2355,8 @@
 					   cpos, len, phys);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount tree, but the feature bit is not set in the super block\n",
+				  inode->i_ino);
 		goto out;
 	}
 
@@ -2545,10 +2539,8 @@
 	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount tree, but the feature bit is not set in the super block\n",
+				  inode->i_ino);
 		goto out;
 	}
 
@@ -2672,11 +2664,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "leaf block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in leaf block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -3106,11 +3097,9 @@
 
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
-		ocfs2_error(sb,
-			    "Inode %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			    (unsigned long long)ino, cpos);
-		ret = -EROFS;
+		ret = ocfs2_error(sb,
+				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
+				  (unsigned long long)ino, cpos);
 		goto out;
 	}
 
@@ -3376,10 +3365,8 @@
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		return -EROFS;
+		return ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount tree, but the feature bit is not set in the super block\n",
+				   inode->i_ino);
 	}
 
 	ocfs2_init_dealloc_ctxt(&context->dealloc);
@@ -4419,8 +4406,9 @@
 	}
 
 	mutex_lock(&inode->i_mutex);
-	dquot_initialize(dir);
-	error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
+	error = dquot_initialize(dir);
+	if (!error)
+		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
 	mutex_unlock(&inode->i_mutex);
 	if (!error)
 		fsnotify_create(dir, new_dentry);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4479029..d83d260 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -149,10 +149,8 @@
 	brelse(ac->ac_bh);
 	ac->ac_bh = NULL;
 	ac->ac_resv = NULL;
-	if (ac->ac_find_loc_priv) {
-		kfree(ac->ac_find_loc_priv);
-		ac->ac_find_loc_priv = NULL;
-	}
+	kfree(ac->ac_find_loc_priv);
+	ac->ac_find_loc_priv = NULL;
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -167,12 +165,12 @@
 }
 
 #define do_error(fmt, ...)						\
-	do{								\
-		if (resize)					\
-			mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__);	\
-		else							\
-			ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
-	} while (0)
+do {									\
+	if (resize)							\
+		mlog(ML_ERROR, fmt, ##__VA_ARGS__);			\
+	else								\
+		return ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
+} while (0)
 
 static int ocfs2_validate_gd_self(struct super_block *sb,
 				  struct buffer_head *bh,
@@ -181,44 +179,35 @@
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
 	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
-		do_error("Group descriptor #%llu has bad signature %.*s",
+		do_error("Group descriptor #%llu has bad signature %.*s\n",
 			 (unsigned long long)bh->b_blocknr, 7,
 			 gd->bg_signature);
-		return -EINVAL;
 	}
 
 	if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
-		do_error("Group descriptor #%llu has an invalid bg_blkno "
-			 "of %llu",
+		do_error("Group descriptor #%llu has an invalid bg_blkno of %llu\n",
 			 (unsigned long long)bh->b_blocknr,
 			 (unsigned long long)le64_to_cpu(gd->bg_blkno));
-		return -EINVAL;
 	}
 
 	if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
-		do_error("Group descriptor #%llu has an invalid "
-			 "fs_generation of #%u",
+		do_error("Group descriptor #%llu has an invalid fs_generation of #%u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le32_to_cpu(gd->bg_generation));
-		return -EINVAL;
 	}
 
 	if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
-		do_error("Group descriptor #%llu has bit count %u but "
-			 "claims that %u are free",
+		do_error("Group descriptor #%llu has bit count %u but claims that %u are free\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits),
 			 le16_to_cpu(gd->bg_free_bits_count));
-		return -EINVAL;
 	}
 
 	if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
-		do_error("Group descriptor #%llu has bit count %u but "
-			 "max bitmap bits of %u",
+		do_error("Group descriptor #%llu has bit count %u but max bitmap bits of %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits),
 			 8 * le16_to_cpu(gd->bg_size));
-		return -EINVAL;
 	}
 
 	return 0;
@@ -233,20 +222,17 @@
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
 	if (di->i_blkno != gd->bg_parent_dinode) {
-		do_error("Group descriptor #%llu has bad parent "
-			 "pointer (%llu, expected %llu)",
+		do_error("Group descriptor #%llu has bad parent pointer (%llu, expected %llu)\n",
 			 (unsigned long long)bh->b_blocknr,
 			 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
 			 (unsigned long long)le64_to_cpu(di->i_blkno));
-		return -EINVAL;
 	}
 
 	max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
 	if (le16_to_cpu(gd->bg_bits) > max_bits) {
-		do_error("Group descriptor #%llu has bit count of %u",
+		do_error("Group descriptor #%llu has bit count of %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits));
-		return -EINVAL;
 	}
 
 	/* In resize, we may meet the case bg_chain == cl_next_free_rec. */
@@ -254,10 +240,9 @@
 	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
 	    ((le16_to_cpu(gd->bg_chain) ==
 	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
-		do_error("Group descriptor #%llu has bad chain %u",
+		do_error("Group descriptor #%llu has bad chain %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_chain));
-		return -EINVAL;
 	}
 
 	return 0;
@@ -384,11 +369,10 @@
 	struct super_block * sb = alloc_inode->i_sb;
 
 	if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
-		ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
-			    "b_blocknr (%llu)",
-			    (unsigned long long)group_blkno,
-			    (unsigned long long) bg_bh->b_blocknr);
-		status = -EIO;
+		status = ocfs2_error(alloc_inode->i_sb,
+				     "group block (%llu) != b_blocknr (%llu)\n",
+				     (unsigned long long)group_blkno,
+				     (unsigned long long) bg_bh->b_blocknr);
 		goto bail;
 	}
 
@@ -834,9 +818,9 @@
 	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
 
 	if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
-		ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
-			    (unsigned long long)le64_to_cpu(fe->i_blkno));
-		status = -EIO;
+		status = ocfs2_error(alloc_inode->i_sb,
+				     "Invalid chain allocator %llu\n",
+				     (unsigned long long)le64_to_cpu(fe->i_blkno));
 		goto bail;
 	}
 
@@ -1370,12 +1354,11 @@
 
 	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
 	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
-		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
-			    " count %u but claims %u are freed. num_bits %d",
-			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
-			    le16_to_cpu(bg->bg_bits),
-			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
-		return -EROFS;
+		return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
+				   (unsigned long long)le64_to_cpu(bg->bg_blkno),
+				   le16_to_cpu(bg->bg_bits),
+				   le16_to_cpu(bg->bg_free_bits_count),
+				   num_bits);
 	}
 	while(num_bits--)
 		ocfs2_set_bit(bit_off++, bitmap);
@@ -1905,13 +1888,11 @@
 
 	if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
 	    le32_to_cpu(fe->id1.bitmap1.i_total)) {
-		ocfs2_error(ac->ac_inode->i_sb,
-			    "Chain allocator dinode %llu has %u used "
-			    "bits but only %u total.",
-			    (unsigned long long)le64_to_cpu(fe->i_blkno),
-			    le32_to_cpu(fe->id1.bitmap1.i_used),
-			    le32_to_cpu(fe->id1.bitmap1.i_total));
-		status = -EIO;
+		status = ocfs2_error(ac->ac_inode->i_sb,
+				     "Chain allocator dinode %llu has %u used bits but only %u total\n",
+				     (unsigned long long)le64_to_cpu(fe->i_blkno),
+				     le32_to_cpu(fe->id1.bitmap1.i_used),
+				     le32_to_cpu(fe->id1.bitmap1.i_total));
 		goto bail;
 	}
 
@@ -2429,12 +2410,11 @@
 	}
 	le16_add_cpu(&bg->bg_free_bits_count, num_bits);
 	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
-		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
-			    " count %u but claims %u are freed. num_bits %d",
-			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
-			    le16_to_cpu(bg->bg_bits),
-			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
-		return -EROFS;
+		return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
+				   (unsigned long long)le64_to_cpu(bg->bg_blkno),
+				   le16_to_cpu(bg->bg_bits),
+				   le16_to_cpu(bg->bg_free_bits_count),
+				   num_bits);
 	}
 
 	if (undo_fn)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 403c566..2de4c8a 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -192,6 +192,7 @@
 	Opt_resv_level,
 	Opt_dir_resv_level,
 	Opt_journal_async_commit,
+	Opt_err_cont,
 	Opt_err,
 };
 
@@ -224,6 +225,7 @@
 	{Opt_resv_level, "resv_level=%u"},
 	{Opt_dir_resv_level, "dir_resv_level=%u"},
 	{Opt_journal_async_commit, "journal_async_commit"},
+	{Opt_err_cont, "errors=continue"},
 	{Opt_err, NULL}
 };
 
@@ -1330,10 +1332,19 @@
 			mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
 			break;
 		case Opt_err_panic:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
 			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
 			break;
 		case Opt_err_ro:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
 			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS;
+			break;
+		case Opt_err_cont:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT;
 			break;
 		case Opt_data_ordered:
 			mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
@@ -1530,6 +1541,8 @@
 
 	if (opts & OCFS2_MOUNT_ERRORS_PANIC)
 		seq_printf(s, ",errors=panic");
+	else if (opts & OCFS2_MOUNT_ERRORS_CONT)
+		seq_printf(s, ",errors=continue");
 	else
 		seq_printf(s, ",errors=remount-ro");
 
@@ -1550,8 +1563,8 @@
 		seq_printf(s, ",localflocks,");
 
 	if (osb->osb_cluster_stack[0])
-		seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
-			   osb->osb_cluster_stack);
+		seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack,
+				  OCFS2_STACK_LABEL_LEN);
 	if (opts & OCFS2_MOUNT_USRQUOTA)
 		seq_printf(s, ",usrquota");
 	if (opts & OCFS2_MOUNT_GRPQUOTA)
@@ -1746,8 +1759,6 @@
 	ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
 	ocfs2_lock_res_init_once(&oi->ip_open_lockres);
 
-	init_waitqueue_head(&oi->append_dio_wq);
-
 	ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode),
 				  &ocfs2_inode_caching_ops);
 
@@ -2541,31 +2552,43 @@
 	memset(osb, 0, sizeof(struct ocfs2_super));
 }
 
-/* Put OCFS2 into a readonly state, or (if the user specifies it),
- * panic(). We do not support continue-on-error operation. */
-static void ocfs2_handle_error(struct super_block *sb)
+/* Depending on the mount option passed, perform one of the following:
+ * Put OCFS2 into a readonly state (the default)
+ * Return EIO so that only the faulting process sees the error
+ * Fix the error as fsck.ocfs2 -y would
+ * panic
+ */
+static int ocfs2_handle_error(struct super_block *sb)
 {
 	struct ocfs2_super *osb = OCFS2_SB(sb);
-
-	if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC)
-		panic("OCFS2: (device %s): panic forced after error\n",
-		      sb->s_id);
+	int rv = 0;
 
 	ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS);
+	pr_crit("On-disk corruption discovered. "
+		"Please run fsck.ocfs2 once the filesystem is unmounted.\n");
 
-	if (sb->s_flags & MS_RDONLY &&
-	    (ocfs2_is_soft_readonly(osb) ||
-	     ocfs2_is_hard_readonly(osb)))
-		return;
+	if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC) {
+		panic("OCFS2: (device %s): panic forced after error\n",
+		      sb->s_id);
+	} else if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_CONT) {
+		pr_crit("OCFS2: Returning error to the calling process.\n");
+		rv = -EIO;
+	} else { /* default option */
+		rv = -EROFS;
+		if (sb->s_flags & MS_RDONLY &&
+				(ocfs2_is_soft_readonly(osb) ||
+				 ocfs2_is_hard_readonly(osb)))
+			return rv;
 
-	printk(KERN_CRIT "File system is now read-only due to the potential "
-	       "of on-disk corruption. Please run fsck.ocfs2 once the file "
-	       "system is unmounted.\n");
-	sb->s_flags |= MS_RDONLY;
-	ocfs2_set_ro_flag(osb, 0);
+		pr_crit("OCFS2: File system is now read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+		ocfs2_set_ro_flag(osb, 0);
+	}
+
+	return rv;
 }
 
-void __ocfs2_error(struct super_block *sb, const char *function,
+int __ocfs2_error(struct super_block *sb, const char *function,
 		  const char *fmt, ...)
 {
 	struct va_format vaf;
@@ -2577,12 +2600,12 @@
 
 	/* Not using mlog here because we want to show the actual
 	 * function the error came from. */
-	printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV\n",
+	printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV",
 	       sb->s_id, function, &vaf);
 
 	va_end(args);
 
-	ocfs2_handle_error(sb);
+	return ocfs2_handle_error(sb);
 }
 
 /* Handle critical errors. This is intentionally more drastic than
@@ -2599,7 +2622,7 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV\n",
+	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV",
 	       sb->s_id, function, &vaf);
 
 	va_end(args);
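
The errors= parsing above makes the three ocfs2 error policies (continue,
remount-ro, panic) mutually exclusive: setting one clears the other two
flags. A minimal userspace sketch of selecting the new mode via mount(2);
the device and mountpoint here are placeholders:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		if (mount("/dev/sdb1", "/mnt/ocfs2", "ocfs2", 0,
			  "errors=continue") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}
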
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
index 74ff74c..b477d0b 100644
--- a/fs/ocfs2/super.h
+++ b/fs/ocfs2/super.h
@@ -32,16 +32,18 @@
 				  int node_num);
 
 __printf(3, 4)
-void __ocfs2_error(struct super_block *sb, const char *function,
+int __ocfs2_error(struct super_block *sb, const char *function,
 		   const char *fmt, ...);
 
-#define ocfs2_error(sb, fmt, args...) __ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##args)
+#define ocfs2_error(sb, fmt, ...)					\
+	__ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__)
 
 __printf(3, 4)
 void __ocfs2_abort(struct super_block *sb, const char *function,
 		   const char *fmt, ...);
 
-#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args)
+#define ocfs2_abort(sb, fmt, ...)					\
+	__ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__)
 
 /*
  * Void signal blockers, because in-kernel sigprocmask() only fails
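
The prototype change above (void to int) is what lets the earlier ocfs2
hunks collapse "report error, set status, goto" into a single
"return ocfs2_error(...)". A stand-alone sketch of that shape; the names
my_error/__my_error/my_handle_error are illustrative, not kernel API:

	#include <errno.h>
	#include <stdarg.h>
	#include <stdio.h>

	/* Pick the errno the way the error handler does (simplified). */
	static int my_handle_error(void)
	{
		return -EROFS;	/* would be -EIO under errors=continue */
	}

	__attribute__((format(printf, 2, 3)))
	static int __my_error(const char *function, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		fprintf(stderr, "ERROR: %s: ", function);
		vfprintf(stderr, fmt, args);
		va_end(args);
		return my_handle_error();
	}

	/* C99 __VA_ARGS__ with the GNU ## extension for empty argument
	 * lists, mirroring the rewritten ocfs2_error()/ocfs2_abort(). */
	#define my_error(fmt, ...) \
		__my_error(__func__, fmt, ##__VA_ARGS__)

	int check_block(unsigned long long blkno)
	{
		if (blkno == 0)
			return my_error("bad block %llu\n", blkno);
		return 0;
	}
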
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 889f379..ebfdea7 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -499,30 +499,24 @@
 	 */
 
 	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has bad "
-			    "signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    xb->xb_signature);
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has bad signature %.*s\n",
+				   (unsigned long long)bh->b_blocknr, 7,
+				   xb->xb_signature);
 	}
 
 	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has an "
-			    "invalid xb_blkno of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(xb->xb_blkno));
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has an invalid xb_blkno of %llu\n",
+				   (unsigned long long)bh->b_blocknr,
+				   (unsigned long long)le64_to_cpu(xb->xb_blkno));
 	}
 
 	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has an invalid "
-			    "xb_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(xb->xb_fs_generation));
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n",
+				   (unsigned long long)bh->b_blocknr,
+				   le32_to_cpu(xb->xb_fs_generation));
 	}
 
 	return 0;
@@ -3694,11 +3688,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "xattr tree block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in xattr tree block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -3713,11 +3706,10 @@
 	}
 
 	if (!e_blkno) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0) in xattr", inode->i_ino,
-			    le32_to_cpu(rec->e_cpos),
-			    ocfs2_rec_clusters(el, rec));
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
+				  inode->i_ino,
+				  le32_to_cpu(rec->e_cpos),
+				  ocfs2_rec_clusters(el, rec));
 		goto out;
 	}
 
@@ -7334,6 +7326,9 @@
 	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
 	const size_t total_len = prefix_len + name_len + 1;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return 0;
+
 	if (list && total_len <= list_size) {
 		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
 		memcpy(list + prefix_len, name, name_len);
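
With the capable(CAP_SYS_ADMIN) check added above, ocfs2 now omits
trusted.* names from listxattr() results for unprivileged callers,
matching the usual trusted-namespace rules. A small userspace sketch
showing the visible effect (run as root and as a plain user on a file
that carries a trusted.* attribute):

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(int argc, char **argv)
	{
		char names[4096];
		ssize_t len, off;

		if (argc < 2)
			return 2;
		len = listxattr(argv[1], names, sizeof(names));
		if (len < 0) {
			perror("listxattr");
			return 1;
		}
		/* The buffer holds a run of NUL-terminated names. */
		for (off = 0; off < len; off += strlen(names + off) + 1)
			puts(names + off);
		return 0;
	}
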
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7466ff3..79073d6 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -588,10 +588,10 @@
 	struct super_block *sb = dentry->d_sb;
 	struct ovl_fs *ufs = sb->s_fs_info;
 
-	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
+	seq_show_option(m, "lowerdir", ufs->config.lowerdir);
 	if (ufs->config.upperdir) {
-		seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-		seq_printf(m, ",workdir=%s", ufs->config.workdir);
+		seq_show_option(m, "upperdir", ufs->config.upperdir);
+		seq_show_option(m, "workdir", ufs->config.workdir);
 	}
 	return 0;
 }
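
seq_show_option() prints the option value with whitespace and
backslashes escaped, so a crafted lowerdir/upperdir path can no longer
break the field structure of /proc/mounts the way a raw seq_printf()
with %s could. A rough stand-alone sketch of the idea, assuming the
kernel's octal-escape convention (the real code goes through
seq_escape()):

	#include <stdio.h>
	#include <string.h>

	/* Emit ",name=value", octal-escaping " \t\n\\" in the value. */
	static void show_option(FILE *m, const char *name, const char *value)
	{
		fprintf(m, ",%s=", name);
		for (; *value; value++) {
			if (strchr(" \t\n\\", *value))
				fprintf(m, "\\%03o", (unsigned char)*value);
			else
				fputc(*value, m);
		}
	}

	int main(void)
	{
		show_option(stdout, "lowerdir", "/tmp/low er\ndir");
		putchar('\n');
		return 0;
	}
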
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ce065cf..f60f012 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -308,7 +308,8 @@
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
 {
 	const struct cred *cred;
-	kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;
+	kernel_cap_t cap_inheritable, cap_permitted, cap_effective,
+			cap_bset, cap_ambient;
 
 	rcu_read_lock();
 	cred = __task_cred(p);
@@ -316,12 +317,14 @@
 	cap_permitted	= cred->cap_permitted;
 	cap_effective	= cred->cap_effective;
 	cap_bset	= cred->cap_bset;
+	cap_ambient	= cred->cap_ambient;
 	rcu_read_unlock();
 
 	render_cap_t(m, "CapInh:\t", &cap_inheritable);
 	render_cap_t(m, "CapPrm:\t", &cap_permitted);
 	render_cap_t(m, "CapEff:\t", &cap_effective);
 	render_cap_t(m, "CapBnd:\t", &cap_bset);
+	render_cap_t(m, "CapAmb:\t", &cap_ambient);
 }
 
 static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
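
The new CapAmb line exposes the ambient capability set introduced this
cycle alongside the other capability masks. A quick userspace sketch
that reads it back:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/status", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "CapAmb:", 7))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}
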
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ca1e091..41f1a50 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -446,6 +446,7 @@
 	unsigned long anonymous_thp;
 	unsigned long swap;
 	u64 pss;
+	u64 swap_pss;
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
@@ -492,9 +493,20 @@
 	} else if (is_swap_pte(*pte)) {
 		swp_entry_t swpent = pte_to_swp_entry(*pte);
 
-		if (!non_swap_entry(swpent))
+		if (!non_swap_entry(swpent)) {
+			int mapcount;
+
 			mss->swap += PAGE_SIZE;
-		else if (is_migration_entry(swpent))
+			mapcount = swp_swapcount(swpent);
+			if (mapcount >= 2) {
+				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
+
+				do_div(pss_delta, mapcount);
+				mss->swap_pss += pss_delta;
+			} else {
+				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
+			}
+		} else if (is_migration_entry(swpent))
 			page = migration_entry_to_page(swpent);
 	}
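
The accumulation above keeps SwapPss in fixed point, with PSS_SHIFT
fractional bits (12 in this file), so a swap entry referenced by N
address spaces contributes PAGE_SIZE/N without the per-page rounding
error compounding across a large mapping. A reduced sketch of the
arithmetic, assuming 4 KiB pages:

	#include <stdint.h>
	#include <stdio.h>

	#define PSS_SHIFT	12
	#define MY_PAGE_SIZE	4096

	/* Account one swapped page shared by 'mapcount' users. */
	static void account_swap(uint64_t *swap_pss, int mapcount)
	{
		uint64_t delta = (uint64_t)MY_PAGE_SIZE << PSS_SHIFT;

		if (mapcount >= 2)
			delta /= mapcount;
		*swap_pss += delta;
	}

	int main(void)
	{
		uint64_t swap_pss = 0;

		account_swap(&swap_pss, 3);	/* one page, three sharers */
		account_swap(&swap_pss, 1);	/* one private page */
		/* Report in kB, as show_smap() does. */
		printf("SwapPss: %llu kB\n",
		       (unsigned long long)(swap_pss >> (10 + PSS_SHIFT)));
		return 0;
	}
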
 
@@ -597,6 +609,8 @@
 		[ilog2(VM_HUGEPAGE)]	= "hg",
 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 		[ilog2(VM_MERGEABLE)]	= "mg",
+		[ilog2(VM_UFFD_MISSING)]= "um",
+		[ilog2(VM_UFFD_WP)]	= "uw",
 	};
 	size_t i;
 
@@ -638,6 +652,7 @@
 		   "Anonymous:      %8lu kB\n"
 		   "AnonHugePages:  %8lu kB\n"
 		   "Swap:           %8lu kB\n"
+		   "SwapPss:        %8lu kB\n"
 		   "KernelPageSize: %8lu kB\n"
 		   "MMUPageSize:    %8lu kB\n"
 		   "Locked:         %8lu kB\n",
@@ -652,6 +667,7 @@
 		   mss.anonymous >> 10,
 		   mss.anonymous_thp >> 10,
 		   mss.swap >> 10,
+		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
 		   vma_kernel_pagesize(vma) >> 10,
 		   vma_mmu_pagesize(vma) >> 10,
 		   (vma->vm_flags & VM_LOCKED) ?
@@ -710,23 +726,6 @@
 	.release	= proc_map_release,
 };
 
-/*
- * We do not want to have constant page-shift bits sitting in
- * pagemap entries and are about to reuse them some time soon.
- *
- * Here's the "migration strategy":
- * 1. when the system boots these bits remain what they are,
- *    but a warning about future change is printed in log;
- * 2. once anyone clears soft-dirty bits via clear_refs file,
- *    these flag is set to denote, that user is aware of the
- *    new API and those page-shift bits change their meaning.
- *    The respective warning is printed in dmesg;
- * 3. In a couple of releases we will remove all the mentions
- *    of page-shift in pagemap entries.
- */
-
-static bool soft_dirty_cleared __read_mostly;
-
 enum clear_refs_types {
 	CLEAR_REFS_ALL = 1,
 	CLEAR_REFS_ANON,
@@ -887,13 +886,6 @@
 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
 		return -EINVAL;
 
-	if (type == CLEAR_REFS_SOFT_DIRTY) {
-		soft_dirty_cleared = true;
-		pr_warn_once("The pagemap bits 55-60 has changed their meaning!"
-			     " See the linux/Documentation/vm/pagemap.txt for "
-			     "details.\n");
-	}
-
 	task = get_proc_task(file_inode(file));
 	if (!task)
 		return -ESRCH;
@@ -961,36 +953,26 @@
 struct pagemapread {
 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 	pagemap_entry_t *buffer;
-	bool v2;
+	bool show_pfn;
 };
 
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 #define PAGEMAP_WALK_MASK	(PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
-#define PM_STATUS_BITS      3
-#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
-#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
-#define PM_PSHIFT_BITS      6
-#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
-#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
-#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
-#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
-#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
-/* in "new" pagemap pshift bits are occupied with more status bits */
-#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
+#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
+#define PM_PFRAME_BITS		55
+#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
+#define PM_SOFT_DIRTY		BIT_ULL(55)
+#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
+#define PM_FILE			BIT_ULL(61)
+#define PM_SWAP			BIT_ULL(62)
+#define PM_PRESENT		BIT_ULL(63)
 
-#define __PM_SOFT_DIRTY      (1LL)
-#define PM_PRESENT          PM_STATUS(4LL)
-#define PM_SWAP             PM_STATUS(2LL)
-#define PM_FILE             PM_STATUS(1LL)
-#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
 #define PM_END_OF_BUFFER    1
 
-static inline pagemap_entry_t make_pme(u64 val)
+static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
 {
-	return (pagemap_entry_t) { .pme = val };
+	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
 }
 
 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
@@ -1011,7 +993,7 @@
 
 	while (addr < end) {
 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
-		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+		pagemap_entry_t pme = make_pme(0, 0);
 		/* End of address space hole, which we mark as non-present. */
 		unsigned long hole_end;
 
@@ -1031,7 +1013,7 @@
 
 		/* Addresses in the VMA. */
 		if (vma->vm_flags & VM_SOFTDIRTY)
-			pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+			pme = make_pme(0, PM_SOFT_DIRTY);
 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
 			err = add_to_pagemap(addr, &pme, pm);
 			if (err)
@@ -1042,67 +1024,42 @@
 	return err;
 }
 
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
+static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	u64 frame, flags;
+	u64 frame = 0, flags = 0;
 	struct page *page = NULL;
-	int flags2 = 0;
 
 	if (pte_present(pte)) {
-		frame = pte_pfn(pte);
-		flags = PM_PRESENT;
+		if (pm->show_pfn)
+			frame = pte_pfn(pte);
+		flags |= PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 		if (pte_soft_dirty(pte))
-			flags2 |= __PM_SOFT_DIRTY;
+			flags |= PM_SOFT_DIRTY;
 	} else if (is_swap_pte(pte)) {
 		swp_entry_t entry;
 		if (pte_swp_soft_dirty(pte))
-			flags2 |= __PM_SOFT_DIRTY;
+			flags |= PM_SOFT_DIRTY;
 		entry = pte_to_swp_entry(pte);
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
-		flags = PM_SWAP;
+		flags |= PM_SWAP;
 		if (is_migration_entry(entry))
 			page = migration_entry_to_page(entry);
-	} else {
-		if (vma->vm_flags & VM_SOFTDIRTY)
-			flags2 |= __PM_SOFT_DIRTY;
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
-		return;
 	}
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if ((vma->vm_flags & VM_SOFTDIRTY))
-		flags2 |= __PM_SOFT_DIRTY;
+	if (page && page_mapcount(page) == 1)
+		flags |= PM_MMAP_EXCLUSIVE;
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		flags |= PM_SOFT_DIRTY;
 
-	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
+	return make_pme(frame, flags);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
-		pmd_t pmd, int offset, int pmd_flags2)
-{
-	/*
-	 * Currently pmd for thp is always present because thp can not be
-	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
-	 * This if-check is just to prepare for future implementation.
-	 */
-	if (pmd_present(pmd))
-		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
-				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
-	else
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
-}
-#else
-static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
-		pmd_t pmd, int offset, int pmd_flags2)
-{
-}
-#endif
-
-static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
@@ -1111,41 +1068,58 @@
 	pte_t *pte, *orig_pte;
 	int err = 0;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		int pmd_flags2;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
+		u64 flags = 0, frame = 0;
+		pmd_t pmd = *pmdp;
 
-		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
-			pmd_flags2 = __PM_SOFT_DIRTY;
-		else
-			pmd_flags2 = 0;
+		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
+			flags |= PM_SOFT_DIRTY;
+
+		/*
+		 * Currently pmd for thp is always present because thp
+		 * can not be swapped-out, migrated, or HWPOISONed
+		 * (split in such cases instead.)
+		 * This if-check is just to prepare for future implementation.
+		 */
+		if (pmd_present(pmd)) {
+			struct page *page = pmd_page(pmd);
+
+			if (page_mapcount(page) == 1)
+				flags |= PM_MMAP_EXCLUSIVE;
+
+			flags |= PM_PRESENT;
+			if (pm->show_pfn)
+				frame = pmd_pfn(pmd) +
+					((addr & ~PMD_MASK) >> PAGE_SHIFT);
+		}
 
 		for (; addr != end; addr += PAGE_SIZE) {
-			unsigned long offset;
-			pagemap_entry_t pme;
+			pagemap_entry_t pme = make_pme(frame, flags);
 
-			offset = (addr & ~PAGEMAP_WALK_MASK) >>
-					PAGE_SHIFT;
-			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
 			err = add_to_pagemap(addr, &pme, pm);
 			if (err)
 				break;
+			if (pm->show_pfn && (flags & PM_PRESENT))
+				frame++;
 		}
 		spin_unlock(ptl);
 		return err;
 	}
 
-	if (pmd_trans_unstable(pmd))
+	if (pmd_trans_unstable(pmdp))
 		return 0;
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	/*
 	 * We can assume that @vma always points to a valid one and @end never
 	 * goes beyond vma->vm_end.
 	 */
-	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
 	for (; addr < end; pte++, addr += PAGE_SIZE) {
 		pagemap_entry_t pme;
 
-		pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
 		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			break;
@@ -1158,40 +1132,44 @@
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
-					pte_t pte, int offset, int flags2)
-{
-	if (pte_present(pte))
-		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
-				PM_STATUS2(pm->v2, flags2)		|
-				PM_PRESENT);
-	else
-		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
-				PM_STATUS2(pm->v2, flags2));
-}
-
 /* This function walks within one hugetlb entry in the single call */
-static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
+static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 				 unsigned long addr, unsigned long end,
 				 struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	u64 flags = 0, frame = 0;
 	int err = 0;
-	int flags2;
-	pagemap_entry_t pme;
+	pte_t pte;
 
 	if (vma->vm_flags & VM_SOFTDIRTY)
-		flags2 = __PM_SOFT_DIRTY;
-	else
-		flags2 = 0;
+		flags |= PM_SOFT_DIRTY;
+
+	pte = huge_ptep_get(ptep);
+	if (pte_present(pte)) {
+		struct page *page = pte_page(pte);
+
+		if (!PageAnon(page))
+			flags |= PM_FILE;
+
+		if (page_mapcount(page) == 1)
+			flags |= PM_MMAP_EXCLUSIVE;
+
+		flags |= PM_PRESENT;
+		if (pm->show_pfn)
+			frame = pte_pfn(pte) +
+				((addr & ~hmask) >> PAGE_SHIFT);
+	}
 
 	for (; addr != end; addr += PAGE_SIZE) {
-		int offset = (addr & ~hmask) >> PAGE_SHIFT;
-		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
+		pagemap_entry_t pme = make_pme(frame, flags);
+
 		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			return err;
+		if (pm->show_pfn && (flags & PM_PRESENT))
+			frame++;
 	}
 
 	cond_resched();
@@ -1209,7 +1187,9 @@
  * Bits 0-54  page frame number (PFN) if present
  * Bits 0-4   swap type if swapped
  * Bits 5-54  swap offset if swapped
- * Bits 55-60 page shift (page size = 1<<page shift)
+ * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
+ * Bit  56    page exclusively mapped
+ * Bits 57-60 zero
  * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
@@ -1227,42 +1207,37 @@
 static ssize_t pagemap_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
-	struct task_struct *task = get_proc_task(file_inode(file));
-	struct mm_struct *mm;
+	struct mm_struct *mm = file->private_data;
 	struct pagemapread pm;
-	int ret = -ESRCH;
 	struct mm_walk pagemap_walk = {};
 	unsigned long src;
 	unsigned long svpfn;
 	unsigned long start_vaddr;
 	unsigned long end_vaddr;
-	int copied = 0;
+	int ret = 0, copied = 0;
 
-	if (!task)
+	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
 		goto out;
 
 	ret = -EINVAL;
 	/* file position must be aligned */
 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
-		goto out_task;
+		goto out_mm;
 
 	ret = 0;
 	if (!count)
-		goto out_task;
+		goto out_mm;
 
-	pm.v2 = soft_dirty_cleared;
+	/* do not disclose physical addresses: attack vector */
+	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
+
 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
 	ret = -ENOMEM;
 	if (!pm.buffer)
-		goto out_task;
+		goto out_mm;
 
-	mm = mm_access(task, PTRACE_MODE_READ);
-	ret = PTR_ERR(mm);
-	if (!mm || IS_ERR(mm))
-		goto out_free;
-
-	pagemap_walk.pmd_entry = pagemap_pte_range;
+	pagemap_walk.pmd_entry = pagemap_pmd_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
 #ifdef CONFIG_HUGETLB_PAGE
 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -1273,10 +1248,10 @@
 	src = *ppos;
 	svpfn = src / PM_ENTRY_BYTES;
 	start_vaddr = svpfn << PAGE_SHIFT;
-	end_vaddr = TASK_SIZE_OF(task);
+	end_vaddr = mm->task_size;
 
 	/* watch out for wraparound */
-	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
+	if (svpfn > mm->task_size >> PAGE_SHIFT)
 		start_vaddr = end_vaddr;
 
 	/*
@@ -1303,7 +1278,7 @@
 		len = min(count, PM_ENTRY_BYTES * pm.pos);
 		if (copy_to_user(buf, pm.buffer, len)) {
 			ret = -EFAULT;
-			goto out_mm;
+			goto out_free;
 		}
 		copied += len;
 		buf += len;
@@ -1313,24 +1288,31 @@
 	if (!ret || ret == PM_END_OF_BUFFER)
 		ret = copied;
 
-out_mm:
-	mmput(mm);
 out_free:
 	kfree(pm.buffer);
-out_task:
-	put_task_struct(task);
+out_mm:
+	mmput(mm);
 out:
 	return ret;
 }
 
 static int pagemap_open(struct inode *inode, struct file *file)
 {
-	/* do not disclose physical addresses: attack vector */
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
-			"to stop being page-shift some time soon. See the "
-			"linux/Documentation/vm/pagemap.txt for details.\n");
+	struct mm_struct *mm;
+
+	mm = proc_mem_open(inode, PTRACE_MODE_READ);
+	if (IS_ERR(mm))
+		return PTR_ERR(mm);
+	file->private_data = mm;
+	return 0;
+}
+
+static int pagemap_release(struct inode *inode, struct file *file)
+{
+	struct mm_struct *mm = file->private_data;
+
+	if (mm)
+		mmdrop(mm);
 	return 0;
 }
 
@@ -1338,6 +1320,7 @@
 	.llseek		= mem_lseek, /* borrow this */
 	.read		= pagemap_read,
 	.open		= pagemap_open,
+	.release	= pagemap_release,
 };
 #endif /* CONFIG_PROC_PAGE_MONITOR */
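
After this rework, bits 55-60 of a pagemap entry no longer encode the
page shift: bit 55 is soft-dirty, bit 56 is "exclusively mapped", and
the PFN in bits 0-54 reads back as zero unless the opener has
CAP_SYS_ADMIN. A userspace sketch decoding one entry under the new
layout documented above:

	#include <stdint.h>
	#include <stdio.h>

	#define PM_PFRAME_MASK		((UINT64_C(1) << 55) - 1)
	#define PM_SOFT_DIRTY		(UINT64_C(1) << 55)
	#define PM_MMAP_EXCLUSIVE	(UINT64_C(1) << 56)
	#define PM_FILE			(UINT64_C(1) << 61)
	#define PM_SWAP			(UINT64_C(1) << 62)
	#define PM_PRESENT		(UINT64_C(1) << 63)

	static void decode_pme(uint64_t pme)
	{
		printf("pfn=%llu%s%s%s%s%s\n",
		       (unsigned long long)(pme & PM_PFRAME_MASK),
		       pme & PM_PRESENT ? " present" : "",
		       pme & PM_SWAP ? " swapped" : "",
		       pme & PM_FILE ? " file/shared-anon" : "",
		       pme & PM_SOFT_DIRTY ? " soft-dirty" : "",
		       pme & PM_MMAP_EXCLUSIVE ? " exclusive" : "");
	}

	int main(void)
	{
		/* A made-up entry: present, exclusive, pfn 0x1234. */
		decode_pme(PM_PRESENT | PM_MMAP_EXCLUSIVE | 0x1234);
		return 0;
	}
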
 
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 20d1f74..ef0d64b 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -247,7 +247,7 @@
 EXPORT_SYMBOL(dqstats);
 
 static qsize_t inode_get_rsv_space(struct inode *inode);
-static void __dquot_initialize(struct inode *inode, int type);
+static int __dquot_initialize(struct inode *inode, int type);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, struct kqid qid)
@@ -832,16 +832,17 @@
 struct dquot *dqget(struct super_block *sb, struct kqid qid)
 {
 	unsigned int hashent = hashfn(sb, qid);
-	struct dquot *dquot = NULL, *empty = NULL;
+	struct dquot *dquot, *empty = NULL;
 
         if (!sb_has_quota_active(sb, qid.type))
-		return NULL;
+		return ERR_PTR(-ESRCH);
 we_slept:
 	spin_lock(&dq_list_lock);
 	spin_lock(&dq_state_lock);
 	if (!sb_has_quota_active(sb, qid.type)) {
 		spin_unlock(&dq_state_lock);
 		spin_unlock(&dq_list_lock);
+		dquot = ERR_PTR(-ESRCH);
 		goto out;
 	}
 	spin_unlock(&dq_state_lock);
@@ -876,11 +877,15 @@
 	 * already finished or it will be canceled due to dq_count > 1 test */
 	wait_on_dquot(dquot);
 	/* Read the dquot / allocate space in quota file */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
-	    sb->dq_op->acquire_dquot(dquot) < 0) {
-		dqput(dquot);
-		dquot = NULL;
-		goto out;
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		int err;
+
+		err = sb->dq_op->acquire_dquot(dquot);
+		if (err < 0) {
+			dqput(dquot);
+			dquot = ERR_PTR(err);
+			goto out;
+		}
 	}
 #ifdef CONFIG_QUOTA_DEBUG
 	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
@@ -923,7 +928,7 @@
 	int reserved = 0;
 #endif
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
@@ -934,7 +939,7 @@
 		}
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&sb->s_inode_list_lock);
 
 #ifdef CONFIG_QUOTA_DEBUG
 		if (unlikely(inode_get_rsv_space(inode) > 0))
@@ -946,15 +951,15 @@
 		/*
 		 * We hold a reference to 'inode' so it couldn't have been
 		 * removed from s_inodes list while we dropped the
-		 * inode_sb_list_lock We cannot iput the inode now as we can be
+		 * s_inode_list_lock. We cannot iput the inode now as we can be
 		 * holding the last reference and we cannot iput it under
-		 * inode_sb_list_lock. So we keep the reference and iput it
+		 * s_inode_list_lock. So we keep the reference and iput it
 		 * later.
 		 */
 		old_inode = inode;
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&sb->s_inode_list_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 	iput(old_inode);
 
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1023,7 +1028,7 @@
 	struct inode *inode;
 	int reserved = 0;
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		/*
 		 *  We have to scan also I_NEW inodes because they can already
@@ -1039,7 +1044,7 @@
 		}
 		spin_unlock(&dq_data_lock);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
 	if (reserved) {
 		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
@@ -1390,15 +1395,16 @@
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
-static void __dquot_initialize(struct inode *inode, int type)
+static int __dquot_initialize(struct inode *inode, int type)
 {
 	int cnt, init_needed = 0;
 	struct dquot **dquots, *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;
+	int ret = 0;
 
 	if (!dquot_active(inode))
-		return;
+		return 0;
 
 	dquots = i_dquot(inode);
 
@@ -1407,6 +1413,7 @@
 		struct kqid qid;
 		kprojid_t projid;
 		int rc;
+		struct dquot *dquot;
 
 		got[cnt] = NULL;
 		if (type != -1 && cnt != type)
@@ -1438,16 +1445,25 @@
 			qid = make_kqid_projid(projid);
 			break;
 		}
-		got[cnt] = dqget(sb, qid);
+		dquot = dqget(sb, qid);
+		if (IS_ERR(dquot)) {
+			/* We raced with somebody turning quotas off... */
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		got[cnt] = dquot;
 	}
 
 	/* All required i_dquot has been initialized */
 	if (!init_needed)
-		return;
+		return 0;
 
 	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode))
-		goto out_err;
+		goto out_lock;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
@@ -1469,15 +1485,18 @@
 				dquot_resv_space(dquots[cnt], rsv);
 		}
 	}
-out_err:
+out_lock:
 	spin_unlock(&dq_data_lock);
+out_put:
 	/* Drop unused references */
 	dqput_all(got);
+
+	return ret;
 }
 
-void dquot_initialize(struct inode *inode)
+int dquot_initialize(struct inode *inode)
 {
-	__dquot_initialize(inode, -1);
+	return __dquot_initialize(inode, -1);
 }
 EXPORT_SYMBOL(dquot_initialize);
 
@@ -1961,18 +1980,37 @@
 int dquot_transfer(struct inode *inode, struct iattr *iattr)
 {
 	struct dquot *transfer_to[MAXQUOTAS] = {};
+	struct dquot *dquot;
 	struct super_block *sb = inode->i_sb;
 	int ret;
 
 	if (!dquot_active(inode))
 		return 0;
 
-	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
-		transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
-	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
-		transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
-
+	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) {
+		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[USRQUOTA] = dquot;
+	}
+	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) {
+		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[GRPQUOTA] = dquot;
+	}
 	ret = __dquot_transfer(inode, transfer_to);
+out_put:
 	dqput_all(transfer_to);
 	return ret;
 }
@@ -2518,8 +2556,8 @@
 	struct dquot *dquot;
 
 	dquot = dqget(sb, qid);
-	if (!dquot)
-		return -ESRCH;
+	if (IS_ERR(dquot))
+		return PTR_ERR(dquot);
 	do_get_dqblk(dquot, di);
 	dqput(dquot);
 
@@ -2631,8 +2669,8 @@
 	int rc;
 
 	dquot = dqget(sb, qid);
-	if (!dquot) {
-		rc = -ESRCH;
+	if (IS_ERR(dquot)) {
+		rc = PTR_ERR(dquot);
 		goto out;
 	}
 	rc = do_set_dqblk(dquot, di);
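
dqget() now reports why it failed instead of returning a bare NULL,
using the kernel's ERR_PTR encoding (a small negative errno folded into
the top of the pointer range). A stand-alone sketch of the helpers and
of the caller pattern the hunks above adopt, where -ESRCH ("quotas are
off") is the one tolerated error; my_dqget() stands in for dqget():

	#include <errno.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct dquot;

	static struct dquot *my_dqget(void)
	{
		return ERR_PTR(-ESRCH);	/* pretend quotas are off */
	}

	int use_dquot(void)
	{
		struct dquot *dquot = my_dqget();

		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH)
				return PTR_ERR(dquot);	/* real failure */
			dquot = NULL;		/* quotas off: tolerated */
		}
		/* ... use dquot if non-NULL ... */
		return 0;
	}
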
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 86ded73..3746367 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -141,9 +141,9 @@
 	if (tstate->flags & QCI_ROOT_SQUASH)
 		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
 	uinfo.dqi_valid = IIF_ALL;
-	if (!ret && copy_to_user(addr, &uinfo, sizeof(uinfo)))
+	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
 		return -EFAULT;
-	return ret;
+	return 0;
 }
 
 static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index f6f2fba..3d8e7e6 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3319,8 +3319,11 @@
 	/* must be turned off for recursive notify_change calls */
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	reiserfs_write_lock(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/*
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index b55a074..5f1c9c2 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -613,8 +613,7 @@
 	 * we have to set uid and gid here
 	 */
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
-	return 0;
+	return dquot_initialize(inode);
 }
 
 static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -633,12 +632,18 @@
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -710,12 +715,18 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -787,7 +798,9 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
 	/*
@@ -800,7 +813,11 @@
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -899,7 +916,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	retval = journal_begin(&th, dir->i_sb, jbegin_count);
@@ -985,7 +1004,9 @@
 	int jbegin_count;
 	unsigned long savelink;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	inode = d_inode(dentry);
 
@@ -1095,12 +1116,18 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb));
 
-	dquot_initialize(parent_dir);
+	retval = dquot_initialize(parent_dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(parent_dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, parent_dir, mode);
+	retval = new_inode_init(inode, parent_dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	retval = reiserfs_security_init(parent_dir, inode, &dentry->d_name,
 					&security);
@@ -1184,7 +1211,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	if (inode->i_nlink >= REISERFS_LINK_MAX) {
@@ -1308,8 +1337,12 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 + 5 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	retval = dquot_initialize(old_dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new_dir);
+	if (retval)
+		return retval;
 
 	old_inode = d_inode(old_dentry);
 	new_dentry_inode = d_inode(new_dentry);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 0e4cf72..4a62fe8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -714,18 +714,20 @@
 		seq_puts(seq, ",acl");
 
 	if (REISERFS_SB(s)->s_jdev)
-		seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
+		seq_show_option(seq, "jdev", REISERFS_SB(s)->s_jdev);
 
 	if (journal->j_max_commit_age != journal->j_default_max_commit_age)
 		seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
 
 #ifdef CONFIG_QUOTA
 	if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
+		seq_show_option(seq, "usrjquota",
+				REISERFS_SB(s)->s_qf_names[USRQUOTA]);
 	else if (opts & (1 << REISERFS_USRQUOTA))
 		seq_puts(seq, ",usrquota");
 	if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
+		seq_show_option(seq, "grpjquota",
+				REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
 	else if (opts & (1 << REISERFS_GRPQUOTA))
 		seq_puts(seq, ",grpquota");
 	if (REISERFS_SB(s)->s_jquota_fmt) {
diff --git a/fs/super.c b/fs/super.c
index b613723..954aeb8 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -135,6 +135,24 @@
 	return total_objects;
 }
 
+static void destroy_super_work(struct work_struct *work)
+{
+	struct super_block *s = container_of(work, struct super_block,
+							destroy_work);
+	int i;
+
+	for (i = 0; i < SB_FREEZE_LEVELS; i++)
+		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
+	kfree(s);
+}
+
+static void destroy_super_rcu(struct rcu_head *head)
+{
+	struct super_block *s = container_of(head, struct super_block, rcu);
+	INIT_WORK(&s->destroy_work, destroy_super_work);
+	schedule_work(&s->destroy_work);
+}
+
 /**
  *	destroy_super	-	frees a superblock
  *	@s: superblock to free
@@ -143,16 +161,13 @@
  */
 static void destroy_super(struct super_block *s)
 {
-	int i;
 	list_lru_destroy(&s->s_dentry_lru);
 	list_lru_destroy(&s->s_inode_lru);
-	for (i = 0; i < SB_FREEZE_LEVELS; i++)
-		percpu_counter_destroy(&s->s_writers.counter[i]);
 	security_sb_free(s);
 	WARN_ON(!list_empty(&s->s_mounts));
 	kfree(s->s_subtype);
 	kfree(s->s_options);
-	kfree_rcu(s, rcu);
+	call_rcu(&s->rcu, destroy_super_rcu);
 }
 
 /**
@@ -178,19 +193,19 @@
 		goto fail;
 
 	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
-		if (percpu_counter_init(&s->s_writers.counter[i], 0,
-					GFP_KERNEL) < 0)
+		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
+					sb_writers_name[i],
+					&type->s_writers_key[i]))
 			goto fail;
-		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
-				 &type->s_writers_key[i], 0);
 	}
-	init_waitqueue_head(&s->s_writers.wait);
 	init_waitqueue_head(&s->s_writers.wait_unfrozen);
 	s->s_bdi = &noop_backing_dev_info;
 	s->s_flags = flags;
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
+	mutex_init(&s->s_sync_lock);
 	INIT_LIST_HEAD(&s->s_inodes);
+	spin_lock_init(&s->s_inode_list_lock);
 
 	if (list_lru_init_memcg(&s->s_dentry_lru))
 		goto fail;
@@ -399,7 +414,7 @@
 		sync_filesystem(sb);
 		sb->s_flags &= ~MS_ACTIVE;
 
-		fsnotify_unmount_inodes(&sb->s_inodes);
+		fsnotify_unmount_inodes(sb);
 
 		evict_inodes(sb);
 
@@ -1146,72 +1161,46 @@
  */
 void __sb_end_write(struct super_block *sb, int level)
 {
-	percpu_counter_dec(&sb->s_writers.counter[level-1]);
-	/*
-	 * Make sure s_writers are updated before we wake up waiters in
-	 * freeze_super().
-	 */
-	smp_mb();
-	if (waitqueue_active(&sb->s_writers.wait))
-		wake_up(&sb->s_writers.wait);
-	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
+	percpu_up_read(sb->s_writers.rw_sem + level-1);
 }
 EXPORT_SYMBOL(__sb_end_write);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * We want lockdep to tell us about possible deadlocks with freezing but
- * it's it bit tricky to properly instrument it. Getting a freeze protection
- * works as getting a read lock but there are subtle problems. XFS for example
- * gets freeze protection on internal level twice in some cases, which is OK
- * only because we already hold a freeze protection also on higher level. Due
- * to these cases we have to tell lockdep we are doing trylock when we
- * already hold a freeze protection for a higher freeze level.
- */
-static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
-				unsigned long ip)
-{
-	int i;
-
-	if (!trylock) {
-		for (i = 0; i < level - 1; i++)
-			if (lock_is_held(&sb->s_writers.lock_map[i])) {
-				trylock = true;
-				break;
-			}
-	}
-	rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
-}
-#endif
-
 /*
  * This is an internal function, please use sb_start_{write,pagefault,intwrite}
  * instead.
  */
 int __sb_start_write(struct super_block *sb, int level, bool wait)
 {
-retry:
-	if (unlikely(sb->s_writers.frozen >= level)) {
-		if (!wait)
-			return 0;
-		wait_event(sb->s_writers.wait_unfrozen,
-			   sb->s_writers.frozen < level);
-	}
+	bool force_trylock = false;
+	int ret = 1;
 
 #ifdef CONFIG_LOCKDEP
-	acquire_freeze_lock(sb, level, !wait, _RET_IP_);
-#endif
-	percpu_counter_inc(&sb->s_writers.counter[level-1]);
 	/*
-	 * Make sure counter is updated before we check for frozen.
-	 * freeze_super() first sets frozen and then checks the counter.
+	 * We want lockdep to tell us about possible deadlocks with freezing
+	 * but it's a bit tricky to properly instrument it. Getting a freeze
+	 * protection works as getting a read lock but there are subtle
+	 * problems. XFS for example gets freeze protection on internal level
+	 * twice in some cases, which is OK only because we already hold a
+	 * freeze protection also on higher level. Due to these cases we have
+	 * to use wait == false (trylock mode), which must not fail.
 	 */
-	smp_mb();
-	if (unlikely(sb->s_writers.frozen >= level)) {
-		__sb_end_write(sb, level);
-		goto retry;
+	if (wait) {
+		int i;
+
+		for (i = 0; i < level - 1; i++)
+			if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
+				force_trylock = true;
+				break;
+			}
 	}
-	return 1;
+#endif
+	if (wait && !force_trylock)
+		percpu_down_read(sb->s_writers.rw_sem + level-1);
+	else
+		ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
+
+	WARN_ON(force_trylock && !ret);
+	return ret;
 }
 EXPORT_SYMBOL(__sb_start_write);
 
@@ -1221,37 +1210,33 @@
  * @level: type of writers we wait for (normal vs page fault)
  *
  * This function waits until there are no writers of given type to given file
- * system. Caller of this function should make sure there can be no new writers
- * of type @level before calling this function. Otherwise this function can
- * livelock.
+ * system.
  */
 static void sb_wait_write(struct super_block *sb, int level)
 {
-	s64 writers;
-
+	percpu_down_write(sb->s_writers.rw_sem + level-1);
 	/*
-	 * We just cycle-through lockdep here so that it does not complain
-	 * about returning with lock to userspace
+	 * We are going to return to userspace and forget about this lock; the
+	 * ownership goes to the caller of thaw_super(), which does the unlock.
+	 *
+	 * FIXME: we should do this before return from freeze_super() after we
+	 * called sync_filesystem(sb) and s_op->freeze_fs(sb), and thaw_super()
+	 * should re-acquire these locks before s_op->unfreeze_fs(sb). However
+	 * this leads to lockdep false-positives, so currently we do the early
+	 * release right after acquire.
 	 */
-	rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
-	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);
+	percpu_rwsem_release(sb->s_writers.rw_sem + level-1, 0, _THIS_IP_);
+}
 
-	do {
-		DEFINE_WAIT(wait);
+static void sb_freeze_unlock(struct super_block *sb)
+{
+	int level;
 
-		/*
-		 * We use a barrier in prepare_to_wait() to separate setting
-		 * of frozen and checking of the counter
-		 */
-		prepare_to_wait(&sb->s_writers.wait, &wait,
-				TASK_UNINTERRUPTIBLE);
+	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
+		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
 
-		writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
-		if (writers)
-			schedule();
-
-		finish_wait(&sb->s_writers.wait, &wait);
-	} while (writers);
+	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+		percpu_up_write(sb->s_writers.rw_sem + level);
 }
 
 /**
@@ -1310,20 +1295,14 @@
 		return 0;
 	}
 
-	/* From now on, no new normal writers can start */
 	sb->s_writers.frozen = SB_FREEZE_WRITE;
-	smp_wmb();
-
 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
 	up_write(&sb->s_umount);
-
 	sb_wait_write(sb, SB_FREEZE_WRITE);
+	down_write(&sb->s_umount);
 
 	/* Now we go and block page faults... */
-	down_write(&sb->s_umount);
 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
-	smp_wmb();
-
 	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
 
 	/* All writers are done so after syncing there won't be dirty data */
@@ -1331,7 +1310,6 @@
 
 	/* Now wait for internal filesystem counter */
 	sb->s_writers.frozen = SB_FREEZE_FS;
-	smp_wmb();
 	sb_wait_write(sb, SB_FREEZE_FS);
 
 	if (sb->s_op->freeze_fs) {
@@ -1340,7 +1318,7 @@
 			printk(KERN_ERR
 				"VFS:Filesystem freeze failed\n");
 			sb->s_writers.frozen = SB_UNFROZEN;
-			smp_wmb();
+			sb_freeze_unlock(sb);
 			wake_up(&sb->s_writers.wait_unfrozen);
 			deactivate_locked_super(sb);
 			return ret;
@@ -1372,8 +1350,10 @@
 		return -EINVAL;
 	}
 
-	if (sb->s_flags & MS_RDONLY)
+	if (sb->s_flags & MS_RDONLY) {
+		sb->s_writers.frozen = SB_UNFROZEN;
 		goto out;
+	}
 
 	if (sb->s_op->unfreeze_fs) {
 		error = sb->s_op->unfreeze_fs(sb);
@@ -1385,12 +1365,11 @@
 		}
 	}
 
-out:
 	sb->s_writers.frozen = SB_UNFROZEN;
-	smp_wmb();
+	sb_freeze_unlock(sb);
+out:
 	wake_up(&sb->s_writers.wait_unfrozen);
 	deactivate_locked_super(sb);
-
 	return 0;
 }
 EXPORT_SYMBOL(thaw_super);
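
The conversion above replaces the per-level counter, waitqueue and
lockdep map with one percpu_rw_semaphore per freeze level:
sb_start_write() becomes a per-cpu read lock and sb_wait_write() the
write side, so the "wait until writers drain" loop disappears. Reduced
to plain pthread rwlocks (a sketch: the kernel's semaphore is per-cpu,
lockdep-annotated, and hands write ownership across the freeze/thaw
syscalls as the FIXME above describes):

	#include <pthread.h>

	#define SB_FREEZE_LEVELS 3	/* write, pagefault, fs */

	static pthread_rwlock_t freeze[SB_FREEZE_LEVELS] = {
		PTHREAD_RWLOCK_INITIALIZER,
		PTHREAD_RWLOCK_INITIALIZER,
		PTHREAD_RWLOCK_INITIALIZER,
	};

	static void start_write(int level)	/* sb_start_write() */
	{
		pthread_rwlock_rdlock(&freeze[level]);
	}

	static void end_write(int level)	/* sb_end_write() */
	{
		pthread_rwlock_unlock(&freeze[level]);
	}

	static void freeze_all(void)		/* freeze_super() core */
	{
		for (int level = 0; level < SB_FREEZE_LEVELS; level++)
			pthread_rwlock_wrlock(&freeze[level]);
	}

	static void thaw_all(void)		/* sb_freeze_unlock() */
	{
		for (int level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
			pthread_rwlock_unlock(&freeze[level]);
	}
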
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b96f190..81155b9 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2070,6 +2070,7 @@
 	struct udf_options uopt;
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
+	bool lvid_open = false;
 
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = INVALID_UID;
@@ -2216,8 +2217,10 @@
 			 le16_to_cpu(ts.year), ts.month, ts.day,
 			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
 	}
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!(sb->s_flags & MS_RDONLY)) {
 		udf_open_lvid(sb);
+		lvid_open = true;
+	}
 
 	/* Assign the root inode */
 	/* assign inodes by physical block number */
@@ -2248,7 +2251,7 @@
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
 		unload_nls(sbi->s_nls_map);
 #endif
-	if (!(sb->s_flags & MS_RDONLY))
+	if (lvid_open)
 		udf_close_lvid(sb);
 	brelse(sbi->s_lvid_bh);
 	udf_sb_free_partitions(sb);
diff --git a/fs/ufs/Makefile b/fs/ufs/Makefile
index 4d0e02b..392db25 100644
--- a/fs/ufs/Makefile
+++ b/fs/ufs/Makefile
@@ -5,5 +5,5 @@
 obj-$(CONFIG_UFS_FS) += ufs.o
 
 ufs-objs := balloc.o cylinder.o dir.o file.o ialloc.o inode.o \
-	    namei.o super.o symlink.o truncate.o util.o
+	    namei.o super.o symlink.o util.o
 ccflags-$(CONFIG_UFS_DEBUG)    += -DDEBUG
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a7106ed..fb8b54e 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -417,7 +417,9 @@
 	if (oldcount == 0) {
 		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
 		if (result) {
+			write_seqlock(&UFS_I(inode)->meta_lock);
 			ufs_cpu_to_data_ptr(sb, p, result);
+			write_sequnlock(&UFS_I(inode)->meta_lock);
 			*err = 0;
 			UFS_I(inode)->i_lastfrag =
 				max(UFS_I(inode)->i_lastfrag, fragment + count);
@@ -473,7 +475,9 @@
 		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
 				   uspi->s_sbbase + tmp,
 				   uspi->s_sbbase + result, locked_page);
+		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 		*err = 0;
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
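
These write_seqlock() pairs are the writer half of a seqcount: the
lockless readers added in fs/ufs/inode.c below (grow_chain32() and
grow_chain64()) sample the sequence number, copy the block pointer, and
retry if the number changed meanwhile. A stand-alone sketch of that
retry protocol in C11 atomics, assuming a single writer (the kernel's
seqlock_t also embeds a spinlock to serialize writers):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic unsigned int seq;    /* odd while a write is in flight */
	static _Atomic uint64_t block_ptr;  /* the datum being republished */

	static void writer_update(uint64_t new_val)
	{
		atomic_fetch_add(&seq, 1);	/* mark write in progress */
		atomic_store(&block_ptr, new_val);
		atomic_fetch_add(&seq, 1);	/* sequence even again */
	}

	static uint64_t reader_read(void)
	{
		unsigned int s;
		uint64_t v;

		do {
			while ((s = atomic_load(&seq)) & 1)
				;		/* writer active, spin */
			v = atomic_load(&block_ptr);
		} while (atomic_load(&seq) != s);
		return v;
	}
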
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index f913a69..a064cf44 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -41,9 +41,7 @@
 #include "swab.h"
 #include "util.h"
 
-static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock);
-
-static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
+static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
 {
 	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
 	int ptrs = uspi->s_apb;
@@ -75,227 +73,232 @@
 	return n;
 }
 
+typedef struct {
+	void	*p;
+	union {
+		__fs32	key32;
+		__fs64	key64;
+	};
+	struct buffer_head *bh;
+} Indirect;
+
+static inline int grow_chain32(struct ufs_inode_info *ufsi,
+			       struct buffer_head *bh, __fs32 *v,
+			       Indirect *from, Indirect *to)
+{
+	Indirect *p;
+	unsigned seq;
+	to->bh = bh;
+	do {
+		seq = read_seqbegin(&ufsi->meta_lock);
+		to->key32 = *(__fs32 *)(to->p = v);
+		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
+			;
+	} while (read_seqretry(&ufsi->meta_lock, seq));
+	return (p > to);
+}
+
+static inline int grow_chain64(struct ufs_inode_info *ufsi,
+			       struct buffer_head *bh, __fs64 *v,
+			       Indirect *from, Indirect *to)
+{
+	Indirect *p;
+	unsigned seq;
+	to->bh = bh;
+	do {
+		seq = read_seqbegin(&ufsi->meta_lock);
+		to->key64 = *(__fs64 *)(to->p = v);
+		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
+			;
+	} while (read_seqretry(&ufsi->meta_lock, seq));
+	return (p > to);
+}
+
 /*
  * Returns the location of the fragment from
  * the beginning of the filesystem.
  */
 
-static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
+static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
 	int shift = uspi->s_apbshift-uspi->s_fpbshift;
-	sector_t offsets[4], *p;
-	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
-	u64  ret = 0L;
-	__fs32 block;
-	__fs64 u2_block = 0L;
+	Indirect chain[4], *q = chain;
+	unsigned *p;
 	unsigned flags = UFS_SB(sb)->s_flags;
-	u64 temp = 0L;
+	u64 res = 0;
 
-	UFSD(": frag = %llu  depth = %d\n", (unsigned long long)frag, depth);
 	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
 		uspi->s_fpbshift, uspi->s_apbmask,
 		(unsigned long long)mask);
 
 	if (depth == 0)
-		return 0;
+		goto no_block;
 
+again:
 	p = offsets;
 
-	if (needs_lock)
-		lock_ufs(sb);
 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 		goto ufs2;
 
-	block = ufsi->i_u1.i_data[*p++];
-	if (!block)
-		goto out;
+	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
+		goto changed;
+	if (!q->key32)
+		goto no_block;
 	while (--depth) {
+		__fs32 *ptr;
 		struct buffer_head *bh;
-		sector_t n = *p++;
+		unsigned n = *p++;
 
-		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
+		bh = sb_bread(sb, uspi->s_sbbase +
+				  fs32_to_cpu(sb, q->key32) + (n>>shift));
 		if (!bh)
-			goto out;
-		block = ((__fs32 *) bh->b_data)[n & mask];
-		brelse (bh);
-		if (!block)
-			goto out;
+			goto no_block;
+		ptr = (__fs32 *)bh->b_data + (n & mask);
+		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
+			goto changed;
+		if (!q->key32)
+			goto no_block;
 	}
-	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
-	goto out;
+	res = fs32_to_cpu(sb, q->key32);
+	goto found;
+
 ufs2:
-	u2_block = ufsi->i_u1.u2_i_data[*p++];
-	if (!u2_block)
-		goto out;
-
+	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
+		goto changed;
+	if (!q->key64)
+		goto no_block;
 
 	while (--depth) {
+		__fs64 *ptr;
 		struct buffer_head *bh;
-		sector_t n = *p++;
+		unsigned n = *p++;
 
-
-		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
-		bh = sb_bread(sb, temp +(u64) (n>>shift));
+		bh = sb_bread(sb, uspi->s_sbbase +
+				  fs64_to_cpu(sb, q->key64) + (n>>shift));
 		if (!bh)
-			goto out;
-		u2_block = ((__fs64 *)bh->b_data)[n & mask];
-		brelse(bh);
-		if (!u2_block)
-			goto out;
+			goto no_block;
+		ptr = (__fs64 *)bh->b_data + (n & mask);
+		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
+			goto changed;
+		if (!q->key64)
+			goto no_block;
 	}
-	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
-	ret = temp + (u64) (frag & uspi->s_fpbmask);
+	res = fs64_to_cpu(sb, q->key64);
+found:
+	res += uspi->s_sbbase;
+no_block:
+	while (q > chain) {
+		brelse(q->bh);
+		q--;
+	}
+	return res;
 
-out:
-	if (needs_lock)
-		unlock_ufs(sb);
-	return ret;
+changed:
+	while (q > chain) {
+		brelse(q->bh);
+		q--;
+	}
+	goto again;
+}
+
+/*
+ * Unpacking tails: we have a file with a partial final block and
+ * we have been asked to extend it.  If the fragment being written
+ * is within the same block, we need to extend the tail just to cover
+ * that fragment.  Otherwise the tail is extended to a full block.
+ *
+ * Note that we might need to create a _new_ tail, but that will
+ * be handled elsewhere; this is strictly for resizing old
+ * ones.
+ */
+static bool
+ufs_extend_tail(struct inode *inode, u64 writes_to,
+		  int *err, struct page *locked_page)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
+	unsigned block = ufs_fragstoblks(lastfrag);
+	unsigned new_size;
+	void *p;
+	u64 tmp;
+
+	if (writes_to < (lastfrag | uspi->s_fpbmask))
+		new_size = (writes_to & uspi->s_fpbmask) + 1;
+	else
+		new_size = uspi->s_fpb;
+
+	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
+	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
+				new_size, err, locked_page);
+	return tmp != 0;
 }
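+
+/*
+ * Worked example for the sizing above (illustrative numbers only):
+ * with s_fpb == 8 (s_fpbmask == 7) and i_lastfrag == 21, the tail
+ * block spans fragments 16..23.  A write to fragment 18 gives
+ * new_size = (18 & 7) + 1 == 3 fragments, while a write at or past
+ * fragment 23 extends the tail to the full 8-fragment block.
+ */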
 
 /**
  * ufs_inode_getfrag() - allocate new fragment(s)
  * @inode: pointer to inode
- * @fragment: number of `fragment' which hold pointer
- *   to new allocated fragment(s)
+ * @index: number of block pointer within the inode's array.
  * @new_fragment: number of new allocated fragment(s)
- * @required: how many fragment(s) we require
  * @err: we set it if something wrong
- * @phys: pointer to where we save physical number of new allocated fragments,
- *   NULL if we allocate not data(indirect blocks for example).
  * @new: we set it if we allocate new block
  * @locked_page: for ufs_new_fragments()
  */
-static struct buffer_head *
-ufs_inode_getfrag(struct inode *inode, u64 fragment,
-		  sector_t new_fragment, unsigned int required, int *err,
-		  long *phys, int *new, struct page *locked_page)
+static u64
+ufs_inode_getfrag(struct inode *inode, unsigned index,
+		  sector_t new_fragment, int *err,
+		  int *new, struct page *locked_page)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	struct buffer_head * result;
-	unsigned blockoff, lastblockoff;
-	u64 tmp, goal, lastfrag, block, lastblock;
-	void *p, *p2;
-
-	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
-	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
-	     (unsigned long long)new_fragment, required, !phys);
+	u64 tmp, goal, lastfrag;
+	unsigned nfrags = uspi->s_fpb;
+	void *p;
 
         /* TODO : to be done for write support
         if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
              goto ufs2;
          */
 
-	block = ufs_fragstoblks (fragment);
-	blockoff = ufs_fragnum (fragment);
-	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
-
-	goal = 0;
-
-repeat:
+	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
 	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (tmp)
+		goto out;
 
 	lastfrag = ufsi->i_lastfrag;
-	if (tmp && fragment < lastfrag) {
-		if (!phys) {
-			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-			if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
-				UFSD("EXIT, result %llu\n",
-				     (unsigned long long)tmp + blockoff);
-				return result;
-			}
-			brelse (result);
-			goto repeat;
-		} else {
-			*phys = uspi->s_sbbase + tmp + blockoff;
-			return NULL;
-		}
-	}
 
-	lastblock = ufs_fragstoblks (lastfrag);
-	lastblockoff = ufs_fragnum (lastfrag);
-	/*
-	 * We will extend file into new block beyond last allocated block
-	 */
-	if (lastblock < block) {
-		/*
-		 * We must reallocate last allocated block
-		 */
-		if (lastblockoff) {
-			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
-			tmp = ufs_new_fragments(inode, p2, lastfrag,
-						ufs_data_ptr_to_cpu(sb, p2),
-						uspi->s_fpb - lastblockoff,
-						err, locked_page);
-			if (!tmp) {
-				if (lastfrag != ufsi->i_lastfrag)
-					goto repeat;
-				else
-					return NULL;
-			}
-			lastfrag = ufsi->i_lastfrag;
-			
-		}
-		tmp = ufs_data_ptr_to_cpu(sb,
-					 ufs_get_direct_data_ptr(uspi, ufsi,
-								 lastblock));
-		if (tmp)
-			goal = tmp + uspi->s_fpb;
-		tmp = ufs_new_fragments (inode, p, fragment - blockoff, 
-					 goal, required + blockoff,
-					 err,
-					 phys != NULL ? locked_page : NULL);
-	} else if (lastblock == block) {
-	/*
-	 * We will extend last allocated block
-	 */
-		tmp = ufs_new_fragments(inode, p, fragment -
-					(blockoff - lastblockoff),
-					ufs_data_ptr_to_cpu(sb, p),
-					required +  (blockoff - lastblockoff),
-					err, phys != NULL ? locked_page : NULL);
-	} else /* (lastblock > block) */ {
-	/*
-	 * We will allocate new block before last allocated block
-	 */
-		if (block) {
-			tmp = ufs_data_ptr_to_cpu(sb,
-						 ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
-			if (tmp)
-				goal = tmp + uspi->s_fpb;
-		}
-		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
-					goal, uspi->s_fpb, err,
-					phys != NULL ? locked_page : NULL);
+	/* will that be a new tail? */
+	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
+		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
+
+	goal = 0;
+	if (index) {
+		goal = ufs_data_ptr_to_cpu(sb,
+				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
+		if (goal)
+			goal += uspi->s_fpb;
 	}
+	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
+				goal, uspi->s_fpb, err, locked_page);
+
 	if (!tmp) {
-		if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
-		    (blockoff && lastfrag != ufsi->i_lastfrag))
-			goto repeat;
 		*err = -ENOSPC;
-		return NULL;
+		return 0;
 	}
 
-	if (!phys) {
-		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-	} else {
-		*phys = uspi->s_sbbase + tmp + blockoff;
-		result = NULL;
-		*err = 0;
+	if (new)
 		*new = 1;
-	}
-
 	inode->i_ctime = CURRENT_TIME_SEC;
 	if (IS_SYNC(inode))
 		ufs_sync_inode (inode);
 	mark_inode_dirty(inode);
-	UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
-	return result;
+out:
+	return tmp + uspi->s_sbbase;
 
      /* This part : To be implemented ....
         Required only for writing, not required for READ-ONLY.
@@ -316,95 +319,70 @@
 /**
  * ufs_inode_getblock() - allocate new block
  * @inode: pointer to inode
- * @bh: pointer to block which hold "pointer" to new allocated block
- * @fragment: number of `fragment' which hold pointer
- *   to new allocated block
+ * @ind_block: block number of the indirect block
+ * @index: number of pointer within the indirect block
  * @new_fragment: number of new allocated fragment
  *  (block will hold this fragment and also uspi->s_fpb-1)
  * @err: see ufs_inode_getfrag()
- * @phys: see ufs_inode_getfrag()
  * @new: see ufs_inode_getfrag()
  * @locked_page: see ufs_inode_getfrag()
  */
-static struct buffer_head *
-ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
-		  u64 fragment, sector_t new_fragment, int *err,
-		  long *phys, int *new, struct page *locked_page)
+static u64
+ufs_inode_getblock(struct inode *inode, u64 ind_block,
+		  unsigned index, sector_t new_fragment, int *err,
+		  int *new, struct page *locked_page)
 {
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	struct buffer_head * result;
-	unsigned blockoff;
-	u64 tmp, goal, block;
+	int shift = uspi->s_apbshift - uspi->s_fpbshift;
+	u64 tmp = 0, goal;
+	struct buffer_head *bh;
 	void *p;
 
-	block = ufs_fragstoblks (fragment);
-	blockoff = ufs_fragnum (fragment);
+	if (!ind_block)
+		return 0;
 
-	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
-	     inode->i_ino, (unsigned long long)fragment,
-	     (unsigned long long)new_fragment, !phys);
-
-	result = NULL;
-	if (!bh)
-		goto out;
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block (READ, 1, &bh);
-		wait_on_buffer (bh);
-		if (!buffer_uptodate(bh))
-			goto out;
+	bh = sb_bread(sb, ind_block + (index >> shift));
+	if (unlikely(!bh)) {
+		*err = -EIO;
+		return 0;
 	}
+
+	index &= uspi->s_apbmask >> uspi->s_fpbshift;
 	if (uspi->fs_magic == UFS2_MAGIC)
-		p = (__fs64 *)bh->b_data + block;
+		p = (__fs64 *)bh->b_data + index;
 	else
-		p = (__fs32 *)bh->b_data + block;
-repeat:
-	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (tmp) {
-		if (!phys) {
-			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-			if (tmp == ufs_data_ptr_to_cpu(sb, p))
-				goto out;
-			brelse (result);
-			goto repeat;
-		} else {
-			*phys = uspi->s_sbbase + tmp + blockoff;
-			goto out;
-		}
-	}
+		p = (__fs32 *)bh->b_data + index;
 
-	if (block && (uspi->fs_magic == UFS2_MAGIC ?
-		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
-		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
+	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (tmp)
+		goto out;
+
+	if (index && (uspi->fs_magic == UFS2_MAGIC ?
+		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
+		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
 		goal = tmp + uspi->s_fpb;
 	else
 		goal = bh->b_blocknr + uspi->s_fpb;
 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
 				uspi->s_fpb, err, locked_page);
-	if (!tmp) {
-		if (ufs_data_ptr_to_cpu(sb, p))
-			goto repeat;
+	if (!tmp)
 		goto out;
-	}		
 
-
-	if (!phys) {
-		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-	} else {
-		*phys = uspi->s_sbbase + tmp + blockoff;
+	if (new)
 		*new = 1;
-	}
 
 	mark_buffer_dirty(bh);
 	if (IS_SYNC(inode))
 		sync_dirty_buffer(bh);
 	inode->i_ctime = CURRENT_TIME_SEC;
 	mark_inode_dirty(inode);
-	UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
 out:
 	brelse (bh);
 	UFSD("EXIT\n");
-	return result;
+	if (tmp)
+		tmp += uspi->s_sbbase;
+	return tmp;
 }
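+
+/*
+ * Illustrative arithmetic for the indirect lookup above (assumed
+ * UFS1-style geometry: 8 KiB blocks, 1 KiB fragments, 4-byte block
+ * pointers): s_apbshift == 11 and s_fpbshift == 3 give shift == 8,
+ * so pointer index 600 lives in fragment 600 >> 8 == 2 of the
+ * indirect block, at slot 600 & 255 == 88 within that fragment.
+ */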
 
 /**
@@ -412,103 +390,64 @@
  * readpage, writepage and so on
  */
 
-int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
+static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
 {
-	struct super_block * sb = inode->i_sb;
-	struct ufs_sb_info * sbi = UFS_SB(sb);
-	struct ufs_sb_private_info * uspi = sbi->s_uspi;
-	struct buffer_head * bh;
-	int ret, err, new;
-	unsigned long ptr,phys;
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int err = 0, new = 0;
+	unsigned offsets[4];
+	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
 	u64 phys64 = 0;
-	bool needs_lock = (sbi->mutex_owner != current);
-	
+	unsigned frag = fragment & uspi->s_fpbmask;
+
 	if (!create) {
-		phys64 = ufs_frag_map(inode, fragment, needs_lock);
-		UFSD("phys64 = %llu\n", (unsigned long long)phys64);
-		if (phys64)
-			map_bh(bh_result, sb, phys64);
-		return 0;
+		phys64 = ufs_frag_map(inode, offsets, depth);
+		goto out;
 	}
 
         /* This code entered only while writing ....? */
 
-	err = -EIO;
-	new = 0;
-	ret = 0;
-	bh = NULL;
-
-	if (needs_lock)
-		lock_ufs(sb);
+	mutex_lock(&UFS_I(inode)->truncate_mutex);
 
 	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
-	if (fragment >
-	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
-	     << uspi->s_fpbshift))
-		goto abort_too_big;
-
-	err = 0;
-	ptr = fragment;
-	  
-	/*
-	 * ok, these macros clean the logic up a bit and make
-	 * it much more readable:
-	 */
-#define GET_INODE_DATABLOCK(x) \
-	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
-			  bh_result->b_page)
-#define GET_INODE_PTR(x) \
-	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
-			  bh_result->b_page)
-#define GET_INDIRECT_DATABLOCK(x) \
-	ufs_inode_getblock(inode, bh, x, fragment,	\
-			  &err, &phys, &new, bh_result->b_page)
-#define GET_INDIRECT_PTR(x) \
-	ufs_inode_getblock(inode, bh, x, fragment,	\
-			  &err, NULL, NULL, NULL)
-
-	if (ptr < UFS_NDIR_FRAGMENT) {
-		bh = GET_INODE_DATABLOCK(ptr);
+	if (unlikely(!depth)) {
+		ufs_warning(sb, "ufs_get_block", "block > big");
+		err = -EIO;
 		goto out;
 	}
-	ptr -= UFS_NDIR_FRAGMENT;
-	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
-		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
-		goto get_indirect;
-	}
-	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
-	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
-		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
-		goto get_double;
-	}
-	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
-	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
-	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
-get_double:
-	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
-get_indirect:
-	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
 
-#undef GET_INODE_DATABLOCK
-#undef GET_INODE_PTR
-#undef GET_INDIRECT_DATABLOCK
-#undef GET_INDIRECT_PTR
+	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
+		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
+		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
+		if (tailfrags && fragment >= lastfrag) {
+			if (!ufs_extend_tail(inode, fragment,
+					     &err, bh_result->b_page))
+				goto out;
+		}
+	}
 
+	if (depth == 1) {
+		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
+					   &err, &new, bh_result->b_page);
+	} else {
+		int i;
+		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
+					   &err, NULL, NULL);
+		for (i = 1; i < depth - 1; i++)
+			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
+						fragment, &err, NULL, NULL);
+		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
+					fragment, &err, &new, bh_result->b_page);
+	}
 out:
-	if (err)
-		goto abort;
-	if (new)
-		set_buffer_new(bh_result);
-	map_bh(bh_result, sb, phys);
-abort:
-	if (needs_lock)
-		unlock_ufs(sb);
-
+	if (phys64) {
+		phys64 += frag;
+		map_bh(bh_result, sb, phys64);
+		if (new)
+			set_buffer_new(bh_result);
+	}
+	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 	return err;
-
-abort_too_big:
-	ufs_warning(sb, "ufs_get_block", "block > big");
-	goto abort;
 }
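+
+/*
+ * Sketch of the descent above (illustrative): for a direct block,
+ * depth == 1 and ufs_inode_getfrag() resolves (or allocates) the
+ * fragment straight from the inode.  For, say, a double-indirect
+ * block, depth == 3: ufs_inode_getfrag() yields the address of the
+ * double indirect block from offsets[0], the loop walks one
+ * intermediate level via ufs_inode_getblock(), and the final
+ * ufs_inode_getblock() call resolves the data block and reports
+ * "new" back to the caller.
+ */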
 
 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -526,12 +465,16 @@
 	return __block_write_begin(page, pos, len, ufs_getfrag_block);
 }
 
+static void ufs_truncate_blocks(struct inode *);
+
 static void ufs_write_failed(struct address_space *mapping, loff_t to)
 {
 	struct inode *inode = mapping->host;
 
-	if (to > inode->i_size)
+	if (to > inode->i_size) {
 		truncate_pagecache(inode, inode->i_size);
+		ufs_truncate_blocks(inode);
+	}
 }
 
 static int ufs_write_begin(struct file *file, struct address_space *mapping,
@@ -548,6 +491,18 @@
 	return ret;
 }
 
+static int ufs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
+{
+	int ret;
+
+	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+	if (ret < len)
+		ufs_write_failed(mapping, pos + len);
+	return ret;
+}
+
 static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 {
 	return generic_block_bmap(mapping,block,ufs_getfrag_block);
@@ -557,7 +512,7 @@
 	.readpage = ufs_readpage,
 	.writepage = ufs_writepage,
 	.write_begin = ufs_write_begin,
-	.write_end = generic_write_end,
+	.write_end = ufs_write_end,
 	.bmap = ufs_bmap
 };
 
@@ -599,7 +554,7 @@
 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
 		return -1;
 	}
-	
+
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
 	 */
@@ -619,7 +574,7 @@
 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 
-	
+
 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
 		       sizeof(ufs_inode->ui_u2.ui_addr));
@@ -753,7 +708,7 @@
 
 	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
 	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
-		
+
 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
 	ufs_inode->ui_atime.tv_usec = 0;
@@ -855,23 +810,19 @@
 
 		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
 	}
-		
+
 	mark_buffer_dirty(bh);
 	if (do_sync)
 		sync_dirty_buffer(bh);
 	brelse (bh);
-	
+
 	UFSD("EXIT\n");
 	return 0;
 }
 
 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	int ret;
-	lock_ufs(inode->i_sb);
-	ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_ufs(inode->i_sb);
-	return ret;
+	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
 int ufs_sync_inode (struct inode *inode)
@@ -888,24 +839,389 @@
 
 	truncate_inode_pages_final(&inode->i_data);
 	if (want_delete) {
-		loff_t old_i_size;
-		/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
-		lock_ufs(inode->i_sb);
-		mark_inode_dirty(inode);
-		ufs_update_inode(inode, IS_SYNC(inode));
-		old_i_size = inode->i_size;
 		inode->i_size = 0;
-		if (inode->i_blocks && ufs_truncate(inode, old_i_size))
-			ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
-		unlock_ufs(inode->i_sb);
+		if (inode->i_blocks)
+			ufs_truncate_blocks(inode);
 	}
 
 	invalidate_inode_buffers(inode);
 	clear_inode(inode);
 
-	if (want_delete) {
-		lock_ufs(inode->i_sb);
+	if (want_delete)
 		ufs_free_inode(inode);
-		unlock_ufs(inode->i_sb);
-	}
 }
+
+struct to_free {
+	struct inode *inode;
+	u64 to;
+	unsigned count;
+};
+
+static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
+{
+	if (ctx->count && ctx->to != from) {
+		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
+		ctx->count = 0;
+	}
+	ctx->count += count;
+	ctx->to = from + count;
+}
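+
+/*
+ * Sketch of the coalescing above (illustrative, s_fpb == 8):
+ * free_data(&ctx, 64, 8) followed by free_data(&ctx, 72, 8) merges
+ * into one pending run of 16 fragments at 64; a non-adjacent
+ * free_data(&ctx, 128, 8) first releases that run through
+ * ufs_free_blocks() and starts a new one.  The final
+ * free_data(&ctx, 0, 0), as used by the callers below, flushes
+ * whatever run is still pending.
+ */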
+
+#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
+#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
+
+static void ufs_trunc_direct(struct inode *inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	void *p;
+	u64 frag1, frag2, frag3, frag4, block1, block2;
+	struct to_free ctx = {.inode = inode};
+	unsigned i, tmp;
+
+	UFSD("ENTER: ino %lu\n", inode->i_ino);
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	frag1 = DIRECT_FRAGMENT;
+	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
+	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
+	frag3 = frag4 & ~uspi->s_fpbmask;
+	block1 = block2 = 0;
+	if (frag2 > frag3) {
+		frag2 = frag4;
+		frag3 = frag4 = 0;
+	} else if (frag2 < frag3) {
+		block1 = ufs_fragstoblks (frag2);
+		block2 = ufs_fragstoblks (frag3);
+	}
+
+	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
+	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
+	     (unsigned long long)frag1, (unsigned long long)frag2,
+	     (unsigned long long)block1, (unsigned long long)block2,
+	     (unsigned long long)frag3, (unsigned long long)frag4);
+
+	if (frag1 >= frag2)
+		goto next1;
+
+	/*
+	 * Free first free fragments
+	 */
+	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
+	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (!tmp )
+		ufs_panic (sb, "ufs_trunc_direct", "internal error");
+	frag2 -= frag1;
+	frag1 = ufs_fragnum (frag1);
+
+	ufs_free_fragments(inode, tmp + frag1, frag2);
+
+next1:
+	/*
+	 * Free whole blocks
+	 */
+	for (i = block1 ; i < block2; i++) {
+		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
+		tmp = ufs_data_ptr_to_cpu(sb, p);
+		if (!tmp)
+			continue;
+		write_seqlock(&ufsi->meta_lock);
+		ufs_data_ptr_clear(uspi, p);
+		write_sequnlock(&ufsi->meta_lock);
+
+		free_data(&ctx, tmp, uspi->s_fpb);
+	}
+
+	free_data(&ctx, 0, 0);
+
+	if (frag3 >= frag4)
+		goto next3;
+
+	/*
+	 * Free last free fragments
+	 */
+	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
+	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (!tmp )
+		ufs_panic(sb, "ufs_truncate_direct", "internal error");
+	frag4 = ufs_fragnum (frag4);
+	write_seqlock(&ufsi->meta_lock);
+	ufs_data_ptr_clear(uspi, p);
+	write_sequnlock(&ufsi->meta_lock);
+
+	ufs_free_fragments (inode, tmp, frag4);
+ next3:
+
+	UFSD("EXIT: ino %lu\n", inode->i_ino);
+}
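+
+/*
+ * Example of the partitioning above (illustrative, s_fpb == 8):
+ * truncating so that DIRECT_FRAGMENT == 11 while i_lastfrag == 29
+ * gives frag1 = 11, frag2 = 16, frag3 = 24, frag4 = 29; fragments
+ * 11..15 go as the partial first block, the block covering 16..23
+ * is freed whole, and 24..28 are released as the partial last block.
+ */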
+
+static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
+	unsigned i;
+
+	if (!ubh)
+		return;
+
+	if (--depth) {
+		for (i = 0; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block)
+				free_full_branch(inode, block, depth);
+		}
+	} else {
+		struct to_free ctx = {.inode = inode};
+
+		for (i = 0; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block)
+				free_data(&ctx, block, uspi->s_fpb);
+		}
+		free_data(&ctx, 0, 0);
+	}
+
+	ubh_bforget(ubh);
+	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
+}
+
+static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned i;
+
+	if (--depth) {
+		for (i = from; i < uspi->s_apb ; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block) {
+				write_seqlock(&UFS_I(inode)->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&UFS_I(inode)->meta_lock);
+				ubh_mark_buffer_dirty(ubh);
+				free_full_branch(inode, block, depth);
+			}
+		}
+	} else {
+		struct to_free ctx = {.inode = inode};
+
+		for (i = from; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block) {
+				write_seqlock(&UFS_I(inode)->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&UFS_I(inode)->meta_lock);
+				ubh_mark_buffer_dirty(ubh);
+				free_data(&ctx, block, uspi->s_fpb);
+			}
+		}
+		free_data(&ctx, 0, 0);
+	}
+	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
+		ubh_sync_block(ubh);
+	ubh_brelse(ubh);
+}
+
+static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
+{
+	int err = 0;
+	struct super_block *sb = inode->i_sb;
+	struct address_space *mapping = inode->i_mapping;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned i, end;
+	sector_t lastfrag;
+	struct page *lastpage;
+	struct buffer_head *bh;
+	u64 phys64;
+
+	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+
+	if (!lastfrag)
+		goto out;
+
+	lastfrag--;
+
+	lastpage = ufs_get_locked_page(mapping, lastfrag >>
+				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+       if (IS_ERR(lastpage)) {
+               err = -EIO;
+               goto out;
+       }
+
+       end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+       bh = page_buffers(lastpage);
+       for (i = 0; i < end; ++i)
+               bh = bh->b_this_page;
+
+       err = ufs_getfrag_block(inode, lastfrag, bh, 1);
+
+       if (unlikely(err))
+	       goto out_unlock;
+
+       if (buffer_new(bh)) {
+	       clear_buffer_new(bh);
+	       unmap_underlying_metadata(bh->b_bdev,
+					 bh->b_blocknr);
+	       /*
+		* We do not zero the fragment: if it is mapped to a
+		* hole, it already contains zeroes.
+		*/
+	       set_buffer_uptodate(bh);
+	       mark_buffer_dirty(bh);
+	       set_page_dirty(lastpage);
+       }
+
+       if (lastfrag >= UFS_IND_FRAGMENT) {
+	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
+	       phys64 = bh->b_blocknr + 1;
+	       for (i = 0; i < end; ++i) {
+		       bh = sb_getblk(sb, i + phys64);
+		       lock_buffer(bh);
+		       memset(bh->b_data, 0, sb->s_blocksize);
+		       set_buffer_uptodate(bh);
+		       mark_buffer_dirty(bh);
+		       unlock_buffer(bh);
+		       sync_dirty_buffer(bh);
+		       brelse(bh);
+	       }
+       }
+out_unlock:
+       ufs_put_locked_page(lastpage);
+out:
+       return err;
+}
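+
+/*
+ * Example of the zeroing loop above (illustrative, s_fpb == 8): if
+ * the last fragment is fragment 2 within its block, the remaining
+ * end = 8 - 2 - 1 == 5 fragments that follow it are zeroed and
+ * written out, so a later extension of the file cannot expose stale
+ * on-disk data.
+ */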
+
+static void __ufs_truncate_blocks(struct inode *inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned offsets[4];
+	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+	int depth2;
+	unsigned i;
+	struct ufs_buffer_head *ubh[3];
+	void *p;
+	u64 block;
+
+	if (!depth)
+		return;
+
+	/* find the last non-zero in offsets[] */
+	for (depth2 = depth - 1; depth2; depth2--)
+		if (offsets[depth2])
+			break;
+
+	mutex_lock(&ufsi->truncate_mutex);
+	if (depth == 1) {
+		ufs_trunc_direct(inode);
+		offsets[0] = UFS_IND_BLOCK;
+	} else {
+		/* get the blocks that should be partially emptied */
+		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+		for (i = 0; i < depth2; i++) {
+			offsets[i]++;	/* next branch is fully freed */
+			block = ufs_data_ptr_to_cpu(sb, p);
+			if (!block)
+				break;
+			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
+			if (!ubh[i]) {
+				write_seqlock(&ufsi->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&ufsi->meta_lock);
+				break;
+			}
+			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+		}
+		while (i--)
+			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
+	}
+	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
+		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
+		block = ufs_data_ptr_to_cpu(sb, p);
+		if (block) {
+			write_seqlock(&ufsi->meta_lock);
+			ufs_data_ptr_clear(uspi, p);
+			write_sequnlock(&ufsi->meta_lock);
+			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
+		}
+	}
+	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	mark_inode_dirty(inode);
+	mutex_unlock(&ufsi->truncate_mutex);
+}
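+
+/*
+ * Illustrative walk of the partial-branch logic above: with
+ * depth == 3 and offsets == {UFS_DIND_BLOCK, 5, 3}, depth2 == 2, so
+ * the double indirect block and the indirect block at index 5 inside
+ * it are each partially emptied by free_branch_tail() (from index 3
+ * in the innermost one, from index 6 in the outer one), while the
+ * whole triple indirect tree is freed in full by the last loop.
+ */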
+
+static int ufs_truncate(struct inode *inode, loff_t size)
+{
+	int err = 0;
+
+	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
+	     inode->i_ino, (unsigned long long)size,
+	     (unsigned long long)i_size_read(inode));
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return -EINVAL;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return -EPERM;
+
+	err = ufs_alloc_lastblock(inode, size);
+
+	if (err)
+		goto out;
+
+	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
+
+	truncate_setsize(inode, size);
+
+	__ufs_truncate_blocks(inode);
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+out:
+	UFSD("EXIT: err %d\n", err);
+	return err;
+}
+
+void ufs_truncate_blocks(struct inode *inode)
+{
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+	__ufs_truncate_blocks(inode);
+}
+
+int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = d_inode(dentry);
+	unsigned int ia_valid = attr->ia_valid;
+	int error;
+
+	error = inode_change_ok(inode, attr);
+	if (error)
+		return error;
+
+	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
+		error = ufs_truncate(inode, attr->ia_size);
+		if (error)
+			return error;
+	}
+
+	setattr_copy(inode, attr);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+const struct inode_operations ufs_file_inode_operations = {
+	.setattr = ufs_setattr,
+};
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 250579a..f6390ee 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -94,22 +94,6 @@
 #include "swab.h"
 #include "util.h"
 
-void lock_ufs(struct super_block *sb)
-{
-	struct ufs_sb_info *sbi = UFS_SB(sb);
-
-	mutex_lock(&sbi->mutex);
-	sbi->mutex_owner = current;
-}
-
-void unlock_ufs(struct super_block *sb)
-{
-	struct ufs_sb_info *sbi = UFS_SB(sb);
-
-	sbi->mutex_owner = NULL;
-	mutex_unlock(&sbi->mutex);
-}
-
 static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
 {
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
@@ -694,7 +678,6 @@
 	struct ufs_super_block_third * usb3;
 	unsigned flags;
 
-	lock_ufs(sb);
 	mutex_lock(&UFS_SB(sb)->s_lock);
 
 	UFSD("ENTER\n");
@@ -714,7 +697,6 @@
 
 	UFSD("EXIT\n");
 	mutex_unlock(&UFS_SB(sb)->s_lock);
-	unlock_ufs(sb);
 
 	return 0;
 }
@@ -758,7 +740,6 @@
 
 	ubh_brelse_uspi (sbi->s_uspi);
 	kfree (sbi->s_uspi);
-	mutex_destroy(&sbi->mutex);
 	kfree (sbi);
 	sb->s_fs_info = NULL;
 	UFSD("EXIT\n");
@@ -801,7 +782,6 @@
 
 	UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
 	
-	mutex_init(&sbi->mutex);
 	mutex_init(&sbi->s_lock);
 	spin_lock_init(&sbi->work_lock);
 	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
@@ -1257,7 +1237,6 @@
 	return 0;
 
 failed:
-	mutex_destroy(&sbi->mutex);
 	if (ubh)
 		ubh_brelse_uspi (uspi);
 	kfree (uspi);
@@ -1280,7 +1259,6 @@
 	unsigned flags;
 
 	sync_filesystem(sb);
-	lock_ufs(sb);
 	mutex_lock(&UFS_SB(sb)->s_lock);
 	uspi = UFS_SB(sb)->s_uspi;
 	flags = UFS_SB(sb)->s_flags;
@@ -1296,7 +1274,6 @@
 	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
 	if (!ufs_parse_options (data, &new_mount_opt)) {
 		mutex_unlock(&UFS_SB(sb)->s_lock);
-		unlock_ufs(sb);
 		return -EINVAL;
 	}
 	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
@@ -1304,14 +1281,12 @@
 	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
 		pr_err("ufstype can't be changed during remount\n");
 		mutex_unlock(&UFS_SB(sb)->s_lock);
-		unlock_ufs(sb);
 		return -EINVAL;
 	}
 
 	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
 		UFS_SB(sb)->s_mount_opt = new_mount_opt;
 		mutex_unlock(&UFS_SB(sb)->s_lock);
-		unlock_ufs(sb);
 		return 0;
 	}
 	
@@ -1335,7 +1310,6 @@
 #ifndef CONFIG_UFS_FS_WRITE
 		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
 		mutex_unlock(&UFS_SB(sb)->s_lock);
-		unlock_ufs(sb);
 		return -EINVAL;
 #else
 		if (ufstype != UFS_MOUNT_UFSTYPE_SUN && 
@@ -1345,13 +1319,11 @@
 		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
 			pr_err("this ufstype is read-only supported\n");
 			mutex_unlock(&UFS_SB(sb)->s_lock);
-			unlock_ufs(sb);
 			return -EINVAL;
 		}
 		if (!ufs_read_cylinder_structures(sb)) {
 			pr_err("failed during remounting\n");
 			mutex_unlock(&UFS_SB(sb)->s_lock);
-			unlock_ufs(sb);
 			return -EPERM;
 		}
 		sb->s_flags &= ~MS_RDONLY;
@@ -1359,7 +1331,6 @@
 	}
 	UFS_SB(sb)->s_mount_opt = new_mount_opt;
 	mutex_unlock(&UFS_SB(sb)->s_lock);
-	unlock_ufs(sb);
 	return 0;
 }
 
@@ -1391,8 +1362,7 @@
 	struct ufs_super_block_third *usb3;
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
-	lock_ufs(sb);
-
+	mutex_lock(&UFS_SB(sb)->s_lock);
 	usb3 = ubh_get_usb_third(uspi);
 	
 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
@@ -1413,7 +1383,7 @@
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
 
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 
 	return 0;
 }
@@ -1429,6 +1399,8 @@
 		return NULL;
 
 	ei->vfs_inode.i_version = 1;
+	seqlock_init(&ei->meta_lock);
+	mutex_init(&ei->truncate_mutex);
 	return &ei->vfs_inode;
 }
 
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
deleted file mode 100644
index 2115470..0000000
--- a/fs/ufs/truncate.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/*
- *  linux/fs/ufs/truncate.c
- *
- * Copyright (C) 1998
- * Daniel Pirkl <daniel.pirkl@email.cz>
- * Charles University, Faculty of Mathematics and Physics
- *
- *  from
- *
- *  linux/fs/ext2/truncate.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/truncate.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-/*
- * Real random numbers for secure rm added 94/02/18
- * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
- */
-
-/*
- * Adoptation to use page cache and UFS2 write support by
- * Evgeniy Dushistov <dushistov@mail.ru>, 2006-2007
- */
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/time.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/buffer_head.h>
-#include <linux/blkdev.h>
-#include <linux/sched.h>
-
-#include "ufs_fs.h"
-#include "ufs.h"
-#include "swab.h"
-#include "util.h"
-
-/*
- * Secure deletion currently doesn't work. It interacts very badly
- * with buffers shared with memory mappings, and for that reason
- * can't be done in the truncate() routines. It should instead be
- * done separately in "release()" before calling the truncate routines
- * that will release the actual file blocks.
- *
- *		Linus
- */
-
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
-#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
-
-
-static int ufs_trunc_direct(struct inode *inode)
-{
-	struct ufs_inode_info *ufsi = UFS_I(inode);
-	struct super_block * sb;
-	struct ufs_sb_private_info * uspi;
-	void *p;
-	u64 frag1, frag2, frag3, frag4, block1, block2;
-	unsigned frag_to_free, free_count;
-	unsigned i, tmp;
-	int retry;
-	
-	UFSD("ENTER: ino %lu\n", inode->i_ino);
-
-	sb = inode->i_sb;
-	uspi = UFS_SB(sb)->s_uspi;
-	
-	frag_to_free = 0;
-	free_count = 0;
-	retry = 0;
-	
-	frag1 = DIRECT_FRAGMENT;
-	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
-	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
-	frag3 = frag4 & ~uspi->s_fpbmask;
-	block1 = block2 = 0;
-	if (frag2 > frag3) {
-		frag2 = frag4;
-		frag3 = frag4 = 0;
-	} else if (frag2 < frag3) {
-		block1 = ufs_fragstoblks (frag2);
-		block2 = ufs_fragstoblks (frag3);
-	}
-
-	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
-	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
-	     (unsigned long long)frag1, (unsigned long long)frag2,
-	     (unsigned long long)block1, (unsigned long long)block2,
-	     (unsigned long long)frag3, (unsigned long long)frag4);
-
-	if (frag1 >= frag2)
-		goto next1;		
-
-	/*
-	 * Free first free fragments
-	 */
-	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
-	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (!tmp )
-		ufs_panic (sb, "ufs_trunc_direct", "internal error");
-	frag2 -= frag1;
-	frag1 = ufs_fragnum (frag1);
-
-	ufs_free_fragments(inode, tmp + frag1, frag2);
-	mark_inode_dirty(inode);
-	frag_to_free = tmp + frag1;
-
-next1:
-	/*
-	 * Free whole blocks
-	 */
-	for (i = block1 ; i < block2; i++) {
-		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
-		tmp = ufs_data_ptr_to_cpu(sb, p);
-		if (!tmp)
-			continue;
-		ufs_data_ptr_clear(uspi, p);
-
-		if (free_count == 0) {
-			frag_to_free = tmp;
-			free_count = uspi->s_fpb;
-		} else if (free_count > 0 && frag_to_free == tmp - free_count)
-			free_count += uspi->s_fpb;
-		else {
-			ufs_free_blocks (inode, frag_to_free, free_count);
-			frag_to_free = tmp;
-			free_count = uspi->s_fpb;
-		}
-		mark_inode_dirty(inode);
-	}
-	
-	if (free_count > 0)
-		ufs_free_blocks (inode, frag_to_free, free_count);
-
-	if (frag3 >= frag4)
-		goto next3;
-
-	/*
-	 * Free last free fragments
-	 */
-	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
-	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (!tmp )
-		ufs_panic(sb, "ufs_truncate_direct", "internal error");
-	frag4 = ufs_fragnum (frag4);
-	ufs_data_ptr_clear(uspi, p);
-
-	ufs_free_fragments (inode, tmp, frag4);
-	mark_inode_dirty(inode);
- next3:
-
-	UFSD("EXIT: ino %lu\n", inode->i_ino);
-	return retry;
-}
-
-
-static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
-{
-	struct super_block * sb;
-	struct ufs_sb_private_info * uspi;
-	struct ufs_buffer_head * ind_ubh;
-	void *ind;
-	u64 tmp, indirect_block, i, frag_to_free;
-	unsigned free_count;
-	int retry;
-
-	UFSD("ENTER: ino %lu, offset %llu, p: %p\n",
-	     inode->i_ino, (unsigned long long)offset, p);
-
-	BUG_ON(!p);
-		
-	sb = inode->i_sb;
-	uspi = UFS_SB(sb)->s_uspi;
-
-	frag_to_free = 0;
-	free_count = 0;
-	retry = 0;
-	
-	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (!tmp)
-		return 0;
-	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
-	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
-		ubh_brelse (ind_ubh);
-		return 1;
-	}
-	if (!ind_ubh) {
-		ufs_data_ptr_clear(uspi, p);
-		return 0;
-	}
-
-	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
-	for (i = indirect_block; i < uspi->s_apb; i++) {
-		ind = ubh_get_data_ptr(uspi, ind_ubh, i);
-		tmp = ufs_data_ptr_to_cpu(sb, ind);
-		if (!tmp)
-			continue;
-
-		ufs_data_ptr_clear(uspi, ind);
-		ubh_mark_buffer_dirty(ind_ubh);
-		if (free_count == 0) {
-			frag_to_free = tmp;
-			free_count = uspi->s_fpb;
-		} else if (free_count > 0 && frag_to_free == tmp - free_count)
-			free_count += uspi->s_fpb;
-		else {
-			ufs_free_blocks (inode, frag_to_free, free_count);
-			frag_to_free = tmp;
-			free_count = uspi->s_fpb;
-		}
-
-		mark_inode_dirty(inode);
-	}
-
-	if (free_count > 0) {
-		ufs_free_blocks (inode, frag_to_free, free_count);
-	}
-	for (i = 0; i < uspi->s_apb; i++)
-		if (!ufs_is_data_ptr_zero(uspi,
-					  ubh_get_data_ptr(uspi, ind_ubh, i)))
-			break;
-	if (i >= uspi->s_apb) {
-		tmp = ufs_data_ptr_to_cpu(sb, p);
-		ufs_data_ptr_clear(uspi, p);
-
-		ufs_free_blocks (inode, tmp, uspi->s_fpb);
-		mark_inode_dirty(inode);
-		ubh_bforget(ind_ubh);
-		ind_ubh = NULL;
-	}
-	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
-		ubh_sync_block(ind_ubh);
-	ubh_brelse (ind_ubh);
-	
-	UFSD("EXIT: ino %lu\n", inode->i_ino);
-	
-	return retry;
-}
-
-static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
-{
-	struct super_block * sb;
-	struct ufs_sb_private_info * uspi;
-	struct ufs_buffer_head *dind_bh;
-	u64 i, tmp, dindirect_block;
-	void *dind;
-	int retry = 0;
-	
-	UFSD("ENTER: ino %lu\n", inode->i_ino);
-	
-	sb = inode->i_sb;
-	uspi = UFS_SB(sb)->s_uspi;
-
-	dindirect_block = (DIRECT_BLOCK > offset) 
-		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
-	retry = 0;
-	
-	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (!tmp)
-		return 0;
-	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
-	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
-		ubh_brelse (dind_bh);
-		return 1;
-	}
-	if (!dind_bh) {
-		ufs_data_ptr_clear(uspi, p);
-		return 0;
-	}
-
-	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
-		dind = ubh_get_data_ptr(uspi, dind_bh, i);
-		tmp = ufs_data_ptr_to_cpu(sb, dind);
-		if (!tmp)
-			continue;
-		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
-		ubh_mark_buffer_dirty(dind_bh);
-	}
-
-	for (i = 0; i < uspi->s_apb; i++)
-		if (!ufs_is_data_ptr_zero(uspi,
-					  ubh_get_data_ptr(uspi, dind_bh, i)))
-			break;
-	if (i >= uspi->s_apb) {
-		tmp = ufs_data_ptr_to_cpu(sb, p);
-		ufs_data_ptr_clear(uspi, p);
-
-		ufs_free_blocks(inode, tmp, uspi->s_fpb);
-		mark_inode_dirty(inode);
-		ubh_bforget(dind_bh);
-		dind_bh = NULL;
-	}
-	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
-		ubh_sync_block(dind_bh);
-	ubh_brelse (dind_bh);
-	
-	UFSD("EXIT: ino %lu\n", inode->i_ino);
-	
-	return retry;
-}
-
-static int ufs_trunc_tindirect(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	struct ufs_inode_info *ufsi = UFS_I(inode);
-	struct ufs_buffer_head * tind_bh;
-	u64 tindirect_block, tmp, i;
-	void *tind, *p;
-	int retry;
-	
-	UFSD("ENTER: ino %lu\n", inode->i_ino);
-
-	retry = 0;
-	
-	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
-		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
-
-	p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK);
-	if (!(tmp = ufs_data_ptr_to_cpu(sb, p)))
-		return 0;
-	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
-	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
-		ubh_brelse (tind_bh);
-		return 1;
-	}
-	if (!tind_bh) {
-		ufs_data_ptr_clear(uspi, p);
-		return 0;
-	}
-
-	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
-		tind = ubh_get_data_ptr(uspi, tind_bh, i);
-		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR + 
-			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
-		ubh_mark_buffer_dirty(tind_bh);
-	}
-	for (i = 0; i < uspi->s_apb; i++)
-		if (!ufs_is_data_ptr_zero(uspi,
-					  ubh_get_data_ptr(uspi, tind_bh, i)))
-			break;
-	if (i >= uspi->s_apb) {
-		tmp = ufs_data_ptr_to_cpu(sb, p);
-		ufs_data_ptr_clear(uspi, p);
-
-		ufs_free_blocks(inode, tmp, uspi->s_fpb);
-		mark_inode_dirty(inode);
-		ubh_bforget(tind_bh);
-		tind_bh = NULL;
-	}
-	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
-		ubh_sync_block(tind_bh);
-	ubh_brelse (tind_bh);
-	
-	UFSD("EXIT: ino %lu\n", inode->i_ino);
-	return retry;
-}
-
-static int ufs_alloc_lastblock(struct inode *inode)
-{
-	int err = 0;
-	struct super_block *sb = inode->i_sb;
-	struct address_space *mapping = inode->i_mapping;
-	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	unsigned i, end;
-	sector_t lastfrag;
-	struct page *lastpage;
-	struct buffer_head *bh;
-	u64 phys64;
-
-	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
-
-	if (!lastfrag)
-		goto out;
-
-	lastfrag--;
-
-	lastpage = ufs_get_locked_page(mapping, lastfrag >>
-				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
-       if (IS_ERR(lastpage)) {
-               err = -EIO;
-               goto out;
-       }
-
-       end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
-       bh = page_buffers(lastpage);
-       for (i = 0; i < end; ++i)
-               bh = bh->b_this_page;
-
-
-       err = ufs_getfrag_block(inode, lastfrag, bh, 1);
-
-       if (unlikely(err))
-	       goto out_unlock;
-
-       if (buffer_new(bh)) {
-	       clear_buffer_new(bh);
-	       unmap_underlying_metadata(bh->b_bdev,
-					 bh->b_blocknr);
-	       /*
-		* we do not zeroize fragment, because of
-		* if it maped to hole, it already contains zeroes
-		*/
-	       set_buffer_uptodate(bh);
-	       mark_buffer_dirty(bh);
-	       set_page_dirty(lastpage);
-       }
-
-       if (lastfrag >= UFS_IND_FRAGMENT) {
-	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
-	       phys64 = bh->b_blocknr + 1;
-	       for (i = 0; i < end; ++i) {
-		       bh = sb_getblk(sb, i + phys64);
-		       lock_buffer(bh);
-		       memset(bh->b_data, 0, sb->s_blocksize);
-		       set_buffer_uptodate(bh);
-		       mark_buffer_dirty(bh);
-		       unlock_buffer(bh);
-		       sync_dirty_buffer(bh);
-		       brelse(bh);
-	       }
-       }
-out_unlock:
-       ufs_put_locked_page(lastpage);
-out:
-       return err;
-}
-
-int ufs_truncate(struct inode *inode, loff_t old_i_size)
-{
-	struct ufs_inode_info *ufsi = UFS_I(inode);
-	struct super_block *sb = inode->i_sb;
-	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	int retry, err = 0;
-	
-	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
-	     inode->i_ino, (unsigned long long)i_size_read(inode),
-	     (unsigned long long)old_i_size);
-
-	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-	      S_ISLNK(inode->i_mode)))
-		return -EINVAL;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return -EPERM;
-
-	err = ufs_alloc_lastblock(inode);
-
-	if (err) {
-		i_size_write(inode, old_i_size);
-		goto out;
-	}
-
-	block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
-
-	while (1) {
-		retry = ufs_trunc_direct(inode);
-		retry |= ufs_trunc_indirect(inode, UFS_IND_BLOCK,
-					    ufs_get_direct_data_ptr(uspi, ufsi,
-								    UFS_IND_BLOCK));
-		retry |= ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb,
-					     ufs_get_direct_data_ptr(uspi, ufsi,
-								     UFS_DIND_BLOCK));
-		retry |= ufs_trunc_tindirect (inode);
-		if (!retry)
-			break;
-		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
-			ufs_sync_inode (inode);
-		yield();
-	}
-
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-	ufsi->i_lastfrag = DIRECT_FRAGMENT;
-	mark_inode_dirty(inode);
-out:
-	UFSD("EXIT: err %d\n", err);
-	return err;
-}
-
-int ufs_setattr(struct dentry *dentry, struct iattr *attr)
-{
-	struct inode *inode = d_inode(dentry);
-	unsigned int ia_valid = attr->ia_valid;
-	int error;
-
-	error = inode_change_ok(inode, attr);
-	if (error)
-		return error;
-
-	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
-		loff_t old_i_size = inode->i_size;
-
-		/* XXX(truncate): truncate_setsize should be called last */
-		truncate_setsize(inode, attr->ia_size);
-
-		lock_ufs(inode->i_sb);
-		error = ufs_truncate(inode, old_i_size);
-		unlock_ufs(inode->i_sb);
-		if (error)
-			return error;
-	}
-
-	setattr_copy(inode, attr);
-	mark_inode_dirty(inode);
-	return 0;
-}
-
-const struct inode_operations ufs_file_inode_operations = {
-	.setattr = ufs_setattr,
-};
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 2e31ea2..7da4aca 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -24,8 +24,6 @@
 	unsigned s_cgno[UFS_MAX_GROUP_LOADED];
 	unsigned short s_cg_loaded;
 	unsigned s_mount_opt;
-	struct mutex mutex;
-	struct task_struct *mutex_owner;
 	struct super_block *sb;
 	int work_queued; /* non-zero if the delayed work is queued */
 	struct delayed_work sync_work; /* FS sync delayed work */
@@ -46,6 +44,8 @@
 	__u32	i_oeftflag;
 	__u16	i_osync;
 	__u64	i_lastfrag;
+	seqlock_t meta_lock;
+	struct mutex	truncate_mutex;
 	__u32   i_dir_start_lookup;
 	struct inode vfs_inode;
 };
@@ -122,7 +122,7 @@
 extern int ufs_write_inode (struct inode *, struct writeback_control *);
 extern int ufs_sync_inode (struct inode *);
 extern void ufs_evict_inode (struct inode *);
-extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create);
+extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
 
 /* namei.c */
 extern const struct file_operations ufs_dir_operations;
@@ -140,10 +140,6 @@
 extern const struct inode_operations ufs_fast_symlink_inode_operations;
 extern const struct inode_operations ufs_symlink_inode_operations;
 
-/* truncate.c */
-extern int ufs_truncate (struct inode *, loff_t);
-extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
-
 static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
 {
 	return sb->s_fs_info;
@@ -170,7 +166,4 @@
 	return do_div(b, uspi->s_fpg);
 }
 
-extern void lock_ufs(struct super_block *sb);
-extern void unlock_ufs(struct super_block *sb);
-
 #endif /* _UFS_UFS_H */
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
new file mode 100644
index 0000000..634e676
--- /dev/null
+++ b/fs/userfaultfd.c
@@ -0,0 +1,1330 @@
+/*
+ *  fs/userfaultfd.c
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *  Copyright (C) 2008-2009 Red Hat, Inc.
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ *
+ *  Some part derived from fs/eventfd.c (anon inode setup) and
+ *  mm/ksm.c (mm hashing).
+ */
+
+#include <linux/hashtable.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/bug.h>
+#include <linux/anon_inodes.h>
+#include <linux/syscalls.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/mempolicy.h>
+#include <linux/ioctl.h>
+#include <linux/security.h>
+
+static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
+
+enum userfaultfd_state {
+	UFFD_STATE_WAIT_API,
+	UFFD_STATE_RUNNING,
+};
+
+/*
+ * Start with fault_pending_wqh and fault_wqh so they're more likely
+ * to be in the same cacheline.
+ */
+struct userfaultfd_ctx {
+	/* waitqueue head for the pending (i.e. not read) userfaults */
+	wait_queue_head_t fault_pending_wqh;
+	/* waitqueue head for the userfaults */
+	wait_queue_head_t fault_wqh;
+	/* waitqueue head for the pseudo fd to wakeup poll/read */
+	wait_queue_head_t fd_wqh;
+	/* a refile sequence protected by fault_pending_wqh lock */
+	struct seqcount refile_seq;
+	/* pseudo fd refcounting */
+	atomic_t refcount;
+	/* userfaultfd syscall flags */
+	unsigned int flags;
+	/* state machine */
+	enum userfaultfd_state state;
+	/* released */
+	bool released;
+	/* mm with one or more vmas attached to this userfaultfd_ctx */
+	struct mm_struct *mm;
+};
+
+struct userfaultfd_wait_queue {
+	struct uffd_msg msg;
+	wait_queue_t wq;
+	struct userfaultfd_ctx *ctx;
+};
+
+struct userfaultfd_wake_range {
+	unsigned long start;
+	unsigned long len;
+};
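+
+/*
+ * Informal example of the range convention used below: a range of
+ * { .start = addr, .len = PAGE_SIZE } wakes only waiters whose
+ * faulting address lies in [addr, addr + PAGE_SIZE), while
+ * { .len = 0 } (as in userfaultfd_release()) wakes every waiter
+ * regardless of address.
+ */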
+
+static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
+				     int wake_flags, void *key)
+{
+	struct userfaultfd_wake_range *range = key;
+	int ret;
+	struct userfaultfd_wait_queue *uwq;
+	unsigned long start, len;
+
+	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+	ret = 0;
+	/* len == 0 means wake all */
+	start = range->start;
+	len = range->len;
+	if (len && (start > uwq->msg.arg.pagefault.address ||
+		    start + len <= uwq->msg.arg.pagefault.address))
+		goto out;
+	ret = wake_up_state(wq->private, mode);
+	if (ret)
+		/*
+		 * Wake only once, autoremove behavior.
+		 *
+		 * After the effect of list_del_init is visible to the
+		 * other CPUs, the waitqueue may disappear from under
+		 * us, see the !list_empty_careful() in
+		 * handle_userfault(). try_to_wake_up() has an
+		 * implicit smp_mb__before_spinlock, and the
+		 * wq->private is read before calling the extern
+		 * function "wake_up_state" (which in turn calls
+		 * try_to_wake_up). While the spin_lock;spin_unlock;
+		 * wouldn't be enough, the smp_mb__before_spinlock is
+		 * enough to avoid an explicit smp_mb() here.
+		 */
+		list_del_init(&wq->task_list);
+out:
+	return ret;
+}
+
+/**
+ * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
+ * context.
+ * @ctx: [in] Pointer to the userfaultfd context.
+ *
+ * Returns: Nothing; BUG()s if the reference count was already zero.
+ */
+static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
+{
+	if (!atomic_inc_not_zero(&ctx->refcount))
+		BUG();
+}
+
+/**
+ * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
+ * context.
+ * @ctx: [in] Pointer to userfaultfd context.
+ *
+ * The userfaultfd context reference must have been previously acquired either
+ * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
+ */
+static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
+{
+	if (atomic_dec_and_test(&ctx->refcount)) {
+		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
+		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
+		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
+		mmput(ctx->mm);
+		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+	}
+}
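+
+/*
+ * Usage note (informal): every userfaultfd_ctx_get() is paired with
+ * a userfaultfd_ctx_put(); handle_userfault() below takes its
+ * reference before dropping the mmap_sem precisely so the ctx cannot
+ * be freed by userfaultfd_release() while the faulting thread sleeps.
+ */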
+
+static inline void msg_init(struct uffd_msg *msg)
+{
+	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
+	/*
+	 * Must use memset to zero out the padding or kernel data is
+	 * leaked to userland.
+	 */
+	memset(msg, 0, sizeof(struct uffd_msg));
+}
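+
+/*
+ * Note (hedged): a designated initializer such as
+ *	struct uffd_msg msg = { .event = UFFD_EVENT_PAGEFAULT };
+ * zeroes the named members but is not guaranteed to clear padding
+ * bytes, hence the explicit memset() above before the structure is
+ * copied to userland.
+ */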
+
+static inline struct uffd_msg userfault_msg(unsigned long address,
+					    unsigned int flags,
+					    unsigned long reason)
+{
+	struct uffd_msg msg;
+	msg_init(&msg);
+	msg.event = UFFD_EVENT_PAGEFAULT;
+	msg.arg.pagefault.address = address;
+	if (flags & FAULT_FLAG_WRITE)
+		/*
+		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WRITE was set in the
+		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
+		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
+		 * was a read fault, otherwise if set it means it's
+		 * a write fault.
+		 */
+		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
+	if (reason & VM_UFFD_WP)
+		/*
+		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
+		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
+		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
+		 * a missing fault, otherwise if set it means it's a
+		 * write protect fault.
+		 */
+		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
+	return msg;
+}
+
+/*
+ * Verify the pagetables are still not ok after having registered into
+ * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
+ * userfault that has already been resolved, if userfaultfd_read and
+ * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
+ * threads.
+ */
+static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
+					 unsigned long address,
+					 unsigned long flags,
+					 unsigned long reason)
+{
+	struct mm_struct *mm = ctx->mm;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	bool ret = true;
+
+	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+	pmd = pmd_offset(pud, address);
+	/*
+	 * READ_ONCE must function as a barrier with narrower scope
+	 * and it must be equivalent to:
+	 *	_pmd = *pmd; barrier();
+	 *
+	 * This is to deal with the instability (as in
+	 * pmd_trans_unstable) of the pmd.
+	 */
+	_pmd = READ_ONCE(*pmd);
+	if (!pmd_present(_pmd))
+		goto out;
+
+	ret = false;
+	if (pmd_trans_huge(_pmd))
+		goto out;
+
+	/*
+	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
+	 * and use the standard pte_offset_map() instead of parsing _pmd.
+	 */
+	pte = pte_offset_map(pmd, address);
+	/*
+	 * Lockless access: we're in a wait_event so it's ok if it
+	 * changes under us.
+	 */
+	if (pte_none(*pte))
+		ret = true;
+	pte_unmap(pte);
+
+out:
+	return ret;
+}
+
+/*
+ * The locking rules involved in returning VM_FAULT_RETRY depending on
+ * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
+ * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
+ * recommendation in __lock_page_or_retry is not an understatement.
+ *
+ * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
+ * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
+ * not set.
+ *
+ * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
+ * set, VM_FAULT_RETRY can still be returned if and only if there are
+ * fatal_signal_pending()s, and the mmap_sem must be released before
+ * returning it.
+ */
+int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+		     unsigned int flags, unsigned long reason)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct userfaultfd_ctx *ctx;
+	struct userfaultfd_wait_queue uwq;
+	int ret;
+	bool must_wait, return_to_userland;
+
+	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+	ret = VM_FAULT_SIGBUS;
+	ctx = vma->vm_userfaultfd_ctx.ctx;
+	if (!ctx)
+		goto out;
+
+	BUG_ON(ctx->mm != mm);
+
+	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
+	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
+
+	/*
+	 * If it's already released don't get it. This avoids looping
+	 * in __get_user_pages if userfaultfd_release waits on the
+	 * caller of handle_userfault to release the mmap_sem.
+	 */
+	if (unlikely(ACCESS_ONCE(ctx->released)))
+		goto out;
+
+	/*
+	 * Check that we can return VM_FAULT_RETRY.
+	 *
+	 * NOTE: it should become possible to return VM_FAULT_RETRY
+	 * even if FAULT_FLAG_TRIED is set without leading to gup()
+	 * -EBUSY failures, if the userfaultfd is to be extended for
+	 * VM_UFFD_WP tracking and we intend to arm the userfault
+	 * without first stopping userland access to the memory. For
+	 * VM_UFFD_MISSING userfaults this is enough for now.
+	 */
+	if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
+		/*
+		 * Validate the invariant that nowait must allow retry
+		 * to be sure not to return SIGBUS erroneously on
+		 * nowait invocations.
+		 */
+		BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
+#ifdef CONFIG_DEBUG_VM
+		if (printk_ratelimit()) {
+			printk(KERN_WARNING
+			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
+			dump_stack();
+		}
+#endif
+		goto out;
+	}
+
+	/*
+	 * Handle nowait, not much to do other than tell it to retry
+	 * and wait.
+	 */
+	ret = VM_FAULT_RETRY;
+	if (flags & FAULT_FLAG_RETRY_NOWAIT)
+		goto out;
+
+	/* take the reference before dropping the mmap_sem */
+	userfaultfd_ctx_get(ctx);
+
+	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
+	uwq.wq.private = current;
+	uwq.msg = userfault_msg(address, flags, reason);
+	uwq.ctx = ctx;
+
+	return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	/*
+	 * After the __add_wait_queue the uwq is visible to userland
+	 * through poll/read().
+	 */
+	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
+	/*
+	 * The smp_mb() after __set_current_state prevents the reads
+	 * following the spin_unlock from happening before the list_add
+	 * in __add_wait_queue.
+	 */
+	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
+			  TASK_KILLABLE);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+	up_read(&mm->mmap_sem);
+
+	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+		   (return_to_userland ? !signal_pending(current) :
+		    !fatal_signal_pending(current)))) {
+		wake_up_poll(&ctx->fd_wqh, POLLIN);
+		schedule();
+		ret |= VM_FAULT_MAJOR;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	if (return_to_userland) {
+		if (signal_pending(current) &&
+		    !fatal_signal_pending(current)) {
+			/*
+			 * If we got a SIGSTOP or SIGCONT and this is
+			 * a normal userland page fault, just let
+			 * userland return so the signal will be
+			 * handled and gdb debugging works.  The page
+			 * fault code immediately after we return from
+			 * this function is going to release the
+			 * mmap_sem and it's not depending on it
+			 * (unlike gup would if we were not to return
+			 * VM_FAULT_RETRY).
+			 *
+			 * If a fatal signal is pending we still take
+			 * the streamlined VM_FAULT_RETRY failure path
+			 * and there's no need to retake the mmap_sem
+			 * in that case.
+			 */
+			down_read(&mm->mmap_sem);
+			ret = 0;
+		}
+	}
+
+	/*
+	 * Here we race with the list_del; list_add in
+	 * userfaultfd_ctx_read(). However, because we never run
+	 * list_del_init() to refile across the two lists, the prev
+	 * and next pointers will never point to self. list_add also
+	 * would never let either of the two pointers point to
+	 * self. So list_empty_careful won't risk seeing both pointers
+	 * pointing to self at any time during the list refile. The
+	 * only case where list_del_init() is called is the full
+	 * removal in the wake function, and there we don't re-list_add
+	 * and it's fine not to block on the spinlock. The uwq on this
+	 * kernel stack can be released after the list_del_init.
+	 */
+	if (!list_empty_careful(&uwq.wq.task_list)) {
+		spin_lock(&ctx->fault_pending_wqh.lock);
+		/*
+		 * No need of list_del_init(), the uwq on the stack
+		 * will be freed shortly anyway.
+		 */
+		list_del(&uwq.wq.task_list);
+		spin_unlock(&ctx->fault_pending_wqh.lock);
+	}
+
+	/*
+	 * ctx may go away after this if the userfault pseudo fd is
+	 * already released.
+	 */
+	userfaultfd_ctx_put(ctx);
+
+out:
+	return ret;
+}
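
For illustration, the userland half of this rendezvous, as a minimal hedged
sketch (it assumes "area" was registered with UFFDIO_REGISTER and that a
separate monitor thread services the uffd; this is not part of the patch):

	/* The faulting thread needs no special code: it touches the
	 * missing page and sleeps inside handle_userfault() until the
	 * monitor resolves the fault (e.g. with UFFDIO_COPY), at which
	 * point the fault is retried and the access completes.
	 */
	volatile char *p = area;
	char c = p[0];		/* blocks here until the monitor acts */
	(void)c;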
+
+static int userfaultfd_release(struct inode *inode, struct file *file)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev;
+	/* len == 0 means wake all */
+	struct userfaultfd_wake_range range = { .len = 0, };
+	unsigned long new_flags;
+
+	ACCESS_ONCE(ctx->released) = true;
+
+	/*
+	 * Flush page faults out of all CPUs. NOTE: all page faults
+	 * must be retried without returning VM_FAULT_SIGBUS if
+	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
+	 * changes while handle_userfault released the mmap_sem. So
+	 * it's critical that released is set to true (above), before
+	 * taking the mmap_sem for writing.
+	 */
+	down_write(&mm->mmap_sem);
+	prev = NULL;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		cond_resched();
+		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
+		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
+			prev = vma;
+			continue;
+		}
+		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+				 new_flags, vma->anon_vma,
+				 vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 NULL_VM_UFFD_CTX);
+		if (prev)
+			vma = prev;
+		else
+			prev = vma;
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+	}
+	up_write(&mm->mmap_sem);
+
+	/*
+	 * After no new page faults can wait on this fault_*wqh, flush
+	 * the last page faults that may have been already waiting on
+	 * the fault_*wqh.
+	 */
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range);
+	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	wake_up_poll(&ctx->fd_wqh, POLLHUP);
+	userfaultfd_ctx_put(ctx);
+	return 0;
+}
+
+/* fault_pending_wqh.lock must be held by the caller */
+static inline struct userfaultfd_wait_queue *find_userfault(
+	struct userfaultfd_ctx *ctx)
+{
+	wait_queue_t *wq;
+	struct userfaultfd_wait_queue *uwq;
+
+	VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock));
+
+	uwq = NULL;
+	if (!waitqueue_active(&ctx->fault_pending_wqh))
+		goto out;
+	/* walk in reverse to provide FIFO behavior to read userfaults */
+	wq = list_last_entry(&ctx->fault_pending_wqh.task_list,
+			     typeof(*wq), task_list);
+	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+out:
+	return uwq;
+}
+
+static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	unsigned int ret;
+
+	poll_wait(file, &ctx->fd_wqh, wait);
+
+	switch (ctx->state) {
+	case UFFD_STATE_WAIT_API:
+		return POLLERR;
+	case UFFD_STATE_RUNNING:
+		/*
+		 * poll() never guarantees that read won't block.
+		 * userfaults can be woken before they're read().
+		 */
+		if (unlikely(!(file->f_flags & O_NONBLOCK)))
+			return POLLERR;
+		/*
+		 * Lockless access to see if there are pending faults.
+		 * __pollwait's last action is the add_wait_queue, but
+		 * the spin_unlock would allow the waitqueue_active to
+		 * pass above the actual list_add inside the
+		 * add_wait_queue critical section. So use a full
+		 * memory barrier to serialize the list_add write of
+		 * add_wait_queue() with the waitqueue_active read
+		 * below.
+		 */
+		ret = 0;
+		smp_mb();
+		if (waitqueue_active(&ctx->fault_pending_wqh))
+			ret = POLLIN;
+		return ret;
+	default:
+		BUG();
+	}
+}
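
A hedged userland sketch of the contract above (it assumes the uffd was
created with O_NONBLOCK, since otherwise poll() reports POLLERR; error
handling omitted):

	#include <poll.h>

	static void wait_for_userfault(int uffd)
	{
		struct pollfd pfd = { .fd = uffd, .events = POLLIN };

		while (poll(&pfd, 1, -1) >= 0) {
			if (pfd.revents & POLLERR)
				break;	/* no O_NONBLOCK or no API handshake */
			if (pfd.revents & POLLIN)
				break;	/* a userfault is pending: read() it */
		}
	}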
+
+static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
+				    struct uffd_msg *msg)
+{
+	ssize_t ret;
+	DECLARE_WAITQUEUE(wait, current);
+	struct userfaultfd_wait_queue *uwq;
+
+	/* always take the fd_wqh lock before the fault_pending_wqh lock */
+	spin_lock(&ctx->fd_wqh.lock);
+	__add_wait_queue(&ctx->fd_wqh, &wait);
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock(&ctx->fault_pending_wqh.lock);
+		uwq = find_userfault(ctx);
+		if (uwq) {
+			/*
+			 * Use a seqcount to repeat the lockless check
+			 * in wake_userfault() to avoid missing
+			 * wakeups, because during the refile both
+			 * waitqueues could become empty if this is the
+			 * only userfault.
+			 */
+			write_seqcount_begin(&ctx->refile_seq);
+
+			/*
+			 * The fault_pending_wqh.lock prevents the uwq
+			 * from disappearing from under us.
+			 *
+			 * Refile this userfault from
+			 * fault_pending_wqh to fault_wqh, it's not
+			 * pending anymore after we read it.
+			 *
+			 * Use list_del() by hand (as
+			 * userfaultfd_wake_function also uses
+			 * list_del_init() by hand) to be sure nobody
+			 * changes __remove_wait_queue() to use
+			 * list_del_init() in turn breaking the
+			 * !list_empty_careful() check in
+			 * handle_userfault(). The uwq->wq.task_list
+			 * must never be empty at any time during the
+			 * refile, or the waitqueue could disappear
+			 * from under us. The "wait_queue_head_t"
+			 * parameter of __remove_wait_queue() is unused
+			 * anyway.
+			 */
+			list_del(&uwq->wq.task_list);
+			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+
+			write_seqcount_end(&ctx->refile_seq);
+
+			/* careful to always initialize msg if ret == 0 */
+			*msg = uwq->msg;
+			spin_unlock(&ctx->fault_pending_wqh.lock);
+			ret = 0;
+			break;
+		}
+		spin_unlock(&ctx->fault_pending_wqh.lock);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		if (no_wait) {
+			ret = -EAGAIN;
+			break;
+		}
+		spin_unlock(&ctx->fd_wqh.lock);
+		schedule();
+		spin_lock(&ctx->fd_wqh.lock);
+	}
+	__remove_wait_queue(&ctx->fd_wqh, &wait);
+	__set_current_state(TASK_RUNNING);
+	spin_unlock(&ctx->fd_wqh.lock);
+
+	return ret;
+}
+
+static ssize_t userfaultfd_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	ssize_t _ret, ret = 0;
+	struct uffd_msg msg;
+	int no_wait = file->f_flags & O_NONBLOCK;
+
+	if (ctx->state == UFFD_STATE_WAIT_API)
+		return -EINVAL;
+
+	for (;;) {
+		if (count < sizeof(msg))
+			return ret ? ret : -EINVAL;
+		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
+		if (_ret < 0)
+			return ret ? ret : _ret;
+		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
+			return ret ? ret : -EFAULT;
+		ret += sizeof(msg);
+		buf += sizeof(msg);
+		count -= sizeof(msg);
+		/*
+		 * Allow reading more than one fault at a time, but only
+		 * block if waiting for the very first one.
+		 */
+		no_wait = O_NONBLOCK;
+	}
+}
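
Read-side usage, as a hedged sketch (handle_fault() is a hypothetical
helper, not part of this patch): read() returns as many messages as fit in
the buffer, blocking at most for the first one.

	struct uffd_msg msgs[16];
	ssize_t n, i;

	n = read(uffd, msgs, sizeof(msgs));	/* multiple of sizeof(*msgs) */
	for (i = 0; n > 0 && i < n / (ssize_t)sizeof(*msgs); i++)
		if (msgs[i].event == UFFD_EVENT_PAGEFAULT)
			handle_fault(msgs[i].arg.pagefault.address,
				     msgs[i].arg.pagefault.flags);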
+
+static void __wake_userfault(struct userfaultfd_ctx *ctx,
+			     struct userfaultfd_wake_range *range)
+{
+	unsigned long start, end;
+
+	start = range->start;
+	end = range->start + range->len;
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	/* wake all in the range and autoremove */
+	if (waitqueue_active(&ctx->fault_pending_wqh))
+		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0,
+				     range);
+	if (waitqueue_active(&ctx->fault_wqh))
+		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+}
+
+static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
+					   struct userfaultfd_wake_range *range)
+{
+	unsigned seq;
+	bool need_wakeup;
+
+	/*
+	 * To be sure waitqueue_active() is not reordered by the CPU
+	 * before the pagetable update, use an explicit SMP memory
+	 * barrier here. PT lock release or up_read(mmap_sem) still
+	 * have release semantics that can allow the
+	 * waitqueue_active() to be reordered before the pte update.
+	 */
+	smp_mb();
+
+	/*
+	 * Use waitqueue_active because the address space is very
+	 * frequently changed atomically even when there are no
+	 * userfaults yet. So we take the spinlock only when we're
+	 * sure we have userfaults to wake.
+	 */
+	do {
+		seq = read_seqcount_begin(&ctx->refile_seq);
+		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
+			waitqueue_active(&ctx->fault_wqh);
+		cond_resched();
+	} while (read_seqcount_retry(&ctx->refile_seq, seq));
+	if (need_wakeup)
+		__wake_userfault(ctx, range);
+}
+
+static __always_inline int validate_range(struct mm_struct *mm,
+					  __u64 start, __u64 len)
+{
+	__u64 task_size = mm->task_size;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	if (len & ~PAGE_MASK)
+		return -EINVAL;
+	if (!len)
+		return -EINVAL;
+	if (start < mmap_min_addr)
+		return -EINVAL;
+	if (start >= task_size)
+		return -EINVAL;
+	if (len > task_size - start)
+		return -EINVAL;
+	return 0;
+}
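
In other words, userland must hand in page-aligned, non-empty ranges below
the task size. A hedged sketch of producing one (addr and size are
arbitrary byte quantities supplied by the caller):

	unsigned long page = sysconf(_SC_PAGESIZE);
	unsigned long a = (unsigned long)addr;
	__u64 start = a & ~(page - 1);
	__u64 len = ((a + size + page - 1) & ~(page - 1)) - start;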
+
+static int userfaultfd_register(struct userfaultfd_ctx *ctx,
+				unsigned long arg)
+{
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev, *cur;
+	int ret;
+	struct uffdio_register uffdio_register;
+	struct uffdio_register __user *user_uffdio_register;
+	unsigned long vm_flags, new_flags;
+	bool found;
+	unsigned long start, end, vma_end;
+
+	user_uffdio_register = (struct uffdio_register __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_register, user_uffdio_register,
+			   sizeof(uffdio_register)-sizeof(__u64)))
+		goto out;
+
+	ret = -EINVAL;
+	if (!uffdio_register.mode)
+		goto out;
+	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
+				     UFFDIO_REGISTER_MODE_WP))
+		goto out;
+	vm_flags = 0;
+	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
+		vm_flags |= VM_UFFD_MISSING;
+	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
+		vm_flags |= VM_UFFD_WP;
+		/*
+		 * FIXME: remove the below error constraint by
+		 * implementing the wprotect tracking mode.
+		 */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = validate_range(mm, uffdio_register.range.start,
+			     uffdio_register.range.len);
+	if (ret)
+		goto out;
+
+	start = uffdio_register.range.start;
+	end = start + uffdio_register.range.len;
+
+	down_write(&mm->mmap_sem);
+	vma = find_vma_prev(mm, start, &prev);
+
+	ret = -ENOMEM;
+	if (!vma)
+		goto out_unlock;
+
+	/* check that there's at least one vma in the range */
+	ret = -EINVAL;
+	if (vma->vm_start >= end)
+		goto out_unlock;
+
+	/*
+	 * Search for incompatible vmas.
+	 *
+	 * FIXME: this shall be relaxed later so that it doesn't fail
+	 * on tmpfs backed vmas (in addition to the current allowance
+	 * on anonymous vmas).
+	 */
+	found = false;
+	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+		cond_resched();
+
+		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+
+		/* check for incompatible vmas */
+		ret = -EINVAL;
+		if (cur->vm_ops)
+			goto out_unlock;
+
+		/*
+		 * Check that this vma isn't already owned by a
+		 * different userfaultfd. We can't allow more than one
+		 * userfaultfd to own a single vma simultaneously or we
+		 * wouldn't know which one to deliver the userfaults to.
+		 */
+		ret = -EBUSY;
+		if (cur->vm_userfaultfd_ctx.ctx &&
+		    cur->vm_userfaultfd_ctx.ctx != ctx)
+			goto out_unlock;
+
+		found = true;
+	}
+	BUG_ON(!found);
+
+	if (vma->vm_start < start)
+		prev = vma;
+
+	ret = 0;
+	do {
+		cond_resched();
+
+		BUG_ON(vma->vm_ops);
+		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
+		       vma->vm_userfaultfd_ctx.ctx != ctx);
+
+		/*
+		 * Nothing to do: this vma is already registered into this
+		 * userfaultfd and with the right tracking mode too.
+		 */
+		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
+		    (vma->vm_flags & vm_flags) == vm_flags)
+			goto skip;
+
+		if (vma->vm_start > start)
+			start = vma->vm_start;
+		vma_end = min(end, vma->vm_end);
+
+		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
+		prev = vma_merge(mm, prev, start, vma_end, new_flags,
+				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 ((struct vm_userfaultfd_ctx){ ctx }));
+		if (prev) {
+			vma = prev;
+			goto next;
+		}
+		if (vma->vm_start < start) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret)
+				break;
+		}
+		if (vma->vm_end > end) {
+			ret = split_vma(mm, vma, end, 0);
+			if (ret)
+				break;
+		}
+	next:
+		/*
+		 * In the vma_merge() successful mprotect-like case 8:
+		 * the next vma was merged into the current one and
+		 * the current one has not been updated yet.
+		 */
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx.ctx = ctx;
+
+	skip:
+		prev = vma;
+		start = vma->vm_end;
+		vma = vma->vm_next;
+	} while (vma && vma->vm_start < end);
+out_unlock:
+	up_write(&mm->mmap_sem);
+	if (!ret) {
+		/*
+		 * Now that we scanned all vmas we can already tell
+		 * userland which ioctls methods are guaranteed to
+		 * succeed on this range.
+		 */
+		if (put_user(UFFD_API_RANGE_IOCTLS,
+			     &user_uffdio_register->ioctls))
+			ret = -EFAULT;
+	}
+out:
+	return ret;
+}
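
Driving this from userland, as a hedged sketch (anonymous mappings only,
since any vma with vm_ops is rejected above; error handling omitted):

	struct uffdio_register reg = {
		.range = {
			.start = (unsigned long)area,	/* page-aligned */
			.len   = len,
		},
		.mode = UFFDIO_REGISTER_MODE_MISSING,	/* _WP is still -EINVAL */
	};

	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == 0) {
		/* reg.ioctls now lists the ioctls guaranteed to work on
		 * this range (UFFD_API_RANGE_IOCTLS). */
	}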
+
+static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+				  unsigned long arg)
+{
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev, *cur;
+	int ret;
+	struct uffdio_range uffdio_unregister;
+	unsigned long new_flags;
+	bool found;
+	unsigned long start, end, vma_end;
+	const void __user *buf = (void __user *)arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
+		goto out;
+
+	ret = validate_range(mm, uffdio_unregister.start,
+			     uffdio_unregister.len);
+	if (ret)
+		goto out;
+
+	start = uffdio_unregister.start;
+	end = start + uffdio_unregister.len;
+
+	down_write(&mm->mmap_sem);
+	vma = find_vma_prev(mm, start, &prev);
+
+	ret = -ENOMEM;
+	if (!vma)
+		goto out_unlock;
+
+	/* check that there's at least one vma in the range */
+	ret = -EINVAL;
+	if (vma->vm_start >= end)
+		goto out_unlock;
+
+	/*
+	 * Search for incompatible vmas.
+	 *
+	 * FIXME: this shall be relaxed later so that it doesn't fail
+	 * on tmpfs backed vmas (in addition to the current allowance
+	 * on anonymous vmas).
+	 */
+	found = false;
+	ret = -EINVAL;
+	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+		cond_resched();
+
+		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+
+		/*
+		 * Check for incompatible vmas. Not strictly required
+		 * here, as incompatible vmas cannot have a
+		 * userfaultfd_ctx registered on them, but this
+		 * provides stricter behavior to notice
+		 * unregistration errors.
+		 */
+		if (cur->vm_ops)
+			goto out_unlock;
+
+		found = true;
+	}
+	BUG_ON(!found);
+
+	if (vma->vm_start < start)
+		prev = vma;
+
+	ret = 0;
+	do {
+		cond_resched();
+
+		BUG_ON(vma->vm_ops);
+
+		/*
+		 * Nothing to do: this vma is not registered with any
+		 * userfaultfd, so there is nothing to unregister here.
+		 */
+		if (!vma->vm_userfaultfd_ctx.ctx)
+			goto skip;
+
+		if (vma->vm_start > start)
+			start = vma->vm_start;
+		vma_end = min(end, vma->vm_end);
+
+		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+		prev = vma_merge(mm, prev, start, vma_end, new_flags,
+				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 NULL_VM_UFFD_CTX);
+		if (prev) {
+			vma = prev;
+			goto next;
+		}
+		if (vma->vm_start < start) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret)
+				break;
+		}
+		if (vma->vm_end > end) {
+			ret = split_vma(mm, vma, end, 0);
+			if (ret)
+				break;
+		}
+	next:
+		/*
+		 * In the vma_merge() successful mprotect-like case 8:
+		 * the next vma was merged into the current one and
+		 * the current one has not been updated yet.
+		 */
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+
+	skip:
+		prev = vma;
+		start = vma->vm_end;
+		vma = vma->vm_next;
+	} while (vma && vma->vm_start < end);
+out_unlock:
+	up_write(&mm->mmap_sem);
+out:
+	return ret;
+}
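
The symmetric userland call, hedged the same way:

	struct uffdio_range unreg = {
		.start = (unsigned long)area,
		.len   = len,
	};

	ioctl(uffd, UFFDIO_UNREGISTER, &unreg);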
+
+/*
+ * userfaultfd_wake may be used in combination with the
+ * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
+ */
+static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
+			    unsigned long arg)
+{
+	int ret;
+	struct uffdio_range uffdio_wake;
+	struct userfaultfd_wake_range range;
+	const void __user *buf = (void __user *)arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
+	if (ret)
+		goto out;
+
+	range.start = uffdio_wake.start;
+	range.len = uffdio_wake.len;
+
+	/*
+	 * len == 0 means wake all, and we don't want to wake all here,
+	 * so check it again to be sure.
+	 */
+	VM_BUG_ON(!range.len);
+
+	wake_userfault(ctx, &range);
+	ret = 0;
+
+out:
+	return ret;
+}
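
The batching the comment above refers to, as a hedged userland sketch:
resolve each page with DONTWAKE, then issue a single wakeup for the whole
range (names and loop bounds are illustrative):

	struct uffdio_copy cp = {
		.dst  = start,			/* advance per page */
		.src  = (unsigned long)src,
		.len  = page,
		.mode = UFFDIO_COPY_MODE_DONTWAKE,
	};
	/* ... one UFFDIO_COPY per page in [start, start + len) ... */
	ioctl(uffd, UFFDIO_COPY, &cp);

	struct uffdio_range all = { .start = start, .len = len };
	ioctl(uffd, UFFDIO_WAKE, &all);		/* one wakeup for the batch */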
+
+static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
+			    unsigned long arg)
+{
+	__s64 ret;
+	struct uffdio_copy uffdio_copy;
+	struct uffdio_copy __user *user_uffdio_copy;
+	struct userfaultfd_wake_range range;
+
+	user_uffdio_copy = (struct uffdio_copy __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
+			   /* don't copy "copy" last field */
+			   sizeof(uffdio_copy)-sizeof(__s64)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
+	if (ret)
+		goto out;
+	/*
+	 * Double check for wraparound just in case. copy_from_user()
+	 * will later check that uffdio_copy.src + uffdio_copy.len fits
+	 * in the userland range.
+	 */
+	ret = -EINVAL;
+	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
+		goto out;
+	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
+		goto out;
+
+	ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
+			   uffdio_copy.len);
+	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
+		return -EFAULT;
+	if (ret < 0)
+		goto out;
+	BUG_ON(!ret);
+	/* len == 0 would wake all */
+	range.len = ret;
+	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
+		range.start = uffdio_copy.dst;
+		wake_userfault(ctx, &range);
+	}
+	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
+out:
+	return ret;
+}
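
The userland contract, as a hedged sketch: on success the faulting threads
are woken (unless DONTWAKE), while a short copy is reported through the
"copy" field together with -EAGAIN so the caller can retry the remainder
(fault_addr, page and src_page are assumed caller state):

	struct uffdio_copy cp = {
		.dst  = fault_addr & ~(page - 1),	/* page-aligned */
		.src  = (unsigned long)src_page,
		.len  = page,
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_COPY, &cp) && errno == EAGAIN) {
		/* cp.copy holds the bytes actually resolved */
	}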
+
+static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
+				unsigned long arg)
+{
+	__s64 ret;
+	struct uffdio_zeropage uffdio_zeropage;
+	struct uffdio_zeropage __user *user_uffdio_zeropage;
+	struct userfaultfd_wake_range range;
+
+	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
+			   /* don't copy "zeropage" last field */
+			   sizeof(uffdio_zeropage)-sizeof(__s64)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
+			     uffdio_zeropage.range.len);
+	if (ret)
+		goto out;
+	ret = -EINVAL;
+	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
+		goto out;
+
+	ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
+			     uffdio_zeropage.range.len);
+	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+		return -EFAULT;
+	if (ret < 0)
+		goto out;
+	/* len == 0 would wake all */
+	BUG_ON(!ret);
+	range.len = ret;
+	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
+		range.start = uffdio_zeropage.range.start;
+		wake_userfault(ctx, &range);
+	}
+	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
+out:
+	return ret;
+}
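
And the zeropage variant, hedged the same way:

	struct uffdio_zeropage zp = {
		.range = { .start = fault_addr & ~(page - 1), .len = page },
		.mode  = 0,
	};

	ioctl(uffd, UFFDIO_ZEROPAGE, &zp);	/* zp.zeropage = bytes zeroed */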
+
+/*
+ * Userland asks for a certain API version, and we return which bits
+ * and ioctl commands are implemented in this kernel for that API
+ * version, or -EINVAL if it is unknown.
+ */
+static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+			   unsigned long arg)
+{
+	struct uffdio_api uffdio_api;
+	void __user *buf = (void __user *)arg;
+	int ret;
+
+	ret = -EINVAL;
+	if (ctx->state != UFFD_STATE_WAIT_API)
+		goto out;
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
+		goto out;
+	if (uffdio_api.api != UFFD_API || uffdio_api.features) {
+		memset(&uffdio_api, 0, sizeof(uffdio_api));
+		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+			goto out;
+		ret = -EINVAL;
+		goto out;
+	}
+	uffdio_api.features = UFFD_API_FEATURES;
+	uffdio_api.ioctls = UFFD_API_IOCTLS;
+	ret = -EFAULT;
+	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+		goto out;
+	ctx->state = UFFD_STATE_RUNNING;
+	ret = 0;
+out:
+	return ret;
+}
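
The handshake from the userland side, as a hedged sketch (until it
succeeds, every other operation on the fd fails with -EINVAL):

	struct uffdio_api api = {
		.api	  = UFFD_API,
		.features = 0,		/* must be zero for this API version */
	};

	if (ioctl(uffd, UFFDIO_API, &api) == 0) {
		/* api.features and api.ioctls now describe the kernel */
	}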
+
+static long userfaultfd_ioctl(struct file *file, unsigned cmd,
+			      unsigned long arg)
+{
+	int ret = -EINVAL;
+	struct userfaultfd_ctx *ctx = file->private_data;
+
+	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
+		return -EINVAL;
+
+	switch (cmd) {
+	case UFFDIO_API:
+		ret = userfaultfd_api(ctx, arg);
+		break;
+	case UFFDIO_REGISTER:
+		ret = userfaultfd_register(ctx, arg);
+		break;
+	case UFFDIO_UNREGISTER:
+		ret = userfaultfd_unregister(ctx, arg);
+		break;
+	case UFFDIO_WAKE:
+		ret = userfaultfd_wake(ctx, arg);
+		break;
+	case UFFDIO_COPY:
+		ret = userfaultfd_copy(ctx, arg);
+		break;
+	case UFFDIO_ZEROPAGE:
+		ret = userfaultfd_zeropage(ctx, arg);
+		break;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_PROC_FS
+static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct userfaultfd_ctx *ctx = f->private_data;
+	wait_queue_t *wq;
+	struct userfaultfd_wait_queue *uwq;
+	unsigned long pending = 0, total = 0;
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
+		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+		pending++;
+		total++;
+	}
+	list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
+		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+		total++;
+	}
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	/*
+	 * If more protocols are added in the future, they will all be
+	 * shown separated by a space, like this:
+	 *	protocols: aa:... bb:...
+	 */
+	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
+		   pending, total, UFFD_API, UFFD_API_FEATURES,
+		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
+}
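
Read back from userland, as a hedged sketch (format per the seq_printf
above; error handling omitted):

	char path[64], buf[256];
	int fd;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", uffd);
	fd = open(path, O_RDONLY);
	read(fd, buf, sizeof(buf) - 1);	/* "pending:\t...\ntotal:\t...\nAPI:\t..." */
	close(fd);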
+#endif
+
+static const struct file_operations userfaultfd_fops = {
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo	= userfaultfd_show_fdinfo,
+#endif
+	.release	= userfaultfd_release,
+	.poll		= userfaultfd_poll,
+	.read		= userfaultfd_read,
+	.unlocked_ioctl = userfaultfd_ioctl,
+	.compat_ioctl	= userfaultfd_ioctl,
+	.llseek		= noop_llseek,
+};
+
+static void init_once_userfaultfd_ctx(void *mem)
+{
+	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
+
+	init_waitqueue_head(&ctx->fault_pending_wqh);
+	init_waitqueue_head(&ctx->fault_wqh);
+	init_waitqueue_head(&ctx->fd_wqh);
+	seqcount_init(&ctx->refile_seq);
+}
+
+/**
+ * userfaultfd_file_create - Creates a userfaultfd file pointer.
+ * @flags: Flags for the userfaultfd file.
+ *
+ * This function creates a userfaultfd file pointer without installing
+ * it into the fd table. This is useful when the userfaultfd file is
+ * used during the initialization of data structures that require
+ * extra setup after the userfaultfd creation. So the userfaultfd
+ * creation is split into the file pointer creation phase and the
+ * file descriptor installation phase.  In this way races with
+ * userspace closing the newly installed file descriptor can be
+ * avoided.  Returns a userfaultfd file pointer, or a proper error
+ * pointer.
+ */
+static struct file *userfaultfd_file_create(int flags)
+{
+	struct file *file;
+	struct userfaultfd_ctx *ctx;
+
+	BUG_ON(!current->mm);
+
+	/* Check the UFFD_* constants for consistency.  */
+	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
+	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
+
+	file = ERR_PTR(-EINVAL);
+	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
+		goto out;
+
+	file = ERR_PTR(-ENOMEM);
+	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
+	if (!ctx)
+		goto out;
+
+	atomic_set(&ctx->refcount, 1);
+	ctx->flags = flags;
+	ctx->state = UFFD_STATE_WAIT_API;
+	ctx->released = false;
+	ctx->mm = current->mm;
+	/* prevent the mm struct from being freed */
+	atomic_inc(&ctx->mm->mm_users);
+
+	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
+				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
+	if (IS_ERR(file))
+		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+out:
+	return file;
+}
+
+SYSCALL_DEFINE1(userfaultfd, int, flags)
+{
+	int fd, error;
+	struct file *file;
+
+	error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
+	if (error < 0)
+		return error;
+	fd = error;
+
+	file = userfaultfd_file_create(flags);
+	if (IS_ERR(file)) {
+		error = PTR_ERR(file);
+		goto err_put_unused_fd;
+	}
+	fd_install(fd, file);
+
+	return fd;
+
+err_put_unused_fd:
+	put_unused_fd(fd);
+
+	return error;
+}
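
There is no libc wrapper for a brand-new syscall, so userland goes through
syscall(2); a hedged sketch (__NR_userfaultfd is arch-specific and only
present in headers that include this patch):

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		/* ENOSYS on kernels without this patch */
	}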
+
+static int __init userfaultfd_init(void)
+{
+	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
+						sizeof(struct userfaultfd_ctx),
+						0,
+						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						init_once_userfaultfd_ctx);
+	return 0;
+}
+__initcall(userfaultfd_init);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index df68285..a096841 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -33,6 +33,7 @@
 				   xfs_attr.o \
 				   xfs_attr_leaf.o \
 				   xfs_attr_remote.o \
+				   xfs_bit.o \
 				   xfs_bmap.o \
 				   xfs_bmap_btree.o \
 				   xfs_btree.o \
@@ -63,7 +64,6 @@
 xfs-y				+= xfs_aops.o \
 				   xfs_attr_inactive.o \
 				   xfs_attr_list.o \
-				   xfs_bit.o \
 				   xfs_bmap_util.o \
 				   xfs_buf.o \
 				   xfs_dir2_readdir.o \
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index f9e9ffe..ffad7f2 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -464,7 +464,7 @@
 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
 	int		i;
 
-	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_uuid))
+	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
 	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
 		return false;
@@ -1937,7 +1937,7 @@
 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
 	xfs_agblock_t		bno;	/* freelist block */
 	xfs_extlen_t		need;	/* total blocks needed in freelist */
-	int			error;
+	int			error = 0;
 
 	if (!pag->pagf_init) {
 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
@@ -2260,7 +2260,7 @@
 	struct xfs_agf	*agf = XFS_BUF_TO_AGF(bp);
 
 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
-	    !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid))
+	    !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 
 	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 59d521c..90de071 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -295,7 +295,7 @@
 	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
 		if (!xfs_sb_version_hascrc(&mp->m_sb))
 			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
 			return false;
@@ -313,7 +313,7 @@
 	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
 		if (!xfs_sb_version_hascrc(&mp->m_sb))
 			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
 			return false;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 3349c9a..ff065578 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -139,6 +139,8 @@
 
 	args.value = value;
 	args.valuelen = *valuelenp;
+	/* Entirely possible to look up a name which doesn't exist */
+	args.op_flags = XFS_DA_OP_OKNOENT;
 
 	lock_mode = xfs_ilock_attr_map_shared(ip);
 	if (!xfs_inode_hasattr(ip))
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index e9d401c..33df52d 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -262,7 +262,7 @@
 		if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
 			return false;
 
-		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
 			return false;
@@ -1056,7 +1056,7 @@
 
 		hdr3->blkno = cpu_to_be64(bp->b_bn);
 		hdr3->owner = cpu_to_be64(dp->i_ino);
-		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
 
 		ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
 	} else {
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index dd71403..f38f9bd 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -100,7 +100,7 @@
 		return false;
 	if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
 		return false;
-	if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
+	if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
 	if (be64_to_cpu(rmt->rm_blkno) != bno)
 		return false;
@@ -222,7 +222,7 @@
 	rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
 	rmt->rm_offset = cpu_to_be32(offset);
 	rmt->rm_bytes = cpu_to_be32(size);
-	uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
+	uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid);
 	rmt->rm_owner = cpu_to_be64(ino);
 	rmt->rm_blkno = cpu_to_be64(bno);
 
@@ -618,9 +618,8 @@
 
 		xfs_bmap_init(args->flist, args->firstblock);
 		error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
-				    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-				    1, args->firstblock, args->flist,
-				    &done);
+				    XFS_BMAPI_ATTRFORK, 1, args->firstblock,
+				    args->flist, &done);
 		if (!error) {
 			error = xfs_bmap_finish(&args->trans, args->flist,
 						&committed);
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/libxfs/xfs_bit.c
similarity index 100%
rename from fs/xfs/xfs_bit.c
rename to fs/xfs/libxfs/xfs_bit.c
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 63e05b6..8e2010d 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5945,6 +5945,7 @@
 	return xfs_trans_commit(tp);
 
 out:
+	xfs_bmap_cancel(&free_list);
 	xfs_trans_cancel(tp);
 	return error;
 }
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 2c44c8e..6b0cf65 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -349,7 +349,8 @@
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
-		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid));
+		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
+		       &mp->m_sb.sb_meta_uuid));
 		ASSERT(rblock->bb_u.l.bb_blkno ==
 		       cpu_to_be64(XFS_BUF_DADDR_NULL));
 	} else
@@ -647,7 +648,7 @@
 	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
 		if (!xfs_sb_version_hascrc(&mp->m_sb))
 			return false;
-		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
 			return false;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index c72283d..f7d7ee7 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -65,7 +65,8 @@
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		lblock_ok = lblock_ok &&
-			uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid) &&
+			uuid_equal(&block->bb_u.l.bb_uuid,
+				   &mp->m_sb.sb_meta_uuid) &&
 			block->bb_u.l.bb_blkno == cpu_to_be64(
 				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
 	}
@@ -115,7 +116,8 @@
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		sblock_ok = sblock_ok &&
-			uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid) &&
+			uuid_equal(&block->bb_u.s.bb_uuid,
+				   &mp->m_sb.sb_meta_uuid) &&
 			block->bb_u.s.bb_blkno == cpu_to_be64(
 				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
 	}
@@ -1000,7 +1002,7 @@
 		if (flags & XFS_BTREE_CRC_BLOCKS) {
 			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
 			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
-			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
 			buf->bb_u.l.bb_pad = 0;
 			buf->bb_u.l.bb_lsn = 0;
 		}
@@ -1013,7 +1015,7 @@
 		if (flags & XFS_BTREE_CRC_BLOCKS) {
 			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
 			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
-			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
 			buf->bb_u.s.bb_lsn = 0;
 		}
 	}
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 2385f8c..be43248 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -146,7 +146,7 @@
 		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
 			return false;
 
-		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
 			return false;
@@ -233,6 +233,7 @@
 			bp->b_ops->verify_read(bp);
 			return;
 		default:
+			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 			break;
 	}
 
@@ -324,7 +325,7 @@
 		ichdr.magic = XFS_DA3_NODE_MAGIC;
 		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
 		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
-		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
 	} else {
 		ichdr.magic = XFS_DA_NODE_MAGIC;
 	}
@@ -1822,6 +1823,7 @@
 	struct xfs_da_args	*args;
 	struct xfs_da_node_entry *btree;
 	struct xfs_da3_icnode_hdr nodehdr;
+	struct xfs_buf		*bp;
 	xfs_dablk_t		blkno = 0;
 	int			level;
 	int			error;
@@ -1866,20 +1868,24 @@
 	 */
 	for (blk++, level++; level < path->active; blk++, level++) {
 		/*
-		 * Release the old block.
-		 * (if it's dirty, trans won't actually let go)
+		 * Read the next child block into a local buffer.
+		 */
+		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
+					  args->whichfork);
+		if (error)
+			return error;
+
+		/*
+		 * Release the old block (if it's dirty, the trans doesn't
+		 * actually let go) and swap the local buffer into the path
+		 * structure. This ensures failure of the above read doesn't set
+		 * a NULL buffer in an active slot in the path.
 		 */
 		if (release)
 			xfs_trans_brelse(args->trans, blk->bp);
-
-		/*
-		 * Read the next child block.
-		 */
 		blk->blkno = blkno;
-		error = xfs_da3_node_read(args->trans, dp, blkno, -1,
-					&blk->bp, args->whichfork);
-		if (error)
-			return error;
+		blk->bp = bp;
+
 		info = blk->bp->b_addr;
 		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
@@ -2351,8 +2357,8 @@
 		 * the last block to the place we want to kill.
 		 */
 		error = xfs_bunmapi(tp, dp, dead_blkno, count,
-				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
-				    0, args->firstblock, args->flist, &done);
+				    xfs_bmapi_aflag(w), 0, args->firstblock,
+				    args->flist, &done);
 		if (error == -ENOSPC) {
 			if (w != XFS_DATA_FORK)
 				break;
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 74bcbab..b14bbd6 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -680,8 +680,15 @@
 typedef struct xfs_attr_leafblock {
 	xfs_attr_leaf_hdr_t	hdr;	/* constant-structure header block */
 	xfs_attr_leaf_entry_t	entries[1];	/* sorted on key, not name */
-	xfs_attr_leaf_name_local_t namelist;	/* grows from bottom of buf */
-	xfs_attr_leaf_name_remote_t valuelist;	/* grows from bottom of buf */
+	/*
+	 * The rest of the block contains the following structures after the
+	 * leaf entries, growing from the bottom up. The variables are never
+	 * referenced, and defining them can actually make gcc optimize away
+	 * accesses to the 'entries' array above index 0, so don't do that.
+	 *
+	 * xfs_attr_leaf_name_local_t namelist;
+	 * xfs_attr_leaf_name_remote_t valuelist;
+	 */
 } xfs_attr_leafblock_t;
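
What the comment implies in practice, as a hedged sketch (the helper name
is hypothetical, not the XFS API): the name/value areas are now reached by
byte offset from the block start rather than through declared members:

	static inline xfs_attr_leaf_name_local_t *
	attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
	{
		int off = be16_to_cpu(leafp->entries[idx].nameidx);

		return (xfs_attr_leaf_name_local_t *)((char *)leafp + off);
	}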
 
 /*
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index a69fb3a..9de401d 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -362,6 +362,7 @@
 	struct xfs_da_args *args;
 	int		rval;
 	int		v;		/* type-checking value */
+	int		lock_mode;
 
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
 	XFS_STATS_INC(xs_dir_lookup);
@@ -387,6 +388,7 @@
 	if (ci_name)
 		args->op_flags |= XFS_DA_OP_CILOOKUP;
 
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
 		rval = xfs_dir2_sf_lookup(args);
 		goto out_check_rval;
@@ -419,6 +421,7 @@
 		}
 	}
 out_free:
+	xfs_iunlock(dp, lock_mode);
 	kmem_free(args);
 	return rval;
 }
@@ -674,25 +677,22 @@
 	mp = dp->i_mount;
 	tp = args->trans;
 	da = xfs_dir2_db_to_da(args->geo, db);
-	/*
-	 * Unmap the fsblock(s).
-	 */
-	if ((error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount,
-			XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
-			&done))) {
+
+	/* Unmap the fsblock(s). */
+	error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
+			    args->firstblock, args->flist, &done);
+	if (error) {
 		/*
-		 * ENOSPC actually can happen if we're in a removename with
-		 * no space reservation, and the resulting block removal
-		 * would cause a bmap btree split or conversion from extents
-		 * to btree.  This can only happen for un-fragmented
-		 * directory blocks, since you need to be punching out
-		 * the middle of an extent.
-		 * In this case we need to leave the block in the file,
-		 * and not binval it.
-		 * So the block has to be in a consistent empty state
-		 * and appropriately logged.
-		 * We don't free up the buffer, the caller can tell it
-		 * hasn't happened since it got an error back.
+		 * ENOSPC actually can happen if we're in a removename with no
+		 * space reservation, and the resulting block removal would
+		 * cause a bmap btree split or conversion from extents to btree.
+		 * This can only happen for un-fragmented directory blocks,
+		 * since you need to be punching out the middle of an extent.
+		 * In this case we need to leave the block in the file, and not
+		 * binval it.  So the block has to be in a consistent empty
+		 * state and appropriately logged.  We don't free up the buffer,
+		 * the caller can tell it hasn't happened since it got an error
+		 * back.
 		 */
 		return error;
 	}
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 9354e19..4778d1d 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -67,7 +67,7 @@
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC))
 			return false;
-		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
 			return false;
@@ -157,7 +157,7 @@
 		hdr3->magic = cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
 		hdr3->blkno = cpu_to_be64(bp->b_bn);
 		hdr3->owner = cpu_to_be64(dp->i_ino);
-		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
 		return;
 
 	}
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index de1ea16..824131e 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -220,7 +220,7 @@
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		if (hdr3->magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC))
 			return false;
-		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
 			return false;
@@ -252,7 +252,8 @@
 		return;
 	case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
 	case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
-		xfs_dir3_data_verify(bp);
+		bp->b_ops = &xfs_dir3_data_buf_ops;
+		bp->b_ops->verify_read(bp);
 		return;
 	default:
 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
@@ -604,7 +605,7 @@
 		hdr3->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
 		hdr3->blkno = cpu_to_be64(bp->b_bn);
 		hdr3->owner = cpu_to_be64(dp->i_ino);
-		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
 
 	} else
 		hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index 1061199..f300240 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -160,7 +160,7 @@
 
 		if (leaf3->info.hdr.magic != cpu_to_be16(magic3))
 			return false;
-		if (!uuid_equal(&leaf3->info.uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
 			return false;
@@ -310,7 +310,7 @@
 					 : cpu_to_be16(XFS_DIR3_LEAFN_MAGIC);
 		leaf3->info.blkno = cpu_to_be64(bp->b_bn);
 		leaf3->info.owner = cpu_to_be64(owner);
-		uuid_copy(&leaf3->info.uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid);
 	} else {
 		memset(leaf, 0, sizeof(*leaf));
 		leaf->hdr.info.magic = cpu_to_be16(type);
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 41b80d3..cc28e92 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -93,7 +93,7 @@
 
 		if (hdr3->magic != cpu_to_be32(XFS_DIR3_FREE_MAGIC))
 			return false;
-		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
 			return false;
@@ -226,7 +226,7 @@
 
 		hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
 		hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
-		uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_meta_uuid);
 	} else
 		hdr.magic = XFS_DIR2_FREE_MAGIC;
 	dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr);
@@ -1845,8 +1845,7 @@
 
 			if (dp->d_ops->db_to_fdb(args->geo, dbno) != fbno) {
 				xfs_alert(mp,
-			"%s: dir ino %llu needed freesp block %lld for\n"
-			"  data block %lld, got %lld ifbno %llu lastfbno %d",
+"%s: dir ino %llu needed freesp block %lld for data block %lld, got %lld ifbno %llu lastfbno %d",
 					__func__, (unsigned long long)dp->i_ino,
 					(long long)dp->d_ops->db_to_fdb(
 								args->geo, dbno),
@@ -2132,6 +2131,7 @@
 	int			error;		/* error return value */
 	int			i;		/* btree level */
 	xfs_ino_t		inum;		/* new inode number */
+	int			ftype;		/* new file type */
 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
 	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry being changed */
 	int			rval;		/* internal return value */
@@ -2145,7 +2145,14 @@
 	state = xfs_da_state_alloc();
 	state->args = args;
 	state->mp = args->dp->i_mount;
+
+	/*
+	 * We have to save the new inode number and ftype since
+	 * xfs_da3_node_lookup_int() is going to overwrite them
+	 */
 	inum = args->inumber;
+	ftype = args->filetype;
+
 	/*
 	 * Lookup the entry to change in the btree.
 	 */
@@ -2183,7 +2190,7 @@
 		 * Fill in the new inode number and log the entry.
 		 */
 		dep->inumber = cpu_to_be64(inum);
-		args->dp->d_ops->data_put_ftype(dep, args->filetype);
+		args->dp->d_ops->data_put_ftype(dep, ftype);
 		xfs_dir2_data_log_entry(args, state->extrablk.bp, dep);
 		rval = 0;
 	}
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 6fbf2d8..5331b7f 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -163,7 +163,7 @@
 	d->dd_diskdq.d_id = cpu_to_be32(id);
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
-		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 				 XFS_DQUOT_CRC_OFF);
 	}
@@ -198,7 +198,7 @@
 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
 				 XFS_DQUOT_CRC_OFF))
 			return false;
-		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 	}
 	return true;
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index a0ae572..9590a06 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -100,7 +100,7 @@
 	xfs_rfsblock_t	sb_dblocks;	/* number of data blocks */
 	xfs_rfsblock_t	sb_rblocks;	/* number of realtime blocks */
 	xfs_rtblock_t	sb_rextents;	/* number of realtime extents */
-	uuid_t		sb_uuid;	/* file system unique id */
+	uuid_t		sb_uuid;	/* user-visible file system unique id */
 	xfs_fsblock_t	sb_logstart;	/* starting block of log if internal */
 	xfs_ino_t	sb_rootino;	/* root inode number */
 	xfs_ino_t	sb_rbmino;	/* bitmap inode for realtime extents */
@@ -174,6 +174,7 @@
 
 	xfs_ino_t	sb_pquotino;	/* project quota inode */
 	xfs_lsn_t	sb_lsn;		/* last write sequence */
+	uuid_t		sb_meta_uuid;	/* metadata file system unique id */
 
 	/* must be padded to 64 bit alignment */
 } xfs_sb_t;
@@ -190,7 +191,7 @@
 	__be64		sb_dblocks;	/* number of data blocks */
 	__be64		sb_rblocks;	/* number of realtime blocks */
 	__be64		sb_rextents;	/* number of realtime extents */
-	uuid_t		sb_uuid;	/* file system unique id */
+	uuid_t		sb_uuid;	/* user-visible file system unique id */
 	__be64		sb_logstart;	/* starting block of log if internal */
 	__be64		sb_rootino;	/* root inode number */
 	__be64		sb_rbmino;	/* bitmap inode for realtime extents */
@@ -260,6 +261,7 @@
 
 	__be64		sb_pquotino;	/* project quota inode */
 	__be64		sb_lsn;		/* last write sequence */
+	uuid_t		sb_meta_uuid;	/* metadata file system unique id */
 
 	/* must be padded to 64 bit alignment */
 } xfs_dsb_t;
@@ -458,9 +460,11 @@
 
 #define XFS_SB_FEAT_INCOMPAT_FTYPE	(1 << 0)	/* filetype in dirent */
 #define XFS_SB_FEAT_INCOMPAT_SPINODES	(1 << 1)	/* sparse inode chunks */
+#define XFS_SB_FEAT_INCOMPAT_META_UUID	(1 << 2)	/* metadata UUID */
 #define XFS_SB_FEAT_INCOMPAT_ALL \
 		(XFS_SB_FEAT_INCOMPAT_FTYPE|	\
-		 XFS_SB_FEAT_INCOMPAT_SPINODES)
+		 XFS_SB_FEAT_INCOMPAT_SPINODES|	\
+		 XFS_SB_FEAT_INCOMPAT_META_UUID)
 
 #define XFS_SB_FEAT_INCOMPAT_UNKNOWN	~XFS_SB_FEAT_INCOMPAT_ALL
 static inline bool
@@ -515,6 +519,18 @@
 }
 
 /*
+ * XFS_SB_FEAT_INCOMPAT_META_UUID indicates that the metadata UUID
+ * is stored separately from the user-visible UUID; this allows the
+ * user-visible UUID to be changed on V5 filesystems which have a
+ * filesystem UUID stamped into every piece of metadata.
+ */
+static inline bool xfs_sb_version_hasmetauuid(struct xfs_sb *sbp)
+{
+	return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
+		(sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_META_UUID);
+}
+
+/*
  * end of superblock version macros
  */
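
A hedged sketch of the administrative flow this enables (tool-side logic,
not part of this patch): changing the user-visible UUID on a V5 filesystem
preserves the original in sb_meta_uuid so existing metadata still verifies:

	if (!uuid_equal(&new_uuid, &sbp->sb_uuid)) {
		uuid_copy(&sbp->sb_meta_uuid, &sbp->sb_uuid);
		sbp->sb_features_incompat |= XFS_SB_FEAT_INCOMPAT_META_UUID;
		uuid_copy(&sbp->sb_uuid, &new_uuid);
	}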
 
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 66efc70..54deb2d 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -338,7 +338,8 @@
 			if (version == 3) {
 				free->di_ino = cpu_to_be64(ino);
 				ino++;
-				uuid_copy(&free->di_uuid, &mp->m_sb.sb_uuid);
+				uuid_copy(&free->di_uuid,
+					  &mp->m_sb.sb_meta_uuid);
 				xfs_dinode_calc_crc(mp, free);
 			} else if (tp) {
 				/* just log the inode core */
@@ -2232,7 +2233,7 @@
 	}
 
 	xfs_trans_brelse(tp, agbp);
-	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 	if (error)
 		return error;
 
@@ -2500,7 +2501,7 @@
 	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
 
 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
-	    !uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_uuid))
+	    !uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 	/*
 	 * Validate the magic number of the agi block.
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 674ad8f..f39b285 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -239,7 +239,7 @@
 	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
 		if (!xfs_sb_version_hascrc(&mp->m_sb))
 			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
+		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
 			return false;
 		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
 			return false;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 6526e76..268c00f 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -304,7 +304,7 @@
 		return false;
 	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
 		return false;
-	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
+	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
 	return true;
 }
@@ -366,7 +366,7 @@
 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 			ip->i_d.di_version = 3;
 			ip->i_d.di_ino = ip->i_ino;
-			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid);
 		} else
 			ip->i_d.di_version = 2;
 		return 0;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index df9851c..4742514 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -131,10 +131,11 @@
 		if (xfs_sb_has_compat_feature(sbp,
 					XFS_SB_FEAT_COMPAT_UNKNOWN)) {
 			xfs_warn(mp,
-"Superblock has unknown compatible features (0x%x) enabled.\n"
-"Using a more recent kernel is recommended.",
+"Superblock has unknown compatible features (0x%x) enabled.",
 				(sbp->sb_features_compat &
 						XFS_SB_FEAT_COMPAT_UNKNOWN));
+			xfs_warn(mp,
+"Using a more recent kernel is recommended.");
 		}
 
 		if (xfs_sb_has_ro_compat_feature(sbp,
@@ -145,18 +146,21 @@
 						XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
 			if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
 				xfs_warn(mp,
-"Attempted to mount read-only compatible filesystem read-write.\n"
+"Attempted to mount read-only compatible filesystem read-write.");
+				xfs_warn(mp,
 "Filesystem can only be safely mounted read only.");
+
 				return -EINVAL;
 			}
 		}
 		if (xfs_sb_has_incompat_feature(sbp,
 					XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
 			xfs_warn(mp,
-"Superblock has unknown incompatible features (0x%x) enabled.\n"
-"Filesystem can not be safely mounted by this kernel.",
+"Superblock has unknown incompatible features (0x%x) enabled.",
 				(sbp->sb_features_incompat &
 						XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+			xfs_warn(mp,
+"Filesystem can not be safely mounted by this kernel.");
 			return -EINVAL;
 		}
 	}
@@ -182,9 +186,6 @@
 	if (xfs_sb_version_hassparseinodes(sbp)) {
 		uint32_t	align;
 
-		xfs_alert(mp,
-	"EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
-
 		align = XFS_INODES_PER_CHUNK * sbp->sb_inodesize
 				>> sbp->sb_blocklog;
 		if (sbp->sb_inoalignmt != align) {
@@ -398,6 +399,14 @@
 	to->sb_spino_align = be32_to_cpu(from->sb_spino_align);
 	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
 	to->sb_lsn = be64_to_cpu(from->sb_lsn);
+	/*
+	 * sb_meta_uuid is only on disk if it differs from sb_uuid and the
+	 * feature flag is set; if not set we keep it only in memory.
+	 */
+	if (xfs_sb_version_hasmetauuid(to))
+		uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
+	else
+		uuid_copy(&to->sb_meta_uuid, &from->sb_uuid);
 	/* Convert on-disk flags to in-memory flags? */
 	if (convert_xquota)
 		xfs_sb_quota_from_disk(to);
@@ -539,6 +548,8 @@
 				cpu_to_be32(from->sb_features_log_incompat);
 		to->sb_spino_align = cpu_to_be32(from->sb_spino_align);
 		to->sb_lsn = cpu_to_be64(from->sb_lsn);
+		if (xfs_sb_version_hasmetauuid(from))
+			uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
 	}
 }
 
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index e7e26bd..8f8af05 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -63,7 +63,7 @@
 	dsl->sl_magic = cpu_to_be32(XFS_SYMLINK_MAGIC);
 	dsl->sl_offset = cpu_to_be32(offset);
 	dsl->sl_bytes = cpu_to_be32(size);
-	uuid_copy(&dsl->sl_uuid, &mp->m_sb.sb_uuid);
+	uuid_copy(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid);
 	dsl->sl_owner = cpu_to_be64(ino);
 	dsl->sl_blkno = cpu_to_be64(bp->b_bn);
 	bp->b_ops = &xfs_symlink_buf_ops;
@@ -107,7 +107,7 @@
 		return false;
 	if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
 		return false;
-	if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_uuid))
+	if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
 	if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
 		return false;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c77499b..50ab287 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -119,8 +119,7 @@
 	 * We may pass freeze protection with a transaction.  So tell lockdep
 	 * we released it.
 	 */
-	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
-		      1, _THIS_IP_);
+	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
 	/*
 	 * We hand off the transaction to the completion thread now, so
 	 * clear the flag here.
@@ -171,8 +170,7 @@
 	 * Similarly for freeze protection.
 	 */
 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
-	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
-			   0, 1, _THIS_IP_);
+	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 
 	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 }
@@ -355,7 +353,8 @@
 {
 	xfs_ioend_t		*ioend = bio->bi_private;
 
-	ioend->io_error = bio->bi_error;
+	if (!ioend->io_error)
+		ioend->io_error = bio->bi_error;
 
 	/* Toss bio and pass work off to an xfsdatad thread */
 	bio->bi_private = NULL;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 0f34886c..3bf4ad0 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -67,16 +67,15 @@
  */
 int						/* error */
 xfs_bmap_finish(
-	xfs_trans_t		**tp,		/* transaction pointer addr */
-	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
-	int			*committed)	/* xact committed or not */
+	struct xfs_trans		**tp,	/* transaction pointer addr */
+	struct xfs_bmap_free		*flist,	/* i/o: list extents to free */
+	int				*committed)/* xact committed or not */
 {
-	xfs_efd_log_item_t	*efd;		/* extent free data */
-	xfs_efi_log_item_t	*efi;		/* extent free intention */
-	int			error;		/* error return value */
-	xfs_bmap_free_item_t	*free;		/* free extent item */
-	xfs_mount_t		*mp;		/* filesystem mount structure */
-	xfs_bmap_free_item_t	*next;		/* next item on free list */
+	struct xfs_efd_log_item		*efd;	/* extent free data */
+	struct xfs_efi_log_item		*efi;	/* extent free intention */
+	int				error;	/* error return value */
+	struct xfs_bmap_free_item	*free;	/* free extent item */
+	struct xfs_bmap_free_item	*next;	/* next item on free list */
 
 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
 	if (flist->xbf_count == 0) {
@@ -88,40 +87,48 @@
 		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
 			free->xbfi_blockcount);
 
-	error = xfs_trans_roll(tp, NULL);
-	*committed = 1;
-	/*
-	 * We have a new transaction, so we should return committed=1,
-	 * even though we're returning an error.
-	 */
-	if (error)
-		return error;
+	error = __xfs_trans_roll(tp, NULL, committed);
+	if (error) {
+		/*
+		 * If the transaction was committed, drop the EFD reference
+		 * since we're bailing out of here. The other reference is
+		 * dropped when the EFI hits the AIL.
+		 *
+		 * If the transaction was not committed, the EFI is freed by the
+		 * EFI item unlock handler on abort. Also, we have a new
+		 * transaction so we should return committed=1 even though we're
+		 * returning an error.
+		 */
+		if (*committed) {
+			xfs_efi_release(efi);
+			xfs_force_shutdown((*tp)->t_mountp,
+				(error == -EFSCORRUPTED) ?
+					SHUTDOWN_CORRUPT_INCORE :
+					SHUTDOWN_META_IO_ERROR);
+		} else {
+			*committed = 1;
+		}
 
+		return error;
+	}
+
+	/*
+	 * Get an EFD and free each extent in the list, logging to the EFD in
+	 * the process. The remaining bmap free list is cleaned up by the caller
+	 * on error.
+	 */
 	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
 	for (free = flist->xbf_first; free != NULL; free = next) {
 		next = free->xbfi_next;
-		if ((error = xfs_free_extent(*tp, free->xbfi_startblock,
-				free->xbfi_blockcount))) {
-			/*
-			 * The bmap free list will be cleaned up at a
-			 * higher level.  The EFI will be canceled when
-			 * this transaction is aborted.
-			 * Need to force shutdown here to make sure it
-			 * happens, since this transaction may not be
-			 * dirty yet.
-			 */
-			mp = (*tp)->t_mountp;
-			if (!XFS_FORCED_SHUTDOWN(mp))
-				xfs_force_shutdown(mp,
-						   (error == -EFSCORRUPTED) ?
-						   SHUTDOWN_CORRUPT_INCORE :
-						   SHUTDOWN_META_IO_ERROR);
+
+		error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
+					      free->xbfi_blockcount);
+		if (error)
 			return error;
-		}
-		xfs_trans_log_efd_extent(*tp, efd, free->xbfi_startblock,
-			free->xbfi_blockcount);
+
 		xfs_bmap_del_free(flist, NULL, free);
 	}
+
 	return 0;
 }
 
@@ -1467,7 +1474,7 @@
 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
 				XFS_QMOPT_RES_REGBLKS);
 		if (error)
-			goto out;
+			goto out_trans_cancel;
 
 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
@@ -1481,18 +1488,20 @@
 				&done, stop_fsb, &first_block, &free_list,
 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
 		if (error)
-			goto out;
+			goto out_bmap_cancel;
 
 		error = xfs_bmap_finish(&tp, &free_list, &committed);
 		if (error)
-			goto out;
+			goto out_bmap_cancel;
 
 		error = xfs_trans_commit(tp);
 	}
 
 	return error;
 
-out:
+out_bmap_cancel:
+	xfs_bmap_cancel(&free_list);
+out_trans_cancel:
 	xfs_trans_cancel(tp);
 	return error;
 }
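The relabelled error paths above follow the standard layered-unwind idiom: each goto label undoes one more piece of state, and a failure jumps to the deepest label whose resources are actually live, so the free list is only cancelled once it has been populated. A small self-contained sketch of the idiom, with stand-in names rather than the XFS calls:

#include <errno.h>
#include <stdio.h>

static int reserve_space(void)	{ return 0; }
static int shift_extents(void)	{ return -EIO; }	/* simulate failure */
static void cancel_free_list(void) { puts("free list cancelled"); }
static void cancel_trans(void)     { puts("transaction cancelled"); }

static int shift_file_space(void)
{
	int error;

	error = reserve_space();
	if (error)
		goto out_trans_cancel;	/* free list not populated yet */

	/* ... populate the free list ... */

	error = shift_extents();
	if (error)
		goto out_bmap_cancel;	/* must drop the free list too */

	return 0;			/* commit path */

out_bmap_cancel:
	cancel_free_list();
out_trans_cancel:
	cancel_trans();
	return error;
}

int main(void)
{
	return shift_file_space() ? 1 : 0;
}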
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 01bd678..8ecffb3 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -438,7 +438,6 @@
 	xfs_buf_flags_t		flags,
 	xfs_buf_t		*new_bp)
 {
-	size_t			numbytes;
 	struct xfs_perag	*pag;
 	struct rb_node		**rbp;
 	struct rb_node		*parent;
@@ -450,10 +449,9 @@
 
 	for (i = 0; i < nmaps; i++)
 		numblks += map[i].bm_len;
-	numbytes = BBTOB(numblks);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(numbytes < btp->bt_meta_sectorsize));
+	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
 	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
 
 	/*
@@ -1532,9 +1530,10 @@
 			list_del_init(&bp->b_lru);
 			if (bp->b_flags & XBF_WRITE_FAIL) {
 				xfs_alert(btp->bt_mount,
-"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
-"Please run xfs_repair to determine the extent of the problem.",
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
 					(long long)bp->b_bn);
+				xfs_alert(btp->bt_mount,
+"Please run xfs_repair to determine the extent of the problem.");
 			}
 			xfs_buf_rele(bp);
 		}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 331c1cc..c79b717 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/dax.h>
 #include <linux/buffer_head.h>
 #include <linux/uio.h>
 #include <linux/list_lru.h>
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 092d652..7e986da 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -647,11 +647,7 @@
 			xfs_buf_item_relse(bp);
 		else if (aborted) {
 			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
-			if (lip->li_flags & XFS_LI_IN_AIL) {
-				spin_lock(&lip->li_ailp->xa_lock);
-				xfs_trans_ail_delete(lip->li_ailp, lip,
-						     SHUTDOWN_LOG_IO_ERROR);
-			}
+			xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
 			xfs_buf_item_relse(bp);
 		}
 	}
@@ -750,13 +746,13 @@
  * buffer (see xfs_buf_attach_iodone() below), then put the
  * buf log item at the front.
  */
-void
+int
 xfs_buf_item_init(
-	xfs_buf_t	*bp,
-	xfs_mount_t	*mp)
+	struct xfs_buf	*bp,
+	struct xfs_mount *mp)
 {
-	xfs_log_item_t		*lip = bp->b_fspriv;
-	xfs_buf_log_item_t	*bip;
+	struct xfs_log_item	*lip = bp->b_fspriv;
+	struct xfs_buf_log_item	*bip;
 	int			chunks;
 	int			map_size;
 	int			error;
@@ -770,12 +766,11 @@
 	 */
 	ASSERT(bp->b_target->bt_mount == mp);
 	if (lip != NULL && lip->li_type == XFS_LI_BUF)
-		return;
+		return 0;
 
 	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
 	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
 	bip->bli_buf = bp;
-	xfs_buf_hold(bp);
 
 	/*
 	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
@@ -788,6 +783,11 @@
 	 */
 	error = xfs_buf_item_get_format(bip, bp->b_map_count);
 	ASSERT(error == 0);
+	if (error) {	/* to stop gcc throwing set-but-unused warnings */
+		kmem_zone_free(xfs_buf_item_zone, bip);
+		return error;
+	}
+
 
 	for (i = 0; i < bip->bli_format_count; i++) {
 		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
@@ -807,6 +807,8 @@
 	if (bp->b_fspriv)
 		bip->bli_item.li_bio_list = bp->b_fspriv;
 	bp->b_fspriv = bip;
+	xfs_buf_hold(bp);
+	return 0;
 }
 
 
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 3f3455a..f7eba99 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -61,7 +61,7 @@
 	struct xfs_buf_log_format __bli_format;	/* embedded in-log header */
 } xfs_buf_log_item_t;
 
-void	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
+int	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void	xfs_buf_item_relse(struct xfs_buf *);
 void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
 uint	xfs_buf_item_dirty(xfs_buf_log_item_t *);
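Two things change in xfs_buf_item_init() above: it now returns an error instead of only asserting, and xfs_buf_hold() moves after the last failure point so the error path has no reference to unwind. A userspace sketch of that ordering, with hypothetical names standing in for the kernel structures:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { int holds; };
struct buf_item { struct buf *bp; };

static void buf_hold(struct buf *bp) { bp->holds++; }
static int item_get_format(struct buf_item *bip) { (void)bip; return 0; }

static int buf_item_init(struct buf *bp, struct buf_item **bipp)
{
	struct buf_item *bip = calloc(1, sizeof(*bip));
	int error;

	if (!bip)
		return -ENOMEM;

	error = item_get_format(bip);
	if (error) {
		free(bip);	/* nothing else to undo: no hold taken yet */
		return error;
	}

	bip->bp = bp;
	buf_hold(bp);		/* take the buffer reference last */
	*bipp = bip;
	return 0;
}

int main(void)
{
	struct buf bp = { 0 };
	struct buf_item *bip = NULL;

	if (!buf_item_init(&bp, &bip))
		printf("init ok, holds = %d\n", bp.holds);
	free(bip);
	return 0;
}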
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 098cd78..a989a9c 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -171,6 +171,7 @@
 	int			wantoff;	/* starting block offset */
 	xfs_off_t		cook;
 	struct xfs_da_geometry	*geo = args->geo;
+	int			lock_mode;
 
 	/*
 	 * If the block number in the offset is out of range, we're done.
@@ -178,7 +179,9 @@
 	if (xfs_dir2_dataptr_to_db(geo, ctx->pos) > geo->datablk)
 		return 0;
 
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	error = xfs_dir3_block_read(NULL, dp, &bp);
+	xfs_iunlock(dp, lock_mode);
 	if (error)
 		return error;
 
@@ -529,9 +532,12 @@
 		 * current buffer, need to get another one.
 		 */
 		if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
+			int	lock_mode;
 
+			lock_mode = xfs_ilock_data_map_shared(dp);
 			error = xfs_dir2_leaf_readbuf(args, bufsize, map_info,
 						      &curoff, &bp);
+			xfs_iunlock(dp, lock_mode);
 			if (error || !map_info->map_valid)
 				break;
 
@@ -653,7 +659,6 @@
 	struct xfs_da_args	args = { NULL };
 	int			rval;
 	int			v;
-	uint			lock_mode;
 
 	trace_xfs_readdir(dp);
 
@@ -666,7 +671,7 @@
 	args.dp = dp;
 	args.geo = dp->i_mount->m_dir_geo;
 
-	lock_mode = xfs_ilock_data_map_shared(dp);
+	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
 		rval = xfs_dir2_sf_getdents(&args, ctx);
 	else if ((rval = xfs_dir2_isblock(&args, &v)))
@@ -675,7 +680,7 @@
 		rval = xfs_dir2_block_getdents(&args, ctx);
 	else
 		rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
-	xfs_iunlock(dp, lock_mode);
+	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 
 	return rval;
 }
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 4143dc7..30cb3af 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -251,7 +251,7 @@
 		d->dd_diskdq.d_id = cpu_to_be32(curid);
 		d->dd_diskdq.d_flags = type;
 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
-			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 					 XFS_DQUOT_CRC_OFF);
 		}
@@ -954,12 +954,8 @@
 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
 
-		spin_lock(&mp->m_ail->xa_lock);
-		if (lip->li_flags & XFS_LI_IN_AIL)
-			xfs_trans_ail_delete(mp->m_ail, lip,
-					     SHUTDOWN_CORRUPT_INCORE);
-		else
-			spin_unlock(&mp->m_ail->xa_lock);
+		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
+
 		error = -EIO;
 		goto out_unlock;
 	}
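The dquot and buf-item hunks replace the open-coded "take the AIL lock, test XFS_LI_IN_AIL, delete or unlock" sequence with the xfs_trans_ail_remove() helper. A sketch of why that consolidation is safer (every caller gets the flag check and an unlock on every path for free), using a pthread mutex as a stand-in for the AIL spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ail { pthread_mutex_t lock; };
struct log_item { struct ail *ailp; bool in_ail; };

/* Mirrors xfs_trans_ail_delete(): entered with the lock held, drops it. */
static void ail_delete(struct log_item *lip)
{
	lip->in_ail = false;
	pthread_mutex_unlock(&lip->ailp->lock);
}

/* Mirrors the helper: lock, check, and release the lock on every path. */
static void ail_remove(struct log_item *lip)
{
	pthread_mutex_lock(&lip->ailp->lock);
	if (lip->in_ail)
		ail_delete(lip);		/* drops the lock */
	else
		pthread_mutex_unlock(&lip->ailp->lock);
}

int main(void)
{
	struct ail ailp = { PTHREAD_MUTEX_INITIALIZER };
	struct log_item lip = { &ailp, true };

	ail_remove(&lip);
	printf("in_ail = %d\n", lip.in_ail);	/* 0 */
	return 0;
}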
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index adc8f8f..4aa0153 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -47,28 +47,6 @@
 }
 
 /*
- * Freeing the efi requires that we remove it from the AIL if it has already
- * been placed there. However, the EFI may not yet have been placed in the AIL
- * when called by xfs_efi_release() from EFD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the EFI.
- */
-STATIC void
-__xfs_efi_release(
-	struct xfs_efi_log_item	*efip)
-{
-	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
-
-	if (atomic_dec_and_test(&efip->efi_refcount)) {
-		spin_lock(&ailp->xa_lock);
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, &efip->efi_item,
-				     SHUTDOWN_LOG_IO_ERROR);
-		xfs_efi_item_free(efip);
-	}
-}
-
-/*
  * This returns the number of iovecs needed to log the given efi item.
  * We only need 1 iovec for an efi item.  It just logs the efi_log_format
  * structure.
@@ -128,12 +106,12 @@
 }
 
 /*
- * While EFIs cannot really be pinned, the unpin operation is the last place at
- * which the EFI is manipulated during a transaction.  If we are being asked to
- * remove the EFI it's because the transaction has been cancelled and by
- * definition that means the EFI cannot be in the AIL so remove it from the
- * transaction and free it.  Otherwise coordinate with xfs_efi_release()
- * to determine who gets to free the EFI.
+ * The unpin operation is the last place an EFI is manipulated in the log. It is
+ * either inserted in the AIL or aborted in the event of a log I/O error. In
+ * either case, the EFI transaction has been successfully committed to make it
+ * this far. Therefore, we expect whoever committed the EFI to either construct
+ * and commit the EFD or drop the EFD's reference in the event of error. Simply
+ * drop the log's EFI reference now that the log is done with it.
  */
 STATIC void
 xfs_efi_item_unpin(
@@ -141,15 +119,7 @@
 	int			remove)
 {
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
-
-	if (remove) {
-		ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
-		if (lip->li_desc)
-			xfs_trans_del_item(lip);
-		xfs_efi_item_free(efip);
-		return;
-	}
-	__xfs_efi_release(efip);
+	xfs_efi_release(efip);
 }
 
 /*
@@ -167,6 +137,11 @@
 	return XFS_ITEM_PINNED;
 }
 
+/*
+ * The EFI has been either committed or aborted if the transaction has been
+ * cancelled. If the transaction was cancelled, an EFD isn't going to be
+ * constructed and thus we free the EFI here directly.
+ */
 STATIC void
 xfs_efi_item_unlock(
 	struct xfs_log_item	*lip)
@@ -301,23 +276,19 @@
 }
 
 /*
- * This is called by the efd item code below to release references to the given
- * efi item.  Each efd calls this with the number of extents that it has
- * logged, and when the sum of these reaches the total number of extents logged
- * by this efi item we can free the efi item.
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the EFI.
  */
 void
-xfs_efi_release(xfs_efi_log_item_t	*efip,
-		uint			nextents)
+xfs_efi_release(
+	struct xfs_efi_log_item	*efip)
 {
-	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
-	if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
-		/* recovery needs us to drop the EFI reference, too */
-		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
-			__xfs_efi_release(efip);
-
-		__xfs_efi_release(efip);
-		/* efip may now have been freed, do not reference it again. */
+	if (atomic_dec_and_test(&efip->efi_refcount)) {
+		xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_efi_item_free(efip);
 	}
 }
 
@@ -415,20 +386,27 @@
 	return XFS_ITEM_PINNED;
 }
 
+/*
+ * The EFD is either committed or aborted if the transaction is cancelled. If
+ * the transaction is cancelled, drop our reference to the EFI and free the EFD.
+ */
 STATIC void
 xfs_efd_item_unlock(
 	struct xfs_log_item	*lip)
 {
-	if (lip->li_flags & XFS_LI_ABORTED)
-		xfs_efd_item_free(EFD_ITEM(lip));
+	struct xfs_efd_log_item	*efdp = EFD_ITEM(lip);
+
+	if (lip->li_flags & XFS_LI_ABORTED) {
+		xfs_efi_release(efdp->efd_efip);
+		xfs_efd_item_free(efdp);
+	}
 }
 
 /*
- * When the efd item is committed to disk, all we need to do
- * is delete our reference to our partner efi item and then
- * free ourselves.  Since we're freeing ourselves we must
- * return -1 to keep the transaction code from further referencing
- * this item.
+ * When the efd item is committed to disk, all we need to do is delete our
+ * reference to our partner efi item and then free ourselves. Since we're
+ * freeing ourselves we must return -1 to keep the transaction code from further
+ * referencing this item.
  */
 STATIC xfs_lsn_t
 xfs_efd_item_committed(
@@ -438,13 +416,14 @@
 	struct xfs_efd_log_item	*efdp = EFD_ITEM(lip);
 
 	/*
-	 * If we got a log I/O error, it's always the case that the LR with the
-	 * EFI got unpinned and freed before the EFD got aborted.
+	 * Drop the EFI reference regardless of whether the EFD has been
+	 * aborted. Once the EFD transaction is constructed, it is the sole
+	 * responsibility of the EFD to release the EFI (even if the EFI is
+	 * aborted due to log I/O error).
 	 */
-	if (!(lip->li_flags & XFS_LI_ABORTED))
-		xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
-
+	xfs_efi_release(efdp->efd_efip);
 	xfs_efd_item_free(efdp);
+
 	return (xfs_lsn_t)-1;
 }
 
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0ffbce3..8fa8651 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -39,9 +39,28 @@
  * "extent free done" log item described below.
  *
  * The EFI is reference counted so that it is not freed prior to both the EFI
- * and EFD being committed and unpinned. This ensures that when the last
- * reference goes away the EFI will always be in the AIL as it has been
- * unpinned, regardless of whether the EFD is processed before or after the EFI.
+ * and EFD being committed and unpinned. This ensures the EFI is inserted into
+ * the AIL even in the event of out of order EFI/EFD processing. In other words,
+ * an EFI is born with two references:
+ *
+ * 	1.) an EFI held reference to track EFI AIL insertion
+ * 	2.) an EFD held reference to track EFD commit
+ *
+ * On allocation, both references are the responsibility of the caller. Once the
+ * EFI is added to and dirtied in a transaction, ownership of reference one
+ * transfers to the transaction. The reference is dropped once the EFI is
+ * inserted to the AIL or in the event of failure along the way (e.g., commit
+ * failure, log I/O error, etc.). Note that the caller remains responsible for
+ * the EFD reference under all circumstances to this point. The caller has no
+ * means to detect failure once the transaction is committed, however.
+ * Therefore, an EFD is required after this point, even in the event of
+ * unrelated failure.
+ *
+ * Once an EFD is allocated and dirtied in a transaction, reference two
+ * transfers to the transaction. The EFD reference is dropped once it reaches
+ * the unpin handler. Similar to the EFI, the reference also drops in the event
+ * of commit failure or log I/O errors. Note that the EFD is not inserted in the
+ * AIL, so at this point both the EFI and EFD are freed.
  */
 typedef struct xfs_efi_log_item {
 	xfs_log_item_t		efi_item;
@@ -77,5 +96,6 @@
 int			xfs_efi_copy_format(xfs_log_iovec_t *buf,
 					    xfs_efi_log_format_t *dst_efi_fmt);
 void			xfs_efi_item_free(xfs_efi_log_item_t *);
+void			xfs_efi_release(struct xfs_efi_log_item *);
 
 #endif	/* __XFS_EXTFREE_ITEM_H__ */
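The lifecycle comment above is the heart of this series: the EFI is allocated with two references and is freed by whichever holder drops the last one, regardless of EFI/EFD ordering. A compilable userspace sketch of that scheme, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct efi {
	atomic_int refcount;
};

static struct efi *efi_alloc(void)
{
	struct efi *efip = malloc(sizeof(*efip));

	if (!efip)
		abort();
	atomic_init(&efip->refcount, 2);	/* AIL ref + EFD ref */
	return efip;
}

/* Mirrors xfs_efi_release()/atomic_dec_and_test(): the last put frees. */
static void efi_release(struct efi *efip, const char *who)
{
	if (atomic_fetch_sub(&efip->refcount, 1) == 1) {
		printf("%s dropped the last reference, freeing\n", who);
		free(efip);
	} else {
		printf("%s dropped a reference, item still live\n", who);
	}
}

int main(void)
{
	struct efi *efip = efi_alloc();

	efi_release(efip, "unpin/AIL insertion");	/* reference one */
	efi_release(efip, "EFD commit");		/* reference two */
	return 0;
}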
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index db4acc1..e78feb4 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -317,24 +317,33 @@
 		return -EIO;
 
 	/*
-	 * Locking is a bit tricky here. If we take an exclusive lock
-	 * for direct IO, we effectively serialise all new concurrent
-	 * read IO to this file and block it behind IO that is currently in
-	 * progress because IO in progress holds the IO lock shared. We only
-	 * need to hold the lock exclusive to blow away the page cache, so
-	 * only take lock exclusively if the page cache needs invalidation.
-	 * This allows the normal direct IO case of no page cache pages to
-	 * proceeed concurrently without serialisation.
+	 * Locking is a bit tricky here. If we take an exclusive lock for direct
+	 * IO, we effectively serialise all new concurrent read IO to this file
+	 * and block it behind IO that is currently in progress because IO in
+	 * progress holds the IO lock shared. We only need to hold the lock
+	 * exclusive to blow away the page cache, so only take lock exclusively
+	 * if the page cache needs invalidation. This allows the normal direct
+	 * IO case of no page cache pages to proceed concurrently without
+	 * serialisation.
 	 */
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
 		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
+		/*
+		 * The generic dio code only flushes the range of the particular
+		 * I/O. Because we take an exclusive lock here, this whole
+		 * sequence is considerably more expensive for us. This has a
+		 * noticeable performance impact for any file with cached pages,
+		 * even when outside of the range of the particular I/O.
+		 *
+		 * Hence, amortize the cost of the lock against a full file
+		 * flush and reduce the chances of repeated iolock cycles going
+		 * forward.
+		 */
 		if (inode->i_mapping->nrpages) {
-			ret = filemap_write_and_wait_range(
-							VFS_I(ip)->i_mapping,
-							pos, pos + size - 1);
+			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 			if (ret) {
 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
 				return ret;
@@ -345,9 +354,7 @@
 			 * we fail to invalidate a page, but this should never
 			 * happen on XFS. Warn if it does fail.
 			 */
-			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					pos >> PAGE_CACHE_SHIFT,
-					(pos + size - 1) >> PAGE_CACHE_SHIFT);
+			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
 			WARN_ON_ONCE(ret);
 			ret = 0;
 		}
@@ -733,19 +740,19 @@
 	pos = iocb->ki_pos;
 	end = pos + count - 1;
 
+	/*
+	 * See xfs_file_read_iter() for why we do a full-file flush here.
+	 */
 	if (mapping->nrpages) {
-		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-						   pos, end);
+		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 		if (ret)
 			goto out;
 		/*
-		 * Invalidate whole pages. This can return an error if
-		 * we fail to invalidate a page, but this should never
-		 * happen on XFS. Warn if it does fail.
+		 * Invalidate whole pages. This can return an error if we fail
+		 * to invalidate a page, but this should never happen on XFS.
+		 * Warn if it does fail.
 		 */
-		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					pos >> PAGE_CACHE_SHIFT,
-					end >> PAGE_CACHE_SHIFT);
+		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
 		WARN_ON_ONCE(ret);
 		ret = 0;
 	}
@@ -1539,8 +1546,36 @@
 	return ret;
 }
 
+STATIC int
+xfs_filemap_pmd_fault(
+	struct vm_area_struct	*vma,
+	unsigned long		addr,
+	pmd_t			*pmd,
+	unsigned int		flags)
+{
+	struct inode		*inode = file_inode(vma->vm_file);
+	struct xfs_inode	*ip = XFS_I(inode);
+	int			ret;
+
+	if (!IS_DAX(inode))
+		return VM_FAULT_FALLBACK;
+
+	trace_xfs_filemap_pmd_fault(ip);
+
+	sb_start_pagefault(inode->i_sb);
+	file_update_time(vma->vm_file);
+	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_direct,
+				    xfs_end_io_dax_write);
+	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+	sb_end_pagefault(inode->i_sb);
+
+	return ret;
+}
+
 static const struct vm_operations_struct xfs_file_vm_ops = {
 	.fault		= xfs_filemap_fault,
+	.pmd_fault	= xfs_filemap_pmd_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite	= xfs_filemap_page_mkwrite,
 };
@@ -1553,7 +1588,7 @@
 	file_accessed(filp);
 	vma->vm_ops = &xfs_file_vm_ops;
 	if (IS_DAX(file_inode(filp)))
-		vma->vm_flags |= VM_MIXEDMAP;
+		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 	return 0;
 }
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 9b3438a..ee3aaa0a 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -250,7 +250,7 @@
 		agf->agf_freeblks = cpu_to_be32(tmpsize);
 		agf->agf_longest = cpu_to_be32(tmpsize);
 		if (xfs_sb_version_hascrc(&mp->m_sb))
-			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
 
 		error = xfs_bwrite(bp);
 		xfs_buf_relse(bp);
@@ -273,7 +273,7 @@
 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
 			agfl->agfl_seqno = cpu_to_be32(agno);
-			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
 		}
 
 		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
@@ -309,7 +309,7 @@
 		agi->agi_newino = cpu_to_be32(NULLAGINO);
 		agi->agi_dirino = cpu_to_be32(NULLAGINO);
 		if (xfs_sb_version_hascrc(&mp->m_sb))
-			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
+			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
 		if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
 			agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
 			agi->agi_free_level = cpu_to_be32(1);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 76a9f278..0a326bd 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -412,6 +412,8 @@
 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 		return -EINVAL;
 
+	XFS_STATS_INC(xs_ig_attempts);
+
 	/* get the perag structure and ensure that it's inode capable */
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
 	agino = XFS_INO_TO_AGINO(mp, ino);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 3da9f4d..dc40a6d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -164,7 +164,7 @@
 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL)
 		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
@@ -212,7 +212,7 @@
 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL) {
 		if (!mrtryupdate(&ip->i_iolock))
@@ -281,7 +281,7 @@
 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 	ASSERT(lock_flags != 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL)
@@ -363,31 +363,57 @@
 #endif
 
 /*
+ * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
+ * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
+ * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
+ * errors and warnings.
+ */
+#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
+static bool
+xfs_lockdep_subclass_ok(
+	int subclass)
+{
+	return subclass < MAX_LOCKDEP_SUBCLASSES;
+}
+#else
+#define xfs_lockdep_subclass_ok(subclass)	(true)
+#endif
+
+/*
  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
- * value. This shouldn't be called for page fault locking, but we also need to
- * ensure we don't overrun the number of lockdep subclasses for the iolock or
- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
+ * value. This can be called for any type of inode lock combination, including
+ * parent locking. Care must be taken to ensure we don't overrun the subclass
+ * storage fields in the class mask we build.
  */
 static inline int
 xfs_lock_inumorder(int lock_mode, int subclass)
 {
+	int	class = 0;
+
+	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
+			      XFS_ILOCK_RTSUM)));
+	ASSERT(xfs_lockdep_subclass_ok(subclass));
+
 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-		ASSERT(subclass + XFS_LOCK_INUMORDER <
-			(1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
-		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
+		ASSERT(xfs_lockdep_subclass_ok(subclass +
+						XFS_IOLOCK_PARENT_VAL));
+		class += subclass << XFS_IOLOCK_SHIFT;
+		if (lock_mode & XFS_IOLOCK_PARENT)
+			class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
 	}
 
 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
-		ASSERT(subclass + XFS_LOCK_INUMORDER <
-			(1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
-		lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
-							XFS_MMAPLOCK_SHIFT;
+		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
+		class += subclass << XFS_MMAPLOCK_SHIFT;
 	}
 
-	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
+	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
+		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
+		class += subclass << XFS_ILOCK_SHIFT;
+	}
 
-	return lock_mode;
+	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 }
 
 /*
@@ -399,6 +425,11 @@
  * transaction (such as truncate). This can result in deadlock since the long
  * running trans might need to wait for the inode we just locked in order to
  * push the tail and free space in the log.
+ *
+ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
+ * lock more than one at a time, lockdep will report false positives saying we
+ * have violated locking orders.
  */
 void
 xfs_lock_inodes(
@@ -409,8 +440,29 @@
 	int		attempts = 0, i, j, try_lock;
 	xfs_log_item_t	*lp;
 
-	/* currently supports between 2 and 5 inodes */
+	/*
+	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
+	 * support an arbitrary depth of locking here, but absolute limits on
+	 * inodes depend on the type of locking and the limits placed by
+	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
+	 * the asserts.
+	 */
 	ASSERT(ips && inodes >= 2 && inodes <= 5);
+	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
+			    XFS_ILOCK_EXCL));
+	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
+			      XFS_ILOCK_SHARED)));
+	ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
+		inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
+	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
+		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
+	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
+		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
+
+	if (lock_mode & XFS_IOLOCK_EXCL) {
+		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
+	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
+		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 
 	try_lock = 0;
 	i = 0;
@@ -629,30 +681,29 @@
 {
 	xfs_ino_t		inum;
 	int			error;
-	uint			lock_mode;
 
 	trace_xfs_lookup(dp, name);
 
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	lock_mode = xfs_ilock_data_map_shared(dp);
+	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-	xfs_iunlock(dp, lock_mode);
-
 	if (error)
-		goto out;
+		goto out_unlock;
 
 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
 	if (error)
 		goto out_free_name;
 
+	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	return 0;
 
 out_free_name:
 	if (ci_name)
 		kmem_free(ci_name->name);
-out:
+out_unlock:
+	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	*ipp = NULL;
 	return error;
 }
@@ -787,7 +838,7 @@
 
 	if (ip->i_d.di_version == 3) {
 		ASSERT(ip->i_d.di_ino == ino);
-		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
+		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
 		ip->i_d.di_crc = 0;
 		ip->i_d.di_changecount = 1;
 		ip->i_d.di_lsn = 0;
@@ -1149,7 +1200,8 @@
 		goto out_trans_cancel;
 
 
-	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
+		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	xfs_bmap_init(&free_list, &first_block);
@@ -1175,11 +1227,8 @@
 	 */
 	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
 			       prid, resblks > 0, &ip, &committed);
-	if (error) {
-		if (error == -ENOSPC)
-			goto out_trans_cancel;
+	if (error)
 		goto out_trans_cancel;
-	}
 
 	/*
 	 * Now we join the directory inode to the transaction.  We do not do it
@@ -1188,7 +1237,7 @@
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1261,7 +1310,7 @@
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -1318,11 +1367,8 @@
 
 	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
 				prid, resblks > 0, &ip, NULL);
-	if (error) {
-		if (error == -ENOSPC)
-			goto out_trans_cancel;
+	if (error)
 		goto out_trans_cancel;
-	}
 
 	if (mp->m_flags & XFS_MOUNT_WSYNC)
 		xfs_trans_set_sync(tp);
@@ -1409,10 +1455,11 @@
 	if (error)
 		goto error_return;
 
+	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 
 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 
 	/*
 	 * If we are using project inheritance, we only allow hard link
@@ -1791,14 +1838,15 @@
 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
 
 	/*
-	 * Just ignore errors at this point.  There is nothing we can
-	 * do except to try to keep going. Make sure it's not a silent
-	 * error.
+	 * Just ignore errors at this point.  There is nothing we can do except
+	 * to try to keep going. Make sure it's not a silent error.
 	 */
 	error = xfs_bmap_finish(&tp,  &free_list, &committed);
-	if (error)
+	if (error) {
 		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
 			__func__, error);
+		xfs_bmap_cancel(&free_list);
+	}
 	error = xfs_trans_commit(tp);
 	if (error)
 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
@@ -2515,9 +2563,10 @@
 		goto out_trans_cancel;
 	}
 
+	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 
-	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
 	/*
@@ -2898,6 +2947,12 @@
 	 * whether the target directory is the same as the source
 	 * directory, we can lock from 2 to 4 inodes.
 	 */
+	if (!new_parent)
+		xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
+	else
+		xfs_lock_two_inodes(src_dp, target_dp,
+				    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
+
 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 
 	/*
@@ -2905,9 +2960,9 @@
 	 * we can rely on either trans_commit or trans_cancel to unlock
 	 * them.
 	 */
-	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	if (new_parent)
-		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
 	if (target_ip)
 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 8f22d20..ca9e119 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -284,9 +284,9 @@
  * Flags for lockdep annotations.
  *
  * XFS_LOCK_PARENT - for directory operations that require locking a
- * parent directory inode and a child entry inode.  The parent gets locked
- * with this flag so it gets a lockdep subclass of 1 and the child entry
- * lock will have a lockdep subclass of 0.
+ * parent directory inode and a child entry inode. IOLOCK requires nesting,
+ * MMAPLOCK does not support this class, ILOCK requires a single subclass
+ * to differentiate parent from child.
  *
  * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
  * inodes do not participate in the normal lock order, and thus have their
@@ -295,30 +295,63 @@
 * XFS_LOCK_INUMORDER - for locking several inodes at the same time
  * with xfs_lock_inodes().  This flag is used as the starting subclass
  * and each subsequent lock acquired will increment the subclass by one.
- * So the first lock acquired will have a lockdep subclass of 4, the
- * second lock will have a lockdep subclass of 5, and so on. It is
- * the responsibility of the class builder to shift this to the correct
- * portion of the lock_mode lockdep mask.
+ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
+ * limited to the subclasses we can represent via nesting. We need at least
+ * a nest depth of 5 inodes for the ILOCK through rename, and we also have to support
+ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
+ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
+ * 8 subclasses supported by lockdep.
+ *
+ * This also means we have to number the sub-classes in the lowest bits of
+ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
+ * mask and we can't use bit-masking to build the subclasses. What a mess.
+ *
+ * Bit layout:
+ *
+ * Bit		Lock Region
+ * 16-19	XFS_IOLOCK_SHIFT dependencies
+ * 20-23	XFS_MMAPLOCK_SHIFT dependencies
+ * 24-31	XFS_ILOCK_SHIFT dependencies
+ *
+ * IOLOCK values
+ *
+ * 0-3		subclass value
+ * 4-7		PARENT subclass values
+ *
+ * MMAPLOCK values
+ *
+ * 0-3		subclass value
+ * 4-7		unused
+ *
+ * ILOCK values
+ * 0-4		subclass values
+ * 5		PARENT subclass (not nestable)
+ * 6		RTBITMAP subclass (not nestable)
+ * 7		RTSUM subclass (not nestable)
+ *
  */
-#define XFS_LOCK_PARENT		1
-#define XFS_LOCK_RTBITMAP	2
-#define XFS_LOCK_RTSUM		3
-#define XFS_LOCK_INUMORDER	4
+#define XFS_IOLOCK_SHIFT		16
+#define XFS_IOLOCK_PARENT_VAL		4
+#define XFS_IOLOCK_MAX_SUBCLASS		(XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_DEP_MASK		0x000f0000
+#define	XFS_IOLOCK_PARENT		(XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
 
-#define XFS_IOLOCK_SHIFT	16
-#define	XFS_IOLOCK_PARENT	(XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
+#define XFS_MMAPLOCK_SHIFT		20
+#define XFS_MMAPLOCK_NUMORDER		0
+#define XFS_MMAPLOCK_MAX_SUBCLASS	3
+#define XFS_MMAPLOCK_DEP_MASK		0x00f00000
 
-#define XFS_MMAPLOCK_SHIFT	20
+#define XFS_ILOCK_SHIFT			24
+#define XFS_ILOCK_PARENT_VAL		5
+#define XFS_ILOCK_MAX_SUBCLASS		(XFS_ILOCK_PARENT_VAL - 1)
+#define XFS_ILOCK_RTBITMAP_VAL		6
+#define XFS_ILOCK_RTSUM_VAL		7
+#define XFS_ILOCK_DEP_MASK		0xff000000
+#define	XFS_ILOCK_PARENT		(XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
+#define	XFS_ILOCK_RTBITMAP		(XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
+#define	XFS_ILOCK_RTSUM			(XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
 
-#define XFS_ILOCK_SHIFT		24
-#define	XFS_ILOCK_PARENT	(XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
-#define	XFS_ILOCK_RTBITMAP	(XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
-#define	XFS_ILOCK_RTSUM		(XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-
-#define XFS_IOLOCK_DEP_MASK	0x000f0000
-#define XFS_MMAPLOCK_DEP_MASK	0x00f00000
-#define XFS_ILOCK_DEP_MASK	0xff000000
-#define XFS_LOCK_DEP_MASK	(XFS_IOLOCK_DEP_MASK | \
+#define XFS_LOCK_SUBCLASS_MASK	(XFS_IOLOCK_DEP_MASK | \
 				 XFS_MMAPLOCK_DEP_MASK | \
 				 XFS_ILOCK_DEP_MASK)
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index bf13a5a..62bd80f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -703,17 +703,10 @@
 	xfs_inode_log_item_t	*iip = ip->i_itemp;
 
 	if (iip) {
-		struct xfs_ail	*ailp = iip->ili_item.li_ailp;
 		if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
-			spin_lock(&ailp->xa_lock);
-			if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
-				/* xfs_trans_ail_delete() drops the AIL lock. */
-				xfs_trans_ail_delete(ailp, &iip->ili_item,
-						stale ?
-						     SHUTDOWN_LOG_IO_ERROR :
+			xfs_trans_ail_remove(&iip->ili_item,
+					     stale ? SHUTDOWN_LOG_IO_ERROR :
 						     SHUTDOWN_CORRUPT_INCORE);
-			} else
-				spin_unlock(&ailp->xa_lock);
 		}
 		iip->ili_logged = 0;
 		/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 766b23f..8294132 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -609,7 +609,7 @@
 	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
 	if (error)
-		goto out_dqrele;
+		goto out_trans_cancel;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 
@@ -640,7 +640,7 @@
 						NULL, capable(CAP_FOWNER) ?
 						XFS_QMOPT_FORCE_RES : 0);
 			if (error)	/* out of quota */
-				goto out_trans_cancel;
+				goto out_unlock;
 		}
 	}
 
@@ -729,10 +729,10 @@
 
 	return 0;
 
+out_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_trans_cancel:
 	xfs_trans_cancel(tp);
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
 	return error;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f41b0c3..930ebd8 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -473,7 +473,8 @@
 		 * pending error, then we are done.
 		 */
 del_cursor:
-		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+		xfs_btree_del_cursor(cur, error ?
+					  XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 		xfs_buf_relse(agbp);
 		if (error)
 			break;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 08d4fe4..aaadee0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -668,9 +668,9 @@
 			ASSERT(0);
 			goto out_free_log;
 		}
+		xfs_crit(mp, "Log size out of supported range.");
 		xfs_crit(mp,
-"Log size out of supported range. Continuing onwards, but if log hangs are\n"
-"experienced then please report this message in the bug report.");
+"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
 	}
 
 	/*
@@ -700,6 +700,7 @@
 		if (error) {
 			xfs_warn(mp, "log mount/recovery failed: error %d",
 				error);
+			xlog_recover_cancel(mp->m_log);
 			goto out_destroy_ail;
 		}
 	}
@@ -740,18 +741,35 @@
  * it.
  */
 int
-xfs_log_mount_finish(xfs_mount_t *mp)
+xfs_log_mount_finish(
+	struct xfs_mount	*mp)
 {
 	int	error = 0;
 
-	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
-		error = xlog_recover_finish(mp->m_log);
-		if (!error)
-			xfs_log_work_queue(mp);
-	} else {
+	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
+		return 0;
 	}
 
+	error = xlog_recover_finish(mp->m_log);
+	if (!error)
+		xfs_log_work_queue(mp);
+
+	return error;
+}
+
+/*
+ * The mount has failed. Cancel the recovery if it hasn't completed and destroy
+ * the log.
+ */
+int
+xfs_log_mount_cancel(
+	struct xfs_mount	*mp)
+{
+	int			error;
+
+	error = xlog_recover_cancel(mp->m_log);
+	xfs_log_unmount(mp);
 
 	return error;
 }
@@ -1142,11 +1160,13 @@
 		 * In this case we just want to return the size of the
 		 * log as the amount of space left.
 		 */
+		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
 		xfs_alert(log->l_mp,
-			"xlog_space_left: head behind tail\n"
-			"  tail_cycle = %d, tail_bytes = %d\n"
-			"  GH   cycle = %d, GH   bytes = %d",
-			tail_cycle, tail_bytes, head_cycle, head_bytes);
+			  "  tail_cycle = %d, tail_bytes = %d",
+			  tail_cycle, tail_bytes);
+		xfs_alert(log->l_mp,
+			  "  GH   cycle = %d, GH   bytes = %d",
+			  head_cycle, head_bytes);
 		ASSERT(0);
 		free_bytes = log->l_logsize;
 	}
@@ -1652,8 +1672,13 @@
 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
 		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
 		int		i;
+		int		xheads;
 
-		for (i = 1; i < log->l_iclog_heads; i++) {
+		xheads = size / XLOG_HEADER_CYCLE_SIZE;
+		if (size % XLOG_HEADER_CYCLE_SIZE)
+			xheads++;
+
+		for (i = 1; i < xheads; i++) {
 			crc = crc32c(crc, &xhdr[i].hic_xheader,
 				     sizeof(struct xlog_rec_ext_header));
 		}
@@ -2028,26 +2053,24 @@
 	    "SWAPEXT"
 	};
 
-	xfs_warn(mp,
-		"xlog_write: reservation summary:\n"
-		"  trans type  = %s (%u)\n"
-		"  unit res    = %d bytes\n"
-		"  current res = %d bytes\n"
-		"  total reg   = %u bytes (o/flow = %u bytes)\n"
-		"  ophdrs      = %u (ophdr space = %u bytes)\n"
-		"  ophdr + reg = %u bytes\n"
-		"  num regions = %u",
-		((ticket->t_trans_type <= 0 ||
-		  ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
+	xfs_warn(mp, "xlog_write: reservation summary:");
+	xfs_warn(mp, "  trans type  = %s (%u)",
+		 ((ticket->t_trans_type <= 0 ||
+		   ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
 		  "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
-		ticket->t_trans_type,
-		ticket->t_unit_res,
-		ticket->t_curr_res,
-		ticket->t_res_arr_sum, ticket->t_res_o_flow,
-		ticket->t_res_num_ophdrs, ophdr_spc,
-		ticket->t_res_arr_sum +
-		ticket->t_res_o_flow + ophdr_spc,
-		ticket->t_res_num);
+		 ticket->t_trans_type);
+	xfs_warn(mp, "  unit res    = %d bytes",
+		 ticket->t_unit_res);
+	xfs_warn(mp, "  current res = %d bytes",
+		 ticket->t_curr_res);
+	xfs_warn(mp, "  total reg   = %u bytes (o/flow = %u bytes)",
+		 ticket->t_res_arr_sum, ticket->t_res_o_flow);
+	xfs_warn(mp, "  ophdrs      = %u (ophdr space = %u bytes)",
+		 ticket->t_res_num_ophdrs, ophdr_spc);
+	xfs_warn(mp, "  ophdr + reg = %u bytes",
+		 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc);
+	xfs_warn(mp, "  num regions = %u",
+		 ticket->t_res_num);
 
 	for (i = 0; i < ticket->t_res_num; i++) {
 		uint r_type = ticket->t_res_arr[i].r_type;
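Buried in this file is also the xlog_cksum() fix (the xheads hunk): the number of extended record headers to checksum is now derived from the size of the record being summed, rounded up to whole XLOG_HEADER_CYCLE_SIZE chunks, rather than from the in-core l_iclog_heads value, which need not match a record written with different log buffer sizes. A trivial sketch of that round-up (the constant is the real 32k cycle size; the helper is illustrative):

#include <stdio.h>

#define XLOG_HEADER_CYCLE_SIZE	(32 * 1024)

static int xheads_for(int size)
{
	int xheads = size / XLOG_HEADER_CYCLE_SIZE;

	if (size % XLOG_HEADER_CYCLE_SIZE)
		xheads++;	/* a partial final cycle still has a header */
	return xheads;
}

int main(void)
{
	printf("32k record -> %d header(s)\n", xheads_for(32 * 1024)); /* 1 */
	printf("40k record -> %d header(s)\n", xheads_for(40 * 1024)); /* 2 */
	printf("64k record -> %d header(s)\n", xheads_for(64 * 1024)); /* 2 */
	return 0;
}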
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index fa27aae..09d91d3 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -147,6 +147,7 @@
 			xfs_daddr_t		start_block,
 			int		 	num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
+int	xfs_log_mount_cancel(struct xfs_mount *);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void	  xfs_log_space_wake(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index abc2ccb..4e76493 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -307,7 +307,13 @@
 		if (!(lidp->lid_flags & XFS_LID_DIRTY))
 			continue;
 
-		list_move_tail(&lip->li_cil, &cil->xc_cil);
+		/*
+		 * Only move the item if it isn't already at the tail. This is
+		 * to prevent a transient list_empty() state when reinserting
+		 * an item that is already the only item in the CIL.
+		 */
+		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
+			list_move_tail(&lip->li_cil, &cil->xc_cil);
 	}
 
 	/* account for space used by new iovec headers  */
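The guard added above prevents a transient empty-list state: list_move_tail() is a delete followed by a re-add, so moving the only element briefly leaves the list empty, which lockless list_empty() checks on the CIL elsewhere can observe. A minimal sketch with userspace list primitives modelled on <linux/list.h>:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }
static bool list_is_last(const struct list_head *e, const struct list_head *h)
{
	return e->next == h;
}
static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	/* between here and the re-add, the list can look empty */
}
static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

int main(void)
{
	struct list_head cil, item;

	INIT_LIST_HEAD(&cil);
	list_add_tail(&item, &cil);

	/* Guarded move: the sole item is already last, so do nothing and
	 * list_empty() can never transiently return true. */
	if (!list_is_last(&item, &cil)) {
		list_del(&item);
		list_add_tail(&item, &cil);
	}
	printf("list empty? %s\n", list_empty(&cil) ? "yes" : "no");
	return 0;
}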
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1c87c8a..950f3f9 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -426,6 +426,8 @@
 extern int
 xlog_recover_finish(
 	struct xlog		*log);
+extern int
+xlog_recover_cancel(struct xlog *);
 
 extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
 			    char *dp, int size);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 480ebba..512a094 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1895,15 +1895,25 @@
 		 */
 		goto recover_immediately;
 	case XFS_SB_MAGIC:
+		/*
+		 * superblock uuids are magic. We may or may not have a
+		 * sb_meta_uuid on disk, but it will be set in the in-core
+		 * superblock. We set the uuid pointer for verification
+		 * according to the superblock feature mask to ensure we check
+		 * the relevant UUID in the superblock.
+		 */
 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
-		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
+			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
+		else
+			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
 		break;
 	default:
 		break;
 	}
 
 	if (lsn != (xfs_lsn_t)-1) {
-		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
 			goto recover_immediately;
 		return lsn;
 	}
@@ -2933,16 +2943,16 @@
 	struct xlog_recover_item	*item,
 	xfs_lsn_t			lsn)
 {
-	int			error;
-	xfs_mount_t		*mp = log->l_mp;
-	xfs_efi_log_item_t	*efip;
-	xfs_efi_log_format_t	*efi_formatp;
+	int				error;
+	struct xfs_mount		*mp = log->l_mp;
+	struct xfs_efi_log_item		*efip;
+	struct xfs_efi_log_format	*efi_formatp;
 
 	efi_formatp = item->ri_buf[0].i_addr;
 
 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
-	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
-					 &(efip->efi_format)))) {
+	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
+	if (error) {
 		xfs_efi_item_free(efip);
 		return error;
 	}
@@ -2950,20 +2960,23 @@
 
 	spin_lock(&log->l_ailp->xa_lock);
 	/*
-	 * xfs_trans_ail_update() drops the AIL lock.
+	 * The EFI has two references. One for the EFD and one for the EFI to
+	 * it makes it into the AIL. Insert the EFI into the AIL directly and
+	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
+	 * AIL lock.
 	 */
 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
+	xfs_efi_release(efip);
 	return 0;
 }
 
 
 /*
- * This routine is called when an efd format structure is found in
- * a committed transaction in the log.  It's purpose is to cancel
- * the corresponding efi if it was still in the log.  To do this
- * it searches the AIL for the efi with an id equal to that in the
- * efd format structure.  If we find it, we remove the efi from the
- * AIL and free it.
+ * This routine is called when an EFD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding EFI if it
+ * was still in the log. To do this it searches the AIL for the EFI with an id
+ * equal to that in the EFD format structure. If we find it we drop the EFD
+ * reference, which removes the EFI from the AIL and frees it.
  */
 STATIC int
 xlog_recover_efd_pass2(
@@ -2985,8 +2998,8 @@
 	efi_id = efd_formatp->efd_efi_id;
 
 	/*
-	 * Search for the efi with the id in the efd format structure
-	 * in the AIL.
+	 * Search for the EFI with the id in the EFD format structure in the
+	 * AIL.
 	 */
 	spin_lock(&ailp->xa_lock);
 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
@@ -2995,18 +3008,18 @@
 			efip = (xfs_efi_log_item_t *)lip;
 			if (efip->efi_format.efi_id == efi_id) {
 				/*
-				 * xfs_trans_ail_delete() drops the
-				 * AIL lock.
+				 * Drop the EFD reference to the EFI. This
+				 * removes the EFI from the AIL and frees it.
 				 */
-				xfs_trans_ail_delete(ailp, lip,
-						     SHUTDOWN_CORRUPT_INCORE);
-				xfs_efi_item_free(efip);
+				spin_unlock(&ailp->xa_lock);
+				xfs_efi_release(efip);
 				spin_lock(&ailp->xa_lock);
 				break;
 			}
 		}
 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 	}
+
 	xfs_trans_ail_cursor_done(&cur);
 	spin_unlock(&ailp->xa_lock);
 
@@ -3034,6 +3047,11 @@
 	unsigned int		count;
 	unsigned int		isize;
 	xfs_agblock_t		length;
+	int			blks_per_cluster;
+	int			bb_per_cluster;
+	int			cancel_count;
+	int			nbufs;
+	int			i;
 
 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
 	if (icl->icl_type != XFS_LI_ICREATE) {
@@ -3092,22 +3110,45 @@
 	}
 
 	/*
-	 * Inode buffers can be freed. Do not replay the inode initialisation as
-	 * we could be overwriting something written after this inode buffer was
-	 * cancelled.
-	 *
-	 * XXX: we need to iterate all buffers and only init those that are not
-	 * cancelled. I think that a more fine grained factoring of
-	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
-	 * done easily.
+	 * The icreate transaction can cover multiple cluster buffers and these
+	 * buffers could have been freed and reused. Check the individual
+	 * buffers for cancellation so we don't overwrite anything written after
+	 * a cancellation.
 	 */
-	if (xlog_check_buffer_cancelled(log,
-			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
-		return 0;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
+	nbufs = length / blks_per_cluster;
+	for (i = 0, cancel_count = 0; i < nbufs; i++) {
+		xfs_daddr_t	daddr;
 
-	xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno, length,
-			      be32_to_cpu(icl->icl_gen));
-	return 0;
+		daddr = XFS_AGB_TO_DADDR(mp, agno,
+					 agbno + i * blks_per_cluster);
+		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
+			cancel_count++;
+	}
+
+	/*
+	 * We currently only use icreate for a single allocation at a time. This
+	 * means we should expect either all or none of the buffers to be
+	 * cancelled. Be conservative and skip replay if at least one buffer is
+	 * cancelled, but warn the user that something is awry if the buffers
+	 * are not consistent.
+	 *
+	 * XXX: This must be refined to only skip cancelled clusters once we use
+	 * icreate for multiple chunk allocations.
+	 */
+	ASSERT(!cancel_count || cancel_count == nbufs);
+	if (cancel_count) {
+		if (cancel_count != nbufs)
+			xfs_warn(mp,
+	"WARNING: partial inode chunk cancellation, skipped icreate.");
+		trace_xfs_log_recover_icreate_cancel(log, icl);
+		return 0;
+	}
+
+	trace_xfs_log_recover_icreate_recover(log, icl);
+	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
+				     length, be32_to_cpu(icl->icl_gen));
 }
 
 STATIC void
@@ -3385,14 +3426,24 @@
 	char			*ptr, *old_ptr;
 	int			old_len;
 
+	/*
+	 * If the transaction is empty, the header was split across this and the
+	 * previous record. Copy the rest of the header.
+	 */
 	if (list_empty(&trans->r_itemq)) {
-		/* finish copying rest of trans header */
+		ASSERT(len < sizeof(struct xfs_trans_header));
+		if (len > sizeof(struct xfs_trans_header)) {
+			xfs_warn(log->l_mp, "%s: bad header length", __func__);
+			return -EIO;
+		}
+
 		xlog_recover_add_item(&trans->r_itemq);
 		ptr = (char *)&trans->r_theader +
-				sizeof(xfs_trans_header_t) - len;
+				sizeof(struct xfs_trans_header) - len;
 		memcpy(ptr, dp, len);
 		return 0;
 	}
+
 	/* take the tail entry */
 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
 
@@ -3441,7 +3492,19 @@
 			ASSERT(0);
 			return -EIO;
 		}
-		if (len == sizeof(xfs_trans_header_t))
+
+		if (len > sizeof(struct xfs_trans_header)) {
+			xfs_warn(log->l_mp, "%s: bad header length", __func__);
+			ASSERT(0);
+			return -EIO;
+		}
+
+		/*
+		 * The transaction header can be arbitrarily split across op
+		 * records. If we don't have the whole thing here, copy what we
+		 * do have and handle the rest in the next record.
+		 */
+		if (len == sizeof(struct xfs_trans_header))
 			xlog_recover_add_item(&trans->r_itemq);
 		memcpy(&trans->r_theader, dp, len);
 		return 0;
@@ -3744,7 +3807,7 @@
 			 * free the memory associated with it.
 			 */
 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
-			xfs_efi_release(efip, efip->efi_format.efi_nextents);
+			xfs_efi_release(efip);
 			return -EIO;
 		}
 	}
@@ -3757,11 +3820,11 @@
 
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
 		extp = &(efip->efi_format.efi_extents[i]);
-		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
+		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
+					      extp->ext_len);
 		if (error)
 			goto abort_error;
-		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
-					 extp->ext_len);
+
 	}
 
 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
@@ -3793,10 +3856,10 @@
  */
 STATIC int
 xlog_recover_process_efis(
-	struct xlog	*log)
+	struct xlog		*log)
 {
-	xfs_log_item_t		*lip;
-	xfs_efi_log_item_t	*efip;
+	struct xfs_log_item	*lip;
+	struct xfs_efi_log_item	*efip;
 	int			error = 0;
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp;
@@ -3820,7 +3883,7 @@
 		/*
 		 * Skip EFIs that we've already processed.
 		 */
-		efip = (xfs_efi_log_item_t *)lip;
+		efip = container_of(lip, struct xfs_efi_log_item, efi_item);
 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
 			continue;
@@ -3840,6 +3903,50 @@
 }
 
 /*
+ * A cancel occurs when the mount has failed and we're bailing out. Release all
+ * pending EFIs so they don't pin the AIL.
+ */
+STATIC int
+xlog_recover_cancel_efis(
+	struct xlog		*log)
+{
+	struct xfs_log_item	*lip;
+	struct xfs_efi_log_item	*efip;
+	int			error = 0;
+	struct xfs_ail_cursor	cur;
+	struct xfs_ail		*ailp;
+
+	ailp = log->l_ailp;
+	spin_lock(&ailp->xa_lock);
+	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+	while (lip != NULL) {
+		/*
+		 * We're done when we see something other than an EFI.
+		 * There should be no EFIs left in the AIL now.
+		 */
+		if (lip->li_type != XFS_LI_EFI) {
+#ifdef DEBUG
+			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
+				ASSERT(lip->li_type != XFS_LI_EFI);
+#endif
+			break;
+		}
+
+		efip = container_of(lip, struct xfs_efi_log_item, efi_item);
+
+		spin_unlock(&ailp->xa_lock);
+		xfs_efi_release(efip);
+		spin_lock(&ailp->xa_lock);
+
+		lip = xfs_trans_ail_cursor_next(ailp, &cur);
+	}
+
+	xfs_trans_ail_cursor_done(&cur);
+	spin_unlock(&ailp->xa_lock);
+	return error;
+}
+
+/*
  * This routine performs a transaction to null out a bad inode pointer
  * in an agi unlinked inode hash bucket.
  */
@@ -4532,11 +4639,13 @@
 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
 			xfs_warn(log->l_mp,
-"Superblock has unknown incompatible log features (0x%x) enabled.\n"
-"The log can not be fully and/or safely recovered by this kernel.\n"
-"Please recover the log on a kernel that supports the unknown features.",
+"Superblock has unknown incompatible log features (0x%x) enabled.",
 				(log->l_mp->m_sb.sb_features_log_incompat &
 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
+			xfs_warn(log->l_mp,
+"The log can not be fully and/or safely recovered by this kernel.");
+			xfs_warn(log->l_mp,
+"Please recover the log on a kernel that supports the unknown features.");
 			return -EINVAL;
 		}
 
@@ -4612,6 +4721,17 @@
 	return 0;
 }
 
+int
+xlog_recover_cancel(
+	struct xlog	*log)
+{
+	int		error = 0;
+
+	if (log->l_flags & XLOG_RECOVERY_NEEDED)
+		error = xlog_recover_cancel_efis(log);
+
+	return error;
+}
 
 #if defined(DEBUG)
 /*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 461e791..bf92e0c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -615,14 +615,14 @@
  */
 int
 xfs_mountfs(
-	xfs_mount_t	*mp)
+	struct xfs_mount	*mp)
 {
-	xfs_sb_t	*sbp = &(mp->m_sb);
-	xfs_inode_t	*rip;
-	__uint64_t	resblks;
-	uint		quotamount = 0;
-	uint		quotaflags = 0;
-	int		error = 0;
+	struct xfs_sb		*sbp = &(mp->m_sb);
+	struct xfs_inode	*rip;
+	__uint64_t		resblks;
+	uint			quotamount = 0;
+	uint			quotaflags = 0;
+	int			error = 0;
 
 	xfs_sb_mount_common(mp, sbp);
 
@@ -799,7 +799,9 @@
 	}
 
 	/*
-	 * log's mount-time initialization. Perform 1st part recovery if needed
+	 * Log's mount-time initialization. The first part of recovery can place
+	 * some items on the AIL, to be handled when recovery is finished or
+	 * cancelled.
 	 */
 	error = xfs_log_mount(mp, mp->m_logdev_targp,
 			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
@@ -910,9 +912,9 @@
 	}
 
 	/*
-	 * Finish recovering the file system.  This part needed to be
-	 * delayed until after the root and real-time bitmap inodes
-	 * were consistently read in.
+	 * Finish recovering the file system.  This part needed to be delayed
+	 * until after the root and real-time bitmap inodes were consistently
+	 * read in.
 	 */
 	error = xfs_log_mount_finish(mp);
 	if (error) {
@@ -955,8 +957,10 @@
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
 	IRELE(rip);
+	cancel_delayed_work_sync(&mp->m_reclaim_work);
+	xfs_reclaim_inodes(mp, SYNC_WAIT);
  out_log_dealloc:
-	xfs_log_unmount(mp);
+	xfs_log_mount_cancel(mp);
  out_fail_wait:
 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
 		xfs_wait_buftarg(mp->m_logdev_targp);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index f4e8c06..ab1bac6 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -757,31 +757,30 @@
 /*
  * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
-STATIC int				/* error */
+STATIC int
 xfs_growfs_rt_alloc(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_extlen_t	oblocks,	/* old count of blocks */
-	xfs_extlen_t	nblocks,	/* new count of blocks */
-	xfs_inode_t	*ip)		/* inode (bitmap/summary) */
+	struct xfs_mount	*mp,		/* file system mount point */
+	xfs_extlen_t		oblocks,	/* old count of blocks */
+	xfs_extlen_t		nblocks,	/* new count of blocks */
+	struct xfs_inode	*ip)		/* inode (bitmap/summary) */
 {
-	xfs_fileoff_t	bno;		/* block number in file */
-	xfs_buf_t	*bp;		/* temporary buffer for zeroing */
-	int		committed;	/* transaction committed flag */
-	xfs_daddr_t	d;		/* disk block address */
-	int		error;		/* error return value */
-	xfs_fsblock_t	firstblock;	/* first block allocated in xaction */
-	xfs_bmap_free_t	flist;		/* list of freed blocks */
-	xfs_fsblock_t	fsbno;		/* filesystem block for bno */
-	xfs_bmbt_irec_t	map;		/* block map output */
-	int		nmap;		/* number of block maps */
-	int		resblks;	/* space reservation */
+	xfs_fileoff_t		bno;		/* block number in file */
+	struct xfs_buf		*bp;		/* temporary buffer for zeroing */
+	int			committed;	/* transaction committed flag */
+	xfs_daddr_t		d;		/* disk block address */
+	int			error;		/* error return value */
+	xfs_fsblock_t		firstblock;	/* first block allocated in xaction */
+	struct xfs_bmap_free	flist;		/* list of freed blocks */
+	xfs_fsblock_t		fsbno;		/* filesystem block for bno */
+	struct xfs_bmbt_irec	map;		/* block map output */
+	int			nmap;		/* number of block maps */
+	int			resblks;	/* space reservation */
+	struct xfs_trans	*tp;
 
 	/*
 	 * Allocate space to the file, as necessary.
 	 */
 	while (oblocks < nblocks) {
-		xfs_trans_t	*tp;
-
 		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
 		resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
 		/*
@@ -790,7 +789,7 @@
 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtalloc,
 					  resblks, 0);
 		if (error)
-			goto error_cancel;
+			goto out_trans_cancel;
 		/*
 		 * Lock the inode.
 		 */
@@ -808,16 +807,16 @@
 		if (!error && nmap < 1)
 			error = -ENOSPC;
 		if (error)
-			goto error_cancel;
+			goto out_bmap_cancel;
 		/*
 		 * Free any blocks freed up in the transaction, then commit.
 		 */
 		error = xfs_bmap_finish(&tp, &flist, &committed);
 		if (error)
-			goto error_cancel;
+			goto out_bmap_cancel;
 		error = xfs_trans_commit(tp);
 		if (error)
-			goto error;
+			return error;
 		/*
 		 * Now we need to clear the allocated blocks.
 		 * Do this one block per transaction, to keep it simple.
@@ -832,7 +831,7 @@
 			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
 						  0, 0);
 			if (error)
-				goto error_cancel;
+				goto out_trans_cancel;
 			/*
 			 * Lock the bitmap inode.
 			 */
@@ -846,9 +845,7 @@
 				mp->m_bsize, 0);
 			if (bp == NULL) {
 				error = -EIO;
-error_cancel:
-				xfs_trans_cancel(tp);
-				goto error;
+				goto out_trans_cancel;
 			}
 			memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
 			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
@@ -857,16 +854,20 @@
 			 */
 			error = xfs_trans_commit(tp);
 			if (error)
-				goto error;
+				return error;
 		}
 		/*
 		 * Go on to the next extent, if any.
 		 */
 		oblocks = map.br_startoff + map.br_blockcount;
 	}
+
 	return 0;
 
-error:
+out_bmap_cancel:
+	xfs_bmap_cancel(&flist);
+out_trans_cancel:
+	xfs_trans_cancel(tp);
 	return error;
 }
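
The relabelled error path above is the usual kernel goto-unwind convention: each failure jumps to the label that undoes exactly what has been set up so far, with labels ordered from deepest to shallowest cleanup so the exits fall through. A generic sketch of the pattern (all helpers hypothetical):

int acquire_a(void);	/* hypothetical setup/teardown helpers */
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int setup_things(void)
{
	int error;

	error = acquire_a();
	if (error)
		return error;		/* nothing to unwind yet */

	error = acquire_b();
	if (error)
		goto out_release_a;

	error = acquire_c();
	if (error)
		goto out_release_b;

	return 0;

out_release_b:				/* deepest cleanup first ... */
	release_b();
out_release_a:				/* ... falling through to shallower */
	release_a();
	return error;
}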
 
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1fb16562..904f637c 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -261,16 +261,8 @@
 			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
 			if (!mp->m_rtname)
 				return -ENOMEM;
-		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
-			if (!value || !*value) {
-				xfs_warn(mp, "%s option requires an argument",
-					this_char);
-				return -EINVAL;
-			}
-			if (kstrtoint(value, 10, &iosize))
-				return -EINVAL;
-			iosizelog = ffs(iosize) - 1;
-		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
+		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE) ||
+			   !strcmp(this_char, MNTOPT_BIOSIZE)) {
 			if (!value || !*value) {
 				xfs_warn(mp, "%s option requires an argument",
 					this_char);
@@ -511,9 +503,9 @@
 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
 
 	if (mp->m_logname)
-		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
+		seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
 	if (mp->m_rtname)
-		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
+		seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);
 
 	if (mp->m_dalign > 0)
 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
@@ -1528,6 +1520,10 @@
 		}
 	}
 
+	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+		xfs_alert(mp,
+	"EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
+
 	error = xfs_mountfs(mp);
 	if (error)
 		goto out_filestream_unmount;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 4be27b0..996481e 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -240,7 +240,8 @@
 	if (error)
 		goto out_trans_cancel;
 
-	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
+		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	/*
@@ -288,7 +289,7 @@
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	/*
@@ -421,7 +422,7 @@
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -501,7 +502,7 @@
 	/*
 	 * Unmap the dead block(s) to the free_list.
 	 */
-	error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+	error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps,
 			    &first_block, &free_list, &done);
 	if (error)
 		goto error_bmap_cancel;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 8d916d3..5ed36b1 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -687,6 +687,7 @@
 DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
 
 DEFINE_INODE_EVENT(xfs_filemap_fault);
+DEFINE_INODE_EVENT(xfs_filemap_pmd_fault);
 DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite);
 
 DECLARE_EVENT_CLASS(xfs_iref_class,
@@ -2089,6 +2090,40 @@
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
 
+DECLARE_EVENT_CLASS(xfs_log_recover_icreate_item_class,
+	TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f),
+	TP_ARGS(log, in_f),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(unsigned int, count)
+		__field(unsigned int, isize)
+		__field(xfs_agblock_t, length)
+		__field(unsigned int, gen)
+	),
+	TP_fast_assign(
+		__entry->dev = log->l_mp->m_super->s_dev;
+		__entry->agno = be32_to_cpu(in_f->icl_ag);
+		__entry->agbno = be32_to_cpu(in_f->icl_agbno);
+		__entry->count = be32_to_cpu(in_f->icl_count);
+		__entry->isize = be32_to_cpu(in_f->icl_isize);
+		__entry->length = be32_to_cpu(in_f->icl_length);
+		__entry->gen = be32_to_cpu(in_f->icl_gen);
+	),
+	TP_printk("dev %d:%d agno %u agbno %u count %u isize %u length %u "
+		  "gen %u", MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno, __entry->agbno, __entry->count, __entry->isize,
+		  __entry->length, __entry->gen)
+)
+#define DEFINE_LOG_RECOVER_ICREATE_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_icreate_item_class, name, \
+	TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f), \
+	TP_ARGS(log, in_f))
+
+DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_cancel);
+DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_recover);
+
 DECLARE_EVENT_CLASS(xfs_discard_class,
 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
 		 xfs_agblock_t agbno, xfs_extlen_t len),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 0582a27..a0ab1da 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1019,9 +1019,10 @@
  * chunk we've been working on and get a new transaction to continue.
  */
 int
-xfs_trans_roll(
+__xfs_trans_roll(
 	struct xfs_trans	**tpp,
-	struct xfs_inode	*dp)
+	struct xfs_inode	*dp,
+	int			*committed)
 {
 	struct xfs_trans	*trans;
 	struct xfs_trans_res	tres;
@@ -1052,6 +1053,7 @@
 	if (error)
 		return error;
 
+	*committed = 1;
 	trans = *tpp;
 
 	/*
@@ -1074,3 +1076,12 @@
 		xfs_trans_ijoin(trans, dp, 0);
 	return 0;
 }
+
+int
+xfs_trans_roll(
+	struct xfs_trans	**tpp,
+	struct xfs_inode	*dp)
+{
+	int			committed = 0;
+	return __xfs_trans_roll(tpp, dp, &committed);
+}
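
__xfs_trans_roll() exists so that callers which need to know whether the original transaction actually reached the log can observe that through *committed, while plain xfs_trans_roll() keeps the old behaviour for everyone else. A hedged sketch of a caller using the extended form:

static int roll_and_track(struct xfs_trans **tpp, struct xfs_inode *ip)
{
	int	committed = 0;
	int	error;

	error = __xfs_trans_roll(tpp, ip, &committed);
	if (error)
		return error;
	if (committed) {
		/*
		 * The first transaction made it to the log; any intent
		 * items it logged are now live and must be completed
		 * or cancelled by the caller.
		 */
	}
	return 0;
}
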
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 3b21b4e..4643070 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -213,7 +213,6 @@
 void		xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
 void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
 struct xfs_efi_log_item	*xfs_trans_get_efi(xfs_trans_t *, uint);
-void		xfs_efi_release(struct xfs_efi_log_item *, uint);
 void		xfs_trans_log_efi_extent(xfs_trans_t *,
 					 struct xfs_efi_log_item *,
 					 xfs_fsblock_t,
@@ -221,11 +220,11 @@
 struct xfs_efd_log_item	*xfs_trans_get_efd(xfs_trans_t *,
 				  struct xfs_efi_log_item *,
 				  uint);
-void		xfs_trans_log_efd_extent(xfs_trans_t *,
-					 struct xfs_efd_log_item *,
-					 xfs_fsblock_t,
-					 xfs_extlen_t);
+int		xfs_trans_free_extent(struct xfs_trans *,
+				      struct xfs_efd_log_item *, xfs_fsblock_t,
+				      xfs_extlen_t);
 int		xfs_trans_commit(struct xfs_trans *);
+int		__xfs_trans_roll(struct xfs_trans **, struct xfs_inode *, int *);
 int		xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void		xfs_trans_cancel(xfs_trans_t *);
 int		xfs_trans_ail_init(struct xfs_mount *);
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 284397d..a96ae54 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -25,6 +25,7 @@
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_extfree_item.h"
+#include "xfs_alloc.h"
 
 /*
  * This routine is called to allocate an "extent free intention"
@@ -108,19 +109,30 @@
 }
 
 /*
- * This routine is called to indicate that the described
- * extent is to be logged as having been freed.  It should
- * be called once for each extent freed.
+ * Free an extent and log it to the EFD. Note that the transaction is marked
+ * dirty regardless of whether the extent free succeeds or fails, in order to
+ * support the EFI/EFD lifecycle rules.
  */
-void
-xfs_trans_log_efd_extent(xfs_trans_t		*tp,
-			 xfs_efd_log_item_t	*efdp,
-			 xfs_fsblock_t		start_block,
-			 xfs_extlen_t		ext_len)
+int
+xfs_trans_free_extent(
+	struct xfs_trans	*tp,
+	struct xfs_efd_log_item	*efdp,
+	xfs_fsblock_t		start_block,
+	xfs_extlen_t		ext_len)
 {
 	uint			next_extent;
-	xfs_extent_t		*extp;
+	struct xfs_extent	*extp;
+	int			error;
 
+	error = xfs_free_extent(tp, start_block, ext_len);
+
+	/*
+	 * Mark the transaction dirty, even on error. This ensures the
+	 * transaction is aborted, which:
+	 *
+	 * 1.) releases the EFI and frees the EFD
+	 * 2.) shuts down the filesystem
+	 */
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	efdp->efd_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 
@@ -130,4 +142,6 @@
 	extp->ext_start = start_block;
 	extp->ext_len = ext_len;
 	efdp->efd_next_extent++;
+
+	return error;
 }
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 1b73629..49931b7 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -119,6 +119,21 @@
 	xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
 }
 
+static inline void
+xfs_trans_ail_remove(
+	struct xfs_log_item	*lip,
+	int			shutdown_type)
+{
+	struct xfs_ail		*ailp = lip->li_ailp;
+
+	spin_lock(&ailp->xa_lock);
+	/* xfs_trans_ail_delete() drops the AIL lock */
+	if (lip->li_flags & XFS_LI_IN_AIL)
+		xfs_trans_ail_delete(ailp, lip, shutdown_type);
+	else
+		spin_unlock(&ailp->xa_lock);
+}
+
 void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_ail_push_all(struct xfs_ail *);
 void			xfs_ail_push_all_sync(struct xfs_ail *);
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0..a94cbeb 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
 typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x)	atomic64 ## x
 
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else  /*  BITS_PER_LONG == 64  */
+#else
 
 typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
+#define ATOMIC_LONG_PFX(x)	atomic ## x
 
-	return (long)atomic_read(v);
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo)						\
+static inline long atomic_long_read##mo(atomic_long_t *l)		\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_read##mo)(v);			\
 }
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
 
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic_t *v = (atomic_t *)l;
+#undef ATOMIC_LONG_READ_OP
 
-	atomic_set(v, i);
+#define ATOMIC_LONG_SET_OP(mo)						\
+static inline void atomic_long_set##mo(atomic_long_t *l, long i)	\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	ATOMIC_LONG_PFX(_set##mo)(v, i);				\
 }
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo)					\
+static inline long							\
+atomic_long_##op##_return##mo(long i, atomic_long_t *l)			\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);		\
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+	(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
 static inline void atomic_long_inc(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_inc(v);
+	ATOMIC_LONG_PFX(_inc)(v);
 }
 
 static inline void atomic_long_dec(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_dec(v);
+	ATOMIC_LONG_PFX(_dec)(v);
 }
 
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_add(i, v);
+	ATOMIC_LONG_PFX(_add)(i, v);
 }
 
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_sub(i, v);
+	ATOMIC_LONG_PFX(_sub)(i, v);
 }
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_sub_and_test(i, v);
+	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
 }
 
 static inline int atomic_long_dec_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_dec_and_test(v);
+	return ATOMIC_LONG_PFX(_dec_and_test)(v);
 }
 
 static inline int atomic_long_inc_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_inc_and_test(v);
+	return ATOMIC_LONG_PFX(_inc_and_test)(v);
 }
 
 static inline int atomic_long_add_negative(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_sub_return(i, v);
+	return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
 static inline long atomic_long_inc_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_inc_return(v);
+	return (long)ATOMIC_LONG_PFX(_inc_return)(v);
 }
 
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_dec_return(v);
+	return (long)ATOMIC_LONG_PFX(_dec_return)(v);
 }
 
 static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_add_unless(v, a, u);
+	return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
 }
 
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic_xchg((atomic_t *)(v), (new)))
-
-#endif  /*  BITS_PER_LONG == 64  */
+#define atomic_long_inc_not_zero(l) \
+	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
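
The rewrite collapses two hand-maintained copies of every wrapper into a single definition that token-pastes the right prefix via ATOMIC_LONG_PFX(). On a 32-bit build, for example, ATOMIC_LONG_READ_OP(_acquire) expands to roughly:

static inline long atomic_long_read_acquire(atomic_long_t *l)
{
	atomic_t *v = (atomic_t *)l;

	return (long)atomic_read_acquire(v);
}

Each ordering variant (_relaxed/_acquire/_release) thus costs one macro invocation instead of another duplicated function body, and the 64-bit side gets the identical set just by switching the prefix.
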
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2..d4d7e33 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@
 ATOMIC_OP_RETURN(sub, -)
 #endif
 
-#ifndef atomic_clear_mask
+#ifndef atomic_and
 ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
 ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
 #endif
 
 #undef ATOMIC_OP_RETURN
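
With atomic_and()/atomic_or()/atomic_xor() provided directly, the removed atomic_clear_mask()/atomic_set_mask() helpers reduce to trivial call-site rewrites, exactly as the old #defines show. A sketch of the migration (FLAG is a hypothetical bit mask):

#define FLAG	0x4	/* hypothetical bit mask */

static void flag_example(atomic_t *v)
{
	/* was: atomic_clear_mask(FLAG, v); */
	atomic_and(~FLAG, v);

	/* was: atomic_set_mask(FLAG, v); */
	atomic_or(FLAG, v);
}
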
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c8..d48e78c 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
 
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc..b42afad 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
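
Replacing ACCESS_ONCE() with WRITE_ONCE()/READ_ONCE() keeps the one-way barrier semantics intact while using the non-deprecated accessors. These two primitives pair up in the classic message-passing pattern, sketched here:

static int data;
static int ready;

/* Producer: publish the payload, then set the flag with release semantics. */
static void producer(void)
{
	data = 42;
	smp_store_release(&ready, 1);
}

/* Consumer: an acquire load of the flag guarantees the payload is visible. */
static int consumer(void)
{
	if (smp_load_acquire(&ready))
		return data;	/* must observe 42 */
	return -1;
}
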
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c..734ad4d 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@
 				   unsigned long size);
 extern void *early_memremap(resource_size_t phys_addr,
 			    unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+			       unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void early_memunmap(void *addr, unsigned long size);
 
@@ -33,6 +35,12 @@
  */
 extern void early_ioremap_reset(void);
 
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+				unsigned long size);
+
 #else
 static inline void early_ioremap_init(void) { }
 static inline void early_ioremap_setup(void) { }
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index f23174f..1cbb833 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@
 #ifndef FIXMAP_PAGE_NORMAL
 #define FIXMAP_PAGE_NORMAL PAGE_KERNEL
 #endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
 #ifndef FIXMAP_PAGE_NOCACHE
 #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
 #endif
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 14909b0..f20f407 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
 })
 #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
 
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
+#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+
 #define page_to_pfn __page_to_pfn
 #define pfn_to_page __pfn_to_page
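
The new helpers centralise a conversion that most architectures previously open-coded. Assuming the common 4 KiB page size (PAGE_SHIFT == 12), the round trip looks like:

static void pfn_roundtrip(void)
{
	phys_addr_t paddr = 0x12345000;			/* page-aligned */
	unsigned long pfn = __phys_to_pfn(paddr);	/* 0x12345 */
	phys_addr_t back  = __pfn_to_phys(pfn);		/* 0x12345000 again */

	(void)back;
}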
 
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54..54a8e65 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
 /*
  * External function declarations
  */
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
 {
 	return !(atomic_read(&lock->cnts) & _QW_WMASK);
 }
 
 /**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
 {
 	return !atomic_read(&lock->cnts);
 }
 
 /**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
 			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@
 }
 
 /**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -89,78 +89,70 @@
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
  */
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queue_read_lock_slowpath(lock);
+	queued_read_lock_slowpath(lock, cnts);
 }
 
 /**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
-	queue_write_lock_slowpath(lock);
+	queued_write_lock_slowpath(lock);
 }
 
 /**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
 {
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
-#ifndef queue_write_unlock
 /**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	/*
-	 * If the writer field is atomic, it can be cleared directly.
-	 * Otherwise, an atomic subtraction will be used to clear it.
-	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QW_LOCKED, &lock->cnts);
+	smp_store_release((u8 *)&lock->cnts, 0);
 }
-#endif
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
  * queue rwlock functions.
  */
-#define arch_read_can_lock(l)	queue_read_can_lock(l)
-#define arch_write_can_lock(l)	queue_write_can_lock(l)
-#define arch_read_lock(l)	queue_read_lock(l)
-#define arch_write_lock(l)	queue_write_lock(l)
-#define arch_read_trylock(l)	queue_read_trylock(l)
-#define arch_write_trylock(l)	queue_write_trylock(l)
-#define arch_read_unlock(l)	queue_read_unlock(l)
-#define arch_write_unlock(l)	queue_write_unlock(l)
+#define arch_read_can_lock(l)	queued_read_can_lock(l)
+#define arch_write_can_lock(l)	queued_write_can_lock(l)
+#define arch_read_lock(l)	queued_read_lock(l)
+#define arch_write_lock(l)	queued_write_lock(l)
+#define arch_read_trylock(l)	queued_read_trylock(l)
+#define arch_write_trylock(l)	queued_write_trylock(l)
+#define arch_read_unlock(l)	queued_read_unlock(l)
+#define arch_write_unlock(l)	queued_write_unlock(l)
 
 #endif /* __ASM_GENERIC_QRWLOCK_H */
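
Beyond the queue_* to queued_* rename, the substantive change is the memory ordering: lock acquisition now uses _acquire-suffixed atomics and unlock uses a release store, instead of full barriers on both sides. The reader path pairs like this (a sketch of the ordering contract, not a new API):

static void reader(struct qrwlock *lock)
{
	queued_read_lock(lock);		/* acquire: critical-section accesses
					 * cannot move before this point */
	/* ... read-side critical section ... */
	queued_read_unlock(lock);	/* release: critical-section accesses
					 * cannot move after this point */
}
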
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index fa86f24..4e3b655 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 #include <linux/delay.h>
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
 
 #define RTC_PIE 0x40		/* periodic interrupt enable */
 #define RTC_AIE 0x20		/* alarm interrupt enable */
@@ -46,6 +49,7 @@
 {
 	unsigned char ctrl;
 	unsigned long flags;
+	unsigned char century = 0;
 
 #ifdef CONFIG_MACH_DECSTATION
 	unsigned int real_year;
@@ -79,6 +83,11 @@
 #ifdef CONFIG_MACH_DECSTATION
 	real_year = CMOS_READ(RTC_DEC_YEAR);
 #endif
+#ifdef CONFIG_ACPI
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    acpi_gbl_FADT.century)
+		century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
 	ctrl = CMOS_READ(RTC_CONTROL);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
@@ -90,12 +99,16 @@
 		time->tm_mday = bcd2bin(time->tm_mday);
 		time->tm_mon = bcd2bin(time->tm_mon);
 		time->tm_year = bcd2bin(time->tm_year);
+		century = bcd2bin(century);
 	}
 
 #ifdef CONFIG_MACH_DECSTATION
 	time->tm_year += real_year - 72;
 #endif
 
+	if (century)
+		time->tm_year += (century - 19) * 100;
+
 	/*
 	 * Account for differences between how the RTC uses the values
 	 * and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@
 #ifdef CONFIG_MACH_DECSTATION
 	unsigned int real_yrs, leap_yr;
 #endif
+	unsigned char century = 0;
 
 	yrs = time->tm_year;
 	mon = time->tm_mon + 1;   /* tm_mon starts at zero */
@@ -150,6 +164,15 @@
 		yrs = 73;
 	}
 #endif
+
+#ifdef CONFIG_ACPI
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    acpi_gbl_FADT.century) {
+		century = (yrs + 1900) / 100;
+		yrs %= 100;
+	}
+#endif
+
 	/* These limits and adjustments are independent of
 	 * whether the chip is in binary mode or not.
 	 */
@@ -169,6 +192,7 @@
 		day = bin2bcd(day);
 		mon = bin2bcd(mon);
 		yrs = bin2bcd(yrs);
+		century = bin2bcd(century);
 	}
 
 	save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@
 	CMOS_WRITE(hrs, RTC_HOURS);
 	CMOS_WRITE(min, RTC_MINUTES);
 	CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    acpi_gbl_FADT.century)
+		CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
 
 	CMOS_WRITE(save_control, RTC_CONTROL);
 	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
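
The century register is only touched when the FADT advertises it. The arithmetic is worth spelling out: struct rtc_time counts years from 1900, so 2015 is tm_year == 115; the write path splits that into century 20 and year 15, and the read path reassembles it. A compact restatement:

static void rtc_century_example(void)
{
	/* write side: split tm_year == 115 (i.e. 2015) for the CMOS */
	unsigned int yrs = 115;
	unsigned char century = (yrs + 1900) / 100;	/* 20 */
	yrs %= 100;					/* 15 */

	/* read side: reassemble tm_year from the two registers */
	unsigned int tm_year = yrs + (century - 19) * 100;	/* 115 */

	(void)tm_year;
}
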
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8bd374d..1781e54e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -412,12 +412,10 @@
 * during the second ld pass when generating System.map */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
-		*(.text.hot)						\
-		*(.text .text.fixup)					\
+		*(.text.hot .text .text.fixup .text.unlikely)		\
 		*(.ref.text)						\
 	MEM_KEEP(init.text)						\
 	MEM_KEEP(exit.text)						\
-		*(.text.unlikely)
 
 
 /* sched.text is aligned to function alignment to ensure we have same
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 691c791..441aff9 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -9,6 +9,11 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#ifndef _CRYPTO_PKCS7_H
+#define _CRYPTO_PKCS7_H
+
+#include <crypto/public_key.h>
+
 struct key;
 struct pkcs7_message;
 
@@ -33,4 +38,10 @@
 /*
  * pkcs7_verify.c
  */
-extern int pkcs7_verify(struct pkcs7_message *pkcs7);
+extern int pkcs7_verify(struct pkcs7_message *pkcs7,
+			enum key_being_used_for usage);
+
+extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+				      const void *data, size_t datalen);
+
+#endif /* _CRYPTO_PKCS7_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 54add20..067c242 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -33,12 +33,27 @@
 enum pkey_id_type {
 	PKEY_ID_PGP,		/* OpenPGP generated key ID */
 	PKEY_ID_X509,		/* X.509 arbitrary subjectKeyIdentifier */
+	PKEY_ID_PKCS7,		/* Signature in PKCS#7 message */
 	PKEY_ID_TYPE__LAST
 };
 
 extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
 
 /*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+	VERIFYING_MODULE_SIGNATURE,
+	VERIFYING_FIRMWARE_SIGNATURE,
+	VERIFYING_KEXEC_PE_SIGNATURE,
+	VERIFYING_KEY_SIGNATURE,
+	VERIFYING_KEY_SELF_SIGNATURE,
+	VERIFYING_UNSPECIFIED_SIGNATURE,
+	NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+/*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
  *
  * Note that this may include private part of the key as well as the public
@@ -101,7 +116,8 @@
 
 struct asymmetric_key_id;
 extern struct key *x509_request_asymmetric_key(struct key *keyring,
-					       const struct asymmetric_key_id *kid,
+					       const struct asymmetric_key_id *id,
+					       const struct asymmetric_key_id *skid,
 					       bool partial);
 
 #endif /* _LINUX_PUBLIC_KEY_H */
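
The new usage argument lets pkcs7_verify() apply per-purpose policy and report what a signature was checked for. A hedged sketch of verifying a detached signature, assuming pkcs7_parse_message()/pkcs7_free_message() as the library's existing parse and free helpers:

static int verify_detached(const void *sig, size_t siglen,
			   const void *data, size_t datalen)
{
	struct pkcs7_message *pkcs7;
	int ret;

	pkcs7 = pkcs7_parse_message(sig, siglen);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	/* attach the payload the detached signature covers */
	ret = pkcs7_supply_detached_data(pkcs7, data, datalen);
	if (ret < 0)
		goto out;

	ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
out:
	pkcs7_free_message(pkcs7);
	return ret;
}
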
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc..bae79f3 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
 
 #include <drm/drmP.h>
 
+struct dw_hdmi;
+
 enum {
 	DW_HDMI_RES_8,
 	DW_HDMI_RES_10,
@@ -59,4 +61,9 @@
 		 void *data, struct drm_encoder *encoder,
 		 struct resource *iores, int irq,
 		 const struct dw_hdmi_plat_data *plat_data);
+
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+
 #endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa5197..8b5ce7c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@
 /*@{*/
 
 /* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP     0x1
-#define DRIVER_PCI_DMA     0x8
-#define DRIVER_SG          0x10
-#define DRIVER_HAVE_DMA    0x20
-#define DRIVER_HAVE_IRQ    0x40
-#define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_GEM         0x1000
-#define DRIVER_MODESET     0x2000
-#define DRIVER_PRIME       0x4000
-#define DRIVER_RENDER      0x8000
-#define DRIVER_ATOMIC      0x10000
+#define DRIVER_USE_AGP			0x1
+#define DRIVER_PCI_DMA			0x8
+#define DRIVER_SG			0x10
+#define DRIVER_HAVE_DMA			0x20
+#define DRIVER_HAVE_IRQ			0x40
+#define DRIVER_IRQ_SHARED		0x80
+#define DRIVER_GEM			0x1000
+#define DRIVER_MODESET			0x2000
+#define DRIVER_PRIME			0x4000
+#define DRIVER_RENDER			0x8000
+#define DRIVER_ATOMIC			0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT	0x20000
 
 /***********************************************************************/
 /** \name Macros to make printk easier */
@@ -675,13 +676,12 @@
 
 	/* currently active master for this node. Protected by master_mutex */
 	struct drm_master *master;
-	struct drm_mode_group mode_group;
 };
 
 
 struct drm_pending_vblank_event {
 	struct drm_pending_event base;
-	int pipe;
+	unsigned int pipe;
 	struct drm_event_vblank event;
 };
 
@@ -700,7 +700,7 @@
 					/* for wraparound handling */
 	u32 last_wait;			/* Last vblank seqno waited per CRTC */
 	unsigned int inmodeset;		/* Display driver is setting mode */
-	int crtc;			/* crtc index */
+	unsigned int pipe;		/* crtc index */
 	bool enabled;			/* so we don't call enable more than
 					   once per disable */
 };
@@ -887,6 +887,7 @@
 /*@{*/
 
 				/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
 extern long drm_ioctl(struct file *filp,
 		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@
 extern int drm_irq_install(struct drm_device *dev, int irq);
 extern int drm_irq_uninstall(struct drm_device *dev);
 
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 				     struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
-				     struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+				  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				       struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-						 int crtc, int *max_error,
+						 unsigned int pipe, int *max_error,
 						 struct timeval *vblank_time,
 						 unsigned flags,
 						 const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@
 }
 
 /* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
 
 				/* Stub support (drm_stub.h) */
 extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a913..e67aeac 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
 {
-	return state->mode_changed || state->active_changed;
+	return state->mode_changed || state->active_changed ||
+	       state->connectors_changed;
 }
 
 
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8..11266d1 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event,
 				uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode);
 
 /* default implementations for state handling */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4..faaeff7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@
  * @crtc: backpointer to the CRTC
  * @enable: whether the CRTC should be enabled, gates all other state
  * @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  * 	update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
  * @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@
 	bool planes_changed : 1;
 	bool mode_changed : 1;
 	bool active_changed : 1;
+	bool connectors_changed : 1;
 
 	/* attached planes bitmask:
 	 * WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@
  * etc.
  */
 struct drm_connector_funcs {
-	void (*dpms)(struct drm_connector *connector, int mode);
+	int (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
 	void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@
 
 	uint32_t possible_crtcs;
 	uint32_t *format_types;
-	uint32_t format_count;
+	unsigned int format_count;
 	bool format_default;
 
 	struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@
 };
 
 /**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state.  But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
-	uint32_t num_crtcs;
-	uint32_t num_encoders;
-	uint32_t num_connectors;
-
-	/* list of object IDs for this group */
-	uint32_t *id_list;
-};
-
-/**
  * struct drm_mode_config - Mode configuration control structure
  * @mutex: mutex protecting KMS related lists and structures
  * @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@
 				    unsigned long possible_crtcs,
 				    const struct drm_plane_funcs *funcs,
 				    const uint32_t *formats,
-				    uint32_t format_count,
+				    unsigned int format_count,
 				    enum drm_plane_type type);
 extern int drm_plane_init(struct drm_device *dev,
 			  struct drm_plane *plane,
 			  unsigned long possible_crtcs,
 			  const struct drm_plane_funcs *funcs,
-			  const uint32_t *formats, uint32_t format_count,
+			  const uint32_t *formats, unsigned int format_count,
 			  bool is_primary);
 extern void drm_plane_cleanup(struct drm_plane *plane);
 extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@
 extern void drm_fb_release(struct drm_file *file_priv);
 extern void drm_property_destroy_user_blobs(struct drm_device *dev,
                                             struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@
 }
 
 /* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
-	list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
 		if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
+#define drm_for_each_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+	/*
+	 * The connector hotadd/remove code currently grabs both locks when
+	 * updating lists. Hence readers need only hold either of them to be
+	 * safe and the check amounts to
+	 *
+	 * WARN_ON(not_holding(A) && not_holding(B)).
+	 */
+	WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+		!drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+	for (assert_drm_connector_list_read_locked(&(dev)->mode_config),	\
+	     connector = list_first_entry(&(dev)->mode_config.connector_list,	\
+					  struct drm_connector, head);		\
+	     &connector->head != (&(dev)->mode_config.connector_list);		\
+	     connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+	list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+	for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),		\
+	     fb = list_first_entry(&(dev)->mode_config.fb_list,	\
+					  struct drm_framebuffer, head);	\
+	     &fb->head != (&(dev)->mode_config.fb_list);			\
+	     fb = list_next_entry(fb, head))
+
 #endif /* __DRM_CRTC_H__ */
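
The new iterators also encode their locking rules: drm_for_each_connector() WARNs unless the caller holds either mode_config.mutex or the connection_mutex ww lock. Typical usage, sketched:

static void dump_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		/* list is stable while either lock is held */
		DRM_INFO("connector %s\n", connector->name);
	}
	mutex_unlock(&dev->mode_config.mutex);
}
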
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68..2a747a9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@
 	/* atomic helpers */
 	int (*atomic_check)(struct drm_crtc *crtc,
 			    struct drm_crtc_state *state);
-	void (*atomic_begin)(struct drm_crtc *crtc);
-	void (*atomic_flush)(struct drm_crtc *crtc);
+	void (*atomic_begin)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
+	void (*atomic_flush)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -190,7 +192,7 @@
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f64..499e9f6 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
 
 #define DP_TEST_SINK_MISC		    0x246
 # define DP_TEST_CRC_SUPPORTED		    (1 << 5)
-# define DP_TEST_COUNT_MASK		    0x7
+# define DP_TEST_COUNT_MASK		    0xf
 
 #define DP_TEST_RESPONSE		    0x260
 # define DP_TEST_ACK			    (1 << 0)
@@ -578,6 +578,7 @@
 u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
+#define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def..dbab462 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@
 	bool delayed_hotplug;
 };
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@
 			    struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth);
 
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos);
+
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@
 int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
+#else
+static inline void drm_fb_helper_prepare(struct drm_device *dev,
+					struct drm_fb_helper *helper,
+					const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *helper, int crtc_count,
+		       int max_conn)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
+{
+}
+
+static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+					    struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_set_par(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+					  struct fb_info *info)
+{
+	return 0;
+}
+
+static inline bool
+drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+	return true;
+}
+
+static inline struct fb_info *
+drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	return NULL;
+}
+
+static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_fill_var(struct fb_info *info,
+					  struct drm_fb_helper *fb_helper,
+					  uint32_t fb_width, uint32_t fb_height)
+{
+}
+
+static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+					  uint32_t depth)
+{
+}
+
+static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
+					struct fb_info *info)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
+					     char __user *buf, size_t count,
+					     loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
+					      const char __user *buf,
+					      size_t count, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+					     int state)
+{
+}
+
+static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
+					       int bpp_sel)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+		       int width, int height)
+{
+	return NULL;
+}
+
+static inline struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+		      int width, int height)
+{
+	return NULL;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+				struct drm_connector *connector)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+				   struct drm_connector *connector)
+{
+	return 0;
+}
+#endif
 #endif
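
The stub block lets drivers call the fbdev helpers unconditionally and have a CONFIG_DRM_FBDEV_EMULATION=n build compile them away, rather than scattering #ifdefs through driver code. For example, a driver suspend hook can stay ifdef-free (names hypothetical):

struct foo_private {			/* hypothetical driver state */
	struct drm_fb_helper fb_helper;
};

static int foo_drm_suspend(struct drm_device *dev)
{
	struct foo_private *priv = dev->dev_private;

	/* compiles to a no-op when fbdev emulation is configured out */
	drm_fb_helper_set_suspend(&priv->fb_helper, 1);
	return 0;
}
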
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff..5dd18bf 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@
 struct drm_plane;
 
 void drm_modeset_lock_all(struct drm_device *dev);
-int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
 void drm_modeset_unlock_all(struct drm_device *dev);
 void drm_modeset_lock_crtc(struct drm_crtc *crtc,
 			   struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e1628..dda401b 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
  * planes.
  */
 
-extern int drm_crtc_init(struct drm_device *dev,
-			 struct drm_crtc *crtc,
-			 const struct drm_crtc_funcs *funcs);
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs);
 
 /**
  * drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@
 	plane->helper_private = funcs;
 }
 
-extern int drm_plane_helper_check_update(struct drm_plane *plane,
-					 struct drm_crtc *crtc,
-					 struct drm_framebuffer *fb,
-					 struct drm_rect *src,
-					 struct drm_rect *dest,
-					 const struct drm_rect *clip,
-					 int min_scale,
-					 int max_scale,
-					 bool can_position,
-					 bool can_update_disabled,
-					 bool *visible);
-extern int drm_primary_helper_update(struct drm_plane *plane,
-				     struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     int crtc_x, int crtc_y,
-				     unsigned int crtc_w, unsigned int crtc_h,
-				     uint32_t src_x, uint32_t src_y,
-				     uint32_t src_w, uint32_t src_h);
-extern int drm_primary_helper_disable(struct drm_plane *plane);
-extern void drm_primary_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_check_update(struct drm_plane *plane,
+				  struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_rect *src,
+				  struct drm_rect *dest,
+				  const struct drm_rect *clip,
+				  int min_scale,
+				  int max_scale,
+				  bool can_position,
+				  bool can_update_disabled,
+				  bool *visible);
+int drm_primary_helper_update(struct drm_plane *plane,
+			      struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      unsigned int crtc_w, unsigned int crtc_h,
+			      uint32_t src_x, uint32_t src_y,
+			      uint32_t src_w, uint32_t src_h);
+int drm_primary_helper_disable(struct drm_plane *plane);
+void drm_primary_helper_destroy(struct drm_plane *plane);
 extern const struct drm_plane_funcs drm_primary_helper_funcs;
 
 int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64..b2d56dd 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@
 		void (*codec_wake_override)(struct device *, bool enable);
 		int (*get_cdclk_freq)(struct device *);
 	} *ops;
+
+	const struct i915_audio_component_audio_ops {
+		void *audio_ptr;
+		/**
+		 * Called from the i915 driver to notify the HDA driver
+		 * that pin sense and/or ELD information has changed.
+		 * @audio_ptr:		HDA driver object
+		 * @port:		Which port has changed (PORTA / PORTB / PORTC etc)
+		 */
+		void (*pin_eld_notify)(void *audio_ptr, int port);
+	} *audio_ops;
 };
 
 #endif /* _I915_COMPONENT_H_ */
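
A hedged sketch of the consumer side of the new audio_ops hook: the HDA
driver fills in @audio_ptr and @pin_eld_notify and attaches the ops to the
shared component. Everything except the struct and field names is
illustrative:

	static void example_pin_eld_notify(void *audio_ptr, int port)
	{
		struct example_hda *hda = audio_ptr;	/* illustrative type */

		/* re-read pin sense / ELD for the port i915 flagged */
		example_refresh_eld(hda, port);
	}

	static struct example_hda example_hda_dev;

	static const struct i915_audio_component_audio_ops example_aops = {
		.audio_ptr	= &example_hda_dev,
		.pin_eld_notify	= example_pin_eld_notify,
	};
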
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdad..9e9bddaa5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end);
 
 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 0000000..ad9e6ec
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
+#define __DT_BINDINGS_DMA_AXI_DMAC_H__
+
+#define AXI_DMAC_BUS_TYPE_AXI_MM	0
+#define AXI_DMAC_BUS_TYPE_AXI_STREAM	1
+#define AXI_DMAC_BUS_TYPE_FIFO		2
+
+#endif
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fd..0000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
-
-/*
- * Request type numbers for the JZ4780 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4780_DMA_I2S1_TX	0x4
-#define JZ4780_DMA_I2S1_RX	0x5
-#define JZ4780_DMA_I2S0_TX	0x6
-#define JZ4780_DMA_I2S0_RX	0x7
-#define JZ4780_DMA_AUTO		0x8
-#define JZ4780_DMA_SADC_RX	0x9
-#define JZ4780_DMA_UART4_TX	0xc
-#define JZ4780_DMA_UART4_RX	0xd
-#define JZ4780_DMA_UART3_TX	0xe
-#define JZ4780_DMA_UART3_RX	0xf
-#define JZ4780_DMA_UART2_TX	0x10
-#define JZ4780_DMA_UART2_RX	0x11
-#define JZ4780_DMA_UART1_TX	0x12
-#define JZ4780_DMA_UART1_RX	0x13
-#define JZ4780_DMA_UART0_TX	0x14
-#define JZ4780_DMA_UART0_RX	0x15
-#define JZ4780_DMA_SSI0_TX	0x16
-#define JZ4780_DMA_SSI0_RX	0x17
-#define JZ4780_DMA_SSI1_TX	0x18
-#define JZ4780_DMA_SSI1_RX	0x19
-#define JZ4780_DMA_MSC0_TX	0x1a
-#define JZ4780_DMA_MSC0_RX	0x1b
-#define JZ4780_DMA_MSC1_TX	0x1c
-#define JZ4780_DMA_MSC1_RX	0x1d
-#define JZ4780_DMA_MSC2_TX	0x1e
-#define JZ4780_DMA_MSC2_RX	0x1f
-#define JZ4780_DMA_PCM0_TX	0x20
-#define JZ4780_DMA_PCM0_RX	0x21
-#define JZ4780_DMA_SMB0_TX	0x24
-#define JZ4780_DMA_SMB0_RX	0x25
-#define JZ4780_DMA_SMB1_TX	0x26
-#define JZ4780_DMA_SMB1_RX	0x27
-#define JZ4780_DMA_SMB2_TX	0x28
-#define JZ4780_DMA_SMB2_RX	0x29
-#define JZ4780_DMA_SMB3_TX	0x2a
-#define JZ4780_DMA_SMB3_RX	0x2b
-#define JZ4780_DMA_SMB4_TX	0x2c
-#define JZ4780_DMA_SMB4_RX	0x2d
-#define JZ4780_DMA_DES_TX	0x2e
-#define JZ4780_DMA_DES_RX	0x2f
-
-#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h
new file mode 100644
index 0000000..1d5da81
--- /dev/null
+++ b/include/dt-bindings/i2c/i2c.h
@@ -0,0 +1,18 @@
+/*
+ * This header provides constants for I2C bindings
+ *
+ * Copyright (C) 2015 by Sang Engineering
+ * Copyright (C) 2015 by Renesas Electronics Corporation
+ *
+ * Wolfram Sang <wsa@sang-engineering.com>
+ *
+ * GPLv2 only
+ */
+
+#ifndef _DT_BINDINGS_I2C_I2C_H
+#define _DT_BINDINGS_I2C_I2C_H
+
+#define I2C_TEN_BIT_ADDRESS	(1 << 31)
+#define I2C_OWN_SLAVE_ADDRESS	(1 << 30)
+
+#endif
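
Both flags live in the top bits of a device-tree "reg" cell. A sketch of how
a consumer could decode them; the cell handling around the two defines is an
assumption, not part of this patch:

	bool ten_bit = false, own_slave = false;
	u32 addr = reg_cell;	/* raw value from the "reg" property */

	if (addr & I2C_TEN_BIT_ADDRESS)
		ten_bit = true;		/* low bits hold a 10-bit address */
	if (addr & I2C_OWN_SLAVE_ADDRESS)
		own_slave = true;	/* we are the slave at this address */
	addr &= ~(I2C_TEN_BIT_ADDRESS | I2C_OWN_SLAVE_ADDRESS);
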
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
new file mode 100644
index 0000000..a0b5c7b
--- /dev/null
+++ b/include/dt-bindings/media/c8sectpfe.h
@@ -0,0 +1,12 @@
+#ifndef __DT_C8SECTPFE_H
+#define __DT_C8SECTPFE_H
+
+#define STV0367_TDA18212_NIMA_1	0
+#define STV0367_TDA18212_NIMA_2	1
+#define STV0367_TDA18212_NIMB_1	2
+#define STV0367_TDA18212_NIMB_2	3
+
+#define STV0903_6110_LNB24_NIMA	4
+#define STV0903_6110_LNB24_NIMB	5
+
+#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75..d05894a 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
 
 #define ST_LPC_MODE_RTC		0
 #define ST_LPC_MODE_WDT		1
+#define ST_LPC_MODE_CLKSRC	2
 
 #endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c102054..a15c170 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
 #define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
 
 /* power-source */
+
+/* Digital Input/Output: level [PM8058] */
+#define PM8058_MPP_VPH			0
+#define PM8058_MPP_S3			1
+#define PM8058_MPP_L2			2
+#define PM8058_MPP_L3			3
+
+/* Digital Input/Output: level [PM8901] */
+#define PM8901_MPP_MSMIO		0
+#define PM8901_MPP_DIG			1
+#define PM8901_MPP_L5			2
+#define PM8901_MPP_S4			3
+#define PM8901_MPP_VPH			4
+
+/* Digital Input/Output: level [PM8921] */
+#define PM8921_MPP_S4			1
+#define PM8921_MPP_L15			3
+#define PM8921_MPP_L17			4
+#define PM8921_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8821] */
+#define PM8821_MPP_1P8			0
+#define PM8821_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8018] */
+#define PM8018_MPP_L4			0
+#define PM8018_MPP_L14			1
+#define PM8018_MPP_S3			2
+#define PM8018_MPP_L6			3
+#define PM8018_MPP_L2			4
+#define PM8018_MPP_L5			5
+#define PM8018_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8038] */
+#define PM8038_MPP_L20			0
+#define PM8038_MPP_L11			1
+#define PM8038_MPP_L5			2
+#define PM8038_MPP_L15			3
+#define PM8038_MPP_L17			4
+#define PM8038_MPP_VPH			7
+
 #define PM8841_MPP_VPH			0
 #define PM8841_MPP_S3			2
 
@@ -37,6 +78,16 @@
 #define PMIC_MPP_AMUX_ROUTE_ABUS3	6
 #define PMIC_MPP_AMUX_ROUTE_ABUS4	7
 
+/* Analog Output: level */
+#define PMIC_MPP_AOUT_LVL_1V25		0
+#define PMIC_MPP_AOUT_LVL_1V25_2	1
+#define PMIC_MPP_AOUT_LVL_0V625		2
+#define PMIC_MPP_AOUT_LVL_0V3125	3
+#define PMIC_MPP_AOUT_LVL_MPP		4
+#define PMIC_MPP_AOUT_LVL_ABUS1		5
+#define PMIC_MPP_AOUT_LVL_ABUS2		6
+#define PMIC_MPP_AOUT_LVL_ABUS3		7
+
 /* To be used with "function" */
 #define PMIC_MPP_FUNC_NORMAL		"normal"
 #define PMIC_MPP_FUNC_PAIRED		"paired"
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 72665eb..b20cd88 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -15,6 +15,7 @@
 #ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
 
 #include <linux/key.h>
+#include <crypto/public_key.h>
 
 extern struct key *system_trusted_keyring;
 static inline struct key *get_system_trusted_keyring(void)
@@ -28,4 +29,10 @@
 }
 #endif
 
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+extern int system_verify_data(const void *data, unsigned long len,
+			      const void *raw_pkcs7, size_t pkcs7_len,
+			      enum key_being_used_for usage);
+#endif
+
 #endif /* _KEYS_SYSTEM_KEYRING_H */
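
A hedged sketch of a caller checking a blob against a detached PKCS#7
signature with the new hook; the buffer names are illustrative and the usage
enum comes from <crypto/public_key.h>:

	int err;

	err = system_verify_data(payload, payload_len, pkcs7_sig, pkcs7_len,
				 VERIFYING_UNSPECIFIED_SIGNATURE);
	if (err < 0)
		return err;	/* malformed or untrusted signature */
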
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index 945d44a..ab3a6c0 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@
 	ASN1_OP_MATCH_JUMP		= 0x04,
 	ASN1_OP_MATCH_JUMP_OR_SKIP	= 0x05,
 	ASN1_OP_MATCH_ANY		= 0x08,
+	ASN1_OP_MATCH_ANY_OR_SKIP	= 0x09,
 	ASN1_OP_MATCH_ANY_ACT		= 0x0a,
+	ASN1_OP_MATCH_ANY_ACT_OR_SKIP	= 0x0b,
 	/* Everything before here matches unconditionally */
 
 	ASN1_OP_COND_MATCH_OR_SKIP	= 0x11,
 	ASN1_OP_COND_MATCH_ACT_OR_SKIP	= 0x13,
 	ASN1_OP_COND_MATCH_JUMP_OR_SKIP	= 0x15,
 	ASN1_OP_COND_MATCH_ANY		= 0x18,
+	ASN1_OP_COND_MATCH_ANY_OR_SKIP	= 0x19,
 	ASN1_OP_COND_MATCH_ANY_ACT	= 0x1a,
+	ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
 
 	/* Everything before here will want a tag from the data */
-#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
+#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
 
 	/* These are here to help fill up space */
-	ASN1_OP_COND_FAIL		= 0x1b,
-	ASN1_OP_COMPLETE		= 0x1c,
-	ASN1_OP_ACT			= 0x1d,
-	ASN1_OP_RETURN			= 0x1e,
+	ASN1_OP_COND_FAIL		= 0x1c,
+	ASN1_OP_COMPLETE		= 0x1d,
+	ASN1_OP_ACT			= 0x1e,
+	ASN1_OP_MAYBE_ACT		= 0x1f,
 
 	/* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
 	ASN1_OP_END_SEQ			= 0x20,
@@ -76,6 +80,8 @@
 #define ASN1_OP_END__OF			  0x02
 #define ASN1_OP_END__ACT		  0x04
 
+	ASN1_OP_RETURN			= 0x28,
+
 	ASN1_OP__NR
 };
 
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a85..00a5763 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...)				\
+({									\
+	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+#define __atomic_op_release(op, args...)				\
+({									\
+	smp_mb__before_atomic();					\
+	op##_relaxed(args);						\
+})
+
+#define __atomic_op_fence(op, args...)					\
+({									\
+	typeof(op##_relaxed(args)) __ret;				\
+	smp_mb__before_atomic();					\
+	__ret = op##_relaxed(args);					\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define  atomic_add_return_relaxed	atomic_add_return
+#define  atomic_add_return_acquire	atomic_add_return
+#define  atomic_add_return_release	atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define  atomic_add_return_acquire(...)					\
+	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define  atomic_add_return_release(...)					\
+	__atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define  atomic_add_return(...)						\
+	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define  atomic_sub_return_relaxed	atomic_sub_return
+#define  atomic_sub_return_acquire	atomic_sub_return
+#define  atomic_sub_return_release	atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define  atomic_sub_return_acquire(...)					\
+	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define  atomic_sub_return_release(...)					\
+	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define  atomic_sub_return(...)						\
+	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define  atomic_xchg_relaxed		atomic_xchg
+#define  atomic_xchg_acquire		atomic_xchg
+#define  atomic_xchg_release		atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define  atomic_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define  atomic_xchg_release(...)					\
+	__atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define  atomic_xchg(...)						\
+	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define  atomic_cmpxchg_acquire		atomic_cmpxchg
+#define  atomic_cmpxchg_release		atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define  atomic_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define  atomic_cmpxchg_release(...)					\
+	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define  atomic_cmpxchg(...)						\
+	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define  atomic64_add_return_relaxed	atomic64_add_return
+#define  atomic64_add_return_acquire	atomic64_add_return
+#define  atomic64_add_return_release	atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define  atomic64_add_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define  atomic64_add_return_release(...)				\
+	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define  atomic64_add_return(...)					\
+	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define  atomic64_sub_return_relaxed	atomic64_sub_return
+#define  atomic64_sub_return_acquire	atomic64_sub_return
+#define  atomic64_sub_return_release	atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define  atomic64_sub_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define  atomic64_sub_return_release(...)				\
+	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define  atomic64_sub_return(...)					\
+	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define  atomic64_xchg_relaxed		atomic64_xchg
+#define  atomic64_xchg_acquire		atomic64_xchg
+#define  atomic64_xchg_release		atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define  atomic64_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define  atomic64_xchg_release(...)					\
+	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define  atomic64_xchg(...)						\
+	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
+#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
+#define  atomic64_cmpxchg_release	atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define  atomic64_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define  atomic64_cmpxchg_release(...)					\
+	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define  atomic64_cmpxchg(...)						\
+	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define  cmpxchg_relaxed		cmpxchg
+#define  cmpxchg_acquire		cmpxchg
+#define  cmpxchg_release		cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define  cmpxchg_acquire(...)						\
+	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define  cmpxchg_release(...)						\
+	__atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define  cmpxchg(...)							\
+	__atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define  cmpxchg64_relaxed		cmpxchg64
+#define  cmpxchg64_acquire		cmpxchg64
+#define  cmpxchg64_release		cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define  cmpxchg64_acquire(...)						\
+	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define  cmpxchg64_release(...)						\
+	__atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define  cmpxchg64(...)							\
+	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define  xchg_relaxed			xchg
+#define  xchg_acquire			xchg
+#define  xchg_release			xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 #endif
 
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@
 }
 #endif
 
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
-	int old;
-	int new;
-
-	do {
-		old = atomic_read(v);
-		new = old | i;
-	} while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
 #include <asm-generic/atomic-long.h>
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+	atomic64_and(~i, v);
+}
+#endif
+
 #endif /* _LINUX_ATOMIC_H */
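
The net effect is that an architecture now only needs to supply the _relaxed
form of an operation and this header manufactures the rest. An illustration
of what each variant guarantees, on an arch that defines only
atomic_add_return_relaxed():

	atomic_t v = ATOMIC_INIT(0);
	int old;

	old = atomic_add_return_relaxed(1, &v);	/* no ordering guarantee */
	old = atomic_add_return_acquire(1, &v);	/* relaxed op, then smp_mb__after_atomic() */
	old = atomic_add_return_release(1, &v);	/* smp_mb__before_atomic(), then relaxed op */
	old = atomic_add_return(1, &v);		/* fully ordered, fences on both sides */
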
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c2e7e3a..b2abc99 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -27,6 +27,9 @@
 #include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
 
+#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_DEV_UNSET ((dev_t)-1)
+
 struct audit_sig_info {
 	uid_t		uid;
 	pid_t		pid;
@@ -59,6 +62,7 @@
 	struct audit_field	*inode_f; /* quick access to an inode field */
 	struct audit_watch	*watch;	/* associated watch */
 	struct audit_tree	*tree;	/* associated watched tree */
+	struct audit_fsnotify_mark	*exe;
 	struct list_head	rlist;	/* entry in audit_{watch,tree}.rules list */
 	struct list_head	list;	/* for AUDIT_LIST* purposes only */
 	u64			prio;
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea94..ed3768f 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@
 #define BGPIOF_UNREADABLE_REG_DIR	BIT(2) /* reg_dir is unreadable */
 #define BGPIOF_BIG_ENDIAN_BYTE_ORDER	BIT(3)
 #define BGPIOF_READ_OUTPUT_REG_SET     BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT		BIT(5) /* only input */
 
 #endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4b7b4eb..e813013 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,9 +118,8 @@
 #define BIO_USER_MAPPED 4	/* contains user pages */
 #define BIO_NULL_MAPPED 5	/* contains invalid user pages */
 #define BIO_QUIET	6	/* Make BIO Quiet */
-#define BIO_SNAP_STABLE	7	/* bio data must be snapshotted during write */
-#define BIO_CHAIN	8	/* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED	9	/* bio has elevated ->bi_cnt */
+#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a622f27..708923b9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1569,8 +1569,8 @@
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t,
-					void **, unsigned long *pfn, long size);
+	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+			unsigned long *pfn);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1588,8 +1588,8 @@
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
-						unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+		void __pmem **addr, unsigned long *pfn, long size);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
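
Callers of the reworked interface now receive a persistent-memory-annotated
mapping. A hedged sketch with error handling trimmed:

	void __pmem *addr;
	unsigned long pfn;
	long avail;

	avail = bdev_direct_access(bdev, sector, &addr, &pfn, size);
	if (avail < 0)
		return avail;	/* e.g. no ->direct_access on this bdev */
	/* up to 'avail' bytes at 'addr' may now be accessed through the
	 * __pmem-aware copy helpers */
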
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae..c836eb2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@
 	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (__force typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
 
 /**
  * READ_ONCE_CTRL - Read a value heading a control dependency
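
The practical effect of the added (__force typeof(x)) cast is that
WRITE_ONCE() no longer trips sparse over address-space or similar annotations
on the target. A sketch, assuming a sparse-annotated field reached through an
illustrative pointer ex:

	struct example {
		int __rcu *p;	/* sparse-annotated pointer */
	};

	/* the union initializer previously produced a sparse warning about
	 * the implicit cast; with the __force cast it compiles clean */
	WRITE_ONCE(ex->p, NULL);
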
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083..8d70e13 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@
 	kernel_cap_t	cap_permitted;	/* caps we're permitted */
 	kernel_cap_t	cap_effective;	/* caps we can actually use */
 	kernel_cap_t	cap_bset;	/* capability bounding set */
+	kernel_cap_t	cap_ambient;	/* Ambient capability set */
 #ifdef CONFIG_KEYS
 	unsigned char	jit_keyring;	/* default keyring to attach requested
 					 * keys to */
@@ -212,6 +213,13 @@
 }
 #endif
 
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+	return cap_issubset(cred->cap_ambient,
+			    cap_intersect(cred->cap_permitted,
+					  cred->cap_inheritable));
+}
+
 /**
  * get_new_cred - Get a reference on a new set of credentials
  * @cred: The new credentials to reference
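
The invariant being checked is ambient <= (permitted & inheritable). A
hedged sketch of how a cred-mutating path would use it; the new_cred handling
around it is illustrative:

	/* refuse any update that would leave the task with ambient caps
	 * it could not otherwise carry across execve() */
	if (!cap_ambient_invariant_ok(new_cred))
		return -EINVAL;
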
diff --git a/include/linux/dax.h b/include/linux/dax.h
new file mode 100644
index 0000000..b415e52
--- /dev/null
+++ b/include/linux/dax.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_DAX_H
+#define _LINUX_DAX_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+		  get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+		dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+		dax_iodone_t);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+				unsigned int flags, get_block_t, dax_iodone_t);
+int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+				unsigned int flags, get_block_t, dax_iodone_t);
+#else
+static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+				pmd_t *pmd, unsigned int flags, get_block_t gb,
+				dax_iodone_t di)
+{
+	return VM_FAULT_FALLBACK;
+}
+#define __dax_pmd_fault dax_pmd_fault
+#endif
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb, iod)		dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod)	__dax_fault(vma, vmf, gb, iod)
+
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
+#endif
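
The !CONFIG_TRANSPARENT_HUGEPAGE stub returns VM_FAULT_FALLBACK, which makes
the VM retry the fault with small pages. A sketch of the filesystem-side
wiring; everything except the dax_pmd_fault() call is illustrative:

	static int example_pmd_fault(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmd,
				     unsigned int flags)
	{
		/* falls back to PTE faults when huge pages are unavailable */
		return dax_pmd_fault(vma, addr, pmd, flags,
				     example_get_block, NULL);
	}
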
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 420311b..9beb636 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -116,6 +116,12 @@
 
 bool debugfs_initialized(void);
 
+ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+			       size_t count, loff_t *ppos);
+
+ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
+				size_t count, loff_t *ppos);
+
 #else
 
 #include <linux/err.h>
@@ -282,6 +288,20 @@
 	return ERR_PTR(-ENODEV);
 }
 
+static inline ssize_t debugfs_read_file_bool(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t debugfs_write_file_bool(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
 #endif
 
 #endif
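
Exporting the bool read/write helpers lets a driver keep the stock parsing
while adding side effects of its own. A hedged sketch; only the debugfs_*
symbols come from this patch, and file->private_data is assumed to point at
the backing bool:

	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
	{
		ssize_t ret = debugfs_write_file_bool(file, buf, count, ppos);

		if (ret > 0)
			example_apply_setting();	/* illustrative hook */
		return ret;
	}

	static const struct file_operations example_fops = {
		.read	= debugfs_read_file_bool,
		.write	= example_write,
		.open	= simple_open,
		.llseek	= default_llseek,
	};
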
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb4..7ea9184 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
 	DMA_MEMSET,
+	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -183,6 +184,8 @@
  *  operation it continues the calculation with new sources
  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
  *  on the result of this operation
+ * @DMA_CTRL_REUSE - the client may reuse the descriptor and resubmit it
+ *  until this flag is cleared or the descriptor is freed
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@
 	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
+	DMA_CTRL_REUSE = (1 << 6),
 };
 
 /**
@@ -400,6 +404,8 @@
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: whether a descriptor can be reused by the client
+ * and resubmitted multiple times
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -408,6 +414,7 @@
 	bool cmd_pause;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
+	bool descriptor_reuse;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@
 	dma_addr_t phys;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@
 };
 
 /**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+	DMAENGINE_ALIGN_1_BYTE = 0,
+	DMAENGINE_ALIGN_2_BYTES = 1,
+	DMAENGINE_ALIGN_4_BYTES = 2,
+	DMAENGINE_ALIGN_8_BYTES = 3,
+	DMAENGINE_ALIGN_16_BYTES = 4,
+	DMAENGINE_ALIGN_32_BYTES = 5,
+	DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@
 	dma_cap_mask_t  cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
-	u8 copy_align;
-	u8 xor_align;
-	u8 pq_align;
-	u8 fill_align;
+	enum dmaengine_alignment copy_align;
+	enum dmaengine_alignment xor_align;
+	enum dmaengine_alignment pq_align;
+	enum dmaengine_alignment fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
@@ -682,6 +705,9 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+		struct dma_chan *chan, struct scatterlist *sg,
+		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@
 	return desc->tx_submit(desc);
 }
 
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+					 size_t off1, size_t off2, size_t len)
 {
 	size_t mask;
 
@@ -1155,6 +1182,39 @@
 }
 #endif
 
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_slave_caps caps;
+
+	dma_get_slave_caps(tx->chan, &caps);
+
+	if (caps.descriptor_reuse) {
+		tx->flags |= DMA_CTRL_REUSE;
+		return 0;
+	} else {
+		return -EPERM;
+	}
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+	tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+	/* freeing is only supported for reusable descriptors, so check first */
+	if (dmaengine_desc_test_reuse(desc))
+		return desc->desc_free(desc);
+	else
+		return -EPERM;
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@
 static inline struct dma_chan
 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
 				  dma_filter_fn fn, void *fn_param,
-				  struct device *dev, char *name)
+				  struct device *dev, const char *name)
 {
 	struct dma_chan *chan;
 
@@ -1177,6 +1237,9 @@
 	if (chan)
 		return chan;
 
+	if (!fn || !fn_param)
+		return NULL;
+
 	return __dma_request_channel(mask, fn, fn_param);
 }
 #endif /* DMAENGINE_H */
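
A hedged sketch of the new descriptor-reuse flow; channel and buffer setup
are assumed, and dmaengine_prep_slave_single() is the usual prep helper:

	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT);
	if (txd && dmaengine_desc_set_reuse(txd) == 0) {
		/* the descriptor now survives completion and may be
		 * resubmitted via dmaengine_submit() */
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
		/* ... after the final completion, free it explicitly: */
		dmaengine_desc_free(txd);
	}
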
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e1043f7..53ba737 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,6 +24,12 @@
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle);
 
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+				    dma_addr_t *handle)
+{
+	return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
 /*
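
dma_pool_zalloc() is plain sugar for passing __GFP_ZERO, mirroring kzalloc():

	dma_addr_t handle;
	void *vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &handle);

	if (!vaddr)
		return -ENOMEM;
	/* vaddr is zero-filled; handle holds the device-visible address */
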
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a..25c6324 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@
 
 #define GET_DENTRY_SLOTS(x)	((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
 
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK	214
-
 /* MAX level for dir lookup */
 #define MAX_DIR_HASH_DEPTH	63
 
 /* MAX buckets in one level of dir */
 #define MAX_DIR_BUCKETS		(1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
 
+/*
+ * space utilization of regular dentry and inline dentry
+ *		regular dentry			inline dentry
+ * bitmap	1 * 27 = 27			1 * 23 = 23
+ * reserved	1 * 3 = 3			1 * 7 = 7
+ * dentry	11 * 214 = 2354			11 * 182 = 2002
+ * filename	8 * 214 = 1712			8 * 182 = 1456
+ * total	4096				3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry, so this must be handled carefully when converting inline dentries.
+ */
+#define NR_DENTRY_IN_BLOCK	214	/* the number of dentry in a block */
 #define SIZE_OF_DIR_ENTRY	11	/* by byte */
 #define SIZE_OF_DENTRY_BITMAP	((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
 					BITS_PER_BYTE)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 043f328..bc9afa7 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -788,7 +788,7 @@
 
 extern const char *fb_mode_option;
 extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[65];
 extern const struct dmt_videomode dmt_modes[];
 
 struct fb_modelist {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fbd780c..72d8a84 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,7 +1,6 @@
 #ifndef _LINUX_FS_H
 #define _LINUX_FS_H
 
-
 #include <linux/linkage.h>
 #include <linux/wait.h>
 #include <linux/kdev_t.h>
@@ -30,6 +29,7 @@
 #include <linux/lockdep.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/blk_types.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
@@ -51,7 +52,6 @@
 struct seq_file;
 struct workqueue_struct;
 struct iov_iter;
-struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -636,7 +636,7 @@
 	unsigned long		dirtied_time_when;
 
 	struct hlist_node	i_hash;
-	struct list_head	i_wb_list;	/* backing dev IO list */
+	struct list_head	i_io_list;	/* backing dev IO list */
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct bdi_writeback	*i_wb;		/* the associated cgroup wb */
 
@@ -943,12 +943,18 @@
 
 struct lock_manager {
 	struct list_head list;
+	/*
+	 * NFSv4 and up also want opens blocked during the grace period;
+	 * NLM doesn't care:
+	 */
+	bool block_opens;
 };
 
 struct net;
 void locks_start_grace(struct net *, struct lock_manager *);
 void locks_end_grace(struct lock_manager *);
 int locks_in_grace(struct net *);
+int opens_in_grace(struct net *);
 
 /* that will die - we need it for nfs_lock_info */
 #include <linux/nfs_fs_i.h>
@@ -1275,16 +1281,9 @@
 #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
 
 struct sb_writers {
-	/* Counters for counting writers at each level */
-	struct percpu_counter	counter[SB_FREEZE_LEVELS];
-	wait_queue_head_t	wait;		/* queue for waiting for
-						   writers / faults to finish */
-	int			frozen;		/* Is sb frozen? */
-	wait_queue_head_t	wait_unfrozen;	/* queue for waiting for
-						   sb to be thawed */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	lock_map[SB_FREEZE_LEVELS];
-#endif
+	int				frozen;		/* Is sb frozen? */
+	wait_queue_head_t		wait_unfrozen;	/* for get_super_thawed() */
+	struct percpu_rw_semaphore	rw_sem[SB_FREEZE_LEVELS];
 };
 
 struct super_block {
@@ -1310,7 +1309,6 @@
 #endif
 	const struct xattr_handler **s_xattr;
 
-	struct list_head	s_inodes;	/* all inodes */
 	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
 	struct block_device	*s_bdev;
@@ -1376,11 +1374,18 @@
 	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
 	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
 	struct rcu_head		rcu;
+	struct work_struct	destroy_work;
+
+	struct mutex		s_sync_lock;	/* sync serialisation lock */
 
 	/*
 	 * Indicates how deep in a filesystem stack this SB is
 	 */
 	int s_stack_depth;
+
+	/* s_inode_list_lock protects s_inodes */
+	spinlock_t		s_inode_list_lock ____cacheline_aligned_in_smp;
+	struct list_head	s_inodes;	/* all inodes */
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1392,6 +1397,11 @@
 void __sb_end_write(struct super_block *sb, int level);
 int __sb_start_write(struct super_block *sb, int level, bool wait);
 
+#define __sb_writers_acquired(sb, lev)	\
+	percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev)	\
+	percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+
 /**
  * sb_end_write - drop write access to a superblock
  * @sb: the super we wrote to
@@ -1612,7 +1622,6 @@
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
@@ -2609,7 +2618,7 @@
 extern void __remove_inode_hash(struct inode *);
 static inline void remove_inode_hash(struct inode *inode)
 {
-	if (!inode_unhashed(inode))
+	if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
 		__remove_inode_hash(inode);
 }
 
@@ -2668,19 +2677,6 @@
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
-		  get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-		dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-		dax_iodone_t);
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod)		dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod)	__dax_fault(vma, vmf, gb, iod)
-
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
 			    loff_t file_offset);
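
With per-level percpu rwsems, write (freeze) protection becomes a plain
read-lock and freezing a write-lock; the new __sb_writers_acquired/
__sb_writers_release macros appear to exist so code that hands lock state to
another context can keep lockdep informed. The common pattern is unchanged:

	sb_start_write(sb);	/* percpu_down_read() on rw_sem[SB_FREEZE_WRITE-1] */
	/* ... modify the filesystem ... */
	sb_end_write(sb);	/* percpu_up_read() */
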
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321e..0023088 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@
 
 	u32 nand_stat;
 	wait_queue_head_t nand_wait;
+	bool little_endian;
 };
 
 extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
 
+static inline u32 ifc_in32(void __iomem *addr)
+{
+	u32 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread32(addr);
+	else
+		val = ioread32be(addr);
+
+	return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+	u16 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread16(addr);
+	else
+		val = ioread16be(addr);
+
+	return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite32(val, addr);
+	else
+		iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite16(val, addr);
+	else
+		iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+	iowrite8(val, addr);
+}
 
 #endif /* __ASM_FSL_IFC_H */
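
All IFC register access funnels through these accessors, which pick the I/O
byte order from the probe-time little_endian flag. A sketch; the register
path through the ctrl structure is assumed for illustration:

	u32 cspr;

	cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[cs].cspr);
	ifc_out32(cspr | CSPR_V, &fsl_ifc_ctrl_dev->regs->cspr_cs[cs].cspr);
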
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517d..533c440 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@
 #define FSNOTIFY_EVENT_INODE	2
 
 /*
- * a mark is simply an object attached to an in core inode which allows an
+ * A mark is simply an object attached to an in core inode which allows an
  * fsnotify listener to indicate they are either no longer interested in events
  * of a type matching mask or only interested in those events.
  *
- * these are flushed when an inode is evicted from core and may be flushed
- * when the inode is modified (as seen by fsnotify_access).  Some fsnotify users
- * (such as dnotify) will flush these when the open fd is closed and not at
- * inode eviction or modification.
+ * These are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access).  Some fsnotify
+ * users (such as dnotify) will flush these when the open fd is closed and not
+ * at inode eviction or modification.
+ *
+ * Text in brackets shows the lock(s) protecting modifications of a
+ * particular entry. obj_lock means either inode->i_lock or
+ * mnt->mnt_root->d_lock depending on the mark type.
  */
 struct fsnotify_mark {
-	__u32 mask;			/* mask this mark is for */
-	/* we hold ref for each i_list and g_list.  also one ref for each 'thing'
+	/* Mask this mark is for [mark->lock, group->mark_mutex] */
+	__u32 mask;
+	/* We hold one for presence in g_list. Also one ref for each 'thing'
 	 * in kernel that found and may be using this mark. */
-	atomic_t refcnt;		/* active things looking at this mark */
-	struct fsnotify_group *group;	/* group this mark is for */
-	struct list_head g_list;	/* list of marks by group->i_fsnotify_marks
-					 * Also reused for queueing mark into
-					 * destroy_list when it's waiting for
-					 * the end of SRCU period before it can
-					 * be freed */
-	spinlock_t lock;		/* protect group and inode */
-	struct hlist_node obj_list;	/* list of marks for inode / vfsmount */
-	struct list_head free_list;	/* tmp list used when freeing this mark */
-	union {
+	atomic_t refcnt;
+	/* Group this mark is for. Set on mark creation, stable until last ref
+	 * is dropped */
+	struct fsnotify_group *group;
+	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
+	 * mark into destroy_list when it's waiting for the end of SRCU period
+	 * before it can be freed. [group->mark_mutex] */
+	struct list_head g_list;
+	/* Protects inode / mnt pointers, flags, masks */
+	spinlock_t lock;
+	/* List of marks for inode / vfsmount [obj_lock] */
+	struct hlist_node obj_list;
+	union {	/* Object pointer [mark->lock, group->mark_mutex] */
 		struct inode *inode;	/* inode this mark is associated with */
 		struct vfsmount *mnt;	/* vfsmount this mark is associated with */
 	};
-	__u32 ignored_mask;		/* events types to ignore */
+	/* Events types to ignore [mark->lock, group->mark_mutex] */
+	__u32 ignored_mask;
 #define FSNOTIFY_MARK_FLAG_INODE		0x01
 #define FSNOTIFY_MARK_FLAG_VFSMOUNT		0x02
 #define FSNOTIFY_MARK_FLAG_OBJECT_PINNED	0x04
 #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY	0x08
 #define FSNOTIFY_MARK_FLAG_ALIVE		0x10
-	unsigned int flags;		/* vfsmount or inode mark? */
+#define FSNOTIFY_MARK_FLAG_ATTACHED		0x20
+	unsigned int flags;		/* flags [mark->lock] */
 	void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
 };
 
@@ -345,8 +354,10 @@
 /* given a group and a mark, flag mark to be freed when all references are dropped */
 extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 				  struct fsnotify_group *group);
-extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
-					 struct fsnotify_group *group);
+/* detach mark from inode / mount list, group list, drop inode reference */
+extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
+/* free mark */
+extern void fsnotify_free_mark(struct fsnotify_mark *mark);
 /* run all the marks in a group, and clear all of the vfsmount marks */
 extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
 /* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@
 extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
 extern void fsnotify_get_mark(struct fsnotify_mark *mark);
 extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct list_head *list);
+extern void fsnotify_unmount_inodes(struct super_block *sb);
 
 /* put here because inotify does some weird stuff when destroying watches */
 extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@
 	return 0;
 }
 
-static inline void fsnotify_unmount_inodes(struct list_head *list)
+static inline void fsnotify_unmount_inodes(struct super_block *sb)
 {}
 
 #endif	/* CONFIG_FSNOTIFY */
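
The detach/free split replaces fsnotify_destroy_mark_locked(). A sketch of
the two-phase teardown, mirroring what fsnotify_destroy_mark() is expected
to do internally:

	mutex_lock(&group->mark_mutex);
	fsnotify_detach_mark(mark);	/* unhook from object and group lists */
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);	/* freed once SRCU readers are done */
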
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb1..7ff168d 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@
 
 	genpool_algo_t algo;		/* allocation function */
 	void *data;
+
+	const char *name;
 };
 
 /*
@@ -118,8 +120,8 @@
 		unsigned long start, unsigned int nr, void *data);
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
-		int min_alloc_order, int nid);
-extern struct gen_pool *gen_pool_get(struct device *dev);
+		int min_alloc_order, int nid, const char *name);
+extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
 
 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 			size_t size);
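
Pools are now looked up by name, so one device can expose several. A hedged
sketch; the "sram" label is illustrative:

	struct gen_pool *pool = gen_pool_get(dev, "sram");

	if (!pool)
		return -ENODEV;	/* no pool of that name on this device */
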
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ad35f30..f92cbd2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -63,7 +63,10 @@
  * but it is definitely preferable to use the flag rather than opencode endless
  * loop around allocator.
  *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed.  The OOM killer is not called with the current
+ * implementation.
  *
  * __GFP_MOVABLE: Flag that this page will be movable by the page migration
  * mechanism or reclaimed
@@ -300,22 +303,31 @@
 	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
-						unsigned int order)
+/*
+ * Allocate pages, preferring the node given as nid. The node must be valid and
+ * online. For a more general interface, see alloc_pages_node().
+ */
+static inline struct page *
+__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
-	/* Unknown node is current node */
-	if (nid < 0)
-		nid = numa_node_id();
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON(!node_online(nid));
 
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
-static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+/*
+ * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
+ * prefer the current CPU's closest node. Otherwise node must be valid and
+ * online.
+ */
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
 
-	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+	return __alloc_pages_node(nid, gfp_mask, order);
 }
 
 #ifdef CONFIG_NUMA
@@ -354,7 +366,6 @@
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
-/* This is different from alloc_pages_exact_node !!! */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
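
After the rework, NUMA_NO_NODE means "the current CPU's nearest memory
node", while __alloc_pages_node() is the fast path for callers that already
hold a valid, online nid:

	struct page *page;

	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);	/* local node */
	page = __alloc_pages_node(nid, GFP_KERNEL, 0);	/* nid must be valid and online */
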
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255..14cac67 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@
 int gpiod_count(struct device *dev, const char *con_id);
 
 /* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
 					 const char *con_id,
 					 enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags);
@@ -70,18 +70,18 @@
 void gpiod_put(struct gpio_desc *desc);
 void gpiod_put_array(struct gpio_descs *descs);
 
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags);
 struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 			      unsigned int index, enum gpiod_flags flags);
 struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
 						     const char *con_id,
@@ -146,31 +146,31 @@
 	return 0;
 }
 
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
-						const char *con_id,
-						enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+						       const char *con_id,
+						       enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
-		  const char *con_id,
-		  unsigned int idx,
-		  enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+		const char *con_id,
+		unsigned int idx,
+		enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
-		     enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+		   enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
-			   unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+			 unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -206,7 +206,7 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
 		 const char *con_id,
 		 enum gpiod_flags flags)
 {
@@ -214,7 +214,7 @@
 }
 static inline
 struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
 		       const char *con_id,
 		       unsigned int idx,
 		       enum gpiod_flags flags)
@@ -223,14 +223,14 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
 			  enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 				unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@
 
 #endif /* CONFIG_GPIOLIB */
 
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...)			\
-	__gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...)				\
-	__gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...)			\
-	__devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...)				\
-	__devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...)		\
-	__devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...)				\
-	__devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...)			\
-	__devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
 #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
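
As a usage sketch for the renamed devm API above: with the vararg transition macros removed, the flags argument to gpiod_get() and friends is now mandatory. Driver and con_id names below are illustrative, not part of this patch:

static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* flags can no longer be omitted; GPIOD_OUT_LOW requests the
	 * line as an output, initially driven to its inactive level */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);	/* assert reset */
	gpiod_set_value(reset, 0);	/* deassert */
	return 0;
}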
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd..1aed31c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/lockdep.h>
 #include <linux/pinctrl/pinctrl.h>
 
 struct device;
@@ -64,6 +65,17 @@
  *	registers.
  * @irq_not_threaded: flag must be set if @can_sleep is set but the
  *	IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ *	between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ *	for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ *	initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ *	provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
 * they can all be accessed through a common programming interface.
@@ -126,6 +138,7 @@
 	irq_flow_handler_t	irq_handler;
 	unsigned int		irq_default_type;
 	int			irq_parent;
+	struct lock_class_key	*lock_key;
 #endif
 
 #if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@
 		int parent_irq,
 		irq_flow_handler_t parent_handler);
 
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-		struct irq_chip *irqchip,
-		unsigned int first_irq,
-		irq_flow_handler_t handler,
-		unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...)				\
+(								\
+	({							\
+		static struct lock_class_key _key;		\
+		_gpiochip_irqchip_add(__VA_ARGS__, &_key);	\
+	})							\
+)
+#else
+#define gpiochip_irqchip_add(...)				\
+	_gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
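Call sites are unaffected by the lockdep plumbing: gpiochip_irqchip_add() keeps its five-argument form, and under CONFIG_LOCKDEP the macro quietly instantiates one static lock_class_key per call site, giving each GPIO irqchip its own lockdep class. A sketch of an unchanged caller (irq_chip name illustrative):

	ret = gpiochip_irqchip_add(&foo->gc, &foo_irq_chip,
				   0, handle_simple_irq,
				   IRQ_TYPE_NONE);
	if (ret) {
		dev_err(dev, "could not add irqchip\n");
		return ret;
	}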
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e270614..c0d712d 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@
 }
 
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
 
 #endif /* __LINUX_GPIO_MACHINE_H */
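
gpiod_remove_lookup_table() gives board code a teardown path symmetric with gpiod_add_lookup_table(), which modular or hot-unpluggable users previously lacked. A sketch with an illustrative table:

static struct gpiod_lookup_table foo_gpios = {
	.dev_id = "foo.0",
	.table = {
		GPIO_LOOKUP("gpio-bank0", 3, "reset", GPIO_ACTIVE_LOW),
		{ },
	},
};

	gpiod_add_lookup_table(&foo_gpios);
	/* ... device lifetime ... */
	gpiod_remove_lookup_table(&foo_gpios);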
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f10b20f..ecb080d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+			unsigned long pfn, bool write);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@
 #endif
 extern int hugepage_madvise(struct vm_area_struct *vma,
 			    unsigned long *vm_flags, int advice);
-extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
@@ -138,15 +140,6 @@
 	else
 		return 0;
 }
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
-					 unsigned long start,
-					 unsigned long end,
-					 long adjust_next)
-{
-	if (!vma->anon_vma || vma->vm_ops)
-		return;
-	__vma_adjust_trans_huge(vma, start, end, adjust_next);
-}
 static inline int hpage_nr_pages(struct page *page)
 {
 	if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@
 	return ACCESS_ONCE(huge_zero_page) == page;
 }
 
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+	return is_huge_zero_page(pmd_page(pmd));
+}
+
+struct page *get_huge_zero_page(void);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
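
The new vmf_insert_pfn_pmd() above lets a huge-page-aware fault handler (DAX being the intended user) install a PMD-sized PFN mapping in one shot. A one-line sketch, assuming vma, address, pmd, pfn and write come from the fault context:

	/* returns a VM_FAULT_* code for the fault handler to propagate */
	return vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);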
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d891f94..5e35379 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,9 @@
 	struct kref refs;
 	spinlock_t lock;
 	struct list_head regions;
+	long adds_in_progress;
+	struct list_head region_cache;
+	long region_cache_count;
 };
 extern struct resv_map *resv_map_alloc(void);
 void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
 						vm_flags_t vm_flags);
-void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+						long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 void free_huge_page(struct page *page);
+void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+extern struct mutex *hugetlb_fault_mutex_table;
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+				struct vm_area_struct *vma,
+				struct address_space *mapping,
+				pgoff_t idx, unsigned long address);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@
 #endif
 };
 
+struct page *alloc_huge_page(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve);
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			pgoff_t idx);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@
 
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
+#define alloc_huge_page(v, a, r) NULL
 #define alloc_huge_page_node(h, nid) NULL
 #define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
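
The newly exported fault mutex table serializes faults and truncations hitting the same (mapping, index) pair. A sketch of the expected locking pattern, assuming the usual hstate/mm/vma context is in scope:

	u32 hash;

	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... page-cache lookup / allocation / reservation work ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);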
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e83a738..768063b 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -121,6 +121,9 @@
 extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
 					  u8 command, u8 length,
 					  const u8 *values);
+extern s32
+i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+					  u8 command, u8 length, u8 *values);
 #endif /* I2C */
 
 /**
@@ -550,11 +553,12 @@
 void i2c_unlock_adapter(struct i2c_adapter *);
 
 /*flags for the client struct: */
-#define I2C_CLIENT_PEC	0x04		/* Use Packet Error Checking */
-#define I2C_CLIENT_TEN	0x10		/* we have a ten bit chip address */
+#define I2C_CLIENT_PEC		0x04	/* Use Packet Error Checking */
+#define I2C_CLIENT_TEN		0x10	/* we have a ten bit chip address */
 					/* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_WAKE	0x80		/* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB	0x9000		/* Use Omnivision SCCB protocol */
+#define I2C_CLIENT_SLAVE	0x20	/* we are the slave */
+#define I2C_CLIENT_WAKE		0x80	/* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB		0x9000	/* Use Omnivision SCCB protocol */
 					/* Must match I2C_M_STOP|IGNORE_NAK */
 
 /* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@
 /* must call put_device() when done with returned i2c_adapter device */
 extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
 
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
 #else
 
 static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@
 {
 	return NULL;
 }
+
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+	return NULL;
+}
 #endif /* CONFIG_OF */
 
 #endif /* _LINUX_I2C_H */
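
The _or_emulated variant falls back to shorter SMBus transfers when the adapter cannot do native I2C block reads, so drivers get one call that works across adapter capabilities. A usage sketch (register address and buffer size illustrative):

	u8 buf[8];
	s32 status;

	status = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x10,
							   sizeof(buf), buf);
	if (status < 0)
		return status;
	/* on success, status is the number of bytes actually read */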
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
deleted file mode 100644
index 02bf6ea..0000000
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Atmel maXTouch Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
-
-#include <linux/types.h>
-
-/* The platform data for the Atmel maXTouch touchscreen driver */
-struct mxt_platform_data {
-	unsigned long irqflags;
-	u8 t19_num_keys;
-	const unsigned int *t19_keymap;
-};
-
-#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
deleted file mode 100644
index 7bae83b..0000000
--- a/include/linux/input/pixcir_ts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef	_PIXCIR_I2C_TS_H
-#define	_PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE	51
-#define PIXCIR_REG_INT_MODE	52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- *       clocks are cut and it can only be brought out of this mode
- *	 using the RESET pin.
- */
-enum pixcir_power_mode {
-	PIXCIR_POWER_ACTIVE,
-	PIXCIR_POWER_IDLE,
-	PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK	0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted druing touch
- *
- */
-enum pixcir_int_mode {
-	PIXCIR_INT_PERIODICAL,
-	PIXCIR_INT_DIFF_COORD,
-	PIXCIR_INT_LEVEL_TOUCH,
-	PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK	0x03
-#define PIXCIR_INT_ENABLE	(1UL << 3)
-#define PIXCIR_INT_POL_HIGH	(1UL << 2)
-
-/**
- * struct pixcir_irc_chip_data - chip related data
- * @max_fingers:	Max number of fingers reported simultaneously by h/w
- * @has_hw_ids:		Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
-	u8 max_fingers;
-	bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
-	int x_max;
-	int y_max;
-	int gpio_attb;		/* GPIO connected to ATTB line */
-	struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea..c91e137 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
 #ifndef _TOUCHSCREEN_H
 #define _TOUCHSCREEN_H
 
-#include <linux/input.h>
+struct input_dev;
 
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev,
-					       bool multitouch)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
 
 #endif
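
The rename reflects that the helper now reads generic device properties rather than DT nodes only, so the CONFIG_OF stub is gone and callers invoke it unconditionally. A probe-path sketch (axis ranges are illustrative defaults):

	input_set_abs_params(input, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, 1023, 0, 0);

	/* let firmware-provided properties override the defaults above */
	touchscreen_parse_properties(input, false);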
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d..6240063 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@
 
 #ifdef CONFIG_INTEL_IOMMU
 	unsigned long 	*domain_ids; /* bitmap of domains */
-	struct dmar_domain **domains; /* ptr to domains */
+	struct dmar_domain ***domains; /* ptr to domains */
 	spinlock_t	lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
 
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c27dde7..e399029 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bug.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 
 /*
diff --git a/include/linux/io.h b/include/linux/io.h
index fb5a998..de64c1e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
 
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
 #include <asm/io.h>
 #include <asm/page.h>
 
 struct device;
+struct resource;
 
 __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@
 			const unsigned char *signature, int length);
 void devm_ioremap_release(struct device *dev, void *res);
 
+void *devm_memremap(struct device *dev, resource_size_t offset,
+		size_t size, unsigned long flags);
+void devm_memunmap(struct device *dev, void *addr);
+
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	/*
+	 * Fail attempts to call devm_memremap_pages() without
+	 * ZONE_DEVICE support enabled; this requires callers to fall
+	 * back to plain devm_memremap() based on config
+	 */
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-ENXIO);
+}
+#endif
+
 /*
  * Some systems do not have legacy ISA devices.
  * /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@
 #endif
 #endif
 
+enum {
+	/* See memremap() kernel-doc for usage description... */
+	MEMREMAP_WB = 1 << 0,
+	MEMREMAP_WT = 1 << 1,
+};
+
+void *memremap(resource_size_t offset, size_t size, unsigned long flags);
+void memunmap(void *addr);
+
 #endif /* _LINUX_IO_H */
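
memremap() is the arch-neutral way to map RAM-like ranges where a plain pointer (not an __iomem token) is wanted; the flags name the acceptable cache modes. A sketch, assuming res describes a memory resource:

	void *addr;

	addr = memremap(res->start, resource_size(res), MEMREMAP_WB);
	if (!addr)
		return -ENOMEM;
	/* addr may be used like ordinary memory, e.g. with memcpy() */
	memunmap(addr);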
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index bf982e0..71e4faf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -104,6 +104,8 @@
 #define GICR_SYNCR			0x00C0
 #define GICR_MOVLPIR			0x0100
 #define GICR_MOVALLR			0x0110
+#define GICR_ISACTIVER			GICD_ISACTIVER
+#define GICR_ICACTIVER			GICD_ICACTIVER
 #define GICR_IDREGS			GICD_IDREGS
 #define GICR_PIDR2			GICD_PIDR2
 
@@ -288,6 +290,7 @@
 #define ICH_VMCR_PMR_MASK		(0xffUL << ICH_VMCR_PMR_SHIFT)
 
 #define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_DIR_EL1			sys_reg(3, 0, 12, 11, 1)
 #define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
 #define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
 #define ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
@@ -385,6 +388,12 @@
 	isb();
 }
 
+static inline void gic_write_dir(u64 irq)
+{
+	asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
+	isb();
+}
+
 struct irq_domain;
 int its_cpu_init(void);
 int its_init(struct device_node *node, struct rdists *rdists,
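
ICC_DIR_EL1 only matters once EOImode is set, which splits interrupt completion in two: EOIR drops the running priority, DIR deactivates. A simplified sketch of the split flow (handler invocation elided):

	u64 irqnr = gic_read_iar();

	gic_write_eoir(irqnr);		/* priority drop */
	/* ... handle the interrupt ... */
	gic_write_dir(irqnr);		/* deactivate (EOImode == 1 only) */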
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 65da435..af3d29f 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
 #define GIC_CPU_ALIAS_BINPOINT		0x1c
 #define GIC_CPU_ACTIVEPRIO		0xd0
 #define GIC_CPU_IDENT			0xfc
+#define GIC_CPU_DEACTIVATE		0x1000
 
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
+
+#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023
 #define GICC_DIS_BYPASS_MASK		0x1e0
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad37..4e68616 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
 
 /* Shared Global Counter */
 #define GIC_SH_COUNTER_31_00_OFS	0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS		GIC_SH_COUNTER_31_00_OFS
 #define GIC_SH_COUNTER_63_32_OFS	0x0014
 #define GIC_SH_REVISIONID_OFS		0x0020
 
 /* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr)		(((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr)		((intr) % 32)
+#define GIC_INTR_OFS(intr) ({				\
+	unsigned bits = mips_cm_is64 ? 64 : 32;		\
+	unsigned reg_idx = (intr) / bits;		\
+	unsigned reg_width = bits / 8;			\
+							\
+	reg_idx * reg_width;				\
+})
+#define GIC_INTR_BIT(intr)		((intr) % (mips_cm_is64 ? 64 : 32))
 
 /* Polarity : Reset Value is always 0 */
 #define GIC_SH_SET_POLARITY_OFS		0x0100
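
A worked example of the width-aware offsets, for interrupt 40:

	/* 32-bit CM:  GIC_INTR_OFS(40) = (40 / 32) * (32 / 8) = 4
	 *             GIC_INTR_BIT(40) = 40 % 32 = 8
	 * 64-bit CM3: GIC_INTR_OFS(40) = (40 / 64) * (64 / 8) = 0
	 *             GIC_INTR_BIT(40) = 40 % 64 = 40
	 */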
@@ -98,6 +106,8 @@
 #define GIC_VPE_WD_COUNT0_OFS		0x0094
 #define GIC_VPE_WD_INITIAL0_OFS		0x0098
 #define GIC_VPE_COMPARE_LO_OFS		0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS		GIC_VPE_COMPARE_LO_OFS
 #define GIC_VPE_COMPARE_HI_OFS		0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE_OFS	0x0100
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d326152..0000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs.  Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks.  By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
-	__jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...)    /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
-	return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
-	free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle.  Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process.  To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time.  When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s	journal_t;	/* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK	1
-#define JFS_COMMIT_BLOCK	2
-#define JFS_SUPERBLOCK_V1	3
-#define JFS_SUPERBLOCK_V2	4
-#define JFS_REVOKE_BLOCK	5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
-	__be32		h_magic;
-	__be32		h_blocktype;
-	__be32		h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
-	__be32		t_blocknr;	/* The on-disk block number */
-	__be32		t_flags;	/* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
-	journal_header_t r_header;
-	__be32		 r_count;	/* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE		1	/* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID	2	/* block has same uuid as previous */
-#define JFS_FLAG_DELETED	4	/* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG	8	/* last tag in this descriptor block */
-
-
-/*
- * The journal superblock.  All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
-	journal_header_t s_header;
-
-/* 0x000C */
-	/* Static information describing the journal */
-	__be32	s_blocksize;		/* journal device blocksize */
-	__be32	s_maxlen;		/* total blocks in journal file */
-	__be32	s_first;		/* first block of log information */
-
-/* 0x0018 */
-	/* Dynamic information describing the current state of the log */
-	__be32	s_sequence;		/* first commit ID expected in log */
-	__be32	s_start;		/* blocknr of start of log */
-
-/* 0x0020 */
-	/* Error value, as set by journal_abort(). */
-	__be32	s_errno;
-
-/* 0x0024 */
-	/* Remaining fields are only valid in a version-2 superblock */
-	__be32	s_feature_compat;	/* compatible feature set */
-	__be32	s_feature_incompat;	/* incompatible feature set */
-	__be32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/* 0x0030 */
-	__u8	s_uuid[16];		/* 128-bit uuid for journal */
-
-/* 0x0040 */
-	__be32	s_nr_users;		/* Nr of filesystems sharing log */
-
-	__be32	s_dynsuper;		/* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
-	__be32	s_max_transaction;	/* Limit of journal blocks per trans.*/
-	__be32	s_max_trans_data;	/* Limit of data blocks per trans. */
-
-/* 0x0050 */
-	__u32	s_padding[44];
-
-/* 0x0100 */
-	__u8	s_users[16*48];		/* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask)					\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE	0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES	0
-#define JFS_KNOWN_ROCOMPAT_FEATURES	0
-#define JFS_KNOWN_INCOMPAT_FEATURES	JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
-	BH_JBD			/* Has an attached ext3 journal_head */
-	  = BH_PrivateStart,
-	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
-	BH_Freed,		/* Has been freed (truncated) */
-	BH_Revoked,		/* Has been revoked from the log */
-	BH_RevokeValid,		/* Revoked flag is valid */
-	BH_JBDDirty,		/* Is dirty but journaled */
-	BH_State,		/* Pins most journal_head state */
-	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
-	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
-	BH_JBDPrivateStart,	/* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert)	BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr)	J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr)	J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...)		J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...)	J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...)	J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...)					     \
-	({								     \
-		int val = (expr);					     \
-		if (!val) {						     \
-			printk(KERN_ERR					     \
-				"EXT3-fs unexpected failure: %s;\n",# expr); \
-			printk(KERN_ERR why "\n");			     \
-		}							     \
-		val;							     \
-	})
-#define J_EXPECT(expr, why...)		__journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...)	__journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...)	__journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
-	/* Which compound transaction is this update a part of? */
-	transaction_t		*h_transaction;
-
-	/* Number of remaining buffers we are allowed to dirty: */
-	int			h_buffer_credits;
-
-	/* Reference count on this handle */
-	int			h_ref;
-
-	/* Field for caller's use to track errors through large fs */
-	/* operations */
-	int			h_err;
-
-	/* Flags [no locking] */
-	unsigned int	h_sync:		1;	/* sync-on-close */
-	unsigned int	h_jdata:	1;	/* force data journaling */
-	unsigned int	h_aborted:	1;	/* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism.  It
- * tracks a compound transaction through its various states:
- *
- * RUNNING:	accepting new updates
- * LOCKED:	Updates still running but we don't accept new ones
- * RUNDOWN:	Updates are tidying up but have finished requesting
- *		new buffers to modify (state not used for now)
- * FLUSH:       All updates complete, but we are still writing to disk
- * COMMIT:      All data on disk, writing commit record
- * FINISHED:	We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- *    j_list_lock
- *      ->jbd_lock_bh_journal_head()	(This is "innermost")
- *
- *    j_state_lock
- *    ->jbd_lock_bh_state()
- *
- *    jbd_lock_bh_state()
- *    ->j_list_lock
- *
- *    j_state_lock
- *    ->t_handle_lock
- *
- *    j_state_lock
- *    ->j_list_lock			(journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
-	/* Pointer to the journal for this transaction. [no locking] */
-	journal_t		*t_journal;
-
-	/* Sequence number for this transaction [no locking] */
-	tid_t			t_tid;
-
-	/*
-	 * Transaction's current state
-	 * [no locking - only kjournald alters this]
-	 * [j_list_lock] guards transition of a transaction into T_FINISHED
-	 * state and subsequent call of __journal_drop_transaction()
-	 * FIXME: needs barriers
-	 * KLUDGE: [use j_state_lock]
-	 */
-	enum {
-		T_RUNNING,
-		T_LOCKED,
-		T_FLUSH,
-		T_COMMIT,
-		T_COMMIT_RECORD,
-		T_FINISHED
-	}			t_state;
-
-	/*
-	 * Where in the log does this transaction's commit start? [no locking]
-	 */
-	unsigned int		t_log_start;
-
-	/* Number of buffers on the t_buffers list [j_list_lock] */
-	int			t_nr_buffers;
-
-	/*
-	 * Doubly-linked circular list of all buffers reserved but not yet
-	 * modified by this transaction [j_list_lock]
-	 */
-	struct journal_head	*t_reserved_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers under writeout during
-	 * commit [j_list_lock]
-	 */
-	struct journal_head	*t_locked_list;
-
-	/*
-	 * Doubly-linked circular list of all metadata buffers owned by this
-	 * transaction [j_list_lock]
-	 */
-	struct journal_head	*t_buffers;
-
-	/*
-	 * Doubly-linked circular list of all data buffers still to be
-	 * flushed before this transaction can be committed [j_list_lock]
-	 */
-	struct journal_head	*t_sync_datalist;
-
-	/*
-	 * Doubly-linked circular list of all forget buffers (superseded
-	 * buffers which we can un-checkpoint once this transaction commits)
-	 * [j_list_lock]
-	 */
-	struct journal_head	*t_forget;
-
-	/*
-	 * Doubly-linked circular list of all buffers still to be flushed before
-	 * this transaction can be checkpointed. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers submitted for IO while
-	 * checkpointing. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_io_list;
-
-	/*
-	 * Doubly-linked circular list of temporary buffers currently undergoing
-	 * IO in the log [j_list_lock]
-	 */
-	struct journal_head	*t_iobuf_list;
-
-	/*
-	 * Doubly-linked circular list of metadata buffers being shadowed by log
-	 * IO.  The IO buffers on the iobuf list and the shadow buffers on this
-	 * list match each other one for one at all times. [j_list_lock]
-	 */
-	struct journal_head	*t_shadow_list;
-
-	/*
-	 * Doubly-linked circular list of control buffers being written to the
-	 * log. [j_list_lock]
-	 */
-	struct journal_head	*t_log_list;
-
-	/*
-	 * Protects info related to handles
-	 */
-	spinlock_t		t_handle_lock;
-
-	/*
-	 * Number of outstanding updates running on this transaction
-	 * [t_handle_lock]
-	 */
-	int			t_updates;
-
-	/*
-	 * Number of buffers reserved for use by all handles in this transaction
-	 * handle but not yet modified. [t_handle_lock]
-	 */
-	int			t_outstanding_credits;
-
-	/*
-	 * Forward and backward links for the circular list of all transactions
-	 * awaiting checkpoint. [j_list_lock]
-	 */
-	transaction_t		*t_cpnext, *t_cpprev;
-
-	/*
-	 * When will the transaction expire (become due for commit), in jiffies?
-	 * [no locking]
-	 */
-	unsigned long		t_expires;
-
-	/*
-	 * When this transaction started, in nanoseconds [no locking]
-	 */
-	ktime_t			t_start_time;
-
-	/*
-	 * How many handles used this transaction? [t_handle_lock]
-	 */
-	int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags:  General journaling state flags
- * @j_errno:  Is there an outstanding uncleared error on the journal (from a
- *     prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count:  Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- *  waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- *  to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint:  Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- *  journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- *     journal
- * @j_fs_dev: Device which holds the client fs.  For internal journal this will
- *     be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal.  If present, all journal
- *     block numbers are mapped into this inode via bmap().
- * @j_tail_sequence:  Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- *  transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- *     commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- *     is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
- *     single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- *  a commit?
- * @j_commit_timer:  The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- *     current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- *	number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- *	takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
-	/* General journaling state flags [j_state_lock] */
-	unsigned long		j_flags;
-
-	/*
-	 * Is there an outstanding uncleared error on the journal (from a prior
-	 * abort)? [j_state_lock]
-	 */
-	int			j_errno;
-
-	/* The superblock buffer */
-	struct buffer_head	*j_sb_buffer;
-	journal_superblock_t	*j_superblock;
-
-	/* Version of the superblock format */
-	int			j_format_version;
-
-	/*
-	 * Protect the various scalars in the journal
-	 */
-	spinlock_t		j_state_lock;
-
-	/*
-	 * Number of processes waiting to create a barrier lock [j_state_lock]
-	 */
-	int			j_barrier_count;
-
-	/*
-	 * Transactions: The current running transaction...
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_running_transaction;
-
-	/*
-	 * the transaction we are pushing to disk
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_committing_transaction;
-
-	/*
-	 * ... and a linked circular list of all transactions waiting for
-	 * checkpointing. [j_list_lock]
-	 */
-	transaction_t		*j_checkpoint_transactions;
-
-	/*
-	 * Wait queue for waiting for a locked transaction to start committing,
-	 * or for a barrier lock to be released
-	 */
-	wait_queue_head_t	j_wait_transaction_locked;
-
-	/* Wait queue for waiting for checkpointing to complete */
-	wait_queue_head_t	j_wait_logspace;
-
-	/* Wait queue for waiting for commit to complete */
-	wait_queue_head_t	j_wait_done_commit;
-
-	/* Wait queue to trigger checkpointing */
-	wait_queue_head_t	j_wait_checkpoint;
-
-	/* Wait queue to trigger commit */
-	wait_queue_head_t	j_wait_commit;
-
-	/* Wait queue to wait for updates to complete */
-	wait_queue_head_t	j_wait_updates;
-
-	/* Semaphore for locking against concurrent checkpoints */
-	struct mutex		j_checkpoint_mutex;
-
-	/*
-	 * Journal head: identifies the first unused block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_head;
-
-	/*
-	 * Journal tail: identifies the oldest still-used block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_tail;
-
-	/*
-	 * Journal free: how many free blocks are there in the journal?
-	 * [j_state_lock]
-	 */
-	unsigned int		j_free;
-
-	/*
-	 * Journal start and end: the block numbers of the first usable block
-	 * and one beyond the last usable block in the journal. [j_state_lock]
-	 */
-	unsigned int		j_first;
-	unsigned int		j_last;
-
-	/*
-	 * Device, blocksize and starting block offset for the location where we
-	 * store the journal.
-	 */
-	struct block_device	*j_dev;
-	int			j_blocksize;
-	unsigned int		j_blk_offset;
-
-	/*
-	 * Device which holds the client fs.  For internal journal this will be
-	 * equal to j_dev.
-	 */
-	struct block_device	*j_fs_dev;
-
-	/* Total maximum capacity of the journal region on disk. */
-	unsigned int		j_maxlen;
-
-	/*
-	 * Protects the buffer lists and internal buffer state.
-	 */
-	spinlock_t		j_list_lock;
-
-	/* Optional inode where we store the journal.  If present, all */
-	/* journal block numbers are mapped into this inode via */
-	/* bmap(). */
-	struct inode		*j_inode;
-
-	/*
-	 * Sequence number of the oldest transaction in the log [j_state_lock]
-	 */
-	tid_t			j_tail_sequence;
-
-	/*
-	 * Sequence number of the next transaction to grant [j_state_lock]
-	 */
-	tid_t			j_transaction_sequence;
-
-	/*
-	 * Sequence number of the most recently committed transaction
-	 * [j_state_lock].
-	 */
-	tid_t			j_commit_sequence;
-
-	/*
-	 * Sequence number of the most recent transaction wanting commit
-	 * [j_state_lock]
-	 */
-	tid_t			j_commit_request;
-
-	/*
-	 * Sequence number of the most recent transaction someone is waiting
-	 * for to commit.
-	 * [j_state_lock]
-	 */
-	tid_t                   j_commit_waited;
-
-	/*
-	 * Journal uuid: identifies the object (filesystem, LVM volume etc)
-	 * backed by this journal.  This will eventually be replaced by an array
-	 * of uuids, allowing us to index multiple devices within a single
-	 * journal and to perform atomic updates across them.
-	 */
-	__u8			j_uuid[16];
-
-	/* Pointer to the current commit thread for this journal */
-	struct task_struct	*j_task;
-
-	/*
-	 * Maximum number of metadata buffers to allow in a single compound
-	 * commit transaction
-	 */
-	int			j_max_transaction_buffers;
-
-	/*
-	 * What is the maximum transaction lifetime before we begin a commit?
-	 */
-	unsigned long		j_commit_interval;
-
-	/* The timer used to wakeup the commit thread: */
-	struct timer_list	j_commit_timer;
-
-	/*
-	 * The revoke table: maintains the list of revoked blocks in the
-	 * current transaction.  [j_revoke_lock]
-	 */
-	spinlock_t		j_revoke_lock;
-	struct jbd_revoke_table_s *j_revoke;
-	struct jbd_revoke_table_s *j_revoke_table[2];
-
-	/*
-	 * array of bhs for journal_commit_transaction
-	 */
-	struct buffer_head	**j_wbuf;
-	int			j_wbufsize;
-
-	/*
-	 * this is the pid of the last person to run a synchronous operation
-	 * through the journal.
-	 */
-	pid_t			j_last_sync_writer;
-
-	/*
-	 * the average amount of time in nanoseconds it takes to commit a
-	 * transaction to the disk.  [j_state_lock]
-	 */
-	u64			j_average_commit_time;
-
-	/*
-	 * An opaque pointer to fs-private information.  ext3 puts its
-	 * superblock pointer here
-	 */
-	void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT	0x001	/* Journal thread is being destroyed */
-#define JFS_ABORT	0x002	/* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR	0x004	/* The errno in the sb has been acked */
-#define JFS_FLUSHED	0x008	/* The journal superblock has been flushed */
-#define JFS_LOADED	0x010	/* The journal superblock has been loaded */
-#define JFS_BARRIER	0x020	/* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR	0x040  /* Abort the journal on file
-						* data write error in ordered
-						* mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t	  *transaction,
-			      struct journal_head  *jh_in,
-			      struct journal_head **jh_out,
-			      unsigned int blocknr);
-
-/* Transaction locking */
-extern void		__wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase.  j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe.  We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
-	return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int	 journal_restart (handle_t *, int nblocks);
-extern int	 journal_extend (handle_t *, int nblocks);
-extern int	 journal_get_write_access(handle_t *, struct buffer_head *);
-extern int	 journal_get_create_access (handle_t *, struct buffer_head *);
-extern int	 journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int	 journal_dirty_data (handle_t *, struct buffer_head *);
-extern int	 journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void	 journal_release_buffer (handle_t *, struct buffer_head *);
-extern int	 journal_forget (handle_t *, struct buffer_head *);
-extern void	 journal_sync_buffer (struct buffer_head *);
-extern void	 journal_invalidatepage(journal_t *,
-				struct page *, unsigned int, unsigned int);
-extern int	 journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int	 journal_stop(handle_t *);
-extern int	 journal_flush (journal_t *);
-extern void	 journal_lock_updates (journal_t *);
-extern void	 journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
-				struct block_device *fs_dev,
-				int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int	   journal_update_format (journal_t *);
-extern int	   journal_check_used_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_check_available_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_set_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_create     (journal_t *);
-extern int	   journal_load       (journal_t *journal);
-extern int	   journal_destroy    (journal_t *);
-extern int	   journal_recover    (journal_t *journal);
-extern int	   journal_wipe       (journal_t *, int);
-extern int	   journal_skip_recovery	(journal_t *);
-extern void	   journal_update_sb_log_tail	(journal_t *, tid_t, unsigned int,
-						 int);
-extern void	   journal_abort      (journal_t *, int);
-extern int	   journal_errno      (journal_t *);
-extern void	   journal_ack_err    (journal_t *);
-extern int	   journal_clear_err  (journal_t *);
-extern int	   journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int	   journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
-	return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
-	kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int	   journal_init_revoke(journal_t *, int);
-extern void	   journal_destroy_revoke_caches(void);
-extern int	   journal_init_revoke_caches(void);
-
-extern void	   journal_destroy_revoke(journal_t *);
-extern int	   journal_revoke (handle_t *,
-				unsigned int, struct buffer_head *);
-extern int	   journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void	   journal_write_revoke_records(journal_t *,
-						transaction_t *, int);
-
-/* Recovery revoke support */
-extern int	journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int	journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void	journal_clear_revoke(journal_t *);
-extern void	journal_switch_revoke_table(journal_t *journal);
-extern void	journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void	__journal_drop_transaction(journal_t *, transaction_t *);
-extern int	cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_abort
- *
- * Simple test wrapper function to test the JFS_ABORT state flag.  This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (eg. ext3), and that we and should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
-	return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
-	if (handle->h_aborted)
-		return 1;
-	return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
-	handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__   */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference >= 0);
-}
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started.  Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
-	int nblocks = journal->j_max_transaction_buffers;
-	if (journal->j_committing_transaction)
-		nblocks += journal->j_committing_transaction->
-					t_outstanding_credits;
-	return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None		0	/* Not journaled */
-#define BJ_SyncData	1	/* Normal data: flush before commit */
-#define BJ_Metadata	2	/* Normal journaled metadata */
-#define BJ_Forget	3	/* Buffer superseded by this transaction */
-#define BJ_IO		4	/* Buffer is for temporary IO use */
-#define BJ_Shadow	5	/* Buffer contents being shadowed to the log */
-#define BJ_LogCtl	6	/* Buffer contains log descriptors */
-#define BJ_Reserved	7	/* Buffer is reserved for access by journal */
-#define BJ_Locked	8	/* Locked for I/O during commit */
-#define BJ_Types	9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh)	do {} while (0)
-#define print_buffer_fields(bh)	do {} while (0)
-#define print_buffer_trace(bh)	do {} while (0)
-#define BUFFER_TRACE(bh, info)	do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info)	do {} while (0)
-#define JBUFFER_TRACE(jh, info)	do {} while (0)
-
-#endif	/* __KERNEL__ */
-
-#endif	/* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640a..df07e78 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <linux/bit_spinlock.h>
 #include <crypto/hash.h>
 #endif
 
@@ -336,7 +337,45 @@
 BUFFER_FNS(Shadow, shadow)
 BUFFER_FNS(Verified, verified)
 
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+	return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+	return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
 
 #define J_ASSERT(assert)	BUG_ON(!(assert))
 
@@ -1042,8 +1081,9 @@
 extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc5343..0000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
-	return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
-	return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
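
With jbd.h gone, these bit-spinlock helpers survive only in jbd2.h (folded in above); the locking pattern around journal-head state is unchanged. A sketch:

	jbd_lock_bh_state(bh);
	/* ... examine or update jh->b_transaction and friends ... */
	jbd_unlock_bh_state(bh);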
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473..7f653e8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
  * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
  *
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
  * Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
  *
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
  *
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'. Use the inc()/dec()
+ * interface exclusively for a given key; do not mix it with
+ * static_branch_{enable,disable} on the same key.
+ *
+ * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
  * OTOH, since the affected branches are unconditional, their runtime overhead
  * will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
  * cause significant performance degradation. Struct static_key_deferred and
  * static_key_slow_dec_deferred() provide for this.
  *
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
  *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- *   if (static_key_true(&my_key)) {
- *   }
- *
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
  */
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +111,8 @@
 #ifndef __ASSEMBLY__
 
 enum jump_label_type {
-	JUMP_LABEL_DISABLE = 0,
-	JUMP_LABEL_ENABLE,
+	JUMP_LABEL_NOP = 0,
+	JUMP_LABEL_JMP,
 };
 
 struct module;
@@ -101,33 +126,18 @@
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_LABEL_TYPE_FALSE_BRANCH	0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH	1UL
-#define JUMP_LABEL_TYPE_MASK		1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
-	return (struct jump_entry *)((unsigned long)key->entries
-						& ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
-	if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
-	    JUMP_LABEL_TYPE_TRUE_BRANCH)
-		return true;
-	return false;
-}
+#define JUMP_TYPE_FALSE	0UL
+#define JUMP_TYPE_TRUE	1UL
+#define JUMP_TYPE_MASK	1UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-	return arch_static_branch(key);
+	return arch_static_branch(key, false);
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-	return !static_key_false(key);
+	return !arch_static_branch(key, true);
 }
 
 extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key)		\
+#define STATIC_KEY_INIT_TRUE					\
 	{ .enabled = ATOMIC_INIT(1),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key)		\
+	  .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE					\
 	{ .enabled = ATOMIC_INIT(0),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+	  .entries = (void *)JUMP_TYPE_FALSE }
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -198,10 +208,8 @@
 	return 0;
 }
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }
 
 #endif	/* HAVE_JUMP_LABEL */
 
@@ -213,6 +221,157 @@
 	return static_key_count(key) > 0;
 }
 
+static inline void static_key_enable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (!count)
+		static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (count)
+		static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+	struct static_key key;
+};
+
+struct static_key_false {
+	struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name)	\
+	struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name)	\
+	struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch|	likely (1)	      |	unlikely (0)
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  true (1)  |	   ...		      |	   ...
+ *            |    NOP		      |	   JMP L
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  false (0) |	   ...		      |	   ...
+ *            |    JMP L	      |	   NOP
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ *	enabled	type	branch	  instruction
+ * -----------------------------+-----------
+ *	0	0	0	| NOP
+ *	0	0	1	| JMP
+ *	0	1	0	| NOP
+ *	0	1	1	| JMP
+ *
+ *	1	0	0	| JMP
+ *	1	0	1	| NOP
+ *	1	1	0	| JMP
+ *	1	1	1	| NOP
+ *
+ * Which gives the following functions:
+ *
+ *   dynamic: instruction = enabled ^ branch
+ *   static:  instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x)							\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = !arch_static_branch(&(x)->key, true);			\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = !arch_static_branch_jump(&(x)->key, true);		\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#define static_branch_unlikely(x)						\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = arch_static_branch_jump(&(x)->key, false);		\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = arch_static_branch(&(x)->key, false);			\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x)		static_key_enable(&(x)->key)
+#define static_branch_disable(x)	static_key_disable(&(x)->key)
+
 #endif	/* _LINUX_JUMP_LABEL_H */
 
 #endif /* __ASSEMBLY__ */
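
For reference, a minimal sketch of the updated API in use (the key name and
do_feature_work() are hypothetical, not part of this patch):

	#include <linux/jump_label.h>

	/* Key starts out false: the fast path compiles to a NOP. */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void my_hot_path(void)
	{
		if (static_branch_unlikely(&my_feature_key))
			do_feature_work();	/* hypothetical out-of-line slow path */
	}

	/* Flip the branch at runtime, e.g. from a sysfs store handler. */
	void my_feature_set(bool on)
	{
		if (on)
			static_branch_enable(&my_feature_key);
		else
			static_branch_disable(&my_feature_key);
	}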
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 869b21d..e691b6a 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@
 					   const char namefmt[], ...);
 
 #define kthread_create(threadfn, data, namefmt, arg...) \
-	kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
 
 
 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
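
The macro now spells the node argument as NUMA_NO_NODE rather than a bare -1
(the value is unchanged; this is a readability fix). A usage sketch, with a
hypothetical worker function:

	#include <linux/kthread.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	static int my_worker(void *data)
	{
		while (!kthread_should_stop())
			msleep(100);	/* placeholder for real work */
		return 0;
	}

	/* Same as kthread_create_on_node(my_worker, NULL, NUMA_NO_NODE, "my_worker") */
	struct task_struct *t = kthread_create(my_worker, NULL, "my_worker");

	if (!IS_ERR(t))
		wake_up_process(t);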
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 75e3af0..3f021dc 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -31,6 +31,9 @@
 	ND_CMD_ARS_STATUS_MAX = SZ_4K,
 	ND_MAX_MAPPINGS = 32,
 
+	/* region flag indicating that persistent memory is direct-mapped by default */
+	ND_REGION_PAGEMAP = 0,
+
 	/* mark newly adjusted resources as requiring a label update */
 	DPA_RESOURCE_ADJUSTED = 1 << 0,
 };
@@ -91,6 +94,7 @@
 	void *provider_data;
 	int num_lanes;
 	int numa_node;
+	unsigned long flags;
 };
 
 struct nvdimm_bus;
diff --git a/include/linux/list.h b/include/linux/list.h
index feb773c..3e3e64a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -672,6 +672,11 @@
 	n->pprev = &n->next;
 }
 
+/* Is this node a self-referencing "fake" list head (see hlist_add_fake())? */
+static inline bool hlist_fake(struct hlist_node *h)
+{
+	return h->pprev == &h->next;
+}
+
 /*
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
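
hlist_fake() pairs with the existing hlist_add_fake(): a node whose pprev
points back at its own next member is a self-referencing "fake" head. A
minimal sketch:

	struct hlist_node n;

	hlist_add_fake(&n);		/* sets n.pprev = &n.next */
	BUG_ON(!hlist_fake(&n));	/* the node now reports itself as fake */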
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0..fd4ca0b 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
-#include <asm/cmpxchg.h>
 
 struct llist_head {
 	struct llist_node *first;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9..ffb9c9d 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@
 	} fam;
 };
 
+struct lsm_ioctlop_audit {
+	struct path path;
+	u16 cmd;
+};
+
 /* Auxiliary data to use in generating the audit record. */
 struct common_audit_data {
 	char type;
@@ -53,6 +58,7 @@
 #define LSM_AUDIT_DATA_KMOD	8
 #define LSM_AUDIT_DATA_INODE	9
 #define LSM_AUDIT_DATA_DENTRY	10
+#define LSM_AUDIT_DATA_IOCTL_OP	11
 	union 	{
 		struct path path;
 		struct dentry *dentry;
@@ -68,6 +74,7 @@
 		} key_struct;
 #endif
 		char *kmod_name;
+		struct lsm_ioctlop_audit *op;
 	} u;
 	/* this union contains LSM specific data */
 	union {
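
An LSM would typically fill in the new ioctl audit data from its ioctl hook,
along these lines (a sketch only; 'file' and 'cmd' come from the assumed hook
context):

	struct lsm_ioctlop_audit ioctl_ad;
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
	ad.u.op = &ioctl_ad;
	ioctl_ad.path = file->f_path;
	ioctl_ad.cmd = cmd;
	/* ...then hand &ad to the module's audit/decision routine... */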
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9429f05..ec3a6ba 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1881,8 +1881,10 @@
 
 extern int __init security_module_enable(const char *module);
 extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA_STACKED
-void __init yama_add_hooks(void);
+#ifdef CONFIG_SECURITY_YAMA
+extern void __init yama_add_hooks(void);
+#else
+static inline void __init yama_add_hooks(void) { }
 #endif
 
 #endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 68c4245..74deadb 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
 
 #include <linux/of.h>
 #include <linux/types.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/device.h>
 #include <linux/completion.h>
 
@@ -67,7 +67,8 @@
  * @txpoll_period:	If 'txdone_poll' is in effect, the API polls for
  *			last TX's status after these many millisecs
  * @of_xlate:		Controller driver specific mapping of channel via DT
- * @poll:		API private. Used to poll for TXDONE on all channels.
+ * @poll_hrt:		API private. hrtimer used to poll for TXDONE on all
+ *			channels.
  * @node:		API private. To hook into list of controllers.
  */
 struct mbox_controller {
@@ -81,7 +82,7 @@
 	struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
 				      const struct of_phandle_args *sp);
 	/* Internal to API */
-	struct timer_list poll;
+	struct hrtimer poll_hrt;
 	struct list_head node;
 };
 
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cc4b019..c518eb5 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,6 +77,8 @@
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
+bool memblock_overlaps_region(struct memblock_type *type,
+			      phys_addr_t base, phys_addr_t size);
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
@@ -323,7 +325,7 @@
 int memblock_is_memory(phys_addr_t addr);
 int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
 int memblock_is_reserved(phys_addr_t addr);
-int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
 extern void __memblock_dump_all(void);
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73b02b0..d92b80b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
 #include <linux/vm_event_item.h>
 #include <linux/hardirq.h>
 #include <linux/jump_label.h>
+#include <linux/page_counter.h>
+#include <linux/vmpressure.h>
+#include <linux/eventfd.h>
+#include <linux/mmzone.h>
+#include <linux/writeback.h>
 
 struct mem_cgroup;
 struct page;
@@ -67,12 +72,221 @@
 	MEMCG_NR_EVENTS,
 };
 
+/*
+ * The per-memcg event counter is incremented at every pagein/pageout. With
+ * THP, it is incremented by the number of pages. This counter is used to
+ * trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg events.
+ */
+enum mem_cgroup_events_target {
+	MEM_CGROUP_TARGET_THRESH,
+	MEM_CGROUP_TARGET_SOFTLIMIT,
+	MEM_CGROUP_TARGET_NUMAINFO,
+	MEM_CGROUP_NTARGETS,
+};
+
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+	/* Currently active and new sockets should be assigned to cgroups */
+	MEMCG_SOCK_ACTIVE,
+	/* It was ever activated; we must disarm static keys on destruction */
+	MEMCG_SOCK_ACTIVATED,
+};
+
+struct cg_proto {
+	struct page_counter	memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
+	int			memory_pressure;
+	long			sysctl_mem[3];
+	unsigned long		flags;
+	/*
+	 * The memcg field is used to find which memcg we belong to directly.
+	 * Each memcg struct can hold more than one cg_proto, so container_of
+	 * won't really cut it.
+	 *
+	 * The elegant solution would be having an inverse function to
+	 * proto_cgroup in struct proto, but that means polluting the structure
+	 * for everybody, instead of just for memcg users.
+	 */
+	struct mem_cgroup	*memcg;
+};
+
 #ifdef CONFIG_MEMCG
+struct mem_cgroup_stat_cpu {
+	long count[MEM_CGROUP_STAT_NSTATS];
+	unsigned long events[MEMCG_NR_EVENTS];
+	unsigned long nr_page_events;
+	unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct mem_cgroup_reclaim_iter {
+	struct mem_cgroup *position;
+	/* scan generation, increased every round-trip */
+	unsigned int generation;
+};
+
+/*
+ * per-zone information in memory controller.
+ */
+struct mem_cgroup_per_zone {
+	struct lruvec		lruvec;
+	unsigned long		lru_size[NR_LRU_LISTS];
+
+	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
+
+	struct rb_node		tree_node;	/* RB tree node */
+	unsigned long		usage_in_excess;/* Set to the value by which */
+						/* the soft limit is exceeded*/
+	bool			on_tree;
+	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
+						/* use container_of	   */
+};
+
+struct mem_cgroup_per_node {
+	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_threshold {
+	struct eventfd_ctx *eventfd;
+	unsigned long threshold;
+};
+
+/* For threshold */
+struct mem_cgroup_threshold_ary {
+	/* An array index points to threshold just below or equal to usage. */
+	int current_threshold;
+	/* Size of entries[] */
+	unsigned int size;
+	/* Array of thresholds */
+	struct mem_cgroup_threshold entries[0];
+};
+
+struct mem_cgroup_thresholds {
+	/* Primary thresholds array */
+	struct mem_cgroup_threshold_ary *primary;
+	/*
+	 * Spare threshold array.
+	 * This is needed to make mem_cgroup_unregister_event() "never fail".
+	 * It must be able to store at least primary->size - 1 entries.
+	 */
+	struct mem_cgroup_threshold_ary *spare;
+};
+
+/*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+ * statistics based on the statistics developed by Rik Van Riel for clock-pro,
+ * to help the administrator determine what knobs to tune.
+ */
+struct mem_cgroup {
+	struct cgroup_subsys_state css;
+
+	/* Accounted resources */
+	struct page_counter memory;
+	struct page_counter memsw;
+	struct page_counter kmem;
+
+	/* Normal memory consumption range */
+	unsigned long low;
+	unsigned long high;
+
+	unsigned long soft_limit;
+
+	/* vmpressure notifications */
+	struct vmpressure vmpressure;
+
+	/* css_online() has been completed */
+	int initialized;
+
+	/*
+	 * Should the accounting and control be hierarchical, per subtree?
+	 */
+	bool use_hierarchy;
+
+	/* protected by memcg_oom_lock */
+	bool		oom_lock;
+	int		under_oom;
+
+	int	swappiness;
+	/* OOM-Killer disable */
+	int		oom_kill_disable;
+
+	/* protect arrays of thresholds */
+	struct mutex thresholds_lock;
+
+	/* thresholds for memory usage. RCU-protected */
+	struct mem_cgroup_thresholds thresholds;
+
+	/* thresholds for mem+swap usage. RCU-protected */
+	struct mem_cgroup_thresholds memsw_thresholds;
+
+	/* For oom notifier event fd */
+	struct list_head oom_notify;
+
+	/*
+	 * Should we move charges of a task when a task is moved into this
+	 * mem_cgroup ? And what type of charges should we move ?
+	 */
+	unsigned long move_charge_at_immigrate;
+	/*
+	 * set > 0 if pages under this cgroup are moving to other cgroup.
+	 */
+	atomic_t		moving_account;
+	/* taken only while moving_account > 0 */
+	spinlock_t		move_lock;
+	struct task_struct	*move_lock_task;
+	unsigned long		move_lock_flags;
+	/*
+	 * percpu counter.
+	 */
+	struct mem_cgroup_stat_cpu __percpu *stat;
+	spinlock_t pcp_counter_lock;
+
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+	struct cg_proto tcp_mem;
+#endif
+#if defined(CONFIG_MEMCG_KMEM)
+	/* Index in the kmem_cache->memcg_params.memcg_caches array */
+	int kmemcg_id;
+	bool kmem_acct_activated;
+	bool kmem_acct_active;
+#endif
+
+	int last_scanned_node;
+#if MAX_NUMNODES > 1
+	nodemask_t	scan_nodes;
+	atomic_t	numainfo_events;
+	atomic_t	numainfo_updating;
+#endif
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct list_head cgwb_list;
+	struct wb_domain cgwb_domain;
+#endif
+
+	/* List of events which userspace want to receive */
+	struct list_head event_list;
+	spinlock_t event_list_lock;
+
+	struct mem_cgroup_per_node *nodeinfo[0];
+	/* WARNING: nodeinfo must be the last member here */
+};
 extern struct cgroup_subsys_state *mem_cgroup_root_css;
 
-void mem_cgroup_events(struct mem_cgroup *memcg,
+/**
+ * mem_cgroup_events - count memory events against a cgroup
+ * @memcg: the memory cgroup
+ * @idx: the event index
+ * @nr: the number of events to account for
+ */
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 		       enum mem_cgroup_events_index idx,
-		       unsigned int nr);
+		       unsigned int nr)
+{
+	this_cpu_add(memcg->stat->events[idx], nr);
+}
 
 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
 
@@ -90,15 +304,31 @@
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
-bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
-			      struct mem_cgroup *root);
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 
-extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
-extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+	return css ? container_of(css, struct mem_cgroup, css) : NULL;
+}
+
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
+static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+			      struct mem_cgroup *root)
+{
+	if (root == memcg)
+		return true;
+	if (!root->use_hierarchy)
+		return false;
+	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
+}
 
 static inline bool mm_match_cgroup(struct mm_struct *mm,
 				   struct mem_cgroup *memcg)
@@ -114,24 +344,67 @@
 	return match;
 }
 
-extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
-extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
 
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
-				   struct mem_cgroup *,
-				   struct mem_cgroup_reclaim_cookie *);
-void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline bool mem_cgroup_disabled(void)
+{
+	if (memory_cgrp_subsys.disabled)
+		return true;
+	return false;
+}
 
 /*
  * For memory reclaim.
  */
-int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
-bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
-extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
-					struct task_struct *p);
+
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+		int nr_pages);
+
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup *memcg;
+
+	if (mem_cgroup_disabled())
+		return true;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	memcg = mz->memcg;
+
+	return !!(memcg->css.flags & CSS_ONLINE);
+}
+
+static inline
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	return mz->lru_size[lru];
+}
+
+static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+{
+	unsigned long inactive_ratio;
+	unsigned long inactive;
+	unsigned long active;
+	unsigned long gb;
+
+	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
+
+	gb = (inactive + active) >> (30 - PAGE_SHIFT);
+	if (gb)
+		inactive_ratio = int_sqrt(10 * gb);
+	else
+		inactive_ratio = 1;
+
+	return inactive * inactive_ratio < active;
+}
+
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+				struct task_struct *p);
 
 static inline void mem_cgroup_oom_enable(void)
 {
@@ -156,18 +429,26 @@
 extern int do_swap_account;
 #endif
 
-static inline bool mem_cgroup_disabled(void)
-{
-	if (memory_cgrp_subsys.disabled)
-		return true;
-	return false;
-}
-
 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
-				 enum mem_cgroup_stat_index idx, int val);
 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
 
+/**
+ * mem_cgroup_update_page_stat - update page state statistics
+ * @memcg: memcg to account against
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ *
+ * See mem_cgroup_begin_page_stat() for locking requirements.
+ */
+static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+				 enum mem_cgroup_stat_index idx, int val)
+{
+	VM_BUG_ON(!rcu_read_lock_held());
+
+	if (memcg)
+		this_cpu_add(memcg->stat->count[idx], val);
+}
+
 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
@@ -184,13 +465,31 @@
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
 
-void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
 					     enum vm_event_item idx)
 {
+	struct mem_cgroup *memcg;
+
 	if (mem_cgroup_disabled())
 		return;
-	__mem_cgroup_count_vm_event(mm, idx);
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!memcg))
+		goto out;
+
+	switch (idx) {
+	case PGFAULT:
+		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
+		break;
+	case PGMAJFAULT:
+		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+		break;
+	default:
+		BUG();
+	}
+out:
+	rcu_read_unlock();
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void mem_cgroup_split_huge_fixup(struct page *head);
@@ -199,8 +498,6 @@
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
-#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-
 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 				     enum mem_cgroup_events_index idx,
 				     unsigned int nr)
@@ -275,12 +572,6 @@
 	return true;
 }
 
-static inline struct cgroup_subsys_state
-		*mem_cgroup_css(struct mem_cgroup *memcg)
-{
-	return NULL;
-}
-
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
 		struct mem_cgroup *prev,
@@ -428,8 +719,8 @@
 extern struct static_key memcg_kmem_enabled_key;
 
 extern int memcg_nr_cache_ids;
-extern void memcg_get_cache_ids(void);
-extern void memcg_put_cache_ids(void);
+void memcg_get_cache_ids(void);
+void memcg_put_cache_ids(void);
 
 /*
  * Helper macro to loop through all memcg-specific caches. Callers must still
@@ -444,7 +735,10 @@
 	return static_key_false(&memcg_kmem_enabled_key);
 }
 
-bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+	return memcg->kmem_acct_active;
+}
 
 /*
  * In general, we'll do everything in our power to not incur in any overhead
@@ -463,7 +757,15 @@
 				       struct mem_cgroup *memcg, int order);
 void __memcg_kmem_uncharge_pages(struct page *page, int order);
 
-int memcg_cache_id(struct mem_cgroup *memcg);
+/*
+ * helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+	return memcg ? memcg->kmemcg_id : -1;
+}
 
 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
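
For a sense of scale, the inactive-anon heuristic above works out as follows
(sizes assumed for illustration): with 4 GiB of anon pages, gb = 4 and
inactive_ratio = int_sqrt(40) = 6, so the inactive list is considered low
once it drops below roughly 1/7 of the total; with 64 GiB the ratio grows to
int_sqrt(640) = 25. Below 1 GiB, gb is 0 and the ratio falls back to 1,
i.e. the list is low whenever inactive < active.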
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 6ffa0ac..8f60e89 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -266,8 +266,9 @@
 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
 extern int add_memory(int nid, u64 start, u64 size);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
-extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
+		bool for_device);
+extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern void remove_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283..8fcad63 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@
 /* page 0 basic: slave adder 0x60 */
 
 #define PM800_STATUS_1			(0x01)
-#define PM800_ONKEY_STS1		(1 << 0)
-#define PM800_EXTON_STS1		(1 << 1)
-#define PM800_CHG_STS1			(1 << 2)
-#define PM800_BAT_STS1			(1 << 3)
-#define PM800_VBUS_STS1			(1 << 4)
-#define PM800_LDO_PGOOD_STS1	(1 << 5)
-#define PM800_BUCK_PGOOD_STS1	(1 << 6)
+#define PM800_ONKEY_STS1		BIT(0)
+#define PM800_EXTON_STS1		BIT(1)
+#define PM800_CHG_STS1			BIT(2)
+#define PM800_BAT_STS1			BIT(3)
+#define PM800_VBUS_STS1			BIT(4)
+#define PM800_LDO_PGOOD_STS1		BIT(5)
+#define PM800_BUCK_PGOOD_STS1		BIT(6)
 
 #define PM800_STATUS_2			(0x02)
-#define PM800_RTC_ALARM_STS2	(1 << 0)
+#define PM800_RTC_ALARM_STS2		BIT(0)
 
 /* Wakeup Registers */
-#define PM800_WAKEUP1		(0x0D)
+#define PM800_WAKEUP1			(0x0D)
 
-#define PM800_WAKEUP2		(0x0E)
-#define PM800_WAKEUP2_INV_INT		(1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR		(1 << 1)
-#define PM800_WAKEUP2_INT_MASK		(1 << 2)
+#define PM800_WAKEUP2			(0x0E)
+#define PM800_WAKEUP2_INV_INT		BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR		BIT(1)
+#define PM800_WAKEUP2_INT_MASK		BIT(2)
 
-#define PM800_POWER_UP_LOG	(0x10)
+#define PM800_POWER_UP_LOG		(0x10)
 
 /* Reference and low power registers */
 #define PM800_LOW_POWER1		(0x20)
 #define PM800_LOW_POWER2		(0x21)
-#define PM800_LOW_POWER_CONFIG3	(0x22)
-#define PM800_LOW_POWER_CONFIG4	(0x23)
+#define PM800_LOW_POWER_CONFIG3		(0x22)
+#define PM800_LOW_POWER_CONFIG4		(0x23)
 
 /* GPIO register */
 #define PM800_GPIO_0_1_CNTRL		(0x30)
-#define PM800_GPIO0_VAL				(1 << 0)
+#define PM800_GPIO0_VAL			BIT(0)
 #define PM800_GPIO0_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO1_VAL				(1 << 4)
+#define PM800_GPIO1_VAL			BIT(4)
 #define PM800_GPIO1_GPIO_MODE(x)	(x << 5)
 
 #define PM800_GPIO_2_3_CNTRL		(0x31)
-#define PM800_GPIO2_VAL				(1 << 0)
+#define PM800_GPIO2_VAL			BIT(0)
 #define PM800_GPIO2_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO3_VAL				(1 << 4)
+#define PM800_GPIO3_VAL			BIT(4)
 #define PM800_GPIO3_GPIO_MODE(x)	(x << 5)
 #define PM800_GPIO3_MODE_MASK		0x1F
 #define PM800_GPIO3_HEADSET_MODE	PM800_GPIO3_GPIO_MODE(6)
 
-#define PM800_GPIO_4_CNTRL			(0x32)
-#define PM800_GPIO4_VAL				(1 << 0)
+#define PM800_GPIO_4_CNTRL		(0x32)
+#define PM800_GPIO4_VAL			BIT(0)
 #define PM800_GPIO4_GPIO_MODE(x)	(x << 1)
 
 #define PM800_HEADSET_CNTRL		(0x38)
-#define PM800_HEADSET_DET_EN		(1 << 7)
-#define PM800_HSDET_SLP			(1 << 1)
+#define PM800_HEADSET_DET_EN		BIT(7)
+#define PM800_HSDET_SLP			BIT(1)
 /* PWM register */
-#define PM800_PWM1		(0x40)
-#define PM800_PWM2		(0x41)
-#define PM800_PWM3		(0x42)
-#define PM800_PWM4		(0x43)
+#define PM800_PWM1			(0x40)
+#define PM800_PWM2			(0x41)
+#define PM800_PWM3			(0x42)
+#define PM800_PWM4			(0x43)
 
 /* RTC Registers */
 #define PM800_RTC_CONTROL		(0xD0)
@@ -123,55 +123,55 @@
 #define PM800_RTC_MISC4			(0xE4)
 #define PM800_RTC_MISC5			(0xE7)
 /* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN			(1 << 0)
-#define PM800_ALARM_WAKEUP		(1 << 4)
-#define PM800_ALARM			(1 << 5)
-#define PM800_RTC1_USE_XO		(1 << 7)
+#define PM800_ALARM1_EN			BIT(0)
+#define PM800_ALARM_WAKEUP		BIT(4)
+#define PM800_ALARM			BIT(5)
+#define PM800_RTC1_USE_XO		BIT(7)
 
 /* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
 
 /* buck registers */
-#define PM800_SLEEP_BUCK1	(0x30)
+#define PM800_SLEEP_BUCK1		(0x30)
 
 /* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1		(0x5A)
-#define PM800_BUCK1_SLP1_SHIFT	0
-#define PM800_BUCK1_SLP1_MASK	(0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1			(0x5A)
+#define PM800_BUCK1_SLP1_SHIFT		0
+#define PM800_BUCK1_SLP1_MASK		(0x3 << PM800_BUCK1_SLP1_SHIFT)
 
 /* page 2 GPADC: slave adder 0x02 */
 #define PM800_GPADC_MEAS_EN1		(0x01)
-#define PM800_MEAS_EN1_VBAT         (1 << 2)
+#define PM800_MEAS_EN1_VBAT		BIT(2)
 #define PM800_GPADC_MEAS_EN2		(0x02)
-#define PM800_MEAS_EN2_RFTMP        (1 << 0)
-#define PM800_MEAS_GP0_EN			(1 << 2)
-#define PM800_MEAS_GP1_EN			(1 << 3)
-#define PM800_MEAS_GP2_EN			(1 << 4)
-#define PM800_MEAS_GP3_EN			(1 << 5)
-#define PM800_MEAS_GP4_EN			(1 << 6)
+#define PM800_MEAS_EN2_RFTMP		BIT(0)
+#define PM800_MEAS_GP0_EN		BIT(2)
+#define PM800_MEAS_GP1_EN		BIT(3)
+#define PM800_MEAS_GP2_EN		BIT(4)
+#define PM800_MEAS_GP3_EN		BIT(5)
+#define PM800_MEAS_GP4_EN		BIT(6)
 
 #define PM800_GPADC_MISC_CONFIG1	(0x05)
 #define PM800_GPADC_MISC_CONFIG2	(0x06)
-#define PM800_GPADC_MISC_GPFSM_EN	(1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN	BIT(0)
 #define PM800_GPADC_SLOW_MODE(x)	(x << 3)
 
-#define PM800_GPADC_MISC_CONFIG3		(0x09)
-#define PM800_GPADC_MISC_CONFIG4		(0x0A)
+#define PM800_GPADC_MISC_CONFIG3	(0x09)
+#define PM800_GPADC_MISC_CONFIG4	(0x0A)
 
-#define PM800_GPADC_PREBIAS1			(0x0F)
+#define PM800_GPADC_PREBIAS1		(0x0F)
 #define PM800_GPADC0_GP_PREBIAS_TIME(x)	(x << 0)
-#define PM800_GPADC_PREBIAS2			(0x10)
+#define PM800_GPADC_PREBIAS2		(0x10)
 
-#define PM800_GP_BIAS_ENA1				(0x14)
-#define PM800_GPADC_GP_BIAS_EN0			(1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1			(1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2			(1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3			(1 << 3)
+#define PM800_GP_BIAS_ENA1		(0x14)
+#define PM800_GPADC_GP_BIAS_EN0		BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1		BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2		BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3		BIT(3)
 
 #define PM800_GP_BIAS_OUT1		(0x15)
-#define PM800_BIAS_OUT_GP0		(1 << 0)
-#define PM800_BIAS_OUT_GP1		(1 << 1)
-#define PM800_BIAS_OUT_GP2		(1 << 2)
-#define PM800_BIAS_OUT_GP3		(1 << 3)
+#define PM800_BIAS_OUT_GP0		BIT(0)
+#define PM800_BIAS_OUT_GP1		BIT(1)
+#define PM800_BIAS_OUT_GP2		BIT(2)
+#define PM800_BIAS_OUT_GP3		BIT(3)
 
 #define PM800_GPADC0_LOW_TH		0x20
 #define PM800_GPADC1_LOW_TH		0x21
@@ -222,37 +222,37 @@
 
 #define PM805_INT_STATUS1		(0x03)
 
-#define PM805_INT1_HP1_SHRT		(1 << 0)
-#define PM805_INT1_HP2_SHRT		(1 << 1)
-#define PM805_INT1_MIC_CONFLICT		(1 << 2)
-#define PM805_INT1_CLIP_FAULT		(1 << 3)
-#define PM805_INT1_LDO_OFF			(1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK	(1 << 5)
+#define PM805_INT1_HP1_SHRT		BIT(0)
+#define PM805_INT1_HP2_SHRT		BIT(1)
+#define PM805_INT1_MIC_CONFLICT		BIT(2)
+#define PM805_INT1_CLIP_FAULT		BIT(3)
+#define PM805_INT1_LDO_OFF		BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK	BIT(5)
 
 #define PM805_INT_STATUS2		(0x04)
 
-#define PM805_INT2_MIC_DET			(1 << 0)
-#define PM805_INT2_SHRT_BTN_DET		(1 << 1)
-#define PM805_INT2_VOLM_BTN_DET		(1 << 2)
-#define PM805_INT2_VOLP_BTN_DET		(1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT	(1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT	(1 << 5)
+#define PM805_INT2_MIC_DET		BIT(0)
+#define PM805_INT2_SHRT_BTN_DET		BIT(1)
+#define PM805_INT2_VOLM_BTN_DET		BIT(2)
+#define PM805_INT2_VOLP_BTN_DET		BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT	BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT	BIT(5)
 
 #define PM805_INT_MASK1			(0x05)
 #define PM805_INT_MASK2			(0x06)
-#define PM805_SHRT_BTN_DET		(1 << 1)
+#define PM805_SHRT_BTN_DET		BIT(1)
 
 /* number of status and int reg in a row */
 #define PM805_INT_REG_NUM		(2)
 
 #define PM805_MIC_DET1			(0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET	BIT(0)
 #define PM805_MIC_DET2			(0x08)
-#define PM805_MIC_DET_STATUS1	(0x09)
+#define PM805_MIC_DET_STATUS1		(0x09)
 
-#define PM805_MIC_DET_STATUS3	(0x0A)
-#define PM805_AUTO_SEQ_STATUS1	(0x0B)
-#define PM805_AUTO_SEQ_STATUS2	(0x0C)
+#define PM805_MIC_DET_STATUS3		(0x0A)
+#define PM805_AUTO_SEQ_STATUS1		(0x0B)
+#define PM805_AUTO_SEQ_STATUS2		(0x0C)
 
 #define PM805_ADC_SETTING1		(0x10)
 #define PM805_ADC_SETTING2		(0x11)
@@ -261,7 +261,7 @@
 #define PM805_ADC_GAIN2			(0x13)
 #define PM805_DMIC_SETTING		(0x15)
 #define PM805_DWS_SETTING		(0x16)
-#define PM805_MIC_CONFLICT_STS	(0x17)
+#define PM805_MIC_CONFLICT_STS		(0x17)
 
 #define PM805_PDM_SETTING1		(0x20)
 #define PM805_PDM_SETTING2		(0x21)
@@ -270,11 +270,11 @@
 #define PM805_PDM_CONTROL2		(0x24)
 #define PM805_PDM_CONTROL3		(0x25)
 
-#define PM805_HEADPHONE_SETTING			(0x26)
-#define PM805_HEADPHONE_GAIN_A2A		(0x27)
-#define PM805_HEADPHONE_SHORT_STATE		(0x28)
-#define PM805_EARPHONE_SETTING			(0x29)
-#define PM805_AUTO_SEQ_SETTING			(0x2A)
+#define PM805_HEADPHONE_SETTING		(0x26)
+#define PM805_HEADPHONE_GAIN_A2A	(0x27)
+#define PM805_HEADPHONE_SHORT_STATE	(0x28)
+#define PM805_EARPHONE_SETTING		(0x29)
+#define PM805_AUTO_SEQ_SETTING		(0x2A)
 
 struct pm80x_rtc_pdata {
 	int		vrtc;
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4..79e607e 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@
 	WM5110 = 2,
 	WM8997 = 3,
 	WM8280 = 4,
+	WM8998 = 5,
+	WM1814 = 6,
 };
 
 #define ARIZONA_IRQ_GP1                    0
@@ -165,6 +167,7 @@
 
 int wm5110_patch(struct arizona *arizona);
 int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
 
 extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
 				     bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4fa..1dc3858 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@
 	 * useful for systems where an I2S bus with multiple data
 	 * lines is mastered.
 	 */
-	int max_channels_clocked[ARIZONA_MAX_AIF];
+	unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
 
 	/** GPIO5 is used for jack detection */
 	bool jd_gpio5;
@@ -125,22 +125,22 @@
 	unsigned int hpdet_channel;
 
 	/** Extra debounce timeout used during initial mic detection (ms) */
-	int micd_detect_debounce;
+	unsigned int micd_detect_debounce;
 
 	/** GPIO for mic detection polarity */
 	int micd_pol_gpio;
 
 	/** Mic detect ramp rate */
-	int micd_bias_start_time;
+	unsigned int micd_bias_start_time;
 
 	/** Mic detect sample rate */
-	int micd_rate;
+	unsigned int micd_rate;
 
 	/** Mic detect debounce level */
-	int micd_dbtime;
+	unsigned int micd_dbtime;
 
 	/** Mic detect timeout (ms) */
-	int micd_timeout;
+	unsigned int micd_timeout;
 
 	/** Force MICBIAS on for mic detect */
 	bool micd_force_micbias;
@@ -162,6 +162,8 @@
 	/**
 	 * Mode of input structures
 	 * One of the ARIZONA_INMODE_xxx values
+	 * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+	 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
 	 */
 	int inmode[ARIZONA_MAX_INPUT];
 
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36..fdd70b3 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
 #define ARIZONA_PWM_DRIVE_3                      0x32
 #define ARIZONA_WAKE_CONTROL                     0x40
 #define ARIZONA_SEQUENCE_CONTROL                 0x41
+#define ARIZONA_SPARE_TRIGGERS                   0x42
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1    0x61
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2    0x62
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3    0x63
@@ -139,6 +140,7 @@
 #define ARIZONA_MIC_DETECT_LEVEL_2		 0x2A7
 #define ARIZONA_MIC_DETECT_LEVEL_3		 0x2A8
 #define ARIZONA_MIC_DETECT_LEVEL_4		 0x2A9
+#define ARIZONA_MIC_DETECT_4                     0x2AB
 #define ARIZONA_MIC_NOISE_MIX_CONTROL_1          0x2C3
 #define ARIZONA_ISOLATION_CONTROL                0x2CB
 #define ARIZONA_JACK_DETECT_ANALOGUE             0x2D3
@@ -225,14 +227,18 @@
 #define ARIZONA_DAC_VOLUME_LIMIT_6R              0x43E
 #define ARIZONA_NOISE_GATE_SELECT_6R             0x43F
 #define ARIZONA_DRE_ENABLE                       0x440
+#define ARIZONA_DRE_CONTROL_1                    0x441
 #define ARIZONA_DRE_CONTROL_2                    0x442
 #define ARIZONA_DRE_CONTROL_3                    0x443
+#define ARIZONA_EDRE_ENABLE                      0x448
 #define ARIZONA_DAC_AEC_CONTROL_1                0x450
+#define ARIZONA_DAC_AEC_CONTROL_2                0x451
 #define ARIZONA_NOISE_GATE_CONTROL               0x458
 #define ARIZONA_PDM_SPK1_CTRL_1                  0x490
 #define ARIZONA_PDM_SPK1_CTRL_2                  0x491
 #define ARIZONA_PDM_SPK2_CTRL_1                  0x492
 #define ARIZONA_PDM_SPK2_CTRL_2                  0x493
+#define ARIZONA_HP_TEST_CTRL_13                  0x49A
 #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL           0x4A0
 #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL           0x4A1
 #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL           0x4A2
@@ -310,6 +316,10 @@
 #define ARIZONA_AIF3_TX_ENABLES                  0x599
 #define ARIZONA_AIF3_RX_ENABLES                  0x59A
 #define ARIZONA_AIF3_FORCE_WRITE                 0x59B
+#define ARIZONA_SPD1_TX_CONTROL                  0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1         0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2         0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3         0x5C5
 #define ARIZONA_SLIMBUS_FRAMER_REF_GEAR          0x5E3
 #define ARIZONA_SLIMBUS_RATES_1                  0x5E5
 #define ARIZONA_SLIMBUS_RATES_2                  0x5E6
@@ -643,6 +653,10 @@
 #define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME        0x7FD
 #define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE        0x7FE
 #define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME        0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE       0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME       0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE       0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME       0x809
 #define ARIZONA_EQ1MIX_INPUT_1_SOURCE            0x880
 #define ARIZONA_EQ1MIX_INPUT_1_VOLUME            0x881
 #define ARIZONA_EQ1MIX_INPUT_2_SOURCE            0x882
@@ -868,6 +882,7 @@
 #define ARIZONA_GPIO5_CTRL                       0xC04
 #define ARIZONA_IRQ_CTRL_1                       0xC0F
 #define ARIZONA_GPIO_DEBOUNCE_CONFIG             0xC10
+#define ARIZONA_GP_SWITCH_1                      0xC18
 #define ARIZONA_MISC_PAD_CTRL_1                  0xC20
 #define ARIZONA_MISC_PAD_CTRL_2                  0xC21
 #define ARIZONA_MISC_PAD_CTRL_3                  0xC22
@@ -1169,6 +1184,13 @@
 #define ARIZONA_DSP4_SCRATCH_1                   0x1441
 #define ARIZONA_DSP4_SCRATCH_2                   0x1442
 #define ARIZONA_DSP4_SCRATCH_3                   0x1443
+#define ARIZONA_FRF_COEFF_1                      0x1700
+#define ARIZONA_FRF_COEFF_2                      0x1701
+#define ARIZONA_FRF_COEFF_3                      0x1702
+#define ARIZONA_FRF_COEFF_4                      0x1703
+#define ARIZONA_V2_DAC_COMP_1                    0x1704
+#define ARIZONA_V2_DAC_COMP_2                    0x1705
+
 
 /*
  * Field Definitions.
@@ -1431,6 +1453,42 @@
 #define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH               1  /* WSEQ_ENA_JD2_RISE */
 
 /*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8                          0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK                     0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT                         7  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH                         1  /* WS_TRG8 */
+#define ARIZONA_WS_TRG7                          0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK                     0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT                         6  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH                         1  /* WS_TRG7 */
+#define ARIZONA_WS_TRG6                          0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK                     0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT                         5  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH                         1  /* WS_TRG6 */
+#define ARIZONA_WS_TRG5                          0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK                     0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT                         4  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH                         1  /* WS_TRG5 */
+#define ARIZONA_WS_TRG4                          0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK                     0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT                         3  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH                         1  /* WS_TRG4 */
+#define ARIZONA_WS_TRG3                          0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK                     0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT                         2  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH                         1  /* WS_TRG3 */
+#define ARIZONA_WS_TRG2                          0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK                     0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG1                          0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK                     0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT                         0  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH                         1  /* WS_TRG1 */
+
+/*
  * R97 (0x61) - Sample Rate Sequence Select 1
  */
 #define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF  /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
 #define ARIZONA_HP_IDAC_STEER_MASK               0x0004  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_SHIFT                   2  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_WIDTH                   1  /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK                      0x0006  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT                          1  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH                          2  /* HP_RATE - [2:1] */
 #define ARIZONA_HP_RATE                          0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_MASK                     0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_SHIFT                         1  /* HP_RATE */
@@ -2413,6 +2474,16 @@
 #define ARIZONA_MICD_STS_WIDTH                        1  /* MICD_STS */
 
 /*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK          0xFF00  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK               0x007F  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT                   0  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH                   7  /* MICDET_ADCVAL - [6:0] */
+
+/*
  * R707 (0x2C3) - Mic noise mix control 1
  */
 #define ARIZONA_MICMUTE_RATE_MASK                0x7800  /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
 /*
  * R785 (0x311) - ADC Digital Volume 1L
  */
+#define ARIZONA_IN1L_SRC_MASK                    0x4000  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT                       14  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH                        1  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK                 0x2000  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT                    13  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH                     1  /* IN1L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2560,6 +2637,12 @@
 /*
  * R789 (0x315) - ADC Digital Volume 1R
  */
+#define ARIZONA_IN1R_SRC_MASK                    0x4000  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT                       14  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH                        1  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK                 0x2000  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT                    13  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH                     1  /* IN1R_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2604,6 +2687,12 @@
 /*
  * R793 (0x319) - ADC Digital Volume 2L
  */
+#define ARIZONA_IN2L_SRC_MASK                    0x4000  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT                       14  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH                        1  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK                 0x2000  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT                    13  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH                     1  /* IN2L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -3412,11 +3501,45 @@
 #define ARIZONA_DRE1L_ENA_WIDTH                       1  /* DRE1L_ENA */
 
 /*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA                          0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK                     0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT                         5  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH                         1  /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA                          0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK                     0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT                         3  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH                         1  /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA                          0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK                     0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT                         2  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH                         1  /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA                          0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK                     0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT                         1  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH                         1  /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA                          0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK                     0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT                         0  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH                         1  /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK             0x0F00  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT                 8  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH                 4  /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
  * R1090 (0x442) - DRE Control 2
  */
 #define ARIZONA_DRE_T_LOW_MASK                   0x3F00  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_SHIFT                       8  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_WIDTH                       6  /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK          0x000F  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT              0  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH              4  /* DRE_ALOG_VOL_DELAY - [3:0] */
 
 /*
  * R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
 #define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT               0  /* LOW_LEVEL_ABS - [3:0] */
 #define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH               4  /* LOW_LEVEL_ABS - [3:0] */
 
+/*
+ * R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA              0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK         0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT             9  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH             1  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA              0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK         0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT             8  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH             1  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA              0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK         0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT             7  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH             1  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA              0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK         0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT             6  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH             1  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA              0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK         0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT             5  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH             1  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA              0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK         0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT             4  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH             1  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA              0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK         0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT             3  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH             1  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA              0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK         0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT             2  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH             1  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA              0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK         0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA              0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK         0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT             0  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH             1  /* EDRE_OUT1R_THR1_ENA */
+
 /*
  * R1104 (0x450) - DAC AEC Control 1
  */
@@ -4308,6 +4474,86 @@
 #define ARIZONA_AIF3_FRC_WR_WIDTH                     1  /* AIF3_FRC_WR */
 
 /*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2                        0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK                   0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT                      13  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH                       1  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1                        0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK                   0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT                      12  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH                       1  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK                   0x00F0  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA                         0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK                    0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT                        0  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH                        1  /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK                0xFF00  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK               0x00C0  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT                   6  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH                   2  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK                0x0038  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY                      0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK                 0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT                     2  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH                     1  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO                     0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK                0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO                         0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK                    0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT                        0  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH                        1  /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK                   0xF000  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT                      12  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH                       4  /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK                 0x0F00  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT                     8  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH                     4  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK                 0x00F0  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK                 0x000F  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT                     0  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH                     4  /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK                 0x0F00  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT                     8  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH                     4  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK                    0x00E0  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT                        5  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH                        3  /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL                        0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK                   0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT                       4  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH                       1  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK                 0x000C  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK                  0x0003  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT                      0  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH                      2  /* SPD1_CLKACU */
+
+/*
  * R1507 (0x5E3) - SLIMbus Framer Ref Gear
  */
 #define ARIZONA_SLIMCLK_SRC                      0x0010  /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
 #define ARIZONA_GP_DBTIME_WIDTH                       4  /* GP_DBTIME - [15:12] */
 
 /*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK                    0x0003  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT                        0  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH                        2  /* SW1_MODE - [1:0] */
+
+/*
  * R3104 (0xC20) - Misc Pad Ctrl 1
  */
 #define ARIZONA_LDO1ENA_PD                       0x8000  /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
 /*
  * R3366 (0xD26) - Interrupt Raw Status 8
  */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS            0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK       0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT          15  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH           1  /* SPDIF_OVERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS            0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK       0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT          10  /* AIF3_UNDERCLOCKED_STS */
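The _MASK/_SHIFT/_WIDTH triplets added above follow the usual regmap bit-field pattern. As a minimal sketch (assuming ARIZONA_EDRE_ENABLE is the 0x448 register-address constant defined earlier in this header, and arizona->regmap is the handle the Arizona MFD core provides), a driver could enable one detector like this:

	/* Sketch only: enable the OUT1L THR1 detector in EDRE Enable (0x448).
	 * ARIZONA_EDRE_ENABLE is assumed to exist in the register-address
	 * section of this header, which the hunk does not show. */
	ret = regmap_update_bits(arizona->regmap, ARIZONA_EDRE_ENABLE,
				 ARIZONA_EDRE_OUT1L_THR1_ENA_MASK,
				 ARIZONA_EDRE_OUT1L_THR1_ENA);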
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853..cc8ad1e 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
 #define __LINUX_MFD_AXP20X_H
 
 enum {
-	AXP202_ID = 0,
+	AXP152_ID = 0,
+	AXP202_ID,
 	AXP209_ID,
 	AXP221_ID,
 	AXP288_ID,
@@ -22,6 +23,24 @@
 #define AXP20X_DATACACHE(m)		(0x04 + (m))
 
 /* Power supply */
+#define AXP152_PWR_OP_MODE		0x01
+#define AXP152_LDO3456_DC1234_CTRL	0x12
+#define AXP152_ALDO_OP_MODE		0x13
+#define AXP152_LDO0_CTRL		0x15
+#define AXP152_DCDC2_V_OUT		0x23
+#define AXP152_DCDC2_V_SCAL		0x25
+#define AXP152_DCDC1_V_OUT		0x26
+#define AXP152_DCDC3_V_OUT		0x27
+#define AXP152_ALDO12_V_OUT		0x28
+#define AXP152_DLDO1_V_OUT		0x29
+#define AXP152_DLDO2_V_OUT		0x2a
+#define AXP152_DCDC4_V_OUT		0x2b
+#define AXP152_V_OFF			0x31
+#define AXP152_OFF_CTRL			0x32
+#define AXP152_PEK_KEY			0x36
+#define AXP152_DCDC_FREQ		0x37
+#define AXP152_DCDC_MODE		0x80
+
 #define AXP20X_PWR_INPUT_STATUS		0x00
 #define AXP20X_PWR_OP_MODE		0x01
 #define AXP20X_USB_OTG_STATUS		0x02
@@ -69,6 +88,13 @@
 #define AXP22X_CHRG_CTRL3		0x35
 
 /* Interrupt */
+#define AXP152_IRQ1_EN			0x40
+#define AXP152_IRQ2_EN			0x41
+#define AXP152_IRQ3_EN			0x42
+#define AXP152_IRQ1_STATE		0x48
+#define AXP152_IRQ2_STATE		0x49
+#define AXP152_IRQ3_STATE		0x4a
+
 #define AXP20X_IRQ1_EN			0x40
 #define AXP20X_IRQ2_EN			0x41
 #define AXP20X_IRQ3_EN			0x42
@@ -127,6 +153,19 @@
 #define AXP22X_PWREN_CTRL2		0x8d
 
 /* GPIO */
+#define AXP152_GPIO0_CTRL		0x90
+#define AXP152_GPIO1_CTRL		0x91
+#define AXP152_GPIO2_CTRL		0x92
+#define AXP152_GPIO3_CTRL		0x93
+#define AXP152_LDOGPIO2_V_OUT		0x96
+#define AXP152_GPIO_INPUT		0x97
+#define AXP152_PWM0_FREQ_X		0x98
+#define AXP152_PWM0_FREQ_Y		0x99
+#define AXP152_PWM0_DUTY_CYCLE		0x9a
+#define AXP152_PWM1_FREQ_X		0x9b
+#define AXP152_PWM1_FREQ_Y		0x9c
+#define AXP152_PWM1_DUTY_CYCLE		0x9d
+
 #define AXP20X_GPIO0_CTRL		0x90
 #define AXP20X_LDO5_V_OUT		0x91
 #define AXP20X_GPIO1_CTRL		0x92
@@ -151,6 +190,12 @@
 #define AXP20X_CC_CTRL			0xb8
 #define AXP20X_FG_RES			0xb9
 
+/* OCV */
+#define AXP20X_RDC_H			0xba
+#define AXP20X_RDC_L			0xbb
+#define AXP20X_OCV(m)			(0xc0 + (m))
+#define AXP20X_OCV_MAX			0xf
+
 /* AXP22X specific registers */
 #define AXP22X_BATLOW_THRES1		0xe6
 
@@ -218,6 +263,26 @@
 
 /* IRQs */
 enum {
+	AXP152_IRQ_LDO0IN_CONNECT = 1,
+	AXP152_IRQ_LDO0IN_REMOVAL,
+	AXP152_IRQ_ALDO0IN_CONNECT,
+	AXP152_IRQ_ALDO0IN_REMOVAL,
+	AXP152_IRQ_DCDC1_V_LOW,
+	AXP152_IRQ_DCDC2_V_LOW,
+	AXP152_IRQ_DCDC3_V_LOW,
+	AXP152_IRQ_DCDC4_V_LOW,
+	AXP152_IRQ_PEK_SHORT,
+	AXP152_IRQ_PEK_LONG,
+	AXP152_IRQ_TIMER,
+	AXP152_IRQ_PEK_RIS_EDGE,
+	AXP152_IRQ_PEK_FAL_EDGE,
+	AXP152_IRQ_GPIO3_INPUT,
+	AXP152_IRQ_GPIO2_INPUT,
+	AXP152_IRQ_GPIO1_INPUT,
+	AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
 	AXP20X_IRQ_ACIN_OVER_V = 1,
 	AXP20X_IRQ_ACIN_PLUGIN,
 	AXP20X_IRQ_ACIN_REMOVAL,
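The AXP152 IRQ enum above starts at 1, matching the convention of the other AXP20X banks. A hedged sketch of how the driver side might declare the bank (INIT_REGMAP_IRQ() is the helper already used in drivers/mfd/axp20x.c; the exact register/bit positions below are assumptions, not taken from the datasheet):

	static const struct regmap_irq axp152_regmap_irqs[] = {
		/* Assumed layout: AXP152_IRQ1_EN bit 7 signals LDO0IN insertion */
		INIT_REGMAP_IRQ(AXP152, LDO0IN_CONNECT, 0, 7),
		INIT_REGMAP_IRQ(AXP152, LDO0IN_REMOVAL, 0, 6),
		/* ... remaining entries follow the enum order above ... */
	};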
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 0000000..376ba84
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+	/* IRQ A */
+	DA9062_IRQ_ONKEY,
+	DA9062_IRQ_ALARM,
+	DA9062_IRQ_TICK,
+	DA9062_IRQ_WDG_WARN,
+	DA9062_IRQ_SEQ_RDY,
+	/* IRQ B */
+	DA9062_IRQ_TEMP,
+	DA9062_IRQ_LDO_LIM,
+	DA9062_IRQ_DVC_RDY,
+	DA9062_IRQ_VDD_WARN,
+	/* IRQ C */
+	DA9062_IRQ_GPI0,
+	DA9062_IRQ_GPI1,
+	DA9062_IRQ_GPI2,
+	DA9062_IRQ_GPI3,
+	DA9062_IRQ_GPI4,
+
+	DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 0000000..97790d1
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for the DA9062 PMIC
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID		0x62
+#define DA9062_PMIC_VARIANT_MRC_AA	0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT	1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON		0x000
+#define DA9062AA_STATUS_A		0x001
+#define DA9062AA_STATUS_B		0x002
+#define DA9062AA_STATUS_D		0x004
+#define DA9062AA_FAULT_LOG		0x005
+#define DA9062AA_EVENT_A		0x006
+#define DA9062AA_EVENT_B		0x007
+#define DA9062AA_EVENT_C		0x008
+#define DA9062AA_IRQ_MASK_A		0x00A
+#define DA9062AA_IRQ_MASK_B		0x00B
+#define DA9062AA_IRQ_MASK_C		0x00C
+#define DA9062AA_CONTROL_A		0x00E
+#define DA9062AA_CONTROL_B		0x00F
+#define DA9062AA_CONTROL_C		0x010
+#define DA9062AA_CONTROL_D		0x011
+#define DA9062AA_CONTROL_E		0x012
+#define DA9062AA_CONTROL_F		0x013
+#define DA9062AA_PD_DIS			0x014
+#define DA9062AA_GPIO_0_1		0x015
+#define DA9062AA_GPIO_2_3		0x016
+#define DA9062AA_GPIO_4			0x017
+#define DA9062AA_GPIO_WKUP_MODE		0x01C
+#define DA9062AA_GPIO_MODE0_4		0x01D
+#define DA9062AA_GPIO_OUT0_2		0x01E
+#define DA9062AA_GPIO_OUT3_4		0x01F
+#define DA9062AA_BUCK2_CONT		0x020
+#define DA9062AA_BUCK1_CONT		0x021
+#define DA9062AA_BUCK4_CONT		0x022
+#define DA9062AA_BUCK3_CONT		0x024
+#define DA9062AA_LDO1_CONT		0x026
+#define DA9062AA_LDO2_CONT		0x027
+#define DA9062AA_LDO3_CONT		0x028
+#define DA9062AA_LDO4_CONT		0x029
+#define DA9062AA_DVC_1			0x032
+#define DA9062AA_COUNT_S		0x040
+#define DA9062AA_COUNT_MI		0x041
+#define DA9062AA_COUNT_H		0x042
+#define DA9062AA_COUNT_D		0x043
+#define DA9062AA_COUNT_MO		0x044
+#define DA9062AA_COUNT_Y		0x045
+#define DA9062AA_ALARM_S		0x046
+#define DA9062AA_ALARM_MI		0x047
+#define DA9062AA_ALARM_H		0x048
+#define DA9062AA_ALARM_D		0x049
+#define DA9062AA_ALARM_MO		0x04A
+#define DA9062AA_ALARM_Y		0x04B
+#define DA9062AA_SECOND_A		0x04C
+#define DA9062AA_SECOND_B		0x04D
+#define DA9062AA_SECOND_C		0x04E
+#define DA9062AA_SECOND_D		0x04F
+#define DA9062AA_SEQ			0x081
+#define DA9062AA_SEQ_TIMER		0x082
+#define DA9062AA_ID_2_1			0x083
+#define DA9062AA_ID_4_3			0x084
+#define DA9062AA_ID_12_11		0x088
+#define DA9062AA_ID_14_13		0x089
+#define DA9062AA_ID_16_15		0x08A
+#define DA9062AA_ID_22_21		0x08D
+#define DA9062AA_ID_24_23		0x08E
+#define DA9062AA_ID_26_25		0x08F
+#define DA9062AA_ID_28_27		0x090
+#define DA9062AA_ID_30_29		0x091
+#define DA9062AA_ID_32_31		0x092
+#define DA9062AA_SEQ_A			0x095
+#define DA9062AA_SEQ_B			0x096
+#define DA9062AA_WAIT			0x097
+#define DA9062AA_EN_32K			0x098
+#define DA9062AA_RESET			0x099
+#define DA9062AA_BUCK_ILIM_A		0x09A
+#define DA9062AA_BUCK_ILIM_B		0x09B
+#define DA9062AA_BUCK_ILIM_C		0x09C
+#define DA9062AA_BUCK2_CFG		0x09D
+#define DA9062AA_BUCK1_CFG		0x09E
+#define DA9062AA_BUCK4_CFG		0x09F
+#define DA9062AA_BUCK3_CFG		0x0A0
+#define DA9062AA_VBUCK2_A		0x0A3
+#define DA9062AA_VBUCK1_A		0x0A4
+#define DA9062AA_VBUCK4_A		0x0A5
+#define DA9062AA_VBUCK3_A		0x0A7
+#define DA9062AA_VLDO1_A		0x0A9
+#define DA9062AA_VLDO2_A		0x0AA
+#define DA9062AA_VLDO3_A		0x0AB
+#define DA9062AA_VLDO4_A		0x0AC
+#define DA9062AA_VBUCK2_B		0x0B4
+#define DA9062AA_VBUCK1_B		0x0B5
+#define DA9062AA_VBUCK4_B		0x0B6
+#define DA9062AA_VBUCK3_B		0x0B8
+#define DA9062AA_VLDO1_B		0x0BA
+#define DA9062AA_VLDO2_B		0x0BB
+#define DA9062AA_VLDO3_B		0x0BC
+#define DA9062AA_VLDO4_B		0x0BD
+#define DA9062AA_BBAT_CONT		0x0C5
+#define DA9062AA_INTERFACE		0x105
+#define DA9062AA_CONFIG_A		0x106
+#define DA9062AA_CONFIG_B		0x107
+#define DA9062AA_CONFIG_C		0x108
+#define DA9062AA_CONFIG_D		0x109
+#define DA9062AA_CONFIG_E		0x10A
+#define DA9062AA_CONFIG_G		0x10C
+#define DA9062AA_CONFIG_H		0x10D
+#define DA9062AA_CONFIG_I		0x10E
+#define DA9062AA_CONFIG_J		0x10F
+#define DA9062AA_CONFIG_K		0x110
+#define DA9062AA_CONFIG_M		0x112
+#define DA9062AA_TRIM_CLDR		0x120
+#define DA9062AA_GP_ID_0		0x121
+#define DA9062AA_GP_ID_1		0x122
+#define DA9062AA_GP_ID_2		0x123
+#define DA9062AA_GP_ID_3		0x124
+#define DA9062AA_GP_ID_4		0x125
+#define DA9062AA_GP_ID_5		0x126
+#define DA9062AA_GP_ID_6		0x127
+#define DA9062AA_GP_ID_7		0x128
+#define DA9062AA_GP_ID_8		0x129
+#define DA9062AA_GP_ID_9		0x12A
+#define DA9062AA_GP_ID_10		0x12B
+#define DA9062AA_GP_ID_11		0x12C
+#define DA9062AA_GP_ID_12		0x12D
+#define DA9062AA_GP_ID_13		0x12E
+#define DA9062AA_GP_ID_14		0x12F
+#define DA9062AA_GP_ID_15		0x130
+#define DA9062AA_GP_ID_16		0x131
+#define DA9062AA_GP_ID_17		0x132
+#define DA9062AA_GP_ID_18		0x133
+#define DA9062AA_GP_ID_19		0x134
+#define DA9062AA_DEVICE_ID		0x181
+#define DA9062AA_VARIANT_ID		0x182
+#define DA9062AA_CUSTOMER_ID		0x183
+#define DA9062AA_CONFIG_ID		0x184
+
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT		0
+#define DA9062AA_PAGE_MASK		0x3f
+#define DA9062AA_WRITE_MODE_SHIFT	6
+#define DA9062AA_WRITE_MODE_MASK	BIT(6)
+#define DA9062AA_REVERT_SHIFT		7
+#define DA9062AA_REVERT_MASK		BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT		0
+#define DA9062AA_NONKEY_MASK		0x01
+#define DA9062AA_DVC_BUSY_SHIFT		2
+#define DA9062AA_DVC_BUSY_MASK		BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT		0
+#define DA9062AA_GPI0_MASK		0x01
+#define DA9062AA_GPI1_SHIFT		1
+#define DA9062AA_GPI1_MASK		BIT(1)
+#define DA9062AA_GPI2_SHIFT		2
+#define DA9062AA_GPI2_MASK		BIT(2)
+#define DA9062AA_GPI3_SHIFT		3
+#define DA9062AA_GPI3_MASK		BIT(3)
+#define DA9062AA_GPI4_SHIFT		4
+#define DA9062AA_GPI4_MASK		BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT	0
+#define DA9062AA_LDO1_ILIM_MASK		0x01
+#define DA9062AA_LDO2_ILIM_SHIFT	1
+#define DA9062AA_LDO2_ILIM_MASK		BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT	2
+#define DA9062AA_LDO3_ILIM_MASK		BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT	3
+#define DA9062AA_LDO4_ILIM_MASK		BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT	0
+#define DA9062AA_TWD_ERROR_MASK		0x01
+#define DA9062AA_POR_SHIFT		1
+#define DA9062AA_POR_MASK		BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT	2
+#define DA9062AA_VDD_FAULT_MASK		BIT(2)
+#define DA9062AA_VDD_START_SHIFT	3
+#define DA9062AA_VDD_START_MASK		BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT	4
+#define DA9062AA_TEMP_CRIT_MASK		BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT	5
+#define DA9062AA_KEY_RESET_MASK		BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT	6
+#define DA9062AA_NSHUTDOWN_MASK		BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT	7
+#define DA9062AA_WAIT_SHUT_MASK		BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT		0
+#define DA9062AA_E_NONKEY_MASK		0x01
+#define DA9062AA_E_ALARM_SHIFT		1
+#define DA9062AA_E_ALARM_MASK		BIT(1)
+#define DA9062AA_E_TICK_SHIFT		2
+#define DA9062AA_E_TICK_MASK		BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT	3
+#define DA9062AA_E_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT	4
+#define DA9062AA_E_SEQ_RDY_MASK		BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT		5
+#define DA9062AA_EVENTS_B_MASK		BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT		6
+#define DA9062AA_EVENTS_C_MASK		BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT		1
+#define DA9062AA_E_TEMP_MASK		BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT	3
+#define DA9062AA_E_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT	5
+#define DA9062AA_E_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT	7
+#define DA9062AA_E_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT		0
+#define DA9062AA_E_GPI0_MASK		0x01
+#define DA9062AA_E_GPI1_SHIFT		1
+#define DA9062AA_E_GPI1_MASK		BIT(1)
+#define DA9062AA_E_GPI2_SHIFT		2
+#define DA9062AA_E_GPI2_MASK		BIT(2)
+#define DA9062AA_E_GPI3_SHIFT		3
+#define DA9062AA_E_GPI3_MASK		BIT(3)
+#define DA9062AA_E_GPI4_SHIFT		4
+#define DA9062AA_E_GPI4_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT		0
+#define DA9062AA_M_NONKEY_MASK		0x01
+#define DA9062AA_M_ALARM_SHIFT		1
+#define DA9062AA_M_ALARM_MASK		BIT(1)
+#define DA9062AA_M_TICK_SHIFT		2
+#define DA9062AA_M_TICK_MASK		BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT	3
+#define DA9062AA_M_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT	4
+#define DA9062AA_M_SEQ_RDY_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT		1
+#define DA9062AA_M_TEMP_MASK		BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT	3
+#define DA9062AA_M_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT	5
+#define DA9062AA_M_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT	7
+#define DA9062AA_M_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT		0
+#define DA9062AA_M_GPI0_MASK		0x01
+#define DA9062AA_M_GPI1_SHIFT		1
+#define DA9062AA_M_GPI1_MASK		BIT(1)
+#define DA9062AA_M_GPI2_SHIFT		2
+#define DA9062AA_M_GPI2_MASK		BIT(2)
+#define DA9062AA_M_GPI3_SHIFT		3
+#define DA9062AA_M_GPI3_MASK		BIT(3)
+#define DA9062AA_M_GPI4_SHIFT		4
+#define DA9062AA_M_GPI4_MASK		BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT	0
+#define DA9062AA_SYSTEM_EN_MASK		0x01
+#define DA9062AA_POWER_EN_SHIFT		1
+#define DA9062AA_POWER_EN_MASK		BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT	2
+#define DA9062AA_POWER1_EN_MASK		BIT(2)
+#define DA9062AA_STANDBY_SHIFT		3
+#define DA9062AA_STANDBY_MASK		BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT	4
+#define DA9062AA_M_SYSTEM_EN_MASK	BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT	5
+#define DA9062AA_M_POWER_EN_MASK	BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT	6
+#define DA9062AA_M_POWER1_EN_MASK	BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT	1
+#define DA9062AA_WATCHDOG_PD_MASK	BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT	2
+#define DA9062AA_FREEZE_EN_MASK		BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT	3
+#define DA9062AA_NRES_MODE_MASK		BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT	4
+#define DA9062AA_NONKEY_LOCK_MASK	BIT(4)
+#define DA9062AA_NFREEZE_SHIFT		5
+#define DA9062AA_NFREEZE_MASK		(0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT	7
+#define DA9062AA_BUCK_SLOWSTART_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT	0
+#define DA9062AA_DEBOUNCING_MASK	0x07
+#define DA9062AA_AUTO_BOOT_SHIFT	3
+#define DA9062AA_AUTO_BOOT_MASK		BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT	4
+#define DA9062AA_OTPREAD_EN_MASK	BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT	5
+#define DA9062AA_SLEW_RATE_MASK		(0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT	7
+#define DA9062AA_DEF_SUPPLY_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT		0
+#define DA9062AA_TWDSCALE_MASK		0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT	0
+#define DA9062AA_RTC_MODE_PD_MASK	0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT	1
+#define DA9062AA_RTC_MODE_SD_MASK	BIT(1)
+#define DA9062AA_RTC_EN_SHIFT		2
+#define DA9062AA_RTC_EN_MASK		BIT(2)
+#define DA9062AA_V_LOCK_SHIFT		7
+#define DA9062AA_V_LOCK_MASK		BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT		0
+#define DA9062AA_WATCHDOG_MASK		0x01
+#define DA9062AA_SHUTDOWN_SHIFT		1
+#define DA9062AA_SHUTDOWN_MASK		BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT		2
+#define DA9062AA_WAKE_UP_MASK		BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT		0
+#define DA9062AA_GPI_DIS_MASK		0x01
+#define DA9062AA_PMIF_DIS_SHIFT		2
+#define DA9062AA_PMIF_DIS_MASK		BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT	4
+#define DA9062AA_CLDR_PAUSE_MASK	BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT		5
+#define DA9062AA_BBAT_DIS_MASK		BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT	6
+#define DA9062AA_OUT32K_PAUSE_MASK	BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT	7
+#define DA9062AA_PMCONT_DIS_MASK	BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT	0
+#define DA9062AA_GPIO0_PIN_MASK		0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT	2
+#define DA9062AA_GPIO0_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT	3
+#define DA9062AA_GPIO0_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT	4
+#define DA9062AA_GPIO1_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT	6
+#define DA9062AA_GPIO1_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT	7
+#define DA9062AA_GPIO1_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT	0
+#define DA9062AA_GPIO2_PIN_MASK		0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT	2
+#define DA9062AA_GPIO2_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT	3
+#define DA9062AA_GPIO2_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT	4
+#define DA9062AA_GPIO3_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT	6
+#define DA9062AA_GPIO3_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT	7
+#define DA9062AA_GPIO3_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT	0
+#define DA9062AA_GPIO4_PIN_MASK		0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT	2
+#define DA9062AA_GPIO4_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT	3
+#define DA9062AA_GPIO4_WEN_MASK		BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT	0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK	0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT	1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT	2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT	3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT	4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT	0
+#define DA9062AA_GPIO0_MODE_MASK	0x01
+#define DA9062AA_GPIO1_MODE_SHIFT	1
+#define DA9062AA_GPIO1_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT	2
+#define DA9062AA_GPIO2_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT	3
+#define DA9062AA_GPIO3_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT	4
+#define DA9062AA_GPIO4_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT	0
+#define DA9062AA_GPIO0_OUT_MASK		0x07
+#define DA9062AA_GPIO1_OUT_SHIFT	3
+#define DA9062AA_GPIO1_OUT_MASK		(0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT	6
+#define DA9062AA_GPIO2_OUT_MASK		(0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT	0
+#define DA9062AA_GPIO3_OUT_MASK		0x07
+#define DA9062AA_GPIO4_OUT_SHIFT	3
+#define DA9062AA_GPIO4_OUT_MASK		(0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT		0
+#define DA9062AA_BUCK2_EN_MASK		0x01
+#define DA9062AA_BUCK2_GPI_SHIFT	1
+#define DA9062AA_BUCK2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT	3
+#define DA9062AA_BUCK2_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT	5
+#define DA9062AA_VBUCK2_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT		0
+#define DA9062AA_BUCK1_EN_MASK		0x01
+#define DA9062AA_BUCK1_GPI_SHIFT	1
+#define DA9062AA_BUCK1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT	3
+#define DA9062AA_BUCK1_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT	5
+#define DA9062AA_VBUCK1_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT		0
+#define DA9062AA_BUCK4_EN_MASK		0x01
+#define DA9062AA_BUCK4_GPI_SHIFT	1
+#define DA9062AA_BUCK4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT	3
+#define DA9062AA_BUCK4_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT	5
+#define DA9062AA_VBUCK4_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT		0
+#define DA9062AA_BUCK3_EN_MASK		0x01
+#define DA9062AA_BUCK3_GPI_SHIFT	1
+#define DA9062AA_BUCK3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT	3
+#define DA9062AA_BUCK3_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT	5
+#define DA9062AA_VBUCK3_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT		0
+#define DA9062AA_LDO1_EN_MASK		0x01
+#define DA9062AA_LDO1_GPI_SHIFT		1
+#define DA9062AA_LDO1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT	3
+#define DA9062AA_LDO1_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT	5
+#define DA9062AA_VLDO1_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT	7
+#define DA9062AA_LDO1_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT		0
+#define DA9062AA_LDO2_EN_MASK		0x01
+#define DA9062AA_LDO2_GPI_SHIFT		1
+#define DA9062AA_LDO2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT	3
+#define DA9062AA_LDO2_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT	5
+#define DA9062AA_VLDO2_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT	7
+#define DA9062AA_LDO2_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT		0
+#define DA9062AA_LDO3_EN_MASK		0x01
+#define DA9062AA_LDO3_GPI_SHIFT		1
+#define DA9062AA_LDO3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT	3
+#define DA9062AA_LDO3_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT	5
+#define DA9062AA_VLDO3_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT	7
+#define DA9062AA_LDO3_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT		0
+#define DA9062AA_LDO4_EN_MASK		0x01
+#define DA9062AA_LDO4_GPI_SHIFT		1
+#define DA9062AA_LDO4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT	3
+#define DA9062AA_LDO4_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT	5
+#define DA9062AA_VLDO4_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT	7
+#define DA9062AA_LDO4_CONF_MASK		BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT	0
+#define DA9062AA_VBUCK1_SEL_MASK	0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT	1
+#define DA9062AA_VBUCK2_SEL_MASK	BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT	2
+#define DA9062AA_VBUCK4_SEL_MASK	BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT	3
+#define DA9062AA_VBUCK3_SEL_MASK	BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT	4
+#define DA9062AA_VLDO1_SEL_MASK		BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT	5
+#define DA9062AA_VLDO2_SEL_MASK		BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT	6
+#define DA9062AA_VLDO3_SEL_MASK		BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT	7
+#define DA9062AA_VLDO4_SEL_MASK		BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT	0
+#define DA9062AA_COUNT_SEC_MASK		0x3f
+#define DA9062AA_RTC_READ_SHIFT		7
+#define DA9062AA_RTC_READ_MASK		BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT	0
+#define DA9062AA_COUNT_MIN_MASK		0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT	0
+#define DA9062AA_COUNT_HOUR_MASK	0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT	0
+#define DA9062AA_COUNT_DAY_MASK		0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT	0
+#define DA9062AA_COUNT_MONTH_MASK	0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT	0
+#define DA9062AA_COUNT_YEAR_MASK	0x3f
+#define DA9062AA_MONITOR_SHIFT		6
+#define DA9062AA_MONITOR_MASK		BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT	0
+#define DA9062AA_ALARM_SEC_MASK		0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT	6
+#define DA9062AA_ALARM_STATUS_MASK	(0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT	0
+#define DA9062AA_ALARM_MIN_MASK		0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT	0
+#define DA9062AA_ALARM_HOUR_MASK	0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT	0
+#define DA9062AA_ALARM_DAY_MASK		0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT	0
+#define DA9062AA_ALARM_MONTH_MASK	0x0f
+#define DA9062AA_TICK_TYPE_SHIFT	4
+#define DA9062AA_TICK_TYPE_MASK		BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT	5
+#define DA9062AA_TICK_WAKE_MASK		BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT	0
+#define DA9062AA_ALARM_YEAR_MASK	0x3f
+#define DA9062AA_ALARM_ON_SHIFT		6
+#define DA9062AA_ALARM_ON_MASK		BIT(6)
+#define DA9062AA_TICK_ON_SHIFT		7
+#define DA9062AA_TICK_ON_MASK		BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT	0
+#define DA9062AA_SECONDS_A_MASK		0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT	0
+#define DA9062AA_SECONDS_B_MASK		0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT	0
+#define DA9062AA_SECONDS_C_MASK		0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT	0
+#define DA9062AA_SECONDS_D_MASK		0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT	0
+#define DA9062AA_SEQ_POINTER_MASK	0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT	4
+#define DA9062AA_NXT_SEQ_START_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT		0
+#define DA9062AA_SEQ_TIME_MASK		0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT	4
+#define DA9062AA_SEQ_DUMMY_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT	0
+#define DA9062AA_LDO1_STEP_MASK		0x0f
+#define DA9062AA_LDO2_STEP_SHIFT	4
+#define DA9062AA_LDO2_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT	0
+#define DA9062AA_LDO3_STEP_MASK		0x0f
+#define DA9062AA_LDO4_STEP_SHIFT	4
+#define DA9062AA_LDO4_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT	4
+#define DA9062AA_PD_DIS_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT	0
+#define DA9062AA_BUCK1_STEP_MASK	0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT	4
+#define DA9062AA_BUCK2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT	0
+#define DA9062AA_BUCK4_STEP_MASK	0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT	4
+#define DA9062AA_BUCK3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT	0
+#define DA9062AA_GP_RISE1_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT	4
+#define DA9062AA_GP_FALL1_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT	0
+#define DA9062AA_GP_RISE2_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT	4
+#define DA9062AA_GP_FALL2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT	0
+#define DA9062AA_GP_RISE3_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT	4
+#define DA9062AA_GP_FALL3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT	0
+#define DA9062AA_GP_RISE4_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT	4
+#define DA9062AA_GP_FALL4_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT	0
+#define DA9062AA_GP_RISE5_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT	4
+#define DA9062AA_GP_FALL5_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT	0
+#define DA9062AA_WAIT_STEP_MASK		0x0f
+#define DA9062AA_EN32K_STEP_SHIFT	4
+#define DA9062AA_EN32K_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT	0
+#define DA9062AA_SYSTEM_END_MASK	0x0f
+#define DA9062AA_POWER_END_SHIFT	4
+#define DA9062AA_POWER_END_MASK		(0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT	0
+#define DA9062AA_MAX_COUNT_MASK		0x0f
+#define DA9062AA_PART_DOWN_SHIFT	4
+#define DA9062AA_PART_DOWN_MASK		(0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT	0
+#define DA9062AA_WAIT_TIME_MASK		0x0f
+#define DA9062AA_WAIT_MODE_SHIFT	4
+#define DA9062AA_WAIT_MODE_MASK		BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT		5
+#define DA9062AA_TIME_OUT_MASK		BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT		6
+#define DA9062AA_WAIT_DIR_MASK		(0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT	0
+#define DA9062AA_STABILISATION_TIME_MASK	0x07
+#define DA9062AA_CRYSTAL_SHIFT			3
+#define DA9062AA_CRYSTAL_MASK			BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT		4
+#define DA9062AA_DELAY_MODE_MASK		BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT		5
+#define DA9062AA_OUT_CLOCK_MASK			BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT		6
+#define DA9062AA_RTC_CLOCK_MASK			BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT		7
+#define DA9062AA_EN_32KOUT_MASK			BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT	0
+#define DA9062AA_RESET_TIMER_MASK	0x3f
+#define DA9062AA_RESET_EVENT_SHIFT	6
+#define DA9062AA_RESET_EVENT_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT	0
+#define DA9062AA_BUCK3_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT	0
+#define DA9062AA_BUCK4_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT	0
+#define DA9062AA_BUCK1_ILIM_MASK	0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT	4
+#define DA9062AA_BUCK2_ILIM_MASK	(0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK2_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT	6
+#define DA9062AA_BUCK2_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK1_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT	6
+#define DA9062AA_BUCK1_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT	3
+#define DA9062AA_BUCK4_VTTR_EN_MASK	BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT	4
+#define DA9062AA_BUCK4_VTT_EN_MASK	BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK4_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT	6
+#define DA9062AA_BUCK4_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK3_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT	6
+#define DA9062AA_BUCK3_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT		0
+#define DA9062AA_VBUCK2_A_MASK		0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT	7
+#define DA9062AA_BUCK2_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT		0
+#define DA9062AA_VBUCK1_A_MASK		0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT	7
+#define DA9062AA_BUCK1_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT		0
+#define DA9062AA_VBUCK4_A_MASK		0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT	7
+#define DA9062AA_BUCK4_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT		0
+#define DA9062AA_VBUCK3_A_MASK		0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT	7
+#define DA9062AA_BUCK3_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT		0
+#define DA9062AA_VLDO1_A_MASK		0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT	7
+#define DA9062AA_LDO1_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT		0
+#define DA9062AA_VLDO2_A_MASK		0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT	7
+#define DA9062AA_LDO2_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT		0
+#define DA9062AA_VLDO3_A_MASK		0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT	7
+#define DA9062AA_LDO3_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT		0
+#define DA9062AA_VLDO4_A_MASK		0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT	7
+#define DA9062AA_LDO4_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT		0
+#define DA9062AA_VBUCK2_B_MASK		0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT	7
+#define DA9062AA_BUCK2_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT		0
+#define DA9062AA_VBUCK1_B_MASK		0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT	7
+#define DA9062AA_BUCK1_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT		0
+#define DA9062AA_VBUCK4_B_MASK		0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT	7
+#define DA9062AA_BUCK4_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT		0
+#define DA9062AA_VBUCK3_B_MASK		0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT	7
+#define DA9062AA_BUCK3_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT		0
+#define DA9062AA_VLDO1_B_MASK		0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT	7
+#define DA9062AA_LDO1_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT		0
+#define DA9062AA_VLDO2_B_MASK		0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT	7
+#define DA9062AA_LDO2_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT		0
+#define DA9062AA_VLDO3_B_MASK		0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT	7
+#define DA9062AA_LDO3_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT		0
+#define DA9062AA_VLDO4_B_MASK		0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT	7
+#define DA9062AA_LDO4_SL_B_MASK		BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT	0
+#define DA9062AA_BCHG_VSET_MASK		0x0f
+#define DA9062AA_BCHG_ISET_SHIFT	4
+#define DA9062AA_BCHG_ISET_MASK		(0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT	4
+#define DA9062AA_IF_BASE_ADDR_MASK	(0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT		0
+#define DA9062AA_PM_I_V_MASK		0x01
+#define DA9062AA_PM_O_TYPE_SHIFT	2
+#define DA9062AA_PM_O_TYPE_MASK		BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT		3
+#define DA9062AA_IRQ_TYPE_MASK		BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT		4
+#define DA9062AA_PM_IF_V_MASK		BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT	5
+#define DA9062AA_PM_IF_FMP_MASK		BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT	6
+#define DA9062AA_PM_IF_HSM_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT	0
+#define DA9062AA_VDD_FAULT_ADJ_MASK	0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT	4
+#define DA9062AA_VDD_HYST_ADJ_MASK	(0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT	2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK		BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT		3
+#define DA9062AA_BUCK1_CLK_INV_MASK		BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT		4
+#define DA9062AA_BUCK4_CLK_INV_MASK		BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT		6
+#define DA9062AA_BUCK3_CLK_INV_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT		0
+#define DA9062AA_GPI_V_MASK		0x01
+#define DA9062AA_NIRQ_MODE_SHIFT	1
+#define DA9062AA_NIRQ_MODE_MASK		BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT	2
+#define DA9062AA_SYSTEM_EN_RD_MASK	BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT	5
+#define DA9062AA_FORCE_RESET_MASK	BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT	0
+#define DA9062AA_BUCK1_AUTO_MASK	0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT	1
+#define DA9062AA_BUCK2_AUTO_MASK	BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT	2
+#define DA9062AA_BUCK4_AUTO_MASK	BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT	4
+#define DA9062AA_BUCK3_AUTO_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT	0
+#define DA9062AA_LDO1_AUTO_MASK		0x01
+#define DA9062AA_LDO2_AUTO_SHIFT	1
+#define DA9062AA_LDO2_AUTO_MASK		BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT	2
+#define DA9062AA_LDO3_AUTO_MASK		BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT	3
+#define DA9062AA_LDO4_AUTO_MASK		BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT	3
+#define DA9062AA_BUCK1_2_MERGE_MASK	BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT		5
+#define DA9062AA_BUCK2_OD_MASK		BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT		6
+#define DA9062AA_BUCK1_OD_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT	0
+#define DA9062AA_NONKEY_PIN_MASK	0x03
+#define DA9062AA_nONKEY_SD_SHIFT	2
+#define DA9062AA_nONKEY_SD_MASK		BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT	3
+#define DA9062AA_WATCHDOG_SD_MASK	BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT	4
+#define DA9062AA_KEY_SD_MODE_MASK	BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT	5
+#define DA9062AA_HOST_SD_MODE_MASK	BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT	6
+#define DA9062AA_INT_SD_MODE_MASK	BIT(6)
+#define DA9062AA_LDO_SD_SHIFT		7
+#define DA9062AA_LDO_SD_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT	0
+#define DA9062AA_KEY_DELAY_MASK		0x03
+#define DA9062AA_SHUT_DELAY_SHIFT	2
+#define DA9062AA_SHUT_DELAY_MASK	(0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT	4
+#define DA9062AA_RESET_DURATION_MASK	(0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT	6
+#define DA9062AA_TWOWIRE_TO_MASK	BIT(6)
+#define DA9062AA_IF_RESET_SHIFT		7
+#define DA9062AA_IF_RESET_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT	0
+#define DA9062AA_GPIO0_PUPD_MASK	0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT	1
+#define DA9062AA_GPIO1_PUPD_MASK	BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT	2
+#define DA9062AA_GPIO2_PUPD_MASK	BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT	3
+#define DA9062AA_GPIO3_PUPD_MASK	BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT	4
+#define DA9062AA_GPIO4_PUPD_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT	1
+#define DA9062AA_NSHUTDOWN_PU_MASK	BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT		3
+#define DA9062AA_WDG_MODE_MASK		BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT		4
+#define DA9062AA_OSC_FRQ_MASK		(0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT	0
+#define DA9062AA_TRIM_CLDR_MASK		0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT		0
+#define DA9062AA_GP_0_MASK		0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT		0
+#define DA9062AA_GP_1_MASK		0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT		0
+#define DA9062AA_GP_2_MASK		0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT		0
+#define DA9062AA_GP_3_MASK		0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT		0
+#define DA9062AA_GP_4_MASK		0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT		0
+#define DA9062AA_GP_5_MASK		0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT		0
+#define DA9062AA_GP_6_MASK		0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT		0
+#define DA9062AA_GP_7_MASK		0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT		0
+#define DA9062AA_GP_8_MASK		0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT		0
+#define DA9062AA_GP_9_MASK		0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT		0
+#define DA9062AA_GP_10_MASK		0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT		0
+#define DA9062AA_GP_11_MASK		0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT		0
+#define DA9062AA_GP_12_MASK		0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT		0
+#define DA9062AA_GP_13_MASK		0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT		0
+#define DA9062AA_GP_14_MASK		0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT		0
+#define DA9062AA_GP_15_MASK		0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT		0
+#define DA9062AA_GP_16_MASK		0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT		0
+#define DA9062AA_GP_17_MASK		0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT		0
+#define DA9062AA_GP_18_MASK		0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT		0
+#define DA9062AA_GP_19_MASK		0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT		0
+#define DA9062AA_DEV_ID_MASK		0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT		0
+#define DA9062AA_VRC_MASK		0x0f
+#define DA9062AA_MRC_SHIFT		4
+#define DA9062AA_MRC_MASK		(0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT		0
+#define DA9062AA_CUST_ID_MASK		0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT	0
+#define DA9062AA_CONFIG_REV_MASK	0xff
+
+#endif /* __DA9062_H__ */
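A minimal sketch of how these definitions are consumed, assuming the struct da9062 handle from core.h above: kicking the watchdog means setting the single-bit WATCHDOG field in CONTROL_F.

	/* Sketch: ping the DA9062 watchdog (WATCHDOG is bit 0 of CONTROL_F);
	 * chip->regmap is the handle from struct da9062 in core.h. */
	ret = regmap_update_bits(chip->regmap, DA9062AA_CONTROL_F,
				 DA9062AA_WATCHDOG_MASK,
				 DA9062AA_WATCHDOG_MASK);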
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d82..621af82 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@
 	DA9063_IRQ_COMP_1V2,
 	DA9063_IRQ_LDO_LIM,
 	DA9063_IRQ_REG_UVOV,
+	DA9063_IRQ_DVC_RDY,
 	DA9063_IRQ_VDD_MON,
 	DA9063_IRQ_WARN,
 	DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac78..2b300b4 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
 #ifndef LPC_ICH_H
 #define LPC_ICH_H
 
-/* Watchdog resources */
-#define ICH_RES_IO_TCO		0
-#define ICH_RES_IO_SMI		1
-#define ICH_RES_MEM_OFF		2
-#define ICH_RES_MEM_GCS_PMC	0
-
 /* GPIO resources */
 #define ICH_RES_GPIO	0
 #define ICH_RES_GPE0	1
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b..45b8e8a 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@
 	int irq;
 	struct irq_domain *irq_domain;
 	struct mutex irqlock;
+	u16 wake_mask[2];
 	u16 irq_masks_cur[2];
 	u16 irq_masks_cache[2];
 };
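The new wake_mask[] lets the IRQ code program only wake-capable sources across suspend, while irq_masks_cur[] preserves the runtime state for resume. A rough sketch, assuming regmap and int_con[] fields that the full struct carries but this hunk does not show:

	static int mt6397_irq_suspend(struct device *dev)
	{
		struct mt6397_chip *chip = dev_get_drvdata(dev);

		/* Assumption: chip->regmap and chip->int_con[] exist in the
		 * full struct; only wake sources stay enabled while asleep. */
		regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
		regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
		return 0;
	}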
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf6f117..f25a957 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -124,8 +124,10 @@
 #define VM_MAYSHARE	0x00000080
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
+#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
 
 #define VM_LOCKED	0x00002000
 #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
@@ -245,7 +247,10 @@
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
+	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+						pmd_t *, unsigned int flags);
 	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
@@ -304,18 +309,6 @@
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
-/* It's valid only if the page is free path or free_list */
-static inline void set_freepage_migratetype(struct page *page, int migratetype)
-{
-	page->index = migratetype;
-}
-
-/* It's valid only if the page is free path or free_list */
-static inline int get_freepage_migratetype(struct page *page)
-{
-	return page->index;
-}
-
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
@@ -356,20 +349,15 @@
 	return atomic_inc_not_zero(&page->_count);
 }
 
-/*
- * Try to drop a ref unless the page has a refcount of one, return false if
- * that is the case.
- * This is to make sure that the refcount won't become zero after this drop.
- * This can be called when MMU is off so it must not access
- * any of the virtual mappings.
- */
-static inline int put_page_unless_one(struct page *page)
-{
-	return atomic_add_unless(&page->_count, -1, 1);
-}
-
 extern int page_is_ram(unsigned long pfn);
-extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
+
+enum {
+	REGION_INTERSECTS,
+	REGION_DISJOINT,
+	REGION_MIXED,
+};
+
+int region_intersects(resource_size_t offset, size_t size, const char *type);
 
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
@@ -1257,6 +1245,11 @@
 	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
 }
 
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+	return !vma->vm_ops;
+}
+
 static inline int stack_guard_page_start(struct vm_area_struct *vma,
 					     unsigned long addr)
 {
@@ -1833,7 +1826,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *);
+	struct mempolicy *, struct vm_userfaultfd_ctx);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
@@ -2183,6 +2176,7 @@
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int get_hwpoison_page(struct page *page);
+extern void put_hwpoison_page(struct page *page);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
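region_intersects() turns the old boolean region_is_ram() into a tri-state answer, so callers can tell a fully disjoint range from a partially overlapping one. An illustrative caller, with error handling abbreviated:

	/* Sketch: refuse to remap a range unless it is entirely free of RAM */
	switch (region_intersects(offset, size, "System RAM")) {
	case REGION_DISJOINT:
		break;			/* safe to remap */
	case REGION_MIXED:
	case REGION_INTERSECTS:
	default:
		return ERR_PTR(-ENXIO);	/* some or all of it is RAM */
	}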
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 1554957..3d6baa7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -235,7 +235,7 @@
 	bool pfmemalloc;
 };
 
-typedef unsigned long __nocast vm_flags_t;
+typedef unsigned long vm_flags_t;
 
 /*
  * A region containing a mapping of a non-memory backed file under NOMMU
@@ -256,6 +256,16 @@
 						* this region */
 };
 
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+	struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
 /*
  * This struct defines a memory VMM memory area. There is one of these
  * per VM-area/task.  A VM area is any part of the process virtual memory
@@ -322,6 +332,7 @@
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
+	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 };
 
 struct core_thread {
@@ -543,6 +554,7 @@
 	TLB_REMOTE_SHOOTDOWN,
 	TLB_LOCAL_SHOOTDOWN,
 	TLB_LOCAL_MM_SHOOTDOWN,
+	TLB_REMOTE_SEND_IPI,
 	NR_TLB_FLUSH_REASONS,
 };
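vma_merge() (see the mm.h hunk above) gained a trailing struct vm_userfaultfd_ctx argument so merging respects userfaultfd registration; callers with no registration pass the empty context. A hedged sketch of the updated call pattern, with the other arguments abbreviated:

	/* Sketch: an anonymous mapping with no userfaultfd registration */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);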
 
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d3776d..fdd0779 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -279,10 +279,13 @@
 #define MMC_QUIRK_LONG_READ_TIME (1<<9)		/* Data read time > CSD says */
 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)	/* Skip secure for erase/trim */
 #define MMC_QUIRK_BROKEN_IRQ_POLLING	(1<<11)	/* Polling SDIO_CCCR_INTx could create a fake interrupt */
+#define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
 
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
  	unsigned int		pref_erase;	/* in sectors */
+	unsigned int		eg_boundary;	/* don't cross erase-group boundaries */
  	u8			erased_byte;	/* value of erased bytes */
 
 	u32			raw_cid[4];	/* raw card CID */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 5be9767..134c574 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -98,6 +98,7 @@
  * @irq_flags: The flags to be passed to request_irq.
  * @irq: The irq value to be passed to request_irq.
  * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @dto_timer: Software timeout for hosts with a broken data transfer over (DTO) interrupt.
  *
  * Locking
  * =======
@@ -153,11 +154,7 @@
 	dma_addr_t		sg_dma;
 	void			*sg_cpu;
 	const struct dw_mci_dma_ops	*dma_ops;
-#ifdef CONFIG_MMC_DW_IDMAC
 	unsigned int		ring_size;
-#else
-	struct dw_mci_dma_data	*dma_data;
-#endif
 	u32			cmd_status;
 	u32			data_status;
 	u32			stop_cmdr;
@@ -204,6 +201,7 @@
 	int			sdio_id0;
 
 	struct timer_list       cmd11_timer;
+	struct timer_list       dto_timer;
 };
 
 /* DMA ops for Internal/External DMAC interface */
@@ -226,6 +224,8 @@
 #define DW_MCI_QUIRK_HIGHSPEED			BIT(2)
 /* Unreliable card detection */
 #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION	BIT(3)
+/* Software timeout for hosts with a broken data transfer over (DTO) interrupt */
+#define DW_MCI_QUIRK_BROKEN_DTO			BIT(4)
 
 struct dma_pdata;
 
@@ -259,7 +259,6 @@
 
 	struct dw_mci_dma_ops *dma_ops;
 	struct dma_pdata *data;
-	struct block_settings *blk_settings;
 };
 
 #endif /* LINUX_MMC_DW_MMC_H */
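The new dto_timer backs the DW_MCI_QUIRK_BROKEN_DTO quirk: on hosts that may never raise the data-transfer-over interrupt, a software timer completes the request instead. A platform would opt in through its board data, roughly (assuming the quirks field of struct dw_mci_board, which this hunk does not show):

	/* Sketch: request the software DTO timeout on an affected host */
	pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;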
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1369e54..83b81fd 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -412,7 +412,8 @@
 {
 	host->ops->enable_sdio_irq(host, 0);
 	host->sdio_irq_pending = true;
-	wake_up_process(host->sdio_irq_thread);
+	if (host->sdio_irq_thread)
+		wake_up_process(host->sdio_irq_thread);
 }
 
 void sdio_run_irqs(struct mmc_host *host);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c259..d943477 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -319,7 +319,11 @@
 	ZONE_HIGHMEM,
 #endif
 	ZONE_MOVABLE,
+#ifdef CONFIG_ZONE_DEVICE
+	ZONE_DEVICE,
+#endif
 	__MAX_NR_ZONES
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -690,14 +694,6 @@
 #endif
 };
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-struct node_active_region {
-	unsigned long start_pfn;
-	unsigned long end_pfn;
-	int nid;
-};
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
@@ -794,6 +790,25 @@
 	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
 }
 
+static inline int zone_id(const struct zone *zone)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	return zone - pgdat->node_zones;
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+	return zone_id(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+	return false;
+}
+#endif
+
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
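The new zone_id()/is_dev_zone() helpers replace open-coded pointer arithmetic against pgdat->node_zones when testing for device memory. A minimal sketch of a caller, assuming a walk over a node's zones (the walk itself is hypothetical, not part of this diff):

	/* Sketch: skip ZONE_DEVICE zones when walking one node's zones. */
	#include <linux/mmzone.h>

	static void walk_node_zones(pg_data_t *pgdat)
	{
		int i;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = &pgdat->node_zones[i];

			if (!populated_zone(zone))
				continue;
			if (is_dev_zone(zone))
				continue;	/* device memory, not page-allocator managed */
			/* ... operate on ordinary zones here ... */
		}
	}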
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 29975c7..366cf77 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
 #include <linux/string.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/io.h>
 
 #include <asm/unaligned.h>
-#include <asm/io.h>
 #include <asm/barrier.h>
 
 #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8e72aa..00121f2 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -547,6 +547,24 @@
 	NOTIFY_DEVICEID4_DELETE = 1 << 2,
 };
 
+enum pnfs_block_volume_type {
+	PNFS_BLOCK_VOLUME_SIMPLE	= 0,
+	PNFS_BLOCK_VOLUME_SLICE		= 1,
+	PNFS_BLOCK_VOLUME_CONCAT	= 2,
+	PNFS_BLOCK_VOLUME_STRIPE	= 3,
+};
+
+enum pnfs_block_extent_state {
+	PNFS_BLOCK_READWRITE_DATA	= 0,
+	PNFS_BLOCK_READ_DATA		= 1,
+	PNFS_BLOCK_INVALID_DATA		= 2,
+	PNFS_BLOCK_NONE_DATA		= 3,
+};
+
+/* on the wire size of a block layout extent */
+#define PNFS_BLOCK_EXTENT_SIZE \
+	(7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+
 #define NFL4_UFLG_MASK			0x0000003F
 #define NFL4_UFLG_DENSE			0x00000001
 #define NFL4_UFLG_COMMIT_THRU_MDS	0x00000002
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 874b772..c0e9614 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -353,7 +353,6 @@
 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
 extern int nfs_permission(struct inode *, int);
 extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
 extern int nfs_attribute_timeout(struct inode *inode);
 extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -371,6 +370,7 @@
 extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
 extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
+extern void nfs_file_clear_open_context(struct file *filp);
 extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
 extern u64 nfs_compat_user_ino64(u64 fileid);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20bc8e5..570a7df 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -173,6 +173,11 @@
 						   set of attributes supported
 						   on this filesystem excluding
 						   the label support bit. */
+	u32			exclcreat_bitmask[3];
+						/* V4 bitmask representing the
+						   set of attributes supported
+						   on this filesystem for the
+						   exclusive create. */
 	u32			cache_consistency_bitmask[3];
 						/* V4 bitmask representing the subset
 						   of change attribute, size, ctime
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7bbe505..52faf7e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,7 +379,7 @@
 	struct stateowner_id	id;
 	union {
 		struct {
-			struct iattr *  attrs;    /* UNCHECKED, GUARDED */
+			struct iattr *  attrs;    /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
 			nfs4_verifier   verifier; /* EXCLUSIVE */
 		};
 		nfs4_stateid	delegation;		/* CLAIM_DELEGATE_CUR */
@@ -389,7 +389,7 @@
 	const struct nfs_server *server;	 /* Needed for ID mapping */
 	const u32 *		bitmask;
 	const u32 *		open_bitmap;
-	__u32			claim;
+	enum open_claim_type4	claim;
 	enum createmode4	createmode;
 	const struct nfs4_label *label;
 };
@@ -406,8 +406,8 @@
 	const struct nfs_server *server;
 	fmode_t			delegation_type;
 	nfs4_stateid		delegation;
+	unsigned long		pagemod_limit;
 	__u32			do_recall;
-	__u64			maxsize;
 	__u32			attrset[NFS4_BITMAP_SIZE];
 	struct nfs4_string	*owner;
 	struct nfs4_string	*group_owner;
@@ -1057,11 +1057,13 @@
 struct nfs4_server_caps_arg {
 	struct nfs4_sequence_args	seq_args;
 	struct nfs_fh		       *fhandle;
+	const u32 *			bitmask;
 };
 
 struct nfs4_server_caps_res {
 	struct nfs4_sequence_res	seq_res;
 	u32				attr_bitmask[3];
+	u32				exclcreat_bitmask[3];
 	u32				acl_bitmask;
 	u32				has_links;
 	u32				has_symlinks;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e..78488e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@
 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void hardlockup_detector_disable(void);
 #else
-static inline void hardlockup_detector_disable(void)
-{
-}
+static inline void hardlockup_detector_disable(void) {}
 #endif
 
 /*
@@ -49,6 +47,12 @@
 	arch_trigger_all_cpu_backtrace(false);
 	return true;
 }
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+				   void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
@@ -80,6 +84,17 @@
 				void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
 				 void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+	return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
 #endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
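The newly exported nmi_trigger_all_cpu_backtrace()/nmi_cpu_backtrace() pair factors the backtrace-IPI bookkeeping out of the architectures; an arch now only supplies the raise() callback that actually sends the IPI. A hedged sketch of the shape of such a caller (the IPI delivery itself is a placeholder):

	#include <linux/nmi.h>

	/* Hypothetical arch hook: send an NMI-like IPI to each CPU in 'mask'. */
	static void demo_raise_backtrace_ipi(cpumask_t *mask)
	{
		/* arch-specific IPI delivery would go here */
	}

	static void demo_trigger_all_cpu_backtrace(bool include_self)
	{
		nmi_trigger_all_cpu_backtrace(include_self, demo_raise_backtrace_ipi);
	}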
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe31..f319182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@
 			      struct of_mm_gpio_chip *mm_gc);
 extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
 
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
 extern void of_gpiochip_remove(struct gpio_chip *gc);
 extern int of_gpio_simple_xlate(struct gpio_chip *gc,
 				const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@
 	return -ENOSYS;
 }
 
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
 static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
 
 #endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index c2bbf67..d2fa9ca 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -41,7 +41,7 @@
 	OID_signed_data,		/* 1.2.840.113549.1.7.2 */
 	/* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
 	OID_email_address,		/* 1.2.840.113549.1.9.1 */
-	OID_content_type,		/* 1.2.840.113549.1.9.3 */
+	OID_contentType,		/* 1.2.840.113549.1.9.3 */
 	OID_messageDigest,		/* 1.2.840.113549.1.9.4 */
 	OID_signingTime,		/* 1.2.840.113549.1.9.5 */
 	OID_smimeCapabilites,		/* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@
 
 	/* Microsoft Authenticode & Software Publishing */
 	OID_msIndirectData,		/* 1.3.6.1.4.1.311.2.1.4 */
+	OID_msStatementType,		/* 1.3.6.1.4.1.311.2.1.11 */
+	OID_msSpOpusInfo,		/* 1.3.6.1.4.1.311.2.1.12 */
 	OID_msPeImageDataObjId,		/* 1.3.6.1.4.1.311.2.1.15 */
 	OID_msIndividualSPKeyPurpose,	/* 1.3.6.1.4.1.311.2.1.21 */
 	OID_msOutlookExpress,		/* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@
 	OID_certAuthInfoAccess,		/* 1.3.6.1.5.5.7.1.1 */
 	OID_sha1,			/* 1.3.14.3.2.26 */
 	OID_sha256,			/* 2.16.840.1.101.3.4.2.1 */
+	OID_sha384,			/* 2.16.840.1.101.3.4.2.2 */
+	OID_sha512,			/* 2.16.840.1.101.3.4.2.3 */
+	OID_sha224,			/* 2.16.840.1.101.3.4.2.4 */
 
 	/* Distinguished Name attribute IDs [RFC 2256] */
 	OID_commonName,			/* 2.5.4.3 */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7deecb7..03e6257 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -13,6 +13,27 @@
 struct task_struct;
 
 /*
+ * Details of the page allocation that triggered the oom killer, used to
+ * determine what should be killed.
+ */
+struct oom_control {
+	/* Used to determine cpuset */
+	struct zonelist *zonelist;
+
+	/* Used to determine mempolicy */
+	nodemask_t *nodemask;
+
+	/* Used to determine cpuset and node locality requirement */
+	const gfp_t gfp_mask;
+
+	/*
+	 * order == -1 means the oom kill is required by sysrq, otherwise only
+	 * for display purposes.
+	 */
+	const int order;
+};
+
+/*
  * Types of limitations to the nodes from which allocations may occur
  */
 enum oom_constraint {
@@ -57,21 +78,18 @@
 
 extern int oom_kills_count(void);
 extern void note_oom_kill(void);
-extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 			     unsigned int points, unsigned long totalpages,
-			     struct mem_cgroup *memcg, nodemask_t *nodemask,
-			     const char *message);
+			     struct mem_cgroup *memcg, const char *message);
 
-extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-			       int order, const nodemask_t *nodemask,
+extern void check_panic_on_oom(struct oom_control *oc,
+			       enum oom_constraint constraint,
 			       struct mem_cgroup *memcg);
 
-extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
-		unsigned long totalpages, const nodemask_t *nodemask,
-		bool force_kill);
+extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+		struct task_struct *task, unsigned long totalpages);
 
-extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *mask, bool force_kill);
+extern bool out_of_memory(struct oom_control *oc);
 
 extern void exit_oom_victim(void);
 
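With struct oom_control, call sites hand the allocation context to the OOM killer as one object instead of a zonelist/nodemask/gfp/order argument list. A sketch of the updated out_of_memory() calling convention (the wrapper function is illustrative, mirroring what the page allocator passes):

	#include <linux/oom.h>

	static bool try_oom_kill(struct zonelist *zonelist, nodemask_t *nodemask,
				 gfp_t gfp_mask, int order)
	{
		struct oom_control oc = {
			.zonelist = zonelist,	/* cpuset constraint */
			.nodemask = nodemask,	/* mempolicy constraint */
			.gfp_mask = gfp_mask,
			.order = order,		/* -1 would mean a sysrq-forced kill */
		};

		return out_of_memory(&oc);
	}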
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 2dc1e16..047d647 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -65,11 +65,6 @@
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 			bool skip_hwpoisoned_pages);
 
-/*
- * Internal functions. Changes pageblock's migrate type.
- */
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
-void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 struct page *alloc_migrate_target(struct page *page, unsigned long private,
 				int **resultp);
 
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1a64733..e90eb22 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1227,6 +1227,8 @@
 		dma_pool_create(name, &pdev->dev, size, align, allocation)
 #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
 #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define	pci_pool_zalloc(pool, flags, handle) \
+		dma_pool_zalloc(pool, flags, handle)
 #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
 
 struct msix_entry {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fcff8f8..d9ba49c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2332,6 +2332,15 @@
 
 #define PCI_VENDOR_ID_CAVIUM		0x177d
 
+#define PCI_VENDOR_ID_TECHWELL		0x1797
+#define PCI_DEVICE_ID_TECHWELL_6800	0x6800
+#define PCI_DEVICE_ID_TECHWELL_6801	0x6801
+#define PCI_DEVICE_ID_TECHWELL_6804	0x6804
+#define PCI_DEVICE_ID_TECHWELL_6816_1	0x6810
+#define PCI_DEVICE_ID_TECHWELL_6816_2	0x6811
+#define PCI_DEVICE_ID_TECHWELL_6816_3	0x6812
+#define PCI_DEVICE_ID_TECHWELL_6816_4	0x6813
+
 #define PCI_VENDOR_ID_BELKIN		0x1799
 #define PCI_DEVICE_ID_BELKIN_F5D7010V7	0x701f
 
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3e88c9a..834c4e5 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -16,6 +16,7 @@
 };
 
 extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern int  percpu_down_read_trylock(struct percpu_rw_semaphore *);
 extern void percpu_up_read(struct percpu_rw_semaphore *);
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +32,23 @@
 	__percpu_init_rwsem(brw, #brw, &rwsem_key);		\
 })
 
+
+#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+
+static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
+					bool read, unsigned long ip)
+{
+	lock_release(&sem->rw_sem.dep_map, 1, ip);
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+	if (!read)
+		sem->rw_sem.owner = NULL;
+#endif
+}
+
+static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
+					bool read, unsigned long ip)
+{
+	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+}
+
 #endif
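percpu_down_read_trylock() gives readers a non-blocking entry point, while the release/acquire helpers let code transfer lockdep ownership of the underlying rw_sem across contexts. A minimal trylock sketch, assuming the semaphore was set up with percpu_init_rwsem() elsewhere:

	#include <linux/percpu-rwsem.h>

	static struct percpu_rw_semaphore demo_sem;	/* percpu_init_rwsem() at init */

	static int demo_read_side(void)
	{
		if (!percpu_down_read_trylock(&demo_sem))
			return -EBUSY;	/* a writer currently holds it */

		/* ... read-side fast path ... */

		percpu_up_read(&demo_sem);
		return 0;
	}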
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 0000000..bfa673b
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ *  linux/arch/arm/include/asm/pmu.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ */
+struct arm_pmu_platdata {
+	irqreturn_t (*handle_irq)(int irq, void *dev,
+				  irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		32
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+#define C(_x)				PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED		0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+
+	/*
+	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+	 * already have to allocate this struct per cpu.
+	 */
+	struct arm_pmu		*percpu_pmu;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
+	int		*irq_affinity;
+	char		*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
+	void		(*reset)(void *);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	__percpu *hw_events;
+	struct notifier_block	hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
+struct pmu_probe_info {
+	unsigned int cpuid;
+	unsigned int mask;
+	int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn)	\
+{					\
+	.cpuid = (_cpuid),		\
+	.mask = (_mask),		\
+	.init = (_fn),			\
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
new file mode 100644
index 0000000..695035a
--- /dev/null
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -0,0 +1,31 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
+
+enum mxt_suspend_mode {
+	MXT_SUSPEND_DEEP_SLEEP	= 0,
+	MXT_SUSPEND_T9_CTRL	= 1,
+};
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+	unsigned long irqflags;
+	u8 t19_num_keys;
+	const unsigned int *t19_keymap;
+	enum mxt_suspend_mode suspend_mode;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519..0000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
-	unsigned int gpio_base;
-	unsigned int irq_base;
-	unsigned int number_of_pins;
-	const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 0000000..c68712a
--- /dev/null
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+
+/**
+ * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of value for each channel
+ * @n_values: Number of multiplexer channels
+ * @little_endian: Indicates whether the register is little endian
+ * @write_only: Reading the register is not allowed by hardware
+ * @classes: Optional I2C auto-detection classes
+ * @idle: Value to write to mux when idle
+ * @idle_in_use: Indicates whether the idle value is in use
+ * @reg: Virtual address of the register to switch channel
+ * @reg_size: register size in bytes
+ */
+struct i2c_mux_reg_platform_data {
+	int parent;
+	int base_nr;
+	const unsigned int *values;
+	int n_values;
+	bool little_endian;
+	bool write_only;
+	const unsigned int *classes;
+	u32 idle;
+	bool idle_in_use;
+	void __iomem *reg;
+	resource_size_t reg_size;
+};
+
+#endif	/* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 0000000..f16542c
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO		0
+#define ICH_RES_IO_SMI		1
+#define ICH_RES_MEM_OFF		2
+#define ICH_RES_MEM_GCS_PMC	0
+
+struct itco_wdt_platform_data {
+	char name[32];
+	unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1e..1b2ba24 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@
 		Only valid when mode is PWM_BASED.
  * @size_program : total size of lp855x_rom_data
  * @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
  */
 struct lp855x_platform_data {
 	const char *name;
@@ -145,7 +144,6 @@
 	unsigned int period_ns;
 	int size_program;
 	struct lp855x_rom_data *rom_data;
-	struct regulator *supply;
 };
 
 #endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index e1571ef..95ccab3 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -45,5 +45,6 @@
 	int max_bus_width;
 	bool support_vsel;
 	unsigned int delay_line;
+	unsigned int tuning_step;       /* The delay cell steps in tuning procedure */
 };
 #endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
new file mode 100644
index 0000000..646af6f
--- /dev/null
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -0,0 +1,63 @@
+#ifndef	_PIXCIR_I2C_TS_H
+#define	_PIXCIR_I2C_TS_H
+
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE	51
+#define PIXCIR_REG_INT_MODE	52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ *       clocks are cut and it can only be brought out of this mode
+ *	 using the RESET pin.
+ */
+enum pixcir_power_mode {
+	PIXCIR_POWER_ACTIVE,
+	PIXCIR_POWER_IDLE,
+	PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK	0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+	PIXCIR_INT_PERIODICAL,
+	PIXCIR_INT_DIFF_COORD,
+	PIXCIR_INT_LEVEL_TOUCH,
+	PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK	0x03
+#define PIXCIR_INT_ENABLE	(1UL << 3)
+#define PIXCIR_INT_POL_HIGH	(1UL << 2)
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers:	Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids:		Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+	u8 max_fingers;
+	bool has_hw_ids;
+};
+
+struct pixcir_ts_platform_data {
+	int x_max;
+	int y_max;
+	struct pixcir_i2c_chip_data chip;
+};
+
+#endif
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2..7bdece8 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
 #define _LINUX_INPUT_ZFORCE_TS_H
 
 struct zforce_ts_platdata {
-	int gpio_int;
-	int gpio_rst;
-
 	unsigned int x_max;
 	unsigned int y_max;
 };
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d211404..85f810b3 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,24 +14,26 @@
 #define __PMEM_H__
 
 #include <linux/io.h>
+#include <linux/uio.h>
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
-#include <asm/cacheflush.h>
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
 #else
-static inline void arch_wmb_pmem(void)
-{
-	BUG();
-}
-
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation; all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
 static inline bool __arch_has_wmb_pmem(void)
 {
 	return false;
 }
 
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline void arch_wmb_pmem(void)
 {
-	return NULL;
+	BUG();
 }
 
 static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
@@ -39,22 +41,38 @@
 {
 	BUG();
 }
+
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	BUG();
+	return 0;
+}
+
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+	BUG();
+}
 #endif
 
 /*
  * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
- * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and __arch_has_wmb_pmem().
  */
-
 static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
 {
 	memcpy(dst, (void __force const *) src, size);
 }
 
-static inline void memunmap_pmem(void __pmem *addr)
+static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
 {
-	iounmap((void __force __iomem *) addr);
+	devm_memunmap(dev, (void __force *) addr);
+}
+
+static inline bool arch_has_pmem_api(void)
+{
+	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }
 
 /**
@@ -68,14 +86,7 @@
  */
 static inline bool arch_has_wmb_pmem(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
-		return __arch_has_wmb_pmem();
-	return false;
-}
-
-static inline bool arch_has_pmem_api(void)
-{
-	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+	return arch_has_pmem_api() && __arch_has_wmb_pmem();
 }
 
 /*
@@ -85,16 +96,24 @@
  * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
  * making data durable relative to i/o completion.
  */
-static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t size)
 {
 	memcpy((void __force *) dst, src, size);
 }
 
-static void __pmem *default_memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+		size_t bytes, struct iov_iter *i)
 {
-	return (void __pmem __force *)ioremap_wt(offset, size);
+	return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
+{
+	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+		clear_page((void __force *)addr);
+	else
+		memset((void __force *)addr, 0, size);
 }
 
 /**
@@ -109,12 +128,11 @@
  * wmb_pmem() arrange for the data to be written through the
  * cache to persistent media.
  */
-static inline void __pmem *memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline void __pmem *memremap_pmem(struct device *dev,
+		resource_size_t offset, unsigned long size)
 {
-	if (arch_has_pmem_api())
-		return arch_memremap_pmem(offset, size);
-	return default_memremap_pmem(offset, size);
+	return (void __pmem *) devm_memremap(dev, offset, size,
+			ARCH_MEMREMAP_PMEM);
 }
 
 /**
@@ -146,7 +164,42 @@
  */
 static inline void wmb_pmem(void)
 {
-	if (arch_has_pmem_api())
+	if (arch_has_wmb_pmem())
 		arch_wmb_pmem();
+	else
+		wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	if (arch_has_pmem_api())
+		return arch_copy_from_iter_pmem(addr, bytes, i);
+	return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+	if (arch_has_pmem_api())
+		arch_clear_pmem(addr, size);
+	else
+		default_clear_pmem(addr, size);
 }
 #endif /* __PMEM_H__ */
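The intended pattern: stage stores into persistent memory with copy_from_iter_pmem() or clear_pmem(), then make the whole batch durable with one wmb_pmem(). A sketch assuming pmem_addr was obtained from memremap_pmem() and that region_size bounds the mapping (both names are illustrative):

	#include <linux/pmem.h>
	#include <linux/uio.h>

	static size_t demo_pmem_write(void __pmem *pmem_addr, size_t bytes,
				      struct iov_iter *i, size_t region_size)
	{
		size_t copied;

		copied = copy_from_iter_pmem(pmem_addr, bytes, i);
		if (copied < region_size)	/* zero whatever the iterator left */
			clear_pmem(pmem_addr + copied, region_size - copied);
		wmb_pmem();	/* order the writes through to persistent media */
		return copied;
	}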
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 0000000..a682fcc
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY		0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+	int (*cpu_suspend)(u32 state, unsigned long entry_point);
+	int (*cpu_off)(u32 state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+	int (*affinity_info)(unsigned long target_affinity,
+			unsigned long lowest_affinity_level);
+	int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 987a73a..061265f 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -34,6 +34,7 @@
 #define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
 
 #define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
 
 /* single stepping state bits (used on ARM and PA-RISC) */
 #define PT_SINGLESTEP_BIT	31
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca660..7a57c28 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@
 void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
 static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@
 	return 0;
 }
 
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
 {
+	return 0;
 }
 
 static inline void dquot_drop(struct inode *inode)
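Since dquot_initialize() can now fail, filesystems are expected to propagate its return value rather than assume success. A sketch of the updated call pattern (the surrounding helper is hypothetical):

	#include <linux/quotaops.h>

	static int demo_prepare_inode_change(struct inode *inode)
	{
		int error;

		error = dquot_initialize(inode);
		if (error)	/* e.g. -ENOMEM while allocating dquots */
			return error;

		/* ... proceed with the quota-charged operation ... */
		return 0;
	}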
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea..8fc0bfd 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,6 +17,7 @@
 #include <linux/rbtree.h>
 #include <linux/err.h>
 #include <linux/bug.h>
+#include <linux/lockdep.h>
 
 struct module;
 struct device;
@@ -50,6 +51,20 @@
 	unsigned int def;
 };
 
+/**
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ */
+struct reg_sequence {
+	unsigned int reg;
+	unsigned int def;
+	unsigned int delay_us;
+};
+
 #ifdef CONFIG_REGMAP
 
 enum regmap_endian {
@@ -296,8 +311,12 @@
  *                if not implemented  on a given device.
  * @async_write: Write operation which completes asynchronously, optional and
  *               must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. This
+ *             write operation has to complete when returning from the function.
  * @read: Read operation.  Data is returned in the buffer used to transmit
  *         data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
  * @async_alloc: Allocate a regmap_async() structure.
  * @read_flag_mask: Mask to be set in the top byte of the register when doing
  *                  a read.
@@ -307,7 +326,8 @@
  * @val_format_endian_default: Default endianness for formatted register
  *     values. Used when the regmap_config specifies DEFAULT. If this is
  *     DEFAULT, BIG is assumed.
- * @async_size: Size of struct used for async work.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
  */
 struct regmap_bus {
 	bool fast_io;
@@ -322,47 +342,186 @@
 	u8 read_flag_mask;
 	enum regmap_endian reg_format_endian_default;
 	enum regmap_endian val_format_endian_default;
+	size_t max_raw_read;
+	size_t max_raw_write;
 };
 
-struct regmap *regmap_init(struct device *dev,
-			   const struct regmap_bus *bus,
-			   void *bus_context,
-			   const struct regmap_config *config);
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+			     const struct regmap_bus *bus,
+			     void *bus_context,
+			     const struct regmap_config *config,
+			     struct lock_class_key *lock_key,
+			     const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+				      void __iomem *regs,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+				  const struct regmap_bus *bus,
+				  void *bus_context,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+					    const struct regmap_config *config,
+					    struct lock_class_key *lock_key,
+					    const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+					   const char *clk_id,
+					   void __iomem *regs,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name);
+
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ **/
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...)				\
+(									\
+	({								\
+		static struct lock_class_key _key;			\
+		fn(__VA_ARGS__, &_key,					\
+			KBUILD_BASENAME ":"				\
+			__stringify(__LINE__) ":"			\
+			"(" name ")->lock");				\
+	})								\
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.  This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config)			\
+	__regmap_lockdep_wrapper(__regmap_init, #config,		\
+				dev, bus, bus_context, config)
 int regmap_attach_dev(struct device *dev, struct regmap *map,
-				 const struct regmap_config *config);
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
-			       const struct regmap_config *config);
-struct regmap *regmap_init_spi(struct spi_device *dev,
-			       const struct regmap_config *config);
-struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
-				     const struct regmap_config *config);
-struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
-				    const struct regmap_config *config);
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
-				    void __iomem *regs,
-				    const struct regmap_config *config);
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
-				const struct regmap_config *config);
+		      const struct regmap_config *config);
 
-struct regmap *devm_regmap_init(struct device *dev,
-				const struct regmap_bus *bus,
-				void *bus_context,
-				const struct regmap_config *config);
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
-				    const struct regmap_config *config);
-struct regmap *devm_regmap_init_spi(struct spi_device *dev,
-				    const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
-					  const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
-					 const struct regmap_config *config);
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
-					 void __iomem *regs,
-					 const struct regmap_config *config);
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
-				     const struct regmap_config *config);
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config)					\
+	__regmap_lockdep_wrapper(__regmap_init_i2c, #config,		\
+				i2c, config)
 
-bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config)					\
+	__regmap_lockdep_wrapper(__regmap_init_spi, #config,		\
+				dev, config)
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config)				\
+	__regmap_lockdep_wrapper(__regmap_init_spmi_base, #config,	\
+				dev, config)
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev:	Device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config)				\
+	__regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config,	\
+				dev, config)
+
+/**
+ * regmap_init_mmio_clk(): Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config)			\
+	__regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config,	\
+				dev, clk_id, regs, config)
 
 /**
  * regmap_init_mmio(): Initialise register map
@@ -374,12 +533,109 @@
  * The return value will be an ERR_PTR() on error or a valid pointer to
  * a struct regmap.
  */
-static inline struct regmap *regmap_init_mmio(struct device *dev,
-					void __iomem *regs,
-					const struct regmap_config *config)
-{
-	return regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define regmap_init_mmio(dev, regs, config)		\
+	regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config)					\
+	__regmap_lockdep_wrapper(__regmap_init_ac97, #config,		\
+				ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * devm_regmap_init(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  This function should generally not be called
+ * directly, it should be called by bus-specific init functions.  The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config)			\
+	__regmap_lockdep_wrapper(__devm_regmap_init, #config,		\
+				dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c(): Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config,	\
+				i2c, config)
+
+/**
+ * devm_regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_spi, #config,	\
+				dev, config)
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config,	\
+				dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config,	\
+				dev, config)
+
+/**
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config)		\
+	__regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config,	\
+				dev, clk_id, regs, config)
 
 /**
  * devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +648,22 @@
  * to a struct regmap.  The regmap will be automatically freed by the
  * device management code.
  */
-static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
-					void __iomem *regs,
-					const struct regmap_config *config)
-{
-	return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define devm_regmap_init_mmio(dev, regs, config)		\
+	devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config,	\
+				ac97, config)
 
 void regmap_exit(struct regmap *map);
 int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +676,10 @@
 		     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			int num_regs);
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs);
 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 			   const void *val, size_t val_len);
@@ -424,6 +690,8 @@
 		     size_t val_count);
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		       unsigned int mask, unsigned int val);
 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
 			     unsigned int mask, unsigned int val);
 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -437,6 +705,8 @@
 int regmap_get_reg_stride(struct regmap *map);
 int regmap_async_complete(struct regmap *map);
 bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
 
 int regcache_sync(struct regmap *map);
 int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -450,7 +720,7 @@
 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
 			      const struct regmap_access_table *table);
 
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs);
 int regmap_parse_val(struct regmap *map, const void *buf,
 				unsigned int *val);
@@ -503,6 +773,8 @@
 
 int regmap_fields_write(struct regmap_field *field, unsigned int id,
 			unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val);
 int regmap_fields_read(struct regmap_field *field, unsigned int id,
 		       unsigned int *val);
 int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
@@ -645,6 +917,13 @@
 	return -EINVAL;
 }
 
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+				     unsigned int mask, unsigned int val)
+{
+	WARN_ONCE(1, "regmap API is disabled");
+	return -EINVAL;
+}
+
 static inline int regmap_update_bits_async(struct regmap *map,
 					   unsigned int reg,
 					   unsigned int mask, unsigned int val)
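struct reg_sequence extends reg_default with an optional post-write delay, and the multi-write entry points now take it. A hedged sketch of a driver applying a power-up sequence (register addresses, values and the 10us settle time are invented for illustration):

	#include <linux/kernel.h>
	#include <linux/regmap.h>

	static const struct reg_sequence demo_power_up_seq[] = {
		{ .reg = 0x00, .def = 0x01 },
		{ .reg = 0x04, .def = 0xff },
		{ .reg = 0x08, .def = 0x80, .delay_us = 10 },	/* needs settle time */
	};

	static int demo_chip_power_up(struct regmap *map)
	{
		return regmap_multi_reg_write(map, demo_power_up_seq,
					      ARRAY_SIZE(demo_power_up_seq));
	}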
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a..29446ae 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@
 	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
 	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
 	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
+					 * and caller guarantees they will
+					 * do a final flush if necessary */
 };
 
 #ifdef CONFIG_MMU
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 119823d..a4ab9da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1344,6 +1344,25 @@
 	perf_nr_task_contexts,
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1700,6 +1719,10 @@
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
 	struct rcu_head rcu;
 
 	/*
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index a19ddac..f426503 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -78,7 +78,7 @@
 
 static inline int seccomp_mode(struct seccomp *s)
 {
-	return 0;
+	return SECCOMP_MODE_DISABLED;
 }
 #endif /* CONFIG_SECCOMP */
 
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 912a7c4..d4c7271 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -149,6 +149,41 @@
 #endif
 }
 
+/**
+ * seq_show_option - display mount options with appropriate escapes.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, can be NULL
+ */
+static inline void seq_show_option(struct seq_file *m, const char *name,
+				   const char *value)
+{
+	seq_putc(m, ',');
+	seq_escape(m, name, ",= \t\n\\");
+	if (value) {
+		seq_putc(m, '=');
+		seq_escape(m, value, ", \t\n\\");
+	}
+}
+
+/**
+ * seq_show_option_n - display mount options with appropriate escapes
+ *		       where @value must be a specific length.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, cannot be NULL
+ * @length: the length of @value to display
+ *
+ * This is a macro since it uses "length" to define the size of the
+ * stack buffer.
+ */
+#define seq_show_option_n(m, name, value, length) {	\
+	char val_buf[length + 1];			\
+	strncpy(val_buf, value, length);		\
+	val_buf[length] = '\0';				\
+	seq_show_option(m, name, val_buf);		\
+}
+
 #define SEQ_START_TOKEN ((void *)1)
 /*
  * Helpers for iteration over list_head-s in seq_files
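seq_show_option() exists so ->show_options implementations stop emitting strings that could smuggle separators into /proc/mounts. A sketch of a filesystem callback using it (the superblock-info fields are hypothetical):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	struct demo_sb_info {
		const char *volume_name;	/* admin-controlled, must be escaped */
	};

	static int demo_show_options(struct seq_file *m, struct dentry *root)
	{
		struct demo_sb_info *sbi = root->d_sb->s_fs_info;

		if (sbi->volume_name)
			seq_show_option(m, "volname", sbi->volume_name);
		return 0;
	}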
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7..df4ab5d 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
 #include <linux/mod_devicetable.h>
 #include <uapi/linux/serio.h>
 
+extern struct bus_type serio_bus;
+
 struct serio {
 	void *port_data;
 
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba50..d927647 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@
 #if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
 #else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+	return false;
+}
 #endif
 
 #endif
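Turning shdma_chan_filter into a stub that returns false (rather than a NULL macro) means callers can pass it to dma_request_channel() unconditionally. A sketch of such a caller (the slave-id argument follows the usual shdma convention but is illustrative here):

	#include <linux/dmaengine.h>
	#include <linux/shdma-base.h>

	static struct dma_chan *demo_request_chan(void *slave_id)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* Safe even without SH_DMAE_BASE: the stub filter matches nothing. */
		return dma_request_channel(mask, shdma_chan_filter, slave_id);
	}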
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e5..7e37d44 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
 void kmem_cache_free(struct kmem_cache *, void *);
 
+/*
+ * Bulk allocation and freeing operations. These are accelerated in an
+ * allocator specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
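The bulk interfaces amortize per-object locking across a whole batch. A minimal sketch of the calling convention (the batch size and cache are placeholders; note the interrupts-enabled requirement from the comment above):

	#include <linux/slab.h>

	#define DEMO_BATCH 16

	static int demo_bulk(struct kmem_cache *cache)
	{
		void *objs[DEMO_BATCH];

		if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, DEMO_BATCH, objs))
			return -ENOMEM;

		/* ... use all DEMO_BATCH objects ... */

		kmem_cache_free_bulk(cache, DEMO_BATCH, objs);
		return 0;
	}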
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593..e6109a6 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@
 	const char			*thread_comm;
 };
 
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+					   const struct cpumask *cpumask);
+
+static inline int
+smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+	return smpboot_register_percpu_thread_cpumask(plug_thread,
+						      cpu_possible_mask);
+}
+
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
 int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
 					 const struct cpumask *);
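smpboot_register_percpu_thread() is now a wrapper that passes cpu_possible_mask, so subsystems that only want threads on a subset of CPUs call the _cpumask variant directly. A sketch with a stub hotplug-thread descriptor (all demo_* names are invented):

	#include <linux/percpu.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, demo_task);

	static int demo_should_run(unsigned int cpu)
	{
		return 0;	/* stub: never has pending work */
	}

	static void demo_thread_fn(unsigned int cpu)
	{
		/* one iteration of per-cpu work */
	}

	static struct smp_hotplug_thread demo_threads = {
		.store			= &demo_task,
		.thread_should_run	= demo_should_run,
		.thread_fn		= demo_thread_fn,
		.thread_comm		= "demo/%u",
	};

	static int demo_register(const struct cpumask *allowed)
	{
		/* Spawn per-cpu threads only on the CPUs in 'allowed'. */
		return smpboot_register_percpu_thread_cpumask(&demo_threads, allowed);
	}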
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53..5c9c6cd 100644
--- a/include/linux/sunrpc/addr.h
+++ b/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@
 #define IPV6_SCOPE_DELIMITER		'%'
 #define IPV6_SCOPE_ID_LEN		sizeof("%nnnnnnnnnn")
 
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
-				   const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
+				 const struct sockaddr *sap2)
 {
 	const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
 	const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
-				   const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
+				 const struct sockaddr *sap2)
 {
 	const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
 	const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@
 	return true;
 }
 #else	/* !(IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
 				   const struct sockaddr *sap2)
 {
 	return false;
@@ -122,15 +122,28 @@
 	if (sap1->sa_family == sap2->sa_family) {
 		switch (sap1->sa_family) {
 		case AF_INET:
-			return __rpc_cmp_addr4(sap1, sap2);
+			return rpc_cmp_addr4(sap1, sap2);
 		case AF_INET6:
-			return __rpc_cmp_addr6(sap1, sap2);
+			return rpc_cmp_addr6(sap1, sap2);
 		}
 	}
 	return false;
 }
 
 /**
+ * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ */
+static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
+				     const struct sockaddr *sap2)
+{
+	if (!rpc_cmp_addr(sap1, sap2))
+		return false;
+	return rpc_get_port(sap1) == rpc_get_port(sap2);
+}
+
+/**
  * rpc_copy_addr - copy the address portion of one sockaddr to another
  * @dst: destination sockaddr
  * @src: source sockaddr
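
A short sketch distinguishing the two comparison levels now exported; both
helpers take generic struct sockaddr pointers:

    #include <linux/sunrpc/addr.h>

    static bool example_same_host(const struct sockaddr *a,
                                  const struct sockaddr *b)
    {
            return rpc_cmp_addr(a, b);              /* address only */
    }

    static bool example_same_endpoint(const struct sockaddr *a,
                                      const struct sockaddr *b)
    {
            return rpc_cmp_addr_port(a, b);         /* address and port */
    }
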
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index a7cbb57..1ecf13e 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
 #include <linux/atomic.h>
 #include <linux/rcupdate.h>
 #include <linux/uidgid.h>
+#include <linux/utsname.h>
 
-/* size of the nodename buffer */
-#define UNX_MAXNODENAME	32
+/*
+ * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
+ * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
+ */
+#define UNX_MAXNODENAME	__NEW_UTS_LEN
 
 struct rpcsec_gss_info;
 
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6..03d3b4c 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
  * 
  */
 struct cache_head {
-	struct cache_head * next;
+	struct hlist_node	cache_list;
 	time_t		expiry_time;	/* After this time, don't use the data */
 	time_t		last_refresh;   /* If CACHE_PENDING, this is when upcall 
 					 * was sent, else this is when update was received
@@ -73,7 +73,7 @@
 struct cache_detail {
 	struct module *		owner;
 	int			hash_size;
-	struct cache_head **	hash_table;
+	struct hlist_head *	hash_table;
 	rwlock_t		hash_lock;
 
 	atomic_t		inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@
 					umode_t, struct cache_detail *);
 extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
 
+/* Must store cache_detail in seq_file->private if using the next three functions */
+extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop(struct seq_file *file, void *p);
+
 extern void qword_add(char **bpp, int *lp, char *str);
 extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
 extern int qword_get(char **bpp, char *dest, int bufsize);
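
The note about seq_file->private is load-bearing; a sketch of the wiring it
implies, where the ->show callback, the open function and the cache_detail
pointer are all hypothetical:

    #include <linux/seq_file.h>
    #include <linux/sunrpc/cache.h>

    static struct cache_detail *example_cache_detail; /* set up elsewhere */

    static int example_cache_show(struct seq_file *m, void *p)
    {
            /* render one cache_head; @p comes from cache_seq_next() */
            return 0;
    }

    static const struct seq_operations example_cache_seq_ops = {
            .start = cache_seq_start,
            .next  = cache_seq_next,
            .stop  = cache_seq_stop,
            .show  = example_cache_show,
    };

    static int example_cache_open(struct inode *inode, struct file *file)
    {
            int ret = seq_open(file, &example_cache_seq_ops);

            if (ret)
                    return ret;
            /* the iterators require this: */
            ((struct seq_file *)file->private_data)->private =
                    example_cache_detail;
            return 0;
    }
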
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb9..cc0fc71 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
 #include <linux/wait.h>
 #include <linux/mm.h>
 
-/*
- * This is the RPC server thread function prototype
- */
-typedef int		(*svc_thread_fn)(void *);
-
 /* statistics for svc_pool structures */
 struct svc_pool_stats {
 	atomic_long_t	packets;
@@ -54,6 +49,25 @@
 	unsigned long		sp_flags;
 } ____cacheline_aligned_in_smp;
 
+struct svc_serv;
+
+struct svc_serv_ops {
+	/* Callback to use when last thread exits. */
+	void		(*svo_shutdown)(struct svc_serv *, struct net *);
+
+	/* function for service threads to run */
+	int		(*svo_function)(void *);
+
+	/* queue up a transport for servicing */
+	void		(*svo_enqueue_xprt)(struct svc_xprt *);
+
+	/* set up thread (or whatever) execution context */
+	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+
+	/* optional module to count when adding threads (pooled svcs only) */
+	struct module	*svo_module;
+};
+
 /*
  * RPC service.
  *
@@ -85,16 +99,7 @@
 
 	unsigned int		sv_nrpools;	/* number of thread pools */
 	struct svc_pool *	sv_pools;	/* array of thread pools */
-
-	void			(*sv_shutdown)(struct svc_serv *serv,
-					       struct net *net);
-						/* Callback to use when last thread
-						 * exits.
-						 */
-
-	struct module *		sv_module;	/* optional module to count when
-						 * adding threads */
-	svc_thread_fn		sv_function;	/* main function for threads */
+	struct svc_serv_ops	*sv_ops;	/* server operations */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	struct list_head	sv_cb_list;	/* queue for callback requests
 						 * that arrive over the same
@@ -423,19 +428,46 @@
 };
 
 /*
+ * Mode for mapping cpus to pools.
+ */
+enum {
+	SVC_POOL_AUTO = -1,	/* choose one of the others */
+	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
+				 * (legacy & UP mode) */
+	SVC_POOL_PERCPU,	/* one pool per cpu */
+	SVC_POOL_PERNODE	/* one pool per numa node */
+};
+
+struct svc_pool_map {
+	int count;			/* How many svc_servs use us */
+	int mode;			/* Note: int not enum to avoid
+					 * warnings about "enumeration value
+					 * not handled in switch" */
+	unsigned int npools;
+	unsigned int *pool_to;		/* maps pool id to cpu or node */
+	unsigned int *to_pool;		/* maps cpu or node to pool id */
+};
+
+extern struct svc_pool_map svc_pool_map;
+
+/*
  * Function prototypes.
  */
 int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
 int svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
-			    void (*shutdown)(struct svc_serv *, struct net *net));
+			    struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+					struct svc_pool *pool, int node);
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
 					struct svc_pool *pool, int node);
+void		   svc_rqst_free(struct svc_rqst *);
 void		   svc_exit_thread(struct svc_rqst *);
+unsigned int	   svc_pool_map_get(void);
+void		   svc_pool_map_put(void);
 struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
-			void (*shutdown)(struct svc_serv *, struct net *net),
-			svc_thread_fn, struct module *);
+			struct svc_serv_ops *);
 int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
 int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
 void		   svc_destroy(struct svc_serv *);
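
In effect, three svc_create_pooled() arguments collapse into one ops table.
A hedged sketch of how a pooled service might now fill it in; the callback
bodies are placeholders, not the actual nfsd/lockd code:

    #include <linux/module.h>
    #include <linux/sunrpc/svc.h>
    #include <linux/sunrpc/svc_xprt.h>

    static void example_last_thread(struct svc_serv *serv, struct net *net) { }
    static int example_svc_thread(void *data) { return 0; }

    static struct svc_serv_ops example_sv_ops = {
            .svo_shutdown     = example_last_thread,
            .svo_function     = example_svc_thread,
            .svo_enqueue_xprt = svc_xprt_do_enqueue,
            .svo_setup        = svc_set_num_threads,
            .svo_module       = THIS_MODULE,
    };

    static struct svc_serv *example_create(struct svc_program *prog,
                                           unsigned int bufsize)
    {
            /* was: svc_create_pooled(prog, bufsize, shutdown, fn, module) */
            return svc_create_pooled(prog, bufsize, &example_sv_ops);
    }
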
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cb94ee4..d5ee6d8 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -172,13 +172,6 @@
 #define RDMAXPRT_SQ_PENDING	2
 #define RDMAXPRT_CONN_PENDING	3
 
-#define RPCRDMA_MAX_SVC_SEGS	(64)	/* server max scatter/gather */
-#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#define RPCRDMA_MAXPAYLOAD	RPCSVC_MAXPAYLOAD
-#else
-#define RPCRDMA_MAXPAYLOAD	(RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#endif
-
 #define RPCRDMA_LISTEN_BACKLOG  10
 /* The default ORD value is based on two outstanding full-size writes with a
  * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ.  */
@@ -187,6 +180,8 @@
 #define RPCRDMA_MAX_REQUESTS    32
 #define RPCRDMA_MAX_REQ_SIZE    4096
 
+#define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
+
 /* svc_rdma_marshal.c */
 extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -213,6 +208,8 @@
 
 /* svc_rdma_sendto.c */
 extern int svc_rdma_sendto(struct svc_rqst *);
+extern struct rpcrdma_read_chunk *
+	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
 
 /* svc_rdma_transport.c */
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -225,7 +222,6 @@
 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
 extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
 extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
-extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
 extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
 extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
 			      struct svc_rdma_fastreg_mr *);
@@ -238,83 +234,4 @@
 extern int svc_rdma_init(void);
 extern void svc_rdma_cleanup(void);
 
-/*
- * Returns the address of the first read chunk or <nul> if no read chunk is
- * present
- */
-static inline struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
-{
-	struct rpcrdma_read_chunk *ch =
-		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
-	if (ch->rc_discrim == 0)
-		return NULL;
-
-	return ch;
-}
-
-/*
- * Returns the address of the first read write array element or <nul> if no
- * write array list is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
-	if (rmsgp->rm_body.rm_chunks[0] != 0
-	    || rmsgp->rm_body.rm_chunks[1] == 0)
-		return NULL;
-
-	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
-
-/*
- * Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
-{
-	struct rpcrdma_read_chunk *rch;
-	struct rpcrdma_write_array *wr_ary;
-	struct rpcrdma_write_array *rp_ary;
-
-	/* XXX: Need to fix when reply list may occur with read-list and/or
-	 * write list */
-	if (rmsgp->rm_body.rm_chunks[0] != 0 ||
-	    rmsgp->rm_body.rm_chunks[1] != 0)
-		return NULL;
-
-	rch = svc_rdma_get_read_chunk(rmsgp);
-	if (rch) {
-		while (rch->rc_discrim)
-			rch++;
-
-		/* The reply list follows an empty write array located
-		 * at 'rc_position' here. The reply array is at rc_target.
-		 */
-		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-
-		goto found_it;
-	}
-
-	wr_ary = svc_rdma_get_write_array(rmsgp);
-	if (wr_ary) {
-		rp_ary = (struct rpcrdma_write_array *)
-			&wr_ary->
-			wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
-		goto found_it;
-	}
-
-	/* No read list, no write list */
-	rp_ary = (struct rpcrdma_write_array *)
-		&rmsgp->rm_body.rm_chunks[2];
-
- found_it:
-	if (rp_ary->wc_discrim == 0)
-		return NULL;
-
-	return rp_ary;
-}
 #endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f..78512cf 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@
 		      struct svc_serv *);
 int	svc_create_xprt(struct svc_serv *, const char *, struct net *,
 			const int, const unsigned short, int);
+void	svc_xprt_do_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_put(struct svc_xprt *xprt);
 void	svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b176130..b7b279b 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,7 +49,7 @@
  * a single chunk type per message is supported currently.
  */
 #define RPCRDMA_MIN_SLOT_TABLE	(2U)
-#define RPCRDMA_DEF_SLOT_TABLE	(32U)
+#define RPCRDMA_DEF_SLOT_TABLE	(128U)
 #define RPCRDMA_MAX_SLOT_TABLE	(256U)
 
 #define RPCRDMA_DEF_INLINE  (1024)	/* default inline max */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 31496d2..7ba7dcca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,7 +351,15 @@
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
 #ifdef CONFIG_MEMCG
-extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+	/* root ? */
+	if (mem_cgroup_disabled() || !memcg->css.parent)
+		return vm_swappiness;
+
+	return memcg->swappiness;
+}
+
 #else
 static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 {
@@ -398,6 +406,9 @@
 extern struct page *lookup_swap_cache(swp_entry_t);
 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
+extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
+			struct vm_area_struct *vma, unsigned long addr,
+			bool *new_page_allocated);
 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
 
@@ -431,6 +442,7 @@
 extern sector_t map_swap_page(struct page *, struct block_device **);
 extern sector_t swapdev_block(int, pgoff_t);
 extern int page_swapcount(struct page *);
+extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
 extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
@@ -522,6 +534,11 @@
 	return 0;
 }
 
+static inline int swp_swapcount(swp_entry_t entry)
+{
+	return 0;
+}
+
 #define reuse_swap_page(page)	(page_mapcount(page) == 1)
 
 static inline int try_to_free_swap(struct page *page)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cedf3d3..5c3a5f3 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -164,6 +164,9 @@
 #endif
 
 #ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
 /*
  * Support for hardware poisoned pages
  */
@@ -177,6 +180,31 @@
 {
 	return swp_type(entry) == SWP_HWPOISON;
 }
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+	return TestSetPageHWPoison(page);
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+	atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_dec(void)
+{
+	atomic_long_dec(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_add(long num)
+{
+	atomic_long_add(num, &num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long num)
+{
+	atomic_long_sub(num, &num_poisoned_pages);
+}
 #else
 
 static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@
 {
 	return 0;
 }
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+	return false;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
 #endif
 
 #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b..0800131 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@
 asmlinkage long sys_eventfd(unsigned int count);
 asmlinkage long sys_eventfd2(unsigned int count, int flags);
 asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_userfaultfd(int flags);
 asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
 asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
 asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 0000000..587480a
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ *  include/linux/userfaultfd_k.h
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (UFFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+			    unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+			    unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+			      unsigned long dst_start,
+			      unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+				   unsigned long address,
+				   unsigned int flags,
+				   unsigned long reason)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
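
A brief sketch of the consumer side these helpers target: a fault path hands
a missing-page fault to userspace when a userfaultfd is armed. The function
is a simplified stand-in for the real fault-handler context:

    #include <linux/mm.h>
    #include <linux/userfaultfd_k.h>

    static int example_handle_missing(struct vm_area_struct *vma,
                                      unsigned long address,
                                      unsigned int flags)
    {
            if (userfaultfd_missing(vma)) {
                    /* Blocks until userspace resolves the fault via the
                     * UFFDIO_* ioctls; with CONFIG_USERFAULTFD disabled the
                     * stub above returns VM_FAULT_SIGBUS instead. */
                    return handle_userfault(vma, address, flags,
                                            VM_UFFD_MISSING);
            }
            /* ... fall back to normal anonymous-page instantiation ... */
            return 0;
    }
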
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
index ac34819..da2049b 100644
--- a/include/linux/verify_pefile.h
+++ b/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
 #ifndef _LINUX_VERIFY_PEFILE_H
 #define _LINUX_VERIFY_PEFILE_H
 
+#include <crypto/public_key.h>
+
 extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
-				   struct key *trusted_keyring, bool *_trusted);
+				   struct key *trusted_keyring,
+				   enum key_being_used_for usage,
+				   bool *_trusted);
 
 #endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f..d3d0772 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,7 +147,8 @@
 
 typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+			  void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -179,7 +180,7 @@
 #define wake_up_poll(x, m)						\
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m)					\
-	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+	__wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m)				\
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47fead..d74a0e9 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@
 extern int watchdog_register_device(struct watchdog_device *);
 extern void watchdog_unregister_device(struct watchdog_device *);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
 #endif  /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index f9d41a6..e183a0a 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -9,7 +9,7 @@
 	int (*evict)(struct zbud_pool *pool, unsigned long handle);
 };
 
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
 void zbud_destroy_pool(struct zbud_pool *pool);
 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
 	unsigned long *handle);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index d30eff3..c924a28 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -37,7 +37,7 @@
 };
 
 struct zpool *zpool_create_pool(char *type, char *name,
-			gfp_t gfp, struct zpool_ops *ops);
+			gfp_t gfp, const struct zpool_ops *ops);
 
 char *zpool_get_type(struct zpool *pool);
 
@@ -81,7 +81,7 @@
 	atomic_t refcount;
 	struct list_head list;
 
-	void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
+	void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
 			struct zpool *zpool);
 	void (*destroy)(void *pool);
 
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 1338190..6398dfa 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -34,6 +34,11 @@
 	 */
 };
 
+struct zs_pool_stats {
+	/* How many pages were migrated (freed) */
+	unsigned long pages_compacted;
+};
+
 struct zs_pool;
 
 struct zs_pool *zs_create_pool(char *name, gfp_t flags);
@@ -49,4 +54,5 @@
 unsigned long zs_get_total_pages(struct zs_pool *pool);
 unsigned long zs_compact(struct zs_pool *pool);
 
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
 #endif
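
A one-line consumer sketch for the new stats hook (pool creation elided):

    #include <linux/zsmalloc.h>

    static void example_report_compaction(struct zs_pool *pool)
    {
            struct zs_pool_stats stats;

            zs_pool_stats(pool, &stats);
            pr_info("zsmalloc: %lu pages freed by compaction\n",
                    stats.pages_compacted);
    }
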
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 0dc7060..17ddae3 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -53,9 +53,13 @@
 
 /**
  * struct media_devnode - Media device node
+ * @fops:	pointer to struct media_file_operations with media device ops
+ * @dev:	struct device pointer for the media controller device
+ * @cdev:	struct cdev pointer for the character device
  * @parent:	parent device
  * @minor:	device node minor number
  * @flags:	flags, combination of the MEDIA_FLAG_* constants
+ * @release:	release callback called at the end of media_devnode_release()
  *
  * This structure represents a media-related device node.
  *
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
deleted file mode 100644
index 048f8f9..0000000
--- a/include/media/omap3isp.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * omap3isp.h
- *
- * TI OMAP3 ISP - Platform data
- *
- * Copyright (C) 2011 Nokia Corporation
- *
- * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *	     Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __MEDIA_OMAP3ISP_H__
-#define __MEDIA_OMAP3ISP_H__
-
-struct i2c_board_info;
-struct isp_device;
-
-enum isp_interface_type {
-	ISP_INTERFACE_PARALLEL,
-	ISP_INTERFACE_CSI2A_PHY2,
-	ISP_INTERFACE_CCP2B_PHY1,
-	ISP_INTERFACE_CCP2B_PHY2,
-	ISP_INTERFACE_CSI2C_PHY1,
-};
-
-enum {
-	ISP_LANE_SHIFT_0 = 0,
-	ISP_LANE_SHIFT_2 = 1,
-	ISP_LANE_SHIFT_4 = 2,
-	ISP_LANE_SHIFT_6 = 3,
-};
-
-/**
- * struct isp_parallel_cfg - Parallel interface configuration
- * @data_lane_shift: Data lane shifter
- *		ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
- *		ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
- *		ISP_LANE_SHIFT_4 - CAMEXT[13:4] -> CAM[9:0]
- *		ISP_LANE_SHIFT_6 - CAMEXT[13:6] -> CAM[7:0]
- * @clk_pol: Pixel clock polarity
- *		0 - Sample on rising edge, 1 - Sample on falling edge
- * @hs_pol: Horizontal synchronization polarity
- *		0 - Active high, 1 - Active low
- * @vs_pol: Vertical synchronization polarity
- *		0 - Active high, 1 - Active low
- * @fld_pol: Field signal polarity
- *		0 - Positive, 1 - Negative
- * @data_pol: Data polarity
- *		0 - Normal, 1 - One's complement
- */
-struct isp_parallel_cfg {
-	unsigned int data_lane_shift:2;
-	unsigned int clk_pol:1;
-	unsigned int hs_pol:1;
-	unsigned int vs_pol:1;
-	unsigned int fld_pol:1;
-	unsigned int data_pol:1;
-};
-
-enum {
-	ISP_CCP2_PHY_DATA_CLOCK = 0,
-	ISP_CCP2_PHY_DATA_STROBE = 1,
-};
-
-enum {
-	ISP_CCP2_MODE_MIPI = 0,
-	ISP_CCP2_MODE_CCP2 = 1,
-};
-
-/**
- * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct isp_csiphy_lane {
-	u8 pos;
-	u8 pol;
-};
-
-#define ISP_CSIPHY1_NUM_DATA_LANES	1
-#define ISP_CSIPHY2_NUM_DATA_LANES	2
-
-/**
- * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct isp_csiphy_lanes_cfg {
-	struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
-	struct isp_csiphy_lane clk;
-};
-
-/**
- * struct isp_ccp2_cfg - CCP2 interface configuration
- * @strobe_clk_pol: Strobe/clock polarity
- *		0 - Non Inverted, 1 - Inverted
- * @crc: Enable the cyclic redundancy check
- * @ccp2_mode: Enable CCP2 compatibility mode
- *		ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
- *		ISP_CCP2_MODE_CCP2 - CCP2 mode
- * @phy_layer: Physical layer selection
- *		ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
- *		ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
- * @vpclk_div: Video port output clock control
- */
-struct isp_ccp2_cfg {
-	unsigned int strobe_clk_pol:1;
-	unsigned int crc:1;
-	unsigned int ccp2_mode:1;
-	unsigned int phy_layer:1;
-	unsigned int vpclk_div:2;
-	struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-/**
- * struct isp_csi2_cfg - CSI2 interface configuration
- * @crc: Enable the cyclic redundancy check
- */
-struct isp_csi2_cfg {
-	unsigned crc:1;
-	struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-struct isp_bus_cfg {
-	enum isp_interface_type interface;
-	union {
-		struct isp_parallel_cfg parallel;
-		struct isp_ccp2_cfg ccp2;
-		struct isp_csi2_cfg csi2;
-	} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct isp_platform_subdev {
-	struct i2c_board_info *board_info;
-	int i2c_adapter_id;
-	struct isp_bus_cfg *bus;
-};
-
-struct isp_platform_data {
-	struct isp_platform_subdev *subdevs;
-	void (*set_constraints)(struct isp_device *isp, bool enable);
-};
-
-#endif	/* __MEDIA_OMAP3ISP_H__ */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 644bdc6..ec921f6 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -69,7 +69,7 @@
  * @rc_map: current scan/key table
  * @lock: used to ensure we've filled in all protocol details before
  *	anyone can call show_protocols or store_protocols
- * @devno: unique remote control device number
+ * @minor: unique minor remote control device number
  * @raw: additional data for raw pulse/space devices
  * @input_dev: the input child device used to communicate events to userspace
  * @driver_type: specifies if protocol decoding is done in hardware or software
@@ -110,7 +110,7 @@
  * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs)
  * @s_tx_carrier: set transmit carrier frequency
  * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%)
- * @s_rx_carrier: inform driver about carrier it is expected to handle
+ * @s_rx_carrier_range: inform driver about the carrier range it is expected to handle
  * @tx_ir: transmit IR
  * @s_idle: enable/disable hardware idle mode, upon which,
  *	device doesn't interrupt host until it sees IR pulses
@@ -129,7 +129,7 @@
 	const char			*map_name;
 	struct rc_map			rc_map;
 	struct mutex			lock;
-	unsigned long			devno;
+	unsigned int			minor;
 	struct ir_raw_event_ctrl	*raw;
 	struct input_dev		*input_dev;
 	enum rc_driver_type		driver_type;
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 27763d5..7c4bbc4 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -14,30 +14,28 @@
 enum rc_type {
 	RC_TYPE_UNKNOWN		= 0,	/* Protocol not known */
 	RC_TYPE_OTHER		= 1,	/* Protocol known but proprietary */
-	RC_TYPE_LIRC		= 2,	/* Pass raw IR to lirc userspace */
-	RC_TYPE_RC5		= 3,	/* Philips RC5 protocol */
-	RC_TYPE_RC5X		= 4,	/* Philips RC5x protocol */
-	RC_TYPE_RC5_SZ		= 5,	/* StreamZap variant of RC5 */
-	RC_TYPE_JVC		= 6,	/* JVC protocol */
-	RC_TYPE_SONY12		= 7,	/* Sony 12 bit protocol */
-	RC_TYPE_SONY15		= 8,	/* Sony 15 bit protocol */
-	RC_TYPE_SONY20		= 9,	/* Sony 20 bit protocol */
-	RC_TYPE_NEC		= 10,	/* NEC protocol */
-	RC_TYPE_SANYO		= 11,	/* Sanyo protocol */
-	RC_TYPE_MCE_KBD		= 12,	/* RC6-ish MCE keyboard/mouse */
-	RC_TYPE_RC6_0		= 13,	/* Philips RC6-0-16 protocol */
-	RC_TYPE_RC6_6A_20	= 14,	/* Philips RC6-6A-20 protocol */
-	RC_TYPE_RC6_6A_24	= 15,	/* Philips RC6-6A-24 protocol */
-	RC_TYPE_RC6_6A_32	= 16,	/* Philips RC6-6A-32 protocol */
-	RC_TYPE_RC6_MCE		= 17,	/* MCE (Philips RC6-6A-32 subtype) protocol */
-	RC_TYPE_SHARP		= 18,	/* Sharp protocol */
-	RC_TYPE_XMP		= 19,	/* XMP protocol */
+	RC_TYPE_RC5		= 2,	/* Philips RC5 protocol */
+	RC_TYPE_RC5X		= 3,	/* Philips RC5x protocol */
+	RC_TYPE_RC5_SZ		= 4,	/* StreamZap variant of RC5 */
+	RC_TYPE_JVC		= 5,	/* JVC protocol */
+	RC_TYPE_SONY12		= 6,	/* Sony 12 bit protocol */
+	RC_TYPE_SONY15		= 7,	/* Sony 15 bit protocol */
+	RC_TYPE_SONY20		= 8,	/* Sony 20 bit protocol */
+	RC_TYPE_NEC		= 9,	/* NEC protocol */
+	RC_TYPE_SANYO		= 10,	/* Sanyo protocol */
+	RC_TYPE_MCE_KBD		= 11,	/* RC6-ish MCE keyboard/mouse */
+	RC_TYPE_RC6_0		= 12,	/* Philips RC6-0-16 protocol */
+	RC_TYPE_RC6_6A_20	= 13,	/* Philips RC6-6A-20 protocol */
+	RC_TYPE_RC6_6A_24	= 14,	/* Philips RC6-6A-24 protocol */
+	RC_TYPE_RC6_6A_32	= 15,	/* Philips RC6-6A-32 protocol */
+	RC_TYPE_RC6_MCE		= 16,	/* MCE (Philips RC6-6A-32 subtype) protocol */
+	RC_TYPE_SHARP		= 17,	/* Sharp protocol */
+	RC_TYPE_XMP		= 18,	/* XMP protocol */
 };
 
 #define RC_BIT_NONE		0
 #define RC_BIT_UNKNOWN		(1 << RC_TYPE_UNKNOWN)
 #define RC_BIT_OTHER		(1 << RC_TYPE_OTHER)
-#define RC_BIT_LIRC		(1 << RC_TYPE_LIRC)
 #define RC_BIT_RC5		(1 << RC_TYPE_RC5)
 #define RC_BIT_RC5X		(1 << RC_TYPE_RC5X)
 #define RC_BIT_RC5_SZ		(1 << RC_TYPE_RC5_SZ)
@@ -56,7 +54,7 @@
 #define RC_BIT_SHARP		(1 << RC_TYPE_SHARP)
 #define RC_BIT_XMP		(1 << RC_TYPE_XMP)
 
-#define RC_BIT_ALL	(RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \
+#define RC_BIT_ALL	(RC_BIT_UNKNOWN | RC_BIT_OTHER | \
 			 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
 			 RC_BIT_JVC | \
 			 RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
diff --git a/include/media/tc358743.h b/include/media/tc358743.h
new file mode 100644
index 0000000..4513f2f
--- /dev/null
+++ b/include/media/tc358743.h
@@ -0,0 +1,131 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
+ */
+
+#ifndef _TC358743_
+#define _TC358743_
+
+enum tc358743_ddc5v_delays {
+	DDC5V_DELAY_0_MS,
+	DDC5V_DELAY_50_MS,
+	DDC5V_DELAY_100_MS,
+	DDC5V_DELAY_200_MS,
+};
+
+enum tc358743_hdmi_detection_delay {
+	HDMI_MODE_DELAY_0_MS,
+	HDMI_MODE_DELAY_25_MS,
+	HDMI_MODE_DELAY_50_MS,
+	HDMI_MODE_DELAY_100_MS,
+};
+
+struct tc358743_platform_data {
+	/* System clock connected to REFCLK (pin H5) */
+	u32 refclk_hz; /* 26 MHz, 27 MHz or 42 MHz */
+
+	/* DDC +5V debounce delay to avoid spurious interrupts when the cable
+	 * is connected.
+	 * Sets DDC5V_MODE in register DDC_CTL.
+	 * Default: DDC5V_DELAY_0_MS
+	 */
+	enum tc358743_ddc5v_delays ddc5v_delay;
+
+	bool enable_hdcp;
+
+	/*
+	 * The FIFO size is 512x32, so Toshiba recommends setting the default FIFO
+	 * level to somewhere in the middle (e.g. 300), so it can cover speed
+	 * mismatches in input and output ports.
+	 */
+	u16 fifo_level;
+
+	/* Bps per lane is (refclk_hz / pll_prd) * pll_fbd */
+	u16 pll_prd;
+	u16 pll_fbd;
+
+	/* CSI
+	 * Calculate CSI parameters with REF_02 for the highest resolution your
+	 * CSI interface can handle. The driver will adjust the number of CSI
+	 * lanes in use according to the pixel clock.
+	 *
+	 * The values in brackets are calculated with REF_02 when the number of
+	 * bps per lane is 823.5 MHz, and can serve as a starting point.
+	 */
+	u32 lineinitcnt;	/* (0x00001770) */
+	u32 lptxtimecnt;	/* (0x00000005) */
+	u32 tclk_headercnt;	/* (0x00001d04) */
+	u32 tclk_trailcnt;	/* (0x00000000) */
+	u32 ths_headercnt;	/* (0x00000505) */
+	u32 twakeup;		/* (0x00004650) */
+	u32 tclk_postcnt;	/* (0x00000000) */
+	u32 ths_trailcnt;	/* (0x00000004) */
+	u32 hstxvregcnt;	/* (0x00000005) */
+
+	/* DVI->HDMI detection delay to avoid unnecessary switching between DVI
+	 * and HDMI mode.
+	 * Sets HDMI_DET_V in register HDMI_DET.
+	 * Default: HDMI_MODE_DELAY_0_MS
+	 */
+	enum tc358743_hdmi_detection_delay hdmi_detection_delay;
+
+	/* Reset PHY automatically when TMDS clock goes from DC to AC.
+	 * Sets PHY_AUTO_RST2 in register PHY_CTL2.
+	 * Default: false
+	 */
+	bool hdmi_phy_auto_reset_tmds_detected;
+
+	/* Reset PHY automatically when TMDS clock passes 21 MHz.
+	 * Sets PHY_AUTO_RST3 in register PHY_CTL2.
+	 * Default: false
+	 */
+	bool hdmi_phy_auto_reset_tmds_in_range;
+
+	/* Reset PHY automatically when TMDS clock is detected.
+	 * Sets PHY_AUTO_RST4 in register PHY_CTL2.
+	 * Default: false
+	 */
+	bool hdmi_phy_auto_reset_tmds_valid;
+
+	/* Reset HDMI PHY automatically when hsync period is out of range.
+	 * Sets H_PI_RST in register HV_RST.
+	 * Default: false
+	 */
+	bool hdmi_phy_auto_reset_hsync_out_of_range;
+
+	/* Reset HDMI PHY automatically when vsync period is out of range.
+	 * Sets V_PI_RST in register HV_RST.
+	 * Default: false
+	 */
+	bool hdmi_phy_auto_reset_vsync_out_of_range;
+};
+
+/* custom controls */
+/* Audio sample rate in Hz */
+#define TC358743_CID_AUDIO_SAMPLING_RATE (V4L2_CID_USER_TC358743_BASE + 0)
+/* Audio present status */
+#define TC358743_CID_AUDIO_PRESENT       (V4L2_CID_USER_TC358743_BASE + 1)
+
+#endif
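
For orientation, a hedged example of a board file filling in the new
platform data. The CSI timing counts are taken from the bracketed REF_02
values in the comments above; refclk_hz, the delays and the PLL dividers
are illustrative, not validated settings:

    #include <media/tc358743.h>

    static struct tc358743_platform_data example_tc358743_pdata = {
            .refclk_hz            = 27000000,            /* 27 MHz REFCLK */
            .ddc5v_delay          = DDC5V_DELAY_100_MS,
            .enable_hdcp          = false,
            .fifo_level           = 300,                 /* middle of 512x32 */
            .pll_prd              = 4,                   /* illustrative */
            .pll_fbd              = 132,                 /* illustrative */
            .lineinitcnt          = 0x00001770,
            .lptxtimecnt          = 0x00000005,
            .tclk_headercnt       = 0x00001d04,
            .tclk_trailcnt        = 0x00000000,
            .ths_headercnt        = 0x00000505,
            .twakeup              = 0x00004650,
            .tclk_postcnt         = 0x00000000,
            .ths_trailcnt         = 0x00000004,
            .hstxvregcnt          = 0x00000005,
            .hdmi_detection_delay = HDMI_MODE_DELAY_100_MS,
    };
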
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 7683569..1d6d7da 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -32,7 +32,8 @@
 
 /**
  * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- * @bus_type:	subdevice bus type to select the appropriate matching method
+ *
+ * @match_type:	type of match that will be used
  * @match:	union of per-bus type matching data sets
  * @list:	used to link struct v4l2_async_subdev objects, waiting to be
  *		probed, to a notifier->waiting list
@@ -62,8 +63,9 @@
 };
 
 /**
- * v4l2_async_notifier - v4l2_device notifier data
- * @num_subdevs:number of subdevices
+ * struct v4l2_async_notifier - v4l2_device notifier data
+ *
+ * @num_subdevs: number of subdevices
  * @subdevs:	array of pointers to subdevice descriptors
  * @v4l2_dev:	pointer to struct v4l2_device
  * @waiting:	list of struct v4l2_async_subdev, waiting for their drivers
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 911f3e5..da6fe98 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -36,7 +36,8 @@
 struct v4l2_fh;
 struct poll_table_struct;
 
-/** union v4l2_ctrl_ptr - A pointer to a control value.
+/**
+ * union v4l2_ctrl_ptr - A pointer to a control value.
  * @p_s32:	Pointer to a 32-bit signed value.
  * @p_s64:	Pointer to a 64-bit signed value.
  * @p_u8:	Pointer to a 8-bit unsigned value.
@@ -55,30 +56,34 @@
 	void *p;
 };
 
-/** struct v4l2_ctrl_ops - The control operations that the driver has to provide.
-  * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
-  *		for volatile (and usually read-only) controls such as a control
-  *		that returns the current signal strength which changes
-  *		continuously.
-  *		If not set, then the currently cached value will be returned.
-  * @try_ctrl:	Test whether the control's value is valid. Only relevant when
-  *		the usual min/max/step checks are not sufficient.
-  * @s_ctrl:	Actually set the new control value. s_ctrl is compulsory. The
-  *		ctrl->handler->lock is held when these ops are called, so no
-  *		one else can access controls owned by that handler.
-  */
+/**
+ * struct v4l2_ctrl_ops - The control operations that the driver has to provide.
+ * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
+ *		for volatile (and usually read-only) controls such as a control
+ *		that returns the current signal strength which changes
+ *		continuously.
+ *		If not set, then the currently cached value will be returned.
+ * @try_ctrl:	Test whether the control's value is valid. Only relevant when
+ *		the usual min/max/step checks are not sufficient.
+ * @s_ctrl:	Actually set the new control value. s_ctrl is compulsory. The
+ *		ctrl->handler->lock is held when these ops are called, so no
+ *		one else can access controls owned by that handler.
+ */
 struct v4l2_ctrl_ops {
 	int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl);
 	int (*try_ctrl)(struct v4l2_ctrl *ctrl);
 	int (*s_ctrl)(struct v4l2_ctrl *ctrl);
 };
 
-/** struct v4l2_ctrl_type_ops - The control type operations that the driver has to provide.
-  * @equal: return true if both values are equal.
-  * @init: initialize the value.
-  * @log: log the value.
-  * @validate: validate the value. Return 0 on success and a negative value otherwise.
-  */
+/**
+ * struct v4l2_ctrl_type_ops - The control type operations that the driver
+ * 			       has to provide.
+ *
+ * @equal: return true if both values are equal.
+ * @init: initialize the value.
+ * @log: log the value.
+ * @validate: validate the value. Return 0 on success and a negative value otherwise.
+ */
 struct v4l2_ctrl_type_ops {
 	bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
 		      union v4l2_ctrl_ptr ptr1,
@@ -92,74 +97,80 @@
 
 typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
 
-/** struct v4l2_ctrl - The control structure.
-  * @node:	The list node.
-  * @ev_subs:	The list of control event subscriptions.
-  * @handler:	The handler that owns the control.
-  * @cluster:	Point to start of cluster array.
-  * @ncontrols:	Number of controls in cluster array.
-  * @done:	Internal flag: set for each processed control.
-  * @is_new:	Set when the user specified a new value for this control. It
-  *		is also set when called from v4l2_ctrl_handler_setup. Drivers
-  *		should never set this flag.
-  * @has_changed: Set when the current value differs from the new value. Drivers
-  *		should never use this flag.
-  * @is_private: If set, then this control is private to its handler and it
-  *		will not be added to any other handlers. Drivers can set
-  *		this flag.
-  * @is_auto:   If set, then this control selects whether the other cluster
-  *		members are in 'automatic' mode or 'manual' mode. This is
-  *		used for autogain/gain type clusters. Drivers should never
-  *		set this flag directly.
-  * @is_int:    If set, then this control has a simple integer value (i.e. it
-  *		uses ctrl->val).
-  * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
-  * @is_ptr:	If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
-  *		and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
-  *		v4l2_ext_control uses field p to point to the data.
-  * @is_array: If set, then this control contains an N-dimensional array.
-  * @has_volatiles: If set, then one or more members of the cluster are volatile.
-  *		Drivers should never touch this flag.
-  * @call_notify: If set, then call the handler's notify function whenever the
-  *		control's value changes.
-  * @manual_mode_value: If the is_auto flag is set, then this is the value
-  *		of the auto control that determines if that control is in
-  *		manual mode. So if the value of the auto control equals this
-  *		value, then the whole cluster is in manual mode. Drivers should
-  *		never set this flag directly.
-  * @ops:	The control ops.
-  * @type_ops:	The control type ops.
-  * @id:	The control ID.
-  * @name:	The control name.
-  * @type:	The control type.
-  * @minimum:	The control's minimum value.
-  * @maximum:	The control's maximum value.
-  * @default_value: The control's default value.
-  * @step:	The control's step value for non-menu controls.
-  * @elems:	The number of elements in the N-dimensional array.
-  * @elem_size:	The size in bytes of the control.
-  * @dims:	The size of each dimension.
-  * @nr_of_dims:The number of dimensions in @dims.
-  * @menu_skip_mask: The control's skip mask for menu controls. This makes it
-  *		easy to skip menu items that are not valid. If bit X is set,
-  *		then menu item X is skipped. Of course, this only works for
-  *		menus with <= 32 menu items. There are no menus that come
-  *		close to that number, so this is OK. Should we ever need more,
-  *		then this will have to be extended to a u64 or a bit array.
-  * @qmenu:	A const char * array for all menu items. Array entries that are
-  *		empty strings ("") correspond to non-existing menu items (this
-  *		is in addition to the menu_skip_mask above). The last entry
-  *		must be NULL.
-  * @flags:	The control's flags.
-  * @cur:	The control's current value.
-  * @val:	The control's new s32 value.
-  * @val64:	The control's new s64 value.
-  * @priv:	The control's private pointer. For use by the driver. It is
-  *		untouched by the control framework. Note that this pointer is
-  *		not freed when the control is deleted. Should this be needed
-  *		then a new internal bitfield can be added to tell the framework
-  *		to free this pointer.
-  */
+/**
+ * struct v4l2_ctrl - The control structure.
+ * @node:	The list node.
+ * @ev_subs:	The list of control event subscriptions.
+ * @handler:	The handler that owns the control.
+ * @cluster:	Point to start of cluster array.
+ * @ncontrols:	Number of controls in cluster array.
+ * @done:	Internal flag: set for each processed control.
+ * @is_new:	Set when the user specified a new value for this control. It
+ *		is also set when called from v4l2_ctrl_handler_setup. Drivers
+ *		should never set this flag.
+ * @has_changed: Set when the current value differs from the new value. Drivers
+ *		should never use this flag.
+ * @is_private: If set, then this control is private to its handler and it
+ *		will not be added to any other handlers. Drivers can set
+ *		this flag.
+ * @is_auto:   If set, then this control selects whether the other cluster
+ *		members are in 'automatic' mode or 'manual' mode. This is
+ *		used for autogain/gain type clusters. Drivers should never
+ *		set this flag directly.
+ * @is_int:    If set, then this control has a simple integer value (i.e. it
+ *		uses ctrl->val).
+ * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
+ * @is_ptr:	If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
+ *		and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
+ *		v4l2_ext_control uses field p to point to the data.
+ * @is_array: If set, then this control contains an N-dimensional array.
+ * @has_volatiles: If set, then one or more members of the cluster are volatile.
+ *		Drivers should never touch this flag.
+ * @call_notify: If set, then call the handler's notify function whenever the
+ *		control's value changes.
+ * @manual_mode_value: If the is_auto flag is set, then this is the value
+ *		of the auto control that determines if that control is in
+ *		manual mode. So if the value of the auto control equals this
+ *		value, then the whole cluster is in manual mode. Drivers should
+ *		never set this flag directly.
+ * @ops:	The control ops.
+ * @type_ops:	The control type ops.
+ * @id:	The control ID.
+ * @name:	The control name.
+ * @type:	The control type.
+ * @minimum:	The control's minimum value.
+ * @maximum:	The control's maximum value.
+ * @default_value: The control's default value.
+ * @step:	The control's step value for non-menu controls.
+ * @elems:	The number of elements in the N-dimensional array.
+ * @elem_size:	The size in bytes of the control.
+ * @dims:	The size of each dimension.
+ * @nr_of_dims: The number of dimensions in @dims.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ *		easy to skip menu items that are not valid. If bit X is set,
+ *		then menu item X is skipped. Of course, this only works for
+ *		menus with <= 32 menu items. There are no menus that come
+ *		close to that number, so this is OK. Should we ever need more,
+ *		then this will have to be extended to a u64 or a bit array.
+ * @qmenu:	A const char * array for all menu items. Array entries that are
+ *		empty strings ("") correspond to non-existing menu items (this
+ *		is in addition to the menu_skip_mask above). The last entry
+ *		must be NULL.
+ * @flags:	The control's flags.
+ * @cur:	The control's current value.
+ * @val:	The control's new s32 value.
+ * @priv:	The control's private pointer. For use by the driver. It is
+ *		untouched by the control framework. Note that this pointer is
+ *		not freed when the control is deleted. Should this be needed
+ *		then a new internal bitfield can be added to tell the framework
+ *		to free this pointer.
+ * @p_cur:	The control's current value represented via a union that
+ *		provides a standard way of accessing control types
+ *		through a pointer.
+ * @p_new:	The control's new value represented via a union that
+ *		provides a standard way of accessing control types
+ *		through a pointer.
+ */
 struct v4l2_ctrl {
 	/* Administrative fields */
 	struct list_head node;
@@ -210,16 +221,17 @@
 	union v4l2_ctrl_ptr p_cur;
 };
 
-/** struct v4l2_ctrl_ref - The control reference.
-  * @node:	List node for the sorted list.
-  * @next:	Single-link list node for the hash.
-  * @ctrl:	The actual control information.
-  * @helper:	Pointer to helper struct. Used internally in prepare_ext_ctrls().
-  *
-  * Each control handler has a list of these refs. The list_head is used to
-  * keep a sorted-by-control-ID list of all controls, while the next pointer
-  * is used to link the control in the hash's bucket.
-  */
+/**
+ * struct v4l2_ctrl_ref - The control reference.
+ * @node:	List node for the sorted list.
+ * @next:	Single-link list node for the hash.
+ * @ctrl:	The actual control information.
+ * @helper:	Pointer to helper struct. Used internally in prepare_ext_ctrls().
+ *
+ * Each control handler has a list of these refs. The list_head is used to
+ * keep a sorted-by-control-ID list of all controls, while the next pointer
+ * is used to link the control in the hash's bucket.
+ */
 struct v4l2_ctrl_ref {
 	struct list_head node;
 	struct v4l2_ctrl_ref *next;
@@ -227,25 +239,26 @@
 	struct v4l2_ctrl_helper *helper;
 };
 
-/** struct v4l2_ctrl_handler - The control handler keeps track of all the
-  * controls: both the controls owned by the handler and those inherited
-  * from other handlers.
-  * @_lock:	Default for "lock".
-  * @lock:	Lock to control access to this handler and its controls.
-  *		May be replaced by the user right after init.
-  * @ctrls:	The list of controls owned by this handler.
-  * @ctrl_refs:	The list of control references.
-  * @cached:	The last found control reference. It is common that the same
-  *		control is needed multiple times, so this is a simple
-  *		optimization.
-  * @buckets:	Buckets for the hashing. Allows for quick control lookup.
-  * @notify:	A notify callback that is called whenever the control changes value.
-  *		Note that the handler's lock is held when the notify function
-  *		is called!
-  * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
-  * @nr_of_buckets: Total number of buckets in the array.
-  * @error:	The error code of the first failed control addition.
-  */
+/**
+ * struct v4l2_ctrl_handler - The control handler keeps track of all the
+ * controls: both the controls owned by the handler and those inherited
+ * from other handlers.
+ * @_lock:	Default for "lock".
+ * @lock:	Lock to control access to this handler and its controls.
+ *		May be replaced by the user right after init.
+ * @ctrls:	The list of controls owned by this handler.
+ * @ctrl_refs:	The list of control references.
+ * @cached:	The last found control reference. It is common that the same
+ *		control is needed multiple times, so this is a simple
+ *		optimization.
+ * @buckets:	Buckets for the hashing. Allows for quick control lookup.
+ * @notify:	A notify callback that is called whenever the control changes value.
+ *		Note that the handler's lock is held when the notify function
+ *		is called!
+ * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
+ * @nr_of_buckets: Total number of buckets in the array.
+ * @error:	The error code of the first failed control addition.
+ */
 struct v4l2_ctrl_handler {
 	struct mutex _lock;
 	struct mutex *lock;
@@ -259,32 +272,35 @@
 	int error;
 };
 
-/** struct v4l2_ctrl_config - Control configuration structure.
-  * @ops:	The control ops.
-  * @type_ops:	The control type ops. Only needed for compound controls.
-  * @id:	The control ID.
-  * @name:	The control name.
-  * @type:	The control type.
-  * @min:	The control's minimum value.
-  * @max:	The control's maximum value.
-  * @step:	The control's step value for non-menu controls.
-  * @def: 	The control's default value.
-  * @dims:	The size of each dimension.
-  * @elem_size:	The size in bytes of the control.
-  * @flags:	The control's flags.
-  * @menu_skip_mask: The control's skip mask for menu controls. This makes it
-  *		easy to skip menu items that are not valid. If bit X is set,
-  *		then menu item X is skipped. Of course, this only works for
-  *		menus with <= 64 menu items. There are no menus that come
-  *		close to that number, so this is OK. Should we ever need more,
-  *		then this will have to be extended to a bit array.
-  * @qmenu:	A const char * array for all menu items. Array entries that are
-  *		empty strings ("") correspond to non-existing menu items (this
-  *		is in addition to the menu_skip_mask above). The last entry
-  *		must be NULL.
-  * @is_private: If set, then this control is private to its handler and it
-  *		will not be added to any other handlers.
-  */
+/**
+ * struct v4l2_ctrl_config - Control configuration structure.
+ * @ops:	The control ops.
+ * @type_ops:	The control type ops. Only needed for compound controls.
+ * @id:	The control ID.
+ * @name:	The control name.
+ * @type:	The control type.
+ * @min:	The control's minimum value.
+ * @max:	The control's maximum value.
+ * @step:	The control's step value for non-menu controls.
+ * @def: 	The control's default value.
+ * @dims:	The size of each dimension.
+ * @elem_size:	The size in bytes of the control.
+ * @flags:	The control's flags.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ *		easy to skip menu items that are not valid. If bit X is set,
+ *		then menu item X is skipped. Of course, this only works for
+ *		menus with <= 64 menu items. There are no menus that come
+ *		close to that number, so this is OK. Should we ever need more,
+ *		then this will have to be extended to a bit array.
+ * @qmenu:	A const char * array for all menu items. Array entries that are
+ *		empty strings ("") correspond to non-existing menu items (this
+ *		is in addition to the menu_skip_mask above). The last entry
+ *		must be NULL.
+ * @qmenu_int:	A const s64 integer array for all menu items of the type
+ * 		V4L2_CTRL_TYPE_INTEGER_MENU.
+ * @is_private: If set, then this control is private to its handler and it
+ *		will not be added to any other handlers.
+ */
 struct v4l2_ctrl_config {
 	const struct v4l2_ctrl_ops *ops;
 	const struct v4l2_ctrl_type_ops *type_ops;
@@ -304,42 +320,44 @@
 	unsigned int is_private:1;
 };
 
-/** v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
-  *
-  * This works for all standard V4L2 controls.
-  * For non-standard controls it will only fill in the given arguments
-  * and @name will be NULL.
-  *
-  * This function will overwrite the contents of @name, @type and @flags.
-  * The contents of @min, @max, @step and @def may be modified depending on
-  * the type.
-  *
-  * Do not use in drivers! It is used internally for backwards compatibility
-  * control handling only. Once all drivers are converted to use the new
-  * control framework this function will no longer be exported.
-  */
+/*
+ * v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
+ *
+ * This works for all standard V4L2 controls.
+ * For non-standard controls it will only fill in the given arguments
+ * and @name will be NULL.
+ *
+ * This function will overwrite the contents of @name, @type and @flags.
+ * The contents of @min, @max, @step and @def may be modified depending on
+ * the type.
+ *
+ * Do not use in drivers! It is used internally for backwards compatibility
+ * control handling only. Once all drivers are converted to use the new
+ * control framework this function will no longer be exported.
+ */
 void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
 		    s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags);
 
 
-/** v4l2_ctrl_handler_init_class() - Initialize the control handler.
-  * @hdl:	The control handler.
-  * @nr_of_controls_hint: A hint of how many controls this handler is
-  *		expected to refer to. This is the total number, so including
-  *		any inherited controls. It doesn't have to be precise, but if
-  *		it is way off, then you either waste memory (too many buckets
-  *		are allocated) or the control lookup becomes slower (not enough
-  *		buckets are allocated, so there are more slow list lookups).
-  *		It will always work, though.
-  * @key:	Used by the lock validator if CONFIG_LOCKDEP is set.
-  * @name:	Used by the lock validator if CONFIG_LOCKDEP is set.
-  *
-  * Returns an error if the buckets could not be allocated. This error will
-  * also be stored in @hdl->error.
-  *
-  * Never use this call directly, always use the v4l2_ctrl_handler_init
-  * macro that hides the @key and @name arguments.
-  */
+/**
+ * v4l2_ctrl_handler_init_class() - Initialize the control handler.
+ * @hdl:	The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ *		expected to refer to. This is the total number, so including
+ *		any inherited controls. It doesn't have to be precise, but if
+ *		it is way off, then you either waste memory (too many buckets
+ *		are allocated) or the control lookup becomes slower (not enough
+ *		buckets are allocated, so there are more slow list lookups).
+ *		It will always work, though.
+ * @key:	Used by the lock validator if CONFIG_LOCKDEP is set.
+ * @name:	Used by the lock validator if CONFIG_LOCKDEP is set.
+ *
+ * Returns an error if the buckets could not be allocated. This error will
+ * also be stored in @hdl->error.
+ *
+ * Never use this call directly, always use the v4l2_ctrl_handler_init
+ * macro that hides the @key and @name arguments.
+ */
 int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
 				 unsigned nr_of_controls_hint,
 				 struct lock_class_key *key, const char *name);
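
Since the kernel-doc is being reflowed anyway, a compact sketch of the
documented lifecycle may help; example_ctrl_ops and the brightness range
are placeholders for a driver's own controls:

    #include <media/v4l2-ctrls.h>

    static int example_s_ctrl(struct v4l2_ctrl *ctrl) { return 0; }

    static const struct v4l2_ctrl_ops example_ctrl_ops = {
            .s_ctrl = example_s_ctrl,       /* compulsory, per the doc */
    };

    static int example_init_controls(struct v4l2_ctrl_handler *hdl)
    {
            v4l2_ctrl_handler_init(hdl, 1); /* hint: about one control */
            v4l2_ctrl_new_std(hdl, &example_ctrl_ops,
                              V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
            if (hdl->error) {               /* first failed addition */
                    int ret = hdl->error;

                    v4l2_ctrl_handler_free(hdl);
                    return ret;
            }
            /* push the current (default) values to the hardware */
            return v4l2_ctrl_handler_setup(hdl);
    }
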
@@ -361,289 +379,326 @@
 	v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL)
 #endif
 
-/** v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
-  * the control list.
-  * @hdl:	The control handler.
-  *
-  * Does nothing if @hdl == NULL.
-  */
+/**
+ * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
+ * the control list.
+ * @hdl:	The control handler.
+ *
+ * Does nothing if @hdl == NULL.
+ */
 void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
 
-/** v4l2_ctrl_lock() - Helper function to lock the handler
-  * associated with the control.
-  * @ctrl:	The control to lock.
-  */
+/**
+ * v4l2_ctrl_lock() - Helper function to lock the handler
+ * associated with the control.
+ * @ctrl:	The control to lock.
+ */
 static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
 {
 	mutex_lock(ctrl->handler->lock);
 }
 
-/** v4l2_ctrl_unlock() - Helper function to unlock the handler
-  * associated with the control.
-  * @ctrl:	The control to unlock.
-  */
+/**
+ * v4l2_ctrl_unlock() - Helper function to unlock the handler
+ * associated with the control.
+ * @ctrl:	The control to unlock.
+ */
 static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
 {
 	mutex_unlock(ctrl->handler->lock);
 }
 
-/** v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
-  * to the handler to initialize the hardware to the current control values.
-  * @hdl:	The control handler.
-  *
-  * Button controls will be skipped, as are read-only controls.
-  *
-  * If @hdl == NULL, then this just returns 0.
-  */
+/**
+ * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
+ * to the handler to initialize the hardware to the current control values.
+ * @hdl:	The control handler.
+ *
+ * Button controls will be skipped, as are read-only controls.
+ *
+ * If @hdl == NULL, then this just returns 0.
+ */
 int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl);
 
-/** v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
-  * @hdl:	The control handler.
-  * @prefix:	The prefix to use when logging the control values. If the
-  *		prefix does not end with a space, then ": " will be added
-  *		after the prefix. If @prefix == NULL, then no prefix will be
-  *		used.
-  *
-  * For use with VIDIOC_LOG_STATUS.
-  *
-  * Does nothing if @hdl == NULL.
-  */
+/**
+ * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
+ * @hdl:	The control handler.
+ * @prefix:	The prefix to use when logging the control values. If the
+ *		prefix does not end with a space, then ": " will be added
+ *		after the prefix. If @prefix == NULL, then no prefix will be
+ *		used.
+ *
+ * For use with VIDIOC_LOG_STATUS.
+ *
+ * Does nothing if @hdl == NULL.
+ */
 void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
 				  const char *prefix);
 
-/** v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
-  * control.
-  * @hdl:	The control handler.
-  * @cfg:	The control's configuration data.
-  * @priv:	The control's driver-specific private data.
-  *
-  * If the &v4l2_ctrl struct could not be allocated then NULL is returned
-  * and @hdl->error is set to the error code (if it wasn't set already).
-  */
+/**
+ * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
+ * control.
+ * @hdl:	The control handler.
+ * @cfg:	The control's configuration data.
+ * @priv:	The control's driver-specific private data.
+ *
+ * If the &v4l2_ctrl struct could not be allocated then NULL is returned
+ * and @hdl->error is set to the error code (if it wasn't set already).
+ */
 struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
 			const struct v4l2_ctrl_config *cfg, void *priv);
 
-/** v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
-  * @hdl:	The control handler.
-  * @ops:	The control ops.
-  * @id:	The control ID.
-  * @min:	The control's minimum value.
-  * @max:	The control's maximum value.
-  * @step:	The control's step value
-  * @def: 	The control's default value.
-  *
-  * If the &v4l2_ctrl struct could not be allocated, or the control
-  * ID is not known, then NULL is returned and @hdl->error is set to the
-  * appropriate error code (if it wasn't set already).
-  *
-  * If @id refers to a menu control, then this function will return NULL.
-  *
-  * Use v4l2_ctrl_new_std_menu() when adding menu controls.
-  */
+/**
+ * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
+ * @hdl:	The control handler.
+ * @ops:	The control ops.
+ * @id:	The control ID.
+ * @min:	The control's minimum value.
+ * @max:	The control's maximum value.
+ * @step:	The control's step value
+ * @def: 	The control's default value.
+ *
+ * If the &v4l2_ctrl struct could not be allocated, or the control
+ * ID is not known, then NULL is returned and @hdl->error is set to the
+ * appropriate error code (if it wasn't set already).
+ *
+ * If @id refers to a menu control, then this function will return NULL.
+ *
+ * Use v4l2_ctrl_new_std_menu() when adding menu controls.
+ */
 struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
 			const struct v4l2_ctrl_ops *ops,
 			u32 id, s64 min, s64 max, u64 step, s64 def);
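
As a hedged usage sketch (foo_s_ctrl and foo_ctrl_ops are hypothetical), a
standard non-menu control is typically created like this; allocation errors
accumulate in hdl->error rather than being checked per call:

  static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
  {
          switch (ctrl->id) {
          case V4L2_CID_BRIGHTNESS:
                  /* Write ctrl->val to the hardware here. */
                  return 0;
          }
          return -EINVAL;
  }

  static const struct v4l2_ctrl_ops foo_ctrl_ops = {
          .s_ctrl = foo_s_ctrl,
  };

  /* min = 0, max = 255, step = 1, default = 128 */
  v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
                    V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);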
 
-/** v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
-  * @hdl:	The control handler.
-  * @ops:	The control ops.
-  * @id:	The control ID.
-  * @max:	The control's maximum value.
-  * @mask: 	The control's skip mask for menu controls. This makes it
-  *		easy to skip menu items that are not valid. If bit X is set,
-  *		then menu item X is skipped. Of course, this only works for
-  *		menus with <= 64 menu items. There are no menus that come
-  *		close to that number, so this is OK. Should we ever need more,
-  *		then this will have to be extended to a bit array.
-  * @def: 	The control's default value.
-  *
-  * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
-  * determines which menu items are to be skipped.
-  *
-  * If @id refers to a non-menu control, then this function will return NULL.
-  */
+/**
+ * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
+ * @hdl:	The control handler.
+ * @ops:	The control ops.
+ * @id:	The control ID.
+ * @max:	The control's maximum value.
+ * @mask: 	The control's skip mask for menu controls. This makes it
+ *		easy to skip menu items that are not valid. If bit X is set,
+ *		then menu item X is skipped. Of course, this only works for
+ *		menus with <= 64 menu items. There are no menus that come
+ *		close to that number, so this is OK. Should we ever need more,
+ *		then this will have to be extended to a bit array.
+ * @def: 	The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
+ * determines which menu items are to be skipped.
+ *
+ * If @id refers to a non-menu control, then this function will return NULL.
+ */
 struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
 			const struct v4l2_ctrl_ops *ops,
 			u32 id, u8 max, u64 mask, u8 def);
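
A short sketch of the skip-mask semantics described above (reusing the
hypothetical foo_ctrl_ops): @max is the highest menu index, and each set bit
in @mask hides the menu item with that index:

  /* BIT(0) hides the DISABLED entry; 50 Hz and 60 Hz stay selectable. */
  v4l2_ctrl_new_std_menu(&foo->hdl, &foo_ctrl_ops,
                         V4L2_CID_POWER_LINE_FREQUENCY,
                         V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
                         BIT(0),
                         V4L2_CID_POWER_LINE_FREQUENCY_50HZ);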
 
-/** v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
-  * with driver specific menu.
-  * @hdl:	The control handler.
-  * @ops:	The control ops.
-  * @id:	The control ID.
-  * @max:	The control's maximum value.
-  * @mask:	The control's skip mask for menu controls. This makes it
-  *		easy to skip menu items that are not valid. If bit X is set,
-  *		then menu item X is skipped. Of course, this only works for
-  *		menus with <= 64 menu items. There are no menus that come
-  *		close to that number, so this is OK. Should we ever need more,
-  *		then this will have to be extended to a bit array.
-  * @def:	The control's default value.
-  * @qmenu:	The new menu.
-  *
-  * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
-  * menu of this control.
-  *
-  */
+/**
+ * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
+ * with driver specific menu.
+ * @hdl:	The control handler.
+ * @ops:	The control ops.
+ * @id:	The control ID.
+ * @max:	The control's maximum value.
+ * @mask:	The control's skip mask for menu controls. This makes it
+ *		easy to skip menu items that are not valid. If bit X is set,
+ *		then menu item X is skipped. Of course, this only works for
+ *		menus with <= 64 menu items. There are no menus that come
+ *		close to that number, so this is OK. Should we ever need more,
+ *		then this will have to be extended to a bit array.
+ * @def:	The control's default value.
+ * @qmenu:	The new menu.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
+ * menu of this control.
+ */
 struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
 			const struct v4l2_ctrl_ops *ops, u32 id, u8 max,
 			u64 mask, u8 def, const char * const *qmenu);
 
-/** v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
-  * @hdl:	The control handler.
-  * @ops:	The control ops.
-  * @id:	The control ID.
-  * @max:	The control's maximum value.
-  * @def:	The control's default value.
-  * @qmenu_int:	The control's menu entries.
-  *
-  * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly
-  * takes as an argument an array of integers determining the menu items.
-  *
-  * If @id refers to a non-integer-menu control, then this function will return NULL.
-  */
+/**
+ * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
+ * @hdl:	The control handler.
+ * @ops:	The control ops.
+ * @id:	The control ID.
+ * @max:	The control's maximum value.
+ * @def:	The control's default value.
+ * @qmenu_int:	The control's menu entries.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
+ * takes as an argument an array of integers determining the menu items.
+ *
+ * If @id refers to a non-integer-menu control, then this function will return NULL.
+ */
 struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
 			const struct v4l2_ctrl_ops *ops,
 			u32 id, u8 max, u8 def, const s64 *qmenu_int);
 
-/** v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
-  * @hdl:	The control handler.
-  * @ctrl:	The control to add.
-  *
-  * It will return NULL if it was unable to add the control reference.
-  * If the control already belonged to the handler, then it will do
-  * nothing and just return @ctrl.
-  */
+/**
+ * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
+ * @hdl:	The control handler.
+ * @ctrl:	The control to add.
+ *
+ * It will return NULL if it was unable to add the control reference.
+ * If the control already belonged to the handler, then it will do
+ * nothing and just return @ctrl.
+ */
 struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
 					  struct v4l2_ctrl *ctrl);
 
-/** v4l2_ctrl_add_handler() - Add all controls from handler @add to
-  * handler @hdl.
-  * @hdl:	The control handler.
-  * @add:	The control handler whose controls you want to add to
-  *		the @hdl control handler.
-  * @filter:	This function will filter which controls should be added.
-  *
-  * Does nothing if either of the two handlers is a NULL pointer.
-  * If @filter is NULL, then all controls are added. Otherwise only those
-  * controls for which @filter returns true will be added.
-  * In case of an error @hdl->error will be set to the error code (if it
-  * wasn't set already).
-  */
+/**
+ * v4l2_ctrl_add_handler() - Add all controls from handler @add to
+ * handler @hdl.
+ * @hdl:	The control handler.
+ * @add:	The control handler whose controls you want to add to
+ *		the @hdl control handler.
+ * @filter:	This function will filter which controls should be added.
+ *
+ * Does nothing if either of the two handlers is a NULL pointer.
+ * If @filter is NULL, then all controls are added. Otherwise only those
+ * controls for which @filter returns true will be added.
+ * In case of an error @hdl->error will be set to the error code (if it
+ * wasn't set already).
+ */
 int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
 			  struct v4l2_ctrl_handler *add,
 			  bool (*filter)(const struct v4l2_ctrl *ctrl));
 
-/** v4l2_ctrl_radio_filter() - Standard filter for radio controls.
-  * @ctrl:	The control that is filtered.
-  *
-  * This will return true for any controls that are valid for radio device
-  * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
-  * transmitter class controls.
-  *
-  * This function is to be used with v4l2_ctrl_add_handler().
-  */
+/**
+ * v4l2_ctrl_radio_filter() - Standard filter for radio controls.
+ * @ctrl:	The control that is filtered.
+ *
+ * This will return true for any controls that are valid for radio device
+ * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
+ * transmitter class controls.
+ *
+ * This function is to be used with v4l2_ctrl_add_handler().
+ */
 bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl);
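
For instance (radio_hdl and subdev_hdl being hypothetical handlers), a radio
device node would inherit only the radio-relevant controls of a sub-device:

  v4l2_ctrl_add_handler(&radio_hdl, &subdev_hdl, v4l2_ctrl_radio_filter);
  if (radio_hdl.error)
          return radio_hdl.error;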
 
-/** v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
-  * @ncontrols:	The number of controls in this cluster.
-  * @controls: 	The cluster control array of size @ncontrols.
-  */
+/**
+ * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
+ * @ncontrols:	The number of controls in this cluster.
+ * @controls: 	The cluster control array of size @ncontrols.
+ */
 void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls);
 
 
-/** v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
-  * that cluster and set it up for autofoo/foo-type handling.
-  * @ncontrols:	The number of controls in this cluster.
-  * @controls:	The cluster control array of size @ncontrols. The first control
-  *		must be the 'auto' control (e.g. autogain, autoexposure, etc.)
-  * @manual_val: The value for the first control in the cluster that equals the
-  *		manual setting.
-  * @set_volatile: If true, then all controls except the first auto control will
-  *		be volatile.
-  *
-  * Use for control groups where one control selects some automatic feature and
-  * the other controls are only active whenever the automatic feature is turned
-  * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
-  * red and blue balance, etc.
-  *
-  * The behavior of such controls is as follows:
-  *
-  * When the autofoo control is set to automatic, then any manual controls
-  * are set to inactive and any reads will call g_volatile_ctrl (if the control
-  * was marked volatile).
-  *
-  * When the autofoo control is set to manual, then any manual controls will
-  * be marked active, and any reads will just return the current value without
-  * going through g_volatile_ctrl.
-  *
-  * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
-  * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
-  * if autofoo is in auto mode.
-  */
+/**
+ * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
+ * that cluster and set it up for autofoo/foo-type handling.
+ * @ncontrols:	The number of controls in this cluster.
+ * @controls:	The cluster control array of size @ncontrols. The first control
+ *		must be the 'auto' control (e.g. autogain, autoexposure, etc.)
+ * @manual_val: The value for the first control in the cluster that equals the
+ *		manual setting.
+ * @set_volatile: If true, then all controls except the first auto control will
+ *		be volatile.
+ *
+ * Use for control groups where one control selects some automatic feature and
+ * the other controls are only active whenever the automatic feature is turned
+ * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
+ * red and blue balance, etc.
+ *
+ * The behavior of such controls is as follows:
+ *
+ * When the autofoo control is set to automatic, then any manual controls
+ * are set to inactive and any reads will call g_volatile_ctrl (if the control
+ * was marked volatile).
+ *
+ * When the autofoo control is set to manual, then any manual controls will
+ * be marked active, and any reads will just return the current value without
+ * going through g_volatile_ctrl.
+ *
+ * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
+ * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
+ * if autofoo is in auto mode.
+ */
 void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
 			u8 manual_val, bool set_volatile);
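
A sketch of the autogain/gain case mentioned above, again with hypothetical
foo_* names; note that the 'auto' control must be the first entry:

  struct v4l2_ctrl *cluster[2];

  cluster[0] = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
                                 V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
  cluster[1] = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
                                 V4L2_CID_GAIN, 0, 255, 1, 64);

  /* manual_val = 0 (autogain off); gain becomes volatile in auto mode. */
  v4l2_ctrl_auto_cluster(2, cluster, 0, true);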
 
 
-/** v4l2_ctrl_find() - Find a control with the given ID.
-  * @hdl:	The control handler.
-  * @id:	The control ID to find.
-  *
-  * If @hdl == NULL this will return NULL as well. Will lock the handler so
-  * do not use from inside &v4l2_ctrl_ops.
-  */
+/**
+ * v4l2_ctrl_find() - Find a control with the given ID.
+ * @hdl:	The control handler.
+ * @id:	The control ID to find.
+ *
+ * If @hdl == NULL this will return NULL as well. Will lock the handler so
+ * do not use from inside &v4l2_ctrl_ops.
+ */
 struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
 
-/** v4l2_ctrl_activate() - Make the control active or inactive.
-  * @ctrl:	The control to (de)activate.
-  * @active:	True if the control should become active.
-  *
-  * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
-  * Does nothing if @ctrl == NULL.
-  * This will usually be called from within the s_ctrl op.
-  * The V4L2_EVENT_CTRL event will be generated afterwards.
-  *
-  * This function assumes that the control handler is locked.
-  */
+/**
+ * v4l2_ctrl_activate() - Make the control active or inactive.
+ * @ctrl:	The control to (de)activate.
+ * @active:	True if the control should become active.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * This will usually be called from within the s_ctrl op.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ *
+ * This function assumes that the control handler is locked.
+ */
 void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
 
-/** v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
-  * @ctrl:	The control to (de)activate.
-  * @grabbed:	True if the control should become grabbed.
-  *
-  * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
-  * Does nothing if @ctrl == NULL.
-  * The V4L2_EVENT_CTRL event will be generated afterwards.
-  * This will usually be called when starting or stopping streaming in the
-  * driver.
-  *
-  * This function assumes that the control handler is not locked and will
-  * take the lock itself.
-  */
+/**
+ * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
+ * @ctrl:	The control to (de)activate.
+ * @grabbed:	True if the control should become grabbed.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ * This will usually be called when starting or stopping streaming in the
+ * driver.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
 void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
 
 
-/** __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() */
+/**
+ * __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
+ *
+ * @ctrl:	The control to update.
+ * @min:	The control's minimum value.
+ * @max:	The control's maximum value.
+ * @step:	The control's step value
+ * @def:	The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control's handler is already locked, so it
+ * can be used from within the &v4l2_ctrl_ops functions.
+ */
 int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
 			     s64 min, s64 max, u64 step, s64 def);
 
-/** v4l2_ctrl_modify_range() - Update the range of a control.
-  * @ctrl:	The control to update.
-  * @min:	The control's minimum value.
-  * @max:	The control's maximum value.
-  * @step:	The control's step value
-  * @def:	The control's default value.
-  *
-  * Update the range of a control on the fly. This works for control types
-  * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
-  * @step value is interpreted as a menu_skip_mask.
-  *
-  * An error is returned if one of the range arguments is invalid for this
-  * control type.
-  *
-  * This function assumes that the control handler is not locked and will
-  * take the lock itself.
-  */
+/**
+ * v4l2_ctrl_modify_range() - Update the range of a control.
+ * @ctrl:	The control to update.
+ * @min:	The control's minimum value.
+ * @max:	The control's maximum value.
+ * @step:	The control's step value
+ * @def:	The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
 static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
 					 s64 min, s64 max, u64 step, s64 def)
 {
@@ -656,21 +711,23 @@
 	return rval;
 }
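
For example, a driver whose new sensor mode narrows the usable exposure range
might do the following (foo->exposure is an assumed struct v4l2_ctrl pointer
saved when the control was created):

  static int foo_update_exposure_range(struct foo_dev *foo,
                                       s64 new_max, s64 new_def)
  {
          /* Takes the handler lock itself, so not callable from s_ctrl. */
          return v4l2_ctrl_modify_range(foo->exposure, 0, new_max, 1,
                                        new_def);
  }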
 
-/** v4l2_ctrl_notify() - Function to set a notify callback for a control.
-  * @ctrl:	The control.
-  * @notify:	The callback function.
-  * @priv:	The callback private handle, passed as argument to the callback.
-  *
-  * This function sets a callback function for the control. If @ctrl is NULL,
-  * then it will do nothing. If @notify is NULL, then the notify callback will
-  * be removed.
-  *
-  * There can be only one notify. If another already exists, then a WARN_ON
-  * will be issued and the function will do nothing.
-  */
+/**
+ * v4l2_ctrl_notify() - Function to set a notify callback for a control.
+ * @ctrl:	The control.
+ * @notify:	The callback function.
+ * @priv:	The callback private handle, passed as argument to the callback.
+ *
+ * This function sets a callback function for the control. If @ctrl is NULL,
+ * then it will do nothing. If @notify is NULL, then the notify callback will
+ * be removed.
+ *
+ * There can be only one notify. If another already exists, then a WARN_ON
+ * will be issued and the function will do nothing.
+ */
 void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
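
A minimal sketch of a notify callback (hypothetical foo_* names; the callback
must match the v4l2_ctrl_notify_fnc typedef):

  static void foo_ctrl_changed(struct v4l2_ctrl *ctrl, void *priv)
  {
          struct foo_dev *foo = priv;

          /* React to the new value, e.g. reprogram dependent hardware. */
          dev_dbg(foo->dev, "ctrl %s changed to %d\n", ctrl->name, ctrl->val);
  }

  v4l2_ctrl_notify(foo->gain, foo_ctrl_changed, foo);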
 
-/** v4l2_ctrl_get_name() - Get the name of the control
+/**
+ * v4l2_ctrl_get_name() - Get the name of the control
  * @id:		The control ID.
  *
  * This function returns the name of the given control ID or NULL if it isn't
@@ -678,7 +735,8 @@
  */
 const char *v4l2_ctrl_get_name(u32 id);
 
-/** v4l2_ctrl_get_menu() - Get the menu string array of the control
+/**
+ * v4l2_ctrl_get_menu() - Get the menu string array of the control
  * @id:		The control ID.
  *
  * This function returns the NULL-terminated menu string array name of the
@@ -686,7 +744,8 @@
  */
 const char * const *v4l2_ctrl_get_menu(u32 id);
 
-/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+/**
+ * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
  * @id:		The control ID.
  * @len:	The size of the integer array.
  *
@@ -695,29 +754,41 @@
  */
 const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
 
-/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
-  * @ctrl:	The control.
-  *
-  * This returns the control's value safely by going through the control
-  * framework. This function will lock the control's handler, so it cannot be
-  * used from within the &v4l2_ctrl_ops functions.
-  *
-  * This function is for integer type controls only.
-  */
+/**
+ * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
+ * @ctrl:	The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
 s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
 
-/** __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). */
+/**
+ * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
+ * @ctrl:	The control.
+ * @val:	The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function assumes that the control's handler is already
+ * locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
 int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
+
 /** v4l2_ctrl_s_ctrl() - Helper function to set the control's value from within a driver.
-  * @ctrl:	The control.
-  * @val:	The new value.
-  *
-  * This set the control's new value safely by going through the control
-  * framework. This function will lock the control's handler, so it cannot be
-  * used from within the &v4l2_ctrl_ops functions.
-  *
-  * This function is for integer type controls only.
-  */
+ * @ctrl:	The control.
+ * @val:	The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
 static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
 {
 	int rval;
@@ -729,30 +800,45 @@
 	return rval;
 }
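
Taken together with v4l2_ctrl_g_ctrl(), a typical read-modify-write from
driver code (outside of &v4l2_ctrl_ops) might look like this sketch:

  s32 cur = v4l2_ctrl_g_ctrl(foo->gain);

  if (cur < 128) {
          int err = v4l2_ctrl_s_ctrl(foo->gain, 128);

          if (err)
                  return err;
  }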
 
-/** v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value from within a driver.
-  * @ctrl:	The control.
-  *
-  * This returns the control's value safely by going through the control
-  * framework. This function will lock the control's handler, so it cannot be
-  * used from within the &v4l2_ctrl_ops functions.
-  *
-  * This function is for 64-bit integer type controls only.
-  */
+/**
+ * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value
+ *	from within a driver.
+ * @ctrl:	The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
 s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl);
 
-/** __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). */
+/**
+ * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64().
+ *
+ * @ctrl:	The control.
+ * @val:	The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function assumes that the control's handler is already
+ * locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
 int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val);
 
-/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value from within a driver.
-  * @ctrl:	The control.
-  * @val:	The new value.
-  *
-  * This set the control's new value safely by going through the control
-  * framework. This function will lock the control's handler, so it cannot be
-  * used from within the &v4l2_ctrl_ops functions.
-  *
-  * This function is for 64-bit integer type controls only.
-  */
+/**
+ * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value
+ *	from within a driver.
+ * @ctrl:	The control.
+ * @val:	The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
 static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
 {
 	int rval;
@@ -764,19 +850,31 @@
 	return rval;
 }
 
-/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). */
+/**
+ * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string().
+ * @ctrl:	The control.
+ * @s:		The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. This function assumes that the control's handler is already
+ * locked, so it can be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
 int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s);
 
-/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value from within a driver.
-  * @ctrl:	The control.
-  * @s:		The new string.
-  *
-  * This set the control's new string safely by going through the control
-  * framework. This function will lock the control's handler, so it cannot be
-  * used from within the &v4l2_ctrl_ops functions.
-  *
-  * This function is for string type controls only.
-  */
+/**
+ * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value
+ *	from within a driver.
+ * @ctrl:	The control.
+ * @s:		The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
 static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
 {
 	int rval;
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index eecd310..b6130b5 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -23,11 +23,14 @@
 
 #include <linux/videodev2.h>
 
-/** v4l2_dv_timings_presets: list of all dv_timings presets.
+/**
+ * v4l2_dv_timings_presets: list of all dv_timings presets.
  */
 extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
 
-/** v4l2_check_dv_timings_fnc - timings check callback
+/**
+ * v4l2_check_dv_timings_fnc - timings check callback
+ *
  * @t: the v4l2_dv_timings struct.
  * @handle: a handle from the driver.
  *
@@ -35,86 +38,101 @@
  */
 typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
 
-/** v4l2_valid_dv_timings() - are these timings valid?
-  * @t:	  the v4l2_dv_timings struct.
-  * @cap: the v4l2_dv_timings_cap capabilities.
-  * @fnc: callback to check if this timing is OK. May be NULL.
-  * @fnc_handle: a handle that is passed on to @fnc.
-  *
-  * Returns true if the given dv_timings struct is supported by the
-  * hardware capabilities and the callback function (if non-NULL), returns
-  * false otherwise.
-  */
+/**
+ * v4l2_valid_dv_timings() - are these timings valid?
+ *
+ * @t:	  the v4l2_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * Returns true if the given dv_timings struct is supported by the
+ * hardware capabilities and the callback function (if non-NULL), returns
+ * false otherwise.
+ */
 bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
 			   const struct v4l2_dv_timings_cap *cap,
 			   v4l2_check_dv_timings_fnc fnc,
 			   void *fnc_handle);
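
A sketch of the callback mechanism (foo_timings_cap and the 148.5 MHz limit
are made up for illustration):

  static bool foo_timings_ok(const struct v4l2_dv_timings *t, void *handle)
  {
          /* e.g. reject pixel clocks the PLL cannot generate. */
          return t->bt.pixelclock <= 148500000;
  }

  if (!v4l2_valid_dv_timings(&timings, &foo_timings_cap,
                             foo_timings_ok, foo))
          return -ERANGE;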
 
-/** v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV timings based on capabilities
-  * @t:	  the v4l2_enum_dv_timings struct.
-  * @cap: the v4l2_dv_timings_cap capabilities.
-  * @fnc: callback to check if this timing is OK. May be NULL.
-  * @fnc_handle: a handle that is passed on to @fnc.
-  *
-  * This enumerates dv_timings using the full list of possible CEA-861 and DMT
-  * timings, filtering out any timings that are not supported based on the
-  * hardware capabilities and the callback function (if non-NULL).
-  *
-  * If a valid timing for the given index is found, it will fill in @t and
-  * return 0, otherwise it returns -EINVAL.
-  */
+/**
+ * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV
+ *	 timings based on capabilities
+ *
+ * @t:	  the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This enumerates dv_timings using the full list of possible CEA-861 and DMT
+ * timings, filtering out any timings that are not supported based on the
+ * hardware capabilities and the callback function (if non-NULL).
+ *
+ * If a valid timing for the given index is found, it will fill in @t and
+ * return 0, otherwise it returns -EINVAL.
+ */
 int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
 			     const struct v4l2_dv_timings_cap *cap,
 			     v4l2_check_dv_timings_fnc fnc,
 			     void *fnc_handle);
 
-/** v4l2_find_dv_timings_cap() - Find the closest timings struct
-  * @t:	  the v4l2_enum_dv_timings struct.
-  * @cap: the v4l2_dv_timings_cap capabilities.
-  * @pclock_delta: maximum delta between t->pixelclock and the timing struct
-  *		under consideration.
-  * @fnc: callback to check if a given timings struct is OK. May be NULL.
-  * @fnc_handle: a handle that is passed on to @fnc.
-  *
-  * This function tries to map the given timings to an entry in the
-  * full list of possible CEA-861 and DMT timings, filtering out any timings
-  * that are not supported based on the hardware capabilities and the callback
-  * function (if non-NULL).
-  *
-  * On success it will fill in @t with the found timings and it returns true.
-  * On failure it will return false.
-  */
+/**
+ * v4l2_find_dv_timings_cap() - Find the closest timings struct
+ *
+ * @t:	  the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @pclock_delta: maximum delta between t->pixelclock and the timing struct
+ *		under consideration.
+ * @fnc: callback to check if a given timings struct is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This function tries to map the given timings to an entry in the
+ * full list of possible CEA-861 and DMT timings, filtering out any timings
+ * that are not supported based on the hardware capabilities and the callback
+ * function (if non-NULL).
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
 bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
 			      const struct v4l2_dv_timings_cap *cap,
 			      unsigned pclock_delta,
 			      v4l2_check_dv_timings_fnc fnc,
 			      void *fnc_handle);
 
-/** v4l2_match_dv_timings() - do two timings match?
-  * @measured:	  the measured timings data.
-  * @standard:	  the timings according to the standard.
-  * @pclock_delta: maximum delta in Hz between standard->pixelclock and
-  * 		the measured timings.
-  *
-  * Returns true if the two timings match, returns false otherwise.
-  */
+/**
+ * v4l2_match_dv_timings() - do two timings match?
+ *
+ * @measured:	  the measured timings data.
+ * @standard:	  the timings according to the standard.
+ * @pclock_delta: maximum delta in Hz between standard->pixelclock and
+ * 		the measured timings.
+ *
+ * Returns true if the two timings match, returns false otherwise.
+ */
 bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured,
 			   const struct v4l2_dv_timings *standard,
 			   unsigned pclock_delta);
 
-/** v4l2_print_dv_timings() - log the contents of a dv_timings struct
-  * @dev_prefix:device prefix for each log line.
-  * @prefix:	additional prefix for each log line, may be NULL.
-  * @t:		the timings data.
-  * @detailed:	if true, give a detailed log.
-  */
+/**
+ * v4l2_print_dv_timings() - log the contents of a dv_timings struct
+ * @dev_prefix:device prefix for each log line.
+ * @prefix:	additional prefix for each log line, may be NULL.
+ * @t:		the timings data.
+ * @detailed:	if true, give a detailed log.
+ */
 void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
 			   const struct v4l2_dv_timings *t, bool detailed);
 
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+/**
+ * v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ *
  * @frame_height - the total height of the frame (including blanking) in lines.
  * @hfreq - the horizontal frequency in Hz.
  * @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of image (does not include blanking). This
+ *	information is needed only for version 2 of reduced blanking; in all
+ *	other cases it has no effect on the timings.
  * @polarities - the horizontal and vertical polarities (same as struct
  *		v4l2_bt_timings polarities).
  * @interlaced - if this flag is true, it indicates interlaced format
@@ -125,9 +143,12 @@
  * in with the found CVT timings.
  */
 bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
-		u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt);
+		unsigned active_width, u32 polarities, bool interlaced,
+		struct v4l2_dv_timings *fmt);
 
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+/**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ *
  * @frame_height - the total height of the frame (including blanking) in lines.
  * @hfreq - the horizontal frequency in Hz.
  * @vsync - the height of the vertical sync in lines.
@@ -149,8 +170,10 @@
 		u32 polarities, bool interlaced, struct v4l2_fract aspect,
 		struct v4l2_dv_timings *fmt);
 
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+/**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
  *	0x15 and 0x16 from the EDID.
+ *
  * @hor_landscape - byte 0x15 from the EDID.
  * @vert_portrait - byte 0x16 from the EDID.
  *
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 1ab9045..9792f90 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -68,10 +68,11 @@
 struct v4l2_subscribed_event;
 struct video_device;
 
-/** struct v4l2_kevent - Internal kernel event struct.
-  * @list:	List node for the v4l2_fh->available list.
-  * @sev:	Pointer to parent v4l2_subscribed_event.
-  * @event:	The event itself.
+/**
+ * struct v4l2_kevent - Internal kernel event struct.
+ * @list:	List node for the v4l2_fh->available list.
+ * @sev:	Pointer to parent v4l2_subscribed_event.
+ * @event:	The event itself.
   */
 struct v4l2_kevent {
 	struct list_head	list;
@@ -80,11 +81,12 @@
 };
 
 /** struct v4l2_subscribed_event_ops - Subscribed event operations.
-  * @add:	Optional callback, called when a new listener is added
-  * @del:	Optional callback, called when a listener stops listening
-  * @replace:	Optional callback that can replace event 'old' with event 'new'.
-  * @merge:	Optional callback that can merge event 'old' into event 'new'.
-  */
+ *
+ * @add:	Optional callback, called when a new listener is added
+ * @del:	Optional callback, called when a listener stops listening
+ * @replace:	Optional callback that can replace event 'old' with event 'new'.
+ * @merge:	Optional callback that can merge event 'old' into event 'new'.
+ */
 struct v4l2_subscribed_event_ops {
 	int  (*add)(struct v4l2_subscribed_event *sev, unsigned elems);
 	void (*del)(struct v4l2_subscribed_event *sev);
@@ -92,19 +94,20 @@
 	void (*merge)(const struct v4l2_event *old, struct v4l2_event *new);
 };
 
-/** struct v4l2_subscribed_event - Internal struct representing a subscribed event.
-  * @list:	List node for the v4l2_fh->subscribed list.
-  * @type:	Event type.
-  * @id:	Associated object ID (e.g. control ID). 0 if there isn't any.
-  * @flags:	Copy of v4l2_event_subscription->flags.
-  * @fh:	Filehandle that subscribed to this event.
-  * @node:	List node that hooks into the object's event list (if there is one).
-  * @ops:	v4l2_subscribed_event_ops
-  * @elems:	The number of elements in the events array.
-  * @first:	The index of the events containing the oldest available event.
-  * @in_use:	The number of queued events.
-  * @events:	An array of @elems events.
-  */
+/**
+ * struct v4l2_subscribed_event - Internal struct representing a subscribed event.
+ * @list:	List node for the v4l2_fh->subscribed list.
+ * @type:	Event type.
+ * @id:	Associated object ID (e.g. control ID). 0 if there isn't any.
+ * @flags:	Copy of v4l2_event_subscription->flags.
+ * @fh:	Filehandle that subscribed to this event.
+ * @node:	List node that hooks into the object's event list (if there is one).
+ * @ops:	v4l2_subscribed_event_ops
+ * @elems:	The number of elements in the events array.
+ * @first:	The index of the events containing the oldest available event.
+ * @in_use:	The number of queued events.
+ * @events:	An array of @elems events.
+ */
 struct v4l2_subscribed_event {
 	struct list_head	list;
 	u32			type;
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 098236c..3d184ab 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -48,13 +48,13 @@
 /**
  * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
  * @dev_name:			the name of the media entity,
-				unique in the system
+ *				unique in the system
  * @torch_intensity:		constraints for the LED in torch mode
  * @indicator_intensity:	constraints for the indicator LED
  * @flash_faults:		bitmask of flash faults that the LED flash class
-				device can report; corresponding LED_FAULT* bit
-				definitions are available in the header file
-				<linux/led-class-flash.h>
+ *				device can report; corresponding LED_FAULT* bit
+ *				definitions are available in the header file
+ *				<linux/led-class-flash.h>
  * @has_external_strobe:	external strobe capability
  */
 struct v4l2_flash_config {
@@ -105,7 +105,7 @@
  * @fled_cdev:	LED flash class device to wrap
  * @iled_cdev:	LED flash class device representing indicator LED associated
  *		with fled_cdev, may be NULL
- * @flash_ops:	V4L2 Flash device ops
+ * @ops:	V4L2 Flash device ops
  * @config:	initialization data for V4L2 Flash sub-device
  *
  * Create V4L2 Flash sub-device wrapping given LED subsystem device.
@@ -123,7 +123,7 @@
 
 /**
  * v4l2_flash_release - release V4L2 Flash sub-device
- * @flash: the V4L2 Flash sub-device to release
+ * @v4l2_flash: the V4L2 Flash sub-device to release
  *
  * Release V4L2 Flash sub-device.
  */
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 73069e4..34cc99e 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -65,7 +65,7 @@
 					 V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3)
 
 /**
- * v4l2_mbus_type - media bus type
+ * enum v4l2_mbus_type - media bus type
  * @V4L2_MBUS_PARALLEL:	parallel interface with hsync and vsync
  * @V4L2_MBUS_BT656:	parallel interface with embedded synchronisation, can
  *			also be used for BT.1120
@@ -78,7 +78,7 @@
 };
 
 /**
- * v4l2_mbus_config - media bus configuration
+ * struct v4l2_mbus_config - media bus configuration
  * @type:	in: interface type
  * @flags:	in / out: configuration flags, depending on @type
  */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3bbd96d..8849aab 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -40,6 +40,10 @@
  *		v4l2_m2m_job_finish() (as if the transaction ended normally).
  *		This function does not have to (and will usually not) wait
  *		until the device enters a state when it can be stopped.
+ * @lock:	optional. Define a driver's own lock callback, instead of using
+ *		m2m_ctx->q_lock.
+ * @unlock:	optional. Define a driver's own unlock callback, instead of
+ *		using m2m_ctx->q_lock.
  */
 struct v4l2_m2m_ops {
 	void (*device_run)(void *priv);
@@ -161,6 +165,8 @@
 /**
  * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
  * use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline
 unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -171,6 +177,8 @@
 /**
  * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
  * ready for use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline
 unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -183,6 +191,8 @@
 /**
  * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
  * buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -192,6 +202,8 @@
 /**
  * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
  * ready buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -200,6 +212,8 @@
 
 /**
  * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline
 struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -209,6 +223,8 @@
 
 /**
  * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline
 struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -221,6 +237,8 @@
 /**
  * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
  * buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -230,6 +248,8 @@
 /**
  * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
  * ready buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 {
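
The helpers documented in the hunks above are typically used from a driver's
device_run callback; a hedged sketch (foo_ctx and its m2m_ctx field are
hypothetical):

  static void foo_device_run(void *priv)
  {
          struct foo_ctx *ctx = priv;
          struct vb2_buffer *src, *dst;

          /* Peek at the next ready buffers without removing them. */
          src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
          dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

          /* ... program the hardware to process src into dst ... */
  }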
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 4e18318..b273cf9 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -44,6 +44,7 @@
 
 struct v4l2_device;
 struct v4l2_ctrl_handler;
+struct v4l2_event;
 struct v4l2_event_subscription;
 struct v4l2_fh;
 struct v4l2_subdev;
@@ -117,34 +118,67 @@
 	u8 strength;	/* Pin drive strength */
 };
 
-/*
-   s_io_pin_config: configure one or more chip I/O pins for chips that
-	multiplex different internal signal pads out to IO pins.  This function
-	takes a pointer to an array of 'n' pin configuration entries, one for
-	each pin being configured.  This function could be called at times
-	other than just subdevice initialization.
-
-   init: initialize the sensor registers to some sort of reasonable default
-	values. Do not use for new drivers and should be removed in existing
-	drivers.
-
-   load_fw: load firmware.
-
-   reset: generic reset command. The argument selects which subsystems to
-	reset. Passing 0 will always reset the whole chip. Do not use for new
-	drivers without discussing this first on the linux-media mailinglist.
-	There should be no reason normally to reset a device.
-
-   s_gpio: set GPIO pins. Very simple right now, might need to be extended with
-	a direction argument if needed.
-
-   s_power: puts subdevice in power saving mode (on == 0) or normal operation
-	mode (on == 1).
-
-   interrupt_service_routine: Called by the bridge chip's interrupt service
-	handler, when an interrupt status has be raised due to this subdev,
-	so that this subdev can handle the details.  It may schedule work to be
-	performed later.  It must not sleep.  *Called from an IRQ context*.
+/**
+ * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
+ *
+ * @log_status: callback for VIDIOC_LOG_STATUS ioctl handler code.
+ *
+ * @s_io_pin_config: configure one or more chip I/O pins for chips that
+ *	multiplex different internal signal pads out to IO pins.  This function
+ *	takes a pointer to an array of 'n' pin configuration entries, one for
+ *	each pin being configured.  This function could be called at times
+ *	other than just subdevice initialization.
+ *
+ * @init: initialize the sensor registers to some sort of reasonable default
+ *	values. Do not use for new drivers and should be removed in existing
+ *	drivers.
+ *
+ * @load_fw: load firmware.
+ *
+ * @reset: generic reset command. The argument selects which subsystems to
+ *	reset. Passing 0 will always reset the whole chip. Do not use for new
+ *	drivers without discussing this first on the linux-media mailinglist.
+ *	There should be no reason normally to reset a device.
+ *
+ * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
+ *	a direction argument if needed.
+ *
+ * @queryctrl: callback for VIDIOC_QUERYCTRL ioctl handler code.
+ *
+ * @g_ctrl: callback for VIDIOC_G_CTRL ioctl handler code.
+ *
+ * @s_ctrl: callback for VIDIOC_S_CTRL ioctl handler code.
+ *
+ * @g_ext_ctrls: callback for VIDIOC_G_EXT_CTRLS ioctl handler code.
+ *
+ * @s_ext_ctrls: callback for VIDIOC_S_EXT_CTRLS ioctl handler code.
+ *
+ * @try_ext_ctrls: callback for VIDIOC_TRY_EXT_CTRLS ioctl handler code.
+ *
+ * @querymenu: callback for VIDIOC_QUERYMENU ioctl handler code.
+ *
+ * @ioctl: called at the end of the ioctl() syscall handler at the V4L2 core,
+ *	   used to provide support for private ioctls used by the driver.
+ *
+ * @compat_ioctl32: called when a 32-bit application runs on a 64-bit kernel,
+ *		    in order to fix data passed from/to userspace.
+ *
+ * @g_register: callback for VIDIOC_G_REGISTER ioctl handler code.
+ *
+ * @s_register: callback for VIDIOC_S_REGISTER ioctl handler code.
+ *
+ * @s_power: puts subdevice in power saving mode (on == 0) or normal operation
+ *	mode (on == 1).
+ *
+ * @interrupt_service_routine: Called by the bridge chip's interrupt service
+ *	handler, when an interrupt status has been raised due to this subdev,
+ *	so that this subdev can handle the details.  It may schedule work to be
+ *	performed later.  It must not sleep.  *Called from an IRQ context*.
+ *
+ * @subscribe_event: used by the drivers to request that the control framework
+ *		     warn them when the value of a control changes.
+ *
+ * @unsubscribe_event: remove event subscription from the control framework.
  */
 struct v4l2_subdev_core_ops {
 	int (*log_status)(struct v4l2_subdev *sd);
@@ -179,18 +213,32 @@
 				 struct v4l2_event_subscription *sub);
 };
 
-/* s_radio: v4l device was opened in radio mode.
-
-   g_frequency: freq->type must be filled in. Normally done by video_ioctl2
-	or the bridge driver.
-
-   g_tuner:
-   s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the
-	bridge driver.
-
-   s_type_addr: sets tuner type and its I2C addr.
-
-   s_config: sets tda9887 specific stuff, like port1, port2 and qss
+/**
+ * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened
+ *				   in radio mode.
+ *
+ * @s_radio: callback for VIDIOC_S_RADIO ioctl handler code.
+ *
+ * @s_frequency: callback for VIDIOC_S_FREQUENCY ioctl handler code.
+ *
+ * @g_frequency: callback for VIDIOC_G_FREQUENCY ioctl handler code.
+ *		 freq->type must be filled in. Normally done by video_ioctl2
+ *		 or the bridge driver.
+ *
+ * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS ioctl handler code.
+ *
+ * @g_tuner: callback for VIDIOC_G_TUNER ioctl handler code.
+ *
+ * @s_tuner: callback for VIDIOC_S_TUNER ioctl handler code. vt->type must be
+ *	     filled in. Normally done by video_ioctl2 or the bridge driver.
+ *
+ * @g_modulator: callback for VIDIOC_G_MODULATOR ioctl handler code.
+ *
+ * @s_modulator: callback for VIDIOC_S_MODULATOR ioctl handler code.
+ *
+ * @s_type_addr: sets tuner type and its I2C addr.
+ *
+ * @s_config: sets tda9887 specific stuff, like port1, port2 and qss
  */
 struct v4l2_subdev_tuner_ops {
 	int (*s_radio)(struct v4l2_subdev *sd);
@@ -205,25 +253,31 @@
 	int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
 };
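
As a sketch of how one of these callbacks is wired up (the foo_*/to_foo_*
helpers are hypothetical):

  static int foo_s_frequency(struct v4l2_subdev *sd,
                             const struct v4l2_frequency *f)
  {
          struct foo_state *state = to_foo_state(sd);

          /* Clamp and program the tuner PLL. */
          return foo_set_pll(state, f->frequency);
  }

  static const struct v4l2_subdev_tuner_ops foo_tuner_ops = {
          .s_frequency = foo_s_frequency,
  };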
 
-/* s_clock_freq: set the frequency (in Hz) of the audio clock output.
-	Used to slave an audio processor to the video decoder, ensuring that
-	audio and video remain synchronized. Usual values for the frequency
-	are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
-	-EINVAL is returned.
-
-   s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
-	way to select I2S clock used by driving digital audio streams at some
-	board designs. Usual values for the frequency are 1024000 and 2048000.
-	If the frequency is not supported, then -EINVAL is returned.
-
-   s_routing: used to define the input and/or output pins of an audio chip,
-	and any additional configuration data.
-	Never attempt to use user-level input IDs (e.g. Composite, S-Video,
-	Tuner) at this level. An i2c device shouldn't know about whether an
-	input pin is connected to a Composite connector, become on another
-	board or platform it might be connected to something else entirely.
-	The calling driver is responsible for mapping a user-level input to
-	the right pins on the i2c device.
+/**
+ * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings
+ *
+ * @s_clock_freq: set the frequency (in Hz) of the audio clock output.
+ *	Used to slave an audio processor to the video decoder, ensuring that
+ *	audio and video remain synchronized. Usual values for the frequency
+ *	are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
+ *	-EINVAL is returned.
+ *
+ * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
+ *	way to select I2S clock used by driving digital audio streams at some
+ *	board designs. Usual values for the frequency are 1024000 and 2048000.
+ *	If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @s_routing: used to define the input and/or output pins of an audio chip,
+ *	and any additional configuration data.
+ *	Never attempt to use user-level input IDs (e.g. Composite, S-Video,
+ *	Tuner) at this level. An i2c device shouldn't know about whether an
+ *	input pin is connected to a Composite connector, because on another
+ *	board or platform it might be connected to something else entirely.
+ *	The calling driver is responsible for mapping a user-level input to
+ *	the right pins on the i2c device.
+ *
+ * @s_stream: used to notify the audio code that stream will start or has
+ *	stopped.
  */
 struct v4l2_subdev_audio_ops {
 	int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
@@ -242,6 +296,7 @@
 
 /**
  * struct v4l2_mbus_frame_desc_entry - media bus frame description structure
+ *
  * @flags: V4L2_MBUS_FRAME_DESC_FL_* flags
  * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set
  * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB
@@ -265,45 +320,73 @@
 	unsigned short num_entries;
 };
 
-/*
-   s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
-	video input devices.
-
-   g_std_output: get current standard for video OUTPUT devices. This is ignored
-	by video input devices.
-
-   g_tvnorms: get v4l2_std_id with all standards supported by the video
-	CAPTURE device. This is ignored by video output devices.
-
-   g_tvnorms_output: get v4l2_std_id with all standards supported by the video
-	OUTPUT device. This is ignored by video capture devices.
-
-   s_crystal_freq: sets the frequency of the crystal used to generate the
-	clocks in Hz. An extra flags field allows device specific configuration
-	regarding clock frequency dividers, etc. If not used, then set flags
-	to 0. If the frequency is not supported, then -EINVAL is returned.
-
-   g_input_status: get input status. Same as the status field in the v4l2_input
-	struct.
-
-   s_routing: see s_routing in audio_ops, except this version is for video
-	devices.
-
-   s_dv_timings(): Set custom dv timings in the sub device. This is used
-	when sub device is capable of setting detailed timing information
-	in the hardware to generate/detect the video signal.
-
-   g_dv_timings(): Get custom dv timings in the sub device.
-
-   g_mbus_config: get supported mediabus configurations
-
-   s_mbus_config: set a certain mediabus configuration. This operation is added
-	for compatibility with soc-camera drivers and should not be used by new
-	software.
-
-   s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
-	can adjust @size to a lower value and must not write more data to the
-	buffer starting at @data than the original value of @size.
+/**
+ * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
+ * 				  in video mode.
+ *
+ * @s_routing: see s_routing in audio_ops, except this version is for video
+ *	devices.
+ *
+ * @s_crystal_freq: sets the frequency of the crystal used to generate the
+ *	clocks in Hz. An extra flags field allows device specific configuration
+ *	regarding clock frequency dividers, etc. If not used, then set flags
+ *	to 0. If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @g_std: callback for VIDIOC_G_STD ioctl handler code.
+ *
+ * @s_std: callback for VIDIOC_S_STD ioctl handler code.
+ *
+ * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
+ *	video input devices.
+ *
+ * @g_std_output: get current standard for video OUTPUT devices. This is ignored
+ *	by video input devices.
+ *
+ * @querystd: callback for VIDIOC_QUERYSTD ioctl handler code.
+ *
+ * @g_tvnorms: get v4l2_std_id with all standards supported by the video
+ *	CAPTURE device. This is ignored by video output devices.
+ *
+ * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video
+ *	OUTPUT device. This is ignored by video capture devices.
+ *
+ * @g_input_status: get input status. Same as the status field in the v4l2_input
+ *	struct.
+ *
+ * @s_stream: used to notify the driver that a video stream will start or has
+ *	stopped.
+ *
+ * @cropcap: callback for VIDIOC_CROPCAP ioctl handler code.
+ *
+ * @g_crop: callback for VIDIOC_G_CROP ioctl handler code.
+ *
+ * @s_crop: callback for VIDIOC_S_CROP ioctl handler code.
+ *
+ * @g_parm: callback for VIDIOC_G_PARM ioctl handler code.
+ *
+ * @s_parm: callback for VIDIOC_S_PARM ioctl handler code.
+ *
+ * @g_frame_interval: callback for VIDIOC_G_FRAMEINTERVAL ioctl handler code.
+ *
+ * @s_frame_interval: callback for VIDIOC_S_FRAMEINTERVAL ioctl handler code.
+ *
+ * @s_dv_timings: Set custom dv timings in the sub device. This is used
+ *	when the sub device is capable of setting detailed timing information
+ *	in the hardware to generate/detect the video signal.
+ *
+ * @g_dv_timings: Get custom dv timings in the sub device.
+ *
+ * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS ioctl handler code.
+ *
+ * @g_mbus_config: get supported mediabus configurations
+ *
+ * @s_mbus_config: set a certain mediabus configuration. This operation is added
+ *	for compatibility with soc-camera drivers and should not be used by new
+ *	software.
+ *
+ * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
+ *	can adjust @size to a lower value and must not write more data to the
+ *	buffer starting at @data than the original value of @size.
  */
 struct v4l2_subdev_video_ops {
 	int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -340,34 +423,39 @@
 			   unsigned int *size);
 };
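
For illustration, a sub-device sensor driver might wire up the @s_stream op
documented above roughly as below; EXAMPLE_REG_STREAM and example_write_reg()
are hypothetical stand-ins for a real chip's register interface.

#include <media/v4l2-subdev.h>

#define EXAMPLE_REG_STREAM	0x0100	/* hypothetical streaming control reg */

static int example_write_reg(struct v4l2_subdev *sd, u16 reg, u8 val);

static int example_s_stream(struct v4l2_subdev *sd, int enable)
{
	/* start or stop the sensor's video output */
	return example_write_reg(sd, EXAMPLE_REG_STREAM, enable ? 1 : 0);
}

static const struct v4l2_subdev_video_ops example_video_ops = {
	.s_stream = example_s_stream,
};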
 
-/*
-   decode_vbi_line: video decoders that support sliced VBI need to implement
-	this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
-	start of the VBI data that was generated by the decoder. The driver
-	then parses the sliced VBI data and sets the other fields in the
-	struct accordingly. The pointer p is updated to point to the start of
-	the payload which can be copied verbatim into the data field of the
-	v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
-	type field is set to 0 on return.
-
-   s_vbi_data: used to generate VBI signals on a video signal.
-	v4l2_sliced_vbi_data is filled with the data packets that should be
-	output. Note that if you set the line field to 0, then that VBI signal
-	is disabled. If no valid VBI data was found, then the type field is
-	set to 0 on return.
-
-   g_vbi_data: used to obtain the sliced VBI packet from a readback register.
-	Not all video decoders support this. If no data is available because
-	the readback register contains invalid or erroneous data -EIO is
-	returned. Note that you must fill in the 'id' member and the 'field'
-	member (to determine whether CC data from the first or second field
-	should be obtained).
-
-   s_raw_fmt: setup the video encoder/decoder for raw VBI.
-
-   g_sliced_fmt: retrieve the current sliced VBI settings.
-
-   s_sliced_fmt: setup the sliced VBI settings.
+/**
+ * struct v4l2_subdev_vbi_ops - Callbacks used when a v4l device is opened
+ * 				  in video mode via the vbi device node.
+ *
+ * @decode_vbi_line: video decoders that support sliced VBI need to implement
+ *	this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
+ *	start of the VBI data that was generated by the decoder. The driver
+ *	then parses the sliced VBI data and sets the other fields in the
+ *	struct accordingly. The pointer p is updated to point to the start of
+ *	the payload which can be copied verbatim into the data field of the
+ *	v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
+ *	type field is set to 0 on return.
+ *
+ * @s_vbi_data: used to generate VBI signals on a video signal.
+ *	v4l2_sliced_vbi_data is filled with the data packets that should be
+ *	output. Note that if you set the line field to 0, then that VBI signal
+ *	is disabled. If no valid VBI data was found, then the type field is
+ *	set to 0 on return.
+ *
+ * @g_vbi_data: used to obtain the sliced VBI packet from a readback register.
+ *	Not all video decoders support this. If no data is available because
+ *	the readback register contains invalid or erroneous data -EIO is
+ *	returned. Note that you must fill in the 'id' member and the 'field'
+ *	member (to determine whether CC data from the first or second field
+ *	should be obtained).
+ *
+ * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP ioctl handler code.
+ *
+ * @s_raw_fmt: setup the video encoder/decoder for raw VBI.
+ *
+ * @g_sliced_fmt: retrieve the current sliced VBI settings.
+ *
+ * @s_sliced_fmt: setup the sliced VBI settings.
  */
 struct v4l2_subdev_vbi_ops {
 	int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
@@ -480,8 +568,39 @@
 
 /**
  * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
+ *
+ * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
+ *		    code.
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
+ *		     code.
+ *
+ * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl
+ *			 handler code.
+ *
+ * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT ioctl handler code.
+ *
+ * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT ioctl handler code.
+ *
+ * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION ioctl handler code.
+ *
+ * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION ioctl handler code.
+ *
+ * @get_edid: callback for VIDIOC_SUBDEV_G_EDID ioctl handler code.
+ *
+ * @set_edid: callback for VIDIOC_SUBDEV_S_EDID ioctl handler code.
+ *
+ * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler
+ *		    code.
+ *
+ * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler
+ *		     code.
+ *
+ * @link_validate: used by the media controller code to check if the links
+ *		   that belong to a pipeline can be used for streaming.
+ *
  * @get_frame_desc: get the current low level media bus frame parameters.
- * @get_frame_desc: set the low level media bus frame parameters, @fd array
+ *
+ * @set_frame_desc: set the low level media bus frame parameters, @fd array
  *                  may be adjusted by the subdev driver to device capabilities.
  */
 struct v4l2_subdev_pad_ops {
@@ -695,4 +814,7 @@
 #define v4l2_subdev_has_op(sd, o, f) \
 	((sd)->ops->o && (sd)->ops->o->f)
 
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+			      const struct v4l2_event *ev);
+
 #endif
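
As a sketch of the additions above: v4l2_subdev_has_op() lets callers probe
for an op before calling it (e.g. v4l2_subdev_has_op(sd, video, s_stream)),
and the new v4l2_subdev_notify_event() delivers an event both to the subdev's
event queue and to the bridge's notify callback in one call. The function
name below is hypothetical.

#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

static void example_signal_resolution_change(struct v4l2_subdev *sd)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
	};

	v4l2_subdev_notify_event(sd, &ev);
}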
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index c192e1b..589b56c 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -364,7 +364,9 @@
  *		start_streaming() can be called. Used when a DMA engine
  *		cannot be started unless at least this number of buffers
  *		have been queued into the driver.
- *
+ */
+/*
+ * Private elements (won't appear in the DocBook):
  * @mmap_lock:	private mutex used when buffers are allocated/freed/mmapped
  * @memory:	current memory type used
  * @bufs:	videobuf buffer structures
@@ -407,7 +409,7 @@
 	gfp_t				gfp_flags;
 	u32				min_buffers_needed;
 
-/* private: internal use only */
+	/* private: internal use only */
 	struct mutex			mmap_lock;
 	enum v4l2_memory		memory;
 	struct vb2_buffer		*bufs[VIDEO_MAX_FRAME];
@@ -484,7 +486,8 @@
 		loff_t *ppos, int nonblock);
 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
 		loff_t *ppos, int nonblock);
-/**
+
+/*
  * vb2_thread_fnc - callback function for use with vb2_thread
  *
  * This is called whenever a buffer is dequeued in the thread.
@@ -577,7 +580,6 @@
  * vb2_get_plane_payload() - get bytesused for the plane plane_no
  * @vb:		buffer for which plane payload should be set
  * @plane_no:	plane number for which payload should be set
- * @size:	payload in bytes
  */
 static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
 				 unsigned int plane_no)
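
A hedged sketch of the corrected single-argument vb2_get_plane_payload(),
paired with vb2_set_plane_payload() in a driver's buf_prepare callback
(the "example" name and the payload size are hypothetical):

#include <media/videobuf2-core.h>

static int example_buf_prepare(struct vb2_buffer *vb)
{
	unsigned long size = 4096;	/* hypothetical payload size */

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;

	/* record how many bytes of plane 0 are actually used ... */
	vb2_set_plane_payload(vb, 0, size);
	/* ... which vb2_get_plane_payload(vb, 0) will now report */
	return 0;
}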
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index f05444c..9f36641 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -17,7 +17,8 @@
 #include <media/videobuf2-core.h>
 
 /**
- * vb2_vmarea_handler - common vma refcount tracking handler
+ * struct vb2_vmarea_handler - common vma refcount tracking handler
+ *
  * @refcount:	pointer to refcount entry in the buffer
  * @put:	callback to function that decreases buffer refcount
  * @arg:	argument for @put callback
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6..f2ffe5b 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@
 ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
 			   loff_t *off);
 
+/*
+ * For EEH, a driver may want to assert that a PERST will reload the same image
+ * from flash into the FPGA.
+ *
+ * This is a property of the entire adapter, not a single AFU, so drivers
+ * should set this property with care!
+ */
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image);
+
 #endif /* _MISC_CXL_H */
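
A sketch of how a driver might use the new export at probe time;
example_probe() is hypothetical, while cxl_pci_to_afu() is part of the
existing cxl kernel API declared earlier in this header.

static int example_probe(struct pci_dev *pdev)
{
	struct cxl_afu *afu = cxl_pci_to_afu(pdev);

	/* only safe if the card's flash image is known not to change */
	cxl_perst_reloads_same_image(afu, true);
	return 0;
}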
diff --git a/include/net/sock.h b/include/net/sock.h
index 43c6abc..7aa7844 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1042,42 +1042,9 @@
 #endif
 };
 
-/*
- * Bits in struct cg_proto.flags
- */
-enum cg_proto_flags {
-	/* Currently active and new sockets should be assigned to cgroups */
-	MEMCG_SOCK_ACTIVE,
-	/* It was ever activated; we must disarm static keys on destruction */
-	MEMCG_SOCK_ACTIVATED,
-};
-
-struct cg_proto {
-	struct page_counter	memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
-	int			memory_pressure;
-	long			sysctl_mem[3];
-	unsigned long		flags;
-	/*
-	 * memcg field is used to find which memcg we belong directly
-	 * Each memcg struct can hold more than one cg_proto, so container_of
-	 * won't really cut.
-	 *
-	 * The elegant solution would be having an inverse function to
-	 * proto_cgroup in struct proto, but that means polluting the structure
-	 * for everybody, instead of just for memcg users.
-	 */
-	struct mem_cgroup	*memcg;
-};
-
 int proto_register(struct proto *prot, int alloc_slab);
 void proto_unregister(struct proto *prot);
 
-static inline bool memcg_proto_active(struct cg_proto *cg_proto)
-{
-	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-}
-
 #ifdef SOCK_REFCNT_DEBUG
 static inline void sk_refcnt_debug_inc(struct sock *sk)
 {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b0f898e..43c1cf0 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2760,52 +2760,6 @@
 }
 
 /**
- * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
- *   by an HCA.
- * @pd: The protection domain associated assigned to the registered region.
- * @phys_buf_array: Specifies a list of physical buffers to use in the
- *   memory region.
- * @num_phys_buf: Specifies the size of the phys_buf_array.
- * @mr_access_flags: Specifies the memory access rights.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
-			     struct ib_phys_buf *phys_buf_array,
-			     int num_phys_buf,
-			     int mr_access_flags,
-			     u64 *iova_start);
-
-/**
- * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
- *   Conceptually, this call performs the functions deregister memory region
- *   followed by register physical memory region.  Where possible,
- *   resources are reused instead of deallocated and reallocated.
- * @mr: The memory region to modify.
- * @mr_rereg_mask: A bit-mask used to indicate which of the following
- *   properties of the memory region are being modified.
- * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
- *   the new protection domain to associated with the memory region,
- *   otherwise, this parameter is ignored.
- * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- *   field specifies a list of physical buffers to use in the new
- *   translation, otherwise, this parameter is ignored.
- * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- *   field specifies the size of the phys_buf_array, otherwise, this
- *   parameter is ignored.
- * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
- *   field specifies the new memory access rights, otherwise, this
- *   parameter is ignored.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-int ib_rereg_phys_mr(struct ib_mr *mr,
-		     int mr_rereg_mask,
-		     struct ib_pd *pd,
-		     struct ib_phys_buf *phys_buf_array,
-		     int num_phys_buf,
-		     int mr_access_flags,
-		     u64 *iova_start);
-
-/**
  * ib_query_mr - Retrieves information about a specific memory region.
  * @mr: The memory region to retrieve information about.
  * @mr_attr: The attributes of the specified memory region.
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 370f2909..44202ff 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,11 +51,6 @@
 	unsigned int reg;
 };
 
-struct tegra_smmu_ops {
-	void (*flush_dcache)(struct page *page, unsigned long offset,
-			     size_t size);
-};
-
 struct tegra_smmu_soc {
 	const struct tegra_mc_client *clients;
 	unsigned int num_clients;
@@ -66,9 +61,8 @@
 	bool supports_round_robin_arbitration;
 	bool supports_request_limit;
 
+	unsigned int num_tlb_lines;
 	unsigned int num_asids;
-
-	const struct tegra_smmu_ops *ops;
 };
 
 struct tegra_mc;
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b..74bc8547 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@
 void snd_ac97_suspend(struct snd_ac97 *ac97);
 void snd_ac97_resume(struct snd_ac97 *ac97);
 #endif
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask);
 
 /* quirk types */
 enum {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index adb5ba5..930b41e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,14 +4,17 @@
 #ifndef __SOUND_HDA_I915_H
 #define __SOUND_HDA_I915_H
 
+#include <drm/i915_component.h>
+
 #ifdef CONFIG_SND_HDA_I915
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
 int snd_hdac_get_display_clk(struct hdac_bus *bus);
 int snd_hdac_i915_init(struct hdac_bus *bus);
 int snd_hdac_i915_exit(struct hdac_bus *bus);
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
 #else
-static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
 {
 	return 0;
 }
@@ -31,6 +34,10 @@
 {
 	return 0;
 }
+static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
+{
+	return -ENODEV;
+}
 #endif
 
 #endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e5..2ae8812 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@
 #define AZX_SPB_BASE			0x08
 /* Interval used to calculate the iterating register offset */
 #define AZX_SPB_INTERVAL		0x08
+/* SPIB base */
+#define AZX_SPB_SPIB			0x00
+/* SPIB MAXFIFO base */
+#define AZX_SPB_MAXFIFO			0x04
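
Assuming one SPB register block of AZX_SPB_INTERVAL bytes per stream after
AZX_SPB_BASE, per-stream offsets could be derived as below; these helper
macros are a hypothetical sketch, not part of the header.

#define AZX_SPB_STREAM_OFFSET(idx)	(AZX_SPB_BASE + (idx) * AZX_SPB_INTERVAL)
#define AZX_SPB_STREAM_SPIB(idx)	(AZX_SPB_STREAM_OFFSET(idx) + AZX_SPB_SPIB)
#define AZX_SPB_STREAM_MAXFIFO(idx)	(AZX_SPB_STREAM_OFFSET(idx) + AZX_SPB_MAXFIFO)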
 
 /* registers of Global Time Synchronization Capability Structure */
 #define AZX_GTS_CAP_ID			0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fd..49bc836 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@
 void snd_hdac_device_unregister(struct hdac_device *codec);
 
 int snd_hdac_refresh_widgets(struct hdac_device *codec);
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
 
 unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
 			       unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@
 }
 
 #ifdef CONFIG_PM
-void snd_hdac_power_up(struct hdac_device *codec);
-void snd_hdac_power_down(struct hdac_device *codec);
-void snd_hdac_power_up_pm(struct hdac_device *codec);
-void snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_power_up(struct hdac_device *codec);
+int snd_hdac_power_down(struct hdac_device *codec);
+int snd_hdac_power_up_pm(struct hdac_device *codec);
+int snd_hdac_power_down_pm(struct hdac_device *codec);
 #else
-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
 #endif
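
Since snd_hdac_power_up() and friends now return an int (the runtime-PM
status) instead of void, callers can fail gracefully; a hypothetical caller
might look like:

static int example_exec_verb(struct hdac_device *codec)
{
	int err = snd_hdac_power_up(codec);

	if (err < 0)
		return err;
	/* ... talk to the codec here ... */
	snd_hdac_power_down(codec);
	return 0;
}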
 
 /*
@@ -437,6 +438,8 @@
 struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
 					   struct snd_pcm_substream *substream);
 void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag);
 
 int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
 void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df1..94210dc 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@
 void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
 
 #define ebus_to_hbus(ebus)	(&(ebus)->bus)
 #define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@
  * @hstream: hdac_stream
  * @pphc_addr: processing pipe host stream pointer
  * @pplc_addr: processing pipe link stream pointer
+ * @spib_addr: software position in buffers stream pointer
+ * @fifo_addr: software position max FIFO stream pointer
  * @decoupled: stream host and link is decoupled
  * @link_locked: link is locked
  * @link_prepared: link is prepared
@@ -73,6 +76,9 @@
 	void __iomem *pphc_addr;
 	void __iomem *pplc_addr;
 
+	void __iomem *spib_addr;
+	void __iomem *fifo_addr;
+
 	bool decoupled:1;
 	bool link_locked:1;
 	bool link_prepared;
@@ -99,6 +105,11 @@
 				struct hdac_ext_stream *azx_dev, bool decouple);
 void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
 
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value);
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream);
+
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@
 
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
 void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
 				 int stream);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@
 	writew(((readw(addr + reg) & ~(mask)) | (val)), \
 		addr + reg)
 
+
+struct hdac_ext_device;
+
+/* ops common to all codec drivers */
+struct hdac_ext_codec_ops {
+	int (*build_controls)(struct hdac_ext_device *dev);
+	int (*init)(struct hdac_ext_device *dev);
+	void (*free)(struct hdac_ext_device *dev);
+};
+
+struct hda_dai_map {
+	char *dai_name;
+	hda_nid_t nid;
+	u32	maxbps;
+};
+
+#define HDA_MAX_NIDS 16
+
+/**
+ * struct hdac_ext_device - HDAC Ext device
+ *
+ * @hdac: hdac core device
+ * @ebus: hdac extended bus the device belongs to
+ * @nid_list: the dai map which matches the dai name with the nid
+ * @map_cur_idx: the index in use in the dai map
+ * @ops: the hda codec ops common to all codec drivers
+ * @private_data: private data; for ASoC this holds the ASoC codec object
+ */
+struct hdac_ext_device {
+	struct hdac_device hdac;
+	struct hdac_ext_bus *ebus;
+
+	/* soc-dai to nid map */
+	struct hda_dai_map nid_list[HDA_MAX_NIDS];
+	unsigned int map_cur_idx;
+
+	/* codec ops */
+	struct hdac_ext_codec_ops ops;
+
+	void *private_data;
+};
+
+#define to_ehdac_device(dev) (container_of((dev), \
+				 struct hdac_ext_device, hdac))
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_ext_driver {
+	struct hdac_driver hdac;
+
+	int	(*probe)(struct hdac_ext_device *dev);
+	int	(*remove)(struct hdac_ext_device *dev);
+	void	(*shutdown)(struct hdac_ext_device *dev);
+};
+
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
+
+#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+
 #endif /* __SOUND_HDAUDIO_EXT_H */
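
A skeleton of a codec driver built on the new HDAC extended base-driver API
above; every "example" identifier is hypothetical.

#include <sound/hdaudio_ext.h>

static int example_probe(struct hdac_ext_device *edev)
{
	edev->private_data = NULL;	/* e.g. allocate driver state here */
	return 0;
}

static int example_remove(struct hdac_ext_device *edev)
{
	return 0;
}

static struct hdac_ext_driver example_driver = {
	.hdac = {
		.driver = {
			.name = "example-hda-codec",
		},
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

/* registered via snd_hda_ext_driver_register(&example_driver) from module
 * init, and removed with snd_hda_ext_driver_unregister() on exit */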
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c..bb7b2eb 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@
 /*
  * flags
  */
+struct rsnd_ctu_platform_info {
+	u32 flags;
+};
+
+struct rsnd_mix_platform_info {
+	u32 flags;
+};
+
 struct rsnd_dvc_platform_info {
 	u32 flags;
 };
@@ -68,6 +76,8 @@
 struct rsnd_dai_path_info {
 	struct rsnd_ssi_platform_info *ssi;
 	struct rsnd_src_platform_info *src;
+	struct rsnd_ctu_platform_info *ctu;
+	struct rsnd_mix_platform_info *mix;
 	struct rsnd_dvc_platform_info *dvc;
 };
 
@@ -93,6 +103,10 @@
 	int ssi_info_nr;
 	struct rsnd_src_platform_info *src_info;
 	int src_info_nr;
+	struct rsnd_ctu_platform_info *ctu_info;
+	int ctu_info_nr;
+	struct rsnd_mix_platform_info *mix_info;
+	int mix_info_nr;
 	struct rsnd_dvc_platform_info *dvc_info;
 	int dvc_info_nr;
 	struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 0000000..7fffeaa
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt298.h -- Platform data for RT298
+ *
+ * Copyright 2013 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT298_H
+#define __LINUX_SND_RT298_H
+
+struct rt298_platform_data {
+	bool cbj_en; /* combo jack enable */
+	bool gpio2_en; /* GPIO2 enable */
+	bool suspend_power_off; /* power is off during suspend */
+};
+
+#endif
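
Hypothetical board-file usage of the new platform data:

#include <sound/rt298.h>

static struct rt298_platform_data example_rt298_pdata = {
	.cbj_en = true,			/* combo jack populated on this board */
	.gpio2_en = false,
	.suspend_power_off = true,	/* codec loses power during suspend */
};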
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a8..5abba03 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@
 			    const struct snd_soc_dapm_route *route, int num);
 int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
 			     const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
 
 /* dapm events */
 void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@
 struct snd_soc_dapm_path {
 	const char *name;
 
-	/* source (input) and sink (output) widgets */
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_widget *sink;
+	/*
+	 * source (input) and sink (output) widgets
+	 * The union is for convenience, since it is a lot nicer to type
+	 * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
+	 */
+	union {
+		struct {
+			struct snd_soc_dapm_widget *source;
+			struct snd_soc_dapm_widget *sink;
+		};
+		struct snd_soc_dapm_widget *node[2];
+	};
 
 	/* status */
 	u32 connect:1;	/* source and sink widgets are connected */
@@ -524,8 +534,7 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink);
 
-	struct list_head list_source;
-	struct list_head list_sink;
+	struct list_head list_node[2];
 	struct list_head list_kcontrol;
 	struct list_head list;
 };
@@ -559,8 +568,7 @@
 	unsigned char new_power:1;		/* power from this run */
 	unsigned char power_checked:1;		/* power checked this run */
 	unsigned char is_supply:1;		/* Widget is a supply type widget */
-	unsigned char is_sink:1;		/* Widget is a sink type widget */
-	unsigned char is_source:1;		/* Widget is a source type widget */
+	unsigned char is_ep:2;			/* Widget is an endpoint type widget */
 	int subseq;				/* sort within widget type */
 
 	int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@
 	struct snd_kcontrol **kcontrols;
 	struct snd_soc_dobj dobj;
 
-	/* widget input and outputs */
-	struct list_head sources;
-	struct list_head sinks;
+	/* widget input and output edges */
+	struct list_head edges[2];
 
 	/* used during DAPM updates */
 	struct list_head work_list;
 	struct list_head power_list;
 	struct list_head dirty;
-	int inputs;
-	int outputs;
+	int endpoints[2];
 
 	struct clk *clk;
 };
@@ -672,4 +678,58 @@
 	return dapm->bias_level;
 }
 
+enum snd_soc_dapm_direction {
+	SND_SOC_DAPM_DIR_IN,
+	SND_SOC_DAPM_DIR_OUT
+};
+
+#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
+
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+
+/**
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or the outgoing widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
+	list_for_each_entry(p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or the outgoing widget
+ * @p: The path iterator variable
+ * @next_p: Temporary storage for the next path
+ *
+ *  This macro works like snd_soc_dapm_widget_for_each_path, except that
+ *  it is safe to remove the current path from the list while iterating
+ */
+#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
+	list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
+ *  widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
+
+/**
+ * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
+ *  a widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_source_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
+
 #endif
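
A short sketch of the new direction-based iterators; note that the union
above keeps p->sink and p->source usable alongside p->node[dir]. The
function name is hypothetical.

#include <sound/soc-dapm.h>

static void example_dump_sinks(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect)
			pr_debug("%s -> %s\n", w->name, p->sink->name);
	}
}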
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41..086cd7f 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@
 		struct snd_ctl_elem_info *uinfo);
 };
 
+/* Bytes ext operations, for TLV byte controls */
+struct snd_soc_tplg_bytes_ext_ops {
+	u32 id;
+	int (*get)(unsigned int __user *bytes, unsigned int size);
+	int (*put)(const unsigned int __user *bytes, unsigned int size);
+};
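
A hypothetical vendor table binding TLV-bytes handlers by control id, of
the kind a driver would point at from snd_soc_tplg_ops below via the new
bytes_ext_ops/bytes_ext_ops_count fields:

static int example_bytes_get(unsigned int __user *bytes, unsigned int size)
{
	/* copy_to_user() the DSP payload here */
	return 0;
}

static const struct snd_soc_tplg_bytes_ext_ops example_bytes_ops[] = {
	{ .id = 0x1000, .get = example_bytes_get },
};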
+
 /*
  * DAPM widget event handlers - used to map handlers onto widgets.
  */
@@ -136,9 +143,13 @@
 	int (*manifest)(struct snd_soc_component *,
 		struct snd_soc_tplg_manifest *);
 
-	/* bespoke kcontrol handlers available for binding */
+	/* vendor specific kcontrol handlers available for binding */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
+
+	/* vendor specific bytes ext handlers available for binding */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
 };
 
 #ifdef CONFIG_SND_SOC_TOPOLOGY
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf..884e728 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
 struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
 
 int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
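
With the new signature, snd_soc_new_ac97_codec() only accepts the codec if
(vendor id & id_mask) matches; a sketch with placeholder id values (a real
driver would pass its codec's documented vendor ID):

#include <linux/err.h>
#include <sound/soc.h>

static int example_ac97_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;

	ac97 = snd_soc_new_ac97_codec(codec, 0x574d4c00 /* id */,
				      0xffffff00 /* id_mask */);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);
	return 0;
}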
@@ -619,6 +620,7 @@
  * @pin:    name of the pin to update
  * @mask:   bits to check for in reported jack status
  * @invert: if non-zero then pin is enabled when status is not reported
+ * @list:   internal list entry
  */
 struct snd_soc_jack_pin {
 	struct list_head list;
@@ -635,7 +637,7 @@
  * @jack_type: type of jack that is expected for this voltage
  * @debounce_time: debounce_time for jack, codec driver should wait for this
  *		duration before reading the adc for voltages
- * @:list: list container
+ * @list:   internal list entry
  */
 struct snd_soc_jack_zone {
 	unsigned int min_mv;
@@ -651,12 +653,12 @@
  * @gpio:         legacy gpio number
  * @idx:          gpio descriptor index within the function of the GPIO
  *                consumer device
- * @gpiod_dev     GPIO consumer device
+ * @gpiod_dev:    GPIO consumer device
  * @name:         gpio name. Also as connection ID for the GPIO consumer
  *                device function name lookup
  * @report:       value to report when jack detected
  * @invert:       report presence in low state
- * @debouce_time: debouce time in ms
+ * @debounce_time: debounce time in ms
  * @wake:	  enable as wake source
  * @jack_status_check: callback function which overrides the detection
  *		       to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@
 	int debounce_time;
 	bool wake;
 
+	/* private: */
 	struct snd_soc_jack *jack;
 	struct delayed_work work;
 	struct gpio_desc *desc;
 
 	void *data;
+	/* public: */
 	int (*jack_status_check)(void *data);
 };
 
@@ -758,7 +762,6 @@
 
 	unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
 	unsigned int registered_as_component:1;
-	unsigned int probed:1;
 
 	struct list_head list;
 
@@ -792,7 +795,6 @@
 
 	/* Don't use these, use snd_soc_component_get_dapm() */
 	struct snd_soc_dapm_context dapm;
-	struct snd_soc_dapm_context *dapm_ptr;
 
 	const struct snd_kcontrol_new *controls;
 	unsigned int num_controls;
@@ -832,9 +834,6 @@
 	/* component */
 	struct snd_soc_component component;
 
-	/* Don't access this directly, use snd_soc_codec_get_dapm() */
-	struct snd_soc_dapm_context dapm;
-
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_reg;
 #endif
@@ -1277,7 +1276,7 @@
 static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
 	struct snd_soc_dapm_context *dapm)
 {
-	return container_of(dapm, struct snd_soc_codec, dapm);
+	return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
 }
 
 /**
@@ -1302,7 +1301,7 @@
 static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
 	struct snd_soc_component *component)
 {
-	return component->dapm_ptr;
+	return &component->dapm;
 }
 
 /**
@@ -1314,12 +1313,12 @@
 static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
 	struct snd_soc_codec *codec)
 {
-	return &codec->dapm;
+	return snd_soc_component_get_dapm(&codec->component);
 }
 
 /**
  * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @dapm: The CODEC for which to initialize the DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
  * @level: The DAPM level to initialize to
  *
  * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@
 int snd_soc_of_parse_tdm_slot(struct device_node *np,
 			      unsigned int *slots,
 			      unsigned int *slot_width);
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname);
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname);
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d..317a1ed 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
 #include <linux/tracepoint.h>
 
 #define DAPM_DIRECT "(direct)"
+#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
 
 struct snd_soc_jack;
 struct snd_soc_codec;
@@ -152,62 +153,38 @@
 		  (int)__entry->path_checks, (int)__entry->neighbour_checks)
 );
 
-TRACE_EVENT(snd_soc_dapm_output_path,
+TRACE_EVENT(snd_soc_dapm_path,
 
 	TP_PROTO(struct snd_soc_dapm_widget *widget,
+		enum snd_soc_dapm_direction dir,
 		struct snd_soc_dapm_path *path),
 
-	TP_ARGS(widget, path),
+	TP_ARGS(widget, dir, path),
 
 	TP_STRUCT__entry(
 		__string(	wname,	widget->name		)
 		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->sink->name	)
-		__field(	int,	path_sink		)
+		__string(	pnname,	path->node[dir]->name	)
+		__field(	int,	path_node		)
 		__field(	int,	path_connect		)
+		__field(	int,	path_dir		)
 	),
 
 	TP_fast_assign(
 		__assign_str(wname, widget->name);
 		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->sink->name);
+		__assign_str(pnname, path->node[dir]->name);
 		__entry->path_connect = path->connect;
-		__entry->path_sink = (long)path->sink;
+		__entry->path_node = (long)path->node[dir];
+		__entry->path_dir = dir;
 	),
 
-	TP_printk("%c%s -> %s -> %s",
-		(int) __entry->path_sink &&
+	TP_printk("%c%s %s %s %s %s",
+		(int) __entry->path_node &&
 		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
-);
-
-TRACE_EVENT(snd_soc_dapm_input_path,
-
-	TP_PROTO(struct snd_soc_dapm_widget *widget,
-		struct snd_soc_dapm_path *path),
-
-	TP_ARGS(widget, path),
-
-	TP_STRUCT__entry(
-		__string(	wname,	widget->name		)
-		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->source->name	)
-		__field(	int,	path_source		)
-		__field(	int,	path_connect		)
-	),
-
-	TP_fast_assign(
-		__assign_str(wname, widget->name);
-		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->source->name);
-		__entry->path_connect = path->connect;
-		__entry->path_source = (long)path->source;
-	),
-
-	TP_printk("%c%s <- %s <- %s",
-		(int) __entry->path_source &&
-		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
+		__get_str(wname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pnname))
 );
 
 TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d2..0000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ext3
-
-#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT3_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ext3_free_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	uid_t,	uid			)
-		__field(	gid_t,	gid			)
-		__field(	blkcnt_t, blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->uid	= i_uid_read(inode);
-		__entry->gid	= i_gid_read(inode);
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->uid, __entry->gid,
-		  (unsigned long) __entry->blocks)
-);
-
-TRACE_EVENT(ext3_request_inode,
-	TP_PROTO(struct inode *dir, int mode),
-
-	TP_ARGS(dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= dir->i_sb->s_dev;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_allocate_inode,
-	TP_PROTO(struct inode *inode, struct inode *dir, int mode),
-
-	TP_ARGS(inode, dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_evict_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	nlink			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->nlink	= inode->i_nlink;
-	),
-
-	TP_printk("dev %d,%d ino %lu nlink %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->nlink)
-);
-
-TRACE_EVENT(ext3_drop_inode,
-	TP_PROTO(struct inode *inode, int drop),
-
-	TP_ARGS(inode, drop),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	drop			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->drop	= drop;
-	),
-
-	TP_printk("dev %d,%d ino %lu drop %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->drop)
-);
-
-TRACE_EVENT(ext3_mark_inode_dirty,
-	TP_PROTO(struct inode *inode, unsigned long IP),
-
-	TP_ARGS(inode, IP),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(unsigned long,	ip			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->ip	= IP;
-	),
-
-	TP_printk("dev %d,%d ino %lu caller %pS",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext3_write_begin,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int flags),
-
-	TP_ARGS(inode, pos, len, flags),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, flags		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->flags	= flags;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(ext3__write_end,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-			unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, copied		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->copied	= copied;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DECLARE_EVENT_CLASS(ext3__page_op,
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	pgoff_t, index			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->index)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_readpage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_releasepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-TRACE_EVENT(ext3_invalidatepage,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
-	TP_ARGS(page, offset, length),
-
-	TP_STRUCT__entry(
-		__field(	pgoff_t, index			)
-		__field(	unsigned int, offset		)
-		__field(	unsigned int, length		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->offset	= offset;
-		__entry->length	= length;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->index, __entry->offset, __entry->length)
-);
-
-TRACE_EVENT(ext3_discard_blocks,
-	TP_PROTO(struct super_block *sb, unsigned long blk,
-			unsigned long count),
-
-	TP_ARGS(sb, blk, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	blk		)
-		__field(	unsigned long,	count		)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->blk	= blk;
-		__entry->count	= count;
-	),
-
-	TP_printk("dev %d,%d blk %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->blk, __entry->count)
-);
-
-TRACE_EVENT(ext3_request_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count),
-
-	TP_ARGS(inode, goal, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->count, __entry->goal)
-);
-
-TRACE_EVENT(ext3_allocate_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count, unsigned long block),
-
-	TP_ARGS(inode, goal, count, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->block	= block;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->count, __entry->block,
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_free_blocks,
-	TP_PROTO(struct inode *inode, unsigned long block,
-		 unsigned long count),
-
-	TP_ARGS(inode, block, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	count		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= inode->i_sb->s_dev;
-		__entry->ino		= inode->i_ino;
-		__entry->mode		= inode->i_mode;
-		__entry->block		= block;
-		__entry->count		= count;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->block, __entry->count)
-);
-
-TRACE_EVENT(ext3_sync_file_enter,
-	TP_PROTO(struct file *file, int datasync),
-
-	TP_ARGS(file, datasync),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	parent			)
-		__field(	int,	datasync		)
-	),
-
-	TP_fast_assign(
-		struct dentry *dentry = file->f_path.dentry;
-
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->datasync	= datasync;
-		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
-	),
-
-	TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->parent, __entry->datasync)
-);
-
-TRACE_EVENT(ext3_sync_file_exit,
-	TP_PROTO(struct inode *inode, int ret),
-
-	TP_ARGS(inode, ret),
-
-	TP_STRUCT__entry(
-		__field(	int,	ret			)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->ret		= ret;
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-TRACE_EVENT(ext3_sync_fs,
-	TP_PROTO(struct super_block *sb, int wait),
-
-	TP_ARGS(sb, wait),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	wait			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->wait	= wait;
-	),
-
-	TP_printk("dev %d,%d wait %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->wait)
-);
-
-TRACE_EVENT(ext3_rsv_window_add,
-	TP_PROTO(struct super_block *sb,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-	),
-
-	TP_printk("dev %d,%d start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_discard_reservation,
-	TP_PROTO(struct inode *inode,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(inode, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long)__entry->ino, __entry->start,
-		  __entry->end)
-);
-
-TRACE_EVENT(ext3_alloc_new_reservation,
-	TP_PROTO(struct super_block *sb, unsigned long goal),
-
-	TP_ARGS(sb, goal),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_reserved,
-	TP_PROTO(struct super_block *sb, unsigned long block,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, block, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->block	= block;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->dev	= sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d block %lu, start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->block, __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_forget,
-	TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
-
-	TP_ARGS(inode, is_metadata, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	int,	is_metadata		)
-		__field(	unsigned long,	block		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->is_metadata = is_metadata;
-		__entry->block	= block;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->is_metadata, __entry->block)
-);
-
-TRACE_EVENT(ext3_read_block_bitmap,
-	TP_PROTO(struct super_block *sb, unsigned int group),
-
-	TP_ARGS(sb, group),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	__u32,	group			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->group	= group;
-	),
-
-	TP_printk("dev %d,%d group %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->group)
-);
-
-TRACE_EVENT(ext3_direct_IO_enter,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
-	TP_ARGS(inode, offset, len, rw),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw)
-);
-
-TRACE_EVENT(ext3_direct_IO_exit,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
-		 int rw, int ret),
-
-	TP_ARGS(inode, offset, len, rw, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw, __entry->ret)
-);
-
-TRACE_EVENT(ext3_unlink_enter,
-	TP_PROTO(struct inode *parent, struct dentry *dentry),
-
-	TP_ARGS(parent, dentry),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	parent			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	size			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->parent		= parent->i_ino;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->size		= d_inode(dentry)->i_size;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu size %lld parent %ld",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long)__entry->size,
-		  (unsigned long) __entry->parent)
-);
-
-TRACE_EVENT(ext3_unlink_exit,
-	TP_PROTO(struct dentry *dentry, int ret),
-
-	TP_ARGS(dentry, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ret		= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-DECLARE_EVENT_CLASS(ext3__truncate,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	blkcnt_t,	blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-TRACE_EVENT(ext3_get_blocks_enter,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long len, int create),
-
-	TP_ARGS(inode, lblk, len, create),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		create		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->len	= len;
-		__entry->create	= create;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->lblk, __entry->len, __entry->create)
-);
-
-TRACE_EVENT(ext3_get_blocks_exit,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long pblk, unsigned long len, int ret),
-
-	TP_ARGS(inode, lblk, pblk, len, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	pblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		ret		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->pblk	= pblk;
-		__entry->len	= len;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->lblk, __entry->pblk,
-		  __entry->len, __entry->ret)
-);
-
-TRACE_EVENT(ext3_load_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino		)
-		__field(	dev_t,	dev		)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino)
-);
-
-#endif /* _TRACE_EXT3_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2..a019465 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@
 TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
 
 	TP_PROTO(struct inode *inode, unsigned int pgofs,
-						struct extent_node *en),
+						struct extent_info *ei),
 
-	TP_ARGS(inode, pgofs, en),
+	TP_ARGS(inode, pgofs, ei),
 
-	TP_CONDITION(en),
+	TP_CONDITION(ei),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
@@ -1118,9 +1118,9 @@
 		__entry->dev = inode->i_sb->s_dev;
 		__entry->ino = inode->i_ino;
 		__entry->pgofs = pgofs;
-		__entry->fofs = en->ei.fofs;
-		__entry->blk = en->ei.blk;
-		__entry->len = en->ei.len;
+		__entry->fofs = ei->fofs;
+		__entry->blk = ei->blk;
+		__entry->len = ei->len;
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f259..0000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd
-
-#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_JBD_H
-
-#include <linux/jbd.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(jbd_checkpoint,
-
-	TP_PROTO(journal_t *journal, int result),
-
-	TP_ARGS(journal, result),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	result			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->result		= result;
-	),
-
-	TP_printk("dev %d,%d result %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->result)
-);
-
-DECLARE_EVENT_CLASS(jbd_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_start_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_locking,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_logging,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-TRACE_EVENT(jbd_drop_transaction,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-TRACE_EVENT(jbd_end_commit,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-		__field(	int,	head			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-		__entry->head		= journal->j_tail_sequence;
-	),
-
-	TP_printk("dev %d,%d transaction %d head %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->head)
-);
-
-TRACE_EVENT(jbd_do_submit_data,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		   __entry->transaction)
-);
-
-TRACE_EVENT(jbd_cleanup_journal_tail,
-
-	TP_PROTO(journal_t *journal, tid_t first_tid,
-		 unsigned long block_nr, unsigned long freed),
-
-	TP_ARGS(journal, first_tid, block_nr, freed),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	tid_t,	tail_sequence		)
-		__field(	tid_t,	first_tid		)
-		__field(unsigned long,	block_nr		)
-		__field(unsigned long,	freed			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->tail_sequence	= journal->j_tail_sequence;
-		__entry->first_tid	= first_tid;
-		__entry->block_nr	= block_nr;
-		__entry->freed		= freed;
-	),
-
-	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->tail_sequence, __entry->first_tid,
-		  __entry->block_nr, __entry->freed)
-);
-
-TRACE_EVENT(journal_write_superblock,
-	TP_PROTO(journal_t *journal, int write_op),
-
-	TP_ARGS(journal, write_op),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	write_op		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->write_op	= write_op;
-	),
-
-	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
-		  MINOR(__entry->dev), __entry->write_op)
-);
-
-#endif /* _TRACE_JBD_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index fd1a02c..003dca9 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -529,18 +529,21 @@
 
 	TP_STRUCT__entry(
 		__field(struct svc_xprt *, xprt)
-		__field(struct svc_rqst *, rqst)
+		__field_struct(struct sockaddr_storage, ss)
+		__field(int, pid)
+		__field(unsigned long, flags)
 	),
 
 	TP_fast_assign(
 		__entry->xprt = xprt;
-		__entry->rqst = rqst;
+		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+		__entry->pid = rqst ? rqst->rq_task->pid : 0;
+		__entry->flags = xprt ? xprt->xpt_flags : 0;
 	),
 
 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
-		(struct sockaddr *)&__entry->xprt->xpt_remote,
-		__entry->rqst ? __entry->rqst->rq_task->pid : 0,
-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
+		(struct sockaddr *)&__entry->ss,
+		__entry->pid, show_svc_xprt_flags(__entry->flags))
 );
 
 TRACE_EVENT(svc_xprt_dequeue,
@@ -589,16 +592,20 @@
 	TP_STRUCT__entry(
 		__field(struct svc_xprt *, xprt)
 		__field(int, len)
+		__field_struct(struct sockaddr_storage, ss)
+		__field(unsigned long, flags)
 	),
 
 	TP_fast_assign(
 		__entry->xprt = xprt;
+		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
 		__entry->len = len;
+		__entry->flags = xprt ? xprt->xpt_flags : 0;
 	),
 
 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
-		(struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
+		(struct sockaddr *)&__entry->ss,
+		__entry->len, show_svc_xprt_flags(__entry->flags))
 );
 #endif /* _TRACE_SUNRPC_H */
 
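The two sunrpc hunks above share one fix: TP_printk() does not run when the tracepoint fires but later, when the trace buffer is read, by which point the svc_xprt and svc_rqst may already have been freed. Everything the format string needs is therefore snapshotted into __entry during TP_fast_assign(). A sketch of the safe shape, with hypothetical names (struct example_obj and the event below are not from this patch):

TRACE_EVENT(example_obj_state,
	TP_PROTO(struct example_obj *obj),

	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(unsigned long, flags)	/* a copied value, not a pointer */
	),

	TP_fast_assign(
		/* NULL-safe snapshot taken while @obj is still alive */
		__entry->flags = obj ? obj->flags : 0;
	),

	/* runs at read time; touches only the copied __entry data */
	TP_printk("flags=0x%lx", __entry->flags)
);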
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index dee3bb1..2cca6cd 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -46,7 +46,7 @@
 	TP_fast_assign(
 		__entry->pid = task->pid;
 		memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
-		memcpy(entry->newcomm, comm, TASK_COMM_LEN);
+		strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
 		__entry->oom_score_adj = task->signal->oom_score_adj;
 	),
 
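The memcpy()-to-strlcpy() switch above matters because comm may be shorter than TASK_COMM_LEN: memcpy() copies the full 16 bytes regardless, dragging whatever follows the string into the trace buffer, while strlcpy() stops at the NUL and guarantees termination. A standalone illustration (strlcpy is open-coded here because glibc does not ship one):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/* minimal strlcpy for illustration only */
static size_t strlcpy_demo(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char newcomm[TASK_COMM_LEN];

	/* "sh" occupies 3 bytes with its NUL; a memcpy of all 16 would
	 * also read 13 bytes past the source and record them */
	strlcpy_demo(newcomm, "sh", sizeof(newcomm));
	printf("%s\n", newcomm);
	return 0;
}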
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 4250f36..bc8815f 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -11,7 +11,8 @@
 	EM(  TLB_FLUSH_ON_TASK_SWITCH,	"flush on task switch" )	\
 	EM(  TLB_REMOTE_SHOOTDOWN,	"remote shootdown" )		\
 	EM(  TLB_LOCAL_SHOOTDOWN,	"local shootdown" )		\
-	EMe( TLB_LOCAL_MM_SHOOTDOWN,	"local mm shootdown" )
+	EM(  TLB_LOCAL_MM_SHOOTDOWN,	"local mm shootdown" )		\
+	EMe( TLB_REMOTE_SEND_IPI,	"remote ipi send" )
 
 /*
  * First define the enums in TLB_FLUSH_REASON to be exported to userspace
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 89d0497..dbf017b 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -93,90 +93,183 @@
 		{ V4L2_TC_USERBITS_USERDEFINED,	"USERBITS_USERDEFINED" }, \
 		{ V4L2_TC_USERBITS_8BITCHARS,	"USERBITS_8BITCHARS" })
 
-#define V4L2_TRACE_EVENT(event_name)					\
-	TRACE_EVENT(event_name,						\
-		TP_PROTO(int minor, struct v4l2_buffer *buf),		\
-									\
-		TP_ARGS(minor, buf),					\
-									\
-		TP_STRUCT__entry(					\
-			__field(int, minor)				\
-			__field(u32, index)				\
-			__field(u32, type)				\
-			__field(u32, bytesused)				\
-			__field(u32, flags)				\
-			__field(u32, field)				\
-			__field(s64, timestamp)				\
-			__field(u32, timecode_type)			\
-			__field(u32, timecode_flags)			\
-			__field(u8, timecode_frames)			\
-			__field(u8, timecode_seconds)			\
-			__field(u8, timecode_minutes)			\
-			__field(u8, timecode_hours)			\
-			__field(u8, timecode_userbits0)			\
-			__field(u8, timecode_userbits1)			\
-			__field(u8, timecode_userbits2)			\
-			__field(u8, timecode_userbits3)			\
-			__field(u32, sequence)				\
-		),							\
-									\
-		TP_fast_assign(						\
-			__entry->minor = minor;				\
-			__entry->index = buf->index;			\
-			__entry->type = buf->type;			\
-			__entry->bytesused = buf->bytesused;		\
-			__entry->flags = buf->flags;			\
-			__entry->field = buf->field;			\
-			__entry->timestamp =				\
-				timeval_to_ns(&buf->timestamp);		\
-			__entry->timecode_type = buf->timecode.type;	\
-			__entry->timecode_flags = buf->timecode.flags;	\
-			__entry->timecode_frames =			\
-				buf->timecode.frames;			\
-			__entry->timecode_seconds =			\
-				buf->timecode.seconds;			\
-			__entry->timecode_minutes =			\
-				buf->timecode.minutes;			\
-			__entry->timecode_hours = buf->timecode.hours;	\
-			__entry->timecode_userbits0 =			\
-				buf->timecode.userbits[0];		\
-			__entry->timecode_userbits1 =			\
-				buf->timecode.userbits[1];		\
-			__entry->timecode_userbits2 =			\
-				buf->timecode.userbits[2];		\
-			__entry->timecode_userbits3 =			\
-				buf->timecode.userbits[3];		\
-			__entry->sequence = buf->sequence;		\
-		),							\
-									\
-		TP_printk("minor = %d, index = %u, type = %s, "		\
-			  "bytesused = %u, flags = %s, "		\
-			  "field = %s, timestamp = %llu, timecode = { "	\
-			  "type = %s, flags = %s, frames = %u, "	\
-			  "seconds = %u, minutes = %u, hours = %u, "	\
-			  "userbits = { %u %u %u %u } }, "		\
-			  "sequence = %u", __entry->minor,		\
-			  __entry->index, show_type(__entry->type),	\
-			  __entry->bytesused,				\
-			  show_flags(__entry->flags),			\
-			  show_field(__entry->field),			\
-			  __entry->timestamp,				\
-			  show_timecode_type(__entry->timecode_type),	\
-			  show_timecode_flags(__entry->timecode_flags),	\
-			  __entry->timecode_frames,			\
-			  __entry->timecode_seconds,			\
-			  __entry->timecode_minutes,			\
-			  __entry->timecode_hours,			\
-			  __entry->timecode_userbits0,			\
-			  __entry->timecode_userbits1,			\
-			  __entry->timecode_userbits2,			\
-			  __entry->timecode_userbits3,			\
-			  __entry->sequence				\
-		)							\
-	)
+DECLARE_EVENT_CLASS(v4l2_event_class,
+	TP_PROTO(int minor, struct v4l2_buffer *buf),
 
-V4L2_TRACE_EVENT(v4l2_dqbuf);
-V4L2_TRACE_EVENT(v4l2_qbuf);
+	TP_ARGS(minor, buf),
+
+	TP_STRUCT__entry(
+		__field(int, minor)
+		__field(u32, index)
+		__field(u32, type)
+		__field(u32, bytesused)
+		__field(u32, flags)
+		__field(u32, field)
+		__field(s64, timestamp)
+		__field(u32, timecode_type)
+		__field(u32, timecode_flags)
+		__field(u8, timecode_frames)
+		__field(u8, timecode_seconds)
+		__field(u8, timecode_minutes)
+		__field(u8, timecode_hours)
+		__field(u8, timecode_userbits0)
+		__field(u8, timecode_userbits1)
+		__field(u8, timecode_userbits2)
+		__field(u8, timecode_userbits3)
+		__field(u32, sequence)
+	),
+
+	TP_fast_assign(
+		__entry->minor = minor;
+		__entry->index = buf->index;
+		__entry->type = buf->type;
+		__entry->bytesused = buf->bytesused;
+		__entry->flags = buf->flags;
+		__entry->field = buf->field;
+		__entry->timestamp = timeval_to_ns(&buf->timestamp);
+		__entry->timecode_type = buf->timecode.type;
+		__entry->timecode_flags = buf->timecode.flags;
+		__entry->timecode_frames = buf->timecode.frames;
+		__entry->timecode_seconds = buf->timecode.seconds;
+		__entry->timecode_minutes = buf->timecode.minutes;
+		__entry->timecode_hours = buf->timecode.hours;
+		__entry->timecode_userbits0 = buf->timecode.userbits[0];
+		__entry->timecode_userbits1 = buf->timecode.userbits[1];
+		__entry->timecode_userbits2 = buf->timecode.userbits[2];
+		__entry->timecode_userbits3 = buf->timecode.userbits[3];
+		__entry->sequence = buf->sequence;
+	),
+
+	TP_printk("minor = %d, index = %u, type = %s, bytesused = %u, "
+		  "flags = %s, field = %s, timestamp = %llu, "
+		  "timecode = { type = %s, flags = %s, frames = %u, "
+		  "seconds = %u, minutes = %u, hours = %u, "
+		  "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+		  __entry->index, show_type(__entry->type),
+		  __entry->bytesused,
+		  show_flags(__entry->flags),
+		  show_field(__entry->field),
+		  __entry->timestamp,
+		  show_timecode_type(__entry->timecode_type),
+		  show_timecode_flags(__entry->timecode_flags),
+		  __entry->timecode_frames,
+		  __entry->timecode_seconds,
+		  __entry->timecode_minutes,
+		  __entry->timecode_hours,
+		  __entry->timecode_userbits0,
+		  __entry->timecode_userbits1,
+		  __entry->timecode_userbits2,
+		  __entry->timecode_userbits3,
+		  __entry->sequence
+	)
+)
+
+DEFINE_EVENT(v4l2_event_class, v4l2_dqbuf,
+	TP_PROTO(int minor, struct v4l2_buffer *buf),
+	TP_ARGS(minor, buf)
+);
+
+DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
+	TP_PROTO(int minor, struct v4l2_buffer *buf),
+	TP_ARGS(minor, buf)
+);
+
+DECLARE_EVENT_CLASS(vb2_event_class,
+	TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+	TP_ARGS(q, vb),
+
+	TP_STRUCT__entry(
+		__field(int, minor)
+		__field(u32, queued_count)
+		__field(int, owned_by_drv_count)
+		__field(u32, index)
+		__field(u32, type)
+		__field(u32, bytesused)
+		__field(u32, flags)
+		__field(u32, field)
+		__field(s64, timestamp)
+		__field(u32, timecode_type)
+		__field(u32, timecode_flags)
+		__field(u8, timecode_frames)
+		__field(u8, timecode_seconds)
+		__field(u8, timecode_minutes)
+		__field(u8, timecode_hours)
+		__field(u8, timecode_userbits0)
+		__field(u8, timecode_userbits1)
+		__field(u8, timecode_userbits2)
+		__field(u8, timecode_userbits3)
+		__field(u32, sequence)
+	),
+
+	TP_fast_assign(
+		__entry->minor = q->owner ? q->owner->vdev->minor : -1;
+		__entry->queued_count = q->queued_count;
+		__entry->owned_by_drv_count =
+			atomic_read(&q->owned_by_drv_count);
+		__entry->index = vb->v4l2_buf.index;
+		__entry->type = vb->v4l2_buf.type;
+		__entry->bytesused = vb->v4l2_planes[0].bytesused;
+		__entry->flags = vb->v4l2_buf.flags;
+		__entry->field = vb->v4l2_buf.field;
+		__entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
+		__entry->timecode_type = vb->v4l2_buf.timecode.type;
+		__entry->timecode_flags = vb->v4l2_buf.timecode.flags;
+		__entry->timecode_frames = vb->v4l2_buf.timecode.frames;
+		__entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
+		__entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
+		__entry->timecode_hours = vb->v4l2_buf.timecode.hours;
+		__entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
+		__entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
+		__entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
+		__entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
+		__entry->sequence = vb->v4l2_buf.sequence;
+	),
+
+	TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
+		  "type = %s, bytesused = %u, flags = %s, field = %s, "
+		  "timestamp = %llu, timecode = { type = %s, flags = %s, "
+		  "frames = %u, seconds = %u, minutes = %u, hours = %u, "
+		  "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+		  __entry->queued_count,
+		  __entry->owned_by_drv_count,
+		  __entry->index, show_type(__entry->type),
+		  __entry->bytesused,
+		  show_flags(__entry->flags),
+		  show_field(__entry->field),
+		  __entry->timestamp,
+		  show_timecode_type(__entry->timecode_type),
+		  show_timecode_flags(__entry->timecode_flags),
+		  __entry->timecode_frames,
+		  __entry->timecode_seconds,
+		  __entry->timecode_minutes,
+		  __entry->timecode_hours,
+		  __entry->timecode_userbits0,
+		  __entry->timecode_userbits1,
+		  __entry->timecode_userbits2,
+		  __entry->timecode_userbits3,
+		  __entry->sequence
+	)
+)
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+	TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+	TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+	TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+	TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+	TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+	TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+	TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+	TP_ARGS(q, vb)
+);
 
 #endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
 
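The v4l2 rework above replaces a macro that expanded to a complete TRACE_EVENT per name with a DECLARE_EVENT_CLASS/DEFINE_EVENT pair, so the field layout, assignment code, and format string are generated once and each event only contributes a name. Reduced to a hypothetical minimal class, the pattern looks like this:

DECLARE_EVENT_CLASS(example_class,
	TP_PROTO(int minor),

	TP_ARGS(minor),

	TP_STRUCT__entry(
		__field(int, minor)
	),

	TP_fast_assign(
		__entry->minor = minor;
	),

	TP_printk("minor = %d", __entry->minor)
);

/* each DEFINE_EVENT reuses the class code under a new tracepoint name */
DEFINE_EVENT(example_class, example_enter,
	TP_PROTO(int minor),
	TP_ARGS(minor)
);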
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cd..8c5e8b9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
 /* color index */
 #define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
+/* 8 bpp Red */
+#define DRM_FORMAT_R8		fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88		fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88		fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
 /* 8 bpp RGB */
 #define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
 #define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
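The new R8/RG88/GR88 formats are built with fourcc_code(), which packs four ASCII bytes into a little-endian u32. A standalone check, with the macro body reproduced from the conventional drm_fourcc.h definition:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
	/* 'R' = 0x52, '8' = 0x38, ' ' = 0x20 */
	printf("DRM_FORMAT_R8 = 0x%08" PRIx32 "\n",
	       fourcc_code('R', '8', ' ', ' '));	/* 0x20203852 */
	return 0;
}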
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b7..dbd16a2 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@
 #define I915_PARAM_REVISION              32
 #define I915_PARAM_SUBSLICE_TOTAL	 33
 #define I915_PARAM_EU_TOTAL		 34
+#define I915_PARAM_HAS_GPU_RESET	 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
 typedef struct drm_i915_getparam {
-	int param;
+	s32 param;
+	/*
+	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
+	 * compat32 code. Don't repeat this mistake.
+	 */
 	int __user *value;
 } drm_i915_getparam_t;
 
@@ -764,7 +770,12 @@
 #define I915_EXEC_BSD_RING1		(1<<13)
 #define I915_EXEC_BSD_RING2		(2<<13)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ *  the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@
 	__u32 size;
 	__u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
 	__u64 value;
 };
 
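The two new parameters are probed through the existing DRM_IOCTL_I915_GETPARAM ioctl, which fails with EINVAL on kernels that don't know them. A sketch, assuming /dev/dri/card0 is the i915 node and libdrm's headers are on the include path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

#ifndef I915_PARAM_HAS_RESOURCE_STREAMER
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#endif

int main(void)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_RESOURCE_STREAMER,
		.value = &value,
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
		perror("DRM_IOCTL_I915_GETPARAM");	/* old kernel */
	else
		printf("resource streamer: %d\n", value);
	close(fd);
	return 0;
}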
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedb..05b2049 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE     8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
+#define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -907,6 +911,8 @@
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have proper bind flags setup.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
@@ -1059,4 +1065,28 @@
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index aafb993..70ff1d9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -456,3 +456,4 @@
 header-y += xilinx-v4l2-controls.h
 header-y += zorro.h
 header-y += zorro_ids.h
+header-y += userfaultfd.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1..843540c 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -266,6 +266,7 @@
 #define AUDIT_OBJ_UID	109
 #define AUDIT_OBJ_GID	110
 #define AUDIT_FIELD_COMPARE	111
+#define AUDIT_EXE	112
 
 #define AUDIT_ARG0      200
 #define AUDIT_ARG1      (AUDIT_ARG0+1)
@@ -324,8 +325,10 @@
 
 #define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT	0x00000001
 #define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME	0x00000002
+#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH	0x00000004
 #define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
-				  AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME)
+				  AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
+				  AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
 
 /* deprecated: AUDIT_VERSION_* */
 #define AUDIT_VERSION_LATEST 		AUDIT_FEATURE_BITMAP_ALL
@@ -382,6 +385,9 @@
 #define AUDIT_ARCH_SHEL64	(EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_SPARC	(EM_SPARC)
 #define AUDIT_ARCH_SPARC64	(EM_SPARCV9|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_TILEGX	(EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEGX32	(EM_TILEGX|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEPRO	(EM_TILEPRO|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 
 #define AUDIT_PERM_EXEC		1
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783..df56c8f 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
 /* Version of the device interface */
 #define DLM_DEVICE_VERSION_MAJOR 6
 #define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 1
+#define DLM_DEVICE_VERSION_PATCH 2
 
 /* struct passed to the lock write */
 struct dlm_lock_params {
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b088296..3429a3b 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,8 @@
 #define EM_ALTERA_NIOS2	113	/* Altera Nios II soft-core processor */
 #define EM_TI_C6000	140	/* TI C6X DSPs */
 #define EM_AARCH64	183	/* ARM 64 bit */
+#define EM_TILEPRO	188	/* Tilera TILEPro */
+#define EM_TILEGX	191	/* Tilera TILE-Gx */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 #define EM_AVR32	0x18ad	/* Atmel AVR32 */
 
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 2b94ea2..5b4a4be 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -87,7 +87,7 @@
 		__u32 handle;
 		__u32 flags;
 		__u64 err_address;
-		__u64 mask;
+		__u64 length;
 	} __packed records[0];
 } __packed;
 
@@ -111,6 +111,11 @@
 	ND_CMD_VENDOR = 9,
 };
 
+enum {
+	ND_ARS_VOLATILE = 1,
+	ND_ARS_PERSISTENT = 2,
+};
+
 static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
 {
 	static const char * const names[] = {
@@ -194,4 +199,9 @@
 enum {
 	ND_MIN_NAMESPACE_SIZE = 0x00400000,
 };
+
+enum ars_masks {
+	ARS_STATUS_MASK = 0x0000FFFF,
+	ARS_EXT_STATUS_SHIFT = 16,
+};
 #endif /* __NDCTL_H__ */
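The ars_masks values split the 32-bit scrub status word into a low status half and a high extended-status half; decoding is one mask and one shift. For example (the raw value below is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ARS_STATUS_MASK      0x0000FFFF
#define ARS_EXT_STATUS_SHIFT 16

int main(void)
{
	uint32_t raw = 0x00030000;	/* hypothetical firmware status */

	printf("status=%" PRIu32 " ext_status=%" PRIu32 "\n",
	       raw & ARS_STATUS_MASK, raw >> ARS_EXT_STATUS_SHIFT);
	return 0;
}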
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2119c7c..2b871e0 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -15,7 +15,7 @@
 
 #include <linux/types.h>
 
-#define NFS4_BITMAP_SIZE	2
+#define NFS4_BITMAP_SIZE	3
 #define NFS4_VERIFIER_SIZE	8
 #define NFS4_STATEID_SEQID_SIZE 4
 #define NFS4_STATEID_OTHER_SIZE 12
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index 9bb9771..5527266 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -22,6 +22,7 @@
 #define NFS_ACLCNT		0x0002
 #define NFS_DFACL		0x0004
 #define NFS_DFACLCNT		0x0008
+#define NFS_ACL_MASK		0x000f
 
 /* Flag for Default ACL entries */
 #define NFS_ACL_DEFAULT		0x1000
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 31891d9..a8d0759 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -190,4 +190,11 @@
 # define PR_FP_MODE_FR		(1 << 0)	/* 64b FP registers */
 # define PR_FP_MODE_FRE		(1 << 1)	/* 32b compatibility */
 
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+
 #endif /* _LINUX_PRCTL_H */
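PR_CAP_AMBIENT is driven through an ordinary prctl() call; raising a capability into the ambient set requires it to already be in both the permitted and inheritable sets, after which it survives execve() of unprivileged binaries. A minimal sketch (the defines are repeated locally in case installed headers predate this patch):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

#ifndef PR_CAP_AMBIENT
#define PR_CAP_AMBIENT		47
#define PR_CAP_AMBIENT_RAISE	2
#endif

int main(void)
{
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		  CAP_NET_BIND_SERVICE, 0, 0) < 0)
		perror("PR_CAP_AMBIENT_RAISE");	/* EPERM unless permitted+inheritable */
	else
		printf("ambient CAP_NET_BIND_SERVICE raised\n");
	return 0;
}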
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index cf1019e..a7a6979 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -89,9 +89,11 @@
 #define PTRACE_O_TRACESECCOMP	(1 << PTRACE_EVENT_SECCOMP)
 
 /* eventless options */
-#define PTRACE_O_EXITKILL	(1 << 20)
+#define PTRACE_O_EXITKILL		(1 << 20)
+#define PTRACE_O_SUSPEND_SECCOMP	(1 << 21)
 
-#define PTRACE_O_MASK		(0x000000ff | PTRACE_O_EXITKILL)
+#define PTRACE_O_MASK		(\
+	0x000000ff | PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP)
 
 #include <asm/ptrace.h>
 
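PTRACE_O_SUSPEND_SECCOMP is set through the usual PTRACE_SETOPTIONS path; the kernel additionally requires the tracer itself to hold CAP_SYS_ADMIN and to be free of seccomp filters. A sketch, assuming the tracee is already attached and in ptrace-stop:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_O_EXITKILL
#define PTRACE_O_EXITKILL		(1 << 20)
#endif
#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP	(1 << 21)
#endif

static int suspend_seccomp(pid_t pid)
{
	/* suspends the tracee's seccomp filtering, e.g. so a checkpoint
	 * tool can inject syscalls the filter would otherwise reject */
	if (ptrace(PTRACE_SETOPTIONS, pid, 0L,
		   PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP) == -1) {
		perror("PTRACE_SETOPTIONS");
		return -1;
	}
	return 0;
}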
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index 985aac9..35ac35c 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -43,9 +43,18 @@
 #define SECBIT_KEEP_CAPS	(issecure_mask(SECURE_KEEP_CAPS))
 #define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
 
+/* When set, a process cannot add new capabilities to its ambient set. */
+#define SECURE_NO_CAP_AMBIENT_RAISE		6
+#define SECURE_NO_CAP_AMBIENT_RAISE_LOCKED	7  /* make bit-6 immutable */
+
+#define SECBIT_NO_CAP_AMBIENT_RAISE (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
+#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
+			(issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
+
 #define SECURE_ALL_BITS		(issecure_mask(SECURE_NOROOT) | \
 				 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
-				 issecure_mask(SECURE_KEEP_CAPS))
+				 issecure_mask(SECURE_KEEP_CAPS) | \
+				 issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
 #define SECURE_ALL_LOCKS	(SECURE_ALL_BITS << 1)
 
 #endif /* _UAPI_LINUX_SECUREBITS_H */
diff --git a/include/uapi/linux/toshiba.h b/include/uapi/linux/toshiba.h
index e9bef5b..c58bf4b 100644
--- a/include/uapi/linux/toshiba.h
+++ b/include/uapi/linux/toshiba.h
@@ -1,6 +1,7 @@
 /* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops 
  *
  * Copyright (c) 1996-2000  Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ * Copyright (c) 2015  Azael Avalos <coproscefalo@gmail.com>
  *
  * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers
  * on making sure the structure is aligned and packed.
@@ -20,9 +21,18 @@
 #ifndef _UAPI_LINUX_TOSHIBA_H
 #define _UAPI_LINUX_TOSHIBA_H
 
-#define TOSH_PROC "/proc/toshiba"
-#define TOSH_DEVICE "/dev/toshiba"
-#define TOSH_SMM _IOWR('t', 0x90, int)	/* broken: meant 24 bytes */
+/*
+ * Toshiba modules paths
+ */
+
+#define TOSH_PROC		"/proc/toshiba"
+#define TOSH_DEVICE		"/dev/toshiba"
+#define TOSHIBA_ACPI_PROC	"/proc/acpi/toshiba"
+#define TOSHIBA_ACPI_DEVICE	"/dev/toshiba_acpi"
+
+/*
+ * Toshiba SMM structure
+ */
 
 typedef struct {
 	unsigned int eax;
@@ -33,5 +43,21 @@
 	unsigned int edi __attribute__ ((packed));
 } SMMRegisters;
 
+/*
+ * IOCTLs (0x90 - 0x91)
+ */
+
+#define TOSH_SMM		_IOWR('t', 0x90, SMMRegisters)
+/*
+ * Convenience toshiba_acpi command.
+ *
+ * The System Configuration Interface (SCI) is opened/closed internally
+ * to shield userspace from buggy BIOSes.
+ *
+ * The toshiba_acpi module checks whether the eax register is set with
+ * SCI_GET (0xf300) or SCI_SET (0xf400), returning -EINVAL if not.
+ */
+#define TOSHIBA_ACPI_SCI	_IOWR('t', 0x91, SMMRegisters)
+
 
 #endif /* _UAPI_LINUX_TOSHIBA_H */
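With the ioctl number now encoding SMMRegisters, the kernel copies the whole register block in and out instead of the broken int-sized transfer. A sketch of a call through the new toshiba_acpi node (the eax value only satisfies the SCI_GET check described above; a real query would also fill the remaining registers):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/toshiba.h>

int main(void)
{
	SMMRegisters regs = { .eax = 0xf300 };	/* SCI_GET */
	int fd = open(TOSHIBA_ACPI_DEVICE, O_RDWR);

	if (fd < 0) {
		perror(TOSHIBA_ACPI_DEVICE);
		return 1;
	}
	if (ioctl(fd, TOSHIBA_ACPI_SCI, &regs) < 0)
		perror("TOSHIBA_ACPI_SCI");
	else
		printf("eax = 0x%x\n", regs.eax);
	close(fd);
	return 0;
}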
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
new file mode 100644
index 0000000..df0e09b
--- /dev/null
+++ b/include/uapi/linux/userfaultfd.h
@@ -0,0 +1,169 @@
+/*
+ *  include/linux/userfaultfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_H
+#define _LINUX_USERFAULTFD_H
+
+#include <linux/types.h>
+
+#include <linux/compiler.h>
+
+#define UFFD_API ((__u64)0xAA)
+/*
+ * After implementing the respective features it will become:
+ * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ *			      UFFD_FEATURE_EVENT_FORK)
+ */
+#define UFFD_API_FEATURES (0)
+#define UFFD_API_IOCTLS				\
+	((__u64)1 << _UFFDIO_REGISTER |		\
+	 (__u64)1 << _UFFDIO_UNREGISTER |	\
+	 (__u64)1 << _UFFDIO_API)
+#define UFFD_API_RANGE_IOCTLS			\
+	((__u64)1 << _UFFDIO_WAKE |		\
+	 (__u64)1 << _UFFDIO_COPY |		\
+	 (__u64)1 << _UFFDIO_ZEROPAGE)
+
+/*
+ * Valid ioctl command number range with this API is from 0x00 to
+ * 0x3F.  UFFDIO_API is the fixed number, everything else can be
+ * changed by implementing a different UFFD_API. If sticking to the
+ * same UFFD_API, more ioctls can be added, and userland will learn
+ * which ioctls the running kernel implements through the ioctl
+ * command bitmask written by UFFDIO_API.
+ */
+#define _UFFDIO_REGISTER		(0x00)
+#define _UFFDIO_UNREGISTER		(0x01)
+#define _UFFDIO_WAKE			(0x02)
+#define _UFFDIO_COPY			(0x03)
+#define _UFFDIO_ZEROPAGE		(0x04)
+#define _UFFDIO_API			(0x3F)
+
+/* userfaultfd ioctl ids */
+#define UFFDIO 0xAA
+#define UFFDIO_API		_IOWR(UFFDIO, _UFFDIO_API,	\
+				      struct uffdio_api)
+#define UFFDIO_REGISTER		_IOWR(UFFDIO, _UFFDIO_REGISTER, \
+				      struct uffdio_register)
+#define UFFDIO_UNREGISTER	_IOR(UFFDIO, _UFFDIO_UNREGISTER,	\
+				     struct uffdio_range)
+#define UFFDIO_WAKE		_IOR(UFFDIO, _UFFDIO_WAKE,	\
+				     struct uffdio_range)
+#define UFFDIO_COPY		_IOWR(UFFDIO, _UFFDIO_COPY,	\
+				      struct uffdio_copy)
+#define UFFDIO_ZEROPAGE		_IOWR(UFFDIO, _UFFDIO_ZEROPAGE,	\
+				      struct uffdio_zeropage)
+
+/* read() structure */
+struct uffd_msg {
+	__u8	event;
+
+	__u8	reserved1;
+	__u16	reserved2;
+	__u32	reserved3;
+
+	union {
+		struct {
+			__u64	flags;
+			__u64	address;
+		} pagefault;
+
+		struct {
+			/* unused reserved fields */
+			__u64	reserved1;
+			__u64	reserved2;
+			__u64	reserved3;
+		} reserved;
+	} arg;
+} __packed;
+
+/*
+ * Start at 0x12 rather than at 0 to make stray zeroed events easier to catch.
+ */
+#define UFFD_EVENT_PAGEFAULT	0x12
+#if 0 /* not available yet */
+#define UFFD_EVENT_FORK		0x13
+#endif
+
+/* flags for UFFD_EVENT_PAGEFAULT */
+#define UFFD_PAGEFAULT_FLAG_WRITE	(1<<0)	/* If this was a write fault */
+#define UFFD_PAGEFAULT_FLAG_WP		(1<<1)	/* If reason is VM_UFFD_WP */
+
+struct uffdio_api {
+	/* userland asks for an API number and the features to enable */
+	__u64 api;
+	/*
+	 * The kernel answers below with all the available features
+	 * for the API; this tells userland which events and/or
+	 * which flags for each event are enabled in the current
+	 * kernel.
+	 *
+	 * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE
+	 * are to be considered implicitly always enabled in all kernels as
+	 * long as the uffdio_api.api requested matches UFFD_API.
+	 */
+#if 0 /* not available yet */
+#define UFFD_FEATURE_PAGEFAULT_FLAG_WP		(1<<0)
+#define UFFD_FEATURE_EVENT_FORK			(1<<1)
+#endif
+	__u64 features;
+
+	__u64 ioctls;
+};
+
+struct uffdio_range {
+	__u64 start;
+	__u64 len;
+};
+
+struct uffdio_register {
+	struct uffdio_range range;
+#define UFFDIO_REGISTER_MODE_MISSING	((__u64)1<<0)
+#define UFFDIO_REGISTER_MODE_WP		((__u64)1<<1)
+	__u64 mode;
+
+	/*
+	 * kernel answers which ioctl commands are available for the
+	 * range, keep at the end as the last 8 bytes aren't read.
+	 */
+	__u64 ioctls;
+};
+
+struct uffdio_copy {
+	__u64 dst;
+	__u64 src;
+	__u64 len;
+	/*
+	 * There will be a wrprotection flag later that allows mapping
+	 * pages write-protected on the fly. Such a flag will be
+	 * available if the wrprotection ioctls are implemented for the
+	 * range, according to uffdio_register.ioctls.
+	 */
+#define UFFDIO_COPY_MODE_DONTWAKE		((__u64)1<<0)
+	__u64 mode;
+
+	/*
+	 * "copy" is written by the ioctl and must be at the end: the
+	 * copy_from_user will not read the last 8 bytes.
+	 */
+	__s64 copy;
+};
+
+struct uffdio_zeropage {
+	struct uffdio_range range;
+#define UFFDIO_ZEROPAGE_MODE_DONTWAKE		((__u64)1<<0)
+	__u64 mode;
+
+	/*
+	 * "zeropage" is written by the ioctl and must be at the end:
+	 * the copy_from_user will not read the last 8 bytes.
+	 */
+	__s64 zeropage;
+};
+
+#endif /* _LINUX_USERFAULTFD_H */
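From userspace the flow is: create the descriptor with the new userfaultfd syscall, perform the mandatory UFFDIO_API handshake, register memory ranges, then read struct uffd_msg events and resolve them with UFFDIO_COPY or UFFDIO_ZEROPAGE. A minimal sketch up to registration, assuming a libc that exposes __NR_userfaultfd:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	void *area;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}

	/* mandatory handshake: the kernel writes back the feature and
	 * ioctl bitmasks it supports for this API version */
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("UFFDIO_API");
		return 1;
	}

	area = mmap(NULL, page, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	reg.range.start = (unsigned long)area;
	reg.range.len = page;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
		perror("UFFDIO_REGISTER");
		return 1;
	}

	/* reg.ioctls now reports the range ioctls (WAKE/COPY/ZEROPAGE)
	 * usable here; missing-page faults arrive as struct uffd_msg
	 * via read(uffd, ...) */
	printf("range ioctls: 0x%llx\n", (unsigned long long)reg.ioctls);
	return 0;
}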
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 9f6e108..d448c53 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -174,6 +174,10 @@
  * We reserve 16 controls for this driver. */
 #define V4L2_CID_USER_ADV7180_BASE		(V4L2_CID_USER_BASE + 0x1070)
 
+/* The base for the tc358743 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_TC358743_BASE		(V4L2_CID_USER_BASE + 0x1080)
+
 /* MPEG-class control IDs */
 /* The MPEG controls are applicable to all codec controls
  * and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vsp1.h b/include/uapi/linux/vsp1.h
index e18858f..9a82369 100644
--- a/include/uapi/linux/vsp1.h
+++ b/include/uapi/linux/vsp1.h
@@ -28,7 +28,7 @@
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config)
 
 struct vsp1_lut_config {
-	u32 lut[256];
+	__u32 lut[256];
 };
 
 #endif	/* __VSP1_USER_H__ */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca1..1e889aa 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@
 
 #define CXL_START_WORK_AMR		0x0000000000000001ULL
 #define CXL_START_WORK_NUM_IRQS		0x0000000000000002ULL
+#define CXL_START_WORK_ERR_FF		0x0000000000000004ULL
 #define CXL_START_WORK_ALL		(CXL_START_WORK_AMR |\
-					 CXL_START_WORK_NUM_IRQS)
+					 CXL_START_WORK_NUM_IRQS |\
+					 CXL_START_WORK_ERR_FF)
 
 
 /* Possible modes that an afu can be in */
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a..d8fc96e 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
 
 /* Video buffer addresses */
 #define VIDW_BUF_START(_buff)			(0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff)			(0x40A0 + ((_buff) * 8))
 #define VIDW_BUF_START1(_buff)			(0xA4 + ((_buff) * 8))
 #define VIDW_BUF_END(_buff)			(0xD0 + ((_buff) * 8))
 #define VIDW_BUF_END1(_buff)			(0xD4 + ((_buff) * 8))
diff --git a/include/video/vga.h b/include/video/vga.h
index cac567f..d334e64 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -18,7 +18,7 @@
 #define __linux_video_vga_h__
 
 #include <linux/types.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/vga.h>
 #include <asm/byteorder.h>
 
diff --git a/include/xen/events.h b/include/xen/events.h
index 7d95fdf..88da2ab 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -92,7 +92,6 @@
 #ifdef CONFIG_TRACING
 #define trace_xen_hvm_callback_vector xen_hvm_callback_vector
 #endif
-extern int xen_have_vector_callback;
 int xen_set_callback_via(uint64_t via);
 void xen_evtchn_do_upcall(struct pt_regs *regs);
 void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5cc49ea..8e03587 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -474,6 +474,23 @@
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
 
+#define XENPF_get_symbol      63
+struct xenpf_symdata {
+	/* IN/OUT variables */
+	uint32_t	namelen; /* size of 'name' buffer */
+
+	/* IN/OUT variables */
+	uint32_t	symnum; /* IN:  Symbol to read                       */
+				/* OUT: Next available symbol. If same as IN */
+				/* then we reached the end                   */
+
+	/* OUT variables */
+	GUEST_HANDLE(char) name;
+	uint64_t	address;
+	char            type;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+
 struct xen_platform_op {
 	uint32_t cmd;
 	uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -495,6 +512,7 @@
 		struct xenpf_cpu_hotadd        cpu_add;
 		struct xenpf_mem_hotadd        mem_add;
 		struct xenpf_core_parking      core_parking;
+		struct xenpf_symdata           symdata;
 		uint8_t                        pad[128];
 	} u;
 };
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index a483789..167071c 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -80,6 +80,7 @@
 #define __HYPERVISOR_kexec_op             37
 #define __HYPERVISOR_tmem_op              38
 #define __HYPERVISOR_xc_reserved_op       39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op            40
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
@@ -112,6 +113,7 @@
 #define VIRQ_MEM_EVENT  10 /* G. (DOM0) A memory event has occured           */
 #define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient                     */
 #define VIRQ_ENOMEM     12 /* G. (DOM0) Low on heap memory       */
+#define VIRQ_XENPMU     13  /* PMC interrupt                                 */
 
 /* Architecture-specific VIRQ definitions. */
 #define VIRQ_ARCH_0    16
@@ -585,26 +587,29 @@
 };
 
 /*
- * Start-of-day memory layout for the initial domain (DOM0):
+ * Start-of-day memory layout
+ *
  *  1. The domain is started within contiguous virtual-memory region.
  *  2. The contiguous region begins and ends on an aligned 4MB boundary.
- *  3. The region start corresponds to the load address of the OS image.
- *     If the load address is not 4MB aligned then the address is rounded down.
- *  4. This the order of bootstrap elements in the initial virtual region:
+ *  3. This is the order of bootstrap elements in the initial virtual region:
  *      a. relocated kernel image
  *      b. initial ram disk              [mod_start, mod_len]
+ *         (may be omitted)
  *      c. list of allocated page frames [mfn_list, nr_pages]
+ *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
  *      d. start_info_t structure        [register ESI (x86)]
- *      e. bootstrap page tables         [pt_base, CR3 (x86)]
- *      f. bootstrap stack               [register ESP (x86)]
- *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
- *  6. The initial ram disk may be omitted.
- *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ *         in the case of dom0, this page also contains the console info
+ *      e. unless dom0: xenstore ring page
+ *      f. unless dom0: console ring page
+ *      g. bootstrap page tables         [pt_base, CR3 (x86)]
+ *      h. bootstrap stack               [register ESP (x86)]
+ *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ *  5. The list of page frames forms a contiguous 'pseudo-physical' memory
  *     layout for the domain. In particular, the bootstrap virtual-memory
  *     region is a 1:1 mapping to the first section of the pseudo-physical map.
- *  8. All bootstrap elements are mapped read-writable for the guest OS. The
+ *  6. All bootstrap elements are mapped read-writable for the guest OS. The
  *     only exception is the bootstrap page table, which is mapped read-only.
- *  9. There is guaranteed to be at least 512kB padding after the final
+ *  7. There is guaranteed to be at least 512kB padding after the final
  *     bootstrap element. If necessary, the bootstrap virtual region is
  *     extended by an extra 4MB to ensure this.
  */
@@ -641,10 +646,12 @@
 };
 
 /* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
-#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
-#define SIF_MULTIBOOT_MOD (1<<2)  /* Is mod_start a multiboot module? */
-#define SIF_MOD_START_PFN (1<<3)  /* Is mod_start a PFN? */
+#define SIF_PRIVILEGED      (1<<0)  /* Is the domain privileged? */
+#define SIF_INITDOMAIN      (1<<1)  /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD   (1<<2)  /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN   (1<<3)  /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4)  /* Do Xen tools understand a virt. mapped */
+				    /* P->M making the 3 level tree obsolete? */
 #define SIF_PM_MASK       (0xFF<<8) /* reserve 1 byte for xen-pm options */
 
 /*
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
new file mode 100644
index 0000000..139efc9
--- /dev/null
+++ b/include/xen/interface/xenpmu.h
@@ -0,0 +1,94 @@
+#ifndef __XEN_PUBLIC_XENPMU_H__
+#define __XEN_PUBLIC_XENPMU_H__
+
+#include "xen.h"
+
+#define XENPMU_VER_MAJ    0
+#define XENPMU_VER_MIN    1
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
+ *
+ * @cmd  == XENPMU_* (PMU operation)
+ * @args == struct xenpmu_params
+ */
+/* ` enum xenpmu_op { */
+#define XENPMU_mode_get        0 /* Also used for getting PMU version */
+#define XENPMU_mode_set        1
+#define XENPMU_feature_get     2
+#define XENPMU_feature_set     3
+#define XENPMU_init            4
+#define XENPMU_finish          5
+#define XENPMU_lvtpc_set       6
+#define XENPMU_flush           7
+
+/* ` } */
+
+/* Parameters structure for HYPERVISOR_xenpmu_op call */
+struct xen_pmu_params {
+	/* IN/OUT parameters */
+	struct {
+		uint32_t maj;
+		uint32_t min;
+	} version;
+	uint64_t val;
+
+	/* IN parameters */
+	uint32_t vcpu;
+	uint32_t pad;
+};
+
+/* PMU modes:
+ * - XENPMU_MODE_OFF:   No PMU virtualization
+ * - XENPMU_MODE_SELF:  Guests can profile themselves
+ * - XENPMU_MODE_HV:    Guests can profile themselves, dom0 profiles
+ *                      itself and Xen
+ * - XENPMU_MODE_ALL:   Only dom0 has access to VPMU and it profiles
+ *                      everyone: itself, the hypervisor and the guests.
+ */
+#define XENPMU_MODE_OFF           0
+#define XENPMU_MODE_SELF          (1<<0)
+#define XENPMU_MODE_HV            (1<<1)
+#define XENPMU_MODE_ALL           (1<<2)
+
+/*
+ * PMU features:
+ * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
+ */
+#define XENPMU_FEATURE_INTEL_BTS  1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during PMU interrupt and sends an
+ * interrupt to appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest but some fields in xen_pmu_arch can be writable
+ * by both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+	/* Interrupted VCPU */
+	uint32_t vcpu_id;
+
+	/*
+	 * Physical processor on which the interrupt occurred. On non-privileged
+	 * guests this is set to vcpu_id.
+	 */
+	uint32_t pcpu_id;
+
+	/*
+	 * Domain that was interrupted. On non-privileged guests set to
+	 * DOMID_SELF.
+	 * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
+	 * XENPMU_MODE_ALL mode, domain ID of another domain.
+	 */
+	domid_t  domain_id;
+
+	uint8_t pad[6];
+
+	/* Architecture-specific information */
+	struct xen_pmu_arch pmu;
+};
+
+#endif /* __XEN_PUBLIC_XENPMU_H__ */
diff --git a/include/xen/page.h b/include/xen/page.h
index c5ed20b..a5983da 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -9,8 +9,8 @@
 }
 
 struct xen_memory_region {
-	phys_addr_t start;
-	phys_addr_t size;
+	unsigned long start_pfn;
+	unsigned long n_pfns;
 };
 
 #define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
diff --git a/init/Kconfig b/init/Kconfig
index bb9b4dd..02da9f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -883,6 +883,16 @@
 	bool
 
 #
+# For architectures that prefer to flush all TLBs after a number of pages
+# are unmapped instead of sending one IPI per page to flush. The architecture
+# must provide guarantees on what happens if a clean TLB cache entry is
+# written after the unmap. Details are in mm/rmap.c near the check for
+# should_defer_flush. The architecture should also consider if the full flush
+# and the refill costs are offset by the savings of sending fewer IPIs.
+config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	bool
+
+#
 # For architectures that know their GCC __int128 support is sound
 #
 config ARCH_SUPPORTS_INT128
@@ -917,7 +927,6 @@
 menuconfig CGROUPS
 	bool "Control Group support"
 	select KERNFS
-	select PERCPU_RWSEM
 	help
 	  This option adds support for grouping sets of processes together, for
 	  use with process control subsystems such as Cpusets, CFS, memory
@@ -1576,6 +1585,14 @@
 	  applications use these syscalls, you can disable this option to save
 	  space.
 
+config USERFAULTFD
+	bool "Enable userfaultfd() system call"
+	select ANON_INODES
+	depends on MMU
+	help
+	  Enable the userfaultfd() system call, which allows userland to
+	  intercept and handle page faults.
+
 config PCI_QUIRKS
 	default y
 	bool "Enable PCI quirk workarounds" if EXPERT
@@ -1748,17 +1765,23 @@
 
 	  See Documentation/nommu-mmap.txt for more information.
 
-config SYSTEM_TRUSTED_KEYRING
-	bool "Provide system-wide ring of trusted keys"
-	depends on KEYS
+config SYSTEM_DATA_VERIFICATION
+	def_bool n
+	select SYSTEM_TRUSTED_KEYRING
+	select KEYS
+	select CRYPTO
+	select ASYMMETRIC_KEY_TYPE
+	select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+	select PUBLIC_KEY_ALGO_RSA
+	select ASN1
+	select OID_REGISTRY
+	select X509_CERTIFICATE_PARSER
+	select PKCS7_MESSAGE_PARSER
 	help
-	  Provide a system keyring to which trusted keys can be added.  Keys in
-	  the keyring are considered to be trusted.  Keys may be added at will
-	  by the kernel from compiled-in data and from hardware key stores, but
-	  userspace may only add extra keys if those keys can be verified by
-	  keys already in the keyring.
-
-	  Keys in this keyring are used by module signature checking.
+	  Provide PKCS#7 message verification using the contents of the system
+	  trusted keyring to provide public keys.  This can then be used for
+	  module verification, kexec image verification and firmware blob
+	  verification.
 
 config PROFILING
 	bool "Profiling support"
@@ -1868,20 +1891,16 @@
 config MODULE_SIG
 	bool "Module signature verification"
 	depends on MODULES
-	select SYSTEM_TRUSTED_KEYRING
-	select KEYS
-	select CRYPTO
-	select ASYMMETRIC_KEY_TYPE
-	select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
-	select PUBLIC_KEY_ALGO_RSA
-	select ASN1
-	select OID_REGISTRY
-	select X509_CERTIFICATE_PARSER
+	select SYSTEM_DATA_VERIFICATION
 	help
 	  Check modules for valid signatures upon load: the signature
 	  is simply appended to the module. For more information see
 	  Documentation/module-signing.txt.
 
+	  Note that this option adds the OpenSSL development packages as a
+	  kernel build dependency so that the signing tool can use its crypto
+	  library.
+
 	  !!!WARNING!!!  If you enable this option, you MUST make sure that the
 	  module DOES NOT get stripped after being signed.  This includes the
 	  debuginfo strip done by some packagers (such as rpmbuild) and
diff --git a/kernel/Makefile b/kernel/Makefile
index 718fb8a..e0d7587 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -45,7 +45,6 @@
 obj-y += up.o
 endif
 obj-$(CONFIG_UID16) += uid16.o
-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_MODULE_SIG) += module_signing.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
@@ -65,7 +64,7 @@
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
 obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
+obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_KPROBES) += kprobes.o
@@ -100,6 +99,8 @@
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
 
+obj-$(CONFIG_HAS_IOMEM) += memremap.o
+
 $(obj)/configs.o: $(obj)/config_data.h
 
 # config_data.h contains the same information as ikconfig.h but gzipped.
@@ -112,99 +113,3 @@
 targets += config_data.h
 $(obj)/config_data.h: $(obj)/config_data.gz FORCE
 	$(call filechk,ikconfiggz)
-
-###############################################################################
-#
-# Roll all the X.509 certificates that we can find together and pull them into
-# the kernel so that they get loaded into the system trusted keyring during
-# boot.
-#
-# We look in the source root and the build root for all files whose name ends
-# in ".x509".  Unfortunately, this will generate duplicate filenames, so we
-# have make canonicalise the pathnames and then sort them to discard the
-# duplicates.
-#
-###############################################################################
-ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
-X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
-X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
-X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
-				$(or $(realpath $(CERT)),$(CERT))))
-X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
-
-ifeq ($(X509_CERTIFICATES),)
-$(warning *** No X.509 certificates found ***)
-endif
-
-ifneq ($(wildcard $(obj)/.x509.list),)
-ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
-$(warning X.509 certificate list changed to "$(X509_CERTIFICATES)" from "$(shell cat $(obj)/.x509.list)")
-$(shell rm $(obj)/.x509.list)
-endif
-endif
-
-kernel/system_certificates.o: $(obj)/x509_certificate_list
-
-quiet_cmd_x509certs  = CERTS   $@
-      cmd_x509certs  = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; $(kecho) "  - Including cert $(X509)")
-
-targets += $(obj)/x509_certificate_list
-$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
-	$(call if_changed,x509certs)
-
-targets += $(obj)/.x509.list
-$(obj)/.x509.list:
-	@echo $(X509_CERTIFICATES) >$@
-endif
-
-clean-files := x509_certificate_list .x509.list
-
-ifeq ($(CONFIG_MODULE_SIG),y)
-###############################################################################
-#
-# If module signing is requested, say by allyesconfig, but a key has not been
-# supplied, then one will need to be generated to make sure the build does not
-# fail and that the kernel may be used afterwards.
-#
-###############################################################################
-ifndef CONFIG_MODULE_SIG_HASH
-$(error Could not determine digest type to use from kernel config)
-endif
-
-signing_key.priv signing_key.x509: x509.genkey
-	@echo "###"
-	@echo "### Now generating an X.509 key pair to be used for signing modules."
-	@echo "###"
-	@echo "### If this takes a long time, you might wish to run rngd in the"
-	@echo "### background to keep the supply of entropy topped up.  It"
-	@echo "### needs to be run as root, and uses a hardware random"
-	@echo "### number generator if one is available."
-	@echo "###"
-	openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
-		-batch -x509 -config x509.genkey \
-		-outform DER -out signing_key.x509 \
-		-keyout signing_key.priv 2>&1
-	@echo "###"
-	@echo "### Key pair generated."
-	@echo "###"
-
-x509.genkey:
-	@echo Generating X.509 key generation config
-	@echo  >x509.genkey "[ req ]"
-	@echo >>x509.genkey "default_bits = 4096"
-	@echo >>x509.genkey "distinguished_name = req_distinguished_name"
-	@echo >>x509.genkey "prompt = no"
-	@echo >>x509.genkey "string_mask = utf8only"
-	@echo >>x509.genkey "x509_extensions = myexts"
-	@echo >>x509.genkey
-	@echo >>x509.genkey "[ req_distinguished_name ]"
-	@echo >>x509.genkey "#O = Unspecified company"
-	@echo >>x509.genkey "CN = Build time autogenerated kernel key"
-	@echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
-	@echo >>x509.genkey
-	@echo >>x509.genkey "[ myexts ]"
-	@echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
-	@echo >>x509.genkey "keyUsage=digitalSignature"
-	@echo >>x509.genkey "subjectKeyIdentifier=hash"
-	@echo >>x509.genkey "authorityKeyIdentifier=keyid"
-endif
diff --git a/kernel/audit.c b/kernel/audit.c
index f9e6065..662c007 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1761,7 +1761,7 @@
 	} else
 		audit_log_format(ab, " name=(null)");
 
-	if (n->ino != (unsigned long)-1)
+	if (n->ino != AUDIT_INO_UNSET)
 		audit_log_format(ab, " inode=%lu"
 				 " dev=%02x:%02x mode=%#ho"
 				 " ouid=%u ogid=%u rdev=%02x:%02x",
diff --git a/kernel/audit.h b/kernel/audit.h
index d641f9b..dadf86a 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -50,6 +50,7 @@
 
 /* Rule lists */
 struct audit_watch;
+struct audit_fsnotify_mark;
 struct audit_tree;
 struct audit_chunk;
 
@@ -252,6 +253,7 @@
 extern int selinux_audit_rule_update(void);
 
 extern struct mutex audit_filter_mutex;
+extern int audit_del_rule(struct audit_entry *);
 extern void audit_free_rule_rcu(struct rcu_head *);
 extern struct list_head audit_filter_list[];
 
@@ -269,6 +271,15 @@
 extern void audit_remove_watch_rule(struct audit_krule *krule);
 extern char *audit_watch_path(struct audit_watch *watch);
 extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
+
+extern struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pathname, int len);
+extern char *audit_mark_path(struct audit_fsnotify_mark *mark);
+extern void audit_remove_mark(struct audit_fsnotify_mark *audit_mark);
+extern void audit_remove_mark_rule(struct audit_krule *krule);
+extern int audit_mark_compare(struct audit_fsnotify_mark *mark, unsigned long ino, dev_t dev);
+extern int audit_dupe_exe(struct audit_krule *new, struct audit_krule *old);
+extern int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark);
+
 #else
 #define audit_put_watch(w) {}
 #define audit_get_watch(w) {}
@@ -278,6 +289,13 @@
 #define audit_watch_path(w) ""
 #define audit_watch_compare(w, i, d) 0
 
+#define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL))
+#define audit_mark_path(m) ""
+#define audit_remove_mark(m)
+#define audit_remove_mark_rule(k)
+#define audit_mark_compare(m, i, d) 0
+#define audit_exe_compare(t, m) (-EINVAL)
+#define audit_dupe_exe(n, o) (-EINVAL)
 #endif /* CONFIG_AUDIT_WATCH */
 
 #ifdef CONFIG_AUDIT_TREE
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
new file mode 100644
index 0000000..27c6046
--- /dev/null
+++ b/kernel/audit_fsnotify.c
@@ -0,0 +1,216 @@
+/* audit_fsnotify.c -- tracking inodes
+ *
+ * Copyright 2003-2009,2014-2015 Red Hat, Inc.
+ * Copyright 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright 2005 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/namei.h>
+#include <linux/netlink.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include "audit.h"
+
+/*
+ * This mark lives on the parent directory of the inode in question,
+ * but dev, ino, and path describe the child.
+ */
+struct audit_fsnotify_mark {
+	dev_t dev;		/* associated superblock device */
+	unsigned long ino;	/* associated inode number */
+	char *path;		/* insertion path */
+	struct fsnotify_mark mark; /* fsnotify mark on the inode */
+	struct audit_krule *rule;
+};
+
+/* fsnotify handle. */
+static struct fsnotify_group *audit_fsnotify_group;
+
+/* fsnotify events we care about. */
+#define AUDIT_FS_EVENTS (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
+			 FS_MOVE_SELF | FS_EVENT_ON_CHILD)
+
+static void audit_fsnotify_mark_free(struct audit_fsnotify_mark *audit_mark)
+{
+	kfree(audit_mark->path);
+	kfree(audit_mark);
+}
+
+static void audit_fsnotify_free_mark(struct fsnotify_mark *mark)
+{
+	struct audit_fsnotify_mark *audit_mark;
+
+	audit_mark = container_of(mark, struct audit_fsnotify_mark, mark);
+	audit_fsnotify_mark_free(audit_mark);
+}
+
+char *audit_mark_path(struct audit_fsnotify_mark *mark)
+{
+	return mark->path;
+}
+
+int audit_mark_compare(struct audit_fsnotify_mark *mark, unsigned long ino, dev_t dev)
+{
+	if (mark->ino == AUDIT_INO_UNSET)
+		return 0;
+	return (mark->ino == ino) && (mark->dev == dev);
+}
+
+static void audit_update_mark(struct audit_fsnotify_mark *audit_mark,
+			     struct inode *inode)
+{
+	audit_mark->dev = inode ? inode->i_sb->s_dev : AUDIT_DEV_UNSET;
+	audit_mark->ino = inode ? inode->i_ino : AUDIT_INO_UNSET;
+}
+
+struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pathname, int len)
+{
+	struct audit_fsnotify_mark *audit_mark;
+	struct path path;
+	struct dentry *dentry;
+	struct inode *inode;
+	int ret;
+
+	if (pathname[0] != '/' || pathname[len-1] == '/')
+		return ERR_PTR(-EINVAL);
+
+	dentry = kern_path_locked(pathname, &path);
+	if (IS_ERR(dentry))
+		return (void *)dentry; /* returning an error */
+	inode = path.dentry->d_inode;
+	mutex_unlock(&inode->i_mutex);
+
+	audit_mark = kzalloc(sizeof(*audit_mark), GFP_KERNEL);
+	if (unlikely(!audit_mark)) {
+		audit_mark = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	fsnotify_init_mark(&audit_mark->mark, audit_fsnotify_free_mark);
+	audit_mark->mark.mask = AUDIT_FS_EVENTS;
+	audit_mark->path = pathname;
+	audit_update_mark(audit_mark, dentry->d_inode);
+	audit_mark->rule = krule;
+
+	ret = fsnotify_add_mark(&audit_mark->mark, audit_fsnotify_group, inode, NULL, true);
+	if (ret < 0) {
+		audit_fsnotify_mark_free(audit_mark);
+		audit_mark = ERR_PTR(ret);
+	}
+out:
+	dput(dentry);
+	path_put(&path);
+	return audit_mark;
+}
+
+static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, char *op)
+{
+	struct audit_buffer *ab;
+	struct audit_krule *rule = audit_mark->rule;
+
+	if (!audit_enabled)
+		return;
+	ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+	if (unlikely(!ab))
+		return;
+	audit_log_format(ab, "auid=%u ses=%u op=",
+			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
+			 audit_get_sessionid(current));
+	audit_log_string(ab, op);
+	audit_log_format(ab, " path=");
+	audit_log_untrustedstring(ab, audit_mark->path);
+	audit_log_key(ab, rule->filterkey);
+	audit_log_format(ab, " list=%d res=1", rule->listnr);
+	audit_log_end(ab);
+}
+
+void audit_remove_mark(struct audit_fsnotify_mark *audit_mark)
+{
+	fsnotify_destroy_mark(&audit_mark->mark, audit_fsnotify_group);
+	fsnotify_put_mark(&audit_mark->mark);
+}
+
+void audit_remove_mark_rule(struct audit_krule *krule)
+{
+	struct audit_fsnotify_mark *mark = krule->exe;
+
+	audit_remove_mark(mark);
+}
+
+static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark)
+{
+	struct audit_krule *rule = audit_mark->rule;
+	struct audit_entry *entry = container_of(rule, struct audit_entry, rule);
+
+	audit_mark_log_rule_change(audit_mark, "autoremove_rule");
+	audit_del_rule(entry);
+}
+
+/* Update mark data in audit rules based on fsnotify events. */
+static int audit_mark_handle_event(struct fsnotify_group *group,
+				    struct inode *to_tell,
+				    struct fsnotify_mark *inode_mark,
+				    struct fsnotify_mark *vfsmount_mark,
+				    u32 mask, void *data, int data_type,
+				    const unsigned char *dname, u32 cookie)
+{
+	struct audit_fsnotify_mark *audit_mark;
+	struct inode *inode = NULL;
+
+	audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark);
+
+	BUG_ON(group != audit_fsnotify_group);
+
+	switch (data_type) {
+	case (FSNOTIFY_EVENT_PATH):
+		inode = ((struct path *)data)->dentry->d_inode;
+		break;
+	case (FSNOTIFY_EVENT_INODE):
+		inode = (struct inode *)data;
+		break;
+	default:
+		BUG();
+		return 0;
+	};
+
+	if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) {
+		if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL))
+			return 0;
+		audit_update_mark(audit_mark, inode);
+	} else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
+		audit_autoremove_mark_rule(audit_mark);
+
+	return 0;
+}
+
+static const struct fsnotify_ops audit_mark_fsnotify_ops = {
+	.handle_event =	audit_mark_handle_event,
+};
+
+static int __init audit_fsnotify_init(void)
+{
+	audit_fsnotify_group = fsnotify_alloc_group(&audit_mark_fsnotify_ops);
+	if (IS_ERR(audit_fsnotify_group)) {
+		audit_fsnotify_group = NULL;
+		audit_panic("cannot create audit fsnotify group");
+	}
+	return 0;
+}
+device_initcall(audit_fsnotify_init);
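
The new file leans on the embedded-member idiom throughout: the fsnotify core
only ever hands back the struct fsnotify_mark, and audit recovers its enclosing
struct audit_fsnotify_mark with container_of(). A minimal standalone sketch of
that idiom (the names below are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner_mark {              /* plays the role of fsnotify_mark */
            int mask;
    };

    struct outer_mark {              /* plays the role of audit_fsnotify_mark */
            unsigned long ino;
            struct inner_mark mark;  /* embedded member */
    };

    /* Like audit_fsnotify_free_mark(): the callback receives only the
     * embedded member and must recover the enclosing object. */
    static void free_cb(struct inner_mark *m)
    {
            struct outer_mark *o = container_of(m, struct outer_mark, mark);
            printf("outer ino=%lu\n", o->ino);
    }

    int main(void)
    {
            struct outer_mark o = { .ino = 42 };
            free_cb(&o.mark);        /* prints "outer ino=42" */
            return 0;
    }
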
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index b0f9877..94ecdab 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -479,6 +479,8 @@
 		if (rule->tree) {
 			/* not a half-baked one */
 			audit_tree_log_remove_rule(rule);
+			if (entry->rule.exe)
+				audit_remove_mark(entry->rule.exe);
 			rule->tree = NULL;
 			list_del_rcu(&entry->list);
 			list_del(&entry->rule.list);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 6e30024..656c7e9 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -138,7 +138,7 @@
 
 int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
 {
-	return (watch->ino != (unsigned long)-1) &&
+	return (watch->ino != AUDIT_INO_UNSET) &&
 		(watch->ino == ino) &&
 		(watch->dev == dev);
 }
@@ -179,8 +179,8 @@
 	INIT_LIST_HEAD(&watch->rules);
 	atomic_set(&watch->count, 1);
 	watch->path = path;
-	watch->dev = (dev_t)-1;
-	watch->ino = (unsigned long)-1;
+	watch->dev = AUDIT_DEV_UNSET;
+	watch->ino = AUDIT_INO_UNSET;
 
 	return watch;
 }
@@ -203,7 +203,6 @@
 	if (IS_ERR(watch))
 		return PTR_ERR(watch);
 
-	audit_get_watch(watch);
 	krule->watch = watch;
 
 	return 0;
@@ -313,6 +312,8 @@
 				list_replace(&oentry->rule.list,
 					     &nentry->rule.list);
 			}
+			if (oentry->rule.exe)
+				audit_remove_mark(oentry->rule.exe);
 
 			audit_watch_log_rule_change(r, owatch, "updated_rules");
 
@@ -343,6 +344,8 @@
 		list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
 			e = container_of(r, struct audit_entry, rule);
 			audit_watch_log_rule_change(r, w, "remove_rule");
+			if (e->rule.exe)
+				audit_remove_mark(e->rule.exe);
 			list_del(&r->rlist);
 			list_del(&r->list);
 			list_del_rcu(&e->list);
@@ -387,19 +390,20 @@
 
 		watch_found = 1;
 
-		/* put krule's and initial refs to temporary watch */
-		audit_put_watch(watch);
+		/* put krule's ref to temporary watch */
 		audit_put_watch(watch);
 
 		audit_get_watch(w);
 		krule->watch = watch = w;
+
+		audit_put_parent(parent);
 		break;
 	}
 
 	if (!watch_found) {
-		audit_get_parent(parent);
 		watch->parent = parent;
 
+		audit_get_watch(watch);
 		list_add(&watch->wlist, &parent->watches);
 	}
 	list_add(&krule->rlist, &watch->rules);
@@ -437,9 +441,6 @@
 
 	audit_add_to_parent(krule, parent);
 
-	/* match get in audit_find_parent or audit_init_parent */
-	audit_put_parent(parent);
-
 	h = audit_hash_ino((u32)watch->ino);
 	*list = &audit_inode_hash[h];
 error:
@@ -496,7 +497,7 @@
 	if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
 		audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
 	else if (mask & (FS_DELETE|FS_MOVED_FROM))
-		audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
+		audit_update_watch(parent, dname, AUDIT_DEV_UNSET, AUDIT_INO_UNSET, 1);
 	else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
 		audit_remove_parent_watches(parent);
 
@@ -517,3 +518,36 @@
 	return 0;
 }
 device_initcall(audit_watch_init);
+
+int audit_dupe_exe(struct audit_krule *new, struct audit_krule *old)
+{
+	struct audit_fsnotify_mark *audit_mark;
+	char *pathname;
+
+	pathname = kstrdup(audit_mark_path(old->exe), GFP_KERNEL);
+	if (!pathname)
+		return -ENOMEM;
+
+	audit_mark = audit_alloc_mark(new, pathname, strlen(pathname));
+	if (IS_ERR(audit_mark)) {
+		kfree(pathname);
+		return PTR_ERR(audit_mark);
+	}
+	new->exe = audit_mark;
+
+	return 0;
+}
+
+int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+{
+	struct file *exe_file;
+	unsigned long ino;
+	dev_t dev;
+
+	rcu_read_lock();
+	exe_file = rcu_dereference(tsk->mm->exe_file);
+	ino = exe_file->f_inode->i_ino;
+	dev = exe_file->f_inode->i_sb->s_dev;
+	rcu_read_unlock();
+	return audit_mark_compare(mark, ino, dev);
+}
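
audit_exe_compare() above identifies the executable by its (inode, device)
pair rather than by path string, so a match survives renames of the path. A
userspace analogue of the same identity check, using stat(2) (the paths below
are only examples):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat self, target;

            /* Compare by (st_dev, st_ino), the same identity the audit
             * exe filter uses, instead of comparing path strings. */
            if (stat("/proc/self/exe", &self) || stat("/bin/true", &target))
                    return 1;

            printf("match=%d\n", self.st_ino == target.st_ino &&
                                 self.st_dev == target.st_dev);
            return 0;
    }
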
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 72e1660a..7714d93 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -405,6 +405,12 @@
 		if (f->val > AUDIT_MAX_FIELD_COMPARE)
 			return -EINVAL;
 		break;
+	case AUDIT_EXE:
+		if (f->op != Audit_equal)
+			return -EINVAL;
+		if (entry->rule.listnr != AUDIT_FILTER_EXIT)
+			return -EINVAL;
+		break;
 	};
 	return 0;
 }
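
In userspace terms (assuming a matching auditctl release), such a rule would be
loaded with something like `auditctl -a always,exit -F exe=/usr/bin/passwd`;
the validation above rejects any operator other than equality and any list
other than the exit filter.
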
@@ -419,6 +425,7 @@
 	size_t remain = datasz - sizeof(struct audit_rule_data);
 	int i;
 	char *str;
+	struct audit_fsnotify_mark *audit_mark;
 
 	entry = audit_to_entry_common(data);
 	if (IS_ERR(entry))
@@ -539,6 +546,24 @@
 			entry->rule.buflen += f->val;
 			entry->rule.filterkey = str;
 			break;
+		case AUDIT_EXE:
+			if (entry->rule.exe || f->val > PATH_MAX)
+				goto exit_free;
+			str = audit_unpack_string(&bufp, &remain, f->val);
+			if (IS_ERR(str)) {
+				err = PTR_ERR(str);
+				goto exit_free;
+			}
+			entry->rule.buflen += f->val;
+
+			audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
+			if (IS_ERR(audit_mark)) {
+				kfree(str);
+				err = PTR_ERR(audit_mark);
+				goto exit_free;
+			}
+			entry->rule.exe = audit_mark;
+			break;
 		}
 	}
 
@@ -549,10 +574,10 @@
 	return entry;
 
 exit_free:
-	if (entry->rule.watch)
-		audit_put_watch(entry->rule.watch); /* matches initial get */
 	if (entry->rule.tree)
 		audit_put_tree(entry->rule.tree); /* that's the temporary one */
+	if (entry->rule.exe)
+		audit_remove_mark(entry->rule.exe); /* that's the template one */
 	audit_free_rule(entry);
 	return ERR_PTR(err);
 }
@@ -617,6 +642,10 @@
 			data->buflen += data->values[i] =
 				audit_pack_string(&bufp, krule->filterkey);
 			break;
+		case AUDIT_EXE:
+			data->buflen += data->values[i] =
+				audit_pack_string(&bufp, audit_mark_path(krule->exe));
+			break;
 		case AUDIT_LOGINUID_SET:
 			if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
 				data->fields[i] = AUDIT_LOGINUID;
@@ -680,6 +709,12 @@
 			if (strcmp(a->filterkey, b->filterkey))
 				return 1;
 			break;
+		case AUDIT_EXE:
+			/* both paths exist based on above type compare */
+			if (strcmp(audit_mark_path(a->exe),
+				   audit_mark_path(b->exe)))
+				return 1;
+			break;
 		case AUDIT_UID:
 		case AUDIT_EUID:
 		case AUDIT_SUID:
@@ -801,8 +836,14 @@
 				err = -ENOMEM;
 			else
 				new->filterkey = fk;
+			break;
+		case AUDIT_EXE:
+			err = audit_dupe_exe(new, old);
+			break;
 		}
 		if (err) {
+			if (new->exe)
+				audit_remove_mark(new->exe);
 			audit_free_rule(entry);
 			return ERR_PTR(err);
 		}
@@ -863,7 +904,7 @@
 	struct audit_watch *watch = entry->rule.watch;
 	struct audit_tree *tree = entry->rule.tree;
 	struct list_head *list;
-	int err;
+	int err = 0;
 #ifdef CONFIG_AUDITSYSCALL
 	int dont_count = 0;
 
@@ -881,7 +922,7 @@
 		/* normally audit_add_tree_rule() will free it on failure */
 		if (tree)
 			audit_put_tree(tree);
-		goto error;
+		return err;
 	}
 
 	if (watch) {
@@ -895,14 +936,14 @@
 			 */
 			if (tree)
 				audit_put_tree(tree);
-			goto error;
+			return err;
 		}
 	}
 	if (tree) {
 		err = audit_add_tree_rule(&entry->rule);
 		if (err) {
 			mutex_unlock(&audit_filter_mutex);
-			goto error;
+			return err;
 		}
 	}
 
@@ -933,19 +974,13 @@
 #endif
 	mutex_unlock(&audit_filter_mutex);
 
- 	return 0;
-
-error:
-	if (watch)
-		audit_put_watch(watch); /* tmp watch, matches initial get */
 	return err;
 }
 
 /* Remove an existing rule from filterlist. */
-static inline int audit_del_rule(struct audit_entry *entry)
+int audit_del_rule(struct audit_entry *entry)
 {
 	struct audit_entry  *e;
-	struct audit_watch *watch = entry->rule.watch;
 	struct audit_tree *tree = entry->rule.tree;
 	struct list_head *list;
 	int ret = 0;
@@ -961,7 +996,6 @@
 	mutex_lock(&audit_filter_mutex);
 	e = audit_find_rule(entry, &list);
 	if (!e) {
-		mutex_unlock(&audit_filter_mutex);
 		ret = -ENOENT;
 		goto out;
 	}
@@ -972,9 +1006,8 @@
 	if (e->rule.tree)
 		audit_remove_tree_rule(&e->rule);
 
-	list_del_rcu(&e->list);
-	list_del(&e->rule.list);
-	call_rcu(&e->rcu, audit_free_rule_rcu);
+	if (e->rule.exe)
+		audit_remove_mark_rule(&e->rule);
 
 #ifdef CONFIG_AUDITSYSCALL
 	if (!dont_count)
@@ -983,11 +1016,14 @@
 	if (!audit_match_signal(entry))
 		audit_signals--;
 #endif
-	mutex_unlock(&audit_filter_mutex);
+
+	list_del_rcu(&e->list);
+	list_del(&e->rule.list);
+	call_rcu(&e->rcu, audit_free_rule_rcu);
 
 out:
-	if (watch)
-		audit_put_watch(watch); /* match initial get */
+	mutex_unlock(&audit_filter_mutex);
+
 	if (tree)
 		audit_put_tree(tree);	/* that's the temporary one */
 
@@ -1077,8 +1113,11 @@
 		WARN_ON(1);
 	}
 
-	if (err || type == AUDIT_DEL_RULE)
+	if (err || type == AUDIT_DEL_RULE) {
+		if (entry->rule.exe)
+			audit_remove_mark(entry->rule.exe);
 		audit_free_rule(entry);
+	}
 
 	return err;
 }
@@ -1370,6 +1409,8 @@
 		return 0;
 
 	nentry = audit_dupe_rule(r);
+	if (entry->rule.exe)
+		audit_remove_mark(entry->rule.exe);
 	if (IS_ERR(nentry)) {
 		/* save the first error encountered for the
 		 * return value */
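
The audit_del_rule() rework above funnels every path, including the -ENOENT
case, through a single exit label that drops audit_filter_mutex exactly once.
A small standalone sketch of that single-exit locking pattern (pthread stands
in for the kernel mutex):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
    static int table[16];

    /* Every early failure jumps to "out", so the unlock happens exactly
     * once regardless of which path was taken. */
    static int del_entry(int idx)
    {
            int ret = 0;

            pthread_mutex_lock(&filter_lock);
            if (idx < 0 || idx >= 16 || !table[idx]) {
                    ret = -ENOENT;
                    goto out;
            }
            table[idx] = 0;
    out:
            pthread_mutex_unlock(&filter_lock);
            return ret;
    }

    int main(void)
    {
            table[3] = 1;
            return del_entry(3) != 0 || del_entry(3) != -ENOENT;
    }
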
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e85bdfd..b86cc04 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -180,7 +180,7 @@
 		return 0;
 
 	list_for_each_entry(n, &ctx->names_list, list) {
-		if ((n->ino != -1) &&
+		if ((n->ino != AUDIT_INO_UNSET) &&
 		    ((n->mode & S_IFMT) == mode))
 			return 1;
 	}
@@ -466,6 +466,9 @@
 				result = audit_comparator(ctx->ppid, f->op, f->val);
 			}
 			break;
+		case AUDIT_EXE:
+			result = audit_exe_compare(tsk, rule->exe);
+			break;
 		case AUDIT_UID:
 			result = audit_uid_comparator(cred->uid, f->op, f->uid);
 			break;
@@ -1680,7 +1683,7 @@
 		aname->should_free = true;
 	}
 
-	aname->ino = (unsigned long)-1;
+	aname->ino = AUDIT_INO_UNSET;
 	aname->type = type;
 	list_add_tail(&aname->list, &context->names_list);
 
@@ -1922,7 +1925,7 @@
 	if (inode)
 		audit_copy_inode(found_child, dentry, inode);
 	else
-		found_child->ino = (unsigned long)-1;
+		found_child->ino = AUDIT_INO_UNSET;
 }
 EXPORT_SYMBOL_GPL(__audit_inode_child);
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f3f5cd5..2cf0f79 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1342,7 +1342,7 @@
 	if (root != &cgrp_dfl_root)
 		for_each_subsys(ss, ssid)
 			if (root->subsys_mask & (1 << ssid))
-				seq_printf(seq, ",%s", ss->legacy_name);
+				seq_show_option(seq, ss->legacy_name, NULL);
 	if (root->flags & CGRP_ROOT_NOPREFIX)
 		seq_puts(seq, ",noprefix");
 	if (root->flags & CGRP_ROOT_XATTR)
@@ -1350,13 +1350,14 @@
 
 	spin_lock(&release_agent_path_lock);
 	if (strlen(root->release_agent_path))
-		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+		seq_show_option(seq, "release_agent",
+				root->release_agent_path);
 	spin_unlock(&release_agent_path_lock);
 
 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
 		seq_puts(seq, ",clone_children");
 	if (strlen(root->name))
-		seq_printf(seq, ",name=%s", root->name);
+		seq_show_option(seq, "name", root->name);
 	return 0;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 03aa2e6..7d5f0f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -454,8 +454,9 @@
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
-		tmp->vm_flags &= ~VM_LOCKED;
+		tmp->vm_flags &= ~(VM_LOCKED|VM_UFFD_MISSING|VM_UFFD_WP);
 		tmp->vm_next = tmp->vm_prev = NULL;
+		tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file_inode(file);
diff --git a/kernel/futex.c b/kernel/futex.c
index c4a182f..6e443ef 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -64,6 +64,7 @@
 #include <linux/hugetlb.h>
 #include <linux/freezer.h>
 #include <linux/bootmem.h>
+#include <linux/fault-inject.h>
 
 #include <asm/futex.h>
 
@@ -258,6 +259,66 @@
 
 static struct futex_hash_bucket *futex_queues;
 
+/*
+ * Fault injections for futexes.
+ */
+#ifdef CONFIG_FAIL_FUTEX
+
+static struct {
+	struct fault_attr attr;
+
+	u32 ignore_private;
+} fail_futex = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_private = 0,
+};
+
+static int __init setup_fail_futex(char *str)
+{
+	return setup_fault_attr(&fail_futex.attr, str);
+}
+__setup("fail_futex=", setup_fail_futex);
+
+static bool should_fail_futex(bool fshared)
+{
+	if (fail_futex.ignore_private && !fshared)
+		return false;
+
+	return should_fail(&fail_futex.attr, 1);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_futex_debugfs(void)
+{
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+
+	dir = fault_create_debugfs_attr("fail_futex", NULL,
+					&fail_futex.attr);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	if (!debugfs_create_bool("ignore-private", mode, dir,
+				 &fail_futex.ignore_private)) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(fail_futex_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else
+static inline bool should_fail_futex(bool fshared)
+{
+	return false;
+}
+#endif /* CONFIG_FAIL_FUTEX */
+
 static inline void futex_get_mm(union futex_key *key)
 {
 	atomic_inc(&key->private.mm->mm_count);
@@ -413,6 +474,9 @@
 	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	/*
 	 * PROCESS_PRIVATE futexes are fast.
 	 * As the mm cannot disappear under us and the 'key' only needs
@@ -428,6 +492,10 @@
 	}
 
 again:
+	/* Ignore any VERIFY_READ mapping (futex common case) */
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	err = get_user_pages_fast(address, 1, 1, &page);
 	/*
 	 * If write access is not required (eg. FUTEX_WAIT), try
@@ -516,7 +584,7 @@
 		 * A RO anonymous page will never change and thus doesn't make
 		 * sense for futex operations.
 		 */
-		if (ro) {
+		if (unlikely(should_fail_futex(fshared)) || ro) {
 			err = -EFAULT;
 			goto out;
 		}
@@ -974,6 +1042,9 @@
 {
 	u32 uninitialized_var(curval);
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
 		return -EFAULT;
 
@@ -1015,12 +1086,18 @@
 	if (get_futex_value_locked(&uval, uaddr))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Detect deadlocks.
 	 */
 	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
 		return -EDEADLK;
 
+	if ((unlikely(should_fail_futex(true))))
+		return -EDEADLK;
+
 	/*
 	 * Lookup existing state first. If it exists, try to attach to
 	 * its pi_state.
@@ -1155,6 +1232,9 @@
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
+	if (unlikely(should_fail_futex(true)))
+		ret = -EFAULT;
+
 	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
 		ret = -EFAULT;
 	else if (curval != uval)
@@ -1457,6 +1537,9 @@
 	if (get_futex_value_locked(&curval, pifutex))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Find the top_waiter and determine if there are additional waiters.
 	 * If the caller intends to requeue more than 1 waiter to pifutex,
@@ -2268,8 +2351,11 @@
 /*
  * Userspace tried a 0 -> TID atomic transition of the futex value
  * and failed. The kernel side here does the whole locking operation:
- * if there are waiters then it will block, it does PI, etc. (Due to
- * races the kernel might see a 0 value of the futex too.)
+ * if there are waiters then it will block as a consequence of relying
+ * on rt-mutexes; it does PI, etc. (Due to races the kernel might see
+ * a 0 value of the futex too.)
+ *
+ * Also serves as futex trylock_pi(), with the corresponding semantics.
  */
 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 			 ktime_t *time, int trylock)
@@ -2300,6 +2386,10 @@
 
 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
 	if (unlikely(ret)) {
+		/*
+		 * Atomic work succeeded and we got the lock,
+		 * or failed. Either way, we do _not_ block.
+		 */
 		switch (ret) {
 		case 1:
 			/* We got the lock. */
@@ -2530,7 +2620,7 @@
  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
  * @uaddr:	the futex we initially wait on (non-pi)
  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
- * 		the same type, no requeueing from private to shared, etc.
+ *		the same type, no requeueing from private to shared, etc.
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
@@ -3005,6 +3095,8 @@
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 		      cmd == FUTEX_WAIT_BITSET ||
 		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
+			return -EFAULT;
 		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
 			return -EFAULT;
 		if (!timespec_valid(&ts))
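
The fail_futex site plugs into the generic fault-injection framework, so it
can be driven either from the kernel command line
(`fail_futex=<interval>,<probability>,<space>,<times>`, the standard
setup_fault_attr() format) or at runtime through the debugfs directory it
creates, e.g. /sys/kernel/debug/fail_futex/probability, plus the new
ignore-private switch for leaving process-private futexes alone.
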
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 52ebaca..f7dd15d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -54,7 +54,7 @@
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct static_key *key, int enable);
+static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
@@ -63,13 +63,8 @@
 		return;
 
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_ENABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-	}
-	atomic_inc(&key->enabled);
+	if (atomic_inc_return(&key->enabled) == 1)
+		jump_label_update(key);
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -87,10 +82,7 @@
 		atomic_inc(&key->enabled);
 		schedule_delayed_work(work, rate_limit);
 	} else {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_ENABLE);
+		jump_label_update(key);
 	}
 	jump_label_unlock();
 }
@@ -149,7 +141,7 @@
 	return 0;
 }
 
-/* 
+/*
  * Update code which is definitely not currently executing.
  * Architectures which need heavyweight synchronization to modify
  * running code can override this to make the non-live update case
@@ -158,37 +150,54 @@
 void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
 					    enum jump_label_type type)
 {
-	arch_jump_label_transform(entry, type);	
+	arch_jump_label_transform(entry, type);
+}
+
+static inline struct jump_entry *static_key_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+}
+
+static inline bool static_key_type(struct static_key *key)
+{
+	return (unsigned long)key->entries & JUMP_TYPE_MASK;
+}
+
+static inline struct static_key *jump_entry_key(struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~1UL);
+}
+
+static bool jump_entry_branch(struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static enum jump_label_type jump_label_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool enabled = static_key_enabled(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return enabled ^ branch;
 }
 
 static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
-				struct jump_entry *stop, int enable)
+				struct jump_entry *stop)
 {
-	for (; (entry < stop) &&
-	      (entry->key == (jump_label_t)(unsigned long)key);
-	      entry++) {
+	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
 		 * init code, see jump_label_invalidate_module_init().
 		 */
 		if (entry->code && kernel_text_address(entry->code))
-			arch_jump_label_transform(entry, enable);
+			arch_jump_label_transform(entry, jump_label_type(entry));
 	}
 }
 
-static enum jump_label_type jump_label_type(struct static_key *key)
-{
-	bool true_branch = jump_label_get_branch_default(key);
-	bool state = static_key_enabled(key);
-
-	if ((!true_branch && state) || (true_branch && !state))
-		return JUMP_LABEL_ENABLE;
-
-	return JUMP_LABEL_DISABLE;
-}
-
 void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
@@ -202,8 +211,11 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_type(iterk));
+		/* rewrite NOPs */
+		if (jump_label_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
+
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -222,6 +234,16 @@
 
 #ifdef CONFIG_MODULES
 
+static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool type = static_key_type(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return type ^ branch;
+}
+
 struct static_key_mod {
 	struct static_key_mod *next;
 	struct jump_entry *entries;
@@ -243,17 +265,15 @@
 				start, end);
 }
 
-static void __jump_label_mod_update(struct static_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key)
 {
-	struct static_key_mod *mod = key->next;
+	struct static_key_mod *mod;
 
-	while (mod) {
+	for (mod = key->next; mod; mod = mod->next) {
 		struct module *m = mod->mod;
 
 		__jump_label_update(key, mod->entries,
-				    m->jump_entries + m->num_jump_entries,
-				    enable);
-		mod = mod->next;
+				    m->jump_entries + m->num_jump_entries);
 	}
 }
 
@@ -276,7 +296,9 @@
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+		/* Only write NOPs for arch_branch_static(). */
+		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 	}
 }
 
@@ -297,7 +319,7 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -318,8 +340,9 @@
 		jlm->next = key->next;
 		key->next = jlm;
 
-		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
-			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
+		/* Only update if we've changed from our initial state */
+		if (jump_label_type(iter) != jump_label_init_type(iter))
+			__jump_label_update(key, iter, iter_stop);
 	}
 
 	return 0;
@@ -334,10 +357,10 @@
 	struct static_key_mod *jlm, **prev;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (iter->key == (jump_label_t)(unsigned long)key)
+		if (jump_entry_key(iter) == key)
 			continue;
 
-		key = (struct static_key *)(unsigned long)iter->key;
+		key = jump_entry_key(iter);
 
 		if (within_module(iter->key, mod))
 			continue;
@@ -439,14 +462,14 @@
 	return ret;
 }
 
-static void jump_label_update(struct static_key *key, int enable)
+static void jump_label_update(struct static_key *key)
 {
 	struct jump_entry *stop = __stop___jump_table;
-	struct jump_entry *entry = jump_label_get_entries(key);
+	struct jump_entry *entry = static_key_entries(key);
 #ifdef CONFIG_MODULES
 	struct module *mod;
 
-	__jump_label_mod_update(key, enable);
+	__jump_label_mod_update(key);
 
 	preempt_disable();
 	mod = __module_address((unsigned long)key);
@@ -456,7 +479,44 @@
 #endif
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, stop, enable);
+		__jump_label_update(key, entry, stop);
 }
 
-#endif
+#ifdef CONFIG_STATIC_KEYS_SELFTEST
+static DEFINE_STATIC_KEY_TRUE(sk_true);
+static DEFINE_STATIC_KEY_FALSE(sk_false);
+
+static __init int jump_label_test(void)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		WARN_ON(static_key_enabled(&sk_true.key) != true);
+		WARN_ON(static_key_enabled(&sk_false.key) != false);
+
+		WARN_ON(!static_branch_likely(&sk_true));
+		WARN_ON(!static_branch_unlikely(&sk_true));
+		WARN_ON(static_branch_likely(&sk_false));
+		WARN_ON(static_branch_unlikely(&sk_false));
+
+		static_branch_disable(&sk_true);
+		static_branch_enable(&sk_false);
+
+		WARN_ON(static_key_enabled(&sk_true.key) == true);
+		WARN_ON(static_key_enabled(&sk_false.key) == false);
+
+		WARN_ON(static_branch_likely(&sk_true));
+		WARN_ON(static_branch_unlikely(&sk_true));
+		WARN_ON(!static_branch_likely(&sk_false));
+		WARN_ON(!static_branch_unlikely(&sk_false));
+
+		static_branch_enable(&sk_true);
+		static_branch_disable(&sk_false);
+	}
+
+	return 0;
+}
+late_initcall(jump_label_test);
+#endif /* STATIC_KEYS_SELFTEST */
+
+#endif /* HAVE_JUMP_LABEL */
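
The `enabled ^ branch` rule above is the heart of the rework: a site needs the
jump form exactly when the key's current state disagrees with the branch's
compile-time default, and the NOP form when they agree. A standalone
truth-table sketch (the enum names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum jtype { TYPE_NOP = 0, TYPE_JMP = 1 };

    /* Mirror of jump_label_type(): patch to a jump iff the key state
     * and the branch's default polarity disagree. */
    static enum jtype site_type(bool enabled, bool branch)
    {
            return (enabled ^ branch) ? TYPE_JMP : TYPE_NOP;
    }

    int main(void)
    {
            for (int e = 0; e < 2; e++)
                    for (int b = 0; b < 2; b++)
                            printf("enabled=%d branch=%d -> %s\n", e, b,
                                   site_type(e, b) == TYPE_JMP ? "JMP" : "NOP");
            return 0;
    }
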
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 490924c..9ff173d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -248,15 +248,16 @@
  * kthread_create_on_node - create a kthread.
  * @threadfn: the function to run until signal_pending(current).
  * @data: data ptr for @threadfn.
- * @node: memory node number.
+ * @node: task and thread structures for the thread are allocated on this node
  * @namefmt: printf-style name for the thread.
  *
  * Description: This helper function creates and names a kernel
  * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run().
+ * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
  *
  * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give -1.
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
  * When woken, the thread will run @threadfn() with @data as its
  * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which no one will call kthread_stop(), or
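
For callers, the clarified kerneldoc boils down to: pass NUMA_NO_NODE unless
the thread will be bound to a specific CPU, and remember the thread comes back
stopped. A hedged kernel-side sketch (the functions around
kthread_create_on_node() below are hypothetical):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/numa.h>
    #include <linux/sched.h>

    static int my_worker_fn(void *data)      /* hypothetical thread body */
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static struct task_struct *start_my_worker(void)
    {
            struct task_struct *t;

            /* No CPU binding planned, so no NUMA preference either. */
            t = kthread_create_on_node(my_worker_fn, NULL, NUMA_NO_NODE,
                                       "my-worker");
            if (!IS_ERR(t))
                    wake_up_process(t);      /* threads are created stopped */
            return t;
    }
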
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 7dd5c99..8e96f6c 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
 
-obj-y += mutex.o semaphore.o rwsem.o
+obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -20,11 +20,9 @@
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
-obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 652a8ee..f325672 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -88,6 +88,19 @@
 	__up_read(&brw->rw_sem);
 }
 
+int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
+{
+	if (unlikely(!update_fast_ctr(brw, +1))) {
+		if (!__down_read_trylock(&brw->rw_sem))
+			return 0;
+		atomic_inc(&brw->slow_read_ctr);
+		__up_read(&brw->rw_sem);
+	}
+
+	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
+	return 1;
+}
+
 void percpu_up_read(struct percpu_rw_semaphore *brw)
 {
 	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
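
percpu_down_read_trylock() gives readers an opportunistic, non-blocking entry:
the fast path bumps the per-CPU counter, and only when a writer is around does
it fall back to trying the slow-path rwsem. A hedged sketch of a caller (the
wrapper function below is hypothetical):

    #include <linux/percpu-rwsem.h>
    #include <linux/types.h>

    static struct percpu_rw_semaphore my_sem;   /* hypothetical semaphore */

    /* Returns true if fn ran under the read lock; false means a writer
     * held (or was acquiring) the semaphore and the caller must retry or
     * take the blocking percpu_down_read() path instead. */
    static bool run_read_side(void (*fn)(void))
    {
            if (!percpu_down_read_trylock(&my_sem))
                    return false;
            fn();
            percpu_up_read(&my_sem);
            return true;
    }
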
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 6c5da483..f17a3e3 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -55,27 +55,29 @@
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
 		cpu_relax_lowlatency();
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
+		cnts = atomic_read_acquire(&lock->cnts);
 	}
 }
 
 /**
- * queue_read_lock_slowpath - acquire read lock of a queue rwlock
+ * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
+ * @cnts: Current qrwlock lock value
  */
-void queue_read_lock_slowpath(struct qrwlock *lock)
+void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 {
-	u32 cnts;
-
 	/*
 	 * Readers come here when they cannot get the lock without waiting
 	 */
 	if (unlikely(in_interrupt())) {
 		/*
-		 * Readers in interrupt context will spin until the lock is
-		 * available without waiting in the queue.
+		 * Readers in interrupt context will get the lock immediately
+		 * if the writer is just waiting (not holding the lock yet).
+		 * The rspin_until_writer_unlock() function returns immediately
+		 * in this case. Otherwise, they will spin (with ACQUIRE
+		 * semantics) until the lock is available without waiting in
+		 * the queue.
 		 */
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
 		rspin_until_writer_unlock(lock, cnts);
 		return;
 	}
@@ -87,16 +89,11 @@
 	arch_spin_lock(&lock->lock);
 
 	/*
-	 * At the head of the wait queue now, wait until the writer state
-	 * goes to 0 and then try to increment the reader count and get
-	 * the lock. It is possible that an incoming writer may steal the
-	 * lock in the interim, so it is necessary to check the writer byte
-	 * to make sure that the write lock isn't taken.
+	 * The ACQUIRE semantics of the following spinning code ensure
+	 * that accesses can't leak upwards out of our subsequent critical
+	 * section in the case that the lock is currently held for write.
 	 */
-	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		cpu_relax_lowlatency();
-
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
 
 	/*
@@ -104,13 +101,13 @@
 	 */
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_read_lock_slowpath);
+EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
- * queue_write_lock_slowpath - acquire write lock of a queue rwlock
+ * queued_write_lock_slowpath - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-void queue_write_lock_slowpath(struct qrwlock *lock)
+void queued_write_lock_slowpath(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -119,7 +116,7 @@
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
-	    (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0))
+	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
 	/*
@@ -130,7 +127,7 @@
 		struct __qrwlock *l = (struct __qrwlock *)lock;
 
 		if (!READ_ONCE(l->wmode) &&
-		   (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
+		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax_lowlatency();
@@ -140,8 +137,8 @@
 	for (;;) {
 		cnts = atomic_read(&lock->cnts);
 		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
-				    _QW_LOCKED) == _QW_WAITING))
+		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
+					    _QW_LOCKED) == _QW_WAITING))
 			break;
 
 		cpu_relax_lowlatency();
@@ -149,4 +146,4 @@
 unlock:
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_write_lock_slowpath);
+EXPORT_SYMBOL(queued_write_lock_slowpath);
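
The conversions above move the ordering into the atomic operation itself: the
read side needs ACQUIRE semantics so the critical section cannot float above
the point where the lock word was observed free, while the wmode handoff can
stay relaxed because a later acquire covers it. A simplified, standalone C11
model of the reader slow path (the constants are illustrative, not the
kernel's exact layout):

    #include <stdatomic.h>
    #include <stdio.h>

    #define QW_WMASK 0xffu      /* writer byte (illustrative) */
    #define QR_BIAS  0x100u     /* one reader ticket */

    static atomic_uint cnts;

    static void read_lock_slow(void)
    {
            /* Acquire RMW: nothing after this can be reordered before
             * the moment we observed the writer byte clear. */
            unsigned int c = atomic_fetch_add_explicit(&cnts, QR_BIAS,
                                                       memory_order_acquire);
            while (c & QW_WMASK)
                    c = atomic_load_explicit(&cnts, memory_order_acquire);
    }

    static void read_unlock(void)
    {
            atomic_fetch_sub_explicit(&cnts, QR_BIAS, memory_order_release);
    }

    int main(void)
    {
            read_lock_slow();
            puts("in read-side critical section");
            read_unlock();
            return 0;
    }
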
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38c4920..337c881 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@
 
 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_kick_node(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_head(struct qspinlock *lock,
 					   struct mcs_spinlock *node) { }
 
@@ -440,7 +440,7 @@
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(next);
+	pv_kick_node(lock, next);
 
 release:
 	/*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index df19ae4..c8e6e9a 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -22,9 +22,14 @@
 
 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
 
+/*
+ * Queue node uses: vcpu_running & vcpu_halted.
+ * Queue head uses: vcpu_running & vcpu_hashed.
+ */
 enum vcpu_state {
 	vcpu_running = 0,
-	vcpu_halted,
+	vcpu_halted,		/* Used only in pv_wait_node */
+	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
 };
 
 struct pv_node {
@@ -153,7 +158,8 @@
 
 /*
  * Wait for node->locked to become true, halt the vcpu after a short spin.
- * pv_kick_node() is used to wake the vcpu again.
+ * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
+ * its behalf.
  */
 static void pv_wait_node(struct mcs_spinlock *node)
 {
@@ -172,9 +178,9 @@
 		 *
 		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
 		 *     MB			      MB
-		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
 		 *
-		 * Matches the xchg() from pv_kick_node().
+		 * Matches the cmpxchg() from pv_kick_node().
 		 */
 		smp_store_mb(pn->state, vcpu_halted);
 
@@ -182,9 +188,10 @@
 			pv_wait(&pn->state, vcpu_halted);
 
 		/*
-		 * Reset the vCPU state to avoid unncessary CPU kicking
+		 * If pv_kick_node() changed us to vcpu_hashed, retain that value
+		 * so that pv_wait_head() knows not to also try to hash this lock.
 		 */
-		WRITE_ONCE(pn->state, vcpu_running);
+		cmpxchg(&pn->state, vcpu_halted, vcpu_running);
 
 		/*
 		 * If the locked flag is still not set after wakeup, it is a
@@ -194,6 +201,7 @@
 		 * MCS lock will be released soon.
 		 */
 	}
+
 	/*
 	 * By now our node->locked should be 1 and our caller will not actually
 	 * spin-wait for it. We do however rely on our caller to do a
@@ -202,24 +210,35 @@
 }
 
 /*
- * Called after setting next->locked = 1, used to wake those stuck in
- * pv_wait_node().
+ * Called after setting next->locked = 1 when we're the lock owner.
+ *
+ * Instead of waking the waiters stuck in pv_wait_node(), advance their state
+ * so that they wait in pv_wait_head(); this avoids a wake/sleep cycle.
  */
-static void pv_kick_node(struct mcs_spinlock *node)
+static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
+	struct __qspinlock *l = (void *)lock;
 
 	/*
-	 * Note that because node->locked is already set, this actual
-	 * mcs_spinlock entry could be re-used already.
+	 * If the vCPU is indeed halted, advance its state to match that of
+	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
+	 * observe its next->locked value and advance itself.
 	 *
-	 * This should be fine however, kicking people for no reason is
-	 * harmless.
-	 *
-	 * See the comment in pv_wait_node().
+	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
 	 */
-	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
-		pv_kick(pn->cpu);
+	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
+		return;
+
+	/*
+	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
+	 *
+	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
+	 * the hash table later on at unlock time, no atomic instruction is
+	 * needed.
+	 */
+	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+	(void)pv_hash(lock, pn);
 }
 
 /*
@@ -233,6 +252,13 @@
 	struct qspinlock **lp = NULL;
 	int loop;
 
+	/*
+	 * If pv_kick_node() already advanced our state, we don't need to
+	 * insert ourselves into the hash table anymore.
+	 */
+	if (READ_ONCE(pn->state) == vcpu_hashed)
+		lp = (struct qspinlock **)1;
+
 	for (;;) {
 		for (loop = SPIN_THRESHOLD; loop; loop--) {
 			if (!READ_ONCE(l->locked))
@@ -240,17 +266,22 @@
 			cpu_relax();
 		}
 
-		WRITE_ONCE(pn->state, vcpu_halted);
 		if (!lp) { /* ONCE */
+			WRITE_ONCE(pn->state, vcpu_hashed);
 			lp = pv_hash(lock, pn);
+
 			/*
-			 * lp must be set before setting _Q_SLOW_VAL
+			 * We must hash before setting _Q_SLOW_VAL, such that
+			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
+			 * we'll be sure to be able to observe our hash entry.
 			 *
-			 * [S] lp = lock                [RmW] l = l->locked = 0
-			 *     MB                             MB
-			 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+			 *   [S] pn->state
+			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
+			 *       MB                           RMB
+			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
+			 *                                [L] pn->state
 			 *
-			 * Matches the cmpxchg() in __pv_queued_spin_unlock().
+			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 			 */
 			if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
 				/*
@@ -287,24 +318,34 @@
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
-	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	u8 locked;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(lockval == _Q_LOCKED_VAL))
+	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
-	if (unlikely(lockval != _Q_SLOW_VAL)) {
-		if (debug_locks_silent)
-			return;
-		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+	if (unlikely(locked != _Q_SLOW_VAL)) {
+		WARN(!debug_locks_silent,
+		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
+		     (unsigned long)lock, atomic_read(&lock->val));
 		return;
 	}
 
 	/*
+	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
+	 * so we need a barrier to order the read of the node data in
+	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
+	 *
+	 * Matches the cmpxchg() in pv_wait_head() setting _Q_SLOW_VAL.
+	 */
+	smp_rmb();
+
+	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.
 	 */
@@ -319,8 +360,11 @@
 	/*
 	 * At this point the memory pointed at by lock can be freed/reused,
 	 * however we can still use the pv_node to kick the CPU.
+	 * The other vCPU may not really be halted, but kicking an active
+	 * vCPU is harmless other than the additional latency in completing
+	 * the unlock.
 	 */
-	if (READ_ONCE(node->state) == vcpu_halted)
+	if (READ_ONCE(node->state) == vcpu_hashed)
 		pv_kick(node->cpu);
 }
 /*
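
The state machine here has exactly one tricky edge: the lock holder may only
hash the lock on a waiter's behalf if that waiter is still halted, which is
why pv_kick_node() uses cmpxchg() rather than xchg(). A standalone sketch of
that conditional handoff:

    #include <stdatomic.h>
    #include <stdio.h>

    enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

    static _Atomic int state = vcpu_halted;

    int main(void)
    {
            int expect = vcpu_halted;

            /* Mirrors pv_kick_node(): only a still-halted waiter is
             * advanced to vcpu_hashed; a waiter that already woke up
             * (vcpu_running) wins the race and hashes the lock itself. */
            if (atomic_compare_exchange_strong(&state, &expect, vcpu_hashed))
                    puts("holder hashes the lock for the halted waiter");
            else
                    puts("waiter already running; it hashes the lock itself");
            return 0;
    }
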
diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
deleted file mode 100644
index 1d96dd0..0000000
--- a/kernel/locking/rtmutex-tester.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * RT-Mutex-tester: scriptable tester for rt mutexes
- *
- * started by Thomas Gleixner:
- *
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- */
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/sched/rt.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/freezer.h>
-#include <linux/stat.h>
-
-#include "rtmutex.h"
-
-#define MAX_RT_TEST_THREADS	8
-#define MAX_RT_TEST_MUTEXES	8
-
-static spinlock_t rttest_lock;
-static atomic_t rttest_event;
-
-struct test_thread_data {
-	int			opcode;
-	int			opdata;
-	int			mutexes[MAX_RT_TEST_MUTEXES];
-	int			event;
-	struct device		dev;
-};
-
-static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static struct task_struct *threads[MAX_RT_TEST_THREADS];
-static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
-
-enum test_opcodes {
-	RTTEST_NOP = 0,
-	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
-	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
-	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
-	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
-	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
-	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
-	/* 9, 10 - reserved for BKL commemoration */
-	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
-	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
-	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
-};
-
-static int handle_op(struct test_thread_data *td, int lockwakeup)
-{
-	int i, id, ret = -EINVAL;
-
-	switch(td->opcode) {
-
-	case RTTEST_NOP:
-		return 0;
-
-	case RTTEST_LOCKCONT:
-		td->mutexes[td->opdata] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return 0;
-
-	case RTTEST_RESET:
-		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
-			if (td->mutexes[i] == 4) {
-				rt_mutex_unlock(&mutexes[i]);
-				td->mutexes[i] = 0;
-			}
-		}
-		return 0;
-
-	case RTTEST_RESETEVENT:
-		atomic_set(&rttest_event, 0);
-		return 0;
-
-	default:
-		if (lockwakeup)
-			return ret;
-	}
-
-	switch(td->opcode) {
-
-	case RTTEST_LOCK:
-	case RTTEST_LOCKNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_lock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 4;
-		return 0;
-
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKINTNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = ret ? 0 : 4;
-		return ret ? -EINTR : 0;
-
-	case RTTEST_UNLOCK:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
-			return ret;
-
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_unlock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 0;
-		return 0;
-
-	default:
-		break;
-	}
-	return ret;
-}
-
-/*
- * Schedule replacement for rtsem_down(). Only called for threads with
- * PF_MUTEX_TESTER set.
- *
- * This allows us to have finegrained control over the event flow.
- *
- */
-void schedule_rt_mutex_test(struct rt_mutex *mutex)
-{
-	int tid, op, dat;
-	struct test_thread_data *td;
-
-	/* We have to lookup the task */
-	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
-		if (threads[tid] == current)
-			break;
-	}
-
-	BUG_ON(tid == MAX_RT_TEST_THREADS);
-
-	td = &thread_data[tid];
-
-	op = td->opcode;
-	dat = td->opdata;
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			break;
-
-		if (td->mutexes[dat] != 1)
-			break;
-
-		td->mutexes[dat] = 2;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	default:
-		break;
-	}
-
-	schedule();
-
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 3;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return;
-
-	default:
-		return;
-	}
-
-	td->opcode = 0;
-
-	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			int ret;
-
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 1);
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (td->opcode == RTTEST_LOCKCONT)
-				break;
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-	}
-
-	/* Restore previous command and data */
-	td->opcode = op;
-	td->opdata = dat;
-}
-
-static int test_func(void *data)
-{
-	struct test_thread_data *td = data;
-	int ret;
-
-	current->flags |= PF_MUTEX_TESTER;
-	set_freezable();
-	allow_signal(SIGHUP);
-
-	for(;;) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 0);
-			set_current_state(TASK_INTERRUPTIBLE);
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-		try_to_freeze();
-
-		if (signal_pending(current))
-			flush_signals(current);
-
-		if(kthread_should_stop())
-			break;
-	}
-	return 0;
-}
-
-/**
- * sysfs_test_command - interface for test commands
- * @dev:	thread reference
- * @buf:	command for actual step
- * @count:	length of buffer
- *
- * command syntax:
- *
- * opcode:data
- */
-static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct sched_param schedpar;
-	struct test_thread_data *td;
-	char cmdbuf[32];
-	int op, dat, tid, ret;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tid = td->dev.id;
-
-	/* strings from sysfs write are not 0 terminated! */
-	if (count >= sizeof(cmdbuf))
-		return -EINVAL;
-
-	/* strip of \n: */
-	if (buf[count-1] == '\n')
-		count--;
-	if (count < 1)
-		return -EINVAL;
-
-	memcpy(cmdbuf, buf, count);
-	cmdbuf[count] = 0;
-
-	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
-		return -EINVAL;
-
-	switch (op) {
-	case RTTEST_SCHEDOT:
-		schedpar.sched_priority = 0;
-		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
-		if (ret)
-			return ret;
-		set_user_nice(current, 0);
-		break;
-
-	case RTTEST_SCHEDRT:
-		schedpar.sched_priority = dat;
-		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
-		if (ret)
-			return ret;
-		break;
-
-	case RTTEST_SIGNAL:
-		send_sig(SIGHUP, threads[tid], 0);
-		break;
-
-	default:
-		if (td->opcode > 0)
-			return -EBUSY;
-		td->opdata = dat;
-		td->opcode = op;
-		wake_up_process(threads[tid]);
-	}
-
-	return count;
-}
-
-/**
- * sysfs_test_status - sysfs interface for rt tester
- * @dev:	thread to query
- * @buf:	char buffer to be filled with thread status info
- */
-static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
-				 char *buf)
-{
-	struct test_thread_data *td;
-	struct task_struct *tsk;
-	char *curr = buf;
-	int i;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tsk = threads[td->dev.id];
-
-	spin_lock(&rttest_lock);
-
-	curr += sprintf(curr,
-		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
-		td->opcode, td->event, tsk->state,
-			(MAX_RT_PRIO - 1) - tsk->prio,
-			(MAX_RT_PRIO - 1) - tsk->normal_prio,
-		tsk->pi_blocked_on);
-
-	for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
-		curr += sprintf(curr, "%d", td->mutexes[i]);
-
-	spin_unlock(&rttest_lock);
-
-	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
-			mutexes[td->dev.id].owner);
-
-	return curr - buf;
-}
-
-static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL);
-static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command);
-
-static struct bus_type rttest_subsys = {
-	.name = "rttest",
-	.dev_name = "rttest",
-};
-
-static int init_test_thread(int id)
-{
-	thread_data[id].dev.bus = &rttest_subsys;
-	thread_data[id].dev.id = id;
-
-	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
-	if (IS_ERR(threads[id]))
-		return PTR_ERR(threads[id]);
-
-	return device_register(&thread_data[id].dev);
-}
-
-static int init_rttest(void)
-{
-	int ret, i;
-
-	spin_lock_init(&rttest_lock);
-
-	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
-		rt_mutex_init(&mutexes[i]);
-
-	ret = subsys_system_register(&rttest_subsys, NULL);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
-		ret = init_test_thread(i);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
-		if (ret)
-			break;
-	}
-
-	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
-
-	return ret;
-}
-
-device_initcall(init_rttest);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 5674b07..7781d80 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1120,7 +1120,7 @@
 
 		debug_rt_mutex_print_deadlock(waiter);
 
-		schedule_rt_mutex(lock);
+		schedule();
 
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 7844f8f..4f5f83c 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -15,28 +15,6 @@
 #include <linux/rtmutex.h>
 
 /*
- * The rtmutex in kernel tester is independent of rtmutex debugging. We
- * call schedule_rt_mutex_test() instead of schedule() for the tasks which
- * belong to the tester. That way we can delay the wakeup path of those
- * threads to provoke lock stealing and testing of  complex boosting scenarios.
- */
-#ifdef CONFIG_RT_MUTEX_TESTER
-
-extern void schedule_rt_mutex_test(struct rt_mutex *lock);
-
-#define schedule_rt_mutex(_lock)				\
-  do {								\
-	if (!(current->flags & PF_MUTEX_TESTER))		\
-		schedule();					\
-	else							\
-		schedule_rt_mutex_test(_lock);			\
-  } while (0)
-
-#else
-# define schedule_rt_mutex(_lock)			schedule()
-#endif
-
-/*
  * This is the control structure for tasks blocked on a rt_mutex,
  * which is allocated on the kernel stack on of the blocked task.
  *
diff --git a/kernel/memremap.c b/kernel/memremap.c
new file mode 100644
index 0000000..72b0c66
--- /dev/null
+++ b/kernel/memremap.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/memory_hotplug.h>
+
+#ifndef ioremap_cache
+/* temporary while we convert existing ioremap_cache users to memremap */
+__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
+{
+	return ioremap(offset, size);
+}
+#endif
+
+/**
+ * memremap() - remap an iomem_resource as cacheable memory
+ * @offset: iomem resource start address
+ * @size: size of remap
+ * @flags: either MEMREMAP_WB or MEMREMAP_WT
+ *
+ * memremap() is "ioremap" for cases where it is known that the resource
+ * being mapped does not have i/o side effects and the __iomem
+ * annotation is not applicable.
+ *
+ * MEMREMAP_WB - matches the default mapping for "System RAM" on
+ * the architecture.  This is usually a read-allocate write-back cache.
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
+ * memremap() will bypass establishing a new mapping and instead return
+ * a pointer into the direct map.
+ *
+ * MEMREMAP_WT - establish a mapping whereby writes either bypass the
+ * cache or are written through to memory and never exist in a
+ * cache-dirty state with respect to program visibility.  Attempts to
+ * map "System RAM" with this mapping type will fail.
+ */
+void *memremap(resource_size_t offset, size_t size, unsigned long flags)
+{
+	int is_ram = region_intersects(offset, size, "System RAM");
+	void *addr = NULL;
+
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
+				&offset, (unsigned long) size);
+		return NULL;
+	}
+
+	/* Try all mapping types requested until one returns non-NULL */
+	if (flags & MEMREMAP_WB) {
+		flags &= ~MEMREMAP_WB;
+		/*
+		 * MEMREMAP_WB is special in that it can be satisfied
+		 * from the direct map.  Some archs depend on the
+		 * capability of memremap() to autodetect cases where
+		 * the requested range is potentially in "System RAM"
+		 */
+		if (is_ram == REGION_INTERSECTS)
+			addr = __va(offset);
+		else
+			addr = ioremap_cache(offset, size);
+	}
+
+	/*
+	 * If we don't have a mapping yet and more request flags are
+	 * pending then we will be attempting to establish a new virtual
+	 * address mapping.  Enforce that this mapping is not aliasing
+	 * "System RAM"
+	 */
+	if (!addr && is_ram == REGION_INTERSECTS && flags) {
+		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
+				&offset, (unsigned long) size);
+		return NULL;
+	}
+
+	if (!addr && (flags & MEMREMAP_WT)) {
+		flags &= ~MEMREMAP_WT;
+		addr = ioremap_wt(offset, size);
+	}
+
+	return addr;
+}
+EXPORT_SYMBOL(memremap);
+
+void memunmap(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		iounmap((void __iomem *) addr);
+}
+EXPORT_SYMBOL(memunmap);
+
+static void devm_memremap_release(struct device *dev, void *res)
+{
+	memunmap(res);
+}
+
+static int devm_memremap_match(struct device *dev, void *res, void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+void *devm_memremap(struct device *dev, resource_size_t offset,
+		size_t size, unsigned long flags)
+{
+	void **ptr, *addr;
+
+	ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = memremap(offset, size, flags);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_memremap);
+
+void devm_memunmap(struct device *dev, void *addr)
+{
+	WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
+			       addr));
+	memunmap(addr);
+}
+EXPORT_SYMBOL(devm_memunmap);
+
+#ifdef CONFIG_ZONE_DEVICE
+struct page_map {
+	struct resource res;
+};
+
+static void devm_memremap_pages_release(struct device *dev, void *res)
+{
+	struct page_map *page_map = res;
+
+	/* pages are dead and unused, undo the arch mapping */
+	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+}
+
+void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	int is_ram = region_intersects(res->start, resource_size(res),
+			"System RAM");
+	struct page_map *page_map;
+	int error, nid;
+
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+				__func__, res);
+		return ERR_PTR(-ENXIO);
+	}
+
+	if (is_ram == REGION_INTERSECTS)
+		return __va(res->start);
+
+	page_map = devres_alloc(devm_memremap_pages_release,
+			sizeof(*page_map), GFP_KERNEL);
+	if (!page_map)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(&page_map->res, res, sizeof(*res));
+
+	nid = dev_to_node(dev);
+	if (nid < 0)
+		nid = 0;
+
+	error = arch_add_memory(nid, res->start, resource_size(res), true);
+	if (error) {
+		devres_free(page_map);
+		return ERR_PTR(error);
+	}
+
+	devres_add(dev, page_map);
+	return __va(res->start);
+}
+EXPORT_SYMBOL(devm_memremap_pages);
+#endif /* CONFIG_ZONE_DEVICE */
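
The API above is easiest to read from a call site. A minimal sketch of a
hypothetical driver using the devm_ variant, assuming @res describes a range
with no I/O side effects (the foo_* naming is illustrative, not part of this
patch):

	static void *foo_map(struct device *dev, struct resource *res)
	{
		void *base;

		/* MEMREMAP_WB: reuses the direct map when the range is RAM */
		base = devm_memremap(dev, res->start, resource_size(res),
				     MEMREMAP_WB);
		if (!base)		/* this version returns NULL on failure */
			return ERR_PTR(-ENOMEM);

		return base;		/* torn down automatically on detach */
	}

Note the devres hookup: an explicit devm_memunmap() is only needed when the
mapping must go away before the device does.
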
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index be5b8fa..bd62f5c 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -10,11 +10,8 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/err.h>
-#include <crypto/public_key.h>
-#include <crypto/hash.h>
-#include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
+#include <crypto/public_key.h>
 #include "module-internal.h"
 
 /*
@@ -28,170 +25,22 @@
  *	- Information block
  */
 struct module_signature {
-	u8	algo;		/* Public-key crypto algorithm [enum pkey_algo] */
-	u8	hash;		/* Digest algorithm [enum hash_algo] */
-	u8	id_type;	/* Key identifier type [enum pkey_id_type] */
-	u8	signer_len;	/* Length of signer's name */
-	u8	key_id_len;	/* Length of key identifier */
+	u8	algo;		/* Public-key crypto algorithm [0] */
+	u8	hash;		/* Digest algorithm [0] */
+	u8	id_type;	/* Key identifier type [PKEY_ID_PKCS7] */
+	u8	signer_len;	/* Length of signer's name [0] */
+	u8	key_id_len;	/* Length of key identifier [0] */
 	u8	__pad[3];
 	__be32	sig_len;	/* Length of signature data */
 };
 
 /*
- * Digest the module contents.
- */
-static struct public_key_signature *mod_make_digest(enum hash_algo hash,
-						    const void *mod,
-						    unsigned long modlen)
-{
-	struct public_key_signature *pks;
-	struct crypto_shash *tfm;
-	struct shash_desc *desc;
-	size_t digest_size, desc_size;
-	int ret;
-
-	pr_devel("==>%s()\n", __func__);
-	
-	/* Allocate the hashing algorithm we're going to need and find out how
-	 * big the hash operational data will be.
-	 */
-	tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
-	if (IS_ERR(tfm))
-		return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);
-
-	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
-	digest_size = crypto_shash_digestsize(tfm);
-
-	/* We allocate the hash operational data storage on the end of our
-	 * context data and the digest output buffer on the end of that.
-	 */
-	ret = -ENOMEM;
-	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
-	if (!pks)
-		goto error_no_pks;
-
-	pks->pkey_hash_algo	= hash;
-	pks->digest		= (u8 *)pks + sizeof(*pks) + desc_size;
-	pks->digest_size	= digest_size;
-
-	desc = (void *)pks + sizeof(*pks);
-	desc->tfm   = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	ret = crypto_shash_init(desc);
-	if (ret < 0)
-		goto error;
-
-	ret = crypto_shash_finup(desc, mod, modlen, pks->digest);
-	if (ret < 0)
-		goto error;
-
-	crypto_free_shash(tfm);
-	pr_devel("<==%s() = ok\n", __func__);
-	return pks;
-
-error:
-	kfree(pks);
-error_no_pks:
-	crypto_free_shash(tfm);
-	pr_devel("<==%s() = %d\n", __func__, ret);
-	return ERR_PTR(ret);
-}
-
-/*
- * Extract an MPI array from the signature data.  This represents the actual
- * signature.  Each raw MPI is prefaced by a BE 2-byte value indicating the
- * size of the MPI in bytes.
- *
- * RSA signatures only have one MPI, so currently we only read one.
- */
-static int mod_extract_mpi_array(struct public_key_signature *pks,
-				 const void *data, size_t len)
-{
-	size_t nbytes;
-	MPI mpi;
-
-	if (len < 3)
-		return -EBADMSG;
-	nbytes = ((const u8 *)data)[0] << 8 | ((const u8 *)data)[1];
-	data += 2;
-	len -= 2;
-	if (len != nbytes)
-		return -EBADMSG;
-
-	mpi = mpi_read_raw_data(data, nbytes);
-	if (!mpi)
-		return -ENOMEM;
-	pks->mpi[0] = mpi;
-	pks->nr_mpi = 1;
-	return 0;
-}
-
-/*
- * Request an asymmetric key.
- */
-static struct key *request_asymmetric_key(const char *signer, size_t signer_len,
-					  const u8 *key_id, size_t key_id_len)
-{
-	key_ref_t key;
-	size_t i;
-	char *id, *q;
-
-	pr_devel("==>%s(,%zu,,%zu)\n", __func__, signer_len, key_id_len);
-
-	/* Construct an identifier. */
-	id = kmalloc(signer_len + 2 + key_id_len * 2 + 1, GFP_KERNEL);
-	if (!id)
-		return ERR_PTR(-ENOKEY);
-
-	memcpy(id, signer, signer_len);
-
-	q = id + signer_len;
-	*q++ = ':';
-	*q++ = ' ';
-	for (i = 0; i < key_id_len; i++) {
-		*q++ = hex_asc[*key_id >> 4];
-		*q++ = hex_asc[*key_id++ & 0x0f];
-	}
-
-	*q = 0;
-
-	pr_debug("Look up: \"%s\"\n", id);
-
-	key = keyring_search(make_key_ref(system_trusted_keyring, 1),
-			     &key_type_asymmetric, id);
-	if (IS_ERR(key))
-		pr_warn("Request for unknown module key '%s' err %ld\n",
-			id, PTR_ERR(key));
-	kfree(id);
-
-	if (IS_ERR(key)) {
-		switch (PTR_ERR(key)) {
-			/* Hide some search errors */
-		case -EACCES:
-		case -ENOTDIR:
-		case -EAGAIN:
-			return ERR_PTR(-ENOKEY);
-		default:
-			return ERR_CAST(key);
-		}
-	}
-
-	pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key_ref_to_ptr(key)));
-	return key_ref_to_ptr(key);
-}
-
-/*
  * Verify the signature on a module.
  */
 int mod_verify_sig(const void *mod, unsigned long *_modlen)
 {
-	struct public_key_signature *pks;
 	struct module_signature ms;
-	struct key *key;
-	const void *sig;
 	size_t modlen = *_modlen, sig_len;
-	int ret;
 
 	pr_devel("==>%s(,%zu)\n", __func__, modlen);
 
@@ -205,46 +54,24 @@
 	if (sig_len >= modlen)
 		return -EBADMSG;
 	modlen -= sig_len;
-	if ((size_t)ms.signer_len + ms.key_id_len >= modlen)
-		return -EBADMSG;
-	modlen -= (size_t)ms.signer_len + ms.key_id_len;
-
 	*_modlen = modlen;
-	sig = mod + modlen;
 
-	/* For the moment, only support RSA and X.509 identifiers */
-	if (ms.algo != PKEY_ALGO_RSA ||
-	    ms.id_type != PKEY_ID_X509)
+	if (ms.id_type != PKEY_ID_PKCS7) {
+		pr_err("Module is not signed with expected PKCS#7 message\n");
 		return -ENOPKG;
-
-	if (ms.hash >= PKEY_HASH__LAST ||
-	    !hash_algo_name[ms.hash])
-		return -ENOPKG;
-
-	key = request_asymmetric_key(sig, ms.signer_len,
-				     sig + ms.signer_len, ms.key_id_len);
-	if (IS_ERR(key))
-		return PTR_ERR(key);
-
-	pks = mod_make_digest(ms.hash, mod, modlen);
-	if (IS_ERR(pks)) {
-		ret = PTR_ERR(pks);
-		goto error_put_key;
 	}
 
-	ret = mod_extract_mpi_array(pks, sig + ms.signer_len + ms.key_id_len,
-				    sig_len);
-	if (ret < 0)
-		goto error_free_pks;
+	if (ms.algo != 0 ||
+	    ms.hash != 0 ||
+	    ms.signer_len != 0 ||
+	    ms.key_id_len != 0 ||
+	    ms.__pad[0] != 0 ||
+	    ms.__pad[1] != 0 ||
+	    ms.__pad[2] != 0) {
+		pr_err("PKCS#7 signature info has unexpected non-zero params\n");
+		return -EBADMSG;
+	}
 
-	ret = verify_signature(key, pks);
-	pr_devel("verify_signature() = %d\n", ret);
-
-error_free_pks:
-	mpi_free(pks->rsa.s);
-	kfree(pks);
-error_put_key:
-	key_put(key);
-	pr_devel("<==%s() = %d\n", __func__, ret);
-	return ret;	
+	return system_verify_data(mod, modlen, mod + modlen, sig_len,
+				  VERIFYING_MODULE_SIGNATURE);
 }
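
For orientation, the layout mod_verify_sig() now expects (after the module
loader has already stripped the trailing magic string) is the PKCS#7 blob
followed by the fixed trailer parsed above; a hedged sketch of the pointer
arithmetic, error checks elided:

	/* [ module data | PKCS#7 message | struct module_signature ] */
	const struct module_signature *ms = mod + modlen - sizeof(*ms);
	size_t sig_len = be32_to_cpu(ms->sig_len);
	const void *p7 = mod + modlen - sizeof(*ms) - sig_len;

	/* i.e. the system_verify_data(mod, ..., p7, sig_len, ...) call above */

With PKCS#7 the signer and key identifiers live inside the message itself,
which is why every per-signature field except sig_len must now be zero.
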
diff --git a/kernel/profile.c b/kernel/profile.c
index a7bcd28..99513e1 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -339,7 +339,7 @@
 		node = cpu_to_mem(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_exact_node(node,
+			page = __alloc_pages_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -347,7 +347,7 @@
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_exact_node(node,
+			page = __alloc_pages_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -543,14 +543,14 @@
 		int node = cpu_to_mem(cpu);
 		struct page *page;
 
-		page = alloc_pages_exact_node(node,
+		page = __alloc_pages_node(node,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
-		page = alloc_pages_exact_node(node,
+		page = __alloc_pages_node(node,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c8e0e05..787320d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -556,6 +556,19 @@
 	if (data & ~(unsigned long)PTRACE_O_MASK)
 		return -EINVAL;
 
+	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
+		if (!config_enabled(CONFIG_CHECKPOINT_RESTORE) ||
+		    !config_enabled(CONFIG_SECCOMP))
+			return -EINVAL;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
+		    current->ptrace & PT_SUSPEND_SECCOMP)
+			return -EPERM;
+	}
+
 	/* Avoid intermediate state when all opts are cleared */
 	flags = child->ptrace;
 	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
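
A hedged userspace sketch of the new option from a checkpoint/restore-style
tracer (the tracer needs CAP_SYS_ADMIN, must not be seccomp-confined itself,
and the kernel must have CONFIG_CHECKPOINT_RESTORE and CONFIG_SECCOMP):

	#include <sys/ptrace.h>

	#ifndef PTRACE_O_SUSPEND_SECCOMP
	#define PTRACE_O_SUSPEND_SECCOMP	(1 << 21)
	#endif

	/* suspend the tracee's filters while its state is inspected;
	 * note PTRACE_SETOPTIONS replaces the full option set */
	if (ptrace(PTRACE_SETOPTIONS, pid, 0,
		   (void *)PTRACE_O_SUSPEND_SECCOMP) == -1)
		perror("PTRACE_SETOPTIONS");
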
diff --git a/kernel/resource.c b/kernel/resource.c
index fed052a..f150dbb 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -492,40 +492,51 @@
 }
 EXPORT_SYMBOL_GPL(page_is_ram);
 
-/*
- * Search for a resouce entry that fully contains the specified region.
- * If found, return 1 if it is RAM, 0 if not.
- * If not found, or region is not fully contained, return -1
+/**
+ * region_intersects() - determine intersection of region with known resources
+ * @start: region start address
+ * @size: size of region
+ * @name: name of resource (in iomem_resource)
  *
- * Used by the ioremap functions to ensure the user is not remapping RAM and is
- * a vast speed up over walking through the resource table page by page.
+ * Check if the specified region partially overlaps or fully eclipses a
+ * resource identified by @name.  Return REGION_DISJOINT if the region
+ * does not overlap @name, return REGION_MIXED if the region overlaps
+ * @name and another resource, and return REGION_INTERSECTS if the
+ * region overlaps @name and no other defined resource. Note that
+ * REGION_INTERSECTS is also returned when the specified region
+ * overlaps RAM and undefined memory holes.
+ *
+ * region_intersects() is used by memory remapping functions to ensure
+ * the user is not remapping RAM and is a vast speedup over walking
+ * through the resource table page by page.
  */
-int region_is_ram(resource_size_t start, unsigned long size)
+int region_intersects(resource_size_t start, size_t size, const char *name)
 {
-	struct resource *p;
-	resource_size_t end = start + size - 1;
 	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-	const char *name = "System RAM";
-	int ret = -1;
+	resource_size_t end = start + size - 1;
+	int type = 0; int other = 0;
+	struct resource *p;
 
 	read_lock(&resource_lock);
 	for (p = iomem_resource.child; p ; p = p->sibling) {
-		if (p->end < start)
-			continue;
+		bool is_type = strcmp(p->name, name) == 0 && p->flags == flags;
 
-		if (p->start <= start && end <= p->end) {
-			/* resource fully contains region */
-			if ((p->flags != flags) || strcmp(p->name, name))
-				ret = 0;
-			else
-				ret = 1;
-			break;
-		}
-		if (end < p->start)
-			break;	/* not found */
+		if (start >= p->start && start <= p->end)
+			is_type ? type++ : other++;
+		if (end >= p->start && end <= p->end)
+			is_type ? type++ : other++;
+		if (p->start >= start && p->end <= end)
+			is_type ? type++ : other++;
 	}
 	read_unlock(&resource_lock);
-	return ret;
+
+	if (other == 0)
+		return type ? REGION_INTERSECTS : REGION_DISJOINT;
+
+	if (type)
+		return REGION_MIXED;
+
+	return REGION_DISJOINT;
 }
 
 void __weak arch_remove_reservations(struct resource *avail)
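
The three return values let call sites read as a small decision table; this
is exactly the pattern memremap() above depends on:

	switch (region_intersects(offset, size, "System RAM")) {
	case REGION_INTERSECTS:		/* wholly RAM: use the direct map */
		addr = __va(offset);
		break;
	case REGION_DISJOINT:		/* no RAM at all: safe to remap */
		addr = ioremap_cache(offset, size);
		break;
	default:			/* REGION_MIXED: refuse */
		addr = NULL;
		break;
	}
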
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8420c2..3595403 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -164,14 +164,12 @@
 
 static void sched_feat_disable(int i)
 {
-	if (static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_dec(&sched_feat_keys[i]);
+	static_key_disable(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_inc(&sched_feat_keys[i]);
+	static_key_enable(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e026..272d932 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -106,9 +106,10 @@
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+			  void *key)
 {
-	__wake_up_common(q, mode, 1, 0, key);
+	__wake_up_common(q, mode, nr, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
@@ -283,7 +284,7 @@
 	if (!list_empty(&wait->task_list))
 		list_del_init(&wait->task_list);
 	else if (waitqueue_active(q))
-		__wake_up_locked_key(q, mode, key);
+		__wake_up_locked_key(q, mode, 1, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(abort_exclusive_wait);
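
Callers that already hold q->lock can now bound the number of exclusive
wakeups instead of always waking one; abort_exclusive_wait() above passes 1,
and the general shape is:

	spin_lock_irqsave(&q->lock, flags);
	/* wake at most nr exclusive waiters, as __wake_up() would */
	__wake_up_locked_key(q, TASK_NORMAL, nr, key);
	spin_unlock_irqrestore(&q->lock, flags);
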
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 245df6b..5bd4779 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -175,17 +175,16 @@
  */
 static u32 seccomp_run_filters(struct seccomp_data *sd)
 {
-	struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
 	struct seccomp_data sd_local;
 	u32 ret = SECCOMP_RET_ALLOW;
+	/* Make sure cross-thread synced filter points somewhere sane. */
+	struct seccomp_filter *f =
+			lockless_dereference(current->seccomp.filter);
 
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (unlikely(WARN_ON(f == NULL)))
 		return SECCOMP_RET_KILL;
 
-	/* Make sure cross-thread synced filter points somewhere sane. */
-	smp_read_barrier_depends();
-
 	if (!sd) {
 		populate_seccomp_data(&sd_local);
 		sd = &sd_local;
@@ -549,7 +548,11 @@
 {
 	int mode = current->seccomp.mode;
 
-	if (mode == 0)
+	if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
+		return;
+
+	if (mode == SECCOMP_MODE_DISABLED)
 		return;
 	else if (mode == SECCOMP_MODE_STRICT)
 		__secure_computing_strict(this_syscall);
@@ -650,6 +653,10 @@
 	int this_syscall = sd ? sd->nr :
 		syscall_get_nr(current, task_pt_regs(current));
 
+	if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
+		return SECCOMP_PHASE1_OK;
+
 	switch (mode) {
 	case SECCOMP_MODE_STRICT:
 		__secure_computing_strict(this_syscall);  /* may call do_exit */
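
lockless_dereference() (<linux/compiler.h>) bundles the load and the
dependency barrier that used to be two separate statements here; simplified,
it expands to roughly:

	#define lockless_dereference(p) \
	({ \
		typeof(p) _p = READ_ONCE(p); \
		smp_read_barrier_depends();	/* order deref vs. load of p */ \
		(_p); \
	})
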
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 7c434c3..a818cbc 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -113,7 +113,8 @@
 		if (kthread_should_stop()) {
 			__set_current_state(TASK_RUNNING);
 			preempt_enable();
-			if (ht->cleanup)
+			/* cleanup must mirror setup */
+			if (ht->cleanup && td->status != HP_THREAD_NONE)
 				ht->cleanup(td->cpu, cpu_online(td->cpu));
 			kfree(td);
 			return 0;
@@ -259,15 +260,6 @@
 {
 	unsigned int cpu;
 
-	/* Unpark any threads that were voluntarily parked. */
-	for_each_cpu_not(cpu, ht->cpumask) {
-		if (cpu_online(cpu)) {
-			struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
-			if (tsk)
-				kthread_unpark(tsk);
-		}
-	}
-
 	/* We need to destroy also the parked threads of offline cpus */
 	for_each_possible_cpu(cpu) {
 		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
@@ -281,19 +273,22 @@
 }
 
 /**
- * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
+ * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
+ * 					    to hotplug
  * @plug_thread:	Hotplug thread descriptor
+ * @cpumask:		The cpumask where threads run
  *
  * Creates and starts the threads on all online cpus.
  */
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+					   const struct cpumask *cpumask)
 {
 	unsigned int cpu;
 	int ret = 0;
 
 	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(plug_thread->cpumask, cpu_possible_mask);
+	cpumask_copy(plug_thread->cpumask, cpumask);
 
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
@@ -301,9 +296,11 @@
 		ret = __smpboot_create_thread(plug_thread, cpu);
 		if (ret) {
 			smpboot_destroy_threads(plug_thread);
+			free_cpumask_var(plug_thread->cpumask);
 			goto out;
 		}
-		smpboot_unpark_thread(plug_thread, cpu);
+		if (cpumask_test_cpu(cpu, cpumask))
+			smpboot_unpark_thread(plug_thread, cpu);
 	}
 	list_add(&plug_thread->list, &hotplug_threads);
 out:
@@ -311,7 +308,7 @@
 	put_online_cpus();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
 
 /**
  * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
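
A hedged sketch of registering against a restricted mask (the foo_* names are
illustrative); threads are still created for every CPU that comes up, but only
those inside @cpumask get unparked:

	static DEFINE_PER_CPU(struct task_struct *, foo_task);

	static struct smp_hotplug_thread foo_threads = {
		.store			= &foo_task,
		.thread_should_run	= foo_should_run,
		.thread_fn		= foo_fn,
		.thread_comm		= "foo/%u",
	};

	/* foo_cpumask: some subset of cpu_possible_mask */
	ret = smpboot_register_percpu_thread_cpumask(&foo_threads, foo_cpumask);

The old smpboot_register_percpu_thread() behaviour is simply the
cpumask == cpu_possible_mask case.
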
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index ca7d84f..03c3875 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -219,6 +219,7 @@
 cond_syscall(sys_eventfd);
 cond_syscall(sys_eventfd2);
 cond_syscall(sys_memfd_create);
+cond_syscall(sys_userfaultfd);
 
 /* performance counters: */
 cond_syscall(sys_perf_event_open);
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
deleted file mode 100644
index 3e9868d47..0000000
--- a/kernel/system_certificates.S
+++ /dev/null
@@ -1,20 +0,0 @@
-#include <linux/export.h>
-#include <linux/init.h>
-
-	__INITRODATA
-
-	.align 8
-	.globl VMLINUX_SYMBOL(system_certificate_list)
-VMLINUX_SYMBOL(system_certificate_list):
-__cert_list_start:
-	.incbin "kernel/x509_certificate_list"
-__cert_list_end:
-
-	.align 8
-	.globl VMLINUX_SYMBOL(system_certificate_list_size)
-VMLINUX_SYMBOL(system_certificate_list_size):
-#ifdef CONFIG_64BIT
-	.quad __cert_list_end - __cert_list_start
-#else
-	.long __cert_list_end - __cert_list_start
-#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
deleted file mode 100644
index 875f64e..0000000
--- a/kernel/system_keyring.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/* System trusted keyring for trusted public keys
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cred.h>
-#include <linux/err.h>
-#include <keys/asymmetric-type.h>
-#include <keys/system_keyring.h>
-#include "module-internal.h"
-
-struct key *system_trusted_keyring;
-EXPORT_SYMBOL_GPL(system_trusted_keyring);
-
-extern __initconst const u8 system_certificate_list[];
-extern __initconst const unsigned long system_certificate_list_size;
-
-/*
- * Load the compiled-in keys
- */
-static __init int system_trusted_keyring_init(void)
-{
-	pr_notice("Initialise system trusted keyring\n");
-
-	system_trusted_keyring =
-		keyring_alloc(".system_keyring",
-			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
-			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
-			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
-			      KEY_ALLOC_NOT_IN_QUOTA, NULL);
-	if (IS_ERR(system_trusted_keyring))
-		panic("Can't allocate system trusted keyring\n");
-
-	set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
-	return 0;
-}
-
-/*
- * Must be initialised before we try and load the keys into the keyring.
- */
-device_initcall(system_trusted_keyring_init);
-
-/*
- * Load the compiled-in list of X.509 certificates.
- */
-static __init int load_system_certificate_list(void)
-{
-	key_ref_t key;
-	const u8 *p, *end;
-	size_t plen;
-
-	pr_notice("Loading compiled-in X.509 certificates\n");
-
-	p = system_certificate_list;
-	end = p + system_certificate_list_size;
-	while (p < end) {
-		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
-		 * than 256 bytes in size.
-		 */
-		if (end - p < 4)
-			goto dodgy_cert;
-		if (p[0] != 0x30 &&
-		    p[1] != 0x82)
-			goto dodgy_cert;
-		plen = (p[2] << 8) | p[3];
-		plen += 4;
-		if (plen > end - p)
-			goto dodgy_cert;
-
-		key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
-					   "asymmetric",
-					   NULL,
-					   p,
-					   plen,
-					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
-					   KEY_USR_VIEW | KEY_USR_READ),
-					   KEY_ALLOC_NOT_IN_QUOTA |
-					   KEY_ALLOC_TRUSTED);
-		if (IS_ERR(key)) {
-			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
-			       PTR_ERR(key));
-		} else {
-			set_bit(KEY_FLAG_BUILTIN, &key_ref_to_ptr(key)->flags);
-			pr_notice("Loaded X.509 cert '%s'\n",
-				  key_ref_to_ptr(key)->description);
-			key_ref_put(key);
-		}
-		p += plen;
-	}
-
-	return 0;
-
-dodgy_cert:
-	pr_err("Problem parsing in-kernel X.509 certificate list\n");
-	return 0;
-}
-late_initcall(load_system_certificate_list);
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 8727032..53fa971 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -18,6 +18,8 @@
  * This is like the signal handler which runs in kernel mode, but it doesn't
  * try to wake up the @task.
  *
+ * Note: there is no ordering guarantee on works queued here.
+ *
  * RETURNS:
  * 0 if succeeds or -ESRCH.
  */
@@ -108,16 +110,6 @@
 		raw_spin_unlock_wait(&task->pi_lock);
 		smp_mb();
 
-		/* Reverse the list to run the works in fifo order */
-		head = NULL;
-		do {
-			next = work->next;
-			work->next = head;
-			head = work;
-			work = next;
-		} while (work);
-
-		work = head;
 		do {
 			next = work->next;
 			work->func(work);
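
With the reversal gone, works now run in the list's own order — effectively
the reverse of queueing order — and the comment added above makes clear that
no ordering is guaranteed at all. A caller sketch (my_twork_fn is
illustrative):

	static void my_twork_fn(struct callback_head *head)
	{
		/* runs from task_work_run(); no ordering guarantee */
	}

	init_task_work(&work, my_twork_fn);
	if (task_work_add(task, &work, true))	/* true: signal the task */
		;	/* -ESRCH: the task is already exiting */
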
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eb11011..b0623ac 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -630,13 +630,18 @@
 		goto out;
 	}
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	avg = rec->time;
+	do_div(avg, rec->counter);
+	if (tracing_thresh && (avg < tracing_thresh))
+		goto out;
+#endif
+
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	seq_puts(m, "    ");
-	avg = rec->time;
-	do_div(avg, rec->counter);
 
 	/* Sample standard deviation (s^2) */
 	if (rec->counter <= 1)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6260717..fc347f8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -400,6 +400,17 @@
 };
 
 /*
+ * Structure to hold event state and handle nested events.
+ */
+struct rb_event_info {
+	u64			ts;
+	u64			delta;
+	unsigned long		length;
+	struct buffer_page	*tail_page;
+	int			add_timestamp;
+};
+
+/*
  * Used for which event context the event is in.
  *  NMI     = 0
  *  IRQ     = 1
@@ -1876,73 +1887,6 @@
 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static inline int
-rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
-		   struct ring_buffer_event *event)
-{
-	unsigned long addr = (unsigned long)event;
-	unsigned long index;
-
-	index = rb_event_index(event);
-	addr &= PAGE_MASK;
-
-	return cpu_buffer->commit_page->page == (void *)addr &&
-		rb_commit_index(cpu_buffer) == index;
-}
-
-static void
-rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
-{
-	unsigned long max_count;
-
-	/*
-	 * We only race with interrupts and NMIs on this CPU.
-	 * If we own the commit event, then we can commit
-	 * all others that interrupted us, since the interruptions
-	 * are in stack format (they finish before they come
-	 * back to us). This allows us to do a simple loop to
-	 * assign the commit to the tail.
-	 */
- again:
-	max_count = cpu_buffer->nr_pages * 100;
-
-	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
-		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
-			return;
-		if (RB_WARN_ON(cpu_buffer,
-			       rb_is_reader_page(cpu_buffer->tail_page)))
-			return;
-		local_set(&cpu_buffer->commit_page->page->commit,
-			  rb_page_write(cpu_buffer->commit_page));
-		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
-		cpu_buffer->write_stamp =
-			cpu_buffer->commit_page->page->time_stamp;
-		/* add barrier to keep gcc from optimizing too much */
-		barrier();
-	}
-	while (rb_commit_index(cpu_buffer) !=
-	       rb_page_write(cpu_buffer->commit_page)) {
-
-		local_set(&cpu_buffer->commit_page->page->commit,
-			  rb_page_write(cpu_buffer->commit_page));
-		RB_WARN_ON(cpu_buffer,
-			   local_read(&cpu_buffer->commit_page->page->commit) &
-			   ~RB_WRITE_MASK);
-		barrier();
-	}
-
-	/* again, keep gcc from optimizing */
-	barrier();
-
-	/*
-	 * If an interrupt came in just after the first while loop
-	 * and pushed the tail page forward, we will be left with
-	 * a dangling commit that will never go forward.
-	 */
-	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
-		goto again;
-}
-
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
@@ -1968,64 +1912,6 @@
 	iter->head = 0;
 }
 
-/* Slow path, do not inline */
-static noinline struct ring_buffer_event *
-rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
-{
-	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
-
-	/* Not the first event on the page? */
-	if (rb_event_index(event)) {
-		event->time_delta = delta & TS_MASK;
-		event->array[0] = delta >> TS_SHIFT;
-	} else {
-		/* nope, just zero it */
-		event->time_delta = 0;
-		event->array[0] = 0;
-	}
-
-	return skip_time_extend(event);
-}
-
-/**
- * rb_update_event - update event type and data
- * @event: the event to update
- * @type: the type of event
- * @length: the size of the event field in the ring buffer
- *
- * Update the type and data fields of the event. The length
- * is the actual size that is written to the ring buffer,
- * and with this, we can determine what to place into the
- * data field.
- */
-static void
-rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
-		struct ring_buffer_event *event, unsigned length,
-		int add_timestamp, u64 delta)
-{
-	/* Only a commit updates the timestamp */
-	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
-		delta = 0;
-
-	/*
-	 * If we need to add a timestamp, then we
-	 * add it to the start of the resevered space.
-	 */
-	if (unlikely(add_timestamp)) {
-		event = rb_add_time_stamp(event, delta);
-		length -= RB_LEN_TIME_EXTEND;
-		delta = 0;
-	}
-
-	event->time_delta = delta;
-	length -= RB_EVNT_HDR_SIZE;
-	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
-		event->type_len = 0;
-		event->array[0] = length;
-	} else
-		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
-}
-
 /*
  * rb_handle_head_page - writer hit the head page
  *
@@ -2184,29 +2070,13 @@
 	return 0;
 }
 
-static unsigned rb_calculate_event_length(unsigned length)
-{
-	struct ring_buffer_event event; /* Used only for sizeof array */
-
-	/* zero length can cause confusions */
-	if (!length)
-		length++;
-
-	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
-		length += sizeof(event.array[0]);
-
-	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ARCH_ALIGNMENT);
-
-	return length;
-}
-
 static inline void
 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
-	      struct buffer_page *tail_page,
-	      unsigned long tail, unsigned long length)
+	      unsigned long tail, struct rb_event_info *info)
 {
+	struct buffer_page *tail_page = info->tail_page;
 	struct ring_buffer_event *event;
+	unsigned long length = info->length;
 
 	/*
 	 * Only the event that crossed the page boundary
@@ -2276,13 +2146,14 @@
  */
 static noinline struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
-	     unsigned long length, unsigned long tail,
-	     struct buffer_page *tail_page, u64 ts)
+	     unsigned long tail, struct rb_event_info *info)
 {
+	struct buffer_page *tail_page = info->tail_page;
 	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
+	u64 ts;
 
 	next_page = tail_page;
 
@@ -2368,75 +2239,121 @@
 
  out_again:
 
-	rb_reset_tail(cpu_buffer, tail_page, tail, length);
+	rb_reset_tail(cpu_buffer, tail, info);
 
 	/* fail and let the caller try again */
 	return ERR_PTR(-EAGAIN);
 
  out_reset:
 	/* reset write */
-	rb_reset_tail(cpu_buffer, tail_page, tail, length);
+	rb_reset_tail(cpu_buffer, tail, info);
 
 	return NULL;
 }
 
-static struct ring_buffer_event *
-__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-		  unsigned long length, u64 ts,
-		  u64 delta, int add_timestamp)
+/* Slow path, do not inline */
+static noinline struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 {
-	struct buffer_page *tail_page;
-	struct ring_buffer_event *event;
-	unsigned long tail, write;
+	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
 
-	/*
-	 * If the time delta since the last event is too big to
-	 * hold in the time field of the event, then we append a
-	 * TIME EXTEND event ahead of the data event.
-	 */
-	if (unlikely(add_timestamp))
-		length += RB_LEN_TIME_EXTEND;
+	/* Not the first event on the page? */
+	if (rb_event_index(event)) {
+		event->time_delta = delta & TS_MASK;
+		event->array[0] = delta >> TS_SHIFT;
+	} else {
+		/* nope, just zero it */
+		event->time_delta = 0;
+		event->array[0] = 0;
+	}
 
-	tail_page = cpu_buffer->tail_page;
-	write = local_add_return(length, &tail_page->write);
+	return skip_time_extend(event);
+}
 
-	/* set write to only the index of the write */
-	write &= RB_WRITE_MASK;
-	tail = write - length;
+static inline int rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+				     struct ring_buffer_event *event);
 
-	/*
-	 * If this is the first commit on the page, then it has the same
-	 * timestamp as the page itself.
-	 */
-	if (!tail)
+/**
+ * rb_update_event - update event type and data
+ * @event: the event to update
+ * @info: the event length, delta and timestamp state to apply
+ *
+ * Update the type and data fields of the event. The length
+ * is the actual size that is written to the ring buffer,
+ * and with this, we can determine what to place into the
+ * data field.
+ */
+static void
+rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
+		struct ring_buffer_event *event,
+		struct rb_event_info *info)
+{
+	unsigned length = info->length;
+	u64 delta = info->delta;
+
+	/* Only a commit updates the timestamp */
+	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
 		delta = 0;
 
-	/* See if we shot pass the end of this buffer page */
-	if (unlikely(write > BUF_PAGE_SIZE))
-		return rb_move_tail(cpu_buffer, length, tail,
-				    tail_page, ts);
+	/*
+	 * If we need to add a timestamp, then we
+	 * add it to the start of the reserved space.
+	 */
+	if (unlikely(info->add_timestamp)) {
+		event = rb_add_time_stamp(event, delta);
+		length -= RB_LEN_TIME_EXTEND;
+		delta = 0;
+	}
 
-	/* We reserved something on the buffer */
+	event->time_delta = delta;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
+		event->type_len = 0;
+		event->array[0] = length;
+	} else
+		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
+}
 
-	event = __rb_page_index(tail_page, tail);
-	kmemcheck_annotate_bitfield(event, bitfield);
-	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
+static unsigned rb_calculate_event_length(unsigned length)
+{
+	struct ring_buffer_event event; /* Used only for sizeof array */
 
-	local_inc(&tail_page->entries);
+	/* zero length can cause confusions */
+	if (!length)
+		length++;
+
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
+		length += sizeof(event.array[0]);
+
+	length += RB_EVNT_HDR_SIZE;
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	/*
-	 * If this is the first commit on the page, then update
-	 * its timestamp.
+	 * In case the time delta is larger than the 27 bits for it
+	 * in the header, we need to add a timestamp. If another
+	 * event comes in when trying to discard this one to increase
+	 * the length, then the timestamp will be added in the allocated
+	 * space of this event. If length is bigger than the size needed
+	 * for the TIME_EXTEND, then padding has to be used. The events
+	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
+	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
+	 * As length is a multiple of 4, we only need to worry if it
+	 * is 12 (RB_LEN_TIME_EXTEND + 4).
 	 */
-	if (!tail)
-		tail_page->page->time_stamp = ts;
+	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
+		length += RB_ALIGNMENT;
 
-	/* account for these added bytes */
-	local_add(length, &cpu_buffer->entries_bytes);
-
-	return event;
+	return length;
 }
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline bool sched_clock_stable(void)
+{
+	return true;
+}
+#endif
+
 static inline int
 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		  struct ring_buffer_event *event)
@@ -2483,6 +2400,59 @@
 	local_inc(&cpu_buffer->commits);
 }
 
+static void
+rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	unsigned long max_count;
+
+	/*
+	 * We only race with interrupts and NMIs on this CPU.
+	 * If we own the commit event, then we can commit
+	 * all others that interrupted us, since the interruptions
+	 * are in stack format (they finish before they come
+	 * back to us). This allows us to do a simple loop to
+	 * assign the commit to the tail.
+	 */
+ again:
+	max_count = cpu_buffer->nr_pages * 100;
+
+	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
+			return;
+		if (RB_WARN_ON(cpu_buffer,
+			       rb_is_reader_page(cpu_buffer->tail_page)))
+			return;
+		local_set(&cpu_buffer->commit_page->page->commit,
+			  rb_page_write(cpu_buffer->commit_page));
+		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
+		cpu_buffer->write_stamp =
+			cpu_buffer->commit_page->page->time_stamp;
+		/* add barrier to keep gcc from optimizing too much */
+		barrier();
+	}
+	while (rb_commit_index(cpu_buffer) !=
+	       rb_page_write(cpu_buffer->commit_page)) {
+
+		local_set(&cpu_buffer->commit_page->page->commit,
+			  rb_page_write(cpu_buffer->commit_page));
+		RB_WARN_ON(cpu_buffer,
+			   local_read(&cpu_buffer->commit_page->page->commit) &
+			   ~RB_WRITE_MASK);
+		barrier();
+	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
+}
+
 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
@@ -2515,91 +2485,94 @@
 	}
 }
 
-static struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer *buffer,
-		      struct ring_buffer_per_cpu *cpu_buffer,
-		      unsigned long length)
+static inline void rb_event_discard(struct ring_buffer_event *event)
 {
-	struct ring_buffer_event *event;
-	u64 ts, delta;
-	int nr_loops = 0;
-	int add_timestamp;
-	u64 diff;
+	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+		event = skip_time_extend(event);
 
-	rb_start_commit(cpu_buffer);
+	/* array[0] holds the actual length for the discarded event */
+	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
+	event->type_len = RINGBUF_TYPE_PADDING;
+	/* time delta must be non zero */
+	if (!event->time_delta)
+		event->time_delta = 1;
+}
 
-#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-	/*
-	 * Due to the ability to swap a cpu buffer from a buffer
-	 * it is possible it was swapped before we committed.
-	 * (committing stops a swap). We check for it here and
-	 * if it happened, we have to fail the write.
-	 */
-	barrier();
-	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
-		local_dec(&cpu_buffer->committing);
-		local_dec(&cpu_buffer->commits);
-		return NULL;
-	}
-#endif
+static inline int
+rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+		   struct ring_buffer_event *event)
+{
+	unsigned long addr = (unsigned long)event;
+	unsigned long index;
 
-	length = rb_calculate_event_length(length);
- again:
-	add_timestamp = 0;
-	delta = 0;
+	index = rb_event_index(event);
+	addr &= PAGE_MASK;
+
+	return cpu_buffer->commit_page->page == (void *)addr &&
+		rb_commit_index(cpu_buffer) == index;
+}
+
+static void
+rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+		      struct ring_buffer_event *event)
+{
+	u64 delta;
 
 	/*
-	 * We allow for interrupts to reenter here and do a trace.
-	 * If one does, it will cause this original code to loop
-	 * back here. Even with heavy interrupts happening, this
-	 * should only happen a few times in a row. If this happens
-	 * 1000 times in a row, there must be either an interrupt
-	 * storm or we have something buggy.
-	 * Bail!
+	 * The event first in the commit queue updates the
+	 * time stamp.
 	 */
-	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
-		goto out_fail;
-
-	ts = rb_time_stamp(cpu_buffer->buffer);
-	diff = ts - cpu_buffer->write_stamp;
-
-	/* make sure this diff is calculated here */
-	barrier();
-
-	/* Did the write stamp get updated already? */
-	if (likely(ts >= cpu_buffer->write_stamp)) {
-		delta = diff;
-		if (unlikely(test_time_stamp(delta))) {
-			int local_clock_stable = 1;
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-			local_clock_stable = sched_clock_stable();
-#endif
-			WARN_ONCE(delta > (1ULL << 59),
-				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
-				  (unsigned long long)delta,
-				  (unsigned long long)ts,
-				  (unsigned long long)cpu_buffer->write_stamp,
-				  local_clock_stable ? "" :
-				  "If you just came from a suspend/resume,\n"
-				  "please switch to the trace global clock:\n"
-				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
-			add_timestamp = 1;
-		}
+	if (rb_event_is_commit(cpu_buffer, event)) {
+		/*
+		 * A commit event that is first on a page
+		 * updates the write timestamp with the page stamp
+		 */
+		if (!rb_event_index(event))
+			cpu_buffer->write_stamp =
+				cpu_buffer->commit_page->page->time_stamp;
+		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+			delta = event->array[0];
+			delta <<= TS_SHIFT;
+			delta += event->time_delta;
+			cpu_buffer->write_stamp += delta;
+		} else
+			cpu_buffer->write_stamp += event->time_delta;
 	}
+}
 
-	event = __rb_reserve_next(cpu_buffer, length, ts,
-				  delta, add_timestamp);
-	if (unlikely(PTR_ERR(event) == -EAGAIN))
-		goto again;
-
-	if (!event)
-		goto out_fail;
-
-	return event;
-
- out_fail:
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+		      struct ring_buffer_event *event)
+{
+	local_inc(&cpu_buffer->entries);
+	rb_update_write_stamp(cpu_buffer, event);
 	rb_end_commit(cpu_buffer);
-	return NULL;
+}
+
+static __always_inline void
+rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+{
+	bool pagebusy;
+
+	if (buffer->irq_work.waiters_pending) {
+		buffer->irq_work.waiters_pending = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&buffer->irq_work.work);
+	}
+
+	if (cpu_buffer->irq_work.waiters_pending) {
+		cpu_buffer->irq_work.waiters_pending = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&cpu_buffer->irq_work.work);
+	}
+
+	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+
+	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+		cpu_buffer->irq_work.wakeup_full = true;
+		cpu_buffer->irq_work.full_waiters_pending = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&cpu_buffer->irq_work.work);
+	}
 }
 
 /*
@@ -2672,6 +2645,178 @@
 }
 
 /**
+ * ring_buffer_unlock_commit - commit a reserved event
+ * @buffer: The buffer to commit to
+ * @event: The event pointer to commit.
+ *
+ * This commits the data to the ring buffer, and releases any locks held.
+ *
+ * Must be paired with ring_buffer_lock_reserve.
+ */
+int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu = raw_smp_processor_id();
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	rb_commit(cpu_buffer, event);
+
+	rb_wakeups(buffer, cpu_buffer);
+
+	trace_recursive_unlock(cpu_buffer);
+
+	preempt_enable_notrace();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
+
+static noinline void
+rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
+		    struct rb_event_info *info)
+{
+	WARN_ONCE(info->delta > (1ULL << 59),
+		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
+		  (unsigned long long)info->delta,
+		  (unsigned long long)info->ts,
+		  (unsigned long long)cpu_buffer->write_stamp,
+		  sched_clock_stable() ? "" :
+		  "If you just came from a suspend/resume,\n"
+		  "please switch to the trace global clock:\n"
+		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
+	info->add_timestamp = 1;
+}
+
+static struct ring_buffer_event *
+__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+		  struct rb_event_info *info)
+{
+	struct ring_buffer_event *event;
+	struct buffer_page *tail_page;
+	unsigned long tail, write;
+
+	/*
+	 * If the time delta since the last event is too big to
+	 * hold in the time field of the event, then we append a
+	 * TIME EXTEND event ahead of the data event.
+	 */
+	if (unlikely(info->add_timestamp))
+		info->length += RB_LEN_TIME_EXTEND;
+
+	tail_page = info->tail_page = cpu_buffer->tail_page;
+	write = local_add_return(info->length, &tail_page->write);
+
+	/* set write to only the index of the write */
+	write &= RB_WRITE_MASK;
+	tail = write - info->length;
+
+	/*
+	 * If this is the first commit on the page, then it has the same
+	 * timestamp as the page itself.
+	 */
+	if (!tail)
+		info->delta = 0;
+
+	/* See if we shot pass the end of this buffer page */
+	if (unlikely(write > BUF_PAGE_SIZE))
+		return rb_move_tail(cpu_buffer, tail, info);
+
+	/* We reserved something on the buffer */
+
+	event = __rb_page_index(tail_page, tail);
+	kmemcheck_annotate_bitfield(event, bitfield);
+	rb_update_event(cpu_buffer, event, info);
+
+	local_inc(&tail_page->entries);
+
+	/*
+	 * If this is the first commit on the page, then update
+	 * its timestamp.
+	 */
+	if (!tail)
+		tail_page->page->time_stamp = info->ts;
+
+	/* account for these added bytes */
+	local_add(info->length, &cpu_buffer->entries_bytes);
+
+	return event;
+}
+
+static struct ring_buffer_event *
+rb_reserve_next_event(struct ring_buffer *buffer,
+		      struct ring_buffer_per_cpu *cpu_buffer,
+		      unsigned long length)
+{
+	struct ring_buffer_event *event;
+	struct rb_event_info info;
+	int nr_loops = 0;
+	u64 diff;
+
+	rb_start_commit(cpu_buffer);
+
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+	/*
+	 * Due to the ability to swap a cpu buffer from a buffer
+	 * it is possible it was swapped before we committed.
+	 * (committing stops a swap). We check for it here and
+	 * if it happened, we have to fail the write.
+	 */
+	barrier();
+	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+		local_dec(&cpu_buffer->committing);
+		local_dec(&cpu_buffer->commits);
+		return NULL;
+	}
+#endif
+
+	info.length = rb_calculate_event_length(length);
+ again:
+	info.add_timestamp = 0;
+	info.delta = 0;
+
+	/*
+	 * We allow for interrupts to reenter here and do a trace.
+	 * If one does, it will cause this original code to loop
+	 * back here. Even with heavy interrupts happening, this
+	 * should only happen a few times in a row. If this happens
+	 * 1000 times in a row, there must be either an interrupt
+	 * storm or we have something buggy.
+	 * Bail!
+	 */
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
+		goto out_fail;
+
+	info.ts = rb_time_stamp(cpu_buffer->buffer);
+	diff = info.ts - cpu_buffer->write_stamp;
+
+	/* make sure this diff is calculated here */
+	barrier();
+
+	/* Did the write stamp get updated already? */
+	if (likely(info.ts >= cpu_buffer->write_stamp)) {
+		info.delta = diff;
+		if (unlikely(test_time_stamp(info.delta)))
+			rb_handle_timestamp(cpu_buffer, &info);
+	}
+
+	event = __rb_reserve_next(cpu_buffer, &info);
+
+	if (unlikely(PTR_ERR(event) == -EAGAIN))
+		goto again;
+
+	if (!event)
+		goto out_fail;
+
+	return event;
+
+ out_fail:
+	rb_end_commit(cpu_buffer);
+	return NULL;
+}
+
+/**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
  * @length: the length of the data to reserve (excluding event header)
@@ -2729,111 +2874,6 @@
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
-static void
-rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
-		      struct ring_buffer_event *event)
-{
-	u64 delta;
-
-	/*
-	 * The event first in the commit queue updates the
-	 * time stamp.
-	 */
-	if (rb_event_is_commit(cpu_buffer, event)) {
-		/*
-		 * A commit event that is first on a page
-		 * updates the write timestamp with the page stamp
-		 */
-		if (!rb_event_index(event))
-			cpu_buffer->write_stamp =
-				cpu_buffer->commit_page->page->time_stamp;
-		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
-			delta = event->array[0];
-			delta <<= TS_SHIFT;
-			delta += event->time_delta;
-			cpu_buffer->write_stamp += delta;
-		} else
-			cpu_buffer->write_stamp += event->time_delta;
-	}
-}
-
-static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
-		      struct ring_buffer_event *event)
-{
-	local_inc(&cpu_buffer->entries);
-	rb_update_write_stamp(cpu_buffer, event);
-	rb_end_commit(cpu_buffer);
-}
-
-static __always_inline void
-rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
-{
-	bool pagebusy;
-
-	if (buffer->irq_work.waiters_pending) {
-		buffer->irq_work.waiters_pending = false;
-		/* irq_work_queue() supplies it's own memory barriers */
-		irq_work_queue(&buffer->irq_work.work);
-	}
-
-	if (cpu_buffer->irq_work.waiters_pending) {
-		cpu_buffer->irq_work.waiters_pending = false;
-		/* irq_work_queue() supplies it's own memory barriers */
-		irq_work_queue(&cpu_buffer->irq_work.work);
-	}
-
-	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-
-	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
-		cpu_buffer->irq_work.wakeup_full = true;
-		cpu_buffer->irq_work.full_waiters_pending = false;
-		/* irq_work_queue() supplies it's own memory barriers */
-		irq_work_queue(&cpu_buffer->irq_work.work);
-	}
-}
-
-/**
- * ring_buffer_unlock_commit - commit a reserved
- * @buffer: The buffer to commit to
- * @event: The event pointer to commit.
- *
- * This commits the data to the ring buffer, and releases any locks held.
- *
- * Must be paired with ring_buffer_lock_reserve.
- */
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event)
-{
-	struct ring_buffer_per_cpu *cpu_buffer;
-	int cpu = raw_smp_processor_id();
-
-	cpu_buffer = buffer->buffers[cpu];
-
-	rb_commit(cpu_buffer, event);
-
-	rb_wakeups(buffer, cpu_buffer);
-
-	trace_recursive_unlock(cpu_buffer);
-
-	preempt_enable_notrace();
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
-
-static inline void rb_event_discard(struct ring_buffer_event *event)
-{
-	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
-		event = skip_time_extend(event);
-
-	/* array[0] holds the actual length for the discarded event */
-	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
-	event->type_len = RINGBUF_TYPE_PADDING;
-	/* time delta must be non zero */
-	if (!event->time_delta)
-		event->time_delta = 1;
-}
-
 /*
  * Decrement the entries to the page that an event is on.
  * The event does not even need to exist, only the pointer
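
The padding rule added to rb_calculate_event_length() is easy to verify with
the usual constants (RB_EVNT_HDR_SIZE = 4, RB_ALIGNMENT = 4,
RB_LEN_TIME_EXTEND = 8): a 5..8 byte payload becomes 4 + 8 = 12 bytes after
alignment, the one multiple of 4 that can hold neither a bare TIME_EXTEND (8)
nor a TIME_EXTEND plus minimum padding (8 + 8 = 16), so exactly that case is
bumped to 16.
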
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index abcbf7f..6e79408 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3035,7 +3035,7 @@
 	if (!iter)
 		return ERR_PTR(-ENOMEM);
 
-	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
+	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
 				    GFP_KERNEL);
 	if (!iter->buffer_iter)
 		goto release;
@@ -6990,7 +6990,7 @@
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
+		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 404a372..7ca09cd 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -30,6 +30,7 @@
 DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
+static LIST_HEAD(ftrace_generic_fields);
 static LIST_HEAD(ftrace_common_fields);
 
 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
@@ -94,6 +95,10 @@
 	struct ftrace_event_field *field;
 	struct list_head *head;
 
+	field = __find_event_field(&ftrace_generic_fields, name);
+	if (field)
+		return field;
+
 	field = __find_event_field(&ftrace_common_fields, name);
 	if (field)
 		return field;
@@ -144,6 +149,13 @@
 }
 EXPORT_SYMBOL_GPL(trace_define_field);
 
+#define __generic_field(type, item, filter_type)			\
+	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
+				   #item, 0, 0, is_signed_type(type),	\
+				   filter_type);			\
+	if (ret)							\
+		return ret;
+
 #define __common_field(type, item)					\
 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
 				   "common_" #item,			\
@@ -153,6 +165,16 @@
 	if (ret)							\
 		return ret;
 
+static int trace_define_generic_fields(void)
+{
+	int ret;
+
+	__generic_field(int, cpu, FILTER_OTHER);
+	__generic_field(char *, comm, FILTER_PTR_STRING);
+
+	return ret;
+}
+
 static int trace_define_common_fields(void)
 {
 	int ret;
@@ -2671,6 +2693,9 @@
 	if (!entry)
 		pr_warn("Could not create tracefs 'available_events' entry\n");
 
+	if (trace_define_generic_fields())
+		pr_warn("tracing: Failed to allocated generic fields");
+
 	if (trace_define_common_fields())
 		pr_warn("tracing: Failed to allocate common fields");
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index d81d6f3..bd1bf18 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -252,6 +252,50 @@
 	return match;
 }
 
+/* Filter predicate for CPUs. */
+static int filter_pred_cpu(struct filter_pred *pred, void *event)
+{
+	int cpu, cmp;
+	int match = 0;
+
+	cpu = raw_smp_processor_id();
+	cmp = pred->val;
+
+	switch (pred->op) {
+	case OP_EQ:
+		match = cpu == cmp;
+		break;
+	case OP_LT:
+		match = cpu < cmp;
+		break;
+	case OP_LE:
+		match = cpu <= cmp;
+		break;
+	case OP_GT:
+		match = cpu > cmp;
+		break;
+	case OP_GE:
+		match = cpu >= cmp;
+		break;
+	default:
+		break;
+	}
+
+	return !!match == !pred->not;
+}
+
+/* Filter predicate for COMM. */
+static int filter_pred_comm(struct filter_pred *pred, void *event)
+{
+	int cmp, match;
+
+	cmp = pred->regex.match(current->comm, &pred->regex,
+				pred->regex.field_len);
+	match = cmp ^ pred->not;
+
+	return match;
+}
+
 static int filter_pred_none(struct filter_pred *pred, void *event)
 {
 	return 0;
@@ -1002,7 +1046,10 @@
 	if (is_string_field(field)) {
 		filter_build_regex(pred);
 
-		if (field->filter_type == FILTER_STATIC_STRING) {
+		if (!strcmp(field->name, "comm")) {
+			fn = filter_pred_comm;
+			pred->regex.field_len = TASK_COMM_LEN;
+		} else if (field->filter_type == FILTER_STATIC_STRING) {
 			fn = filter_pred_string;
 			pred->regex.field_len = field->size;
 		} else if (field->filter_type == FILTER_DYN_STRING)
@@ -1025,7 +1072,10 @@
 		}
 		pred->val = val;
 
-		fn = select_comparison_fn(pred->op, field->size,
+		if (!strcmp(field->name, "cpu"))
+			fn = filter_pred_cpu;
+		else
+			fn = select_comparison_fn(pred->op, field->size,
 					  field->is_signed);
 		if (!fn) {
 			parse_error(ps, FILT_ERR_INVALID_OP, 0);
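
A hedged userspace sketch exercising both new generic fields at once (the
path assumes debugfs mounted at /sys/kernel/debug and needs root):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/tracing/events/sched/sched_switch/filter";
		/* cpu and comm need no per-event field definition */
		const char *f = "cpu == 1 && comm != \"bash\"";
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, f, strlen(f)) < 0)
			perror("filter");
		if (fd >= 0)
			close(fd);
		return 0;
	}
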
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8968bf7..ca98445 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -715,13 +715,13 @@
 
 		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 		trace_seq_printf(s, ".%s", nsecs_str);
-		len += strlen(nsecs_str);
+		len += strlen(nsecs_str) + 1;
 	}
 
 	trace_seq_puts(s, " us ");
 
 	/* Print remaining spaces to fit the row's width */
-	for (i = len; i < 7; i++)
+	for (i = len; i < 8; i++)
 		trace_seq_putc(s, ' ');
 }
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index dfab253..8e481a8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -496,6 +496,8 @@
 	char			sym;
 } mark[] = {
 	MARK(1000000000ULL	, '$'), /* 1 sec */
+	MARK(100000000ULL	, '@'), /* 100 msec */
+	MARK(10000000ULL	, '*'), /* 10 msec */
 	MARK(1000000ULL		, '#'), /* 1000 usecs */
 	MARK(100000ULL		, '!'), /* 100 usecs */
 	MARK(10000ULL		, '+'), /* 10 usecs */
@@ -508,7 +510,7 @@
 	int size = ARRAY_SIZE(mark);
 
 	for (i = 0; i < size; i++) {
-		if (d >= mark[i].val)
+		if (d > mark[i].val)
 			break;
 	}
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3f34496..b746399 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -18,12 +18,6 @@
 
 #define STACK_TRACE_ENTRIES 500
 
-#ifdef CC_USING_FENTRY
-# define fentry		1
-#else
-# define fentry		0
-#endif
-
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
@@ -35,7 +29,7 @@
  */
 static struct stack_trace max_stack_trace = {
 	.max_entries		= STACK_TRACE_ENTRIES - 1,
-	.entries		= &stack_dump_trace[1],
+	.entries		= &stack_dump_trace[0],
 };
 
 static unsigned long max_stack_size;
@@ -55,7 +49,7 @@
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   max_stack_trace.nr_entries - 1);
+			   max_stack_trace.nr_entries);
 
 	for (i = 0; i < max_stack_trace.nr_entries; i++) {
 		if (stack_dump_trace[i] == ULONG_MAX)
@@ -77,7 +71,7 @@
 	unsigned long this_size, flags; unsigned long *p, *top, *start;
 	static int tracer_frame;
 	int frame_size = ACCESS_ONCE(tracer_frame);
-	int i;
+	int i, x;
 
 	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
@@ -105,26 +99,20 @@
 	max_stack_size = this_size;
 
 	max_stack_trace.nr_entries = 0;
-
-	if (using_ftrace_ops_list_func())
-		max_stack_trace.skip = 4;
-	else
-		max_stack_trace.skip = 3;
+	max_stack_trace.skip = 3;
 
 	save_stack_trace(&max_stack_trace);
 
-	/*
-	 * Add the passed in ip from the function tracer.
-	 * Searching for this on the stack will skip over
-	 * most of the overhead from the stack tracer itself.
-	 */
-	stack_dump_trace[0] = ip;
-	max_stack_trace.nr_entries++;
+	/* Skip over the overhead of the stack tracer itself */
+	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+		if (stack_dump_trace[i] == ip)
+			break;
+	}
 
 	/*
 	 * Now find where in the stack these are.
 	 */
-	i = 0;
+	x = 0;
 	start = stack;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -139,12 +127,15 @@
 	while (i < max_stack_trace.nr_entries) {
 		int found = 0;
 
-		stack_dump_index[i] = this_size;
+		stack_dump_index[x] = this_size;
 		p = start;
 
 		for (; p < top && i < max_stack_trace.nr_entries; p++) {
+			if (stack_dump_trace[i] == ULONG_MAX)
+				break;
 			if (*p == stack_dump_trace[i]) {
-				this_size = stack_dump_index[i++] =
+				stack_dump_trace[x] = stack_dump_trace[i++];
+				this_size = stack_dump_index[x++] =
 					(top - p) * sizeof(unsigned long);
 				found = 1;
 				/* Start the search from here */
@@ -156,7 +147,7 @@
 				 * out what that is, then figure it out
 				 * now.
 				 */
-				if (unlikely(!tracer_frame) && i == 1) {
+				if (unlikely(!tracer_frame)) {
 					tracer_frame = (p - stack) *
 						sizeof(unsigned long);
 					max_stack_size -= tracer_frame;
@@ -168,6 +159,10 @@
 			i++;
 	}
 
+	max_stack_trace.nr_entries = x;
+	for (; x < i; x++)
+		stack_dump_trace[x] = ULONG_MAX;
+
 	if (task_stack_end_corrupted(current)) {
 		print_max_stack();
 		BUG();
@@ -192,24 +187,7 @@
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	/*
-	 * When fentry is used, the traced function does not get
-	 * its stack frame set up, and we lose the parent.
-	 * The ip is pretty useless because the function tracer
-	 * was called before that function set up its stack frame.
-	 * In this case, we use the parent ip.
-	 *
-	 * By adding the return address of either the parent ip
-	 * or the current ip we can disregard most of the stack usage
-	 * caused by the stack tracer itself.
-	 *
-	 * The function tracer always reports the address of where the
-	 * mcount call was, but the stack will hold the return address.
-	 */
-	if (fentry)
-		ip = parent_ip;
-	else
-		ip += MCOUNT_INSN_SIZE;
+	ip += MCOUNT_INSN_SIZE;
 
 	check_stack(ip, &stack);
 
@@ -284,7 +262,7 @@
 {
 	long n = *pos - 1;
 
-	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
 		return NULL;
 
 	m->private = (void *)n;
@@ -354,7 +332,7 @@
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   max_stack_trace.nr_entries - 1);
+			   max_stack_trace.nr_entries);
 
 		if (!stack_tracer_enabled && !max_stack_size)
 			print_disabled(m);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index f65a0a0..88fefa6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -39,6 +39,7 @@
 	cred->cap_inheritable = CAP_EMPTY_SET;
 	cred->cap_permitted = CAP_FULL_SET;
 	cred->cap_effective = CAP_FULL_SET;
+	cred->cap_ambient = CAP_EMPTY_SET;
 	cred->cap_bset = CAP_FULL_SET;
 #ifdef CONFIG_KEYS
 	key_put(cred->request_key_auth);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index a6ffa43..64ed1c3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,6 +24,7 @@
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
+#include <linux/kthread.h>
 
 /*
  * The run state of the lockup detectors is controlled by the content of the
@@ -66,7 +67,26 @@
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+/*
+ * The 'watchdog_running' variable is set to 1 when the watchdog threads
+ * are registered/started and is set to 0 when the watchdog threads are
+ * unregistered/stopped, so it is an indicator of whether the threads exist.
+ */
 static int __read_mostly watchdog_running;
+/*
+ * If a subsystem has a need to deactivate the watchdog temporarily, it
+ * can use the suspend/resume interface to achieve this. The content of
+ * the 'watchdog_suspended' variable reflects this state. Existing threads
+ * are parked/unparked by the lockup_detector_{suspend|resume} functions
+ * (see comment blocks pertaining to those functions for further details).
+ *
+ * 'watchdog_suspended' also prevents threads from being registered/started
+ * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
+ * of 'watchdog_running' cannot change while the watchdog is deactivated
+ * temporarily (see related code in 'proc' handlers).
+ */
+static int __read_mostly watchdog_suspended;
+
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -613,46 +633,9 @@
 	}
 }
 
-void watchdog_nmi_enable_all(void)
-{
-	int cpu;
-
-	mutex_lock(&watchdog_proc_mutex);
-
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto unlock;
-
-	get_online_cpus();
-	for_each_watchdog_cpu(cpu)
-		watchdog_nmi_enable(cpu);
-	put_online_cpus();
-
-unlock:
-	mutex_unlock(&watchdog_proc_mutex);
-}
-
-void watchdog_nmi_disable_all(void)
-{
-	int cpu;
-
-	mutex_lock(&watchdog_proc_mutex);
-
-	if (!watchdog_running)
-		goto unlock;
-
-	get_online_cpus();
-	for_each_watchdog_cpu(cpu)
-		watchdog_nmi_disable(cpu);
-	put_online_cpus();
-
-unlock:
-	mutex_unlock(&watchdog_proc_mutex);
-}
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-void watchdog_nmi_enable_all(void) {}
-void watchdog_nmi_disable_all(void) {}
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 static struct smp_hotplug_thread watchdog_threads = {
@@ -666,62 +649,102 @@
 	.unpark			= watchdog_enable,
 };
 
-static void restart_watchdog_hrtimer(void *info)
+/*
+ * park all watchdog threads that are specified in 'watchdog_cpumask'
+ */
+static int watchdog_park_threads(void)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
-	int ret;
+	int cpu, ret = 0;
 
-	/*
-	 * No need to cancel and restart hrtimer if it is currently executing
-	 * because it will reprogram itself with the new period now.
-	 * We should never see it unqueued here because we are running per-cpu
-	 * with interrupts disabled.
-	 */
-	ret = hrtimer_try_to_cancel(hrtimer);
-	if (ret == 1)
-		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-				HRTIMER_MODE_REL_PINNED);
+	get_online_cpus();
+	for_each_watchdog_cpu(cpu) {
+		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
+		if (ret)
+			break;
+	}
+	if (ret) {
+		for_each_watchdog_cpu(cpu)
+			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	}
+	put_online_cpus();
+
+	return ret;
 }
 
-static void update_watchdog(int cpu)
-{
-	/*
-	 * Make sure that perf event counter will adopt to a new
-	 * sampling period. Updating the sampling period directly would
-	 * be much nicer but we do not have an API for that now so
-	 * let's use a big hammer.
-	 * Hrtimer will adopt the new period on the next tick but this
-	 * might be late already so we have to restart the timer as well.
-	 */
-	watchdog_nmi_disable(cpu);
-	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
-	watchdog_nmi_enable(cpu);
-}
-
-static void update_watchdog_all_cpus(void)
+/*
+ * unpark all watchdog threads that are specified in 'watchdog_cpumask'
+ */
+static void watchdog_unpark_threads(void)
 {
 	int cpu;
 
 	get_online_cpus();
 	for_each_watchdog_cpu(cpu)
-		update_watchdog(cpu);
+		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
 	put_online_cpus();
 }
 
+/*
+ * Suspend the hard and soft lockup detector by parking the watchdog threads.
+ */
+int lockup_detector_suspend(void)
+{
+	int ret = 0;
+
+	mutex_lock(&watchdog_proc_mutex);
+	/*
+	 * Multiple suspend requests can be active in parallel (counted by
+	 * the 'watchdog_suspended' variable). If the watchdog threads are
+	 * running, the first caller takes care that they will be parked.
+	 * The state of 'watchdog_running' cannot change while a suspend
+	 * request is active (see related code in 'proc' handlers).
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		ret = watchdog_park_threads();
+
+	if (ret == 0)
+		watchdog_suspended++;
+
+	mutex_unlock(&watchdog_proc_mutex);
+
+	return ret;
+}
+
+/*
+ * Resume the hard and soft lockup detector by unparking the watchdog threads.
+ */
+void lockup_detector_resume(void)
+{
+	mutex_lock(&watchdog_proc_mutex);
+
+	watchdog_suspended--;
+	/*
+	 * The watchdog threads are unparked if they were previously running
+	 * and if there is no more active suspend request.
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		watchdog_unpark_threads();
+
+	mutex_unlock(&watchdog_proc_mutex);
+}
+
+static void update_watchdog_all_cpus(void)
+{
+	watchdog_park_threads();
+	watchdog_unpark_threads();
+}
+
 static int watchdog_enable_all_cpus(void)
 {
 	int err = 0;
 
 	if (!watchdog_running) {
-		err = smpboot_register_percpu_thread(&watchdog_threads);
+		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
+							     &watchdog_cpumask);
 		if (err)
 			pr_err("Failed to create watchdog threads, disabled\n");
-		else {
-			if (smpboot_update_cpumask_percpu_thread(
-				    &watchdog_threads, &watchdog_cpumask))
-				pr_err("Failed to set cpumask for watchdog threads\n");
+		else
 			watchdog_running = 1;
-		}
 	} else {
 		/*
 		 * Enable/disable the lockup detectors or
@@ -787,6 +810,12 @@
 
 	mutex_lock(&watchdog_proc_mutex);
 
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	/*
 	 * If the parameter is being read return the state of the corresponding
 	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
@@ -872,6 +901,12 @@
 
 	mutex_lock(&watchdog_proc_mutex);
 
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	old = ACCESS_ONCE(watchdog_thresh);
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
@@ -903,6 +938,13 @@
 	int err;
 
 	mutex_lock(&watchdog_proc_mutex);
+
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
 	if (!err && write) {
 		/* Remove impossible cpus to keep sysctl output cleaner. */
@@ -920,6 +962,7 @@
 				pr_err("cpumask update failed\n");
 		}
 	}
+out:
 	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
@@ -932,10 +975,8 @@
 
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_enabled()) {
-		if (!cpumask_empty(tick_nohz_full_mask))
-			pr_info("Disabling watchdog on nohz_full cores by default\n");
-		cpumask_andnot(&watchdog_cpumask, cpu_possible_mask,
-			       tick_nohz_full_mask);
+		pr_info("Disabling watchdog on nohz_full cores by default\n");
+		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
 	} else
 		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
 #else
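
The new suspend/resume pair is reference counted under watchdog_proc_mutex,
so nested suspend requests are safe and the threads are only unparked when
the last suspender resumes. A sketch of how a subsystem might bracket a
section that must not race with the watchdog; the counter-reprogramming
caller here is illustrative, not taken from this series:

	#include <linux/nmi.h>

	static int reprogram_counters(void)
	{
		int err;

		/* Parks the watchdog threads; fails only if parking fails. */
		err = lockup_detector_suspend();
		if (err)
			return err;

		/* ... exclusive use of the hardware counters ... */

		/* Threads are unparked once the last suspender resumes. */
		lockup_detector_resume();
		return 0;
	}
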
diff --git a/lib/Kconfig b/lib/Kconfig
index a165552..2e491ac 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -53,9 +53,6 @@
 config STMP_DEVICE
 	bool
 
-config PERCPU_RWSEM
-	bool
-
 config ARCH_USE_CMPXCHG_LOCKREF
 	bool
 
@@ -528,4 +525,7 @@
 config ARCH_HAS_PMEM_API
 	bool
 
+config ARCH_HAS_MMIO_FLUSH
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3e0b662..ab76b99 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -916,12 +916,6 @@
 	 This allows rt mutex semantics violations and rt mutex related
 	 deadlocks (lockups) to be detected and reported automatically.
 
-config RT_MUTEX_TESTER
-	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
-	help
-	  This option enables a rt-mutex tester.
-
 config DEBUG_SPINLOCK
 	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
@@ -1528,6 +1522,13 @@
 	  and to test how the mmc host driver handles retries from
 	  the block device.
 
+config FAIL_FUTEX
+	bool "Fault-injection capability for futexes"
+	select DEBUG_FS
+	depends on FAULT_INJECTION && FUTEX
+	help
+	  Provide fault-injection capability for futexes.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1826,6 +1827,15 @@
 	        memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
+config TEST_STATIC_KEYS
+	tristate "Test static keys"
+	default n
+	depends on m
+	help
+	  Test the static key interfaces.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index f261006..13a7c6a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@
 	 sha1.o md5.o irq_regs.o argv_split.o \
 	 proportions.o flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o
+	 earlycpio.o seq_buf.o nmi_backtrace.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
@@ -39,6 +39,8 @@
 obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1a000bb..2b3f46c 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -24,15 +24,20 @@
 	[ASN1_OP_MATCH_JUMP]			= 1 + 1 + 1,
 	[ASN1_OP_MATCH_JUMP_OR_SKIP]		= 1 + 1 + 1,
 	[ASN1_OP_MATCH_ANY]			= 1,
+	[ASN1_OP_MATCH_ANY_OR_SKIP]		= 1,
 	[ASN1_OP_MATCH_ANY_ACT]			= 1         + 1,
+	[ASN1_OP_MATCH_ANY_ACT_OR_SKIP]		= 1         + 1,
 	[ASN1_OP_COND_MATCH_OR_SKIP]		= 1 + 1,
 	[ASN1_OP_COND_MATCH_ACT_OR_SKIP]	= 1 + 1     + 1,
 	[ASN1_OP_COND_MATCH_JUMP_OR_SKIP]	= 1 + 1 + 1,
 	[ASN1_OP_COND_MATCH_ANY]		= 1,
+	[ASN1_OP_COND_MATCH_ANY_OR_SKIP]	= 1,
 	[ASN1_OP_COND_MATCH_ANY_ACT]		= 1         + 1,
+	[ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP]	= 1         + 1,
 	[ASN1_OP_COND_FAIL]			= 1,
 	[ASN1_OP_COMPLETE]			= 1,
 	[ASN1_OP_ACT]				= 1         + 1,
+	[ASN1_OP_MAYBE_ACT]			= 1         + 1,
 	[ASN1_OP_RETURN]			= 1,
 	[ASN1_OP_END_SEQ]			= 1,
 	[ASN1_OP_END_SEQ_OF]			= 1     + 1,
@@ -177,6 +182,7 @@
 	unsigned char flags = 0;
 #define FLAG_INDEFINITE_LENGTH	0x01
 #define FLAG_MATCHED		0x02
+#define FLAG_LAST_MATCHED	0x04 /* Last tag matched */
 #define FLAG_CONS		0x20 /* Corresponds to CONS bit in the opcode tag
 				      * - ie. whether or not we are going to parse
 				      *   a compound type.
@@ -208,9 +214,9 @@
 		unsigned char tmp;
 
 		/* Skip conditional matches if possible */
-		if ((op & ASN1_OP_MATCH__COND &&
-		     flags & FLAG_MATCHED) ||
-		    dp == datalen) {
+		if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+		    (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+			flags &= ~FLAG_LAST_MATCHED;
 			pc += asn1_op_lengths[op];
 			goto next_op;
 		}
@@ -302,7 +308,9 @@
 	/* Decide how to handle the operation */
 	switch (op) {
 	case ASN1_OP_MATCH_ANY_ACT:
+	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_ANY_ACT:
+	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
 		ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
 		if (ret < 0)
 			return ret;
@@ -319,8 +327,10 @@
 	case ASN1_OP_MATCH:
 	case ASN1_OP_MATCH_OR_SKIP:
 	case ASN1_OP_MATCH_ANY:
+	case ASN1_OP_MATCH_ANY_OR_SKIP:
 	case ASN1_OP_COND_MATCH_OR_SKIP:
 	case ASN1_OP_COND_MATCH_ANY:
+	case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
 	skip_data:
 		if (!(flags & FLAG_CONS)) {
 			if (flags & FLAG_INDEFINITE_LENGTH) {
@@ -422,8 +432,15 @@
 		pc += asn1_op_lengths[op];
 		goto next_op;
 
+	case ASN1_OP_MAYBE_ACT:
+		if (!(flags & FLAG_LAST_MATCHED)) {
+			pc += asn1_op_lengths[op];
+			goto next_op;
+		}
 	case ASN1_OP_ACT:
 		ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+		if (ret < 0)
+			return ret;
 		pc += asn1_op_lengths[op];
 		goto next_op;
 
@@ -431,6 +448,7 @@
 		if (unlikely(jsp <= 0))
 			goto jump_stack_underflow;
 		pc = jump_stack[--jsp];
+		flags |= FLAG_MATCHED | FLAG_LAST_MATCHED;
 		goto next_op;
 
 	default:
@@ -438,7 +456,8 @@
 	}
 
 	/* Shouldn't reach here */
-	pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+	pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n",
+	       op, pc);
 	return -EBADMSG;
 
 data_overrun_error:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1298c05e..2886eba 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -102,6 +102,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0211d30..83c33a5b 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,8 +16,39 @@
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 
+#define TEST(bit, op, c_op, val)				\
+do {								\
+	atomic##bit##_set(&v, v0);				\
+	r = v0;							\
+	atomic##bit##_##op(val, &v);				\
+	r c_op val;						\
+	WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n",	\
+		(unsigned long long)atomic##bit##_read(&v),	\
+		(unsigned long long)r);				\
+} while (0)
+
+static __init void test_atomic(void)
+{
+	int v0 = 0xaaa31337;
+	int v1 = 0xdeadbeef;
+	int onestwos = 0x11112222;
+	int one = 1;
+
+	atomic_t v;
+	int r;
+
+	TEST(, add, +=, onestwos);
+	TEST(, add, +=, -one);
+	TEST(, sub, -=, onestwos);
+	TEST(, sub, -=, -one);
+	TEST(, or, |=, v1);
+	TEST(, and, &=, v1);
+	TEST(, xor, ^=, v1);
+	TEST(, andnot, &= ~, v1);
+}
+
 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
-static __init int test_atomic64(void)
+static __init void test_atomic64(void)
 {
 	long long v0 = 0xaaa31337c001d00dLL;
 	long long v1 = 0xdeadbeefdeafcafeLL;
@@ -34,15 +65,14 @@
 	BUG_ON(v.counter != r);
 	BUG_ON(atomic64_read(&v) != r);
 
-	INIT(v0);
-	atomic64_add(onestwos, &v);
-	r += onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_add(-one, &v);
-	r += -one;
-	BUG_ON(v.counter != r);
+	TEST(64, add, +=, onestwos);
+	TEST(64, add, +=, -one);
+	TEST(64, sub, -=, onestwos);
+	TEST(64, sub, -=, -one);
+	TEST(64, or, |=, v1);
+	TEST(64, and, &=, v1);
+	TEST(64, xor, ^=, v1);
+	TEST(64, andnot, &= ~, v1);
 
 	INIT(v0);
 	r += onestwos;
@@ -55,16 +85,6 @@
 	BUG_ON(v.counter != r);
 
 	INIT(v0);
-	atomic64_sub(onestwos, &v);
-	r -= onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_sub(-one, &v);
-	r -= -one;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
 	r -= onestwos;
 	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
 	BUG_ON(v.counter != r);
@@ -147,6 +167,12 @@
 	BUG_ON(!atomic64_inc_not_zero(&v));
 	r += one;
 	BUG_ON(v.counter != r);
+}
+
+static __init int test_atomics(void)
+{
+	test_atomic();
+	test_atomic64();
 
 #ifdef CONFIG_X86
 	pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@
 	return 0;
 }
 
-core_initcall(test_atomic64);
+core_initcall(test_atomics);
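
The TEST() macro folds the old INIT()-plus-BUG_ON() pattern into a single
line by mirroring each atomic operation on a plain variable and comparing
the results. For instance, TEST(64, or, |=, v1) expands to roughly:

	atomic64_set(&v, v0);
	r = v0;
	atomic64_or(v1, &v);
	r |= v1;
	WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
	     (unsigned long long)atomic64_read(&v),
	     (unsigned long long)r);
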
diff --git a/lib/devres.c b/lib/devres.c
index fbe2aac..f13a246 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -119,10 +119,9 @@
  * @dev: generic device to handle the resource for
  * @res: resource to be handled
  *
- * Checks that a resource is a valid memory region, requests the memory region
- * and ioremaps it either as cacheable or as non-cacheable memory depending on
- * the resource's flags. All operations are managed and will be undone on
- * driver detach.
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
  *
  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
  * on failure. Usage example:
@@ -153,11 +152,7 @@
 		return IOMEM_ERR_PTR(-EBUSY);
 	}
 
-	if (res->flags & IORESOURCE_CACHEABLE)
-		dest_ptr = devm_ioremap(dev, res->start, size);
-	else
-		dest_ptr = devm_ioremap_nocache(dev, res->start, size);
-
+	dest_ptr = devm_ioremap(dev, res->start, size);
 	if (!dest_ptr) {
 		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
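
With the IORESOURCE_CACHEABLE special case gone, devm_ioremap_resource()
always goes through devm_ioremap() and callers keep the usual idiom. A
minimal probe-time sketch; the driver name and register write are
hypothetical:

	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* The mapping is released automatically on driver detach. */
		writel(0x1, base);
		return 0;
	}
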
diff --git a/lib/genalloc.c b/lib/genalloc.c
index daf0afb..116a166 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -160,6 +160,7 @@
 		pool->min_alloc_order = min_alloc_order;
 		pool->algo = gen_pool_first_fit;
 		pool->data = NULL;
+		pool->name = NULL;
 	}
 	return pool;
 }
@@ -252,8 +253,8 @@
 
 		kfree(chunk);
 	}
+	kfree_const(pool->name);
 	kfree(pool);
-	return;
 }
 EXPORT_SYMBOL(gen_pool_destroy);
 
@@ -570,54 +571,89 @@
 	gen_pool_destroy(*(struct gen_pool **)res);
 }
 
+static int devm_gen_pool_match(struct device *dev, void *res, void *data)
+{
+	struct gen_pool **p = res;
+
+	/* NULL data matches only a pool without an assigned name */
+	if (!data && !(*p)->name)
+		return 1;
+
+	if (!data || !(*p)->name)
+		return 0;
+
+	return !strcmp((*p)->name, data);
+}
+
+/**
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *gen_pool_get(struct device *dev, const char *name)
+{
+	struct gen_pool **p;
+
+	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
+			(void *)name);
+	if (!p)
+		return NULL;
+	return *p;
+}
+EXPORT_SYMBOL_GPL(gen_pool_get);
+
 /**
  * devm_gen_pool_create - managed gen_pool_create
  * @dev: device that provides the gen_pool
  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
  *
  * Create a new special memory pool that can be used to manage special purpose
  * memory not managed by the regular kmalloc/kfree interface. The pool will be
  * automatically destroyed by the device management code.
  */
 struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
-		int nid)
+				      int nid, const char *name)
 {
 	struct gen_pool **ptr, *pool;
+	const char *pool_name = NULL;
+
+	/* Check that genpool to be created is uniquely addressed on device */
+	if (gen_pool_get(dev, name))
+		return ERR_PTR(-EINVAL);
+
+	if (name) {
+		pool_name = kstrdup_const(name, GFP_KERNEL);
+		if (!pool_name)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
-		return NULL;
+		goto free_pool_name;
 
 	pool = gen_pool_create(min_alloc_order, nid);
-	if (pool) {
-		*ptr = pool;
-		devres_add(dev, ptr);
-	} else {
-		devres_free(ptr);
-	}
+	if (!pool)
+		goto free_devres;
+
+	*ptr = pool;
+	pool->name = pool_name;
+	devres_add(dev, ptr);
 
 	return pool;
+
+free_devres:
+	devres_free(ptr);
+free_pool_name:
+	kfree_const(pool_name);
+
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(devm_gen_pool_create);
 
-/**
- * gen_pool_get - Obtain the gen_pool (if any) for a device
- * @dev: device to retrieve the gen_pool from
- *
- * Returns the gen_pool for the device if one is present, or NULL.
- */
-struct gen_pool *gen_pool_get(struct device *dev)
-{
-	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
-					NULL);
-
-	if (!p)
-		return NULL;
-	return *p;
-}
-EXPORT_SYMBOL_GPL(gen_pool_get);
-
 #ifdef CONFIG_OF
 /**
  * of_gen_pool_get - find a pool by phandle property
@@ -633,16 +669,30 @@
 	const char *propname, int index)
 {
 	struct platform_device *pdev;
-	struct device_node *np_pool;
+	struct device_node *np_pool, *parent;
+	const char *name = NULL;
+	struct gen_pool *pool = NULL;
 
 	np_pool = of_parse_phandle(np, propname, index);
 	if (!np_pool)
 		return NULL;
+
 	pdev = of_find_device_by_node(np_pool);
+	if (!pdev) {
+		/* Check if named gen_pool is created by parent node device */
+		parent = of_get_parent(np_pool);
+		pdev = of_find_device_by_node(parent);
+		of_node_put(parent);
+
+		of_property_read_string(np_pool, "label", &name);
+		if (!name)
+			name = np_pool->name;
+	}
+	if (pdev)
+		pool = gen_pool_get(&pdev->dev, name);
 	of_node_put(np_pool);
-	if (!pdev)
-		return NULL;
-	return gen_pool_get(&pdev->dev);
+
+	return pool;
 }
 EXPORT_SYMBOL_GPL(of_gen_pool_get);
 #endif /* CONFIG_OF */
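
With names attached to pools, a single device can now own several gen_pools
and consumers can select one by label, either directly or via the DT
"label" property handled above. A sketch under the assumption of a driver
exposing an "sram" pool; the name and granularity are illustrative:

	#include <linux/genalloc.h>
	#include <linux/log2.h>
	#include <linux/numa.h>

	static int bar_setup(struct device *dev)
	{
		struct gen_pool *pool;

		/* 64-byte allocation granularity, no NUMA preference */
		pool = devm_gen_pool_create(dev, ilog2(64), NUMA_NO_NODE,
					    "sram");
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		/* Later, code that only holds the device can look it up. */
		pool = gen_pool_get(dev, "sram");
		if (!pool)
			return -ENODEV;

		return 0;
	}
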
diff --git a/lib/lockref.c b/lib/lockref.c
index 494994b..5a92189 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,14 +4,6 @@
 #if USE_CMPXCHG_LOCKREF
 
 /*
- * Allow weakly-ordered memory architectures to provide barrier-less
- * cmpxchg semantics for lockref updates.
- */
-#ifndef cmpxchg64_relaxed
-# define cmpxchg64_relaxed cmpxchg64
-#endif
-
-/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 0000000..88d3d32
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,162 @@
+/*
+ *  NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ *  HW NMI watchdog support
+ *
+ *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ *  Arch specific calls to support NMI watchdog
+ *
+ *  Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/seq_buf.h>
+
+#ifdef arch_trigger_all_cpu_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+static cpumask_t printtrace_mask;
+
+#define NMI_BUF_SIZE		4096
+
+struct nmi_seq_buf {
+	unsigned char		buffer[NMI_BUF_SIZE];
+	struct seq_buf		seq;
+};
+
+/* Safe printing in NMI context */
+static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+{
+	const char *buf = s->buffer + start;
+
+	printk("%.*s", (end - start) + 1, buf);
+}
+
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+				   void (*raise)(cpumask_t *mask))
+{
+	struct nmi_seq_buf *s;
+	int i, cpu, this_cpu = get_cpu();
+
+	if (test_and_set_bit(0, &backtrace_flag)) {
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't output duplicate CPU dumps.
+		 */
+		put_cpu();
+		return;
+	}
+
+	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+	if (!include_self)
+		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+
+	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
+
+	/*
+	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
+	 * CPUs will write to.
+	 */
+	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
+		s = &per_cpu(nmi_print_seq, cpu);
+		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
+	}
+
+	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+		pr_info("Sending NMI to %s CPUs:\n",
+			(include_self ? "all" : "other"));
+		raise(to_cpumask(backtrace_mask));
+	}
+
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpumask_empty(to_cpumask(backtrace_mask)))
+			break;
+		mdelay(1);
+		touch_softlockup_watchdog();
+	}
+
+	/*
+	 * Now that all the NMIs have triggered, we can dump out their
+	 * back traces safely to the console.
+	 */
+	for_each_cpu(cpu, &printtrace_mask) {
+		int len, last_i = 0;
+
+		s = &per_cpu(nmi_print_seq, cpu);
+		len = seq_buf_used(&s->seq);
+		if (!len)
+			continue;
+
+		/* Print line by line. */
+		for (i = 0; i < len; i++) {
+			if (s->buffer[i] == '\n') {
+				print_seq_line(s, last_i, i);
+				last_i = i + 1;
+			}
+		}
+		/* Check if there was a partial line. */
+		if (last_i < len) {
+			print_seq_line(s, last_i, len - 1);
+			pr_cont("\n");
+		}
+	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_atomic();
+	put_cpu();
+}
+
+/*
+ * It is not safe to call printk() directly from NMI handlers.
+ * It may be acceptable when an NMI has detected a lockup and there is no
+ * other choice, but an NMI on all other CPUs to get a backtrace can also
+ * be triggered by sysrq-l, and we don't want that to lock up, which can
+ * happen if the NMI interrupts a printk in progress.
+ *
+ * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
+ * the content into a per cpu seq_buf buffer. Then when the NMIs are
+ * all done, we can safely dump the contents of the seq_buf to a printk()
+ * from a non NMI context.
+ */
+static int nmi_vprintk(const char *fmt, va_list args)
+{
+	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+	unsigned int len = seq_buf_used(&s->seq);
+
+	seq_buf_vprintf(&s->seq, fmt, args);
+	return seq_buf_used(&s->seq) - len;
+}
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		printk_func_t printk_func_save = this_cpu_read(printk_func);
+
+		/* Replace printk to write into the NMI seq */
+		this_cpu_write(printk_func, nmi_vprintk);
+		pr_warn("NMI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
+		this_cpu_write(printk_func, printk_func_save);
+
+		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+		return true;
+	}
+
+	return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif
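
The generic helper leaves only the "raise an NMI on this cpumask" step to
the architecture; each interrupted CPU then calls nmi_cpu_backtrace(regs)
from its NMI handler, which prints into the per-cpu seq_buf and clears its
bit in backtrace_mask. A sketch of the arch glue; arch_send_nmi_ipi() is a
placeholder, not a real interface:

	#include <linux/nmi.h>

	static void raise_nmi(cpumask_t *mask)
	{
		/* hypothetical arch hook: deliver an NMI IPI to *mask */
		arch_send_nmi_ipi(mask);
	}

	void arch_trigger_all_cpu_backtrace(bool include_self)
	{
		nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
	}
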
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 5f5d24d..c10fba4 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -41,11 +41,8 @@
 		len = maxlen;
 	if (flags & IORESOURCE_IO)
 		return __pci_ioport_map(dev, start, len);
-	if (flags & IORESOURCE_MEM) {
-		if (flags & IORESOURCE_CACHEABLE)
-			return ioremap(start, len);
-		return ioremap_nocache(start, len);
-	}
+	if (flags & IORESOURCE_MEM)
+		return ioremap(start, len);
 	/* What? */
 	return NULL;
 }
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index d9ad6ee..7076ef1 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -40,9 +40,20 @@
 					(unsigned long)bytes, ptrs);	\
 		kernel_neon_end();					\
 	}								\
+	static void raid6_neon ## _n ## _xor_syndrome(int disks,	\
+					int start, int stop, 		\
+					size_t bytes, void **ptrs)	\
+	{								\
+		void raid6_neon ## _n  ## _xor_syndrome_real(int,	\
+				int, int, unsigned long, void**);	\
+		kernel_neon_begin();					\
+		raid6_neon ## _n ## _xor_syndrome_real(disks,		\
+			start, stop, (unsigned long)bytes, ptrs);	\
+		kernel_neon_end();					\
+	}								\
 	struct raid6_calls const raid6_neonx ## _n = {			\
 		raid6_neon ## _n ## _gen_syndrome,			\
-		NULL,		/* XOR not yet implemented */		\
+		raid6_neon ## _n ## _xor_syndrome,			\
 		raid6_have_neon,					\
 		"neonx" #_n,						\
 		0							\
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index 1b9ed79..4fa51b7 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -3,6 +3,7 @@
  *   neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
  *
  *   Copyright (C) 2012 Rob Herring
+ *   Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  *   Based on altivec.uc:
  *     Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
@@ -78,3 +79,48 @@
 		vst1q_u8(&q[d+NSIZE*$$], wq$$);
 	}
 }
+
+void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
+				    unsigned long bytes, void **ptrs)
+{
+	uint8_t **dptr = (uint8_t **)ptrs;
+	uint8_t *p, *q;
+	int d, z, z0;
+
+	register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+	const unative_t x1d = NBYTES(0x1d);
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+		wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+		wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
+
+		/* P/Q data pages */
+		for ( z = z0-1 ; z >= start ; z-- ) {
+			wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+			wp$$ = veorq_u8(wp$$, wd$$);
+			w2$$ = MASK(wq$$);
+			w1$$ = SHLBYTE(wq$$);
+
+			w2$$ = vandq_u8(w2$$, x1d);
+			w1$$ = veorq_u8(w1$$, w2$$);
+			wq$$ = veorq_u8(w1$$, wd$$);
+		}
+		/* P/Q left side optimization */
+		for ( z = start-1 ; z >= 0 ; z-- ) {
+			w2$$ = MASK(wq$$);
+			w1$$ = SHLBYTE(wq$$);
+
+			w2$$ = vandq_u8(w2$$, x1d);
+			wq$$ = veorq_u8(w1$$, w2$$);
+		}
+		w1$$ = vld1q_u8(&q[d+NSIZE*$$]);
+		wq$$ = veorq_u8(wq$$, w1$$);
+
+		vst1q_u8(&p[d+NSIZE*$$], wp$$);
+		vst1q_u8(&q[d+NSIZE*$$], wq$$);
+	}
+}
diff --git a/lib/show_mem.c b/lib/show_mem.c
index adc98e18..1feed6a 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -38,11 +38,9 @@
 
 	printk("%lu pages RAM\n", total);
 	printk("%lu pages HighMem/MovableOnly\n", highmem);
-#ifdef CONFIG_CMA
-	printk("%lu pages reserved\n", (reserved - totalcma_pages));
-	printk("%lu pages cma reserved\n", totalcma_pages);
-#else
 	printk("%lu pages reserved\n", reserved);
+#ifdef CONFIG_CMA
+	printk("%lu pages cma reserved\n", totalcma_pages);
 #endif
 #ifdef CONFIG_QUICKLIST
 	printk("%lu pages in pagetable cache\n",
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 0000000..729447a
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,68 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+	invert_key(&base_inv_old_true_key);
+	invert_key(&base_inv_old_false_key);
+	invert_key(&base_inv_true_key.key);
+	invert_key(&base_inv_false_key.key);
+
+	return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 0000000..c61b299
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,225 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key	= STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key	= STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+	bool			init_state;
+	struct static_key	*key;
+	bool			(*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+	({bool func(void) { return branch(key); } func;	})
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+	struct static_key *previous = NULL;
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (previous != keys[i].key) {
+			invert_key(keys[i].key);
+			previous = keys[i].key;
+		}
+	}
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+	int i;
+	bool ret, init;
+
+	for (i = 0; i < size; i++) {
+		ret = static_key_enabled(keys[i].key);
+		init = keys[i].init_state;
+		if (ret != (invert ? !init : init))
+			return -EINVAL;
+		ret = keys[i].test_key();
+		if (static_key_enabled(keys[i].key)) {
+			if (!ret)
+				return -EINVAL;
+		} else {
+			if (ret)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int __init test_static_key_init(void)
+{
+	int ret;
+	int size;
+
+	struct test_key static_key_tests[] = {
+		/* internal keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &old_true_key,
+			.test_key	= test_key_func(&old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &old_false_key,
+			.test_key	= test_key_func(&old_false_key, static_key_false),
+		},
+		/* internal keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_unlikely),
+		},
+		/* external keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &base_old_true_key,
+			.test_key	= test_key_func(&base_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_old_true_key,
+			.test_key	= test_key_func(&base_inv_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_old_false_key,
+			.test_key	= test_key_func(&base_old_false_key, static_key_false),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_old_false_key,
+			.test_key	= test_key_func(&base_inv_old_false_key, static_key_false),
+		},
+		/* external keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_unlikely),
+		},
+	};
+
+	size = ARRAY_SIZE(static_key_tests);
+
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, true);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+	return 0;
+out:
+	return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/mm/Kconfig b/mm/Kconfig
index e79de2b..3a4070f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -299,15 +299,9 @@
 # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
 # have more than 4GB of memory, but we don't currently use the IOTLB to present
 # a 32-bit address to OHCI.  So we need to use a bounce pool instead.
-#
-# We also use the bounce pool to provide stable page writes for jbd.  jbd
-# initiates buffer writeback without locking the page or setting PG_writeback,
-# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
-# a major rework effort.  Instead, use the bounce buffer to snapshot pages
-# (until jbd goes away).  The only jbd user is ext3.
 config NEED_BOUNCE_POOL
 	bool
-	default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)
+	default y if TILE && USB_OHCI_HCD
 
 config NR_QUICK
 	int
@@ -654,3 +648,20 @@
 	  when kswapd starts. This has a potential performance impact on
 	  processes running early in the lifetime of the system until kswapd
 	  finishes the initialisation.
+
+config ZONE_DEVICE
+	bool "Device memory (pmem, etc...) hotplug support" if EXPERT
+	default !ZONE_DMA
+	depends on !ZONE_DMA
+	depends on MEMORY_HOTPLUG
+	depends on MEMORY_HOTREMOVE
+	depends on X86_64 #arch_add_memory() comprehends device memory
+
+	help
+	  Device memory hotplug support allows for establishing pmem,
+	  or other device driver discovered memory regions, in the
+	  memmap. This allows pfn_to_page() lookups of otherwise
+	  "device-physical" addresses which is needed for using a DAX
+	  mapping in an O_DIRECT operation, among other things.
+
+	  If FS_DAX is enabled, then say Y.
diff --git a/mm/Makefile b/mm/Makefile
index 98c4eae..b424d5e 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -78,3 +78,4 @@
 obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
 obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
+obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index dac5bf5..ee8d7fd 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -55,13 +55,13 @@
 
 	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
 	spin_lock(&wb->list_lock);
-	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
+	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
 		nr_dirty++;
-	list_for_each_entry(inode, &wb->b_io, i_wb_list)
+	list_for_each_entry(inode, &wb->b_io, i_io_list)
 		nr_io++;
-	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
+	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
 		nr_more_io++;
-	list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list)
+	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
 		if (inode->i_state & I_DIRTY_TIME)
 			nr_dirty_time++;
 	spin_unlock(&wb->list_lock);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index a23dd19..3b63807 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -236,6 +236,7 @@
 	count += pages;
 	while (pages--)
 		__free_pages_bootmem(page++, cur++, 0);
+	bdata->node_bootmem_map = NULL;
 
 	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
@@ -294,6 +295,9 @@
 		sidx + bdata->node_min_pfn,
 		eidx + bdata->node_min_pfn);
 
+	if (WARN_ON(bdata->node_bootmem_map == NULL))
+		return;
+
 	if (bdata->hint_idx > sidx)
 		bdata->hint_idx = sidx;
 
@@ -314,6 +318,9 @@
 		eidx + bdata->node_min_pfn,
 		flags);
 
+	if (WARN_ON(bdata->node_bootmem_map == NULL))
+		return 0;
+
 	for (idx = sidx; idx < eidx; idx++)
 		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
 			if (exclusive) {
diff --git a/mm/compaction.c b/mm/compaction.c
index 018f08d..c5c627a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -207,6 +207,13 @@
 	return !get_pageblock_skip(page);
 }
 
+static void reset_cached_positions(struct zone *zone)
+{
+	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
+	zone->compact_cached_free_pfn = zone_end_pfn(zone);
+}
+
 /*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
@@ -218,9 +225,6 @@
 	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
-	zone->compact_cached_migrate_pfn[0] = start_pfn;
-	zone->compact_cached_migrate_pfn[1] = start_pfn;
-	zone->compact_cached_free_pfn = end_pfn;
 	zone->compact_blockskip_flush = false;
 
 	/* Walk the zone and mark every pageblock as suitable for isolation */
@@ -238,6 +242,8 @@
 
 		clear_pageblock_skip(page);
 	}
+
+	reset_cached_positions(zone);
 }
 
 void reset_isolation_suitable(pg_data_t *pgdat)
@@ -431,6 +437,24 @@
 
 		if (!valid_page)
 			valid_page = page;
+
+		/*
+		 * For compound pages such as THP and hugetlbfs, we can save
+		 * potentially a lot of iterations if we skip them at once.
+		 * The check is racy, but we can consider only valid values
+		 * and the only danger is skipping too much.
+		 */
+		if (PageCompound(page)) {
+			unsigned int comp_order = compound_order(page);
+
+			if (likely(comp_order < MAX_ORDER)) {
+				blockpfn += (1UL << comp_order) - 1;
+				cursor += (1UL << comp_order) - 1;
+			}
+
+			goto isolate_fail;
+		}
+
 		if (!PageBuddy(page))
 			goto isolate_fail;
 
@@ -490,6 +514,13 @@
 
 	}
 
+	/*
+	 * There is a tiny chance that we have read bogus compound_order(),
+	 * so be careful to not go outside of the pageblock.
+	 */
+	if (unlikely(blockpfn > end_pfn))
+		blockpfn = end_pfn;
+
 	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 					nr_scanned, total_isolated);
 
@@ -674,6 +705,8 @@
 
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
+		bool is_lru;
+
 		/*
 		 * Periodically drop the lock (if held) regardless of its
 		 * contention, to give chance to IRQs. Abort async compaction
@@ -717,36 +750,35 @@
 		 * It's possible to migrate LRU pages and balloon pages
 		 * Skip any other type of page
 		 */
-		if (!PageLRU(page)) {
+		is_lru = PageLRU(page);
+		if (!is_lru) {
 			if (unlikely(balloon_page_movable(page))) {
 				if (balloon_page_isolate(page)) {
 					/* Successfully isolated */
 					goto isolate_success;
 				}
 			}
-			continue;
 		}
 
 		/*
-		 * PageLRU is set. lru_lock normally excludes isolation
-		 * splitting and collapsing (collapsing has already happened
-		 * if PageLRU is set) but the lock is not necessarily taken
-		 * here and it is wasteful to take it just to check transhuge.
-		 * Check TransHuge without lock and skip the whole pageblock if
-		 * it's either a transhuge or hugetlbfs page, as calling
-		 * compound_order() without preventing THP from splitting the
-		 * page underneath us may return surprising results.
+		 * Regardless of being on LRU, compound pages such as THP and
+		 * hugetlbfs are not to be compacted. We can potentially save
+		 * a lot of iterations if we skip them at once. The check is
+		 * racy, but we can consider only valid values and the only
+		 * danger is skipping too much.
 		 */
-		if (PageTransHuge(page)) {
-			if (!locked)
-				low_pfn = ALIGN(low_pfn + 1,
-						pageblock_nr_pages) - 1;
-			else
-				low_pfn += (1 << compound_order(page)) - 1;
+		if (PageCompound(page)) {
+			unsigned int comp_order = compound_order(page);
+
+			if (likely(comp_order < MAX_ORDER))
+				low_pfn += (1UL << comp_order) - 1;
 
 			continue;
 		}
 
+		if (!is_lru)
+			continue;
+
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
 		 * so avoid taking lru_lock and isolating it unnecessarily in an
@@ -763,11 +795,17 @@
 			if (!locked)
 				break;
 
-			/* Recheck PageLRU and PageTransHuge under lock */
+			/* Recheck PageLRU and PageCompound under lock */
 			if (!PageLRU(page))
 				continue;
-			if (PageTransHuge(page)) {
-				low_pfn += (1 << compound_order(page)) - 1;
+
+			/*
+			 * Page became compound since the non-locked check,
+			 * and it's on LRU. It can only be a THP so the order
+			 * is safe to read and it's 0 for tail pages.
+			 */
+			if (unlikely(PageCompound(page))) {
+				low_pfn += (1UL << compound_order(page)) - 1;
 				continue;
 			}
 		}
@@ -778,7 +816,7 @@
 		if (__isolate_lru_page(page, isolate_mode) != 0)
 			continue;
 
-		VM_BUG_ON_PAGE(PageTransCompound(page), page);
+		VM_BUG_ON_PAGE(PageCompound(page), page);
 
 		/* Successfully isolated */
 		del_page_from_lru_list(page, lruvec, page_lru(page));
@@ -898,6 +936,16 @@
 }
 
 /*
+ * Test whether the free scanner has reached the same or lower pageblock than
+ * the migration scanner, and compaction should thus terminate.
+ */
+static inline bool compact_scanners_met(struct compact_control *cc)
+{
+	return (cc->free_pfn >> pageblock_order)
+		<= (cc->migrate_pfn >> pageblock_order);
+}
+
+/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -933,8 +981,7 @@
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	for (; block_start_pfn >= low_pfn &&
-			cc->nr_migratepages > cc->nr_freepages;
+	for (; block_start_pfn >= low_pfn;
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
@@ -966,6 +1013,8 @@
 					block_end_pfn, freelist, false);
 
 		/*
+		 * If we isolated enough freepages, or aborted due to async
+		 * compaction being contended, terminate the loop.
 		 * Remember where the free scanner should restart next time,
 		 * which is where isolate_freepages_block() left off.
 		 * But if it scanned the whole pageblock, isolate_start_pfn
@@ -974,27 +1023,31 @@
 		 * In that case we will however want to restart at the start
 		 * of the previous pageblock.
 		 */
-		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
-				isolate_start_pfn :
-				block_start_pfn - pageblock_nr_pages;
-
-		/*
-		 * isolate_freepages_block() might have aborted due to async
-		 * compaction being contended
-		 */
-		if (cc->contended)
+		if ((cc->nr_freepages >= cc->nr_migratepages)
+							|| cc->contended) {
+			if (isolate_start_pfn >= block_end_pfn)
+				isolate_start_pfn =
+					block_start_pfn - pageblock_nr_pages;
 			break;
+		} else {
+			/*
+			 * isolate_freepages_block() should not terminate
+			 * prematurely unless contended, or isolated enough
+			 */
+			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+		}
 	}
 
 	/* split_free_page does not map the pages */
 	map_pages(freelist);
 
 	/*
-	 * If we crossed the migrate scanner, we want to keep it that way
-	 * so that compact_finished() may detect this
+	 * Record where the free scanner will restart next time. Either we
+	 * broke from the loop and set isolate_start_pfn based on the last
+	 * call to isolate_freepages_block(), or we met the migration scanner
+	 * and the loop terminated due to isolate_start_pfn < low_pfn
 	 */
-	if (block_start_pfn < low_pfn)
-		cc->free_pfn = cc->migrate_pfn;
+	cc->free_pfn = isolate_start_pfn;
 }
 
 /*
@@ -1062,6 +1115,7 @@
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long isolate_start_pfn;
 	struct page *page;
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
@@ -1110,6 +1164,7 @@
 			continue;
 
 		/* Perform the isolation */
+		isolate_start_pfn = low_pfn;
 		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
 								isolate_mode);
 
@@ -1119,6 +1174,15 @@
 		}
 
 		/*
+		 * Record where we could have freed pages by migration and not
+		 * yet flushed them to buddy allocator.
+		 * - this is the lowest page that could have been isolated and
+		 * then freed by migration.
+		 */
+		if (cc->nr_migratepages && !cc->last_migrated_pfn)
+			cc->last_migrated_pfn = isolate_start_pfn;
+
+		/*
 		 * Either we isolated something and proceed with migration. Or
 		 * we failed and compact_zone should decide if we should
 		 * continue or not.
@@ -1127,12 +1191,8 @@
 	}
 
 	acct_isolated(zone, cc);
-	/*
-	 * Record where migration scanner will be restarted. If we end up in
-	 * the same pageblock as the free scanner, make the scanners fully
-	 * meet so that compact_finished() terminates compaction.
-	 */
-	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
+	/* Record where migration scanner will be restarted. */
+	cc->migrate_pfn = low_pfn;
 
 	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
@@ -1147,11 +1207,9 @@
 		return COMPACT_PARTIAL;
 
 	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn) {
+	if (compact_scanners_met(cc)) {
 		/* Let the next compaction start anew. */
-		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
-		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
-		zone->compact_cached_free_pfn = zone_end_pfn(zone);
+		reset_cached_positions(zone);
 
 		/*
 		 * Mark that the PG_migrate_skip information should be cleared
@@ -1295,7 +1353,6 @@
 	unsigned long end_pfn = zone_end_pfn(zone);
 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	const bool sync = cc->mode != MIGRATE_ASYNC;
-	unsigned long last_migrated_pfn = 0;
 
 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -1333,6 +1390,7 @@
 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
 	}
+	cc->last_migrated_pfn = 0;
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
 				cc->free_pfn, end_pfn, sync);
@@ -1342,7 +1400,6 @@
 	while ((ret = compact_finished(zone, cc, migratetype)) ==
 						COMPACT_CONTINUE) {
 		int err;
-		unsigned long isolate_start_pfn = cc->migrate_pfn;
 
 		switch (isolate_migratepages(zone, cc)) {
 		case ISOLATE_ABORT:
@@ -1376,22 +1433,12 @@
 			 * migrate_pages() may return -ENOMEM when scanners meet
 			 * and we want compact_finished() to detect it
 			 */
-			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
+			if (err == -ENOMEM && !compact_scanners_met(cc)) {
 				ret = COMPACT_PARTIAL;
 				goto out;
 			}
 		}
 
-		/*
-		 * Record where we could have freed pages by migration and not
-		 * yet flushed them to buddy allocator. We use the pfn that
-		 * isolate_migratepages() started from in this loop iteration
-		 * - this is the lowest page that could have been isolated and
-		 * then freed by migration.
-		 */
-		if (!last_migrated_pfn)
-			last_migrated_pfn = isolate_start_pfn;
-
 check_drain:
 		/*
 		 * Has the migration scanner moved away from the previous
@@ -1400,18 +1447,18 @@
 		 * compact_finished() can detect immediately if allocation
 		 * would succeed.
 		 */
-		if (cc->order > 0 && last_migrated_pfn) {
+		if (cc->order > 0 && cc->last_migrated_pfn) {
 			int cpu;
 			unsigned long current_block_start =
 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
 
-			if (last_migrated_pfn < current_block_start) {
+			if (cc->last_migrated_pfn < current_block_start) {
 				cpu = get_cpu();
 				lru_add_drain_cpu(cpu);
 				drain_local_pages(zone);
 				put_cpu();
 				/* No more flushing until we migrate again */
-				last_migrated_pfn = 0;
+				cc->last_migrated_pfn = 0;
 			}
 		}
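
A note on the drain heuristic above: per-cpu lists are flushed only once the migration scanner has left the order-aligned block containing cc->last_migrated_pfn, so freed pages become visible to the allocator at most once per block. A minimal user-space sketch of the boundary test, with arbitrary pfn and order values:

#include <stdio.h>

/* Model of the trigger: drain only after the migration scanner has
 * left the order-aligned block containing last_migrated_pfn. */
static int should_drain(unsigned long migrate_pfn,
			unsigned long last_migrated_pfn, int order)
{
	unsigned long current_block_start =
		migrate_pfn & ~((1UL << order) - 1);

	return last_migrated_pfn && last_migrated_pfn < current_block_start;
}

int main(void)
{
	/* order-9 blocks are 512 pfns wide */
	printf("%d\n", should_drain(1024, 900, 9));	/* 1: crossed a block */
	printf("%d\n", should_drain(1000, 900, 9));	/* 0: same block */
	return 0;
}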
 
diff --git a/mm/dmapool.c b/mm/dmapool.c
index fd5fe43..71a8998 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -242,7 +242,7 @@
 	return page;
 }
 
-static inline int is_page_busy(struct dma_page *page)
+static inline bool is_page_busy(struct dma_page *page)
 {
 	return page->in_use != 0;
 }
@@ -271,6 +271,9 @@
 {
 	bool empty = false;
 
+	if (unlikely(!pool))
+		return;
+
 	mutex_lock(&pools_reg_lock);
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
@@ -334,7 +337,7 @@
 	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	page = pool_alloc_page(pool, mem_flags);
+	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
 	if (!page)
 		return NULL;
 
@@ -372,9 +375,14 @@
 			break;
 		}
 	}
-	memset(retval, POOL_POISON_ALLOCATED, pool->size);
+	if (!(mem_flags & __GFP_ZERO))
+		memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
 	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (mem_flags & __GFP_ZERO)
+		memset(retval, 0, pool->size);
+
 	return retval;
 }
 EXPORT_SYMBOL(dma_pool_alloc);
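
Since dma_pool_alloc() now honours __GFP_ZERO (and skips the POOL_POISON_ALLOCATED fill when it is set), callers can drop the follow-up memset(). A hedged driver-style sketch; the pool is assumed to have been created earlier with dma_pool_create():

#include <linux/dmapool.h>

/* Sketch: request zeroed pool memory directly. The pool itself is
 * assumed to exist, e.g. created at probe time with dma_pool_create(). */
static void *alloc_zeroed_desc(struct dma_pool *pool, dma_addr_t *handle)
{
	/* No memset(desc, 0, size) needed after this change. */
	return dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, handle);
}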
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index e10ccd2..23f744d 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -217,6 +217,35 @@
 	return (__force void *)__early_ioremap(phys_addr, size,
 					       FIXMAP_PAGE_NORMAL);
 }
+#ifdef FIXMAP_PAGE_RO
+void __init *
+early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+{
+	return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_RO);
+}
+#endif
+
+#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
+
+void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
+{
+	unsigned long slop, clen;
+	char *p;
+
+	while (size) {
+		slop = src & ~PAGE_MASK;
+		clen = size;
+		if (clen > MAX_MAP_CHUNK - slop)
+			clen = MAX_MAP_CHUNK - slop;
+		p = early_memremap(src & PAGE_MASK, clen + slop);
+		memcpy(dest, p + slop, clen);
+		early_memunmap(p, clen + slop);
+		dest += clen;
+		src += clen;
+		size -= clen;
+	}
+}
+
 #else /* CONFIG_MMU */
 
 void __init __iomem *
@@ -231,6 +260,11 @@
 {
 	return (void *)phys_addr;
 }
+void __init *
+early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+{
+	return (void *)phys_addr;
+}
 
 void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
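
copy_from_early_mem() copies in chunks of at most MAX_MAP_CHUNK, adjusting each chunk for the sub-page offset ("slop") of the source so every early_memremap() call starts page-aligned. A runnable user-space model of the same chunking arithmetic, with the map/unmap pair reduced to casts:

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define NR_FIX_BTMAPS	64
#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

/* Model: "map" a physical range by casting; the kernel uses early_memremap(). */
static void *map(unsigned long phys, unsigned long len) { (void)len; return (void *)phys; }
static void unmap(void *p, unsigned long len) { (void)p; (void)len; }

static void copy_chunked(void *dest, unsigned long src, unsigned long size)
{
	while (size) {
		unsigned long slop = src & ~PAGE_MASK;	/* offset into first page */
		unsigned long clen = size;

		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		/* Map from the page-aligned start, copy from the slop offset. */
		char *p = map(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		unmap(p, clen + slop);
		dest = (char *)dest + clen;
		src += clen;
		size -= clen;
	}
}

int main(void)
{
	static char s[300000] = "hello", d[300000];

	copy_chunked(d, (unsigned long)s, sizeof(s));	/* forces two chunks */
	printf("%s\n", d);
	return 0;
}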
diff --git a/mm/filemap.c b/mm/filemap.c
index 1283fc8..72940fb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -674,7 +674,7 @@
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			page = alloc_pages_exact_node(n, gfp, 0);
+			page = __alloc_pages_node(n, gfp, 0);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
 		return page;
@@ -2473,21 +2473,6 @@
 						iov_iter_count(i));
 
 again:
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 *
-		 * Not only is this an optimisation, but it is also required
-		 * to check that the address is actually valid, when atomic
-		 * usercopies are used, below.
-		 */
-		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-			status = -EFAULT;
-			break;
-		}
-
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status < 0))
@@ -2495,8 +2480,17 @@
 
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
-
+		/*
+		 * 'page' is now locked.  If we are trying to copy from a
+		 * mapping of 'page' in userspace, the copy might fault and
+		 * would need PageUptodate() to complete.  But the page cannot be
+		 * made Uptodate without acquiring the page lock, which we hold.
+		 * Deadlock.  Avoid with pagefault_disable().  Fix up below with
+		 * iov_iter_fault_in_readable().
+		 */
+		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+		pagefault_enable();
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2519,6 +2513,14 @@
 			 */
 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_single_seg_count(i));
+			/*
+			 * This is the fallback to recover if the copy from
+			 * userspace above faults.
+			 */
+			if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+				status = -EFAULT;
+				break;
+			}
 			goto again;
 		}
 		pos += copied;
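
The rewritten write loop is optimistic: it attempts the atomic copy with page faults disabled and only calls iov_iter_fault_in_readable() on the retry path, instead of faulting the source in up front on every iteration. A user-space model of that optimistic-copy-then-fault-in protocol, where the first attempt is forced to fail:

#include <stdio.h>
#include <string.h>

/* Model of an "atomic" copy that fails unless the source was faulted in. */
static int faulted_in;

static size_t copy_atomic(char *dst, const char *src, size_t len)
{
	if (!faulted_in)
		return 0;			/* would have faulted */
	memcpy(dst, src, len);
	return len;
}

static void fault_in(const char *src, size_t len)
{
	(void)src; (void)len;
	faulted_in = 1;				/* touch the source pages */
}

int main(void)
{
	char dst[16];
	const char *src = "data";
	size_t copied;

again:
	copied = copy_atomic(dst, src, 5);	/* pagefault_disable()d copy */
	if (copied == 0) {
		fault_in(src, 5);		/* iov_iter_fault_in_readable() */
		goto again;			/* retry the iteration */
	}
	printf("%s\n", dst);
	return 0;
}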
diff --git a/mm/gup.c b/mm/gup.c
index 6297f6b..a798293 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -12,7 +12,9 @@
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
+
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include "internal.h"
 
@@ -32,6 +34,30 @@
 	return NULL;
 }
 
+static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+		pte_t *pte, unsigned int flags)
+{
+	/* No page to get reference */
+	if (flags & FOLL_GET)
+		return -EFAULT;
+
+	if (flags & FOLL_TOUCH) {
+		pte_t entry = *pte;
+
+		if (flags & FOLL_WRITE)
+			entry = pte_mkdirty(entry);
+		entry = pte_mkyoung(entry);
+
+		if (!pte_same(*pte, entry)) {
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			update_mmu_cache(vma, address, pte);
+		}
+	}
+
+	/* Proper page table entry exists, but no corresponding struct page */
+	return -EEXIST;
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -73,10 +99,21 @@
 
 	page = vm_normal_page(vma, address, pte);
 	if (unlikely(!page)) {
-		if ((flags & FOLL_DUMP) ||
-		    !is_zero_pfn(pte_pfn(pte)))
-			goto bad_page;
-		page = pte_page(pte);
+		if (flags & FOLL_DUMP) {
+			/* Avoid special (like zero) pages in core dumps */
+			page = ERR_PTR(-EFAULT);
+			goto out;
+		}
+
+		if (is_zero_pfn(pte_pfn(pte))) {
+			page = pte_page(pte);
+		} else {
+			int ret;
+
+			ret = follow_pfn_pte(vma, address, ptep, flags);
+			page = ERR_PTR(ret);
+			goto out;
+		}
 	}
 
 	if (flags & FOLL_GET)
@@ -114,12 +151,9 @@
 			unlock_page(page);
 		}
 	}
+out:
 	pte_unmap_unlock(ptep, ptl);
 	return page;
-bad_page:
-	pte_unmap_unlock(ptep, ptl);
-	return ERR_PTR(-EFAULT);
-
 no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
@@ -489,9 +523,15 @@
 				goto next_page;
 			}
 			BUG();
-		}
-		if (IS_ERR(page))
+		} else if (PTR_ERR(page) == -EEXIST) {
+			/*
+			 * Proper page table entry exists, but no corresponding
+			 * struct page.
+			 */
+			goto next_page;
+		} else if (IS_ERR(page)) {
 			return i ? i : PTR_ERR(page);
+		}
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
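
__get_user_pages() now has to distinguish three outcomes from follow_page_pte(): a usable page, a hard error, and ERR_PTR(-EEXIST) meaning a valid PTE with no struct page behind it, which is simply skipped. A self-contained model of the error-pointer convention and the new skip case (the ERR_PTR helpers are re-declared locally):

#include <stdio.h>

#define EFAULT 14
#define EEXIST 17

/* Minimal re-declarations of the kernel's error-pointer helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

static void handle(void *page)
{
	if (PTR_ERR(page) == -EEXIST)
		printf("pfn mapping: skip, no struct page\n");
	else if (IS_ERR(page))
		printf("error %ld: abort\n", PTR_ERR(page));
	else
		printf("got page %p\n", page);
}

int main(void)
{
	int dummy;

	handle(&dummy);			/* normal page */
	handle(ERR_PTR(-EEXIST));	/* new: proper PTE, no struct page */
	handle(ERR_PTR(-EFAULT));	/* hard failure */
	return 0;
}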
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 097c7a4..b16279c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -16,6 +16,7 @@
 #include <linux/swap.h>
 #include <linux/shrinker.h>
 #include <linux/mm_inline.h>
+#include <linux/dax.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
@@ -23,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
 #include <linux/hashtable.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -104,7 +106,7 @@
 };
 
 
-static int set_recommended_min_free_kbytes(void)
+static void set_recommended_min_free_kbytes(void)
 {
 	struct zone *zone;
 	int nr_zones = 0;
@@ -139,7 +141,6 @@
 		min_free_kbytes = recommended_min;
 	}
 	setup_per_zone_wmarks();
-	return 0;
 }
 
 static int start_stop_khugepaged(void)
@@ -171,12 +172,7 @@
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 
-static inline bool is_huge_zero_pmd(pmd_t pmd)
-{
-	return is_huge_zero_page(pmd_page(pmd));
-}
-
-static struct page *get_huge_zero_page(void)
+struct page *get_huge_zero_page(void)
 {
 	struct page *zero_page;
 retry:
@@ -716,21 +712,27 @@
 
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
-					unsigned long haddr, pmd_t *pmd,
-					struct page *page, gfp_t gfp)
+					unsigned long address, pmd_t *pmd,
+					struct page *page, gfp_t gfp,
+					unsigned int flags)
 {
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	spinlock_t *ptl;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
-		return VM_FAULT_OOM;
+	if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
+		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
+		return VM_FAULT_FALLBACK;
+	}
 
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable)) {
 		mem_cgroup_cancel_charge(page, memcg);
+		put_page(page);
 		return VM_FAULT_OOM;
 	}
 
@@ -750,6 +752,21 @@
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
+
+		/* Deliver the page fault to userland */
+		if (userfaultfd_missing(vma)) {
+			int ret;
+
+			spin_unlock(ptl);
+			mem_cgroup_cancel_charge(page, memcg);
+			put_page(page);
+			pte_free(mm, pgtable);
+			ret = handle_userfault(vma, address, flags,
+					       VM_UFFD_MISSING);
+			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+			return ret;
+		}
+
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr);
@@ -760,6 +777,7 @@
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		atomic_long_inc(&mm->nr_ptes);
 		spin_unlock(ptl);
+		count_vm_event(THP_FAULT_ALLOC);
 	}
 
 	return 0;
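
Note the ownership change: __do_huge_pmd_anonymous_page() now drops the page reference itself on its failure paths, so the caller no longer pairs the call with put_page(). A small runnable model of that callee-cleans-up-on-failure convention:

#include <stdio.h>

struct page { int refcount; };

static void put_page(struct page *p) { p->refcount--; }

/* Model: the callee owns the reference it was handed; on any failure it
 * drops it itself, so the caller simply propagates the return value. */
static int model_anon_fault(struct page *page, int charge_ok)
{
	if (!charge_ok) {
		put_page(page);		/* error path cleans up */
		return -1;		/* VM_FAULT_FALLBACK in the kernel */
	}
	/* success: the reference now lives in the page tables */
	return 0;
}

int main(void)
{
	struct page page = { .refcount = 1 };

	if (model_anon_fault(&page, 0))
		printf("failed, refcount now %d (no caller put_page)\n",
		       page.refcount);
	return 0;
}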
@@ -806,6 +824,7 @@
 		pgtable_t pgtable;
 		struct page *zero_page;
 		bool set;
+		int ret;
 		pgtable = pte_alloc_one(mm, haddr);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
@@ -816,14 +835,28 @@
 			return VM_FAULT_FALLBACK;
 		}
 		ptl = pmd_lock(mm, pmd);
-		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-				zero_page);
-		spin_unlock(ptl);
+		ret = 0;
+		set = false;
+		if (pmd_none(*pmd)) {
+			if (userfaultfd_missing(vma)) {
+				spin_unlock(ptl);
+				ret = handle_userfault(vma, address, flags,
+						       VM_UFFD_MISSING);
+				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+			} else {
+				set_huge_zero_page(pgtable, mm, vma,
+						   haddr, pmd,
+						   zero_page);
+				spin_unlock(ptl);
+				set = true;
+			}
+		} else
+			spin_unlock(ptl);
 		if (!set) {
 			pte_free(mm, pgtable);
 			put_huge_zero_page();
 		}
-		return 0;
+		return ret;
 	}
 	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
@@ -831,14 +864,51 @@
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
-		put_page(page);
-		count_vm_event(THP_FAULT_FALLBACK);
-		return VM_FAULT_FALLBACK;
-	}
+	return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
+					    flags);
+}
 
-	count_vm_event(THP_FAULT_ALLOC);
-	return 0;
+static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t entry;
+	spinlock_t *ptl;
+
+	ptl = pmd_lock(mm, pmd);
+	if (pmd_none(*pmd)) {
+		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
+		if (write) {
+			entry = pmd_mkyoung(pmd_mkdirty(entry));
+			entry = maybe_pmd_mkwrite(entry, vma);
+		}
+		set_pmd_at(mm, addr, pmd, entry);
+		update_mmu_cache_pmd(vma, addr, pmd);
+	}
+	spin_unlock(ptl);
+}
+
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+			pmd_t *pmd, unsigned long pfn, bool write)
+{
+	pgprot_t pgprot = vma->vm_page_prot;
+	/*
+	 * If we had pmd_special, we could avoid all these restrictions,
+	 * but we need to be consistent with PTEs and architectures that
+	 * can't support a 'special' bit.
+	 */
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+						(VM_PFNMAP|VM_MIXEDMAP));
+	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return VM_FAULT_SIGBUS;
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return VM_FAULT_SIGBUS;
+	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+	return VM_FAULT_NOPAGE;
 }
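
vmf_insert_pfn_pmd() gives pfn-backed mappings (DAX being the in-tree user) a PMD-sized counterpart to vmf_insert_pfn(). A hedged kernel-style sketch of a huge-fault handler built on it; my_lookup_pfn() is a made-up helper standing in for however a driver resolves the faulting address to a pfn:

/* Sketch only, not an in-tree handler. */
static int my_huge_fault(struct vm_area_struct *vma, unsigned long addr,
			 pmd_t *pmd, unsigned int flags)
{
	unsigned long pfn = my_lookup_pfn(vma, addr);	/* hypothetical */
	bool write = flags & FAULT_FLAG_WRITE;

	/* Installs the huge mapping and returns a VM_FAULT_* code. */
	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn, write);
}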
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -873,16 +943,14 @@
 	 */
 	if (is_huge_zero_pmd(pmd)) {
 		struct page *zero_page;
-		bool set;
 		/*
 		 * get_huge_zero_page() will never allocate a new page here,
 		 * since we already have a zero page to copy. It just takes a
 		 * reference.
 		 */
 		zero_page = get_huge_zero_page();
-		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
 				zero_page);
-		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
 		ret = 0;
 		goto out_unlock;
 	}
@@ -1387,41 +1455,41 @@
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
+	pmd_t orig_pmd;
 	spinlock_t *ptl;
-	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		struct page *page;
-		pgtable_t pgtable;
-		pmd_t orig_pmd;
-		/*
-		 * For architectures like ppc64 we look at deposited pgtable
-		 * when calling pmdp_huge_get_and_clear. So do the
-		 * pgtable_trans_huge_withdraw after finishing pmdp related
-		 * operations.
-		 */
-		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-							tlb->fullmm);
-		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
-		if (is_huge_zero_pmd(orig_pmd)) {
-			atomic_long_dec(&tlb->mm->nr_ptes);
-			spin_unlock(ptl);
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
+		return 0;
+	/*
+	 * For architectures like ppc64 we look at deposited pgtable
+	 * when calling pmdp_huge_get_and_clear. So do the
+	 * pgtable_trans_huge_withdraw after finishing pmdp related
+	 * operations.
+	 */
+	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+			tlb->fullmm);
+	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+	if (vma_is_dax(vma)) {
+		spin_unlock(ptl);
+		if (is_huge_zero_pmd(orig_pmd))
 			put_huge_zero_page();
-		} else {
-			page = pmd_page(orig_pmd);
-			page_remove_rmap(page);
-			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
-			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-			VM_BUG_ON_PAGE(!PageHead(page), page);
-			atomic_long_dec(&tlb->mm->nr_ptes);
-			spin_unlock(ptl);
-			tlb_remove_page(tlb, page);
-		}
-		pte_free(tlb->mm, pgtable);
-		ret = 1;
+	} else if (is_huge_zero_pmd(orig_pmd)) {
+		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
+		atomic_long_dec(&tlb->mm->nr_ptes);
+		spin_unlock(ptl);
+		put_huge_zero_page();
+	} else {
+		struct page *page = pmd_page(orig_pmd);
+		page_remove_rmap(page);
+		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+		VM_BUG_ON_PAGE(!PageHead(page), page);
+		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
+		atomic_long_dec(&tlb->mm->nr_ptes);
+		spin_unlock(ptl);
+		tlb_remove_page(tlb, page);
 	}
-	return ret;
+	return 1;
 }
 
 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
@@ -2133,7 +2201,8 @@
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			if (++none_or_zero <= khugepaged_max_ptes_none)
+			if (!userfaultfd_armed(vma) &&
+			    ++none_or_zero <= khugepaged_max_ptes_none)
 				continue;
 			else
 				goto out;
@@ -2257,8 +2326,12 @@
 
 static void khugepaged_alloc_sleep(void)
 {
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+	DEFINE_WAIT(wait);
+
+	add_wait_queue(&khugepaged_wait, &wait);
+	freezable_schedule_timeout_interruptible(
+		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+	remove_wait_queue(&khugepaged_wait, &wait);
 }
 
 static int khugepaged_node_load[MAX_NUMNODES];
@@ -2345,7 +2418,7 @@
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
+	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
@@ -2586,7 +2659,8 @@
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			if (++none_or_zero <= khugepaged_max_ptes_none)
+			if (!userfaultfd_armed(vma) &&
+			    ++none_or_zero <= khugepaged_max_ptes_none)
 				continue;
 			else
 				goto out_unmap;
@@ -2882,7 +2956,7 @@
 		pmd_t *pmd)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	unsigned long mmun_start;	/* For mmu_notifiers */
@@ -2895,25 +2969,27 @@
 again:
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_trans_huge(*pmd))) {
-		spin_unlock(ptl);
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-		return;
-	}
-	if (is_huge_zero_pmd(*pmd)) {
+	if (unlikely(!pmd_trans_huge(*pmd)))
+		goto unlock;
+	if (vma_is_dax(vma)) {
+		pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+		if (is_huge_zero_pmd(_pmd))
+			put_huge_zero_page();
+	} else if (is_huge_zero_pmd(*pmd)) {
 		__split_huge_zero_page_pmd(vma, haddr, pmd);
-		spin_unlock(ptl);
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-		return;
+	} else {
+		page = pmd_page(*pmd);
+		VM_BUG_ON_PAGE(!page_count(page), page);
+		get_page(page);
 	}
-	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!page_count(page), page);
-	get_page(page);
+ unlock:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
-	split_huge_page(page);
+	if (!page)
+		return;
 
+	split_huge_page(page);
 	put_page(page);
 
 	/*
@@ -2962,7 +3038,7 @@
 	split_huge_page_pmd_mm(mm, address, pmd);
 }
 
-void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+void vma_adjust_trans_huge(struct vm_area_struct *vma,
 			     unsigned long start,
 			     unsigned long end,
 			     long adjust_next)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a8c3087..999fb0a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -64,7 +64,7 @@
  * prevent spurious OOMs when the hugepage pool is fully utilized.
  */
 static int num_fault_mutexes;
-static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
+struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -240,11 +240,14 @@
 
 /*
  * Add the huge page range represented by [f, t) to the reserve
- * map.  Existing regions will be expanded to accommodate the
- * specified range.  We know only existing regions need to be
- * expanded, because region_add is only called after region_chg
- * with the same range.  If a new file_region structure must
- * be allocated, it is done in region_chg.
+ * map.  In the normal case, existing regions will be expanded
+ * to accommodate the specified range.  Sufficient regions should
+ * exist for expansion due to the previous call to region_chg
+ * with the same range.  However, it is possible that region_del
+ * could have been called after region_chg and modified the map
+ * in such a way that no region exists to be expanded.  In this
+ * case, pull a region descriptor from the cache associated with
+ * the map and use that for the new range.
  *
  * Return the number of new huge pages added to the map.  This
  * number is greater than or equal to zero.
@@ -261,6 +264,28 @@
 		if (f <= rg->to)
 			break;
 
+	/*
+	 * If no region exists which can be expanded to include the
+	 * specified range, the list must have been modified by an
+	 * interleaving call to region_del().  Pull a region descriptor
+	 * from the cache and use it for this range.
+	 */
+	if (&rg->link == head || t < rg->from) {
+		VM_BUG_ON(resv->region_cache_count <= 0);
+
+		resv->region_cache_count--;
+		nrg = list_first_entry(&resv->region_cache, struct file_region,
+					link);
+		list_del(&nrg->link);
+
+		nrg->from = f;
+		nrg->to = t;
+		list_add(&nrg->link, rg->link.prev);
+
+		add += t - f;
+		goto out_locked;
+	}
+
 	/* Round our left edge to the current segment if it encloses us. */
 	if (f > rg->from)
 		f = rg->from;
@@ -294,6 +319,8 @@
 	add += t - nrg->to;		/* Added to end of region */
 	nrg->to = t;
 
+out_locked:
+	resv->adds_in_progress--;
 	spin_unlock(&resv->lock);
 	VM_BUG_ON(add < 0);
 	return add;
@@ -312,11 +339,14 @@
  * so that the subsequent region_add call will have all the
  * regions it needs and will not fail.
  *
- * Returns the number of huge pages that need to be added
- * to the existing reservation map for the range [f, t).
- * This number is greater or equal to zero.  -ENOMEM is
- * returned if a new file_region structure is needed and can
- * not be allocated.
+ * Upon entry, region_chg will also examine the cache of region descriptors
+ * associated with the map.  If there are not enough descriptors cached, one
+ * will be allocated for the in progress add operation.
+ *
+ * Returns the number of huge pages that need to be added to the existing
+ * reservation map for the range [f, t).  This number is greater or equal to
+ * zero.  -ENOMEM is returned if a new file_region structure or cache entry
+ * is needed and can not be allocated.
  */
 static long region_chg(struct resv_map *resv, long f, long t)
 {
@@ -326,6 +356,31 @@
 
 retry:
 	spin_lock(&resv->lock);
+retry_locked:
+	resv->adds_in_progress++;
+
+	/*
+	 * Check for sufficient descriptors in the cache to accommodate
+	 * the number of in progress add operations.
+	 */
+	if (resv->adds_in_progress > resv->region_cache_count) {
+		struct file_region *trg;
+
+		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
+		/* Must drop lock to allocate a new descriptor. */
+		resv->adds_in_progress--;
+		spin_unlock(&resv->lock);
+
+		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
+		if (!trg)
+			return -ENOMEM;
+
+		spin_lock(&resv->lock);
+		list_add(&trg->link, &resv->region_cache);
+		resv->region_cache_count++;
+		goto retry_locked;
+	}
+
 	/* Locate the region we are before or in. */
 	list_for_each_entry(rg, head, link)
 		if (f <= rg->to)
@@ -336,6 +391,7 @@
 	 * size such that we can guarantee to record the reservation. */
 	if (&rg->link == head || t < rg->from) {
 		if (!nrg) {
+			resv->adds_in_progress--;
 			spin_unlock(&resv->lock);
 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 			if (!nrg)
@@ -385,43 +441,131 @@
 }
 
 /*
- * Truncate the reserve map at index 'end'.  Modify/truncate any
- * region which contains end.  Delete any regions past end.
- * Return the number of huge pages removed from the map.
+ * Abort the in progress add operation.  The adds_in_progress field
+ * of the resv_map keeps track of the operations in progress between
+ * calls to region_chg and region_add.  Operations are sometimes
+ * aborted after the call to region_chg.  In such cases, region_abort
+ * is called to decrement the adds_in_progress counter.
+ *
+ * NOTE: The range arguments [f, t) are not needed or used in this
+ * routine.  They are kept to make reading the calling code easier as
+ * arguments will match the associated region_chg call.
  */
-static long region_truncate(struct resv_map *resv, long end)
+static void region_abort(struct resv_map *resv, long f, long t)
+{
+	spin_lock(&resv->lock);
+	VM_BUG_ON(!resv->region_cache_count);
+	resv->adds_in_progress--;
+	spin_unlock(&resv->lock);
+}
+
+/*
+ * Delete the specified range [f, t) from the reserve map.  If the
+ * t parameter is LONG_MAX, this indicates that ALL regions after f
+ * should be deleted.  Locate the regions which intersect [f, t)
+ * and either trim, delete or split the existing regions.
+ *
+ * Returns the number of huge pages deleted from the reserve map.
+ * In the normal case, the return value is zero or more.  In the
+ * case where a region must be split, a new region descriptor must
+ * be allocated.  If the allocation fails, -ENOMEM will be returned.
+ * NOTE: If the parameter t == LONG_MAX, then we will never split
+ * a region and possibly return -ENOMEM.  Callers specifying
+ * t == LONG_MAX do not need to check for -ENOMEM error.
+ */
+static long region_del(struct resv_map *resv, long f, long t)
 {
 	struct list_head *head = &resv->regions;
 	struct file_region *rg, *trg;
-	long chg = 0;
+	struct file_region *nrg = NULL;
+	long del = 0;
 
+retry:
 	spin_lock(&resv->lock);
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (end <= rg->to)
+	list_for_each_entry_safe(rg, trg, head, link) {
+		if (rg->to <= f)
+			continue;
+		if (rg->from >= t)
 			break;
-	if (&rg->link == head)
-		goto out;
 
-	/* If we are in the middle of a region then adjust it. */
-	if (end > rg->from) {
-		chg = rg->to - end;
-		rg->to = end;
-		rg = list_entry(rg->link.next, typeof(*rg), link);
+		if (f > rg->from && t < rg->to) { /* Must split region */
+			/*
+			 * Check for an entry in the cache before dropping
+			 * lock and attempting allocation.
+			 */
+			if (!nrg &&
+			    resv->region_cache_count > resv->adds_in_progress) {
+				nrg = list_first_entry(&resv->region_cache,
+							struct file_region,
+							link);
+				list_del(&nrg->link);
+				resv->region_cache_count--;
+			}
+
+			if (!nrg) {
+				spin_unlock(&resv->lock);
+				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+				if (!nrg)
+					return -ENOMEM;
+				goto retry;
+			}
+
+			del += t - f;
+
+			/* New entry for end of split region */
+			nrg->from = t;
+			nrg->to = rg->to;
+			INIT_LIST_HEAD(&nrg->link);
+
+			/* Original entry is trimmed */
+			rg->to = f;
+
+			list_add(&nrg->link, &rg->link);
+			nrg = NULL;
+			break;
+		}
+
+		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
+			del += rg->to - rg->from;
+			list_del(&rg->link);
+			kfree(rg);
+			continue;
+		}
+
+		if (f <= rg->from) {	/* Trim beginning of region */
+			del += t - rg->from;
+			rg->from = t;
+		} else {		/* Trim end of region */
+			del += rg->to - f;
+			rg->to = f;
+		}
 	}
 
-	/* Drop any remaining regions. */
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		chg += rg->to - rg->from;
-		list_del(&rg->link);
-		kfree(rg);
-	}
-
-out:
 	spin_unlock(&resv->lock);
-	return chg;
+	kfree(nrg);
+	return del;
+}
+
+/*
+ * A rare out of memory error was encountered which prevented removal of
+ * the reserve map region for a page.  The huge page itself was freed
+ * and removed from the page cache.  This routine will adjust the subpool
+ * usage count, and the global reserve count if needed.  By incrementing
+ * these counts, the reserve map entry which could not be deleted will
+ * appear as a "reserved" entry instead of simply dangling with incorrect
+ * counts.
+ */
+void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
+{
+	struct hugepage_subpool *spool = subpool_inode(inode);
+	long rsv_adjust;
+
+	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
+	if (restore_reserve && rsv_adjust) {
+		struct hstate *h = hstate_inode(inode);
+
+		hugetlb_acct_memory(h, 1);
+	}
 }
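
Replacing region_truncate() with region_del() introduces the split case: punching a hole strictly inside an existing region needs one extra descriptor, which is why the routine can now fail with -ENOMEM. A runnable user-space model of the interval-list logic with trim, remove, and split (the descriptor cache and locking are omitted; allocation is direct):

#include <stdio.h>
#include <stdlib.h>

struct region { long from, to; struct region *next; };

static struct region *head;

/* Model of region_del(): trim, remove, or split regions intersecting [f, t). */
static long region_del(long f, long t)
{
	struct region **pp = &head, *rg, *nrg;
	long del = 0;

	while ((rg = *pp)) {
		if (rg->to <= f) { pp = &rg->next; continue; }
		if (rg->from >= t) break;

		if (f > rg->from && t < rg->to) {	/* split */
			nrg = malloc(sizeof(*nrg));
			if (!nrg)
				return -1;	/* kernel returns -ENOMEM */
			nrg->from = t;
			nrg->to = rg->to;
			nrg->next = rg->next;
			rg->to = f;		/* original keeps the front */
			rg->next = nrg;
			del += t - f;
			break;
		}
		if (f <= rg->from && t >= rg->to) {	/* remove whole region */
			del += rg->to - rg->from;
			*pp = rg->next;
			free(rg);
			continue;
		}
		if (f <= rg->from) {			/* trim front */
			del += t - rg->from;
			rg->from = t;
		} else {				/* trim tail */
			del += rg->to - f;
			rg->to = f;
		}
		pp = &rg->next;
	}
	return del;
}

int main(void)
{
	static struct region r = { 0, 100, NULL };

	head = &r;
	printf("deleted %ld pages\n", region_del(40, 60));	/* split: 20 */
	for (struct region *rg = head; rg; rg = rg->next)
		printf("[%ld,%ld)\n", rg->from, rg->to);	/* [0,40) [60,100) */
	return 0;
}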
 
 /*
@@ -544,22 +688,44 @@
 struct resv_map *resv_map_alloc(void)
 {
 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
-	if (!resv_map)
+	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
+
+	if (!resv_map || !rg) {
+		kfree(resv_map);
+		kfree(rg);
 		return NULL;
+	}
 
 	kref_init(&resv_map->refs);
 	spin_lock_init(&resv_map->lock);
 	INIT_LIST_HEAD(&resv_map->regions);
 
+	resv_map->adds_in_progress = 0;
+
+	INIT_LIST_HEAD(&resv_map->region_cache);
+	list_add(&rg->link, &resv_map->region_cache);
+	resv_map->region_cache_count = 1;
+
 	return resv_map;
 }
 
 void resv_map_release(struct kref *ref)
 {
 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
+	struct list_head *head = &resv_map->region_cache;
+	struct file_region *rg, *trg;
 
 	/* Clear out any active regions before we release the map. */
-	region_truncate(resv_map, 0);
+	region_del(resv_map, 0, LONG_MAX);
+
+	/* ... and any entries left in the cache */
+	list_for_each_entry_safe(rg, trg, head, link) {
+		list_del(&rg->link);
+		kfree(rg);
+	}
+
+	VM_BUG_ON(resv_map->adds_in_progress);
+
 	kfree(resv_map);
 }
 
@@ -616,7 +782,7 @@
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma, long chg)
+static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 {
 	if (vma->vm_flags & VM_NORESERVE) {
 		/*
@@ -629,23 +795,34 @@
 		 * properly, so add work-around here.
 		 */
 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
-			return 1;
+			return true;
 		else
-			return 0;
+			return false;
 	}
 
 	/* Shared mappings always use reserves */
-	if (vma->vm_flags & VM_MAYSHARE)
-		return 1;
+	if (vma->vm_flags & VM_MAYSHARE) {
+		/*
+		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
+		 * be a region map for all pages.  The only situation where
+		 * there is no region map is if a hole was punched via
+		 * fallocate.  In this case, there really are no reserves to
+		 * use.  This situation is indicated if chg != 0.
+		 */
+		if (chg)
+			return false;
+		else
+			return true;
+	}
 
 	/*
 	 * Only the process that called mmap() has reserves for
 	 * private mappings.
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static void enqueue_huge_page(struct hstate *h, struct page *page)
@@ -1154,7 +1331,7 @@
 {
 	struct page *page;
 
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
@@ -1306,7 +1483,7 @@
 				   __GFP_REPEAT|__GFP_NOWARN,
 				   huge_page_order(h));
 	else
-		page = alloc_pages_exact_node(nid,
+		page = __alloc_pages_node(nid,
 			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
@@ -1473,16 +1650,19 @@
 	}
 }
 
+
 /*
- * vma_needs_reservation and vma_commit_reservation are used by the huge
- * page allocation routines to manage reservations.
+ * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
+ * are used by the huge page allocation routines to manage reservations.
  *
  * vma_needs_reservation is called to determine if the huge page at addr
  * within the vma has an associated reservation.  If a reservation is
  * needed, the value 1 is returned.  The caller is then responsible for
  * managing the global reservation and subpool usage counts.  After
  * the huge page has been allocated, vma_commit_reservation is called
- * to add the page to the reservation map.
+ * to add the page to the reservation map.  If the page allocation fails,
+ * the reservation must be ended instead of committed.  vma_end_reservation
+ * is called in such cases.
  *
  * In the normal case, vma_commit_reservation returns the same value
  * as the preceding vma_needs_reservation call.  The only time this
@@ -1490,9 +1670,14 @@
  * is the responsibility of the caller to notice the difference and
  * take appropriate action.
  */
+enum vma_resv_mode {
+	VMA_NEEDS_RESV,
+	VMA_COMMIT_RESV,
+	VMA_END_RESV,
+};
 static long __vma_reservation_common(struct hstate *h,
 				struct vm_area_struct *vma, unsigned long addr,
-				bool commit)
+				enum vma_resv_mode mode)
 {
 	struct resv_map *resv;
 	pgoff_t idx;
@@ -1503,10 +1688,20 @@
 		return 1;
 
 	idx = vma_hugecache_offset(h, vma, addr);
-	if (commit)
-		ret = region_add(resv, idx, idx + 1);
-	else
+	switch (mode) {
+	case VMA_NEEDS_RESV:
 		ret = region_chg(resv, idx, idx + 1);
+		break;
+	case VMA_COMMIT_RESV:
+		ret = region_add(resv, idx, idx + 1);
+		break;
+	case VMA_END_RESV:
+		region_abort(resv, idx, idx + 1);
+		ret = 0;
+		break;
+	default:
+		BUG();
+	}
 
 	if (vma->vm_flags & VM_MAYSHARE)
 		return ret;
@@ -1517,47 +1712,79 @@
 static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	return __vma_reservation_common(h, vma, addr, false);
+	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
 }
 
 static long vma_commit_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	return __vma_reservation_common(h, vma, addr, true);
+	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
 }
 
-static struct page *alloc_huge_page(struct vm_area_struct *vma,
+static void vma_end_reservation(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
+}
+
+struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
-	long chg, commit;
+	long map_chg, map_commit;
+	long gbl_chg;
 	int ret, idx;
 	struct hugetlb_cgroup *h_cg;
 
 	idx = hstate_index(h);
 	/*
-	 * Processes that did not create the mapping will have no
-	 * reserves and will not have accounted against subpool
-	 * limit. Check that the subpool limit can be made before
-	 * satisfying the allocation MAP_NORESERVE mappings may also
-	 * need pages and subpool limit allocated allocated if no reserve
-	 * mapping overlaps.
+	 * Examine the region/reserve map to determine if the process
+	 * has a reservation for the page to be allocated.  A return
+	 * code of zero indicates a reservation exists (no change).
 	 */
-	chg = vma_needs_reservation(h, vma, addr);
-	if (chg < 0)
+	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
+	if (map_chg < 0)
 		return ERR_PTR(-ENOMEM);
-	if (chg || avoid_reserve)
-		if (hugepage_subpool_get_pages(spool, 1) < 0)
+
+	/*
+	 * Processes that did not create the mapping will have no
+	 * reserves as indicated by the region/reserve map. Check
+	 * that the allocation will not exceed the subpool limit.
+	 * Allocations for MAP_NORESERVE mappings also need to be
+	 * checked against any subpool limit.
+	 */
+	if (map_chg || avoid_reserve) {
+		gbl_chg = hugepage_subpool_get_pages(spool, 1);
+		if (gbl_chg < 0) {
+			vma_end_reservation(h, vma, addr);
 			return ERR_PTR(-ENOSPC);
+		}
+
+		/*
+		 * Even though there was no reservation in the region/reserve
+		 * map, there could be reservations associated with the
+		 * subpool that can be used.  This would be indicated if the
+		 * return value of hugepage_subpool_get_pages() is zero.
+		 * However, if avoid_reserve is specified we still avoid even
+		 * the subpool reservations.
+		 */
+		if (avoid_reserve)
+			gbl_chg = 1;
+	}
 
 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 	if (ret)
 		goto out_subpool_put;
 
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
+	/*
+	 * gbl_chg is passed to indicate whether or not a page must be taken
+	 * from the global free pool (global change).  gbl_chg == 0 indicates
+	 * a reservation exists for the allocation.
+	 */
+	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
 	if (!page) {
 		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
@@ -1573,8 +1800,8 @@
 
 	set_page_private(page, (unsigned long)spool);
 
-	commit = vma_commit_reservation(h, vma, addr);
-	if (unlikely(chg > commit)) {
+	map_commit = vma_commit_reservation(h, vma, addr);
+	if (unlikely(map_chg > map_commit)) {
 		/*
 		 * The page was added to the reservation map between
 		 * vma_needs_reservation and vma_commit_reservation.
@@ -1594,8 +1821,9 @@
 out_uncharge_cgroup:
 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
 out_subpool_put:
-	if (chg || avoid_reserve)
+	if (map_chg || avoid_reserve)
 		hugepage_subpool_put_pages(spool, 1);
+	vma_end_reservation(h, vma, addr);
 	return ERR_PTR(-ENOSPC);
 }
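
The reworked alloc_huge_page() must pair every vma_needs_reservation() with exactly one vma_commit_reservation() on success or vma_end_reservation() on any failure path, otherwise adds_in_progress leaks. A condensed runnable model of that lifecycle:

#include <stdio.h>

static int adds_in_progress;

static long vma_needs_reservation(void)  { adds_in_progress++; return 1; }
static long vma_commit_reservation(void) { adds_in_progress--; return 1; }
static void vma_end_reservation(void)    { adds_in_progress--; }

/* Model: every needs() is balanced by exactly one commit (success)
 * or end (any failure after the chg call). */
static int alloc_page_model(int subpool_fails)
{
	long map_chg = vma_needs_reservation();

	if (map_chg < 0)
		return -1;
	if (subpool_fails) {
		vma_end_reservation();	/* abort the in-progress add */
		return -1;
	}
	vma_commit_reservation();
	return 0;
}

int main(void)
{
	alloc_page_model(1);
	alloc_page_model(0);
	printf("adds_in_progress = %d\n", adds_in_progress);	/* 0: balanced */
	return 0;
}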
 
@@ -2311,7 +2539,7 @@
 	}
 
 	kobject_put(hugepages_kobj);
-	kfree(htlb_fault_mutex_table);
+	kfree(hugetlb_fault_mutex_table);
 }
 module_exit(hugetlb_exit);
 
@@ -2344,12 +2572,12 @@
 #else
 	num_fault_mutexes = 1;
 #endif
-	htlb_fault_mutex_table =
+	hugetlb_fault_mutex_table =
 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
-	BUG_ON(!htlb_fault_mutex_table);
+	BUG_ON(!hugetlb_fault_mutex_table);
 
 	for (i = 0; i < num_fault_mutexes; i++)
-		mutex_init(&htlb_fault_mutex_table[i]);
+		mutex_init(&hugetlb_fault_mutex_table[i]);
 	return 0;
 }
 module_init(hugetlb_init);
@@ -3147,6 +3375,23 @@
 	return page != NULL;
 }
 
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			   pgoff_t idx)
+{
+	struct inode *inode = mapping->host;
+	struct hstate *h = hstate_inode(inode);
+	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+
+	if (err)
+		return err;
+	ClearPagePrivate(page);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks += blocks_per_huge_page(h);
+	spin_unlock(&inode->i_lock);
+	return 0;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			   struct address_space *mapping, pgoff_t idx,
 			   unsigned long address, pte_t *ptep, unsigned int flags)
@@ -3194,21 +3439,13 @@
 		set_page_huge_active(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
-			int err;
-			struct inode *inode = mapping->host;
-
-			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+			int err = huge_add_to_page_cache(page, mapping, idx);
 			if (err) {
 				put_page(page);
 				if (err == -EEXIST)
 					goto retry;
 				goto out;
 			}
-			ClearPagePrivate(page);
-
-			spin_lock(&inode->i_lock);
-			inode->i_blocks += blocks_per_huge_page(h);
-			spin_unlock(&inode->i_lock);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
@@ -3236,11 +3473,14 @@
 	 * any allocations necessary to record that reservation occur outside
 	 * the spinlock.
 	 */
-	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto backout_unlocked;
 		}
+		/* Just decrements count, does not deallocate */
+		vma_end_reservation(h, vma, address);
+	}
 
 	ptl = huge_pte_lockptr(h, mm, ptep);
 	spin_lock(ptl);
@@ -3280,7 +3520,7 @@
 }
 
 #ifdef CONFIG_SMP
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3305,7 +3545,7 @@
  * For uniprocessor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3353,8 +3593,8 @@
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
-	mutex_lock(&htlb_fault_mutex_table[hash]);
+	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
@@ -3387,6 +3627,8 @@
 			ret = VM_FAULT_OOM;
 			goto out_mutex;
 		}
+		/* Just decrements count, does not deallocate */
+		vma_end_reservation(h, vma, address);
 
 		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
@@ -3437,7 +3679,7 @@
 		put_page(pagecache_page);
 	}
 out_mutex:
-	mutex_unlock(&htlb_fault_mutex_table[hash]);
+	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	/*
 	 * Generally it's safe to hold a refcount while waiting for the page lock.
 	 * But here we just wait to defer the next page fault to avoid a busy loop and
@@ -3726,12 +3968,15 @@
 	}
 	return 0;
 out_err:
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
+		region_abort(resv_map, from, to);
 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_put(&resv_map->refs, resv_map_release);
 	return ret;
 }
 
-void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+								long freed)
 {
 	struct hstate *h = hstate_inode(inode);
 	struct resv_map *resv_map = inode_resv_map(inode);
@@ -3739,8 +3984,17 @@
 	struct hugepage_subpool *spool = subpool_inode(inode);
 	long gbl_reserve;
 
-	if (resv_map)
-		chg = region_truncate(resv_map, offset);
+	if (resv_map) {
+		chg = region_del(resv_map, start, end);
+		/*
+		 * region_del() can fail in the rare case where a region
+		 * must be split and another region descriptor cannot be
+		 * allocated.  If end == LONG_MAX, it will not fail.
+		 */
+		if (chg < 0)
+			return chg;
+	}
+
 	spin_lock(&inode->i_lock);
 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
 	spin_unlock(&inode->i_lock);
@@ -3751,6 +4005,8 @@
 	 */
 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
 	hugetlb_acct_memory(h, -gbl_reserve);
+
+	return 0;
 }
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -3779,7 +4035,7 @@
 	return saddr;
 }
 
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 {
 	unsigned long base = addr & PUD_MASK;
 	unsigned long end = base + PUD_SIZE;
@@ -3789,8 +4045,8 @@
 	 */
 	if (vma->vm_flags & VM_MAYSHARE &&
 	    vma->vm_start <= base && end <= vma->vm_end)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /*
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index bf73ac1..aeba0ed 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -58,7 +58,7 @@
 	pr_info("Injecting memory failure at pfn %#lx\n", pfn);
 	return memory_failure(pfn, 18, MF_COUNT_INCREASED);
 put_out:
-	put_page(p);
+	put_hwpoison_page(p);
 	return 0;
 }
 
diff --git a/mm/internal.h b/mm/internal.h
index 36b23f1..bc0fa9a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,7 @@
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
 	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 	int order;			/* order a direct compactor needs */
@@ -426,4 +427,19 @@
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR		0x100 /* fair zone allocation */
 
+enum ttu_flags;
+struct tlbflush_unmap_batch;
+
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+void try_to_unmap_flush(void);
+void try_to_unmap_flush_dirty(void);
+#else
+static inline void try_to_unmap_flush(void)
+{
+}
+static inline void try_to_unmap_flush_dirty(void)
+{
+}
+
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index cf79f11..f532f6a 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -838,6 +838,7 @@
 	}
 
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
+		crt_early_log++;
 		kmemleak_disable();
 		return;
 	}
@@ -1882,7 +1883,7 @@
 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 
-	if (crt_early_log >= ARRAY_SIZE(early_log))
+	if (crt_early_log > ARRAY_SIZE(early_log))
 		pr_warning("Early log buffer exceeded (%d), please increase "
 			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
 
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 909eca2..e1da19f 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -99,8 +99,8 @@
 	struct list_lru_one *l;
 
 	spin_lock(&nlru->lock);
-	l = list_lru_from_kmem(nlru, item);
 	if (list_empty(item)) {
+		l = list_lru_from_kmem(nlru, item);
 		list_add_tail(item, &l->list);
 		l->nr_items++;
 		spin_unlock(&nlru->lock);
@@ -118,8 +118,8 @@
 	struct list_lru_one *l;
 
 	spin_lock(&nlru->lock);
-	l = list_lru_from_kmem(nlru, item);
 	if (!list_empty(item)) {
+		l = list_lru_from_kmem(nlru, item);
 		list_del_init(item);
 		l->nr_items--;
 		spin_unlock(&nlru->lock);
diff --git a/mm/madvise.c b/mm/madvise.c
index 64bb8a2..c889fcb 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -103,7 +103,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
-				vma->vm_file, pgoff, vma_policy(vma));
+			  vma->vm_file, pgoff, vma_policy(vma),
+			  vma->vm_userfaultfd_ctx);
 	if (*prev) {
 		vma = *prev;
 		goto success;
@@ -300,7 +301,7 @@
 
 	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
 
-	if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB))
+	if (vma->vm_flags & VM_LOCKED)
 		return -EINVAL;
 
 	f = vma->vm_file;
@@ -385,7 +386,7 @@
 	}
 }
 
-static int
+static bool
 madvise_behavior_valid(int behavior)
 {
 	switch (behavior) {
@@ -407,10 +408,10 @@
 #endif
 	case MADV_DONTDUMP:
 	case MADV_DODUMP:
-		return 1;
+		return true;
 
 	default:
-		return 0;
+		return false;
 	}
 }
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 87108e7..1c7b647 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -91,7 +91,7 @@
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
+bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 					phys_addr_t base, phys_addr_t size)
 {
 	unsigned long i;
@@ -103,7 +103,7 @@
 			break;
 	}
 
-	return (i < type->cnt) ? i : -1;
+	return i < type->cnt;
 }
 
 /*
@@ -566,6 +566,10 @@
 		 * area, insert that portion.
 		 */
 		if (rbase > base) {
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+			WARN_ON(nid != memblock_get_region_node(rgn));
+#endif
+			WARN_ON(flags != rgn->flags);
 			nr_new++;
 			if (insert)
 				memblock_insert_region(type, i++, base,
@@ -611,14 +615,14 @@
 						int nid,
 						unsigned long flags)
 {
-	struct memblock_type *_rgn = &memblock.memory;
+	struct memblock_type *type = &memblock.memory;
 
 	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
 		     (unsigned long long)base,
 		     (unsigned long long)base + size - 1,
 		     flags, (void *)_RET_IP_);
 
-	return memblock_add_range(_rgn, base, size, nid, flags);
+	return memblock_add_range(type, base, size, nid, flags);
 }
 
 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
@@ -758,7 +762,7 @@
  *
  * This function isolates region [@base, @base + @size), and sets/clears flag
  *
- * Return 0 on succees, -errno on failure.
+ * Return 0 on success, -errno on failure.
  */
 static int __init_memblock memblock_setclr_flag(phys_addr_t base,
 				phys_addr_t size, int set, int flag)
@@ -785,7 +789,7 @@
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on succees, -errno on failure.
+ * Return 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
 {
@@ -797,7 +801,7 @@
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on succees, -errno on failure.
+ * Return 0 on success, -errno on failure.
  */
 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 {
@@ -809,7 +813,7 @@
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on succees, -errno on failure.
+ * Return 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 {
@@ -831,10 +835,10 @@
 					   phys_addr_t *out_start,
 					   phys_addr_t *out_end)
 {
-	struct memblock_type *rsv = &memblock.reserved;
+	struct memblock_type *type = &memblock.reserved;
 
-	if (*idx >= 0 && *idx < rsv->cnt) {
-		struct memblock_region *r = &rsv->regions[*idx];
+	if (*idx >= 0 && *idx < type->cnt) {
+		struct memblock_region *r = &type->regions[*idx];
 		phys_addr_t base = r->base;
 		phys_addr_t size = r->size;
 
@@ -972,7 +976,7 @@
  * in type_b.
  *
  * @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @flags: pick from blocks based on memory attributes
  * @type_a: pointer to memblock_type from where the range is taken
  * @type_b: pointer to memblock_type which excludes memory from being taken
@@ -1562,12 +1566,12 @@
  * Check if the region [@base, @base+@size) intersects a reserved memory block.
  *
  * RETURNS:
- * 0 if false, non-zero if true
+ * True if they intersect, false if not.
  */
-int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
+bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	memblock_cap_size(base, &size);
-	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
+	return memblock_overlaps_region(&memblock.reserved, base, size);
 }
 
 void __init_memblock memblock_trim_memory(phys_addr_t align)
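
memblock_overlaps_region() now returns bool, so memblock_is_region_reserved() tests truth directly instead of comparing a returned index against -1. The underlying predicate is the standard half-open interval overlap test, shown here as a runnable model:

#include <stdio.h>
#include <stdbool.h>

/* The overlap predicate memblock uses for [base, base+size) ranges. */
static bool addrs_overlap(unsigned long base1, unsigned long size1,
			  unsigned long base2, unsigned long size2)
{
	return base1 < base2 + size2 && base2 < base1 + size1;
}

int main(void)
{
	printf("%d\n", addrs_overlap(0, 16, 8, 16));	/* 1: overlap */
	printf("%d\n", addrs_overlap(0, 16, 16, 16));	/* 0: adjacent only */
	return 0;
}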
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index acb93c5..1742a2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -111,56 +111,10 @@
 	"unevictable",
 };
 
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
-	MEM_CGROUP_TARGET_THRESH,
-	MEM_CGROUP_TARGET_SOFTLIMIT,
-	MEM_CGROUP_TARGET_NUMAINFO,
-	MEM_CGROUP_NTARGETS,
-};
 #define THRESHOLDS_EVENTS_TARGET 128
 #define SOFTLIMIT_EVENTS_TARGET 1024
 #define NUMAINFO_EVENTS_TARGET	1024
 
-struct mem_cgroup_stat_cpu {
-	long count[MEM_CGROUP_STAT_NSTATS];
-	unsigned long events[MEMCG_NR_EVENTS];
-	unsigned long nr_page_events;
-	unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct reclaim_iter {
-	struct mem_cgroup *position;
-	/* scan generation, increased every round-trip */
-	unsigned int generation;
-};
-
-/*
- * per-zone information in memory controller.
- */
-struct mem_cgroup_per_zone {
-	struct lruvec		lruvec;
-	unsigned long		lru_size[NR_LRU_LISTS];
-
-	struct reclaim_iter	iter[DEF_PRIORITY + 1];
-
-	struct rb_node		tree_node;	/* RB tree node */
-	unsigned long		usage_in_excess;/* Set to the value by which */
-						/* the soft limit is exceeded*/
-	bool			on_tree;
-	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
-						/* use container_of	   */
-};
-
-struct mem_cgroup_per_node {
-	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
-};
-
 /*
  * Cgroups above their limits are maintained in a RB-Tree, independent of
  * their hierarchy representation
@@ -181,32 +135,6 @@
 
 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 
-struct mem_cgroup_threshold {
-	struct eventfd_ctx *eventfd;
-	unsigned long threshold;
-};
-
-/* For threshold */
-struct mem_cgroup_threshold_ary {
-	/* An array index points to threshold just below or equal to usage. */
-	int current_threshold;
-	/* Size of entries[] */
-	unsigned int size;
-	/* Array of thresholds */
-	struct mem_cgroup_threshold entries[0];
-};
-
-struct mem_cgroup_thresholds {
-	/* Primary thresholds array */
-	struct mem_cgroup_threshold_ary *primary;
-	/*
-	 * Spare threshold array.
-	 * This is needed to make mem_cgroup_unregister_event() "never fail".
-	 * It must be able to store at least primary->size - 1 entries.
-	 */
-	struct mem_cgroup_threshold_ary *spare;
-};
-
 /* for OOM */
 struct mem_cgroup_eventfd_list {
 	struct list_head list;
@@ -256,113 +184,6 @@
 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 
-/*
- * The memory controller data structure. The memory controller controls both
- * page cache and RSS per cgroup. We would eventually like to provide
- * statistics based on the statistics developed by Rik Van Riel for clock-pro,
- * to help the administrator determine what knobs to tune.
- */
-struct mem_cgroup {
-	struct cgroup_subsys_state css;
-
-	/* Accounted resources */
-	struct page_counter memory;
-	struct page_counter memsw;
-	struct page_counter kmem;
-
-	/* Normal memory consumption range */
-	unsigned long low;
-	unsigned long high;
-
-	unsigned long soft_limit;
-
-	/* vmpressure notifications */
-	struct vmpressure vmpressure;
-
-	/* css_online() has been completed */
-	int initialized;
-
-	/*
-	 * Should the accounting and control be hierarchical, per subtree?
-	 */
-	bool use_hierarchy;
-
-	/* protected by memcg_oom_lock */
-	bool		oom_lock;
-	int		under_oom;
-
-	int	swappiness;
-	/* OOM-Killer disable */
-	int		oom_kill_disable;
-
-	/* protect arrays of thresholds */
-	struct mutex thresholds_lock;
-
-	/* thresholds for memory usage. RCU-protected */
-	struct mem_cgroup_thresholds thresholds;
-
-	/* thresholds for mem+swap usage. RCU-protected */
-	struct mem_cgroup_thresholds memsw_thresholds;
-
-	/* For oom notifier event fd */
-	struct list_head oom_notify;
-
-	/*
-	 * Should we move charges of a task when a task is moved into this
-	 * mem_cgroup ? And what type of charges should we move ?
-	 */
-	unsigned long move_charge_at_immigrate;
-	/*
-	 * set > 0 if pages under this cgroup are moving to other cgroup.
-	 */
-	atomic_t		moving_account;
-	/* taken only while moving_account > 0 */
-	spinlock_t		move_lock;
-	struct task_struct	*move_lock_task;
-	unsigned long		move_lock_flags;
-	/*
-	 * percpu counter.
-	 */
-	struct mem_cgroup_stat_cpu __percpu *stat;
-	spinlock_t pcp_counter_lock;
-
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-	struct cg_proto tcp_mem;
-#endif
-#if defined(CONFIG_MEMCG_KMEM)
-        /* Index in the kmem_cache->memcg_params.memcg_caches array */
-	int kmemcg_id;
-	bool kmem_acct_activated;
-	bool kmem_acct_active;
-#endif
-
-	int last_scanned_node;
-#if MAX_NUMNODES > 1
-	nodemask_t	scan_nodes;
-	atomic_t	numainfo_events;
-	atomic_t	numainfo_updating;
-#endif
-
-#ifdef CONFIG_CGROUP_WRITEBACK
-	struct list_head cgwb_list;
-	struct wb_domain cgwb_domain;
-#endif
-
-	/* List of events which userspace want to receive */
-	struct list_head event_list;
-	spinlock_t event_list_lock;
-
-	struct mem_cgroup_per_node *nodeinfo[0];
-	/* WARNING: nodeinfo must be the last member here */
-};
-
-#ifdef CONFIG_MEMCG_KMEM
-bool memcg_kmem_is_active(struct mem_cgroup *memcg)
-{
-	return memcg->kmem_acct_active;
-}
-#endif
-
 /* Stuffs for move charges at task migration. */
 /*
  * Types of charges to be moved.
@@ -423,11 +244,6 @@
  */
 static DEFINE_MUTEX(memcg_create_mutex);
 
-struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
-{
-	return s ? container_of(s, struct mem_cgroup, css) : NULL;
-}
-
 /* Some nice accessors for the vmpressure. */
 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 {
@@ -499,8 +315,7 @@
 		rcu_read_lock();
 		memcg = mem_cgroup_from_task(current);
 		cg_proto = sk->sk_prot->proto_cgroup(memcg);
-		if (!mem_cgroup_is_root(memcg) &&
-		    memcg_proto_active(cg_proto) &&
+		if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
 		    css_tryget_online(&memcg->css)) {
 			sk->sk_cgrp = cg_proto;
 		}
@@ -593,11 +408,6 @@
 	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
-struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
-{
-	return &memcg->css;
-}
-
 /**
  * mem_cgroup_css_from_page - css of the memcg associated with a page
  * @page: page of interest
@@ -876,14 +686,6 @@
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
-	struct mem_cgroup_per_zone *mz;
-
-	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
-	return mz->lru_size[lru];
-}
-
 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 						  int nid,
 						  unsigned int lru_mask)
@@ -986,6 +788,7 @@
 
 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 }
+EXPORT_SYMBOL(mem_cgroup_from_task);
 
 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
@@ -1031,7 +834,7 @@
 				   struct mem_cgroup *prev,
 				   struct mem_cgroup_reclaim_cookie *reclaim)
 {
-	struct reclaim_iter *uninitialized_var(iter);
+	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 	struct cgroup_subsys_state *css = NULL;
 	struct mem_cgroup *memcg = NULL;
 	struct mem_cgroup *pos = NULL;
@@ -1173,30 +976,6 @@
 	     iter != NULL;				\
 	     iter = mem_cgroup_iter(NULL, iter, NULL))
 
-void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
-{
-	struct mem_cgroup *memcg;
-
-	rcu_read_lock();
-	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	if (unlikely(!memcg))
-		goto out;
-
-	switch (idx) {
-	case PGFAULT:
-		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
-		break;
-	case PGMAJFAULT:
-		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
-		break;
-	default:
-		BUG();
-	}
-out:
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
-
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
@@ -1295,15 +1074,6 @@
 	VM_BUG_ON((long)(*lru_size) < 0);
 }
 
-bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
-{
-	if (root == memcg)
-		return true;
-	if (!root->use_hierarchy)
-		return false;
-	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
-}
-
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
 {
 	struct mem_cgroup *task_memcg;
@@ -1330,39 +1100,6 @@
 	return ret;
 }
 
-int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
-{
-	unsigned long inactive_ratio;
-	unsigned long inactive;
-	unsigned long active;
-	unsigned long gb;
-
-	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
-	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
-
-	gb = (inactive + active) >> (30 - PAGE_SHIFT);
-	if (gb)
-		inactive_ratio = int_sqrt(10 * gb);
-	else
-		inactive_ratio = 1;
-
-	return inactive * inactive_ratio < active;
-}
-
-bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
-{
-	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-
-	if (mem_cgroup_disabled())
-		return true;
-
-	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
-	memcg = mz->memcg;
-
-	return !!(memcg->css.flags & CSS_ONLINE);
-}
-
 #define mem_cgroup_from_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
@@ -1394,15 +1131,6 @@
 	return margin;
 }
 
-int mem_cgroup_swappiness(struct mem_cgroup *memcg)
-{
-	/* root ? */
-	if (mem_cgroup_disabled() || !memcg->css.parent)
-		return vm_swappiness;
-
-	return memcg->swappiness;
-}
-
 /*
  * A routine for checking "mem" is under move_account() or not.
  *
@@ -1545,6 +1273,12 @@
 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 				     int order)
 {
+	struct oom_control oc = {
+		.zonelist = NULL,
+		.nodemask = NULL,
+		.gfp_mask = gfp_mask,
+		.order = order,
+	};
 	struct mem_cgroup *iter;
 	unsigned long chosen_points = 0;
 	unsigned long totalpages;
@@ -1563,7 +1297,7 @@
 		goto unlock;
 	}
 
-	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
+	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
 	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
 	for_each_mem_cgroup_tree(iter, memcg) {
 		struct css_task_iter it;
@@ -1571,8 +1305,7 @@
 
 		css_task_iter_start(&iter->css, &it);
 		while ((task = css_task_iter_next(&it))) {
-			switch (oom_scan_process_thread(task, totalpages, NULL,
-							false)) {
+			switch (oom_scan_process_thread(&oc, task, totalpages)) {
 			case OOM_SCAN_SELECT:
 				if (chosen)
 					put_task_struct(chosen);
@@ -1610,8 +1343,8 @@
 
 	if (chosen) {
 		points = chosen_points * 1000 / totalpages;
-		oom_kill_process(chosen, gfp_mask, order, points, totalpages,
-				 memcg, NULL, "Memory cgroup out of memory");
+		oom_kill_process(&oc, chosen, points, totalpages, memcg,
+				 "Memory cgroup out of memory");
 	}
 unlock:
 	mutex_unlock(&oom_lock);
@@ -2062,23 +1795,6 @@
 }
 EXPORT_SYMBOL(mem_cgroup_end_page_stat);
 
-/**
- * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * See mem_cgroup_begin_page_stat() for locking requirements.
- */
-void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
-				 enum mem_cgroup_stat_index idx, int val)
-{
-	VM_BUG_ON(!rcu_read_lock_held());
-
-	if (memcg)
-		this_cpu_add(memcg->stat->count[idx], val);
-}
-
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
  * TODO: maybe necessary to use big numbers in big irons.
@@ -2504,16 +2220,6 @@
 	css_put_many(&memcg->css, nr_pages);
 }
 
-/*
- * helper for acessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
- */
-int memcg_cache_id(struct mem_cgroup *memcg)
-{
-	return memcg ? memcg->kmemcg_id : -1;
-}
-
 static int memcg_alloc_cache_id(void)
 {
 	int id, size;
@@ -5127,10 +4833,12 @@
 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
-	struct task_struct *p = cgroup_taskset_first(tset);
-	int ret = 0;
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+	struct mem_cgroup *from;
+	struct task_struct *p;
+	struct mm_struct *mm;
 	unsigned long move_flags;
+	int ret = 0;
 
 	/*
 	 * We are now committed to this value whatever it is. Changes in this
@@ -5138,36 +4846,37 @@
 	 * So we need to save it, and keep it going.
 	 */
 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
-	if (move_flags) {
-		struct mm_struct *mm;
-		struct mem_cgroup *from = mem_cgroup_from_task(p);
+	if (!move_flags)
+		return 0;
 
-		VM_BUG_ON(from == memcg);
+	p = cgroup_taskset_first(tset);
+	from = mem_cgroup_from_task(p);
 
-		mm = get_task_mm(p);
-		if (!mm)
-			return 0;
-		/* We move charges only when we move a owner of the mm */
-		if (mm->owner == p) {
-			VM_BUG_ON(mc.from);
-			VM_BUG_ON(mc.to);
-			VM_BUG_ON(mc.precharge);
-			VM_BUG_ON(mc.moved_charge);
-			VM_BUG_ON(mc.moved_swap);
+	VM_BUG_ON(from == memcg);
 
-			spin_lock(&mc.lock);
-			mc.from = from;
-			mc.to = memcg;
-			mc.flags = move_flags;
-			spin_unlock(&mc.lock);
-			/* We set mc.moving_task later */
+	mm = get_task_mm(p);
+	if (!mm)
+		return 0;
+	/* We move charges only when we move an owner of the mm */
+	if (mm->owner == p) {
+		VM_BUG_ON(mc.from);
+		VM_BUG_ON(mc.to);
+		VM_BUG_ON(mc.precharge);
+		VM_BUG_ON(mc.moved_charge);
+		VM_BUG_ON(mc.moved_swap);
 
-			ret = mem_cgroup_precharge_mc(mm);
-			if (ret)
-				mem_cgroup_clear_mc();
-		}
-		mmput(mm);
+		spin_lock(&mc.lock);
+		mc.from = from;
+		mc.to = memcg;
+		mc.flags = move_flags;
+		spin_unlock(&mc.lock);
+		/* We set mc.moving_task later */
+
+		ret = mem_cgroup_precharge_mc(mm);
+		if (ret)
+			mem_cgroup_clear_mc();
 	}
+	mmput(mm);
 	return ret;
 }
 
@@ -5521,19 +5230,6 @@
 };
 
 /**
- * mem_cgroup_events - count memory events against a cgroup
- * @memcg: the memory cgroup
- * @idx: the event index
- * @nr: the number of events to account for
- */
-void mem_cgroup_events(struct mem_cgroup *memcg,
-		       enum mem_cgroup_events_index idx,
-		       unsigned int nr)
-{
-	this_cpu_add(memcg->stat->events[idx], nr);
-}
-
-/**
  * mem_cgroup_low - check if memory consumption is below the normal range
  * @root: the highest ancestor to consider
  * @memcg: the memory cgroup to check
@@ -5965,7 +5661,13 @@
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* Caller disabled preemption with mapping->tree_lock */
+	/*
+	 * Interrupts should be disabled here because the caller holds the
+	 * mapping->tree_lock lock which is taken with interrupts-off. It is
+	 * important here to have the interrupts disabled because it is the
+	 * only synchronisation we have for updating the per-CPU variables.
+	 */
+	VM_BUG_ON(!irqs_disabled());
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }
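
The mem_cgroup_from_css() accessor dropped above relies on the classic
container_of() pattern: given a pointer to an embedded member, recover the
structure that contains it. A minimal userspace sketch of the same pattern,
with illustrative struct names rather than the kernel's:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct css {			/* stand-in for cgroup_subsys_state */
		int refcnt;
	};

	struct memcg {			/* stand-in for mem_cgroup */
		struct css css;		/* must be embedded, not pointed to */
		unsigned long usage;
	};

	/* NULL-safe upcast from the embedded css to its container. */
	static struct memcg *memcg_from_css(struct css *s)
	{
		return s ? container_of(s, struct memcg, css) : NULL;
	}

	int main(void)
	{
		struct memcg m = { .css = { .refcnt = 1 }, .usage = 42 };

		printf("usage=%lu\n", memcg_from_css(&m.css)->usage);
		return 0;
	}
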
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1f4446a9..eeda648 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -146,7 +146,7 @@
 	if (!mem)
 		return -EINVAL;
 
-	css = mem_cgroup_css(mem);
+	css = &mem->css;
 	ino = cgroup_ino(css->cgroup);
 	css_put(css);
 
@@ -934,6 +934,27 @@
 }
 EXPORT_SYMBOL_GPL(get_hwpoison_page);
 
+/**
+ * put_hwpoison_page() - Put refcount for memory error handling
+ * @page:	raw error page (hit by memory error)
+ */
+void put_hwpoison_page(struct page *page)
+{
+	struct page *head = compound_head(page);
+
+	if (PageHuge(head)) {
+		put_page(head);
+		return;
+	}
+
+	if (PageTransHuge(head))
+		if (page != head)
+			put_page(head);
+
+	put_page(page);
+}
+EXPORT_SYMBOL_GPL(put_hwpoison_page);
+
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
@@ -1100,7 +1121,7 @@
 		nr_pages = 1 << compound_order(hpage);
 	else /* normal page or thp */
 		nr_pages = 1;
-	atomic_long_add(nr_pages, &num_poisoned_pages);
+	num_poisoned_pages_add(nr_pages);
 
 	/*
 	 * We need/can do nothing about count=0 pages.
@@ -1128,7 +1149,7 @@
 			if (PageHWPoison(hpage)) {
 				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
 				    || (p != hpage && TestSetPageHWPoison(hpage))) {
-					atomic_long_sub(nr_pages, &num_poisoned_pages);
+					num_poisoned_pages_sub(nr_pages);
 					unlock_page(hpage);
 					return 0;
 				}
@@ -1152,10 +1173,8 @@
 			else
 				pr_err("MCE: %#lx: thp split failed\n", pfn);
 			if (TestClearPageHWPoison(p))
-				atomic_long_sub(nr_pages, &num_poisoned_pages);
-			put_page(p);
-			if (p != hpage)
-				put_page(hpage);
+				num_poisoned_pages_sub(nr_pages);
+			put_hwpoison_page(p);
 			return -EBUSY;
 		}
 		VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1214,16 +1233,16 @@
 	 */
 	if (!PageHWPoison(p)) {
 		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
-		atomic_long_sub(nr_pages, &num_poisoned_pages);
+		num_poisoned_pages_sub(nr_pages);
 		unlock_page(hpage);
-		put_page(hpage);
+		put_hwpoison_page(hpage);
 		return 0;
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
-			atomic_long_sub(nr_pages, &num_poisoned_pages);
+			num_poisoned_pages_sub(nr_pages);
 		unlock_page(hpage);
-		put_page(hpage);
+		put_hwpoison_page(hpage);
 		return 0;
 	}
 
@@ -1237,7 +1256,7 @@
 	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
 		action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
 		unlock_page(hpage);
-		put_page(hpage);
+		put_hwpoison_page(hpage);
 		return 0;
 	}
 	/*
@@ -1426,6 +1445,22 @@
 		return 0;
 	}
 
+	if (page_count(page) > 1) {
+		pr_info("MCE: Someone grabs the hwpoison page %#lx\n", pfn);
+		return 0;
+	}
+
+	if (page_mapped(page)) {
+		pr_info("MCE: Someone maps the hwpoison page %#lx\n", pfn);
+		return 0;
+	}
+
+	if (page_mapping(page)) {
+		pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n",
+			pfn);
+		return 0;
+	}
+
 	/*
 	 * unpoison_memory() can encounter thp only when the thp is being
 	 * worked by memory_failure() and the page lock is not held yet.
@@ -1450,7 +1485,7 @@
 			return 0;
 		}
 		if (TestClearPageHWPoison(p))
-			atomic_long_dec(&num_poisoned_pages);
+			num_poisoned_pages_dec();
 		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
 		return 0;
 	}
@@ -1464,16 +1499,16 @@
 	 */
 	if (TestClearPageHWPoison(page)) {
 		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
-		atomic_long_sub(nr_pages, &num_poisoned_pages);
+		num_poisoned_pages_sub(nr_pages);
 		freeit = 1;
 		if (PageHuge(page))
 			clear_page_hwpoison_huge_page(page);
 	}
 	unlock_page(page);
 
-	put_page(page);
+	put_hwpoison_page(page);
 	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
-		put_page(page);
+		put_hwpoison_page(page);
 
 	return 0;
 }
@@ -1486,7 +1521,7 @@
 		return alloc_huge_page_node(page_hstate(compound_head(p)),
 						   nid);
 	else
-		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -1533,7 +1568,7 @@
 		/*
 		 * Try to free it.
 		 */
-		put_page(page);
+		put_hwpoison_page(page);
 		shake_page(page, 1);
 
 		/*
@@ -1542,7 +1577,7 @@
 		ret = __get_any_page(page, pfn, 0);
 		if (!PageLRU(page)) {
 			/* Drop page reference which is from __get_any_page() */
-			put_page(page);
+			put_hwpoison_page(page);
 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 				pfn, page->flags);
 			return -EIO;
@@ -1565,7 +1600,7 @@
 	lock_page(hpage);
 	if (PageHWPoison(hpage)) {
 		unlock_page(hpage);
-		put_page(hpage);
+		put_hwpoison_page(hpage);
 		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
 		return -EBUSY;
 	}
@@ -1576,7 +1611,7 @@
 	 * get_any_page() and isolate_huge_page() takes a refcount each,
 	 * so need to drop one here.
 	 */
-	put_page(hpage);
+	put_hwpoison_page(hpage);
 	if (!ret) {
 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
 		return -EBUSY;
@@ -1600,11 +1635,10 @@
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
 			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_order(hpage),
-					&num_poisoned_pages);
+			num_poisoned_pages_add(1 << compound_order(hpage));
 		} else {
 			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
+			num_poisoned_pages_inc();
 		}
 	}
 	return ret;
@@ -1625,7 +1659,7 @@
 	wait_on_page_writeback(page);
 	if (PageHWPoison(page)) {
 		unlock_page(page);
-		put_page(page);
+		put_hwpoison_page(page);
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
 		return -EBUSY;
 	}
@@ -1640,10 +1674,10 @@
 	 * would need to fix isolation locking first.
 	 */
 	if (ret == 1) {
-		put_page(page);
+		put_hwpoison_page(page);
 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
 		SetPageHWPoison(page);
-		atomic_long_inc(&num_poisoned_pages);
+		num_poisoned_pages_inc();
 		return 0;
 	}
 
@@ -1657,14 +1691,12 @@
 	 * Drop the page reference which came from get_any_page();
 	 * a successful isolate_lru_page() already took another one.
 	 */
-	put_page(page);
+	put_hwpoison_page(page);
 	if (!ret) {
 		LIST_HEAD(pagelist);
 		inc_zone_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
-		if (!TestSetPageHWPoison(page))
-			atomic_long_inc(&num_poisoned_pages);
 		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
 		if (ret) {
@@ -1679,8 +1711,6 @@
 				pfn, ret, page->flags);
 			if (ret > 0)
 				ret = -EIO;
-			if (TestClearPageHWPoison(page))
-				atomic_long_dec(&num_poisoned_pages);
 		}
 	} else {
 		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
@@ -1719,12 +1749,16 @@
 
 	if (PageHWPoison(page)) {
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
+		if (flags & MF_COUNT_INCREASED)
+			put_hwpoison_page(page);
 		return -EBUSY;
 	}
 	if (!PageHuge(page) && PageTransHuge(hpage)) {
 		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
 			pr_info("soft offline: %#lx: failed to split THP\n",
 				pfn);
+			if (flags & MF_COUNT_INCREASED)
+				put_hwpoison_page(page);
 			return -EBUSY;
 		}
 	}
@@ -1742,11 +1776,10 @@
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
 			if (!dequeue_hwpoisoned_huge_page(hpage))
-				atomic_long_add(1 << compound_order(hpage),
-					&num_poisoned_pages);
+				num_poisoned_pages_add(1 << compound_order(hpage));
 		} else {
 			if (!TestSetPageHWPoison(page))
-				atomic_long_inc(&num_poisoned_pages);
+				num_poisoned_pages_inc();
 		}
 	}
 	return ret;
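
Throughout memory-failure.c the open-coded atomic_long_add()/sub()/inc()/dec()
updates of num_poisoned_pages are replaced by named wrappers, giving the
poisoned-page accounting a single choke point. A standalone sketch of the same
wrapper idiom using C11 atomics (the helper names mirror the patch, everything
else is illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long num_poisoned_pages;

	/*
	 * All updates funnel through these helpers, so sanity checks or
	 * tracing can later be added in exactly one place.
	 */
	static inline void num_poisoned_pages_add(long nr)
	{
		atomic_fetch_add(&num_poisoned_pages, nr);
	}

	static inline void num_poisoned_pages_sub(long nr)
	{
		atomic_fetch_sub(&num_poisoned_pages, nr);
	}

	static inline void num_poisoned_pages_inc(void)
	{
		num_poisoned_pages_add(1);
	}

	static inline void num_poisoned_pages_dec(void)
	{
		num_poisoned_pages_sub(1);
	}

	int main(void)
	{
		num_poisoned_pages_add(8);	/* e.g. an order-3 compound page */
		num_poisoned_pages_dec();
		printf("%ld\n", atomic_load(&num_poisoned_pages));	/* 7 */
		return 0;
	}
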
diff --git a/mm/memory.c b/mm/memory.c
index 388dcf9..6cd0b21 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,7 @@
 #include <linux/string.h>
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -180,22 +181,22 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
-static int tlb_next_batch(struct mmu_gather *tlb)
+static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
 
 	batch = tlb->active;
 	if (batch->next) {
 		tlb->active = batch->next;
-		return 1;
+		return true;
 	}
 
 	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
-		return 0;
+		return false;
 
 	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 	if (!batch)
-		return 0;
+		return false;
 
 	tlb->batch_count++;
 	batch->next = NULL;
@@ -205,7 +206,7 @@
 	tlb->active->next = batch;
 	tlb->active = batch;
 
-	return 1;
+	return true;
 }
 
 /* tlb_gather_mmu
@@ -2425,8 +2426,6 @@
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
@@ -2685,6 +2684,12 @@
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
+		/* Deliver the page fault to userland, check inside PT lock */
+		if (userfaultfd_missing(vma)) {
+			pte_unmap_unlock(page_table, ptl);
+			return handle_userfault(vma, address, flags,
+						VM_UFFD_MISSING);
+		}
 		goto setpte;
 	}
 
@@ -2713,6 +2718,15 @@
 	if (!pte_none(*page_table))
 		goto release;
 
+	/* Deliver the page fault to userland, check inside PT lock */
+	if (userfaultfd_missing(vma)) {
+		pte_unmap_unlock(page_table, ptl);
+		mem_cgroup_cancel_charge(page, memcg);
+		page_cache_release(page);
+		return handle_userfault(vma, address, flags,
+					VM_UFFD_MISSING);
+	}
+
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 	mem_cgroup_commit_charge(page, memcg, false);
@@ -2999,9 +3013,9 @@
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
+			 * i_mmap_lock for write to protect against truncate.
 			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			i_mmap_unlock_write(vma->vm_file->f_mapping);
 		}
 		goto uncharge_out;
 	}
@@ -3015,9 +3029,9 @@
 	} else {
 		/*
 		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
+		 * i_mmap_lock for write to protect against truncate.
 		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
 	}
 	return ret;
 uncharge_out:
@@ -3216,6 +3230,27 @@
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3251,12 +3286,12 @@
 	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops)
+			if (vma_is_anonymous(vma))
+				return do_anonymous_page(mm, vma, address,
+							 pte, pmd, flags);
+			else
 				return do_fault(mm, vma, address, pte, pmd,
 						flags, entry);
-
-			return do_anonymous_page(mm, vma, address, pte, pmd,
-					flags);
 		}
 		return do_swap_page(mm, vma, address,
 					pte, pmd, flags, entry);
@@ -3318,10 +3353,7 @@
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3345,8 +3377,8 @@
 							     orig_pmd, pmd);
 
 			if (dirty && !pmd_write(orig_pmd)) {
-				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-							  orig_pmd);
+				ret = wp_huge_pmd(mm, vma, address, pmd,
+							orig_pmd, flags);
 				if (!(ret & VM_FAULT_FALLBACK))
 					return ret;
 			} else {
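
The new create_huge_pmd()/wp_huge_pmd() helpers give the huge fault path one
dispatch point: anonymous VMAs (no vm_ops) take the built-in path, VMAs whose
operations provide ->pmd_fault delegate to it, and everything else returns
VM_FAULT_FALLBACK so the caller retries with small pages. A userspace sketch
of that dispatch-with-fallback shape (all names illustrative):

	#include <stdio.h>

	#define FAULT_OK	0
	#define FAULT_FALLBACK	1

	struct vma;

	struct vm_ops {
		int (*pmd_fault)(struct vma *vma);	/* optional hook */
	};

	struct vma {
		const struct vm_ops *ops;	/* NULL means anonymous */
	};

	static int anon_huge_fault(struct vma *vma)
	{
		(void)vma;
		return FAULT_OK;		/* built-in anonymous path */
	}

	static int huge_fault(struct vma *vma)
	{
		if (!vma->ops)
			return anon_huge_fault(vma);
		if (vma->ops->pmd_fault)
			return vma->ops->pmd_fault(vma);
		return FAULT_FALLBACK;	/* caller falls back to small pages */
	}

	static int dax_pmd_fault(struct vma *vma)
	{
		(void)vma;
		return FAULT_OK;
	}

	static const struct vm_ops dax_ops   = { .pmd_fault = dax_pmd_fault };
	static const struct vm_ops plain_ops = { .pmd_fault = NULL };

	int main(void)
	{
		struct vma anon  = { .ops = NULL };
		struct vma dax   = { .ops = &dax_ops };
		struct vma plain = { .ops = &plain_ops };

		/* prints: 0 0 1 */
		printf("%d %d %d\n", huge_fault(&anon), huge_fault(&dax),
		       huge_fault(&plain));
		return 0;
	}
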
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6da82bc..aa992e2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -778,7 +778,10 @@
 
 	start = phys_start_pfn << PAGE_SHIFT;
 	size = nr_pages * PAGE_SIZE;
-	ret = release_mem_region_adjustable(&iomem_resource, start, size);
+
+	/* in the ZONE_DEVICE case the device driver owns the memory region */
+	if (!is_dev_zone(zone))
+		ret = release_mem_region_adjustable(&iomem_resource, start, size);
 	if (ret) {
 		resource_size_t endres = start + size - 1;
 
@@ -1215,8 +1218,13 @@
 	return 0;
 }
 
-int zone_for_memory(int nid, u64 start, u64 size, int zone_default)
+int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
+		bool for_device)
 {
+#ifdef CONFIG_ZONE_DEVICE
+	if (for_device)
+		return ZONE_DEVICE;
+#endif
 	if (should_add_memory_movable(nid, start, size))
 		return ZONE_MOVABLE;
 
@@ -1248,6 +1256,14 @@
 
 	mem_hotplug_begin();
 
+	/*
+	 * Add new range to memblock so that when hotadd_new_pgdat() is called
+	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
+	 * this new range and calculate total pages correctly.  The range will
+	 * be removed at hot-remove time.
+	 */
+	memblock_add_node(start, size, nid);
+
 	new_node = !node_online(nid);
 	if (new_node) {
 		pgdat = hotadd_new_pgdat(nid, start);
@@ -1257,7 +1273,7 @@
 	}
 
 	/* call arch's memory hotadd */
-	ret = arch_add_memory(nid, start, size);
+	ret = arch_add_memory(nid, start, size, false);
 
 	if (ret < 0)
 		goto error;
@@ -1277,7 +1293,6 @@
 
 	/* create new memmap entry */
 	firmware_map_add_hotplug(start, start + size, "System RAM");
-	memblock_add_node(start, size, nid);
 
 	goto out;
 
@@ -1286,6 +1301,7 @@
 	if (new_pgdat)
 		rollback_node_hotadd(nid, pgdat);
 	release_memory_resource(res);
+	memblock_remove(start, size);
 
 out:
 	mem_hotplug_done();
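
add_memory() now publishes the range via memblock_add_node() before
hotadd_new_pgdat() runs, so get_pfn_range_for_nid() already sees it, and the
new memblock_remove() on the error path undoes that early registration. A
tiny sketch of this "publish early, unwind on failure" ordering (function
names illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static bool range_visible;

	static void range_publish(void)  { range_visible = true; }
	static void range_withdraw(void) { range_visible = false; }

	/* Later setup steps rely on the range already being visible. */
	static int setup_node(bool inject_failure)
	{
		if (!range_visible)
			return -1;	/* totals would be miscalculated */
		return inject_failure ? -1 : 0;
	}

	static int add_memory(bool inject_failure)
	{
		int ret;

		range_publish();		/* publish first */
		ret = setup_node(inject_failure);
		if (ret)
			range_withdraw();	/* unwind on any error */
		return ret;
	}

	int main(void)
	{
		printf("ok=%d visible=%d\n", add_memory(false), range_visible);
		printf("err=%d visible=%d\n", add_memory(true), range_visible);
		return 0;
	}
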
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 99d4c1d..87a1779 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -608,9 +608,6 @@
 
 	qp->prev = vma;
 
-	if (vma->vm_flags & VM_PFNMAP)
-		return 1;
-
 	if (flags & MPOL_MF_LAZY) {
 		/* Similar to task_numa_work, skip inaccessible VMAs */
 		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
@@ -722,8 +719,8 @@
 		pgoff = vma->vm_pgoff +
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-				  vma->anon_vma, vma->vm_file, pgoff,
-				  new_pol);
+				 vma->anon_vma, vma->vm_file, pgoff,
+				 new_pol, vma->vm_userfaultfd_ctx);
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
@@ -945,7 +942,7 @@
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
 	else
-		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE |
+		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 						    __GFP_THISNODE, 0);
 }
 
@@ -2001,7 +1998,7 @@
 		nmask = policy_nodemask(gfp, pol);
 		if (!nmask || node_isset(hpage_node, *nmask)) {
 			mpol_cond_put(pol);
-			page = alloc_pages_exact_node(hpage_node,
+			page = __alloc_pages_node(hpage_node,
 						gfp | __GFP_THISNODE, order);
 			goto out;
 		}
diff --git a/mm/mempool.c b/mm/mempool.c
index 2cc08de..4c533bc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -150,6 +150,9 @@
  */
 void mempool_destroy(mempool_t *pool)
 {
+	if (unlikely(!pool))
+		return;
+
 	while (pool->curr_nr) {
 		void *element = remove_element(pool);
 		pool->free(element, pool->pool_data);
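
mempool_destroy() becomes a no-op on a NULL pool, matching the kfree(NULL)
and free(NULL) convention, so error paths can destroy unconditionally instead
of guarding every call site. A small sketch of the same NULL-tolerant
destructor idiom (names illustrative):

	#include <stdlib.h>

	struct pool {
		void *elements;
	};

	/* Error paths may call this unconditionally, even with NULL. */
	static void pool_destroy(struct pool *pool)
	{
		if (!pool)
			return;
		free(pool->elements);
		free(pool);
	}

	static struct pool *pool_create(void)
	{
		struct pool *p = malloc(sizeof(*p));

		if (!p)
			return NULL;
		p->elements = malloc(64);
		if (!p->elements) {
			free(p);
			return NULL;
		}
		return p;
	}

	int main(void)
	{
		struct pool *p = pool_create();

		pool_destroy(p);	/* fine whether p is NULL or not */
		pool_destroy(NULL);	/* explicitly a no-op */
		return 0;
	}
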
diff --git a/mm/memtest.c b/mm/memtest.c
index 0a1cc13..8eaa4c3 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -1,11 +1,6 @@
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
 #include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/pfn.h>
 #include <linux/memblock.h>
 
 static u64 patterns[] __initdata = {
@@ -31,10 +26,8 @@
 
 static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad)
 {
-	printk(KERN_INFO "  %016llx bad mem addr %010llx - %010llx reserved\n",
-	       (unsigned long long) pattern,
-	       (unsigned long long) start_bad,
-	       (unsigned long long) end_bad);
+	pr_info("  %016llx bad mem addr %pa - %pa reserved\n",
+		cpu_to_be64(pattern), &start_bad, &end_bad);
 	memblock_reserve(start_bad, end_bad - start_bad);
 }
 
@@ -79,26 +72,26 @@
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 		if (this_start < this_end) {
-			printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
-			       (unsigned long long)this_start,
-			       (unsigned long long)this_end,
-			       (unsigned long long)cpu_to_be64(pattern));
+			pr_info("  %pa - %pa pattern %016llx\n",
+				&this_start, &this_end, cpu_to_be64(pattern));
 			memtest(pattern, this_start, this_end - this_start);
 		}
 	}
 }
 
 /* default is disabled */
-static int memtest_pattern __initdata;
+static unsigned int memtest_pattern __initdata;
 
 static int __init parse_memtest(char *arg)
 {
+	int ret = 0;
+
 	if (arg)
-		memtest_pattern = simple_strtoul(arg, NULL, 0);
+		ret = kstrtouint(arg, 0, &memtest_pattern);
 	else
 		memtest_pattern = ARRAY_SIZE(patterns);
 
-	return 0;
+	return ret;
 }
 
 early_param("memtest", parse_memtest);
@@ -111,7 +104,7 @@
 	if (!memtest_pattern)
 		return;
 
-	printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
+	pr_info("early_memtest: # of tests: %u\n", memtest_pattern);
 	for (i = memtest_pattern-1; i < UINT_MAX; --i) {
 		idx = i % ARRAY_SIZE(patterns);
 		do_one_pass(patterns[idx], start, end);
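
parse_memtest() drops simple_strtoul(), which cannot report malformed input,
in favor of kstrtouint(), and the parse error now propagates out of the
early-param handler. A userspace approximation of kstrtouint()'s strictness
built on strtoul() (parse_uint() is a hypothetical helper, not a kernel API):

	#include <errno.h>
	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Reject empty strings, trailing junk and out-of-range values --
	 * roughly the guarantees the kernel's kstrtouint() provides.
	 */
	static int parse_uint(const char *s, int base, unsigned int *res)
	{
		char *end;
		unsigned long val;

		errno = 0;
		val = strtoul(s, &end, base);
		if (end == s || *end != '\0')
			return -EINVAL;
		if (errno == ERANGE || val > UINT_MAX)
			return -ERANGE;
		*res = (unsigned int)val;
		return 0;
	}

	int main(void)
	{
		unsigned int v;

		printf("%d\n", parse_uint("17", 0, &v));   /* 0, v == 17 */
		printf("%d\n", parse_uint("17x", 0, &v));  /* -EINVAL */
		printf("%d\n", parse_uint("", 0, &v));     /* -EINVAL */
		return 0;
	}
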
diff --git a/mm/migrate.c b/mm/migrate.c
index eb42671..02ce25d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,8 +880,7 @@
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
-			TTU_IGNORE_HWPOISON);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
 
@@ -952,9 +951,11 @@
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		/* Soft-offlined page shouldn't go through lru cache list */
-		if (reason == MR_MEMORY_FAILURE)
+		if (reason == MR_MEMORY_FAILURE) {
 			put_page(page);
-		else
+			if (!test_set_page_hwpoison(page))
+				num_poisoned_pages_inc();
+		} else
 			putback_lru_page(page);
 	}
 
@@ -1194,7 +1195,7 @@
 		return alloc_huge_page_node(page_hstate(compound_head(p)),
 					pm->node);
 	else
-		return alloc_pages_exact_node(pm->node,
+		return __alloc_pages_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }
 
@@ -1226,7 +1227,9 @@
 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
 			goto set_status;
 
-		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
+		/* FOLL_DUMP to ignore special (like zero) pages */
+		page = follow_page(vma, pp->addr,
+				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1236,10 +1239,6 @@
 		if (!page)
 			goto set_status;
 
-		/* Use PageReserved to check for zero page */
-		if (PageReserved(page))
-			goto put_and_set;
-
 		pp->page = page;
 		err = page_to_nid(page);
 
@@ -1396,18 +1395,14 @@
 		if (!vma || addr < vma->vm_start)
 			goto set_status;
 
-		page = follow_page(vma, addr, 0);
+		/* FOLL_DUMP to ignore special (like zero) pages */
+		page = follow_page(vma, addr, FOLL_DUMP);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
 			goto set_status;
 
-		err = -ENOENT;
-		/* Use PageReserved to check for zero page */
-		if (!page || PageReserved(page))
-			goto set_status;
-
-		err = page_to_nid(page);
+		err = page ? page_to_nid(page) : -ENOENT;
 set_status:
 		*status = err;
 
@@ -1560,7 +1555,7 @@
 	int nid = (int) data;
 	struct page *newpage;
 
-	newpage = alloc_pages_exact_node(nid,
+	newpage = __alloc_pages_node(nid,
 					 (GFP_HIGHUSER_MOVABLE |
 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
 					  __GFP_NORETRY | __GFP_NOWARN) &
diff --git a/mm/mlock.c b/mm/mlock.c
index 6fd2cf1..25936680 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -510,7 +510,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
-			  vma->vm_file, pgoff, vma_policy(vma));
+			  vma->vm_file, pgoff, vma_policy(vma),
+			  vma->vm_userfaultfd_ctx);
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index f126923..b6be324 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -41,6 +41,7 @@
 #include <linux/notifier.h>
 #include <linux/memory.h>
 #include <linux/printk.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -919,7 +920,8 @@
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-			struct file *file, unsigned long vm_flags)
+				struct file *file, unsigned long vm_flags,
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -935,6 +937,8 @@
 		return 0;
 	if (vma->vm_ops && vma->vm_ops->close)
 		return 0;
+	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
+		return 0;
 	return 1;
 }
 
@@ -965,9 +969,11 @@
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+		     struct anon_vma *anon_vma, struct file *file,
+		     pgoff_t vm_pgoff,
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -984,9 +990,11 @@
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+		    struct anon_vma *anon_vma, struct file *file,
+		    pgoff_t vm_pgoff,
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1029,7 +1037,8 @@
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
-			pgoff_t pgoff, struct mempolicy *policy)
+			pgoff_t pgoff, struct mempolicy *policy,
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1056,14 +1065,17 @@
 	if (prev && prev->vm_end == addr &&
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
-						anon_vma, file, pgoff)) {
+					    anon_vma, file, pgoff,
+					    vm_userfaultfd_ctx)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
 		if (next && end == next->vm_start &&
 				mpol_equal(policy, vma_policy(next)) &&
 				can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen) &&
+						     anon_vma, file,
+						     pgoff+pglen,
+						     vm_userfaultfd_ctx) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1084,7 +1096,8 @@
 	if (next && end == next->vm_start &&
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen)) {
+					     anon_vma, file, pgoff+pglen,
+					     vm_userfaultfd_ctx)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = vma_adjust(prev, prev->vm_start,
 				addr, prev->vm_pgoff, NULL);
@@ -1570,8 +1583,8 @@
 	/*
 	 * Can we just expand an old mapping?
 	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
-			NULL);
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
 	if (vma)
 		goto out;
 
@@ -2442,7 +2455,7 @@
 	      unsigned long addr, int new_below)
 {
 	struct vm_area_struct *new;
-	int err = -ENOMEM;
+	int err;
 
 	if (is_vm_hugetlb_page(vma) && (addr &
 					~(huge_page_mask(hstate_vma(vma)))))
@@ -2450,7 +2463,7 @@
 
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
-		goto out_err;
+		return -ENOMEM;
 
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
@@ -2498,7 +2511,6 @@
 	mpol_put(vma_policy(new));
  out_free_vma:
 	kmem_cache_free(vm_area_cachep, new);
- out_err:
 	return err;
 }
 
@@ -2757,7 +2769,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
 	if (vma)
 		goto out;
 
@@ -2859,6 +2871,13 @@
 	struct vm_area_struct *prev;
 	struct rb_node **rb_link, *rb_parent;
 
+	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+			   &prev, &rb_link, &rb_parent))
+		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
+		return -ENOMEM;
+
 	/*
 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
 	 * until its first write fault, when page's anon_vma and index
@@ -2871,16 +2890,10 @@
 	 * using the existing file pgoff checks and manipulations.
 	 * Similarly in do_mmap_pgoff and in do_brk.
 	 */
-	if (!vma->vm_file) {
+	if (vma_is_anonymous(vma)) {
 		BUG_ON(vma->anon_vma);
 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
 	}
-	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
-			   &prev, &rb_link, &rb_parent))
-		return -ENOMEM;
-	if ((vma->vm_flags & VM_ACCOUNT) &&
-	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
-		return -ENOMEM;
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
@@ -2905,7 +2918,7 @@
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
 	 */
-	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
+	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
 		pgoff = addr >> PAGE_SHIFT;
 		faulted_in_anon_vma = false;
 	}
@@ -2913,7 +2926,8 @@
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			    vma->vm_userfaultfd_ctx);
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
@@ -2938,30 +2952,31 @@
 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
 	} else {
 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-		if (new_vma) {
-			*new_vma = *vma;
-			new_vma->vm_start = addr;
-			new_vma->vm_end = addr + len;
-			new_vma->vm_pgoff = pgoff;
-			if (vma_dup_policy(vma, new_vma))
-				goto out_free_vma;
-			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
-			if (anon_vma_clone(new_vma, vma))
-				goto out_free_mempol;
-			if (new_vma->vm_file)
-				get_file(new_vma->vm_file);
-			if (new_vma->vm_ops && new_vma->vm_ops->open)
-				new_vma->vm_ops->open(new_vma);
-			vma_link(mm, new_vma, prev, rb_link, rb_parent);
-			*need_rmap_locks = false;
-		}
+		if (!new_vma)
+			goto out;
+		*new_vma = *vma;
+		new_vma->vm_start = addr;
+		new_vma->vm_end = addr + len;
+		new_vma->vm_pgoff = pgoff;
+		if (vma_dup_policy(vma, new_vma))
+			goto out_free_vma;
+		INIT_LIST_HEAD(&new_vma->anon_vma_chain);
+		if (anon_vma_clone(new_vma, vma))
+			goto out_free_mempol;
+		if (new_vma->vm_file)
+			get_file(new_vma->vm_file);
+		if (new_vma->vm_ops && new_vma->vm_ops->open)
+			new_vma->vm_ops->open(new_vma);
+		vma_link(mm, new_vma, prev, rb_link, rb_parent);
+		*need_rmap_locks = false;
 	}
 	return new_vma;
 
- out_free_mempol:
+out_free_mempol:
 	mpol_put(vma_policy(new_vma));
- out_free_vma:
+out_free_vma:
 	kmem_cache_free(vm_area_cachep, new_vma);
+out:
 	return NULL;
 }
 
@@ -3013,21 +3028,13 @@
 	pgoff_t pgoff;
 	struct page **pages;
 
-	/*
-	 * special mappings have no vm_file, and in that case, the mm
-	 * uses vm_pgoff internally. So we have to subtract it from here.
-	 * We are allowed to do this because we are the mm; do not copy
-	 * this code into drivers!
-	 */
-	pgoff = vmf->pgoff - vma->vm_pgoff;
-
 	if (vma->vm_ops == &legacy_special_mapping_vmops)
 		pages = vma->vm_private_data;
 	else
 		pages = ((struct vm_special_mapping *)vma->vm_private_data)->
 			pages;
 
-	for (; pgoff && *pages; ++pages)
+	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
 		pgoff--;
 
 	if (*pages) {
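
The mmap.c changes thread a vm_userfaultfd_ctx through every vma_merge() path:
two VMAs may only merge when that context matches, exactly like the existing
flags, file and mempolicy checks, because a merged VMA must fault identically
on both sides of the old boundary. A sketch of extending a mergeability
predicate with one more piece of per-VMA state (names illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct uffd_ctx {		/* stand-in for vm_userfaultfd_ctx */
		void *ctx;
	};

	struct vma {
		unsigned long flags;
		const char *file;
		struct uffd_ctx uffd;
	};

	static bool uffd_ctx_mergeable(const struct vma *vma,
				       struct uffd_ctx ctx)
	{
		return vma->uffd.ctx == ctx.ctx;
	}

	/*
	 * Every attribute that affects fault handling must agree, or the
	 * merged VMA would behave differently on each side of the seam.
	 */
	static bool is_mergeable(const struct vma *a, const struct vma *b)
	{
		return a->flags == b->flags &&
		       a->file == b->file &&
		       uffd_ctx_mergeable(a, b->uffd);
	}

	int main(void)
	{
		static int token;
		struct vma a = { .flags = 3, .file = NULL, .uffd = { NULL } };
		struct vma b = a;
		struct vma c = a;

		c.uffd.ctx = &token;	/* registered with a userfaultfd */
		/* prints: 1 0 */
		printf("%d %d\n", is_mergeable(&a, &b), is_mergeable(&a, &c));
		return 0;
	}
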
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e7d6f11..ef5be8e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -292,7 +292,8 @@
 	 */
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			   vma->vm_userfaultfd_ctx);
 	if (*pprev) {
 		vma = *pprev;
 		goto success;
diff --git a/mm/mremap.c b/mm/mremap.c
index a7c93ec..5a71cce 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -276,6 +276,12 @@
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
+		err = -ENOMEM;
+	} else if (vma->vm_ops && vma->vm_ops->mremap) {
+		err = vma->vm_ops->mremap(new_vma);
+	}
+
+	if (unlikely(err)) {
 		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables are still there,
@@ -286,16 +292,8 @@
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
-		new_addr = -ENOMEM;
+		new_addr = err;
 	} else {
-		if (vma->vm_file && vma->vm_file->f_op->mremap) {
-			err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
-			if (err < 0) {
-				move_page_tables(new_vma, new_addr, vma,
-						 old_addr, moved_len, true);
-				return err;
-			}
-		}
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
 	}
@@ -348,6 +346,7 @@
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = find_vma(mm, addr);
+	unsigned long pgoff;
 
 	if (!vma || vma->vm_start > addr)
 		return ERR_PTR(-EFAULT);
@@ -359,17 +358,17 @@
 	if (old_len > vma->vm_end - addr)
 		return ERR_PTR(-EFAULT);
 
-	/* Need to be careful about a growing mapping */
-	if (new_len > old_len) {
-		unsigned long pgoff;
+	if (new_len == old_len)
+		return vma;
 
-		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
-			return ERR_PTR(-EFAULT);
-		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
-		pgoff += vma->vm_pgoff;
-		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
-			return ERR_PTR(-EINVAL);
-	}
+	/* Need to be careful about a growing mapping */
+	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+	pgoff += vma->vm_pgoff;
+	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+		return ERR_PTR(-EINVAL);
+
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
+		return ERR_PTR(-EFAULT);
 
 	if (vma->vm_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
@@ -408,13 +407,8 @@
 	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
 		goto out;
 
-	/* Check if the location we're moving into overlaps the
-	 * old location at all, and fail if it does.
-	 */
-	if ((new_addr <= addr) && (new_addr+new_len) > addr)
-		goto out;
-
-	if ((addr <= new_addr) && (addr+old_len) > new_addr)
+	/* Ensure the old/new locations do not overlap */
+	if (addr + old_len > new_addr && new_addr + new_len > addr)
 		goto out;
 
 	ret = do_munmap(mm, new_addr, new_len);
@@ -580,8 +574,10 @@
 		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
 	}
 out:
-	if (ret & ~PAGE_MASK)
+	if (ret & ~PAGE_MASK) {
 		vm_unacct_memory(charged);
+		locked = 0;
+	}
 	up_write(&current->mm->mmap_sem);
 	if (locked && new_len > old_len)
 		mm_populate(new_addr + old_len, new_len - old_len);
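
The mremap() fixed-address check above collapses two asymmetric conditions
into the standard half-open interval test: [addr, addr + old_len) and
[new_addr, new_addr + new_len) intersect exactly when each range starts
before the other one ends. A quick standalone check of the idiom:

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Half-open intervals [a, a + alen) and [b, b + blen) overlap
	 * iff each interval starts before the other interval ends.
	 */
	static bool ranges_overlap(unsigned long a, unsigned long alen,
				   unsigned long b, unsigned long blen)
	{
		return a + alen > b && b + blen > a;
	}

	int main(void)
	{
		printf("%d\n", ranges_overlap(0, 4, 8, 4)); /* 0: disjoint */
		printf("%d\n", ranges_overlap(0, 4, 4, 4)); /* 0: touching */
		printf("%d\n", ranges_overlap(0, 8, 4, 4)); /* 1: overlap  */
		return 0;
	}
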
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dff991e..1ecc0bc 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -196,27 +196,26 @@
  * Determine the type of allocation constraint.
  */
 #ifdef CONFIG_NUMA
-static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
-				gfp_t gfp_mask, nodemask_t *nodemask,
-				unsigned long *totalpages)
+static enum oom_constraint constrained_alloc(struct oom_control *oc,
+					     unsigned long *totalpages)
 {
 	struct zone *zone;
 	struct zoneref *z;
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
 	bool cpuset_limited = false;
 	int nid;
 
 	/* Default to all available memory */
 	*totalpages = totalram_pages + total_swap_pages;
 
-	if (!zonelist)
+	if (!oc->zonelist)
 		return CONSTRAINT_NONE;
 	/*
 	 * Reach here only when __GFP_NOFAIL is used, so we should avoid
 	 * killing current. We have to kill a random task in this case.
 	 * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now.
 	 */
-	if (gfp_mask & __GFP_THISNODE)
+	if (oc->gfp_mask & __GFP_THISNODE)
 		return CONSTRAINT_NONE;
 
 	/*
@@ -224,17 +223,18 @@
 	 * the page allocator means a mempolicy is in effect.  Cpuset policy
 	 * is enforced in get_page_from_freelist().
 	 */
-	if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
+	if (oc->nodemask &&
+	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
 		*totalpages = total_swap_pages;
-		for_each_node_mask(nid, *nodemask)
+		for_each_node_mask(nid, *oc->nodemask)
 			*totalpages += node_spanned_pages(nid);
 		return CONSTRAINT_MEMORY_POLICY;
 	}
 
 	/* Check this allocation failure is caused by cpuset's wall function */
-	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-			high_zoneidx, nodemask)
-		if (!cpuset_zone_allowed(zone, gfp_mask))
+	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
+			high_zoneidx, oc->nodemask)
+		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
 			cpuset_limited = true;
 
 	if (cpuset_limited) {
@@ -246,20 +246,18 @@
 	return CONSTRAINT_NONE;
 }
 #else
-static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
-				gfp_t gfp_mask, nodemask_t *nodemask,
-				unsigned long *totalpages)
+static enum oom_constraint constrained_alloc(struct oom_control *oc,
+					     unsigned long *totalpages)
 {
 	*totalpages = totalram_pages + total_swap_pages;
 	return CONSTRAINT_NONE;
 }
 #endif
 
-enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
-		unsigned long totalpages, const nodemask_t *nodemask,
-		bool force_kill)
+enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+			struct task_struct *task, unsigned long totalpages)
 {
-	if (oom_unkillable_task(task, NULL, nodemask))
+	if (oom_unkillable_task(task, NULL, oc->nodemask))
 		return OOM_SCAN_CONTINUE;
 
 	/*
@@ -267,7 +265,7 @@
 	 * Don't allow any other task to have access to the reserves.
 	 */
 	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
-		if (!force_kill)
+		if (oc->order != -1)
 			return OOM_SCAN_ABORT;
 	}
 	if (!task->mm)
@@ -280,7 +278,7 @@
 	if (oom_task_origin(task))
 		return OOM_SCAN_SELECT;
 
-	if (task_will_free_mem(task) && !force_kill)
+	if (task_will_free_mem(task) && oc->order != -1)
 		return OOM_SCAN_ABORT;
 
 	return OOM_SCAN_OK;
@@ -289,12 +287,9 @@
 /*
  * Simple selection loop. We chose the process with the highest
  * number of 'points'.  Returns -1 on scan abort.
- *
- * (not docbooked, we don't want this one cluttering up the manual)
  */
-static struct task_struct *select_bad_process(unsigned int *ppoints,
-		unsigned long totalpages, const nodemask_t *nodemask,
-		bool force_kill)
+static struct task_struct *select_bad_process(struct oom_control *oc,
+		unsigned int *ppoints, unsigned long totalpages)
 {
 	struct task_struct *g, *p;
 	struct task_struct *chosen = NULL;
@@ -304,8 +299,7 @@
 	for_each_process_thread(g, p) {
 		unsigned int points;
 
-		switch (oom_scan_process_thread(p, totalpages, nodemask,
-						force_kill)) {
+		switch (oom_scan_process_thread(oc, p, totalpages)) {
 		case OOM_SCAN_SELECT:
 			chosen = p;
 			chosen_points = ULONG_MAX;
@@ -318,7 +312,7 @@
 		case OOM_SCAN_OK:
 			break;
 		};
-		points = oom_badness(p, NULL, nodemask, totalpages);
+		points = oom_badness(p, NULL, oc->nodemask, totalpages);
 		if (!points || points < chosen_points)
 			continue;
 		/* Prefer thread group leaders for display purposes */
@@ -380,13 +374,13 @@
 	rcu_read_unlock();
 }
 
-static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-			struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_header(struct oom_control *oc, struct task_struct *p,
+			struct mem_cgroup *memcg)
 {
 	task_lock(current);
 	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
 		"oom_score_adj=%hd\n",
-		current->comm, gfp_mask, order,
+		current->comm, oc->gfp_mask, oc->order,
 		current->signal->oom_score_adj);
 	cpuset_print_task_mems_allowed(current);
 	task_unlock(current);
@@ -396,7 +390,7 @@
 	else
 		show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(memcg, nodemask);
+		dump_tasks(memcg, oc->nodemask);
 }
 
 /*
@@ -487,10 +481,9 @@
  * Must be called while holding a reference to p, which will be released upon
  * returning.
  */
-void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 		      unsigned int points, unsigned long totalpages,
-		      struct mem_cgroup *memcg, nodemask_t *nodemask,
-		      const char *message)
+		      struct mem_cgroup *memcg, const char *message)
 {
 	struct task_struct *victim = p;
 	struct task_struct *child;
@@ -514,7 +507,7 @@
 	task_unlock(p);
 
 	if (__ratelimit(&oom_rs))
-		dump_header(p, gfp_mask, order, memcg, nodemask);
+		dump_header(oc, p, memcg);
 
 	task_lock(p);
 	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
@@ -537,7 +530,7 @@
 			/*
 			 * oom_badness() returns 0 if the thread is unkillable
 			 */
-			child_points = oom_badness(child, memcg, nodemask,
+			child_points = oom_badness(child, memcg, oc->nodemask,
 								totalpages);
 			if (child_points > victim_points) {
 				put_task_struct(victim);
@@ -600,8 +593,7 @@
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
-void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-			int order, const nodemask_t *nodemask,
+void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
 			struct mem_cgroup *memcg)
 {
 	if (likely(!sysctl_panic_on_oom))
@@ -615,7 +607,10 @@
 		if (constraint != CONSTRAINT_NONE)
 			return;
 	}
-	dump_header(NULL, gfp_mask, order, memcg, nodemask);
+	/* Do not panic for oom kills triggered by sysrq */
+	if (oc->order == -1)
+		return;
+	dump_header(oc, NULL, memcg);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 }
@@ -635,28 +630,21 @@
 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 
 /**
- * __out_of_memory - kill the "best" process when we run out of memory
- * @zonelist: zonelist pointer
- * @gfp_mask: memory allocation flags
- * @order: amount of memory being requested as a power of 2
- * @nodemask: nodemask passed to page allocator
- * @force_kill: true if a task must be killed, even if others are exiting
+ * out_of_memory - kill the "best" process when we run out of memory
+ * @oc: pointer to struct oom_control
  *
  * If we run out of memory, we have the choice between either
  * killing a random task (bad), letting the system crash (worse)
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		   int order, nodemask_t *nodemask, bool force_kill)
+bool out_of_memory(struct oom_control *oc)
 {
-	const nodemask_t *mpol_mask;
 	struct task_struct *p;
 	unsigned long totalpages;
 	unsigned long freed = 0;
 	unsigned int uninitialized_var(points);
 	enum oom_constraint constraint = CONSTRAINT_NONE;
-	int killed = 0;
 
 	if (oom_killer_disabled)
 		return false;
@@ -664,7 +652,7 @@
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
 		/* Got some memory back in the last second. */
-		goto out;
+		return true;
 
 	/*
 	 * If current has a pending SIGKILL or is exiting, then automatically
@@ -677,47 +665,42 @@
 	if (current->mm &&
 	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
 		mark_oom_victim(current);
-		goto out;
+		return true;
 	}
 
 	/*
 	 * Check if there were limitations on the allocation (only relevant for
 	 * NUMA) that may require different handling.
 	 */
-	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
-						&totalpages);
-	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
-	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL);
+	constraint = constrained_alloc(oc, &totalpages);
+	if (constraint != CONSTRAINT_MEMORY_POLICY)
+		oc->nodemask = NULL;
+	check_panic_on_oom(oc, constraint, NULL);
 
 	if (sysctl_oom_kill_allocating_task && current->mm &&
-	    !oom_unkillable_task(current, NULL, nodemask) &&
+	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 		get_task_struct(current);
-		oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
-				 nodemask,
+		oom_kill_process(oc, current, 0, totalpages, NULL,
 				 "Out of memory (oom_kill_allocating_task)");
-		goto out;
+		return true;
 	}
 
-	p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
+	p = select_bad_process(oc, &points, totalpages);
 	/* Found nothing?!?! Either we hang forever, or we panic. */
-	if (!p) {
-		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
+	if (!p && oc->order != -1) {
+		dump_header(oc, NULL, NULL);
 		panic("Out of memory and no killable processes...\n");
 	}
-	if (p != (void *)-1UL) {
-		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
-				 nodemask, "Out of memory");
-		killed = 1;
-	}
-out:
-	/*
-	 * Give the killed threads a good chance of exiting before trying to
-	 * allocate memory again.
-	 */
-	if (killed)
+	if (p && p != (void *)-1UL) {
+		oom_kill_process(oc, p, points, totalpages, NULL,
+				 "Out of memory");
+		/*
+		 * Give the killed process a good chance to exit before trying
+		 * to allocate memory again.
+		 */
 		schedule_timeout_killable(1);
-
+	}
 	return true;
 }
 
@@ -728,13 +711,20 @@
  */
 void pagefault_out_of_memory(void)
 {
+	struct oom_control oc = {
+		.zonelist = NULL,
+		.nodemask = NULL,
+		.gfp_mask = 0,
+		.order = 0,
+	};
+
 	if (mem_cgroup_oom_synchronize(true))
 		return;
 
 	if (!mutex_trylock(&oom_lock))
 		return;
 
-	if (!out_of_memory(NULL, 0, 0, NULL, false)) {
+	if (!out_of_memory(&oc)) {
 		/*
 		 * There shouldn't be any user tasks runnable while the
 		 * OOM killer is disabled, so the current task has to
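
The oom_kill.c rework replaces the long (zonelist, gfp_mask, order, nodemask,
force_kill) argument lists with a single struct oom_control handed to every
function, folding the old force_kill boolean into order == -1 (the sysrq
case). A sketch of the parameter-object refactor (the struct name follows the
patch, the bodies are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct oom_control {
		const void *zonelist;	/* stand-ins for the real types */
		const void *nodemask;
		unsigned int gfp_mask;
		int order;		/* -1: forced kill, e.g. sysrq */
	};

	static bool forced_kill(const struct oom_control *oc)
	{
		return oc->order == -1;
	}

	/*
	 * Callees take the whole context instead of five loose arguments,
	 * so adding a field later does not ripple through every signature.
	 */
	static void out_of_memory(const struct oom_control *oc)
	{
		printf("gfp=%#x order=%d forced=%d\n",
		       oc->gfp_mask, oc->order, forced_kill(oc));
	}

	int main(void)
	{
		struct oom_control alloc_oc = { .gfp_mask = 0xd0, .order = 2 };
		struct oom_control sysrq_oc = { .order = -1 };

		out_of_memory(&alloc_oc);
		out_of_memory(&sysrq_oc);
		return 0;
	}
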
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b5240b..48aaf7b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -125,6 +125,24 @@
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+/*
+ * A cached value of the page's pageblock's migratetype, used when the page is
+ * put on a pcplist. Used to avoid the pageblock migratetype lookup when
+ * freeing from pcplists in most cases, at the cost of possibly becoming stale.
+ * Also the migratetype set in the page does not necessarily match the pcplist
+ * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
+ * other index - this ensures that it will be put on the correct CMA freelist.
+ */
+static inline int get_pcppage_migratetype(struct page *page)
+{
+	return page->index;
+}
+
+static inline void set_pcppage_migratetype(struct page *page, int migratetype)
+{
+	page->index = migratetype;
+}
+
 #ifdef CONFIG_PM_SLEEP
 /*
  * The following functions are used by the suspend/hibernate code to temporarily
@@ -206,6 +224,9 @@
 	 "HighMem",
 #endif
 	 "Movable",
+#ifdef CONFIG_ZONE_DEVICE
+	 "Device",
+#endif
 };
 
 int min_free_kbytes = 1024;
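
The new get/set_pcppage_migratetype() helpers cache the pageblock's
migratetype in page->index, which is unused while the page sits on a per-cpu
list: the hot free path then skips the pageblock bitmap lookup at the cost of
a possibly stale value, as the comment above explains. A sketch of caching a
derived value in an otherwise idle field (names illustrative):

	#include <stdio.h>

	struct page {
		unsigned long index;	/* idle on the pcplist: reuse it */
	};

	/* Stands in for the expensive pageblock bitmap lookup. */
	static int lookup_migratetype_slow(const struct page *page)
	{
		(void)page;
		return 2;		/* e.g. MIGRATE_MOVABLE */
	}

	static inline void set_pcppage_migratetype(struct page *page, int mt)
	{
		page->index = mt;
	}

	static inline int get_pcppage_migratetype(const struct page *page)
	{
		return page->index;
	}

	int main(void)
	{
		struct page page;

		/* When the page enters the pcplist: look up once... */
		set_pcppage_migratetype(&page, lookup_migratetype_slow(&page));
		/* ...then every hot-path access reads the cached copy. */
		printf("mt=%d\n", get_pcppage_migratetype(&page));
		return 0;
	}
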
@@ -788,7 +809,11 @@
 			page = list_entry(list->prev, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
-			mt = get_freepage_migratetype(page);
+
+			mt = get_pcppage_migratetype(page);
+			/* MIGRATE_ISOLATE page should not go to pcplists */
+			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+			/* Pageblock could have been isolated meanwhile */
 			if (unlikely(has_isolate_pageblock(zone)))
 				mt = get_pageblock_migratetype(page);
 
@@ -952,7 +977,6 @@
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	set_freepage_migratetype(page, migratetype);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
 	local_irq_restore(flags);
 }
@@ -1380,7 +1404,7 @@
 		rmv_page_order(page);
 		area->nr_free--;
 		expand(zone, page, order, current_order, area, migratetype);
-		set_freepage_migratetype(page, migratetype);
+		set_pcppage_migratetype(page, migratetype);
 		return page;
 	}
 
@@ -1457,7 +1481,6 @@
 		order = page_order(page);
 		list_move(&page->lru,
 			  &zone->free_area[order].free_list[migratetype]);
-		set_freepage_migratetype(page, migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -1627,14 +1650,13 @@
 		expand(zone, page, order, current_order, area,
 					start_migratetype);
 		/*
-		 * The freepage_migratetype may differ from pageblock's
+		 * The pcppage_migratetype may differ from pageblock's
 		 * migratetype depending on the decisions in
-		 * try_to_steal_freepages(). This is OK as long as it
-		 * does not differ for MIGRATE_CMA pageblocks. For CMA
-		 * we need to make sure unallocated pages flushed from
-		 * pcp lists are returned to the correct freelist.
+		 * find_suitable_fallback(). This is OK as long as it does not
+		 * differ for MIGRATE_CMA pageblocks. Those can be used as
+		 * fallback only via special __rmqueue_cma_fallback() function
 		 */
-		set_freepage_migratetype(page, start_migratetype);
+		set_pcppage_migratetype(page, start_migratetype);
 
 		trace_mm_page_alloc_extfrag(page, order, current_order,
 			start_migratetype, fallback_mt);
@@ -1710,7 +1732,7 @@
 		else
 			list_add_tail(&page->lru, list);
 		list = &page->lru;
-		if (is_migrate_cma(get_freepage_migratetype(page)))
+		if (is_migrate_cma(get_pcppage_migratetype(page)))
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
 	}
@@ -1907,7 +1929,7 @@
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	set_freepage_migratetype(page, migratetype);
+	set_pcppage_migratetype(page, migratetype);
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
 
@@ -2112,7 +2134,7 @@
 		if (!page)
 			goto failed;
 		__mod_zone_freepage_state(zone, -(1 << order),
-					  get_freepage_migratetype(page));
+					  get_pcppage_migratetype(page));
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
@@ -2693,6 +2715,12 @@
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	const struct alloc_context *ac, unsigned long *did_some_progress)
 {
+	struct oom_control oc = {
+		.zonelist = ac->zonelist,
+		.nodemask = ac->nodemask,
+		.gfp_mask = gfp_mask,
+		.order = order,
+	};
 	struct page *page;
 
 	*did_some_progress = 0;
@@ -2744,8 +2772,7 @@
 			goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
-			|| WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
+	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
 		*did_some_progress = 1;
 out:
 	mutex_unlock(&oom_lock);
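
With the oom_control descriptor introduced above, out_of_memory() takes one
argument instead of a growing parameter list. A hedged sketch of the
structure the initializer implies (the authoritative definition lives in
include/linux/oom.h):

	struct oom_control {
		struct zonelist *zonelist;	/* zonelist of the failed allocation */
		nodemask_t	*nodemask;	/* allocation constraint, may be NULL */
		gfp_t		gfp_mask;	/* gfp mask of the failed allocation */
		int		order;		/* order of the failed allocation */
	};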
@@ -3487,8 +3514,6 @@
  *
  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  * back.
- * Note this is not alloc_pages_exact_node() which allocates on a specific node,
- * but is not exact.
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
@@ -5063,7 +5088,7 @@
 {
 	unsigned long zone_start_pfn, zone_end_pfn;
 
-	/* When hotadd a new node, the node should be empty */
+	/* When hot-adding a new node from cpu_up(), the node should be empty */
 	if (!node_start_pfn && !node_end_pfn)
 		return 0;
 
@@ -5130,7 +5155,7 @@
 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 	unsigned long zone_start_pfn, zone_end_pfn;
 
-	/* When hotadd a new node, the node should be empty */
+	/* When hot-adding a new node from cpu_up(), the node should be empty */
 	if (!node_start_pfn && !node_end_pfn)
 		return 0;
 
@@ -5303,8 +5328,7 @@
  *
  * NOTE: pgdat should get zeroed by caller.
  */
-static void __paginginit free_area_init_core(struct pglist_data *pgdat,
-		unsigned long node_start_pfn, unsigned long node_end_pfn)
+static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 {
 	enum zone_type j;
 	int nid = pgdat->node_id;
@@ -5455,7 +5479,8 @@
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
-		(u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
+		(u64)start_pfn << PAGE_SHIFT,
+		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
 #endif
 	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
 				  zones_size, zholes_size);
@@ -5467,7 +5492,7 @@
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
-	free_area_init_core(pgdat, start_pfn, end_pfn);
+	free_area_init_core(pgdat);
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5478,11 +5503,9 @@
  */
 void __init setup_nr_node_ids(void)
 {
-	unsigned int node;
-	unsigned int highest = 0;
+	unsigned int highest;
 
-	for_each_node_mask(node, node_possible_map)
-		highest = node;
+	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
 	nr_node_ids = highest + 1;
 }
 #endif
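
find_last_bit(addr, size) returns the index of the highest set bit, or size
when the bitmap is empty, so the replacement computes the same nr_node_ids
as the removed loop whenever at least one node is possible (always true on
a running system). A worked example with hypothetical values:

	/* nodes 0, 2 and 5 possible: bits = 0b100101 */
	find_last_bit(node_possible_map.bits, MAX_NUMNODES) == 5;
	nr_node_ids = 5 + 1;	/* same result as scanning the whole mask */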
@@ -6003,7 +6026,7 @@
  * set_dma_reserve - set the specified number of pages reserved in the first zone
  * @new_dma_reserve: The number of pages to mark reserved
  *
- * The per-cpu batchsize and zone watermarks are determined by present_pages.
+ * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  * In the DMA zone, a significant percentage may be consumed by kernel image
  * and other unfreeable allocations which can skew the watermarks badly. This
  * function may optionally be used to account for unfreeable pages in the
@@ -6056,7 +6079,7 @@
 }
 
 /*
- * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
+ * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
  *	or min_free_kbytes changes.
  */
 static void calculate_totalreserve_pages(void)
@@ -6100,7 +6123,7 @@
 
 /*
  * setup_per_zone_lowmem_reserve - called whenever
- *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
+ *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
  *	has a correct pages reserved value, so an adequate number of
  *	pages are left in the zone after a successful __alloc_pages().
  */
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 303c908..4568fd5 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -9,7 +9,8 @@
 #include <linux/hugetlb.h>
 #include "internal.h"
 
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
+static int set_migratetype_isolate(struct page *page,
+				bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
 	unsigned long flags, pfn;
@@ -72,7 +73,7 @@
 	return ret;
 }
 
-void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
 	unsigned long flags, nr_pages;
@@ -223,34 +224,16 @@
 			continue;
 		}
 		page = pfn_to_page(pfn);
-		if (PageBuddy(page)) {
+		if (PageBuddy(page))
 			/*
-			 * If race between isolatation and allocation happens,
-			 * some free pages could be in MIGRATE_MOVABLE list
-			 * although pageblock's migratation type of the page
-			 * is MIGRATE_ISOLATE. Catch it and move the page into
-			 * MIGRATE_ISOLATE list.
+			 * If the page is on a free list, it has to be on
+			 * the correct MIGRATE_ISOLATE freelist. There is no
+			 * simple way to verify that as VM_BUG_ON(), though.
 			 */
-			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
-				struct page *end_page;
-
-				end_page = page + (1 << page_order(page)) - 1;
-				move_freepages(page_zone(page), page, end_page,
-						MIGRATE_ISOLATE);
-			}
 			pfn += 1 << page_order(page);
-		}
-		else if (page_count(page) == 0 &&
-			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
-			pfn += 1;
-		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
-			/*
-			 * The HWPoisoned page may be not in buddy
-			 * system, and page_count() is not 0.
-			 */
+		else if (skip_hwpoisoned_pages && PageHWPoison(page))
+			/* A HWPoisoned page cannot be also PageBuddy */
 			pfn++;
-			continue;
-		}
 		else
 			break;
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index 171b687..0db38e7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -62,6 +62,8 @@
 
 #include <asm/tlbflush.h>
 
+#include <trace/events/tlb.h>
+
 #include "internal.h"
 
 static struct kmem_cache *anon_vma_cachep;
@@ -583,6 +585,107 @@
 	return address;
 }
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+static void percpu_flush_tlb_batch_pages(void *data)
+{
+	/*
+	 * All TLB entries are flushed on the assumption that it is
+	 * cheaper to flush all TLBs and let them be refilled than
+	 * flushing individual PFNs. Note that we do not track mm's
+	 * to flush as that might simply be multiple full TLB flushes
+	 * for no gain.
+	 */
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	flush_tlb_local();
+}
+
+/*
+ * Flush TLB entries for recently unmapped pages from remote CPUs. It is
+ * important if a PTE was dirty when it was unmapped that it's flushed
+ * before any IO is initiated on the page to prevent lost writes. Similarly,
+ * it must be flushed before freeing to prevent data leakage.
+ */
+void try_to_unmap_flush(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	int cpu;
+
+	if (!tlb_ubc->flush_required)
+		return;
+
+	cpu = get_cpu();
+
+	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
+
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
+		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
+		smp_call_function_many(&tlb_ubc->cpumask,
+			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	}
+	cpumask_clear(&tlb_ubc->cpumask);
+	tlb_ubc->flush_required = false;
+	tlb_ubc->writable = false;
+	put_cpu();
+}
+
+/* Flush iff there are potentially writable TLB entries that can race with IO */
+void try_to_unmap_flush_dirty(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+	if (tlb_ubc->writable)
+		try_to_unmap_flush();
+}
+
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+		struct page *page, bool writable)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+	tlb_ubc->flush_required = true;
+
+	/*
+	 * If the PTE was dirty then it's best to assume it's writable. The
+	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+	 * before the page is queued for IO.
+	 */
+	if (writable)
+		tlb_ubc->writable = true;
+}
+
+/*
+ * Returns true if the TLB flush should be deferred to the end of a batch of
+ * unmap operations to reduce IPIs.
+ */
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+	bool should_defer = false;
+
+	if (!(flags & TTU_BATCH_FLUSH))
+		return false;
+
+	/* If remote CPUs need to be flushed then defer and batch the flush */
+	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
+		should_defer = true;
+	put_cpu();
+
+	return should_defer;
+}
+#else
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+		struct page *page, bool writable)
+{
+}
+
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
 /*
  * At what user virtual address is page expected in vma?
  * Caller should check the page is actually part of the vma.
@@ -1220,7 +1323,20 @@
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
-	pteval = ptep_clear_flush(vma, address, pte);
+	if (should_defer_flush(mm, flags)) {
+		/*
+		 * We clear the PTE but do not flush so potentially a remote
+		 * CPU could still be writing to the page. If the entry was
+		 * previously clean then the architecture must guarantee that
+		 * a clear->dirty transition on a cached TLB entry is written
+		 * through and traps if the PTE is unmapped.
+		 */
+		pteval = ptep_get_and_clear(mm, address, pte);
+
+		set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
+	} else {
+		pteval = ptep_clear_flush(vma, address, pte);
+	}
 
 	/* Move the dirty bit to the physical page now the pte is gone. */
 	if (pte_dirty(pteval))
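
The batched-flush state referenced as current->tlb_ubc lives on the task so
that it persists across all the pages unmapped in one reclaim pass. A
plausible shape for it, inferred from the users above (the real definition
sits in the scheduler headers behind the same config option):

	struct tlbflush_unmap_batch {
		/* CPUs that may hold stale TLB entries for unmapped pages */
		struct cpumask	cpumask;
		/* true once any PTE has been cleared without a flush */
		bool		flush_required;
		/* true if any cleared PTE was dirty; see try_to_unmap_flush_dirty() */
		bool		writable;
	};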
diff --git a/mm/shmem.c b/mm/shmem.c
index dbe0c1e..48ce829 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -542,6 +542,21 @@
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
+static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
+			 struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	spin_lock(&info->lock);
+	shmem_recalc_inode(inode);
+	spin_unlock(&info->lock);
+
+	generic_fillattr(inode, stat);
+
+	return 0;
+}
+
 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
@@ -3122,6 +3137,7 @@
 };
 
 static const struct inode_operations shmem_inode_operations = {
+	.getattr	= shmem_getattr,
 	.setattr	= shmem_setattr,
 #ifdef CONFIG_TMPFS_XATTR
 	.setxattr	= shmem_setxattr,
diff --git a/mm/slab.c b/mm/slab.c
index bbd0b47..c77ebe6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1595,7 +1595,7 @@
 	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
 		return NULL;
 
-	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
 		memcg_uncharge_slab(cachep, cachep->gfporder);
 		slab_out_of_memory(cachep, flags, nodeid);
@@ -3416,6 +3416,19 @@
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	__kmem_cache_free_bulk(s, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+								void **p)
+{
+	return __kmem_cache_alloc_bulk(s, flags, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
 #ifdef CONFIG_TRACING
 void *
 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
diff --git a/mm/slab.h b/mm/slab.h
index 8da63e4..a3a967d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -163,6 +163,15 @@
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
+/*
+ * Generic implementation of bulk operations.
+ * These are useful for situations in which the allocator cannot
+ * perform optimizations. In that case segments of the objects listed
+ * may be allocated or freed using these operations.
+ */
+void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * Iterate over all memcg caches of the given root cache. The caller must hold
@@ -321,7 +330,7 @@
 		return cachep;
 
 	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
-	       __func__, cachep->name, s->name);
+	       __func__, s->name, cachep->name);
 	WARN_ON_ONCE(1);
 	return s;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8683110..5ce4fae 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -104,6 +104,29 @@
 }
 #endif
 
+void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
+{
+	size_t i;
+
+	for (i = 0; i < nr; i++)
+		kmem_cache_free(s, p[i]);
+}
+
+bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+								void **p)
+{
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+		if (!x) {
+			__kmem_cache_free_bulk(s, i, p);
+			return false;
+		}
+	}
+	return true;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 void slab_init_memcg_params(struct kmem_cache *s)
 {
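
A hedged usage sketch of the new bulk API (my_cache is a stand-in for any
kmem_cache). The generic fallback above gives every allocator the same
contract, which the SLUB fast path added later in this series preserves:
interrupts enabled on entry, and a failed allocation returns the partial
batch to the cache before returning false:

	void *objs[16];

	if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		/* ... use objs[0] .. objs[15] ... */
		kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
	}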
@@ -477,7 +500,7 @@
 			     struct kmem_cache *root_cache)
 {
 	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
-	struct cgroup_subsys_state *css = mem_cgroup_css(memcg);
+	struct cgroup_subsys_state *css = &memcg->css;
 	struct memcg_cache_array *arr;
 	struct kmem_cache *s = NULL;
 	char *cache_name;
@@ -617,6 +640,9 @@
 	bool need_rcu_barrier = false;
 	bool busy = false;
 
+	if (unlikely(!s))
+		return;
+
 	BUG_ON(!is_root_cache(s));
 
 	get_online_cpus();
diff --git a/mm/slob.c b/mm/slob.c
index 4765f65..0d7e5df 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -45,7 +45,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_exact_node() with the specified node id is used
+ * provided, __alloc_pages_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
@@ -193,7 +193,7 @@
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
-		page = alloc_pages_exact_node(node, gfp, order);
+		page = __alloc_pages_node(node, gfp, order);
 	else
 #endif
 		page = alloc_pages(gfp, order);
@@ -611,6 +611,19 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	__kmem_cache_free_bulk(s, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+								void **p)
+{
+	return __kmem_cache_alloc_bulk(s, flags, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
 int __kmem_cache_shutdown(struct kmem_cache *c)
 {
 	/* No way to check for remaining objects */
diff --git a/mm/slub.c b/mm/slub.c
index f68c0e5..f614b5d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1306,6 +1306,17 @@
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1323,7 +1334,7 @@
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
 	else
-		page = alloc_pages_exact_node(node, flags, order);
+		page = __alloc_pages_node(node, flags, order);
 
 	if (!page)
 		memcg_uncharge_slab(s, order);
@@ -1336,6 +1347,8 @@
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1349,6 +1362,8 @@
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+	if ((alloc_gfp & __GFP_WAIT) && oo_order(oo) > oo_order(s->min))
+		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_WAIT;
 
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
@@ -1359,13 +1374,13 @@
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1395,9 @@
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1448,10 +1421,34 @@
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
 	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);
@@ -2712,7 +2709,7 @@
 	 * Determine the currently cpus per cpu slab.
 	 * The cpu may change afterward. However that does not matter since
 	 * data is retrieved via this pointer. If we are on the same cpu
-	 * during the cmpxchg then the free will succedd.
+	 * during the cmpxchg then the free will succeed.
 	 */
 	do {
 		tid = this_cpu_read(s->cpu_slab->tid);
@@ -2750,6 +2747,113 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+/* Note that interrupts must be enabled when calling this function. */
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	struct kmem_cache_cpu *c;
+	struct page *page;
+	int i;
+
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
+
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		BUG_ON(!object);
+		/* kmem cache debug support */
+		s = cache_from_obj(s, object);
+		if (unlikely(!s))
+			goto exit;
+		slab_free_hook(s, object);
+
+		page = virt_to_head_page(object);
+
+		if (c->page == page) {
+			/* Fastpath: local CPU free */
+			set_freepointer(s, object, c->freelist);
+			c->freelist = object;
+		} else {
+			c->tid = next_tid(c->tid);
+			local_irq_enable();
+			/* Slowpath: overhead locked cmpxchg_double_slab */
+			__slab_free(s, page, object, _RET_IP_);
+			local_irq_disable();
+			c = this_cpu_ptr(s->cpu_slab);
+		}
+	}
+exit:
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+/* Note that interrupts must be enabled when calling this function. */
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			   void **p)
+{
+	struct kmem_cache_cpu *c;
+	int i;
+
+	/*
+	 * Drain objects in the per cpu slab, while disabling local
+	 * IRQs, which protects against PREEMPT and interrupt
+	 * handlers invoking the normal fastpath.
+	 */
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
+
+	for (i = 0; i < size; i++) {
+		void *object = c->freelist;
+
+		if (unlikely(!object)) {
+			local_irq_enable();
+			/*
+			 * Invoking the slow path likely has the side effect
+			 * of re-populating the per-CPU c->freelist.
+			 */
+			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+					    _RET_IP_, c);
+			if (unlikely(!p[i])) {
+				__kmem_cache_free_bulk(s, i, p);
+				return false;
+			}
+			local_irq_disable();
+			c = this_cpu_ptr(s->cpu_slab);
+			continue; /* back to the top of the for loop */
+		}
+
+		/* kmem_cache debug support */
+		s = slab_pre_alloc_hook(s, flags);
+		if (unlikely(!s)) {
+			__kmem_cache_free_bulk(s, i, p);
+			c->tid = next_tid(c->tid);
+			local_irq_enable();
+			return false;
+		}
+
+		c->freelist = get_freepointer(s, object);
+		p[i] = object;
+
+		/* kmem_cache debug support */
+		slab_post_alloc_hook(s, flags, object);
+	}
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
+
+	/* Clear memory outside IRQ disabled fastpath loop */
+	if (unlikely(flags & __GFP_ZERO)) {
+		int j;
+
+		for (j = 0; j < i; j++)
+			memset(p[j], 0, s->object_size);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
+
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -5181,7 +5285,7 @@
 	s->kobj.kset = cache_kset(s);
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err)
-		goto out_put_kobj;
+		goto out;
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
 	if (err)
@@ -5208,8 +5312,6 @@
 	return err;
 out_del_kobj:
 	kobject_del(&s->kobj);
-out_put_kobj:
-	kobject_put(&s->kobj);
 	goto out;
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8bc8e66..d504adb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -288,17 +288,14 @@
 	return page;
 }
 
-/* 
- * Locate a page of swap in physical memory, reserving swap cache space
- * and reading the disk if it is not already cached.
- * A failure return means that either the page allocation failed or that
- * the swap entry is no longer in use.
- */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+			struct vm_area_struct *vma, unsigned long addr,
+			bool *new_page_allocated)
 {
 	struct page *found_page, *new_page = NULL;
+	struct address_space *swapper_space = swap_address_space(entry);
 	int err;
+	*new_page_allocated = false;
 
 	do {
 		/*
@@ -306,8 +303,7 @@
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		found_page = find_get_page(swap_address_space(entry),
-					entry.val);
+		found_page = find_get_page(swapper_space, entry.val);
 		if (found_page)
 			break;
 
@@ -366,7 +362,7 @@
 			 * Initiate read into locked page and return.
 			 */
 			lru_cache_add_anon(new_page);
-			swap_readpage(new_page);
+			*new_page_allocated = true;
 			return new_page;
 		}
 		radix_tree_preload_end();
@@ -384,6 +380,25 @@
 	return found_page;
 }
 
+/*
+ * Locate a page of swap in physical memory, reserving swap cache space
+ * and reading the disk if it is not already cached.
+ * A failure return means that either the page allocation failed or that
+ * the swap entry is no longer in use.
+ */
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	bool page_was_allocated;
+	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
+			vma, addr, &page_was_allocated);
+
+	if (page_was_allocated)
+		swap_readpage(retpage);
+
+	return retpage;
+}
+
 static unsigned long swapin_nr_pages(unsigned long offset)
 {
 	static unsigned long prev_offset;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 41e4581..5887731 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -875,6 +875,48 @@
 }
 
 /*
+ * How many references to @entry are currently swapped out?
+ * This considers COUNT_CONTINUED so it returns the exact answer.
+ */
+int swp_swapcount(swp_entry_t entry)
+{
+	int count, tmp_count, n;
+	struct swap_info_struct *p;
+	struct page *page;
+	pgoff_t offset;
+	unsigned char *map;
+
+	p = swap_info_get(entry);
+	if (!p)
+		return 0;
+
+	count = swap_count(p->swap_map[swp_offset(entry)]);
+	if (!(count & COUNT_CONTINUED))
+		goto out;
+
+	count &= ~COUNT_CONTINUED;
+	n = SWAP_MAP_MAX + 1;
+
+	offset = swp_offset(entry);
+	page = vmalloc_to_page(p->swap_map + offset);
+	offset &= ~PAGE_MASK;
+	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
+
+	do {
+		page = list_entry(page->lru.next, struct page, lru);
+		map = kmap_atomic(page);
+		tmp_count = map[offset];
+		kunmap_atomic(map);
+
+		count += (tmp_count & ~COUNT_CONTINUED) * n;
+		n *= (SWAP_CONT_MAX + 1);
+	} while (tmp_count & COUNT_CONTINUED);
+out:
+	spin_unlock(&p->lock);
+	return count;
+}
+
+/*
  * We can write to an anon page without COW if there are no other references
  * to it.  And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
@@ -2143,11 +2185,10 @@
 	if (S_ISBLK(inode->i_mode)) {
 		p->bdev = bdgrab(I_BDEV(inode));
 		error = blkdev_get(p->bdev,
-				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-				   sys_swapon);
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
 		if (error < 0) {
 			p->bdev = NULL;
-			return -EINVAL;
+			return error;
 		}
 		p->old_block_size = block_size(p->bdev);
 		error = set_blocksize(p->bdev, PAGE_SIZE);
@@ -2348,7 +2389,6 @@
 	struct filename *name;
 	struct file *swap_file = NULL;
 	struct address_space *mapping;
-	int i;
 	int prio;
 	int error;
 	union swap_header *swap_header;
@@ -2388,19 +2428,8 @@
 
 	p->swap_file = swap_file;
 	mapping = swap_file->f_mapping;
-
-	for (i = 0; i < nr_swapfiles; i++) {
-		struct swap_info_struct *q = swap_info[i];
-
-		if (q == p || !q->swap_file)
-			continue;
-		if (mapping == q->swap_file->f_mapping) {
-			error = -EBUSY;
-			goto bad_swap;
-		}
-	}
-
 	inode = mapping->host;
+
 	/* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */
 	error = claim_swapfile(p, inode);
 	if (unlikely(error))
@@ -2433,6 +2462,8 @@
 		goto bad_swap;
 	}
 	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+		int cpu;
+
 		p->flags |= SWP_SOLIDSTATE;
 		/*
 		 * select a random position to start with to help wear leveling
@@ -2451,9 +2482,9 @@
 			error = -ENOMEM;
 			goto bad_swap;
 		}
-		for_each_possible_cpu(i) {
+		for_each_possible_cpu(cpu) {
 			struct percpu_cluster *cluster;
-			cluster = per_cpu_ptr(p->percpu_cluster, i);
+			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
 			cluster_set_null(&cluster->index);
 		}
 	}
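
For swp_swapcount() added above: the swap map stores a small per-entry
count, and COUNT_CONTINUED chains continuation pages, each digit worth a
growing radix of n. A worked example using the usual swap.h constants
(SWAP_MAP_MAX = 0x3e, SWAP_CONT_MAX = 0x7f; stated here as an assumption):

	base map byte   = SWAP_MAP_MAX | COUNT_CONTINUED  /* 62, continued */
	continuation[0] = 2                               /* units of n = 63 */
	swp_swapcount() = 62 + 2 * 63 = 188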
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
new file mode 100644
index 0000000..77fee93
--- /dev/null
+++ b/mm/userfaultfd.c
@@ -0,0 +1,308 @@
+/*
+ *  mm/userfaultfd.c
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/mmu_notifier.h>
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+static int mcopy_atomic_pte(struct mm_struct *dst_mm,
+			    pmd_t *dst_pmd,
+			    struct vm_area_struct *dst_vma,
+			    unsigned long dst_addr,
+			    unsigned long src_addr,
+			    struct page **pagep)
+{
+	struct mem_cgroup *memcg;
+	pte_t _dst_pte, *dst_pte;
+	spinlock_t *ptl;
+	void *page_kaddr;
+	int ret;
+	struct page *page;
+
+	if (!*pagep) {
+		ret = -ENOMEM;
+		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
+		if (!page)
+			goto out;
+
+		page_kaddr = kmap_atomic(page);
+		ret = copy_from_user(page_kaddr,
+				     (const void __user *) src_addr,
+				     PAGE_SIZE);
+		kunmap_atomic(page_kaddr);
+
+		/* fallback to copy_from_user outside mmap_sem */
+		if (unlikely(ret)) {
+			ret = -EFAULT;
+			*pagep = page;
+			/* don't free the page */
+			goto out;
+		}
+	} else {
+		page = *pagep;
+		*pagep = NULL;
+	}
+
+	/*
+	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * preceding stores to the page contents become visible before
+	 * the set_pte_at() write.
+	 */
+	__SetPageUptodate(page);
+
+	ret = -ENOMEM;
+	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg))
+		goto out_release;
+
+	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+	if (dst_vma->vm_flags & VM_WRITE)
+		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+
+	ret = -EEXIST;
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	if (!pte_none(*dst_pte))
+		goto out_release_uncharge_unlock;
+
+	inc_mm_counter(dst_mm, MM_ANONPAGES);
+	page_add_new_anon_rmap(page, dst_vma, dst_addr);
+	mem_cgroup_commit_charge(page, memcg, false);
+	lru_cache_add_active_or_unevictable(page, dst_vma);
+
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+	pte_unmap_unlock(dst_pte, ptl);
+	ret = 0;
+out:
+	return ret;
+out_release_uncharge_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	mem_cgroup_cancel_charge(page, memcg);
+out_release:
+	page_cache_release(page);
+	goto out;
+}
+
+static int mfill_zeropage_pte(struct mm_struct *dst_mm,
+			      pmd_t *dst_pmd,
+			      struct vm_area_struct *dst_vma,
+			      unsigned long dst_addr)
+{
+	pte_t _dst_pte, *dst_pte;
+	spinlock_t *ptl;
+	int ret;
+
+	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+					 dst_vma->vm_page_prot));
+	ret = -EEXIST;
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	if (!pte_none(*dst_pte))
+		goto out_unlock;
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+	ret = 0;
+out_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	return ret;
+}
+
+static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, address);
+	pud = pud_alloc(mm, pgd, address);
+	if (pud)
+		/*
+		 * Note that we did not necessarily run this because the
+		 * pmd was missing; *pmd may already be established and
+		 * in turn it may even be a trans_huge_pmd.
+		 */
+		pmd = pmd_alloc(mm, pud, address);
+	return pmd;
+}
+
+static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
+					      unsigned long dst_start,
+					      unsigned long src_start,
+					      unsigned long len,
+					      bool zeropage)
+{
+	struct vm_area_struct *dst_vma;
+	ssize_t err;
+	pmd_t *dst_pmd;
+	unsigned long src_addr, dst_addr;
+	long copied;
+	struct page *page;
+
+	/*
+	 * Sanitize the command parameters:
+	 */
+	BUG_ON(dst_start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(src_start + len <= src_start);
+	BUG_ON(dst_start + len <= dst_start);
+
+	src_addr = src_start;
+	dst_addr = dst_start;
+	copied = 0;
+	page = NULL;
+retry:
+	down_read(&dst_mm->mmap_sem);
+
+	/*
+	 * Make sure the vma is not shared, that the dst range is
+	 * both valid and fully within a single existing vma.
+	 */
+	err = -EINVAL;
+	dst_vma = find_vma(dst_mm, dst_start);
+	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+	if (dst_start < dst_vma->vm_start ||
+	    dst_start + len > dst_vma->vm_end)
+		goto out_unlock;
+
+	/*
+	 * Be strict and only allow __mcopy_atomic on userfaultfd
+	 * registered ranges to prevent userland errors going
+	 * unnoticed. As far as the VM consistency is concerned, it
+	 * would be perfectly safe to remove this check, but there's
+	 * no useful usage for __mcopy_atomic outside of userfaultfd
+	 * registered ranges. This is after all why these are ioctls
+	 * belonging to the userfaultfd and not syscalls.
+	 */
+	if (!dst_vma->vm_userfaultfd_ctx.ctx)
+		goto out_unlock;
+
+	/*
+	 * FIXME: only allow copying on anonymous vmas, tmpfs should
+	 * be added.
+	 */
+	if (dst_vma->vm_ops)
+		goto out_unlock;
+
+	/*
+	 * Ensure the dst_vma has an anon_vma, or this page
+	 * would get a NULL anon_vma when moved into the
+	 * dst_vma.
+	 */
+	err = -ENOMEM;
+	if (unlikely(anon_vma_prepare(dst_vma)))
+		goto out_unlock;
+
+	while (src_addr < src_start + len) {
+		pmd_t dst_pmdval;
+
+		BUG_ON(dst_addr >= dst_start + len);
+
+		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
+		if (unlikely(!dst_pmd)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		dst_pmdval = pmd_read_atomic(dst_pmd);
+		/*
+		 * If the dst_pmd is mapped as a THP, don't
+		 * override it and just be strict.
+		 */
+		if (unlikely(pmd_trans_huge(dst_pmdval))) {
+			err = -EEXIST;
+			break;
+		}
+		if (unlikely(pmd_none(dst_pmdval)) &&
+		    unlikely(__pte_alloc(dst_mm, dst_vma, dst_pmd,
+					 dst_addr))) {
+			err = -ENOMEM;
+			break;
+		}
+		/* If a huge pmd materialized under us, fail */
+		if (unlikely(pmd_trans_huge(*dst_pmd))) {
+			err = -EFAULT;
+			break;
+		}
+
+		BUG_ON(pmd_none(*dst_pmd));
+		BUG_ON(pmd_trans_huge(*dst_pmd));
+
+		if (!zeropage)
+			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
+					       dst_addr, src_addr, &page);
+		else
+			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
+						 dst_addr);
+
+		cond_resched();
+
+		if (unlikely(err == -EFAULT)) {
+			void *page_kaddr;
+
+			up_read(&dst_mm->mmap_sem);
+			BUG_ON(!page);
+
+			page_kaddr = kmap(page);
+			err = copy_from_user(page_kaddr,
+					     (const void __user *) src_addr,
+					     PAGE_SIZE);
+			kunmap(page);
+			if (unlikely(err)) {
+				err = -EFAULT;
+				goto out;
+			}
+			goto retry;
+		} else
+			BUG_ON(page);
+
+		if (!err) {
+			dst_addr += PAGE_SIZE;
+			src_addr += PAGE_SIZE;
+			copied += PAGE_SIZE;
+
+			if (fatal_signal_pending(current))
+				err = -EINTR;
+		}
+		if (err)
+			break;
+	}
+
+out_unlock:
+	up_read(&dst_mm->mmap_sem);
+out:
+	if (page)
+		page_cache_release(page);
+	BUG_ON(copied < 0);
+	BUG_ON(err > 0);
+	BUG_ON(!copied && !err);
+	return copied ? copied : err;
+}
+
+ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+		     unsigned long src_start, unsigned long len)
+{
+	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
+}
+
+ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
+		       unsigned long len)
+{
+	return __mcopy_atomic(dst_mm, start, 0, len, true);
+}
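
A hedged userspace sketch of how these entry points get exercised: the
fault-handling thread of a userfaultfd consumer resolves a fault with
UFFDIO_COPY, the ioctl that ultimately reaches mcopy_atomic(). uffd, addr
and src_page are stand-ins supplied by a hypothetical fault-reading loop:

	#include <sys/ioctl.h>
	#include <linux/userfaultfd.h>

	static int resolve_fault(int uffd, unsigned long addr,
				 void *src_page, size_t page_size)
	{
		struct uffdio_copy copy = {
			.dst = addr & ~(page_size - 1),	/* must be page aligned */
			.src = (unsigned long)src_page,
			.len = page_size,
			.mode = 0,
		};

		/* on success, copy.copy holds the number of bytes copied */
		return ioctl(uffd, UFFDIO_COPY, &copy);
	}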
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8286938..2d978b2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -175,7 +175,7 @@
 	if (!memcg)
 		return true;
 #ifdef CONFIG_CGROUP_WRITEBACK
-	if (cgroup_on_dfl(mem_cgroup_css(memcg)->cgroup))
+	if (memcg->css.cgroup)
 		return true;
 #endif
 	return false;
@@ -985,7 +985,7 @@
 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
 		 *    would probably show more reasons.
 		 *
-		 * 3) Legacy memcg encounters a page that is not already marked
+		 * 3) Legacy memcg encounters a page that is already marked
 		 *    PageReclaim. memcg does not have any dirty pages
 		 *    throttling so we could easily OOM just because too many
 		 *    pages are in writeback and there is nothing else to
@@ -1015,12 +1015,15 @@
 				 */
 				SetPageReclaim(page);
 				nr_writeback++;
-
 				goto keep_locked;
 
 			/* Case 3 above */
 			} else {
+				unlock_page(page);
 				wait_on_page_writeback(page);
+				/* then go back and try same page again */
+				list_add_tail(&page->lru, page_list);
+				continue;
 			}
 		}
 
@@ -1057,7 +1060,8 @@
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, ttu_flags)) {
+			switch (try_to_unmap(page,
+					ttu_flags|TTU_BATCH_FLUSH)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -1097,7 +1101,12 @@
 			if (!sc->may_writepage)
 				goto keep_locked;
 
-			/* Page is dirty, try to write it out here */
+			/*
+			 * Page is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after IO
+			 * starts and then write it out here.
+			 */
+			try_to_unmap_flush_dirty();
 			switch (pageout(page, mapping, sc)) {
 			case PAGE_KEEP:
 				goto keep_locked;
@@ -1190,7 +1199,7 @@
 		if (PageSwapCache(page))
 			try_to_free_swap(page);
 		unlock_page(page);
-		putback_lru_page(page);
+		list_add(&page->lru, &ret_pages);
 		continue;
 
 activate_locked:
@@ -1208,6 +1217,7 @@
 	}
 
 	mem_cgroup_uncharge_list(&free_pages);
+	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
 
 	list_splice(&ret_pages, page_list);
@@ -1352,7 +1362,8 @@
 	unsigned long nr_taken = 0;
 	unsigned long scan;
 
-	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
+	for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
+					!list_empty(src); scan++) {
 		struct page *page;
 		int nr_pages;
 
@@ -2151,6 +2162,23 @@
 	}
 }
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+static void init_tlb_ubc(void)
+{
+	/*
+	 * This deliberately does not clear the cpumask as it's expensive
+	 * and unnecessary. If stale data happens to be in there, the
+	 * first SWAP_CLUSTER_MAX pages will send one unnecessary IPI,
+	 * after which the mask is cleared.
+	 */
+	current->tlb_ubc.flush_required = false;
+}
+#else
+static inline void init_tlb_ubc(void)
+{
+}
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2185,6 +2213,8 @@
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
+	init_tlb_ubc();
+
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
diff --git a/mm/zbud.c b/mm/zbud.c
index f3bf6f7..fa48bcdf 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -96,10 +96,10 @@
 	struct list_head buddied;
 	struct list_head lru;
 	u64 pages_nr;
-	struct zbud_ops *ops;
+	const struct zbud_ops *ops;
 #ifdef CONFIG_ZPOOL
 	struct zpool *zpool;
-	struct zpool_ops *zpool_ops;
+	const struct zpool_ops *zpool_ops;
 #endif
 };
 
@@ -133,12 +133,12 @@
 		return -ENOENT;
 }
 
-static struct zbud_ops zbud_zpool_ops = {
+static const struct zbud_ops zbud_zpool_ops = {
 	.evict =	zbud_zpool_evict
 };
 
 static void *zbud_zpool_create(char *name, gfp_t gfp,
-			       struct zpool_ops *zpool_ops,
+			       const struct zpool_ops *zpool_ops,
 			       struct zpool *zpool)
 {
 	struct zbud_pool *pool;
@@ -302,7 +302,7 @@
  * Return: pointer to the new zbud pool or NULL if the metadata allocation
  * failed.
  */
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
 {
 	struct zbud_pool *pool;
 	int i;
diff --git a/mm/zpool.c b/mm/zpool.c
index 722a4f6..68d2dd8 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -22,7 +22,7 @@
 
 	struct zpool_driver *driver;
 	void *pool;
-	struct zpool_ops *ops;
+	const struct zpool_ops *ops;
 
 	struct list_head list;
 };
@@ -115,7 +115,7 @@
  * Returns: New zpool on success, NULL on failure.
  */
 struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp,
-		struct zpool_ops *ops)
+		const struct zpool_ops *ops)
 {
 	struct zpool_driver *driver;
 	struct zpool *zpool;
@@ -320,20 +320,6 @@
 	return zpool->driver->total_size(zpool->pool);
 }
 
-static int __init init_zpool(void)
-{
-	pr_info("loaded\n");
-	return 0;
-}
-
-static void __exit exit_zpool(void)
-{
-	pr_info("unloaded\n");
-}
-
-module_init(init_zpool);
-module_exit(exit_zpool);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
 MODULE_DESCRIPTION("Common API for compressed memory storage");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0a7f81a..f135b1b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -169,14 +169,12 @@
 	NR_ZS_STAT_TYPE,
 };
 
-#ifdef CONFIG_ZSMALLOC_STAT
-
-static struct dentry *zs_stat_root;
-
 struct zs_size_stat {
 	unsigned long objs[NR_ZS_STAT_TYPE];
 };
 
+#ifdef CONFIG_ZSMALLOC_STAT
+static struct dentry *zs_stat_root;
 #endif
 
 /*
@@ -201,6 +199,8 @@
 static const int fullness_threshold_frac = 4;
 
 struct size_class {
+	spinlock_t lock;
+	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
 	/*
 	 * Size of objects stored in this class. Must be multiple
 	 * of ZS_ALIGN.
@@ -210,16 +210,10 @@
 
 	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
 	int pages_per_zspage;
+	struct zs_size_stat stats;
+
 	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
 	bool huge;
-
-#ifdef CONFIG_ZSMALLOC_STAT
-	struct zs_size_stat stats;
-#endif
-
-	spinlock_t lock;
-
-	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
 };
 
 /*
@@ -251,6 +245,15 @@
 	gfp_t flags;	/* allocation flags used when growing pool */
 	atomic_long_t pages_allocated;
 
+	struct zs_pool_stats stats;
+
+	/* Compact classes */
+	struct shrinker shrinker;
+	/*
+	 * To signify that register_shrinker() was successful
+	 * and unregister_shrinker() will not Oops.
+	 */
+	bool shrinker_enabled;
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -285,8 +288,7 @@
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-	if (pool->handle_cachep)
-		kmem_cache_destroy(pool->handle_cachep);
+	kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
@@ -309,7 +311,8 @@
 
 #ifdef CONFIG_ZPOOL
 
-static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops,
+static void *zs_zpool_create(char *name, gfp_t gfp,
+			     const struct zpool_ops *zpool_ops,
 			     struct zpool *zpool)
 {
 	return zs_create_pool(name, gfp);
@@ -441,8 +444,6 @@
 	return min(zs_size_classes - 1, idx);
 }
 
-#ifdef CONFIG_ZSMALLOC_STAT
-
 static inline void zs_stat_inc(struct size_class *class,
 				enum zs_stat_type type, unsigned long cnt)
 {
@@ -461,6 +462,8 @@
 	return class->stats.objs[type];
 }
 
+#ifdef CONFIG_ZSMALLOC_STAT
+
 static int __init zs_stat_init(void)
 {
 	if (!debugfs_initialized())
@@ -576,23 +579,6 @@
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-
-static inline void zs_stat_inc(struct size_class *class,
-				enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline void zs_stat_dec(struct size_class *class,
-				enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline unsigned long zs_stat_get(struct size_class *class,
-				enum zs_stat_type type)
-{
-	return 0;
-}
-
 static int __init zs_stat_init(void)
 {
 	return 0;
@@ -610,7 +596,6 @@
 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
 {
 }
-
 #endif
 
 
@@ -658,13 +643,22 @@
 	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
 		return;
 
-	head = &class->fullness_list[fullness];
-	if (*head)
-		list_add_tail(&page->lru, &(*head)->lru);
-
-	*head = page;
 	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
 			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+
+	head = &class->fullness_list[fullness];
+	if (!*head) {
+		*head = page;
+		return;
+	}
+
+	/*
+	 * We want to see more ZS_FULL pages and fewer almost
+	 * empty/full ones. Put pages with higher ->inuse first.
+	 */
+	list_add_tail(&page->lru, &(*head)->lru);
+	if (page->inuse >= (*head)->inuse)
+		*head = page;
 }
 
 /*
@@ -1495,7 +1489,7 @@
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
-static void zs_object_copy(unsigned long src, unsigned long dst,
+static void zs_object_copy(unsigned long dst, unsigned long src,
 				struct size_class *class)
 {
 	struct page *s_page, *d_page;
@@ -1602,8 +1596,6 @@
 	 /* Starting object index within @s_page which used for live object
 	  * in the subpage. */
 	int index;
-	/* how many of objects are migrated */
-	int nr_migrated;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1614,7 +1606,6 @@
 	struct page *s_page = cc->s_page;
 	struct page *d_page = cc->d_page;
 	unsigned long index = cc->index;
-	int nr_migrated = 0;
 	int ret = 0;
 
 	while (1) {
@@ -1636,23 +1627,21 @@
 
 		used_obj = handle_to_obj(handle);
 		free_obj = obj_malloc(d_page, class, handle);
-		zs_object_copy(used_obj, free_obj, class);
+		zs_object_copy(free_obj, used_obj, class);
 		index++;
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
 		obj_free(pool, class, used_obj);
-		nr_migrated++;
 	}
 
 	/* Remember last position in this iteration */
 	cc->s_page = s_page;
 	cc->index = index;
-	cc->nr_migrated = nr_migrated;
 
 	return ret;
 }
 
-static struct page *alloc_target_page(struct size_class *class)
+static struct page *isolate_target_page(struct size_class *class)
 {
 	int i;
 	struct page *page;
@@ -1668,8 +1657,17 @@
 	return page;
 }
 
-static void putback_zspage(struct zs_pool *pool, struct size_class *class,
-				struct page *first_page)
+/*
+ * putback_zspage - add @first_page to the right class's fullness list
+ * @pool: target pool
+ * @class: destination class
+ * @first_page: target page
+ *
+ * Return: @first_page's fullness_group
+ */
+static enum fullness_group putback_zspage(struct zs_pool *pool,
+			struct size_class *class,
+			struct page *first_page)
 {
 	enum fullness_group fullness;
 
@@ -1687,50 +1685,72 @@
 
 		free_zspage(first_page);
 	}
+
+	return fullness;
 }
 
 static struct page *isolate_source_page(struct size_class *class)
 {
-	struct page *page;
+	int i;
+	struct page *page = NULL;
 
-	page = class->fullness_list[ZS_ALMOST_EMPTY];
-	if (page)
-		remove_zspage(page, class, ZS_ALMOST_EMPTY);
+	for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
+		page = class->fullness_list[i];
+		if (!page)
+			continue;
+
+		remove_zspage(page, class, i);
+		break;
+	}
 
 	return page;
 }
 
-static unsigned long __zs_compact(struct zs_pool *pool,
-				struct size_class *class)
+/*
+ *
+ * Based on the number of unused allocated objects, calculate
+ * and return the number of pages that we can free.
+ */
+static unsigned long zs_can_compact(struct size_class *class)
 {
-	int nr_to_migrate;
+	unsigned long obj_wasted;
+
+	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+		zs_stat_get(class, OBJ_USED);
+
+	obj_wasted /= get_maxobj_per_zspage(class->size,
+			class->pages_per_zspage);
+
+	return obj_wasted * class->pages_per_zspage;
+}
+
+static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+{
 	struct zs_compact_control cc;
 	struct page *src_page;
 	struct page *dst_page = NULL;
-	unsigned long nr_total_migrated = 0;
 
 	spin_lock(&class->lock);
 	while ((src_page = isolate_source_page(class))) {
 
 		BUG_ON(!is_first_page(src_page));
 
-		/* The goal is to migrate all live objects in source page */
-		nr_to_migrate = src_page->inuse;
+		if (!zs_can_compact(class))
+			break;
+
 		cc.index = 0;
 		cc.s_page = src_page;
 
-		while ((dst_page = alloc_target_page(class))) {
+		while ((dst_page = isolate_target_page(class))) {
 			cc.d_page = dst_page;
 			/*
-			 * If there is no more space in dst_page, try to
-			 * allocate another zspage.
+			 * If there is no more space in dst_page, resched
+			 * and see if anyone has allocated another zspage.
 			 */
 			if (!migrate_zspage(pool, class, &cc))
 				break;
 
 			putback_zspage(pool, class, dst_page);
-			nr_total_migrated += cc.nr_migrated;
-			nr_to_migrate -= cc.nr_migrated;
 		}
 
 		/* Stop if we couldn't find slot */
@@ -1738,9 +1758,9 @@
 			break;
 
 		putback_zspage(pool, class, dst_page);
-		putback_zspage(pool, class, src_page);
+		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
-		nr_total_migrated += cc.nr_migrated;
 		cond_resched();
 		spin_lock(&class->lock);
 	}
@@ -1749,14 +1769,11 @@
 		putback_zspage(pool, class, src_page);
 
 	spin_unlock(&class->lock);
-
-	return nr_total_migrated;
 }
 
 unsigned long zs_compact(struct zs_pool *pool)
 {
 	int i;
-	unsigned long nr_migrated = 0;
 	struct size_class *class;
 
 	for (i = zs_size_classes - 1; i >= 0; i--) {
@@ -1765,13 +1782,80 @@
 			continue;
 		if (class->index != i)
 			continue;
-		nr_migrated += __zs_compact(pool, class);
+		__zs_compact(pool, class);
 	}
 
-	return nr_migrated;
+	return pool->stats.pages_compacted;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
 
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
+{
+	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
+}
+EXPORT_SYMBOL_GPL(zs_pool_stats);
+
+static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
+		struct shrink_control *sc)
+{
+	unsigned long pages_freed;
+	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+			shrinker);
+
+	pages_freed = pool->stats.pages_compacted;
+	/*
+	 * Compact classes and calculate compaction delta.
+	 * Can run concurrently with a manually triggered
+	 * (by user) compaction.
+	 */
+	pages_freed = zs_compact(pool) - pages_freed;
+
+	return pages_freed ? pages_freed : SHRINK_STOP;
+}
+
+static unsigned long zs_shrinker_count(struct shrinker *shrinker,
+		struct shrink_control *sc)
+{
+	int i;
+	struct size_class *class;
+	unsigned long pages_to_free = 0;
+	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+			shrinker);
+
+	if (!pool->shrinker_enabled)
+		return 0;
+
+	for (i = zs_size_classes - 1; i >= 0; i--) {
+		class = pool->size_class[i];
+		if (!class)
+			continue;
+		if (class->index != i)
+			continue;
+
+		pages_to_free += zs_can_compact(class);
+	}
+
+	return pages_to_free;
+}
+
+static void zs_unregister_shrinker(struct zs_pool *pool)
+{
+	if (pool->shrinker_enabled) {
+		unregister_shrinker(&pool->shrinker);
+		pool->shrinker_enabled = false;
+	}
+}
+
+static int zs_register_shrinker(struct zs_pool *pool)
+{
+	pool->shrinker.scan_objects = zs_shrinker_scan;
+	pool->shrinker.count_objects = zs_shrinker_count;
+	pool->shrinker.batch = 0;
+	pool->shrinker.seeks = DEFAULT_SEEKS;
+
+	return register_shrinker(&pool->shrinker);
+}
+
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
@@ -1857,6 +1941,12 @@
 	if (zs_pool_stat_create(name, pool))
 		goto err;
 
+	/*
+	 * Not critical: we can still use the pool,
+	 * and the user can trigger compaction manually.
+	 */
+	if (zs_register_shrinker(pool) == 0)
+		pool->shrinker_enabled = true;
 	return pool;
 
 err:
@@ -1869,6 +1959,7 @@
 {
 	int i;
 
+	zs_unregister_shrinker(pool);
 	zs_pool_stat_destroy(pool);
 
 	for (i = 0; i < zs_size_classes; i++) {
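
A worked example of the zs_can_compact() arithmetic above, with
hypothetical class parameters: say get_maxobj_per_zspage() yields 7 objects
per zspage and pages_per_zspage is 2, with OBJ_ALLOCATED = 210 and
OBJ_USED = 180:

	obj_wasted = 210 - 180 = 30
	zspages    = 30 / 7    = 4	/* whole zspages of slack */
	freeable   = 4 * 2     = 8 pages

This is also the figure zs_shrinker_count() sums per class when it reports
reclaimable pages to the shrinker core.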
diff --git a/mm/zswap.c b/mm/zswap.c
index 2d5727b..48a1d08 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -446,75 +446,14 @@
 static int zswap_get_swap_cache_page(swp_entry_t entry,
 				struct page **retpage)
 {
-	struct page *found_page, *new_page = NULL;
-	struct address_space *swapper_space = swap_address_space(entry);
-	int err;
+	bool page_was_allocated;
 
-	*retpage = NULL;
-	do {
-		/*
-		 * First check the swap cache.  Since this is normally
-		 * called after lookup_swap_cache() failed, re-calling
-		 * that would confuse statistics.
-		 */
-		found_page = find_get_page(swapper_space, entry.val);
-		if (found_page)
-			break;
-
-		/*
-		 * Get a new page to read into from swap.
-		 */
-		if (!new_page) {
-			new_page = alloc_page(GFP_KERNEL);
-			if (!new_page)
-				break; /* Out of memory */
-		}
-
-		/*
-		 * call radix_tree_preload() while we can wait.
-		 */
-		err = radix_tree_preload(GFP_KERNEL);
-		if (err)
-			break;
-
-		/*
-		 * Swap entry may have been freed since our caller observed it.
-		 */
-		err = swapcache_prepare(entry);
-		if (err == -EEXIST) { /* seems racy */
-			radix_tree_preload_end();
-			continue;
-		}
-		if (err) { /* swp entry is obsolete ? */
-			radix_tree_preload_end();
-			break;
-		}
-
-		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
-		__set_page_locked(new_page);
-		SetPageSwapBacked(new_page);
-		err = __add_to_swap_cache(new_page, entry);
-		if (likely(!err)) {
-			radix_tree_preload_end();
-			lru_cache_add_anon(new_page);
-			*retpage = new_page;
-			return ZSWAP_SWAPCACHE_NEW;
-		}
-		radix_tree_preload_end();
-		ClearPageSwapBacked(new_page);
-		__clear_page_locked(new_page);
-		/*
-		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
-		 * clear SWAP_HAS_CACHE flag.
-		 */
-		swapcache_free(entry);
-	} while (err != -ENOMEM);
-
-	if (new_page)
-		page_cache_release(new_page);
-	if (!found_page)
+	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
+			NULL, 0, &page_was_allocated);
+	if (page_was_allocated)
+		return ZSWAP_SWAPCACHE_NEW;
+	if (!*retpage)
 		return ZSWAP_SWAPCACHE_FAIL;
-	*retpage = found_page;
 	return ZSWAP_SWAPCACHE_EXIST;
 }
 
@@ -816,7 +755,7 @@
 	zswap_trees[type] = NULL;
 }
 
-static struct zpool_ops zswap_zpool_ops = {
+static const struct zpool_ops zswap_zpool_ops = {
 	.evict = zswap_writeback_entry
 };
 
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index f30329f..69a4d30 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -517,8 +517,11 @@
 	struct ceph_options *opt = client->options;
 	size_t pos = m->count;
 
-	if (opt->name)
-		seq_printf(m, "name=%s,", opt->name);
+	if (opt->name) {
+		seq_puts(m, "name=");
+		seq_escape(m, opt->name, ", \t\n\\");
+		seq_putc(m, ',');
+	}
 	if (opt->key)
 		seq_puts(m, "secret=<hidden>,");
 
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4feda2d..548240d 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -23,7 +23,7 @@
 };
 #define uc_uid			uc_base.cr_uid
 
-#define UNX_WRITESLACK		(21 + (UNX_MAXNODENAME >> 2))
+#define UNX_WRITESLACK		(21 + XDR_QUADLEN(UNX_MAXNODENAME))
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY	RPCDBG_AUTH
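
XDR_QUADLEN() in the sunrpc XDR headers rounds a byte length up to 32-bit
words, (((l) + 3) >> 2), whereas the old open-coded `>> 2` rounded down and
under-reserved write slack for any node name whose length is not a multiple
of four. A quick comparison with a hypothetical 65-byte limit:

	65 >> 2         = 16	/* one XDR word short */
	XDR_QUADLEN(65) = 17	/* correctly rounded up */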
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2928aff..4a2340a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -44,7 +44,7 @@
 static void cache_init(struct cache_head *h)
 {
 	time_t now = seconds_since_boot();
-	h->next = NULL;
+	INIT_HLIST_NODE(&h->cache_list);
 	h->flags = 0;
 	kref_init(&h->ref);
 	h->expiry_time = now + CACHE_NEW_EXPIRY;
@@ -54,15 +54,14 @@
 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 				       struct cache_head *key, int hash)
 {
-	struct cache_head **head,  **hp;
-	struct cache_head *new = NULL, *freeme = NULL;
+	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
+	struct hlist_head *head;
 
 	head = &detail->hash_table[hash];
 
 	read_lock(&detail->hash_lock);
 
-	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
-		struct cache_head *tmp = *hp;
+	hlist_for_each_entry(tmp, head, cache_list) {
 		if (detail->match(tmp, key)) {
 			if (cache_is_expired(detail, tmp))
 				/* This entry is expired, we will discard it. */
@@ -88,12 +87,10 @@
 	write_lock(&detail->hash_lock);
 
 	/* check if entry appeared while we slept */
-	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
-		struct cache_head *tmp = *hp;
+	hlist_for_each_entry(tmp, head, cache_list) {
 		if (detail->match(tmp, key)) {
 			if (cache_is_expired(detail, tmp)) {
-				*hp = tmp->next;
-				tmp->next = NULL;
+				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
 				freeme = tmp;
 				break;
@@ -104,8 +101,8 @@
 			return tmp;
 		}
 	}
-	new->next = *head;
-	*head = new;
+
+	hlist_add_head(&new->cache_list, head);
 	detail->entries++;
 	cache_get(new);
 	write_unlock(&detail->hash_lock);
@@ -143,7 +140,6 @@
 	 * If 'old' is not VALID, we update it directly,
 	 * otherwise we need to replace it
 	 */
-	struct cache_head **head;
 	struct cache_head *tmp;
 
 	if (!test_bit(CACHE_VALID, &old->flags)) {
@@ -168,15 +164,13 @@
 	}
 	cache_init(tmp);
 	detail->init(tmp, old);
-	head = &detail->hash_table[hash];
 
 	write_lock(&detail->hash_lock);
 	if (test_bit(CACHE_NEGATIVE, &new->flags))
 		set_bit(CACHE_NEGATIVE, &tmp->flags);
 	else
 		detail->update(tmp, new);
-	tmp->next = *head;
-	*head = tmp;
+	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 	detail->entries++;
 	cache_get(tmp);
 	cache_fresh_locked(tmp, new->expiry_time);
@@ -416,28 +410,29 @@
 	/* find a non-empty bucket in the table */
 	while (current_detail &&
 	       current_index < current_detail->hash_size &&
-	       current_detail->hash_table[current_index] == NULL)
+	       hlist_empty(&current_detail->hash_table[current_index]))
 		current_index++;
 
 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
 
 	if (current_detail && current_index < current_detail->hash_size) {
-		struct cache_head *ch, **cp;
+		struct cache_head *ch = NULL;
 		struct cache_detail *d;
+		struct hlist_head *head;
+		struct hlist_node *tmp;
 
 		write_lock(&current_detail->hash_lock);
 
 		/* Ok, now to clean this strand */
 
-		cp = & current_detail->hash_table[current_index];
-		for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
+		head = &current_detail->hash_table[current_index];
+		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 			if (current_detail->nextcheck > ch->expiry_time)
 				current_detail->nextcheck = ch->expiry_time+1;
 			if (!cache_is_expired(current_detail, ch))
 				continue;
 
-			*cp = ch->next;
-			ch->next = NULL;
+			hlist_del_init(&ch->cache_list);
 			current_detail->entries--;
 			rv = 1;
 			break;
@@ -1270,18 +1265,13 @@
  * get a header, then pass each real item in the cache
  */
 
-struct handle {
-	struct cache_detail *cd;
-};
-
-static void *c_start(struct seq_file *m, loff_t *pos)
+void *cache_seq_start(struct seq_file *m, loff_t *pos)
 	__acquires(cd->hash_lock)
 {
 	loff_t n = *pos;
 	unsigned int hash, entry;
 	struct cache_head *ch;
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
-
+	struct cache_detail *cd = m->private;
 
 	read_lock(&cd->hash_lock);
 	if (!n--)
@@ -1289,7 +1279,7 @@
 	hash = n >> 32;
 	entry = n & ((1LL<<32) - 1);
 
-	for (ch=cd->hash_table[hash]; ch; ch=ch->next)
+	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
 		if (!entry--)
 			return ch;
 	n &= ~((1LL<<32) - 1);
@@ -1297,51 +1287,57 @@
 		hash++;
 		n += 1LL<<32;
 	} while(hash < cd->hash_size &&
-		cd->hash_table[hash]==NULL);
+		hlist_empty(&cd->hash_table[hash]));
 	if (hash >= cd->hash_size)
 		return NULL;
 	*pos = n+1;
-	return cd->hash_table[hash];
+	return hlist_entry_safe(cd->hash_table[hash].first,
+				struct cache_head, cache_list);
 }
+EXPORT_SYMBOL_GPL(cache_seq_start);
 
-static void *c_next(struct seq_file *m, void *p, loff_t *pos)
+void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
 {
 	struct cache_head *ch = p;
 	int hash = (*pos >> 32);
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 
 	if (p == SEQ_START_TOKEN)
 		hash = 0;
-	else if (ch->next == NULL) {
+	else if (ch->cache_list.next == NULL) {
 		hash++;
 		*pos += 1LL<<32;
 	} else {
 		++*pos;
-		return ch->next;
+		return hlist_entry_safe(ch->cache_list.next,
+					struct cache_head, cache_list);
 	}
 	*pos &= ~((1LL<<32) - 1);
 	while (hash < cd->hash_size &&
-	       cd->hash_table[hash] == NULL) {
+	       hlist_empty(&cd->hash_table[hash])) {
 		hash++;
 		*pos += 1LL<<32;
 	}
 	if (hash >= cd->hash_size)
 		return NULL;
 	++*pos;
-	return cd->hash_table[hash];
+	return hlist_entry_safe(cd->hash_table[hash].first,
+				struct cache_head, cache_list);
 }
+EXPORT_SYMBOL_GPL(cache_seq_next);
 
-static void c_stop(struct seq_file *m, void *p)
+void cache_seq_stop(struct seq_file *m, void *p)
 	__releases(cd->hash_lock)
 {
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 	read_unlock(&cd->hash_lock);
 }
+EXPORT_SYMBOL_GPL(cache_seq_stop);
 
 static int c_show(struct seq_file *m, void *p)
 {
 	struct cache_head *cp = p;
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 
 	if (p == SEQ_START_TOKEN)
 		return cd->cache_show(m, cd, NULL);
@@ -1364,33 +1360,36 @@
 }
 
 static const struct seq_operations cache_content_op = {
-	.start	= c_start,
-	.next	= c_next,
-	.stop	= c_stop,
+	.start	= cache_seq_start,
+	.next	= cache_seq_next,
+	.stop	= cache_seq_stop,
 	.show	= c_show,
 };
 
 static int content_open(struct inode *inode, struct file *file,
 			struct cache_detail *cd)
 {
-	struct handle *han;
+	struct seq_file *seq;
+	int err;
 
 	if (!cd || !try_module_get(cd->owner))
 		return -EACCES;
-	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
-	if (han == NULL) {
+
+	err = seq_open(file, &cache_content_op);
+	if (err) {
 		module_put(cd->owner);
-		return -ENOMEM;
+		return err;
 	}
 
-	han->cd = cd;
+	seq = file->private_data;
+	seq->private = cd;
 	return 0;
 }
 
 static int content_release(struct inode *inode, struct file *file,
 		struct cache_detail *cd)
 {
-	int ret = seq_release_private(inode, file);
+	int ret = seq_release(inode, file);
 	module_put(cd->owner);
 	return ret;
 }
@@ -1665,17 +1664,21 @@
 struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
 {
 	struct cache_detail *cd;
+	int i;
 
 	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
 	if (cd == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
+	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
 				 GFP_KERNEL);
 	if (cd->hash_table == NULL) {
 		kfree(cd);
 		return ERR_PTR(-ENOMEM);
 	}
+
+	for (i = 0; i < cd->hash_size; i++)
+		INIT_HLIST_HEAD(&cd->hash_table[i]);
 	cd->net = net;
 	return cd;
 }
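
The cache.c conversion above follows the standard hlist recipe:
INIT_HLIST_HEAD()/INIT_HLIST_NODE() for setup, hlist_add_head() for
insertion, and the _safe iterator wherever an entry may be unlinked
mid-walk. A condensed sketch of the expiry pattern from cache_clean();
the demo_* names are hypothetical, the list primitives are not:

    #include <linux/list.h>

    struct demo_entry {
    	struct hlist_node	cache_list;
    	unsigned long		expiry_time;
    };

    static void demo_expire_bucket(struct hlist_head *head, unsigned long now)
    {
    	struct demo_entry *e;
    	struct hlist_node *tmp;

    	/* 'tmp' caches ->next, so deleting the current entry
    	 * cannot derail the iteration.
    	 */
    	hlist_for_each_entry_safe(e, tmp, head, cache_list) {
    		if (e->expiry_time < now)
    			hlist_del_init(&e->cache_list);
    	}
    }

hlist_del_init(), rather than hlist_del(), leaves the node
self-contained, so a later hlist_unhashed() test or a repeated
hlist_del_init() stays safe after the entry is handed off.
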
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 337ca85..b140c09 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -297,7 +297,7 @@
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 	ret = atomic_dec_and_test(&task->tk_count);
 	if (waitqueue_active(wq))
-		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+		__wake_up_locked_key(wq, TASK_NORMAL, 1, &k);
 	spin_unlock_irqrestore(&wq->lock, flags);
 	return ret;
 }
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5a16d8d..a8f579d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -34,36 +34,19 @@
 
 static void svc_unregister(const struct svc_serv *serv, struct net *net);
 
-#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
+#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)
 
-/*
- * Mode for mapping cpus to pools.
- */
-enum {
-	SVC_POOL_AUTO = -1,	/* choose one of the others */
-	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
-				 * (legacy & UP mode) */
-	SVC_POOL_PERCPU,	/* one pool per cpu */
-	SVC_POOL_PERNODE	/* one pool per numa node */
-};
 #define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
 
 /*
  * Structure for mapping cpus to pools and vice versa.
  * Setup once during sunrpc initialisation.
  */
-static struct svc_pool_map {
-	int count;			/* How many svc_servs use us */
-	int mode;			/* Note: int not enum to avoid
-					 * warnings about "enumeration value
-					 * not handled in switch" */
-	unsigned int npools;
-	unsigned int *pool_to;		/* maps pool id to cpu or node */
-	unsigned int *to_pool;		/* maps cpu or node to pool id */
-} svc_pool_map = {
-	.count = 0,
+struct svc_pool_map svc_pool_map = {
 	.mode = SVC_POOL_DEFAULT
 };
+EXPORT_SYMBOL_GPL(svc_pool_map);
+
 static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
 
 static int
@@ -236,7 +219,7 @@
  * vice versa).  Initialise the map if we're the first user.
  * Returns the number of pools.
  */
-static unsigned int
+unsigned int
 svc_pool_map_get(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
@@ -271,7 +254,7 @@
 	mutex_unlock(&svc_pool_map_mutex);
 	return m->npools;
 }
-
+EXPORT_SYMBOL_GPL(svc_pool_map_get);
 
 /*
  * Drop a reference to the global map of cpus to pools.
@@ -280,7 +263,7 @@
  * mode using the pool_mode module option without
  * rebooting or re-loading sunrpc.ko.
  */
-static void
+void
 svc_pool_map_put(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
@@ -297,7 +280,7 @@
 
 	mutex_unlock(&svc_pool_map_mutex);
 }
-
+EXPORT_SYMBOL_GPL(svc_pool_map_put);
 
 static int svc_pool_map_get_node(unsigned int pidx)
 {
@@ -423,7 +406,7 @@
  */
 static struct svc_serv *
 __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
-	     void (*shutdown)(struct svc_serv *serv, struct net *net))
+	     struct svc_serv_ops *ops)
 {
 	struct svc_serv	*serv;
 	unsigned int vers;
@@ -440,7 +423,7 @@
 		bufsize = RPCSVC_MAXPAYLOAD;
 	serv->sv_max_payload = bufsize? bufsize : 4096;
 	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
-	serv->sv_shutdown  = shutdown;
+	serv->sv_ops = ops;
 	xdrsize = 0;
 	while (prog) {
 		prog->pg_lovers = prog->pg_nvers-1;
@@ -486,26 +469,22 @@
 
 struct svc_serv *
 svc_create(struct svc_program *prog, unsigned int bufsize,
-	   void (*shutdown)(struct svc_serv *serv, struct net *net))
+	   struct svc_serv_ops *ops)
 {
-	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
+	return __svc_create(prog, bufsize, /*npools*/1, ops);
 }
 EXPORT_SYMBOL_GPL(svc_create);
 
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
-		  void (*shutdown)(struct svc_serv *serv, struct net *net),
-		  svc_thread_fn func, struct module *mod)
+		  struct svc_serv_ops *ops)
 {
 	struct svc_serv *serv;
 	unsigned int npools = svc_pool_map_get();
 
-	serv = __svc_create(prog, bufsize, npools, shutdown);
+	serv = __svc_create(prog, bufsize, npools, ops);
 	if (!serv)
 		goto out_err;
-
-	serv->sv_function = func;
-	serv->sv_module = mod;
 	return serv;
 out_err:
 	svc_pool_map_put();
@@ -517,8 +496,8 @@
 {
 	svc_close_net(serv, net);
 
-	if (serv->sv_shutdown)
-		serv->sv_shutdown(serv, net);
+	if (serv->sv_ops->svo_shutdown)
+		serv->sv_ops->svo_shutdown(serv, net);
 }
 EXPORT_SYMBOL_GPL(svc_shutdown_net);
 
@@ -604,40 +583,52 @@
 }
 
 struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
 	struct svc_rqst	*rqstp;
 
 	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
 	if (!rqstp)
-		goto out_enomem;
+		return NULL;
 
-	serv->sv_nrthreads++;
 	__set_bit(RQ_BUSY, &rqstp->rq_flags);
 	spin_lock_init(&rqstp->rq_lock);
 	rqstp->rq_server = serv;
 	rqstp->rq_pool = pool;
+
+	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
+	if (!rqstp->rq_argp)
+		goto out_enomem;
+
+	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
+	if (!rqstp->rq_resp)
+		goto out_enomem;
+
+	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
+		goto out_enomem;
+
+	return rqstp;
+out_enomem:
+	svc_rqst_free(rqstp);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(svc_rqst_alloc);
+
+struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+{
+	struct svc_rqst	*rqstp;
+
+	rqstp = svc_rqst_alloc(serv, pool, node);
+	if (!rqstp)
+		return ERR_PTR(-ENOMEM);
+
+	serv->sv_nrthreads++;
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads++;
 	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
 	spin_unlock_bh(&pool->sp_lock);
-
-	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
-	if (!rqstp->rq_argp)
-		goto out_thread;
-
-	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
-	if (!rqstp->rq_resp)
-		goto out_thread;
-
-	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
-		goto out_thread;
-
 	return rqstp;
-out_thread:
-	svc_exit_thread(rqstp);
-out_enomem:
-	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(svc_prepare_thread);
 
@@ -739,12 +730,12 @@
 			break;
 		}
 
-		__module_get(serv->sv_module);
-		task = kthread_create_on_node(serv->sv_function, rqstp,
+		__module_get(serv->sv_ops->svo_module);
+		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
 					      node, "%s", serv->sv_name);
 		if (IS_ERR(task)) {
 			error = PTR_ERR(task);
-			module_put(serv->sv_module);
+			module_put(serv->sv_ops->svo_module);
 			svc_exit_thread(rqstp);
 			break;
 		}
@@ -772,15 +763,21 @@
  * mutex" for the service.
  */
 void
-svc_exit_thread(struct svc_rqst *rqstp)
+svc_rqst_free(struct svc_rqst *rqstp)
 {
-	struct svc_serv	*serv = rqstp->rq_server;
-	struct svc_pool	*pool = rqstp->rq_pool;
-
 	svc_release_buffer(rqstp);
 	kfree(rqstp->rq_resp);
 	kfree(rqstp->rq_argp);
 	kfree(rqstp->rq_auth_data);
+	kfree_rcu(rqstp, rq_rcu_head);
+}
+EXPORT_SYMBOL_GPL(svc_rqst_free);
+
+void
+svc_exit_thread(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	struct svc_pool	*pool = rqstp->rq_pool;
 
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads--;
@@ -788,7 +785,7 @@
 		list_del_rcu(&rqstp->rq_all);
 	spin_unlock_bh(&pool->sp_lock);
 
-	kfree_rcu(rqstp, rq_rcu_head);
+	svc_rqst_free(rqstp);
 
 	/* Release the server */
 	if (serv)
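
The net effect of this svc.c refactor is that memory management and
pool bookkeeping become separable: svc_rqst_alloc()/svc_rqst_free()
own only the allocations, while svc_prepare_thread()/svc_exit_thread()
layer the sv_nrthreads accounting and sp_all_threads linkage on top.
A caller that wants an rqst outside the pooled-thread machinery, which
is the use case this split enables, would do roughly the following
(error handling trimmed; NUMA_NO_NODE as a placeholder node):

    struct svc_rqst *rqstp;

    rqstp = svc_rqst_alloc(serv, pool, NUMA_NO_NODE);
    if (!rqstp)
    	return -ENOMEM;

    /* ... use rqstp without touching serv->sv_nrthreads ... */

    svc_rqst_free(rqstp);	/* releases buffers, then kfree_rcu() */
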
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 163ac45..a6cbb21 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -24,7 +24,6 @@
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static void svc_age_temp_xprts(unsigned long closure);
 static void svc_delete_xprt(struct svc_xprt *xprt);
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
 
 /* apparently the "standard" is that clients close
  * idle connections after 5 minutes, servers after
@@ -225,12 +224,12 @@
 	}
 
 	/* As soon as we clear busy, the xprt could be closed and
-	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
+	 * 'put', so we need a reference to call svc_enqueue_xprt with:
 	 */
 	svc_xprt_get(xprt);
 	smp_mb__before_atomic();
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 	svc_xprt_put(xprt);
 }
 
@@ -320,7 +319,7 @@
 	return false;
 }
 
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
 	struct svc_rqst	*rqstp = NULL;
@@ -402,6 +401,7 @@
 out:
 	trace_svc_xprt_do_enqueue(xprt, rqstp);
 }
+EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
 
 /*
  * Queue up a transport with data pending. If there are idle nfsd
@@ -412,7 +412,7 @@
 {
 	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
 		return;
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index f1e8daf..cb25c89 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -39,6 +39,25 @@
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	    struct rpcrdma_create_data_internal *cdata)
 {
+	struct ib_device_attr *devattr = &ia->ri_devattr;
+	struct ib_mr *mr;
+
+	/* Obtain an lkey to use for the regbufs, which are
+	 * protected from remote access.
+	 */
+	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
+		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
+	} else {
+		mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(mr)) {
+			pr_err("%s: ib_get_dma_mr for failed with %lX\n",
+			       __func__, PTR_ERR(mr));
+			return -ENOMEM;
+		}
+		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
+		ia->ri_dma_mr = mr;
+	}
+
 	return 0;
 }
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 04ea914..63f282e 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -189,6 +189,11 @@
 	struct ib_device_attr *devattr = &ia->ri_devattr;
 	int depth, delta;
 
+	/* Obtain an lkey to use for the regbufs, which are
+	 * protected from remote access.
+	 */
+	ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
+
 	ia->ri_max_frmr_depth =
 			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
 			      devattr->max_fast_reg_page_list_len);
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 41985d0..72cf8b1 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -23,6 +23,29 @@
 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 		 struct rpcrdma_create_data_internal *cdata)
 {
+	struct ib_device_attr *devattr = &ia->ri_devattr;
+	struct ib_mr *mr;
+
+	/* Obtain an rkey to use for RPC data payloads.
+	 */
+	mr = ib_get_dma_mr(ia->ri_pd,
+			   IB_ACCESS_LOCAL_WRITE |
+			   IB_ACCESS_REMOTE_WRITE |
+			   IB_ACCESS_REMOTE_READ);
+	if (IS_ERR(mr)) {
+		pr_err("%s: ib_get_dma_mr for failed with %lX\n",
+		       __func__, PTR_ERR(mr));
+		return -ENOMEM;
+	}
+	ia->ri_dma_mr = mr;
+
+	/* Obtain an lkey to use for regbufs.
+	 */
+	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
+	else
+		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
+
 	return 0;
 }
 
@@ -51,7 +74,7 @@
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
 	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
-	seg->mr_rkey = ia->ri_bind_mem->rkey;
+	seg->mr_rkey = ia->ri_dma_mr->rkey;
 	seg->mr_base = seg->mr_dma;
 	seg->mr_nsegs = 1;
 	return 1;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 84ea37d..bc8bd65 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -71,6 +71,67 @@
 };
 #endif
 
+/* The client can send a request inline as long as the RPCRDMA header
+ * plus the RPC call fit under the transport's inline limit. If the
+ * combined call message size exceeds that limit, the client must use
+ * the read chunk list for this operation.
+ */
+static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
+{
+	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
+
+	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+}
+
+/* The client can't know how large the actual reply will be. Thus it
+ * plans for the largest possible reply for that particular ULP
+ * operation. If that maximum combined reply message size exceeds the
+ * transport's inline limit, the client must provide a write list or
+ * a reply chunk for this request.
+ */
+static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
+{
+	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
+
+	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+}
+
+static int
+rpcrdma_tail_pullup(struct xdr_buf *buf)
+{
+	size_t tlen = buf->tail[0].iov_len;
+	size_t skip = tlen & 3;
+
+	/* Do not include the tail if it is only an XDR pad */
+	if (tlen < 4)
+		return 0;
+
+	/* xdr_write_pages() adds a pad at the beginning of the tail
+	 * if the content in "buf->pages" is unaligned. Force the
+	 * tail's actual content to land at the next XDR position
+	 * after the head instead.
+	 */
+	if (skip) {
+		unsigned char *src, *dst;
+		unsigned int count;
+
+		src = buf->tail[0].iov_base;
+		dst = buf->head[0].iov_base;
+		dst += buf->head[0].iov_len;
+
+		src += skip;
+		tlen -= skip;
+
+		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
+			__func__, skip, dst, src, tlen);
+
+		for (count = tlen; count; count--)
+			*dst++ = *src++;
+	}
+
+	return tlen;
+}
+
 /*
  * Chunk assembly from upper layer xdr_buf.
  *
@@ -122,6 +183,10 @@
 	if (len && n == nsegs)
 		return -EIO;
 
+	/* When encoding the read list, the tail is always sent inline */
+	if (type == rpcrdma_readch)
+		return n;
+
 	if (xdrbuf->tail[0].iov_len) {
 		/* the rpcrdma protocol allows us to omit any trailing
 		 * xdr pad bytes, saving the server an RDMA operation. */
@@ -297,8 +362,7 @@
  * pre-registered memory buffer for this request. For small amounts
  * of data, this is efficient. The cutoff value is tunable.
  */
-static int
-rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
 {
 	int i, npages, curlen;
 	int copy_len;
@@ -310,16 +374,9 @@
 	destp = rqst->rq_svec[0].iov_base;
 	curlen = rqst->rq_svec[0].iov_len;
 	destp += curlen;
-	/*
-	 * Do optional padding where it makes sense. Alignment of write
-	 * payload can help the server, if our setting is accurate.
-	 */
-	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
-	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
-		pad = 0;	/* don't pad this request */
 
-	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
-		__func__, pad, destp, rqst->rq_slen, curlen);
+	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
+		__func__, destp, rqst->rq_slen, curlen);
 
 	copy_len = rqst->rq_snd_buf.page_len;
 
@@ -355,7 +412,6 @@
 		page_base = 0;
 	}
 	/* header now contains entire send message */
-	return pad;
 }
 
 /*
@@ -380,7 +436,7 @@
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	char *base;
-	size_t rpclen, padlen;
+	size_t rpclen;
 	ssize_t hdrlen;
 	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
@@ -402,28 +458,15 @@
 	/*
 	 * Chunks needed for results?
 	 *
+	 * o Read ops return data as write chunk(s), header as inline.
 	 * o If the expected result is under the inline threshold, all ops
-	 *   return as inline (but see later).
+	 *   return as inline.
 	 * o Large non-read ops return as a single reply chunk.
-	 * o Large read ops return data as write chunk(s), header as inline.
-	 *
-	 * Note: the NFS code sending down multiple result segments implies
-	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
 	 */
-
-	/*
-	 * This code can handle read chunks, write chunks OR reply
-	 * chunks -- only one type. If the request is too big to fit
-	 * inline, then we will choose read chunks. If the request is
-	 * a READ, then use write chunks to separate the file data
-	 * into pages; otherwise use reply chunks.
-	 */
-	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-		wtype = rpcrdma_noch;
-	else if (rqst->rq_rcv_buf.page_len == 0)
-		wtype = rpcrdma_replych;
-	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
 		wtype = rpcrdma_writech;
+	else if (rpcrdma_results_inline(rqst))
+		wtype = rpcrdma_noch;
 	else
 		wtype = rpcrdma_replych;
 
@@ -432,21 +475,25 @@
 	 *
 	 * o If the total request is under the inline threshold, all ops
 	 *   are sent as inline.
-	 * o Large non-write ops are sent with the entire message as a
-	 *   single read chunk (protocol 0-position special case).
 	 * o Large write ops transmit data as read chunk(s), header as
 	 *   inline.
+	 * o Large non-write ops are sent with the entire message as a
+	 *   single read chunk (protocol 0-position special case).
 	 *
-	 * Note: the NFS code sending down multiple argument segments
-	 * implies the op is a write.
-	 * TBD check NFSv4 setacl
+	 * This assumes that the upper layer does not present a request
+	 * that both has a data payload and whose non-data arguments,
+	 * by themselves, are larger than the inline threshold.
 	 */
-	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+	if (rpcrdma_args_inline(rqst)) {
 		rtype = rpcrdma_noch;
-	else if (rqst->rq_snd_buf.page_len == 0)
-		rtype = rpcrdma_areadch;
-	else
+	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 		rtype = rpcrdma_readch;
+	} else {
+		r_xprt->rx_stats.nomsg_call_count++;
+		headerp->rm_type = htonl(RDMA_NOMSG);
+		rtype = rpcrdma_areadch;
+		rpclen = 0;
+	}
 
 	/* The following simplification is not true forever */
 	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
@@ -458,7 +505,6 @@
 	}
 
 	hdrlen = RPCRDMA_HDRLEN_MIN;
-	padlen = 0;
 
 	/*
 	 * Pull up any extra send data into the preregistered buffer.
@@ -467,45 +513,15 @@
 	 */
 	if (rtype == rpcrdma_noch) {
 
-		padlen = rpcrdma_inline_pullup(rqst,
-						RPCRDMA_INLINE_PAD_VALUE(rqst));
+		rpcrdma_inline_pullup(rqst);
 
-		if (padlen) {
-			headerp->rm_type = rdma_msgp;
-			headerp->rm_body.rm_padded.rm_align =
-				cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
-			headerp->rm_body.rm_padded.rm_thresh =
-				cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
-			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
-			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
-			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
-			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
-			if (wtype != rpcrdma_noch) {
-				dprintk("RPC:       %s: invalid chunk list\n",
-					__func__);
-				return -EIO;
-			}
-		} else {
-			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
-			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
-			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
-			/* new length after pullup */
-			rpclen = rqst->rq_svec[0].iov_len;
-			/*
-			 * Currently we try to not actually use read inline.
-			 * Reply chunks have the desirable property that
-			 * they land, packed, directly in the target buffers
-			 * without headers, so they require no fixup. The
-			 * additional RDMA Write op sends the same amount
-			 * of data, streams on-the-wire and adds no overhead
-			 * on receive. Therefore, we request a reply chunk
-			 * for non-writes wherever feasible and efficient.
-			 */
-			if (wtype == rpcrdma_noch)
-				wtype = rpcrdma_replych;
-		}
-	}
-
+		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
+		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
+		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
+		/* new length after pullup */
+		rpclen = rqst->rq_svec[0].iov_len;
+	} else if (rtype == rpcrdma_readch)
+		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
 	if (rtype != rpcrdma_noch) {
 		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
 					       headerp, rtype);
@@ -518,9 +534,9 @@
 	if (hdrlen < 0)
 		return hdrlen;
 
-	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
+	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+		__func__, transfertypes[wtype], hdrlen, rpclen,
 		headerp, base, rdmab_lkey(req->rl_rdmabuf));
 
 	/*
@@ -534,26 +550,15 @@
 	req->rl_send_iov[0].length = hdrlen;
 	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
+	req->rl_niovs = 1;
+	if (rtype == rpcrdma_areadch)
+		return 0;
+
 	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
 	req->rl_send_iov[1].length = rpclen;
 	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
 	req->rl_niovs = 2;
-
-	if (padlen) {
-		struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-
-		req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
-		req->rl_send_iov[2].length = padlen;
-		req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);
-
-		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
-		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
-		req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);
-
-		req->rl_niovs = 4;
-	}
-
 	return 0;
 }
 
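
To make the two predicates added at the top of this file concrete:
RPCRDMA_HDRLEN_MIN is the fixed 28-byte (seven XDR word) RPC-over-RDMA
header, and the inline thresholds are per-connection values. Assuming
a hypothetical 1024-byte inline write threshold, the cutover works out
as follows:

    #include <stdbool.h>
    #include <stdio.h>

    #define RPCRDMA_HDRLEN_MIN	28	/* seven XDR words */
    #define INLINE_WRITE_THRESHOLD	1024	/* assumed for this example */

    static bool args_inline(unsigned int snd_len)
    {
    	return RPCRDMA_HDRLEN_MIN + snd_len <= INLINE_WRITE_THRESHOLD;
    }

    int main(void)
    {
    	printf("%d\n", args_inline(996));	/* 1: 28 + 996 == 1024 */
    	printf("%d\n", args_inline(997));	/* 0: needs a read list */
    	return 0;
    }
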
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index d25cd43..1dfae83 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -136,6 +136,79 @@
 	return dma_addr;
 }
 
+/* Returns the address of the first read chunk, or NULL if no read
+ * chunk is present
+ */
+struct rpcrdma_read_chunk *
+svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+{
+	struct rpcrdma_read_chunk *ch =
+		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+
+	if (ch->rc_discrim == xdr_zero)
+		return NULL;
+	return ch;
+}
+
+/* Returns the address of the first write array element, or NULL if
+ * no write array is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
+{
+	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
+		return NULL;
+	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
+}
+
+/* Returns the address of the first reply array element, or NULL if no
+ * reply array is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+{
+	struct rpcrdma_read_chunk *rch;
+	struct rpcrdma_write_array *wr_ary;
+	struct rpcrdma_write_array *rp_ary;
+
+	/* XXX: Need to fix when reply chunk may occur with read list
+	 *	and/or write list.
+	 */
+	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
+		return NULL;
+
+	rch = svc_rdma_get_read_chunk(rmsgp);
+	if (rch) {
+		while (rch->rc_discrim != xdr_zero)
+			rch++;
+
+		/* The reply chunk follows an empty write array located
+		 * at 'rc_position' here. The reply array is at rc_target.
+		 */
+		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
+		goto found_it;
+	}
+
+	wr_ary = svc_rdma_get_write_array(rmsgp);
+	if (wr_ary) {
+		int chunk = be32_to_cpu(wr_ary->wc_nchunks);
+
+		rp_ary = (struct rpcrdma_write_array *)
+			 &wr_ary->wc_array[chunk].wc_target.rs_length;
+		goto found_it;
+	}
+
+	/* No read list, no write list */
+	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];
+
+ found_it:
+	if (rp_ary->wc_discrim == xdr_zero)
+		return NULL;
+	return rp_ary;
+}
+
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
@@ -384,6 +457,7 @@
 		      int byte_count)
 {
 	struct ib_send_wr send_wr;
+	u32 xdr_off;
 	int sge_no;
 	int sge_bytes;
 	int page_no;
@@ -418,8 +492,8 @@
 	ctxt->direction = DMA_TO_DEVICE;
 
 	/* Map the payload indicated by 'byte_count' */
+	xdr_off = 0;
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
-		int xdr_off = 0;
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
 		byte_count -= sge_bytes;
 		ctxt->sge[sge_no].addr =
@@ -457,6 +531,13 @@
 	}
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
+	/* The loop above bumps sc_dma_used for each sge. The
+	 * xdr_buf.tail gets a separate sge, but resides in the
+	 * same page as xdr_buf.head. Don't count it twice.
+	 */
+	if (sge_no > ctxt->count)
+		atomic_dec(&rdma->sc_dma_used);
+
 	if (sge_no > rdma->sc_max_sge) {
 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 		goto err;
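
The chunk-list helpers added above all lean on the same RPC-over-RDMA
encoding rule: every list ends with an xdr_zero discriminator word.
Stepping past a read list is therefore a matter of skipping entries
until the sentinel, as in this condensed sketch (the demo_ name is
hypothetical):

    static struct rpcrdma_read_chunk *
    demo_skip_read_list(struct rpcrdma_read_chunk *ch)
    {
    	/* every live entry begins with a non-zero discriminator */
    	while (ch->rc_discrim != xdr_zero)
    		ch++;
    	/* ch now sits on the list's terminating zero word */
    	return ch;
    }
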
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6b36279..21e4036 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -91,7 +91,7 @@
 	.xcl_name = "rdma",
 	.xcl_owner = THIS_MODULE,
 	.xcl_ops = &svc_rdma_ops,
-	.xcl_max_payload = RPCRDMA_MAXPAYLOAD,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
 	.xcl_ident = XPRT_TRANSPORT_RDMA,
 };
 
@@ -659,6 +659,7 @@
 		if (xprt) {
 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
 			svc_xprt_enqueue(xprt);
+			svc_xprt_put(xprt);
 		}
 		break;
 	default:
@@ -1201,40 +1202,6 @@
 	return 1;
 }
 
-/*
- * Attempt to register the kvec representing the RPC memory with the
- * device.
- *
- * Returns:
- *  NULL : The device does not support fastreg or there were no more
- *         fastreg mr.
- *  frmr : The kvec register request was successfully posted.
- *    <0 : An error was encountered attempting to register the kvec.
- */
-int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
-		     struct svc_rdma_fastreg_mr *frmr)
-{
-	struct ib_send_wr fastreg_wr;
-	u8 key;
-
-	/* Bump the key */
-	key = (u8)(frmr->mr->lkey & 0x000000FF);
-	ib_update_fast_reg_key(frmr->mr, ++key);
-
-	/* Prepare FASTREG WR */
-	memset(&fastreg_wr, 0, sizeof fastreg_wr);
-	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
-	fastreg_wr.send_flags = IB_SEND_SIGNALED;
-	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
-	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
-	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
-	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fastreg_wr.wr.fast_reg.length = frmr->map_len;
-	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
-	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
-	return svc_rdma_send(xprt, &fastreg_wr);
-}
-
 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 {
 	struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 680f888..64443eb 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -175,10 +175,8 @@
 }
 
 static void
-xprt_rdma_format_addresses(struct rpc_xprt *xprt)
+xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
 {
-	struct sockaddr *sap = (struct sockaddr *)
-					&rpcx_to_rdmad(xprt).addr;
 	char buf[128];
 
 	switch (sap->sa_family) {
@@ -302,7 +300,7 @@
 	struct rpc_xprt *xprt;
 	struct rpcrdma_xprt *new_xprt;
 	struct rpcrdma_ep *new_ep;
-	struct sockaddr_in *sin;
+	struct sockaddr *sap;
 	int rc;
 
 	if (args->addrlen > sizeof(xprt->addr)) {
@@ -333,26 +331,20 @@
 	 * Set up RDMA-specific connect data.
 	 */
 
-	/* Put server RDMA address in local cdata */
-	memcpy(&cdata.addr, args->dstaddr, args->addrlen);
+	sap = (struct sockaddr *)&cdata.addr;
+	memcpy(sap, args->dstaddr, args->addrlen);
 
 	/* Ensure xprt->addr holds valid server TCP (not RDMA)
 	 * address, for any side protocols which peek at it */
 	xprt->prot = IPPROTO_TCP;
 	xprt->addrlen = args->addrlen;
-	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);
+	memcpy(&xprt->addr, sap, xprt->addrlen);
 
-	sin = (struct sockaddr_in *)&cdata.addr;
-	if (ntohs(sin->sin_port) != 0)
+	if (rpc_get_port(sap))
 		xprt_set_bound(xprt);
 
-	dprintk("RPC:       %s: %pI4:%u\n",
-		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));
-
-	/* Set max requests */
 	cdata.max_requests = xprt->max_reqs;
 
-	/* Set some length limits */
 	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
 	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
 
@@ -375,8 +367,7 @@
 
 	new_xprt = rpcx_to_rdmax(xprt);
 
-	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
-				xprt_rdma_memreg_strategy);
+	rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
 	if (rc)
 		goto out1;
 
@@ -409,7 +400,7 @@
 	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
 			  xprt_rdma_connect_worker);
 
-	xprt_rdma_format_addresses(xprt);
+	xprt_rdma_format_addresses(xprt, sap);
 	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
 	if (xprt->max_payload == 0)
 		goto out4;
@@ -420,6 +411,9 @@
 	if (!try_module_get(THIS_MODULE))
 		goto out4;
 
+	dprintk("RPC:       %s: %s:%s\n", __func__,
+		xprt->address_strings[RPC_DISPLAY_ADDR],
+		xprt->address_strings[RPC_DISPLAY_PORT]);
 	return xprt;
 
 out4:
@@ -653,31 +647,30 @@
 	if (xprt_connected(xprt))
 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
 
-	seq_printf(seq,
-	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
-	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",
-
-	   0,	/* need a local port? */
-	   xprt->stat.bind_count,
-	   xprt->stat.connect_count,
-	   xprt->stat.connect_time,
-	   idle_time,
-	   xprt->stat.sends,
-	   xprt->stat.recvs,
-	   xprt->stat.bad_xids,
-	   xprt->stat.req_u,
-	   xprt->stat.bklog_u,
-
-	   r_xprt->rx_stats.read_chunk_count,
-	   r_xprt->rx_stats.write_chunk_count,
-	   r_xprt->rx_stats.reply_chunk_count,
-	   r_xprt->rx_stats.total_rdma_request,
-	   r_xprt->rx_stats.total_rdma_reply,
-	   r_xprt->rx_stats.pullup_copy_count,
-	   r_xprt->rx_stats.fixup_copy_count,
-	   r_xprt->rx_stats.hardway_register_count,
-	   r_xprt->rx_stats.failed_marshal_count,
-	   r_xprt->rx_stats.bad_reply_count);
+	seq_puts(seq, "\txprt:\trdma ");
+	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
+		   0,	/* need a local port? */
+		   xprt->stat.bind_count,
+		   xprt->stat.connect_count,
+		   xprt->stat.connect_time,
+		   idle_time,
+		   xprt->stat.sends,
+		   xprt->stat.recvs,
+		   xprt->stat.bad_xids,
+		   xprt->stat.req_u,
+		   xprt->stat.bklog_u);
+	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
+		   r_xprt->rx_stats.read_chunk_count,
+		   r_xprt->rx_stats.write_chunk_count,
+		   r_xprt->rx_stats.reply_chunk_count,
+		   r_xprt->rx_stats.total_rdma_request,
+		   r_xprt->rx_stats.total_rdma_reply,
+		   r_xprt->rx_stats.pullup_copy_count,
+		   r_xprt->rx_stats.fixup_copy_count,
+		   r_xprt->rx_stats.hardway_register_count,
+		   r_xprt->rx_stats.failed_marshal_count,
+		   r_xprt->rx_stats.bad_reply_count,
+		   r_xprt->rx_stats.nomsg_call_count);
 }
 
 static int
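
The rpc_get_port() helper used in the transport.c hunk above (from
include/linux/sunrpc/addr.h) makes the bound-port test address-family
agnostic, where the removed code handled only AF_INET. Its behavior is
approximately this; the real helper is terser:

    static unsigned short demo_get_port(const struct sockaddr *sap)
    {
    	switch (sap->sa_family) {
    	case AF_INET:
    		return ntohs(((struct sockaddr_in *)sap)->sin_port);
    	case AF_INET6:
    		return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
    	}
    	return 0;
    }
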
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 891c4ed..f73d7a7 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -52,6 +52,7 @@
 #include <linux/prefetch.h>
 #include <linux/sunrpc/addr.h>
 #include <asm/bitops.h>
+#include <linux/module.h> /* try_module_get()/module_put() */
 
 #include "xprt_rdma.h"
 
@@ -414,6 +415,14 @@
 	return 0;
 }
 
+static void rpcrdma_destroy_id(struct rdma_cm_id *id)
+{
+	if (id) {
+		module_put(id->device->owner);
+		rdma_destroy_id(id);
+	}
+}
+
 static struct rdma_cm_id *
 rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 			struct rpcrdma_ia *ia, struct sockaddr *addr)
@@ -440,6 +449,17 @@
 	}
 	wait_for_completion_interruptible_timeout(&ia->ri_done,
 				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+
+	/* FIXME:
+	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
+	 * be pinned while there are active NFS/RDMA mounts to prevent
+	 * hangs and crashes at umount time.
+	 */
+	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
+		dprintk("RPC:       %s: Failed to get device module\n",
+			__func__);
+		ia->ri_async_rc = -ENODEV;
+	}
 	rc = ia->ri_async_rc;
 	if (rc)
 		goto out;
@@ -449,16 +469,17 @@
 	if (rc) {
 		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
 			__func__, rc);
-		goto out;
+		goto put;
 	}
 	wait_for_completion_interruptible_timeout(&ia->ri_done,
 				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
 	rc = ia->ri_async_rc;
 	if (rc)
-		goto out;
+		goto put;
 
 	return id;
-
+put:
+	module_put(id->device->owner);
 out:
 	rdma_destroy_id(id);
 	return ERR_PTR(rc);
@@ -493,9 +514,11 @@
 int
 rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 {
-	int rc, mem_priv;
 	struct rpcrdma_ia *ia = &xprt->rx_ia;
 	struct ib_device_attr *devattr = &ia->ri_devattr;
+	int rc;
+
+	ia->ri_dma_mr = NULL;
 
 	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
 	if (IS_ERR(ia->ri_id)) {
@@ -519,11 +542,6 @@
 		goto out3;
 	}
 
-	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
-		ia->ri_have_dma_lkey = 1;
-		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-	}
-
 	if (memreg == RPCRDMA_FRMR) {
 		/* Requires both frmr reg and local dma lkey */
 		if (((devattr->device_cap_flags &
@@ -539,42 +557,19 @@
 		if (!ia->ri_device->alloc_fmr) {
 			dprintk("RPC:       %s: MTHCAFMR registration "
 				"not supported by HCA\n", __func__);
-			memreg = RPCRDMA_ALLPHYSICAL;
+			goto out3;
 		}
 	}
 
-	/*
-	 * Optionally obtain an underlying physical identity mapping in
-	 * order to do a memory window-based bind. This base registration
-	 * is protected from remote access - that is enabled only by binding
-	 * for the specific bytes targeted during each RPC operation, and
-	 * revoked after the corresponding completion similar to a storage
-	 * adapter.
-	 */
 	switch (memreg) {
 	case RPCRDMA_FRMR:
 		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 		break;
 	case RPCRDMA_ALLPHYSICAL:
 		ia->ri_ops = &rpcrdma_physical_memreg_ops;
-		mem_priv = IB_ACCESS_LOCAL_WRITE |
-				IB_ACCESS_REMOTE_WRITE |
-				IB_ACCESS_REMOTE_READ;
-		goto register_setup;
+		break;
 	case RPCRDMA_MTHCAFMR:
 		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
-		if (ia->ri_have_dma_lkey)
-			break;
-		mem_priv = IB_ACCESS_LOCAL_WRITE;
-	register_setup:
-		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
-		if (IS_ERR(ia->ri_bind_mem)) {
-			printk(KERN_ALERT "%s: ib_get_dma_mr for "
-				"phys register failed with %lX\n",
-				__func__, PTR_ERR(ia->ri_bind_mem));
-			rc = -ENOMEM;
-			goto out3;
-		}
 		break;
 	default:
 		printk(KERN_ERR "RPC: Unsupported memory "
@@ -592,7 +587,7 @@
 	ib_dealloc_pd(ia->ri_pd);
 	ia->ri_pd = NULL;
 out2:
-	rdma_destroy_id(ia->ri_id);
+	rpcrdma_destroy_id(ia->ri_id);
 	ia->ri_id = NULL;
 out1:
 	return rc;
@@ -606,19 +601,11 @@
 void
 rpcrdma_ia_close(struct rpcrdma_ia *ia)
 {
-	int rc;
-
 	dprintk("RPC:       %s: entering\n", __func__);
-	if (ia->ri_bind_mem != NULL) {
-		rc = ib_dereg_mr(ia->ri_bind_mem);
-		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
-			__func__, rc);
-	}
-
 	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
 		if (ia->ri_id->qp)
 			rdma_destroy_qp(ia->ri_id);
-		rdma_destroy_id(ia->ri_id);
+		rpcrdma_destroy_id(ia->ri_id);
 		ia->ri_id = NULL;
 	}
 
@@ -639,6 +626,12 @@
 	struct ib_cq_init_attr cq_attr = {};
 	int rc, err;
 
+	if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
+		dprintk("RPC:       %s: insufficient sge's available\n",
+			__func__);
+		return -ENOMEM;
+	}
+
 	/* check provider's send/recv wr limits */
 	if (cdata->max_requests > devattr->max_qp_wr)
 		cdata->max_requests = devattr->max_qp_wr;
@@ -651,21 +644,13 @@
 	if (rc)
 		return rc;
 	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
-	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
+	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
 	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;
 	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	ep->rep_attr.qp_type = IB_QPT_RC;
 	ep->rep_attr.port_num = ~0;
 
-	if (cdata->padding) {
-		ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
-						      GFP_KERNEL);
-		if (IS_ERR(ep->rep_padbuf))
-			return PTR_ERR(ep->rep_padbuf);
-	} else
-		ep->rep_padbuf = NULL;
-
 	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
 		"iovs: send %d recv %d\n",
 		__func__,
@@ -748,7 +733,8 @@
 		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
 			__func__, err);
 out1:
-	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
+	if (ia->ri_dma_mr)
+		ib_dereg_mr(ia->ri_dma_mr);
 	return rc;
 }
 
@@ -775,8 +761,6 @@
 		ia->ri_id->qp = NULL;
 	}
 
-	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
-
 	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
 	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
 	if (rc)
@@ -788,6 +772,12 @@
 	if (rc)
 		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
 			__func__, rc);
+
+	if (ia->ri_dma_mr) {
+		rc = ib_dereg_mr(ia->ri_dma_mr);
+		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
+			__func__, rc);
+	}
 }
 
 /*
@@ -825,7 +815,7 @@
 		if (ia->ri_device != id->device) {
 			printk("RPC:       %s: can't reconnect on "
 				"different device!\n", __func__);
-			rdma_destroy_id(id);
+			rpcrdma_destroy_id(id);
 			rc = -ENETUNREACH;
 			goto out;
 		}
@@ -834,7 +824,7 @@
 		if (rc) {
 			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
 				__func__, rc);
-			rdma_destroy_id(id);
+			rpcrdma_destroy_id(id);
 			rc = -ENETUNREACH;
 			goto out;
 		}
@@ -845,7 +835,7 @@
 		write_unlock(&ia->ri_qplock);
 
 		rdma_destroy_qp(old);
-		rdma_destroy_id(old);
+		rpcrdma_destroy_id(old);
 	} else {
 		dprintk("RPC:       %s: connecting...\n", __func__);
 		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
@@ -1229,75 +1219,6 @@
 		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
 }
 
-static int
-rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
-				struct ib_mr **mrp, struct ib_sge *iov)
-{
-	struct ib_phys_buf ipb;
-	struct ib_mr *mr;
-	int rc;
-
-	/*
-	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
-	 */
-	iov->addr = ib_dma_map_single(ia->ri_device,
-			va, len, DMA_BIDIRECTIONAL);
-	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
-		return -ENOMEM;
-
-	iov->length = len;
-
-	if (ia->ri_have_dma_lkey) {
-		*mrp = NULL;
-		iov->lkey = ia->ri_dma_lkey;
-		return 0;
-	} else if (ia->ri_bind_mem != NULL) {
-		*mrp = NULL;
-		iov->lkey = ia->ri_bind_mem->lkey;
-		return 0;
-	}
-
-	ipb.addr = iov->addr;
-	ipb.size = iov->length;
-	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
-			IB_ACCESS_LOCAL_WRITE, &iov->addr);
-
-	dprintk("RPC:       %s: phys convert: 0x%llx "
-			"registered 0x%llx length %d\n",
-			__func__, (unsigned long long)ipb.addr,
-			(unsigned long long)iov->addr, len);
-
-	if (IS_ERR(mr)) {
-		*mrp = NULL;
-		rc = PTR_ERR(mr);
-		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
-	} else {
-		*mrp = mr;
-		iov->lkey = mr->lkey;
-		rc = 0;
-	}
-
-	return rc;
-}
-
-static int
-rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
-				struct ib_mr *mr, struct ib_sge *iov)
-{
-	int rc;
-
-	ib_dma_unmap_single(ia->ri_device,
-			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
-
-	if (NULL == mr)
-		return 0;
-
-	rc = ib_dereg_mr(mr);
-	if (rc)
-		dprintk("RPC:       %s: ib_dereg_mr failed %i\n", __func__, rc);
-	return rc;
-}
-
 /**
  * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
  * @ia: controlling rpcrdma_ia
@@ -1317,26 +1238,29 @@
 rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
 {
 	struct rpcrdma_regbuf *rb;
-	int rc;
+	struct ib_sge *iov;
 
-	rc = -ENOMEM;
 	rb = kmalloc(sizeof(*rb) + size, flags);
 	if (rb == NULL)
 		goto out;
 
-	rb->rg_size = size;
-	rb->rg_owner = NULL;
-	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
-				       &rb->rg_mr, &rb->rg_iov);
-	if (rc)
+	iov = &rb->rg_iov;
+	iov->addr = ib_dma_map_single(ia->ri_device,
+				      (void *)rb->rg_base, size,
+				      DMA_BIDIRECTIONAL);
+	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
 		goto out_free;
 
+	iov->length = size;
+	iov->lkey = ia->ri_dma_lkey;
+	rb->rg_size = size;
+	rb->rg_owner = NULL;
 	return rb;
 
 out_free:
 	kfree(rb);
 out:
-	return ERR_PTR(rc);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -1347,10 +1271,15 @@
 void
 rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 {
-	if (rb) {
-		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
-		kfree(rb);
-	}
+	struct ib_sge *iov;
+
+	if (!rb)
+		return;
+
+	iov = &rb->rg_iov;
+	ib_dma_unmap_single(ia->ri_device,
+			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
+	kfree(rb);
 }
 
 /*
@@ -1363,9 +1292,11 @@
 		struct rpcrdma_ep *ep,
 		struct rpcrdma_req *req)
 {
+	struct ib_device *device = ia->ri_device;
 	struct ib_send_wr send_wr, *send_wr_fail;
 	struct rpcrdma_rep *rep = req->rl_reply;
-	int rc;
+	struct ib_sge *iov = req->rl_send_iov;
+	int i, rc;
 
 	if (rep) {
 		rc = rpcrdma_ep_post_recv(ia, ep, rep);
@@ -1376,22 +1307,15 @@
 
 	send_wr.next = NULL;
 	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
-	send_wr.sg_list = req->rl_send_iov;
+	send_wr.sg_list = iov;
 	send_wr.num_sge = req->rl_niovs;
 	send_wr.opcode = IB_WR_SEND;
-	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
-		ib_dma_sync_single_for_device(ia->ri_device,
-					      req->rl_send_iov[3].addr,
-					      req->rl_send_iov[3].length,
-					      DMA_TO_DEVICE);
-	ib_dma_sync_single_for_device(ia->ri_device,
-				      req->rl_send_iov[1].addr,
-				      req->rl_send_iov[1].length,
-				      DMA_TO_DEVICE);
-	ib_dma_sync_single_for_device(ia->ri_device,
-				      req->rl_send_iov[0].addr,
-				      req->rl_send_iov[0].length,
-				      DMA_TO_DEVICE);
+
+	for (i = 0; i < send_wr.num_sge; i++)
+		ib_dma_sync_single_for_device(device, iov[i].addr,
+					      iov[i].length, DMA_TO_DEVICE);
+	dprintk("RPC:       %s: posting %d s/g entries\n",
+		__func__, send_wr.num_sge);
 
 	if (DECR_CQCOUNT(ep) > 0)
 		send_wr.send_flags = 0;
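
The invariant these verbs.c hunks establish: every cm_id that survives
rpcrdma_create_id() holds a reference on its device driver's module,
and rpcrdma_destroy_id() is the single teardown path that drops it.
Reduced to its skeleton, with error paths before the pin still using
plain rdma_destroy_id() exactly as in the hunk above:

    /* after address resolution has bound id->device: */
    if (!try_module_get(id->device->owner)) {
    	rdma_destroy_id(id);		/* not pinned yet */
    	return ERR_PTR(-ENODEV);
    }

    /* ... id in use; the provider module cannot be unloaded ... */

    module_put(id->device->owner);	/* what rpcrdma_destroy_id() does */
    rdma_destroy_id(id);
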
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index f49dd8b..0251222 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -51,7 +51,6 @@
 #include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
 #include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
 #include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */
-#include <linux/sunrpc/svc.h>		/* RPCSVC_MAXPAYLOAD */
 
 #define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
 #define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */
@@ -65,9 +64,8 @@
 	struct ib_device	*ri_device;
 	struct rdma_cm_id 	*ri_id;
 	struct ib_pd		*ri_pd;
-	struct ib_mr		*ri_bind_mem;
+	struct ib_mr		*ri_dma_mr;
 	u32			ri_dma_lkey;
-	int			ri_have_dma_lkey;
 	struct completion	ri_done;
 	int			ri_async_rc;
 	unsigned int		ri_max_frmr_depth;
@@ -89,7 +87,6 @@
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t 	rep_connect_wait;
-	struct rpcrdma_regbuf	*rep_padbuf;
 	struct rdma_conn_param	rep_remote_cma;
 	struct sockaddr_storage	rep_remote_addr;
 	struct delayed_work	rep_connect_worker;
@@ -119,7 +116,6 @@
 struct rpcrdma_regbuf {
 	size_t			rg_size;
 	struct rpcrdma_req	*rg_owner;
-	struct ib_mr		*rg_mr;
 	struct ib_sge		rg_iov;
 	__be32			rg_base[0] __attribute__ ((aligned(256)));
 };
@@ -165,8 +161,7 @@
  * struct rpcrdma_buffer. N is the max number of outstanding requests.
  */
 
-/* temporary static scatter/gather max */
-#define RPCRDMA_MAX_DATA_SEGS	(64)	/* max scatter/gather */
+#define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
 #define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
 
 struct rpcrdma_buffer;
@@ -258,16 +253,18 @@
 	char		*mr_offset;	/* kva if no page, else offset */
 };
 
+#define RPCRDMA_MAX_IOVS	(2)
+
 struct rpcrdma_req {
-	unsigned int	rl_niovs;	/* 0, 2 or 4 */
-	unsigned int	rl_nchunks;	/* non-zero if chunks */
-	unsigned int	rl_connect_cookie;	/* retry detection */
-	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
+	unsigned int		rl_niovs;
+	unsigned int		rl_nchunks;
+	unsigned int		rl_connect_cookie;
+	struct rpcrdma_buffer	*rl_buffer;
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
-	struct ib_sge	rl_send_iov[4];	/* for active requests */
-	struct rpcrdma_regbuf *rl_rdmabuf;
-	struct rpcrdma_regbuf *rl_sendbuf;
-	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
+	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
+	struct rpcrdma_regbuf	*rl_rdmabuf;
+	struct rpcrdma_regbuf	*rl_sendbuf;
+	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
 };
 
 static inline struct rpcrdma_req *
@@ -342,6 +339,7 @@
 	unsigned long		hardway_register_count;
 	unsigned long		failed_marshal_count;
 	unsigned long		bad_reply_count;
+	unsigned long		nomsg_call_count;
 };
 
 /*
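
A quick arithmetic check of the new segment cap in xprt_rdma.h, with
PAGE_SIZE assumed to be 4 KB for this example:

    #include <stdio.h>

    #define PAGE_SIZE		4096UL	/* assumption for this example */
    #define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
    #define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2)

    int main(void)
    {
    	/* 256 data segments plus head and tail, up from the old
    	 * static cap of 64 segments (256 KB of payload).
    	 */
    	printf("%lu segs, %lu total, %lu byte max payload\n",
    	       RPCRDMA_MAX_DATA_SEGS, RPCRDMA_MAX_SEGS,
    	       RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE);
    	return 0;
    }
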
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0030376..7be90bc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -822,6 +822,8 @@
 	if (atomic_read(&transport->xprt.swapper))
 		sk_clear_memalloc(sk);
 
+	kernel_sock_shutdown(sock, SHUT_RDWR);
+
 	write_lock_bh(&sk->sk_callback_lock);
 	transport->inet = NULL;
 	transport->sock = NULL;
@@ -829,6 +831,7 @@
 	sk->sk_user_data = NULL;
 
 	xs_restore_old_callbacks(transport, sk);
+	xprt_clear_connected(xprt);
 	write_unlock_bh(&sk->sk_callback_lock);
 	xs_sock_reset_connection_flags(xprt);
 
@@ -1866,7 +1869,7 @@
 		sk->sk_data_ready = xs_local_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
 		sk->sk_error_report = xs_error_report;
-		sk->sk_allocation = GFP_ATOMIC;
+		sk->sk_allocation = GFP_NOIO;
 
 		xprt_clear_connected(xprt);
 
@@ -2051,7 +2054,7 @@
 		sk->sk_user_data = xprt;
 		sk->sk_data_ready = xs_udp_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
-		sk->sk_allocation = GFP_ATOMIC;
+		sk->sk_allocation = GFP_NOIO;
 
 		xprt_set_connected(xprt);
 
@@ -2153,7 +2156,7 @@
 		sk->sk_state_change = xs_tcp_state_change;
 		sk->sk_write_space = xs_tcp_write_space;
 		sk->sk_error_report = xs_error_report;
-		sk->sk_allocation = GFP_ATOMIC;
+		sk->sk_allocation = GFP_NOIO;
 
 		/* socket options */
 		sock_reset_flag(sk, SOCK_LINGER);
@@ -2279,13 +2282,14 @@
 
 	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
 
-	/* Start by resetting any existing state */
-	xs_reset_transport(transport);
-
-	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
+	if (transport->sock != NULL) {
 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
 				"seconds\n",
 				xprt, xprt->reestablish_timeout / HZ);
+
+		/* Start by resetting any existing state */
+		xs_reset_transport(transport);
+
 		queue_delayed_work(rpciod_workqueue,
 				   &transport->connect_worker,
 				   xprt->reestablish_timeout);
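
On the sk_allocation switch a few hunks up: GFP_ATOMIC never sleeps
but draws on reserve pools and fails readily under memory pressure,
while GFP_NOIO may block and reclaim, just never by issuing new I/O.
That restriction is what a socket that is itself the I/O path (an NFS
transport) needs in order not to deadlock on its own traffic. The same
constraint can also be imposed over a region of code via the scoped
API, sketched here:

    sk->sk_allocation = GFP_NOIO;	/* per-socket, as in the hunks above */

    /* or scoped, covering every allocation in a section: */
    unsigned int noio_flags = memalloc_noio_save();
    /* ... allocations here behave as if GFP_NOIO ... */
    memalloc_noio_restore(noio_flags);
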
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 5ecfe93..12efbbe 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -10,3 +10,5 @@
 docproc
 sortextable
 asn1_compiler
+extract-cert
+sign-file
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d3437b8..1db6d73 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -128,6 +128,10 @@
 cc-disable-warning = $(call try-run,\
 	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
+# cc-name
+# Expands to either gcc or clang
+cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
+
 # cc-version
 cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
 
@@ -303,3 +307,54 @@
 
 echo-why = $(call escsq, $(strip $(why)))
 endif
+
+###############################################################################
+#
+# When a Kconfig string contains a filename, it is suitable for
+# passing to shell commands. It is surrounded by double-quotes, and
+# any double-quotes or backslashes within it are escaped by
+# backslashes.
+#
+# This is no use for dependencies or $(wildcard). We need to strip the
+# surrounding quotes and the escaping from quotes and backslashes, and
+# we *do* need to escape any spaces in the string. So, for example:
+#
+# Usage: $(eval $(call config_filename,FOO))
+#
+# Defines FOO_FILENAME based on the contents of the CONFIG_FOO option,
+# transformed as described above to be suitable for use within the
+# makefile.
+#
+# Also, if the filename is a relative filename and exists in the source
+# tree but not the build tree, define FOO_SRCPREFIX as $(srctree)/ to
+# be prefixed to *both* command invocation and dependencies.
+#
+# Note: We also print the filenames in the quiet_cmd_foo text, and
+# perhaps ought to have a version specially escaped for that purpose.
+# But it's only cosmetic, and $(patsubst "%",%,$(CONFIG_FOO)) is good
+# enough.  It'll strip the quotes in the common case where there's no
+# space and it's a simple filename, and it'll retain the quotes when
+# there's a space. There are some esoteric cases in which it'll print
+# the wrong thing, but we don't really care. The actual dependencies
+# and commands *do* get it right, with various combinations of single
+# and double quotes, backslashes and spaces in the filenames.
+#
+###############################################################################
+#
+space_escape := %%%SPACE%%%
+#
+define config_filename
+ifneq ($$(CONFIG_$(1)),"")
+$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1)))))))
+ifneq ($$(patsubst /%,%,$$(firstword $$($(1)_FILENAME))),$$(firstword $$($(1)_FILENAME)))
+else
+ifeq ($$(wildcard $$($(1)_FILENAME)),)
+ifneq ($$(wildcard $$(srctree)/$$($(1)_FILENAME)),)
+$(1)_SRCPREFIX := $(srctree)/
+endif
+endif
+endif
+endif
+endef
+#
+###############################################################################
diff --git a/scripts/Lindent b/scripts/Lindent
index 9c4b3e2..6d889de 100755
--- a/scripts/Lindent
+++ b/scripts/Lindent
@@ -1,6 +1,9 @@
 #!/bin/sh
 PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1"
 RES=`indent --version`
+if [ "$RES" = "" ]; then
+	exit 1
+fi
 V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1`
 V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2`
 V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3`
diff --git a/scripts/Makefile b/scripts/Makefile
index 2016a64..1b26617 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -16,9 +16,13 @@
 hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
 hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable
 hostprogs-$(CONFIG_ASN1)	 += asn1_compiler
+hostprogs-$(CONFIG_MODULE_SIG)	 += sign-file
+hostprogs-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += extract-cert
 
 HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
 HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
+HOSTLOADLIBES_sign-file = -lcrypto
+HOSTLOADLIBES_extract-cert = -lcrypto
 
 always		:= $(hostprogs-y) $(hostprogs-m)
 
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index f734033..4efedcb 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -56,7 +56,7 @@
 KBUILD_CFLAGS += $(warning)
 else
 
-ifeq ($(COMPILER),clang)
+ifeq ($(cc-name),clang)
 KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
 KBUILD_CFLAGS += $(call cc-disable-warning, format)
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index e48a4e9..07650ee 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -22,7 +22,7 @@
     mkdir -p $(2) ; \
     cp $@ $(2) ; \
     $(mod_strip_cmd) $(2)/$(notdir $@) ; \
-    $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) ; \
+    $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) && \
     $(mod_compress_cmd) $(2)/$(notdir $@)
 
 # Modules built outside the kernel source tree go into extra by default
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index 7750e9c..e000f44 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -13,6 +13,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
+#include <stdbool.h>
 #include <string.h>
 #include <ctype.h>
 #include <unistd.h>
@@ -293,8 +294,8 @@
 
 struct action {
 	struct action	*next;
+	char		*name;
 	unsigned char	index;
-	char		name[];
 };
 
 static struct action *action_list;
@@ -305,15 +306,17 @@
 	enum token_type	token_type : 8;
 	unsigned char	size;
 	struct action	*action;
-	const char	*value;
+	char		*content;
 	struct type	*type;
 };
 
 static struct token *token_list;
 static unsigned nr_tokens;
-static _Bool verbose;
+static bool verbose_opt;
+static bool debug_opt;
 
-#define debug(fmt, ...) do { if (verbose) printf(fmt, ## __VA_ARGS__); } while (0)
+#define verbose(fmt, ...) do { if (verbose_opt) printf(fmt, ## __VA_ARGS__); } while (0)
+#define debug(fmt, ...) do { if (debug_opt) printf(fmt, ## __VA_ARGS__); } while (0)
 
 static int directive_compare(const void *_key, const void *_pdir)
 {
@@ -325,11 +328,9 @@
 	dlen = strlen(dir);
 	clen = (dlen < token->size) ? dlen : token->size;
 
-	//debug("cmp(%*.*s,%s) = ",
-	//       (int)token->size, (int)token->size, token->value,
-	//       dir);
+	//debug("cmp(%s,%s) = ", token->content, dir);
 
-	val = memcmp(token->value, dir, clen);
+	val = memcmp(token->content, dir, clen);
 	if (val != 0) {
 		//debug("%d [cmp]\n", val);
 		return val;
@@ -349,7 +350,7 @@
 static void tokenise(char *buffer, char *end)
 {
 	struct token *tokens;
-	char *line, *nl, *p, *q;
+	char *line, *nl, *start, *p, *q;
 	unsigned tix, lineno;
 
 	/* Assume we're going to have half as many tokens as we have
@@ -408,11 +409,11 @@
 				break;
 
 			tokens[tix].line = lineno;
-			tokens[tix].value = p;
+			start = p;
 
 			/* Handle string tokens */
 			if (isalpha(*p)) {
-				const char **dir;
+				const char **dir, *start = p;
 
 				/* Can be a directive, type name or element
 				 * name.  Find the end of the name.
@@ -423,10 +424,18 @@
 				tokens[tix].size = q - p;
 				p = q;
 
+				tokens[tix].content = malloc(tokens[tix].size + 1);
+				if (!tokens[tix].content) {
+					perror(NULL);
+					exit(1);
+				}
+				memcpy(tokens[tix].content, start, tokens[tix].size);
+				tokens[tix].content[tokens[tix].size] = 0;
+
 				/* If it begins with a lowercase letter then
 				 * it's an element name
 				 */
-				if (islower(tokens[tix].value[0])) {
+				if (islower(tokens[tix].content[0])) {
 					tokens[tix++].token_type = TOKEN_ELEMENT_NAME;
 					continue;
 				}
@@ -455,6 +464,13 @@
 					q++;
 				tokens[tix].size = q - p;
 				p = q;
+				tokens[tix].content = malloc(tokens[tix].size + 1);
+				if (!tokens[tix].content) {
+					perror(NULL);
+					exit(1);
+				}
+				memcpy(tokens[tix].content, start, tokens[tix].size);
+				tokens[tix].content[tokens[tix].size] = 0;
 				tokens[tix++].token_type = TOKEN_NUMBER;
 				continue;
 			}
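
Both the name and the number token paths above now attach a NUL-terminated private copy of the token text, which is what later lets every diagnostic switch from the "%*.*s" width/precision dance to a plain "%s". A standalone sketch of the duplication pattern, with dup_range() as a hypothetical stand-in for the open-coded malloc/memcpy sequence:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Copy [start, start + size) into fresh, NUL-terminated storage. */
	static char *dup_range(const char *start, size_t size)
	{
		char *content = malloc(size + 1);

		if (!content) {
			perror(NULL);
			exit(1);
		}
		memcpy(content, start, size);
		content[size] = 0;
		return content;
	}

	int main(void)
	{
		const char *line = "Certificate ::= SEQUENCE {";
		char *tok = dup_range(line, strlen("Certificate"));

		printf("token '%s'\n", tok);	/* plain %s now suffices */
		free(tok);
		return 0;
	}
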
@@ -463,6 +479,7 @@
 				if (memcmp(p, "::=", 3) == 0) {
 					p += 3;
 					tokens[tix].size = 3;
+					tokens[tix].content = "::=";
 					tokens[tix++].token_type = TOKEN_ASSIGNMENT;
 					continue;
 				}
@@ -472,12 +489,14 @@
 				if (memcmp(p, "({", 2) == 0) {
 					p += 2;
 					tokens[tix].size = 2;
+					tokens[tix].content = "({";
 					tokens[tix++].token_type = TOKEN_OPEN_ACTION;
 					continue;
 				}
 				if (memcmp(p, "})", 2) == 0) {
 					p += 2;
 					tokens[tix].size = 2;
+					tokens[tix].content = "})";
 					tokens[tix++].token_type = TOKEN_CLOSE_ACTION;
 					continue;
 				}
@@ -488,22 +507,27 @@
 				switch (*p) {
 				case '{':
 					p += 1;
+					tokens[tix].content = "{";
 					tokens[tix++].token_type = TOKEN_OPEN_CURLY;
 					continue;
 				case '}':
 					p += 1;
+					tokens[tix].content = "}";
 					tokens[tix++].token_type = TOKEN_CLOSE_CURLY;
 					continue;
 				case '[':
 					p += 1;
+					tokens[tix].content = "[";
 					tokens[tix++].token_type = TOKEN_OPEN_SQUARE;
 					continue;
 				case ']':
 					p += 1;
+					tokens[tix].content = "]";
 					tokens[tix++].token_type = TOKEN_CLOSE_SQUARE;
 					continue;
 				case ',':
 					p += 1;
+					tokens[tix].content = ",";
 					tokens[tix++].token_type = TOKEN_COMMA;
 					continue;
 				default:
@@ -518,22 +542,20 @@
 	}
 
 	nr_tokens = tix;
-	debug("Extracted %u tokens\n", nr_tokens);
+	verbose("Extracted %u tokens\n", nr_tokens);
 
 #if 0
 	{
 		int n;
 		for (n = 0; n < nr_tokens; n++)
-			debug("Token %3u: '%*.*s'\n",
-			       n,
-			       (int)token_list[n].size, (int)token_list[n].size,
-			       token_list[n].value);
+			debug("Token %3u: '%s'\n", n, token_list[n].content);
 	}
 #endif
 }
 
 static void build_type_list(void);
 static void parse(void);
+static void dump_elements(void);
 static void render(FILE *out, FILE *hdr);
 
 /*
@@ -548,16 +570,27 @@
 	char *kbuild_verbose;
 	int fd;
 
+	kbuild_verbose = getenv("KBUILD_VERBOSE");
+	if (kbuild_verbose)
+		verbose_opt = atoi(kbuild_verbose);
+
+	while (argc > 4) {
+		if (strcmp(argv[1], "-v") == 0)
+			verbose_opt = true;
+		else if (strcmp(argv[1], "-d") == 0)
+			debug_opt = true;
+		else
+			break;
+		memmove(&argv[1], &argv[2], (argc - 2) * sizeof(char *));
+		argc--;
+	}
+
 	if (argc != 4) {
-		fprintf(stderr, "Format: %s <grammar-file> <c-file> <hdr-file>\n",
+		fprintf(stderr, "Format: %s [-v] [-d] <grammar-file> <c-file> <hdr-file>\n",
 			argv[0]);
 		exit(2);
 	}
 
-	kbuild_verbose = getenv("KBUILD_VERBOSE");
-	if (kbuild_verbose)
-		verbose = atoi(kbuild_verbose);
-
 	filename = argv[1];
 	outputname = argv[2];
 	headername = argv[3];
@@ -608,6 +641,7 @@
 	tokenise(buffer, buffer + readlen);
 	build_type_list();
 	parse();
+	dump_elements();
 
 	out = fopen(outputname, "w");
 	if (!out) {
@@ -666,7 +700,7 @@
 	unsigned	flags;
 #define ELEMENT_IMPLICIT	0x0001
 #define ELEMENT_EXPLICIT	0x0002
-#define ELEMENT_MARKED		0x0004
+#define ELEMENT_TAG_SPECIFIED	0x0004
 #define ELEMENT_RENDERED	0x0008
 #define ELEMENT_SKIPPABLE	0x0010
 #define ELEMENT_CONDITIONAL	0x0020
@@ -693,7 +727,7 @@
 	if ((*a)->name->size != (*b)->name->size)
 		return (*a)->name->size - (*b)->name->size;
 	else
-		return memcmp((*a)->name->value, (*b)->name->value,
+		return memcmp((*a)->name->content, (*b)->name->content,
 			      (*a)->name->size);
 }
 
@@ -706,7 +740,7 @@
 	if (token->size != type->name->size)
 		return token->size - type->name->size;
 	else
-		return memcmp(token->value, type->name->value,
+		return memcmp(token->content, type->name->content,
 			      token->size);
 }
 
@@ -756,14 +790,11 @@
 
 	qsort(type_index, nr, sizeof(type_index[0]), type_index_compare);
 
-	debug("Extracted %u types\n", nr_types);
+	verbose("Extracted %u types\n", nr_types);
 #if 0
 	for (n = 0; n < nr_types; n++) {
 		struct type *type = type_index[n];
-		debug("- %*.*s\n",
-		       (int)type->name->size,
-		       (int)type->name->size,
-		       type->name->value);
+		debug("- %*.*s\n", type->name->content);
 	}
 #endif
 }
@@ -793,15 +824,14 @@
 		type->element->type_def = type;
 
 		if (cursor != type[1].name) {
-			fprintf(stderr, "%s:%d: Parse error at token '%*.*s'\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Parse error at token '%s'\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 
 	} while (type++, !(type->flags & TYPE_STOP_MARKER));
 
-	debug("Extracted %u actions\n", nr_actions);
+	verbose("Extracted %u actions\n", nr_actions);
 }
 
 static struct element *element_list;
@@ -862,33 +892,31 @@
 			cursor++;
 			break;
 		default:
-			fprintf(stderr, "%s:%d: Unrecognised tag class token '%*.*s'\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Unrecognised tag class token '%s'\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 
 		if (cursor >= end)
 			goto overrun_error;
 		if (cursor->token_type != TOKEN_NUMBER) {
-			fprintf(stderr, "%s:%d: Missing tag number '%*.*s'\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Missing tag number '%s'\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 
 		element->tag &= ~0x1f;
-		element->tag |= strtoul(cursor->value, &p, 10);
-		if (p - cursor->value != cursor->size)
+		element->tag |= strtoul(cursor->content, &p, 10);
+		element->flags |= ELEMENT_TAG_SPECIFIED;
+		if (p - cursor->content != cursor->size)
 			abort();
 		cursor++;
 
 		if (cursor >= end)
 			goto overrun_error;
 		if (cursor->token_type != TOKEN_CLOSE_SQUARE) {
-			fprintf(stderr, "%s:%d: Missing closing square bracket '%*.*s'\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Missing closing square bracket '%s'\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 		cursor++;
@@ -988,9 +1016,8 @@
 		ref = bsearch(cursor, type_index, nr_types, sizeof(type_index[0]),
 			      type_finder);
 		if (!ref) {
-			fprintf(stderr, "%s:%d: Type '%*.*s' undefined\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Type '%s' undefined\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 		cursor->type = *ref;
@@ -1039,9 +1066,8 @@
 		break;
 
 	default:
-		fprintf(stderr, "%s:%d: Token '%*.*s' does not introduce a type\n",
-			filename, cursor->line,
-			(int)cursor->size, (int)cursor->size, cursor->value);
+		fprintf(stderr, "%s:%d: Token '%s' does not introduce a type\n",
+			filename, cursor->line, cursor->content);
 		exit(1);
 	}
 
@@ -1058,20 +1084,18 @@
 		if (cursor >= end)
 			goto overrun_error;
 		if (cursor->token_type != TOKEN_ELEMENT_NAME) {
-			fprintf(stderr, "%s:%d: Token '%*.*s' is not an action function name\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Token '%s' is not an action function name\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 
-		action = malloc(sizeof(struct action) + cursor->size + 1);
+		action = malloc(sizeof(struct action));
 		if (!action) {
 			perror(NULL);
 			exit(1);
 		}
 		action->index = 0;
-		memcpy(action->name, cursor->value, cursor->size);
-		action->name[cursor->size] = 0;
+		action->name = cursor->content;
 
 		for (ppaction = &action_list;
 		     *ppaction;
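
Since every token now owns a NUL-terminated content string, an action can simply borrow that pointer rather than embed a copy in a flexible array member, which is why the allocation above shrinks to a bare sizeof(struct action). A reduced sketch of the new scheme (action_new() is a hypothetical helper; the real code inlines this):

	#include <stdio.h>
	#include <stdlib.h>

	struct action {
		struct action	*next;
		char		*name;	/* was: char name[]; copied into the malloc */
		unsigned char	index;
	};

	static struct action *action_new(char *content)
	{
		struct action *a = malloc(sizeof(*a));

		if (!a) {
			perror(NULL);
			exit(1);
		}
		a->next = NULL;
		a->index = 0;
		a->name = content;	/* borrowed: content outlives the list */
		return a;
	}

	int main(void)
	{
		char content[] = "do_verify";	/* stands in for cursor->content */
		struct action *a = action_new(content);

		printf("action '%s'\n", a->name);
		free(a);
		return 0;
	}
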
@@ -1101,9 +1125,8 @@
 		if (cursor >= end)
 			goto overrun_error;
 		if (cursor->token_type != TOKEN_CLOSE_ACTION) {
-			fprintf(stderr, "%s:%d: Missing close action, got '%*.*s'\n",
-				filename, cursor->line,
-				(int)cursor->size, (int)cursor->size, cursor->value);
+			fprintf(stderr, "%s:%d: Missing close action, got '%s'\n",
+				filename, cursor->line, cursor->content);
 			exit(1);
 		}
 		cursor++;
@@ -1113,9 +1136,8 @@
 	return top;
 
 parse_error:
-	fprintf(stderr, "%s:%d: Unexpected token '%*.*s'\n",
-		filename, cursor->line,
-		(int)cursor->size, (int)cursor->size, cursor->value);
+	fprintf(stderr, "%s:%d: Unexpected token '%s'\n",
+		filename, cursor->line, cursor->content);
 	exit(1);
 
 overrun_error:
@@ -1133,9 +1155,8 @@
 	struct token *cursor = *_cursor, *name;
 
 	if (cursor->token_type != TOKEN_OPEN_CURLY) {
-		fprintf(stderr, "%s:%d: Expected compound to start with brace not '%*.*s'\n",
-			filename, cursor->line,
-			(int)cursor->size, (int)cursor->size, cursor->value);
+		fprintf(stderr, "%s:%d: Expected compound to start with brace not '%s'\n",
+			filename, cursor->line, cursor->content);
 		exit(1);
 	}
 	cursor++;
@@ -1176,9 +1197,8 @@
 	children->flags &= ~ELEMENT_CONDITIONAL;
 
 	if (cursor->token_type != TOKEN_CLOSE_CURLY) {
-		fprintf(stderr, "%s:%d: Expected compound closure, got '%*.*s'\n",
-			filename, cursor->line,
-			(int)cursor->size, (int)cursor->size, cursor->value);
+		fprintf(stderr, "%s:%d: Expected compound closure, got '%s'\n",
+			filename, cursor->line, cursor->content);
 		exit(1);
 	}
 	cursor++;
@@ -1191,6 +1211,52 @@
 	exit(1);
 }
 
+static void dump_element(const struct element *e, int level)
+{
+	const struct element *c;
+	const struct type *t = e->type_def;
+	const char *name = e->name ? e->name->content : ".";
+	const char *tname = t && t->name ? t->name->content : ".";
+	char tag[32];
+
+	if (e->class == 0 && e->method == 0 && e->tag == 0)
+		strcpy(tag, "<...>");
+	else if (e->class == ASN1_UNIV)
+		sprintf(tag, "%s %s %s",
+			asn1_classes[e->class],
+			asn1_methods[e->method],
+			asn1_universal_tags[e->tag]);
+	else
+		sprintf(tag, "%s %s %u",
+			asn1_classes[e->class],
+			asn1_methods[e->method],
+			e->tag);
+
+	printf("%c%c%c%c%c %c %*s[*] \e[33m%s\e[m %s %s \e[35m%s\e[m\n",
+	       e->flags & ELEMENT_IMPLICIT ? 'I' : '-',
+	       e->flags & ELEMENT_EXPLICIT ? 'E' : '-',
+	       e->flags & ELEMENT_TAG_SPECIFIED ? 'T' : '-',
+	       e->flags & ELEMENT_SKIPPABLE ? 'S' : '-',
+	       e->flags & ELEMENT_CONDITIONAL ? 'C' : '-',
+	       "-tTqQcaro"[e->compound],
+	       level, "",
+	       tag,
+	       tname,
+	       name,
+	       e->action ? e->action->name : "");
+	if (e->compound == TYPE_REF)
+		dump_element(e->type->type->element, level + 3);
+	else
+		for (c = e->children; c; c = c->next)
+			dump_element(c, level + 3);
+}
+
+static void dump_elements(void)
+{
+	if (debug_opt)
+		dump_element(type_list[0].element, 0);
+}
+
 static void render_element(FILE *out, struct element *e, struct element *tag);
 static void render_out_of_line_list(FILE *out);
 
@@ -1292,7 +1358,7 @@
 	}
 
 	/* We do two passes - the first one calculates all the offsets */
-	debug("Pass 1\n");
+	verbose("Pass 1\n");
 	nr_entries = 0;
 	root = &type_list[0];
 	render_element(NULL, root->element, NULL);
@@ -1303,7 +1369,7 @@
 		e->flags &= ~ELEMENT_RENDERED;
 
 	/* And then we actually render */
-	debug("Pass 2\n");
+	verbose("Pass 2\n");
 	fprintf(out, "\n");
 	fprintf(out, "static const unsigned char %s_machine[] = {\n",
 		grammar_name);
@@ -1376,7 +1442,7 @@
  */
 static void render_element(FILE *out, struct element *e, struct element *tag)
 {
-	struct element *ec;
+	struct element *ec, *x;
 	const char *cond, *act;
 	int entry, skippable = 0, outofline = 0;
 
@@ -1389,9 +1455,7 @@
 		outofline = 1;
 
 	if (e->type_def && out) {
-		render_more(out, "\t// %*.*s\n",
-			    (int)e->type_def->name->size, (int)e->type_def->name->size,
-			    e->type_def->name->value);
+		render_more(out, "\t// %s\n", e->type_def->name->content);
 	}
 
 	/* Render the operation */
@@ -1400,11 +1464,10 @@
 	act = e->action ? "_ACT" : "";
 	switch (e->compound) {
 	case ANY:
-		render_opcode(out, "ASN1_OP_%sMATCH_ANY%s,", cond, act);
+		render_opcode(out, "ASN1_OP_%sMATCH_ANY%s%s,",
+			      cond, act, skippable ? "_OR_SKIP" : "");
 		if (e->name)
-			render_more(out, "\t\t// %*.*s",
-				    (int)e->name->size, (int)e->name->size,
-				    e->name->value);
+			render_more(out, "\t\t// %s", e->name->content);
 		render_more(out, "\n");
 		goto dont_render_tag;
 
@@ -1435,15 +1498,15 @@
 		break;
 	}
 
-	if (e->name)
-		render_more(out, "\t\t// %*.*s",
-			    (int)e->name->size, (int)e->name->size,
-			    e->name->value);
+	x = tag ?: e;
+	if (x->name)
+		render_more(out, "\t\t// %s", x->name->content);
 	render_more(out, "\n");
 
 	/* Render the tag */
-	if (!tag)
+	if (!tag || !(tag->flags & ELEMENT_TAG_SPECIFIED))
 		tag = e;
+
 	if (tag->class == ASN1_UNIV &&
 	    tag->tag != 14 &&
 	    tag->tag != 15 &&
@@ -1465,7 +1528,8 @@
 	case TYPE_REF:
 		render_element(out, e->type->type->element, tag);
 		if (e->action)
-			render_opcode(out, "ASN1_OP_ACT,\n");
+			render_opcode(out, "ASN1_OP_%sACT,\n",
+				      skippable ? "MAYBE_" : "");
 		break;
 
 	case SEQUENCE:
@@ -1474,10 +1538,8 @@
 			 * skipability */
 			render_opcode(out, "_jump_target(%u),", e->entry_index);
 			if (e->type_def && e->type_def->name)
-				render_more(out, "\t\t// --> %*.*s",
-					    (int)e->type_def->name->size,
-					    (int)e->type_def->name->size,
-					    e->type_def->name->value);
+				render_more(out, "\t\t// --> %s",
+					    e->type_def->name->content);
 			render_more(out, "\n");
 			if (!(e->flags & ELEMENT_RENDERED)) {
 				e->flags |= ELEMENT_RENDERED;
@@ -1502,10 +1564,8 @@
 			 * skipability */
 			render_opcode(out, "_jump_target(%u),", e->entry_index);
 			if (e->type_def && e->type_def->name)
-				render_more(out, "\t\t// --> %*.*s",
-					    (int)e->type_def->name->size,
-					    (int)e->type_def->name->size,
-					    e->type_def->name->value);
+				render_more(out, "\t\t// --> %s",
+					    e->type_def->name->content);
 			render_more(out, "\n");
 			if (!(e->flags & ELEMENT_RENDERED)) {
 				e->flags |= ELEMENT_RENDERED;
@@ -1539,7 +1599,7 @@
 
 	case CHOICE:
 		for (ec = e->children; ec; ec = ec->next)
-			render_element(out, ec, NULL);
+			render_element(out, ec, ec);
 		if (!skippable)
 			render_opcode(out, "ASN1_OP_COND_FAIL,\n");
 		if (e->action)
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index b304068..c68fd61 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -192,23 +192,6 @@
 }
 
 /*
- * Clear the set of configuration strings.
- */
-static void clear_config(void)
-{
-	struct item *aux, *next;
-	unsigned int i;
-
-	for (i = 0; i < HASHSZ; i++) {
-		for (aux = hashtab[i]; aux; aux = next) {
-			next = aux->next;
-			free(aux);
-		}
-		hashtab[i] = NULL;
-	}
-}
-
-/*
  * Record the use of a CONFIG_* word.
  */
 static void use_config(const char *m, int slen)
@@ -251,7 +234,8 @@
 			continue;
 		if (memcmp(p, "CONFIG_", 7))
 			continue;
-		for (q = p + 7; q < map + len; q++) {
+		p += 7;
+		for (q = p; q < map + len; q++) {
 			if (!(isalnum(*q) || *q == '_'))
 				goto found;
 		}
@@ -260,9 +244,9 @@
 	found:
 		if (!memcmp(q - 7, "_MODULE", 7))
 			q -= 7;
-		if( (q-p-7) < 0 )
+		if (q - p < 0)
 			continue;
-		use_config(p+7, q-p-7);
+		use_config(p, q - p);
 	}
 }
 
@@ -324,8 +308,6 @@
 	int saw_any_target = 0;
 	int is_first_dep = 0;
 
-	clear_config();
-
 	while (m < end) {
 		/* Skip any "white space" */
 		while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
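
With p now pointing just past "CONFIG_", the scan above reads naturally: find the end of the identifier, strip a trailing "_MODULE" so tristate references land on the same dependency as the plain symbol, and record what is left. A standalone sketch of the same logic (fixdep's goto-based flow is flattened into a loop here, and the bounds check is made explicit):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	static void use_config(const char *m, int slen)
	{
		printf("depends on CONFIG_%.*s\n", slen, m);
	}

	int main(void)
	{
		const char *map = "#ifdef CONFIG_MODULE_SIG_MODULE\n";
		const char *end = map + strlen(map);
		const char *p, *q;

		for (p = map; p < end; p++) {
			if (strncmp(p, "CONFIG_", 7))
				continue;
			p += 7;			/* start of the symbol name */
			for (q = p; q < end; q++)
				if (!(isalnum((unsigned char)*q) || *q == '_'))
					break;
			if (q - p >= 7 && !memcmp(q - 7, "_MODULE", 7))
				q -= 7;		/* FOO_MODULE counts as FOO */
			if (q > p)
				use_config(p, (int)(q - p));
			p = q;
		}
		return 0;	/* prints: depends on CONFIG_MODULE_SIG */
	}
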
diff --git a/scripts/coccinelle/api/alloc/pool_zalloc-simple.cocci b/scripts/coccinelle/api/alloc/pool_zalloc-simple.cocci
new file mode 100644
index 0000000..9b7eb32
--- /dev/null
+++ b/scripts/coccinelle/api/alloc/pool_zalloc-simple.cocci
@@ -0,0 +1,84 @@
+///
+/// Use *_pool_zalloc rather than *_pool_alloc followed by memset with 0
+///
+// Copyright: (C) 2015 Intel Corp.  GPLv2.
+// Options: --no-includes --include-headers
+//
+// Keywords: dma_pool_zalloc, pci_pool_zalloc
+//
+
+virtual context
+virtual patch
+virtual org
+virtual report
+
+//----------------------------------------------------------
+//  For context mode
+//----------------------------------------------------------
+
+@depends on context@
+expression x;
+statement S;
+@@
+
+* x = \(dma_pool_alloc\|pci_pool_alloc\)(...);
+  if ((x==NULL) || ...) S
+* memset(x,0, ...);
+
+//----------------------------------------------------------
+//  For patch mode
+//----------------------------------------------------------
+
+@depends on patch@
+expression x;
+expression a,b,c;
+statement S;
+@@
+
+- x = dma_pool_alloc(a,b,c);
++ x = dma_pool_zalloc(a,b,c);
+  if ((x==NULL) || ...) S
+- memset(x,0,...);
+
+@depends on patch@
+expression x;
+expression a,b,c;
+statement S;
+@@
+
+- x = pci_pool_alloc(a,b,c);
++ x = pci_pool_zalloc(a,b,c);
+  if ((x==NULL) || ...) S
+- memset(x,0,...);
+
+//----------------------------------------------------------
+//  For org and report mode
+//----------------------------------------------------------
+
+@r depends on org || report@
+expression x;
+expression a,b,c;
+statement S;
+position p;
+@@
+
+ x = @p\(dma_pool_alloc\|pci_pool_alloc\)(a,b,c);
+ if ((x==NULL) || ...) S
+ memset(x,0, ...);
+
+@script:python depends on org@
+p << r.p;
+x << r.x;
+@@
+
+msg="%s" % (x)
+msg_safe=msg.replace("[","@(").replace("]",")")
+coccilib.org.print_todo(p[0], msg_safe)
+
+@script:python depends on report@
+p << r.p;
+x << r.x;
+@@
+
+msg="WARNING: *_pool_zalloc should be used for %s, instead of *_pool_alloc/memset" % (x)
+coccilib.report.print_report(p[0], msg)
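
In C terms, the rewrite performed in patch mode looks as follows (a kernel-style fragment, not a complete driver; desc, pool, handle and size stand in for whatever the matched code uses):

	/* before: allocate, check, then zero by hand */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (desc == NULL)
		return -ENOMEM;
	memset(desc, 0, size);

	/* after: dma_pool_zalloc() hands back zeroed memory;
	 * the pci_pool_* pair is transformed the same way */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, &handle);
	if (desc == NULL)
		return -ENOMEM;
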
diff --git a/scripts/coccinelle/api/platform_no_drv_owner.cocci b/scripts/coccinelle/api/platform_no_drv_owner.cocci
index e065b9e..c5e3f73f 100644
--- a/scripts/coccinelle/api/platform_no_drv_owner.cocci
+++ b/scripts/coccinelle/api/platform_no_drv_owner.cocci
@@ -9,11 +9,14 @@
 virtual report
 
 @match1@
+declarer name module_i2c_driver;
 declarer name module_platform_driver;
 declarer name module_platform_driver_probe;
 identifier __driver;
 @@
 (
+	module_i2c_driver(__driver);
+|
 	module_platform_driver(__driver);
 |
 	module_platform_driver_probe(__driver, ...);
@@ -28,6 +31,15 @@
 		}
 	};
 
+@fix1_i2c depends on match1 && patch && !context && !org && !report@
+identifier match1.__driver;
+@@
+	static struct i2c_driver __driver = {
+		.driver = {
+-			.owner = THIS_MODULE,
+		}
+	};
+
 @match2@
 identifier __driver;
 @@
@@ -37,6 +49,8 @@
 	platform_driver_probe(&__driver, ...)
 |
 	platform_create_bundle(&__driver, ...)
+|
+	i2c_add_driver(&__driver)
 )
 
 @fix2 depends on match2 && patch && !context && !org && !report@
@@ -48,6 +62,15 @@
 		}
 	};
 
+@fix2_i2c depends on match2 && patch && !context && !org && !report@
+identifier match2.__driver;
+@@
+	static struct i2c_driver __driver = {
+		.driver = {
+-			.owner = THIS_MODULE,
+		}
+	};
+
 // ----------------------------------------------------------------------------
 
 @fix1_context depends on match1 && !patch && (context || org || report)@
@@ -61,6 +84,17 @@
 		}
 	};
 
+@fix1_i2c_context depends on match1 && !patch && (context || org || report)@
+identifier match1.__driver;
+position j0;
+@@
+
+	static struct i2c_driver __driver = {
+		.driver = {
+*			.owner@j0 = THIS_MODULE,
+		}
+	};
+
 @fix2_context depends on match2 && !patch && (context || org || report)@
 identifier match2.__driver;
 position j0;
@@ -72,6 +106,17 @@
 		}
 	};
 
+@fix2_i2c_context depends on match2 && !patch && (context || org || report)@
+identifier match2.__driver;
+position j0;
+@@
+
+	static struct i2c_driver __driver = {
+		.driver = {
+*			.owner@j0 = THIS_MODULE,
+		}
+	};
+
 // ----------------------------------------------------------------------------
 
 @script:python fix1_org depends on org@
@@ -81,6 +126,13 @@
 msg = "No need to set .owner here. The core will do it."
 coccilib.org.print_todo(j0[0], msg)
 
+@script:python fix1_i2c_org depends on org@
+j0 << fix1_i2c_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.org.print_todo(j0[0], msg)
+
 @script:python fix2_org depends on org@
 j0 << fix2_context.j0;
 @@
@@ -88,6 +140,13 @@
 msg = "No need to set .owner here. The core will do it."
 coccilib.org.print_todo(j0[0], msg)
 
+@script:python fix2_i2c_org depends on org@
+j0 << fix2_i2c_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.org.print_todo(j0[0], msg)
+
 // ----------------------------------------------------------------------------
 
 @script:python fix1_report depends on report@
@@ -97,6 +156,13 @@
 msg = "No need to set .owner here. The core will do it."
 coccilib.report.print_report(j0[0], msg)
 
+@script:python fix1_i2c_report depends on report@
+j0 << fix1_i2c_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.report.print_report(j0[0], msg)
+
 @script:python fix2_report depends on report@
 j0 << fix2_context.j0;
 @@
@@ -104,3 +170,10 @@
 msg = "No need to set .owner here. The core will do it."
 coccilib.report.print_report(j0[0], msg)
 
+@script:python fix2_i2c_report depends on report@
+j0 << fix2_i2c_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.report.print_report(j0[0], msg)
+
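
The additions extend the same cleanup to I2C: for a driver registered through module_i2c_driver() or i2c_add_driver(), the i2c core fills in .driver.owner itself. A representative match (the foo_* identifiers are placeholders; after the rewrite the structure is identical minus the .owner line):

	static struct i2c_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,	/* the line the rule removes */
		},
		.probe	= foo_probe,
		.remove	= foo_remove,
	};
	module_i2c_driver(foo_driver);
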
diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
index f01789e..b7042d0 100644
--- a/scripts/coccinelle/api/pm_runtime.cocci
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -1,5 +1,5 @@
 /// Make sure pm_runtime_* calls does not use unnecessary IS_ERR_VALUE
-//
+///
 // Keywords: pm_runtime
 // Confidence: Medium
 // Copyright (C) 2013 Texas Instruments Incorporated - GPLv2.
diff --git a/scripts/coccinelle/api/simple_open.cocci b/scripts/coccinelle/api/simple_open.cocci
index b67e174..bd1a2a4 100644
--- a/scripts/coccinelle/api/simple_open.cocci
+++ b/scripts/coccinelle/api/simple_open.cocci
@@ -1,5 +1,5 @@
-/// This removes an open coded simple_open() function
-/// and replaces file operations references to the function
+/// Remove an open coded simple_open() function
+/// and replace file operations references to the function
 /// with simple_open() instead.
 ///
 // Confidence: High
diff --git a/scripts/coccinelle/api/vma_pages.cocci b/scripts/coccinelle/api/vma_pages.cocci
new file mode 100644
index 0000000..3e52e11
--- /dev/null
+++ b/scripts/coccinelle/api/vma_pages.cocci
@@ -0,0 +1,60 @@
+///
+/// Use vma_pages function on vma object instead of explicit computation.
+///
+//  Confidence: High
+//  Keywords: vma_pages vma
+//  Comment: Based on resource_size.cocci
+
+virtual context
+virtual patch
+virtual org
+virtual report
+
+//----------------------------------------------------------
+//  For context mode
+//----------------------------------------------------------
+
+@r_context depends on context && !patch && !org && !report@
+struct vm_area_struct *vma;
+@@
+
+* (vma->vm_end - vma->vm_start) >> PAGE_SHIFT
+
+//----------------------------------------------------------
+//  For patch mode
+//----------------------------------------------------------
+
+@r_patch depends on !context && patch && !org && !report@
+struct vm_area_struct *vma;
+@@
+
+- ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++ vma_pages(vma)
+
+//----------------------------------------------------------
+//  For org mode
+//----------------------------------------------------------
+
+@r_org depends on !context && !patch && (org || report)@
+struct vm_area_struct *vma;
+position p;
+@@
+
+  (vma->vm_end@p - vma->vm_start) >> PAGE_SHIFT
+
+@script:python depends on report@
+p << r_org.p;
+x << r_org.vma;
+@@
+
+msg="WARNING: Consider using vma_pages helper on %s" % (x)
+coccilib.report.print_report(p[0], msg)
+
+@script:python depends on org@
+p << r_org.p;
+x << r_org.vma;
+@@
+
+msg="WARNING: Consider using vma_pages helper on %s" % (x)
+msg_safe=msg.replace("[","@(").replace("]",")")
+coccilib.org.print_todo(p[0], msg_safe)
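
The transformation itself is small; vma_pages() is a static inline in <linux/mm.h> wrapping exactly this computation, so the rewrite is behaviour-preserving:

	/* before */
	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/* after */
	pages = vma_pages(vma);
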
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
index 8aebd18..c2663c6 100644
--- a/scripts/coccinelle/misc/ifaddr.cocci
+++ b/scripts/coccinelle/misc/ifaddr.cocci
@@ -1,5 +1,4 @@
-/// the address of a variable or field is non-zero is likely always to bo
-/// non-zero
+/// The address of a variable or field is likely always to be non-zero.
 ///
 // Confidence: High
 // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
index a24a754..b421150 100644
--- a/scripts/coccinelle/misc/irqf_oneshot.cocci
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -1,5 +1,8 @@
-/// Make sure threaded IRQs without a primary handler are always request with
-/// IRQF_ONESHOT
+/// Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests")
+/// threaded IRQs without a primary handler need to be requested with
+/// IRQF_ONESHOT, otherwise the request will fail.
+///
+/// So pass the IRQF_ONESHOT flag in this case.
 ///
 //
 // Confidence: Good
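
Concretely, the pattern this script flags is a threaded request with a NULL primary handler; since the cited commit the genirq core rejects that with -EINVAL unless IRQF_ONESHOT is passed. A sketch of the fix (irq, foo_thread_fn and dev are placeholders):

	/* rejected since 1c6c69525b40: no primary handler, no IRQF_ONESHOT */
	ret = request_threaded_irq(irq, NULL, foo_thread_fn, 0, "foo", dev);

	/* accepted: the line stays masked until the handler thread finishes */
	ret = request_threaded_irq(irq, NULL, foo_thread_fn, IRQF_ONESHOT,
				   "foo", dev);
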
diff --git a/scripts/coccinelle/misc/returnvar.cocci b/scripts/coccinelle/misc/returnvar.cocci
index 605955a..d8286ef 100644
--- a/scripts/coccinelle/misc/returnvar.cocci
+++ b/scripts/coccinelle/misc/returnvar.cocci
@@ -1,5 +1,5 @@
 ///
-/// Removes unneeded variable used to store return value.
+/// Remove unneeded variable used to store return value.
 ///
 // Confidence: Moderate
 // Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.  GPLv2.
diff --git a/scripts/coccinelle/misc/semicolon.cocci b/scripts/coccinelle/misc/semicolon.cocci
index a47eba2..6740c65 100644
--- a/scripts/coccinelle/misc/semicolon.cocci
+++ b/scripts/coccinelle/misc/semicolon.cocci
@@ -1,5 +1,5 @@
 ///
-/// Removes unneeded semicolon.
+/// Remove unneeded semicolon.
 ///
 // Confidence: Moderate
 // Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.  GPLv2.
diff --git a/scripts/coccinelle/misc/simple_return.cocci b/scripts/coccinelle/misc/simple_return.cocci
index 47f7084..e8b6313 100644
--- a/scripts/coccinelle/misc/simple_return.cocci
+++ b/scripts/coccinelle/misc/simple_return.cocci
@@ -1,6 +1,6 @@
 /// Simplify a trivial if-return sequence.  Possibly combine with a
 /// preceding function call.
-//
+///
 // Confidence: High
 // Copyright: (C) 2014 Julia Lawall, INRIA/LIP6.  GPLv2.
 // Copyright: (C) 2014 Gilles Muller, INRIA/LiP6.  GPLv2.
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 515c4c0..00d6d53c 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -14,11 +14,14 @@
 
 parse_symbol() {
 	# The structure of symbol at this point is:
-	#   [name]+[offset]/[total length]
+	#   ([name]+[offset]/[total length])
 	#
 	# For example:
 	#   do_basic_setup+0x9c/0xbf
 
+	# Remove the enclosing parentheses
+	symbol=${symbol#\(}
+	symbol=${symbol%\)}
 
 	# Strip the symbol name so that we could look it up
 	local name=${symbol%+*}
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
new file mode 100644
index 0000000..fd0db01
--- /dev/null
+++ b/scripts/extract-cert.c
@@ -0,0 +1,166 @@
+/* Extract X.509 certificate in DER form from PKCS#11 or PEM.
+ *
+ * Copyright © 2014 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2015 Intel Corporation.
+ *
+ * Authors: David Howells <dhowells@redhat.com>
+ *          David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <getopt.h>
+#include <err.h>
+#include <arpa/inet.h>
+#include <openssl/bio.h>
+#include <openssl/evp.h>
+#include <openssl/pem.h>
+#include <openssl/pkcs7.h>
+#include <openssl/err.h>
+#include <openssl/engine.h>
+
+#define PKEY_ID_PKCS7 2
+
+static __attribute__((noreturn))
+void format(void)
+{
+	fprintf(stderr,
+		"Usage: scripts/extract-cert <source> <dest>\n");
+	exit(2);
+}
+
+static void display_openssl_errors(int l)
+{
+	const char *file;
+	char buf[120];
+	int e, line;
+
+	if (ERR_peek_error() == 0)
+		return;
+	fprintf(stderr, "At main.c:%d:\n", l);
+
+	while ((e = ERR_get_error_line(&file, &line))) {
+		ERR_error_string(e, buf);
+		fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
+	}
+}
+
+static void drain_openssl_errors(void)
+{
+	const char *file;
+	int line;
+
+	if (ERR_peek_error() == 0)
+		return;
+	while (ERR_get_error_line(&file, &line)) {}
+}
+
+#define ERR(cond, fmt, ...)				\
+	do {						\
+		bool __cond = (cond);			\
+		display_openssl_errors(__LINE__);	\
+		if (__cond) {				\
+			err(1, fmt, ## __VA_ARGS__);	\
+		}					\
+	} while(0)
+
+static const char *key_pass;
+static BIO *wb;
+static char *cert_dst;
+int kbuild_verbose;
+
+static void write_cert(X509 *x509)
+{
+	char buf[200];
+
+	if (!wb) {
+		wb = BIO_new_file(cert_dst, "wb");
+		ERR(!wb, "%s", cert_dst);
+	}
+	X509_NAME_oneline(X509_get_subject_name(x509), buf, sizeof(buf));
+	ERR(!i2d_X509_bio(wb, x509), cert_dst);
+	if (kbuild_verbose)
+		fprintf(stderr, "Extracted cert: %s\n", buf);
+}
+
+int main(int argc, char **argv)
+{
+	char *cert_src;
+
+	OpenSSL_add_all_algorithms();
+	ERR_load_crypto_strings();
+	ERR_clear_error();
+
+	kbuild_verbose = atoi(getenv("KBUILD_VERBOSE")?:"0");
+
+	key_pass = getenv("KBUILD_SIGN_PIN");
+
+	if (argc != 3)
+		format();
+
+	cert_src = argv[1];
+	cert_dst = argv[2];
+
+	if (!cert_src[0]) {
+		/* Invoked with no input; create empty file */
+		FILE *f = fopen(cert_dst, "wb");
+		ERR(!f, "%s", cert_dst);
+		fclose(f);
+		exit(0);
+	} else if (!strncmp(cert_src, "pkcs11:", 7)) {
+		ENGINE *e;
+		struct {
+			const char *cert_id;
+			X509 *cert;
+		} parms;
+
+		parms.cert_id = cert_src;
+		parms.cert = NULL;
+
+		ENGINE_load_builtin_engines();
+		drain_openssl_errors();
+		e = ENGINE_by_id("pkcs11");
+		ERR(!e, "Load PKCS#11 ENGINE");
+		if (ENGINE_init(e))
+			drain_openssl_errors();
+		else
+			ERR(1, "ENGINE_init");
+		if (key_pass)
+			ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
+		ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
+		ERR(!parms.cert, "Get X.509 from PKCS#11");
+		write_cert(parms.cert);
+	} else {
+		BIO *b;
+		X509 *x509;
+
+		b = BIO_new_file(cert_src, "rb");
+		ERR(!b, "%s", cert_src);
+
+		while (1) {
+			x509 = PEM_read_bio_X509(b, NULL, NULL, NULL);
+			if (wb && !x509) {
+				unsigned long err = ERR_peek_last_error();
+				if (ERR_GET_LIB(err) == ERR_LIB_PEM &&
+				    ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {
+					ERR_clear_error();
+					break;
+				}
+			}
+			ERR(!x509, "%s", cert_src);
+			write_cert(x509);
+		}
+	}
+
+	BIO_free(wb);
+
+	return 0;
+}
diff --git a/scripts/genksyms/parse.tab.c_shipped b/scripts/genksyms/parse.tab.c_shipped
index c9f0f0ce..99950b5 100644
--- a/scripts/genksyms/parse.tab.c_shipped
+++ b/scripts/genksyms/parse.tab.c_shipped
@@ -1,4 +1,4 @@
-/* A Bison parser, made by GNU Bison 2.5.1.  */
+/* A Bison parser, made by GNU Bison 2.7.  */
 
 /* Bison implementation for Yacc-like parsers in C
    
@@ -44,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.5.1"
+#define YYBISON_VERSION "2.7"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -58,8 +58,6 @@
 /* Pull parsers.  */
 #define YYPULL 1
 
-/* Using locations.  */
-#define YYLSP_NEEDED 0
 
 
 
@@ -125,11 +123,6 @@
 #  endif
 # endif
 
-/* Enabling traces.  */
-#ifndef YYDEBUG
-# define YYDEBUG 1
-#endif
-
 /* Enabling verbose error messages.  */
 #ifdef YYERROR_VERBOSE
 # undef YYERROR_VERBOSE
@@ -138,11 +131,14 @@
 # define YYERROR_VERBOSE 0
 #endif
 
-/* Enabling the token table.  */
-#ifndef YYTOKEN_TABLE
-# define YYTOKEN_TABLE 0
-#endif
 
+/* Enabling traces.  */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
 
 /* Tokens.  */
 #ifndef YYTOKENTYPE
@@ -196,7 +192,6 @@
 #endif
 
 
-
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
 typedef int YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
@@ -204,6 +199,23 @@
 # define YYSTYPE_IS_DECLARED 1
 #endif
 
+extern YYSTYPE yylval;
+
+#ifdef YYPARSE_PARAM
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void *YYPARSE_PARAM);
+#else
+int yyparse ();
+#endif
+#else /* ! YYPARSE_PARAM */
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
 
 /* Copy the second part of user declarations.  */
 
@@ -260,24 +272,24 @@
 # if defined YYENABLE_NLS && YYENABLE_NLS
 #  if ENABLE_NLS
 #   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
-#   define YY_(msgid) dgettext ("bison-runtime", msgid)
+#   define YY_(Msgid) dgettext ("bison-runtime", Msgid)
 #  endif
 # endif
 # ifndef YY_
-#  define YY_(msgid) msgid
+#  define YY_(Msgid) Msgid
 # endif
 #endif
 
 /* Suppress unused-variable warnings by "using" E.  */
 #if ! defined lint || defined __GNUC__
-# define YYUSE(e) ((void) (e))
+# define YYUSE(E) ((void) (E))
 #else
-# define YYUSE(e) /* empty */
+# define YYUSE(E) /* empty */
 #endif
 
 /* Identity function, used to suppress warnings about constant conditions.  */
 #ifndef lint
-# define YYID(n) (n)
+# define YYID(N) (N)
 #else
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
@@ -427,16 +439,16 @@
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   514
+#define YYLAST   515
 
 /* YYNTOKENS -- Number of terminals.  */
 #define YYNTOKENS  54
 /* YYNNTS -- Number of nonterminals.  */
 #define YYNNTS  49
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  132
+#define YYNRULES  133
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  187
+#define YYNSTATES  188
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
@@ -492,13 +504,13 @@
       97,   101,   105,   109,   112,   115,   118,   120,   122,   124,
      126,   128,   130,   132,   134,   136,   138,   140,   143,   144,
      146,   148,   151,   153,   155,   157,   159,   162,   164,   166,
-     171,   176,   179,   183,   187,   190,   192,   194,   196,   201,
-     206,   209,   213,   217,   220,   222,   226,   227,   229,   231,
-     235,   238,   241,   243,   244,   246,   248,   253,   258,   261,
-     265,   269,   273,   274,   276,   279,   283,   287,   288,   290,
-     292,   295,   299,   302,   303,   305,   307,   311,   314,   317,
-     319,   322,   323,   326,   330,   335,   337,   341,   343,   347,
-     350,   351,   353
+     168,   173,   178,   181,   185,   189,   192,   194,   196,   198,
+     203,   208,   211,   215,   219,   222,   224,   228,   229,   231,
+     233,   237,   240,   243,   245,   246,   248,   250,   255,   260,
+     263,   267,   271,   275,   276,   278,   281,   285,   289,   290,
+     292,   294,   297,   301,   304,   305,   307,   309,   313,   316,
+     319,   321,   324,   325,   328,   332,   337,   339,   343,   345,
+     349,   352,   353,   355
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
@@ -520,26 +532,27 @@
       13,    -1,     9,    -1,    26,    -1,     6,    -1,    42,    -1,
       50,    72,    -1,    -1,    73,    -1,    74,    -1,    73,    74,
       -1,     8,    -1,    27,    -1,    31,    -1,    18,    -1,    71,
-      75,    -1,    76,    -1,    38,    -1,    76,    48,    79,    49,
-      -1,    76,    48,     1,    49,    -1,    76,    34,    -1,    48,
-      75,    49,    -1,    48,     1,    49,    -1,    71,    77,    -1,
-      78,    -1,    38,    -1,    42,    -1,    78,    48,    79,    49,
-      -1,    78,    48,     1,    49,    -1,    78,    34,    -1,    48,
-      77,    49,    -1,    48,     1,    49,    -1,    80,    37,    -1,
-      80,    -1,    81,    47,    37,    -1,    -1,    81,    -1,    82,
-      -1,    81,    47,    82,    -1,    66,    83,    -1,    71,    83,
-      -1,    84,    -1,    -1,    38,    -1,    42,    -1,    84,    48,
-      79,    49,    -1,    84,    48,     1,    49,    -1,    84,    34,
-      -1,    48,    83,    49,    -1,    48,     1,    49,    -1,    65,
-      75,    33,    -1,    -1,    87,    -1,    51,    35,    -1,    52,
-      89,    46,    -1,    52,     1,    46,    -1,    -1,    90,    -1,
-      91,    -1,    90,    91,    -1,    65,    92,    45,    -1,     1,
-      45,    -1,    -1,    93,    -1,    94,    -1,    93,    47,    94,
-      -1,    77,    96,    -1,    38,    95,    -1,    95,    -1,    53,
-      35,    -1,    -1,    96,    31,    -1,    52,    98,    46,    -1,
-      52,    98,    47,    46,    -1,    99,    -1,    98,    47,    99,
-      -1,    38,    -1,    38,    51,    35,    -1,    30,    45,    -1,
-      -1,    30,    -1,    29,    48,    38,    49,    45,    -1
+      75,    -1,    76,    -1,    38,    -1,    42,    -1,    76,    48,
+      79,    49,    -1,    76,    48,     1,    49,    -1,    76,    34,
+      -1,    48,    75,    49,    -1,    48,     1,    49,    -1,    71,
+      77,    -1,    78,    -1,    38,    -1,    42,    -1,    78,    48,
+      79,    49,    -1,    78,    48,     1,    49,    -1,    78,    34,
+      -1,    48,    77,    49,    -1,    48,     1,    49,    -1,    80,
+      37,    -1,    80,    -1,    81,    47,    37,    -1,    -1,    81,
+      -1,    82,    -1,    81,    47,    82,    -1,    66,    83,    -1,
+      71,    83,    -1,    84,    -1,    -1,    38,    -1,    42,    -1,
+      84,    48,    79,    49,    -1,    84,    48,     1,    49,    -1,
+      84,    34,    -1,    48,    83,    49,    -1,    48,     1,    49,
+      -1,    65,    75,    33,    -1,    -1,    87,    -1,    51,    35,
+      -1,    52,    89,    46,    -1,    52,     1,    46,    -1,    -1,
+      90,    -1,    91,    -1,    90,    91,    -1,    65,    92,    45,
+      -1,     1,    45,    -1,    -1,    93,    -1,    94,    -1,    93,
+      47,    94,    -1,    77,    96,    -1,    38,    95,    -1,    95,
+      -1,    53,    35,    -1,    -1,    96,    31,    -1,    52,    98,
+      46,    -1,    52,    98,    47,    46,    -1,    99,    -1,    98,
+      47,    99,    -1,    38,    -1,    38,    51,    35,    -1,    30,
+      45,    -1,    -1,    30,    -1,    29,    48,    38,    49,    45,
+      -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
@@ -552,17 +565,17 @@
      237,   239,   241,   246,   249,   250,   254,   255,   256,   257,
      258,   259,   260,   261,   262,   263,   264,   268,   273,   274,
      278,   279,   283,   283,   283,   284,   292,   293,   297,   306,
-     308,   310,   312,   314,   321,   322,   326,   327,   328,   330,
-     332,   334,   336,   341,   342,   343,   347,   348,   352,   353,
-     358,   363,   365,   369,   370,   378,   382,   384,   386,   388,
-     390,   395,   404,   405,   410,   415,   416,   420,   421,   425,
-     426,   430,   432,   437,   438,   442,   443,   447,   448,   449,
-     453,   457,   458,   462,   463,   467,   468,   471,   476,   484,
-     488,   489,   493
+     315,   317,   319,   321,   323,   330,   331,   335,   336,   337,
+     339,   341,   343,   345,   350,   351,   352,   356,   357,   361,
+     362,   367,   372,   374,   378,   379,   387,   391,   393,   395,
+     397,   399,   404,   413,   414,   419,   424,   425,   429,   430,
+     434,   435,   439,   441,   446,   447,   451,   452,   456,   457,
+     458,   462,   466,   467,   471,   472,   476,   477,   480,   485,
+     493,   497,   498,   502
 };
 #endif
 
-#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+#if YYDEBUG || YYERROR_VERBOSE || 0
 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
    First, the terminals, then, starting at YYNTOKENS, nonterminals.  */
 static const char *const yytname[] =
@@ -621,13 +634,13 @@
       69,    69,    69,    69,    69,    69,    70,    70,    70,    70,
       70,    70,    70,    70,    70,    70,    70,    71,    72,    72,
       73,    73,    74,    74,    74,    74,    75,    75,    76,    76,
-      76,    76,    76,    76,    77,    77,    78,    78,    78,    78,
-      78,    78,    78,    79,    79,    79,    80,    80,    81,    81,
-      82,    83,    83,    84,    84,    84,    84,    84,    84,    84,
-      84,    85,    86,    86,    87,    88,    88,    89,    89,    90,
-      90,    91,    91,    92,    92,    93,    93,    94,    94,    94,
-      95,    96,    96,    97,    97,    98,    98,    99,    99,   100,
-     101,   101,   102
+      76,    76,    76,    76,    76,    77,    77,    78,    78,    78,
+      78,    78,    78,    78,    79,    79,    79,    80,    80,    81,
+      81,    82,    83,    83,    84,    84,    84,    84,    84,    84,
+      84,    84,    85,    86,    86,    87,    88,    88,    89,    89,
+      90,    90,    91,    91,    92,    92,    93,    93,    94,    94,
+      94,    95,    96,    96,    97,    97,    98,    98,    99,    99,
+     100,   101,   101,   102
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
@@ -639,14 +652,14 @@
        1,     1,     1,     1,     1,     4,     1,     2,     2,     2,
        3,     3,     3,     2,     2,     2,     1,     1,     1,     1,
        1,     1,     1,     1,     1,     1,     1,     2,     0,     1,
-       1,     2,     1,     1,     1,     1,     2,     1,     1,     4,
-       4,     2,     3,     3,     2,     1,     1,     1,     4,     4,
-       2,     3,     3,     2,     1,     3,     0,     1,     1,     3,
-       2,     2,     1,     0,     1,     1,     4,     4,     2,     3,
-       3,     3,     0,     1,     2,     3,     3,     0,     1,     1,
-       2,     3,     2,     0,     1,     1,     3,     2,     2,     1,
-       2,     0,     2,     3,     4,     1,     3,     1,     3,     2,
-       0,     1,     5
+       1,     2,     1,     1,     1,     1,     2,     1,     1,     1,
+       4,     4,     2,     3,     3,     2,     1,     1,     1,     4,
+       4,     2,     3,     3,     2,     1,     3,     0,     1,     1,
+       3,     2,     2,     1,     0,     1,     1,     4,     4,     2,
+       3,     3,     3,     0,     1,     2,     3,     3,     0,     1,
+       1,     2,     3,     2,     0,     1,     1,     3,     2,     2,
+       1,     2,     0,     2,     3,     4,     1,     3,     1,     3,
+       2,     0,     1,     5
 };
 
 /* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
@@ -660,187 +673,187 @@
        0,     0,     0,    64,    36,    56,     5,    10,    17,    23,
       24,    26,    27,    33,    34,    11,    12,    13,    14,    15,
       39,     0,    43,     6,    37,     0,    44,    22,    38,    45,
-       0,     0,   129,    68,     0,    58,     0,    18,    19,     0,
-     130,    67,    25,    42,   127,     0,   125,    22,    40,     0,
-     113,     0,     0,   109,     9,    17,    41,    93,     0,     0,
-       0,     0,    57,    59,    60,    16,     0,    66,   131,   101,
-     121,    71,     0,     0,   123,     0,     7,   112,   106,    76,
-      77,     0,     0,     0,   121,    75,     0,   114,   115,   119,
-     105,     0,   110,   130,    94,    56,     0,    93,    90,    92,
-      35,     0,    73,    72,    61,    20,   102,     0,     0,    84,
-      87,    88,   128,   124,   126,   118,     0,    76,     0,   120,
-      74,   117,    80,     0,   111,     0,     0,    95,     0,    91,
-      98,     0,   132,   122,     0,    21,   103,    70,    69,    83,
-       0,    82,    81,     0,     0,   116,   100,    99,     0,     0,
-     104,    85,    89,    79,    78,    97,    96
+       0,     0,   130,    68,    69,     0,    58,     0,    18,    19,
+       0,   131,    67,    25,    42,   128,     0,   126,    22,    40,
+       0,   114,     0,     0,   110,     9,    17,    41,    94,     0,
+       0,     0,     0,    57,    59,    60,    16,     0,    66,   132,
+     102,   122,    72,     0,     0,   124,     0,     7,   113,   107,
+      77,    78,     0,     0,     0,   122,    76,     0,   115,   116,
+     120,   106,     0,   111,   131,    95,    56,     0,    94,    91,
+      93,    35,     0,    74,    73,    61,    20,   103,     0,     0,
+      85,    88,    89,   129,   125,   127,   119,     0,    77,     0,
+     121,    75,   118,    81,     0,   112,     0,     0,    96,     0,
+      92,    99,     0,   133,   123,     0,    21,   104,    71,    70,
+      84,     0,    83,    82,     0,     0,   117,   101,   100,     0,
+       0,   105,    86,    90,    80,    79,    98,    97
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int16 yydefgoto[] =
 {
-      -1,     1,     2,     3,    36,    77,    57,    37,    66,    67,
-      68,    80,    39,    40,    41,    42,    43,    69,    92,    93,
-      44,   123,    71,   114,   115,   138,   139,   140,   141,   128,
-     129,    45,   165,   166,    56,    81,    82,    83,   116,   117,
-     118,   119,   136,    52,    75,    76,    46,   100,    47
+      -1,     1,     2,     3,    36,    78,    57,    37,    67,    68,
+      69,    81,    39,    40,    41,    42,    43,    70,    93,    94,
+      44,   124,    72,   115,   116,   139,   140,   141,   142,   129,
+     130,    45,   166,   167,    56,    82,    83,    84,   117,   118,
+     119,   120,   137,    52,    76,    77,    46,   101,    47
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -140
+#define YYPACT_NINF -92
 static const yytype_int16 yypact[] =
 {
-    -140,    29,  -140,   207,  -140,  -140,    40,  -140,  -140,  -140,
-    -140,  -140,   -27,  -140,    44,  -140,  -140,  -140,  -140,  -140,
-    -140,  -140,  -140,  -140,   -22,  -140,   -18,  -140,  -140,  -140,
-      -9,    22,    28,  -140,  -140,  -140,  -140,  -140,    42,   472,
-    -140,  -140,  -140,  -140,  -140,  -140,  -140,  -140,  -140,  -140,
-      46,    43,  -140,  -140,    47,   107,  -140,   472,    47,  -140,
-     472,    62,  -140,  -140,    16,    -3,    57,    56,  -140,    42,
-      35,   -11,  -140,  -140,    53,    48,  -140,   472,  -140,    51,
-      21,    59,   157,  -140,  -140,    42,  -140,   388,    58,    60,
-      70,    81,  -140,    -3,  -140,  -140,    42,  -140,  -140,  -140,
-    -140,  -140,   253,    71,  -140,   -20,  -140,  -140,  -140,    83,
-    -140,     5,   102,    34,  -140,    12,    95,    94,  -140,  -140,
-    -140,    97,  -140,   113,  -140,  -140,     2,    41,  -140,    27,
-    -140,    99,  -140,  -140,  -140,  -140,   -24,    98,   101,   109,
-     104,  -140,  -140,  -140,  -140,  -140,   105,  -140,   110,  -140,
-    -140,   117,  -140,   298,  -140,    21,   112,  -140,   120,  -140,
-    -140,   343,  -140,  -140,   121,  -140,  -140,  -140,  -140,  -140,
-     434,  -140,  -140,   131,   137,  -140,  -140,  -140,   138,   141,
-    -140,  -140,  -140,  -140,  -140,  -140,  -140
+     -92,    19,   -92,   208,   -92,   -92,    39,   -92,   -92,   -92,
+     -92,   -92,   -27,   -92,    23,   -92,   -92,   -92,   -92,   -92,
+     -92,   -92,   -92,   -92,   -22,   -92,     9,   -92,   -92,   -92,
+      -6,    16,    25,   -92,   -92,   -92,   -92,   -92,    31,   473,
+     -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,
+      49,    37,   -92,   -92,    51,   108,   -92,   473,    51,   -92,
+     473,    59,   -92,   -92,   -92,    12,    -3,    60,    57,   -92,
+      31,    -7,    24,   -92,   -92,    55,    42,   -92,   473,   -92,
+      46,   -21,    61,   158,   -92,   -92,    31,   -92,   389,    71,
+      82,    88,    89,   -92,    -3,   -92,   -92,    31,   -92,   -92,
+     -92,   -92,   -92,   254,    73,   -92,   -24,   -92,   -92,   -92,
+      90,   -92,    17,    75,    45,   -92,    32,    96,    95,   -92,
+     -92,   -92,    99,   -92,   115,   -92,   -92,     3,    48,   -92,
+      34,   -92,   102,   -92,   -92,   -92,   -92,   -11,   100,   103,
+     111,   104,   -92,   -92,   -92,   -92,   -92,   106,   -92,   113,
+     -92,   -92,   126,   -92,   299,   -92,   -21,   121,   -92,   132,
+     -92,   -92,   344,   -92,   -92,   125,   -92,   -92,   -92,   -92,
+     -92,   435,   -92,   -92,   138,   139,   -92,   -92,   -92,   142,
+     143,   -92,   -92,   -92,   -92,   -92,   -92,   -92
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int16 yypgoto[] =
 {
-    -140,  -140,   190,  -140,  -140,  -140,  -140,   -45,  -140,  -140,
-      96,     1,   -60,   -31,  -140,  -140,  -140,   -78,  -140,  -140,
-     -55,    -7,  -140,   -92,  -140,  -139,  -140,  -140,   -59,   -39,
-    -140,  -140,  -140,  -140,   -13,  -140,  -140,   111,  -140,  -140,
-      39,    87,    84,   147,  -140,   106,  -140,  -140,  -140
+     -92,   -92,   192,   -92,   -92,   -92,   -92,   -47,   -92,   -92,
+      97,     0,   -60,   -32,   -92,   -92,   -92,   -79,   -92,   -92,
+     -58,   -26,   -92,   -38,   -92,   -91,   -92,   -92,   -59,   -28,
+     -92,   -92,   -92,   -92,   -20,   -92,   -92,   112,   -92,   -92,
+      41,    91,    83,   149,   -92,   101,   -92,   -92,   -92
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
    number is the opposite.  If YYTABLE_NINF, syntax error.  */
-#define YYTABLE_NINF -109
+#define YYTABLE_NINF -110
 static const yytype_int16 yytable[] =
 {
-      87,    88,   113,   156,    38,    10,   146,   163,    72,   127,
-      94,    50,    84,    59,   174,    20,    54,    90,    74,   148,
-      58,   150,   179,   101,    29,    51,   143,   164,    33,     4,
-      55,    70,   106,   113,    55,   113,   -93,   102,   134,    60,
-     124,    78,    87,   147,   157,    86,   152,   110,   127,   127,
-     126,   -93,    65,   111,    63,    65,    72,    91,    85,   109,
-     153,   160,    97,   110,    64,    98,    65,    53,    99,   111,
-      61,    65,   147,    62,   112,   161,   110,   113,    85,   124,
-      63,    74,   111,   157,    65,    48,    49,   158,   159,   126,
-      64,    65,    65,    87,   104,   105,   107,   108,    51,    55,
-      89,    87,    95,    96,   103,   120,   142,   130,    79,   131,
-      87,   182,     7,     8,     9,    10,    11,    12,    13,   132,
-      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
-     133,    26,    27,    28,    29,    30,   112,   149,    33,    34,
-     154,   155,   107,    98,   162,   -22,   169,   167,   163,    35,
-     168,   170,   -22,  -107,   171,   -22,   180,   -22,   121,   172,
-     -22,   176,     7,     8,     9,    10,    11,    12,    13,   177,
-      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
-     183,    26,    27,    28,    29,    30,   184,   185,    33,    34,
-     186,     5,   135,   122,   175,   -22,   145,    73,   151,    35,
-       0,     0,   -22,  -108,     0,   -22,     0,   -22,     6,     0,
-     -22,   144,     7,     8,     9,    10,    11,    12,    13,    14,
-      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
-      25,    26,    27,    28,    29,    30,    31,    32,    33,    34,
-       0,     0,     0,     0,     0,   -22,     0,     0,     0,    35,
-       0,     0,   -22,     0,   137,   -22,     0,   -22,     7,     8,
-       9,    10,    11,    12,    13,     0,    15,    16,    17,    18,
-      19,    20,    21,    22,    23,    24,     0,    26,    27,    28,
-      29,    30,     0,     0,    33,    34,     0,     0,     0,     0,
-     -86,     0,     0,     0,     0,    35,     0,     0,     0,   173,
-       0,     0,   -86,     7,     8,     9,    10,    11,    12,    13,
-       0,    15,    16,    17,    18,    19,    20,    21,    22,    23,
-      24,     0,    26,    27,    28,    29,    30,     0,     0,    33,
-      34,     0,     0,     0,     0,   -86,     0,     0,     0,     0,
-      35,     0,     0,     0,   178,     0,     0,   -86,     7,     8,
-       9,    10,    11,    12,    13,     0,    15,    16,    17,    18,
-      19,    20,    21,    22,    23,    24,     0,    26,    27,    28,
-      29,    30,     0,     0,    33,    34,     0,     0,     0,     0,
-     -86,     0,     0,     0,     0,    35,     0,     0,     0,     0,
-       0,     0,   -86,     7,     8,     9,    10,    11,    12,    13,
-       0,    15,    16,    17,    18,    19,    20,    21,    22,    23,
-      24,     0,    26,    27,    28,    29,    30,     0,     0,    33,
-      34,     0,     0,     0,     0,     0,   124,     0,     0,     0,
-     125,     0,     0,     0,     0,     0,   126,     0,    65,     7,
+      88,    89,   114,    38,   157,    10,    59,    73,    95,   128,
+      85,    50,    71,    91,    75,    20,    54,   110,   147,     4,
+     164,   111,   144,    99,    29,    51,   100,   112,    33,    66,
+      55,   107,   113,   114,    79,   114,   135,   -94,    87,    92,
+     165,   125,    60,    88,    98,   158,    53,    58,   128,   128,
+      63,   127,   -94,    66,    64,   148,    73,    86,   102,   111,
+      65,    55,    66,   175,    61,   112,   153,    66,   161,    63,
+      62,   180,   103,    64,   149,    75,   151,   114,    86,    65,
+     154,    66,   162,   148,    48,    49,   125,   111,   105,   106,
+     158,   108,   109,   112,    88,    66,   127,    90,    66,   159,
+     160,    51,    88,    55,    97,    96,   104,   121,   143,    80,
+     150,    88,   183,     7,     8,     9,    10,    11,    12,    13,
+     131,    15,    16,    17,    18,    19,    20,    21,    22,    23,
+      24,   132,    26,    27,    28,    29,    30,   133,   134,    33,
+      34,   155,   156,   113,   108,    99,   -22,   163,   170,   168,
+      35,   171,   169,   -22,  -108,   172,   -22,   164,   -22,   122,
+     181,   -22,   173,     7,     8,     9,    10,    11,    12,    13,
+     177,    15,    16,    17,    18,    19,    20,    21,    22,    23,
+      24,   178,    26,    27,    28,    29,    30,   184,   185,    33,
+      34,   186,   187,     5,   136,   123,   -22,   176,   152,    74,
+      35,   146,     0,   -22,  -109,     0,   -22,   145,   -22,     6,
+       0,   -22,     0,     7,     8,     9,    10,    11,    12,    13,
+      14,    15,    16,    17,    18,    19,    20,    21,    22,    23,
+      24,    25,    26,    27,    28,    29,    30,    31,    32,    33,
+      34,     0,     0,     0,     0,     0,   -22,     0,     0,     0,
+      35,     0,     0,   -22,     0,   138,   -22,     0,   -22,     7,
        8,     9,    10,    11,    12,    13,     0,    15,    16,    17,
       18,    19,    20,    21,    22,    23,    24,     0,    26,    27,
       28,    29,    30,     0,     0,    33,    34,     0,     0,     0,
-       0,   181,     0,     0,     0,     0,    35,     7,     8,     9,
-      10,    11,    12,    13,     0,    15,    16,    17,    18,    19,
-      20,    21,    22,    23,    24,     0,    26,    27,    28,    29,
-      30,     0,     0,    33,    34,     0,     0,     0,     0,     0,
-       0,     0,     0,     0,    35
+       0,   -87,     0,     0,     0,     0,    35,     0,     0,     0,
+     174,     0,     0,   -87,     7,     8,     9,    10,    11,    12,
+      13,     0,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    24,     0,    26,    27,    28,    29,    30,     0,     0,
+      33,    34,     0,     0,     0,     0,   -87,     0,     0,     0,
+       0,    35,     0,     0,     0,   179,     0,     0,   -87,     7,
+       8,     9,    10,    11,    12,    13,     0,    15,    16,    17,
+      18,    19,    20,    21,    22,    23,    24,     0,    26,    27,
+      28,    29,    30,     0,     0,    33,    34,     0,     0,     0,
+       0,   -87,     0,     0,     0,     0,    35,     0,     0,     0,
+       0,     0,     0,   -87,     7,     8,     9,    10,    11,    12,
+      13,     0,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    24,     0,    26,    27,    28,    29,    30,     0,     0,
+      33,    34,     0,     0,     0,     0,     0,   125,     0,     0,
+       0,   126,     0,     0,     0,     0,     0,   127,     0,    66,
+       7,     8,     9,    10,    11,    12,    13,     0,    15,    16,
+      17,    18,    19,    20,    21,    22,    23,    24,     0,    26,
+      27,    28,    29,    30,     0,     0,    33,    34,     0,     0,
+       0,     0,   182,     0,     0,     0,     0,    35,     7,     8,
+       9,    10,    11,    12,    13,     0,    15,    16,    17,    18,
+      19,    20,    21,    22,    23,    24,     0,    26,    27,    28,
+      29,    30,     0,     0,    33,    34,     0,     0,     0,     0,
+       0,     0,     0,     0,     0,    35
 };
 
-#define yypact_value_is_default(yystate) \
-  ((yystate) == (-140))
+#define yypact_value_is_default(Yystate) \
+  (!!((Yystate) == (-92)))
 
-#define yytable_value_is_error(yytable_value) \
+#define yytable_value_is_error(Yytable_value) \
   YYID (0)
 
 static const yytype_int16 yycheck[] =
 {
-      60,    60,    80,     1,     3,     8,     1,    31,    39,    87,
-      65,    38,    57,    26,   153,    18,    38,     1,    38,   111,
-      38,   113,   161,    34,    27,    52,    46,    51,    31,     0,
-      52,    38,    77,   111,    52,   113,    34,    48,    93,    48,
-      38,    54,   102,    38,    42,    58,    34,    42,   126,   127,
-      48,    49,    50,    48,    38,    50,    87,    64,    57,    38,
-      48,    34,    69,    42,    48,    30,    50,    23,    33,    48,
-      48,    50,    38,    45,    53,    48,    42,   155,    77,    38,
-      38,    38,    48,    42,    50,    45,    46,   126,   127,    48,
-      48,    50,    50,   153,    46,    47,    45,    46,    52,    52,
-      38,   161,    45,    47,    51,    46,    35,    49,     1,    49,
-     170,   170,     5,     6,     7,     8,     9,    10,    11,    49,
-      13,    14,    15,    16,    17,    18,    19,    20,    21,    22,
-      49,    24,    25,    26,    27,    28,    53,    35,    31,    32,
-      45,    47,    45,    30,    45,    38,    37,    49,    31,    42,
-      49,    47,    45,    46,    49,    48,    35,    50,     1,    49,
-      53,    49,     5,     6,     7,     8,     9,    10,    11,    49,
-      13,    14,    15,    16,    17,    18,    19,    20,    21,    22,
-      49,    24,    25,    26,    27,    28,    49,    49,    31,    32,
-      49,     1,    96,    82,   155,    38,   109,    50,   114,    42,
-      -1,    -1,    45,    46,    -1,    48,    -1,    50,     1,    -1,
-      53,   105,     5,     6,     7,     8,     9,    10,    11,    12,
-      13,    14,    15,    16,    17,    18,    19,    20,    21,    22,
-      23,    24,    25,    26,    27,    28,    29,    30,    31,    32,
-      -1,    -1,    -1,    -1,    -1,    38,    -1,    -1,    -1,    42,
-      -1,    -1,    45,    -1,     1,    48,    -1,    50,     5,     6,
-       7,     8,     9,    10,    11,    -1,    13,    14,    15,    16,
-      17,    18,    19,    20,    21,    22,    -1,    24,    25,    26,
-      27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,    -1,
-      37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,     1,
-      -1,    -1,    49,     5,     6,     7,     8,     9,    10,    11,
-      -1,    13,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    -1,    24,    25,    26,    27,    28,    -1,    -1,    31,
-      32,    -1,    -1,    -1,    -1,    37,    -1,    -1,    -1,    -1,
-      42,    -1,    -1,    -1,     1,    -1,    -1,    49,     5,     6,
-       7,     8,     9,    10,    11,    -1,    13,    14,    15,    16,
-      17,    18,    19,    20,    21,    22,    -1,    24,    25,    26,
-      27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,    -1,
-      37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,    -1,
-      -1,    -1,    49,     5,     6,     7,     8,     9,    10,    11,
-      -1,    13,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    -1,    24,    25,    26,    27,    28,    -1,    -1,    31,
+      60,    60,    81,     3,     1,     8,    26,    39,    66,    88,
+      57,    38,    38,     1,    38,    18,    38,    38,     1,     0,
+      31,    42,    46,    30,    27,    52,    33,    48,    31,    50,
+      52,    78,    53,   112,    54,   114,    94,    34,    58,    65,
+      51,    38,    48,   103,    70,    42,    23,    38,   127,   128,
+      38,    48,    49,    50,    42,    38,    88,    57,    34,    42,
+      48,    52,    50,   154,    48,    48,    34,    50,    34,    38,
+      45,   162,    48,    42,   112,    38,   114,   156,    78,    48,
+      48,    50,    48,    38,    45,    46,    38,    42,    46,    47,
+      42,    45,    46,    48,   154,    50,    48,    38,    50,   127,
+     128,    52,   162,    52,    47,    45,    51,    46,    35,     1,
+      35,   171,   171,     5,     6,     7,     8,     9,    10,    11,
+      49,    13,    14,    15,    16,    17,    18,    19,    20,    21,
+      22,    49,    24,    25,    26,    27,    28,    49,    49,    31,
+      32,    45,    47,    53,    45,    30,    38,    45,    37,    49,
+      42,    47,    49,    45,    46,    49,    48,    31,    50,     1,
+      35,    53,    49,     5,     6,     7,     8,     9,    10,    11,
+      49,    13,    14,    15,    16,    17,    18,    19,    20,    21,
+      22,    49,    24,    25,    26,    27,    28,    49,    49,    31,
+      32,    49,    49,     1,    97,    83,    38,   156,   115,    50,
+      42,   110,    -1,    45,    46,    -1,    48,   106,    50,     1,
+      -1,    53,    -1,     5,     6,     7,     8,     9,    10,    11,
+      12,    13,    14,    15,    16,    17,    18,    19,    20,    21,
+      22,    23,    24,    25,    26,    27,    28,    29,    30,    31,
       32,    -1,    -1,    -1,    -1,    -1,    38,    -1,    -1,    -1,
-      42,    -1,    -1,    -1,    -1,    -1,    48,    -1,    50,     5,
+      42,    -1,    -1,    45,    -1,     1,    48,    -1,    50,     5,
        6,     7,     8,     9,    10,    11,    -1,    13,    14,    15,
       16,    17,    18,    19,    20,    21,    22,    -1,    24,    25,
       26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,
-      -1,    37,    -1,    -1,    -1,    -1,    42,     5,     6,     7,
-       8,     9,    10,    11,    -1,    13,    14,    15,    16,    17,
-      18,    19,    20,    21,    22,    -1,    24,    25,    26,    27,
-      28,    -1,    -1,    31,    32,    -1,    -1,    -1,    -1,    -1,
-      -1,    -1,    -1,    -1,    42
+      -1,    37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,
+       1,    -1,    -1,    49,     5,     6,     7,     8,     9,    10,
+      11,    -1,    13,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    -1,    24,    25,    26,    27,    28,    -1,    -1,
+      31,    32,    -1,    -1,    -1,    -1,    37,    -1,    -1,    -1,
+      -1,    42,    -1,    -1,    -1,     1,    -1,    -1,    49,     5,
+       6,     7,     8,     9,    10,    11,    -1,    13,    14,    15,
+      16,    17,    18,    19,    20,    21,    22,    -1,    24,    25,
+      26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,
+      -1,    37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,
+      -1,    -1,    -1,    49,     5,     6,     7,     8,     9,    10,
+      11,    -1,    13,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    -1,    24,    25,    26,    27,    28,    -1,    -1,
+      31,    32,    -1,    -1,    -1,    -1,    -1,    38,    -1,    -1,
+      -1,    42,    -1,    -1,    -1,    -1,    -1,    48,    -1,    50,
+       5,     6,     7,     8,     9,    10,    11,    -1,    13,    14,
+      15,    16,    17,    18,    19,    20,    21,    22,    -1,    24,
+      25,    26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,
+      -1,    -1,    37,    -1,    -1,    -1,    -1,    42,     5,     6,
+       7,     8,     9,    10,    11,    -1,    13,    14,    15,    16,
+      17,    18,    19,    20,    21,    22,    -1,    24,    25,    26,
+      27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,    -1,
+      -1,    -1,    -1,    -1,    -1,    42
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
@@ -853,19 +866,19 @@
       28,    29,    30,    31,    32,    42,    58,    61,    65,    66,
       67,    68,    69,    70,    74,    85,   100,   102,    45,    46,
       38,    52,    97,    23,    38,    52,    88,    60,    38,    88,
-      48,    48,    45,    38,    48,    50,    62,    63,    64,    71,
-      75,    76,    67,    97,    38,    98,    99,    59,    88,     1,
-      65,    89,    90,    91,    61,    65,    88,    66,    82,    38,
-       1,    75,    72,    73,    74,    45,    47,    75,    30,    33,
-     101,    34,    48,    51,    46,    47,    61,    45,    46,    38,
-      42,    48,    53,    71,    77,    78,    92,    93,    94,    95,
-      46,     1,    91,    75,    38,    42,    48,    71,    83,    84,
-      49,    49,    49,    49,    74,    64,    96,     1,    79,    80,
-      81,    82,    35,    46,    99,    95,     1,    38,    77,    35,
-      77,    96,    34,    48,    45,    47,     1,    42,    83,    83,
-      34,    48,    45,    31,    51,    86,    87,    49,    49,    37,
-      47,    49,    49,     1,    79,    94,    49,    49,     1,    79,
-      35,    37,    82,    49,    49,    49,    49
+      48,    48,    45,    38,    42,    48,    50,    62,    63,    64,
+      71,    75,    76,    67,    97,    38,    98,    99,    59,    88,
+       1,    65,    89,    90,    91,    61,    65,    88,    66,    82,
+      38,     1,    75,    72,    73,    74,    45,    47,    75,    30,
+      33,   101,    34,    48,    51,    46,    47,    61,    45,    46,
+      38,    42,    48,    53,    71,    77,    78,    92,    93,    94,
+      95,    46,     1,    91,    75,    38,    42,    48,    71,    83,
+      84,    49,    49,    49,    49,    74,    64,    96,     1,    79,
+      80,    81,    82,    35,    46,    99,    95,     1,    38,    77,
+      35,    77,    96,    34,    48,    45,    47,     1,    42,    83,
+      83,    34,    48,    45,    31,    51,    86,    87,    49,    49,
+      37,    47,    49,    49,     1,    79,    94,    49,    49,     1,
+      79,    35,    37,    82,    49,    49,    49,    49
 };
 
 #define yyerrok		(yyerrstatus = 0)
@@ -912,46 +925,18 @@
     }								\
 while (YYID (0))
 
-
+/* Error token number */
 #define YYTERROR	1
 #define YYERRCODE	256
 
 
-/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
-   If N is 0, then set CURRENT to the empty location which ends
-   the previous symbol: RHS[0] (always defined).  */
-
-#define YYRHSLOC(Rhs, K) ((Rhs)[K])
-#ifndef YYLLOC_DEFAULT
-# define YYLLOC_DEFAULT(Current, Rhs, N)				\
-    do									\
-      if (YYID (N))                                                    \
-	{								\
-	  (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;	\
-	  (Current).first_column = YYRHSLOC (Rhs, 1).first_column;	\
-	  (Current).last_line    = YYRHSLOC (Rhs, N).last_line;		\
-	  (Current).last_column  = YYRHSLOC (Rhs, N).last_column;	\
-	}								\
-      else								\
-	{								\
-	  (Current).first_line   = (Current).last_line   =		\
-	    YYRHSLOC (Rhs, 0).last_line;				\
-	  (Current).first_column = (Current).last_column =		\
-	    YYRHSLOC (Rhs, 0).last_column;				\
-	}								\
-    while (YYID (0))
-#endif
-
-
 /* This macro is provided for backward compatibility. */
-
 #ifndef YY_LOCATION_PRINT
 # define YY_LOCATION_PRINT(File, Loc) ((void) 0)
 #endif
 
 
 /* YYLEX -- calling `yylex' with the right arguments.  */
-
 #ifdef YYLEX_PARAM
 # define YYLEX yylex (YYLEX_PARAM)
 #else
@@ -1014,7 +999,7 @@
   switch (yytype)
     {
       default:
-	break;
+        break;
     }
 }
 
@@ -1256,7 +1241,6 @@
 {
   YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]);
   YYSIZE_T yysize = yysize0;
-  YYSIZE_T yysize1;
   enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
   /* Internationalized format string. */
   const char *yyformat = YY_NULL;
@@ -1319,11 +1303,13 @@
                     break;
                   }
                 yyarg[yycount++] = yytname[yyx];
-                yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
-                if (! (yysize <= yysize1
-                       && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
-                  return 2;
-                yysize = yysize1;
+                {
+                  YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
+                  if (! (yysize <= yysize1
+                         && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+                    return 2;
+                  yysize = yysize1;
+                }
               }
         }
     }
@@ -1343,10 +1329,12 @@
 # undef YYCASE_
     }
 
-  yysize1 = yysize + yystrlen (yyformat);
-  if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
-    return 2;
-  yysize = yysize1;
+  {
+    YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+    if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+      return 2;
+    yysize = yysize1;
+  }
 
   if (*yymsg_alloc < yysize)
     {
@@ -1406,32 +1394,27 @@
     {
 
       default:
-	break;
+        break;
     }
 }
 
 
-/* Prevent warnings from -Wmissing-prototypes.  */
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
-#endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
 
 
 /* The lookahead symbol.  */
 int yychar;
 
+
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
 /* The semantic value of the lookahead symbol.  */
-YYSTYPE yylval;
+YYSTYPE yylval YY_INITIAL_VALUE(yyval_default);
 
 /* Number of syntax errors so far.  */
 int yynerrs;
@@ -1489,7 +1472,7 @@
   int yyn;
   int yyresult;
   /* Lookahead token as an internal (translated) token number.  */
-  int yytoken;
+  int yytoken = 0;
   /* The variables used to return semantic value and location from the
      action routines.  */
   YYSTYPE yyval;
@@ -1507,9 +1490,8 @@
      Keep to zero when no symbol should be popped.  */
   int yylen = 0;
 
-  yytoken = 0;
-  yyss = yyssa;
-  yyvs = yyvsa;
+  yyssp = yyss = yyssa;
+  yyvsp = yyvs = yyvsa;
   yystacksize = YYINITDEPTH;
 
   YYDPRINTF ((stderr, "Starting parse\n"));
@@ -1518,14 +1500,6 @@
   yyerrstatus = 0;
   yynerrs = 0;
   yychar = YYEMPTY; /* Cause a token to be read.  */
-
-  /* Initialize stack pointers.
-     Waste one element of value and location stack
-     so that they stay on the same level as the state stack.
-     The wasted elements are never initialized.  */
-  yyssp = yyss;
-  yyvsp = yyvs;
-
   goto yysetstate;
 
 /*------------------------------------------------------------.
@@ -1666,7 +1640,9 @@
   yychar = YYEMPTY;
 
   yystate = yyn;
+  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
   *++yyvsp = yylval;
+  YY_IGNORE_MAYBE_UNINITIALIZED_END
 
   goto yynewstate;
 
@@ -1916,7 +1892,14 @@
 
   case 69:
 
-    { (yyval) = (yyvsp[(4) - (4)]); }
+    { if (current_name != NULL) {
+		    error_with_pos("unexpected second declaration name");
+		    YYERROR;
+		  } else {
+		    current_name = (*(yyvsp[(1) - (1)]))->string;
+		    (yyval) = (yyvsp[(1) - (1)]);
+		  }
+		}
     break;
 
   case 70:
@@ -1926,12 +1909,12 @@
 
   case 71:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 72:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 73:
@@ -1941,12 +1924,12 @@
 
   case 74:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 78:
+  case 75:
 
-    { (yyval) = (yyvsp[(4) - (4)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 79:
@@ -1956,12 +1939,12 @@
 
   case 80:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 81:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 82:
@@ -1971,27 +1954,27 @@
 
   case 83:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 85:
+  case 84:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 86:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 89:
+  case 87:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 90:
 
-    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 91:
@@ -1999,12 +1982,17 @@
     { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
     break;
 
-  case 93:
+  case 92:
+
+    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    break;
+
+  case 94:
 
     { (yyval) = NULL; }
     break;
 
-  case 94:
+  case 95:
 
     { /* For version 2 checksums, we don't want to remember
 		     private parameter names.  */
@@ -2013,18 +2001,13 @@
 		}
     break;
 
-  case 95:
+  case 96:
 
     { remove_node((yyvsp[(1) - (1)]));
 		  (yyval) = (yyvsp[(1) - (1)]);
 		}
     break;
 
-  case 96:
-
-    { (yyval) = (yyvsp[(4) - (4)]); }
-    break;
-
   case 97:
 
     { (yyval) = (yyvsp[(4) - (4)]); }
@@ -2032,12 +2015,12 @@
 
   case 98:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 99:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 100:
@@ -2047,6 +2030,11 @@
 
   case 101:
 
+    { (yyval) = (yyvsp[(3) - (3)]); }
+    break;
+
+  case 102:
+
     { struct string_list *decl = *(yyvsp[(2) - (3)]);
 		  *(yyvsp[(2) - (3)]) = NULL;
 		  add_symbol(current_name, SYM_NORMAL, decl, is_extern);
@@ -2054,19 +2042,14 @@
 		}
     break;
 
-  case 102:
+  case 103:
 
     { (yyval) = NULL; }
     break;
 
-  case 104:
-
-    { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); }
-    break;
-
   case 105:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 106:
@@ -2076,65 +2059,70 @@
 
   case 107:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 110:
+  case 108:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = NULL; }
     break;
 
   case 111:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 112:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 113:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 116:
+  case 114:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 117:
 
-    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 118:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
     break;
 
-  case 120:
+  case 119:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 121:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 123:
+  case 122:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 124:
 
+    { (yyval) = (yyvsp[(3) - (3)]); }
+    break;
+
+  case 125:
+
     { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
-  case 127:
+  case 128:
 
     {
 			const char *name = strdup((*(yyvsp[(1) - (1)]))->string);
@@ -2142,7 +2130,7 @@
 		}
     break;
 
-  case 128:
+  case 129:
 
     {
 			const char *name = strdup((*(yyvsp[(1) - (3)]))->string);
@@ -2151,17 +2139,17 @@
 		}
     break;
 
-  case 129:
+  case 130:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 130:
+  case 131:
 
     { (yyval) = NULL; }
     break;
 
-  case 132:
+  case 133:
 
     { export_symbol((*(yyvsp[(3) - (5)]))->string); (yyval) = (yyvsp[(5) - (5)]); }
     break;
@@ -2330,7 +2318,9 @@
       YY_STACK_PRINT (yyss, yyssp);
     }
 
+  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
   *++yyvsp = yylval;
+  YY_IGNORE_MAYBE_UNINITIALIZED_END
 
 
   /* Shift the error token.  */
@@ -2404,4 +2394,3 @@
 {
   error_with_pos("%s", e);
 }
-
diff --git a/scripts/genksyms/parse.tab.h_shipped b/scripts/genksyms/parse.tab.h_shipped
index a4737de..4c00cef 100644
--- a/scripts/genksyms/parse.tab.h_shipped
+++ b/scripts/genksyms/parse.tab.h_shipped
@@ -1,4 +1,4 @@
-/* A Bison parser, made by GNU Bison 2.5.1.  */
+/* A Bison parser, made by GNU Bison 2.7.  */
 
 /* Bison interface for Yacc-like parsers in C
    
@@ -30,6 +30,15 @@
    This special exception was added by the Free Software Foundation in
    version 2.2 of Bison.  */
 
+#ifndef YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED
+# define YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED
+/* Enabling traces.  */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
 
 /* Tokens.  */
 #ifndef YYTOKENTYPE
@@ -83,7 +92,6 @@
 #endif
 
 
-
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
 typedef int YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
@@ -93,4 +101,18 @@
 
 extern YYSTYPE yylval;
 
+#ifdef YYPARSE_PARAM
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void *YYPARSE_PARAM);
+#else
+int yyparse ();
+#endif
+#else /* ! YYPARSE_PARAM */
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
 
+#endif /* !YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED  */
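A note on the churn above: the two shipped genksyms parser files are regenerated output, refreshed here with Bison 2.7 (the version bump is visible at the top of this header hunk, 2.5.1 -> 2.7). Newer Bison emits the yyparse() forward declaration into the generated header rather than the .c file, which is why the same block disappears from parse.tab.c_shipped and reappears here. The relocated declaration, copied from the hunk above rather than written fresh:

    /* yyparse() prototype as Bison 2.7 now places it in the shipped
     * header; the YYPARSE_PARAM branches shown in full in the hunk are
     * legacy Bison plumbing, not kernel code. */
    #if defined __STDC__ || defined __cplusplus
    int yyparse (void);
    #else
    int yyparse ();
    #endif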
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index b9f4cf2..723ab30 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -303,6 +303,15 @@
 		    $$ = $1;
 		  }
 		}
+	| TYPE
+		{ if (current_name != NULL) {
+		    error_with_pos("unexpected second declaration name");
+		    YYERROR;
+		  } else {
+		    current_name = (*$1)->string;
+		    $$ = $1;
+		  }
+		}
 	| direct_declarator '(' parameter_declaration_clause ')'
 		{ $$ = $4; }
 	| direct_declarator '(' error ')'
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c814f57..0b7dc2f 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -268,8 +268,7 @@
 			goto load;
 		sym_add_change_count(1);
 		if (!sym_defconfig_list) {
-			if (modules_sym)
-				sym_calc_value(modules_sym);
+			sym_calc_value(modules_sym);
 			return 1;
 		}
 
@@ -404,9 +403,7 @@
 	}
 	free(line);
 	fclose(in);
-
-	if (modules_sym)
-		sym_calc_value(modules_sym);
+	sym_calc_value(modules_sym);
 	return 0;
 }
 
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index ec8e203..0d883b3 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -100,6 +100,10 @@
 # Merge files, printing warnings on overridden values
 for MERGE_FILE in $MERGE_LIST ; do
 	echo "Merging $MERGE_FILE"
+	if [ ! -r "$MERGE_FILE" ]; then
+		echo "The merge file '$MERGE_FILE' does not exist.  Exit." >&2
+		exit 1
+	fi
 	CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
 
 	for CFG in $CFG_LIST ; do
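This merge_config.sh hunk is a behavioral fix rather than cleanup: previously a misspelled fragment name only produced a sed error for the unreadable file, and the merge continued with an empty CFG_LIST, silently yielding a config without the intended overrides. A missing merge file is now fatal before any of its symbols are consulted.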
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 70c5ee1..50878dc 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -467,8 +467,7 @@
 	for_all_symbols(i, sym)
 		sym->flags &= ~SYMBOL_VALID;
 	sym_add_change_count(1);
-	if (modules_sym)
-		sym_calc_value(modules_sym);
+	sym_calc_value(modules_sym);
 }
 
 bool sym_tristate_within_range(struct symbol *sym, tristate val)
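The confdata.c and symbol.c hunks above drop the same guard three times over. The check is dead code: by the time any of these paths run, conf_parse() has already materialized modules_sym through sym_lookup(), which allocates the symbol rather than ever returning NULL, so the call can be made unconditionally (that invariant is the assumption the simplification rests on). The before/after shape, taken from the hunks:

    /* Before: defensive NULL check on a symbol that always exists. */
    if (modules_sym)
            sym_calc_value(modules_sym);

    /* After: rely on the parser's invariant instead. */
    sym_calc_value(modules_sym);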
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index b6ac02d..ac498f0 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -22,6 +22,7 @@
 config,		T_CONFIG,	TF_COMMAND
 menuconfig,	T_MENUCONFIG,	TF_COMMAND
 help,		T_HELP,		TF_COMMAND
+---help---,	T_HELP,		TF_COMMAND
 if,		T_IF,		TF_COMMAND|TF_PARAM
 endif,		T_ENDIF,	TF_COMMAND
 depends,	T_DEPENDS,	TF_COMMAND
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index c77a8ef..360a62d 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -50,7 +50,7 @@
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
-      73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+      73, 73, 73, 73, 73,  0, 73, 73, 73, 73,
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
       73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
@@ -96,6 +96,7 @@
     char kconf_id_strings_str7[sizeof("default")];
     char kconf_id_strings_str8[sizeof("tristate")];
     char kconf_id_strings_str9[sizeof("endchoice")];
+    char kconf_id_strings_str10[sizeof("---help---")];
     char kconf_id_strings_str12[sizeof("def_tristate")];
     char kconf_id_strings_str13[sizeof("def_bool")];
     char kconf_id_strings_str14[sizeof("defconfig_list")];
@@ -132,6 +133,7 @@
     "default",
     "tristate",
     "endchoice",
+    "---help---",
     "def_tristate",
     "def_bool",
     "defconfig_list",
@@ -172,7 +174,7 @@
 {
   enum
     {
-      TOTAL_KEYWORDS = 33,
+      TOTAL_KEYWORDS = 34,
       MIN_WORD_LENGTH = 2,
       MAX_WORD_LENGTH = 14,
       MIN_HASH_VALUE = 2,
@@ -182,34 +184,36 @@
   static const struct kconf_id wordlist[] =
     {
       {-1}, {-1},
-#line 25 "scripts/kconfig/zconf.gperf"
+#line 26 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2,		T_IF,		TF_COMMAND|TF_PARAM},
-#line 36 "scripts/kconfig/zconf.gperf"
+#line 37 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3,		T_TYPE,		TF_COMMAND, S_INT},
       {-1},
-#line 26 "scripts/kconfig/zconf.gperf"
+#line 27 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5,		T_ENDIF,	TF_COMMAND},
       {-1},
-#line 29 "scripts/kconfig/zconf.gperf"
+#line 30 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7,	T_DEFAULT,	TF_COMMAND, S_UNKNOWN},
-#line 31 "scripts/kconfig/zconf.gperf"
+#line 32 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8,	T_TYPE,		TF_COMMAND, S_TRISTATE},
 #line 20 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9,	T_ENDCHOICE,	TF_COMMAND},
-      {-1}, {-1},
-#line 32 "scripts/kconfig/zconf.gperf"
+#line 25 "scripts/kconfig/zconf.gperf"
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10,	T_HELP,		TF_COMMAND},
+      {-1},
+#line 33 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12,	T_DEFAULT,	TF_COMMAND, S_TRISTATE},
-#line 35 "scripts/kconfig/zconf.gperf"
+#line 36 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13,	T_DEFAULT,	TF_COMMAND, S_BOOLEAN},
-#line 45 "scripts/kconfig/zconf.gperf"
+#line 46 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14,	T_OPT_DEFCONFIG_LIST,TF_OPTION},
       {-1}, {-1},
-#line 43 "scripts/kconfig/zconf.gperf"
+#line 44 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17,		T_ON,		TF_PARAM},
-#line 28 "scripts/kconfig/zconf.gperf"
+#line 29 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18,	T_OPTIONAL,	TF_COMMAND},
       {-1}, {-1},
-#line 42 "scripts/kconfig/zconf.gperf"
+#line 43 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21,		T_OPTION,	TF_COMMAND},
 #line 17 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22,	T_ENDMENU,	TF_COMMAND},
@@ -219,51 +223,51 @@
 #line 23 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str25,	T_MENUCONFIG,	TF_COMMAND},
       {-1},
-#line 44 "scripts/kconfig/zconf.gperf"
+#line 45 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27,	T_OPT_MODULES,	TF_OPTION},
-#line 47 "scripts/kconfig/zconf.gperf"
+#line 48 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28,	T_OPT_ALLNOCONFIG_Y,TF_OPTION},
 #line 16 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29,		T_MENU,		TF_COMMAND},
       {-1},
-#line 39 "scripts/kconfig/zconf.gperf"
+#line 40 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31,		T_SELECT,	TF_COMMAND},
 #line 21 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32,	T_COMMENT,	TF_COMMAND},
-#line 46 "scripts/kconfig/zconf.gperf"
+#line 47 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33,		T_OPT_ENV,	TF_OPTION},
       {-1},
-#line 40 "scripts/kconfig/zconf.gperf"
+#line 41 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35,		T_RANGE,	TF_COMMAND},
 #line 19 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36,		T_CHOICE,	TF_COMMAND},
       {-1}, {-1},
-#line 33 "scripts/kconfig/zconf.gperf"
+#line 34 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str39,		T_TYPE,		TF_COMMAND, S_BOOLEAN},
       {-1},
 #line 18 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41,		T_SOURCE,	TF_COMMAND},
-#line 41 "scripts/kconfig/zconf.gperf"
+#line 42 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42,	T_VISIBLE,	TF_COMMAND},
-#line 37 "scripts/kconfig/zconf.gperf"
+#line 38 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43,		T_TYPE,		TF_COMMAND, S_HEX},
       {-1}, {-1},
 #line 22 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46,		T_CONFIG,	TF_COMMAND},
-#line 34 "scripts/kconfig/zconf.gperf"
+#line 35 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str47,	T_TYPE,		TF_COMMAND, S_BOOLEAN},
       {-1}, {-1}, {-1},
-#line 38 "scripts/kconfig/zconf.gperf"
+#line 39 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str51,		T_TYPE,		TF_COMMAND, S_STRING},
       {-1}, {-1},
 #line 24 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str54,		T_HELP,		TF_COMMAND},
       {-1},
-#line 30 "scripts/kconfig/zconf.gperf"
+#line 31 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str56,		T_PROMPT,	TF_COMMAND},
       {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1},
       {-1}, {-1}, {-1}, {-1}, {-1}, {-1},
-#line 27 "scripts/kconfig/zconf.gperf"
+#line 28 "scripts/kconfig/zconf.gperf"
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str72,	T_DEPENDS,	TF_COMMAND}
     };
 
@@ -285,5 +289,5 @@
     }
   return 0;
 }
-#line 48 "scripts/kconfig/zconf.gperf"
+#line 49 "scripts/kconfig/zconf.gperf"
 
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 200a3fe..c410d25 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -66,9 +66,16 @@
 	memcpy(text, str, size);
 	text[size] = 0;
 }
+
+static void warn_ignored_character(char chr)
+{
+	fprintf(stderr,
+	        "%s:%d:warning: ignoring unsupported character '%c'\n",
+	        zconf_curname(), zconf_lineno(), chr);
+}
 %}
 
-n	[A-Za-z0-9_]
+n	[A-Za-z0-9_-]
 
 %%
 	int str = 0;
@@ -106,7 +113,7 @@
 		zconflval.string = text;
 		return T_WORD;
 	}
-	.
+	.	warn_ignored_character(*yytext);
 	\n	{
 		BEGIN(INITIAL);
 		current_file->lineno++;
@@ -132,8 +139,7 @@
 		BEGIN(STRING);
 	}
 	\n	BEGIN(INITIAL); current_file->lineno++; return T_EOL;
-	---	/* ignore */
-	({n}|[-/.])+	{
+	({n}|[/.])+	{
 		const struct kconf_id *id = kconf_id_lookup(yytext, yyleng);
 		if (id && id->flags & TF_PARAM) {
 			zconflval.id = id;
@@ -146,11 +152,7 @@
 	#.*	/* comment */
 	\\\n	current_file->lineno++;
 	[[:blank:]]+
-	.	{
-		fprintf(stderr,
-		        "%s:%d:warning: ignoring unsupported character '%c'\n",
-		        zconf_curname(), zconf_lineno(), *yytext);
-	}
+	.	warn_ignored_character(*yytext);
 	<<EOF>> {
 		BEGIN(INITIAL);
 	}
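Taken together, the zconf.gperf and zconf.l hunks turn "---help---" into an ordinary keyword: '-' joins the identifier character class (n becomes [A-Za-z0-9_-]), so the token reaches kconf_id_lookup() like any other command and the dedicated "--- /* ignore */" lexer rule can go. As a side effect of the new warn_ignored_character() helper, characters the COMMAND state previously dropped in silence are now reported with file and line, the same way the PARAM state already did.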
diff --git a/scripts/kconfig/zconf.lex.c_shipped b/scripts/kconfig/zconf.lex.c_shipped
index dd4e86c..37fdf61 100644
--- a/scripts/kconfig/zconf.lex.c_shipped
+++ b/scripts/kconfig/zconf.lex.c_shipped
@@ -72,7 +72,6 @@
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
-#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -103,6 +102,8 @@
 #define UINT32_MAX             (4294967295U)
 #endif
 
+#endif /* ! C99 */
+
 #endif /* ! FLEXINT_H */
 
 #ifdef __cplusplus
@@ -159,7 +160,15 @@
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
+ * Ditto for the __ia64__ case accordingly.
+ */
+#define YY_BUF_SIZE 32768
+#else
 #define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -365,354 +374,338 @@
 
 extern char *zconftext;
 #define yytext_ptr zconftext
-static yyconst flex_int16_t yy_nxt[][19] =
+static yyconst flex_int16_t yy_nxt[][18] =
     {
     {
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-        0,    0,    0,    0,    0,    0,    0,    0,    0
+        0,    0,    0,    0,    0,    0,    0,    0
     },
 
     {
        11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-       12,   12,   12,   12,   12,   12,   12,   12,   12
+       12,   12,   12,   12,   12,   12,   12,   12
     },
 
     {
        11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-       12,   12,   12,   12,   12,   12,   12,   12,   12
+       12,   12,   12,   12,   12,   12,   12,   12
     },
 
     {
        11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-       16,   16,   16,   18,   16,   16,   16,   16,   16
+       16,   18,   16,   16,   16,   16,   16,   16
     },
 
     {
        11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-       16,   16,   16,   18,   16,   16,   16,   16,   16
+       16,   18,   16,   16,   16,   16,   16,   16
 
     },
 
     {
        11,   19,   20,   21,   19,   19,   19,   19,   19,   19,
-       19,   19,   19,   19,   19,   19,   19,   19,   19
+       19,   19,   19,   19,   19,   19,   19,   19
     },
 
     {
        11,   19,   20,   21,   19,   19,   19,   19,   19,   19,
-       19,   19,   19,   19,   19,   19,   19,   19,   19
+       19,   19,   19,   19,   19,   19,   19,   19
     },
 
     {
        11,   22,   22,   23,   22,   24,   22,   22,   24,   22,
-       22,   22,   22,   22,   22,   22,   22,   25,   22
+       22,   22,   22,   22,   22,   22,   25,   22
     },
 
     {
        11,   22,   22,   23,   22,   24,   22,   22,   24,   22,
-       22,   22,   22,   22,   22,   22,   22,   25,   22
+       22,   22,   22,   22,   22,   22,   25,   22
     },
 
     {
        11,   26,   27,   28,   29,   30,   31,   32,   30,   33,
-       34,   35,   36,   36,   37,   38,   39,   40,   41
+       34,   35,   35,   36,   37,   38,   39,   40
 
     },
 
     {
        11,   26,   27,   28,   29,   30,   31,   32,   30,   33,
-       34,   35,   36,   36,   37,   38,   39,   40,   41
+       34,   35,   35,   36,   37,   38,   39,   40
     },
 
     {
       -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
-      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11
+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11
     },
 
     {
        11,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
-      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12
+      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12
     },
 
     {
-       11,  -13,   42,   43,  -13,  -13,   44,  -13,  -13,  -13,
-      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13
+       11,  -13,   41,   42,  -13,  -13,   43,  -13,  -13,  -13,
+      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13
     },
 
     {
        11,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
-      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14
+      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14
 
     },
 
     {
-       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
-       45,   45,   45,   45,   45,   45,   45,   45,   45
+       11,   44,   44,   45,   44,   44,   44,   44,   44,   44,
+       44,   44,   44,   44,   44,   44,   44,   44
     },
 
     {
        11,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
-      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16
+      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16
     },
 
     {
        11,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
-      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17
+      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17
     },
 
     {
        11,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,
-      -18,  -18,  -18,   47,  -18,  -18,  -18,  -18,  -18
+      -18,   46,  -18,  -18,  -18,  -18,  -18,  -18
     },
 
     {
-       11,   48,   48,  -19,   48,   48,   48,   48,   48,   48,
-       48,   48,   48,   48,   48,   48,   48,   48,   48
+       11,   47,   47,  -19,   47,   47,   47,   47,   47,   47,
+       47,   47,   47,   47,   47,   47,   47,   47
 
     },
 
     {
-       11,  -20,   49,   50,  -20,  -20,  -20,  -20,  -20,  -20,
-      -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20
+       11,  -20,   48,   49,  -20,  -20,  -20,  -20,  -20,  -20,
+      -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20
     },
 
     {
-       11,   51,  -21,  -21,   51,   51,   51,   51,   51,   51,
-       51,   51,   51,   51,   51,   51,   51,   51,   51
+       11,   50,  -21,  -21,   50,   50,   50,   50,   50,   50,
+       50,   50,   50,   50,   50,   50,   50,   50
     },
 
     {
-       11,   52,   52,   53,   52,  -22,   52,   52,  -22,   52,
-       52,   52,   52,   52,   52,   52,   52,  -22,   52
+       11,   51,   51,   52,   51,  -22,   51,   51,  -22,   51,
+       51,   51,   51,   51,   51,   51,  -22,   51
     },
 
     {
        11,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,
-      -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23
+      -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23
     },
 
     {
        11,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,
-      -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24
+      -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24
 
     },
 
     {
-       11,   54,   54,   55,   54,   54,   54,   54,   54,   54,
-       54,   54,   54,   54,   54,   54,   54,   54,   54
+       11,   53,   53,   54,   53,   53,   53,   53,   53,   53,
+       53,   53,   53,   53,   53,   53,   53,   53
     },
 
     {
        11,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,
-      -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26
+      -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26
     },
 
     {
-       11,  -27,   56,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
-      -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27
+       11,  -27,   55,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
+      -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27
     },
 
     {
        11,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,
-      -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28
+      -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28
     },
 
     {
        11,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,
-      -29,  -29,  -29,  -29,  -29,   57,  -29,  -29,  -29
+      -29,  -29,  -29,  -29,   56,  -29,  -29,  -29
 
     },
 
     {
        11,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,
-      -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30
+      -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30
     },
 
     {
-       11,   58,   58,  -31,   58,   58,   58,   58,   58,   58,
-       58,   58,   58,   58,   58,   58,   58,   58,   58
+       11,   57,   57,  -31,   57,   57,   57,   57,   57,   57,
+       57,   57,   57,   57,   57,   57,   57,   57
     },
 
     {
-       11,  -32,  -32,  -32,  -32,  -32,  -32,   59,  -32,  -32,
-      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32
+       11,  -32,  -32,  -32,  -32,  -32,  -32,   58,  -32,  -32,
+      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32
     },
 
     {
        11,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,
-      -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33
+      -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33
     },
 
     {
        11,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,
-      -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34
+      -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34
 
     },
 
     {
        11,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
-      -35,   60,   61,   61,  -35,  -35,  -35,  -35,  -35
+      -35,   59,   59,  -35,  -35,  -35,  -35,  -35
     },
 
     {
        11,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
-      -36,   61,   61,   61,  -36,  -36,  -36,  -36,  -36
+      -36,  -36,  -36,  -36,   60,  -36,  -36,  -36
     },
 
     {
        11,  -37,  -37,  -37,  -37,  -37,  -37,  -37,  -37,  -37,
-      -37,  -37,  -37,  -37,  -37,   62,  -37,  -37,  -37
+      -37,  -37,  -37,  -37,  -37,  -37,  -37,  -37
     },
 
     {
        11,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
-      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38
+      -38,  -38,  -38,  -38,   61,  -38,  -38,  -38
     },
 
     {
-       11,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
-      -39,  -39,  -39,  -39,  -39,   63,  -39,  -39,  -39
+       11,  -39,  -39,   62,  -39,  -39,  -39,  -39,  -39,  -39,
+      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39
 
     },
 
     {
-       11,  -40,  -40,   64,  -40,  -40,  -40,  -40,  -40,  -40,
-      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40
+       11,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
+      -40,  -40,  -40,  -40,  -40,  -40,  -40,   63
     },
 
     {
-       11,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
-      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,   65
+       11,  -41,   41,   42,  -41,  -41,   43,  -41,  -41,  -41,
+      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41
     },
 
     {
-       11,  -42,   42,   43,  -42,  -42,   44,  -42,  -42,  -42,
-      -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42
+       11,  -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42,
+      -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42
     },
 
     {
-       11,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
-      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43
+       11,   44,   44,   45,   44,   44,   44,   44,   44,   44,
+       44,   44,   44,   44,   44,   44,   44,   44
     },
 
     {
-       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
-       45,   45,   45,   45,   45,   45,   45,   45,   45
+       11,   44,   44,   45,   44,   44,   44,   44,   44,   44,
+       44,   44,   44,   44,   44,   44,   44,   44
 
     },
 
     {
-       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
-       45,   45,   45,   45,   45,   45,   45,   45,   45
+       11,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
+      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45
     },
 
     {
        11,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,
-      -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46
+      -46,   46,  -46,  -46,  -46,  -46,  -46,  -46
     },
 
     {
-       11,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,
-      -47,  -47,  -47,   47,  -47,  -47,  -47,  -47,  -47
+       11,   47,   47,  -47,   47,   47,   47,   47,   47,   47,
+       47,   47,   47,   47,   47,   47,   47,   47
     },
 
     {
-       11,   48,   48,  -48,   48,   48,   48,   48,   48,   48,
-       48,   48,   48,   48,   48,   48,   48,   48,   48
+       11,  -48,   48,   49,  -48,  -48,  -48,  -48,  -48,  -48,
+      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48
     },
 
     {
-       11,  -49,   49,   50,  -49,  -49,  -49,  -49,  -49,  -49,
-      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49
+       11,   50,  -49,  -49,   50,   50,   50,   50,   50,   50,
+       50,   50,   50,   50,   50,   50,   50,   50
 
     },
 
     {
-       11,   51,  -50,  -50,   51,   51,   51,   51,   51,   51,
-       51,   51,   51,   51,   51,   51,   51,   51,   51
+       11,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,
+      -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50
     },
 
     {
-       11,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,
-      -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51
+       11,   51,   51,   52,   51,  -51,   51,   51,  -51,   51,
+       51,   51,   51,   51,   51,   51,  -51,   51
     },
 
     {
-       11,   52,   52,   53,   52,  -52,   52,   52,  -52,   52,
-       52,   52,   52,   52,   52,   52,   52,  -52,   52
+       11,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
+      -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52
     },
 
     {
-       11,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
-      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53
+       11,  -53,  -53,   54,  -53,  -53,  -53,  -53,  -53,  -53,
+      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53
     },
 
     {
-       11,  -54,  -54,   55,  -54,  -54,  -54,  -54,  -54,  -54,
-      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54
+       11,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
+      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54
 
     },
 
     {
-       11,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,
-      -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55
+       11,  -55,   55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,
+      -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55
     },
 
     {
-       11,  -56,   56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,
-      -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56
+       11,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,
+      -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56
     },
 
     {
-       11,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
-      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57
+       11,   57,   57,  -57,   57,   57,   57,   57,   57,   57,
+       57,   57,   57,   57,   57,   57,   57,   57
     },
 
     {
-       11,   58,   58,  -58,   58,   58,   58,   58,   58,   58,
-       58,   58,   58,   58,   58,   58,   58,   58,   58
+       11,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,
+      -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58
     },
 
     {
        11,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,
-      -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59
+      -59,   59,   59,  -59,  -59,  -59,  -59,  -59
 
     },
 
     {
        11,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,
-      -60,   66,   61,   61,  -60,  -60,  -60,  -60,  -60
+      -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60
     },
 
     {
        11,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,
-      -61,   61,   61,   61,  -61,  -61,  -61,  -61,  -61
+      -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61
     },
 
     {
        11,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,
-      -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62
+      -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62
     },
 
     {
        11,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,
-      -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63
-    },
-
-    {
-       11,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,
-      -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64
-
-    },
-
-    {
-       11,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,
-      -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65
-    },
-
-    {
-       11,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,
-      -66,   61,   61,   61,  -66,  -66,  -66,  -66,  -66
+      -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63
     },
 
     } ;
@@ -732,8 +725,8 @@
 	*yy_cp = '\0'; \
 	(yy_c_buf_p) = yy_cp;
 
-#define YY_NUM_RULES 38
-#define YY_END_OF_BUFFER 39
+#define YY_NUM_RULES 37
+#define YY_END_OF_BUFFER 38
 /* This struct is not used in this scanner,
    but its presence is necessary. */
 struct yy_trans_info
@@ -741,15 +734,15 @@
 	flex_int32_t yy_verify;
 	flex_int32_t yy_nxt;
 	};
-static yyconst flex_int16_t yy_accept[67] =
+static yyconst flex_int16_t yy_accept[64] =
     {   0,
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-       39,    5,    4,    2,    3,    7,    8,    6,   37,   34,
-       36,   29,   33,   32,   31,   27,   26,   21,   13,   20,
-       24,   27,   11,   12,   23,   23,   18,   14,   19,   27,
-       27,    4,    2,    3,    3,    1,    6,   37,   34,   36,
-       35,   29,   28,   31,   30,   26,   15,   24,    9,   23,
-       23,   16,   17,   25,   10,   22
+       38,    5,    4,    2,    3,    7,    8,    6,   36,   33,
+       35,   28,   32,   31,   30,   26,   25,   21,   13,   20,
+       23,   26,   11,   12,   22,   18,   14,   19,   26,   26,
+        4,    2,    3,    3,    1,    6,   36,   33,   35,   34,
+       28,   27,   30,   29,   25,   15,   23,    9,   22,   16,
+       17,   24,   10
     } ;
 
 static yyconst flex_int32_t yy_ec[256] =
@@ -758,16 +751,16 @@
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    2,    4,    5,    6,    1,    1,    7,    8,    9,
-       10,    1,    1,    1,   11,   12,   12,   13,   13,   13,
-       13,   13,   13,   13,   13,   13,   13,    1,    1,   14,
-       15,   16,    1,    1,   13,   13,   13,   13,   13,   13,
-       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-        1,   17,    1,    1,   13,    1,   13,   13,   13,   13,
+       10,    1,    1,    1,   11,   12,   12,   11,   11,   11,
+       11,   11,   11,   11,   11,   11,   11,    1,    1,   13,
+       14,   15,    1,    1,   11,   11,   11,   11,   11,   11,
+       11,   11,   11,   11,   11,   11,   11,   11,   11,   11,
+       11,   11,   11,   11,   11,   11,   11,   11,   11,   11,
+        1,   16,    1,    1,   11,    1,   11,   11,   11,   11,
 
-       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-       13,   13,    1,   18,    1,    1,    1,    1,    1,    1,
+       11,   11,   11,   11,   11,   11,   11,   11,   11,   11,
+       11,   11,   11,   11,   11,   11,   11,   11,   11,   11,
+       11,   11,    1,   17,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -861,6 +854,13 @@
 	text[size] = 0;
 }
 
+static void warn_ignored_character(char chr)
+{
+	fprintf(stderr,
+	        "%s:%d:warning: ignoring unsupported character '%c'\n",
+	        zconf_curname(), zconf_lineno(), chr);
+}
+
 #define INITIAL 0
 #define COMMAND 1
 #define HELP 2
@@ -944,7 +944,12 @@
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
 #define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -952,7 +957,7 @@
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO fwrite( zconftext, zconfleng, 1, zconfout )
+#define ECHO do { if (fwrite( zconftext, zconfleng, 1, zconfout )) {} } while (0)
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -1132,7 +1137,7 @@
 	YY_BREAK
 case 7:
 YY_RULE_SETUP
-
+warn_ignored_character(*zconftext);
 	YY_BREAK
 case 8:
 /* rule 8 can match eol */
@@ -1203,10 +1208,6 @@
 	YY_BREAK
 case 22:
 YY_RULE_SETUP
-/* ignore */
-	YY_BREAK
-case 23:
-YY_RULE_SETUP
 {
 		const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
 		if (id && id->flags & TF_PARAM) {
@@ -1218,26 +1219,22 @@
 		return T_WORD;
 	}
 	YY_BREAK
-case 24:
+case 23:
 YY_RULE_SETUP
 /* comment */
 	YY_BREAK
-case 25:
-/* rule 25 can match eol */
+case 24:
+/* rule 24 can match eol */
 YY_RULE_SETUP
 current_file->lineno++;
 	YY_BREAK
-case 26:
+case 25:
 YY_RULE_SETUP
 
 	YY_BREAK
-case 27:
+case 26:
 YY_RULE_SETUP
-{
-		fprintf(stderr,
-		        "%s:%d:warning: ignoring unsupported character '%c'\n",
-		        zconf_curname(), zconf_lineno(), *zconftext);
-	}
+warn_ignored_character(*zconftext);
 	YY_BREAK
 case YY_STATE_EOF(PARAM):
 {
@@ -1245,8 +1242,8 @@
 	}
 	YY_BREAK
 
-case 28:
-/* rule 28 can match eol */
+case 27:
+/* rule 27 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1257,14 +1254,14 @@
 		return T_WORD_QUOTE;
 	}
 	YY_BREAK
+case 28:
+YY_RULE_SETUP
+{
+		append_string(zconftext, zconfleng);
+	}
+	YY_BREAK
 case 29:
-YY_RULE_SETUP
-{
-		append_string(zconftext, zconfleng);
-	}
-	YY_BREAK
-case 30:
-/* rule 30 can match eol */
+/* rule 29 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1275,13 +1272,13 @@
 		return T_WORD_QUOTE;
 	}
 	YY_BREAK
-case 31:
+case 30:
 YY_RULE_SETUP
 {
 		append_string(zconftext + 1, zconfleng - 1);
 	}
 	YY_BREAK
-case 32:
+case 31:
 YY_RULE_SETUP
 {
 		if (str == zconftext[0]) {
@@ -1292,8 +1289,8 @@
 			append_string(zconftext, 1);
 	}
 	YY_BREAK
-case 33:
-/* rule 33 can match eol */
+case 32:
+/* rule 32 can match eol */
 YY_RULE_SETUP
 {
 		printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
@@ -1308,7 +1305,7 @@
 	}
 	YY_BREAK
 
-case 34:
+case 33:
 YY_RULE_SETUP
 {
 		ts = 0;
@@ -1333,8 +1330,8 @@
 		}
 	}
 	YY_BREAK
-case 35:
-/* rule 35 can match eol */
+case 34:
+/* rule 34 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1345,15 +1342,15 @@
 		return T_HELPTEXT;
 	}
 	YY_BREAK
-case 36:
-/* rule 36 can match eol */
+case 35:
+/* rule 35 can match eol */
 YY_RULE_SETUP
 {
 		current_file->lineno++;
 		append_string("\n", 1);
 	}
 	YY_BREAK
-case 37:
+case 36:
 YY_RULE_SETUP
 {
 		while (zconfleng) {
@@ -1384,7 +1381,7 @@
 	yyterminate();
 }
 	YY_BREAK
-case 38:
+case 37:
 YY_RULE_SETUP
 YY_FATAL_ERROR( "flex scanner jammed" );
 	YY_BREAK
@@ -2114,8 +2111,8 @@
 
 /** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
  * scan from a @e copy of @a bytes.
- * @param bytes the byte buffer to scan
- * @param len the number of bytes in the buffer pointed to by @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a yybytes.
  * 
  * @return the newly allocated buffer state object.
  */
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index a7bf5f6..9a08fb5 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -469,7 +469,7 @@
     } else {
 #	print STDERR "other section '$name' = '$contents'\n";
 	if (defined($sections{$name}) && ($sections{$name} ne "")) {
-		print STDERR "Error(${file}:$.): duplicate section name '$name'\n";
+		print STDERR "${file}:$.: error: duplicate section name '$name'\n";
 		++$errors;
 	}
 	$sections{$name} = $contents;
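
This and the following kernel-doc hunks move the diagnostics from the script's
private "Error(file:line):" spelling to the conventional "file:line: level:
message" layout that compilers emit, so editors and CI log parsers can jump
straight to the offending location. For example (hypothetical path and line):

    # before:  Error(drivers/foo.c:123): duplicate section name 'Return'
    # after:   drivers/foo.c:123: error: duplicate section name 'Return'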
@@ -1820,7 +1820,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse struct or union!\n";
+	print STDERR "${file}:$.: error: Cannot parse struct or union!\n";
 	++$errors;
     }
 }
@@ -1841,7 +1841,7 @@
 	    push @parameterlist, $arg;
 	    if (!$parameterdescs{$arg}) {
 		$parameterdescs{$arg} = $undescribed;
-		print STDERR "Warning(${file}:$.): Enum value '$arg' ".
+		print STDERR "${file}:$.: warning: Enum value '$arg' ".
 		    "not described in enum '$declaration_name'\n";
 	    }
 
@@ -1859,7 +1859,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse enum!\n";
+	print STDERR "${file}:$.: error: Cannot parse enum!\n";
 	++$errors;
     }
 }
@@ -1887,7 +1887,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse typedef!\n";
+	print STDERR "${file}:$.: error: Cannot parse typedef!\n";
 	++$errors;
     }
 }
@@ -2019,11 +2019,11 @@
 	    $parameterdescs{$param_name} = $undescribed;
 
 	    if (($type eq 'function') || ($type eq 'enum')) {
-		print STDERR "Warning(${file}:$.): Function parameter ".
+		print STDERR "${file}:$.: warning: Function parameter ".
 		    "or member '$param' not " .
 		    "described in '$declaration_name'\n";
 	    }
-	    print STDERR "Warning(${file}:$.):" .
+	    print STDERR "${file}:$.: warning:" .
 			 " No description found for parameter '$param'\n";
 	    ++$warnings;
 	}
@@ -2074,14 +2074,14 @@
 		}
 		if ($err) {
 			if ($decl_type eq "function") {
-				print STDERR "Warning(${file}:$.): " .
+				print STDERR "${file}:$.: warning: " .
 					"Excess function parameter " .
 					"'$sects[$sx]' " .
 					"description in '$decl_name'\n";
 				++$warnings;
 			} else {
 				if ($nested !~ m/\Q$sects[$sx]\E/) {
-				    print STDERR "Warning(${file}:$.): " .
+				    print STDERR "${file}:$.: warning: " .
 					"Excess struct/union/enum/typedef member " .
 					"'$sects[$sx]' " .
 					"description in '$decl_name'\n";
@@ -2107,7 +2107,7 @@
 
         if (!defined($sections{$section_return}) ||
             $sections{$section_return} eq "") {
-                print STDERR "Warning(${file}:$.): " .
+                print STDERR "${file}:$.: warning: " .
                         "No description found for return value of " .
                         "'$declaration_name'\n";
                 ++$warnings;
@@ -2186,7 +2186,7 @@
 
 	create_parameterlist($args, ',', $file);
     } else {
-	print STDERR "Warning(${file}:$.): cannot understand function prototype: '$prototype'\n";
+	print STDERR "${file}:$.: warning: cannot understand function prototype: '$prototype'\n";
 	return;
     }
 
@@ -2251,7 +2251,7 @@
 		$tracepointargs = $1;
 	}
 	if (($tracepointname eq 0) || ($tracepointargs eq 0)) {
-		print STDERR "Warning(${file}:$.): Unrecognized tracepoint format: \n".
+		print STDERR "${file}:$.: warning: Unrecognized tracepoint format: \n".
 			     "$prototype\n";
 	} else {
 		$prototype = "static inline void trace_$tracepointname($tracepointargs)";
@@ -2450,7 +2450,7 @@
 		}
 
 		if (($declaration_purpose eq "") && $verbose) {
-			print STDERR "Warning(${file}:$.): missing initial short description on line:\n";
+			print STDERR "${file}:$.: warning: missing initial short description on line:\n";
 			print STDERR $_;
 			++$warnings;
 		}
@@ -2468,10 +2468,10 @@
 		}
 
 		if ($verbose) {
-		    print STDERR "Info(${file}:$.): Scanning doc for $identifier\n";
+		    print STDERR "${file}:$.: info: Scanning doc for $identifier\n";
 		}
 	    } else {
-		print STDERR "Warning(${file}:$.): Cannot understand $_ on line $.",
+		print STDERR "${file}:$.: warning: Cannot understand $_ on line $.",
 		" - I thought it was a doc line\n";
 		++$warnings;
 		$state = 0;
@@ -2483,7 +2483,7 @@
 
 		if (($contents ne "") && ($contents ne "\n")) {
 		    if (!$in_doc_sect && $verbose) {
-			print STDERR "Warning(${file}:$.): contents before sections\n";
+			print STDERR "${file}:$.: warning: contents before sections\n";
 			++$warnings;
 		    }
 		    dump_section($file, $section, xml_escape($contents));
@@ -2509,7 +2509,7 @@
 		}
 		# look for doc_com + <text> + doc_end:
 		if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
-		    print STDERR "Warning(${file}:$.): suspicious ending line: $_";
+		    print STDERR "${file}:$.: warning: suspicious ending line: $_";
 		    ++$warnings;
 		}
 
@@ -2539,7 +2539,7 @@
 		}
 	    } else {
 		# i dont know - bad line?  ignore.
-		print STDERR "Warning(${file}:$.): bad line: $_";
+		print STDERR "${file}:$.: warning: bad line: $_";
 		++$warnings;
 	    }
 	} elsif ($state == 5) { # scanning for split parameters
@@ -2631,7 +2631,7 @@
 	}
     }
     if ($initial_section_counter == $section_counter) {
-	print STDERR "Warning(${file}): no structured comments found\n";
+	print STDERR "${file}:1: warning: no structured comments found\n";
 	if (($function_only == 1) && ($show_not_found == 1)) {
 	    print STDERR "    Was looking for '$_'.\n" for keys %function_table;
 	}
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 168b43d..6a5e151 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -11,6 +11,12 @@
 
 #include "elfconfig.h"
 
+/* On BSD-like OSes, elf.h defines these according to the host's word size */
+#undef ELF_ST_BIND
+#undef ELF_ST_TYPE
+#undef ELF_R_SYM
+#undef ELF_R_TYPE
+
 #if KERNEL_ELFCLASS == ELFCLASS32
 
 #define Elf_Ehdr    Elf32_Ehdr
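
The #undefs matter because modpost needs these macros sized for the *target*
kernel's ELF class, not for the build host. One way to check whether a host's
elf.h would inject its own definition (a sketch; header contents vary by OS):

    printf '#include <elf.h>\nELF_ST_BIND(x)\n' | cc -E -x c - | tail -n 1
    # unexpanded output means the host header stays out of modpost's way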
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 99ca6e7..1aca224 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -21,37 +21,38 @@
 # Note that the rpm-pkg target cannot be used with KBUILD_OUTPUT,
 # but the binrpm-pkg target can; for some reason O= gets ignored.
 
-# Do we have rpmbuild, otherwise fall back to the older rpm
-RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
-	           else echo rpm; fi)
-
 # Remove hyphens since they have special meaning in RPM filenames
 KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
+KDEB_SOURCENAME ?= linux-$(KERNELRELEASE)
+export KDEB_SOURCENAME
 # Include only those top-level files that are needed by make, plus the GPL copy
-TAR_CONTENT := $(KBUILD_ALLDIRS) kernel.spec .config .scmversion Makefile \
+TAR_CONTENT := $(KBUILD_ALLDIRS) .config .scmversion Makefile \
                Kbuild Kconfig COPYING $(wildcard localversion*)
-TAR_CONTENT := $(addprefix $(KERNELPATH)/,$(TAR_CONTENT))
 MKSPEC     := $(srctree)/scripts/package/mkspec
 
+quiet_cmd_src_tar = TAR     $(2).tar.gz
+      cmd_src_tar = \
+if test "$(objtree)" != "$(srctree)"; then \
+	echo "Building source tarball is not possible outside the"; \
+	echo "kernel source tree. Don't set KBUILD_OUTPUT, or use the"; \
+	echo "binrpm-pkg or bindeb-pkg target instead."; \
+	false; \
+fi ; \
+$(srctree)/scripts/setlocalversion --save-scmversion; \
+ln -sf $(srctree) $(2); \
+tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
+	$(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
+rm -f $(2) $(objtree)/.scmversion
+
 # rpm-pkg
 # ---------------------------------------------------------------------------
 rpm-pkg rpm: FORCE
-	@if test "$(objtree)" != "$(srctree)"; then \
-		echo "Building source + binary RPM is not possible outside the"; \
-		echo "kernel source tree. Don't set KBUILD_OUTPUT, or use the"; \
-		echo "binrpm-pkg target instead."; \
-		false; \
-	fi
 	$(MAKE) clean
-	ln -sf $(srctree) $(KERNELPATH)
 	$(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
-	$(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
-	tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(TAR_CONTENT)
-	rm $(KERNELPATH)
-	rm -f $(objtree)/.scmversion
+	$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
 	mv -f $(objtree)/.tmp_version $(objtree)/.version
-	$(RPM) $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
+	rpmbuild --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
 	rm $(KERNELPATH).tar.gz kernel.spec
 
 # binrpm-pkg
@@ -62,7 +63,7 @@
 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
 	mv -f $(objtree)/.tmp_version $(objtree)/.version
 
-	$(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
+	rpmbuild --define "_builddir $(objtree)" --target \
 		$(UTS_MACHINE) -bb $(objtree)/binkernel.spec
 	rm binkernel.spec
 
@@ -84,11 +85,17 @@
 	} && \
 	\
 	$$KBUILD_PKG_ROOTCMD $(CONFIG_SHELL) \
-		$(srctree)/scripts/package/builddeb
+		$(srctree)/scripts/package/builddeb $@
 
 deb-pkg: FORCE
+	$(MAKE) clean
+	$(call cmd,src_tar,$(KDEB_SOURCENAME))
 	$(MAKE) KBUILD_SRC=
-	$(call cmd,builddeb)
+	+$(call cmd,builddeb)
+
+bindeb-pkg: FORCE
+	$(MAKE) KBUILD_SRC=
+	+$(call cmd,builddeb)
 
 clean-dirs += $(objtree)/debian/
 
@@ -133,8 +140,9 @@
 # ---------------------------------------------------------------------------
 help: FORCE
 	@echo '  rpm-pkg             - Build both source and binary RPM kernel packages'
-	@echo '  binrpm-pkg          - Build only the binary kernel package'
-	@echo '  deb-pkg             - Build the kernel as a deb package'
+	@echo '  binrpm-pkg          - Build only the binary kernel RPM package'
+	@echo '  deb-pkg             - Build both source and binary deb kernel packages'
+	@echo '  bindeb-pkg          - Build only the binary kernel deb package'
 	@echo '  tar-pkg             - Build the kernel as an uncompressed tarball'
 	@echo '  targz-pkg           - Build the kernel as a gzip compressed tarball'
 	@echo '  tarbz2-pkg          - Build the kernel as a bzip2 compressed tarball'
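
With the split targets, the former deb-pkg behaviour (binary only) moves to
bindeb-pkg, while deb-pkg now also produces a source package; both honour
KDEB_SOURCENAME. Typical invocations (a sketch):

    make deb-pkg                                # source + binary .debs; runs 'make clean' first
    make bindeb-pkg                             # binary-only .debs, analogous to binrpm-pkg
    make KDEB_SOURCENAME=linux-custom deb-pkg   # override the Debian source name
    make rpm-pkg                                # source + binary RPMs, now always via rpmbuild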
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 88dbf23..0cd46e1 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -15,6 +15,8 @@
 create_package() {
 	local pname="$1" pdir="$2"
 
+	mkdir -m 755 -p "$pdir/DEBIAN"
+	mkdir -p "$pdir/usr/share/doc/$pname"
 	cp debian/copyright "$pdir/usr/share/doc/$pname/"
 	cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
 	gzip -9 "$pdir/usr/share/doc/$pname/changelog.Debian"
@@ -25,8 +27,13 @@
 	chown -R root:root "$pdir"
 	chmod -R go-w "$pdir"
 
+	# Create the package
+	dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch}" -p$pname -P"$pdir"
+	dpkg --build "$pdir" ..
+}
+
+set_debarch() {
 	# Attempt to find the correct Debian architecture
-	local forcearch="" debarch=""
 	case "$UTS_MACHINE" in
 	i386|ia64|alpha)
 		debarch="$UTS_MACHINE" ;;
@@ -47,6 +54,7 @@
 	arm*)
 		debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
 	*)
+		debarch=$(dpkg --print-architecture)
 		echo "" >&2
 		echo "** ** **  WARNING  ** ** **" >&2
 		echo "" >&2
@@ -59,13 +67,8 @@
 	if [ -n "$KBUILD_DEBARCH" ] ; then
 		debarch="$KBUILD_DEBARCH"
 	fi
-	if [ -n "$debarch" ] ; then
-		forcearch="-DArchitecture=$debarch"
-	fi
+	forcearch="-DArchitecture=$debarch"
 
-	# Create the package
-	dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
-	dpkg --build "$pdir" ..
 }
 
 # Some variables and settings used throughout the script
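
Architecture detection now runs exactly once, up front, instead of inside
create_package(). A sketch of the resulting call order, using the variable
names the script introduces below:

    debarch=
    forcearch=
    set_debarch                               # fills debarch/forcearch, warns on unknown $UTS_MACHINE
    ...
    create_package "$packagename" "$tmpdir"   # mkdir DEBIAN/, dpkg-gencontrol, dpkg --build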
@@ -76,6 +79,7 @@
 else
 	packageversion=$version-$revision
 fi
+sourcename=$KDEB_SOURCENAME
 tmpdir="$objtree/debian/tmp"
 fwdir="$objtree/debian/fwtmp"
 kernel_headers_dir="$objtree/debian/hdrtmp"
@@ -86,6 +90,9 @@
 kernel_headers_packagename=linux-headers-$version
 libc_headers_packagename=linux-libc-dev
 dbg_packagename=$packagename-dbg
+debarch=
+forcearch=
+set_debarch
 
 if [ "$ARCH" = "um" ] ; then
 	packagename=user-mode-linux-$version
@@ -110,24 +117,13 @@
 # Setup the directory structure
 rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
 mkdir -m 755 -p "$tmpdir/DEBIAN"
-mkdir -p  "$tmpdir/lib" "$tmpdir/boot" "$tmpdir/usr/share/doc/$packagename"
-mkdir -m 755 -p "$fwdir/DEBIAN"
-mkdir -p "$fwdir/lib/firmware/$version/" "$fwdir/usr/share/doc/$fwpackagename"
-mkdir -m 755 -p "$libc_headers_dir/DEBIAN"
-mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename"
-mkdir -m 755 -p "$kernel_headers_dir/DEBIAN"
-mkdir -p "$kernel_headers_dir/usr/share/doc/$kernel_headers_packagename"
+mkdir -p "$tmpdir/lib" "$tmpdir/boot"
+mkdir -p "$fwdir/lib/firmware/$version/"
 mkdir -p "$kernel_headers_dir/lib/modules/$version/"
-if [ "$ARCH" = "um" ] ; then
-	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin"
-fi
-if [ -n "$BUILD_DEBUG" ] ; then
-	mkdir -p "$dbg_dir/usr/share/doc/$dbg_packagename"
-	mkdir -m 755 -p "$dbg_dir/DEBIAN"
-fi
 
 # Build and install the kernel
 if [ "$ARCH" = "um" ] ; then
+	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" "$tmpdir/usr/share/doc/$packagename"
 	$MAKE linux
 	cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
 	cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
@@ -143,6 +139,13 @@
 	cp arch/$ARCH/boot/$KBUILD_IMAGE "$tmpdir/$installed_image_path"
 fi
 
+if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+	# Only some architectures with OF support have this target
+	if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then
+		$MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
+	fi
+fi
+
 if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
 	INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
 	rm -f "$tmpdir/lib/modules/$version/build"
@@ -162,6 +165,12 @@
 			# then add a link to those
 			$OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $tmpdir/$module
 		done
+
+		# resign stripped modules
+		MODULE_SIG_ALL="$(grep -s '^CONFIG_MODULE_SIG_ALL=y' $KCONFIG_CONFIG || true)"
+		if [ -n "$MODULE_SIG_ALL" ]; then
+			INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_sign
+		fi
 	fi
 fi
 
@@ -206,7 +215,7 @@
 elif [ -n "$EMAIL" ]; then
        email=$EMAIL
 else
-       email=$(id -nu)@$(hostname -f)
+       email=$(id -nu)@$(hostname -f 2>/dev/null || hostname)
 fi
 if [ -n "$DEBFULLNAME" ]; then
        name=$DEBFULLNAME
@@ -230,7 +239,7 @@
 
 # Generate a simple changelog template
 cat <<EOF > debian/changelog
-linux-upstream ($packageversion) $distribution; urgency=low
+$sourcename ($packageversion) $distribution; urgency=low
 
   * Custom built Linux kernel.
 
@@ -257,12 +266,16 @@
 License version 2 can be found in \`/usr/share/common-licenses/GPL-2'.
 EOF
 
+
+build_depends="bc, kmod, cpio "
+
 # Generate a control file
 cat <<EOF > debian/control
-Source: linux-upstream
+Source: $sourcename
 Section: kernel
 Priority: optional
 Maintainer: $maintainer
+Build-Depends: $build_depends
 Standards-Version: 3.8.4
 Homepage: http://www.kernel.org/
 EOF
@@ -383,4 +396,33 @@
 	create_package "$dbg_packagename" "$dbg_dir"
 fi
 
+if [ "x$1" = "xdeb-pkg" ]
+then
+    cat <<EOF > debian/rules
+#!/usr/bin/make -f
+
+build:
+	\$(MAKE)
+
+binary-arch:
+	\$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
+
+clean:
+	rm -rf debian/*tmp
+	mv debian/ debian.backup # debian/ might be cleaned away
+	\$(MAKE) clean
+	mv debian.backup debian
+
+binary: binary-arch
+EOF
+	mv ${sourcename}.tar.gz ../${sourcename}_${version}.orig.tar.gz
+	tar caf ../${sourcename}_${packageversion}.debian.tar.gz debian/{copyright,rules,changelog,control}
+	dpkg-source -cdebian/control -ldebian/changelog --format="3.0 (custom)" --target-format="3.0 (quilt)" \
+		-b / ../${sourcename}_${version}.orig.tar.gz  ../${sourcename}_${packageversion}.debian.tar.gz
+	mv ${sourcename}_${packageversion}*dsc ..
+	dpkg-genchanges > ../${sourcename}_${packageversion}_${debarch}.changes
+else
+	dpkg-genchanges -b > ../${sourcename}_${packageversion}_${debarch}.changes
+fi
+
 exit 0
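
When builddeb is entered via deb-pkg, it fabricates a minimal debian/rules
whose binary-arch target simply re-invokes bindeb-pkg, then uses dpkg-source
in the "3.0 (custom)" format to wrap the tarball made earlier by cmd_src_tar.
The artifacts dropped in the parent directory look roughly like this for a
4.3.0 build on amd64 (names assumed for illustration):

    ../linux-4.3.0_4.3.0.orig.tar.gz
    ../linux-4.3.0_4.3.0-1.debian.tar.gz
    ../linux-4.3.0_4.3.0-1.dsc
    ../linux-4.3.0_4.3.0-1_amd64.changes      # plus the binary .debs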
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index d9ab94b..71004da 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -111,10 +111,8 @@
 echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
 
 echo "%ifnarch ppc64"
-echo 'cp vmlinux vmlinux.orig'
-echo 'bzip2 -9 vmlinux'
+echo 'bzip2 -9 --keep vmlinux'
 echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
-echo 'mv vmlinux.orig vmlinux'
 echo "%endif"
 
 if ! $PREBUILT; then
@@ -142,7 +140,6 @@
 echo ""
 echo "%files"
 echo '%defattr (-, root, root)'
-echo "%dir /lib/modules"
 echo "/lib/modules/$KERNELRELEASE"
 echo "%exclude /lib/modules/$KERNELRELEASE/build"
 echo "%exclude /lib/modules/$KERNELRELEASE/source"
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
deleted file mode 100644
index 6b5c83b..0000000
--- a/scripts/rt-tester/check-all.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-function testit ()
-{
- printf "%-30s: " $1
- ./rt-tester.py $1 | grep Pass
-}
-
-testit t2-l1-2rt-sameprio.tst
-testit t2-l1-pi.tst
-testit t2-l1-signal.tst
-#testit t2-l2-2rt-deadlock.tst
-testit t3-l1-pi-1rt.tst
-testit t3-l1-pi-2rt.tst
-testit t3-l1-pi-3rt.tst
-testit t3-l1-pi-signal.tst
-testit t3-l1-pi-steal.tst
-testit t3-l2-pi.tst
-testit t4-l2-pi-deboost.tst
-testit t5-l4-pi-boost-deboost.tst
-testit t5-l4-pi-boost-deboost-setsched.tst
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
deleted file mode 100755
index 6d916c2..0000000
--- a/scripts/rt-tester/rt-tester.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-#
-# rt-mutex tester
-#
-# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-import os
-import sys
-import getopt
-import shutil
-import string
-
-# Globals
-quiet = 0
-test = 0
-comments = 0
-
-sysfsprefix = "/sys/devices/system/rttest/rttest"
-statusfile = "/status"
-commandfile = "/command"
-
-# Command opcodes
-cmd_opcodes = {
-    "schedother"    : "1",
-    "schedfifo"     : "2",
-    "lock"          : "3",
-    "locknowait"    : "4",
-    "lockint"       : "5",
-    "lockintnowait" : "6",
-    "lockcont"      : "7",
-    "unlock"        : "8",
-    "signal"        : "11",
-    "resetevent"    : "98",
-    "reset"         : "99",
-    }
-
-test_opcodes = {
-    "prioeq"        : ["P" , "eq" , None],
-    "priolt"        : ["P" , "lt" , None],
-    "priogt"        : ["P" , "gt" , None],
-    "nprioeq"       : ["N" , "eq" , None],
-    "npriolt"       : ["N" , "lt" , None],
-    "npriogt"       : ["N" , "gt" , None],
-    "unlocked"      : ["M" , "eq" , 0],
-    "trylock"       : ["M" , "eq" , 1],
-    "blocked"       : ["M" , "eq" , 2],
-    "blockedwake"   : ["M" , "eq" , 3],
-    "locked"        : ["M" , "eq" , 4],
-    "opcodeeq"      : ["O" , "eq" , None],
-    "opcodelt"      : ["O" , "lt" , None],
-    "opcodegt"      : ["O" , "gt" , None],
-    "eventeq"       : ["E" , "eq" , None],
-    "eventlt"       : ["E" , "lt" , None],
-    "eventgt"       : ["E" , "gt" , None],
-    }
-
-# Print usage information
-def usage():
-    print "rt-tester.py <-c -h -q -t> <testfile>"
-    print " -c    display comments after first command"
-    print " -h    help"
-    print " -q    quiet mode"
-    print " -t    test mode (syntax check)"
-    print " testfile: read test specification from testfile"
-    print " otherwise from stdin"
-    return
-
-# Print progress when not in quiet mode
-def progress(str):
-    if not quiet:
-        print str
-
-# Analyse a status value
-def analyse(val, top, arg):
-
-    intval = int(val)
-
-    if top[0] == "M":
-        intval = intval / (10 ** int(arg))
-	intval = intval % 10
-        argval = top[2]
-    elif top[0] == "O":
-        argval = int(cmd_opcodes.get(arg, arg))
-    else:
-        argval = int(arg)
-
-    # progress("%d %s %d" %(intval, top[1], argval))
-
-    if top[1] == "eq" and intval == argval:
-	return 1
-    if top[1] == "lt" and intval < argval:
-        return 1
-    if top[1] == "gt" and intval > argval:
-	return 1
-    return 0
-
-# Parse the commandline
-try:
-    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
-except getopt.GetoptError, ex:
-    usage()
-    sys.exit(1)
-
-# Parse commandline options
-for option, value in options:
-    if option == "-c":
-        comments = 1
-    elif option == "-q":
-        quiet = 1
-    elif option == "-t":
-        test = 1
-    elif option == '-h':
-        usage()
-        sys.exit(0)
-
-# Select the input source
-if arguments:
-    try:
-        fd = open(arguments[0])
-    except Exception,ex:
-        sys.stderr.write("File not found %s\n" %(arguments[0]))
-        sys.exit(1)
-else:
-    fd = sys.stdin
-
-linenr = 0
-
-# Read the test patterns
-while 1:
-
-    linenr = linenr + 1
-    line = fd.readline()
-    if not len(line):
-        break
-
-    line = line.strip()
-    parts = line.split(":")
-
-    if not parts or len(parts) < 1:
-        continue
-
-    if len(parts[0]) == 0:
-        continue
-
-    if parts[0].startswith("#"):
-	if comments > 1:
-	    progress(line)
-	continue
-
-    if comments == 1:
-	comments = 2
-
-    progress(line)
-
-    cmd = parts[0].strip().lower()
-    opc = parts[1].strip().lower()
-    tid = parts[2].strip()
-    dat = parts[3].strip()
-
-    try:
-        # Test or wait for a status value
-        if cmd == "t" or cmd == "w":
-            testop = test_opcodes[opc]
-
-            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
-            if test:
-		print fname
-                continue
-
-            while 1:
-                query = 1
-                fsta = open(fname, 'r')
-                status = fsta.readline().strip()
-                fsta.close()
-                stat = status.split(",")
-                for s in stat:
-		    s = s.strip()
-                    if s.startswith(testop[0]):
-                        # Separate status value
-                        val = s[2:].strip()
-                        query = analyse(val, testop, dat)
-                        break
-                if query or cmd == "t":
-                    break
-
-            progress("   " + status)
-
-            if not query:
-                sys.stderr.write("Test failed in line %d\n" %(linenr))
-		sys.exit(1)
-
-        # Issue a command to the tester
-        elif cmd == "c":
-            cmdnr = cmd_opcodes[opc]
-            # Build command string and sys filename
-            cmdstr = "%s:%s" %(cmdnr, dat)
-            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
-            if test:
-		print fname
-                continue
-            fcmd = open(fname, 'w')
-            fcmd.write(cmdstr)
-            fcmd.close()
-
-    except Exception,ex:
-    	sys.stderr.write(str(ex))
-        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
-        if not test:
-            fd.close()
-            sys.exit(1)
-
-# Normal exit pass
-print "Pass"
-sys.exit(0)
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
deleted file mode 100644
index 3710c8b..0000000
--- a/scripts/rt-tester/t2-l1-2rt-sameprio.tst
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-C: locknowait:		1:	0
-W: locked:		0: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T0
-W: unlocked:		0: 	0
-T: prioeq:		0: 	80
-
-# Unlock
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-# T1,T0 lock L0
-C: locknowait:		1: 	0
-C: locknowait:		0:	0
-W: locked:		1: 	0
-W: blocked:		0: 	0
-T: prioeq:		1: 	80
-
-# T1 unlock L0
-C: unlock:		1: 	0
-W: locked:		0: 	0
-
-# Verify T1
-W: unlocked:		1: 	0
-T: prioeq:		1: 	80
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
deleted file mode 100644
index b4cc959..0000000
--- a/scripts/rt-tester/t2-l1-pi.tst
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T1
-W: unlocked:		0: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
deleted file mode 100644
index 1b57376..0000000
--- a/scripts/rt-tester/t2-l1-signal.tst
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Interrupt T1
-C: signal:		1:	0
-W: unlocked:		1: 	0
-T: opcodeeq:		1:	-4
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
deleted file mode 100644
index 68b1062..0000000
--- a/scripts/rt-tester/t2-l2-2rt-deadlock.tst
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 2 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1:	1
-W: locked:		1: 	1
-
-# T0 lock L1
-C: lockintnowait:	0: 	1
-W: blocked:		0: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Make deadlock go away
-C: signal:		1:	0
-W: unlocked:		1:	0
-C: signal:		0:	0
-W: unlocked:		0:	1
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-C: unlock:		1: 	1
-W: unlocked:		1: 	1
-
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
deleted file mode 100644
index 8e6c8b1..0000000
--- a/scripts/rt-tester/t3-l1-pi-1rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
deleted file mode 100644
index 69c2212..0000000
--- a/scripts/rt-tester/t3-l1-pi-2rt.tst
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-T: prioeq:		1:	81
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
deleted file mode 100644
index 9b0f1eb..0000000
--- a/scripts/rt-tester/t3-l1-pi-3rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: prioeq:		0:	80
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: locked:		1: 	0
-W: unlocked:		2: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
deleted file mode 100644
index 39ec74a..0000000
--- a/scripts/rt-tester/t3-l1-pi-signal.tst
+++ /dev/null
@@ -1,93 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-# Reset event counter
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set priorities
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0:	0
-W: locked:		0: 	0
-
-# T1 lock L0, no wait in the wakeup path
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0:	80
-T: prioeq:		1:	80
-
-# T2 lock L0 interruptible, no wait in the wakeup path
-C: lockintnowait:	2:	0
-W: blocked:		2: 	0
-T: prioeq:		0:	81
-T: prioeq:		1:	80
-
-# Interrupt T2
-C: signal:		2:	2
-W: unlocked:		2:	0
-T: prioeq:		1:	80
-T: prioeq:		0:	80
-
-T: locked:		0:	0
-T: blocked:		1:	0
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 has locked L0 and exit
-W: locked:		1:	0
-W: unlocked:		0: 	0
-T: priolt:		0:	1
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-
-
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
deleted file mode 100644
index e03db7e..0000000
--- a/scripts/rt-tester/t3-l1-pi-steal.tst
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI steal pending ownership
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lock:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 is in the wakeup loop
-W: blockedwake:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: lock:		2: 	0
-# T1 leave wakeup loop
-C: lockcont:		1: 	0
-
-# T2 must have the lock and T1 must be blocked
-W: locked:		2: 	0
-W: blocked:		1: 	0
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-# Wait until T1 is in the wakeup loop and let it run
-W: blockedwake:		1: 	0
-C: lockcont:		1: 	0
-W: locked:		1: 	0
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
deleted file mode 100644
index 7b59100..0000000
--- a/scripts/rt-tester/t3-l2-pi.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
deleted file mode 100644
index 2f0e049..0000000
--- a/scripts/rt-tester/t4-l2-pi-deboost.tst
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 4 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T3 lock L0
-C: lockintnowait:	3: 	0
-W: blocked:		3: 	0
-T: prioeq:		0: 	83
-
-# T0 lock L1
-C: lock:		0: 	1
-W: blocked:		0: 	1
-T: prioeq:		1: 	83
-
-# T1 unlock L1
-C: unlock:		1:	1
-
-# Wait until T0 is in the wakeup code
-W: blockedwake:		0:	1
-
-# Verify that T1 is unboosted
-W: unlocked:		1: 	1
-T: priolt:		1: 	1
-
-# T2 lock L1 (T0 is boosted and pending owner !)
-C: locknowait:		2:	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	83
-
-# Interrupt T3 and wait until T3 returned
-C: signal:		3:	0
-W: unlocked:		3:	0
-
-# Verify prio of T0 (still pending owner,
-# but T2 is enqueued due to the previous boost by T3
-T: prioeq:		0:	82
-
-# Let T0 continue
-C: lockcont:		0:	1
-W: locked:		0:	1
-
-# Unlock L1 and let T2 get L1
-C: unlock:		0:	1
-W: locked:		2:	1
-
-# Verify that T0 is unboosted
-W: unlocked:		0:	1
-T: priolt:		0:	1
-
-# Unlock everything and exit
-C: unlock:		2:	1
-W: unlocked:		2:	1
-
-C: unlock:		0:	0
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
deleted file mode 100644
index 04f4034..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI - modify priority of blocked threads
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Reduce prio of T4
-C: schedfifo:		4: 	80
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-T: prioeq:		4:	80
-
-# Increase prio of T4
-C: schedfifo:		4: 	84
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	80
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Increase prio of T3
-C: schedfifo:		3: 	85
-T: prioeq:		0: 	85
-T: prioeq:		1:	85
-T: prioeq:		2:	85
-T: prioeq:		3:	85
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	83
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
deleted file mode 100644
index a48a6ee..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
+++ /dev/null
@@ -1,138 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index 62b34ce..e10beb1 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -98,6 +98,7 @@
 
 	/* types, roles, and allows */
 	fprintf(fout, "type base_t;\n");
+	fprintf(fout, "role base_r;\n");
 	fprintf(fout, "role base_r types { base_t };\n");
 	for (i = 0; secclass_map[i].name; i++)
 		fprintf(fout, "allow base_t base_t:%s *;\n",
diff --git a/scripts/sign-file b/scripts/sign-file
deleted file mode 100755
index 3906ee1..0000000
--- a/scripts/sign-file
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/usr/bin/perl -w
-#
-# Sign a module file using the given key.
-#
-
-my $USAGE =
-"Usage: scripts/sign-file [-v] <hash algo> <key> <x509> <module> [<dest>]\n" .
-"       scripts/sign-file [-v] -s <raw sig> <hash algo> <x509> <module> [<dest>]\n";
-
-use strict;
-use FileHandle;
-use IPC::Open2;
-use Getopt::Std;
-
-my %opts;
-getopts('vs:', \%opts) or die $USAGE;
-my $verbose = $opts{'v'};
-my $signature_file = $opts{'s'};
-
-die $USAGE if ($#ARGV > 4);
-die $USAGE if (!$signature_file && $#ARGV < 3 || $signature_file && $#ARGV < 2);
-
-my $dgst = shift @ARGV;
-my $private_key;
-if (!$signature_file) {
-	$private_key = shift @ARGV;
-}
-my $x509 = shift @ARGV;
-my $module = shift @ARGV;
-my ($dest, $keep_orig);
-if (@ARGV) {
-	$dest = $ARGV[0];
-	$keep_orig = 1;
-} else {
-	$dest = $module . "~";
-}
-
-die "Can't read private key\n" if (!$signature_file && !-r $private_key);
-die "Can't read signature file\n" if ($signature_file && !-r $signature_file);
-die "Can't read X.509 certificate\n" unless (-r $x509);
-die "Can't read module\n" unless (-r $module);
-
-#
-# Function to read the contents of a file into a variable.
-#
-sub read_file($)
-{
-    my ($file) = @_;
-    my $contents;
-    my $len;
-
-    open(FD, "<$file") || die $file;
-    binmode FD;
-    my @st = stat(FD);
-    die $file if (!@st);
-    $len = read(FD, $contents, $st[7]) || die $file;
-    close(FD) || die $file;
-    die "$file: Wanted length ", $st[7], ", got ", $len, "\n"
-	if ($len != $st[7]);
-    return $contents;
-}
-
-###############################################################################
-#
-# First of all, we have to parse the X.509 certificate to find certain details
-# about it.
-#
-# We read the DER-encoded X509 certificate and parse it to extract the Subject
-# name and Subject Key Identifier.  Theis provides the data we need to build
-# the certificate identifier.
-#
-# The signer's name part of the identifier is fabricated from the commonName,
-# the organizationName or the emailAddress components of the X.509 subject
-# name.
-#
-# The subject key ID is used to select which of that signer's certificates
-# we're intending to use to sign the module.
-#
-###############################################################################
-my $x509_certificate = read_file($x509);
-
-my $UNIV = 0 << 6;
-my $APPL = 1 << 6;
-my $CONT = 2 << 6;
-my $PRIV = 3 << 6;
-
-my $CONS = 0x20;
-
-my $BOOLEAN	= 0x01;
-my $INTEGER	= 0x02;
-my $BIT_STRING	= 0x03;
-my $OCTET_STRING = 0x04;
-my $NULL	= 0x05;
-my $OBJ_ID	= 0x06;
-my $UTF8String	= 0x0c;
-my $SEQUENCE	= 0x10;
-my $SET		= 0x11;
-my $UTCTime	= 0x17;
-my $GeneralizedTime = 0x18;
-
-my %OIDs = (
-    pack("CCC", 85, 4, 3)	=> "commonName",
-    pack("CCC", 85, 4, 6)	=> "countryName",
-    pack("CCC", 85, 4, 10)	=> "organizationName",
-    pack("CCC", 85, 4, 11)	=> "organizationUnitName",
-    pack("CCCCCCCCC", 42, 134, 72, 134, 247, 13, 1, 1, 1) => "rsaEncryption",
-    pack("CCCCCCCCC", 42, 134, 72, 134, 247, 13, 1, 1, 5) => "sha1WithRSAEncryption",
-    pack("CCCCCCCCC", 42, 134, 72, 134, 247, 13, 1, 9, 1) => "emailAddress",
-    pack("CCC", 85, 29, 35)	=> "authorityKeyIdentifier",
-    pack("CCC", 85, 29, 14)	=> "subjectKeyIdentifier",
-    pack("CCC", 85, 29, 19)	=> "basicConstraints"
-);
-
-###############################################################################
-#
-# Extract an ASN.1 element from a string and return information about it.
-#
-###############################################################################
-sub asn1_extract($$@)
-{
-    my ($cursor, $expected_tag, $optional) = @_;
-
-    return [ -1 ]
-	if ($cursor->[1] == 0 && $optional);
-
-    die $x509, ": ", $cursor->[0], ": ASN.1 data underrun (elem ", $cursor->[1], ")\n"
-	if ($cursor->[1] < 2);
-
-    my ($tag, $len) = unpack("CC", substr(${$cursor->[2]}, $cursor->[0], 2));
-
-    if ($expected_tag != -1 && $tag != $expected_tag) {
-	return [ -1 ]
-	    if ($optional);
-	die $x509, ": ", $cursor->[0], ": ASN.1 unexpected tag (", $tag,
-	" not ", $expected_tag, ")\n";
-    }
-
-    $cursor->[0] += 2;
-    $cursor->[1] -= 2;
-
-    die $x509, ": ", $cursor->[0], ": ASN.1 long tag\n"
-	if (($tag & 0x1f) == 0x1f);
-    die $x509, ": ", $cursor->[0], ": ASN.1 indefinite length\n"
-	if ($len == 0x80);
-
-    if ($len > 0x80) {
-	my $l = $len - 0x80;
-	die $x509, ": ", $cursor->[0], ": ASN.1 data underrun (len len $l)\n"
-	    if ($cursor->[1] < $l);
-
-	if ($l == 0x1) {
-	    $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1));
-	} elsif ($l == 0x2) {
-	    $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2));
-	} elsif ($l == 0x3) {
-	    $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16;
-	    $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2));
-	} elsif ($l == 0x4) {
-	    $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4));
-	} else {
-	    die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n";
-	}
-
-	$cursor->[0] += $l;
-	$cursor->[1] -= $l;
-    }
-
-    die $x509, ": ", $cursor->[0], ": ASN.1 data underrun (", $len, ")\n"
-	if ($cursor->[1] < $len);
-
-    my $ret = [ $tag, [ $cursor->[0], $len, $cursor->[2] ] ];
-    $cursor->[0] += $len;
-    $cursor->[1] -= $len;
-
-    return $ret;
-}
-
-###############################################################################
-#
-# Retrieve the data referred to by a cursor
-#
-###############################################################################
-sub asn1_retrieve($)
-{
-    my ($cursor) = @_;
-    my ($offset, $len, $data) = @$cursor;
-    return substr($$data, $offset, $len);
-}
-
-###############################################################################
-#
-# Roughly parse the X.509 certificate
-#
-###############################################################################
-my $cursor = [ 0, length($x509_certificate), \$x509_certificate ];
-
-my $cert = asn1_extract($cursor, $UNIV | $CONS | $SEQUENCE);
-my $tbs = asn1_extract($cert->[1], $UNIV | $CONS | $SEQUENCE);
-my $version = asn1_extract($tbs->[1], $CONT | $CONS | 0, 1);
-my $serial_number = asn1_extract($tbs->[1], $UNIV | $INTEGER);
-my $sig_type = asn1_extract($tbs->[1], $UNIV | $CONS | $SEQUENCE);
-my $issuer = asn1_extract($tbs->[1], $UNIV | $CONS | $SEQUENCE);
-my $validity = asn1_extract($tbs->[1], $UNIV | $CONS | $SEQUENCE);
-my $subject = asn1_extract($tbs->[1], $UNIV | $CONS | $SEQUENCE);
-my $key = asn1_extract($tbs->[1], $UNIV | $CONS | $SEQUENCE);
-my $issuer_uid = asn1_extract($tbs->[1], $CONT | $CONS | 1, 1);
-my $subject_uid = asn1_extract($tbs->[1], $CONT | $CONS | 2, 1);
-my $extension_list = asn1_extract($tbs->[1], $CONT | $CONS | 3, 1);
-
-my $subject_key_id = ();
-my $authority_key_id = ();
-
-#
-# Parse the extension list
-#
-if ($extension_list->[0] != -1) {
-    my $extensions = asn1_extract($extension_list->[1], $UNIV | $CONS | $SEQUENCE);
-
-    while ($extensions->[1]->[1] > 0) {
-	my $ext = asn1_extract($extensions->[1], $UNIV | $CONS | $SEQUENCE);
-	my $x_oid = asn1_extract($ext->[1], $UNIV | $OBJ_ID);
-	my $x_crit = asn1_extract($ext->[1], $UNIV | $BOOLEAN, 1);
-	my $x_val = asn1_extract($ext->[1], $UNIV | $OCTET_STRING);
-
-	my $raw_oid = asn1_retrieve($x_oid->[1]);
-	next if (!exists($OIDs{$raw_oid}));
-	my $x_type = $OIDs{$raw_oid};
-
-	my $raw_value = asn1_retrieve($x_val->[1]);
-
-	if ($x_type eq "subjectKeyIdentifier") {
-	    my $vcursor = [ 0, length($raw_value), \$raw_value ];
-
-	    $subject_key_id = asn1_extract($vcursor, $UNIV | $OCTET_STRING);
-	}
-    }
-}
-
-###############################################################################
-#
-# Determine what we're going to use as the signer's name.  In order of
-# preference, take one of: commonName, organizationName or emailAddress.
-#
-###############################################################################
-my $org = "";
-my $cn = "";
-my $email = "";
-
-while ($subject->[1]->[1] > 0) {
-    my $rdn = asn1_extract($subject->[1], $UNIV | $CONS | $SET);
-    my $attr = asn1_extract($rdn->[1], $UNIV | $CONS | $SEQUENCE);
-    my $n_oid = asn1_extract($attr->[1], $UNIV | $OBJ_ID);
-    my $n_val = asn1_extract($attr->[1], -1);
-
-    my $raw_oid = asn1_retrieve($n_oid->[1]);
-    next if (!exists($OIDs{$raw_oid}));
-    my $n_type = $OIDs{$raw_oid};
-
-    my $raw_value = asn1_retrieve($n_val->[1]);
-
-    if ($n_type eq "organizationName") {
-	$org = $raw_value;
-    } elsif ($n_type eq "commonName") {
-	$cn = $raw_value;
-    } elsif ($n_type eq "emailAddress") {
-	$email = $raw_value;
-    }
-}
-
-my $signers_name = $email;
-
-if ($org && $cn) {
-    # Don't use the organizationName if the commonName repeats it
-    if (length($org) <= length($cn) &&
-	substr($cn, 0, length($org)) eq $org) {
-	$signers_name = $cn;
-	goto got_id_name;
-    }
-
-    # Or a signifcant chunk of it
-    if (length($org) >= 7 &&
-	length($cn) >= 7 &&
-	substr($cn, 0, 7) eq substr($org, 0, 7)) {
-	$signers_name = $cn;
-	goto got_id_name;
-    }
-
-    $signers_name = $org . ": " . $cn;
-} elsif ($org) {
-    $signers_name = $org;
-} elsif ($cn) {
-    $signers_name = $cn;
-}
-
-got_id_name:
-
-die $x509, ": ", "X.509: Couldn't find the Subject Key Identifier extension\n"
-    if (!$subject_key_id);
-
-my $key_identifier = asn1_retrieve($subject_key_id->[1]);
-
-###############################################################################
-#
-# Create and attach the module signature
-#
-###############################################################################
-
-#
-# Signature parameters
-#
-my $algo = 1;		# Public-key crypto algorithm: RSA
-my $hash = 0;		# Digest algorithm
-my $id_type = 1;	# Identifier type: X.509
-
-#
-# Digest the data
-#
-my $prologue;
-if ($dgst eq "sha1") {
-    $prologue = pack("C*",
-		     0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
-		     0x2B, 0x0E, 0x03, 0x02, 0x1A,
-		     0x05, 0x00, 0x04, 0x14);
-    $hash = 2;
-} elsif ($dgst eq "sha224") {
-    $prologue = pack("C*",
-		     0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
-		     0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
-		     0x05, 0x00, 0x04, 0x1C);
-    $hash = 7;
-} elsif ($dgst eq "sha256") {
-    $prologue = pack("C*",
-		     0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
-		     0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
-		     0x05, 0x00, 0x04, 0x20);
-    $hash = 4;
-} elsif ($dgst eq "sha384") {
-    $prologue = pack("C*",
-		     0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
-		     0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
-		     0x05, 0x00, 0x04, 0x30);
-    $hash = 5;
-} elsif ($dgst eq "sha512") {
-    $prologue = pack("C*",
-		     0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
-		     0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
-		     0x05, 0x00, 0x04, 0x40);
-    $hash = 6;
-} else {
-    die "Unknown hash algorithm: $dgst\n";
-}
-
-my $signature;
-if ($signature_file) {
-	$signature = read_file($signature_file);
-} else {
-	#
-	# Generate the digest and read from openssl's stdout
-	#
-	my $digest;
-	$digest = readpipe("openssl dgst -$dgst -binary $module") || die "openssl dgst";
-
-	#
-	# Generate the binary signature, which will be just the integer that
-	# comprises the signature with no metadata attached.
-	#
-	my $pid;
-	$pid = open2(*read_from, *write_to,
-		     "openssl rsautl -sign -inkey $private_key -keyform PEM") ||
-	    die "openssl rsautl";
-	binmode write_to;
-	print write_to $prologue . $digest || die "pipe to openssl rsautl";
-	close(write_to) || die "pipe to openssl rsautl";
-
-	binmode read_from;
-	read(read_from, $signature, 4096) || die "pipe from openssl rsautl";
-	close(read_from) || die "pipe from openssl rsautl";
-	waitpid($pid, 0) || die;
-	die "openssl rsautl died: $?" if ($? >> 8);
-}
-$signature = pack("n", length($signature)) . $signature,
-
-#
-# Build the signed binary
-#
-my $unsigned_module = read_file($module);
-
-my $magic_number = "~Module signature appended~\n";
-
-my $info = pack("CCCCCxxxN",
-		$algo, $hash, $id_type,
-		length($signers_name),
-		length($key_identifier),
-		length($signature));
-
-if ($verbose) {
-    print "Size of unsigned module: ", length($unsigned_module), "\n";
-    print "Size of signer's name  : ", length($signers_name), "\n";
-    print "Size of key identifier : ", length($key_identifier), "\n";
-    print "Size of signature      : ", length($signature), "\n";
-    print "Size of information    : ", length($info), "\n";
-    print "Size of magic number   : ", length($magic_number), "\n";
-    print "Signer's name          : '", $signers_name, "'\n";
-    print "Digest                 : $dgst\n";
-}
-
-open(FD, ">$dest") || die $dest;
-binmode FD;
-print FD
-    $unsigned_module,
-    $signers_name,
-    $key_identifier,
-    $signature,
-    $info,
-    $magic_number
-    ;
-close FD || die $dest;
-
-if (!$keep_orig) {
-    rename($dest, $module) || die $module;
-}
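For reference (not part of the patch): the asn1_extract() routine in the deleted script implements standard DER tag/length parsing — a length octet below 0x80 is the length itself, exactly 0x80 signals the indefinite form (rejected here), and larger values give the count of big-endian length octets that follow. A minimal C sketch of that decoding, with illustrative names and under those assumptions only:

/* Sketch: DER length decoding as performed by asn1_extract() above.
 * Advances *pp past the length field and returns the element length,
 * or -1 on malformed input.  Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static int64_t der_read_length(const uint8_t **pp, const uint8_t *end)
{
	const uint8_t *p = *pp;
	int64_t len;
	int nbytes, i;

	if (p >= end)
		return -1;
	len = *p++;
	if (len == 0x80)
		return -1;		/* indefinite form: rejected */
	if (len > 0x80) {		/* long form: N length octets follow */
		nbytes = (int)(len & 0x7f);
		if (nbytes > 4 || end - p < nbytes)
			return -1;
		len = 0;
		for (i = 0; i < nbytes; i++)
			len = (len << 8) | *p++;
	}
	*pp = p;
	return len;
}

int main(void)
{
	const uint8_t enc[] = { 0x82, 0x01, 0x23 };	/* long form: 0x0123 */
	const uint8_t *p = enc;

	printf("len=%lld\n", (long long)der_read_length(&p, enc + sizeof(enc)));
	return 0;
}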
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
new file mode 100755
index 0000000..058bba3
--- /dev/null
+++ b/scripts/sign-file.c
@@ -0,0 +1,260 @@
+/* Sign a module file using the given key.
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <getopt.h>
+#include <err.h>
+#include <arpa/inet.h>
+#include <openssl/bio.h>
+#include <openssl/evp.h>
+#include <openssl/pem.h>
+#include <openssl/cms.h>
+#include <openssl/err.h>
+#include <openssl/engine.h>
+
+struct module_signature {
+	uint8_t		algo;		/* Public-key crypto algorithm [0] */
+	uint8_t		hash;		/* Digest algorithm [0] */
+	uint8_t		id_type;	/* Key identifier type [PKEY_ID_PKCS7] */
+	uint8_t		signer_len;	/* Length of signer's name [0] */
+	uint8_t		key_id_len;	/* Length of key identifier [0] */
+	uint8_t		__pad[3];
+	uint32_t	sig_len;	/* Length of signature data */
+};
+
+#define PKEY_ID_PKCS7 2
+
+static char magic_number[] = "~Module signature appended~\n";
+
+static __attribute__((noreturn))
+void format(void)
+{
+	fprintf(stderr,
+		"Usage: scripts/sign-file [-dp] <hash algo> <key> <x509> <module> [<dest>]\n");
+	exit(2);
+}
+
+static void display_openssl_errors(int l)
+{
+	const char *file;
+	char buf[120];
+	int e, line;
+
+	if (ERR_peek_error() == 0)
+		return;
+	fprintf(stderr, "At main.c:%d:\n", l);
+
+	while ((e = ERR_get_error_line(&file, &line))) {
+		ERR_error_string(e, buf);
+		fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
+	}
+}
+
+static void drain_openssl_errors(void)
+{
+	const char *file;
+	int line;
+
+	if (ERR_peek_error() == 0)
+		return;
+	while (ERR_get_error_line(&file, &line)) {}
+}
+
+#define ERR(cond, fmt, ...)				\
+	do {						\
+		bool __cond = (cond);			\
+		display_openssl_errors(__LINE__);	\
+		if (__cond) {				\
+			err(1, fmt, ## __VA_ARGS__);	\
+		}					\
+	} while(0)
+
+static const char *key_pass;
+
+static int pem_pw_cb(char *buf, int len, int w, void *v)
+{
+	int pwlen;
+
+	if (!key_pass)
+		return -1;
+
+	pwlen = strlen(key_pass);
+	if (pwlen >= len)
+		return -1;
+
+	strcpy(buf, key_pass);
+
+	/* If it's wrong, don't keep trying it. */
+	key_pass = NULL;
+
+	return pwlen;
+}
+
+int main(int argc, char **argv)
+{
+	struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
+	char *hash_algo = NULL;
+	char *private_key_name, *x509_name, *module_name, *dest_name;
+	bool save_cms = false, replace_orig;
+	bool sign_only = false;
+	unsigned char buf[4096];
+	unsigned long module_size, cms_size;
+	unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR;
+	const EVP_MD *digest_algo;
+	EVP_PKEY *private_key;
+	CMS_ContentInfo *cms;
+	X509 *x509;
+	BIO *b, *bd = NULL, *bm;
+	int opt, n;
+
+	OpenSSL_add_all_algorithms();
+	ERR_load_crypto_strings();
+	ERR_clear_error();
+
+	key_pass = getenv("KBUILD_SIGN_PIN");
+
+	do {
+		opt = getopt(argc, argv, "dpk");
+		switch (opt) {
+		case 'p': save_cms = true; break;
+		case 'd': sign_only = true; save_cms = true; break;
+		case 'k': use_keyid = CMS_USE_KEYID; break;
+		case -1: break;
+		default: format();
+		}
+	} while (opt != -1);
+
+	argc -= optind;
+	argv += optind;
+	if (argc < 4 || argc > 5)
+		format();
+
+	hash_algo = argv[0];
+	private_key_name = argv[1];
+	x509_name = argv[2];
+	module_name = argv[3];
+	if (argc == 5) {
+		dest_name = argv[4];
+		replace_orig = false;
+	} else {
+		ERR(asprintf(&dest_name, "%s.~signed~", module_name) < 0,
+		    "asprintf");
+		replace_orig = true;
+	}
+
+	/* Read the private key and the X.509 cert the PKCS#7 message
+	 * will point to.
+	 */
+	if (!strncmp(private_key_name, "pkcs11:", 7)) {
+		ENGINE *e;
+
+		ENGINE_load_builtin_engines();
+		drain_openssl_errors();
+		e = ENGINE_by_id("pkcs11");
+		ERR(!e, "Load PKCS#11 ENGINE");
+		if (ENGINE_init(e))
+			drain_openssl_errors();
+		else
+			ERR(1, "ENGINE_init");
+		if (key_pass)
+			ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
+		private_key = ENGINE_load_private_key(e, private_key_name, NULL,
+						      NULL);
+		ERR(!private_key, "%s", private_key_name);
+	} else {
+		b = BIO_new_file(private_key_name, "rb");
+		ERR(!b, "%s", private_key_name);
+		private_key = PEM_read_bio_PrivateKey(b, NULL, pem_pw_cb, NULL);
+		ERR(!private_key, "%s", private_key_name);
+		BIO_free(b);
+	}
+
+	b = BIO_new_file(x509_name, "rb");
+	ERR(!b, "%s", x509_name);
+	x509 = d2i_X509_bio(b, NULL); /* Binary encoded X.509 */
+	if (!x509) {
+		ERR(BIO_reset(b) != 1, "%s", x509_name);
+		x509 = PEM_read_bio_X509(b, NULL, NULL, NULL); /* PEM encoded X.509 */
+		if (x509)
+			drain_openssl_errors();
+	}
+	BIO_free(b);
+	ERR(!x509, "%s", x509_name);
+
+	/* Open the destination file now so that we can shovel the module data
+	 * across as we read it.
+	 */
+	if (!sign_only) {
+		bd = BIO_new_file(dest_name, "wb");
+		ERR(!bd, "%s", dest_name);
+	}
+
+	/* Digest the module data. */
+	OpenSSL_add_all_digests();
+	display_openssl_errors(__LINE__);
+	digest_algo = EVP_get_digestbyname(hash_algo);
+	ERR(!digest_algo, "EVP_get_digestbyname");
+
+	bm = BIO_new_file(module_name, "rb");
+	ERR(!bm, "%s", module_name);
+
+	/* Construct a detached CMS signature over the module data. */
+	cms = CMS_sign(NULL, NULL, NULL, NULL,
+		       CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
+	ERR(!cms, "CMS_sign");
+
+	ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
+			     CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
+			     use_keyid | use_signed_attrs),
+	    "CMS_sign_add_signer");
+	ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
+	    "CMS_final");
+
+	if (save_cms) {
+		char *cms_name;
+
+		ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf");
+		b = BIO_new_file(cms_name, "wb");
+		ERR(!b, "%s", cms_name);
+		ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name);
+		BIO_free(b);
+	}
+
+	if (sign_only)
+		return 0;
+
+	/* Append the marker and the PKCS#7 message to the destination file */
+	ERR(BIO_reset(bm) < 0, "%s", module_name);
+	while ((n = BIO_read(bm, buf, sizeof(buf))),
+	       n > 0) {
+		ERR(BIO_write(bd, buf, n) < 0, "%s", dest_name);
+	}
+	ERR(n < 0, "%s", module_name);
+	module_size = BIO_number_written(bd);
+
+	ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
+	cms_size = BIO_number_written(bd) - module_size;
+	sig_info.sig_len = htonl(cms_size);
+	ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
+	ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
+
+	ERR(BIO_free(bd) < 0, "%s", dest_name);
+
+	/* Finally, if we're signing in place, replace the original. */
+	if (replace_orig)
+		ERR(rename(dest_name, module_name) < 0, "%s", dest_name);
+
+	return 0;
+}
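The C rewrite above changes the signature payload from a raw RSA integer to a detached PKCS#7/CMS blob, but keeps the on-disk trailer layout: module data, then the signature, then struct module_signature, then the magic string. A hedged sketch of how a consumer might detect that trailer and recover the signature length (the helper name is illustrative; field layout mirrors the struct defined above, with sig_len stored big-endian via htonl()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() */

struct module_signature {
	uint8_t  algo, hash, id_type;
	uint8_t  signer_len, key_id_len;
	uint8_t  pad[3];
	uint32_t sig_len;
};

static const char magic[] = "~Module signature appended~\n";

/* Return the CMS blob length, or -1 if no signature trailer is present. */
static long sig_len_of(const unsigned char *buf, size_t size)
{
	struct module_signature ms;
	size_t mlen = sizeof(magic) - 1;

	if (size < mlen + sizeof(ms))
		return -1;
	if (memcmp(buf + size - mlen, magic, mlen) != 0)
		return -1;
	memcpy(&ms, buf + size - mlen - sizeof(ms), sizeof(ms));
	return (long)ntohl(ms.sig_len);
}

int main(void)
{
	unsigned char not_signed[] = "just bytes";

	printf("%ld\n", sig_len_of(not_signed, sizeof(not_signed) - 1)); /* -1 */
	return 0;
}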
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index bb8e4d0..946caf3 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -32,6 +32,7 @@
 accout||account
 accquire||acquire
 accquired||acquired
+accross||across
 acessable||accessible
 acess||access
 achitecture||architecture
@@ -100,8 +101,10 @@
 appropriatly||appropriately
 approriate||appropriate
 approriately||appropriately
+apropriate||appropriate
 aquainted||acquainted
 aquired||acquired
+aquisition||acquisition
 arbitary||arbitrary
 architechture||architecture
 arguement||argument
@@ -111,6 +114,8 @@
 arraival||arrival
 artifical||artificial
 artillary||artillery
+asign||assign
+assertation||assertion
 assiged||assigned
 assigment||assignment
 assigments||assignments
@@ -136,6 +141,7 @@
 automatized||automated
 automatizes||automates
 autonymous||autonomous
+auxillary||auxiliary
 auxilliary||auxiliary
 avaiable||available
 avaible||available
@@ -187,6 +193,7 @@
 carefuly||carefully
 cariage||carriage
 catagory||category
+cehck||check
 challange||challenge
 challanges||challenges
 chanell||channel
@@ -199,6 +206,8 @@
 charater||character
 charaters||characters
 charcter||character
+chcek||check
+chck||check
 checksuming||checksumming
 childern||children
 childs||children
@@ -231,6 +240,8 @@
 compatable||compatible
 compatibiliy||compatibility
 compatibilty||compatibility
+compatiblity||compatibility
+competion||completion
 compilant||compliant
 compleatly||completely
 completly||completely
@@ -291,6 +302,7 @@
 definate||definite
 definately||definitely
 defintion||definition
+defintions||definitions
 defualt||default
 defult||default
 deivce||device
@@ -306,6 +318,7 @@
 depreacte||deprecate
 desactivate||deactivate
 desciptors||descriptors
+descripton||description
 descrition||description
 descritptor||descriptor
 desctiptor||descriptor
@@ -327,6 +340,7 @@
 deviece||device
 diable||disable
 dictionnary||dictionary
+didnt||didn't
 diferent||different
 differrence||difference
 difinition||definition
@@ -344,6 +358,7 @@
 documantation||documentation
 documentaion||documentation
 documment||document
+doesnt||doesn't
 dorp||drop
 dosen||doesn
 downlad||download
@@ -450,11 +465,13 @@
 grahpical||graphical
 grapic||graphic
 guage||gauge
+guarenteed||guaranteed
 guarentee||guarantee
 halfs||halves
 hander||handler
 handfull||handful
 hanled||handled
+happend||happened
 harware||hardware
 heirarchically||hierarchically
 helpfull||helpful
@@ -512,6 +529,7 @@
 initilization||initialization
 initilize||initialize
 inofficial||unofficial
+insititute||institute
 instal||install
 inteface||interface
 integreated||integrated
@@ -546,6 +564,7 @@
 invokation||invocation
 invokations||invocations
 irrelevent||irrelevant
+isnt||isn't
 isssue||issue
 itslef||itself
 jave||java
@@ -558,6 +577,7 @@
 langauge||language
 langugage||language
 lauch||launch
+layed||laid
 leightweight||lightweight
 lengh||length
 lenght||length
@@ -714,6 +734,7 @@
 preceed||precede
 precendence||precedence
 precission||precision
+preemptable||preemptible
 prefered||preferred
 prefferably||preferably
 premption||preemption
@@ -744,6 +765,7 @@
 programm||program
 programms||programs
 progresss||progress
+promiscous||promiscuous
 promps||prompts
 pronnounced||pronounced
 prononciation||pronunciation
@@ -817,6 +839,7 @@
 resizeable||resizable
 resouces||resources
 resoures||resources
+responce||response
 ressizes||resizes
 ressource||resource
 ressources||resources
@@ -869,6 +892,7 @@
 settting||setting
 shotdown||shutdown
 shoud||should
+shouldnt||shouldn't
 shoule||should
 shrinked||shrunk
 siginificantly||significantly
@@ -913,9 +937,11 @@
 struc||struct
 structres||structures
 stuct||struct
+stucture||structure
 sturcture||structure
 subdirectoires||subdirectories
 suble||subtle
+substract||subtract
 succesfully||successfully
 succesful||successful
 successfull||successful
@@ -987,6 +1013,7 @@
 unexpeted||unexpected
 unfortunatelly||unfortunately
 unifiy||unify
+unintialized||uninitialized
 unknonw||unknown
 unknow||unknown
 unkown||unknown
@@ -1027,7 +1054,9 @@
 visiters||visitors
 vitual||virtual
 wating||waiting
+wether||whether
 whataver||whatever
+whcih||which
 whenver||whenever
 wheter||whether
 whe||when
diff --git a/scripts/stackdelta b/scripts/stackdelta
new file mode 100755
index 0000000..48eabf2
--- /dev/null
+++ b/scripts/stackdelta
@@ -0,0 +1,59 @@
+#!/usr/bin/perl
+
+# Read two files produced by the stackusage script, and show the
+# delta between them.
+#
+# Currently, this only shows changes for functions listed in both
+# files. We could add an option to also show functions which have
+# vanished or appeared (which would often be due to gcc making
+# different inlining decisions).
+#
+# Another possible option would be a minimum absolute value for the
+# delta.
+#
+# A third possibility is for sorting by delta, but that can be
+# achieved by piping to sort -k5,5g.
+
+sub read_stack_usage_file {
+    my %su;
+    my $f = shift;
+    open(my $fh, '<', $f)
+	or die "cannot open $f: $!";
+    while (<$fh>) {
+	chomp;
+	my ($file, $func, $size, $type) = split;
+	# Old versions of gcc (at least 4.7) have an annoying quirk in
+	# that a (static) function whose name has been changed into,
+	# for example, ext4_find_unwritten_pgoff.isra.11 will show up
+	# in the .su file with a name of just "11". Since such a
+	# numeric suffix is likely to change across different
+	# commits/compilers/.configs or whatever else we're trying to
+	# tweak, we can't really track those functions, so we just
+	# silently skip them.
+	#
+	# Newer gcc (at least 5.0) reports the full name, so again,
+	# since the suffix is likely to change, we strip it.
+	next if $func =~ m/^[0-9]+$/;
+	$func =~ s/\..*$//;
+	# Line numbers are likely to change; strip those.
+	$file =~ s/:[0-9]+$//;
+	$su{"${file}\t${func}"} = {size => $size, type => $type};
+    }
+    close($fh);
+    return \%su;
+}
+
+@ARGV == 2
+    or die "usage: $0 <old> <new>";
+
+my $old = read_stack_usage_file($ARGV[0]);
+my $new = read_stack_usage_file($ARGV[1]);
+my @common = sort grep {exists $new->{$_}} keys %$old;
+for (@common) {
+    my $x = $old->{$_}{size};
+    my $y = $new->{$_}{size};
+    my $delta = $y - $x;
+    if ($delta) {
+	printf "%s\t%d\t%d\t%+d\n", $_, $x, $y, $delta;
+    }
+}
diff --git a/scripts/stackusage b/scripts/stackusage
new file mode 100755
index 0000000..8cf2664
--- /dev/null
+++ b/scripts/stackusage
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+outfile=""
+now=`date +%s`
+
+while [ $# -gt 0 ]
+do
+    case "$1" in
+        -o)
+	    outfile="$2"
+	    shift 2;;
+	-h)
+	    echo "usage: $0 [-o outfile] <make options/args>"
+	    exit 0;;
+	*)  break;;
+    esac
+done
+
+if [ -z "$outfile" ]
+then
+    outfile=`mktemp --tmpdir stackusage.$$.XXXX`
+fi
+
+KCFLAGS="${KCFLAGS} -fstack-usage" make "$@"
+
+# Prepend directory name to file names, remove column information,
+# make file:line/function/size/type properly tab-separated.
+find . -name '*.su' -newermt "@${now}" -print |                     \
+    xargs perl -MFile::Basename -pe                                 \
+        '$d = dirname($ARGV); s#([^:]+:[0-9]+):[0-9]+:#$d/$1\t#;' | \
+    sort -k3,3nr > "${outfile}"
+
+echo "$0: output written to ${outfile}"
diff --git a/scripts/tags.sh b/scripts/tags.sh
index c0a932d..8e5aee6 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -170,7 +170,9 @@
 	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
 	--regex-c='/^COMPAT_SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/compat_sys_\1/' \
 	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/'		\
+	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1_rcuidle/'	\
 	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'	\
+	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1_rcuidle/' \
 	--regex-c++='/PAGEFLAG\(([^,)]*).*/Page\1/'			\
 	--regex-c++='/PAGEFLAG\(([^,)]*).*/SetPage\1/'			\
 	--regex-c++='/PAGEFLAG\(([^,)]*).*/ClearPage\1/'		\
@@ -233,7 +235,9 @@
 	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'   \
 	--regex='/^COMPAT_SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/compat_sys_\1/' \
 	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/'		\
+	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1_rcuidle/'	\
 	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/' \
+	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1_rcuidle/' \
 	--regex='/PAGEFLAG(\([^,)]*\).*/Page\1/'			\
 	--regex='/PAGEFLAG(\([^,)]*\).*/SetPage\1/'		\
 	--regex='/PAGEFLAG(\([^,)]*\).*/ClearPage\1/'		\
diff --git a/security/Kconfig b/security/Kconfig
index bf4ec46..e4523789 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -132,7 +132,6 @@
 	default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
 	default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
 	default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
-	default DEFAULT_SECURITY_YAMA if SECURITY_YAMA
 	default DEFAULT_SECURITY_DAC
 
 	help
@@ -151,9 +150,6 @@
 	config DEFAULT_SECURITY_APPARMOR
 		bool "AppArmor" if SECURITY_APPARMOR=y
 
-	config DEFAULT_SECURITY_YAMA
-		bool "Yama" if SECURITY_YAMA=y
-
 	config DEFAULT_SECURITY_DAC
 		bool "Unix Discretionary Access Controls"
 
@@ -165,7 +161,6 @@
 	default "smack" if DEFAULT_SECURITY_SMACK
 	default "tomoyo" if DEFAULT_SECURITY_TOMOYO
 	default "apparmor" if DEFAULT_SECURITY_APPARMOR
-	default "yama" if DEFAULT_SECURITY_YAMA
 	default "" if DEFAULT_SECURITY_DAC
 
 endmenu
diff --git a/security/commoncap.c b/security/commoncap.c
index d103f5a4..1832cf7 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -267,6 +267,16 @@
 	new->cap_effective   = *effective;
 	new->cap_inheritable = *inheritable;
 	new->cap_permitted   = *permitted;
+
+	/*
+	 * Mask off ambient bits that are no longer both permitted and
+	 * inheritable.
+	 */
+	new->cap_ambient = cap_intersect(new->cap_ambient,
+					 cap_intersect(*permitted,
+						       *inheritable));
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EINVAL;
 	return 0;
 }
 
@@ -347,6 +357,7 @@
 
 		/*
 		 * pP' = (X & fP) | (pI & fI)
+		 * The addition of pA' is handled later.
 		 */
 		new->cap_permitted.cap[i] =
 			(new->cap_bset.cap[i] & permitted) |
@@ -474,10 +485,13 @@
 {
 	const struct cred *old = current_cred();
 	struct cred *new = bprm->cred;
-	bool effective, has_cap = false;
+	bool effective, has_cap = false, is_setid;
 	int ret;
 	kuid_t root_uid;
 
+	if (WARN_ON(!cap_ambient_invariant_ok(old)))
+		return -EPERM;
+
 	effective = false;
 	ret = get_file_caps(bprm, &effective, &has_cap);
 	if (ret < 0)
@@ -522,8 +536,9 @@
 	 *
 	 * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
 	 */
-	if ((!uid_eq(new->euid, old->uid) ||
-	     !gid_eq(new->egid, old->gid) ||
+	is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+
+	if ((is_setid ||
 	     !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
 	    bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
 		/* downgrade; they get no more than they had, and maybe less */
@@ -539,10 +554,28 @@
 	new->suid = new->fsuid = new->euid;
 	new->sgid = new->fsgid = new->egid;
 
+	/* File caps or setid cancels ambient. */
+	if (has_cap || is_setid)
+		cap_clear(new->cap_ambient);
+
+	/*
+	 * Now that we've computed pA', update pP' to give:
+	 *   pP' = (X & fP) | (pI & fI) | pA'
+	 */
+	new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
+
+	/*
+	 * Set pE' = (fE ? pP' : pA').  Because pA' is zero if fE is set,
+	 * this is the same as pE' = (fE ? pP' : 0) | pA'.
+	 */
 	if (effective)
 		new->cap_effective = new->cap_permitted;
 	else
-		cap_clear(new->cap_effective);
+		new->cap_effective = new->cap_ambient;
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
 	bprm->cap_effective = effective;
 
 	/*
@@ -557,7 +590,7 @@
 	 * Number 1 above might fail if you don't have a full bset, but I think
 	 * that is interesting information to audit.
 	 */
-	if (!cap_isclear(new->cap_effective)) {
+	if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
 		if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
 		    !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
 		    issecure(SECURE_NOROOT)) {
@@ -568,6 +601,10 @@
 	}
 
 	new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
 	return 0;
 }
 
@@ -589,7 +626,7 @@
 	if (!uid_eq(cred->uid, root_uid)) {
 		if (bprm->cap_effective)
 			return 1;
-		if (!cap_isclear(cred->cap_permitted))
+		if (!cap_issubset(cred->cap_permitted, cred->cap_ambient))
 			return 1;
 	}
 
@@ -691,10 +728,18 @@
 	     uid_eq(old->suid, root_uid)) &&
 	    (!uid_eq(new->uid, root_uid) &&
 	     !uid_eq(new->euid, root_uid) &&
-	     !uid_eq(new->suid, root_uid)) &&
-	    !issecure(SECURE_KEEP_CAPS)) {
-		cap_clear(new->cap_permitted);
-		cap_clear(new->cap_effective);
+	     !uid_eq(new->suid, root_uid))) {
+		if (!issecure(SECURE_KEEP_CAPS)) {
+			cap_clear(new->cap_permitted);
+			cap_clear(new->cap_effective);
+		}
+
+		/*
+		 * Pre-ambient programs expect setresuid to nonroot followed
+		 * by exec to drop capabilities.  We should make sure that
+		 * this remains the case.
+		 */
+		cap_clear(new->cap_ambient);
 	}
 	if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
 		cap_clear(new->cap_effective);
@@ -924,6 +969,44 @@
 			new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
 		return commit_creds(new);
 
+	case PR_CAP_AMBIENT:
+		if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
+			if (arg3 | arg4 | arg5)
+				return -EINVAL;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			cap_clear(new->cap_ambient);
+			return commit_creds(new);
+		}
+
+		if (((!cap_valid(arg3)) | arg4 | arg5))
+			return -EINVAL;
+
+		if (arg2 == PR_CAP_AMBIENT_IS_SET) {
+			return !!cap_raised(current_cred()->cap_ambient, arg3);
+		} else if (arg2 != PR_CAP_AMBIENT_RAISE &&
+			   arg2 != PR_CAP_AMBIENT_LOWER) {
+			return -EINVAL;
+		} else {
+			if (arg2 == PR_CAP_AMBIENT_RAISE &&
+			    (!cap_raised(current_cred()->cap_permitted, arg3) ||
+			     !cap_raised(current_cred()->cap_inheritable,
+					 arg3) ||
+			     issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
+				return -EPERM;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			if (arg2 == PR_CAP_AMBIENT_RAISE)
+				cap_raise(new->cap_ambient, arg3);
+			else
+				cap_lower(new->cap_ambient, arg3);
+			return commit_creds(new);
+		}
+
 	default:
 		/* No functionality available - continue with default */
 		return -ENOSYS;
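The new PR_CAP_AMBIENT prctl() handled above is the userspace entry point for the ambient set: raising a bit requires it to already be in both the permitted and inheritable sets, and SECURE_NO_CAP_AMBIENT_RAISE must be clear. A minimal usage sketch under those assumptions (the fallback constants mirror the uapi values added alongside this series):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_CAP_AMBIENT	/* uapi prctl.h values from this series */
#define PR_CAP_AMBIENT		47
#define PR_CAP_AMBIENT_IS_SET	1
#define PR_CAP_AMBIENT_RAISE	2
#define PR_CAP_AMBIENT_LOWER	3
#define PR_CAP_AMBIENT_CLEAR_ALL 4
#endif

#define CAP_NET_BIND_SERVICE	10	/* from linux/capability.h */

int main(void)
{
	/* Requires CAP_NET_BIND_SERVICE in both pP and pI, per the
	 * cap_task_prctl() checks above.
	 */
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		  CAP_NET_BIND_SERVICE, 0, 0))
		perror("PR_CAP_AMBIENT_RAISE");

	/* The ambient bit then survives execve() of a non-setid binary. */
	printf("ambient: %d\n",
	       (int)prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET,
			  CAP_NET_BIND_SERVICE, 0, 0));
	return 0;
}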
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index bd536cb..43b4cdd 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -848,6 +848,7 @@
 	new->cap_inheritable	= old->cap_inheritable;
 	new->cap_permitted	= old->cap_permitted;
 	new->cap_effective	= old->cap_effective;
+	new->cap_ambient	= old->cap_ambient;
 	new->cap_bset		= old->cap_bset;
 
 	new->jit_keyring	= old->jit_keyring;
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 4ed9810..cccbf30 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -245,6 +245,21 @@
 		}
 		break;
 	}
+	case LSM_AUDIT_DATA_IOCTL_OP: {
+		struct inode *inode;
+
+		audit_log_d_path(ab, " path=", &a->u.op->path);
+
+		inode = a->u.op->path.dentry->d_inode;
+		if (inode) {
+			audit_log_format(ab, " dev=");
+			audit_log_untrustedstring(ab, inode->i_sb->s_id);
+			audit_log_format(ab, " ino=%lu", inode->i_ino);
+		}
+
+		audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
+		break;
+	}
 	case LSM_AUDIT_DATA_DENTRY: {
 		struct inode *inode;
 
diff --git a/security/security.c b/security/security.c
index 75b85fd..46f405c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -56,18 +56,13 @@
 	pr_info("Security Framework initialized\n");
 
 	/*
-	 * Always load the capability module.
+	 * Load minor LSMs, with the capability module always first.
 	 */
 	capability_add_hooks();
-#ifdef CONFIG_SECURITY_YAMA_STACKED
-	/*
-	 * If Yama is configured for stacking load it next.
-	 */
 	yama_add_hooks();
-#endif
+
 	/*
-	 * Load the chosen module if there is one.
-	 * This will also find yama if it is stacking
+	 * Load all the remaining security modules.
 	 */
 	do_security_initcalls();
 
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 0b122b1..e60c79d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/percpu.h>
+#include <linux/list.h>
 #include <net/sock.h>
 #include <linux/un.h>
 #include <net/af_unix.h>
@@ -48,6 +49,7 @@
 	u32			tsid;
 	u16			tclass;
 	struct av_decision	avd;
+	struct avc_xperms_node	*xp_node;
 };
 
 struct avc_node {
@@ -56,6 +58,16 @@
 	struct rcu_head		rhead;
 };
 
+struct avc_xperms_decision_node {
+	struct extended_perms_decision xpd;
+	struct list_head xpd_list; /* list of extended_perms_decision */
+};
+
+struct avc_xperms_node {
+	struct extended_perms xp;
+	struct list_head xpd_head; /* list head of extended_perms_decision */
+};
+
 struct avc_cache {
 	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
@@ -80,6 +92,9 @@
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
 static struct kmem_cache *avc_node_cachep;
+static struct kmem_cache *avc_xperms_data_cachep;
+static struct kmem_cache *avc_xperms_decision_cachep;
+static struct kmem_cache *avc_xperms_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -101,6 +116,7 @@
 		return;
 	}
 
+	BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
 	perms = secclass_map[tclass-1].perms;
 
 	audit_log_format(ab, " {");
@@ -149,7 +165,7 @@
 		kfree(scontext);
 	}
 
-	BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
+	BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
 	audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
 }
 
@@ -170,7 +186,17 @@
 	atomic_set(&avc_cache.lru_hint, 0);
 
 	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
-					     0, SLAB_PANIC, NULL);
+					0, SLAB_PANIC, NULL);
+	avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
+					sizeof(struct avc_xperms_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_decision_cachep = kmem_cache_create(
+					"avc_xperms_decision_node",
+					sizeof(struct avc_xperms_decision_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
+					sizeof(struct extended_perms_data),
+					0, SLAB_PANIC, NULL);
 
 	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
 }
@@ -205,9 +231,261 @@
 			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
 }
 
+/*
+ * Use a linked list for extended_perms_decision lookup because the list is
+ * always small, i.e. fewer than 5 entries and typically only 1.
+ */
+static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
+					struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node;
+
+	list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
+		if (xpd_node->xpd.driver == driver)
+			return &xpd_node->xpd;
+	}
+	return NULL;
+}
+
+static inline unsigned int
+avc_xperms_has_perm(struct extended_perms_decision *xpd,
+					u8 perm, u8 which)
+{
+	unsigned int rc = 0;
+
+	if ((which == XPERMS_ALLOWED) &&
+			(xpd->used & XPERMS_ALLOWED))
+		rc = security_xperm_test(xpd->allowed->p, perm);
+	else if ((which == XPERMS_AUDITALLOW) &&
+			(xpd->used & XPERMS_AUDITALLOW))
+		rc = security_xperm_test(xpd->auditallow->p, perm);
+	else if ((which == XPERMS_DONTAUDIT) &&
+			(xpd->used & XPERMS_DONTAUDIT))
+		rc = security_xperm_test(xpd->dontaudit->p, perm);
+	return rc;
+}
+
+static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
+				u8 driver, u8 perm)
+{
+	struct extended_perms_decision *xpd;
+	security_xperm_set(xp_node->xp.drivers.p, driver);
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (xpd && xpd->allowed)
+		security_xperm_set(xpd->allowed->p, perm);
+}
+
+static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
+{
+	struct extended_perms_decision *xpd;
+
+	xpd = &xpd_node->xpd;
+	if (xpd->allowed)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
+	if (xpd->auditallow)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
+	if (xpd->dontaudit)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
+	kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
+}
+
+static void avc_xperms_free(struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node, *tmp;
+
+	if (!xp_node)
+		return;
+
+	list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
+		list_del(&xpd_node->xpd_list);
+		avc_xperms_decision_free(xpd_node);
+	}
+	kmem_cache_free(avc_xperms_cachep, xp_node);
+}
+
+static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
+					struct extended_perms_decision *src)
+{
+	dest->driver = src->driver;
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		memcpy(dest->allowed->p, src->allowed->p,
+				sizeof(src->allowed->p));
+	if (dest->used & XPERMS_AUDITALLOW)
+		memcpy(dest->auditallow->p, src->auditallow->p,
+				sizeof(src->auditallow->p));
+	if (dest->used & XPERMS_DONTAUDIT)
+		memcpy(dest->dontaudit->p, src->dontaudit->p,
+				sizeof(src->dontaudit->p));
+}
+
+/*
+ * similar to avc_copy_xperms_decision, but only copy decision
+ * information relevant to this perm
+ */
+static inline void avc_quick_copy_xperms_decision(u8 perm,
+			struct extended_perms_decision *dest,
+			struct extended_perms_decision *src)
+{
+	/*
+	 * compute the index of the u32 word (within the 256 bits, i.e. 8
+	 * u32s) that contains this command permission
+	 */
+	u8 i = perm >> 5;
+
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		dest->allowed->p[i] = src->allowed->p[i];
+	if (dest->used & XPERMS_AUDITALLOW)
+		dest->auditallow->p[i] = src->auditallow->p[i];
+	if (dest->used & XPERMS_DONTAUDIT)
+		dest->dontaudit->p[i] = src->dontaudit->p[i];
+}
+
+static struct avc_xperms_decision_node
+		*avc_xperms_decision_alloc(u8 which)
+{
+	struct avc_xperms_decision_node *xpd_node;
+	struct extended_perms_decision *xpd;
+
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+				GFP_ATOMIC | __GFP_NOMEMALLOC);
+	if (!xpd_node)
+		return NULL;
+
+	xpd = &xpd_node->xpd;
+	if (which & XPERMS_ALLOWED) {
+		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->allowed)
+			goto error;
+	}
+	if (which & XPERMS_AUDITALLOW) {
+		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->auditallow)
+			goto error;
+	}
+	if (which & XPERMS_DONTAUDIT) {
+		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->dontaudit)
+			goto error;
+	}
+	return xpd_node;
+error:
+	avc_xperms_decision_free(xpd_node);
+	return NULL;
+}
+
+static int avc_add_xperms_decision(struct avc_node *node,
+			struct extended_perms_decision *src)
+{
+	struct avc_xperms_decision_node *dest_xpd;
+
+	node->ae.xp_node->xp.len++;
+	dest_xpd = avc_xperms_decision_alloc(src->used);
+	if (!dest_xpd)
+		return -ENOMEM;
+	avc_copy_xperms_decision(&dest_xpd->xpd, src);
+	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
+	return 0;
+}
+
+static struct avc_xperms_node *avc_xperms_alloc(void)
+{
+	struct avc_xperms_node *xp_node;
+
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+				GFP_ATOMIC|__GFP_NOMEMALLOC);
+	if (!xp_node)
+		return xp_node;
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	return xp_node;
+}
+
+static int avc_xperms_populate(struct avc_node *node,
+				struct avc_xperms_node *src)
+{
+	struct avc_xperms_node *dest;
+	struct avc_xperms_decision_node *dest_xpd;
+	struct avc_xperms_decision_node *src_xpd;
+
+	if (src->xp.len == 0)
+		return 0;
+	dest = avc_xperms_alloc();
+	if (!dest)
+		return -ENOMEM;
+
+	memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
+	dest->xp.len = src->xp.len;
+
+	/* for each source xpd allocate a destination xpd and copy */
+	list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
+		dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
+		if (!dest_xpd)
+			goto error;
+		avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
+		list_add(&dest_xpd->xpd_list, &dest->xpd_head);
+	}
+	node->ae.xp_node = dest;
+	return 0;
+error:
+	avc_xperms_free(dest);
+	return -ENOMEM;
+}
+
+static inline u32 avc_xperms_audit_required(u32 requested,
+					struct av_decision *avd,
+					struct extended_perms_decision *xpd,
+					u8 perm,
+					int result,
+					u32 *deniedp)
+{
+	u32 denied, audited;
+
+	denied = requested & ~avd->allowed;
+	if (unlikely(denied)) {
+		audited = denied & avd->auditdeny;
+		if (audited && xpd) {
+			if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
+				audited &= ~requested;
+		}
+	} else if (result) {
+		audited = denied = requested;
+	} else {
+		audited = requested & avd->auditallow;
+		if (audited && xpd) {
+			if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
+				audited &= ~requested;
+		}
+	}
+
+	*deniedp = denied;
+	return audited;
+}
+
+static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass,
+				u32 requested, struct av_decision *avd,
+				struct extended_perms_decision *xpd,
+				u8 perm, int result,
+				struct common_audit_data *ad)
+{
+	u32 audited, denied;
+
+	audited = avc_xperms_audit_required(
+			requested, avd, xpd, perm, result, &denied);
+	if (likely(!audited))
+		return 0;
+	return slow_avc_audit(ssid, tsid, tclass, requested,
+			audited, denied, result, ad, 0);
+}
+
 static void avc_node_free(struct rcu_head *rhead)
 {
 	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
+	avc_xperms_free(node->ae.xp_node);
 	kmem_cache_free(avc_node_cachep, node);
 	avc_cache_stats_incr(frees);
 }
@@ -221,6 +499,7 @@
 
 static void avc_node_kill(struct avc_node *node)
 {
+	avc_xperms_free(node->ae.xp_node);
 	kmem_cache_free(avc_node_cachep, node);
 	avc_cache_stats_incr(frees);
 	atomic_dec(&avc_cache.active_nodes);
@@ -367,6 +646,7 @@
  * @tsid: target security identifier
  * @tclass: target security class
  * @avd: resulting av decision
+ * @xp_node: resulting extended permissions
  *
  * Insert an AVC entry for the SID pair
  * (@ssid, @tsid) and class @tclass.
@@ -378,7 +658,9 @@
  * the access vectors into a cache entry, returns
  * avc_node inserted. Otherwise, this function returns NULL.
  */
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
+				struct av_decision *avd,
+				struct avc_xperms_node *xp_node)
 {
 	struct avc_node *pos, *node = NULL;
 	int hvalue;
@@ -391,10 +673,15 @@
 	if (node) {
 		struct hlist_head *head;
 		spinlock_t *lock;
+		int rc = 0;
 
 		hvalue = avc_hash(ssid, tsid, tclass);
 		avc_node_populate(node, ssid, tsid, tclass, avd);
-
+		rc = avc_xperms_populate(node, xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			return NULL;
+		}
 		head = &avc_cache.slots[hvalue];
 		lock = &avc_cache.slots_lock[hvalue];
 
@@ -523,14 +810,17 @@
  * @perms : Permission mask bits
  * @ssid,@tsid,@tclass : identifier of an AVC entry
  * @seqno : sequence number when decision was made
+ * @xpd: extended_perms_decision to be added to the node
  *
  * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
  * otherwise, this function updates the AVC entry. The original AVC-entry object
  * will release later by RCU.
  */
-static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
-			   u32 seqno)
+static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+			u32 tsid, u16 tclass, u32 seqno,
+			struct extended_perms_decision *xpd,
+			u32 flags)
 {
 	int hvalue, rc = 0;
 	unsigned long flag;
@@ -574,9 +864,19 @@
 
 	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
 
+	if (orig->ae.xp_node) {
+		rc = avc_xperms_populate(node, orig->ae.xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			goto out_unlock;
+		}
+	}
+
 	switch (event) {
 	case AVC_CALLBACK_GRANT:
 		node->ae.avd.allowed |= perms;
+		if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
+			avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
 		break;
 	case AVC_CALLBACK_TRY_REVOKE:
 	case AVC_CALLBACK_REVOKE:
@@ -594,6 +894,9 @@
 	case AVC_CALLBACK_AUDITDENY_DISABLE:
 		node->ae.avd.auditdeny &= ~perms;
 		break;
+	case AVC_CALLBACK_ADD_XPERMS:
+		avc_add_xperms_decision(node, xpd);
+		break;
 	}
 	avc_node_replace(node, orig);
 out_unlock:
@@ -665,18 +968,20 @@
  * results in a bigger stack frame.
  */
 static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
-			 u16 tclass, struct av_decision *avd)
+			 u16 tclass, struct av_decision *avd,
+			 struct avc_xperms_node *xp_node)
 {
 	rcu_read_unlock();
-	security_compute_av(ssid, tsid, tclass, avd);
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp);
 	rcu_read_lock();
-	return avc_insert(ssid, tsid, tclass, avd);
+	return avc_insert(ssid, tsid, tclass, avd, xp_node);
 }
 
 static noinline int avc_denied(u32 ssid, u32 tsid,
-			 u16 tclass, u32 requested,
-			 unsigned flags,
-			 struct av_decision *avd)
+				u16 tclass, u32 requested,
+				u8 driver, u8 xperm, unsigned flags,
+				struct av_decision *avd)
 {
 	if (flags & AVC_STRICT)
 		return -EACCES;
@@ -684,11 +989,91 @@
 	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
 		return -EACCES;
 
-	avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-				tsid, tclass, avd->seqno);
+	avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid,
+				tsid, tclass, avd->seqno, NULL, flags);
 	return 0;
 }
 
+/*
+ * The avc extended permissions logic adds an additional 256 bits of
+ * permissions to an avc node when extended permissions for that node are
+ * specified in the avtab. If the additional 256 permissions are not adequate,
+ * as is the case with ioctls, then multiple sets may be chained together and
+ * the driver field is used to specify which set contains the permission.
+ */
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+			u8 driver, u8 xperm, struct common_audit_data *ad)
+{
+	struct avc_node *node;
+	struct av_decision avd;
+	u32 denied;
+	struct extended_perms_decision local_xpd;
+	struct extended_perms_decision *xpd = NULL;
+	struct extended_perms_data allowed;
+	struct extended_perms_data auditallow;
+	struct extended_perms_data dontaudit;
+	struct avc_xperms_node local_xp_node;
+	struct avc_xperms_node *xp_node;
+	int rc = 0, rc2;
+
+	xp_node = &local_xp_node;
+	BUG_ON(!requested);
+
+	rcu_read_lock();
+
+	node = avc_lookup(ssid, tsid, tclass);
+	if (unlikely(!node)) {
+		node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node);
+	} else {
+		memcpy(&avd, &node->ae.avd, sizeof(avd));
+		xp_node = node->ae.xp_node;
+	}
+	/* if extended permissions are not defined, only consider av_decision */
+	if (!xp_node || !xp_node->xp.len)
+		goto decision;
+
+	local_xpd.allowed = &allowed;
+	local_xpd.auditallow = &auditallow;
+	local_xpd.dontaudit = &dontaudit;
+
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (unlikely(!xpd)) {
+		/*
+		 * Compute the extended_perms_decision only if the driver
+		 * is flagged
+		 */
+		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+			avd.allowed &= ~requested;
+			goto decision;
+		}
+		rcu_read_unlock();
+		security_compute_xperms_decision(ssid, tsid, tclass, driver,
+						&local_xpd);
+		rcu_read_lock();
+		avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm,
+				ssid, tsid, tclass, avd.seqno, &local_xpd, 0);
+	} else {
+		avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
+	}
+	xpd = &local_xpd;
+
+	if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
+		avd.allowed &= ~requested;
+
+decision:
+	denied = requested & ~(avd.allowed);
+	if (unlikely(denied))
+		rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm,
+				AVC_EXTENDED_PERMS, &avd);
+
+	rcu_read_unlock();
+
+	rc2 = avc_xperms_audit(ssid, tsid, tclass, requested,
+			&avd, xpd, xperm, rc, ad);
+	if (rc2)
+		return rc2;
+	return rc;
+}
 
 /**
  * avc_has_perm_noaudit - Check permissions but perform no auditing.
@@ -716,6 +1101,7 @@
 			 struct av_decision *avd)
 {
 	struct avc_node *node;
+	struct avc_xperms_node xp_node;
 	int rc = 0;
 	u32 denied;
 
@@ -725,13 +1111,13 @@
 
 	node = avc_lookup(ssid, tsid, tclass);
 	if (unlikely(!node))
-		node = avc_compute_av(ssid, tsid, tclass, avd);
+		node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
 	else
 		memcpy(avd, &node->ae.avd, sizeof(*avd));
 
 	denied = requested & ~(avd->allowed);
 	if (unlikely(denied))
-		rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
+		rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd);
 
 	rcu_read_unlock();
 	return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 564079c..e4369d8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -254,10 +254,21 @@
 	struct inode_security_struct *isec = inode->i_security;
 	struct superblock_security_struct *sbsec = inode->i_sb->s_security;
 
-	spin_lock(&sbsec->isec_lock);
-	if (!list_empty(&isec->list))
+	/*
+	 * As not all inode security structures are in a list, we check for
+	 * an empty list outside the lock so that we don't waste time taking
+	 * the lock only to do nothing.
+	 *
+	 * The list_del_init() function can be safely called more than once.
+	 * It should not be possible for this function to be called with
+	 * concurrent list_add(), but for better safety against future changes
+	 * in the code, we use list_empty_careful() here.
+	 */
+	if (!list_empty_careful(&isec->list)) {
+		spin_lock(&sbsec->isec_lock);
 		list_del_init(&isec->list);
-	spin_unlock(&sbsec->isec_lock);
+		spin_unlock(&sbsec->isec_lock);
+	}
 
 	/*
 	 * The inode may still be referenced in a path walk and
@@ -1100,7 +1111,7 @@
 		seq_puts(m, prefix);
 		if (has_comma)
 			seq_putc(m, '\"');
-		seq_puts(m, opts->mnt_opts[i]);
+		seq_escape(m, opts->mnt_opts[i], "\"\n\\");
 		if (has_comma)
 			seq_putc(m, '\"');
 	}
@@ -1698,6 +1709,32 @@
 	return rc;
 }
 
+/*
+ * Determine the label for an inode that might be unioned.
+ */
+static int selinux_determine_inode_label(const struct inode *dir,
+					 const struct qstr *name,
+					 u16 tclass,
+					 u32 *_new_isid)
+{
+	const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
+	const struct inode_security_struct *dsec = dir->i_security;
+	const struct task_security_struct *tsec = current_security();
+
+	if ((sbsec->flags & SE_SBINITIALIZED) &&
+	    (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
+		*_new_isid = sbsec->mntpoint_sid;
+	} else if ((sbsec->flags & SBLABEL_MNT) &&
+		   tsec->create_sid) {
+		*_new_isid = tsec->create_sid;
+	} else {
+		return security_transition_sid(tsec->sid, dsec->sid, tclass,
+					       name, _new_isid);
+	}
+
+	return 0;
+}
+
 /* Check whether a task can create a file. */
 static int may_create(struct inode *dir,
 		      struct dentry *dentry,
@@ -1714,7 +1751,6 @@
 	sbsec = dir->i_sb->s_security;
 
 	sid = tsec->sid;
-	newsid = tsec->create_sid;
 
 	ad.type = LSM_AUDIT_DATA_DENTRY;
 	ad.u.dentry = dentry;
@@ -1725,12 +1761,10 @@
 	if (rc)
 		return rc;
 
-	if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
-		rc = security_transition_sid(sid, dsec->sid, tclass,
-					     &dentry->d_name, &newsid);
-		if (rc)
-			return rc;
-	}
+	rc = selinux_determine_inode_label(dir, &dentry->d_name, tclass,
+					   &newsid);
+	if (rc)
+		return rc;
 
 	rc = avc_has_perm(sid, newsid, tclass, FILE__CREATE, &ad);
 	if (rc)
@@ -2704,32 +2738,14 @@
 					struct qstr *name, void **ctx,
 					u32 *ctxlen)
 {
-	const struct cred *cred = current_cred();
-	struct task_security_struct *tsec;
-	struct inode_security_struct *dsec;
-	struct superblock_security_struct *sbsec;
-	struct inode *dir = d_backing_inode(dentry->d_parent);
 	u32 newsid;
 	int rc;
 
-	tsec = cred->security;
-	dsec = dir->i_security;
-	sbsec = dir->i_sb->s_security;
-
-	if (tsec->create_sid && sbsec->behavior != SECURITY_FS_USE_MNTPOINT) {
-		newsid = tsec->create_sid;
-	} else {
-		rc = security_transition_sid(tsec->sid, dsec->sid,
-					     inode_mode_to_security_class(mode),
-					     name,
-					     &newsid);
-		if (rc) {
-			printk(KERN_WARNING
-				"%s: security_transition_sid failed, rc=%d\n",
-			       __func__, -rc);
-			return rc;
-		}
-	}
+	rc = selinux_determine_inode_label(d_inode(dentry->d_parent), name,
+					   inode_mode_to_security_class(mode),
+					   &newsid);
+	if (rc)
+		return rc;
 
 	return security_sid_to_context(newsid, (char **)ctx, ctxlen);
 }
@@ -2752,22 +2768,12 @@
 	sid = tsec->sid;
 	newsid = tsec->create_sid;
 
-	if ((sbsec->flags & SE_SBINITIALIZED) &&
-	    (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
-		newsid = sbsec->mntpoint_sid;
-	else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
-		rc = security_transition_sid(sid, dsec->sid,
-					     inode_mode_to_security_class(inode->i_mode),
-					     qstr, &newsid);
-		if (rc) {
-			printk(KERN_WARNING "%s:  "
-			       "security_transition_sid failed, rc=%d (dev=%s "
-			       "ino=%ld)\n",
-			       __func__,
-			       -rc, inode->i_sb->s_id, inode->i_ino);
-			return rc;
-		}
-	}
+	rc = selinux_determine_inode_label(
+		dir, qstr,
+		inode_mode_to_security_class(inode->i_mode),
+		&newsid);
+	if (rc)
+		return rc;
 
 	/* Possibly defer initialization to selinux_complete_init. */
 	if (sbsec->flags & SE_SBINITIALIZED) {
@@ -3228,6 +3234,46 @@
 	file_free_security(file);
 }
 
+/*
+ * Check whether a task has the ioctl permission and cmd
+ * operation to an inode.
+ */
+int ioctl_has_perm(const struct cred *cred, struct file *file,
+		u32 requested, u16 cmd)
+{
+	struct common_audit_data ad;
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = file_inode(file);
+	struct inode_security_struct *isec = inode->i_security;
+	struct lsm_ioctlop_audit ioctl;
+	u32 ssid = cred_sid(cred);
+	int rc;
+	u8 driver = cmd >> 8;
+	u8 xperm = cmd & 0xff;
+
+	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
+	ad.u.op = &ioctl;
+	ad.u.op->cmd = cmd;
+	ad.u.op->path = file->f_path;
+
+	if (ssid != fsec->sid) {
+		rc = avc_has_perm(ssid, fsec->sid,
+				SECCLASS_FD,
+				FD__USE,
+				&ad);
+		if (rc)
+			goto out;
+	}
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
+			requested, driver, xperm, &ad);
+out:
+	return rc;
+}
+
 static int selinux_file_ioctl(struct file *file, unsigned int cmd,
 			      unsigned long arg)
 {
@@ -3270,7 +3316,7 @@
 	 * to the file's ioctl() function.
 	 */
 	default:
-		error = file_has_perm(cred, file, FILE__IOCTL);
+		error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
 	}
 	return error;
 }
@@ -4520,6 +4566,7 @@
 
 	sksec->peer_sid = SECINITSID_UNLABELED;
 	sksec->sid = SECINITSID_UNLABELED;
+	sksec->sclass = SECCLASS_SOCKET;
 	selinux_netlbl_sk_security_reset(sksec);
 	sk->sk_security = sksec;
 
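ioctl_has_perm() above splits the low 16 bits of the ioctl command into the extended-permission pair: the upper byte selects a 256-bit "driver" set and the lower byte the function bit within it. A trivial illustration (SIOCGIFADDR, 0x8915, is just an example command):

/* Sketch: the (driver, xperm) decomposition used by ioctl_has_perm(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t cmd = 0x8915;		/* low 16 bits of the ioctl number */
	uint8_t driver = cmd >> 8;	/* 0x89: selects the 256-bit set */
	uint8_t xperm = cmd & 0xff;	/* 0x15: bit within that set */

	printf("driver=0x%02x xperm=0x%02x\n", driver, xperm);
	return 0;
}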
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 5973c32..0999df0 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -143,6 +143,7 @@
 }
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_EXTENDED_PERMS 2	/* update extended permissions */
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
 			 unsigned flags,
@@ -156,6 +157,10 @@
 		       struct common_audit_data *auditdata,
 		       int flags);
 
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+		u8 driver, u8 perm, struct common_audit_data *ad);
+
+
 u32 avc_policy_seqno(void);
 
 #define AVC_CALLBACK_GRANT		1
@@ -166,6 +171,7 @@
 #define AVC_CALLBACK_AUDITALLOW_DISABLE	32
 #define AVC_CALLBACK_AUDITDENY_ENABLE	64
 #define AVC_CALLBACK_AUDITDENY_DISABLE	128
+#define AVC_CALLBACK_ADD_XPERMS		256
 
 int avc_add_callback(int (*callback)(u32 event), u32 events);
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 36993ad..6a681d2 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -35,13 +35,14 @@
 #define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS	27
 #define POLICYDB_VERSION_DEFAULT_TYPE	28
 #define POLICYDB_VERSION_CONSTRAINT_NAMES	29
+#define POLICYDB_VERSION_XPERMS_IOCTL	30
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
 #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
 #define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 #else
-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_CONSTRAINT_NAMES
+#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_XPERMS_IOCTL
 #endif
 
 /* Mask for just the mount related flags */
@@ -109,11 +110,38 @@
 	u32 flags;
 };
 
+#define XPERMS_ALLOWED 1
+#define XPERMS_AUDITALLOW 2
+#define XPERMS_DONTAUDIT 4
+
+#define security_xperm_set(perms, x) (perms[x >> 5] |= 1 << (x & 0x1f))
+#define security_xperm_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))
+struct extended_perms_data {
+	u32 p[8];
+};
+
+struct extended_perms_decision {
+	u8 used;
+	u8 driver;
+	struct extended_perms_data *allowed;
+	struct extended_perms_data *auditallow;
+	struct extended_perms_data *dontaudit;
+};
+
+struct extended_perms {
+	u16 len;	/* length of the associated decision chain */
+	struct extended_perms_data drivers; /* flag drivers that are used */
+};
+
 /* definitions of av_decision.flags */
 #define AVD_FLAGS_PERMISSIVE	0x0001
 
 void security_compute_av(u32 ssid, u32 tsid,
-			 u16 tclass, struct av_decision *avd);
+			 u16 tclass, struct av_decision *avd,
+			 struct extended_perms *xperms);
+
+void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass,
+			 u8 driver, struct extended_perms_decision *xpermd);
 
 void security_compute_av_user(u32 ssid, u32 tsid,
 			     u16 tclass, struct av_decision *avd);
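The security_xperm_set/test macros above index the 256-bit extended_perms_data bitmap as eight u32 words: x >> 5 selects the word and x & 0x1f the bit within it. A standalone sketch of the same arithmetic:

/* Sketch: the bitmap arithmetic behind security_xperm_set/test. */
#include <assert.h>
#include <stdint.h>

struct extended_perms_data { uint32_t p[8]; };	/* 256 bits */

static void xperm_set(struct extended_perms_data *d, uint8_t x)
{
	d->p[x >> 5] |= 1u << (x & 0x1f);
}

static int xperm_test(const struct extended_perms_data *d, uint8_t x)
{
	return 1 & (d->p[x >> 5] >> (x & 0x1f));
}

int main(void)
{
	struct extended_perms_data d = { { 0 } };

	xperm_set(&d, 0x15);		/* bit 21 of word 0 */
	assert(xperm_test(&d, 0x15));
	assert(!xperm_test(&d, 0x16));
	return 0;
}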
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index b64f277..3628d3a 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -24,6 +24,7 @@
 #include "policydb.h"
 
 static struct kmem_cache *avtab_node_cachep;
+static struct kmem_cache *avtab_xperms_cachep;
 
 /* Based on MurmurHash3, written by Austin Appleby and placed in the
  * public domain.
@@ -70,11 +71,24 @@
 		  struct avtab_key *key, struct avtab_datum *datum)
 {
 	struct avtab_node *newnode;
+	struct avtab_extended_perms *xperms;
 	newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
 	if (newnode == NULL)
 		return NULL;
 	newnode->key = *key;
-	newnode->datum = *datum;
+
+	if (key->specified & AVTAB_XPERMS) {
+		xperms = kmem_cache_zalloc(avtab_xperms_cachep, GFP_KERNEL);
+		if (xperms == NULL) {
+			kmem_cache_free(avtab_node_cachep, newnode);
+			return NULL;
+		}
+		*xperms = *(datum->u.xperms);
+		newnode->datum.u.xperms = xperms;
+	} else {
+		newnode->datum.u.data = datum->u.data;
+	}
+
 	if (prev) {
 		newnode->next = prev->next;
 		prev->next = newnode;
@@ -107,8 +121,12 @@
 		if (key->source_type == cur->key.source_type &&
 		    key->target_type == cur->key.target_type &&
 		    key->target_class == cur->key.target_class &&
-		    (specified & cur->key.specified))
+		    (specified & cur->key.specified)) {
+			/* extended perms may not be unique */
+			if (specified & AVTAB_XPERMS)
+				break;
 			return -EEXIST;
+		}
 		if (key->source_type < cur->key.source_type)
 			break;
 		if (key->source_type == cur->key.source_type &&
@@ -271,6 +289,9 @@
 		while (cur) {
 			temp = cur;
 			cur = cur->next;
+			if (temp->key.specified & AVTAB_XPERMS)
+				kmem_cache_free(avtab_xperms_cachep,
+						temp->datum.u.xperms);
 			kmem_cache_free(avtab_node_cachep, temp);
 		}
 	}
@@ -359,7 +380,10 @@
 	AVTAB_AUDITALLOW,
 	AVTAB_TRANSITION,
 	AVTAB_CHANGE,
-	AVTAB_MEMBER
+	AVTAB_MEMBER,
+	AVTAB_XPERMS_ALLOWED,
+	AVTAB_XPERMS_AUDITALLOW,
+	AVTAB_XPERMS_DONTAUDIT
 };
 
 int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
@@ -369,10 +393,11 @@
 {
 	__le16 buf16[4];
 	u16 enabled;
-	__le32 buf32[7];
 	u32 items, items2, val, vers = pol->policyvers;
 	struct avtab_key key;
 	struct avtab_datum datum;
+	struct avtab_extended_perms xperms;
+	__le32 buf32[ARRAY_SIZE(xperms.perms.p)];
 	int i, rc;
 	unsigned set;
 
@@ -429,11 +454,15 @@
 			printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
 			return -EINVAL;
 		}
+		if (val & AVTAB_XPERMS) {
+			printk(KERN_ERR "SELinux: avtab: entry has extended permissions\n");
+			return -EINVAL;
+		}
 
 		for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
 			if (val & spec_order[i]) {
 				key.specified = spec_order[i] | enabled;
-				datum.data = le32_to_cpu(buf32[items++]);
+				datum.u.data = le32_to_cpu(buf32[items++]);
 				rc = insertf(a, &key, &datum, p);
 				if (rc)
 					return rc;
@@ -476,14 +505,42 @@
 		return -EINVAL;
 	}
 
-	rc = next_entry(buf32, fp, sizeof(u32));
-	if (rc) {
-		printk(KERN_ERR "SELinux: avtab: truncated entry\n");
-		return rc;
+	if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) &&
+			(key.specified & AVTAB_XPERMS)) {
+		printk(KERN_ERR "SELinux:  avtab:  policy version %u does not "
+				"support extended permissions rules and one "
+				"was specified\n", vers);
+		return -EINVAL;
+	} else if (key.specified & AVTAB_XPERMS) {
+		memset(&xperms, 0, sizeof(struct avtab_extended_perms));
+		rc = next_entry(&xperms.specified, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(&xperms.driver, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
+			xperms.perms.p[i] = le32_to_cpu(buf32[i]);
+		datum.u.xperms = &xperms;
+	} else {
+		rc = next_entry(buf32, fp, sizeof(u32));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		datum.u.data = le32_to_cpu(*buf32);
 	}
-	datum.data = le32_to_cpu(*buf32);
 	if ((key.specified & AVTAB_TYPE) &&
-	    !policydb_type_isvalid(pol, datum.data)) {
+	    !policydb_type_isvalid(pol, datum.u.data)) {
 		printk(KERN_ERR "SELinux: avtab: invalid type\n");
 		return -EINVAL;
 	}
@@ -543,8 +600,9 @@
 int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
 {
 	__le16 buf16[4];
-	__le32 buf32[1];
+	__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
 	int rc;
+	unsigned int i;
 
 	buf16[0] = cpu_to_le16(cur->key.source_type);
 	buf16[1] = cpu_to_le16(cur->key.target_type);
@@ -553,8 +611,22 @@
 	rc = put_entry(buf16, sizeof(u16), 4, fp);
 	if (rc)
 		return rc;
-	buf32[0] = cpu_to_le32(cur->datum.data);
-	rc = put_entry(buf32, sizeof(u32), 1, fp);
+
+	if (cur->key.specified & AVTAB_XPERMS) {
+		rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
+			buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
+		rc = put_entry(buf32, sizeof(u32),
+				ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
+	} else {
+		buf32[0] = cpu_to_le32(cur->datum.u.data);
+		rc = put_entry(buf32, sizeof(u32), 1, fp);
+	}
 	if (rc)
 		return rc;
 	return 0;
@@ -588,9 +660,13 @@
 	avtab_node_cachep = kmem_cache_create("avtab_node",
 					      sizeof(struct avtab_node),
 					      0, SLAB_PANIC, NULL);
+	avtab_xperms_cachep = kmem_cache_create("avtab_extended_perms",
+						sizeof(struct avtab_extended_perms),
+						0, SLAB_PANIC, NULL);
 }
 
 void avtab_cache_destroy(void)
 {
 	kmem_cache_destroy(avtab_node_cachep);
+	kmem_cache_destroy(avtab_xperms_cachep);
 }
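
For reference, avtab_read_item() and avtab_write_item() above handle an
extended-permissions datum as a u8 specifier, a u8 driver and eight
little-endian u32 words, in that order. The struct below is only a sketch of
that layout; the kernel reads each field separately with next_entry():

	struct xperms_disk_layout {	/* illustrative, not defined by the kernel */
		u8	specified;	/* AVTAB_XPERMS_IOCTLFUNCTION or _IOCTLDRIVER */
		u8	driver;		/* driver byte for per-function rules */
		__le32	perms[8];	/* 256-bit permission bitmap */
	};
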
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index adb451c..d946c9d 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -23,6 +23,7 @@
 #ifndef _SS_AVTAB_H_
 #define _SS_AVTAB_H_
 
+#include "security.h"
 #include <linux/flex_array.h>
 
 struct avtab_key {
@@ -37,13 +38,43 @@
 #define AVTAB_MEMBER		0x0020
 #define AVTAB_CHANGE		0x0040
 #define AVTAB_TYPE		(AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+/* extended permissions */
+#define AVTAB_XPERMS_ALLOWED	0x0100
+#define AVTAB_XPERMS_AUDITALLOW	0x0200
+#define AVTAB_XPERMS_DONTAUDIT	0x0400
+#define AVTAB_XPERMS		(AVTAB_XPERMS_ALLOWED | \
+				AVTAB_XPERMS_AUDITALLOW | \
+				AVTAB_XPERMS_DONTAUDIT)
 #define AVTAB_ENABLED_OLD   0x80000000 /* reserved for use in cond_avtab */
 #define AVTAB_ENABLED		0x8000 /* reserved for use in cond_avtab */
 	u16 specified;	/* what field is specified */
 };
 
+/*
+ * For operations that require more than the 32 permissions provided by the
+ * avc, extended permissions may be used to provide 256 bits of permissions.
+ */
+struct avtab_extended_perms {
+/* These are not flags. All 256 values may be used */
+#define AVTAB_XPERMS_IOCTLFUNCTION	0x01
+#define AVTAB_XPERMS_IOCTLDRIVER	0x02
+	/* extension of the avtab_key specified */
+	u8 specified; /* ioctl, netfilter, ... */
+	/*
+	 * if 256 bits is not adequate, as is often the case with ioctls, then
+	 * multiple extended perms may be used and the driver field
+	 * specifies which permissions are included.
+	 */
+	u8 driver;
+	/* 256 bits of permissions */
+	struct extended_perms_data perms;
+};
+
 struct avtab_datum {
-	u32 data; /* access vector or type value */
+	union {
+		u32 data; /* access vector or type value */
+		struct avtab_extended_perms *xperms;
+	} u;
 };
 
 struct avtab_node {
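
To make the driver/function split concrete: the low byte of an ioctl command
selects one of 256 functions within a driver and the next byte one of 256
drivers, so the two rule kinds together cover the 16-bit command space. The
command value below is arbitrary, chosen only to show the arithmetic:

	u16 cmd = 0x8910;	/* example command number only */
	u8 driver = cmd >> 8;	/* 0x89: matched against xperms->driver */
	u8 func = cmd & 0xff;	/* 0x10: bit tested in the perms bitmap */
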
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 62c6773..18643bf 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -15,6 +15,7 @@
 
 #include "security.h"
 #include "conditional.h"
+#include "services.h"
 
 /*
  * cond_evaluate_expr evaluates a conditional expr
@@ -612,21 +613,39 @@
 
 	return 0;
 }
-/* Determine whether additional permissions are granted by the conditional
- * av table, and if so, add them to the result
- */
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd)
+
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd)
 {
 	struct avtab_node *node;
 
-	if (!ctab || !key || !avd)
+	if (!ctab || !key || !xpermd)
+		return;
+
+	for (node = avtab_search_node(ctab, key); node;
+			node = avtab_search_node_next(node, key->specified)) {
+		if (node->key.specified & AVTAB_ENABLED)
+			services_compute_xperms_decision(xpermd, node);
+	}
+	return;
+}
+/* Determine whether additional permissions are granted by the conditional
+ * av table, and if so, add them to the result
+ */
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms)
+{
+	struct avtab_node *node;
+
+	if (!ctab || !key || !avd || !xperms)
 		return;
 
 	for (node = avtab_search_node(ctab, key); node;
 				node = avtab_search_node_next(node, key->specified)) {
 		if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
-			avd->allowed |= node->datum.data;
+			avd->allowed |= node->datum.u.data;
 		if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
 			/* Since a '0' in an auditdeny mask represents a
@@ -634,10 +653,13 @@
 			 * the '&' operand to ensure that all '0's in the mask
 			 * are retained (much unlike the allow and auditallow cases).
 			 */
-			avd->auditdeny &= node->datum.data;
+			avd->auditdeny &= node->datum.u.data;
 		if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
-			avd->auditallow |= node->datum.data;
+			avd->auditallow |= node->datum.u.data;
+		if ((node->key.specified & AVTAB_ENABLED) &&
+				(node->key.specified & AVTAB_XPERMS))
+			services_compute_xperms_drivers(xperms, node);
 	}
 	return;
 }
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 4d1f874..ddb43e7 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -73,8 +73,10 @@
 int cond_write_bool(void *key, void *datum, void *ptr);
 int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
 
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
-
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms);
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd);
 int evaluate_cond_node(struct policydb *p, struct cond_node *node);
 
 #endif /* _CONDITIONAL_H_ */
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 74aa224..992a315 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -148,6 +148,11 @@
 		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
 	},
+	{
+		.version	= POLICYDB_VERSION_XPERMS_IOCTL,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
 };
 
 static struct policydb_compat_info *policydb_lookup_compat(int version)
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9e2d8207..b7df12b 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -93,9 +93,10 @@
 				    u32 *scontext_len);
 
 static void context_struct_compute_av(struct context *scontext,
-				      struct context *tcontext,
-				      u16 tclass,
-				      struct av_decision *avd);
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms);
 
 struct selinux_mapping {
 	u16 value; /* policy value */
@@ -565,7 +566,8 @@
 		context_struct_compute_av(&lo_scontext,
 					  tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -580,7 +582,8 @@
 		context_struct_compute_av(scontext,
 					  &lo_tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -596,7 +599,8 @@
 		context_struct_compute_av(&lo_scontext,
 					  &lo_tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -613,13 +617,39 @@
 }
 
 /*
- * Compute access vectors based on a context structure pair for
- * the permissions in a particular class.
+ * flag which drivers have permissions;
+ * only looking for ioctl-based extended permissions
+ */
+void services_compute_xperms_drivers(
+		struct extended_perms *xperms,
+		struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		/* if one or more driver has all permissions allowed */
+		for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
+			xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		/* if allowing permissions within a driver */
+		security_xperm_set(xperms->drivers.p,
+					node->datum.u.xperms->driver);
+	}
+
+	/* If no ioctl commands are allowed, ignore auditallow and auditdeny */
+	if (node->key.specified & AVTAB_XPERMS_ALLOWED)
+		xperms->len = 1;
+}
+
+/*
+ * Compute access vectors and extended permissions based on a context
+ * structure pair for the permissions in a particular class.
  */
 static void context_struct_compute_av(struct context *scontext,
-				      struct context *tcontext,
-				      u16 tclass,
-				      struct av_decision *avd)
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms)
 {
 	struct constraint_node *constraint;
 	struct role_allow *ra;
@@ -633,6 +663,10 @@
 	avd->allowed = 0;
 	avd->auditallow = 0;
 	avd->auditdeny = 0xffffffff;
+	if (xperms) {
+		memset(&xperms->drivers, 0, sizeof(xperms->drivers));
+		xperms->len = 0;
+	}
 
 	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
 		if (printk_ratelimit())
@@ -647,7 +681,7 @@
 	 * this permission check, then use it.
 	 */
 	avkey.target_class = tclass;
-	avkey.specified = AVTAB_AV;
+	avkey.specified = AVTAB_AV | AVTAB_XPERMS;
 	sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
 	BUG_ON(!sattr);
 	tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
@@ -660,15 +694,18 @@
 			     node;
 			     node = avtab_search_node_next(node, avkey.specified)) {
 				if (node->key.specified == AVTAB_ALLOWED)
-					avd->allowed |= node->datum.data;
+					avd->allowed |= node->datum.u.data;
 				else if (node->key.specified == AVTAB_AUDITALLOW)
-					avd->auditallow |= node->datum.data;
+					avd->auditallow |= node->datum.u.data;
 				else if (node->key.specified == AVTAB_AUDITDENY)
-					avd->auditdeny &= node->datum.data;
+					avd->auditdeny &= node->datum.u.data;
+				else if (xperms && (node->key.specified & AVTAB_XPERMS))
+					services_compute_xperms_drivers(xperms, node);
 			}
 
 			/* Check conditional av table for additional permissions */
-			cond_compute_av(&policydb.te_cond_avtab, &avkey, avd);
+			cond_compute_av(&policydb.te_cond_avtab, &avkey,
+					avd, xperms);
 
 		}
 	}
@@ -899,6 +936,139 @@
 	avd->flags = 0;
 }
 
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		if (xpermd->driver != node->datum.u.xperms->driver)
+			return;
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		if (!security_xperm_test(node->datum.u.xperms->perms.p,
+					xpermd->driver))
+			return;
+	} else {
+		BUG();
+	}
+
+	if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+		xpermd->used |= XPERMS_ALLOWED;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->allowed->p, 0xff,
+					sizeof(xpermd->allowed->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->allowed->p); i++)
+				xpermd->allowed->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) {
+		xpermd->used |= XPERMS_AUDITALLOW;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->auditallow->p, 0xff,
+					sizeof(xpermd->auditallow->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->auditallow->p); i++)
+				xpermd->auditallow->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) {
+		xpermd->used |= XPERMS_DONTAUDIT;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->dontaudit->p, 0xff,
+					sizeof(xpermd->dontaudit->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->dontaudit->p); i++)
+				xpermd->dontaudit->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else {
+		BUG();
+	}
+}
+
+void security_compute_xperms_decision(u32 ssid,
+				u32 tsid,
+				u16 orig_tclass,
+				u8 driver,
+				struct extended_perms_decision *xpermd)
+{
+	u16 tclass;
+	struct context *scontext, *tcontext;
+	struct avtab_key avkey;
+	struct avtab_node *node;
+	struct ebitmap *sattr, *tattr;
+	struct ebitmap_node *snode, *tnode;
+	unsigned int i, j;
+
+	xpermd->driver = driver;
+	xpermd->used = 0;
+	memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
+	memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
+	memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
+
+	read_lock(&policy_rwlock);
+	if (!ss_initialized)
+		goto allow;
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		goto out;
+	}
+
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		goto out;
+	}
+
+	tclass = unmap_class(orig_tclass);
+	if (unlikely(orig_tclass && !tclass)) {
+		if (policydb.allow_unknown)
+			goto allow;
+		goto out;
+	}
+
+	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+		pr_warn_ratelimited("SELinux:  Invalid class %hu\n", tclass);
+		goto out;
+	}
+
+	avkey.target_class = tclass;
+	avkey.specified = AVTAB_XPERMS;
+	sattr = flex_array_get(policydb.type_attr_map_array,
+				scontext->type - 1);
+	BUG_ON(!sattr);
+	tattr = flex_array_get(policydb.type_attr_map_array,
+				tcontext->type - 1);
+	BUG_ON(!tattr);
+	ebitmap_for_each_positive_bit(sattr, snode, i) {
+		ebitmap_for_each_positive_bit(tattr, tnode, j) {
+			avkey.source_type = i + 1;
+			avkey.target_type = j + 1;
+			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+			     node;
+			     node = avtab_search_node_next(node, avkey.specified))
+				services_compute_xperms_decision(xpermd, node);
+
+			cond_compute_xperms(&policydb.te_cond_avtab,
+						&avkey, xpermd);
+		}
+	}
+out:
+	read_unlock(&policy_rwlock);
+	return;
+allow:
+	memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
+	goto out;
+}
 
 /**
  * security_compute_av - Compute access vector decisions.
@@ -906,6 +1076,7 @@
  * @tsid: target security identifier
  * @tclass: target security class
  * @avd: access vector decisions
+ * @xperms: extended permissions
  *
  * Compute a set of access vector decisions based on the
  * SID pair (@ssid, @tsid) for the permissions in @tclass.
@@ -913,13 +1084,15 @@
 void security_compute_av(u32 ssid,
 			 u32 tsid,
 			 u16 orig_tclass,
-			 struct av_decision *avd)
+			 struct av_decision *avd,
+			 struct extended_perms *xperms)
 {
 	u16 tclass;
 	struct context *scontext = NULL, *tcontext = NULL;
 
 	read_lock(&policy_rwlock);
 	avd_init(avd);
+	xperms->len = 0;
 	if (!ss_initialized)
 		goto allow;
 
@@ -947,7 +1120,7 @@
 			goto allow;
 		goto out;
 	}
-	context_struct_compute_av(scontext, tcontext, tclass, avd);
+	context_struct_compute_av(scontext, tcontext, tclass, avd, xperms);
 	map_decision(orig_tclass, avd, policydb.allow_unknown);
 out:
 	read_unlock(&policy_rwlock);
@@ -993,7 +1166,7 @@
 		goto out;
 	}
 
-	context_struct_compute_av(scontext, tcontext, tclass, avd);
+	context_struct_compute_av(scontext, tcontext, tclass, avd, NULL);
  out:
 	read_unlock(&policy_rwlock);
 	return;
@@ -1515,7 +1688,7 @@
 
 	if (avdatum) {
 		/* Use the type from the type transition/member/change rule. */
-		newcontext.type = avdatum->data;
+		newcontext.type = avdatum->u.data;
 	}
 
 	/* if we have an objname this is a file trans check so check those rules */
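
A hedged sketch of the setup a caller of security_compute_xperms_decision()
needs: all three bitmap pointers must reference real storage, since the
function zeroes them unconditionally before walking the policy (the local
names here are illustrative):

	struct extended_perms_data allowed, auditallow, dontaudit;
	struct extended_perms_decision xpermd = {
		.allowed	= &allowed,
		.auditallow	= &auditallow,
		.dontaudit	= &dontaudit,
	};

	security_compute_xperms_decision(ssid, tsid, tclass, driver, &xpermd);
	if (security_xperm_test(xpermd.allowed->p, xperm))
		; /* this driver/function pair is allowed */
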
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index e8d907e..6abcd87 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -11,5 +11,11 @@
 
 extern struct policydb policydb;
 
+void services_compute_xperms_drivers(struct extended_perms *xperms,
+				struct avtab_node *node);
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node);
+
 #endif	/* _SS_SERVICES_H_ */
 
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 244e035..fff0c61 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -17,12 +17,27 @@
 #include <linux/spinlock.h>
 #include <linux/lsm_hooks.h>
 #include <linux/in.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#endif /* CONFIG_IPV6 */
 #include <net/netlabel.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
 #include <linux/lsm_audit.h>
 
 /*
+ * Use IPv6 port labeling if IPv6 is enabled and secmarks
+ * are not being used.
+ */
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_PORT_LABELING 1
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6) && defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_SECMARK_LABELING 1
+#endif
+
+/*
  * Smack labels were limited to 23 characters for a long time.
  */
 #define SMK_LABELLEN	24
@@ -118,15 +133,30 @@
 };
 
 /*
- * An entry in the table identifying hosts.
+ * An entry in the table identifying IPv4 hosts.
  */
-struct smk_netlbladdr {
+struct smk_net4addr {
 	struct list_head	list;
-	struct sockaddr_in	smk_host;	/* network address */
+	struct in_addr		smk_host;	/* network address */
 	struct in_addr		smk_mask;	/* network mask */
+	int			smk_masks;	/* mask size */
 	struct smack_known	*smk_label;	/* label */
 };
 
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * An entry in the table identifying IPv6 hosts.
+ */
+struct smk_net6addr {
+	struct list_head	list;
+	struct in6_addr		smk_host;	/* network address */
+	struct in6_addr		smk_mask;	/* network mask */
+	int			smk_masks;	/* mask size */
+	struct smack_known	*smk_label;	/* label */
+};
+#endif /* CONFIG_IPV6 */
+
+#ifdef SMACK_IPV6_PORT_LABELING
 /*
  * An entry in the table identifying ports.
  */
@@ -137,12 +167,31 @@
 	struct smack_known	*smk_in;	/* inbound label */
 	struct smack_known	*smk_out;	/* outgoing label */
 };
+#endif /* SMACK_IPV6_PORT_LABELING */
 
 struct smack_onlycap {
 	struct list_head	list;
 	struct smack_known	*smk_label;
 };
 
+/* Super block security struct flags for mount options */
+#define FSDEFAULT_MNT	0x01
+#define FSFLOOR_MNT	0x02
+#define FSHAT_MNT	0x04
+#define FSROOT_MNT	0x08
+#define FSTRANS_MNT	0x10
+
+#define NUM_SMK_MNT_OPTS	5
+
+enum {
+	Opt_error = -1,
+	Opt_fsdefault = 1,
+	Opt_fsfloor = 2,
+	Opt_fshat = 3,
+	Opt_fsroot = 4,
+	Opt_fstransmute = 5,
+};
+
 /*
  * Mount options
  */
@@ -152,6 +201,7 @@
 #define SMK_FSROOT	"smackfsroot="
 #define SMK_FSTRANS	"smackfstransmute="
 
+#define SMACK_DELETE_OPTION	"-DELETE"
 #define SMACK_CIPSO_OPTION 	"-CIPSO"
 
 /*
@@ -234,10 +284,6 @@
 	struct smack_audit_data sad;
 #endif
 };
-/*
- * These functions are in smack_lsm.c
- */
-struct inode_smack *new_inode_smack(struct smack_known *);
 
 /*
  * These functions are in smack_access.c
@@ -267,7 +313,6 @@
 #ifdef CONFIG_SECURITY_SMACK_BRINGUP
 extern struct smack_known *smack_unconfined;
 #endif
-extern struct smack_known smack_cipso_option;
 extern int smack_ptrace_rule;
 
 extern struct smack_known smack_known_floor;
@@ -279,7 +324,10 @@
 
 extern struct mutex	smack_known_lock;
 extern struct list_head smack_known_list;
-extern struct list_head smk_netlbladdr_list;
+extern struct list_head smk_net4addr_list;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct list_head smk_net6addr_list;
+#endif /* CONFIG_IPV6 */
 
 extern struct mutex     smack_onlycap_lock;
 extern struct list_head smack_onlycap_list;
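
The two labeling macros partition the IPv6 configuration space; restating the
#if logic above as a table:

	/*
	 * CONFIG_IPV6  CONFIG_SECURITY_SMACK_NETFILTER  resulting mode
	 * -----------  -------------------------------  ---------------------------
	 *      n                    any                 no IPv6 labeling
	 *      y                     n                  SMACK_IPV6_PORT_LABELING
	 *      y                     y                  SMACK_IPV6_SECMARK_LABELING
	 */
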
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 00f6b38..bc1053f 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -639,6 +639,12 @@
 	struct smack_known *skp = smk_of_current();
 	struct smack_onlycap *sop;
 
+	/*
+	 * All kernel tasks are privileged
+	 */
+	if (unlikely(current->flags & PF_KTHREAD))
+		return 1;
+
 	if (!capable(cap))
 		return 0;
 
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index a143328..996c889 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -41,6 +41,7 @@
 #include <linux/msg.h>
 #include <linux/shm.h>
 #include <linux/binfmts.h>
+#include <linux/parser.h>
 #include "smack.h"
 
 #define TRANS_TRUE	"TRUE"
@@ -50,12 +51,21 @@
 #define SMK_RECEIVING	1
 #define SMK_SENDING	2
 
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#ifdef SMACK_IPV6_PORT_LABELING
 LIST_HEAD(smk_ipv6_port_list);
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
 static struct kmem_cache *smack_inode_cache;
 int smack_enabled;
 
+static const match_table_t smk_mount_tokens = {
+	{Opt_fsdefault, SMK_FSDEFAULT "%s"},
+	{Opt_fsfloor, SMK_FSFLOOR "%s"},
+	{Opt_fshat, SMK_FSHAT "%s"},
+	{Opt_fsroot, SMK_FSROOT "%s"},
+	{Opt_fstransmute, SMK_FSTRANS "%s"},
+	{Opt_error, NULL},
+};
+
 #ifdef CONFIG_SECURITY_SMACK_BRINGUP
 static char *smk_bu_mess[] = {
 	"Bringup Error",	/* Unused */
@@ -281,7 +291,7 @@
  *
  * Returns the new blob or NULL if there's no memory available
  */
-struct inode_smack *new_inode_smack(struct smack_known *skp)
+static struct inode_smack *new_inode_smack(struct smack_known *skp)
 {
 	struct inode_smack *isp;
 
@@ -577,76 +587,197 @@
 }
 
 /**
- * smack_sb_kern_mount - Smack specific mount processing
+ * smack_parse_opts_str - parse Smack specific mount options
+ * @options: mount options string
+ * @opts: where to store converted mount opts
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ *
+ * Converts Smack-specific mount options to the generic security option format.
+ */
+static int smack_parse_opts_str(char *options,
+		struct security_mnt_opts *opts)
+{
+	char *p;
+	char *fsdefault = NULL;
+	char *fsfloor = NULL;
+	char *fshat = NULL;
+	char *fsroot = NULL;
+	char *fstransmute = NULL;
+	int rc = -ENOMEM;
+	int num_mnt_opts = 0;
+	int token;
+
+	opts->num_mnt_opts = 0;
+
+	if (!options)
+		return 0;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, smk_mount_tokens, args);
+
+		switch (token) {
+		case Opt_fsdefault:
+			if (fsdefault)
+				goto out_opt_err;
+			fsdefault = match_strdup(&args[0]);
+			if (!fsdefault)
+				goto out_err;
+			break;
+		case Opt_fsfloor:
+			if (fsfloor)
+				goto out_opt_err;
+			fsfloor = match_strdup(&args[0]);
+			if (!fsfloor)
+				goto out_err;
+			break;
+		case Opt_fshat:
+			if (fshat)
+				goto out_opt_err;
+			fshat = match_strdup(&args[0]);
+			if (!fshat)
+				goto out_err;
+			break;
+		case Opt_fsroot:
+			if (fsroot)
+				goto out_opt_err;
+			fsroot = match_strdup(&args[0]);
+			if (!fsroot)
+				goto out_err;
+			break;
+		case Opt_fstransmute:
+			if (fstransmute)
+				goto out_opt_err;
+			fstransmute = match_strdup(&args[0]);
+			if (!fstransmute)
+				goto out_err;
+			break;
+		default:
+			rc = -EINVAL;
+			pr_warn("Smack:  unknown mount option\n");
+			goto out_err;
+		}
+	}
+
+	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+	if (!opts->mnt_opts)
+		goto out_err;
+
+	opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int),
+			GFP_ATOMIC);
+	if (!opts->mnt_opts_flags) {
+		kfree(opts->mnt_opts);
+		goto out_err;
+	}
+
+	if (fsdefault) {
+		opts->mnt_opts[num_mnt_opts] = fsdefault;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSDEFAULT_MNT;
+	}
+	if (fsfloor) {
+		opts->mnt_opts[num_mnt_opts] = fsfloor;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSFLOOR_MNT;
+	}
+	if (fshat) {
+		opts->mnt_opts[num_mnt_opts] = fshat;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSHAT_MNT;
+	}
+	if (fsroot) {
+		opts->mnt_opts[num_mnt_opts] = fsroot;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSROOT_MNT;
+	}
+	if (fstransmute) {
+		opts->mnt_opts[num_mnt_opts] = fstransmute;
+		opts->mnt_opts_flags[num_mnt_opts++] = FSTRANS_MNT;
+	}
+
+	opts->num_mnt_opts = num_mnt_opts;
+	return 0;
+
+out_opt_err:
+	rc = -EINVAL;
+	pr_warn("Smack: duplicate mount options\n");
+
+out_err:
+	kfree(fsdefault);
+	kfree(fsfloor);
+	kfree(fshat);
+	kfree(fsroot);
+	kfree(fstransmute);
+	return rc;
+}
+
+/**
+ * smack_set_mnt_opts - set Smack specific mount options
  * @sb: the file system superblock
- * @flags: the mount flags
- * @data: the smack mount options
+ * @opts: Smack mount options
+ * @kern_flags: mount option from kernel space or user space
+ * @set_kern_flags: where to store converted mount opts
  *
  * Returns 0 on success, an error code on failure
+ *
+ * Allow filesystems with binary mount data to explicitly set Smack mount
+ * labels.
  */
-static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
+static int smack_set_mnt_opts(struct super_block *sb,
+		struct security_mnt_opts *opts,
+		unsigned long kern_flags,
+		unsigned long *set_kern_flags)
 {
 	struct dentry *root = sb->s_root;
 	struct inode *inode = d_backing_inode(root);
 	struct superblock_smack *sp = sb->s_security;
 	struct inode_smack *isp;
 	struct smack_known *skp;
-	char *op;
-	char *commap;
+	int i;
+	int num_opts = opts->num_mnt_opts;
 	int transmute = 0;
-	int specified = 0;
 
 	if (sp->smk_initialized)
 		return 0;
 
 	sp->smk_initialized = 1;
 
-	for (op = data; op != NULL; op = commap) {
-		commap = strchr(op, ',');
-		if (commap != NULL)
-			*commap++ = '\0';
-
-		if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
-			op += strlen(SMK_FSHAT);
-			skp = smk_import_entry(op, 0);
-			if (IS_ERR(skp))
-				return PTR_ERR(skp);
-			sp->smk_hat = skp;
-			specified = 1;
-
-		} else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
-			op += strlen(SMK_FSFLOOR);
-			skp = smk_import_entry(op, 0);
-			if (IS_ERR(skp))
-				return PTR_ERR(skp);
-			sp->smk_floor = skp;
-			specified = 1;
-
-		} else if (strncmp(op, SMK_FSDEFAULT,
-				   strlen(SMK_FSDEFAULT)) == 0) {
-			op += strlen(SMK_FSDEFAULT);
-			skp = smk_import_entry(op, 0);
+	for (i = 0; i < num_opts; i++) {
+		switch (opts->mnt_opts_flags[i]) {
+		case FSDEFAULT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
 			if (IS_ERR(skp))
 				return PTR_ERR(skp);
 			sp->smk_default = skp;
-			specified = 1;
-
-		} else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
-			op += strlen(SMK_FSROOT);
-			skp = smk_import_entry(op, 0);
+			break;
+		case FSFLOOR_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_floor = skp;
+			break;
+		case FSHAT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
+			if (IS_ERR(skp))
+				return PTR_ERR(skp);
+			sp->smk_hat = skp;
+			break;
+		case FSROOT_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
 			if (IS_ERR(skp))
 				return PTR_ERR(skp);
 			sp->smk_root = skp;
-			specified = 1;
-
-		} else if (strncmp(op, SMK_FSTRANS, strlen(SMK_FSTRANS)) == 0) {
-			op += strlen(SMK_FSTRANS);
-			skp = smk_import_entry(op, 0);
+			break;
+		case FSTRANS_MNT:
+			skp = smk_import_entry(opts->mnt_opts[i], 0);
 			if (IS_ERR(skp))
 				return PTR_ERR(skp);
 			sp->smk_root = skp;
 			transmute = 1;
-			specified = 1;
+			break;
+		default:
+			break;
 		}
 	}
 
@@ -654,7 +785,7 @@
 		/*
 		 * Unprivileged mounts don't get to specify Smack values.
 		 */
-		if (specified)
+		if (num_opts)
 			return -EPERM;
 		/*
 		 * Unprivileged mounts get root and default from the caller.
@@ -663,6 +794,7 @@
 		sp->smk_root = skp;
 		sp->smk_default = skp;
 	}
+
 	/*
 	 * Initialize the root inode.
 	 */
@@ -682,6 +814,37 @@
 }
 
 /**
+ * smack_sb_kern_mount - Smack specific mount processing
+ * @sb: the file system superblock
+ * @flags: the mount flags
+ * @data: the smack mount options
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+	int rc = 0;
+	char *options = data;
+	struct security_mnt_opts opts;
+
+	security_init_mnt_opts(&opts);
+
+	if (!options)
+		goto out;
+
+	rc = smack_parse_opts_str(options, &opts);
+	if (rc)
+		goto out_err;
+
+out:
+	rc = smack_set_mnt_opts(sb, &opts, 0, NULL);
+
+out_err:
+	security_free_mnt_opts(&opts);
+	return rc;
+}
+
+/**
  * smack_sb_statfs - Smack check on statfs
  * @dentry: identifies the file system in question
  *
@@ -2113,7 +2276,7 @@
 }
 
 /**
-* smack_host_label - check host based restrictions
+* smack_ipv4host_label - check host based restrictions
 * @sip: the object end
 *
 * looks for host based access restrictions
@@ -2124,30 +2287,96 @@
 *
 * Returns the label of the far end or NULL if it's not special.
 */
-static struct smack_known *smack_host_label(struct sockaddr_in *sip)
+static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
 {
-	struct smk_netlbladdr *snp;
+	struct smk_net4addr *snp;
 	struct in_addr *siap = &sip->sin_addr;
 
 	if (siap->s_addr == 0)
 		return NULL;
 
-	list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list)
+	list_for_each_entry_rcu(snp, &smk_net4addr_list, list)
+		/*
+		 * we break after finding the first match because
+		 * the list is sorted from longest to shortest mask
+		 * so we have found the most specific match
+		 */
+		if (snp->smk_host.s_addr ==
+		    (siap->s_addr & snp->smk_mask.s_addr))
+			return snp->smk_label;
+
+	return NULL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * smk_ipv6_localhost - Check for local ipv6 host address
+ * @sip: the address
+ *
+ * Returns boolean true if this is the localhost address
+ */
+static bool smk_ipv6_localhost(struct sockaddr_in6 *sip)
+{
+	__be16 *be16p = (__be16 *)&sip->sin6_addr;
+	__be32 *be32p = (__be32 *)&sip->sin6_addr;
+
+	if (be32p[0] == 0 && be32p[1] == 0 && be32p[2] == 0 && be16p[6] == 0 &&
+	    ntohs(be16p[7]) == 1)
+		return true;
+	return false;
+}
+
+/**
+* smack_ipv6host_label - check host based restrictions
+* @sip: the object end
+*
+* looks for host based access restrictions
+*
+* This version will only be appropriate for really small sets of single label
+* hosts.  The caller is responsible for ensuring that the RCU read lock is
+* taken before calling this function.
+*
+* Returns the label of the far end or NULL if it's not special.
+*/
+static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+{
+	struct smk_net6addr *snp;
+	struct in6_addr *sap = &sip->sin6_addr;
+	int i;
+	int found = 0;
+
+	/*
+	 * It's local. Don't look for a host label.
+	 */
+	if (smk_ipv6_localhost(sip))
+		return NULL;
+
+	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
 		/*
 		* we break after finding the first match because
 		* the list is sorted from longest to shortest mask
 		* so we have found the most specific match
 		*/
-		if ((&snp->smk_host.sin_addr)->s_addr ==
-		    (siap->s_addr & (&snp->smk_mask)->s_addr)) {
-			/* we have found the special CIPSO option */
-			if (snp->smk_label == &smack_cipso_option)
-				return NULL;
-			return snp->smk_label;
+		for (found = 1, i = 0; i < 8; i++) {
+			/*
+			 * If the label is NULL the entry has
+			 * been renounced. Ignore it.
+			 */
+			if (snp->smk_label == NULL)
+				continue;
+			if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=
+			    snp->smk_host.s6_addr16[i]) {
+				found = 0;
+				break;
+			}
 		}
+		if (found)
+			return snp->smk_label;
+	}
 
 	return NULL;
 }
+#endif /* CONFIG_IPV6 */
 
 /**
  * smack_netlabel - Set the secattr on a socket
@@ -2211,7 +2440,7 @@
 	struct smk_audit_info ad;
 
 	rcu_read_lock();
-	hkp = smack_host_label(sap);
+	hkp = smack_ipv4host_label(sap);
 	if (hkp != NULL) {
 #ifdef CONFIG_AUDIT
 		struct lsm_network_audit net;
@@ -2236,7 +2465,42 @@
 	return smack_netlabel(sk, sk_lbl);
 }
 
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * smk_ipv6_check - check Smack access
+ * @subject: subject Smack label
+ * @object: object Smack label
+ * @address: address
+ * @act: the action being taken
+ *
+ * Check an IPv6 access
+ */
+static int smk_ipv6_check(struct smack_known *subject,
+				struct smack_known *object,
+				struct sockaddr_in6 *address, int act)
+{
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+#endif
+	struct smk_audit_info ad;
+	int rc;
+
+#ifdef CONFIG_AUDIT
+	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+	ad.a.u.net->family = PF_INET6;
+	ad.a.u.net->dport = ntohs(address->sin6_port);
+	if (act == SMK_RECEIVING)
+		ad.a.u.net->v6info.saddr = address->sin6_addr;
+	else
+		ad.a.u.net->v6info.daddr = address->sin6_addr;
+#endif
+	rc = smk_access(subject, object, MAY_WRITE, &ad);
+	rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
+	return rc;
+}
+#endif /* CONFIG_IPV6 */
+
+#ifdef SMACK_IPV6_PORT_LABELING
 /**
  * smk_ipv6_port_label - Smack port access table management
  * @sock: socket
@@ -2320,48 +2584,43 @@
 static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
 				int act)
 {
-	__be16 *bep;
-	__be32 *be32p;
 	struct smk_port_label *spp;
 	struct socket_smack *ssp = sk->sk_security;
-	struct smack_known *skp;
-	unsigned short port = 0;
+	struct smack_known *skp = NULL;
+	unsigned short port;
 	struct smack_known *object;
-	struct smk_audit_info ad;
-	int rc;
-#ifdef CONFIG_AUDIT
-	struct lsm_network_audit net;
-#endif
 
 	if (act == SMK_RECEIVING) {
-		skp = smack_net_ambient;
+		skp = smack_ipv6host_label(address);
 		object = ssp->smk_in;
 	} else {
 		skp = ssp->smk_out;
-		object = smack_net_ambient;
+		object = smack_ipv6host_label(address);
 	}
 
 	/*
-	 * Get the IP address and port from the address.
+	 * The other end is a single label host.
 	 */
-	port = ntohs(address->sin6_port);
-	bep = (__be16 *)(&address->sin6_addr);
-	be32p = (__be32 *)(&address->sin6_addr);
+	if (skp != NULL && object != NULL)
+		return smk_ipv6_check(skp, object, address, act);
+	if (skp == NULL)
+		skp = smack_net_ambient;
+	if (object == NULL)
+		object = smack_net_ambient;
 
 	/*
 	 * It's remote, so port lookup does no good.
 	 */
-	if (be32p[0] || be32p[1] || be32p[2] || bep[6] || ntohs(bep[7]) != 1)
-		goto auditout;
+	if (!smk_ipv6_localhost(address))
+		return smk_ipv6_check(skp, object, address, act);
 
 	/*
 	 * It's local so the send check has to have passed.
 	 */
-	if (act == SMK_RECEIVING) {
-		skp = &smack_known_web;
-		goto auditout;
-	}
+	if (act == SMK_RECEIVING)
+		return 0;
 
+	port = ntohs(address->sin6_port);
 	list_for_each_entry(spp, &smk_ipv6_port_list, list) {
 		if (spp->smk_port != port)
 			continue;
@@ -2371,22 +2630,9 @@
 		break;
 	}
 
-auditout:
-
-#ifdef CONFIG_AUDIT
-	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
-	ad.a.u.net->family = sk->sk_family;
-	ad.a.u.net->dport = port;
-	if (act == SMK_RECEIVING)
-		ad.a.u.net->v6info.saddr = address->sin6_addr;
-	else
-		ad.a.u.net->v6info.daddr = address->sin6_addr;
-#endif
-	rc = smk_access(skp, object, MAY_WRITE, &ad);
-	rc = smk_bu_note("IPv6 port check", skp, object, MAY_WRITE, rc);
-	return rc;
+	return smk_ipv6_check(skp, object, address, act);
 }
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif /* SMACK_IPV6_PORT_LABELING */
 
 /**
  * smack_inode_setsecurity - set smack xattrs
@@ -2447,10 +2693,10 @@
 	} else
 		return -EOPNOTSUPP;
 
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#ifdef SMACK_IPV6_PORT_LABELING
 	if (sock->sk->sk_family == PF_INET6)
 		smk_ipv6_port_label(sock, NULL);
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
 
 	return 0;
 }
@@ -2492,7 +2738,7 @@
 	return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
 }
 
-#ifndef CONFIG_SECURITY_SMACK_NETFILTER
+#ifdef SMACK_IPV6_PORT_LABELING
 /**
  * smack_socket_bind - record port binding information.
  * @sock: the socket
@@ -2506,14 +2752,11 @@
 static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
 				int addrlen)
 {
-#if IS_ENABLED(CONFIG_IPV6)
 	if (sock->sk != NULL && sock->sk->sk_family == PF_INET6)
 		smk_ipv6_port_label(sock, address);
-#endif
-
 	return 0;
 }
-#endif /* !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif /* SMACK_IPV6_PORT_LABELING */
 
 /**
  * smack_socket_connect - connect access check
@@ -2529,6 +2772,13 @@
 				int addrlen)
 {
 	int rc = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	struct smack_known *rsp;
+	struct socket_smack *ssp = sock->sk->sk_security;
+#endif
 
 	if (sock->sk == NULL)
 		return 0;
@@ -2542,10 +2792,15 @@
 	case PF_INET6:
 		if (addrlen < sizeof(struct sockaddr_in6))
 			return -EINVAL;
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
-		rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		rsp = smack_ipv6host_label(sip);
+		if (rsp != NULL)
+			rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
 						SMK_CONNECTING);
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+		rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+#endif
 		break;
 	}
 	return rc;
@@ -3431,9 +3686,13 @@
 				int size)
 {
 	struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	struct socket_smack *ssp = sock->sk->sk_security;
+	struct smack_known *rsp;
+#endif
 	int rc = 0;
 
 	/*
@@ -3447,9 +3706,15 @@
 		rc = smack_netlabel_send(sock->sk, sip);
 		break;
 	case AF_INET6:
-#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#ifdef SMACK_IPV6_SECMARK_LABELING
+		rsp = smack_ipv6host_label(sap);
+		if (rsp != NULL)
+			rc = smk_ipv6_check(ssp->smk_out, rsp, sap,
+						SMK_CONNECTING);
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
 		rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
-#endif /* CONFIG_IPV6 && !CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
 		break;
 	}
 	return rc;
@@ -3663,10 +3928,12 @@
 		proto = smk_skb_to_addr_ipv6(skb, &sadd);
 		if (proto != IPPROTO_UDP && proto != IPPROTO_TCP)
 			break;
-#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+#ifdef SMACK_IPV6_SECMARK_LABELING
 		if (skb && skb->secmark != 0)
 			skp = smack_from_secid(skb->secmark);
 		else
+			skp = smack_ipv6host_label(&sadd);
+		if (skp == NULL)
 			skp = smack_net_ambient;
 #ifdef CONFIG_AUDIT
 		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
@@ -3677,9 +3944,10 @@
 		rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
 		rc = smk_bu_note("IPv6 delivery", skp, ssp->smk_in,
 					MAY_WRITE, rc);
-#else /* CONFIG_SECURITY_SMACK_NETFILTER */
+#endif /* SMACK_IPV6_SECMARK_LABELING */
+#ifdef SMACK_IPV6_PORT_LABELING
 		rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
-#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+#endif /* SMACK_IPV6_PORT_LABELING */
 		break;
 #endif /* CONFIG_IPV6 */
 	}
@@ -3777,13 +4045,11 @@
 		}
 		netlbl_secattr_destroy(&secattr);
 		break;
-#if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
-#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+#ifdef SMACK_IPV6_SECMARK_LABELING
 		s = skb->secmark;
-#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
 		break;
-#endif /* CONFIG_IPV6 */
 	}
 	*secid = s;
 	if (s == 0)
@@ -3906,7 +4172,7 @@
 	hdr = ip_hdr(skb);
 	addr.sin_addr.s_addr = hdr->saddr;
 	rcu_read_lock();
-	hskp = smack_host_label(&addr);
+	hskp = smack_ipv4host_label(&addr);
 	rcu_read_unlock();
 
 	if (hskp == NULL)
@@ -4254,7 +4520,7 @@
 	return 0;
 }
 
-struct security_hook_list smack_hooks[] = {
+static struct security_hook_list smack_hooks[] = {
 	LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
 	LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
 	LSM_HOOK_INIT(syslog, smack_syslog),
@@ -4264,6 +4530,8 @@
 	LSM_HOOK_INIT(sb_copy_data, smack_sb_copy_data),
 	LSM_HOOK_INIT(sb_kern_mount, smack_sb_kern_mount),
 	LSM_HOOK_INIT(sb_statfs, smack_sb_statfs),
+	LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts),
+	LSM_HOOK_INIT(sb_parse_opts_str, smack_parse_opts_str),
 
 	LSM_HOOK_INIT(bprm_set_creds, smack_bprm_set_creds),
 	LSM_HOOK_INIT(bprm_committing_creds, smack_bprm_committing_creds),
@@ -4356,9 +4624,9 @@
 	LSM_HOOK_INIT(unix_may_send, smack_unix_may_send),
 
 	LSM_HOOK_INIT(socket_post_create, smack_socket_post_create),
-#ifndef CONFIG_SECURITY_SMACK_NETFILTER
+#ifdef SMACK_IPV6_PORT_LABELING
 	LSM_HOOK_INIT(socket_bind, smack_socket_bind),
-#endif /* CONFIG_SECURITY_SMACK_NETFILTER */
+#endif
 	LSM_HOOK_INIT(socket_connect, smack_socket_connect),
 	LSM_HOOK_INIT(socket_sendmsg, smack_socket_sendmsg),
 	LSM_HOOK_INIT(socket_sock_rcv_skb, smack_socket_sock_rcv_skb),
@@ -4453,7 +4721,16 @@
 		return -ENOMEM;
 	}
 
-	printk(KERN_INFO "Smack:  Initializing.\n");
+	pr_info("Smack:  Initializing.\n");
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+	pr_info("Smack:  Netfilter enabled.\n");
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+	pr_info("Smack:  IPv6 port labeling enabled.\n");
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+	pr_info("Smack:  IPv6 Netfilter enabled.\n");
+#endif
 
 	/*
 	 * Set the security state for the initial task.
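
The net effect of the mount rework above: option strings now flow through
match_token() into a generic struct security_mnt_opts, which
smack_set_mnt_opts() then applies. An illustrative parse result (the labels
are invented):

	/* smack_parse_opts_str("smackfsdef=Pop,smackfsroot=Rock", &opts) yields:
	 *   opts.num_mnt_opts == 2
	 *   opts.mnt_opts[0] == "Pop"   opts.mnt_opts_flags[0] == FSDEFAULT_MNT
	 *   opts.mnt_opts[1] == "Rock"  opts.mnt_opts_flags[1] == FSROOT_MNT
	 */
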
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 2716d02..c20b154 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -29,6 +29,7 @@
 #include <linux/magic.h>
 #include "smack.h"
 
+#define BEBITS	(sizeof(__be32) * 8)
 /*
  * smackfs pseudo filesystem.
  */
@@ -40,7 +41,7 @@
 	SMK_DOI		= 5,	/* CIPSO DOI */
 	SMK_DIRECT	= 6,	/* CIPSO level indicating direct label */
 	SMK_AMBIENT	= 7,	/* internet ambient label */
-	SMK_NETLBLADDR	= 8,	/* single label hosts */
+	SMK_NET4ADDR	= 8,	/* single label hosts */
 	SMK_ONLYCAP	= 9,	/* the only "capable" label */
 	SMK_LOGGING	= 10,	/* logging */
 	SMK_LOAD_SELF	= 11,	/* task specific rules */
@@ -57,6 +58,9 @@
 #ifdef CONFIG_SECURITY_SMACK_BRINGUP
 	SMK_UNCONFINED	= 22,	/* define an unconfined label */
 #endif
+#if IS_ENABLED(CONFIG_IPV6)
+	SMK_NET6ADDR	= 23,	/* single label IPv6 hosts */
+#endif /* CONFIG_IPV6 */
 };
 
 /*
@@ -64,7 +68,10 @@
  */
 static DEFINE_MUTEX(smack_cipso_lock);
 static DEFINE_MUTEX(smack_ambient_lock);
-static DEFINE_MUTEX(smk_netlbladdr_lock);
+static DEFINE_MUTEX(smk_net4addr_lock);
+#if IS_ENABLED(CONFIG_IPV6)
+static DEFINE_MUTEX(smk_net6addr_lock);
+#endif /* CONFIG_IPV6 */
 
 /*
  * This is the "ambient" label for network traffic.
@@ -118,7 +125,10 @@
  * can write to the specified label.
  */
 
-LIST_HEAD(smk_netlbladdr_list);
+LIST_HEAD(smk_net4addr_list);
+#if IS_ENABLED(CONFIG_IPV6)
+LIST_HEAD(smk_net6addr_list);
+#endif /* CONFIG_IPV6 */
 
 /*
  * Rule lists are maintained for each label.
@@ -129,7 +139,7 @@
 	struct smack_rule	*smk_rule;
 };
 
-LIST_HEAD(smack_rule_list);
+static LIST_HEAD(smack_rule_list);
 
 struct smack_parsed_rule {
 	struct smack_known	*smk_subject;
@@ -140,11 +150,6 @@
 
 static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
 
-struct smack_known smack_cipso_option = {
-	.smk_known	= SMACK_CIPSO_OPTION,
-	.smk_secid	= 0,
-};
-
 /*
  * Values for parsing cipso rules
  * SMK_DIGITLEN: Length of a digit field in a rule.
@@ -1047,92 +1052,90 @@
  * Seq_file read operations for /smack/netlabel
  */
 
-static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos)
+static void *net4addr_seq_start(struct seq_file *s, loff_t *pos)
 {
-	return smk_seq_start(s, pos, &smk_netlbladdr_list);
+	return smk_seq_start(s, pos, &smk_net4addr_list);
 }
 
-static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+static void *net4addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-	return smk_seq_next(s, v, pos, &smk_netlbladdr_list);
+	return smk_seq_next(s, v, pos, &smk_net4addr_list);
 }
-#define BEBITS	(sizeof(__be32) * 8)
 
 /*
  * Print host/label pairs
  */
-static int netlbladdr_seq_show(struct seq_file *s, void *v)
+static int net4addr_seq_show(struct seq_file *s, void *v)
 {
 	struct list_head *list = v;
-	struct smk_netlbladdr *skp =
-			list_entry_rcu(list, struct smk_netlbladdr, list);
-	unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr;
-	int maskn;
-	u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr);
+	struct smk_net4addr *skp =
+			list_entry_rcu(list, struct smk_net4addr, list);
+	char *kp = SMACK_CIPSO_OPTION;
 
-	for (maskn = 0; temp_mask; temp_mask <<= 1, maskn++);
-
-	seq_printf(s, "%u.%u.%u.%u/%d %s\n",
-		hp[0], hp[1], hp[2], hp[3], maskn, skp->smk_label->smk_known);
+	if (skp->smk_label != NULL)
+		kp = skp->smk_label->smk_known;
+	seq_printf(s, "%pI4/%d %s\n", &skp->smk_host.s_addr,
+			skp->smk_masks, kp);
 
 	return 0;
 }
 
-static const struct seq_operations netlbladdr_seq_ops = {
-	.start = netlbladdr_seq_start,
-	.next  = netlbladdr_seq_next,
-	.show  = netlbladdr_seq_show,
+static const struct seq_operations net4addr_seq_ops = {
+	.start = net4addr_seq_start,
+	.next  = net4addr_seq_next,
+	.show  = net4addr_seq_show,
 	.stop  = smk_seq_stop,
 };
 
 /**
- * smk_open_netlbladdr - open() for /smack/netlabel
+ * smk_open_net4addr - open() for /smack/netlabel
  * @inode: inode structure representing file
  * @file: "netlabel" file pointer
  *
- * Connect our netlbladdr_seq_* operations with /smack/netlabel
+ * Connect our net4addr_seq_* operations with /smack/netlabel
  * file_operations
  */
-static int smk_open_netlbladdr(struct inode *inode, struct file *file)
+static int smk_open_net4addr(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &netlbladdr_seq_ops);
+	return seq_open(file, &net4addr_seq_ops);
 }
 
 /**
- * smk_netlbladdr_insert
+ * smk_net4addr_insert
  * @new : netlabel to insert
  *
- * This helper insert netlabel in the smack_netlbladdrs list
+ * This helper inserts a netlabel into the smack_net4addrs list
  * sorted by netmask length (longest to smallest)
- * locked by &smk_netlbladdr_lock in smk_write_netlbladdr
+ * locked by &smk_net4addr_lock in smk_write_net4addr
  *
  */
-static void smk_netlbladdr_insert(struct smk_netlbladdr *new)
+static void smk_net4addr_insert(struct smk_net4addr *new)
 {
-	struct smk_netlbladdr *m, *m_next;
+	struct smk_net4addr *m;
+	struct smk_net4addr *m_next;
 
-	if (list_empty(&smk_netlbladdr_list)) {
-		list_add_rcu(&new->list, &smk_netlbladdr_list);
+	if (list_empty(&smk_net4addr_list)) {
+		list_add_rcu(&new->list, &smk_net4addr_list);
 		return;
 	}
 
-	m = list_entry_rcu(smk_netlbladdr_list.next,
-			   struct smk_netlbladdr, list);
+	m = list_entry_rcu(smk_net4addr_list.next,
+			   struct smk_net4addr, list);
 
 	/* the comparison '>' is a bit hacky, but works */
-	if (new->smk_mask.s_addr > m->smk_mask.s_addr) {
-		list_add_rcu(&new->list, &smk_netlbladdr_list);
+	if (new->smk_masks > m->smk_masks) {
+		list_add_rcu(&new->list, &smk_net4addr_list);
 		return;
 	}
 
-	list_for_each_entry_rcu(m, &smk_netlbladdr_list, list) {
-		if (list_is_last(&m->list, &smk_netlbladdr_list)) {
+	list_for_each_entry_rcu(m, &smk_net4addr_list, list) {
+		if (list_is_last(&m->list, &smk_net4addr_list)) {
 			list_add_rcu(&new->list, &m->list);
 			return;
 		}
 		m_next = list_entry_rcu(m->list.next,
-					struct smk_netlbladdr, list);
-		if (new->smk_mask.s_addr > m_next->smk_mask.s_addr) {
+					struct smk_net4addr, list);
+		if (new->smk_masks > m_next->smk_masks) {
 			list_add_rcu(&new->list, &m->list);
 			return;
 		}
@@ -1141,28 +1144,29 @@
 
 
 /**
- * smk_write_netlbladdr - write() for /smack/netlabel
+ * smk_write_net4addr - write() for /smack/netlabel
  * @file: file pointer, not actually used
  * @buf: where to get the data from
  * @count: bytes sent
  * @ppos: where to start
  *
- * Accepts only one netlbladdr per write call.
+ * Accepts only one net4addr per write call.
  * Returns number of bytes written or error code, as appropriate
  */
-static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
+static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	struct smk_netlbladdr *snp;
+	struct smk_net4addr *snp;
 	struct sockaddr_in newname;
 	char *smack;
-	struct smack_known *skp;
+	struct smack_known *skp = NULL;
 	char *data;
 	char *host = (char *)&newname.sin_addr.s_addr;
 	int rc;
 	struct netlbl_audit audit_info;
 	struct in_addr mask;
 	unsigned int m;
+	unsigned int masks;
 	int found;
 	u32 mask_bits = (1<<31);
 	__be32 nsa;
@@ -1200,7 +1204,7 @@
 	data[count] = '\0';
 
 	rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
-		&host[0], &host[1], &host[2], &host[3], &m, smack);
+		&host[0], &host[1], &host[2], &host[3], &masks, smack);
 	if (rc != 6) {
 		rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
 			&host[0], &host[1], &host[2], &host[3], smack);
@@ -1209,8 +1213,9 @@
 			goto free_out;
 		}
 		m = BEBITS;
+		masks = 32;
 	}
-	if (m > BEBITS) {
+	if (masks > BEBITS) {
 		rc = -EINVAL;
 		goto free_out;
 	}
@@ -1225,16 +1230,16 @@
 			goto free_out;
 		}
 	} else {
-		/* check known options */
-		if (strcmp(smack, smack_cipso_option.smk_known) == 0)
-			skp = &smack_cipso_option;
-		else {
+		/*
+		 * Only the -CIPSO option is supported for IPv4
+		 */
+		if (strcmp(smack, SMACK_CIPSO_OPTION) != 0) {
 			rc = -EINVAL;
 			goto free_out;
 		}
 	}
 
-	for (temp_mask = 0; m > 0; m--) {
+	for (m = masks, temp_mask = 0; m > 0; m--) {
 		temp_mask |= mask_bits;
 		mask_bits >>= 1;
 	}
@@ -1245,14 +1250,13 @@
 	 * Only allow one writer at a time. Writes should be
 	 * quite rare and small in any case.
 	 */
-	mutex_lock(&smk_netlbladdr_lock);
+	mutex_lock(&smk_net4addr_lock);
 
 	nsa = newname.sin_addr.s_addr;
 	/* try to find if the prefix is already in the list */
 	found = 0;
-	list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list) {
-		if (snp->smk_host.sin_addr.s_addr == nsa &&
-		    snp->smk_mask.s_addr == mask.s_addr) {
+	list_for_each_entry_rcu(snp, &smk_net4addr_list, list) {
+		if (snp->smk_host.s_addr == nsa && snp->smk_masks == masks) {
 			found = 1;
 			break;
 		}
@@ -1265,17 +1269,20 @@
 			rc = -ENOMEM;
 		else {
 			rc = 0;
-			snp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr;
+			snp->smk_host.s_addr = newname.sin_addr.s_addr;
 			snp->smk_mask.s_addr = mask.s_addr;
 			snp->smk_label = skp;
-			smk_netlbladdr_insert(snp);
+			snp->smk_masks = masks;
+			smk_net4addr_insert(snp);
 		}
 	} else {
-		/* we delete the unlabeled entry, only if the previous label
-		 * wasn't the special CIPSO option */
-		if (snp->smk_label != &smack_cipso_option)
+		/*
+		 * Delete the unlabeled entry, only if the previous label
+		 * wasn't the special CIPSO option
+		 */
+		if (snp->smk_label != NULL)
 			rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
-					&snp->smk_host.sin_addr, &snp->smk_mask,
+					&snp->smk_host, &snp->smk_mask,
 					PF_INET, &audit_info);
 		else
 			rc = 0;
@@ -1287,15 +1294,15 @@
 	 * this host so that incoming packets get labeled.
 	 * but only if we didn't get the special CIPSO option
 	 */
-	if (rc == 0 && skp != &smack_cipso_option)
+	if (rc == 0 && skp != NULL)
 		rc = netlbl_cfg_unlbl_static_add(&init_net, NULL,
-			&snp->smk_host.sin_addr, &snp->smk_mask, PF_INET,
+			&snp->smk_host, &snp->smk_mask, PF_INET,
 			snp->smk_label->smk_secid, &audit_info);
 
 	if (rc == 0)
 		rc = count;
 
-	mutex_unlock(&smk_netlbladdr_lock);
+	mutex_unlock(&smk_net4addr_lock);
 
 free_out:
 	kfree(smack);
@@ -1305,14 +1312,279 @@
 	return rc;
 }
 
-static const struct file_operations smk_netlbladdr_ops = {
-	.open           = smk_open_netlbladdr,
+static const struct file_operations smk_net4addr_ops = {
+	.open           = smk_open_net4addr,
 	.read		= seq_read,
 	.llseek         = seq_lseek,
-	.write		= smk_write_netlbladdr,
+	.write		= smk_write_net4addr,
 	.release        = seq_release,
 };
 
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * Seq_file read operations for /smack/netlabel6
+ */
+
+static void *net6addr_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return smk_seq_start(s, pos, &smk_net6addr_list);
+}
+
+static void *net6addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return smk_seq_next(s, v, pos, &smk_net6addr_list);
+}
+
+/*
+ * Print host/label pairs
+ */
+static int net6addr_seq_show(struct seq_file *s, void *v)
+{
+	struct list_head *list = v;
+	struct smk_net6addr *skp =
+			 list_entry(list, struct smk_net6addr, list);
+
+	if (skp->smk_label != NULL)
+		seq_printf(s, "%pI6/%d %s\n", &skp->smk_host, skp->smk_masks,
+				skp->smk_label->smk_known);
+
+	return 0;
+}
+
+static const struct seq_operations net6addr_seq_ops = {
+	.start = net6addr_seq_start,
+	.next  = net6addr_seq_next,
+	.show  = net6addr_seq_show,
+	.stop  = smk_seq_stop,
+};
+
+/**
+ * smk_open_net6addr - open() for /smack/ipv6host
+ * @inode: inode structure representing file
+ * @file: "ipv6host" file pointer
+ *
+ * Connect our net6addr_seq_* operations with /smack/ipv6host
+ * file_operations
+ */
+static int smk_open_net6addr(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &net6addr_seq_ops);
+}
+
+/**
+ * smk_net6addr_insert - insert a new entry
+ * @new: entry to insert
+ *
+ * This inserts an entry into the smk_net6addr_list,
+ * sorted by netmask length (longest to shortest),
+ * locked by &smk_net6addr_lock in smk_write_net6addr.
+ *
+ */
+static void smk_net6addr_insert(struct smk_net6addr *new)
+{
+	struct smk_net6addr *m_next;
+	struct smk_net6addr *m;
+
+	if (list_empty(&smk_net6addr_list)) {
+		list_add_rcu(&new->list, &smk_net6addr_list);
+		return;
+	}
+
+	m = list_entry_rcu(smk_net6addr_list.next,
+			   struct smk_net6addr, list);
+
+	if (new->smk_masks > m->smk_masks) {
+		list_add_rcu(&new->list, &smk_net6addr_list);
+		return;
+	}
+
+	list_for_each_entry_rcu(m, &smk_net6addr_list, list) {
+		if (list_is_last(&m->list, &smk_net6addr_list)) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+		m_next = list_entry_rcu(m->list.next,
+					struct smk_net6addr, list);
+		if (new->smk_masks > m_next->smk_masks) {
+			list_add_rcu(&new->list, &m->list);
+			return;
+		}
+	}
+}
+
+
+/**
+ * smk_write_net6addr - write() for /smack/ipv6host
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one net6addr per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct smk_net6addr *snp;
+	struct in6_addr newname;
+	struct in6_addr fullmask;
+	struct smack_known *skp = NULL;
+	char *smack;
+	char *data;
+	int rc = 0;
+	int found = 0;
+	int i;
+	unsigned int scanned[8];
+	unsigned int m;
+	unsigned int mask = 128;
+
+	/*
+	 * Must have privilege.
+	 * No partial writes.
+	 * Enough data must be present.
+	 * "<addr/mask, as a:b:c:d:e:f:g:h/e><space><label>"
+	 * "<addr, as a:b:c:d:e:f:g:h><space><label>"
+	 */
+	if (!smack_privileged(CAP_MAC_ADMIN))
+		return -EPERM;
+	if (*ppos != 0)
+		return -EINVAL;
+	if (count < SMK_NETLBLADDRMIN)
+		return -EINVAL;
+
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto free_data_out;
+	}
+
+	smack = kzalloc(count + 1, GFP_KERNEL);
+	if (smack == NULL) {
+		rc = -ENOMEM;
+		goto free_data_out;
+	}
+
+	data[count] = '\0';
+
+	i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x/%u %s",
+			&scanned[0], &scanned[1], &scanned[2], &scanned[3],
+			&scanned[4], &scanned[5], &scanned[6], &scanned[7],
+			&mask, smack);
+	if (i != 10) {
+		i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x %s",
+				&scanned[0], &scanned[1], &scanned[2],
+				&scanned[3], &scanned[4], &scanned[5],
+				&scanned[6], &scanned[7], smack);
+		if (i != 9) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	}
+	if (mask > 128) {
+		rc = -EINVAL;
+		goto free_out;
+	}
+	for (i = 0; i < 8; i++) {
+		if (scanned[i] > 0xffff) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+		newname.s6_addr16[i] = htons(scanned[i]);
+	}
+
+	/*
+	 * If smack begins with '-', it is an option; don't import it
+	 */
+	if (smack[0] != '-') {
+		skp = smk_import_entry(smack, 0);
+		if (skp == NULL) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	} else {
+		/*
+		 * Only -DELETE is supported for IPv6
+		 */
+		if (strcmp(smack, SMACK_DELETE_OPTION) != 0) {
+			rc = -EINVAL;
+			goto free_out;
+		}
+	}
+
+	for (i = 0, m = mask; i < 8; i++) {
+		if (m >= 16) {
+			fullmask.s6_addr16[i] = 0xffff;
+			m -= 16;
+		} else if (m > 0) {
+			fullmask.s6_addr16[i] = (1 << m) - 1;
+			m = 0;
+		} else
+			fullmask.s6_addr16[i] = 0;
+		newname.s6_addr16[i] &= fullmask.s6_addr16[i];
+	}
+
+	/*
+	 * Only allow one writer at a time. Writes should be
+	 * quite rare and small in any case.
+	 */
+	mutex_lock(&smk_net6addr_lock);
+	/*
+	 * Try to find the prefix in the list
+	 */
+	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
+		if (mask != snp->smk_masks)
+			continue;
+		for (found = 1, i = 0; i < 8; i++) {
+			if (newname.s6_addr16[i] !=
+			    snp->smk_host.s6_addr16[i]) {
+				found = 0;
+				break;
+			}
+		}
+		if (found == 1)
+			break;
+	}
+	if (found == 0) {
+		snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+		if (snp == NULL)
+			rc = -ENOMEM;
+		else {
+			snp->smk_host = newname;
+			snp->smk_mask = fullmask;
+			snp->smk_masks = mask;
+			snp->smk_label = skp;
+			smk_net6addr_insert(snp);
+		}
+	} else {
+		snp->smk_label = skp;
+	}
+
+	if (rc == 0)
+		rc = count;
+
+	mutex_unlock(&smk_net6addr_lock);
+
+free_out:
+	kfree(smack);
+free_data_out:
+	kfree(data);
+
+	return rc;
+}
+
+static const struct file_operations smk_net6addr_ops = {
+	.open           = smk_open_net6addr,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= smk_write_net6addr,
+	.release        = seq_release,
+};
+#endif /* CONFIG_IPV6 */
+
 /**
  * smk_read_doi - read() for /smack/doi
  * @filp: file pointer, not actually used
@@ -2320,11 +2592,7 @@
  */
 static int smk_init_sysfs(void)
 {
-	int err;
-	err = sysfs_create_mount_point(fs_kobj, "smackfs");
-	if (err)
-		return err;
-	return 0;
+	return sysfs_create_mount_point(fs_kobj, "smackfs");
 }
 
 /**
@@ -2519,8 +2787,8 @@
 			"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
 		[SMK_AMBIENT] = {
 			"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
-		[SMK_NETLBLADDR] = {
-			"netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR},
+		[SMK_NET4ADDR] = {
+			"netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR},
 		[SMK_ONLYCAP] = {
 			"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
 		[SMK_LOGGING] = {
@@ -2552,6 +2820,10 @@
 		[SMK_UNCONFINED] = {
 			"unconfined", &smk_unconfined_ops, S_IRUGO|S_IWUSR},
 #endif
+#if IS_ENABLED(CONFIG_IPV6)
+		[SMK_NET6ADDR] = {
+			"ipv6host", &smk_net6addr_ops, S_IRUGO|S_IWUSR},
+#endif /* CONFIG_IPV6 */
 		/* last one */
 			{""}
 	};
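
The IPv6 write path above expands a prefix length into a 128-bit mask sixteen
bits at a time. Note that the hunk fills a partial group with (1 << m) - 1,
which sets the low-order bits of that group; the conventional CIDR expansion
sets the high-order bits instead, as in this standalone sketch (illustrative
code, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: expand an IPv6 prefix length (0..128) into
     * eight 16-bit mask groups, most-significant bits first. */
    static void prefix_to_mask(unsigned int prefix, uint16_t mask[8])
    {
            unsigned int i, m = prefix;

            for (i = 0; i < 8; i++) {
                    if (m >= 16) {
                            mask[i] = 0xffff;
                            m -= 16;
                    } else if (m > 0) {
                            mask[i] = (uint16_t)(0xffff << (16 - m));
                            m = 0;
                    } else {
                            mask[i] = 0;
                    }
            }
    }

    int main(void)
    {
            uint16_t mask[8];
            unsigned int i;

            prefix_to_mask(60, mask);       /* ffff ffff ffff fff0 0000 ... */
            for (i = 0; i < 8; i++)
                    printf("%04x ", mask[i]);
            printf("\n");
            return 0;
    }
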
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
index 3123e1d..90c605e 100644
--- a/security/yama/Kconfig
+++ b/security/yama/Kconfig
@@ -6,14 +6,7 @@
 	  This selects Yama, which extends DAC support with additional
 	  system-wide security settings beyond regular Linux discretionary
 	  access controls. Currently available is ptrace scope restriction.
+	  Like capabilities, this security module stacks with other LSMs.
 	  Further information can be found in Documentation/security/Yama.txt.
 
 	  If you are unsure how to answer this question, answer N.
-
-config SECURITY_YAMA_STACKED
-	bool "Yama stacked with other LSMs"
-	depends on SECURITY_YAMA
-	default n
-	help
-	  When Yama is built into the kernel, force it to stack with the
-	  selected primary LSM.
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 5ebb896..d3c19c9 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -353,11 +353,6 @@
 	LSM_HOOK_INIT(task_free, yama_task_free),
 };
 
-void __init yama_add_hooks(void)
-{
-	security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks));
-}
-
 #ifdef CONFIG_SYSCTL
 static int yama_dointvec_minmax(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -396,26 +391,18 @@
 	},
 	{ }
 };
-#endif /* CONFIG_SYSCTL */
-
-static __init int yama_init(void)
+static void __init yama_init_sysctl(void)
 {
-#ifndef CONFIG_SECURITY_YAMA_STACKED
-	/*
-	 * If yama is being stacked this is already taken care of.
-	 */
-	if (!security_module_enable("yama"))
-		return 0;
-	yama_add_hooks();
-#endif
-	pr_info("Yama: becoming mindful.\n");
-
-#ifdef CONFIG_SYSCTL
 	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
 		panic("Yama: sysctl registration failed.\n");
-#endif
-
-	return 0;
 }
+#else
+static inline void yama_init_sysctl(void) { }
+#endif /* CONFIG_SYSCTL */
 
-security_initcall(yama_init);
+void __init yama_add_hooks(void)
+{
+	pr_info("Yama: becoming mindful.\n");
+	security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks));
+	yama_init_sysctl();
+}
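
The Yama rework is a common kernel refactoring: pair the real helper with a
static inline stub so the caller needs no #ifdef. A minimal standalone sketch
of the pattern, with illustrative names (printf stands in for
register_sysctl_paths()):

    #include <stdio.h>

    #define CONFIG_SYSCTL 1         /* toggle to mimic the Kconfig option */

    #ifdef CONFIG_SYSCTL
    static void feature_init_sysctl(void)
    {
            printf("sysctl table registered\n");
    }
    #else
    static inline void feature_init_sysctl(void) { }
    #endif

    int main(void)
    {
            /* The caller stays free of #ifdefs; the stub compiles away. */
            feature_init_sysctl();
            return 0;
    }
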
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index 2b50cbe..52e4bc5 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -18,6 +18,68 @@
 #include <sound/ac97_codec.h>
 
 /*
+ * snd_ac97_check_id() - Reads and checks the vendor ID of the device
+ * @ac97: The AC97 device to check
+ * @id: The ID to compare to
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * If @id is 0 this function returns true if the read device vendor ID is
+ * a valid ID. If @id is non-zero this function returns true if @id
+ * matches the read vendor ID. Otherwise the function returns false.
+ */
+static bool snd_ac97_check_id(struct snd_ac97 *ac97, unsigned int id,
+	unsigned int id_mask)
+{
+	ac97->id = ac97->bus->ops->read(ac97, AC97_VENDOR_ID1) << 16;
+	ac97->id |= ac97->bus->ops->read(ac97, AC97_VENDOR_ID2);
+
+	if (ac97->id == 0x0 || ac97->id == 0xffffffff)
+		return false;
+
+	if (id != 0 && id != (ac97->id & id_mask))
+		return false;
+
+	return true;
+}
+
+/**
+ * snd_ac97_reset() - Reset AC'97 device
+ * @ac97: The AC'97 device to reset
+ * @try_warm: Try a warm reset first
+ * @id: Expected device vendor ID
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * This function resets the AC'97 device. If @try_warm is true the function
+ * first performs a warm reset. If the warm reset is successful the function
+ * returns 1. Otherwise, or if @try_warm is false, the function issues a cold reset
+ * followed by a warm reset. If this is successful the function returns 0,
+ * otherwise a negative error code. If @id is 0 any valid device ID will be
+ * accepted, otherwise only the ID that matches @id and @id_mask is accepted.
+ */
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask)
+{
+	struct snd_ac97_bus_ops *ops = ac97->bus->ops;
+
+	if (try_warm && ops->warm_reset) {
+		ops->warm_reset(ac97);
+		if (snd_ac97_check_id(ac97, id, id_mask))
+			return 1;
+	}
+
+	if (ops->reset)
+		ops->reset(ac97);
+	if (ops->warm_reset)
+		ops->warm_reset(ac97);
+
+	if (snd_ac97_check_id(ac97, id, id_mask))
+		return 0;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(snd_ac97_reset);
+
+/*
  * Let drivers decide whether they want to support given codec from their
  * probe method. Drivers have direct access to the struct snd_ac97
  * structure and may  decide based on the id field amongst other things.
@@ -27,35 +89,9 @@
 	return 1;
 }
 
-#ifdef CONFIG_PM
-static int ac97_bus_suspend(struct device *dev, pm_message_t state)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->suspend)
-		ret = dev->driver->suspend(dev, state);
-
-	return ret;
-}
-
-static int ac97_bus_resume(struct device *dev)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->resume)
-		ret = dev->driver->resume(dev);
-
-	return ret;
-}
-#endif /* CONFIG_PM */
-
 struct bus_type ac97_bus_type = {
 	.name		= "ac97",
 	.match		= ac97_bus_match,
-#ifdef CONFIG_PM
-	.suspend	= ac97_bus_suspend,
-	.resume		= ac97_bus_resume,
-#endif /* CONFIG_PM */
 };
 
 static int __init ac97_bus_init(void)
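
A hypothetical caller of the new snd_ac97_reset() helper, following the
kerneldoc above (the probe function is invented; declarations come from
<sound/ac97_codec.h>):

    /* Sketch only: assumes @ac97 is already wired to its bus ops. */
    static int example_codec_detect(struct snd_ac97 *ac97)
    {
            int ret;

            /* Prefer a warm reset; id == 0 accepts any valid vendor ID. */
            ret = snd_ac97_reset(ac97, true, 0, 0);
            if (ret < 0)
                    return ret;     /* -ENODEV: no codec answered */

            /* ret == 1: warm reset sufficed; ret == 0: cold reset needed. */
            return 0;
    }
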
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 23c371ec..a04edff 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1050,7 +1050,6 @@
 static struct i2c_driver onyx_driver = {
 	.driver = {
 		.name = "aoa_codec_onyx",
-		.owner = THIS_MODULE,
 	},
 	.probe = onyx_i2c_probe,
 	.remove = onyx_i2c_remove,
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 364c7c4..78ed1ff 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -939,7 +939,6 @@
 static struct i2c_driver tas_driver = {
 	.driver = {
 		.name = "aoa_codec_tas",
-		.owner = THIS_MODULE,
 	},
 	.probe = tas_i2c_probe,
 	.remove = tas_i2c_remove,
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 9dc5806..8f71f7e 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -1120,10 +1120,10 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int aoa_fabric_layout_suspend(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
 		ldev->gpio.methods->all_amps_off(&ldev->gpio);
@@ -1131,15 +1131,19 @@
 	return 0;
 }
 
-static int aoa_fabric_layout_resume(struct soundbus_dev *sdev)
+static int aoa_fabric_layout_resume(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_restore)
 		ldev->gpio.methods->all_amps_restore(&ldev->gpio);
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(aoa_fabric_layout_pm_ops,
+	aoa_fabric_layout_suspend, aoa_fabric_layout_resume);
+
 #endif
 
 static struct soundbus_driver aoa_soundbus_driver = {
@@ -1147,12 +1151,11 @@
 	.owner = THIS_MODULE,
 	.probe = aoa_fabric_layout_probe,
 	.remove = aoa_fabric_layout_remove,
-#ifdef CONFIG_PM
-	.suspend = aoa_fabric_layout_suspend,
-	.resume = aoa_fabric_layout_resume,
-#endif
 	.driver = {
 		.owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &aoa_fabric_layout_pm_ops,
+#endif
 	}
 };
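
The layout fabric change follows the standard migration from bus-specific
suspend/resume callbacks to dev_pm_ops. A minimal sketch of the destination
pattern (illustrative names; SIMPLE_DEV_PM_OPS fills in the sleep callbacks
only when CONFIG_PM_SLEEP is set):

    #include <linux/pm.h>

    static int example_suspend(struct device *dev)
    {
            /* quiesce the device; state comes from dev_get_drvdata(dev) */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            /* restore the device */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    /* The driver core then invokes these via the driver model:
     *      .driver = { .pm = &example_pm_ops },
     * so the bus no longer needs suspend/resume plumbing of its own.
     */
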
 
diff --git a/sound/aoa/soundbus/core.c b/sound/aoa/soundbus/core.c
index 3edf736..70bcaa7 100644
--- a/sound/aoa/soundbus/core.c
+++ b/sound/aoa/soundbus/core.c
@@ -126,30 +126,6 @@
 		drv->shutdown(soundbus_dev);
 }
 
-#ifdef CONFIG_PM
-
-static int soundbus_device_suspend(struct device *dev, pm_message_t state)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->suspend)
-		return drv->suspend(soundbus_dev, state);
-	return 0;
-}
-
-static int soundbus_device_resume(struct device * dev)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->resume)
-		return drv->resume(soundbus_dev);
-	return 0;
-}
-
-#endif /* CONFIG_PM */
-
 /* soundbus_dev_attrs is declared in sysfs.c */
 ATTRIBUTE_GROUPS(soundbus_dev);
 static struct bus_type soundbus_bus_type = {
@@ -158,10 +134,6 @@
 	.uevent		= soundbus_uevent,
 	.remove		= soundbus_device_remove,
 	.shutdown	= soundbus_device_shutdown,
-#ifdef CONFIG_PM
-	.suspend	= soundbus_device_suspend,
-	.resume		= soundbus_device_resume,
-#endif
 	.dev_groups	= soundbus_dev_groups,
 };
 
diff --git a/sound/aoa/soundbus/soundbus.h b/sound/aoa/soundbus/soundbus.h
index 21e756c..ae40224 100644
--- a/sound/aoa/soundbus/soundbus.h
+++ b/sound/aoa/soundbus/soundbus.h
@@ -188,8 +188,6 @@
 	int	(*probe)(struct soundbus_dev* dev);
 	int	(*remove)(struct soundbus_dev* dev);
 
-	int	(*suspend)(struct soundbus_dev* dev, pm_message_t state);
-	int	(*resume)(struct soundbus_dev* dev);
 	int	(*shutdown)(struct soundbus_dev* dev);
 
 	struct device_driver driver;
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 7a2c1f5..c0f018a 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -211,26 +211,38 @@
 		      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->tx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 static int
 pcm_playback_hw_params(struct snd_pcm_substream *substream,
 		       struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->rx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 
 static int
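
All the firewire hw_params hunks apply the same ordering rule: perform the
fallible allocation before any side effect, so a failure cannot leave a
substream counter incremented. A sketch of the resulting shape (the device
struct and counter are illustrative, not a real driver):

    #include <linux/atomic.h>
    #include <sound/pcm.h>
    #include <sound/pcm_params.h>

    struct example_dev {
            atomic_t substreams_counter;
    };

    static int example_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *hw_params)
    {
            struct example_dev *dev = substream->private_data;
            int err;

            /* Fallible step first ... */
            err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
                                                   params_buffer_bytes(hw_params));
            if (err < 0)
                    return err;     /* nothing to unwind */

            /* ... side effects only after success. */
            if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
                    atomic_inc(&dev->substreams_counter);

            return 0;
    }
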
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index f7771451..4e67b1d 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -230,6 +230,12 @@
 			     struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -240,13 +246,18 @@
 	amdtp_stream_set_pcm_format(&dice->tx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int playback_hw_params(struct snd_pcm_substream *substream,
 			      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -257,8 +268,7 @@
 	amdtp_stream_set_pcm_format(&dice->rx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index 8a34753..c30b2ff 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -244,25 +244,35 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->capture_substreams);
 	amdtp_stream_set_pcm_format(&efw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->playback_substreams);
 	amdtp_stream_set_pcm_format(&efw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 67ade07..9c73930 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -231,7 +231,12 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
 
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -241,13 +246,18 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -257,8 +267,7 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 873d40f..77ad5b9 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -512,12 +512,11 @@
 	if (err < 0)
 		goto end;
 
-	formats[eid] = kmalloc(*len, GFP_KERNEL);
+	formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 	if (formats[eid] == NULL) {
 		err = -ENOMEM;
 		goto end;
 	}
-	memcpy(formats[eid], buf, *len);
 
 	/* apply the format for each available sampling rate */
 	for (i = 0; i < ARRAY_SIZE(oxfw_rate_table); i++) {
@@ -531,12 +530,11 @@
 			continue;
 
 		eid++;
-		formats[eid] = kmalloc(*len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			goto end;
 		}
-		memcpy(formats[eid], buf, *len);
 		formats[eid][2] = avc_stream_rate_table[i];
 	}
 
@@ -594,12 +592,11 @@
 		if (err < 0)
 			break;
 
-		formats[eid] = kmalloc(len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			break;
 		}
-		memcpy(formats[eid], buf, len);
 
 		/* get next entry */
 		len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
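
The oxfw hunks are a mechanical cleanup: kmemdup() collapses an
allocate-then-copy pair into one call with a single failure point.
Schematically (fragment, not a complete function):

    /* Before: two steps that must stay in sync. */
    dst = kmalloc(len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;
    memcpy(dst, src, len);

    /* After: identical semantics in one call. */
    dst = kmemdup(src, len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;
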
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 0aa5d9e..4449d1a9 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -125,7 +125,7 @@
 }
 
 /**
- * snd_hdac_ext_device_init - initialize the HDA extended codec base device
+ * snd_hdac_ext_bus_device_init - initialize the HDA extended codec base device
  * @ebus: hdac extended bus to attach to
  * @addr: codec address
  *
@@ -133,14 +133,16 @@
  */
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
 {
+	struct hdac_ext_device *edev;
 	struct hdac_device *hdev = NULL;
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 	char name[15];
 	int ret;
 
-	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
-	if (!hdev)
+	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+	if (!edev)
 		return -ENOMEM;
+	hdev = &edev->hdac;
 
 	snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
 
@@ -158,6 +160,7 @@
 		snd_hdac_ext_bus_device_exit(hdev);
 		return ret;
 	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
@@ -168,7 +171,94 @@
  */
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
 {
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
 	snd_hdac_device_exit(hdev);
-	kfree(hdev);
+	kfree(edev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
+
+/**
+ * snd_hdac_ext_bus_device_remove - remove HD-audio extended codec base devices
+ *
+ * @ebus: HD-audio extended bus
+ */
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus)
+{
+	struct hdac_device *codec, *__codec;
+	/*
+	 * we need to remove all the codec device objects created in
+	 * snd_hdac_ext_bus_device_init()
+	 */
+	list_for_each_entry_safe(codec, __codec, &ebus->bus.codec_list, list) {
+		snd_hdac_device_unregister(codec);
+		put_device(&codec->dev);
+	}
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_remove);
+#define dev_to_hdac(dev) (container_of((dev), \
+			struct hdac_device, dev))
+
+static inline struct hdac_ext_driver *get_edrv(struct device *dev)
+{
+	struct hdac_driver *hdrv = drv_to_hdac_driver(dev->driver);
+	struct hdac_ext_driver *edrv = to_ehdac_driver(hdrv);
+
+	return edrv;
+}
+
+static inline struct hdac_ext_device *get_edev(struct device *dev)
+{
+	struct hdac_device *hdev = dev_to_hdac_dev(dev);
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
+	return edev;
+}
+
+static int hda_ext_drv_probe(struct device *dev)
+{
+	return (get_edrv(dev))->probe(get_edev(dev));
+}
+
+static int hdac_ext_drv_remove(struct device *dev)
+{
+	return (get_edrv(dev))->remove(get_edev(dev));
+}
+
+static void hdac_ext_drv_shutdown(struct device *dev)
+{
+	return (get_edrv(dev))->shutdown(get_edev(dev));
+}
+
+/**
+ * snd_hda_ext_driver_register - register a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv)
+{
+	drv->hdac.type = HDA_DEV_ASOC;
+	drv->hdac.driver.bus = &snd_hda_bus_type;
+	/* we use default match */
+
+	if (drv->probe)
+		drv->hdac.driver.probe = hda_ext_drv_probe;
+	if (drv->remove)
+		drv->hdac.driver.remove = hdac_ext_drv_remove;
+	if (drv->shutdown)
+		drv->hdac.driver.shutdown = hdac_ext_drv_shutdown;
+
+	return driver_register(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register);
+
+/**
+ * snd_hda_ext_driver_unregister - unregister a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv)
+{
+	driver_unregister(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_unregister);
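
A hypothetical codec driver built on the new registration helpers (sketch;
the driver name and probe body are invented, the structure layout follows
the code above):

    static int example_ext_probe(struct hdac_ext_device *edev)
    {
            /* codec setup goes here */
            return 0;
    }

    static struct hdac_ext_driver example_ext_driver = {
            .hdac.driver.name = "example-ext-codec",
            .probe = example_ext_probe,
    };

    static int __init example_ext_init(void)
    {
            /* Fills in the bus, type and struct device_driver callbacks. */
            return snd_hda_ext_driver_register(&example_ext_driver);
    }

    static void __exit example_ext_exit(void)
    {
            snd_hda_ext_driver_unregister(&example_ext_driver);
    }

    module_init(example_ext_init);
    module_exit(example_ext_exit);
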
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 358f161..63215b1 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -177,8 +177,8 @@
 		hlink->bus = bus;
 		hlink->ml_addr = ebus->mlcap + AZX_ML_BASE +
 					(AZX_ML_INTERVAL * idx);
-		hlink->lcaps  = snd_hdac_chip_readl(bus, ML_LCAP);
-		hlink->lsdiid = snd_hdac_chip_readw(bus, ML_LSDIID);
+		hlink->lcaps  = readl(hlink->ml_addr + AZX_REG_ML_LCAP);
+		hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID);
 
 		list_add_tail(&hlink->list, &ebus->hlink_list);
 	}
@@ -243,7 +243,7 @@
 	timeout = 50;
 
 	do {
-		val = snd_hdac_chip_readl(link->bus, ML_LCTL);
+		val = readl(link->ml_addr + AZX_REG_ML_LCTL);
 		if (enable) {
 			if (((val & mask) >> AZX_MLCTL_CPA))
 				return 0;
@@ -263,7 +263,7 @@
  */
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, 0, AZX_MLCTL_SPA);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
 
 	return check_hdac_link_power_active(link, true);
 }
@@ -275,8 +275,28 @@
  */
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, AZX_MLCTL_SPA, 0);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
 
 	return check_hdac_link_power_active(link, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down);
+
+/**
+ * snd_hdac_ext_bus_link_power_down_all - power down all HDA links
+ * @ebus: HD-audio extended bus
+ */
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus)
+{
+	struct hdac_ext_link *hlink = NULL;
+	int ret;
+
+	list_for_each_entry(hlink, &ebus->hlink_list, list) {
+		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
+		ret = check_hdac_link_power_active(hlink, false);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
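
The multi-link register accesses now go through each link's own ml_addr
window instead of chip-global offsets, and snd_hdac_updatel() is a
read-modify-write on that window. A rough open-coded equivalent (sketch,
not the exact macro):

    #include <linux/io.h>
    #include <linux/types.h>

    static inline void updatel_sketch(void __iomem *addr, int reg,
                                      u32 mask, u32 val)
    {
            u32 v = readl(addr + reg);

            v = (v & ~mask) | val;  /* clear the masked bits, then set val */
            writel(v, addr + reg);
    }

    /* Powering a link up or down then differs only in mask/value:
     *      up:   updatel_sketch(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
     *      down: updatel_sketch(link->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
     */
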
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 3de47dd..33ba77d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -49,6 +49,16 @@
 				AZX_PPLC_INTERVAL * idx;
 	}
 
+	if (ebus->spbcap) {
+		stream->spib_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_SPIB;
+
+		stream->fifo_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_MAXFIFO;
+	}
+
 	stream->decoupled = false;
 	snd_hdac_stream_init(bus, &stream->hstream, idx, direction, tag);
 }
@@ -281,17 +291,12 @@
 	struct hdac_ext_stream *res = NULL;
 	struct hdac_stream *stream = NULL;
 	struct hdac_bus *hbus = &ebus->bus;
-	int key;
 
 	if (!ebus->ppcap) {
 		dev_err(hbus->dev, "stream type not supported\n");
 		return NULL;
 	}
 
-	/* make a non-zero unique key for the substream */
-	key = (substream->pcm->device << 16) | (substream->number << 2) |
-			(substream->stream + 1);
-
 	list_for_each_entry(stream, &hbus->stream_list, list) {
 		struct hdac_ext_stream *hstream = container_of(stream,
 						struct hdac_ext_stream,
@@ -310,7 +315,6 @@
 		spin_lock_irq(&hbus->reg_lock);
 		res->hstream.opened = 1;
 		res->hstream.running = 0;
-		res->hstream.assigned_key = key;
 		res->hstream.substream = substream;
 		spin_unlock_irq(&hbus->reg_lock);
 	}
@@ -423,7 +427,7 @@
 
 	mask |= (1 << index);
 
-	register_mask = snd_hdac_chip_readl(bus, SPB_SPBFCCTL);
+	register_mask = readl(ebus->spbcap + AZX_REG_SPB_SPBFCCTL);
 
 	mask |= register_mask;
 
@@ -435,6 +439,50 @@
 EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_spbcap_enable);
 
 /**
+ * snd_hdac_ext_stream_set_spib - sets the spib value of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ * @value: spib value to set
+ */
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL\n");
+		return -EINVAL;
+	}
+
+	writel(value, stream->spib_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_set_spib);
+
+/**
+ * snd_hdac_ext_stream_get_spbmaxfifo - gets the spib maxfifo of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ *
+ * Return maxfifo for the stream
+ */
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL\n");
+		return -EINVAL;
+	}
+
+	return readl(stream->fifo_addr);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
+
+
+/**
  * snd_hdac_ext_stop_streams - stop all streams if running
  * @ebus: HD-audio ext core bus
  */
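
A hypothetical use of the new SPIB helpers from a driver's PCM path (sketch;
the function and its caller are invented): report the application position
so the controller knows how much valid data software has produced.

    static int example_pcm_ack(struct hdac_ext_bus *ebus,
                               struct hdac_ext_stream *stream,
                               unsigned int appl_bytes)
    {
            /* Returns -EINVAL if the controller has no SPB capability. */
            return snd_hdac_ext_stream_set_spib(ebus, stream, appl_bytes);
    }
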
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index cdee710..db96042 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -372,6 +372,36 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
 
+/**
+ * snd_hdac_refresh_widget_sysfs - Reset the codec widgets and reinit the
+ * codec sysfs
+ * @codec: the codec object
+ *
+ * First remove the sysfs tree, then refresh the widgets, and lastly
+ * recreate it.
+ */
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec)
+{
+	int ret;
+
+	if (device_is_registered(&codec->dev))
+		hda_widget_sysfs_exit(codec);
+	ret = snd_hdac_refresh_widgets(codec);
+	if (ret) {
+		dev_err(&codec->dev, "failed to refresh widget: %d\n", ret);
+		return ret;
+	}
+	if (device_is_registered(&codec->dev)) {
+		ret = hda_widget_sysfs_init(codec);
+		if (ret) {
+			dev_err(&codec->dev, "failed to init sysfs: %d\n", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_refresh_widget_sysfs);
+
 /* return CONNLIST_LEN parameter of the given widget */
 static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
 {
@@ -501,23 +531,27 @@
  * This function calls the runtime PM helper to power up the given codec.
  * Unlike snd_hdac_power_up_pm(), you should call this only for the code
  * path that isn't included in PM path.  Otherwise it gets stuck.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up(struct hdac_device *codec)
+int snd_hdac_power_up(struct hdac_device *codec)
 {
-	pm_runtime_get_sync(&codec->dev);
+	return pm_runtime_get_sync(&codec->dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up);
 
 /**
  * snd_hdac_power_down - power down the codec
  * @codec: the codec object
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down(struct hdac_device *codec)
+int snd_hdac_power_down(struct hdac_device *codec)
 {
 	struct device *dev = &codec->dev;
 
 	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_autosuspend(dev);
+	return pm_runtime_put_autosuspend(dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down);
 
@@ -529,11 +563,14 @@
  * which may be called by PM suspend/resume again.  OTOH, if a power-up
  * call must wake up the sleeper (e.g. in a kctl callback), use
  * snd_hdac_power_up() instead.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up_pm(struct hdac_device *codec)
+int snd_hdac_power_up_pm(struct hdac_device *codec)
 {
 	if (!atomic_inc_not_zero(&codec->in_pm))
-		snd_hdac_power_up(codec);
+		return snd_hdac_power_up(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
 
@@ -543,11 +580,14 @@
  *
  * Like snd_hdac_power_up_pm(), this function is used in a recursive
  * code path like init code which may be called by PM suspend/resume again.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down_pm(struct hdac_device *codec)
+int snd_hdac_power_down_pm(struct hdac_device *codec)
 {
 	if (atomic_dec_if_positive(&codec->in_pm) < 0)
-		snd_hdac_power_down(codec);
+		return snd_hdac_power_down(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
 #endif
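
With the power helpers now returning the pm_runtime result, callers can skip
the hardware access when the wake-up itself failed. One caution for users of
the pattern: pm_runtime_get_sync() can also return 1 when the device was
already active, so treating only negative values as errors is the safer
check. A sketch (illustrative function):

    static int example_codec_access(struct hdac_device *codec)
    {
            int err = snd_hdac_power_up_pm(codec);

            if (err >= 0) {         /* 0 and 1 both mean the device is awake */
                    err = 0;
                    /* ... access the hardware here ... */
            }
            snd_hdac_power_down_pm(codec); /* balance the reference either way */
            return err;
    }
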
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 5676b84..55c3df4 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -134,6 +134,16 @@
 	return !strcmp(dev->driver->name, "i915");
 }
 
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
+{
+	if (WARN_ON(!hdac_acomp))
+		return -ENODEV;
+
+	hdac_acomp->audio_ops = aops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
+
 int snd_hdac_i915_init(struct hdac_bus *bus)
 {
 	struct component_match *match = NULL;
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 1eabcdf..b0ed870 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -410,8 +410,9 @@
 
 	err = reg_raw_write(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_write(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_write(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
@@ -442,8 +443,9 @@
 
 	err = reg_raw_read(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_read(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_read(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 4c15d0a..8981159 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -286,6 +286,28 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_stream_release);
 
+/**
+ * snd_hdac_get_stream - return hdac_stream based on stream_tag and
+ * direction
+ *
+ * @bus: HD-audio core bus
+ * @dir: direction for the stream to be found
+ * @stream_tag: stream tag for stream to be found
+ */
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag)
+{
+	struct hdac_stream *s;
+
+	list_for_each_entry(s, &bus->stream_list, list) {
+		if (s->direction == dir && s->stream_tag == stream_tag)
+			return s;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_stream);
+
 /*
  * set up a BDL entry
  */
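
A hypothetical lookup through the new snd_hdac_get_stream() helper, e.g.
from an interrupt path that only knows what the hardware reported (names
invented; declarations from <sound/hdaudio.h> and <sound/pcm.h>):

    static void example_stream_irq(struct hdac_bus *bus, int stream_tag)
    {
            struct hdac_stream *s;

            s = snd_hdac_get_stream(bus, SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
            if (s && s->substream)
                    snd_pcm_period_elapsed(s->substream);
    }
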
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index 0a6ce3b..c71142d 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -321,8 +321,7 @@
 			free_widget_node(*p, &widget_node_group);
 		kfree(tree->nodes);
 	}
-	if (tree->root)
-		kobject_put(tree->root);
+	kobject_put(tree->root);
 	kfree(tree);
 	codec->widgets = NULL;
 }
@@ -391,6 +390,9 @@
 {
 	int err;
 
+	if (codec->widgets)
+		return 0; /* already created */
+
 	err = widget_tree_create(codec);
 	if (err < 0) {
 		widget_tree_free(codec);
diff --git a/sound/pci/echoaudio/darla20_dsp.c b/sound/pci/echoaudio/darla20_dsp.c
index febee5b..320837b 100644
--- a/sound/pci/echoaudio/darla20_dsp.c
+++ b/sound/pci/echoaudio/darla20_dsp.c
@@ -44,18 +44,18 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/darla24_dsp.c b/sound/pci/echoaudio/darla24_dsp.c
index 7b4f6fd..8736b5e 100644
--- a/sound/pci/echoaudio/darla24_dsp.c
+++ b/sound/pci/echoaudio/darla24_dsp.c
@@ -44,17 +44,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA24_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_ESYNC;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/echo3g_dsp.c b/sound/pci/echoaudio/echo3g_dsp.c
index ae11ce1..6deb80c 100644
--- a/sound/pci/echoaudio/echo3g_dsp.c
+++ b/sound/pci/echoaudio/echo3g_dsp.c
@@ -59,8 +59,8 @@
 		cpu_to_le32((E3G_MAGIC_NUMBER / 48000) - 2);
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_ECHO3G_DSP;
 
 	/* Load the DSP code and the ASIC on the PCI card and get
@@ -78,8 +78,8 @@
 		chip->px_analog_in = chip->bx_analog_in = 14;
 		chip->px_digital_in = chip->bx_digital_in = 16;
 		chip->px_num = chip->bx_num = 24;
-		chip->has_phantom_power = TRUE;
-		chip->hasnt_input_nominal_level = TRUE;
+		chip->has_phantom_power = true;
+		chip->hasnt_input_nominal_level = true;
 	} else if (err == E3G_LAYLA3G_BOX_TYPE) {
 		chip->input_clock_types =	ECHO_CLOCK_BIT_INTERNAL |
 						ECHO_CLOCK_BIT_SPDIF |
@@ -106,10 +106,10 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->non_audio_spdif = FALSE;
-	chip->bad_board = FALSE;
-	chip->phantom_power = FALSE;
+	chip->professional_spdif = false;
+	chip->non_audio_spdif = false;
+	chip->bad_board = false;
+	chip->phantom_power = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 862db9a..1cb85ae 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2245,7 +2245,7 @@
 
 #ifdef ECHOCARD_HAS_MIDI
 	if (chip->midi_input_enabled)
-		enable_midi_input(chip, TRUE);
+		enable_midi_input(chip, true);
 	if (chip->midi_out)
 		snd_echo_midi_output_trigger(chip->midi_out, 1);
 #endif
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index 3251522..152ce15 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -153,9 +153,6 @@
 #define _ECHOAUDIO_H_
 
 
-#define TRUE 1
-#define FALSE 0
-
 #include "echoaudio_dsp.h"
 
 
@@ -378,8 +375,8 @@
 					 */
 	u8 output_clock;		/* Layla20 only */
 	char meters_enabled;		/* VU-meters status */
-	char asic_loaded;		/* Set TRUE when ASIC loaded */
-	char bad_board;			/* Set TRUE if DSP won't load */
+	char asic_loaded;		/* Set true when ASIC loaded */
+	char bad_board;			/* Set true if DSP won't load */
 	char professional_spdif;	/* 0 = consumer; 1 = professional */
 	char non_audio_spdif;		/* 3G - only */
 	char digital_in_automute;	/* Gina24, Layla24, Mona - only */
diff --git a/sound/pci/echoaudio/echoaudio_3g.c b/sound/pci/echoaudio/echoaudio_3g.c
index 2fa66dc..22c786b 100644
--- a/sound/pci/echoaudio/echoaudio_3g.c
+++ b/sound/pci/echoaudio/echoaudio_3g.c
@@ -41,7 +41,7 @@
 		return -EIO;
 
 	chip->comm_page->ext_box_status = cpu_to_le32(E3G_ASIC_NOT_LOADED);
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	clear_handshake(chip);
 	send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -55,7 +55,7 @@
 	if (box_status == E3G_ASIC_NOT_LOADED)
 		return -ENODEV;
 
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return box_status & E3G_BOX_TYPE_MASK;
 }
 
@@ -243,7 +243,7 @@
 	 * 48 kHz, internal clock, S/PDIF RCA mode */
 	if (box_type >= 0) {
 		err = write_control_reg(chip, E3G_48KHZ,
-					E3G_FREQ_REG_DEFAULT, TRUE);
+					E3G_FREQ_REG_DEFAULT, true);
 		if (err < 0)
 			return err;
 	}
@@ -378,16 +378,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 1a9427a..15aae2f 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -103,8 +103,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -126,8 +126,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_err(chip->card->dev, "read_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_err(chip->card->dev, "read_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -166,7 +166,7 @@
 /* This card has no ASIC, just return ok */
 static inline int check_asic_status(struct echoaudio *chip)
 {
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return 0;
 }
 
@@ -341,11 +341,11 @@
 		dev_warn(chip->card->dev, "DSP is already loaded!\n");
 		return 0;
 	}
-	chip->bad_board = TRUE;		/* Set TRUE until DSP loaded */
+	chip->bad_board = true;		/* Set true until DSP loaded */
 	chip->dsp_code = NULL;		/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;	/* Loading the DSP code will reset the ASIC */
+	chip->asic_loaded = false;	/* Loading the DSP code will reset the ASIC */
 
-	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to TRUE\n");
+	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to true\n");
 
 	/* If this board requires a resident loader, install it. */
 #ifdef DSP_56361
@@ -471,7 +471,7 @@
 			}
 
 			chip->dsp_code = code;		/* Show which DSP code loaded */
-			chip->bad_board = FALSE;	/* DSP OK */
+			chip->bad_board = false;	/* DSP OK */
 			return 0;
 		}
 		udelay(100);
@@ -951,10 +951,10 @@
 	/* Stops all active pipes (just to be sure) */
 	stop_transport(chip, chip->active_mask);
 
-	set_meters_on(chip, FALSE);
+	set_meters_on(chip, false);
 
 #ifdef ECHOCARD_HAS_MIDI
-	enable_midi_input(chip, FALSE);
+	enable_midi_input(chip, false);
 #endif
 
 	/* Go to sleep */
@@ -981,9 +981,9 @@
 
 	/* Init all the basic stuff */
 	chip->card_name = ECHOCARD_NAME;
-	chip->bad_board = TRUE;	/* Set TRUE until DSP loaded */
+	chip->bad_board = true;	/* Set true until DSP loaded */
 	chip->dsp_code = NULL;	/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	memset(chip->comm_page, 0, sizeof(struct comm_page));
 
 	/* Init the comm page */
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index 23a0994..834b39e 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -48,7 +48,7 @@
 	if (read_dsp(chip, &asic_status) < 0) {
 		dev_err(chip->card->dev,
 			"check_asic_status: failed on read_dsp\n");
-		chip->asic_loaded = FALSE;
+		chip->asic_loaded = false;
 		return -EIO;
 	}
 
@@ -192,7 +192,7 @@
 		}
 	}
 
-	if ((err = write_control_reg(chip, control_reg, FALSE)))
+	if ((err = write_control_reg(chip, control_reg, false)))
 		return err;
 	chip->professional_spdif = prof;
 	dev_dbg(chip->card->dev, "set_professional_spdif to %s\n",
diff --git a/sound/pci/echoaudio/gina20.c b/sound/pci/echoaudio/gina20.c
index 4fa32a2..67bd0c9 100644
--- a/sound/pci/echoaudio/gina20.c
+++ b/sound/pci/echoaudio/gina20.c
@@ -23,7 +23,7 @@
 #define ECHOCARD_HAS_INPUT_GAIN
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 
 /* Pipe indexes */
 #define PX_ANALOG_OUT	0	/* 8 */
diff --git a/sound/pci/echoaudio/gina20_dsp.c b/sound/pci/echoaudio/gina20_dsp.c
index 5dafe92..b237757 100644
--- a/sound/pci/echoaudio/gina20_dsp.c
+++ b/sound/pci/echoaudio/gina20_dsp.c
@@ -48,19 +48,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_GINA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -69,7 +69,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/gina24_dsp.c b/sound/pci/echoaudio/gina24_dsp.c
index 6971766..8eff2b4 100644
--- a/sound/pci/echoaudio/gina24_dsp.c
+++ b/sound/pci/echoaudio/gina24_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_ESYNC | ECHO_CLOCK_BIT_ESYNC96 |
@@ -76,7 +76,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -86,8 +86,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -152,7 +152,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 	return err;
 }
@@ -226,7 +226,7 @@
 	chip->sample_rate = rate;
 	dev_dbg(chip->card->dev, "set_sample_rate: %d clock %d\n", rate, clock);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -274,7 +274,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -285,17 +285,17 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_CDROM:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -333,7 +333,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/indigo_dsp.c b/sound/pci/echoaudio/indigo_dsp.c
index 54edd67..c97dc83 100644
--- a/sound/pci/echoaudio/indigo_dsp.c
+++ b/sound/pci/echoaudio/indigo_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodj_dsp.c b/sound/pci/echoaudio/indigodj_dsp.c
index 2cf5cc0..2428b35 100644
--- a/sound/pci/echoaudio/indigodj_dsp.c
+++ b/sound/pci/echoaudio/indigodj_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJ_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodjx_dsp.c b/sound/pci/echoaudio/indigodjx_dsp.c
index 5252863..5fbd4a3 100644
--- a/sound/pci/echoaudio/indigodjx_dsp.c
+++ b/sound/pci/echoaudio/indigodjx_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoio_dsp.c b/sound/pci/echoaudio/indigoio_dsp.c
index 4e81787..79b68ba 100644
--- a/sound/pci/echoaudio/indigoio_dsp.c
+++ b/sound/pci/echoaudio/indigoio_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoiox_dsp.c b/sound/pci/echoaudio/indigoiox_dsp.c
index 6de3f9b..1ae394e 100644
--- a/sound/pci/echoaudio/indigoiox_dsp.c
+++ b/sound/pci/echoaudio/indigoiox_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/layla20.c b/sound/pci/echoaudio/layla20.c
index 12e5d21..fc8468d 100644
--- a/sound/pci/echoaudio/layla20.c
+++ b/sound/pci/echoaudio/layla20.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_SUPER_INTERLEAVE
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_OUTPUT_CLOCK_SWITCH
 #define ECHOCARD_HAS_MIDI
 
diff --git a/sound/pci/echoaudio/layla20_dsp.c b/sound/pci/echoaudio/layla20_dsp.c
index f2024a3..5e5b6e2 100644
--- a/sound/pci/echoaudio/layla20_dsp.c
+++ b/sound/pci/echoaudio/layla20_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA20_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -62,7 +62,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -71,7 +71,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
@@ -113,7 +113,7 @@
 	u32 asic_status;
 	int goodcnt, i;
 
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	for (i = goodcnt = 0; i < 5; i++) {
 		send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -127,7 +127,7 @@
 
 		if (asic_status == ASIC_ALREADY_LOADED) {
 			if (++goodcnt == 3) {
-				chip->asic_loaded = TRUE;
+				chip->asic_loaded = true;
 				return 0;
 			}
 		}
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index 4f11e81f..df28e51 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA24_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -64,7 +64,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	if ((err = init_line_levels(chip)) < 0)
 		return err;
@@ -77,8 +77,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -135,7 +135,7 @@
 	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
 				FW_LAYLA24_2S_ASIC);
 	if (err < 0)
-		return FALSE;
+		return false;
 
 	/* Now give the external ASIC a little time to set up */
 	mdelay(10);
@@ -147,7 +147,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err)
 		err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
-					TRUE);
+					true);
 	
 	return err;
 }
@@ -241,7 +241,7 @@
 	dev_dbg(chip->card->dev,
 		"set_sample_rate: %d clock %d\n", rate, control_reg);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -287,7 +287,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -334,17 +334,17 @@
 	short asic;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2S_ASIC;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2A_ASIC;
 		break;
 	default:
@@ -383,7 +383,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c
index 2f7562f..62b5240 100644
--- a/sound/pci/echoaudio/mia.c
+++ b/sound/pci/echoaudio/mia.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_VMIXER
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
 #define ECHOCARD_HAS_MIDI
 #define ECHOCARD_HAS_LINE_OUT_GAIN
diff --git a/sound/pci/echoaudio/mia_dsp.c b/sound/pci/echoaudio/mia_dsp.c
index fdad079..8f612a0 100644
--- a/sound/pci/echoaudio/mia_dsp.c
+++ b/sound/pci/echoaudio/mia_dsp.c
@@ -52,19 +52,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_MIA_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	if ((subdevice_id & 0x0000f) == MIA_MIDI_REV)
-		chip->has_midi = TRUE;
+		chip->has_midi = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/mona_dsp.c b/sound/pci/echoaudio/mona_dsp.c
index 843c7a5..dce9e57 100644
--- a/sound/pci/echoaudio/mona_dsp.c
+++ b/sound/pci/echoaudio/mona_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
@@ -69,7 +69,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -79,8 +79,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -148,7 +148,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 
 	return err;
@@ -356,7 +356,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -367,16 +367,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -415,7 +415,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, FALSE);
+	err = write_control_reg(chip, control_reg, false);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 55e5716..076b117 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -1741,7 +1741,7 @@
 static struct snd_kcontrol_new snd_audigy_capture_boost =
 {
 	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name =		"Analog Capture Boost",
+	.name =		"Mic Extra Boost",
 	.info =		snd_audigy_capture_boost_info,
 	.get =		snd_audigy_capture_boost_get,
 	.put =		snd_audigy_capture_boost_put
@@ -1819,8 +1819,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Master Playback Switch",
 		"Master Playback Volume",
 		"PCM Out Path & Mute",
@@ -1830,10 +1828,16 @@
 		"Capture Switch",
 		"Capture Volume",
 		"Mic Select",
+		"Headphone Playback Switch",
+		"Headphone Playback Volume",
+		"3D Control - Center",
+		"3D Control - Depth",
+		"3D Control - Switch",
 		"Video Playback Switch",
 		"Video Playback Volume",
 		"Mic Playback Switch",
 		"Mic Playback Volume",
+		"External Amplifier",
 		NULL
 	};
 	static char *audigy_rename_ctls[] = {
@@ -1842,6 +1846,8 @@
 		/* "Wave Capture Volume", "PCM Capture Volume", */
 		"Wave Master Playback Volume", "Master Playback Volume",
 		"AMic Playback Volume", "Mic Playback Volume",
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 	static char *audigy_rename_ctls_i2c_adc[] = {
@@ -1867,8 +1873,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Capture Source",
 		"Capture Switch",
 		"Capture Volume",
@@ -1900,7 +1904,8 @@
 		"Aux Playback Volume", "Aux Capture Volume",
 		"Video Playback Switch", "Video Capture Switch",
 		"Video Playback Volume", "Video Capture Volume",
-
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 
@@ -1935,6 +1940,9 @@
 			snd_ac97_write_cache(emu->ac97, AC97_MASTER, 0x0000);
 			/* set capture source to mic */
 			snd_ac97_write_cache(emu->ac97, AC97_REC_SEL, 0x0000);
+			/* set mono output (TAD) to mic */
+			snd_ac97_update_bits(emu->ac97, AC97_GENERAL_PURPOSE,
+				0x0200, 0x0200);
 			if (emu->card_capabilities->adc_1361t)
 				c = audigy_remove_ctls_1361t_adc;
 			else 
@@ -1996,11 +2004,6 @@
 		rename_ctl(card, "Analog Mix Capture Volume", "Line2 Capture Volume");
 		rename_ctl(card, "Aux2 Capture Volume", "Line3 Capture Volume");
 		rename_ctl(card, "Mic Capture Volume", "Unknown1 Capture Volume");
-		remove_ctl(card, "Headphone Playback Switch");
-		remove_ctl(card, "Headphone Playback Volume");
-		remove_ctl(card, "3D Control - Center");
-		remove_ctl(card, "3D Control - Depth");
-		remove_ctl(card, "3D Control - Switch");
 	}
 	if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
 		return -ENOMEM;
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 03b7399..7f57a14 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -887,11 +887,32 @@
 static bool pin_config_match(struct hda_codec *codec,
 			     const struct hda_pintbl *pins)
 {
-	for (; pins->nid; pins++) {
-		u32 def_conf = snd_hda_codec_get_pincfg(codec, pins->nid);
-		if (pins->val != def_conf)
+	int i;
+
+	for (i = 0; i < codec->init_pins.used; i++) {
+		struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+		hda_nid_t nid = pin->nid;
+		u32 cfg = pin->cfg;
+		const struct hda_pintbl *t_pins;
+		int found;
+
+		t_pins = pins;
+		found = 0;
+		for (; t_pins->nid; t_pins++) {
+			if (t_pins->nid == nid) {
+				found = 1;
+				if (t_pins->val == cfg)
+					break;
+				else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
+					break;
+				else
+					return false;
+			}
+		}
+		if (!found && (cfg & 0xf0000000) != 0x40000000)
 			return false;
 	}
+
 	return true;
 }
 
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index d1a2cb6..37f43a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -53,76 +53,6 @@
 #define codec_has_clkstop(codec) \
 	((codec)->core.power_caps & AC_PWRST_CLKSTOP)
 
-/**
- * snd_hda_get_jack_location - Give a location string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack location, e.g. "Rear", "Front", etc.
- */
-const char *snd_hda_get_jack_location(u32 cfg)
-{
-	static char *bases[7] = {
-		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
-	};
-	static unsigned char specials_idx[] = {
-		0x07, 0x08,
-		0x17, 0x18, 0x19,
-		0x37, 0x38
-	};
-	static char *specials[] = {
-		"Rear Panel", "Drive Bar",
-		"Riser", "HDMI", "ATAPI",
-		"Mobile-In", "Mobile-Out"
-	};
-	int i;
-	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
-	if ((cfg & 0x0f) < 7)
-		return bases[cfg & 0x0f];
-	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
-		if (cfg == specials_idx[i])
-			return specials[i];
-	}
-	return "UNKNOWN";
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_location);
-
-/**
- * snd_hda_get_jack_connectivity - Give a connectivity string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack connectivity, i.e. external or internal connection.
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg)
-{
-	static char *jack_locations[4] = { "Ext", "Int", "Sep", "Oth" };
-
-	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_connectivity);
-
-/**
- * snd_hda_get_jack_type - Give a type string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
- */
-const char *snd_hda_get_jack_type(u32 cfg)
-{
-	static char *jack_types[16] = {
-		"Line Out", "Speaker", "HP Out", "CD",
-		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
-		"Line In", "Aux", "Mic", "Telephony",
-		"SPDIF In", "Digital In", "Reserved", "Other"
-	};
-
-	return jack_types[(cfg & AC_DEFCFG_DEVICE)
-				>> AC_DEFCFG_DEVICE_SHIFT];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_type);
-
 /*
  * Send and receive a verb - passed to exec_verb override for hdac_device
  */
@@ -975,7 +905,7 @@
 	if (codec->bus->modelname) {
 		codec->modelname = kstrdup(codec->bus->modelname, GFP_KERNEL);
 		if (!codec->modelname) {
-			err = -ENODEV;
+			err = -ENOMEM;
 			goto error;
 		}
 	}
@@ -1025,7 +955,7 @@
 	hda_nid_t fg;
 	int err;
 
-	err = snd_hdac_refresh_widgets(&codec->core);
+	err = snd_hdac_refresh_widget_sysfs(&codec->core);
 	if (err < 0)
 		return err;
 
@@ -3172,8 +3102,7 @@
 			struct snd_pcm_chmap *chmap;
 			const struct snd_pcm_chmap_elem *elem;
 
-			if (!pcm || !pcm->pcm || pcm->own_chmap ||
-			    !hinfo->substreams)
+			if (!pcm->pcm || pcm->own_chmap || !hinfo->substreams)
 				continue;
 			elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
 			err = snd_pcm_add_chmap_ctls(pcm->pcm, str, elem,
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 12837ab..2970413 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -469,13 +469,6 @@
 }
 
 /*
- * get widget information
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg);
-const char *snd_hda_get_jack_type(u32 cfg);
-const char *snd_hda_get_jack_location(u32 cfg);
-
-/*
  * power saving
  */
 #define snd_hda_power_up(codec)		snd_hdac_power_up(&(codec)->core)
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index c746cd9..563984d 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -42,7 +42,7 @@
 	CEA_EDID_VER_RESERVED	= 4,
 };
 
-static char *cea_speaker_allocation_names[] = {
+static const char * const cea_speaker_allocation_names[] = {
 	/*  0 */ "FL/FR",
 	/*  1 */ "LFE",
 	/*  2 */ "FC",
@@ -56,7 +56,7 @@
 	/* 10 */ "FCH",
 };
 
-static char *eld_connection_type_names[4] = {
+static const char * const eld_connection_type_names[4] = {
 	"HDMI",
 	"DisplayPort",
 	"2-reserved",
@@ -94,7 +94,7 @@
 	AUDIO_CODING_XTYPE_FIRST_RESERVED	= 4,
 };
 
-static char *cea_audio_coding_type_names[] = {
+static const char * const cea_audio_coding_type_names[] = {
 	/*  0 */ "undefined",
 	/*  1 */ "LPCM",
 	/*  2 */ "AC-3",
@@ -482,14 +482,14 @@
 	struct parsed_hdmi_eld *e = &eld->info;
 	char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
 	int i;
-	static char *eld_version_names[32] = {
+	static const char * const eld_version_names[32] = {
 		"reserved",
 		"reserved",
 		"CEA-861D or below",
 		[3 ... 30] = "reserved",
 		[31] = "partial"
 	};
-	static char *cea_edid_version_names[8] = {
+	static const char * const cea_edid_version_names[8] = {
 		"no CEA EDID Timing Extension block present",
 		"CEA-861",
 		"CEA-861-A",
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index baaf7ed0..033aa84 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -36,24 +36,9 @@
 #define param_read(codec, nid, parm) \
 	snd_hdac_read_parm_uncached(&(codec)->core, nid, parm)
 
-static char *bits_names(unsigned int bits, char *names[], int size)
-{
-	int i, n;
-	static char buf[128];
-
-	for (i = 0, n = 0; i < size; i++) {
-		if (bits & (1U<<i) && names[i])
-			n += snprintf(buf + n, sizeof(buf) - n, " %s",
-				      names[i]);
-	}
-	buf[n] = '\0';
-
-	return buf;
-}
-
 static const char *get_wid_type_name(unsigned int wid_value)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		[AC_WID_AUD_OUT] = "Audio Output",
 		[AC_WID_AUD_IN] = "Audio Input",
 		[AC_WID_AUD_MIX] = "Audio Mixer",
@@ -241,7 +226,7 @@
 
 static const char *get_jack_connection(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "1/8", "1/4", "ATAPI",
 		"RCA", "Optical","Digital", "Analog",
 		"DIN", "XLR", "RJ11", "Comb",
@@ -256,7 +241,7 @@
 
 static const char *get_jack_color(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "Black", "Grey", "Blue",
 		"Green", "Red", "Orange", "Yellow",
 		"Purple", "Pink", NULL, NULL,
@@ -269,11 +254,74 @@
 		return "UNKNOWN";
 }
 
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack location, e.g. "Rear", "Front", etc.
+ */
+static const char *get_jack_location(u32 cfg)
+{
+	static const char * const bases[7] = {
+		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
+	};
+	static const unsigned char specials_idx[] = {
+		0x07, 0x08,
+		0x17, 0x18, 0x19,
+		0x37, 0x38
+	};
+	static const char * const specials[] = {
+		"Rear Panel", "Drive Bar",
+		"Riser", "HDMI", "ATAPI",
+		"Mobile-In", "Mobile-Out"
+	};
+	int i;
+
+	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
+	if ((cfg & 0x0f) < 7)
+		return bases[cfg & 0x0f];
+	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
+		if (cfg == specials_idx[i])
+			return specials[i];
+	}
+	return "UNKNOWN";
+}
+
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack connectivity, i.e. external or internal connection.
+ */
+static const char *get_jack_connectivity(u32 cfg)
+{
+	static const char * const jack_locations[4] = {
+		"Ext", "Int", "Sep", "Oth"
+	};
+
+	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
+}
+
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
+ */
+static const char *get_jack_type(u32 cfg)
+{
+	static const char * const jack_types[16] = {
+		"Line Out", "Speaker", "HP Out", "CD",
+		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
+		"Line In", "Aux", "Mic", "Telephony",
+		"SPDIF In", "Digital In", "Reserved", "Other"
+	};
+
+	return jack_types[(cfg & AC_DEFCFG_DEVICE)
+				>> AC_DEFCFG_DEVICE_SHIFT];
+}
+
 static void print_pin_caps(struct snd_info_buffer *buffer,
 			   struct hda_codec *codec, hda_nid_t nid,
 			   int *supports_vref)
 {
-	static char *jack_conns[4] = { "Jack", "N/A", "Fixed", "Both" };
+	static const char * const jack_conns[4] = {
+		"Jack", "N/A", "Fixed", "Both"
+	};
 	unsigned int caps, val;
 
 	caps = param_read(codec, nid, AC_PAR_PIN_CAP);
@@ -340,9 +388,9 @@
 	caps = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONFIG_DEFAULT, 0);
 	snd_iprintf(buffer, "  Pin Default 0x%08x: [%s] %s at %s %s\n", caps,
 		    jack_conns[(caps & AC_DEFCFG_PORT_CONN) >> AC_DEFCFG_PORT_CONN_SHIFT],
-		    snd_hda_get_jack_type(caps),
-		    snd_hda_get_jack_connectivity(caps),
-		    snd_hda_get_jack_location(caps));
+		    get_jack_type(caps),
+		    get_jack_connectivity(caps),
+		    get_jack_location(caps));
 	snd_iprintf(buffer, "    Conn = %s, Color = %s\n",
 		    get_jack_connection(caps),
 		    get_jack_color(caps));
@@ -478,7 +526,7 @@
 static void print_power_state(struct snd_info_buffer *buffer,
 			      struct hda_codec *codec, hda_nid_t nid)
 {
-	static char *names[] = {
+	static const char * const names[] = {
 		[ilog2(AC_PWRST_D0SUP)]		= "D0",
 		[ilog2(AC_PWRST_D1SUP)]		= "D1",
 		[ilog2(AC_PWRST_D2SUP)]		= "D2",
@@ -492,9 +540,16 @@
 	int sup = param_read(codec, nid, AC_PAR_POWER_STATE);
 	int pwr = snd_hda_codec_read(codec, nid, 0,
 				     AC_VERB_GET_POWER_STATE, 0);
-	if (sup != -1)
-		snd_iprintf(buffer, "  Power states: %s\n",
-			    bits_names(sup, names, ARRAY_SIZE(names)));
+	if (sup != -1) {
+		int i;
+
+		snd_iprintf(buffer, "  Power states: ");
+		for (i = 0; i < ARRAY_SIZE(names); i++) {
+			if (sup & (1U << i))
+				snd_iprintf(buffer, " %s", names[i]);
+		}
+		snd_iprintf(buffer, "\n");
+	}
 
 	snd_iprintf(buffer, "  Power: setting=%s, actual=%s",
 		    get_pwr_state(pwr & AC_PWRST_SETTING),
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0f039ab..186792f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -763,6 +763,20 @@
 	QUIRK_ALIENWARE,
 };
 
+static const struct hda_pintbl alienware_pincfgs[] = {
+	{ 0x0b, 0x90170110 }, /* Builtin Speaker */
+	{ 0x0c, 0x411111f0 }, /* N/A */
+	{ 0x0d, 0x411111f0 }, /* N/A */
+	{ 0x0e, 0x411111f0 }, /* N/A */
+	{ 0x0f, 0x0321101f }, /* HP */
+	{ 0x10, 0x411111f0 }, /* Headset?  disabled for now */
+	{ 0x11, 0x03a11021 }, /* Mic */
+	{ 0x12, 0xd5a30140 }, /* Builtin Mic */
+	{ 0x13, 0x411111f0 }, /* N/A */
+	{ 0x18, 0x411111f0 }, /* N/A */
+	{}
+};
+
 static const struct snd_pci_quirk ca0132_quirks[] = {
 	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE),
 	{}
@@ -3147,7 +3161,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->out_pins[1]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_hp);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_HP_SEL - VNODE_START_NID];
@@ -3309,7 +3323,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->input_pins[0]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_amic1);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_AMIC1_SEL - VNODE_START_NID];
@@ -4617,37 +4631,54 @@
 	spec->multiout.num_dacs = 3;
 	spec->multiout.max_channels = 2;
 
-	spec->num_outputs = 2;
-	spec->out_pins[0] = 0x0b; /* speaker out */
 	if (spec->quirk == QUIRK_ALIENWARE) {
 		codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
+		snd_hda_apply_pincfgs(codec, alienware_pincfgs);
+
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x0f;
-	} else{
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = 0x0f;
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = 0x11;
+	} else {
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x10; /* headphone out */
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = spec->out_pins[1];
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = spec->input_pins[0];
+
+		/* SPDIF I/O */
+		spec->dig_out = 0x05;
+		spec->multiout.dig_out_nid = spec->dig_out;
+		cfg->dig_out_pins[0] = 0x0c;
+		cfg->dig_outs = 1;
+		cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
+		spec->dig_in = 0x09;
+		cfg->dig_in_pin = 0x0e;
+		cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 	}
-	spec->shared_out_nid = 0x2;
-	spec->unsol_tag_hp = spec->out_pins[1];
-
-	spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
-	spec->adcs[1] = 0x8; /* analog mic2 */
-	spec->adcs[2] = 0xa; /* what u hear */
-
-	spec->num_inputs = 3;
-	spec->input_pins[0] = 0x12;
-	spec->input_pins[1] = 0x11;
-	spec->input_pins[2] = 0x13;
-	spec->shared_mic_nid = 0x7;
-	spec->unsol_tag_amic1 = spec->input_pins[0];
-
-	/* SPDIF I/O */
-	spec->dig_out = 0x05;
-	spec->multiout.dig_out_nid = spec->dig_out;
-	cfg->dig_out_pins[0] = 0x0c;
-	cfg->dig_outs = 1;
-	cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
-	spec->dig_in = 0x09;
-	cfg->dig_in_pin = 0x0e;
-	cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 }
 
 static int ca0132_prepare_verbs(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a97db5f..acbfbe08 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -37,6 +37,8 @@
 #include <sound/jack.h>
 #include <sound/asoundef.h>
 #include <sound/tlv.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_i915.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 #include "hda_jack.h"
@@ -144,6 +146,9 @@
 	 */
 	struct hda_multi_out multiout;
 	struct hda_pcm_stream pcm_playback;
+
+	/* i915/powerwell (Haswell+/Valleyview+) specific */
+	struct i915_audio_component_audio_ops i915_audio_ops;
 };
 
 
@@ -2191,6 +2196,9 @@
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx;
 
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+		snd_hdac_i915_register_notifier(NULL);
+
 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 
@@ -2316,6 +2324,14 @@
 	snd_hda_codec_set_power_to_all(codec, fg, power_state);
 }
 
+static void intel_pin_eld_notify(void *audio_ptr, int port)
+{
+	struct hda_codec *codec = audio_ptr;
+	int pin_nid = port + 0x04;
+
+	check_presence_and_report(codec, pin_nid);
+}
+
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
 	struct hdmi_spec *spec;
@@ -2342,8 +2358,12 @@
 	if (is_valleyview_plus(codec) || is_skylake(codec))
 		codec->core.link_power_control = 1;
 
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
 		codec->depop_delay = 0;
+		spec->i915_audio_ops.audio_ptr = codec;
+		spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+		snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+	}
 
 	if (hdmi_parse_codec(codec) < 0) {
 		codec->spec = NULL;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 374ea53..4e6b090 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5389,400 +5389,202 @@
 	{}
 };
 
-#define ALC255_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
 #define ALC256_STANDARD_PINS \
 	{0x12, 0x90a60140}, \
 	{0x14, 0x90170110}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
 	{0x21, 0x02211020}
 
 #define ALC282_STANDARD_PINS \
-	{0x14, 0x90170110}, \
-	{0x18, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
-#define ALC288_STANDARD_PINS \
-	{0x17, 0x411111f0}, \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x14, 0x90170110}
 
 #define ALC290_STANDARD_PINS \
-	{0x12, 0x99a30130}, \
-	{0x13, 0x40000000}, \
-	{0x16, 0x411111f0}, \
-	{0x17, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x12, 0x99a30130}
 
 #define ALC292_STANDARD_PINS \
 	{0x14, 0x90170110}, \
-	{0x15, 0x0221401f}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1d, 0x40700001}
+	{0x15, 0x0221401f}
 
 #define ALC298_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}, \
-	{0x1f, 0x411111f0}
+	{0x12, 0x90a60130}, \
+	{0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
-		{0x12, 0x40300000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x1d, 0x40538029},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60140},
 		{0x14, 0x90170110},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170130},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221103f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170150},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x02011020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221105f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
 		{0x17, 0x90170140},
-		{0x18, 0x40000000},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41163b05},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0321102f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60180},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x40000000},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x4077992d},
-		{0x1e, 0x411111ff}),
+		ALC256_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
 		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40748605},
-		{0x1e, 0x411111f0}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
 		{0x12, 0x90a60140},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
 		{0x18, 0x02811030},
-		{0x19, 0x411111f0},
 		{0x1a, 0x04a1103f},
-		{0x1b, 0x02011020},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x02011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP 15 Touchsmart", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40f41905},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11030},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x03211020}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11020},
-		{0x1d, 0x40f00001},
 		{0x21, 0x0421101f}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11030},
-		{0x1d, 0x40f00001},
 		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
-		{0x19, 0x411111f0},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
-		ALC288_STANDARD_PINS,
 		{0x12, 0x90a60120},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
-		{0x1d, 0x4076832d},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170112},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170110},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x01a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
 		{0x18, 0x02a19031},
-		{0x19, 0x01a1903e},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a1903e}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
+		{0x12, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
 		{0x13, 0x90a60140},
 		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x21a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111ff}),
+		{0x13, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
-		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
-		{0x14, 0x411111f0},
-		{0x17, 0x90170140},
-		{0x1d, 0x4068a36d},
-		{0x21, 0x03211020}),
+		{0x17, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170140}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170150}),
 	{}
 };
 
@@ -6675,77 +6477,33 @@
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
-		{0x12, 0x4004c000},
 		{0x14, 0x01014010},
-		{0x15, 0x411111f0},
-		{0x16, 0x411111f0},
 		{0x18, 0x01a19020},
-		{0x19, 0x411111f0},
 		{0x1a, 0x0181302f},
-		{0x1b, 0x0221401f},
-		{0x1c, 0x411111f0},
-		{0x1d, 0x4054c601},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x0221401f}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30130},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30140},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30150},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
-		{0x12, 0x411111f0},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x90a60130},
 		{0x14, 0x90170110},
-		{0x15, 0x0321101f},
-		{0x16, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40d6832d},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x15, 0x0321101f}),
 	{}
 };
 
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index c19e021..9bba275 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1526,7 +1526,7 @@
 
 static int snd_hdsp_create_midi (struct snd_card *card, struct hdsp *hdsp, int id)
 {
-	char buf[32];
+	char buf[40];
 
 	hdsp->midi[id].id = id;
 	hdsp->midi[id].rmidi = NULL;
@@ -1537,7 +1537,7 @@
 	hdsp->midi[id].pending = 0;
 	spin_lock_init (&hdsp->midi[id].lock);
 
-	sprintf (buf, "%s MIDI %d", card->shortname, id+1);
+	snprintf(buf, sizeof(buf), "%s MIDI %d", card->shortname, id + 1);
 	if (snd_rawmidi_new (card, buf, id, 1, 1, &hdsp->midi[id].rmidi) < 0)
 		return -1;
 
@@ -2806,7 +2806,8 @@
 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
 	offset = ucontrol->id.index - 1;
-	snd_BUG_ON(offset < 0);
+	if (snd_BUG_ON(offset < 0))
+		return -EINVAL;
 
 	switch (hdsp->io_type) {
 	case Digiface:
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 6120a06..4373615 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <sound/core.h>
 #include "pmac.h"
 
@@ -101,6 +102,7 @@
 	{ "keywest", 0 },		/* instantiated by us if needed */
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, keywest_i2c_id);
 
 static struct i2c_driver keywest_driver = {
 	.driver = {
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 1d651b8..225bfda 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -57,6 +57,7 @@
 source "sound/soc/sh/Kconfig"
 source "sound/soc/sirf/Kconfig"
 source "sound/soc/spear/Kconfig"
+source "sound/soc/sti/Kconfig"
 source "sound/soc/tegra/Kconfig"
 source "sound/soc/txx9/Kconfig"
 source "sound/soc/ux500/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 669648b..134aca1 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= sirf/
 obj-$(CONFIG_SND_SOC)	+= spear/
+obj-$(CONFIG_SND_SOC)	+= sti/
 obj-$(CONFIG_SND_SOC)	+= tegra/
 obj-$(CONFIG_SND_SOC)	+= txx9/
 obj-$(CONFIG_SND_SOC)	+= ux500/
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 841d059..ba8def5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -290,7 +290,7 @@
 	int dir, dir_mask;
 	int ret;
 
-	pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
+	pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
 		ssc_readl(ssc_p->ssc->regs, SR));
 
 	/* Enable PMC peripheral clock for this SSC */
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index dd94fea..5741c0a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -344,14 +344,8 @@
 
 	platform_set_drvdata(pdev, dmadata);
 
-	return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform);
-}
-
-static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &au1xpsc_soc_platform);
 }
 
 static struct platform_driver au1xpsc_pcm_driver = {
@@ -359,7 +353,6 @@
 		.name	= "au1xpsc-pcm",
 	},
 	.probe		= au1xpsc_pcm_drvprobe,
-	.remove		= au1xpsc_pcm_drvremove,
 };
 
 module_platform_driver(au1xpsc_pcm_driver);
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 24cc7f4..fcf5a9a 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -312,14 +312,8 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform);
-}
-
-static int alchemy_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &alchemy_pcm_soc_platform);
 }
 
 static struct platform_driver alchemy_pcmdma_driver = {
@@ -327,7 +321,6 @@
 		.name	= "alchemy-pcm-dma",
 	},
 	.probe		= alchemy_pcm_drvprobe,
-	.remove		= alchemy_pcm_drvremove,
 };
 
 module_platform_driver(alchemy_pcmdma_driver);
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index e742ef6..38e853a 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -305,19 +305,9 @@
 		return -ENOMEM;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iores)
-		return -ENODEV;
-
-	ret = -EBUSY;
-	if (!devm_request_mem_region(&pdev->dev, iores->start,
-				     resource_size(iores),
-				     pdev->name))
-		return -EBUSY;
-
-	wd->mmio = devm_ioremap(&pdev->dev, iores->start,
-				resource_size(iores));
-	if (!wd->mmio)
-		return -EBUSY;
+	wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(wd->mmio))
+		return PTR_ERR(wd->mmio);
 
 	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmares)
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 03fa1cb..8c435be 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -862,6 +862,8 @@
 	{},
 };
 
+MODULE_DEVICE_TABLE(of, bcm2835_i2s_of_match);
+
 static struct platform_driver bcm2835_i2s_driver = {
 	.probe		= bcm2835_i2s_probe,
 	.driver		= {
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 238913e..02ad260 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -450,13 +450,8 @@
 
 static int bf5xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
-}
-
-static int bf5xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_ac97_soc_platform);
 }
 
 static struct platform_driver bf5xx_pcm_driver = {
@@ -465,7 +460,6 @@
 	},
 
 	.probe = bf5xx_soc_platform_probe,
-	.remove = bf5xx_soc_platform_remove,
 };
 
 module_platform_driver(bf5xx_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index d95477a..6cba211d 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -342,13 +342,8 @@
 
 static int bfin_i2s_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform);
-}
-
-static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_i2s_soc_platform);
 }
 
 static struct platform_driver bfin_i2s_pcm_driver = {
@@ -357,7 +352,6 @@
 	},
 
 	.probe = bfin_i2s_soc_platform_probe,
-	.remove = bfin_i2s_soc_platform_remove,
 };
 
 module_platform_driver(bfin_i2s_pcm_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adau1x61.c b/sound/soc/blackfin/bfin-eval-adau1x61.c
index 4229f76..fddfe00c 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x61.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x61.c
@@ -108,6 +108,7 @@
 
 static struct snd_soc_card bfin_eval_adau1x61 = {
 	.name = "bfin-eval-adau1x61",
+	.owner = THIS_MODULE,
 	.driver_name = "eval-adau1x61",
 	.dai_link = &bfin_eval_adau1x61_dai,
 	.num_links = 1,
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 38b3dad..e8bed6b 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -156,33 +156,29 @@
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -900, 300, 0);
 
 /* {-23, -17, -13.5, -11, -9, -6, -3, 0}dB */
-static const unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-2300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-1700, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-1350, 0, 0),
 	3, 3, TLV_DB_SCALE_ITEM(-1100, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0)
+);
 
 /* {0, 0, 0, -6, 0, 6, 12, 18}dB */
-static const unsigned int aux_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 0, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
 /* {-16, -13, -10, -7, -5.2, -3,3, -2.2, 0}dB, mute instead of -16dB */
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-1600, 300, 1),
 	4, 4, TLV_DB_SCALE_ITEM(-520, 0, 0),
 	5, 5, TLV_DB_SCALE_ITEM(-330, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0)
+);
 
-static const unsigned int st_tlv[] = {
-	TLV_DB_RANGE_HEAD(8),
+static const DECLARE_TLV_DB_RANGE(st_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-12041, 602, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-11087, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-10643, 158, 0),
@@ -190,8 +186,8 @@
 	8, 9, TLV_DB_SCALE_ITEM(-10133, 92, 0),
 	10, 13, TLV_DB_SCALE_ITEM(-9958, 70, 0),
 	14, 17, TLV_DB_SCALE_ITEM(-9689, 53, 0),
-	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0),
-};
+	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0)
+);
 
 /* Sidetone Gain = M * 2^(-5-N) */
 struct st_gain {
@@ -1028,10 +1024,8 @@
 
 	if (dir == PM860X_CLK_DIR_OUT)
 		pm860x->dir = PM860X_CLK_DIR_OUT;
-	else {
-		pm860x->dir = PM860X_CLK_DIR_IN;
+	else	/* Slave mode is not supported */
 		return -EINVAL;
-	}
 
 	return 0;
 }
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efaafce..0c9733e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -53,6 +53,7 @@
 	select SND_SOC_CS4271_I2C if I2C
 	select SND_SOC_CS4271_SPI if SPI_MASTER
 	select SND_SOC_CS42XX8_I2C if I2C
+	select SND_SOC_CS4349 if I2C
 	select SND_SOC_CX20442 if TTY
 	select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_DA7213 if I2C
@@ -62,6 +63,8 @@
 	select SND_SOC_BT_SCO
 	select SND_SOC_ES8328_SPI if SPI_MASTER
 	select SND_SOC_ES8328_I2C if I2C
+	select SND_SOC_GTM601
+	select SND_SOC_ICS43432
 	select SND_SOC_ISABELLE if I2C
 	select SND_SOC_JZ4740_CODEC
 	select SND_SOC_LM4857 if I2C
@@ -83,6 +86,7 @@
 	select SND_SOC_PCM512x_I2C if I2C
 	select SND_SOC_PCM512x_SPI if SPI_MASTER
 	select SND_SOC_RT286 if I2C
+	select SND_SOC_RT298 if I2C
 	select SND_SOC_RT5631 if I2C
 	select SND_SOC_RT5640 if I2C
 	select SND_SOC_RT5645 if I2C
@@ -102,6 +106,7 @@
 	select SND_SOC_STA350 if I2C
 	select SND_SOC_STA529 if I2C
 	select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
+	select SND_SOC_STI_SAS
 	select SND_SOC_TAS2552 if I2C
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
@@ -403,6 +408,11 @@
 	select SND_SOC_CS42XX8
 	select REGMAP_I2C
 
+# Cirrus Logic CS4349 HiFi DAC
+config SND_SOC_CS4349
+	tristate "Cirrus Logic CS4349 CODEC"
+	depends on I2C
+
 config SND_SOC_CX20442
 	tristate
 	depends on TTY
@@ -446,6 +456,12 @@
 	tristate
 	select SND_SOC_ES8328
 
+config SND_SOC_GTM601
+	tristate 'GTM601 UMTS modem audio codec'
+
+config SND_SOC_ICS43432
+	tristate
+
 config SND_SOC_ISABELLE
         tristate
 
@@ -512,12 +528,18 @@
 config SND_SOC_RL6347A
 	tristate
 	default y if SND_SOC_RT286=y
+	default y if SND_SOC_RT298=y
 	default m if SND_SOC_RT286=m
+	default m if SND_SOC_RT298=m
 
 config SND_SOC_RT286
 	tristate
 	depends on I2C
 
+config SND_SOC_RT298
+	tristate
+	depends on I2C
+
 config SND_SOC_RT5631
 	tristate "Realtek ALC5631/RT5631 CODEC"
 	depends on I2C
@@ -610,6 +632,9 @@
 config SND_SOC_STAC9766
 	tristate
 
+config SND_SOC_STI_SAS
+	tristate "codec Audio support for STI SAS codec"
+
 config SND_SOC_TAS2552
 	tristate "Texas Instruments TAS2552 Mono Audio amplifier"
 	depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index cf160d9..4a32077 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -45,6 +45,7 @@
 snd-soc-cs4271-spi-objs := cs4271-spi.o
 snd-soc-cs42xx8-objs := cs42xx8.o
 snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
+snd-soc-cs4349-objs := cs4349.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
 snd-soc-da7213-objs := da7213.o
@@ -55,6 +56,8 @@
 snd-soc-es8328-objs := es8328.o
 snd-soc-es8328-i2c-objs := es8328-i2c.o
 snd-soc-es8328-spi-objs := es8328-spi.o
+snd-soc-gtm601-objs := gtm601.o
+snd-soc-ics43432-objs := ics43432.o
 snd-soc-isabelle-objs := isabelle.o
 snd-soc-jz4740-codec-objs := jz4740.o
 snd-soc-l3-objs := l3.o
@@ -79,6 +82,7 @@
 snd-soc-rl6231-objs := rl6231.o
 snd-soc-rl6347a-objs := rl6347a.o
 snd-soc-rt286-objs := rt286.o
+snd-soc-rt298-objs := rt298.o
 snd-soc-rt5631-objs := rt5631.o
 snd-soc-rt5640-objs := rt5640.o
 snd-soc-rt5645-objs := rt5645.o
@@ -106,6 +110,7 @@
 snd-soc-sta350-objs := sta350.o
 snd-soc-sta529-objs := sta529.o
 snd-soc-stac9766-objs := stac9766.o
+snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
 snd-soc-tfa9879-objs := tfa9879.o
@@ -232,6 +237,7 @@
 obj-$(CONFIG_SND_SOC_CS4271_SPI)	+= snd-soc-cs4271-spi.o
 obj-$(CONFIG_SND_SOC_CS42XX8)	+= snd-soc-cs42xx8.o
 obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
+obj-$(CONFIG_SND_SOC_CS4349)	+= snd-soc-cs4349.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
 obj-$(CONFIG_SND_SOC_DA7213)	+= snd-soc-da7213.o
@@ -242,6 +248,8 @@
 obj-$(CONFIG_SND_SOC_ES8328)	+= snd-soc-es8328.o
 obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
 obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
+obj-$(CONFIG_SND_SOC_GTM601)    += snd-soc-gtm601.o
+obj-$(CONFIG_SND_SOC_ICS43432)	+= snd-soc-ics43432.o
 obj-$(CONFIG_SND_SOC_ISABELLE)	+= snd-soc-isabelle.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
@@ -266,6 +274,7 @@
 obj-$(CONFIG_SND_SOC_RL6231)	+= snd-soc-rl6231.o
 obj-$(CONFIG_SND_SOC_RL6347A)	+= snd-soc-rl6347a.o
 obj-$(CONFIG_SND_SOC_RT286)	+= snd-soc-rt286.o
+obj-$(CONFIG_SND_SOC_RT298)	+= snd-soc-rt298.o
 obj-$(CONFIG_SND_SOC_RT5631)	+= snd-soc-rt5631.o
 obj-$(CONFIG_SND_SOC_RT5640)	+= snd-soc-rt5640.o
 obj-$(CONFIG_SND_SOC_RT5645)	+= snd-soc-rt5645.o
@@ -289,6 +298,7 @@
 obj-$(CONFIG_SND_SOC_STA350)   += snd-soc-sta350.o
 obj-$(CONFIG_SND_SOC_STA529)   += snd-soc-sta529.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
+obj-$(CONFIG_SND_SOC_STI_SAS)	+= snd-soc-sti-sas.o
 obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index c7d243d..affb192 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1335,11 +1335,10 @@
 static DECLARE_TLV_DB_SCALE(hs_ear_dig_gain_tlv, -100, 100, 1);
 /* -1dB = Mute */
 
-static const unsigned int hs_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hs_gain_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-3200, 400, 0),
-	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
-};
+	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(mic_gain_tlv, 0, 100, 0);
 
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 3cc69a6..9ef20db 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -202,19 +202,21 @@
 		.formats = SND_SOC_STD_AC97_FMTS, },
 };
 
+#define AD1980_VENDOR_ID 0x41445300
+#define AD1980_VENDOR_MASK 0xffffff00
+
 static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
 	unsigned int retry_cnt = 0;
+	int ret;
 
 	do {
-		if (try_warm && soc_ac97_ops->warm_reset) {
-			soc_ac97_ops->warm_reset(ac97);
-			if (snd_soc_read(codec, AC97_RESET) == 0x0090)
-				return 1;
-		}
+		ret = snd_ac97_reset(ac97, true, AD1980_VENDOR_ID,
+			AD1980_VENDOR_MASK);
+		if (ret >= 0)
+			return 0;
 
-		soc_ac97_ops->reset(ac97);
 		/*
 		 * Set bit 16slot in register 74h, then every slot will has only
 		 * 16 bits. This command is sent out in 20bit mode, in which
@@ -223,8 +225,6 @@
 		 */
 		snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
 
-		if (snd_soc_read(codec, AC97_RESET)  == 0x0090)
-			return 0;
 	} while (retry_cnt++ < 10);
 
 	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
@@ -240,7 +240,7 @@
 	u16 vendor_id2;
 	u16 ext_status;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, 0, 0);
 	if (IS_ERR(ac97)) {
 		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -260,22 +260,10 @@
 	if (ret < 0)
 		goto reset_err;
 
-	/* Read out vendor ID to make sure it is ad1980 */
-	if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
-		ret = -ENODEV;
-		goto reset_err;
-	}
-
 	vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
-
-	if (vendor_id2 != 0x5370) {
-		if (vendor_id2 != 0x5374) {
-			ret = -ENODEV;
-			goto reset_err;
-		} else {
-			dev_warn(codec->dev,
-				"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
-		}
+	if (vendor_id2 == 0x5374) {
+		dev_warn(codec->dev,
+			"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
 	}
 
 	/* unmute captures and playbacks volume */
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index a431602..fe1353a 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -320,13 +320,12 @@
 	{ ADAU1373_DIGEN,		0x00 },
 };
 
-static const unsigned int adau1373_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(adau1373_out_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const DECLARE_TLV_DB_MINMAX(adau1373_digital_tlv, -9563, 0);
 static const DECLARE_TLV_DB_SCALE(adau1373_in_pga_tlv, -1300, 100, 1);
@@ -381,12 +380,11 @@
 	"158Hz", "232Hz", "347Hz", "520Hz",
 };
 
-static const unsigned int adau1373_bass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(adau1373_bass_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
 	3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
-	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
-};
+	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0)
+);
 
 static SOC_ENUM_SINGLE_DECL(adau1373_bass_lpf_cutoff_enum,
 	ADAU1373_BASS1, 5, adau1373_bass_lpf_cutoff_text);
@@ -414,11 +412,10 @@
 static SOC_ENUM_SINGLE_DECL(adau1373_3d_cutoff_enum,
 	ADAU1373_3D_CTRL1, 0, adau1373_3d_cutoff_text);
 
-static const unsigned int adau1373_3d_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(adau1373_3d_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120),
-};
+	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120)
+);
 
 static const char *adau1373_lr_mux_text[] = {
 	"Mute",
@@ -1534,7 +1531,6 @@
 static struct i2c_driver adau1373_i2c_driver = {
 	.driver = {
 		.name = "adau1373",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1373_i2c_probe,
 	.remove = adau1373_i2c_remove,
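
The adau1373 TLV hunks above (and the matching conversions in alc5623,
alc5632, cs42l52, cs42l56, cs42l73, da7210, da7213 and da9055 below) swap
the open-coded TLV arrays, where the count in TLV_DB_RANGE_HEAD(n) had to
be kept in sync with the entries by hand, for the self-sizing
DECLARE_TLV_DB_RANGE() macro. A before/after sketch with illustrative
values:

  /* Old style: n in TLV_DB_RANGE_HEAD(n) is easy to get wrong. */
  static const unsigned int example_tlv_old[] = {
  	TLV_DB_RANGE_HEAD(2),
  	0, 7,  TLV_DB_SCALE_ITEM(-7900, 400, 1),
  	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
  };

  /* New style: the macro derives the count from its arguments. */
  static const DECLARE_TLV_DB_RANGE(example_tlv,
  	0, 7,  TLV_DB_SCALE_ITEM(-7900, 400, 1),
  	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0)
  );
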
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index ff7f846..de53c0d 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -915,7 +915,6 @@
 static struct i2c_driver adau1701_i2c_driver = {
 	.driver = {
 		.name	= "adau1701",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(adau1701_dt_ids),
 	},
 	.probe		= adau1701_i2c_probe,
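
The ".owner = THIS_MODULE" deletions here and in most of the I2C codec
drivers that follow are safe because the I2C core fills in the owner
itself; the registration macro passes THIS_MODULE down. Paraphrased from
include/linux/i2c.h:

  /* i2c_add_driver(), which module_i2c_driver() ultimately uses, already
   * supplies THIS_MODULE, so setting .owner by hand only duplicates it. */
  #define i2c_add_driver(driver) \
  	i2c_register_driver(THIS_MODULE, driver)
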
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index 862796d..348ccb1 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -47,7 +47,6 @@
 static struct i2c_driver adau1761_i2c_driver = {
 	.driver = {
 		.name = "adau1761",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1761_i2c_probe,
 	.remove = adau1761_i2c_remove,
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 2ce4362..0e32bba 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver adau1781_i2c_driver = {
 	.driver = {
 		.name = "adau1781",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1781_i2c_probe,
 	.remove = adau1781_i2c_remove,
diff --git a/sound/soc/codecs/adau1977-i2c.c b/sound/soc/codecs/adau1977-i2c.c
index 9700e8c..21e7394 100644
--- a/sound/soc/codecs/adau1977-i2c.c
+++ b/sound/soc/codecs/adau1977-i2c.c
@@ -46,7 +46,6 @@
 static struct i2c_driver adau1977_i2c_driver = {
 	.driver = {
 		.name = "adau1977",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1977_i2c_probe,
 	.remove = adau1977_i2c_remove,
diff --git a/sound/soc/codecs/adav803.c b/sound/soc/codecs/adav803.c
index 66d9fce..52881fa 100644
--- a/sound/soc/codecs/adav803.c
+++ b/sound/soc/codecs/adav803.c
@@ -36,7 +36,6 @@
 static struct i2c_driver adav803_driver = {
 	.driver = {
 		.name = "adav803",
-		.owner = THIS_MODULE,
 	},
 	.probe = adav803_probe,
 	.remove = adav803_remove,
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 36d8425..198c924 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -113,7 +113,7 @@
 
 #define ADAV80X_PLL_OUTE_SYSCLKPD(x)		BIT(2 - (x))
 
-static struct reg_default adav80x_reg_defaults[] = {
+static const struct reg_default adav80x_reg_defaults[] = {
 	{ ADAV80X_PLAYBACK_CTRL,	0x01 },
 	{ ADAV80X_AUX_IN_CTRL,		0x01 },
 	{ ADAV80X_REC_CTRL,		0x02 },
@@ -865,7 +865,6 @@
 	.val_bits = 8,
 	.pad_bits = 1,
 	.reg_bits = 7,
-	.read_flag_mask = 0x01,
 
 	.max_register = ADAV80X_PLL_OUTE,
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 8670861..54428c6 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -444,7 +444,6 @@
 static struct i2c_driver ak4535_i2c_driver = {
 	.driver = {
 		.name = "ak4535",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4535_i2c_probe,
 	.remove =   ak4535_i2c_remove,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 2d0ff45..b14176f 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -609,7 +609,6 @@
 static struct i2c_driver ak4641_i2c_driver = {
 	.driver = {
 		.name = "ak4641",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4641_i2c_probe,
 	.remove =   ak4641_i2c_remove,
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 7c0f6552..4a90143 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -64,12 +64,15 @@
 #define FIL1_0		0x1c
 #define FIL1_1		0x1d
 #define FIL1_2		0x1e
-#define FIL1_3		0x1f
+#define FIL1_3		0x1f	/* The maximum valid register for ak4642 */
 #define PW_MGMT4	0x20
 #define MD_CTL5		0x21
 #define LO_MS		0x22
 #define HP_MS		0x23
-#define SPK_MS		0x24
+#define SPK_MS		0x24	/* The maximum valid register for ak4643 */
+#define EQ_FBEQAB	0x25
+#define EQ_FBEQCD	0x26
+#define EQ_FBEQE	0x27	/* The maximum valid register for ak4648 */
 
 /* PW_MGMT1*/
 #define PMVCM		(1 << 6) /* VCOM Power Management */
@@ -241,7 +244,7 @@
 /*
  * ak4642 register cache
  */
-static const struct reg_default ak4642_reg[] = {
+static const struct reg_default ak4643_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
 	{  8, 0xe1 }, {  9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
@@ -254,6 +257,14 @@
 	{ 36, 0x00 },
 };
 
+/* The default settings for registers 0x0 ~ 0x1f are the same for the
+   ak4642 and the ak4643, so we reuse the ak4643 reg_defaults for the
+   ak4642. The valid registers for the ak4642 are 0x0 ~ 0x1f, a subset
+   of the ak4643's, so define NUM_AK4642_REG_DEFAULTS for the ak4642.
+*/
+#define ak4642_reg ak4643_reg
+#define NUM_AK4642_REG_DEFAULTS	(FIL1_3 + 1)
+
 static const struct reg_default ak4648_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
@@ -535,15 +546,23 @@
 static const struct regmap_config ak4642_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4642_reg) + 1,
+	.max_register		= FIL1_3,
 	.reg_defaults		= ak4642_reg,
-	.num_reg_defaults	= ARRAY_SIZE(ak4642_reg),
+	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
+};
+
+static const struct regmap_config ak4643_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+	.max_register		= SPK_MS,
+	.reg_defaults		= ak4643_reg,
+	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
 };
 
 static const struct regmap_config ak4648_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4648_reg) + 1,
+	.max_register		= EQ_FBEQE,
 	.reg_defaults		= ak4648_reg,
 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
 };
@@ -553,7 +572,7 @@
 };
 
 static const struct ak4642_drvdata ak4643_drvdata = {
-	.regmap_config = &ak4642_regmap,
+	.regmap_config = &ak4643_regmap,
 };
 
 static const struct ak4642_drvdata ak4648_drvdata = {
@@ -626,7 +645,6 @@
 static struct i2c_driver ak4642_i2c_driver = {
 	.driver = {
 		.name = "ak4642-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = ak4642_of_match,
 	},
 	.probe		= ak4642_i2c_probe,
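
The ak4642 regmap hunks above fix two related problems: max_register was
computed as ARRAY_SIZE(reg_defaults) + 1, which is only valid for a dense,
zero-based defaults table, and the ak4643 shared the ak4642 config even
though it has more registers (up to SPK_MS, and EQ_FBEQE on the ak4648).
Bounding the map by the highest register define is the safer idiom; a
sketch with hypothetical names:

  static const struct reg_default mycodec_reg[] = {
  	{ 0x00, 0x00 }, { 0x01, 0x00 },	/* ... dense table up to 0x1f */
  };

  #define MYCODEC_LAST_REG	0x1f	/* highest valid register address */

  static const struct regmap_config mycodec_regmap = {
  	.reg_bits		= 8,
  	.val_bits		= 8,
  	/* Bound by the register define, not by the size of the defaults
  	 * table, which may be sparse or only cover part of the map. */
  	.max_register		= MYCODEC_LAST_REG,
  	.reg_defaults		= mycodec_reg,
  	.num_reg_defaults	= ARRAY_SIZE(mycodec_reg),
  };
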
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 0e59063..c73a9f6 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -663,7 +663,6 @@
 static struct i2c_driver ak4671_i2c_driver = {
 	.driver = {
 		.name = "ak4671-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe = ak4671_i2c_probe,
 	.remove = ak4671_i2c_remove,
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 0fc24e0..d2e3a3e 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -82,12 +82,11 @@
 static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 
 static const struct snd_kcontrol_new alc5621_vol_snd_controls[] = {
@@ -1085,7 +1084,6 @@
 static struct i2c_driver alc5623_i2c_driver = {
 	.driver = {
 		.name = "alc562x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5623_of_match),
 	},
 	.probe = alc5623_i2c_probe,
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 607a63b..4d3ba33 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -35,7 +35,7 @@
 /*
  * ALC5632 register cache
  */
-static struct reg_default  alc5632_reg_defaults[] = {
+static const struct reg_default alc5632_reg_defaults[] = {
 	{   2, 0x8080 },	/* R2   - Speaker Output Volume */
 	{   4, 0x8080 },	/* R4   - Headphone Output Volume */
 	{   6, 0x8080 },	/* R6   - AUXOUT Volume */
@@ -146,11 +146,10 @@
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 /* -16.5db min scale, 1.5db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 /* 0db min scale, 6 db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 /* 0db min scalem 0.75db steps, no mute */
@@ -1183,7 +1182,6 @@
 static struct i2c_driver alc5632_i2c_driver = {
 	.driver = {
 		.name = "alc5632",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5632_of_match),
 	},
 	.probe = alc5632_i2c_probe,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 802e05e..8a2221a 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1366,7 +1366,7 @@
 {
 	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct arizona *arizona = priv->arizona;
-	struct reg_default dac_comp[] = {
+	struct reg_sequence dac_comp[] = {
 		{ 0x80, 0x3 },
 		{ ARIZONA_DAC_COMP_1, 0 },
 		{ ARIZONA_DAC_COMP_2, 0 },
@@ -1504,7 +1504,7 @@
 	else
 		rates = &arizona_48k_bclk_rates[0];
 
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 
 	if (tdm_slots) {
 		arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
@@ -1756,17 +1756,6 @@
 }
 EXPORT_SYMBOL_GPL(arizona_init_dai);
 
-static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
-{
-	struct arizona_fll *fll = data;
-
-	arizona_fll_dbg(fll, "clock OK\n");
-
-	complete(&fll->ok);
-
-	return IRQ_HANDLED;
-}
-
 static struct {
 	unsigned int min;
 	unsigned int max;
@@ -2048,17 +2037,18 @@
 static int arizona_enable_fll(struct arizona_fll *fll)
 {
 	struct arizona *arizona = fll->arizona;
-	unsigned long time_left;
 	bool use_sync = false;
 	int already_enabled = arizona_is_enabled_fll(fll);
 	struct arizona_fll_cfg cfg;
+	int i;
+	unsigned int val;
 
 	if (already_enabled < 0)
 		return already_enabled;
 
 	if (already_enabled) {
 		/* Facilitate smooth refclk across the transition */
-		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7,
+		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
 					 ARIZONA_FLL1_GAIN_MASK, 0);
 		regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN,
@@ -2110,9 +2100,6 @@
 	if (!already_enabled)
 		pm_runtime_get(arizona->dev);
 
-	/* Clear any pending completions */
-	try_wait_for_completion(&fll->ok);
-
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
 	if (use_sync)
@@ -2124,10 +2111,24 @@
 		regmap_update_bits_async(arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN, 0);
 
-	time_left = wait_for_completion_timeout(&fll->ok,
-					  msecs_to_jiffies(250));
-	if (time_left == 0)
+	arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
+	val = 0;
+	for (i = 0; i < 15; i++) {
+		if (i < 5)
+			usleep_range(200, 400);
+		else
+			msleep(20);
+
+		regmap_read(arizona->regmap,
+			    ARIZONA_INTERRUPT_RAW_STATUS_5,
+			    &val);
+		if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
+			break;
+	}
+	if (i == 15)
 		arizona_fll_warn(fll, "Timed out waiting for lock\n");
+	else
+		arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
 
 	return 0;
 }
@@ -2212,11 +2213,8 @@
 int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
 		     int ok_irq, struct arizona_fll *fll)
 {
-	int ret;
 	unsigned int val;
 
-	init_completion(&fll->ok);
-
 	fll->id = id;
 	fll->base = base;
 	fll->arizona = arizona;
@@ -2238,13 +2236,6 @@
 	snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
 		 "FLL%d clock OK", id);
 
-	ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
-				  arizona_fll_clock_ok, fll);
-	if (ret != 0) {
-		dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
-			id, ret);
-	}
-
 	regmap_update_bits(arizona->regmap, fll->base + 1,
 			   ARIZONA_FLL1_FREERUN, 0);
 
@@ -2313,6 +2304,82 @@
 };
 EXPORT_SYMBOL_GPL(arizona_adsp2_rate_controls);
 
+static bool arizona_eq_filter_unstable(bool mode, __be16 _a, __be16 _b)
+{
+	s16 a = be16_to_cpu(_a);
+	s16 b = be16_to_cpu(_b);
+
+	if (!mode) {
+		return abs(a) >= 4096;
+	} else {
+		if (abs(b) >= 4096)
+			return true;
+
+		return (abs((a << 16) / (4096 - b)) >= 4096 << 4);
+	}
+}
+
+int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_bytes *params = (void *)kcontrol->private_value;
+	unsigned int val;
+	__be16 *data;
+	int len;
+	int ret;
+
+	len = params->num_regs * regmap_get_val_bytes(arizona->regmap);
+
+	data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+
+	data[0] &= cpu_to_be16(ARIZONA_EQ1_B1_MODE);
+
+	if (arizona_eq_filter_unstable(!!data[0], data[1], data[2]) ||
+	    arizona_eq_filter_unstable(true, data[4], data[5]) ||
+	    arizona_eq_filter_unstable(true, data[8], data[9]) ||
+	    arizona_eq_filter_unstable(true, data[12], data[13]) ||
+	    arizona_eq_filter_unstable(false, data[16], data[17])) {
+		dev_err(arizona->dev, "Rejecting unstable EQ coefficients\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = regmap_read(arizona->regmap, params->base, &val);
+	if (ret != 0)
+		goto out;
+
+	val &= ~ARIZONA_EQ1_B1_MODE;
+	data[0] |= cpu_to_be16(val);
+
+	ret = regmap_raw_write(arizona->regmap, params->base, data, len);
+
+out:
+	kfree(data);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_eq_coeff_put);
+
+int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	__be16 *data = (__be16 *)ucontrol->value.bytes.data;
+	s16 val = be16_to_cpu(*data);
+
+	if (abs(val) >= 4096) {
+		dev_err(arizona->dev, "Rejecting unstable LHPF coefficients\n");
+		return -EINVAL;
+	}
+
+	return snd_soc_bytes_put(kcontrol, ucontrol);
+}
+EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+
 MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_LICENSE("GPL");
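
Two substantive changes land in arizona.c above. First, FLL lock detection
drops the clock-OK interrupt and its completion in favour of polling
ARIZONA_INTERRUPT_RAW_STATUS_5, sleeping briefly while a fast lock is
likely and backing off to longer sleeps afterwards. Second, EQ and LHPF
coefficients written from user space are now validated, rejecting filters
whose signed 16-bit coefficients (where 4096 appears to correspond to a
gain of 1.0 in the hardware's fixed-point format) would be unstable. A
sketch of the polling pattern only, with a hypothetical helper name:

  /* Hypothetical helper mirroring the FLL lock wait above: poll a status
   * register with a two-stage backoff. */
  static int my_wait_for_status(struct regmap *map, unsigned int reg,
  				unsigned int mask)
  {
  	unsigned int val = 0;
  	int i;

  	for (i = 0; i < 15; i++) {
  		if (i < 5)
  			usleep_range(200, 400);	/* lock is usually quick */
  		else
  			msleep(20);		/* back off if it is not */

  		regmap_read(map, reg, &val);
  		if (val & mask)
  			return 0;
  	}

  	return -ETIMEDOUT;
  }
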
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 43deb04..ada0a41 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -194,6 +194,20 @@
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "R")
 
+#define ARIZONA_EQ_CONTROL(xname, xbase)                      \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_eq_coeff_put, .private_value =         \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 20, .mask = ~ARIZONA_EQ1_B1_MODE }) }
+
+#define ARIZONA_LHPF_CONTROL(xname, xbase)                    \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_lhpf_coeff_put, .private_value =       \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 1 }) }
+
 #define ARIZONA_RATE_ENUM_SIZE 4
 extern const char *arizona_rate_text[ARIZONA_RATE_ENUM_SIZE];
 extern const int arizona_rate_val[ARIZONA_RATE_ENUM_SIZE];
@@ -229,6 +243,11 @@
 			 struct snd_kcontrol *kcontrol,
 			 int event);
 
+extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol);
+extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol);
+
 extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
 			      int source, unsigned int freq, int dir);
 
@@ -242,7 +261,6 @@
 	int id;
 	unsigned int base;
 	unsigned int vco_mult;
-	struct completion ok;
 
 	unsigned int fout;
 	int sync_src;
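
The new ARIZONA_EQ_CONTROL()/ARIZONA_LHPF_CONTROL() macros build ordinary
bytes controls but substitute the validating put handlers exported from
arizona.c, so a codec driver opts into coefficient checking simply by
instantiating the macro in its control table. A hedged usage sketch
(control names and register bases are illustrative):

  static const struct snd_kcontrol_new my_codec_controls[] = {
  	/* .info/.get stay the stock bytes handlers; only .put is
  	 * replaced by the validating variants. */
  	ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
  	ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
  };
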
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 8f40025..44c30fe 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -74,33 +74,8 @@
 static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_PWRCTL1:
-	case CS35L32_PWRCTL2:
-	case CS35L32_CLK_CTL:
-	case CS35L32_BATT_THRESHOLD:
-	case CS35L32_VMON:
-	case CS35L32_BST_CPCP_CTL:
-	case CS35L32_IMON_SCALING:
-	case CS35L32_AUDIO_LED_MNGR:
-	case CS35L32_ADSP_CTL:
-	case CS35L32_CLASSD_CTL:
-	case CS35L32_PROTECT_CTL:
-	case CS35L32_INT_MASK_1:
-	case CS35L32_INT_MASK_2:
-	case CS35L32_INT_MASK_3:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
-	case CS35L32_FLASH_MODE:
-	case CS35L32_MOVIE_MODE:
-	case CS35L32_FLASH_TIMER:
-	case CS35L32_FLASH_INHIBIT:
+	case CS35L32_DEVID_AB ... CS35L32_AUDIO_LED_MNGR:
+	case CS35L32_ADSP_CTL ... CS35L32_FLASH_INHIBIT:
 		return true;
 	default:
 		return false;
@@ -110,15 +85,8 @@
 static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_DEVID_AB ... CS35L32_REV_ID:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -128,10 +96,7 @@
 static bool cs35l32_precious_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -276,7 +241,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 in datasheet */
-static const struct reg_default cs35l32_monitor_patch[] = {
+static const struct reg_sequence cs35l32_monitor_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x48, 0x17 },
@@ -441,8 +406,7 @@
 	if (IS_ERR(cs35l32->reset_gpio))
 		return PTR_ERR(cs35l32->reset_gpio);
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	/* initialize codec */
 	ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
@@ -536,8 +500,7 @@
 	snd_soc_unregister_codec(&i2c_client->dev);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	return 0;
 }
@@ -551,8 +514,7 @@
 	regcache_mark_dirty(cs35l32->regmap);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	/* remove power */
 	regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
@@ -575,8 +537,7 @@
 		return ret;
 	}
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	regcache_cache_only(cs35l32->regmap, false);
 	regcache_sync(cs35l32->regmap);
@@ -607,7 +568,6 @@
 static struct i2c_driver cs35l32_i2c_driver = {
 	.driver = {
 		   .name = "cs35l32",
-		   .owner = THIS_MODULE,
 		   .pm = &cs35l32_runtime_pm,
 		   .of_match_table = cs35l32_of_match,
 		   },
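
Two cleanups recur from cs35l32.c onwards. Long runs of contiguous case
labels in the regmap readable/volatile/precious callbacks collapse into
GCC case ranges, and the "if (reset_gpio)" guards disappear because the
gpiod accessors are NULL-safe, NULL being exactly what
devm_gpiod_get_optional() returns when no reset line is wired up. Both
idioms condensed, with illustrative register numbers:

  static bool my_readable_register(struct device *dev, unsigned int reg)
  {
  	switch (reg) {
  	case 0x01 ... 0x0d:	/* contiguous ID/control block */
  	case 0x0f ... 0x20:	/* second block, skipping the hole at 0x0e */
  		return true;
  	default:
  		return false;
  	}
  }

  static void my_assert_reset(struct gpio_desc *reset_gpio)
  {
  	/* No NULL check needed: gpiod calls ignore a NULL descriptor. */
  	gpiod_set_value_cansleep(reset_gpio, 1);
  }
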
diff --git a/sound/soc/codecs/cs35l32.h b/sound/soc/codecs/cs35l32.h
index 31ab804..1d6c250 100644
--- a/sound/soc/codecs/cs35l32.h
+++ b/sound/soc/codecs/cs35l32.h
@@ -80,7 +80,7 @@
 #define CS35L32_GAIN_MGR_MASK		0x08
 #define CS35L32_ADSP_SHARE_MASK		0x08
 #define CS35L32_ADSP_DATACFG_MASK	0x30
-#define CS35L32_SDOUT_3ST		0x80
+#define CS35L32_SDOUT_3ST		0x08
 #define CS35L32_BATT_REC_MASK		0x0E
 #define CS35L32_BATT_THRESH_MASK	0x30
 
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 8e36198..55db19d 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,23 +60,7 @@
 static bool cs4265_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS4265_PWRCTL:
-	case CS4265_DAC_CTL:
-	case CS4265_ADC_CTL:
-	case CS4265_MCLK_FREQ:
-	case CS4265_SIG_SEL:
-	case CS4265_CHB_PGA_CTL:
-	case CS4265_CHA_PGA_CTL:
-	case CS4265_ADC_CTL2:
-	case CS4265_DAC_CHA_VOL:
-	case CS4265_DAC_CHB_VOL:
-	case CS4265_DAC_CTL2:
-	case CS4265_SPDIF_CTL1:
-	case CS4265_SPDIF_CTL2:
-	case CS4265_INT_MASK:
-	case CS4265_STATUS_MODE_MSB:
-	case CS4265_STATUS_MODE_LSB:
-	case CS4265_CHIP_ID:
+	case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
 		return true;
 	default:
 		return false;
@@ -658,7 +642,6 @@
 static struct i2c_driver cs4265_i2c_driver = {
 	.driver = {
 		.name = "cs4265",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4265_of_match,
 	},
 	.id_table = cs4265_id,
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index e6d4ff9..e07807d 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -751,7 +751,6 @@
 static struct i2c_driver cs4270_i2c_driver = {
 	.driver = {
 		.name = "cs4270",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4270_of_match,
 	},
 	.id_table = cs4270_id,
diff --git a/sound/soc/codecs/cs4271-i2c.c b/sound/soc/codecs/cs4271-i2c.c
index b264da0..dcb3223 100644
--- a/sound/soc/codecs/cs4271-i2c.c
+++ b/sound/soc/codecs/cs4271-i2c.c
@@ -48,7 +48,6 @@
 static struct i2c_driver cs4271_i2c_driver = {
 	.driver = {
 		.name = "cs4271",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(cs4271_dt_ids),
 	},
 	.probe = cs4271_i2c_probe,
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index c40428f..9bad478 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver cs42l51_i2c_driver = {
 	.driver = {
 		.name = "cs42l51",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l51_of_match,
 	},
 	.probe = cs42l51_i2c_probe,
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 4de52c9..47b97fc 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -110,58 +110,7 @@
 static bool cs42l52_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L52_CHIP:
-	case CS42L52_PWRCTL1:
-	case CS42L52_PWRCTL2:
-	case CS42L52_PWRCTL3:
-	case CS42L52_CLK_CTL:
-	case CS42L52_IFACE_CTL1:
-	case CS42L52_IFACE_CTL2:
-	case CS42L52_ADC_PGA_A:
-	case CS42L52_ADC_PGA_B:
-	case CS42L52_ANALOG_HPF_CTL:
-	case CS42L52_ADC_HPF_FREQ:
-	case CS42L52_ADC_MISC_CTL:
-	case CS42L52_PB_CTL1:
-	case CS42L52_MISC_CTL:
-	case CS42L52_PB_CTL2:
-	case CS42L52_MICA_CTL:
-	case CS42L52_MICB_CTL:
-	case CS42L52_PGAA_CTL:
-	case CS42L52_PGAB_CTL:
-	case CS42L52_PASSTHRUA_VOL:
-	case CS42L52_PASSTHRUB_VOL:
-	case CS42L52_ADCA_VOL:
-	case CS42L52_ADCB_VOL:
-	case CS42L52_ADCA_MIXER_VOL:
-	case CS42L52_ADCB_MIXER_VOL:
-	case CS42L52_PCMA_MIXER_VOL:
-	case CS42L52_PCMB_MIXER_VOL:
-	case CS42L52_BEEP_FREQ:
-	case CS42L52_BEEP_VOL:
-	case CS42L52_BEEP_TONE_CTL:
-	case CS42L52_TONE_CTL:
-	case CS42L52_MASTERA_VOL:
-	case CS42L52_MASTERB_VOL:
-	case CS42L52_HPA_VOL:
-	case CS42L52_HPB_VOL:
-	case CS42L52_SPKA_VOL:
-	case CS42L52_SPKB_VOL:
-	case CS42L52_ADC_PCM_MIXER:
-	case CS42L52_LIMITER_CTL1:
-	case CS42L52_LIMITER_CTL2:
-	case CS42L52_LIMITER_AT_RATE:
-	case CS42L52_ALC_CTL:
-	case CS42L52_ALC_RATE:
-	case CS42L52_ALC_THRESHOLD:
-	case CS42L52_NOISE_GATE_CTL:
-	case CS42L52_CLK_STATUS:
-	case CS42L52_BATT_COMPEN:
-	case CS42L52_BATT_LEVEL:
-	case CS42L52_SPK_STATUS:
-	case CS42L52_TEM_CTL:
-	case CS42L52_THE_FOLDBACK:
-	case CS42L52_CHARGE_PUMP:
+	case CS42L52_CHIP ... CS42L52_CHARGE_PUMP:
 		return true;
 	default:
 		return false;
@@ -196,11 +145,10 @@
 
 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const cs42l52_adca_text[] = {
 	"Input1A", "Input2A", "Input3A", "Input4A", "PGA Input Left"};
@@ -919,7 +867,7 @@
 			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
 
-static struct snd_soc_dai_ops cs42l52_ops = {
+static const struct snd_soc_dai_ops cs42l52_ops = {
 	.hw_params	= cs42l52_pcm_hw_params,
 	.digital_mute	= cs42l52_digital_mute,
 	.set_fmt	= cs42l52_set_fmt,
@@ -1118,7 +1066,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 */
-static const struct reg_default cs42l52_threshold_patch[] = {
+static const struct reg_sequence cs42l52_threshold_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x3E, 0xBA },
@@ -1285,7 +1233,6 @@
 static struct i2c_driver cs42l52_i2c_driver = {
 	.driver = {
 		.name = "cs42l52",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l52_of_match,
 	},
 	.id_table = cs42l52_id,
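
The cs42l52_threshold_patch retyping above (also applied to arizona's
dac_comp, cs35l32_monitor_patch and the da7210 patch tables) follows a
regmap API split: struct reg_default now describes cache defaults only,
while ordered write sequences passed to regmap_multi_reg_write() or
regmap_register_patch() take struct reg_sequence. A sketch with
hypothetical registers:

  /* Cache contents at reset: stays struct reg_default. */
  static const struct reg_default my_reg_defaults[] = {
  	{ 0x02, 0x00 },
  	{ 0x03, 0x09 },
  };

  /* Writes applied to the device in order: now struct reg_sequence. */
  static const struct reg_sequence my_init_patch[] = {
  	{ 0x00, 0x99 },		/* hypothetical unlock sequence */
  	{ 0x48, 0x17 },
  	{ 0x00, 0x00 },
  };

  static int my_apply_patch(struct regmap *map)
  {
  	return regmap_register_patch(map, my_init_patch,
  				     ARRAY_SIZE(my_init_patch));
  }
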
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 1e11ba4..7cd5f76 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -115,52 +115,7 @@
 static bool cs42l56_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L56_CHIP_ID_1:
-	case CS42L56_CHIP_ID_2:
-	case CS42L56_PWRCTL_1:
-	case CS42L56_PWRCTL_2:
-	case CS42L56_CLKCTL_1:
-	case CS42L56_CLKCTL_2:
-	case CS42L56_SERIAL_FMT:
-	case CS42L56_CLASSH_CTL:
-	case CS42L56_MISC_CTL:
-	case CS42L56_INT_STATUS:
-	case CS42L56_PLAYBACK_CTL:
-	case CS42L56_DSP_MUTE_CTL:
-	case CS42L56_ADCA_MIX_VOLUME:
-	case CS42L56_ADCB_MIX_VOLUME:
-	case CS42L56_PCMA_MIX_VOLUME:
-	case CS42L56_PCMB_MIX_VOLUME:
-	case CS42L56_ANAINPUT_ADV_VOLUME:
-	case CS42L56_DIGINPUT_ADV_VOLUME:
-	case CS42L56_MASTER_A_VOLUME:
-	case CS42L56_MASTER_B_VOLUME:
-	case CS42L56_BEEP_FREQ_ONTIME:
-	case CS42L56_BEEP_FREQ_OFFTIME:
-	case CS42L56_BEEP_TONE_CFG:
-	case CS42L56_TONE_CTL:
-	case CS42L56_CHAN_MIX_SWAP:
-	case CS42L56_AIN_REFCFG_ADC_MUX:
-	case CS42L56_HPF_CTL:
-	case CS42L56_MISC_ADC_CTL:
-	case CS42L56_GAIN_BIAS_CTL:
-	case CS42L56_PGAA_MUX_VOLUME:
-	case CS42L56_PGAB_MUX_VOLUME:
-	case CS42L56_ADCA_ATTENUATOR:
-	case CS42L56_ADCB_ATTENUATOR:
-	case CS42L56_ALC_EN_ATTACK_RATE:
-	case CS42L56_ALC_RELEASE_RATE:
-	case CS42L56_ALC_THRESHOLD:
-	case CS42L56_NOISE_GATE_CTL:
-	case CS42L56_ALC_LIM_SFT_ZC:
-	case CS42L56_AMUTE_HPLO_MUX:
-	case CS42L56_HPA_VOLUME:
-	case CS42L56_HPB_VOLUME:
-	case CS42L56_LOA_VOLUME:
-	case CS42L56_LOB_VOLUME:
-	case CS42L56_LIM_THRESHOLD_CTL:
-	case CS42L56_LIM_CTL_RELEASE_RATE:
-	case CS42L56_LIM_ATTACK_RATE:
+	case CS42L56_CHIP_ID_1 ... CS42L56_LIM_ATTACK_RATE:
 		return true;
 	default:
 		return false;
@@ -185,21 +140,18 @@
 static DECLARE_TLV_DB_SCALE(preamp_tlv, 0, 1000, 0);
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
-static const unsigned int ngnb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ngnb_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-8200, 600, 0),
-	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0),
-};
-static const unsigned int ngb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(ngb_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-6400, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0),
-};
-static const unsigned int alc_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(alc_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const beep_config_text[] = {
 	"Off", "Single", "Multiple", "Continuous"
@@ -989,7 +941,7 @@
 			SNDRV_PCM_FMTBIT_S32_LE)
 
 
-static struct snd_soc_dai_ops cs42l56_ops = {
+static const struct snd_soc_dai_ops cs42l56_ops = {
 	.hw_params	= cs42l56_pcm_hw_params,
 	.digital_mute	= cs42l56_digital_mute,
 	.set_fmt	= cs42l56_set_dai_fmt,
@@ -1408,7 +1360,6 @@
 static struct i2c_driver cs42l56_i2c_driver = {
 	.driver = {
 		.name = "cs42l56",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l56_of_match,
 	},
 	.id_table = cs42l56_id,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index b7853b9..42a8fd4 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -153,111 +153,18 @@
 static bool cs42l73_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L73_DEVID_AB:
-	case CS42L73_DEVID_CD:
-	case CS42L73_DEVID_E:
-	case CS42L73_REVID:
-	case CS42L73_PWRCTL1:
-	case CS42L73_PWRCTL2:
-	case CS42L73_PWRCTL3:
-	case CS42L73_CPFCHC:
-	case CS42L73_OLMBMSDC:
-	case CS42L73_DMMCC:
-	case CS42L73_XSPC:
-	case CS42L73_XSPMMCC:
-	case CS42L73_ASPC:
-	case CS42L73_ASPMMCC:
-	case CS42L73_VSPC:
-	case CS42L73_VSPMMCC:
-	case CS42L73_VXSPFS:
-	case CS42L73_MIOPC:
-	case CS42L73_ADCIPC:
-	case CS42L73_MICAPREPGAAVOL:
-	case CS42L73_MICBPREPGABVOL:
-	case CS42L73_IPADVOL:
-	case CS42L73_IPBDVOL:
-	case CS42L73_PBDC:
-	case CS42L73_HLADVOL:
-	case CS42L73_HLBDVOL:
-	case CS42L73_SPKDVOL:
-	case CS42L73_ESLDVOL:
-	case CS42L73_HPAAVOL:
-	case CS42L73_HPBAVOL:
-	case CS42L73_LOAAVOL:
-	case CS42L73_LOBAVOL:
-	case CS42L73_STRINV:
-	case CS42L73_XSPINV:
-	case CS42L73_ASPINV:
-	case CS42L73_VSPINV:
-	case CS42L73_LIMARATEHL:
-	case CS42L73_LIMRRATEHL:
-	case CS42L73_LMAXHL:
-	case CS42L73_LIMARATESPK:
-	case CS42L73_LIMRRATESPK:
-	case CS42L73_LMAXSPK:
-	case CS42L73_LIMARATEESL:
-	case CS42L73_LIMRRATEESL:
-	case CS42L73_LMAXESL:
-	case CS42L73_ALCARATE:
-	case CS42L73_ALCRRATE:
-	case CS42L73_ALCMINMAX:
-	case CS42L73_NGCAB:
-	case CS42L73_ALCNGMC:
-	case CS42L73_MIXERCTL:
-	case CS42L73_HLAIPAA:
-	case CS42L73_HLBIPBA:
-	case CS42L73_HLAXSPAA:
-	case CS42L73_HLBXSPBA:
-	case CS42L73_HLAASPAA:
-	case CS42L73_HLBASPBA:
-	case CS42L73_HLAVSPMA:
-	case CS42L73_HLBVSPMA:
-	case CS42L73_XSPAIPAA:
-	case CS42L73_XSPBIPBA:
-	case CS42L73_XSPAXSPAA:
-	case CS42L73_XSPBXSPBA:
-	case CS42L73_XSPAASPAA:
-	case CS42L73_XSPAASPBA:
-	case CS42L73_XSPAVSPMA:
-	case CS42L73_XSPBVSPMA:
-	case CS42L73_ASPAIPAA:
-	case CS42L73_ASPBIPBA:
-	case CS42L73_ASPAXSPAA:
-	case CS42L73_ASPBXSPBA:
-	case CS42L73_ASPAASPAA:
-	case CS42L73_ASPBASPBA:
-	case CS42L73_ASPAVSPMA:
-	case CS42L73_ASPBVSPMA:
-	case CS42L73_VSPAIPAA:
-	case CS42L73_VSPBIPBA:
-	case CS42L73_VSPAXSPAA:
-	case CS42L73_VSPBXSPBA:
-	case CS42L73_VSPAASPAA:
-	case CS42L73_VSPBASPBA:
-	case CS42L73_VSPAVSPMA:
-	case CS42L73_VSPBVSPMA:
-	case CS42L73_MMIXCTL:
-	case CS42L73_SPKMIPMA:
-	case CS42L73_SPKMXSPA:
-	case CS42L73_SPKMASPA:
-	case CS42L73_SPKMVSPMA:
-	case CS42L73_ESLMIPMA:
-	case CS42L73_ESLMXSPA:
-	case CS42L73_ESLMASPA:
-	case CS42L73_ESLMVSPMA:
-	case CS42L73_IM1:
-	case CS42L73_IM2:
+	case CS42L73_DEVID_AB ... CS42L73_DEVID_E:
+	case CS42L73_REVID ... CS42L73_IM2:
 		return true;
 	default:
 		return false;
 	}
 }
 
-static const unsigned int hpaloa_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hpaloa_tlv,
 	0, 13, TLV_DB_SCALE_ITEM(-7600, 200, 0),
-	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0),
-};
+	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2500, 0);
 
@@ -267,11 +174,10 @@
 
 static DECLARE_TLV_DB_SCALE(micpga_tlv, -600, 50, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(attn_tlv, -6300, 100, 1);
 
@@ -1236,8 +1142,8 @@
 	struct snd_soc_codec *codec = dai->codec;
 	int id = dai->id;
 
-	return snd_soc_update_bits(codec, CS42L73_SPC(id),
-					0x7F, tristate << 7);
+	return snd_soc_update_bits(codec, CS42L73_SPC(id), CS42L73_SP_3ST,
+				   tristate << 7);
 }
 
 static const struct snd_pcm_hw_constraint_list constraints_12_24 = {
@@ -1491,7 +1397,6 @@
 static struct i2c_driver cs42l73_i2c_driver = {
 	.driver = {
 		   .name = "cs42l73",
-		   .owner = THIS_MODULE,
 		   .of_match_table = cs42l73_of_match,
 		   },
 	.id_table = cs42l73_id,
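
The cs42l73 tri-state hunk above is a real fix, not just a symbolic
rename: snd_soc_update_bits() only modifies bits covered by its mask, and
the old mask of 0x7F excluded bit 7, the very bit (tristate << 7) was
trying to set, while needlessly clearing bits 0-6. The read-modify-write
semantics, sketched:

  /* update_bits-style RMW: bits outside mask never change. */
  static unsigned int rmw_bits(unsigned int old, unsigned int mask,
  			     unsigned int val)
  {
  	return (old & ~mask) | (val & mask);
  }
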
diff --git a/sound/soc/codecs/cs42xx8-i2c.c b/sound/soc/codecs/cs42xx8-i2c.c
index 657dce2..800c1d5 100644
--- a/sound/soc/codecs/cs42xx8-i2c.c
+++ b/sound/soc/codecs/cs42xx8-i2c.c
@@ -20,7 +20,7 @@
 static int cs42xx8_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
-	u32 ret = cs42xx8_probe(&i2c->dev,
+	int ret = cs42xx8_probe(&i2c->dev,
 			devm_regmap_init_i2c(i2c, &cs42xx8_regmap_config));
 	if (ret)
 		return ret;
@@ -49,8 +49,8 @@
 static struct i2c_driver cs42xx8_i2c_driver = {
 	.driver = {
 		.name = "cs42xx8",
-		.owner = THIS_MODULE,
 		.pm = &cs42xx8_pm,
+		.of_match_table = cs42xx8_of_match,
 	},
 	.probe = cs42xx8_i2c_probe,
 	.remove = cs42xx8_i2c_remove,
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index e1d4686..d562e1b 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -425,7 +425,7 @@
 };
 EXPORT_SYMBOL_GPL(cs42888_data);
 
-static const struct of_device_id cs42xx8_of_match[] = {
+const struct of_device_id cs42xx8_of_match[] = {
 	{ .compatible = "cirrus,cs42448", .data = &cs42448_data, },
 	{ .compatible = "cirrus,cs42888", .data = &cs42888_data, },
 	{ /* sentinel */ }
@@ -435,16 +435,24 @@
 
 int cs42xx8_probe(struct device *dev, struct regmap *regmap)
 {
-	const struct of_device_id *of_id = of_match_device(cs42xx8_of_match, dev);
+	const struct of_device_id *of_id;
 	struct cs42xx8_priv *cs42xx8;
 	int ret, val, i;
 
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(dev, "failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
 	cs42xx8 = devm_kzalloc(dev, sizeof(*cs42xx8), GFP_KERNEL);
 	if (cs42xx8 == NULL)
 		return -ENOMEM;
 
+	cs42xx8->regmap = regmap;
 	dev_set_drvdata(dev, cs42xx8);
 
+	of_id = of_match_device(cs42xx8_of_match, dev);
 	if (of_id)
 		cs42xx8->drvdata = of_id->data;
 
@@ -482,13 +490,6 @@
 	/* Make sure hardware reset done */
 	msleep(5);
 
-	cs42xx8->regmap = regmap;
-	if (IS_ERR(cs42xx8->regmap)) {
-		ret = PTR_ERR(cs42xx8->regmap);
-		dev_err(dev, "failed to allocate regmap: %d\n", ret);
-		goto err_enable;
-	}
-
 	/*
 	 * We haven't marked the chip revision as volatile due to
 	 * sharing a register with the right input volume; explicitly
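
The cs42xx8_probe() reordering above moves the IS_ERR() check on the
regmap to the top of probe; previously the constructor's return value was
only checked near the end, after the (possibly invalid) pointer had been
stored. Checking first is the usual shape; a minimal sketch with
hypothetical names:

  struct my_priv {
  	struct regmap *regmap;
  };

  static int my_probe(struct device *dev, struct regmap *regmap)
  {
  	struct my_priv *priv;
  	int ret;

  	/* Reject an ERR_PTR() before anything can dereference it. */
  	if (IS_ERR(regmap)) {
  		ret = PTR_ERR(regmap);
  		dev_err(dev, "failed to allocate regmap: %d\n", ret);
  		return ret;
  	}

  	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  	if (!priv)
  		return -ENOMEM;

  	priv->regmap = regmap;	/* known-good by now */
  	dev_set_drvdata(dev, priv);

  	return 0;
  }
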
diff --git a/sound/soc/codecs/cs42xx8.h b/sound/soc/codecs/cs42xx8.h
index b2c10e5..d36c61b 100644
--- a/sound/soc/codecs/cs42xx8.h
+++ b/sound/soc/codecs/cs42xx8.h
@@ -22,6 +22,7 @@
 extern const struct cs42xx8_driver_data cs42448_data;
 extern const struct cs42xx8_driver_data cs42888_data;
 extern const struct regmap_config cs42xx8_regmap_config;
+extern const struct of_device_id cs42xx8_of_match[];
 int cs42xx8_probe(struct device *dev, struct regmap *regmap);
 
 /* CS42888 register map */
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
new file mode 100644
index 0000000..0ac8fc5
--- /dev/null
+++ b/sound/soc/codecs/cs4349.c
@@ -0,0 +1,392 @@
+/*
+ * cs4349.c  --  CS4349 ALSA SoC Audio driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Authors: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include "cs4349.h"
+
+
+static const struct reg_default cs4349_reg_defaults[] = {
+	{ 2, 0x00 },	/* r02	- Mode Control */
+	{ 3, 0x09 },	/* r03	- Volume, Mixing and Inversion Control */
+	{ 4, 0x81 },	/* r04	- Mute Control */
+	{ 5, 0x00 },	/* r05	- Channel A Volume Control */
+	{ 6, 0x00 },	/* r06	- Channel B Volume Control */
+	{ 7, 0xB1 },	/* r07	- Ramp and Filter Control */
+	{ 8, 0x1C },	/* r08	- Misc. Control */
+};
+
+/* Private data for the CS4349 */
+struct cs4349_private {
+	struct regmap			*regmap;
+	struct gpio_desc		*reset_gpio;
+	unsigned int			mode;
+	int				rate;
+};
+
+static bool cs4349_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_CHIPID ... CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs4349_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_MODE ...  CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int cs4349_set_dai_fmt(struct snd_soc_dai *codec_dai,
+			      unsigned int format)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	unsigned int fmt;
+
+	fmt = format & SND_SOC_DAIFMT_FORMAT_MASK;
+
+	switch (fmt) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+	case SND_SOC_DAIFMT_RIGHT_J:
+		cs4349->mode = format & SND_SOC_DAIFMT_FORMAT_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cs4349_pcm_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	int fmt, ret;
+
+	cs4349->rate = params_rate(params);
+
+	switch (cs4349->mode) {
+	case SND_SOC_DAIFMT_I2S:
+		fmt = DIF_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		fmt = DIF_LEFT_JST;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		switch (params_width(params)) {
+		case 16:
+			fmt = DIF_RGHT_JST16;
+			break;
+		case 24:
+			fmt = DIF_RGHT_JST24;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, CS4349_MODE, DIF_MASK,
+				  MODE_FORMAT(fmt));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int cs4349_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int reg;
+
+	reg = 0;
+	if (mute)
+		reg = MUTE_AB_MASK;
+
+	return snd_soc_update_bits(codec, CS4349_MUTE, MUTE_AB_MASK, reg);
+}
+
+static DECLARE_TLV_DB_SCALE(dig_tlv, -12750, 50, 0);
+
+static const char * const chan_mix_texts[] = {
+	"Mute", "MuteA", "MuteA SwapB", "MuteA MonoB", "SwapA MuteB",
+	"BothR", "Swap", "SwapA MonoB", "MuteB", "Normal", "BothL",
+	"MonoB", "MonoA MuteB", "MonoA", "MonoA SwapB", "Mono",
+	/* Normal == Channel A = Left, Channel B = Right */
+};
+
+static const char * const fm_texts[] = {
+	"Auto", "Single", "Double", "Quad",
+};
+
+static const char * const deemph_texts[] = {
+	"None", "44.1k", "48k", "32k",
+};
+
+static const char * const softr_zeroc_texts[] = {
+	"Immediate", "Zero Cross", "Soft Ramp", "SR on ZC",
+};
+
+static int deemph_values[] = {
+	0, 4, 8, 12,
+};
+
+static int softr_zeroc_values[] = {
+	0, 64, 128, 192,
+};
+
+static const struct soc_enum chan_mix_enum =
+	SOC_ENUM_SINGLE(CS4349_VMI, 0,
+			ARRAY_SIZE(chan_mix_texts),
+			chan_mix_texts);
+
+static const struct soc_enum fm_mode_enum =
+	SOC_ENUM_SINGLE(CS4349_MODE, 0,
+			ARRAY_SIZE(fm_texts),
+			fm_texts);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(deemph_enum, CS4349_MODE, 0, DEM_MASK,
+				deemph_texts, deemph_values);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(softr_zeroc_enum, CS4349_RMPFLT, 0,
+				SR_ZC_MASK, softr_zeroc_texts,
+				softr_zeroc_values);
+
+static const struct snd_kcontrol_new cs4349_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("Master Playback Volume",
+			 CS4349_VOLA, CS4349_VOLB, 0, 0xFF, 1, dig_tlv),
+	SOC_ENUM("Functional Mode", fm_mode_enum),
+	SOC_ENUM("De-Emphasis Control", deemph_enum),
+	SOC_ENUM("Soft Ramp Zero Cross Control", softr_zeroc_enum),
+	SOC_ENUM("Channel Mixer", chan_mix_enum),
+	SOC_SINGLE("VolA = VolB Switch", CS4349_VMI, 7, 1, 0),
+	SOC_SINGLE("InvertA Switch", CS4349_VMI, 6, 1, 0),
+	SOC_SINGLE("InvertB Switch", CS4349_VMI, 5, 1, 0),
+	SOC_SINGLE("Auto-Mute Switch", CS4349_MUTE, 7, 1, 0),
+	SOC_SINGLE("MUTEC A = B Switch", CS4349_MUTE, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Up Switch", CS4349_RMPFLT, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Down Switch", CS4349_RMPFLT, 4, 1, 0),
+	SOC_SINGLE("Slow Roll Off Filter Switch", CS4349_RMPFLT, 2, 1, 0),
+	SOC_SINGLE("Freeze Switch", CS4349_MISC, 5, 1, 0),
+	SOC_SINGLE("Popguard Switch", CS4349_MISC, 4, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget cs4349_dapm_widgets[] = {
+	SND_SOC_DAPM_DAC("HiFi DAC", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_OUTPUT("OutputA"),
+	SND_SOC_DAPM_OUTPUT("OutputB"),
+};
+
+static const struct snd_soc_dapm_route cs4349_routes[] = {
+	{"DAC Playback", NULL, "OutputA"},
+	{"DAC Playback", NULL, "OutputB"},
+
+	{"OutputA", NULL, "HiFi DAC"},
+	{"OutputB", NULL, "HiFi DAC"},
+};
+
+#define CS4349_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8  | \
+			SNDRV_PCM_FMTBIT_S16_LE  | SNDRV_PCM_FMTBIT_S16_BE  | \
+			SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \
+			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \
+			SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | \
+			SNDRV_PCM_FMTBIT_S24_LE  | SNDRV_PCM_FMTBIT_S24_BE  | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+#define CS4349_PCM_RATES SNDRV_PCM_RATE_8000_192000
+
+static const struct snd_soc_dai_ops cs4349_dai_ops = {
+	.hw_params	= cs4349_pcm_hw_params,
+	.set_fmt	= cs4349_set_dai_fmt,
+	.digital_mute	= cs4349_digital_mute,
+};
+
+static struct snd_soc_dai_driver cs4349_dai = {
+	.name = "cs4349_hifi",
+	.playback = {
+		.stream_name	= "DAC Playback",
+		.channels_min	= 1,
+		.channels_max	= 2,
+		.rates		= CS4349_PCM_RATES,
+		.formats	= CS4349_PCM_FORMATS,
+	},
+	.ops = &cs4349_dai_ops,
+	.symmetric_rates = 1,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_cs4349 = {
+	.controls		= cs4349_snd_controls,
+	.num_controls		= ARRAY_SIZE(cs4349_snd_controls),
+
+	.dapm_widgets		= cs4349_dapm_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(cs4349_dapm_widgets),
+	.dapm_routes		= cs4349_routes,
+	.num_dapm_routes	= ARRAY_SIZE(cs4349_routes),
+};
+
+static const struct regmap_config cs4349_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+
+	.max_register		= CS4349_MISC,
+	.reg_defaults		= cs4349_reg_defaults,
+	.num_reg_defaults	= ARRAY_SIZE(cs4349_reg_defaults),
+	.readable_reg		= cs4349_readable_register,
+	.writeable_reg		= cs4349_writeable_register,
+	.cache_type		= REGCACHE_RBTREE,
+};
+
+static int cs4349_i2c_probe(struct i2c_client *client,
+				      const struct i2c_device_id *id)
+{
+	struct cs4349_private *cs4349;
+	int ret;
+
+	cs4349 = devm_kzalloc(&client->dev, sizeof(*cs4349), GFP_KERNEL);
+	if (!cs4349)
+		return -ENOMEM;
+
+	cs4349->regmap = devm_regmap_init_i2c(client, &cs4349_regmap);
+	if (IS_ERR(cs4349->regmap)) {
+		ret = PTR_ERR(cs4349->regmap);
+		dev_err(&client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset the Device */
+	cs4349->reset_gpio = devm_gpiod_get_optional(&client->dev,
+		"reset", GPIOD_OUT_LOW);
+	if (IS_ERR(cs4349->reset_gpio))
+		return PTR_ERR(cs4349->reset_gpio);
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	i2c_set_clientdata(client, cs4349);
+
+	return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4349,
+		&cs4349_dai, 1);
+}
+
+static int cs4349_i2c_remove(struct i2c_client *client)
+{
+	struct cs4349_private *cs4349 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs4349_runtime_suspend(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, PWR_DWN);
+	if (ret < 0)
+		return ret;
+
+	regcache_cache_only(cs4349->regmap, true);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+static int cs4349_runtime_resume(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, 0);
+	if (ret < 0)
+		return ret;
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	regcache_cache_only(cs4349->regmap, false);
+	regcache_sync(cs4349->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs4349_runtime_pm = {
+	SET_RUNTIME_PM_OPS(cs4349_runtime_suspend, cs4349_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id cs4349_of_match[] = {
+	{ .compatible = "cirrus,cs4349", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, cs4349_of_match);
+
+static const struct i2c_device_id cs4349_i2c_id[] = {
+	{"cs4349", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs4349_i2c_id);
+
+static struct i2c_driver cs4349_i2c_driver = {
+	.driver = {
+		.name		= "cs4349",
+		.of_match_table	= cs4349_of_match,
+	},
+	.id_table	= cs4349_i2c_id,
+	.probe		= cs4349_i2c_probe,
+	.remove		= cs4349_i2c_remove,
+};
+
+module_i2c_driver(cs4349_i2c_driver);
+
+MODULE_AUTHOR("Tim Howe <tim.howe@cirrus.com>");
+MODULE_DESCRIPTION("Cirrus Logic CS4349 ALSA SoC Codec Driver");
+MODULE_LICENSE("GPL");
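
The new cs4349 driver shows the standard regcache-over-reset runtime PM
pattern: suspend powers the DAC down, switches the regmap to cache-only
mode and asserts reset; resume releases reset, re-enables hardware access
and replays any register writes made while the part was off. The resume
half, condensed into a sketch (my_priv is hypothetical):

  struct my_priv {
  	struct regmap *regmap;
  	struct gpio_desc *reset_gpio;
  };

  static int my_runtime_resume(struct device *dev)
  {
  	struct my_priv *priv = dev_get_drvdata(dev);

  	gpiod_set_value_cansleep(priv->reset_gpio, 1);	/* leave reset */

  	regcache_cache_only(priv->regmap, false);	/* hit hardware again */
  	return regcache_sync(priv->regmap);		/* replay cached writes */
  }
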
diff --git a/sound/soc/codecs/cs4349.h b/sound/soc/codecs/cs4349.h
new file mode 100644
index 0000000..d58c06a
--- /dev/null
+++ b/sound/soc/codecs/cs4349.h
@@ -0,0 +1,136 @@
+/*
+ * ALSA SoC CS4349 codec driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Author: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __CS4349_H__
+#define __CS4349_H__
+
+/* CS4349 registers addresses */
+#define CS4349_CHIPID		0x01	/* Device and Rev ID, Read Only */
+#define CS4349_MODE		0x02	/* Mode Control */
+#define CS4349_VMI		0x03	/* Volume, Mixing, Inversion Control */
+#define CS4349_MUTE		0x04	/* Mute Control */
+#define CS4349_VOLA		0x05	/* DAC Channel A Volume Control */
+#define CS4349_VOLB		0x06	/* DAC Channel B Volume Control */
+#define CS4349_RMPFLT		0x07	/* Ramp and Filter Control */
+#define CS4349_MISC		0x08	/* Power Down, Freeze Control, Pop Stop */
+
+#define CS4349_I2C_INCR		0x80
+
+
+/* Device and Revision ID */
+#define CS4349_REVA		0xF0	/* Rev A */
+#define CS4349_REVB		0xF1	/* Rev B */
+#define CS4349_REVC2		0xFF	/* Rev C2 */
+
+
+/* PDN_DONE Poll Maximum
+ * If soft ramp is set, it will take much longer to power down
+ * the system.
+ */
+#define PDN_POLL_MAX		900
+
+
+/* Bitfield Definitions */
+
+/* CS4349_MODE */
+/* (Digital Interface Format, De-Emphasis Control, Functional Mode) */
+#define DIF2			(1 << 6)
+#define DIF1			(1 << 5)
+#define DIF0			(1 << 4)
+#define DEM1			(1 << 3)
+#define DEM0			(1 << 2)
+#define FM1			(1 << 1)
+#define DIF_LEFT_JST		0x00
+#define DIF_I2S			0x01
+#define DIF_RGHT_JST16		0x02
+#define DIF_RGHT_JST24		0x03
+#define DIF_TDM0		0x04
+#define DIF_TDM1		0x05
+#define DIF_TDM2		0x06
+#define DIF_TDM3		0x07
+#define DIF_MASK		0x70
+#define MODE_FORMAT(x)		(((x)&7)<<4)
+#define DEM_MASK		0x0C
+#define NO_DEM			0x00
+#define DEM_441			0x04
+#define DEM_48K			0x08
+#define DEM_32K			0x0C
+#define FM_AUTO			0x00
+#define FM_SNGL			0x01
+#define FM_DBL			0x02
+#define FM_QUAD			0x03
+#define FM_SNGL_MIN		30000
+#define FM_SNGL_MAX		54000
+#define FM_DBL_MAX		108000
+#define FM_QUAD_MAX		216000
+#define FM_MASK			0x03
+
+/* CS4349_VMI (VMI = Volume, Mixing and Inversion Controls) */
+#define VOLBISA			(1 << 7)
+#define VOLAISB			(1 << 7)
+/* INVERT_A only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_A		(1 << 6)
+/* INVERT_B only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_B		(1 << 5)
+#define ATAPI3			(1 << 3)
+#define ATAPI2			(1 << 2)
+#define ATAPI1			(1 << 1)
+#define ATAPI0			(1 << 0)
+#define MUTEAB			0x00
+#define MUTEA_RIGHTB		0x01
+#define MUTEA_LEFTB		0x02
+#define MUTEA_SUMLRDIV2B	0x03
+#define RIGHTA_MUTEB		0x04
+#define RIGHTA_RIGHTB		0x05
+#define RIGHTA_LEFTB		0x06
+#define RIGHTA_SUMLRDIV2B	0x07
+#define LEFTA_MUTEB		0x08
+#define LEFTA_RIGHTB		0x09	/* Default */
+#define LEFTA_LEFTB		0x0A
+#define LEFTA_SUMLRDIV2B	0x0B
+#define SUMLRDIV2A_MUTEB	0x0C
+#define SUMLRDIV2A_RIGHTB	0x0D
+#define SUMLRDIV2A_LEFTB	0x0E
+#define SUMLRDIV2_AB		0x0F
+#define CHMIX_MASK		0x0F
+
+/* CS4349_MUTE */
+#define AUTOMUTE		(1 << 7)
+#define MUTEC_AB		(1 << 5)
+#define MUTE_A			(1 << 4)
+#define MUTE_B			(1 << 3)
+#define MUTE_AB_MASK		0x18
+
+/* CS4349_RMPFLT (Ramp and Filter Control) */
+#define SCZ1			(1 << 7)
+#define SCZ0			(1 << 6)
+#define RMP_UP			(1 << 5)
+#define RMP_DN			(1 << 4)
+#define FILT_SEL		(1 << 2)
+#define IMMDT_CHNG		0x31
+#define ZEROCRSS		0x71
+#define SOFT_RMP		0xB1
+#define SFTRMP_ZEROCRSS		0xF1
+#define SR_ZC_MASK		0xC0
+
+/* CS4349_MISC */
+#define PWR_DWN			(1 << 7)
+#define FREEZE			(1 << 5)
+#define POPG_EN			(1 << 4)
+
+#endif	/* __CS4349_H__ */
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 21810e5..7dc52fe 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -267,33 +267,29 @@
  *
  * Reserved areas are considered as "mute".
  */
-static const unsigned int hp_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hp_out_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54 dB to +15 dB */
-	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0),
-};
+	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
+);
 
-static const unsigned int lineout_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(lineout_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int mono_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mono_vol_tlv,
 	0x0, 0x2, TLV_DB_SCALE_ITEM(-1800, 0, 1),
 	/* -18dB to 6dB */
 	0x3, 0x7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
-};
+);
 
-static const unsigned int aux1_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux1_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -48dB to 21dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-4800, 150, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(eq_gain_tlv, -1050, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_eq_master_gain_tlv, -1800, 600, 1);
@@ -680,7 +676,7 @@
 	int master;
 };
 
-static struct reg_default da7210_reg_defaults[] = {
+static const struct reg_default da7210_reg_defaults[] = {
 	{ 0x00, 0x00 },
 	{ 0x01, 0x11 },
 	{ 0x03, 0x00 },
@@ -1182,7 +1178,7 @@
 
 #if IS_ENABLED(CONFIG_I2C)
 
-static struct reg_default da7210_regmap_i2c_patch[] = {
+static const struct reg_sequence da7210_regmap_i2c_patch[] = {
 
 	/* System controller master disable */
 	{ DA7210_STARTUP1, 0x00 },
@@ -1259,7 +1255,6 @@
 static struct i2c_driver da7210_i2c_driver = {
 	.driver = {
 		.name = "da7210",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7210_i2c_probe,
 	.remove		= da7210_i2c_remove,
@@ -1269,7 +1264,7 @@
 
 #if defined(CONFIG_SPI_MASTER)
 
-static struct reg_default da7210_regmap_spi_patch[] = {
+static const struct reg_sequence da7210_regmap_spi_patch[] = {
 	/* Dummy read to give two pulses over nCS for SPI */
 	{ DA7210_AUX2, 0x00 },
 	{ DA7210_AUX2, 0x00 },
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 238e48a..a9c86ef 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -28,27 +28,24 @@
 
 
 /* Gain and Volume */
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	/* -54dB */
 	0x0, 0x11, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -52.5dB to 15dB */
 	0x12, 0x3f, TLV_DB_SCALE_ITEM(-5250, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -954,7 +951,7 @@
 	{"LINE", NULL, "Lineout PGA"},
 };
 
-static struct reg_default da7213_reg_defaults[] = {
+static const struct reg_default da7213_reg_defaults[] = {
 	{ DA7213_DIG_ROUTING_DAI, 0x10 },
 	{ DA7213_SR, 0x0A },
 	{ DA7213_REFERENCES, 0x80 },
@@ -1585,7 +1582,6 @@
 static struct i2c_driver da7213_i2c_driver = {
 	.driver = {
 		.name = "da7213",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7213_i2c_probe,
 	.remove		= da7213_remove,
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 2075236..1d5a89c 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -43,7 +43,7 @@
 /*
  * da732x register cache - default settings
  */
-static struct reg_default da732x_reg_cache[] = {
+static const struct reg_default da732x_reg_cache[] = {
 	{ DA732X_REG_REF1		, 0x02 },
 	{ DA732X_REG_BIAS_EN		, 0x80 },
 	{ DA732X_REG_BIAS1		, 0x00 },
@@ -1196,13 +1196,7 @@
 #define	DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops da732x_dai1_ops = {
-	.hw_params	= da732x_hw_params,
-	.set_fmt	= da732x_set_dai_fmt,
-	.set_sysclk	= da732x_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops da732x_dai2_ops = {
+static const struct snd_soc_dai_ops da732x_dai_ops = {
 	.hw_params	= da732x_hw_params,
 	.set_fmt	= da732x_set_dai_fmt,
 	.set_sysclk	= da732x_set_dai_sysclk,
@@ -1227,7 +1221,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai1_ops,
+		.ops = &da732x_dai_ops,
 	},
 	{
 		.name	= "DA732X_AIFB",
@@ -1247,7 +1241,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai2_ops,
+		.ops = &da732x_dai_ops,
 	},
 };
 
@@ -1572,7 +1566,6 @@
 static struct i2c_driver da732x_i2c_driver = {
 	.driver		= {
 		.name	= "da7320",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da732x_i2c_probe,
 	.remove		= da732x_i2c_remove,
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 66bb446..0b2ede8 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -289,26 +289,23 @@
 
 /* Gain and Volume */
 
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -948,7 +945,7 @@
 	struct da9055_platform_data *pdata;
 };
 
-static struct reg_default da9055_reg_defaults[] = {
+static const struct reg_default da9055_reg_defaults[] = {
 	{ 0x21, 0x10 },
 	{ 0x22, 0x0A },
 	{ 0x23, 0x00 },
@@ -1533,12 +1530,12 @@
 	{ .compatible = "dlg,da9055-codec", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, da9055_of_match);
 
 /* I2C codec control layer */
 static struct i2c_driver da9055_i2c_driver = {
 	.driver = {
 		.name = "da9055-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 	.probe		= da9055_i2c_probe,
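
The TLV conversions in this driver and several below all follow one pattern: the open-coded arrays started with TLV_DB_RANGE_HEAD(n), where n had to be kept in sync with the number of range entries by hand, while DECLARE_TLV_DB_RANGE() from include/sound/tlv.h derives the length from its arguments. A minimal sketch of both forms, with made-up dB values:

	/* old form: the "2" had to match the entry count manually */
	static const unsigned int example_tlv_old[] = {
		TLV_DB_RANGE_HEAD(2),
		0, 1, TLV_DB_SCALE_ITEM(-5400, 0, 0),
		2, 7, TLV_DB_SCALE_ITEM(-5400, 150, 0),
	};

	/* new form: the macro computes the count, so it cannot drift */
	static const DECLARE_TLV_DB_RANGE(example_tlv,
		0, 1, TLV_DB_SCALE_ITEM(-5400, 0, 0),
		2, 7, TLV_DB_SCALE_ITEM(-5400, 150, 0)
	);
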
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
new file mode 100644
index 0000000..0b80052
--- /dev/null
+++ b/sound/soc/codecs/gtm601.c
@@ -0,0 +1,95 @@
+/*
+ * This is a simple driver for the GTM601 Voice PCM interface
+ *
+ * Copyright (C) 2015 Goldelico GmbH
+ *
+ * Author: Marek Belisko <marek@goldelico.com>
+ *
+ * Based on wm8727.c driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+static const struct snd_soc_dapm_widget gtm601_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("AOUT"),
+	SND_SOC_DAPM_INPUT("AIN"),
+};
+
+static const struct snd_soc_dapm_route gtm601_dapm_routes[] = {
+	{ "AOUT", NULL, "Playback" },
+	{ "Capture", NULL, "AIN" },
+};
+
+static struct snd_soc_dai_driver gtm601_dai = {
+	.name = "gtm601",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_gtm601 = {
+	.dapm_widgets = gtm601_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
+	.dapm_routes = gtm601_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
+};
+
+static int gtm601_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_codec_dev_gtm601, &gtm601_dai, 1);
+}
+
+static int gtm601_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id gtm601_codec_of_match[] = {
+	{ .compatible = "option,gtm601", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gtm601_codec_of_match);
+#endif
+
+static struct platform_driver gtm601_codec_driver = {
+	.driver = {
+		.name = "gtm601",
+		.of_match_table = of_match_ptr(gtm601_codec_of_match),
+	},
+	.probe = gtm601_platform_probe,
+	.remove = gtm601_platform_remove,
+};
+
+module_platform_driver(gtm601_codec_driver);
+
+MODULE_DESCRIPTION("ASoC gtm601 driver");
+MODULE_AUTHOR("Marek Belisko <marek@goldelico.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gtm601");
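
The GTM601 codec is fixed-function (8 kHz mono S16_LE in both directions), so the driver is little more than DAI capabilities plus two DAPM endpoints. A machine driver would bind it through an ordinary DAI link; a hypothetical sketch in which only the codec-side names come from this file and the CPU side is a placeholder:

	/* hypothetical board wiring; "board-pcm" is a placeholder */
	static struct snd_soc_dai_link board_gsm_link = {
		.name		= "GSM voice",
		.stream_name	= "GSM",
		.codec_dai_name	= "gtm601",	/* gtm601_dai.name */
		.codec_name	= "gtm601",	/* platform device name */
		.cpu_dai_name	= "board-pcm",
		.platform_name	= "board-pcm",
	};
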
diff --git a/sound/soc/codecs/ics43432.c b/sound/soc/codecs/ics43432.c
new file mode 100644
index 0000000..dd850b9
--- /dev/null
+++ b/sound/soc/codecs/ics43432.c
@@ -0,0 +1,76 @@
+/*
+ * I2S MEMS microphone driver for InvenSense ICS-43432
+ *
+ * - Non configurable.
+ * - I2S interface, 64 BCLs per frame, 32 bits per channel, 24 bit data
+ *
+ * Copyright (c) 2015 Axis Communications AB
+ *
+ * Licensed under GPL v2.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#define ICS43432_RATE_MIN 7190 /* Hz, from data sheet */
+#define ICS43432_RATE_MAX 52800 /* Hz, from data sheet */
+
+#define ICS43432_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver ics43432_dai = {
+	.name = "ics43432-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = ICS43432_RATE_MIN,
+		.rate_max = ICS43432_RATE_MAX,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = ICS43432_FORMATS,
+	},
+};
+
+static struct snd_soc_codec_driver ics43432_codec_driver = {
+};
+
+static int ics43432_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev, &ics43432_codec_driver,
+			&ics43432_dai, 1);
+}
+
+static int ics43432_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ics43432_ids[] = {
+	{ .compatible = "invensense,ics43432", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ics43432_ids);
+#endif
+
+static struct platform_driver ics43432_driver = {
+	.driver = {
+		.name = "ics43432",
+		.of_match_table = of_match_ptr(ics43432_ids),
+	},
+	.probe = ics43432_probe,
+	.remove = ics43432_remove,
+};
+
+module_platform_driver(ics43432_driver);
+
+MODULE_DESCRIPTION("ASoC ICS43432 driver");
+MODULE_AUTHOR("Ricard Wanderlof <ricardw@axis.com>");
+MODULE_LICENSE("GPL v2");
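
Because the ICS-43432 is not configurable, the DAI advertises SNDRV_PCM_RATE_CONTINUOUS and bounds it with rate_min/rate_max, so any rate inside the data-sheet window is accepted. For contrast, a conventional discrete-rate declaration would look like this sketch (not what the driver does):

	/* sketch only: a fixed-rate alternative; the real driver accepts
	 * the whole continuous 7190-52800 Hz window instead */
	static struct snd_soc_dai_driver alt_dai = {
		.name = "ics43432-hifi",
		.capture = {
			.stream_name	= "Capture",
			.channels_min	= 1,
			.channels_max	= 2,
			.rates		= SNDRV_PCM_RATE_8000_48000,
			.formats	= ICS43432_FORMATS,
		},
	};
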
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index ebd90283c..be44837 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -33,7 +33,7 @@
 
 
 /* Register default values for ISABELLE driver. */
-static struct reg_default isabelle_reg_defs[] = {
+static const struct reg_default isabelle_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -1016,25 +1016,25 @@
 #define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops isabelle_hs_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hs_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hs_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_hf_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hf_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hf_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_line_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_line_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_line_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_ul_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_ul_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 };
@@ -1149,7 +1149,6 @@
 static struct i2c_driver isabelle_i2c_driver = {
 	.driver = {
 		.name = "isabelle",
-		.owner = THIS_MODULE,
 	},
 	.probe = isabelle_i2c_probe,
 	.remove = isabelle_i2c_remove,
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 9363fdb..1f5ab99 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -78,11 +78,10 @@
 	struct regmap *regmap;
 };
 
-static const unsigned int jz4740_mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(jz4740_mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(jz4740_out_tlv, 0, 200, 0);
 static const DECLARE_TLV_DB_SCALE(jz4740_in_tlv, -3450, 150, 0);
diff --git a/sound/soc/codecs/lm4857.c b/sound/soc/codecs/lm4857.c
index 99ffc49..558de10 100644
--- a/sound/soc/codecs/lm4857.c
+++ b/sound/soc/codecs/lm4857.c
@@ -142,7 +142,6 @@
 static struct i2c_driver lm4857_i2c_driver = {
 	.driver = {
 		.name = "lm4857",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm4857_i2c_probe,
 	.id_table = lm4857_i2c_id,
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 6600aa0..9af5640 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -30,7 +30,7 @@
 #include <asm/div64.h>
 #include "lm49453.h"
 
-static struct reg_default lm49453_reg_defs[] = {
+static const struct reg_default lm49453_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -188,7 +188,6 @@
 /* codec private data */
 struct lm49453_priv {
 	struct regmap *regmap;
-	int fs_rate;
 };
 
 /* capture path controls */
@@ -1112,13 +1111,10 @@
 			     struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
 	u16 clk_div = 0;
 
-	lm49453->fs_rate = params_rate(params);
-
 	/* Setting DAC clock dividers based on substream sample rate. */
-	switch (lm49453->fs_rate) {
+	switch (params_rate(params)) {
 	case 8000:
 	case 16000:
 	case 32000:
@@ -1291,35 +1287,35 @@
 #define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops lm49453_headset_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_headset_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_hp_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ls_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ha_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_ep_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_ep_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ep_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
@@ -1460,7 +1456,6 @@
 static struct i2c_driver lm49453_i2c_driver = {
 	.driver = {
 		.name = "lm49453",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm49453_i2c_probe,
 	.remove = lm49453_i2c_remove,
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index e1c196a..5b82e26 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -35,7 +35,7 @@
 	u32 flags;
 };
 
-static struct reg_default max9768_default_regs[] = {
+static const struct reg_default max9768_default_regs[] = {
 	{ 0, 0 },
 	{ 3,  MAX9768_CTRL_FILTERLESS},
 };
@@ -43,8 +43,8 @@
 static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 	int val = gpio_get_value_cansleep(max9768->mute_gpio);
 
 	ucontrol->value.integer.value[0] = !val;
@@ -55,16 +55,15 @@
 static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 
 	gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]);
 
 	return 0;
 }
 
-static const unsigned int volume_tlv[] = {
-	TLV_DB_RANGE_HEAD(43),
+static const DECLARE_TLV_DB_RANGE(volume_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-16150, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-9280, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-9030, 0, 0),
@@ -107,8 +106,8 @@
 	51, 57, TLV_DB_SCALE_ITEM(290, 50, 0),
 	58, 58, TLV_DB_SCALE_ITEM(650, 0, 0),
 	59, 62, TLV_DB_SCALE_ITEM(700, 60, 0),
-	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0),
-};
+	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0)
+);
 
 static const struct snd_kcontrol_new max9768_volume[] = {
 	SOC_SINGLE_TLV("Playback Volume", MAX9768_VOL, 0, 63, 0, volume_tlv),
@@ -130,19 +129,20 @@
 	{ "OUT-", NULL, "IN" },
 };
 
-static int max9768_probe(struct snd_soc_codec *codec)
+static int max9768_probe(struct snd_soc_component *component)
 {
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(component);
 	int ret;
 
 	if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) {
-		ret = snd_soc_write(codec, MAX9768_CTRL, MAX9768_CTRL_PWM);
+		ret = regmap_write(max9768->regmap, MAX9768_CTRL,
+			MAX9768_CTRL_PWM);
 		if (ret)
 			return ret;
 	}
 
 	if (gpio_is_valid(max9768->mute_gpio)) {
-		ret = snd_soc_add_codec_controls(codec, max9768_mute,
+		ret = snd_soc_add_component_controls(component, max9768_mute,
 				ARRAY_SIZE(max9768_mute));
 		if (ret)
 			return ret;
@@ -151,7 +151,7 @@
 	return 0;
 }
 
-static struct snd_soc_codec_driver max9768_codec_driver = {
+static struct snd_soc_component_driver max9768_component_driver = {
 	.probe = max9768_probe,
 	.controls = max9768_volume,
 	.num_controls = ARRAY_SIZE(max9768_volume),
@@ -183,11 +183,13 @@
 
 	if (pdata) {
 		/* Mute on powerup to avoid clicks */
-		err = gpio_request_one(pdata->mute_gpio, GPIOF_INIT_HIGH, "MAX9768 Mute");
+		err = devm_gpio_request_one(&client->dev, pdata->mute_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Mute");
 		max9768->mute_gpio = err ?: pdata->mute_gpio;
 
 		/* Activate chip by releasing shutdown, enables I2C */
-		err = gpio_request_one(pdata->shdn_gpio, GPIOF_INIT_HIGH, "MAX9768 Shutdown");
+		err = devm_gpio_request_one(&client->dev, pdata->shdn_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Shutdown");
 		max9768->shdn_gpio = err ?: pdata->shdn_gpio;
 
 		max9768->flags = pdata->flags;
@@ -199,38 +201,11 @@
 	i2c_set_clientdata(client, max9768);
 
 	max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
-	if (IS_ERR(max9768->regmap)) {
-		err = PTR_ERR(max9768->regmap);
-		goto err_gpio_free;
-	}
+	if (IS_ERR(max9768->regmap))
+		return PTR_ERR(max9768->regmap);
 
-	err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
-	if (err)
-		goto err_gpio_free;
-
-	return 0;
-
- err_gpio_free:
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return err;
-}
-
-static int max9768_i2c_remove(struct i2c_client *client)
-{
-	struct max9768 *max9768 = i2c_get_clientdata(client);
-
-	snd_soc_unregister_codec(&client->dev);
-
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return 0;
+	return devm_snd_soc_register_component(&client->dev,
+		&max9768_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9768_i2c_id[] = {
@@ -242,10 +217,8 @@
 static struct i2c_driver max9768_i2c_driver = {
 	.driver = {
 		.name = "max9768",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9768_i2c_probe,
-	.remove = max9768_i2c_remove,
 	.id_table = max9768_i2c_id,
 };
 module_i2c_driver(max9768_i2c_driver);
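
The max9768 rework is the devm conversion in full: devm_gpio_request_one() and devm_snd_soc_register_component() tie each resource to the device, which lets the err_gpio_free unwind path and the whole remove callback go away. The idiom in isolation, as a sketch with placeholder names (FOO_MUTE_GPIO, foo_component_driver):

	static int foo_i2c_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		int err;

		/* released automatically when the device unbinds */
		err = devm_gpio_request_one(&client->dev, FOO_MUTE_GPIO,
					    GPIOF_INIT_HIGH, "foo mute");
		if (err)
			return err;

		/* unregistered automatically as well: no .remove needed */
		return devm_snd_soc_register_component(&client->dev,
				&foo_component_driver, NULL, 0);
	}

The codec-to-component move rides along: snd_soc_codec_driver becomes snd_soc_component_driver and the kcontrol callbacks switch to snd_soc_kcontrol_component()/snd_soc_component_get_drvdata().
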
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index d0f4534..20dcc49 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -258,292 +258,36 @@
 	{ 0xc9, 0x00 }, /* C9 DAI2 biquad */
 };
 
-static struct {
-       int readable;
-       int writable;
-       int vol;
-} max98088_access[M98088_REG_CNT] = {
-       { 0xFF, 0xFF, 1 }, /* 00 IRQ status */
-       { 0xFF, 0x00, 1 }, /* 01 MIC status */
-       { 0xFF, 0x00, 1 }, /* 02 jack status */
-       { 0x1F, 0x1F, 1 }, /* 03 battery voltage */
-       { 0xFF, 0xFF, 0 }, /* 04 */
-       { 0xFF, 0xFF, 0 }, /* 05 */
-       { 0xFF, 0xFF, 0 }, /* 06 */
-       { 0xFF, 0xFF, 0 }, /* 07 */
-       { 0xFF, 0xFF, 0 }, /* 08 */
-       { 0xFF, 0xFF, 0 }, /* 09 */
-       { 0xFF, 0xFF, 0 }, /* 0A */
-       { 0xFF, 0xFF, 0 }, /* 0B */
-       { 0xFF, 0xFF, 0 }, /* 0C */
-       { 0xFF, 0xFF, 0 }, /* 0D */
-       { 0xFF, 0xFF, 0 }, /* 0E */
-       { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */
-
-       { 0xFF, 0xFF, 0 }, /* 10 master clock */
-       { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 13 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */
-       { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */
-       { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */
-       { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */
-       { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */
-       { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */
-       { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */
-       { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */
-       { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */
-
-       { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */
-       { 0xFF, 0xFF, 0 }, /* 21 data config */
-       { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */
-       { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 27 HP control */
-       { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 2A REC control */
-       { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2D SPK control */
-       { 0xFF, 0xFF, 0 }, /* 2E sidetone */
-       { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */
-
-       { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */
-       { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */
-       { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */
-       { 0xFF, 0xFF, 0 }, /* 33 left ADC level */
-       { 0xFF, 0xFF, 0 }, /* 34 right ADC level */
-       { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */
-       { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */
-       { 0xFF, 0xFF, 0 }, /* 37 INA level */
-       { 0xFF, 0xFF, 0 }, /* 38 INB level */
-       { 0xFF, 0xFF, 0 }, /* 39 left HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3A right HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3B left REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3C right REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3F MIC config */
-
-       { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */
-       { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */
-       { 0xFF, 0xFF, 0 }, /* 42 excursion limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 43 ALC */
-       { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 45 power limiter config */
-       { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */
-       { 0xFF, 0xFF, 0 }, /* 47 audio input */
-       { 0xFF, 0xFF, 0 }, /* 48 microphone */
-       { 0xFF, 0xFF, 0 }, /* 49 level control */
-       { 0xFF, 0xFF, 0 }, /* 4A bypass switches */
-       { 0xFF, 0xFF, 0 }, /* 4B jack detect */
-       { 0xFF, 0xFF, 0 }, /* 4C input enable */
-       { 0xFF, 0xFF, 0 }, /* 4D output enable */
-       { 0xFF, 0xFF, 0 }, /* 4E bias control */
-       { 0xFF, 0xFF, 0 }, /* 4F DAC power */
-
-       { 0xFF, 0xFF, 0 }, /* 50 DAC power */
-       { 0xFF, 0xFF, 0 }, /* 51 system */
-       { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 76 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AC DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */
-
-       { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */
-       { 0x00, 0x00, 0 }, /* CA */
-       { 0x00, 0x00, 0 }, /* CB */
-       { 0x00, 0x00, 0 }, /* CC */
-       { 0x00, 0x00, 0 }, /* CD */
-       { 0x00, 0x00, 0 }, /* CE */
-       { 0x00, 0x00, 0 }, /* CF */
-
-       { 0x00, 0x00, 0 }, /* D0 */
-       { 0x00, 0x00, 0 }, /* D1 */
-       { 0x00, 0x00, 0 }, /* D2 */
-       { 0x00, 0x00, 0 }, /* D3 */
-       { 0x00, 0x00, 0 }, /* D4 */
-       { 0x00, 0x00, 0 }, /* D5 */
-       { 0x00, 0x00, 0 }, /* D6 */
-       { 0x00, 0x00, 0 }, /* D7 */
-       { 0x00, 0x00, 0 }, /* D8 */
-       { 0x00, 0x00, 0 }, /* D9 */
-       { 0x00, 0x00, 0 }, /* DA */
-       { 0x00, 0x00, 0 }, /* DB */
-       { 0x00, 0x00, 0 }, /* DC */
-       { 0x00, 0x00, 0 }, /* DD */
-       { 0x00, 0x00, 0 }, /* DE */
-       { 0x00, 0x00, 0 }, /* DF */
-
-       { 0x00, 0x00, 0 }, /* E0 */
-       { 0x00, 0x00, 0 }, /* E1 */
-       { 0x00, 0x00, 0 }, /* E2 */
-       { 0x00, 0x00, 0 }, /* E3 */
-       { 0x00, 0x00, 0 }, /* E4 */
-       { 0x00, 0x00, 0 }, /* E5 */
-       { 0x00, 0x00, 0 }, /* E6 */
-       { 0x00, 0x00, 0 }, /* E7 */
-       { 0x00, 0x00, 0 }, /* E8 */
-       { 0x00, 0x00, 0 }, /* E9 */
-       { 0x00, 0x00, 0 }, /* EA */
-       { 0x00, 0x00, 0 }, /* EB */
-       { 0x00, 0x00, 0 }, /* EC */
-       { 0x00, 0x00, 0 }, /* ED */
-       { 0x00, 0x00, 0 }, /* EE */
-       { 0x00, 0x00, 0 }, /* EF */
-
-       { 0x00, 0x00, 0 }, /* F0 */
-       { 0x00, 0x00, 0 }, /* F1 */
-       { 0x00, 0x00, 0 }, /* F2 */
-       { 0x00, 0x00, 0 }, /* F3 */
-       { 0x00, 0x00, 0 }, /* F4 */
-       { 0x00, 0x00, 0 }, /* F5 */
-       { 0x00, 0x00, 0 }, /* F6 */
-       { 0x00, 0x00, 0 }, /* F7 */
-       { 0x00, 0x00, 0 }, /* F8 */
-       { 0x00, 0x00, 0 }, /* F9 */
-       { 0x00, 0x00, 0 }, /* FA */
-       { 0x00, 0x00, 0 }, /* FB */
-       { 0x00, 0x00, 0 }, /* FC */
-       { 0x00, 0x00, 0 }, /* FD */
-       { 0x00, 0x00, 0 }, /* FE */
-       { 0xFF, 0x00, 1 }, /* FF */
-};
-
 static bool max98088_readable_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].readable;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... 0xC9:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98088_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98088_volatile_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].vol;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static const struct regmap_config max98088_regmap = {
@@ -551,6 +295,7 @@
 	.val_bits = 8,
 
 	.readable_reg = max98088_readable_register,
+	.writeable_reg = max98088_writeable_register,
 	.volatile_reg = max98088_volatile_register,
 	.max_register = 0xff,
 
@@ -680,29 +425,26 @@
        return 0;
 }
 
-static const unsigned int max98088_micboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(2),
-       0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-       2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+static const DECLARE_TLV_DB_RANGE(max98088_micboost_tlv,
+	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
-static const unsigned int max98088_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98088_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_spk_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static const struct snd_kcontrol_new max98088_snd_controls[] = {
 
@@ -2011,7 +1753,6 @@
 static struct i2c_driver max98088_i2c_driver = {
 	.driver = {
 		.name = "max98088",
-		.owner = THIS_MODULE,
 	},
 	.probe  = max98088_i2c_probe,
 	.remove = max98088_i2c_remove,
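
Replacing the 256-entry max98088_access[] table with switch-based regmap callbacks works because the table's per-bit masks were only ever tested for non-zero, so per-register booleans express the same policy, and the register map is contiguous. The "low ... high" labels are the GCC case-range extension used throughout the kernel; a minimal sketch:

	/* sketch: true for any register in [0x00, 0xC9] plus 0xFF */
	static bool example_readable(struct device *dev, unsigned int reg)
	{
		switch (reg) {
		case 0x00 ... 0xc9:
		case 0xff:
			return true;
		default:
			return false;
		}
	}

The conversion also hooks up a writeable_reg callback, which the old code never passed to regmap even though the table tracked writability.
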
diff --git a/sound/soc/codecs/max98088.h b/sound/soc/codecs/max98088.h
index be89a4f..efa39bf 100644
--- a/sound/soc/codecs/max98088.h
+++ b/sound/soc/codecs/max98088.h
@@ -16,7 +16,7 @@
  */
 #define M98088_REG_00_IRQ_STATUS            0x00
 #define M98088_REG_01_MIC_STATUS            0x01
-#define M98088_REG_02_JACK_STAUS            0x02
+#define M98088_REG_02_JACK_STATUS           0x02
 #define M98088_REG_03_BATTERY_VOLTAGE       0x03
 #define M98088_REG_0F_IRQ_ENABLE            0x0F
 #define M98088_REG_10_SYS_CLK               0x10
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 78268f05..584aab8 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -267,75 +267,8 @@
 static bool max98090_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case M98090_REG_DEVICE_STATUS:
-	case M98090_REG_JACK_STATUS:
-	case M98090_REG_INTERRUPT_S:
-	case M98090_REG_RESERVED:
-	case M98090_REG_LINE_INPUT_CONFIG:
-	case M98090_REG_LINE_INPUT_LEVEL:
-	case M98090_REG_INPUT_MODE:
-	case M98090_REG_MIC1_INPUT_LEVEL:
-	case M98090_REG_MIC2_INPUT_LEVEL:
-	case M98090_REG_MIC_BIAS_VOLTAGE:
-	case M98090_REG_DIGITAL_MIC_ENABLE:
-	case M98090_REG_DIGITAL_MIC_CONFIG:
-	case M98090_REG_LEFT_ADC_MIXER:
-	case M98090_REG_RIGHT_ADC_MIXER:
-	case M98090_REG_LEFT_ADC_LEVEL:
-	case M98090_REG_RIGHT_ADC_LEVEL:
-	case M98090_REG_ADC_BIQUAD_LEVEL:
-	case M98090_REG_ADC_SIDETONE:
-	case M98090_REG_SYSTEM_CLOCK:
-	case M98090_REG_CLOCK_MODE:
-	case M98090_REG_CLOCK_RATIO_NI_MSB:
-	case M98090_REG_CLOCK_RATIO_NI_LSB:
-	case M98090_REG_CLOCK_RATIO_MI_MSB:
-	case M98090_REG_CLOCK_RATIO_MI_LSB:
-	case M98090_REG_MASTER_MODE:
-	case M98090_REG_INTERFACE_FORMAT:
-	case M98090_REG_TDM_CONTROL:
-	case M98090_REG_TDM_FORMAT:
-	case M98090_REG_IO_CONFIGURATION:
-	case M98090_REG_FILTER_CONFIG:
-	case M98090_REG_DAI_PLAYBACK_LEVEL:
-	case M98090_REG_DAI_PLAYBACK_LEVEL_EQ:
-	case M98090_REG_LEFT_HP_MIXER:
-	case M98090_REG_RIGHT_HP_MIXER:
-	case M98090_REG_HP_CONTROL:
-	case M98090_REG_LEFT_HP_VOLUME:
-	case M98090_REG_RIGHT_HP_VOLUME:
-	case M98090_REG_LEFT_SPK_MIXER:
-	case M98090_REG_RIGHT_SPK_MIXER:
-	case M98090_REG_SPK_CONTROL:
-	case M98090_REG_LEFT_SPK_VOLUME:
-	case M98090_REG_RIGHT_SPK_VOLUME:
-	case M98090_REG_DRC_TIMING:
-	case M98090_REG_DRC_COMPRESSOR:
-	case M98090_REG_DRC_EXPANDER:
-	case M98090_REG_DRC_GAIN:
-	case M98090_REG_RCV_LOUTL_MIXER:
-	case M98090_REG_RCV_LOUTL_CONTROL:
-	case M98090_REG_RCV_LOUTL_VOLUME:
-	case M98090_REG_LOUTR_MIXER:
-	case M98090_REG_LOUTR_CONTROL:
-	case M98090_REG_LOUTR_VOLUME:
-	case M98090_REG_JACK_DETECT:
-	case M98090_REG_INPUT_ENABLE:
-	case M98090_REG_OUTPUT_ENABLE:
-	case M98090_REG_LEVEL_CONTROL:
-	case M98090_REG_DSP_FILTER_ENABLE:
-	case M98090_REG_BIAS_CONTROL:
-	case M98090_REG_DAC_CONTROL:
-	case M98090_REG_ADC_CONTROL:
-	case M98090_REG_DEVICE_SHUTDOWN:
-	case M98090_REG_EQUALIZER_BASE ... M98090_REG_EQUALIZER_BASE + 0x68:
-	case M98090_REG_RECORD_BIQUAD_BASE ... M98090_REG_RECORD_BIQUAD_BASE + 0x0E:
-	case M98090_REG_DMIC3_VOLUME:
-	case M98090_REG_DMIC4_VOLUME:
-	case M98090_REG_DMIC34_BQ_PREATTEN:
-	case M98090_REG_RECORD_TDM_SLOT:
-	case M98090_REG_SAMPLE_RATE:
-	case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+	case M98090_REG_DEVICE_STATUS ... M98090_REG_INTERRUPT_S:
+	case M98090_REG_LINE_INPUT_CONFIG ... 0xD1:
 	case M98090_REG_REVISION_ID:
 		return true;
 	default:
@@ -360,22 +293,20 @@
 	return ret;
 }
 
-static const unsigned int max98090_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_mic_tlv, 0, 100, 0);
 
 static const DECLARE_TLV_DB_SCALE(max98090_line_single_ended_tlv,
 	-600, 600, 0);
 
-static const unsigned int max98090_line_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_line_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-600, 300, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_avg_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_av_tlv, -1200, 100, 0);
@@ -391,38 +322,34 @@
 static const DECLARE_TLV_DB_SCALE(max98090_drcexp_tlv, -6600, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_sdg_tlv, 50, 200, 0);
 
-static const unsigned int max98090_mixout_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_mixout_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-1200, 250, 0),
-	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
-static const unsigned int max98090_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98090_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_spk_tlv,
 	0, 4, TLV_DB_SCALE_ITEM(-4800, 400, 0),
 	5, 10, TLV_DB_SCALE_ITEM(-2900, 300, 0),
 	11, 14, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	15, 29, TLV_DB_SCALE_ITEM(-500, 100, 0),
-	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0),
-};
+	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0)
+);
 
-static const unsigned int max98090_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static int max98090_get_enab_tlv(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
@@ -850,6 +777,19 @@
 	return 0;
 }
 
+static int max98090_shdn_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
+	if (event & SND_SOC_DAPM_POST_PMU)
+		max98090->shdn_pending = true;
+
+	return 0;
+
+}
+
 static const char *mic1_mux_text[] = { "IN12", "IN56" };
 
 static SOC_ENUM_SINGLE_DECL(mic1_mux_enum,
@@ -1158,9 +1098,11 @@
 	SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION,
 		M98090_SDOEN_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICL_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICL_SHIFT, 0, max98090_shdn_event,
+			SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICR_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICR_SHIFT, 0, max98090_shdn_event,
+			 SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG,
 		M98090_AHPF_SHIFT, 0, NULL, 0),
 
@@ -1205,10 +1147,12 @@
 		&max98090_right_adc_mixer_controls[0],
 		ARRAY_SIZE(max98090_right_adc_mixer_controls)),
 
-	SND_SOC_DAPM_ADC("ADCL", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADLEN_SHIFT, 0),
-	SND_SOC_DAPM_ADC("ADCR", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADREN_SHIFT, 0),
+	SND_SOC_DAPM_ADC_E("ADCL", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADLEN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_ADC_E("ADCR", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADREN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0,
 		SND_SOC_NOPM, 0, 0),
@@ -1801,10 +1745,13 @@
 		if (IS_ERR(max98090->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98090->mclk);
-		else
-			clk_prepare_enable(max98090->mclk);
+		} else {
+			ret = clk_prepare_enable(max98090->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
@@ -2383,7 +2330,7 @@
 #define MAX98090_RATES SNDRV_PCM_RATE_8000_96000
 #define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops max98090_dai_ops = {
+static const struct snd_soc_dai_ops max98090_dai_ops = {
 	.set_sysclk = max98090_dai_set_sysclk,
 	.set_fmt = max98090_dai_set_fmt,
 	.set_tdm_slot = max98090_set_tdm_slot,
@@ -2536,9 +2483,26 @@
 	return 0;
 }
 
+static void max98090_seq_notifier(struct snd_soc_dapm_context *dapm,
+	enum snd_soc_dapm_type event, int subseq)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
+	if (max98090->shdn_pending) {
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, 0);
+		msleep(40);
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+		max98090->shdn_pending = false;
+	}
+}
+
 static struct snd_soc_codec_driver soc_codec_dev_max98090 = {
 	.probe   = max98090_probe,
 	.remove  = max98090_remove,
+	.seq_notifier = max98090_seq_notifier,
 	.set_bias_level = max98090_set_bias_level,
 };
 
@@ -2714,7 +2678,6 @@
 static struct i2c_driver max98090_i2c_driver = {
 	.driver = {
 		.name = "max98090",
-		.owner = THIS_MODULE,
 		.pm = &max98090_pm,
 		.of_match_table = of_match_ptr(max98090_of_match),
 		.acpi_match_table = ACPI_PTR(max98090_acpi_match),
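
Two independent fixes travel together in the max98090 diff. First, the digital-mic and ADC widgets now flag shdn_pending from a SND_SOC_DAPM_POST_PMU event, and the new seq_notifier callback, which DAPM invokes after a power sequence completes, performs one deferred SHDN power cycle (40 ms down, then back up) for the whole sequence. Second, the bias-level code stops ignoring clock errors; the corrected pattern, sketched:

	/* propagate clk_prepare_enable() failures instead of dropping them */
	ret = clk_prepare_enable(max98090->mclk);
	if (ret)
		return ret;
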
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 21ff743..bc610d9 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -1543,6 +1543,7 @@
 	unsigned int pa2en;
 	unsigned int sidetone;
 	bool master;
+	bool shdn_pending;
 };
 
 int max98090_mic_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 9a46d3d..1fedac5 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -202,300 +202,36 @@
 	{ 0xff, 0x00 }, /* FF */
 };
 
-static struct {
-	int readable;
-	int writable;
-} max98095_access[M98095_REG_CNT] = {
-	{ 0x00, 0x00 }, /* 00 */
-	{ 0xFF, 0x00 }, /* 01 */
-	{ 0xFF, 0x00 }, /* 02 */
-	{ 0xFF, 0x00 }, /* 03 */
-	{ 0xFF, 0x00 }, /* 04 */
-	{ 0xFF, 0x00 }, /* 05 */
-	{ 0xFF, 0x00 }, /* 06 */
-	{ 0xFF, 0x00 }, /* 07 */
-	{ 0xFF, 0x00 }, /* 08 */
-	{ 0xFF, 0x00 }, /* 09 */
-	{ 0xFF, 0x00 }, /* 0A */
-	{ 0xFF, 0x00 }, /* 0B */
-	{ 0xFF, 0x00 }, /* 0C */
-	{ 0xFF, 0x00 }, /* 0D */
-	{ 0xFF, 0x00 }, /* 0E */
-	{ 0xFF, 0x9F }, /* 0F */
-	{ 0xFF, 0xFF }, /* 10 */
-	{ 0xFF, 0xFF }, /* 11 */
-	{ 0xFF, 0xFF }, /* 12 */
-	{ 0xFF, 0xFF }, /* 13 */
-	{ 0xFF, 0xFF }, /* 14 */
-	{ 0xFF, 0xFF }, /* 15 */
-	{ 0xFF, 0xFF }, /* 16 */
-	{ 0xFF, 0xFF }, /* 17 */
-	{ 0xFF, 0xFF }, /* 18 */
-	{ 0xFF, 0xFF }, /* 19 */
-	{ 0xFF, 0xFF }, /* 1A */
-	{ 0xFF, 0xFF }, /* 1B */
-	{ 0xFF, 0xFF }, /* 1C */
-	{ 0xFF, 0xFF }, /* 1D */
-	{ 0xFF, 0x77 }, /* 1E */
-	{ 0xFF, 0x77 }, /* 1F */
-	{ 0xFF, 0x77 }, /* 20 */
-	{ 0xFF, 0x77 }, /* 21 */
-	{ 0xFF, 0x77 }, /* 22 */
-	{ 0xFF, 0x77 }, /* 23 */
-	{ 0xFF, 0xFF }, /* 24 */
-	{ 0xFF, 0x7F }, /* 25 */
-	{ 0xFF, 0x31 }, /* 26 */
-	{ 0xFF, 0xFF }, /* 27 */
-	{ 0xFF, 0xFF }, /* 28 */
-	{ 0xFF, 0xFF }, /* 29 */
-	{ 0xFF, 0xF7 }, /* 2A */
-	{ 0xFF, 0x2F }, /* 2B */
-	{ 0xFF, 0xEF }, /* 2C */
-	{ 0xFF, 0xFF }, /* 2D */
-	{ 0xFF, 0xFF }, /* 2E */
-	{ 0xFF, 0xFF }, /* 2F */
-	{ 0xFF, 0xFF }, /* 30 */
-	{ 0xFF, 0xFF }, /* 31 */
-	{ 0xFF, 0xFF }, /* 32 */
-	{ 0xFF, 0xFF }, /* 33 */
-	{ 0xFF, 0xF7 }, /* 34 */
-	{ 0xFF, 0x2F }, /* 35 */
-	{ 0xFF, 0xCF }, /* 36 */
-	{ 0xFF, 0xFF }, /* 37 */
-	{ 0xFF, 0xFF }, /* 38 */
-	{ 0xFF, 0xFF }, /* 39 */
-	{ 0xFF, 0xFF }, /* 3A */
-	{ 0xFF, 0xFF }, /* 3B */
-	{ 0xFF, 0xFF }, /* 3C */
-	{ 0xFF, 0xFF }, /* 3D */
-	{ 0xFF, 0xF7 }, /* 3E */
-	{ 0xFF, 0x2F }, /* 3F */
-	{ 0xFF, 0xCF }, /* 40 */
-	{ 0xFF, 0xFF }, /* 41 */
-	{ 0xFF, 0x77 }, /* 42 */
-	{ 0xFF, 0xFF }, /* 43 */
-	{ 0xFF, 0xFF }, /* 44 */
-	{ 0xFF, 0xFF }, /* 45 */
-	{ 0xFF, 0xFF }, /* 46 */
-	{ 0xFF, 0xFF }, /* 47 */
-	{ 0xFF, 0xFF }, /* 48 */
-	{ 0xFF, 0x0F }, /* 49 */
-	{ 0xFF, 0xFF }, /* 4A */
-	{ 0xFF, 0xFF }, /* 4B */
-	{ 0xFF, 0x3F }, /* 4C */
-	{ 0xFF, 0x3F }, /* 4D */
-	{ 0xFF, 0x3F }, /* 4E */
-	{ 0xFF, 0xFF }, /* 4F */
-	{ 0xFF, 0x7F }, /* 50 */
-	{ 0xFF, 0x7F }, /* 51 */
-	{ 0xFF, 0x0F }, /* 52 */
-	{ 0xFF, 0x3F }, /* 53 */
-	{ 0xFF, 0x3F }, /* 54 */
-	{ 0xFF, 0x3F }, /* 55 */
-	{ 0xFF, 0xFF }, /* 56 */
-	{ 0xFF, 0xFF }, /* 57 */
-	{ 0xFF, 0xBF }, /* 58 */
-	{ 0xFF, 0x1F }, /* 59 */
-	{ 0xFF, 0xBF }, /* 5A */
-	{ 0xFF, 0x1F }, /* 5B */
-	{ 0xFF, 0xBF }, /* 5C */
-	{ 0xFF, 0x3F }, /* 5D */
-	{ 0xFF, 0x3F }, /* 5E */
-	{ 0xFF, 0x7F }, /* 5F */
-	{ 0xFF, 0x7F }, /* 60 */
-	{ 0xFF, 0x47 }, /* 61 */
-	{ 0xFF, 0x9F }, /* 62 */
-	{ 0xFF, 0x9F }, /* 63 */
-	{ 0xFF, 0x9F }, /* 64 */
-	{ 0xFF, 0x9F }, /* 65 */
-	{ 0xFF, 0x9F }, /* 66 */
-	{ 0xFF, 0xBF }, /* 67 */
-	{ 0xFF, 0xBF }, /* 68 */
-	{ 0xFF, 0xFF }, /* 69 */
-	{ 0xFF, 0xFF }, /* 6A */
-	{ 0xFF, 0x7F }, /* 6B */
-	{ 0xFF, 0xF7 }, /* 6C */
-	{ 0xFF, 0xFF }, /* 6D */
-	{ 0xFF, 0xFF }, /* 6E */
-	{ 0xFF, 0x1F }, /* 6F */
-	{ 0xFF, 0xF7 }, /* 70 */
-	{ 0xFF, 0xFF }, /* 71 */
-	{ 0xFF, 0xFF }, /* 72 */
-	{ 0xFF, 0x1F }, /* 73 */
-	{ 0xFF, 0xF7 }, /* 74 */
-	{ 0xFF, 0xFF }, /* 75 */
-	{ 0xFF, 0xFF }, /* 76 */
-	{ 0xFF, 0x1F }, /* 77 */
-	{ 0xFF, 0xF7 }, /* 78 */
-	{ 0xFF, 0xFF }, /* 79 */
-	{ 0xFF, 0xFF }, /* 7A */
-	{ 0xFF, 0x1F }, /* 7B */
-	{ 0xFF, 0xF7 }, /* 7C */
-	{ 0xFF, 0xFF }, /* 7D */
-	{ 0xFF, 0xFF }, /* 7E */
-	{ 0xFF, 0x1F }, /* 7F */
-	{ 0xFF, 0xF7 }, /* 80 */
-	{ 0xFF, 0xFF }, /* 81 */
-	{ 0xFF, 0xFF }, /* 82 */
-	{ 0xFF, 0x1F }, /* 83 */
-	{ 0xFF, 0x7F }, /* 84 */
-	{ 0xFF, 0x0F }, /* 85 */
-	{ 0xFF, 0xD8 }, /* 86 */
-	{ 0xFF, 0xFF }, /* 87 */
-	{ 0xFF, 0xEF }, /* 88 */
-	{ 0xFF, 0xFE }, /* 89 */
-	{ 0xFF, 0xFE }, /* 8A */
-	{ 0xFF, 0xFF }, /* 8B */
-	{ 0xFF, 0xFF }, /* 8C */
-	{ 0xFF, 0x3F }, /* 8D */
-	{ 0xFF, 0xFF }, /* 8E */
-	{ 0xFF, 0x3F }, /* 8F */
-	{ 0xFF, 0x8F }, /* 90 */
-	{ 0xFF, 0xFF }, /* 91 */
-	{ 0xFF, 0x3F }, /* 92 */
-	{ 0xFF, 0xFF }, /* 93 */
-	{ 0xFF, 0xFF }, /* 94 */
-	{ 0xFF, 0x0F }, /* 95 */
-	{ 0xFF, 0x3F }, /* 96 */
-	{ 0xFF, 0x8C }, /* 97 */
-	{ 0x00, 0x00 }, /* 98 */
-	{ 0x00, 0x00 }, /* 99 */
-	{ 0x00, 0x00 }, /* 9A */
-	{ 0x00, 0x00 }, /* 9B */
-	{ 0x00, 0x00 }, /* 9C */
-	{ 0x00, 0x00 }, /* 9D */
-	{ 0x00, 0x00 }, /* 9E */
-	{ 0x00, 0x00 }, /* 9F */
-	{ 0x00, 0x00 }, /* A0 */
-	{ 0x00, 0x00 }, /* A1 */
-	{ 0x00, 0x00 }, /* A2 */
-	{ 0x00, 0x00 }, /* A3 */
-	{ 0x00, 0x00 }, /* A4 */
-	{ 0x00, 0x00 }, /* A5 */
-	{ 0x00, 0x00 }, /* A6 */
-	{ 0x00, 0x00 }, /* A7 */
-	{ 0x00, 0x00 }, /* A8 */
-	{ 0x00, 0x00 }, /* A9 */
-	{ 0x00, 0x00 }, /* AA */
-	{ 0x00, 0x00 }, /* AB */
-	{ 0x00, 0x00 }, /* AC */
-	{ 0x00, 0x00 }, /* AD */
-	{ 0x00, 0x00 }, /* AE */
-	{ 0x00, 0x00 }, /* AF */
-	{ 0x00, 0x00 }, /* B0 */
-	{ 0x00, 0x00 }, /* B1 */
-	{ 0x00, 0x00 }, /* B2 */
-	{ 0x00, 0x00 }, /* B3 */
-	{ 0x00, 0x00 }, /* B4 */
-	{ 0x00, 0x00 }, /* B5 */
-	{ 0x00, 0x00 }, /* B6 */
-	{ 0x00, 0x00 }, /* B7 */
-	{ 0x00, 0x00 }, /* B8 */
-	{ 0x00, 0x00 }, /* B9 */
-	{ 0x00, 0x00 }, /* BA */
-	{ 0x00, 0x00 }, /* BB */
-	{ 0x00, 0x00 }, /* BC */
-	{ 0x00, 0x00 }, /* BD */
-	{ 0x00, 0x00 }, /* BE */
-	{ 0x00, 0x00 }, /* BF */
-	{ 0x00, 0x00 }, /* C0 */
-	{ 0x00, 0x00 }, /* C1 */
-	{ 0x00, 0x00 }, /* C2 */
-	{ 0x00, 0x00 }, /* C3 */
-	{ 0x00, 0x00 }, /* C4 */
-	{ 0x00, 0x00 }, /* C5 */
-	{ 0x00, 0x00 }, /* C6 */
-	{ 0x00, 0x00 }, /* C7 */
-	{ 0x00, 0x00 }, /* C8 */
-	{ 0x00, 0x00 }, /* C9 */
-	{ 0x00, 0x00 }, /* CA */
-	{ 0x00, 0x00 }, /* CB */
-	{ 0x00, 0x00 }, /* CC */
-	{ 0x00, 0x00 }, /* CD */
-	{ 0x00, 0x00 }, /* CE */
-	{ 0x00, 0x00 }, /* CF */
-	{ 0x00, 0x00 }, /* D0 */
-	{ 0x00, 0x00 }, /* D1 */
-	{ 0x00, 0x00 }, /* D2 */
-	{ 0x00, 0x00 }, /* D3 */
-	{ 0x00, 0x00 }, /* D4 */
-	{ 0x00, 0x00 }, /* D5 */
-	{ 0x00, 0x00 }, /* D6 */
-	{ 0x00, 0x00 }, /* D7 */
-	{ 0x00, 0x00 }, /* D8 */
-	{ 0x00, 0x00 }, /* D9 */
-	{ 0x00, 0x00 }, /* DA */
-	{ 0x00, 0x00 }, /* DB */
-	{ 0x00, 0x00 }, /* DC */
-	{ 0x00, 0x00 }, /* DD */
-	{ 0x00, 0x00 }, /* DE */
-	{ 0x00, 0x00 }, /* DF */
-	{ 0x00, 0x00 }, /* E0 */
-	{ 0x00, 0x00 }, /* E1 */
-	{ 0x00, 0x00 }, /* E2 */
-	{ 0x00, 0x00 }, /* E3 */
-	{ 0x00, 0x00 }, /* E4 */
-	{ 0x00, 0x00 }, /* E5 */
-	{ 0x00, 0x00 }, /* E6 */
-	{ 0x00, 0x00 }, /* E7 */
-	{ 0x00, 0x00 }, /* E8 */
-	{ 0x00, 0x00 }, /* E9 */
-	{ 0x00, 0x00 }, /* EA */
-	{ 0x00, 0x00 }, /* EB */
-	{ 0x00, 0x00 }, /* EC */
-	{ 0x00, 0x00 }, /* ED */
-	{ 0x00, 0x00 }, /* EE */
-	{ 0x00, 0x00 }, /* EF */
-	{ 0x00, 0x00 }, /* F0 */
-	{ 0x00, 0x00 }, /* F1 */
-	{ 0x00, 0x00 }, /* F2 */
-	{ 0x00, 0x00 }, /* F3 */
-	{ 0x00, 0x00 }, /* F4 */
-	{ 0x00, 0x00 }, /* F5 */
-	{ 0x00, 0x00 }, /* F6 */
-	{ 0x00, 0x00 }, /* F7 */
-	{ 0x00, 0x00 }, /* F8 */
-	{ 0x00, 0x00 }, /* F9 */
-	{ 0x00, 0x00 }, /* FA */
-	{ 0x00, 0x00 }, /* FB */
-	{ 0x00, 0x00 }, /* FC */
-	{ 0x00, 0x00 }, /* FD */
-	{ 0x00, 0x00 }, /* FE */
-	{ 0xFF, 0x00 }, /* FF */
-};
-
 static bool max98095_readable(struct device *dev, unsigned int reg)
 {
-	if (reg >= M98095_REG_CNT)
-		return 0;
-	return max98095_access[reg].readable != 0;
+	switch (reg) {
+	case M98095_001_HOST_INT_STS ... M98095_097_PWR_SYS:
+	case M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98095_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98095_00F_HOST_CFG ... M98095_097_PWR_SYS:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98095_volatile(struct device *dev, unsigned int reg)
 {
-	if (reg > M98095_REG_MAX_CACHED)
-		return 1;
-
 	switch (reg) {
-	case M98095_000_HOST_DATA:
-	case M98095_001_HOST_INT_STS:
-	case M98095_002_HOST_RSP_STS:
-	case M98095_003_HOST_CMD_STS:
-	case M98095_004_CODEC_STS:
-	case M98095_005_DAI1_ALC_STS:
-	case M98095_006_DAI2_ALC_STS:
-	case M98095_007_JACK_AUTO_STS:
-	case M98095_008_JACK_MANUAL_STS:
-	case M98095_009_JACK_VBAT_STS:
-	case M98095_00A_ACC_ADC_STS:
-	case M98095_00B_MIC_NG_AGC_STS:
-	case M98095_00C_SPK_L_VOLT_STS:
-	case M98095_00D_SPK_R_VOLT_STS:
-	case M98095_00E_TEMP_SENSOR_STS:
-		return 1;
+	case M98095_000_HOST_DATA ... M98095_00E_TEMP_SENSOR_STS:
+	case M98095_REG_MAX_CACHED + 1 ... M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
 	}
-
-	return 0;
 }
 
 static const struct regmap_config max98095_regmap = {
@@ -508,6 +244,7 @@
 	.cache_type = REGCACHE_RBTREE,
 
 	.readable_reg = max98095_readable,
+	.writeable_reg = max98095_writeable,
 	.volatile_reg = max98095_volatile,
 };
 
@@ -661,48 +398,43 @@
 	return 0;
 }
 
-static const unsigned int max98095_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98095_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98095_mic_tlv, 0, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adc_tlv, -1200, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adcboost_tlv, 0, 600, 0);
 
-static const unsigned int max98095_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98095_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max98095_spk_tlv,
 	0, 10, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	11, 18, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	19, 27, TLV_DB_SCALE_ITEM(-200, 100, 0),
-	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_lin_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(max98095_lin_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 300, 0),
 	3, 3, TLV_DB_SCALE_ITEM(300, 1100, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
 
 static const struct snd_kcontrol_new max98095_snd_controls[] = {
 
@@ -1653,10 +1385,13 @@
 		if (IS_ERR(max98095->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98095->mclk);
-		else
-			clk_prepare_enable(max98095->mclk);
+		} else {
+			ret = clk_prepare_enable(max98095->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
@@ -2431,7 +2166,6 @@
 static struct i2c_driver max98095_i2c_driver = {
 	.driver = {
 		.name = "max98095",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98095_of_match),
 	},
 	.probe  = max98095_i2c_probe,
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 3a2fda0..f5e3dce 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -31,6 +31,9 @@
 {
 	struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);
 
+	if (!sdmode)
+		return 0;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -48,24 +51,21 @@
 }
 
 static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
-	SND_SOC_DAPM_DAC("SDMode", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_OUTPUT("Speaker"),
 };
 
 static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
-	{"Speaker", NULL, "SDMode"},
+	{"Speaker", NULL, "HiFi Playback"},
 };
 
 static int max98357a_codec_probe(struct snd_soc_codec *codec)
 {
 	struct gpio_desc *sdmode;
 
-	sdmode = devm_gpiod_get(codec->dev, "sdmode", GPIOD_OUT_LOW);
-	if (IS_ERR(sdmode)) {
-		dev_err(codec->dev, "%s() unable to get sdmode GPIO: %ld\n",
-				__func__, PTR_ERR(sdmode));
+	sdmode = devm_gpiod_get_optional(codec->dev, "sdmode", GPIOD_OUT_LOW);
+	if (IS_ERR(sdmode))
 		return PTR_ERR(sdmode);
-	}
+
 	snd_soc_codec_set_drvdata(codec, sdmode);
 
 	return 0;
@@ -79,7 +79,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(max98357a_dapm_routes),
 };
 
-static struct snd_soc_dai_ops max98357a_dai_ops = {
+static const struct snd_soc_dai_ops max98357a_dai_ops = {
 	.trigger	= max98357a_daiops_trigger,
 };
 
@@ -104,15 +104,8 @@
 
 static int max98357a_platform_probe(struct platform_device *pdev)
 {
-	int ret;
-
-	ret = snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
+	return snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
 			&max98357a_dai_driver, 1);
-	if (ret)
-		dev_err(&pdev->dev, "%s() error registering codec driver: %d\n",
-				__func__, ret);
-
-	return ret;
 }
 
 static int max98357a_platform_remove(struct platform_device *pdev)
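
max98357a now treats the SD_MODE GPIO as optional: devm_gpiod_get_optional() returns NULL rather than an error when the line is simply not described, the trigger callback returns early on a NULL descriptor, and boards that strap SD_MODE high need no property at all. The DAPM change replaces the dummy SDMode widget with a route from the DAI stream itself, so the amplifier is powered whenever "HiFi Playback" runs. The optional-GPIO idiom, sketched:

	/* NULL means "not wired on this board"; an ERR_PTR is a real
	 * failure such as -EPROBE_DEFER and must still be propagated */
	sdmode = devm_gpiod_get_optional(dev, "sdmode", GPIOD_OUT_LOW);
	if (IS_ERR(sdmode))
		return PTR_ERR(sdmode);
	if (sdmode)
		gpiod_set_value(sdmode, 1);
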
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 481d58f1..c14a79d 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -67,13 +67,12 @@
 	.cache_type = REGCACHE_RBTREE,
 };
 
-static const unsigned int max9850_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9850_tlv,
 	0x18, 0x1f, TLV_DB_SCALE_ITEM(-7450, 400, 0),
 	0x20, 0x33, TLV_DB_SCALE_ITEM(-4150, 200, 0),
 	0x34, 0x37, TLV_DB_SCALE_ITEM(-150, 100, 0),
-	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0),
-};
+	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0)
+);
 
 static const struct snd_kcontrol_new max9850_controls[] = {
 SOC_SINGLE_TLV("Headphone Volume", MAX9850_VOLUME, 0, 0x3f, 1, max9850_tlv),
@@ -352,7 +351,6 @@
 static struct i2c_driver max9850_i2c_driver = {
 	.driver = {
 		.name = "max9850",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9850_i2c_probe,
 	.remove = max9850_i2c_remove,
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 29549cd..61cc18e 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -20,9 +20,7 @@
 
 #include "max9877.h"
 
-static struct regmap *regmap;
-
-static struct reg_default max9877_regs[] = {
+static const struct reg_default max9877_regs[] = {
 	{ 0, 0x40 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -30,19 +28,17 @@
 	{ 4, 0x49 },
 };
 
-static const unsigned int max9877_pgain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max9877_pgain_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
-	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
-static const unsigned int max9877_output_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9877_output_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const char *max9877_out_mode[] = {
 	"INA -> SPK",
@@ -123,7 +119,7 @@
 	{ "HPR", NULL, "SHDN" },
 };
 
-static const struct snd_soc_codec_driver max9877_codec = {
+static const struct snd_soc_component_driver max9877_component_driver = {
 	.controls = max9877_controls,
 	.num_controls = ARRAY_SIZE(max9877_controls),
 
@@ -145,6 +141,7 @@
 static int max9877_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
 {
+	struct regmap *regmap;
 	int i;
 
 	regmap = devm_regmap_init_i2c(client, &max9877_regmap);
@@ -155,14 +152,8 @@
 	for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
 		regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
 
-	return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0);
-}
-
-static int max9877_i2c_remove(struct i2c_client *client)
-{
-	snd_soc_unregister_codec(&client->dev);
-
-	return 0;
+	return devm_snd_soc_register_component(&client->dev,
+			&max9877_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9877_i2c_id[] = {
@@ -174,10 +165,8 @@
 static struct i2c_driver max9877_i2c_driver = {
 	.driver = {
 		.name = "max9877",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9877_i2c_probe,
-	.remove = max9877_i2c_remove,
 	.id_table = max9877_i2c_id,
 };
 
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index aad6642..5990de3 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -271,8 +271,6 @@
 			break;
 		}
 	}
-	dev_dbg(codec->dev, "%s: sample rate is %d, returning %d\n",
-				__func__, rate_table[i].rate, *value);
 	return ret;
 }
 
@@ -432,7 +430,7 @@
 	struct snd_soc_codec *codec = dai->codec;
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		regmap_update_bits(max98925->regmap,
 				MAX98925_FORMAT,
@@ -523,7 +521,6 @@
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
 	max98925->codec = codec;
-	codec->control_data = max98925->regmap;
 	regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00);
 	/* It's not the default but we need to set DAI_DLY */
 	regmap_write(max98925->regmap,
@@ -639,7 +636,6 @@
 static struct i2c_driver max98925_i2c_driver = {
 	.driver = {
 		.name = "max98925",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98925_of_match),
 		.pm = NULL,
 	},
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 3d44fc5..3e770cb 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -650,14 +650,14 @@
 #define MC13783_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 	SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops mc13783_ops_dac = {
+static const struct snd_soc_dai_ops mc13783_ops_dac = {
 	.hw_params	= mc13783_pcm_hw_params_dac,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_dac,
 	.set_tdm_slot	= mc13783_set_tdm_slot_dac,
 };
 
-static struct snd_soc_dai_ops mc13783_ops_codec = {
+static const struct snd_soc_dai_ops mc13783_ops_codec = {
 	.hw_params	= mc13783_pcm_hw_params_codec,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_codec,
@@ -698,7 +698,7 @@
 	},
 };
 
-static struct snd_soc_dai_ops mc13783_ops_sync = {
+static const struct snd_soc_dai_ops mc13783_ops_sync = {
 	.hw_params	= mc13783_pcm_hw_params_sync,
 	.set_fmt	= mc13783_set_fmt_sync,
 	.set_sysclk	= mc13783_set_sysclk_sync,
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index b74118e..f561c78 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -199,7 +199,7 @@
 	{12288000, 48000, 0xc, 0x0, 0x30, 0x0, 0x4},
 };
 
-static struct reg_default ml26124_reg[] = {
+static const struct reg_default ml26124_reg[] = {
 	/* CLOCK control Register */
 	{0x00, 0x00 },	/* Sampling Rate */
 	{0x02, 0x00},	/* PLL NL */
@@ -597,7 +597,6 @@
 static struct i2c_driver ml26124_i2c_driver = {
 	.driver = {
 		.name = "ml26124",
-		.owner = THIS_MODULE,
 	},
 	.probe = ml26124_i2c_probe,
 	.remove = ml26124_i2c_remove,
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index e7ba557..5832523 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -95,17 +95,22 @@
 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i = 0, val = -1, enable = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++)
-			if (pcm1681_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++) {
+			if (pcm1681_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	if (val != -1) {
 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
 				   PCM1681_DEEMPH_RATE_MASK, val << 3);
 		enable = 1;
-	} else
+	} else {
 		enable = 0;
+	}
 
 	/* enable/disable deemphasis functionality */
 	return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
@@ -330,7 +335,6 @@
 static struct i2c_driver pcm1681_i2c_driver = {
 	.driver = {
 		.name	= "pcm1681",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcm1681_dt_ids),
 	},
 	.id_table	= pcm1681_i2c_id,
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index dcdfac0..dbff416 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -67,7 +67,6 @@
 	.id_table	= pcm512x_i2c_id,
 	.driver		= {
 		.name	= "pcm512x",
-		.owner	= THIS_MODULE,
 		.of_match_table = pcm512x_of_match,
 		.pm     = &pcm512x_pm_ops,
 	},
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index de16429..047c489 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1117,7 +1117,7 @@
 		params_rate(params),
 		params_channels(params));
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		alen = PCM512x_ALEN_16;
 		break;
@@ -1132,7 +1132,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Bad frame size: %d\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
 
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 56650d6..aca479f 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -11,38 +11,98 @@
  */
 
 #include <linux/module.h>
+#include <linux/regmap.h>
 
 #include "rl6231.h"
 
 /**
- * rl6231_calc_dmic_clk - Calculate the parameter of dmic.
+ * rl6231_get_pre_div - Return the value of the pre-divider.
+ *
+ * @map: regmap to read from.
+ * @reg: register address.
+ * @sft: shift of the pre-divider field in @reg.
+ *
+ * Return the pre-divider value decoded from the given register field,
+ * or a negative error code for an unexpected field value.
+ */
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft)
+{
+	int pd, val;
+
+	regmap_read(map, reg, &val);
+
+	val = (val >> sft) & 0x7;
+
+	switch (val) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		pd = val + 1;
+		break;
+	case 4:
+		pd = 6;
+		break;
+	case 5:
+		pd = 8;
+		break;
+	case 6:
+		pd = 12;
+		break;
+	case 7:
+		pd = 16;
+		break;
+	default:
+		pd = -EINVAL;
+		break;
+	}
+
+	return pd;
+}
+EXPORT_SYMBOL_GPL(rl6231_get_pre_div);
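The switch above decodes a 3-bit register field: codes 0-3 map linearly to
pre-dividers 1-4, and codes 4-7 select 6, 8, 12 and 16. A table-driven sketch
of the same mapping (illustrative only, not part of the patch):

        /* Codes 0..7 select pre-dividers {1, 2, 3, 4, 6, 8, 12, 16};
         * since the field is masked with 0x7 above, the -EINVAL default
         * is effectively unreachable.
         */
        static const int pre_div_table[8] = { 1, 2, 3, 4, 6, 8, 12, 16 };

        static int pre_div_from_field(unsigned int field)
        {
                return pre_div_table[field & 0x7];
        }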
+
+/**
+ * rl6231_calc_dmic_clk - Calculate the frequency divider parameter of dmic.
  *
  * @rate: base clock rate.
  *
- * Choose dmic clock between 1MHz and 3MHz.
- * It is better for clock to approximate 3MHz.
+ * Choose the divider parameter that gives the highest possible DMIC
+ * frequency in the 1 MHz - 3 MHz range.
  */
 int rl6231_calc_dmic_clk(int rate)
 {
-	int div[] = {2, 3, 4, 6, 8, 12}, idx = -EINVAL;
-	int i, red, bound, temp;
+	int div[] = {2, 3, 4, 6, 8, 12};
+	int i;
 
-	red = 3000000 * 12;
-	for (i = 0; i < ARRAY_SIZE(div); i++) {
-		bound = div[i] * 3000000;
-		if (rate > bound)
-			continue;
-		temp = bound - rate;
-		if (temp < red) {
-			red = temp;
-			idx = i;
-		}
+	if (rate < 1000000 * div[0]) {
+		pr_warn("Base clock rate %d is too low\n", rate);
+		return -EINVAL;
 	}
 
-	return idx;
+	for (i = 0; i < ARRAY_SIZE(div); i++) {
+		/* find divider that gives DMIC frequency below 3MHz */
+		if (3000000 * div[i] >= rate)
+			return i;
+	}
+
+	pr_warn("Base clock rate %d is too high\n", rate);
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(rl6231_calc_dmic_clk);
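Because div[] is ascending, the first divider with 3000000 * div[i] >= rate is
also the one that keeps the DMIC clock as high as possible within the window.
A worked example, assuming a 24.576 MHz input:

        /* rate = 24576000:
         *   3000000 * 8  = 24000000  < rate  -> keep scanning
         *   3000000 * 12 = 36000000 >= rate  -> return index 5 (div = 12)
         * DMIC clock = 24576000 / 12 = 2.048 MHz, inside 1-3 MHz.
         */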
 
+struct pll_calc_map {
+	unsigned int pll_in;
+	unsigned int pll_out;
+	int k;
+	int n;
+	int m;
+	bool m_bp;
+};
+
+static const struct pll_calc_map pll_preset_table[] = {
+	{19200000,  24576000,  3, 30, 3, false},
+};
+
 /**
  * rl6231_pll_calc - Calculate PLL M/N/K code.
  * @freq_in: external clock provided to codec.
@@ -57,7 +117,7 @@
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code)
 {
 	int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
-	int k, red, n_t, pll_out, in_t, out_t;
+	int i, k, red, n_t, pll_out, in_t, out_t;
 	int n = 0, m = 0, m_t = 0;
 	int red_t = abs(freq_out - freq_in);
 	bool bypass = false;
@@ -65,6 +125,18 @@
 	if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
 		return -EINVAL;
 
+	for (i = 0; i < ARRAY_SIZE(pll_preset_table); i++) {
+		if (freq_in == pll_preset_table[i].pll_in &&
+			freq_out == pll_preset_table[i].pll_out) {
+			k = pll_preset_table[i].k;
+			m = pll_preset_table[i].m;
+			n = pll_preset_table[i].n;
+			bypass = pll_preset_table[i].m_bp;
+			pr_debug("Use preset PLL parameter table\n");
+			goto code_find;
+		}
+	}
+
 	k = 100000000 / freq_out - 2;
 	if (k > RL6231_PLL_K_MAX)
 		k = RL6231_PLL_K_MAX;
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 0f7b057..4c77b44 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -30,5 +30,6 @@
 int rl6231_pll_calc(const unsigned int freq_in,
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code);
 int rl6231_get_clk_info(int sclk, int rate);
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft);
 
 #endif /* __RL6231_H__ */
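Together the two helpers let a codec driver size the DMIC divider from its
real post-divider clock rather than the raw sysclk, which is exactly what the
rt5640 and rt5645 hunks further below do. A sketch of that caller pattern;
CODEC_ADDA_CLK1 and CODEC_I2S_PD1_SFT are placeholders for the codec-specific
register and shift:

        int rate, idx;

        /* Placeholders: cf. RT5640_ADDA_CLK1 / RT5640_I2S_PD1_SFT below. */
        rate = priv->sysclk / rl6231_get_pre_div(priv->regmap,
                                                 CODEC_ADDA_CLK1,
                                                 CODEC_I2S_PD1_SFT);
        idx = rl6231_calc_dmic_clk(rate);
        if (idx < 0)
                dev_err(codec->dev, "Failed to set DMIC clock\n");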
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 5c43e26..bd93658 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -38,7 +38,7 @@
 #define RT288_VENDOR_ID 0x10ec0288
 
 struct rt286_priv {
-	struct reg_default *index_cache;
+	const struct reg_default *index_cache;
 	int index_cache_size;
 	struct regmap *regmap;
 	struct snd_soc_codec *codec;
@@ -50,7 +50,7 @@
 	int clk_id;
 };
 
-static struct reg_default rt286_index_def[] = {
+static const struct reg_default rt286_index_def[] = {
 	{ 0x01, 0xaaaa },
 	{ 0x02, 0x8aaa },
 	{ 0x03, 0x0002 },
@@ -1108,7 +1108,7 @@
 };
 MODULE_DEVICE_TABLE(acpi, rt286_acpi_match);
 
-static struct dmi_system_id force_combo_jack_table[] = {
+static const struct dmi_system_id force_combo_jack_table[] = {
 	{
 		.ident = "Intel Wilson Beach",
 		.matches = {
@@ -1118,7 +1118,7 @@
 	{ }
 };
 
-static struct dmi_system_id dmi_dell_dino[] = {
+static const struct dmi_system_id dmi_dell_dino[] = {
 	{
 		.ident = "Dell Dino",
 		.matches = {
@@ -1157,7 +1157,7 @@
 	}
 	if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt286\n", val);
+			"Device with ID register %#x is not rt286\n", val);
 		return -ENODEV;
 	}
 
@@ -1259,7 +1259,6 @@
 static struct i2c_driver rt286_i2c_driver = {
 	.driver = {
 		   .name = "rt286",
-		   .owner = THIS_MODULE,
 		   .acpi_match_table = ACPI_PTR(rt286_acpi_match),
 		   },
 	.probe = rt286_i2c_probe,
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
new file mode 100644
index 0000000..3c2f0f8
--- /dev/null
+++ b/sound/soc/codecs/rt298.c
@@ -0,0 +1,1271 @@
+/*
+ * rt298.c  --  RT298 ALSA SoC audio codec driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/jack.h>
+#include <linux/workqueue.h>
+#include <sound/rt298.h>
+#include <sound/hda_verbs.h>
+
+#include "rl6347a.h"
+#include "rt298.h"
+
+#define RT298_VENDOR_ID 0x10ec0298
+
+struct rt298_priv {
+	struct reg_default *index_cache;
+	int index_cache_size;
+	struct regmap *regmap;
+	struct snd_soc_codec *codec;
+	struct rt298_platform_data pdata;
+	struct i2c_client *i2c;
+	struct snd_soc_jack *jack;
+	struct delayed_work jack_detect_work;
+	int sys_clk;
+	int clk_id;
+	int is_hp_in;
+};
+
+static struct reg_default rt298_index_def[] = {
+	{ 0x01, 0xaaaa },
+	{ 0x02, 0x8aaa },
+	{ 0x03, 0x0002 },
+	{ 0x04, 0xaf01 },
+	{ 0x08, 0x000d },
+	{ 0x09, 0xd810 },
+	{ 0x0a, 0x0120 },
+	{ 0x0b, 0x0000 },
+	{ 0x0d, 0x2800 },
+	{ 0x0f, 0x0000 },
+	{ 0x19, 0x0a17 },
+	{ 0x20, 0x0020 },
+	{ 0x33, 0x0208 },
+	{ 0x46, 0x0300 },
+	{ 0x49, 0x0004 },
+	{ 0x4f, 0x50e9 },
+	{ 0x50, 0x2000 },
+	{ 0x63, 0x2902 },
+	{ 0x67, 0x1111 },
+	{ 0x68, 0x1016 },
+	{ 0x69, 0x273f },
+};
+#define INDEX_CACHE_SIZE ARRAY_SIZE(rt298_index_def)
+
+static const struct reg_default rt298_reg[] = {
+	{ 0x00170500, 0x00000400 },
+	{ 0x00220000, 0x00000031 },
+	{ 0x00239000, 0x0000007f },
+	{ 0x0023a000, 0x0000007f },
+	{ 0x00270500, 0x00000400 },
+	{ 0x00370500, 0x00000400 },
+	{ 0x00870500, 0x00000400 },
+	{ 0x00920000, 0x00000031 },
+	{ 0x00935000, 0x000000c3 },
+	{ 0x00936000, 0x000000c3 },
+	{ 0x00970500, 0x00000400 },
+	{ 0x00b37000, 0x00000097 },
+	{ 0x00b37200, 0x00000097 },
+	{ 0x00b37300, 0x00000097 },
+	{ 0x00c37000, 0x00000000 },
+	{ 0x00c37100, 0x00000080 },
+	{ 0x01270500, 0x00000400 },
+	{ 0x01370500, 0x00000400 },
+	{ 0x01371f00, 0x411111f0 },
+	{ 0x01439000, 0x00000080 },
+	{ 0x0143a000, 0x00000080 },
+	{ 0x01470700, 0x00000000 },
+	{ 0x01470500, 0x00000400 },
+	{ 0x01470c00, 0x00000000 },
+	{ 0x01470100, 0x00000000 },
+	{ 0x01837000, 0x00000000 },
+	{ 0x01870500, 0x00000400 },
+	{ 0x02050000, 0x00000000 },
+	{ 0x02139000, 0x00000080 },
+	{ 0x0213a000, 0x00000080 },
+	{ 0x02170100, 0x00000000 },
+	{ 0x02170500, 0x00000400 },
+	{ 0x02170700, 0x00000000 },
+	{ 0x02270100, 0x00000000 },
+	{ 0x02370100, 0x00000000 },
+	{ 0x01870700, 0x00000020 },
+	{ 0x00830000, 0x000000c3 },
+	{ 0x00930000, 0x000000c3 },
+	{ 0x01270700, 0x00000000 },
+};
+
+static bool rt298_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_PROC_COEF:
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return true;
+	}
+}
+
+static bool rt298_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_SET_AUDIO_POWER:
+	case RT298_SET_HPO_POWER:
+	case RT298_SET_SPK_POWER:
+	case RT298_SET_DMIC1_POWER:
+	case RT298_SPK_MUX:
+	case RT298_HPO_MUX:
+	case RT298_ADC0_MUX:
+	case RT298_ADC1_MUX:
+	case RT298_SET_MIC1:
+	case RT298_SET_PIN_HPO:
+	case RT298_SET_PIN_SPK:
+	case RT298_SET_PIN_DMIC1:
+	case RT298_SPK_EAPD:
+	case RT298_SET_AMP_GAIN_HPO:
+	case RT298_SET_DMIC2_DEFAULT:
+	case RT298_DACL_GAIN:
+	case RT298_DACR_GAIN:
+	case RT298_ADCL_GAIN:
+	case RT298_ADCR_GAIN:
+	case RT298_MIC_GAIN:
+	case RT298_SPOL_GAIN:
+	case RT298_SPOR_GAIN:
+	case RT298_HPOL_GAIN:
+	case RT298_HPOR_GAIN:
+	case RT298_F_DAC_SWITCH:
+	case RT298_F_RECMIX_SWITCH:
+	case RT298_REC_MIC_SWITCH:
+	case RT298_REC_I2S_SWITCH:
+	case RT298_REC_LINE_SWITCH:
+	case RT298_REC_BEEP_SWITCH:
+	case RT298_DAC_FORMAT:
+	case RT298_ADC_FORMAT:
+	case RT298_COEF_INDEX:
+	case RT298_PROC_COEF:
+	case RT298_SET_AMP_GAIN_ADC_IN1:
+	case RT298_SET_AMP_GAIN_ADC_IN2:
+	case RT298_SET_POWER(RT298_DAC_OUT1):
+	case RT298_SET_POWER(RT298_DAC_OUT2):
+	case RT298_SET_POWER(RT298_ADC_IN1):
+	case RT298_SET_POWER(RT298_ADC_IN2):
+	case RT298_SET_POWER(RT298_DMIC2):
+	case RT298_SET_POWER(RT298_MIC1):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return false;
+	}
+}
+
+#ifdef CONFIG_PM
+static void rt298_index_sync(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < INDEX_CACHE_SIZE; i++) {
+		snd_soc_write(codec, rt298->index_cache[i].reg,
+				  rt298->index_cache[i].def);
+	}
+}
+#endif
+
+static int rt298_support_power_controls[] = {
+	RT298_DAC_OUT1,
+	RT298_DAC_OUT2,
+	RT298_ADC_IN1,
+	RT298_ADC_IN2,
+	RT298_MIC1,
+	RT298_DMIC1,
+	RT298_DMIC2,
+	RT298_SPK_OUT,
+	RT298_HP_OUT,
+};
+#define RT298_POWER_REG_LEN ARRAY_SIZE(rt298_support_power_controls)
+
+static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
+{
+	struct snd_soc_dapm_context *dapm;
+	unsigned int val, buf;
+
+	*hp = false;
+	*mic = false;
+
+	if (!rt298->codec)
+		return -EINVAL;
+
+	dapm = snd_soc_codec_get_dapm(rt298->codec);
+
+	if (rt298->pdata.cbj_en) {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		if (*hp == rt298->is_hp_in)
+			return -1;
+		rt298->is_hp_in = *hp;
+		if (*hp) {
+			/* power on HV, VREF */
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x200);
+
+			snd_soc_dapm_force_enable_pin(dapm, "HV");
+			snd_soc_dapm_force_enable_pin(dapm, "VREF");
+			/* power LDO1 */
+			snd_soc_dapm_force_enable_pin(dapm, "LDO1");
+			snd_soc_dapm_sync(dapm);
+
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
+			msleep(50);
+
+			regmap_update_bits(rt298->regmap,
+				RT298_CBJ_CTRL1, 0xfcc0, 0xd400);
+			msleep(300);
+			regmap_read(rt298->regmap, RT298_CBJ_CTRL2, &val);
+
+			if (0x0070 == (val & 0x0070)) {
+				*mic = true;
+			} else {
+				regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xfcc0, 0xe400);
+				msleep(300);
+				regmap_read(rt298->regmap,
+					RT298_CBJ_CTRL2, &val);
+				if (0x0070 == (val & 0x0070))
+					*mic = true;
+				else
+					*mic = false;
+			}
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x0);
+
+		} else {
+			*mic = false;
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
+		}
+	} else {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		regmap_read(rt298->regmap, RT298_GET_MIC1_SENSE, &buf);
+		*mic = buf & 0x80000000;
+	}
+
+	snd_soc_dapm_disable_pin(dapm, "HV");
+	snd_soc_dapm_disable_pin(dapm, "VREF");
+	if (!*hp)
+		snd_soc_dapm_disable_pin(dapm, "LDO1");
+	snd_soc_dapm_sync(dapm);
+
+	pr_debug("*hp = %d *mic = %d\n", *hp, *mic);
+
+	return 0;
+}
+
+static void rt298_jack_detect_work(struct work_struct *work)
+{
+	struct rt298_priv *rt298 =
+		container_of(work, struct rt298_priv, jack_detect_work.work);
+	int status = 0;
+	bool hp = false;
+	bool mic = false;
+
+	if (rt298_jack_detect(rt298, &hp, &mic) < 0)
+		return;
+
+	if (hp)
+		status |= SND_JACK_HEADPHONE;
+
+	if (mic)
+		status |= SND_JACK_MICROPHONE;
+
+	snd_soc_jack_report(rt298->jack, status,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+}
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->jack = jack;
+
+	/* Send an initial empty report */
+	snd_soc_jack_report(rt298->jack, 0,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt298_mic_detect);
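rt298_mic_detect() is the hook a machine driver calls once it has created its
jack. A minimal sketch of that usage; the card, codec and jack names are
illustrative, not from this patch:

        struct snd_soc_jack headset;
        int ret;

        ret = snd_soc_card_jack_new(card, "Headset",
                                    SND_JACK_HEADPHONE | SND_JACK_MICROPHONE,
                                    &headset, NULL, 0);
        if (ret == 0)
                rt298_mic_detect(codec, &headset);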
+
+static int is_mclk_mode(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	if (rt298->clk_id == RT298_SCLK_S_MCLK)
+		return 1;
+	else
+		return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -6350, 50, 0);
+static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
+
+static const struct snd_kcontrol_new rt298_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC0 Playback Volume", RT298_DACL_GAIN,
+			    RT298_DACR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_DOUBLE_R_TLV("ADC0 Capture Volume", RT298_ADCL_GAIN,
+			    RT298_ADCR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_SINGLE_TLV("AMIC Volume", RT298_MIC_GAIN,
+			    0, 0x3, 0, mic_vol_tlv),
+	SOC_DOUBLE_R("Speaker Playback Switch", RT298_SPOL_GAIN,
+			    RT298_SPOR_GAIN, RT298_MUTE_SFT, 1, 1),
+};
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt298_front_mix[] = {
+	SOC_DAPM_SINGLE("DAC Switch",  RT298_F_DAC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("RECMIX Switch", RT298_F_RECMIX_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt298_rec_mix[] = {
+	SOC_DAPM_SINGLE("Mic1 Switch", RT298_REC_MIC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("I2S Switch", RT298_REC_I2S_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Line1 Switch", RT298_REC_LINE_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Beep Switch", RT298_REC_BEEP_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new spo_enable_control =
+	SOC_DAPM_SINGLE("Switch", RT298_SET_PIN_SPK,
+			RT298_SET_PIN_SFT, 1, 0);
+
+static const struct snd_kcontrol_new hpol_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOL_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new hpor_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOR_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+/* ADC0 source */
+static const char * const rt298_adc_src[] = {
+	"Mic", "RECMIX", "Dmic"
+};
+
+static const int rt298_adc_values[] = {
+	0, 4, 5,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc0_enum, RT298_ADC0_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc0_mux =
+	SOC_DAPM_ENUM("ADC 0 source", rt298_adc0_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc1_enum, RT298_ADC1_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc1_mux =
+	SOC_DAPM_ENUM("ADC 1 source", rt298_adc1_enum);
+
+static const char * const rt298_dac_src[] = {
+	"Front", "Surround"
+};
+/* HP-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_hpo_enum, RT298_HPO_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_hpo_mux =
+SOC_DAPM_ENUM("HPO source", rt298_hpo_enum);
+
+/* SPK-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_spo_enum, RT298_SPK_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_spo_mux =
+SOC_DAPM_ENUM("SPO source", rt298_spo_enum);
+
+static int rt298_spk_event(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_HIGH);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_LOW);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_set_dmic1_event(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0x20);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_adc_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int nid;
+
+	nid = (w->reg >> 20) & 0xff;
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7000);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7080);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_mic1_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x8000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x8000);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x0000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x0000);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_vref_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		mdelay(50);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
+		12, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
+		0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
+		1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
+		2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO2", 1, RT298_POWER_CTRL2,
+		3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("VREF1", 1, RT298_POWER_CTRL2,
+		4, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LV", 2, RT298_POWER_CTRL1,
+		13, 1, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("MCLK MODE", RT298_PLL_CTRL1,
+		5, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MIC1 Input Buffer", SND_SOC_NOPM,
+		0, 0, rt298_mic1_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* Input Lines */
+	SND_SOC_DAPM_INPUT("DMIC1 Pin"),
+	SND_SOC_DAPM_INPUT("DMIC2 Pin"),
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("LINE1"),
+	SND_SOC_DAPM_INPUT("Beep"),
+
+	/* DMIC */
+	SND_SOC_DAPM_PGA_E("DMIC1", RT298_SET_POWER(RT298_DMIC1), 0, 1,
+		NULL, 0, rt298_set_dmic1_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA("DMIC2", RT298_SET_POWER(RT298_DMIC2), 0, 1,
+		NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DMIC Receiver", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+
+	/* REC Mixer */
+	SND_SOC_DAPM_MIXER("RECMIX", SND_SOC_NOPM, 0, 0,
+		rt298_rec_mix, ARRAY_SIZE(rt298_rec_mix)),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC("ADC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* ADC Mux */
+	SND_SOC_DAPM_MUX_E("ADC 0 Mux", RT298_SET_POWER(RT298_ADC_IN1), 0, 1,
+		&rt298_adc0_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("ADC 1 Mux", RT298_SET_POWER(RT298_ADC_IN2), 0, 1,
+		&rt298_adc1_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+
+	/* Audio Interface */
+	SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF2RX", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+	/* Output Side */
+	/* DACs */
+	SND_SOC_DAPM_DAC("DAC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DAC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* Output Mux */
+	SND_SOC_DAPM_MUX("SPK Mux", SND_SOC_NOPM, 0, 0, &rt298_spo_mux),
+	SND_SOC_DAPM_MUX("HPO Mux", SND_SOC_NOPM, 0, 0, &rt298_hpo_mux),
+
+	SND_SOC_DAPM_SUPPLY("HP Power", RT298_SET_PIN_HPO,
+		RT298_SET_PIN_SFT, 0, NULL, 0),
+
+	/* Output Mixer */
+	SND_SOC_DAPM_MIXER("Front", RT298_SET_POWER(RT298_DAC_OUT1), 0, 1,
+			rt298_front_mix, ARRAY_SIZE(rt298_front_mix)),
+	SND_SOC_DAPM_PGA("Surround", RT298_SET_POWER(RT298_DAC_OUT2), 0, 1,
+			NULL, 0),
+
+	/* Output Pga */
+	SND_SOC_DAPM_SWITCH_E("SPO", SND_SOC_NOPM, 0, 0,
+		&spo_enable_control, rt298_spk_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SWITCH("HPO L", SND_SOC_NOPM, 0, 0,
+		&hpol_enable_control),
+	SND_SOC_DAPM_SWITCH("HPO R", SND_SOC_NOPM, 0, 0,
+		&hpor_enable_control),
+
+	/* Output Lines */
+	SND_SOC_DAPM_OUTPUT("SPOL"),
+	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_OUTPUT("HPO Pin"),
+	SND_SOC_DAPM_OUTPUT("SPDIF"),
+};
+
+static const struct snd_soc_dapm_route rt298_dapm_routes[] = {
+
+	{"ADC 0", NULL, "MCLK MODE", is_mclk_mode},
+	{"ADC 1", NULL, "MCLK MODE", is_mclk_mode},
+	{"Front", NULL, "MCLK MODE", is_mclk_mode},
+	{"Surround", NULL, "MCLK MODE", is_mclk_mode},
+
+	{"HP Power", NULL, "LDO1"},
+	{"HP Power", NULL, "LDO2"},
+	{"HP Power", NULL, "LV"},
+	{"HP Power", NULL, "VREF1"},
+	{"HP Power", NULL, "BG_MBIAS"},
+
+	{"MIC1", NULL, "LDO1"},
+	{"MIC1", NULL, "LDO2"},
+	{"MIC1", NULL, "HV"},
+	{"MIC1", NULL, "LV"},
+	{"MIC1", NULL, "VREF"},
+	{"MIC1", NULL, "VREF1"},
+	{"MIC1", NULL, "BG_MBIAS"},
+	{"MIC1", NULL, "MIC1 Input Buffer"},
+
+	{"SPO", NULL, "LDO1"},
+	{"SPO", NULL, "LDO2"},
+	{"SPO", NULL, "HV"},
+	{"SPO", NULL, "LV"},
+	{"SPO", NULL, "VREF"},
+	{"SPO", NULL, "VREF1"},
+	{"SPO", NULL, "BG_MBIAS"},
+
+	{"DMIC1", NULL, "DMIC1 Pin"},
+	{"DMIC2", NULL, "DMIC2 Pin"},
+	{"DMIC1", NULL, "DMIC Receiver"},
+	{"DMIC2", NULL, "DMIC Receiver"},
+
+	{"RECMIX", "Beep Switch", "Beep"},
+	{"RECMIX", "Line1 Switch", "LINE1"},
+	{"RECMIX", "Mic1 Switch", "MIC1"},
+
+	{"ADC 0 Mux", "Dmic", "DMIC1"},
+	{"ADC 0 Mux", "RECMIX", "RECMIX"},
+	{"ADC 0 Mux", "Mic", "MIC1"},
+	{"ADC 1 Mux", "Dmic", "DMIC2"},
+	{"ADC 1 Mux", "RECMIX", "RECMIX"},
+	{"ADC 1 Mux", "Mic", "MIC1"},
+
+	{"ADC 0", NULL, "ADC 0 Mux"},
+	{"ADC 1", NULL, "ADC 1 Mux"},
+
+	{"AIF1TX", NULL, "ADC 0"},
+	{"AIF2TX", NULL, "ADC 1"},
+
+	{"DAC 0", NULL, "AIF1RX"},
+	{"DAC 1", NULL, "AIF2RX"},
+
+	{"Front", "DAC Switch", "DAC 0"},
+	{"Front", "RECMIX Switch", "RECMIX"},
+
+	{"Surround", NULL, "DAC 1"},
+
+	{"SPK Mux", "Front", "Front"},
+	{"SPK Mux", "Surround", "Surround"},
+
+	{"HPO Mux", "Front", "Front"},
+	{"HPO Mux", "Surround", "Surround"},
+
+	{"SPO", "Switch", "SPK Mux"},
+	{"HPO L", "Switch", "HPO Mux"},
+	{"HPO R", "Switch", "HPO Mux"},
+	{"HPO L", NULL, "HP Power"},
+	{"HPO R", NULL, "HP Power"},
+
+	{"SPOL", NULL, "SPO"},
+	{"SPOR", NULL, "SPO"},
+	{"HPO Pin", NULL, "HPO L"},
+	{"HPO Pin", NULL, "HPO R"},
+};
+
+static int rt298_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = 0;
+	int d_len_code;
+
+	switch (params_rate(params)) {
+	/* bit 14 0:48K 1:44.1K */
+	case 44100:
+	case 48000:
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported sample rate %d\n",
+					params_rate(params));
+		return -EINVAL;
+	}
+	switch (rt298->sys_clk) {
+	case 12288000:
+	case 24576000:
+		if (params_rate(params) != 48000) {
+			dev_err(codec->dev, "Sys_clk is not matched (%d %d)\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	case 11289600:
+	case 22579200:
+		if (params_rate(params) != 44100) {
+			dev_err(codec->dev, "Sys_clk is not matched (%d %d)\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	if (params_channels(params) <= 16) {
+		/* bit 3:0 Number of Channel */
+		val |= (params_channels(params) - 1);
+	} else {
+		dev_err(codec->dev, "Unsupported channels %d\n",
+					params_channels(params));
+		return -EINVAL;
+	}
+
+	d_len_code = 0;
+	switch (params_width(params)) {
+	/* bit 6:4 Bits per Sample */
+	case 16:
+		d_len_code = 0;
+		val |= (0x1 << 4);
+		break;
+	case 32:
+		d_len_code = 2;
+		val |= (0x4 << 4);
+		break;
+	case 20:
+		d_len_code = 1;
+		val |= (0x2 << 4);
+		break;
+	case 24:
+		d_len_code = 2;
+		val |= (0x3 << 4);
+		break;
+	case 8:
+		d_len_code = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec,
+		RT298_I2S_CTRL1, 0x0018, d_len_code << 3);
+	dev_dbg(codec->dev, "format val = 0x%x\n", val);
+
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x407f, val);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x407f, val);
+
+	return 0;
+}
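A worked example of the format word assembled above, assuming S24_LE stereo at
48 kHz:

        /* channels = 2 -> val |= 0x01             (bits 3:0, channels - 1)
         * width = 24   -> d_len_code = 2,
         *                 val |= 0x3 << 4 = 0x30  (bits 6:4)
         * RT298_I2S_CTRL1 bits 4:3 <- d_len_code  (mask 0x0018)
         * RT298_DAC_FORMAT / RT298_ADC_FORMAT <- 0x0031
         */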
+
+static int rt298_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x800);
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x0);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x1 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x2 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x3 << 8);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* bit 15 Stream Type 0:PCM 1:Non-PCM */
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x8000, 0);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x8000, 0);
+
+	return 0;
+}
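The matching machine-driver side is the stock DAI-format call; a sketch for
I2S with the codec as bit/frame clock slave (illustrative, not from this
patch):

        ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
                                  SND_SOC_DAIFMT_NB_NF |
                                  SND_SOC_DAIFMT_CBS_CFS);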
+
+static int rt298_set_dai_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s freq=%d\n", __func__, freq);
+
+	if (RT298_SCLK_S_MCLK == clk_id) {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x20);
+	} else {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0100);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL, 0x4, 0x4);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x0);
+	}
+
+	switch (freq) {
+	case 19200000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x40);
+		break;
+	case 24000000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x0);
+		break;
+	case 12288000:
+	case 11289600:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x0004);
+		break;
+	case 24576000:
+	case 22579200:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x8);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x5406);
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported system clock\n");
+		return -EINVAL;
+	}
+
+	rt298->sys_clk = freq;
+	rt298->clk_id = clk_id;
+
+	return 0;
+}
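Note the asymmetry enforced above: 19.2 MHz and 24 MHz references are rejected
in MCLK mode and must go through the PLL, while the audio-rate multiples
(11.2896/12.288/22.5792/24.576 MHz) may feed either source. A sketch of a
machine driver taking the PLL path for a 24 MHz reference (illustrative):

        ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL,
                                     24000000, SND_SOC_CLOCK_IN);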
+
+static int rt298_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+	if (50 == ratio)
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x1000);
+	else
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x0);
+
+	return 0;
+}
+
+static int rt298_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (SND_SOC_BIAS_STANDBY ==
+			snd_soc_codec_get_bias_level(codec)) {
+			snd_soc_write(codec,
+				RT298_SET_AUDIO_POWER, AC_PWRST_D0);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x200);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x0);
+			mdelay(20);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x0);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x80);
+		}
+		break;
+
+	case SND_SOC_BIAS_ON:
+		mdelay(30);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0400);
+
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		snd_soc_write(codec,
+			RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static irqreturn_t rt298_irq(int irq, void *data)
+{
+	struct rt298_priv *rt298 = data;
+	bool hp = false;
+	bool mic = false;
+	int ret, status = 0;
+
+	ret = rt298_jack_detect(rt298, &hp, &mic);
+
+	/* Clear IRQ */
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x1, 0x1);
+
+	if (ret == 0) {
+		if (hp)
+			status |= SND_JACK_HEADPHONE;
+
+		if (mic)
+			status |= SND_JACK_MICROPHONE;
+
+		snd_soc_jack_report(rt298->jack, status,
+			SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
+		pm_wakeup_event(&rt298->i2c->dev, 300);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int rt298_probe(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->codec = codec;
+
+	if (rt298->i2c->irq) {
+		regmap_update_bits(rt298->regmap,
+					RT298_IRQ_CTRL, 0x2, 0x2);
+
+		INIT_DELAYED_WORK(&rt298->jack_detect_work,
+					rt298_jack_detect_work);
+		schedule_delayed_work(&rt298->jack_detect_work,
+					msecs_to_jiffies(1250));
+	}
+
+	return 0;
+}
+
+static int rt298_remove(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	cancel_delayed_work_sync(&rt298->jack_detect_work);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt298_suspend(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->is_hp_in = -1;
+	regcache_cache_only(rt298->regmap, true);
+	regcache_mark_dirty(rt298->regmap);
+
+	return 0;
+}
+
+static int rt298_resume(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	regcache_cache_only(rt298->regmap, false);
+	rt298_index_sync(codec);
+	regcache_sync(rt298->regmap);
+
+	return 0;
+}
+#else
+#define rt298_suspend NULL
+#define rt298_resume NULL
+#endif
+
+#define RT298_STEREO_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+#define RT298_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt298_aif_dai_ops = {
+	.hw_params = rt298_hw_params,
+	.set_fmt = rt298_set_dai_fmt,
+	.set_sysclk = rt298_set_dai_sysclk,
+	.set_bclk_ratio = rt298_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt298_dai[] = {
+	{
+		.name = "rt298-aif1",
+		.id = RT298_AIF1,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+	{
+		.name = "rt298-aif2",
+		.id = RT298_AIF2,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt298 = {
+	.probe = rt298_probe,
+	.remove = rt298_remove,
+	.suspend = rt298_suspend,
+	.resume = rt298_resume,
+	.set_bias_level = rt298_set_bias_level,
+	.idle_bias_off = true,
+	.controls = rt298_snd_controls,
+	.num_controls = ARRAY_SIZE(rt298_snd_controls),
+	.dapm_widgets = rt298_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rt298_dapm_widgets),
+	.dapm_routes = rt298_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
+};
+
+static const struct regmap_config rt298_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.max_register = 0x02370100,
+	.volatile_reg = rt298_volatile_register,
+	.readable_reg = rt298_readable_register,
+	.reg_write = rl6347a_hw_write,
+	.reg_read = rl6347a_hw_read,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = rt298_reg,
+	.num_reg_defaults = ARRAY_SIZE(rt298_reg),
+};
+
+static const struct i2c_device_id rt298_i2c_id[] = {
+	{"rt298", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, rt298_i2c_id);
+
+static const struct acpi_device_id rt298_acpi_match[] = {
+	{ "INT343A", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, rt298_acpi_match);
+
+static int rt298_i2c_probe(struct i2c_client *i2c,
+			   const struct i2c_device_id *id)
+{
+	struct rt298_platform_data *pdata = dev_get_platdata(&i2c->dev);
+	struct rt298_priv *rt298;
+	struct device *dev = &i2c->dev;
+	const struct acpi_device_id *acpiid;
+	int i, ret;
+
+	rt298 = devm_kzalloc(&i2c->dev, sizeof(*rt298), GFP_KERNEL);
+	if (!rt298)
+		return -ENOMEM;
+
+	rt298->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt298_regmap);
+	if (IS_ERR(rt298->regmap)) {
+		ret = PTR_ERR(rt298->regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	regmap_read(rt298->regmap,
+		RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &ret);
+	if (ret != RT298_VENDOR_ID) {
+		dev_err(&i2c->dev,
+			"Device with ID register %#x is not rt298\n", ret);
+		return -ENODEV;
+	}
+
+	rt298->index_cache = rt298_index_def;
+	rt298->index_cache_size = INDEX_CACHE_SIZE;
+	rt298->i2c = i2c;
+	i2c_set_clientdata(i2c, rt298);
+
+	/* restore codec default */
+	for (i = 0; i < INDEX_CACHE_SIZE; i++)
+		regmap_write(rt298->regmap, rt298->index_cache[i].reg,
+				rt298->index_cache[i].def);
+	for (i = 0; i < ARRAY_SIZE(rt298_reg); i++)
+		regmap_write(rt298->regmap, rt298_reg[i].reg,
+				rt298_reg[i].def);
+
+	if (pdata)
+		rt298->pdata = *pdata;
+
+	/* enable jack combo mode on supported devices */
+	acpiid = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (acpiid) {
+		rt298->pdata = *(struct rt298_platform_data *)
+				acpiid->driver_data;
+	}
+
+	/* VREF Charging */
+	regmap_update_bits(rt298->regmap, 0x04, 0x80, 0x80);
+	regmap_update_bits(rt298->regmap, 0x1b, 0x860, 0x860);
+	/* Vref2 */
+	regmap_update_bits(rt298->regmap, 0x08, 0x20, 0x20);
+
+	regmap_write(rt298->regmap, RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+
+	for (i = 0; i < RT298_POWER_REG_LEN; i++)
+		regmap_write(rt298->regmap,
+			RT298_SET_POWER(rt298_support_power_controls[i]),
+			AC_PWRST_D1);
+
+	if (!rt298->pdata.cbj_en) {
+		regmap_write(rt298->regmap, RT298_CBJ_CTRL2, 0x0000);
+		regmap_write(rt298->regmap, RT298_MIC1_DET_CTRL, 0x0816);
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0xb000);
+	} else {
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0x5000);
+	}
+
+	mdelay(10);
+
+	if (!rt298->pdata.gpio2_en)
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000);
+	else
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
+
+	mdelay(10);
+
+	regmap_write(rt298->regmap, RT298_MISC_CTRL1, 0x0000);
+	regmap_update_bits(rt298->regmap,
+				RT298_WIND_FILTER_CTRL, 0x0082, 0x0082);
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
+	rt298->is_hp_in = -1;
+
+	if (rt298->i2c->irq) {
+		ret = request_threaded_irq(rt298->i2c->irq, NULL, rt298_irq,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "rt298", rt298);
+		if (ret != 0) {
+			dev_err(&i2c->dev,
+				"Failed to reguest IRQ: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt298,
+				     rt298_dai, ARRAY_SIZE(rt298_dai));
+
+	return ret;
+}
+
+static int rt298_i2c_remove(struct i2c_client *i2c)
+{
+	struct rt298_priv *rt298 = i2c_get_clientdata(i2c);
+
+	if (i2c->irq)
+		free_irq(i2c->irq, rt298);
+	snd_soc_unregister_codec(&i2c->dev);
+
+	return 0;
+}
+
+static struct i2c_driver rt298_i2c_driver = {
+	.driver = {
+		   .name = "rt298",
+		   .acpi_match_table = ACPI_PTR(rt298_acpi_match),
+		   },
+	.probe = rt298_i2c_probe,
+	.remove = rt298_i2c_remove,
+	.id_table = rt298_i2c_id,
+};
+
+module_i2c_driver(rt298_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT298 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
new file mode 100644
index 0000000..31da162
--- /dev/null
+++ b/sound/soc/codecs/rt298.h
@@ -0,0 +1,206 @@
+/*
+ * rt298.h  --  RT298 ALSA SoC audio driver
+ *
+ * Copyright 2011 Realtek Microelectronics
+ * Author: Johnny Hsu <johnnyhsu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT298_H__
+#define __RT298_H__
+
+#define VERB_CMD(V, N, D) ((N << 20) | (V << 8) | D)
+
+#define RT298_AUDIO_FUNCTION_GROUP			0x01
+#define RT298_DAC_OUT1					0x02
+#define RT298_DAC_OUT2					0x03
+#define RT298_DIG_CVT					0x06
+#define RT298_ADC_IN1					0x09
+#define RT298_ADC_IN2					0x08
+#define RT298_MIXER_IN					0x0b
+#define RT298_MIXER_OUT1				0x0c
+#define RT298_MIXER_OUT2				0x0d
+#define RT298_DMIC1					0x12
+#define RT298_DMIC2					0x13
+#define RT298_SPK_OUT					0x14
+#define RT298_MIC1					0x18
+#define RT298_LINE1					0x1a
+#define RT298_BEEP					0x1d
+#define RT298_SPDIF					0x1e
+#define RT298_VENDOR_REGISTERS				0x20
+#define RT298_HP_OUT					0x21
+#define RT298_MIXER_IN1					0x22
+#define RT298_MIXER_IN2					0x23
+
+#define RT298_SET_PIN_SFT				6
+#define RT298_SET_PIN_ENABLE				0x40
+#define RT298_SET_PIN_DISABLE				0
+#define RT298_SET_EAPD_HIGH				0x2
+#define RT298_SET_EAPD_LOW				0
+
+#define RT298_MUTE_SFT					7
+
+/* Verb commands */
+#define RT298_GET_PARAM(NID, PARAM) VERB_CMD(AC_VERB_PARAMETERS, NID, PARAM)
+#define RT298_SET_POWER(NID) VERB_CMD(AC_VERB_SET_POWER_STATE, NID, 0)
+#define RT298_SET_AUDIO_POWER RT298_SET_POWER(RT298_AUDIO_FUNCTION_GROUP)
+#define RT298_SET_HPO_POWER RT298_SET_POWER(RT298_HP_OUT)
+#define RT298_SET_SPK_POWER RT298_SET_POWER(RT298_SPK_OUT)
+#define RT298_SET_DMIC1_POWER RT298_SET_POWER(RT298_DMIC1)
+#define RT298_SPK_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_SPK_OUT, 0)
+#define RT298_HPO_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_HP_OUT, 0)
+#define RT298_ADC0_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN1, 0)
+#define RT298_ADC1_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN2, 0)
+#define RT298_SET_MIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_MIC1, 0)
+#define RT298_SET_PIN_HPO\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_HP_OUT, 0)
+#define RT298_SET_PIN_SPK\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPK_OUT, 0)
+#define RT298_SET_PIN_DMIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_DMIC1, 0)
+#define RT298_SET_PIN_SPDIF\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPDIF, 0)
+#define RT298_SET_PIN_DIG_CVT\
+	VERB_CMD(AC_VERB_SET_DIGI_CONVERT_1, RT298_DIG_CVT, 0)
+#define RT298_SPK_EAPD\
+	VERB_CMD(AC_VERB_SET_EAPD_BTLENABLE, RT298_SPK_OUT, 0)
+#define RT298_SET_AMP_GAIN_HPO\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN1\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN2\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN2, 0)
+#define RT298_GET_HP_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_HP_OUT, 0)
+#define RT298_GET_MIC1_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_MIC1, 0)
+#define RT298_SET_DMIC2_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_DMIC2, 0)
+#define RT298_SET_SPDIF_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_SPDIF, 0)
+#define RT298_DACL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0xa000)
+#define RT298_DACR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0x9000)
+#define RT298_ADCL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x6000)
+#define RT298_ADCR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x5000)
+#define RT298_MIC_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIC1, 0x7000)
+#define RT298_SPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0xa000)
+#define RT298_SPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0x9000)
+#define RT298_HPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0xa000)
+#define RT298_HPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0x9000)
+#define RT298_F_DAC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7000)
+#define RT298_F_RECMIX_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7100)
+#define RT298_REC_MIC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7000)
+#define RT298_REC_I2S_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7100)
+#define RT298_REC_LINE_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7200)
+#define RT298_REC_BEEP_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7300)
+#define RT298_DAC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_DAC_OUT1, 0)
+#define RT298_ADC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_ADC_IN1, 0)
+#define RT298_COEF_INDEX\
+	VERB_CMD(AC_VERB_SET_COEF_INDEX, RT298_VENDOR_REGISTERS, 0)
+#define RT298_PROC_COEF\
+	VERB_CMD(AC_VERB_SET_PROC_COEF, RT298_VENDOR_REGISTERS, 0)
+
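A worked expansion of the verb packing, taking AC_VERB_GET_PIN_SENSE (0xf09 in
hda_verbs.h) as the example:

        /* RT298_GET_HP_SENSE
         *   = VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_HP_OUT, 0)
         *   = (0x21 << 20) | (0xf09 << 8) | 0
         *   = 0x021f0900
         * -- hence the 32-bit reg_bits in rt298_regmap above.
         */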
+/* Index registers */
+#define RT298_A_BIAS_CTRL1	0x01
+#define RT298_A_BIAS_CTRL2	0x02
+#define RT298_POWER_CTRL1	0x03
+#define RT298_A_BIAS_CTRL3	0x04
+#define RT298_POWER_CTRL2	0x08
+#define RT298_I2S_CTRL1		0x09
+#define RT298_I2S_CTRL2		0x0a
+#define RT298_CLK_DIV		0x0b
+#define RT298_DC_GAIN		0x0d
+#define RT298_POWER_CTRL3	0x0f
+#define RT298_MIC1_DET_CTRL	0x19
+#define RT298_MISC_CTRL1	0x20
+#define RT298_IRQ_CTRL		0x33
+#define RT298_WIND_FILTER_CTRL	0x46
+#define RT298_PLL_CTRL1		0x49
+#define RT298_CBJ_CTRL1		0x4f
+#define RT298_CBJ_CTRL2		0x50
+#define RT298_PLL_CTRL		0x63
+#define RT298_DEPOP_CTRL1	0x66
+#define RT298_DEPOP_CTRL2	0x67
+#define RT298_DEPOP_CTRL3	0x68
+#define RT298_DEPOP_CTRL4	0x69
+
+/* SPDIF (0x06) */
+#define RT298_SPDIF_SEL_SFT	0
+#define RT298_SPDIF_SEL_PCM0	0
+#define RT298_SPDIF_SEL_PCM1	1
+#define RT298_SPDIF_SEL_SPOUT	2
+#define RT298_SPDIF_SEL_PP	3
+
+/* RECMIX (0x0b) */
+#define RT298_M_REC_BEEP_SFT	0
+#define RT298_M_REC_LINE1_SFT	1
+#define RT298_M_REC_MIC1_SFT	2
+#define RT298_M_REC_I2S_SFT	3
+
+/* Front (0x0c) */
+#define RT298_M_FRONT_DAC_SFT	0
+#define RT298_M_FRONT_REC_SFT	1
+
+/* SPK-OUT (0x14) */
+#define RT298_M_SPK_MUX_SFT	14
+#define RT298_SPK_SEL_MASK	0x1
+#define RT298_SPK_SEL_SFT	0
+#define RT298_SPK_SEL_F		0
+#define RT298_SPK_SEL_S		1
+
+/* HP-OUT (0x21) */
+#define RT298_M_HP_MUX_SFT	14
+#define RT298_HP_SEL_MASK	0x1
+#define RT298_HP_SEL_SFT	0
+#define RT298_HP_SEL_F		0
+#define RT298_HP_SEL_S		1
+
+/* ADC (0x22) (0x23) */
+#define RT298_ADC_SEL_MASK	0x7
+#define RT298_ADC_SEL_SFT	0
+#define RT298_ADC_SEL_SURR	0
+#define RT298_ADC_SEL_FRONT	1
+#define RT298_ADC_SEL_DMIC	2
+#define RT298_ADC_SEL_BEEP	4
+#define RT298_ADC_SEL_LINE1	5
+#define RT298_ADC_SEL_I2S	6
+#define RT298_ADC_SEL_MIC1	7
+
+#define RT298_SCLK_S_MCLK	0
+#define RT298_SCLK_S_PLL	1
+
+enum {
+	RT298_AIF1,
+	RT298_AIF2,
+	RT298_AIFS,
+};
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack);
+
+#endif /* __RT298_H__ */
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 058167c..1be2bab 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -174,16 +174,15 @@
 static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
-static unsigned int mic_bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(mic_bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5631_dmic_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -1725,7 +1724,6 @@
 static struct i2c_driver rt5631_i2c_driver = {
 	.driver = {
 		.name = "rt5631",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(rt5631_i2c_dt_ids),
 	},
 	.probe = rt5631_i2c_probe,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 9bc78e5..e1ceeb8 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5640_PR_BASE + 0x3d,	0x3600},
 	{RT5640_PR_BASE + 0x12,	0x0aa8},
 	{RT5640_PR_BASE + 0x14,	0x0aaa},
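The reg_default -> reg_sequence change tracks the regmap API: sequence writers
such as regmap_multi_reg_write() now take struct reg_sequence, leaving struct
reg_default for cache defaults. A sketch of how an init list like this is
applied, assuming the driver feeds it to the multi-write helper:

        /* Sketch; the driver's length macro equals ARRAY_SIZE(init_list). */
        ret = regmap_multi_reg_write(rt5640->regmap, init_list,
                                     ARRAY_SIZE(init_list));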
@@ -347,16 +347,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5640_data_select[] = {
@@ -459,10 +458,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5640->sysclk);
-
+	rate = rt5640->sysclk / rl6231_get_pre_div(rt5640->regmap,
+		RT5640_ADDA_CLK1, RT5640_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -984,6 +984,35 @@
 	return 0;
 }
 
+static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		hp_amp_power_on(codec);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, RT5640_PWR_LM);
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE, 0);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE,
+			RT5640_L_MUTE | RT5640_R_MUTE);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
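rt5640_lout_event sequences line-out power against mute, replacing the
always-on power bit that used to sit on the "LOUT MIX" widget (see the widget
and route changes below):

        /* Ordering enforced by the callback:
         *   POST_PMU: hp_amp_power_on -> set RT5640_PWR_LM -> unmute LOUTL/R
         *   PRE_PMD:  mute LOUTL/R -> clear RT5640_PWR_LM
         */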
+
 static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
 			   struct snd_kcontrol *kcontrol, int event)
 {
@@ -1179,13 +1208,16 @@
 		0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
 	SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
 		0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
-	SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
+	SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
 		rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
 	SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
 		0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
 		rt5640_hp_event,
 		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
+		rt5640_lout_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
 		RT5640_PWR_HP_L_BIT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
@@ -1500,8 +1532,10 @@
 	{"HP R Playback", "Switch", "HP Amp"},
 	{"HPOL", NULL, "HP L Playback"},
 	{"HPOR", NULL, "HP R Playback"},
-	{"LOUTL", NULL, "LOUT MIX"},
-	{"LOUTR", NULL, "LOUT MIX"},
+
+	{"LOUT amp", NULL, "LOUT MIX"},
+	{"LOUTL", NULL, "LOUT amp"},
+	{"LOUTR", NULL, "LOUT amp"},
 };
 
 static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
@@ -2207,7 +2241,7 @@
 	regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
 	if (val != RT5640_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5640/39\n", val);
+			"Device with ID register %#x is not rt5640/39\n", val);
 		return -ENODEV;
 	}
 
@@ -2242,7 +2276,6 @@
 static struct i2c_driver rt5640_i2c_driver = {
 	.driver = {
 		.name = "rt5640",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5640_acpi_match),
 		.of_match_table = of_match_ptr(rt5640_of_match),
 	},
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 961bd7e..4972bf3 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -21,6 +21,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/regulator/consumer.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +55,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5645_PR_BASE + 0x3d,	0x3600},
 	{RT5645_PR_BASE + 0x1c,	0xfd20},
 	{RT5645_PR_BASE + 0x20,	0x611f},
@@ -63,7 +64,7 @@
 };
 #define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list)
 
-static const struct reg_default rt5650_init_list[] = {
+static const struct reg_sequence rt5650_init_list[] = {
 	{0xf6,	0x0100},
 };
 
@@ -223,6 +224,39 @@
 	{ 0xff, 0x6308 },
 };
 
+static const char *const rt5645_supply_names[] = {
+	"avdd",
+	"cpvdd",
+};
+
+struct rt5645_priv {
+	struct snd_soc_codec *codec;
+	struct rt5645_platform_data pdata;
+	struct regmap *regmap;
+	struct i2c_client *i2c;
+	struct gpio_desc *gpiod_hp_det;
+	struct snd_soc_jack *hp_jack;
+	struct snd_soc_jack *mic_jack;
+	struct snd_soc_jack *btn_jack;
+	struct delayed_work jack_detect_work;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+
+	int codec_type;
+	int sysclk;
+	int sysclk_src;
+	int lrck[RT5645_AIFS];
+	int bclk[RT5645_AIFS];
+	int master[RT5645_AIFS];
+
+	int pll_src;
+	int pll_in;
+	int pll_out;
+
+	int jack_type;
+	bool en_button_func;
+	bool hp_on;
+};
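The new supplies[] array pairs with the regulator consumer include added
above; the probe-side wiring is outside this excerpt, but conventionally looks
like the following sketch (names taken from the struct, flow assumed):

        int i, ret;

        for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
                rt5645->supplies[i].supply = rt5645_supply_names[i];

        ret = devm_regulator_bulk_get(&i2c->dev,
                                      ARRAY_SIZE(rt5645->supplies),
                                      rt5645->supplies);
        if (ret)
                return ret;

        ret = regulator_bulk_enable(ARRAY_SIZE(rt5645->supplies),
                                    rt5645->supplies);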
+
 static int rt5645_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, RT5645_RESET, 0);
@@ -360,6 +394,7 @@
 	case RT5645_DEPOP_M1:
 	case RT5645_DEPOP_M2:
 	case RT5645_DEPOP_M3:
+	case RT5645_CHARGE_PUMP:
 	case RT5645_MICBIAS:
 	case RT5645_A_JD_CTRL1:
 	case RT5645_VAD_CTRL4:
@@ -424,16 +459,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 	/* Speaker Output Volume */
@@ -510,10 +544,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5645->sysclk);
-
+	rate = rt5645->sysclk / rl6231_get_pre_div(rt5645->regmap,
+		RT5645_ADDA_CLK1, RT5645_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1331,15 +1366,23 @@
 	if (on) {
 		if (hp_amp_power_count <= 0) {
 			if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+				snd_soc_write(codec, RT5645_DEPOP_M2, 0x3100);
 				snd_soc_write(codec, RT5645_CHARGE_PUMP,
 					0x0e06);
-				snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+				snd_soc_write(codec, RT5645_DEPOP_M1, 0x000d);
+				regmap_write(rt5645->regmap, RT5645_PR_BASE +
+					RT5645_HP_DCC_INT1, 0x9f01);
+				msleep(20);
+				snd_soc_update_bits(codec, RT5645_DEPOP_M1,
+					RT5645_HP_CO_MASK, RT5645_HP_CO_EN);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					0x3e, 0x7400);
 				snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					RT5645_MAMP_INT_REG2, 0xfc00);
 				snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
+				mdelay(5);
+				rt5645->hp_on = true;
 			} else {
 				/* depop parameters */
 				snd_soc_update_bits(codec, RT5645_DEPOP_M2,
@@ -1553,6 +1596,27 @@
 	return 0;
 }
 
+static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int  event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (rt5645->hp_on) {
+			msleep(100);
+			rt5645->hp_on = false;
+		}
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
 		RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1697,15 +1761,6 @@
 	SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
 
 	/* IF1 2 Mux */
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc1_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc2_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc3_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc_in_mux),
-
 	SND_SOC_DAPM_MUX("IF2 ADC Mux", SND_SOC_NOPM,
 		0, 0, &rt5645_if2_adc_in_mux),
 
@@ -1716,14 +1771,6 @@
 	SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac0_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac1_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac2_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac3_tdm_sel_mux),
 	SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -1854,6 +1901,26 @@
 	SND_SOC_DAPM_OUTPUT("PDM1R"),
 	SND_SOC_DAPM_OUTPUT("SPOL"),
 	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_POST("DAPM_POST", rt5650_hp_event),
+};
+
+static const struct snd_soc_dapm_widget rt5645_specific_dapm_widgets[] = {
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac0_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac1_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac2_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac3_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc1_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc2_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc3_in_mux),
 };
 
 static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
@@ -2642,7 +2709,7 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_PREPARE:
-		if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) {
+		if (SND_SOC_BIAS_STANDBY == snd_soc_codec_get_bias_level(codec)) {
 			snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
 				RT5645_PWR_VREF1 | RT5645_PWR_MB |
 				RT5645_PWR_BG | RT5645_PWR_VREF2,
@@ -2686,94 +2753,15 @@
 	return 0;
 }
 
-static int rt5650_calibration(struct rt5645_priv *rt5645)
-{
-	int val, i;
-	int ret = -1;
-
-	regcache_cache_bypass(rt5645->regmap, true);
-	regmap_write(rt5645->regmap, RT5645_RESET, 0);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0800);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_CHOP_DAC_ADC,
-		0x3600);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x25, 0x7000);
-	regmap_write(rt5645->regmap, RT5645_I2S1_SDP, 0x8008);
-	/* headset type */
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL1, 0x2061);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0x2012);
-	regmap_write(rt5645->regmap, RT5645_PWR_MIXER, 0x0002);
-	regmap_write(rt5645->regmap, RT5645_PWR_VOL, 0x0020);
-	regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x1827);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x0827);
-	msleep(400);
-	/* Inline command */
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0001);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	/* Calbration */
-	regmap_write(rt5645->regmap, RT5645_GLB_CLK, 0x8000);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x8800);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0xe8fa);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x8c04);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x3100);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0e06);
-	regmap_write(rt5645->regmap, RT5645_BASS_BACK, 0x8a13);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0820);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x000d);
-	/* Power on and Calbration */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_HP_DCC_INT1,
-		0x9f01);
-	msleep(200);
-	for (i = 0; i < 5; i++) {
-		regmap_read(rt5645->regmap, RT5645_PR_BASE + 0x7a, &val);
-		if (val != 0 && val != 0x3f3f) {
-			ret = 0;
-			break;
-		}
-		msleep(50);
-	}
-	pr_debug("%s: PR-7A = 0x%x\n", __func__, val);
-
-	/* mute */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x3e, 0x7400);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M3, 0x0737);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_MAMP_INT_REG2,
-		0xfc00);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x1140);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL2, 0x4020);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x0000);
-	msleep(350);
-
-	regcache_cache_bypass(rt5645->regmap, false);
-
-	return ret;
-}
-
 static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec,
 	bool enable)
 {
-	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 
 	if (enable) {
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"LDO2");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
+		snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 
 		snd_soc_update_bits(codec,
 					RT5645_INT_IRQ_ST, 0x8, 0x8);
@@ -2786,36 +2774,26 @@
 		snd_soc_update_bits(codec, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
 		snd_soc_update_bits(codec, RT5645_INT_IRQ_ST, 0x8, 0x0);
 
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		if (rt5645->pdata.jd_mode == 0)
-			snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-								"LDO2");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "ADC L power");
+		snd_soc_dapm_disable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 	}
 }
 
 static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 	unsigned int val;
 
 	if (jack_insert) {
 		regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
 
-		if (codec->component.card->instantiated) {
-			/* for jack type detect */
-			snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-			snd_soc_dapm_force_enable_pin(&codec->dapm,
-				"Mic Det Power");
-			snd_soc_dapm_sync(&codec->dapm);
-		} else {
+		/* for jack type detect */
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_force_enable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
+		if (!dapm->card->instantiated) {
 			/* Power up necessary bits for JD if dapm is
 			   not ready yet */
 			regmap_update_bits(rt5645->regmap, RT5645_PWR_ANLG1,
@@ -2828,14 +2806,15 @@
 		}
 
 		regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-		regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x1000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		msleep(100);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x0000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, 0);
 
-		msleep(450);
+		msleep(600);
 		regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
 		val &= 0x7;
 		dev_dbg(codec->dev, "val = %d\n", val);
@@ -2846,43 +2825,46 @@
 				rt5645_enable_push_button_irq(codec, true);
 			}
 		} else {
-			if (codec->component.card->instantiated) {
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
+			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+			snd_soc_dapm_sync(dapm);
 			rt5645->jack_type = SND_JACK_HEADPHONE;
 		}
 
+		snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
 	} else { /* jack out */
 		rt5645->jack_type = 0;
+
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, 0);
+
 		if (rt5645->en_button_func)
 			rt5645_enable_push_button_irq(codec, false);
-		else {
-			if (codec->component.card->instantiated) {
-				if (rt5645->pdata.jd_mode == 0)
-					snd_soc_dapm_disable_pin(&codec->dapm,
-						"LDO2");
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else {
-				if (rt5645->pdata.jd_mode == 0)
-					regmap_update_bits(rt5645->regmap,
-						RT5645_PWR_MIXER,
-						RT5645_PWR_LDO2, 0);
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
-			}
-		}
+
+		if (rt5645->pdata.jd_mode == 0)
+			snd_soc_dapm_disable_pin(dapm, "LDO2");
+		snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return rt5645->jack_type;
 }
 
-static int rt5645_irq_detection(struct rt5645_priv *rt5645);
+static int rt5645_button_detect(struct snd_soc_codec *codec)
+{
+	int btn_type, val;
+
+	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
+	pr_debug("val=0x%x\n", val);
+	btn_type = val & 0xfff0;
+	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
+
+	return btn_type;
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data);
 
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
@@ -2913,38 +2895,10 @@
 {
 	struct rt5645_priv *rt5645 =
 		container_of(work, struct rt5645_priv, jack_detect_work.work);
-
-	rt5645_irq_detection(rt5645);
-}
-
-static irqreturn_t rt5645_irq(int irq, void *data)
-{
-	struct rt5645_priv *rt5645 = data;
-
-	queue_delayed_work(system_power_efficient_wq,
-			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
-
-	return IRQ_HANDLED;
-}
-
-static int rt5645_button_detect(struct snd_soc_codec *codec)
-{
-	int btn_type, val;
-
-	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
-	pr_debug("val=0x%x\n", val);
-	btn_type = val & 0xfff0;
-	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
-
-	return btn_type;
-}
-
-static int rt5645_irq_detection(struct rt5645_priv *rt5645)
-{
 	int val, btn_type, gpio_state = 0, report = 0;
 
 	if (!rt5645->codec)
-		return -EINVAL;
+		return;
 
 	switch (rt5645->pdata.jd_mode) {
 	case 0: /* Not using rt5645 JD */
@@ -2958,7 +2912,7 @@
 				    report, SND_JACK_HEADPHONE);
 		snd_soc_jack_report(rt5645->mic_jack,
 				    report, SND_JACK_MICROPHONE);
-		return report;
+		return;
 	case 1: /* 2 port */
 		val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070;
 		break;
@@ -3040,27 +2994,39 @@
 		snd_soc_jack_report(rt5645->btn_jack,
 			report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
 				SND_JACK_BTN_2 | SND_JACK_BTN_3);
+}
 
-	return report;
+static irqreturn_t rt5645_irq(int irq, void *data)
+{
+	struct rt5645_priv *rt5645 = data;
+
+	queue_delayed_work(system_power_efficient_wq,
+			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
+
+	return IRQ_HANDLED;
 }
 
 static int rt5645_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 
 	rt5645->codec = codec;
 
 	switch (rt5645->codec_type) {
 	case CODEC_TYPE_RT5645:
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
+			rt5645_specific_dapm_widgets,
+			ARRAY_SIZE(rt5645_specific_dapm_widgets));
+		snd_soc_dapm_add_routes(dapm,
 			rt5645_specific_dapm_routes,
 			ARRAY_SIZE(rt5645_specific_dapm_routes));
 		break;
 	case CODEC_TYPE_RT5650:
-		snd_soc_dapm_new_controls(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
 			rt5650_specific_dapm_widgets,
 			ARRAY_SIZE(rt5650_specific_dapm_widgets));
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_add_routes(dapm,
 			rt5650_specific_dapm_routes,
 			ARRAY_SIZE(rt5650_specific_dapm_routes));
 		break;
@@ -3070,9 +3036,9 @@
 
 	/* for JD function */
 	if (rt5645->pdata.jd_mode) {
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "JD Power");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -3113,7 +3079,7 @@
 #define RT5645_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5645_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5645_aif_dai_ops = {
 	.hw_params = rt5645_hw_params,
 	.set_fmt = rt5645_set_dai_fmt,
 	.set_sysclk = rt5645_set_dai_sysclk,
@@ -3224,7 +3190,7 @@
 	return 1;
 }
 
-static struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 	{
 		.ident = "Intel Strago",
 		.callback = strago_quirk_cb,
@@ -3232,6 +3198,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
 		},
 	},
+	{
+		.ident = "Google Celes",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+		},
+	},
 	{ }
 };
 
@@ -3254,7 +3227,7 @@
 {
 	struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
 	struct rt5645_priv *rt5645;
-	int ret;
+	int ret, i;
 	unsigned int val;
 
 	rt5645 = devm_kzalloc(&i2c->dev, sizeof(struct rt5645_priv),
@@ -3288,6 +3261,24 @@
 		return ret;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
+		rt5645->supplies[i].supply = rt5645_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&i2c->dev,
+				      ARRAY_SIZE(rt5645->supplies),
+				      rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(rt5645->supplies),
+				    rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
 	regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val);
 
 	switch (val) {
@@ -3299,16 +3290,10 @@
 		break;
 	default:
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5645 or rt5650\n",
+			"Device with ID register %#x is not rt5645 or rt5650\n",
 			val);
-		return -ENODEV;
-	}
-
-	if (rt5645->codec_type == CODEC_TYPE_RT5650) {
-		ret = rt5650_calibration(rt5645);
-
-		if (ret < 0)
-			pr_err("calibration failed!\n");
+		ret = -ENODEV;
+		goto err_enable;
 	}
 
 	regmap_write(rt5645->regmap, RT5645_RESET, 0);
@@ -3398,8 +3383,6 @@
 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
 				   RT5645_IRQ_CLK_GATE_CTRL,
 				   RT5645_IRQ_CLK_GATE_CTRL);
-		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
-				   RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
 				   RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT);
 		regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
@@ -3439,12 +3422,25 @@
 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
 			| IRQF_ONESHOT, "rt5645", rt5645);
-		if (ret)
+		if (ret) {
 			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+			goto err_enable;
+		}
 	}
 
-	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
-				      rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
+				     rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	if (ret)
+		goto err_irq;
+
+	return 0;
+
+err_irq:
+	if (rt5645->i2c->irq)
+		free_irq(rt5645->i2c->irq, rt5645);
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
+	return ret;
 }
 
 static int rt5645_i2c_remove(struct i2c_client *i2c)
@@ -3457,18 +3453,31 @@
 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
 
 	snd_soc_unregister_codec(&i2c->dev);
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
 
 	return 0;
 }
 
+static void rt5645_i2c_shutdown(struct i2c_client *i2c)
+{
+	struct rt5645_priv *rt5645 = i2c_get_clientdata(i2c);
+
+	regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
+		RT5645_RING2_SLEEVE_GND, RT5645_RING2_SLEEVE_GND);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, RT5645_CBJ_MN_JD,
+		RT5645_CBJ_MN_JD);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, RT5645_CBJ_BST1_EN,
+		0);
+}
+
 static struct i2c_driver rt5645_i2c_driver = {
 	.driver = {
 		.name = "rt5645",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5645_acpi_match),
 	},
 	.probe = rt5645_i2c_probe,
-	.remove   = rt5645_i2c_remove,
+	.remove = rt5645_i2c_remove,
+	.shutdown = rt5645_i2c_shutdown,
 	.id_table = rt5645_i2c_id,
 };
 module_i2c_driver(rt5645_i2c_driver);
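
The rt5645 probe above now requests its avdd and cpvdd supplies through the bulk regulator API, enables them before touching the chip, and disables them on every failure path and in remove. A condensed sketch of that lifecycle, using a hypothetical foo driver rather than the rt5645 code itself:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static const char * const foo_supply_names[] = { "avdd", "cpvdd" };

struct foo_priv {
	struct regulator_bulk_data supplies[ARRAY_SIZE(foo_supply_names)];
};

/* Hypothetical probe body: name each supply, fetch them all with one
 * devm call, then enable them as a group. Any later failure must be
 * matched by regulator_bulk_disable() on the same array. */
static int foo_probe(struct device *dev, struct foo_priv *foo)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(foo->supplies); i++)
		foo->supplies[i].supply = foo_supply_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(foo->supplies),
				      foo->supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(foo->supplies),
				     foo->supplies);
}
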
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 278bb9f..0e4cfc6 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2115,6 +2115,7 @@
 #define RT5645_JD_PSV_MODE			(0x1 << 12)
 #define RT5645_IRQ_CLK_GATE_CTRL		(0x1 << 11)
 #define RT5645_MICINDET_MANU			(0x1 << 7)
+#define RT5645_RING2_SLEEVE_GND			(0x1 << 5)
 
 /* Vendor ID (0xfd) */
 #define RT5645_VER_C				0x2
@@ -2181,32 +2182,6 @@
 int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
 		unsigned int filter_mask, unsigned int clk_src);
 
-struct rt5645_priv {
-	struct snd_soc_codec *codec;
-	struct rt5645_platform_data pdata;
-	struct regmap *regmap;
-	struct i2c_client *i2c;
-	struct gpio_desc *gpiod_hp_det;
-	struct snd_soc_jack *hp_jack;
-	struct snd_soc_jack *mic_jack;
-	struct snd_soc_jack *btn_jack;
-	struct delayed_work jack_detect_work;
-
-	int codec_type;
-	int sysclk;
-	int sysclk_src;
-	int lrck[RT5645_AIFS];
-	int bclk[RT5645_AIFS];
-	int master[RT5645_AIFS];
-
-	int pll_src;
-	int pll_in;
-	int pll_out;
-
-	int jack_type;
-	bool en_button_func;
-};
-
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
 	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
 	struct snd_soc_jack *btn_jack);
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index a3506e1..1d40318 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -46,7 +46,7 @@
 	  .window_len = 0x1, },
 };
 
-static struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5651_PR_BASE + 0x3d,	0x3e00},
 };
 
@@ -292,16 +292,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5651_data_select[] = {
@@ -378,10 +377,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5651->sysclk);
-
+	rate = rt5651->sysclk / rl6231_get_pre_div(rt5651->regmap,
+		RT5651_ADDA_CLK1, RT5651_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1769,7 +1769,7 @@
 	regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
 	if (ret != RT5651_DEVICE_ID_VALUE) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5651\n", ret);
+			"Device with ID register %#x is not rt5651\n", ret);
 		return -ENODEV;
 	}
 
@@ -1806,7 +1806,6 @@
 static struct i2c_driver rt5651_i2c_driver = {
 	.driver = {
 		.name = "rt5651",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5651_i2c_probe,
 	.remove   = rt5651_i2c_remove,
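
bst_tlv in this driver (and in rt5645, rt5670, rt5677, plus the similar tables in sgtl5000 and ssm2602 below) moves from an open-coded array headed by TLV_DB_RANGE_HEAD() to DECLARE_TLV_DB_RANGE(), which computes the range count from its arguments and lets the table be fully const. An illustrative before/after pair with a two-range table:

#include <sound/tlv.h>

/* Old form: the count passed to TLV_DB_RANGE_HEAD() had to be kept
 * in sync with the entries by hand. */
static unsigned int old_gain_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
};

/* New form: the macro derives the count itself and the data can
 * live in rodata. */
static const DECLARE_TLV_DB_RANGE(new_gain_tlv,
	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
);
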
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index a9123d4..49a9e70 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{ RT5670_PR_BASE + 0x14, 0x9a8a },
 	{ RT5670_PR_BASE + 0x38, 0x3ba1 },
 	{ RT5670_PR_BASE + 0x3d, 0x3640 },
@@ -592,16 +592,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5670_data_select[] = {
@@ -683,10 +682,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5670->sysclk);
-
+	rate = rt5670->sysclk / rl6231_get_pre_div(rt5670->regmap,
+		RT5670_ADDA_CLK1, RT5670_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -2720,7 +2720,7 @@
 #define RT5670_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5670_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
 	.hw_params = rt5670_hw_params,
 	.set_fmt = rt5670_set_dai_fmt,
 	.set_sysclk = rt5670_set_dai_sysclk,
@@ -2863,7 +2863,7 @@
 	regmap_read(rt5670->regmap, RT5670_VENDOR_ID2, &val);
 	if (val != RT5670_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5670/72\n", val);
+			"Device with ID register %#x is not rt5670/72\n", val);
 		return -ENODEV;
 	}
 
@@ -3043,7 +3043,6 @@
 static struct i2c_driver rt5670_i2c_driver = {
 	.driver = {
 		.name = "rt5670",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5670_acpi_match),
 	},
 	.probe = rt5670_i2c_probe,
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index ef6348c..3505aaf 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -31,84 +31,197 @@
 
 #include "rt5677-spi.h"
 
-static struct spi_device *g_spi;
+#define RT5677_SPI_BURST_LEN	240
+#define RT5677_SPI_HEADER	5
+#define RT5677_SPI_FREQ		6000000
 
-/**
- * rt5677_spi_write - Write data to SPI.
- * @txbuf: Data Buffer for writing.
- * @len: Data length.
- *
- *
- * Returns true for success.
+/* The AddressPhase and DataPhase of SPI commands are MSB first on the wire.
+ * DataPhase word size of 16-bit commands is 2 bytes.
+ * DataPhase word size of 32-bit commands is 4 bytes.
+ * DataPhase word size of burst commands is 8 bytes.
+ * The DSP CPU is little-endian.
  */
-int rt5677_spi_write(u8 *txbuf, size_t len)
+#define RT5677_SPI_WRITE_BURST	0x5
+#define RT5677_SPI_READ_BURST	0x4
+#define RT5677_SPI_WRITE_32	0x3
+#define RT5677_SPI_READ_32	0x2
+#define RT5677_SPI_WRITE_16	0x1
+#define RT5677_SPI_READ_16	0x0
+
+static struct spi_device *g_spi;
+static DEFINE_MUTEX(spi_mutex);
+
+/* Select a suitable transfer command for the next transfer to ensure
+ * the transfer address is always naturally aligned while minimizing
+ * the total number of transfers required.
+ *
+ * 3 transfer commands are available:
+ * RT5677_SPI_READ/WRITE_16:	Transfer 2 bytes
+ * RT5677_SPI_READ/WRITE_32:	Transfer 4 bytes
+ * RT5677_SPI_READ/WRITE_BURST:	Transfer any multiples of 8 bytes
+ *
+ * For example, reading 260 bytes at 0x60030002 uses the following commands:
+ * 0x60030002 RT5677_SPI_READ_16	2 bytes
+ * 0x60030004 RT5677_SPI_READ_32	4 bytes
+ * 0x60030008 RT5677_SPI_READ_BURST	240 bytes
+ * 0x600300F8 RT5677_SPI_READ_BURST	8 bytes
+ * 0x60030100 RT5677_SPI_READ_32	4 bytes
+ * 0x60030104 RT5677_SPI_READ_16	2 bytes
+ *
+ * Input:
+ * @read: true for read commands; false for write commands
+ * @align: alignment of the next transfer address
+ * @remain: number of bytes remaining to transfer
+ *
+ * Output:
+ * @len: number of bytes to transfer with the selected command
+ * Returns the selected command
+ */
+static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
 {
-	int status;
+	u8 cmd;
 
-	status = spi_write(g_spi, txbuf, len);
+	if (align == 2 || align == 6 || remain == 2) {
+		cmd = RT5677_SPI_READ_16;
+		*len = 2;
+	} else if (align == 4 || remain <= 6) {
+		cmd = RT5677_SPI_READ_32;
+		*len = 4;
+	} else {
+		cmd = RT5677_SPI_READ_BURST;
+		*len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+	}
+	return read ? cmd : cmd + 1;
+}
 
-	if (status)
-		dev_err(&g_spi->dev, "rt5677_spi_write error %d\n", status);
+/* Copy dstlen bytes from src to dst, while reversing byte order for each word.
+ * If srclen < dstlen, zeros are padded.
+ */
+static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
+{
+	u32 w, i, si;
+	u32 word_size = min_t(u32, dstlen, 8);
 
+	for (w = 0; w < dstlen; w += word_size) {
+		for (i = 0; i < word_size; i++) {
+			si = w + word_size - i - 1;
+			dst[w + i] = si < srclen ? src[si] : 0;
+		}
+	}
+}
+
+/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+{
+	u32 offset;
+	int status = 0;
+	struct spi_transfer t[2];
+	struct spi_message m;
+	/* +4 bytes is for the DummyPhase following the AddressPhase */
+	u8 header[RT5677_SPI_HEADER + 4];
+	u8 body[RT5677_SPI_BURST_LEN];
+	u8 spi_cmd;
+	u8 *cb = rxbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if ((addr & 1) || (len & 1)) {
+		dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = header;
+	t[0].len = sizeof(header);
+	t[0].speed_hz = RT5677_SPI_FREQ;
+	t[1].rx_buf = body;
+	t[1].speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
+
+	for (offset = 0; offset < len; offset += t[1].len) {
+		spi_cmd = rt5677_spi_select_cmd(true, (addr + offset) & 7,
+				len - offset, &t[1].len);
+
+		/* Construct SPI message header */
+		header[0] = spi_cmd;
+		header[1] = ((addr + offset) & 0xff000000) >> 24;
+		header[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		header[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		header[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+
+		/* Copy data back to caller buffer */
+		rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
+	}
+	return status;
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_read);
+
+/* Write DSP address space using SPI. addr has to be 2-byte aligned.
+ * If len is not 2-byte aligned, an extra byte of zero is written at the end
+ * as padding.
+ */
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+{
+	u32 offset, len_with_pad = len;
+	int status = 0;
+	struct spi_transfer t;
+	struct spi_message m;
+	/* +1 byte is for the DummyPhase following the DataPhase */
+	u8 buf[RT5677_SPI_HEADER + RT5677_SPI_BURST_LEN + 1];
+	u8 *body = buf + RT5677_SPI_HEADER;
+	u8 spi_cmd;
+	const u8 *cb = txbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if (addr & 1) {
+		dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	if (len & 1)
+		len_with_pad = len + 1;
+
+	memset(&t, 0, sizeof(t));
+	t.tx_buf = buf;
+	t.speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, &t, 1);
+
+	for (offset = 0; offset < len_with_pad;) {
+		spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
+				len_with_pad - offset, &t.len);
+
+		/* Construct SPI message header */
+		buf[0] = spi_cmd;
+		buf[1] = ((addr + offset) & 0xff000000) >> 24;
+		buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		/* Fetch data from caller buffer */
+		rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
+		offset += t.len;
+		t.len += RT5677_SPI_HEADER + 1;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+	}
 	return status;
 }
 EXPORT_SYMBOL_GPL(rt5677_spi_write);
 
-/**
- * rt5677_spi_burst_write - Write data to SPI by rt5677 dsp memory address.
- * @addr: Start address.
- * @txbuf: Data Buffer for writng.
- * @len: Data length, it must be a multiple of 8.
- *
- *
- * Returns true for success.
- */
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw)
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
 {
-	u8 spi_cmd = RT5677_SPI_CMD_BURST_WRITE;
-	u8 *write_buf;
-	unsigned int i, end, offset = 0;
-
-	write_buf = kmalloc(RT5677_SPI_BUF_LEN + 6, GFP_KERNEL);
-
-	if (write_buf == NULL)
-		return -ENOMEM;
-
-	while (offset < fw->size) {
-		if (offset + RT5677_SPI_BUF_LEN <= fw->size)
-			end = RT5677_SPI_BUF_LEN;
-		else
-			end = fw->size % RT5677_SPI_BUF_LEN;
-
-		write_buf[0] = spi_cmd;
-		write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
-		write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
-		write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
-		write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
-
-		for (i = 0; i < end; i += 8) {
-			write_buf[i + 12] = fw->data[offset + i + 0];
-			write_buf[i + 11] = fw->data[offset + i + 1];
-			write_buf[i + 10] = fw->data[offset + i + 2];
-			write_buf[i +  9] = fw->data[offset + i + 3];
-			write_buf[i +  8] = fw->data[offset + i + 4];
-			write_buf[i +  7] = fw->data[offset + i + 5];
-			write_buf[i +  6] = fw->data[offset + i + 6];
-			write_buf[i +  5] = fw->data[offset + i + 7];
-		}
-
-		write_buf[end + 5] = spi_cmd;
-
-		rt5677_spi_write(write_buf, end + 6);
-
-		offset += RT5677_SPI_BUF_LEN;
-	}
-
-	kfree(write_buf);
-
-	return 0;
+	return rt5677_spi_write(addr, fw->data, fw->size);
 }
-EXPORT_SYMBOL_GPL(rt5677_spi_burst_write);
+EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
 
 static int rt5677_spi_probe(struct spi_device *spi)
 {
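
rt5677_spi_select_cmd() above is the core of the new transfer paths: it keeps every access naturally aligned while picking the largest command that still fits. A standalone userspace sketch that mirrors its length choice and reproduces the 260-byte walk from the driver comment:

#include <stdio.h>

#define BURST_LEN 240

/* Mirror of rt5677_spi_select_cmd()'s length selection: 2 bytes for
 * 2-byte-aligned addresses (or a 2-byte tail), 4 bytes for 4-byte
 * alignment or short tails, otherwise a burst of 8-byte multiples. */
static unsigned int pick_len(unsigned int align, unsigned int remain)
{
	unsigned int len;

	if (align == 2 || align == 6 || remain == 2)
		return 2;
	if (align == 4 || remain <= 6)
		return 4;
	len = remain & ~7u;
	return len < BURST_LEN ? len : BURST_LEN;
}

int main(void)
{
	unsigned int addr = 0x60030002, remain = 260, len;

	/* Prints the sequence from the driver comment:
	 * 2 + 4 + 240 + 8 + 4 + 2 = 260 bytes. */
	while (remain) {
		len = pick_len(addr & 7, remain);
		printf("0x%08x  %3u bytes\n", addr, len);
		addr += len;
		remain -= len;
	}
	return 0;
}
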
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index ec41b2b..662db16 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,10 +12,8 @@
 #ifndef __RT5677_SPI_H__
 #define __RT5677_SPI_H__
 
-#define RT5677_SPI_BUF_LEN 240
-#define RT5677_SPI_CMD_BURST_WRITE 0x05
-
-int rt5677_spi_write(u8 *txbuf, size_t len);
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw);
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
 
 #endif /* __RT5677_SPI_H__ */
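
The new read path works because the wire format is MSB-first per word while the DSP is little-endian: rt5677_spi_reverse() reverses the byte order inside each word as it copies, zero-padding when the source runs short. A standalone sketch of the same swizzle with a single 8-byte word:

#include <stdio.h>

/* Copy dstlen bytes from src to dst, reversing byte order within
 * each word; the word size is dstlen capped at 8 bytes, and bytes
 * beyond srclen are written as zero. */
static void spi_reverse(unsigned char *dst, unsigned int dstlen,
			const unsigned char *src, unsigned int srclen)
{
	unsigned int w, i, si;
	unsigned int word = dstlen < 8 ? dstlen : 8;

	for (w = 0; w < dstlen; w += word)
		for (i = 0; i < word; i++) {
			si = w + word - i - 1;
			dst[w + i] = si < srclen ? src[si] : 0;
		}
}

int main(void)
{
	unsigned char out[9] = { 0 };

	spi_reverse(out, 8, (const unsigned char *)"abcdefgh", 8);
	printf("%s\n", out);	/* prints "hgfedcba" */
	return 0;
}
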
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 31d969a..b4cd7e3 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -15,13 +15,12 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <linux/of_gpio.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/firmware.h>
-#include <linux/gpio.h>
+#include <linux/property.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +53,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5677_ASRC_12,	0x0018},
 	{RT5677_PR_BASE + 0x3d,	0x364d},
 	{RT5677_PR_BASE + 0x17,	0x4fc0},
@@ -746,14 +745,14 @@
 		ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x50000000, rt5677->fw1);
+			rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
 			release_firmware(rt5677->fw1);
 		}
 
 		ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x60000000, rt5677->fw2);
+			rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
 			release_firmware(rt5677->fw2);
 		}
 
@@ -789,16 +788,15 @@
 static const DECLARE_TLV_DB_SCALE(st_vol_tlv, -4650, 150, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -917,8 +915,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
-	int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
+	int idx, rate;
 
+	rate = rt5677->sysclk / rl6231_get_pre_div(rt5677->regmap,
+		RT5677_CLK_TREE_CTRL1, RT5677_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -4764,10 +4765,8 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
-	if (gpio_is_valid(rt5677->pow_ldo2))
-		gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-	if (gpio_is_valid(rt5677->reset_pin))
-		gpio_set_value_cansleep(rt5677->reset_pin, 0);
+	gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+	gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 
 	return 0;
 }
@@ -4781,10 +4780,8 @@
 		regcache_cache_only(rt5677->regmap, true);
 		regcache_mark_dirty(rt5677->regmap);
 
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 0);
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 	}
 
 	return 0;
@@ -4795,12 +4792,9 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	if (!rt5677->dsp_vad_en) {
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 1);
-		if (gpio_is_valid(rt5677->pow_ldo2) ||
-		    gpio_is_valid(rt5677->reset_pin))
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 1);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 1);
+		if (rt5677->pow_ldo2 || rt5677->reset_pin)
 			msleep(10);
 
 		regcache_cache_only(rt5677->regmap, false);
@@ -4863,7 +4857,7 @@
 #define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5677_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
 	.hw_params = rt5677_hw_params,
 	.set_fmt = rt5677_set_dai_fmt,
 	.set_sysclk = rt5677_set_dai_sysclk,
@@ -5024,45 +5018,29 @@
 };
 MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
 
-static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np)
+static void rt5677_read_device_properties(struct rt5677_priv *rt5677,
+		struct device *dev)
 {
-	rt5677->pdata.in1_diff = of_property_read_bool(np,
-					"realtek,in1-differential");
-	rt5677->pdata.in2_diff = of_property_read_bool(np,
-					"realtek,in2-differential");
-	rt5677->pdata.lout1_diff = of_property_read_bool(np,
-					"realtek,lout1-differential");
-	rt5677->pdata.lout2_diff = of_property_read_bool(np,
-					"realtek,lout2-differential");
-	rt5677->pdata.lout3_diff = of_property_read_bool(np,
-					"realtek,lout3-differential");
+	rt5677->pdata.in1_diff = device_property_read_bool(dev,
+			"realtek,in1-differential");
+	rt5677->pdata.in2_diff = device_property_read_bool(dev,
+			"realtek,in2-differential");
+	rt5677->pdata.lout1_diff = device_property_read_bool(dev,
+			"realtek,lout1-differential");
+	rt5677->pdata.lout2_diff = device_property_read_bool(dev,
+			"realtek,lout2-differential");
+	rt5677->pdata.lout3_diff = device_property_read_bool(dev,
+			"realtek,lout3-differential");
 
-	rt5677->pow_ldo2 = of_get_named_gpio(np,
-					"realtek,pow-ldo2-gpio", 0);
-	rt5677->reset_pin = of_get_named_gpio(np,
-					"realtek,reset-gpio", 0);
+	device_property_read_u8_array(dev, "realtek,gpio-config",
+			rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
 
-	/*
-	 * POW_LDO2 is optional (it may be statically tied on the board).
-	 * -ENOENT means that the property doesn't exist, i.e. there is no
-	 * GPIO, so is not an error. Any other error code means the property
-	 * exists, but could not be parsed.
-	 */
-	if (!gpio_is_valid(rt5677->pow_ldo2) &&
-			(rt5677->pow_ldo2 != -ENOENT))
-		return rt5677->pow_ldo2;
-	if (!gpio_is_valid(rt5677->reset_pin) &&
-			(rt5677->reset_pin != -ENOENT))
-		return rt5677->reset_pin;
-
-	of_property_read_u8_array(np, "realtek,gpio-config",
-		rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
-
-	of_property_read_u32(np, "realtek,jd1-gpio", &rt5677->pdata.jd1_gpio);
-	of_property_read_u32(np, "realtek,jd2-gpio", &rt5677->pdata.jd2_gpio);
-	of_property_read_u32(np, "realtek,jd3-gpio", &rt5677->pdata.jd3_gpio);
-
-	return 0;
+	device_property_read_u32(dev, "realtek,jd1-gpio",
+			&rt5677->pdata.jd1_gpio);
+	device_property_read_u32(dev, "realtek,jd2-gpio",
+			&rt5677->pdata.jd2_gpio);
+	device_property_read_u32(dev, "realtek,jd3-gpio",
+			&rt5677->pdata.jd3_gpio);
 }
 
 static struct regmap_irq rt5677_irqs[] = {
@@ -5145,43 +5123,29 @@
 
 	if (pdata)
 		rt5677->pdata = *pdata;
+	else
+		rt5677_read_device_properties(rt5677, &i2c->dev);
 
-	if (i2c->dev.of_node) {
-		ret = rt5677_parse_dt(rt5677, i2c->dev.of_node);
-		if (ret) {
-			dev_err(&i2c->dev, "Failed to parse device tree: %d\n",
-				ret);
-			return ret;
-		}
-	} else {
-		rt5677->pow_ldo2 = -EINVAL;
-		rt5677->reset_pin = -EINVAL;
+	/* pow-ldo2 and reset are optional. The codec pins may be statically
+	 * connected on the board without gpios. If the gpio device property
+	 * isn't specified, devm_gpiod_get_optional returns NULL.
+	 */
+	rt5677->pow_ldo2 = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,pow-ldo2", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->pow_ldo2)) {
+		ret = PTR_ERR(rt5677->pow_ldo2);
+		dev_err(&i2c->dev, "Failed to request POW_LDO2: %d\n", ret);
+		return ret;
+	}
+	rt5677->reset_pin = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->reset_pin)) {
+		ret = PTR_ERR(rt5677->reset_pin);
+		dev_err(&i2c->dev, "Failed to request RESET: %d\n", ret);
+		return ret;
 	}
 
-	if (gpio_is_valid(rt5677->pow_ldo2)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->pow_ldo2,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 POW_LDO2");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request POW_LDO2 %d: %d\n",
-				rt5677->pow_ldo2, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->reset_pin)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->reset_pin,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 RESET");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request RESET %d: %d\n",
-				rt5677->reset_pin, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->pow_ldo2) ||
-	    gpio_is_valid(rt5677->reset_pin)) {
+	if (rt5677->pow_ldo2 || rt5677->reset_pin) {
 		/* Wait a while until I2C bus becomes available. The datasheet
 		 * does not specify the exact we should wait but startup
 		 * sequence mentiones at least a few milliseconds.
@@ -5209,7 +5173,7 @@
 	regmap_read(rt5677->regmap, RT5677_VENDOR_ID2, &val);
 	if (val != RT5677_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5677\n", val);
+			"Device with ID register %#x is not rt5677\n", val);
 		return -ENODEV;
 	}
 
@@ -5273,7 +5237,6 @@
 static struct i2c_driver rt5677_i2c_driver = {
 	.driver = {
 		.name = "rt5677",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5677_i2c_probe,
 	.remove   = rt5677_i2c_remove,
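
The gpiod conversion above relies on two properties of the descriptor API: devm_gpiod_get_optional() returns NULL when the property is simply absent (the pin may be hard-wired on the board) and an ERR_PTR only on genuine failures, and the gpiod_* setters treat a NULL descriptor as a no-op. That is what lets all the gpio_is_valid() checks disappear. A minimal sketch of the pattern with a hypothetical "reset" line:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Hypothetical helper: acquire an optional reset GPIO. A missing
 * "reset-gpios" property yields a NULL descriptor, which is fine;
 * a real lookup error must still be propagated. */
static int foo_get_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* Safe even when gpiod is NULL: the call is simply ignored. */
	gpiod_set_value_cansleep(gpiod, 1);

	*out = gpiod;
	return 0;
}
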
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 7eca38a..d46855a 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -14,6 +14,7 @@
 
 #include <sound/rt5677.h>
 #include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
 
 /* Info */
 #define RT5677_RESET				0x00
@@ -1775,8 +1776,8 @@
 	int pll_src;
 	int pll_in;
 	int pll_out;
-	int pow_ldo2; /* POW_LDO2 pin */
-	int reset_pin; /* RESET pin */
+	struct gpio_desc *pow_ldo2; /* POW_LDO2 pin */
+	struct gpio_desc *reset_pin; /* RESET pin */
 	enum rt5677_type type;
 #ifdef CONFIG_GPIOLIB
 	struct gpio_chip gpio_chip;
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e673f6c..bfda25e 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -406,11 +406,10 @@
 static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
 
 /* tlv for mic gain, 0db 20db 30db 40db */
-static const unsigned int mic_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 
 /* tlv for hp volume, -51.5db to 12.0db, step .5db */
 static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
@@ -1601,7 +1600,6 @@
 static struct i2c_driver sgtl5000_i2c_driver = {
 	.driver = {
 		   .name = "sgtl5000",
-		   .owner = THIS_MODULE,
 		   .of_match_table = sgtl5000_dt_ids,
 		   },
 	.probe = sgtl5000_i2c_probe,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 3e72964..a8402d0 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -208,7 +208,7 @@
 	return err;
 }
 
-static struct snd_soc_dai_ops si476x_dai_ops = {
+static const struct snd_soc_dai_ops si476x_dai_ops = {
 	.hw_params	= si476x_codec_hw_params,
 	.set_fmt	= si476x_codec_set_dai_fmt,
 };
diff --git a/sound/soc/codecs/sirf-audio-codec.c b/sound/soc/codecs/sirf-audio-codec.c
index 29cb442..6bfd25c 100644
--- a/sound/soc/codecs/sirf-audio-codec.c
+++ b/sound/soc/codecs/sirf-audio-codec.c
@@ -370,11 +370,11 @@
 	return 0;
 }
 
-struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
+static const struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
 	.trigger = sirf_audio_codec_trigger,
 };
 
-struct snd_soc_dai_driver sirf_audio_codec_dai = {
+static struct snd_soc_dai_driver sirf_audio_codec_dai = {
 	.name = "sirf-audio-codec",
 	.playback = {
 		.stream_name = "Playback",
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index f30de76..ddb0203 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -806,6 +806,14 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id ssm2518_dt_ids[] = {
+	{ .compatible = "adi,ssm2518", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ssm2518_dt_ids);
+#endif
+
 static const struct i2c_device_id ssm2518_i2c_ids[] = {
 	{ "ssm2518", 0 },
 	{ }
@@ -815,7 +823,7 @@
 static struct i2c_driver ssm2518_driver = {
 	.driver = {
 		.name = "ssm2518",
-		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(ssm2518_dt_ids),
 	},
 	.probe = ssm2518_i2c_probe,
 	.remove = ssm2518_i2c_remove,
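
The ssm2518 hunk adds the standard plumbing for device-tree autoloading: an of_device_id table exported with MODULE_DEVICE_TABLE(of, ...), wired up through of_match_ptr() so the reference compiles away when CONFIG_OF is disabled. A skeletal example with a hypothetical "vendor,foo" compatible:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

/* Hypothetical match table; MODULE_DEVICE_TABLE() emits the module
 * alias that lets userspace autoload the driver when a node with
 * this compatible appears in the device tree. */
static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_dt_ids);

/* Referenced from the driver struct as
 *	.of_match_table = of_match_ptr(foo_dt_ids),
 * where of_match_ptr() evaluates to NULL when CONFIG_OF is unset. */
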
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index 0d9779d..173ba85 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -52,7 +52,6 @@
 static struct i2c_driver ssm2602_i2c_driver = {
 	.driver = {
 		.name = "ssm2602",
-		.owner = THIS_MODULE,
 		.of_match_table = ssm2602_of_match,
 	},
 	.probe = ssm2602_i2c_probe,
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 69a773a..4452fea 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -75,11 +75,10 @@
 			ssm2602_deemph),
 };
 
-static const unsigned int ssm260x_outmix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ssm260x_outmix_tlv,
 	0, 47, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(ssm260x_inpga_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(ssm260x_sidetone_tlv, -1500, 300, 0);
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 84a4f5a..e619d56 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -10,6 +10,7 @@
  * Licensed under the GPL-2.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
@@ -173,6 +174,12 @@
 	SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1,
 		&ssm4567_amplifier_boost_control),
 
+	SND_SOC_DAPM_SIGGEN("Sense"),
+
+	SND_SOC_DAPM_PGA("Current Sense", SSM4567_REG_POWER_CTRL, 4, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("Voltage Sense", SSM4567_REG_POWER_CTRL, 5, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("VBAT Sense", SSM4567_REG_POWER_CTRL, 6, 1, NULL, 0),
+
 	SND_SOC_DAPM_OUTPUT("OUT"),
 };
 
@@ -180,6 +187,13 @@
 	{ "OUT", NULL, "Amplifier Boost" },
 	{ "Amplifier Boost", "Switch", "DAC" },
 	{ "OUT", NULL, "DAC" },
+
+	{ "Current Sense", NULL, "Sense" },
+	{ "Voltage Sense", NULL, "Sense" },
+	{ "VBAT Sense", NULL, "Sense" },
+	{ "Capture Sense", NULL, "Current Sense" },
+	{ "Capture Sense", NULL, "Voltage Sense" },
+	{ "Capture Sense", NULL, "VBAT Sense" },
 };
 
 static int ssm4567_hw_params(struct snd_pcm_substream *substream,
@@ -387,6 +401,14 @@
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
 			SNDRV_PCM_FMTBIT_S32,
 	},
+	.capture = {
+		.stream_name = "Capture Sense",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			SNDRV_PCM_FMTBIT_S32,
+	},
 	.ops = &ssm4567_dai_ops,
 };
 
@@ -456,10 +478,20 @@
 };
 MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
 
+#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id ssm4567_acpi_match[] = {
+	{ "INT343B", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, ssm4567_acpi_match);
+
+#endif
+
 static struct i2c_driver ssm4567_driver = {
 	.driver = {
 		.name = "ssm4567",
-		.owner = THIS_MODULE,
+		.acpi_match_table = ACPI_PTR(ssm4567_acpi_match),
 	},
 	.probe = ssm4567_i2c_probe,
 	.remove = ssm4567_i2c_remove,
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 60eff36..a9844b2 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver sta32x_i2c_driver = {
 	.driver = {
 		.name = "sta32x",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st32x_dt_ids),
 	},
 	.probe =    sta32x_i2c_probe,
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index bd819a3..33a4612 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -1264,7 +1264,6 @@
 static struct i2c_driver sta350_i2c_driver = {
 	.driver = {
 		.name = "sta350",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st350_dt_ids),
 	},
 	.probe =    sta350_i2c_probe,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 4f70378..2cdaca9 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -339,9 +339,6 @@
 	struct sta529 *sta529;
 	int ret;
 
-	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return -EINVAL;
-
 	sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL);
 	if (!sta529)
 		return -ENOMEM;
@@ -379,7 +376,6 @@
 static struct i2c_driver sta529_i2c_driver = {
 	.driver = {
 		.name = "sta529",
-		.owner = THIS_MODULE,
 	},
 	.probe		= sta529_i2c_probe,
 	.remove		= sta529_i2c_remove,
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index ed4cca7..0945c51 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -28,6 +28,9 @@
 
 #include "stac9766.h"
 
+#define STAC9766_VENDOR_ID 0x83847666
+#define STAC9766_VENDOR_ID_MASK 0xffffffff
+
 /*
  * STAC9766 register cache
  */
@@ -239,45 +242,12 @@
 	return 0;
 }
 
-static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(ac97);
-		if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(ac97);
-	if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
-		return -EIO;
-	return 0;
-}
-
 static int stac9766_codec_resume(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 id, reset;
 
-	reset = 0;
-	/* give the codec an AC97 warm reset to start the link */
-reset:
-	if (reset > 5) {
-		dev_err(codec->dev, "Failed to resume\n");
-		return -EIO;
-	}
-	ac97->bus->ops->warm_reset(ac97);
-	id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
-	if (id != 0x4c13) {
-		stac9766_reset(codec, 0);
-		reset++;
-		goto reset;
-	}
-
-	return 0;
+	return snd_ac97_reset(ac97, true, STAC9766_VENDOR_ID,
+		STAC9766_VENDOR_ID_MASK);
 }
 
 static const struct snd_soc_dai_ops stac9766_dai_ops_analog = {
@@ -330,28 +300,15 @@
 static int stac9766_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
+			STAC9766_VENDOR_ID_MASK);
 	if (IS_ERR(ac97))
 		return PTR_ERR(ac97);
 
 	snd_soc_codec_set_drvdata(codec, ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	stac9766_reset(codec, 0);
-	ret = stac9766_reset(codec, 1);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		goto codec_err;
-	}
-
 	return 0;
-
-codec_err:
-	snd_soc_free_ac97_codec(ac97);
-	return ret;
 }
 
 static int stac9766_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
new file mode 100644
index 0000000..160d61a
--- /dev/null
+++ b/sound/soc/codecs/sti-sas.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+/* chipID supported */
+#define CHIPID_STIH416 0
+#define CHIPID_STIH407 1
+
+/* DAC definitions */
+
+/* stih416 DAC registers */
+/* sysconf 2517: Audio-DAC-Control */
+#define STIH416_AUDIO_DAC_CTRL 0x00000814
+/* sysconf 2519: Audio-Gue-Control */
+#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
+
+#define STIH416_DAC_NOT_STANDBY	0x3
+#define STIH416_DAC_SOFTMUTE	0x4
+#define STIH416_DAC_ANA_NOT_PWR	0x5
+#define STIH416_DAC_NOT_PNDBG	0x6
+
+#define STIH416_DAC_NOT_STANDBY_MASK	BIT(STIH416_DAC_NOT_STANDBY)
+#define STIH416_DAC_SOFTMUTE_MASK	BIT(STIH416_DAC_SOFTMUTE)
+#define STIH416_DAC_ANA_NOT_PWR_MASK	BIT(STIH416_DAC_ANA_NOT_PWR)
+#define STIH416_DAC_NOT_PNDBG_MASK	BIT(STIH416_DAC_NOT_PNDBG)
+
+/* stih407 DAC registers */
+/* sysconf 5041: Audio-Glue-Control */
+#define STIH407_AUDIO_GLUE_CTRL 0x000000A4
+/* sysconf 5042: Audio-DAC-Control */
+#define STIH407_AUDIO_DAC_CTRL 0x000000A8
+
+/* DAC definitions */
+#define STIH407_DAC_SOFTMUTE		0x0
+#define STIH407_DAC_STANDBY_ANA		0x1
+#define STIH407_DAC_STANDBY		0x2
+
+#define STIH407_DAC_SOFTMUTE_MASK	BIT(STIH407_DAC_SOFTMUTE)
+#define STIH407_DAC_STANDBY_ANA_MASK    BIT(STIH407_DAC_STANDBY_ANA)
+#define STIH407_DAC_STANDBY_MASK        BIT(STIH407_DAC_STANDBY)
+
+/* SPDIF definitions */
+#define SPDIF_BIPHASE_ENABLE		0x6
+#define SPDIF_BIPHASE_IDLE		0x7
+
+#define SPDIF_BIPHASE_ENABLE_MASK	BIT(SPDIF_BIPHASE_ENABLE)
+#define SPDIF_BIPHASE_IDLE_MASK		BIT(SPDIF_BIPHASE_IDLE)
+
+enum {
+	STI_SAS_DAI_SPDIF_OUT,
+	STI_SAS_DAI_ANALOG_OUT,
+};
+
+static const struct reg_default stih416_sas_reg_defaults[] = {
+	{ STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
+	{ STIH416_AUDIO_DAC_CTRL, 0x00000000 },
+};
+
+static const struct reg_default stih407_sas_reg_defaults[] = {
+	{ STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
+	{ STIH407_AUDIO_DAC_CTRL, 0x00000000 },
+};
+
+struct sti_dac_audio {
+	struct regmap *regmap;
+	struct regmap *virt_regmap;
+	struct regmap_field  **field;
+	struct reset_control *rst;
+	int mclk;
+};
+
+struct sti_spdif_audio {
+	struct regmap *regmap;
+	struct regmap_field  **field;
+	int mclk;
+};
+
+/* device data structure */
+struct sti_sas_dev_data {
+	const int chipid; /* IC version */
+	const struct regmap_config *regmap;
+	const struct snd_soc_dai_ops *dac_ops;  /* DAC function callbacks */
+	const struct snd_soc_dapm_widget *dapm_widgets; /* dapms declaration */
+	const int num_dapm_widgets; /* dapms declaration */
+	const struct snd_soc_dapm_route *dapm_routes; /* route declaration */
+	const int num_dapm_routes; /* route declaration */
+};
+
+/* driver data structure */
+struct sti_sas_data {
+	struct device *dev;
+	const struct sti_sas_dev_data *dev_data;
+	struct sti_dac_audio dac;
+	struct sti_spdif_audio spdif;
+};
+
+/* Read a register from the sysconf reg bank */
+static int sti_sas_read_reg(void *context, unsigned int reg,
+			    unsigned int *value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+	u32 val;
+
+	status = regmap_read(drvdata->dac.regmap, reg, &val);
+	*value = (unsigned int)val;
+
+	return status;
+}
+
+/* Write a register to the sysconf reg bank */
+static int sti_sas_write_reg(void *context, unsigned int reg,
+			     unsigned int value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+
+	status = regmap_write(drvdata->dac.regmap, reg, value);
+
+	return status;
+}
+
+static int sti_sas_init_sas_registers(struct snd_soc_codec *codec,
+				      struct sti_sas_data *data)
+{
+	int ret;
+	/*
+	 * DAC and SPDIF are activated by default;
+	 * put them in IDLE to save power.
+	 */
+
+	/* Initialise bi-phase formatter to disabled */
+	ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+				  SPDIF_BIPHASE_ENABLE_MASK, 0);
+
+	if (!ret)
+		/* Initialise bi-phase formatter idle value to 0 */
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					  SPDIF_BIPHASE_IDLE_MASK, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update SPDIF registers");
+		return ret;
+	}
+
+	/* Init DAC configuration */
+	switch (data->dev_data->chipid) {
+	case CHIPID_STIH407:
+		/* init configuration */
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					  STIH407_DAC_STANDBY_MASK,
+					  STIH407_DAC_STANDBY_MASK);
+
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_STANDBY_ANA_MASK,
+						  STIH407_DAC_STANDBY_ANA_MASK);
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_SOFTMUTE_MASK,
+						  STIH407_DAC_SOFTMUTE_MASK);
+		break;
+	case CHIPID_STIH416:
+		ret = snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					  STIH416_DAC_NOT_STANDBY_MASK, 0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_ANA_NOT_PWR_MASK,
+						  0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_NOT_PNDBG_MASK,
+						  0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_SOFTMUTE_MASK,
+						  STIH416_DAC_SOFTMUTE_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update DAC registers");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * DAC
+ */
+static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	/* Sanity check only */
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupporter master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int stih416_dac_probe(struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct sti_dac_audio *dac = &drvdata->dac;
+
+	/* Get reset control */
+	dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
+	if (IS_ERR(dac->rst)) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: DAC reset control not defined !\n",
+			__func__);
+		dac->rst = NULL;
+		return -EFAULT;
+	}
+	/* Put the DAC into reset */
+	reset_control_assert(dac->rst);
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_PNDBG, 0, NULL, 0),
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
+			     STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_STANDBY, 0),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
+			     STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH407_AUDIO_DAC_CTRL,
+			 STIH407_DAC_STANDBY, 1),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_route stih416_sas_route[] = {
+	{"DAC Output", NULL, "DAC bandgap"},
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static const struct snd_soc_dapm_route stih407_sas_route[] = {
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK,
+					    STIH416_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK, 0);
+	}
+}
+
+static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    STIH407_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    0);
+	}
+}
+
+/*
+ * SPDIF
+ */
+static int sti_sas_spdif_set_fmt(struct snd_soc_dai *dai,
+				 unsigned int fmt)
+{
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupporter master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sti_sas_spdif_trigger:
+ * The trigger callback ensures that the bi-phase formatter (BPF) is
+ * disabled before the CPU DAI is stopped.
+ * This is mandatory to avoid stalling the BPF.
+ */
+static int sti_sas_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    SPDIF_BIPHASE_ENABLE_MASK);
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    0);
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool sti_sas_volatile_register(struct device *dev, unsigned int reg)
+{
+	if (reg == STIH407_AUDIO_GLUE_CTRL)
+		return true;
+
+	return false;
+}
+
+/*
+ * CODEC DAIS
+ */
+
+/*
+ * sti_sas_set_sysclk:
+ * Record the MCLK input frequency so that the MCLK/FS ratio can be
+ * checked for coherency at prepare time.
+ */
+static int sti_sas_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+			      unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	if (dir == SND_SOC_CLOCK_OUT)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		drvdata->spdif.mclk = freq;
+		break;
+
+	case STI_SAS_DAI_ANALOG_OUT:
+		drvdata->dac.mclk = freq;
+		break;
+	}
+
+	return 0;
+}
+
+static int sti_sas_prepare(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		if ((drvdata->spdif.mclk / runtime->rate) != 128) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			return -EINVAL;
+		}
+		break;
+	case STI_SAS_DAI_ANALOG_OUT:
+		if ((drvdata->dac.mclk / runtime->rate) != 256) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			return -EINVAL;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops stih416_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih416_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct snd_soc_dai_ops stih407_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih407_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct regmap_config stih407_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH407_AUDIO_DAC_CTRL,
+	.reg_defaults = stih407_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct regmap_config stih416_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH416_AUDIO_DAC_CTRL,
+	.reg_defaults = stih416_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct sti_sas_dev_data stih416_data = {
+	.chipid = CHIPID_STIH416,
+	.regmap = &stih416_sas_regmap,
+	.dac_ops = &stih416_dac_ops,
+	.dapm_widgets = stih416_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
+	.dapm_routes =	stih416_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
+};
+
+static const struct sti_sas_dev_data stih407_data = {
+	.chipid = CHIPID_STIH407,
+	.regmap = &stih407_sas_regmap,
+	.dac_ops = &stih407_dac_ops,
+	.dapm_widgets = stih407_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih407_sas_dapm_widgets),
+	.dapm_routes =	stih407_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih407_sas_route),
+};
+
+static struct snd_soc_dai_driver sti_sas_dai[] = {
+	{
+		.name = "sas-dai-spdif-out",
+		.id = STI_SAS_DAI_SPDIF_OUT,
+		.playback = {
+			.stream_name = "spdif_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |
+				 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = (struct snd_soc_dai_ops[]) {
+			{
+				.set_fmt = sti_sas_spdif_set_fmt,
+				.trigger = sti_sas_spdif_trigger,
+				.set_sysclk = sti_sas_set_sysclk,
+				.prepare = sti_sas_prepare,
+			}
+		},
+	},
+	{
+		.name = "sas-dai-dac",
+		.id = STI_SAS_DAI_ANALOG_OUT,
+		.playback = {
+			.stream_name = "dac_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int sti_sas_resume(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	return sti_sas_init_sas_registers(codec, drvdata);
+}
+#else
+#define sti_sas_resume NULL
+#endif
+
+static int sti_sas_codec_probe(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	int ret;
+
+	ret = sti_sas_init_sas_registers(codec, drvdata);
+
+	return ret;
+}
+
+static struct snd_soc_codec_driver sti_sas_driver = {
+	.probe = sti_sas_codec_probe,
+	.resume = sti_sas_resume,
+};
+
+static const struct of_device_id sti_sas_dev_match[] = {
+	{
+		.compatible = "st,stih416-sas-codec",
+		.data = &stih416_data,
+	},
+	{
+		.compatible = "st,stih407-sas-codec",
+		.data = &stih407_data,
+	},
+	{},
+};
+
+static int sti_sas_driver_probe(struct platform_device *pdev)
+{
+	struct device_node *pnode = pdev->dev.of_node;
+	struct sti_sas_data *drvdata;
+	const struct of_device_id *of_id;
+
+	/* Allocate device structure */
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct sti_sas_data),
+			       GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	/* Populate data structure depending on compatibility */
+	of_id = of_match_node(sti_sas_dev_match, pnode);
+	if (!of_id || !of_id->data) {
+		dev_err(&pdev->dev, "data associated with device is missing\n");
+		return -EINVAL;
+	}
+
+	drvdata->dev_data = (struct sti_sas_dev_data *)of_id->data;
+
+	/* Initialise device structure */
+	drvdata->dev = &pdev->dev;
+
+	/* Request the DAC & SPDIF registers memory region */
+	drvdata->dac.virt_regmap = devm_regmap_init(&pdev->dev, NULL, drvdata,
+						    drvdata->dev_data->regmap);
+	if (IS_ERR(drvdata->dac.virt_regmap)) {
+		dev_err(&pdev->dev, "audio registers not enabled\n");
+		return PTR_ERR(drvdata->dac.virt_regmap);
+	}
+
+	/* Request the syscon region */
+	drvdata->dac.regmap =
+		syscon_regmap_lookup_by_phandle(pnode, "st,syscfg");
+	if (IS_ERR(drvdata->dac.regmap)) {
+		dev_err(&pdev->dev, "syscon registers not available\n");
+		return PTR_ERR(drvdata->dac.regmap);
+	}
+	drvdata->spdif.regmap = drvdata->dac.regmap;
+
+	/* Set DAC dai probe */
+	if (drvdata->dev_data->chipid == CHIPID_STIH416)
+		sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
+
+	sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
+
+	/* Set dapms */
+	sti_sas_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
+	sti_sas_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
+
+	sti_sas_driver.dapm_routes = drvdata->dev_data->dapm_routes;
+	sti_sas_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
+
+	/* Store context */
+	dev_set_drvdata(&pdev->dev, drvdata);
+
+	return snd_soc_register_codec(&pdev->dev, &sti_sas_driver,
+					sti_sas_dai,
+					ARRAY_SIZE(sti_sas_dai));
+}
+
+static int sti_sas_driver_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver sti_sas_platform_driver = {
+	.driver = {
+		.name = "sti-sas-codec",
+		.of_match_table = sti_sas_dev_match,
+	},
+	.probe = sti_sas_driver_probe,
+	.remove = sti_sas_driver_remove,
+};
+
+module_platform_driver(sti_sas_platform_driver);
+
+MODULE_DESCRIPTION("audio codec for STMicroelectronics sti platforms");
+MODULE_AUTHOR("Arnaud.pouliquen@st.com");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 4f25a7d..e3a0bca 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -38,7 +38,7 @@
 
 #include "tas2552.h"
 
-static struct reg_default tas2552_reg_defs[] = {
+static const struct reg_default tas2552_reg_defs[] = {
 	{TAS2552_CFG_1, 0x22},
 	{TAS2552_CFG_3, 0x80},
 	{TAS2552_DOUT, 0x00},
@@ -493,8 +493,7 @@
 	regcache_cache_only(tas2552->regmap, true);
 	regcache_mark_dirty(tas2552->regmap);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 }
@@ -503,8 +502,7 @@
 {
 	struct tas2552_data *tas2552 = dev_get_drvdata(dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	tas2552_sw_shutdown(tas2552, 0);
 
@@ -520,7 +518,7 @@
 			   NULL)
 };
 
-static struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
+static const struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
 	.hw_params	= tas2552_hw_params,
 	.prepare	= tas2552_prepare,
 	.set_sysclk	= tas2552_set_dai_sysclk,
@@ -585,8 +583,7 @@
 		return ret;
 	}
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	ret = pm_runtime_get_sync(codec->dev);
 	if (ret < 0) {
@@ -610,8 +607,7 @@
 	return 0;
 
 probe_fail:
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies),
 					tas2552->supplies);
@@ -624,8 +620,7 @@
 
 	pm_runtime_put(codec->dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 };
@@ -769,7 +764,6 @@
 static struct i2c_driver tas2552_i2c_driver = {
 	.driver = {
 		.name = "tas2552",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tas2552_of_match),
 		.pm = &tas2552_pm,
 	},
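
The dropped NULL checks in the tas2552 hunks above lean on a gpiod API guarantee: gpiod_set_value() is a no-op when handed a NULL descriptor, which is what an optional GPIO lookup returns when the line is not wired up. A sketch of the pattern, under the assumption that the enable GPIO is requested as optional (names below are illustrative, not taken from the driver):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_power_up(struct device *dev)
{
	struct gpio_desc *enable;

	enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(enable))
		return PTR_ERR(enable);	/* real errors still propagate */

	gpiod_set_value(enable, 1);	/* silently ignored if enable == NULL */
	return 0;
}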
diff --git a/sound/soc/codecs/tas2552.h b/sound/soc/codecs/tas2552.h
index 5746f8f..e34752b 100644
--- a/sound/soc/codecs/tas2552.h
+++ b/sound/soc/codecs/tas2552.h
@@ -42,7 +42,7 @@
 #define TAS2552_BOOST_APT_CTRL		0x14
 #define TAS2552_VER_NUM			0x16
 #define TAS2552_VBAT_DATA		0x19
-#define TAS2552_MAX_REG			0x20
+#define TAS2552_MAX_REG			TAS2552_VBAT_DATA
 
 /* CFG1 Register Masks */
 #define TAS2552_DEV_RESET		(1 << 0)
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 32942be..d49d25d 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -266,10 +266,14 @@
 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i, val = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++)
-			if (tas5086_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++) {
+			if (tas5086_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	return regmap_update_bits(priv->regmap, TAS5086_SYS_CONTROL_1,
 				  TAS5086_DEEMPH_MASK, val);
@@ -994,7 +998,6 @@
 static struct i2c_driver tas5086_i2c_driver = {
 	.driver = {
 		.name	= "tas5086",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tas5086_dt_ids),
 	},
 	.id_table	= tas5086_i2c_id,
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 85bcc37..39307ad 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -179,7 +179,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
 			if (!IS_ERR(priv->mclk)) {
 				ret = clk_prepare_enable(priv->mclk);
 				if (ret) {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index aab0af6..cb5310d 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-static struct reg_default tfa9879_regs[] = {
+static const struct reg_default tfa9879_regs[] = {
 	{ TFA9879_DEVICE_CONTROL,	0x0000 }, /* 0x00 */
 	{ TFA9879_SERIAL_INTERFACE_1,	0x0a18 }, /* 0x01 */
 	{ TFA9879_PCM_IOM2_FORMAT_1,	0x0007 }, /* 0x02 */
@@ -314,7 +314,6 @@
 static struct i2c_driver tfa9879_i2c_driver = {
 	.driver = {
 		.name = "tfa9879",
-		.owner = THIS_MODULE,
 	},
 	.probe = tfa9879_i2c_probe,
 	.remove = tfa9879_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index c4c960f..ee4def4 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1121,7 +1121,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(aic31xx_audio_map),
 };
 
-static struct snd_soc_dai_ops aic31xx_dai_ops = {
+static const struct snd_soc_dai_ops aic31xx_dai_ops = {
 	.hw_params	= aic31xx_hw_params,
 	.set_sysclk	= aic31xx_set_dai_sysclk,
 	.set_fmt	= aic31xx_set_dai_fmt,
@@ -1283,7 +1283,6 @@
 static struct i2c_driver aic31xx_i2c_driver = {
 	.driver = {
 		.name	= "tlv320aic31xx-codec",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic31xx_of_match),
 	},
 	.probe		= aic31xx_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index ad6cb90..f2d3191 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -871,7 +871,6 @@
 static struct i2c_driver aic32x4_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic32x4",
-		.owner = THIS_MODULE,
 		.of_match_table = aic32x4_of_id,
 	},
 	.probe =    aic32x4_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a7cf19b..1a82b19 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1668,7 +1668,7 @@
 };
 MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
 
-static const struct reg_default aic3007_class_d[] = {
+static const struct reg_sequence aic3007_class_d[] = {
 	/* Class-D speaker driver init; datasheet p. 46 */
 	{ AIC3X_PAGE_SELECT, 0x0D },
 	{ 0xD, 0x0D },
@@ -1825,7 +1825,6 @@
 static struct i2c_driver aic3x_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic3x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic3x_of_match),
 	},
 	.probe	= aic3x_i2c_probe,
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d67a311..781398fb 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1585,7 +1585,6 @@
 static struct i2c_driver tlv320dac33_i2c_driver = {
 	.driver = {
 		.name = "tlv320dac33-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe		= dac33_i2c_probe,
 	.remove		= dac33_i2c_remove,
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 6fac9e0..11d85c5 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -259,8 +259,7 @@
  * TPA6130 volume. From -59.5 to 4 dB with increasing step size when going
  * down in gain.
  */
-static const unsigned int tpa6130_tlv[] = {
-	TLV_DB_RANGE_HEAD(10),
+static const DECLARE_TLV_DB_RANGE(tpa6130_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-5950, 600, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-5000, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-4550, 160, 0),
@@ -270,8 +269,8 @@
 	12, 13, TLV_DB_SCALE_ITEM(-3040, 180, 0),
 	14, 20, TLV_DB_SCALE_ITEM(-2710, 110, 0),
 	21, 37, TLV_DB_SCALE_ITEM(-1960, 74, 0),
-	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0),
-};
+	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0)
+);
 
 static const struct snd_kcontrol_new tpa6130a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume",
@@ -280,12 +279,11 @@
 		       tpa6130_tlv),
 };
 
-static const unsigned int tpa6140_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(tpa6140_tlv,
 	0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0),
-	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0),
-};
+	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0)
+);
 
 static const struct snd_kcontrol_new tpa6140a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume",
@@ -488,7 +486,6 @@
 static struct i2c_driver tpa6130a2_i2c_driver = {
 	.driver = {
 		.name = "tpa6130a2",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tpa6130a2_of_match),
 	},
 	.probe = tpa6130a2_probe,
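
The TLV conversions in this file (and in twl4030, uda1380 and wm8350 below) all follow one pattern: DECLARE_TLV_DB_RANGE() derives the TLV length from its own initializer, so the hand-maintained count in TLV_DB_RANGE_HEAD(n), which had to be kept in sync with the number of ranges manually, disappears. A minimal sketch, assuming the include/sound/tlv.h macros of this kernel generation:

#include <sound/tlv.h>

/* Two ranges; no hand-written count that could drift out of sync. */
static const DECLARE_TLV_DB_RANGE(example_tlv,
	0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0),
	9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0)
);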
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 12232d7..4356843 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -23,11 +23,13 @@
 #include "ts3a227e.h"
 
 struct ts3a227e {
+	struct device *dev;
 	struct regmap *regmap;
 	struct snd_soc_jack *jack;
 	bool plugged;
 	bool mic_present;
 	unsigned int buttons_held;
+	int irq;
 };
 
 /* Button values to be reported on the jack */
@@ -189,16 +191,28 @@
 	struct ts3a227e *ts3a227e = (struct ts3a227e *)data;
 	struct regmap *regmap = ts3a227e->regmap;
 	unsigned int int_reg, kp_int_reg, acc_reg, i;
+	struct device *dev = ts3a227e->dev;
+	int ret;
 
 	/* Check for plug/unplug. */
-	regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	if (int_reg & (DETECTION_COMPLETE_EVENT | INS_REM_EVENT)) {
 		regmap_read(regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
 		ts3a227e_new_jack_state(ts3a227e, acc_reg);
 	}
 
 	/* Report any key events. */
-	regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear key interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
 		if (kp_int_reg & PRESS_MASK(i))
 			ts3a227e->buttons_held |= (1 << i);
@@ -283,6 +297,8 @@
 		return -ENOMEM;
 
 	i2c_set_clientdata(i2c, ts3a227e);
+	ts3a227e->dev = dev;
+	ts3a227e->irq = i2c->irq;
 
 	ts3a227e->regmap = devm_regmap_init_i2c(i2c, &ts3a227e_regmap_config);
 	if (IS_ERR(ts3a227e->regmap))
@@ -320,6 +336,32 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ts3a227e_suspend(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "suspend disable irq\n");
+	disable_irq(ts3a227e->irq);
+
+	return 0;
+}
+
+static int ts3a227e_resume(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "resume enable irq\n");
+	enable_irq(ts3a227e->irq);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops ts3a227e_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(ts3a227e_suspend, ts3a227e_resume)
+};
+
 static const struct i2c_device_id ts3a227e_i2c_ids[] = {
 	{ "ts3a227e", 0 },
 	{ }
@@ -335,7 +377,7 @@
 static struct i2c_driver ts3a227e_driver = {
 	.driver = {
 		.name = "ts3a227e",
-		.owner = THIS_MODULE,
+		.pm = &ts3a227e_pm,
 		.of_match_table = of_match_ptr(ts3a227e_of_match),
 	},
 	.probe = ts3a227e_i2c_probe,
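
Two hardening aspects of the ts3a227e changes are worth spelling out. Returning IRQ_NONE when the regmap reads fail lets the interrupt core's spurious-IRQ accounting kick in instead of silently acknowledging the line, and the new PM hooks keep the threaded handler from issuing I2C transfers while the controller may be suspended. The ordering this relies on, as far as it can be inferred from the patch:

/*
 * Inferred ordering (an assumption, not stated in the patch):
 *   ts3a227e_suspend() -> disable_irq() waits for a running handler
 *                         to finish, then masks the line;
 *   <system sleeps>       a jack event stays pending in the irqchip;
 *   ts3a227e_resume()  -> enable_irq() unmasks the line and the
 *                         pending event is handled once I2C is awake.
 */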
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 90f5f04..2713e18 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -524,12 +524,11 @@
 	SOC_DAPM_SINGLE("Switch", TWL4030_REG_VDL_APGA_CTL, 2, 1, 0);
 
 /* Digital bypass gain, mute instead of -30dB */
-static const unsigned int twl4030_dapm_dbypass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(twl4030_dapm_dbypass_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-3000, 600, 1),
 	2, 3, TLV_DB_SCALE_ITEM(-2400, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
+);
 
 /* Digital bypass left (TX1L -> RX2L) */
 static const struct snd_kcontrol_new twl4030_dapm_dbypassl_control =
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 913edf2..e190263 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -37,74 +37,53 @@
 
 	struct snd_pcm_substream *master_substream;
 	struct snd_pcm_substream *slave_substream;
+
+	struct regmap *regmap;
+	struct uda134x_platform_data *pd;
 };
 
-/* In-data addresses are hard-coded into the reg-cache values */
-static const char uda134x_reg[UDA134X_REGS_NUM] = {
-	/* Extended address registers */
-	0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* Status, data regs */
-	0x00, 0x83, 0x00, 0x40, 0x80, 0xC0, 0x00,
+static const struct reg_default uda134x_reg_defaults[] = {
+	{ UDA134X_EA000, 0x04 },
+	{ UDA134X_EA001, 0x04 },
+	{ UDA134X_EA010, 0x04 },
+	{ UDA134X_EA011, 0x00 },
+	{ UDA134X_EA100, 0x00 },
+	{ UDA134X_EA101, 0x00 },
+	{ UDA134X_EA110, 0x00 },
+	{ UDA134X_EA111, 0x00 },
+	{ UDA134X_STATUS0, 0x00 },
+	{ UDA134X_STATUS1, 0x03 },
+	{ UDA134X_DATA000, 0x00 },
+	{ UDA134X_DATA001, 0x00 },
+	{ UDA134X_DATA010, 0x00 },
+	{ UDA134X_DATA011, 0x00 },
+	{ UDA134X_DATA1, 0x00 },
 };
 
 /*
- * The codec has no support for reading its registers except for peak level...
- */
-static inline unsigned int uda134x_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return -1;
-	return cache[reg];
-}
-
-/*
- * Write the register cache
- */
-static inline void uda134x_write_reg_cache(struct snd_soc_codec *codec,
-	u8 reg, unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return;
-	cache[reg] = value;
-}
-
-/*
  * Write to the uda134x registers
  *
  */
-static int uda134x_write(struct snd_soc_codec *codec, unsigned int reg,
+static int uda134x_regmap_write(void *context, unsigned int reg,
 	unsigned int value)
 {
+	struct uda134x_platform_data *pd = context;
 	int ret;
 	u8 addr;
 	u8 data = value;
-	struct uda134x_platform_data *pd = codec->control_data;
-
-	pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value);
-
-	if (reg >= UDA134X_REGS_NUM) {
-		printk(KERN_ERR "%s unknown register: reg: %u",
-		       __func__, reg);
-		return -EINVAL;
-	}
-
-	uda134x_write_reg_cache(codec, reg, value);
 
 	switch (reg) {
 	case UDA134X_STATUS0:
 	case UDA134X_STATUS1:
 		addr = UDA134X_STATUS_ADDR;
+		data |= (reg - UDA134X_STATUS0) << 7;
 		break;
 	case UDA134X_DATA000:
 	case UDA134X_DATA001:
 	case UDA134X_DATA010:
 	case UDA134X_DATA011:
 		addr = UDA134X_DATA0_ADDR;
+		data |= (reg - UDA134X_DATA000) << 6;
 		break;
 	case UDA134X_DATA1:
 		addr = UDA134X_DATA1_ADDR;
@@ -133,27 +112,28 @@
 
 static inline void uda134x_reset(struct snd_soc_codec *codec)
 {
-	u8 reset_reg = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg | (1<<6));
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	unsigned int mask = 1<<6;
+
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, mask);
 	msleep(1);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg & ~(1<<6));
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, 0);
 }
 
 static int uda134x_mute(struct snd_soc_dai *dai, int mute)
 {
-	struct snd_soc_codec *codec = dai->codec;
-	u8 mute_reg = uda134x_read_reg_cache(codec, UDA134X_DATA010);
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(dai->codec);
+	unsigned int mask = 1<<2;
+	unsigned int val;
 
 	pr_debug("%s mute: %d\n", __func__, mute);
 
 	if (mute)
-		mute_reg |= (1<<2);
+		val = mask;
 	else
-		mute_reg &= ~(1<<2);
+		val = 0;
 
-	uda134x_write(codec, UDA134X_DATA010, mute_reg);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_DATA010, mask, val);
 }
 
 static int uda134x_startup(struct snd_pcm_substream *substream,
@@ -205,7 +185,7 @@
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-	u8 hw_params;
+	unsigned int hw_params = 0;
 
 	if (substream == uda134x->slave_substream) {
 		pr_debug("%s ignoring hw_params for slave substream\n",
@@ -213,10 +193,6 @@
 		return 0;
 	}
 
-	hw_params = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	hw_params &= STATUS0_SYSCLK_MASK;
-	hw_params &= STATUS0_DAIFMT_MASK;
-
 	pr_debug("%s sysclk: %d, rate:%d\n", __func__,
 		 uda134x->sysclk, params_rate(params));
 
@@ -267,9 +243,8 @@
 		return -EINVAL;
 	}
 
-	uda134x_write(codec, UDA134X_STATUS0, hw_params);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_STATUS0,
+		STATUS0_SYSCLK_MASK | STATUS0_DAIFMT_MASK, hw_params);
 }
 
 static int uda134x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -324,10 +299,8 @@
 static int uda134x_set_bias_level(struct snd_soc_codec *codec,
 				  enum snd_soc_bias_level level)
 {
-	struct uda134x_platform_data *pd = codec->control_data;
-	int i;
-	u8 *cache = codec->reg_cache;
-
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
+
 	pr_debug("%s bias level %d\n", __func__, level);
 
 	switch (level) {
@@ -337,17 +310,17 @@
 		/* power on */
 		if (pd->power) {
 			pd->power(1);
-			/* Sync reg_cache with the hardware */
-			for (i = 0; i < ARRAY_SIZE(uda134x_reg); i++)
-				codec->driver->write(codec, i, *cache++);
+			regcache_sync(uda134x->regmap);
 		}
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* power off */
-		if (pd->power)
+		if (pd->power) {
 			pd->power(0);
+			regcache_mark_dirty(uda134x->regmap);
+		}
 		break;
 	}
 	return 0;
@@ -478,21 +451,14 @@
 static int uda134x_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-	struct uda134x_priv *uda134x;
-	struct uda134x_platform_data *pd = codec->component.card->dev->platform_data;
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
 	const struct snd_soc_dapm_widget *widgets;
 	unsigned num_widgets;
-
 	int ret;
 
 	printk(KERN_INFO "UDA134X SoC Audio Codec\n");
 
-	if (!pd) {
-		printk(KERN_ERR "UDA134X SoC codec: "
-		       "missing L3 bitbang function\n");
-		return -ENODEV;
-	}
-
 	switch (pd->model) {
 	case UDA134X_UDA1340:
 	case UDA134X_UDA1341:
@@ -506,13 +472,6 @@
 		return -EINVAL;
 	}
 
-	uda134x = kzalloc(sizeof(struct uda134x_priv), GFP_KERNEL);
-	if (uda134x == NULL)
-		return -ENOMEM;
-	snd_soc_codec_set_drvdata(codec, uda134x);
-
-	codec->control_data = pd;
-
 	if (pd->power)
 		pd->power(1);
 
@@ -530,7 +489,6 @@
 	if (ret) {
 		printk(KERN_ERR "%s failed to register dapm controls: %d",
 			__func__, ret);
-		kfree(uda134x);
 		return ret;
 	}
 
@@ -551,36 +509,19 @@
 	default:
 		printk(KERN_ERR "%s unknown codec type: %d",
 			__func__, pd->model);
-		kfree(uda134x);
 		return -EINVAL;
 	}
 
 	if (ret < 0) {
 		printk(KERN_ERR "UDA134X: failed to register controls\n");
-		kfree(uda134x);
 		return ret;
 	}
 
 	return 0;
 }
 
-/* power down chip */
-static int uda134x_soc_remove(struct snd_soc_codec *codec)
-{
-	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-
-	kfree(uda134x);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.probe =        uda134x_soc_probe,
-	.remove =       uda134x_soc_remove,
-	.reg_cache_size = sizeof(uda134x_reg),
-	.reg_word_size = sizeof(u8),
-	.reg_cache_default = uda134x_reg,
-	.reg_cache_step = 1,
-	.read = uda134x_read_reg_cache,
 	.set_bias_level = uda134x_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -590,8 +531,39 @@
 	.num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
 };
 
+static const struct regmap_config uda134x_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = UDA134X_DATA1,
+	.reg_defaults = uda134x_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(uda134x_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
+
+	.reg_write = uda134x_regmap_write,
+};
+
 static int uda134x_codec_probe(struct platform_device *pdev)
 {
+	struct uda134x_platform_data *pd = pdev->dev.platform_data;
+	struct uda134x_priv *uda134x;
+
+	if (!pd) {
+		dev_err(&pdev->dev, "Missing L3 bitbang function\n");
+		return -ENODEV;
+	}
+
+	uda134x = devm_kzalloc(&pdev->dev, sizeof(*uda134x), GFP_KERNEL);
+	if (!uda134x)
+		return -ENOMEM;
+
+	uda134x->pd = pd;
+	platform_set_drvdata(pdev, uda134x);
+
+	uda134x->regmap = devm_regmap_init(&pdev->dev, NULL, pd,
+		&uda134x_regmap_config);
+	if (IS_ERR(uda134x->regmap))
+		return PTR_ERR(uda134x->regmap);
+
 	return snd_soc_register_codec(&pdev->dev,
 			&soc_codec_dev_uda134x, &uda134x_dai, 1);
 }
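
The key trick in the uda134x regmap conversion is visible in uda134x_regmap_write(): the L3 sub-address bits, which the old reg cache baked into its default values, are now ORed into the data byte at transmit time, so the cache holds pure register contents. A worked example using the driver's own constants:

/*
 * Writing the new STATUS1 default 0x03 goes out on the L3 bus as
 *   0x03 | ((UDA134X_STATUS1 - UDA134X_STATUS0) << 7)
 * = 0x03 | (1 << 7) = 0x83,
 * i.e. exactly the 0x83 the old hard-coded reg cache carried.
 */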
diff --git a/sound/soc/codecs/uda134x.h b/sound/soc/codecs/uda134x.h
index 9faae06..e41ab38 100644
--- a/sound/soc/codecs/uda134x.h
+++ b/sound/soc/codecs/uda134x.h
@@ -26,8 +26,6 @@
 #define UDA134X_DATA011 13
 #define UDA134X_DATA1   14
 
-#define UDA134X_REGS_NUM 15
-
 #define STATUS0_DAIFMT_MASK (~(7<<1))
 #define STATUS0_SYSCLK_MASK (~(3<<4))
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 6e159f5..35f0469 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -269,12 +269,11 @@
  * from -66 dB in 0.5 dB steps (2 dB steps, really) and
  * from -52 dB in 0.25 dB steps
  */
-static const unsigned int mvol_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(mvol_tlv,
 	0, 15, TLV_DB_SCALE_ITEM(-8200, 100, 1),
 	16, 43, TLV_DB_SCALE_ITEM(-6600, 50, 0),
-	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0),
-};
+	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0)
+);
 
 /*
  * from -72 dB in 1.5 dB steps (6 dB steps really),
@@ -282,13 +281,12 @@
  * from -60 dB in 0.5 dB steps (2 dB steps really) and
  * from -46 dB in 0.25 dB steps
  */
-static const unsigned int vc_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(vc_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7800, 150, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-6600, 75, 0),
 	16, 43, TLV_DB_SCALE_ITEM(-6000, 50, 0),
-	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0),
-};
+	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0)
+);
 
 /* from 0 to 6 dB in 2 dB steps if SPF mode != flat */
 static DECLARE_TLV_DB_SCALE(tr_tlv, 0, 200, 0);
@@ -810,7 +808,6 @@
 static struct i2c_driver uda1380_i2c_driver = {
 	.driver = {
 		.name =  "uda1380-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe =    uda1380_i2c_probe,
 	.remove =   uda1380_i2c_remove,
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 6560a66..f2c6ad4 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -953,7 +953,7 @@
 		trigger = IRQF_TRIGGER_FALLING;
 	trigger |= IRQF_ONESHOT;
 
-	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger | IRQF_ONESHOT,
+	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger,
 				   "wm0010", wm0010);
 	if (ret) {
 		dev_err(wm0010->dev, "Failed to request IRQ %d: %d\n",
@@ -1003,7 +1003,6 @@
 static struct spi_driver wm0010_spi_driver = {
 	.driver = {
 		.name	= "wm0010",
-		.bus 	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= wm0010_spi_probe,
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 048f005..ec45c5b 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -251,7 +251,6 @@
 static struct i2c_driver wm1250_ev1_i2c_driver = {
 	.driver = {
 		.name = "wm1250-ev1",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm1250_ev1_probe,
 	.remove =   wm1250_ev1_remove,
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 21d5402..786abd0 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -942,7 +942,6 @@
 static struct i2c_driver wm2000_i2c_driver = {
 	.driver = {
 		.name = "wm2000",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm2000_i2c_probe,
 	.remove = wm2000_i2c_remove,
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index c830832..fd1439e 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -166,7 +166,7 @@
 	{ .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE },
 };
 
-static struct reg_default wm2200_reg_defaults[] = {
+static const struct reg_default wm2200_reg_defaults[] = {
 	{ 0x000B, 0x0000 },   /* R11    - Tone Generator 1 */
 	{ 0x0102, 0x0000 },   /* R258   - Clocking 3 */
 	{ 0x0103, 0x0011 },   /* R259   - Clocking 4 */
@@ -897,7 +897,7 @@
 	}
 }
 
-static const struct reg_default wm2200_reva_patch[] = {
+static const struct reg_sequence wm2200_reva_patch[] = {
 	{ 0x07, 0x0003 },
 	{ 0x102, 0x0200 },
 	{ 0x203, 0x0084 },
@@ -1702,7 +1702,7 @@
 	int *bclk_rates;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2481,7 +2481,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm2200_pm = {
+static const struct dev_pm_ops wm2200_pm = {
 	SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
 			   NULL)
 };
@@ -2495,7 +2495,6 @@
 static struct i2c_driver wm2200_i2c_driver = {
 	.driver = {
 		.name = "wm2200",
-		.owner = THIS_MODULE,
 		.pm = &wm2200_pm,
 	},
 	.probe =    wm2200_i2c_probe,
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4c10cd8..c2cdcae 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1247,7 +1247,7 @@
 	{ "PWM2", NULL, "PWM2 Driver" },
 };
 
-static const struct reg_default wm5100_reva_patches[] = {
+static const struct reg_sequence wm5100_reva_patches[] = {
 	{ WM5100_AUDIO_IF_1_10, 0 },
 	{ WM5100_AUDIO_IF_1_11, 1 },
 	{ WM5100_AUDIO_IF_1_12, 2 },
@@ -1408,7 +1408,7 @@
 	base = dai->driver->base;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2570,13 +2570,11 @@
 
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
-						   wm5100_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
+						   wm5100_edge_irq, irq_flags,
 						   "wm5100", wm5100);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm5100_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm5100",
+						   irq_flags, "wm5100",
 						   wm5100);
 
 		if (ret != 0) {
@@ -2708,7 +2706,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm5100_pm = {
+static const struct dev_pm_ops wm5100_pm = {
 	SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume,
 			   NULL)
 };
@@ -2722,7 +2720,6 @@
 static struct i2c_driver wm5100_i2c_driver = {
 	.driver = {
 		.name = "wm5100",
-		.owner = THIS_MODULE,
 		.pm = &wm5100_pm,
 	},
 	.probe =    wm5100_i2c_probe,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index d097f09e5..64637d1 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -788,8 +788,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -801,8 +800,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -814,8 +812,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -827,8 +824,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -851,10 +847,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),
@@ -1883,7 +1879,7 @@
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls, 1);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	arizona_init_spk(codec);
 	arizona_init_gpio(codec);
@@ -1893,6 +1889,11 @@
 	priv->core.arizona->dapm = dapm;
 
 	return 0;
+
+err_adsp2_codec_probe:
+	wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
+
+	return ret;
 }
 
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 709fcc6..9756578 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -131,6 +131,25 @@
 	{ 0x33fb, 0xfe00 },
 };
 
+static const struct reg_default wm5110_sysclk_reve_patch[] = {
+	{ 0x3270, 0xE410 },
+	{ 0x3271, 0x3078 },
+	{ 0x3272, 0xE410 },
+	{ 0x3273, 0x3070 },
+	{ 0x3274, 0xE410 },
+	{ 0x3275, 0x3066 },
+	{ 0x3276, 0xE410 },
+	{ 0x3277, 0x3056 },
+	{ 0x327A, 0xE414 },
+	{ 0x327B, 0x3078 },
+	{ 0x327C, 0xE414 },
+	{ 0x327D, 0x3070 },
+	{ 0x327E, 0xE414 },
+	{ 0x327F, 0x3066 },
+	{ 0x3280, 0xE414 },
+	{ 0x3281, 0x3056 },
+};
+
 static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
@@ -146,7 +165,9 @@
 		patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
 		break;
 	default:
-		return 0;
+		patch = wm5110_sysclk_reve_patch;
+		patch_size = ARRAY_SIZE(wm5110_sysclk_reve_patch);
+		break;
 	}
 
 	switch (event) {
@@ -164,6 +185,249 @@
 	return 0;
 }
 
+static const struct reg_sequence wm5110_no_dre_left_enable[] = {
+	{ 0x3024, 0xE410 },
+	{ 0x3025, 0x0056 },
+	{ 0x301B, 0x0224 },
+	{ 0x301F, 0x4263 },
+	{ 0x3021, 0x5291 },
+	{ 0x3030, 0xE410 },
+	{ 0x3031, 0x3066 },
+	{ 0x3032, 0xE410 },
+	{ 0x3033, 0x3070 },
+	{ 0x3034, 0xE410 },
+	{ 0x3035, 0x3078 },
+	{ 0x3036, 0xE410 },
+	{ 0x3037, 0x3080 },
+	{ 0x3038, 0xE410 },
+	{ 0x3039, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_left_enable[] = {
+	{ 0x3024, 0x0231 },
+	{ 0x3025, 0x0B00 },
+	{ 0x301B, 0x0227 },
+	{ 0x301F, 0x4266 },
+	{ 0x3021, 0x5294 },
+	{ 0x3030, 0xE231 },
+	{ 0x3031, 0x0266 },
+	{ 0x3032, 0x8231 },
+	{ 0x3033, 0x4B15 },
+	{ 0x3034, 0x8231 },
+	{ 0x3035, 0x0B15 },
+	{ 0x3036, 0xE231 },
+	{ 0x3037, 0x5294 },
+	{ 0x3038, 0x0231 },
+	{ 0x3039, 0x0B00 },
+};
+
+static const struct reg_sequence wm5110_no_dre_right_enable[] = {
+	{ 0x3074, 0xE414 },
+	{ 0x3075, 0x0056 },
+	{ 0x306B, 0x0224 },
+	{ 0x306F, 0x4263 },
+	{ 0x3071, 0x5291 },
+	{ 0x3080, 0xE414 },
+	{ 0x3081, 0x3066 },
+	{ 0x3082, 0xE414 },
+	{ 0x3083, 0x3070 },
+	{ 0x3084, 0xE414 },
+	{ 0x3085, 0x3078 },
+	{ 0x3086, 0xE414 },
+	{ 0x3087, 0x3080 },
+	{ 0x3088, 0xE414 },
+	{ 0x3089, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_right_enable[] = {
+	{ 0x3074, 0x0231 },
+	{ 0x3075, 0x0B00 },
+	{ 0x306B, 0x0227 },
+	{ 0x306F, 0x4266 },
+	{ 0x3071, 0x5294 },
+	{ 0x3080, 0xE231 },
+	{ 0x3081, 0x0266 },
+	{ 0x3082, 0x8231 },
+	{ 0x3083, 0x4B17 },
+	{ 0x3084, 0x8231 },
+	{ 0x3085, 0x0B17 },
+	{ 0x3086, 0xE231 },
+	{ 0x3087, 0x5294 },
+	{ 0x3088, 0x0231 },
+	{ 0x3089, 0x0B00 },
+};
+
+static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+	const struct reg_sequence *wseq;
+	int nregs;
+
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (val & ARIZONA_DRE1L_ENA_MASK) {
+			wseq = wm5110_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_left_enable);
+		} else {
+			wseq = wm5110_no_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_left_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (val & ARIZONA_DRE1R_ENA_MASK) {
+			wseq = wm5110_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_right_enable);
+		} else {
+			wseq = wm5110_no_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_right_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	return regmap_multi_reg_write(arizona->regmap, wseq, nregs);
+}
+
+static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1L_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, ARIZONA_WS_TRG1);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1R_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, ARIZONA_WS_TRG2);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (priv->arizona->rev) {
+	case 0 ... 3:
+		break;
+	default:
+		switch (event) {
+		case SND_SOC_DAPM_PRE_PMU:
+			wm5110_hp_pre_enable(w);
+			break;
+		case SND_SOC_DAPM_PRE_PMD:
+			wm5110_hp_pre_disable(w);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return arizona_hp_ev(w, kcontrol, event);
+}
+
+static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
+{
+	struct reg_sequence clear_pga = {
+		ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
+	};
+	int ret;
+
+	ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
+	if (ret)
+		dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
+			clear_pga.reg, ret);
+
+	return ret;
+}
+
+static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int ena, dre;
+	unsigned int mask = (0x1 << mc->shift) | (0x1 << mc->rshift);
+	unsigned int lnew = (!!ucontrol->value.integer.value[0]) << mc->shift;
+	unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift;
+	unsigned int lold, rold;
+	unsigned int lena, rena;
+	int ret;
+
+	snd_soc_dapm_mutex_lock(dapm);
+
+	ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &ena);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read output state: %d\n", ret);
+		goto err;
+	}
+	ret = regmap_read(arizona->regmap, ARIZONA_DRE_ENABLE, &dre);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read DRE state: %d\n", ret);
+		goto err;
+	}
+
+	lold = dre & (1 << mc->shift);
+	rold = dre & (1 << mc->rshift);
+	/* Output enables are channel-wise swapped from the DRE enables */
+	lena = ena & (1 << mc->rshift);
+	rena = ena & (1 << mc->shift);
+
+	if ((lena && lnew != lold) || (rena && rnew != rold)) {
+		dev_err(arizona->dev, "Can't change DRE on active outputs\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE,
+				 mask, lnew | rnew);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to set DRE: %d\n", ret);
+		goto err;
+	}
+
+	/* Force reset of PGA volumes, if turning DRE off */
+	if (!lnew && lold)
+		wm5110_clear_pga_volume(arizona, mc->shift);
+
+	if (!rnew && rold)
+		wm5110_clear_pga_volume(arizona, mc->rshift);
+
+err:
+	snd_soc_dapm_mutex_unlock(dapm);
+
+	return ret;
+}
+
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
@@ -247,8 +511,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -260,8 +523,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -273,8 +535,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -286,8 +547,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -314,10 +574,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
 SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
@@ -409,12 +669,15 @@
 SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
 	   ARIZONA_SPK2R_MUTE_SHIFT, 1, 1),
 
-SOC_DOUBLE("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0),
+SOC_DOUBLE_EXT("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
 
 SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
 SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
@@ -904,11 +1167,11 @@
 		    ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
 
 SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
-		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
-		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
@@ -1611,18 +1874,24 @@
 	for (i = 0; i < WM5110_NUM_ADSP; ++i) {
 		ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
 		if (ret)
-			return ret;
+			goto err_adsp2_codec_probe;
 	}
 
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls,
 					 WM5110_NUM_ADSP);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
 
 	return 0;
+
+err_adsp2_codec_probe:
+	for (--i; i >= 0; --i)
+		wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
+
+	return ret;
 }
 
 static int wm5110_codec_remove(struct snd_soc_codec *codec)
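
Two details of the wm5110 DRE rework deserve a note. wm5110_put_dre() takes the DAPM mutex so the output-enable state it reads cannot change underneath it, and it refuses to flip DRE on an output that is currently live. And wm5110_clear_pga_volume() uses a cache-bypassed write, presumably so the forced reset reaches the hardware without clobbering the cached, user-visible PGA volume:

/*
 * Rationale as inferred from the code (an assumption, not stated in
 * the patch): regmap_multi_reg_write_bypassed() writes the hardware
 * but leaves the regmap cache untouched, so the user's last PGA
 * volume stays cached and is restored by later cached writes or a
 * regcache_sync().
 */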
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 41c62c1..ffbf3df 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -394,11 +394,10 @@
 static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
 static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
 
-static const unsigned int capture_sd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(capture_sd_tlv,
 	0, 12, TLV_DB_SCALE_ITEM(-3600, 300, 1),
-	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm8350_snd_controls[] = {
 	SOC_ENUM("Playback Deemphasis", wm8350_enum[0]),
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index d755508..b1d346a 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -370,10 +370,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8400_dapm_lin12_pga_controls[] = {
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index dac5beb..b098a83 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -598,6 +598,7 @@
 	{ .compatible = "wlf,wm8510" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8510_of_match);
 
 static const struct regmap_config wm8510_regmap = {
 	.reg_bits = 7,
@@ -690,7 +691,6 @@
 static struct i2c_driver wm8510_i2c_driver = {
 	.driver = {
 		.name = "wm8510",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8510_of_match,
 	},
 	.probe =    wm8510_i2c_probe,
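Two recurring cleanups start here and repeat across most of the codecs below. First, exporting the OF match table with MODULE_DEVICE_TABLE(of, ...) generates the module aliases userspace needs to autoload the driver when a matching compatible string appears; without it the table only matches for built-in drivers. Second, the .owner = THIS_MODULE assignments are dropped because i2c_add_driver() is a macro that passes THIS_MODULE to i2c_register_driver(), which sets the owner itself. The match-table pattern as applied to wm8510:

	static const struct of_device_id wm8510_of_match[] = {
		{ .compatible = "wlf,wm8510" },
		{ },
	};
	MODULE_DEVICE_TABLE(of, wm8510_of_match);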
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 43ea8ae..aa287a3 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -430,6 +430,7 @@
 	{ .compatible = "wlf,wm8523" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8523_of_match);
 
 static const struct regmap_config wm8523_regmap = {
 	.reg_bits = 8,
@@ -534,7 +535,6 @@
 static struct i2c_driver wm8523_i2c_driver = {
 	.driver = {
 		.name = "wm8523",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8523_of_match,
 	},
 	.probe =    wm8523_i2c_probe,
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 759a792..66602bf 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -916,6 +916,7 @@
 	{ .compatible = "wlf,wm8580" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8580_of_match);
 
 static const struct regmap_config wm8580_regmap = {
 	.reg_bits = 7,
@@ -978,7 +979,6 @@
 static struct i2c_driver wm8580_i2c_driver = {
 	.driver = {
 		.name = "wm8580",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8580_of_match,
 	},
 	.probe =    wm8580_i2c_probe,
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index cc8251f..44b9e0a 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -478,7 +478,6 @@
 static struct i2c_driver wm8711_i2c_driver = {
 	.driver = {
 		.name = "wm8711",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8711_of_match,
 	},
 	.probe =    wm8711_i2c_probe,
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index f1a173e..cd7b024 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -319,7 +319,6 @@
 static struct i2c_driver wm8728_i2c_driver = {
 	.driver = {
 		.name = "wm8728",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8728_of_match,
 	},
 	.probe =    wm8728_i2c_probe,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 915ea11..ace8645 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -79,12 +79,7 @@
 	return reg == WM8731_RESET;
 }
 
-static bool wm8731_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8731_RESET;
-}
-
-#define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
+#define wm8731_reset(m)	regmap_write(m, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
 
@@ -496,8 +491,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		if (wm8731->mclk)
-			clk_prepare_enable(wm8731->mclk);
+		if (wm8731->mclk) {
+			ret = clk_prepare_enable(wm8731->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
@@ -571,69 +569,63 @@
 	.symmetric_rates = 1,
 };
 
-static int wm8731_probe(struct snd_soc_codec *codec)
+static int wm8731_request_supplies(struct device *dev,
+		struct wm8731_priv *wm8731)
 {
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
 	int ret = 0, i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8731->supplies); i++)
 		wm8731->supplies[i].supply = wm8731_supply_names[i];
 
-	ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8731->supplies),
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(wm8731->supplies),
 				 wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		dev_err(dev, "Failed to request supplies: %d\n", ret);
 		return ret;
 	}
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 				    wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		dev_err(dev, "Failed to enable supplies: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm8731_reset(codec);
+	return 0;
+}
+
+static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
+{
+	int ret = 0;
+
+	ret = wm8731_reset(wm8731->regmap);
 	if (ret < 0) {
-		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		dev_err(dev, "Failed to issue reset: %d\n", ret);
 		goto err_regulator_enable;
 	}
 
-	snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	/* Clear POWEROFF, keep everything else disabled */
+	regmap_write(wm8731->regmap, WM8731_PWR, 0x7f);
 
 	/* Latch the update bits */
-	snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LOUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_ROUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_RINVOL, 0x100, 0);
 
 	/* Disable bypass path by default */
-	snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_APANA, 0x8, 0);
 
-	/* Regulators will have been enabled by bias management */
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
+	regcache_mark_dirty(wm8731->regmap);
 
 err_regulator_enable:
+	/* Regulators will be enabled by bias management */
 	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
 	return ret;
 }
 
-/* power down chip */
-static int wm8731_remove(struct snd_soc_codec *codec)
-{
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
-
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
-	.probe =	wm8731_probe,
-	.remove =	wm8731_remove,
 	.set_bias_level = wm8731_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -658,7 +650,6 @@
 
 	.max_register = WM8731_RESET,
 	.volatile_reg = wm8731_volatile,
-	.writeable_reg = wm8731_writeable,
 
 	.cache_type = REGCACHE_RBTREE,
 	.reg_defaults = wm8731_reg_defaults,
@@ -690,6 +681,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	spi_set_drvdata(spi, wm8731);
+
+	ret = wm8731_request_supplies(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -698,7 +695,9 @@
 		return ret;
 	}
 
-	spi_set_drvdata(spi, wm8731);
+	ret = wm8731_hw_init(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&spi->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -754,6 +753,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	i2c_set_clientdata(i2c, wm8731);
+
+	ret = wm8731_request_supplies(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -762,7 +767,9 @@
 		return ret;
 	}
 
-	i2c_set_clientdata(i2c, wm8731);
+	ret = wm8731_hw_init(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -789,7 +796,6 @@
 static struct i2c_driver wm8731_i2c_driver = {
 	.driver = {
 		.name = "wm8731",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8731_of_match,
 	},
 	.probe =    wm8731_i2c_probe,
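The wm8731 rework above splits the old codec-level probe into two bus-level helpers: wm8731_request_supplies() runs before the regmap is created, so a missing regulator fails fast (and can defer), while wm8731_hw_init() runs after it and performs the reset and register defaults through regmap rather than through a not-yet-registered codec. A condensed sketch of the resulting I2C probe ordering (error handling abbreviated):

	static int wm8731_i2c_probe(struct i2c_client *i2c,
				    const struct i2c_device_id *id)
	{
		struct wm8731_priv *wm8731;
		int ret;

		wm8731 = devm_kzalloc(&i2c->dev, sizeof(*wm8731), GFP_KERNEL);
		if (!wm8731)
			return -ENOMEM;

		i2c_set_clientdata(i2c, wm8731);

		/* 1: regulators first, before any register I/O */
		ret = wm8731_request_supplies(&i2c->dev, wm8731);
		if (ret != 0)
			return ret;

		/* 2: register map */
		wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
		if (IS_ERR(wm8731->regmap))
			return PTR_ERR(wm8731->regmap);

		/* 3: reset and default register state, via regmap */
		ret = wm8731_hw_init(&i2c->dev, wm8731);
		if (ret != 0)
			return ret;

		return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8731,
					      &wm8731_dai, 1);
	}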
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 6ad606f..e4a03d9 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -79,13 +79,12 @@
 	return snd_soc_write(codec, WM8737_RESET, 0);
 }
 
-static const unsigned int micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(micboost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
@@ -657,7 +656,6 @@
 static struct i2c_driver wm8737_i2c_driver = {
 	.driver = {
 		.name = "wm8737",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8737_of_match,
 	},
 	.probe =    wm8737_i2c_probe,
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index b346237..de42c03 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -61,25 +61,6 @@
 	{ 32, 0x0002 },     /* R32 - ADDITIONAL_CONTROL_1 */
 };
 
-static bool wm8741_readable(struct device *dev, unsigned int reg)
-{
-	switch (reg) {
-	case WM8741_DACLLSB_ATTENUATION:
-	case WM8741_DACLMSB_ATTENUATION:
-	case WM8741_DACRLSB_ATTENUATION:
-	case WM8741_DACRMSB_ATTENUATION:
-	case WM8741_VOLUME_CONTROL:
-	case WM8741_FORMAT_CONTROL:
-	case WM8741_FILTER_CONTROL:
-	case WM8741_MODE_CONTROL_1:
-	case WM8741_MODE_CONTROL_2:
-	case WM8741_ADDITIONAL_CONTROL_1:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int wm8741_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, WM8741_RESET, 0);
@@ -278,51 +259,38 @@
 	switch (freq) {
 	case 0:
 		wm8741->sysclk_constraints = NULL;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 11289600:
 		wm8741->sysclk_constraints = &constraints_11289;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 12288000:
 		wm8741->sysclk_constraints = &constraints_12288;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16384000:
 		wm8741->sysclk_constraints = &constraints_16384;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16934400:
 		wm8741->sysclk_constraints = &constraints_16934;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 18432000:
 		wm8741->sysclk_constraints = &constraints_18432;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 22579200:
 	case 33868800:
 		wm8741->sysclk_constraints = &constraints_22579;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 24576000:
 		wm8741->sysclk_constraints = &constraints_24576;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 36864000:
 		wm8741->sysclk_constraints = &constraints_36864;
-		wm8741->sysclk = freq;
-		return 0;
+		break;
+	default:
+		return -EINVAL;
 	}
-	return -EINVAL;
+
+	wm8741->sysclk = freq;
+	return 0;
 }
 
 static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
@@ -554,8 +522,6 @@
 	.reg_defaults = wm8741_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8741_reg_defaults),
 	.cache_type = REGCACHE_RBTREE,
-
-	.readable_reg = wm8741_readable,
 };
 
 static int wm8741_set_pdata(struct device *dev, struct wm8741_priv *wm8741)
@@ -633,7 +599,6 @@
 static struct i2c_driver wm8741_i2c_driver = {
 	.driver = {
 		.name = "wm8741",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8741_of_match,
 	},
 	.probe =    wm8741_i2c_probe,
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 56d89b0..873933a 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -826,7 +826,6 @@
 static struct i2c_driver wm8750_i2c_driver = {
 	.driver = {
 		.name = "wm8750",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8750_of_match,
 	},
 	.probe =    wm8750_i2c_probe,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index feb2997a..a801c6d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -138,11 +138,6 @@
 	return reg == WM8753_RESET;
 }
 
-static bool wm8753_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8753_ADCTL2;
-}
-
 /* codec private data */
 struct wm8753_priv {
 	struct regmap *regmap;
@@ -276,12 +271,11 @@
 static const DECLARE_TLV_DB_SCALE(mic_preamp_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	/* 0000000 - 0101111 = "Analogue mute" */
 	0, 48, TLV_DB_SCALE_ITEM(-25500, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0)
+);
 static const DECLARE_TLV_DB_SCALE(mix_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(voice_mix_tlv, -1200, 300, 0);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
@@ -1510,7 +1504,6 @@
 	.val_bits = 9,
 
 	.max_register = WM8753_ADCTL2,
-	.writeable_reg = wm8753_writeable,
 	.volatile_reg = wm8753_volatile,
 
 	.cache_type = REGCACHE_RBTREE,
@@ -1609,7 +1602,6 @@
 static struct i2c_driver wm8753_i2c_driver = {
 	.driver = {
 		.name = "wm8753",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8753_of_match,
 	},
 	.probe =    wm8753_i2c_probe,
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index ece9b44..183c9a4 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -265,7 +265,7 @@
 	}
 
 	/* Set word length */
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		iface = 0;
 		break;
@@ -280,7 +280,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Unsupported sample size: %i\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
 
@@ -536,7 +536,6 @@
 static struct i2c_driver wm8776_i2c_driver = {
 	.driver = {
 		.name = "wm8776",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8776_of_match,
 	},
 	.probe =    wm8776_i2c_probe,
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index 6596f5f..f27464c 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -49,7 +49,6 @@
 static struct i2c_driver wm8804_i2c_driver = {
 	.driver = {
 		.name = "wm8804",
-		.owner = THIS_MODULE,
 		.pm = &wm8804_pm,
 		.of_match_table = wm8804_of_match,
 	},
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index c195c2e..8d91470 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -98,7 +98,7 @@
 WM8804_REGULATOR_EVENT(1)
 
 static const char *txsrc_text[] = { "S/PDIF RX", "AIF" };
-static const SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
+static SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
 
 static const struct snd_kcontrol_new wm8804_tx_source_mux[] = {
 	SOC_DAPM_ENUM_EXT("Input Source", txsrc,
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index f3759ec..98900aa 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1312,7 +1312,6 @@
 static struct i2c_driver wm8900_i2c_driver = {
 	.driver = {
 		.name = "wm8900",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8900_i2c_probe,
 	.remove =   wm8900_i2c_remove,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index b5322c1..b011253 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2193,7 +2193,6 @@
 static struct i2c_driver wm8903_i2c_driver = {
 	.driver = {
 		.name = "wm8903",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8903_of_match,
 	},
 	.probe =    wm8903_i2c_probe,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 265a4a5..b783743 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1837,7 +1837,9 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		clk_prepare_enable(wm8904->mclk);
+		ret = clk_prepare_enable(wm8904->mclk);
+		if (ret)
+			return ret;
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
@@ -2292,7 +2294,6 @@
 static struct i2c_driver wm8904_i2c_driver = {
 	.driver = {
 		.name = "wm8904",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(wm8904_of_match),
 	},
 	.probe =    wm8904_i2c_probe,
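Both the wm8731 and wm8904 hunks above stop ignoring the return value of clk_prepare_enable(), which can legitimately fail. Since set_bias_level callbacks return int, the error can simply be propagated; a minimal sketch (priv->mclk is a stand-in for the driver's clock handle):

	case SND_SOC_BIAS_ON:
		ret = clk_prepare_enable(priv->mclk);
		if (ret)
			return ret;	/* leave bias level unchanged on failure */
		break;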
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 98ef0ba..f6f9395 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -787,7 +787,6 @@
 static struct i2c_driver wm8940_i2c_driver = {
 	.driver = {
 		.name = "wm8940",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8940_i2c_probe,
 	.remove =   wm8940_i2c_remove,
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2d591c2..12e4435 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -1009,7 +1009,6 @@
 static struct i2c_driver wm8955_i2c_driver = {
 	.driver = {
 		.name = "wm8955",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8955_i2c_probe,
 	.remove =   wm8955_i2c_remove,
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 94c5c46..e3b7d0c 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -48,6 +48,9 @@
 #define WM8960_DISOP     0x40
 #define WM8960_DRES_MASK 0x30
 
+static bool is_pll_freq_available(unsigned int source, unsigned int target);
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out);
 /*
  * wm8960 register cache
  * We can't read the WM8960 register space when we are
@@ -126,9 +129,12 @@
 	struct snd_soc_dapm_widget *rout1;
 	struct snd_soc_dapm_widget *out3;
 	bool deemph;
-	int playback_fs;
+	int lrclk;
 	int bclk;
 	int sysclk;
+	int clk_id;
+	int freq_in;
+	bool is_stream_in_use[2];
 	struct wm8960_data pdata;
 };
 
@@ -164,8 +170,8 @@
 	if (wm8960->deemph) {
 		best = 1;
 		for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
-			if (abs(deemph_settings[i] - wm8960->playback_fs) <
-			    abs(deemph_settings[best] - wm8960->playback_fs))
+			if (abs(deemph_settings[i] - wm8960->lrclk) <
+			    abs(deemph_settings[best] - wm8960->lrclk))
 				best = i;
 		}
 
@@ -565,6 +571,9 @@
 	{  8000, 5 },
 };
 
+/* -1 for reserved value */
+static const int sysclk_divs[] = { 1, -1, 2, -1 };
+
 /* Multiply 256 for internal 256 div */
 static const int dac_divs[] = { 256, 384, 512, 768, 1024, 1408, 1536 };
 
@@ -574,61 +583,110 @@
 	120, 160, 220, 240, 320, 320, 320
 };
 
-static void wm8960_configure_clocking(struct snd_soc_codec *codec,
-		bool tx, int lrclk)
+static int wm8960_configure_clocking(struct snd_soc_codec *codec)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	int sysclk, bclk, lrclk, freq_out, freq_in;
 	u16 iface1 = snd_soc_read(codec, WM8960_IFACE1);
-	u16 iface2 = snd_soc_read(codec, WM8960_IFACE2);
-	u32 sysclk;
-	int i, j;
+	int i, j, k;
 
 	if (!(iface1 & (1<<6))) {
 		dev_dbg(codec->dev,
 			"Codec is slave mode, no need to configure clock\n");
-		return;
+		return 0;
 	}
 
-	if (!wm8960->sysclk) {
-		dev_dbg(codec->dev, "No SYSCLK configured\n");
-		return;
+	if (wm8960->clk_id != WM8960_SYSCLK_MCLK && !wm8960->freq_in) {
+		dev_err(codec->dev, "No MCLK configured\n");
+		return -EINVAL;
 	}
 
-	if (!wm8960->bclk || !lrclk) {
-		dev_dbg(codec->dev, "No audio clocks configured\n");
-		return;
+	freq_in = wm8960->freq_in;
+	bclk = wm8960->bclk;
+	lrclk = wm8960->lrclk;
+	/*
+	 * In sysclk auto mode, check whether MCLK can provide sysclk. If it
+	 * can, use MCLK to provide sysclk directly. Otherwise, automatically
+	 * select an available PLL output frequency and configure the PLL.
+	 */
+	if (wm8960->clk_id == WM8960_SYSCLK_AUTO) {
+		/* disable the PLL and use MCLK to provide sysclk */
+		wm8960_set_pll(codec, 0, 0);
+		freq_out = freq_in;
+	} else if (wm8960->sysclk) {
+		freq_out = wm8960->sysclk;
+	} else {
+		dev_err(codec->dev, "No SYSCLK configured\n");
+		return -EINVAL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dac_divs); ++i) {
-		if (wm8960->sysclk == lrclk * dac_divs[i]) {
-			for (j = 0; j < ARRAY_SIZE(bclk_divs); ++j) {
-				sysclk = wm8960->bclk * bclk_divs[j] / 10;
-				if (wm8960->sysclk == sysclk)
+	/* check if the sysclk frequency is available. */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		sysclk = freq_out / sysclk_divs[i];
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			if (sysclk == dac_divs[j] * lrclk) {
+				for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
+					if (sysclk == bclk * bclk_divs[k] / 10)
+						break;
+				if (k != ARRAY_SIZE(bclk_divs))
 					break;
 			}
-			if(j != ARRAY_SIZE(bclk_divs))
+		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
+	}
+
+	if (i != ARRAY_SIZE(sysclk_divs)) {
+		goto configure_clock;
+	} else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
+	}
+	/* find an available PLL output frequency and set up the PLL */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			sysclk = lrclk * dac_divs[j];
+			freq_out = sysclk * sysclk_divs[i];
+
+			for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) {
+				if (sysclk == bclk * bclk_divs[k] / 10 &&
+				    is_pll_freq_available(freq_in, freq_out)) {
+					wm8960_set_pll(codec,
+						       freq_in, freq_out);
+					break;
+				} else {
+					continue;
+				}
+			}
+			if (k != ARRAY_SIZE(bclk_divs))
 				break;
 		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
 	}
 
-	if (i == ARRAY_SIZE(dac_divs)) {
-		dev_err(codec->dev, "Unsupported sysclk %d\n", wm8960->sysclk);
-		return;
+	if (i == ARRAY_SIZE(sysclk_divs)) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
 	}
 
-	/*
-	 * configure frame clock. If ADCLRC configure as GPIO pin, DACLRC
-	 * pin is used as a frame clock for ADCs and DACs.
-	 */
-	if (iface2 & (1<<6))
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (!tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, i << 6);
+configure_clock:
+	/* configure the sysclk divider */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 3 << 1, i << 1);
+
+	/* configure frame clock */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, j << 3);
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, j << 6);
 
 	/* configure bit clock */
-	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, j);
+	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, k);
+
+	return 0;
 }
 
 static int wm8960_hw_params(struct snd_pcm_substream *substream,
@@ -667,9 +725,9 @@
 		return -EINVAL;
 	}
 
+	wm8960->lrclk = params_rate(params);
 	/* Update filters for the new rate */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		wm8960->playback_fs = params_rate(params);
+	if (tx) {
 		wm8960_set_deemph(codec);
 	} else {
 		for (i = 0; i < ARRAY_SIZE(alc_rates); i++)
@@ -682,7 +740,23 @@
 	/* set iface */
 	snd_soc_write(codec, WM8960_IFACE1, iface);
 
-	wm8960_configure_clocking(codec, tx, params_rate(params));
+	wm8960->is_stream_in_use[tx] = true;
+
+	if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON &&
+	    !wm8960->is_stream_in_use[!tx])
+		return wm8960_configure_clocking(codec);
+
+	return 0;
+}
+
+static int wm8960_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
+	wm8960->is_stream_in_use[tx] = false;
 
 	return 0;
 }
@@ -702,6 +776,7 @@
 				      enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int ret;
 
 	switch (level) {
@@ -721,11 +796,22 @@
 				}
 			}
 
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			/* Set VMID to 2x50k */
 			snd_soc_update_bits(codec, WM8960_POWER1, 0x180, 0x80);
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * In sysclk auto mode, disable the PLL if it is
+			 * currently enabled.
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 			break;
@@ -780,6 +866,7 @@
 					 enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int reg, ret;
 
 	switch (level) {
@@ -831,9 +918,21 @@
 					return ret;
 				}
 			}
+
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * In sysclk auto mode, disable the PLL if it is
+			 * currently enabled.
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 
@@ -892,6 +991,28 @@
 	u32 k:24;
 };
 
+static bool is_pll_freq_available(unsigned int source, unsigned int target)
+{
+	unsigned int Ndiv;
+
+	if (source == 0 || target == 0)
+		return false;
+
+	/* Scale up target to PLL operating frequency */
+	target *= 4;
+	Ndiv = target / source;
+
+	if (Ndiv < 6) {
+		source >>= 1;
+		Ndiv = target / source;
+	}
+
+	if ((Ndiv < 6) || (Ndiv > 12))
+		return false;
+
+	return true;
+}
+
 /* The size in bits of the pll divide multiplied by 10
  * to allow rounding later */
 #define FIXED_PLL_SIZE ((1 << 24) * 10)
@@ -943,10 +1064,9 @@
 	return 0;
 }
 
-static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
-		int source, unsigned int freq_in, unsigned int freq_out)
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out)
 {
-	struct snd_soc_codec *codec = codec_dai->codec;
 	u16 reg;
 	static struct _pll_div pll_div;
 	int ret;
@@ -986,6 +1106,20 @@
 	return 0;
 }
 
+static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+
+	wm8960->freq_in = freq_in;
+
+	if (pll_id == WM8960_SYSCLK_AUTO)
+		return 0;
+
+	return wm8960_set_pll(codec, freq_in, freq_out);
+}
+
 static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
 		int div_id, int div)
 {
@@ -1043,11 +1177,14 @@
 		snd_soc_update_bits(codec, WM8960_CLOCK1,
 					0x1, WM8960_SYSCLK_PLL);
 		break;
+	case WM8960_SYSCLK_AUTO:
+		break;
 	default:
 		return -EINVAL;
 	}
 
 	wm8960->sysclk = freq;
+	wm8960->clk_id = clk_id;
 
 	return 0;
 }
@@ -1060,6 +1197,7 @@
 
 static const struct snd_soc_dai_ops wm8960_dai_ops = {
 	.hw_params = wm8960_hw_params,
+	.hw_free = wm8960_hw_free,
 	.digital_mute = wm8960_mute,
 	.set_fmt = wm8960_set_dai_fmt,
 	.set_clkdiv = wm8960_set_dai_clkdiv,
@@ -1216,7 +1354,6 @@
 static struct i2c_driver wm8960_i2c_driver = {
 	.driver = {
 		.name = "wm8960",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8960_of_match,
 	},
 	.probe =    wm8960_i2c_probe,
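The is_pll_freq_available() helper added to wm8960.c above encodes the hardware constraint behind the auto-mode search: the PLL's integer divider Ndiv = (4 * freq_out) / freq_in must land in 6..12, with one retry after halving the input via the prescaler. A worked example with assumed rates (MCLK = 24 MHz; candidate PLL output = 24.576 MHz, i.e. a 12.288 MHz sysclk with SYSCLKDIV = 2):

	unsigned int source = 24000000;		/* MCLK, assumed */
	unsigned int target = 24576000 * 4;	/* PLL runs at 4x the wanted output */
	unsigned int ndiv = target / source;	/* 98304000 / 24000000 = 4: below 6 */

	if (ndiv < 6) {
		source >>= 1;			/* engage the input prescaler */
		ndiv = target / source;		/* 98304000 / 12000000 = 8: in range, usable */
	}

If even the prescaled Ndiv falls outside 6..12, is_pll_freq_available() returns false and wm8960_configure_clocking() moves on to the next sysclk_divs/dac_divs/bclk_divs combination.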
diff --git a/sound/soc/codecs/wm8960.h b/sound/soc/codecs/wm8960.h
index 2d8163d..ab3220d 100644
--- a/sound/soc/codecs/wm8960.h
+++ b/sound/soc/codecs/wm8960.h
@@ -82,6 +82,7 @@
 
 #define WM8960_SYSCLK_MCLK		(0 << 0)
 #define WM8960_SYSCLK_PLL		(1 << 0)
+#define WM8960_SYSCLK_AUTO		(2 << 0)
 
 #define WM8960_DAC_DIV_1		(0 << 3)
 #define WM8960_DAC_DIV_1_5		(1 << 3)
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index a057662..e30446a 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -331,13 +331,12 @@
 static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
-static unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0,  0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
 
 static const struct snd_kcontrol_new wm8961_snd_controls[] = {
@@ -982,7 +981,6 @@
 static struct i2c_driver wm8961_i2c_driver = {
 	.driver = {
 		.name = "wm8961",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8961_i2c_probe,
 	.remove =   wm8961_i2c_remove,
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index c5748fd..b4eb975 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -113,7 +113,7 @@
 WM8962_REGULATOR_EVENT(6)
 WM8962_REGULATOR_EVENT(7)
 
-static struct reg_default wm8962_reg[] = {
+static const struct reg_default wm8962_reg[] = {
 	{ 0, 0x009F },   /* R0     - Left Input volume */
 	{ 1, 0x049F },   /* R1     - Right Input volume */
 	{ 2, 0x0000 },   /* R2     - HPOUTL volume */
@@ -1456,14 +1456,13 @@
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
-static const unsigned int mixinpga_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mixinpga_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
 	2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
 	3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
 	5, 5, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0)
+);
 static const DECLARE_TLV_DB_SCALE(beep_tlv, -9600, 600, 1);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -1471,11 +1470,10 @@
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
-static const unsigned int classd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(classd_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 
 static int wm8962_dsp2_write_config(struct snd_soc_codec *codec)
@@ -3495,7 +3493,7 @@
 };
 
 /* Improve power consumption for IN4 DC measurement mode */
-static const struct reg_default wm8962_dc_measure[] = {
+static const struct reg_sequence wm8962_dc_measure[] = {
 	{ 0xfd, 0x1 },
 	{ 0xcc, 0x40 },
 	{ 0xfd, 0 },
@@ -3859,7 +3857,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm8962_pm = {
+static const struct dev_pm_ops wm8962_pm = {
 	SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };
 
@@ -3878,7 +3876,6 @@
 static struct i2c_driver wm8962_i2c_driver = {
 	.driver = {
 		.name = "wm8962",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8962_of_match,
 		.pm = &wm8962_pm,
 	},
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index b51184c..2cdde32 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -710,7 +710,6 @@
 static struct i2c_driver wm8971_i2c_driver = {
 	.driver = {
 		.name = "wm8971",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8971_i2c_probe,
 	.remove =   wm8971_i2c_remove,
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 33b16a7..0a60677 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -634,7 +634,6 @@
 static struct i2c_driver wm8974_i2c_driver = {
 	.driver = {
 		.name = "wm8974",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8974_i2c_probe,
 	.remove =   wm8974_i2c_remove,
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index cfc8cdf..d36d600 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -1072,7 +1072,6 @@
 static struct i2c_driver wm8978_i2c_driver = {
 	.driver = {
 		.name = "wm8978",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8978_i2c_probe,
 	.remove =   wm8978_i2c_remove,
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index 2fdd2c6..f3193fb 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -84,66 +84,6 @@
 	{ 0x3D, 0x0000 },      /* R61 - BIAS CTRL */
 };
 
-static const struct wm8983_reg_access {
-	u16 read; /* Mask of readable bits */
-	u16 write; /* Mask of writable bits */
-} wm8983_access_masks[WM8983_MAX_REGISTER + 1] = {
-	[0x00] = { 0x0000, 0x01FF }, /* R0  - Software Reset */
-	[0x01] = { 0x0000, 0x01FF }, /* R1  - Power management 1 */
-	[0x02] = { 0x0000, 0x01FF }, /* R2  - Power management 2 */
-	[0x03] = { 0x0000, 0x01EF }, /* R3  - Power management 3 */
-	[0x04] = { 0x0000, 0x01FF }, /* R4  - Audio Interface */
-	[0x05] = { 0x0000, 0x003F }, /* R5  - Companding control */
-	[0x06] = { 0x0000, 0x01FD }, /* R6  - Clock Gen control */
-	[0x07] = { 0x0000, 0x000F }, /* R7  - Additional control */
-	[0x08] = { 0x0000, 0x003F }, /* R8  - GPIO Control */
-	[0x09] = { 0x0000, 0x0070 }, /* R9  - Jack Detect Control 1 */
-	[0x0A] = { 0x0000, 0x004F }, /* R10 - DAC Control */
-	[0x0B] = { 0x0000, 0x01FF }, /* R11 - Left DAC digital Vol */
-	[0x0C] = { 0x0000, 0x01FF }, /* R12 - Right DAC digital vol */
-	[0x0D] = { 0x0000, 0x00FF }, /* R13 - Jack Detect Control 2 */
-	[0x0E] = { 0x0000, 0x01FB }, /* R14 - ADC Control */
-	[0x0F] = { 0x0000, 0x01FF }, /* R15 - Left ADC Digital Vol */
-	[0x10] = { 0x0000, 0x01FF }, /* R16 - Right ADC Digital Vol */
-	[0x12] = { 0x0000, 0x017F }, /* R18 - EQ1 - low shelf */
-	[0x13] = { 0x0000, 0x017F }, /* R19 - EQ2 - peak 1 */
-	[0x14] = { 0x0000, 0x017F }, /* R20 - EQ3 - peak 2 */
-	[0x15] = { 0x0000, 0x017F }, /* R21 - EQ4 - peak 3 */
-	[0x16] = { 0x0000, 0x007F }, /* R22 - EQ5 - high shelf */
-	[0x18] = { 0x0000, 0x01FF }, /* R24 - DAC Limiter 1 */
-	[0x19] = { 0x0000, 0x007F }, /* R25 - DAC Limiter 2 */
-	[0x1B] = { 0x0000, 0x01FF }, /* R27 - Notch Filter 1 */
-	[0x1C] = { 0x0000, 0x017F }, /* R28 - Notch Filter 2 */
-	[0x1D] = { 0x0000, 0x017F }, /* R29 - Notch Filter 3 */
-	[0x1E] = { 0x0000, 0x017F }, /* R30 - Notch Filter 4 */
-	[0x20] = { 0x0000, 0x01BF }, /* R32 - ALC control 1 */
-	[0x21] = { 0x0000, 0x00FF }, /* R33 - ALC control 2 */
-	[0x22] = { 0x0000, 0x01FF }, /* R34 - ALC control 3 */
-	[0x23] = { 0x0000, 0x000F }, /* R35 - Noise Gate */
-	[0x24] = { 0x0000, 0x001F }, /* R36 - PLL N */
-	[0x25] = { 0x0000, 0x003F }, /* R37 - PLL K 1 */
-	[0x26] = { 0x0000, 0x01FF }, /* R38 - PLL K 2 */
-	[0x27] = { 0x0000, 0x01FF }, /* R39 - PLL K 3 */
-	[0x29] = { 0x0000, 0x000F }, /* R41 - 3D control */
-	[0x2A] = { 0x0000, 0x01E7 }, /* R42 - OUT4 to ADC */
-	[0x2B] = { 0x0000, 0x01BF }, /* R43 - Beep control */
-	[0x2C] = { 0x0000, 0x0177 }, /* R44 - Input ctrl */
-	[0x2D] = { 0x0000, 0x01FF }, /* R45 - Left INP PGA gain ctrl */
-	[0x2E] = { 0x0000, 0x01FF }, /* R46 - Right INP PGA gain ctrl */
-	[0x2F] = { 0x0000, 0x0177 }, /* R47 - Left ADC BOOST ctrl */
-	[0x30] = { 0x0000, 0x0177 }, /* R48 - Right ADC BOOST ctrl */
-	[0x31] = { 0x0000, 0x007F }, /* R49 - Output ctrl */
-	[0x32] = { 0x0000, 0x01FF }, /* R50 - Left mixer ctrl */
-	[0x33] = { 0x0000, 0x01FF }, /* R51 - Right mixer ctrl */
-	[0x34] = { 0x0000, 0x01FF }, /* R52 - LOUT1 (HP) volume ctrl */
-	[0x35] = { 0x0000, 0x01FF }, /* R53 - ROUT1 (HP) volume ctrl */
-	[0x36] = { 0x0000, 0x01FF }, /* R54 - LOUT2 (SPK) volume ctrl */
-	[0x37] = { 0x0000, 0x01FF }, /* R55 - ROUT2 (SPK) volume ctrl */
-	[0x38] = { 0x0000, 0x004F }, /* R56 - OUT3 mixer ctrl */
-	[0x39] = { 0x0000, 0x00FF }, /* R57 - OUT4 (MONO) mix ctrl */
-	[0x3D] = { 0x0000, 0x0100 }  /* R61 - BIAS CTRL */
-};
-
 /* vol/gain update regs */
 static const int vol_update_regs[] = {
 	WM8983_LEFT_DAC_DIGITAL_VOL,
@@ -605,12 +545,19 @@
 	return 0;
 }
 
-static bool wm8983_readable(struct device *dev, unsigned int reg)
+static bool wm8983_writeable(struct device *dev, unsigned int reg)
 {
-	if (reg > WM8983_MAX_REGISTER)
-		return 0;
-
-	return wm8983_access_masks[reg].read != 0;
+	switch (reg) {
+	case WM8983_SOFTWARE_RESET ... WM8983_RIGHT_ADC_DIGITAL_VOL:
+	case WM8983_EQ1_LOW_SHELF ... WM8983_DAC_LIMITER_2:
+	case WM8983_NOTCH_FILTER_1 ... WM8983_NOTCH_FILTER_4:
+	case WM8983_ALC_CONTROL_1 ... WM8983_PLL_K_3:
+	case WM8983_3D_CONTROL ... WM8983_OUT4_MONO_MIX_CTRL:
+	case WM8983_BIAS_CTRL:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static int wm8983_dac_mute(struct snd_soc_dai *dai, int mute)
@@ -1048,8 +995,9 @@
 	.reg_defaults = wm8983_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8983_defaults),
 	.cache_type = REGCACHE_RBTREE,
+	.max_register = WM8983_MAX_REGISTER,
 
-	.readable_reg = wm8983_readable,
+	.writeable_reg = wm8983_writeable,
 };
 
 #if defined(CONFIG_SPI_MASTER)
@@ -1133,7 +1081,6 @@
 static struct i2c_driver wm8983_i2c_driver = {
 	.driver = {
 		.name = "wm8983",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8983_i2c_probe,
 	.remove = wm8983_i2c_remove,
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 8a85f50..9c3c151 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver wm8985_i2c_driver = {
 	.driver = {
 		.name = "wm8985",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8985_i2c_probe,
 	.remove = wm8985_i2c_remove,
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index f13a995..c88ce99 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -919,7 +919,6 @@
 static struct i2c_driver wm8988_i2c_driver = {
 	.driver = {
 		.name = "wm8988",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8988_i2c_probe,
 	.remove =   wm8988_i2c_remove,
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 1993fd2..23ecd30 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -418,10 +418,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8990_dapm_lin12_pga_controls[] = {
@@ -1356,7 +1353,6 @@
 static struct i2c_driver wm8990_i2c_driver = {
 	.driver = {
 		.name = "wm8990",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8990_i2c_probe,
 	.remove =   wm8990_i2c_remove,
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 44a6777..c9ee0ac 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -111,45 +111,14 @@
 	}
 }
 
-static const unsigned int rec_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1500, 600),
-};
-
-static const unsigned int in_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 0x1F, TLV_DB_LINEAR_ITEM(-1650, 3000),
-};
-
-static const unsigned int out_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(0, -2100),
-};
-
-static const unsigned int out_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 127, TLV_DB_LINEAR_ITEM(-7300, 600),
-};
-
-static const unsigned int out_omix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-600, 0),
-};
-
-static const unsigned int out_dac_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 0),
-};
-
-static const unsigned int in_adc_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 1763),
-};
-
-static const unsigned int out_sidetone_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 31, TLV_DB_LINEAR_ITEM(-3600, 0),
-};
+static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
 
 static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
@@ -429,10 +398,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
-};
+static const DECLARE_TLV_DB_LINEAR(in_mix_tlv, -1200, 600);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8991_dapm_lin12_pga_controls[] = {
@@ -1363,7 +1329,6 @@
 static struct i2c_driver wm8991_i2c_driver = {
 	.driver = {
 		.name = "wm8991",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8991_i2c_probe,
 	.remove = wm8991_i2c_remove,
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 8a8db86..8668c4c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -41,7 +41,7 @@
 	"SPKVDD",
 };
 
-static struct reg_default wm8993_reg_defaults[] = {
+static const struct reg_default wm8993_reg_defaults[] = {
 	{ 1,   0x0000 },     /* R1   - Power Management (1) */
 	{ 2,   0x6000 },     /* R2   - Power Management (2) */
 	{ 3,   0x0000 },     /* R3   - Power Management (3) */
@@ -628,11 +628,10 @@
 static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static const unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -1800, 300, 0);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
@@ -1595,7 +1594,7 @@
 #endif
 
 /* Tune DC servo configuration */
-static struct reg_default wm8993_regmap_patch[] = {
+static const struct reg_sequence wm8993_regmap_patch[] = {
 	{ 0x44, 3 },
 	{ 0x56, 3 },
 	{ 0x44, 0 },
@@ -1742,7 +1741,6 @@
 static struct i2c_driver wm8993_i2c_driver = {
 	.driver = {
 		.name = "wm8993",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8993_i2c_probe,
 	.remove =   wm8993_i2c_remove,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 962e1d3..2ccbb32 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1942,14 +1942,16 @@
 	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
 	/* AIF3 output */
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1R" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2R" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCL" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCR" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1R" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2R" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACL" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACR" },
+
+	{ "AIF3ADCDAT", NULL, "AIF3ADC Mux" },
 
 	/* Loopback */
 	{ "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 505b65f..eda52a9 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2298,7 +2298,6 @@
 static struct i2c_driver wm8995_i2c_driver = {
 	.driver = {
 		.name = "wm8995",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8995_i2c_probe,
 	.remove = wm8995_i2c_remove,
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 3dd063f..f7ccd9f 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -117,7 +117,7 @@
 WM8996_REGULATOR_EVENT(1)
 WM8996_REGULATOR_EVENT(2)
 
-static struct reg_default wm8996_reg[] = {
+static const struct reg_default wm8996_reg[] = {
 	{ WM8996_POWER_MANAGEMENT_1, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_2, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_3, 0x0 },
@@ -1780,7 +1780,7 @@
 	wm8996->rx_rate[dai->id] = params_rate(params);
 
 	/* Needs looking at for TDM */
-	bits = snd_pcm_format_width(params_format(params));
+	bits = params_width(params);
 	if (bits < 0)
 		return bits;
 	aifdata |= (bits << WM8996_AIF1TX_WL_SHIFT) | bits;
@@ -2647,12 +2647,10 @@
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
 						   wm8996_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm8996_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 
 		if (ret == 0) {
 			/* Unmask the interrupt */
@@ -3100,7 +3098,6 @@
 static struct i2c_driver wm8996_i2c_driver = {
 	.driver = {
 		.name = "wm8996",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8996_i2c_probe,
 	.remove =   wm8996_i2c_remove,
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 4134dc7..b4dba3a 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -174,8 +174,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -187,8 +186,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -200,8 +198,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -213,8 +210,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -242,10 +238,10 @@
 SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
 SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
 SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 8a8b1c0..ccb3b15 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -30,7 +30,7 @@
 #include <sound/wm9081.h>
 #include "wm9081.h"
 
-static struct reg_default wm9081_reg[] = {
+static const struct reg_default wm9081_reg[] = {
 	{  2, 0x00B9 },     /* R2  - Analogue Lineout */
 	{  3, 0x00B9 },     /* R3  - Analogue Speaker PGA */
 	{  4, 0x0001 },     /* R4  - VMID Control */
@@ -243,13 +243,12 @@
 static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_out_tlv, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1200, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -300, 50, 0);
 
@@ -1378,7 +1377,6 @@
 static struct i2c_driver wm9081_i2c_driver = {
 	.driver = {
 		.name = "wm9081",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm9081_i2c_probe,
 	.remove =   wm9081_i2c_remove,
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 13d23fc..5d73729 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -162,23 +162,20 @@
 		dev_err(codec->dev, "Timed out waiting for DC Servo\n");
 }
 
-static const unsigned int in_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(in_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
 	1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
-	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
-};
-static const unsigned int mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0)
+);
+static const DECLARE_TLV_DB_RANGE(mix_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9090_controls[] = {
 SOC_SINGLE_TLV("IN1A Volume", WM9090_IN1_LINE_INPUT_A_VOLUME, 0, 6, 0,
@@ -636,7 +633,6 @@
 static struct i2c_driver wm9090_i2c_driver = {
 	.driver = {
 		.name = "wm9090",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm9090_i2c_probe,
 	.remove = wm9090_i2c_remove,
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 5cc457e..744842c 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -22,6 +22,9 @@
 
 #include "wm9705.h"
 
+#define WM9705_VENDOR_ID 0x574d4c05
+#define WM9705_VENDOR_ID_MASK 0xffffffff
+
 /*
  * WM9705 register cache
  */
@@ -293,21 +296,6 @@
 	}
 };
 
-static int wm9705_reset(struct snd_soc_codec *codec)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (soc_ac97_ops->reset) {
-		soc_ac97_ops->reset(ac97);
-		if (ac97_read(codec, 0) == wm9705_reg[0])
-			return 0; /* Success */
-	}
-
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-
-	return -EIO;
-}
-
 #ifdef CONFIG_PM
 static int wm9705_soc_suspend(struct snd_soc_codec *codec)
 {
@@ -324,7 +312,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9705_reset(codec);
+	ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -342,30 +331,17 @@
 static int wm9705_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_alloc_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (IS_ERR(ac97)) {
-		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec\n");
-		return ret;
+		return PTR_ERR(ac97);
 	}
 
-	ret = wm9705_reset(codec);
-	if (ret)
-		goto err_put_device;
-
-	ret = device_add(&ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	snd_soc_codec_set_drvdata(codec, ac97);
 
 	return 0;
-
-err_put_device:
-	put_device(&ac97->dev);
-	return ret;
 }
 
 static int wm9705_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 1fda104..488a922 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,9 @@
 #include <sound/tlv.h>
 #include "wm9712.h"
 
+#define WM9712_VENDOR_ID 0x574d4c12
+#define WM9712_VENDOR_ID_MASK 0xffffffff
+
 struct wm9712_priv {
 	struct snd_ac97 *ac97;
 	unsigned int hp_mixer[2];
@@ -613,35 +616,14 @@
 	return 0;
 }
 
-static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-		if (ac97_read(codec, 0) == wm9712_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9712->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-	if (ac97_read(codec, 0) != wm9712_reg[0])
-		goto err;
-	return 0;
-
-err:
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-	return -EIO;
-}
-
 static int wm9712_soc_resume(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9712_reset(codec, 1);
+	ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -663,31 +645,20 @@
 static int wm9712_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
+	int ret;
 
-	wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (IS_ERR(wm9712->ac97)) {
 		ret = PTR_ERR(wm9712->ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm9712_reset(codec, 0);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9712->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* set alc mux to none */
 	ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9712->ac97->dev);
-	return ret;
 }
 
 static int wm9712_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 89cd2d6..4083a51 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -29,6 +29,9 @@
 
 #include "wm9713.h"
 
+#define WM9713_VENDOR_ID 0x574d4c13
+#define WM9713_VENDOR_ID_MASK 0xffffffff
+
 struct wm9713_priv {
 	struct snd_ac97 *ac97;
 	u32 pll_in; /* PLL input frequency */
@@ -116,11 +119,10 @@
 static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(main_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(misc_tlv, -1500, 300, 0);
-static unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9713_snd_ac97_controls[] = {
 SOC_DOUBLE_TLV("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1, out_tlv),
@@ -1123,28 +1125,6 @@
 	},
 };
 
-int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-		if (ac97_read(codec, 0) == wm9713_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9713->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-	if (ac97_read(codec, 0) != wm9713_reg[0]) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(wm9713_reset);
-
 static int wm9713_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
@@ -1196,7 +1176,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9713_reset(codec, 1);
+	ret = snd_ac97_reset(wm9713->ac97, true, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -1222,32 +1203,18 @@
 static int wm9713_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0, reg;
+	int reg;
 
-	wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (IS_ERR(wm9713->ac97))
 		return PTR_ERR(wm9713->ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	wm9713_reset(codec, 0);
-	ret = wm9713_reset(codec, 1);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9713->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* unmute the adc - move to kcontrol */
 	reg = ac97_read(codec, AC97_CD) & 0x7fff;
 	ac97_write(codec, AC97_CD, reg);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9713->ac97->dev);
-	return ret;
 }
 
 static int wm9713_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 793da86..53df11b 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -45,6 +45,4 @@
 #define WM9713_DAI_AC97_AUX		1
 #define WM9713_DAI_PCM_VOICE	2
 
-int wm9713_reset(struct snd_soc_codec *codec,  int try_warm);
-
 #endif
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index fd86bd1..624b3b9 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -38,11 +38,10 @@
 static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
 static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(line_tlv, -600, 600, 0);
 
 static const char *speaker_ref_text[] = {
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 56cb4d9..ec98548 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -651,23 +651,15 @@
 static int davinci_i2s_probe(struct platform_device *pdev)
 {
 	struct davinci_mcbsp_dev *dev;
-	struct resource *mem, *ioarea, *res;
+	struct resource *mem, *res;
+	void __iomem *io_base;
 	int *dma;
 	int ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "no mem resource?\n");
-		return -ENODEV;
-	}
-
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-					 resource_size(mem),
-					 pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "McBSP region already claimed\n");
-		return -EBUSY;
-	}
+	io_base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev),
 			   GFP_KERNEL);
@@ -679,12 +671,7 @@
 		return -ENODEV;
 	clk_enable(dev->clk);
 
-	dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!dev->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_release_clk;
-	}
+	dev->base = io_base;
 
 	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
 	    (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
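
devm_ioremap_resource() folds the NULL-resource check, devm_request_mem_region() and devm_ioremap() into one call that reports failure via ERR_PTR(), which is why the two hand-rolled error paths above collapse into a single IS_ERR() test. A minimal sketch of the resulting probe shape (the names here are illustrative, not from this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *mem;
		void __iomem *base;

		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* a NULL 'mem' is rejected here with ERR_PTR(-EINVAL) */
		base = devm_ioremap_resource(&pdev->dev, mem);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* region claimed and mapped; released automatically on unbind */
		return 0;
	}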
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b960e62..add6bb9 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1613,7 +1613,7 @@
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
 	struct snd_dmaengine_dai_dma_data *dma_data;
-	struct resource *mem, *ioarea, *res, *dat;
+	struct resource *mem, *res, *dat;
 	struct davinci_mcasp_pdata *pdata;
 	struct davinci_mcasp *mcasp;
 	char *irq_name;
@@ -1648,22 +1648,12 @@
 		}
 	}
 
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem), pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "Audio region already claimed\n");
-		return -EBUSY;
-	}
+	mcasp->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(mcasp->base))
+		return PTR_ERR(mcasp->base);
 
 	pm_runtime_enable(&pdev->dev);
 
-	mcasp->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!mcasp->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	mcasp->op_mode = pdata->op_mode;
 	/* sanity check for tdm slots parameter */
 	if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index fabd05f..c77d921 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -231,8 +231,9 @@
 
 	dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
 
-	ret = snd_soc_register_component(&pdev->dev, &davinci_vcif_component,
-					 &davinci_vcif_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &davinci_vcif_component,
+					      &davinci_vcif_dai, 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "could not register dai\n");
 		return ret;
@@ -241,23 +242,14 @@
 	ret = edma_pcm_platform_register(&pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
-		snd_soc_unregister_component(&pdev->dev);
 		return ret;
 	}
 
 	return 0;
 }
 
-static int davinci_vcif_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-
-	return 0;
-}
-
 static struct platform_driver davinci_vcif_driver = {
 	.probe		= davinci_vcif_probe,
-	.remove		= davinci_vcif_remove,
 	.driver		= {
 		.name	= "davinci-vcif",
 	},
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index e1aa3834..883087f 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -182,7 +182,7 @@
 		);
 	} else {
 		if (np) {
-			/* The eukrea,asoc-tlv320 driver was explicitely
+			/* The eukrea,asoc-tlv320 driver was explicitly
 			 * requested (through the device tree).
 			 */
 			dev_err(&pdev->dev,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index de43887..5aeb6ed 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -23,6 +23,7 @@
 
 #include "../codecs/sgtl5000.h"
 #include "../codecs/wm8962.h"
+#include "../codecs/wm8960.h"
 
 #define RX 0
 #define TX 1
@@ -407,6 +408,7 @@
 	struct fsl_asoc_card_priv *priv;
 	struct i2c_client *codec_dev;
 	struct clk *codec_clk;
+	const char *codec_dai_name;
 	u32 width;
 	int ret;
 
@@ -459,6 +461,7 @@
 
 	/* Diversify the card configurations */
 	if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
+		codec_dai_name = "cs42888";
 		priv->card.set_bias_level = NULL;
 		priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
 		priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
@@ -467,14 +470,22 @@
 		priv->cpu_priv.slot_width = 32;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
+		codec_dai_name = "sgtl5000";
 		priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
+		codec_dai_name = "wm8962";
 		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
 		priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
 		priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
 		priv->codec_priv.pll_id = WM8962_FLL;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
+		codec_dai_name = "wm8960-hifi";
+		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
+		priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
+		priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
+		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else {
 		dev_err(&pdev->dev, "unknown Device Tree compatible\n");
 		return -EINVAL;
@@ -521,7 +532,7 @@
 	/* Normal DAI Link */
 	priv->dai_link[0].cpu_of_node = cpu_np;
 	priv->dai_link[0].codec_of_node = codec_np;
-	priv->dai_link[0].codec_dai_name = codec_dev->name;
+	priv->dai_link[0].codec_dai_name = codec_dai_name;
 	priv->dai_link[0].platform_of_node = cpu_np;
 	priv->dai_link[0].dai_fmt = priv->dai_fmt;
 	priv->card.num_links = 1;
@@ -530,7 +541,7 @@
 		/* DPCM DAI Links only if ASRC exists */
 		priv->dai_link[1].cpu_of_node = asrc_np;
 		priv->dai_link[1].platform_of_node = asrc_np;
-		priv->dai_link[2].codec_dai_name = codec_dev->name;
+		priv->dai_link[2].codec_dai_name = codec_dai_name;
 		priv->dai_link[2].codec_of_node = codec_np;
 		priv->dai_link[2].cpu_of_node = cpu_np;
 		priv->dai_link[2].dai_fmt = priv->dai_fmt;
@@ -578,6 +589,7 @@
 	{ .compatible = "fsl,imx-audio-cs42888", },
 	{ .compatible = "fsl,imx-audio-sgtl5000", },
 	{ .compatible = "fsl,imx-audio-wm8962", },
+	{ .compatible = "fsl,imx-audio-wm8960", },
 	{}
 };
 
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index c068494..9f087d4 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -931,14 +931,29 @@
 static int fsl_asrc_runtime_resume(struct device *dev)
 {
 	struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
-	int i;
+	int i, ret;
 
-	clk_prepare_enable(asrc_priv->mem_clk);
-	clk_prepare_enable(asrc_priv->ipg_clk);
-	for (i = 0; i < ASRC_CLK_MAX_NUM; i++)
-		clk_prepare_enable(asrc_priv->asrck_clk[i]);
+	ret = clk_prepare_enable(asrc_priv->mem_clk);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(asrc_priv->ipg_clk);
+	if (ret)
+		goto disable_mem_clk;
+	for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
+		ret = clk_prepare_enable(asrc_priv->asrck_clk[i]);
+		if (ret)
+			goto disable_asrck_clk;
+	}
 
 	return 0;
+
+disable_asrck_clk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(asrc_priv->asrck_clk[i]);
+	clk_disable_unprepare(asrc_priv->ipg_clk);
+disable_mem_clk:
+	clk_disable_unprepare(asrc_priv->mem_clk);
+	return ret;
 }
 
 static int fsl_asrc_runtime_suspend(struct device *dev)
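
The resume path now follows the usual unwind rule for enabling a series of clocks: if clk_prepare_enable() fails at index i, the clocks at indices i-1 down to 0 are disabled in reverse order before the error is propagated. The pattern in isolation, as a sketch rather than code from this patch:

	static int enable_all(struct clk **clks, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		/* undo only the clocks that were successfully enabled */
		while (--i >= 0)
			clk_disable_unprepare(clks[i]);
		return ret;
	}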
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 45eee13..837979e 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -839,7 +839,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 5c73bea..a18fd92 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -791,7 +791,7 @@
 		return ret;
 
 	if (sai->sai_on_imx)
-		return imx_pcm_dma_init(pdev);
+		return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
 	else
 		return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 }
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 0662809..b95fbc3 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -13,7 +13,8 @@
 
 #define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
 			 SNDRV_PCM_FMTBIT_S20_3LE |\
-			 SNDRV_PCM_FMTBIT_S24_LE)
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
 
 /* SAI Register Map Register */
 #define FSL_SAI_TCSR	0x00 /* SAI Transmit Control */
@@ -45,7 +46,7 @@
 #define FSL_SAI_xFR(tx)		(tx ? FSL_SAI_TFR : FSL_SAI_RFR)
 #define FSL_SAI_xMR(tx)		(tx ? FSL_SAI_TMR : FSL_SAI_RMR)
 
-/* SAI Transmit/Recieve Control Register */
+/* SAI Transmit/Receive Control Register */
 #define FSL_SAI_CSR_TERE	BIT(31)
 #define FSL_SAI_CSR_FR		BIT(25)
 #define FSL_SAI_CSR_SR		BIT(24)
@@ -67,10 +68,10 @@
 #define FSL_SAI_CSR_FRIE	BIT(8)
 #define FSL_SAI_CSR_FRDE	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 1 Register */
+/* SAI Transmit and Receive Configuration 1 Register */
 #define FSL_SAI_CR1_RFW_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 2 Register */
+/* SAI Transmit and Receive Configuration 2 Register */
 #define FSL_SAI_CR2_SYNC	BIT(30)
 #define FSL_SAI_CR2_MSEL_MASK	(0x3 << 26)
 #define FSL_SAI_CR2_MSEL_BUS	0
@@ -82,12 +83,12 @@
 #define FSL_SAI_CR2_BCD_MSTR	BIT(24)
 #define FSL_SAI_CR2_DIV_MASK	0xff
 
-/* SAI Transmit and Recieve Configuration 3 Register */
+/* SAI Transmit and Receive Configuration 3 Register */
 #define FSL_SAI_CR3_TRCE	BIT(16)
 #define FSL_SAI_CR3_WDFL(x)	(x)
 #define FSL_SAI_CR3_WDFL_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 4 Register */
+/* SAI Transmit and Receive Configuration 4 Register */
 #define FSL_SAI_CR4_FRSZ(x)	(((x) - 1) << 16)
 #define FSL_SAI_CR4_FRSZ_MASK	(0x1f << 16)
 #define FSL_SAI_CR4_SYWD(x)	(((x) - 1) << 8)
@@ -97,7 +98,7 @@
 #define FSL_SAI_CR4_FSP		BIT(1)
 #define FSL_SAI_CR4_FSD_MSTR	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 5 Register */
+/* SAI Transmit and Receive Configuration 5 Register */
 #define FSL_SAI_CR5_WNW(x)	(((x) - 1) << 24)
 #define FSL_SAI_CR5_WNW_MASK	(0x1f << 24)
 #define FSL_SAI_CR5_W0W(x)	(((x) - 1) << 16)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8e93221..ab729f2 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -454,7 +454,8 @@
 	struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	struct platform_device *pdev = spdif_priv->pdev;
 	struct regmap *regmap = spdif_priv->regmap;
-	u32 scr, mask, i;
+	u32 scr, mask;
+	int i;
 	int ret;
 
 	/* Reset module and interrupts only for first initialization */
@@ -482,13 +483,18 @@
 		mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
 			SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
 			SCR_TXFIFO_FSEL_MASK;
-		for (i = 0; i < SPDIF_TXRATE_MAX; i++)
-			clk_prepare_enable(spdif_priv->txclk[i]);
+		for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
+			ret = clk_prepare_enable(spdif_priv->txclk[i]);
+			if (ret)
+				goto disable_txclk;
+		}
 	} else {
 		scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC;
 		mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
 			SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
-		clk_prepare_enable(spdif_priv->rxclk);
+		ret = clk_prepare_enable(spdif_priv->rxclk);
+		if (ret)
+			goto err;
 	}
 	regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
 
@@ -497,6 +503,9 @@
 
 	return 0;
 
+disable_txclk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(spdif_priv->txclk[i]);
 err:
 	clk_disable_unprepare(spdif_priv->coreclk);
 
@@ -707,7 +716,7 @@
 	return ret;
 }
 
-/* Q-subcode infomation. The byte size is SPDIF_UBITS_SIZE/8 */
+/* Q-subcode information. The byte size is SPDIF_UBITS_SIZE/8 */
 static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -739,7 +748,7 @@
 	return ret;
 }
 
-/* Valid bit infomation */
+/* Valid bit information */
 static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -767,7 +776,7 @@
 	return 0;
 }
 
-/* DPLL lock infomation */
+/* DPLL lock information */
 static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -1255,7 +1264,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c0b940e..8ec6fb2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -156,7 +156,7 @@
  *
  * @dbg_stats: Debugging statistics
  *
- * @soc: SoC specifc data
+ * @soc: SoC specific data
  */
 struct fsl_ssi_private {
 	struct regmap *regs;
@@ -900,14 +900,16 @@
 		scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
 		break;
 	default:
-		return -EINVAL;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			return -EINVAL;
 	}
 
 	stcr |= strcr;
 	srcr |= strcr;
 
-	if (ssi_private->cpu_dai_drv.symmetric_rates) {
-		/* Need to clear RXDIR when using SYNC mode */
+	if (ssi_private->cpu_dai_drv.symmetric_rates
+			|| fsl_ssi_is_ac97(ssi_private)) {
+		/* Need to clear RXDIR when using SYNC or AC97 mode */
 		srcr &= ~CCSR_SSI_SRCR_RXDIR;
 		scr |= CCSR_SSI_SCR_SYN;
 	}
@@ -1101,6 +1103,7 @@
 
 static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
 	.bus_control = true,
+	.probe = fsl_ssi_dai_probe,
 	.playback = {
 		.stream_name = "AC97 Playback",
 		.channels_min = 2,
@@ -1127,10 +1130,17 @@
 	struct regmap *regs = fsl_ac97_data->regs;
 	unsigned int lreg;
 	unsigned int lval;
+	int ret;
 
 	if (reg > 0x7f)
 		return;
 
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 write clk_prepare_enable failed: %d\n",
+			ret);
+		return;
+	}
 
 	lreg = reg <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1141,6 +1151,8 @@
 	regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
 			CCSR_SSI_SACNT_WR);
 	udelay(100);
+
+	clk_disable_unprepare(fsl_ac97_data->clk);
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1151,6 +1163,14 @@
 	unsigned short val = -1;
 	u32 reg_val;
 	unsigned int lreg;
+	int ret;
+
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 read clk_prepare_enable failed: %d\n",
+			ret);
+		return -1;
+	}
 
 	lreg = (reg & 0x7f) <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1162,6 +1182,8 @@
 	regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
 	val = (reg_val >> 4) & 0xffff;
 
+	clk_disable_unprepare(fsl_ac97_data->clk);
+
 	return val;
 }
 
@@ -1210,7 +1232,7 @@
 		}
 	}
 
-	/* For those SLAVE implementations, we ingore non-baudclk cases
+	/* For those SLAVE implementations, we ignore non-baudclk cases
 	 * and, instead, abandon MASTER mode that needs baud clock.
 	 */
 	ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
@@ -1257,7 +1279,7 @@
 		if (ret)
 			goto error_pcm;
 	} else {
-		ret = imx_pcm_dma_init(pdev);
+		ret = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 		if (ret)
 			goto error_pcm;
 	}
@@ -1320,7 +1342,11 @@
 
 		fsl_ac97_data = ssi_private;
 
-		snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		if (ret) {
+			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			return ret;
+		}
 	} else {
 		/* Initialize this copy of the CPU DAI driver structure */
 		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1357,7 +1383,9 @@
 
 	/* Are the RX and the TX clocks locked? */
 	if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
-		ssi_private->cpu_dai_drv.symmetric_rates = 1;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			ssi_private->cpu_dai_drv.symmetric_rates = 1;
+
 		ssi_private->cpu_dai_drv.symmetric_channels = 1;
 		ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
 	}
@@ -1434,6 +1462,27 @@
 		_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
 				     ssi_private->dai_fmt);
 
+	if (fsl_ssi_is_ac97(ssi_private)) {
+		u32 ssi_idx;
+
+		ret = of_property_read_u32(np, "cell-index", &ssi_idx);
+		if (ret) {
+			dev_err(&pdev->dev, "cannot get SSI index property\n");
+			goto error_sound_card;
+		}
+
+		ssi_private->pdev =
+			platform_device_register_data(NULL,
+					"ac97-codec", ssi_idx, NULL, 0);
+		if (IS_ERR(ssi_private->pdev)) {
+			ret = PTR_ERR(ssi_private->pdev);
+			dev_err(&pdev->dev,
+				"failed to register AC97 codec platform: %d\n",
+				ret);
+			goto error_sound_card;
+		}
+	}
+
 	return 0;
 
 error_sound_card:
@@ -1458,6 +1507,9 @@
 	if (ssi_private->soc->imx)
 		fsl_ssi_imx_clean(pdev, ssi_private);
 
+	if (fsl_ssi_is_ac97(ssi_private))
+		snd_soc_set_ac97_ops(NULL);
+
 	return 0;
 }
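
The AC97 bus read/write callbacks cannot return an error code, which is why the clock-enable failures added above can only log and bail out; a failed read surfaces to the caller as 0xffff. Reduced to its core, the bracketing pattern is (a sketch, not the exact driver code):

	ret = clk_prepare_enable(fsl_ac97_data->clk);
	if (ret)
		return;		/* the read variant returns (unsigned short)-1 */
	/* ... access the CCSR_SSI_* registers while the clock runs ... */
	clk_disable_unprepare(fsl_ac97_data->clk);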
 
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0db94f49..1fc01ed 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -40,7 +40,7 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_PAUSE |
 		SNDRV_PCM_INFO_RESUME,
-	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
+	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
 	.period_bytes_min = 128,
 	.period_bytes_max = 65535, /* Limited by SDMA engine */
 	.periods_min = 2,
@@ -52,13 +52,30 @@
 	.pcm_hardware = &imx_pcm_hardware,
 	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
 	.compat_filter_fn = filter,
-	.prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
+	.prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
 };
 
-int imx_pcm_dma_init(struct platform_device *pdev)
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
+	struct snd_dmaengine_pcm_config *config;
+	struct snd_pcm_hardware *pcm_hardware;
+
+	config = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+	*config = imx_dmaengine_pcm_config;
+	if (size)
+		config->prealloc_buffer_size = size;
+
+	pcm_hardware = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_pcm_hardware), GFP_KERNEL);
+	if (!pcm_hardware)
+		return -ENOMEM;
+	*pcm_hardware = imx_pcm_hardware;
+	if (size)
+		pcm_hardware->buffer_bytes_max = size;
+
+	config->pcm_hardware = pcm_hardware;
+
 	return devm_snd_dmaengine_pcm_register(&pdev->dev,
-		&imx_dmaengine_pcm_config,
+		config,
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
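
With the new size argument each caller picks its own preallocated DMA buffer size, while passing 0 keeps the IMX_DEFAULT_DMABUF_SIZE values copied from the static templates. A hypothetical caller, using the constants added to imx-pcm.h further below:

	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);	/* 256 KiB for ESAI */
	if (ret)
		return ret;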
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index c79cb27..133c4470a 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -20,6 +20,11 @@
  */
 #define IMX_SSI_DMABUF_SIZE	(64 * 1024)
 
+#define IMX_DEFAULT_DMABUF_SIZE	(64 * 1024)
+#define IMX_SAI_DMABUF_SIZE	(64 * 1024)
+#define IMX_SPDIF_DMABUF_SIZE	(64 * 1024)
+#define IMX_ESAI_DMABUF_SIZE	(256 * 1024)
+
 static inline void
 imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
 	int dma, enum sdma_peripheral_type peripheral_type)
@@ -39,9 +44,9 @@
 };
 
 #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
-int imx_pcm_dma_init(struct platform_device *pdev);
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size);
 #else
-static inline int imx_pcm_dma_init(struct platform_device *pdev)
+static inline int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
 	return -ENODEV;
 }
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 461ce27..48b2d24 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -603,7 +603,7 @@
 	ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
 	ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-	ssi->dma_init = imx_pcm_dma_init(pdev);
+	ssi->dma_init = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 
 	if (ssi->fiq_init && ssi->dma_init) {
 		ret = ssi->fiq_init;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index d555493..3ff76d4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -76,6 +76,7 @@
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct simple_dai_props *dai_props =
 		&priv->dai_props[rtd - rtd->card->rtd];
@@ -91,8 +92,16 @@
 		mclk = params_rate(params) * mclk_fs;
 		ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
 					     SND_SOC_CLOCK_IN);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
+
+		ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+					     SND_SOC_CLOCK_OUT);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
 	}
 
+err:
 	return ret;
 }
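
snd_soc_dai_set_sysclk() returns -ENOTSUPP when a DAI driver implements no set_sysclk callback, and machine drivers are expected to tolerate that; only other errors should abort hw_params. The convention, condensed to a sketch:

	ret = snd_soc_dai_set_sysclk(dai, 0, mclk, SND_SOC_CLOCK_IN);
	if (ret && ret != -ENOTSUPP)
		return ret;	/* a real failure */
	/* -ENOTSUPP merely means the DAI has no sysclk op; keep going */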
 
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f3060a4..05fde5e6e 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -26,14 +26,9 @@
 	depends on ACPI
 
 config SND_SOC_INTEL_SST
-	tristate "ASoC support for Intel(R) Smart Sound Technology"
+	tristate
 	select SND_SOC_INTEL_SST_ACPI if ACPI
 	depends on (X86 || COMPILE_TEST)
-	depends on DW_DMAC_CORE
-	help
-          This adds support for Intel(R) Smart Sound Technology (SST).
-          Say Y if you have such a device
-          If unsure select "N".
 
 config SND_SOC_INTEL_SST_ACPI
 	tristate
@@ -46,8 +41,9 @@
 
 config SND_SOC_INTEL_HASWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
-		   I2C_DESIGNWARE_PLATFORM
+	depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT5640
 	help
@@ -58,7 +54,9 @@
 
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_RT5640
 	help
@@ -67,7 +65,9 @@
 
 config SND_SOC_INTEL_BYT_MAX98090_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_MAX98090
 	help
@@ -76,8 +76,10 @@
 
 config SND_SOC_INTEL_BROADWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
+	depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
 		   I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT286
 	help
@@ -132,3 +134,8 @@
       This adds support for the ASoC machine driver for Intel(R) Cherrytrail & Braswell
       platforms with the MAX98090 audio codec; it can also support a TI jack chip as an aux device.
       If unsure select "N".
+
+config SND_SOC_INTEL_SKYLAKE
+	tristate
+	select SND_HDA_EXT_CORE
+	select SND_SOC_INTEL_SST
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 6de5d5c..2b45435 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
 obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
 obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
 
 # Machine support
 obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 31e9b9e..d55388e 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -132,7 +132,7 @@
 			      sizeof(cmd.header) + cmd.header.length);
 }
 
-int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
+static int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
 		       struct snd_ctl_elem_info *uinfo)
 {
 	struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
@@ -1298,7 +1298,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->playback_widget->name);
 		w = dai->playback_widget;
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
@@ -1317,7 +1317,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->capture_widget->name);
 		w = dai->capture_widget;
-		list_for_each_entry(p, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 641ebe6..683e501 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -33,7 +33,6 @@
 
 struct sst_device *sst;
 static DEFINE_MUTEX(sst_lock);
-extern struct snd_compr_ops sst_platform_compr_ops;
 
 int sst_register_dsp(struct sst_device *dev)
 {
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index 2409b23..cb32cc7 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -25,6 +25,7 @@
 #include "sst-atom-controls.h"
 
 extern struct sst_device *sst;
+extern struct snd_compr_ops sst_platform_compr_ops;
 
 #define SST_MONO		1
 #define SST_STEREO		2
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 0e0e4d9..ce689c5 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -151,6 +151,7 @@
 		usage_count = GET_USAGE_COUNT(dev);
 		dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
 		if (ret < 0) {
+			pm_runtime_put_sync(dev);
 			dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
 			return ret;
 		}
@@ -204,8 +205,10 @@
 	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
 
 	retval = pm_runtime_get_sync(ctx->dev);
-	if (retval < 0)
+	if (retval < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return retval;
+	}
 
 	str_id = sst_get_stream(ctx, str_params);
 	if (str_id > 0) {
@@ -672,8 +675,10 @@
 	if (NULL == bytes)
 		return -EINVAL;
 	ret_val = pm_runtime_get_sync(ctx->dev);
-	if (ret_val < 0)
+	if (ret_val < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return ret_val;
+	}
 
 	ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
 	sst_pm_runtime_put(ctx);
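
pm_runtime_get_sync() increments the device usage count even when the underlying resume fails, so every error path has to drop that reference again or the device can never runtime-suspend; that is what the added pm_runtime_put_sync() calls do. The canonical shape:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_sync(dev);	/* undo the usage-count bump */
		return ret;
	}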
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 5a27861..3dc7358 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -352,10 +352,9 @@
 	 * copy from mailbox
 	 **/
 	if (msg_high.part.large) {
-		data = kzalloc(msg_low, GFP_KERNEL);
+		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
 		if (!data)
 			return;
-		memcpy(data, (void *) msg->mailbox_data, msg_low);
 		/* Copy command id so that we can use to put sst to reset */
 		dsp_hdr = (struct ipc_dsp_hdr *)data;
 		cmd_id = dsp_hdr->cmd_id;
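
kmemdup() allocates and copies in one call, so the kzalloc()+memcpy() pair collapses into a single step with the same failure semantics (the copy overwrote the whole zeroed buffer anyway). In general form:

	/* before */
	dst = kzalloc(len, GFP_KERNEL);
	if (!dst)
		return;
	memcpy(dst, src, len);

	/* after */
	dst = kmemdup(src, len, GFP_KERNEL);
	if (!dst)
		return;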
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 7ab8cc9..d9f81b8 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -126,6 +126,7 @@
 
 static struct snd_soc_card byt_max98090_card = {
 	.name = "byt-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = byt_max98090_dais,
 	.num_links = ARRAY_SIZE(byt_max98090_dais),
 	.dapm_widgets = byt_max98090_widgets,
diff --git a/sound/soc/intel/boards/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index ae89b9b9..de9788a 100644
--- a/sound/soc/intel/boards/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -197,6 +197,7 @@
 
 static struct snd_soc_card byt_rt5640_card = {
 	.name = "byt-rt5640",
+	.owner = THIS_MODULE,
 	.dai_link = byt_rt5640_dais,
 	.num_links = ARRAY_SIZE(byt_rt5640_dais),
 	.dapm_widgets = byt_rt5640_widgets,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7f55d59..c445312 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -185,6 +185,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_byt = {
 	.name = "baytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = byt_dailink,
 	.num_links = ARRAY_SIZE(byt_dailink),
 	.dapm_widgets = byt_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 70f8321..49f4869 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -104,21 +104,17 @@
 static int cht_ti_jack_event(struct notifier_block *nb,
 		unsigned long event, void *data)
 {
-
 	struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
-	struct snd_soc_dai *codec_dai = jack->card->rtd->codec_dai;
-	struct snd_soc_codec *codec = codec_dai->codec;
+	struct snd_soc_dapm_context *dapm = &jack->card->dapm;
 
 	if (event & SND_JACK_MICROPHONE) {
-
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "SHDN");
+		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_sync(dapm);
 	} else {
-
-		snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_disable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_disable_pin(dapm, "SHDN");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -279,6 +275,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "chtmax98090",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.aux_dev = &cht_max98090_headset_dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index bdcaf46..7be8461 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -305,6 +305,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_chtrt5645 = {
 	.name = "chtrt5645",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
@@ -317,6 +318,7 @@
 
 static struct snd_soc_card snd_soc_card_chtrt5650 = {
 	.name = "chtrt5650",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 2c9cc5b..23fe040 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -323,6 +323,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "cherrytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index 396d545..cbd568e 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -22,6 +22,8 @@
 #include <linux/interrupt.h>
 #include <linux/firmware.h>
 
+#include "../skylake/skl-sst-dsp.h"
+
 struct sst_mem_block;
 struct sst_module;
 struct sst_fw;
@@ -258,6 +260,8 @@
  */
 struct sst_dsp {
 
+	/* Shared for all platforms */
+
 	/* runtime */
 	struct sst_dsp_device *sst_dev;
 	spinlock_t spinlock;	/* IPC locking */
@@ -268,10 +272,6 @@
 	int irq;
 	u32 id;
 
-	/* list of free and used ADSP memory blocks */
-	struct list_head used_block_list;
-	struct list_head free_block_list;
-
 	/* operations */
 	struct sst_ops *ops;
 
@@ -284,6 +284,12 @@
 	/* mailbox */
 	struct sst_mailbox mailbox;
 
+	/* HSW/Byt data */
+
+	/* list of free and used ADSP memory blocks */
+	struct list_head used_block_list;
+	struct list_head free_block_list;
+
 	/* SST FW files loaded and their modules */
 	struct list_head module_list;
 	struct list_head fw_list;
@@ -299,6 +305,15 @@
 	/* DMA FW loading */
 	struct sst_dma *dma;
 	bool fw_use_dma;
+
+	/* SKL data */
+
+	/* To allocate CL dma buffers */
+	struct skl_dsp_loader_ops dsp_ops;
+	struct skl_dsp_fw_ops fw_ops;
+	int sst_state;
+	struct skl_cl_dev cl_dev;
+	u32 intr_status;
 };
 
 /* Size optimised DRAM/IRAM memcpy */
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 64e9421..a627236 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include "sst-dsp.h"
 #include "sst-dsp-priv.h"
@@ -196,6 +197,22 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
 
+/* This is for register bits with attribute RWC */
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned int old, new;
+	u32 ret;
+
+	ret = sst_dsp_shim_read_unlocked(sst, offset);
+
+	old = ret;
+	new = (old & (~mask)) | (value & mask);
+
+	sst_dsp_shim_write_unlocked(sst, offset, new);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced_unlocked);
+
 int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
 				u32 mask, u32 value)
 {
@@ -222,6 +239,60 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
 
+/* This is for register bits with attribute RWC */
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sst->spinlock, flags);
+	sst_dsp_shim_update_bits_forced_unlocked(sst, offset, mask, value);
+	spin_unlock_irqrestore(&sst->spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced);
+
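+/*
+ * sst_dsp_register_poll - wait until (reg & mask) == target, first spinning
+ * with mdelay() for a few ms so short waits stay fast, then backing off to
+ * usleep_range() so long waits do not burn CPU.
+ *
+ * A hypothetical caller (the register and bit names are illustrative):
+ *
+ *	ret = sst_dsp_register_poll(dsp, STATUS_OFFSET, DONE_BIT, DONE_BIT,
+ *				    300, "firmware boot");
+ *	if (ret == -ETIME)
+ *		dev_err(dsp->dev, "firmware boot timed out\n");
+ */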
+int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
+			 u32 target, u32 timeout, char *operation)
+{
+	int time, ret;
+	u32 reg;
+	bool done = false;
+
+	/*
+	 * we will poll for a couple of ms using mdelay; if not successful,
+	 * we then fall back to longer sleeps using usleep_range
+	 */
+
+	/* check if set state successful */
+	for (time = 0; time < 5; time++) {
+		if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target) {
+			done = true;
+			break;
+		}
+		mdelay(1);
+	}
+
+	if (!done) {
+		/* sleeping in 10ms steps so adjust timeout value */
+		timeout /= 10;
+
+		for (time = 0; time < timeout; time++) {
+			if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target)
+				break;
+
+			usleep_range(5000, 10000);
+		}
+	}
+
+	reg = sst_dsp_shim_read_unlocked(ctx, offset);
+	dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
+			(time < timeout) ? "successful" : "timed out");
+	ret = time < timeout ? 0 : -ETIME;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_register_poll);
+
 void sst_dsp_dump(struct sst_dsp *sst)
 {
 	if (sst->ops->dump)
diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index 96aeb25..1f45f18 100644
--- a/sound/soc/intel/common/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -230,6 +230,8 @@
 u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
 				u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* SHIM Read / Write Unlocked for callers already holding sst lock */
 void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
@@ -240,6 +242,8 @@
 u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
 					u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* Internal generic low-level SST IO functions - can be overidden */
 void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
@@ -278,6 +282,8 @@
 void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
+int sst_dsp_register_poll(struct sst_dsp  *dsp, u32 offset, u32 mask,
+		 u32 expected_value, u32 timeout, char *operation);
 
 /* Debug */
 void sst_dsp_dump(struct sst_dsp *sst);
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
new file mode 100644
index 0000000..27db221
--- /dev/null
+++ b/sound/soc/intel/skylake/Makefile
@@ -0,0 +1,9 @@
+snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
+
+# Skylake IPC Support
+snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
+		skl-sst.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
new file mode 100644
index 0000000..826d4fd
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -0,0 +1,884 @@
+/*
+ *  skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
+ *  configurations
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	   Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+#include "skl.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-topology.h"
+#include "skl-tplg-interface.h"
+
+static int skl_alloc_dma_buf(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	return  bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
+}
+
+static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	bus->io_ops->dma_free_pages(bus, dmab);
+
+	return 0;
+}
+
+int skl_init_dsp(struct skl *skl)
+{
+	void __iomem *mmio_base;
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int irq = bus->irq;
+	struct skl_dsp_loader_ops loader_ops;
+	int ret;
+
+	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
+	loader_ops.free_dma_buf = skl_free_dma_buf;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	/* read the BAR of the ADSP MMIO */
+	mmio_base = pci_ioremap_bar(skl->pci, 4);
+	if (mmio_base == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	ret = skl_sst_dsp_init(bus->dev, mmio_base, irq,
+			loader_ops, &skl->skl_sst);
+
+	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
+
+	return ret;
+}
+
+void skl_free_dsp(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl_sst *ctx =  skl->skl_sst;
+
+	/* disable  ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+
+	skl_sst_dsp_cleanup(bus->dev, ctx);
+	if (ctx->dsp->addr.lpe)
+		iounmap(ctx->dsp->addr.lpe);
+}
+
+int skl_suspend_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+	int ret;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	ret = skl_dsp_sleep(ctx->dsp);
+	if (ret < 0)
+		return ret;
+
+	/* disable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
+
+	return 0;
+}
+
+int skl_resume_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	return skl_dsp_wake(ctx->dsp);
+}
+
+enum skl_bitdepth skl_get_bit_depth(int params)
+{
+	switch (params) {
+	case 8:
+		return SKL_DEPTH_8BIT;
+
+	case 16:
+		return SKL_DEPTH_16BIT;
+
+	case 24:
+		return SKL_DEPTH_24BIT;
+
+	case 32:
+		return SKL_DEPTH_32BIT;
+
+	default:
+		return SKL_DEPTH_INVALID;
+
+	}
+}
+
+static u32 skl_create_channel_map(enum skl_ch_cfg ch_cfg)
+{
+	u32 config;
+
+	switch (ch_cfg) {
+	case SKL_CH_CFG_MONO:
+		config =  (0xFFFFFFF0 | SKL_CHANNEL_LEFT);
+		break;
+
+	case SKL_CH_CFG_STEREO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_2_1:
+		config = (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LFE << 8));
+		break;
+
+	case SKL_CH_CFG_3_0:
+		config =  (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8));
+		break;
+
+	case SKL_CH_CFG_3_1:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LFE << 12));
+		break;
+
+	case SKL_CH_CFG_QUATRO:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LEFT_SURROUND << 8)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_4_0:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_CENTER_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_5_0:
+		config = (0xFFF00000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16));
+		break;
+
+	case SKL_CH_CFG_5_1:
+		config = (0xFF000000 | SKL_CHANNEL_CENTER
+			| (SKL_CHANNEL_LEFT << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16)
+			| (SKL_CHANNEL_LFE << 20));
+		break;
+
+	case SKL_CH_CFG_DUAL_MONO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_LEFT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_0:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_1:
+		config = (0xFFFF00FF | (SKL_CHANNEL_LEFT << 8)
+			| (SKL_CHANNEL_RIGHT << 12));
+		break;
+
+	default:
+		config =  0xFFFFFFFF;
+		break;
+
+	}
+
+	return config;
+}
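+
+/*
+ * Example, derived from the cases above rather than from a spec: for
+ * SKL_CH_CFG_STEREO the map is 0xFFFFFF00 | SKL_CHANNEL_LEFT |
+ * (SKL_CHANNEL_RIGHT << 4), i.e. each nibble holds one channel id,
+ * lowest nibble first, with 0xF marking an unused slot.
+ */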
+
+/*
+ * Each module in the DSP expects a base module configuration, which consists
+ * of PCM format information, which we calculate in the driver, and resource
+ * values, which are read from the widget information passed through the
+ * topology binary. This is sent when we create a module with the
+ * INIT_INSTANCE IPC msg.
+ */
+static void skl_set_base_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_base_cfg *base_cfg)
+{
+	struct skl_module_fmt *format = &mconfig->in_fmt;
+
+	base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
+
+	base_cfg->audio_fmt.s_freq = format->s_freq;
+	base_cfg->audio_fmt.bit_depth = format->bit_depth;
+	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
+	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+
+	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
+			format->bit_depth, format->valid_bit_depth,
+			format->ch_cfg);
+
+	base_cfg->audio_fmt.channel_map = skl_create_channel_map(
+					base_cfg->audio_fmt.ch_cfg);
+
+	base_cfg->audio_fmt.interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	base_cfg->cps = mconfig->mcps;
+	base_cfg->ibs = mconfig->ibs;
+	base_cfg->obs = mconfig->obs;
+}
+
+/*
+ * Copies copier capabilities into copier module and updates copier module
+ * config size.
+ */
+static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
+				struct skl_cpr_cfg *cpr_mconfig)
+{
+	if (mconfig->formats_config.caps_size == 0)
+		return;
+
+	memcpy(cpr_mconfig->gtw_cfg.config_data,
+			mconfig->formats_config.caps,
+			mconfig->formats_config.caps_size);
+
+	cpr_mconfig->gtw_cfg.config_length =
+			(mconfig->formats_config.caps_size) / 4;
+}
+
+/*
+ * Calculate the gateway settings required for the copier module: the type
+ * of gateway and the index of the gateway to use
+ */
+static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	union skl_connector_node_id node_id = {0};
+	struct skl_pipe_params *params = mconfig->pipe->p_params;
+
+	switch (mconfig->dev_type) {
+	case SKL_DEVICE_BT:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					(mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_I2S:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					 (mconfig->time_slot << 1) +
+					 (mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_DMIC:
+		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
+		node_id.node.vindex = mconfig->vbus_id +
+					 (mconfig->time_slot);
+		break;
+
+	case SKL_DEVICE_HDALINK:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
+			SKL_DMA_HDA_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->link_dma_id;
+		break;
+
+	default:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
+			SKL_DMA_HDA_HOST_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id;
+		break;
+	}
+
+	cpr_mconfig->gtw_cfg.node_id = node_id.val;
+
+	if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
+	else
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
+
+	cpr_mconfig->cpr_feature_mask = 0;
+	cpr_mconfig->gtw_cfg.config_length  = 0;
+
+	skl_copy_copier_caps(mconfig, cpr_mconfig);
+}
+
+static void skl_setup_out_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_audio_data_format *out_fmt)
+{
+	struct skl_module_fmt *format = &mconfig->out_fmt;
+
+	out_fmt->number_of_channels = (u8)format->channels;
+	out_fmt->s_freq = format->s_freq;
+	out_fmt->bit_depth = format->bit_depth;
+	out_fmt->valid_bit_depth = format->valid_bit_depth;
+	out_fmt->ch_cfg = format->ch_cfg;
+
+	out_fmt->channel_map = skl_create_channel_map(out_fmt->ch_cfg);
+	out_fmt->interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
+		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
+}
+
+/*
+ * DSP needs SRC module for frequency conversion, SRC takes base module
+ * configuration and the target frequency as extra parameter passed as src
+ * config
+ */
+static void skl_set_src_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_src_module_cfg *src_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+
+	skl_set_base_module_format(ctx, mconfig,
+		(struct skl_base_cfg *)src_mconfig);
+
+	src_mconfig->src_cfg = fmt->s_freq;
+}
+
+/*
+ * The DSP needs the updown module to do channel conversion. The updown
+ * module takes the base module configuration and a channel configuration.
+ * It also takes coefficients; for now, default coefficients are applied here.
+ */
+static void skl_set_updown_mixer_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_up_down_mixer_cfg *mixer_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+	int i = 0;
+
+	skl_set_base_module_format(ctx,	mconfig,
+		(struct skl_base_cfg *)mixer_mconfig);
+	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
+
+	/* Select F/W default coefficient */
+	mixer_mconfig->coeff_sel = 0x0;
+
+	/* User coeff, don't care since we are selecting F/W defaults */
+	for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
+		mixer_mconfig->coeff[i] = 0xDEADBEEF;
+}
+
+/*
+ * 'copier' is a DSP-internal module which copies data from host DMA (HDA
+ * host dma) or from a link (HDA link, SSP, PDM).
+ * Here we calculate the copier module parameters, like the PCM format, the
+ * output format and the gateway settings.
+ * copier_module_config is sent as the input buffer with the INIT_INSTANCE
+ * IPC msg.
+ */
+static void skl_set_copier_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
+	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
+
+	skl_set_base_module_format(ctx, mconfig, base_cfg);
+
+	skl_setup_out_format(ctx, mconfig, out_fmt);
+	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
+}
+
+static u16 skl_get_module_param_size(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig)
+{
+	u16 param_size;
+
+	switch (mconfig->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		param_size = sizeof(struct skl_cpr_cfg);
+		param_size += mconfig->formats_config.caps_size;
+		return param_size;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		return sizeof(struct skl_src_module_cfg);
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		return sizeof(struct skl_up_down_mixer_cfg);
+
+	default:
+		/*
+		 * return only base cfg when no specific module type is
+		 * specified
+		 */
+		return sizeof(struct skl_base_cfg);
+	}
+
+	return 0;
+}
+
+/*
+ * DSP firmware supports various modules like copier, SRC, updown etc.
+ * These modules require various parameters to be calculated and sent to the
+ * DSP for module initialization. By default a generic module needs only the
+ * base module format configuration.
+ */
+
+static int skl_set_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *module_config,
+			u16 *module_config_size,
+			void **param_data)
+{
+	u16 param_size;
+
+	param_size  = skl_get_module_param_size(ctx, module_config);
+
+	*param_data = kzalloc(param_size, GFP_KERNEL);
+	if (!*param_data)
+		return -ENOMEM;
+
+	*module_config_size = param_size;
+
+	switch (module_config->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		skl_set_copier_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		skl_set_src_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		skl_set_updown_mixer_format(ctx, module_config, *param_data);
+		break;
+
+	default:
+		skl_set_base_module_format(ctx, module_config, *param_data);
+		break;
+
+	}
+
+	dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
+			module_config->id.module_id, param_size);
+	print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
+			*param_data, param_size, false);
+	return 0;
+}
+
+static int skl_get_queue_index(struct skl_module_pin *mpin,
+				struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].id.module_id == id.module_id &&
+			mpin[i].id.instance_id == id.instance_id)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Allocates a queue for each module.
+ * If dynamic, the pin_index is allocated from 0 to max_pin.
+ * If static, the pin_index is fixed, based on the module_id and instance id.
+ */
+static int skl_alloc_queue(struct skl_module_pin *mpin,
+			struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	/*
+	 * if the pin is dynamic, find the first free pin;
+	 * otherwise find the pin matching the module and instance id, as the
+	 * topology will ensure a unique pin is assigned to it, so there is no
+	 * need to allocate/free
+	 */
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].is_dynamic) {
+			if (!mpin[i].in_use) {
+				mpin[i].in_use = true;
+				mpin[i].id.module_id = id.module_id;
+				mpin[i].id.instance_id = id.instance_id;
+				return i;
+			}
+		} else {
+			if (mpin[i].id.module_id == id.module_id &&
+				mpin[i].id.instance_id == id.instance_id)
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
+{
+	if (mpin[q_index].is_dynamic) {
+		mpin[q_index].in_use = false;
+		mpin[q_index].id.module_id = 0;
+		mpin[q_index].id.instance_id = 0;
+	}
+}
+
+/*
+ * A module needs to be instantiated in the DSP. A module is present in a
+ * collection of modules referred to as a PIPE.
+ * We first calculate the module format, based on the module type, and then
+ * invoke the DSP by sending an INIT_INSTANCE IPC using the ipc helper.
+ */
+int skl_init_module(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig, char *param)
+{
+	u16 module_config_size = 0;
+	void *param_data = NULL;
+	int ret;
+	struct skl_ipc_init_instance_msg msg;
+
+	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
+		 mconfig->id.module_id, mconfig->id.instance_id);
+
+	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
+		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
+				 mconfig->pipe->state, mconfig->pipe->ppl_id);
+		return -EIO;
+	}
+
+	ret = skl_set_module_format(ctx, mconfig,
+			&module_config_size, &param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
+		return ret;
+	}
+
+	msg.module_id = mconfig->id.module_id;
+	msg.instance_id = mconfig->id.instance_id;
+	msg.ppl_instance_id = mconfig->pipe->ppl_id;
+	msg.param_data_size = module_config_size;
+	msg.core_id = mconfig->core_id;
+
+	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
+		kfree(param_data);
+		return ret;
+	}
+	mconfig->m_state = SKL_MODULE_INIT_DONE;
+
+	return ret;
+}
+
+static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module)
+{
+	dev_dbg(ctx->dev, "%s: src module_id = %d  src_instance=%d\n",
+		__func__, src_module->id.module_id, src_module->id.instance_id);
+	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instacne=%d\n", __func__,
+		 dst_module->id.module_id, dst_module->id.instance_id);
+
+	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
+		src_module->m_state, dst_module->m_state);
+}
+
+/*
+ * On module freeup, we need to unbind the module from the modules
+ * it is already bound to.
+ * Find the allocated pins and unbind them using the bind_unbind IPC.
+ */
+int skl_unbind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+
+	/*
+	 * if the modules are in different pipelines, only send the unbind
+	 * when both modules are in the BIND state
+	 */
+	if ((src_mcfg->pipe->ppl_id != dst_mcfg->pipe->ppl_id) &&
+				dst_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+	else if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+				 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	/* get src queue index */
+	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+
+	/* get dst queue index */
+	dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0)
+		return -EINVAL;
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = false;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_UNINIT;
+		/* free queue only if unbind is success */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
+
+/*
+ * Once a module is instantiated it needs to be 'bound' to other modules in
+ * the pipeline. For binding we need to find the module pins which are
+ * bound together.
+ * This function finds the pins and then sends the bind_unbind IPC message
+ * to the DSP using the IPC helper
+ */
+int skl_bind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0) {
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		return -EINVAL;
+	}
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
+			 msg.src_queue, msg.dst_queue);
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = true;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
+	} else {
+		/* error case: if IPC fails, clear the queue index */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
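+
+/*
+ * Usage sketch (illustrative; src and dst stand for skl_module_cfg
+ * instances resolved from the topology):
+ *
+ *	ret = skl_bind_modules(ctx, src, dst);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	ret = skl_unbind_modules(ctx, src, dst);
+ *
+ * On a bind failure the helper has already released any queue index it
+ * allocated, so the caller does not need to unwind the pins itself.
+ */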
+
+static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
+	enum skl_ipc_pipeline_state state)
+{
+	dev_dbg(ctx->dev, "%s: pipe_satate = %d\n", __func__, state);
+
+	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
+}
+
+/*
+ * A pipeline is a collection of modules. Before a module is instantiated, a
+ * pipeline needs to be created for it.
+ * This function creates a pipeline by sending the create pipeline IPC
+ * message to the FW
+ */
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
+
+	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
+				pipe->pipe_priority, pipe->ppl_id);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to create pipeline\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
+
+/*
+ * A pipeline needs to be deleted on cleanup. If a pipeline is running, then
+ * pause the pipeline first and then delete it.
+ * The pipe delete is done by sending the delete pipeline IPC. The DSP will
+ * stop the DMA engines and release the resources
+ */
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If pipe is not started, do not try to stop the pipe in FW. */
+	if (pipe->state > SKL_PIPE_STARTED) {
+		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+		if (ret < 0) {
+			dev_err(ctx->dev, "Failed to stop pipeline\n");
+			return ret;
+		}
+
+		pipe->state = SKL_PIPE_PAUSED;
+	} else {
+		/* If pipe was not created in FW, do not try to delete it */
+		if (pipe->state < SKL_PIPE_CREATED)
+			return 0;
+
+		ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
+		if (ret < 0)
+			dev_err(ctx->dev, "Failed to delete pipeline\n");
+	}
+
+	return ret;
+}
+
+/*
+ * A pipeline is also a scheduling entity in the DSP, which can be run or
+ * stopped. To process data, the pipe needs to be run by sending an IPC set
+ * pipe state message to the DSP
+ */
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If the pipe was not created in FW, do not try to pause or run it */
+	if (pipe->state < SKL_PIPE_CREATED)
+		return 0;
+
+	/* Pipe has to be paused before it is started */
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to pause pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_PAUSED;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to start pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop the pipeline by sending set pipe state IPC
+ * The DSP doesn't implement stop, so we always send the pause message
+ */
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
+
+	/* If the pipe has not reached the paused state in FW, nothing to stop */
+	if (pipe->state < SKL_PIPE_PAUSED)
+		return 0;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "Failed to stop pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
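+
+/*
+ * Pipeline lifecycle summary (illustrative, derived from the helpers
+ * above):
+ *
+ *	skl_create_pipeline(ctx, pipe);		pipe -> CREATED
+ *	skl_init_module(ctx, mcfg, param);	module -> INIT_DONE
+ *	skl_bind_modules(ctx, src, dst);	module -> BIND_DONE
+ *	skl_run_pipe(ctx, pipe);		CREATED -> PAUSED -> STARTED
+ *	skl_stop_pipe(ctx, pipe);		back to CREATED (pause is
+ *						sent, as the DSP has no stop)
+ *	skl_delete_pipe(ctx, pipe);		pipeline removed from FW
+ */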
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
new file mode 100644
index 0000000..13036b1
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -0,0 +1,140 @@
+/*
+ *  skl-nhlt.c - Intel SKL Platform NHLT parsing
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include "skl.h"
+
+/* Unique identification for getting NHLT blobs */
+static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
+				0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+
+#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
+
+void __iomem *skl_nhlt_init(struct device *dev)
+{
+	acpi_handle handle;
+	union acpi_object *obj;
+	struct nhlt_resource_desc  *nhlt_ptr = NULL;
+
+	if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
+		dev_err(dev, "Requested NHLT device not found\n");
+		return NULL;
+	}
+
+	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+	if (obj && obj->type == ACPI_TYPE_BUFFER) {
+		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
+
+		return ioremap_cache(nhlt_ptr->min_addr, nhlt_ptr->length);
+	}
+
+	dev_err(dev, "device specific method to extract NHLT blob failed\n");
+	return NULL;
+}
+
+void skl_nhlt_free(void __iomem *addr)
+{
+	iounmap(addr);
+	addr = NULL;
+}
+
+static struct nhlt_specific_cfg *skl_get_specific_cfg(
+		struct device *dev, struct nhlt_fmt *fmt,
+		u8 no_ch, u32 rate, u16 bps)
+{
+	struct nhlt_specific_cfg *sp_config;
+	struct wav_fmt *wfmt;
+	struct nhlt_fmt_cfg *fmt_config = fmt->fmt_config;
+	int i;
+
+	dev_dbg(dev, "Format count =%d\n", fmt->fmt_count);
+
+	for (i = 0; i < fmt->fmt_count; i++) {
+		wfmt = &fmt_config->fmt_ext.fmt;
+		dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", wfmt->channels,
+			 wfmt->bits_per_sample, wfmt->samples_per_sec);
+		if (wfmt->channels == no_ch && wfmt->samples_per_sec == rate &&
+					wfmt->bits_per_sample == bps) {
+			sp_config = &fmt_config->config;
+
+			return sp_config;
+		}
+
+		fmt_config = (struct nhlt_fmt_cfg *)(fmt_config->config.caps +
+						fmt_config->config.size);
+	}
+
+	return NULL;
+}
+
+static void dump_config(struct device *dev, u32 instance_id, u8 linktype,
+		u8 s_fmt, u8 num_channels, u32 s_rate, u8 dirn, u16 bps)
+{
+	dev_dbg(dev, "Input configuration\n");
+	dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", num_channels, s_fmt, s_rate);
+	dev_dbg(dev, "vbus_id=%d link_type=%d\n", instance_id, linktype);
+	dev_dbg(dev, "bits_per_sample=%d\n", bps);
+}
+
+static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
+				u32 instance_id, u8 link_type, u8 dirn)
+{
+	dev_dbg(dev, "vbus_id=%d link_type=%d dir=%d\n",
+		epnt->virtual_bus_id, epnt->linktype, epnt->direction);
+
+	if ((epnt->virtual_bus_id == instance_id) &&
+			(epnt->linktype == link_type) &&
+			(epnt->direction == dirn))
+		return true;
+	else
+		return false;
+}
+
+struct nhlt_specific_cfg
+*skl_get_ep_blob(struct skl *skl, u32 instance, u8 link_type,
+			u8 s_fmt, u8 num_ch, u32 s_rate, u8 dirn)
+{
+	struct nhlt_fmt *fmt;
+	struct nhlt_endpoint *epnt;
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct device *dev = bus->dev;
+	struct nhlt_specific_cfg *sp_config;
+	struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+	u16 bps = num_ch * s_fmt;
+	u8 j;
+
+	dump_config(dev, instance, link_type, s_fmt, num_ch, s_rate, dirn, bps);
+
+	epnt = (struct nhlt_endpoint *)nhlt->desc;
+
+	dev_dbg(dev, "endpoint count =%d\n", nhlt->endpoint_count);
+
+	for (j = 0; j < nhlt->endpoint_count; j++) {
+		if (skl_check_ep_match(dev, epnt, instance, link_type, dirn)) {
+			fmt = (struct nhlt_fmt *)(epnt->config.caps +
+						 epnt->config.size);
+			sp_config = skl_get_specific_cfg(dev, fmt, num_ch, s_rate, bps);
+			if (sp_config)
+				return sp_config;
+		}
+
+		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+	}
+
+	return NULL;
+}
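+
+/*
+ * Usage sketch (illustrative): looking up the blob for a hypothetical
+ * stereo 48 kHz 16-bit DMIC capture endpoint on virtual bus 0:
+ *
+ *	cfg = skl_get_ep_blob(skl, 0, NHLT_LINK_DMIC, 16, 2, 48000, dirn);
+ *	if (!cfg)
+ *		return -EINVAL;
+ *
+ * A NULL return means no endpoint/format pair in the NHLT table matched;
+ * the real vbus id and direction come from the DAI being configured.
+ */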
diff --git a/sound/soc/intel/skylake/skl-nhlt.h b/sound/soc/intel/skylake/skl-nhlt.h
new file mode 100644
index 0000000..3769f9f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.h
@@ -0,0 +1,106 @@
+/*
+ *  skl-nhlt.h - Intel HDA Platform NHLT header
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __SKL_NHLT_H__
+#define __SKL_NHLT_H__
+
+#include <linux/acpi.h>
+
+struct wav_fmt {
+	u16 fmt_tag;
+	u16 channels;
+	u32 samples_per_sec;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 bits_per_sample;
+	u16 cb_size;
+} __packed;
+
+struct wav_fmt_ext {
+	struct wav_fmt fmt;
+	union samples {
+		u16 valid_bits_per_sample;
+		u16 samples_per_block;
+		u16 reserved;
+	} sample;
+	u32 channel_mask;
+	u8 sub_fmt[16];
+} __packed;
+
+enum nhlt_link_type {
+	NHLT_LINK_HDA = 0,
+	NHLT_LINK_DSP = 1,
+	NHLT_LINK_DMIC = 2,
+	NHLT_LINK_SSP = 3,
+	NHLT_LINK_INVALID
+};
+
+enum nhlt_device_type {
+	NHLT_DEVICE_BT = 0,
+	NHLT_DEVICE_DMIC = 1,
+	NHLT_DEVICE_I2S = 4,
+	NHLT_DEVICE_INVALID
+};
+
+struct nhlt_specific_cfg {
+	u32 size;
+	u8 caps[0];
+} __packed;
+
+struct nhlt_fmt_cfg {
+	struct wav_fmt_ext fmt_ext;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_fmt {
+	u8 fmt_count;
+	struct nhlt_fmt_cfg fmt_config[0];
+} __packed;
+
+struct nhlt_endpoint {
+	u32  length;
+	u8   linktype;
+	u8   instance_id;
+	u16  vendor_id;
+	u16  device_id;
+	u16  revision_id;
+	u32  subsystem_id;
+	u8   device_type;
+	u8   direction;
+	u8   virtual_bus_id;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_acpi_table {
+	struct acpi_table_header header;
+	u8 endpoint_count;
+	struct nhlt_endpoint desc[0];
+} __packed;
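+
+/*
+ * Layout note: the table is a packed, variable-length blob, so it is
+ * walked with the embedded length/size fields rather than indexed:
+ *
+ *	nhlt_acpi_table
+ *	  nhlt_endpoint[0], config caps of config.size bytes
+ *	    nhlt_fmt: fmt_count entries of nhlt_fmt_cfg, each a wav_fmt_ext
+ *	    followed by its own nhlt_specific_cfg
+ *	  nhlt_endpoint[1] at (u8 *)epnt + epnt->length
+ *	  ...
+ *
+ * This is how skl-nhlt.c advances through endpoints and format configs.
+ */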
+
+struct nhlt_resource_desc  {
+	u32 extra;
+	u16 flags;
+	u64 addr_spc_gra;
+	u64 min_addr;
+	u64 max_addr;
+	u64 addr_trans_offset;
+	u64 length;
+} __packed;
+
+#endif
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
new file mode 100644
index 0000000..7d617bf
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -0,0 +1,916 @@
+/*
+ *  skl-pcm.c - ASoC HDA Platform driver file implementing PCM functionality
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author:  Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "skl.h"
+
+#define HDA_MONO 1
+#define HDA_STEREO 2
+
+static struct snd_pcm_hardware azx_pcm_hw = {
+	.info =			(SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_INTERLEAVED |
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_MMAP_VALID |
+				 SNDRV_PCM_INFO_PAUSE |
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
+				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
+	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =		SNDRV_PCM_RATE_48000,
+	.rate_min =		48000,
+	.rate_max =		48000,
+	.channels_min =		2,
+	.channels_max =		2,
+	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
+	.period_bytes_min =	128,
+	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
+	.periods_min =		2,
+	.periods_max =		AZX_MAX_FRAG,
+	.fifo_size =		0,
+};
+
+static inline
+struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
+{
+	return substream->runtime->private_data;
+}
+
+static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstream = hdac_stream(stream);
+	struct hdac_bus *bus = hstream->bus;
+
+	return hbus_to_ebus(bus);
+}
+
+static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_substream *substream,
+				 size_t size)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	hdac_stream(stream)->bufsize = 0;
+	hdac_stream(stream)->period_bytes = 0;
+	hdac_stream(stream)->format_val = 0;
+
+	return snd_pcm_lib_malloc_pages(substream, size);
+}
+
+static int skl_substream_free_pages(struct hdac_bus *bus,
+				struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_runtime *runtime)
+{
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	/* avoid wrap-around with wall-clock */
+	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+				     20, 178000000);
+}
+
+static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
+{
+	if (ebus->ppcap)
+		return HDAC_EXT_STREAM_TYPE_HOST;
+	else
+		return HDAC_EXT_STREAM_TYPE_COUPLED;
+}
+
+static int skl_pcm_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct skl_dma_params *dma_params;
+	int ret;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = pm_runtime_get_sync(dai->dev);
+	if (ret < 0)
+		return ret;
+
+	stream = snd_hdac_ext_stream_assign(ebus, substream,
+					skl_get_host_stream_type(ebus));
+	if (stream == NULL)
+		return -EBUSY;
+
+	skl_set_pcm_constrains(ebus, runtime);
+
+	/*
+	 * disable WALLCLOCK timestamps for capture streams
+	 * until we figure out how to handle digital inputs
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
+	}
+
+	runtime->private_data = stream;
+
+	dma_params = kzalloc(sizeof(*dma_params), GFP_KERNEL);
+	if (!dma_params)
+		return -ENOMEM;
+
+	dma_params->stream_tag = hdac_stream(stream)->stream_tag;
+	snd_soc_dai_set_dma_data(dai, substream, dma_params);
+
+	dev_dbg(dai->dev, "stream tag set in dma params=%d\n",
+				 dma_params->stream_tag);
+	snd_pcm_set_sync(substream);
+
+	return 0;
+}
+
+static int skl_get_format(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	int format_val = 0;
+
+	if (ebus->ppcap) {
+		struct snd_pcm_runtime *runtime = substream->runtime;
+
+		format_val = snd_hdac_calc_stream_format(runtime->rate,
+						runtime->channels,
+						runtime->format,
+						32, 0);
+	} else {
+		struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+		dma_params = snd_soc_dai_get_dma_data(codec_dai, substream);
+		if (dma_params)
+			format_val = dma_params->format;
+	}
+
+	return format_val;
+}
+
+static int skl_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	unsigned int format_val;
+	int err;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (hdac_stream(stream)->prepared) {
+		dev_dbg(dai->dev, "already stream is prepared - returning\n");
+		return 0;
+	}
+
+	format_val = skl_get_format(substream, dai);
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
+				hdac_stream(stream)->stream_tag, format_val);
+	snd_hdac_stream_reset(hdac_stream(stream));
+
+	err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
+	if (err < 0)
+		return err;
+
+	err = snd_hdac_stream_setup(hdac_stream(stream));
+	if (err < 0)
+		return err;
+
+	hdac_stream(stream)->prepared = 1;
+
+	return err;
+}
+
+static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret, dma_id;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = skl_substream_alloc_pages(ebus, substream,
+					  params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dai->dev, "format_val, rate=%d, ch=%d, format=%d\n",
+			runtime->rate, runtime->channels, runtime->format);
+
+	dma_id = hdac_stream(stream)->stream_tag - 1;
+	dev_dbg(dai->dev, "dma_id=%d\n", dma_id);
+
+	return 0;
+}
+
+static void skl_pcm_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct skl_dma_params *dma_params = NULL;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
+
+	dma_params = snd_soc_dai_get_dma_data(dai, substream);
+	/*
+	 * now set the dma_data to NULL, as we are about to free
+	 * dma_params
+	 */
+	snd_soc_dai_set_dma_data(dai, substream, NULL);
+
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+	kfree(dma_params);
+}
+
+static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_stream_cleanup(hdac_stream(stream));
+	hdac_stream(stream)->prepared = 0;
+
+	return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
+}
+
+static int skl_link_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int dma_id;
+
+	dev_dbg(dai->dev, "%s\n", __func__);
+	link_dev = snd_hdac_ext_stream_assign(ebus, substream,
+					HDAC_EXT_STREAM_TYPE_LINK);
+	if (!link_dev)
+		return -EBUSY;
+
+	snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
+
+	/* set the stream tag in the codec dai dma params  */
+	dma_params = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		dma_params->stream_tag = hdac_stream(link_dev)->stream_tag;
+	snd_soc_dai_set_dma_data(codec_dai, substream, (void *)dma_params);
+	dma_id = hdac_stream(link_dev)->stream_tag - 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev =
+			snd_soc_dai_get_dma_data(dai, substream);
+	unsigned int format_val = 0;
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_pcm_hw_params *params;
+	struct snd_interval *channels, *rate;
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (link_dev->link_prepared) {
+		dev_dbg(dai->dev, "already stream is prepared - returning\n");
+		return 0;
+	}
+	params = devm_kzalloc(dai->dev, sizeof(*params), GFP_KERNEL);
+	if (params == NULL)
+		return -ENOMEM;
+
+	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+	channels->min = channels->max = substream->runtime->channels;
+	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+	rate->min = rate->max = substream->runtime->rate;
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+					SNDRV_PCM_HW_PARAM_FIRST_MASK],
+					substream->runtime->format);
+
+	dma_params = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		format_val = dma_params->format;
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d codec_dai_name=%s\n",
+			hdac_stream(link_dev)->stream_tag, format_val, codec_dai->name);
+
+	snd_hdac_ext_link_stream_reset(link_dev);
+
+	snd_hdac_ext_link_stream_setup(link_dev, format_val);
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	link_dev->link_prepared = 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+
+	dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		snd_hdac_ext_link_stream_start(link_dev);
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		snd_hdac_ext_link_stream_clear(link_dev);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int skl_link_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	link_dev->link_prepared = 0;
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
+	return 0;
+}
+
+static int skl_hda_be_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return pm_runtime_get_sync(dai->dev);
+}
+
+static void skl_hda_be_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+}
+
+static struct snd_soc_dai_ops skl_pcm_dai_ops = {
+	.startup = skl_pcm_open,
+	.shutdown = skl_pcm_close,
+	.prepare = skl_pcm_prepare,
+	.hw_params = skl_pcm_hw_params,
+	.hw_free = skl_pcm_hw_free,
+};
+
+static struct snd_soc_dai_ops skl_dmic_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_ops skl_link_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.prepare = skl_link_pcm_prepare,
+	.hw_params = skl_link_hw_params,
+	.hw_free = skl_link_hw_free,
+	.trigger = skl_link_pcm_trigger,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_driver skl_platform_dai[] = {
+{
+	.name = "System Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "System Playback",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+	.capture = {
+		.stream_name = "System Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Reference Pin",
+	.ops = &skl_pcm_dai_ops,
+	.capture = {
+		.stream_name = "Reference Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Deepbuffer Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "LowLatency Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+/* BE CPU  Dais */
+{
+	.name = "iDisp Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "iDisp Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "DMIC01 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC01 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "DMIC23 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC23 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "HD-Codec Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "HD-Codec Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-SPK Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec-SPK Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-AMIC Pin",
+	.ops = &skl_link_dai_ops,
+	.capture = {
+		.stream_name = "HD-Codec-AMIC Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+static int skl_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
+					dai_link->cpu_dai_name);
+
+	snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
+
+	return 0;
+}
+
+static int skl_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_substream *s;
+	bool start;
+	int sbits = 0;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	dev_dbg(bus->dev, "In %s cmd=%d\n", __func__, cmd);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = true;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = false;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		sbits |= 1 << hdac_stream(stream)->index;
+		snd_pcm_trigger_done(s, substream);
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* first, set SYNC bits of corresponding streams */
+	snd_hdac_stream_sync_trigger(hstr, true, sbits, AZX_REG_SSYNC);
+
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		if (start)
+			snd_hdac_stream_start(hdac_stream(stream), true);
+		else
+			snd_hdac_stream_stop(hdac_stream(stream));
+	}
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	snd_hdac_stream_sync(hstr, start, sbits);
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* reset SYNC bits */
+	snd_hdac_stream_sync_trigger(hstr, false, sbits, AZX_REG_SSYNC);
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, sbits);
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
+
+static int skl_dsp_trigger(struct snd_pcm_substream *substream,
+		int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct hdac_ext_stream *stream;
+	int start;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	dev_dbg(bus->dev, "In %s cmd=%d streamname=%s\n", __func__, cmd, cpu_dai->name);
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = 1;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	if (start)
+		snd_hdac_stream_start(hdac_stream(stream), true);
+	else
+		snd_hdac_stream_stop(hdac_stream(stream));
+
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, 0);
+
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
+
+static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+
+	if (ebus->ppcap)
+		return skl_dsp_trigger(substream, cmd);
+	else
+		return skl_pcm_trigger(substream, cmd);
+}
+
+/* calculate runtime delay from LPIB */
+static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
+				struct hdac_ext_stream *sstream,
+				unsigned int pos)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_stream *hstream = hdac_stream(sstream);
+	struct snd_pcm_substream *substream = hstream->substream;
+	int stream = substream->stream;
+	unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
+	int delay;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay = pos - lpib_pos;
+	else
+		delay = lpib_pos - pos;
+
+	if (delay < 0) {
+		if (delay >= hstream->delay_negative_threshold)
+			delay = 0;
+		else
+			delay += hstream->bufsize;
+	}
+
+	if (delay >= hstream->period_bytes) {
+		dev_info(bus->dev,
+			 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
+			 delay, hstream->period_bytes);
+		delay = 0;
+	}
+
+	return bytes_to_frames(substream->runtime, delay);
+}
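+
+/*
+ * Worked example (illustrative numbers): for playback with pos = 8192 and
+ * lpib_pos = 7168 the DMA has fetched 1024 bytes the link has not yet
+ * played, so delay = 1024 bytes; at 16-bit stereo (4 bytes per frame) that
+ * converts to 256 frames added to runtime->delay.
+ */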
+
+static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
+					int codec_delay)
+{
+	struct hdac_stream *hstr = hdac_stream(hstream);
+	struct snd_pcm_substream *substream = hstr->substream;
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	unsigned int pos;
+	int delay;
+
+	/* use the position buffer as default */
+	pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
+
+	if (pos >= hdac_stream(hstream)->bufsize)
+		pos = 0;
+
+	if (substream->runtime) {
+		delay = skl_get_delay_from_lpib(ebus, hstream, pos)
+						 + codec_delay;
+		substream->runtime->delay += delay;
+	}
+
+	return pos;
+}
+
+static snd_pcm_uframes_t skl_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
+
+	return bytes_to_frames(substream->runtime,
+			       skl_get_position(hstream, 0));
+}
+
+static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
+				u64 nsec)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	u64 codec_frames, codec_nsecs;
+
+	if (!codec_dai->driver->ops->delay)
+		return nsec;
+
+	codec_frames = codec_dai->driver->ops->delay(substream, codec_dai);
+	codec_nsecs = div_u64(codec_frames * 1000000000LL,
+			      substream->runtime->rate);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		return nsec + codec_nsecs;
+
+	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
+}
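+
+/*
+ * Example of the conversion above (illustrative): a codec reporting 96
+ * frames of delay at 48 kHz gives codec_nsecs = 96 * 10^9 / 48000 =
+ * 2000000 ns (2 ms), added to the link time for capture and subtracted
+ * (clamped at 0) for playback.
+ */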
+
+static int skl_get_time_info(struct snd_pcm_substream *substream,
+			struct timespec *system_ts, struct timespec *audio_ts,
+			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
+{
+	struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstr = hdac_stream(sstream);
+	u64 nsec;
+
+	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
+		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
+
+		snd_pcm_gettime(substream->runtime, system_ts);
+
+		nsec = timecounter_read(&hstr->tc);
+		nsec = div_u64(nsec, 3); /* can be optimized */
+		if (audio_tstamp_config->report_delay)
+			nsec = skl_adjust_codec_delay(substream, nsec);
+
+		*audio_ts = ns_to_timespec(nsec);
+
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
+		audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
+		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClk == 42 ns resolution */
+
+	} else {
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
+	}
+
+	return 0;
+}
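+
+/*
+ * Note on the div_u64(nsec, 3) above: the HDA wall clock runs at 24 MHz,
+ * so one tick is 125/3 ns. Assuming the hdac core timecounter uses a
+ * multiplier of 125 (as the legacy HDA driver does), the readout is 3x
+ * nanoseconds and the final divide by 3 yields ns.
+ */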
+
+static struct snd_pcm_ops skl_platform_ops = {
+	.open = skl_platform_open,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = skl_platform_pcm_trigger,
+	.pointer = skl_platform_pcm_pointer,
+	.get_time_info = skl_get_time_info,
+	.mmap = snd_pcm_lib_default_mmap,
+	.page = snd_pcm_sgbuf_ops_page,
+};
+
+static void skl_pcm_free(struct snd_pcm *pcm)
+{
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
+
+static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_pcm *pcm = rtd->pcm;
+	unsigned int size;
+	int retval = 0;
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (dai->driver->playback.channels_min ||
+		dai->driver->capture.channels_min) {
+		/* buffer pre-allocation */
+		size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
+		if (size > MAX_PREALLOC_SIZE)
+			size = MAX_PREALLOC_SIZE;
+		retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+						SNDRV_DMA_TYPE_DEV_SG,
+						snd_dma_pci_data(skl->pci),
+						size, MAX_PREALLOC_SIZE);
+		if (retval) {
+			dev_err(dai->dev, "dma buffer allocationf fail\n");
+			return retval;
+		}
+	}
+
+	return retval;
+}
+
+static struct snd_soc_platform_driver skl_platform_drv = {
+	.ops		= &skl_platform_ops,
+	.pcm_new	= skl_pcm_new,
+	.pcm_free	= skl_pcm_free,
+};
+
+static const struct snd_soc_component_driver skl_component = {
+	.name           = "pcm",
+};
+
+int skl_platform_register(struct device *dev)
+{
+	int ret;
+
+	ret = snd_soc_register_platform(dev, &skl_platform_drv);
+	if (ret) {
+		dev_err(dev, "soc platform registration failed %d\n", ret);
+		return ret;
+	}
+	ret = snd_soc_register_component(dev, &skl_component,
+				skl_platform_dai,
+				ARRAY_SIZE(skl_platform_dai));
+	if (ret) {
+		dev_err(dev, "soc component registration failed %d\n", ret);
+		snd_soc_unregister_platform(dev);
+	}
+
+	return ret;
+}
+
+int skl_platform_unregister(struct device *dev)
+{
+	snd_soc_unregister_component(dev);
+	snd_soc_unregister_platform(dev);
+	return 0;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
new file mode 100644
index 0000000..44748ba
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -0,0 +1,327 @@
+/*
+ * skl-sst-cldma.c - Code Loader DMA handler
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+
+static void skl_cldma_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
+}
+
+void skl_cldma_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
+}
+
+/* Code loader helper APIs */
+static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
+		struct snd_dma_buffer *dmab_data,
+		u32 **bdlp, int size, int with_ioc)
+{
+	u32 *bdl = *bdlp;
+
+	ctx->cl_dev.frags = 0;
+	while (size > 0) {
+		phys_addr_t addr = virt_to_phys(dmab_data->area +
+				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));
+
+		bdl[0] = cpu_to_le32(lower_32_bits(addr));
+		bdl[1] = cpu_to_le32(upper_32_bits(addr));
+
+		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
+
+		size -= ctx->cl_dev.bufsize;
+		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
+
+		bdl += 4;
+		ctx->cl_dev.frags++;
+	}
+}
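+
+/*
+ * Each BDL entry written above is four 32-bit words, matching the HDA
+ * buffer descriptor format: address low, address high, buffer length and
+ * the IOC (interrupt on completion) flag, which is set only on the last
+ * fragment and only when with_ioc is requested.
+ */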
+
+/*
+ * Setup controller
+ * Configure the registers to update the dma buffer address and
+ * enable interrupts.
+ * Note: Using channel 1 for the transfer
+ */
+static void skl_cldma_setup_controller(struct sst_dsp  *ctx,
+		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
+		u32 count)
+{
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
+			CL_SD_BDLPLBA(dmab_bdl->addr));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
+			CL_SD_BDLPUBA(dmab_bdl->addr));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
+}
+
+static void skl_cldma_setup_spb(struct sst_dsp  *ctx,
+		unsigned int size, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+				CL_SPBFIFO_SPBFCCTL_SPIBE(1));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
+}
+
+static void skl_cldma_cleanup_spb(struct sst_dsp  *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+			CL_SPBFIFO_SPBFCCTL_SPIBE(0));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
+}
+
+static void skl_cldma_trigger(struct sst_dsp  *ctx, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
+	else
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
+}
+
+static void skl_cldma_cleanup(struct sst_dsp  *ctx)
+{
+	skl_cldma_cleanup_spb(ctx);
+
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
+}
+
+static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
+{
+	int ret = 0;
+
+	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
+				ctx->cl_dev.wait_condition,
+				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
+		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
+		ret = -EIO;
+		goto cleanup;
+	}
+
+	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
+	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
+		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
+		ret = -EIO;
+	}
+
+cleanup:
+	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
+	return ret;
+}
+
+static void skl_cldma_stop(struct sst_dsp *ctx)
+{
+	ctx->cl_dev.ops.cl_trigger(ctx, false);
+}
+
+static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
+		const void *curr_pos, bool intr_enable, bool trigger)
+{
+	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
+	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
+			ctx->cl_dev.dma_buffer_offset, trigger);
+	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
+
+	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
+			curr_pos, size);
+
+	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
+		ctx->cl_dev.dma_buffer_offset = 0;
+	else
+		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
+
+	ctx->cl_dev.wait_condition = false;
+
+	if (intr_enable)
+		skl_cldma_int_enable(ctx);
+
+	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
+	if (trigger)
+		ctx->cl_dev.ops.cl_trigger(ctx, true);
+}
+
+/*
+ * The CL dma doesn't have any way to update the transfer status until a BDL
+ * buffer is fully transferred.
+ *
+ * So copying is divided into two parts:
+ * 1. Interrupt on buffer completion, used when the size to be transferred
+ *    is larger than the ring buffer.
+ * 2. Polling on a FW register, used when the data left to transfer does
+ *    not fill the ring buffer. The caller takes care of polling the
+ *    required status register to identify the transfer status.
+ */
+static int
+skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
+{
+	int ret = 0;
+	bool start = true;
+	unsigned int excess_bytes;
+	u32 size;
+	unsigned int bytes_left = total_size;
+	const void *curr_pos = bin;
+
+	if (total_size == 0)
+		return -EINVAL;
+
+	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
+
+	while (bytes_left) {
+		if (bytes_left > ctx->cl_dev.bufsize) {
+
+			/*
+			 * dma transfers only till the write pointer as
+			 * updated in spib
+			 */
+			if (ctx->cl_dev.curr_spib_pos == 0)
+				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
+
+			size = ctx->cl_dev.bufsize;
+			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
+
+			start = false;
+			ret = skl_cldma_wait_interruptible(ctx);
+			if (ret < 0) {
+				skl_cldma_stop(ctx);
+				return ret;
+			}
+
+		} else {
+			skl_cldma_int_disable(ctx);
+
+			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
+							<= ctx->cl_dev.bufsize) {
+				ctx->cl_dev.curr_spib_pos += bytes_left;
+			} else {
+				excess_bytes = bytes_left -
+					(ctx->cl_dev.bufsize -
+					ctx->cl_dev.curr_spib_pos);
+				ctx->cl_dev.curr_spib_pos = excess_bytes;
+			}
+
+			size = bytes_left;
+			skl_cldma_fill_buffer(ctx, size,
+					curr_pos, false, start);
+		}
+		bytes_left -= size;
+		curr_pos = curr_pos + size;
+	}
+
+	return ret;
+}
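+
+/*
+ * Worked example (illustrative, assuming 4 KiB pages so the ring buffer is
+ * 32 * 4 KiB = 128 KiB): copying a 300 KiB firmware binary proceeds as two
+ * full 128 KiB interrupt-driven fills, then a final 44 KiB fill with
+ * interrupts disabled, after which the caller polls the FW status register
+ * for completion.
+ */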
+
+void skl_cldma_process_intr(struct sst_dsp *ctx)
+{
+	u8 cl_dma_intr_status;
+
+	cl_dma_intr_status =
+		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
+
+	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
+		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
+	else
+		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
+
+	ctx->cl_dev.wait_condition = true;
+	wake_up(&ctx->cl_dev.wait_queue);
+}
+
+int skl_cldma_prepare(struct sst_dsp *ctx)
+{
+	int ret;
+	u32 *bdl;
+
+	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
+
+	/* Allocate cl ops */
+	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
+	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
+	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
+	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
+	ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
+	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
+	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
+	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
+
+	/* Allocate buffer */
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
+		return ret;
+	}
+	/* Setup Code loader BDL */
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for blde failed: %x", ret);
+		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
+		return ret;
+	}
+	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
+
+	/* Set up the BDL entries */
+	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
+			&bdl, ctx->cl_dev.bufsize, 1);
+	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
+			ctx->cl_dev.bufsize, ctx->cl_dev.frags);
+
+	ctx->cl_dev.curr_spib_pos = 0;
+	ctx->cl_dev.dma_buffer_offset = 0;
+	init_waitqueue_head(&ctx->cl_dev.wait_queue);
+
+	return ret;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
new file mode 100644
index 0000000..99e4c86
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -0,0 +1,251 @@
+/*
+ * Intel Code Loader DMA support
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef SKL_SST_CLDMA_H_
+#define SKL_SST_CLDMA_H_
+
+#define FW_CL_STREAM_NUMBER		0x1
+
+#define DMA_ADDRESS_128_BITS_ALIGNMENT	7
+#define BDL_ALIGN(x)			(x >> DMA_ADDRESS_128_BITS_ALIGNMENT)
+
+#define SKL_ADSPIC_CL_DMA			0x2
+#define SKL_ADSPIS_CL_DMA			0x2
+#define SKL_CL_DMA_SD_INT_DESC_ERR		0x10 /* Descriptor error interrupt */
+#define SKL_CL_DMA_SD_INT_FIFO_ERR		0x08 /* FIFO error interrupt */
+#define SKL_CL_DMA_SD_INT_COMPLETE		0x04 /* Buffer completion interrupt */
+
+/* Intel HD Audio Code Loader DMA Registers */
+
+#define HDA_ADSP_LOADER_BASE		0x80
+
+/* Stream Registers */
+#define SKL_ADSP_REG_CL_SD_CTL			(HDA_ADSP_LOADER_BASE + 0x00)
+#define SKL_ADSP_REG_CL_SD_STS			(HDA_ADSP_LOADER_BASE + 0x03)
+#define SKL_ADSP_REG_CL_SD_LPIB			(HDA_ADSP_LOADER_BASE + 0x04)
+#define SKL_ADSP_REG_CL_SD_CBL			(HDA_ADSP_LOADER_BASE + 0x08)
+#define SKL_ADSP_REG_CL_SD_LVI			(HDA_ADSP_LOADER_BASE + 0x0c)
+#define SKL_ADSP_REG_CL_SD_FIFOW		(HDA_ADSP_LOADER_BASE + 0x0e)
+#define SKL_ADSP_REG_CL_SD_FIFOSIZE		(HDA_ADSP_LOADER_BASE + 0x10)
+#define SKL_ADSP_REG_CL_SD_FORMAT		(HDA_ADSP_LOADER_BASE + 0x12)
+#define SKL_ADSP_REG_CL_SD_FIFOL		(HDA_ADSP_LOADER_BASE + 0x14)
+#define SKL_ADSP_REG_CL_SD_BDLPL		(HDA_ADSP_LOADER_BASE + 0x18)
+#define SKL_ADSP_REG_CL_SD_BDLPU		(HDA_ADSP_LOADER_BASE + 0x1c)
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SKL_ADSP_REG_CL_SPBFIFO			(HDA_ADSP_LOADER_BASE + 0x20)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH		(SKL_ADSP_REG_CL_SPBFIFO + 0x0)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL	(SKL_ADSP_REG_CL_SPBFIFO + 0x4)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPIB		(SKL_ADSP_REG_CL_SPBFIFO + 0x8)
+#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS	(SKL_ADSP_REG_CL_SPBFIFO + 0xc)
+
+/* CL: Stream Descriptor x Control */
+
+/* Stream Reset */
+#define CL_SD_CTL_SRST_SHIFT		0
+#define CL_SD_CTL_SRST_MASK		(1 << CL_SD_CTL_SRST_SHIFT)
+#define CL_SD_CTL_SRST(x)		\
+			((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
+
+/* Stream Run */
+#define CL_SD_CTL_RUN_SHIFT		1
+#define CL_SD_CTL_RUN_MASK		(1 << CL_SD_CTL_RUN_SHIFT)
+#define CL_SD_CTL_RUN(x)		\
+			((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
+
+/* Interrupt On Completion Enable */
+#define CL_SD_CTL_IOCE_SHIFT		2
+#define CL_SD_CTL_IOCE_MASK		(1 << CL_SD_CTL_IOCE_SHIFT)
+#define CL_SD_CTL_IOCE(x)		\
+			((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
+
+/* FIFO Error Interrupt Enable */
+#define CL_SD_CTL_FEIE_SHIFT		3
+#define CL_SD_CTL_FEIE_MASK		(1 << CL_SD_CTL_FEIE_SHIFT)
+#define CL_SD_CTL_FEIE(x)		\
+			((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
+
+/* Descriptor Error Interrupt Enable */
+#define CL_SD_CTL_DEIE_SHIFT		4
+#define CL_SD_CTL_DEIE_MASK		(1 << CL_SD_CTL_DEIE_SHIFT)
+#define CL_SD_CTL_DEIE(x)		\
+			((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
+
+/* FIFO Limit Change */
+#define CL_SD_CTL_FIFOLC_SHIFT		5
+#define CL_SD_CTL_FIFOLC_MASK		(1 << CL_SD_CTL_FIFOLC_SHIFT)
+#define CL_SD_CTL_FIFOLC(x)		\
+			((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
+
+/* Stripe Control */
+#define CL_SD_CTL_STRIPE_SHIFT		16
+#define CL_SD_CTL_STRIPE_MASK		(0x3 << CL_SD_CTL_STRIPE_SHIFT)
+#define CL_SD_CTL_STRIPE(x)		\
+			((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
+
+/* Traffic Priority */
+#define CL_SD_CTL_TP_SHIFT		18
+#define CL_SD_CTL_TP_MASK		(1 << CL_SD_CTL_TP_SHIFT)
+#define CL_SD_CTL_TP(x)			\
+			((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
+
+/* Bidirectional Direction Control */
+#define CL_SD_CTL_DIR_SHIFT		19
+#define CL_SD_CTL_DIR_MASK		(1 << CL_SD_CTL_DIR_SHIFT)
+#define CL_SD_CTL_DIR(x)		\
+			((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
+
+/* Stream Number */
+#define CL_SD_CTL_STRM_SHIFT		20
+#define CL_SD_CTL_STRM_MASK		(0xf << CL_SD_CTL_STRM_SHIFT)
+#define CL_SD_CTL_STRM(x)		\
+			((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
+
+/* CL: Stream Descriptor x Status */
+
+/* Buffer Completion Interrupt Status */
+#define CL_SD_STS_BCIS(x)		CL_SD_CTL_IOCE(x)
+
+/* FIFO Error */
+#define CL_SD_STS_FIFOE(x)		CL_SD_CTL_FEIE(x)
+
+/* Descriptor Error */
+#define CL_SD_STS_DESE(x)		CL_SD_CTL_DEIE(x)
+
+/* FIFO Ready */
+#define CL_SD_STS_FIFORDY(x)	CL_SD_CTL_FIFOLC(x)
+
+
+/* CL: Stream Descriptor x Last Valid Index */
+#define CL_SD_LVI_SHIFT			0
+#define CL_SD_LVI_MASK			(0xff << CL_SD_LVI_SHIFT)
+#define CL_SD_LVI(x)			((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
+
+/* CL: Stream Descriptor x FIFO Eviction Watermark */
+#define CL_SD_FIFOW_SHIFT		0
+#define CL_SD_FIFOW_MASK		(0x7 << CL_SD_FIFOW_SHIFT)
+#define CL_SD_FIFOW(x)			\
+			((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
+
+/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
+
+/* Protect Bits */
+#define CL_SD_BDLPLBA_PROT_SHIFT	0
+#define CL_SD_BDLPLBA_PROT_MASK		(1 << CL_SD_BDLPLBA_PROT_SHIFT)
+#define CL_SD_BDLPLBA_PROT(x)		\
+		((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
+
+/* Buffer Descriptor List Lower Base Address */
+#define CL_SD_BDLPLBA_SHIFT		7
+#define CL_SD_BDLPLBA_MASK		(0x1ffffff << CL_SD_BDLPLBA_SHIFT)
+#define CL_SD_BDLPLBA(x)		\
+	((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
+
+/* Buffer Descriptor List Upper Base Address */
+#define CL_SD_BDLPUBA_SHIFT		0
+#define CL_SD_BDLPUBA_MASK		(0xffffffff << CL_SD_BDLPUBA_SHIFT)
+#define CL_SD_BDLPUBA(x)		\
+		((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
+
+/*
+ * Code Loader - Software Position Based FIFO
+ * Capability Registers x Software Position Based FIFO Header
+ */
+
+/* Next Capability Pointer */
+#define CL_SPBFIFO_SPBFCH_PTR_SHIFT	0
+#define CL_SPBFIFO_SPBFCH_PTR_MASK	(0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
+#define CL_SPBFIFO_SPBFCH_PTR(x)	\
+		((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
+
+/* Capability Identifier */
+#define CL_SPBFIFO_SPBFCH_ID_SHIFT	16
+#define CL_SPBFIFO_SPBFCH_ID_MASK	(0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
+#define CL_SPBFIFO_SPBFCH_ID(x)		\
+		((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
+
+/* Capability Version */
+#define CL_SPBFIFO_SPBFCH_VER_SHIFT	28
+#define CL_SPBFIFO_SPBFCH_VER_MASK	(0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
+#define CL_SPBFIFO_SPBFCH_VER(x)	\
+	((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
+
+/* Software Position in Buffer Enable */
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT	0
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK	(1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
+#define CL_SPBFIFO_SPBFCCTL_SPIBE(x)	\
+	((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
+
+/* SST IPC SKL defines */
+#define SKL_WAIT_TIMEOUT		500	/* 500 msec */
+#define SKL_MAX_BUFFER_SIZE		(32 * PAGE_SIZE)
+
+enum skl_cl_dma_wake_states {
+	SKL_CL_DMA_STATUS_NONE = 0,
+	SKL_CL_DMA_BUF_COMPLETE,
+	SKL_CL_DMA_ERR,	/* TODO: Expand the error states */
+};
+
+struct sst_dsp;
+
+struct skl_cl_dev_ops {
+	void (*cl_setup_bdle)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_data,
+			u32 **bdlp, int size, int with_ioc);
+	void (*cl_setup_controller)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_bdl,
+			unsigned int max_size, u32 page_count);
+	void (*cl_setup_spb)(struct sst_dsp  *ctx,
+			unsigned int size, bool enable);
+	void (*cl_cleanup_spb)(struct sst_dsp  *ctx);
+	void (*cl_trigger)(struct sst_dsp  *ctx, bool enable);
+	void (*cl_cleanup_controller)(struct sst_dsp  *ctx);
+	int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
+			const void *bin, u32 size);
+	void (*cl_stop_dma)(struct sst_dsp *ctx);
+};
+
+/**
+ * struct skl_cl_dev - holds information for code loader DMA transfers
+ *
+ * @dmab_data: buffer pointer
+ * @dmab_bdl: buffer descriptor list
+ * @bufsize: ring buffer size
+ * @frags: Last valid buffer descriptor index in the BDL
+ * @curr_spib_pos: Current position in ring buffer
+ * @dma_buffer_offset: dma buffer offset
+ * @ops: operations supported on CL dma
+ * @wait_queue: wait queue woken on code loader DMA completion or error
+ * @wake_status: DMA wake status
+ * @wait_condition: condition to wait on wait queue
+ * @cl_dma_lock: for synchronized access to cldma
+ */
+struct skl_cl_dev {
+	struct snd_dma_buffer dmab_data;
+	struct snd_dma_buffer dmab_bdl;
+
+	unsigned int bufsize;
+	unsigned int frags;
+
+	unsigned int curr_spib_pos;
+	unsigned int dma_buffer_offset;
+	struct skl_cl_dev_ops ops;
+
+	wait_queue_head_t wait_queue;
+	int wake_status;
+	bool wait_condition;
+};
+
+#endif /* SKL_SST_CLDMA_H_ */
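
The shift/mask helpers above compose the code loader stream descriptor
registers field by field. A minimal sketch of how a caller might build the
control and last-valid-index words, assuming an n_frags-entry buffer
descriptor list (the function and parameter names are illustrative, not
part of this patch):

	static u32 example_cl_sd_ctl(unsigned int n_frags, u32 *lvi)
	{
		u32 ctl = 0;

		ctl |= CL_SD_CTL_STRM(0xf);	/* stream number field */
		ctl |= CL_SD_CTL_DIR(0);	/* host-to-DSP transfer */
		ctl |= CL_SD_CTL_TP(0);		/* traffic priority */

		*lvi = CL_SD_LVI(n_frags - 1);	/* last valid BDL index */
		return ctl;
	}
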
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
new file mode 100644
index 0000000..94875b0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -0,0 +1,342 @@
+/*
+ * skl-sst-dsp.c - SKL SST library generic function
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <sound/pcm.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-ipc.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+/* various timeout values */
+#define SKL_DSP_PU_TO		50
+#define SKL_DSP_PD_TO		50
+#define SKL_DSP_RESET_TO	50
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
+{
+	mutex_lock(&ctx->mutex);
+	ctx->sst_state = state;
+	mutex_unlock(&ctx->mutex);
+}
+
+static int skl_dsp_core_set_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
+			SKL_DSP_RESET_TO,
+			"Set reset");
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "Set reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_unset_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s\n", __func__);
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_CRST_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			0,
+			SKL_DSP_RESET_TO,
+			"Unset reset");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
+		dev_err(ctx->dev, "Unset reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static bool is_skl_dsp_core_enable(struct sst_dsp  *ctx)
+{
+	int val;
+	bool is_enable;
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
+
+	is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
+			(val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
+
+	dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
+	return is_enable;
+}
+
+static int skl_dsp_reset_core(struct sst_dsp *ctx)
+{
+	/* stall core */
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) |
+				SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	/* set reset state */
+	return skl_dsp_core_set_reset_state(ctx);
+}
+
+static int skl_dsp_start_core(struct sst_dsp *ctx)
+{
+	int ret;
+
+	/* unset reset state */
+	ret = skl_dsp_core_unset_reset_state(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp unset reset fails\n");
+		return ret;
+	}
+
+	/* run core */
+	dev_dbg(ctx->dev, "run core...\n");
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	if (!is_skl_dsp_core_enable(ctx)) {
+		skl_dsp_reset_core(ctx);
+		dev_err(ctx->dev, "DSP core enable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_up(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CPA_MASK,
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
+			SKL_DSP_PU_TO,
+			"Power up");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "DSP core power up failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_down(struct sst_dsp  *ctx)
+{
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_SPA_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	return sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK,
+			0,
+			SKL_DSP_PD_TO,
+			"Power down");
+}
+
+static int skl_dsp_enable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* power up */
+	ret = skl_dsp_core_power_up(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp core power up failed\n");
+		return ret;
+	}
+
+	return skl_dsp_start_core(ctx);
+}
+
+int skl_dsp_disable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	ret = skl_dsp_reset_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core reset failed\n");
+		return ret;
+	}
+
+	/* power down core*/
+	ret = skl_dsp_core_power_down(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core power down failed\n");
+		return ret;
+	}
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_err(ctx->dev, "DSP core disable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+int skl_dsp_boot(struct sst_dsp *ctx)
+{
+	int ret;
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dap core\n");
+		ret = skl_dsp_reset_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp reset failed\n");
+			return ret;
+		}
+
+		ret = skl_dsp_start_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp start failed\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(ctx->dev, "disable and enable to make sure DSP is invalid state\n");
+		ret = skl_dsp_disable_core(ctx);
+
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp disable core failes\n");
+			return ret;
+		}
+		ret = skl_dsp_enable_core(ctx);
+	}
+
+	return ret;
+}
+
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
+{
+	struct sst_dsp *ctx = dev_id;
+	u32 val;
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&ctx->spinlock);
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
+	ctx->intr_status = val;
+
+	if (val & SKL_ADSPIS_IPC) {
+		skl_ipc_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	if (val & SKL_ADSPIS_CL_DMA) {
+		skl_cldma_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	spin_unlock(&ctx->spinlock);
+
+	return result;
+}
+
+int skl_dsp_wake(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D0(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_wake);
+
+int skl_dsp_sleep(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D3(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_sleep);
+
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq)
+{
+	int ret;
+	struct sst_dsp *sst;
+
+	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL)
+		return NULL;
+
+	spin_lock_init(&sst->spinlock);
+	mutex_init(&sst->mutex);
+	sst->dev = dev;
+	sst->sst_dev = sst_dev;
+	sst->irq = irq;
+	sst->ops = sst_dev->ops;
+	sst->thread_context = sst_dev->thread_context;
+
+	/* Initialise SST Audio DSP */
+	if (sst->ops->init) {
+		ret = sst->ops->init(sst, NULL);
+		if (ret < 0)
+			return NULL;
+	}
+
+	/* Register the ISR */
+	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+	if (ret) {
+		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
+			       sst->irq);
+		return NULL;
+	}
+
+	return sst;
+}
+
+void skl_dsp_free(struct sst_dsp *dsp)
+{
+	skl_ipc_int_disable(dsp);
+
+	free_irq(dsp->irq, dsp);
+	skl_dsp_disable_core(dsp);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_free);
+
+bool is_skl_dsp_running(struct sst_dsp *ctx)
+{
+	return (ctx->sst_state == SKL_DSP_RUNNING);
+}
+EXPORT_SYMBOL_GPL(is_skl_dsp_running);
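
skl_dsp_ctx_init() plus skl_dsp_boot() is the minimal bring-up path exposed
by this file. A hedged sketch of a caller, assuming a populated struct
sst_dsp_device (the example_ prefix marks hypothetical code, not part of
this patch):

	static struct sst_dsp *example_dsp_bringup(struct device *dev,
			struct sst_dsp_device *sst_dev, int irq)
	{
		struct sst_dsp *dsp;

		/* allocates the context and requests the threaded IRQ */
		dsp = skl_dsp_ctx_init(dev, sst_dev, irq);
		if (!dsp)
			return NULL;

		/* powers up, un-resets and un-stalls DSP core 0 */
		if (skl_dsp_boot(dsp) < 0) {
			skl_dsp_free(dsp);	/* disables IPC irq, frees IRQ */
			return NULL;
		}
		return dsp;
	}
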
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
new file mode 100644
index 0000000..6bfcef4
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -0,0 +1,145 @@
+/*
+ * Skylake SST DSP Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_SST_DSP_H__
+#define __SKL_SST_DSP_H__
+
+#include <linux/interrupt.h>
+#include <sound/memalloc.h>
+#include "skl-sst-cldma.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_dsp_device;
+
+/* Intel HD Audio General DSP Registers */
+#define SKL_ADSP_GEN_BASE		0x0
+#define SKL_ADSP_REG_ADSPCS		(SKL_ADSP_GEN_BASE + 0x04)
+#define SKL_ADSP_REG_ADSPIC		(SKL_ADSP_GEN_BASE + 0x08)
+#define SKL_ADSP_REG_ADSPIS		(SKL_ADSP_GEN_BASE + 0x0C)
+#define SKL_ADSP_REG_ADSPIC2		(SKL_ADSP_GEN_BASE + 0x10)
+#define SKL_ADSP_REG_ADSPIS2		(SKL_ADSP_GEN_BASE + 0x14)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define SKL_ADSP_IPC_BASE		0x40
+#define SKL_ADSP_REG_HIPCT		(SKL_ADSP_IPC_BASE + 0x00)
+#define SKL_ADSP_REG_HIPCTE		(SKL_ADSP_IPC_BASE + 0x04)
+#define SKL_ADSP_REG_HIPCI		(SKL_ADSP_IPC_BASE + 0x08)
+#define SKL_ADSP_REG_HIPCIE		(SKL_ADSP_IPC_BASE + 0x0C)
+#define SKL_ADSP_REG_HIPCCTL		(SKL_ADSP_IPC_BASE + 0x10)
+
+/*  HIPCI */
+#define SKL_ADSP_REG_HIPCI_BUSY		BIT(31)
+
+/* HIPCIE */
+#define SKL_ADSP_REG_HIPCIE_DONE	BIT(30)
+
+/* HIPCCTL */
+#define SKL_ADSP_REG_HIPCCTL_DONE	BIT(1)
+#define SKL_ADSP_REG_HIPCCTL_BUSY	BIT(0)
+
+/* HIPCT */
+#define SKL_ADSP_REG_HIPCT_BUSY		BIT(31)
+
+/* Intel HD Audio SRAM Window 1 */
+#define SKL_ADSP_SRAM1_BASE		0xA000
+
+#define SKL_ADSP_MMIO_LEN		0x10000
+
+#define SKL_ADSP_W0_STAT_SZ		0x800
+
+#define SKL_ADSP_W0_UP_SZ		0x800
+
+#define SKL_ADSP_W1_SZ			0x1000
+
+#define SKL_FW_STS_MASK			0xf
+
+#define SKL_FW_INIT			0x1
+#define SKL_FW_RFW_START		0xf
+
+#define SKL_ADSPIC_IPC			1
+#define SKL_ADSPIS_IPC			1
+
+/* ADSPCS - Audio DSP Control & Status */
+#define SKL_DSP_CORES		1
+#define SKL_DSP_CORE0_MASK	1
+#define SKL_DSP_CORES_MASK	((1 << SKL_DSP_CORES) - 1)
+
+/* Core Reset - asserted high */
+#define SKL_ADSPCS_CRST_SHIFT	0
+#define SKL_ADSPCS_CRST_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
+#define SKL_ADSPCS_CRST(x)	((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
+
+/* Core run/stall - when set to '1' core is stalled */
+#define SKL_ADSPCS_CSTALL_SHIFT	8
+#define SKL_ADSPCS_CSTALL_MASK	(SKL_DSP_CORES_MASK <<	\
+					SKL_ADSPCS_CSTALL_SHIFT)
+#define SKL_ADSPCS_CSTALL(x)	((x << SKL_ADSPCS_CSTALL_SHIFT) &	\
+				SKL_ADSPCS_CSTALL_MASK)
+
+/* Set Power Active - when set to '1' turn cores on */
+#define SKL_ADSPCS_SPA_SHIFT	16
+#define SKL_ADSPCS_SPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
+#define SKL_ADSPCS_SPA(x)	((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
+
+/* Current Power Active - power status of cores, set by hardware */
+#define SKL_ADSPCS_CPA_SHIFT	24
+#define SKL_ADSPCS_CPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
+#define SKL_ADSPCS_CPA(x)	((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
+
+#define SST_DSP_POWER_D0	0x0  /* full On */
+#define SST_DSP_POWER_D3	0x3  /* Off */
+
+enum skl_dsp_states {
+	SKL_DSP_RUNNING = 1,
+	SKL_DSP_RESET,
+};
+
+struct skl_dsp_fw_ops {
+	int (*load_fw)(struct sst_dsp  *ctx);
+	/* FW module parser/loader */
+	int (*parse_fw)(struct sst_dsp *ctx);
+	int (*set_state_D0)(struct sst_dsp *ctx);
+	int (*set_state_D3)(struct sst_dsp *ctx);
+	unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
+};
+
+struct skl_dsp_loader_ops {
+	int (*alloc_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size);
+	int (*free_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab);
+};
+
+void skl_cldma_process_intr(struct sst_dsp *ctx);
+void skl_cldma_int_disable(struct sst_dsp *ctx);
+int skl_cldma_prepare(struct sst_dsp *ctx);
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_disable_core(struct sst_dsp  *ctx);
+bool is_skl_dsp_running(struct sst_dsp *ctx);
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
+int skl_dsp_wake(struct sst_dsp *ctx);
+int skl_dsp_sleep(struct sst_dsp *ctx);
+void skl_dsp_free(struct sst_dsp *dsp);
+
+int skl_dsp_boot(struct sst_dsp *ctx);
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp);
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+
+#endif /*__SKL_SST_DSP_H__*/
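
In the ADSPCS register described above, SPA requests power, CPA reports
that hardware has granted it, and CRST/CSTALL gate execution. An
illustrative helper (not part of this patch; it assumes
sst_dsp_shim_read_unlocked() from common/sst-dsp.h) that checks whether
core 0 is actually able to run:

	static bool example_core0_usable(struct sst_dsp *ctx)
	{
		u32 cs = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

		return (cs & SKL_ADSPCS_CPA(SKL_DSP_CORE0_MASK)) &&
		       !(cs & SKL_ADSPCS_CRST(SKL_DSP_CORE0_MASK)) &&
		       !(cs & SKL_ADSPCS_CSTALL(SKL_DSP_CORE0_MASK));
	}
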
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
new file mode 100644
index 0000000..937a0a3
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -0,0 +1,771 @@
+/*
+ * skl-sst-ipc.c - Intel skl IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+
+#define IPC_IXC_STATUS_BITS		24
+
+/* Global Message - Generic */
+#define IPC_GLB_TYPE_SHIFT		24
+#define IPC_GLB_TYPE_MASK		(0xf << IPC_GLB_TYPE_SHIFT)
+#define IPC_GLB_TYPE(x)			((x) << IPC_GLB_TYPE_SHIFT)
+
+/* Global Message - Reply */
+#define IPC_GLB_REPLY_STATUS_SHIFT	24
+#define IPC_GLB_REPLY_STATUS_MASK	((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
+#define IPC_GLB_REPLY_STATUS(x)		((x) << IPC_GLB_REPLY_STATUS_SHIFT)
+
+#define IPC_TIMEOUT_MSECS		3000
+
+#define IPC_EMPTY_LIST_SIZE		8
+
+#define IPC_MSG_TARGET_SHIFT		30
+#define IPC_MSG_TARGET_MASK		0x1
+#define IPC_MSG_TARGET(x)		(((x) & IPC_MSG_TARGET_MASK) \
+					<< IPC_MSG_TARGET_SHIFT)
+
+#define IPC_MSG_DIR_SHIFT		29
+#define IPC_MSG_DIR_MASK		0x1
+#define IPC_MSG_DIR(x)			(((x) & IPC_MSG_DIR_MASK) \
+					<< IPC_MSG_DIR_SHIFT)
+/* Global Notification Message */
+#define IPC_GLB_NOTIFY_TYPE_SHIFT	16
+#define IPC_GLB_NOTIFY_TYPE_MASK	0xFF
+#define IPC_GLB_NOTIFY_TYPE(x)		(((x) >> IPC_GLB_NOTIFY_TYPE_SHIFT) \
+					& IPC_GLB_NOTIFY_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_MSG_TYPE_SHIFT	24
+#define IPC_GLB_NOTIFY_MSG_TYPE_MASK	0x1F
+#define IPC_GLB_NOTIFY_MSG_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_MSG_TYPE_SHIFT)	\
+						& IPC_GLB_NOTIFY_MSG_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_RSP_SHIFT	29
+#define IPC_GLB_NOTIFY_RSP_MASK		0x1
+#define IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_RSP_SHIFT) \
+					& IPC_GLB_NOTIFY_RSP_MASK)
+
+/* Pipeline operations */
+
+/* Create pipeline message */
+#define IPC_PPL_MEM_SIZE_SHIFT		0
+#define IPC_PPL_MEM_SIZE_MASK		0x7FF
+#define IPC_PPL_MEM_SIZE(x)		(((x) & IPC_PPL_MEM_SIZE_MASK) \
+					<< IPC_PPL_MEM_SIZE_SHIFT)
+
+#define IPC_PPL_TYPE_SHIFT		11
+#define IPC_PPL_TYPE_MASK		0x1F
+#define IPC_PPL_TYPE(x)			(((x) & IPC_PPL_TYPE_MASK) \
+					<< IPC_PPL_TYPE_SHIFT)
+
+#define IPC_INSTANCE_ID_SHIFT		16
+#define IPC_INSTANCE_ID_MASK		0xFF
+#define IPC_INSTANCE_ID(x)		(((x) & IPC_INSTANCE_ID_MASK) \
+					<< IPC_INSTANCE_ID_SHIFT)
+
+/* Set pipeline state message */
+#define IPC_PPL_STATE_SHIFT		0
+#define IPC_PPL_STATE_MASK		0x1F
+#define IPC_PPL_STATE(x)		(((x) & IPC_PPL_STATE_MASK) \
+					<< IPC_PPL_STATE_SHIFT)
+
+/* Module operations primary register */
+#define IPC_MOD_ID_SHIFT		0
+#define IPC_MOD_ID_MASK		0xFFFF
+#define IPC_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_MOD_ID_SHIFT)
+
+#define IPC_MOD_INSTANCE_ID_SHIFT	16
+#define IPC_MOD_INSTANCE_ID_MASK	0xFF
+#define IPC_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_MOD_INSTANCE_ID_SHIFT)
+
+/* Init instance message extension register */
+#define IPC_PARAM_BLOCK_SIZE_SHIFT	0
+#define IPC_PARAM_BLOCK_SIZE_MASK	0xFFFF
+#define IPC_PARAM_BLOCK_SIZE(x)		(((x) & IPC_PARAM_BLOCK_SIZE_MASK) \
+					<< IPC_PARAM_BLOCK_SIZE_SHIFT)
+
+#define IPC_PPL_INSTANCE_ID_SHIFT	16
+#define IPC_PPL_INSTANCE_ID_MASK	0xFF
+#define IPC_PPL_INSTANCE_ID(x)		(((x) & IPC_PPL_INSTANCE_ID_MASK) \
+					<< IPC_PPL_INSTANCE_ID_SHIFT)
+
+#define IPC_CORE_ID_SHIFT		24
+#define IPC_CORE_ID_MASK		0x1F
+#define IPC_CORE_ID(x)			(((x) & IPC_CORE_ID_MASK) \
+					<< IPC_CORE_ID_SHIFT)
+
+/* Bind/Unbind message extension register */
+#define IPC_DST_MOD_ID_SHIFT		0
+#define IPC_DST_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_DST_MOD_ID_SHIFT)
+
+#define IPC_DST_MOD_INSTANCE_ID_SHIFT 16
+#define IPC_DST_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_DST_MOD_INSTANCE_ID_SHIFT)
+
+#define IPC_DST_QUEUE_SHIFT		24
+#define IPC_DST_QUEUE_MASK		0x7
+#define IPC_DST_QUEUE(x)		(((x) & IPC_DST_QUEUE_MASK) \
+					<< IPC_DST_QUEUE_SHIFT)
+
+#define IPC_SRC_QUEUE_SHIFT		27
+#define IPC_SRC_QUEUE_MASK		0x7
+#define IPC_SRC_QUEUE(x)		(((x) & IPC_SRC_QUEUE_MASK) \
+					<< IPC_SRC_QUEUE_SHIFT)
+
+/* Save pipeline message extension register */
+#define IPC_DMA_ID_SHIFT		0
+#define IPC_DMA_ID_MASK			0x1F
+#define IPC_DMA_ID(x)			(((x) & IPC_DMA_ID_MASK) \
+					<< IPC_DMA_ID_SHIFT)
+/* Large Config message extension register */
+#define IPC_DATA_OFFSET_SZ_SHIFT	0
+#define IPC_DATA_OFFSET_SZ_MASK		0xFFFFF
+#define IPC_DATA_OFFSET_SZ(x)		(((x) & IPC_DATA_OFFSET_SZ_MASK) \
+					<< IPC_DATA_OFFSET_SZ_SHIFT)
+#define IPC_DATA_OFFSET_SZ_CLEAR	~(IPC_DATA_OFFSET_SZ_MASK \
+					  << IPC_DATA_OFFSET_SZ_SHIFT)
+
+#define IPC_LARGE_PARAM_ID_SHIFT	20
+#define IPC_LARGE_PARAM_ID_MASK		0xFF
+#define IPC_LARGE_PARAM_ID(x)		(((x) & IPC_LARGE_PARAM_ID_MASK) \
+					<< IPC_LARGE_PARAM_ID_SHIFT)
+
+#define IPC_FINAL_BLOCK_SHIFT		28
+#define IPC_FINAL_BLOCK_MASK		0x1
+#define IPC_FINAL_BLOCK(x)		(((x) & IPC_FINAL_BLOCK_MASK) \
+					<< IPC_FINAL_BLOCK_SHIFT)
+
+#define IPC_INITIAL_BLOCK_SHIFT		29
+#define IPC_INITIAL_BLOCK_MASK		0x1
+#define IPC_INITIAL_BLOCK(x)		(((x) & IPC_INITIAL_BLOCK_MASK) \
+					<< IPC_INITIAL_BLOCK_SHIFT)
+#define IPC_INITIAL_BLOCK_CLEAR		~(IPC_INITIAL_BLOCK_MASK \
+					  << IPC_INITIAL_BLOCK_SHIFT)
+
+enum skl_ipc_msg_target {
+	IPC_FW_GEN_MSG = 0,
+	IPC_MOD_MSG = 1
+};
+
+enum skl_ipc_msg_direction {
+	IPC_MSG_REQUEST = 0,
+	IPC_MSG_REPLY = 1
+};
+
+/* Global Message Types */
+enum skl_ipc_glb_type {
+	IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
+	IPC_GLB_LOAD_MULTIPLE_MODS = 15,
+	IPC_GLB_UNLOAD_MULTIPLE_MODS = 16,
+	IPC_GLB_CREATE_PPL = 17,
+	IPC_GLB_DELETE_PPL = 18,
+	IPC_GLB_SET_PPL_STATE = 19,
+	IPC_GLB_GET_PPL_STATE = 20,
+	IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
+	IPC_GLB_SAVE_PPL = 22,
+	IPC_GLB_RESTORE_PPL = 23,
+	IPC_GLB_NOTIFY = 26,
+	IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
+};
+
+enum skl_ipc_glb_reply {
+	IPC_GLB_REPLY_SUCCESS = 0,
+
+	IPC_GLB_REPLY_UNKNOWN_MSG_TYPE = 1,
+	IPC_GLB_REPLY_ERROR_INVALID_PARAM = 2,
+
+	IPC_GLB_REPLY_BUSY = 3,
+	IPC_GLB_REPLY_PENDING = 4,
+	IPC_GLB_REPLY_FAILURE = 5,
+	IPC_GLB_REPLY_INVALID_REQUEST = 6,
+
+	IPC_GLB_REPLY_OUT_OF_MEMORY = 7,
+	IPC_GLB_REPLY_OUT_OF_MIPS = 8,
+
+	IPC_GLB_REPLY_INVALID_RESOURCE_ID = 9,
+	IPC_GLB_REPLY_INVALID_RESOURCE_STATE = 10,
+
+	IPC_GLB_REPLY_MOD_MGMT_ERROR = 100,
+	IPC_GLB_REPLY_MOD_LOAD_CL_FAILED = 101,
+	IPC_GLB_REPLY_MOD_LOAD_INVALID_HASH = 102,
+
+	IPC_GLB_REPLY_MOD_UNLOAD_INST_EXIST = 103,
+	IPC_GLB_REPLY_MOD_NOT_INITIALIZED = 104,
+
+	IPC_GLB_REPLY_INVALID_CONFIG_PARAM_ID = 120,
+	IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
+	IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
+	IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
+
+	IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
+	IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
+	IPC_GLB_REPLY_PPL_SAVE_FAILED = 162,
+	IPC_GLB_REPLY_PPL_RESTORE_FAILED = 163,
+
+	IPC_MAX_STATUS = ((1 << IPC_IXC_STATUS_BITS) - 1)
+};
+
+enum skl_ipc_notification_type {
+	IPC_GLB_NOTIFY_GLITCH = 0,
+	IPC_GLB_NOTIFY_OVERRUN = 1,
+	IPC_GLB_NOTIFY_UNDERRUN = 2,
+	IPC_GLB_NOTIFY_END_STREAM = 3,
+	IPC_GLB_NOTIFY_PHRASE_DETECTED = 4,
+	IPC_GLB_NOTIFY_RESOURCE_EVENT = 5,
+	IPC_GLB_NOTIFY_LOG_BUFFER_STATUS = 6,
+	IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED = 7,
+	IPC_GLB_NOTIFY_FW_READY = 8
+};
+
+/* Module Message Types */
+enum skl_ipc_module_msg {
+	IPC_MOD_INIT_INSTANCE = 0,
+	IPC_MOD_CONFIG_GET = 1,
+	IPC_MOD_CONFIG_SET = 2,
+	IPC_MOD_LARGE_CONFIG_GET = 3,
+	IPC_MOD_LARGE_CONFIG_SET = 4,
+	IPC_MOD_BIND = 5,
+	IPC_MOD_UNBIND = 6,
+	IPC_MOD_SET_DX = 7
+};
+
+static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
+		size_t tx_size)
+{
+	if (tx_size)
+		memcpy(msg->tx_data, tx_data, tx_size);
+}
+
+static bool skl_ipc_is_dsp_busy(struct sst_dsp *dsp)
+{
+	u32 hipci;
+
+	hipci = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCI);
+	return (hipci & SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+/* Lock to be held by caller */
+static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
+{
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
+
+	if (msg->tx_size)
+		sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCIE,
+						header->extension);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCI,
+		header->primary | SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
+				u64 ipc_header)
+{
+	struct ipc_message *msg =  NULL;
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&ipc_header);
+
+	if (list_empty(&ipc->rx_list)) {
+		dev_err(ipc->dev, "ipc: rx list is empty but received 0x%x\n",
+			header->primary);
+		goto out;
+	}
+
+	msg = list_first_entry(&ipc->rx_list, struct ipc_message, list);
+
+out:
+	return msg;
+}
+
+static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+
+	if (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
+		switch (IPC_GLB_NOTIFY_TYPE(header.primary)) {
+
+		case IPC_GLB_NOTIFY_UNDERRUN:
+			dev_err(ipc->dev, "FW Underrun %x\n", header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_RESOURCE_EVENT:
+			dev_err(ipc->dev, "MCPS Budget Violation: %x\n",
+						header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_FW_READY:
+			skl->boot_complete = true;
+			wake_up(&skl->boot_wait);
+			break;
+
+		default:
+			dev_err(ipc->dev, "ipc: Unhandled error msg=%x",
+						header.primary);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct ipc_message *msg;
+	u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
+	u64 *ipc_header = (u64 *)(&header);
+
+	msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+	if (msg == NULL) {
+		dev_dbg(ipc->dev, "ipc: rx list is empty\n");
+		return;
+	}
+
+	/* first process the header */
+	switch (reply) {
+	case IPC_GLB_REPLY_SUCCESS:
+		dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
+		break;
+
+	case IPC_GLB_REPLY_OUT_OF_MEMORY:
+		dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary);
+		msg->errno = -ENOMEM;
+		break;
+
+	case IPC_GLB_REPLY_BUSY:
+		dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
+		msg->errno = -EBUSY;
+		break;
+
+	default:
+		dev_err(ipc->dev, "Unknown ipc reply: 0x%x", reply);
+		msg->errno = -EINVAL;
+		break;
+	}
+
+	if (reply != IPC_GLB_REPLY_SUCCESS) {
+		dev_err(ipc->dev, "ipc FW reply: reply=%d", reply);
+		dev_err(ipc->dev, "FW Error Code: %u\n",
+			ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+	}
+
+	list_del(&msg->list);
+	sst_ipc_tx_msg_reply_complete(ipc, msg);
+}
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
+{
+	struct sst_dsp *dsp = context;
+	struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
+	struct sst_generic_ipc *ipc = &skl->ipc;
+	struct skl_ipc_header header = {0};
+	u32 hipcie, hipct, hipcte;
+	int ipc_irq = 0;
+
+	if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
+		skl_cldma_process_intr(dsp);
+
+	/* Here we handle IPC interrupts only */
+	if (!(dsp->intr_status & SKL_ADSPIS_IPC))
+		return IRQ_NONE;
+
+	hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
+	hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);
+
+	/* reply message from DSP */
+	if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+		/* clear DONE bit - tell DSP we have completed the operation */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
+			SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);
+
+		ipc_irq = 1;
+
+		/* unmask Done interrupt */
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+	}
+
+	/* New message from DSP */
+	if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
+		hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
+		header.primary = hipct;
+		header.extension = hipcte;
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
+						header.primary);
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
+						header.extension);
+
+		if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
+			/* Handle Immediate reply from DSP Core */
+			skl_ipc_process_reply(ipc, header);
+		} else {
+			dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
+			skl_ipc_process_notification(ipc, header);
+		}
+		/* clear  busy interrupt */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
+			SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
+		ipc_irq = 1;
+	}
+
+	if (ipc_irq == 0)
+		return IRQ_NONE;
+
+	skl_ipc_int_enable(dsp);
+
+	/* continue to send any remaining messages... */
+	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+
+	return IRQ_HANDLED;
+}
+
+void skl_ipc_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, SKL_ADSPIC_IPC);
+}
+
+void skl_ipc_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, 0);
+}
+
+void skl_ipc_op_int_enable(struct sst_dsp *ctx)
+{
+	/* enable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+
+	/* Enable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, SKL_ADSP_REG_HIPCCTL_BUSY);
+}
+
+bool skl_ipc_int_status(struct sst_dsp *ctx)
+{
+	return sst_dsp_shim_read_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIS) & SKL_ADSPIS_IPC;
+}
+
+int skl_ipc_init(struct device *dev, struct skl_sst *skl)
+{
+	struct sst_generic_ipc *ipc;
+	int err;
+
+	ipc = &skl->ipc;
+	ipc->dsp = skl->dsp;
+	ipc->dev = dev;
+
+	ipc->tx_data_max_size = SKL_ADSP_W1_SZ;
+	ipc->rx_data_max_size = SKL_ADSP_W0_UP_SZ;
+
+	err = sst_ipc_init(ipc);
+	if (err)
+		return err;
+
+	ipc->ops.tx_msg = skl_ipc_tx_msg;
+	ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
+	ipc->ops.is_dsp_busy = skl_ipc_is_dsp_busy;
+
+	return 0;
+}
+
+void skl_ipc_free(struct sst_generic_ipc *ipc)
+{
+	/* Disable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+	/* Disable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, 0);
+
+	sst_ipc_fini(ipc);
+}
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_TYPE(ppl_type);
+	header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: create pipeline fail, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_create_pipeline);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_DELETE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: delete pipeline failed, err %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_delete_pipeline);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
+		u8 instance_id, enum skl_ipc_pipeline_state state)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SET_PPL_STATE);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_STATE(state);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set pipeline state failed, err: %d\n", ret);
+		return ret;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_pipeline_state);
+
+int
+skl_ipc_save_pipeline(struct sst_generic_ipc *ipc, u8 instance_id, int dma_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SAVE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	header.extension = IPC_DMA_ID(dma_id);
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: save pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_save_pipeline);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_RESTORE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: restore  pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_restore_pipeline);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc, u8 instance_id,
+		u16 module_id, struct skl_ipc_dxstate_info *dx)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_DX);
+	header.primary |= IPC_MOD_INSTANCE_ID(instance_id);
+	header.primary |= IPC_MOD_ID(module_id);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+				dx, sizeof(*dx), NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set dx failed, err %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_dx);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+	u32 *buffer = (u32 *)param_data;
+	/* the IPC param block size field is expressed in dwords */
+	u16 param_block_size = msg->param_data_size / sizeof(u32);
+
+	print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+		16, 4, buffer, msg->param_data_size, false);
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_CORE_ID(msg->core_id);
+	header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
+	header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data,
+			msg->param_data_size, NULL, 0);
+
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: init instance failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_init_instance);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
+		struct skl_ipc_bind_unbind_msg *msg)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	u8 bind_unbind = msg->bind ? IPC_MOD_BIND : IPC_MOD_UNBIND;
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(bind_unbind);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DST_MOD_ID(msg->dst_module_id);
+	header.extension |= IPC_DST_MOD_INSTANCE_ID(msg->dst_instance_id);
+	header.extension |= IPC_DST_QUEUE(msg->dst_queue);
+	header.extension |= IPC_SRC_QUEUE(msg->src_queue);
+
+	dev_dbg(ipc->dev, "In %s hdr=%x ext=%x\n", __func__, header.primary,
+			 header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: bind/unbind faileden");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
+
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret = 0;
+	size_t sz_remaining, tx_size, data_offset;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_LARGE_CONFIG_SET);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DATA_OFFSET_SZ(msg->param_data_size);
+	header.extension |= IPC_LARGE_PARAM_ID(msg->large_param_id);
+	header.extension |= IPC_FINAL_BLOCK(0);
+	header.extension |= IPC_INITIAL_BLOCK(1);
+
+	sz_remaining = msg->param_data_size;
+	data_offset = 0;
+	while (sz_remaining != 0) {
+		tx_size = sz_remaining > SKL_ADSP_W1_SZ
+				? SKL_ADSP_W1_SZ : sz_remaining;
+		if (tx_size == sz_remaining)
+			header.extension |= IPC_FINAL_BLOCK(1);
+
+		dev_dbg(ipc->dev, "In %s primary=%#x ext=%#x\n", __func__,
+			header.primary, header.extension);
+		dev_dbg(ipc->dev, "transmitting offset: %#x, size: %#x\n",
+			(unsigned)data_offset, (unsigned)tx_size);
+		ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+					  ((char *)param) + data_offset,
+					  tx_size, NULL, 0);
+		if (ret < 0) {
+			dev_err(ipc->dev,
+				"ipc: set large config fail, err: %d\n", ret);
+			return ret;
+		}
+		sz_remaining -= tx_size;
+		data_offset = msg->param_data_size - sz_remaining;
+
+		/* clear the fields */
+		header.extension &= IPC_INITIAL_BLOCK_CLEAR;
+		header.extension &= IPC_DATA_OFFSET_SZ_CLEAR;
+		/* fill the fields */
+		header.extension |= IPC_INITIAL_BLOCK(0);
+		header.extension |= IPC_DATA_OFFSET_SZ(data_offset);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_large_config);
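
Every request packs target, direction, type and addressing into fixed bit
fields of the primary word. As a worked example of the layout, using the
definitions from the top of this file (a hypothetical create-pipeline
request for pipeline 2 with 4 memory pages):

	struct skl_ipc_header h = {0};

	h.primary  = IPC_MSG_TARGET(IPC_FW_GEN_MSG);	/* bit 30: FW message */
	h.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);	/* bit 29: request */
	h.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);	/* bits 24-28: type 17 */
	h.primary |= IPC_INSTANCE_ID(2);		/* bits 16-23: pipe 2 */
	h.primary |= IPC_PPL_MEM_SIZE(4);		/* bits 0-10: 4 pages */
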
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
new file mode 100644
index 0000000..9f5f672
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -0,0 +1,125 @@
+/*
+ * Intel SKL IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_IPC_H
+#define __SKL_IPC_H
+
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+#include "../common/sst-ipc.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_generic_ipc;
+
+enum skl_ipc_pipeline_state {
+	PPL_INVALID_STATE =	0,
+	PPL_UNINITIALIZED =	1,
+	PPL_RESET =		2,
+	PPL_PAUSED =		3,
+	PPL_RUNNING =		4,
+	PPL_ERROR_STOP =	5,
+	PPL_SAVED =		6,
+	PPL_RESTORED =		7
+};
+
+struct skl_ipc_dxstate_info {
+	u32 core_mask;
+	u32 dx_mask;
+};
+
+struct skl_ipc_header {
+	u32 primary;
+	u32 extension;
+};
+
+struct skl_sst {
+	struct device *dev;
+	struct sst_dsp *dsp;
+
+	/* boot */
+	wait_queue_head_t boot_wait;
+	bool boot_complete;
+
+	/* IPC messaging */
+	struct sst_generic_ipc ipc;
+};
+
+struct skl_ipc_init_instance_msg {
+	u32 module_id;
+	u32 instance_id;
+	u16 param_data_size;
+	u8 ppl_instance_id;
+	u8 core_id;
+};
+
+struct skl_ipc_bind_unbind_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 dst_module_id;
+	u32 dst_instance_id;
+	u8 src_queue;
+	u8 dst_queue;
+	bool bind;
+};
+
+struct skl_ipc_large_config_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 large_param_id;
+	u32 param_data_size;
+};
+
+#define SKL_IPC_BOOT_MSECS		3000
+
+#define SKL_IPC_D3_MASK	0
+#define SKL_IPC_D0_MASK	3
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *sst_ipc,
+		u8 instance_id,	enum skl_ipc_pipeline_state state);
+
+int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
+		u8 instance_id, int dma_id);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_bind_unbind_msg *msg);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
+		u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
+
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param);
+
+void skl_ipc_int_enable(struct sst_dsp *dsp);
+void skl_ipc_op_int_enable(struct sst_dsp *ctx);
+void skl_ipc_int_disable(struct sst_dsp *dsp);
+
+bool skl_ipc_int_status(struct sst_dsp *dsp);
+void skl_ipc_free(struct sst_generic_ipc *ipc);
+int skl_ipc_init(struct device *dev, struct skl_sst *skl);
+
+#endif /* __SKL_IPC_H */
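
A hedged sketch of the expected caller flow for the pipeline helpers above
(the example_ name is hypothetical and error handling is reduced to the
minimum):

	static int example_start_pipe(struct skl_sst *skl, u8 ppl_id)
	{
		int ret;

		ret = skl_ipc_create_pipeline(&skl->ipc, 4 /* pages */,
					      0 /* type */, ppl_id);
		if (ret < 0)
			return ret;

		ret = skl_ipc_set_pipeline_state(&skl->ipc, ppl_id,
						 PPL_RUNNING);
		if (ret < 0)
			skl_ipc_delete_pipeline(&skl->ipc, ppl_id);

		return ret;
	}
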
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
new file mode 100644
index 0000000..c18ea51
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -0,0 +1,280 @@
+/*
+ * skl-sst.c - HDA DSP library functions for SKL platform
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-ipc.h"
+#include "skl-sst-ipc.h"
+
+#define SKL_BASEFW_TIMEOUT	300
+#define SKL_INIT_TIMEOUT	1000
+
+/* Intel HD Audio SRAM Window 0 */
+#define SKL_ADSP_SRAM0_BASE	0x8000
+
+/* Firmware status window */
+#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
+#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)
+
+#define SKL_INSTANCE_ID		0
+#define SKL_BASE_FW_MODULE_ID	0
+
+static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
+{
+	u32 cur_sts;
+
+	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
+
+	return (cur_sts == status);
+}
+
+static int skl_transfer_firmware(struct sst_dsp *ctx,
+		const void *basefw, u32 base_fw_size)
+{
+	int ret = 0;
+
+	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_FW_STATUS,
+			SKL_FW_STS_MASK,
+			SKL_FW_RFW_START,
+			SKL_BASEFW_TIMEOUT,
+			"Firmware boot");
+
+	ctx->cl_dev.ops.cl_stop_dma(ctx);
+
+	return ret;
+}
+
+static int skl_load_base_firmware(struct sst_dsp *ctx)
+{
+	int ret = 0, i;
+	const struct firmware *fw = NULL;
+	struct skl_sst *skl = ctx->thread_context;
+	u32 reg;
+
+	ret = request_firmware(&fw, "dsp_fw_release.bin", ctx->dev);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Request firmware failed %d\n", ret);
+		skl_dsp_disable_core(ctx);
+		return -EIO;
+	}
+
+	/* enable Interrupt */
+	skl_ipc_int_enable(ctx);
+	skl_ipc_op_int_enable(ctx);
+
+	/* check ROM Status */
+	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
+		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
+			dev_dbg(ctx->dev,
+				"ROM loaded, we can continue with FW loading\n");
+			break;
+		}
+		mdelay(1);
+	}
+	if (!i) {
+		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
+		dev_err(ctx->dev,
+			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
+		ret = -EIO;
+		goto skl_load_base_firmware_failed;
+	}
+
+	ret = skl_transfer_firmware(ctx, fw->data, fw->size);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
+		goto skl_load_base_firmware_failed;
+	} else {
+		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
+					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+		if (ret == 0) {
+			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
+			ret = -EIO;
+			goto skl_load_base_firmware_failed;
+		}
+
+		dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
+		skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+	}
+	release_firmware(fw);
+
+	return 0;
+
+skl_load_base_firmware_failed:
+	skl_dsp_disable_core(ctx);
+	release_firmware(fw);
+	return ret;
+}
+
+static int skl_set_dsp_D0(struct sst_dsp *ctx)
+{
+	int ret;
+
+	ret = skl_load_base_firmware(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "unable to load firmware\n");
+		return ret;
+	}
+
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+
+	return ret;
+}
+
+static int skl_set_dsp_D3(struct sst_dsp *ctx)
+{
+	int ret;
+	struct skl_ipc_dxstate_info dx;
+	struct skl_sst *skl = ctx->thread_context;
+
+	dev_dbg(ctx->dev, "In %s:\n", __func__);
+	mutex_lock(&ctx->mutex);
+	if (!is_skl_dsp_running(ctx)) {
+		mutex_unlock(&ctx->mutex);
+		return 0;
+	}
+	mutex_unlock(&ctx->mutex);
+
+	dx.core_mask = SKL_DSP_CORE0_MASK;
+	dx.dx_mask = SKL_IPC_D3_MASK;
+	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D3 state\n");
+		return ret;
+	}
+
+	ret = skl_dsp_disable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
+		ret = -EIO;
+	}
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+
+	return ret;
+}
+
+static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
+{
+	 return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
+}
+
+static struct skl_dsp_fw_ops skl_fw_ops = {
+	.set_state_D0 = skl_set_dsp_D0,
+	.set_state_D3 = skl_set_dsp_D3,
+	.load_fw = skl_load_base_firmware,
+	.get_fw_errcode = skl_get_errorcode,
+};
+
+static struct sst_ops skl_ops = {
+	.irq_handler = skl_dsp_sst_interrupt,
+	.write = sst_shim32_write,
+	.read = sst_shim32_read,
+	.ram_read = sst_memcpy_fromio_32,
+	.ram_write = sst_memcpy_toio_32,
+	.free = skl_dsp_free,
+};
+
+static struct sst_dsp_device skl_dev = {
+	.thread = skl_dsp_irq_thread_handler,
+	.ops = &skl_ops,
+};
+
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
+{
+	struct skl_sst *skl;
+	struct sst_dsp *sst;
+	int ret;
+
+	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
+	if (skl == NULL)
+		return -ENOMEM;
+
+	skl->dev = dev;
+	skl_dev.thread_context = skl;
+
+	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
+	if (!skl->dsp) {
+		dev_err(skl->dev, "%s: no device\n", __func__);
+		return -ENODEV;
+	}
+
+	sst = skl->dsp;
+
+	sst->addr.lpe = mmio_base;
+	sst->addr.shim = mmio_base;
+	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
+			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+
+	sst->dsp_ops = dsp_ops;
+	sst->fw_ops = skl_fw_ops;
+
+	ret = skl_ipc_init(dev, skl);
+	if (ret)
+		return ret;
+
+	skl->boot_complete = false;
+	init_waitqueue_head(&skl->boot_wait);
+
+	ret = skl_dsp_boot(sst);
+	if (ret < 0) {
+		dev_err(skl->dev, "Boot dsp core failed ret: %d", ret);
+		goto free_ipc;
+	}
+
+	ret = skl_cldma_prepare(sst);
+	if (ret < 0) {
+		dev_err(dev, "CL dma prepare failed : %d", ret);
+		goto free_ipc;
+	}
+
+	ret = sst->fw_ops.load_fw(sst);
+	if (ret < 0) {
+		dev_err(dev, "Load base fw failed: %d\n", ret);
+		goto free_ipc;
+	}
+
+	if (dsp)
+		*dsp = skl;
+
+	return 0;
+
+free_ipc:
+	skl_ipc_free(&skl->ipc);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
+
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
+{
+	skl_ipc_free(&ctx->ipc);
+	ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
+	ctx->dsp->ops->free(ctx->dsp);
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake IPC driver");
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
new file mode 100644
index 0000000..8c7767b
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -0,0 +1,286 @@
+/*
+ *  skl-topology.h - Intel HDA Platform topology header file
+ *
+ *  Copyright (C) 2014-15 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SKL_TOPOLOGY_H__
+#define __SKL_TOPOLOGY_H__
+
+#include <linux/types.h>
+
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "skl.h"
+#include "skl-tplg-interface.h"
+
+#define BITS_PER_BYTE 8
+#define MAX_TS_GROUPS 8
+#define MAX_DMIC_TS_GROUPS 4
+#define MAX_FIXED_DMIC_PARAMS_SIZE 727
+
+/* Maximum number of coefficients up down mixer module */
+#define UP_DOWN_MIXER_MAX_COEFF		6
+
+enum skl_channel_index {
+	SKL_CHANNEL_LEFT = 0,
+	SKL_CHANNEL_RIGHT = 1,
+	SKL_CHANNEL_CENTER = 2,
+	SKL_CHANNEL_LEFT_SURROUND = 3,
+	SKL_CHANNEL_CENTER_SURROUND = 3,
+	SKL_CHANNEL_RIGHT_SURROUND = 4,
+	SKL_CHANNEL_LFE = 7,
+	SKL_CHANNEL_INVALID = 0xF,
+};
+
+enum skl_bitdepth {
+	SKL_DEPTH_8BIT = 8,
+	SKL_DEPTH_16BIT = 16,
+	SKL_DEPTH_24BIT = 24,
+	SKL_DEPTH_32BIT = 32,
+	SKL_DEPTH_INVALID
+};
+
+enum skl_interleaving {
+	/* [s1_ch1...s1_chN,...,sM_ch1...sM_chN] */
+	SKL_INTERLEAVING_PER_CHANNEL = 0,
+	/* [s1_ch1...sM_ch1,...,s1_chN...sM_chN] */
+	SKL_INTERLEAVING_PER_SAMPLE = 1,
+};
+
+enum skl_s_freq {
+	SKL_FS_8000 = 8000,
+	SKL_FS_11025 = 11025,
+	SKL_FS_12000 = 12000,
+	SKL_FS_16000 = 16000,
+	SKL_FS_22050 = 22050,
+	SKL_FS_24000 = 24000,
+	SKL_FS_32000 = 32000,
+	SKL_FS_44100 = 44100,
+	SKL_FS_48000 = 48000,
+	SKL_FS_64000 = 64000,
+	SKL_FS_88200 = 88200,
+	SKL_FS_96000 = 96000,
+	SKL_FS_128000 = 128000,
+	SKL_FS_176400 = 176400,
+	SKL_FS_192000 = 192000,
+	SKL_FS_INVALID
+};
+
+enum skl_widget_type {
+	SKL_WIDGET_VMIXER = 1,
+	SKL_WIDGET_MIXER = 2,
+	SKL_WIDGET_PGA = 3,
+	SKL_WIDGET_MUX = 4
+};
+
+struct skl_audio_data_format {
+	enum skl_s_freq s_freq;
+	enum skl_bitdepth bit_depth;
+	u32 channel_map;
+	enum skl_ch_cfg ch_cfg;
+	enum skl_interleaving interleaving;
+	u8 number_of_channels;
+	u8 valid_bit_depth;
+	u8 sample_type;
+	u8 reserved[1];
+} __packed;
+
+struct skl_base_cfg {
+	u32 cps;
+	u32 ibs;
+	u32 obs;
+	u32 is_pages;
+	struct skl_audio_data_format audio_fmt;
+};
+
+struct skl_cpr_gtw_cfg {
+	u32 node_id;
+	u32 dma_buffer_size;
+	u32 config_length;
+	/* not mandatory; required only for DMIC/I2S */
+	u32 config_data[1];
+} __packed;
+
+struct skl_cpr_cfg {
+	struct skl_base_cfg base_cfg;
+	struct skl_audio_data_format out_fmt;
+	u32 cpr_feature_mask;
+	struct skl_cpr_gtw_cfg gtw_cfg;
+} __packed;
+
+struct skl_src_module_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_s_freq src_cfg;
+} __packed;
+
+struct skl_up_down_mixer_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_ch_cfg out_ch_cfg;
+	/* This should be set to 1 if user coefficients are required */
+	u32 coeff_sel;
+	/* Pass the user coeff in this array */
+	s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
+} __packed;
+
+enum skl_dma_type {
+	SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
+	SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
+	SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
+	SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
+	SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
+	SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
+	SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
+	SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
+	SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
+};
+
+union skl_ssp_dma_node {
+	u8 val;
+	struct {
+		u8 dual_mono:1;
+		u8 time_slot:3;
+		u8 i2s_instance:4;
+	} dma_node;
+};
+
+union skl_connector_node_id {
+	u32 val;
+	struct {
+		u32 vindex:8;
+		u32 dma_type:4;
+		u32 rsvd:20;
+	} node;
+};
+
+struct skl_module_fmt {
+	u32 channels;
+	u32 s_freq;
+	u32 bit_depth;
+	u32 valid_bit_depth;
+	u32 ch_cfg;
+};
+
+struct skl_module_inst_id {
+	u32 module_id;
+	u32 instance_id;
+};
+
+struct skl_module_pin {
+	struct skl_module_inst_id id;
+	u8 pin_index;
+	bool is_dynamic;
+	bool in_use;
+};
+
+struct skl_specific_cfg {
+	u32 caps_size;
+	u32 *caps;
+};
+
+enum skl_pipe_state {
+	SKL_PIPE_INVALID = 0,
+	SKL_PIPE_CREATED = 1,
+	SKL_PIPE_PAUSED = 2,
+	SKL_PIPE_STARTED = 3
+};
+
+struct skl_pipe_module {
+	struct snd_soc_dapm_widget *w;
+	struct list_head node;
+};
+
+struct skl_pipe_params {
+	u8 host_dma_id;
+	u8 link_dma_id;
+	u32 ch;
+	u32 s_freq;
+	u32 s_fmt;
+	u8 linktype;
+	int stream;
+};
+
+struct skl_pipe {
+	u8 ppl_id;
+	u8 pipe_priority;
+	u16 conn_type;
+	u32 memory_pages;
+	struct skl_pipe_params *p_params;
+	enum skl_pipe_state state;
+	struct list_head w_list;
+};
+
+enum skl_module_state {
+	SKL_MODULE_UNINIT = 0,
+	SKL_MODULE_INIT_DONE = 1,
+	SKL_MODULE_LOADED = 2,
+	SKL_MODULE_UNLOADED = 3,
+	SKL_MODULE_BIND_DONE = 4
+};
+
+struct skl_module_cfg {
+	struct skl_module_inst_id id;
+	struct skl_module_fmt in_fmt;
+	struct skl_module_fmt out_fmt;
+	u8 max_in_queue;
+	u8 max_out_queue;
+	u8 in_queue_mask;
+	u8 out_queue_mask;
+	u8 in_queue;
+	u8 out_queue;
+	u32 mcps;
+	u32 ibs;
+	u32 obs;
+	u8 is_loadable;
+	u8 core_id;
+	u8 dev_type;
+	u8 dma_id;
+	u8 time_slot;
+	u32 params_fixup;
+	u32 converter;
+	u32 vbus_id;
+	struct skl_module_pin *m_in_pin;
+	struct skl_module_pin *m_out_pin;
+	enum skl_module_type m_type;
+	enum skl_hw_conn_type  hw_conn_type;
+	enum skl_module_state m_state;
+	struct skl_pipe *pipe;
+	struct skl_specific_cfg formats_config;
+};
+
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_pause_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config,
+	char *param);
+
+int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+int skl_unbind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+enum skl_bitdepth skl_get_bit_depth(int params);
+#endif
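
The copier gateway addresses its DMA endpoint through union
skl_connector_node_id, which overlays a 32-bit value with vindex/dma_type
bit fields. A sketch of filling it for a host-output stream (illustrative;
host_dma_id would come from struct skl_pipe_params):

	static u32 example_host_out_node(u8 host_dma_id)
	{
		union skl_connector_node_id node_id = { .val = 0 };

		node_id.node.dma_type = SKL_DMA_HDA_HOST_OUTPUT_CLASS;
		node_id.node.vindex = host_dma_id;

		/* suitable for skl_cpr_gtw_cfg.node_id */
		return node_id.val;
	}
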
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
new file mode 100644
index 0000000..a506898
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -0,0 +1,88 @@
+/*
+ * skl-tplg-interface.h - Intel DSP FW private data interface
+ *
+ * Copyright (C) 2015 Intel Corp
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *	    Nilofer, Samreen <samreen.nilofer@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDA_TPLG_INTERFACE_H__
+#define __HDA_TPLG_INTERFACE_H__
+
+/**
+ * enum skl_ch_cfg - channel configuration
+ *
+ * @SKL_CH_CFG_MONO:	One channel only
+ * @SKL_CH_CFG_STEREO:	L & R
+ * @SKL_CH_CFG_2_1:	L, R & LFE
+ * @SKL_CH_CFG_3_0:	L, C & R
+ * @SKL_CH_CFG_3_1:	L, C, R & LFE
+ * @SKL_CH_CFG_QUATRO:	L, R, Ls & Rs
+ * @SKL_CH_CFG_4_0:	L, C, R & Cs
+ * @SKL_CH_CFG_5_0:	L, C, R, Ls & Rs
+ * @SKL_CH_CFG_5_1:	L, C, R, Ls, Rs & LFE
+ * @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
+ * @SKL_CH_CFG_INVALID:	Invalid
+ */
+enum skl_ch_cfg {
+	SKL_CH_CFG_MONO = 0,
+	SKL_CH_CFG_STEREO = 1,
+	SKL_CH_CFG_2_1 = 2,
+	SKL_CH_CFG_3_0 = 3,
+	SKL_CH_CFG_3_1 = 4,
+	SKL_CH_CFG_QUATRO = 5,
+	SKL_CH_CFG_4_0 = 6,
+	SKL_CH_CFG_5_0 = 7,
+	SKL_CH_CFG_5_1 = 8,
+	SKL_CH_CFG_DUAL_MONO = 9,
+	SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+	SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+	SKL_CH_CFG_INVALID
+};
+
+enum skl_module_type {
+	SKL_MODULE_TYPE_MIXER = 0,
+	SKL_MODULE_TYPE_COPIER,
+	SKL_MODULE_TYPE_UPDWMIX,
+	SKL_MODULE_TYPE_SRCINT
+};
+
+enum skl_core_affinity {
+	SKL_AFFINITY_CORE_0 = 0,
+	SKL_AFFINITY_CORE_1,
+	SKL_AFFINITY_CORE_MAX
+};
+
+enum skl_pipe_conn_type {
+	SKL_PIPE_CONN_TYPE_NONE = 0,
+	SKL_PIPE_CONN_TYPE_FE,
+	SKL_PIPE_CONN_TYPE_BE
+};
+
+enum skl_hw_conn_type {
+	SKL_CONN_NONE = 0,
+	SKL_CONN_SOURCE = 1,
+	SKL_CONN_SINK = 2
+};
+
+enum skl_dev_type {
+	SKL_DEVICE_BT = 0x0,
+	SKL_DEVICE_DMIC = 0x1,
+	SKL_DEVICE_I2S = 0x2,
+	SKL_DEVICE_SLIMBUS = 0x3,
+	SKL_DEVICE_HDALINK = 0x4,
+	SKL_DEVICE_NONE
+};
+#endif
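
The channel configurations above map onto concrete channel counts; a small
helper in the spirit of these definitions could look like the sketch below
(skl_ch_cfg_to_channels is a hypothetical name, not something this patch
adds):

	static int skl_ch_cfg_to_channels(enum skl_ch_cfg cfg)
	{
		switch (cfg) {
		case SKL_CH_CFG_MONO:		return 1;
		case SKL_CH_CFG_STEREO:
		case SKL_CH_CFG_DUAL_MONO:	return 2;
		case SKL_CH_CFG_2_1:
		case SKL_CH_CFG_3_0:		return 3;
		case SKL_CH_CFG_3_1:
		case SKL_CH_CFG_QUATRO:
		case SKL_CH_CFG_4_0:		return 4;
		case SKL_CH_CFG_5_0:		return 5;
		case SKL_CH_CFG_5_1:		return 6;
		default:			return -EINVAL;
		}
	}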
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
new file mode 100644
index 0000000..348d094
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.c
@@ -0,0 +1,536 @@
+/*
+ *  skl.c - Implementation of ASoC Intel SKL HD Audio driver
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  Derived mostly from Intel HDA driver with following copyrights:
+ *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
+ *                     PeiSen Hou <pshou@realtek.com.tw>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include "skl.h"
+
+/*
+ * initialize the PCI registers
+ */
+static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
+			    unsigned char mask, unsigned char val)
+{
+	unsigned char data;
+
+	pci_read_config_byte(pci, reg, &data);
+	data &= ~mask;
+	data |= (val & mask);
+	pci_write_config_byte(pci, reg, data);
+}
+
+static void skl_init_pci(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+
+	/*
+	 * Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
+	 * TCSEL == Traffic Class Select Register, which sets PCI express QOS
+	 * Ensuring these bits are 0 clears playback static on some HD Audio
+	 * codecs.
+	 * The PCI register TCSEL is defined in the Intel manuals.
+	 */
+	dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
+	skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
+}
+
+/* called from IRQ */
+static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
+{
+	snd_pcm_period_elapsed(hstr->substream);
+}
+
+static irqreturn_t skl_interrupt(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	if (!pm_runtime_active(bus->dev))
+		return IRQ_NONE;
+
+	spin_lock(&bus->reg_lock);
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+	if (status == 0 || status == 0xffffffff) {
+		spin_unlock(&bus->reg_lock);
+		return IRQ_NONE;
+	}
+
+	/* clear rirb int */
+	status = snd_hdac_chip_readb(bus, RIRBSTS);
+	if (status & RIRB_INT_MASK) {
+		if (status & RIRB_INT_RESPONSE)
+			snd_hdac_bus_update_rirb(bus);
+		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+	}
+
+	spin_unlock(&bus->reg_lock);
+
+	return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+
+	snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);
+
+	return IRQ_HANDLED;
+}
+
+static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int ret;
+
+	ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
+			skl_threaded_handler,
+			IRQF_SHARED,
+			KBUILD_MODNAME, ebus);
+	if (ret) {
+		dev_err(bus->dev,
+			"unable to grab IRQ %d, disabling device\n",
+			skl->pci->irq);
+		return ret;
+	}
+
+	bus->irq = skl->pci->irq;
+	pci_intx(skl->pci, 1);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * power management
+ */
+static int skl_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+
+	skl_init_pci(hda);
+
+	snd_hdac_bus_init_chip(bus, 1);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int skl_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* enable controller wake up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, 0, STATESTS_INT_MASK);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+	int status;
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* Read STATESTS before controller reset */
+	status = snd_hdac_chip_readw(bus, STATESTS);
+
+	skl_init_pci(hda);
+	snd_hdac_bus_init_chip(bus, true);
+	/* disable controller Wake Up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops skl_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
+	SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
+};
+
+/*
+ * destructor
+ */
+static int skl_free(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl  = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	skl->init_failed = 1; /* to be sure */
+
+	snd_hdac_ext_stop_streams(ebus);
+
+	if (bus->irq >= 0)
+		free_irq(bus->irq, (void *)ebus);
+	if (bus->remap_addr)
+		iounmap(bus->remap_addr);
+
+	snd_hdac_bus_free_stream_pages(bus);
+	snd_hdac_stream_free_all(ebus);
+	snd_hdac_link_free_all(ebus);
+	pci_release_regions(skl->pci);
+	pci_disable_device(skl->pci);
+
+	snd_hdac_ext_bus_exit(ebus);
+
+	return 0;
+}
+
+static int skl_dmic_device_register(struct skl *skl)
+{
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct platform_device *pdev;
+	int ret;
+
+	/* SKL has one DMIC port, so allocate a DMIC device for it */
+	pdev = platform_device_alloc("dmic-codec", -1);
+	if (!pdev) {
+		dev_err(bus->dev, "failed to allocate dmic device\n");
+		return -ENOMEM;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
+		platform_device_put(pdev);
+		return ret;
+	}
+	skl->dmic_dev = pdev;
+
+	return 0;
+}
+
+static void skl_dmic_device_unregister(struct skl *skl)
+{
+	if (skl->dmic_dev)
+		platform_device_unregister(skl->dmic_dev);
+}
+
+/*
+ * Probe the given codec address
+ */
+static int probe_codec(struct hdac_ext_bus *ebus, int addr)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
+		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+	unsigned int res;
+
+	mutex_lock(&bus->cmd_mutex);
+	snd_hdac_bus_send_cmd(bus, cmd);
+	snd_hdac_bus_get_response(bus, addr, &res);
+	mutex_unlock(&bus->cmd_mutex);
+	if (res == -1)
+		return -EIO;
+	dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
+
+	return snd_hdac_ext_bus_device_init(ebus, addr);
+}
+
+/* Codec initialization */
+static int skl_codec_create(struct hdac_ext_bus *ebus)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int c, max_slots;
+
+	max_slots = HDA_MAX_CODECS;
+
+	/* First try to probe all given codec slots */
+	for (c = 0; c < max_slots; c++) {
+		if ((bus->codec_mask & (1 << c))) {
+			if (probe_codec(ebus, c) < 0) {
+				/*
+				 * Some BIOSen give you wrong codec addresses
+				 * that don't exist
+				 */
+				dev_warn(bus->dev,
+					 "Codec #%d probe error; disabling it...\n", c);
+				bus->codec_mask &= ~(1 << c);
+				/*
+				 * Worse yet, accessing a non-existing
+				 * codec often screws up the controller bus
+				 * and disturbs further communication.
+				 * Thus if an error occurs during probing,
+				 * it is better to reset the controller bus
+				 * to get back to a sane state.
+				 */
+				snd_hdac_bus_stop_chip(bus);
+				snd_hdac_bus_init_chip(bus, true);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static const struct hdac_bus_ops bus_core_ops = {
+	.command = snd_hdac_bus_send_cmd,
+	.get_response = snd_hdac_bus_get_response,
+};
+
+/*
+ * constructor
+ */
+static int skl_create(struct pci_dev *pci,
+		      const struct hdac_io_ops *io_ops,
+		      struct skl **rskl)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus;
+
+	int err;
+
+	*rskl = NULL;
+
+	err = pci_enable_device(pci);
+	if (err < 0)
+		return err;
+
+	skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
+	if (!skl) {
+		pci_disable_device(pci);
+		return -ENOMEM;
+	}
+	ebus = &skl->ebus;
+	snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
+	ebus->bus.use_posbuf = 1;
+	skl->pci = pci;
+
+	ebus->bus.bdl_pos_adj = 0;
+
+	*rskl = skl;
+
+	return 0;
+}
+
+static int skl_first_init(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct pci_dev *pci = skl->pci;
+	int err;
+	unsigned short gcap;
+	int cp_streams, pb_streams, start_idx;
+
+	err = pci_request_regions(pci, "Skylake HD audio");
+	if (err < 0)
+		return err;
+
+	bus->addr = pci_resource_start(pci, 0);
+	bus->remap_addr = pci_ioremap_bar(pci, 0);
+	if (bus->remap_addr == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	snd_hdac_ext_bus_parse_capabilities(ebus);
+
+	if (skl_acquire_irq(ebus, 0) < 0)
+		return -EBUSY;
+
+	pci_set_master(pci);
+	synchronize_irq(bus->irq);
+
+	gcap = snd_hdac_chip_readw(bus, GCAP);
+	dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
+
+	/* allow 64bit DMA address if supported by H/W */
+	if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
+	} else {
+		dma_set_mask(bus->dev, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
+	}
+
+	/* read number of streams from GCAP register */
+	cp_streams = (gcap >> 8) & 0x0f;
+	pb_streams = (gcap >> 12) & 0x0f;
+
+	if (!pb_streams && !cp_streams)
+		return -EIO;
+
+	ebus->num_streams = cp_streams + pb_streams;
+
+	/* initialize streams */
+	snd_hdac_ext_stream_init_all
+		(ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
+	start_idx = cp_streams;
+	snd_hdac_ext_stream_init_all
+		(ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
+
+	err = snd_hdac_bus_alloc_stream_pages(bus);
+	if (err < 0)
+		return err;
+
+	/* initialize chip */
+	skl_init_pci(skl);
+
+	snd_hdac_bus_init_chip(bus, true);
+
+	/* codec detection */
+	if (!bus->codec_mask) {
+		dev_err(bus->dev, "no codecs found!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int skl_probe(struct pci_dev *pci,
+		     const struct pci_device_id *pci_id)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus = NULL;
+	struct hdac_bus *bus = NULL;
+	int err;
+
+	/* we use ext core ops, so provide NULL for ops here */
+	err = skl_create(pci, NULL, &skl);
+	if (err < 0)
+		return err;
+
+	ebus = &skl->ebus;
+	bus = ebus_to_hbus(ebus);
+
+	err = skl_first_init(ebus);
+	if (err < 0)
+		goto out_free;
+
+	pci_set_drvdata(skl->pci, ebus);
+
+	/* check if dsp is there */
+	if (ebus->ppcap) {
+		/* TODO register with dsp IPC */
+		dev_dbg(bus->dev, "Register dsp\n");
+	}
+
+	if (ebus->mlcap)
+		snd_hdac_ext_bus_get_ml_capabilities(ebus);
+
+	/* create device for soc dmic */
+	err = skl_dmic_device_register(skl);
+	if (err < 0)
+		goto out_free;
+
+	/* register platform dai and controls */
+	err = skl_platform_register(bus->dev);
+	if (err < 0)
+		goto out_dmic_free;
+
+	/* create codec instances */
+	err = skl_codec_create(ebus);
+	if (err < 0)
+		goto out_unregister;
+
+	/* configure PM */
+	pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(bus->dev);
+	pm_runtime_put_noidle(bus->dev);
+	pm_runtime_allow(bus->dev);
+
+	return 0;
+
+out_unregister:
+	skl_platform_unregister(bus->dev);
+out_dmic_free:
+	skl_dmic_device_unregister(skl);
+out_free:
+	skl->init_failed = 1;
+	skl_free(ebus);
+
+	return err;
+}
+
+static void skl_remove(struct pci_dev *pci)
+{
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (pci_dev_run_wake(pci))
+		pm_runtime_get_noresume(&pci->dev);
+	pci_dev_put(pci);
+	skl_platform_unregister(&pci->dev);
+	skl_dmic_device_unregister(skl);
+	skl_free(ebus);
+	dev_set_drvdata(&pci->dev, NULL);
+}
+
+/* PCI IDs */
+static const struct pci_device_id skl_ids[] = {
+	/* Sunrise Point-LP */
+	{ PCI_DEVICE(0x8086, 0x9d70), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, skl_ids);
+
+/* pci_driver definition */
+static struct pci_driver skl_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = skl_ids,
+	.probe = skl_probe,
+	.remove = skl_remove,
+	.driver = {
+		.pm = &skl_pm,
+	},
+};
+module_pci_driver(skl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");
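
skl_first_init() above pulls the stream counts straight out of the GCAP
register: bits 11:8 hold the number of capture (input) streams and bits
15:12 the number of playback (output) streams. Decoding a hypothetical
GCAP value of 0x9701 as an example:

	cp_streams = (0x9701 >> 8) & 0x0f;	/* = 7 capture streams  */
	pb_streams = (0x9701 >> 12) & 0x0f;	/* = 9 playback streams */
	/* ebus->num_streams = 7 + 9 = 16 */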
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
new file mode 100644
index 0000000..f7fdbb0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.h
@@ -0,0 +1,84 @@
+/*
+ *  skl.h - HD Audio Skylake definitions.
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_H
+#define __SOUND_SOC_SKL_H
+
+#include <sound/hda_register.h>
+#include <sound/hdaudio_ext.h>
+#include "skl-nhlt.h"
+
+#define SKL_SUSPEND_DELAY 2000
+
+/* Vendor Specific Registers */
+#define AZX_REG_VS_EM1			0x1000
+#define AZX_REG_VS_INRC			0x1004
+#define AZX_REG_VS_OUTRC		0x1008
+#define AZX_REG_VS_FIFOTRK		0x100C
+#define AZX_REG_VS_FIFOTRK2		0x1010
+#define AZX_REG_VS_EM2			0x1030
+#define AZX_REG_VS_EM3L			0x1038
+#define AZX_REG_VS_EM3U			0x103C
+#define AZX_REG_VS_EM4L			0x1040
+#define AZX_REG_VS_EM4U			0x1044
+#define AZX_REG_VS_LTRC			0x1048
+#define AZX_REG_VS_D0I3C		0x104A
+#define AZX_REG_VS_PCE			0x104B
+#define AZX_REG_VS_L2MAGC		0x1050
+#define AZX_REG_VS_L2LAHPT		0x1054
+#define AZX_REG_VS_SDXDPIB_XBASE	0x1084
+#define AZX_REG_VS_SDXDPIB_XINTERVAL	0x20
+#define AZX_REG_VS_SDXEFIFOS_XBASE	0x1094
+#define AZX_REG_VS_SDXEFIFOS_XINTERVAL	0x20
+
+struct skl {
+	struct hdac_ext_bus ebus;
+	struct pci_dev *pci;
+
+	unsigned int init_failed:1; /* delayed init failed */
+	struct platform_device *dmic_dev;
+
+	void __iomem *nhlt; /* nhlt ptr */
+	struct skl_sst *skl_sst; /* sst skl ctx */
+};
+
+#define skl_to_ebus(s)	(&(s)->ebus)
+#define ebus_to_skl(sbus) \
+	container_of(sbus, struct skl, ebus)
+
+/* to pass dai dma data */
+struct skl_dma_params {
+	u32 format;
+	u8 stream_tag;
+};
+
+int skl_platform_unregister(struct device *dev);
+int skl_platform_register(struct device *dev);
+
+void __iomem *skl_nhlt_init(struct device *dev);
+void skl_nhlt_free(void __iomem *addr);
+struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
+			u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
+
+int skl_init_dsp(struct skl *skl);
+void skl_free_dsp(struct skl *skl);
+int skl_suspend_dsp(struct skl *skl);
+int skl_resume_dsp(struct skl *skl);
+#endif /* __SOUND_SOC_SKL_H */
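
ebus_to_skl() recovers the wrapping struct skl from the embedded
hdac_ext_bus with ordinary container_of() pointer arithmetic;
conceptually:

	struct skl *skl = container_of(ebus, struct skl, ebus);
	/* same as: (struct skl *)((char *)ebus - offsetof(struct skl, ebus)) */

which is why struct skl embeds the bus by value rather than by pointer.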
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 4cf2245..dbfdfe9 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -148,10 +148,14 @@
 	dram = mv_mbus_dram_info();
 	addr = substream->dma_buffer.addr;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (priv->substream_play)
+			return -EBUSY;
 		priv->substream_play = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_PLAYBACK_WIN, addr, dram);
 	} else {
+		if (priv->substream_rec)
+			return -EBUSY;
 		priv->substream_rec = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_RECORD_WIN, addr, dram);
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 2d2536a..684e8a7 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -136,6 +136,7 @@
 
 static struct snd_soc_card mt8173_max98090_card = {
 	.name = "mt8173-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_max98090_dais,
 	.num_links = ARRAY_SIZE(mt8173_max98090_dais),
 	.controls = mt8173_max98090_controls,
@@ -202,7 +203,6 @@
 static struct platform_driver mt8173_max98090_driver = {
 	.driver = {
 		   .name = "mt8173-max98090",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_max98090_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
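
Dropping .owner from the platform_driver here (and in the other drivers
converted below) is safe because the module_platform_driver() registration
path already supplies it; platform_driver_register() is a macro along
these lines:

	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)

The snd_soc_card .owner, by contrast, is not filled in automatically,
which is why the added line sets it explicitly.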
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 6f52eca..86cf975 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -191,6 +191,7 @@
 
 static struct snd_soc_card mt8173_rt5650_rt5676_card = {
 	.name = "mtk-rt5650-rt5676",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_rt5650_rt5676_dais,
 	.num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
 	.codec_conf = mt8173_rt5650_rt5676_codec_conf,
@@ -269,7 +270,6 @@
 static struct platform_driver mt8173_rt5650_rt5676_driver = {
 	.driver = {
 		   .name = "mtk-rt5650-rt5676",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_rt5650_rt5676_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mtk-afe-common.h b/sound/soc/mediatek/mtk-afe-common.h
index a88b175..cc4393c 100644
--- a/sound/soc/mediatek/mtk-afe-common.h
+++ b/sound/soc/mediatek/mtk-afe-common.h
@@ -98,12 +98,4 @@
 	const struct mtk_afe_irq_data *irqdata;
 };
 
-struct mtk_afe {
-	/* address for ioremap audio hardware register */
-	void __iomem *base_addr;
-	struct device *dev;
-	struct regmap *regmap;
-	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
-	struct clk *clocks[MTK_CLK_NUM];
-};
 #endif
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index 9863da7..d190fe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -45,18 +45,21 @@
 /* Memory interface */
 #define AFE_DL1_BASE		0x0040
 #define AFE_DL1_CUR		0x0044
+#define AFE_DL1_END		0x0048
 #define AFE_DL2_BASE		0x0050
 #define AFE_DL2_CUR		0x0054
 #define AFE_AWB_BASE		0x0070
 #define AFE_AWB_CUR		0x007c
 #define AFE_VUL_BASE		0x0080
 #define AFE_VUL_CUR		0x008c
+#define AFE_VUL_END		0x0088
 #define AFE_DAI_BASE		0x0090
 #define AFE_DAI_CUR		0x009c
 #define AFE_MOD_PCM_BASE	0x0330
 #define AFE_MOD_PCM_CUR		0x033c
 #define AFE_HDMI_OUT_BASE	0x0374
 #define AFE_HDMI_OUT_CUR	0x0378
+#define AFE_HDMI_OUT_END	0x037c
 
 #define AFE_ADDA2_TOP_CON0	0x0600
 
@@ -127,6 +130,34 @@
 	AFE_TDM_CH_ZERO,
 };
 
+static const unsigned int mtk_afe_backup_list[] = {
+	AUDIO_TOP_CON0,
+	AFE_CONN1,
+	AFE_CONN2,
+	AFE_CONN7,
+	AFE_CONN8,
+	AFE_DAC_CON1,
+	AFE_DL1_BASE,
+	AFE_DL1_END,
+	AFE_VUL_BASE,
+	AFE_VUL_END,
+	AFE_HDMI_OUT_BASE,
+	AFE_HDMI_OUT_END,
+	AFE_HDMI_CONN0,
+	AFE_DAC_CON0,
+};
+
+struct mtk_afe {
+	/* address for ioremap audio hardware register */
+	void __iomem *base_addr;
+	struct device *dev;
+	struct regmap *regmap;
+	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
+	struct clk *clocks[MTK_CLK_NUM];
+	unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
+	bool suspended;
+};
+
 static const struct snd_pcm_hardware mtk_afe_hardware = {
 	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 		 SNDRV_PCM_INFO_MMAP_VALID),
@@ -722,11 +753,53 @@
 
 };
 
+static int mtk_afe_runtime_suspend(struct device *dev);
+static int mtk_afe_runtime_resume(struct device *dev);
+
+static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_read(afe->regmap, mtk_afe_backup_list[i],
+			    &afe->backup_regs[i]);
+
+	afe->suspended = true;
+	mtk_afe_runtime_suspend(afe->dev);
+	return 0;
+}
+
+static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i = 0;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
+		return 0;
+
+	mtk_afe_runtime_resume(afe->dev);
+
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_write(afe->regmap, mtk_afe_backup_list[i],
+			     afe->backup_regs[i]);
+
+	afe->suspended = false;
+	return 0;
+}
+
 static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
 	/* FE DAIs: memory intefaces to CPU */
 	{
 		.name = "DL1", /* downlink 1 */
 		.id = MTK_AFE_MEMIF_DL1,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "DL1",
 			.channels_min = 1,
@@ -738,6 +811,8 @@
 	}, {
 		.name = "VUL", /* voice uplink */
 		.id = MTK_AFE_MEMIF_VUL,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.capture = {
 			.stream_name = "VUL",
 			.channels_min = 1,
@@ -774,6 +849,8 @@
 	{
 		.name = "HDMI",
 		.id = MTK_AFE_MEMIF_HDMI,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "HDMI",
 			.channels_min = 2,
@@ -820,10 +897,6 @@
 };
 
 static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_IN("I2S Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("I2S Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-
 	/* inter-connections */
 	SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -855,11 +928,6 @@
 	{ "O10", "I18 Switch", "I18" },
 };
 
-static const struct snd_soc_dapm_widget mtk_afe_hdmi_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_OUT("HDMIO Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-};
-
 static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
 	{"HDMIO Playback", NULL, "HDMI"},
 };
@@ -874,8 +942,6 @@
 
 static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
 	.name = "mtk-afe-hdmi-dai",
-	.dapm_widgets = mtk_afe_hdmi_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(mtk_afe_hdmi_widgets),
 	.dapm_routes = mtk_afe_hdmi_routes,
 	.num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
 };
@@ -1220,7 +1286,6 @@
 static struct platform_driver mtk_afe_pcm_driver = {
 	.driver = {
 		   .name = "mtk-afe-pcm",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mtk_afe_pcm_dt_match,
 		   .pm = &mtk_afe_pm_ops,
 	},
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 5ae5ca1..e093261 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -308,13 +308,7 @@
 
 static int nuc900_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
-}
-
-static int nuc900_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
 }
 
 static struct platform_driver nuc900_pcm_driver = {
@@ -323,7 +317,6 @@
 	},
 
 	.probe = nuc900_soc_platform_probe,
-	.remove = nuc900_soc_platform_remove,
 };
 
 module_platform_driver(nuc900_pcm_driver);
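
The conversion used here (and repeated for mmp-pcm, pxa-ssp, pxa2xx-i2s,
pxa2xx-pcm and dma-sh7760 below) swaps an explicit register/unregister
pair for a device-managed registration, so the remove callback can go
away entirely. Schematically, with foo as a placeholder driver:

	/* before: remove() must undo what probe() did */
	static int foo_probe(struct platform_device *pdev)
	{
		return snd_soc_register_platform(&pdev->dev, &foo_platform);
	}

	static int foo_remove(struct platform_device *pdev)
	{
		snd_soc_unregister_platform(&pdev->dev);
		return 0;
	}

	/* after: the devres core unregisters on driver unbind */
	static int foo_probe(struct platform_device *pdev)
	{
		return devm_snd_soc_register_platform(&pdev->dev,
						      &foo_platform);
	}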
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 68a1252..c7563e2 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -965,25 +965,15 @@
 	mcbsp->free = true;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
-	if (!res) {
+	if (!res)
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		if (!res) {
-			dev_err(mcbsp->dev, "invalid memory resource\n");
-			return -ENOMEM;
-		}
-	}
-	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-				     dev_name(&pdev->dev))) {
-		dev_err(mcbsp->dev, "memory region already claimed\n");
-		return -ENODEV;
-	}
+
+	mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mcbsp->io_base))
+		return PTR_ERR(mcbsp->io_base);
 
 	mcbsp->phys_base = res->start;
 	mcbsp->reg_cache_size = resource_size(res);
-	mcbsp->io_base = devm_ioremap(&pdev->dev, res->start,
-				      resource_size(res));
-	if (!mcbsp->io_base)
-		return -ENOMEM;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
 	if (!res)
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index aeef25c..584b237 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -81,7 +81,15 @@
 	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
 					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 	if (ret < 0) {
-		dev_err(dai->dev, "could not apply constraint\n");
+		dev_err(dai->dev, "Could not apply period constraint: %d\n",
+			ret);
+		return ret;
+	}
+	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
+	if (ret < 0) {
+		dev_err(dai->dev, "Could not apply buffer constraint: %d\n",
+			ret);
 		return ret;
 	}
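
With both the period size and the buffer size now constrained to 128-byte
steps, the DMA stays aligned for any supported format. As a worked
example, for 16-bit stereo a frame is 4 bytes, so the constraint rounds
period and buffer sizes to multiples of:

	128 bytes / (2 channels * 2 bytes) = 32 frames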
 
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 076bec6..732e749 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -154,8 +154,7 @@
 
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* All TWL4030 output pins are floating */
 	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
@@ -174,8 +173,7 @@
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* Not comnnected */
 	snd_soc_dapm_nc_pin(dapm, "HSMIC");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 1eb45dc..51e790d 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -232,13 +232,7 @@
 		mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max =
 						pdata->period_max_capture;
 	}
-	return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
-}
-
-static int mmp_pcm_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
 }
 
 static struct platform_driver mmp_pcm_driver = {
@@ -247,7 +241,6 @@
 	},
 
 	.probe = mmp_pcm_probe,
-	.remove = mmp_pcm_remove,
 };
 
 module_platform_driver(mmp_pcm_driver);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index fbe2e93..3da485e 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -813,14 +813,8 @@
 
 static int asoc_ssp_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
-					  &pxa_ssp_dai, 1);
-}
-
-static int asoc_ssp_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
+					       &pxa_ssp_dai, 1);
 }
 
 static struct platform_driver asoc_ssp_driver = {
@@ -830,7 +824,6 @@
 	},
 
 	.probe = asoc_ssp_probe,
-	.remove = asoc_ssp_remove,
 };
 
 module_platform_driver(asoc_ssp_driver);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index e68290c..6b4e400 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -367,19 +367,12 @@
 
 static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
-					  &pxa_i2s_dai, 1);
-}
-
-static int pxa2xx_i2s_drv_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
+					       &pxa_i2s_dai, 1);
 }
 
 static struct platform_driver pxa2xx_i2s_driver = {
 	.probe = pxa2xx_i2s_drv_probe,
-	.remove = pxa2xx_i2s_drv_remove,
 
 	.driver = {
 		.name = "pxa2xx-i2s",
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index a51c9da..831ee37 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -124,13 +124,7 @@
 
 static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
-}
-
-static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
 }
 
 #ifdef CONFIG_OF
@@ -147,7 +141,6 @@
 	},
 
 	.probe = pxa2xx_soc_platform_probe,
-	.remove = pxa2xx_soc_platform_remove,
 };
 
 module_platform_driver(pxa_pcm_driver);
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 807fedf..3cc252e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -1,5 +1,6 @@
 config SND_SOC_QCOM
 	tristate "ASoC support for QCOM platforms"
+	depends on ARCH_QCOM || COMPILE_TEST
 	help
           Say Y or M if you want to add support to use audio devices
           in Qualcomm Technologies SOC-based platforms.
@@ -14,19 +15,17 @@
 
 config SND_SOC_LPASS_IPQ806X
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_LPASS_APQ8016
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_STORM
 	tristate "ASoC I2S support for Storm boards"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_IPQ806X
 	select SND_SOC_MAX98357A
 	help
@@ -35,7 +34,7 @@
 
 config SND_SOC_APQ8016_SBC
 	tristate "SoC Audio support for APQ8016 SBC platforms"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_APQ8016
 	help
           Support for Qualcomm Technologies LPASS audio block in
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 23f3d59..97bc202 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -235,7 +235,7 @@
 	return ret;
 }
 
-struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
 	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
 	.startup	= lpass_cpu_daiops_startup,
 	.shutdown	= lpass_cpu_daiops_shutdown,
diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c
index 7356d3a..7a41679 100644
--- a/sound/soc/qcom/lpass-ipq806x.c
+++ b/sound/soc/qcom/lpass-ipq806x.c
@@ -73,7 +73,7 @@
 	return 0;
 }
 
-struct lpass_variant ipq806x_data = {
+static struct lpass_variant ipq806x_data = {
 	.i2sctrl_reg_base	= 0x0010,
 	.i2sctrl_reg_stride	= 0x04,
 	.i2s_ports		= 5,
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index d6e86c1..0b63e2e 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -93,6 +93,6 @@
 int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai);
-extern struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
+extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
 
 #endif /* __LPASS_H__ */
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index e181826..58bae8e 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -14,3 +14,22 @@
 	  Say Y or M if you want to add support for I2S driver for
 	  Rockchip I2S device. The device supports upto maximum of
 	  8 channels each for play and record.
+
+config SND_SOC_ROCKCHIP_MAX98090
+	tristate "ASoC support for Rockchip boards using a MAX98090 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_MAX98090
+	select SND_SOC_TS3A227E
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the MAX98090 codec, such as Veyron.
+
+config SND_SOC_ROCKCHIP_RT5645
+	tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_RT5645
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the RT5645/RT5650 codec, such as Veyron.
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index b921909..1bc1dc3 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -2,3 +2,9 @@
 snd-soc-i2s-objs := rockchip_i2s.o
 
 obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o
+
+snd-soc-rockchip-max98090-objs := rockchip_max98090.o
+snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
+
+obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index acb5be5..b936102 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -483,16 +483,14 @@
 		goto err_suspend;
 	}
 
-	ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register PCM\n");
-		goto err_pcm_register;
+		return ret;
 	}
 
 	return 0;
 
-err_pcm_register:
-	snd_dmaengine_pcm_unregister(&pdev->dev);
 err_suspend:
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		i2s_runtime_suspend(&pdev->dev);
@@ -512,8 +510,6 @@
 
 	clk_disable_unprepare(i2s->mclk);
 	clk_disable_unprepare(i2s->hclk);
-	snd_dmaengine_pcm_unregister(&pdev->dev);
-	snd_soc_unregister_component(&pdev->dev);
 
 	return 0;
 }
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
new file mode 100644
index 0000000..26567b1
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -0,0 +1,236 @@
+/*
+ * Rockchip machine ASoC driver for boards using a MAX98090 CODEC.
+ *
+ * Copyright (c) 2014, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "rockchip_i2s.h"
+#include "../codecs/ts3a227e.h"
+
+#define DRV_NAME "rockchip-snd-max98090"
+
+static struct snd_soc_jack headset_jack;
+static struct snd_soc_jack_pin headset_jack_pins[] = {
+	{
+		.pin = "Headset Jack",
+		.mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+			SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			SND_JACK_BTN_2 | SND_JACK_BTN_3,
+	},
+};
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	{"IN34", NULL, "Headset Mic"},
+	{"IN34", NULL, "MICBIAS"},
+	{"MICBIAS", NULL, "Headset Mic"},
+	{"DMICL", NULL, "Int Mic"},
+	{"Headphone", NULL, "HPL"},
+	{"Headphone", NULL, "HPR"},
+	{"Speaker", NULL, "SPKL"},
+	{"Speaker", NULL, "SPKR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphone"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	/* Enable Headset and 4 Buttons Jack detection */
+	return snd_soc_card_jack_new(runtime->card, "Headset Jack",
+			       SND_JACK_HEADSET |
+			       SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			       SND_JACK_BTN_2 | SND_JACK_BTN_3,
+			       &headset_jack,
+			       headset_jack_pins,
+			       ARRAY_SIZE(headset_jack_pins));
+}
+
+static int rk_98090_headset_init(struct snd_soc_component *component)
+{
+	return ts3a227e_enable_jack_detect(component, &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_aux_dev rk_98090_headset_dev = {
+	.name = "Headset Chip",
+	.init = rk_98090_headset_init,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "max98090",
+	.stream_name = "Audio",
+	.codec_dai_name = "HiFi",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set max98090 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "ROCKCHIP-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.aux_dev = &rk_98090_headset_dev,
+	.num_aux_devs = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	rk_98090_headset_dev.codec_of_node = of_parse_phandle(np,
+			"rockchip,headset-codec", 0);
+	if (!rk_98090_headset_dev.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,headset-codec' missing/invalid\n");
+		return -EINVAL;
+	}
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_max98090_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-max98090", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_max98090_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_max98090_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("jianqun <jay.xu@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip max98090 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
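
rk_aif1_hw_params() above (and its twin in the rt5645 driver that follows)
derives MCLK as 256 * 48000 or 256 * 44100, picking whichever rate family
the stream belongs to; the result is always an integer multiple of the
sample rate:

	12288000 / 8000  = 1536
	12288000 / 16000 = 768
	12288000 / 48000 = 256
	12288000 / 96000 = 128
	11289600 / 44100 = 256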
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
new file mode 100644
index 0000000..68c62e4
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -0,0 +1,225 @@
+/*
+ * Rockchip machine ASoC driver for boards using a RT5645/RT5650 CODEC.
+ *
+ * Copyright (c) 2015, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "rockchip_i2s.h"
+
+#define DRV_NAME "rockchip-snd-rt5645"
+
+static struct snd_soc_jack headset_jack;
+
+/* Jack detect via rt5645 driver. */
+extern int rt5645_set_jack_detect(struct snd_soc_codec *codec,
+	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
+	struct snd_soc_jack *btn_jack);
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_SPK("Speakers", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	/* Input Lines */
+	{"DMIC L2", NULL, "Int Mic"},
+	{"DMIC R2", NULL, "Int Mic"},
+	{"RECMIXL", NULL, "Headset Mic"},
+	{"RECMIXR", NULL, "Headset Mic"},
+
+	/* Output Lines */
+	{"Headphones", NULL, "HPOR"},
+	{"Headphones", NULL, "HPOL"},
+	{"Speakers", NULL, "SPOL"},
+	{"Speakers", NULL, "SPOR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphones"),
+	SOC_DAPM_PIN_SWITCH("Speakers"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	int ret;
+
+	/* Enable Headset and 4 Buttons Jack detection */
+	ret = snd_soc_card_jack_new(card, "Headset Jack",
+				    SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
+				    &headset_jack, NULL, 0);
+	if (ret) {
+		dev_err(card->dev, "New Headset Jack failed! (%d)\n", ret);
+		return ret;
+	}
+
+	return rt5645_set_jack_detect(runtime->codec,
+				     &headset_jack,
+				     &headset_jack,
+				     &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "rt5645",
+	.stream_name = "rt5645 PCM",
+	.codec_dai_name = "rt5645-aif1",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set rt5645 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "I2S-RT5650",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_rt5645_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-rt5645", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_rt5645_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip rt5645 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
index 8bf2e2c..ee1fda9 100644
--- a/sound/soc/samsung/arndale_rt5631.c
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -71,6 +71,7 @@
 
 static struct snd_soc_card arndale_rt5631 = {
 	.name = "Arndale RT5631",
+	.owner = THIS_MODULE,
 	.dai_link = arndale_rt5631_dai,
 	.num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
@@ -116,15 +117,6 @@
 	return ret;
 }
 
-static int arndale_audio_remove(struct platform_device *pdev)
-{
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-	snd_soc_unregister_card(card);
-
-	return 0;
-}
-
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
 	{ .compatible = "samsung,arndale-rt5631", },
 	{ .compatible = "samsung,arndale-alc5631", },
@@ -139,7 +131,6 @@
 		.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
 	},
 	.probe = arndale_audio_probe,
-	.remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 7651dc9..07ce2cf 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -56,6 +56,7 @@
 
 static struct snd_soc_card snow_snd = {
 	.name = "Snow-I2S",
+	.owner = THIS_MODULE,
 	.dai_link = snow_dai,
 	.num_links = ARRAY_SIZE(snow_dai),
 
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index fd11404..8fad444 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -327,13 +327,7 @@
 
 static int sh7760_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
-}
-
-static int sh7760_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
 }
 
 static struct platform_driver sh7760_pcm_driver = {
@@ -342,7 +336,6 @@
 	},
 
 	.probe = sh7760_soc_platform_probe,
-	.remove = sh7760_soc_platform_remove,
 };
 
 module_platform_driver(sh7760_pcm_driver);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 142c066..0215c78 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1911,7 +1911,6 @@
 
 static const struct platform_device_id fsi_id_table[] = {
 	{ "sh_fsi",	(kernel_ulong_t)&fsi1_core },
-	{ "sh_fsi2",	(kernel_ulong_t)&fsi2_core },
 	{},
 };
 MODULE_DEVICE_TABLE(platform, fsi_id_table);
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index f1b4451..8b25850 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,4 +1,4 @@
-snd-soc-rcar-objs	:= core.o gen.o dma.o src.o adg.o ssi.o dvc.o
+snd-soc-rcar-objs	:= core.o gen.o dma.o adg.o ssi.o src.o ctu.o mix.o dvc.o
 obj-$(CONFIG_SND_SOC_RCAR)	+= snd-soc-rcar.o
 
 snd-soc-rsrc-card-objs	:= rsrc-card.o
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f1e5920..f3feed5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -203,9 +203,9 @@
 }
 
 /*
- *	settting function
+ *	ADINR function
  */
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 {
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -227,6 +227,64 @@
 	return adinr;
 }
 
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 chan = runtime->channels;
+
+	switch (chan) {
+	case 1:
+	case 2:
+	case 4:
+	case 6:
+	case 8:
+		break;
+	default:
+		dev_warn(dev, "not supported channel\n");
+		chan = 0;
+		break;
+	}
+
+	return chan;
+}
+
+/*
+ *	DALIGN function
+ */
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+	struct rsnd_mod *target = src ? src : ssi;
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	u32 val = 0x76543210;
+	u32 mask = ~0;
+
+	mask <<= runtime->channels * 4;
+	val = val & mask;
+
+	switch (runtime->sample_bits) {
+	case 16:
+		val |= 0x67452301 & ~mask;
+		break;
+	case 32:
+		val |= 0x76543210 & ~mask;
+		break;
+	}
+
+	/*
+	 * exchange channels on SRC if possible;
+	 * otherwise the R/L volume settings on DVC
+	 * would be applied to the swapped channels
+	 */
+	if (mod == target)
+		return val;
+	else
+		return 0x76543210;
+}
+
 /*
  *	rsnd_dai functions
  */
@@ -242,9 +300,9 @@
 	if (val == __rsnd_mod_call_##func) {				\
 		called = 1;						\
 		ret = (mod)->ops->func(mod, io, param);			\
-		mod->status = (mod->status & ~mask) +			\
-			(add << __rsnd_mod_shift_##func);		\
 	}								\
+	mod->status = (mod->status & ~mask) +				\
+		(add << __rsnd_mod_shift_##func);			\
 	dev_dbg(dev, "%s[%d] 0x%08x %s\n",				\
 		rsnd_mod_name(mod), rsnd_mod_id(mod), mod->status,	\
 		called ? #func : "");					\
@@ -274,21 +332,21 @@
 static int rsnd_dai_connect(struct rsnd_mod *mod,
 			    struct rsnd_dai_stream *io)
 {
+	struct rsnd_priv *priv;
+	struct device *dev;
+
 	if (!mod)
 		return -EIO;
 
-	if (io->mod[mod->type]) {
-		struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-		struct device *dev = rsnd_priv_to_dev(priv);
-
-		dev_err(dev, "%s[%d] is not empty\n",
-			rsnd_mod_name(mod),
-			rsnd_mod_id(mod));
-		return -EIO;
-	}
+	priv = rsnd_mod_to_priv(mod);
+	dev = rsnd_priv_to_dev(priv);
 
 	io->mod[mod->type] = mod;
 
+	dev_dbg(dev, "%s[%d] is connected to io (%s)\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod),
+		rsnd_io_is_play(io) ? "Playback" : "Capture");
+
 	return 0;
 }
 
@@ -517,7 +575,7 @@
 	.set_fmt	= rsnd_soc_dai_set_fmt,
 };
 
-#define rsnd_path_parse(priv, io, type)				\
+#define rsnd_path_add(priv, io, type)				\
 ({								\
 	struct rsnd_mod *mod;					\
 	int ret = 0;						\
@@ -533,7 +591,7 @@
 	ret;							\
 })
 
-#define rsnd_path_break(priv, io, type)				\
+#define rsnd_path_remove(priv, io, type)			\
 {								\
 	struct rsnd_mod *mod;					\
 	int id = -1;						\
@@ -547,6 +605,79 @@
 	}							\
 }
 
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *cmd;
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 data;
+
+	/* Gen1 is not supported */
+	if (rsnd_is_gen1(priv))
+		return;
+
+	if (!mix && !dvc)
+		return;
+
+	if (mix) {
+		struct rsnd_dai *rdai;
+		int i;
+		u32 path[] = {
+			[0] = 0,
+			[1] = 1 << 0,
+			[2] = 0,
+			[3] = 0,
+			[4] = 0,
+			[5] = 1 << 8
+		};
+
+		/*
+		 * This assumes the integrator understands the data path
+		 * well; impossible connections such as src2 + src5 are
+		 * not checked here.
+		 */
+		data = 0;
+		for_each_rsnd_dai(rdai, priv, i) {
+			io = &rdai->playback;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+
+			io = &rdai->capture;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+		}
+
+		/*
+		 * We can't use ctu = rsnd_io_ctu() here, since the IDs
+		 * of dvc/mix are 0 or 1 (= same as the CMD number) while
+		 * CTU IDs are 0 - 7 (= CTU00 - CTU13)
+		 */
+		cmd = mix;
+	} else {
+		u32 path[] = {
+			[0] = 0x30000,
+			[1] = 0x30001,
+			[2] = 0x40000,
+			[3] = 0x10000,
+			[4] = 0x20000,
+			[5] = 0x40100
+		};
+
+		data = path[rsnd_mod_id(src)];
+
+		cmd = dvc;
+	}
+
+	dev_dbg(dev, "ctu/mix path = 0x%08x", data);
+
+	rsnd_mod_write(cmd, CMD_ROUTE_SLCT, data);
+
+	rsnd_mod_write(cmd, CMD_CTRL, 0x10);
+}
+
 static int rsnd_path_init(struct rsnd_priv *priv,
 			  struct rsnd_dai *rdai,
 			  struct rsnd_dai_stream *io)
@@ -564,18 +695,28 @@
 	 * using fixed path.
 	 */
 
-	/* SRC */
-	ret = rsnd_path_parse(priv, io, src);
+	/* SSI */
+	ret = rsnd_path_add(priv, io, ssi);
 	if (ret < 0)
 		return ret;
 
-	/* SSI */
-	ret = rsnd_path_parse(priv, io, ssi);
+	/* SRC */
+	ret = rsnd_path_add(priv, io, src);
+	if (ret < 0)
+		return ret;
+
+	/* CTU */
+	ret = rsnd_path_add(priv, io, ctu);
+	if (ret < 0)
+		return ret;
+
+	/* MIX */
+	ret = rsnd_path_add(priv, io, mix);
 	if (ret < 0)
 		return ret;
 
 	/* DVC */
-	ret = rsnd_path_parse(priv, io, dvc);
+	ret = rsnd_path_add(priv, io, dvc);
 	if (ret < 0)
 		return ret;
 
@@ -589,13 +730,15 @@
 	struct device_node *dai_node,	*dai_np;
 	struct device_node *ssi_node,	*ssi_np;
 	struct device_node *src_node,	*src_np;
+	struct device_node *ctu_node,	*ctu_np;
+	struct device_node *mix_node,	*mix_np;
 	struct device_node *dvc_node,	*dvc_np;
 	struct device_node *playback, *capture;
 	struct rsnd_dai_platform_info *dai_info;
 	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
 	struct device *dev = &pdev->dev;
 	int nr, i;
-	int dai_i, ssi_i, src_i, dvc_i;
+	int dai_i, ssi_i, src_i, ctu_i, mix_i, dvc_i;
 
 	if (!of_data)
 		return;
@@ -621,6 +764,8 @@
 
 	ssi_node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi");
 	src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
+	ctu_node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	mix_node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
 	dvc_node = of_get_child_by_name(dev->of_node, "rcar_sound,dvc");
 
 #define mod_parse(name)							\
@@ -657,6 +802,8 @@
 
 			mod_parse(ssi);
 			mod_parse(src);
+			mod_parse(ctu);
+			mod_parse(mix);
 			mod_parse(dvc);
 
 			of_node_put(playback);
@@ -1033,8 +1180,8 @@
 		/*
 		 * remove SRC/DVC from DAI,
 		 */
-		rsnd_path_break(priv, io, src);
-		rsnd_path_break(priv, io, dvc);
+		rsnd_path_remove(priv, io, src);
+		rsnd_path_remove(priv, io, dvc);
 
 		/*
 		 * fallback
@@ -1069,6 +1216,8 @@
 		rsnd_dma_probe,
 		rsnd_ssi_probe,
 		rsnd_src_probe,
+		rsnd_ctu_probe,
+		rsnd_mix_probe,
 		rsnd_dvc_probe,
 		rsnd_adg_probe,
 		rsnd_dai_probe,
@@ -1164,6 +1313,8 @@
 			      struct rsnd_priv *priv) = {
 		rsnd_ssi_remove,
 		rsnd_src_remove,
+		rsnd_ctu_remove,
+		rsnd_mix_remove,
 		rsnd_dvc_remove,
 	};
 	int ret = 0, i;
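
The switch statement at the top of this core.c section is the tail of the new
rsnd_get_dalign() helper (declared in the rsnd.h hunk below), which centralizes
the BUSIF data-alignment value that src.c previously open-coded; the old copy is
removed in the src.c hunks further down. A rough standalone sketch of the
computation, with a hypothetical name, userspace types, and assuming fewer than
eight channels so the shift stays well-defined:

    #include <stdint.h>

    /* sketch only: mirrors the mask/val logic, not the driver's helper */
    static uint32_t dalign_sketch(unsigned int channels, unsigned int sample_bits)
    {
    	uint32_t mask = ~0U << (channels * 4);	/* nibbles above the active channels */
    	uint32_t val = 0x76543210 & mask;	/* keep the default lane order there */

    	switch (sample_bits) {
    	case 16:
    		val |= 0x67452301 & ~mask;	/* swap the 16bit L/R byte lanes */
    		break;
    	case 32:
    		val |= 0x76543210 & ~mask;	/* natural order for 32bit samples */
    		break;
    	}
    	return val;
    }
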
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
new file mode 100644
index 0000000..05498bb
--- /dev/null
+++ b/sound/soc/sh/rcar/ctu.c
@@ -0,0 +1,171 @@
+/*
+ * ctu.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define CTU_NAME_SIZE	16
+#define CTU_NAME "ctu"
+
+struct rsnd_ctu {
+	struct rsnd_ctu_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_ctu_nr(priv) ((priv)->ctu_nr)
+#define for_each_rsnd_ctu(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_ctu_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_ctu *)(priv)->ctu + i);	\
+	     i++)
+
+#define rsnd_ctu_initialize_lock(mod)	__rsnd_ctu_initialize_lock(mod, 1)
+#define rsnd_ctu_initialize_unlock(mod)	__rsnd_ctu_initialize_lock(mod, 0)
+static void __rsnd_ctu_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, CTU_CTUIR, enable);
+}
+
+static int rsnd_ctu_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_ctu_initialize_lock(mod);
+
+	rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_ctu_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_ctu_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ctu_ops = {
+	.name		= CTU_NAME,
+	.init		= rsnd_ctu_init,
+	.quit		= rsnd_ctu_quit,
+};
+
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_ctu_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_ctu *)(priv->ctu) + id)->mod;
+}
+
+static void rsnd_of_parse_ctu(struct platform_device *pdev,
+		       const struct rsnd_of_data *of_data,
+		       struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_ctu_platform_info *ctu_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_ctu_end;
+
+	ctu_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_ctu_platform_info) * nr,
+				GFP_KERNEL);
+	if (!ctu_info) {
+		dev_err(dev, "ctu info allocation error\n");
+		goto rsnd_of_parse_ctu_end;
+	}
+
+	info->ctu_info		= ctu_info;
+	info->ctu_info_nr	= nr;
+
+rsnd_of_parse_ctu_end:
+	of_node_put(node);
+
+}
+
+int rsnd_ctu_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_ctu *ctu;
+	struct clk *clk;
+	char name[CTU_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "CTU is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_ctu(pdev, of_data, priv);
+
+	nr = info->ctu_info_nr;
+	if (!nr)
+		return 0;
+
+	ctu = devm_kzalloc(dev, sizeof(*ctu) * nr, GFP_KERNEL);
+	if (!ctu)
+		return -ENOMEM;
+
+	priv->ctu_nr	= nr;
+	priv->ctu	= ctu;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		/*
+		 * CTU00, CTU01, CTU02, CTU03 => CTU0
+		 * CTU10, CTU11, CTU12, CTU13 => CTU1
+		 */
+		snprintf(name, CTU_NAME_SIZE, "%s.%d",
+			 CTU_NAME, i / 4);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		ctu->info = &info->ctu_info[i];
+
+		ret = rsnd_mod_init(priv, &ctu->mod, &rsnd_ctu_ops,
+				    clk, RSND_MOD_CTU, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_ctu_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_ctu *ctu;
+	int i;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		rsnd_mod_quit(&ctu->mod);
+	}
+}
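
The new CTU code uses the same init-register bracket that this series also
applies to SRC, MIX and DVC: write 1 to the block's init register to hold it in
its initialization state, program the block, then write 0 to release it.
Condensed into a single hypothetical helper (the driver keeps the two halves as
the rsnd_ctu_initialize_lock/unlock macros above):

    #include "rsnd.h"

    /* sketch: the register sequence rsnd_ctu_init() performs */
    static void ctu_setup_sketch(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
    {
    	rsnd_mod_write(mod, CTU_CTUIR, 1);	/* lock: hold in init state */
    	rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
    	rsnd_mod_write(mod, CTU_CTUIR, 0);	/* unlock: start operation */
    }
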
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index d306e29..bfbb8a5 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -27,6 +27,15 @@
 	int dmapp_num;
 };
 
+struct rsnd_dma_ops {
+	char *name;
+	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
+		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
+	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+};
+
 #define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
 
 /*
@@ -168,7 +177,7 @@
 		dma_cap_set(DMA_SLAVE, mask);
 
 		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
-						  (void *)id);
+						  (void *)(uintptr_t)id);
 	}
 	if (IS_ERR_OR_NULL(dmaen->chan)) {
 		dmaen->chan = NULL;
@@ -182,7 +191,8 @@
 	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	dev_dbg(dev, "dma : %pad -> %pad\n",
+	dev_dbg(dev, "%s %pad -> %pad\n",
+		dma->ops->name,
 		&cfg.src_addr, &cfg.dst_addr);
 
 	ret = dmaengine_slave_config(dmaen->chan, &cfg);
@@ -215,6 +225,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmaen_ops = {
+	.name	= "audmac",
 	.start	= rsnd_dmaen_start,
 	.stop	= rsnd_dmaen_stop,
 	.init	= rsnd_dmaen_init,
@@ -360,6 +371,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmapp_ops = {
+	.name	= "audmac-pp",
 	.start	= rsnd_dmapp_start,
 	.stop	= rsnd_dmapp_stop,
 	.init	= rsnd_dmapp_init,
@@ -414,7 +426,9 @@
 	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
 	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
 	int use_src = !!rsnd_io_to_mod_src(io);
-	int use_dvc = !!rsnd_io_to_mod_dvc(io);
+	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
+		      !!rsnd_io_to_mod_mix(io) ||
+		      !!rsnd_io_to_mod_ctu(io);
 	int id = rsnd_mod_id(mod);
 	struct dma_addr {
 		dma_addr_t out_addr;
@@ -452,7 +466,7 @@
 	};
 
 	/* it shouldn't happen */
-	if (use_dvc && !use_src)
+	if (use_cmd && !use_src)
 		dev_err(dev, "DVC is selected without SRC\n");
 
 	/* use SSIU or SSI ? */
@@ -460,8 +474,8 @@
 		is_ssi++;
 
 	return (is_from) ?
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
 }
 
 static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
@@ -482,7 +496,7 @@
 	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
 }
 
-#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */
+#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
 static void rsnd_dma_of_path(struct rsnd_dma *dma,
 			     struct rsnd_dai_stream *io,
 			     int is_play,
@@ -492,55 +506,81 @@
 	struct rsnd_mod *this = rsnd_dma_to_mod(dma);
 	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
 	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
 	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
 	struct rsnd_mod *mod[MOD_MAX];
-	int i, index;
+	struct rsnd_mod *mod_start, *mod_end;
+	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	int nr, i;
 
+	if (!ssi)
+		return;
 
-	for (i = 0; i < MOD_MAX; i++)
+	nr = 0;
+	for (i = 0; i < MOD_MAX; i++) {
 		mod[i] = NULL;
-
-	/*
-	 * in play case...
-	 *
-	 * src -> dst
-	 *
-	 * mem -> SSI
-	 * mem -> SRC -> SSI
-	 * mem -> SRC -> DVC -> SSI
-	 */
-	mod[0] = NULL; /* for "mem" */
-	index = 1;
-	for (i = 1; i < MOD_MAX; i++) {
-		if (!src) {
-			mod[i] = ssi;
-		} else if (!dvc) {
-			mod[i] = src;
-			src = NULL;
-		} else {
-			if ((!is_play) && (this == src))
-				this = dvc;
-
-			mod[i] = (is_play) ? src : dvc;
-			i++;
-			mod[i] = (is_play) ? dvc : src;
-			src = NULL;
-			dvc = NULL;
-		}
-
-		if (mod[i] == this)
-			index = i;
-
-		if (mod[i] == ssi)
-			break;
+		nr += !!rsnd_io_to_mod(io, i);
 	}
 
-	if (is_play) {
-		*mod_from = mod[index - 1];
-		*mod_to   = mod[index];
+	/*
+	 * [S] -*-> [E]
+	 * [S] -*-> SRC -o-> [E]
+	 * [S] -*-> SRC -> DVC -o-> [E]
+	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
+	 *
+	 * playback	[S] = mem
+	 *		[E] = SSI
+	 *
+	 * capture	[S] = SSI
+	 *		[E] = mem
+	 *
+	 * -*->		Audio DMAC
+	 * -o->		Audio DMAC peri peri
+	 */
+	mod_start	= (is_play) ? NULL : ssi;
+	mod_end		= (is_play) ? ssi  : NULL;
+
+	mod[0] = mod_start;
+	for (i = 1; i < nr; i++) {
+		if (src) {
+			mod[i] = src;
+			src = NULL;
+		} else if (ctu) {
+			mod[i] = ctu;
+			ctu = NULL;
+		} else if (mix) {
+			mod[i] = mix;
+			mix = NULL;
+		} else if (dvc) {
+			mod[i] = dvc;
+			dvc = NULL;
+		}
+	}
+	mod[i] = mod_end;
+
+	/*
+	 *		| SSI | SRC |
+	 * -------------+-----+-----+
+	 *  is_play	|  o  |  *  |
+	 * !is_play	|  *  |  o  |
+	 */
+	if ((this == ssi) == (is_play)) {
+		*mod_from	= mod[nr - 1];
+		*mod_to		= mod[nr];
 	} else {
-		*mod_from = mod[index];
-		*mod_to   = mod[index - 1];
+		*mod_from	= mod[0];
+		*mod_to		= mod[1];
+	}
+
+	dev_dbg(dev, "module connection (this is %s[%d])\n",
+		rsnd_mod_name(this), rsnd_mod_id(this));
+	for (i = 0; i <= nr; i++) {
+		dev_dbg(dev, "  %s[%d]%s\n",
+		       rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
+		       (mod[i] == *mod_from) ? " from" :
+		       (mod[i] == *mod_to)   ? " to" : "");
 	}
 }
 
@@ -568,10 +608,11 @@
 
 int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id)
 {
-	struct rsnd_mod *mod_from;
-	struct rsnd_mod *mod_to;
+	struct rsnd_mod *mod_from = NULL;
+	struct rsnd_mod *mod_to = NULL;
 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
 	int is_play = rsnd_io_is_play(io);
 
 	/*
@@ -598,6 +639,11 @@
 	if (rsnd_is_gen1(priv))
 		dma->ops = &rsnd_dmaen_ops;
 
+	dev_dbg(dev, "%s %s[%d] -> %s[%d]\n",
+		dma->ops->name,
+		rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
+		rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
+
 	return dma->ops->init(io, dma, id, mod_from, mod_to);
 }
 
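The rewritten rsnd_dma_of_path() builds the complete memory-to-SSI chain into
mod[] and then lets the module that owns the DMA ("this") pick its two
endpoints, following the SSI/SRC table in the hunk above. A standalone toy
version of that selection for a playback stream with SRC and DVC but no
CTU/MIX (nr == 3); the string "modules" are purely illustrative:

    #include <stdio.h>

    int main(void)
    {
    	const char *mod[] = { "mem", "SRC", "DVC", "SSI" };	/* [S] ... [E] */
    	int nr = 3;			/* SSI + SRC + DVC */
    	int is_play = 1;
    	int this_is_ssi = 1;		/* the SSI-side DMAC is being set up */
    	int from, to;

    	if (this_is_ssi == is_play) {	/* (this == ssi) == is_play */
    		from = nr - 1;		/* take the tail of the chain */
    		to = nr;
    	} else {
    		from = 0;		/* take the head of the chain */
    		to = 1;
    	}
    	printf("%s -> %s\n", mod[from], mod[to]);	/* prints "DVC -> SSI" */
    	return 0;
    }
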
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 36fc020..5779638 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -24,6 +24,7 @@
 	struct rsnd_kctrl_cfg_s rdown;	/* Ramp Rate Down */
 };
 
+#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
 #define rsnd_dvc_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
 
@@ -63,6 +64,19 @@
 	"0.125 dB/8192 steps",	 /* 10111 */
 };
 
+static void rsnd_dvc_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, DVC_SWRSR, 0);
+	rsnd_mod_write(mod, DVC_SWRSR, 1);
+}
+
+#define rsnd_dvc_initialize_lock(mod)	__rsnd_dvc_initialize_lock(mod, 1)
+#define rsnd_dvc_initialize_unlock(mod)	__rsnd_dvc_initialize_lock(mod, 0)
+static void __rsnd_dvc_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, DVC_DVUIR, enable);
+}
+
 static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io,
 				   struct rsnd_mod *mod)
 {
@@ -135,49 +149,24 @@
 	return 0;
 }
 
-static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
+static int rsnd_dvc_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
-	struct device *dev = rsnd_priv_to_dev(priv);
-	int dvc_id = rsnd_mod_id(dvc_mod);
-	int src_id = rsnd_mod_id(src_mod);
-	u32 route[] = {
-		[0] = 0x30000,
-		[1] = 0x30001,
-		[2] = 0x40000,
-		[3] = 0x10000,
-		[4] = 0x20000,
-		[5] = 0x40100
-	};
+	rsnd_mod_hw_start(mod);
 
-	if (src_id >= ARRAY_SIZE(route)) {
-		dev_err(dev, "DVC%d isn't connected to SRC%d\n", dvc_id, src_id);
-		return -EINVAL;
-	}
+	rsnd_dvc_soft_reset(mod);
 
-	rsnd_mod_hw_start(dvc_mod);
+	rsnd_dvc_initialize_lock(mod);
 
-	/*
-	 * fixme
-	 * it doesn't support CTU/MIX
-	 */
-	rsnd_mod_write(dvc_mod, CMD_ROUTE_SLCT, route[src_id]);
+	rsnd_path_parse(priv, io);
 
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 0);
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_ADINR, rsnd_get_adinr(dvc_mod, io));
+	rsnd_mod_write(mod, DVC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* ch0/ch1 Volume */
-	rsnd_dvc_volume_update(io, dvc_mod);
+	rsnd_dvc_volume_update(io, mod);
 
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 0);
-
-	rsnd_adg_set_cmd_timsel_gen2(dvc_mod, io);
+	rsnd_adg_set_cmd_timsel_gen2(mod, io);
 
 	return 0;
 }
@@ -195,6 +184,8 @@
 			  struct rsnd_dai_stream *io,
 			  struct rsnd_priv *priv)
 {
+	rsnd_dvc_initialize_unlock(mod);
+
 	rsnd_mod_write(mod, CMD_CTRL, 0x10);
 
 	return 0;
@@ -341,23 +332,21 @@
 	char name[RSND_DVC_NAME_SIZE];
 	int i, nr, ret;
 
-	rsnd_of_parse_dvc(pdev, of_data, priv);
-
-	nr = info->dvc_info_nr;
-	if (!nr)
-		return 0;
-
 	/* This driver doesn't support Gen1 at this point */
 	if (rsnd_is_gen1(priv)) {
 		dev_warn(dev, "CMD is not supported on Gen1\n");
 		return -EINVAL;
 	}
 
+	rsnd_of_parse_dvc(pdev, of_data, priv);
+
+	nr = info->dvc_info_nr;
+	if (!nr)
+		return 0;
+
 	dvc	= devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL);
-	if (!dvc) {
-		dev_err(dev, "CMD allocate failed\n");
+	if (!dvc)
 		return -ENOMEM;
-	}
 
 	priv->dvc_nr	= nr;
 	priv->dvc	= dvc;
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 8c7dc51..f04d17b 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -103,6 +103,22 @@
 	regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
 }
 
+void rsnd_force_write(struct rsnd_priv *priv,
+		      struct rsnd_mod *mod,
+		      enum rsnd_reg reg, u32 data)
+{
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+	if (!rsnd_is_accessible_reg(priv, gen, reg))
+		return;
+
+	dev_dbg(dev, "w %s[%d] - %4d : %08x\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data);
+
+	regmap_fields_force_write(gen->regs[reg], rsnd_mod_id(mod), data);
+}
+
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
 	       enum rsnd_reg reg, u32 mask, u32 data)
 {
@@ -200,12 +216,13 @@
 		/* FIXME: it needs SSI_MODE2/3 in the future */
 		RSND_GEN_M_REG(SSI_BUSIF_MODE,	0x0,	0x80),
 		RSND_GEN_M_REG(SSI_BUSIF_ADINR,	0x4,	0x80),
-		RSND_GEN_M_REG(BUSIF_DALIGN,	0x8,	0x80),
+		RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8,	0x80),
 		RSND_GEN_M_REG(SSI_CTRL,	0x10,	0x80),
-		RSND_GEN_M_REG(INT_ENABLE,	0x18,	0x80),
+		RSND_GEN_M_REG(SSI_INT_ENABLE,	0x18,	0x80),
 	};
 	struct rsnd_regmap_field_conf conf_scu[] = {
 		RSND_GEN_M_REG(SRC_BUSIF_MODE,	0x0,	0x20),
+		RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8,	0x20),
 		RSND_GEN_M_REG(SRC_ROUTE_MODE0,	0xc,	0x20),
 		RSND_GEN_M_REG(SRC_CTRL,	0x10,	0x20),
 		RSND_GEN_M_REG(SRC_INT_ENABLE0,	0x18,	0x20),
@@ -223,6 +240,18 @@
 		RSND_GEN_M_REG(SRC_SRCCR,	0x224,	0x40),
 		RSND_GEN_M_REG(SRC_BSDSR,	0x22c,	0x40),
 		RSND_GEN_M_REG(SRC_BSISR,	0x238,	0x40),
+		RSND_GEN_M_REG(CTU_CTUIR,	0x504,	0x100),
+		RSND_GEN_M_REG(CTU_ADINR,	0x508,	0x100),
+		RSND_GEN_M_REG(MIX_SWRSR,	0xd00,	0x40),
+		RSND_GEN_M_REG(MIX_MIXIR,	0xd04,	0x40),
+		RSND_GEN_M_REG(MIX_ADINR,	0xd08,	0x40),
+		RSND_GEN_M_REG(MIX_MIXMR,	0xd10,	0x40),
+		RSND_GEN_M_REG(MIX_MVPDR,	0xd14,	0x40),
+		RSND_GEN_M_REG(MIX_MDBAR,	0xd18,	0x40),
+		RSND_GEN_M_REG(MIX_MDBBR,	0xd1c,	0x40),
+		RSND_GEN_M_REG(MIX_MDBCR,	0xd20,	0x40),
+		RSND_GEN_M_REG(MIX_MDBDR,	0xd24,	0x40),
+		RSND_GEN_M_REG(MIX_MDBER,	0xd28,	0x40),
 		RSND_GEN_M_REG(DVC_SWRSR,	0xe00,	0x100),
 		RSND_GEN_M_REG(DVC_DVUIR,	0xe04,	0x100),
 		RSND_GEN_M_REG(DVC_ADINR,	0xe08,	0x100),
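
rsnd_force_write() differs from rsnd_write() only in calling
regmap_fields_force_write(), which issues the bus access even when the regmap
cache already holds the same value, the behavior that write-to-trigger
registers need. The rsnd.h hunk below adds the matching rsnd_mod_force_write()
wrapper; a hedged sketch of the distinction (hypothetical helper, SSI_CTRL
chosen only as an example register):

    #include "rsnd.h"

    static void kick_sketch(struct rsnd_mod *mod)
    {
    	rsnd_mod_write(mod, SSI_CTRL, 0x1);		/* may be elided if the cached value matches */
    	rsnd_mod_force_write(mod, SSI_CTRL, 0x1);	/* always reaches the hardware */
    }
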
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
new file mode 100644
index 0000000..0d5c102
--- /dev/null
+++ b/sound/soc/sh/rcar/mix.c
@@ -0,0 +1,200 @@
+/*
+ * mix.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define MIX_NAME_SIZE	16
+#define MIX_NAME "mix"
+
+struct rsnd_mix {
+	struct rsnd_mix_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_mix_nr(priv) ((priv)->mix_nr)
+#define for_each_rsnd_mix(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_mix_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_mix *)(priv)->mix + i);	\
+	     i++)
+
+
+static void rsnd_mix_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, MIX_SWRSR, 0);
+	rsnd_mod_write(mod, MIX_SWRSR, 1);
+}
+
+#define rsnd_mix_initialize_lock(mod)	__rsnd_mix_initialize_lock(mod, 1)
+#define rsnd_mix_initialize_unlock(mod)	__rsnd_mix_initialize_lock(mod, 0)
+static void __rsnd_mix_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, MIX_MIXIR, enable);
+}
+
+static void rsnd_mix_volume_update(struct rsnd_dai_stream *io,
+				  struct rsnd_mod *mod)
+{
+
+	/* Disable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 0);
+
+	rsnd_mod_write(mod, MIX_MDBAR, 0);
+	rsnd_mod_write(mod, MIX_MDBBR, 0);
+	rsnd_mod_write(mod, MIX_MDBCR, 0);
+	rsnd_mod_write(mod, MIX_MDBDR, 0);
+
+	/* Enable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 1);
+}
+
+static int rsnd_mix_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_mix_soft_reset(mod);
+
+	rsnd_mix_initialize_lock(mod);
+
+	rsnd_mod_write(mod, MIX_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_path_parse(priv, io);
+
+	/* volume step */
+	rsnd_mod_write(mod, MIX_MIXMR, 0);
+	rsnd_mod_write(mod, MIX_MVPDR, 0);
+
+	rsnd_mix_volume_update(io, mod);
+
+	rsnd_mix_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_mix_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_mix_ops = {
+	.name		= MIX_NAME,
+	.init		= rsnd_mix_init,
+	.quit		= rsnd_mix_quit,
+};
+
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_mix_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_mix *)(priv->mix) + id)->mod;
+}
+
+static void rsnd_of_parse_mix(struct platform_device *pdev,
+			      const struct rsnd_of_data *of_data,
+			      struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_mix_platform_info *mix_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_mix_end;
+
+	mix_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_mix_platform_info) * nr,
+				GFP_KERNEL);
+	if (!mix_info) {
+		dev_err(dev, "mix info allocation error\n");
+		goto rsnd_of_parse_mix_end;
+	}
+
+	info->mix_info		= mix_info;
+	info->mix_info_nr	= nr;
+
+rsnd_of_parse_mix_end:
+	of_node_put(node);
+
+}
+
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_mix *mix;
+	struct clk *clk;
+	char name[MIX_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "MIX is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_mix(pdev, of_data, priv);
+
+	nr = info->mix_info_nr;
+	if (!nr)
+		return 0;
+
+	mix	= devm_kzalloc(dev, sizeof(*mix) * nr, GFP_KERNEL);
+	if (!mix)
+		return -ENOMEM;
+
+	priv->mix_nr	= nr;
+	priv->mix	= mix;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		snprintf(name, MIX_NAME_SIZE, "%s.%d",
+			 MIX_NAME, i);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		mix->info = &info->mix_info[i];
+
+		ret = rsnd_mod_init(priv, &mix->mod, &rsnd_mix_ops,
+				    clk, RSND_MOD_MIX, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_mix *mix;
+	int i;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		rsnd_mod_quit(&mix->mod);
+	}
+}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 09fcc54..7a0e52b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -47,6 +47,18 @@
 	RSND_REG_SCU_SYS_STATUS0,
 	RSND_REG_SCU_SYS_INT_EN0,
 	RSND_REG_CMD_ROUTE_SLCT,
+	RSND_REG_CTU_CTUIR,
+	RSND_REG_CTU_ADINR,
+	RSND_REG_MIX_SWRSR,
+	RSND_REG_MIX_MIXIR,
+	RSND_REG_MIX_ADINR,
+	RSND_REG_MIX_MIXMR,
+	RSND_REG_MIX_MVPDR,
+	RSND_REG_MIX_MDBAR,
+	RSND_REG_MIX_MDBBR,
+	RSND_REG_MIX_MDBCR,
+	RSND_REG_MIX_MDBDR,
+	RSND_REG_MIX_MDBER,
 	RSND_REG_DVC_SWRSR,
 	RSND_REG_DVC_DVUIR,
 	RSND_REG_DVC_ADINR,
@@ -99,6 +111,7 @@
 	RSND_REG_SHARE26,
 	RSND_REG_SHARE27,
 	RSND_REG_SHARE28,
+	RSND_REG_SHARE29,
 
 	RSND_REG_MAX,
 };
@@ -119,7 +132,7 @@
 #define RSND_REG_SSI_CTRL		RSND_REG_SHARE02
 #define RSND_REG_SSI_BUSIF_MODE		RSND_REG_SHARE03
 #define RSND_REG_SSI_BUSIF_ADINR	RSND_REG_SHARE04
-#define RSND_REG_INT_ENABLE		RSND_REG_SHARE05
+#define RSND_REG_SSI_INT_ENABLE		RSND_REG_SHARE05
 #define RSND_REG_SRC_BSDSR		RSND_REG_SHARE06
 #define RSND_REG_SRC_BSISR		RSND_REG_SHARE07
 #define RSND_REG_DIV_EN			RSND_REG_SHARE08
@@ -136,13 +149,14 @@
 #define RSND_REG_AUDIO_CLK_SEL2		RSND_REG_SHARE19
 #define RSND_REG_CMD_CTRL		RSND_REG_SHARE20
 #define RSND_REG_CMDOUT_TIMSEL		RSND_REG_SHARE21
-#define RSND_REG_BUSIF_DALIGN		RSND_REG_SHARE22
+#define RSND_REG_SSI_BUSIF_DALIGN	RSND_REG_SHARE22
 #define RSND_REG_DVC_VRCTR		RSND_REG_SHARE23
 #define RSND_REG_DVC_VRPDR		RSND_REG_SHARE24
 #define RSND_REG_DVC_VRDBR		RSND_REG_SHARE25
 #define RSND_REG_SCU_SYS_STATUS1	RSND_REG_SHARE26
 #define RSND_REG_SCU_SYS_INT_EN1	RSND_REG_SHARE27
 #define RSND_REG_SRC_INT_ENABLE0	RSND_REG_SHARE28
+#define RSND_REG_SRC_BUSIF_DALIGN	RSND_REG_SHARE29
 
 struct rsnd_of_data;
 struct rsnd_priv;
@@ -157,27 +171,28 @@
 	rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
 #define rsnd_mod_write(m, r, d) \
 	rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
+#define rsnd_mod_force_write(m, r, d) \
+	rsnd_force_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
 #define rsnd_mod_bset(m, r, s, d) \
 	rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
 
 u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
 void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
 		enum rsnd_reg reg, u32 data);
+void rsnd_force_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
+		enum rsnd_reg reg, u32 data);
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
 		    u32 mask, u32 data);
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io);
 
 /*
  *	R-Car DMA
  */
 struct rsnd_dma;
-struct rsnd_dma_ops {
-	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
-		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
-	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-};
 
 struct rsnd_dmaen {
 	struct dma_chan		*chan;
@@ -217,6 +232,8 @@
  */
 enum rsnd_mod_type {
 	RSND_MOD_DVC = 0,
+	RSND_MOD_MIX,
+	RSND_MOD_CTU,
 	RSND_MOD_SRC,
 	RSND_MOD_SSI,
 	RSND_MOD_MAX,
@@ -312,7 +329,7 @@
 
 #define rsnd_mod_to_priv(mod) ((mod)->priv)
 #define rsnd_mod_to_dma(mod) (&(mod)->dma)
-#define rsnd_mod_id(mod) ((mod)->id)
+#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
 #define rsnd_mod_hw_start(mod)	clk_enable((mod)->clk)
 #define rsnd_mod_hw_stop(mod)	clk_disable((mod)->clk)
 
@@ -345,9 +362,12 @@
 	int byte_per_period;
 	int next_period_byte;
 };
-#define rsnd_io_to_mod_ssi(io)	((io)->mod[RSND_MOD_SSI])
-#define rsnd_io_to_mod_src(io)	((io)->mod[RSND_MOD_SRC])
-#define rsnd_io_to_mod_dvc(io)	((io)->mod[RSND_MOD_DVC])
+#define rsnd_io_to_mod(io, i)	((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
+#define rsnd_io_to_mod_ssi(io)	rsnd_io_to_mod((io), RSND_MOD_SSI)
+#define rsnd_io_to_mod_src(io)	rsnd_io_to_mod((io), RSND_MOD_SRC)
+#define rsnd_io_to_mod_ctu(io)	rsnd_io_to_mod((io), RSND_MOD_CTU)
+#define rsnd_io_to_mod_mix(io)	rsnd_io_to_mod((io), RSND_MOD_MIX)
+#define rsnd_io_to_mod_dvc(io)	rsnd_io_to_mod((io), RSND_MOD_DVC)
 #define rsnd_io_to_rdai(io)	((io)->rdai)
 #define rsnd_io_to_priv(io)	(rsnd_rdai_to_priv(rsnd_io_to_rdai(io)))
 #define rsnd_io_is_play(io)	(&rsnd_io_to_rdai(io)->playback == io)
@@ -437,12 +457,6 @@
 	void *gen;
 
 	/*
-	 * below value will be filled on rsnd_src_probe()
-	 */
-	void *src;
-	int src_nr;
-
-	/*
 	 * below value will be filled on rsnd_adg_probe()
 	 */
 	void *adg;
@@ -459,6 +473,24 @@
 	int ssi_nr;
 
 	/*
+	 * below value will be filled on rsnd_src_probe()
+	 */
+	void *src;
+	int src_nr;
+
+	/*
+	 * below value will be filled on rsnd_ctu_probe()
+	 */
+	void *ctu;
+	int ctu_nr;
+
+	/*
+	 * below value will be filled on rsnd_mix_probe()
+	 */
+	void *mix;
+	int mix_nr;
+
+	/*
 	 * below value will be filled on rsnd_dvc_probe()
 	 */
 	void *dvc;
@@ -531,6 +563,19 @@
 		     u32 max);
 
 /*
+ *	R-Car SSI
+ */
+int rsnd_ssi_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
+int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
+int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+
+/*
  *	R-Car SRC
  */
 int rsnd_src_probe(struct platform_device *pdev,
@@ -550,20 +595,27 @@
 int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod);
 int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
 
-#define rsnd_src_nr(priv) ((priv)->src_nr)
-
 /*
- *	R-Car SSI
+ *	R-Car CTU
  */
-int rsnd_ssi_probe(struct platform_device *pdev,
+int rsnd_ctu_probe(struct platform_device *pdev,
 		   const struct rsnd_of_data *of_data,
 		   struct rsnd_priv *priv);
-void rsnd_ssi_remove(struct platform_device *pdev,
+
+void rsnd_ctu_remove(struct platform_device *pdev,
 		     struct rsnd_priv *priv);
-struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
-int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
-int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
-int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
+
+/*
+ *	R-Car MIX
+ */
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
 
 /*
  *	R-Car DVC
@@ -575,7 +627,4 @@
 		     struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
 
-#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
-
-
 #endif
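
Two small hardening changes above: rsnd_mod_id() now tolerates a NULL mod
(returning -1), and the new rsnd_io_to_mod() bounds-checks the type index.
Together they allow generic loops over a stream's module slots without
per-type accessors, for example in this hypothetical debug helper:

    #include <linux/device.h>
    #include "rsnd.h"

    static void dump_io_sketch(struct rsnd_dai_stream *io, struct device *dev)
    {
    	int i;

    	for (i = 0; i < RSND_MOD_MAX; i++) {
    		struct rsnd_mod *mod = rsnd_io_to_mod(io, i);

    		if (mod)	/* unused slots are NULL */
    			dev_dbg(dev, "%s[%d]\n",
    				rsnd_mod_name(mod), rsnd_mod_id(mod));
    	}
    }
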
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
index 84e9357..d61db9c 100644
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -41,6 +41,7 @@
 static const struct of_device_id rsrc_card_of_match[] = {
 	{ .compatible = "renesas,rsrc-card,lager",	.data = &routes_of_ssi0_ak4642 },
 	{ .compatible = "renesas,rsrc-card,koelsch",	.data = &routes_of_ssi0_ak4642 },
+	{ .compatible = "renesas,rsrc-card", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
@@ -242,8 +243,15 @@
 		snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name);
 
 		/* additional name prefix */
-		priv->codec_conf.of_node	= dai_link->codec_of_node;
-		priv->codec_conf.name_prefix	= of_data->prefix;
+		if (of_data) {
+			priv->codec_conf.of_node = dai_link->codec_of_node;
+			priv->codec_conf.name_prefix = of_data->prefix;
+		} else {
+			snd_soc_of_parse_audio_prefix(&priv->snd_card,
+						      &priv->codec_conf,
+						      dai_link->codec_of_node,
+						      "audio-prefix");
+		}
 
 		/* set dai_name */
 		snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
@@ -361,8 +369,14 @@
 	priv->snd_card.num_links		= num;
 	priv->snd_card.codec_conf		= &priv->codec_conf;
 	priv->snd_card.num_configs		= 1;
-	priv->snd_card.of_dapm_routes		= of_data->routes;
-	priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+
+	if (of_data) {
+		priv->snd_card.of_dapm_routes		= of_data->routes;
+		priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+	} else {
+		snd_soc_of_parse_audio_routing(&priv->snd_card,
+					       "audio-routing");
+	}
 
 	/* Parse the card name from DT */
 	snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index c61c171..89a18e1 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -30,6 +30,7 @@
 
 #define RSND_SRC_NAME_SIZE 16
 
+#define rsnd_src_nr(priv) ((priv)->src_nr)
 #define rsnd_enable_sync_convert(src) ((src)->sen.val)
 #define rsnd_src_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
@@ -117,6 +118,20 @@
 /*
  *		Gen1/Gen2 common functions
  */
+static void rsnd_src_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, SRC_SWRSR, 0);
+	rsnd_mod_write(mod, SRC_SWRSR, 1);
+}
+
+
+#define rsnd_src_initialize_lock(mod)	__rsnd_src_initialize_lock(mod, 1)
+#define rsnd_src_initialize_unlock(mod)	__rsnd_src_initialize_lock(mod, 0)
+static void __rsnd_src_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, SRC_SRCIR, enable);
+}
+
 static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io,
 					 struct rsnd_mod *mod)
 {
@@ -133,7 +148,6 @@
 			int use_busif)
 {
 	struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 	int ssi_id = rsnd_mod_id(ssi_mod);
 
 	/*
@@ -170,27 +184,14 @@
 	 * DMA settings for SSIU
 	 */
 	if (use_busif) {
-		u32 val = 0x76543210;
-		u32 mask = ~0;
+		u32 val = rsnd_get_dalign(ssi_mod, io);
 
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR,
-			       rsnd_get_adinr(ssi_mod, io));
+			       rsnd_get_adinr_bit(ssi_mod, io));
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE,  1);
 		rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1);
 
-		mask <<= runtime->channels * 4;
-		val = val & mask;
-
-		switch (runtime->sample_bits) {
-		case 16:
-			val |= 0x67452301 & ~mask;
-			break;
-		case 32:
-			val |= 0x76543210 & ~mask;
-			break;
-		}
-		rsnd_mod_write(ssi_mod, BUSIF_DALIGN, val);
-
+		rsnd_mod_write(ssi_mod, SSI_BUSIF_DALIGN, val);
 	}
 
 	return 0;
@@ -215,10 +216,9 @@
 		return 0;
 
 	/* enable SSI interrupt if Gen2 */
-	if (rsnd_ssi_is_dma_mode(ssi_mod))
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0e000000);
-	else
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE,
+		       rsnd_ssi_is_dma_mode(ssi_mod) ?
+		       0x0e000000 : 0x0f000000);
 
 	return 0;
 }
@@ -231,7 +231,7 @@
 		return 0;
 
 	/* disable SSI interrupt if Gen2 */
-	rsnd_mod_write(ssi_mod, INT_ENABLE, 0x00000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE, 0x00000000);
 
 	return 0;
 }
@@ -294,12 +294,8 @@
 	if (convert_rate)
 		fsrate = 0x0400000 / convert_rate * runtime->rate;
 
-	/* set/clear soft reset */
-	rsnd_mod_write(mod, SRC_SWRSR, 0);
-	rsnd_mod_write(mod, SRC_SWRSR, 1);
-
 	/* Set channel number and output bit length */
-	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod, io));
+	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* Enable the initial value of IFS */
 	if (fsrate) {
@@ -358,17 +354,15 @@
 
 	rsnd_mod_hw_start(mod);
 
+	rsnd_src_soft_reset(mod);
+
+	rsnd_src_initialize_lock(mod);
+
 	src->err = 0;
 
 	/* reset sync convert_rate */
 	src->sync.val = 0;
 
-	/*
-	 * Initialize the operation of the SRC internal circuits
-	 * see rsnd_src_start()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 1);
-
 	return 0;
 }
 
@@ -395,11 +389,7 @@
 
 static int rsnd_src_start(struct rsnd_mod *mod)
 {
-	/*
-	 * Cancel the initialization and operate the SRC function
-	 * see rsnd_src_init()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 0);
+	rsnd_src_initialize_unlock(mod);
 
 	return 0;
 }
@@ -617,6 +607,14 @@
 		int_val = 0;
 	}
 
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore overflow errors when rsnd_enable_sync_convert() is in use
+	 */
+	if (rsnd_enable_sync_convert(src))
+		sys_int_val = sys_int_val & 0xffff;
+
 	rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
@@ -632,11 +630,22 @@
 
 static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
 {
-	u32 val = OUF_SRC(rsnd_mod_id(mod));
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val0, val1;
 	bool ret = false;
 
-	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val) ||
-	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val)) {
+	val0 = val1 = OUF_SRC(rsnd_mod_id(mod));
+
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore overflow errors when rsnd_enable_sync_convert() is in use
+	 */
+	if (rsnd_enable_sync_convert(src))
+		val0 = val0 & 0xffff;
+
+	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val0) ||
+	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1)) {
 		struct rsnd_src *src = rsnd_mod_to_src(mod);
 
 		src->err++;
@@ -652,7 +661,20 @@
 static int _rsnd_src_start_gen2(struct rsnd_mod *mod,
 				struct rsnd_dai_stream *io)
 {
-	u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val;
+
+	val = rsnd_get_dalign(mod, io);
+
+	rsnd_mod_write(mod, SRC_BUSIF_DALIGN, val);
+
+	/*
+	 * WORKAROUND
+	 *
+	 * Enable SRC output if you want to use sync convert together with DVC
+	 */
+	val = (rsnd_io_to_mod_dvc(io) && !rsnd_enable_sync_convert(src)) ?
+		0x01 : 0x11;
 
 	rsnd_mod_write(mod, SRC_CTRL, val);
 
@@ -922,13 +944,6 @@
 		return 0;
 
 	/*
-	 * We can't use SRC sync convert
-	 * if it has DVC
-	 */
-	if (rsnd_io_to_mod_dvc(io))
-		return 0;
-
-	/*
 	 * enable sync convert
 	 */
 	ret = rsnd_kctrl_new_s(mod, io, rtd,
@@ -1047,10 +1062,8 @@
 		return 0;
 
 	src	= devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
-	if (!src) {
-		dev_err(dev, "SRC allocate failed\n");
+	if (!src)
 		return -ENOMEM;
-	}
 
 	priv->src_nr	= nr;
 	priv->src	= src;
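
Both WORKAROUND hunks mask the SCU_SYS interrupt-enable and status bits with
0xffff when synchronous convert is active. Assuming OUF_SRC(id) packs the
underflow flag at bit <id> and the overflow flag at bit <id + 16> (an
assumption about this driver's register layout, restated in the macro below),
the mask keeps only the underflow checks:

    #include <stdint.h>

    /* assumed layout: underflow at bit <id>, overflow at bit <id + 16> */
    #define OUF_SRC(id)	((1U << ((id) + 16)) | (1U << (id)))

    static uint32_t src_err_bits_sketch(int id, int sync_convert)
    {
    	uint32_t val = OUF_SRC(id);	/* id == 0 -> 0x00010001 */

    	if (sync_convert)
    		val &= 0xffff;		/* drop overflow, keep underflow */

    	return val;
    }
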
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 2fbe59f..d45b9a7 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -770,10 +770,8 @@
 	 */
 	nr	= info->ssi_info_nr;
 	ssi	= devm_kzalloc(dev, sizeof(*ssi) * nr, GFP_KERNEL);
-	if (!ssi) {
-		dev_err(dev, "SSI allocate failed\n");
+	if (!ssi)
 		return -ENOMEM;
-	}
 
 	priv->ssi	= ssi;
 	priv->ssi_nr	= nr;
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index ab13146..89ed1b1 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -385,14 +385,9 @@
 
 static int sh4_soc_dai_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
-					  sh4_ssi_dai, ARRAY_SIZE(sh4_ssi_dai));
-}
-
-static int sh4_soc_dai_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
+					       sh4_ssi_dai,
+					       ARRAY_SIZE(sh4_ssi_dai));
 }
 
 static struct platform_driver sh4_ssi_driver = {
@@ -401,7 +396,6 @@
 	},
 
 	.probe = sh4_soc_dai_probe,
-	.remove = sh4_soc_dai_remove,
 };
 
 module_platform_driver(sh4_ssi_driver);
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 08d7259..d40efc9 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -85,10 +85,19 @@
 /**
 * snd_soc_new_ac97_codec - initialise AC97 device
  * @codec: audio codec
+ * @id: The expected device ID
+ * @id_mask: Mask that is applied to the device ID before comparing with @id
  *
  * Initialises AC97 codec resources for use by ad-hoc devices only.
+ *
+ * If @id is not 0, this function will reset the device, then read the ID from
+ * the device and check whether it matches the expected ID. If it doesn't
+ * match, an error will be returned and the device will not be registered.
+ *
+ * Returns: A PTR_ERR() on failure or a valid snd_ac97 struct on success.
  */
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask)
 {
 	struct snd_ac97 *ac97;
 	int ret;
@@ -97,13 +106,24 @@
 	if (IS_ERR(ac97))
 		return ac97;
 
-	ret = device_add(&ac97->dev);
-	if (ret) {
-		put_device(&ac97->dev);
-		return ERR_PTR(ret);
+	if (id) {
+		ret = snd_ac97_reset(ac97, false, id, id_mask);
+		if (ret < 0) {
+			dev_err(codec->dev, "Failed to reset AC97 device: %d\n",
+				ret);
+			goto err_put_device;
+		}
 	}
 
+	ret = device_add(&ac97->dev);
+	if (ret)
+		goto err_put_device;
+
 	return ac97;
+
+err_put_device:
+	put_device(&ac97->dev);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
 
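With the new @id/@id_mask parameters, a codec driver can have the core reset
the device and verify its AC'97 ID at registration time; passing 0 as @id
keeps the old behavior. A hedged call-site sketch (the function name, vendor
ID, and mask below are illustrative, not taken from a real driver):

    #include <sound/soc.h>

    static int ac97_probe_sketch(struct snd_soc_codec *codec)
    {
    	struct snd_ac97 *ac97;

    	/* 0x414c4300 / 0xffffff00: hypothetical vendor ID and mask */
    	ac97 = snd_soc_new_ac97_codec(codec, 0x414c4300, 0xffffff00);
    	if (IS_ERR(ac97))
    		return PTR_ERR(ac97);

    	snd_soc_codec_set_drvdata(codec, ac97);
    	return 0;
    }
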
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0e1e69c..6173d15 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -654,10 +654,12 @@
 
 	/* suspend all CODECs */
 	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended) {
-			switch (codec->dapm.bias_level) {
+			switch (snd_soc_dapm_get_bias_level(dapm)) {
 			case SND_SOC_BIAS_STANDBY:
 				/*
 				 * If the CODEC is capable of idle
@@ -665,7 +667,7 @@
 				 * means it's doing something,
 				 * otherwise fall through.
 				 */
-				if (codec->dapm.idle_bias_off) {
+				if (dapm->idle_bias_off) {
 					dev_dbg(codec->dev,
 						"ASoC: idle_bias_off CODEC on over suspend\n");
 					break;
@@ -978,7 +980,7 @@
 
 static void soc_remove_component(struct snd_soc_component *component)
 {
-	if (!component->probed)
+	if (!component->card)
 		return;
 
 	/* This is a HACK and will be removed soon */
@@ -991,7 +993,7 @@
 	snd_soc_dapm_free(snd_soc_component_get_dapm(component));
 
 	soc_cleanup_component_debugfs(component);
-	component->probed = 0;
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 }
 
@@ -1102,16 +1104,26 @@
 	struct snd_soc_dai *dai;
 	int ret;
 
-	if (component->probed)
+	if (!strcmp(component->name, "snd-soc-dummy"))
 		return 0;
 
+	if (component->card) {
+		if (component->card != card) {
+			dev_err(component->dev,
+				"Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n",
+				card->name, component->card->name);
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	if (!try_module_get(component->dev->driver->owner))
+		return -ENODEV;
+
 	component->card = card;
 	dapm->card = card;
 	soc_set_name_prefix(card, component);
 
-	if (!try_module_get(component->dev->driver->owner))
-		return -ENODEV;
-
 	soc_init_component_debugfs(component);
 
 	if (component->dapm_widgets) {
@@ -1155,7 +1167,6 @@
 		snd_soc_dapm_add_routes(dapm, component->dapm_routes,
 					component->num_dapm_routes);
 
-	component->probed = 1;
 	list_add(&dapm->list, &card->dapm_list);
 
 	/* This is a HACK and will be removed soon */
@@ -1166,6 +1177,7 @@
 
 err_probe:
 	soc_cleanup_component_debugfs(component);
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 
 	return ret;
@@ -1449,7 +1461,7 @@
 		rtd->dev_registered = 0;
 	}
 
-	if (component && component->probed)
+	if (component)
 		soc_remove_component(component);
 }
 
@@ -2128,7 +2140,7 @@
 /**
  * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
  * @dai: DAI
- * @ratio Ratio of BCLK to Sample rate.
+ * @ratio: Ratio of BCLK to Sample rate.
  *
  * Configures the DAI for a preset BCLK to sample rate ratio.
  */
@@ -2652,10 +2664,7 @@
 	component->probe = component->driver->probe;
 	component->remove = component->driver->remove;
 
-	if (!component->dapm_ptr)
-		component->dapm_ptr = &component->dapm;
-
-	dapm = component->dapm_ptr;
+	dapm = &component->dapm;
 	dapm->dev = dev;
 	dapm->component = component;
 	dapm->bias_level = SND_SOC_BIAS_OFF;
@@ -2799,6 +2808,7 @@
 /**
  * snd_soc_unregister_component - Unregister a component from the ASoC core
  *
+ * @dev: The device to unregister
  */
 void snd_soc_unregister_component(struct device *dev)
 {
@@ -2839,7 +2849,7 @@
  * snd_soc_add_platform - Add a platform to the ASoC core
  * @dev: The parent device for the platform
  * @platform: The platform to add
- * @platform_driver: The driver for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2878,7 +2888,8 @@
 /**
  * snd_soc_register_platform - Register a platform with the ASoC core
  *
- * @platform: platform to register
+ * @dev: The device for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_register_platform(struct device *dev,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2939,7 +2950,7 @@
 /**
  * snd_soc_unregister_platform - Unregister a platform from the ASoC core
  *
- * @platform: platform to unregister
+ * @dev: platform to unregister
  */
 void snd_soc_unregister_platform(struct device *dev)
 {
@@ -3030,13 +3041,17 @@
 /**
  * snd_soc_register_codec - Register a codec with the ASoC core
  *
- * @codec: codec to register
+ * @dev: The parent device for this codec
+ * @codec_drv: Codec driver
+ * @dai_drv: The associated DAI driver
+ * @num_dai: Number of DAIs
  */
 int snd_soc_register_codec(struct device *dev,
 			   const struct snd_soc_codec_driver *codec_drv,
 			   struct snd_soc_dai_driver *dai_drv,
 			   int num_dai)
 {
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_codec *codec;
 	struct snd_soc_dai *dai;
 	int ret, i;
@@ -3047,7 +3062,6 @@
 	if (codec == NULL)
 		return -ENOMEM;
 
-	codec->component.dapm_ptr = &codec->dapm;
 	codec->component.codec = codec;
 
 	ret = snd_soc_component_initialize(&codec->component,
@@ -3077,12 +3091,14 @@
 	if (codec_drv->read)
 		codec->component.read = snd_soc_codec_drv_read;
 	codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time;
-	codec->dapm.idle_bias_off = codec_drv->idle_bias_off;
-	codec->dapm.suspend_bias_off = codec_drv->suspend_bias_off;
+
+	dapm = snd_soc_codec_get_dapm(codec);
+	dapm->idle_bias_off = codec_drv->idle_bias_off;
+	dapm->suspend_bias_off = codec_drv->suspend_bias_off;
 	if (codec_drv->seq_notifier)
-		codec->dapm.seq_notifier = codec_drv->seq_notifier;
+		dapm->seq_notifier = codec_drv->seq_notifier;
 	if (codec_drv->set_bias_level)
-		codec->dapm.set_bias_level = snd_soc_codec_set_bias_level;
+		dapm->set_bias_level = snd_soc_codec_set_bias_level;
 	codec->dev = dev;
 	codec->driver = codec_drv;
 	codec->component.val_bytes = codec_drv->reg_word_size;
@@ -3129,7 +3145,7 @@
 /**
  * snd_soc_unregister_codec - Unregister a codec from the ASoC core
  *
- * @codec: codec to unregister
+ * @dev: codec to unregister
  */
 void snd_soc_unregister_codec(struct device *dev)
 {
@@ -3304,6 +3320,26 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
 
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname)
+{
+	struct device_node *np = card->dev->of_node;
+	const char *str;
+	int ret;
+
+	ret = of_property_read_string(np, propname, &str);
+	if (ret < 0) {
+		/* a missing prefix is not an error */
+		return;
+	}
+
+	codec_conf->of_node	= of_node;
+	codec_conf->name_prefix	= str;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname)
 {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index e0de807..f4bf21a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -47,6 +47,13 @@
 
 #define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
 
+#define SND_SOC_DAPM_DIR_REVERSE(x) ((x == SND_SOC_DAPM_DIR_IN) ? \
+	SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN)
+
+#define snd_soc_dapm_for_each_direction(dir) \
+	for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
+		(dir)++)
+
 static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
 	const char *control,
@@ -167,45 +174,59 @@
 }
 
 /*
- * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
- *  paths
- * @w: The widget for which to invalidate the cached number of input paths
- *
- * The function resets the cached number of inputs for the specified widget and
- * all widgets that can be reached via outgoing paths from the widget.
- *
- * This function must be called if the number of input paths for a widget might
- * have changed. E.g. if the source state of a widget changes or a path is added
- * or activated with the widget as the sink.
+ * Common implementation for dapm_widget_invalidate_input_paths() and
+ * dapm_widget_invalidate_output_paths(). The function is inlined since the
+ * combined size of the two specialized functions is only marginally larger then
+ * the size of the generic function and at the same time the fast path of the
+ * specialized functions is significantly smaller than the generic function.
  */
-static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+static __always_inline void dapm_widget_invalidate_paths(
+	struct snd_soc_dapm_widget *w, enum snd_soc_dapm_direction dir)
 {
-	struct snd_soc_dapm_widget *sink;
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_widget *node;
 	struct snd_soc_dapm_path *p;
 	LIST_HEAD(list);
 
 	dapm_assert_locked(w->dapm);
 
-	if (w->inputs == -1)
+	if (w->endpoints[dir] == -1)
 		return;
 
-	w->inputs = -1;
 	list_add_tail(&w->work_list, &list);
+	w->endpoints[dir] = -1;
 
 	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
 			if (p->is_supply || p->weak || !p->connect)
 				continue;
-			sink = p->sink;
-			if (sink->inputs != -1) {
-				sink->inputs = -1;
-				list_add_tail(&sink->work_list, &list);
+			node = p->node[rdir];
+			if (node->endpoints[dir] != -1) {
+				node->endpoints[dir] = -1;
+				list_add_tail(&node->work_list, &list);
 			}
 		}
 	}
 }
 
 /*
+ * dapm_widget_invalidate_input_paths() - Invalidate the cached number of
+ *  input paths
+ * @w: The widget for which to invalidate the cached number of input paths
+ *
+ * Resets the cached number of inputs for the specified widget and all widgets
+ * that can be reached via outgoing paths from the widget.
+ *
+ * This function must be called if the number of input paths for a widget might
+ * have changed. E.g. if the source state of a widget changes or a path is added
+ * or activated with the widget as the sink.
+ */
+static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+{
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_IN);
+}
+
+/*
  * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
  *  output paths
  * @w: The widget for which to invalidate the cached number of output paths
@@ -219,29 +240,7 @@
  */
 static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
 {
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_path *p;
-	LIST_HEAD(list);
-
-	dapm_assert_locked(w->dapm);
-
-	if (w->outputs == -1)
-		return;
-
-	w->outputs = -1;
-	list_add_tail(&w->work_list, &list);
-
-	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sources, list_sink) {
-			if (p->is_supply || p->weak || !p->connect)
-				continue;
-			source = p->source;
-			if (source->outputs != -1) {
-				source->outputs = -1;
-				list_add_tail(&source->work_list, &list);
-			}
-		}
-	}
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_OUT);
 }
 
 /*
@@ -270,9 +269,9 @@
 	 * endpoints is either connected or disconnected that sum won't change,
 	 * so there is no need to re-check the path.
 	 */
-	if (p->source->inputs != 0)
+	if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
 		dapm_widget_invalidate_input_paths(p->sink);
-	if (p->sink->outputs != 0)
+	if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
 		dapm_widget_invalidate_output_paths(p->source);
 }
 
@@ -283,11 +282,11 @@
 	mutex_lock(&card->dapm_mutex);
 
 	list_for_each_entry(w, &card->widgets, list) {
-		if (w->is_sink || w->is_source) {
+		if (w->is_ep) {
 			dapm_mark_dirty(w, "Rechecking endpoints");
-			if (w->is_sink)
+			if (w->is_ep & SND_SOC_DAPM_EP_SINK)
 				dapm_widget_invalidate_output_paths(w);
-			if (w->is_source)
+			if (w->is_ep & SND_SOC_DAPM_EP_SOURCE)
 				dapm_widget_invalidate_input_paths(w);
 		}
 	}
@@ -894,7 +893,7 @@
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
 		/* match name */
-		list_for_each_entry(path, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, path) {
 			/* mixer/mux paths name must match control name */
 			if (path->name != (char *)w->kcontrol_news[i].name)
 				continue;
@@ -923,18 +922,18 @@
 static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_context *dapm = w->dapm;
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
-	struct list_head *paths;
 	const char *type;
 	int ret;
 
 	switch (w->id) {
 	case snd_soc_dapm_mux:
-		paths = &w->sources;
+		dir = SND_SOC_DAPM_DIR_OUT;
 		type = "mux";
 		break;
 	case snd_soc_dapm_demux:
-		paths = &w->sinks;
+		dir = SND_SOC_DAPM_DIR_IN;
 		type = "demux";
 		break;
 	default:
@@ -948,7 +947,7 @@
 		return -EINVAL;
 	}
 
-	if (list_empty(paths)) {
+	if (list_empty(&w->edges[dir])) {
 		dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name);
 		return -EINVAL;
 	}
@@ -957,16 +956,9 @@
 	if (ret < 0)
 		return ret;
 
-	if (w->id == snd_soc_dapm_mux) {
-		list_for_each_entry(path, &w->sources, list_sink) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
-	} else {
-		list_for_each_entry(path, &w->sinks, list_source) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
+	snd_soc_dapm_widget_for_each_path(w, dir, path) {
+		if (path->name)
+			dapm_kcontrol_add_path(w->kcontrols[0], path);
 	}
 
 	return 0;
@@ -1032,43 +1024,79 @@
 	}
 }
 
-/* add widget to list if it's not already in the list */
-static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list,
-	struct snd_soc_dapm_widget *w)
+static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
+	struct list_head *widgets)
 {
-	struct snd_soc_dapm_widget_list *wlist;
-	int wlistsize, wlistentries, i;
+	struct snd_soc_dapm_widget *w;
+	struct list_head *it;
+	unsigned int size = 0;
+	unsigned int i = 0;
 
+	list_for_each(it, widgets)
+		size++;
+
+	*list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL);
 	if (*list == NULL)
-		return -EINVAL;
-
-	wlist = *list;
-
-	/* is this widget already in the list */
-	for (i = 0; i < wlist->num_widgets; i++) {
-		if (wlist->widgets[i] == w)
-			return 0;
-	}
-
-	/* allocate some new space */
-	wlistentries = wlist->num_widgets + 1;
-	wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
-			wlistentries * sizeof(struct snd_soc_dapm_widget *);
-	*list = krealloc(wlist, wlistsize, GFP_KERNEL);
-	if (*list == NULL) {
-		dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n",
-			w->name);
 		return -ENOMEM;
+
+	list_for_each_entry(w, widgets, work_list)
+		(*list)->widgets[i++] = w;
+
+	(*list)->num_widgets = i;
+
+	return 0;
+}
+
+/*
+ * Common implementation for is_connected_output_ep() and
+ * is_connected_input_ep(). The function is inlined since the combined size of
+ * the two specialized functions is only marginally larger than the size of the
+ * generic function and at the same time the fast path of the specialized
+ * functions is significantly smaller than the generic function.
+ */
+static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+	struct list_head *list, enum snd_soc_dapm_direction dir,
+	int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
+{
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_path *path;
+	int con = 0;
+
+	if (widget->endpoints[dir] >= 0)
+		return widget->endpoints[dir];
+
+	DAPM_UPDATE_STAT(widget, path_checks);
+
+	/* do we need to add this widget to the list ? */
+	if (list)
+		list_add_tail(&widget->work_list, list);
+
+	if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
+		widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
+		return widget->endpoints[dir];
 	}
-	wlist = *list;
 
-	/* insert the widget */
-	dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n",
-			w->name, wlist->num_widgets);
+	snd_soc_dapm_widget_for_each_path(widget, rdir, path) {
+		DAPM_UPDATE_STAT(widget, neighbour_checks);
 
-	wlist->widgets[wlist->num_widgets] = w;
-	wlist->num_widgets++;
-	return 1;
+		if (path->weak || path->is_supply)
+			continue;
+
+		if (path->walking)
+			return 1;
+
+		trace_snd_soc_dapm_path(widget, dir, path);
+
+		if (path->connect) {
+			path->walking = 1;
+			con += fn(path->node[dir], list);
+			path->walking = 0;
+		}
+	}
+
+	widget->endpoints[dir] = con;
+
+	return con;
 }
 
 /*
@@ -1076,57 +1104,10 @@
  * output widget. Returns number of complete paths.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->outputs >= 0)
-		return widget->outputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_sink && widget->connected) {
-		widget->outputs = snd_soc_dapm_suspend_check(widget);
-		return widget->outputs;
-	}
-
-	list_for_each_entry(path, &widget->sinks, list_source) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_output_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->sink);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_output_ep(path->sink, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->outputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
+			is_connected_output_ep);
 }
 
 /*
@@ -1134,57 +1115,10 @@
  * input widget. Returns number of complete paths.
  */
 static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->inputs >= 0)
-		return widget->inputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_source && widget->connected) {
-		widget->inputs = snd_soc_dapm_suspend_check(widget);
-		return widget->inputs;
-	}
-
-	list_for_each_entry(path, &widget->sources, list_sink) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_input_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->source);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_input_ep(path->source, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->inputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
+			is_connected_input_ep);
 }
 
 /**
@@ -1204,7 +1138,9 @@
 {
 	struct snd_soc_card *card = dai->component->card;
 	struct snd_soc_dapm_widget *w;
+	LIST_HEAD(widgets);
 	int paths;
+	int ret;
 
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
@@ -1213,14 +1149,21 @@
 	 * to reset the cached number of inputs and outputs.
 	 */
 	list_for_each_entry(w, &card->widgets, list) {
-		w->inputs = -1;
-		w->outputs = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_IN] = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_OUT] = -1;
 	}
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
-		paths = is_connected_output_ep(dai->playback_widget, list);
+		paths = is_connected_output_ep(dai->playback_widget, &widgets);
 	else
-		paths = is_connected_input_ep(dai->capture_widget, list);
+		paths = is_connected_input_ep(dai->capture_widget, &widgets);
+
+	/* Drop starting point */
+	list_del(widgets.next);
+
+	ret = dapm_widget_list_create(list, &widgets);
+	if (ret)
+		paths = ret;
 
 	trace_snd_soc_dapm_connected(paths, stream);
 	mutex_unlock(&card->dapm_mutex);
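For context, a minimal sketch (not part of the patch) of how a caller consumes the flat widget list that dapm_widget_list_create() now builds in a single allocation; the pr_debug() loop and the direct kfree() here are illustrative assumptions:

	struct snd_soc_dapm_widget_list *list = NULL;
	int paths, i;

	paths = snd_soc_dapm_dai_get_connected_widgets(dai,
				SNDRV_PCM_STREAM_PLAYBACK, &list);
	if (paths < 0)
		return paths;	/* e.g. -ENOMEM from the list allocation */

	for (i = 0; i < list->num_widgets; i++)
		pr_debug("connected: %s\n", list->widgets[i]->name);

	kfree(list);	/* one allocation, one free */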
@@ -1321,7 +1264,7 @@
 	DAPM_UPDATE_STAT(w, power_checks);
 
 	/* Check if one of our outputs is connected */
-	list_for_each_entry(path, &w->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(w, path) {
 		DAPM_UPDATE_STAT(w, neighbour_checks);
 
 		if (path->weak)
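The new iterators are presumably thin wrappers over the per-direction edge lists introduced by this series; a sketch of how they are expected to be defined in <sound/soc-dapm.h> (treat the exact definitions as illustrative):

	#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
		list_for_each_entry(p, &(w)->edges[dir], list_node[dir])

	#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
		snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)

	#define snd_soc_dapm_widget_for_each_source_path(w, p) \
		snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)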
@@ -1745,12 +1688,12 @@
 	/* If we changed our power state perhaps our neighbours changed
 	 * also.
 	 */
-	list_for_each_entry(path, &w->sources, list_sink)
+	snd_soc_dapm_widget_for_each_source_path(w, path)
 		dapm_widget_set_peer_power(path->source, power, path->connect);
 
 	/* Supplies can't affect their outputs, only their inputs */
 	if (!w->is_supply) {
-		list_for_each_entry(path, &w->sinks, list_source)
+		snd_soc_dapm_widget_for_each_sink_path(w, path)
 			dapm_widget_set_peer_power(path->sink, power,
 						   path->connect);
 	}
@@ -1951,6 +1894,7 @@
 {
 	struct snd_soc_dapm_widget *w = file->private_data;
 	struct snd_soc_card *card = w->dapm->card;
+	enum snd_soc_dapm_direction dir, rdir;
 	char *buf;
 	int in, out;
 	ssize_t ret;
@@ -1987,25 +1931,21 @@
 				w->sname,
 				w->active ? "active" : "inactive");
 
-	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->source))
-			continue;
+	snd_soc_dapm_for_each_direction(dir) {
+		rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
+			if (p->connected && !p->connected(w, p->node[rdir]))
+				continue;
 
-		if (p->connect)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" in  \"%s\" \"%s\"\n",
-					p->name ? p->name : "static",
-					p->source->name);
-	}
-	list_for_each_entry(p, &w->sinks, list_source) {
-		if (p->connected && !p->connected(w, p->sink))
-			continue;
+			if (!p->connect)
+				continue;
 
-		if (p->connect)
 			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" out \"%s\" \"%s\"\n",
+					" %s  \"%s\" \"%s\"\n",
+					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
 					p->name ? p->name : "static",
-					p->sink->name);
+					p->node[rdir]->name);
+		}
 	}
 
 	mutex_unlock(&card->dapm_mutex);
@@ -2223,14 +2163,16 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
 
-static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf)
+static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+	char *buf)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
 	struct snd_soc_dapm_widget *w;
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->component.card->widgets, list) {
-		if (w->dapm != &codec->dapm)
+	list_for_each_entry(w, &cmpnt->card->widgets, list) {
+		if (w->dapm != dapm)
 			continue;
 
 		/* only display widgets that burn power */
@@ -2258,7 +2200,7 @@
 		}
 	}
 
-	switch (codec->dapm.bias_level) {
+	switch (snd_soc_dapm_get_bias_level(dapm)) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -2287,8 +2229,9 @@
 	mutex_lock(&rtd->card->dapm_mutex);
 
 	for (i = 0; i < rtd->num_codecs; i++) {
-		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
-		count += dapm_widget_show_codec(codec, buf + count);
+		struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
+
+		count += dapm_widget_show_component(cmpnt, buf + count);
 	}
 
 	mutex_unlock(&rtd->card->dapm_mutex);
@@ -2305,37 +2248,43 @@
 
 static void dapm_free_path(struct snd_soc_dapm_path *path)
 {
-	list_del(&path->list_sink);
-	list_del(&path->list_source);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
 	list_del(&path->list_kcontrol);
 	list_del(&path->list);
 	kfree(path);
 }
 
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_dapm_path *p, *next_p;
+	enum snd_soc_dapm_direction dir;
+
+	list_del(&w->list);
+	/*
+	 * remove the source and sink paths associated with this widget.
+	 * While removing a path, remove the reference to it from both the
+	 * source and sink widgets so that the path is removed only once.
+	 */
+	snd_soc_dapm_for_each_direction(dir) {
+		snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
+			dapm_free_path(p);
+	}
+
+	kfree(w->kcontrols);
+	kfree_const(w->name);
+	kfree(w);
+}
+
 /* free all dapm widgets and resources */
 static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 		if (w->dapm != dapm)
 			continue;
-		list_del(&w->list);
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink)
-			dapm_free_path(p);
-
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source)
-			dapm_free_path(p);
-
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 
@@ -2441,20 +2390,22 @@
  */
 static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *p;
+	unsigned int ep;
 
 	switch (w->id) {
 	case snd_soc_dapm_input:
 		/* On a fully routed card an input is never a source */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_source = 1;
-		list_for_each_entry(p, &w->sources, list_sink) {
+			return;
+		ep = SND_SOC_DAPM_EP_SOURCE;
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->source->id == snd_soc_dapm_micbias ||
 				p->source->id == snd_soc_dapm_mic ||
 				p->source->id == snd_soc_dapm_line ||
 				p->source->id == snd_soc_dapm_output) {
-					w->is_source = 0;
+					ep = 0;
 					break;
 			}
 		}
@@ -2462,25 +2413,30 @@
 	case snd_soc_dapm_output:
 		/* On a fully routed card an output is never a sink */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_sink = 1;
-		list_for_each_entry(p, &w->sinks, list_source) {
+			return;
+		ep = SND_SOC_DAPM_EP_SINK;
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->sink->id == snd_soc_dapm_spk ||
 				p->sink->id == snd_soc_dapm_hp ||
 				p->sink->id == snd_soc_dapm_line ||
 				p->sink->id == snd_soc_dapm_input) {
-					w->is_sink = 0;
+					ep = 0;
 					break;
 			}
 		}
 		break;
 	case snd_soc_dapm_line:
-		w->is_sink = !list_empty(&w->sources);
-		w->is_source = !list_empty(&w->sinks);
+		ep = 0;
+		snd_soc_dapm_for_each_direction(dir) {
+			if (!list_empty(&w->edges[dir]))
+				ep |= SND_SOC_DAPM_DIR_TO_EP(dir);
+		}
 		break;
 	default:
-		break;
+		return;
 	}
+
+	w->is_ep = ep;
 }
 
 static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm,
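For reference, the direction/endpoint helpers this hunk relies on; a sketch of how they are expected to look in <sound/soc-dapm.h> for this series (illustrative, not quoted from the header):

	enum snd_soc_dapm_direction {
		SND_SOC_DAPM_DIR_IN,
		SND_SOC_DAPM_DIR_OUT,
	};

	#define SND_SOC_DAPM_DIR_TO_EP(x)	BIT(x)

	#define SND_SOC_DAPM_EP_SOURCE	SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
	#define SND_SOC_DAPM_EP_SINK	SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)

	#define snd_soc_dapm_for_each_direction(dir) \
		for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
				(dir)++)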
@@ -2533,6 +2489,8 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink))
 {
+	struct snd_soc_dapm_widget *widgets[2];
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
 	int ret;
 
@@ -2565,13 +2523,14 @@
 	if (!path)
 		return -ENOMEM;
 
-	path->source = wsource;
-	path->sink = wsink;
+	path->node[SND_SOC_DAPM_DIR_IN] = wsource;
+	path->node[SND_SOC_DAPM_DIR_OUT] = wsink;
+	widgets[SND_SOC_DAPM_DIR_IN] = wsource;
+	widgets[SND_SOC_DAPM_DIR_OUT] = wsink;
+
 	path->connected = connected;
 	INIT_LIST_HEAD(&path->list);
 	INIT_LIST_HEAD(&path->list_kcontrol);
-	INIT_LIST_HEAD(&path->list_source);
-	INIT_LIST_HEAD(&path->list_sink);
 
 	if (wsource->is_supply || wsink->is_supply)
 		path->is_supply = 1;
@@ -2609,14 +2568,13 @@
 	}
 
 	list_add(&path->list, &dapm->card->paths);
-	list_add(&path->list_sink, &wsink->sources);
-	list_add(&path->list_source, &wsource->sinks);
+	snd_soc_dapm_for_each_direction(dir)
+		list_add(&path->list_node[dir], &widgets[dir]->edges[dir]);
 
-	dapm_update_widget_flags(wsource);
-	dapm_update_widget_flags(wsink);
-
-	dapm_mark_dirty(wsource, "Route added");
-	dapm_mark_dirty(wsink, "Route added");
+	snd_soc_dapm_for_each_direction(dir) {
+		dapm_update_widget_flags(widgets[dir]);
+		dapm_mark_dirty(widgets[dir], "Route added");
+	}
 
 	if (dapm->card->instantiated && path->connect)
 		dapm_path_invalidate(path);
@@ -2864,7 +2822,7 @@
 		dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
 			 route->source, route->sink);
 
-	list_for_each_entry(path, &source->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(source, path) {
 		if (path->sink == sink) {
 			path->weak = 1;
 			count++;
@@ -2918,7 +2876,7 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @dapm: DAPM context
+ * @card: card to be checked for new dapm widgets
  *
  * Checks the card for any new dapm widgets and creates them if found.
  *
@@ -3298,6 +3256,7 @@
 snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
 			 const struct snd_soc_dapm_widget *widget)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_widget *w;
 	const char *prefix;
 	int ret;
@@ -3344,7 +3303,7 @@
 	if (prefix)
 		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
 	else
-		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
+		w->name = kstrdup_const(widget->name, GFP_KERNEL);
 	if (w->name == NULL) {
 		kfree(w);
 		return NULL;
@@ -3352,27 +3311,27 @@
 
 	switch (w->id) {
 	case snd_soc_dapm_mic:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_input:
 		if (!dapm->card->fully_routed)
-			w->is_source = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_spk:
 	case snd_soc_dapm_hp:
-		w->is_sink = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_output:
 		if (!dapm->card->fully_routed)
-			w->is_sink = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_vmid:
 	case snd_soc_dapm_siggen:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_always_on_check_power;
 		break;
 	case snd_soc_dapm_mux:
@@ -3406,14 +3365,14 @@
 	}
 
 	w->dapm = dapm;
-	INIT_LIST_HEAD(&w->sources);
-	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
 	INIT_LIST_HEAD(&w->dirty);
 	list_add_tail(&w->list, &dapm->card->widgets);
 
-	w->inputs = -1;
-	w->outputs = -1;
+	snd_soc_dapm_for_each_direction(dir) {
+		INIT_LIST_HEAD(&w->edges[dir]);
+		w->endpoints[dir] = -1;
+	}
 
 	/* machine layer sets up unconnected pins and insertions */
 	w->connected = 1;
@@ -3467,19 +3426,17 @@
 	int ret;
 
 	if (WARN_ON(!config) ||
-	    WARN_ON(list_empty(&w->sources) || list_empty(&w->sinks)))
+	    WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
+		    list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
 		return -EINVAL;
 
 	/* We only support a single source and sink, pick the first */
-	source_p = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				    list_sink);
-	sink_p = list_first_entry(&w->sinks, struct snd_soc_dapm_path,
-				  list_source);
-
-	if (WARN_ON(!source_p || !sink_p) ||
-	    WARN_ON(!sink_p->source || !source_p->sink) ||
-	    WARN_ON(!source_p->source || !sink_p->sink))
-		return -EINVAL;
+	source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_OUT]);
+	sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_IN]);
 
 	source = source_p->source->priv;
 	sink = sink_p->sink->priv;
@@ -3821,11 +3778,6 @@
 	for (i = 0; i < rtd->num_codecs; i++) {
 		struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
 
-		/* there is no point in connecting BE DAI links with dummies */
-		if (snd_soc_dai_is_dummy(codec_dai) ||
-			snd_soc_dai_is_dummy(cpu_dai))
-			continue;
-
 		/* connect BE DAI playback if widgets are valid */
 		if (codec_dai->playback_widget && cpu_dai->playback_widget) {
 			source = cpu_dai->playback_widget;
@@ -3856,6 +3808,7 @@
 	int event)
 {
 	struct snd_soc_dapm_widget *w;
+	unsigned int ep;
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 		w = dai->playback_widget;
@@ -3865,12 +3818,22 @@
 	if (w) {
 		dapm_mark_dirty(w, "stream event");
 
+		if (w->id == snd_soc_dapm_dai_in) {
+			ep = SND_SOC_DAPM_EP_SOURCE;
+			dapm_widget_invalidate_input_paths(w);
+		} else {
+			ep = SND_SOC_DAPM_EP_SINK;
+			dapm_widget_invalidate_output_paths(w);
+		}
+
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 			w->active = 1;
+			w->is_ep = ep;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
 			w->active = 0;
+			w->is_ep = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
 		case SND_SOC_DAPM_STREAM_RESUME:
@@ -3878,14 +3841,6 @@
 		case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
 			break;
 		}
-
-		if (w->id == snd_soc_dapm_dai_in) {
-			w->is_source = w->active;
-			dapm_widget_invalidate_input_paths(w);
-		} else {
-			w->is_sink = w->active;
-			dapm_widget_invalidate_output_paths(w);
-		}
 	}
 }
 
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 256b9c9..70e4b9d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1231,24 +1231,17 @@
 }
 
 int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
-	int stream, struct snd_soc_dapm_widget_list **list_)
+	int stream, struct snd_soc_dapm_widget_list **list)
 {
 	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
-	struct snd_soc_dapm_widget_list *list;
 	int paths;
 
-	list = kzalloc(sizeof(struct snd_soc_dapm_widget_list) +
-			sizeof(struct snd_soc_dapm_widget *), GFP_KERNEL);
-	if (list == NULL)
-		return -ENOMEM;
-
 	/* get number of valid DAI paths and their widgets */
-	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, &list);
+	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
 
 	dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
 			stream ? "capture" : "playback");
 
-	*list_ = list;
 	return paths;
 }
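With the list allocation moved into snd_soc_dapm_dai_get_connected_widgets(), callers keep pairing dpcm_path_get() with dpcm_path_put(); a minimal sketch, error handling elided:

	struct snd_soc_dapm_widget_list *list;
	int paths;

	paths = dpcm_path_get(fe, stream, &list);
	if (paths < 0)
		return paths;

	/* ... add or prune BE connections against the list ... */

	dpcm_path_put(&list);	/* frees the single array allocation */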
 
@@ -1306,7 +1299,12 @@
 
 		switch (list->widgets[i]->id) {
 		case snd_soc_dapm_dai_in:
+			if (stream != SNDRV_PCM_STREAM_PLAYBACK)
+				continue;
+			break;
 		case snd_soc_dapm_dai_out:
+			if (stream != SNDRV_PCM_STREAM_CAPTURE)
+				continue;
 			break;
 		default:
 			continue;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 31068b8..69d01cd 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -45,12 +45,12 @@
 #define SOC_TPLG_PASS_VENDOR		1
 #define SOC_TPLG_PASS_MIXER		2
 #define SOC_TPLG_PASS_WIDGET		3
-#define SOC_TPLG_PASS_GRAPH		4
-#define SOC_TPLG_PASS_PINS		5
-#define SOC_TPLG_PASS_PCM_DAI		6
+#define SOC_TPLG_PASS_PCM_DAI		4
+#define SOC_TPLG_PASS_GRAPH		5
+#define SOC_TPLG_PASS_PINS		6
 
 #define SOC_TPLG_PASS_START	SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PCM_DAI
+#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PINS
 
 struct soc_tplg {
 	const struct firmware *fw;
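The pass numbers matter because the loader walks the firmware blob once per pass, from SOC_TPLG_PASS_START to SOC_TPLG_PASS_END, so PCM/DAI objects are now created before graph and pin processing. Schematically (hypothetical helper name; the real loop lives inside the loader):

	int pass;

	for (pass = SOC_TPLG_PASS_START; pass <= SOC_TPLG_PASS_END; pass++)
		/* handle every header whose type belongs to this pass */
		soc_tplg_process_headers_for_pass(tplg, pass);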
@@ -66,10 +66,14 @@
 	u32 index;	/* current block index */
 	u32 req_index;	/* required index, only loaded/free matching blocks */
 
-	/* kcontrol operations */
+	/* vendor specific kcontrol operations */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
 
+	/* vendor specific bytes ext handlers, for TLV bytes controls */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
+
 	/* optional fw loading callbacks to component drivers */
 	struct snd_soc_tplg_ops *ops;
 };
@@ -508,19 +512,70 @@
 /* bind a kcontrol to its IO handlers */
 static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 	struct snd_kcontrol_new *k,
-	const struct snd_soc_tplg_kcontrol_ops *ops, int num_ops,
-	const struct snd_soc_tplg_kcontrol_ops *bops, int num_bops)
+	const struct soc_tplg *tplg)
 {
-	int i;
+	const struct snd_soc_tplg_kcontrol_ops *ops;
+	const struct snd_soc_tplg_bytes_ext_ops *ext_ops;
+	int num_ops, i;
 
-	/* try and map standard kcontrols handler first */
+	if (hdr->ops.info == SND_SOC_TPLG_CTL_BYTES
+		&& k->iface & SNDRV_CTL_ELEM_IFACE_MIXER
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		struct soc_bytes_ext *sbe;
+		struct snd_soc_tplg_bytes_control *be;
+
+		sbe = (struct soc_bytes_ext *)k->private_value;
+		be = container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+
+		/* TLV bytes controls need the standard kcontrol info handler,
+		 * the TLV callback and the extended put/get handlers.
+		 */
+		k->info = snd_soc_bytes_info;
+		k->tlv.c = snd_soc_bytes_tlv_callback;
+
+		ext_ops = tplg->bytes_ext_ops;
+		num_ops = tplg->bytes_ext_ops_count;
+		for (i = 0; i < num_ops; i++) {
+			if (!sbe->put && ext_ops[i].id == be->ext_ops.put)
+				sbe->put = ext_ops[i].put;
+			if (!sbe->get && ext_ops[i].id == be->ext_ops.get)
+				sbe->get = ext_ops[i].get;
+		}
+
+		if (sbe->put && sbe->get)
+			return 0;
+		else
+			return -EINVAL;
+	}
+
+	/* try and map vendor specific kcontrol handlers first */
+	ops = tplg->io_ops;
+	num_ops = tplg->io_ops_count;
 	for (i = 0; i < num_ops; i++) {
 
-		if (ops[i].id == hdr->ops.put)
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
 			k->put = ops[i].put;
-		if (ops[i].id == hdr->ops.get)
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
 			k->get = ops[i].get;
-		if (ops[i].id == hdr->ops.info)
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
+			k->info = ops[i].info;
+	}
+
+	/* vendor specific handlers found? */
+	if (k->put && k->get && k->info)
+		return 0;
+
+	/* none found so try standard kcontrol handlers */
+	ops = io_ops;
+	num_ops = ARRAY_SIZE(io_ops);
+	for (i = 0; i < num_ops; i++) {
+
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
+			k->put = ops[i].put;
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
+			k->get = ops[i].get;
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
 			k->info = ops[i].info;
 	}
 
@@ -528,21 +583,6 @@
 	if (k->put && k->get && k->info)
 		return 0;
 
-	/* none found so try bespoke handlers */
-	for (i = 0; i < num_bops; i++) {
-
-		if (k->put == NULL && bops[i].id == hdr->ops.put)
-			k->put = bops[i].put;
-		if (k->get == NULL && bops[i].id == hdr->ops.get)
-			k->get = bops[i].get;
-		if (k->info == NULL && bops[i].id == hdr->ops.info)
-			k->info = bops[i].info;
-	}
-
-	/* bespoke handlers found ? */
-	if (k->put && k->get && k->info)
-		return 0;
-
 	/* nothing to bind */
 	return -EINVAL;
 }
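The binding order above — TLV bytes ext handlers first, then vendor specific ops, then the standard io_ops table — can be modelled with a small self-contained sketch; the types below are simplified stand-ins, not the kernel's topology structs:

	#include <stddef.h>

	typedef int (*ctl_fn)(void);

	struct op_map {
		unsigned int id;
		ctl_fn fn;
	};

	/* Bind one handler: keep an already-bound handler, then search the
	 * vendor table before the standard one, mirroring the flow of
	 * soc_tplg_kcontrol_bind_io(). */
	static ctl_fn bind_one(ctl_fn cur, unsigned int want,
			       const struct op_map *vendor, size_t nv,
			       const struct op_map *std, size_t ns)
	{
		size_t i;

		if (cur)
			return cur;
		for (i = 0; i < nv; i++)
			if (vendor[i].id == want)
				return vendor[i].fn;
		for (i = 0; i < ns; i++)
			if (std[i].id == want)
				return std[i].fn;
		return NULL;	/* caller then reports -EINVAL */
	}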
@@ -609,9 +649,7 @@
 	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
 		return 0;
 
-	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
-		kc->tlv.c = snd_soc_bytes_tlv_callback;
-	} else {
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)) {
 		tplg_tlv = &tc->tlv;
 		switch (tplg_tlv->type) {
 		case SNDRV_CTL_TLVT_DB_SCALE:
@@ -682,8 +720,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -777,8 +814,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -855,12 +891,12 @@
 	if (ec->items > sizeof(*ec->values))
 		return -EINVAL;
 
-	se->dobj.control.dvalues =
-		kmalloc(ec->items * sizeof(u32), GFP_KERNEL);
+	se->dobj.control.dvalues = kmemdup(ec->values,
+					   ec->items * sizeof(u32),
+					   GFP_KERNEL);
 	if (!se->dobj.control.dvalues)
 		return -ENOMEM;
 
-	memcpy(se->dobj.control.dvalues, ec->values, ec->items * sizeof(u32));
 	return 0;
 }
 
@@ -950,8 +986,7 @@
 		}
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 			kfree(se);
@@ -1093,7 +1128,7 @@
 	struct snd_soc_tplg_mixer_control *mc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * num_kcontrols, GFP_KERNEL);
+	kc = kcalloc(num_kcontrols, sizeof(*kc), GFP_KERNEL);
 	if (kc == NULL)
 		return NULL;
 
@@ -1137,8 +1172,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -1235,8 +1269,7 @@
 	}
 
 	/* map io handlers */
-	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, io_ops,
-		ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
 	if (err) {
 		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 		goto err_se;
@@ -1274,7 +1307,7 @@
 	struct snd_kcontrol_new *kc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * count, GFP_KERNEL);
+	kc = kcalloc(count, sizeof(*kc), GFP_KERNEL);
 	if (!kc)
 		return NULL;
 
@@ -1297,7 +1330,6 @@
 			"ASoC: adding bytes kcontrol %s with access 0x%x\n",
 			be->hdr.name, be->hdr.access);
 
-		memset(kc, 0, sizeof(*kc));
 		kc[i].name = be->hdr.name;
 		kc[i].private_value = (long)sbe;
 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -1307,9 +1339,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map standard io handlers and check for external handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], io_ops,
-				ARRAY_SIZE(io_ops), tplg->io_ops,
-				tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -1737,6 +1767,8 @@
 	tplg.req_index = id;
 	tplg.io_ops = ops->io_ops;
 	tplg.io_ops_count = ops->io_ops_count;
+	tplg.bytes_ext_ops = ops->bytes_ext_ops;
+	tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
 
 	return soc_tplg_load(&tplg);
 }
@@ -1758,7 +1790,6 @@
 	u32 index)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 
@@ -1770,31 +1801,9 @@
 		if (w->dobj.index != index &&
 			w->dobj.index != SND_SOC_TPLG_INDEX_ALL)
 			continue;
-
-		list_del(&w->list);
-
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
 		/* check and free any dynamic widget kcontrols */
 		snd_soc_tplg_widget_remove(w);
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index a402860..977a078 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -203,35 +203,25 @@
 	struct spdif_in_dev *host;
 	struct spear_spdif_platform_data *pdata;
 	struct resource *res, *res_fifo;
+	void __iomem *io_base;
 	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
+	io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	if (!res_fifo)
 		return -EINVAL;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				resource_size(res), pdev->name)) {
-		dev_warn(&pdev->dev, "Failed to get memory resourse\n");
-		return -ENOENT;
-	}
-
 	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		dev_warn(&pdev->dev, "kzalloc fail\n");
 		return -ENOMEM;
 	}
 
-	host->io_base = devm_ioremap(&pdev->dev, res->start,
-				resource_size(res));
-	if (!host->io_base) {
-		dev_warn(&pdev->dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
+	host->io_base = io_base;
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0)
 		return -EINVAL;
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index a7dc3c5..e8476da 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -44,7 +44,7 @@
 	*config = spear_dmaengine_pcm_config;
 	config->compat_filter_fn = filter;
 
-	return snd_dmaengine_pcm_register(dev, config,
+	return devm_snd_dmaengine_pcm_register(dev, config,
 		SND_DMAENGINE_PCM_FLAG_NO_DT |
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
diff --git a/sound/soc/sti/Kconfig b/sound/soc/sti/Kconfig
new file mode 100644
index 0000000..64a6900
--- /dev/null
+++ b/sound/soc/sti/Kconfig
@@ -0,0 +1,11 @@
+#
+# STM SoC audio configuration
+#
+menuconfig SND_SOC_STI
+	tristate "SoC Audio support for STI System-On-Chip"
+	depends on SND_SOC
+	depends on ARCH_STI || COMPILE_TEST
+	select SND_SOC_GENERIC_DMAENGINE_PCM
+	help
+		Say Y if you want to enable ASoC support for
+		any of the STI platforms (e.g. STIH416).
diff --git a/sound/soc/sti/Makefile b/sound/soc/sti/Makefile
new file mode 100644
index 0000000..4b188d2
--- /dev/null
+++ b/sound/soc/sti/Makefile
@@ -0,0 +1,4 @@
+# STI platform support
+snd-soc-sti-objs := sti_uniperif.o uniperif_player.o uniperif_reader.o
+
+obj-$(CONFIG_SND_SOC_STI) += snd-soc-sti.o
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
new file mode 100644
index 0000000..39bcefe
--- /dev/null
+++ b/sound/soc/sti/sti_uniperif.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "uniperif.h"
+
+/*
+ * sti_uniperiph_dai_create_ctrl
+ * This function creates the controls associated with the DAI, and also
+ * with the PCM device. The front end requests this so that a control can
+ * be associated with a PCM device id.
+ */
+static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	struct snd_kcontrol_new *ctrl;
+	int i;
+
+	if (!uni->num_ctrls)
+		return 0;
+
+	for (i = 0; i < uni->num_ctrls; i++) {
+		/*
+		 * Several controls can have the same name. Controls are
+		 * indexed on the Uniperipheral instance ID.
+		 */
+		ctrl = &uni->snd_ctrls[i];
+		ctrl->index = uni->info->id;
+		ctrl->device = uni->info->id;
+	}
+
+	return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
+}
+
+/*
+ * DAI
+ */
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_dmaengine_dai_dma_data *dma_data;
+	int transfer_size;
+
+	transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
+
+	dma_data = snd_soc_dai_get_dma_data(dai, substream);
+	dma_data->maxburst = transfer_size;
+
+	return 0;
+}
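+
+/*
+ * Example burst sizing for the hw_params above (figures illustrative):
+ * with UNIPERIF_FIFO_FRAMES = 4, a stereo stream gives
+ * maxburst = 2 channels * 4 frames = 8 FIFO words per DMA burst.
+ */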
+
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+
+	priv->dai_data.uni->daifmt = fmt;
+
+	return 0;
+}
+
+static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	/* The uniperipheral should be in the stopped state */
+	if (uni->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(uni->dev, "%s: invalid uni state (%d)",
+			__func__, (int)uni->state);
+		return -EBUSY;
+	}
+
+	/* Pinctrl: switch pinstate to sleep */
+	ret = pinctrl_pm_select_sleep_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player")) {
+		ret = uni_player_resume(uni);
+		if (ret)
+			return ret;
+	}
+
+	/* Pinctrl: switch pinstate to default */
+	ret = pinctrl_pm_select_default_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+
+	/* DMA settings */
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player"))
+		snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
+	else
+		snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
+
+	dai_data->dma_data.addr = dai_data->uni->fifo_phys_address;
+	dai_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	return sti_uniperiph_dai_create_ctrl(dai);
+}
+
+static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
+	.probe = sti_uniperiph_dai_probe,
+	.suspend = sti_uniperiph_dai_suspend,
+	.resume = sti_uniperiph_dai_resume
+};
+
+static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
+	.name = "sti_cpu_dai",
+};
+
+static int sti_uniperiph_cpu_dai_of(struct device_node *node,
+				    struct sti_uniperiph_data *priv)
+{
+	const char *str;
+	int ret;
+	struct device *dev = &priv->pdev->dev;
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+	struct snd_soc_dai_driver *dai = priv->dai;
+	struct snd_soc_pcm_stream *stream;
+	struct uniperif *uni;
+
+	uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
+	if (!uni)
+		return -ENOMEM;
+
+	*dai = sti_uniperiph_dai_template;
+	ret = of_property_read_string(node, "dai-name", &str);
+	if (ret < 0) {
+		dev_err(dev, "%s: dai name missing.\n", __func__);
+		return -EINVAL;
+	}
+	dai->name = str;
+
+	/* Get resources */
+	uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
+
+	if (!uni->mem_region) {
+		dev_err(dev, "Failed to get memory resource");
+		return -ENODEV;
+	}
+
+	uni->base = devm_ioremap_resource(dev, uni->mem_region);
+
+	if (IS_ERR(uni->base))
+		return PTR_ERR(uni->base);
+
+	uni->fifo_phys_address = uni->mem_region->start +
+				     UNIPERIF_FIFO_DATA_OFFSET(uni);
+
+	uni->irq = platform_get_irq(priv->pdev, 0);
+	if (uni->irq < 0) {
+		dev_err(dev, "Failed to get IRQ resource");
+		return -ENXIO;
+	}
+
+	dai_data->uni = uni;
+
+	if (of_device_is_compatible(node, "st,sti-uni-player")) {
+		uni_player_init(priv->pdev, uni);
+		stream = &dai->playback;
+	} else {
+		uni_reader_init(priv->pdev, uni);
+		stream = &dai->capture;
+	}
+	dai->ops = uni->dai_ops;
+
+	stream->stream_name = dai->name;
+	stream->channels_min = uni->hw->channels_min;
+	stream->channels_max = uni->hw->channels_max;
+	stream->rates = uni->hw->rates;
+	stream->formats = uni->hw->formats;
+
+	return 0;
+}
+
+static const struct snd_dmaengine_pcm_config dmaengine_pcm_config = {
+	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+};
+
+static int sti_uniperiph_probe(struct platform_device *pdev)
+{
+	struct sti_uniperiph_data *priv;
+	struct device_node *node = pdev->dev.of_node;
+	int ret;
+
+	/* Allocate the private data and the CPU_DAI array */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dai = devm_kzalloc(&pdev->dev, sizeof(*priv->dai), GFP_KERNEL);
+	if (!priv->dai)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+
+	ret = sti_uniperiph_cpu_dai_of(node, priv);
+	if (ret < 0)
+		return ret;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &sti_uniperiph_dai_component,
+					      priv->dai, 1);
+	if (ret < 0)
+		return ret;
+
+	return devm_snd_dmaengine_pcm_register(&pdev->dev,
+					       &dmaengine_pcm_config, 0);
+}
+
+static const struct of_device_id snd_soc_sti_match[] = {
+	{ .compatible = "st,sti-uni-player", },
+	{ .compatible = "st,sti-uni-reader", },
+	{},
+};
+
+static struct platform_driver sti_uniperiph_driver = {
+	.driver = {
+		.name = "sti-uniperiph-dai",
+		.of_match_table = snd_soc_sti_match,
+	},
+	.probe = sti_uniperiph_probe,
+};
+module_platform_driver(sti_uniperiph_driver);
+
+MODULE_DESCRIPTION("uniperipheral DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
new file mode 100644
index 0000000..f0fd5a9
--- /dev/null
+++ b/sound/soc/sti/uniperif.h
@@ -0,0 +1,1229 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef __SND_ST_AUD_UNIPERIF_H
+#define __SND_ST_AUD_UNIPERIF_H
+
+#include <linux/regmap.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/*
+ * Register access macros
+ */
+
+#define GET_UNIPERIF_REG(ip, offset, shift, mask) \
+	((readl_relaxed(ip->base + offset) >> shift) & mask)
+#define SET_UNIPERIF_REG(ip, offset, shift, mask, value) \
+	writel_relaxed(((readl_relaxed(ip->base + offset) & \
+	~(mask << shift)) | (((value) & mask) << shift)), ip->base + offset)
+#define SET_UNIPERIF_BIT_REG(ip, offset, shift, mask, value) \
+	writel_relaxed((((value) & mask) << shift), ip->base + offset)
+
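+/*
+ * For example, SET_UNIPERIF_REG(ip, off, 8, 0x7F, 10) performs a
+ * read-modify-write: it reads the register at ip->base + off, clears
+ * the 7-bit field at bits [14:8] and writes 10 into it.
+ * SET_UNIPERIF_BIT_REG skips the read and writes only the shifted
+ * value (all other bits zero), which suits write-one-to-act registers
+ * such as the interrupt set/clear registers below.
+ */
+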
+/*
+ * AUD_UNIPERIF_SOFT_RST reg
+ */
+
+#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
+#define GET_UNIPERIF_SOFT_RST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		readl_relaxed(ip->base + UNIPERIF_SOFT_RST_OFFSET(ip)) : 0)
+#define SET_UNIPERIF_SOFT_RST(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_SOFT_RST_OFFSET(ip))
+
+/* SOFT_RST */
+#define UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip) 0x0
+#define UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip) 0x1
+#define SET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	SET_UNIPERIF_BIT_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip), 1)
+#define GET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
+
+/*
+ * AUD_UNIPERIF_FIFO_DATA reg
+ */
+
+#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
+#define SET_UNIPERIF_DATA(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
+
+/*
+ * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ */
+
+#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * (n)))
+#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
+#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_CHANNEL_STA_REGN(ip, n))
+
+#define UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip) 0x0060
+#define GET_UNIPERIF_CHANNEL_STA_REG0(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG0(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip) 0x0064
+#define GET_UNIPERIF_CHANNEL_STA_REG1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip) 0x0068
+#define GET_UNIPERIF_CHANNEL_STA_REG2(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG2(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip) 0x006C
+#define GET_UNIPERIF_CHANNEL_STA_REG3(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG3(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip) 0x0070
+#define GET_UNIPERIF_CHANNEL_STA_REG4(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG4(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip) 0x0074
+#define GET_UNIPERIF_CHANNEL_STA_REG5(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG5(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITS reg
+ */
+
+#define UNIPERIF_ITS_OFFSET(ip) 0x000C
+#define GET_UNIPERIF_ITS(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITS_OFFSET(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITS_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip)))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip)))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITS_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITS_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_DMA_ERROR_SHIFT(ip)))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITS_BCLR reg
+ */
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITS_BCLR(ip, \
+		UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip))
+
+#define UNIPERIF_ITS_BCLR_OFFSET(ip) 0x0010
+#define SET_UNIPERIF_ITS_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM reg
+ */
+
+#define UNIPERIF_ITM_OFFSET(ip) 0x0018
+#define GET_UNIPERIF_ITM(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITM_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip)))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITM_BCLR reg
+ */
+
+#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
+#define SET_UNIPERIF_ITM_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BCLR_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM_BSET reg
+ */
+
+#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
+#define SET_UNIPERIF_ITM_BSET(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BSET_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip))
+
+/*
+ * UNIPERIF_CONFIG reg
+ */
+
+#define UNIPERIF_CONFIG_OFFSET(ip) 0x0040
+#define GET_UNIPERIF_CONFIG(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+#define SET_UNIPERIF_CONFIG(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+
+/* PARITY_CNTR */
+#define UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip) 0
+#define UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_PARITY_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 1)
+
+/* CHANNEL_STA_CNTR */
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip) 1
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),    \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 1)
+
+/* USER_DAT_CNTR */
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip) 2
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_USER_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 0)
+
+/* VALIDITY_DAT_CNTR */
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip) 3
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 1)
+
+/* ONE_BIT_AUD_SUPPORT */
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip) 4
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_ONE_BIT_AUD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 1)
+
+/* MEMORY_FMT */
+#define UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip) 5
+#define UNIPERIF_CONFIG_MEM_FMT_MASK(ip) 0x1
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) 0
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) 1
+#define GET_UNIPERIF_CONFIG_MEM_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT(ip, value)	\
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip), value)
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_0(ip)   \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip))
+
+/* REPEAT_CHL_STS */
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip) 6
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_REPEAT_CHL_STS(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip))
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 1)
+
+/* BACK_STALL_REQ */
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 7 : -1)
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_BACK_STALL_REQ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip))
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 1)
+
+/* FDMA_TRIGGER_LIMIT */
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip) 8
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip) 0x7F
+#define GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip), value)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),  \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip), 1)
+
+/* IDLE_MOD */
+#define UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip) 18
+#define UNIPERIF_CONFIG_IDLE_MOD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_IDLE_MOD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 1)
+
+/* SUBFRAME_SELECTION */
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip) 19
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SUBFRAME_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF0_SUBF1(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 0)
+
+/* FULL_SW_CONTROL */
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip) 20
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SPDIF_SW_CTRL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 0)
+
+/* MASTER_CLKEDGE */
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 24 : -1)
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_MSTR_CLKEDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 0)
+
+/*
+ * UNIPERIF_CTRL reg
+ */
+
+#define UNIPERIF_CTRL_OFFSET(ip) 0x0044
+#define GET_UNIPERIF_CTRL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CTRL_OFFSET(ip))
+#define SET_UNIPERIF_CTRL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CTRL_OFFSET(ip))
+
+/* OPERATION */
+#define UNIPERIF_CTRL_OPERATION_SHIFT(ip) 0
+#define UNIPERIF_CTRL_OPERATION_MASK(ip) 0x7
+#define GET_UNIPERIF_CTRL_OPERATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip) 0
+#define SET_UNIPERIF_CTRL_OPERATION_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 1 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 2 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip))
+/* Same value as OPERATION_PCM_DATA above! */
+#define VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) 4
+#define SET_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 5 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 7)
+#define SET_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip))
+
+/* EXIT_STBY_ON_EOBLOCK */
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 3)
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip))
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 1)
+
+/* ROUNDING */
+#define UNIPERIF_CTRL_ROUNDING_SHIFT(ip) 4
+#define UNIPERIF_CTRL_ROUNDING_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ROUNDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip))
+#define SET_UNIPERIF_CTRL_ROUNDING_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_ROUNDING_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 1)
+
+/* DIVIDER */
+#define UNIPERIF_CTRL_DIVIDER_SHIFT(ip) 5
+#define UNIPERIF_CTRL_DIVIDER_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_DIVIDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip))
+#define SET_UNIPERIF_CTRL_DIVIDER(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip), value)
+
+/* BYTE_SWAP */
+#define UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 13 : -1)
+#define UNIPERIF_CTRL_BYTE_SWP_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_BYTE_SWP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip))
+#define SET_UNIPERIF_CTRL_BYTE_SWP_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_BYTE_SWP_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 1)
+
+/* ZERO_STUFFING_HW_SW */
+#define UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 14 : -1)
+#define UNIPERIF_CTRL_ZERO_STUFF_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ZERO_STUFF(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip))
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 0)
+
+/* SPDIF_LAT */
+#define UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CTRL_SPDIF_LAT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_LAT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 0)
+
+/* EN_SPDIF_FORMATTING */
+#define UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip) 17
+#define UNIPERIF_CTRL_SPDIF_FMT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 0)
+
+/* READER_OUT_SELECT */
+#define UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 18 : -1)
+#define UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_READER_OUT_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip))
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_IN_MEM(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_ON_I2S_LINE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
+
+/* UNDERFLOW_REC_WINDOW */
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip))
+#define SET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip), value)
+
+/*
+ * UNIPERIF_I2S_FMT a.k.a. UNIPERIF_FORMAT reg
+ */
+
+#define UNIPERIF_I2S_FMT_OFFSET(ip) 0x0048
+#define GET_UNIPERIF_I2S_FMT(ip) \
+	readl_relaxed(ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+#define SET_UNIPERIF_I2S_FMT(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+
+/* NBIT */
+#define UNIPERIF_I2S_FMT_NBIT_SHIFT(ip) 0
+#define UNIPERIF_I2S_FMT_NBIT_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_NBIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NBIT_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_NBIT_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 1)
+
+/* DATA_SIZE */
+#define UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip) 1
+#define UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_DATA_SIZE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_18(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 1)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_20(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 2)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 3)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_28(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 4)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 5)
+
+/* LR_POL */
+#define UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip) 4
+#define UNIPERIF_I2S_FMT_LR_POL_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_LR_POL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip))
+
+/* SCLK_EDGE */
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip) 5
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_SCLK_EDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 1)
+
+/* PADDING */
+#define UNIPERIF_I2S_FMT_PADDING_SHIFT(ip) 6
+#define UNIPERIF_I2S_FMT_PADDING_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_PADDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip))
+
+/* ALIGN */
+#define UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip) 7
+#define UNIPERIF_I2S_FMT_ALIGN_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ALIGN(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 1)
+
+/* ORDER */
+#define UNIPERIF_I2S_FMT_ORDER_SHIFT(ip) 8
+#define UNIPERIF_I2S_FMT_ORDER_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ORDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ORDER_LSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ORDER_MSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 1)
+
+/* NUM_CH */
+#define UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip) 9
+#define UNIPERIF_I2S_FMT_NUM_CH_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_NUM_CH(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NUM_CH(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip), value)
+
+/* NO_OF_SAMPLES_TO_READ */
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip) 12
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip) 0xfffff
+#define GET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip), value)
+
+/*
+ * UNIPERIF_BIT_CONTROL reg
+ */
+
+#define UNIPERIF_BIT_CONTROL_OFFSET(ip)  \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0x004c)
+#define GET_UNIPERIF_BIT_CONTROL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+#define SET_UNIPERIF_BIT_CONTROL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+
+/* CLR_UNDERFLOW_DURATION */
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip) 0
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip), 1)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip) 1
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip), 1)
+
+/*
+ * UNIPERIF_STATUS_1 reg
+ */
+
+#define UNIPERIF_STATUS_1_OFFSET(ip) 0x0050
+#define GET_UNIPERIF_STATUS_1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+#define SET_UNIPERIF_STATUS_1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+
+/* UNDERFLOW_DURATION */
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip) 0xff
+#define GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
+
+/*
+ * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ */
+
+#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * (n)))
+#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
+#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_CHANNEL_STA_REGN(ip, n))
+
+/*
+ * AUD_UNIPERIF_USER_VALIDITY reg
+ */
+
+#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
+#define GET_UNIPERIF_USER_VALIDITY(ip) \
+	readl_relaxed(ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+#define SET_UNIPERIF_USER_VALIDITY(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+
+/* VALIDITY_LEFT_AND_RIGHT */
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip) 0
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip) 0x3
+#define GET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip))
+#define SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip), \
+		value ? 0x3 : 0)
+
+/*
+ * UNIPERIF_DBG_STANDBY_LEFT_SP reg
+ */
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip) 0x0150
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 0xFFFFFF)
+#define GET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip))
+#define SET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
+
+/*
+ * uniperipheral IP capabilities
+ */
+
+#define UNIPERIF_FIFO_SIZE		70 /* FIFO is 70 cells deep */
+#define UNIPERIF_FIFO_FRAMES		4  /* FDMA trigger limit in frames */
+
+/*
+ * Uniperipheral IP revisions
+ */
+enum uniperif_version {
+	SND_ST_UNIPERIF_VERSION_UNKNOWN,
+	/* SASG1 (Orly), Newman */
+	SND_ST_UNIPERIF_VERSION_C6AUD0_UNI_1_0,
+	/* SASC1, SASG2 (Orly2) */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_1_0,
+	/* SASC1, SASG2 (Orly2), TELSS, Cannes */
+	SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
+	/* TELSS (SASC1) */
+	SND_ST_UNIPERIF_VERSION_TDM_PLR_1_0,
+	/* Cannes/Monaco */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+};
+
+enum uniperif_type {
+	SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
+	SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
+	SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
+	SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
+};
+
+enum uniperif_state {
+	UNIPERIF_STATE_STOPPED,
+	UNIPERIF_STATE_STARTED,
+	UNIPERIF_STATE_STANDBY,
+	UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_OVERFLOW = UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_XRUN
+};
+
+enum uniperif_iec958_encoding_mode {
+	UNIPERIF_IEC958_ENCODING_MODE_PCM,
+	UNIPERIF_IEC958_ENCODING_MODE_ENCODED
+};
+
+struct uniperif_info {
+	int id; /* instance value of the uniperipheral IP */
+	enum uniperif_type player_type;
+	int underflow_enabled;		/* Underflow recovery mode */
+};
+
+struct uniperif_iec958_settings {
+	enum uniperif_iec958_encoding_mode encoding_mode;
+	struct snd_aes_iec958 iec958;
+};
+
+struct uniperif {
+	/* System information */
+	struct uniperif_info *info;
+	struct device *dev;
+	int ver; /* IP version, used by register access macros */
+	struct regmap_field *clk_sel;
+
+	/* capabilities */
+	const struct snd_pcm_hardware *hw;
+
+	/* Resources */
+	struct resource *mem_region;
+	void __iomem *base;
+	unsigned long fifo_phys_address;
+	int irq;
+
+	/* Clocks */
+	struct clk *clk;
+	int mclk;
+	int clk_adj;
+
+	/* Runtime data */
+	enum uniperif_state state;
+
+	struct snd_pcm_substream *substream;
+
+	/* Specific to IEC958 player */
+	struct uniperif_iec958_settings stream_settings;
+	/* Protects resources updated by both the stream and the controls */
+	struct mutex ctrl_lock;
+
+	/* alsa ctrl */
+	struct snd_kcontrol_new *snd_ctrls;
+	int num_ctrls;
+
+	/* dai properties */
+	unsigned int daifmt;
+
+	/* DAI callbacks */
+	const struct snd_soc_dai_ops *dai_ops;
+};
+
+struct sti_uniperiph_dai {
+	int stream;
+	struct uniperif *uni;
+	struct snd_dmaengine_dai_dma_data dma_data;
+};
+
+struct sti_uniperiph_data {
+	struct platform_device *pdev;
+	struct snd_soc_dai_driver *dai;
+	struct sti_uniperiph_dai dai_data;
+};
+
+/* uniperiph player */
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *uni_player);
+int uni_player_resume(struct uniperif *player);
+
+/* uniperiph reader */
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *uni_reader);
+
+/* common */
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai,
+			      unsigned int fmt);
+
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai);
+
+#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
new file mode 100644
index 0000000..f6eefe1
--- /dev/null
+++ b/sound/soc/sti/uniperif_player.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/asoundef.h>
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Some hardware-related definitions
+ */
+
+/* sys config registers definitions */
+#define SYS_CFG_AUDIO_GLUE 0xA4
+#define SYS_CFG_AUDIO_GLUE_PCM_CLKX 8
+
+/*
+ * Driver specific types.
+ */
+#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
+#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
+#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
+#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
+	(UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
+		UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
+
+#define UNIPERIF_PLAYER_CLK_ADJ_MIN  -999999
+#define UNIPERIF_PLAYER_CLK_ADJ_MAX  1000000
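+
+/*
+ * The adjustment is expressed in ppm: +1000000 ppm corresponds to doubling
+ * the nominal rate, which matches the "up to twice as much as requested"
+ * limit noted in uni_player_clk_set_rate().
+ */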
+
+/*
+ * Note: snd_pcm_hardware is linked to the DMA controller but is declared
+ * here to integrate the DAI CPU capabilities in terms of rate and
+ * supported channels.
+ */
+static const struct snd_pcm_hardware uni_player_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 192000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+static inline int reset_player(struct uniperif *player)
+{
+	int count = 10;
+
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
+			udelay(5);
+			count--;
+		}
+	}
+
+	if (!count) {
+		dev_err(player->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * uni_player_irq_handler
+ * In case of error the audio stream is stopped; the stop action is
+ * protected via the PCM stream lock to avoid a race condition with the
+ * trigger callback.
+ */
+static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *player = dev_id;
+	unsigned int status;
+	unsigned int tmp;
+
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		return IRQ_NONE;
+	}
+
+	/* Get interrupt status & clear them immediately */
+	status = GET_UNIPERIF_ITS(player);
+	SET_UNIPERIF_ITS_BCLR(player, status);
+
+	/* Check for fifo error (underrun) */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
+		dev_err(player->dev, "FIFO underflow error detected");
+
+		/* Interrupt is informational only in underflow recovery mode */
+		if (player->info->underflow_enabled) {
+			/* Update state to underflow */
+			player->state = UNIPERIF_STATE_UNDERFLOW;
+		} else {
+			/* Disable interrupt so it doesn't continually fire */
+			SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
+
+			/* Stop the player */
+			snd_pcm_stream_lock(player->substream);
+			snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock(player->substream);
+		}
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for dma error (overrun) */
+	if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
+		dev_err(player->dev, "DMA error detected");
+
+		/* Disable interrupt so it doesn't continually fire */
+		SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for underflow recovery done */
+	if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
+		if (!player->info->underflow_enabled) {
+			dev_err(player->dev, "unexpected underflow recovery");
+			return IRQ_NONE;
+		}
+		/* Read the underflow recovery duration */
+		tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
+
+		/* Clear the underflow recovery duration */
+		SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
+
+		/* Update state to started */
+		player->state = UNIPERIF_STATE_STARTED;
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check if underflow recovery failed */
+	if (unlikely(status &
+		     UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
+		dev_err(player->dev, "Underflow recovery failed");
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_player_clk_set_rate(struct uniperif *player, unsigned long rate)
+{
+	int rate_adjusted, rate_achieved, delta, ret;
+	int adjustment = player->clk_adj;
+
+	/*
+	 *             a
+	 * F = f + --------- * f = f + d
+	 *          1000000
+	 *
+	 *         a
+	 * d = --------- * f
+	 *      1000000
+	 *
+	 * where:
+	 *   f - nominal rate
+	 *   a - adjustment in ppm (parts per million)
+	 *   F - rate to be set in synthesizer
+	 *   d - delta (difference) between f and F
+	 */
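+	/*
+	 * Worked example (illustrative numbers): f = 12288000 Hz and
+	 * a = 100 ppm give d = (12288000 * 100 + 500000) / 1000000 = 1229 Hz,
+	 * so F = 12289229 Hz is requested from the synthesizer.
+	 */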
+	if (adjustment < 0) {
+		/* div64_64 operates on unsigned values... */
+		delta = -1;
+		adjustment = -adjustment;
+	} else {
+		delta = 1;
+	}
+	/* 500000 ppm is 0.5, which is used to round up values */
+	delta *= (int)div64_u64((uint64_t)rate *
+				(uint64_t)adjustment + 500000, 1000000);
+	rate_adjusted = rate + delta;
+
+	/* Adjusted rate should never be == 0 */
+	if (!rate_adjusted)
+		return -EINVAL;
+
+	ret = clk_set_rate(player->clk, rate_adjusted);
+	if (ret < 0)
+		return ret;
+
+	rate_achieved = clk_get_rate(player->clk);
+	if (!rate_achieved)
+		/* A value of 0 means the clock or its parent is not valid */
+		return -EINVAL;
+
+	/*
+	 * Using ALSA's adjustment control, the rate can be modified to be up
+	 * to twice as much as requested, but no more. Compute the adjustment
+	 * actually achieved, in ppm.
+	 */
+	delta = rate_achieved - rate;
+	if (delta < 0) {
+		/* div64_64 operates on unsigned values... */
+		delta = -delta;
+		adjustment = -1;
+	} else {
+		adjustment = 1;
+	}
+	/* Frequency/2 is added to round up result */
+	adjustment *= (int)div64_u64((uint64_t)delta * 1000000 + rate / 2,
+				     rate);
+	player->clk_adj = adjustment;
+	return 0;
+}
+
+static void uni_player_set_channel_status(struct uniperif *player,
+					  struct snd_pcm_runtime *runtime)
+{
+	int n;
+	unsigned int status;
+
+	/*
+	 * Some AVRs and TVs require the channel status to contain a correct
+	 * sampling frequency. If no sample rate is already specified, then
+	 * set one.
+	 */
+	mutex_lock(&player->ctrl_lock);
+	if (runtime && (player->stream_settings.iec958.status[3]
+					== IEC958_AES3_CON_FS_NOTID)) {
+		switch (runtime->rate) {
+		case 22050:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_22050;
+			break;
+		case 44100:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_44100;
+			break;
+		case 88200:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_88200;
+			break;
+		case 176400:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_176400;
+			break;
+		case 24000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_24000;
+			break;
+		case 48000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_48000;
+			break;
+		case 96000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_96000;
+			break;
+		case 192000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_192000;
+			break;
+		case 32000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_32000;
+			break;
+		default:
+			/* Mark as sampling frequency not indicated */
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_NOTID;
+			break;
+		}
+	}
+
+	/*
+	 * Audio mode:
+	 * Use audio mode status to select PCM or encoded mode
+	 */
+	if (player->stream_settings.iec958.status[0] & IEC958_AES0_NONAUDIO)
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_ENCODED;
+	else
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_PCM;
+
+	if (player->stream_settings.encoding_mode ==
+		UNIPERIF_IEC958_ENCODING_MODE_PCM)
+		/* Clear user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+	else
+		/* Set user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 1);
+
+	/* Program the new channel status */
+	for (n = 0; n < 6; ++n) {
+		status  =
+		player->stream_settings.iec958.status[0 + (n * 4)] & 0xff;
+		status |=
+		player->stream_settings.iec958.status[1 + (n * 4)] << 8;
+		status |=
+		player->stream_settings.iec958.status[2 + (n * 4)] << 16;
+		status |=
+		player->stream_settings.iec958.status[3 + (n * 4)] << 24;
+		SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
+	}
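+	/*
+	 * Example (illustrative bytes): status bytes {0x01, 0x02, 0x03, 0x04}
+	 * pack into one register word as 0x04030201, i.e. byte (n * 4) lands
+	 * in bits 7..0 and byte (n * 4 + 3) in bits 31..24.
+	 */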
+	mutex_unlock(&player->ctrl_lock);
+
+	/* Update the channel status */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+}
+
+static int uni_player_prepare_iec958(struct uniperif *player,
+				     struct snd_pcm_runtime *runtime)
+{
+	int clk_div;
+
+	clk_div = player->mclk / runtime->rate;
+
+	/*
+	 * Oversampling must be a multiple of 128: an iec958 frame is two
+	 * 32-bit subframes, bi-phase mark coded (2 clocks per bit)
+	 */
+	if ((clk_div % 128) || (clk_div <= 0)) {
+		dev_err(player->dev, "%s: invalid clk_div %d",
+			__func__, clk_div);
+		return -EINVAL;
+	}
+
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* 16/16 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		/* 16-bits per sub-frame */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 16-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/* 16/0 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		/* 32-bits per sub-frame */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 24-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set parity to be calculated by the hardware */
+	SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(player);
+
+	/* Set channel status bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(player);
+
+	/* Set user data bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(player);
+
+	/* Set validity bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(player);
+
+	/* Set full software control to disabled */
+	SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(player);
+
+	SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
+
+	/* Update the channel status */
+	uni_player_set_channel_status(player, runtime);
+
+	/* Clear the user validity user bits */
+	SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+
+	/* Disable one-bit audio mode */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	/* Enable consecutive frames repetition of Z preamble (not for HBRA) */
+	SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(player);
+
+	/* Select subframe order SUBF1, SUBF0 - swaps the left/right channels */
+	SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(player);
+
+	/* Set data output as MSB first */
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+
+	if (player->stream_settings.encoding_mode ==
+				UNIPERIF_IEC958_ENCODING_MODE_ENCODED)
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(player);
+	else
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(player);
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / 128);
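+	/*
+	 * Example (illustrative rates): mclk = 12288000 Hz at 48000 Hz gives
+	 * clk_div = 256, a multiple of 128, so the divider field is
+	 * programmed with 256 / 128 = 2.
+	 */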
+
+	/* Set the spdif latency to not wait before starting player */
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+
+	/*
+	 * On older IPs, ensure iec958 formatting is off here; it is enabled
+	 * in uni_player_start() at the same time as the operation mode is
+	 * set, to work around a silicon issue. Newer IPs enable it directly.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+	else
+		SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	return 0;
+}
+
+static int uni_player_prepare_pcm(struct uniperif *player,
+				  struct snd_pcm_runtime *runtime)
+{
+	int output_frame_size, slot_width, clk_div;
+
+	/* Force slot width to 32 in I2S mode (HW constraint) */
+	if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+		SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+	output_frame_size = slot_width * runtime->channels;
+
+	clk_div = player->mclk / runtime->rate;
+	/*
+	 * For 32 bits subframe clk_div must be a multiple of 128,
+	 * for 16 bits must be a multiple of 64
+	 */
+	if ((slot_width == 32) && (clk_div % 128)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	if ((slot_width == 16) && (clk_div % 64)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Number of bits per subframe (which is one channel sample)
+	 * on output - Transfer 16 or 32 bits from FIFO
+	 */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	default:
+		dev_err(player->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * aligned to the MSB, then zeros (if fewer than 32 bits)"... ;-)
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		break;
+
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / (2 * output_frame_size));
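+	/*
+	 * Example (illustrative rates): 2 channels of 32-bit slots give an
+	 * output frame of 64 bits; with mclk = 12288000 Hz and a 48000 Hz
+	 * stream, clk_div = 256 and the divider field is 256 / (2 * 64) = 2.
+	 */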
+
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(player->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set 1-bit audio format to disabled */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+
+	/* No iec958 formatting as outputting to a DAC */
+	SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+
+	return 0;
+}
+
+/*
+ * ALSA uniperipheral iec958 controls
+ */
+static int uni_player_ctl_iec958_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+
+	return 0;
+}
+
+static int uni_player_ctl_iec958_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.iec958.status[0] = iec958->status[0];
+	ucontrol->value.iec958.status[1] = iec958->status[1];
+	ucontrol->value.iec958.status[2] = iec958->status[2];
+	ucontrol->value.iec958.status[3] = iec958->status[3];
+	mutex_unlock(&player->ctrl_lock);
+	return 0;
+}
+
+static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 =  &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	iec958->status[0] = ucontrol->value.iec958.status[0];
+	iec958->status[1] = ucontrol->value.iec958.status[1];
+	iec958->status[2] = ucontrol->value.iec958.status[2];
+	iec958->status[3] = ucontrol->value.iec958.status[3];
+	mutex_unlock(&player->ctrl_lock);
+
+	uni_player_set_channel_status(player, NULL);
+
+	return 0;
+}
+
+static struct snd_kcontrol_new uni_player_iec958_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+	.info = uni_player_ctl_iec958_info,
+	.get = uni_player_ctl_iec958_get,
+	.put = uni_player_ctl_iec958_put,
+};
+
+/*
+ * uniperif rate adjustment control
+ */
+static int snd_sti_clk_adjustment_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = UNIPERIF_PLAYER_CLK_ADJ_MIN;
+	uinfo->value.integer.max = UNIPERIF_PLAYER_CLK_ADJ_MAX;
+	uinfo->value.integer.step = 1;
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.integer.value[0] = player->clk_adj;
+	mutex_unlock(&player->ctrl_lock);
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret = 0;
+
+	if ((ucontrol->value.integer.value[0] < UNIPERIF_PLAYER_CLK_ADJ_MIN) ||
+	    (ucontrol->value.integer.value[0] > UNIPERIF_PLAYER_CLK_ADJ_MAX))
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	player->clk_adj = ucontrol->value.integer.value[0];
+
+	if (player->mclk)
+		ret = uni_player_clk_set_rate(player, player->mclk);
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static struct snd_kcontrol_new uni_player_clk_adj_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = "PCM Playback Oversampling Freq. Adjustment",
+	.info = snd_sti_clk_adjustment_info,
+	.get = snd_sti_clk_adjustment_get,
+	.put = snd_sti_clk_adjustment_put,
+};
+
+static struct snd_kcontrol_new *snd_sti_pcm_ctl[] = {
+	&uni_player_clk_adj_ctl,
+};
+
+static struct snd_kcontrol_new *snd_sti_iec_ctl[] = {
+	&uni_player_iec958_ctl,
+	&uni_player_clk_adj_ctl,
+};
+
+static int uni_player_startup(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	player->clk_adj = 0;
+
+	return 0;
+}
+
+static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+				 unsigned int freq, int dir)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret;
+
+	if (dir == SND_SOC_CLOCK_IN)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	ret = uni_player_clk_set_rate(player, freq);
+	if (!ret)
+		player->mclk = freq;
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static int uni_player_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state %d", __func__,
+			player->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	} else {
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+	}
+
+	/* Trigger limit must be an even number */
+	if ((trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
+		dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
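+	/*
+	 * Example (illustrative): a stereo stream gives transfer_size =
+	 * 2 * UNIPERIF_FIFO_FRAMES = 8 cells, so older IPs assert DREQ at
+	 * 70 - 8 = 62 free cells while newer IPs use 8 directly.
+	 */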
+
+	/* Uniperipheral setup depends on player type */
+	switch (player->info->player_type) {
+	case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
+		ret = uni_player_prepare_pcm(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	default:
+		dev_err(player->dev, "invalid player type");
+		return -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (player->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	}
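+	/*
+	 * Summary of the mapping above: NB/IB selects the bit clock edge
+	 * (normal = rising, inverted = falling) and NF/IF selects the LR
+	 * polarity (normal = low, inverted = high).
+	 */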
+
+	switch (player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	return reset_player(player);
+}
+
+static int uni_player_start(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	ret = clk_prepare_enable(player->clk);
+	if (ret) {
+		dev_err(player->dev, "%s: Failed to enable clock", __func__);
+		return ret;
+	}
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(player, GET_UNIPERIF_ITS(player));
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(player);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
+
+	/* Enable underflow recovery interrupts */
+	if (player->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
+	}
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The driver does not use the IEC61937 features of the uniperipheral
+	 * hardware. Instead it performs IEC61937 formatting in software and
+	 * inserts it directly into the audio data stream. As such, when
+	 * encoded mode is selected, linear pcm mode is still used, with the
+	 * channel status bits set for encoded mode and the validity bits set.
+	 */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(player);
+
+	/*
+	 * If iec958 formatting is required for hdmi or spdif, then it must be
+	 * enabled after the operation mode is set. If set prior to this, it
+	 * will not take effect and will hang the player.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 &&
+	    UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
+		SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	/* Force channel status update (no update if the clock is disabled) */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+
+	/* Update state to started */
+	player->state = UNIPERIF_STATE_STARTED;
+
+	return 0;
+}
+
+static int uni_player_stop(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should not be in stopped state */
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the player off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(player);
+
+	/* Soft reset the player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(player, GET_UNIPERIF_ITM(player));
+
+	/* Disable clock */
+	clk_disable_unprepare(player->clk);
+
+	/* Update state to stopped and return */
+	player->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+int uni_player_resume(struct uniperif *player)
+{
+	int ret;
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_resume);
+
+static int uni_player_trigger(struct snd_pcm_substream *substream,
+			      int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return uni_player_start(player);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return uni_player_stop(player);
+	case SNDRV_PCM_TRIGGER_RESUME:
+		return uni_player_resume(player);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_player_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	if (player->state != UNIPERIF_STATE_STOPPED)
+		/* Stop the player */
+		uni_player_stop(player);
+}
+
+static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
+					struct uniperif *player)
+{
+	int bit_offset;
+	struct device_node *node = pdev->dev.of_node;
+	struct regmap *regmap;
+
+	bit_offset = SYS_CFG_AUDIO_GLUE_PCM_CLKX + player->info->id;
+
+	regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
+
+	if (!IS_ERR(regmap)) {
+		struct reg_field regfield =
+			REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
+
+		player->clk_sel = regmap_field_alloc(regmap, regfield);
+	} else {
+		dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
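+/*
+ * Illustrative device tree fragment consumed by uni_player_parse_dt()
+ * below; node and label names are made up, and only the properties this
+ * function reads are shown:
+ *
+ *	uni_player: uni-player {
+ *		st,syscfg = <&syscfg_core>;
+ *		version = <5>;
+ *		uniperiph-id = <0>;
+ *		mode = "pcm";
+ *	};
+ */
+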
+static int uni_player_parse_dt(struct platform_device *pdev,
+			       struct uniperif *player)
+{
+	struct uniperif_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *pnode = pdev->dev.of_node;
+	const char *mode;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	of_property_read_u32(pnode, "version", &player->ver);
+	if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+		dev_err(dev, "Unknown uniperipheral version");
+		return -EINVAL;
+	}
+	/* Underflow recovery is only supported on later ip revisions */
+	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		info->underflow_enabled = 1;
+
+	of_property_read_u32(pnode, "uniperiph-id", &info->id);
+
+	/* Read the device mode property */
+	if (of_property_read_string(pnode, "mode", &mode)) {
+		dev_err(dev, "uniperipheral mode not defined");
+		return -EINVAL;
+	}
+
+	if (strcasecmp(mode, "hdmi") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
+	else if (strcasecmp(mode, "pcm") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
+	else if (strcasecmp(mode, "spdif") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
+	else
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
+
+	/* Save the info structure */
+	player->info = info;
+
+	/* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
+	if (uni_player_parse_dt_clk_glue(pdev, player))
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_player_dai_ops = {
+		.startup = uni_player_startup,
+		.shutdown = uni_player_shutdown,
+		.prepare = uni_player_prepare,
+		.trigger = uni_player_trigger,
+		.hw_params = sti_uniperiph_dai_hw_params,
+		.set_fmt = sti_uniperiph_dai_set_fmt,
+		.set_sysclk = uni_player_set_sysclk
+};
+
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *player)
+{
+	int ret = 0;
+
+	player->dev = &pdev->dev;
+	player->state = UNIPERIF_STATE_STOPPED;
+	player->hw = &uni_player_pcm_hw;
+	player->dai_ops = &uni_player_dai_ops;
+
+	ret = uni_player_parse_dt(pdev, player);
+
+	if (ret < 0) {
+		dev_err(player->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	/* Get uniperif clock resource */
+	player->clk = of_clk_get(pdev->dev.of_node, 0);
+	if (IS_ERR(player->clk)) {
+		dev_err(player->dev, "Failed to get clock");
+		return PTR_ERR(player->clk);
+	}
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	ret = devm_request_irq(&pdev->dev, player->irq,
+			       uni_player_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), player);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&player->ctrl_lock);
+
+	/* Ensure that these features are disabled by default */
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
+		/* Set default iec958 status bits  */
+
+		/* Consumer, PCM, copyright, 2ch, mode 0 */
+		player->stream_settings.iec958.status[0] = 0x00;
+		/* Broadcast reception category */
+		player->stream_settings.iec958.status[1] =
+					IEC958_AES1_CON_GENERAL;
+		/* Do not take into account source or channel number */
+		player->stream_settings.iec958.status[2] =
+					IEC958_AES2_CON_SOURCE_UNSPEC;
+		/* Sampling frequency not indicated */
+		player->stream_settings.iec958.status[3] =
+					IEC958_AES3_CON_FS_NOTID;
+		/* Max sample word 24-bit, sample word length not indicated */
+		player->stream_settings.iec958.status[4] =
+					IEC958_AES4_CON_MAX_WORDLEN_24 |
+					IEC958_AES4_CON_WORDLEN_24_20;
+
+		player->num_ctrls = ARRAY_SIZE(snd_sti_iec_ctl);
+		player->snd_ctrls = snd_sti_iec_ctl[0];
+	} else {
+		player->num_ctrls = ARRAY_SIZE(snd_sti_pcm_ctl);
+		player->snd_ctrls = snd_sti_pcm_ctl[0];
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_init);
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
new file mode 100644
index 0000000..c502626
--- /dev/null
+++ b/sound/soc/sti/uniperif_reader.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Note: snd_pcm_hardware is linked to the DMA controller but is declared
+ * here to integrate the unireader capabilities in terms of rate and
+ * supported channels.
+ */
+static const struct snd_pcm_hardware uni_reader_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 96000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+/*
+ * uni_reader_irq_handler
+ * In case of error the audio stream is stopped; the stop action is
+ * protected via the PCM stream lock to avoid a race condition with the
+ * trigger callback.
+ */
+static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *reader = dev_id;
+	unsigned int status;
+
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		dev_warn(reader->dev, "unexpected IRQ");
+		return IRQ_HANDLED;
+	}
+
+	/* Get the interrupt status and clear it immediately */
+	status = GET_UNIPERIF_ITS(reader);
+	SET_UNIPERIF_ITS_BCLR(reader, status);
+
+	/* Check for fifo overflow error */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
+		dev_err(reader->dev, "FIFO error detected");
+
+		snd_pcm_stream_lock(reader->substream);
+		snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(reader->substream);
+
+		return IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_reader_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int slot_width;
+	int count = 10;
+
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+			reader->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	else
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+
+	/* Trigger limit must be an even number */
+	if ((trigger_limit % 2) ||
+	    (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
+		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
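+
+	/*
+	 * Worked example (illustrative; assumes the uniperif.h geometry of
+	 * a UNIPERIF_FIFO_SIZE-cell FIFO and UNIPERIF_FIFO_FRAMES frames,
+	 * e.g. 70 and 4): a stereo stream gives transfer_size = 2 * 4 = 8
+	 * cells, so a pre-1.0 IP raises DREQ once 70 - 8 = 62 cells are
+	 * free, while a 1.0+ IP raises it once 8 cells are free.
+	 */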
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_IB_IF:
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+		break;
+	default:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+	}
+
+	/* Force slot width to 32 in I2S mode */
+	if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
+		== SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+
+	/* Number of bits per subframe (i.e. one channel sample) on input. */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
+		break;
+	default:
+		dev_err(reader->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(reader);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * on the MSB then zeros (if less than 32 bits)"...
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
+		break;
+
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+
+	/* Data clocking (changing) on the rising edge */
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(reader, 0);
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
+
+	/* Enable underflow recovery interrupts */
+	if (reader->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
+	}
+
+	/* Reset uniperipheral reader */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
+
+	while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader) && count) {
+		udelay(5);
+		count--;
+	}
+	if (!count) {
+		dev_err(reader->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int uni_reader_start(struct uniperif *reader)
+{
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable reader interrupts (and clear possible stalled ones) */
+	SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+
+	/* Launch the reader */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(reader);
+
+	/* Update state to started */
+	reader->state = UNIPERIF_STATE_STARTED;
+	return 0;
+}
+
+static int uni_reader_stop(struct uniperif *reader)
+{
+	/* The reader should not be in stopped state */
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the reader off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(reader);
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(reader, GET_UNIPERIF_ITM(reader));
+
+	/* Update state to stopped and return */
+	reader->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+static int  uni_reader_trigger(struct snd_pcm_substream *substream,
+			       int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return  uni_reader_start(reader);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return  uni_reader_stop(reader);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_reader_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		/* Stop the reader */
+		uni_reader_stop(reader);
+	}
+}
+
+static int uni_reader_parse_dt(struct platform_device *pdev,
+			       struct uniperif *reader)
+{
+	struct uniperif_info *info;
+	struct device_node *node = pdev->dev.of_node;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	if (of_property_read_u32(node, "version", &reader->ver)) {
+		dev_err(&pdev->dev, "Unknown uniperipheral version");
+		return -EINVAL;
+	}
+
+	/* Save the info structure */
+	reader->info = info;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+		.shutdown = uni_reader_shutdown,
+		.prepare = uni_reader_prepare,
+		.trigger = uni_reader_trigger,
+		.hw_params = sti_uniperiph_dai_hw_params,
+		.set_fmt = sti_uniperiph_dai_set_fmt,
+};
+
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *reader)
+{
+	int ret = 0;
+
+	reader->dev = &pdev->dev;
+	reader->state = UNIPERIF_STATE_STOPPED;
+	reader->hw = &uni_reader_pcm_hw;
+	reader->dai_ops = &uni_reader_dai_ops;
+
+	dev_dbg(reader->dev, "%s: enter\n", __func__);
+	ret = uni_reader_parse_dt(pdev, reader);
+	if (ret < 0) {
+		dev_err(reader->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, reader->irq,
+			       uni_reader_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), reader);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ");
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_reader_init);
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index f52600b..89add13 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -133,7 +133,7 @@
 
 static int tegra20_das_probe(struct platform_device *pdev)
 {
-	struct resource *res, *region;
+	struct resource *res;
 	void __iomem *regs;
 	int ret = 0;
 
@@ -149,24 +149,9 @@
 	das->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res->start,
-					 resource_size(res), pdev->name);
-	if (!region) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err;
-	}
-
-	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err;
 	}
 
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 05f1c6e..14106fa 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -339,7 +339,7 @@
 static int tegra20_i2s_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_i2s *i2s;
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -362,24 +362,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 9141477..a0c3640 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -265,7 +265,7 @@
 static int tegra20_spdif_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_spdif *spdif;
-	struct resource *mem, *memregion, *dmareq;
+	struct resource *mem, *dmareq;
 	void __iomem *regs;
 	int ret;
 
@@ -273,45 +273,26 @@
 			     GFP_KERNEL);
 	if (!spdif) {
 		dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, spdif);
 
-	spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
+	spdif->clk_spdif_out = devm_clk_get(&pdev->dev, "spdif_out");
 	if (IS_ERR(spdif->clk_spdif_out)) {
 		pr_err("Can't retrieve spdif clock\n");
 		ret = PTR_ERR(spdif->clk_spdif_out);
-		goto err;
+		return ret;
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmareq) {
 		dev_err(&pdev->dev, "No DMA resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put;
+		return -ENODEV;
 	}
 
 	spdif->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
@@ -319,7 +300,7 @@
 	if (IS_ERR(spdif->regmap)) {
 		dev_err(&pdev->dev, "regmap init failed\n");
 		ret = PTR_ERR(spdif->regmap);
-		goto err_clk_put;
+		return ret;
 	}
 
 	spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
@@ -335,7 +316,7 @@
 	}
 
 	ret = snd_soc_register_component(&pdev->dev, &tegra20_spdif_component,
-				   &tegra20_spdif_dai, 1);
+					 &tegra20_spdif_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
 		ret = -ENOMEM;
@@ -357,16 +338,12 @@
 		tegra20_spdif_runtime_suspend(&pdev->dev);
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put:
-	clk_put(spdif->clk_spdif_out);
-err:
+
 	return ret;
 }
 
 static int tegra20_spdif_platform_remove(struct platform_device *pdev)
 {
-	struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
-
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra20_spdif_runtime_suspend(&pdev->dev);
@@ -374,8 +351,6 @@
 	tegra_pcm_platform_unregister(&pdev->dev);
 	snd_soc_unregister_component(&pdev->dev);
 
-	clk_put(spdif->clk_spdif_out);
-
 	return 0;
 }
 
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index bc94e5d..fef3b9a 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -521,7 +521,7 @@
 	const struct tegra30_ahub_soc_data *soc_data;
 	struct reset_control *rst;
 	int i;
-	struct resource *res0, *res1, *region;
+	struct resource *res0, *res1;
 	void __iomem *regs_apbif, *regs_ahub;
 	int ret = 0;
 
@@ -549,103 +549,67 @@
 			dev_err(&pdev->dev, "Can't get reset %s\n",
 				configlink_mods[i].rst_name);
 			ret = PTR_ERR(rst);
-			goto err;
+			return ret;
 		}
 
 		ret = reset_control_deassert(rst);
 		reset_control_put(rst);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub),
 			    GFP_KERNEL);
 	if (!ahub) {
 		dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, ahub);
 
 	ahub->soc_data = soc_data;
 	ahub->dev = &pdev->dev;
 
-	ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
+	ahub->clk_d_audio = devm_clk_get(&pdev->dev, "d_audio");
 	if (IS_ERR(ahub->clk_d_audio)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub d_audio clock\n");
 		ret = PTR_ERR(ahub->clk_d_audio);
-		goto err;
+		return ret;
 	}
 
-	ahub->clk_apbif = clk_get(&pdev->dev, "apbif");
+	ahub->clk_apbif = devm_clk_get(&pdev->dev, "apbif");
 	if (IS_ERR(ahub->clk_apbif)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub apbif clock\n");
 		ret = PTR_ERR(ahub->clk_apbif);
-		goto err_clk_put_d_audio;
+		return ret;
 	}
 
 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res0) {
-		dev_err(&pdev->dev, "No apbif memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
+	regs_apbif = devm_ioremap_resource(&pdev->dev, res0);
+	if (IS_ERR(regs_apbif))
+		return PTR_ERR(regs_apbif);
 
-	region = devm_request_mem_region(&pdev->dev, res0->start,
-					 resource_size(res0), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region apbif failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
 	ahub->apbif_addr = res0->start;
 
-	regs_apbif = devm_ioremap(&pdev->dev, res0->start,
-				  resource_size(res0));
-	if (!regs_apbif) {
-		dev_err(&pdev->dev, "ioremap apbif failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
-
 	ahub->regmap_apbif = devm_regmap_init_mmio(&pdev->dev, regs_apbif,
 					&tegra30_ahub_apbif_regmap_config);
 	if (IS_ERR(ahub->regmap_apbif)) {
 		dev_err(&pdev->dev, "apbif regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_apbif);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_apbif, true);
 
 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res1) {
-		dev_err(&pdev->dev, "No ahub memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res1->start,
-					 resource_size(res1), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region ahub failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
-
-	regs_ahub = devm_ioremap(&pdev->dev, res1->start,
-				 resource_size(res1));
-	if (!regs_ahub) {
-		dev_err(&pdev->dev, "ioremap ahub failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
+	regs_ahub = devm_ioremap_resource(&pdev->dev, res1);
+	if (IS_ERR(regs_ahub))
+		return PTR_ERR(regs_ahub);
 
 	ahub->regmap_ahub = devm_regmap_init_mmio(&pdev->dev, regs_ahub,
 					&tegra30_ahub_ahub_regmap_config);
 	if (IS_ERR(ahub->regmap_ahub)) {
 		dev_err(&pdev->dev, "ahub regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_ahub);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_ahub, true);
 
@@ -662,12 +626,7 @@
 
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put_apbif:
-	clk_put(ahub->clk_apbif);
-err_clk_put_d_audio:
-	clk_put(ahub->clk_d_audio);
-	ahub = NULL;
-err:
+
 	return ret;
 }
 
@@ -680,11 +639,6 @@
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra30_ahub_runtime_suspend(&pdev->dev);
 
-	clk_put(ahub->clk_apbif);
-	clk_put(ahub->clk_d_audio);
-
-	ahub = NULL;
-
 	return 0;
 }
 
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index fe36375..8e55583 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -379,7 +379,7 @@
 	struct tegra30_i2s *i2s;
 	const struct of_device_id *match;
 	u32 cif_ids[2];
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -419,24 +419,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 88eacfd..a8f705b 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -411,13 +411,8 @@
 
 static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &txx9aclc_soc_platform);
-}
-
-static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &txx9aclc_soc_platform);
 }
 
 static struct platform_driver txx9aclc_pcm_driver = {
@@ -426,7 +421,6 @@
 	},
 
 	.probe = txx9aclc_soc_platform_probe,
-	.remove = txx9aclc_soc_platform_remove,
 };
 
 module_platform_driver(txx9aclc_pcm_driver);
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 978f2d7..f5df08d 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -773,20 +773,22 @@
 	}
 	prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50);
 
-	drvdata->pclk = clk_get(&pdev->dev, "apb_pclk");
+	drvdata->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
 	if (IS_ERR(drvdata->pclk)) {
 		ret = (int)PTR_ERR(drvdata->pclk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get of pclk failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get of pclk failed (%d)!\n",
 			__func__, ret);
-		goto err_pclk;
+		return ret;
 	}
 
-	drvdata->clk = clk_get(&pdev->dev, NULL);
+	drvdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(drvdata->clk)) {
 		ret = (int)PTR_ERR(drvdata->clk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get failed (%d)!\n",
 			__func__, ret);
-		goto err_clk;
+		return ret;
 	}
 
 	ret = ux500_msp_i2s_init_msp(pdev, &drvdata->msp,
@@ -795,7 +797,7 @@
 		dev_err(&pdev->dev,
 			"%s: ERROR: Failed to init MSP-struct (%d)!",
 			__func__, ret);
-		goto err_init_msp;
+		return ret;
 	}
 	dev_set_drvdata(&pdev->dev, drvdata);
 
@@ -804,7 +806,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Error: %s: Failed to register MSP%d!\n",
 			__func__, drvdata->msp->id);
-		goto err_init_msp;
+		return ret;
 	}
 
 	ret = ux500_pcm_register_platform(pdev);
@@ -819,13 +821,6 @@
 
 err_reg_plat:
 	snd_soc_unregister_component(&pdev->dev);
-err_init_msp:
-	clk_put(drvdata->clk);
-err_clk:
-	clk_put(drvdata->pclk);
-err_pclk:
-	devm_regulator_put(drvdata->reg_vape);
-
 	return ret;
 }
 
@@ -837,12 +832,8 @@
 
 	snd_soc_unregister_component(&pdev->dev);
 
-	devm_regulator_put(drvdata->reg_vape);
 	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s");
 
-	clk_put(drvdata->clk);
-	clk_put(drvdata->pclk);
-
 	ux500_msp_i2s_cleanup_msp(pdev, drvdata->msp);
 
 	return 0;
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 1cfb19e..8382ffa 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -75,7 +75,7 @@
 	 * stream in the pcm_close callback it synchronizes with the interrupt
 	 * handler by means of synchronize_rcu call.
 	 */
-	struct snd_pcm_substream *tx_substream;
+	struct snd_pcm_substream __rcu *tx_substream;
 	unsigned (*tx_fn)(struct xtfpga_i2s *i2s,
 			  struct snd_pcm_runtime *runtime,
 			  unsigned tx_ptr);
@@ -474,11 +474,6 @@
 						     card->dev, size, size);
 }
 
-static void xtfpga_pcm_free(struct snd_pcm *pcm)
-{
-	snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
 static const struct snd_pcm_ops xtfpga_pcm_ops = {
 	.open		= xtfpga_pcm_open,
 	.close		= xtfpga_pcm_close,
@@ -490,7 +485,6 @@
 
 static const struct snd_soc_platform_driver xtfpga_soc_platform = {
 	.pcm_new	= xtfpga_pcm_new,
-	.pcm_free	= xtfpga_pcm_free,
 	.ops		= &xtfpga_pcm_ops,
 };
 
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 1930c42..1cad93d 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -380,7 +380,7 @@
 	struct zx_i2s_info *zx_i2s;
 	int ret;
 
-	zx_i2s =  kzalloc(sizeof(*zx_i2s), GFP_KERNEL);
+	zx_i2s = devm_kzalloc(&pdev->dev, sizeof(*zx_i2s), GFP_KERNEL);
 	if (!zx_i2s)
 		return -ENOMEM;
 
@@ -401,8 +401,8 @@
 	writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
 	platform_set_drvdata(pdev, zx_i2s);
 
-	ret = snd_soc_register_component(&pdev->dev, &zx_i2s_component,
-					 &zx_i2s_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev, &zx_i2s_component,
+					      &zx_i2s_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
 		return ret;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 0450593..18f5664 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -365,13 +365,15 @@
 	}
 
 	mutex_init(&chip->mutex);
-	init_rwsem(&chip->shutdown_rwsem);
+	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
 	chip->card = card;
 	chip->setup = device_setup[idx];
 	chip->autoclock = autoclock;
-	chip->probing = 1;
+	atomic_set(&chip->active, 1); /* avoid autopm during probing */
+	atomic_set(&chip->usage_count, 0);
+	atomic_set(&chip->shutdown, 0);
 
 	chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
 			      le16_to_cpu(dev->descriptor.idProduct));
@@ -495,13 +497,13 @@
 	mutex_lock(&register_mutex);
 	for (i = 0; i < SNDRV_CARDS; i++) {
 		if (usb_chip[i] && usb_chip[i]->dev == dev) {
-			if (usb_chip[i]->shutdown) {
+			if (atomic_read(&usb_chip[i]->shutdown)) {
 				dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
 				err = -EIO;
 				goto __error;
 			}
 			chip = usb_chip[i];
-			chip->probing = 1;
+			atomic_inc(&chip->active); /* avoid autopm */
 			break;
 		}
 	}
@@ -561,8 +563,8 @@
 
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
-	chip->probing = 0;
 	usb_set_intfdata(intf, chip);
+	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
 
@@ -570,7 +572,7 @@
 	if (chip) {
 		if (!chip->num_interfaces)
 			snd_card_free(chip->card);
-		chip->probing = 0;
+		atomic_dec(&chip->active);
 	}
 	mutex_unlock(&register_mutex);
 	return err;
@@ -585,23 +587,23 @@
 	struct snd_usb_audio *chip = usb_get_intfdata(intf);
 	struct snd_card *card;
 	struct list_head *p;
-	bool was_shutdown;
 
 	if (chip == (void *)-1L)
 		return;
 
 	card = chip->card;
-	down_write(&chip->shutdown_rwsem);
-	was_shutdown = chip->shutdown;
-	chip->shutdown = 1;
-	up_write(&chip->shutdown_rwsem);
 
 	mutex_lock(&register_mutex);
-	if (!was_shutdown) {
+	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
 		struct snd_usb_endpoint *ep;
 		struct usb_mixer_interface *mixer;
 
+		/* wait until all pending tasks are done;
+		 * they are protected by snd_usb_lock_shutdown()
+		 */
+		wait_event(chip->shutdown_wait,
+			   !atomic_read(&chip->usage_count));
 		snd_card_disconnect(card);
 		/* release the pcm resources */
 		list_for_each_entry(as, &chip->pcm_list, list) {
@@ -631,28 +633,50 @@
 	}
 }
 
+/* lock the shutdown (disconnect) task and autoresume */
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
+{
+	int err;
+
+	atomic_inc(&chip->usage_count);
+	if (atomic_read(&chip->shutdown)) {
+		err = -EIO;
+		goto error;
+	}
+	err = snd_usb_autoresume(chip);
+	if (err < 0)
+		goto error;
+	return 0;
+
+ error:
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+	return err;
+}
+
+/* autosuspend and unlock the shutdown */
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
+{
+	snd_usb_autosuspend(chip);
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+}
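+
+/*
+ * Typical caller pattern (a sketch, mirroring the conversions later in this
+ * series): bracket any access to chip->dev with the pair so that disconnect
+ * waits for the access to finish:
+ *
+ *	err = snd_usb_lock_shutdown(chip);
+ *	if (err < 0)
+ *		return err;
+ *	...access the device...
+ *	snd_usb_unlock_shutdown(chip);
+ */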
+
 #ifdef CONFIG_PM
 
 int snd_usb_autoresume(struct snd_usb_audio *chip)
 {
-	int err = -ENODEV;
-
-	down_read(&chip->shutdown_rwsem);
-	if (chip->probing || chip->in_pm)
-		err = 0;
-	else if (!chip->shutdown)
-		err = usb_autopm_get_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
-
-	return err;
+	if (atomic_read(&chip->shutdown))
+		return -EIO;
+	if (atomic_inc_return(&chip->active) == 1)
+		return usb_autopm_get_interface(chip->pm_intf);
+	return 0;
 }
 
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
-	down_read(&chip->shutdown_rwsem);
-	if (!chip->shutdown && !chip->probing && !chip->in_pm)
+	if (atomic_dec_and_test(&chip->active))
 		usb_autopm_put_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
 }
 
 static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
@@ -665,30 +689,20 @@
 	if (chip == (void *)-1L)
 		return 0;
 
-	if (!PMSG_IS_AUTO(message)) {
+	chip->autosuspended = !!PMSG_IS_AUTO(message);
+	if (!chip->autosuspended)
 		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
-		if (!chip->num_suspended_intf++) {
-			list_for_each_entry(as, &chip->pcm_list, list) {
-				snd_pcm_suspend_all(as->pcm);
-				as->substream[0].need_setup_ep =
-					as->substream[1].need_setup_ep = true;
-			}
-			list_for_each(p, &chip->midi_list) {
-				snd_usbmidi_suspend(p);
-			}
+	if (!chip->num_suspended_intf++) {
+		list_for_each_entry(as, &chip->pcm_list, list) {
+			snd_pcm_suspend_all(as->pcm);
+			as->substream[0].need_setup_ep =
+				as->substream[1].need_setup_ep = true;
 		}
-	} else {
-		/*
-		 * otherwise we keep the rest of the system in the dark
-		 * to keep this transparent
-		 */
-		if (!chip->num_suspended_intf++)
-			chip->autosuspended = 1;
-	}
-
-	if (chip->num_suspended_intf == 1)
+		list_for_each(p, &chip->midi_list)
+			snd_usbmidi_suspend(p);
 		list_for_each_entry(mixer, &chip->mixer_list, list)
 			snd_usb_mixer_suspend(mixer);
+	}
 
 	return 0;
 }
@@ -705,7 +719,7 @@
 	if (--chip->num_suspended_intf)
 		return 0;
 
-	chip->in_pm = 1;
+	atomic_inc(&chip->active); /* avoid autopm */
 	/*
 	 * ALSA leaves material resumption to user space
 	 * we just notify and restart the mixers
@@ -725,7 +739,7 @@
 	chip->autosuspended = 0;
 
 err_out:
-	chip->in_pm = 0;
+	atomic_dec(&chip->active); /* allow autopm after this point */
 	return err;
 }
 
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 03b0744..e6f7189 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -355,8 +355,10 @@
 	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
 		     urb->status == -ENODEV ||		/* device removed */
 		     urb->status == -ECONNRESET ||	/* unlinked */
-		     urb->status == -ESHUTDOWN ||	/* device disabled */
-		     ep->chip->shutdown))		/* device disconnected */
+		     urb->status == -ESHUTDOWN))	/* device disabled */
+		goto exit_clear;
+	/* device disconnected */
+	if (unlikely(atomic_read(&ep->chip->shutdown)))
 		goto exit_clear;
 
 	if (usb_pipeout(ep->pipe)) {
@@ -529,7 +531,7 @@
 {
 	unsigned int i;
 
-	if (!force && ep->chip->shutdown) /* to be sure... */
+	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
 		return -EBADFD;
 
 	clear_bit(EP_FLAG_RUNNING, &ep->flags);
@@ -868,7 +870,7 @@
 	int err;
 	unsigned int i;
 
-	if (ep->chip->shutdown)
+	if (atomic_read(&ep->chip->shutdown))
 		return -EBADFD;
 
 	/* already running? */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6b3acba..f494dce 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -282,6 +282,21 @@
 	return val;
 }
 
+static int uac2_ctl_value_size(int val_type)
+{
+	switch (val_type) {
+	case USB_MIXER_S32:
+	case USB_MIXER_U32:
+		return 4;
+	case USB_MIXER_S16:
+	case USB_MIXER_U16:
+		return 2;
+	default:
+		return 1;
+	}
+	return 0; /* unreachable */
+}
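+
+/*
+ * Example (illustrative): the UAC2 Delay control is remapped to
+ * USB_MIXER_U32 below, so its UAC2_CS_CUR requests now transfer 4 bytes
+ * instead of the fixed 2 bytes assumed before.
+ */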
+
 
 /*
  * retrieve a mixer value
@@ -296,14 +311,11 @@
 	int timeout = 10;
 	int idx = 0, err;
 
-	err = snd_usb_autoresume(chip);
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
 
-	down_read(&chip->shutdown_rwsem);
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
 				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
@@ -319,8 +331,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -328,14 +339,14 @@
 			    int validx, int *value_ret)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2 + 3 * sizeof(__u16)]; /* enough space for one range */
+	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
 	unsigned char *val;
 	int idx = 0, ret, size;
 	__u8 bRequest;
 
 	if (request == UAC_GET_CUR) {
 		bRequest = UAC2_CS_CUR;
-		size = sizeof(__u16);
+		size = uac2_ctl_value_size(cval->val_type);
 	} else {
 		bRequest = UAC2_CS_RANGE;
 		size = sizeof(buf);
@@ -343,21 +354,15 @@
 
 	memset(buf, 0, sizeof(buf));
 
-	ret = snd_usb_autoresume(chip) ? -EIO : 0;
+	ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
 	if (ret)
 		goto error;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		ret = -ENODEV;
-	} else {
-		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
-		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+	idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+	ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
 			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
 			      validx, idx, buf, size);
-	}
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 
 	if (ret < 0) {
 error:
@@ -446,7 +451,7 @@
 				int request, int validx, int value_set)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2];
+	unsigned char buf[4];
 	int idx = 0, val_len, err, timeout = 10;
 
 	validx += cval->idx_off;
@@ -454,8 +459,7 @@
 	if (cval->head.mixer->protocol == UAC_VERSION_1) {
 		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
 	} else { /* UAC_VERSION_2 */
-		/* audio class v2 controls are always 2 bytes in size */
-		val_len = sizeof(__u16);
+		val_len = uac2_ctl_value_size(cval->val_type);
 
 		/* FIXME */
 		if (request != UAC_SET_CUR) {
@@ -469,13 +473,14 @@
 	value_set = convert_bytes_value(cval, value_set);
 	buf[0] = value_set & 0xff;
 	buf[1] = (value_set >> 8) & 0xff;
-	err = snd_usb_autoresume(chip);
+	buf[2] = (value_set >> 16) & 0xff;
+	buf[3] = (value_set >> 24) & 0xff;
+
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
-	down_read(&chip->shutdown_rwsem);
+
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev,
 				    usb_sndctrlpipe(chip->dev, 0), request,
@@ -490,8 +495,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -715,15 +719,21 @@
 				term->name = d->iTerminal;
 			} else { /* UAC_VERSION_2 */
 				struct uac2_input_terminal_descriptor *d = p1;
+
+				/* call recursively to verify that the
+				 * referenced clock entity is valid */
+				err = check_input_term(state, d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				/* save input term properties after recursion,
+				 * to ensure they are not overridden by the
+				 * recursive calls */
+				term->id = id;
 				term->type = le16_to_cpu(d->wTerminalType);
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
-
-				/* call recursively to get the clock selectors */
-				err = check_input_term(state, d->bCSourceID, term);
-				if (err < 0)
-					return err;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -798,24 +808,25 @@
 /* feature unit control information */
 struct usb_feature_control_info {
 	const char *name;
-	unsigned int type;	/* control type (mute, volume, etc.) */
+	int type;	/* data type for uac1 */
+	int type_uac2;	/* data type for uac2 if different from uac1, else -1 */
 };
 
 static struct usb_feature_control_info audio_feature_info[] = {
-	{ "Mute",			USB_MIXER_INV_BOOLEAN },
-	{ "Volume",			USB_MIXER_S16 },
-	{ "Tone Control - Bass",	USB_MIXER_S8 },
-	{ "Tone Control - Mid",		USB_MIXER_S8 },
-	{ "Tone Control - Treble",	USB_MIXER_S8 },
-	{ "Graphic Equalizer",		USB_MIXER_S8 }, /* FIXME: not implemeted yet */
-	{ "Auto Gain Control",		USB_MIXER_BOOLEAN },
-	{ "Delay Control",		USB_MIXER_U16 }, /* FIXME: U32 in UAC2 */
-	{ "Bass Boost",			USB_MIXER_BOOLEAN },
-	{ "Loudness",			USB_MIXER_BOOLEAN },
+	{ "Mute",			USB_MIXER_INV_BOOLEAN, -1 },
+	{ "Volume",			USB_MIXER_S16, -1 },
+	{ "Tone Control - Bass",	USB_MIXER_S8, -1 },
+	{ "Tone Control - Mid",		USB_MIXER_S8, -1 },
+	{ "Tone Control - Treble",	USB_MIXER_S8, -1 },
+	{ "Graphic Equalizer",		USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
+	{ "Auto Gain Control",		USB_MIXER_BOOLEAN, -1 },
+	{ "Delay Control",		USB_MIXER_U16, USB_MIXER_U32 },
+	{ "Bass Boost",			USB_MIXER_BOOLEAN, -1 },
+	{ "Loudness",			USB_MIXER_BOOLEAN, -1 },
 	/* UAC2 specific */
-	{ "Input Gain Control",		USB_MIXER_S16 },
-	{ "Input Gain Pad Control",	USB_MIXER_S16 },
-	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN },
+	{ "Input Gain Control",		USB_MIXER_S16, -1 },
+	{ "Input Gain Pad Control",	USB_MIXER_S16, -1 },
+	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN, -1 },
 };
 
 /* private_free callback */
@@ -1215,6 +1226,7 @@
 			      int readonly_mask)
 {
 	struct uac_feature_unit_descriptor *desc = raw_desc;
+	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
 	int nameid = uac_feature_unit_iFeature(desc);
@@ -1240,7 +1252,13 @@
 	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
 	cval->control = control;
 	cval->cmask = ctl_mask;
-	cval->val_type = audio_feature_info[control-1].type;
+	ctl_info = &audio_feature_info[control-1];
+	if (state->mixer->protocol == UAC_VERSION_1)
+		cval->val_type = ctl_info->type;
+	else /* UAC_VERSION_2 */
+		cval->val_type = ctl_info->type_uac2 >= 0 ?
+			ctl_info->type_uac2 : ctl_info->type;
+
 	if (ctl_mask == 0) {
 		cval->channels = 1;	/* master channel */
 		cval->master_readonly = readonly_mask;
@@ -2522,7 +2540,7 @@
 		for (c = 0; c < MAX_CHANNELS; c++) {
 			if (!(cval->cmask & (1 << c)))
 				continue;
-			if (cval->cached & (1 << c)) {
+			if (cval->cached & (1 << (c + 1))) {
 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
 							cval->cache_val[idx]);
 				if (err < 0)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index d3268f0..3417ef3 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -33,6 +33,8 @@
 	USB_MIXER_U8,
 	USB_MIXER_S16,
 	USB_MIXER_U16,
+	USB_MIXER_S32,
+	USB_MIXER_U32,
 };
 
 typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 337c317..d3608c0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -308,11 +308,10 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+
 	if (chip->usb_id == USB_ID(0x041e, 0x3042))
 		err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
@@ -329,8 +328,7 @@
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      value, index + 2, NULL, 0);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -441,16 +439,15 @@
 
 	for (i = 0; jacks[i].name; ++i) {
 		snd_iprintf(buffer, "%s: ", jacks[i].name);
-		down_read(&mixer->chip->shutdown_rwsem);
-		if (mixer->chip->shutdown)
-			err = 0;
-		else
-			err = snd_usb_ctl_msg(mixer->chip->dev,
+		err = snd_usb_lock_shutdown(mixer->chip);
+		if (err < 0)
+			return;
+		err = snd_usb_ctl_msg(mixer->chip->dev,
 				      usb_rcvctrlpipe(mixer->chip->dev, 0),
 				      UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
 				      USB_RECIP_INTERFACE, 0,
 				      jacks[i].unitid << 8, buf, 3);
-		up_read(&mixer->chip->shutdown_rwsem);
+		snd_usb_unlock_shutdown(mixer->chip);
 		if (err == 3 && (buf[0] == 3 || buf[0] == 6))
 			snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
 		else
@@ -481,11 +478,9 @@
 	int err;
 	unsigned char buf[2];
 
-	down_read(&chip->shutdown_rwsem);
-	if (mixer->chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	buf[0] = 0x01;
 	buf[1] = value ? 0x02 : 0x01;
@@ -493,8 +488,7 @@
 		      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
 		      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
 		      0x0400, 0x0e00, buf, 2);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -554,15 +548,14 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x08,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      50, 0, &status, 1);
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -623,11 +616,9 @@
 	int err;
 	unsigned char buff[3];
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto err;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	/* Prepare for magic command to toggle clock source */
 	err = snd_usb_ctl_msg(chip->dev,
@@ -683,7 +674,7 @@
 		goto err;
 
 err:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -778,15 +769,14 @@
 	unsigned int pval = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
-				      (pval >> 16) & 0xff,
-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-				      pval >> 24, pval & 0xffff, NULL, 0, 1000);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
+			      (pval >> 16) & 0xff,
+			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+			      pval >> 24, pval & 0xffff, NULL, 0, 1000);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -944,18 +934,17 @@
 	value[0] = pval >> 24;
 	value[1] = 0;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
-				      usb_sndctrlpipe(chip->dev, 0),
-				      UAC_SET_CUR,
-				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
-				      pval & 0xff00,
-				      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
-				      value, 2);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
+			      usb_sndctrlpipe(chip->dev, 0),
+			      UAC_SET_CUR,
+			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+			      pval & 0xff00,
+			      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
+			      value, 2);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1519,11 +1508,9 @@
 	unsigned char data[3];
 	int rate;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	ucontrol->value.iec958.status[0] = kcontrol->private_value & 0xff;
 	ucontrol->value.iec958.status[1] = (kcontrol->private_value >> 8) & 0xff;
@@ -1551,7 +1538,7 @@
 
 	err = 0;
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1562,11 +1549,9 @@
 	u8 reg;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
 	err = snd_usb_ctl_msg(chip->dev,
@@ -1594,7 +1579,7 @@
 		goto end;
 
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1650,11 +1635,9 @@
 	u8 reg = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	err = snd_usb_ctl_msg(chip->dev,
 			usb_sndctrlpipe(chip->dev, 0),
@@ -1665,8 +1648,7 @@
 			NULL,
 			0);
 
- end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b4ef410..cdac517 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -80,7 +80,7 @@
 	unsigned int hwptr_done;
 
 	subs = (struct snd_usb_substream *)substream->runtime->private_data;
-	if (subs->stream->chip->shutdown)
+	if (atomic_read(&subs->stream->chip->shutdown))
 		return SNDRV_PCM_POS_XRUN;
 	spin_lock(&subs->lock);
 	hwptr_done = subs->hwptr_done;
@@ -391,6 +391,20 @@
 	 */
 	attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
 
+	if ((is_playback && (attr != USB_ENDPOINT_SYNC_ASYNC)) ||
+		(!is_playback && (attr != USB_ENDPOINT_SYNC_ADAPTIVE))) {
+
+		/*
+		 * In these modes the notion of sync_endpoint is irrelevant.
+		 * Reset pointers to avoid using stale data from previously
+		 * used settings, e.g. when configuration and endpoints were
+		 * changed
+		 */
+
+		subs->sync_endpoint = NULL;
+		subs->data_endpoint->sync_master = NULL;
+	}
+
 	err = set_sync_ep_implicit_fb_quirk(subs, dev, altsd, attr);
 	if (err < 0)
 		return err;
@@ -398,10 +412,17 @@
 	if (altsd->bNumEndpoints < 2)
 		return 0;
 
-	if ((is_playback && attr != USB_ENDPOINT_SYNC_ASYNC) ||
+	if ((is_playback && (attr == USB_ENDPOINT_SYNC_SYNC ||
+			     attr == USB_ENDPOINT_SYNC_ADAPTIVE)) ||
 	    (!is_playback && attr != USB_ENDPOINT_SYNC_ADAPTIVE))
 		return 0;
 
+	/*
+	 * In case of an illegal SYNC_NONE OUT endpoint, keep going and try to
+	 * find a sync endpoint anyway, as on the M-Audio Transit. On error,
+	 * fall back to SYNC mode and don't create a sync endpoint.
+	 */
+
 	/* check sync-pipe endpoint */
 	/* ... and check descriptor size before accessing bSynchAddress
 	   because there is a version of the SB Audigy 2 NX firmware lacking
@@ -415,6 +436,8 @@
 			   get_endpoint(alts, 1)->bmAttributes,
 			   get_endpoint(alts, 1)->bLength,
 			   get_endpoint(alts, 1)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 	ep = get_endpoint(alts, 1)->bEndpointAddress;
@@ -425,6 +448,8 @@
 			"%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
 			   fmt->iface, fmt->altsetting,
 			   is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 
@@ -436,8 +461,11 @@
 						   implicit_fb ?
 							SND_USB_ENDPOINT_TYPE_DATA :
 							SND_USB_ENDPOINT_TYPE_SYNC);
-	if (!subs->sync_endpoint)
+	if (!subs->sync_endpoint) {
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
+	}
 
 	subs->data_endpoint->sync_master = subs->sync_endpoint;
 
@@ -707,12 +735,11 @@
 		return -EINVAL;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown)
-		ret = -ENODEV;
-	else
-		ret = set_format(subs, fmt);
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
+	ret = set_format(subs, fmt);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	if (ret < 0)
 		return ret;
 
@@ -735,13 +762,12 @@
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (!subs->stream->chip->shutdown) {
+	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
 		stop_endpoints(subs, true);
 		snd_usb_endpoint_deactivate(subs->sync_endpoint);
 		snd_usb_endpoint_deactivate(subs->data_endpoint);
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
-	up_read(&subs->stream->chip->shutdown_rwsem);
 	return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
 
@@ -763,11 +789,9 @@
 		return -ENXIO;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown) {
-		ret = -ENODEV;
-		goto unlock;
-	}
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
 	if (snd_BUG_ON(!subs->data_endpoint)) {
 		ret = -EIO;
 		goto unlock;
@@ -816,7 +840,7 @@
 		ret = start_endpoints(subs, true);
 
  unlock:
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	return ret;
 }
 
@@ -1218,9 +1242,11 @@
 
 	stop_endpoints(subs, true);
 
-	if (!as->chip->shutdown && subs->interface >= 0) {
+	if (subs->interface >= 0 &&
+	    !snd_usb_lock_shutdown(subs->stream->chip)) {
 		usb_set_interface(subs->dev, subs->interface, 0);
 		subs->interface = -1;
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
 
 	subs->pcm_substream = NULL;
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 5f761ab..0ac89e2 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -46,14 +46,14 @@
 static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
 }
 
 static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%04x:%04x\n", 
 			    USB_ID_VENDOR(chip->usb_id),
 			    USB_ID_PRODUCT(chip->usb_id));
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 91d0380..33a1764 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -37,11 +37,11 @@
 	struct usb_interface *pm_intf;
 	u32 usb_id;
 	struct mutex mutex;
-	struct rw_semaphore shutdown_rwsem;
-	unsigned int shutdown:1;
-	unsigned int probing:1;
-	unsigned int in_pm:1;
 	unsigned int autosuspended:1;	
+	atomic_t active;
+	atomic_t shutdown;
+	atomic_t usage_count;
+	wait_queue_head_t shutdown_wait;
 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
 	
 	int num_interfaces;
@@ -115,4 +115,7 @@
 #define combine_triple(s)  (combine_word(s) | ((unsigned int)(s)[2] << 16))
 #define combine_quad(s)    (combine_triple(s) | ((unsigned int)(s)[3] << 24))
 
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
+
 #endif /* __USBAUDIO_H */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4fa4bc4..4252fc2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -880,15 +880,26 @@
 }
 
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
-					   size_t obj_buf_sz)
+					   size_t obj_buf_sz,
+					   const char *name)
 {
+	char tmp_name[64];
+
 	/* param validation */
 	if (!obj_buf || obj_buf_sz <= 0)
 		return NULL;
 
-	pr_debug("loading object from buffer\n");
+	if (!name) {
+		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+			 (unsigned long)obj_buf,
+			 (unsigned long)obj_buf_sz);
+		tmp_name[sizeof(tmp_name) - 1] = '\0';
+		name = tmp_name;
+	}
+	pr_debug("loading object '%s' from buffer\n", name);
 
-	return __bpf_object__open("[buffer]", obj_buf, obj_buf_sz);
+	return __bpf_object__open(name, obj_buf, obj_buf_sz);
 }
 
 int bpf_object__unload(struct bpf_object *obj)
@@ -975,6 +986,14 @@
 	return next;
 }
 
+const char *
+bpf_object__get_name(struct bpf_object *obj)
+{
+	if (!obj)
+		return NULL;
+	return obj->path;
+}
+
 struct bpf_program *
 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
 {
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index ea8adc2..f16170c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -28,12 +28,14 @@
 
 struct bpf_object *bpf_object__open(const char *path);
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
-					   size_t obj_buf_sz);
+					   size_t obj_buf_sz,
+					   const char *name);
 void bpf_object__close(struct bpf_object *object);
 
 /* Load/unload object into/from kernel */
 int bpf_object__load(struct bpf_object *obj);
 int bpf_object__unload(struct bpf_object *obj);
+const char *bpf_object__get_name(struct bpf_object *obj);
 
 struct bpf_object *bpf_object__next(struct bpf_object *prev);
 #define bpf_object__for_each_safe(pos, tmp)			\
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 347a273..2e9ce77 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -276,7 +276,11 @@
 --intr-regs::
 Capture machine state (registers) at interrupt, i.e., on counter overflows for
 each sample. List of captured registers depends on the architecture. This option
-is off by default.
+is off by default. It is possible to select the registers to sample using their
+symbolic names, e.g. on x86: ax, si. To list the available registers use
+--intr-regs=\?. To name registers, pass a comma separated list such as
+--intr-regs=ax,bx. The list of registers is architecture dependent.
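+
+For example (an illustrative session; register availability varies by
+architecture):
+
+  perf record --intr-regs=ax,bx,ip -- sleep 1
+  perf script -F ip,sym,iregs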
+
 
 --running-time::
 Record running and enabled time for read events (:S)
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 614b2c7..dc3ec78 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,7 +116,7 @@
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-	srcline, period, flags.
+	srcline, period, iregs, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c
index 0d0897f..f8dfa89 100644
--- a/tools/perf/arch/sh/util/dwarf-regs.c
+++ b/tools/perf/arch/sh/util/dwarf-regs.c
@@ -51,5 +51,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SH_MAX_REGS) ? sh_regs_table[n] : NULL;
+	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
index 92eda41..b704fdb 100644
--- a/tools/perf/arch/sparc/util/dwarf-regs.c
+++ b/tools/perf/arch/sparc/util/dwarf-regs.c
@@ -39,5 +39,5 @@
  */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
+	return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 2c55e1b..ff63649f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -2,6 +2,7 @@
 libperf-y += tsc.o
 libperf-y += pmu.o
 libperf-y += kvm-stat.o
+libperf-y += perf_regs.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index be22dd4..a08de0a 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -71,5 +71,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+	return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
new file mode 100644
index 0000000..c5db14f
--- /dev/null
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -0,0 +1,28 @@
+#include "../../perf.h"
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+	SMPL_REG(AX, PERF_REG_X86_AX),
+	SMPL_REG(BX, PERF_REG_X86_BX),
+	SMPL_REG(CX, PERF_REG_X86_CX),
+	SMPL_REG(DX, PERF_REG_X86_DX),
+	SMPL_REG(SI, PERF_REG_X86_SI),
+	SMPL_REG(DI, PERF_REG_X86_DI),
+	SMPL_REG(BP, PERF_REG_X86_BP),
+	SMPL_REG(SP, PERF_REG_X86_SP),
+	SMPL_REG(IP, PERF_REG_X86_IP),
+	SMPL_REG(FLAGS, PERF_REG_X86_FLAGS),
+	SMPL_REG(CS, PERF_REG_X86_CS),
+	SMPL_REG(SS, PERF_REG_X86_SS),
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+	SMPL_REG(R8, PERF_REG_X86_R8),
+	SMPL_REG(R9, PERF_REG_X86_R9),
+	SMPL_REG(R10, PERF_REG_X86_R10),
+	SMPL_REG(R11, PERF_REG_X86_R11),
+	SMPL_REG(R12, PERF_REG_X86_R12),
+	SMPL_REG(R13, PERF_REG_X86_R13),
+	SMPL_REG(R14, PERF_REG_X86_R14),
+	SMPL_REG(R15, PERF_REG_X86_R15),
+#endif
+	SMPL_REG_END
+};
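+
+/*
+ * Note (sketch): parse_regs() walks this table to translate names such as
+ * "ax" into their PERF_REG_X86_* bits for attr.sample_regs_intr.
+ */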
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a660022..142eeb3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -27,8 +27,10 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 #include "util/data.h"
+#include "util/perf_regs.h"
 #include "util/auxtrace.h"
 #include "util/parse-branch-options.h"
+#include "util/parse-regs-options.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -279,7 +281,7 @@
 
 	evlist__for_each(evlist, pos) {
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
+		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 				if (verbose)
 					ui__warning("%s\n", msg);
@@ -1080,8 +1082,9 @@
 		    "sample transaction flags (special events only)"),
 	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
 		    "use per-thread mmaps"),
-	OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
-		    "Sample machine registers on interrupt"),
+	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
+		    "sample selected machine registers on interrupt,"
+		    " use -I ? to list register names", parse_regs),
 	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
 		    "Record running/enabled time of read (:S) events"),
 	OPT_CALLBACK('k', "clockid", &record.opts,
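
With parse_regs() (added in util/parse-regs-options.c below), -I now takes an
optional comma-separated register list, e.g. "perf record -I ax,sp", and
"-I ?" lists the available names; with no argument the full PERF_REGS_MASK is
used. A minimal sketch of the resulting data flow, using the field names from
this patch:

	/* record.opts.sample_intr_regs is now the u64 register mask itself */
	u64 mask = opts->sample_intr_regs;	/* filled in by parse_regs() */

	if (mask) {
		/* forwarded verbatim; see the util/evsel.c hunk below */
		attr->sample_regs_intr = mask;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}
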
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 4430340..eb51325 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -6,6 +6,7 @@
 #include "util/exec_cmd.h"
 #include "util/header.h"
 #include "util/parse-options.h"
+#include "util/perf_regs.h"
 #include "util/session.h"
 #include "util/tool.h"
 #include "util/symbol.h"
@@ -46,6 +47,7 @@
 	PERF_OUTPUT_SYMOFFSET       = 1U << 11,
 	PERF_OUTPUT_SRCLINE         = 1U << 12,
 	PERF_OUTPUT_PERIOD          = 1U << 13,
+	PERF_OUTPUT_IREGS	    = 1U << 14,
 };
 
 struct output_option {
@@ -66,6 +68,7 @@
 	{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
 	{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
 	{.str = "period", .field = PERF_OUTPUT_PERIOD},
+	{.str = "iregs", .field = PERF_OUTPUT_IREGS},
 };
 
 /* default set to maintain compatibility with current format */
@@ -255,6 +258,11 @@
 					PERF_OUTPUT_PERIOD))
 		return -EINVAL;
 
+	if (PRINT_FIELD(IREGS) &&
+		perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
+					PERF_OUTPUT_IREGS))
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -352,6 +360,24 @@
 	return 0;
 }
 
+static void print_sample_iregs(union perf_event *event __maybe_unused,
+			  struct perf_sample *sample,
+			  struct thread *thread __maybe_unused,
+			  struct perf_event_attr *attr)
+{
+	struct regs_dump *regs = &sample->intr_regs;
+	uint64_t mask = attr->sample_regs_intr;
+	unsigned i = 0, r;
+
+	if (!regs)
+		return;
+
+	for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+		u64 val = regs->regs[i++];
+		printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+	}
+}
+
 static void print_sample_start(struct perf_sample *sample,
 			       struct thread *thread,
 			       struct perf_evsel *evsel)
@@ -525,6 +551,9 @@
 				     PERF_MAX_STACK_DEPTH);
 	}
 
+	if (PRINT_FIELD(IREGS))
+		print_sample_iregs(event, sample, thread, attr);
+
 	printf("\n");
 }
 
@@ -1643,7 +1672,7 @@
 		     "comma separated output fields prepend with 'type:'. "
 		     "Valid types: hw,sw,trace,raw. "
 		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-		     "addr,symoff,period,flags", parse_output_fields),
+		     "addr,symoff,period,iregs,flags", parse_output_fields),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
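
print_sample_iregs() above relies on the kernel storing the sampled register
values in ascending bit order of sample_regs_intr: the i-th set bit of the
mask pairs with intr_regs.regs[i]. The same decode, sketched without the
for_each_set_bit() helper:

	uint64_t mask = attr->sample_regs_intr;
	unsigned int i = 0, r;

	for (r = 0; r < 64; r++)
		if (mask & (1ULL << r))
			printf("%5s:0x%" PRIx64 " ",
			       perf_reg_name(r), regs->regs[i++]);
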
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index cccb4cf..90129ac 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -54,7 +54,6 @@
 	bool	     sample_time_set;
 	bool	     callgraph_set;
 	bool	     period;
-	bool	     sample_intr_regs;
 	bool	     running_time;
 	bool	     full_auxtrace;
 	bool	     auxtrace_snapshot_mode;
@@ -64,6 +63,7 @@
 	unsigned int auxtrace_mmap_pages;
 	unsigned int user_freq;
 	u64          branch_stack;
+	u64	     sample_intr_regs;
 	u64	     default_interval;
 	u64	     user_interval;
 	size_t	     auxtrace_snapshot_size;
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index a337356..52d5597 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -26,7 +26,7 @@
 {
 	struct bpf_object *obj;
 
-	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz);
+	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
 	if (!obj)
 		return -1;
 	bpf_object__close(obj);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e912856..349bc96 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -75,6 +75,7 @@
 libperf-y += srcline.o
 libperf-y += data.o
 libperf-$(CONFIG_X86) += tsc.o
+libperf-$(CONFIG_AUXTRACE) += tsc.o
 libperf-y += cloexec.o
 libperf-y += thread-stack.o
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
@@ -82,6 +83,7 @@
 libperf-$(CONFIG_AUXTRACE) += intel-pt.o
 libperf-$(CONFIG_AUXTRACE) += intel-bts.o
 libperf-y += parse-branch-options.o
+libperf-y += parse-regs-options.o
 
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-file.o
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8d00039..d51a520 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1181,6 +1181,10 @@
 		if (evsel->filter == NULL)
 			continue;
 
+		/*
+		 * Filters only work for tracepoint events, which don't have a
+		 * cpu limit, so the evlist and the evsel should always be the same.
+		 */
 		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
 		if (err) {
 			*err_evsel = evsel;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bac25f4..c53f791 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -787,7 +787,7 @@
 		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
 
 	if (opts->sample_intr_regs) {
-		attr->sample_regs_intr = PERF_REGS_MASK;
+		attr->sample_regs_intr = opts->sample_intr_regs;
 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
 	}
 
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 240730d..2386322 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -4,6 +4,7 @@
 inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
 
 $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+	$(call rule_mkdir)
 	@$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
 
 $(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 9e4eb8f..d23138c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -146,6 +146,9 @@
 		case 4:
 			intel_pt_insn->rel = bswap_32(insn->immediate.value);
 			break;
+		default:
+			intel_pt_insn->rel = 0;
+			break;
 		}
 #else
 		intel_pt_insn->rel = insn->immediate.value;
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
new file mode 100644
index 0000000..4f2c1c2
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.c
@@ -0,0 +1,71 @@
+#include "perf.h"
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/parse-regs-options.h"
+
+int
+parse_regs(const struct option *opt, const char *str, int unset)
+{
+	uint64_t *mode = (uint64_t *)opt->value;
+	const struct sample_reg *r;
+	char *s, *os = NULL, *p;
+	int ret = -1;
+
+	if (unset)
+		return 0;
+
+	/*
+	 * cannot set it twice
+	 */
+	if (*mode)
+		return -1;
+
+	/* str may be NULL in case no arg is passed to -I */
+	if (str) {
+		/* because str is read-only */
+		s = os = strdup(str);
+		if (!s)
+			return -1;
+
+		for (;;) {
+			p = strchr(s, ',');
+			if (p)
+				*p = '\0';
+
+			if (!strcmp(s, "?")) {
+				fprintf(stderr, "available registers: ");
+				for (r = sample_reg_masks; r->name; r++) {
+					fprintf(stderr, "%s ", r->name);
+				}
+				fputc('\n', stderr);
+				/* just printing available regs */
+				return -1;
+			}
+			for (r = sample_reg_masks; r->name; r++) {
+				if (!strcasecmp(s, r->name))
+					break;
+			}
+			if (!r->name) {
+				ui__warning("unknown register %s,"
+					    " check man page\n", s);
+				goto error;
+			}
+
+			*mode |= r->mask;
+
+			if (!p)
+				break;
+
+			s = p + 1;
+		}
+	}
+	ret = 0;
+
+	/* default to all possible regs */
+	if (*mode == 0)
+		*mode = PERF_REGS_MASK;
+error:
+	free(os);
+	return ret;
+}
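
Note the contract of parse_regs(): it refuses to run twice on the same mask,
treats "?" as a listing request (it prints the names and returns -1 so the
command aborts after the list), and falls back to the full PERF_REGS_MASK
when -I is passed with no argument. A hedged usage sketch (the struct option
setup is simplified to the one field the callback reads):

	u64 mask = 0;
	struct option opt = { .value = &mask };	/* minimal stand-in */

	parse_regs(&opt, "ax,sp", 0);	/* sets the AX and SP bits */
	parse_regs(&opt, NULL, 0);	/* returns -1: mask already set */
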
diff --git a/tools/perf/util/parse-regs-options.h b/tools/perf/util/parse-regs-options.h
new file mode 100644
index 0000000..7d762b1
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.h
@@ -0,0 +1,5 @@
+#ifndef _PERF_PARSE_REGS_OPTIONS_H
+#define _PERF_PARSE_REGS_OPTIONS_H 1
+struct option;
+int parse_regs(const struct option *opt, const char *str, int unset);
+#endif /* _PERF_PARSE_REGS_OPTIONS_H */
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 43168fb..885e8ac 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -2,6 +2,10 @@
 #include "perf_regs.h"
 #include "event.h"
 
+const struct sample_reg __weak sample_reg_masks[] = {
+	SMPL_REG_END
+};
+
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
 	int i, idx = 0;
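
The empty __weak table above is the link-time fallback: an architecture that
supports named register selection overrides it with a strong definition, as
the new tools/perf/arch/x86/util/perf_regs.c does earlier in this series.
Sketch of the override pattern:

	/* generic fallback (weak): no selectable registers */
	const struct sample_reg __weak sample_reg_masks[] = {
		SMPL_REG_END
	};

	/* arch definition (strong symbol, wins at link time) */
	const struct sample_reg sample_reg_masks[] = {
		SMPL_REG(AX, PERF_REG_X86_AX),
		/* ... */
		SMPL_REG_END
	};
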
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 980dbf7..2984dcc 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -5,6 +5,15 @@
 
 struct regs_dump;
 
+struct sample_reg {
+	const char *name;
+	uint64_t mask;
+};
+#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
+#define SMPL_REG_END { .name = NULL }
+
+extern const struct sample_reg sample_reg_masks[];
+
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
 
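
SMPL_REG() stringizes its first argument and shifts by the second, so each
table entry carries both the user-visible name and the attr bit it maps to:

	SMPL_REG(AX, PERF_REG_X86_AX)
	/* expands to { .name = "AX", .mask = 1ULL << PERF_REG_X86_AX } */
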
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index f56914c..38b00ec 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -1,9 +1,12 @@
-ldflags-y += --wrap=ioremap_wt
 ldflags-y += --wrap=ioremap_wc
+ldflags-y += --wrap=memremap
 ldflags-y += --wrap=devm_ioremap_nocache
-ldflags-y += --wrap=ioremap_cache
+ldflags-y += --wrap=devm_memremap
+ldflags-y += --wrap=devm_memunmap
 ldflags-y += --wrap=ioremap_nocache
 ldflags-y += --wrap=iounmap
+ldflags-y += --wrap=memunmap
+ldflags-y += --wrap=__devm_request_region
 ldflags-y += --wrap=__request_region
 ldflags-y += --wrap=__release_region
 
@@ -15,6 +18,7 @@
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
 obj-$(CONFIG_ND_BTT) += nd_btt.o
 obj-$(CONFIG_ND_BLK) += nd_blk.o
+obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
 obj-$(CONFIG_ACPI_NFIT) += nfit.o
 
 nfit-y := $(ACPI_SRC)/nfit.o
@@ -29,6 +33,9 @@
 nd_blk-y := $(NVDIMM_SRC)/blk.o
 nd_blk-y += config_check.o
 
+nd_e820-y := $(NVDIMM_SRC)/e820.o
+nd_e820-y += config_check.o
+
 libnvdimm-y := $(NVDIMM_SRC)/core.o
 libnvdimm-y += $(NVDIMM_SRC)/bus.o
 libnvdimm-y += $(NVDIMM_SRC)/dimm_devs.o
@@ -37,7 +44,9 @@
 libnvdimm-y += $(NVDIMM_SRC)/region.o
 libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o
 libnvdimm-y += $(NVDIMM_SRC)/label.o
+libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
 libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
+libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
 libnvdimm-y += config_check.o
 
 obj-m += test/
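
For reference, the --wrap flags above use the standard GNU ld mechanism:
every reference to SYMBOL in the linked objects is redirected to
__wrap_SYMBOL, while the original definition stays reachable as
__real_SYMBOL. A generic sketch of the pattern (not taken from this patch):

	void *__real_memremap(resource_size_t offset, size_t size,
			unsigned long flags);

	void *__wrap_memremap(resource_size_t offset, size_t size,
			unsigned long flags)
	{
		struct nfit_test_resource *nfit_res = get_nfit_res(offset);

		if (nfit_res)	/* intercept: serve from the test buffer */
			return nfit_res->buf + offset - nfit_res->res->start;
		return __real_memremap(offset, size, flags);
	}
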
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 64bfaa5..b725131 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -80,11 +80,46 @@
 }
 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
 
-void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size)
+void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
+		size_t size, unsigned long flags)
 {
-	return __nfit_test_ioremap(offset, size, ioremap_cache);
+	struct nfit_test_resource *nfit_res;
+
+	rcu_read_lock();
+	nfit_res = get_nfit_res(offset);
+	rcu_read_unlock();
+	if (nfit_res)
+		return nfit_res->buf + offset - nfit_res->res->start;
+	return devm_memremap(dev, offset, size, flags);
 }
-EXPORT_SYMBOL(__wrap_ioremap_cache);
+EXPORT_SYMBOL(__wrap_devm_memremap);
+
+void *__wrap_memremap(resource_size_t offset, size_t size,
+		unsigned long flags)
+{
+	struct nfit_test_resource *nfit_res;
+
+	rcu_read_lock();
+	nfit_res = get_nfit_res(offset);
+	rcu_read_unlock();
+	if (nfit_res)
+		return nfit_res->buf + offset - nfit_res->res->start;
+	return memremap(offset, size, flags);
+}
+EXPORT_SYMBOL(__wrap_memremap);
+
+void __wrap_devm_memunmap(struct device *dev, void *addr)
+{
+	struct nfit_test_resource *nfit_res;
+
+	rcu_read_lock();
+	nfit_res = get_nfit_res((unsigned long) addr);
+	rcu_read_unlock();
+	if (nfit_res)
+		return;
+	return devm_memunmap(dev, addr);
+}
+EXPORT_SYMBOL(__wrap_devm_memunmap);
 
 void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
 {
@@ -92,12 +127,6 @@
 }
 EXPORT_SYMBOL(__wrap_ioremap_nocache);
 
-void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size)
-{
-	return __nfit_test_ioremap(offset, size, ioremap_wt);
-}
-EXPORT_SYMBOL(__wrap_ioremap_wt);
-
 void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
 {
 	return __nfit_test_ioremap(offset, size, ioremap_wc);
@@ -117,9 +146,22 @@
 }
 EXPORT_SYMBOL(__wrap_iounmap);
 
-struct resource *__wrap___request_region(struct resource *parent,
-		resource_size_t start, resource_size_t n, const char *name,
-		int flags)
+void __wrap_memunmap(void *addr)
+{
+	struct nfit_test_resource *nfit_res;
+
+	rcu_read_lock();
+	nfit_res = get_nfit_res((unsigned long) addr);
+	rcu_read_unlock();
+	if (nfit_res)
+		return;
+	return memunmap(addr);
+}
+EXPORT_SYMBOL(__wrap_memunmap);
+
+static struct resource *nfit_test_request_region(struct device *dev,
+		struct resource *parent, resource_size_t start,
+		resource_size_t n, const char *name, int flags)
 {
 	struct nfit_test_resource *nfit_res;
 
@@ -147,10 +189,29 @@
 			return res;
 		}
 	}
+	if (dev)
+		return __devm_request_region(dev, parent, start, n, name);
 	return __request_region(parent, start, n, name, flags);
 }
+
+struct resource *__wrap___request_region(struct resource *parent,
+		resource_size_t start, resource_size_t n, const char *name,
+		int flags)
+{
+	return nfit_test_request_region(NULL, parent, start, n, name, flags);
+}
 EXPORT_SYMBOL(__wrap___request_region);
 
+struct resource *__wrap___devm_request_region(struct device *dev,
+		struct resource *parent, resource_size_t start,
+		resource_size_t n, const char *name)
+{
+	if (!dev)
+		return NULL;
+	return nfit_test_request_region(dev, parent, start, n, name, 0);
+}
+EXPORT_SYMBOL(__wrap___devm_request_region);
+
 void __wrap___release_region(struct resource *parent, resource_size_t start,
 				resource_size_t n)
 {
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index d0bdae4..021e6f9 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -147,75 +147,153 @@
 	return container_of(pdev, struct nfit_test, pdev);
 }
 
+static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
+		unsigned int buf_len)
+{
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+
+	nd_cmd->status = 0;
+	nd_cmd->config_size = LABEL_SIZE;
+	nd_cmd->max_xfer = SZ_4K;
+
+	return 0;
+}
+
+static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
+		*nd_cmd, unsigned int buf_len, void *label)
+{
+	unsigned int len, offset = nd_cmd->in_offset;
+	int rc;
+
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+	if (offset >= LABEL_SIZE)
+		return -EINVAL;
+	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
+		return -EINVAL;
+
+	nd_cmd->status = 0;
+	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
+	memcpy(nd_cmd->out_buf, label + offset, len);
+	rc = buf_len - sizeof(*nd_cmd) - len;
+
+	return rc;
+}
+
+static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
+		unsigned int buf_len, void *label)
+{
+	unsigned int len, offset = nd_cmd->in_offset;
+	u32 *status;
+	int rc;
+
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+	if (offset >= LABEL_SIZE)
+		return -EINVAL;
+	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
+		return -EINVAL;
+
+	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
+	*status = 0;
+	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
+	memcpy(label + offset, nd_cmd->in_buf, len);
+	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
+
+	return rc;
+}
+
+static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
+		unsigned int buf_len)
+{
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+
+	nd_cmd->max_ars_out = 256;
+	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
+
+	return 0;
+}
+
+static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd,
+		unsigned int buf_len)
+{
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+
+	nd_cmd->status = 0;
+
+	return 0;
+}
+
+static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
+		unsigned int buf_len)
+{
+	if (buf_len < sizeof(*nd_cmd))
+		return -EINVAL;
+
+	nd_cmd->out_length = 256;
+	nd_cmd->num_records = 0;
+	nd_cmd->status = 0;
+
+	return 0;
+}
+
 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 		unsigned int buf_len)
 {
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
-	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
-	int i, rc;
+	int i, rc = 0;
 
-	if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
-		return -ENOTTY;
+	if (nvdimm) {
+		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 
-	/* lookup label space for the given dimm */
-	for (i = 0; i < ARRAY_SIZE(handle); i++)
-		if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
+		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
+			return -ENOTTY;
+
+		/* lookup label space for the given dimm */
+		for (i = 0; i < ARRAY_SIZE(handle); i++)
+			if (__to_nfit_memdev(nfit_mem)->device_handle ==
+					handle[i])
+				break;
+		if (i >= ARRAY_SIZE(handle))
+			return -ENXIO;
+
+		switch (cmd) {
+		case ND_CMD_GET_CONFIG_SIZE:
+			rc = nfit_test_cmd_get_config_size(buf, buf_len);
 			break;
-	if (i >= ARRAY_SIZE(handle))
-		return -ENXIO;
+		case ND_CMD_GET_CONFIG_DATA:
+			rc = nfit_test_cmd_get_config_data(buf, buf_len,
+				t->label[i]);
+			break;
+		case ND_CMD_SET_CONFIG_DATA:
+			rc = nfit_test_cmd_set_config_data(buf, buf_len,
+				t->label[i]);
+			break;
+		default:
+			return -ENOTTY;
+		}
+	} else {
+		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
+			return -ENOTTY;
 
-	switch (cmd) {
-	case ND_CMD_GET_CONFIG_SIZE: {
-		struct nd_cmd_get_config_size *nd_cmd = buf;
-
-		if (buf_len < sizeof(*nd_cmd))
-			return -EINVAL;
-		nd_cmd->status = 0;
-		nd_cmd->config_size = LABEL_SIZE;
-		nd_cmd->max_xfer = SZ_4K;
-		rc = 0;
-		break;
-	}
-	case ND_CMD_GET_CONFIG_DATA: {
-		struct nd_cmd_get_config_data_hdr *nd_cmd = buf;
-		unsigned int len, offset = nd_cmd->in_offset;
-
-		if (buf_len < sizeof(*nd_cmd))
-			return -EINVAL;
-		if (offset >= LABEL_SIZE)
-			return -EINVAL;
-		if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
-			return -EINVAL;
-
-		nd_cmd->status = 0;
-		len = min(nd_cmd->in_length, LABEL_SIZE - offset);
-		memcpy(nd_cmd->out_buf, t->label[i] + offset, len);
-		rc = buf_len - sizeof(*nd_cmd) - len;
-		break;
-	}
-	case ND_CMD_SET_CONFIG_DATA: {
-		struct nd_cmd_set_config_hdr *nd_cmd = buf;
-		unsigned int len, offset = nd_cmd->in_offset;
-		u32 *status;
-
-		if (buf_len < sizeof(*nd_cmd))
-			return -EINVAL;
-		if (offset >= LABEL_SIZE)
-			return -EINVAL;
-		if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
-			return -EINVAL;
-
-		status = buf + nd_cmd->in_length + sizeof(*nd_cmd);
-		*status = 0;
-		len = min(nd_cmd->in_length, LABEL_SIZE - offset);
-		memcpy(t->label[i] + offset, nd_cmd->in_buf, len);
-		rc = buf_len - sizeof(*nd_cmd) - (len + 4);
-		break;
-	}
-	default:
-		return -ENOTTY;
+		switch (cmd) {
+		case ND_CMD_ARS_CAP:
+			rc = nfit_test_cmd_ars_cap(buf, buf_len);
+			break;
+		case ND_CMD_ARS_START:
+			rc = nfit_test_cmd_ars_start(buf, buf_len);
+			break;
+		case ND_CMD_ARS_STATUS:
+			rc = nfit_test_cmd_ars_status(buf, buf_len);
+			break;
+		default:
+			return -ENOTTY;
+		}
 	}
 
 	return rc;
@@ -876,6 +954,9 @@
 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
+	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
+	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
+	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
 	nd_desc = &acpi_desc->nd_desc;
 	nd_desc->ndctl = nfit_test_ctl;
 }
@@ -948,9 +1029,13 @@
 
 	lane = nd_region_acquire_lane(nd_region);
 	if (rw)
-		memcpy(mmio->base + dpa, iobuf, len);
-	else
-		memcpy(iobuf, mmio->base + dpa, len);
+		memcpy(mmio->addr.base + dpa, iobuf, len);
+	else {
+		memcpy(iobuf, mmio->addr.base + dpa, len);
+
+		/* give us some coverage of the mmio_flush_range() API */
+		mmio_flush_range(mmio->addr.base + dpa, len);
+	}
 	nd_region_release_lane(nd_region, lane);
 
 	return 0;
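
The nfit_test_ctl() rework above splits command handling by addressee:
commands aimed at a DIMM (nvdimm != NULL) are gated by nfit_mem->dsm_mask,
while bus-level address-range-scrub commands are gated by nd_desc->dsm_mask.
The shape of the new dispatch, reduced to a sketch:

	if (nvdimm) {
		/* per-DIMM label commands: ND_CMD_GET_CONFIG_SIZE,
		 * ND_CMD_GET_CONFIG_DATA, ND_CMD_SET_CONFIG_DATA */
	} else {
		/* bus-level ARS commands: ND_CMD_ARS_CAP,
		 * ND_CMD_ARS_START, ND_CMD_ARS_STATUS */
	}
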
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 24ae9e8..0501511 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -20,8 +20,10 @@
 TARGETS += timers
 endif
 TARGETS += user
+TARGETS += jumplabel
 TARGETS += vm
 TARGETS += x86
+TARGETS += zram
 #Please keep the TARGETS list alphabetically sorted
 # Run "make quicktest=1 run_tests" or
 # "make quicktest=1 kselftest from top level Makefile
@@ -71,7 +73,6 @@
 	@# Ask all targets to install their files
 	mkdir -p $(INSTALL_PATH)
 	for TARGET in $(TARGETS); do \
-		mkdir -p $(INSTALL_PATH)/$$TARGET ; \
 		make -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
 	done;
 
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 1822356..d27108b 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -1,22 +1,12 @@
 # Taken from perf makefile
 uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
-ifeq ($(ARCH),i386)
-        ARCH := x86
-endif
-ifeq ($(ARCH),x86_64)
-	ARCH := x86
-endif
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
+ifeq ($(ARCH),x86)
+TEST_PROGS := breakpoint_test
+endif
 
 all:
-ifeq ($(ARCH),x86)
-	gcc breakpoint_test.c -o breakpoint_test
-else
-	echo "Not an x86 target, can't build breakpoints selftests"
-endif
-
-TEST_PROGS := breakpoint_test
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/capabilities/.gitignore b/tools/testing/selftests/capabilities/.gitignore
new file mode 100644
index 0000000..b732dd0
--- /dev/null
+++ b/tools/testing/selftests/capabilities/.gitignore
@@ -0,0 +1,2 @@
+test_execve
+validate_cap
diff --git a/tools/testing/selftests/capabilities/Makefile b/tools/testing/selftests/capabilities/Makefile
new file mode 100644
index 0000000..8c8f0c1
--- /dev/null
+++ b/tools/testing/selftests/capabilities/Makefile
@@ -0,0 +1,18 @@
+all:
+
+include ../lib.mk
+
+.PHONY: all clean
+
+TARGETS := validate_cap test_execve
+TEST_PROGS := test_execve
+
+CFLAGS := -O2 -g -std=gnu99 -Wall -lcap-ng
+
+all: $(TARGETS)
+
+clean:
+	$(RM) $(TARGETS)
+
+$(TARGETS): %: %.c
+	$(CC) -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
new file mode 100644
index 0000000..10a21a9
--- /dev/null
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -0,0 +1,427 @@
+#define _GNU_SOURCE
+
+#include <cap-ng.h>
+#include <err.h>
+#include <linux/capability.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <sched.h>
+#include <sys/mount.h>
+#include <limits.h>
+#include <libgen.h>
+#include <malloc.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+
+#ifndef PR_CAP_AMBIENT
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+#endif
+
+static int nerrs;
+
+static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+{
+	char buf[4096];
+	int fd;
+	ssize_t written;
+	int buf_len;
+
+	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+	if (buf_len < 0) {
+		err(1, "vsnprintf failed");
+	}
+	if (buf_len >= sizeof(buf)) {
+		errx(1, "vsnprintf output truncated");
+	}
+
+	fd = open(filename, O_WRONLY);
+	if (fd < 0) {
+		if ((errno == ENOENT) && enoent_ok)
+			return;
+		err(1, "open of %s failed", filename);
+	}
+	written = write(fd, buf, buf_len);
+	if (written != buf_len) {
+		if (written >= 0) {
+			errx(1, "short write to %s", filename);
+		} else {
+			err(1, "write to %s failed", filename);
+		}
+	}
+	if (close(fd) != 0) {
+		err(1, "close of %s failed", filename);
+	}
+}
+
+static void maybe_write_file(char *filename, char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vmaybe_write_file(true, filename, fmt, ap);
+	va_end(ap);
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vmaybe_write_file(false, filename, fmt, ap);
+	va_end(ap);
+}
+
+static bool create_and_enter_ns(uid_t inner_uid)
+{
+	uid_t outer_uid;
+	gid_t outer_gid;
+	int i;
+	bool have_outer_privilege;
+
+	outer_uid = getuid();
+	outer_gid = getgid();
+
+	/*
+	 * TODO: If we're already root, we could skip creating the userns.
+	 */
+
+	if (unshare(CLONE_NEWNS) == 0) {
+		printf("[NOTE]\tUsing global UIDs for tests\n");
+		if (prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) != 0)
+			err(1, "PR_SET_KEEPCAPS");
+		if (setresuid(inner_uid, inner_uid, -1) != 0)
+			err(1, "setresuid");
+
+		// Re-enable effective caps
+		capng_get_caps_process();
+		for (i = 0; i < CAP_LAST_CAP; i++)
+			if (capng_have_capability(CAPNG_PERMITTED, i))
+				capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, i);
+		if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+			err(1, "capng_apply");
+
+		have_outer_privilege = true;
+	} else if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == 0) {
+		printf("[NOTE]\tUsing a user namespace for tests\n");
+		maybe_write_file("/proc/self/setgroups", "deny");
+		write_file("/proc/self/uid_map", "%d %d 1", inner_uid, outer_uid);
+		write_file("/proc/self/gid_map", "0 %d 1", outer_gid);
+
+		have_outer_privilege = false;
+	} else {
+		errx(1, "must be root or be able to create a userns");
+	}
+
+	if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL) != 0)
+		err(1, "remount everything private");
+
+	return have_outer_privilege;
+}
+
+static void chdir_to_tmpfs(void)
+{
+	char cwd[PATH_MAX];
+	if (getcwd(cwd, sizeof(cwd)) != cwd)
+		err(1, "getcwd");
+
+	if (mount("private_tmp", ".", "tmpfs", 0, "mode=0777") != 0)
+		err(1, "mount private tmpfs");
+
+	if (chdir(cwd) != 0)
+		err(1, "chdir to private tmpfs");
+
+	if (umount2(".", MNT_DETACH) != 0)
+		err(1, "detach private tmpfs");
+}
+
+static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
+{
+	int from = openat(fromfd, fromname, O_RDONLY);
+	if (from == -1)
+		err(1, "open copy source");
+
+	int to = open(toname, O_CREAT | O_WRONLY | O_EXCL, 0700);
+
+	while (true) {
+		char buf[4096];
+		ssize_t sz = read(from, buf, sizeof(buf));
+		if (sz == 0)
+			break;
+		if (sz < 0)
+			err(1, "read");
+
+		if (write(to, buf, sz) != sz)
+			err(1, "write");	/* no short writes on tmpfs */
+	}
+
+	close(from);
+	close(to);
+}
+
+static bool fork_wait(void)
+{
+	pid_t child = fork();
+	if (child == 0) {
+		nerrs = 0;
+		return true;
+	} else if (child > 0) {
+		int status;
+		if (waitpid(child, &status, 0) != child ||
+		    !WIFEXITED(status)) {
+			printf("[FAIL]\tChild died\n");
+			nerrs++;
+		} else if (WEXITSTATUS(status) != 0) {
+			printf("[FAIL]\tChild failed\n");
+			nerrs++;
+		} else {
+			printf("[OK]\tChild succeeded\n");
+		}
+
+		return false;
+	} else {
+		err(1, "fork");
+	}
+}
+
+static void exec_other_validate_cap(const char *name,
+				    bool eff, bool perm, bool inh, bool ambient)
+{
+	execl(name, name, (eff ? "1" : "0"),
+	      (perm ? "1" : "0"), (inh ? "1" : "0"), (ambient ? "1" : "0"),
+	      NULL);
+	err(1, "execl");
+}
+
+static void exec_validate_cap(bool eff, bool perm, bool inh, bool ambient)
+{
+	exec_other_validate_cap("./validate_cap", eff, perm, inh, ambient);
+}
+
+static int do_tests(int uid, const char *our_path)
+{
+	bool have_outer_privilege = create_and_enter_ns(uid);
+
+	int ourpath_fd = open(our_path, O_RDONLY | O_DIRECTORY);
+	if (ourpath_fd == -1)
+		err(1, "open '%s'", our_path);
+
+	chdir_to_tmpfs();
+
+	copy_fromat_to(ourpath_fd, "validate_cap", "validate_cap");
+
+	if (have_outer_privilege) {
+		uid_t gid = getegid();
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_suidroot");
+		if (chown("validate_cap_suidroot", 0, -1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_suidroot", S_ISUID | 0700) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_suidnonroot");
+		if (chown("validate_cap_suidnonroot", uid + 1, -1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_suidnonroot", S_ISUID | 0700) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_sgidroot");
+		if (chown("validate_cap_sgidroot", -1, 0) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_sgidroot", S_ISGID | 0710) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_sgidnonroot");
+		if (chown("validate_cap_sgidnonroot", -1, gid + 1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
+			err(1, "chmod");
+	}
+
+	capng_get_caps_process();
+
+	/* Make sure that i starts out clear */
+	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+
+	if (uid == 0) {
+		printf("[RUN]\tRoot => ep\n");
+		if (fork_wait())
+			exec_validate_cap(true, true, false, false);
+	} else {
+		printf("[RUN]\tNon-root => no caps\n");
+		if (fork_wait())
+			exec_validate_cap(false, false, false, false);
+	}
+
+	printf("[OK]\tCheck cap_ambient manipulation rules\n");
+
+	/* We should not be able to add ambient caps yet. */
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != -1 || errno != EPERM) {
+		if (errno == EINVAL)
+			printf("[FAIL]\tPR_CAP_AMBIENT_RAISE isn't supported\n");
+		else
+			printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have failed eith EPERM on a non-inheritable cap\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE failed on non-inheritable cap\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_RAW);
+	capng_update(CAPNG_DROP, CAPNG_PERMITTED, CAP_NET_RAW);
+	capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, CAP_NET_RAW);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_RAW, 0, 0, 0) != -1 || errno != EPERM) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have failed on a non-permitted cap\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE failed on non-permitted cap\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have succeeded\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE worked\n");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 1) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_IS_SET is broken\n");
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_CLEAR_ALL");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_CLEAR_ALL didn't work\n");
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_RAISE");
+
+	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tDropping I should have dropped A\n");
+		return 1;
+	}
+
+	printf("[OK]\tBasic manipulation appears to work\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (uid == 0) {
+		printf("[RUN]\tRoot +i => eip\n");
+		if (fork_wait())
+			exec_validate_cap(true, true, true, false);
+	} else {
+		printf("[RUN]\tNon-root +i => i\n");
+		if (fork_wait())
+			exec_validate_cap(false, false, true, false);
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_RAISE");
+
+	printf("[RUN]\tUID %d +ia => eipa\n", uid);
+	if (fork_wait())
+		exec_validate_cap(true, true, true, true);
+
+	/* The remaining tests need real privilege */
+
+	if (!have_outer_privilege) {
+		printf("[SKIP]\tSUID/SGID tests (needs privilege)\n");
+		goto done;
+	}
+
+	if (uid == 0) {
+		printf("[RUN]\tRoot +ia, suidroot => eipa\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_suidroot",
+						true, true, true, true);
+
+		printf("[RUN]\tRoot +ia, suidnonroot => ip\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_suidnonroot",
+						false, true, true, false);
+
+		printf("[RUN]\tRoot +ia, sgidroot => eipa\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						true, true, true, true);
+
+		if (fork_wait()) {
+			printf("[RUN]\tRoot, gid != 0, +ia, sgidroot => eip\n");
+			if (setresgid(1, 1, 1) != 0)
+				err(1, "setresgid");
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						true, true, true, false);
+		}
+
+		printf("[RUN]\tRoot +ia, sgidnonroot => eip\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_sgidnonroot",
+						true, true, true, false);
+	} else {
+		printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
+		exec_other_validate_cap("./validate_cap_sgidnonroot",
+						false, false, true, false);
+
+		if (fork_wait()) {
+			printf("[RUN]\tNon-root +ia, sgidroot => i\n");
+			if (setresgid(1, 1, 1) != 0)
+				err(1, "setresgid");
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						false, false, true, false);
+		}
+	}
+
+done:
+	return nerrs ? 1 : 0;
+}
+
+int main(int argc, char **argv)
+{
+	char *tmp1, *tmp2, *our_path;
+
+	/* Find our path */
+	tmp1 = strdup(argv[0]);
+	if (!tmp1)
+		err(1, "strdup");
+	tmp2 = dirname(tmp1);
+	our_path = strdup(tmp2);
+	if (!our_path)
+		err(1, "strdup");
+	free(tmp1);
+
+	if (fork_wait()) {
+		printf("[RUN]\t+++ Tests with uid == 0 +++\n");
+		return do_tests(0, our_path);
+	}
+
+	if (fork_wait()) {
+		printf("[RUN]\t+++ Tests with uid != 0 +++\n");
+		return do_tests(1, our_path);
+	}
+
+	return nerrs ? 1 : 0;
+}
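
The core rule this test exercises: an ambient capability can only be raised
while it is present in both the permitted and inheritable sets, and dropping
it from the inheritable set clears it from the ambient set too. Reduced to a
sketch:

	/* fails with EPERM unless the cap is permitted + inheritable */
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		  CAP_NET_BIND_SERVICE, 0, 0, 0) == 0) {
		/* the cap now survives execve() of an unprivileged binary */
	}
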
diff --git a/tools/testing/selftests/capabilities/validate_cap.c b/tools/testing/selftests/capabilities/validate_cap.c
new file mode 100644
index 0000000..dd3c45f
--- /dev/null
+++ b/tools/testing/selftests/capabilities/validate_cap.c
@@ -0,0 +1,73 @@
+#include <cap-ng.h>
+#include <err.h>
+#include <linux/capability.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <sys/auxv.h>
+
+#ifndef PR_CAP_AMBIENT
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+#endif
+
+#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 19)
+# define HAVE_GETAUXVAL
+#endif
+
+static bool bool_arg(char **argv, int i)
+{
+	if (!strcmp(argv[i], "0"))
+		return false;
+	else if (!strcmp(argv[i], "1"))
+		return true;
+	else
+		errx(1, "wrong argv[%d]", i);
+}
+
+int main(int argc, char **argv)
+{
+	const char *atsec = "";
+
+	/*
+	 * Be careful just in case a setgid or setcapped copy of this
+	 * helper gets out.
+	 */
+
+	if (argc != 5)
+		errx(1, "wrong argc");
+
+#ifdef HAVE_GETAUXVAL
+	if (getauxval(AT_SECURE))
+		atsec = " (AT_SECURE is set)";
+	else
+		atsec = " (AT_SECURE is not set)";
+#endif
+
+	capng_get_caps_process();
+
+	if (capng_have_capability(CAPNG_EFFECTIVE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 1)) {
+		printf("[FAIL]\tWrong effective state%s\n", atsec);
+		return 1;
+	}
+	if (capng_have_capability(CAPNG_PERMITTED, CAP_NET_BIND_SERVICE) != bool_arg(argv, 2)) {
+		printf("[FAIL]\tWrong permitted state%s\n", atsec);
+		return 1;
+	}
+	if (capng_have_capability(CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 3)) {
+		printf("[FAIL]\tWrong inheritable state%s\n", atsec);
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != bool_arg(argv, 4)) {
+		printf("[FAIL]\tWrong ambient state%s\n", atsec);
+		return 1;
+	}
+
+	printf("[OK]\tCapabilities after execve were correct\n");
+	return 0;
+}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index ee412ba..97f1c67 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -12,11 +12,14 @@
 	$(RUN_TESTS)
 
 define INSTALL_RULE
-	mkdir -p $(INSTALL_PATH)
-	@for TEST_DIR in $(TEST_DIRS); do\
-		cp -r $$TEST_DIR $(INSTALL_PATH); \
-	done;
-	install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+	@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then			\
+		mkdir -p $(INSTALL_PATH);								\
+		for TEST_DIR in $(TEST_DIRS); do							\
+			cp -r $$TEST_DIR $(INSTALL_PATH);						\
+		done;											\
+		echo "install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)";	\
+		install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES);		\
+	fi
 endef
 
 install: all
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 41cc3ed..ee179e2 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,8 +2,9 @@
 	$(MAKE) -C ../
 
 TEST_PROGS := hugetlb_vs_thp_test subpage_prot
+TEST_FILES := tempfile
 
-all: $(TEST_PROGS) tempfile
+all: $(TEST_PROGS) $(TEST_FILES)
 
 $(TEST_PROGS): ../harness.c
 
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index c5abe7f..a004b4c 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -14,6 +14,7 @@
 #include <linux/filter.h>
 #include <sys/prctl.h>
 #include <sys/ptrace.h>
+#include <sys/types.h>
 #include <sys/user.h>
 #include <linux/prctl.h>
 #include <linux/ptrace.h>
@@ -82,7 +83,13 @@
 };
 #endif
 
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
+#else
+#error "wut? Unknown __BYTE_ORDER?!"
+#endif
 
 #define SIBLING_EXIT_UNKILLED	0xbadbeef
 #define SIBLING_EXIT_FAILURE	0xbadface
@@ -1199,6 +1206,10 @@
 # define ARCH_REGS	struct user_pt_regs
 # define SYSCALL_NUM	regs[8]
 # define SYSCALL_RET	regs[0]
+#elif defined(__powerpc__)
+# define ARCH_REGS	struct pt_regs
+# define SYSCALL_NUM	gpr[0]
+# define SYSCALL_RET	gpr[3]
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1232,7 +1243,7 @@
 	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 	EXPECT_EQ(0, ret);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__)
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
@@ -1396,6 +1407,8 @@
 #  define __NR_seccomp 383
 # elif defined(__aarch64__)
 #  define __NR_seccomp 277
+# elif defined(__powerpc__)
+#  define __NR_seccomp 358
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
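
The endianness fix above is needed because seccomp_data.args[] entries are
64-bit while a classic-BPF load (BPF_LD | BPF_W) reads a 32-bit word; on
big-endian targets the low half of each argument sits sizeof(__u32) bytes
into the field, hence the adjusted offset. Illustration:

	/* loads the low 32 bits of syscall argument 0 on either endianness */
	struct sock_filter ld_arg0 =
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_arg(0));
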
diff --git a/tools/testing/selftests/static_keys/Makefile b/tools/testing/selftests/static_keys/Makefile
new file mode 100644
index 0000000..9cdadf3
--- /dev/null
+++ b/tools/testing/selftests/static_keys/Makefile
@@ -0,0 +1,8 @@
+# Makefile for static keys selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+TEST_PROGS := test_static_keys.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
new file mode 100644
index 0000000..1261e3f
--- /dev/null
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# Runs static keys kernel module tests
+
+if /sbin/modprobe -q test_static_key_base; then
+	if /sbin/modprobe -q test_static_keys; then
+		echo "static_key: ok"
+		/sbin/modprobe -q -r test_static_keys
+		/sbin/modprobe -q -r test_static_key_base
+	else
+		echo "static_keys: [FAIL]"
+		/sbin/modprobe -q -r test_static_key_base
+	fi
+else
+	echo "static_key: [FAIL]"
+	exit 1
+fi
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 231b9a0..d36fab7 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -4,14 +4,16 @@
 BINARIES = compaction_test
 BINARIES += hugepage-mmap
 BINARIES += hugepage-shm
-BINARIES += hugetlbfstest
 BINARIES += map_hugetlb
 BINARIES += thuge-gen
 BINARIES += transhuge-stress
+BINARIES += userfaultfd
 
 all: $(BINARIES)
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^ -lrt
+userfaultfd: userfaultfd.c
+	$(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread
 
 TEST_PROGS := run_vmtests
 TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/hugetlbfstest.c b/tools/testing/selftests/vm/hugetlbfstest.c
deleted file mode 100644
index 02e1072..0000000
--- a/tools/testing/selftests/vm/hugetlbfstest.c
+++ /dev/null
@@ -1,86 +0,0 @@
-#define _GNU_SOURCE
-#include <assert.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-typedef unsigned long long u64;
-
-static size_t length = 1 << 24;
-
-static u64 read_rss(void)
-{
-	char buf[4096], *s = buf;
-	int i, fd;
-	u64 rss;
-
-	fd = open("/proc/self/statm", O_RDONLY);
-	assert(fd > 2);
-	memset(buf, 0, sizeof(buf));
-	read(fd, buf, sizeof(buf) - 1);
-	for (i = 0; i < 1; i++)
-		s = strchr(s, ' ') + 1;
-	rss = strtoull(s, NULL, 10);
-	return rss << 12; /* assumes 4k pagesize */
-}
-
-static void do_mmap(int fd, int extra_flags, int unmap)
-{
-	int *p;
-	int flags = MAP_PRIVATE | MAP_POPULATE | extra_flags;
-	u64 before, after;
-	int ret;
-
-	before = read_rss();
-	p = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, fd, 0);
-	assert(p != MAP_FAILED ||
-			!"mmap returned an unexpected error");
-	after = read_rss();
-	assert(llabs(after - before - length) < 0x40000 ||
-			!"rss didn't grow as expected");
-	if (!unmap)
-		return;
-	ret = munmap(p, length);
-	assert(!ret || !"munmap returned an unexpected error");
-	after = read_rss();
-	assert(llabs(after - before) < 0x40000 ||
-			!"rss didn't shrink as expected");
-}
-
-static int open_file(const char *path)
-{
-	int fd, err;
-
-	unlink(path);
-	fd = open(path, O_CREAT | O_RDWR | O_TRUNC | O_EXCL
-			| O_LARGEFILE | O_CLOEXEC, 0600);
-	assert(fd > 2);
-	unlink(path);
-	err = ftruncate(fd, length);
-	assert(!err);
-	return fd;
-}
-
-int main(void)
-{
-	int hugefd, fd;
-
-	fd = open_file("/dev/shm/hugetlbhog");
-	hugefd = open_file("/hugepages/hugetlbhog");
-
-	system("echo 100 > /proc/sys/vm/nr_hugepages");
-	do_mmap(-1, MAP_ANONYMOUS, 1);
-	do_mmap(fd, 0, 1);
-	do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 1);
-	do_mmap(hugefd, 0, 1);
-	do_mmap(hugefd, MAP_HUGETLB, 1);
-	/* Leak the last one to test do_exit() */
-	do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 0);
-	printf("oll korrekt.\n");
-	return 0;
-}
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 49ece11..9179ce8 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -75,10 +75,14 @@
 	echo "[PASS]"
 fi
 
+echo "NOTE: The above hugetlb tests provide minimal coverage.  Use"
+echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
+echo "      hugetlb regression testing."
+
 echo "--------------------"
-echo "running hugetlbfstest"
+echo "running userfaultfd"
 echo "--------------------"
-./hugetlbfstest
+./userfaultfd 128 32
 if [ $? -ne 0 ]; then
 	echo "[FAIL]"
 	exitcode=1
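
The userfaultfd test added below drives the fd through the standard
handshake: open it via the raw syscall, negotiate the API, register the
destination area for missing-page faults, then resolve each fault read from
the fd with UFFDIO_COPY. A condensed sketch of that sequence, using the names
from the new file:

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = {
			.start	= (unsigned long) area_dst,
			.len	= nr_pages * page_size,
		},
		.mode	= UFFDIO_REGISTER_MODE_MISSING,
	};

	ioctl(uffd, UFFDIO_API, &api);
	ioctl(uffd, UFFDIO_REGISTER, &reg);
	/* then: read(uffd, &msg, sizeof(msg)) + ioctl(uffd, UFFDIO_COPY, ...) */
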
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
new file mode 100644
index 0000000..2c7cca6
--- /dev/null
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -0,0 +1,639 @@
+/*
+ * Stress userfaultfd syscall.
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ *
+ * This test allocates two virtual areas and bounces the physical
+ * memory across the two virtual areas (from area_src to area_dst)
+ * using userfaultfd.
+ *
+ * There are three threads running per CPU:
+ *
+ * 1) one per-CPU thread takes a per-page pthread_mutex in a random
+ *    page of the area_dst (while the physical page may still be in
+ *    area_src), and increments a per-page counter in the same page,
+ *    and checks its value against a verification region.
+ *
+ * 2) another per-CPU thread handles the userfaults generated by
+ *    thread 1 above. userfaultfd blocking reads and poll() modes are
+ *    exercised in an interleaved fashion.
+ *
+ * 3) one last per-CPU thread transfers the memory in the background
+ *    at maximum bandwidth (if not already transferred by thread
+ *    2). Each cpu thread takes care of transferring a portion of the
+ *    area.
+ *
+ * When all threads of type 3 have completed the transfer, one bounce is
+ * complete. area_src and area_dst are then swapped. All threads are
+ * respawned and so the bounce is immediately restarted in the
+ * opposite direction.
+ *
+ * The per-CPU threads of type 1, by triggering userfaults inside
+ * pthread_mutex_lock, will also verify the atomicity of the memory
+ * transfer (UFFDIO_COPY).
+ *
+ * The program takes two parameters: the amounts of physical memory in
+ * megabytes (MiB) of the area and the number of bounces to execute.
+ *
+ * # 100MiB 99999 bounces
+ * ./userfaultfd 100 99999
+ *
+ * # 1GiB 99 bounces
+ * ./userfaultfd 1000 99
+ *
+ * # 10MiB-~6GiB 999 bounces, continue forever unless an error triggers
+ * while ./userfaultfd $[RANDOM % 6000 + 10] 999; do true; done
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <signal.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include "../../../../include/uapi/linux/userfaultfd.h"
+
+#ifdef __x86_64__
+#define __NR_userfaultfd 323
+#elif defined(__i386__)
+#define __NR_userfaultfd 374
+#elif defined(__powerpc__)
+#define __NR_userfaultfd 364
+#else
+#error "missing __NR_userfaultfd definition"
+#endif
+
+static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+
+#define BOUNCE_RANDOM		(1<<0)
+#define BOUNCE_RACINGFAULTS	(1<<1)
+#define BOUNCE_VERIFY		(1<<2)
+#define BOUNCE_POLL		(1<<3)
+static int bounces;
+
+static unsigned long long *count_verify;
+static int uffd, finished, *pipefd;
+static char *area_src, *area_dst;
+static char *zeropage;
+pthread_attr_t attr;
+
+/* pthread_mutex_t starts at page offset 0 */
+#define area_mutex(___area, ___nr)					\
+	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
+/*
+ * count is placed in the page after the pthread_mutex_t, naturally aligned
+ * to avoid unaligned-access faults on non-x86 archs.
+ */
+#define area_count(___area, ___nr)					\
+	((volatile unsigned long long *) ((unsigned long)		\
+				 ((___area) + (___nr)*page_size +	\
+				  sizeof(pthread_mutex_t) +		\
+				  sizeof(unsigned long long) - 1) &	\
+				 ~(unsigned long)(sizeof(unsigned long long) \
+						  -  1)))
+
+static int my_bcmp(char *str1, char *str2, size_t n)
+{
+	unsigned long i;
+	for (i = 0; i < n; i++)
+		if (str1[i] != str2[i])
+			return 1;
+	return 0;
+}
+
+static void *locking_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct random_data rand;
+	unsigned long page_nr = *(&(page_nr)); /* uninitialized warning */
+	int32_t rand_nr;
+	unsigned long long count;
+	char randstate[64];
+	unsigned int seed;
+	time_t start;
+
+	if (bounces & BOUNCE_RANDOM) {
+		seed = (unsigned int) time(NULL) - bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			seed += cpu;
+		bzero(&rand, sizeof(rand));
+		bzero(&randstate, sizeof(randstate));
+		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
+			fprintf(stderr, "srandom_r error\n"), exit(1);
+	} else {
+		page_nr = -bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			page_nr += cpu * nr_pages_per_cpu;
+	}
+
+	while (!finished) {
+		if (bounces & BOUNCE_RANDOM) {
+			if (random_r(&rand, &rand_nr))
+				fprintf(stderr, "random_r 1 error\n"), exit(1);
+			page_nr = rand_nr;
+			if (sizeof(page_nr) > sizeof(rand_nr)) {
+				if (random_r(&rand, &rand_nr))
+					fprintf(stderr, "random_r 2 error\n"), exit(1);
+				page_nr |= (((unsigned long) rand_nr) << 16) <<
+					   16;
+			}
+		} else
+			page_nr += 1;
+		page_nr %= nr_pages;
+
+		start = time(NULL);
+		if (bounces & BOUNCE_VERIFY) {
+			count = *area_count(area_dst, page_nr);
+			if (!count)
+				fprintf(stderr,
+					"page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+
+
+			/*
+			 * We can't use bcmp (or memcmp) because that
+			 * returns 0 erroneously if the memory is
+			 * changing under it (even if the end of the
+			 * page is never changing and always
+			 * different).
+			 */
+#if 1
+			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size))
+				fprintf(stderr,
+					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+#else
+			unsigned long loops;
+
+			loops = 0;
+			/* uncomment the below line to test with mutex */
+			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
+			while (!bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size)) {
+				loops += 1;
+				if (loops > 10)
+					break;
+			}
+			/* uncomment below line to test with mutex */
+			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
+			if (loops) {
+				fprintf(stderr,
+					"page_nr %lu all zero thread %lu %p %lu\n",
+					page_nr, cpu, area_dst + page_nr * page_size,
+					loops);
+				if (loops > 10)
+					exit(1);
+			}
+#endif
+		}
+
+		pthread_mutex_lock(area_mutex(area_dst, page_nr));
+		count = *area_count(area_dst, page_nr);
+		if (count != count_verify[page_nr]) {
+			fprintf(stderr,
+				"page_nr %lu memory corruption %Lu %Lu\n",
+				page_nr, count,
+				count_verify[page_nr]), exit(1);
+		}
+		count++;
+		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
+		pthread_mutex_unlock(area_mutex(area_dst, page_nr));
+
+		if (time(NULL) - start > 1)
+			fprintf(stderr,
+				"userfault too slow %ld "
+				"possible false positive with overcommit\n",
+				time(NULL) - start);
+	}
+
+	return NULL;
+}
+
+static int copy_page(unsigned long offset)
+{
+	struct uffdio_copy uffdio_copy;
+
+	if (offset >= nr_pages * page_size)
+		fprintf(stderr, "unexpected offset %lu\n",
+			offset), exit(1);
+	uffdio_copy.dst = (unsigned long) area_dst + offset;
+	uffdio_copy.src = (unsigned long) area_src + offset;
+	uffdio_copy.len = page_size;
+	uffdio_copy.mode = 0;
+	uffdio_copy.copy = 0;
+	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
+		/* the real retval is in uffdio_copy.copy */
+		if (uffdio_copy.copy != -EEXIST)
+			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
+				uffdio_copy.copy), exit(1);
+	} else if (uffdio_copy.copy != page_size) {
+		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
+			uffdio_copy.copy), exit(1);
+	} else
+		return 1;
+	return 0;
+}
+
+static void *uffd_poll_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct pollfd pollfd[2];
+	struct uffd_msg msg;
+	int ret;
+	unsigned long offset;
+	char tmp_chr;
+	unsigned long userfaults = 0;
+
+	pollfd[0].fd = uffd;
+	pollfd[0].events = POLLIN;
+	pollfd[1].fd = pipefd[cpu*2];
+	pollfd[1].events = POLLIN;
+
+	for (;;) {
+		ret = poll(pollfd, 2, -1);
+		if (!ret)
+			fprintf(stderr, "poll error %d\n", ret), exit(1);
+		if (ret < 0)
+			perror("poll"), exit(1);
+		if (pollfd[1].revents & POLLIN) {
+			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
+				fprintf(stderr, "read pipefd error\n"),
+					exit(1);
+			break;
+		}
+		if (!(pollfd[0].revents & POLLIN))
+			fprintf(stderr, "pollfd[0].revents %d\n",
+				pollfd[0].revents), exit(1);
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				continue;
+			perror("nonblocking read error"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)(unsigned long)msg.arg.pagefault.address -
+			 area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			userfaults++;
+	}
+	return (void *)userfaults;
+}
+
+pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void *uffd_read_thread(void *arg)
+{
+	unsigned long *this_cpu_userfaults;
+	struct uffd_msg msg;
+	unsigned long offset;
+	int ret;
+
+	this_cpu_userfaults = (unsigned long *) arg;
+	*this_cpu_userfaults = 0;
+
+	pthread_mutex_unlock(&uffd_read_mutex);
+	/* from here cancellation is ok */
+
+	for (;;) {
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret != sizeof(msg)) {
+			if (ret < 0)
+				perror("blocking read error"), exit(1);
+			else
+				fprintf(stderr, "short read\n"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (bounces & BOUNCE_VERIFY &&
+		    msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)(unsigned long)msg.arg.pagefault.address -
+			 area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			(*this_cpu_userfaults)++;
+	}
+	return (void *)NULL;
+}
+
+static void *background_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	unsigned long page_nr;
+
+	for (page_nr = cpu * nr_pages_per_cpu;
+	     page_nr < (cpu+1) * nr_pages_per_cpu;
+	     page_nr++)
+		copy_page(page_nr * page_size);
+
+	return NULL;
+}
+
+static int stress(unsigned long *userfaults)
+{
+	unsigned long cpu;
+	pthread_t locking_threads[nr_cpus];
+	pthread_t uffd_threads[nr_cpus];
+	pthread_t background_threads[nr_cpus];
+	void **_userfaults = (void **) userfaults;
+
+	finished = 0;
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pthread_create(&locking_threads[cpu], &attr,
+				   locking_thread, (void *)cpu))
+			return 1;
+		if (bounces & BOUNCE_POLL) {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_poll_thread, (void *)cpu))
+				return 1;
+		} else {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_read_thread,
+					   &_userfaults[cpu]))
+				return 1;
+			pthread_mutex_lock(&uffd_read_mutex);
+		}
+		if (pthread_create(&background_threads[cpu], &attr,
+				   background_thread, (void *)cpu))
+			return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(background_threads[cpu], NULL))
+			return 1;
+
+	/*
+	 * Be strict and immediately zap area_src: the whole area has
+	 * already been transferred by the background threads. The
+	 * area_src could then be faulted in in a racy way by the still
+	 * running uffd threads reading zeropages after we zapped
+	 * area_src (but they're guaranteed to get -EEXIST from
+	 * UFFDIO_COPY without writing zero pages into area_dst
+	 * because the background threads already completed).
+	 */
+	if (madvise(area_src, nr_pages * page_size, MADV_DONTNEED)) {
+		perror("madvise");
+		return 1;
+	}
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		char c;
+		if (bounces & BOUNCE_POLL) {
+			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
+				fprintf(stderr, "pipefd write error\n");
+				return 1;
+			}
+			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
+				return 1;
+		} else {
+			if (pthread_cancel(uffd_threads[cpu]))
+				return 1;
+			if (pthread_join(uffd_threads[cpu], NULL))
+				return 1;
+		}
+	}
+
+	finished = 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(locking_threads[cpu], NULL))
+			return 1;
+
+	return 0;
+}
+
+static int userfaultfd_stress(void)
+{
+	void *area;
+	char *tmp_area;
+	unsigned long nr;
+	struct uffdio_register uffdio_register;
+	struct uffdio_api uffdio_api;
+	unsigned long cpu;
+	int uffd_flags;
+	unsigned long userfaults[nr_cpus];
+
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_src = area;
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_dst = area;
+
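+	/* userfaultfd(2) has no glibc wrapper yet; invoke it via syscall(2) */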
+	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+	if (uffd < 0) {
+		fprintf(stderr,
+			"userfaultfd syscall not available in this kernel\n");
+		return 1;
+	}
+	uffd_flags = fcntl(uffd, F_GETFD, NULL);
+
+	uffdio_api.api = UFFD_API;
+	uffdio_api.features = 0;
+	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
+		fprintf(stderr, "UFFDIO_API\n");
+		return 1;
+	}
+	if (uffdio_api.api != UFFD_API) {
+		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
+		return 1;
+	}
+
+	count_verify = malloc(nr_pages * sizeof(unsigned long long));
+	if (!count_verify) {
+		perror("count_verify");
+		return 1;
+	}
+
+	for (nr = 0; nr < nr_pages; nr++) {
+		*area_mutex(area_src, nr) = (pthread_mutex_t)
+			PTHREAD_MUTEX_INITIALIZER;
+		count_verify[nr] = *area_count(area_src, nr) = 1;
+	}
+
+	pipefd = malloc(sizeof(int) * nr_cpus * 2);
+	if (!pipefd) {
+		perror("pipefd");
+		return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
+			perror("pipe");
+			return 1;
+		}
+	}
+
+	if (posix_memalign(&area, page_size, page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	zeropage = area;
+	bzero(zeropage, page_size);
+
+	pthread_mutex_lock(&uffd_read_mutex);
+
+	pthread_attr_init(&attr);
+	pthread_attr_setstacksize(&attr, 16*1024*1024);
+
+	while (bounces--) {
+		unsigned long expected_ioctls;
+
+		printf("bounces: %d, mode:", bounces);
+		if (bounces & BOUNCE_RANDOM)
+			printf(" rnd");
+		if (bounces & BOUNCE_RACINGFAULTS)
+			printf(" racing");
+		if (bounces & BOUNCE_VERIFY)
+			printf(" ver");
+		if (bounces & BOUNCE_POLL)
+			printf(" poll");
+		printf(", ");
+		fflush(stdout);
+
+		if (bounces & BOUNCE_POLL)
+			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+		else
+			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);
+
+		/* register */
+		uffdio_register.range.start = (unsigned long) area_dst;
+		uffdio_register.range.len = nr_pages * page_size;
+		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
+			fprintf(stderr, "register failure\n");
+			return 1;
+		}
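+		/*
+		 * The kernel reports the ioctls valid on the registered
+		 * range in uffdio_register.ioctls; anonymous memory must
+		 * support at least wake, copy and zeropage.
+		 */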
+		expected_ioctls = (1 << _UFFDIO_WAKE) |
+				  (1 << _UFFDIO_COPY) |
+				  (1 << _UFFDIO_ZEROPAGE);
+		if ((uffdio_register.ioctls & expected_ioctls) !=
+		    expected_ioctls) {
+			fprintf(stderr,
+				"unexpected missing ioctl for anon memory\n");
+			return 1;
+		}
+
+		/*
+		 * The madvise done previously isn't enough: some
+		 * uffd_thread could have read userfaults (one of
+		 * those already resolved by the background thread)
+		 * and it may be in the process of calling
+		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
+		 * area_src and it would map a zero page in it (of
+		 * course such a UFFDIO_COPY is perfectly safe as it'd
+		 * return -EEXIST). The problem comes at the next
+		 * bounce though: that racing UFFDIO_COPY would
+		 * generate zeropages in the area_src, so invalidating
+		 * the previous MADV_DONTNEED. Without this additional
+		 * MADV_DONTNEED those zeropages leftovers in the
+		 * area_src would lead to -EEXIST failure during the
+		 * next bounce, effectively leaving a zeropage in the
+		 * area_dst.
+		 *
+		 * Try commenting out this madvise to see the memory
+		 * corruption being caught pretty quickly.
+		 *
+		 * khugepaged is also only inhibited from collapsing THP
+		 * after MADV_DONTNEED once UFFDIO_REGISTER has been done,
+		 * so the MADV_DONTNEED here is required.
+		 */
+		if (madvise(area_dst, nr_pages * page_size, MADV_DONTNEED)) {
+			perror("madvise 2");
+			return 1;
+		}
+
+		/* bounce pass */
+		if (stress(userfaults))
+			return 1;
+
+		/* unregister */
+		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
+			fprintf(stderr, "unregister failure\n");
+			return 1;
+		}
+
+		/* verification */
+		if (bounces & BOUNCE_VERIFY) {
+			for (nr = 0; nr < nr_pages; nr++) {
+				if (my_bcmp(area_dst,
+					    area_dst + nr * page_size,
+					    sizeof(pthread_mutex_t))) {
+					fprintf(stderr,
+						"error mutex 2 %lu\n",
+						nr);
+					bounces = 0;
+				}
+				if (*area_count(area_dst, nr) != count_verify[nr]) {
+					fprintf(stderr,
+						"error area_count %Lu %Lu %lu\n",
+						*area_count(area_dst, nr),
+						count_verify[nr],
+						nr);
+					bounces = 0;
+				}
+			}
+		}
+
+		/* prepare next bounce */
+		tmp_area = area_src;
+		area_src = area_dst;
+		area_dst = tmp_area;
+
+		printf("userfaults:");
+		for (cpu = 0; cpu < nr_cpus; cpu++)
+			printf(" %lu", userfaults[cpu]);
+		printf("\n");
+	}
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	if (argc < 3)
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	page_size = sysconf(_SC_PAGE_SIZE);
+	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) >
+	    page_size)
+		fprintf(stderr, "Impossible to run this test\n"), exit(2);
+	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
+		nr_cpus;
+	if (!nr_pages_per_cpu) {
+		fprintf(stderr, "invalid MiB\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	bounces = atoi(argv[2]);
+	if (bounces <= 0) {
+		fprintf(stderr, "invalid bounces\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	nr_pages = nr_pages_per_cpu * nr_cpus;
+	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
+	       nr_pages, nr_pages_per_cpu);
+	return userfaultfd_stress();
+}
diff --git a/tools/testing/selftests/zram/Makefile b/tools/testing/selftests/zram/Makefile
new file mode 100644
index 0000000..29d8034
--- /dev/null
+++ b/tools/testing/selftests/zram/Makefile
@@ -0,0 +1,9 @@
+all:
+
+TEST_PROGS := zram.sh
+TEST_FILES := zram01.sh zram02.sh zram_lib.sh
+
+include ../lib.mk
+
+clean:
+	$(RM) err.log
diff --git a/tools/testing/selftests/zram/README b/tools/testing/selftests/zram/README
new file mode 100644
index 0000000..eb17917
--- /dev/null
+++ b/tools/testing/selftests/zram/README
@@ -0,0 +1,40 @@
+zram: Compressed RAM based block devices
+----------------------------------------
+* Introduction
+
+The zram module creates RAM based block devices named /dev/zram<id>
+(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
+in memory itself. These disks allow very fast I/O, and compression provides
+good memory savings. Use cases include /tmp storage, swap disks,
+various caches under /var and maybe many more :)
+
+Statistics for individual zram devices are exported through sysfs nodes at
+/sys/block/zram<id>/
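+
+A minimal manual setup, for illustration (device name and sizes are
+examples; the test scripts below drive the same sysfs nodes):
+  modprobe zram num_devices=1
+  echo 2097152 > /sys/block/zram0/disksize
+  mkfs.ext4 /dev/zram0 && mount /dev/zram0 /mnt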
+
+Kconfig required:
+CONFIG_ZRAM=y
+CONFIG_ZRAM_LZ4_COMPRESS=y
+CONFIG_ZPOOL=y
+CONFIG_ZSMALLOC=y
+
+ZRAM Testcases
+--------------
+zram_lib.sh: library of initialization/cleanup functions
+zram.sh: sanity-checks CONFIG_ZRAM and runs zram01 and zram02
+
+Two functional tests, zram01 and zram02:
+zram01.sh: creates general purpose ram disks with ext4 filesystems
+zram02.sh: creates a block device for swap
+
+Commands required for testing:
+ - bc
+ - dd
+ - free
+ - awk
+ - mkswap
+ - swapon
+ - swapoff
+ - mkfs / mkfs.ext4
+
+For more information, please refer to:
+kernel-source-tree/Documentation/blockdev/zram.txt
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
new file mode 100755
index 0000000..20de9a7
--- /dev/null
+++ b/tools/testing/selftests/zram/zram.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+TCID="zram.sh"
+
+check_prereqs()
+{
+	local msg="skip all tests:"
+
+	if [ $UID != 0 ]; then
+		echo $msg must be run as root >&2
+		exit 0
+	fi
+}
+
+run_zram () {
+	echo "--------------------"
+	echo "running zram tests"
+	echo "--------------------"
+	./zram01.sh
+	echo ""
+	./zram02.sh
+}
+
+check_prereqs
+
+# check zram module exists
+MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
+if [ -f $MODULE_PATH ]; then
+	run_zram
+elif [ -b /dev/zram0 ]; then
+	run_zram
+else
+	echo "$TCID : No zram.ko module and no /dev/zram0 device file found"
+	echo "$TCID : CONFIG_ZRAM is not set"
+	exit 1
+fi
diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
new file mode 100755
index 0000000..b9566a6
--- /dev/null
+++ b/tools/testing/selftests/zram/zram01.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it would be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+# GNU General Public License for more details.
+#
+# Test creates several zram devices with different filesystems on them.
+# It fills each device with zeros and checks that compression works.
+#
+# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
+# Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
+
+TCID="zram01"
+ERR_CODE=0
+
+. ./zram_lib.sh
+
+# Test will create the following number of zram devices:
+dev_num=1
+# This is a list of parameters for zram devices.
+# Number of items must be equal to the 'dev_num' parameter.
+zram_max_streams="2"
+
+# The zram sysfs node 'disksize' value can be either in bytes,
+# or you can use mem suffixes. But some old kernels (e.g. the
+# RHEL6.6GA kernel) parse disksize with strict_strtoull(), which
+# does not support mem suffixes, while newer kernels use memparse(),
+# which does. So here we just use bytes to make sure everything
+# works correctly.
+zram_sizes="2097152" # 2MB
+zram_mem_limits="2M"
+zram_filesystems="ext4"
+zram_algs="lzo"
+
+zram_fill_fs()
+{
+	local mem_free0=$(free -m | awk 'NR==2 {print $4}')
+
+	for i in $(seq 0 $(($dev_num - 1))); do
+		echo "fill zram$i..."
+		local b=0
+		while true; do
+			dd conv=notrunc if=/dev/zero of=zram${i}/file \
+				oflag=append count=1 bs=1024 status=none \
+				> /dev/null 2>&1 || break
+			b=$(($b + 1))
+		done
+		echo "zram$i can be filled with '$b' KB"
+	done
+
+	local mem_free1=$(free -m | awk 'NR==2 {print $4}')
+	local used_mem=$(($mem_free0 - $mem_free1))
+
+	local total_size=0
+	for sm in $zram_sizes; do
+		local s=$(echo $sm | sed 's/M//')
+		total_size=$(($total_size + $s))
+	done
+
+	echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
+
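+	# Integer ratio scaled by 100: e.g. 2M of disk data held in 1M of
+	# RAM gives v=200, reported below as a 2.00:1 ratio.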
+	local v=$((100 * $total_size / $used_mem))
+
+	if [ "$v" -lt 100 ]; then
+		echo "FAIL compression ratio: 0.$v:1"
+		ERR_CODE=-1
+		zram_cleanup
+		return
+	fi
+
+	echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
+}
+
+check_prereqs
+zram_load
+zram_max_streams
+zram_compress_alg
+zram_set_disksizes
+zram_set_memlimit
+zram_makefs
+zram_mount
+
+zram_fill_fs
+zram_cleanup
+zram_unload
+
+if [ $ERR_CODE -ne 0 ]; then
+	echo "$TCID : [FAIL]"
+else
+	echo "$TCID : [PASS]"
+fi
diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
new file mode 100755
index 0000000..74569b8
--- /dev/null
+++ b/tools/testing/selftests/zram/zram02.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it would be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+# GNU General Public License for more details.
+#
+# Test checks that we can create a swap zram device.
+#
+# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
+# Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
+
+TCID="zram02"
+ERR_CODE=0
+
+. ./zram_lib.sh
+
+# Test will create the following number of zram devices:
+dev_num=1
+# This is a list of parameters for zram devices.
+# Number of items must be equal to the 'dev_num' parameter.
+zram_max_streams="2"
+
+# The zram sysfs node 'disksize' value can be either in bytes,
+# or you can use mem suffixes. But some old kernels (e.g. the
+# RHEL6.6GA kernel) parse disksize with strict_strtoull(), which
+# does not support mem suffixes, while newer kernels use memparse(),
+# which does. So here we just use bytes to make sure everything
+# works correctly.
+zram_sizes="1048576" # 1M
+zram_mem_limits="1M"
+
+check_prereqs
+zram_load
+zram_max_streams
+zram_set_disksizes
+zram_set_memlimit
+zram_makeswap
+zram_swapoff
+zram_cleanup
+zram_unload
+
+if [ $ERR_CODE -ne 0 ]; then
+	echo "$TCID : [FAIL]"
+else
+	echo "$TCID : [PASS]"
+fi
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
new file mode 100755
index 0000000..424e68e
--- /dev/null
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -0,0 +1,232 @@
+#!/bin/sh
+# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it would be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+# GNU General Public License for more details.
+#
+# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
+# Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
+
+MODULE=0
+dev_makeswap=-1
+dev_mounted=-1
+
+trap INT
+
+check_prereqs()
+{
+	local msg="skip all tests:"
+
+	if [ $UID != 0 ]; then
+		echo $msg must be run as root >&2
+		exit 0
+	fi
+}
+
+zram_cleanup()
+{
+	echo "zram cleanup"
+	local i=
+	for i in $(seq 0 $dev_makeswap); do
+		swapoff /dev/zram$i
+	done
+
+	for i in $(seq 0 $dev_mounted); do
+		umount /dev/zram$i
+	done
+
+	for i in $(seq 0 $(($dev_num - 1))); do
+		echo 1 > /sys/block/zram${i}/reset
+		rm -rf zram$i
+	done
+}
+
+zram_unload()
+{
+	if [ $MODULE -ne 0 ] ; then
+		echo "zram rmmod zram"
+		rmmod zram > /dev/null 2>&1
+	fi
+}
+
+zram_load()
+{
+	# check zram module exists
+	MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
+	if [ -f $MODULE_PATH ]; then
+		MODULE=1
+		echo "create '$dev_num' zram device(s)"
+		modprobe zram num_devices=$dev_num
+		if [ $? -ne 0 ]; then
+			echo "failed to insert zram module"
+			exit 1
+		fi
+
+		dev_num_created=$(ls /dev/zram* | wc -w)
+
+		if [ "$dev_num_created" -ne "$dev_num" ]; then
+			echo "unexpected num of devices: $dev_num_created"
+			ERR_CODE=-1
+		else
+			echo "zram module loaded successfully"
+		fi
+	elif [ -b /dev/zram0 ]; then
+		echo "/dev/zram0 device file found: OK"
+	else
+		echo "ERROR: No zram.ko module or no /dev/zram0 device found"
+		echo "$TCID : CONFIG_ZRAM is not set"
+		exit 1
+	fi
+}
+
+zram_max_streams()
+{
+	echo "set max_comp_streams to zram device(s)"
+
+	local i=0
+	for max_s in $zram_max_streams; do
+		local sys_path="/sys/block/zram${i}/max_comp_streams"
+		echo $max_s > $sys_path || \
+			echo "FAIL failed to set '$max_s' to $sys_path"
+		sleep 1
+		local max_streams=$(cat $sys_path)
+
+		[ "$max_s" -ne "$max_streams" ] && \
+			echo "FAIL can't set max_streams '$max_s', got '$max_streams'"
+
+		i=$(($i + 1))
+		echo "$sys_path = '$max_streams' ($i/$dev_num)"
+	done
+
+	echo "zram max streams: OK"
+}
+
+zram_compress_alg()
+{
+	echo "test that we can set compression algorithm"
+
+	local algs=$(cat /sys/block/zram0/comp_algorithm)
+	echo "supported algs: $algs"
+	local i=0
+	for alg in $zram_algs; do
+		local sys_path="/sys/block/zram${i}/comp_algorithm"
+		echo "$alg" > $sys_path || \
+			echo "FAIL can't set '$alg' to $sys_path"
+		i=$(($i + 1))
+		echo "$sys_path = '$alg' ($i/$dev_num)"
+	done
+
+	echo "zram set compression algorithm: OK"
+}
+
+zram_set_disksizes()
+{
+	echo "set disk size to zram device(s)"
+	local i=0
+	for ds in $zram_sizes; do
+		local sys_path="/sys/block/zram${i}/disksize"
+		echo "$ds" > $sys_path || \
+			echo "FAIL can't set '$ds' to $sys_path"
+
+		i=$(($i + 1))
+		echo "$sys_path = '$ds' ($i/$dev_num)"
+	done
+
+	echo "zram set disksizes: OK"
+}
+
+zram_set_memlimit()
+{
+	echo "set memory limit to zram device(s)"
+
+	local i=0
+	for ds in $zram_mem_limits; do
+		local sys_path="/sys/block/zram${i}/mem_limit"
+		echo "$ds" > $sys_path || \
+			echo "FAIL can't set '$ds' to $sys_path"
+
+		i=$(($i + 1))
+		echo "$sys_path = '$ds' ($i/$dev_num)"
+	done
+
+	echo "zram set memory limit: OK"
+}
+
+zram_makeswap()
+{
+	echo "make swap with zram device(s)"
+	local i=0
+	for i in $(seq 0 $(($dev_num - 1))); do
+		mkswap /dev/zram$i > err.log 2>&1
+		if [ $? -ne 0 ]; then
+			cat err.log
+			echo "FAIL mkswap /dev/zram$i failed"
+		fi
+
+		swapon /dev/zram$i > err.log 2>&1
+		if [ $? -ne 0 ]; then
+			cat err.log
+			echo "FAIL swapon /dev/zram$i failed"
+		fi
+
+		echo "done with /dev/zram$i"
+		dev_makeswap=$i
+	done
+
+	echo "zram mkswap and swapon: OK"
+}
+
+zram_swapoff()
+{
+	local i=
+	for i in $(seq 0 $dev_makeswap); do
+		swapoff /dev/zram$i > err.log 2>&1
+		if [ $? -ne 0 ]; then
+			cat err.log
+			echo "FAIL swapoff /dev/zram$i failed"
+		fi
+	done
+	dev_makeswap=-1
+
+	echo "zram swapoff: OK"
+}
+
+zram_makefs()
+{
+	local i=0
+	for fs in $zram_filesystems; do
+		# if the requested fs is not supported, fall back to ext2
+		which mkfs.$fs > /dev/null 2>&1 || fs=ext2
+
+		echo "make $fs filesystem on /dev/zram$i"
+		mkfs.$fs /dev/zram$i > err.log 2>&1
+		if [ $? -ne 0 ]; then
+			cat err.log
+			echo "FAIL failed to make $fs on /dev/zram$i"
+		fi
+		i=$(($i + 1))
+		echo "zram mkfs.$fs: OK"
+	done
+}
+
+zram_mount()
+{
+	local i=0
+	for i in $(seq 0 $(($dev_num - 1))); do
+		echo "mount /dev/zram$i"
+		mkdir zram$i
+		mount /dev/zram$i zram$i > /dev/null || \
+			echo "FAIL mount /dev/zram$i failed"
+		dev_mounted=$i
+	done
+
+	echo "zram mount of zram device(s): OK"
+}
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 8bdf16b..7f73fa3 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -57,23 +57,15 @@
  * pagemap kernel ABI bits
  */
 
-#define PM_ENTRY_BYTES      sizeof(uint64_t)
-#define PM_STATUS_BITS      3
-#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
-#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
-#define PM_PSHIFT_BITS      6
-#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
-#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
-#define __PM_PSHIFT(x)      (((uint64_t) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
-#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
-#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
-
-#define __PM_SOFT_DIRTY      (1LL)
-#define PM_PRESENT          PM_STATUS(4LL)
-#define PM_SWAP             PM_STATUS(2LL)
-#define PM_SOFT_DIRTY       __PM_PSHIFT(__PM_SOFT_DIRTY)
-
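+/*
+ * Pagemap entry layout (see Documentation/vm/pagemap.txt): bits 0-54
+ * hold the PFN (or swap type/offset), bit 55 soft-dirty, bit 56 page
+ * exclusively mapped, bit 61 file-page or shared-anon, bit 62 swapped,
+ * bit 63 present.
+ */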
+#define PM_ENTRY_BYTES		8
+#define PM_PFRAME_BITS		55
+#define PM_PFRAME_MASK		((1LL << PM_PFRAME_BITS) - 1)
+#define PM_PFRAME(x)		((x) & PM_PFRAME_MASK)
+#define PM_SOFT_DIRTY		(1ULL << 55)
+#define PM_MMAP_EXCLUSIVE	(1ULL << 56)
+#define PM_FILE			(1ULL << 61)
+#define PM_SWAP			(1ULL << 62)
+#define PM_PRESENT		(1ULL << 63)
 
 /*
  * kernel page flags
@@ -100,6 +92,8 @@
 #define KPF_SLOB_FREE		49
 #define KPF_SLUB_FROZEN		50
 #define KPF_SLUB_DEBUG		51
+#define KPF_FILE		62
+#define KPF_MMAP_EXCLUSIVE	63
 
 #define KPF_ALL_BITS		((uint64_t)~0ULL)
 #define KPF_HACKERS_BITS	(0xffffULL << 32)
@@ -149,6 +143,9 @@
 	[KPF_SLOB_FREE]		= "P:slob_free",
 	[KPF_SLUB_FROZEN]	= "A:slub_frozen",
 	[KPF_SLUB_DEBUG]	= "E:slub_debug",
+
+	[KPF_FILE]		= "F:file",
+	[KPF_MMAP_EXCLUSIVE]	= "1:mmap_exclusive",
 };
 
 
@@ -452,6 +449,10 @@
 
 	if (pme & PM_SOFT_DIRTY)
 		flags |= BIT(SOFTDIRTY);
+	if (pme & PM_FILE)
+		flags |= BIT(FILE);
+	if (pme & PM_MMAP_EXCLUSIVE)
+		flags |= BIT(MMAP_EXCLUSIVE);
 
 	return flags;
 }